Add most of base/ build/ buildtools/ testing/ third_party/googletest/

Enough to make ./tools/gn/bootstrap/bootstrap.py work on Linux.

Change-Id: I94de95f1ce87dd3672d1a99c62254edee8be45bd
Reviewed-on: https://gn-review.googlesource.com/1100
Reviewed-by: Petr Hosek <phosek@google.com>
Commit-Queue: Scott Graham <scottmg@chromium.org>
diff --git a/base/BUILD.gn b/base/BUILD.gn
new file mode 100644
index 0000000..28542b2
--- /dev/null
+++ b/base/BUILD.gn
@@ -0,0 +1,3164 @@
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# HOW TO WRITE CONDITIONALS IN THIS FILE
+# ======================================
+#
+# In many other places, one would write a conditional that expresses all the
+# cases when a source file is used or unused, and then either add or subtract
+# it from the sources list in that case.
+#
+# Since base includes so many low-level things that vary widely and
+# unpredictably for the various build types, we prefer a slightly different
+# style. Instead, there are big per-platform blocks of inclusions and
+# exclusions. If a given file has an inclusion or exclusion rule that applies
+# for multiple conditions, prefer to duplicate it in both lists. This makes it
+# a bit easier to see which files apply in which cases rather than having a
+# huge sequence of random-looking conditionals.
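+#
+# For example (foo_mac.cc is a hypothetical file, not part of this target),
+# if a file must be excluded on both Windows and NaCl, prefer:
+#
+#   if (is_win) {
+#     sources -= [ "foo_mac.cc" ]
+#   }
+#   if (is_nacl) {
+#     sources -= [ "foo_mac.cc" ]
+#   }
+#
+# over a combined "if (is_win || is_nacl)" condition, so that each platform
+# block stays a self-contained list.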
+
+import("//build/buildflag_header.gni")
+import("//build/config/allocator.gni")
+import("//build/config/arm.gni")
+import("//build/config/c++/c++.gni")
+import("//build/config/chromecast_build.gni")
+import("//build/config/compiler/compiler.gni")
+import("//build/config/dcheck_always_on.gni")
+import("//build/config/jumbo.gni")
+import("//build/config/nacl/config.gni")
+import("//build/config/sysroot.gni")
+import("//build/config/ui.gni")
+import("//build/nocompile.gni")
+import("//testing/libfuzzer/fuzzer_test.gni")
+import("//testing/test.gni")
+
+declare_args() {
+  # Override this value to give a specific build date.
+  # See //base/build_time.cc and //build/write_build_date_header.py for more
+  # details and the expected format.
+  override_build_date = "N/A"
+
+  # Indicates if the Location object contains the source code information
+  # (file, function, line). False means only the program counter (and,
+  # currently, the file name) is saved.
+  enable_location_source = true
+
+  # Unsafe developer build. Has developer-friendly features that may weaken or
+  # disable security measures like sandboxing or ASLR.
+  # IMPORTANT: Unsafe developer builds should never be distributed to end users.
+  is_unsafe_developer_build = !is_official_build
+
+  # Set to true to disable COM init check hooks.
+  com_init_check_hook_disabled = false
+
+  # Set to true to enable mutex priority inheritance. See the comments in
+  # LockImpl::PriorityInheritanceAvailable() in lock_impl_posix.cc for the
+  # platform requirements to safely enable priority inheritance.
+  enable_mutex_priority_inheritance = false
+}
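+
+# Like any declare_args() values, the arguments above can be overridden when
+# generating the build files, e.g. (output directory hypothetical):
+#
+#   gn gen out/Default --args="enable_location_source = false"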
+
+# Determines whether libevent should be a dependency.
+dep_libevent = !is_fuchsia && !is_win && !(is_nacl && !is_nacl_nonsfi)
+
+# Determines whether message_pump_libevent should be used.
+use_libevent = dep_libevent && !is_ios
+
+if (is_android) {
+  import("//build/config/android/rules.gni")
+}
+
+if (is_fuchsia) {
+  import("//third_party/fuchsia-sdk/fidl_library.gni")
+}
+
+config("base_flags") {
+  if (is_clang) {
+    cflags = [
+      # Don't die on dtoa code that uses a char as an array index.
+      # This is required solely for base/third_party/dmg_fp/dtoa_wrapper.cc.
+      "-Wno-char-subscripts",
+
+      # Ideally all product code (but no test code) in Chrome would have these
+      # flags, but that isn't trivial, so start with //base as a minimum
+      # requirement.
+      # https://groups.google.com/a/chromium.org/d/topic/chromium-dev/B9Q5KTD7iCo/discussion
+      "-Wglobal-constructors",
+      "-Wexit-time-destructors",
+    ]
+  }
+}
+
+config("base_implementation") {
+  defines = [ "BASE_IMPLEMENTATION" ]
+  configs = [ "//build/config/compiler:wexit_time_destructors" ]
+}
+
+if (is_win) {
+  # This is in a separate config so the flags can be applied to dependents.
+  # ldflags in GN aren't automatically inherited.
+  config("base_win_linker_flags") {
+    ldflags = [
+      "/DELAYLOAD:cfgmgr32.dll",
+      "/DELAYLOAD:powrprof.dll",
+      "/DELAYLOAD:setupapi.dll",
+    ]
+  }
+}
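+
+# The "base" target below attaches these flags to all dependents via
+# all_dependent_configs, roughly:
+#
+#   all_dependent_configs += [ ":base_win_linker_flags" ]
+#
+# so anything that links against //base picks up the /DELAYLOAD flags.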
+
+if (is_nacl_nonsfi) {
+  # Must be in a config because of how GN orders flags (otherwise -Wall will
+  # appear after this, and turn it back on).
+  config("nacl_nonsfi_warnings") {
+    # file_util_posix.cc contains a function that is not
+    # used by nacl_helper_nonsfi.
+    cflags = [ "-Wno-unused-function" ]
+  }
+}
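+
+# A sketch of the ordering problem the comment above describes (the default
+# config name here is hypothetical): GN emits a target's own cflags before
+# the cflags contributed by its configs, so
+#
+#   cflags = [ "-Wno-unused-function" ]  # emitted first
+#   configs = [ ":default_warnings" ]    # supplies -Wall, emitted after
+#
+# would let -Wall re-enable the warning, while appending the config above to
+# the end of the configs list keeps -Wno-unused-function after -Wall.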
+
+if (is_android) {
+  config("android_system_libs") {
+    libs = [ "log" ]  # Used by logging.cc.
+  }
+}
+
+# Base and everything it depends on should be a static library rather than
+# a source set. Base is more of a "library" in the classic sense in that many
+# small parts of it are used in many different contexts. This combined with a
+# few static initializers floating around means that dead code stripping
+# still leaves a lot of code behind that isn't always used. For example, this
+# saves more than 40K for a smaller target like chrome_elf.
+#
+# Use static libraries for the helper targets as well, like //base/debug,
+# since those refer back to base code and, as source sets, would force base
+# compilation units to be linked in where they otherwise wouldn't be. This
+# does not include test code (test support and anything in the test
+# directory), which should use source_set as is recommended for GN targets.
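+#
+# jumbo_component() comes from //build/config/jumbo.gni (imported above); it
+# behaves like component() but may merge many .cc files into a few "jumbo"
+# translation units to speed compilation, which is why some files below are
+# opted out via jumbo_excluded_sources or never_build_jumbo.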
+jumbo_component("base") {
+  if (is_nacl_nonsfi) {
+    # TODO(phosek) bug 570839: If field_trial.cc is in a static library,
+    # nacl_helper_nonsfi doesn't link properly on Linux in debug builds. The
+    # reasons for this seem to involve obscure toolchain bugs. This should be
+    # fixed and this target should always be a static_library in the
+    # non-component case.
+    static_component_type = "source_set"
+  }
+  if (is_nacl || is_ios) {
+    # Compiling libbase for NaCl with jumbo causes link errors related to
+    # malloc functions: https://crbug.com/775959.
+    # Same for iOS: https://crbug.com/776313.
+    never_build_jumbo = true
+  }
+
+  sources = [
+    "allocator/allocator_check.cc",
+    "allocator/allocator_check.h",
+    "allocator/allocator_extension.cc",
+    "allocator/allocator_extension.h",
+    "allocator/allocator_interception_mac.h",
+    "allocator/allocator_interception_mac.mm",
+    "allocator/allocator_shim.h",
+    "allocator/malloc_zone_functions_mac.cc",
+    "allocator/malloc_zone_functions_mac.h",
+    "android/android_hardware_buffer_abi.h",
+    "android/android_hardware_buffer_compat.cc",
+    "android/android_hardware_buffer_compat.h",
+    "android/animation_frame_time_histogram.cc",
+    "android/apk_assets.cc",
+    "android/apk_assets.h",
+    "android/application_status_listener.cc",
+    "android/application_status_listener.h",
+    "android/base_jni_onload.cc",
+    "android/base_jni_onload.h",
+    "android/build_info.cc",
+    "android/build_info.h",
+    "android/callback_android.cc",
+    "android/callback_android.h",
+    "android/child_process_service.cc",
+    "android/command_line_android.cc",
+    "android/content_uri_utils.cc",
+    "android/content_uri_utils.h",
+    "android/cpu_features.cc",
+    "android/early_trace_event_binding.cc",
+    "android/event_log.cc",
+    "android/event_log.h",
+    "android/field_trial_list.cc",
+    "android/important_file_writer_android.cc",
+    "android/java_exception_reporter.cc",
+    "android/java_exception_reporter.h",
+    "android/java_handler_thread.cc",
+    "android/java_handler_thread.h",
+    "android/java_runtime.cc",
+    "android/java_runtime.h",
+    "android/jni_android.cc",
+    "android/jni_android.h",
+    "android/jni_array.cc",
+    "android/jni_array.h",
+    "android/jni_generator/jni_generator_helper.h",
+    "android/jni_int_wrapper.h",
+    "android/jni_registrar.cc",
+    "android/jni_registrar.h",
+    "android/jni_string.cc",
+    "android/jni_string.h",
+    "android/jni_utils.cc",
+    "android/jni_utils.h",
+    "android/jni_weak_ref.cc",
+    "android/jni_weak_ref.h",
+    "android/library_loader/anchor_functions.cc",
+    "android/library_loader/anchor_functions.h",
+    "android/library_loader/library_load_from_apk_status_codes.h",
+    "android/library_loader/library_loader_hooks.cc",
+    "android/library_loader/library_loader_hooks.h",
+    "android/library_loader/library_prefetcher.cc",
+    "android/library_loader/library_prefetcher.h",
+    "android/locale_utils.cc",
+    "android/locale_utils.h",
+    "android/memory_pressure_listener_android.cc",
+    "android/memory_pressure_listener_android.h",
+    "android/path_service_android.cc",
+    "android/path_utils.cc",
+    "android/path_utils.h",
+    "android/record_histogram.cc",
+    "android/record_user_action.cc",
+    "android/scoped_hardware_buffer_handle.cc",
+    "android/scoped_hardware_buffer_handle.h",
+    "android/scoped_java_ref.cc",
+    "android/scoped_java_ref.h",
+    "android/statistics_recorder_android.cc",
+    "android/sys_utils.cc",
+    "android/sys_utils.h",
+    "android/throw_uncaught_exception.cc",
+    "android/throw_uncaught_exception.h",
+    "android/time_utils.cc",
+    "android/timezone_utils.cc",
+    "android/timezone_utils.h",
+    "android/trace_event_binding.cc",
+    "android/unguessable_token_android.cc",
+    "android/unguessable_token_android.h",
+    "at_exit.cc",
+    "at_exit.h",
+    "atomic_ref_count.h",
+    "atomic_sequence_num.h",
+    "atomicops.h",
+    "atomicops_internals_atomicword_compat.h",
+    "atomicops_internals_portable.h",
+    "atomicops_internals_x86_msvc.h",
+    "auto_reset.h",
+    "barrier_closure.cc",
+    "barrier_closure.h",
+    "base64.cc",
+    "base64.h",
+    "base64url.cc",
+    "base64url.h",
+    "base_export.h",
+    "base_switches.h",
+    "big_endian.cc",
+    "big_endian.h",
+    "bind.h",
+    "bind_helpers.h",
+    "bind_internal.h",
+    "bit_cast.h",
+    "bits.h",
+    "build_time.cc",
+    "build_time.h",
+    "callback.h",
+    "callback_forward.h",
+    "callback_helpers.cc",
+    "callback_helpers.h",
+    "callback_internal.cc",
+    "callback_internal.h",
+    "callback_list.h",
+    "cancelable_callback.h",
+    "command_line.cc",
+    "command_line.h",
+    "compiler_specific.h",
+    "component_export.h",
+    "containers/adapters.h",
+    "containers/circular_deque.h",
+    "containers/flat_map.h",
+    "containers/flat_set.h",
+    "containers/flat_tree.h",
+    "containers/hash_tables.h",
+    "containers/id_map.h",
+    "containers/linked_list.h",
+    "containers/mru_cache.h",
+    "containers/small_map.h",
+    "containers/span.h",
+    "containers/stack.h",
+    "containers/stack_container.h",
+    "containers/unique_ptr_adapters.h",
+    "containers/vector_buffer.h",
+    "cpu.cc",
+    "cpu.h",
+    "critical_closure.h",
+    "critical_closure_internal_ios.mm",
+
+    # This file depends on files from the "debug/allocator" target,
+    # but this target does not depend on "debug/allocator".
+    "debug/activity_analyzer.cc",
+    "debug/activity_analyzer.h",
+    "debug/activity_tracker.cc",
+    "debug/activity_tracker.h",
+    "debug/alias.cc",
+    "debug/alias.h",
+    "debug/asan_invalid_access.cc",
+    "debug/asan_invalid_access.h",
+    "debug/close_handle_hook_win.cc",
+    "debug/close_handle_hook_win.h",
+    "debug/crash_logging.cc",
+    "debug/crash_logging.h",
+    "debug/debugger.cc",
+    "debug/debugger.h",
+    "debug/debugger_win.cc",
+    "debug/dump_without_crashing.cc",
+    "debug/dump_without_crashing.h",
+    "debug/gdi_debug_util_win.cc",
+    "debug/gdi_debug_util_win.h",
+    "debug/leak_annotations.h",
+    "debug/leak_tracker.h",
+    "debug/proc_maps_linux.cc",
+    "debug/proc_maps_linux.h",
+    "debug/profiler.cc",
+    "debug/profiler.h",
+    "debug/stack_trace.cc",
+    "debug/stack_trace.h",
+    "debug/stack_trace_android.cc",
+    "debug/stack_trace_win.cc",
+    "debug/task_annotator.cc",
+    "debug/task_annotator.h",
+    "debug/thread_heap_usage_tracker.cc",
+    "debug/thread_heap_usage_tracker.h",
+    "deferred_sequenced_task_runner.cc",
+    "deferred_sequenced_task_runner.h",
+    "environment.cc",
+    "environment.h",
+    "export_template.h",
+    "feature_list.cc",
+    "feature_list.h",
+    "file_descriptor_store.cc",
+    "file_descriptor_store.h",
+    "file_version_info.h",
+    "file_version_info_mac.h",
+    "file_version_info_mac.mm",
+    "file_version_info_win.cc",
+    "file_version_info_win.h",
+    "files/dir_reader_fallback.h",
+    "files/dir_reader_linux.h",
+    "files/file.cc",
+    "files/file.h",
+    "files/file_enumerator.cc",
+    "files/file_enumerator.h",
+    "files/file_enumerator_win.cc",
+    "files/file_path.cc",
+    "files/file_path.h",
+    "files/file_path_constants.cc",
+    "files/file_path_watcher.cc",
+    "files/file_path_watcher.h",
+    "files/file_path_watcher_fsevents.cc",
+    "files/file_path_watcher_fsevents.h",
+    "files/file_path_watcher_kqueue.cc",
+    "files/file_path_watcher_kqueue.h",
+    "files/file_path_watcher_linux.cc",
+    "files/file_path_watcher_mac.cc",
+    "files/file_path_watcher_win.cc",
+    "files/file_proxy.cc",
+    "files/file_proxy.h",
+    "files/file_tracing.cc",
+    "files/file_tracing.h",
+    "files/file_util.cc",
+    "files/file_util.h",
+    "files/file_util_android.cc",
+    "files/file_util_linux.cc",
+    "files/file_util_mac.mm",
+    "files/file_util_win.cc",
+    "files/file_win.cc",
+    "files/important_file_writer.cc",
+    "files/important_file_writer.h",
+    "files/memory_mapped_file.cc",
+    "files/memory_mapped_file.h",
+    "files/memory_mapped_file_win.cc",
+    "files/platform_file.h",
+    "files/scoped_file.cc",
+    "files/scoped_file.h",
+    "files/scoped_temp_dir.cc",
+    "files/scoped_temp_dir.h",
+    "format_macros.h",
+    "gtest_prod_util.h",
+    "guid.cc",
+    "guid.h",
+    "hash.cc",
+    "hash.h",
+    "ios/block_types.h",
+    "ios/crb_protocol_observers.h",
+    "ios/crb_protocol_observers.mm",
+    "ios/device_util.h",
+    "ios/device_util.mm",
+    "ios/ios_util.h",
+    "ios/ios_util.mm",
+    "ios/ns_error_util.h",
+    "ios/ns_error_util.mm",
+    "ios/scoped_critical_action.h",
+    "ios/scoped_critical_action.mm",
+    "ios/weak_nsobject.h",
+    "ios/weak_nsobject.mm",
+    "json/json_file_value_serializer.cc",
+    "json/json_file_value_serializer.h",
+    "json/json_parser.cc",
+    "json/json_parser.h",
+    "json/json_reader.cc",
+    "json/json_reader.h",
+    "json/json_string_value_serializer.cc",
+    "json/json_string_value_serializer.h",
+    "json/json_value_converter.cc",
+    "json/json_value_converter.h",
+    "json/json_writer.cc",
+    "json/json_writer.h",
+    "json/string_escape.cc",
+    "json/string_escape.h",
+    "lazy_instance.h",
+    "lazy_instance_helpers.cc",
+    "lazy_instance_helpers.h",
+    "linux_util.cc",
+    "linux_util.h",
+    "location.cc",
+    "location.h",
+    "logging.cc",
+    "logging.h",
+    "logging_win.cc",
+    "logging_win.h",
+    "mac/authorization_util.h",
+    "mac/authorization_util.mm",
+    "mac/availability.h",
+    "mac/bind_objc_block.h",
+    "mac/bundle_locations.h",
+    "mac/bundle_locations.mm",
+    "mac/call_with_eh_frame.cc",
+    "mac/call_with_eh_frame.h",
+    "mac/call_with_eh_frame_asm.S",
+    "mac/close_nocancel.cc",
+    "mac/dispatch_source_mach.cc",
+    "mac/dispatch_source_mach.h",
+    "mac/foundation_util.h",
+    "mac/foundation_util.mm",
+    "mac/launch_services_util.h",
+    "mac/launch_services_util.mm",
+    "mac/launchd.cc",
+    "mac/launchd.h",
+    "mac/mac_logging.h",
+    "mac/mac_logging.mm",
+    "mac/mac_util.h",
+    "mac/mac_util.mm",
+    "mac/mach_logging.cc",
+    "mac/mach_logging.h",
+    "mac/mach_port_broker.h",
+    "mac/mach_port_broker.mm",
+    "mac/mach_port_util.cc",
+    "mac/mach_port_util.h",
+    "mac/objc_release_properties.h",
+    "mac/objc_release_properties.mm",
+    "mac/os_crash_dumps.cc",
+    "mac/os_crash_dumps.h",
+    "mac/scoped_aedesc.h",
+    "mac/scoped_authorizationref.h",
+    "mac/scoped_block.h",
+    "mac/scoped_cffiledescriptorref.h",
+    "mac/scoped_cftyperef.h",
+    "mac/scoped_dispatch_object.h",
+    "mac/scoped_ionotificationportref.h",
+    "mac/scoped_ioobject.h",
+    "mac/scoped_ioplugininterface.h",
+    "mac/scoped_launch_data.h",
+    "mac/scoped_mach_port.cc",
+    "mac/scoped_mach_port.h",
+    "mac/scoped_mach_vm.cc",
+    "mac/scoped_mach_vm.h",
+    "mac/scoped_nsautorelease_pool.h",
+    "mac/scoped_nsautorelease_pool.mm",
+    "mac/scoped_nsobject.h",
+    "mac/scoped_nsobject.mm",
+    "mac/scoped_objc_class_swizzler.h",
+    "mac/scoped_objc_class_swizzler.mm",
+    "mac/scoped_sending_event.h",
+    "mac/scoped_sending_event.mm",
+    "mac/sdk_forward_declarations.h",
+    "mac/sdk_forward_declarations.mm",
+    "macros.h",
+    "md5.cc",
+    "md5.h",
+    "memory/aligned_memory.cc",
+    "memory/aligned_memory.h",
+    "memory/discardable_memory.cc",
+    "memory/discardable_memory.h",
+    "memory/discardable_memory_allocator.cc",
+    "memory/discardable_memory_allocator.h",
+    "memory/discardable_shared_memory.cc",
+    "memory/discardable_shared_memory.h",
+    "memory/free_deleter.h",
+    "memory/linked_ptr.h",
+    "memory/memory_coordinator_client.cc",
+    "memory/memory_coordinator_client.h",
+    "memory/memory_coordinator_client_registry.cc",
+    "memory/memory_coordinator_client_registry.h",
+    "memory/memory_coordinator_proxy.cc",
+    "memory/memory_coordinator_proxy.h",
+    "memory/memory_pressure_listener.cc",
+    "memory/memory_pressure_listener.h",
+    "memory/memory_pressure_monitor.cc",
+    "memory/memory_pressure_monitor.h",
+    "memory/memory_pressure_monitor_chromeos.cc",
+    "memory/memory_pressure_monitor_chromeos.h",
+    "memory/memory_pressure_monitor_mac.cc",
+    "memory/memory_pressure_monitor_mac.h",
+    "memory/memory_pressure_monitor_win.cc",
+    "memory/memory_pressure_monitor_win.h",
+    "memory/platform_shared_memory_region.cc",
+    "memory/platform_shared_memory_region.h",
+    "memory/protected_memory.cc",
+    "memory/protected_memory.h",
+    "memory/protected_memory_cfi.h",
+    "memory/protected_memory_win.cc",
+    "memory/ptr_util.h",
+    "memory/raw_scoped_refptr_mismatch_checker.h",
+    "memory/read_only_shared_memory_region.cc",
+    "memory/read_only_shared_memory_region.h",
+    "memory/ref_counted.cc",
+    "memory/ref_counted.h",
+    "memory/ref_counted_delete_on_sequence.h",
+    "memory/ref_counted_memory.cc",
+    "memory/ref_counted_memory.h",
+    "memory/scoped_policy.h",
+    "memory/scoped_refptr.h",
+    "memory/shared_memory.h",
+    "memory/shared_memory_handle.cc",
+    "memory/shared_memory_handle.h",
+    "memory/shared_memory_helper.cc",
+    "memory/shared_memory_helper.h",
+    "memory/shared_memory_mapping.cc",
+    "memory/shared_memory_mapping.h",
+    "memory/shared_memory_tracker.cc",
+    "memory/shared_memory_tracker.h",
+    "memory/singleton.h",
+    "memory/unsafe_shared_memory_region.cc",
+    "memory/unsafe_shared_memory_region.h",
+    "memory/weak_ptr.cc",
+    "memory/weak_ptr.h",
+    "memory/writable_shared_memory_region.cc",
+    "memory/writable_shared_memory_region.h",
+    "message_loop/incoming_task_queue.cc",
+    "message_loop/incoming_task_queue.h",
+    "message_loop/message_loop.cc",
+    "message_loop/message_loop.h",
+    "message_loop/message_loop_current.cc",
+    "message_loop/message_loop_current.h",
+    "message_loop/message_loop_task_runner.cc",
+    "message_loop/message_loop_task_runner.h",
+    "message_loop/message_pump.cc",
+    "message_loop/message_pump.h",
+    "message_loop/message_pump_android.cc",
+    "message_loop/message_pump_android.h",
+    "message_loop/message_pump_default.cc",
+    "message_loop/message_pump_default.h",
+    "message_loop/message_pump_for_io.h",
+    "message_loop/message_pump_for_ui.h",
+    "message_loop/message_pump_glib.cc",
+    "message_loop/message_pump_glib.h",
+    "message_loop/message_pump_io_ios.cc",
+    "message_loop/message_pump_io_ios.h",
+    "message_loop/message_pump_mac.h",
+    "message_loop/message_pump_mac.mm",
+    "message_loop/message_pump_win.cc",
+    "message_loop/message_pump_win.h",
+    "message_loop/timer_slack.h",
+    "metrics/bucket_ranges.cc",
+    "metrics/bucket_ranges.h",
+    "metrics/dummy_histogram.cc",
+    "metrics/dummy_histogram.h",
+    "metrics/field_trial.cc",
+    "metrics/field_trial.h",
+    "metrics/field_trial_param_associator.cc",
+    "metrics/field_trial_param_associator.h",
+    "metrics/field_trial_params.cc",
+    "metrics/field_trial_params.h",
+    "metrics/histogram.cc",
+    "metrics/histogram.h",
+    "metrics/histogram_base.cc",
+    "metrics/histogram_base.h",
+    "metrics/histogram_delta_serialization.cc",
+    "metrics/histogram_delta_serialization.h",
+    "metrics/histogram_flattener.h",
+    "metrics/histogram_functions.cc",
+    "metrics/histogram_functions.h",
+    "metrics/histogram_macros.h",
+    "metrics/histogram_macros_internal.h",
+    "metrics/histogram_macros_local.h",
+    "metrics/histogram_samples.cc",
+    "metrics/histogram_samples.h",
+    "metrics/histogram_snapshot_manager.cc",
+    "metrics/histogram_snapshot_manager.h",
+    "metrics/metrics_hashes.cc",
+    "metrics/metrics_hashes.h",
+    "metrics/persistent_histogram_allocator.cc",
+    "metrics/persistent_histogram_allocator.h",
+    "metrics/persistent_memory_allocator.cc",
+    "metrics/persistent_memory_allocator.h",
+    "metrics/persistent_sample_map.cc",
+    "metrics/persistent_sample_map.h",
+    "metrics/record_histogram_checker.h",
+    "metrics/sample_map.cc",
+    "metrics/sample_map.h",
+    "metrics/sample_vector.cc",
+    "metrics/sample_vector.h",
+    "metrics/single_sample_metrics.cc",
+    "metrics/single_sample_metrics.h",
+    "metrics/sparse_histogram.cc",
+    "metrics/sparse_histogram.h",
+    "metrics/statistics_recorder.cc",
+    "metrics/statistics_recorder.h",
+    "metrics/user_metrics.cc",
+    "metrics/user_metrics.h",
+    "metrics/user_metrics_action.h",
+    "native_library.cc",
+    "native_library.h",
+    "native_library_ios.mm",
+    "native_library_mac.mm",
+    "native_library_win.cc",
+    "nix/mime_util_xdg.cc",
+    "nix/mime_util_xdg.h",
+    "nix/xdg_util.cc",
+    "nix/xdg_util.h",
+    "no_destructor.h",
+    "observer_list.h",
+    "observer_list_threadsafe.cc",
+    "observer_list_threadsafe.h",
+    "optional.h",
+    "os_compat_android.cc",
+    "os_compat_android.h",
+    "os_compat_nacl.cc",
+    "os_compat_nacl.h",
+    "path_service.cc",
+    "path_service.h",
+    "pending_task.cc",
+    "pending_task.h",
+    "pickle.cc",
+    "pickle.h",
+    "post_task_and_reply_with_result_internal.h",
+    "power_monitor/power_monitor.cc",
+    "power_monitor/power_monitor.h",
+    "power_monitor/power_monitor_device_source.cc",
+    "power_monitor/power_monitor_device_source.h",
+    "power_monitor/power_monitor_source.cc",
+    "power_monitor/power_monitor_source.h",
+    "power_monitor/power_observer.h",
+    "process/internal_linux.cc",
+    "process/internal_linux.h",
+    "process/kill.cc",
+    "process/kill.h",
+    "process/kill_mac.cc",
+    "process/kill_win.cc",
+    "process/launch.cc",
+    "process/launch.h",
+    "process/launch_ios.cc",
+    "process/launch_mac.cc",
+    "process/launch_win.cc",
+    "process/memory.cc",
+    "process/memory.h",
+    "process/memory_linux.cc",
+    "process/memory_mac.mm",
+    "process/memory_win.cc",
+    "process/port_provider_mac.cc",
+    "process/port_provider_mac.h",
+    "process/process.h",
+    "process/process_handle.cc",
+    "process/process_handle.h",
+
+    #"process/process_handle_freebsd.cc",  # Unused in Chromium build.
+    "process/process_handle_linux.cc",
+    "process/process_handle_mac.cc",
+
+    #"process/process_handle_openbsd.cc",  # Unused in Chromium build.
+    "process/process_handle_win.cc",
+    "process/process_info.h",
+    "process/process_info_linux.cc",
+    "process/process_info_mac.cc",
+    "process/process_info_win.cc",
+    "process/process_iterator.cc",
+    "process/process_iterator.h",
+
+    #"process/process_iterator_freebsd.cc",  # Unused in Chromium build.
+    "process/process_iterator_linux.cc",
+    "process/process_iterator_mac.cc",
+
+    #"process/process_iterator_openbsd.cc",  # Unused in Chromium build.
+    "process/process_iterator_win.cc",
+    "process/process_linux.cc",
+    "process/process_mac.cc",
+    "process/process_metrics.cc",
+    "process/process_metrics.h",
+
+    #"process/process_metrics_freebsd.cc",  # Unused in Chromium build.
+    "process/process_metrics_ios.cc",
+    "process/process_metrics_linux.cc",
+    "process/process_metrics_mac.cc",
+
+    #"process/process_metrics_openbsd.cc",  # Unused in Chromium build.
+    "process/process_metrics_win.cc",
+    "process/process_win.cc",
+    "profiler/native_stack_sampler.cc",
+    "profiler/native_stack_sampler.h",
+    "profiler/native_stack_sampler_mac.cc",
+    "profiler/native_stack_sampler_win.cc",
+    "profiler/stack_sampling_profiler.cc",
+    "profiler/stack_sampling_profiler.h",
+    "rand_util.cc",
+    "rand_util.h",
+    "rand_util_nacl.cc",
+    "rand_util_win.cc",
+    "run_loop.cc",
+    "run_loop.h",
+    "sampling_heap_profiler/sampling_heap_profiler.cc",
+    "sampling_heap_profiler/sampling_heap_profiler.h",
+    "scoped_clear_errno.h",
+    "scoped_generic.h",
+    "scoped_native_library.cc",
+    "scoped_native_library.h",
+    "scoped_observer.h",
+    "sequence_checker.h",
+    "sequence_checker_impl.cc",
+    "sequence_checker_impl.h",
+    "sequence_token.cc",
+    "sequence_token.h",
+    "sequenced_task_runner.cc",
+    "sequenced_task_runner.h",
+    "sequenced_task_runner_helpers.h",
+    "sha1.cc",
+    "sha1.h",
+    "single_thread_task_runner.h",
+    "stl_util.h",
+    "strings/char_traits.h",
+    "strings/latin1_string_conversions.cc",
+    "strings/latin1_string_conversions.h",
+    "strings/nullable_string16.cc",
+    "strings/nullable_string16.h",
+    "strings/pattern.cc",
+    "strings/pattern.h",
+    "strings/safe_sprintf.cc",
+    "strings/safe_sprintf.h",
+    "strings/strcat.cc",
+    "strings/strcat.h",
+    "strings/string16.cc",
+    "strings/string16.h",
+    "strings/string_number_conversions.cc",
+    "strings/string_number_conversions.h",
+    "strings/string_piece.cc",
+    "strings/string_piece.h",
+    "strings/string_piece_forward.h",
+    "strings/string_split.cc",
+    "strings/string_split.h",
+    "strings/string_tokenizer.h",
+    "strings/string_util.cc",
+    "strings/string_util.h",
+    "strings/string_util_constants.cc",
+    "strings/string_util_win.h",
+    "strings/stringize_macros.h",
+    "strings/stringprintf.cc",
+    "strings/stringprintf.h",
+    "strings/sys_string_conversions.h",
+    "strings/sys_string_conversions_mac.mm",
+    "strings/sys_string_conversions_win.cc",
+    "strings/utf_offset_string_conversions.cc",
+    "strings/utf_offset_string_conversions.h",
+    "strings/utf_string_conversion_utils.cc",
+    "strings/utf_string_conversion_utils.h",
+    "strings/utf_string_conversions.cc",
+    "strings/utf_string_conversions.h",
+    "supports_user_data.cc",
+    "supports_user_data.h",
+    "sync_socket.h",
+    "sync_socket_win.cc",
+    "synchronization/atomic_flag.cc",
+    "synchronization/atomic_flag.h",
+    "synchronization/cancellation_flag.h",
+    "synchronization/condition_variable.h",
+    "synchronization/condition_variable_win.cc",
+    "synchronization/lock.cc",
+    "synchronization/lock.h",
+    "synchronization/lock_impl.h",
+    "synchronization/lock_impl_win.cc",
+    "synchronization/spin_wait.h",
+    "synchronization/waitable_event.h",
+    "synchronization/waitable_event_mac.cc",
+    "synchronization/waitable_event_watcher.h",
+    "synchronization/waitable_event_watcher_mac.cc",
+    "synchronization/waitable_event_watcher_win.cc",
+    "synchronization/waitable_event_win.cc",
+    "sys_byteorder.h",
+    "sys_info.cc",
+    "sys_info.h",
+    "sys_info_android.cc",
+    "sys_info_chromeos.cc",
+    "sys_info_internal.h",
+    "syslog_logging.cc",
+    "syslog_logging.h",
+
+    #"sys_info_freebsd.cc",  # Unused in Chromium build.
+    "sys_info_ios.mm",
+    "sys_info_linux.cc",
+    "sys_info_mac.mm",
+
+    #"sys_info_openbsd.cc",  # Unused in Chromium build.
+    "sys_info_win.cc",
+    "system_monitor/system_monitor.cc",
+    "system_monitor/system_monitor.h",
+    "task/cancelable_task_tracker.cc",
+    "task/cancelable_task_tracker.h",
+    "task_runner.cc",
+    "task_runner.h",
+    "task_runner_util.h",
+    "task_scheduler/can_schedule_sequence_observer.h",
+    "task_scheduler/delayed_task_manager.cc",
+    "task_scheduler/delayed_task_manager.h",
+    "task_scheduler/environment_config.cc",
+    "task_scheduler/environment_config.h",
+    "task_scheduler/initialization_util.cc",
+    "task_scheduler/initialization_util.h",
+    "task_scheduler/lazy_task_runner.cc",
+    "task_scheduler/lazy_task_runner.h",
+    "task_scheduler/platform_native_worker_pool_win.cc",
+    "task_scheduler/platform_native_worker_pool_win.h",
+    "task_scheduler/post_task.cc",
+    "task_scheduler/post_task.h",
+    "task_scheduler/priority_queue.cc",
+    "task_scheduler/priority_queue.h",
+    "task_scheduler/scheduler_lock.h",
+    "task_scheduler/scheduler_lock_impl.cc",
+    "task_scheduler/scheduler_lock_impl.h",
+    "task_scheduler/scheduler_single_thread_task_runner_manager.cc",
+    "task_scheduler/scheduler_single_thread_task_runner_manager.h",
+    "task_scheduler/scheduler_worker.cc",
+    "task_scheduler/scheduler_worker.h",
+    "task_scheduler/scheduler_worker_observer.h",
+    "task_scheduler/scheduler_worker_params.h",
+    "task_scheduler/scheduler_worker_pool.cc",
+    "task_scheduler/scheduler_worker_pool.h",
+    "task_scheduler/scheduler_worker_pool_impl.cc",
+    "task_scheduler/scheduler_worker_pool_impl.h",
+    "task_scheduler/scheduler_worker_pool_params.cc",
+    "task_scheduler/scheduler_worker_pool_params.h",
+    "task_scheduler/scheduler_worker_stack.cc",
+    "task_scheduler/scheduler_worker_stack.h",
+    "task_scheduler/scoped_set_task_priority_for_current_thread.cc",
+    "task_scheduler/scoped_set_task_priority_for_current_thread.h",
+    "task_scheduler/sequence.cc",
+    "task_scheduler/sequence.h",
+    "task_scheduler/sequence_sort_key.cc",
+    "task_scheduler/sequence_sort_key.h",
+    "task_scheduler/service_thread.cc",
+    "task_scheduler/service_thread.h",
+    "task_scheduler/single_thread_task_runner_thread_mode.h",
+    "task_scheduler/task.cc",
+    "task_scheduler/task.h",
+    "task_scheduler/task_scheduler.cc",
+    "task_scheduler/task_scheduler.h",
+    "task_scheduler/task_scheduler_impl.cc",
+    "task_scheduler/task_scheduler_impl.h",
+    "task_scheduler/task_tracker.cc",
+    "task_scheduler/task_tracker.h",
+    "task_scheduler/task_traits.cc",
+    "task_scheduler/task_traits.h",
+    "task_scheduler/task_traits_details.h",
+    "task_scheduler/tracked_ref.h",
+    "template_util.h",
+    "test/malloc_wrapper.h",
+    "third_party/dmg_fp/dmg_fp.h",
+    "third_party/dmg_fp/dtoa_wrapper.cc",
+    "third_party/dmg_fp/g_fmt.cc",
+    "third_party/icu/icu_utf.cc",
+    "third_party/icu/icu_utf.h",
+    "third_party/nspr/prtime.cc",
+    "third_party/nspr/prtime.h",
+    "third_party/superfasthash/superfasthash.c",
+    "thread_annotations.h",
+    "threading/platform_thread.h",
+    "threading/platform_thread_android.cc",
+    "threading/platform_thread_linux.cc",
+    "threading/platform_thread_mac.mm",
+    "threading/platform_thread_win.cc",
+    "threading/post_task_and_reply_impl.cc",
+    "threading/post_task_and_reply_impl.h",
+    "threading/scoped_blocking_call.cc",
+    "threading/scoped_blocking_call.h",
+    "threading/sequence_local_storage_map.cc",
+    "threading/sequence_local_storage_map.h",
+    "threading/sequence_local_storage_slot.cc",
+    "threading/sequence_local_storage_slot.h",
+    "threading/sequenced_task_runner_handle.cc",
+    "threading/sequenced_task_runner_handle.h",
+    "threading/simple_thread.cc",
+    "threading/simple_thread.h",
+    "threading/thread.cc",
+    "threading/thread.h",
+    "threading/thread_checker.h",
+    "threading/thread_checker_impl.cc",
+    "threading/thread_checker_impl.h",
+    "threading/thread_collision_warner.cc",
+    "threading/thread_collision_warner.h",
+    "threading/thread_id_name_manager.cc",
+    "threading/thread_id_name_manager.h",
+    "threading/thread_local.h",
+    "threading/thread_local_storage.cc",
+    "threading/thread_local_storage.h",
+    "threading/thread_local_storage_win.cc",
+    "threading/thread_restrictions.cc",
+    "threading/thread_restrictions.h",
+    "threading/thread_task_runner_handle.cc",
+    "threading/thread_task_runner_handle.h",
+    "threading/watchdog.cc",
+    "threading/watchdog.h",
+    "time/clock.cc",
+    "time/clock.h",
+    "time/default_clock.cc",
+    "time/default_clock.h",
+    "time/default_tick_clock.cc",
+    "time/default_tick_clock.h",
+    "time/tick_clock.cc",
+    "time/tick_clock.h",
+    "time/time.cc",
+    "time/time.h",
+    "time/time_override.cc",
+    "time/time_override.h",
+    "time/time_to_iso8601.cc",
+    "time/time_to_iso8601.h",
+    "timer/elapsed_timer.cc",
+    "timer/elapsed_timer.h",
+    "timer/hi_res_timer_manager.h",
+    "timer/hi_res_timer_manager_win.cc",
+    "timer/mock_timer.cc",
+    "timer/mock_timer.h",
+    "timer/timer.cc",
+    "timer/timer.h",
+    "trace_event/auto_open_close_event.cc",
+    "trace_event/auto_open_close_event.h",
+    "trace_event/blame_context.cc",
+    "trace_event/blame_context.h",
+    "trace_event/category_registry.cc",
+    "trace_event/category_registry.h",
+    "trace_event/common/trace_event_common.h",
+    "trace_event/event_name_filter.cc",
+    "trace_event/event_name_filter.h",
+    "trace_event/heap_profiler.h",
+    "trace_event/heap_profiler_allocation_context.cc",
+    "trace_event/heap_profiler_allocation_context.h",
+    "trace_event/heap_profiler_allocation_context_tracker.cc",
+    "trace_event/heap_profiler_allocation_context_tracker.h",
+    "trace_event/heap_profiler_event_filter.cc",
+    "trace_event/heap_profiler_event_filter.h",
+    "trace_event/heap_profiler_heap_dump_writer.cc",
+    "trace_event/heap_profiler_heap_dump_writer.h",
+    "trace_event/heap_profiler_serialization_state.cc",
+    "trace_event/heap_profiler_serialization_state.h",
+    "trace_event/heap_profiler_stack_frame_deduplicator.cc",
+    "trace_event/heap_profiler_stack_frame_deduplicator.h",
+    "trace_event/heap_profiler_type_name_deduplicator.cc",
+    "trace_event/heap_profiler_type_name_deduplicator.h",
+    "trace_event/java_heap_dump_provider_android.cc",
+    "trace_event/java_heap_dump_provider_android.h",
+    "trace_event/malloc_dump_provider.cc",
+    "trace_event/malloc_dump_provider.h",
+    "trace_event/memory_allocator_dump.cc",
+    "trace_event/memory_allocator_dump.h",
+    "trace_event/memory_allocator_dump_guid.cc",
+    "trace_event/memory_allocator_dump_guid.h",
+    "trace_event/memory_dump_manager.cc",
+    "trace_event/memory_dump_manager.h",
+    "trace_event/memory_dump_manager_test_utils.h",
+    "trace_event/memory_dump_provider.h",
+    "trace_event/memory_dump_provider_info.cc",
+    "trace_event/memory_dump_provider_info.h",
+    "trace_event/memory_dump_request_args.cc",
+    "trace_event/memory_dump_request_args.h",
+    "trace_event/memory_dump_scheduler.cc",
+    "trace_event/memory_dump_scheduler.h",
+    "trace_event/memory_infra_background_whitelist.cc",
+    "trace_event/memory_infra_background_whitelist.h",
+    "trace_event/memory_peak_detector.cc",
+    "trace_event/memory_peak_detector.h",
+    "trace_event/memory_usage_estimator.cc",
+    "trace_event/memory_usage_estimator.h",
+    "trace_event/process_memory_dump.cc",
+    "trace_event/process_memory_dump.h",
+    "trace_event/trace_buffer.cc",
+    "trace_event/trace_buffer.h",
+    "trace_event/trace_category.h",
+    "trace_event/trace_config.cc",
+    "trace_event/trace_config.h",
+    "trace_event/trace_config_category_filter.cc",
+    "trace_event/trace_config_category_filter.h",
+    "trace_event/trace_event.h",
+    "trace_event/trace_event_android.cc",
+    "trace_event/trace_event_argument.cc",
+    "trace_event/trace_event_argument.h",
+    "trace_event/trace_event_etw_export_win.cc",
+    "trace_event/trace_event_etw_export_win.h",
+    "trace_event/trace_event_filter.cc",
+    "trace_event/trace_event_filter.h",
+    "trace_event/trace_event_impl.cc",
+    "trace_event/trace_event_impl.h",
+    "trace_event/trace_event_memory_overhead.cc",
+    "trace_event/trace_event_memory_overhead.h",
+    "trace_event/trace_event_system_stats_monitor.cc",
+    "trace_event/trace_event_system_stats_monitor.h",
+    "trace_event/trace_log.cc",
+    "trace_event/trace_log.h",
+    "trace_event/trace_log_constants.cc",
+    "trace_event/tracing_agent.cc",
+    "trace_event/tracing_agent.h",
+    "tuple.h",
+    "unguessable_token.cc",
+    "unguessable_token.h",
+    "value_conversions.cc",
+    "value_conversions.h",
+    "value_iterators.cc",
+    "value_iterators.h",
+    "values.cc",
+    "values.h",
+    "version.cc",
+    "version.h",
+    "vlog.cc",
+    "vlog.h",
+    "win/async_operation.h",
+    "win/com_init_check_hook.cc",
+    "win/com_init_check_hook.h",
+    "win/com_init_util.cc",
+    "win/com_init_util.h",
+    "win/core_winrt_util.cc",
+    "win/core_winrt_util.h",
+    "win/current_module.h",
+    "win/enum_variant.cc",
+    "win/enum_variant.h",
+    "win/event_trace_consumer.h",
+    "win/event_trace_controller.cc",
+    "win/event_trace_controller.h",
+    "win/event_trace_provider.cc",
+    "win/event_trace_provider.h",
+    "win/i18n.cc",
+    "win/i18n.h",
+    "win/iat_patch_function.cc",
+    "win/iat_patch_function.h",
+    "win/iunknown_impl.cc",
+    "win/iunknown_impl.h",
+    "win/message_window.cc",
+    "win/message_window.h",
+    "win/object_watcher.cc",
+    "win/object_watcher.h",
+    "win/patch_util.cc",
+    "win/patch_util.h",
+    "win/process_startup_helper.cc",
+    "win/process_startup_helper.h",
+    "win/registry.cc",
+    "win/registry.h",
+    "win/resource_util.cc",
+    "win/resource_util.h",
+    "win/scoped_bstr.cc",
+    "win/scoped_bstr.h",
+    "win/scoped_co_mem.h",
+    "win/scoped_com_initializer.cc",
+    "win/scoped_com_initializer.h",
+    "win/scoped_gdi_object.h",
+    "win/scoped_handle.cc",
+    "win/scoped_handle.h",
+    "win/scoped_handle_verifier.cc",
+    "win/scoped_handle_verifier.h",
+    "win/scoped_hdc.h",
+    "win/scoped_hglobal.h",
+    "win/scoped_hstring.cc",
+    "win/scoped_hstring.h",
+    "win/scoped_process_information.cc",
+    "win/scoped_process_information.h",
+    "win/scoped_propvariant.h",
+    "win/scoped_select_object.h",
+    "win/scoped_variant.cc",
+    "win/scoped_variant.h",
+    "win/scoped_windows_thread_environment.h",
+    "win/scoped_winrt_initializer.cc",
+    "win/scoped_winrt_initializer.h",
+    "win/shortcut.cc",
+    "win/shortcut.h",
+    "win/startup_information.cc",
+    "win/startup_information.h",
+    "win/typed_event_handler.h",
+    "win/wait_chain.cc",
+    "win/wait_chain.h",
+    "win/win_util.cc",
+    "win/win_util.h",
+    "win/windows_version.cc",
+    "win/windows_version.h",
+    "win/winrt_storage_util.cc",
+    "win/winrt_storage_util.h",
+    "win/wrapped_window_proc.cc",
+    "win/wrapped_window_proc.h",
+  ]
+
+  # winternl.h and NTSecAPI.h have different definitions of UNICODE_STRING.
+  # There's only one client of NTSecAPI.h in base but several of winternl.h,
+  # so exclude the NTSecAPI.h one.
+  if (is_win) {
+    jumbo_excluded_sources = [ "rand_util_win.cc" ]
+  }
+
+  if (is_posix) {
+    sources += [
+      "base_paths_posix.h",
+      "debug/debugger_posix.cc",
+      "debug/stack_trace_posix.cc",
+      "file_descriptor_posix.h",
+      "files/dir_reader_posix.h",
+      "files/file_descriptor_watcher_posix.cc",
+      "files/file_descriptor_watcher_posix.h",
+      "files/file_enumerator_posix.cc",
+      "files/file_posix.cc",
+      "files/file_util_posix.cc",
+      "files/memory_mapped_file_posix.cc",
+      "memory/protected_memory_posix.cc",
+      "message_loop/watchable_io_message_pump_posix.cc",
+      "message_loop/watchable_io_message_pump_posix.h",
+      "native_library_posix.cc",
+      "posix/eintr_wrapper.h",
+      "posix/file_descriptor_shuffle.cc",
+      "posix/file_descriptor_shuffle.h",
+      "posix/global_descriptors.cc",
+      "posix/global_descriptors.h",
+      "posix/safe_strerror.cc",
+      "posix/safe_strerror.h",
+      "posix/unix_domain_socket.cc",
+      "posix/unix_domain_socket.h",
+      "process/kill_posix.cc",
+      "process/launch_posix.cc",
+      "process/process_handle_posix.cc",
+      "process/process_metrics_posix.cc",
+      "process/process_posix.cc",
+      "profiler/native_stack_sampler_posix.cc",
+      "rand_util_posix.cc",
+      "strings/string_util_posix.h",
+      "strings/sys_string_conversions_posix.cc",
+      "sync_socket_posix.cc",
+      "synchronization/condition_variable_posix.cc",
+      "synchronization/lock_impl_posix.cc",
+      "synchronization/waitable_event_posix.cc",
+      "synchronization/waitable_event_watcher_posix.cc",
+      "sys_info_posix.cc",
+      "task_scheduler/task_tracker_posix.cc",
+      "task_scheduler/task_tracker_posix.h",
+      "threading/platform_thread_internal_posix.cc",
+      "threading/platform_thread_internal_posix.h",
+      "threading/platform_thread_posix.cc",
+      "threading/thread_local_storage_posix.cc",
+      "timer/hi_res_timer_manager_posix.cc",
+    ]
+  }
+
+  if (!is_nacl) {
+    sources += [
+      "base_paths.cc",
+      "base_paths.h",
+      "base_paths_android.cc",
+      "base_paths_android.h",
+      "base_paths_mac.h",
+      "base_paths_mac.mm",
+      "base_paths_posix.h",
+      "base_paths_win.cc",
+      "base_paths_win.h",
+      "metrics/persistent_histogram_storage.cc",
+      "metrics/persistent_histogram_storage.h",
+    ]
+
+    if (is_linux) {
+      sources += [
+        "base_paths_posix.cc",
+        "debug/elf_reader_linux.cc",
+        "debug/elf_reader_linux.h",
+      ]
+    }
+  }
+
+  all_dependent_configs = []
+  defines = []
+  data = []
+  data_deps = []
+
+  configs += [
+    ":base_flags",
+    ":base_implementation",
+    "//build/config:precompiled_headers",
+    "//build/config/compiler:noshadowing",
+  ]
+
+  deps = [
+    "//base/allocator",
+    "//base/allocator:buildflags",
+    "//base/third_party/dynamic_annotations",
+    "//third_party/modp_b64",
+  ]
+
+  public_deps = [
+    ":anchor_functions_buildflags",
+    ":base_static",
+    ":build_date",
+    ":cfi_buildflags",
+    ":debugging_buildflags",
+    ":partition_alloc_buildflags",
+    ":protected_memory_buildflags",
+    ":synchronization_buildflags",
+    "//base/numerics:base_numerics",
+  ]
+
+  # Needed for <atomic> when using a newer C++ library than the sysroot's,
+  # except when building inside the cros_sdk environment; use host_toolchain
+  # as a more robust check for this.
+  if (!use_sysroot && (is_android || (is_linux && !is_chromecast)) &&
+      host_toolchain != "//build/toolchain/cros:host") {
+    libs = [ "atomic" ]
+  }
+
+  if (use_allocator_shim) {
+    sources += [
+      "allocator/allocator_shim.cc",
+      "allocator/allocator_shim.h",
+      "allocator/allocator_shim_internals.h",
+      "allocator/allocator_shim_override_cpp_symbols.h",
+      "allocator/allocator_shim_override_libc_symbols.h",
+    ]
+    if (is_win) {
+      sources += [
+        "allocator/allocator_shim_default_dispatch_to_winheap.cc",
+        "allocator/allocator_shim_override_ucrt_symbols_win.h",
+        "allocator/winheap_stubs_win.cc",
+        "allocator/winheap_stubs_win.h",
+      ]
+    } else if (is_linux && use_allocator == "tcmalloc") {
+      sources += [
+        "allocator/allocator_shim_default_dispatch_to_tcmalloc.cc",
+        "allocator/allocator_shim_override_glibc_weak_symbols.h",
+      ]
+      deps += [ "//base/allocator:tcmalloc" ]
+    } else if (is_linux && use_allocator == "none") {
+      sources += [ "allocator/allocator_shim_default_dispatch_to_glibc.cc" ]
+    } else if (is_android && use_allocator == "none") {
+      sources += [
+        "allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc",
+        "allocator/allocator_shim_override_linker_wrapped_symbols.h",
+      ]
+      all_dependent_configs += [ "//base/allocator:wrap_malloc_symbols" ]
+    } else if (is_mac) {
+      sources += [
+        "allocator/allocator_shim_default_dispatch_to_mac_zoned_malloc.cc",
+        "allocator/allocator_shim_default_dispatch_to_mac_zoned_malloc.h",
+        "allocator/allocator_shim_override_mac_symbols.h",
+      ]
+    }
+  }
+
+  # Allow more direct string conversions on platforms with native UTF-8
+  # strings.
+  if (is_mac || is_ios || is_chromeos || is_chromecast || is_fuchsia) {
+    defines += [ "SYSTEM_NATIVE_UTF8" ]
+  }
+
+  # Android.
+  if (is_android) {
+    sources -= [ "debug/stack_trace_posix.cc" ]
+    sources += [
+      "memory/platform_shared_memory_region_android.cc",
+      "memory/shared_memory_android.cc",
+      "memory/shared_memory_handle_android.cc",
+      "time/time_android.cc",
+    ]
+
+    # Android uses some Linux sources; put those back.
+    set_sources_assignment_filter([])
+    sources += [
+      "debug/elf_reader_linux.cc",
+      "debug/elf_reader_linux.h",
+      "debug/proc_maps_linux.cc",
+      "debug/proc_maps_linux.h",
+      "files/file_path_watcher_linux.cc",
+      "power_monitor/power_monitor_device_source_android.cc",
+      "process/internal_linux.cc",
+      "process/internal_linux.h",
+      "process/memory_linux.cc",
+      "process/process_handle_linux.cc",
+      "process/process_info_linux.cc",
+      "process/process_iterator_linux.cc",
+      "process/process_metrics_linux.cc",
+      "sys_info_linux.cc",
+    ]
+    set_sources_assignment_filter(sources_assignment_filter)
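+
+    # (set_sources_assignment_filter([]) temporarily clears GN's implicit
+    # filename filter, which would otherwise strip platform-suffixed files
+    # such as *_linux.cc from this non-Linux target; the call just above
+    # restores the filter from the sources_assignment_filter list.)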
+
+    deps += [
+      ":base_jni_headers",
+      "//third_party/android_tools:cpu_features",
+      "//third_party/ashmem",
+    ]
+
+    # TODO(thomasanderson): Remove this once use_custom_libcxx is always set to
+    # true on Android.
+    if (!use_custom_libcxx) {
+      deps += [ "//buildtools/third_party/libc++abi:cxa_demangle_stub" ]
+    }
+
+    # Needs to be a public config so that dependent targets link against it as
+    # well when doing a component build.
+    public_configs = [ ":android_system_libs" ]
+
+    if (can_unwind_with_cfi_table) {
+      sources += [
+        "trace_event/cfi_backtrace_android.cc",
+        "trace_event/cfi_backtrace_android.h",
+      ]
+    }
+
+    # This is actually a linker script, but it can be added to the link in the
+    # same way as a library.
+    libs = [ "android/library_loader/anchor_functions.lds" ]
+  }
+
+  # Chrome OS.
+  if (is_chromeos) {
+    sources += [ "power_monitor/power_monitor_device_source_chromeos.cc" ]
+  }
+
+  # Fuchsia.
+  if (is_fuchsia) {
+    sources += [
+      "base_paths_fuchsia.cc",
+      "base_paths_fuchsia.h",
+      "debug/debugger_posix.cc",
+      "debug/stack_trace_fuchsia.cc",
+      "file_descriptor_posix.h",
+      "files/dir_reader_posix.h",
+      "files/file_descriptor_watcher_posix.cc",
+      "files/file_descriptor_watcher_posix.h",
+      "files/file_enumerator_posix.cc",
+      "files/file_path_watcher_fuchsia.cc",
+      "files/file_posix.cc",
+      "files/file_util_posix.cc",
+      "files/memory_mapped_file_posix.cc",
+      "fuchsia/async_dispatcher.cc",
+      "fuchsia/async_dispatcher.h",
+      "fuchsia/component_context.cc",
+      "fuchsia/component_context.h",
+      "fuchsia/default_job.cc",
+      "fuchsia/default_job.h",
+      "fuchsia/fidl_interface_request.cc",
+      "fuchsia/fidl_interface_request.h",
+      "fuchsia/fuchsia_logging.cc",
+      "fuchsia/fuchsia_logging.h",
+      "fuchsia/scoped_zx_handle.cc",
+      "fuchsia/scoped_zx_handle.h",
+      "fuchsia/services_directory.cc",
+      "fuchsia/services_directory.h",
+      "memory/platform_shared_memory_region_fuchsia.cc",
+      "memory/protected_memory_posix.cc",
+      "memory/shared_memory_fuchsia.cc",
+      "memory/shared_memory_handle_fuchsia.cc",
+      "message_loop/message_pump_fuchsia.cc",
+      "message_loop/message_pump_fuchsia.h",
+      "message_loop/watchable_io_message_pump_posix.cc",
+      "message_loop/watchable_io_message_pump_posix.h",
+      "native_library_fuchsia.cc",
+      "posix/eintr_wrapper.h",
+      "posix/file_descriptor_shuffle.cc",
+      "posix/file_descriptor_shuffle.h",
+      "posix/global_descriptors.cc",
+      "posix/global_descriptors.h",
+      "posix/safe_strerror.cc",
+      "posix/safe_strerror.h",
+      "process/kill_fuchsia.cc",
+      "process/launch_fuchsia.cc",
+      "process/memory_fuchsia.cc",
+      "process/process_fuchsia.cc",
+      "process/process_handle_fuchsia.cc",
+      "process/process_iterator_fuchsia.cc",
+      "process/process_metrics_fuchsia.cc",
+      "process/process_metrics_posix.cc",
+      "profiler/native_stack_sampler_posix.cc",
+      "rand_util_fuchsia.cc",
+      "strings/string_util_posix.h",
+      "strings/sys_string_conversions_posix.cc",
+      "sync_socket_posix.cc",
+      "synchronization/condition_variable_posix.cc",
+      "synchronization/lock_impl_posix.cc",
+      "synchronization/waitable_event_posix.cc",
+      "synchronization/waitable_event_watcher_posix.cc",
+      "sys_info_fuchsia.cc",
+      "sys_info_posix.cc",
+      "task_scheduler/task_tracker_posix.cc",
+      "task_scheduler/task_tracker_posix.h",
+      "threading/platform_thread_fuchsia.cc",
+      "threading/platform_thread_posix.cc",
+      "threading/thread_local_storage_posix.cc",
+      "time/time_conversion_posix.cc",
+      "time/time_exploded_posix.cc",
+      "time/time_fuchsia.cc",
+      "timer/hi_res_timer_manager_posix.cc",
+    ]
+
+    # These only need to be public deps because their headers are included by
+    # public //base headers, which requires them to be on the include path.
+    # TODO(https://crbug.com/841171): Move these back to |deps|.
+    public_deps += [
+      "//third_party/fuchsia-sdk:async",
+      "//third_party/fuchsia-sdk:launchpad",
+    ]
+
+    deps += [
+      "//third_party/fuchsia-sdk:async_default",
+      "//third_party/fuchsia-sdk:fdio",
+      "//third_party/fuchsia-sdk:fidl",
+      "//third_party/fuchsia-sdk:svc",
+      "//third_party/fuchsia-sdk:zx",
+    ]
+  }
+
+  # NaCl.
+  if (is_nacl) {
+    # We reset sources_assignment_filter in order to explicitly include
+    # the Linux file (which would otherwise be filtered out).
+    set_sources_assignment_filter([])
+    sources += [
+      "files/file_path_watcher_stub.cc",
+      "memory/shared_memory_nacl.cc",
+      "process/process_metrics_nacl.cc",
+      "sync_socket_nacl.cc",
+      "threading/platform_thread_linux.cc",
+    ]
+    set_sources_assignment_filter(sources_assignment_filter)
+
+    sources -= [
+      "cpu.cc",
+      "debug/crash_logging.cc",
+      "debug/crash_logging.h",
+      "debug/stack_trace.cc",
+      "debug/stack_trace_posix.cc",
+      "files/file_enumerator_posix.cc",
+      "files/file_proxy.cc",
+      "files/important_file_writer.cc",
+      "files/important_file_writer.h",
+      "files/scoped_temp_dir.cc",
+      "memory/discardable_memory.cc",
+      "memory/discardable_memory.h",
+      "memory/discardable_memory_allocator.cc",
+      "memory/discardable_memory_allocator.h",
+      "memory/discardable_shared_memory.cc",
+      "memory/discardable_shared_memory.h",
+      "memory/shared_memory_helper.cc",
+      "memory/shared_memory_helper.h",
+      "native_library.cc",
+      "native_library_posix.cc",
+      "path_service.cc",
+      "process/kill.cc",
+      "process/kill.h",
+      "process/memory.cc",
+      "process/memory.h",
+      "process/process_iterator.cc",
+      "process/process_iterator.h",
+      "process/process_metrics.cc",
+      "process/process_metrics_posix.cc",
+      "process/process_posix.cc",
+      "scoped_native_library.cc",
+      "sync_socket_posix.cc",
+      "sys_info.cc",
+      "sys_info_posix.cc",
+      "task_scheduler/initialization_util.cc",
+      "task_scheduler/initialization_util.h",
+      "trace_event/trace_event_system_stats_monitor.cc",
+    ]
+
+    if (is_nacl_nonsfi) {
+      sources -= [ "rand_util_nacl.cc" ]
+      configs += [ ":nacl_nonsfi_warnings" ]
+    } else {
+      sources -= [
+        "files/file_descriptor_watcher_posix.cc",
+        "files/file_descriptor_watcher_posix.h",
+        "files/file_util.cc",
+        "files/file_util.h",
+        "files/file_util_posix.cc",
+        "json/json_file_value_serializer.cc",
+        "json/json_file_value_serializer.h",
+        "posix/unix_domain_socket.cc",
+        "process/kill_posix.cc",
+        "process/launch.cc",
+        "process/launch.h",
+        "process/launch_posix.cc",
+        "rand_util_posix.cc",
+        "task_scheduler/task_tracker_posix.cc",
+        "task_scheduler/task_tracker_posix.h",
+      ]
+    }
+  } else {
+    # Remove NaCl stuff.
+    sources -= [
+      "os_compat_nacl.cc",
+      "os_compat_nacl.h",
+      "rand_util_nacl.cc",
+    ]
+
+    if (use_partition_alloc) {
+      # Add stuff that doesn't work in NaCl.
+      sources += [
+        # PartitionAlloc uses SpinLock, which doesn't work in NaCl (see below).
+        "allocator/partition_allocator/address_space_randomization.cc",
+        "allocator/partition_allocator/address_space_randomization.h",
+        "allocator/partition_allocator/oom.h",
+        "allocator/partition_allocator/page_allocator.cc",
+        "allocator/partition_allocator/page_allocator.h",
+        "allocator/partition_allocator/page_allocator_internal.h",
+        "allocator/partition_allocator/partition_alloc.cc",
+        "allocator/partition_allocator/partition_alloc.h",
+        "allocator/partition_allocator/partition_alloc_constants.h",
+        "allocator/partition_allocator/partition_bucket.cc",
+        "allocator/partition_allocator/partition_bucket.h",
+        "allocator/partition_allocator/partition_cookie.h",
+        "allocator/partition_allocator/partition_direct_map_extent.h",
+        "allocator/partition_allocator/partition_freelist_entry.h",
+        "allocator/partition_allocator/partition_oom.cc",
+        "allocator/partition_allocator/partition_oom.h",
+        "allocator/partition_allocator/partition_page.cc",
+        "allocator/partition_allocator/partition_page.h",
+        "allocator/partition_allocator/partition_root_base.cc",
+        "allocator/partition_allocator/partition_root_base.h",
+        "allocator/partition_allocator/spin_lock.cc",
+        "allocator/partition_allocator/spin_lock.h",
+      ]
+      if (is_win) {
+        sources +=
+            [ "allocator/partition_allocator/page_allocator_internals_win.h" ]
+      } else if (is_posix || is_fuchsia) {
+        sources +=
+            [ "allocator/partition_allocator/page_allocator_internals_posix.h" ]
+      }
+    }
+  }
+
+  # Windows.
+  if (is_win) {
+    sources += [
+      "memory/platform_shared_memory_region_win.cc",
+      "memory/shared_memory_handle_win.cc",
+      "memory/shared_memory_win.cc",
+      "power_monitor/power_monitor_device_source_win.cc",
+      "profiler/win32_stack_frame_unwinder.cc",
+      "profiler/win32_stack_frame_unwinder.h",
+      "time/time_win.cc",
+    ]
+
+    sources -= [
+      "file_descriptor_store.cc",
+      "file_descriptor_store.h",
+      "memory/shared_memory_helper.cc",
+      "memory/shared_memory_helper.h",
+      "strings/string16.cc",
+    ]
+
+    deps += [
+      "//base/trace_event/etw_manifest:chrome_events_win",
+      "//base/win:base_win_buildflags",
+    ]
+
+    data_deps += [ "//build/win:runtime_libs" ]
+
+    if (com_init_check_hook_disabled) {
+      defines += [ "COM_INIT_CHECK_HOOK_DISABLED" ]
+    }
+
+    # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
+    configs += [ "//build/config/compiler:no_size_t_to_int_warning" ]
+
+    libs = [
+      "cfgmgr32.lib",
+      "powrprof.lib",
+      "propsys.lib",
+      "setupapi.lib",
+      "userenv.lib",
+      "winmm.lib",
+    ]
+    all_dependent_configs += [
+      ":base_win_linker_flags",
+      "//tools/win/DebugVisualizers:chrome",
+    ]
+  }
+
+  # Desktop Mac.
+  if (is_mac) {
+    sources -= [ "profiler/native_stack_sampler_posix.cc" ]
+    sources += [
+      "mac/scoped_typeref.h",
+      "memory/platform_shared_memory_region_mac.cc",
+      "memory/shared_memory_handle_mac.cc",
+      "memory/shared_memory_mac.cc",
+      "power_monitor/power_monitor_device_source_mac.mm",
+      "time/time_conversion_posix.cc",
+      "time/time_exploded_posix.cc",
+      "time/time_mac.cc",
+    ]
+
+    libs = [
+      "ApplicationServices.framework",
+      "AppKit.framework",
+      "bsm",
+      "CoreFoundation.framework",
+      "IOKit.framework",
+      "Security.framework",
+    ]
+  }
+
+  # Mac or iOS.
+  if (is_mac || is_ios) {
+    sources -= [
+      "native_library_posix.cc",
+      "strings/sys_string_conversions_posix.cc",
+      "synchronization/waitable_event_posix.cc",
+      "synchronization/waitable_event_watcher_posix.cc",
+      "threading/platform_thread_internal_posix.cc",
+    ]
+  } else {
+    # Non-Mac/ios.
+    sources -= [
+      "files/file_path_watcher_fsevents.cc",
+      "files/file_path_watcher_fsevents.h",
+      "files/file_path_watcher_kqueue.cc",
+      "files/file_path_watcher_kqueue.h",
+    ]
+  }
+
+  # Linux.
+  if (is_linux) {
+    # TODO(brettw) this will need to be parameterized at some point.
+    linux_configs = []
+    if (use_glib) {
+      linux_configs += [ "//build/config/linux:glib" ]
+    }
+
+    defines += [ "USE_SYMBOLIZE" ]
+
+    configs += linux_configs
+    all_dependent_configs += linux_configs
+
+    # These dependencies are not required on Android, and in the case
+    # of xdg_mime must be excluded due to licensing restrictions.
+    deps += [
+      "//base/third_party/symbolize",
+      "//base/third_party/xdg_mime",
+      "//base/third_party/xdg_user_dirs",
+    ]
+  } else {
+    # Non-Linux.
+    sources -= [
+      "nix/mime_util_xdg.cc",
+      "nix/mime_util_xdg.h",
+      "nix/xdg_util.cc",
+      "nix/xdg_util.h",
+    ]
+
+    if (!is_android) {
+      sources -= [
+        "linux_util.cc",
+        "linux_util.h",
+      ]
+    }
+  }
+
+  # iOS
+  if (is_ios) {
+    set_sources_assignment_filter([])
+
+    sources -= [
+      "files/file_path_watcher.cc",
+      "files/file_path_watcher.h",
+      "files/file_path_watcher_fsevents.cc",
+      "files/file_path_watcher_fsevents.h",
+      "files/file_path_watcher_kqueue.cc",
+      "files/file_path_watcher_kqueue.h",
+      "memory/discardable_shared_memory.cc",
+      "memory/discardable_shared_memory.h",
+      "process/kill.cc",
+      "process/kill.h",
+      "process/kill_posix.cc",
+      "process/launch.cc",
+      "process/launch.h",
+      "process/launch_posix.cc",
+      "process/memory.cc",
+      "process/memory.h",
+      "process/process_iterator.cc",
+      "process/process_iterator.h",
+      "process/process_metrics_posix.cc",
+      "process/process_posix.cc",
+      "sync_socket.h",
+      "sync_socket_posix.cc",
+      "synchronization/waitable_event_watcher.h",
+    ]
+    sources += [
+      "base_paths_mac.h",
+      "base_paths_mac.mm",
+      "file_version_info_mac.h",
+      "file_version_info_mac.mm",
+      "files/file_util_mac.mm",
+      "mac/bundle_locations.h",
+      "mac/bundle_locations.mm",
+      "mac/call_with_eh_frame.cc",
+      "mac/call_with_eh_frame.h",
+      "mac/foundation_util.h",
+      "mac/foundation_util.mm",
+      "mac/mac_logging.h",
+      "mac/mac_logging.mm",
+      "mac/mach_logging.cc",
+      "mac/mach_logging.h",
+      "mac/objc_release_properties.h",
+      "mac/objc_release_properties.mm",
+      "mac/scoped_block.h",
+      "mac/scoped_mach_port.cc",
+      "mac/scoped_mach_port.h",
+      "mac/scoped_mach_vm.cc",
+      "mac/scoped_mach_vm.h",
+      "mac/scoped_nsautorelease_pool.h",
+      "mac/scoped_nsautorelease_pool.mm",
+      "mac/scoped_nsobject.h",
+      "mac/scoped_nsobject.mm",
+      "mac/scoped_objc_class_swizzler.h",
+      "mac/scoped_objc_class_swizzler.mm",
+      "mac/scoped_typeref.h",
+      "message_loop/message_pump_mac.h",
+      "message_loop/message_pump_mac.mm",
+      "power_monitor/power_monitor_device_source_ios.mm",
+      "process/memory_stubs.cc",
+      "strings/sys_string_conversions_mac.mm",
+      "synchronization/waitable_event_mac.cc",
+      "threading/platform_thread_mac.mm",
+      "time/time_conversion_posix.cc",
+      "time/time_mac.cc",
+    ]
+
+    set_sources_assignment_filter(sources_assignment_filter)
+  }
+
+  if (dep_libevent) {
+    deps += [ "//base/third_party/libevent" ]
+  }
+
+  if (use_libevent) {
+    sources += [
+      "message_loop/message_pump_libevent.cc",
+      "message_loop/message_pump_libevent.h",
+    ]
+  }
+
+  # Android and macOS have their own custom shared memory handle
+  # implementations, e.g. because they support both POSIX and native handles.
+  if (is_posix && !is_android && !is_mac) {
+    sources += [
+      "memory/platform_shared_memory_region_posix.cc",
+      "memory/shared_memory_handle_posix.cc",
+    ]
+  }
+
+  if (is_posix && !is_mac && !is_nacl) {
+    sources += [ "memory/shared_memory_posix.cc" ]
+  }
+
+  if (is_posix && !is_mac && !is_ios) {
+    sources += [
+      "time/time_conversion_posix.cc",
+      "time/time_exploded_posix.cc",
+      "time/time_now_posix.cc",
+    ]
+  }
+
+  if ((is_posix && !is_mac && !is_ios && !is_android && !is_chromeos) ||
+      is_fuchsia) {
+    sources += [ "power_monitor/power_monitor_device_source_stub.cc" ]
+  }
+
+  if (!use_glib) {
+    sources -= [
+      "message_loop/message_pump_glib.cc",
+      "message_loop/message_pump_glib.h",
+    ]
+  }
+
+  if (using_sanitizer) {
+    data += [ "//tools/valgrind/asan/" ]
+    if (is_win) {
+      data +=
+          [ "//third_party/llvm-build/Release+Asserts/bin/llvm-symbolizer.exe" ]
+    } else {
+      data += [ "//third_party/llvm-build/Release+Asserts/bin/llvm-symbolizer" ]
+    }
+  }
+
+  configs += [ "//build/config/compiler:wexit_time_destructors" ]
+  if (!is_debug) {
+    configs -= [ "//build/config/compiler:default_optimization" ]
+    configs += [ "//build/config/compiler:optimize_max" ]
+  }
+}
+
+# Build flags for Control Flow Integrity
+# https://www.chromium.org/developers/testing/control-flow-integrity
+buildflag_header("cfi_buildflags") {
+  header = "cfi_buildflags.h"
+
+  # buildflag entries added to this header must also be manually added to
+  # tools/gn/bootstrap/bootstrap.py
+  flags = [
+    # TODO(pcc): remove CFI_CAST_CHECK, see https://crbug.com/626794.
+    "CFI_CAST_CHECK=$is_cfi && $use_cfi_cast",
+    "CFI_ICALL_CHECK=$is_cfi && $use_cfi_icall",
+    "CFI_ENFORCEMENT_TRAP=$is_cfi && !$use_cfi_diag",
+    "CFI_ENFORCEMENT_DIAGNOSTIC=$is_cfi && $use_cfi_diag && !$use_cfi_recover",
+  ]
+}
+
+buildflag_header("debugging_buildflags") {
+  header = "debugging_buildflags.h"
+  header_dir = "base/debug"
+
+  # buildflag entries added to this header must also be manually added to
+  # tools/gn/bootstrap/bootstrap.py
+  flags = [
+    "ENABLE_LOCATION_SOURCE=$enable_location_source",
+    "ENABLE_PROFILING=$enable_profiling",
+    "CAN_UNWIND_WITH_FRAME_POINTERS=$can_unwind_with_frame_pointers",
+    "UNSAFE_DEVELOPER_BUILD=$is_unsafe_developer_build",
+    "CAN_UNWIND_WITH_CFI_TABLE=$can_unwind_with_cfi_table",
+  ]
+}
+
+# Build flags for ProtectedMemory, temporary workaround for crbug.com/792777
+# TODO(vtsyrklevich): Remove once support for gold on Android/CrOS is dropped.
+buildflag_header("protected_memory_buildflags") {
+  header = "protected_memory_buildflags.h"
+  header_dir = "base/memory"
+
+  # buildflag entries added to this header must also be manually added to
+  # tools/gn/bootstrap/bootstrap.py
+  flags = [ "USE_LLD=$use_lld" ]
+}
+
+buildflag_header("synchronization_buildflags") {
+  header = "synchronization_buildflags.h"
+  header_dir = "base/synchronization"
+
+  flags =
+      [ "ENABLE_MUTEX_PRIORITY_INHERITANCE=$enable_mutex_priority_inheritance" ]
+}
+
+buildflag_header("anchor_functions_buildflags") {
+  header = "anchor_functions_buildflags.h"
+  header_dir = "base/android/library_loader"
+  _supports_code_ordering = current_cpu == "arm"
+
+  # buildflag entries added to this header must also be manually added to
+  # tools/gn/bootstrap/bootstrap.py
+  flags = [
+    "USE_LLD=$use_lld",
+    "SUPPORTS_CODE_ORDERING=$_supports_code_ordering",
+  ]
+}
+
+buildflag_header("partition_alloc_buildflags") {
+  header = "partition_alloc_buildflags.h"
+  header_dir = "base"
+
+  flags = [ "USE_PARTITION_ALLOC=$use_partition_alloc" ]
+}
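+
+# The buildflag_header targets above each generate a header exposing the
+# listed flags via the BUILDFLAG() macro from //build/buildflag.h; header_dir
+# controls the include path. A minimal sketch of a consumer, assuming the
+# conventional include paths (the consuming code itself is hypothetical):
+#
+#   #include "base/cfi_buildflags.h"
+#   #include "base/debug/debugging_buildflags.h"
+#
+#   #if BUILDFLAG(CFI_ICALL_CHECK)
+#   // Indirect-call CFI checks are enabled in this build.
+#   #endif
+#
+#   #if BUILDFLAG(ENABLE_LOCATION_SOURCE)
+#   // base::Location records file, function and line, not just the PC.
+#   #endif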
+
+# This is the subset of files from base that should not be used with a dynamic
+# library. Note that this library cannot depend on base because base depends on
+# base_static.
+static_library("base_static") {
+  sources = [
+    "base_switches.cc",
+    "base_switches.h",
+  ]
+
+  if (is_win) {
+    public_deps = [
+      "//base/win:pe_image",
+    ]
+
+    # Disable sanitizer coverage in win/pe_image.cc. It is called by the sandbox
+    # before sanitizer coverage can initialize. http://crbug.com/484711
+    configs -= [ "//build/config/sanitizers:default_sanitizer_flags" ]
+    configs +=
+        [ "//build/config/sanitizers:default_sanitizer_flags_but_coverage" ]
+  }
+
+  if (!is_debug) {
+    configs -= [ "//build/config/compiler:default_optimization" ]
+    configs += [ "//build/config/compiler:optimize_max" ]
+  }
+}
+
+component("i18n") {
+  output_name = "base_i18n"
+  sources = [
+    "i18n/base_i18n_export.h",
+    "i18n/base_i18n_switches.cc",
+    "i18n/base_i18n_switches.h",
+    "i18n/bidi_line_iterator.cc",
+    "i18n/bidi_line_iterator.h",
+    "i18n/break_iterator.cc",
+    "i18n/break_iterator.h",
+    "i18n/case_conversion.cc",
+    "i18n/case_conversion.h",
+    "i18n/char_iterator.cc",
+    "i18n/char_iterator.h",
+    "i18n/character_encoding.cc",
+    "i18n/character_encoding.h",
+    "i18n/encoding_detection.cc",
+    "i18n/encoding_detection.h",
+    "i18n/file_util_icu.cc",
+    "i18n/file_util_icu.h",
+    "i18n/i18n_constants.cc",
+    "i18n/i18n_constants.h",
+    "i18n/icu_string_conversions.cc",
+    "i18n/icu_string_conversions.h",
+    "i18n/icu_util.cc",
+    "i18n/icu_util.h",
+    "i18n/message_formatter.cc",
+    "i18n/message_formatter.h",
+    "i18n/number_formatting.cc",
+    "i18n/number_formatting.h",
+    "i18n/rtl.cc",
+    "i18n/rtl.h",
+    "i18n/streaming_utf8_validator.cc",
+    "i18n/streaming_utf8_validator.h",
+    "i18n/string_compare.cc",
+    "i18n/string_compare.h",
+    "i18n/string_search.cc",
+    "i18n/string_search.h",
+    "i18n/time_formatting.cc",
+    "i18n/time_formatting.h",
+    "i18n/timezone.cc",
+    "i18n/timezone.h",
+    "i18n/unicodestring.h",
+    "i18n/utf8_validator_tables.cc",
+    "i18n/utf8_validator_tables.h",
+  ]
+  defines = [ "BASE_I18N_IMPLEMENTATION" ]
+  configs += [ "//build/config/compiler:wexit_time_destructors" ]
+  public_deps = [
+    "//third_party/ced",
+    "//third_party/icu",
+  ]
+  deps = [
+    ":base",
+    "//base/third_party/dynamic_annotations",
+  ]
+
+  if (!is_debug) {
+    configs -= [ "//build/config/compiler:default_optimization" ]
+    configs += [ "//build/config/compiler:optimize_max" ]
+  }
+
+  # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
+  configs += [ "//build/config/compiler:no_size_t_to_int_warning" ]
+
+  if (is_mac) {
+    libs = [ "CoreFoundation.framework" ]
+  }
+}
+
+test("base_perftests") {
+  sources = [
+    "message_loop/message_loop_perftest.cc",
+    "message_loop/message_pump_perftest.cc",
+
+    # "test/run_all_unittests.cc",
+    "json/json_perftest.cc",
+    "synchronization/waitable_event_perftest.cc",
+    "threading/thread_perftest.cc",
+  ]
+  deps = [
+    ":base",
+    "//base/test:test_support",
+    "//base/test:test_support_perf",
+    "//testing/gtest",
+    "//testing/perf",
+  ]
+
+  if (is_android) {
+    deps += [ "//testing/android/native_test:native_test_native_code" ]
+  }
+}
+
+test("base_i18n_perftests") {
+  sources = [
+    "i18n/streaming_utf8_validator_perftest.cc",
+  ]
+  deps = [
+    ":base",
+    ":i18n",
+    "//base/test:test_support",
+    "//base/test:test_support_perf",
+    "//testing/gtest",
+  ]
+}
+
+if (!is_ios) {
+  executable("build_utf8_validator_tables") {
+    sources = [
+      "i18n/build_utf8_validator_tables.cc",
+    ]
+    deps = [
+      ":base",
+      "//build/config:exe_and_shlib_deps",
+      "//build/win:default_exe_manifest",
+      "//third_party/icu:icuuc",
+    ]
+  }
+
+  executable("check_example") {
+    sources = [
+      "check_example.cc",
+    ]
+    deps = [
+      ":base",
+      "//build/config:exe_and_shlib_deps",
+      "//build/win:default_exe_manifest",
+    ]
+  }
+}
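+
+# For reference, check_example.cc demonstrates the CHECK macros from
+# base/logging.h (inferred from the target name; the code below is a sketch,
+# not the file's actual contents):
+#
+#   #include "base/logging.h"
+#
+#   int main() {
+#     int value = 42;
+#     // Crashes with a log message if the condition is false.
+#     CHECK_EQ(value, 42) << "unexpected value";
+#     return 0;
+#   }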
+
+if (is_win) {
+  # Target to manually rebuild pe_image_test.dll, which is checked into
+  # base/test/data/pe_image.
+  shared_library("pe_image_test") {
+    sources = [
+      "win/pe_image_test.cc",
+    ]
+    ldflags = [
+      "/DELAYLOAD:cfgmgr32.dll",
+      "/DELAYLOAD:shell32.dll",
+      "/SUBSYSTEM:WINDOWS",
+    ]
+    libs = [
+      "cfgmgr32.lib",
+      "shell32.lib",
+    ]
+    deps = [
+      "//build/config:exe_and_shlib_deps",
+    ]
+  }
+
+  loadable_module("scoped_handle_test_dll") {
+    sources = [
+      "win/scoped_handle_test_dll.cc",
+    ]
+    deps = [
+      ":base",
+      "//base/win:base_win_buildflags",
+    ]
+  }
+}
+
+if (is_win || is_mac) {
+  if (current_cpu == "x64") {
+    # Must be a shared library so that it can be unloaded during testing.
+    shared_library("base_profiler_test_support_library") {
+      sources = [
+        "profiler/test_support_library.cc",
+      ]
+      deps = [
+        "//build/config:exe_and_shlib_deps",
+      ]
+    }
+  }
+}
+
+bundle_data("base_unittests_bundle_data") {
+  testonly = true
+  sources = [
+    "test/data/file_util/binary_file.bin",
+    "test/data/file_util/binary_file_diff.bin",
+    "test/data/file_util/binary_file_same.bin",
+    "test/data/file_util/blank_line.txt",
+    "test/data/file_util/blank_line_crlf.txt",
+    "test/data/file_util/crlf.txt",
+    "test/data/file_util/different.txt",
+    "test/data/file_util/different_first.txt",
+    "test/data/file_util/different_last.txt",
+    "test/data/file_util/empty1.txt",
+    "test/data/file_util/empty2.txt",
+    "test/data/file_util/first1.txt",
+    "test/data/file_util/first2.txt",
+    "test/data/file_util/original.txt",
+    "test/data/file_util/same.txt",
+    "test/data/file_util/same_length.txt",
+    "test/data/file_util/shortened.txt",
+    "test/data/json/bom_feff.json",
+    "test/data/serializer_nested_test.json",
+    "test/data/serializer_test.json",
+    "test/data/serializer_test_nowhitespace.json",
+  ]
+  outputs = [
+    "{{bundle_resources_dir}}/" +
+        "{{source_root_relative_dir}}/{{source_file_part}}",
+  ]
+}
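+
+# For illustration, given the output template above, a source file such as
+# "test/data/json/bom_feff.json" lands at (path sketch, not verified):
+#
+#   {{bundle_resources_dir}}/base/test/data/json/bom_feff.json
+#
+# because {{source_root_relative_dir}} expands to the file's directory
+# relative to the source root.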
+
+if (is_ios || is_mac) {
+  source_set("base_unittests_arc") {
+    testonly = true
+    set_sources_assignment_filter([])
+    sources = [
+      "mac/bind_objc_block_unittest_arc.mm",
+      "mac/scoped_nsobject_unittest_arc.mm",
+    ]
+    set_sources_assignment_filter(sources_assignment_filter)
+    configs += [ "//build/config/compiler:enable_arc" ]
+    deps = [
+      ":base",
+      "//testing/gtest",
+    ]
+  }
+}
+
+if (is_fuchsia) {
+  fidl_library("test_fidl") {
+    namespace = "base.fuchsia"
+    namespace_path = "base/fuchsia"
+
+    sources = [
+      "fuchsia/test.fidl",
+    ]
+  }
+}
+
+test("base_unittests") {
+  sources = [
+    "allocator/allocator_interception_mac_unittest.mm",
+    "allocator/malloc_zone_functions_mac_unittest.cc",
+    "allocator/tcmalloc_unittest.cc",
+    "android/application_status_listener_unittest.cc",
+    "android/content_uri_utils_unittest.cc",
+    "android/jni_android_unittest.cc",
+    "android/jni_array_unittest.cc",
+    "android/jni_string_unittest.cc",
+    "android/library_loader/library_prefetcher_unittest.cc",
+    "android/path_utils_unittest.cc",
+    "android/scoped_java_ref_unittest.cc",
+    "android/sys_utils_unittest.cc",
+    "android/unguessable_token_android_unittest.cc",
+    "at_exit_unittest.cc",
+    "atomicops_unittest.cc",
+    "barrier_closure_unittest.cc",
+    "base64_unittest.cc",
+    "base64url_unittest.cc",
+    "big_endian_unittest.cc",
+    "bind_unittest.cc",
+    "bit_cast_unittest.cc",
+    "bits_unittest.cc",
+    "build_time_unittest.cc",
+    "callback_helpers_unittest.cc",
+    "callback_list_unittest.cc",
+    "callback_unittest.cc",
+    "cancelable_callback_unittest.cc",
+    "command_line_unittest.cc",
+    "component_export_unittest.cc",
+    "containers/adapters_unittest.cc",
+    "containers/circular_deque_unittest.cc",
+    "containers/flat_map_unittest.cc",
+    "containers/flat_set_unittest.cc",
+    "containers/flat_tree_unittest.cc",
+    "containers/hash_tables_unittest.cc",
+    "containers/id_map_unittest.cc",
+    "containers/linked_list_unittest.cc",
+    "containers/mru_cache_unittest.cc",
+    "containers/small_map_unittest.cc",
+    "containers/span_unittest.cc",
+    "containers/stack_container_unittest.cc",
+    "containers/unique_ptr_adapters_unittest.cc",
+    "containers/vector_buffer_unittest.cc",
+    "cpu_unittest.cc",
+    "debug/activity_analyzer_unittest.cc",
+    "debug/activity_tracker_unittest.cc",
+    "debug/alias_unittest.cc",
+    "debug/crash_logging_unittest.cc",
+    "debug/debugger_unittest.cc",
+    "debug/elf_reader_linux_unittest.cc",
+    "debug/leak_tracker_unittest.cc",
+    "debug/proc_maps_linux_unittest.cc",
+    "debug/stack_trace_unittest.cc",
+    "debug/task_annotator_unittest.cc",
+    "debug/thread_heap_usage_tracker_unittest.cc",
+    "deferred_sequenced_task_runner_unittest.cc",
+    "environment_unittest.cc",
+    "feature_list_unittest.cc",
+    "file_version_info_win_unittest.cc",
+    "files/file_enumerator_unittest.cc",
+    "files/file_path_unittest.cc",
+    "files/file_path_watcher_unittest.cc",
+    "files/file_proxy_unittest.cc",
+    "files/file_unittest.cc",
+    "files/file_util_unittest.cc",
+    "files/important_file_writer_unittest.cc",
+    "files/memory_mapped_file_unittest.cc",
+    "files/scoped_temp_dir_unittest.cc",
+    "gmock_unittest.cc",
+    "guid_unittest.cc",
+    "hash_unittest.cc",
+    "i18n/bidi_line_iterator_unittest.cc",
+    "i18n/break_iterator_unittest.cc",
+    "i18n/case_conversion_unittest.cc",
+    "i18n/char_iterator_unittest.cc",
+    "i18n/character_encoding_unittest.cc",
+    "i18n/file_util_icu_unittest.cc",
+    "i18n/icu_string_conversions_unittest.cc",
+    "i18n/message_formatter_unittest.cc",
+    "i18n/number_formatting_unittest.cc",
+    "i18n/rtl_unittest.cc",
+    "i18n/streaming_utf8_validator_unittest.cc",
+    "i18n/string_search_unittest.cc",
+    "i18n/time_formatting_unittest.cc",
+    "i18n/timezone_unittest.cc",
+    "ios/crb_protocol_observers_unittest.mm",
+    "ios/device_util_unittest.mm",
+    "ios/weak_nsobject_unittest.mm",
+    "json/json_parser_unittest.cc",
+    "json/json_reader_unittest.cc",
+    "json/json_value_converter_unittest.cc",
+    "json/json_value_serializer_unittest.cc",
+    "json/json_writer_unittest.cc",
+    "json/string_escape_unittest.cc",
+    "lazy_instance_unittest.cc",
+    "logging_unittest.cc",
+    "mac/bind_objc_block_unittest.mm",
+    "mac/call_with_eh_frame_unittest.mm",
+    "mac/dispatch_source_mach_unittest.cc",
+    "mac/foundation_util_unittest.mm",
+    "mac/mac_util_unittest.mm",
+    "mac/mach_port_broker_unittest.cc",
+    "mac/objc_release_properties_unittest.mm",
+    "mac/scoped_nsobject_unittest.mm",
+    "mac/scoped_objc_class_swizzler_unittest.mm",
+    "mac/scoped_sending_event_unittest.mm",
+    "md5_unittest.cc",
+    "memory/aligned_memory_unittest.cc",
+    "memory/discardable_shared_memory_unittest.cc",
+    "memory/linked_ptr_unittest.cc",
+    "memory/memory_coordinator_client_registry_unittest.cc",
+    "memory/memory_pressure_listener_unittest.cc",
+    "memory/memory_pressure_monitor_chromeos_unittest.cc",
+    "memory/memory_pressure_monitor_mac_unittest.cc",
+    "memory/memory_pressure_monitor_unittest.cc",
+    "memory/memory_pressure_monitor_win_unittest.cc",
+    "memory/platform_shared_memory_region_unittest.cc",
+    "memory/protected_memory_unittest.cc",
+    "memory/ptr_util_unittest.cc",
+    "memory/ref_counted_memory_unittest.cc",
+    "memory/ref_counted_unittest.cc",
+    "memory/shared_memory_mac_unittest.cc",
+    "memory/shared_memory_region_unittest.cc",
+    "memory/shared_memory_unittest.cc",
+    "memory/shared_memory_win_unittest.cc",
+    "memory/singleton_unittest.cc",
+    "memory/weak_ptr_unittest.cc",
+    "message_loop/message_loop_task_runner_unittest.cc",
+    "message_loop/message_loop_unittest.cc",
+    "message_loop/message_pump_glib_unittest.cc",
+    "message_loop/message_pump_io_ios_unittest.cc",
+    "message_loop/message_pump_mac_unittest.mm",
+    "metrics/bucket_ranges_unittest.cc",
+    "metrics/field_trial_params_unittest.cc",
+    "metrics/field_trial_unittest.cc",
+    "metrics/histogram_base_unittest.cc",
+    "metrics/histogram_delta_serialization_unittest.cc",
+    "metrics/histogram_functions_unittest.cc",
+    "metrics/histogram_macros_unittest.cc",
+    "metrics/histogram_samples_unittest.cc",
+    "metrics/histogram_snapshot_manager_unittest.cc",
+    "metrics/histogram_unittest.cc",
+    "metrics/metrics_hashes_unittest.cc",
+    "metrics/persistent_histogram_allocator_unittest.cc",
+    "metrics/persistent_histogram_storage_unittest.cc",
+    "metrics/persistent_memory_allocator_unittest.cc",
+    "metrics/persistent_sample_map_unittest.cc",
+    "metrics/sample_map_unittest.cc",
+    "metrics/sample_vector_unittest.cc",
+    "metrics/single_sample_metrics_unittest.cc",
+    "metrics/sparse_histogram_unittest.cc",
+    "metrics/statistics_recorder_unittest.cc",
+    "native_library_unittest.cc",
+    "no_destructor_unittest.cc",
+    "observer_list_unittest.cc",
+    "optional_unittest.cc",
+    "os_compat_android_unittest.cc",
+    "path_service_unittest.cc",
+    "pickle_unittest.cc",
+    "power_monitor/power_monitor_unittest.cc",
+    "process/launch_unittest_win.cc",
+    "process/memory_unittest.cc",
+    "process/memory_unittest_mac.h",
+    "process/memory_unittest_mac.mm",
+    "process/process_info_unittest.cc",
+    "process/process_metrics_unittest.cc",
+    "process/process_unittest.cc",
+    "process/process_util_unittest.cc",
+    "profiler/stack_sampling_profiler_unittest.cc",
+    "rand_util_unittest.cc",
+    "run_loop_unittest.cc",
+    "safe_numerics_unittest.cc",
+    "scoped_clear_errno_unittest.cc",
+    "scoped_generic_unittest.cc",
+    "scoped_native_library_unittest.cc",
+    "security_unittest.cc",
+    "sequence_checker_unittest.cc",
+    "sequence_token_unittest.cc",
+    "sequenced_task_runner_unittest.cc",
+    "sha1_unittest.cc",
+    "stl_util_unittest.cc",
+    "strings/char_traits_unittest.cc",
+    "strings/nullable_string16_unittest.cc",
+    "strings/pattern_unittest.cc",
+    "strings/safe_sprintf_unittest.cc",
+    "strings/strcat_unittest.cc",
+    "strings/string16_unittest.cc",
+    "strings/string_number_conversions_unittest.cc",
+    "strings/string_piece_unittest.cc",
+    "strings/string_split_unittest.cc",
+    "strings/string_tokenizer_unittest.cc",
+    "strings/string_util_unittest.cc",
+    "strings/stringize_macros_unittest.cc",
+    "strings/stringprintf_unittest.cc",
+    "strings/sys_string_conversions_mac_unittest.mm",
+    "strings/sys_string_conversions_unittest.cc",
+    "strings/utf_offset_string_conversions_unittest.cc",
+    "strings/utf_string_conversions_unittest.cc",
+    "supports_user_data_unittest.cc",
+    "sync_socket_unittest.cc",
+    "synchronization/atomic_flag_unittest.cc",
+    "synchronization/condition_variable_unittest.cc",
+    "synchronization/lock_unittest.cc",
+    "synchronization/waitable_event_unittest.cc",
+    "synchronization/waitable_event_watcher_unittest.cc",
+    "sys_byteorder_unittest.cc",
+    "sys_info_unittest.cc",
+    "system_monitor/system_monitor_unittest.cc",
+    "task/cancelable_task_tracker_unittest.cc",
+    "task_runner_util_unittest.cc",
+    "task_scheduler/delayed_task_manager_unittest.cc",
+    "task_scheduler/lazy_task_runner_unittest.cc",
+    "task_scheduler/priority_queue_unittest.cc",
+    "task_scheduler/scheduler_lock_unittest.cc",
+    "task_scheduler/scheduler_single_thread_task_runner_manager_unittest.cc",
+    "task_scheduler/scheduler_worker_pool_impl_unittest.cc",
+    "task_scheduler/scheduler_worker_pool_unittest.cc",
+    "task_scheduler/scheduler_worker_stack_unittest.cc",
+    "task_scheduler/scheduler_worker_unittest.cc",
+    "task_scheduler/scoped_set_task_priority_for_current_thread_unittest.cc",
+    "task_scheduler/sequence_sort_key_unittest.cc",
+    "task_scheduler/sequence_unittest.cc",
+    "task_scheduler/service_thread_unittest.cc",
+    "task_scheduler/task_scheduler_impl_unittest.cc",
+    "task_scheduler/task_tracker_unittest.cc",
+    "task_scheduler/task_traits_unittest.cc",
+    "task_scheduler/task_unittest.cc",
+    "task_scheduler/test_task_factory.cc",
+    "task_scheduler/test_task_factory.h",
+    "task_scheduler/test_utils.cc",
+    "task_scheduler/test_utils.h",
+    "task_scheduler/tracked_ref_unittest.cc",
+    "template_util_unittest.cc",
+    "test/histogram_tester_unittest.cc",
+    "test/mock_callback_unittest.cc",
+    "test/scoped_feature_list_unittest.cc",
+    "test/scoped_mock_time_message_loop_task_runner_unittest.cc",
+    "test/scoped_task_environment_unittest.cc",
+    "test/test_mock_time_task_runner_unittest.cc",
+    "test/test_pending_task_unittest.cc",
+    "test/test_reg_util_win_unittest.cc",
+    "test/trace_event_analyzer_unittest.cc",
+    "test/user_action_tester_unittest.cc",
+    "thread_annotations_unittest.cc",
+    "threading/platform_thread_unittest.cc",
+    "threading/post_task_and_reply_impl_unittest.cc",
+    "threading/scoped_blocking_call_unittest.cc",
+    "threading/sequence_local_storage_map_unittest.cc",
+    "threading/sequence_local_storage_slot_unittest.cc",
+    "threading/sequenced_task_runner_handle_unittest.cc",
+    "threading/simple_thread_unittest.cc",
+    "threading/thread_checker_unittest.cc",
+    "threading/thread_collision_warner_unittest.cc",
+    "threading/thread_id_name_manager_unittest.cc",
+    "threading/thread_local_storage_unittest.cc",
+    "threading/thread_local_unittest.cc",
+    "threading/thread_restrictions_unittest.cc",
+    "threading/thread_task_runner_handle_unittest.cc",
+    "threading/thread_unittest.cc",
+    "threading/watchdog_unittest.cc",
+    "time/pr_time_unittest.cc",
+    "time/time_unittest.cc",
+    "time/time_win_unittest.cc",
+    "timer/hi_res_timer_manager_unittest.cc",
+    "timer/mock_timer_unittest.cc",
+    "timer/timer_unittest.cc",
+    "tools_sanity_unittest.cc",
+    "trace_event/blame_context_unittest.cc",
+    "trace_event/event_name_filter_unittest.cc",
+    "trace_event/heap_profiler_allocation_context_tracker_unittest.cc",
+    "trace_event/heap_profiler_heap_dump_writer_unittest.cc",
+    "trace_event/heap_profiler_stack_frame_deduplicator_unittest.cc",
+    "trace_event/heap_profiler_type_name_deduplicator_unittest.cc",
+    "trace_event/java_heap_dump_provider_android_unittest.cc",
+    "trace_event/memory_allocator_dump_unittest.cc",
+    "trace_event/memory_dump_manager_unittest.cc",
+    "trace_event/memory_dump_scheduler_unittest.cc",
+    "trace_event/memory_peak_detector_unittest.cc",
+    "trace_event/memory_usage_estimator_unittest.cc",
+    "trace_event/process_memory_dump_unittest.cc",
+    "trace_event/trace_category_unittest.cc",
+    "trace_event/trace_config_unittest.cc",
+    "trace_event/trace_event_argument_unittest.cc",
+    "trace_event/trace_event_filter_test_utils.cc",
+    "trace_event/trace_event_filter_test_utils.h",
+    "trace_event/trace_event_system_stats_monitor_unittest.cc",
+    "trace_event/trace_event_unittest.cc",
+    "tuple_unittest.cc",
+    "unguessable_token_unittest.cc",
+    "value_iterators_unittest.cc",
+    "values_unittest.cc",
+    "version_unittest.cc",
+    "vlog_unittest.cc",
+    "win/async_operation_unittest.cc",
+    "win/com_init_check_hook_unittest.cc",
+    "win/com_init_util_unittest.cc",
+    "win/core_winrt_util_unittest.cc",
+    "win/dllmain.cc",
+    "win/enum_variant_unittest.cc",
+    "win/event_trace_consumer_unittest.cc",
+    "win/event_trace_controller_unittest.cc",
+    "win/event_trace_provider_unittest.cc",
+    "win/i18n_unittest.cc",
+    "win/iunknown_impl_unittest.cc",
+    "win/message_window_unittest.cc",
+    "win/object_watcher_unittest.cc",
+    "win/pe_image_unittest.cc",
+    "win/registry_unittest.cc",
+    "win/scoped_bstr_unittest.cc",
+    "win/scoped_handle_unittest.cc",
+    "win/scoped_hstring_unittest.cc",
+    "win/scoped_process_information_unittest.cc",
+    "win/scoped_variant_unittest.cc",
+    "win/scoped_winrt_initializer_unittest.cc",
+    "win/shortcut_unittest.cc",
+    "win/startup_information_unittest.cc",
+    "win/typed_event_handler_unittest.cc",
+    "win/wait_chain_unittest.cc",
+    "win/win_includes_unittest.cc",
+    "win/win_util_unittest.cc",
+    "win/windows_version_unittest.cc",
+    "win/winrt_storage_util_unittest.cc",
+    "win/wrapped_window_proc_unittest.cc",
+  ]
+
+  defines = []
+
+  deps = [
+    ":base",
+    ":i18n",
+    "//base/allocator:buildflags",
+    "//base/test:native_library_test_utils",
+    "//base/test:run_all_base_unittests",
+    "//base/test:test_support",
+    "//base/third_party/dynamic_annotations",
+    "//testing/gmock",
+    "//testing/gtest",
+    "//third_party/icu",
+  ]
+
+  data_deps = [
+    "//base/test:test_child_process",
+    "//base/test:test_shared_library",
+  ]
+
+  if (is_ios || is_mac) {
+    deps += [ ":base_unittests_arc" ]
+  }
+
+  public_deps = [
+    ":base_unittests_bundle_data",
+  ]
+
+  data = [
+    "test/data/",
+  ]
+
+  if (is_posix) {
+    sources += [
+      "files/dir_reader_posix_unittest.cc",
+      "files/file_descriptor_watcher_posix_unittest.cc",
+      "message_loop/message_loop_io_posix_unittest.cc",
+      "posix/file_descriptor_shuffle_unittest.cc",
+      "posix/unix_domain_socket_unittest.cc",
+      "task_scheduler/task_tracker_posix_unittest.cc",
+    ]
+  }
+
+  # Allow more direct string conversions on platforms with native UTF-8
+  # strings.
+  if (is_mac || is_ios || is_chromeos || is_chromecast || is_fuchsia) {
+    defines += [ "SYSTEM_NATIVE_UTF8" ]
+  }
+
+  if (is_android) {
+    # Add unwind tables to the base_unittests_apk test apk. The unwind tables
+    # are generated from the debug info in the binary. Removing the
+    # "default_symbols" config and adding a symbols config removes the
+    # "strip_debug" config that would otherwise strip the debug info from the
+    # apk.
+    if (can_unwind_with_cfi_table) {
+      configs -= [ "//build/config/compiler:default_symbols" ]
+      if (symbol_level == 2) {
+        configs += [ "//build/config/compiler:symbols" ]
+      } else {
+        configs += [ "//build/config/compiler:minimal_symbols" ]
+      }
+      add_unwind_tables_in_apk = true
+      sources += [ "trace_event/cfi_backtrace_android_unittest.cc" ]
+    }
+    sources -= [
+      "process/process_unittest.cc",
+      "process/process_util_unittest.cc",
+    ]
+    deps += [
+      ":base_java",
+      ":base_java_unittest_support",
+      "//base/test:test_support_java",
+    ]
+  }
+
+  if (is_ios) {
+    sources -= [
+      "files/file_path_watcher_unittest.cc",
+      "memory/discardable_shared_memory_unittest.cc",
+      "memory/shared_memory_unittest.cc",
+      "process/memory_unittest.cc",
+      "process/process_unittest.cc",
+      "process/process_util_unittest.cc",
+      "sync_socket_unittest.cc",
+      "synchronization/waitable_event_watcher_unittest.cc",
+    ]
+
+    # Pull in specific Mac files for iOS (which have been filtered out by file
+    # name rules).
+    set_sources_assignment_filter([])
+    sources += [
+      "mac/bind_objc_block_unittest.mm",
+      "mac/foundation_util_unittest.mm",
+      "mac/objc_release_properties_unittest.mm",
+      "mac/scoped_nsobject_unittest.mm",
+      "strings/sys_string_conversions_mac_unittest.mm",
+    ]
+    set_sources_assignment_filter(sources_assignment_filter)
+
+    # TODO(GYP): dep on copy_test_data_ios action.
+  }
+
+  if (use_partition_alloc) {
+    sources += [
+      "allocator/partition_allocator/address_space_randomization_unittest.cc",
+      "allocator/partition_allocator/page_allocator_unittest.cc",
+      "allocator/partition_allocator/partition_alloc_unittest.cc",
+      "allocator/partition_allocator/spin_lock_unittest.cc",
+    ]
+  }
+
+  if (is_mac) {
+    libs = [
+      "CoreFoundation.framework",
+      "Foundation.framework",
+    ]
+    if (current_cpu == "x64") {
+      data_deps += [ ":base_profiler_test_support_library" ]
+    }
+  }
+
+  if (is_linux) {
+    if (is_desktop_linux) {
+      sources += [ "nix/xdg_util_unittest.cc" ]
+    }
+
+    deps += [ "//base/test:malloc_wrapper" ]
+    defines += [
+      # This library is used by ElfReaderTest to test reading ELF files.
+      "MALLOC_WRAPPER_LIB=\"${shlib_prefix}malloc_wrapper${shlib_extension}\"",
+    ]
+
+    if (!is_component_build) {
+      # Set rpath to find libmalloc_wrapper.so even in a non-component build.
+      configs += [ "//build/config/gcc:rpath_for_built_shared_libraries" ]
+    }
+  }
+
+  if (!use_glib) {
+    sources -= [ "message_loop/message_pump_glib_unittest.cc" ]
+  }
+
+  if (use_libevent) {
+    sources += [ "message_loop/message_pump_libevent_unittest.cc" ]
+    deps += [ "//base/third_party/libevent" ]
+  }
+
+  if (is_fuchsia) {
+    sources += [
+      "files/dir_reader_posix_unittest.cc",
+      "files/file_descriptor_watcher_posix_unittest.cc",
+      "fuchsia/services_directory_unittest.cc",
+      "message_loop/message_loop_io_posix_unittest.cc",
+      "posix/file_descriptor_shuffle_unittest.cc",
+      "task_scheduler/task_tracker_posix_unittest.cc",
+    ]
+
+    sources += [ "fuchsia/async_dispatcher_unittest.cc" ]
+    deps += [
+      ":test_fidl",
+      "//third_party/fuchsia-sdk:async",
+      "//third_party/fuchsia-sdk:async_default",
+      "//third_party/fuchsia-sdk:fdio",
+    ]
+  }
+
+  if (!is_fuchsia && !is_ios) {
+    sources += [ "files/file_locking_unittest.cc" ]
+  }
+
+  if (is_android) {
+    deps += [ "//testing/android/native_test:native_test_native_code" ]
+    set_sources_assignment_filter([])
+    sources += [
+      "debug/elf_reader_linux_unittest.cc",
+      "debug/proc_maps_linux_unittest.cc",
+      "trace_event/trace_event_android_unittest.cc",
+    ]
+    set_sources_assignment_filter(sources_assignment_filter)
+  }
+
+  if (is_win) {
+    deps += [ "//base:scoped_handle_test_dll" ]
+    if (current_cpu == "x64") {
+      sources += [ "profiler/win32_stack_frame_unwinder_unittest.cc" ]
+      data_deps += [ ":base_profiler_test_support_library" ]
+    }
+  }
+
+  if (use_allocator_shim) {
+    sources += [
+      "allocator/allocator_shim_unittest.cc",
+      "sampling_heap_profiler/sampling_heap_profiler_unittest.cc",
+    ]
+  }
+
+  # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
+  configs += [ "//build/config/compiler:no_size_t_to_int_warning" ]
+
+  # Symbols for crashes when running tests on swarming.
+  if (symbol_level > 0) {
+    if (is_win) {
+      data += [ "$root_out_dir/base_unittests.exe.pdb" ]
+    } else if (is_mac) {
+      # TODO(crbug.com/330301): make this conditional on mac_strip_release.
+      # data += [ "$root_out_dir/base_unittests.dSYM/" ]
+    }
+  }
+}
+
+action("build_date") {
+  script = "//build/write_build_date_header.py"
+
+  # Force recalculation if there's been a change.
+  inputs = [
+    "//build/util/LASTCHANGE",
+  ]
+  outputs = [
+    "$target_gen_dir/generated_build_date.h",
+  ]
+
+  args =
+      [ rebase_path("$target_gen_dir/generated_build_date.h", root_build_dir) ]
+
+  if (is_official_build) {
+    args += [ "official" ]
+  } else {
+    args += [ "default" ]
+  }
+
+  if (override_build_date != "N/A") {
+    args += [ override_build_date ]
+  }
+}
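+
+# For reference, in a default developer build with no override_build_date set,
+# the action above amounts to running, from the build directory (illustrative
+# invocation):
+#
+#   python ../../build/write_build_date_header.py \
+#       gen/base/generated_build_date.h default
+#
+# with "official" in place of "default" for official builds, and the override
+# date appended as an extra argument when one is given.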
+
+if (enable_nocompile_tests) {
+  nocompile_test("base_nocompile_tests") {
+    sources = [
+      "bind_unittest.nc",
+      "callback_list_unittest.nc",
+      "callback_unittest.nc",
+      "containers/span_unittest.nc",
+      "memory/ref_counted_unittest.nc",
+      "memory/weak_ptr_unittest.nc",
+      "metrics/field_trial_params_unittest.nc",
+      "metrics/histogram_unittest.nc",
+      "optional_unittest.nc",
+      "strings/string16_unittest.nc",
+      "task_scheduler/task_traits_unittest.nc",
+      "thread_annotations_unittest.nc",
+    ]
+
+    deps = [
+      ":base",
+      "//base/test:run_all_unittests",
+      "//testing/gtest",
+    ]
+  }
+}
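+
+# The .nc sources above are C++ files that are expected to fail to compile;
+# each case is guarded by an NCTEST define with the expected compiler error
+# given as a regex in a trailing comment. A minimal sketch of the convention
+# (the test case, type, and regex here are hypothetical):
+#
+#   #if defined(NCTEST_CONST_METHOD)  // [r"fatal error: .*const.*"]
+#   void WontCompile() {
+#     const Foo foo;
+#     foo.MutatingMethod();  // The compiler should reject this call.
+#   }
+#   #endif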
+
+if (is_android) {
+  generate_jni("base_jni_headers") {
+    sources = [
+      "android/java/src/org/chromium/base/AnimationFrameTimeHistogram.java",
+      "android/java/src/org/chromium/base/ApkAssets.java",
+      "android/java/src/org/chromium/base/ApplicationStatus.java",
+      "android/java/src/org/chromium/base/BuildInfo.java",
+      "android/java/src/org/chromium/base/Callback.java",
+      "android/java/src/org/chromium/base/CommandLine.java",
+      "android/java/src/org/chromium/base/ContentUriUtils.java",
+      "android/java/src/org/chromium/base/CpuFeatures.java",
+      "android/java/src/org/chromium/base/EarlyTraceEvent.java",
+      "android/java/src/org/chromium/base/EventLog.java",
+      "android/java/src/org/chromium/base/FieldTrialList.java",
+      "android/java/src/org/chromium/base/ImportantFileWriterAndroid.java",
+      "android/java/src/org/chromium/base/JNIUtils.java",
+      "android/java/src/org/chromium/base/JavaExceptionReporter.java",
+      "android/java/src/org/chromium/base/JavaHandlerThread.java",
+      "android/java/src/org/chromium/base/LocaleUtils.java",
+      "android/java/src/org/chromium/base/MemoryPressureListener.java",
+      "android/java/src/org/chromium/base/PathService.java",
+      "android/java/src/org/chromium/base/PathUtils.java",
+      "android/java/src/org/chromium/base/PowerMonitor.java",
+      "android/java/src/org/chromium/base/SysUtils.java",
+      "android/java/src/org/chromium/base/SystemMessageHandler.java",
+      "android/java/src/org/chromium/base/ThreadUtils.java",
+      "android/java/src/org/chromium/base/ThrowUncaughtException.java",
+      "android/java/src/org/chromium/base/TimeUtils.java",
+      "android/java/src/org/chromium/base/TimezoneUtils.java",
+      "android/java/src/org/chromium/base/TraceEvent.java",
+      "android/java/src/org/chromium/base/UnguessableToken.java",
+      "android/java/src/org/chromium/base/library_loader/LibraryLoader.java",
+      "android/java/src/org/chromium/base/metrics/RecordHistogram.java",
+      "android/java/src/org/chromium/base/metrics/RecordUserAction.java",
+      "android/java/src/org/chromium/base/metrics/StatisticsRecorderAndroid.java",
+      "android/java/src/org/chromium/base/process_launcher/ChildProcessService.java",
+    ]
+
+    public_deps = [
+      ":android_runtime_jni_headers",
+    ]
+
+    jni_package = "base"
+  }
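+
+  # Each Java file listed above produces a generated C++ header that the
+  # native side includes; a sketch, assuming the conventional include path
+  # derived from jni_package (file name illustrative):
+  #
+  #   #include "jni/CommandLine_jni.h"  // Generated from CommandLine.java.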
+
+  generate_jar_jni("android_runtime_jni_headers") {
+    jni_package = "base"
+    classes = [ "java/lang/Runtime.class" ]
+  }
+
+  android_library("base_java") {
+    srcjar_deps = [
+      ":base_android_java_enums_srcjar",
+      ":base_build_config_gen",
+      ":base_java_aidl",
+      ":base_native_libraries_gen",
+    ]
+
+    deps = [
+      "//third_party/android_tools:android_support_annotations_java",
+      "//third_party/android_tools:android_support_multidex_java",
+      "//third_party/android_tools:android_support_v4_java",
+      "//third_party/jsr-305:jsr_305_javalib",
+    ]
+
+    java_files = [
+      "android/java/src/org/chromium/base/ActivityState.java",
+      "android/java/src/org/chromium/base/AnimationFrameTimeHistogram.java",
+      "android/java/src/org/chromium/base/ApiCompatibilityUtils.java",
+      "android/java/src/org/chromium/base/ApkAssets.java",
+      "android/java/src/org/chromium/base/ApplicationStatus.java",
+      "android/java/src/org/chromium/base/BaseSwitches.java",
+      "android/java/src/org/chromium/base/BuildInfo.java",
+      "android/java/src/org/chromium/base/Callback.java",
+      "android/java/src/org/chromium/base/CollectionUtil.java",
+      "android/java/src/org/chromium/base/CommandLine.java",
+      "android/java/src/org/chromium/base/CommandLineInitUtil.java",
+      "android/java/src/org/chromium/base/ContentUriUtils.java",
+      "android/java/src/org/chromium/base/ContextUtils.java",
+      "android/java/src/org/chromium/base/CpuFeatures.java",
+      "android/java/src/org/chromium/base/DiscardableReferencePool.java",
+      "android/java/src/org/chromium/base/EarlyTraceEvent.java",
+      "android/java/src/org/chromium/base/EventLog.java",
+      "android/java/src/org/chromium/base/FieldTrialList.java",
+      "android/java/src/org/chromium/base/FileUtils.java",
+      "android/java/src/org/chromium/base/ImportantFileWriterAndroid.java",
+      "android/java/src/org/chromium/base/JNIUtils.java",
+      "android/java/src/org/chromium/base/JavaExceptionReporter.java",
+      "android/java/src/org/chromium/base/JavaHandlerThread.java",
+      "android/java/src/org/chromium/base/LocaleUtils.java",
+      "android/java/src/org/chromium/base/Log.java",
+      "android/java/src/org/chromium/base/MemoryPressureListener.java",
+      "android/java/src/org/chromium/base/NonThreadSafe.java",
+      "android/java/src/org/chromium/base/ObserverList.java",
+      "android/java/src/org/chromium/base/PackageUtils.java",
+      "android/java/src/org/chromium/base/PathService.java",
+      "android/java/src/org/chromium/base/PathUtils.java",
+      "android/java/src/org/chromium/base/PowerMonitor.java",
+      "android/java/src/org/chromium/base/Promise.java",
+      "android/java/src/org/chromium/base/ResourceExtractor.java",
+      "android/java/src/org/chromium/base/SecureRandomInitializer.java",
+      "android/java/src/org/chromium/base/StreamUtil.java",
+      "android/java/src/org/chromium/base/StrictModeContext.java",
+      "android/java/src/org/chromium/base/Supplier.java",
+      "android/java/src/org/chromium/base/SysUtils.java",
+      "android/java/src/org/chromium/base/SystemMessageHandler.java",
+      "android/java/src/org/chromium/base/ThreadUtils.java",
+      "android/java/src/org/chromium/base/ThrowUncaughtException.java",
+      "android/java/src/org/chromium/base/TimeUtils.java",
+      "android/java/src/org/chromium/base/TimezoneUtils.java",
+      "android/java/src/org/chromium/base/TraceEvent.java",
+      "android/java/src/org/chromium/base/UnguessableToken.java",
+      "android/java/src/org/chromium/base/VisibleForTesting.java",
+      "android/java/src/org/chromium/base/annotations/AccessedByNative.java",
+      "android/java/src/org/chromium/base/annotations/CalledByNative.java",
+      "android/java/src/org/chromium/base/annotations/CalledByNativeUnchecked.java",
+      "android/java/src/org/chromium/base/annotations/JNIAdditionalImport.java",
+      "android/java/src/org/chromium/base/annotations/JNINamespace.java",
+      "android/java/src/org/chromium/base/annotations/MainDex.java",
+      "android/java/src/org/chromium/base/annotations/NativeCall.java",
+      "android/java/src/org/chromium/base/annotations/NativeClassQualifiedName.java",
+      "android/java/src/org/chromium/base/annotations/RemovableInRelease.java",
+      "android/java/src/org/chromium/base/annotations/UsedByReflection.java",
+      "android/java/src/org/chromium/base/library_loader/LegacyLinker.java",
+      "android/java/src/org/chromium/base/library_loader/LibraryLoader.java",
+      "android/java/src/org/chromium/base/library_loader/Linker.java",
+      "android/java/src/org/chromium/base/library_loader/LoaderErrors.java",
+      "android/java/src/org/chromium/base/library_loader/NativeLibraryPreloader.java",
+      "android/java/src/org/chromium/base/library_loader/ProcessInitException.java",
+      "android/java/src/org/chromium/base/metrics/CachedMetrics.java",
+      "android/java/src/org/chromium/base/metrics/RecordHistogram.java",
+      "android/java/src/org/chromium/base/metrics/RecordUserAction.java",
+      "android/java/src/org/chromium/base/metrics/StatisticsRecorderAndroid.java",
+      "android/java/src/org/chromium/base/multidex/ChromiumMultiDexInstaller.java",
+      "android/java/src/org/chromium/base/process_launcher/ChildConnectionAllocator.java",
+      "android/java/src/org/chromium/base/process_launcher/ChildProcessConnection.java",
+      "android/java/src/org/chromium/base/process_launcher/ChildProcessConstants.java",
+      "android/java/src/org/chromium/base/process_launcher/ChildProcessLauncher.java",
+      "android/java/src/org/chromium/base/process_launcher/ChildProcessService.java",
+      "android/java/src/org/chromium/base/process_launcher/ChildProcessServiceDelegate.java",
+      "android/java/src/org/chromium/base/process_launcher/FileDescriptorInfo.java",
+      "android/java/src/org/chromium/base/memory/MemoryPressureMonitor.java",
+      "android/java/src/org/chromium/base/memory/MemoryPressureCallback.java",
+      "android/java/src/org/chromium/base/memory/MemoryPressureUma.java",
+    ]
+
+    # New versions of BuildConfig.java and NativeLibraries.java
+    # (with the actual correct values) will be created when creating an apk.
+    jar_excluded_patterns = [
+      "*/BuildConfig.class",
+      "*/NativeLibraries.class",
+      "*/NativeLibraries##*.class",
+    ]
+  }
+
+  android_aidl("base_java_aidl") {
+    import_include = [ "android/java/src" ]
+    sources = [
+      "android/java/src/org/chromium/base/process_launcher/ICallbackInt.aidl",
+      "android/java/src/org/chromium/base/process_launcher/IChildProcessService.aidl",
+    ]
+  }
+
+  android_library("base_javatests") {
+    testonly = true
+    deps = [
+      ":base_java",
+      ":base_java_test_support",
+      "//third_party/android_support_test_runner:runner_java",
+      "//third_party/junit:junit",
+    ]
+    java_files = [
+      "android/javatests/src/org/chromium/base/AdvancedMockContextTest.java",
+      "android/javatests/src/org/chromium/base/ApiCompatibilityUtilsTest.java",
+      "android/javatests/src/org/chromium/base/CommandLineInitUtilTest.java",
+      "android/javatests/src/org/chromium/base/CommandLineTest.java",
+      "android/javatests/src/org/chromium/base/EarlyTraceEventTest.java",
+
+      # TODO(nona): move to JUnit once it is built for Android N.
+      "android/javatests/src/org/chromium/base/LocaleUtilsTest.java",
+      "android/javatests/src/org/chromium/base/ObserverListTest.java",
+      "android/javatests/src/org/chromium/base/StrictModeContextTest.java",
+      "android/javatests/src/org/chromium/base/metrics/RecordHistogramTest.java",
+    ]
+  }
+
+  android_library("base_java_test_support") {
+    testonly = true
+    deps = [
+      ":base_java",
+      "//testing/android/reporter:reporter_java",
+      "//third_party/android_support_test_runner:exposed_instrumentation_api_publish_java",
+      "//third_party/android_support_test_runner:rules_java",
+      "//third_party/android_support_test_runner:runner_java",
+      "//third_party/android_tools:android_support_annotations_java",
+      "//third_party/android_tools:android_support_chromium_java",
+      "//third_party/android_tools:android_support_compat_java",
+      "//third_party/hamcrest:hamcrest_core_java",
+      "//third_party/junit",
+      "//third_party/ub-uiautomator:ub_uiautomator_java",
+    ]
+
+    deps += android_extra_test_deps
+
+    java_files = [
+      "test/android/javatests/src/org/chromium/base/test/BaseJUnit4ClassRunner.java",
+      "test/android/javatests/src/org/chromium/base/test/BaseChromiumAndroidJUnitRunner.java",
+      "test/android/javatests/src/org/chromium/base/test/BaseChromiumRunnerCommon.java",
+      "test/android/javatests/src/org/chromium/base/test/BaseTestResult.java",
+      "test/android/javatests/src/org/chromium/base/test/ScreenshotOnFailureStatement.java",
+      "test/android/javatests/src/org/chromium/base/test/SetUpTestRule.java",
+      "test/android/javatests/src/org/chromium/base/test/SetUpStatement.java",
+      "test/android/javatests/src/org/chromium/base/test/TestListInstrumentationRunListener.java",
+      "test/android/javatests/src/org/chromium/base/test/TestTraceEvent.java",
+      "test/android/javatests/src/org/chromium/base/test/params/ParameterizedRunner.java",
+      "test/android/javatests/src/org/chromium/base/test/params/BlockJUnit4RunnerDelegate.java",
+      "test/android/javatests/src/org/chromium/base/test/params/BaseJUnit4RunnerDelegate.java",
+      "test/android/javatests/src/org/chromium/base/test/params/MethodParamAnnotationRule.java",
+      "test/android/javatests/src/org/chromium/base/test/params/MethodParamRule.java",
+      "test/android/javatests/src/org/chromium/base/test/params/ParameterAnnotations.java",
+      "test/android/javatests/src/org/chromium/base/test/params/ParameterizedFrameworkMethod.java",
+      "test/android/javatests/src/org/chromium/base/test/params/ParameterizedRunnerDelegate.java",
+      "test/android/javatests/src/org/chromium/base/test/params/ParameterizedRunnerDelegateCommon.java",
+      "test/android/javatests/src/org/chromium/base/test/params/ParameterizedRunnerDelegateFactory.java",
+      "test/android/javatests/src/org/chromium/base/test/params/ParameterProvider.java",
+      "test/android/javatests/src/org/chromium/base/test/params/ParameterSet.java",
+      "test/android/javatests/src/org/chromium/base/test/util/AdvancedMockContext.java",
+      "test/android/javatests/src/org/chromium/base/test/util/AnnotationRule.java",
+      "test/android/javatests/src/org/chromium/base/test/util/CallbackHelper.java",
+      "test/android/javatests/src/org/chromium/base/test/util/CommandLineFlags.java",
+      "test/android/javatests/src/org/chromium/base/test/util/DisableIf.java",
+      "test/android/javatests/src/org/chromium/base/test/util/DisableIfSkipCheck.java",
+      "test/android/javatests/src/org/chromium/base/test/util/AnnotationProcessingUtils.java",
+      "test/android/javatests/src/org/chromium/base/test/util/DisabledTest.java",
+      "test/android/javatests/src/org/chromium/base/test/util/EnormousTest.java",
+      "test/android/javatests/src/org/chromium/base/test/util/Feature.java",
+      "test/android/javatests/src/org/chromium/base/test/util/FlakyTest.java",
+      "test/android/javatests/src/org/chromium/base/test/util/InMemorySharedPreferences.java",
+      "test/android/javatests/src/org/chromium/base/test/util/InstrumentationUtils.java",
+      "test/android/javatests/src/org/chromium/base/test/util/IntegrationTest.java",
+      "test/android/javatests/src/org/chromium/base/test/util/Manual.java",
+      "test/android/javatests/src/org/chromium/base/test/util/ManualSkipCheck.java",
+      "test/android/javatests/src/org/chromium/base/test/util/Matchers.java",
+      "test/android/javatests/src/org/chromium/base/test/util/MetricsUtils.java",
+      "test/android/javatests/src/org/chromium/base/test/util/MinAndroidSdkLevel.java",
+      "test/android/javatests/src/org/chromium/base/test/util/MinAndroidSdkLevelSkipCheck.java",
+      "test/android/javatests/src/org/chromium/base/test/util/Restriction.java",
+      "test/android/javatests/src/org/chromium/base/test/util/RestrictionSkipCheck.java",
+      "test/android/javatests/src/org/chromium/base/test/util/RetryOnFailure.java",
+      "test/android/javatests/src/org/chromium/base/test/util/ScalableTimeout.java",
+      "test/android/javatests/src/org/chromium/base/test/util/SkipCheck.java",
+      "test/android/javatests/src/org/chromium/base/test/util/TestFileUtil.java",
+      "test/android/javatests/src/org/chromium/base/test/util/TestThread.java",
+      "test/android/javatests/src/org/chromium/base/test/util/TimeoutScale.java",
+      "test/android/javatests/src/org/chromium/base/test/util/UserActionTester.java",
+      "test/android/javatests/src/org/chromium/base/test/util/UrlUtils.java",
+      "test/android/javatests/src/org/chromium/base/test/util/parameter/CommandLineParameter.java",
+      "test/android/javatests/src/org/chromium/base/test/util/parameter/SkipCommandLineParameterization.java",
+    ]
+  }
+
+  android_library("base_java_process_launcher_test_support") {
+    testonly = true
+    deps = [
+      ":base_java",
+      ":base_java_test_support",
+    ]
+    java_files = [ "test/android/javatests/src/org/chromium/base/test/TestChildProcessConnection.java" ]
+  }
+
+  android_library("base_junit_test_support") {
+    # Platform checks are broken for Robolectric.
+    bypass_platform_checks = true
+    testonly = true
+    java_files = [
+      "android/junit/src/org/chromium/base/metrics/test/ShadowRecordHistogram.java",
+      "test/android/junit/src/org/chromium/base/test/BaseRobolectricTestRunner.java",
+    ]
+    deps = [
+      ":base_java",
+      "//testing/android/junit:junit_test_support",
+      "//third_party/robolectric:robolectric_all_java",
+    ]
+  }
+
+  junit_binary("base_junit_tests") {
+    java_files = [
+      "android/junit/src/org/chromium/base/ApplicationStatusTest.java",
+      "android/junit/src/org/chromium/base/DiscardableReferencePoolTest.java",
+      "android/junit/src/org/chromium/base/LogTest.java",
+      "android/junit/src/org/chromium/base/NonThreadSafeTest.java",
+      "android/junit/src/org/chromium/base/PromiseTest.java",
+      "android/junit/src/org/chromium/base/memory/MemoryPressureMonitorTest.java",
+      "android/junit/src/org/chromium/base/process_launcher/ChildConnectionAllocatorTest.java",
+      "android/junit/src/org/chromium/base/process_launcher/ChildProcessConnectionTest.java",
+      "test/android/junit/src/org/chromium/base/test/SetUpStatementTest.java",
+      "test/android/junit/src/org/chromium/base/test/TestListInstrumentationRunListenerTest.java",
+      "test/android/junit/src/org/chromium/base/test/util/AnnotationProcessingUtilsTest.java",
+      "test/android/junit/src/org/chromium/base/test/util/DisableIfTest.java",
+      "test/android/junit/src/org/chromium/base/test/util/ManualSkipCheckTest.java",
+      "test/android/junit/src/org/chromium/base/test/util/MinAndroidSdkLevelSkipCheckTest.java",
+      "test/android/junit/src/org/chromium/base/test/util/RestrictionSkipCheckTest.java",
+      "test/android/junit/src/org/chromium/base/test/util/SkipCheckTest.java",
+      "test/android/junit/src/org/chromium/base/test/params/ExampleParameterizedTest.java",
+      "test/android/junit/src/org/chromium/base/test/params/ParameterizedRunnerTest.java",
+      "test/android/junit/src/org/chromium/base/test/params/ParameterizedRunnerDelegateCommonTest.java",
+      "test/android/junit/src/org/chromium/base/test/params/ParameterizedRunnerDelegateFactoryTest.java",
+      "test/android/junit/src/org/chromium/base/test/params/ParameterizedTestNameTest.java",
+    ]
+    deps = [
+      ":base_java",
+      ":base_java_process_launcher_test_support",
+      ":base_java_test_support",
+      ":base_junit_test_support",
+      "//third_party/hamcrest:hamcrest_java",
+    ]
+  }
+
+  java_cpp_enum("base_android_java_enums_srcjar") {
+    sources = [
+      "android/application_status_listener.h",
+      "android/library_loader/library_load_from_apk_status_codes.h",
+      "android/library_loader/library_loader_hooks.h",
+      "memory/memory_pressure_listener.h",
+      "metrics/histogram_base.h",
+      "trace_event/trace_config.h",
+    ]
+  }
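+
+  # java_cpp_enum extracts annotated C++ enums from the headers above into
+  # Java constants; a sketch of the annotation convention (the enum and its
+  # values here are hypothetical):
+  #
+  #   // GENERATED_JAVA_ENUM_PACKAGE: org.chromium.base
+  #   enum ApplicationState {
+  #     APPLICATION_STATE_HAS_RUNNING_ACTIVITIES,
+  #     APPLICATION_STATE_HAS_PAUSED_ACTIVITIES,
+  #   };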
+
+  generate_build_config_srcjar("base_build_config_gen") {
+    use_final_fields = false
+  }
+
+  java_cpp_template("base_native_libraries_gen") {
+    sources = [
+      "android/java/templates/NativeLibraries.template",
+    ]
+    package_path = "org/chromium/base/library_loader"
+  }
+
+  android_library("base_java_unittest_support") {
+    testonly = true
+    deps = [
+      ":base_java",
+    ]
+    java_files = [
+      "test/android/java/src/org/chromium/base/ContentUriTestUtils.java",
+      "test/android/java/src/org/chromium/base/JavaHandlerThreadHelpers.java",
+    ]
+  }
+}
+
+# Keep the list of fuzzer_tests in alphabetical order.
+fuzzer_test("base64_decode_fuzzer") {
+  sources = [
+    "base64_decode_fuzzer.cc",
+  ]
+  deps = [
+    "//base",
+  ]
+}
+
+fuzzer_test("base64_encode_fuzzer") {
+  sources = [
+    "base64_encode_fuzzer.cc",
+  ]
+  deps = [
+    "//base",
+  ]
+}
+
+fuzzer_test("base_json_correctness_fuzzer") {
+  sources = [
+    "json/json_correctness_fuzzer.cc",
+  ]
+  deps = [
+    ":base",
+  ]
+  dict = "//testing/libfuzzer/fuzzers/dicts/json.dict"
+}
+
+fuzzer_test("base_json_reader_fuzzer") {
+  sources = [
+    "json/json_reader_fuzzer.cc",
+  ]
+  deps = [
+    "//base",
+  ]
+  dict = "//testing/libfuzzer/fuzzers/dicts/json.dict"
+}
+
+fuzzer_test("base_json_string_escape_fuzzer") {
+  sources = [
+    "json/string_escape_fuzzer.cc",
+  ]
+  deps = [
+    "//base",
+  ]
+}
+
+fuzzer_test("string_number_conversions_fuzzer") {
+  sources = [
+    "strings/string_number_conversions_fuzzer.cc",
+  ]
+  deps = [
+    "//base",
+  ]
+}
+
+fuzzer_test("string_tokenizer_fuzzer") {
+  sources = [
+    "strings/string_tokenizer_fuzzer.cc",
+  ]
+  deps = [
+    "//base",
+  ]
+}
+
+fuzzer_test("utf_string_conversions_fuzzer") {
+  sources = [
+    "strings/utf_string_conversions_fuzzer.cc",
+  ]
+  deps = [
+    "//base",
+  ]
+}
+
+# TODO(dyaroshev): remove regression fuzzer, after we run it for a few days
+#                  and are confident that the transition was ok.
+fuzzer_test("utf_string_conversions_regression_fuzzer") {
+  sources = [
+    "strings/old_utf_string_conversions.cc",
+    "strings/old_utf_string_conversions.h",
+    "strings/utf_string_conversions_regression_fuzzer.cc",
+  ]
+  deps = [
+    ":base",
+  ]
+
+  libfuzzer_options = [ "max_len=32" ]
+}
diff --git a/base/DEPS b/base/DEPS
new file mode 100644
index 0000000..4b25f3f
--- /dev/null
+++ b/base/DEPS
@@ -0,0 +1,16 @@
+include_rules = [
+  "+jni",
+  "+third_party/ashmem",
+  "+third_party/apple_apsl",
+  "+third_party/ced",
+  "+third_party/lss",
+  "+third_party/modp_b64",
+  "+third_party/tcmalloc",
+
+  # These are implicitly brought in from the root, and we don't want them.
+  "-ipc",
+  "-url",
+
+  # ICU dependencies must be separate from the rest of base.
+  "-i18n",
+]
diff --git a/base/OWNERS b/base/OWNERS
new file mode 100644
index 0000000..21d1970
--- /dev/null
+++ b/base/OWNERS
@@ -0,0 +1,52 @@
+# About src/base:
+#
+# Chromium is a very mature project; most things that are generally useful are
+# already here, and things that are not here generally aren't useful.
+#
+# Base is pulled into many projects. For example, various ChromeOS daemons. So
+# the bar for adding stuff is that it must have demonstrated wide
+# applicability. Prefer to add things closer to where they're used (i.e. "not
+# base"), and pull into base only when needed.  In a project our size,
+# sometimes even duplication is OK and inevitable.
+#
+# For example, adding a new logging macro DPVELOG_NE is not clearer than just
+# writing out what you want to log in a regular logging statement, even if it
+# makes your calling code longer. Just add it to your own code.
+#
+# If the code in question does not need to be used inside base, but will have
+# multiple consumers across the codebase, consider placing it in a new directory
+# under components/ instead.
+
+danakj@chromium.org
+dcheng@chromium.org
+gab@chromium.org
+mark@chromium.org
+thakis@chromium.org
+thestig@chromium.org
+
+# For Bind/Callback:
+per-file bind*=tzik@chromium.org
+per-file callback*=tzik@chromium.org
+
+# For Android-specific changes:
+per-file *android*=file://base/android/OWNERS
+per-file BUILD.gn=file://base/android/OWNERS
+
+# For Fuchsia-specific changes:
+per-file *_fuchsia*=file://build/fuchsia/OWNERS
+
+# For FeatureList API:
+per-file feature_list*=asvitkine@chromium.org
+per-file feature_list*=isherman@chromium.org
+
+# Restricted since rand_util.h also backs the cryptographically secure RNG.
+per-file rand_util*=set noparent
+per-file rand_util*=file://ipc/SECURITY_OWNERS
+
+# For TCMalloc tests:
+per-file security_unittest.cc=jln@chromium.org
+
+# For Value:
+per-file values*=jdoerrie@chromium.org
+
+# COMPONENT: Internals>Core
diff --git a/base/PRESUBMIT.py b/base/PRESUBMIT.py
new file mode 100644
index 0000000..7fc8107
--- /dev/null
+++ b/base/PRESUBMIT.py
@@ -0,0 +1,49 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Chromium presubmit script for src/base.
+
+See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
+for more details on the presubmit API built into depot_tools.
+"""
+
+def _CheckNoInterfacesInBase(input_api, output_api):
+  """Checks to make sure no files in libbase.a have |@interface|."""
+  pattern = input_api.re.compile(r'^\s*@interface', input_api.re.MULTILINE)
+  files = []
+  for f in input_api.AffectedSourceFiles(input_api.FilterSourceFile):
+    if (f.LocalPath().startswith('base/') and
+        not "/ios/" in f.LocalPath() and
+        not "/test/" in f.LocalPath() and
+        not f.LocalPath().endswith('_unittest.mm') and
+        not f.LocalPath().endswith('mac/sdk_forward_declarations.h')):
+      contents = input_api.ReadFile(f)
+      if pattern.search(contents):
+        files.append(f)
+
+  if files:
+    return [ output_api.PresubmitError(
+        'Objective-C interfaces or categories are forbidden in libbase. ' +
+        'See http://groups.google.com/a/chromium.org/group/chromium-dev/' +
+        'browse_thread/thread/efb28c10435987fd',
+        files) ]
+  return []
+
+
+def _CommonChecks(input_api, output_api):
+  """Checks common to both upload and commit."""
+  results = []
+  results.extend(_CheckNoInterfacesInBase(input_api, output_api))
+  return results
+
+def CheckChangeOnUpload(input_api, output_api):
+  results = []
+  results.extend(_CommonChecks(input_api, output_api))
+  return results
+
+
+def CheckChangeOnCommit(input_api, output_api):
+  results = []
+  results.extend(_CommonChecks(input_api, output_api))
+  return results
diff --git a/base/allocator/BUILD.gn b/base/allocator/BUILD.gn
new file mode 100644
index 0000000..636a342
--- /dev/null
+++ b/base/allocator/BUILD.gn
@@ -0,0 +1,284 @@
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/buildflag_header.gni")
+import("//build/config/allocator.gni")
+import("//build/config/compiler/compiler.gni")
+
+declare_args() {
+  # Provide a way to force-disable debugallocation in Debug builds,
+  # e.g. for profiling (it's rarer to profile Debug builds,
+  # but people sometimes need to do that).
+  enable_debugallocation = is_debug
+}
+
+# This "allocator" meta-target will forward to the default allocator according
+# to the build settings.
+group("allocator") {
+  public_deps = []
+  deps = []
+
+  if (use_allocator == "tcmalloc") {
+    deps += [ ":tcmalloc" ]
+  }
+}
+
+config("tcmalloc_flags") {
+  defines = []
+  if (enable_debugallocation) {
+    defines += [
+      # Use debugallocation for Debug builds to catch problems early
+      # and cleanly, http://crbug.com/30715 .
+      "TCMALLOC_FOR_DEBUGALLOCATION",
+    ]
+  }
+  if (use_allocator_shim) {
+    defines += [ "TCMALLOC_DONT_REPLACE_SYSTEM_ALLOC" ]
+  }
+  if (is_clang) {
+    cflags = [
+      # tcmalloc initializes some fields in the wrong order.
+      "-Wno-reorder",
+
+      # tcmalloc contains some unused local template specializations.
+      "-Wno-unused-function",
+
+      # tcmalloc uses COMPILE_ASSERT without static_assert but with typedefs.
+      "-Wno-unused-local-typedefs",
+
+      # for magic2_ in debugallocation.cc (only built in Debug builds).
+      "-Wno-unused-private-field",
+    ]
+  } else {
+    cflags = []
+  }
+
+  if (is_linux || is_android) {
+    # We enable all warnings by default, but upstream disables a few.
+    # Keep "-Wno-*" flags in sync with upstream by comparing against:
+    # http://code.google.com/p/google-perftools/source/browse/trunk/Makefile.am
+    cflags += [
+      "-Wno-sign-compare",
+      "-Wno-unused-result",
+    ]
+  }
+}
+
+if (use_allocator == "tcmalloc") {
+  # tcmalloc currently won't compile on Android.
+  source_set("tcmalloc") {
+    tcmalloc_dir = "//third_party/tcmalloc/chromium"
+
+    # Don't check tcmalloc's includes. These files include various files like
+    # base/foo.h and they actually refer to tcmalloc's forked copy of base
+    # rather than the regular one, which confuses the header checker.
+    check_includes = false
+
+    sources = [
+      # Generated for our configuration from tcmalloc's build
+      # and checked in.
+      "$tcmalloc_dir/src/config.h",
+      "$tcmalloc_dir/src/config_android.h",
+      "$tcmalloc_dir/src/config_linux.h",
+      "$tcmalloc_dir/src/config_win.h",
+
+      # tcmalloc native and forked files.
+      "$tcmalloc_dir/src/base/abort.cc",
+      "$tcmalloc_dir/src/base/abort.h",
+      "$tcmalloc_dir/src/base/arm_instruction_set_select.h",
+      "$tcmalloc_dir/src/base/atomicops-internals-arm-generic.h",
+      "$tcmalloc_dir/src/base/atomicops-internals-arm-v6plus.h",
+      "$tcmalloc_dir/src/base/atomicops-internals-linuxppc.h",
+      "$tcmalloc_dir/src/base/atomicops-internals-macosx.h",
+      "$tcmalloc_dir/src/base/atomicops-internals-windows.h",
+      "$tcmalloc_dir/src/base/atomicops-internals-x86.cc",
+      "$tcmalloc_dir/src/base/atomicops-internals-x86.h",
+      "$tcmalloc_dir/src/base/atomicops.h",
+      "$tcmalloc_dir/src/base/commandlineflags.h",
+      "$tcmalloc_dir/src/base/cycleclock.h",
+
+      # We don't list dynamic_annotations.c since its copy is already
+      # present in the dynamic_annotations target.
+      "$tcmalloc_dir/src/base/elf_mem_image.cc",
+      "$tcmalloc_dir/src/base/elf_mem_image.h",
+      "$tcmalloc_dir/src/base/linuxthreads.cc",
+      "$tcmalloc_dir/src/base/linuxthreads.h",
+      "$tcmalloc_dir/src/base/logging.cc",
+      "$tcmalloc_dir/src/base/logging.h",
+      "$tcmalloc_dir/src/base/low_level_alloc.cc",
+      "$tcmalloc_dir/src/base/low_level_alloc.h",
+      "$tcmalloc_dir/src/base/spinlock.cc",
+      "$tcmalloc_dir/src/base/spinlock.h",
+      "$tcmalloc_dir/src/base/spinlock_internal.cc",
+      "$tcmalloc_dir/src/base/spinlock_internal.h",
+      "$tcmalloc_dir/src/base/synchronization_profiling.h",
+      "$tcmalloc_dir/src/base/sysinfo.cc",
+      "$tcmalloc_dir/src/base/sysinfo.h",
+      "$tcmalloc_dir/src/base/vdso_support.cc",
+      "$tcmalloc_dir/src/base/vdso_support.h",
+      "$tcmalloc_dir/src/central_freelist.cc",
+      "$tcmalloc_dir/src/central_freelist.h",
+      "$tcmalloc_dir/src/common.cc",
+      "$tcmalloc_dir/src/common.h",
+
+      # #included by debugallocation_shim.cc
+      #"$tcmalloc_dir/src/debugallocation.cc",
+      "$tcmalloc_dir/src/free_list.cc",
+      "$tcmalloc_dir/src/free_list.h",
+      "$tcmalloc_dir/src/gperftools/heap-profiler.h",
+      "$tcmalloc_dir/src/gperftools/malloc_extension.h",
+      "$tcmalloc_dir/src/gperftools/malloc_hook.h",
+      "$tcmalloc_dir/src/gperftools/stacktrace.h",
+      "$tcmalloc_dir/src/heap-profile-table.cc",
+      "$tcmalloc_dir/src/heap-profile-table.h",
+      "$tcmalloc_dir/src/heap-profiler.cc",
+      "$tcmalloc_dir/src/internal_logging.cc",
+      "$tcmalloc_dir/src/internal_logging.h",
+      "$tcmalloc_dir/src/linked_list.h",
+      "$tcmalloc_dir/src/malloc_extension.cc",
+      "$tcmalloc_dir/src/malloc_hook-inl.h",
+      "$tcmalloc_dir/src/malloc_hook.cc",
+      "$tcmalloc_dir/src/maybe_threads.cc",
+      "$tcmalloc_dir/src/maybe_threads.h",
+      "$tcmalloc_dir/src/memory_region_map.cc",
+      "$tcmalloc_dir/src/memory_region_map.h",
+      "$tcmalloc_dir/src/page_heap.cc",
+      "$tcmalloc_dir/src/page_heap.h",
+      "$tcmalloc_dir/src/raw_printer.cc",
+      "$tcmalloc_dir/src/raw_printer.h",
+      "$tcmalloc_dir/src/sampler.cc",
+      "$tcmalloc_dir/src/sampler.h",
+      "$tcmalloc_dir/src/span.cc",
+      "$tcmalloc_dir/src/span.h",
+      "$tcmalloc_dir/src/stack_trace_table.cc",
+      "$tcmalloc_dir/src/stack_trace_table.h",
+      "$tcmalloc_dir/src/stacktrace.cc",
+      "$tcmalloc_dir/src/static_vars.cc",
+      "$tcmalloc_dir/src/static_vars.h",
+      "$tcmalloc_dir/src/symbolize.cc",
+      "$tcmalloc_dir/src/symbolize.h",
+      "$tcmalloc_dir/src/system-alloc.cc",
+      "$tcmalloc_dir/src/system-alloc.h",
+
+      # #included by debugallocation_shim.cc
+      #"$tcmalloc_dir/src/tcmalloc.cc",
+      #"$tcmalloc_dir/src/tcmalloc.h",
+      "$tcmalloc_dir/src/thread_cache.cc",
+      "$tcmalloc_dir/src/thread_cache.h",
+      "$tcmalloc_dir/src/windows/port.cc",
+      "$tcmalloc_dir/src/windows/port.h",
+      "debugallocation_shim.cc",
+
+      # These are both #included by allocator_shim for maximal linking.
+      #"generic_allocators.cc",
+      #"win_allocator.cc",
+    ]
+
+    # Not included on mips64el.
+    if (current_cpu == "mips64el") {
+      sources -= [
+        "$tcmalloc_dir/src/base/linuxthreads.cc",
+        "$tcmalloc_dir/src/base/linuxthreads.h",
+      ]
+    }
+
+    # Disable the heap checker in tcmalloc.
+    defines = [ "NO_HEAP_CHECK" ]
+
+    include_dirs = [
+      ".",
+      "$tcmalloc_dir/src/base",
+      "$tcmalloc_dir/src",
+    ]
+
+    configs -= [ "//build/config/compiler:chromium_code" ]
+    configs += [
+      "//build/config/compiler:no_chromium_code",
+      ":tcmalloc_flags",
+    ]
+
+    # Thumb mode disabled due to bug in clang integrated assembler
+    # TODO(https://llvm.org/bugs/show_bug.cgi?id=31058)
+    configs -= [ "//build/config/compiler:compiler_arm_thumb" ]
+    configs += [ "//build/config/compiler:compiler_arm" ]
+
+    # TODO(crbug.com/633719) Make tcmalloc work with AFDO on GCC if possible.
+    if (!is_clang) {
+      configs -= [ "//build/config/compiler:afdo" ]
+    }
+
+    deps = []
+
+    if (enable_profiling) {
+      sources += [
+        "$tcmalloc_dir/src/base/thread_lister.c",
+        "$tcmalloc_dir/src/base/thread_lister.h",
+        "$tcmalloc_dir/src/profile-handler.cc",
+        "$tcmalloc_dir/src/profile-handler.h",
+        "$tcmalloc_dir/src/profiledata.cc",
+        "$tcmalloc_dir/src/profiledata.h",
+        "$tcmalloc_dir/src/profiler.cc",
+      ]
+      defines += [ "ENABLE_PROFILING=1" ]
+    }
+
+    if (is_linux || is_android) {
+      sources -= [
+        "$tcmalloc_dir/src/system-alloc.h",
+        "$tcmalloc_dir/src/windows/port.cc",
+        "$tcmalloc_dir/src/windows/port.h",
+      ]
+
+      # Compiling tcmalloc with -fvisibility=default is only necessary when
+      # not using the allocator shim, which provides the correct visibility
+      # annotations for those symbols which need to be exported (see
+      # //base/allocator/allocator_shim_override_glibc_weak_symbols.h and
+      # //base/allocator/allocator_shim_internals.h for the definition of
+      # SHIM_ALWAYS_EXPORT).
+      if (!use_allocator_shim) {
+        configs -= [ "//build/config/gcc:symbol_visibility_hidden" ]
+        configs += [ "//build/config/gcc:symbol_visibility_default" ]
+      }
+
+      ldflags = [
+        # Don't let the linker rip these symbols out; otherwise the heap and
+        # CPU profilers will not initialize properly on startup.
+        "-Wl,-uIsHeapProfilerRunning,-uProfilerStart",
+
+        # Do the same for heap leak checker.
+        "-Wl,-u_Z21InitialMallocHook_NewPKvj,-u_Z22InitialMallocHook_MMapPKvS0_jiiix,-u_Z22InitialMallocHook_SbrkPKvi",
+        "-Wl,-u_Z21InitialMallocHook_NewPKvm,-u_Z22InitialMallocHook_MMapPKvS0_miiil,-u_Z22InitialMallocHook_SbrkPKvl",
+        "-Wl,-u_ZN15HeapLeakChecker12IgnoreObjectEPKv,-u_ZN15HeapLeakChecker14UnIgnoreObjectEPKv",
+      ]
+    }
+
+    # Make sure the allocation library is optimized as much as possible when
+    # we're in release mode.
+    if (!is_debug) {
+      configs -= [ "//build/config/compiler:default_optimization" ]
+      configs += [ "//build/config/compiler:optimize_max" ]
+    }
+
+    deps += [ "//base/third_party/dynamic_annotations" ]
+  }
+}  # use_allocator == "tcmalloc"
+
+buildflag_header("buildflags") {
+  header = "buildflags.h"
+  flags = [ "USE_ALLOCATOR_SHIM=$use_allocator_shim" ]
+}
+
+# Used to shim malloc symbols on Android. See //base/allocator/README.md.
+config("wrap_malloc_symbols") {
+  ldflags = [
+    "-Wl,-wrap,calloc",
+    "-Wl,-wrap,free",
+    "-Wl,-wrap,malloc",
+    "-Wl,-wrap,memalign",
+    "-Wl,-wrap,posix_memalign",
+    "-Wl,-wrap,pvalloc",
+    "-Wl,-wrap,realloc",
+    "-Wl,-wrap,valloc",
+  ]
+}
diff --git a/base/allocator/OWNERS b/base/allocator/OWNERS
new file mode 100644
index 0000000..de658d0
--- /dev/null
+++ b/base/allocator/OWNERS
@@ -0,0 +1,4 @@
+primiano@chromium.org
+wfh@chromium.org
+
+# COMPONENT: Internals
diff --git a/base/allocator/README.md b/base/allocator/README.md
new file mode 100644
index 0000000..62b9be6
--- /dev/null
+++ b/base/allocator/README.md
@@ -0,0 +1,197 @@
+This document describes how malloc / new calls are routed in the various Chrome
+platforms.
+
+Bear in mind that the Chromium codebase does not always just use `malloc()`.
+Some examples:
+ - Large parts of the renderer (Blink) use two home-brewed allocators,
+   PartitionAlloc and BlinkGC (Oilpan).
+ - Some subsystems, such as the V8 JavaScript engine, handle memory management
+   autonomously.
+ - Various parts of the codebase use abstractions such as `SharedMemory` or
+   `DiscardableMemory` which, similarly to the above, have their own page-level
+   memory management.
+
+Background
+----------
+The `allocator` target defines at compile time the platform-specific choice of
+the allocator and the extra hooks that service calls to malloc/new. The
+relevant build-time flags are `use_allocator` and `use_allocator_shim`.
+
+The default choices are as follows:
+
+**Windows**
+`use_allocator: winheap`, the default Windows heap.
+Additionally, `static_library` (i.e. non-component) builds have a shim
+layer wrapping malloc/new, which is controlled by `use_allocator_shim`.
+The shim layer provides extra security features, such as preventing large
+allocations that can hit signed vs. unsigned bugs in third_party code.
+
+**Linux Desktop / CrOS**
+`use_allocator: tcmalloc`, a forked copy of tcmalloc which resides in
+`third_party/tcmalloc/chromium`. Setting `use_allocator: none` causes the build
+to fall back to the system (Glibc) symbols.
+
+**Android**
+`use_allocator: none`, always use the allocator symbols coming from Android's
+libc (Bionic). As it is developed as part of the OS, it is considered to be
+optimized for small devices and more memory-efficient than other choices.
+The actual implementation backing malloc symbols in Bionic is up to the board
+config and can vary (typically *dlmalloc* or *jemalloc* on most Nexus devices).
+
+**Mac/iOS**
+`use_allocator: none`, we always use the system's allocator implementation.
+
+In addition, when building for `asan` / `msan` both the allocator and the shim
+layer are disabled.
+
+Layering and build deps
+-----------------------
+The `allocator` target provides both the source files for tcmalloc (where
+applicable) and the linker flags required for the Windows shim layer.
+The `base` target is (almost) the only one depending on `allocator`. No other
+targets should depend on it, with the exception of the very few executables /
+dynamic libraries that don't depend, either directly or indirectly, on `base`
+within the scope of a linker unit.
+
+More importantly, **no other place outside of `/base` should depend on the
+specific allocator** (e.g., directly include `third_party/tcmalloc`).
+If such a functional dependency is required, it should be achieved using
+abstractions in `base` (see `/base/allocator/allocator_extension.h` and
+`/base/memory/`).
+
+**Why does `base` depend on `allocator`?**
+Because it needs to provide services that depend on the actual allocator
+implementation. In the past `base` used to pretend to be allocator-agnostic
+and get the dependencies injected by other layers. This ended up being an
+inconsistent mess.
+See the [allocator cleanup doc][url-allocator-cleanup] for more context.
+
+Linker unit targets (executables and shared libraries) that depend in some way
+on `base` (most of the targets in the codebase) automatically get the correct
+set of linker flags to pull in tcmalloc or the Windows shim-layer.
+
+
+Source code
+-----------
+This directory contains just the allocator (i.e. shim) layer that switches
+between the different underlying memory allocation implementations.
+
+The tcmalloc library originates outside of Chromium and exists in
+`../../third_party/tcmalloc` (currently, the actual location is defined in the
+allocator.gyp file). The third party sources use a vendor-branch SCM pattern to
+track Chromium-specific changes independently from upstream changes.
+
+The general intent is to push local changes upstream so that over
+time we no longer need any forked files.
+
+
+Unified allocator shim
+----------------------
+On most platforms, Chrome overrides the malloc / operator new symbols (and
+corresponding free / delete and other variants). This is to enforce security
+checks and lately to enable the
+[memory-infra heap profiler][url-memory-infra-heap-profiler].
+Historically each platform had its special logic for defining the allocator
+symbols in different places of the codebase. The unified allocator shim is a
+project that unifies the symbol definitions and the allocator routing logic in
+a central place.
+
+ - Full documentation: [Allocator shim design doc][url-allocator-shim].
+ - Current state: Available and enabled by default on Android, CrOS, Linux,
+   Mac OS and Windows.
+ - Tracking bug: [crbug.com/550886](https://crbug.com/550886).
+ - Build-time flag: `use_allocator_shim`.
+
+**Overview of the unified allocator shim**
+The allocator shim consists of three stages:
+```
++-------------------------+    +-----------------------+    +----------------+
+|     malloc & friends    | -> |       shim layer      | -> |   Routing to   |
+|    symbols definition   |    |     implementation    |    |    allocator   |
++-------------------------+    +-----------------------+    +----------------+
+| - libc symbols (malloc, |    | - Security checks     |    | - tcmalloc     |
+|   calloc, free, ...)    |    | - Chain of dispatchers|    | - glibc        |
+| - C++ symbols (operator |    |   that can intercept  |    | - Android      |
+|   new, delete, ...)     |    |   and override        |    |   bionic       |
+| - glibc weak symbols    |    |   allocations         |    | - WinHeap      |
+|   (__libc_malloc, ...)  |    +-----------------------+    +----------------+
++-------------------------+
+```
+
+**1. malloc symbols definition**
+This stage takes care of overriding the symbols `malloc`, `free`,
+`operator new`, `operator delete` and friends and routing those calls inside the
+allocator shim (next point).
+This is taken care of by the headers in `allocator_shim_override_*`.
+
+*On Linux/CrOS*: the allocator symbols are defined as exported global symbols
+in `allocator_shim_override_libc_symbols.h` (for `malloc`, `free` and friends)
+and in `allocator_shim_override_cpp_symbols.h` (for `operator new`,
+`operator delete` and friends).
+This enables proper interposition of malloc symbols referenced by the main
+executable and any third party libraries. Symbol resolution on Linux is a
+breadth-first search that starts from the root link unit, that is the
+executable (see EXECUTABLE AND LINKABLE FORMAT (ELF) - Portable Formats
+Specification).
+Additionally, when tcmalloc is the default allocator, some extra glibc symbols
+are also defined in `allocator_shim_override_glibc_weak_symbols.h`, for subtle
+reasons explained in that file.
+The Linux/CrOS shim was introduced by
+[crrev.com/1675143004](https://crrev.com/1675143004).
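+
+For illustration, a minimal sketch of this interposition pattern, assuming
+`ShimMalloc()`/`ShimFree()` stand-ins for the shim entry points (the real
+definitions live in the `allocator_shim_override_*` headers; this is not the
+actual Chromium code):
+```
+#include <stddef.h>
+
+// glibc exposes its underlying allocator as __libc_malloc/__libc_free; the
+// terminal step of the shim can route there.
+extern "C" void* __libc_malloc(size_t size);
+extern "C" void __libc_free(void* ptr);
+
+// Illustrative stand-ins for the shim layer entry points.
+static void* ShimMalloc(size_t size) {
+  // Security checks and dispatcher hooks would run here.
+  return __libc_malloc(size);
+}
+
+static void ShimFree(void* ptr) {
+  __libc_free(ptr);
+}
+
+// Exported, non-weak definitions in the root link unit win symbol resolution
+// over libc's, so every malloc/free call in the process routes into the shim.
+extern "C" __attribute__((visibility("default"))) void* malloc(size_t size) {
+  return ShimMalloc(size);
+}
+
+extern "C" __attribute__((visibility("default"))) void free(void* ptr) {
+  ShimFree(ptr);
+}
+```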
+
+*On Android*: load-time symbol interposition (unlike the Linux/CrOS case) is not
+possible. This is because Android processes are `fork()`-ed from the Android
+zygote, which pre-loads libc.so and only later native code gets loaded via
+`dlopen()` (symbols from `dlopen()`-ed libraries get a different resolution
+scope).
+In this case, the approach is instead to wrap symbol resolution at link time
+(i.e. during the build), via the `-Wl,-wrap,malloc` linker flag.
+The use of this wrapping flag causes:
+ - All references to allocator symbols in the Chrome codebase to be rewritten as
+   references to `__wrap_malloc` and friends. The `__wrap_malloc` symbols are
+   defined in the `allocator_shim_override_linker_wrapped_symbols.h` and
+   route allocator calls inside the shim layer.
+ - References to the original `malloc` symbols (which are typically defined by
+   the system's libc.so) to be accessible via the special `__real_malloc` and
+   friends symbols (which will be relocated, at load time, against `malloc`).
+
+In summary, this approach is transparent to the dynamic loader, which still sees
+undefined symbol references to malloc symbols.
+These symbols will be resolved against libc.so as usual.
+More details in [crrev.com/1719433002](https://crrev.com/1719433002).
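+
+A minimal sketch of the wrapped-symbol convention (the `__wrap_` / `__real_`
+prefixes come from the linker's `-wrap` feature and require building with
+`-Wl,-wrap,malloc`; the body here is illustrative, not Chromium's actual shim):
+```
+#include <stddef.h>
+
+// With -Wl,-wrap,malloc the linker rewrites references to malloc into
+// references to __wrap_malloc, while the original libc definition stays
+// reachable as __real_malloc (relocated against malloc at load time).
+extern "C" void* __real_malloc(size_t size);
+
+extern "C" void* __wrap_malloc(size_t size) {
+  // Route the call through the shim layer here; ultimately fall through to
+  // Bionic's allocator via __real_malloc.
+  return __real_malloc(size);
+}
+```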
+
+**2. Shim layer implementation**
+This stage contains the actual shim implementation. This consists of:
+- A singly linked list of dispatchers (structs with function pointers to
+`malloc`-like functions). Dispatchers can be dynamically inserted at runtime
+(using the `InsertAllocatorDispatch` API). They can intercept and override
+allocator calls.
+- The security checks (suicide on malloc failure via `std::new_handler`,
+etc.). This happens inside `allocator_shim.cc`.
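+
+To make the dispatcher chain concrete, here is a hedged sketch of a chain
+element (the names below are simplified stand-ins for the real
+`AllocatorDispatch` struct and the `InsertAllocatorDispatch` API):
+```
+#include <stddef.h>
+
+// Each dispatcher holds function pointers plus a link to the next element;
+// the final element routes to the actual allocator (see point 3 below).
+struct Dispatch {
+  void* (*alloc)(const Dispatch* self, size_t size);
+  void (*free)(const Dispatch* self, void* ptr);
+  const Dispatch* next;
+};
+
+static size_t g_alloc_count = 0;
+
+// An interposing dispatcher: observe the call, then forward down the chain.
+void* CountingAlloc(const Dispatch* self, size_t size) {
+  ++g_alloc_count;
+  return self->next->alloc(self->next, size);
+}
+
+void CountingFree(const Dispatch* self, void* ptr) {
+  self->next->free(self->next, ptr);
+}
+```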
+
+**3. Final allocator routing**
+The final element of the aforementioned dispatcher chain is statically defined
+at build time and ultimately routes the allocator calls to the actual allocator
+(as described in the *Background* section above). This is taken care of by the
+headers in `allocator_shim_default_dispatch_to_*` files.
+
+
+Appendixes
+----------
+**How does the Windows shim layer replace the malloc symbols?**
+The mechanism for hooking LIBCMT in Windows is rather tricky.  The core
+problem is that by default, the Windows library does not declare malloc and
+free as weak symbols.  Because of this, they cannot be overridden.  To work
+around this, we start with LIBCMT.LIB and manually remove all
+allocator-related functions from it using the Visual Studio library tool.
+Once removed, we can link against the library and provide custom versions of
+the allocator-related functionality.
+See the script `prep_libc.py` in this folder.
+
+Related links
+-------------
+- [Unified allocator shim doc - Feb 2016][url-allocator-shim]
+- [Allocator cleanup doc - Jan 2016][url-allocator-cleanup]
+- [Proposal to use PartitionAlloc as default allocator](https://crbug.com/339604)
+- [Memory-Infra: Tools to profile memory usage in Chrome](/docs/memory-infra/README.md)
+
+[url-allocator-cleanup]: https://docs.google.com/document/d/1V77Kgp_4tfaaWPEZVxNevoD02wXiatnAv7Ssgr0hmjg/edit?usp=sharing
+[url-memory-infra-heap-profiler]: /docs/memory-infra/heap_profiler.md
+[url-allocator-shim]: https://docs.google.com/document/d/1yKlO1AO4XjpDad9rjcBOI15EKdAGsuGO_IeZy0g0kxo/edit?usp=sharing
diff --git a/base/allocator/allocator_check.cc b/base/allocator/allocator_check.cc
new file mode 100644
index 0000000..5fb8646
--- /dev/null
+++ b/base/allocator/allocator_check.cc
@@ -0,0 +1,46 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/allocator_check.h"
+
+#include "base/allocator/buildflags.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include "base/allocator/winheap_stubs_win.h"
+#endif
+
+#if defined(OS_LINUX)
+#include <malloc.h>
+#endif
+
+#if defined(OS_MACOSX)
+#include "base/allocator/allocator_interception_mac.h"
+#endif
+
+namespace base {
+namespace allocator {
+
+bool IsAllocatorInitialized() {
+#if defined(OS_WIN) && BUILDFLAG(USE_ALLOCATOR_SHIM)
+  // Set by allocator_shim_override_ucrt_symbols_win.h when the
+  // shimmed _set_new_mode() is called.
+  return g_is_win_shim_layer_initialized;
+#elif defined(OS_LINUX) && defined(USE_TCMALLOC) && \
+    !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+// From third_party/tcmalloc/chromium/src/gperftools/tcmalloc.h.
+// TODO(primiano): replace with an include once base can depend on allocator.
+#define TC_MALLOPT_IS_OVERRIDDEN_BY_TCMALLOC 0xbeef42
+  return (mallopt(TC_MALLOPT_IS_OVERRIDDEN_BY_TCMALLOC, 0) ==
+          TC_MALLOPT_IS_OVERRIDDEN_BY_TCMALLOC);
+#elif defined(OS_MACOSX) && !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+  // From allocator_interception_mac.mm.
+  return base::allocator::g_replaced_default_zone;
+#else
+  return true;
+#endif
+}
+
+}  // namespace allocator
+}  // namespace base
diff --git a/base/allocator/allocator_check.h b/base/allocator/allocator_check.h
new file mode 100644
index 0000000..cf519fd
--- /dev/null
+++ b/base/allocator/allocator_check.h
@@ -0,0 +1,18 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_ALLOCATOR_CHECK_H_
+#define BASE_ALLOCATOR_ALLOCATOR_CHECK_H_
+
+#include "base/base_export.h"
+
+namespace base {
+namespace allocator {
+
+BASE_EXPORT bool IsAllocatorInitialized();
+
+}  // namespace allocator
+}  // namespace base
+
+#endif  // BASE_ALLOCATOR_ALLOCATOR_CHECK_H_
diff --git a/base/allocator/allocator_extension.cc b/base/allocator/allocator_extension.cc
new file mode 100644
index 0000000..9a3d114
--- /dev/null
+++ b/base/allocator/allocator_extension.cc
@@ -0,0 +1,60 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/allocator_extension.h"
+
+#include "base/logging.h"
+
+#if defined(USE_TCMALLOC)
+#include "third_party/tcmalloc/chromium/src/gperftools/heap-profiler.h"
+#include "third_party/tcmalloc/chromium/src/gperftools/malloc_extension.h"
+#include "third_party/tcmalloc/chromium/src/gperftools/malloc_hook.h"
+#endif
+
+namespace base {
+namespace allocator {
+
+void ReleaseFreeMemory() {
+#if defined(USE_TCMALLOC)
+  ::MallocExtension::instance()->ReleaseFreeMemory();
+#endif
+}
+
+bool GetNumericProperty(const char* name, size_t* value) {
+#if defined(USE_TCMALLOC)
+  return ::MallocExtension::instance()->GetNumericProperty(name, value);
+#endif
+  return false;
+}
+
+bool IsHeapProfilerRunning() {
+#if defined(USE_TCMALLOC)
+  return ::IsHeapProfilerRunning();
+#endif
+  return false;
+}
+
+void SetHooks(AllocHookFunc alloc_hook, FreeHookFunc free_hook) {
+// TODO(sque): Use allocator shim layer instead.
+#if defined(USE_TCMALLOC)
+  // Make sure no hooks get overwritten.
+  auto prev_alloc_hook = MallocHook::SetNewHook(alloc_hook);
+  if (alloc_hook)
+    DCHECK(!prev_alloc_hook);
+
+  auto prev_free_hook = MallocHook::SetDeleteHook(free_hook);
+  if (free_hook)
+    DCHECK(!prev_free_hook);
+#endif
+}
+
+int GetCallStack(void** stack, int max_stack_size) {
+#if defined(USE_TCMALLOC)
+  return MallocHook::GetCallerStackTrace(stack, max_stack_size, 0);
+#endif
+  return 0;
+}
+
+}  // namespace allocator
+}  // namespace base
diff --git a/base/allocator/allocator_extension.h b/base/allocator/allocator_extension.h
new file mode 100644
index 0000000..9f2775a
--- /dev/null
+++ b/base/allocator/allocator_extension.h
@@ -0,0 +1,51 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_ALLOCATOR_EXTENSION_H_
+#define BASE_ALLOCATOR_ALLOCATOR_EXTENSION_H_
+
+#include <stddef.h>  // for size_t
+
+#include "base/base_export.h"
+#include "build/build_config.h"
+
+namespace base {
+namespace allocator {
+
+// Callback types for alloc and free.
+using AllocHookFunc = void (*)(const void*, size_t);
+using FreeHookFunc = void (*)(const void*);
+
+// Request that the allocator release any free memory it knows about to the
+// system.
+BASE_EXPORT void ReleaseFreeMemory();
+
+// Get the named property's |value|. Returns true if the property is known.
+// Returns false if the property is not a valid property name for the current
+// allocator implementation.
+// Neither |name| nor |value| may be NULL.
+BASE_EXPORT bool GetNumericProperty(const char* name, size_t* value);
+
+BASE_EXPORT bool IsHeapProfilerRunning();
+
+// Register callbacks for alloc and free. Can only store one callback at a time
+// for each of alloc and free.
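+//
+// Illustrative usage only (the hook functions here are hypothetical):
+//   void OnAlloc(const void* address, size_t size) { /* record it */ }
+//   void OnFree(const void* address) { /* record it */ }
+//   base::allocator::SetHooks(&OnAlloc, &OnFree);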
+BASE_EXPORT void SetHooks(AllocHookFunc alloc_hook, FreeHookFunc free_hook);
+
+// Attempts to unwind the call stack from the current location. Must be called
+// from a hook function registered by calling SetHooks(), directly or
+// indirectly.
+//
+// Arguments:
+//   stack:          pointer to a pre-allocated array of void*'s.
+//   max_stack_size: indicates the size of the array in |stack|.
+//
+// Returns the number of call stack frames stored in |stack|, or 0 if no call
+// stack information is available.
+BASE_EXPORT int GetCallStack(void** stack, int max_stack_size);
+
+}  // namespace allocator
+}  // namespace base
+
+#endif  // BASE_ALLOCATOR_ALLOCATOR_EXTENSION_H_
diff --git a/base/allocator/allocator_interception_mac.h b/base/allocator/allocator_interception_mac.h
new file mode 100644
index 0000000..68f1d53
--- /dev/null
+++ b/base/allocator/allocator_interception_mac.h
@@ -0,0 +1,56 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_ALLOCATOR_INTERCEPTION_MAC_H_
+#define BASE_ALLOCATOR_ALLOCATOR_INTERCEPTION_MAC_H_
+
+#include <stddef.h>
+
+#include "base/base_export.h"
+#include "third_party/apple_apsl/malloc.h"
+
+namespace base {
+namespace allocator {
+
+struct MallocZoneFunctions;
+
+// Saves the function pointers currently used by the default zone.
+void StoreFunctionsForDefaultZone();
+
+// Same as StoreFunctionsForDefaultZone, but for all malloc zones.
+void StoreFunctionsForAllZones();
+
+// For all malloc zones that have been stored, replace their functions with
+// |functions|.
+void ReplaceFunctionsForStoredZones(const MallocZoneFunctions* functions);
+
+extern bool g_replaced_default_zone;
+
+// Calls the original implementation of malloc/calloc prior to interception.
+bool UncheckedMallocMac(size_t size, void** result);
+bool UncheckedCallocMac(size_t num_items, size_t size, void** result);
+
+// Intercepts calls to default and purgeable malloc zones. Intercepts Core
+// Foundation and Objective-C allocations.
+// Has no effect on the default malloc zone if the allocator shim already
+// performs that interception.
+BASE_EXPORT void InterceptAllocationsMac();
+
+// Updates all malloc zones to use their original functions.
+// Also calls ClearAllMallocZonesForTesting.
+BASE_EXPORT void UninterceptMallocZonesForTesting();
+
+// Periodically checks for, and shims, new malloc zones. Stops checking after
+// 1 minute.
+BASE_EXPORT void PeriodicallyShimNewMallocZones();
+
+// Exposed for testing.
+BASE_EXPORT void ShimNewMallocZones();
+BASE_EXPORT void ReplaceZoneFunctions(ChromeMallocZone* zone,
+                                      const MallocZoneFunctions* functions);
+
+}  // namespace allocator
+}  // namespace base
+
+#endif  // BASE_ALLOCATOR_ALLOCATOR_INTERCEPTION_MAC_H_
diff --git a/base/allocator/allocator_interception_mac.mm b/base/allocator/allocator_interception_mac.mm
new file mode 100644
index 0000000..5020287
--- /dev/null
+++ b/base/allocator/allocator_interception_mac.mm
@@ -0,0 +1,568 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains all the logic necessary to intercept allocations on
+// macOS. "malloc zones" are an abstraction that allows the process to intercept
+// all malloc-related functions.  There is no good mechanism [short of
+// interposition] to determine when new malloc zones are added, so there's no
+// clean mechanism to intercept all malloc zones. This file contains logic to
+// intercept the default and purgeable zones, which always exist. A cursory
+// review of Chrome seems to imply that non-default zones are almost never used.
+//
+// This file also contains logic to intercept Core Foundation and Objective-C
+// allocations. The implementations forward to the default malloc zone, so the
+// only reason to intercept these calls is to re-label OOM crashes with slightly
+// more details.
+
+#include "base/allocator/allocator_interception_mac.h"
+
+#include <CoreFoundation/CoreFoundation.h>
+#import <Foundation/Foundation.h>
+#include <errno.h>
+#include <mach/mach.h>
+#include <mach/mach_vm.h>
+#import <objc/runtime.h>
+#include <stddef.h>
+
+#include <new>
+
+#include "base/allocator/buildflags.h"
+#include "base/allocator/malloc_zone_functions_mac.h"
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/mac/mac_util.h"
+#include "base/mac/mach_logging.h"
+#include "base/process/memory.h"
+#include "base/scoped_clear_errno.h"
+#include "base/threading/sequenced_task_runner_handle.h"
+#include "build/build_config.h"
+#include "third_party/apple_apsl/CFBase.h"
+
+namespace base {
+namespace allocator {
+
+bool g_replaced_default_zone = false;
+
+namespace {
+
+bool g_oom_killer_enabled;
+
+// Starting with Mac OS X 10.7, the zone allocators set up by the system are
+// read-only, to prevent them from being overwritten in an attack. However,
+// blindly unprotecting and reprotecting the zone allocators fails with
+// GuardMalloc because GuardMalloc sets up its zone allocator using a block of
+// memory in its bss. Explicit saving/restoring of the protection is required.
+//
+// This function takes a pointer to a malloc zone, de-protects it if necessary,
+// and returns (in the out parameters) a region of memory (if any) to be
+// re-protected when modifications are complete. This approach assumes that
+// there is no contention for the protection of this memory.
+void DeprotectMallocZone(ChromeMallocZone* default_zone,
+                         mach_vm_address_t* reprotection_start,
+                         mach_vm_size_t* reprotection_length,
+                         vm_prot_t* reprotection_value) {
+  mach_port_t unused;
+  *reprotection_start = reinterpret_cast<mach_vm_address_t>(default_zone);
+  struct vm_region_basic_info_64 info;
+  mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
+  kern_return_t result = mach_vm_region(
+      mach_task_self(), reprotection_start, reprotection_length,
+      VM_REGION_BASIC_INFO_64, reinterpret_cast<vm_region_info_t>(&info),
+      &count, &unused);
+  MACH_CHECK(result == KERN_SUCCESS, result) << "mach_vm_region";
+
+  // The kernel always returns a null object for VM_REGION_BASIC_INFO_64, but
+  // balance it with a deallocate in case this ever changes. See 10.9.2
+  // xnu-2422.90.20/osfmk/vm/vm_map.c vm_map_region.
+  mach_port_deallocate(mach_task_self(), unused);
+
+  // Does the region fully enclose the zone pointers? Possibly unwarranted
+  // simplification used: using the size of a full version 8 malloc zone rather
+  // than the actual smaller size if the passed-in zone is not version 8.
+  CHECK(*reprotection_start <=
+        reinterpret_cast<mach_vm_address_t>(default_zone));
+  mach_vm_size_t zone_offset =
+      reinterpret_cast<mach_vm_size_t>(default_zone) -
+      reinterpret_cast<mach_vm_size_t>(*reprotection_start);
+  CHECK(zone_offset + sizeof(ChromeMallocZone) <= *reprotection_length);
+
+  if (info.protection & VM_PROT_WRITE) {
+    // No change needed; the zone is already writable.
+    *reprotection_start = 0;
+    *reprotection_length = 0;
+    *reprotection_value = VM_PROT_NONE;
+  } else {
+    *reprotection_value = info.protection;
+    result = mach_vm_protect(mach_task_self(), *reprotection_start,
+                             *reprotection_length, false,
+                             info.protection | VM_PROT_WRITE);
+    MACH_CHECK(result == KERN_SUCCESS, result) << "mach_vm_protect";
+  }
+}
+
+#if !defined(ADDRESS_SANITIZER)
+
+MallocZoneFunctions g_old_zone;
+MallocZoneFunctions g_old_purgeable_zone;
+
+void* oom_killer_malloc(struct _malloc_zone_t* zone, size_t size) {
+  void* result = g_old_zone.malloc(zone, size);
+  if (!result && size)
+    TerminateBecauseOutOfMemory(size);
+  return result;
+}
+
+void* oom_killer_calloc(struct _malloc_zone_t* zone,
+                        size_t num_items,
+                        size_t size) {
+  void* result = g_old_zone.calloc(zone, num_items, size);
+  if (!result && num_items && size)
+    TerminateBecauseOutOfMemory(num_items * size);
+  return result;
+}
+
+void* oom_killer_valloc(struct _malloc_zone_t* zone, size_t size) {
+  void* result = g_old_zone.valloc(zone, size);
+  if (!result && size)
+    TerminateBecauseOutOfMemory(size);
+  return result;
+}
+
+void oom_killer_free(struct _malloc_zone_t* zone, void* ptr) {
+  g_old_zone.free(zone, ptr);
+}
+
+void* oom_killer_realloc(struct _malloc_zone_t* zone, void* ptr, size_t size) {
+  void* result = g_old_zone.realloc(zone, ptr, size);
+  if (!result && size)
+    TerminateBecauseOutOfMemory(size);
+  return result;
+}
+
+void* oom_killer_memalign(struct _malloc_zone_t* zone,
+                          size_t alignment,
+                          size_t size) {
+  void* result = g_old_zone.memalign(zone, alignment, size);
+  // Only die if posix_memalign would have returned ENOMEM, since there are
+  // other reasons why NULL might be returned (see
+  // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c ).
+  if (!result && size && alignment >= sizeof(void*) &&
+      (alignment & (alignment - 1)) == 0) {
+    TerminateBecauseOutOfMemory(size);
+  }
+  return result;
+}
+
+void* oom_killer_malloc_purgeable(struct _malloc_zone_t* zone, size_t size) {
+  void* result = g_old_purgeable_zone.malloc(zone, size);
+  if (!result && size)
+    TerminateBecauseOutOfMemory(size);
+  return result;
+}
+
+void* oom_killer_calloc_purgeable(struct _malloc_zone_t* zone,
+                                  size_t num_items,
+                                  size_t size) {
+  void* result = g_old_purgeable_zone.calloc(zone, num_items, size);
+  if (!result && num_items && size)
+    TerminateBecauseOutOfMemory(num_items * size);
+  return result;
+}
+
+void* oom_killer_valloc_purgeable(struct _malloc_zone_t* zone, size_t size) {
+  void* result = g_old_purgeable_zone.valloc(zone, size);
+  if (!result && size)
+    TerminateBecauseOutOfMemory(size);
+  return result;
+}
+
+void oom_killer_free_purgeable(struct _malloc_zone_t* zone, void* ptr) {
+  g_old_purgeable_zone.free(zone, ptr);
+}
+
+void* oom_killer_realloc_purgeable(struct _malloc_zone_t* zone,
+                                   void* ptr,
+                                   size_t size) {
+  void* result = g_old_purgeable_zone.realloc(zone, ptr, size);
+  if (!result && size)
+    TerminateBecauseOutOfMemory(size);
+  return result;
+}
+
+void* oom_killer_memalign_purgeable(struct _malloc_zone_t* zone,
+                                    size_t alignment,
+                                    size_t size) {
+  void* result = g_old_purgeable_zone.memalign(zone, alignment, size);
+  // Only die if posix_memalign would have returned ENOMEM, since there are
+  // other reasons why NULL might be returned (see
+  // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c ).
+  if (!result && size && alignment >= sizeof(void*) &&
+      (alignment & (alignment - 1)) == 0) {
+    TerminateBecauseOutOfMemory(size);
+  }
+  return result;
+}
+
+#endif  // !defined(ADDRESS_SANITIZER)
+
+#if !defined(ADDRESS_SANITIZER)
+
+// === Core Foundation CFAllocators ===
+
+bool CanGetContextForCFAllocator() {
+  return !base::mac::IsOSLaterThan10_13_DontCallThis();
+}
+
+CFAllocatorContext* ContextForCFAllocator(CFAllocatorRef allocator) {
+  ChromeCFAllocatorLions* our_allocator = const_cast<ChromeCFAllocatorLions*>(
+      reinterpret_cast<const ChromeCFAllocatorLions*>(allocator));
+  return &our_allocator->_context;
+}
+
+CFAllocatorAllocateCallBack g_old_cfallocator_system_default;
+CFAllocatorAllocateCallBack g_old_cfallocator_malloc;
+CFAllocatorAllocateCallBack g_old_cfallocator_malloc_zone;
+
+void* oom_killer_cfallocator_system_default(CFIndex alloc_size,
+                                            CFOptionFlags hint,
+                                            void* info) {
+  void* result = g_old_cfallocator_system_default(alloc_size, hint, info);
+  if (!result)
+    TerminateBecauseOutOfMemory(alloc_size);
+  return result;
+}
+
+void* oom_killer_cfallocator_malloc(CFIndex alloc_size,
+                                    CFOptionFlags hint,
+                                    void* info) {
+  void* result = g_old_cfallocator_malloc(alloc_size, hint, info);
+  if (!result)
+    TerminateBecauseOutOfMemory(alloc_size);
+  return result;
+}
+
+void* oom_killer_cfallocator_malloc_zone(CFIndex alloc_size,
+                                         CFOptionFlags hint,
+                                         void* info) {
+  void* result = g_old_cfallocator_malloc_zone(alloc_size, hint, info);
+  if (!result)
+    TerminateBecauseOutOfMemory(alloc_size);
+  return result;
+}
+
+#endif  // !defined(ADDRESS_SANITIZER)
+
+// === Cocoa NSObject allocation ===
+
+typedef id (*allocWithZone_t)(id, SEL, NSZone*);
+allocWithZone_t g_old_allocWithZone;
+
+id oom_killer_allocWithZone(id self, SEL _cmd, NSZone* zone) {
+  id result = g_old_allocWithZone(self, _cmd, zone);
+  if (!result)
+    TerminateBecauseOutOfMemory(0);
+  return result;
+}
+
+void UninterceptMallocZoneForTesting(struct _malloc_zone_t* zone) {
+  ChromeMallocZone* chrome_zone = reinterpret_cast<ChromeMallocZone*>(zone);
+  if (!IsMallocZoneAlreadyStored(chrome_zone))
+    return;
+  MallocZoneFunctions& functions = GetFunctionsForZone(zone);
+  ReplaceZoneFunctions(chrome_zone, &functions);
+}
+
+}  // namespace
+
+bool UncheckedMallocMac(size_t size, void** result) {
+#if defined(ADDRESS_SANITIZER)
+  *result = malloc(size);
+#else
+  if (g_old_zone.malloc) {
+    *result = g_old_zone.malloc(malloc_default_zone(), size);
+  } else {
+    *result = malloc(size);
+  }
+#endif  // defined(ADDRESS_SANITIZER)
+
+  return *result != NULL;
+}
+
+bool UncheckedCallocMac(size_t num_items, size_t size, void** result) {
+#if defined(ADDRESS_SANITIZER)
+  *result = calloc(num_items, size);
+#else
+  if (g_old_zone.calloc) {
+    *result = g_old_zone.calloc(malloc_default_zone(), num_items, size);
+  } else {
+    *result = calloc(num_items, size);
+  }
+#endif  // defined(ADDRESS_SANITIZER)
+
+  return *result != NULL;
+}
+
+void StoreFunctionsForDefaultZone() {
+  ChromeMallocZone* default_zone = reinterpret_cast<ChromeMallocZone*>(
+      malloc_default_zone());
+  StoreMallocZone(default_zone);
+}
+
+void StoreFunctionsForAllZones() {
+  // This ensures that the default zone is always at the front of the array,
+  // which is important for performance.
+  StoreFunctionsForDefaultZone();
+
+  vm_address_t* zones;
+  unsigned int count;
+  kern_return_t kr = malloc_get_all_zones(mach_task_self(), 0, &zones, &count);
+  if (kr != KERN_SUCCESS)
+    return;
+  for (unsigned int i = 0; i < count; ++i) {
+    ChromeMallocZone* zone = reinterpret_cast<ChromeMallocZone*>(zones[i]);
+    StoreMallocZone(zone);
+  }
+}
+
+void ReplaceFunctionsForStoredZones(const MallocZoneFunctions* functions) {
+  // The default zone does not get returned in malloc_get_all_zones().
+  ChromeMallocZone* default_zone =
+      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
+  if (DoesMallocZoneNeedReplacing(default_zone, functions)) {
+    ReplaceZoneFunctions(default_zone, functions);
+  }
+
+  vm_address_t* zones;
+  unsigned int count;
+  kern_return_t kr =
+      malloc_get_all_zones(mach_task_self(), nullptr, &zones, &count);
+  if (kr != KERN_SUCCESS)
+    return;
+  for (unsigned int i = 0; i < count; ++i) {
+    ChromeMallocZone* zone = reinterpret_cast<ChromeMallocZone*>(zones[i]);
+    if (DoesMallocZoneNeedReplacing(zone, functions)) {
+      ReplaceZoneFunctions(zone, functions);
+    }
+  }
+  g_replaced_default_zone = true;
+}
+
+void InterceptAllocationsMac() {
+  if (g_oom_killer_enabled)
+    return;
+
+  g_oom_killer_enabled = true;
+
+// === C malloc/calloc/valloc/realloc/posix_memalign ===
+
+// This approach is not perfect, as requests for amounts of memory larger than
+// MALLOC_ABSOLUTE_MAX_SIZE (currently SIZE_T_MAX - (2 * PAGE_SIZE)) will
+// still fail with a NULL rather than dying (see
+// http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c for details).
+// Unfortunately, it's the best we can do. Also note that this does not affect
+// allocations from non-default zones.
+
+#if !defined(ADDRESS_SANITIZER)
+  // Don't do anything special on OOM for the malloc zones replaced by
+  // AddressSanitizer, as modifying or protecting them may not work correctly.
+  ChromeMallocZone* default_zone =
+      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
+  if (!IsMallocZoneAlreadyStored(default_zone)) {
+    StoreZoneFunctions(default_zone, &g_old_zone);
+    MallocZoneFunctions new_functions = {};
+    new_functions.malloc = oom_killer_malloc;
+    new_functions.calloc = oom_killer_calloc;
+    new_functions.valloc = oom_killer_valloc;
+    new_functions.free = oom_killer_free;
+    new_functions.realloc = oom_killer_realloc;
+    new_functions.memalign = oom_killer_memalign;
+
+    ReplaceZoneFunctions(default_zone, &new_functions);
+    g_replaced_default_zone = true;
+  }
+
+  ChromeMallocZone* purgeable_zone =
+      reinterpret_cast<ChromeMallocZone*>(malloc_default_purgeable_zone());
+  if (purgeable_zone && !IsMallocZoneAlreadyStored(purgeable_zone)) {
+    StoreZoneFunctions(purgeable_zone, &g_old_purgeable_zone);
+    MallocZoneFunctions new_functions = {};
+    new_functions.malloc = oom_killer_malloc_purgeable;
+    new_functions.calloc = oom_killer_calloc_purgeable;
+    new_functions.valloc = oom_killer_valloc_purgeable;
+    new_functions.free = oom_killer_free_purgeable;
+    new_functions.realloc = oom_killer_realloc_purgeable;
+    new_functions.memalign = oom_killer_memalign_purgeable;
+    ReplaceZoneFunctions(purgeable_zone, &new_functions);
+  }
+#endif
+
+  // === C malloc_zone_batch_malloc ===
+
+  // batch_malloc is omitted because the default malloc zone's implementation
+  // only supports batch_malloc for "tiny" allocations from the free list. It
+  // will fail for allocations larger than "tiny", and will only allocate as
+  // many blocks as it's able to from the free list. These factors mean that it
+  // can return less than the requested memory even in a non-out-of-memory
+  // situation. There's no good way to detect whether a batch_malloc failure is
+  // due to these other factors, or due to genuine memory or address space
+  // exhaustion. The fact that it only allocates space from the "tiny" free list
+  // means that it's likely that a failure will not be due to memory exhaustion.
+  // Similarly, these constraints on batch_malloc mean that callers must always
+  // be expecting to receive less memory than was requested, even in situations
+  // where memory pressure is not a concern. Finally, the only public interface
+  // to batch_malloc is malloc_zone_batch_malloc, which is specific to the
+  // system's malloc implementation. It's unlikely that anyone's even heard of
+  // it.
+
+#ifndef ADDRESS_SANITIZER
+  // === Core Foundation CFAllocators ===
+
+  // This will not catch allocation done by custom allocators, but will catch
+  // all allocation done by system-provided ones.
+
+  CHECK(!g_old_cfallocator_system_default && !g_old_cfallocator_malloc &&
+        !g_old_cfallocator_malloc_zone)
+      << "Old allocators unexpectedly non-null";
+
+  bool cf_allocator_internals_known = CanGetContextForCFAllocator();
+
+  if (cf_allocator_internals_known) {
+    CFAllocatorContext* context =
+        ContextForCFAllocator(kCFAllocatorSystemDefault);
+    CHECK(context) << "Failed to get context for kCFAllocatorSystemDefault.";
+    g_old_cfallocator_system_default = context->allocate;
+    CHECK(g_old_cfallocator_system_default)
+        << "Failed to get kCFAllocatorSystemDefault allocation function.";
+    context->allocate = oom_killer_cfallocator_system_default;
+
+    context = ContextForCFAllocator(kCFAllocatorMalloc);
+    CHECK(context) << "Failed to get context for kCFAllocatorMalloc.";
+    g_old_cfallocator_malloc = context->allocate;
+    CHECK(g_old_cfallocator_malloc)
+        << "Failed to get kCFAllocatorMalloc allocation function.";
+    context->allocate = oom_killer_cfallocator_malloc;
+
+    context = ContextForCFAllocator(kCFAllocatorMallocZone);
+    CHECK(context) << "Failed to get context for kCFAllocatorMallocZone.";
+    g_old_cfallocator_malloc_zone = context->allocate;
+    CHECK(g_old_cfallocator_malloc_zone)
+        << "Failed to get kCFAllocatorMallocZone allocation function.";
+    context->allocate = oom_killer_cfallocator_malloc_zone;
+  } else {
+    DLOG(WARNING) << "Internals of CFAllocator not known; out-of-memory "
+                     "failures via CFAllocator will not result in termination. "
+                     "http://crbug.com/45650";
+  }
+#endif
+
+  // === Cocoa NSObject allocation ===
+
+  // Note that both +[NSObject new] and +[NSObject alloc] call through to
+  // +[NSObject allocWithZone:].
+
+  CHECK(!g_old_allocWithZone) << "Old allocator unexpectedly non-null";
+
+  Class nsobject_class = [NSObject class];
+  Method orig_method =
+      class_getClassMethod(nsobject_class, @selector(allocWithZone:));
+  g_old_allocWithZone =
+      reinterpret_cast<allocWithZone_t>(method_getImplementation(orig_method));
+  CHECK(g_old_allocWithZone)
+      << "Failed to get allocWithZone allocation function.";
+  method_setImplementation(orig_method,
+                           reinterpret_cast<IMP>(oom_killer_allocWithZone));
+}
+
+void UninterceptMallocZonesForTesting() {
+  UninterceptMallocZoneForTesting(malloc_default_zone());
+  vm_address_t* zones;
+  unsigned int count;
+  kern_return_t kr = malloc_get_all_zones(mach_task_self(), 0, &zones, &count);
+  CHECK(kr == KERN_SUCCESS);
+  for (unsigned int i = 0; i < count; ++i) {
+    UninterceptMallocZoneForTesting(
+        reinterpret_cast<struct _malloc_zone_t*>(zones[i]));
+  }
+
+  ClearAllMallocZonesForTesting();
+}
+
+namespace {
+
+void ShimNewMallocZonesAndReschedule(base::Time end_time,
+                                     base::TimeDelta delay) {
+  ShimNewMallocZones();
+
+  if (base::Time::Now() > end_time)
+    return;
+
+  base::TimeDelta next_delay = delay * 2;
+  SequencedTaskRunnerHandle::Get()->PostDelayedTask(
+      FROM_HERE,
+      base::Bind(&ShimNewMallocZonesAndReschedule, end_time, next_delay),
+      delay);
+}
+
+}  // namespace
+
+void PeriodicallyShimNewMallocZones() {
+  base::Time end_time = base::Time::Now() + base::TimeDelta::FromMinutes(1);
+  base::TimeDelta initial_delay = base::TimeDelta::FromSeconds(1);
+  ShimNewMallocZonesAndReschedule(end_time, initial_delay);
+}
+
+void ShimNewMallocZones() {
+  StoreFunctionsForAllZones();
+
+  // Use the functions for the default zone as a template to replace those
+  // in new zones.
+  ChromeMallocZone* default_zone =
+      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
+  DCHECK(IsMallocZoneAlreadyStored(default_zone));
+
+  MallocZoneFunctions new_functions;
+  StoreZoneFunctions(default_zone, &new_functions);
+  ReplaceFunctionsForStoredZones(&new_functions);
+}
+
+void ReplaceZoneFunctions(ChromeMallocZone* zone,
+                          const MallocZoneFunctions* functions) {
+  // Remove protection.
+  mach_vm_address_t reprotection_start = 0;
+  mach_vm_size_t reprotection_length = 0;
+  vm_prot_t reprotection_value = VM_PROT_NONE;
+  DeprotectMallocZone(zone, &reprotection_start, &reprotection_length,
+                      &reprotection_value);
+
+  CHECK(functions->malloc && functions->calloc && functions->valloc &&
+        functions->free && functions->realloc);
+  zone->malloc = functions->malloc;
+  zone->calloc = functions->calloc;
+  zone->valloc = functions->valloc;
+  zone->free = functions->free;
+  zone->realloc = functions->realloc;
+  if (functions->batch_malloc)
+    zone->batch_malloc = functions->batch_malloc;
+  if (functions->batch_free)
+    zone->batch_free = functions->batch_free;
+  if (functions->size)
+    zone->size = functions->size;
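+  // The following fields exist only in newer versions of the malloc zone
+  // struct, so check |version| before writing to them.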
+  if (zone->version >= 5 && functions->memalign) {
+    zone->memalign = functions->memalign;
+  }
+  if (zone->version >= 6 && functions->free_definite_size) {
+    zone->free_definite_size = functions->free_definite_size;
+  }
+
+  // Restore protection if it was active.
+  if (reprotection_start) {
+    kern_return_t result =
+        mach_vm_protect(mach_task_self(), reprotection_start,
+                        reprotection_length, false, reprotection_value);
+    MACH_CHECK(result == KERN_SUCCESS, result) << "mach_vm_protect";
+  }
+}
+
+}  // namespace allocator
+}  // namespace base
diff --git a/base/allocator/allocator_interception_mac_unittest.mm b/base/allocator/allocator_interception_mac_unittest.mm
new file mode 100644
index 0000000..c919ca0
--- /dev/null
+++ b/base/allocator/allocator_interception_mac_unittest.mm
@@ -0,0 +1,64 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <mach/mach.h>
+
+#include "base/allocator/allocator_interception_mac.h"
+#include "base/allocator/allocator_shim.h"
+#include "base/allocator/malloc_zone_functions_mac.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace allocator {
+
+namespace {
+void ResetMallocZone(ChromeMallocZone* zone) {
+  MallocZoneFunctions& functions = GetFunctionsForZone(zone);
+  ReplaceZoneFunctions(zone, &functions);
+}
+
+void ResetAllMallocZones() {
+  ChromeMallocZone* default_malloc_zone =
+      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
+  ResetMallocZone(default_malloc_zone);
+
+  vm_address_t* zones;
+  unsigned int count;
+  kern_return_t kr = malloc_get_all_zones(mach_task_self(), 0, &zones, &count);
+  if (kr != KERN_SUCCESS)
+    return;
+  for (unsigned int i = 0; i < count; ++i) {
+    ChromeMallocZone* zone = reinterpret_cast<ChromeMallocZone*>(zones[i]);
+    ResetMallocZone(zone);
+  }
+}
+}  // namespace
+
+class AllocatorInterceptionTest : public testing::Test {
+ protected:
+  void TearDown() override {
+    ResetAllMallocZones();
+    ClearAllMallocZonesForTesting();
+  }
+};
+
+#if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+TEST_F(AllocatorInterceptionTest, ShimNewMallocZones) {
+  InitializeAllocatorShim();
+  ChromeMallocZone* default_malloc_zone =
+      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
+
+  malloc_zone_t new_zone;
+  memset(&new_zone, 1, sizeof(malloc_zone_t));
+  malloc_zone_register(&new_zone);
+  EXPECT_NE(new_zone.malloc, default_malloc_zone->malloc);
+  ShimNewMallocZones();
+  EXPECT_EQ(new_zone.malloc, default_malloc_zone->malloc);
+
+  malloc_zone_unregister(&new_zone);
+}
+#endif
+
+}  // namespace allocator
+}  // namespace base
diff --git a/base/allocator/allocator_shim.cc b/base/allocator/allocator_shim.cc
new file mode 100644
index 0000000..e919f09
--- /dev/null
+++ b/base/allocator/allocator_shim.cc
@@ -0,0 +1,336 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/allocator_shim.h"
+
+#include <errno.h>
+
+#include <new>
+
+#include "base/atomicops.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/process/process_metrics.h"
+#include "base/threading/platform_thread.h"
+#include "build/build_config.h"
+
+#if !defined(OS_WIN)
+#include <unistd.h>
+#else
+#include "base/allocator/winheap_stubs_win.h"
+#endif
+
+#if defined(OS_MACOSX)
+#include <malloc/malloc.h>
+
+#include "base/allocator/allocator_interception_mac.h"
+#endif
+
+// No calls to malloc / new in this file. They would cause re-entrancy of
+// the shim, which is hard to deal with. Keep this code as simple as possible
+// and don't use any external C++ objects here, not even //base ones. Even if
+// they are safe to use today, they might be refactored in the future.
+
+namespace {
+
+using namespace base;
+
+subtle::AtomicWord g_chain_head = reinterpret_cast<subtle::AtomicWord>(
+    &allocator::AllocatorDispatch::default_dispatch);
+
+bool g_call_new_handler_on_malloc_failure = false;
+
+inline size_t GetCachedPageSize() {
+  static size_t pagesize = 0;
+  if (!pagesize)
+    pagesize = base::GetPageSize();
+  return pagesize;
+}
+
+// Calls the std::new_handler thread-safely. Returns true if a new_handler was
+// set and called, false if no new_handler was set.
+bool CallNewHandler(size_t size) {
+#if defined(OS_WIN)
+  return base::allocator::WinCallNewHandler(size);
+#else
+  std::new_handler nh = std::get_new_handler();
+  if (!nh)
+    return false;
+  (*nh)();
+  // Assume the new_handler will abort if it fails. Exceptions are disabled
+  // and we don't support the case of a new_handler throwing std::bad_alloc.
+  return true;
+#endif
+}
+
+inline const allocator::AllocatorDispatch* GetChainHead() {
+  // TODO(primiano): Just use NoBarrier_Load once crbug.com/593344 is fixed.
+  // Unfortunately due to that bug NoBarrier_Load() is mistakenly fully
+  // barriered on Linux+Clang, and that causes visible perf regressions.
+  return reinterpret_cast<const allocator::AllocatorDispatch*>(
+#if defined(OS_LINUX) && defined(__clang__)
+      *static_cast<const volatile subtle::AtomicWord*>(&g_chain_head)
+#else
+      subtle::NoBarrier_Load(&g_chain_head)
+#endif
+  );
+}
+
+}  // namespace
+
+namespace base {
+namespace allocator {
+
+void SetCallNewHandlerOnMallocFailure(bool value) {
+  g_call_new_handler_on_malloc_failure = value;
+}
+
+void* UncheckedAlloc(size_t size) {
+  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
+  return chain_head->alloc_function(chain_head, size, nullptr);
+}
+
+void InsertAllocatorDispatch(AllocatorDispatch* dispatch) {
+  // Loop in case of (an unlikely) race on setting the list head.
+  size_t kMaxRetries = 7;
+  for (size_t i = 0; i < kMaxRetries; ++i) {
+    const AllocatorDispatch* chain_head = GetChainHead();
+    dispatch->next = chain_head;
+
+    // This function guarantees to be thread-safe w.r.t. concurrent
+    // insertions. It also has to guarantee that all the threads always
+    // see a consistent chain, hence the MemoryBarrier() below.
+    // InsertAllocatorDispatch() is NOT a fastpath, as opposed to malloc(), so
+    // we don't really want this to be a release-store with a corresponding
+    // acquire-load during malloc().
+    subtle::MemoryBarrier();
+    subtle::AtomicWord old_value =
+        reinterpret_cast<subtle::AtomicWord>(chain_head);
+    // Set the chain head to the new dispatch atomically. If we lose the race,
+    // the comparison will fail and we will retry against the updated head.
+    if (subtle::NoBarrier_CompareAndSwap(
+            &g_chain_head, old_value,
+            reinterpret_cast<subtle::AtomicWord>(dispatch)) == old_value) {
+      // Success.
+      return;
+    }
+  }
+
+  CHECK(false);  // Too many retries, this shouldn't happen.
+}
+
+void RemoveAllocatorDispatchForTesting(AllocatorDispatch* dispatch) {
+  DCHECK_EQ(GetChainHead(), dispatch);
+  subtle::NoBarrier_Store(&g_chain_head,
+                          reinterpret_cast<subtle::AtomicWord>(dispatch->next));
+}
+
+}  // namespace allocator
+}  // namespace base
+
+// The Shim* functions below are the entry-points into the shim-layer and
+// are supposed to be invoked by the allocator_shim_override_*
+// headers to route the malloc / new symbols through the shim layer.
+// They are defined as ALWAYS_INLINE in order to remove a level of indirection
+// between the system-defined entry points and the shim implementations.
+extern "C" {
+
+// The general pattern for allocations is:
+// - Try to allocate: if it succeeds, return the pointer.
+// - If the allocation failed:
+//   - Call the std::new_handler if it was a C++ allocation.
+//   - Call the std::new_handler if it was a malloc() (or calloc() or similar)
+//     AND SetCallNewHandlerOnMallocFailure(true) has been called.
+//   - If the std::new_handler is NOT set, just return nullptr.
+//   - If the std::new_handler is set:
+//     - Assume it will abort() if it fails (very likely the new_handler will
+//       just abort after printing a message).
+//     - Assume it did succeed if it returns, in which case reattempt the alloc.
+
+ALWAYS_INLINE void* ShimCppNew(size_t size) {
+  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
+  void* ptr;
+  do {
+    void* context = nullptr;
+#if defined(OS_MACOSX)
+    context = malloc_default_zone();
+#endif
+    ptr = chain_head->alloc_function(chain_head, size, context);
+  } while (!ptr && CallNewHandler(size));
+  return ptr;
+}
+
+ALWAYS_INLINE void ShimCppDelete(void* address) {
+  void* context = nullptr;
+#if defined(OS_MACOSX)
+  context = malloc_default_zone();
+#endif
+  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
+  return chain_head->free_function(chain_head, address, context);
+}
+
+ALWAYS_INLINE void* ShimMalloc(size_t size, void* context) {
+  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
+  void* ptr;
+  do {
+    ptr = chain_head->alloc_function(chain_head, size, context);
+  } while (!ptr && g_call_new_handler_on_malloc_failure &&
+           CallNewHandler(size));
+  return ptr;
+}
+
+ALWAYS_INLINE void* ShimCalloc(size_t n, size_t size, void* context) {
+  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
+  void* ptr;
+  do {
+    ptr = chain_head->alloc_zero_initialized_function(chain_head, n, size,
+                                                      context);
+  } while (!ptr && g_call_new_handler_on_malloc_failure &&
+           CallNewHandler(size));
+  return ptr;
+}
+
+ALWAYS_INLINE void* ShimRealloc(void* address, size_t size, void* context) {
+  // realloc(size == 0) means free() and might return a nullptr. We should
+  // not call the std::new_handler in that case, though.
+  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
+  void* ptr;
+  do {
+    ptr = chain_head->realloc_function(chain_head, address, size, context);
+  } while (!ptr && size && g_call_new_handler_on_malloc_failure &&
+           CallNewHandler(size));
+  return ptr;
+}
+
+ALWAYS_INLINE void* ShimMemalign(size_t alignment, size_t size, void* context) {
+  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
+  void* ptr;
+  do {
+    ptr = chain_head->alloc_aligned_function(chain_head, alignment, size,
+                                             context);
+  } while (!ptr && g_call_new_handler_on_malloc_failure &&
+           CallNewHandler(size));
+  return ptr;
+}
+
+ALWAYS_INLINE int ShimPosixMemalign(void** res, size_t alignment, size_t size) {
+  // posix_memalign is supposed to check the arguments. See tc_posix_memalign()
+  // in tc_malloc.cc.
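+  // POSIX requires EINVAL when the alignment is zero, not a power of two, or
+  // not a multiple of sizeof(void*); the check below rejects exactly that.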
+  if (((alignment % sizeof(void*)) != 0) ||
+      ((alignment & (alignment - 1)) != 0) || (alignment == 0)) {
+    return EINVAL;
+  }
+  void* ptr = ShimMemalign(alignment, size, nullptr);
+  *res = ptr;
+  return ptr ? 0 : ENOMEM;
+}
+
+ALWAYS_INLINE void* ShimValloc(size_t size, void* context) {
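+  // valloc() returns page-aligned memory, so it is equivalent to a memalign
+  // with the page size as the alignment.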
+  return ShimMemalign(GetCachedPageSize(), size, context);
+}
+
+ALWAYS_INLINE void* ShimPvalloc(size_t size) {
+  // pvalloc(0) should allocate one page, according to its man page.
+  if (size == 0) {
+    size = GetCachedPageSize();
+  } else {
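+    // Round up to the next page boundary; e.g., with 4096-byte pages a
+    // 5000-byte request becomes 8192 bytes.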
+    size = (size + GetCachedPageSize() - 1) & ~(GetCachedPageSize() - 1);
+  }
+  // The third argument is nullptr because pvalloc is glibc only and does not
+  // exist on OSX/BSD systems.
+  return ShimMemalign(GetCachedPageSize(), size, nullptr);
+}
+
+ALWAYS_INLINE void ShimFree(void* address, void* context) {
+  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
+  return chain_head->free_function(chain_head, address, context);
+}
+
+ALWAYS_INLINE size_t ShimGetSizeEstimate(const void* address, void* context) {
+  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
+  return chain_head->get_size_estimate_function(
+      chain_head, const_cast<void*>(address), context);
+}
+
+ALWAYS_INLINE unsigned ShimBatchMalloc(size_t size,
+                                       void** results,
+                                       unsigned num_requested,
+                                       void* context) {
+  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
+  return chain_head->batch_malloc_function(chain_head, size, results,
+                                           num_requested, context);
+}
+
+ALWAYS_INLINE void ShimBatchFree(void** to_be_freed,
+                                 unsigned num_to_be_freed,
+                                 void* context) {
+  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
+  return chain_head->batch_free_function(chain_head, to_be_freed,
+                                         num_to_be_freed, context);
+}
+
+ALWAYS_INLINE void ShimFreeDefiniteSize(void* ptr, size_t size, void* context) {
+  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
+  return chain_head->free_definite_size_function(chain_head, ptr, size,
+                                                 context);
+}
+
+}  // extern "C"
+
+#if !defined(OS_WIN) && !defined(OS_MACOSX)
+// Cpp symbols (new / delete) should always be routed through the shim layer
+// except on Windows and macOS where the malloc intercept is deep enough that it
+// also catches the cpp calls.
+#include "base/allocator/allocator_shim_override_cpp_symbols.h"
+#endif
+
+#if defined(OS_ANDROID)
+// Android does not support symbol interposition. The way malloc symbols are
+// intercepted on Android is by using link-time -wrap flags.
+#include "base/allocator/allocator_shim_override_linker_wrapped_symbols.h"
+#elif defined(OS_WIN)
+// On Windows we use plain link-time overriding of the CRT symbols.
+#include "base/allocator/allocator_shim_override_ucrt_symbols_win.h"
+#elif defined(OS_MACOSX)
+#include "base/allocator/allocator_shim_default_dispatch_to_mac_zoned_malloc.h"
+#include "base/allocator/allocator_shim_override_mac_symbols.h"
+#else
+#include "base/allocator/allocator_shim_override_libc_symbols.h"
+#endif
+
+// In the case of tcmalloc we also want to plumb into the glibc hooks to
+// prevent allocations made in glibc itself (e.g., strdup()) from accidentally
+// being performed on the glibc heap instead of the tcmalloc one.
+#if defined(USE_TCMALLOC)
+#include "base/allocator/allocator_shim_override_glibc_weak_symbols.h"
+#endif
+
+#if defined(OS_MACOSX)
+namespace base {
+namespace allocator {
+void InitializeAllocatorShim() {
+  // Prepares the default dispatch. After the intercepted malloc calls have
+  // traversed the shim, this will route them to the default malloc zone.
+  InitializeDefaultDispatchToMacAllocator();
+
+  MallocZoneFunctions functions = MallocZoneFunctionsToReplaceDefault();
+
+  // This replaces the default malloc zone, causing calls to malloc & friends
+  // from the codebase to be routed to ShimMalloc() above.
+  base::allocator::ReplaceFunctionsForStoredZones(&functions);
+}
+}  // namespace allocator
+}  // namespace base
+#endif
+
+// Cross-checks.
+
+#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+#error The allocator shim should not be compiled when building for memory tools.
+#endif
+
+#if (defined(__GNUC__) && defined(__EXCEPTIONS)) || \
+    (defined(_MSC_VER) && defined(_CPPUNWIND))
+#error This code cannot be used when exceptions are turned on.
+#endif
diff --git a/base/allocator/allocator_shim.h b/base/allocator/allocator_shim.h
new file mode 100644
index 0000000..527e414
--- /dev/null
+++ b/base/allocator/allocator_shim.h
@@ -0,0 +1,133 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_ALLOCATOR_SHIM_H_
+#define BASE_ALLOCATOR_ALLOCATOR_SHIM_H_
+
+#include <stddef.h>
+
+#include "base/base_export.h"
+#include "build/build_config.h"
+
+namespace base {
+namespace allocator {
+
+// Allocator Shim API. It allows one to:
+//  - Configure the behavior of the allocator (what to do on OOM failures).
+//  - Install new hooks (AllocatorDispatch) in the allocator chain.
+
+// When this shim layer is enabled, the route of an allocation is as follows:
+//
+// [allocator_shim_override_*.h] Intercept malloc() / operator new calls:
+//   The override_* headers define the symbols required to intercept calls to
+//   malloc() and operator new (if not overridden by specific C++ classes).
+//
+// [allocator_shim.cc] Routing allocation calls to the shim:
+//   The headers above route the calls to the internal ShimMalloc(), ShimFree(),
+//   ShimCppNew() etc. methods defined in allocator_shim.cc.
+//   These methods will: (1) forward the allocation call to the front of the
+//   AllocatorDispatch chain, and (2) perform security hardening (e.g., they
+//   might call std::new_handler on OOM failure).
+//
+// [allocator_shim_default_dispatch_to_*.cc] The AllocatorDispatch chain:
+//   It is a singly linked list where each element is a struct with function
+//   pointers (|alloc_function|, |free_function|, etc.). Normally the chain
+//   consists of a single AllocatorDispatch element, herein called
+//   the "default dispatch", which is statically defined at build time and
+//   ultimately routes the calls to the actual allocator defined by the build
+//   config (tcmalloc, glibc, ...).
+//
+// It is possible to dynamically insert further AllocatorDispatch stages
+// to the front of the chain, for debugging / profiling purposes.
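+//
+// For instance, a minimal illustrative sketch (the names here are
+// hypothetical, not part of this API): a dispatch that counts allocations
+// and forwards everything to the next element of the chain:
+//
+//   // |g_alloc_count| is a hypothetical std::atomic<size_t> counter.
+//   void* CountingAlloc(const AllocatorDispatch* self, size_t size,
+//                       void* context) {
+//     g_alloc_count.fetch_add(1, std::memory_order_relaxed);
+//     return self->next->alloc_function(self->next, size, context);
+//   }
+//   AllocatorDispatch g_counting_dispatch = {
+//       &CountingAlloc /* , ...the remaining members forward to |next|... */};
+//   base::allocator::InsertAllocatorDispatch(&g_counting_dispatch);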
+//
+// All the functions must be thread-safe. The shim does not enforce any
+// serialization. This is to route to thread-aware allocators (e.g., tcmalloc)
+// without introducing unnecessary perf hits.
+
+struct AllocatorDispatch {
+  using AllocFn = void*(const AllocatorDispatch* self,
+                        size_t size,
+                        void* context);
+  using AllocZeroInitializedFn = void*(const AllocatorDispatch* self,
+                                       size_t n,
+                                       size_t size,
+                                       void* context);
+  using AllocAlignedFn = void*(const AllocatorDispatch* self,
+                               size_t alignment,
+                               size_t size,
+                               void* context);
+  using ReallocFn = void*(const AllocatorDispatch* self,
+                          void* address,
+                          size_t size,
+                          void* context);
+  using FreeFn = void(const AllocatorDispatch* self,
+                      void* address,
+                      void* context);
+  // Returns the best available estimate for the actual amount of memory
+  // consumed by the allocation |address|. If possible, this should include
+  // heap overhead or at least a decent estimate of the full cost of the
+  // allocation. If no good estimate is possible, returns zero.
+  using GetSizeEstimateFn = size_t(const AllocatorDispatch* self,
+                                   void* address,
+                                   void* context);
+  using BatchMallocFn = unsigned(const AllocatorDispatch* self,
+                                 size_t size,
+                                 void** results,
+                                 unsigned num_requested,
+                                 void* context);
+  using BatchFreeFn = void(const AllocatorDispatch* self,
+                           void** to_be_freed,
+                           unsigned num_to_be_freed,
+                           void* context);
+  using FreeDefiniteSizeFn = void(const AllocatorDispatch* self,
+                                  void* ptr,
+                                  size_t size,
+                                  void* context);
+
+  AllocFn* const alloc_function;
+  AllocZeroInitializedFn* const alloc_zero_initialized_function;
+  AllocAlignedFn* const alloc_aligned_function;
+  ReallocFn* const realloc_function;
+  FreeFn* const free_function;
+  GetSizeEstimateFn* const get_size_estimate_function;
+  BatchMallocFn* const batch_malloc_function;
+  BatchFreeFn* const batch_free_function;
+  FreeDefiniteSizeFn* const free_definite_size_function;
+
+  const AllocatorDispatch* next;
+
+  // |default_dispatch| is statically defined by one (and only one) of the
+  // allocator_shim_default_dispatch_to_*.cc files, depending on the build
+  // configuration.
+  static const AllocatorDispatch default_dispatch;
+};
+
+// When true, makes malloc behave like new w.r.t. calling the new_handler if
+// the allocation fails (see _set_new_mode() on Windows).
+BASE_EXPORT void SetCallNewHandlerOnMallocFailure(bool value);
+
+// Allocates |size| bytes or returns nullptr. It does NOT call the new_handler,
+// regardless of SetCallNewHandlerOnMallocFailure().
+BASE_EXPORT void* UncheckedAlloc(size_t size);
+
+// Inserts |dispatch| in front of the allocator chain. This method is
+// thread-safe w.r.t. concurrent invocations of InsertAllocatorDispatch().
+// Callers are responsible for not inserting the same dispatch more than
+// once.
+BASE_EXPORT void InsertAllocatorDispatch(AllocatorDispatch* dispatch);
+
+// Test-only. Rationale: (1) lack of use cases; (2) dealing safely with a
+// removal of arbitrary elements from a singly linked list would require a lock
+// in malloc(), which we really don't want.
+BASE_EXPORT void RemoveAllocatorDispatchForTesting(AllocatorDispatch* dispatch);
+
+#if defined(OS_MACOSX)
+// On macOS, the allocator shim needs to be turned on at runtime.
+BASE_EXPORT void InitializeAllocatorShim();
+#endif  // defined(OS_MACOSX)
+
+}  // namespace allocator
+}  // namespace base
+
+#endif  // BASE_ALLOCATOR_ALLOCATOR_SHIM_H_
diff --git a/base/allocator/allocator_shim_default_dispatch_to_glibc.cc b/base/allocator/allocator_shim_default_dispatch_to_glibc.cc
new file mode 100644
index 0000000..8574da3
--- /dev/null
+++ b/base/allocator/allocator_shim_default_dispatch_to_glibc.cc
@@ -0,0 +1,75 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/allocator_shim.h"
+
+#include <malloc.h>
+
+// This translation unit defines a default dispatch for the allocator shim which
+// routes allocations to libc functions.
+// The code here is strongly inspired by tcmalloc's libc_override_glibc.h.
+
+extern "C" {
+void* __libc_malloc(size_t size);
+void* __libc_calloc(size_t n, size_t size);
+void* __libc_realloc(void* address, size_t size);
+void* __libc_memalign(size_t alignment, size_t size);
+void __libc_free(void* ptr);
+}  // extern "C"
+
+namespace {
+
+using base::allocator::AllocatorDispatch;
+
+void* GlibcMalloc(const AllocatorDispatch*, size_t size, void* context) {
+  return __libc_malloc(size);
+}
+
+void* GlibcCalloc(const AllocatorDispatch*,
+                  size_t n,
+                  size_t size,
+                  void* context) {
+  return __libc_calloc(n, size);
+}
+
+void* GlibcRealloc(const AllocatorDispatch*,
+                   void* address,
+                   size_t size,
+                   void* context) {
+  return __libc_realloc(address, size);
+}
+
+void* GlibcMemalign(const AllocatorDispatch*,
+                    size_t alignment,
+                    size_t size,
+                    void* context) {
+  return __libc_memalign(alignment, size);
+}
+
+void GlibcFree(const AllocatorDispatch*, void* address, void* context) {
+  __libc_free(address);
+}
+
+size_t GlibcGetSizeEstimate(const AllocatorDispatch*,
+                            void* address,
+                            void* context) {
+  // TODO(siggi, primiano): malloc_usable_size may need redirection in the
+  //     presence of interposing shims that divert allocations.
+  return malloc_usable_size(address);
+}
+
+}  // namespace
+
+const AllocatorDispatch AllocatorDispatch::default_dispatch = {
+    &GlibcMalloc,          /* alloc_function */
+    &GlibcCalloc,          /* alloc_zero_initialized_function */
+    &GlibcMemalign,        /* alloc_aligned_function */
+    &GlibcRealloc,         /* realloc_function */
+    &GlibcFree,            /* free_function */
+    &GlibcGetSizeEstimate, /* get_size_estimate_function */
+    nullptr,               /* batch_malloc_function */
+    nullptr,               /* batch_free_function */
+    nullptr,               /* free_definite_size_function */
+    nullptr,               /* next */
+};
diff --git a/base/allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc b/base/allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc
new file mode 100644
index 0000000..c351a7c
--- /dev/null
+++ b/base/allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc
@@ -0,0 +1,113 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <malloc.h>
+
+#include "base/allocator/allocator_shim.h"
+#include "build/build_config.h"
+
+#if defined(OS_ANDROID) && __ANDROID_API__ < 17
+#include <dlfcn.h>
+// This is defined in malloc.h on other platforms. We just need the definition
+// for the decltype(malloc_usable_size)* call to work.
+size_t malloc_usable_size(const void*);
+#endif
+
+// This translation unit defines a default dispatch for the allocator shim which
+// routes allocations to the original libc functions when using the link-time
+// -Wl,-wrap,malloc approach (see README.md).
+// The __real_X functions here are special symbols that the linker will relocate
+// against the real "X" undefined symbol, so that __real_malloc becomes the
+// equivalent of what an undefined malloc symbol reference would have been.
+// This is the counterpart of allocator_shim_override_linker_wrapped_symbols.h,
+// which routes the __wrap_X functions into the shim.
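+//
+// For example, when a unit linked with -Wl,-wrap,malloc calls malloc(), the
+// reference resolves to __wrap_malloc() (defined by the shim), while a
+// reference to __real_malloc() resolves to the original malloc symbol.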
+
+extern "C" {
+void* __real_malloc(size_t);
+void* __real_calloc(size_t, size_t);
+void* __real_realloc(void*, size_t);
+void* __real_memalign(size_t, size_t);
+void __real_free(void*);
+}  // extern "C"
+
+namespace {
+
+using base::allocator::AllocatorDispatch;
+
+void* RealMalloc(const AllocatorDispatch*, size_t size, void* context) {
+  return __real_malloc(size);
+}
+
+void* RealCalloc(const AllocatorDispatch*,
+                 size_t n,
+                 size_t size,
+                 void* context) {
+  return __real_calloc(n, size);
+}
+
+void* RealRealloc(const AllocatorDispatch*,
+                  void* address,
+                  size_t size,
+                  void* context) {
+  return __real_realloc(address, size);
+}
+
+void* RealMemalign(const AllocatorDispatch*,
+                   size_t alignment,
+                   size_t size,
+                   void* context) {
+  return __real_memalign(alignment, size);
+}
+
+void RealFree(const AllocatorDispatch*, void* address, void* context) {
+  __real_free(address);
+}
+
+#if defined(OS_ANDROID) && __ANDROID_API__ < 17
+size_t DummyMallocUsableSize(const void*) { return 0; }
+#endif
+
+size_t RealSizeEstimate(const AllocatorDispatch*,
+                        void* address,
+                        void* context) {
+#if defined(OS_ANDROID)
+#if __ANDROID_API__ < 17
+  // malloc_usable_size() is available only starting from API 17.
+  // TODO(dskiba): remove once we start building against 17+.
+  using MallocUsableSizeFunction = decltype(malloc_usable_size)*;
+  static MallocUsableSizeFunction usable_size_function = nullptr;
+  if (!usable_size_function) {
+    void* function_ptr = dlsym(RTLD_DEFAULT, "malloc_usable_size");
+    if (function_ptr) {
+      usable_size_function = reinterpret_cast<MallocUsableSizeFunction>(
+          function_ptr);
+    } else {
+      usable_size_function = &DummyMallocUsableSize;
+    }
+  }
+  return usable_size_function(address);
+#else
+  return malloc_usable_size(address);
+#endif
+#endif  // OS_ANDROID
+
+  // TODO(primiano): This should be redirected to malloc_usable_size or
+  //     the like.
+  return 0;
+}
+
+}  // namespace
+
+const AllocatorDispatch AllocatorDispatch::default_dispatch = {
+    &RealMalloc,       /* alloc_function */
+    &RealCalloc,       /* alloc_zero_initialized_function */
+    &RealMemalign,     /* alloc_aligned_function */
+    &RealRealloc,      /* realloc_function */
+    &RealFree,         /* free_function */
+    &RealSizeEstimate, /* get_size_estimate_function */
+    nullptr,           /* batch_malloc_function */
+    nullptr,           /* batch_free_function */
+    nullptr,           /* free_definite_size_function */
+    nullptr,           /* next */
+};
diff --git a/base/allocator/allocator_shim_default_dispatch_to_mac_zoned_malloc.cc b/base/allocator/allocator_shim_default_dispatch_to_mac_zoned_malloc.cc
new file mode 100644
index 0000000..32898ef
--- /dev/null
+++ b/base/allocator/allocator_shim_default_dispatch_to_mac_zoned_malloc.cc
@@ -0,0 +1,109 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/allocator_shim_default_dispatch_to_mac_zoned_malloc.h"
+
+#include <utility>
+
+#include "base/allocator/allocator_interception_mac.h"
+#include "base/allocator/allocator_shim.h"
+#include "base/allocator/malloc_zone_functions_mac.h"
+
+namespace base {
+namespace allocator {
+namespace {
+
+void* MallocImpl(const AllocatorDispatch*, size_t size, void* context) {
+  MallocZoneFunctions& functions = GetFunctionsForZone(context);
+  return functions.malloc(reinterpret_cast<struct _malloc_zone_t*>(context),
+                          size);
+}
+
+void* CallocImpl(const AllocatorDispatch*,
+                 size_t n,
+                 size_t size,
+                 void* context) {
+  MallocZoneFunctions& functions = GetFunctionsForZone(context);
+  return functions.calloc(reinterpret_cast<struct _malloc_zone_t*>(context), n,
+                          size);
+}
+
+void* MemalignImpl(const AllocatorDispatch*,
+                   size_t alignment,
+                   size_t size,
+                   void* context) {
+  MallocZoneFunctions& functions = GetFunctionsForZone(context);
+  return functions.memalign(reinterpret_cast<struct _malloc_zone_t*>(context),
+                            alignment, size);
+}
+
+void* ReallocImpl(const AllocatorDispatch*,
+                  void* ptr,
+                  size_t size,
+                  void* context) {
+  MallocZoneFunctions& functions = GetFunctionsForZone(context);
+  return functions.realloc(reinterpret_cast<struct _malloc_zone_t*>(context),
+                           ptr, size);
+}
+
+void FreeImpl(const AllocatorDispatch*, void* ptr, void* context) {
+  MallocZoneFunctions& functions = GetFunctionsForZone(context);
+  functions.free(reinterpret_cast<struct _malloc_zone_t*>(context), ptr);
+}
+
+size_t GetSizeEstimateImpl(const AllocatorDispatch*, void* ptr, void* context) {
+  MallocZoneFunctions& functions = GetFunctionsForZone(context);
+  return functions.size(reinterpret_cast<struct _malloc_zone_t*>(context), ptr);
+}
+
+unsigned BatchMallocImpl(const AllocatorDispatch* self,
+                         size_t size,
+                         void** results,
+                         unsigned num_requested,
+                         void* context) {
+  MallocZoneFunctions& functions = GetFunctionsForZone(context);
+  return functions.batch_malloc(
+      reinterpret_cast<struct _malloc_zone_t*>(context), size, results,
+      num_requested);
+}
+
+void BatchFreeImpl(const AllocatorDispatch* self,
+                   void** to_be_freed,
+                   unsigned num_to_be_freed,
+                   void* context) {
+  MallocZoneFunctions& functions = GetFunctionsForZone(context);
+  functions.batch_free(reinterpret_cast<struct _malloc_zone_t*>(context),
+                       to_be_freed, num_to_be_freed);
+}
+
+void FreeDefiniteSizeImpl(const AllocatorDispatch* self,
+                          void* ptr,
+                          size_t size,
+                          void* context) {
+  MallocZoneFunctions& functions = GetFunctionsForZone(context);
+  functions.free_definite_size(
+      reinterpret_cast<struct _malloc_zone_t*>(context), ptr, size);
+}
+
+}  // namespace
+
+void InitializeDefaultDispatchToMacAllocator() {
+  StoreFunctionsForAllZones();
+}
+
+const AllocatorDispatch AllocatorDispatch::default_dispatch = {
+    &MallocImpl,           /* alloc_function */
+    &CallocImpl,           /* alloc_zero_initialized_function */
+    &MemalignImpl,         /* alloc_aligned_function */
+    &ReallocImpl,          /* realloc_function */
+    &FreeImpl,             /* free_function */
+    &GetSizeEstimateImpl,  /* get_size_estimate_function */
+    &BatchMallocImpl,      /* batch_malloc_function */
+    &BatchFreeImpl,        /* batch_free_function */
+    &FreeDefiniteSizeImpl, /* free_definite_size_function */
+    nullptr,               /* next */
+};
+
+}  // namespace allocator
+}  // namespace base
diff --git a/base/allocator/allocator_shim_default_dispatch_to_mac_zoned_malloc.h b/base/allocator/allocator_shim_default_dispatch_to_mac_zoned_malloc.h
new file mode 100644
index 0000000..77d533c
--- /dev/null
+++ b/base/allocator/allocator_shim_default_dispatch_to_mac_zoned_malloc.h
@@ -0,0 +1,19 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_ALLOCATOR_SHIM_DEFAULT_DISPATCH_TO_MAC_ZONED_MALLOC_H_
+#define BASE_ALLOCATOR_ALLOCATOR_SHIM_DEFAULT_DISPATCH_TO_MAC_ZONED_MALLOC_H_
+
+namespace base {
+namespace allocator {
+
+// This initializes AllocatorDispatch::default_dispatch by saving pointers to
+// the functions in the current default malloc zone. This must be called before
+// the default malloc zone is changed to have its intended effect.
+void InitializeDefaultDispatchToMacAllocator();
+
+}  // namespace allocator
+}  // namespace base
+
+#endif  // BASE_ALLOCATOR_ALLOCATOR_SHIM_DEFAULT_DISPATCH_TO_MAC_ZONED_MALLOC_H_
diff --git a/base/allocator/allocator_shim_default_dispatch_to_tcmalloc.cc b/base/allocator/allocator_shim_default_dispatch_to_tcmalloc.cc
new file mode 100644
index 0000000..878e8a7
--- /dev/null
+++ b/base/allocator/allocator_shim_default_dispatch_to_tcmalloc.cc
@@ -0,0 +1,92 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/allocator_shim.h"
+#include "base/allocator/allocator_shim_internals.h"
+#include "third_party/tcmalloc/chromium/src/config.h"
+#include "third_party/tcmalloc/chromium/src/gperftools/tcmalloc.h"
+
+namespace {
+
+using base::allocator::AllocatorDispatch;
+
+void* TCMalloc(const AllocatorDispatch*, size_t size, void* context) {
+  return tc_malloc(size);
+}
+
+void* TCCalloc(const AllocatorDispatch*, size_t n, size_t size, void* context) {
+  return tc_calloc(n, size);
+}
+
+void* TCMemalign(const AllocatorDispatch*,
+                 size_t alignment,
+                 size_t size,
+                 void* context) {
+  return tc_memalign(alignment, size);
+}
+
+void* TCRealloc(const AllocatorDispatch*,
+                void* address,
+                size_t size,
+                void* context) {
+  return tc_realloc(address, size);
+}
+
+void TCFree(const AllocatorDispatch*, void* address, void* context) {
+  tc_free(address);
+}
+
+size_t TCGetSizeEstimate(const AllocatorDispatch*,
+                         void* address,
+                         void* context) {
+  return tc_malloc_size(address);
+}
+
+}  // namespace
+
+const AllocatorDispatch AllocatorDispatch::default_dispatch = {
+    &TCMalloc,          /* alloc_function */
+    &TCCalloc,          /* alloc_zero_initialized_function */
+    &TCMemalign,        /* alloc_aligned_function */
+    &TCRealloc,         /* realloc_function */
+    &TCFree,            /* free_function */
+    &TCGetSizeEstimate, /* get_size_estimate_function */
+    nullptr,            /* batch_malloc_function */
+    nullptr,            /* batch_free_function */
+    nullptr,            /* free_definite_size_function */
+    nullptr,            /* next */
+};
+
+// In the case of tcmalloc we also have to route the diagnostic symbols,
+// which are not part of the unified shim layer, to tcmalloc for consistency.
+
+extern "C" {
+
+SHIM_ALWAYS_EXPORT void malloc_stats(void) __THROW {
+  return tc_malloc_stats();
+}
+
+SHIM_ALWAYS_EXPORT int mallopt(int cmd, int value) __THROW {
+  return tc_mallopt(cmd, value);
+}
+
+#ifdef HAVE_STRUCT_MALLINFO
+SHIM_ALWAYS_EXPORT struct mallinfo mallinfo(void) __THROW {
+  return tc_mallinfo();
+}
+#endif
+
+SHIM_ALWAYS_EXPORT size_t malloc_size(void* address) __THROW {
+  return tc_malloc_size(address);
+}
+
+#if defined(__ANDROID__)
+SHIM_ALWAYS_EXPORT size_t malloc_usable_size(const void* address) __THROW {
+#else
+SHIM_ALWAYS_EXPORT size_t malloc_usable_size(void* address) __THROW {
+#endif
+  return tc_malloc_size(address);
+}
+
+}  // extern "C"
diff --git a/base/allocator/allocator_shim_default_dispatch_to_winheap.cc b/base/allocator/allocator_shim_default_dispatch_to_winheap.cc
new file mode 100644
index 0000000..6aba5a3
--- /dev/null
+++ b/base/allocator/allocator_shim_default_dispatch_to_winheap.cc
@@ -0,0 +1,79 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/allocator_shim.h"
+
+#include "base/allocator/winheap_stubs_win.h"
+#include "base/logging.h"
+
+namespace {
+
+using base::allocator::AllocatorDispatch;
+
+void* DefaultWinHeapMallocImpl(const AllocatorDispatch*,
+                               size_t size,
+                               void* context) {
+  return base::allocator::WinHeapMalloc(size);
+}
+
+void* DefaultWinHeapCallocImpl(const AllocatorDispatch* self,
+                               size_t n,
+                               size_t elem_size,
+                               void* context) {
+  // Overflow check.
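+  // If n * elem_size overflowed, the truncated product divided by elem_size
+  // can no longer reproduce n, so the multiplication is rejected.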
+  const size_t size = n * elem_size;
+  if (elem_size != 0 && size / elem_size != n)
+    return nullptr;
+
+  void* result = DefaultWinHeapMallocImpl(self, size, context);
+  if (result) {
+    memset(result, 0, size);
+  }
+  return result;
+}
+
+void* DefaultWinHeapMemalignImpl(const AllocatorDispatch* self,
+                                 size_t alignment,
+                                 size_t size,
+                                 void* context) {
+  CHECK(false) << "The windows heap does not support memalign.";
+  return nullptr;
+}
+
+void* DefaultWinHeapReallocImpl(const AllocatorDispatch* self,
+                                void* address,
+                                size_t size,
+                                void* context) {
+  return base::allocator::WinHeapRealloc(address, size);
+}
+
+void DefaultWinHeapFreeImpl(const AllocatorDispatch*,
+                            void* address,
+                            void* context) {
+  base::allocator::WinHeapFree(address);
+}
+
+size_t DefaultWinHeapGetSizeEstimateImpl(const AllocatorDispatch*,
+                                         void* address,
+                                         void* context) {
+  return base::allocator::WinHeapGetSizeEstimate(address);
+}
+
+}  // namespace
+
+// Guarantee that default_dispatch is compile-time initialized to avoid using
+// it before initialization (allocations before main in release builds with
+// optimizations disabled).
+constexpr AllocatorDispatch AllocatorDispatch::default_dispatch = {
+    &DefaultWinHeapMallocImpl,
+    &DefaultWinHeapCallocImpl,
+    &DefaultWinHeapMemalignImpl,
+    &DefaultWinHeapReallocImpl,
+    &DefaultWinHeapFreeImpl,
+    &DefaultWinHeapGetSizeEstimateImpl,
+    nullptr, /* batch_malloc_function */
+    nullptr, /* batch_free_function */
+    nullptr, /* free_definite_size_function */
+    nullptr, /* next */
+};
diff --git a/base/allocator/allocator_shim_internals.h b/base/allocator/allocator_shim_internals.h
new file mode 100644
index 0000000..0196f89
--- /dev/null
+++ b/base/allocator/allocator_shim_internals.h
@@ -0,0 +1,44 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_ALLOCATOR_SHIM_INTERNALS_H_
+#define BASE_ALLOCATOR_ALLOCATOR_SHIM_INTERNALS_H_
+
+#if defined(__GNUC__)
+
+#include <sys/cdefs.h>  // for __THROW
+
+#ifndef __THROW  // Not a glibc system
+#ifdef _NOEXCEPT  // LLVM libc++ uses noexcept instead
+#define __THROW _NOEXCEPT
+#else
+#define __THROW
+#endif  // _NOEXCEPT
+#endif
+
+// Shim layer symbols need to be ALWAYS exported, regardless of component build.
+//
+// If an exported symbol is linked into a DSO, it may be preempted by a
+// definition in the main executable. If this happens to an allocator symbol, it
+// will mean that the DSO will use the main executable's allocator. This is
+// normally relatively harmless -- regular allocations should all use the same
+// allocator, but if the DSO tries to hook the allocator it will not see any
+// allocations.
+//
+// However, if LLVM LTO is enabled, the compiler may inline the shim layer
+// symbols into callers. The end result is that allocator calls in DSOs may use
+// either the main executable's allocator or the DSO's allocator, depending on
+// whether the call was inlined. This is arguably a bug in LLVM caused by its
+// somewhat irregular handling of symbol interposition (see llvm.org/PR23501).
+// To work around the bug we use noinline to prevent the symbols from being
+// inlined.
+//
+// In the long run we probably want to avoid linking the allocator bits into
+// DSOs altogether. This will save a little space and stop giving DSOs the false
+// impression that they can hook the allocator.
+#define SHIM_ALWAYS_EXPORT __attribute__((visibility("default"), noinline))
+
+#endif  // __GNUC__
+
+#endif  // BASE_ALLOCATOR_ALLOCATOR_SHIM_INTERNALS_H_
diff --git a/base/allocator/allocator_shim_override_cpp_symbols.h b/base/allocator/allocator_shim_override_cpp_symbols.h
new file mode 100644
index 0000000..3313687
--- /dev/null
+++ b/base/allocator/allocator_shim_override_cpp_symbols.h
@@ -0,0 +1,51 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_CPP_SYMBOLS_H_
+#error This header is meant to be included only once by allocator_shim.cc
+#endif
+#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_CPP_SYMBOLS_H_
+
+// Preempt the default new/delete C++ symbols so they call the shim entry
+// points. This file is strongly inspired by tcmalloc's
+// libc_override_redefine.h.
+
+#include <new>
+
+#include "base/allocator/allocator_shim_internals.h"
+
+SHIM_ALWAYS_EXPORT void* operator new(size_t size) {
+  return ShimCppNew(size);
+}
+
+SHIM_ALWAYS_EXPORT void operator delete(void* p) __THROW {
+  ShimCppDelete(p);
+}
+
+SHIM_ALWAYS_EXPORT void* operator new[](size_t size) {
+  return ShimCppNew(size);
+}
+
+SHIM_ALWAYS_EXPORT void operator delete[](void* p) __THROW {
+  ShimCppDelete(p);
+}
+
+SHIM_ALWAYS_EXPORT void* operator new(size_t size,
+                                      const std::nothrow_t&) __THROW {
+  return ShimCppNew(size);
+}
+
+SHIM_ALWAYS_EXPORT void* operator new[](size_t size,
+                                        const std::nothrow_t&) __THROW {
+  return ShimCppNew(size);
+}
+
+SHIM_ALWAYS_EXPORT void operator delete(void* p,
+                                        const std::nothrow_t&) __THROW {
+  ShimCppDelete(p);
+}
+
+SHIM_ALWAYS_EXPORT void operator delete[](void* p,
+                                          const std::nothrow_t&) __THROW {
+  ShimCppDelete(p);
+}
diff --git a/base/allocator/allocator_shim_override_glibc_weak_symbols.h b/base/allocator/allocator_shim_override_glibc_weak_symbols.h
new file mode 100644
index 0000000..9142bda
--- /dev/null
+++ b/base/allocator/allocator_shim_override_glibc_weak_symbols.h
@@ -0,0 +1,119 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_GLIBC_WEAK_SYMBOLS_H_
+#error This header is meant to be included only once by allocator_shim.cc
+#endif
+#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_GLIBC_WEAK_SYMBOLS_H_
+
+// Alias the internal Glibc symbols to the shim entry points.
+// This file is strongly inspired by tcmalloc's libc_override_glibc.h.
+// Effectively this file does two things:
+//  1) Re-define the __malloc_hook & co. symbols. Those symbols are defined as
+//     weak in glibc and are meant to be defined strongly by client processes
+//     to hook calls initiated from within glibc.
+//  2) Re-define Glibc-specific symbols (__libc_malloc). The historical reason
+//     is that in the past (in RedHat 9) we had instances of libraries that were
+//     allocating via malloc() and freeing using __libc_free().
+//     See tcmalloc's libc_override_glibc.h for more context.
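+//
+// Note: glibc has deprecated the __malloc_hook family of symbols; they still
+// exist in the glibc versions targeted here, but may be removed in future
+// glibc releases.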
+
+#include <features.h>  // for __GLIBC__
+#include <malloc.h>
+#include <unistd.h>
+
+#include <new>
+
+#include "base/allocator/allocator_shim_internals.h"
+
+// __MALLOC_HOOK_VOLATILE is not defined in all Glibc headers.
+#if !defined(__MALLOC_HOOK_VOLATILE)
+#define MALLOC_HOOK_MAYBE_VOLATILE /**/
+#else
+#define MALLOC_HOOK_MAYBE_VOLATILE __MALLOC_HOOK_VOLATILE
+#endif
+
+extern "C" {
+
+// 1) Re-define malloc_hook weak symbols.
+namespace {
+
+void* GlibcMallocHook(size_t size, const void* caller) {
+  return ShimMalloc(size, nullptr);
+}
+
+void* GlibcReallocHook(void* ptr, size_t size, const void* caller) {
+  return ShimRealloc(ptr, size, nullptr);
+}
+
+void GlibcFreeHook(void* ptr, const void* caller) {
+  return ShimFree(ptr, nullptr);
+}
+
+void* GlibcMemalignHook(size_t align, size_t size, const void* caller) {
+  return ShimMemalign(align, size, nullptr);
+}
+
+}  // namespace
+
+__attribute__((visibility("default"))) void* (
+    *MALLOC_HOOK_MAYBE_VOLATILE __malloc_hook)(size_t,
+                                               const void*) = &GlibcMallocHook;
+
+__attribute__((visibility("default"))) void* (
+    *MALLOC_HOOK_MAYBE_VOLATILE __realloc_hook)(void*, size_t, const void*) =
+    &GlibcReallocHook;
+
+__attribute__((visibility("default"))) void (
+    *MALLOC_HOOK_MAYBE_VOLATILE __free_hook)(void*,
+                                             const void*) = &GlibcFreeHook;
+
+__attribute__((visibility("default"))) void* (
+    *MALLOC_HOOK_MAYBE_VOLATILE __memalign_hook)(size_t, size_t, const void*) =
+    &GlibcMemalignHook;
+
+// 2) Redefine libc symbols themselves.
+
+SHIM_ALWAYS_EXPORT void* __libc_malloc(size_t size) {
+  return ShimMalloc(size, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT void __libc_free(void* ptr) {
+  ShimFree(ptr, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT void* __libc_realloc(void* ptr, size_t size) {
+  return ShimRealloc(ptr, size, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT void* __libc_calloc(size_t n, size_t size) {
+  return ShimCalloc(n, size, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT void __libc_cfree(void* ptr) {
+  return ShimFree(ptr, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT void* __libc_memalign(size_t align, size_t s) {
+  return ShimMemalign(align, s, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT void* __libc_valloc(size_t size) {
+  return ShimValloc(size, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT void* __libc_pvalloc(size_t size) {
+  return ShimPvalloc(size);
+}
+
+SHIM_ALWAYS_EXPORT int __posix_memalign(void** r, size_t a, size_t s) {
+  return ShimPosixMemalign(r, a, s);
+}
+
+}  // extern "C"
+
+// Safety check.
+#if !defined(__GLIBC__)
+#error The target platform does not seem to use Glibc. Disable the allocator \
+shim by setting use_allocator_shim=false in GN args.
+#endif
diff --git a/base/allocator/allocator_shim_override_libc_symbols.h b/base/allocator/allocator_shim_override_libc_symbols.h
new file mode 100644
index 0000000..b77cbb1
--- /dev/null
+++ b/base/allocator/allocator_shim_override_libc_symbols.h
@@ -0,0 +1,63 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This header's purpose is to preempt the Libc symbols for malloc/new so that
+// they call the shim layer entry points.
+
+#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_LIBC_SYMBOLS_H_
+#error This header is meant to be included only once by allocator_shim.cc
+#endif
+#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_LIBC_SYMBOLS_H_
+
+#include <malloc.h>
+
+#include "base/allocator/allocator_shim_internals.h"
+
+extern "C" {
+
+SHIM_ALWAYS_EXPORT void* malloc(size_t size) __THROW {
+  return ShimMalloc(size, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT void free(void* ptr) __THROW {
+  ShimFree(ptr, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT void* realloc(void* ptr, size_t size) __THROW {
+  return ShimRealloc(ptr, size, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT void* calloc(size_t n, size_t size) __THROW {
+  return ShimCalloc(n, size, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT void cfree(void* ptr) __THROW {
+  ShimFree(ptr, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT void* memalign(size_t align, size_t s) __THROW {
+  return ShimMemalign(align, s, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT void* valloc(size_t size) __THROW {
+  return ShimValloc(size, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT void* pvalloc(size_t size) __THROW {
+  return ShimPvalloc(size);
+}
+
+SHIM_ALWAYS_EXPORT int posix_memalign(void** r, size_t a, size_t s) __THROW {
+  return ShimPosixMemalign(r, a, s);
+}
+
+// The default dispatch translation unit also has to define the following
+// symbols (unless they are ultimately routed to the system symbols):
+//   void malloc_stats(void);
+//   int mallopt(int, int);
+//   struct mallinfo mallinfo(void);
+//   size_t malloc_size(void*);
+//   size_t malloc_usable_size(const void*);
+
+}  // extern "C"
diff --git a/base/allocator/allocator_shim_override_linker_wrapped_symbols.h b/base/allocator/allocator_shim_override_linker_wrapped_symbols.h
new file mode 100644
index 0000000..6bf73c3
--- /dev/null
+++ b/base/allocator/allocator_shim_override_linker_wrapped_symbols.h
@@ -0,0 +1,54 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_LINKER_WRAPPED_SYMBOLS_H_
+#error This header is meant to be included only once by allocator_shim.cc
+#endif
+#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_LINKER_WRAPPED_SYMBOLS_H_
+
+// This header overrides the __wrap_X symbols when using the link-time
+// -Wl,-wrap,malloc shim-layer approach (see README.md).
+// All references to malloc, free, etc. within the linker unit that gets the
+// -wrap linker flags (e.g., libchrome.so) will be rewritten by the linker as
+// references to __wrap_malloc, __wrap_free, etc., which are defined here.
+
+#include "base/allocator/allocator_shim_internals.h"
+
+extern "C" {
+
+SHIM_ALWAYS_EXPORT void* __wrap_calloc(size_t n, size_t size) {
+  return ShimCalloc(n, size, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT void __wrap_free(void* ptr) {
+  ShimFree(ptr, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT void* __wrap_malloc(size_t size) {
+  return ShimMalloc(size, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT void* __wrap_memalign(size_t align, size_t size) {
+  return ShimMemalign(align, size, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT int __wrap_posix_memalign(void** res,
+                                             size_t align,
+                                             size_t size) {
+  return ShimPosixMemalign(res, align, size);
+}
+
+SHIM_ALWAYS_EXPORT void* __wrap_pvalloc(size_t size) {
+  return ShimPvalloc(size);
+}
+
+SHIM_ALWAYS_EXPORT void* __wrap_realloc(void* address, size_t size) {
+  return ShimRealloc(address, size, nullptr);
+}
+
+SHIM_ALWAYS_EXPORT void* __wrap_valloc(size_t size) {
+  return ShimValloc(size, nullptr);
+}
+
+}  // extern "C"
diff --git a/base/allocator/allocator_shim_override_mac_symbols.h b/base/allocator/allocator_shim_override_mac_symbols.h
new file mode 100644
index 0000000..0b65edb
--- /dev/null
+++ b/base/allocator/allocator_shim_override_mac_symbols.h
@@ -0,0 +1,60 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_MAC_SYMBOLS_H_
+#error This header is meant to be included only once by allocator_shim.cc
+#endif
+#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_MAC_SYMBOLS_H_
+
+#include "base/allocator/malloc_zone_functions_mac.h"
+#include "third_party/apple_apsl/malloc.h"
+
+namespace base {
+namespace allocator {
+
+MallocZoneFunctions MallocZoneFunctionsToReplaceDefault() {
+  MallocZoneFunctions new_functions;
+  memset(&new_functions, 0, sizeof(MallocZoneFunctions));
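+  // Each capture-less lambda below converts to a plain C function pointer, as
+  // the malloc zone interface requires; the zone is handed back to the shim
+  // entry points as the |context| parameter.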
+  new_functions.size = [](malloc_zone_t* zone, const void* ptr) -> size_t {
+    return ShimGetSizeEstimate(ptr, zone);
+  };
+  new_functions.malloc = [](malloc_zone_t* zone, size_t size) -> void* {
+    return ShimMalloc(size, zone);
+  };
+  new_functions.calloc = [](malloc_zone_t* zone, size_t n,
+                            size_t size) -> void* {
+    return ShimCalloc(n, size, zone);
+  };
+  new_functions.valloc = [](malloc_zone_t* zone, size_t size) -> void* {
+    return ShimValloc(size, zone);
+  };
+  new_functions.free = [](malloc_zone_t* zone, void* ptr) {
+    ShimFree(ptr, zone);
+  };
+  new_functions.realloc = [](malloc_zone_t* zone, void* ptr,
+                             size_t size) -> void* {
+    return ShimRealloc(ptr, size, zone);
+  };
+  new_functions.batch_malloc = [](struct _malloc_zone_t* zone, size_t size,
+                                  void** results,
+                                  unsigned num_requested) -> unsigned {
+    return ShimBatchMalloc(size, results, num_requested, zone);
+  };
+  new_functions.batch_free = [](struct _malloc_zone_t* zone, void** to_be_freed,
+                                unsigned num_to_be_freed) -> void {
+    ShimBatchFree(to_be_freed, num_to_be_freed, zone);
+  };
+  new_functions.memalign = [](malloc_zone_t* zone, size_t alignment,
+                              size_t size) -> void* {
+    return ShimMemalign(alignment, size, zone);
+  };
+  new_functions.free_definite_size = [](malloc_zone_t* zone, void* ptr,
+                                        size_t size) {
+    ShimFreeDefiniteSize(ptr, size, zone);
+  };
+  return new_functions;
+}
+
+}  // namespace allocator
+}  // namespace base
diff --git a/base/allocator/allocator_shim_override_ucrt_symbols_win.h b/base/allocator/allocator_shim_override_ucrt_symbols_win.h
new file mode 100644
index 0000000..ed02656
--- /dev/null
+++ b/base/allocator/allocator_shim_override_ucrt_symbols_win.h
@@ -0,0 +1,100 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This header defines symbols to override the same functions in the Visual C++
+// CRT implementation.
+
+#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_UCRT_SYMBOLS_WIN_H_
+#error This header is meant to be included only once by allocator_shim.cc
+#endif
+#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_UCRT_SYMBOLS_WIN_H_
+
+#include <malloc.h>
+
+#include <windows.h>
+
+extern "C" {
+
+void* (*malloc_unchecked)(size_t) = &base::allocator::UncheckedAlloc;
+
+namespace {
+
+int win_new_mode = 0;
+
+}  // namespace
+
+// This function behaves similarly to MSVC's _set_new_mode.
+// If flag is 0 (default), calls to malloc will behave normally.
+// If flag is 1, calls to malloc will behave like calls to new,
+// and the std_new_handler will be invoked on failure.
+// Returns the previous mode.
+//
+// Replaces _set_new_mode in ucrt\heap\new_mode.cpp
+int _set_new_mode(int flag) {
+  // The MS CRT calls this function early on in startup, so this serves as a low
+  // overhead proof that the allocator shim is in place for this process.
+  base::allocator::g_is_win_shim_layer_initialized = true;
+  int old_mode = win_new_mode;
+  win_new_mode = flag;
+
+  base::allocator::SetCallNewHandlerOnMallocFailure(win_new_mode != 0);
+
+  return old_mode;
+}
+
+// Replaces _query_new_mode in ucrt\heap\new_mode.cpp
+int _query_new_mode() {
+  return win_new_mode;
+}
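+
+// Illustrative usage sketch (hypothetical caller, not part of this file): a
+// process that wants malloc failures to invoke the std::new_handler, the way
+// operator new does, could write:
+//
+//   int old_mode = _set_new_mode(1);  // malloc now calls new_handler on OOM.
+//   ...                               // Allocate as usual.
+//   _set_new_mode(old_mode);          // Restore the previous behavior.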
+
+// These symbols override the CRT's implementation of the same functions.
+__declspec(restrict) void* malloc(size_t size) {
+  return ShimMalloc(size, nullptr);
+}
+
+void free(void* ptr) {
+  ShimFree(ptr, nullptr);
+}
+
+__declspec(restrict) void* realloc(void* ptr, size_t size) {
+  return ShimRealloc(ptr, size, nullptr);
+}
+
+__declspec(restrict) void* calloc(size_t n, size_t size) {
+  return ShimCalloc(n, size, nullptr);
+}
+
+// The symbols
+//   * __acrt_heap
+//   * __acrt_initialize_heap
+//   * __acrt_uninitialize_heap
+//   * _get_heap_handle
+// must be overridden all together or not at all, as they are otherwise
+// supplied by heap_handle.obj in the ucrt.lib file.
+HANDLE __acrt_heap = nullptr;
+
+bool __acrt_initialize_heap() {
+  __acrt_heap = ::HeapCreate(0, 0, 0);
+  return true;
+}
+
+bool __acrt_uninitialize_heap() {
+  ::HeapDestroy(__acrt_heap);
+  __acrt_heap = nullptr;
+  return true;
+}
+
+intptr_t _get_heap_handle(void) {
+  return reinterpret_cast<intptr_t>(__acrt_heap);
+}
+
+// The default dispatch translation unit has to define also the following
+// symbols (unless they are ultimately routed to the system symbols):
+//   void malloc_stats(void);
+//   int mallopt(int, int);
+//   struct mallinfo mallinfo(void);
+//   size_t malloc_size(void*);
+//   size_t malloc_usable_size(const void*);
+
+}  // extern "C"
diff --git a/base/allocator/allocator_shim_unittest.cc b/base/allocator/allocator_shim_unittest.cc
new file mode 100644
index 0000000..3be8f2c
--- /dev/null
+++ b/base/allocator/allocator_shim_unittest.cc
@@ -0,0 +1,467 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/allocator_shim.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <memory>
+#include <new>
+#include <vector>
+
+#include "base/allocator/buildflags.h"
+#include "base/allocator/partition_allocator/partition_alloc.h"
+#include "base/atomicops.h"
+#include "base/process/process_metrics.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread_local.h"
+#include "build/build_config.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#elif defined(OS_MACOSX)
+#include <malloc/malloc.h>
+#include "base/allocator/allocator_interception_mac.h"
+#include "base/mac/mac_util.h"
+#include "third_party/apple_apsl/malloc.h"
+#else
+#include <malloc.h>
+#endif
+
+#if !defined(OS_WIN)
+#include <unistd.h>
+#endif
+
+// Some newer Android NDKs (64-bit) no longer expose (p)valloc. These
+// functions are implemented at the shim-layer level.
+#if defined(OS_ANDROID)
+extern "C" {
+void* valloc(size_t size);
+void* pvalloc(size_t size);
+}
+#endif
+
+namespace base {
+namespace allocator {
+namespace {
+
+using testing::MockFunction;
+using testing::_;
+
+class AllocatorShimTest : public testing::Test {
+ public:
+  static const size_t kMaxSizeTracked = 2 * base::kSystemPageSize;
+  AllocatorShimTest() : testing::Test() {}
+
+  static size_t Hash(const void* ptr) {
+    return reinterpret_cast<uintptr_t>(ptr) % kMaxSizeTracked;
+  }
+
+  static void* MockAlloc(const AllocatorDispatch* self,
+                         size_t size,
+                         void* context) {
+    if (instance_ && size < kMaxSizeTracked)
+      ++(instance_->allocs_intercepted_by_size[size]);
+    return self->next->alloc_function(self->next, size, context);
+  }
+
+  static void* MockAllocZeroInit(const AllocatorDispatch* self,
+                                 size_t n,
+                                 size_t size,
+                                 void* context) {
+    const size_t real_size = n * size;
+    if (instance_ && real_size < kMaxSizeTracked)
+      ++(instance_->zero_allocs_intercepted_by_size[real_size]);
+    return self->next->alloc_zero_initialized_function(self->next, n, size,
+                                                       context);
+  }
+
+  static void* MockAllocAligned(const AllocatorDispatch* self,
+                                size_t alignment,
+                                size_t size,
+                                void* context) {
+    if (instance_) {
+      if (size < kMaxSizeTracked)
+        ++(instance_->aligned_allocs_intercepted_by_size[size]);
+      if (alignment < kMaxSizeTracked)
+        ++(instance_->aligned_allocs_intercepted_by_alignment[alignment]);
+    }
+    return self->next->alloc_aligned_function(self->next, alignment, size,
+                                              context);
+  }
+
+  static void* MockRealloc(const AllocatorDispatch* self,
+                           void* address,
+                           size_t size,
+                           void* context) {
+    if (instance_) {
+      // Size 0xFEED is a special sentinel for the NewHandlerConcurrency test.
+      // The first time it is hit on each thread, the realloc fails, causing
+      // the invocation of the std::new_handler.
+      if (size == 0xFEED) {
+        if (!instance_->did_fail_realloc_0xfeed_once->Get()) {
+          instance_->did_fail_realloc_0xfeed_once->Set(true);
+          return nullptr;
+        } else {
+          return address;
+        }
+      }
+
+      if (size < kMaxSizeTracked)
+        ++(instance_->reallocs_intercepted_by_size[size]);
+      ++instance_->reallocs_intercepted_by_addr[Hash(address)];
+    }
+    return self->next->realloc_function(self->next, address, size, context);
+  }
+
+  static void MockFree(const AllocatorDispatch* self,
+                       void* address,
+                       void* context) {
+    if (instance_) {
+      ++instance_->frees_intercepted_by_addr[Hash(address)];
+    }
+    self->next->free_function(self->next, address, context);
+  }
+
+  static size_t MockGetSizeEstimate(const AllocatorDispatch* self,
+                                    void* address,
+                                    void* context) {
+    return self->next->get_size_estimate_function(self->next, address, context);
+  }
+
+  static unsigned MockBatchMalloc(const AllocatorDispatch* self,
+                                  size_t size,
+                                  void** results,
+                                  unsigned num_requested,
+                                  void* context) {
+    if (instance_) {
+      instance_->batch_mallocs_intercepted_by_size[size] =
+          instance_->batch_mallocs_intercepted_by_size[size] + num_requested;
+    }
+    return self->next->batch_malloc_function(self->next, size, results,
+                                             num_requested, context);
+  }
+
+  static void MockBatchFree(const AllocatorDispatch* self,
+                            void** to_be_freed,
+                            unsigned num_to_be_freed,
+                            void* context) {
+    if (instance_) {
+      for (unsigned i = 0; i < num_to_be_freed; ++i) {
+        ++instance_->batch_frees_intercepted_by_addr[Hash(to_be_freed[i])];
+      }
+    }
+    self->next->batch_free_function(self->next, to_be_freed, num_to_be_freed,
+                                    context);
+  }
+
+  static void MockFreeDefiniteSize(const AllocatorDispatch* self,
+                                   void* ptr,
+                                   size_t size,
+                                   void* context) {
+    if (instance_) {
+      ++instance_->frees_intercepted_by_addr[Hash(ptr)];
+      ++instance_->free_definite_sizes_intercepted_by_size[size];
+    }
+    self->next->free_definite_size_function(self->next, ptr, size, context);
+  }
+
+  static void NewHandler() {
+    if (!instance_)
+      return;
+    subtle::Barrier_AtomicIncrement(&instance_->num_new_handler_calls, 1);
+  }
+
+  int32_t GetNumberOfNewHandlerCalls() {
+    return subtle::Acquire_Load(&instance_->num_new_handler_calls);
+  }
+
+  void SetUp() override {
+    const size_t array_size = kMaxSizeTracked * sizeof(size_t);
+    memset(&allocs_intercepted_by_size, 0, array_size);
+    memset(&zero_allocs_intercepted_by_size, 0, array_size);
+    memset(&aligned_allocs_intercepted_by_size, 0, array_size);
+    memset(&aligned_allocs_intercepted_by_alignment, 0, array_size);
+    memset(&reallocs_intercepted_by_size, 0, array_size);
+    memset(&frees_intercepted_by_addr, 0, array_size);
+    memset(&batch_mallocs_intercepted_by_size, 0, array_size);
+    memset(&batch_frees_intercepted_by_addr, 0, array_size);
+    memset(&free_definite_sizes_intercepted_by_size, 0, array_size);
+    did_fail_realloc_0xfeed_once.reset(new ThreadLocalBoolean());
+    subtle::Release_Store(&num_new_handler_calls, 0);
+    instance_ = this;
+
+#if defined(OS_MACOSX)
+    InitializeAllocatorShim();
+#endif
+  }
+
+  void TearDown() override {
+    instance_ = nullptr;
+#if defined(OS_MACOSX)
+    UninterceptMallocZonesForTesting();
+#endif
+  }
+
+ protected:
+  size_t allocs_intercepted_by_size[kMaxSizeTracked];
+  size_t zero_allocs_intercepted_by_size[kMaxSizeTracked];
+  size_t aligned_allocs_intercepted_by_size[kMaxSizeTracked];
+  size_t aligned_allocs_intercepted_by_alignment[kMaxSizeTracked];
+  size_t reallocs_intercepted_by_size[kMaxSizeTracked];
+  size_t reallocs_intercepted_by_addr[kMaxSizeTracked];
+  size_t frees_intercepted_by_addr[kMaxSizeTracked];
+  size_t batch_mallocs_intercepted_by_size[kMaxSizeTracked];
+  size_t batch_frees_intercepted_by_addr[kMaxSizeTracked];
+  size_t free_definite_sizes_intercepted_by_size[kMaxSizeTracked];
+  std::unique_ptr<ThreadLocalBoolean> did_fail_realloc_0xfeed_once;
+  subtle::Atomic32 num_new_handler_calls;
+
+ private:
+  static AllocatorShimTest* instance_;
+};
+
+struct TestStruct1 {
+  uint32_t ignored;
+  uint8_t ignored_2;
+};
+
+struct TestStruct2 {
+  uint64_t ignored;
+  uint8_t ignored_3;
+};
+
+class ThreadDelegateForNewHandlerTest : public PlatformThread::Delegate {
+ public:
+  ThreadDelegateForNewHandlerTest(WaitableEvent* event) : event_(event) {}
+
+  void ThreadMain() override {
+    event_->Wait();
+    void* temp = malloc(1);
+    void* res = realloc(temp, 0xFEED);
+    EXPECT_EQ(temp, res);
+  }
+
+ private:
+  WaitableEvent* event_;
+};
+
+AllocatorShimTest* AllocatorShimTest::instance_ = nullptr;
+
+AllocatorDispatch g_mock_dispatch = {
+    &AllocatorShimTest::MockAlloc,         /* alloc_function */
+    &AllocatorShimTest::MockAllocZeroInit, /* alloc_zero_initialized_function */
+    &AllocatorShimTest::MockAllocAligned,  /* alloc_aligned_function */
+    &AllocatorShimTest::MockRealloc,       /* realloc_function */
+    &AllocatorShimTest::MockFree,          /* free_function */
+    &AllocatorShimTest::MockGetSizeEstimate,  /* get_size_estimate_function */
+    &AllocatorShimTest::MockBatchMalloc,      /* batch_malloc_function */
+    &AllocatorShimTest::MockBatchFree,        /* batch_free_function */
+    &AllocatorShimTest::MockFreeDefiniteSize, /* free_definite_size_function */
+    nullptr,                                  /* next */
+};
+
+TEST_F(AllocatorShimTest, InterceptLibcSymbols) {
+  InsertAllocatorDispatch(&g_mock_dispatch);
+
+  void* alloc_ptr = malloc(19);
+  ASSERT_NE(nullptr, alloc_ptr);
+  ASSERT_GE(allocs_intercepted_by_size[19], 1u);
+
+  void* zero_alloc_ptr = calloc(2, 23);
+  ASSERT_NE(nullptr, zero_alloc_ptr);
+  ASSERT_GE(zero_allocs_intercepted_by_size[2 * 23], 1u);
+
+#if !defined(OS_WIN)
+  const size_t kPageSize = base::GetPageSize();
+  void* posix_memalign_ptr = nullptr;
+  int res = posix_memalign(&posix_memalign_ptr, 256, 59);
+  ASSERT_EQ(0, res);
+  ASSERT_NE(nullptr, posix_memalign_ptr);
+  ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(posix_memalign_ptr) % 256);
+  ASSERT_GE(aligned_allocs_intercepted_by_alignment[256], 1u);
+  ASSERT_GE(aligned_allocs_intercepted_by_size[59], 1u);
+
+  void* valloc_ptr = valloc(61);
+  ASSERT_NE(nullptr, valloc_ptr);
+  ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(valloc_ptr) % kPageSize);
+  ASSERT_GE(aligned_allocs_intercepted_by_alignment[kPageSize], 1u);
+  ASSERT_GE(aligned_allocs_intercepted_by_size[61], 1u);
+#endif  // !OS_WIN
+
+#if !defined(OS_WIN) && !defined(OS_MACOSX)
+  void* memalign_ptr = memalign(128, 53);
+  ASSERT_NE(nullptr, memalign_ptr);
+  ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(memalign_ptr) % 128);
+  ASSERT_GE(aligned_allocs_intercepted_by_alignment[128], 1u);
+  ASSERT_GE(aligned_allocs_intercepted_by_size[53], 1u);
+
+  void* pvalloc_ptr = pvalloc(67);
+  ASSERT_NE(nullptr, pvalloc_ptr);
+  ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(pvalloc_ptr) % kPageSize);
+  ASSERT_GE(aligned_allocs_intercepted_by_alignment[kPageSize], 1u);
+  // pvalloc rounds the size up to the next page.
+  ASSERT_GE(aligned_allocs_intercepted_by_size[kPageSize], 1u);
+#endif  // !OS_WIN && !OS_MACOSX
+
+  char* realloc_ptr = static_cast<char*>(malloc(10));
+  strcpy(realloc_ptr, "foobar");
+  void* old_realloc_ptr = realloc_ptr;
+  realloc_ptr = static_cast<char*>(realloc(realloc_ptr, 73));
+  ASSERT_GE(reallocs_intercepted_by_size[73], 1u);
+  ASSERT_GE(reallocs_intercepted_by_addr[Hash(old_realloc_ptr)], 1u);
+  ASSERT_EQ(0, strcmp(realloc_ptr, "foobar"));
+
+  free(alloc_ptr);
+  ASSERT_GE(frees_intercepted_by_addr[Hash(alloc_ptr)], 1u);
+
+  free(zero_alloc_ptr);
+  ASSERT_GE(frees_intercepted_by_addr[Hash(zero_alloc_ptr)], 1u);
+
+#if !defined(OS_WIN) && !defined(OS_MACOSX)
+  free(memalign_ptr);
+  ASSERT_GE(frees_intercepted_by_addr[Hash(memalign_ptr)], 1u);
+
+  free(pvalloc_ptr);
+  ASSERT_GE(frees_intercepted_by_addr[Hash(pvalloc_ptr)], 1u);
+#endif  // !OS_WIN && !OS_MACOSX
+
+#if !defined(OS_WIN)
+  free(posix_memalign_ptr);
+  ASSERT_GE(frees_intercepted_by_addr[Hash(posix_memalign_ptr)], 1u);
+
+  free(valloc_ptr);
+  ASSERT_GE(frees_intercepted_by_addr[Hash(valloc_ptr)], 1u);
+#endif  // !OS_WIN
+
+  free(realloc_ptr);
+  ASSERT_GE(frees_intercepted_by_addr[Hash(realloc_ptr)], 1u);
+
+  RemoveAllocatorDispatchForTesting(&g_mock_dispatch);
+
+  void* non_hooked_ptr = malloc(4095);
+  ASSERT_NE(nullptr, non_hooked_ptr);
+  ASSERT_EQ(0u, allocs_intercepted_by_size[4095]);
+  free(non_hooked_ptr);
+}
+
+#if defined(OS_MACOSX)
+TEST_F(AllocatorShimTest, InterceptLibcSymbolsBatchMallocFree) {
+  InsertAllocatorDispatch(&g_mock_dispatch);
+
+  unsigned count = 13;
+  std::vector<void*> results;
+  results.resize(count);
+  unsigned result_count = malloc_zone_batch_malloc(malloc_default_zone(), 99,
+                                                   results.data(), count);
+  ASSERT_EQ(count, result_count);
+
+  // TODO(erikchen): On macOS 10.12+, batch_malloc in the default zone may
+  // forward to another zone, which we've also shimmed, resulting in
+  // MockBatchMalloc getting called twice as often as we'd expect. This
+  // re-entrancy into the allocator shim is a bug that needs to be fixed.
+  // https://crbug.com/693237.
+  // ASSERT_EQ(count, batch_mallocs_intercepted_by_size[99]);
+
+  std::vector<void*> results_copy(results);
+  malloc_zone_batch_free(malloc_default_zone(), results.data(), count);
+  for (void* result : results_copy) {
+    ASSERT_GE(batch_frees_intercepted_by_addr[Hash(result)], 1u);
+  }
+  RemoveAllocatorDispatchForTesting(&g_mock_dispatch);
+}
+
+TEST_F(AllocatorShimTest, InterceptLibcSymbolsFreeDefiniteSize) {
+  InsertAllocatorDispatch(&g_mock_dispatch);
+
+  void* alloc_ptr = malloc(19);
+  ASSERT_NE(nullptr, alloc_ptr);
+  ASSERT_GE(allocs_intercepted_by_size[19], 1u);
+
+  ChromeMallocZone* default_zone =
+          reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
+  default_zone->free_definite_size(malloc_default_zone(), alloc_ptr, 19);
+  ASSERT_GE(free_definite_sizes_intercepted_by_size[19], 1u);
+  RemoveAllocatorDispatchForTesting(&g_mock_dispatch);
+}
+#endif  // defined(OS_MACOSX)
+
+TEST_F(AllocatorShimTest, InterceptCppSymbols) {
+  InsertAllocatorDispatch(&g_mock_dispatch);
+
+  TestStruct1* new_ptr = new TestStruct1;
+  ASSERT_NE(nullptr, new_ptr);
+  ASSERT_GE(allocs_intercepted_by_size[sizeof(TestStruct1)], 1u);
+
+  TestStruct1* new_array_ptr = new TestStruct1[3];
+  ASSERT_NE(nullptr, new_array_ptr);
+  ASSERT_GE(allocs_intercepted_by_size[sizeof(TestStruct1) * 3], 1u);
+
+  TestStruct2* new_nt_ptr = new (std::nothrow) TestStruct2;
+  ASSERT_NE(nullptr, new_nt_ptr);
+  ASSERT_GE(allocs_intercepted_by_size[sizeof(TestStruct2)], 1u);
+
+  TestStruct2* new_array_nt_ptr = new TestStruct2[3];
+  ASSERT_NE(nullptr, new_array_nt_ptr);
+  ASSERT_GE(allocs_intercepted_by_size[sizeof(TestStruct2) * 3], 1u);
+
+  delete new_ptr;
+  ASSERT_GE(frees_intercepted_by_addr[Hash(new_ptr)], 1u);
+
+  delete[] new_array_ptr;
+  ASSERT_GE(frees_intercepted_by_addr[Hash(new_array_ptr)], 1u);
+
+  delete new_nt_ptr;
+  ASSERT_GE(frees_intercepted_by_addr[Hash(new_nt_ptr)], 1u);
+
+  delete[] new_array_nt_ptr;
+  ASSERT_GE(frees_intercepted_by_addr[Hash(new_array_nt_ptr)], 1u);
+
+  RemoveAllocatorDispatchForTesting(&g_mock_dispatch);
+}
+
+// This test exercises the case of concurrent OOM failure, which would end up
+// invoking std::new_handler concurrently. This is to cover the CallNewHandler()
+// paths of allocator_shim.cc and smoke-test its thread safety.
+// The test creates kNumThreads threads. Each of them mallocs some memory, and
+// then does a realloc(<new memory>, 0xFEED).
+// The shim intercepts such reallocs and makes each fail only once per thread.
+// We expect to see exactly kNumThreads invocations of the new_handler.
+TEST_F(AllocatorShimTest, NewHandlerConcurrency) {
+  const int kNumThreads = 32;
+  PlatformThreadHandle threads[kNumThreads];
+
+  // The WaitableEvent here is used to attempt to trigger all the threads at
+  // the same time, after they have been initialized.
+  WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+                      WaitableEvent::InitialState::NOT_SIGNALED);
+
+  ThreadDelegateForNewHandlerTest mock_thread_main(&event);
+
+  for (int i = 0; i < kNumThreads; ++i)
+    PlatformThread::Create(0, &mock_thread_main, &threads[i]);
+
+  std::set_new_handler(&AllocatorShimTest::NewHandler);
+  SetCallNewHandlerOnMallocFailure(true);  // It's going to fail on realloc().
+  InsertAllocatorDispatch(&g_mock_dispatch);
+  event.Signal();
+  for (int i = 0; i < kNumThreads; ++i)
+    PlatformThread::Join(threads[i]);
+  RemoveAllocatorDispatchForTesting(&g_mock_dispatch);
+  ASSERT_EQ(kNumThreads, GetNumberOfNewHandlerCalls());
+}
+
+#if defined(OS_WIN) && BUILDFLAG(USE_ALLOCATOR_SHIM)
+TEST_F(AllocatorShimTest, ShimReplacesCRTHeapWhenEnabled) {
+  ASSERT_NE(::GetProcessHeap(), reinterpret_cast<HANDLE>(_get_heap_handle()));
+}
+#endif  // defined(OS_WIN) && BUILDFLAG(USE_ALLOCATOR_SHIM)
+
+}  // namespace
+}  // namespace allocator
+}  // namespace base
diff --git a/base/allocator/debugallocation_shim.cc b/base/allocator/debugallocation_shim.cc
new file mode 100644
index 0000000..479cfca
--- /dev/null
+++ b/base/allocator/debugallocation_shim.cc
@@ -0,0 +1,20 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Workaround for crosbug:629593.  Using AFDO on the tcmalloc files is
+// causing problems. The tcmalloc files depend on stack layouts and
+// AFDO can mess with them. Better not to use AFDO there.  This is a
+// temporary hack. We will add a mechanism in the build system to
+// avoid using -fauto-profile for tcmalloc files.
+#if !defined(__clang__) && (defined(OS_CHROMEOS) || __GNUC__ > 5)
+// Note that this option only seems to be available in the chromeos GCC 4.9
+// toolchain, and stock GCC 5 and up.
+#pragma GCC optimize ("no-auto-profile")
+#endif
+
+#if defined(TCMALLOC_FOR_DEBUGALLOCATION)
+#include "third_party/tcmalloc/chromium/src/debugallocation.cc"
+#else
+#include "third_party/tcmalloc/chromium/src/tcmalloc.cc"
+#endif
diff --git a/base/allocator/malloc_zone_functions_mac.cc b/base/allocator/malloc_zone_functions_mac.cc
new file mode 100644
index 0000000..9a41496
--- /dev/null
+++ b/base/allocator/malloc_zone_functions_mac.cc
@@ -0,0 +1,114 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/malloc_zone_functions_mac.h"
+
+#include "base/atomicops.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+namespace allocator {
+
+MallocZoneFunctions g_malloc_zones[kMaxZoneCount];
+static_assert(std::is_pod<MallocZoneFunctions>::value,
+              "MallocZoneFunctions must be POD");
+
+void StoreZoneFunctions(const ChromeMallocZone* zone,
+                        MallocZoneFunctions* functions) {
+  memset(functions, 0, sizeof(MallocZoneFunctions));
+  functions->malloc = zone->malloc;
+  functions->calloc = zone->calloc;
+  functions->valloc = zone->valloc;
+  functions->free = zone->free;
+  functions->realloc = zone->realloc;
+  functions->size = zone->size;
+  CHECK(functions->malloc && functions->calloc && functions->valloc &&
+        functions->free && functions->realloc && functions->size);
+
+  // These functions might be nullptr.
+  functions->batch_malloc = zone->batch_malloc;
+  functions->batch_free = zone->batch_free;
+
+  if (zone->version >= 5) {
+    // Not all custom malloc zones have a memalign.
+    functions->memalign = zone->memalign;
+  }
+  if (zone->version >= 6) {
+    // This may be nullptr.
+    functions->free_definite_size = zone->free_definite_size;
+  }
+
+  functions->context = zone;
+}
+
+namespace {
+
+// All modifications to g_malloc_zones are gated behind this lock.
+// Dispatch to a malloc zone does not need to acquire this lock.
+base::Lock& GetLock() {
+  static base::Lock* g_lock = new base::Lock;
+  return *g_lock;
+}
+
+void EnsureMallocZonesInitializedLocked() {
+  GetLock().AssertAcquired();
+}
+
+int g_zone_count = 0;
+
+bool IsMallocZoneAlreadyStoredLocked(ChromeMallocZone* zone) {
+  EnsureMallocZonesInitializedLocked();
+  GetLock().AssertAcquired();
+  for (int i = 0; i < g_zone_count; ++i) {
+    if (g_malloc_zones[i].context == reinterpret_cast<void*>(zone))
+      return true;
+  }
+  return false;
+}
+
+}  // namespace
+
+bool StoreMallocZone(ChromeMallocZone* zone) {
+  base::AutoLock l(GetLock());
+  EnsureMallocZonesInitializedLocked();
+  if (IsMallocZoneAlreadyStoredLocked(zone))
+    return false;
+
+  if (g_zone_count == kMaxZoneCount)
+    return false;
+
+  StoreZoneFunctions(zone, &g_malloc_zones[g_zone_count]);
+  ++g_zone_count;
+
+  // No other thread can possibly see these stores at this point. The code that
+  // reads these values is triggered after this function returns, so we want to
+  // guarantee that they are committed at this stage.
+  base::subtle::MemoryBarrier();
+  return true;
+}
+
+bool IsMallocZoneAlreadyStored(ChromeMallocZone* zone) {
+  base::AutoLock l(GetLock());
+  return IsMallocZoneAlreadyStoredLocked(zone);
+}
+
+bool DoesMallocZoneNeedReplacing(ChromeMallocZone* zone,
+                                 const MallocZoneFunctions* functions) {
+  return IsMallocZoneAlreadyStored(zone) && zone->malloc != functions->malloc;
+}
+
+int GetMallocZoneCountForTesting() {
+  base::AutoLock l(GetLock());
+  return g_zone_count;
+}
+
+void ClearAllMallocZonesForTesting() {
+  base::AutoLock l(GetLock());
+  EnsureMallocZonesInitializedLocked();
+  memset(g_malloc_zones, 0, kMaxZoneCount * sizeof(MallocZoneFunctions));
+  g_zone_count = 0;
+}
+
+}  // namespace allocator
+}  // namespace base
diff --git a/base/allocator/malloc_zone_functions_mac.h b/base/allocator/malloc_zone_functions_mac.h
new file mode 100644
index 0000000..a7f5543
--- /dev/null
+++ b/base/allocator/malloc_zone_functions_mac.h
@@ -0,0 +1,103 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_MALLOC_ZONE_FUNCTIONS_MAC_H_
+#define BASE_ALLOCATOR_MALLOC_ZONE_FUNCTIONS_MAC_H_
+
+#include <malloc/malloc.h>
+#include <stddef.h>
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "third_party/apple_apsl/malloc.h"
+
+namespace base {
+namespace allocator {
+
+typedef void* (*malloc_type)(struct _malloc_zone_t* zone, size_t size);
+typedef void* (*calloc_type)(struct _malloc_zone_t* zone,
+                             size_t num_items,
+                             size_t size);
+typedef void* (*valloc_type)(struct _malloc_zone_t* zone, size_t size);
+typedef void (*free_type)(struct _malloc_zone_t* zone, void* ptr);
+typedef void* (*realloc_type)(struct _malloc_zone_t* zone,
+                              void* ptr,
+                              size_t size);
+typedef void* (*memalign_type)(struct _malloc_zone_t* zone,
+                               size_t alignment,
+                               size_t size);
+typedef unsigned (*batch_malloc_type)(struct _malloc_zone_t* zone,
+                                      size_t size,
+                                      void** results,
+                                      unsigned num_requested);
+typedef void (*batch_free_type)(struct _malloc_zone_t* zone,
+                                void** to_be_freed,
+                                unsigned num_to_be_freed);
+typedef void (*free_definite_size_type)(struct _malloc_zone_t* zone,
+                                        void* ptr,
+                                        size_t size);
+typedef size_t (*size_fn_type)(struct _malloc_zone_t* zone, const void* ptr);
+
+struct MallocZoneFunctions {
+  malloc_type malloc;
+  calloc_type calloc;
+  valloc_type valloc;
+  free_type free;
+  realloc_type realloc;
+  memalign_type memalign;
+  batch_malloc_type batch_malloc;
+  batch_free_type batch_free;
+  free_definite_size_type free_definite_size;
+  size_fn_type size;
+  const ChromeMallocZone* context;
+};
+
+BASE_EXPORT void StoreZoneFunctions(const ChromeMallocZone* zone,
+                                    MallocZoneFunctions* functions);
+static constexpr int kMaxZoneCount = 30;
+BASE_EXPORT extern MallocZoneFunctions g_malloc_zones[kMaxZoneCount];
+
+// The array g_malloc_zones stores all information about malloc zones before
+// they are shimmed. This information needs to be accessed during dispatch back
+// into the zone, and additional zones may be added later in the execution of
+// the program, so the array needs to be both thread-safe and high-performance.
+//
+// We begin by creating an array of MallocZoneFunctions of fixed size. We will
+// never modify the container, which provides thread-safety to iterators.  When
+// we want to add a MallocZoneFunctions to the container, we:
+//   1. Fill in all the fields.
+//   2. Update the total zone count.
+//   3. Insert a memory barrier.
+//   4. Insert our shim.
+//
+// Each MallocZoneFunctions is uniquely identified by |context|, which is a
+// pointer to the original malloc zone. When we wish to dispatch back to the
+// original malloc zones, we iterate through the array, looking for a matching
+// |context|.
+//
+// Most allocations go through the default allocator. We will ensure that the
+// default allocator is stored as the first MallocZoneFunctions.
+//
+// Returns whether the zone was successfully stored.
+BASE_EXPORT bool StoreMallocZone(ChromeMallocZone* zone);
+BASE_EXPORT bool IsMallocZoneAlreadyStored(ChromeMallocZone* zone);
+BASE_EXPORT bool DoesMallocZoneNeedReplacing(
+    ChromeMallocZone* zone,
+    const MallocZoneFunctions* functions);
+
+BASE_EXPORT int GetMallocZoneCountForTesting();
+BASE_EXPORT void ClearAllMallocZonesForTesting();
+
+inline MallocZoneFunctions& GetFunctionsForZone(void* zone) {
+  for (unsigned int i = 0; i < kMaxZoneCount; ++i) {
+    if (g_malloc_zones[i].context == zone)
+      return g_malloc_zones[i];
+  }
+  IMMEDIATE_CRASH();
+}
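+
+// Illustrative sketch (hypothetical helper, not part of this header): a shim
+// that needs to route a free() back to the zone that owns the pointer can look
+// up the original (pre-shim) function table by zone:
+//
+//   void FreeViaOriginalZone(malloc_zone_t* zone, void* ptr) {
+//     MallocZoneFunctions& functions = GetFunctionsForZone(zone);
+//     functions.free(zone, ptr);  // Dispatches to the unshimmed free.
+//   }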
+
+}  // namespace allocator
+}  // namespace base
+
+#endif  // BASE_ALLOCATOR_MALLOC_ZONE_FUNCTIONS_MAC_H_
diff --git a/base/allocator/malloc_zone_functions_mac_unittest.cc b/base/allocator/malloc_zone_functions_mac_unittest.cc
new file mode 100644
index 0000000..09aa429
--- /dev/null
+++ b/base/allocator/malloc_zone_functions_mac_unittest.cc
@@ -0,0 +1,57 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/malloc_zone_functions_mac.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace allocator {
+
+class MallocZoneFunctionsTest : public testing::Test {
+ protected:
+  void TearDown() override { ClearAllMallocZonesForTesting(); }
+};
+
+TEST_F(MallocZoneFunctionsTest, TestDefaultZoneMallocFree) {
+  ChromeMallocZone* malloc_zone =
+      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
+  StoreMallocZone(malloc_zone);
+  int* test = reinterpret_cast<int*>(
+      g_malloc_zones[0].malloc(malloc_default_zone(), 33));
+  test[0] = 1;
+  test[1] = 2;
+  g_malloc_zones[0].free(malloc_default_zone(), test);
+}
+
+TEST_F(MallocZoneFunctionsTest, IsZoneAlreadyStored) {
+  ChromeMallocZone* malloc_zone =
+      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
+  EXPECT_FALSE(IsMallocZoneAlreadyStored(malloc_zone));
+  StoreMallocZone(malloc_zone);
+  EXPECT_TRUE(IsMallocZoneAlreadyStored(malloc_zone));
+}
+
+TEST_F(MallocZoneFunctionsTest, CannotDoubleStoreZone) {
+  ChromeMallocZone* malloc_zone =
+      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
+  StoreMallocZone(malloc_zone);
+  StoreMallocZone(malloc_zone);
+  EXPECT_EQ(1, GetMallocZoneCountForTesting());
+}
+
+TEST_F(MallocZoneFunctionsTest, CannotStoreMoreThanMaxZones) {
+  std::vector<ChromeMallocZone> zones;
+  zones.resize(kMaxZoneCount * 2);
+  for (int i = 0; i < kMaxZoneCount * 2; ++i) {
+    ChromeMallocZone& zone = zones[i];
+    memcpy(&zone, malloc_default_zone(), sizeof(ChromeMallocZone));
+    StoreMallocZone(&zone);
+  }
+
+  int max_zone_count = kMaxZoneCount;
+  EXPECT_EQ(max_zone_count, GetMallocZoneCountForTesting());
+}
+
+}  // namespace allocator
+}  // namespace base
diff --git a/base/allocator/partition_allocator/OWNERS b/base/allocator/partition_allocator/OWNERS
new file mode 100644
index 0000000..b0a2a85
--- /dev/null
+++ b/base/allocator/partition_allocator/OWNERS
@@ -0,0 +1,8 @@
+ajwong@chromium.org
+haraken@chromium.org
+palmer@chromium.org
+tsepez@chromium.org
+
+# TEAM: platform-architecture-dev@chromium.org
+#       Also: security-dev@chromium.org
+# COMPONENT: Blink>MemoryAllocator>Partition
diff --git a/base/allocator/partition_allocator/PartitionAlloc.md b/base/allocator/partition_allocator/PartitionAlloc.md
new file mode 100644
index 0000000..982d91f
--- /dev/null
+++ b/base/allocator/partition_allocator/PartitionAlloc.md
@@ -0,0 +1,102 @@
+# PartitionAlloc Design
+
+This document describes PartitionAlloc at a high level. For documentation about
+its implementation, see the comments in `partition_alloc.h`.
+
+[TOC]
+
+## Overview
+
+PartitionAlloc is a memory allocator optimized for security, low allocation
+latency, and good space efficiency (the latter two when called appropriately).
+This document aims to help you understand how PartitionAlloc works so that you
+can use it effectively.
+
+## Partitions And Buckets
+
+A *partition* is a heap that contains certain object types, objects of certain
+sizes, or objects of a certain lifetime (as the caller prefers). Callers can
+create as many partitions as they need. Each partition is separate and protected
+from any other partitions.
+
+Each partition holds multiple buckets. A *bucket* is a region in a partition
+that contains similar-sized objects.
+
+PartitionAlloc aligns each object allocation with the closest bucket size. For
+example, if a partition has 3 buckets for 64 bytes, 256 bytes, and 1024 bytes,
+then PartitionAlloc will satisfy an allocation request for 128 bytes by rounding
+it up to 256 bytes and allocating from the second bucket.
+
+The special allocator class `template <size_t N> class
+SizeSpecificPartitionAllocator` will satisfy allocations only of size
+`kMaxAllocation = N - kAllocationGranularity` or less, and contains buckets for
+all `n * kAllocationGranularity` (n = 1, 2, ...,
+`kMaxAllocation / kAllocationGranularity`). Attempts to allocate more than
+`kMaxAllocation` will fail.
+
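+As a minimal sketch (assuming the `PartitionAllocatorGeneric` interface in
+`partition_alloc.h`; exact names are illustrative), a caller creates a
+partition once and then allocates from its buckets:
+
+```cpp
+// Hypothetical usage; see partition_alloc.h for the authoritative interface.
+static base::PartitionAllocatorGeneric allocator;
+allocator.init();  // Must run before the first allocation.
+
+// A 128-byte request is rounded up to the closest bucket size.
+void* ptr = PartitionAllocGeneric(allocator.root(), 128, "MyObjectType");
+PartitionFreeGeneric(allocator.root(), ptr);
+```
+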
+## Performance
+
+The current implementation is optimized for the main thread use-case. For
+example, PartitionAlloc doesn't have threaded caches.
+
+PartitionAlloc is designed to be extremely fast in its fast paths. The fast
+paths of allocation and deallocation require just 2 (reasonably predictable)
+branches. The number of operations in the fast paths is minimal, leading to the
+possibility of inlining.
+
+For an example of how to use partitions to get good performance and good safety,
+see Blink's usage, as described in `wtf/allocator/Allocator.md`.
+
+Large allocations (> kGenericMaxBucketed == 960KB) are realized by direct
+memory mapping. This size makes sense because 960KB = 0xF0000. The next larger
+bucket size is 1MB = 0x100000, which is greater than half the available space
+in a SuperPage, meaning it would not be possible to pack even 2 sequential
+allocations into a SuperPage.
+
+`PartitionRootGeneric::Alloc()` acquires a lock for thread safety. (The current
+implementation uses a spin lock on the assumption that thread contention will be
+rare in its callers. The original caller was Blink, where this is generally
+true. Spin locks also have the benefit of simplicity.)
+
+Callers can get thread-unsafe performance using a
+`SizeSpecificPartitionAllocator` or otherwise using `PartitionAlloc` (instead of
+`PartitionRootGeneric::Alloc()`). Callers can also arrange for low contention,
+such as by using a dedicated partition for single-threaded, latency-critical
+allocations.
+
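+As a sketch (same caveat: names are illustrative), a single-threaded,
+latency-critical caller could use the size-specific allocator described above
+and skip the lock entirely:
+
+```cpp
+// Hypothetical usage of the thread-unsafe, size-specific variant.
+static base::SizeSpecificPartitionAllocator<1024> fast_allocator;
+fast_allocator.init();
+void* p = PartitionAlloc(fast_allocator.root(), 64, "SmallThing");
+PartitionFree(p);
+```
+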
+Because PartitionAlloc guarantees that address space regions used for one
+partition are never reused for other partitions, partitions can consume a large
+amount of virtual address space (though not actual committed memory).
+
+Mixing various random objects in the same partition will generally lead to lower
+efficiency. For good performance, group similar objects into the same partition.
+
+## Security
+
+Security is one of the most important goals of PartitionAlloc.
+
+PartitionAlloc guarantees that different partitions exist in different regions
+of the process' address space. When the caller has freed all objects contained
+in a page in a partition, PartitionAlloc returns the physical memory to the
+operating system, but continues to reserve the region of address space.
+PartitionAlloc will only reuse an address space region for the same partition.
+
+PartitionAlloc also guarantees that:
+
+* Linear overflows cannot corrupt into the partition. (There is a guard page at
+the beginning of each partition.)
+
+* Linear overflows cannot corrupt out of the partition. (There is a guard page
+at the end of each partition.)
+
+* Linear overflow or underflow cannot corrupt the allocation metadata.
+PartitionAlloc records metadata in a dedicated region out-of-line (not adjacent
+to objects).
+
+* Objects of different sizes will likely be allocated in different buckets, and
+hence at different addresses. One page can contain only similar-sized objects.
+
+* Dereference of a freelist pointer should fault.
+
+* Partial pointer overwrite of freelist pointer should fault.
+
+* Large allocations have guard pages at the beginning and end.
diff --git a/base/allocator/partition_allocator/address_space_randomization.cc b/base/allocator/partition_allocator/address_space_randomization.cc
new file mode 100644
index 0000000..a7e17c7
--- /dev/null
+++ b/base/allocator/partition_allocator/address_space_randomization.cc
@@ -0,0 +1,124 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/address_space_randomization.h"
+
+#include "base/allocator/partition_allocator/page_allocator.h"
+#include "base/allocator/partition_allocator/spin_lock.h"
+#include "base/lazy_instance.h"
+#include "base/rand_util.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include <windows.h>  // Must be in front of other Windows header files.
+
+#include <VersionHelpers.h>
+#endif
+
+namespace base {
+
+namespace {
+
+// This is the same PRNG as used by tcmalloc for mapping address randomness;
+// see http://burtleburtle.net/bob/rand/smallprng.html
+struct ranctx {
+  subtle::SpinLock lock;
+  bool initialized;
+  uint32_t a;
+  uint32_t b;
+  uint32_t c;
+  uint32_t d;
+};
+
+static LazyInstance<ranctx>::Leaky s_ranctx = LAZY_INSTANCE_INITIALIZER;
+
+#define rot(x, k) (((x) << (k)) | ((x) >> (32 - (k))))
+
+uint32_t ranvalInternal(ranctx* x) {
+  uint32_t e = x->a - rot(x->b, 27);
+  x->a = x->b ^ rot(x->c, 17);
+  x->b = x->c + x->d;
+  x->c = x->d + e;
+  x->d = e + x->a;
+  return x->d;
+}
+
+#undef rot
+
+uint32_t ranval(ranctx* x) {
+  subtle::SpinLock::Guard guard(x->lock);
+  if (UNLIKELY(!x->initialized)) {
+    const uint64_t r1 = RandUint64();
+    const uint64_t r2 = RandUint64();
+
+    x->a = static_cast<uint32_t>(r1);
+    x->b = static_cast<uint32_t>(r1 >> 32);
+    x->c = static_cast<uint32_t>(r2);
+    x->d = static_cast<uint32_t>(r2 >> 32);
+
+    x->initialized = true;
+  }
+
+  return ranvalInternal(x);
+}
+
+}  // namespace
+
+void SetRandomPageBaseSeed(int64_t seed) {
+  ranctx* x = s_ranctx.Pointer();
+  subtle::SpinLock::Guard guard(x->lock);
+  // Set RNG to initial state.
+  x->initialized = true;
+  x->a = x->b = static_cast<uint32_t>(seed);
+  x->c = x->d = static_cast<uint32_t>(seed >> 32);
+}
+
+void* GetRandomPageBase() {
+  uintptr_t random = static_cast<uintptr_t>(ranval(s_ranctx.Pointer()));
+
+#if defined(ARCH_CPU_64_BITS)
+  random <<= 32ULL;
+  random |= static_cast<uintptr_t>(ranval(s_ranctx.Pointer()));
+
+// The kASLRMask and kASLROffset constants are chosen to suit the OS and
+// build configuration; see address_space_randomization.h.
+#if defined(OS_WIN) && !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+  // Windows >= 8.1 has the full 47 bits. Use them where available.
+  static bool windows_81 = false;
+  static bool windows_81_initialized = false;
+  if (!windows_81_initialized) {
+    windows_81 = IsWindows8Point1OrGreater();
+    windows_81_initialized = true;
+  }
+  if (!windows_81) {
+    random &= internal::kASLRMaskBefore8_10;
+  } else {
+    random &= internal::kASLRMask;
+  }
+  random += internal::kASLROffset;
+#else
+  random &= internal::kASLRMask;
+  random += internal::kASLROffset;
+#endif  // defined(OS_WIN) && !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+#else   // defined(ARCH_CPU_32_BITS)
+#if defined(OS_WIN)
+  // On win32 host systems the randomization plus huge alignment causes
+  // excessive fragmentation. Plus most of these systems lack ASLR, so the
+  // randomization isn't buying anything. In that case we just skip it.
+  // TODO(jschuh): Just dump the randomization when HE-ASLR is present.
+  static BOOL is_wow64 = -1;
+  if (is_wow64 == -1 && !IsWow64Process(GetCurrentProcess(), &is_wow64))
+    is_wow64 = FALSE;
+  if (!is_wow64)
+    return nullptr;
+#endif  // defined(OS_WIN)
+  random &= internal::kASLRMask;
+  random += internal::kASLROffset;
+#endif  // defined(ARCH_CPU_32_BITS)
+
+  DCHECK_EQ(0ULL, (random & kPageAllocationGranularityOffsetMask));
+  return reinterpret_cast<void*>(random);
+}
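+
+// Illustrative sketch (hypothetical caller): the page allocator can use the
+// randomized base as an advisory placement hint and fall back to letting the
+// kernel choose when the hint cannot be satisfied:
+//
+//   void* hint = GetRandomPageBase();
+//   void* ptr = SystemAllocPages(hint, length, accessibility, page_tag, commit);
+//   if (!ptr)
+//     ptr = SystemAllocPages(nullptr, length, accessibility, page_tag, commit);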
+
+}  // namespace base
diff --git a/base/allocator/partition_allocator/address_space_randomization.h b/base/allocator/partition_allocator/address_space_randomization.h
new file mode 100644
index 0000000..3f65a87
--- /dev/null
+++ b/base/allocator/partition_allocator/address_space_randomization.h
@@ -0,0 +1,206 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION
+
+#include "base/allocator/partition_allocator/page_allocator.h"
+#include "base/base_export.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// Sets the seed for the random number generator used by GetRandomPageBase in
+// order to generate a predictable sequence of addresses. May be called multiple
+// times.
+BASE_EXPORT void SetRandomPageBaseSeed(int64_t seed);
+
+// Calculates a random preferred mapping address. In calculating an address, we
+// balance good ASLR against not fragmenting the address space too badly.
+BASE_EXPORT void* GetRandomPageBase();
+
+namespace internal {
+
+constexpr uintptr_t AslrAddress(uintptr_t mask) {
+  return mask & kPageAllocationGranularityBaseMask;
+}
+constexpr uintptr_t AslrMask(uintptr_t bits) {
+  return AslrAddress((1ULL << bits) - 1ULL);
+}
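+
+// Worked example (assuming 4KiB page allocation granularity, so
+// kPageAllocationGranularityBaseMask == ~0xFFFULL):
+//   AslrMask(30) == ((1ULL << 30) - 1) & ~0xFFFULL == 0x3FFFF000ULL,
+// i.e. 30 bits of entropy with the low 12 bits forced to zero.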
+
+// Turn off formatting, because the thicket of nested ifdefs below is
+// incomprehensible without indentation. It is also incomprehensible with
+// indentation, but the only other option is a combinatorial explosion of
+// *_{win,linux,mac,foo}_{32,64}.h files.
+//
+// clang-format off
+
+#if defined(ARCH_CPU_64_BITS)
+
+  #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+
+    // We shouldn't allocate system pages at all for sanitizer builds. However,
+    // we do, and if random hint addresses interfere with address ranges
+    // hard-coded in those tools, bad things happen. This address range is
+    // copied from TSAN source but works with all tools. See
+    // https://crbug.com/539863.
+    constexpr uintptr_t kASLRMask = AslrAddress(0x007fffffffffULL);
+    constexpr uintptr_t kASLROffset = AslrAddress(0x7e8000000000ULL);
+
+  #elif defined(OS_WIN)
+
+    // Windows 8.1 and newer support the full 48 bit address range. Older
+    // versions of Windows only support 44 bits. Since kASLROffset is non-zero
+    // and may cause a carry, use 47 and 43 bit masks. See
+    // http://www.alex-ionescu.com/?p=246
+    constexpr uintptr_t kASLRMask = AslrMask(47);
+    constexpr uintptr_t kASLRMaskBefore8_10 = AslrMask(43);
+    // Try not to map pages into the range where Windows loads DLLs by default.
+    constexpr uintptr_t kASLROffset = 0x80000000ULL;
+
+  #elif defined(OS_MACOSX)
+
+    // macOS as of 10.12.5 does not clean up entries in page map levels 3/4
+    // [PDP/PML4] created from mmap or mach_vm_allocate, even after the region
+    // is destroyed. Using a virtual address space that is too large causes a
+    // leak of about 1 wired [can never be paged out] page per call to mmap. The
+    // page is only reclaimed when the process is killed. Confine the hint to a
+    // 39-bit section of the virtual address space.
+    //
+    // This implementation adapted from
+    // https://chromium-review.googlesource.com/c/v8/v8/+/557958. The difference
+    // is that here we clamp to 39 bits, not 32.
+    //
+    // TODO(crbug.com/738925): Remove this limitation if/when the macOS behavior
+    // changes.
+    constexpr uintptr_t kASLRMask = AslrMask(38);
+    constexpr uintptr_t kASLROffset = AslrAddress(0x1000000000ULL);
+
+  #elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+
+    #if defined(ARCH_CPU_X86_64)
+
+      // Linux (and macOS) support the full 47-bit user space of x64 processors.
+      // Use only 46 to allow the kernel a chance to fulfill the request.
+      constexpr uintptr_t kASLRMask = AslrMask(46);
+      constexpr uintptr_t kASLROffset = AslrAddress(0);
+
+    #elif defined(ARCH_CPU_ARM64)
+
+      #if defined(OS_ANDROID)
+
+      // Restrict the address range on Android to avoid a large performance
+      // regression in single-process WebViews. See https://crbug.com/837640.
+      constexpr uintptr_t kASLRMask = AslrMask(30);
+      constexpr uintptr_t kASLROffset = AslrAddress(0x20000000ULL);
+
+      #else
+
+      // ARM64 on Linux has 39-bit user space. Use 38 bits since kASLROffset
+      // could cause a carry.
+      constexpr uintptr_t kASLRMask = AslrMask(38);
+      constexpr uintptr_t kASLROffset = AslrAddress(0x1000000000ULL);
+
+      #endif
+
+    #elif defined(ARCH_CPU_PPC64)
+
+      #if defined(OS_AIX)
+
+        // AIX has 64 bits of virtual addressing, but we limit the address range
+        // to (a) minimize segment lookaside buffer (SLB) misses; and (b) use
+        // extra address space to isolate the mmap regions.
+        constexpr uintptr_t kASLRMask = AslrMask(30);
+        constexpr uintptr_t kASLROffset = AslrAddress(0x400000000000ULL);
+
+      #elif defined(ARCH_CPU_BIG_ENDIAN)
+
+        // Big-endian Linux PPC has 44 bits of virtual addressing. Use 42.
+        constexpr uintptr_t kASLRMask = AslrMask(42);
+        constexpr uintptr_t kASLROffset = AslrAddress(0);
+
+      #else  // !defined(OS_AIX) && !defined(ARCH_CPU_BIG_ENDIAN)
+
+        // Little-endian Linux PPC has 48 bits of virtual addressing. Use 46.
+        constexpr uintptr_t kASLRMask = AslrMask(46);
+        constexpr uintptr_t kASLROffset = AslrAddress(0);
+
+      #endif  // !defined(OS_AIX) && !defined(ARCH_CPU_BIG_ENDIAN)
+
+    #elif defined(ARCH_CPU_S390X)
+
+      // Linux on Z uses bits 22 - 32 for Region Indexing, which translates to
+      // 42 bits of virtual addressing. Truncate to 40 bits to allow kernel a
+      // chance to fulfill the request.
+      constexpr uintptr_t kASLRMask = AslrMask(40);
+      constexpr uintptr_t kASLROffset = AslrAddress(0);
+
+    #elif defined(ARCH_CPU_S390)
+
+      // 31 bits of virtual addressing. Truncate to 29 bits to allow the kernel
+      // a chance to fulfill the request.
+      constexpr uintptr_t kASLRMask = AslrMask(29);
+      constexpr uintptr_t kASLROffset = AslrAddress(0);
+
+    #else  // !defined(ARCH_CPU_X86_64) && !defined(ARCH_CPU_PPC64) &&
+           // !defined(ARCH_CPU_S390X) && !defined(ARCH_CPU_S390)
+
+      // For all other POSIX variants, use 30 bits.
+      constexpr uintptr_t kASLRMask = AslrMask(30);
+
+      #if defined(OS_SOLARIS)
+
+        // For our Solaris/illumos mmap hint, we pick a random address in the
+        // bottom half of the top half of the address space (that is, the third
+        // quarter). Because we do not MAP_FIXED, this will be treated only as a
+        // hint -- the system will not fail to mmap because something else
+        // happens to already be mapped at our random address. We deliberately
+        // set the hint high enough to get well above the system's break (that
+        // is, the heap); Solaris and illumos will try the hint and if that
+        // fails allocate as if there were no hint at all. The high hint
+        // prevents the break from getting hemmed in at low values, ceding half
+        // of the address space to the system heap.
+        constexpr uintptr_t kASLROffset = AslrAddress(0x80000000ULL);
+
+      #elif defined(OS_AIX)
+
+        // The range 0x30000000 - 0xD0000000 is available on AIX; choose the
+        // upper range.
+        constexpr uintptr_t kASLROffset = AslrAddress(0x90000000ULL);
+
+      #else  // !defined(OS_SOLARIS) && !defined(OS_AIX)
+
+        // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
+        // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macOS
+        // 10.6 and 10.7.
+        constexpr uintptr_t kASLROffset = AslrAddress(0x20000000ULL);
+
+      #endif  // !defined(OS_SOLARIS) && !defined(OS_AIX)
+
+    #endif  // !defined(ARCH_CPU_X86_64) && !defined(ARCH_CPU_PPC64) &&
+            // !defined(ARCH_CPU_S390X) && !defined(ARCH_CPU_S390)
+
+  #endif  // defined(OS_POSIX)
+
+#elif defined(ARCH_CPU_32_BITS)
+
+  // This is a good range on 32-bit Windows and Android (the only platforms on
+  // which we support 32-bitness). Allocates in the 0.5 - 1.5 GiB region. There
+  // is no issue with carries here.
+  constexpr uintptr_t kASLRMask = AslrMask(30);
+  constexpr uintptr_t kASLROffset = AslrAddress(0x20000000ULL);
+
+#else
+
+  #error Please tell us about your exotic hardware! Sounds interesting.
+
+#endif  // defined(ARCH_CPU_32_BITS)
+
+// clang-format on
+
+}  // namespace internal
+
+}  // namespace base
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION
diff --git a/base/allocator/partition_allocator/address_space_randomization_unittest.cc b/base/allocator/partition_allocator/address_space_randomization_unittest.cc
new file mode 100644
index 0000000..40f494d
--- /dev/null
+++ b/base/allocator/partition_allocator/address_space_randomization_unittest.cc
@@ -0,0 +1,244 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/address_space_randomization.h"
+
+#include "base/allocator/partition_allocator/page_allocator.h"
+#include "base/bit_cast.h"
+#include "base/bits.h"
+#include "base/sys_info.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#include "base/win/windows_version.h"
+// VersionHelpers.h must be included after windows.h.
+#include <VersionHelpers.h>
+#endif
+
+namespace base {
+
+namespace {
+
+uintptr_t GetMask() {
+  uintptr_t mask = internal::kASLRMask;
+#if defined(ARCH_CPU_64_BITS)
+// Sanitizers use their own kASLRMask constant.
+#if defined(OS_WIN) && !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+  if (!IsWindows8Point1OrGreater()) {
+    mask = internal::kASLRMaskBefore8_10;
+  }
+#endif  // defined(OS_WIN) && !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+#elif defined(ARCH_CPU_32_BITS)
+#if defined(OS_WIN)
+  BOOL is_wow64 = FALSE;
+  if (!IsWow64Process(GetCurrentProcess(), &is_wow64))
+    is_wow64 = FALSE;
+  if (!is_wow64) {
+    mask = 0;
+  }
+#endif  // defined(OS_WIN)
+#endif  // defined(ARCH_CPU_32_BITS)
+  return mask;
+}
+
+const size_t kSamples = 100;
+
+uintptr_t GetAddressBits() {
+  return reinterpret_cast<uintptr_t>(base::GetRandomPageBase());
+}
+
+uintptr_t GetRandomBits() {
+  return GetAddressBits() - internal::kASLROffset;
+}
+
+}  // namespace
+
+// Configurations without ASLR are tested here.
+TEST(AddressSpaceRandomizationTest, DisabledASLR) {
+  uintptr_t mask = GetMask();
+  if (!mask) {
+#if defined(OS_WIN) && defined(ARCH_CPU_32_BITS)
+    // ASLR should be turned off on 32-bit Windows.
+    EXPECT_EQ(nullptr, base::GetRandomPageBase());
+#else
+    // Otherwise, nullptr is very unexpected.
+    EXPECT_NE(nullptr, base::GetRandomPageBase());
+#endif
+  }
+}
+
+TEST(AddressSpaceRandomizationTest, Alignment) {
+  uintptr_t mask = GetMask();
+  if (!mask)
+    return;
+
+  for (size_t i = 0; i < kSamples; ++i) {
+    uintptr_t address = GetAddressBits();
+    EXPECT_EQ(0ULL, (address & kPageAllocationGranularityOffsetMask));
+  }
+}
+
+TEST(AddressSpaceRandomizationTest, Range) {
+  uintptr_t mask = GetMask();
+  if (!mask)
+    return;
+
+  uintptr_t min = internal::kASLROffset;
+  uintptr_t max = internal::kASLROffset + internal::kASLRMask;
+  for (size_t i = 0; i < kSamples; ++i) {
+    uintptr_t address = GetAddressBits();
+    EXPECT_LE(min, address);
+    EXPECT_GE(max + mask, address);
+  }
+}
+
+TEST(AddressSpaceRandomizationTest, Predictable) {
+  uintptr_t mask = GetMask();
+  if (!mask)
+    return;
+
+  const uintptr_t kInitialSeed = 0xfeed5eedULL;
+  base::SetRandomPageBaseSeed(kInitialSeed);
+
+  std::vector<uintptr_t> sequence;
+  for (size_t i = 0; i < kSamples; ++i) {
+    uintptr_t address = reinterpret_cast<uintptr_t>(base::GetRandomPageBase());
+    sequence.push_back(address);
+  }
+
+  base::SetRandomPageBaseSeed(kInitialSeed);
+
+  for (size_t i = 0; i < kSamples; ++i) {
+    uintptr_t address = reinterpret_cast<uintptr_t>(base::GetRandomPageBase());
+    EXPECT_EQ(address, sequence[i]);
+  }
+}
+
+// This randomness test is adapted from V8's PRNG tests.
+
+// Chi squared statistic for observing m matches out of n trials when the
+// expected probability of a match is 1/2:
+// ((m - n/2)^2 + ((n - m) - n/2)^2) / (n/2).
+double ChiSquared(int m, int n) {
+  double ys_minus_np1 = (m - n / 2.0);
+  double chi_squared_1 = ys_minus_np1 * ys_minus_np1 * 2.0 / n;
+  double ys_minus_np2 = ((n - m) - n / 2.0);
+  double chi_squared_2 = ys_minus_np2 * ys_minus_np2 * 2.0 / n;
+  return chi_squared_1 + chi_squared_2;
+}
+
+// Test for correlations between recent bits from the PRNG, or bits that are
+// biased.
+void RandomBitCorrelation(int random_bit) {
+  uintptr_t mask = GetMask();
+  if ((mask & (1ULL << random_bit)) == 0)
+    return;  // bit is always 0.
+
+#ifdef DEBUG
+  constexpr int kHistory = 2;
+  constexpr int kRepeats = 1000;
+#else
+  constexpr int kHistory = 8;
+  constexpr int kRepeats = 10000;
+#endif
+  constexpr int kPointerBits = 8 * sizeof(void*);
+  uintptr_t history[kHistory];
+  // The predictor bit is either constant 0 or 1, or one of the bits from the
+  // history.
+  for (int predictor_bit = -2; predictor_bit < kPointerBits; predictor_bit++) {
+    // The predicted bit is one of the bits from the PRNG.
+    for (int ago = 0; ago < kHistory; ago++) {
+      // We don't want to check whether each bit predicts itself.
+      if (ago == 0 && predictor_bit == random_bit)
+        continue;
+
+      // Enter the new random value into the history.
+      for (int i = ago; i >= 0; i--) {
+        history[i] = GetRandomBits();
+      }
+
+      // Find out how many of the bits are the same as the prediction bit.
+      int m = 0;
+      for (int i = 0; i < kRepeats; i++) {
+        uintptr_t random = GetRandomBits();
+        for (int j = ago - 1; j >= 0; j--)
+          history[j + 1] = history[j];
+        history[0] = random;
+
+        int predicted;
+        if (predictor_bit >= 0) {
+          predicted = (history[ago] >> predictor_bit) & 1;
+        } else {
+          predicted = predictor_bit == -2 ? 0 : 1;
+        }
+        int bit = (random >> random_bit) & 1;
+        if (bit == predicted)
+          m++;
+      }
+
+      // Chi squared analysis for k = 2 (2 states: same/not-same) and one
+      // degree of freedom (k - 1).
+      double chi_squared = ChiSquared(m, kRepeats);
+      // For 1 degree of freedom this corresponds to 1 in a million.  We are
+      // running ~8000 tests, so that would be surprising.
+      CHECK_GE(24, chi_squared);
+      // If the predictor bit is a fixed 0 or 1 then it makes no sense to
+      // repeat the test with a different age.
+      if (predictor_bit < 0)
+        break;
+    }
+  }
+}
+
+// Tests are fairly slow, so give each random bit its own test.
+#define TEST_RANDOM_BIT(BIT)                                        \
+  TEST(AddressSpaceRandomizationTest, RandomBitCorrelations##BIT) { \
+    RandomBitCorrelation(BIT);                                      \
+  }
+
+// The first 12 bits on all platforms are always 0.
+TEST_RANDOM_BIT(12)
+TEST_RANDOM_BIT(13)
+TEST_RANDOM_BIT(14)
+TEST_RANDOM_BIT(15)
+TEST_RANDOM_BIT(16)
+TEST_RANDOM_BIT(17)
+TEST_RANDOM_BIT(18)
+TEST_RANDOM_BIT(19)
+TEST_RANDOM_BIT(20)
+TEST_RANDOM_BIT(21)
+TEST_RANDOM_BIT(22)
+TEST_RANDOM_BIT(23)
+TEST_RANDOM_BIT(24)
+TEST_RANDOM_BIT(25)
+TEST_RANDOM_BIT(26)
+TEST_RANDOM_BIT(27)
+TEST_RANDOM_BIT(28)
+TEST_RANDOM_BIT(29)
+TEST_RANDOM_BIT(30)
+TEST_RANDOM_BIT(31)
+#if defined(ARCH_CPU_64_BITS)
+TEST_RANDOM_BIT(32)
+TEST_RANDOM_BIT(33)
+TEST_RANDOM_BIT(34)
+TEST_RANDOM_BIT(35)
+TEST_RANDOM_BIT(36)
+TEST_RANDOM_BIT(37)
+TEST_RANDOM_BIT(38)
+TEST_RANDOM_BIT(39)
+TEST_RANDOM_BIT(40)
+TEST_RANDOM_BIT(41)
+TEST_RANDOM_BIT(42)
+TEST_RANDOM_BIT(43)
+TEST_RANDOM_BIT(44)
+TEST_RANDOM_BIT(45)
+TEST_RANDOM_BIT(46)
+TEST_RANDOM_BIT(47)
+TEST_RANDOM_BIT(48)
+// No platforms have more than 48 address bits.
+#endif  // defined(ARCH_CPU_64_BITS)
+
+#undef TEST_RANDOM_BIT
+
+}  // namespace base
diff --git a/base/allocator/partition_allocator/oom.h b/base/allocator/partition_allocator/oom.h
new file mode 100644
index 0000000..e2d197c
--- /dev/null
+++ b/base/allocator/partition_allocator/oom.h
@@ -0,0 +1,37 @@
+// Copyright (c) 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_OOM_H
+#define BASE_ALLOCATOR_OOM_H
+
+#include "base/logging.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#endif
+
+// We do not want trivial entry points that just call OOM_CRASH() to be
+// merged ("commoned up") by linker ICF / comdat folding.
+#define OOM_CRASH_PREVENT_ICF()                  \
+  volatile int oom_crash_inhibit_icf = __LINE__; \
+  ALLOW_UNUSED_LOCAL(oom_crash_inhibit_icf)
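+// Each expansion stores the expansion site's distinct __LINE__ value through
+// a volatile local, so otherwise-identical crash stubs differ by at least one
+// instruction and cannot be folded together.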
+
+// OOM_CRASH() - Specialization of IMMEDIATE_CRASH which will raise a custom
+// exception on Windows to signal this is OOM and not a normal assert.
+#if defined(OS_WIN)
+#define OOM_CRASH()                                                     \
+  do {                                                                  \
+    OOM_CRASH_PREVENT_ICF();                                            \
+    ::RaiseException(0xE0000008, EXCEPTION_NONCONTINUABLE, 0, nullptr); \
+    IMMEDIATE_CRASH();                                                  \
+  } while (0)
+#else
+#define OOM_CRASH()          \
+  do {                       \
+    OOM_CRASH_PREVENT_ICF(); \
+    IMMEDIATE_CRASH();       \
+  } while (0)
+#endif
+
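+// Illustrative usage sketch (AllocOrDie is hypothetical, not part of base):
+//
+//   void* AllocOrDie(size_t size) {
+//     void* ptr = malloc(size);
+//     if (!ptr)
+//       OOM_CRASH();  // On Windows, raises the custom OOM exception first.
+//     return ptr;
+//   }
+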
+#endif  // BASE_ALLOCATOR_OOM_H
diff --git a/base/allocator/partition_allocator/page_allocator.cc b/base/allocator/partition_allocator/page_allocator.cc
new file mode 100644
index 0000000..328384e
--- /dev/null
+++ b/base/allocator/partition_allocator/page_allocator.cc
@@ -0,0 +1,258 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/page_allocator.h"
+
+#include <limits.h>
+
+#include "base/allocator/partition_allocator/address_space_randomization.h"
+#include "base/allocator/partition_allocator/page_allocator_internal.h"
+#include "base/allocator/partition_allocator/spin_lock.h"
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/numerics/checked_math.h"
+#include "build/build_config.h"
+
+#include <atomic>
+
+#if defined(OS_WIN)
+#include <windows.h>
+#endif
+
+#if defined(OS_WIN)
+#include "base/allocator/partition_allocator/page_allocator_internals_win.h"
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include "base/allocator/partition_allocator/page_allocator_internals_posix.h"
+#else
+#error Platform not supported.
+#endif
+
+namespace base {
+
+namespace {
+
+// We may reserve/release address space on different threads.
+LazyInstance<subtle::SpinLock>::Leaky s_reserveLock = LAZY_INSTANCE_INITIALIZER;
+
+// We only support a single block of reserved address space.
+void* s_reservation_address = nullptr;
+size_t s_reservation_size = 0;
+
+void* AllocPagesIncludingReserved(void* address,
+                                  size_t length,
+                                  PageAccessibilityConfiguration accessibility,
+                                  PageTag page_tag,
+                                  bool commit) {
+  void* ret =
+      SystemAllocPages(address, length, accessibility, page_tag, commit);
+  if (ret == nullptr) {
+    const bool cant_alloc_length = kHintIsAdvisory || address == nullptr;
+    if (cant_alloc_length) {
+      // The system cannot allocate |length| bytes. Release any reserved address
+      // space and try once more.
+      ReleaseReservation();
+      ret = SystemAllocPages(address, length, accessibility, page_tag, commit);
+    }
+  }
+  return ret;
+}
+
+// Trims |base| to the given |trim_length| and |alignment|.
+//
+// On failure (Windows only), this function returns nullptr and frees |base|.
+void* TrimMapping(void* base,
+                  size_t base_length,
+                  size_t trim_length,
+                  uintptr_t alignment,
+                  PageAccessibilityConfiguration accessibility,
+                  bool commit) {
+  size_t pre_slack = reinterpret_cast<uintptr_t>(base) & (alignment - 1);
+  if (pre_slack) {
+    pre_slack = alignment - pre_slack;
+  }
+  size_t post_slack = base_length - pre_slack - trim_length;
+  DCHECK(base_length >= trim_length || pre_slack || post_slack);
+  DCHECK(pre_slack < base_length);
+  DCHECK(post_slack < base_length);
+  return TrimMappingInternal(base, base_length, trim_length, accessibility,
+                             commit, pre_slack, post_slack);
+}
+
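+// Worked example for TrimMapping(), assuming 4 KiB allocation granularity:
+// for base = 0x5000, base_length = 0x7000, trim_length = 0x4000 and
+// alignment = 0x4000, pre_slack starts as 0x5000 & 0x3FFF == 0x1000 and
+// becomes 0x4000 - 0x1000 == 0x3000; post_slack is
+// 0x7000 - 0x3000 - 0x4000 == 0, so the trimmed mapping starts at the
+// aligned address 0x8000.
+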
+}  // namespace
+
+void* SystemAllocPages(void* hint,
+                       size_t length,
+                       PageAccessibilityConfiguration accessibility,
+                       PageTag page_tag,
+                       bool commit) {
+  DCHECK(!(length & kPageAllocationGranularityOffsetMask));
+  DCHECK(!(reinterpret_cast<uintptr_t>(hint) &
+           kPageAllocationGranularityOffsetMask));
+  DCHECK(commit || accessibility == PageInaccessible);
+  return SystemAllocPagesInternal(hint, length, accessibility, page_tag,
+                                  commit);
+}
+
+void* AllocPages(void* address,
+                 size_t length,
+                 size_t align,
+                 PageAccessibilityConfiguration accessibility,
+                 PageTag page_tag,
+                 bool commit) {
+  DCHECK(length >= kPageAllocationGranularity);
+  DCHECK(!(length & kPageAllocationGranularityOffsetMask));
+  DCHECK(align >= kPageAllocationGranularity);
+  // Alignment must be power of 2 for masking math to work.
+  DCHECK_EQ(align & (align - 1), 0UL);
+  DCHECK(!(reinterpret_cast<uintptr_t>(address) &
+           kPageAllocationGranularityOffsetMask));
+  uintptr_t align_offset_mask = align - 1;
+  uintptr_t align_base_mask = ~align_offset_mask;
+  DCHECK(!(reinterpret_cast<uintptr_t>(address) & align_offset_mask));
+
+#if defined(OS_LINUX) && defined(ARCH_CPU_64_BITS)
+  // On 64 bit Linux, we may need to adjust the address space limit for
+  // guarded allocations.
+  if (length >= kMinimumGuardedMemorySize) {
+    CHECK_EQ(PageInaccessible, accessibility);
+    CHECK(!commit);
+    if (!AdjustAddressSpaceLimit(base::checked_cast<int64_t>(length))) {
+      DLOG(WARNING) << "Could not adjust address space by " << length;
+      // Fall through. Try the allocation, since we may have a reserve.
+    }
+  }
+#endif
+
+  // If the client passed null as the address, choose a good one.
+  if (address == nullptr) {
+    address = GetRandomPageBase();
+    address = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(address) &
+                                      align_base_mask);
+  }
+
+  // First try to force an exact-size, aligned allocation from our random base.
+#if defined(ARCH_CPU_32_BITS)
+  // On 32 bit systems, first try one random aligned address, and then try an
+  // aligned address derived from the value of |ret|.
+  constexpr int kExactSizeTries = 2;
+#else
+  // On 64 bit systems, try 3 random aligned addresses.
+  constexpr int kExactSizeTries = 3;
+#endif
+
+  for (int i = 0; i < kExactSizeTries; ++i) {
+    void* ret = AllocPagesIncludingReserved(address, length, accessibility,
+                                            page_tag, commit);
+    if (ret != nullptr) {
+      // If the alignment is to our liking, we're done.
+      if (!(reinterpret_cast<uintptr_t>(ret) & align_offset_mask))
+        return ret;
+      // Free the memory and try again.
+      FreePages(ret, length);
+    } else {
+      // |ret| is null; if this try was unhinted, we're OOM.
+      if (kHintIsAdvisory || address == nullptr)
+        return nullptr;
+    }
+
+#if defined(ARCH_CPU_32_BITS)
+    // For small address spaces, try the first aligned address >= |ret|. Note
+    // |ret| may be null, in which case |address| becomes null.
+    address = reinterpret_cast<void*>(
+        (reinterpret_cast<uintptr_t>(ret) + align_offset_mask) &
+        align_base_mask);
+#else  // defined(ARCH_CPU_64_BITS)
+    // Keep trying random addresses on systems that have a large address space.
+    address = GetRandomPageBase();
+    address = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(address) &
+                                      align_base_mask);
+#endif
+  }
+
+  // Make a larger allocation so we can force alignment.
+  size_t try_length = length + (align - kPageAllocationGranularity);
+  CHECK(try_length >= length);
+  void* ret;
+
+  do {
+    // Continue randomizing only on POSIX.
+    address = kHintIsAdvisory ? GetRandomPageBase() : nullptr;
+    ret = AllocPagesIncludingReserved(address, try_length, accessibility,
+                                      page_tag, commit);
+    // The retries are for Windows, where a race can steal our mapping on
+    // resize.
+  } while (ret != nullptr &&
+           (ret = TrimMapping(ret, try_length, length, align, accessibility,
+                              commit)) == nullptr);
+
+  return ret;
+}
+
+void FreePages(void* address, size_t length) {
+  DCHECK(!(reinterpret_cast<uintptr_t>(address) &
+           kPageAllocationGranularityOffsetMask));
+  DCHECK(!(length & kPageAllocationGranularityOffsetMask));
+  FreePagesInternal(address, length);
+}
+
+bool SetSystemPagesAccess(void* address,
+                          size_t length,
+                          PageAccessibilityConfiguration accessibility) {
+  DCHECK(!(length & kSystemPageOffsetMask));
+  return SetSystemPagesAccessInternal(address, length, accessibility);
+}
+
+void DecommitSystemPages(void* address, size_t length) {
+  DCHECK_EQ(0UL, length & kSystemPageOffsetMask);
+  DecommitSystemPagesInternal(address, length);
+}
+
+bool RecommitSystemPages(void* address,
+                         size_t length,
+                         PageAccessibilityConfiguration accessibility) {
+  DCHECK_EQ(0UL, length & kSystemPageOffsetMask);
+  DCHECK_NE(PageInaccessible, accessibility);
+  return RecommitSystemPagesInternal(address, length, accessibility);
+}
+
+void DiscardSystemPages(void* address, size_t length) {
+  DCHECK_EQ(0UL, length & kSystemPageOffsetMask);
+  DiscardSystemPagesInternal(address, length);
+}
+
+bool ReserveAddressSpace(size_t size) {
+  // To avoid deadlock, call only SystemAllocPages.
+  subtle::SpinLock::Guard guard(s_reserveLock.Get());
+  if (s_reservation_address == nullptr) {
+    void* mem = SystemAllocPages(nullptr, size, PageInaccessible,
+                                 PageTag::kChromium, false);
+    if (mem != nullptr) {
+      // We guarantee this alignment when reserving address space.
+      DCHECK(!(reinterpret_cast<uintptr_t>(mem) &
+               kPageAllocationGranularityOffsetMask));
+      s_reservation_address = mem;
+      s_reservation_size = size;
+      return true;
+    }
+  }
+  return false;
+}
+
+void ReleaseReservation() {
+  // To avoid deadlock, call only FreePages.
+  subtle::SpinLock::Guard guard(s_reserveLock.Get());
+  if (s_reservation_address != nullptr) {
+    FreePages(s_reservation_address, s_reservation_size);
+    s_reservation_address = nullptr;
+    s_reservation_size = 0;
+  }
+}
+
+uint32_t GetAllocPageErrorCode() {
+  return s_allocPageErrorCode;
+}
+
+}  // namespace base
diff --git a/base/allocator/partition_allocator/page_allocator.h b/base/allocator/partition_allocator/page_allocator.h
new file mode 100644
index 0000000..4973348
--- /dev/null
+++ b/base/allocator/partition_allocator/page_allocator.h
@@ -0,0 +1,179 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_H
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_H
+
+#include <stdint.h>
+
+#include <cstddef>
+
+#include "base/allocator/partition_allocator/page_allocator_constants.h"
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "build/build_config.h"
+
+namespace base {
+
+enum PageAccessibilityConfiguration {
+  PageInaccessible,
+  PageRead,
+  PageReadWrite,
+  PageReadExecute,
+  // This flag is deprecated and will go away soon.
+  // TODO(bbudge) Remove this as soon as V8 doesn't need RWX pages.
+  PageReadWriteExecute,
+};
+
+// macOS supports tagged memory regions to help with debugging.
+enum class PageTag {
+  kFirst = 240,     // Minimum tag value.
+  kChromium = 254,  // Chromium page, including off-heap V8 ArrayBuffers.
+  kV8 = 255,        // V8 heap pages.
+  kLast = kV8       // Maximum tag value.
+};
+
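+// On macOS these values are passed to VM_MAKE_TAG() (see
+// page_allocator_internals_posix.h), which is what makes tagged regions
+// distinguishable in vmmap(1) output.
+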
+// Allocate one or more pages.
+//
+// The requested |address| is just a hint; the actual address returned may
+// differ. The returned address will be aligned at least to |align| bytes.
+// |length| is in bytes, and must be a multiple of |kPageAllocationGranularity|.
+// |align| is in bytes, and must be a power-of-two multiple of
+// |kPageAllocationGranularity|.
+//
+// If |address| is null, then a suitable and randomized address will be chosen
+// automatically.
+//
+// |page_accessibility| controls the permission of the allocated pages.
+//
+// This call will return null if the allocation cannot be satisfied.
+BASE_EXPORT void* AllocPages(void* address,
+                             size_t length,
+                             size_t align,
+                             PageAccessibilityConfiguration page_accessibility,
+                             PageTag tag = PageTag::kChromium,
+                             bool commit = true);
+
+// Free one or more pages starting at |address| and continuing for |length|
+// bytes.
+//
+// |address| and |length| must match a previous call to |AllocPages|. Therefore,
+// |address| must be aligned to |kPageAllocationGranularity| bytes, and |length|
+// must be a multiple of |kPageAllocationGranularity|.
+BASE_EXPORT void FreePages(void* address, size_t length);
+
+// Mark one or more system pages, starting at |address| with the given
+// |page_accessibility|. |length| must be a multiple of |kSystemPageSize| bytes.
+//
+// Returns true if the permission change succeeded. In most cases you must
+// |CHECK| the result.
+BASE_EXPORT WARN_UNUSED_RESULT bool SetSystemPagesAccess(
+    void* address,
+    size_t length,
+    PageAccessibilityConfiguration page_accessibility);
+
+// Decommit one or more system pages starting at |address| and continuing for
+// |length| bytes. |length| must be a multiple of |kSystemPageSize|.
+//
+// Decommitted means that physical resources (RAM or swap) backing the allocated
+// virtual address range are released back to the system, but the address space
+// is still allocated to the process (possibly using up page table entries or
+// other accounting resources). Any access to a decommitted region of memory
+// is an error and will generate a fault.
+//
+// This operation is not atomic on all platforms.
+//
+// Note: "Committed memory" is a Windows Memory Subsystem concept that ensures
+// processes will not fault when touching a committed memory region. There is
+// no analogue in the POSIX memory API where virtual memory pages are
+// best-effort allocated resources on the first touch. To create a
+// platform-agnostic abstraction, this API simulates the Windows "decommit"
+// state by both discarding the region (allowing the OS to avoid swap
+// operations) and changing the page protections so accesses fault.
+//
+// TODO(ajwong): This currently does not change page protections on POSIX
+// systems due to a perf regression. Tracked at http://crbug.com/766882.
+BASE_EXPORT void DecommitSystemPages(void* address, size_t length);
+
+// Recommit one or more system pages, starting at |address| and continuing for
+// |length| bytes with the given |page_accessibility|. |length| must be a
+// multiple of |kSystemPageSize|.
+//
+// Decommitted system pages must be recommitted with their original permissions
+// before they are used again.
+//
+// Returns true if the recommit change succeeded. In most cases you must |CHECK|
+// the result.
+BASE_EXPORT WARN_UNUSED_RESULT bool RecommitSystemPages(
+    void* address,
+    size_t length,
+    PageAccessibilityConfiguration page_accessibility);
+
+// Discard one or more system pages starting at |address| and continuing for
+// |length| bytes. |length| must be a multiple of |kSystemPageSize|.
+//
+// Discarding is a hint to the system that the page is no longer required. The
+// hint may:
+//   - Do nothing.
+//   - Discard the page immediately, freeing up physical pages.
+//   - Discard the page at some time in the future in response to memory
+//   pressure.
+//
+// Only committed pages should be discarded. Discarding a page does not decommit
+// it, and it is valid to discard an already-discarded page. A read or write to
+// a discarded page will not fault.
+//
+// Reading from a discarded page may return the original page content, or a page
+// full of zeroes.
+//
+// Writing to a discarded page is the only guaranteed way to tell the system
+// that the page is required again. Once written to, the content of the page is
+// guaranteed stable once more. After being written to, the page content may be
+// based on the original page content, or a page of zeroes.
+BASE_EXPORT void DiscardSystemPages(void* address, size_t length);
+
+// Rounds up |address| to the next multiple of |kSystemPageSize|. Returns
+// 0 for an |address| of 0.
+constexpr ALWAYS_INLINE uintptr_t RoundUpToSystemPage(uintptr_t address) {
+  return (address + kSystemPageOffsetMask) & kSystemPageBaseMask;
+}
+
+// Rounds down |address| to the previous multiple of |kSystemPageSize|. Returns
+// 0 for an |address| of 0.
+constexpr ALWAYS_INLINE uintptr_t RoundDownToSystemPage(uintptr_t address) {
+  return address & kSystemPageBaseMask;
+}
+
+// Rounds up |address| to the next multiple of |kPageAllocationGranularity|.
+// Returns 0 for an |address| of 0.
+constexpr ALWAYS_INLINE uintptr_t
+RoundUpToPageAllocationGranularity(uintptr_t address) {
+  return (address + kPageAllocationGranularityOffsetMask) &
+         kPageAllocationGranularityBaseMask;
+}
+
+// Rounds down |address| to the previous multiple of
+// |kPageAllocationGranularity|. Returns 0 for an |address| of 0.
+constexpr ALWAYS_INLINE uintptr_t
+RoundDownToPageAllocationGranularity(uintptr_t address) {
+  return address & kPageAllocationGranularityBaseMask;
+}
+
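+// Worked example, assuming 4 KiB system pages:
+// RoundUpToSystemPage(0x1001) == 0x2000, RoundUpToSystemPage(0x1000) ==
+// 0x1000, and RoundDownToSystemPage(0x1FFF) == 0x1000.
+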
+// Reserves (at least) |size| bytes of address space, aligned to
+// |kPageAllocationGranularity|. This can be called early on to make it more
+// likely that large allocations will succeed. Returns true if the reservation
+// succeeded, false if the reservation failed or a reservation was already made.
+BASE_EXPORT bool ReserveAddressSpace(size_t size);
+
+// Releases any reserved address space. |AllocPages| calls this automatically on
+// an allocation failure. External allocators may also call this on failure.
+BASE_EXPORT void ReleaseReservation();
+
+// Returns |errno| (POSIX) or the result of |GetLastError| (Windows) when |mmap|
+// (POSIX) or |VirtualAlloc| (Windows) fails.
+BASE_EXPORT uint32_t GetAllocPageErrorCode();
+
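+// Illustrative usage sketch (a hypothetical caller, relying on the defaults
+// for |page_tag| and |commit|):
+//
+//   void* buf = AllocPages(nullptr, 2 * kPageAllocationGranularity,
+//                          kPageAllocationGranularity, PageReadWrite);
+//   if (buf) {
+//     CHECK(SetSystemPagesAccess(buf, kSystemPageSize, PageRead));
+//     FreePages(buf, 2 * kPageAllocationGranularity);
+//   }
+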
+}  // namespace base
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_H
diff --git a/base/allocator/partition_allocator/page_allocator_constants.h b/base/allocator/partition_allocator/page_allocator_constants.h
new file mode 100644
index 0000000..308d099
--- /dev/null
+++ b/base/allocator/partition_allocator/page_allocator_constants.h
@@ -0,0 +1,42 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_CONSTANTS_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_CONSTANTS_H_
+
+#include <stddef.h>
+
+#include "build/build_config.h"
+
+namespace base {
+#if defined(OS_WIN)
+static constexpr size_t kPageAllocationGranularityShift = 16;  // 64KB
+#elif defined(_MIPS_ARCH_LOONGSON)
+static constexpr size_t kPageAllocationGranularityShift = 14;  // 16KB
+#else
+static constexpr size_t kPageAllocationGranularityShift = 12;  // 4KB
+#endif
+static constexpr size_t kPageAllocationGranularity =
+    1 << kPageAllocationGranularityShift;
+static constexpr size_t kPageAllocationGranularityOffsetMask =
+    kPageAllocationGranularity - 1;
+static constexpr size_t kPageAllocationGranularityBaseMask =
+    ~kPageAllocationGranularityOffsetMask;
+
+#if defined(_MIPS_ARCH_LOONGSON)
+static constexpr size_t kSystemPageSize = 16384;
+#else
+static constexpr size_t kSystemPageSize = 4096;
+#endif
+static constexpr size_t kSystemPageOffsetMask = kSystemPageSize - 1;
+static_assert((kSystemPageSize & (kSystemPageSize - 1)) == 0,
+              "kSystemPageSize must be power of 2");
+static constexpr size_t kSystemPageBaseMask = ~kSystemPageOffsetMask;
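+// For example, with 4 KiB pages kSystemPageOffsetMask == 0xFFF, so
+// (address & kSystemPageOffsetMask) isolates the in-page offset and
+// (address & kSystemPageBaseMask) strips it.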
+
+static constexpr size_t kPageMetadataShift = 5;  // 32 bytes per partition page.
+static constexpr size_t kPageMetadataSize = 1 << kPageMetadataShift;
+
+}  // namespace base
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_CONSTANTS_H_
diff --git a/base/allocator/partition_allocator/page_allocator_internal.h b/base/allocator/partition_allocator/page_allocator_internal.h
new file mode 100644
index 0000000..c8c003d
--- /dev/null
+++ b/base/allocator/partition_allocator/page_allocator_internal.h
@@ -0,0 +1,18 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNAL_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNAL_H_
+
+#include <stddef.h>
+
+#include "base/allocator/partition_allocator/page_allocator.h"
+
+namespace base {
+
+void* SystemAllocPages(void* hint,
+                       size_t length,
+                       PageAccessibilityConfiguration accessibility,
+                       PageTag page_tag,
+                       bool commit);
+
+}  // namespace base
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNAL_H_
diff --git a/base/allocator/partition_allocator/page_allocator_internals_posix.h b/base/allocator/partition_allocator/page_allocator_internals_posix.h
new file mode 100644
index 0000000..a579266
--- /dev/null
+++ b/base/allocator/partition_allocator/page_allocator_internals_posix.h
@@ -0,0 +1,183 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_POSIX_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_POSIX_H_
+
+#include <errno.h>
+#include <sys/mman.h>
+
+#include <algorithm>
+#include <atomic>
+
+#if defined(OS_MACOSX)
+#include <mach/mach.h>
+#endif
+#if defined(OS_LINUX)
+#include <sys/resource.h>
+#endif
+
+#include "build/build_config.h"
+
+#ifndef MAP_ANONYMOUS
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
+namespace base {
+
+// |mmap| uses a nearby address if the hint address is blocked.
+const bool kHintIsAdvisory = true;
+std::atomic<int32_t> s_allocPageErrorCode{0};
+
+int GetAccessFlags(PageAccessibilityConfiguration accessibility) {
+  switch (accessibility) {
+    case PageRead:
+      return PROT_READ;
+    case PageReadWrite:
+      return PROT_READ | PROT_WRITE;
+    case PageReadExecute:
+      return PROT_READ | PROT_EXEC;
+    case PageReadWriteExecute:
+      return PROT_READ | PROT_WRITE | PROT_EXEC;
+    default:
+      NOTREACHED();
+      FALLTHROUGH;
+    case PageInaccessible:
+      return PROT_NONE;
+  }
+}
+
+#if defined(OS_LINUX) && defined(ARCH_CPU_64_BITS)
+
+// Multiple guarded memory regions may exceed the process address space limit.
+// This function will raise or lower the limit by |amount|.
+bool AdjustAddressSpaceLimit(int64_t amount) {
+  struct rlimit old_rlimit;
+  if (getrlimit(RLIMIT_AS, &old_rlimit))
+    return false;
+  const rlim_t new_limit =
+      CheckAdd(old_rlimit.rlim_cur, amount).ValueOrDefault(old_rlimit.rlim_max);
+  const struct rlimit new_rlimit = {std::min(new_limit, old_rlimit.rlim_max),
+                                    old_rlimit.rlim_max};
+  // setrlimit will fail if limit > old_rlimit.rlim_max.
+  return setrlimit(RLIMIT_AS, &new_rlimit) == 0;
+}
+
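+// For example, allocating an 8 GiB PageInaccessible guard region first raises
+// RLIMIT_AS by 8 GiB via AdjustAddressSpaceLimit() (see AllocPages()), and
+// the matching FreePagesInternal() lowers the limit again when the region is
+// freed.
+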
+// Current WASM guarded memory regions have 8 GiB of address space. There are
+// schemes that reduce that to 4 GiB.
+constexpr size_t kMinimumGuardedMemorySize = 1ULL << 32;  // 4 GiB
+
+#endif  // defined(OS_LINUX) && defined(ARCH_CPU_64_BITS)
+
+void* SystemAllocPagesInternal(void* hint,
+                               size_t length,
+                               PageAccessibilityConfiguration accessibility,
+                               PageTag page_tag,
+                               bool commit) {
+#if defined(OS_MACOSX)
+  // Use a custom tag to make it easier to distinguish PartitionAlloc regions
+  // in vmmap(1). Tags in the range 240-255 are supported.
+  DCHECK_LE(PageTag::kFirst, page_tag);
+  DCHECK_GE(PageTag::kLast, page_tag);
+  int fd = VM_MAKE_TAG(static_cast<int>(page_tag));
+#else
+  int fd = -1;
+#endif
+
+  int access_flag = GetAccessFlags(accessibility);
+  void* ret =
+      mmap(hint, length, access_flag, MAP_ANONYMOUS | MAP_PRIVATE, fd, 0);
+  if (ret == MAP_FAILED) {
+    s_allocPageErrorCode = errno;
+    ret = nullptr;
+  }
+  return ret;
+}
+
+void* TrimMappingInternal(void* base,
+                          size_t base_length,
+                          size_t trim_length,
+                          PageAccessibilityConfiguration accessibility,
+                          bool commit,
+                          size_t pre_slack,
+                          size_t post_slack) {
+  void* ret = base;
+  // We can resize the allocation run. Release unneeded memory before and after
+  // the aligned range.
+  if (pre_slack) {
+    int res = munmap(base, pre_slack);
+    CHECK(!res);
+    ret = reinterpret_cast<char*>(base) + pre_slack;
+  }
+  if (post_slack) {
+    int res = munmap(reinterpret_cast<char*>(ret) + trim_length, post_slack);
+    CHECK(!res);
+  }
+  return ret;
+}
+
+bool SetSystemPagesAccessInternal(
+    void* address,
+    size_t length,
+    PageAccessibilityConfiguration accessibility) {
+  return 0 == mprotect(address, length, GetAccessFlags(accessibility));
+}
+
+void FreePagesInternal(void* address, size_t length) {
+  CHECK(!munmap(address, length));
+
+#if defined(OS_LINUX) && defined(ARCH_CPU_64_BITS)
+  // Restore the address space limit.
+  if (length >= kMinimumGuardedMemorySize) {
+    CHECK(AdjustAddressSpaceLimit(-base::checked_cast<int64_t>(length)));
+  }
+#endif
+}
+
+void DecommitSystemPagesInternal(void* address, size_t length) {
+  // In POSIX, there is no decommit concept. Discarding is an effective way of
+  // implementing the Windows semantics where the OS is allowed to not swap the
+  // pages in the region.
+  //
+  // TODO(ajwong): Also explore setting PageInaccessible to make the protection
+  // semantics consistent between Windows and POSIX. This might have a perf cost
+  // though as both decommit and recommit would incur an extra syscall.
+  // http://crbug.com/766882
+  DiscardSystemPages(address, length);
+}
+
+bool RecommitSystemPagesInternal(void* address,
+                                 size_t length,
+                                 PageAccessibilityConfiguration accessibility) {
+#if defined(OS_MACOSX)
+  // On macOS, to update accounting, we need to make another syscall. For more
+  // details, see https://crbug.com/823915.
+  madvise(address, length, MADV_FREE_REUSE);
+#endif
+
+  // On POSIX systems, the caller need only read the memory to recommit it.
+  // This works because the API requires the permissions to be the same as
+  // before decommitting, and every allowed configuration is readable.
+  return true;
+}
+
+void DiscardSystemPagesInternal(void* address, size_t length) {
+#if defined(OS_MACOSX)
+  int ret = madvise(address, length, MADV_FREE_REUSABLE);
+  if (ret) {
+    // MADV_FREE_REUSABLE sometimes fails, so fall back to MADV_DONTNEED.
+    ret = madvise(address, length, MADV_DONTNEED);
+  }
+  CHECK(0 == ret);
+#else
+  // We have experimented with other flags, but with suboptimal results.
+  //
+  // MADV_FREE (Linux): Makes our memory measurements less predictable;
+  // performance benefits unclear.
+  //
+  // Therefore, we just do the simple thing: MADV_DONTNEED.
+  CHECK(!madvise(address, length, MADV_DONTNEED));
+#endif
+}
+
+}  // namespace base
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_POSIX_H_
diff --git a/base/allocator/partition_allocator/page_allocator_internals_win.h b/base/allocator/partition_allocator/page_allocator_internals_win.h
new file mode 100644
index 0000000..1b6adb2
--- /dev/null
+++ b/base/allocator/partition_allocator/page_allocator_internals_win.h
@@ -0,0 +1,121 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_WIN_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_WIN_H_
+
+#include <windows.h>
+
+#include <atomic>
+
+#include "base/allocator/partition_allocator/page_allocator_internal.h"
+
+namespace base {
+
+// |VirtualAlloc| will fail if allocation at the hint address is blocked.
+const bool kHintIsAdvisory = false;
+std::atomic<int32_t> s_allocPageErrorCode{ERROR_SUCCESS};
+
+int GetAccessFlags(PageAccessibilityConfiguration accessibility) {
+  switch (accessibility) {
+    case PageRead:
+      return PAGE_READONLY;
+    case PageReadWrite:
+      return PAGE_READWRITE;
+    case PageReadExecute:
+      return PAGE_EXECUTE_READ;
+    case PageReadWriteExecute:
+      return PAGE_EXECUTE_READWRITE;
+    default:
+      NOTREACHED();
+      FALLTHROUGH;
+    case PageInaccessible:
+      return PAGE_NOACCESS;
+  }
+}
+
+void* SystemAllocPagesInternal(void* hint,
+                               size_t length,
+                               PageAccessibilityConfiguration accessibility,
+                               PageTag page_tag,
+                               bool commit) {
+  DWORD access_flag = GetAccessFlags(accessibility);
+  const DWORD type_flags = commit ? (MEM_RESERVE | MEM_COMMIT) : MEM_RESERVE;
+  void* ret = VirtualAlloc(hint, length, type_flags, access_flag);
+  if (ret == nullptr) {
+    s_allocPageErrorCode = GetLastError();
+  }
+  return ret;
+}
+
+void* TrimMappingInternal(void* base,
+                          size_t base_length,
+                          size_t trim_length,
+                          PageAccessibilityConfiguration accessibility,
+                          bool commit,
+                          size_t pre_slack,
+                          size_t post_slack) {
+  void* ret = base;
+  if (pre_slack || post_slack) {
+    // We cannot resize the allocation run. Free it and retry at the aligned
+    // address within the freed range.
+    ret = reinterpret_cast<char*>(base) + pre_slack;
+    FreePages(base, base_length);
+    ret = SystemAllocPages(ret, trim_length, accessibility, PageTag::kChromium,
+                           commit);
+  }
+  return ret;
+}
+
+bool SetSystemPagesAccessInternal(
+    void* address,
+    size_t length,
+    PageAccessibilityConfiguration accessibility) {
+  if (accessibility == PageInaccessible) {
+    return VirtualFree(address, length, MEM_DECOMMIT) != 0;
+  } else {
+    return nullptr != VirtualAlloc(address, length, MEM_COMMIT,
+                                   GetAccessFlags(accessibility));
+  }
+}
+
+void FreePagesInternal(void* address, size_t length) {
+  CHECK(VirtualFree(address, 0, MEM_RELEASE));
+}
+
+void DecommitSystemPagesInternal(void* address, size_t length) {
+  CHECK(SetSystemPagesAccess(address, length, PageInaccessible));
+}
+
+bool RecommitSystemPagesInternal(void* address,
+                                 size_t length,
+                                 PageAccessibilityConfiguration accessibility) {
+  return SetSystemPagesAccess(address, length, accessibility);
+}
+
+void DiscardSystemPagesInternal(void* address, size_t length) {
+  // On Windows, discarded pages are not returned to the system immediately and
+  // not guaranteed to be zeroed when returned to the application.
+  using DiscardVirtualMemoryFunction =
+      DWORD(WINAPI*)(PVOID virtualAddress, SIZE_T size);
+  static DiscardVirtualMemoryFunction discard_virtual_memory =
+      reinterpret_cast<DiscardVirtualMemoryFunction>(-1);
+  if (discard_virtual_memory ==
+      reinterpret_cast<DiscardVirtualMemoryFunction>(-1))
+    discard_virtual_memory =
+        reinterpret_cast<DiscardVirtualMemoryFunction>(GetProcAddress(
+            GetModuleHandle(L"Kernel32.dll"), "DiscardVirtualMemory"));
+  // Use DiscardVirtualMemory when available because it releases faster than
+  // MEM_RESET.
+  DWORD ret = 1;
+  if (discard_virtual_memory) {
+    ret = discard_virtual_memory(address, length);
+  }
+  // DiscardVirtualMemory is buggy in Win10 SP0, so fall back to MEM_RESET on
+  // failure.
+  if (ret) {
+    void* ptr = VirtualAlloc(address, length, MEM_RESET, PAGE_READWRITE);
+    CHECK(ptr);
+  }
+}
+
+}  // namespace base
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_WIN_H_
diff --git a/base/allocator/partition_allocator/page_allocator_unittest.cc b/base/allocator/partition_allocator/page_allocator_unittest.cc
new file mode 100644
index 0000000..22c6455
--- /dev/null
+++ b/base/allocator/partition_allocator/page_allocator_unittest.cc
@@ -0,0 +1,224 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/page_allocator.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <algorithm>
+
+#include "base/allocator/partition_allocator/address_space_randomization.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_POSIX) && !defined(OS_FUCHSIA)
+#include <setjmp.h>
+#include <signal.h>
+#include <sys/mman.h>
+#include <sys/time.h>
+#endif  // defined(OS_POSIX) && !defined(OS_FUCHSIA)
+
+#if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+
+namespace base {
+
+namespace {
+
+// Any number of bytes that can be allocated with no trouble.
+constexpr size_t kEasyAllocSize =
+    (1024 * 1024) & ~(kPageAllocationGranularity - 1);
+
+// A huge amount of memory, greater than or equal to the ASLR space.
+constexpr size_t kHugeMemoryAmount =
+    std::max(internal::kASLRMask, std::size_t{2} * internal::kASLRMask);
+
+}  // namespace
+
+TEST(PageAllocatorTest, Rounding) {
+  EXPECT_EQ(0u, RoundUpToSystemPage(0u));
+  EXPECT_EQ(kSystemPageSize, RoundUpToSystemPage(1));
+  EXPECT_EQ(kSystemPageSize, RoundUpToSystemPage(kSystemPageSize - 1));
+  EXPECT_EQ(kSystemPageSize, RoundUpToSystemPage(kSystemPageSize));
+  EXPECT_EQ(2 * kSystemPageSize, RoundUpToSystemPage(kSystemPageSize + 1));
+  EXPECT_EQ(0u, RoundDownToSystemPage(0u));
+  EXPECT_EQ(0u, RoundDownToSystemPage(kSystemPageSize - 1));
+  EXPECT_EQ(kSystemPageSize, RoundDownToSystemPage(kSystemPageSize));
+  EXPECT_EQ(kSystemPageSize, RoundDownToSystemPage(kSystemPageSize + 1));
+  EXPECT_EQ(kSystemPageSize, RoundDownToSystemPage(2 * kSystemPageSize - 1));
+  EXPECT_EQ(0u, RoundUpToPageAllocationGranularity(0u));
+  EXPECT_EQ(kPageAllocationGranularity, RoundUpToPageAllocationGranularity(1));
+  EXPECT_EQ(kPageAllocationGranularity,
+            RoundUpToPageAllocationGranularity(kPageAllocationGranularity - 1));
+  EXPECT_EQ(kPageAllocationGranularity,
+            RoundUpToPageAllocationGranularity(kPageAllocationGranularity));
+  EXPECT_EQ(2 * kPageAllocationGranularity,
+            RoundUpToPageAllocationGranularity(kPageAllocationGranularity + 1));
+  EXPECT_EQ(0u, RoundDownToPageAllocationGranularity(0u));
+  EXPECT_EQ(
+      0u, RoundDownToPageAllocationGranularity(kPageAllocationGranularity - 1));
+  EXPECT_EQ(kPageAllocationGranularity,
+            RoundDownToPageAllocationGranularity(kPageAllocationGranularity));
+  EXPECT_EQ(kPageAllocationGranularity, RoundDownToPageAllocationGranularity(
+                                            kPageAllocationGranularity + 1));
+  EXPECT_EQ(
+      kPageAllocationGranularity,
+      RoundDownToPageAllocationGranularity(2 * kPageAllocationGranularity - 1));
+}
+
+// Test that failed page allocations invoke base::ReleaseReservation().
+// We detect this by making a reservation and ensuring that after failure, we
+// can make a new reservation.
+TEST(PageAllocatorTest, AllocFailure) {
+  // Release any reservation made by another test.
+  ReleaseReservation();
+
+  // We can make a reservation.
+  EXPECT_TRUE(ReserveAddressSpace(kEasyAllocSize));
+
+  // We can't make another reservation until we trigger an allocation failure.
+  EXPECT_FALSE(ReserveAddressSpace(kEasyAllocSize));
+
+  size_t size = kHugeMemoryAmount;
+  // Skip the test for sanitizers and platforms with ASLR turned off.
+  if (size == 0)
+    return;
+
+  void* result = AllocPages(nullptr, size, kPageAllocationGranularity,
+                            PageInaccessible, PageTag::kChromium, false);
+  if (result == nullptr) {
+    // We triggered allocation failure. Our reservation should have been
+    // released, and we should be able to make a new reservation.
+    EXPECT_TRUE(ReserveAddressSpace(kEasyAllocSize));
+    ReleaseReservation();
+    return;
+  }
+  // We couldn't fail. Make sure reservation is still there.
+  EXPECT_FALSE(ReserveAddressSpace(kEasyAllocSize));
+}
+
+// TODO(crbug.com/765801): Test failed on chromium.win/Win10 Tests x64.
+#if defined(OS_WIN) && defined(ARCH_CPU_64_BITS)
+#define MAYBE_ReserveAddressSpace DISABLED_ReserveAddressSpace
+#else
+#define MAYBE_ReserveAddressSpace ReserveAddressSpace
+#endif  // defined(OS_WIN) && defined(ARCH_CPU_64_BITS)
+
+// Test that reserving address space can fail.
+TEST(PageAllocatorTest, MAYBE_ReserveAddressSpace) {
+  // Release any reservation made by another test.
+  ReleaseReservation();
+
+  size_t size = kHugeMemoryAmount;
+  // Skip the test for sanitizers and platforms with ASLR turned off.
+  if (size == 0)
+    return;
+
+  bool success = ReserveAddressSpace(size);
+  if (!success) {
+    EXPECT_TRUE(ReserveAddressSpace(kEasyAllocSize));
+    return;
+  }
+  // We couldn't fail. Make sure reservation is still there.
+  EXPECT_FALSE(ReserveAddressSpace(kEasyAllocSize));
+}
+
+TEST(PageAllocatorTest, AllocAndFreePages) {
+  void* buffer = AllocPages(nullptr, kPageAllocationGranularity,
+                            kPageAllocationGranularity, PageReadWrite,
+                            PageTag::kChromium, true);
+  EXPECT_TRUE(buffer);
+  int* buffer0 = reinterpret_cast<int*>(buffer);
+  *buffer0 = 42;
+  EXPECT_EQ(42, *buffer0);
+  FreePages(buffer, kPageAllocationGranularity);
+}
+
+// Test permission setting on POSIX, where we can set a trap handler.
+#if defined(OS_POSIX) && !defined(OS_FUCHSIA)
+
+namespace {
+sigjmp_buf g_continuation;
+
+void SignalHandler(int signal, siginfo_t* info, void*) {
+  siglongjmp(g_continuation, 1);
+}
+}  // namespace
+
+// On Mac, sometimes we get SIGBUS instead of SIGSEGV, so handle that too.
+#if defined(OS_MACOSX)
+#define EXTRA_FAULT_BEGIN_ACTION() \
+  struct sigaction old_bus_action; \
+  sigaction(SIGBUS, &action, &old_bus_action);
+#define EXTRA_FAULT_END_ACTION() sigaction(SIGBUS, &old_bus_action, nullptr);
+#else
+#define EXTRA_FAULT_BEGIN_ACTION()
+#define EXTRA_FAULT_END_ACTION()
+#endif
+
+// Install a signal handler so we can catch the fault we're about to trigger.
+#define FAULT_TEST_BEGIN()                  \
+  struct sigaction action = {};             \
+  struct sigaction old_action = {};         \
+  action.sa_sigaction = SignalHandler;      \
+  sigemptyset(&action.sa_mask);             \
+  action.sa_flags = SA_SIGINFO;             \
+  sigaction(SIGSEGV, &action, &old_action); \
+  EXTRA_FAULT_BEGIN_ACTION();               \
+  int const save_sigs = 1;                  \
+  if (!sigsetjmp(g_continuation, save_sigs)) {
+// Fault generating code goes here...
+
+// Handle when sigsetjmp returns nonzero (we are returning from our handler).
+#define FAULT_TEST_END()                      \
+  }                                           \
+  else {                                      \
+    sigaction(SIGSEGV, &old_action, nullptr); \
+    EXTRA_FAULT_END_ACTION();                 \
+  }
+
+TEST(PageAllocatorTest, InaccessiblePages) {
+  void* buffer = AllocPages(nullptr, kPageAllocationGranularity,
+                            kPageAllocationGranularity, PageInaccessible,
+                            PageTag::kChromium, true);
+  EXPECT_TRUE(buffer);
+
+  FAULT_TEST_BEGIN();
+
+  // Reading from buffer should fault.
+  int* buffer0 = reinterpret_cast<int*>(buffer);
+  int buffer0_contents = *buffer0;
+  EXPECT_EQ(buffer0_contents, *buffer0);
+  EXPECT_TRUE(false);
+
+  FAULT_TEST_END();
+
+  FreePages(buffer, kPageAllocationGranularity);
+}
+
+TEST(PageAllocatorTest, ReadExecutePages) {
+  void* buffer = AllocPages(nullptr, kPageAllocationGranularity,
+                            kPageAllocationGranularity, PageReadExecute,
+                            PageTag::kChromium, true);
+  EXPECT_TRUE(buffer);
+  int* buffer0 = reinterpret_cast<int*>(buffer);
+  // Reading from buffer should succeed.
+  int buffer0_contents = *buffer0;
+
+  FAULT_TEST_BEGIN();
+
+  // Writing to buffer should fault.
+  *buffer0 = ~buffer0_contents;
+  EXPECT_TRUE(false);
+
+  FAULT_TEST_END();
+
+  // Make sure no write occurred.
+  EXPECT_EQ(buffer0_contents, *buffer0);
+  FreePages(buffer, kPageAllocationGranularity);
+}
+
+#endif  // defined(OS_POSIX) && !defined(OS_FUCHSIA)
+
+}  // namespace base
+
+#endif  // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
diff --git a/base/allocator/partition_allocator/partition_alloc.cc b/base/allocator/partition_allocator/partition_alloc.cc
new file mode 100644
index 0000000..8554673
--- /dev/null
+++ b/base/allocator/partition_allocator/partition_alloc.cc
@@ -0,0 +1,727 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/partition_alloc.h"
+
+#include <string.h>
+#include <type_traits>
+
+#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
+#include "base/allocator/partition_allocator/partition_oom.h"
+#include "base/allocator/partition_allocator/partition_page.h"
+#include "base/allocator/partition_allocator/spin_lock.h"
+#include "base/compiler_specific.h"
+#include "base/lazy_instance.h"
+
+// Two partition pages are used as guard / metadata pages, so make sure the
+// super page size is bigger.
+static_assert(base::kPartitionPageSize * 4 <= base::kSuperPageSize,
+              "ok super page size");
+static_assert(!(base::kSuperPageSize % base::kPartitionPageSize),
+              "ok super page multiple");
+// Four system pages give us room to hack out a still-guard-paged piece
+// of metadata in the middle of a guard partition page.
+static_assert(base::kSystemPageSize * 4 <= base::kPartitionPageSize,
+              "ok partition page size");
+static_assert(!(base::kPartitionPageSize % base::kSystemPageSize),
+              "ok partition page multiple");
+static_assert(sizeof(base::internal::PartitionPage) <= base::kPageMetadataSize,
+              "PartitionPage should not be too big");
+static_assert(sizeof(base::internal::PartitionBucket) <=
+                  base::kPageMetadataSize,
+              "PartitionBucket should not be too big");
+static_assert(sizeof(base::internal::PartitionSuperPageExtentEntry) <=
+                  base::kPageMetadataSize,
+              "PartitionSuperPageExtentEntry should not be too big");
+static_assert(base::kPageMetadataSize * base::kNumPartitionPagesPerSuperPage <=
+                  base::kSystemPageSize,
+              "page metadata fits in hole");
+// Limit to prevent callers accidentally overflowing an int size.
+static_assert(base::kGenericMaxDirectMapped <=
+                  (1UL << 31) + base::kPageAllocationGranularity,
+              "maximum direct mapped allocation");
+// Check that some of our zanier calculations worked out as expected.
+static_assert(base::kGenericSmallestBucket == 8, "generic smallest bucket");
+static_assert(base::kGenericMaxBucketed == 983040, "generic max bucketed");
+static_assert(base::kMaxSystemPagesPerSlotSpan < (1 << 8),
+              "System pages per slot span must be less than 256.");
+
+namespace base {
+
+internal::PartitionRootBase::PartitionRootBase() = default;
+internal::PartitionRootBase::~PartitionRootBase() = default;
+PartitionRoot::PartitionRoot() = default;
+PartitionRoot::~PartitionRoot() = default;
+PartitionRootGeneric::PartitionRootGeneric() = default;
+PartitionRootGeneric::~PartitionRootGeneric() = default;
+PartitionAllocatorGeneric::PartitionAllocatorGeneric() = default;
+PartitionAllocatorGeneric::~PartitionAllocatorGeneric() = default;
+
+static LazyInstance<subtle::SpinLock>::Leaky g_initialized_lock =
+    LAZY_INSTANCE_INITIALIZER;
+static bool g_initialized = false;
+
+void (*internal::PartitionRootBase::gOomHandlingFunction)() = nullptr;
+PartitionAllocHooks::AllocationHook* PartitionAllocHooks::allocation_hook_ =
+    nullptr;
+PartitionAllocHooks::FreeHook* PartitionAllocHooks::free_hook_ = nullptr;
+
+static void PartitionAllocBaseInit(internal::PartitionRootBase* root) {
+  DCHECK(!root->initialized);
+  {
+    subtle::SpinLock::Guard guard(g_initialized_lock.Get());
+    if (!g_initialized) {
+      g_initialized = true;
+      // We mark the sentinel bucket/page as free to make sure it is skipped by
+      // our logic to find a new active page.
+      internal::PartitionBucket::get_sentinel_bucket()->active_pages_head =
+          internal::PartitionPage::get_sentinel_page();
+    }
+  }
+
+  root->initialized = true;
+
+  // This is a "magic" value so we can test if a root pointer is valid.
+  root->inverted_self = ~reinterpret_cast<uintptr_t>(root);
+}
+
+void PartitionAllocGlobalInit(void (*oom_handling_function)()) {
+  DCHECK(oom_handling_function);
+  internal::PartitionRootBase::gOomHandlingFunction = oom_handling_function;
+}
+
+void PartitionRoot::Init(size_t num_buckets, size_t max_allocation) {
+  PartitionAllocBaseInit(this);
+
+  this->num_buckets = num_buckets;
+  this->max_allocation = max_allocation;
+  size_t i;
+  for (i = 0; i < this->num_buckets; ++i) {
+    internal::PartitionBucket* bucket = &this->buckets()[i];
+    if (!i)
+      bucket->Init(kAllocationGranularity);
+    else
+      bucket->Init(i << kBucketShift);
+  }
+}
+
+void PartitionRootGeneric::Init() {
+  subtle::SpinLock::Guard guard(this->lock);
+
+  PartitionAllocBaseInit(this);
+
+  // Precalculate some shift and mask constants used in the hot path.
+  // Example: malloc(41) == 101001 binary.
+  // Order is 6: (1 << (6 - 1)) == 32 is the highest set bit.
+  // order_index is the next three MSBs == 010 == 2.
+  // sub_order_index_mask is a mask for the remaining bits == 11 (masking to 01
+  // for the sub_order_index).
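+  //
+  // Illustrative sketch (not the hot-path code): the order of a size is the
+  // position of its highest set bit, e.g. on a 64-bit target with GCC/Clang:
+  //   size_t order = kBitsPerSizeT - __builtin_clzl(41);  // == 6 for size 41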
+  size_t order;
+  for (order = 0; order <= kBitsPerSizeT; ++order) {
+    size_t order_index_shift;
+    if (order < kGenericNumBucketsPerOrderBits + 1)
+      order_index_shift = 0;
+    else
+      order_index_shift = order - (kGenericNumBucketsPerOrderBits + 1);
+    this->order_index_shifts[order] = order_index_shift;
+    size_t sub_order_index_mask;
+    if (order == kBitsPerSizeT) {
+      // This avoids invoking undefined behavior for an excessive shift.
+      sub_order_index_mask =
+          static_cast<size_t>(-1) >> (kGenericNumBucketsPerOrderBits + 1);
+    } else {
+      sub_order_index_mask = ((static_cast<size_t>(1) << order) - 1) >>
+                             (kGenericNumBucketsPerOrderBits + 1);
+    }
+    this->order_sub_index_masks[order] = sub_order_index_mask;
+  }
+
+  // Set up the actual usable buckets first.
+  // Note that typical values (i.e. a min allocation size of 8) will result in
+  // pseudo buckets (size == 9, etc., or more generally, sizes that are not a
+  // multiple of the smallest allocation granularity).
+  // We avoid them in the bucket lookup map, but we tolerate them to keep the
+  // code simpler and the structures more generic.
+  size_t i, j;
+  size_t current_size = kGenericSmallestBucket;
+  size_t currentIncrement =
+      kGenericSmallestBucket >> kGenericNumBucketsPerOrderBits;
+  internal::PartitionBucket* bucket = &this->buckets[0];
+  for (i = 0; i < kGenericNumBucketedOrders; ++i) {
+    for (j = 0; j < kGenericNumBucketsPerOrder; ++j) {
+      bucket->Init(current_size);
+      // Disable pseudo buckets so that touching them faults.
+      if (current_size % kGenericSmallestBucket)
+        bucket->active_pages_head = nullptr;
+      current_size += currentIncrement;
+      ++bucket;
+    }
+    currentIncrement <<= 1;
+  }
+  DCHECK(current_size == 1 << kGenericMaxBucketedOrder);
+  DCHECK(bucket == &this->buckets[0] + kGenericNumBuckets);
+
+  // Then set up the fast size -> bucket lookup table.
+  bucket = &this->buckets[0];
+  internal::PartitionBucket** bucketPtr = &this->bucket_lookups[0];
+  for (order = 0; order <= kBitsPerSizeT; ++order) {
+    for (j = 0; j < kGenericNumBucketsPerOrder; ++j) {
+      if (order < kGenericMinBucketedOrder) {
+        // Use the bucket of the finest granularity for malloc(0) etc.
+        *bucketPtr++ = &this->buckets[0];
+      } else if (order > kGenericMaxBucketedOrder) {
+        *bucketPtr++ = internal::PartitionBucket::get_sentinel_bucket();
+      } else {
+        internal::PartitionBucket* validBucket = bucket;
+        // Skip over invalid buckets.
+        while (validBucket->slot_size % kGenericSmallestBucket)
+          validBucket++;
+        *bucketPtr++ = validBucket;
+        bucket++;
+      }
+    }
+  }
+  DCHECK(bucket == &this->buckets[0] + kGenericNumBuckets);
+  DCHECK(bucketPtr == &this->bucket_lookups[0] +
+                          ((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder));
+  // And there's one last bucket lookup that will be hit for e.g. malloc(-1),
+  // which tries to overflow to a non-existent order.
+  *bucketPtr = internal::PartitionBucket::get_sentinel_bucket();
+}
+
+bool PartitionReallocDirectMappedInPlace(PartitionRootGeneric* root,
+                                         internal::PartitionPage* page,
+                                         size_t raw_size) {
+  DCHECK(page->bucket->is_direct_mapped());
+
+  raw_size = internal::PartitionCookieSizeAdjustAdd(raw_size);
+
+  // Note that the new size might be a bucketed size; this function is called
+  // whenever we're reallocating a direct mapped allocation.
+  size_t new_size = internal::PartitionBucket::get_direct_map_size(raw_size);
+  if (new_size < kGenericMinDirectMappedDownsize)
+    return false;
+
+  // bucket->slot_size is the current size of the allocation.
+  size_t current_size = page->bucket->slot_size;
+  if (new_size == current_size)
+    return true;
+
+  char* char_ptr = static_cast<char*>(internal::PartitionPage::ToPointer(page));
+
+  if (new_size < current_size) {
+    size_t map_size =
+        internal::PartitionDirectMapExtent::FromPage(page)->map_size;
+
+    // Don't reallocate in-place if the new size is less than 80% of the full
+    // map size, to avoid holding on to too much unused address space. For
+    // example, a 10-page map is only shrunk in place if it keeps at least
+    // 8 pages.
+    if ((new_size / kSystemPageSize) * 5 < (map_size / kSystemPageSize) * 4)
+      return false;
+
+    // Shrink by decommitting unneeded pages and making them inaccessible.
+    size_t decommitSize = current_size - new_size;
+    root->DecommitSystemPages(char_ptr + new_size, decommitSize);
+    CHECK(SetSystemPagesAccess(char_ptr + new_size, decommitSize,
+                               PageInaccessible));
+  } else if (new_size <=
+             internal::PartitionDirectMapExtent::FromPage(page)->map_size) {
+    // Grow within the actually allocated memory. Just need to make the
+    // pages accessible again.
+    size_t recommit_size = new_size - current_size;
+    CHECK(SetSystemPagesAccess(char_ptr + current_size, recommit_size,
+                               PageReadWrite));
+    root->RecommitSystemPages(char_ptr + current_size, recommit_size);
+
+#if DCHECK_IS_ON()
+    memset(char_ptr + current_size, internal::kUninitializedByte,
+           recommit_size);
+#endif
+  } else {
+    // We can't perform the realloc in-place.
+    // TODO: support this too when possible.
+    return false;
+  }
+
+#if DCHECK_IS_ON()
+  // Write a new trailing cookie.
+  internal::PartitionCookieWriteValue(char_ptr + raw_size -
+                                      internal::kCookieSize);
+#endif
+
+  page->set_raw_size(raw_size);
+  DCHECK(page->get_raw_size() == raw_size);
+
+  page->bucket->slot_size = new_size;
+  return true;
+}
+
+void* PartitionReallocGenericFlags(PartitionRootGeneric* root,
+                                   int flags,
+                                   void* ptr,
+                                   size_t new_size,
+                                   const char* type_name) {
+#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+  void* result = realloc(ptr, new_size);
+  CHECK(result || flags & PartitionAllocReturnNull);
+  return result;
+#else
+  if (UNLIKELY(!ptr))
+    return PartitionAllocGenericFlags(root, flags, new_size, type_name);
+  if (UNLIKELY(!new_size)) {
+    root->Free(ptr);
+    return nullptr;
+  }
+
+  if (new_size > kGenericMaxDirectMapped) {
+    if (flags & PartitionAllocReturnNull)
+      return nullptr;
+    else
+      internal::PartitionExcessiveAllocationSize();
+  }
+
+  internal::PartitionPage* page = internal::PartitionPage::FromPointer(
+      internal::PartitionCookieFreePointerAdjust(ptr));
+  // TODO(palmer): See if we can afford to make this a CHECK.
+  DCHECK(root->IsValidPage(page));
+
+  if (UNLIKELY(page->bucket->is_direct_mapped())) {
+    // We may be able to perform the realloc in place by changing the
+    // accessibility of memory pages and, if reducing the size, decommitting
+    // them.
+    if (PartitionReallocDirectMappedInPlace(root, page, new_size)) {
+      PartitionAllocHooks::ReallocHookIfEnabled(ptr, ptr, new_size, type_name);
+      return ptr;
+    }
+  }
+
+  size_t actual_new_size = root->ActualSize(new_size);
+  size_t actual_old_size = PartitionAllocGetSize(ptr);
+
+  // TODO: note that tcmalloc will "ignore" a downsizing realloc() unless the
+  // new size is a significant percentage smaller. We could do the same if we
+  // determine it is a win.
+  if (actual_new_size == actual_old_size) {
+    // Trying to allocate a block of size new_size would give us a block of
+    // the same size as the one we've already got, so re-use the allocation
+    // after updating statistics (and cookies, if present).
+    page->set_raw_size(internal::PartitionCookieSizeAdjustAdd(new_size));
+#if DCHECK_IS_ON()
+    // Write a new trailing cookie when it is possible to keep track of
+    // |new_size| via the raw size pointer.
+    if (page->get_raw_size_ptr())
+      internal::PartitionCookieWriteValue(static_cast<char*>(ptr) + new_size);
+#endif
+    return ptr;
+  }
+
+  // This realloc cannot be resized in-place. Sadness.
+  void* ret = PartitionAllocGenericFlags(root, flags, new_size, type_name);
+  if (!ret) {
+    if (flags & PartitionAllocReturnNull)
+      return nullptr;
+    else
+      internal::PartitionExcessiveAllocationSize();
+  }
+
+  size_t copy_size = actual_old_size;
+  if (new_size < copy_size)
+    copy_size = new_size;
+
+  memcpy(ret, ptr, copy_size);
+  root->Free(ptr);
+  return ret;
+#endif
+}
+
+void* PartitionRootGeneric::Realloc(void* ptr,
+                                    size_t new_size,
+                                    const char* type_name) {
+  return PartitionReallocGenericFlags(this, 0, ptr, new_size, type_name);
+}
+
+static size_t PartitionPurgePage(internal::PartitionPage* page, bool discard) {
+  const internal::PartitionBucket* bucket = page->bucket;
+  size_t slot_size = bucket->slot_size;
+  if (slot_size < kSystemPageSize || !page->num_allocated_slots)
+    return 0;
+
+  size_t bucket_num_slots = bucket->get_slots_per_span();
+  size_t discardable_bytes = 0;
+
+  size_t raw_size = page->get_raw_size();
+  if (raw_size) {
+    uint32_t usedBytes = static_cast<uint32_t>(RoundUpToSystemPage(raw_size));
+    discardable_bytes = bucket->slot_size - usedBytes;
+    if (discardable_bytes && discard) {
+      char* ptr =
+          reinterpret_cast<char*>(internal::PartitionPage::ToPointer(page));
+      ptr += usedBytes;
+      DiscardSystemPages(ptr, discardable_bytes);
+    }
+    return discardable_bytes;
+  }
+
+  constexpr size_t kMaxSlotCount =
+      (kPartitionPageSize * kMaxPartitionPagesPerSlotSpan) / kSystemPageSize;
+  DCHECK(bucket_num_slots <= kMaxSlotCount);
+  DCHECK(page->num_unprovisioned_slots < bucket_num_slots);
+  size_t num_slots = bucket_num_slots - page->num_unprovisioned_slots;
+  char slot_usage[kMaxSlotCount];
+#if !defined(OS_WIN)
+  // On Windows, the last freelist entry should not be discarded, because
+  // DiscardVirtualMemory makes the contents of discarded memory undefined;
+  // |last_slot| is therefore only tracked on other platforms.
+  size_t last_slot = static_cast<size_t>(-1);
+#endif
+  memset(slot_usage, 1, num_slots);
+  char* ptr = reinterpret_cast<char*>(internal::PartitionPage::ToPointer(page));
+  // First, walk the freelist for this page and make a bitmap of which slots
+  // are not in use.
+  for (internal::PartitionFreelistEntry* entry = page->freelist_head; entry;
+       /**/) {
+    size_t slotIndex = (reinterpret_cast<char*>(entry) - ptr) / slot_size;
+    DCHECK(slotIndex < num_slots);
+    slot_usage[slotIndex] = 0;
+    entry = internal::PartitionFreelistEntry::Transform(entry->next);
+#if !defined(OS_WIN)
+    // If we have a slot where the masked freelist entry is 0, we can
+    // actually discard that freelist entry because touching a discarded
+    // page is guaranteed to return original content or 0.
+    // (Note that this optimization won't fire on big endian machines
+    // because the masking function is negation.)
+    if (!internal::PartitionFreelistEntry::Transform(entry))
+      last_slot = slotIndex;
+#endif
+  }
+
+  // If the slot(s) at the end of the slot span are not in use, we can
+  // truncate them entirely and rewrite the freelist.
+  size_t truncated_slots = 0;
+  while (!slot_usage[num_slots - 1]) {
+    truncated_slots++;
+    num_slots--;
+    DCHECK(num_slots);
+  }
+  // First, do the work of calculating the discardable bytes. Don't actually
+  // discard anything unless the discard flag was passed in.
+  if (truncated_slots) {
+    size_t unprovisioned_bytes = 0;
+    char* begin_ptr = ptr + (num_slots * slot_size);
+    char* end_ptr = begin_ptr + (slot_size * truncated_slots);
+    begin_ptr = reinterpret_cast<char*>(
+        RoundUpToSystemPage(reinterpret_cast<size_t>(begin_ptr)));
+    // We round the end pointer here up and not down because we're at the
+    // end of a slot span, so we "own" all the way up to the page boundary.
+    end_ptr = reinterpret_cast<char*>(
+        RoundUpToSystemPage(reinterpret_cast<size_t>(end_ptr)));
+    DCHECK(end_ptr <= ptr + bucket->get_bytes_per_span());
+    if (begin_ptr < end_ptr) {
+      unprovisioned_bytes = end_ptr - begin_ptr;
+      discardable_bytes += unprovisioned_bytes;
+    }
+    if (unprovisioned_bytes && discard) {
+      DCHECK(truncated_slots > 0);
+      size_t num_new_entries = 0;
+      page->num_unprovisioned_slots += static_cast<uint16_t>(truncated_slots);
+      // Rewrite the freelist.
+      internal::PartitionFreelistEntry** entry_ptr = &page->freelist_head;
+      for (size_t slotIndex = 0; slotIndex < num_slots; ++slotIndex) {
+        if (slot_usage[slotIndex])
+          continue;
+        auto* entry = reinterpret_cast<internal::PartitionFreelistEntry*>(
+            ptr + (slot_size * slotIndex));
+        *entry_ptr = internal::PartitionFreelistEntry::Transform(entry);
+        entry_ptr = reinterpret_cast<internal::PartitionFreelistEntry**>(entry);
+        num_new_entries++;
+#if !defined(OS_WIN)
+        last_slot = slotIndex;
+#endif
+      }
+      // Terminate the freelist chain.
+      *entry_ptr = nullptr;
+      // The freelist head is stored unmasked.
+      page->freelist_head =
+          internal::PartitionFreelistEntry::Transform(page->freelist_head);
+      DCHECK(num_new_entries == num_slots - page->num_allocated_slots);
+      // Discard the memory.
+      DiscardSystemPages(begin_ptr, unprovisioned_bytes);
+    }
+  }
+
+  // Next, walk the slots and for any not in use, consider where the system
+  // page boundaries occur. We can release any system pages back to the
+  // system as long as we don't interfere with a freelist pointer or an
+  // adjacent slot.
+  for (size_t i = 0; i < num_slots; ++i) {
+    if (slot_usage[i])
+      continue;
+    // The first address we can safely discard is just after the freelist
+    // pointer. There's one quirk: if the freelist pointer is actually
+    // null, we can discard that pointer value too.
+    char* begin_ptr = ptr + (i * slot_size);
+    char* end_ptr = begin_ptr + slot_size;
+#if !defined(OS_WIN)
+    if (i != last_slot)
+      begin_ptr += sizeof(internal::PartitionFreelistEntry);
+#else
+    begin_ptr += sizeof(internal::PartitionFreelistEntry);
+#endif
+    begin_ptr = reinterpret_cast<char*>(
+        RoundUpToSystemPage(reinterpret_cast<size_t>(begin_ptr)));
+    end_ptr = reinterpret_cast<char*>(
+        RoundDownToSystemPage(reinterpret_cast<size_t>(end_ptr)));
+    if (begin_ptr < end_ptr) {
+      size_t partial_slot_bytes = end_ptr - begin_ptr;
+      discardable_bytes += partial_slot_bytes;
+      if (discard)
+        DiscardSystemPages(begin_ptr, partial_slot_bytes);
+    }
+  }
+  return discardable_bytes;
+}
+
+static void PartitionPurgeBucket(internal::PartitionBucket* bucket) {
+  if (bucket->active_pages_head !=
+      internal::PartitionPage::get_sentinel_page()) {
+    for (internal::PartitionPage* page = bucket->active_pages_head; page;
+         page = page->next_page) {
+      DCHECK(page != internal::PartitionPage::get_sentinel_page());
+      PartitionPurgePage(page, true);
+    }
+  }
+}
+
+void PartitionRoot::PurgeMemory(int flags) {
+  if (flags & PartitionPurgeDecommitEmptyPages)
+    DecommitEmptyPages();
+  // We don't currently do anything for PartitionPurgeDiscardUnusedSystemPages
+  // here because that flag is only useful for allocations >= system page
+  // size. We only have allocations that large inside generic partitions
+  // at the moment.
+}
+
+void PartitionRootGeneric::PurgeMemory(int flags) {
+  subtle::SpinLock::Guard guard(this->lock);
+  if (flags & PartitionPurgeDecommitEmptyPages)
+    DecommitEmptyPages();
+  if (flags & PartitionPurgeDiscardUnusedSystemPages) {
+    for (size_t i = 0; i < kGenericNumBuckets; ++i) {
+      internal::PartitionBucket* bucket = &this->buckets[i];
+      if (bucket->slot_size >= kSystemPageSize)
+        PartitionPurgeBucket(bucket);
+    }
+  }
+}
+
+static void PartitionDumpPageStats(PartitionBucketMemoryStats* stats_out,
+                                   internal::PartitionPage* page) {
+  uint16_t bucket_num_slots = page->bucket->get_slots_per_span();
+
+  if (page->is_decommitted()) {
+    ++stats_out->num_decommitted_pages;
+    return;
+  }
+
+  stats_out->discardable_bytes += PartitionPurgePage(page, false);
+
+  size_t raw_size = page->get_raw_size();
+  if (raw_size) {
+    stats_out->active_bytes += static_cast<uint32_t>(raw_size);
+  } else {
+    stats_out->active_bytes +=
+        (page->num_allocated_slots * stats_out->bucket_slot_size);
+  }
+
+  size_t page_bytes_resident =
+      RoundUpToSystemPage((bucket_num_slots - page->num_unprovisioned_slots) *
+                          stats_out->bucket_slot_size);
+  stats_out->resident_bytes += page_bytes_resident;
+  if (page->is_empty()) {
+    stats_out->decommittable_bytes += page_bytes_resident;
+    ++stats_out->num_empty_pages;
+  } else if (page->is_full()) {
+    ++stats_out->num_full_pages;
+  } else {
+    DCHECK(page->is_active());
+    ++stats_out->num_active_pages;
+  }
+}
+
+static void PartitionDumpBucketStats(PartitionBucketMemoryStats* stats_out,
+                                     const internal::PartitionBucket* bucket) {
+  DCHECK(!bucket->is_direct_mapped());
+  stats_out->is_valid = false;
+  // If the active page list is empty (==
+  // internal::PartitionPage::get_sentinel_page()),
+  // the bucket might still need to be reported if it has a list of empty,
+  // decommitted or full pages.
+  if (bucket->active_pages_head ==
+          internal::PartitionPage::get_sentinel_page() &&
+      !bucket->empty_pages_head && !bucket->decommitted_pages_head &&
+      !bucket->num_full_pages)
+    return;
+
+  memset(stats_out, '\0', sizeof(*stats_out));
+  stats_out->is_valid = true;
+  stats_out->is_direct_map = false;
+  stats_out->num_full_pages = static_cast<size_t>(bucket->num_full_pages);
+  stats_out->bucket_slot_size = bucket->slot_size;
+  uint16_t bucket_num_slots = bucket->get_slots_per_span();
+  size_t bucket_useful_storage = stats_out->bucket_slot_size * bucket_num_slots;
+  stats_out->allocated_page_size = bucket->get_bytes_per_span();
+  stats_out->active_bytes = bucket->num_full_pages * bucket_useful_storage;
+  stats_out->resident_bytes =
+      bucket->num_full_pages * stats_out->allocated_page_size;
+
+  for (internal::PartitionPage* page = bucket->empty_pages_head; page;
+       page = page->next_page) {
+    DCHECK(page->is_empty() || page->is_decommitted());
+    PartitionDumpPageStats(stats_out, page);
+  }
+  for (internal::PartitionPage* page = bucket->decommitted_pages_head; page;
+       page = page->next_page) {
+    DCHECK(page->is_decommitted());
+    PartitionDumpPageStats(stats_out, page);
+  }
+
+  if (bucket->active_pages_head !=
+      internal::PartitionPage::get_sentinel_page()) {
+    for (internal::PartitionPage* page = bucket->active_pages_head; page;
+         page = page->next_page) {
+      DCHECK(page != internal::PartitionPage::get_sentinel_page());
+      PartitionDumpPageStats(stats_out, page);
+    }
+  }
+}
+
+void PartitionRootGeneric::DumpStats(const char* partition_name,
+                                     bool is_light_dump,
+                                     PartitionStatsDumper* dumper) {
+  PartitionMemoryStats stats = {0};
+  stats.total_mmapped_bytes =
+      this->total_size_of_super_pages + this->total_size_of_direct_mapped_pages;
+  stats.total_committed_bytes = this->total_size_of_committed_pages;
+
+  size_t direct_mapped_allocations_total_size = 0;
+
+  static const size_t kMaxReportableDirectMaps = 4096;
+
+  // Allocate on the heap rather than on the stack to avoid stack overflow
+  // skirmishes (on Windows, in particular).
+  std::unique_ptr<uint32_t[]> direct_map_lengths = nullptr;
+  if (!is_light_dump) {
+    direct_map_lengths =
+        std::unique_ptr<uint32_t[]>(new uint32_t[kMaxReportableDirectMaps]);
+  }
+
+  PartitionBucketMemoryStats bucket_stats[kGenericNumBuckets];
+  size_t num_direct_mapped_allocations = 0;
+  {
+    subtle::SpinLock::Guard guard(this->lock);
+
+    for (size_t i = 0; i < kGenericNumBuckets; ++i) {
+      const internal::PartitionBucket* bucket = &this->buckets[i];
+      // Don't report the pseudo buckets that the generic allocator sets up in
+      // order to preserve a fast size->bucket map (see
+      // PartitionRootGeneric::Init() for details).
+      if (!bucket->active_pages_head)
+        bucket_stats[i].is_valid = false;
+      else
+        PartitionDumpBucketStats(&bucket_stats[i], bucket);
+      if (bucket_stats[i].is_valid) {
+        stats.total_resident_bytes += bucket_stats[i].resident_bytes;
+        stats.total_active_bytes += bucket_stats[i].active_bytes;
+        stats.total_decommittable_bytes += bucket_stats[i].decommittable_bytes;
+        stats.total_discardable_bytes += bucket_stats[i].discardable_bytes;
+      }
+    }
+
+    for (internal::PartitionDirectMapExtent *extent = this->direct_map_list;
+         extent && num_direct_mapped_allocations < kMaxReportableDirectMaps;
+         extent = extent->next_extent, ++num_direct_mapped_allocations) {
+      DCHECK(!extent->next_extent ||
+             extent->next_extent->prev_extent == extent);
+      size_t slot_size = extent->bucket->slot_size;
+      direct_mapped_allocations_total_size += slot_size;
+      if (is_light_dump)
+        continue;
+      direct_map_lengths[num_direct_mapped_allocations] = slot_size;
+    }
+  }
+
+  if (!is_light_dump) {
+    // Call |PartitionsDumpBucketStats| after collecting stats, because it
+    // may itself allocate via |PartitionRootGeneric::Alloc()|, which must not
+    // happen while this partition's lock is held.
+    for (size_t i = 0; i < kGenericNumBuckets; ++i) {
+      if (bucket_stats[i].is_valid)
+        dumper->PartitionsDumpBucketStats(partition_name, &bucket_stats[i]);
+    }
+
+    for (size_t i = 0; i < num_direct_mapped_allocations; ++i) {
+      uint32_t size = direct_map_lengths[i];
+
+      PartitionBucketMemoryStats mapped_stats = {};
+      mapped_stats.is_valid = true;
+      mapped_stats.is_direct_map = true;
+      mapped_stats.num_full_pages = 1;
+      mapped_stats.allocated_page_size = size;
+      mapped_stats.bucket_slot_size = size;
+      mapped_stats.active_bytes = size;
+      mapped_stats.resident_bytes = size;
+      dumper->PartitionsDumpBucketStats(partition_name, &mapped_stats);
+    }
+  }
+
+  stats.total_resident_bytes += direct_mapped_allocations_total_size;
+  stats.total_active_bytes += direct_mapped_allocations_total_size;
+  dumper->PartitionDumpTotals(partition_name, &stats);
+}
+
+void PartitionRoot::DumpStats(const char* partition_name,
+                              bool is_light_dump,
+                              PartitionStatsDumper* dumper) {
+  PartitionMemoryStats stats = {0};
+  stats.total_mmapped_bytes = this->total_size_of_super_pages;
+  stats.total_committed_bytes = this->total_size_of_committed_pages;
+  DCHECK(!this->total_size_of_direct_mapped_pages);
+
+  static const size_t kMaxReportableBuckets = 4096 / sizeof(void*);
+  std::unique_ptr<PartitionBucketMemoryStats[]> memory_stats;
+  if (!is_light_dump)
+    memory_stats = std::unique_ptr<PartitionBucketMemoryStats[]>(
+        new PartitionBucketMemoryStats[kMaxReportableBuckets]);
+
+  const size_t partitionNumBuckets = this->num_buckets;
+  DCHECK(partitionNumBuckets <= kMaxReportableBuckets);
+
+  for (size_t i = 0; i < partitionNumBuckets; ++i) {
+    PartitionBucketMemoryStats bucket_stats = {0};
+    PartitionDumpBucketStats(&bucket_stats, &this->buckets()[i]);
+    if (bucket_stats.is_valid) {
+      stats.total_resident_bytes += bucket_stats.resident_bytes;
+      stats.total_active_bytes += bucket_stats.active_bytes;
+      stats.total_decommittable_bytes += bucket_stats.decommittable_bytes;
+      stats.total_discardable_bytes += bucket_stats.discardable_bytes;
+    }
+    if (!is_light_dump) {
+      if (bucket_stats.is_valid)
+        memory_stats[i] = bucket_stats;
+      else
+        memory_stats[i].is_valid = false;
+    }
+  }
+  if (!is_light_dump) {
+    // PartitionsDumpBucketStats is called after collecting stats because it
+    // can use PartitionRoot::Alloc() to allocate and this can affect the
+    // statistics.
+    for (size_t i = 0; i < partitionNumBuckets; ++i) {
+      if (memory_stats[i].is_valid)
+        dumper->PartitionsDumpBucketStats(partition_name, &memory_stats[i]);
+    }
+  }
+  dumper->PartitionDumpTotals(partition_name, &stats);
+}
+
+}  // namespace base
diff --git a/base/allocator/partition_allocator/partition_alloc.h b/base/allocator/partition_allocator/partition_alloc.h
new file mode 100644
index 0000000..79d0905
--- /dev/null
+++ b/base/allocator/partition_allocator/partition_alloc.h
@@ -0,0 +1,438 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H_
+
+// DESCRIPTION
+// PartitionRoot::Alloc() / PartitionRootGeneric::Alloc() and PartitionFree() /
+// PartitionRootGeneric::Free() are approximately analogous to malloc() and
+// free().
+//
+// The main difference is that a PartitionRoot / PartitionRootGeneric object
+// must be supplied to these functions, representing a specific "heap partition"
+// that will be used to satisfy the allocation. Different partitions are
+// guaranteed to exist in separate address spaces, including being separate from
+// the main system heap. If the contained objects are all freed, physical memory
+// is returned to the system but the address space remains reserved.
+// See PartitionAlloc.md for other security properties PartitionAlloc provides.
+//
+// THE ONLY LEGITIMATE WAY TO OBTAIN A PartitionRoot IS THROUGH THE
+// SizeSpecificPartitionAllocator / PartitionAllocatorGeneric classes. To
+// minimize the instruction count to the fullest extent possible, the
+// PartitionRoot is really just a header adjacent to other data areas provided
+// by the allocator class.
+//
+// The PartitionRoot::Alloc() variant of the API has the following caveats:
+// - Allocations and frees against a single partition must be single threaded.
+// - Allocations must not exceed a max size, chosen at compile-time via a
+// templated parameter to PartitionAllocator.
+// - Allocation sizes must be aligned to the system pointer size.
+// - Allocations are bucketed exactly according to size.
+//
+// And for PartitionRootGeneric::Alloc():
+// - Multi-threaded use against a single partition is ok; locking is handled.
+// - Allocations of any arbitrary size can be handled (subject to a limit of
+// INT_MAX bytes for security reasons).
+// - Bucketing is by approximate size, for example an allocation of 4000 bytes
+// might be placed into a 4096-byte bucket. Bucket sizes are chosen to try and
+// keep worst-case waste to ~10%.
+//
+// The allocators are designed to be extremely fast, thanks to the following
+// properties and design:
+// - Just two (reasonably predictable) branches in the hot / fast path
+//   for both allocating and (significantly) freeing.
+// - A minimal number of operations in the hot / fast path, with the slow paths
+//   in separate functions, leading to the possibility of inlining.
+// - Each partition page (which is usually multiple physical pages) has a
+//   metadata structure which allows fast mapping of free() address to an
+//   underlying bucket.
+// - Supports a lock-free API for fast performance in single-threaded cases.
+// - The freelist for a given bucket is split across a number of partition
+//   pages, enabling various simple tricks to try and minimize fragmentation.
+// - Fine-grained bucket sizes leading to less waste and better packing.
+//
+// The following security properties could be investigated in the future:
+// - Per-object bucketing (instead of per-size) is mostly available at the API,
+// but not used yet.
+// - No randomness of freelist entries or bucket position.
+// - Better checking for wild pointers in free().
+// - Better freelist masking function to guarantee fault on 32-bit.
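+//
+// A minimal usage sketch (illustrative only; it uses nothing beyond the
+// classes and methods declared in this header):
+//
+//   base::PartitionAllocatorGeneric allocator;
+//   allocator.init();
+//   void* ptr = allocator.root()->Alloc(4000, "my_type");
+//   allocator.root()->Free(ptr);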
+
+#include <limits.h>
+#include <string.h>
+
+#include "base/allocator/partition_allocator/page_allocator.h"
+#include "base/allocator/partition_allocator/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/partition_bucket.h"
+#include "base/allocator/partition_allocator/partition_cookie.h"
+#include "base/allocator/partition_allocator/partition_page.h"
+#include "base/allocator/partition_allocator/partition_root_base.h"
+#include "base/allocator/partition_allocator/spin_lock.h"
+#include "base/base_export.h"
+#include "base/bits.h"
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/sys_byteorder.h"
+#include "build/build_config.h"
+
+#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+#include <stdlib.h>
+#endif
+
+namespace base {
+
+class PartitionStatsDumper;
+
+enum PartitionPurgeFlags {
+  // Decommitting the ring list of empty pages is reasonably fast.
+  PartitionPurgeDecommitEmptyPages = 1 << 0,
+  // Discarding unused system pages is slower, because it involves walking all
+  // freelists in all active partition pages of all buckets >= system page
+  // size. It often frees a similar amount of memory to decommitting the empty
+  // pages, though.
+  PartitionPurgeDiscardUnusedSystemPages = 1 << 1,
+};
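+
+// For example, to reclaim as much memory as possible from a generic partition
+// (both flags are honored by PartitionRootGeneric::PurgeMemory() below):
+//
+//   root->PurgeMemory(PartitionPurgeDecommitEmptyPages |
+//                     PartitionPurgeDiscardUnusedSystemPages);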
+
+// Never instantiate a PartitionRoot directly, instead use PartitionAlloc.
+struct BASE_EXPORT PartitionRoot : public internal::PartitionRootBase {
+  PartitionRoot();
+  ~PartitionRoot() override;
+  // This references the buckets OFF the edge of this struct. All uses of
+  // PartitionRoot must have the bucket array come right after.
+  //
+  // The PartitionAlloc templated class ensures the following is correct.
+  ALWAYS_INLINE internal::PartitionBucket* buckets() {
+    return reinterpret_cast<internal::PartitionBucket*>(this + 1);
+  }
+  ALWAYS_INLINE const internal::PartitionBucket* buckets() const {
+    return reinterpret_cast<const internal::PartitionBucket*>(this + 1);
+  }
+
+  void Init(size_t num_buckets, size_t max_allocation);
+
+  ALWAYS_INLINE void* Alloc(size_t size, const char* type_name);
+
+  void PurgeMemory(int flags);
+
+  void DumpStats(const char* partition_name,
+                 bool is_light_dump,
+                 PartitionStatsDumper* dumper);
+};
+
+// Never instantiate a PartitionRootGeneric directly, instead use
+// PartitionAllocatorGeneric.
+struct BASE_EXPORT PartitionRootGeneric : public internal::PartitionRootBase {
+  PartitionRootGeneric();
+  ~PartitionRootGeneric() override;
+  subtle::SpinLock lock;
+  // Some pre-computed constants.
+  size_t order_index_shifts[kBitsPerSizeT + 1] = {};
+  size_t order_sub_index_masks[kBitsPerSizeT + 1] = {};
+  // The bucket lookup table lets us map a size_t to a bucket quickly.
+  // The trailing +1 caters for the overflow case for very large allocation
+  // sizes.  It is one flat array instead of a 2D array because in the 2D
+  // world, we'd need to index array[blah][max+1] which risks undefined
+  // behavior.
+  internal::PartitionBucket*
+      bucket_lookups[((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder) + 1] =
+          {};
+  internal::PartitionBucket buckets[kGenericNumBuckets] = {};
+
+  // Public API.
+  void Init();
+
+  ALWAYS_INLINE void* Alloc(size_t size, const char* type_name);
+  ALWAYS_INLINE void Free(void* ptr);
+
+  NOINLINE void* Realloc(void* ptr, size_t new_size, const char* type_name);
+
+  ALWAYS_INLINE size_t ActualSize(size_t size);
+
+  void PurgeMemory(int flags);
+
+  void DumpStats(const char* partition_name,
+                 bool is_light_dump,
+                 PartitionStatsDumper* partition_stats_dumper);
+};
+
+// Struct used to retrieve total memory usage of a partition. Used by
+// PartitionStatsDumper implementation.
+struct PartitionMemoryStats {
+  size_t total_mmapped_bytes;    // Total bytes mmapped from the system.
+  size_t total_committed_bytes;  // Total size of committed pages.
+  size_t total_resident_bytes;   // Total bytes provisioned by the partition.
+  size_t total_active_bytes;     // Total active bytes in the partition.
+  size_t total_decommittable_bytes;  // Total bytes that could be decommitted.
+  size_t total_discardable_bytes;    // Total bytes that could be discarded.
+};
+
+// Struct used to retrieve memory statistics about a partition bucket. Used by
+// PartitionStatsDumper implementation.
+struct PartitionBucketMemoryStats {
+  bool is_valid;       // Used to check whether the stats are valid.
+  bool is_direct_map;  // True if this is a direct mapping; size will not be
+                       // unique.
+  uint32_t bucket_slot_size;     // The size of the slot in bytes.
+  uint32_t allocated_page_size;  // Total size the partition page allocated from
+                                 // the system.
+  uint32_t active_bytes;         // Total active bytes used in the bucket.
+  uint32_t resident_bytes;       // Total bytes provisioned in the bucket.
+  uint32_t decommittable_bytes;  // Total bytes that could be decommitted.
+  uint32_t discardable_bytes;    // Total bytes that could be discarded.
+  uint32_t num_full_pages;       // Number of pages with all slots allocated.
+  uint32_t num_active_pages;     // Number of pages that have at least one
+                                 // provisioned slot.
+  uint32_t num_empty_pages;      // Number of pages that are empty
+                                 // but not decommitted.
+  uint32_t num_decommitted_pages;  // Number of pages that are empty
+                                   // and decommitted.
+};
+
+// Interface that is passed to PartitionRoot::DumpStats() and
+// PartitionRootGeneric::DumpStats() to receive the memory statistics.
+class BASE_EXPORT PartitionStatsDumper {
+ public:
+  // Called to dump total memory used by partition, once per partition.
+  virtual void PartitionDumpTotals(const char* partition_name,
+                                   const PartitionMemoryStats*) = 0;
+
+  // Called to dump stats about buckets, for each bucket.
+  virtual void PartitionsDumpBucketStats(const char* partition_name,
+                                         const PartitionBucketMemoryStats*) = 0;
+};
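+
+// A minimal sketch of an implementation (illustrative; |LoggingStatsDumper|
+// is a hypothetical name -- see MockPartitionStatsDumper in
+// partition_alloc_unittest.cc for a fuller example):
+//
+//   class LoggingStatsDumper : public base::PartitionStatsDumper {
+//    public:
+//     void PartitionDumpTotals(const char* partition_name,
+//                              const base::PartitionMemoryStats* stats) override {
+//       LOG(INFO) << partition_name << " resident bytes: "
+//                 << stats->total_resident_bytes;
+//     }
+//     void PartitionsDumpBucketStats(
+//         const char* partition_name,
+//         const base::PartitionBucketMemoryStats*) override {}
+//   };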
+
+BASE_EXPORT void PartitionAllocGlobalInit(void (*oom_handling_function)());
+
+class BASE_EXPORT PartitionAllocHooks {
+ public:
+  typedef void AllocationHook(void* address, size_t, const char* type_name);
+  typedef void FreeHook(void* address);
+
+  // To unhook, call Set*Hook with nullptr.
+  static void SetAllocationHook(AllocationHook* hook) {
+    // Chained allocation hooks are not supported. Registering a non-null
+    // hook when a non-null hook is already registered indicates somebody is
+    // trying to overwrite a hook.
+    CHECK(!hook || !allocation_hook_) << "Overwriting allocation hook";
+    allocation_hook_ = hook;
+  }
+  static void SetFreeHook(FreeHook* hook) {
+    CHECK(!hook || !free_hook_) << "Overwriting free hook";
+    free_hook_ = hook;
+  }
+
+  static void AllocationHookIfEnabled(void* address,
+                                      size_t size,
+                                      const char* type_name) {
+    AllocationHook* hook = allocation_hook_;
+    if (UNLIKELY(hook != nullptr))
+      hook(address, size, type_name);
+  }
+
+  static void FreeHookIfEnabled(void* address) {
+    FreeHook* hook = free_hook_;
+    if (UNLIKELY(hook != nullptr))
+      hook(address);
+  }
+
+  static void ReallocHookIfEnabled(void* old_address,
+                                   void* new_address,
+                                   size_t size,
+                                   const char* type_name) {
+    // Report a reallocation as a free followed by an allocation.
+    AllocationHook* allocation_hook = allocation_hook_;
+    FreeHook* free_hook = free_hook_;
+    if (UNLIKELY(allocation_hook && free_hook)) {
+      free_hook(old_address);
+      allocation_hook(new_address, size, type_name);
+    }
+  }
+
+ private:
+  // Pointers to hook functions that PartitionAlloc will call on allocation and
+  // free if the pointers are non-null.
+  static AllocationHook* allocation_hook_;
+  static FreeHook* free_hook_;
+};
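+
+// Example (illustrative; |MyAllocationHook| is a hypothetical client
+// function):
+//
+//   void MyAllocationHook(void* address, size_t size, const char* type_name) {
+//     // Record the allocation somewhere.
+//   }
+//
+//   base::PartitionAllocHooks::SetAllocationHook(&MyAllocationHook);
+//   ...
+//   base::PartitionAllocHooks::SetAllocationHook(nullptr);  // Unhook.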
+
+ALWAYS_INLINE void* PartitionRoot::Alloc(size_t size, const char* type_name) {
+#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+  void* result = malloc(size);
+  CHECK(result);
+  return result;
+#else
+  size_t requested_size = size;
+  size = internal::PartitionCookieSizeAdjustAdd(size);
+  DCHECK(this->initialized);
+  size_t index = size >> kBucketShift;
+  DCHECK(index < this->num_buckets);
+  DCHECK(size == index << kBucketShift);
+  internal::PartitionBucket* bucket = &this->buckets()[index];
+  void* result = AllocFromBucket(bucket, 0, size);
+  PartitionAllocHooks::AllocationHookIfEnabled(result, requested_size,
+                                               type_name);
+  return result;
+#endif  // defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+}
+
+ALWAYS_INLINE void PartitionFree(void* ptr) {
+#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+  free(ptr);
+#else
+  // TODO(palmer): Check ptr alignment before continuing. Shall we do the check
+  // inside PartitionCookieFreePointerAdjust?
+  PartitionAllocHooks::FreeHookIfEnabled(ptr);
+  ptr = internal::PartitionCookieFreePointerAdjust(ptr);
+  internal::PartitionPage* page = internal::PartitionPage::FromPointer(ptr);
+  // TODO(palmer): See if we can afford to make this a CHECK.
+  DCHECK(internal::PartitionRootBase::IsValidPage(page));
+  page->Free(ptr);
+#endif
+}
+
+ALWAYS_INLINE internal::PartitionBucket* PartitionGenericSizeToBucket(
+    PartitionRootGeneric* root,
+    size_t size) {
+  size_t order = kBitsPerSizeT - bits::CountLeadingZeroBitsSizeT(size);
+  // The order index is simply the next few bits after the most significant bit.
+  size_t order_index = (size >> root->order_index_shifts[order]) &
+                       (kGenericNumBucketsPerOrder - 1);
+  // And if the remaining bits are non-zero we must bump the bucket up.
+  size_t sub_order_index = size & root->order_sub_index_masks[order];
+  internal::PartitionBucket* bucket =
+      root->bucket_lookups[(order << kGenericNumBucketsPerOrderBits) +
+                           order_index + !!sub_order_index];
+  DCHECK(!bucket->slot_size || bucket->slot_size >= size);
+  DCHECK(!(bucket->slot_size % kGenericSmallestBucket));
+  return bucket;
+}
+
+ALWAYS_INLINE void* PartitionAllocGenericFlags(PartitionRootGeneric* root,
+                                               int flags,
+                                               size_t size,
+                                               const char* type_name) {
+#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+  void* result = malloc(size);
+  CHECK(result || flags & PartitionAllocReturnNull);
+  return result;
+#else
+  DCHECK(root->initialized);
+  size_t requested_size = size;
+  size = internal::PartitionCookieSizeAdjustAdd(size);
+  internal::PartitionBucket* bucket = PartitionGenericSizeToBucket(root, size);
+  void* ret = nullptr;
+  {
+    subtle::SpinLock::Guard guard(root->lock);
+    ret = root->AllocFromBucket(bucket, flags, size);
+  }
+  PartitionAllocHooks::AllocationHookIfEnabled(ret, requested_size, type_name);
+  return ret;
+#endif
+}
+
+ALWAYS_INLINE void* PartitionRootGeneric::Alloc(size_t size,
+                                                const char* type_name) {
+  return PartitionAllocGenericFlags(this, 0, size, type_name);
+}
+
+ALWAYS_INLINE void PartitionRootGeneric::Free(void* ptr) {
+#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+  free(ptr);
+#else
+  DCHECK(this->initialized);
+
+  if (UNLIKELY(!ptr))
+    return;
+
+  PartitionAllocHooks::FreeHookIfEnabled(ptr);
+  ptr = internal::PartitionCookieFreePointerAdjust(ptr);
+  internal::PartitionPage* page = internal::PartitionPage::FromPointer(ptr);
+  // TODO(palmer): See if we can afford to make this a CHECK.
+  DCHECK(IsValidPage(page));
+  {
+    subtle::SpinLock::Guard guard(this->lock);
+    page->Free(ptr);
+  }
+#endif
+}
+
+BASE_EXPORT void* PartitionReallocGenericFlags(PartitionRootGeneric* root,
+                                               int flags,
+                                               void* ptr,
+                                               size_t new_size,
+                                               const char* type_name);
+
+ALWAYS_INLINE size_t PartitionRootGeneric::ActualSize(size_t size) {
+#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+  return size;
+#else
+  DCHECK(this->initialized);
+  size = internal::PartitionCookieSizeAdjustAdd(size);
+  internal::PartitionBucket* bucket = PartitionGenericSizeToBucket(this, size);
+  if (LIKELY(!bucket->is_direct_mapped())) {
+    size = bucket->slot_size;
+  } else if (size > kGenericMaxDirectMapped) {
+    // Too large to allocate => return the size unchanged.
+  } else {
+    size = internal::PartitionBucket::get_direct_map_size(size);
+  }
+  return internal::PartitionCookieSizeAdjustSubtract(size);
+#endif
+}
+
+ALWAYS_INLINE bool PartitionAllocSupportsGetSize() {
+#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+  return false;
+#else
+  return true;
+#endif
+}
+
+ALWAYS_INLINE size_t PartitionAllocGetSize(void* ptr) {
+  // No need to lock here. Only |ptr| being freed by another thread could
+  // cause trouble, and the caller is responsible for that not happening.
+  DCHECK(PartitionAllocSupportsGetSize());
+  ptr = internal::PartitionCookieFreePointerAdjust(ptr);
+  internal::PartitionPage* page = internal::PartitionPage::FromPointer(ptr);
+  // TODO(palmer): See if we can afford to make this a CHECK.
+  DCHECK(internal::PartitionRootBase::IsValidPage(page));
+  size_t size = page->bucket->slot_size;
+  return internal::PartitionCookieSizeAdjustSubtract(size);
+}
+
+template <size_t N>
+class SizeSpecificPartitionAllocator {
+ public:
+  SizeSpecificPartitionAllocator() {
+    memset(actual_buckets_, 0,
+           sizeof(internal::PartitionBucket) * arraysize(actual_buckets_));
+  }
+  ~SizeSpecificPartitionAllocator() = default;
+  static const size_t kMaxAllocation = N - kAllocationGranularity;
+  static const size_t kNumBuckets = N / kAllocationGranularity;
+  void init() { partition_root_.Init(kNumBuckets, kMaxAllocation); }
+  ALWAYS_INLINE PartitionRoot* root() { return &partition_root_; }
+
+ private:
+  PartitionRoot partition_root_;
+  internal::PartitionBucket actual_buckets_[kNumBuckets];
+};
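+
+// Usage sketch (illustrative; note the single-threaded and max-size caveats
+// documented above -- init() must be called before the first Alloc()):
+//
+//   base::SizeSpecificPartitionAllocator<1024> allocator;
+//   allocator.init();
+//   void* p = allocator.root()->Alloc(16, "my_type");
+//   base::PartitionFree(p);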
+
+class BASE_EXPORT PartitionAllocatorGeneric {
+ public:
+  PartitionAllocatorGeneric();
+  ~PartitionAllocatorGeneric();
+
+  void init() { partition_root_.Init(); }
+  ALWAYS_INLINE PartitionRootGeneric* root() { return &partition_root_; }
+
+ private:
+  PartitionRootGeneric partition_root_;
+};
+
+}  // namespace base
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H_
diff --git a/base/allocator/partition_allocator/partition_alloc_constants.h b/base/allocator/partition_allocator/partition_alloc_constants.h
new file mode 100644
index 0000000..deaa19e
--- /dev/null
+++ b/base/allocator/partition_allocator/partition_alloc_constants.h
@@ -0,0 +1,161 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONSTANTS_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONSTANTS_H_
+
+#include <limits.h>
+
+#include "base/allocator/partition_allocator/page_allocator_constants.h"
+#include "base/bits.h"
+#include "base/logging.h"
+
+namespace base {
+
+// Allocation granularity of sizeof(void*) bytes.
+static const size_t kAllocationGranularity = sizeof(void*);
+static const size_t kAllocationGranularityMask = kAllocationGranularity - 1;
+static const size_t kBucketShift = (kAllocationGranularity == 8) ? 3 : 2;
+
+// Underlying partition storage pages are a power-of-two size. It is typical
+// for a partition page to be based on multiple system pages. Most references to
+// "page" refer to partition pages.
+// We also have the concept of "super pages" -- these are the underlying system
+// allocations we make. Super pages contain multiple partition pages inside them
+// and include space for a small amount of metadata per partition page.
+// Inside super pages, we store "slot spans". A slot span is a contiguous range
+// of one or more partition pages that stores allocations of the same size.
+// Slot span sizes are adjusted depending on the allocation size, to make sure
+// the packing does not lead to unused (wasted) space at the end of the last
+// system page of the span. For our current max slot span size of 64k and other
+// constant values, we pack _all_ PartitionRootGeneric::Alloc() sizes perfectly
+// up against the end of a system page.
+#if defined(_MIPS_ARCH_LOONGSON)
+static const size_t kPartitionPageShift = 16;  // 64KB
+#else
+static const size_t kPartitionPageShift = 14;  // 16KB
+#endif
+static const size_t kPartitionPageSize = 1 << kPartitionPageShift;
+static const size_t kPartitionPageOffsetMask = kPartitionPageSize - 1;
+static const size_t kPartitionPageBaseMask = ~kPartitionPageOffsetMask;
+static const size_t kMaxPartitionPagesPerSlotSpan = 4;
+
+// To avoid fragmentation via never-used freelist entries, we hand out partition
+// freelist sections gradually, in units of the dominant system page size.
+// What we're actually doing is avoiding filling the full partition page (16 KB)
+// with freelist pointers right away. Writing freelist pointers will fault and
+// dirty a private page, which is very wasteful if we never actually store
+// objects there.
+static const size_t kNumSystemPagesPerPartitionPage =
+    kPartitionPageSize / kSystemPageSize;
+static const size_t kMaxSystemPagesPerSlotSpan =
+    kNumSystemPagesPerPartitionPage * kMaxPartitionPagesPerSlotSpan;
+
+// We reserve virtual address space in 2MB chunks (aligned to 2MB as well).
+// These chunks are called "super pages". We do this so that we can store
+// metadata in the first few pages of each 2MB aligned section. This leads to
+// a very fast free(). We specifically choose 2MB because this virtual address
+// block represents a full but single PTE allocation on ARM, ia32 and x64.
+//
+// The layout of the super page is as follows. The sizes below are the same
+// for 32 bit and 64 bit.
+//
+//   | Guard page (4KB)    |
+//   | Metadata page (4KB) |
+//   | Guard pages (8KB)   |
+//   | Slot span           |
+//   | Slot span           |
+//   | ...                 |
+//   | Slot span           |
+//   | Guard page (4KB)    |
+//
+//   - Each slot span is a contiguous range of one or more PartitionPages.
+//   - The metadata page has the following format. Note that the PartitionPage
+//     that is not at the head of a slot span is "unused". In other words,
+//     the metadata for the slot span is stored only in the first PartitionPage
+//     of the slot span. Metadata accesses to other PartitionPages are
+//     redirected to the first PartitionPage.
+//
+//     | SuperPageExtentEntry (32B)                 |
+//     | PartitionPage of slot span 1 (32B, used)   |
+//     | PartitionPage of slot span 1 (32B, unused) |
+//     | PartitionPage of slot span 1 (32B, unused) |
+//     | PartitionPage of slot span 2 (32B, used)   |
+//     | PartitionPage of slot span 3 (32B, used)   |
+//     | ...                                        |
+//     | PartitionPage of slot span N (32B, unused) |
+//
+// A direct mapped page has a similar layout so that it looks like a super
+// page:
+//
+//     | Guard page (4KB)     |
+//     | Metadata page (4KB)  |
+//     | Guard pages (8KB)    |
+//     | Direct mapped object |
+//     | Guard page (4KB)     |
+//
+//    - The metadata page has the following layout:
+//
+//     | SuperPageExtentEntry (32B)    |
+//     | PartitionPage (32B)           |
+//     | PartitionBucket (32B)         |
+//     | PartitionDirectMapExtent (8B) |
+static const size_t kSuperPageShift = 21;  // 2MB
+static const size_t kSuperPageSize = 1 << kSuperPageShift;
+static const size_t kSuperPageOffsetMask = kSuperPageSize - 1;
+static const size_t kSuperPageBaseMask = ~kSuperPageOffsetMask;
+static const size_t kNumPartitionPagesPerSuperPage =
+    kSuperPageSize / kPartitionPageSize;
+
+// The following kGeneric* constants apply to the generic variants of the API.
+// The "order" of an allocation is closely related to the power-of-two size of
+// the allocation. More precisely, the order is the bit index of the
+// most-significant bit in the allocation size, where bit numbering starts
+// at index 1 for the least-significant bit.
+// In terms of allocation sizes, order 0 covers 0, order 1 covers 1, order 2
+// covers 2->3, order 3 covers 4->7, order 4 covers 8->15.
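+// As a worked example (consistent with the "4000 byte -> 4096 byte bucket"
+// example in partition_alloc.h): a 4000-byte request has its most significant
+// bit at index 12, so it is order 12; the next kGenericNumBucketsPerOrderBits
+// bits select the 3840-byte bucket, and the non-zero remaining bits bump the
+// request up to the next bucket, 4096 bytes.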
+static const size_t kGenericMinBucketedOrder = 4;  // 8 bytes.
+static const size_t kGenericMaxBucketedOrder =
+    20;  // Largest bucketed order is 1<<(20-1) (storing 512KB -> almost 1MB)
+static const size_t kGenericNumBucketedOrders =
+    (kGenericMaxBucketedOrder - kGenericMinBucketedOrder) + 1;
+// Eight buckets per order (for the higher orders), e.g. order 8 is 128, 144,
+// 160, ..., 240:
+static const size_t kGenericNumBucketsPerOrderBits = 3;
+static const size_t kGenericNumBucketsPerOrder =
+    1 << kGenericNumBucketsPerOrderBits;
+static const size_t kGenericNumBuckets =
+    kGenericNumBucketedOrders * kGenericNumBucketsPerOrder;
+static const size_t kGenericSmallestBucket = 1
+                                             << (kGenericMinBucketedOrder - 1);
+static const size_t kGenericMaxBucketSpacing =
+    1 << ((kGenericMaxBucketedOrder - 1) - kGenericNumBucketsPerOrderBits);
+static const size_t kGenericMaxBucketed =
+    (1 << (kGenericMaxBucketedOrder - 1)) +
+    ((kGenericNumBucketsPerOrder - 1) * kGenericMaxBucketSpacing);
+static const size_t kGenericMinDirectMappedDownsize =
+    kGenericMaxBucketed +
+    1;  // Limit when downsizing a direct mapping using realloc().
+static const size_t kGenericMaxDirectMapped =
+    (1UL << 31) + kPageAllocationGranularity;  // 2 GB plus one more page.
+static const size_t kBitsPerSizeT = sizeof(void*) * CHAR_BIT;
+
+// Constant for the memory reclaim logic.
+static const size_t kMaxFreeableSpans = 16;
+
+// If the total size in bytes of allocated but not committed pages exceeds this
+// value (probably an "out of virtual address space" crash),
+// a special crash stack trace is generated at |PartitionOutOfMemory|.
+// This is to distinguish "out of virtual address space" from
+// "out of physical memory" in crash reports.
+static const size_t kReasonableSizeOfUnusedPages = 1024 * 1024 * 1024;  // 1GB
+
+// Flags for PartitionAllocGenericFlags.
+enum PartitionAllocFlags {
+  PartitionAllocReturnNull = 1 << 0,
+};
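+
+// Example (illustrative): with PartitionAllocReturnNull, a failed allocation
+// yields nullptr rather than terminating the process:
+//
+//   void* p = base::PartitionAllocGenericFlags(
+//       root, base::PartitionAllocReturnNull, size, "my_type");
+//   if (!p) {
+//     // Handle allocation failure.
+//   }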
+
+}  // namespace base
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONSTANTS_H_
diff --git a/base/allocator/partition_allocator/partition_alloc_unittest.cc b/base/allocator/partition_allocator/partition_alloc_unittest.cc
new file mode 100644
index 0000000..4bf6b26
--- /dev/null
+++ b/base/allocator/partition_allocator/partition_alloc_unittest.cc
@@ -0,0 +1,2129 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/partition_alloc.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <memory>
+#include <vector>
+
+#include "base/allocator/partition_allocator/address_space_randomization.h"
+#include "base/bit_cast.h"
+#include "base/bits.h"
+#include "base/sys_info.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_POSIX)
+#include <sys/mman.h>
+#if !defined(OS_FUCHSIA)
+#include <sys/resource.h>
+#endif
+#include <sys/time.h>
+#endif  // defined(OS_POSIX)
+
+#if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+
+// Because there is so much deep inspection of the internal objects,
+// explicitly annotating the namespaces for commonly expected objects makes the
+// code unreadable. Prefer using declarations instead.
+using base::internal::PartitionBucket;
+using base::internal::PartitionPage;
+
+namespace {
+
+constexpr size_t kTestMaxAllocation = base::kSystemPageSize;
+
+bool IsLargeMemoryDevice() {
+  // Treat any device with 2GiB or more of physical memory as a "large memory
+  // device". We check for slightly less than 2GiB so that devices with a small
+  // amount of memory not accessible to the OS still count as "large".
+  return base::SysInfo::AmountOfPhysicalMemory() >= 2040LL * 1024 * 1024;
+}
+
+bool SetAddressSpaceLimit() {
+#if !defined(ARCH_CPU_64_BITS) || !defined(OS_POSIX)
+  // 32 bits => address space is limited already.
+  return true;
+#elif defined(OS_POSIX) && !defined(OS_MACOSX) && !defined(OS_FUCHSIA)
+  // macOS will accept, but not enforce, |RLIMIT_AS| changes. See
+  // https://crbug.com/435269 and rdar://17576114.
+  //
+  // Note: This number must be at least 6 GB, because with
+  // sanitizer_coverage_flags=edge, it reserves > 5 GB of address space. See
+  // https://crbug.com/674665.
+  const size_t kAddressSpaceLimit = static_cast<size_t>(6144) * 1024 * 1024;
+  struct rlimit limit;
+  if (getrlimit(RLIMIT_AS, &limit) != 0)
+    return false;
+  if (limit.rlim_cur == RLIM_INFINITY || limit.rlim_cur > kAddressSpaceLimit) {
+    limit.rlim_cur = kAddressSpaceLimit;
+    if (setrlimit(RLIMIT_AS, &limit) != 0)
+      return false;
+  }
+  return true;
+#else
+  return false;
+#endif
+}
+
+bool ClearAddressSpaceLimit() {
+#if !defined(ARCH_CPU_64_BITS) || !defined(OS_POSIX) || defined(OS_FUCHSIA)
+  return true;
+#elif defined(OS_POSIX)
+  struct rlimit limit;
+  if (getrlimit(RLIMIT_AS, &limit) != 0)
+    return false;
+  limit.rlim_cur = limit.rlim_max;
+  if (setrlimit(RLIMIT_AS, &limit) != 0)
+    return false;
+  return true;
+#else
+  return false;
+#endif
+}
+
+}  // namespace
+
+namespace base {
+
+// NOTE: Though this test actually exercises interfaces inside the ::base
+// namespace, the unittest is inside the ::base::internal namespace because a
+// portion of the test expectations require inspecting objects and behavior
+// in the ::base::internal namespace. An alternate formulation would be to
+// explicitly add using statements for each inspected type but this felt more
+// readable.
+namespace internal {
+
+const size_t kTestAllocSize = 16;
+#if !DCHECK_IS_ON()
+const size_t kPointerOffset = 0;
+const size_t kExtraAllocSize = 0;
+#else
+const size_t kPointerOffset = kCookieSize;
+const size_t kExtraAllocSize = kCookieSize * 2;
+#endif
+const size_t kRealAllocSize = kTestAllocSize + kExtraAllocSize;
+const size_t kTestBucketIndex = kRealAllocSize >> kBucketShift;
+
+const char* type_name = nullptr;
+
+class PartitionAllocTest : public testing::Test {
+ protected:
+  PartitionAllocTest() = default;
+
+  ~PartitionAllocTest() override = default;
+
+  void SetUp() override {
+    allocator.init();
+    generic_allocator.init();
+  }
+
+  PartitionPage* GetFullPage(size_t size) {
+    size_t real_size = size + kExtraAllocSize;
+    size_t bucket_index = real_size >> kBucketShift;
+    PartitionBucket* bucket = &allocator.root()->buckets()[bucket_index];
+    size_t num_slots =
+        (bucket->num_system_pages_per_slot_span * kSystemPageSize) / real_size;
+    void* first = nullptr;
+    void* last = nullptr;
+    size_t i;
+    for (i = 0; i < num_slots; ++i) {
+      void* ptr = allocator.root()->Alloc(size, type_name);
+      EXPECT_TRUE(ptr);
+      if (!i)
+        first = PartitionCookieFreePointerAdjust(ptr);
+      else if (i == num_slots - 1)
+        last = PartitionCookieFreePointerAdjust(ptr);
+    }
+    EXPECT_EQ(PartitionPage::FromPointer(first),
+              PartitionPage::FromPointer(last));
+    if (bucket->num_system_pages_per_slot_span ==
+        kNumSystemPagesPerPartitionPage)
+      EXPECT_EQ(reinterpret_cast<size_t>(first) & kPartitionPageBaseMask,
+                reinterpret_cast<size_t>(last) & kPartitionPageBaseMask);
+    EXPECT_EQ(num_slots, static_cast<size_t>(
+                             bucket->active_pages_head->num_allocated_slots));
+    EXPECT_EQ(nullptr, bucket->active_pages_head->freelist_head);
+    EXPECT_TRUE(bucket->active_pages_head);
+    EXPECT_TRUE(bucket->active_pages_head !=
+                PartitionPage::get_sentinel_page());
+    return bucket->active_pages_head;
+  }
+
+  void CycleFreeCache(size_t size) {
+    size_t real_size = size + kExtraAllocSize;
+    size_t bucket_index = real_size >> kBucketShift;
+    PartitionBucket* bucket = &allocator.root()->buckets()[bucket_index];
+    DCHECK(!bucket->active_pages_head->num_allocated_slots);
+
+    for (size_t i = 0; i < kMaxFreeableSpans; ++i) {
+      void* ptr = allocator.root()->Alloc(size, type_name);
+      EXPECT_EQ(1, bucket->active_pages_head->num_allocated_slots);
+      PartitionFree(ptr);
+      EXPECT_EQ(0, bucket->active_pages_head->num_allocated_slots);
+      EXPECT_NE(-1, bucket->active_pages_head->empty_cache_index);
+    }
+  }
+
+  void CycleGenericFreeCache(size_t size) {
+    for (size_t i = 0; i < kMaxFreeableSpans; ++i) {
+      void* ptr = generic_allocator.root()->Alloc(size, type_name);
+      PartitionPage* page =
+          PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr));
+      PartitionBucket* bucket = page->bucket;
+      EXPECT_EQ(1, bucket->active_pages_head->num_allocated_slots);
+      generic_allocator.root()->Free(ptr);
+      EXPECT_EQ(0, bucket->active_pages_head->num_allocated_slots);
+      EXPECT_NE(-1, bucket->active_pages_head->empty_cache_index);
+    }
+  }
+
+  void DoReturnNullTest(size_t allocSize, bool use_realloc) {
+    // TODO(crbug.com/678782): Where necessary and possible, disable the
+    // platform's OOM-killing behavior. OOM-killing makes this test flaky on
+    // low-memory devices.
+    if (!IsLargeMemoryDevice()) {
+      LOG(WARNING)
+          << "Skipping test on this device because of crbug.com/678782";
+      return;
+    }
+
+    ASSERT_TRUE(SetAddressSpaceLimit());
+
+    // Work out the number of allocations for 6 GB of memory.
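+    // Both operands are scaled by 1024 (6 GiB in KiB divided by |allocSize|
+    // in KiB) so the intermediate values stay within int range.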
+    const int numAllocations = (6 * 1024 * 1024) / (allocSize / 1024);
+
+    void** ptrs = reinterpret_cast<void**>(generic_allocator.root()->Alloc(
+        numAllocations * sizeof(void*), type_name));
+    int i;
+
+    for (i = 0; i < numAllocations; ++i) {
+      if (use_realloc) {
+        ptrs[i] = PartitionAllocGenericFlags(
+            generic_allocator.root(), PartitionAllocReturnNull, 1, type_name);
+        ptrs[i] = PartitionReallocGenericFlags(generic_allocator.root(),
+                                               PartitionAllocReturnNull,
+                                               ptrs[i], allocSize, type_name);
+      } else {
+        ptrs[i] = PartitionAllocGenericFlags(generic_allocator.root(),
+                                             PartitionAllocReturnNull,
+                                             allocSize, type_name);
+      }
+      if (!i)
+        EXPECT_TRUE(ptrs[0]);
+      if (!ptrs[i]) {
+        ptrs[i] = PartitionAllocGenericFlags(generic_allocator.root(),
+                                             PartitionAllocReturnNull,
+                                             allocSize, type_name);
+        EXPECT_FALSE(ptrs[i]);
+        break;
+      }
+    }
+
+    // We shouldn't succeed in allocating all 6 GB of memory. If we do, then
+    // we're not actually testing anything here.
+    EXPECT_LT(i, numAllocations);
+
+    // Free, reallocate and free again each block we allocated. We do this to
+    // check that freeing memory also works correctly after a failed allocation.
+    for (--i; i >= 0; --i) {
+      generic_allocator.root()->Free(ptrs[i]);
+      ptrs[i] = PartitionAllocGenericFlags(generic_allocator.root(),
+                                           PartitionAllocReturnNull, allocSize,
+                                           type_name);
+      EXPECT_TRUE(ptrs[i]);
+      generic_allocator.root()->Free(ptrs[i]);
+    }
+
+    generic_allocator.root()->Free(ptrs);
+
+    EXPECT_TRUE(ClearAddressSpaceLimit());
+  }
+
+  SizeSpecificPartitionAllocator<kTestMaxAllocation> allocator;
+  PartitionAllocatorGeneric generic_allocator;
+};
+
+class PartitionAllocDeathTest : public PartitionAllocTest {};
+
+namespace {
+
+void FreeFullPage(PartitionPage* page) {
+  size_t size = page->bucket->slot_size;
+  size_t num_slots =
+      (page->bucket->num_system_pages_per_slot_span * kSystemPageSize) / size;
+  EXPECT_EQ(num_slots, static_cast<size_t>(abs(page->num_allocated_slots)));
+  char* ptr = reinterpret_cast<char*>(PartitionPage::ToPointer(page));
+  size_t i;
+  for (i = 0; i < num_slots; ++i) {
+    PartitionFree(ptr + kPointerOffset);
+    ptr += size;
+  }
+}
+
+#if defined(OS_LINUX)
+bool CheckPageInCore(void* ptr, bool in_core) {
+  unsigned char ret = 0;
+  EXPECT_EQ(0, mincore(ptr, kSystemPageSize, &ret));
+  return in_core == (ret & 1);
+}
+
+#define CHECK_PAGE_IN_CORE(ptr, in_core) \
+  EXPECT_TRUE(CheckPageInCore(ptr, in_core))
+#else
+#define CHECK_PAGE_IN_CORE(ptr, in_core) (void)(0)
+#endif  // defined(OS_LINUX)
+
+class MockPartitionStatsDumper : public PartitionStatsDumper {
+ public:
+  MockPartitionStatsDumper()
+      : total_resident_bytes(0),
+        total_active_bytes(0),
+        total_decommittable_bytes(0),
+        total_discardable_bytes(0) {}
+
+  void PartitionDumpTotals(const char* partition_name,
+                           const PartitionMemoryStats* stats) override {
+    EXPECT_GE(stats->total_mmapped_bytes, stats->total_resident_bytes);
+    EXPECT_EQ(total_resident_bytes, stats->total_resident_bytes);
+    EXPECT_EQ(total_active_bytes, stats->total_active_bytes);
+    EXPECT_EQ(total_decommittable_bytes, stats->total_decommittable_bytes);
+    EXPECT_EQ(total_discardable_bytes, stats->total_discardable_bytes);
+  }
+
+  void PartitionsDumpBucketStats(
+      const char* partition_name,
+      const PartitionBucketMemoryStats* stats) override {
+    (void)partition_name;
+    EXPECT_TRUE(stats->is_valid);
+    EXPECT_EQ(0u, stats->bucket_slot_size & kAllocationGranularityMask);
+    bucket_stats.push_back(*stats);
+    total_resident_bytes += stats->resident_bytes;
+    total_active_bytes += stats->active_bytes;
+    total_decommittable_bytes += stats->decommittable_bytes;
+    total_discardable_bytes += stats->discardable_bytes;
+  }
+
+  bool IsMemoryAllocationRecorded() {
+    return total_resident_bytes != 0 && total_active_bytes != 0;
+  }
+
+  const PartitionBucketMemoryStats* GetBucketStats(size_t bucket_size) {
+    for (size_t i = 0; i < bucket_stats.size(); ++i) {
+      if (bucket_stats[i].bucket_slot_size == bucket_size)
+        return &bucket_stats[i];
+    }
+    return nullptr;
+  }
+
+ private:
+  size_t total_resident_bytes;
+  size_t total_active_bytes;
+  size_t total_decommittable_bytes;
+  size_t total_discardable_bytes;
+
+  std::vector<PartitionBucketMemoryStats> bucket_stats;
+};
+
+}  // namespace
+
+// Check that the most basic of allocate / free pairs work.
+TEST_F(PartitionAllocTest, Basic) {
+  PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];
+  PartitionPage* seedPage = PartitionPage::get_sentinel_page();
+
+  EXPECT_FALSE(bucket->empty_pages_head);
+  EXPECT_FALSE(bucket->decommitted_pages_head);
+  EXPECT_EQ(seedPage, bucket->active_pages_head);
+  EXPECT_EQ(nullptr, bucket->active_pages_head->next_page);
+
+  void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
+  EXPECT_TRUE(ptr);
+  EXPECT_EQ(kPointerOffset,
+            reinterpret_cast<size_t>(ptr) & kPartitionPageOffsetMask);
+  // Check that the offset appears to include a guard page.
+  EXPECT_EQ(kPartitionPageSize + kPointerOffset,
+            reinterpret_cast<size_t>(ptr) & kSuperPageOffsetMask);
+
+  PartitionFree(ptr);
+  // Expect that the last active page gets noticed as empty but doesn't get
+  // decommitted.
+  EXPECT_TRUE(bucket->empty_pages_head);
+  EXPECT_FALSE(bucket->decommitted_pages_head);
+}
+
+// Test multiple allocations, and freelist handling.
+TEST_F(PartitionAllocTest, MultiAlloc) {
+  char* ptr1 = reinterpret_cast<char*>(
+      allocator.root()->Alloc(kTestAllocSize, type_name));
+  char* ptr2 = reinterpret_cast<char*>(
+      allocator.root()->Alloc(kTestAllocSize, type_name));
+  EXPECT_TRUE(ptr1);
+  EXPECT_TRUE(ptr2);
+  ptrdiff_t diff = ptr2 - ptr1;
+  EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff);
+
+  // Check that we re-use the just-freed slot.
+  PartitionFree(ptr2);
+  ptr2 = reinterpret_cast<char*>(
+      allocator.root()->Alloc(kTestAllocSize, type_name));
+  EXPECT_TRUE(ptr2);
+  diff = ptr2 - ptr1;
+  EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff);
+  PartitionFree(ptr1);
+  ptr1 = reinterpret_cast<char*>(
+      allocator.root()->Alloc(kTestAllocSize, type_name));
+  EXPECT_TRUE(ptr1);
+  diff = ptr2 - ptr1;
+  EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff);
+
+  char* ptr3 = reinterpret_cast<char*>(
+      allocator.root()->Alloc(kTestAllocSize, type_name));
+  EXPECT_TRUE(ptr3);
+  diff = ptr3 - ptr1;
+  EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize * 2), diff);
+
+  PartitionFree(ptr1);
+  PartitionFree(ptr2);
+  PartitionFree(ptr3);
+}
+
+// Test a bucket with multiple pages.
+TEST_F(PartitionAllocTest, MultiPages) {
+  PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];
+
+  PartitionPage* page = GetFullPage(kTestAllocSize);
+  FreeFullPage(page);
+  EXPECT_TRUE(bucket->empty_pages_head);
+  EXPECT_EQ(PartitionPage::get_sentinel_page(), bucket->active_pages_head);
+  EXPECT_EQ(nullptr, page->next_page);
+  EXPECT_EQ(0, page->num_allocated_slots);
+
+  page = GetFullPage(kTestAllocSize);
+  PartitionPage* page2 = GetFullPage(kTestAllocSize);
+
+  EXPECT_EQ(page2, bucket->active_pages_head);
+  EXPECT_EQ(nullptr, page2->next_page);
+  EXPECT_EQ(reinterpret_cast<uintptr_t>(PartitionPage::ToPointer(page)) &
+                kSuperPageBaseMask,
+            reinterpret_cast<uintptr_t>(PartitionPage::ToPointer(page2)) &
+                kSuperPageBaseMask);
+
+  // Fully free the non-current page. This will leave us with no current
+  // active page because one is empty and the other is full.
+  FreeFullPage(page);
+  EXPECT_EQ(0, page->num_allocated_slots);
+  EXPECT_TRUE(bucket->empty_pages_head);
+  EXPECT_EQ(PartitionPage::get_sentinel_page(), bucket->active_pages_head);
+
+  // Allocate a new page, it should pull from the freelist.
+  page = GetFullPage(kTestAllocSize);
+  EXPECT_FALSE(bucket->empty_pages_head);
+  EXPECT_EQ(page, bucket->active_pages_head);
+
+  FreeFullPage(page);
+  FreeFullPage(page2);
+  EXPECT_EQ(0, page->num_allocated_slots);
+  EXPECT_EQ(0, page2->num_allocated_slots);
+  EXPECT_EQ(0, page2->num_unprovisioned_slots);
+  EXPECT_NE(-1, page2->empty_cache_index);
+}
+
+// Test some finer aspects of internal page transitions.
+TEST_F(PartitionAllocTest, PageTransitions) {
+  PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];
+
+  PartitionPage* page1 = GetFullPage(kTestAllocSize);
+  EXPECT_EQ(page1, bucket->active_pages_head);
+  EXPECT_EQ(nullptr, page1->next_page);
+  PartitionPage* page2 = GetFullPage(kTestAllocSize);
+  EXPECT_EQ(page2, bucket->active_pages_head);
+  EXPECT_EQ(nullptr, page2->next_page);
+
+  // Bounce page1 back into the non-full list then fill it up again.
+  char* ptr =
+      reinterpret_cast<char*>(PartitionPage::ToPointer(page1)) + kPointerOffset;
+  PartitionFree(ptr);
+  EXPECT_EQ(page1, bucket->active_pages_head);
+  (void)allocator.root()->Alloc(kTestAllocSize, type_name);
+  EXPECT_EQ(page1, bucket->active_pages_head);
+  EXPECT_EQ(page2, bucket->active_pages_head->next_page);
+
+  // Allocating another page at this point should cause us to scan over page1
+  // (which is both full and NOT our current page), and evict it from the
+  // freelist. Older code had an O(n^2) condition due to failure to do this.
+  PartitionPage* page3 = GetFullPage(kTestAllocSize);
+  EXPECT_EQ(page3, bucket->active_pages_head);
+  EXPECT_EQ(nullptr, page3->next_page);
+
+  // Work out a pointer into page2 and free it.
+  ptr =
+      reinterpret_cast<char*>(PartitionPage::ToPointer(page2)) + kPointerOffset;
+  PartitionFree(ptr);
+  // Trying to allocate at this time should cause us to cycle around to page2
+  // and find the recently freed slot.
+  char* newPtr = reinterpret_cast<char*>(
+      allocator.root()->Alloc(kTestAllocSize, type_name));
+  EXPECT_EQ(ptr, newPtr);
+  EXPECT_EQ(page2, bucket->active_pages_head);
+  EXPECT_EQ(page3, page2->next_page);
+
+  // Work out a pointer into page1 and free it. This should pull the page
+  // back into the list of available pages.
+  ptr =
+      reinterpret_cast<char*>(PartitionPage::ToPointer(page1)) + kPointerOffset;
+  PartitionFree(ptr);
+  // This allocation should be satisfied by page1.
+  newPtr = reinterpret_cast<char*>(
+      allocator.root()->Alloc(kTestAllocSize, type_name));
+  EXPECT_EQ(ptr, newPtr);
+  EXPECT_EQ(page1, bucket->active_pages_head);
+  EXPECT_EQ(page2, page1->next_page);
+
+  FreeFullPage(page3);
+  FreeFullPage(page2);
+  FreeFullPage(page1);
+
+  // Allocating whilst in this state exposed a bug, so keep the test.
+  ptr = reinterpret_cast<char*>(
+      allocator.root()->Alloc(kTestAllocSize, type_name));
+  PartitionFree(ptr);
+}
+
+// Test some corner cases relating to page transitions in the internal
+// free page list metadata bucket.
+TEST_F(PartitionAllocTest, FreePageListPageTransitions) {
+  PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];
+
+  size_t numToFillFreeListPage =
+      kPartitionPageSize / (sizeof(PartitionPage) + kExtraAllocSize);
+  // The +1 is because we need to account for the fact that the current page
+  // never gets thrown on the freelist.
+  ++numToFillFreeListPage;
+  auto pages = std::make_unique<PartitionPage* []>(numToFillFreeListPage);
+
+  size_t i;
+  for (i = 0; i < numToFillFreeListPage; ++i) {
+    pages[i] = GetFullPage(kTestAllocSize);
+  }
+  EXPECT_EQ(pages[numToFillFreeListPage - 1], bucket->active_pages_head);
+  for (i = 0; i < numToFillFreeListPage; ++i)
+    FreeFullPage(pages[i]);
+  EXPECT_EQ(PartitionPage::get_sentinel_page(), bucket->active_pages_head);
+  EXPECT_TRUE(bucket->empty_pages_head);
+
+  // Allocate / free in a different bucket size so we get control of a
+  // different free page list. We need two pages because one will be the last
+  // active page and not get freed.
+  PartitionPage* page1 = GetFullPage(kTestAllocSize * 2);
+  PartitionPage* page2 = GetFullPage(kTestAllocSize * 2);
+  FreeFullPage(page1);
+  FreeFullPage(page2);
+
+  for (i = 0; i < numToFillFreeListPage; ++i) {
+    pages[i] = GetFullPage(kTestAllocSize);
+  }
+  EXPECT_EQ(pages[numToFillFreeListPage - 1], bucket->active_pages_head);
+
+  for (i = 0; i < numToFillFreeListPage; ++i)
+    FreeFullPage(pages[i]);
+  EXPECT_EQ(PartitionPage::get_sentinel_page(), bucket->active_pages_head);
+  EXPECT_TRUE(bucket->empty_pages_head);
+}
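+
+// For scale, a worked instance of numToFillFreeListPage under assumed values
+// (kPartitionPageSize = 16 KiB, sizeof(PartitionPage) = 32 bytes, and a
+// release build where kExtraAllocSize = 0 -- all illustrative assumptions):
+// 16384 / 32 = 512 metadata entries fit in one page, plus 1 for the
+// always-active page, so the loops above churn 513 full pages.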
+
+// Test a large series of allocations that cross more than one underlying
+// super page allocation.
+TEST_F(PartitionAllocTest, MultiPageAllocs) {
+  // This is guaranteed to cross a super page boundary because the first
+  // partition page "slot" will be taken up by a guard page.
+  size_t numPagesNeeded = kNumPartitionPagesPerSuperPage;
+  // The super page should begin and end in a guard, so we need one less page
+  // in order to allocate a single page in the new super page.
+  --numPagesNeeded;
+
+  EXPECT_GT(numPagesNeeded, 1u);
+  auto pages = std::make_unique<PartitionPage* []>(numPagesNeeded);
+  uintptr_t firstSuperPageBase = 0;
+  size_t i;
+  for (i = 0; i < numPagesNeeded; ++i) {
+    pages[i] = GetFullPage(kTestAllocSize);
+    void* storagePtr = PartitionPage::ToPointer(pages[i]);
+    if (!i)
+      firstSuperPageBase =
+          reinterpret_cast<uintptr_t>(storagePtr) & kSuperPageBaseMask;
+    if (i == numPagesNeeded - 1) {
+      uintptr_t secondSuperPageBase =
+          reinterpret_cast<uintptr_t>(storagePtr) & kSuperPageBaseMask;
+      uintptr_t secondSuperPageOffset =
+          reinterpret_cast<uintptr_t>(storagePtr) & kSuperPageOffsetMask;
+      EXPECT_FALSE(secondSuperPageBase == firstSuperPageBase);
+      // Check that we allocated a guard page for the second page.
+      EXPECT_EQ(kPartitionPageSize, secondSuperPageOffset);
+    }
+  }
+  for (i = 0; i < numPagesNeeded; ++i)
+    FreeFullPage(pages[i]);
+}
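+
+// Worked example, assuming kSuperPageSize = 2 MiB and kPartitionPageSize =
+// 16 KiB (illustrative values): kNumPartitionPagesPerSuperPage = 128, so
+// numPagesNeeded = 127. The first super page holds 126 data pages between
+// its leading and trailing guard pages, so the 127th allocation spills into
+// a second super page, which again opens with a guard -- hence the expected
+// kPartitionPageSize offset for the final allocation.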
+
+// Test the generic allocation functions that can handle arbitrary sizes and
+// reallocing etc.
+TEST_F(PartitionAllocTest, GenericAlloc) {
+  void* ptr = generic_allocator.root()->Alloc(1, type_name);
+  EXPECT_TRUE(ptr);
+  generic_allocator.root()->Free(ptr);
+  ptr = generic_allocator.root()->Alloc(kGenericMaxBucketed + 1, type_name);
+  EXPECT_TRUE(ptr);
+  generic_allocator.root()->Free(ptr);
+
+  ptr = generic_allocator.root()->Alloc(1, type_name);
+  EXPECT_TRUE(ptr);
+  void* origPtr = ptr;
+  char* charPtr = static_cast<char*>(ptr);
+  *charPtr = 'A';
+
+  // Change the size of the realloc, remaining inside the same bucket.
+  void* newPtr = generic_allocator.root()->Realloc(ptr, 2, type_name);
+  EXPECT_EQ(ptr, newPtr);
+  newPtr = generic_allocator.root()->Realloc(ptr, 1, type_name);
+  EXPECT_EQ(ptr, newPtr);
+  newPtr =
+      generic_allocator.root()->Realloc(ptr, kGenericSmallestBucket, type_name);
+  EXPECT_EQ(ptr, newPtr);
+
+  // Change the size of the realloc, switching buckets.
+  newPtr = generic_allocator.root()->Realloc(ptr, kGenericSmallestBucket + 1,
+                                             type_name);
+  EXPECT_NE(newPtr, ptr);
+  // Check that the realloc copied correctly.
+  char* newCharPtr = static_cast<char*>(newPtr);
+  EXPECT_EQ(*newCharPtr, 'A');
+#if DCHECK_IS_ON()
+  // Subtle: this checks for an old bug where we copied too much from the
+  // source of the realloc. The bug is detectable when the copy trashes the
+  // uninitialized byte just past the old size in the upsized allocation.
+  EXPECT_EQ(kUninitializedByte,
+            static_cast<unsigned char>(*(newCharPtr + kGenericSmallestBucket)));
+#endif
+  *newCharPtr = 'B';
+  // The realloc moved. To check that the old allocation was freed, we can
+  // do an alloc of the old allocation size and check that the old allocation
+  // address is at the head of the freelist and reused.
+  void* reusedPtr = generic_allocator.root()->Alloc(1, type_name);
+  EXPECT_EQ(reusedPtr, origPtr);
+  generic_allocator.root()->Free(reusedPtr);
+
+  // Downsize the realloc.
+  ptr = newPtr;
+  newPtr = generic_allocator.root()->Realloc(ptr, 1, type_name);
+  EXPECT_EQ(newPtr, origPtr);
+  newCharPtr = static_cast<char*>(newPtr);
+  EXPECT_EQ(*newCharPtr, 'B');
+  *newCharPtr = 'C';
+
+  // Upsize the realloc to outside the partition.
+  ptr = newPtr;
+  newPtr = generic_allocator.root()->Realloc(ptr, kGenericMaxBucketed + 1,
+                                             type_name);
+  EXPECT_NE(newPtr, ptr);
+  newCharPtr = static_cast<char*>(newPtr);
+  EXPECT_EQ(*newCharPtr, 'C');
+  *newCharPtr = 'D';
+
+  // Upsize and downsize the realloc, remaining outside the partition.
+  ptr = newPtr;
+  newPtr = generic_allocator.root()->Realloc(ptr, kGenericMaxBucketed * 10,
+                                             type_name);
+  newCharPtr = static_cast<char*>(newPtr);
+  EXPECT_EQ(*newCharPtr, 'D');
+  *newCharPtr = 'E';
+  ptr = newPtr;
+  newPtr = generic_allocator.root()->Realloc(ptr, kGenericMaxBucketed * 2,
+                                             type_name);
+  newCharPtr = static_cast<char*>(newPtr);
+  EXPECT_EQ(*newCharPtr, 'E');
+  *newCharPtr = 'F';
+
+  // Downsize the realloc to inside the partition.
+  ptr = newPtr;
+  newPtr = generic_allocator.root()->Realloc(ptr, 1, type_name);
+  EXPECT_NE(newPtr, ptr);
+  EXPECT_EQ(newPtr, origPtr);
+  newCharPtr = static_cast<char*>(newPtr);
+  EXPECT_EQ(*newCharPtr, 'F');
+
+  generic_allocator.root()->Free(newPtr);
+}
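+
+// Summarizing the Realloc() behavior exercised above (as asserted by this
+// test rather than a documented contract): resizing within the same bucket
+// returns the same pointer; switching buckets, or crossing
+// kGenericMaxBucketed into or out of direct-mapped storage, may relocate the
+// allocation, preserving contents up to the smaller of the old and new
+// sizes.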
+
+// Test the generic allocation functions can handle some specific sizes of
+// interest.
+TEST_F(PartitionAllocTest, GenericAllocSizes) {
+  void* ptr = generic_allocator.root()->Alloc(0, type_name);
+  EXPECT_TRUE(ptr);
+  generic_allocator.root()->Free(ptr);
+
+  // kPartitionPageSize is interesting because it results in just one
+  // allocation per page, which tripped up some corner cases.
+  size_t size = kPartitionPageSize - kExtraAllocSize;
+  ptr = generic_allocator.root()->Alloc(size, type_name);
+  EXPECT_TRUE(ptr);
+  void* ptr2 = generic_allocator.root()->Alloc(size, type_name);
+  EXPECT_TRUE(ptr2);
+  generic_allocator.root()->Free(ptr);
+  // Should be freeable at this point.
+  PartitionPage* page =
+      PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr));
+  EXPECT_NE(-1, page->empty_cache_index);
+  generic_allocator.root()->Free(ptr2);
+
+  size = (((kPartitionPageSize * kMaxPartitionPagesPerSlotSpan) -
+           kSystemPageSize) /
+          2) -
+         kExtraAllocSize;
+  ptr = generic_allocator.root()->Alloc(size, type_name);
+  EXPECT_TRUE(ptr);
+  memset(ptr, 'A', size);
+  ptr2 = generic_allocator.root()->Alloc(size, type_name);
+  EXPECT_TRUE(ptr2);
+  void* ptr3 = generic_allocator.root()->Alloc(size, type_name);
+  EXPECT_TRUE(ptr3);
+  void* ptr4 = generic_allocator.root()->Alloc(size, type_name);
+  EXPECT_TRUE(ptr4);
+
+  page = PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr));
+  PartitionPage* page2 =
+      PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr3));
+  EXPECT_NE(page, page2);
+
+  generic_allocator.root()->Free(ptr);
+  generic_allocator.root()->Free(ptr3);
+  generic_allocator.root()->Free(ptr2);
+  // Should be freeable at this point.
+  EXPECT_NE(-1, page->empty_cache_index);
+  EXPECT_EQ(0, page->num_allocated_slots);
+  EXPECT_EQ(0, page->num_unprovisioned_slots);
+  void* newPtr = generic_allocator.root()->Alloc(size, type_name);
+  EXPECT_EQ(ptr3, newPtr);
+  newPtr = generic_allocator.root()->Alloc(size, type_name);
+  EXPECT_EQ(ptr2, newPtr);
+#if defined(OS_LINUX) && !DCHECK_IS_ON()
+  // On Linux, we have a guarantee that freelisting a page should cause its
+  // contents to be nulled out. We check for null here to detect a bug we
+  // had where a large slot size was causing us to not properly free all
+  // resources back to the system.
+  // We only run the check when asserts are disabled because when they are
+  // enabled, the allocated area is overwritten with an "uninitialized"
+  // byte pattern.
+  EXPECT_EQ(0, *(reinterpret_cast<char*>(newPtr) + (size - 1)));
+#endif
+  generic_allocator.root()->Free(newPtr);
+  generic_allocator.root()->Free(ptr3);
+  generic_allocator.root()->Free(ptr4);
+
+  // Can we allocate a massive (512MB) size?
+  // Allocate 512MB, but +1, to test for cookie writing alignment issues.
+  // Test this only if the device has enough memory or it might fail due
+  // to OOM.
+  if (IsLargeMemoryDevice()) {
+    ptr = generic_allocator.root()->Alloc(512 * 1024 * 1024 + 1, type_name);
+    generic_allocator.root()->Free(ptr);
+  }
+
+  // Check a more reasonable, but still direct mapped, size.
+  // Chop a system page and a byte off to test for rounding errors.
+  size = 20 * 1024 * 1024;
+  size -= kSystemPageSize;
+  size -= 1;
+  ptr = generic_allocator.root()->Alloc(size, type_name);
+  char* charPtr = reinterpret_cast<char*>(ptr);
+  *(charPtr + (size - 1)) = 'A';
+  generic_allocator.root()->Free(ptr);
+
+  // Can we free null?
+  generic_allocator.root()->Free(nullptr);
+
+  // Do we correctly get a null for a failed allocation?
+  EXPECT_EQ(nullptr, PartitionAllocGenericFlags(
+                         generic_allocator.root(), PartitionAllocReturnNull,
+                         3u * 1024 * 1024 * 1024, type_name));
+}
+
+// Test that we can fetch the real allocated size after an allocation.
+TEST_F(PartitionAllocTest, GenericAllocGetSize) {
+  void* ptr;
+  size_t requested_size, actual_size, predicted_size;
+
+  EXPECT_TRUE(PartitionAllocSupportsGetSize());
+
+  // Allocate something small.
+  requested_size = 511 - kExtraAllocSize;
+  predicted_size = generic_allocator.root()->ActualSize(requested_size);
+  ptr = generic_allocator.root()->Alloc(requested_size, type_name);
+  EXPECT_TRUE(ptr);
+  actual_size = PartitionAllocGetSize(ptr);
+  EXPECT_EQ(predicted_size, actual_size);
+  EXPECT_LT(requested_size, actual_size);
+  generic_allocator.root()->Free(ptr);
+
+  // Allocate a size that should be a perfect match for a bucket, because it
+  // is an exact power of 2.
+  requested_size = (256 * 1024) - kExtraAllocSize;
+  predicted_size = generic_allocator.root()->ActualSize(requested_size);
+  ptr = generic_allocator.root()->Alloc(requested_size, type_name);
+  EXPECT_TRUE(ptr);
+  actual_size = PartitionAllocGetSize(ptr);
+  EXPECT_EQ(predicted_size, actual_size);
+  EXPECT_EQ(requested_size, actual_size);
+  generic_allocator.root()->Free(ptr);
+
+  // Allocate a size that is a system page smaller than a bucket. GetSize()
+  // should return a larger size than we asked for now.
+  size_t num = 64;
+  while (num * kSystemPageSize >= 1024 * 1024) {
+    num /= 2;
+  }
+  requested_size = num * kSystemPageSize - kSystemPageSize - kExtraAllocSize;
+  predicted_size = generic_allocator.root()->ActualSize(requested_size);
+  ptr = generic_allocator.root()->Alloc(requested_size, type_name);
+  EXPECT_TRUE(ptr);
+  actual_size = PartitionAllocGetSize(ptr);
+  EXPECT_EQ(predicted_size, actual_size);
+  EXPECT_EQ(requested_size + kSystemPageSize, actual_size);
+  // Check that we can write at the end of the reported size too.
+  char* charPtr = reinterpret_cast<char*>(ptr);
+  *(charPtr + (actual_size - 1)) = 'A';
+  generic_allocator.root()->Free(ptr);
+
+  // Allocate something very large, and uneven.
+  if (IsLargeMemoryDevice()) {
+    requested_size = 512 * 1024 * 1024 - 1;
+    predicted_size = generic_allocator.root()->ActualSize(requested_size);
+    ptr = generic_allocator.root()->Alloc(requested_size, type_name);
+    EXPECT_TRUE(ptr);
+    actual_size = PartitionAllocGetSize(ptr);
+    EXPECT_EQ(predicted_size, actual_size);
+    EXPECT_LT(requested_size, actual_size);
+    generic_allocator.root()->Free(ptr);
+  }
+
+  // Too large allocation.
+  requested_size = kGenericMaxDirectMapped + 1;
+  predicted_size = generic_allocator.root()->ActualSize(requested_size);
+  EXPECT_EQ(requested_size, predicted_size);
+}
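+
+// A worked instance of the rounding above, assuming kSystemPageSize = 4 KiB
+// (so num stays 64 and the while loop never runs; an illustrative value):
+// requested_size is 63 system pages minus extras, one page short of the
+// 256 KiB bucket, and ActualSize() reports the full 64-page slot minus
+// extras -- exactly kSystemPageSize more than requested, as asserted.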
+
+// Test the realloc() contract.
+TEST_F(PartitionAllocTest, Realloc) {
+  // realloc(0, size) should be equivalent to malloc().
+  void* ptr =
+      generic_allocator.root()->Realloc(nullptr, kTestAllocSize, type_name);
+  memset(ptr, 'A', kTestAllocSize);
+  PartitionPage* page =
+      PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr));
+  // realloc(ptr, 0) should be equivalent to free().
+  void* ptr2 = generic_allocator.root()->Realloc(ptr, 0, type_name);
+  EXPECT_EQ(nullptr, ptr2);
+  EXPECT_EQ(PartitionCookieFreePointerAdjust(ptr), page->freelist_head);
+
+  // Test that growing an allocation with realloc() copies everything from the
+  // old allocation.
+  size_t size = kSystemPageSize - kExtraAllocSize;
+  EXPECT_EQ(size, generic_allocator.root()->ActualSize(size));
+  ptr = generic_allocator.root()->Alloc(size, type_name);
+  memset(ptr, 'A', size);
+  ptr2 = generic_allocator.root()->Realloc(ptr, size + 1, type_name);
+  EXPECT_NE(ptr, ptr2);
+  char* charPtr2 = static_cast<char*>(ptr2);
+  EXPECT_EQ('A', charPtr2[0]);
+  EXPECT_EQ('A', charPtr2[size - 1]);
+#if DCHECK_IS_ON()
+  EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(charPtr2[size]));
+#endif
+
+  // Test that shrinking an allocation with realloc() also copies everything
+  // from the old allocation.
+  ptr = generic_allocator.root()->Realloc(ptr2, size - 1, type_name);
+  EXPECT_NE(ptr2, ptr);
+  char* charPtr = static_cast<char*>(ptr);
+  EXPECT_EQ('A', charPtr[0]);
+  EXPECT_EQ('A', charPtr[size - 2]);
+#if DCHECK_IS_ON()
+  EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(charPtr[size - 1]));
+#endif
+
+  generic_allocator.root()->Free(ptr);
+
+  // Test that shrinking a direct mapped allocation happens in-place.
+  size = kGenericMaxBucketed + 16 * kSystemPageSize;
+  ptr = generic_allocator.root()->Alloc(size, type_name);
+  size_t actual_size = PartitionAllocGetSize(ptr);
+  ptr2 = generic_allocator.root()->Realloc(
+      ptr, kGenericMaxBucketed + 8 * kSystemPageSize, type_name);
+  EXPECT_EQ(ptr, ptr2);
+  EXPECT_EQ(actual_size - 8 * kSystemPageSize, PartitionAllocGetSize(ptr2));
+
+  // Test that a previously in-place shrunk direct mapped allocation can be
+  // expanded up again within its original size.
+  ptr = generic_allocator.root()->Realloc(ptr2, size - kSystemPageSize,
+                                          type_name);
+  EXPECT_EQ(ptr2, ptr);
+  EXPECT_EQ(actual_size - kSystemPageSize, PartitionAllocGetSize(ptr));
+
+  // Test that a direct mapped allocation is performed not in-place when the
+  // new size is small enough.
+  ptr2 = generic_allocator.root()->Realloc(ptr, kSystemPageSize, type_name);
+  EXPECT_NE(ptr, ptr2);
+
+  generic_allocator.root()->Free(ptr2);
+}
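+
+// The direct-map reallocs above pin down an observed fast path (not a
+// guaranteed contract): shrinking a direct-mapped allocation releases tail
+// pages in place, regrowing within the original reservation also stays in
+// place, and only dropping to a bucketed size forces the data to move.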
+
+// Tests the handing out of freelists for partial pages.
+TEST_F(PartitionAllocTest, PartialPageFreelists) {
+  size_t big_size = allocator.root()->max_allocation - kExtraAllocSize;
+  EXPECT_EQ(kSystemPageSize - kAllocationGranularity,
+            big_size + kExtraAllocSize);
+  size_t bucket_index = (big_size + kExtraAllocSize) >> kBucketShift;
+  PartitionBucket* bucket = &allocator.root()->buckets()[bucket_index];
+  EXPECT_EQ(nullptr, bucket->empty_pages_head);
+
+  void* ptr = allocator.root()->Alloc(big_size, type_name);
+  EXPECT_TRUE(ptr);
+
+  PartitionPage* page =
+      PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr));
+  size_t totalSlots =
+      (page->bucket->num_system_pages_per_slot_span * kSystemPageSize) /
+      (big_size + kExtraAllocSize);
+  EXPECT_EQ(4u, totalSlots);
+  // The freelist should have one entry, because we were able to exactly fit
+  // one object slot and one freelist pointer (the null that the head points
+  // to) into a system page.
+  EXPECT_TRUE(page->freelist_head);
+  EXPECT_EQ(1, page->num_allocated_slots);
+  EXPECT_EQ(2, page->num_unprovisioned_slots);
+
+  void* ptr2 = allocator.root()->Alloc(big_size, type_name);
+  EXPECT_TRUE(ptr2);
+  EXPECT_FALSE(page->freelist_head);
+  EXPECT_EQ(2, page->num_allocated_slots);
+  EXPECT_EQ(2, page->num_unprovisioned_slots);
+
+  void* ptr3 = allocator.root()->Alloc(big_size, type_name);
+  EXPECT_TRUE(ptr3);
+  EXPECT_TRUE(page->freelist_head);
+  EXPECT_EQ(3, page->num_allocated_slots);
+  EXPECT_EQ(0, page->num_unprovisioned_slots);
+
+  void* ptr4 = allocator.root()->Alloc(big_size, type_name);
+  EXPECT_TRUE(ptr4);
+  EXPECT_FALSE(page->freelist_head);
+  EXPECT_EQ(4, page->num_allocated_slots);
+  EXPECT_EQ(0, page->num_unprovisioned_slots);
+
+  void* ptr5 = allocator.root()->Alloc(big_size, type_name);
+  EXPECT_TRUE(ptr5);
+
+  PartitionPage* page2 =
+      PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr5));
+  EXPECT_EQ(1, page2->num_allocated_slots);
+
+  // Churn things a little whilst there's a partial page freelist.
+  PartitionFree(ptr);
+  ptr = allocator.root()->Alloc(big_size, type_name);
+  void* ptr6 = allocator.root()->Alloc(big_size, type_name);
+
+  PartitionFree(ptr);
+  PartitionFree(ptr2);
+  PartitionFree(ptr3);
+  PartitionFree(ptr4);
+  PartitionFree(ptr5);
+  PartitionFree(ptr6);
+  EXPECT_NE(-1, page->empty_cache_index);
+  EXPECT_NE(-1, page2->empty_cache_index);
+  EXPECT_TRUE(page2->freelist_head);
+  EXPECT_EQ(0, page2->num_allocated_slots);
+
+  // And test a couple of sizes that do not cross kSystemPageSize with a single
+  // allocation.
+  size_t mediumSize = (kSystemPageSize / 2) - kExtraAllocSize;
+  bucket_index = (mediumSize + kExtraAllocSize) >> kBucketShift;
+  bucket = &allocator.root()->buckets()[bucket_index];
+  EXPECT_EQ(nullptr, bucket->empty_pages_head);
+
+  ptr = allocator.root()->Alloc(mediumSize, type_name);
+  EXPECT_TRUE(ptr);
+  page = PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr));
+  EXPECT_EQ(1, page->num_allocated_slots);
+  totalSlots =
+      (page->bucket->num_system_pages_per_slot_span * kSystemPageSize) /
+      (mediumSize + kExtraAllocSize);
+  size_t firstPageSlots = kSystemPageSize / (mediumSize + kExtraAllocSize);
+  EXPECT_EQ(2u, firstPageSlots);
+  EXPECT_EQ(totalSlots - firstPageSlots, page->num_unprovisioned_slots);
+
+  PartitionFree(ptr);
+
+  size_t smallSize = (kSystemPageSize / 4) - kExtraAllocSize;
+  bucket_index = (smallSize + kExtraAllocSize) >> kBucketShift;
+  bucket = &allocator.root()->buckets()[bucket_index];
+  EXPECT_EQ(nullptr, bucket->empty_pages_head);
+
+  ptr = allocator.root()->Alloc(smallSize, type_name);
+  EXPECT_TRUE(ptr);
+  page = PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr));
+  EXPECT_EQ(1, page->num_allocated_slots);
+  totalSlots =
+      (page->bucket->num_system_pages_per_slot_span * kSystemPageSize) /
+      (smallSize + kExtraAllocSize);
+  firstPageSlots = kSystemPageSize / (smallSize + kExtraAllocSize);
+  EXPECT_EQ(totalSlots - firstPageSlots, page->num_unprovisioned_slots);
+
+  PartitionFree(ptr);
+  EXPECT_TRUE(page->freelist_head);
+  EXPECT_EQ(0, page->num_allocated_slots);
+
+  size_t verySmallSize = 32 - kExtraAllocSize;
+  bucket_index = (verySmallSize + kExtraAllocSize) >> kBucketShift;
+  bucket = &allocator.root()->buckets()[bucket_index];
+  EXPECT_EQ(nullptr, bucket->empty_pages_head);
+
+  ptr = allocator.root()->Alloc(verySmallSize, type_name);
+  EXPECT_TRUE(ptr);
+  page = PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr));
+  EXPECT_EQ(1, page->num_allocated_slots);
+  totalSlots =
+      (page->bucket->num_system_pages_per_slot_span * kSystemPageSize) /
+      (verySmallSize + kExtraAllocSize);
+  firstPageSlots = kSystemPageSize / (verySmallSize + kExtraAllocSize);
+  EXPECT_EQ(totalSlots - firstPageSlots, page->num_unprovisioned_slots);
+
+  PartitionFree(ptr);
+  EXPECT_TRUE(page->freelist_head);
+  EXPECT_EQ(0, page->num_allocated_slots);
+
+  // And try an allocation size (against the generic allocator) that is
+  // larger than a system page.
+  size_t pageAndAHalfSize =
+      (kSystemPageSize + (kSystemPageSize / 2)) - kExtraAllocSize;
+  ptr = generic_allocator.root()->Alloc(pageAndAHalfSize, type_name);
+  EXPECT_TRUE(ptr);
+  page = PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr));
+  EXPECT_EQ(1, page->num_allocated_slots);
+  EXPECT_TRUE(page->freelist_head);
+  totalSlots =
+      (page->bucket->num_system_pages_per_slot_span * kSystemPageSize) /
+      (pageAndAHalfSize + kExtraAllocSize);
+  EXPECT_EQ(totalSlots - 2, page->num_unprovisioned_slots);
+  generic_allocator.root()->Free(ptr);
+
+  // And then make sure that exactly the page size only faults one page.
+  size_t pageSize = kSystemPageSize - kExtraAllocSize;
+  ptr = generic_allocator.root()->Alloc(pageSize, type_name);
+  EXPECT_TRUE(ptr);
+  page = PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr));
+  EXPECT_EQ(1, page->num_allocated_slots);
+  EXPECT_FALSE(page->freelist_head);
+  totalSlots =
+      (page->bucket->num_system_pages_per_slot_span * kSystemPageSize) /
+      (pageSize + kExtraAllocSize);
+  EXPECT_EQ(totalSlots - 1, page->num_unprovisioned_slots);
+  generic_allocator.root()->Free(ptr);
+}
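+
+// Worked numbers for the big_size case above, assuming a 64-bit build where
+// kSystemPageSize = 4 KiB and kAllocationGranularity = sizeof(void*) = 8
+// (illustrative assumptions): big_size + kExtraAllocSize = 4088, so one slot
+// plus a freelist pointer fit in a system page, and a 4-system-page slot
+// span yields totalSlots = 16384 / 4088 = 4. Slots are provisioned one
+// system page at a time, which is what the num_unprovisioned_slots
+// expectations trace.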
+
+// Test some of the fragmentation-resistant properties of the allocator.
+TEST_F(PartitionAllocTest, PageRefilling) {
+  PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];
+
+  // Grab two full pages and a non-full page.
+  PartitionPage* page1 = GetFullPage(kTestAllocSize);
+  PartitionPage* page2 = GetFullPage(kTestAllocSize);
+  void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
+  EXPECT_TRUE(ptr);
+  EXPECT_NE(page1, bucket->active_pages_head);
+  EXPECT_NE(page2, bucket->active_pages_head);
+  PartitionPage* page =
+      PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr));
+  EXPECT_EQ(1, page->num_allocated_slots);
+
+  // Work out a pointer into page2 and free it; and then page1 and free it.
+  char* ptr2 =
+      reinterpret_cast<char*>(PartitionPage::ToPointer(page1)) + kPointerOffset;
+  PartitionFree(ptr2);
+  ptr2 =
+      reinterpret_cast<char*>(PartitionPage::ToPointer(page2)) + kPointerOffset;
+  PartitionFree(ptr2);
+
+  // If we perform two allocations from the same bucket now, we expect to
+  // refill both the nearly full pages.
+  (void)allocator.root()->Alloc(kTestAllocSize, type_name);
+  (void)allocator.root()->Alloc(kTestAllocSize, type_name);
+  EXPECT_EQ(1, page->num_allocated_slots);
+
+  FreeFullPage(page2);
+  FreeFullPage(page1);
+  PartitionFree(ptr);
+}
+
+// Basic tests to ensure that allocations work for partial page buckets.
+TEST_F(PartitionAllocTest, PartialPages) {
+  // Find a size that is backed by a partial partition page.
+  size_t size = sizeof(void*);
+  PartitionBucket* bucket = nullptr;
+  while (size < kTestMaxAllocation) {
+    bucket = &allocator.root()->buckets()[size >> kBucketShift];
+    if (bucket->num_system_pages_per_slot_span %
+        kNumSystemPagesPerPartitionPage)
+      break;
+    size += sizeof(void*);
+  }
+  EXPECT_LT(size, kTestMaxAllocation);
+
+  PartitionPage* page1 = GetFullPage(size);
+  PartitionPage* page2 = GetFullPage(size);
+  FreeFullPage(page2);
+  FreeFullPage(page1);
+}
+
+// Test correct handling if our mapping collides with another.
+TEST_F(PartitionAllocTest, MappingCollision) {
+  // The -2 is because the first and last partition pages in a super page are
+  // guard pages.
+  size_t numPartitionPagesNeeded = kNumPartitionPagesPerSuperPage - 2;
+  auto firstSuperPagePages =
+      std::make_unique<PartitionPage* []>(numPartitionPagesNeeded);
+  auto secondSuperPagePages =
+      std::make_unique<PartitionPage* []>(numPartitionPagesNeeded);
+
+  size_t i;
+  for (i = 0; i < numPartitionPagesNeeded; ++i)
+    firstSuperPagePages[i] = GetFullPage(kTestAllocSize);
+
+  char* pageBase =
+      reinterpret_cast<char*>(PartitionPage::ToPointer(firstSuperPagePages[0]));
+  EXPECT_EQ(kPartitionPageSize,
+            reinterpret_cast<uintptr_t>(pageBase) & kSuperPageOffsetMask);
+  pageBase -= kPartitionPageSize;
+  // Map a single system page either side of the mapping for our allocations,
+  // with the goal of tripping up alignment of the next mapping.
+  void* map1 = AllocPages(pageBase - kPageAllocationGranularity,
+                          kPageAllocationGranularity,
+                          kPageAllocationGranularity, PageInaccessible);
+  EXPECT_TRUE(map1);
+  void* map2 = AllocPages(pageBase + kSuperPageSize, kPageAllocationGranularity,
+                          kPageAllocationGranularity, PageInaccessible);
+  EXPECT_TRUE(map2);
+
+  for (i = 0; i < numPartitionPagesNeeded; ++i)
+    secondSuperPagePages[i] = GetFullPage(kTestAllocSize);
+
+  FreePages(map1, kPageAllocationGranularity);
+  FreePages(map2, kPageAllocationGranularity);
+
+  pageBase = reinterpret_cast<char*>(
+      PartitionPage::ToPointer(secondSuperPagePages[0]));
+  EXPECT_EQ(kPartitionPageSize,
+            reinterpret_cast<uintptr_t>(pageBase) & kSuperPageOffsetMask);
+  pageBase -= kPartitionPageSize;
+  // Map a single system page either side of the mapping for our allocations,
+  // with the goal of tripping up alignment of the next mapping.
+  map1 = AllocPages(pageBase - kPageAllocationGranularity,
+                    kPageAllocationGranularity, kPageAllocationGranularity,
+                    PageReadWrite);
+  EXPECT_TRUE(map1);
+  map2 = AllocPages(pageBase + kSuperPageSize, kPageAllocationGranularity,
+                    kPageAllocationGranularity, PageReadWrite);
+  EXPECT_TRUE(map2);
+  EXPECT_TRUE(
+      SetSystemPagesAccess(map1, kPageAllocationGranularity, PageInaccessible));
+  EXPECT_TRUE(
+      SetSystemPagesAccess(map2, kPageAllocationGranularity, PageInaccessible));
+
+  PartitionPage* pageInThirdSuperPage = GetFullPage(kTestAllocSize);
+  FreePages(map1, kPageAllocationGranularity);
+  FreePages(map2, kPageAllocationGranularity);
+
+  EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(
+                    PartitionPage::ToPointer(pageInThirdSuperPage)) &
+                    kPartitionPageOffsetMask);
+
+  // And make sure we really did get a page in a new superpage.
+  EXPECT_NE(reinterpret_cast<uintptr_t>(
+                PartitionPage::ToPointer(firstSuperPagePages[0])) &
+                kSuperPageBaseMask,
+            reinterpret_cast<uintptr_t>(
+                PartitionPage::ToPointer(pageInThirdSuperPage)) &
+                kSuperPageBaseMask);
+  EXPECT_NE(reinterpret_cast<uintptr_t>(
+                PartitionPage::ToPointer(secondSuperPagePages[0])) &
+                kSuperPageBaseMask,
+            reinterpret_cast<uintptr_t>(
+                PartitionPage::ToPointer(pageInThirdSuperPage)) &
+                kSuperPageBaseMask);
+
+  FreeFullPage(pageInThirdSuperPage);
+  for (i = 0; i < numPartitionPagesNeeded; ++i) {
+    FreeFullPage(firstSuperPagePages[i]);
+    FreeFullPage(secondSuperPagePages[i]);
+  }
+}
+
+// Tests that pages in the free page cache do get freed as appropriate.
+TEST_F(PartitionAllocTest, FreeCache) {
+  EXPECT_EQ(0U, allocator.root()->total_size_of_committed_pages);
+
+  size_t big_size = allocator.root()->max_allocation - kExtraAllocSize;
+  size_t bucket_index = (big_size + kExtraAllocSize) >> kBucketShift;
+  PartitionBucket* bucket = &allocator.root()->buckets()[bucket_index];
+
+  void* ptr = allocator.root()->Alloc(big_size, type_name);
+  EXPECT_TRUE(ptr);
+  PartitionPage* page =
+      PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr));
+  EXPECT_EQ(nullptr, bucket->empty_pages_head);
+  EXPECT_EQ(1, page->num_allocated_slots);
+  EXPECT_EQ(kPartitionPageSize,
+            allocator.root()->total_size_of_committed_pages);
+  PartitionFree(ptr);
+  EXPECT_EQ(0, page->num_allocated_slots);
+  EXPECT_NE(-1, page->empty_cache_index);
+  EXPECT_TRUE(page->freelist_head);
+
+  CycleFreeCache(kTestAllocSize);
+
+  // Flushing the cache should have really freed the unused page.
+  EXPECT_FALSE(page->freelist_head);
+  EXPECT_EQ(-1, page->empty_cache_index);
+  EXPECT_EQ(0, page->num_allocated_slots);
+  PartitionBucket* cycle_free_cache_bucket =
+      &allocator.root()->buckets()[kTestBucketIndex];
+  EXPECT_EQ(
+      cycle_free_cache_bucket->num_system_pages_per_slot_span * kSystemPageSize,
+      allocator.root()->total_size_of_committed_pages);
+
+  // Check that an allocation works OK whilst in this state (a freed page
+  // as the active pages head).
+  ptr = allocator.root()->Alloc(big_size, type_name);
+  EXPECT_FALSE(bucket->empty_pages_head);
+  PartitionFree(ptr);
+
+  // Also check that a page that is bouncing immediately between empty and
+  // used does not get freed.
+  for (size_t i = 0; i < kMaxFreeableSpans * 2; ++i) {
+    ptr = allocator.root()->Alloc(big_size, type_name);
+    EXPECT_TRUE(page->freelist_head);
+    PartitionFree(ptr);
+    EXPECT_TRUE(page->freelist_head);
+  }
+  EXPECT_EQ(kPartitionPageSize,
+            allocator.root()->total_size_of_committed_pages);
+}
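+
+// A sketch of the mechanism above: empty pages sit in a fixed-size ring and
+// are only decommitted when a purge cycle reaches them, so a page bouncing
+// straight back into use never reaches the decommit step, and committed-page
+// accounting stays at one partition page through the loop, as the final
+// expectation checks. (Description inferred from the behavior this test
+// asserts.)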
+
+// Tests for a bug we had with losing references to free pages.
+TEST_F(PartitionAllocTest, LostFreePagesBug) {
+  size_t size = kPartitionPageSize - kExtraAllocSize;
+
+  void* ptr = generic_allocator.root()->Alloc(size, type_name);
+  EXPECT_TRUE(ptr);
+  void* ptr2 = generic_allocator.root()->Alloc(size, type_name);
+  EXPECT_TRUE(ptr2);
+
+  PartitionPage* page =
+      PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr));
+  PartitionPage* page2 =
+      PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr2));
+  PartitionBucket* bucket = page->bucket;
+
+  EXPECT_EQ(nullptr, bucket->empty_pages_head);
+  EXPECT_EQ(-1, page->num_allocated_slots);
+  EXPECT_EQ(1, page2->num_allocated_slots);
+
+  generic_allocator.root()->Free(ptr);
+  generic_allocator.root()->Free(ptr2);
+
+  EXPECT_TRUE(bucket->empty_pages_head);
+  EXPECT_TRUE(bucket->empty_pages_head->next_page);
+  EXPECT_EQ(0, page->num_allocated_slots);
+  EXPECT_EQ(0, page2->num_allocated_slots);
+  EXPECT_TRUE(page->freelist_head);
+  EXPECT_TRUE(page2->freelist_head);
+
+  CycleGenericFreeCache(kTestAllocSize);
+
+  EXPECT_FALSE(page->freelist_head);
+  EXPECT_FALSE(page2->freelist_head);
+
+  EXPECT_TRUE(bucket->empty_pages_head);
+  EXPECT_TRUE(bucket->empty_pages_head->next_page);
+  EXPECT_EQ(PartitionPage::get_sentinel_page(), bucket->active_pages_head);
+
+  // At this moment, we have two decommitted pages, on the empty list.
+  ptr = generic_allocator.root()->Alloc(size, type_name);
+  EXPECT_TRUE(ptr);
+  generic_allocator.root()->Free(ptr);
+
+  EXPECT_EQ(PartitionPage::get_sentinel_page(), bucket->active_pages_head);
+  EXPECT_TRUE(bucket->empty_pages_head);
+  EXPECT_TRUE(bucket->decommitted_pages_head);
+
+  CycleGenericFreeCache(kTestAllocSize);
+
+  // We're now set up to trigger a historical bug by scanning over the active
+  // pages list. The current code gets into a different state, but we'll keep
+  // the test as being an interesting corner case.
+  ptr = generic_allocator.root()->Alloc(size, type_name);
+  EXPECT_TRUE(ptr);
+  generic_allocator.root()->Free(ptr);
+
+  EXPECT_TRUE(bucket->active_pages_head);
+  EXPECT_TRUE(bucket->empty_pages_head);
+  EXPECT_TRUE(bucket->decommitted_pages_head);
+}
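+
+// The page states walked above, in order: active (holding allocations),
+// empty (freed, reachable via empty_pages_head), and decommitted (after
+// cache cycling, via decommitted_pages_head), with the sentinel page
+// standing in for "no active page". Losing track of pages along this path
+// was the historical bug this test guards against.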
+
+// Unit tests that check that when an allocation fails in "return null"
+// mode, repeating it doesn't crash and still returns null. The tests need to
+// stress memory subsystem limits to do so, hence they try to allocate
+// 6 GB of memory, each with a different per-allocation block size.
+//
+// On 64-bit systems we need to restrict the address space to force allocation
+// failure, so these tests run only on POSIX systems that provide setrlimit(),
+// and use it to limit address space to 6GB.
+//
+// Disable these tests on Android because, due to the allocation-heavy behavior,
+// they tend to get OOM-killed rather than pass.
+// TODO(https://crbug.com/779645): Fuchsia currently sets OS_POSIX, but does
+// not provide a working setrlimit().
+#if !defined(ARCH_CPU_64_BITS) || \
+    (defined(OS_POSIX) &&         \
+     !(defined(OS_FUCHSIA) || defined(OS_MACOSX) || defined(OS_ANDROID)))
+
+// This is defined as a separate test class because RepeatedReturnNull
+// test exhausts the process memory, and breaks any test in the same
+// class that runs after it.
+class PartitionAllocReturnNullTest : public PartitionAllocTest {};
+
+// Test "return null" for larger, direct-mapped allocations first. As a
+// direct-mapped allocation's pages are unmapped and freed on release, this
+// test is performed first for these "return null" tests in order to leave
+// sufficient unreserved virtual memory around for the later one(s).
+TEST_F(PartitionAllocReturnNullTest, RepeatedReturnNullDirect) {
+  // A direct-mapped allocation size.
+  DoReturnNullTest(32 * 1024 * 1024, false);
+}
+
+// Test "return null" with a 512 kB block size.
+TEST_F(PartitionAllocReturnNullTest, RepeatedReturnNull) {
+  // A single-slot but non-direct-mapped allocation size.
+  DoReturnNullTest(512 * 1024, false);
+}
+
+// Repeating the above tests using Realloc instead of Alloc.
+class PartitionReallocReturnNullTest : public PartitionAllocTest {};
+
+TEST_F(PartitionReallocReturnNullTest, RepeatedReturnNullDirect) {
+  DoReturnNullTest(32 * 1024 * 1024, true);
+}
+
+TEST_F(PartitionReallocReturnNullTest, RepeatedReturnNull) {
+  DoReturnNullTest(512 * 1024, true);
+}
+
+#endif  // !defined(ARCH_CPU_64_BITS) || (defined(OS_POSIX) &&
+        // !(defined(OS_FUCHSIA) || defined(OS_MACOSX) || defined(OS_ANDROID)))
+
+// Death tests misbehave on Android, http://crbug.com/643760.
+#if defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
+
+// Make sure that malloc(-1) dies.
+// In the past, we had an integer overflow that would alias malloc(-1) to
+// malloc(0), which is not good.
+TEST_F(PartitionAllocDeathTest, LargeAllocs) {
+  // Largest alloc.
+  EXPECT_DEATH(
+      generic_allocator.root()->Alloc(static_cast<size_t>(-1), type_name), "");
+  // And the smallest allocation we expect to die.
+  EXPECT_DEATH(
+      generic_allocator.root()->Alloc(kGenericMaxDirectMapped + 1, type_name),
+      "");
+}
+
+// Check that our immediate double-free detection works.
+TEST_F(PartitionAllocDeathTest, ImmediateDoubleFree) {
+  void* ptr = generic_allocator.root()->Alloc(kTestAllocSize, type_name);
+  EXPECT_TRUE(ptr);
+  generic_allocator.root()->Free(ptr);
+
+  EXPECT_DEATH(generic_allocator.root()->Free(ptr), "");
+}
+
+// Check that our refcount-based double-free detection works.
+TEST_F(PartitionAllocDeathTest, RefcountDoubleFree) {
+  void* ptr = generic_allocator.root()->Alloc(kTestAllocSize, type_name);
+  EXPECT_TRUE(ptr);
+  void* ptr2 = generic_allocator.root()->Alloc(kTestAllocSize, type_name);
+  EXPECT_TRUE(ptr2);
+  generic_allocator.root()->Free(ptr);
+  generic_allocator.root()->Free(ptr2);
+  // This is not an immediate double-free so our immediate detection won't
+  // fire. However, it does take the "refcount" of the partition page to -1,
+  // which is illegal and should be trapped.
+  EXPECT_DEATH(generic_allocator.root()->Free(ptr), "");
+}
+
+// Check that guard pages are present where expected.
+TEST_F(PartitionAllocDeathTest, GuardPages) {
+// PartitionAlloc adds kPartitionPageSize to the requested size
+// (for metadata), and then rounds that size to kPageAllocationGranularity.
+// To be able to reliably write one past a direct allocation, choose a size
+// that's
+// a) larger than kGenericMaxBucketed (to make the allocation direct)
+// b) aligned at kPageAllocationGranularity boundaries after
+//    kPartitionPageSize has been added to it.
+// (On 32-bit, PartitionAlloc adds another kSystemPageSize to the
+// allocation size before rounding, but there it marks the memory right
+// after size as inaccessible, so it's fine to write 1 past the size we
+// hand to PartitionAlloc and we don't need to worry about allocation
+// granularities.)
+#define ALIGN(N, A) (((N) + (A)-1) / (A) * (A))
+  const int kSize = ALIGN(kGenericMaxBucketed + 1 + kPartitionPageSize,
+                          kPageAllocationGranularity) -
+                    kPartitionPageSize;
+#undef ALIGN
+  static_assert(kSize > kGenericMaxBucketed,
+                "allocation not large enough for direct allocation");
+  size_t size = kSize - kExtraAllocSize;
+  void* ptr = generic_allocator.root()->Alloc(size, type_name);
+
+  EXPECT_TRUE(ptr);
+  char* charPtr = reinterpret_cast<char*>(ptr) - kPointerOffset;
+
+  EXPECT_DEATH(*(charPtr - 1) = 'A', "");
+  EXPECT_DEATH(*(charPtr + size + kExtraAllocSize) = 'A', "");
+
+  generic_allocator.root()->Free(ptr);
+}
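+
+// Worked instance of the ALIGN arithmetic, under assumed constants
+// kGenericMaxBucketed = 512 KiB, kPartitionPageSize = 16 KiB, and
+// kPageAllocationGranularity = 4 KiB (illustrative only):
+//   ALIGN(524288 + 1 + 16384, 4096) = 544768
+//   kSize = 544768 - 16384 = 528384
+// which exceeds kGenericMaxBucketed, and adding kPartitionPageSize back
+// lands exactly on an allocation-granularity boundary, as required.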
+
+// Check that a bad free() is caught where the free() refers to an unused
+// partition page of a large allocation.
+TEST_F(PartitionAllocDeathTest, FreeWrongPartitionPage) {
+  // This large size will result in a direct mapped allocation with guard
+  // pages at either end.
+  void* ptr =
+      generic_allocator.root()->Alloc(kPartitionPageSize * 2, type_name);
+  EXPECT_TRUE(ptr);
+  char* badPtr = reinterpret_cast<char*>(ptr) + kPartitionPageSize;
+
+  EXPECT_DEATH(generic_allocator.root()->Free(badPtr), "");
+
+  generic_allocator.root()->Free(ptr);
+}
+
+#endif  // defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
+
+// Tests that |PartitionDumpStatsGeneric| and |PartitionDumpStats| run without
+// crashing and return non-zero values when memory is allocated.
+TEST_F(PartitionAllocTest, DumpMemoryStats) {
+  {
+    void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
+    MockPartitionStatsDumper mockStatsDumper;
+    allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+                                &mockStatsDumper);
+    EXPECT_TRUE(mockStatsDumper.IsMemoryAllocationRecorded());
+    PartitionFree(ptr);
+  }
+
+  // This series of tests checks the active -> empty -> decommitted states.
+  {
+    {
+      void* ptr =
+          generic_allocator.root()->Alloc(2048 - kExtraAllocSize, type_name);
+      MockPartitionStatsDumper dumper;
+      generic_allocator.root()->DumpStats("mock_generic_allocator",
+                                          false /* detailed dump */, &dumper);
+      EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
+
+      const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
+      EXPECT_TRUE(stats);
+      EXPECT_TRUE(stats->is_valid);
+      EXPECT_EQ(2048u, stats->bucket_slot_size);
+      EXPECT_EQ(2048u, stats->active_bytes);
+      EXPECT_EQ(kSystemPageSize, stats->resident_bytes);
+      EXPECT_EQ(0u, stats->decommittable_bytes);
+      EXPECT_EQ(0u, stats->discardable_bytes);
+      EXPECT_EQ(0u, stats->num_full_pages);
+      EXPECT_EQ(1u, stats->num_active_pages);
+      EXPECT_EQ(0u, stats->num_empty_pages);
+      EXPECT_EQ(0u, stats->num_decommitted_pages);
+      generic_allocator.root()->Free(ptr);
+    }
+
+    {
+      MockPartitionStatsDumper dumper;
+      generic_allocator.root()->DumpStats("mock_generic_allocator",
+                                          false /* detailed dump */, &dumper);
+      EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
+
+      const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
+      EXPECT_TRUE(stats);
+      EXPECT_TRUE(stats->is_valid);
+      EXPECT_EQ(2048u, stats->bucket_slot_size);
+      EXPECT_EQ(0u, stats->active_bytes);
+      EXPECT_EQ(kSystemPageSize, stats->resident_bytes);
+      EXPECT_EQ(kSystemPageSize, stats->decommittable_bytes);
+      EXPECT_EQ(0u, stats->discardable_bytes);
+      EXPECT_EQ(0u, stats->num_full_pages);
+      EXPECT_EQ(0u, stats->num_active_pages);
+      EXPECT_EQ(1u, stats->num_empty_pages);
+      EXPECT_EQ(0u, stats->num_decommitted_pages);
+    }
+
+    // TODO(crbug.com/722911): Commenting this out causes this test to fail when
+    // run singly (--gtest_filter=PartitionAllocTest.DumpMemoryStats), but not
+    // when run with the others (--gtest_filter=PartitionAllocTest.*).
+    CycleGenericFreeCache(kTestAllocSize);
+
+    {
+      MockPartitionStatsDumper dumper;
+      generic_allocator.root()->DumpStats("mock_generic_allocator",
+                                          false /* detailed dump */, &dumper);
+      EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
+
+      const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
+      EXPECT_TRUE(stats);
+      EXPECT_TRUE(stats->is_valid);
+      EXPECT_EQ(2048u, stats->bucket_slot_size);
+      EXPECT_EQ(0u, stats->active_bytes);
+      EXPECT_EQ(0u, stats->resident_bytes);
+      EXPECT_EQ(0u, stats->decommittable_bytes);
+      EXPECT_EQ(0u, stats->discardable_bytes);
+      EXPECT_EQ(0u, stats->num_full_pages);
+      EXPECT_EQ(0u, stats->num_active_pages);
+      EXPECT_EQ(0u, stats->num_empty_pages);
+      EXPECT_EQ(1u, stats->num_decommitted_pages);
+    }
+  }
+
+  // This test checks for correct empty page list accounting.
+  {
+    size_t size = kPartitionPageSize - kExtraAllocSize;
+    void* ptr1 = generic_allocator.root()->Alloc(size, type_name);
+    void* ptr2 = generic_allocator.root()->Alloc(size, type_name);
+    generic_allocator.root()->Free(ptr1);
+    generic_allocator.root()->Free(ptr2);
+
+    CycleGenericFreeCache(kTestAllocSize);
+
+    ptr1 = generic_allocator.root()->Alloc(size, type_name);
+
+    {
+      MockPartitionStatsDumper dumper;
+      generic_allocator.root()->DumpStats("mock_generic_allocator",
+                                          false /* detailed dump */, &dumper);
+      EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
+
+      const PartitionBucketMemoryStats* stats =
+          dumper.GetBucketStats(kPartitionPageSize);
+      EXPECT_TRUE(stats);
+      EXPECT_TRUE(stats->is_valid);
+      EXPECT_EQ(kPartitionPageSize, stats->bucket_slot_size);
+      EXPECT_EQ(kPartitionPageSize, stats->active_bytes);
+      EXPECT_EQ(kPartitionPageSize, stats->resident_bytes);
+      EXPECT_EQ(0u, stats->decommittable_bytes);
+      EXPECT_EQ(0u, stats->discardable_bytes);
+      EXPECT_EQ(1u, stats->num_full_pages);
+      EXPECT_EQ(0u, stats->num_active_pages);
+      EXPECT_EQ(0u, stats->num_empty_pages);
+      EXPECT_EQ(1u, stats->num_decommitted_pages);
+    }
+    generic_allocator.root()->Free(ptr1);
+  }
+
+  // This test checks for correct direct mapped accounting.
+  {
+    size_t size_smaller = kGenericMaxBucketed + 1;
+    size_t size_bigger = (kGenericMaxBucketed * 2) + 1;
+    size_t real_size_smaller =
+        (size_smaller + kSystemPageOffsetMask) & kSystemPageBaseMask;
+    size_t real_size_bigger =
+        (size_bigger + kSystemPageOffsetMask) & kSystemPageBaseMask;
+    void* ptr = generic_allocator.root()->Alloc(size_smaller, type_name);
+    void* ptr2 = generic_allocator.root()->Alloc(size_bigger, type_name);
+
+    {
+      MockPartitionStatsDumper dumper;
+      generic_allocator.root()->DumpStats("mock_generic_allocator",
+                                          false /* detailed dump */, &dumper);
+      EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
+
+      const PartitionBucketMemoryStats* stats =
+          dumper.GetBucketStats(real_size_smaller);
+      EXPECT_TRUE(stats);
+      EXPECT_TRUE(stats->is_valid);
+      EXPECT_TRUE(stats->is_direct_map);
+      EXPECT_EQ(real_size_smaller, stats->bucket_slot_size);
+      EXPECT_EQ(real_size_smaller, stats->active_bytes);
+      EXPECT_EQ(real_size_smaller, stats->resident_bytes);
+      EXPECT_EQ(0u, stats->decommittable_bytes);
+      EXPECT_EQ(0u, stats->discardable_bytes);
+      EXPECT_EQ(1u, stats->num_full_pages);
+      EXPECT_EQ(0u, stats->num_active_pages);
+      EXPECT_EQ(0u, stats->num_empty_pages);
+      EXPECT_EQ(0u, stats->num_decommitted_pages);
+
+      stats = dumper.GetBucketStats(real_size_bigger);
+      EXPECT_TRUE(stats);
+      EXPECT_TRUE(stats->is_valid);
+      EXPECT_TRUE(stats->is_direct_map);
+      EXPECT_EQ(real_size_bigger, stats->bucket_slot_size);
+      EXPECT_EQ(real_size_bigger, stats->active_bytes);
+      EXPECT_EQ(real_size_bigger, stats->resident_bytes);
+      EXPECT_EQ(0u, stats->decommittable_bytes);
+      EXPECT_EQ(0u, stats->discardable_bytes);
+      EXPECT_EQ(1u, stats->num_full_pages);
+      EXPECT_EQ(0u, stats->num_active_pages);
+      EXPECT_EQ(0u, stats->num_empty_pages);
+      EXPECT_EQ(0u, stats->num_decommitted_pages);
+    }
+
+    generic_allocator.root()->Free(ptr2);
+    generic_allocator.root()->Free(ptr);
+
+    // Whilst we're here, allocate again and free with different ordering to
+    // give a workout to our linked list code.
+    ptr = generic_allocator.root()->Alloc(size_smaller, type_name);
+    ptr2 = generic_allocator.root()->Alloc(size_bigger, type_name);
+    generic_allocator.root()->Free(ptr);
+    generic_allocator.root()->Free(ptr2);
+  }
+
+  // This test checks large-but-not-quite-direct allocations.
+  {
+    constexpr size_t requested_size = 16 * kSystemPageSize;
+    void* ptr = generic_allocator.root()->Alloc(requested_size + 1, type_name);
+
+    {
+      MockPartitionStatsDumper dumper;
+      generic_allocator.root()->DumpStats("mock_generic_allocator",
+                                          false /* detailed dump */, &dumper);
+      EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
+
+      size_t slot_size =
+          requested_size + (requested_size / kGenericNumBucketsPerOrder);
+      const PartitionBucketMemoryStats* stats =
+          dumper.GetBucketStats(slot_size);
+      EXPECT_TRUE(stats);
+      EXPECT_TRUE(stats->is_valid);
+      EXPECT_FALSE(stats->is_direct_map);
+      EXPECT_EQ(slot_size, stats->bucket_slot_size);
+      EXPECT_EQ(requested_size + 1 + kExtraAllocSize, stats->active_bytes);
+      EXPECT_EQ(slot_size, stats->resident_bytes);
+      EXPECT_EQ(0u, stats->decommittable_bytes);
+      EXPECT_EQ(kSystemPageSize, stats->discardable_bytes);
+      EXPECT_EQ(1u, stats->num_full_pages);
+      EXPECT_EQ(0u, stats->num_active_pages);
+      EXPECT_EQ(0u, stats->num_empty_pages);
+      EXPECT_EQ(0u, stats->num_decommitted_pages);
+    }
+
+    generic_allocator.root()->Free(ptr);
+
+    {
+      MockPartitionStatsDumper dumper;
+      generic_allocator.root()->DumpStats("mock_generic_allocator",
+                                          false /* detailed dump */, &dumper);
+      EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
+
+      size_t slot_size =
+          requested_size + (requested_size / kGenericNumBucketsPerOrder);
+      const PartitionBucketMemoryStats* stats =
+          dumper.GetBucketStats(slot_size);
+      EXPECT_TRUE(stats);
+      EXPECT_TRUE(stats->is_valid);
+      EXPECT_FALSE(stats->is_direct_map);
+      EXPECT_EQ(slot_size, stats->bucket_slot_size);
+      EXPECT_EQ(0u, stats->active_bytes);
+      EXPECT_EQ(slot_size, stats->resident_bytes);
+      EXPECT_EQ(slot_size, stats->decommittable_bytes);
+      EXPECT_EQ(0u, stats->num_full_pages);
+      EXPECT_EQ(0u, stats->num_active_pages);
+      EXPECT_EQ(1u, stats->num_empty_pages);
+      EXPECT_EQ(0u, stats->num_decommitted_pages);
+    }
+
+    void* ptr2 = generic_allocator.root()->Alloc(
+        requested_size + kSystemPageSize + 1, type_name);
+    EXPECT_EQ(ptr, ptr2);
+
+    {
+      MockPartitionStatsDumper dumper;
+      generic_allocator.root()->DumpStats("mock_generic_allocator",
+                                          false /* detailed dump */, &dumper);
+      EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
+
+      size_t slot_size =
+          requested_size + (requested_size / kGenericNumBucketsPerOrder);
+      const PartitionBucketMemoryStats* stats =
+          dumper.GetBucketStats(slot_size);
+      EXPECT_TRUE(stats);
+      EXPECT_TRUE(stats->is_valid);
+      EXPECT_FALSE(stats->is_direct_map);
+      EXPECT_EQ(slot_size, stats->bucket_slot_size);
+      EXPECT_EQ(requested_size + kSystemPageSize + 1 + kExtraAllocSize,
+                stats->active_bytes);
+      EXPECT_EQ(slot_size, stats->resident_bytes);
+      EXPECT_EQ(0u, stats->decommittable_bytes);
+      EXPECT_EQ(0u, stats->discardable_bytes);
+      EXPECT_EQ(1u, stats->num_full_pages);
+      EXPECT_EQ(0u, stats->num_active_pages);
+      EXPECT_EQ(0u, stats->num_empty_pages);
+      EXPECT_EQ(0u, stats->num_decommitted_pages);
+    }
+
+    generic_allocator.root()->Free(ptr2);
+  }
+}
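+
+// The slot_size formula above reflects bucket spacing: buckets within an
+// order are spaced by (order size / kGenericNumBucketsPerOrder), so a
+// 16-system-page request rounds up by one spacing step. Assuming 4 KiB
+// system pages and 8 buckets per order (illustrative values), that is
+// 65536 + 65536 / 8 = 73728 bytes for the bucket slot size.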
+
+// Tests the API to purge freeable memory.
+TEST_F(PartitionAllocTest, Purge) {
+  char* ptr = reinterpret_cast<char*>(
+      generic_allocator.root()->Alloc(2048 - kExtraAllocSize, type_name));
+  generic_allocator.root()->Free(ptr);
+  {
+    MockPartitionStatsDumper dumper;
+    generic_allocator.root()->DumpStats("mock_generic_allocator",
+                                        false /* detailed dump */, &dumper);
+    EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
+
+    const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
+    EXPECT_TRUE(stats);
+    EXPECT_TRUE(stats->is_valid);
+    EXPECT_EQ(kSystemPageSize, stats->decommittable_bytes);
+    EXPECT_EQ(kSystemPageSize, stats->resident_bytes);
+  }
+  generic_allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages);
+  {
+    MockPartitionStatsDumper dumper;
+    generic_allocator.root()->DumpStats("mock_generic_allocator",
+                                        false /* detailed dump */, &dumper);
+    EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
+
+    const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
+    EXPECT_TRUE(stats);
+    EXPECT_TRUE(stats->is_valid);
+    EXPECT_EQ(0u, stats->decommittable_bytes);
+    EXPECT_EQ(0u, stats->resident_bytes);
+  }
+  // Calling purge again here is a good way of testing we didn't mess up the
+  // state of the free cache ring.
+  generic_allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages);
+
+  char* bigPtr = reinterpret_cast<char*>(
+      generic_allocator.root()->Alloc(256 * 1024, type_name));
+  generic_allocator.root()->Free(bigPtr);
+  generic_allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages);
+
+  CHECK_PAGE_IN_CORE(ptr - kPointerOffset, false);
+  CHECK_PAGE_IN_CORE(bigPtr - kPointerOffset, false);
+}
+
+// Tests that we prefer to allocate into a non-empty partition page over an
+// empty one. This is an important aspect of minimizing memory usage for some
+// allocation sizes, particularly larger ones.
+TEST_F(PartitionAllocTest, PreferActiveOverEmpty) {
+  size_t size = (kSystemPageSize * 2) - kExtraAllocSize;
+  // Allocate 3 full slot spans worth of 8192-byte allocations.
+  // Each slot span for this size is 16384 bytes, or 1 partition page and 2
+  // slots.
+  void* ptr1 = generic_allocator.root()->Alloc(size, type_name);
+  void* ptr2 = generic_allocator.root()->Alloc(size, type_name);
+  void* ptr3 = generic_allocator.root()->Alloc(size, type_name);
+  void* ptr4 = generic_allocator.root()->Alloc(size, type_name);
+  void* ptr5 = generic_allocator.root()->Alloc(size, type_name);
+  void* ptr6 = generic_allocator.root()->Alloc(size, type_name);
+
+  PartitionPage* page1 =
+      PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr1));
+  PartitionPage* page2 =
+      PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr3));
+  PartitionPage* page3 =
+      PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr6));
+  EXPECT_NE(page1, page2);
+  EXPECT_NE(page2, page3);
+  PartitionBucket* bucket = page1->bucket;
+  EXPECT_EQ(page3, bucket->active_pages_head);
+
+  // Free up the 2nd slot in each slot span.
+  // This leaves the active list containing 3 pages, each with 1 used and 1
+  // free slot. The active page will be the one containing ptr1.
+  generic_allocator.root()->Free(ptr6);
+  generic_allocator.root()->Free(ptr4);
+  generic_allocator.root()->Free(ptr2);
+  EXPECT_EQ(page1, bucket->active_pages_head);
+
+  // Empty the middle page in the active list.
+  generic_allocator.root()->Free(ptr3);
+  EXPECT_EQ(page1, bucket->active_pages_head);
+
+  // Empty the first page in the active list -- also the current page.
+  generic_allocator.root()->Free(ptr1);
+
+  // A good choice here is to re-fill the third page since the first two are
+  // empty. We used to fail that.
+  void* ptr7 = generic_allocator.root()->Alloc(size, type_name);
+  EXPECT_EQ(ptr6, ptr7);
+  EXPECT_EQ(page3, bucket->active_pages_head);
+
+  generic_allocator.root()->Free(ptr5);
+  generic_allocator.root()->Free(ptr7);
+}
+
+// Tests the API to purge discardable memory.
+TEST_F(PartitionAllocTest, PurgeDiscardable) {
+  // Free the second of two 4096 byte allocations and then purge.
+  {
+    void* ptr1 = generic_allocator.root()->Alloc(
+        kSystemPageSize - kExtraAllocSize, type_name);
+    char* ptr2 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
+        kSystemPageSize - kExtraAllocSize, type_name));
+    generic_allocator.root()->Free(ptr2);
+    PartitionPage* page =
+        PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr1));
+    EXPECT_EQ(2u, page->num_unprovisioned_slots);
+    {
+      MockPartitionStatsDumper dumper;
+      generic_allocator.root()->DumpStats("mock_generic_allocator",
+                                          false /* detailed dump */, &dumper);
+      EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
+
+      const PartitionBucketMemoryStats* stats =
+          dumper.GetBucketStats(kSystemPageSize);
+      EXPECT_TRUE(stats);
+      EXPECT_TRUE(stats->is_valid);
+      EXPECT_EQ(0u, stats->decommittable_bytes);
+      EXPECT_EQ(kSystemPageSize, stats->discardable_bytes);
+      EXPECT_EQ(kSystemPageSize, stats->active_bytes);
+      EXPECT_EQ(2 * kSystemPageSize, stats->resident_bytes);
+    }
+    CHECK_PAGE_IN_CORE(ptr2 - kPointerOffset, true);
+    generic_allocator.root()->PurgeMemory(
+        PartitionPurgeDiscardUnusedSystemPages);
+    CHECK_PAGE_IN_CORE(ptr2 - kPointerOffset, false);
+    EXPECT_EQ(3u, page->num_unprovisioned_slots);
+
+    generic_allocator.root()->Free(ptr1);
+  }
+  // Free the first of two 4096-byte allocations and then purge.
+  {
+    char* ptr1 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
+        kSystemPageSize - kExtraAllocSize, type_name));
+    void* ptr2 = generic_allocator.root()->Alloc(
+        kSystemPageSize - kExtraAllocSize, type_name);
+    generic_allocator.root()->Free(ptr1);
+    {
+      MockPartitionStatsDumper dumper;
+      generic_allocator.root()->DumpStats("mock_generic_allocator",
+                                          false /* detailed dump */, &dumper);
+      EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
+
+      const PartitionBucketMemoryStats* stats =
+          dumper.GetBucketStats(kSystemPageSize);
+      EXPECT_TRUE(stats);
+      EXPECT_TRUE(stats->is_valid);
+      EXPECT_EQ(0u, stats->decommittable_bytes);
+#if defined(OS_WIN)
+      EXPECT_EQ(0u, stats->discardable_bytes);
+#else
+      EXPECT_EQ(kSystemPageSize, stats->discardable_bytes);
+#endif
+      EXPECT_EQ(kSystemPageSize, stats->active_bytes);
+      EXPECT_EQ(2 * kSystemPageSize, stats->resident_bytes);
+    }
+    CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
+    generic_allocator.root()->PurgeMemory(
+        PartitionPurgeDiscardUnusedSystemPages);
+    CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, false);
+
+    generic_allocator.root()->Free(ptr2);
+  }
+  {
+    constexpr size_t requested_size = 2.25 * kSystemPageSize;
+    char* ptr1 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
+        requested_size - kExtraAllocSize, type_name));
+    void* ptr2 = generic_allocator.root()->Alloc(
+        requested_size - kExtraAllocSize, type_name);
+    void* ptr3 = generic_allocator.root()->Alloc(
+        requested_size - kExtraAllocSize, type_name);
+    void* ptr4 = generic_allocator.root()->Alloc(
+        requested_size - kExtraAllocSize, type_name);
+    memset(ptr1, 'A', requested_size - kExtraAllocSize);
+    memset(ptr2, 'A', requested_size - kExtraAllocSize);
+    generic_allocator.root()->Free(ptr2);
+    generic_allocator.root()->Free(ptr1);
+    {
+      MockPartitionStatsDumper dumper;
+      generic_allocator.root()->DumpStats("mock_generic_allocator",
+                                          false /* detailed dump */, &dumper);
+      EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
+
+      const PartitionBucketMemoryStats* stats =
+          dumper.GetBucketStats(requested_size);
+      EXPECT_TRUE(stats);
+      EXPECT_TRUE(stats->is_valid);
+      EXPECT_EQ(0u, stats->decommittable_bytes);
+      EXPECT_EQ(2 * kSystemPageSize, stats->discardable_bytes);
+      EXPECT_EQ(requested_size * 2, stats->active_bytes);
+      EXPECT_EQ(9 * kSystemPageSize, stats->resident_bytes);
+    }
+    CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
+    CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + kSystemPageSize, true);
+    CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 2), true);
+    CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 3), true);
+    CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 4), true);
+    generic_allocator.root()->PurgeMemory(
+        PartitionPurgeDiscardUnusedSystemPages);
+    CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
+    CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + kSystemPageSize, false);
+    CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 2), true);
+    CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 3), false);
+    CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 4), true);
+
+    generic_allocator.root()->Free(ptr3);
+    generic_allocator.root()->Free(ptr4);
+  }
+
+// When kSystemPageSize = 16384 (as on _MIPS_ARCH_LOONGSON), 64 *
+// kSystemPageSize (see the #else branch below) caused this test to OOM.
+// Therefore, for systems with 16 KiB pages, use 32 * kSystemPageSize.
+//
+// TODO(palmer): Refactor this to branch on page size instead of architecture,
+// for clarity of purpose and for applicability to more architectures.
+#if defined(_MIPS_ARCH_LOONGSON)
+  {
+    char* ptr1 = reinterpret_cast<char*>(PartitionAllocGeneric(
+        generic_allocator.root(), (32 * kSystemPageSize) - kExtraAllocSize,
+        type_name));
+    memset(ptr1, 'A', (32 * kSystemPageSize) - kExtraAllocSize);
+    PartitionFreeGeneric(generic_allocator.root(), ptr1);
+    ptr1 = reinterpret_cast<char*>(PartitionAllocGeneric(
+        generic_allocator.root(), (31 * kSystemPageSize) - kExtraAllocSize,
+        type_name));
+    {
+      MockPartitionStatsDumper dumper;
+      PartitionDumpStatsGeneric(generic_allocator.root(),
+                                "mock_generic_allocator",
+                                false /* detailed dump */, &dumper);
+      EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
+
+      const PartitionBucketMemoryStats* stats =
+          dumper.GetBucketStats(32 * kSystemPageSize);
+      EXPECT_TRUE(stats);
+      EXPECT_TRUE(stats->is_valid);
+      EXPECT_EQ(0u, stats->decommittable_bytes);
+      EXPECT_EQ(kSystemPageSize, stats->discardable_bytes);
+      EXPECT_EQ(31 * kSystemPageSize, stats->active_bytes);
+      EXPECT_EQ(32 * kSystemPageSize, stats->resident_bytes);
+    }
+    CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 30), true);
+    CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 31), true);
+    PartitionPurgeMemoryGeneric(generic_allocator.root(),
+                                PartitionPurgeDiscardUnusedSystemPages);
+    CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 30), true);
+    CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 31), false);
+
+    PartitionFreeGeneric(generic_allocator.root(), ptr1);
+  }
+#else
+  {
+    char* ptr1 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
+        (64 * kSystemPageSize) - kExtraAllocSize, type_name));
+    memset(ptr1, 'A', (64 * kSystemPageSize) - kExtraAllocSize);
+    generic_allocator.root()->Free(ptr1);
+    ptr1 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
+        (61 * kSystemPageSize) - kExtraAllocSize, type_name));
+    {
+      MockPartitionStatsDumper dumper;
+      generic_allocator.root()->DumpStats("mock_generic_allocator",
+                                          false /* detailed dump */, &dumper);
+      EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
+
+      const PartitionBucketMemoryStats* stats =
+          dumper.GetBucketStats(64 * kSystemPageSize);
+      EXPECT_TRUE(stats);
+      EXPECT_TRUE(stats->is_valid);
+      EXPECT_EQ(0u, stats->decommittable_bytes);
+      EXPECT_EQ(3 * kSystemPageSize, stats->discardable_bytes);
+      EXPECT_EQ(61 * kSystemPageSize, stats->active_bytes);
+      EXPECT_EQ(64 * kSystemPageSize, stats->resident_bytes);
+    }
+    CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 60), true);
+    CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 61), true);
+    CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 62), true);
+    CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 63), true);
+    generic_allocator.root()->PurgeMemory(
+        PartitionPurgeDiscardUnusedSystemPages);
+    CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 60), true);
+    CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 61), false);
+    CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 62), false);
+    CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 63), false);
+
+    generic_allocator.root()->Free(ptr1);
+  }
+#endif
+  // This sub-test tests truncation of the provisioned slots in a trickier
+  // case where the freelist is rewritten.
+  generic_allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages);
+  {
+    char* ptr1 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
+        kSystemPageSize - kExtraAllocSize, type_name));
+    void* ptr2 = generic_allocator.root()->Alloc(
+        kSystemPageSize - kExtraAllocSize, type_name);
+    void* ptr3 = generic_allocator.root()->Alloc(
+        kSystemPageSize - kExtraAllocSize, type_name);
+    void* ptr4 = generic_allocator.root()->Alloc(
+        kSystemPageSize - kExtraAllocSize, type_name);
+    ptr1[0] = 'A';
+    ptr1[kSystemPageSize] = 'A';
+    ptr1[kSystemPageSize * 2] = 'A';
+    ptr1[kSystemPageSize * 3] = 'A';
+    PartitionPage* page =
+        PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr1));
+    generic_allocator.root()->Free(ptr2);
+    generic_allocator.root()->Free(ptr4);
+    generic_allocator.root()->Free(ptr1);
+    EXPECT_EQ(0u, page->num_unprovisioned_slots);
+
+    {
+      MockPartitionStatsDumper dumper;
+      generic_allocator.root()->DumpStats("mock_generic_allocator",
+                                          false /* detailed dump */, &dumper);
+      EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
+
+      const PartitionBucketMemoryStats* stats =
+          dumper.GetBucketStats(kSystemPageSize);
+      EXPECT_TRUE(stats);
+      EXPECT_TRUE(stats->is_valid);
+      EXPECT_EQ(0u, stats->decommittable_bytes);
+#if defined(OS_WIN)
+      EXPECT_EQ(kSystemPageSize, stats->discardable_bytes);
+#else
+      EXPECT_EQ(2 * kSystemPageSize, stats->discardable_bytes);
+#endif
+      EXPECT_EQ(kSystemPageSize, stats->active_bytes);
+      EXPECT_EQ(4 * kSystemPageSize, stats->resident_bytes);
+    }
+    CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
+    CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + kSystemPageSize, true);
+    CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 2), true);
+    CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 3), true);
+    generic_allocator.root()->PurgeMemory(
+        PartitionPurgeDiscardUnusedSystemPages);
+    EXPECT_EQ(1u, page->num_unprovisioned_slots);
+    CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
+    CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + kSystemPageSize, false);
+    CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 2), true);
+    CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 3), false);
+
+    // Let's check we didn't brick the freelist.
+    void* ptr1b = generic_allocator.root()->Alloc(
+        kSystemPageSize - kExtraAllocSize, type_name);
+    EXPECT_EQ(ptr1, ptr1b);
+    void* ptr2b = generic_allocator.root()->Alloc(
+        kSystemPageSize - kExtraAllocSize, type_name);
+    EXPECT_EQ(ptr2, ptr2b);
+    EXPECT_FALSE(page->freelist_head);
+
+    generic_allocator.root()->Free(ptr1);
+    generic_allocator.root()->Free(ptr2);
+    generic_allocator.root()->Free(ptr3);
+  }
+  // This sub-test is similar, but tests a double-truncation.
+  generic_allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages);
+  {
+    char* ptr1 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
+        kSystemPageSize - kExtraAllocSize, type_name));
+    void* ptr2 = generic_allocator.root()->Alloc(
+        kSystemPageSize - kExtraAllocSize, type_name);
+    void* ptr3 = generic_allocator.root()->Alloc(
+        kSystemPageSize - kExtraAllocSize, type_name);
+    void* ptr4 = generic_allocator.root()->Alloc(
+        kSystemPageSize - kExtraAllocSize, type_name);
+    ptr1[0] = 'A';
+    ptr1[kSystemPageSize] = 'A';
+    ptr1[kSystemPageSize * 2] = 'A';
+    ptr1[kSystemPageSize * 3] = 'A';
+    PartitionPage* page =
+        PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr1));
+    generic_allocator.root()->Free(ptr4);
+    generic_allocator.root()->Free(ptr3);
+    EXPECT_EQ(0u, page->num_unprovisioned_slots);
+
+    {
+      MockPartitionStatsDumper dumper;
+      generic_allocator.root()->DumpStats("mock_generic_allocator",
+                                          false /* detailed dump */, &dumper);
+      EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
+
+      const PartitionBucketMemoryStats* stats =
+          dumper.GetBucketStats(kSystemPageSize);
+      EXPECT_TRUE(stats);
+      EXPECT_TRUE(stats->is_valid);
+      EXPECT_EQ(0u, stats->decommittable_bytes);
+      EXPECT_EQ(2 * kSystemPageSize, stats->discardable_bytes);
+      EXPECT_EQ(2 * kSystemPageSize, stats->active_bytes);
+      EXPECT_EQ(4 * kSystemPageSize, stats->resident_bytes);
+    }
+    CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
+    CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + kSystemPageSize, true);
+    CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 2), true);
+    CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 3), true);
+    generic_allocator.root()->PurgeMemory(
+        PartitionPurgeDiscardUnusedSystemPages);
+    EXPECT_EQ(2u, page->num_unprovisioned_slots);
+    CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
+    CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + kSystemPageSize, true);
+    CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 2), false);
+    CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 3), false);
+
+    EXPECT_FALSE(page->freelist_head);
+
+    generic_allocator.root()->Free(ptr1);
+    generic_allocator.root()->Free(ptr2);
+  }
+}
+
+TEST_F(PartitionAllocTest, ReallocMovesCookies) {
+  // Resize so as to be sure to hit a "resize in place" case, and ensure that
+  // use of the entire result is compatible with the debug mode's cookies, even
+  // when the bucket size is large enough to span more than one partition page
+  // and we can track the "raw" size. See https://crbug.com/709271
+  static constexpr size_t kSize =
+      base::kMaxSystemPagesPerSlotSpan * base::kSystemPageSize;
+  void* ptr = generic_allocator.root()->Alloc(kSize + 1, type_name);
+  EXPECT_TRUE(ptr);
+
+  memset(ptr, 0xbd, kSize + 1);
+  ptr = generic_allocator.root()->Realloc(ptr, kSize + 2, type_name);
+  EXPECT_TRUE(ptr);
+
+  memset(ptr, 0xbd, kSize + 2);
+  generic_allocator.root()->Free(ptr);
+}
+
+TEST_F(PartitionAllocTest, SmallReallocDoesNotMoveTrailingCookie) {
+  // For crbug.com/781473
+  static constexpr size_t kSize = 264;
+  void* ptr = generic_allocator.root()->Alloc(kSize, type_name);
+  EXPECT_TRUE(ptr);
+
+  ptr = generic_allocator.root()->Realloc(ptr, kSize + 16, type_name);
+  EXPECT_TRUE(ptr);
+
+  generic_allocator.root()->Free(ptr);
+}
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
diff --git a/base/allocator/partition_allocator/partition_bucket.cc b/base/allocator/partition_allocator/partition_bucket.cc
new file mode 100644
index 0000000..fcea523
--- /dev/null
+++ b/base/allocator/partition_allocator/partition_bucket.cc
@@ -0,0 +1,554 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/partition_bucket.h"
+#include "base/allocator/partition_allocator/oom.h"
+#include "base/allocator/partition_allocator/page_allocator.h"
+#include "base/allocator/partition_allocator/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
+#include "base/allocator/partition_allocator/partition_oom.h"
+#include "base/allocator/partition_allocator/partition_page.h"
+#include "base/allocator/partition_allocator/partition_root_base.h"
+#include "build/build_config.h"
+
+namespace base {
+namespace internal {
+
+namespace {
+
+ALWAYS_INLINE PartitionPage* PartitionDirectMap(PartitionRootBase* root,
+                                                int flags,
+                                                size_t raw_size) {
+  size_t size = PartitionBucket::get_direct_map_size(raw_size);
+
+  // Because we need to fake looking like a super page, we need to allocate
+  // a bunch of system pages more than "size":
+  // - The first few system pages are the partition page in which the super
+  // page metadata is stored. We fault just one system page out of a partition
+  // page sized clump.
+  // - We add a trailing guard page on 32-bit (on 64-bit we rely on the
+  // massive address space plus randomization instead).
+  size_t map_size = size + kPartitionPageSize;
+#if !defined(ARCH_CPU_64_BITS)
+  map_size += kSystemPageSize;
+#endif
+  // Round up to the allocation granularity.
+  map_size += kPageAllocationGranularityOffsetMask;
+  map_size &= kPageAllocationGranularityBaseMask;
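+  // Illustrative arithmetic, assuming a 64 KiB allocation granularity (as on
+  // Windows): a map_size of 200 KiB becomes
+  // (200 KiB + 0xFFFF) & ~0xFFFF == 256 KiB.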
+
+  // TODO: these pages will be zero-filled. Consider internalizing an
+  // allocZeroed() API so we can avoid a memset() entirely in this case.
+  char* ptr = reinterpret_cast<char*>(
+      AllocPages(nullptr, map_size, kSuperPageSize, PageReadWrite));
+  if (UNLIKELY(!ptr))
+    return nullptr;
+
+  size_t committed_page_size = size + kSystemPageSize;
+  root->total_size_of_direct_mapped_pages += committed_page_size;
+  root->IncreaseCommittedPages(committed_page_size);
+
+  char* slot = ptr + kPartitionPageSize;
+  CHECK(SetSystemPagesAccess(ptr + (kSystemPageSize * 2),
+                             kPartitionPageSize - (kSystemPageSize * 2),
+                             PageInaccessible));
+#if !defined(ARCH_CPU_64_BITS)
+  CHECK(SetSystemPagesAccess(ptr, kSystemPageSize, PageInaccessible));
+  CHECK(SetSystemPagesAccess(slot + size, kSystemPageSize, PageInaccessible));
+#endif
+
+  PartitionSuperPageExtentEntry* extent =
+      reinterpret_cast<PartitionSuperPageExtentEntry*>(
+          PartitionSuperPageToMetadataArea(ptr));
+  extent->root = root;
+  // The new structures are all located inside a fresh system page so they
+  // will all be zeroed out. These DCHECKs are for documentation.
+  DCHECK(!extent->super_page_base);
+  DCHECK(!extent->super_pages_end);
+  DCHECK(!extent->next);
+  PartitionPage* page = PartitionPage::FromPointerNoAlignmentCheck(slot);
+  PartitionBucket* bucket = reinterpret_cast<PartitionBucket*>(
+      reinterpret_cast<char*>(page) + (kPageMetadataSize * 2));
+  DCHECK(!page->next_page);
+  DCHECK(!page->num_allocated_slots);
+  DCHECK(!page->num_unprovisioned_slots);
+  DCHECK(!page->page_offset);
+  DCHECK(!page->empty_cache_index);
+  page->bucket = bucket;
+  page->freelist_head = reinterpret_cast<PartitionFreelistEntry*>(slot);
+  PartitionFreelistEntry* next_entry =
+      reinterpret_cast<PartitionFreelistEntry*>(slot);
+  next_entry->next = PartitionFreelistEntry::Transform(nullptr);
+
+  DCHECK(!bucket->active_pages_head);
+  DCHECK(!bucket->empty_pages_head);
+  DCHECK(!bucket->decommitted_pages_head);
+  DCHECK(!bucket->num_system_pages_per_slot_span);
+  DCHECK(!bucket->num_full_pages);
+  bucket->slot_size = size;
+
+  PartitionDirectMapExtent* map_extent =
+      PartitionDirectMapExtent::FromPage(page);
+  map_extent->map_size = map_size - kPartitionPageSize - kSystemPageSize;
+  map_extent->bucket = bucket;
+
+  // Maintain the doubly-linked list of all direct mappings.
+  map_extent->next_extent = root->direct_map_list;
+  if (map_extent->next_extent)
+    map_extent->next_extent->prev_extent = map_extent;
+  map_extent->prev_extent = nullptr;
+  root->direct_map_list = map_extent;
+
+  return page;
+}
+
+}  // namespace
+
+// static
+PartitionBucket PartitionBucket::sentinel_bucket_;
+
+PartitionBucket* PartitionBucket::get_sentinel_bucket() {
+  return &sentinel_bucket_;
+}
+
+// TODO(ajwong): This seems to interact badly with
+// get_pages_per_slot_span() which rounds the value from this up to a
+// multiple of kNumSystemPagesPerPartitionPage (aka 4) anyway.
+// http://crbug.com/776537
+//
+// TODO(ajwong): The waste calculation seems wrong. The PTE usage should cover
+// both used and unused pages.
+// http://crbug.com/776537
+uint8_t PartitionBucket::get_system_pages_per_slot_span() {
+  // This works out reasonably for the current bucket sizes of the generic
+  // allocator, and the current values of partition page size and constants.
+  // Specifically, we have enough room to always pack the slots perfectly into
+  // some number of system pages. The only waste is the waste associated with
+  // unfaulted pages (i.e. wasted address space).
+  // TODO: we end up using a lot of system pages for very small sizes. For
+  // example, we'll use 12 system pages for slot size 24. The slot size is
+  // so small that the waste would be tiny with just 4, or 1, system pages.
+  // Later, we can investigate whether there are anti-fragmentation benefits
+  // to using fewer system pages.
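+  // For example, with 4 KiB system pages and slot_size == 24: at i == 12,
+  // page_size == 49152 == 2048 * 24, so nothing is wasted after the last
+  // slot and there are no unfaulted remainder pages (12 % 4 == 0), giving a
+  // waste ratio of zero.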
+  double best_waste_ratio = 1.0;
+  uint16_t best_pages = 0;
+  if (this->slot_size > kMaxSystemPagesPerSlotSpan * kSystemPageSize) {
+    // TODO(ajwong): Why is there a DCHECK here for this?
+    // http://crbug.com/776537
+    DCHECK(!(this->slot_size % kSystemPageSize));
+    best_pages = static_cast<uint16_t>(this->slot_size / kSystemPageSize);
+    // TODO(ajwong): Should this be checking against
+    // kMaxSystemPagesPerSlotSpan or numeric_limits<uint8_t>::max?
+    // http://crbug.com/776537
+    CHECK(best_pages < (1 << 8));
+    return static_cast<uint8_t>(best_pages);
+  }
+  DCHECK(this->slot_size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize);
+  for (uint16_t i = kNumSystemPagesPerPartitionPage - 1;
+       i <= kMaxSystemPagesPerSlotSpan; ++i) {
+    size_t page_size = kSystemPageSize * i;
+    size_t num_slots = page_size / this->slot_size;
+    size_t waste = page_size - (num_slots * this->slot_size);
+    // Leaving a page unfaulted is not free; the page will occupy an empty page
+    // table entry.  Make a simple attempt to account for that.
+    //
+    // TODO(ajwong): This looks wrong. PTEs are allocated for all pages
+    // regardless of whether or not they are wasted. Should it just
+    // be waste += i * sizeof(void*)?
+    // http://crbug.com/776537
+    size_t num_remainder_pages = i & (kNumSystemPagesPerPartitionPage - 1);
+    size_t num_unfaulted_pages =
+        num_remainder_pages
+            ? (kNumSystemPagesPerPartitionPage - num_remainder_pages)
+            : 0;
+    waste += sizeof(void*) * num_unfaulted_pages;
+    double waste_ratio =
+        static_cast<double>(waste) / static_cast<double>(page_size);
+    if (waste_ratio < best_waste_ratio) {
+      best_waste_ratio = waste_ratio;
+      best_pages = i;
+    }
+  }
+  DCHECK(best_pages > 0);
+  CHECK(best_pages <= kMaxSystemPagesPerSlotSpan);
+  return static_cast<uint8_t>(best_pages);
+}
+
+void PartitionBucket::Init(uint32_t new_slot_size) {
+  slot_size = new_slot_size;
+  active_pages_head = PartitionPage::get_sentinel_page();
+  empty_pages_head = nullptr;
+  decommitted_pages_head = nullptr;
+  num_full_pages = 0;
+  num_system_pages_per_slot_span = get_system_pages_per_slot_span();
+}
+
+NOINLINE void PartitionBucket::OnFull() {
+  OOM_CRASH();
+}
+
+ALWAYS_INLINE void* PartitionBucket::AllocNewSlotSpan(
+    PartitionRootBase* root,
+    int flags,
+    uint16_t num_partition_pages) {
+  DCHECK(!(reinterpret_cast<uintptr_t>(root->next_partition_page) %
+           kPartitionPageSize));
+  DCHECK(!(reinterpret_cast<uintptr_t>(root->next_partition_page_end) %
+           kPartitionPageSize));
+  DCHECK(num_partition_pages <= kNumPartitionPagesPerSuperPage);
+  size_t total_size = kPartitionPageSize * num_partition_pages;
+  size_t num_partition_pages_left =
+      (root->next_partition_page_end - root->next_partition_page) >>
+      kPartitionPageShift;
+  if (LIKELY(num_partition_pages_left >= num_partition_pages)) {
+    // In this case, we can still hand out pages from the current super page
+    // allocation.
+    char* ret = root->next_partition_page;
+
+    // Fresh system pages in the super page are decommitted. Commit them
+    // before vending them out.
+    CHECK(SetSystemPagesAccess(ret, total_size, PageReadWrite));
+
+    root->next_partition_page += total_size;
+    root->IncreaseCommittedPages(total_size);
+    return ret;
+  }
+
+  // Need a new super page. We want to allocate super pages in a contiguous
+  // address region as much as possible. This is important for not causing
+  // page table bloat and for avoiding fragmentation of the address space on
+  // 32-bit architectures.
+  char* requestedAddress = root->next_super_page;
+  char* super_page = reinterpret_cast<char*>(AllocPages(
+      requestedAddress, kSuperPageSize, kSuperPageSize, PageReadWrite));
+  if (UNLIKELY(!super_page))
+    return nullptr;
+
+  root->total_size_of_super_pages += kSuperPageSize;
+  root->IncreaseCommittedPages(total_size);
+
+  // |total_size| MUST be less than kSuperPageSize - (kPartitionPageSize*2).
+  // This is a trustworthy value because num_partition_pages is not user
+  // controlled.
+  //
+  // TODO(ajwong): Introduce a DCHECK.
+  root->next_super_page = super_page + kSuperPageSize;
+  char* ret = super_page + kPartitionPageSize;
+  root->next_partition_page = ret + total_size;
+  root->next_partition_page_end = root->next_super_page - kPartitionPageSize;
+  // Make the first partition page in the super page a guard page, but leave a
+  // hole in the middle.
+  // This is where we put page metadata and also a tiny amount of extent
+  // metadata.
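+  //
+  // For orientation, the first partition page of the super page looks like:
+  //
+  //   | guard (1 system page) | metadata (1 system page) | guard pages ... |
+  //
+  // and is followed by the payload partition pages, with the super page's
+  // final partition page reserved as a trailing guard.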
+  CHECK(SetSystemPagesAccess(super_page, kSystemPageSize, PageInaccessible));
+  CHECK(SetSystemPagesAccess(super_page + (kSystemPageSize * 2),
+                             kPartitionPageSize - (kSystemPageSize * 2),
+                             PageInaccessible));
+  //  CHECK(SetSystemPagesAccess(super_page + (kSuperPageSize -
+  //  kPartitionPageSize),
+  //                             kPartitionPageSize, PageInaccessible));
+  // All remaining slot spans for the unallocated PartitionPages inside the
+  // SuperPage are conceptually decommitted. Correctly set the state here
+  // so they do not occupy resources.
+  //
+  // TODO(ajwong): Refactor Page Allocator API so the SuperPage comes in
+  // decommitted initially.
+  CHECK(SetSystemPagesAccess(super_page + kPartitionPageSize + total_size,
+                             (kSuperPageSize - kPartitionPageSize - total_size),
+                             PageInaccessible));
+
+  // If we were after a specific address, but didn't get it, assume that
+  // the system chose a lousy address. Here most OSes have a default
+  // algorithm that isn't randomized. For example, most Linux
+  // distributions will allocate the mapping directly before the last
+  // successful mapping, which is far from random. So we just get fresh
+  // randomness for the next mapping attempt.
+  if (requestedAddress && requestedAddress != super_page)
+    root->next_super_page = nullptr;
+
+  // We allocated a new super page so update super page metadata.
+  // First check if this is a new extent or not.
+  PartitionSuperPageExtentEntry* latest_extent =
+      reinterpret_cast<PartitionSuperPageExtentEntry*>(
+          PartitionSuperPageToMetadataArea(super_page));
+  // By storing the root in every extent metadata object, we have a fast way
+  // to go from a pointer within the partition to the root object.
+  latest_extent->root = root;
+  // Most new extents will be part of a larger extent, and these three fields
+  // are unused, but we initialize them to 0 so that we get a clear signal
+  // in case they are accidentally used.
+  latest_extent->super_page_base = nullptr;
+  latest_extent->super_pages_end = nullptr;
+  latest_extent->next = nullptr;
+
+  PartitionSuperPageExtentEntry* current_extent = root->current_extent;
+  bool isNewExtent = (super_page != requestedAddress);
+  if (UNLIKELY(isNewExtent)) {
+    if (UNLIKELY(!current_extent)) {
+      DCHECK(!root->first_extent);
+      root->first_extent = latest_extent;
+    } else {
+      DCHECK(current_extent->super_page_base);
+      current_extent->next = latest_extent;
+    }
+    root->current_extent = latest_extent;
+    latest_extent->super_page_base = super_page;
+    latest_extent->super_pages_end = super_page + kSuperPageSize;
+  } else {
+    // We allocated next to an existing extent so just nudge the size up a
+    // little.
+    DCHECK(current_extent->super_pages_end);
+    current_extent->super_pages_end += kSuperPageSize;
+    DCHECK(ret >= current_extent->super_page_base &&
+           ret < current_extent->super_pages_end);
+  }
+  return ret;
+}
+
+ALWAYS_INLINE uint16_t PartitionBucket::get_pages_per_slot_span() {
+  // Rounds up to nearest multiple of kNumSystemPagesPerPartitionPage.
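+  // E.g. 5 system pages round up to (5 + 3) / 4 == 2 partition pages, given
+  // kNumSystemPagesPerPartitionPage == 4.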
+  return (num_system_pages_per_slot_span +
+          (kNumSystemPagesPerPartitionPage - 1)) /
+         kNumSystemPagesPerPartitionPage;
+}
+
+ALWAYS_INLINE void PartitionBucket::InitializeSlotSpan(PartitionPage* page) {
+  // The bucket never changes. We set it up once.
+  page->bucket = this;
+  page->empty_cache_index = -1;
+
+  page->Reset();
+
+  // If this page has just a single slot, do not set up page offsets for any
+  // page metadata other than the first one. This ensures that attempts to
+  // touch invalid page metadata fail.
+  if (page->num_unprovisioned_slots == 1)
+    return;
+
+  uint16_t num_partition_pages = get_pages_per_slot_span();
+  char* page_char_ptr = reinterpret_cast<char*>(page);
+  for (uint16_t i = 1; i < num_partition_pages; ++i) {
+    page_char_ptr += kPageMetadataSize;
+    PartitionPage* secondary_page =
+        reinterpret_cast<PartitionPage*>(page_char_ptr);
+    secondary_page->page_offset = i;
+  }
+}
+
+ALWAYS_INLINE char* PartitionBucket::AllocAndFillFreelist(PartitionPage* page) {
+  DCHECK(page != PartitionPage::get_sentinel_page());
+  uint16_t num_slots = page->num_unprovisioned_slots;
+  DCHECK(num_slots);
+  // We should only get here when _every_ slot is either used or unprovisioned.
+  // (The third state is "on the freelist". If we have a non-empty freelist, we
+  // should not get here.)
+  DCHECK(num_slots + page->num_allocated_slots == this->get_slots_per_span());
+  // Similarly, explicitly make sure that the freelist is empty.
+  DCHECK(!page->freelist_head);
+  DCHECK(page->num_allocated_slots >= 0);
+
+  size_t size = this->slot_size;
+  char* base = reinterpret_cast<char*>(PartitionPage::ToPointer(page));
+  char* return_object = base + (size * page->num_allocated_slots);
+  char* first_freelist_pointer = return_object + size;
+  char* first_freelist_pointer_extent =
+      first_freelist_pointer + sizeof(PartitionFreelistEntry*);
+  // Our goal is to fault as few system pages as possible. We calculate the
+  // page containing the "end" of the returned slot, and then allow freelist
+  // pointers to be written up to the end of that page.
+  char* sub_page_limit = reinterpret_cast<char*>(
+      RoundUpToSystemPage(reinterpret_cast<size_t>(first_freelist_pointer)));
+  char* slots_limit = return_object + (size * num_slots);
+  char* freelist_limit = sub_page_limit;
+  if (UNLIKELY(slots_limit < freelist_limit))
+    freelist_limit = slots_limit;
+
+  uint16_t num_new_freelist_entries = 0;
+  if (LIKELY(first_freelist_pointer_extent <= freelist_limit)) {
+    // Only consider used space in the slot span. If we consider wasted
+    // space, we may get an off-by-one when a freelist pointer fits in the
+    // wasted space, but a slot does not.
+    // We know we can fit at least one freelist pointer.
+    num_new_freelist_entries = 1;
+    // Any further entries require space for the whole slot.
+    num_new_freelist_entries += static_cast<uint16_t>(
+        (freelist_limit - first_freelist_pointer_extent) / size);
+  }
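+  // Worked example (illustrative, assuming 4 KiB system pages and 8-byte
+  // pointers): on a fresh span with slot_size == 1024, return_object is at
+  // offset 0 and first_freelist_pointer at offset 1024. sub_page_limit
+  // rounds up to 4096, so 1 + (4096 - 1032) / 1024 == 3 freelist entries are
+  // provisioned, at offsets 1024, 2048 and 3072, besides the returned slot.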
+
+  // We always return an object slot -- that's the +1 below.
+  // We do not necessarily create any new freelist entries, because we cross
+  // sub-page boundaries frequently for large bucket sizes.
+  DCHECK(num_new_freelist_entries + 1 <= num_slots);
+  num_slots -= (num_new_freelist_entries + 1);
+  page->num_unprovisioned_slots = num_slots;
+  page->num_allocated_slots++;
+
+  if (LIKELY(num_new_freelist_entries)) {
+    char* freelist_pointer = first_freelist_pointer;
+    PartitionFreelistEntry* entry =
+        reinterpret_cast<PartitionFreelistEntry*>(freelist_pointer);
+    page->freelist_head = entry;
+    while (--num_new_freelist_entries) {
+      freelist_pointer += size;
+      PartitionFreelistEntry* next_entry =
+          reinterpret_cast<PartitionFreelistEntry*>(freelist_pointer);
+      entry->next = PartitionFreelistEntry::Transform(next_entry);
+      entry = next_entry;
+    }
+    entry->next = PartitionFreelistEntry::Transform(nullptr);
+  } else {
+    page->freelist_head = nullptr;
+  }
+  return return_object;
+}
+
+bool PartitionBucket::SetNewActivePage() {
+  PartitionPage* page = this->active_pages_head;
+  if (page == PartitionPage::get_sentinel_page())
+    return false;
+
+  PartitionPage* next_page;
+
+  for (; page; page = next_page) {
+    next_page = page->next_page;
+    DCHECK(page->bucket == this);
+    DCHECK(page != this->empty_pages_head);
+    DCHECK(page != this->decommitted_pages_head);
+
+    if (LIKELY(page->is_active())) {
+      // This page is usable because it has freelist entries, or has
+      // unprovisioned slots we can create freelist entries from.
+      this->active_pages_head = page;
+      return true;
+    }
+
+    // Deal with empty and decommitted pages.
+    if (LIKELY(page->is_empty())) {
+      page->next_page = this->empty_pages_head;
+      this->empty_pages_head = page;
+    } else if (LIKELY(page->is_decommitted())) {
+      page->next_page = this->decommitted_pages_head;
+      this->decommitted_pages_head = page;
+    } else {
+      DCHECK(page->is_full());
+      // If we get here, we found a full page. Skip over it too, and also
+      // tag it as full (via a negative value). We need it tagged so that
+      // free'ing can tell, and move it back into the active page list.
+      page->num_allocated_slots = -page->num_allocated_slots;
+      ++this->num_full_pages;
+      // num_full_pages is a 24-bit bitfield for efficient packing, so guard
+      // against overflow to be safe.
+      if (UNLIKELY(!this->num_full_pages))
+        OnFull();
+      // Not necessary but might help stop accidents.
+      page->next_page = nullptr;
+    }
+  }
+
+  this->active_pages_head = PartitionPage::get_sentinel_page();
+  return false;
+}
+
+void* PartitionBucket::SlowPathAlloc(PartitionRootBase* root,
+                                     int flags,
+                                     size_t size) {
+  // The slow path is called when the freelist is empty.
+  DCHECK(!this->active_pages_head->freelist_head);
+
+  PartitionPage* new_page = nullptr;
+
+  // For the PartitionRootGeneric::Alloc() API, we have a bunch of buckets
+  // marked as special cases. We bounce them through to the slow path so that
+  // we can still have a blazing fast hot path due to lack of corner-case
+  // branches.
+  //
+  // Note: The ordering of the conditionals matters! In particular,
+  // SetNewActivePage() has a side-effect even when returning
+  // false where it sweeps the active page list and may move things into
+  // the empty or decommitted lists which affects the subsequent conditional.
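+  //
+  // In order, the code below tries: a direct map for huge sizes, a page from
+  // the active list, a page from the empty or decommitted lists, and finally
+  // a brand new slot span; if all of these fail, it is an out-of-memory
+  // condition.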
+  bool return_null = flags & PartitionAllocReturnNull;
+  if (UNLIKELY(this->is_direct_mapped())) {
+    DCHECK(size > kGenericMaxBucketed);
+    DCHECK(this == get_sentinel_bucket());
+    DCHECK(this->active_pages_head == PartitionPage::get_sentinel_page());
+    if (size > kGenericMaxDirectMapped) {
+      if (return_null)
+        return nullptr;
+      PartitionExcessiveAllocationSize();
+    }
+    new_page = PartitionDirectMap(root, flags, size);
+  } else if (LIKELY(this->SetNewActivePage())) {
+    // First, did we find an active page in the active pages list?
+    new_page = this->active_pages_head;
+    DCHECK(new_page->is_active());
+  } else if (LIKELY(this->empty_pages_head != nullptr) ||
+             LIKELY(this->decommitted_pages_head != nullptr)) {
+    // Second, look in our lists of empty and decommitted pages.
+    // Check empty pages first, which are preferred, but beware that an
+    // empty page might have been decommitted.
+    while (LIKELY((new_page = this->empty_pages_head) != nullptr)) {
+      DCHECK(new_page->bucket == this);
+      DCHECK(new_page->is_empty() || new_page->is_decommitted());
+      this->empty_pages_head = new_page->next_page;
+      // Accept the empty page unless it got decommitted.
+      if (new_page->freelist_head) {
+        new_page->next_page = nullptr;
+        break;
+      }
+      DCHECK(new_page->is_decommitted());
+      new_page->next_page = this->decommitted_pages_head;
+      this->decommitted_pages_head = new_page;
+    }
+    if (UNLIKELY(!new_page) &&
+        LIKELY(this->decommitted_pages_head != nullptr)) {
+      new_page = this->decommitted_pages_head;
+      DCHECK(new_page->bucket == this);
+      DCHECK(new_page->is_decommitted());
+      this->decommitted_pages_head = new_page->next_page;
+      void* addr = PartitionPage::ToPointer(new_page);
+      root->RecommitSystemPages(addr, new_page->bucket->get_bytes_per_span());
+      new_page->Reset();
+    }
+    DCHECK(new_page);
+  } else {
+    // Third. If we get here, we need a brand new page.
+    uint16_t num_partition_pages = this->get_pages_per_slot_span();
+    void* rawPages = AllocNewSlotSpan(root, flags, num_partition_pages);
+    if (LIKELY(rawPages != nullptr)) {
+      new_page = PartitionPage::FromPointerNoAlignmentCheck(rawPages);
+      InitializeSlotSpan(new_page);
+    }
+  }
+
+  // Bail if we had a memory allocation failure.
+  if (UNLIKELY(!new_page)) {
+    DCHECK(this->active_pages_head == PartitionPage::get_sentinel_page());
+    if (return_null)
+      return nullptr;
+    root->OutOfMemory();
+  }
+
+  // TODO(ajwong): Is there a way to avoid the reading of bucket here?
+  // It seems like in many of the conditional branches above, |this| ==
+  // |new_page->bucket|. Maybe pull this into another function?
+  PartitionBucket* bucket = new_page->bucket;
+  DCHECK(bucket != get_sentinel_bucket());
+  bucket->active_pages_head = new_page;
+  new_page->set_raw_size(size);
+
+  // If we found an active page with free slots, or an empty page, we have a
+  // usable freelist head.
+  if (LIKELY(new_page->freelist_head != nullptr)) {
+    PartitionFreelistEntry* entry = new_page->freelist_head;
+    PartitionFreelistEntry* new_head =
+        PartitionFreelistEntry::Transform(entry->next);
+    new_page->freelist_head = new_head;
+    new_page->num_allocated_slots++;
+    return entry;
+  }
+  // Otherwise, we need to build the freelist.
+  DCHECK(new_page->num_unprovisioned_slots);
+  return AllocAndFillFreelist(new_page);
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/allocator/partition_allocator/partition_bucket.h b/base/allocator/partition_allocator/partition_bucket.h
new file mode 100644
index 0000000..a626dfa
--- /dev/null
+++ b/base/allocator/partition_allocator/partition_bucket.h
@@ -0,0 +1,121 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/allocator/partition_allocator/partition_alloc_constants.h"
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+
+namespace base {
+namespace internal {
+
+struct PartitionPage;
+struct PartitionRootBase;
+
+struct PartitionBucket {
+  // Accessed most in hot path => goes first.
+  PartitionPage* active_pages_head;
+
+  PartitionPage* empty_pages_head;
+  PartitionPage* decommitted_pages_head;
+  uint32_t slot_size;
+  uint32_t num_system_pages_per_slot_span : 8;
+  uint32_t num_full_pages : 24;
+
+  // Public API.
+  void Init(uint32_t new_slot_size);
+
+  // Note the matching Free() functions are in PartitionPage.
+  BASE_EXPORT NOINLINE void* SlowPathAlloc(PartitionRootBase* root,
+                                           int flags,
+                                           size_t size);
+
+  ALWAYS_INLINE bool is_direct_mapped() const {
+    return !num_system_pages_per_slot_span;
+  }
+  ALWAYS_INLINE size_t get_bytes_per_span() const {
+    // TODO(ajwong): Change to CheckedMul. https://crbug.com/787153
+    // https://crbug.com/680657
+    return num_system_pages_per_slot_span * kSystemPageSize;
+  }
+  ALWAYS_INLINE uint16_t get_slots_per_span() const {
+    // TODO(ajwong): Change to CheckedMul. https://crbug.com/787153
+    // https://crbug.com/680657
+    return static_cast<uint16_t>(get_bytes_per_span() / slot_size);
+  }
+
+  static ALWAYS_INLINE size_t get_direct_map_size(size_t size) {
+    // Caller must check that the size is not above the kGenericMaxDirectMapped
+    // limit before calling. This also guards against integer overflow in the
+    // calculation here.
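+    // E.g. with 4 KiB system pages, a 100,000 byte request rounds up to
+    // 102,400 bytes (25 pages).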
+    DCHECK(size <= kGenericMaxDirectMapped);
+    return (size + kSystemPageOffsetMask) & kSystemPageBaseMask;
+  }
+
+  // TODO(ajwong): Can this be made private?  https://crbug.com/787153
+  static PartitionBucket* get_sentinel_bucket();
+
+  // This helper function scans a bucket's active page list for a suitable new
+  // active page.  When it finds a suitable new active page (one that has
+  // free slots and is not empty), it is set as the new active page. If there
+  // is no suitable new active page, the current active page is set to
+  // PartitionPage::get_sentinel_page(). As potential pages are scanned, they
+  // are tidied up according to their state. Empty pages are swept onto the
+  // empty page list, decommitted pages onto the decommitted page list, and
+  // full pages are unlinked from any list.
+  //
+  // This is where the guts of the bucket maintenance is done!
+  bool SetNewActivePage();
+
+ private:
+  static void OutOfMemory(const PartitionRootBase* root);
+  static void OutOfMemoryWithLotsOfUncommitedPages();
+
+  static NOINLINE void OnFull();
+
+  // Returns a natural number of PartitionPages (calculated by
+  // get_system_pages_per_slot_span()) to allocate from the current
+  // SuperPage when the bucket runs out of slots.
+  ALWAYS_INLINE uint16_t get_pages_per_slot_span();
+
+  // Returns the number of system pages in a slot span.
+  //
+  // The calculation attempts to find the best number of System Pages to
+  // allocate for the given slot_size to minimize wasted space. It uses a
+  // heuristic that looks at number of bytes wasted after the last slot and
+  // attempts to account for the PTE usage of each System Page.
+  uint8_t get_system_pages_per_slot_span();
+
+  // Allocates a new slot span with size |num_partition_pages| from the
+  // current extent. Metadata within this slot span will be uninitialized.
+  // Returns nullptr on error.
+  ALWAYS_INLINE void* AllocNewSlotSpan(PartitionRootBase* root,
+                                       int flags,
+                                       uint16_t num_partition_pages);
+
+  // Each bucket allocates a slot span when it runs out of slots.
+  // A slot span's size is equal to get_pages_per_slot_span() number of
+  // PartitionPages. This function initializes each PartitionPage within the
+  // span to point to the first PartitionPage, which holds all the metadata
+  // for the span and registers this bucket as the owner of the span. It does
+  // NOT put the slots into the bucket's freelist.
+  ALWAYS_INLINE void InitializeSlotSpan(PartitionPage* page);
+
+  // Allocates one slot from the given |page| and then adds the remainder to
+  // the current bucket. If the |page| was freshly allocated, it must have been
+  // passed through InitializeSlotSpan() first.
+  ALWAYS_INLINE char* AllocAndFillFreelist(PartitionPage* page);
+
+  static PartitionBucket sentinel_bucket_;
+};
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_H_
diff --git a/base/allocator/partition_allocator/partition_cookie.h b/base/allocator/partition_allocator/partition_cookie.h
new file mode 100644
index 0000000..8e6cb20
--- /dev/null
+++ b/base/allocator/partition_allocator/partition_cookie.h
@@ -0,0 +1,72 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_COOKIE_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_COOKIE_H_
+
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+
+namespace base {
+namespace internal {
+
+#if DCHECK_IS_ON()
+// These two byte values match tcmalloc.
+static const unsigned char kUninitializedByte = 0xAB;
+static const unsigned char kFreedByte = 0xCD;
+// Handles alignment up to XMM instructions on Intel.
+static const size_t kCookieSize = 16;
+static const unsigned char kCookieValue[kCookieSize] = {
+    0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE, 0xD0, 0x0D,
+    0x13, 0x37, 0xF0, 0x05, 0xBA, 0x11, 0xAB, 0x1E};
+#endif
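+
+// With DCHECK_IS_ON(), each allocation is bracketed by two cookies:
+//
+//   |<- kCookieSize ->|<- requested size ->|<- kCookieSize ->|
+//   |     cookie      |     user data      |     cookie      |
+//
+// PartitionCookieSizeAdjustAdd() below grows a requested size to cover both
+// cookies, and PartitionCookieFreePointerAdjust() maps a pointer handed to
+// the application back to the start of the slot.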
+
+ALWAYS_INLINE void PartitionCookieCheckValue(void* ptr) {
+#if DCHECK_IS_ON()
+  unsigned char* cookie_ptr = reinterpret_cast<unsigned char*>(ptr);
+  for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr)
+    DCHECK(*cookie_ptr == kCookieValue[i]);
+#endif
+}
+
+ALWAYS_INLINE size_t PartitionCookieSizeAdjustAdd(size_t size) {
+#if DCHECK_IS_ON()
+  // Add space for cookies, checking for integer overflow. TODO(palmer):
+  // Investigate the performance and code size implications of using
+  // CheckedNumeric throughout PA.
+  DCHECK(size + (2 * kCookieSize) > size);
+  size += 2 * kCookieSize;
+#endif
+  return size;
+}
+
+ALWAYS_INLINE void* PartitionCookieFreePointerAdjust(void* ptr) {
+#if DCHECK_IS_ON()
+  // The value given to the application is actually just after the cookie.
+  ptr = static_cast<char*>(ptr) - kCookieSize;
+#endif
+  return ptr;
+}
+
+ALWAYS_INLINE size_t PartitionCookieSizeAdjustSubtract(size_t size) {
+#if DCHECK_IS_ON()
+  // Remove space for cookies.
+  DCHECK(size >= 2 * kCookieSize);
+  size -= 2 * kCookieSize;
+#endif
+  return size;
+}
+
+ALWAYS_INLINE void PartitionCookieWriteValue(void* ptr) {
+#if DCHECK_IS_ON()
+  unsigned char* cookie_ptr = reinterpret_cast<unsigned char*>(ptr);
+  for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr)
+    *cookie_ptr = kCookieValue[i];
+#endif
+}
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_COOKIE_H_
diff --git a/base/allocator/partition_allocator/partition_direct_map_extent.h b/base/allocator/partition_allocator/partition_direct_map_extent.h
new file mode 100644
index 0000000..2a0bb19
--- /dev/null
+++ b/base/allocator/partition_allocator/partition_direct_map_extent.h
@@ -0,0 +1,33 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_DIRECT_MAP_EXTENT_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_DIRECT_MAP_EXTENT_H_
+
+#include "base/allocator/partition_allocator/partition_bucket.h"
+#include "base/allocator/partition_allocator/partition_page.h"
+
+namespace base {
+namespace internal {
+
+struct PartitionDirectMapExtent {
+  PartitionDirectMapExtent* next_extent;
+  PartitionDirectMapExtent* prev_extent;
+  PartitionBucket* bucket;
+  size_t map_size;  // Mapped size, not including guard pages and meta-data.
+
+  ALWAYS_INLINE static PartitionDirectMapExtent* FromPage(PartitionPage* page);
+};
+
+ALWAYS_INLINE PartitionDirectMapExtent* PartitionDirectMapExtent::FromPage(
+    PartitionPage* page) {
+  DCHECK(page->bucket->is_direct_mapped());
+  return reinterpret_cast<PartitionDirectMapExtent*>(
+      reinterpret_cast<char*>(page) + 3 * kPageMetadataSize);
+}
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_DIRECT_MAP_EXTENT_H_
diff --git a/base/allocator/partition_allocator/partition_freelist_entry.h b/base/allocator/partition_allocator/partition_freelist_entry.h
new file mode 100644
index 0000000..7e3282e
--- /dev/null
+++ b/base/allocator/partition_allocator/partition_freelist_entry.h
@@ -0,0 +1,48 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_FREELIST_ENTRY_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_FREELIST_ENTRY_H_
+
+#include <stdint.h>
+
+#include "base/allocator/partition_allocator/partition_alloc_constants.h"
+#include "base/compiler_specific.h"
+#include "base/sys_byteorder.h"
+#include "build/build_config.h"
+
+namespace base {
+namespace internal {
+
+// TODO(ajwong): Introduce an EncodedFreelistEntry type and then replace
+// Transform() with Encode()/Decode() such that the API provides some static
+// type safety.
+//
+// https://crbug.com/787153
+struct PartitionFreelistEntry {
+  PartitionFreelistEntry* next;
+
+  static ALWAYS_INLINE PartitionFreelistEntry* Transform(
+      PartitionFreelistEntry* ptr) {
+// We use bswap on little endian as a fast mask for two reasons:
+// 1) If an object is freed and its vtable used where the attacker doesn't
+// get the chance to run allocations between the free and use, the vtable
+// dereference is likely to fault.
+// 2) If the attacker has a linear buffer overflow and elects to try and
+// corrupt a freelist pointer, partial pointer overwrite attacks are
+// thwarted.
+// For big endian, similar guarantees are arrived at with a negation.
+#if defined(ARCH_CPU_BIG_ENDIAN)
+    uintptr_t masked = ~reinterpret_cast<uintptr_t>(ptr);
+#else
+    uintptr_t masked = ByteSwapUintPtrT(reinterpret_cast<uintptr_t>(ptr));
+#endif
+    return reinterpret_cast<PartitionFreelistEntry*>(masked);
+  }
+};
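+
+// Note: both the byte swap and the negation are involutions, so
+// Transform(Transform(ptr)) == ptr and the same function serves as both
+// encoder and decoder for freelist pointers.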
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_FREELIST_ENTRY_H_
diff --git a/base/allocator/partition_allocator/partition_oom.cc b/base/allocator/partition_allocator/partition_oom.cc
new file mode 100644
index 0000000..5e1cf79
--- /dev/null
+++ b/base/allocator/partition_allocator/partition_oom.cc
@@ -0,0 +1,24 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/partition_oom.h"
+
+#include "base/allocator/partition_allocator/oom.h"
+#include "build/build_config.h"
+
+namespace base {
+namespace internal {
+
+void NOINLINE PartitionExcessiveAllocationSize() {
+  OOM_CRASH();
+}
+
+#if !defined(ARCH_CPU_64_BITS)
+NOINLINE void PartitionOutOfMemoryWithLotsOfUncommitedPages() {
+  OOM_CRASH();
+}
+#endif
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/allocator/partition_allocator/partition_oom.h b/base/allocator/partition_allocator/partition_oom.h
new file mode 100644
index 0000000..da8fc15
--- /dev/null
+++ b/base/allocator/partition_allocator/partition_oom.h
@@ -0,0 +1,26 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Holds functions for generating OOM errors from PartitionAlloc. This is
+// distinct from oom.h in that it is meant only for use in PartitionAlloc.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_OOM_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_OOM_H_
+
+#include "base/compiler_specific.h"
+#include "build/build_config.h"
+
+namespace base {
+namespace internal {
+
+NOINLINE void PartitionExcessiveAllocationSize();
+
+#if !defined(ARCH_CPU_64_BITS)
+NOINLINE void PartitionOutOfMemoryWithLotsOfUncommitedPages();
+#endif
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_OOM_H_
diff --git a/base/allocator/partition_allocator/partition_page.cc b/base/allocator/partition_allocator/partition_page.cc
new file mode 100644
index 0000000..3c9e041
--- /dev/null
+++ b/base/allocator/partition_allocator/partition_page.cc
@@ -0,0 +1,163 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/partition_page.h"
+
+#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
+#include "base/allocator/partition_allocator/partition_root_base.h"
+
+namespace base {
+namespace internal {
+
+namespace {
+
+ALWAYS_INLINE void PartitionDirectUnmap(PartitionPage* page) {
+  PartitionRootBase* root = PartitionRootBase::FromPage(page);
+  const PartitionDirectMapExtent* extent =
+      PartitionDirectMapExtent::FromPage(page);
+  size_t unmap_size = extent->map_size;
+
+  // Maintain the doubly-linked list of all direct mappings.
+  if (extent->prev_extent) {
+    DCHECK(extent->prev_extent->next_extent == extent);
+    extent->prev_extent->next_extent = extent->next_extent;
+  } else {
+    root->direct_map_list = extent->next_extent;
+  }
+  if (extent->next_extent) {
+    DCHECK(extent->next_extent->prev_extent == extent);
+    extent->next_extent->prev_extent = extent->prev_extent;
+  }
+
+  // Add on the size of the trailing guard page and preceding partition
+  // page.
+  unmap_size += kPartitionPageSize + kSystemPageSize;
+
+  size_t uncommitted_page_size = page->bucket->slot_size + kSystemPageSize;
+  root->DecreaseCommittedPages(uncommitted_page_size);
+  DCHECK(root->total_size_of_direct_mapped_pages >= uncommitted_page_size);
+  root->total_size_of_direct_mapped_pages -= uncommitted_page_size;
+
+  DCHECK(!(unmap_size & kPageAllocationGranularityOffsetMask));
+
+  char* ptr = reinterpret_cast<char*>(PartitionPage::ToPointer(page));
+  // Account for the mapping starting a partition page before the actual
+  // allocation address.
+  ptr -= kPartitionPageSize;
+
+  FreePages(ptr, unmap_size);
+}
+
+ALWAYS_INLINE void PartitionRegisterEmptyPage(PartitionPage* page) {
+  DCHECK(page->is_empty());
+  PartitionRootBase* root = PartitionRootBase::FromPage(page);
+
+  // If the page is already registered as empty, give it another life.
+  if (page->empty_cache_index != -1) {
+    DCHECK(page->empty_cache_index >= 0);
+    DCHECK(static_cast<unsigned>(page->empty_cache_index) < kMaxFreeableSpans);
+    DCHECK(root->global_empty_page_ring[page->empty_cache_index] == page);
+    root->global_empty_page_ring[page->empty_cache_index] = nullptr;
+  }
+
+  int16_t current_index = root->global_empty_page_ring_index;
+  PartitionPage* page_to_decommit = root->global_empty_page_ring[current_index];
+  // The page might well have been re-activated, filled up, etc. before we get
+  // around to looking at it here.
+  if (page_to_decommit)
+    page_to_decommit->DecommitIfPossible(root);
+
+  // We put the empty slot span on our global list of "pages that were once
+  // empty". thus providing it a bit of breathing room to get re-used before
+  // we really free it. This improves performance, particularly on Mac OS X
+  // which has subpar memory management performance.
+  root->global_empty_page_ring[current_index] = page;
+  page->empty_cache_index = current_index;
+  ++current_index;
+  if (current_index == kMaxFreeableSpans)
+    current_index = 0;
+  root->global_empty_page_ring_index = current_index;
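+  // A page thus survives at most kMaxFreeableSpans subsequent empty-page
+  // registrations before DecommitIfPossible() reclaims it, unless it gets
+  // reused or re-registered first.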
+}
+
+}  // namespace
+
+// static
+PartitionPage PartitionPage::sentinel_page_;
+
+PartitionPage* PartitionPage::get_sentinel_page() {
+  return &sentinel_page_;
+}
+
+void PartitionPage::FreeSlowPath() {
+  DCHECK(this != get_sentinel_page());
+  if (LIKELY(this->num_allocated_slots == 0)) {
+    // Page became fully unused.
+    if (UNLIKELY(bucket->is_direct_mapped())) {
+      PartitionDirectUnmap(this);
+      return;
+    }
+    // If it's the current active page, change it. We bounce the page to
+    // the empty list as a force towards defragmentation.
+    if (LIKELY(this == bucket->active_pages_head))
+      bucket->SetNewActivePage();
+    DCHECK(bucket->active_pages_head != this);
+
+    set_raw_size(0);
+    DCHECK(!get_raw_size());
+
+    PartitionRegisterEmptyPage(this);
+  } else {
+    DCHECK(!bucket->is_direct_mapped());
+    // Ensure that the page is full. That's the only valid case if we
+    // arrive here.
+    DCHECK(this->num_allocated_slots < 0);
+    // A transition of num_allocated_slots from 0 to -1 is not legal, and
+    // likely indicates a double-free.
+    CHECK(this->num_allocated_slots != -1);
+    this->num_allocated_slots = -this->num_allocated_slots - 2;
+    DCHECK(this->num_allocated_slots == bucket->get_slots_per_span() - 1);
+    // Fully used page became partially used. It must be put back on the
+    // non-full page list. Also make it the current page to increase the
+    // chances of it being filled up again. The old current page will be
+    // the next page.
+    DCHECK(!this->next_page);
+    if (LIKELY(bucket->active_pages_head != get_sentinel_page()))
+      this->next_page = bucket->active_pages_head;
+    bucket->active_pages_head = this;
+    --bucket->num_full_pages;
+    // Special case: for a partition page with just a single slot, it may
+    // now be empty and we want to run it through the empty logic.
+    if (UNLIKELY(this->num_allocated_slots == 0))
+      FreeSlowPath();
+  }
+}
+
+void PartitionPage::Decommit(PartitionRootBase* root) {
+  DCHECK(is_empty());
+  DCHECK(!bucket->is_direct_mapped());
+  void* addr = PartitionPage::ToPointer(this);
+  root->DecommitSystemPages(addr, bucket->get_bytes_per_span());
+
+  // We actually leave the decommitted page in the active list. We'll sweep
+  // it on to the decommitted page list when we next walk the active page
+  // list.
+  // Pulling this trick enables us to use a singly-linked page list for all
+  // cases, which is critical in keeping the page metadata structure down to
+  // 32 bytes in size.
+  freelist_head = nullptr;
+  num_unprovisioned_slots = 0;
+  DCHECK(is_decommitted());
+}
+
+void PartitionPage::DecommitIfPossible(PartitionRootBase* root) {
+  DCHECK(empty_cache_index >= 0);
+  DCHECK(static_cast<unsigned>(empty_cache_index) < kMaxFreeableSpans);
+  DCHECK(this == root->global_empty_page_ring[empty_cache_index]);
+  empty_cache_index = -1;
+  if (is_empty())
+    Decommit(root);
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/allocator/partition_allocator/partition_page.h b/base/allocator/partition_allocator/partition_page.h
new file mode 100644
index 0000000..e6a6eb7
--- /dev/null
+++ b/base/allocator/partition_allocator/partition_page.h
@@ -0,0 +1,288 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_H_
+
+#include "base/allocator/partition_allocator/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/partition_bucket.h"
+#include "base/allocator/partition_allocator/partition_cookie.h"
+#include "base/allocator/partition_allocator/partition_freelist_entry.h"
+
+namespace base {
+namespace internal {
+
+struct PartitionRootBase;
+
+// Some notes on page states. A page can be in one of four major states:
+// 1) Active.
+// 2) Full.
+// 3) Empty.
+// 4) Decommitted.
+// An active page has available free slots. A full page has no free slots. An
+// empty page has no free slots, and a decommitted page is an empty page that
+// had its backing memory released back to the system.
+// There are two linked lists tracking the pages. The "active page" list is an
+// approximation of a list of active pages. It is an approximation because
+// full, empty and decommitted pages may briefly be present in the list until
+// we next do a scan over it.
+// The "empty page" list is an accurate list of pages which are either empty
+// or decommitted.
+//
+// The significant page transitions are:
+// - free() will detect when a full page has a slot free()'d and immediately
+// return the page to the head of the active list.
+// - free() will detect when a page is fully emptied. It _may_ add it to the
+// empty list or it _may_ leave it on the active list until a future list scan.
+// - malloc() _may_ scan the active page list in order to fulfil the request.
+// If it does this, full, empty and decommitted pages encountered will be
+// booted out of the active list. If there are no suitable active pages found,
+// an empty or decommitted page (if one exists) will be pulled from the empty
+// list on to the active list.
+//
+// TODO(ajwong): Evaluate if this should be named PartitionSlotSpanMetadata or
+// similar. If so, all uses of the term "page" in comments, member variables,
+// local variables, and documentation that refer to this concept should be
+// updated.
+struct PartitionPage {
+  PartitionFreelistEntry* freelist_head;
+  PartitionPage* next_page;
+  PartitionBucket* bucket;
+  // Deliberately signed, 0 for empty or decommitted page, -n for full pages:
+  int16_t num_allocated_slots;
+  uint16_t num_unprovisioned_slots;
+  uint16_t page_offset;
+  int16_t empty_cache_index;  // -1 if not in the empty cache.
+
+  // Public API
+
+  // Note the matching Alloc() functions are in PartitionRootBase.
+  BASE_EXPORT NOINLINE void FreeSlowPath();
+  ALWAYS_INLINE void Free(void* ptr);
+
+  void Decommit(PartitionRootBase* root);
+  void DecommitIfPossible(PartitionRootBase* root);
+
+  // Pointer manipulation functions. These must be static as the input |page|
+  // pointer may be the result of an offset calculation and therefore cannot
+  // be trusted. The objective of these functions is to sanitize this input.
+  ALWAYS_INLINE static void* ToPointer(const PartitionPage* page);
+  ALWAYS_INLINE static PartitionPage* FromPointerNoAlignmentCheck(void* ptr);
+  ALWAYS_INLINE static PartitionPage* FromPointer(void* ptr);
+
+  ALWAYS_INLINE const size_t* get_raw_size_ptr() const;
+  ALWAYS_INLINE size_t* get_raw_size_ptr() {
+    return const_cast<size_t*>(
+        const_cast<const PartitionPage*>(this)->get_raw_size_ptr());
+  }
+
+  ALWAYS_INLINE size_t get_raw_size() const;
+  ALWAYS_INLINE void set_raw_size(size_t size);
+
+  ALWAYS_INLINE void Reset();
+
+  // TODO(ajwong): Can this be made private?  https://crbug.com/787153
+  BASE_EXPORT static PartitionPage* get_sentinel_page();
+
+  // Page State accessors.
+  // Note that it's only valid to call these functions on pages found on one of
+  // the page lists. Specifically, you can't call these functions on full pages
+  // that were detached from the active list.
+  //
+  // This restriction provides the flexibility for some of the status fields to
+  // be repurposed when a page is taken off a list. See the negation of
+  // |num_allocated_slots| when a full page is removed from the active list
+  // for an example of such repurposing.
+  ALWAYS_INLINE bool is_active() const;
+  ALWAYS_INLINE bool is_full() const;
+  ALWAYS_INLINE bool is_empty() const;
+  ALWAYS_INLINE bool is_decommitted() const;
+
+ private:
+  // |sentinel_page_| is used as a sentinel to indicate that there is no page
+  // in the active page list. We can use nullptr, but in that case we need
+  // to add a null-check branch to the hot allocation path. We want to avoid
+  // that.
+  //
+  // Note, this declaration is kept in the header as opposed to an anonymous
+  // namespace so the getter can be fully inlined.
+  static PartitionPage sentinel_page_;
+};
+static_assert(sizeof(PartitionPage) <= kPageMetadataSize,
+              "PartitionPage must be able to fit in a metadata slot");
+
+ALWAYS_INLINE char* PartitionSuperPageToMetadataArea(char* ptr) {
+  uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(ptr);
+  DCHECK(!(pointer_as_uint & kSuperPageOffsetMask));
+  // The metadata area is exactly one system page (the guard page) into the
+  // super page.
+  return reinterpret_cast<char*>(pointer_as_uint + kSystemPageSize);
+}
+
+ALWAYS_INLINE PartitionPage* PartitionPage::FromPointerNoAlignmentCheck(
+    void* ptr) {
+  uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(ptr);
+  char* super_page_ptr =
+      reinterpret_cast<char*>(pointer_as_uint & kSuperPageBaseMask);
+  uintptr_t partition_page_index =
+      (pointer_as_uint & kSuperPageOffsetMask) >> kPartitionPageShift;
+  // Index 0 is invalid because it is the metadata and guard area and
+  // the last index is invalid because it is a guard page.
+  DCHECK(partition_page_index);
+  DCHECK(partition_page_index < kNumPartitionPagesPerSuperPage - 1);
+  PartitionPage* page = reinterpret_cast<PartitionPage*>(
+      PartitionSuperPageToMetadataArea(super_page_ptr) +
+      (partition_page_index << kPageMetadataShift));
+  // Partition pages in the same slot span can share the same page object.
+  // Adjust for that.
+  size_t delta = page->page_offset << kPageMetadataShift;
+  page =
+      reinterpret_cast<PartitionPage*>(reinterpret_cast<char*>(page) - delta);
+  return page;
+}
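+
+// Worked example (illustrative, assuming the typical constants
+// kSystemPageSize = 4096, kPartitionPageShift = 14 and kPageMetadataShift = 5):
+// a pointer 3 partition pages past a super page base has
+// partition_page_index == 3, so its PartitionPage metadata lives at
+// super_page_base + 4096 + (3 << 5).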
+
+// Returns the start of the slot span for the PartitionPage.
+ALWAYS_INLINE void* PartitionPage::ToPointer(const PartitionPage* page) {
+  uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(page);
+
+  uintptr_t super_page_offset = (pointer_as_uint & kSuperPageOffsetMask);
+
+  // A valid |page| must be past the first guard system page and within
+  // the following metadata region.
+  DCHECK(super_page_offset > kSystemPageSize);
+  // Must be less than total metadata region.
+  DCHECK(super_page_offset < kSystemPageSize + (kNumPartitionPagesPerSuperPage *
+                                                kPageMetadataSize));
+  uintptr_t partition_page_index =
+      (super_page_offset - kSystemPageSize) >> kPageMetadataShift;
+  // Index 0 is invalid because it is the superpage extent metadata and the
+  // last index is invalid because the whole PartitionPage is set as guard
+  // pages for the metadata region.
+  DCHECK(partition_page_index);
+  DCHECK(partition_page_index < kNumPartitionPagesPerSuperPage - 1);
+  uintptr_t super_page_base = (pointer_as_uint & kSuperPageBaseMask);
+  void* ret = reinterpret_cast<void*>(
+      super_page_base + (partition_page_index << kPartitionPageShift));
+  return ret;
+}
+
+ALWAYS_INLINE PartitionPage* PartitionPage::FromPointer(void* ptr) {
+  PartitionPage* page = PartitionPage::FromPointerNoAlignmentCheck(ptr);
+  // Checks that the pointer's offset from the slot span start is a multiple
+  // of the slot size.
+  DCHECK(!((reinterpret_cast<uintptr_t>(ptr) -
+            reinterpret_cast<uintptr_t>(PartitionPage::ToPointer(page))) %
+           page->bucket->slot_size));
+  return page;
+}
+
+ALWAYS_INLINE const size_t* PartitionPage::get_raw_size_ptr() const {
+  // For single-slot buckets which span more than one partition page, we
+  // have some spare metadata space to store the raw allocation size. We
+  // can use this to report better statistics.
+  if (bucket->slot_size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize)
+    return nullptr;
+
+  DCHECK((bucket->slot_size % kSystemPageSize) == 0);
+  DCHECK(bucket->is_direct_mapped() || bucket->get_slots_per_span() == 1);
+
+  const PartitionPage* the_next_page = this + 1;
+  return reinterpret_cast<const size_t*>(&the_next_page->freelist_head);
+}
+
+ALWAYS_INLINE size_t PartitionPage::get_raw_size() const {
+  const size_t* ptr = get_raw_size_ptr();
+  if (UNLIKELY(ptr != nullptr))
+    return *ptr;
+  return 0;
+}
+
+ALWAYS_INLINE void PartitionPage::Free(void* ptr) {
+// If these asserts fire, you probably corrupted memory.
+#if DCHECK_IS_ON()
+  size_t slot_size = this->bucket->slot_size;
+  size_t raw_size = get_raw_size();
+  if (raw_size)
+    slot_size = raw_size;
+  PartitionCookieCheckValue(ptr);
+  PartitionCookieCheckValue(reinterpret_cast<char*>(ptr) + slot_size -
+                            kCookieSize);
+  memset(ptr, kFreedByte, slot_size);
+#endif
+  DCHECK(this->num_allocated_slots);
+  // TODO(palmer): See if we can afford to make this a CHECK.
+  // FIX FIX FIX
+  //  DCHECK(!freelist_head || PartitionRootBase::IsValidPage(
+  //                               PartitionPage::FromPointer(freelist_head)));
+  CHECK(ptr != freelist_head);  // Catches an immediate double free.
+  // Look for double free one level deeper in debug.
+  DCHECK(!freelist_head || ptr != internal::PartitionFreelistEntry::Transform(
+                                      freelist_head->next));
+  internal::PartitionFreelistEntry* entry =
+      static_cast<internal::PartitionFreelistEntry*>(ptr);
+  entry->next = internal::PartitionFreelistEntry::Transform(freelist_head);
+  freelist_head = entry;
+  --this->num_allocated_slots;
+  if (UNLIKELY(this->num_allocated_slots <= 0)) {
+    FreeSlowPath();
+  } else {
+    // All single-slot allocations must go through the slow path to
+    // correctly update the size metadata.
+    DCHECK(get_raw_size() == 0);
+  }
+}
+
+ALWAYS_INLINE bool PartitionPage::is_active() const {
+  DCHECK(this != get_sentinel_page());
+  DCHECK(!page_offset);
+  return (num_allocated_slots > 0 &&
+          (freelist_head || num_unprovisioned_slots));
+}
+
+ALWAYS_INLINE bool PartitionPage::is_full() const {
+  DCHECK(this != get_sentinel_page());
+  DCHECK(!page_offset);
+  bool ret = (num_allocated_slots == bucket->get_slots_per_span());
+  if (ret) {
+    DCHECK(!freelist_head);
+    DCHECK(!num_unprovisioned_slots);
+  }
+  return ret;
+}
+
+ALWAYS_INLINE bool PartitionPage::is_empty() const {
+  DCHECK(this != get_sentinel_page());
+  DCHECK(!page_offset);
+  return (!num_allocated_slots && freelist_head);
+}
+
+ALWAYS_INLINE bool PartitionPage::is_decommitted() const {
+  DCHECK(this != get_sentinel_page());
+  DCHECK(!page_offset);
+  bool ret = (!num_allocated_slots && !freelist_head);
+  if (ret) {
+    DCHECK(!num_unprovisioned_slots);
+    DCHECK(empty_cache_index == -1);
+  }
+  return ret;
+}
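+
+// Summary of the predicates above, for a page that is on one of the page
+// lists:
+//   active:      num_allocated_slots > 0, and there is either a freelist or
+//                slots not yet provisioned.
+//   full:        num_allocated_slots == get_slots_per_span(); no freelist.
+//   empty:       num_allocated_slots == 0, with a freelist (still committed).
+//   decommitted: num_allocated_slots == 0, no freelist (memory released).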
+
+ALWAYS_INLINE void PartitionPage::set_raw_size(size_t size) {
+  size_t* raw_size_ptr = get_raw_size_ptr();
+  if (UNLIKELY(raw_size_ptr != nullptr))
+    *raw_size_ptr = size;
+}
+
+ALWAYS_INLINE void PartitionPage::Reset() {
+  DCHECK(this->is_decommitted());
+
+  num_unprovisioned_slots = bucket->get_slots_per_span();
+  DCHECK(num_unprovisioned_slots);
+
+  next_page = nullptr;
+}
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_H_
diff --git a/base/allocator/partition_allocator/partition_root_base.cc b/base/allocator/partition_allocator/partition_root_base.cc
new file mode 100644
index 0000000..91b998f
--- /dev/null
+++ b/base/allocator/partition_allocator/partition_root_base.cc
@@ -0,0 +1,40 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/partition_root_base.h"
+
+#include "base/allocator/partition_allocator/oom.h"
+#include "base/allocator/partition_allocator/partition_oom.h"
+#include "base/allocator/partition_allocator/partition_page.h"
+#include "build/build_config.h"
+
+namespace base {
+namespace internal {
+
+NOINLINE void PartitionRootBase::OutOfMemory() {
+#if !defined(ARCH_CPU_64_BITS)
+  // Check whether this OOM is due to a lot of super pages that are allocated
+  // but not committed, probably due to http://crbug.com/421387.
+  if (total_size_of_super_pages + total_size_of_direct_mapped_pages -
+          total_size_of_committed_pages >
+      kReasonableSizeOfUnusedPages) {
+    PartitionOutOfMemoryWithLotsOfUncommitedPages();
+  }
+#endif
+  if (PartitionRootBase::gOomHandlingFunction)
+    (*PartitionRootBase::gOomHandlingFunction)();
+  OOM_CRASH();
+}
+
+void PartitionRootBase::DecommitEmptyPages() {
+  for (size_t i = 0; i < kMaxFreeableSpans; ++i) {
+    internal::PartitionPage* page = global_empty_page_ring[i];
+    if (page)
+      page->DecommitIfPossible(this);
+    global_empty_page_ring[i] = nullptr;
+  }
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/allocator/partition_allocator/partition_root_base.h b/base/allocator/partition_allocator/partition_root_base.h
new file mode 100644
index 0000000..e20990e
--- /dev/null
+++ b/base/allocator/partition_allocator/partition_root_base.h
@@ -0,0 +1,177 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ROOT_BASE_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ROOT_BASE_H_
+
+#include "base/allocator/partition_allocator/page_allocator.h"
+#include "base/allocator/partition_allocator/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/partition_bucket.h"
+#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
+#include "base/allocator/partition_allocator/partition_page.h"
+
+namespace base {
+namespace internal {
+
+struct PartitionPage;
+struct PartitionRootBase;
+
+// An "extent" is a span of consecutive superpages. We link to the partition's
+// next extent (if there is one) to the very start of a superpage's metadata
+// area.
+struct PartitionSuperPageExtentEntry {
+  PartitionRootBase* root;
+  char* super_page_base;
+  char* super_pages_end;
+  PartitionSuperPageExtentEntry* next;
+};
+static_assert(
+    sizeof(PartitionSuperPageExtentEntry) <= kPageMetadataSize,
+    "PartitionSuperPageExtentEntry must be able to fit in a metadata slot");
+
+struct BASE_EXPORT PartitionRootBase {
+  PartitionRootBase();
+  virtual ~PartitionRootBase();
+  size_t total_size_of_committed_pages = 0;
+  size_t total_size_of_super_pages = 0;
+  size_t total_size_of_direct_mapped_pages = 0;
+  // Invariant: total_size_of_committed_pages <=
+  //                total_size_of_super_pages +
+  //                total_size_of_direct_mapped_pages.
+  unsigned num_buckets = 0;
+  unsigned max_allocation = 0;
+  bool initialized = false;
+  char* next_super_page = nullptr;
+  char* next_partition_page = nullptr;
+  char* next_partition_page_end = nullptr;
+  PartitionSuperPageExtentEntry* current_extent = nullptr;
+  PartitionSuperPageExtentEntry* first_extent = nullptr;
+  PartitionDirectMapExtent* direct_map_list = nullptr;
+  PartitionPage* global_empty_page_ring[kMaxFreeableSpans] = {};
+  int16_t global_empty_page_ring_index = 0;
+  uintptr_t inverted_self = 0;
+
+  // Public API
+
+  // Allocates out of the given bucket. Properly, this function should probably
+  // be in PartitionBucket, but because the implementation needs to be inlined
+  // for performance, and because it needs to inspect PartitionPage,
+  // it becomes impossible to have it in PartitionBucket as this causes a
+  // cyclical dependency on PartitionPage function implementations.
+  //
+  // Moving it a layer lower couples PartitionRootBase and PartitionBucket, but
+  // preserves the layering of the includes.
+  //
+  // Note the matching Free() functions are in PartitionPage.
+  ALWAYS_INLINE void* AllocFromBucket(PartitionBucket* bucket,
+                                      int flags,
+                                      size_t size);
+
+  ALWAYS_INLINE static bool IsValidPage(PartitionPage* page);
+  ALWAYS_INLINE static PartitionRootBase* FromPage(PartitionPage* page);
+
+  // gOomHandlingFunction is invoked when PartitionAlloc hits OutOfMemory.
+  static void (*gOomHandlingFunction)();
+  NOINLINE void OutOfMemory();
+
+  ALWAYS_INLINE void IncreaseCommittedPages(size_t len);
+  ALWAYS_INLINE void DecreaseCommittedPages(size_t len);
+  ALWAYS_INLINE void DecommitSystemPages(void* address, size_t length);
+  ALWAYS_INLINE void RecommitSystemPages(void* address, size_t length);
+
+  void DecommitEmptyPages();
+};
+
+ALWAYS_INLINE void* PartitionRootBase::AllocFromBucket(PartitionBucket* bucket,
+                                                       int flags,
+                                                       size_t size) {
+  PartitionPage* page = bucket->active_pages_head;
+  // Check that this page is neither full nor freed.
+  DCHECK(page->num_allocated_slots >= 0);
+  void* ret = page->freelist_head;
+  if (LIKELY(ret != 0)) {
+    // If these DCHECKs fire, you probably corrupted memory.
+    // TODO(palmer): See if we can afford to make this a CHECK.
+    DCHECK(PartitionRootBase::IsValidPage(page));
+    // All large allocations must go through the slow path to correctly
+    // update the size metadata.
+    DCHECK(page->get_raw_size() == 0);
+    internal::PartitionFreelistEntry* new_head =
+        internal::PartitionFreelistEntry::Transform(
+            static_cast<internal::PartitionFreelistEntry*>(ret)->next);
+    page->freelist_head = new_head;
+    page->num_allocated_slots++;
+  } else {
+    ret = bucket->SlowPathAlloc(this, flags, size);
+    // TODO(palmer): See if we can afford to make this a CHECK.
+    DCHECK(!ret ||
+           PartitionRootBase::IsValidPage(PartitionPage::FromPointer(ret)));
+  }
+#if DCHECK_IS_ON()
+  if (!ret)
+    return 0;
+  // Fill the uninitialized pattern, and write the cookies.
+  page = PartitionPage::FromPointer(ret);
+  // TODO(ajwong): Can |page->bucket| ever not be |this|? If not, can this just
+  // be bucket->slot_size?
+  size_t new_slot_size = page->bucket->slot_size;
+  size_t raw_size = page->get_raw_size();
+  if (raw_size) {
+    DCHECK(raw_size == size);
+    new_slot_size = raw_size;
+  }
+  size_t no_cookie_size = PartitionCookieSizeAdjustSubtract(new_slot_size);
+  char* char_ret = static_cast<char*>(ret);
+  // The value given to the application is actually just after the cookie.
+  ret = char_ret + kCookieSize;
+
+  // Fill the region with kUninitializedByte and surround it with 2 cookies.
+  PartitionCookieWriteValue(char_ret);
+  memset(ret, kUninitializedByte, no_cookie_size);
+  PartitionCookieWriteValue(char_ret + kCookieSize + no_cookie_size);
+#endif
+  return ret;
+}
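+
+// In DCHECK builds, the slot returned above is laid out as
+//   | cookie | kUninitializedByte fill (no_cookie_size bytes) | cookie |
+// and the pointer handed to the caller points just past the leading cookie.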
+
+ALWAYS_INLINE bool PartitionRootBase::IsValidPage(PartitionPage* page) {
+  PartitionRootBase* root = PartitionRootBase::FromPage(page);
+  return root->inverted_self == ~reinterpret_cast<uintptr_t>(root);
+}
+
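+// All of a super page's metadata, including the PartitionSuperPageExtentEntry
+// stored in its first slot, fits within one system page, so masking any
+// PartitionPage metadata pointer down to its system page base yields the
+// extent entry, which records the owning root.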
+ALWAYS_INLINE PartitionRootBase* PartitionRootBase::FromPage(
+    PartitionPage* page) {
+  PartitionSuperPageExtentEntry* extent_entry =
+      reinterpret_cast<PartitionSuperPageExtentEntry*>(
+          reinterpret_cast<uintptr_t>(page) & kSystemPageBaseMask);
+  return extent_entry->root;
+}
+
+ALWAYS_INLINE void PartitionRootBase::IncreaseCommittedPages(size_t len) {
+  total_size_of_committed_pages += len;
+  DCHECK(total_size_of_committed_pages <=
+         total_size_of_super_pages + total_size_of_direct_mapped_pages);
+}
+
+ALWAYS_INLINE void PartitionRootBase::DecreaseCommittedPages(size_t len) {
+  total_size_of_committed_pages -= len;
+  DCHECK(total_size_of_committed_pages <=
+         total_size_of_super_pages + total_size_of_direct_mapped_pages);
+}
+
+ALWAYS_INLINE void PartitionRootBase::DecommitSystemPages(void* address,
+                                                          size_t length) {
+  ::base::DecommitSystemPages(address, length);
+  DecreaseCommittedPages(length);
+}
+
+ALWAYS_INLINE void PartitionRootBase::RecommitSystemPages(void* address,
+                                                          size_t length) {
+  CHECK(::base::RecommitSystemPages(address, length, PageReadWrite));
+  IncreaseCommittedPages(length);
+}
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ROOT_BASE_H_
diff --git a/base/allocator/partition_allocator/spin_lock.cc b/base/allocator/partition_allocator/spin_lock.cc
new file mode 100644
index 0000000..46f4965
--- /dev/null
+++ b/base/allocator/partition_allocator/spin_lock.cc
@@ -0,0 +1,104 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/spin_lock.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include <sched.h>
+#endif
+
+#include "base/threading/platform_thread.h"
+
+// The YIELD_PROCESSOR macro wraps an architecture-specific instruction that
+// informs the processor we're in a busy wait, so it can handle the branch more
+// intelligently and e.g. reduce power to our core or give more resources to the
+// other hyper-thread on this core. See the following for context:
+// https://software.intel.com/en-us/articles/benefitting-power-and-performance-sleep-loops
+//
+// The YIELD_THREAD macro tells the OS to relinquish our quantum. This is
+// basically a worst-case fallback, and if you're hitting it with any
+// frequency you really should be using a proper lock (such as |base::Lock|)
+// rather than these spinlocks.
+#if defined(OS_WIN)
+
+#define YIELD_PROCESSOR YieldProcessor()
+#define YIELD_THREAD SwitchToThread()
+
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+
+#if defined(ARCH_CPU_X86_64) || defined(ARCH_CPU_X86)
+#define YIELD_PROCESSOR __asm__ __volatile__("pause")
+#elif (defined(ARCH_CPU_ARMEL) && __ARM_ARCH >= 6) || defined(ARCH_CPU_ARM64)
+#define YIELD_PROCESSOR __asm__ __volatile__("yield")
+#elif defined(ARCH_CPU_MIPSEL)
+// The MIPS32 docs state that the PAUSE instruction is a no-op on older
+// architectures (first added in MIPS32r2). To avoid assembler errors when
+// targeting pre-r2, we must encode the instruction manually.
+#define YIELD_PROCESSOR __asm__ __volatile__(".word 0x00000140")
+#elif defined(ARCH_CPU_MIPS64EL) && __mips_isa_rev >= 2
+// Don't bother using .word here, since r2 is the lowest mips64 revision that
+// Chromium supports.
+#define YIELD_PROCESSOR __asm__ __volatile__("pause")
+#elif defined(ARCH_CPU_PPC64_FAMILY)
+#define YIELD_PROCESSOR __asm__ __volatile__("or 31,31,31")
+#elif defined(ARCH_CPU_S390_FAMILY)
+// just do nothing
+#define YIELD_PROCESSOR ((void)0)
+#endif  // ARCH
+
+#ifndef YIELD_PROCESSOR
+#warning "Processor yield not supported on this architecture."
+#define YIELD_PROCESSOR ((void)0)
+#endif
+
+#define YIELD_THREAD sched_yield()
+
+#else  // Other OS
+
+#warning "Thread yield not supported on this OS."
+#define YIELD_THREAD ((void)0)
+
+#endif  // OS_WIN
+
+namespace base {
+namespace subtle {
+
+void SpinLock::LockSlow() {
+  // The value of |kYieldProcessorTries| is cargo culted from TCMalloc, Windows
+  // critical section defaults, and various other recommendations.
+  // TODO(jschuh): Further tuning may be warranted.
+  static const int kYieldProcessorTries = 1000;
+  // The value of |kYieldThreadTries| is completely made up.
+  static const int kYieldThreadTries = 10;
+  int yield_thread_count = 0;
+  do {
+    do {
+      for (int count = 0; count < kYieldProcessorTries; ++count) {
+        // Let the processor know we're spinning.
+        YIELD_PROCESSOR;
+        if (!lock_.load(std::memory_order_relaxed) &&
+            LIKELY(!lock_.exchange(true, std::memory_order_acquire)))
+          return;
+      }
+
+      if (yield_thread_count < kYieldThreadTries) {
+        ++yield_thread_count;
+        // Give the OS a chance to schedule something on this core.
+        YIELD_THREAD;
+      } else {
+        // At this point, it's likely that the lock is held by a lower priority
+        // thread that is unavailable to finish its work because of higher
+        // priority threads spinning here. Sleeping should ensure that they make
+        // progress.
+        PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(1));
+      }
+    } while (lock_.load(std::memory_order_relaxed));
+  } while (UNLIKELY(lock_.exchange(true, std::memory_order_acquire)));
+}
+
+}  // namespace subtle
+}  // namespace base
diff --git a/base/allocator/partition_allocator/spin_lock.h b/base/allocator/partition_allocator/spin_lock.h
new file mode 100644
index 0000000..e698b56
--- /dev/null
+++ b/base/allocator/partition_allocator/spin_lock.h
@@ -0,0 +1,50 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SPIN_LOCK_H
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SPIN_LOCK_H
+
+#include <atomic>
+#include <memory>
+#include <mutex>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+
+// SpinLock is a simple spinlock class based on the standard CPU primitive of
+// atomically exchanging an int at a given memory address. These are intended
+// only for very short duration locks and assume a system with multiple cores.
+// For any potentially longer wait you should use a real lock, such as
+// |base::Lock|.
+namespace base {
+namespace subtle {
+
+class BASE_EXPORT SpinLock {
+ public:
+  constexpr SpinLock() = default;
+  ~SpinLock() = default;
+  using Guard = std::lock_guard<SpinLock>;
+
+  ALWAYS_INLINE void lock() {
+    static_assert(sizeof(lock_) == sizeof(int),
+                  "int and lock_ are different sizes");
+    if (LIKELY(!lock_.exchange(true, std::memory_order_acquire)))
+      return;
+    LockSlow();
+  }
+
+  ALWAYS_INLINE void unlock() { lock_.store(false, std::memory_order_release); }
+
+ private:
+  // This is called if the initial attempt to acquire the lock fails. It's
+  // slower, but has a much better scheduling and power consumption behavior.
+  void LockSlow();
+
+  std::atomic_int lock_{0};
+};
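+
+// Usage sketch (illustrative only): guard a very short critical section via
+// the std::lock_guard alias above.
+//
+//   static base::subtle::SpinLock g_lock;
+//   static int g_counter = 0;
+//
+//   void Increment() {
+//     base::subtle::SpinLock::Guard guard(g_lock);
+//     ++g_counter;
+//   }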
+
+}  // namespace subtle
+}  // namespace base
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SPIN_LOCK_H
diff --git a/base/allocator/partition_allocator/spin_lock_unittest.cc b/base/allocator/partition_allocator/spin_lock_unittest.cc
new file mode 100644
index 0000000..6a1fd6b
--- /dev/null
+++ b/base/allocator/partition_allocator/spin_lock_unittest.cc
@@ -0,0 +1,61 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/spin_lock.h"
+
+#include <memory>
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/threading/thread.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+static const size_t kBufferSize = 16;
+
+static subtle::SpinLock g_lock;
+
+static void FillBuffer(volatile char* buffer, char fill_pattern) {
+  for (size_t i = 0; i < kBufferSize; ++i)
+    buffer[i] = fill_pattern;
+}
+
+static void ChangeAndCheckBuffer(volatile char* buffer) {
+  FillBuffer(buffer, '\0');
+  int total = 0;
+  for (size_t i = 0; i < kBufferSize; ++i)
+    total += buffer[i];
+
+  EXPECT_EQ(0, total);
+
+  // This will mess with the other thread's calculation if we accidentally get
+  // concurrency.
+  FillBuffer(buffer, '!');
+}
+
+static void ThreadMain(volatile char* buffer) {
+  for (int i = 0; i < 500000; ++i) {
+    subtle::SpinLock::Guard guard(g_lock);
+    ChangeAndCheckBuffer(buffer);
+  }
+}
+
+TEST(SpinLockTest, Torture) {
+  char shared_buffer[kBufferSize];
+
+  Thread thread1("thread1");
+  Thread thread2("thread2");
+
+  thread1.StartAndWaitForTesting();
+  thread2.StartAndWaitForTesting();
+
+  thread1.task_runner()->PostTask(
+      FROM_HERE,
+      BindOnce(&ThreadMain, Unretained(static_cast<char*>(shared_buffer))));
+  thread2.task_runner()->PostTask(
+      FROM_HERE,
+      BindOnce(&ThreadMain, Unretained(static_cast<char*>(shared_buffer))));
+}
+
+}  // namespace base
diff --git a/base/allocator/tcmalloc_unittest.cc b/base/allocator/tcmalloc_unittest.cc
new file mode 100644
index 0000000..78c4f84
--- /dev/null
+++ b/base/allocator/tcmalloc_unittest.cc
@@ -0,0 +1,235 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+#include <stdio.h>
+
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/process/process_metrics.h"
+#include "base/sys_info.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(USE_TCMALLOC)
+namespace {
+
+using std::min;
+
+#ifdef NDEBUG
+// We wrap malloc and free in noinline functions to ensure that we test the real
+// implementation of the allocator. Otherwise, the compiler may specifically
+// recognize the calls to malloc and free in our tests and optimize them away.
+NOINLINE void* TCMallocDoMallocForTest(size_t size) {
+  return malloc(size);
+}
+
+NOINLINE void TCMallocDoFreeForTest(void* ptr) {
+  free(ptr);
+}
+#endif
+
+// Fill a buffer of the specified size with a predetermined pattern
+static void Fill(unsigned char* buffer, int n) {
+  for (int i = 0; i < n; i++) {
+    buffer[i] = (i & 0xff);
+  }
+}
+
+// Check that the specified buffer has the predetermined pattern
+// generated by Fill()
+static bool Valid(unsigned char* buffer, int n) {
+  for (int i = 0; i < n; i++) {
+    if (buffer[i] != (i & 0xff)) {
+      return false;
+    }
+  }
+  return true;
+}
+
+// Return the next interesting size/delta to check.  Returns -1 if no more.
+static int NextSize(int size) {
+  if (size < 100)
+    return size + 1;
+
+  if (size < 100000) {
+    // Find next power of two
+    int power = 1;
+    while (power < size)
+      power <<= 1;
+
+    // Yield (power-1, power, power+1)
+    if (size < power - 1)
+      return power - 1;
+
+    if (size == power - 1)
+      return power;
+
+    CHECK_EQ(size, power);
+    return power + 1;
+  } else {
+    return -1;
+  }
+}
+
+static void TestCalloc(size_t n, size_t s, bool ok) {
+  char* p = reinterpret_cast<char*>(calloc(n, s));
+  if (!ok) {
+    EXPECT_EQ(nullptr, p) << "calloc(n, s) should not succeed";
+  } else {
+    EXPECT_NE(reinterpret_cast<void*>(NULL), p)
+        << "calloc(n, s) should succeed";
+    for (size_t i = 0; i < n * s; i++) {
+      EXPECT_EQ('\0', p[i]);
+    }
+    free(p);
+  }
+}
+
+bool IsLowMemoryDevice() {
+  return base::SysInfo::AmountOfPhysicalMemory() <= 256LL * 1024 * 1024;
+}
+
+}  // namespace
+
+TEST(TCMallocTest, Malloc) {
+  // Try allocating data with a bunch of alignments and sizes
+  for (int size = 1; size < 1048576; size *= 2) {
+    unsigned char* ptr = reinterpret_cast<unsigned char*>(malloc(size));
+    // Should be 2 byte aligned
+    EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(ptr) & 1);
+    Fill(ptr, size);
+    EXPECT_TRUE(Valid(ptr, size));
+    free(ptr);
+  }
+}
+
+TEST(TCMallocTest, Calloc) {
+  TestCalloc(0, 0, true);
+  TestCalloc(0, 1, true);
+  TestCalloc(1, 1, true);
+  TestCalloc(1 << 10, 0, true);
+  TestCalloc(1 << 20, 0, true);
+  TestCalloc(0, 1 << 10, true);
+  TestCalloc(0, 1 << 20, true);
+  TestCalloc(1 << 20, 2, true);
+  TestCalloc(2, 1 << 20, true);
+  TestCalloc(1000, 1000, true);
+}
+
+#ifdef NDEBUG
+// This makes sure that reallocing a small number of bytes in either
+// direction doesn't cause us to allocate new memory. TCMalloc in debug mode
+// does not follow this.
+TEST(TCMallocTest, ReallocSmallDelta) {
+  int start_sizes[] = {100, 1000, 10000, 100000};
+  int deltas[] = {1, -2, 4, -8, 16, -32, 64, -128};
+
+  for (unsigned s = 0; s < sizeof(start_sizes) / sizeof(*start_sizes); ++s) {
+    void* p = malloc(start_sizes[s]);
+    ASSERT_TRUE(p);
+    // The larger the start-size, the larger the non-reallocing delta.
+    for (unsigned d = 0; d < s * 2; ++d) {
+      void* new_p = realloc(p, start_sizes[s] + deltas[d]);
+      ASSERT_EQ(p, new_p);  // realloc should not allocate new memory
+    }
+    // Test again, but this time reallocing smaller first.
+    for (unsigned d = 0; d < s * 2; ++d) {
+      void* new_p = realloc(p, start_sizes[s] - deltas[d]);
+      ASSERT_EQ(p, new_p);  // realloc should not allocate new memory
+    }
+    free(p);
+  }
+}
+#endif
+
+TEST(TCMallocTest, Realloc) {
+  for (int src_size = 0; src_size >= 0; src_size = NextSize(src_size)) {
+    for (int dst_size = 0; dst_size >= 0; dst_size = NextSize(dst_size)) {
+      unsigned char* src = reinterpret_cast<unsigned char*>(malloc(src_size));
+      Fill(src, src_size);
+      unsigned char* dst =
+          reinterpret_cast<unsigned char*>(realloc(src, dst_size));
+      EXPECT_TRUE(Valid(dst, min(src_size, dst_size)));
+      Fill(dst, dst_size);
+      EXPECT_TRUE(Valid(dst, dst_size));
+      if (dst != nullptr)
+        free(dst);
+    }
+  }
+
+  // The logic below tries to allocate kNumEntries * 9000 ~= 140 MB of memory.
+  // This would cause the test to crash on low memory devices with no VM
+  // overcommit (e.g., chromecast).
+  if (IsLowMemoryDevice())
+    return;
+
+  // Now make sure realloc works correctly even when we overflow the
+  // packed cache, so some entries are evicted from the cache.
+  // The cache has 2^12 entries, keyed by page number.
+  const int kNumEntries = 1 << 14;
+  int** p = reinterpret_cast<int**>(malloc(sizeof(*p) * kNumEntries));
+  int sum = 0;
+  for (int i = 0; i < kNumEntries; i++) {
+    // no page size is likely to be bigger than 8192?
+    p[i] = reinterpret_cast<int*>(malloc(8192));
+    p[i][1000] = i;  // use memory deep in the heart of p
+  }
+  for (int i = 0; i < kNumEntries; i++) {
+    p[i] = reinterpret_cast<int*>(realloc(p[i], 9000));
+  }
+  for (int i = 0; i < kNumEntries; i++) {
+    sum += p[i][1000];
+    free(p[i]);
+  }
+  EXPECT_EQ(kNumEntries / 2 * (kNumEntries - 1), sum);  // assume kNE is even
+  free(p);
+}
+
+#ifdef NDEBUG
+TEST(TCMallocFreeTest, BadPointerInFirstPageOfTheLargeObject) {
+  const size_t kPageSize = base::GetPageSize();
+  char* p =
+      reinterpret_cast<char*>(TCMallocDoMallocForTest(10 * kPageSize + 1));
+  for (unsigned offset = 1; offset < kPageSize; offset <<= 1) {
+    ASSERT_DEATH(TCMallocDoFreeForTest(p + offset),
+                 "Pointer is not pointing to the start of a span");
+  }
+  TCMallocDoFreeForTest(p);
+}
+
+// TODO(ssid): Fix flakiness and enable the test, crbug.com/571549.
+TEST(TCMallocFreeTest, DISABLED_BadPageAlignedPointerInsideLargeObject) {
+  const size_t kPageSize = base::GetPageSize();
+  const size_t kMaxSize = 10 * kPageSize;
+  char* p = reinterpret_cast<char*>(TCMallocDoMallocForTest(kMaxSize + 1));
+
+  for (unsigned offset = kPageSize; offset < kMaxSize; offset += kPageSize) {
+    // Only the first and last page of a span are in the heap map, so for the
+    // other pages tcmalloc will report a general invalid-pointer error.
+    ASSERT_DEATH(TCMallocDoFreeForTest(p + offset), "");
+  }
+  ASSERT_DEATH(TCMallocDoFreeForTest(p + kMaxSize),
+               "Pointer is not pointing to the start of a span");
+  TCMallocDoFreeForTest(p);
+}
+
+TEST(TCMallocFreeTest, DoubleFreeLargeObject) {
+  const size_t kMaxSize = 10 * base::GetPageSize();
+  char* p = reinterpret_cast<char*>(TCMallocDoMallocForTest(kMaxSize + 1));
+  ASSERT_DEATH(TCMallocDoFreeForTest(p); TCMallocDoFreeForTest(p),
+               "Object was not in-use");
+}
+
+TEST(TCMallocFreeTest, DoubleFreeSmallObject) {
+  const size_t kPageSize = base::GetPageSize();
+  for (size_t size = 1; size <= kPageSize; size <<= 1) {
+    char* p = reinterpret_cast<char*>(TCMallocDoMallocForTest(size));
+    ASSERT_DEATH(TCMallocDoFreeForTest(p); TCMallocDoFreeForTest(p),
+                 "Circular loop in list detected");
+  }
+}
+#endif  // NDEBUG
+
+#endif
diff --git a/base/allocator/unittest_utils.cc b/base/allocator/unittest_utils.cc
new file mode 100644
index 0000000..051d568
--- /dev/null
+++ b/base/allocator/unittest_utils.cc
@@ -0,0 +1,19 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// The unittests need this in order to link without pulling in tons of other
+// libraries.
+
+#include <config.h>
+#include <stdarg.h>
+#include <stddef.h>
+
+inline int snprintf(char* buffer, size_t count, const char* format, ...) {
+  int result;
+  va_list args;
+  va_start(args, format);
+  result = _vsnprintf(buffer, count, format, args);
+  va_end(args);
+  return result;
+}
+
diff --git a/base/allocator/winheap_stubs_win.cc b/base/allocator/winheap_stubs_win.cc
new file mode 100644
index 0000000..8aa5298
--- /dev/null
+++ b/base/allocator/winheap_stubs_win.cc
@@ -0,0 +1,94 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This code should move into the default Windows shim once the win-specific
+// allocation shim has been removed, and the generic shim has become the
+// default.
+
+#include "winheap_stubs_win.h"
+
+#include <limits.h>
+#include <malloc.h>
+#include <new.h>
+#include <windows.h>
+
+namespace base {
+namespace allocator {
+
+bool g_is_win_shim_layer_initialized = false;
+
+namespace {
+
+const size_t kWindowsPageSize = 4096;
+const size_t kMaxWindowsAllocation = INT_MAX - kWindowsPageSize;
+
+inline HANDLE get_heap_handle() {
+  return reinterpret_cast<HANDLE>(_get_heap_handle());
+}
+
+}  // namespace
+
+void* WinHeapMalloc(size_t size) {
+  if (size < kMaxWindowsAllocation)
+    return HeapAlloc(get_heap_handle(), 0, size);
+  return nullptr;
+}
+
+void WinHeapFree(void* ptr) {
+  if (!ptr)
+    return;
+
+  HeapFree(get_heap_handle(), 0, ptr);
+}
+
+void* WinHeapRealloc(void* ptr, size_t size) {
+  if (!ptr)
+    return WinHeapMalloc(size);
+  if (!size) {
+    WinHeapFree(ptr);
+    return nullptr;
+  }
+  if (size < kMaxWindowsAllocation)
+    return HeapReAlloc(get_heap_handle(), 0, ptr, size);
+  return nullptr;
+}
+
+size_t WinHeapGetSizeEstimate(void* ptr) {
+  if (!ptr)
+    return 0;
+
+  // Get the user size of the allocation.
+  size_t size = HeapSize(get_heap_handle(), 0, ptr);
+
+  // Account for the 8-byte HEAP_HEADER preceding the block.
+  size += 8;
+
+// Round up to the nearest allocation granularity, which is 8 for
+// 32-bit machines, and 16 for 64-bit machines.
+#if defined(ARCH_CPU_64_BITS)
+  const size_t kAllocationGranularity = 16;
+#else
+  const size_t kAllocationGranularity = 8;
+#endif
+
+  return (size + kAllocationGranularity - 1) & ~(kAllocationGranularity - 1);
+}
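+
+// For example (illustrative): on a 32-bit machine, a block whose HeapSize()
+// is 25 becomes 25 + 8 = 33 bytes with its header, which rounds up to 40.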
+
+// Call the new handler, if one has been set.
+// Returns true on successfully calling the handler, false otherwise.
+bool WinCallNewHandler(size_t size) {
+#ifdef _CPPUNWIND
+#error "Exceptions in allocator shim are not supported!"
+#endif  // _CPPUNWIND
+  // Get the current new handler.
+  _PNH nh = _query_new_handler();
+  if (!nh)
+    return false;
+  // Since exceptions are disabled, we don't really know if new_handler
+  // failed.  Assume it will abort if it fails.
+  return nh(size) ? true : false;
+}
+
+}  // namespace allocator
+}  // namespace base
diff --git a/base/allocator/winheap_stubs_win.h b/base/allocator/winheap_stubs_win.h
new file mode 100644
index 0000000..422dfe0
--- /dev/null
+++ b/base/allocator/winheap_stubs_win.h
@@ -0,0 +1,38 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Thin allocation wrappers for the Windows heap. This file should be deleted
+// once the win-specific allocation shim has been removed, and the generic shim
+// has become the default.
+
+#ifndef BASE_ALLOCATOR_WINHEAP_STUBS_H_
+#define BASE_ALLOCATOR_WINHEAP_STUBS_H_
+
+#include <stdint.h>
+
+namespace base {
+namespace allocator {
+
+// Set to true if the link-time magic has successfully hooked into the CRT's
+// heap initialization.
+extern bool g_is_win_shim_layer_initialized;
+
+// Thin wrappers to implement the standard C allocation semantics on the
+// CRT's Windows heap.
+void* WinHeapMalloc(size_t size);
+void WinHeapFree(void* ptr);
+void* WinHeapRealloc(void* ptr, size_t size);
+
+// Returns a lower-bound estimate for the full amount of memory consumed by
+// the allocation |ptr|.
+size_t WinHeapGetSizeEstimate(void* ptr);
+
+// Call the new handler, if one has been set.
+// Returns true on successfully calling the handler, false otherwise.
+bool WinCallNewHandler(size_t size);
+
+}  // namespace allocator
+}  // namespace base
+
+#endif  // BASE_ALLOCATOR_WINHEAP_STUBS_H_
\ No newline at end of file
diff --git a/base/at_exit.cc b/base/at_exit.cc
new file mode 100644
index 0000000..52c2151
--- /dev/null
+++ b/base/at_exit.cc
@@ -0,0 +1,109 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/at_exit.h"
+
+#include <stddef.h>
+#include <ostream>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/logging.h"
+
+namespace base {
+
+// Keep a stack of registered AtExitManagers.  We always operate on the most
+// recent, and we should never have more than one outside of testing (for a
+// statically linked version of this library).  Testing may use the shadow
+// version of the constructor, and if we are building a dynamic library we may
+// end up with multiple AtExitManagers on the same process.  We don't protect
+// this for thread-safe access, since it will only be modified in testing.
+static AtExitManager* g_top_manager = nullptr;
+
+static bool g_disable_managers = false;
+
+AtExitManager::AtExitManager()
+    : processing_callbacks_(false), next_manager_(g_top_manager) {
+// If multiple modules instantiate AtExitManagers they'll end up living in this
+// module... they have to coexist.
+#if !defined(COMPONENT_BUILD)
+  DCHECK(!g_top_manager);
+#endif
+  g_top_manager = this;
+}
+
+AtExitManager::~AtExitManager() {
+  if (!g_top_manager) {
+    NOTREACHED() << "Tried to ~AtExitManager without an AtExitManager";
+    return;
+  }
+  DCHECK_EQ(this, g_top_manager);
+
+  if (!g_disable_managers)
+    ProcessCallbacksNow();
+  g_top_manager = next_manager_;
+}
+
+// static
+void AtExitManager::RegisterCallback(AtExitCallbackType func, void* param) {
+  DCHECK(func);
+  RegisterTask(base::Bind(func, param));
+}
+
+// static
+void AtExitManager::RegisterTask(base::Closure task) {
+  if (!g_top_manager) {
+    NOTREACHED() << "Tried to RegisterCallback without an AtExitManager";
+    return;
+  }
+
+  AutoLock lock(g_top_manager->lock_);
+  DCHECK(!g_top_manager->processing_callbacks_);
+  g_top_manager->stack_.push(std::move(task));
+}
+
+// static
+void AtExitManager::ProcessCallbacksNow() {
+  if (!g_top_manager) {
+    NOTREACHED() << "Tried to ProcessCallbacksNow without an AtExitManager";
+    return;
+  }
+
+  // Callbacks may try to add new callbacks, so run them without holding
+  // |lock_|. This is an error and is caught by the DCHECK in RegisterTask(),
+  // but handle it gracefully in release builds so we don't deadlock.
+  base::stack<base::Closure> tasks;
+  {
+    AutoLock lock(g_top_manager->lock_);
+    tasks.swap(g_top_manager->stack_);
+    g_top_manager->processing_callbacks_ = true;
+  }
+
+  // Relax the cross-thread access restriction to non-thread-safe RefCount.
+  // It's safe since all other threads should be terminated at this point.
+  ScopedAllowCrossThreadRefCountAccess allow_cross_thread_ref_count_access;
+
+  while (!tasks.empty()) {
+    base::Closure task = tasks.top();
+    task.Run();
+    tasks.pop();
+  }
+
+  // Expect that all callbacks have been run.
+  DCHECK(g_top_manager->stack_.empty());
+}
+
+void AtExitManager::DisableAllAtExitManagers() {
+  AutoLock lock(g_top_manager->lock_);
+  g_disable_managers = true;
+}
+
+AtExitManager::AtExitManager(bool shadow)
+    : processing_callbacks_(false), next_manager_(g_top_manager) {
+  DCHECK(shadow || !g_top_manager);
+  g_top_manager = this;
+}
+
+}  // namespace base
diff --git a/base/at_exit.h b/base/at_exit.h
new file mode 100644
index 0000000..e74de8d
--- /dev/null
+++ b/base/at_exit.h
@@ -0,0 +1,80 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_AT_EXIT_H_
+#define BASE_AT_EXIT_H_
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/containers/stack.h"
+#include "base/macros.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+
+// This class provides a facility similar to the CRT atexit(), except that
+// we control when the callbacks are executed. Under Windows for a DLL they
+// happen at a really bad time and under the loader lock. This facility is
+// mostly used by base::Singleton.
+//
+// The usage is simple. Early in the main() or WinMain() scope create an
+// AtExitManager object on the stack:
+// int main(...) {
+//    base::AtExitManager exit_manager;
+//
+// }
+// When the exit_manager object goes out of scope, all the registered
+// callbacks and singleton destructors will be called.
+
+class BASE_EXPORT AtExitManager {
+ public:
+  typedef void (*AtExitCallbackType)(void*);
+
+  AtExitManager();
+
+  // The dtor calls all the registered callbacks. Do not try to register more
+  // callbacks after this point.
+  ~AtExitManager();
+
+  // Registers the specified function to be called at exit. The prototype of
+  // the callback function is void func(void*).
+  static void RegisterCallback(AtExitCallbackType func, void* param);
+
+  // Registers the specified task to be called at exit.
+  static void RegisterTask(base::Closure task);
+
+  // Calls the functions registered with RegisterCallback in LIFO order. It
+  // is possible to register new callbacks after calling this function.
+  static void ProcessCallbacksNow();
+
+  // Disable all registered at-exit callbacks. This is used only in a single-
+  // process mode.
+  static void DisableAllAtExitManagers();
+
+ protected:
+  // This constructor will allow this instance of AtExitManager to be created
+  // even if one already exists.  This should only be used for testing!
+  // AtExitManagers are kept on a global stack, and it will be removed during
+  // destruction.  This allows you to shadow another AtExitManager.
+  explicit AtExitManager(bool shadow);
+
+ private:
+  base::Lock lock_;
+  base::stack<base::Closure> stack_;
+  bool processing_callbacks_;
+  AtExitManager* next_manager_;  // Stack of managers to allow shadowing.
+
+  DISALLOW_COPY_AND_ASSIGN(AtExitManager);
+};
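+
+// Example sketch (illustrative; |buffer| and CleanupFunction are
+// placeholders), assuming an AtExitManager is alive somewhere up the stack:
+//
+//   void FreeBuffer(void* buffer) { free(buffer); }
+//   ...
+//   base::AtExitManager::RegisterCallback(&FreeBuffer, buffer);
+//   base::AtExitManager::RegisterTask(base::Bind(&CleanupFunction));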
+
+#if defined(UNIT_TEST)
+class ShadowingAtExitManager : public AtExitManager {
+ public:
+  ShadowingAtExitManager() : AtExitManager(true) {}
+};
+#endif  // defined(UNIT_TEST)
+
+}  // namespace base
+
+#endif  // BASE_AT_EXIT_H_
diff --git a/base/at_exit_unittest.cc b/base/at_exit_unittest.cc
new file mode 100644
index 0000000..3de061f
--- /dev/null
+++ b/base/at_exit_unittest.cc
@@ -0,0 +1,87 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/at_exit.h"
+#include "base/bind.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+int g_test_counter_1 = 0;
+int g_test_counter_2 = 0;
+
+void IncrementTestCounter1(void* unused) {
+  ++g_test_counter_1;
+}
+
+void IncrementTestCounter2(void* unused) {
+  ++g_test_counter_2;
+}
+
+void ZeroTestCounters() {
+  g_test_counter_1 = 0;
+  g_test_counter_2 = 0;
+}
+
+void ExpectCounter1IsZero(void* unused) {
+  EXPECT_EQ(0, g_test_counter_1);
+}
+
+void ExpectParamIsNull(void* param) {
+  EXPECT_EQ(nullptr, param);
+}
+
+void ExpectParamIsCounter(void* param) {
+  EXPECT_EQ(&g_test_counter_1, param);
+}
+
+}  // namespace
+
+class AtExitTest : public testing::Test {
+ private:
+  // Don't test the global AtExitManager, because asking it to process its
+  // AtExit callbacks can ruin the global state that other tests may depend on.
+  base::ShadowingAtExitManager exit_manager_;
+};
+
+TEST_F(AtExitTest, Basic) {
+  ZeroTestCounters();
+  base::AtExitManager::RegisterCallback(&IncrementTestCounter1, nullptr);
+  base::AtExitManager::RegisterCallback(&IncrementTestCounter2, nullptr);
+  base::AtExitManager::RegisterCallback(&IncrementTestCounter1, nullptr);
+
+  EXPECT_EQ(0, g_test_counter_1);
+  EXPECT_EQ(0, g_test_counter_2);
+  base::AtExitManager::ProcessCallbacksNow();
+  EXPECT_EQ(2, g_test_counter_1);
+  EXPECT_EQ(1, g_test_counter_2);
+}
+
+TEST_F(AtExitTest, LIFOOrder) {
+  ZeroTestCounters();
+  base::AtExitManager::RegisterCallback(&IncrementTestCounter1, nullptr);
+  base::AtExitManager::RegisterCallback(&ExpectCounter1IsZero, nullptr);
+  base::AtExitManager::RegisterCallback(&IncrementTestCounter2, nullptr);
+
+  EXPECT_EQ(0, g_test_counter_1);
+  EXPECT_EQ(0, g_test_counter_2);
+  base::AtExitManager::ProcessCallbacksNow();
+  EXPECT_EQ(1, g_test_counter_1);
+  EXPECT_EQ(1, g_test_counter_2);
+}
+
+TEST_F(AtExitTest, Param) {
+  base::AtExitManager::RegisterCallback(&ExpectParamIsNull, nullptr);
+  base::AtExitManager::RegisterCallback(&ExpectParamIsCounter,
+                                        &g_test_counter_1);
+  base::AtExitManager::ProcessCallbacksNow();
+}
+
+TEST_F(AtExitTest, Task) {
+  ZeroTestCounters();
+  base::AtExitManager::RegisterTask(base::Bind(&ExpectParamIsCounter,
+                                               &g_test_counter_1));
+  base::AtExitManager::ProcessCallbacksNow();
+}
diff --git a/base/atomic_ref_count.h b/base/atomic_ref_count.h
new file mode 100644
index 0000000..3ffa017
--- /dev/null
+++ b/base/atomic_ref_count.h
@@ -0,0 +1,67 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is a low-level implementation of atomic semantics for reference
+// counting.  Please use base/memory/ref_counted.h directly instead.
+
+#ifndef BASE_ATOMIC_REF_COUNT_H_
+#define BASE_ATOMIC_REF_COUNT_H_
+
+#include <atomic>
+
+namespace base {
+
+class AtomicRefCount {
+ public:
+  constexpr AtomicRefCount() : ref_count_(0) {}
+  explicit constexpr AtomicRefCount(int initial_value)
+      : ref_count_(initial_value) {}
+
+  // Increment a reference count.
+  void Increment() { Increment(1); }
+
+  // Increment a reference count by "increment", which must exceed 0.
+  void Increment(int increment) {
+    ref_count_.fetch_add(increment, std::memory_order_relaxed);
+  }
+
+  // Decrement a reference count, and return whether the result is non-zero.
+  // Insert barriers to ensure that state written before the reference count
+  // became zero will be visible to a thread that has just made the count zero.
+  bool Decrement() {
+    // TODO(jbroman): Technically this doesn't need to be an acquire operation
+    // unless the result is 1 (i.e., the ref count did indeed reach zero).
+    // However, there are toolchain issues that make that not work as well at
+    // present (notably TSAN doesn't like it).
+    return ref_count_.fetch_sub(1, std::memory_order_acq_rel) != 1;
+  }
+
+  // Return whether the reference count is one.  If the reference count is used
+  // in the conventional way, a reference count of 1 implies that the current
+  // thread owns the reference and no other thread shares it.  This call
+  // performs the test for a reference count of one, and performs the memory
+  // barrier needed for the owning thread to act on the object, knowing that it
+  // has exclusive access to the object.
+  bool IsOne() const { return ref_count_.load(std::memory_order_acquire) == 1; }
+
+  // Return whether the reference count is zero.  With conventional object
+  // reference counting, the object will be destroyed, so the reference count
+  // should never be zero.  Hence this is generally used for a debug check.
+  bool IsZero() const {
+    return ref_count_.load(std::memory_order_acquire) == 0;
+  }
+
+  // Returns the current reference count (with no barriers). This is subtle, and
+  // should be used only for debugging.
+  int SubtleRefCountForDebug() const {
+    return ref_count_.load(std::memory_order_relaxed);
+  }
+
+ private:
+  std::atomic_int ref_count_;
+};
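+
+// Usage sketch (illustrative only; prefer base::RefCounted in real code):
+//
+//   class Foo {
+//    public:
+//     void AddRef() { ref_count_.Increment(); }
+//     void Release() {
+//       if (!ref_count_.Decrement())  // False once the count reaches zero.
+//         delete this;
+//     }
+//
+//    private:
+//     base::AtomicRefCount ref_count_{1};
+//   };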
+
+}  // namespace base
+
+#endif  // BASE_ATOMIC_REF_COUNT_H_
diff --git a/base/atomic_sequence_num.h b/base/atomic_sequence_num.h
new file mode 100644
index 0000000..717e37a
--- /dev/null
+++ b/base/atomic_sequence_num.h
@@ -0,0 +1,33 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ATOMIC_SEQUENCE_NUM_H_
+#define BASE_ATOMIC_SEQUENCE_NUM_H_
+
+#include <atomic>
+
+#include "base/macros.h"
+
+namespace base {
+
+// AtomicSequenceNumber is a thread safe increasing sequence number generator.
+// Its constructor doesn't emit a static initializer, so it's safe to use as a
+// global variable or static member.
+class AtomicSequenceNumber {
+ public:
+  constexpr AtomicSequenceNumber() = default;
+
+  // Returns a monotonically increasing sequence number, starting from 0.
+  // This function can be called from any thread without data race.
+  inline int GetNext() { return seq_.fetch_add(1, std::memory_order_relaxed); }
+
+ private:
+  std::atomic_int seq_{0};
+
+  DISALLOW_COPY_AND_ASSIGN(AtomicSequenceNumber);
+};
+
+}  // namespace base
+
+#endif  // BASE_ATOMIC_SEQUENCE_NUM_H_
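
Since the constructor is constexpr and the member has a constant initializer, a global AtomicSequenceNumber is constant-initialized rather than dynamically initialized. A hedged standalone sketch of the same shape (SequenceNumber, g_ids, and NewId are illustrative names):

    #include <atomic>

    // Equivalent shape to base::AtomicSequenceNumber.
    class SequenceNumber {
     public:
      constexpr SequenceNumber() = default;
      int GetNext() { return seq_.fetch_add(1, std::memory_order_relaxed); }
     private:
      std::atomic_int seq_{0};
    };

    // Safe as a global: constant-initialized, no static initializer emitted.
    SequenceNumber g_ids;

    int NewId() { return g_ids.GetNext(); }  // 0, 1, 2, ... across threads.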
diff --git a/base/atomicops.h b/base/atomicops.h
new file mode 100644
index 0000000..4d8510e
--- /dev/null
+++ b/base/atomicops.h
@@ -0,0 +1,161 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// For atomic operations on reference counts, see atomic_refcount.h.
+// For atomic operations on sequence numbers, see atomic_sequence_num.h.
+
+// The routines exported by this module are subtle.  If you use them, even if
+// you get the code right, it will depend on careful reasoning about atomicity
+// and memory ordering; it will be less readable, and harder to maintain.  If
+// you plan to use these routines, you should have a good reason, such as solid
+// evidence that performance would otherwise suffer, or there being no
+// alternative.  You should assume only properties explicitly guaranteed by the
+// specifications in this file.  You are almost certainly _not_ writing code
+// just for the x86; if you assume x86 semantics, x86 hardware bugs and
+// implementations on other architectures will cause your code to break.  If you
+// do not know what you are doing, avoid these routines, and use a Mutex.
+//
+// It is incorrect to make direct assignments to/from an atomic variable.
+// You should use one of the Load or Store routines.  The NoBarrier
+// versions are provided when no barriers are needed:
+//   NoBarrier_Store()
+//   NoBarrier_Load()
+// Although there is currently no compiler enforcement, you are encouraged
+// to use these.
+//
+
+#ifndef BASE_ATOMICOPS_H_
+#define BASE_ATOMICOPS_H_
+
+#include <stdint.h>
+
+// Small C++ header which defines implementation specific macros used to
+// identify the STL implementation.
+// - libc++: captures __config for _LIBCPP_VERSION
+// - libstdc++: captures bits/c++config.h for __GLIBCXX__
+#include <cstddef>
+
+#include "base/base_export.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN) && defined(ARCH_CPU_64_BITS)
+// windows.h #defines this (only on x64). This causes problems because the
+// public API also uses MemoryBarrier as the public name for this fence. So, on
+// X64, undef it, and call its documented
+// (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx)
+// implementation directly.
+#undef MemoryBarrier
+#endif
+
+namespace base {
+namespace subtle {
+
+typedef int32_t Atomic32;
+#ifdef ARCH_CPU_64_BITS
+// We need to be able to go between Atomic64 and AtomicWord implicitly.  This
+// means Atomic64 and AtomicWord should be the same type on 64-bit.
+#if defined(__ILP32__) || defined(OS_NACL)
+// NaCl's intptr_t is not actually 64-bits on 64-bit!
+// http://code.google.com/p/nativeclient/issues/detail?id=1162
+typedef int64_t Atomic64;
+#else
+typedef intptr_t Atomic64;
+#endif
+#endif
+
+// Use AtomicWord for a machine-sized pointer.  It will use the Atomic32 or
+// Atomic64 routines below, depending on your architecture.
+typedef intptr_t AtomicWord;
+
+// Atomically execute:
+//      result = *ptr;
+//      if (*ptr == old_value)
+//        *ptr = new_value;
+//      return result;
+//
+// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
+// Always returns the old value of "*ptr".
+//
+// This routine implies no memory barriers.
+Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                  Atomic32 old_value,
+                                  Atomic32 new_value);
+
+// Atomically store new_value into *ptr, returning the previous value held in
+// *ptr.  This routine implies no memory barriers.
+Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
+
+// Atomically increment *ptr by "increment".  Returns the new value of
+// *ptr with the increment applied.  This routine implies no memory barriers.
+Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);
+
+Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                 Atomic32 increment);
+
+// The following lower-level operations are typically useful only to people
+// implementing higher-level synchronization operations like spinlocks,
+// mutexes, and condition-variables.  They combine CompareAndSwap(), a load, or
+// a store with appropriate memory-ordering instructions.  "Acquire" operations
+// ensure that no later memory access can be reordered ahead of the operation.
+// "Release" operations ensure that no previous memory access can be reordered
+// after the operation.  "Barrier" operations have both "Acquire" and "Release"
+// semantics.  A MemoryBarrier() has "Barrier" semantics, but does no memory
+// access.
+Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                Atomic32 old_value,
+                                Atomic32 new_value);
+Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                Atomic32 old_value,
+                                Atomic32 new_value);
+
+void MemoryBarrier();
+void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
+void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
+void Release_Store(volatile Atomic32* ptr, Atomic32 value);
+
+Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
+Atomic32 Acquire_Load(volatile const Atomic32* ptr);
+Atomic32 Release_Load(volatile const Atomic32* ptr);
+
+// 64-bit atomic operations (only available on 64-bit processors).
+#ifdef ARCH_CPU_64_BITS
+Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                  Atomic64 old_value,
+                                  Atomic64 new_value);
+Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
+Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
+Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
+
+Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                Atomic64 old_value,
+                                Atomic64 new_value);
+Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+                                Atomic64 old_value,
+                                Atomic64 new_value);
+void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value);
+void Acquire_Store(volatile Atomic64* ptr, Atomic64 value);
+void Release_Store(volatile Atomic64* ptr, Atomic64 value);
+Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
+Atomic64 Acquire_Load(volatile const Atomic64* ptr);
+Atomic64 Release_Load(volatile const Atomic64* ptr);
+#endif  // ARCH_CPU_64_BITS
+
+}  // namespace subtle
+}  // namespace base
+
+#if defined(OS_WIN)
+// TODO(jfb): Try to use base/atomicops_internals_portable.h everywhere.
+// https://crbug.com/559247.
+#  include "base/atomicops_internals_x86_msvc.h"
+#else
+#  include "base/atomicops_internals_portable.h"
+#endif
+
+// On some platforms we need additional declarations to make
+// AtomicWord compatible with our other Atomic* types.
+#if defined(OS_MACOSX) || defined(OS_OPENBSD)
+#include "base/atomicops_internals_atomicword_compat.h"
+#endif
+
+#endif  // BASE_ATOMICOPS_H_
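
The Acquire/Release wording above maps onto the usual publication pattern. A standalone C++11 sketch using std::atomic rather than the base/ wrappers, with Producer() and Consumer() assumed to run on different threads:

    #include <atomic>
    #include <cassert>

    int g_payload = 0;
    std::atomic<int> g_ready(0);

    void Producer() {
      g_payload = 42;                               // plain write...
      g_ready.store(1, std::memory_order_release);  // ...published here
    }

    void Consumer() {
      if (g_ready.load(std::memory_order_acquire)) {  // pairs with the store
        assert(g_payload == 42);  // guaranteed visible after the acquire load
      }
    }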
diff --git a/base/atomicops_internals_atomicword_compat.h b/base/atomicops_internals_atomicword_compat.h
new file mode 100644
index 0000000..8b000d2
--- /dev/null
+++ b/base/atomicops_internals_atomicword_compat.h
@@ -0,0 +1,104 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use base/atomicops.h instead.
+
+#ifndef BASE_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_
+#define BASE_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_
+
+#include <stdint.h>
+
+#include "build/build_config.h"
+
+// AtomicWord is a synonym for intptr_t, and Atomic32 is a synonym for int32_t,
+// which in turn means int. On some LP32 platforms, intptr_t is an int, but
+// on others, it's a long. When AtomicWord and Atomic32 are based on different
+// fundamental types, their pointers are incompatible.
+//
+// This file defines function overloads to allow both AtomicWord and Atomic32
+// data to be used with this interface.
+//
+// On LP64 platforms, AtomicWord and Atomic64 are both always long,
+// so this problem doesn't occur.
+
+#if !defined(ARCH_CPU_64_BITS)
+
+namespace base {
+namespace subtle {
+
+inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
+                                           AtomicWord old_value,
+                                           AtomicWord new_value) {
+  return NoBarrier_CompareAndSwap(
+      reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
+}
+
+inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
+                                           AtomicWord new_value) {
+  return NoBarrier_AtomicExchange(
+      reinterpret_cast<volatile Atomic32*>(ptr), new_value);
+}
+
+inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr,
+                                            AtomicWord increment) {
+  return NoBarrier_AtomicIncrement(
+      reinterpret_cast<volatile Atomic32*>(ptr), increment);
+}
+
+inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,
+                                          AtomicWord increment) {
+  return Barrier_AtomicIncrement(
+      reinterpret_cast<volatile Atomic32*>(ptr), increment);
+}
+
+inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
+                                         AtomicWord old_value,
+                                         AtomicWord new_value) {
+  return base::subtle::Acquire_CompareAndSwap(
+      reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
+}
+
+inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
+                                         AtomicWord old_value,
+                                         AtomicWord new_value) {
+  return base::subtle::Release_CompareAndSwap(
+      reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) {
+  NoBarrier_Store(
+      reinterpret_cast<volatile Atomic32*>(ptr), value);
+}
+
+inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
+  return base::subtle::Acquire_Store(
+      reinterpret_cast<volatile Atomic32*>(ptr), value);
+}
+
+inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
+  return base::subtle::Release_Store(
+      reinterpret_cast<volatile Atomic32*>(ptr), value);
+}
+
+inline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) {
+  return NoBarrier_Load(
+      reinterpret_cast<volatile const Atomic32*>(ptr));
+}
+
+inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
+  return base::subtle::Acquire_Load(
+      reinterpret_cast<volatile const Atomic32*>(ptr));
+}
+
+inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
+  return base::subtle::Release_Load(
+      reinterpret_cast<volatile const Atomic32*>(ptr));
+}
+
+}  // namespace subtle
+}  // namespace base
+
+#endif  // !defined(ARCH_CPU_64_BITS)
+
+#endif  // BASE_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_
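
To make the incompatibility concrete: on an LP32 ABI where intptr_t is long, AtomicWord* and Atomic32* are distinct pointer types even though both point at 32-bit integers, so calls must be bridged with a cast. A hedged illustration (Store32 and Example are illustrative names):

    #include <cstdint>

    using Atomic32 = int32_t;     // i.e. int
    using AtomicWord = intptr_t;  // int on some LP32 ABIs, long on others

    void Store32(volatile Atomic32* ptr);

    void Example(volatile AtomicWord* w) {
      // When AtomicWord is long, this call does not compile without a cast,
      // even though sizeof(long) == sizeof(int) on LP32 -- hence the
      // reinterpret_cast overloads in the header above.
      Store32(reinterpret_cast<volatile Atomic32*>(w));
    }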
diff --git a/base/atomicops_internals_portable.h b/base/atomicops_internals_portable.h
new file mode 100644
index 0000000..ee034de
--- /dev/null
+++ b/base/atomicops_internals_portable.h
@@ -0,0 +1,229 @@
+// Copyright (c) 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+//
+// This implementation uses C++11 atomics' member functions. The code base is
+// currently written assuming atomicity revolves around accesses instead of
+// C++11's memory locations. The burden is on the programmer to ensure that all
+// memory locations accessed atomically are never accessed non-atomically (tsan
+// should help with this).
+//
+// TODO(jfb) Modify the atomicops.h API and user code to declare atomic
+//           locations as truly atomic. See the static_assert below.
+//
+// Of note in this implementation:
+//  * All NoBarrier variants are implemented as relaxed.
+//  * All Barrier variants are implemented as sequentially-consistent.
+//  * Compare exchange's failure ordering is always the same as the success one
+//    (except for release, which fails as relaxed): using a weaker ordering is
+//    only valid under certain uses of compare exchange.
+//  * Acquire store doesn't exist in the C11 memory model; it is instead
+//    implemented as a relaxed store followed by a sequentially consistent
+//    fence.
+//  * Release load doesn't exist in the C11 memory model; it is instead
+//    implemented as a sequentially consistent fence, then a relaxed load.
+//  * Atomic increment is expected to return the post-incremented value, whereas
+//    C11 fetch add returns the previous value. The implementation therefore
+//    needs to increment twice (which the compiler should be able to detect and
+//    optimize).
+
+#ifndef BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
+#define BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
+
+#include <atomic>
+
+#include "build/build_config.h"
+
+namespace base {
+namespace subtle {
+
+// This implementation is transitional and maintains the original API for
+// atomicops.h. This requires casting memory locations to the atomic types, and
+// assumes that the API and the C++11 implementation are layout-compatible,
+// which isn't true for all implementations or hardware platforms. The static
+// assertion should detect this issue; were it to fire, this header
+// shouldn't be used.
+//
+// TODO(jfb) If this header manages to stay committed then the API should be
+//           modified, and all call sites updated.
+typedef volatile std::atomic<Atomic32>* AtomicLocation32;
+static_assert(sizeof(*(AtomicLocation32) nullptr) == sizeof(Atomic32),
+              "incompatible 32-bit atomic layout");
+
+inline void MemoryBarrier() {
+#if defined(__GLIBCXX__)
+  // Work around libstdc++ bug 51038 where atomic_thread_fence was declared but
+  // not defined, leading to the linker complaining about undefined references.
+  __atomic_thread_fence(std::memory_order_seq_cst);
+#else
+  std::atomic_thread_fence(std::memory_order_seq_cst);
+#endif
+}
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  ((AtomicLocation32)ptr)
+      ->compare_exchange_strong(old_value,
+                                new_value,
+                                std::memory_order_relaxed,
+                                std::memory_order_relaxed);
+  return old_value;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  return ((AtomicLocation32)ptr)
+      ->exchange(new_value, std::memory_order_relaxed);
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  return increment +
+         ((AtomicLocation32)ptr)
+             ->fetch_add(increment, std::memory_order_relaxed);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  return increment + ((AtomicLocation32)ptr)->fetch_add(increment);
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  ((AtomicLocation32)ptr)
+      ->compare_exchange_strong(old_value,
+                                new_value,
+                                std::memory_order_acquire,
+                                std::memory_order_acquire);
+  return old_value;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  ((AtomicLocation32)ptr)
+      ->compare_exchange_strong(old_value,
+                                new_value,
+                                std::memory_order_release,
+                                std::memory_order_relaxed);
+  return old_value;
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  ((AtomicLocation32)ptr)->store(value, std::memory_order_relaxed);
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  ((AtomicLocation32)ptr)->store(value, std::memory_order_relaxed);
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  ((AtomicLocation32)ptr)->store(value, std::memory_order_release);
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return ((AtomicLocation32)ptr)->load(std::memory_order_relaxed);
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  return ((AtomicLocation32)ptr)->load(std::memory_order_acquire);
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  MemoryBarrier();
+  return ((AtomicLocation32)ptr)->load(std::memory_order_relaxed);
+}
+
+#if defined(ARCH_CPU_64_BITS)
+
+typedef volatile std::atomic<Atomic64>* AtomicLocation64;
+static_assert(sizeof(*(AtomicLocation64) nullptr) == sizeof(Atomic64),
+              "incompatible 64-bit atomic layout");
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                         Atomic64 old_value,
+                                         Atomic64 new_value) {
+  ((AtomicLocation64)ptr)
+      ->compare_exchange_strong(old_value,
+                                new_value,
+                                std::memory_order_relaxed,
+                                std::memory_order_relaxed);
+  return old_value;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+                                         Atomic64 new_value) {
+  return ((AtomicLocation64)ptr)
+      ->exchange(new_value, std::memory_order_relaxed);
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+                                          Atomic64 increment) {
+  return increment +
+         ((AtomicLocation64)ptr)
+             ->fetch_add(increment, std::memory_order_relaxed);
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+                                        Atomic64 increment) {
+  return increment + ((AtomicLocation64)ptr)->fetch_add(increment);
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  ((AtomicLocation64)ptr)
+      ->compare_exchange_strong(old_value,
+                                new_value,
+                                std::memory_order_acquire,
+                                std::memory_order_acquire);
+  return old_value;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  ((AtomicLocation64)ptr)
+      ->compare_exchange_strong(old_value,
+                                new_value,
+                                std::memory_order_release,
+                                std::memory_order_relaxed);
+  return old_value;
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+  ((AtomicLocation64)ptr)->store(value, std::memory_order_relaxed);
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+  ((AtomicLocation64)ptr)->store(value, std::memory_order_relaxed);
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+  ((AtomicLocation64)ptr)->store(value, std::memory_order_release);
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+  return ((AtomicLocation64)ptr)->load(std::memory_order_relaxed);
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+  return ((AtomicLocation64)ptr)->load(std::memory_order_acquire);
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+  MemoryBarrier();
+  return ((AtomicLocation64)ptr)->load(std::memory_order_relaxed);
+}
+
+#endif  // defined(ARCH_CPU_64_BITS)
+}  // namespace subtle
+}  // namespace base
+
+#endif  // BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
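
The "increment twice" note above comes from fetch_add returning the pre-increment value, while the atomicops API promises the post-increment one. A worked sketch (PostIncrement is an illustrative name): with the counter at 5 and increment 1, fetch_add returns 5 and the wrapper returns 6.

    #include <atomic>
    #include <cstdint>

    int32_t PostIncrement(std::atomic<int32_t>& count, int32_t increment) {
      // fetch_add returns the value *before* the addition...
      int32_t old_value = count.fetch_add(increment, std::memory_order_relaxed);
      // ...so the post-increment result the atomicops API promises is
      // reconstructed by adding |increment| back in.
      return increment + old_value;
    }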
diff --git a/base/atomicops_internals_x86_msvc.h b/base/atomicops_internals_x86_msvc.h
new file mode 100644
index 0000000..ee9043e
--- /dev/null
+++ b/base/atomicops_internals_x86_msvc.h
@@ -0,0 +1,198 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use base/atomicops.h instead.
+
+#ifndef BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
+#define BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
+
+#include "base/win/windows_types.h"
+
+#include <intrin.h>
+
+#include "base/macros.h"
+#include "build/build_config.h"
+
+#if defined(ARCH_CPU_64_BITS)
+// windows.h #defines this (only on x64). This causes problems because the
+// public API also uses MemoryBarrier as the public name for this fence. So, on
+// X64, undef it, and call its documented
+// (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx)
+// implementation directly.
+#undef MemoryBarrier
+#endif
+
+namespace base {
+namespace subtle {
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  LONG result = _InterlockedCompareExchange(
+      reinterpret_cast<volatile LONG*>(ptr),
+      static_cast<LONG>(new_value),
+      static_cast<LONG>(old_value));
+  return static_cast<Atomic32>(result);
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  LONG result = _InterlockedExchange(
+      reinterpret_cast<volatile LONG*>(ptr),
+      static_cast<LONG>(new_value));
+  return static_cast<Atomic32>(result);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  return _InterlockedExchangeAdd(
+      reinterpret_cast<volatile LONG*>(ptr),
+      static_cast<LONG>(increment)) + increment;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  return Barrier_AtomicIncrement(ptr, increment);
+}
+
+inline void MemoryBarrier() {
+#if defined(ARCH_CPU_64_BITS)
+  // See #undef and note at the top of this file.
+  __faststorefence();
+#else
+  // We use the implementation of MemoryBarrier from WinNT.h.
+  LONG barrier;
+
+  _InterlockedOr(&barrier, 0);
+#endif
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  NoBarrier_AtomicExchange(ptr, value);
+              // acts as a barrier in this implementation
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value; // works w/o barrier for current Intel chips as of June 2005
+  // See comments in Atomic64 version of Release_Store() below.
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  Atomic32 value = *ptr;
+  return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+#if defined(_WIN64)
+
+// 64-bit low-level operations on 64-bit platform.
+
+static_assert(sizeof(Atomic64) == sizeof(PVOID), "atomic word is atomic");
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                         Atomic64 old_value,
+                                         Atomic64 new_value) {
+  PVOID result = _InterlockedCompareExchangePointer(
+      reinterpret_cast<volatile PVOID*>(ptr),
+      reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
+  return reinterpret_cast<Atomic64>(result);
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+                                         Atomic64 new_value) {
+  PVOID result =
+      _InterlockedExchangePointer(reinterpret_cast<volatile PVOID*>(ptr),
+                                  reinterpret_cast<PVOID>(new_value));
+  return reinterpret_cast<Atomic64>(result);
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+                                        Atomic64 increment) {
+  return _InterlockedExchangeAdd64(reinterpret_cast<volatile LONGLONG*>(ptr),
+                                   static_cast<LONGLONG>(increment)) +
+         increment;
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+                                          Atomic64 increment) {
+  return Barrier_AtomicIncrement(ptr, increment);
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+  NoBarrier_AtomicExchange(ptr, value);
+              // acts as a barrier in this implementation
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value; // works w/o barrier for current Intel chips as of June 2005
+
+  // When new chips come out, check:
+  //  IA-32 Intel Architecture Software Developer's Manual, Volume 3:
+  //  System Programming Guide, Chapter 7: Multiple-processor management,
+  //  Section 7.2, Memory Ordering.
+  // Last seen at:
+  //   http://developer.intel.com/design/pentium4/manuals/index_new.htm
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+  return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+  Atomic64 value = *ptr;
+  return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+
+#endif  // defined(_WIN64)
+
+}  // namespace subtle
+}  // namespace base
+
+#endif  // BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
diff --git a/base/atomicops_unittest.cc b/base/atomicops_unittest.cc
new file mode 100644
index 0000000..7298609
--- /dev/null
+++ b/base/atomicops_unittest.cc
@@ -0,0 +1,248 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/atomicops.h"
+
+#include <stdint.h>
+#include <string.h>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+template <class AtomicType>
+static void TestAtomicIncrement() {
+  // For now, we just test single-threaded execution.
+
+  // Use a guard value to make sure NoBarrier_AtomicIncrement doesn't write
+  // outside the expected address bounds.  In particular, this tests that
+  // some future change to the asm code doesn't cause the 32-bit
+  // NoBarrier_AtomicIncrement to do the wrong thing on 64-bit
+  // machines.
+  struct {
+    AtomicType prev_word;
+    AtomicType count;
+    AtomicType next_word;
+  } s;
+
+  AtomicType prev_word_value, next_word_value;
+  memset(&prev_word_value, 0xFF, sizeof(AtomicType));
+  memset(&next_word_value, 0xEE, sizeof(AtomicType));
+
+  s.prev_word = prev_word_value;
+  s.count = 0;
+  s.next_word = next_word_value;
+
+  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 1), 1);
+  EXPECT_EQ(s.count, 1);
+  EXPECT_EQ(s.prev_word, prev_word_value);
+  EXPECT_EQ(s.next_word, next_word_value);
+
+  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 2), 3);
+  EXPECT_EQ(s.count, 3);
+  EXPECT_EQ(s.prev_word, prev_word_value);
+  EXPECT_EQ(s.next_word, next_word_value);
+
+  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 3), 6);
+  EXPECT_EQ(s.count, 6);
+  EXPECT_EQ(s.prev_word, prev_word_value);
+  EXPECT_EQ(s.next_word, next_word_value);
+
+  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -3), 3);
+  EXPECT_EQ(s.count, 3);
+  EXPECT_EQ(s.prev_word, prev_word_value);
+  EXPECT_EQ(s.next_word, next_word_value);
+
+  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -2), 1);
+  EXPECT_EQ(s.count, 1);
+  EXPECT_EQ(s.prev_word, prev_word_value);
+  EXPECT_EQ(s.next_word, next_word_value);
+
+  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -1), 0);
+  EXPECT_EQ(s.count, 0);
+  EXPECT_EQ(s.prev_word, prev_word_value);
+  EXPECT_EQ(s.next_word, next_word_value);
+
+  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -1), -1);
+  EXPECT_EQ(s.count, -1);
+  EXPECT_EQ(s.prev_word, prev_word_value);
+  EXPECT_EQ(s.next_word, next_word_value);
+
+  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -4), -5);
+  EXPECT_EQ(s.count, -5);
+  EXPECT_EQ(s.prev_word, prev_word_value);
+  EXPECT_EQ(s.next_word, next_word_value);
+
+  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 5), 0);
+  EXPECT_EQ(s.count, 0);
+  EXPECT_EQ(s.prev_word, prev_word_value);
+  EXPECT_EQ(s.next_word, next_word_value);
+}
+
+
+#define NUM_BITS(T) (sizeof(T) * 8)
+
+
+template <class AtomicType>
+static void TestCompareAndSwap() {
+  AtomicType value = 0;
+  AtomicType prev = base::subtle::NoBarrier_CompareAndSwap(&value, 0, 1);
+  EXPECT_EQ(1, value);
+  EXPECT_EQ(0, prev);
+
+  // Verify that CAS will *not* change "value" if it doesn't match the
+  // expected number. CAS will always return the actual value of the
+  // variable from before any change.
+  AtomicType fail = base::subtle::NoBarrier_CompareAndSwap(&value, 0, 2);
+  EXPECT_EQ(1, value);
+  EXPECT_EQ(1, fail);
+
+  // Use test value that has non-zero bits in both halves, more for testing
+  // 64-bit implementation on 32-bit platforms.
+  const AtomicType k_test_val = (static_cast<uint64_t>(1) <<
+                                 (NUM_BITS(AtomicType) - 2)) + 11;
+  value = k_test_val;
+  prev = base::subtle::NoBarrier_CompareAndSwap(&value, 0, 5);
+  EXPECT_EQ(k_test_val, value);
+  EXPECT_EQ(k_test_val, prev);
+
+  value = k_test_val;
+  prev = base::subtle::NoBarrier_CompareAndSwap(&value, k_test_val, 5);
+  EXPECT_EQ(5, value);
+  EXPECT_EQ(k_test_val, prev);
+}
+
+
+template <class AtomicType>
+static void TestAtomicExchange() {
+  AtomicType value = 0;
+  AtomicType new_value = base::subtle::NoBarrier_AtomicExchange(&value, 1);
+  EXPECT_EQ(1, value);
+  EXPECT_EQ(0, new_value);
+
+  // Use test value that has non-zero bits in both halves, more for testing
+  // 64-bit implementation on 32-bit platforms.
+  const AtomicType k_test_val = (static_cast<uint64_t>(1) <<
+                                 (NUM_BITS(AtomicType) - 2)) + 11;
+  value = k_test_val;
+  new_value = base::subtle::NoBarrier_AtomicExchange(&value, k_test_val);
+  EXPECT_EQ(k_test_val, value);
+  EXPECT_EQ(k_test_val, new_value);
+
+  value = k_test_val;
+  new_value = base::subtle::NoBarrier_AtomicExchange(&value, 5);
+  EXPECT_EQ(5, value);
+  EXPECT_EQ(k_test_val, new_value);
+}
+
+
+template <class AtomicType>
+static void TestAtomicIncrementBounds() {
+  // Test at rollover boundary between int_max and int_min
+  AtomicType test_val = (static_cast<uint64_t>(1) <<
+                         (NUM_BITS(AtomicType) - 1));
+  AtomicType value = -1 ^ test_val;
+  AtomicType new_value = base::subtle::NoBarrier_AtomicIncrement(&value, 1);
+  EXPECT_EQ(test_val, value);
+  EXPECT_EQ(value, new_value);
+
+  base::subtle::NoBarrier_AtomicIncrement(&value, -1);
+  EXPECT_EQ(-1 ^ test_val, value);
+
+  // Test at 32-bit boundary for 64-bit atomic type.
+  test_val = static_cast<uint64_t>(1) << (NUM_BITS(AtomicType) / 2);
+  value = test_val - 1;
+  new_value = base::subtle::NoBarrier_AtomicIncrement(&value, 1);
+  EXPECT_EQ(test_val, value);
+  EXPECT_EQ(value, new_value);
+
+  base::subtle::NoBarrier_AtomicIncrement(&value, -1);
+  EXPECT_EQ(test_val - 1, value);
+}
+
+// Return an AtomicType with the value 0xa5a5a5..
+template <class AtomicType>
+static AtomicType TestFillValue() {
+  AtomicType val = 0;
+  memset(&val, 0xa5, sizeof(AtomicType));
+  return val;
+}
+
+// This is a simple sanity check that values are correct. It does not test
+// atomicity.
+template <class AtomicType>
+static void TestStore() {
+  const AtomicType kVal1 = TestFillValue<AtomicType>();
+  const AtomicType kVal2 = static_cast<AtomicType>(-1);
+
+  AtomicType value;
+
+  base::subtle::NoBarrier_Store(&value, kVal1);
+  EXPECT_EQ(kVal1, value);
+  base::subtle::NoBarrier_Store(&value, kVal2);
+  EXPECT_EQ(kVal2, value);
+
+  base::subtle::Acquire_Store(&value, kVal1);
+  EXPECT_EQ(kVal1, value);
+  base::subtle::Acquire_Store(&value, kVal2);
+  EXPECT_EQ(kVal2, value);
+
+  base::subtle::Release_Store(&value, kVal1);
+  EXPECT_EQ(kVal1, value);
+  base::subtle::Release_Store(&value, kVal2);
+  EXPECT_EQ(kVal2, value);
+}
+
+// This is a simple sanity check that values are correct. It does not test
+// atomicity.
+template <class AtomicType>
+static void TestLoad() {
+  const AtomicType kVal1 = TestFillValue<AtomicType>();
+  const AtomicType kVal2 = static_cast<AtomicType>(-1);
+
+  AtomicType value;
+
+  value = kVal1;
+  EXPECT_EQ(kVal1, base::subtle::NoBarrier_Load(&value));
+  value = kVal2;
+  EXPECT_EQ(kVal2, base::subtle::NoBarrier_Load(&value));
+
+  value = kVal1;
+  EXPECT_EQ(kVal1, base::subtle::Acquire_Load(&value));
+  value = kVal2;
+  EXPECT_EQ(kVal2, base::subtle::Acquire_Load(&value));
+
+  value = kVal1;
+  EXPECT_EQ(kVal1, base::subtle::Release_Load(&value));
+  value = kVal2;
+  EXPECT_EQ(kVal2, base::subtle::Release_Load(&value));
+}
+
+TEST(AtomicOpsTest, Inc) {
+  TestAtomicIncrement<base::subtle::Atomic32>();
+  TestAtomicIncrement<base::subtle::AtomicWord>();
+}
+
+TEST(AtomicOpsTest, CompareAndSwap) {
+  TestCompareAndSwap<base::subtle::Atomic32>();
+  TestCompareAndSwap<base::subtle::AtomicWord>();
+}
+
+TEST(AtomicOpsTest, Exchange) {
+  TestAtomicExchange<base::subtle::Atomic32>();
+  TestAtomicExchange<base::subtle::AtomicWord>();
+}
+
+TEST(AtomicOpsTest, IncrementBounds) {
+  TestAtomicIncrementBounds<base::subtle::Atomic32>();
+  TestAtomicIncrementBounds<base::subtle::AtomicWord>();
+}
+
+TEST(AtomicOpsTest, Store) {
+  TestStore<base::subtle::Atomic32>();
+  TestStore<base::subtle::AtomicWord>();
+}
+
+TEST(AtomicOpsTest, Load) {
+  TestLoad<base::subtle::Atomic32>();
+  TestLoad<base::subtle::AtomicWord>();
+}
diff --git a/base/auto_reset.h b/base/auto_reset.h
new file mode 100644
index 0000000..8515fe9
--- /dev/null
+++ b/base/auto_reset.h
@@ -0,0 +1,43 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_AUTO_RESET_H_
+#define BASE_AUTO_RESET_H_
+
+#include <utility>
+
+#include "base/macros.h"
+
+// base::AutoReset<> is useful for setting a variable to a new value only within
+// a particular scope. A base::AutoReset<> object resets a variable to its
+// original value upon destruction, making it an alternative to writing
+// "var = false;" or "var = old_val;" at all of a block's exit points.
+//
+// This should be obvious, but note that a base::AutoReset<> instance should
+// have a shorter lifetime than its scoped_variable, to prevent invalid memory
+// writes when the base::AutoReset<> object is destroyed.
+
+namespace base {
+
+template<typename T>
+class AutoReset {
+ public:
+  AutoReset(T* scoped_variable, T new_value)
+      : scoped_variable_(scoped_variable),
+        original_value_(std::move(*scoped_variable)) {
+    *scoped_variable_ = std::move(new_value);
+  }
+
+  ~AutoReset() { *scoped_variable_ = std::move(original_value_); }
+
+ private:
+  T* scoped_variable_;
+  T original_value_;
+
+  DISALLOW_COPY_AND_ASSIGN(AutoReset);
+};
+
+}  // namespace base
+
+#endif  // BASE_AUTO_RESET_H_
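
A short usage sketch for the class above (g_busy and DoWork are illustrative names): the new value holds for the whole scope, and the old value is restored on every exit path.

    #include "base/auto_reset.h"

    bool g_busy = false;

    void DoWork() {
      base::AutoReset<bool> busy_scope(&g_busy, true);
      // g_busy is true for the duration of this scope, including early
      // returns.
      // ... work ...
    }  // the destructor restores g_busy to false here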
diff --git a/base/barrier_closure.cc b/base/barrier_closure.cc
new file mode 100644
index 0000000..4426bb9
--- /dev/null
+++ b/base/barrier_closure.cc
@@ -0,0 +1,51 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/barrier_closure.h"
+
+#include <utility>
+
+#include "base/atomic_ref_count.h"
+#include "base/bind.h"
+#include "base/memory/ptr_util.h"
+
+namespace base {
+namespace {
+
+// Maintains state for a BarrierClosure.
+class BarrierInfo {
+ public:
+  BarrierInfo(int num_callbacks_left, OnceClosure done_closure);
+  void Run();
+
+ private:
+  AtomicRefCount num_callbacks_left_;
+  OnceClosure done_closure_;
+};
+
+BarrierInfo::BarrierInfo(int num_callbacks, OnceClosure done_closure)
+    : num_callbacks_left_(num_callbacks),
+      done_closure_(std::move(done_closure)) {}
+
+void BarrierInfo::Run() {
+  DCHECK(!num_callbacks_left_.IsZero());
+  if (!num_callbacks_left_.Decrement())
+    std::move(done_closure_).Run();
+}
+
+}  // namespace
+
+RepeatingClosure BarrierClosure(int num_callbacks_left,
+                                OnceClosure done_closure) {
+  DCHECK_GE(num_callbacks_left, 0);
+
+  if (num_callbacks_left == 0)
+    std::move(done_closure).Run();
+
+  return BindRepeating(
+      &BarrierInfo::Run,
+      Owned(new BarrierInfo(num_callbacks_left, std::move(done_closure))));
+}
+
+}  // namespace base
diff --git a/base/barrier_closure.h b/base/barrier_closure.h
new file mode 100644
index 0000000..282aa39
--- /dev/null
+++ b/base/barrier_closure.h
@@ -0,0 +1,28 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_BARRIER_CLOSURE_H_
+#define BASE_BARRIER_CLOSURE_H_
+
+#include "base/base_export.h"
+#include "base/callback.h"
+
+namespace base {
+
+// BarrierClosure executes |done_closure| after it has been invoked
+// |num_closures| times.
+//
+// If |num_closures| is 0, |done_closure| is executed immediately.
+//
+// BarrierClosure is thread-safe - the count of remaining closures is
+// maintained as a base::AtomicRefCount. |done_closure| will be run on
+// the thread that calls the final Run() on the returned closures.
+//
+// |done_closure| is also cleared on the final calling thread.
+BASE_EXPORT RepeatingClosure BarrierClosure(int num_closures,
+                                            OnceClosure done_closure);
+
+}  // namespace base
+
+#endif  // BASE_BARRIER_CLOSURE_H_
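
A hedged usage sketch (OnAllDone and FanOut are illustrative names; a real caller would hand |barrier| to asynchronous work rather than running it inline):

    #include "base/barrier_closure.h"
    #include "base/bind.h"
    #include "base/logging.h"

    void OnAllDone() { LOG(INFO) << "all 3 tasks finished"; }

    void FanOut() {
      // |barrier| must be Run() exactly 3 times before OnAllDone() fires.
      base::RepeatingClosure barrier =
          base::BarrierClosure(3, base::BindOnce(&OnAllDone));
      for (int i = 0; i < 3; ++i)
        barrier.Run();  // in real code, each async worker runs this when done
    }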
diff --git a/base/barrier_closure_unittest.cc b/base/barrier_closure_unittest.cc
new file mode 100644
index 0000000..819f6ac
--- /dev/null
+++ b/base/barrier_closure_unittest.cc
@@ -0,0 +1,81 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/barrier_closure.h"
+
+#include "base/bind.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+void Increment(int* count) { (*count)++; }
+
+TEST(BarrierClosureTest, RunImmediatelyForZeroClosures) {
+  int count = 0;
+  base::Closure done_closure(base::Bind(&Increment, base::Unretained(&count)));
+
+  base::Closure barrier_closure = base::BarrierClosure(0, done_closure);
+  EXPECT_EQ(1, count);
+}
+
+TEST(BarrierClosureTest, RunAfterNumClosures) {
+  int count = 0;
+  base::Closure done_closure(base::Bind(&Increment, base::Unretained(&count)));
+
+  base::Closure barrier_closure = base::BarrierClosure(2, done_closure);
+  EXPECT_EQ(0, count);
+
+  barrier_closure.Run();
+  EXPECT_EQ(0, count);
+
+  barrier_closure.Run();
+  EXPECT_EQ(1, count);
+}
+
+class DestructionIndicator {
+ public:
+  // Sets |*destructed| to true in destructor.
+  DestructionIndicator(bool* destructed) : destructed_(destructed) {
+    *destructed_ = false;
+  }
+
+  ~DestructionIndicator() { *destructed_ = true; }
+
+  void DoNothing() {}
+
+ private:
+  bool* destructed_;
+};
+
+TEST(BarrierClosureTest, ReleasesDoneClosureWhenDone) {
+  bool done_destructed = false;
+  base::Closure barrier_closure = base::BarrierClosure(
+      1,
+      base::BindOnce(&DestructionIndicator::DoNothing,
+                     base::Owned(new DestructionIndicator(&done_destructed))));
+  EXPECT_FALSE(done_destructed);
+  barrier_closure.Run();
+  EXPECT_TRUE(done_destructed);
+}
+
+void ResetBarrierClosure(base::Closure* closure) {
+  *closure = base::Closure();
+}
+
+// Tests a case when |done_closure| resets a |barrier_closure|.
+// |barrier_closure| is a Closure holding the |done_closure|. |done_closure|
+// holds a pointer back to the |barrier_closure|. When |barrier_closure| is
+// Run(), it calls ResetBarrierClosure(), which erases the |barrier_closure|
+// while still inside its Run(). The Run() implementation (in
+// base::BarrierClosure) must not try to use itself after executing
+// ResetBarrierClosure(), or this test would crash inside Run().
+TEST(BarrierClosureTest, KeepingClosureAliveUntilDone) {
+  base::Closure barrier_closure;
+  base::Closure done_closure =
+      base::Bind(ResetBarrierClosure, &barrier_closure);
+  barrier_closure = base::BarrierClosure(1, done_closure);
+  barrier_closure.Run();
+}
+
+}  // namespace
diff --git a/base/base64.cc b/base/base64.cc
new file mode 100644
index 0000000..ca8ee93
--- /dev/null
+++ b/base/base64.cc
@@ -0,0 +1,39 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/base64.h"
+
+#include <stddef.h>
+
+#include "third_party/modp_b64/modp_b64.h"
+
+namespace base {
+
+void Base64Encode(const StringPiece& input, std::string* output) {
+  std::string temp;
+  temp.resize(modp_b64_encode_len(input.size()));  // makes room for null byte
+
+  // modp_b64_encode_len() returns at least 1, so temp[0] is safe to use.
+  size_t output_size = modp_b64_encode(&(temp[0]), input.data(), input.size());
+
+  temp.resize(output_size);  // strips off null byte
+  output->swap(temp);
+}
+
+bool Base64Decode(const StringPiece& input, std::string* output) {
+  std::string temp;
+  temp.resize(modp_b64_decode_len(input.size()));
+
+  // does not null terminate result since result is binary data!
+  size_t input_size = input.size();
+  size_t output_size = modp_b64_decode(&(temp[0]), input.data(), input_size);
+  if (output_size == MODP_B64_ERROR)
+    return false;
+
+  temp.resize(output_size);
+  output->swap(temp);
+  return true;
+}
+
+}  // namespace base
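
To see the sizing above with numbers: assuming modp_b64_encode_len(n) is ceil(n/3)*4 plus one byte of room for the NUL (which is what the "makes room for null byte" comment implies), encoding the 11-byte "hello world" first resizes |temp| to ((11+2)/3)*4 + 1 = 17 bytes, modp_b64_encode() then reports 16 characters written, and the second resize(16) strips the NUL, yielding the 16-character "aGVsbG8gd29ybGQ=".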
diff --git a/base/base64.h b/base/base64.h
new file mode 100644
index 0000000..dd72c39
--- /dev/null
+++ b/base/base64.h
@@ -0,0 +1,25 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_BASE64_H_
+#define BASE_BASE64_H_
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+// Encodes the input string in base64. The encoding can be done in-place.
+BASE_EXPORT void Base64Encode(const StringPiece& input, std::string* output);
+
+// Decodes the base64 input string.  Returns true if successful and false
+// otherwise. The output string is only modified if successful. The decoding can
+// be done in-place.
+BASE_EXPORT bool Base64Decode(const StringPiece& input, std::string* output);
+
+}  // namespace base
+
+#endif  // BASE_BASE64_H_
diff --git a/base/base64_decode_fuzzer.cc b/base/base64_decode_fuzzer.cc
new file mode 100644
index 0000000..3716f72
--- /dev/null
+++ b/base/base64_decode_fuzzer.cc
@@ -0,0 +1,15 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string>
+
+#include "base/base64.h"
+#include "base/strings/string_piece.h"
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+  std::string decode_output;
+  base::StringPiece data_piece(reinterpret_cast<const char*>(data), size);
+  base::Base64Decode(data_piece, &decode_output);
+  return 0;
+}
diff --git a/base/base64_encode_fuzzer.cc b/base/base64_encode_fuzzer.cc
new file mode 100644
index 0000000..c324be0
--- /dev/null
+++ b/base/base64_encode_fuzzer.cc
@@ -0,0 +1,20 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string>
+
+#include "base/base64.h"
+#include "base/logging.h"
+#include "base/strings/string_piece.h"
+
+// Encode some random data, and then decode it.
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+  std::string encode_output;
+  std::string decode_output;
+  base::StringPiece data_piece(reinterpret_cast<const char*>(data), size);
+  base::Base64Encode(data_piece, &encode_output);
+  CHECK(base::Base64Decode(encode_output, &decode_output));
+  CHECK_EQ(data_piece, decode_output);
+  return 0;
+}
diff --git a/base/base64_unittest.cc b/base/base64_unittest.cc
new file mode 100644
index 0000000..91651f4
--- /dev/null
+++ b/base/base64_unittest.cc
@@ -0,0 +1,40 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/base64.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(Base64Test, Basic) {
+  const std::string kText = "hello world";
+  const std::string kBase64Text = "aGVsbG8gd29ybGQ=";
+
+  std::string encoded;
+  std::string decoded;
+  bool ok;
+
+  Base64Encode(kText, &encoded);
+  EXPECT_EQ(kBase64Text, encoded);
+
+  ok = Base64Decode(encoded, &decoded);
+  EXPECT_TRUE(ok);
+  EXPECT_EQ(kText, decoded);
+}
+
+TEST(Base64Test, InPlace) {
+  const std::string kText = "hello world";
+  const std::string kBase64Text = "aGVsbG8gd29ybGQ=";
+  std::string text(kText);
+
+  Base64Encode(text, &text);
+  EXPECT_EQ(kBase64Text, text);
+
+  bool ok = Base64Decode(text, &text);
+  EXPECT_TRUE(ok);
+  EXPECT_EQ(text, kText);
+}
+
+}  // namespace base
diff --git a/base/base64url.cc b/base/base64url.cc
new file mode 100644
index 0000000..0a2c045
--- /dev/null
+++ b/base/base64url.cc
@@ -0,0 +1,101 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/base64url.h"
+
+#include <stddef.h>
+
+#include "base/base64.h"
+#include "base/macros.h"
+#include "base/numerics/safe_math.h"
+#include "base/strings/string_util.h"
+#include "third_party/modp_b64/modp_b64.h"
+
+namespace base {
+
+const char kPaddingChar = '=';
+
+// Base64url maps {+, /} to {-, _} in order for the encoded content to be safe
+// to use in a URL. These characters will be translated by this implementation.
+const char kBase64Chars[] = "+/";
+const char kBase64UrlSafeChars[] = "-_";
+
+void Base64UrlEncode(const StringPiece& input,
+                     Base64UrlEncodePolicy policy,
+                     std::string* output) {
+  Base64Encode(input, output);
+
+  ReplaceChars(*output, "+", "-", output);
+  ReplaceChars(*output, "/", "_", output);
+
+  switch (policy) {
+    case Base64UrlEncodePolicy::INCLUDE_PADDING:
+      // The padding included in |*output| will not be amended.
+      break;
+    case Base64UrlEncodePolicy::OMIT_PADDING:
+      // The padding included in |*output| will be removed.
+      const size_t last_non_padding_pos =
+          output->find_last_not_of(kPaddingChar);
+      if (last_non_padding_pos != std::string::npos)
+        output->resize(last_non_padding_pos + 1);
+
+      break;
+  }
+}
+
+bool Base64UrlDecode(const StringPiece& input,
+                     Base64UrlDecodePolicy policy,
+                     std::string* output) {
+  // Characters outside of the base64url alphabet are disallowed, including
+  // the {+, /} characters found in the conventional base64 alphabet.
+  if (input.find_first_of(kBase64Chars) != std::string::npos)
+    return false;
+
+  const size_t required_padding_characters = input.size() % 4;
+  const bool needs_replacement =
+      input.find_first_of(kBase64UrlSafeChars) != std::string::npos;
+
+  switch (policy) {
+    case Base64UrlDecodePolicy::REQUIRE_PADDING:
+      // Fail if the required padding is not included in |input|.
+      if (required_padding_characters > 0)
+        return false;
+      break;
+    case Base64UrlDecodePolicy::IGNORE_PADDING:
+      // Missing padding will be silently appended.
+      break;
+    case Base64UrlDecodePolicy::DISALLOW_PADDING:
+      // Fail if padding characters are included in |input|.
+      if (input.find_first_of(kPaddingChar) != std::string::npos)
+        return false;
+      break;
+  }
+
+  // If the string either needs replacement of URL-safe characters to normal
+  // base64 ones, or additional padding, a copy of |input| needs to be made in
+  // order to make these adjustments without side effects.
+  if (required_padding_characters > 0 || needs_replacement) {
+    std::string base64_input;
+
+    CheckedNumeric<size_t> base64_input_size = input.size();
+    if (required_padding_characters > 0)
+      base64_input_size += 4 - required_padding_characters;
+
+    base64_input.reserve(base64_input_size.ValueOrDie());
+    input.AppendToString(&base64_input);
+
+    // Substitute the base64url URL-safe characters to their base64 equivalents.
+    ReplaceChars(base64_input, "-", "+", &base64_input);
+    ReplaceChars(base64_input, "_", "/", &base64_input);
+
+    // Append the necessary padding characters.
+    base64_input.resize(base64_input_size.ValueOrDie(), '=');
+
+    return Base64Decode(base64_input, output);
+  }
+
+  return Base64Decode(input, output);
+}
+
+}  // namespace base
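
The padding arithmetic in numbers: for the unpadded input "aGVsbG8_d29ybGQ" (length 15), required_padding_characters = 15 % 4 = 3, so base64_input_size becomes 15 + (4 - 3) = 16; after '-'/'_' are mapped back to '+'/'/', resize() appends a single '=' and the 16-character string is handed to Base64Decode(). A length with remainder 1, which is never valid base64, is padded the same way (with three '=' characters) and is left for Base64Decode() to reject.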
diff --git a/base/base64url.h b/base/base64url.h
new file mode 100644
index 0000000..66a4824
--- /dev/null
+++ b/base/base64url.h
@@ -0,0 +1,56 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_BASE64URL_H_
+#define BASE_BASE64URL_H_
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+enum class Base64UrlEncodePolicy {
+  // Include the trailing padding in the output, when necessary.
+  INCLUDE_PADDING,
+
+  // Remove the trailing padding from the output.
+  OMIT_PADDING
+};
+
+// Encodes the |input| string in base64url, defined in RFC 4648:
+// https://tools.ietf.org/html/rfc4648#section-5
+//
+// The |policy| defines whether padding should be included or omitted from the
+// encoded |*output|. |input| and |*output| may reference the same storage.
+BASE_EXPORT void Base64UrlEncode(const StringPiece& input,
+                                 Base64UrlEncodePolicy policy,
+                                 std::string* output);
+
+enum class Base64UrlDecodePolicy {
+  // Require inputs to contain trailing padding if non-aligned.
+  REQUIRE_PADDING,
+
+  // Accept inputs regardless of whether or not they have the correct padding.
+  IGNORE_PADDING,
+
+  // Reject inputs if they contain any trailing padding.
+  DISALLOW_PADDING
+};
+
+// Decodes the |input| string in base64url, defined in RFC 4648:
+// https://tools.ietf.org/html/rfc4648#section-5
+//
+// The |policy| defines whether padding will be required, ignored or disallowed
+// altogether. |input| and |*output| may reference the same storage.
+BASE_EXPORT bool Base64UrlDecode(const StringPiece& input,
+                                 Base64UrlDecodePolicy policy,
+                                 std::string* output) WARN_UNUSED_RESULT;
+
+}  // namespace base
+
+#endif  // BASE_BASE64URL_H_
diff --git a/base/base64url_unittest.cc b/base/base64url_unittest.cc
new file mode 100644
index 0000000..45aa4a8
--- /dev/null
+++ b/base/base64url_unittest.cc
@@ -0,0 +1,115 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/base64url.h"
+
+#include "base/macros.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+TEST(Base64UrlTest, EncodeIncludePaddingPolicy) {
+  std::string output;
+  Base64UrlEncode("hello?world", Base64UrlEncodePolicy::INCLUDE_PADDING,
+                  &output);
+
+  // Base64 version: aGVsbG8/d29ybGQ=
+  EXPECT_EQ("aGVsbG8_d29ybGQ=", output);
+
+  // Test the behavior for very short and empty strings.
+  Base64UrlEncode("??", Base64UrlEncodePolicy::INCLUDE_PADDING, &output);
+  EXPECT_EQ("Pz8=", output);
+
+  Base64UrlEncode("", Base64UrlEncodePolicy::INCLUDE_PADDING, &output);
+  EXPECT_EQ("", output);
+}
+
+TEST(Base64UrlTest, EncodeOmitPaddingPolicy) {
+  std::string output;
+  Base64UrlEncode("hello?world", Base64UrlEncodePolicy::OMIT_PADDING, &output);
+
+  // base64 version: aGVsbG8/d29ybGQ=
+  EXPECT_EQ("aGVsbG8_d29ybGQ", output);
+
+  // Test the behavior for very short and empty strings.
+  Base64UrlEncode("??", Base64UrlEncodePolicy::OMIT_PADDING, &output);
+  EXPECT_EQ("Pz8", output);
+
+  Base64UrlEncode("", Base64UrlEncodePolicy::OMIT_PADDING, &output);
+  EXPECT_EQ("", output);
+}
+
+TEST(Base64UrlTest, DecodeRequirePaddingPolicy) {
+  std::string output;
+  ASSERT_TRUE(Base64UrlDecode("aGVsbG8_d29ybGQ=",
+                              Base64UrlDecodePolicy::REQUIRE_PADDING, &output));
+
+  EXPECT_EQ("hello?world", output);
+
+  ASSERT_FALSE(Base64UrlDecode(
+      "aGVsbG8_d29ybGQ", Base64UrlDecodePolicy::REQUIRE_PADDING, &output));
+
+  // Test the behavior for very short and empty strings.
+  ASSERT_TRUE(
+      Base64UrlDecode("Pz8=", Base64UrlDecodePolicy::REQUIRE_PADDING, &output));
+  EXPECT_EQ("??", output);
+
+  ASSERT_TRUE(
+      Base64UrlDecode("", Base64UrlDecodePolicy::REQUIRE_PADDING, &output));
+  EXPECT_EQ("", output);
+}
+
+TEST(Base64UrlTest, DecodeIgnorePaddingPolicy) {
+  std::string output;
+  ASSERT_TRUE(Base64UrlDecode("aGVsbG8_d29ybGQ",
+                              Base64UrlDecodePolicy::IGNORE_PADDING, &output));
+
+  EXPECT_EQ("hello?world", output);
+
+  // Including the padding is accepted as well.
+  ASSERT_TRUE(Base64UrlDecode("aGVsbG8_d29ybGQ=",
+                              Base64UrlDecodePolicy::IGNORE_PADDING, &output));
+
+  EXPECT_EQ("hello?world", output);
+}
+
+TEST(Base64UrlTest, DecodeDisallowPaddingPolicy) {
+  std::string output;
+  ASSERT_FALSE(Base64UrlDecode(
+      "aGVsbG8_d29ybGQ=", Base64UrlDecodePolicy::DISALLOW_PADDING, &output));
+
+  // The policy will allow the input when padding has been omitted.
+  ASSERT_TRUE(Base64UrlDecode(
+      "aGVsbG8_d29ybGQ", Base64UrlDecodePolicy::DISALLOW_PADDING, &output));
+
+  EXPECT_EQ("hello?world", output);
+}
+
+TEST(Base64UrlTest, DecodeDisallowsBase64Alphabet) {
+  std::string output;
+
+  // The "/" character is part of the conventional base64 alphabet, but has been
+  // substituted with "_" in the base64url alphabet.
+  ASSERT_FALSE(Base64UrlDecode(
+      "aGVsbG8/d29ybGQ=", Base64UrlDecodePolicy::REQUIRE_PADDING, &output));
+}
+
+TEST(Base64UrlTest, DecodeDisallowsPaddingOnly) {
+  std::string output;
+
+  ASSERT_FALSE(Base64UrlDecode(
+      "=", Base64UrlDecodePolicy::IGNORE_PADDING, &output));
+  ASSERT_FALSE(Base64UrlDecode(
+      "==", Base64UrlDecodePolicy::IGNORE_PADDING, &output));
+  ASSERT_FALSE(Base64UrlDecode(
+      "===", Base64UrlDecodePolicy::IGNORE_PADDING, &output));
+  ASSERT_FALSE(Base64UrlDecode(
+      "====", Base64UrlDecodePolicy::IGNORE_PADDING, &output));
+}
+
+}  // namespace
+
+}  // namespace base
diff --git a/base/base_export.h b/base/base_export.h
new file mode 100644
index 0000000..cf7ebd7
--- /dev/null
+++ b/base/base_export.h
@@ -0,0 +1,29 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_BASE_EXPORT_H_
+#define BASE_BASE_EXPORT_H_
+
+#if defined(COMPONENT_BUILD)
+#if defined(WIN32)
+
+#if defined(BASE_IMPLEMENTATION)
+#define BASE_EXPORT __declspec(dllexport)
+#else
+#define BASE_EXPORT __declspec(dllimport)
+#endif  // defined(BASE_IMPLEMENTATION)
+
+#else  // defined(WIN32)
+#if defined(BASE_IMPLEMENTATION)
+#define BASE_EXPORT __attribute__((visibility("default")))
+#else
+#define BASE_EXPORT
+#endif  // defined(BASE_IMPLEMENTATION)
+#endif
+
+#else  // defined(COMPONENT_BUILD)
+#define BASE_EXPORT
+#endif
+
+#endif  // BASE_BASE_EXPORT_H_
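The macro follows the usual component-build export pattern; a sketch of its effect, derived only from the definitions above (DoSomething is a hypothetical symbol):

  // In any base header:
  #include "base/base_export.h"
  BASE_EXPORT void DoSomething();

  // COMPONENT_BUILD + BASE_IMPLEMENTATION: dllexport / default visibility.
  // COMPONENT_BUILD, consumer side:        dllimport on Windows, empty elsewhere.
  // Non-component (static) build:          BASE_EXPORT expands to nothing.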
diff --git a/base/base_paths.cc b/base/base_paths.cc
new file mode 100644
index 0000000..e3f322e
--- /dev/null
+++ b/base/base_paths.cc
@@ -0,0 +1,51 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/base_paths.h"
+
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/path_service.h"
+
+namespace base {
+
+bool PathProvider(int key, FilePath* result) {
+  // NOTE: DIR_CURRENT is a special case in PathService::Get
+
+  switch (key) {
+    case DIR_EXE:
+      if (!PathService::Get(FILE_EXE, result))
+        return false;
+      *result = result->DirName();
+      return true;
+    case DIR_MODULE:
+      if (!PathService::Get(FILE_MODULE, result))
+        return false;
+      *result = result->DirName();
+      return true;
+    case DIR_ASSETS:
+      return PathService::Get(DIR_MODULE, result);
+    case DIR_TEMP:
+      return GetTempDir(result);
+    case base::DIR_HOME:
+      *result = GetHomeDir();
+      return true;
+    case DIR_TEST_DATA: {
+      FilePath test_data_path;
+      if (!PathService::Get(DIR_SOURCE_ROOT, &test_data_path))
+        return false;
+      test_data_path = test_data_path.Append(FILE_PATH_LITERAL("base"));
+      test_data_path = test_data_path.Append(FILE_PATH_LITERAL("test"));
+      test_data_path = test_data_path.Append(FILE_PATH_LITERAL("data"));
+      if (!PathExists(test_data_path))  // We don't want to create this.
+        return false;
+      *result = test_data_path;
+      return true;
+    }
+    default:
+      return false;
+  }
+}
+
+}  // namespace base
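A hypothetical caller resolves these keys through PathService, which dispatches to the provider above:

  #include "base/base_paths.h"
  #include "base/files/file_path.h"
  #include "base/path_service.h"

  base::FilePath exe_dir;
  if (base::PathService::Get(base::DIR_EXE, &exe_dir)) {
    // exe_dir is the directory containing FILE_EXE, per the DIR_EXE case.
  }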
diff --git a/base/base_paths.h b/base/base_paths.h
new file mode 100644
index 0000000..2a163f4
--- /dev/null
+++ b/base/base_paths.h
@@ -0,0 +1,55 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_BASE_PATHS_H_
+#define BASE_BASE_PATHS_H_
+
+// This file declares path keys for the base module.  These can be used with
+// the PathService to access various special directories and files.
+
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include "base/base_paths_win.h"
+#elif defined(OS_MACOSX)
+#include "base/base_paths_mac.h"
+#elif defined(OS_ANDROID)
+#include "base/base_paths_android.h"
+#endif
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include "base/base_paths_posix.h"
+#endif
+
+namespace base {
+
+enum BasePathKey {
+  PATH_START = 0,
+
+  DIR_CURRENT,       // Current directory.
+  DIR_EXE,           // Directory containing FILE_EXE.
+  DIR_MODULE,        // Directory containing FILE_MODULE.
+  DIR_ASSETS,        // Directory that contains application assets.
+  DIR_TEMP,          // Temporary directory.
+  DIR_HOME,          // User's root home directory. On Windows this will look
+                     // like "C:\Users\<user>"  which isn't necessarily a great
+                     // place to put files.
+  FILE_EXE,          // Path and filename of the current executable.
+  FILE_MODULE,       // Path and filename of the module containing the code for
+                     // the PathService (which could differ from FILE_EXE if the
+                     // PathService were compiled into a shared object, for
+                     // example).
+  DIR_SOURCE_ROOT,   // Returns the root of the source tree. This key is useful
+                     // for tests that need to locate various resources. It
+                     // should not be used outside of test code.
+  DIR_USER_DESKTOP,  // The current user's Desktop.
+
+  DIR_TEST_DATA,  // Used only for testing.
+
+  PATH_END
+};
+
+}  // namespace base
+
+#endif  // BASE_BASE_PATHS_H_
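Keys can also be overridden, which is how tests repoint DIR_TEST_DATA and friends; a sketch assuming PathService::Override() from base/path_service.h (the path itself is hypothetical):

  base::PathService::Override(
      base::DIR_TEST_DATA,
      base::FilePath(FILE_PATH_LITERAL("/tmp/test_data")));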
diff --git a/base/base_paths_android.cc b/base/base_paths_android.cc
new file mode 100644
index 0000000..078f565
--- /dev/null
+++ b/base/base_paths_android.cc
@@ -0,0 +1,66 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Defines base::PathProviderAndroid which replaces base::PathProviderPosix for
+// Android in base/path_service.cc.
+
+#include <limits.h>
+#include <unistd.h>
+
+#include "base/android/jni_android.h"
+#include "base/android/path_utils.h"
+#include "base/base_paths.h"
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/logging.h"
+#include "base/process/process_metrics.h"
+
+namespace base {
+
+bool PathProviderAndroid(int key, FilePath* result) {
+  switch (key) {
+    case base::FILE_EXE: {
+      FilePath bin_dir;
+      if (!ReadSymbolicLink(FilePath(kProcSelfExe), &bin_dir)) {
+        NOTREACHED() << "Unable to resolve " << kProcSelfExe << ".";
+        return false;
+      }
+      *result = bin_dir;
+      return true;
+    }
+    case base::FILE_MODULE:
+      // dladdr didn't work on Android, as only the file name was returned.
+      NOTIMPLEMENTED();
+      return false;
+    case base::DIR_MODULE:
+      return base::android::GetNativeLibraryDirectory(result);
+    case base::DIR_SOURCE_ROOT:
+      // Used only by tests.
+      // In that context, hooked up via base/test/test_support_android.cc.
+      NOTIMPLEMENTED();
+      return false;
+    case base::DIR_USER_DESKTOP:
+      // Android doesn't support GetUserDesktop.
+      NOTIMPLEMENTED();
+      return false;
+    case base::DIR_CACHE:
+      return base::android::GetCacheDirectory(result);
+    case base::DIR_ASSETS:
+      // On Android assets are normally loaded from the APK using
+      // base::android::OpenApkAsset(). In tests, since the assets are not
+      // packaged, DIR_ASSETS is overridden to point to the build directory.
+      return false;
+    case base::DIR_ANDROID_APP_DATA:
+      return base::android::GetDataDirectory(result);
+    case base::DIR_ANDROID_EXTERNAL_STORAGE:
+      return base::android::GetExternalStorageDirectory(result);
+    default:
+      // Note: the path system expects this function to override the default
+      // behavior. So no need to log an error if we don't support a given
+      // path. The system will just use the default.
+      return false;
+  }
+}
+
+}  // namespace base
diff --git a/base/base_paths_android.h b/base/base_paths_android.h
new file mode 100644
index 0000000..7a9ac4a
--- /dev/null
+++ b/base/base_paths_android.h
@@ -0,0 +1,25 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_BASE_PATHS_ANDROID_H_
+#define BASE_BASE_PATHS_ANDROID_H_
+
+// This file declares Android-specific path keys for the base module.
+// These can be used with the PathService to access various special
+// directories and files.
+
+namespace base {
+
+enum {
+  PATH_ANDROID_START = 300,
+
+  DIR_ANDROID_APP_DATA,  // Directory in which to put Android app data.
+  DIR_ANDROID_EXTERNAL_STORAGE,  // Android external storage directory.
+
+  PATH_ANDROID_END
+};
+
+}  // namespace base
+
+#endif  // BASE_BASE_PATHS_ANDROID_H_
diff --git a/base/base_paths_fuchsia.cc b/base/base_paths_fuchsia.cc
new file mode 100644
index 0000000..afe449f
--- /dev/null
+++ b/base/base_paths_fuchsia.cc
@@ -0,0 +1,62 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/base_paths.h"
+
+#include <stdlib.h>
+
+#include "base/base_paths_fuchsia.h"
+#include "base/command_line.h"
+#include "base/files/file_util.h"
+#include "base/path_service.h"
+#include "base/process/process.h"
+
+namespace base {
+namespace {
+
+constexpr char kPackageRoot[] = "/pkg";
+
+}  // namespace
+
+base::FilePath GetPackageRoot() {
+  base::FilePath path_obj(kPackageRoot);
+
+  // Fuchsia's appmgr will set argv[0] to a fully qualified executable path
+  // under /pkg for packaged binaries.
+  if (path_obj.IsParent(base::CommandLine::ForCurrentProcess()->GetProgram())) {
+    return path_obj;
+  } else {
+    return base::FilePath();
+  }
+}
+
+bool PathProviderFuchsia(int key, FilePath* result) {
+  switch (key) {
+    case FILE_MODULE:
+      NOTIMPLEMENTED();
+      return false;
+    case FILE_EXE:
+      *result = CommandLine::ForCurrentProcess()->GetProgram();
+      return true;
+    case DIR_SOURCE_ROOT:
+      *result = GetPackageRoot();
+      return true;
+    case DIR_APP_DATA:
+      // TODO(https://crbug.com/840598): Switch to /data when minfs supports
+      // mmap().
+      DLOG(WARNING) << "Using /tmp as app data dir, changes will NOT be "
+                       "persisted! (crbug.com/840598)";
+      *result = FilePath("/tmp");
+      return true;
+    case DIR_CACHE:
+      *result = FilePath("/data");
+      return true;
+    case DIR_ASSETS:
+      *result = GetPackageRoot();
+      return true;
+  }
+  return false;
+}
+
+}  // namespace base
diff --git a/base/base_paths_fuchsia.h b/base/base_paths_fuchsia.h
new file mode 100644
index 0000000..a2d4194
--- /dev/null
+++ b/base/base_paths_fuchsia.h
@@ -0,0 +1,34 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_BASE_PATHS_FUCHSIA_H_
+#define BASE_BASE_PATHS_FUCHSIA_H_
+
+#include "base/base_export.h"
+#include "base/files/file_path.h"
+
+namespace base {
+
+// These can be used with the PathService to access various special
+// directories and files.
+enum {
+  PATH_FUCHSIA_START = 1200,
+
+  // Path to the directory which contains application libraries and resources.
+  DIR_FUCHSIA_RESOURCES,
+
+  // Path to the directory which contains application user data.
+  DIR_APP_DATA,
+
+  PATH_FUCHSIA_END,
+};
+
+// If running inside a package, returns a FilePath of the root path
+// of the currently deployed package.
+// Otherwise returns an empty FilePath.
+BASE_EXPORT base::FilePath GetPackageRoot();
+
+}  // namespace base
+
+#endif  // BASE_BASE_PATHS_FUCHSIA_H_
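A sketch of the GetPackageRoot() contract (the resource names are hypothetical):

  base::FilePath pkg_root = base::GetPackageRoot();
  if (!pkg_root.empty()) {
    // Running from a package: argv[0] was under /pkg.
    base::FilePath config = pkg_root.Append("data").Append("config.json");
  }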
diff --git a/base/base_paths_mac.h b/base/base_paths_mac.h
new file mode 100644
index 0000000..ac75402
--- /dev/null
+++ b/base/base_paths_mac.h
@@ -0,0 +1,24 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_BASE_PATHS_MAC_H_
+#define BASE_BASE_PATHS_MAC_H_
+
+// This file declares Mac-specific path keys for the base module.
+// These can be used with the PathService to access various special
+// directories and files.
+
+namespace base {
+
+enum {
+  PATH_MAC_START = 200,
+
+  DIR_APP_DATA,  // ~/Library/Application Support
+
+  PATH_MAC_END
+};
+
+}  // namespace base
+
+#endif  // BASE_BASE_PATHS_MAC_H_
diff --git a/base/base_paths_mac.mm b/base/base_paths_mac.mm
new file mode 100644
index 0000000..46bbd16
--- /dev/null
+++ b/base/base_paths_mac.mm
@@ -0,0 +1,127 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Defines base::PathProviderMac which replaces base::PathProviderPosix for Mac
+// in base/path_service.cc.
+
+#include <dlfcn.h>
+#import <Foundation/Foundation.h>
+#include <mach-o/dyld.h>
+#include <stdint.h>
+
+#include "base/base_paths.h"
+#include "base/compiler_specific.h"
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/logging.h"
+#include "base/mac/bundle_locations.h"
+#include "base/mac/foundation_util.h"
+#include "base/path_service.h"
+#include "base/strings/string_util.h"
+#include "base/threading/thread_restrictions.h"
+#include "build/build_config.h"
+
+namespace {
+
+void GetNSExecutablePath(base::FilePath* path) {
+  DCHECK(path);
+  // Executable path can have relative references ("..") depending on
+  // how the app was launched.
+  uint32_t executable_length = 0;
+  _NSGetExecutablePath(NULL, &executable_length);
+  DCHECK_GT(executable_length, 1u);
+  std::string executable_path;
+  int rv = _NSGetExecutablePath(
+      base::WriteInto(&executable_path, executable_length),
+      &executable_length);
+  DCHECK_EQ(rv, 0);
+
+  // _NSGetExecutablePath may return paths containing ./ or ../, which make
+  // FilePath::DirName() work incorrectly. Convert the result to an absolute
+  // path so that keys derived from it, such as DIR_SOURCE_ROOT, get the
+  // absolute paths they expect.
+  // TODO(bauerb): http://crbug.com/259796, http://crbug.com/373477
+  base::ThreadRestrictions::ScopedAllowIO allow_io;
+  *path = base::MakeAbsoluteFilePath(base::FilePath(executable_path));
+}
+
+// Returns true if the module for |address| is found. |path| will contain
+// the path to the module. Note that |path| may not be absolute.
+bool GetModulePathForAddress(base::FilePath* path,
+                             const void* address) WARN_UNUSED_RESULT;
+
+bool GetModulePathForAddress(base::FilePath* path, const void* address) {
+  Dl_info info;
+  if (dladdr(address, &info) == 0)
+    return false;
+  *path = base::FilePath(info.dli_fname);
+  return true;
+}
+
+}  // namespace
+
+namespace base {
+
+bool PathProviderMac(int key, base::FilePath* result) {
+  switch (key) {
+    case base::FILE_EXE:
+      GetNSExecutablePath(result);
+      return true;
+    case base::FILE_MODULE:
+      return GetModulePathForAddress(result,
+          reinterpret_cast<const void*>(&base::PathProviderMac));
+    case base::DIR_APP_DATA: {
+      bool success = base::mac::GetUserDirectory(NSApplicationSupportDirectory,
+                                                 result);
+#if defined(OS_IOS)
+      // On iOS, this directory does not exist unless it is created explicitly.
+      if (success && !base::PathExists(*result))
+        success = base::CreateDirectory(*result);
+#endif  // defined(OS_IOS)
+      return success;
+    }
+    case base::DIR_SOURCE_ROOT:
+      // Go through PathService to catch overrides.
+      if (!PathService::Get(base::FILE_EXE, result))
+        return false;
+
+      // Start with the executable's directory.
+      *result = result->DirName();
+
+#if !defined(OS_IOS)
+      if (base::mac::AmIBundled()) {
+        // The bundled app executables (Chromium, TestShell, etc.) live five
+        // levels down from the source root, e.g.:
+        // src/xcodebuild/{Debug|Release}/Chromium.app/Contents/MacOS/Chromium
+        *result = result->DirName().DirName().DirName().DirName().DirName();
+      } else {
+        // Unit tests execute two levels deep from the source root, e.g.:
+        // src/xcodebuild/{Debug|Release}/base_unittests
+        *result = result->DirName().DirName();
+      }
+#endif
+      return true;
+    case base::DIR_USER_DESKTOP:
+#if defined(OS_IOS)
+      // iOS does not have desktop directories.
+      NOTIMPLEMENTED();
+      return false;
+#else
+      return base::mac::GetUserDirectory(NSDesktopDirectory, result);
+#endif
+    case base::DIR_ASSETS:
+      if (!base::mac::AmIBundled()) {
+        return PathService::Get(base::DIR_MODULE, result);
+      }
+      *result = base::mac::FrameworkBundlePath().Append(
+          FILE_PATH_LITERAL("Resources"));
+      return true;
+    case base::DIR_CACHE:
+      return base::mac::GetUserDirectory(NSCachesDirectory, result);
+    default:
+      return false;
+  }
+}
+
+}  // namespace base
diff --git a/base/base_paths_posix.cc b/base/base_paths_posix.cc
new file mode 100644
index 0000000..00a1569
--- /dev/null
+++ b/base/base_paths_posix.cc
@@ -0,0 +1,119 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Defines base::PathProviderPosix, default path provider on POSIX OSes that
+// don't have their own base_paths_OS.cc implementation (i.e. all but Mac and
+// Android).
+
+#include "base/base_paths.h"
+
+#include <limits.h>
+#include <stddef.h>
+
+#include <memory>
+#include <ostream>
+#include <string>
+
+#include "base/environment.h"
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/logging.h"
+#include "base/nix/xdg_util.h"
+#include "base/path_service.h"
+#include "base/process/process_metrics.h"
+#include "build/build_config.h"
+
+#if defined(OS_FREEBSD)
+#include <sys/param.h>
+#include <sys/sysctl.h>
+#elif defined(OS_SOLARIS) || defined(OS_AIX)
+#include <stdlib.h>
+#endif
+
+namespace base {
+
+bool PathProviderPosix(int key, FilePath* result) {
+  switch (key) {
+    case FILE_EXE:
+    case FILE_MODULE: {  // TODO(evanm): is this correct?
+#if defined(OS_LINUX)
+      FilePath bin_dir;
+      if (!ReadSymbolicLink(FilePath(kProcSelfExe), &bin_dir)) {
+        NOTREACHED() << "Unable to resolve " << kProcSelfExe << ".";
+        return false;
+      }
+      *result = bin_dir;
+      return true;
+#elif defined(OS_FREEBSD)
+      int name[] = { CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1 };
+      char bin_dir[PATH_MAX + 1];
+      size_t length = sizeof(bin_dir);
+      // Upon return, |length| is the number of bytes written to |bin_dir|
+      // including the string terminator.
+      int error = sysctl(name, 4, bin_dir, &length, NULL, 0);
+      if (error < 0 || length <= 1) {
+        NOTREACHED() << "Unable to resolve path.";
+        return false;
+      }
+      *result = FilePath(FilePath::StringType(bin_dir, length - 1));
+      return true;
+#elif defined(OS_SOLARIS)
+      char bin_dir[PATH_MAX + 1];
+      if (realpath(getexecname(), bin_dir) == NULL) {
+        NOTREACHED() << "Unable to resolve " << getexecname() << ".";
+        return false;
+      }
+      *result = FilePath(bin_dir);
+      return true;
+#elif defined(OS_OPENBSD) || defined(OS_AIX)
+      // There is currently no way to get the executable path on OpenBSD or
+      // AIX.
+      char* cpath;
+      if ((cpath = getenv("CHROME_EXE_PATH")) != NULL)
+        *result = FilePath(cpath);
+      else
+        *result = FilePath("/usr/local/chrome/chrome");
+      return true;
+#endif
+    }
+    case DIR_SOURCE_ROOT: {
+      // Allow passing this in the environment, for more flexibility in build
+      // tree configurations (sub-project builds, gyp --output_dir, etc.)
+      std::unique_ptr<Environment> env(Environment::Create());
+      std::string cr_source_root;
+      FilePath path;
+      if (env->GetVar("CR_SOURCE_ROOT", &cr_source_root)) {
+        path = FilePath(cr_source_root);
+        if (PathExists(path)) {
+          *result = path;
+          return true;
+        }
+        DLOG(WARNING) << "CR_SOURCE_ROOT is set, but it appears to not "
+                      << "point to a directory.";
+      }
+      // On POSIX, unit tests execute two levels deep from the source root.
+      // For example:  out/{Debug|Release}/net_unittest
+      if (PathService::Get(DIR_EXE, &path)) {
+        *result = path.DirName().DirName();
+        return true;
+      }
+
+      DLOG(ERROR) << "Couldn't find your source root.  "
+                  << "Try running from your chromium/src directory.";
+      return false;
+    }
+    case DIR_USER_DESKTOP:
+      *result = nix::GetXDGUserDirectory("DESKTOP", "Desktop");
+      return true;
+    case DIR_CACHE: {
+      std::unique_ptr<Environment> env(Environment::Create());
+      FilePath cache_dir(
+          nix::GetXDGDirectory(env.get(), "XDG_CACHE_HOME", ".cache"));
+      *result = cache_dir;
+      return true;
+    }
+  }
+  return false;
+}
+
+}  // namespace base
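The CR_SOURCE_ROOT escape hatch above can be exercised like this (sketch; the directory is hypothetical):

  #include <stdlib.h>
  #include "base/path_service.h"

  setenv("CR_SOURCE_ROOT", "/work/chromium/src", 1 /* overwrite */);
  base::FilePath source_root;
  base::PathService::Get(base::DIR_SOURCE_ROOT, &source_root);
  // If /work/chromium/src exists, source_root points there; otherwise the
  // provider falls back to two directories above DIR_EXE.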
diff --git a/base/base_paths_posix.h b/base/base_paths_posix.h
new file mode 100644
index 0000000..ef002ae
--- /dev/null
+++ b/base/base_paths_posix.h
@@ -0,0 +1,27 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_BASE_PATHS_POSIX_H_
+#define BASE_BASE_PATHS_POSIX_H_
+
+// This file declares POSIX-specific path keys for the base module.
+// These can be used with the PathService to access various special
+// directories and files.
+
+namespace base {
+
+enum {
+  PATH_POSIX_START = 400,
+
+  DIR_CACHE,    // Directory in which to put cache data.  Note this is
+                // *not* where the browser cache lives, but the
+                // browser cache can be a subdirectory.
+                // This is $XDG_CACHE_HOME on Linux and
+                // ~/Library/Caches on Mac.
+  PATH_POSIX_END
+};
+
+}  // namespace base
+
+#endif  // BASE_BASE_PATHS_POSIX_H_
diff --git a/base/base_paths_win.cc b/base/base_paths_win.cc
new file mode 100644
index 0000000..e7e5d1f
--- /dev/null
+++ b/base/base_paths_win.cc
@@ -0,0 +1,202 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <windows.h>
+#include <KnownFolders.h>
+#include <shlobj.h>
+
+#include "base/base_paths.h"
+#include "base/environment.h"
+#include "base/files/file_path.h"
+#include "base/path_service.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/win/current_module.h"
+#include "base/win/scoped_co_mem.h"
+#include "base/win/windows_version.h"
+
+using base::FilePath;
+
+namespace base {
+
+bool PathProviderWin(int key, FilePath* result) {
+  // We need to compute the value. It would be nice to support paths with
+  // names longer than MAX_PATH, but the system functions don't seem to be
+  // designed for it either, with the exception of GetTempPath (but other
+  // things will surely break if the temp path is too long, so we don't bother
+  // handling it).
+  wchar_t system_buffer[MAX_PATH];
+  system_buffer[0] = 0;
+
+  FilePath cur;
+  switch (key) {
+    case base::FILE_EXE:
+      if (GetModuleFileName(NULL, system_buffer, MAX_PATH) == 0)
+        return false;
+      cur = FilePath(system_buffer);
+      break;
+    case base::FILE_MODULE: {
+      // The module containing this code is assumed to be the one that holds
+      // the resources, whether that's a DLL or an EXE.
+      if (GetModuleFileName(CURRENT_MODULE(), system_buffer, MAX_PATH) == 0)
+        return false;
+      cur = FilePath(system_buffer);
+      break;
+    }
+    case base::DIR_WINDOWS:
+      GetWindowsDirectory(system_buffer, MAX_PATH);
+      cur = FilePath(system_buffer);
+      break;
+    case base::DIR_SYSTEM:
+      GetSystemDirectory(system_buffer, MAX_PATH);
+      cur = FilePath(system_buffer);
+      break;
+    case base::DIR_PROGRAM_FILESX86:
+      if (base::win::OSInfo::GetInstance()->architecture() !=
+          base::win::OSInfo::X86_ARCHITECTURE) {
+        if (FAILED(SHGetFolderPath(NULL, CSIDL_PROGRAM_FILESX86, NULL,
+                                   SHGFP_TYPE_CURRENT, system_buffer)))
+          return false;
+        cur = FilePath(system_buffer);
+        break;
+      }
+      // Fall through to base::DIR_PROGRAM_FILES if we're on an X86 machine.
+      FALLTHROUGH;
+    case base::DIR_PROGRAM_FILES:
+      if (FAILED(SHGetFolderPath(NULL, CSIDL_PROGRAM_FILES, NULL,
+                                 SHGFP_TYPE_CURRENT, system_buffer)))
+        return false;
+      cur = FilePath(system_buffer);
+      break;
+    case base::DIR_PROGRAM_FILES6432:
+#if !defined(_WIN64)
+      if (base::win::OSInfo::GetInstance()->wow64_status() ==
+          base::win::OSInfo::WOW64_ENABLED) {
+        std::unique_ptr<base::Environment> env(base::Environment::Create());
+        std::string programfiles_w6432;
+        // A 32-bit process running under WOW64 sets the ProgramW6432
+        // environment variable. See
+        // https://msdn.microsoft.com/library/windows/desktop/aa384274.aspx.
+        if (!env->GetVar("ProgramW6432", &programfiles_w6432))
+          return false;
+        // GetVar returns UTF8 - convert back to Wide.
+        cur = FilePath(UTF8ToWide(programfiles_w6432));
+        break;
+      }
+#endif
+      if (FAILED(SHGetFolderPath(NULL, CSIDL_PROGRAM_FILES, NULL,
+                                 SHGFP_TYPE_CURRENT, system_buffer)))
+        return false;
+      cur = FilePath(system_buffer);
+      break;
+    case base::DIR_IE_INTERNET_CACHE:
+      if (FAILED(SHGetFolderPath(NULL, CSIDL_INTERNET_CACHE, NULL,
+                                 SHGFP_TYPE_CURRENT, system_buffer)))
+        return false;
+      cur = FilePath(system_buffer);
+      break;
+    case base::DIR_COMMON_START_MENU:
+      if (FAILED(SHGetFolderPath(NULL, CSIDL_COMMON_PROGRAMS, NULL,
+                                 SHGFP_TYPE_CURRENT, system_buffer)))
+        return false;
+      cur = FilePath(system_buffer);
+      break;
+    case base::DIR_START_MENU:
+      if (FAILED(SHGetFolderPath(NULL, CSIDL_PROGRAMS, NULL,
+                                 SHGFP_TYPE_CURRENT, system_buffer)))
+        return false;
+      cur = FilePath(system_buffer);
+      break;
+    case base::DIR_APP_DATA:
+      if (FAILED(SHGetFolderPath(NULL, CSIDL_APPDATA, NULL, SHGFP_TYPE_CURRENT,
+                                 system_buffer)))
+        return false;
+      cur = FilePath(system_buffer);
+      break;
+    case base::DIR_COMMON_APP_DATA:
+      if (FAILED(SHGetFolderPath(NULL, CSIDL_COMMON_APPDATA, NULL,
+                                 SHGFP_TYPE_CURRENT, system_buffer)))
+        return false;
+      cur = FilePath(system_buffer);
+      break;
+    case base::DIR_LOCAL_APP_DATA:
+      if (FAILED(SHGetFolderPath(NULL, CSIDL_LOCAL_APPDATA, NULL,
+                                 SHGFP_TYPE_CURRENT, system_buffer)))
+        return false;
+      cur = FilePath(system_buffer);
+      break;
+    case base::DIR_SOURCE_ROOT: {
+      FilePath executable_dir;
+      // On Windows, unit tests execute two levels deep from the source root.
+      // For example:  chrome/{Debug|Release}/ui_tests.exe
+      PathService::Get(base::DIR_EXE, &executable_dir);
+      cur = executable_dir.DirName().DirName();
+      break;
+    }
+    case base::DIR_APP_SHORTCUTS: {
+      if (win::GetVersion() < win::VERSION_WIN8)
+        return false;
+
+      base::win::ScopedCoMem<wchar_t> path_buf;
+      if (FAILED(SHGetKnownFolderPath(FOLDERID_ApplicationShortcuts, 0, NULL,
+                                      &path_buf)))
+        return false;
+
+      cur = FilePath(string16(path_buf));
+      break;
+    }
+    case base::DIR_USER_DESKTOP:
+      if (FAILED(SHGetFolderPath(NULL, CSIDL_DESKTOPDIRECTORY, NULL,
+                                 SHGFP_TYPE_CURRENT, system_buffer))) {
+        return false;
+      }
+      cur = FilePath(system_buffer);
+      break;
+    case base::DIR_COMMON_DESKTOP:
+      if (FAILED(SHGetFolderPath(NULL, CSIDL_COMMON_DESKTOPDIRECTORY, NULL,
+                                 SHGFP_TYPE_CURRENT, system_buffer))) {
+        return false;
+      }
+      cur = FilePath(system_buffer);
+      break;
+    case base::DIR_USER_QUICK_LAUNCH:
+      if (!PathService::Get(base::DIR_APP_DATA, &cur))
+        return false;
+      // According to various sources, appending
+      // "Microsoft\Internet Explorer\Quick Launch" to %appdata% is the only
+      // reliable way to get the quick launch folder across all versions of
+      // Windows.
+      // http://stackoverflow.com/questions/76080/how-do-you-reliably-get-the-quick-
+      // http://www.microsoft.com/technet/scriptcenter/resources/qanda/sept05/hey0901.mspx
+      cur = cur.Append(FILE_PATH_LITERAL("Microsoft"))
+                .Append(FILE_PATH_LITERAL("Internet Explorer"))
+                .Append(FILE_PATH_LITERAL("Quick Launch"));
+      break;
+    case base::DIR_TASKBAR_PINS:
+      if (!PathService::Get(base::DIR_USER_QUICK_LAUNCH, &cur))
+        return false;
+      cur = cur.Append(FILE_PATH_LITERAL("User Pinned"))
+                .Append(FILE_PATH_LITERAL("TaskBar"));
+      break;
+    case base::DIR_IMPLICIT_APP_SHORTCUTS:
+      if (!PathService::Get(base::DIR_USER_QUICK_LAUNCH, &cur))
+        return false;
+      cur = cur.Append(FILE_PATH_LITERAL("User Pinned"))
+                .Append(FILE_PATH_LITERAL("ImplicitAppShortcuts"));
+      break;
+    case base::DIR_WINDOWS_FONTS:
+      if (FAILED(SHGetFolderPath(
+              NULL, CSIDL_FONTS, NULL, SHGFP_TYPE_CURRENT, system_buffer))) {
+        return false;
+      }
+      cur = FilePath(system_buffer);
+      break;
+    default:
+      return false;
+  }
+
+  *result = cur;
+  return true;
+}
+
+}  // namespace base
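One of the derived keys above, sketched from the caller's side:

  base::FilePath pins;
  if (base::PathService::Get(base::DIR_TASKBAR_PINS, &pins)) {
    // pins is DIR_USER_QUICK_LAUNCH plus "User Pinned\TaskBar", i.e. it ends
    // in "...\Microsoft\Internet Explorer\Quick Launch\User Pinned\TaskBar".
  }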
diff --git a/base/base_paths_win.h b/base/base_paths_win.h
new file mode 100644
index 0000000..2db16a6
--- /dev/null
+++ b/base/base_paths_win.h
@@ -0,0 +1,53 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_BASE_PATHS_WIN_H_
+#define BASE_BASE_PATHS_WIN_H_
+
+// This file declares windows-specific path keys for the base module.
+// These can be used with the PathService to access various special
+// directories and files.
+
+namespace base {
+
+enum {
+  PATH_WIN_START = 100,
+
+  DIR_WINDOWS,  // Windows directory, usually "c:\windows"
+  DIR_SYSTEM,   // Usually "c:\windows\system32"
+  //                         32-bit     32-bit on 64-bit   64-bit on 64-bit
+  // DIR_PROGRAM_FILES         1               2                  1
+  // DIR_PROGRAM_FILESX86      1               2                  2
+  // DIR_PROGRAM_FILES6432     1               1                  1
+  // 1 - C:\Program Files   2 - C:\Program Files (x86)
+  DIR_PROGRAM_FILES,      // See table above.
+  DIR_PROGRAM_FILESX86,   // See table above.
+  DIR_PROGRAM_FILES6432,  // See table above.
+
+  DIR_IE_INTERNET_CACHE,       // Temporary Internet Files directory.
+  DIR_COMMON_START_MENU,       // Usually "C:\ProgramData\Microsoft\Windows\
+                               // Start Menu\Programs"
+  DIR_START_MENU,              // Usually "C:\Users\<user>\AppData\Roaming\
+                               // Microsoft\Windows\Start Menu\Programs"
+  DIR_APP_DATA,                // Application Data directory under the user
+                               // profile.
+  DIR_LOCAL_APP_DATA,          // "Local Settings\Application Data" directory
+                               // under the user profile.
+  DIR_COMMON_APP_DATA,         // Usually "C:\ProgramData".
+  DIR_APP_SHORTCUTS,           // Where tiles on the start screen are stored,
+                               // only for Windows 8. Maps to "Local\AppData\
+                               // Microsoft\Windows\Application Shortcuts\".
+  DIR_COMMON_DESKTOP,          // Directory for the common desktop (visible
+                               // on all user's Desktop).
+  DIR_USER_QUICK_LAUNCH,       // Directory for the quick launch shortcuts.
+  DIR_TASKBAR_PINS,            // Directory for the shortcuts pinned to taskbar.
+  DIR_IMPLICIT_APP_SHORTCUTS,  // The implicit user pinned shortcut directory.
+  DIR_WINDOWS_FONTS,           // Usually C:\Windows\Fonts.
+
+  PATH_WIN_END
+};
+
+}  // namespace base
+
+#endif  // BASE_BASE_PATHS_WIN_H_
diff --git a/base/base_switches.cc b/base/base_switches.cc
new file mode 100644
index 0000000..7ce7380
--- /dev/null
+++ b/base/base_switches.cc
@@ -0,0 +1,127 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/base_switches.h"
+#include "build/build_config.h"
+
+namespace switches {
+
+// Delays execution of base::TaskPriority::BACKGROUND tasks until shutdown.
+const char kDisableBackgroundTasks[] = "disable-background-tasks";
+
+// Disables crash reporting.
+const char kDisableBreakpad[]               = "disable-breakpad";
+
+// Comma-separated list of feature names to disable. See also kEnableFeatures.
+const char kDisableFeatures[] = "disable-features";
+
+// Indicates that crash reporting should be enabled. On platforms where helper
+// processes cannot access the files needed to make this decision, this flag is
+// generated internally.
+const char kEnableCrashReporter[]           = "enable-crash-reporter";
+
+// Comma-separated list of feature names to enable. See also kDisableFeatures.
+const char kEnableFeatures[] = "enable-features";
+
+// Generates full memory crash dump.
+const char kFullMemoryCrashReport[]         = "full-memory-crash-report";
+
+// Force low-end device mode when set.
+const char kEnableLowEndDeviceMode[]        = "enable-low-end-device-mode";
+
+// Force disabling of low-end device mode when set.
+const char kDisableLowEndDeviceMode[]       = "disable-low-end-device-mode";
+
+// This option can be used to force field trials when testing changes locally.
+// The argument is a list of name and value pairs, separated by slashes. If a
+// trial name is prefixed with an asterisk, that trial will start activated.
+// For example, the following argument defines two trials, with the second one
+// activated: "GoogleNow/Enable/*MaterialDesignNTP/Default/" This option can
+// also be used by the browser process to send the list of trials to a
+// non-browser process, using the same format. See
+// FieldTrialList::CreateTrialsFromString() in field_trial.h for details.
+const char kForceFieldTrials[]              = "force-fieldtrials";
+
+// Suppresses all error dialogs when present.
+const char kNoErrorDialogs[]                = "noerrdialogs";
+
+// When running certain tests that spawn child processes, this switch indicates
+// to the test framework that the current process is a child process.
+const char kTestChildProcess[]              = "test-child-process";
+
+// When running certain tests that spawn child processes, this switch indicates
+// to the test framework that the current process should not initialize ICU to
+// avoid creating any scoped handles too early in startup.
+const char kTestDoNotInitializeIcu[]        = "test-do-not-initialize-icu";
+
+// Gives the default maximal active V-logging level; 0 is the default.
+// Normally positive values are used for V-logging levels.
+const char kV[]                             = "v";
+
+// Gives the per-module maximal V-logging levels to override the value
+// given by --v.  E.g. "my_module=2,foo*=3" would change the logging
+// level for all code in source files "my_module.*" and "foo*.*"
+// ("-inl" suffixes are also disregarded for this matching).
+//
+// Any pattern containing a forward or backward slash will be tested
+// against the whole pathname and not just the module.  E.g.,
+// "*/foo/bar/*=2" would change the logging level for all code in
+// source files under a "foo/bar" directory.
+const char kVModule[]                       = "vmodule";
+
+// Waits for 60 seconds for a debugger to attach to the process.
+const char kWaitForDebugger[]               = "wait-for-debugger";
+
+// Sends trace events from these categories to a file.
+// --trace-to-file on its own sends to default categories.
+const char kTraceToFile[]                   = "trace-to-file";
+
+// Specifies the file name for --trace-to-file. If unspecified, it will
+// go to a default file name.
+const char kTraceToFileName[]               = "trace-to-file-name";
+
+// Specifies a location for profiling output. This will only work if Chrome has
+// been built with the gyp variable profiling=1 or gn arg enable_profiling=true.
+//
+//   {pid} if present will be replaced by the pid of the process.
+//   {count} if present will be incremented each time a profile is generated
+//           for this process.
+// The default is chrome-profile-{pid} for the browser and test-profile-{pid}
+// for tests.
+const char kProfilingFile[] = "profiling-file";
+
+#if defined(OS_WIN)
+// Disables the USB keyboard detection for blocking the OSK on Win8+.
+const char kDisableUsbKeyboardDetect[]      = "disable-usb-keyboard-detect";
+#endif
+
+#if defined(OS_LINUX) && !defined(OS_CHROMEOS)
+// The /dev/shm partition is too small in certain VM environments, causing
+// Chrome to fail or crash (see http://crbug.com/715363). Use this flag to
+// work around this issue (a temporary directory will always be used to create
+// anonymous shared memory files).
+const char kDisableDevShmUsage[] = "disable-dev-shm-usage";
+#endif
+
+#if defined(OS_POSIX)
+// Used for turning on Breakpad crash reporting in a debug environment where
+// crash reporting is typically compiled but disabled.
+const char kEnableCrashReporterForTesting[] =
+    "enable-crash-reporter-for-testing";
+#endif
+
+#if defined(OS_ANDROID)
+// Optimizes memory layout of the native library using the orderfile symbols
+// given in base/android/library_loader/anchor_functions.h, via madvise and
+// changing the library prefetch behavior.
+const char kOrderfileMemoryOptimization[] = "orderfile-memory-optimization";
+// Force prefetching of the native library even if otherwise disabled, e.g. by
+// --orderfile-memory-optimization.
+const char kForceNativePrefetch[] = "force-native-prefetch";
+// If prefetching is enabled, only prefetch the ordered part of the native
+// library. Has no effect if prefetching is disabled.
+const char kNativePrefetchOrderedOnly[] = "native-prefetch-ordered-only";
+#endif
+
+}  // namespace switches
diff --git a/base/base_switches.h b/base/base_switches.h
new file mode 100644
index 0000000..3425e6f
--- /dev/null
+++ b/base/base_switches.h
@@ -0,0 +1,53 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Defines all the "base" command-line switches.
+
+#ifndef BASE_BASE_SWITCHES_H_
+#define BASE_BASE_SWITCHES_H_
+
+#include "build/build_config.h"
+
+namespace switches {
+
+extern const char kDisableBackgroundTasks[];
+extern const char kDisableBreakpad[];
+extern const char kDisableFeatures[];
+extern const char kDisableLowEndDeviceMode[];
+extern const char kEnableCrashReporter[];
+extern const char kEnableFeatures[];
+extern const char kEnableLowEndDeviceMode[];
+extern const char kForceFieldTrials[];
+extern const char kFullMemoryCrashReport[];
+extern const char kNoErrorDialogs[];
+extern const char kProfilingFile[];
+extern const char kTestChildProcess[];
+extern const char kTestDoNotInitializeIcu[];
+extern const char kTraceToFile[];
+extern const char kTraceToFileName[];
+extern const char kV[];
+extern const char kVModule[];
+extern const char kWaitForDebugger[];
+
+#if defined(OS_WIN)
+extern const char kDisableUsbKeyboardDetect[];
+#endif
+
+#if defined(OS_LINUX) && !defined(OS_CHROMEOS)
+extern const char kDisableDevShmUsage[];
+#endif
+
+#if defined(OS_POSIX)
+extern const char kEnableCrashReporterForTesting[];
+#endif
+
+#if defined(OS_ANDROID)
+extern const char kOrderfileMemoryOptimization[];
+extern const char kForceNativePrefetch[];
+extern const char kNativePrefetchOrderedOnly[];
+#endif
+
+}  // namespace switches
+
+#endif  // BASE_BASE_SWITCHES_H_
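These constants are consumed through base::CommandLine; a typical, illustrative pattern:

  #include "base/base_switches.h"
  #include "base/command_line.h"

  const base::CommandLine& cmd = *base::CommandLine::ForCurrentProcess();
  if (cmd.HasSwitch(switches::kWaitForDebugger)) {
    // Stall startup so a debugger can attach.
  }
  std::string vmodule = cmd.GetSwitchValueASCII(switches::kVModule);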
diff --git a/base/big_endian.cc b/base/big_endian.cc
new file mode 100644
index 0000000..514581f
--- /dev/null
+++ b/base/big_endian.cc
@@ -0,0 +1,105 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/big_endian.h"
+
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+BigEndianReader::BigEndianReader(const char* buf, size_t len)
+    : ptr_(buf), end_(ptr_ + len) {}
+
+bool BigEndianReader::Skip(size_t len) {
+  if (ptr_ + len > end_)
+    return false;
+  ptr_ += len;
+  return true;
+}
+
+bool BigEndianReader::ReadBytes(void* out, size_t len) {
+  if (ptr_ + len > end_)
+    return false;
+  memcpy(out, ptr_, len);
+  ptr_ += len;
+  return true;
+}
+
+bool BigEndianReader::ReadPiece(base::StringPiece* out, size_t len) {
+  if (ptr_ + len > end_)
+    return false;
+  *out = base::StringPiece(ptr_, len);
+  ptr_ += len;
+  return true;
+}
+
+template<typename T>
+bool BigEndianReader::Read(T* value) {
+  if (ptr_ + sizeof(T) > end_)
+    return false;
+  ReadBigEndian<T>(ptr_, value);
+  ptr_ += sizeof(T);
+  return true;
+}
+
+bool BigEndianReader::ReadU8(uint8_t* value) {
+  return Read(value);
+}
+
+bool BigEndianReader::ReadU16(uint16_t* value) {
+  return Read(value);
+}
+
+bool BigEndianReader::ReadU32(uint32_t* value) {
+  return Read(value);
+}
+
+bool BigEndianReader::ReadU64(uint64_t* value) {
+  return Read(value);
+}
+
+BigEndianWriter::BigEndianWriter(char* buf, size_t len)
+    : ptr_(buf), end_(ptr_ + len) {}
+
+bool BigEndianWriter::Skip(size_t len) {
+  if (ptr_ + len > end_)
+    return false;
+  ptr_ += len;
+  return true;
+}
+
+bool BigEndianWriter::WriteBytes(const void* buf, size_t len) {
+  if (ptr_ + len > end_)
+    return false;
+  memcpy(ptr_, buf, len);
+  ptr_ += len;
+  return true;
+}
+
+template<typename T>
+bool BigEndianWriter::Write(T value) {
+  if (ptr_ + sizeof(T) > end_)
+    return false;
+  WriteBigEndian<T>(ptr_, value);
+  ptr_ += sizeof(T);
+  return true;
+}
+
+bool BigEndianWriter::WriteU8(uint8_t value) {
+  return Write(value);
+}
+
+bool BigEndianWriter::WriteU16(uint16_t value) {
+  return Write(value);
+}
+
+bool BigEndianWriter::WriteU32(uint32_t value) {
+  return Write(value);
+}
+
+bool BigEndianWriter::WriteU64(uint64_t value) {
+  return Write(value);
+}
+
+}  // namespace base
diff --git a/base/big_endian.h b/base/big_endian.h
new file mode 100644
index 0000000..5684c67
--- /dev/null
+++ b/base/big_endian.h
@@ -0,0 +1,106 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_BIG_ENDIAN_H_
+#define BASE_BIG_ENDIAN_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/base_export.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+// Read an integer (signed or unsigned) from |buf| in Big Endian order.
+// Note: this loop is unrolled with -O1 and above.
+// NOTE(szym): glibc's dns-canon.c uses ntohs(*(uint16_t*)ptr), which is
+// potentially unaligned.
+// This would cause SIGBUS on ARMv5 or earlier and ARMv6-M.
+template<typename T>
+inline void ReadBigEndian(const char buf[], T* out) {
+  *out = buf[0];
+  for (size_t i = 1; i < sizeof(T); ++i) {
+    *out <<= 8;
+    // Must cast to uint8_t to avoid clobbering by sign extension.
+    *out |= static_cast<uint8_t>(buf[i]);
+  }
+}
+
+// Write an integer (signed or unsigned) |val| to |buf| in Big Endian order.
+// Note: this loop is unrolled with -O1 and above.
+template<typename T>
+inline void WriteBigEndian(char buf[], T val) {
+  for (size_t i = 0; i < sizeof(T); ++i) {
+    buf[sizeof(T)-i-1] = static_cast<char>(val & 0xFF);
+    val >>= 8;
+  }
+}
+
+// Specializations to make clang happy about the (dead code) shifts above.
+template <>
+inline void ReadBigEndian<uint8_t>(const char buf[], uint8_t* out) {
+  *out = buf[0];
+}
+
+template <>
+inline void WriteBigEndian<uint8_t>(char buf[], uint8_t val) {
+  buf[0] = static_cast<char>(val);
+}
+
+// Allows reading integers in network order (big endian) while iterating over
+// an underlying buffer. All the reading functions advance the internal pointer.
+class BASE_EXPORT BigEndianReader {
+ public:
+  BigEndianReader(const char* buf, size_t len);
+
+  const char* ptr() const { return ptr_; }
+  int remaining() const { return end_ - ptr_; }
+
+  bool Skip(size_t len);
+  bool ReadBytes(void* out, size_t len);
+  // Creates a StringPiece in |out| that points to the underlying buffer.
+  bool ReadPiece(base::StringPiece* out, size_t len);
+  bool ReadU8(uint8_t* value);
+  bool ReadU16(uint16_t* value);
+  bool ReadU32(uint32_t* value);
+  bool ReadU64(uint64_t* value);
+
+ private:
+  // Hidden to promote type safety.
+  template<typename T>
+  bool Read(T* v);
+
+  const char* ptr_;
+  const char* end_;
+};
+
+// Allows writing integers in network order (big endian) while iterating over
+// an underlying buffer. All the writing functions advance the internal pointer.
+class BASE_EXPORT BigEndianWriter {
+ public:
+  BigEndianWriter(char* buf, size_t len);
+
+  char* ptr() const { return ptr_; }
+  int remaining() const { return end_ - ptr_; }
+
+  bool Skip(size_t len);
+  bool WriteBytes(const void* buf, size_t len);
+  bool WriteU8(uint8_t value);
+  bool WriteU16(uint16_t value);
+  bool WriteU32(uint32_t value);
+  bool WriteU64(uint64_t value);
+
+ private:
+  // Hidden to promote type safety.
+  template<typename T>
+  bool Write(T v);
+
+  char* ptr_;
+  char* end_;
+};
+
+}  // namespace base
+
+#endif  // BASE_BIG_ENDIAN_H_
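A round-trip sketch of the reader and writer declared above:

  #include "base/big_endian.h"

  char buf[6];
  base::BigEndianWriter writer(buf, sizeof(buf));
  writer.WriteU16(0x0102);      // buf[0..1] = 01 02
  writer.WriteU32(0x0304050A);  // buf[2..5] = 03 04 05 0A

  uint16_t u16;
  uint32_t u32;
  base::BigEndianReader reader(buf, sizeof(buf));
  reader.ReadU16(&u16);  // u16 == 0x0102
  reader.ReadU32(&u32);  // u32 == 0x0304050A
  // Any further read fails: remaining() is now 0.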
diff --git a/base/big_endian_unittest.cc b/base/big_endian_unittest.cc
new file mode 100644
index 0000000..4e1e7ce
--- /dev/null
+++ b/base/big_endian_unittest.cc
@@ -0,0 +1,116 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/big_endian.h"
+
+#include <stdint.h>
+
+#include "base/strings/string_piece.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(BigEndianReaderTest, ReadsValues) {
+  char data[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xA, 0xB, 0xC, 0xD, 0xE, 0xF,
+                  0x1A, 0x2B, 0x3C, 0x4D, 0x5E };
+  char buf[2];
+  uint8_t u8;
+  uint16_t u16;
+  uint32_t u32;
+  uint64_t u64;
+  base::StringPiece piece;
+  BigEndianReader reader(data, sizeof(data));
+
+  EXPECT_TRUE(reader.Skip(2));
+  EXPECT_EQ(data + 2, reader.ptr());
+  EXPECT_EQ(reader.remaining(), static_cast<int>(sizeof(data)) - 2);
+  EXPECT_TRUE(reader.ReadBytes(buf, sizeof(buf)));
+  EXPECT_EQ(0x2, buf[0]);
+  EXPECT_EQ(0x3, buf[1]);
+  EXPECT_TRUE(reader.ReadU8(&u8));
+  EXPECT_EQ(0x4, u8);
+  EXPECT_TRUE(reader.ReadU16(&u16));
+  EXPECT_EQ(0x0506, u16);
+  EXPECT_TRUE(reader.ReadU32(&u32));
+  EXPECT_EQ(0x0708090Au, u32);
+  EXPECT_TRUE(reader.ReadU64(&u64));
+  EXPECT_EQ(0x0B0C0D0E0F1A2B3Cllu, u64);
+  base::StringPiece expected(reader.ptr(), 2);
+  EXPECT_TRUE(reader.ReadPiece(&piece, 2));
+  EXPECT_EQ(2u, piece.size());
+  EXPECT_EQ(expected.data(), piece.data());
+}
+
+TEST(BigEndianReaderTest, RespectsLength) {
+  char data[8];
+  char buf[2];
+  uint8_t u8;
+  uint16_t u16;
+  uint32_t u32;
+  uint64_t u64;
+  base::StringPiece piece;
+  BigEndianReader reader(data, sizeof(data));
+  // 8 left
+  EXPECT_FALSE(reader.Skip(9));
+  EXPECT_TRUE(reader.Skip(1));
+  // 7 left
+  EXPECT_FALSE(reader.ReadU64(&u64));
+  EXPECT_TRUE(reader.Skip(4));
+  // 3 left
+  EXPECT_FALSE(reader.ReadU32(&u32));
+  EXPECT_FALSE(reader.ReadPiece(&piece, 4));
+  EXPECT_TRUE(reader.Skip(2));
+  // 1 left
+  EXPECT_FALSE(reader.ReadU16(&u16));
+  EXPECT_FALSE(reader.ReadBytes(buf, 2));
+  EXPECT_TRUE(reader.Skip(1));
+  // 0 left
+  EXPECT_FALSE(reader.ReadU8(&u8));
+  EXPECT_EQ(0, reader.remaining());
+}
+
+TEST(BigEndianWriterTest, WritesValues) {
+  char expected[] = { 0, 0, 2, 3, 4, 5, 6, 7, 8, 9, 0xA, 0xB, 0xC, 0xD, 0xE,
+                      0xF, 0x1A, 0x2B, 0x3C };
+  char data[sizeof(expected)];
+  char buf[] = { 0x2, 0x3 };
+  memset(data, 0, sizeof(data));
+  BigEndianWriter writer(data, sizeof(data));
+
+  EXPECT_TRUE(writer.Skip(2));
+  EXPECT_TRUE(writer.WriteBytes(buf, sizeof(buf)));
+  EXPECT_TRUE(writer.WriteU8(0x4));
+  EXPECT_TRUE(writer.WriteU16(0x0506));
+  EXPECT_TRUE(writer.WriteU32(0x0708090A));
+  EXPECT_TRUE(writer.WriteU64(0x0B0C0D0E0F1A2B3Cllu));
+  EXPECT_EQ(0, memcmp(expected, data, sizeof(expected)));
+}
+
+TEST(BigEndianWriterTest, RespectsLength) {
+  char data[8];
+  char buf[2];
+  uint8_t u8 = 0;
+  uint16_t u16 = 0;
+  uint32_t u32 = 0;
+  uint64_t u64 = 0;
+  BigEndianWriter writer(data, sizeof(data));
+  // 8 left
+  EXPECT_FALSE(writer.Skip(9));
+  EXPECT_TRUE(writer.Skip(1));
+  // 7 left
+  EXPECT_FALSE(writer.WriteU64(u64));
+  EXPECT_TRUE(writer.Skip(4));
+  // 3 left
+  EXPECT_FALSE(writer.WriteU32(u32));
+  EXPECT_TRUE(writer.Skip(2));
+  // 1 left
+  EXPECT_FALSE(writer.WriteU16(u16));
+  EXPECT_FALSE(writer.WriteBytes(buf, 2));
+  EXPECT_TRUE(writer.Skip(1));
+  // 0 left
+  EXPECT_FALSE(writer.WriteU8(u8));
+  EXPECT_EQ(0, writer.remaining());
+}
+
+}  // namespace base
diff --git a/base/bind.h b/base/bind.h
new file mode 100644
index 0000000..aab6828
--- /dev/null
+++ b/base/bind.h
@@ -0,0 +1,457 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_BIND_H_
+#define BASE_BIND_H_
+
+#include <utility>
+
+#include "base/bind_internal.h"
+
+// -----------------------------------------------------------------------------
+// Usage documentation
+// -----------------------------------------------------------------------------
+//
+// Overview:
+// base::BindOnce() and base::BindRepeating() are helpers for creating
+// base::OnceCallback and base::RepeatingCallback objects respectively.
+//
+// For a runnable object of n-arity, the base::Bind*() family allows partial
+// application of the first m arguments. The remaining n - m arguments must be
+// passed when invoking the callback with Run().
+//
+//   // The first argument is bound at callback creation; the remaining
+//   // two must be passed when calling Run() on the callback object.
+//   base::OnceCallback<void(int, long)> cb = base::BindOnce(
+//       [](short x, int y, long z) { return x * y * z; }, 42);
+//
+// When binding to a method, the receiver object must also be specified at
+// callback creation time. When Run() is invoked, the method will be invoked on
+// the specified receiver object.
+//
+//   class C : public base::RefCounted<C> { void F(); };
+//   auto instance = base::MakeRefCounted<C>();
+//   auto cb = base::BindOnce(&C::F, instance);
+//   cb.Run();  // Identical to instance->F()
+//
+// base::Bind is currently a type alias for base::BindRepeating(). In the
+// future, we expect to flip this to default to base::BindOnce().
+//
+// See //docs/callback.md for the full documentation.
+//
+// -----------------------------------------------------------------------------
+// Implementation notes
+// -----------------------------------------------------------------------------
+//
+// If you're reading the implementation, before proceeding further, you should
+// read the top comment of base/bind_internal.h for a definition of common
+// terms and concepts.
+
+namespace base {
+
+namespace internal {
+
+// IsOnceCallback<T> is a std::true_type if |T| is a OnceCallback.
+template <typename T>
+struct IsOnceCallback : std::false_type {};
+
+template <typename Signature>
+struct IsOnceCallback<OnceCallback<Signature>> : std::true_type {};
+
+// Helper to assert that parameter |i| of type |Arg| can be bound, which means:
+// - |Arg| can be retained internally as |Storage|.
+// - |Arg| can be forwarded as |Unwrapped| to |Param|.
+template <size_t i,
+          typename Arg,
+          typename Storage,
+          typename Unwrapped,
+          typename Param>
+struct AssertConstructible {
+ private:
+  static constexpr bool param_is_forwardable =
+      std::is_constructible<Param, Unwrapped>::value;
+  // Unlike the check for binding into storage below, the check for
+  // forwardability drops the const qualifier for repeating callbacks. This is
+  // to try to catch instances where std::move()--which forwards as a const
+  // reference with repeating callbacks--is used instead of base::Passed().
+  static_assert(
+      param_is_forwardable ||
+          !std::is_constructible<Param, std::decay_t<Unwrapped>&&>::value,
+      "Bound argument |i| is move-only but will be forwarded by copy. "
+      "Ensure |Arg| is bound using base::Passed(), not std::move().");
+  static_assert(
+      param_is_forwardable,
+      "Bound argument |i| of type |Arg| cannot be forwarded as "
+      "|Unwrapped| to the bound functor, which declares it as |Param|.");
+
+  static constexpr bool arg_is_storable =
+      std::is_constructible<Storage, Arg>::value;
+  static_assert(arg_is_storable ||
+                    !std::is_constructible<Storage, std::decay_t<Arg>&&>::value,
+                "Bound argument |i| is move-only but will be bound by copy. "
+                "Ensure |Arg| is mutable and bound using std::move().");
+  static_assert(arg_is_storable,
+                "Bound argument |i| of type |Arg| cannot be converted and "
+                "bound as |Storage|.");
+};
+
+// Takes three same-length TypeLists, and applies AssertConstructible to each
+// triple.
+template <typename Index,
+          typename Args,
+          typename UnwrappedTypeList,
+          typename ParamsList>
+struct AssertBindArgsValidity;
+
+template <size_t... Ns,
+          typename... Args,
+          typename... Unwrapped,
+          typename... Params>
+struct AssertBindArgsValidity<std::index_sequence<Ns...>,
+                              TypeList<Args...>,
+                              TypeList<Unwrapped...>,
+                              TypeList<Params...>>
+    : AssertConstructible<Ns, Args, std::decay_t<Args>, Unwrapped, Params>... {
+  static constexpr bool ok = true;
+};
+
+// The implementation of TransformToUnwrappedType below.
+template <bool is_once, typename T>
+struct TransformToUnwrappedTypeImpl;
+
+template <typename T>
+struct TransformToUnwrappedTypeImpl<true, T> {
+  using StoredType = std::decay_t<T>;
+  using ForwardType = StoredType&&;
+  using Unwrapped = decltype(Unwrap(std::declval<ForwardType>()));
+};
+
+template <typename T>
+struct TransformToUnwrappedTypeImpl<false, T> {
+  using StoredType = std::decay_t<T>;
+  using ForwardType = const StoredType&;
+  using Unwrapped = decltype(Unwrap(std::declval<ForwardType>()));
+};
+
+// Transform |T| into `Unwrapped` type, which is passed to the target function.
+// Example:
+//   In is_once == true case,
+//     `int&&` -> `int&&`,
+//     `const int&` -> `int&&`,
+//     `OwnedWrapper<int>&` -> `int*&&`.
+//   In is_once == false case,
+//     `int&&` -> `const int&`,
+//     `const int&` -> `const int&`,
+//     `OwnedWrapper<int>&` -> `int* const &`.
+template <bool is_once, typename T>
+using TransformToUnwrappedType =
+    typename TransformToUnwrappedTypeImpl<is_once, T>::Unwrapped;
+
+// Transforms |Args| into `Unwrapped` types, and packs them into a TypeList.
+// If |is_method| is true, tries to dereference the first argument to support
+// smart pointers.
+template <bool is_once, bool is_method, typename... Args>
+struct MakeUnwrappedTypeListImpl {
+  using Type = TypeList<TransformToUnwrappedType<is_once, Args>...>;
+};
+
+// Performs special handling for `this` pointers.
+// Example:
+//   int* -> int*,
+//   std::unique_ptr<int> -> int*.
+template <bool is_once, typename Receiver, typename... Args>
+struct MakeUnwrappedTypeListImpl<is_once, true, Receiver, Args...> {
+  using UnwrappedReceiver = TransformToUnwrappedType<is_once, Receiver>;
+  using Type = TypeList<decltype(&*std::declval<UnwrappedReceiver>()),
+                        TransformToUnwrappedType<is_once, Args>...>;
+};
+
+template <bool is_once, bool is_method, typename... Args>
+using MakeUnwrappedTypeList =
+    typename MakeUnwrappedTypeListImpl<is_once, is_method, Args...>::Type;
+
+}  // namespace internal
+
+// Bind as OnceCallback.
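+// A minimal illustrative sketch (the lambda and values are hypothetical):
+//
+//   OnceCallback<int(int)> cb =
+//       BindOnce([](int a, int b) { return a + b; }, 1);
+//   int sum = std::move(cb).Run(2);  // sum == 3.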
+template <typename Functor, typename... Args>
+inline OnceCallback<MakeUnboundRunType<Functor, Args...>>
+BindOnce(Functor&& functor, Args&&... args) {
+  static_assert(!internal::IsOnceCallback<std::decay_t<Functor>>() ||
+                    (std::is_rvalue_reference<Functor&&>() &&
+                     !std::is_const<std::remove_reference_t<Functor>>()),
+                "BindOnce requires non-const rvalue for OnceCallback binding."
+                " I.e.: base::BindOnce(std::move(callback)).");
+
+  // This block checks if each |args| matches the corresponding params of the
+  // target function. This check does not affect the behavior of Bind, but it
+  // makes the compile error message more readable on a mismatch.
+  using Helper = internal::BindTypeHelper<Functor, Args...>;
+  using FunctorTraits = typename Helper::FunctorTraits;
+  using BoundArgsList = typename Helper::BoundArgsList;
+  using UnwrappedArgsList =
+      internal::MakeUnwrappedTypeList<true, FunctorTraits::is_method,
+                                      Args&&...>;
+  using BoundParamsList = typename Helper::BoundParamsList;
+  static_assert(internal::AssertBindArgsValidity<
+                    std::make_index_sequence<Helper::num_bounds>, BoundArgsList,
+                    UnwrappedArgsList, BoundParamsList>::ok,
+                "The bound args need to be convertible to the target params.");
+
+  using BindState = internal::MakeBindStateType<Functor, Args...>;
+  using UnboundRunType = MakeUnboundRunType<Functor, Args...>;
+  using Invoker = internal::Invoker<BindState, UnboundRunType>;
+  using CallbackType = OnceCallback<UnboundRunType>;
+
+  // Store the invoke func into PolymorphicInvoke before casting it to
+  // InvokeFuncStorage, so that we can ensure its type matches to
+  // PolymorphicInvoke, to which CallbackType will cast back.
+  using PolymorphicInvoke = typename CallbackType::PolymorphicInvoke;
+  PolymorphicInvoke invoke_func = &Invoker::RunOnce;
+
+  using InvokeFuncStorage = internal::BindStateBase::InvokeFuncStorage;
+  return CallbackType(new BindState(
+      reinterpret_cast<InvokeFuncStorage>(invoke_func),
+      std::forward<Functor>(functor),
+      std::forward<Args>(args)...));
+}
+
+// Bind as RepeatingCallback.
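+// A minimal illustrative sketch (the lambda and values are hypothetical):
+//
+//   RepeatingCallback<int(int)> cb =
+//       BindRepeating([](int a, int b) { return a + b; }, 1);
+//   int sum = cb.Run(2);  // sum == 3. |cb| remains valid and may run again.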
+template <typename Functor, typename... Args>
+inline RepeatingCallback<MakeUnboundRunType<Functor, Args...>>
+BindRepeating(Functor&& functor, Args&&... args) {
+  static_assert(
+      !internal::IsOnceCallback<std::decay_t<Functor>>(),
+      "BindRepeating cannot bind OnceCallback. Use BindOnce with std::move().");
+
+  // This block checks if each |args| matches the corresponding params of the
+  // target function. This check does not affect the behavior of Bind, but it
+  // makes the compile error message more readable on a mismatch.
+  using Helper = internal::BindTypeHelper<Functor, Args...>;
+  using FunctorTraits = typename Helper::FunctorTraits;
+  using BoundArgsList = typename Helper::BoundArgsList;
+  using UnwrappedArgsList =
+      internal::MakeUnwrappedTypeList<false, FunctorTraits::is_method,
+                                      Args&&...>;
+  using BoundParamsList = typename Helper::BoundParamsList;
+  static_assert(internal::AssertBindArgsValidity<
+                    std::make_index_sequence<Helper::num_bounds>, BoundArgsList,
+                    UnwrappedArgsList, BoundParamsList>::ok,
+                "The bound args need to be convertible to the target params.");
+
+  using BindState = internal::MakeBindStateType<Functor, Args...>;
+  using UnboundRunType = MakeUnboundRunType<Functor, Args...>;
+  using Invoker = internal::Invoker<BindState, UnboundRunType>;
+  using CallbackType = RepeatingCallback<UnboundRunType>;
+
+  // Store the invoke func into PolymorphicInvoke before casting it to
+  // InvokeFuncStorage, so that we can ensure its type matches to
+  // PolymorphicInvoke, to which CallbackType will cast back.
+  using PolymorphicInvoke = typename CallbackType::PolymorphicInvoke;
+  PolymorphicInvoke invoke_func = &Invoker::Run;
+
+  using InvokeFuncStorage = internal::BindStateBase::InvokeFuncStorage;
+  return CallbackType(new BindState(
+      reinterpret_cast<InvokeFuncStorage>(invoke_func),
+      std::forward<Functor>(functor),
+      std::forward<Args>(args)...));
+}
+
+// Unannotated Bind.
+// TODO(tzik): Deprecate this and migrate to OnceCallback and
+// RepeatingCallback, once they are ready.
+template <typename Functor, typename... Args>
+inline Callback<MakeUnboundRunType<Functor, Args...>>
+Bind(Functor&& functor, Args&&... args) {
+  return base::BindRepeating(std::forward<Functor>(functor),
+                             std::forward<Args>(args)...);
+}
+
+// Special cases for binding to a base::Callback without extra bound arguments.
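+// E.g., a sketch (the closure is hypothetical): BindOnce() below hands back
+// the callback unchanged instead of re-wrapping it.
+//
+//   OnceClosure a = BindOnce([] {});
+//   OnceClosure b = BindOnce(std::move(a));  // Equivalent to std::move(a).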
+template <typename Signature>
+OnceCallback<Signature> BindOnce(OnceCallback<Signature> closure) {
+  return closure;
+}
+
+template <typename Signature>
+RepeatingCallback<Signature> BindRepeating(
+    RepeatingCallback<Signature> closure) {
+  return closure;
+}
+
+template <typename Signature>
+Callback<Signature> Bind(Callback<Signature> closure) {
+  return closure;
+}
+
+// Unretained() allows Bind() to bind a non-refcounted class, and to disable
+// refcounting on arguments that are refcounted objects.
+//
+// EXAMPLE OF Unretained():
+//
+//   class Foo {
+//    public:
+//     void func() { cout << "Foo:f" << endl; }
+//   };
+//
+//   // In some function somewhere.
+//   Foo foo;
+//   Closure foo_callback =
+//       Bind(&Foo::func, Unretained(&foo));
+//   foo_callback.Run();  // Prints "Foo:f".
+//
+// Without the Unretained() wrapper on |&foo|, the above call would fail
+// to compile because Foo does not support the AddRef() and Release() methods.
+template <typename T>
+static inline internal::UnretainedWrapper<T> Unretained(T* o) {
+  return internal::UnretainedWrapper<T>(o);
+}
+
+// RetainedRef() accepts a ref counted object and retains a reference to it.
+// When the callback is called, the object is passed as a raw pointer.
+//
+// EXAMPLE OF RetainedRef():
+//
+//    void foo(RefCountedBytes* bytes) {}
+//
+//    scoped_refptr<RefCountedBytes> bytes = ...;
+//    Closure callback = Bind(&foo, base::RetainedRef(bytes));
+//    callback.Run();
+//
+// Without RetainedRef, the scoped_refptr would try to implicitly convert to
+// a raw pointer and fail compilation:
+//
+//    Closure callback = Bind(&foo, bytes); // ERROR!
+template <typename T>
+static inline internal::RetainedRefWrapper<T> RetainedRef(T* o) {
+  return internal::RetainedRefWrapper<T>(o);
+}
+template <typename T>
+static inline internal::RetainedRefWrapper<T> RetainedRef(scoped_refptr<T> o) {
+  return internal::RetainedRefWrapper<T>(std::move(o));
+}
+
+// ConstRef() allows binding a constant reference to an argument rather
+// than a copy.
+//
+// EXAMPLE OF ConstRef():
+//
+//   void foo(int arg) { cout << arg << endl }
+//
+//   int n = 1;
+//   Closure no_ref = Bind(&foo, n);
+//   Closure has_ref = Bind(&foo, ConstRef(n));
+//
+//   no_ref.Run();  // Prints "1"
+//   has_ref.Run();  // Prints "1"
+//
+//   n = 2;
+//   no_ref.Run();  // Prints "1"
+//   has_ref.Run();  // Prints "2"
+//
+// Note that because ConstRef() takes a reference on |n|, |n| must outlive all
+// its bound callbacks.
+template <typename T>
+static inline internal::ConstRefWrapper<T> ConstRef(const T& o) {
+  return internal::ConstRefWrapper<T>(o);
+}
+
+// Owned() transfers ownership of an object to the Callback resulting from
+// bind; the object will be deleted when the Callback is deleted.
+//
+// EXAMPLE OF Owned():
+//
+//   void foo(int* arg) { cout << *arg << endl }
+//
+//   int* pn = new int(1);
+//   Closure foo_callback = Bind(&foo, Owned(pn));
+//
+//   foo_callback.Run();  // Prints "1"
+//   foo_callback.Run();  // Prints "1"
+//   *pn = 2;
+//   foo_callback.Run();  // Prints "2"
+//
+//   foo_callback.Reset();  // |pn| is deleted.  Also will happen when
+//                          // |foo_callback| goes out of scope.
+//
+// Without Owned(), someone would have to know to delete |pn| when the last
+// reference to the Callback is deleted.
+template <typename T>
+static inline internal::OwnedWrapper<T> Owned(T* o) {
+  return internal::OwnedWrapper<T>(o);
+}
+
+// Passed() is for transferring movable-but-not-copyable types (e.g. unique_ptr)
+// through a Callback. Logically, this signifies a destructive transfer of
+// the state of the argument into the target function.  Invoking
+// Callback::Run() twice on a Callback that was created with a Passed()
+// argument will CHECK() because the first invocation would have already
+// transferred ownership to the target function.
+//
+// Note that Passed() is not necessary with BindOnce(), as std::move() does the
+// same thing. Avoid Passed() in favor of std::move() with BindOnce().
+//
+// EXAMPLE OF Passed():
+//
+//   void TakesOwnership(std::unique_ptr<Foo> arg) { }
+//   std::unique_ptr<Foo> CreateFoo() { return std::make_unique<Foo>(); }
+//
+//   auto f = std::make_unique<Foo>();
+//
+//   // |cb| is given ownership of Foo(). |f| is now NULL.
+//   // You can use std::move(f) in place of &f, but it's more verbose.
+//   Closure cb = Bind(&TakesOwnership, Passed(&f));
+//
+//   // Run was never called so |cb| still owns Foo() and deletes
+//   // it on Reset().
+//   cb.Reset();
+//
+//   // |cb| is given a new Foo created by CreateFoo().
+//   cb = Bind(&TakesOwnership, Passed(CreateFoo()));
+//
+//   // |arg| in TakesOwnership() is given ownership of Foo(). |cb|
+//   // no longer owns Foo() and, if reset, would not delete Foo().
+//   cb.Run();  // Foo() is now transferred to |arg| and deleted.
+//   cb.Run();  // This CHECK()s since Foo() has already been used once.
+//
+// We offer two syntaxes for calling Passed().  The first takes an rvalue and
+// is best suited for use with the return value of a function or other temporary
+// rvalues. The second takes a pointer to the scoper and is just syntactic sugar
+// to avoid having to write Passed(std::move(scoper)).
+//
+// Both versions of Passed() prevent T from being an lvalue reference. The first
+// via use of enable_if, and the second takes a T* which will not bind to T&.
+template <typename T,
+          std::enable_if_t<!std::is_lvalue_reference<T>::value>* = nullptr>
+static inline internal::PassedWrapper<T> Passed(T&& scoper) {
+  return internal::PassedWrapper<T>(std::move(scoper));
+}
+template <typename T>
+static inline internal::PassedWrapper<T> Passed(T* scoper) {
+  return internal::PassedWrapper<T>(std::move(*scoper));
+}
+
+// IgnoreResult() is used to adapt a function or Callback with a return type to
+// one with a void return. This is most useful if you have a function with,
+// say, a pesky ignorable bool return that you want to use with PostTask or
+// something else that expects a Callback with a void return.
+//
+// EXAMPLE OF IgnoreResult():
+//
+//   int DoSomething(int arg) { cout << arg << endl; return arg; }
+//
+//   // Assign to a Callback with a void return type.
+//   Callback<void(int)> cb = Bind(IgnoreResult(&DoSomething));
+//   cb.Run(1);  // Prints "1".
+//
+//   // Prints "1" on |ml|.
+//   ml->PostTask(FROM_HERE, Bind(IgnoreResult(&DoSomething), 1));
+template <typename T>
+static inline internal::IgnoreResultHelper<T> IgnoreResult(T data) {
+  return internal::IgnoreResultHelper<T>(std::move(data));
+}
+
+}  // namespace base
+
+#endif  // BASE_BIND_H_
diff --git a/base/bind_helpers.h b/base/bind_helpers.h
new file mode 100644
index 0000000..15961e6
--- /dev/null
+++ b/base/bind_helpers.h
@@ -0,0 +1,69 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_BIND_HELPERS_H_
+#define BASE_BIND_HELPERS_H_
+
+#include <stddef.h>
+
+#include <type_traits>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/memory/weak_ptr.h"
+#include "build/build_config.h"
+
+// This defines a set of simple functions and utilities that people want when
+// using Callback<> and Bind().
+
+namespace base {
+
+// Creates a null callback.
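+// A minimal usage sketch:
+//
+//   OnceClosure cb = NullCallback();
+//   DCHECK(cb.is_null());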
+class BASE_EXPORT NullCallback {
+ public:
+  template <typename R, typename... Args>
+  operator RepeatingCallback<R(Args...)>() const {
+    return RepeatingCallback<R(Args...)>();
+  }
+  template <typename R, typename... Args>
+  operator OnceCallback<R(Args...)>() const {
+    return OnceCallback<R(Args...)>();
+  }
+};
+
+// Creates a callback that does nothing when called.
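+// A minimal usage sketch:
+//
+//   OnceClosure closure = DoNothing();
+//   // When the compiler can't deduce the callback type:
+//   auto cb = DoNothing::Once<int>();  // OnceCallback<void(int)>.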
+class BASE_EXPORT DoNothing {
+ public:
+  template <typename... Args>
+  operator RepeatingCallback<void(Args...)>() const {
+    return Repeatedly<Args...>();
+  }
+  template <typename... Args>
+  operator OnceCallback<void(Args...)>() const {
+    return Once<Args...>();
+  }
+  // Explicit way of specifying the callback type when the compiler can't
+  // deduce it.
+  template <typename... Args>
+  static RepeatingCallback<void(Args...)> Repeatedly() {
+    return BindRepeating([](Args... args) {});
+  }
+  template <typename... Args>
+  static OnceCallback<void(Args...)> Once() {
+    return BindOnce([](Args... args) {});
+  }
+};
+
+// Useful for creating a Closure that will delete a pointer when invoked. Only
+// use this when necessary. In most cases MessageLoop::DeleteSoon() is a better
+// fit.
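+//
+// A minimal usage sketch (|Foo| is a hypothetical non-refcounted type):
+//
+//   Foo* foo = new Foo;
+//   OnceClosure deleter = BindOnce(&DeletePointer<Foo>, foo);
+//   std::move(deleter).Run();  // Deletes |foo|.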
+template <typename T>
+void DeletePointer(T* obj) {
+  delete obj;
+}
+
+}  // namespace base
+
+#endif  // BASE_BIND_HELPERS_H_
diff --git a/base/bind_internal.h b/base/bind_internal.h
new file mode 100644
index 0000000..d748f89
--- /dev/null
+++ b/base/bind_internal.h
@@ -0,0 +1,912 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_BIND_INTERNAL_H_
+#define BASE_BIND_INTERNAL_H_
+
+#include <stddef.h>
+
+#include <type_traits>
+#include <utility>
+
+#include "base/callback_internal.h"
+#include "base/memory/raw_scoped_refptr_mismatch_checker.h"
+#include "base/memory/weak_ptr.h"
+#include "base/template_util.h"
+#include "build/build_config.h"
+
+// See base/callback.h for user documentation.
+//
+//
+// CONCEPTS:
+//  Functor -- A movable type representing something that should be called.
+//             All function pointers and Callback<> are functors even if the
+//             invocation syntax differs.
+//  RunType -- A function type (as opposed to function _pointer_ type) for
+//             a Callback<>::Run().  Usually just a convenience typedef.
+//  (Bound)Args -- A set of types that stores the arguments.
+//
+// Types:
+//  ForceVoidReturn<> -- Helper class for translating function signatures to
+//                       equivalent forms with a "void" return type.
+//  FunctorTraits<> -- Type traits used to determine the correct RunType and
+//                     invocation manner for a Functor.  This is where function
+//                     signature adapters are applied.
+//  InvokeHelper<> -- Takes a Functor + arguments and actually invokes it.
+//                    Handles the differing syntaxes needed for WeakPtr<>
+//                    support.  This is separate from Invoker to avoid creating
+//                    multiple versions of Invoker<>.
+//  Invoker<> -- Unwraps the curried parameters and executes the Functor.
+//  BindState<> -- Stores the curried parameters, and is the main entry point
+//                 into the Bind() system.
+
+namespace base {
+
+template <typename T>
+struct IsWeakReceiver;
+
+template <typename>
+struct BindUnwrapTraits;
+
+template <typename Functor, typename BoundArgsTuple, typename SFINAE = void>
+struct CallbackCancellationTraits;
+
+namespace internal {
+
+template <typename Functor, typename SFINAE = void>
+struct FunctorTraits;
+
+template <typename T>
+class UnretainedWrapper {
+ public:
+  explicit UnretainedWrapper(T* o) : ptr_(o) {}
+  T* get() const { return ptr_; }
+
+ private:
+  T* ptr_;
+};
+
+template <typename T>
+class ConstRefWrapper {
+ public:
+  explicit ConstRefWrapper(const T& o) : ptr_(&o) {}
+  const T& get() const { return *ptr_; }
+
+ private:
+  const T* ptr_;
+};
+
+template <typename T>
+class RetainedRefWrapper {
+ public:
+  explicit RetainedRefWrapper(T* o) : ptr_(o) {}
+  explicit RetainedRefWrapper(scoped_refptr<T> o) : ptr_(std::move(o)) {}
+  T* get() const { return ptr_.get(); }
+
+ private:
+  scoped_refptr<T> ptr_;
+};
+
+template <typename T>
+struct IgnoreResultHelper {
+  explicit IgnoreResultHelper(T functor) : functor_(std::move(functor)) {}
+  explicit operator bool() const { return !!functor_; }
+
+  T functor_;
+};
+
+// An alternate implementation is to avoid the destructive copy, and instead
+// specialize ParamTraits<> for OwnedWrapper<> to change the StorageType to
+// a class that is essentially a std::unique_ptr<>.
+//
+// The current implementation, though, has the benefit of leaving ParamTraits<>
+// fully in callback_internal.h as well as avoiding type conversions during
+// storage.
+template <typename T>
+class OwnedWrapper {
+ public:
+  explicit OwnedWrapper(T* o) : ptr_(o) {}
+  ~OwnedWrapper() { delete ptr_; }
+  T* get() const { return ptr_; }
+  OwnedWrapper(OwnedWrapper&& other) {
+    ptr_ = other.ptr_;
+    other.ptr_ = NULL;
+  }
+
+ private:
+  mutable T* ptr_;
+};
+
+// PassedWrapper is a copyable adapter for a scoper that ignores const.
+//
+// It is needed to get around the fact that Bind() takes a const reference to
+// all its arguments.  Because Bind() takes a const reference to avoid
+// unnecessary copies, it is incompatible with movable-but-not-copyable
+// types; doing a destructive "move" of the type into Bind() would violate
+// the const correctness.
+//
+// This conundrum cannot be solved without either C++11 rvalue references or
+// an O(2^n) blowup of Bind() templates to handle each combination of regular
+// types and movable-but-not-copyable types.  Thus we introduce a wrapper type
+// that is copyable to transmit the correct type information down into
+// BindState<>. Ignoring const in this type makes sense because it is only
+// created when we are explicitly trying to do a destructive move.
+//
+// Two notes:
+//  1) PassedWrapper supports any type that has a move constructor, however
+//     the type will need to be specifically whitelisted in order for it to be
+//     bound to a Callback. We guard this explicitly at the call of Passed()
+//     to make for clear errors. Things not given to Passed() will be forwarded
+//     and stored by value which will not work for general move-only types.
+//  2) is_valid_ is distinct from NULL because it is valid to bind a "NULL"
+//     scoper to a Callback and allow the Callback to execute once.
+template <typename T>
+class PassedWrapper {
+ public:
+  explicit PassedWrapper(T&& scoper)
+      : is_valid_(true), scoper_(std::move(scoper)) {}
+  PassedWrapper(PassedWrapper&& other)
+      : is_valid_(other.is_valid_), scoper_(std::move(other.scoper_)) {}
+  T Take() const {
+    CHECK(is_valid_);
+    is_valid_ = false;
+    return std::move(scoper_);
+  }
+
+ private:
+  mutable bool is_valid_;
+  mutable T scoper_;
+};
+
+template <typename T>
+using Unwrapper = BindUnwrapTraits<std::decay_t<T>>;
+
+template <typename T>
+decltype(auto) Unwrap(T&& o) {
+  return Unwrapper<T>::Unwrap(std::forward<T>(o));
+}
+
+// IsWeakMethod is a helper that determines if we are binding a WeakPtr<> to a
+// method.  It is used internally by Bind() to select the correct
+// InvokeHelper that will no-op itself in the event the WeakPtr<> for
+// the target object is invalidated.
+//
+// The first argument should be the type of the object that will be received by
+// the method.
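+//
+// E.g. for some type Foo, IsWeakMethod<true, WeakPtr<Foo>>::value is true,
+// while both IsWeakMethod<false, WeakPtr<Foo>>::value and
+// IsWeakMethod<true, Foo*>::value are false.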
+template <bool is_method, typename... Args>
+struct IsWeakMethod : std::false_type {};
+
+template <typename T, typename... Args>
+struct IsWeakMethod<true, T, Args...> : IsWeakReceiver<T> {};
+
+// Packs a list of types to hold them in a single type.
+template <typename... Types>
+struct TypeList {};
+
+// Used for DropTypeListItem implementation.
+template <size_t n, typename List>
+struct DropTypeListItemImpl;
+
+// Do not use enable_if and SFINAE here to avoid MSVC2013 compile failure.
+template <size_t n, typename T, typename... List>
+struct DropTypeListItemImpl<n, TypeList<T, List...>>
+    : DropTypeListItemImpl<n - 1, TypeList<List...>> {};
+
+template <typename T, typename... List>
+struct DropTypeListItemImpl<0, TypeList<T, List...>> {
+  using Type = TypeList<T, List...>;
+};
+
+template <>
+struct DropTypeListItemImpl<0, TypeList<>> {
+  using Type = TypeList<>;
+};
+
+// A type-level function that drops the first |n| items from a given TypeList.
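+// E.g. DropTypeListItem<2, TypeList<A, B, C, D>> is evaluated to
+// TypeList<C, D>.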
+template <size_t n, typename List>
+using DropTypeListItem = typename DropTypeListItemImpl<n, List>::Type;
+
+// Used for TakeTypeListItem implementation.
+template <size_t n, typename List, typename... Accum>
+struct TakeTypeListItemImpl;
+
+// Do not use enable_if and SFINAE here to avoid MSVC2013 compile failure.
+template <size_t n, typename T, typename... List, typename... Accum>
+struct TakeTypeListItemImpl<n, TypeList<T, List...>, Accum...>
+    : TakeTypeListItemImpl<n - 1, TypeList<List...>, Accum..., T> {};
+
+template <typename T, typename... List, typename... Accum>
+struct TakeTypeListItemImpl<0, TypeList<T, List...>, Accum...> {
+  using Type = TypeList<Accum...>;
+};
+
+template <typename... Accum>
+struct TakeTypeListItemImpl<0, TypeList<>, Accum...> {
+  using Type = TypeList<Accum...>;
+};
+
+// A type-level function that takes the first |n| items from a given TypeList.
+// E.g. TakeTypeListItem<3, TypeList<A, B, C, D>> is evaluated to
+// TypeList<A, B, C>.
+template <size_t n, typename List>
+using TakeTypeListItem = typename TakeTypeListItemImpl<n, List>::Type;
+
+// Used for ConcatTypeLists implementation.
+template <typename List1, typename List2>
+struct ConcatTypeListsImpl;
+
+template <typename... Types1, typename... Types2>
+struct ConcatTypeListsImpl<TypeList<Types1...>, TypeList<Types2...>> {
+  using Type = TypeList<Types1..., Types2...>;
+};
+
+// A type-level function that concatenates two TypeLists.
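+// E.g. ConcatTypeLists<TypeList<A, B>, TypeList<C, D>> is evaluated to
+// TypeList<A, B, C, D>.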
+template <typename List1, typename List2>
+using ConcatTypeLists = typename ConcatTypeListsImpl<List1, List2>::Type;
+
+// Used for MakeFunctionType implementation.
+template <typename R, typename ArgList>
+struct MakeFunctionTypeImpl;
+
+template <typename R, typename... Args>
+struct MakeFunctionTypeImpl<R, TypeList<Args...>> {
+  // MSVC 2013 doesn't support Type Alias of function types.
+  // Revisit this after we update it to newer version.
+  typedef R Type(Args...);
+};
+
+// A type-level function that constructs a function type that has |R| as its
+// return type and the given TypeList's items as its arguments.
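+// E.g. MakeFunctionType<R, TypeList<A, B, C>> is evaluated to R(A, B, C).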
+template <typename R, typename ArgList>
+using MakeFunctionType = typename MakeFunctionTypeImpl<R, ArgList>::Type;
+
+// Used for ExtractArgs and ExtractReturnType.
+template <typename Signature>
+struct ExtractArgsImpl;
+
+template <typename R, typename... Args>
+struct ExtractArgsImpl<R(Args...)> {
+  using ReturnType = R;
+  using ArgsList = TypeList<Args...>;
+};
+
+// A type-level function that extracts function arguments into a TypeList.
+// E.g. ExtractArgs<R(A, B, C)> is evaluated to TypeList<A, B, C>.
+template <typename Signature>
+using ExtractArgs = typename ExtractArgsImpl<Signature>::ArgsList;
+
+// A type-level function that extracts the return type of a function.
+// E.g. ExtractReturnType<R(A, B, C)> is evaluated to R.
+template <typename Signature>
+using ExtractReturnType = typename ExtractArgsImpl<Signature>::ReturnType;
+
+template <typename Callable,
+          typename Signature = decltype(&Callable::operator())>
+struct ExtractCallableRunTypeImpl;
+
+template <typename Callable, typename R, typename... Args>
+struct ExtractCallableRunTypeImpl<Callable, R (Callable::*)(Args...)> {
+  using Type = R(Args...);
+};
+
+template <typename Callable, typename R, typename... Args>
+struct ExtractCallableRunTypeImpl<Callable, R (Callable::*)(Args...) const> {
+  using Type = R(Args...);
+};
+
+// Evaluated to RunType of the given callable type.
+// Example:
+//   auto f = [](int, char*) { return 0.1; };
+//   ExtractCallableRunType<decltype(f)>
+//   is evaluated to
+//   double(int, char*);
+template <typename Callable>
+using ExtractCallableRunType =
+    typename ExtractCallableRunTypeImpl<Callable>::Type;
+
+// IsCallableObject<Functor> is std::true_type if |Functor| has operator().
+// Otherwise, it's std::false_type.
+// Example:
+//   IsCallableObject<void(*)()>::value is false.
+//
+//   struct Foo {};
+//   IsCallableObject<void(Foo::*)()>::value is false.
+//
+//   int i = 0;
+//   auto f = [i]() {};
+//   IsCallableObject<decltype(f)>::value is true. A capturing lambda still
+//   has operator(); whether it may be bound is checked separately by the
+//   FunctorTraits specialization, which requires an empty class.
+template <typename Functor, typename SFINAE = void>
+struct IsCallableObject : std::false_type {};
+
+template <typename Callable>
+struct IsCallableObject<Callable, void_t<decltype(&Callable::operator())>>
+    : std::true_type {};
+
+// HasRefCountedTypeAsRawPtr selects true_type when any of the |Args| is a raw
+// pointer to a RefCounted type.
+// Implementation note: This non-specialized case handles the zero-arity case
+// only. Non-zero-arity cases should be handled by the specialization below.
+template <typename... Args>
+struct HasRefCountedTypeAsRawPtr : std::false_type {};
+
+// Implementation note: Select true_type if the first parameter is a raw pointer
+// to a RefCounted type. Otherwise, skip the first parameter and check rest of
+// parameters recursively.
+template <typename T, typename... Args>
+struct HasRefCountedTypeAsRawPtr<T, Args...>
+    : std::conditional_t<NeedsScopedRefptrButGetsRawPtr<T>::value,
+                         std::true_type,
+                         HasRefCountedTypeAsRawPtr<Args...>> {};
+
+// ForceVoidReturn<>
+//
+// Set of templates that support forcing the function return type to void.
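+// E.g. ForceVoidReturn<int(char, double)>::RunType is void(char, double).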
+template <typename Sig>
+struct ForceVoidReturn;
+
+template <typename R, typename... Args>
+struct ForceVoidReturn<R(Args...)> {
+  using RunType = void(Args...);
+};
+
+// FunctorTraits<>
+//
+// See description at top of file.
+template <typename Functor, typename SFINAE>
+struct FunctorTraits;
+
+// For empty callable types.
+// This specialization is intended to allow binding captureless lambdas by
+// base::Bind(), based on the fact that captureless lambdas are empty while
+// capturing lambdas are not. This also allows any functor, as long as it's an
+// empty class.
+// Example:
+//
+//   // Captureless lambdas are allowed.
+//   []() {return 42;};
+//
+//   // Capturing lambdas are *not* allowed.
+//   int x;
+//   [x]() {return x;};
+//
+//   // Any empty class with operator() is allowed.
+//   struct Foo {
+//     void operator()() const {}
+//     // No non-static member variable and no virtual functions.
+//   };
+template <typename Functor>
+struct FunctorTraits<Functor,
+                     std::enable_if_t<IsCallableObject<Functor>::value &&
+                                      std::is_empty<Functor>::value>> {
+  using RunType = ExtractCallableRunType<Functor>;
+  static constexpr bool is_method = false;
+  static constexpr bool is_nullable = false;
+
+  template <typename RunFunctor, typename... RunArgs>
+  static ExtractReturnType<RunType> Invoke(RunFunctor&& functor,
+                                           RunArgs&&... args) {
+    return std::forward<RunFunctor>(functor)(std::forward<RunArgs>(args)...);
+  }
+};
+
+// For functions.
+template <typename R, typename... Args>
+struct FunctorTraits<R (*)(Args...)> {
+  using RunType = R(Args...);
+  static constexpr bool is_method = false;
+  static constexpr bool is_nullable = true;
+
+  template <typename Function, typename... RunArgs>
+  static R Invoke(Function&& function, RunArgs&&... args) {
+    return std::forward<Function>(function)(std::forward<RunArgs>(args)...);
+  }
+};
+
+#if defined(OS_WIN) && !defined(ARCH_CPU_X86_64)
+
+// For functions.
+template <typename R, typename... Args>
+struct FunctorTraits<R(__stdcall*)(Args...)> {
+  using RunType = R(Args...);
+  static constexpr bool is_method = false;
+  static constexpr bool is_nullable = true;
+
+  template <typename... RunArgs>
+  static R Invoke(R(__stdcall* function)(Args...), RunArgs&&... args) {
+    return function(std::forward<RunArgs>(args)...);
+  }
+};
+
+// For functions.
+template <typename R, typename... Args>
+struct FunctorTraits<R(__fastcall*)(Args...)> {
+  using RunType = R(Args...);
+  static constexpr bool is_method = false;
+  static constexpr bool is_nullable = true;
+
+  template <typename... RunArgs>
+  static R Invoke(R(__fastcall* function)(Args...), RunArgs&&... args) {
+    return function(std::forward<RunArgs>(args)...);
+  }
+};
+
+#endif  // defined(OS_WIN) && !defined(ARCH_CPU_X86_64)
+
+// For methods.
+template <typename R, typename Receiver, typename... Args>
+struct FunctorTraits<R (Receiver::*)(Args...)> {
+  using RunType = R(Receiver*, Args...);
+  static constexpr bool is_method = true;
+  static constexpr bool is_nullable = true;
+
+  template <typename Method, typename ReceiverPtr, typename... RunArgs>
+  static R Invoke(Method method,
+                  ReceiverPtr&& receiver_ptr,
+                  RunArgs&&... args) {
+    return ((*receiver_ptr).*method)(std::forward<RunArgs>(args)...);
+  }
+};
+
+// For const methods.
+template <typename R, typename Receiver, typename... Args>
+struct FunctorTraits<R (Receiver::*)(Args...) const> {
+  using RunType = R(const Receiver*, Args...);
+  static constexpr bool is_method = true;
+  static constexpr bool is_nullable = true;
+
+  template <typename Method, typename ReceiverPtr, typename... RunArgs>
+  static R Invoke(Method method,
+                  ReceiverPtr&& receiver_ptr,
+                  RunArgs&&... args) {
+    return ((*receiver_ptr).*method)(std::forward<RunArgs>(args)...);
+  }
+};
+
+#ifdef __cpp_noexcept_function_type
+// noexcept makes a distinct function type in C++17.
+// I.e. `void(*)()` and `void(*)() noexcept` are same in pre-C++17, and
+// different in C++17.
+template <typename R, typename... Args>
+struct FunctorTraits<R (*)(Args...) noexcept> : FunctorTraits<R (*)(Args...)> {
+};
+
+template <typename R, typename Receiver, typename... Args>
+struct FunctorTraits<R (Receiver::*)(Args...) noexcept>
+    : FunctorTraits<R (Receiver::*)(Args...)> {};
+
+template <typename R, typename Receiver, typename... Args>
+struct FunctorTraits<R (Receiver::*)(Args...) const noexcept>
+    : FunctorTraits<R (Receiver::*)(Args...) const> {};
+#endif
+
+// For IgnoreResults.
+template <typename T>
+struct FunctorTraits<IgnoreResultHelper<T>> : FunctorTraits<T> {
+  using RunType =
+      typename ForceVoidReturn<typename FunctorTraits<T>::RunType>::RunType;
+
+  template <typename IgnoreResultType, typename... RunArgs>
+  static void Invoke(IgnoreResultType&& ignore_result_helper,
+                     RunArgs&&... args) {
+    FunctorTraits<T>::Invoke(
+        std::forward<IgnoreResultType>(ignore_result_helper).functor_,
+        std::forward<RunArgs>(args)...);
+  }
+};
+
+// For OnceCallbacks.
+template <typename R, typename... Args>
+struct FunctorTraits<OnceCallback<R(Args...)>> {
+  using RunType = R(Args...);
+  static constexpr bool is_method = false;
+  static constexpr bool is_nullable = true;
+
+  template <typename CallbackType, typename... RunArgs>
+  static R Invoke(CallbackType&& callback, RunArgs&&... args) {
+    DCHECK(!callback.is_null());
+    return std::forward<CallbackType>(callback).Run(
+        std::forward<RunArgs>(args)...);
+  }
+};
+
+// For RepeatingCallbacks.
+template <typename R, typename... Args>
+struct FunctorTraits<RepeatingCallback<R(Args...)>> {
+  using RunType = R(Args...);
+  static constexpr bool is_method = false;
+  static constexpr bool is_nullable = true;
+
+  template <typename CallbackType, typename... RunArgs>
+  static R Invoke(CallbackType&& callback, RunArgs&&... args) {
+    DCHECK(!callback.is_null());
+    return std::forward<CallbackType>(callback).Run(
+        std::forward<RunArgs>(args)...);
+  }
+};
+
+template <typename Functor>
+using MakeFunctorTraits = FunctorTraits<std::decay_t<Functor>>;
+
+// InvokeHelper<>
+//
+// There are two logical InvokeHelper<> specializations: normal and WeakCalls.
+//
+// The normal type just calls the underlying runnable.
+//
+// WeakCalls need special syntax that is applied to the first argument to check
+// if they should no-op themselves.
+template <bool is_weak_call, typename ReturnType>
+struct InvokeHelper;
+
+template <typename ReturnType>
+struct InvokeHelper<false, ReturnType> {
+  template <typename Functor, typename... RunArgs>
+  static inline ReturnType MakeItSo(Functor&& functor, RunArgs&&... args) {
+    using Traits = MakeFunctorTraits<Functor>;
+    return Traits::Invoke(std::forward<Functor>(functor),
+                          std::forward<RunArgs>(args)...);
+  }
+};
+
+template <typename ReturnType>
+struct InvokeHelper<true, ReturnType> {
+  // WeakCalls are only supported for functions with a void return type.
+  // Otherwise, the function result would be undefined if the WeakPtr<>
+  // is invalidated.
+  static_assert(std::is_void<ReturnType>::value,
+                "weak_ptrs can only bind to methods without return values");
+
+  template <typename Functor, typename BoundWeakPtr, typename... RunArgs>
+  static inline void MakeItSo(Functor&& functor,
+                              BoundWeakPtr&& weak_ptr,
+                              RunArgs&&... args) {
+    if (!weak_ptr)
+      return;
+    using Traits = MakeFunctorTraits<Functor>;
+    Traits::Invoke(std::forward<Functor>(functor),
+                   std::forward<BoundWeakPtr>(weak_ptr),
+                   std::forward<RunArgs>(args)...);
+  }
+};
+
+// Invoker<>
+//
+// See description at the top of the file.
+template <typename StorageType, typename UnboundRunType>
+struct Invoker;
+
+template <typename StorageType, typename R, typename... UnboundArgs>
+struct Invoker<StorageType, R(UnboundArgs...)> {
+  static R RunOnce(BindStateBase* base,
+                   PassingTraitsType<UnboundArgs>... unbound_args) {
+    // Local references to make debugger stepping easier. If in a debugger,
+    // you really want to warp ahead and step through the
+    // InvokeHelper<>::MakeItSo() call below.
+    StorageType* storage = static_cast<StorageType*>(base);
+    static constexpr size_t num_bound_args =
+        std::tuple_size<decltype(storage->bound_args_)>::value;
+    return RunImpl(std::move(storage->functor_),
+                   std::move(storage->bound_args_),
+                   std::make_index_sequence<num_bound_args>(),
+                   std::forward<UnboundArgs>(unbound_args)...);
+  }
+
+  static R Run(BindStateBase* base,
+               PassingTraitsType<UnboundArgs>... unbound_args) {
+    // Local references to make debugger stepping easier. If in a debugger,
+    // you really want to warp ahead and step through the
+    // InvokeHelper<>::MakeItSo() call below.
+    const StorageType* storage = static_cast<StorageType*>(base);
+    static constexpr size_t num_bound_args =
+        std::tuple_size<decltype(storage->bound_args_)>::value;
+    return RunImpl(storage->functor_, storage->bound_args_,
+                   std::make_index_sequence<num_bound_args>(),
+                   std::forward<UnboundArgs>(unbound_args)...);
+  }
+
+ private:
+  template <typename Functor, typename BoundArgsTuple, size_t... indices>
+  static inline R RunImpl(Functor&& functor,
+                          BoundArgsTuple&& bound,
+                          std::index_sequence<indices...>,
+                          UnboundArgs&&... unbound_args) {
+    static constexpr bool is_method = MakeFunctorTraits<Functor>::is_method;
+
+    using DecayedArgsTuple = std::decay_t<BoundArgsTuple>;
+    static constexpr bool is_weak_call =
+        IsWeakMethod<is_method,
+                     std::tuple_element_t<indices, DecayedArgsTuple>...>();
+
+    return InvokeHelper<is_weak_call, R>::MakeItSo(
+        std::forward<Functor>(functor),
+        Unwrap(std::get<indices>(std::forward<BoundArgsTuple>(bound)))...,
+        std::forward<UnboundArgs>(unbound_args)...);
+  }
+};
+
+// Extracts necessary type info from Functor and BoundArgs.
+// Used to implement MakeUnboundRunType, BindOnce and BindRepeating.
+template <typename Functor, typename... BoundArgs>
+struct BindTypeHelper {
+  static constexpr size_t num_bounds = sizeof...(BoundArgs);
+  using FunctorTraits = MakeFunctorTraits<Functor>;
+
+  // Example:
+  //   When Functor is `double (Foo::*)(int, const std::string&)`, and BoundArgs
+  //   is a template pack of `Foo*` and `int16_t`:
+  //    - RunType is `double(Foo*, int, const std::string&)`,
+  //    - ReturnType is `double`,
+  //    - RunParamsList is `TypeList<Foo*, int, const std::string&>`,
+  //    - BoundParamsList is `TypeList<Foo*, int>`,
+  //    - UnboundParamsList is `TypeList<const std::string&>`,
+  //    - BoundArgsList is `TypeList<Foo*, int16_t>`,
+  //    - UnboundRunType is `double(const std::string&)`.
+  using RunType = typename FunctorTraits::RunType;
+  using ReturnType = ExtractReturnType<RunType>;
+
+  using RunParamsList = ExtractArgs<RunType>;
+  using BoundParamsList = TakeTypeListItem<num_bounds, RunParamsList>;
+  using UnboundParamsList = DropTypeListItem<num_bounds, RunParamsList>;
+
+  using BoundArgsList = TypeList<BoundArgs...>;
+
+  using UnboundRunType = MakeFunctionType<ReturnType, UnboundParamsList>;
+};
+
+template <typename Functor>
+std::enable_if_t<FunctorTraits<Functor>::is_nullable, bool> IsNull(
+    const Functor& functor) {
+  return !functor;
+}
+
+template <typename Functor>
+std::enable_if_t<!FunctorTraits<Functor>::is_nullable, bool> IsNull(
+    const Functor&) {
+  return false;
+}
+
+// Used by ApplyCancellationTraits below.
+template <typename Functor, typename BoundArgsTuple, size_t... indices>
+bool ApplyCancellationTraitsImpl(const Functor& functor,
+                                 const BoundArgsTuple& bound_args,
+                                 std::index_sequence<indices...>) {
+  return CallbackCancellationTraits<Functor, BoundArgsTuple>::IsCancelled(
+      functor, std::get<indices>(bound_args)...);
+}
+
+// Relays |base| to the corresponding
+// CallbackCancellationTraits<>::IsCancelled(). Returns true if the callback
+// that |base| represents is cancelled.
+template <typename BindStateType>
+bool ApplyCancellationTraits(const BindStateBase* base) {
+  const BindStateType* storage = static_cast<const BindStateType*>(base);
+  static constexpr size_t num_bound_args =
+      std::tuple_size<decltype(storage->bound_args_)>::value;
+  return ApplyCancellationTraitsImpl(
+      storage->functor_, storage->bound_args_,
+      std::make_index_sequence<num_bound_args>());
+}
+
+// BindState<>
+//
+// This stores all the state passed into Bind().
+template <typename Functor, typename... BoundArgs>
+struct BindState final : BindStateBase {
+  using IsCancellable = std::integral_constant<
+      bool,
+      CallbackCancellationTraits<Functor,
+                                 std::tuple<BoundArgs...>>::is_cancellable>;
+
+  template <typename ForwardFunctor, typename... ForwardBoundArgs>
+  explicit BindState(BindStateBase::InvokeFuncStorage invoke_func,
+                     ForwardFunctor&& functor,
+                     ForwardBoundArgs&&... bound_args)
+      // IsCancellable is std::false_type if
+      // CallbackCancellationTraits<>::IsCancelled always returns false.
+      // Otherwise, it's std::true_type.
+      : BindState(IsCancellable{},
+                  invoke_func,
+                  std::forward<ForwardFunctor>(functor),
+                  std::forward<ForwardBoundArgs>(bound_args)...) {}
+
+  Functor functor_;
+  std::tuple<BoundArgs...> bound_args_;
+
+ private:
+  template <typename ForwardFunctor, typename... ForwardBoundArgs>
+  explicit BindState(std::true_type,
+                     BindStateBase::InvokeFuncStorage invoke_func,
+                     ForwardFunctor&& functor,
+                     ForwardBoundArgs&&... bound_args)
+      : BindStateBase(invoke_func,
+                      &Destroy,
+                      &ApplyCancellationTraits<BindState>),
+        functor_(std::forward<ForwardFunctor>(functor)),
+        bound_args_(std::forward<ForwardBoundArgs>(bound_args)...) {
+    DCHECK(!IsNull(functor_));
+  }
+
+  template <typename ForwardFunctor, typename... ForwardBoundArgs>
+  explicit BindState(std::false_type,
+                     BindStateBase::InvokeFuncStorage invoke_func,
+                     ForwardFunctor&& functor,
+                     ForwardBoundArgs&&... bound_args)
+      : BindStateBase(invoke_func, &Destroy),
+        functor_(std::forward<ForwardFunctor>(functor)),
+        bound_args_(std::forward<ForwardBoundArgs>(bound_args)...) {
+    DCHECK(!IsNull(functor_));
+  }
+
+  ~BindState() = default;
+
+  static void Destroy(const BindStateBase* self) {
+    delete static_cast<const BindState*>(self);
+  }
+};
+
+// Used to implement MakeBindStateType.
+template <bool is_method, typename Functor, typename... BoundArgs>
+struct MakeBindStateTypeImpl;
+
+template <typename Functor, typename... BoundArgs>
+struct MakeBindStateTypeImpl<false, Functor, BoundArgs...> {
+  static_assert(!HasRefCountedTypeAsRawPtr<std::decay_t<BoundArgs>...>::value,
+                "A parameter is a refcounted type and needs scoped_refptr.");
+  using Type = BindState<std::decay_t<Functor>, std::decay_t<BoundArgs>...>;
+};
+
+template <typename Functor>
+struct MakeBindStateTypeImpl<true, Functor> {
+  using Type = BindState<std::decay_t<Functor>>;
+};
+
+template <typename Functor, typename Receiver, typename... BoundArgs>
+struct MakeBindStateTypeImpl<true, Functor, Receiver, BoundArgs...> {
+ private:
+  using DecayedReceiver = std::decay_t<Receiver>;
+
+  static_assert(!std::is_array<std::remove_reference_t<Receiver>>::value,
+                "First bound argument to a method cannot be an array.");
+  static_assert(
+      !std::is_pointer<DecayedReceiver>::value ||
+          IsRefCountedType<std::remove_pointer_t<DecayedReceiver>>::value,
+      "Receivers may not be raw pointers. If using a raw pointer here is safe"
+      " and has no lifetime concerns, use base::Unretained() and document why"
+      " it's safe.");
+  static_assert(!HasRefCountedTypeAsRawPtr<std::decay_t<BoundArgs>...>::value,
+                "A parameter is a refcounted type and needs scoped_refptr.");
+
+ public:
+  using Type = BindState<
+      std::decay_t<Functor>,
+      std::conditional_t<std::is_pointer<DecayedReceiver>::value,
+                         scoped_refptr<std::remove_pointer_t<DecayedReceiver>>,
+                         DecayedReceiver>,
+      std::decay_t<BoundArgs>...>;
+};
+
+template <typename Functor, typename... BoundArgs>
+using MakeBindStateType =
+    typename MakeBindStateTypeImpl<MakeFunctorTraits<Functor>::is_method,
+                                   Functor,
+                                   BoundArgs...>::Type;
+
+}  // namespace internal
+
+// An injection point to control |this| pointer behavior on a method invocation.
+// If IsWeakReceiver<> is true_type for |T| and |T| is used as the receiver of a
+// method, base::Bind cancels the method invocation if the receiver tests as
+// false.
+// E.g. Foo::bar() is not called:
+//   struct Foo : base::SupportsWeakPtr<Foo> {
+//     void bar() {}
+//   };
+//
+//   WeakPtr<Foo> oo = nullptr;
+//   base::Bind(&Foo::bar, oo).Run();
+template <typename T>
+struct IsWeakReceiver : std::false_type {};
+
+template <typename T>
+struct IsWeakReceiver<internal::ConstRefWrapper<T>> : IsWeakReceiver<T> {};
+
+template <typename T>
+struct IsWeakReceiver<WeakPtr<T>> : std::true_type {};
+
+// An injection point to control how bound objects are passed to the target
+// function. BindUnwrapTraits<>::Unwrap() is called for each bound object right
+// before the target function is invoked.
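+//
+// A sketch of what a custom specialization could look like (|MyWrapper| is
+// purely illustrative):
+//
+//   template <typename T>
+//   struct BindUnwrapTraits<MyWrapper<T>> {
+//     static T* Unwrap(const MyWrapper<T>& o) { return o.get(); }
+//   };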
+template <typename>
+struct BindUnwrapTraits {
+  template <typename T>
+  static T&& Unwrap(T&& o) {
+    return std::forward<T>(o);
+  }
+};
+
+template <typename T>
+struct BindUnwrapTraits<internal::UnretainedWrapper<T>> {
+  static T* Unwrap(const internal::UnretainedWrapper<T>& o) { return o.get(); }
+};
+
+template <typename T>
+struct BindUnwrapTraits<internal::ConstRefWrapper<T>> {
+  static const T& Unwrap(const internal::ConstRefWrapper<T>& o) {
+    return o.get();
+  }
+};
+
+template <typename T>
+struct BindUnwrapTraits<internal::RetainedRefWrapper<T>> {
+  static T* Unwrap(const internal::RetainedRefWrapper<T>& o) { return o.get(); }
+};
+
+template <typename T>
+struct BindUnwrapTraits<internal::OwnedWrapper<T>> {
+  static T* Unwrap(const internal::OwnedWrapper<T>& o) { return o.get(); }
+};
+
+template <typename T>
+struct BindUnwrapTraits<internal::PassedWrapper<T>> {
+  static T Unwrap(const internal::PassedWrapper<T>& o) { return o.Take(); }
+};
+
+// CallbackCancellationTraits allows customization of Callback's cancellation
+// semantics. By default, callbacks are not cancellable. A specialization should
+// set is_cancellable = true and implement an IsCancelled() that returns whether
+// the callback should be cancelled.
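+//
+// A sketch of what such a specialization could look like (|MyFunctor| is
+// purely illustrative):
+//
+//   template <typename... BoundArgs>
+//   struct CallbackCancellationTraits<MyFunctor, std::tuple<BoundArgs...>> {
+//     static constexpr bool is_cancellable = true;
+//     static bool IsCancelled(const MyFunctor& functor, const BoundArgs&...) {
+//       return functor.is_cancelled();
+//     }
+//   };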
+template <typename Functor, typename BoundArgsTuple, typename SFINAE>
+struct CallbackCancellationTraits {
+  static constexpr bool is_cancellable = false;
+};
+
+// Specialization for method bound to weak pointer receiver.
+template <typename Functor, typename... BoundArgs>
+struct CallbackCancellationTraits<
+    Functor,
+    std::tuple<BoundArgs...>,
+    std::enable_if_t<
+        internal::IsWeakMethod<internal::FunctorTraits<Functor>::is_method,
+                               BoundArgs...>::value>> {
+  static constexpr bool is_cancellable = true;
+
+  template <typename Receiver, typename... Args>
+  static bool IsCancelled(const Functor&,
+                          const Receiver& receiver,
+                          const Args&...) {
+    return !receiver;
+  }
+};
+
+// Specialization for a nested bind.
+template <typename Signature, typename... BoundArgs>
+struct CallbackCancellationTraits<OnceCallback<Signature>,
+                                  std::tuple<BoundArgs...>> {
+  static constexpr bool is_cancellable = true;
+
+  template <typename Functor>
+  static bool IsCancelled(const Functor& functor, const BoundArgs&...) {
+    return functor.IsCancelled();
+  }
+};
+
+template <typename Signature, typename... BoundArgs>
+struct CallbackCancellationTraits<RepeatingCallback<Signature>,
+                                  std::tuple<BoundArgs...>> {
+  static constexpr bool is_cancellable = true;
+
+  template <typename Functor>
+  static bool IsCancelled(const Functor& functor, const BoundArgs&...) {
+    return functor.IsCancelled();
+  }
+};
+
+// Returns the RunType of the bound functor, with the bound parameters removed.
+// E.g. MakeUnboundRunType<R(A, B, C), A, B> is evaluated to R(C).
+template <typename Functor, typename... BoundArgs>
+using MakeUnboundRunType =
+    typename internal::BindTypeHelper<Functor, BoundArgs...>::UnboundRunType;
+
+}  // namespace base
+
+#endif  // BASE_BIND_INTERNAL_H_
diff --git a/base/bind_unittest.cc b/base/bind_unittest.cc
new file mode 100644
index 0000000..f1d19a1
--- /dev/null
+++ b/base/bind_unittest.cc
@@ -0,0 +1,1496 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/bind.h"
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/weak_ptr.h"
+#include "base/test/bind_test_util.h"
+#include "base/test/gtest_util.h"
+#include "build/build_config.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::_;
+using ::testing::Mock;
+using ::testing::ByMove;
+using ::testing::Return;
+using ::testing::StrictMock;
+
+namespace base {
+namespace {
+
+class IncompleteType;
+
+class NoRef {
+ public:
+  NoRef() = default;
+
+  MOCK_METHOD0(VoidMethod0, void());
+  MOCK_CONST_METHOD0(VoidConstMethod0, void());
+
+  MOCK_METHOD0(IntMethod0, int());
+  MOCK_CONST_METHOD0(IntConstMethod0, int());
+
+  MOCK_METHOD1(VoidMethodWithIntArg, void(int));
+  MOCK_METHOD0(UniquePtrMethod0, std::unique_ptr<int>());
+
+ private:
+  // Particularly important in this test to ensure no copies are made.
+  DISALLOW_COPY_AND_ASSIGN(NoRef);
+};
+
+class HasRef : public NoRef {
+ public:
+  HasRef() = default;
+
+  MOCK_CONST_METHOD0(AddRef, void());
+  MOCK_CONST_METHOD0(Release, bool());
+
+ private:
+  // Particularly important in this test to ensure no copies are made.
+  DISALLOW_COPY_AND_ASSIGN(HasRef);
+};
+
+class HasRefPrivateDtor : public HasRef {
+ private:
+  ~HasRefPrivateDtor() = default;
+};
+
+static const int kParentValue = 1;
+static const int kChildValue = 2;
+
+class Parent {
+ public:
+  void AddRef() const {}
+  void Release() const {}
+  virtual void VirtualSet() { value = kParentValue; }
+  void NonVirtualSet() { value = kParentValue; }
+  int value;
+};
+
+class Child : public Parent {
+ public:
+  void VirtualSet() override { value = kChildValue; }
+  void NonVirtualSet() { value = kChildValue; }
+};
+
+class NoRefParent {
+ public:
+  virtual void VirtualSet() { value = kParentValue; }
+  void NonVirtualSet() { value = kParentValue; }
+  int value;
+};
+
+class NoRefChild : public NoRefParent {
+  void VirtualSet() override { value = kChildValue; }
+  void NonVirtualSet() { value = kChildValue; }
+};
+
+// Used for probing the number of copies and moves that occur if a type must be
+// coerced during argument forwarding in the Run() methods.
+struct DerivedCopyMoveCounter {
+  DerivedCopyMoveCounter(int* copies,
+                         int* assigns,
+                         int* move_constructs,
+                         int* move_assigns)
+      : copies_(copies),
+        assigns_(assigns),
+        move_constructs_(move_constructs),
+        move_assigns_(move_assigns) {}
+  int* copies_;
+  int* assigns_;
+  int* move_constructs_;
+  int* move_assigns_;
+};
+
+// Used for probing the number of copies and moves in an argument.
+class CopyMoveCounter {
+ public:
+  CopyMoveCounter(int* copies,
+                  int* assigns,
+                  int* move_constructs,
+                  int* move_assigns)
+      : copies_(copies),
+        assigns_(assigns),
+        move_constructs_(move_constructs),
+        move_assigns_(move_assigns) {}
+
+  CopyMoveCounter(const CopyMoveCounter& other)
+      : copies_(other.copies_),
+        assigns_(other.assigns_),
+        move_constructs_(other.move_constructs_),
+        move_assigns_(other.move_assigns_) {
+    (*copies_)++;
+  }
+
+  CopyMoveCounter(CopyMoveCounter&& other)
+      : copies_(other.copies_),
+        assigns_(other.assigns_),
+        move_constructs_(other.move_constructs_),
+        move_assigns_(other.move_assigns_) {
+    (*move_constructs_)++;
+  }
+
+  // Probing for copies from coercion.
+  explicit CopyMoveCounter(const DerivedCopyMoveCounter& other)
+      : copies_(other.copies_),
+        assigns_(other.assigns_),
+        move_constructs_(other.move_constructs_),
+        move_assigns_(other.move_assigns_) {
+    (*copies_)++;
+  }
+
+  // Probing for moves from coercion.
+  explicit CopyMoveCounter(DerivedCopyMoveCounter&& other)
+      : copies_(other.copies_),
+        assigns_(other.assigns_),
+        move_constructs_(other.move_constructs_),
+        move_assigns_(other.move_assigns_) {
+    (*move_constructs_)++;
+  }
+
+  const CopyMoveCounter& operator=(const CopyMoveCounter& rhs) {
+    copies_ = rhs.copies_;
+    assigns_ = rhs.assigns_;
+    move_constructs_ = rhs.move_constructs_;
+    move_assigns_ = rhs.move_assigns_;
+
+    (*assigns_)++;
+
+    return *this;
+  }
+
+  const CopyMoveCounter& operator=(CopyMoveCounter&& rhs) {
+    copies_ = rhs.copies_;
+    assigns_ = rhs.assigns_;
+    move_constructs_ = rhs.move_constructs_;
+    move_assigns_ = rhs.move_assigns_;
+
+    (*move_assigns_)++;
+
+    return *this;
+  }
+
+  int copies() const {
+    return *copies_;
+  }
+
+ private:
+  int* copies_;
+  int* assigns_;
+  int* move_constructs_;
+  int* move_assigns_;
+};
+
+// Used for probing the number of copies in an argument. The instance is a
+// copyable and non-movable type.
+class CopyCounter {
+ public:
+  CopyCounter(int* copies, int* assigns)
+      : counter_(copies, assigns, nullptr, nullptr) {}
+  CopyCounter(const CopyCounter& other) = default;
+  CopyCounter& operator=(const CopyCounter& other) = default;
+
+  explicit CopyCounter(const DerivedCopyMoveCounter& other) : counter_(other) {}
+
+  int copies() const { return counter_.copies(); }
+
+ private:
+  CopyMoveCounter counter_;
+};
+
+// Used for probing the number of moves in an argument. The instance is a
+// non-copyable and movable type.
+class MoveCounter {
+ public:
+  MoveCounter(int* move_constructs, int* move_assigns)
+      : counter_(nullptr, nullptr, move_constructs, move_assigns) {}
+  MoveCounter(MoveCounter&& other) : counter_(std::move(other.counter_)) {}
+  MoveCounter& operator=(MoveCounter&& other) {
+    counter_ = std::move(other.counter_);
+    return *this;
+  }
+
+  explicit MoveCounter(DerivedCopyMoveCounter&& other)
+      : counter_(std::move(other)) {}
+
+ private:
+  CopyMoveCounter counter_;
+};
+
+class DeleteCounter {
+ public:
+  explicit DeleteCounter(int* deletes)
+      : deletes_(deletes) {
+  }
+
+  ~DeleteCounter() {
+    (*deletes_)++;
+  }
+
+  void VoidMethod0() {}
+
+ private:
+  int* deletes_;
+};
+
+template <typename T>
+T PassThru(T scoper) {
+  return scoper;
+}
+
+// Some test functions that we can Bind to.
+template <typename T>
+T PolymorphicIdentity(T t) {
+  return t;
+}
+
+template <typename... Ts>
+struct VoidPolymorphic {
+  static void Run(Ts... t) {}
+};
+
+int Identity(int n) {
+  return n;
+}
+
+int ArrayGet(const int array[], int n) {
+  return array[n];
+}
+
+int Sum(int a, int b, int c, int d, int e, int f) {
+  return a + b + c + d + e + f;
+}
+
+const char* CStringIdentity(const char* s) {
+  return s;
+}
+
+int GetCopies(const CopyMoveCounter& counter) {
+  return counter.copies();
+}
+
+int UnwrapNoRefParent(NoRefParent p) {
+  return p.value;
+}
+
+int UnwrapNoRefParentPtr(NoRefParent* p) {
+  return p->value;
+}
+
+int UnwrapNoRefParentConstRef(const NoRefParent& p) {
+  return p.value;
+}
+
+void RefArgSet(int &n) {
+  n = 2;
+}
+
+void PtrArgSet(int *n) {
+  *n = 2;
+}
+
+int FunctionWithWeakFirstParam(WeakPtr<NoRef> o, int n) {
+  return n;
+}
+
+int FunctionWithScopedRefptrFirstParam(const scoped_refptr<HasRef>& o, int n) {
+  return n;
+}
+
+void TakesACallback(const Closure& callback) {
+  callback.Run();
+}
+
+int Noexcept() noexcept {
+  return 42;
+}
+
+class BindTest : public ::testing::Test {
+ public:
+  BindTest() {
+    const_has_ref_ptr_ = &has_ref_;
+    const_no_ref_ptr_ = &no_ref_;
+    static_func_mock_ptr = &static_func_mock_;
+  }
+
+  ~BindTest() override = default;
+
+  static void VoidFunc0() {
+    static_func_mock_ptr->VoidMethod0();
+  }
+
+  static int IntFunc0() { return static_func_mock_ptr->IntMethod0(); }
+  int NoexceptMethod() noexcept { return 42; }
+  int ConstNoexceptMethod() const noexcept { return 42; }
+
+ protected:
+  StrictMock<NoRef> no_ref_;
+  StrictMock<HasRef> has_ref_;
+  const HasRef* const_has_ref_ptr_;
+  const NoRef* const_no_ref_ptr_;
+  StrictMock<NoRef> static_func_mock_;
+
+  // Used by the static functions to perform expectations.
+  static StrictMock<NoRef>* static_func_mock_ptr;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(BindTest);
+};
+
+StrictMock<NoRef>* BindTest::static_func_mock_ptr;
+StrictMock<NoRef>* g_func_mock_ptr;
+
+void VoidFunc0() {
+  g_func_mock_ptr->VoidMethod0();
+}
+
+int IntFunc0() {
+  return g_func_mock_ptr->IntMethod0();
+}
+
+TEST_F(BindTest, BasicTest) {
+  Callback<int(int, int, int)> cb = Bind(&Sum, 32, 16, 8);
+  EXPECT_EQ(92, cb.Run(13, 12, 11));
+
+  Callback<int(int, int, int, int, int, int)> c1 = Bind(&Sum);
+  EXPECT_EQ(69, c1.Run(14, 13, 12, 11, 10, 9));
+
+  Callback<int(int, int, int)> c2 = Bind(c1, 32, 16, 8);
+  EXPECT_EQ(86, c2.Run(11, 10, 9));
+
+  Callback<int()> c3 = Bind(c2, 4, 2, 1);
+  EXPECT_EQ(63, c3.Run());
+}
+
+// Test that currying the rvalue result of another Bind() works correctly.
+//   - An rvalue should be usable as an argument to Bind().
+//   - Multiple runs of the resulting Callback remain valid.
+TEST_F(BindTest, CurryingRvalueResultOfBind) {
+  int n = 0;
+  RepeatingClosure cb = BindRepeating(&TakesACallback,
+                                      BindRepeating(&PtrArgSet, &n));
+
+  // If we implement Bind() such that the return value has auto_ptr-like
+  // semantics, the second call here will fail because ownership of
+  // the internal BindState<> would have been transferred to a *temporary*
+  // construction of a Callback object on the first call.
+  cb.Run();
+  EXPECT_EQ(2, n);
+
+  n = 0;
+  cb.Run();
+  EXPECT_EQ(2, n);
+}
+
+TEST_F(BindTest, RepeatingCallbackBasicTest) {
+  RepeatingCallback<int(int)> c0 = BindRepeating(&Sum, 1, 2, 4, 8, 16);
+
+  // RepeatingCallback can run via an lvalue reference.
+  EXPECT_EQ(63, c0.Run(32));
+
+  // It is valid to call a RepeatingCallback more than once.
+  EXPECT_EQ(54, c0.Run(23));
+
+  // BindRepeating can handle a RepeatingCallback as the target functor.
+  RepeatingCallback<int()> c1 = BindRepeating(c0, 11);
+
+  // RepeatingCallback can run via an rvalue reference.
+  EXPECT_EQ(42, std::move(c1).Run());
+
+  // BindRepeating can handle an rvalue reference to a RepeatingCallback.
+  EXPECT_EQ(32, BindRepeating(std::move(c0), 1).Run());
+}
+
+TEST_F(BindTest, OnceCallbackBasicTest) {
+  OnceCallback<int(int)> c0 = BindOnce(&Sum, 1, 2, 4, 8, 16);
+
+  // OnceCallback can run via an rvalue reference.
+  EXPECT_EQ(63, std::move(c0).Run(32));
+
+  // After running via an rvalue reference, the value of the OnceCallback
+  // is undefined. The implementation simply clears the instance after the
+  // invocation.
+  EXPECT_TRUE(c0.is_null());
+
+  c0 = BindOnce(&Sum, 2, 3, 5, 7, 11);
+
+  // BindOnce can handle an rvalue reference to an OnceCallback as the
+  // target functor.
+  OnceCallback<int()> c1 = BindOnce(std::move(c0), 13);
+  EXPECT_EQ(41, std::move(c1).Run());
+
+  RepeatingCallback<int(int)> c2 = BindRepeating(&Sum, 2, 3, 5, 7, 11);
+  EXPECT_EQ(41, BindOnce(c2, 13).Run());
+}
+
+// IgnoreResult adapter test.
+//   - Function with return value.
+//   - Method with return value.
+//   - Const Method with return value.
+//   - Method with return value bound to WeakPtr<>.
+//   - Const Method with return value bound to WeakPtr<>.
+TEST_F(BindTest, IgnoreResultForRepeating) {
+  EXPECT_CALL(static_func_mock_, IntMethod0()).WillOnce(Return(1337));
+  EXPECT_CALL(has_ref_, AddRef()).Times(2);
+  EXPECT_CALL(has_ref_, Release()).Times(2);
+  EXPECT_CALL(has_ref_, IntMethod0()).WillOnce(Return(10));
+  EXPECT_CALL(has_ref_, IntConstMethod0()).WillOnce(Return(11));
+  EXPECT_CALL(no_ref_, IntMethod0()).WillOnce(Return(12));
+  EXPECT_CALL(no_ref_, IntConstMethod0()).WillOnce(Return(13));
+
+  RepeatingClosure normal_func_cb = BindRepeating(IgnoreResult(&IntFunc0));
+  normal_func_cb.Run();
+
+  RepeatingClosure non_void_method_cb =
+      BindRepeating(IgnoreResult(&HasRef::IntMethod0), &has_ref_);
+  non_void_method_cb.Run();
+
+  RepeatingClosure non_void_const_method_cb =
+      BindRepeating(IgnoreResult(&HasRef::IntConstMethod0), &has_ref_);
+  non_void_const_method_cb.Run();
+
+  WeakPtrFactory<NoRef> weak_factory(&no_ref_);
+  WeakPtrFactory<const NoRef> const_weak_factory(const_no_ref_ptr_);
+
+  RepeatingClosure non_void_weak_method_cb =
+      BindRepeating(IgnoreResult(&NoRef::IntMethod0),
+                    weak_factory.GetWeakPtr());
+  non_void_weak_method_cb.Run();
+
+  RepeatingClosure non_void_weak_const_method_cb =
+      BindRepeating(IgnoreResult(&NoRef::IntConstMethod0),
+                    weak_factory.GetWeakPtr());
+  non_void_weak_const_method_cb.Run();
+
+  weak_factory.InvalidateWeakPtrs();
+  non_void_weak_const_method_cb.Run();
+  non_void_weak_method_cb.Run();
+}
+
+TEST_F(BindTest, IgnoreResultForOnce) {
+  EXPECT_CALL(static_func_mock_, IntMethod0()).WillOnce(Return(1337));
+  EXPECT_CALL(has_ref_, AddRef()).Times(2);
+  EXPECT_CALL(has_ref_, Release()).Times(2);
+  EXPECT_CALL(has_ref_, IntMethod0()).WillOnce(Return(10));
+  EXPECT_CALL(has_ref_, IntConstMethod0()).WillOnce(Return(11));
+
+  OnceClosure normal_func_cb = BindOnce(IgnoreResult(&IntFunc0));
+  std::move(normal_func_cb).Run();
+
+  OnceClosure non_void_method_cb =
+      BindOnce(IgnoreResult(&HasRef::IntMethod0), &has_ref_);
+  std::move(non_void_method_cb).Run();
+
+  OnceClosure non_void_const_method_cb =
+      BindOnce(IgnoreResult(&HasRef::IntConstMethod0), &has_ref_);
+  std::move(non_void_const_method_cb).Run();
+
+  WeakPtrFactory<NoRef> weak_factory(&no_ref_);
+  WeakPtrFactory<const NoRef> const_weak_factory(const_no_ref_ptr_);
+
+  OnceClosure non_void_weak_method_cb =
+      BindOnce(IgnoreResult(&NoRef::IntMethod0),
+               weak_factory.GetWeakPtr());
+  OnceClosure non_void_weak_const_method_cb =
+      BindOnce(IgnoreResult(&NoRef::IntConstMethod0),
+               weak_factory.GetWeakPtr());
+
+  weak_factory.InvalidateWeakPtrs();
+  std::move(non_void_weak_const_method_cb).Run();
+  std::move(non_void_weak_method_cb).Run();
+}
+
+// Functions that take reference parameters.
+//  - Forced reference parameter type still stores a copy.
+//  - Forced const reference parameter type still stores a copy.
+TEST_F(BindTest, ReferenceArgumentBindingForRepeating) {
+  int n = 1;
+  int& ref_n = n;
+  const int& const_ref_n = n;
+
+  RepeatingCallback<int()> ref_copies_cb = BindRepeating(&Identity, ref_n);
+  EXPECT_EQ(n, ref_copies_cb.Run());
+  n++;
+  EXPECT_EQ(n - 1, ref_copies_cb.Run());
+
+  RepeatingCallback<int()> const_ref_copies_cb =
+      BindRepeating(&Identity, const_ref_n);
+  EXPECT_EQ(n, const_ref_copies_cb.Run());
+  n++;
+  EXPECT_EQ(n - 1, const_ref_copies_cb.Run());
+}
+
+TEST_F(BindTest, ReferenceArgumentBindingForOnce) {
+  int n = 1;
+  int& ref_n = n;
+  const int& const_ref_n = n;
+
+  OnceCallback<int()> ref_copies_cb = BindOnce(&Identity, ref_n);
+  n++;
+  EXPECT_EQ(n - 1, std::move(ref_copies_cb).Run());
+
+  OnceCallback<int()> const_ref_copies_cb =
+      BindOnce(&Identity, const_ref_n);
+  n++;
+  EXPECT_EQ(n - 1, std::move(const_ref_copies_cb).Run());
+}
+
+// Check that we can pass in arrays and have them be stored as a pointer.
+//  - Array of values stores a pointer.
+//  - Array of const values stores a pointer.
+TEST_F(BindTest, ArrayArgumentBindingForRepeating) {
+  int array[4] = {1, 1, 1, 1};
+  const int (*const_array_ptr)[4] = &array;
+
+  RepeatingCallback<int()> array_cb = BindRepeating(&ArrayGet, array, 1);
+  EXPECT_EQ(1, array_cb.Run());
+
+  RepeatingCallback<int()> const_array_cb =
+      BindRepeating(&ArrayGet, *const_array_ptr, 1);
+  EXPECT_EQ(1, const_array_cb.Run());
+
+  array[1] = 3;
+  EXPECT_EQ(3, array_cb.Run());
+  EXPECT_EQ(3, const_array_cb.Run());
+}
+
+TEST_F(BindTest, ArrayArgumentBindingForOnce) {
+  int array[4] = {1, 1, 1, 1};
+  const int (*const_array_ptr)[4] = &array;
+
+  OnceCallback<int()> array_cb = BindOnce(&ArrayGet, array, 1);
+  OnceCallback<int()> const_array_cb =
+      BindOnce(&ArrayGet, *const_array_ptr, 1);
+
+  array[1] = 3;
+  EXPECT_EQ(3, std::move(array_cb).Run());
+  EXPECT_EQ(3, std::move(const_array_cb).Run());
+}
+
+// WeakPtr() support.
+//   - Method bound to WeakPtr<> to non-const object.
+//   - Const method bound to WeakPtr<> to non-const object.
+//   - Const method bound to WeakPtr<> to const object.
+//   - Normal function with WeakPtr<> as the first parameter can have a return
+//     type and is not canceled.
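+//
+// Sketch of the pattern under test (Foo here is a hypothetical class):
+//
+//   WeakPtrFactory<Foo> factory(&foo);
+//   RepeatingClosure cb = BindRepeating(&Foo::Method, factory.GetWeakPtr());
+//   factory.InvalidateWeakPtrs();
+//   cb.Run();  // No-op: the bound WeakPtr was invalidated.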
+TEST_F(BindTest, WeakPtrForRepeating) {
+  EXPECT_CALL(no_ref_, VoidMethod0());
+  EXPECT_CALL(no_ref_, VoidConstMethod0()).Times(2);
+
+  WeakPtrFactory<NoRef> weak_factory(&no_ref_);
+  WeakPtrFactory<const NoRef> const_weak_factory(const_no_ref_ptr_);
+
+  RepeatingClosure method_cb =
+      BindRepeating(&NoRef::VoidMethod0, weak_factory.GetWeakPtr());
+  method_cb.Run();
+
+  RepeatingClosure const_method_cb =
+      BindRepeating(&NoRef::VoidConstMethod0, const_weak_factory.GetWeakPtr());
+  const_method_cb.Run();
+
+  RepeatingClosure const_method_const_ptr_cb =
+      BindRepeating(&NoRef::VoidConstMethod0, const_weak_factory.GetWeakPtr());
+  const_method_const_ptr_cb.Run();
+
+  RepeatingCallback<int(int)> normal_func_cb =
+      BindRepeating(&FunctionWithWeakFirstParam, weak_factory.GetWeakPtr());
+  EXPECT_EQ(1, normal_func_cb.Run(1));
+
+  weak_factory.InvalidateWeakPtrs();
+  const_weak_factory.InvalidateWeakPtrs();
+
+  method_cb.Run();
+  const_method_cb.Run();
+  const_method_const_ptr_cb.Run();
+
+  // Still runs even after the pointers are invalidated.
+  EXPECT_EQ(2, normal_func_cb.Run(2));
+}
+
+TEST_F(BindTest, WeakPtrForOnce) {
+  WeakPtrFactory<NoRef> weak_factory(&no_ref_);
+  WeakPtrFactory<const NoRef> const_weak_factory(const_no_ref_ptr_);
+
+  OnceClosure method_cb =
+      BindOnce(&NoRef::VoidMethod0, weak_factory.GetWeakPtr());
+  OnceClosure const_method_cb =
+      BindOnce(&NoRef::VoidConstMethod0, const_weak_factory.GetWeakPtr());
+  OnceClosure const_method_const_ptr_cb =
+      BindOnce(&NoRef::VoidConstMethod0, const_weak_factory.GetWeakPtr());
+  Callback<int(int)> normal_func_cb =
+      Bind(&FunctionWithWeakFirstParam, weak_factory.GetWeakPtr());
+
+  weak_factory.InvalidateWeakPtrs();
+  const_weak_factory.InvalidateWeakPtrs();
+
+  std::move(method_cb).Run();
+  std::move(const_method_cb).Run();
+  std::move(const_method_const_ptr_cb).Run();
+
+  // Still runs even after the pointers are invalidated.
+  EXPECT_EQ(2, std::move(normal_func_cb).Run(2));
+}
+
+// ConstRef() wrapper support.
+//   - Binding without ConstRef takes a copy.
+//   - Binding a ConstRef takes a reference.
+//   - Invoking a callback bound with ConstRef does not copy the argument.
+TEST_F(BindTest, ConstRefForRepeating) {
+  int n = 1;
+
+  RepeatingCallback<int()> copy_cb = BindRepeating(&Identity, n);
+  RepeatingCallback<int()> const_ref_cb = BindRepeating(&Identity, ConstRef(n));
+  EXPECT_EQ(n, copy_cb.Run());
+  EXPECT_EQ(n, const_ref_cb.Run());
+  n++;
+  EXPECT_EQ(n - 1, copy_cb.Run());
+  EXPECT_EQ(n, const_ref_cb.Run());
+
+  int copies = 0;
+  int assigns = 0;
+  int move_constructs = 0;
+  int move_assigns = 0;
+  CopyMoveCounter counter(&copies, &assigns, &move_constructs, &move_assigns);
+  RepeatingCallback<int()> all_const_ref_cb =
+      BindRepeating(&GetCopies, ConstRef(counter));
+  EXPECT_EQ(0, all_const_ref_cb.Run());
+  EXPECT_EQ(0, copies);
+  EXPECT_EQ(0, assigns);
+  EXPECT_EQ(0, move_constructs);
+  EXPECT_EQ(0, move_assigns);
+}
+
+TEST_F(BindTest, ConstRefForOnce) {
+  int n = 1;
+
+  OnceCallback<int()> copy_cb = BindOnce(&Identity, n);
+  OnceCallback<int()> const_ref_cb = BindOnce(&Identity, ConstRef(n));
+  n++;
+  EXPECT_EQ(n - 1, std::move(copy_cb).Run());
+  EXPECT_EQ(n, std::move(const_ref_cb).Run());
+
+  int copies = 0;
+  int assigns = 0;
+  int move_constructs = 0;
+  int move_assigns = 0;
+  CopyMoveCounter counter(&copies, &assigns, &move_constructs, &move_assigns);
+  OnceCallback<int()> all_const_ref_cb =
+      BindOnce(&GetCopies, ConstRef(counter));
+  EXPECT_EQ(0, std::move(all_const_ref_cb).Run());
+  EXPECT_EQ(0, copies);
+  EXPECT_EQ(0, assigns);
+  EXPECT_EQ(0, move_constructs);
+  EXPECT_EQ(0, move_assigns);
+}
+
+// Test Owned() support.
+TEST_F(BindTest, OwnedForRepeating) {
+  int deletes = 0;
+  DeleteCounter* counter = new DeleteCounter(&deletes);
+
+  // The callback retains ownership: repeated runs return the same pointer
+  // without deleting, and the delete happens on Callback destruction/reset.
+  RepeatingCallback<DeleteCounter*()> no_capture_cb =
+      BindRepeating(&PolymorphicIdentity<DeleteCounter*>, Owned(counter));
+  ASSERT_EQ(counter, no_capture_cb.Run());
+  ASSERT_EQ(counter, no_capture_cb.Run());
+  EXPECT_EQ(0, deletes);
+  no_capture_cb.Reset();  // This should trigger a delete.
+  EXPECT_EQ(1, deletes);
+
+  deletes = 0;
+  counter = new DeleteCounter(&deletes);
+  RepeatingClosure own_object_cb =
+      BindRepeating(&DeleteCounter::VoidMethod0, Owned(counter));
+  own_object_cb.Run();
+  EXPECT_EQ(0, deletes);
+  own_object_cb.Reset();
+  EXPECT_EQ(1, deletes);
+}
+
+TEST_F(BindTest, OwnedForOnce) {
+  int deletes = 0;
+  DeleteCounter* counter = new DeleteCounter(&deletes);
+
+  // If the callback is never invoked, it retains ownership and deletes the
+  // object on destruction/reset.
+  OnceCallback<DeleteCounter*()> no_capture_cb =
+      BindOnce(&PolymorphicIdentity<DeleteCounter*>, Owned(counter));
+  EXPECT_EQ(0, deletes);
+  no_capture_cb.Reset();  // This should trigger a delete.
+  EXPECT_EQ(1, deletes);
+
+  deletes = 0;
+  counter = new DeleteCounter(&deletes);
+  OnceClosure own_object_cb =
+      BindOnce(&DeleteCounter::VoidMethod0, Owned(counter));
+  EXPECT_EQ(0, deletes);
+  own_object_cb.Reset();
+  EXPECT_EQ(1, deletes);
+}
+
+template <typename T>
+class BindVariantsTest : public ::testing::Test {
+};
+
+struct RepeatingTestConfig {
+  template <typename Signature>
+  using CallbackType = RepeatingCallback<Signature>;
+  using ClosureType = RepeatingClosure;
+
+  template <typename F, typename... Args>
+  static CallbackType<MakeUnboundRunType<F, Args...>>
+  Bind(F&& f, Args&&... args) {
+    return BindRepeating(std::forward<F>(f), std::forward<Args>(args)...);
+  }
+};
+
+struct OnceTestConfig {
+  template <typename Signature>
+  using CallbackType = OnceCallback<Signature>;
+  using ClosureType = OnceClosure;
+
+  template <typename F, typename... Args>
+  static CallbackType<MakeUnboundRunType<F, Args...>>
+  Bind(F&& f, Args&&... args) {
+    return BindOnce(std::forward<F>(f), std::forward<Args>(args)...);
+  }
+};
+
+using BindVariantsTestConfig = ::testing::Types<
+  RepeatingTestConfig, OnceTestConfig>;
+TYPED_TEST_CASE(BindVariantsTest, BindVariantsTestConfig);
+
+template <typename TypeParam, typename Signature>
+using CallbackType = typename TypeParam::template CallbackType<Signature>;
+
+// Function type support.
+//   - Normal function.
+//   - Normal function bound with non-refcounted first argument.
+//   - Method bound to non-const object.
+//   - Method bound to scoped_refptr.
+//   - Const method bound to non-const object.
+//   - Const method bound to const object.
+//   - Derived classes can be used with pointers to non-virtual base functions.
+//   - Derived classes can be used with pointers to virtual base functions (and
+//     preserve virtual dispatch).
+TYPED_TEST(BindVariantsTest, FunctionTypeSupport) {
+  using ClosureType = typename TypeParam::ClosureType;
+
+  StrictMock<HasRef> has_ref;
+  StrictMock<NoRef> no_ref;
+  StrictMock<NoRef> static_func_mock;
+  const HasRef* const_has_ref_ptr = &has_ref;
+  g_func_mock_ptr = &static_func_mock;
+
+  EXPECT_CALL(static_func_mock, VoidMethod0());
+  EXPECT_CALL(has_ref, AddRef()).Times(4);
+  EXPECT_CALL(has_ref, Release()).Times(4);
+  EXPECT_CALL(has_ref, VoidMethod0()).Times(2);
+  EXPECT_CALL(has_ref, VoidConstMethod0()).Times(2);
+
+  ClosureType normal_cb = TypeParam::Bind(&VoidFunc0);
+  CallbackType<TypeParam, NoRef*()> normal_non_refcounted_cb =
+      TypeParam::Bind(&PolymorphicIdentity<NoRef*>, &no_ref);
+  std::move(normal_cb).Run();
+  EXPECT_EQ(&no_ref, std::move(normal_non_refcounted_cb).Run());
+
+  ClosureType method_cb = TypeParam::Bind(&HasRef::VoidMethod0, &has_ref);
+  ClosureType method_refptr_cb =
+      TypeParam::Bind(&HasRef::VoidMethod0, WrapRefCounted(&has_ref));
+  ClosureType const_method_nonconst_obj_cb =
+      TypeParam::Bind(&HasRef::VoidConstMethod0, &has_ref);
+  ClosureType const_method_const_obj_cb =
+      TypeParam::Bind(&HasRef::VoidConstMethod0, const_has_ref_ptr);
+  std::move(method_cb).Run();
+  std::move(method_refptr_cb).Run();
+  std::move(const_method_nonconst_obj_cb).Run();
+  std::move(const_method_const_obj_cb).Run();
+
+  Child child;
+  child.value = 0;
+  ClosureType virtual_set_cb = TypeParam::Bind(&Parent::VirtualSet, &child);
+  std::move(virtual_set_cb).Run();
+  EXPECT_EQ(kChildValue, child.value);
+
+  child.value = 0;
+  ClosureType non_virtual_set_cb =
+      TypeParam::Bind(&Parent::NonVirtualSet, &child);
+  std::move(non_virtual_set_cb).Run();
+  EXPECT_EQ(kParentValue, child.value);
+}
+
+// Return value support.
+//   - Function with return value.
+//   - Method with return value.
+//   - Const method with return value.
+//   - Move-only return value.
+TYPED_TEST(BindVariantsTest, ReturnValues) {
+  StrictMock<NoRef> static_func_mock;
+  StrictMock<HasRef> has_ref;
+  g_func_mock_ptr = &static_func_mock;
+  const HasRef* const_has_ref_ptr = &has_ref;
+
+  EXPECT_CALL(static_func_mock, IntMethod0()).WillOnce(Return(1337));
+  EXPECT_CALL(has_ref, AddRef()).Times(4);
+  EXPECT_CALL(has_ref, Release()).Times(4);
+  EXPECT_CALL(has_ref, IntMethod0()).WillOnce(Return(31337));
+  EXPECT_CALL(has_ref, IntConstMethod0())
+      .WillOnce(Return(41337))
+      .WillOnce(Return(51337));
+  EXPECT_CALL(has_ref, UniquePtrMethod0())
+      .WillOnce(Return(ByMove(std::make_unique<int>(42))));
+
+  CallbackType<TypeParam, int()> normal_cb = TypeParam::Bind(&IntFunc0);
+  CallbackType<TypeParam, int()> method_cb =
+      TypeParam::Bind(&HasRef::IntMethod0, &has_ref);
+  CallbackType<TypeParam, int()> const_method_nonconst_obj_cb =
+      TypeParam::Bind(&HasRef::IntConstMethod0, &has_ref);
+  CallbackType<TypeParam, int()> const_method_const_obj_cb =
+      TypeParam::Bind(&HasRef::IntConstMethod0, const_has_ref_ptr);
+  CallbackType<TypeParam, std::unique_ptr<int>()> move_only_rv_cb =
+      TypeParam::Bind(&HasRef::UniquePtrMethod0, &has_ref);
+  EXPECT_EQ(1337, std::move(normal_cb).Run());
+  EXPECT_EQ(31337, std::move(method_cb).Run());
+  EXPECT_EQ(41337, std::move(const_method_nonconst_obj_cb).Run());
+  EXPECT_EQ(51337, std::move(const_method_const_obj_cb).Run());
+  EXPECT_EQ(42, *std::move(move_only_rv_cb).Run());
+}
+
+// Argument binding tests.
+//   - Argument binding to primitive.
+//   - Argument binding to primitive pointer.
+//   - Argument binding to a literal integer.
+//   - Argument binding to a literal string.
+//   - Argument binding with template function.
+//   - Argument binding to an object.
+//   - Argument binding to pointer to incomplete type.
+//   - Argument gets type converted.
+//   - Pointer argument gets converted.
+//   - Const Reference forces conversion.
+TYPED_TEST(BindVariantsTest, ArgumentBinding) {
+  int n = 2;
+
+  EXPECT_EQ(n, TypeParam::Bind(&Identity, n).Run());
+  EXPECT_EQ(&n, TypeParam::Bind(&PolymorphicIdentity<int*>, &n).Run());
+  EXPECT_EQ(3, TypeParam::Bind(&Identity, 3).Run());
+  EXPECT_STREQ("hi", TypeParam::Bind(&CStringIdentity, "hi").Run());
+  EXPECT_EQ(4, TypeParam::Bind(&PolymorphicIdentity<int>, 4).Run());
+
+  NoRefParent p;
+  p.value = 5;
+  EXPECT_EQ(5, TypeParam::Bind(&UnwrapNoRefParent, p).Run());
+
+  IncompleteType* incomplete_ptr = reinterpret_cast<IncompleteType*>(123);
+  EXPECT_EQ(incomplete_ptr,
+            TypeParam::Bind(&PolymorphicIdentity<IncompleteType*>,
+                            incomplete_ptr).Run());
+
+  NoRefChild c;
+  c.value = 6;
+  EXPECT_EQ(6, TypeParam::Bind(&UnwrapNoRefParent, c).Run());
+
+  c.value = 7;
+  EXPECT_EQ(7, TypeParam::Bind(&UnwrapNoRefParentPtr, &c).Run());
+
+  c.value = 8;
+  EXPECT_EQ(8, TypeParam::Bind(&UnwrapNoRefParentConstRef, c).Run());
+}
+
+// Unbound argument type support tests.
+//   - Unbound value.
+//   - Unbound pointer.
+//   - Unbound reference.
+//   - Unbound const reference.
+//   - Unbound unsized array.
+//   - Unbound sized array.
+//   - Unbound array-of-arrays.
+TYPED_TEST(BindVariantsTest, UnboundArgumentTypeSupport) {
+  CallbackType<TypeParam, void(int)> unbound_value_cb =
+      TypeParam::Bind(&VoidPolymorphic<int>::Run);
+  CallbackType<TypeParam, void(int*)> unbound_pointer_cb =
+      TypeParam::Bind(&VoidPolymorphic<int*>::Run);
+  CallbackType<TypeParam, void(int&)> unbound_ref_cb =
+      TypeParam::Bind(&VoidPolymorphic<int&>::Run);
+  CallbackType<TypeParam, void(const int&)> unbound_const_ref_cb =
+      TypeParam::Bind(&VoidPolymorphic<const int&>::Run);
+  CallbackType<TypeParam, void(int[])> unbound_unsized_array_cb =
+      TypeParam::Bind(&VoidPolymorphic<int[]>::Run);
+  CallbackType<TypeParam, void(int[2])> unbound_sized_array_cb =
+      TypeParam::Bind(&VoidPolymorphic<int[2]>::Run);
+  CallbackType<TypeParam, void(int[][2])> unbound_array_of_arrays_cb =
+      TypeParam::Bind(&VoidPolymorphic<int[][2]>::Run);
+  CallbackType<TypeParam, void(int&)> unbound_ref_with_bound_arg =
+      TypeParam::Bind(&VoidPolymorphic<int, int&>::Run, 1);
+}
+
+// Function with unbound reference parameter.
+//   - Original parameter is modified by callback.
+TYPED_TEST(BindVariantsTest, UnboundReferenceSupport) {
+  int n = 0;
+  CallbackType<TypeParam, void(int&)> unbound_ref_cb =
+      TypeParam::Bind(&RefArgSet);
+  std::move(unbound_ref_cb).Run(n);
+  EXPECT_EQ(2, n);
+}
+
+// Unretained() wrapper support.
+//   - Method bound to Unretained() non-const object.
+//   - Const method bound to Unretained() non-const object.
+//   - Const method bound to Unretained() const object.
+TYPED_TEST(BindVariantsTest, Unretained) {
+  StrictMock<NoRef> no_ref;
+  const NoRef* const_no_ref_ptr = &no_ref;
+
+  EXPECT_CALL(no_ref, VoidMethod0());
+  EXPECT_CALL(no_ref, VoidConstMethod0()).Times(2);
+
+  TypeParam::Bind(&NoRef::VoidMethod0, Unretained(&no_ref)).Run();
+  TypeParam::Bind(&NoRef::VoidConstMethod0, Unretained(&no_ref)).Run();
+  TypeParam::Bind(&NoRef::VoidConstMethod0, Unretained(const_no_ref_ptr)).Run();
+}
+
+TYPED_TEST(BindVariantsTest, ScopedRefptr) {
+  StrictMock<HasRef> has_ref;
+  EXPECT_CALL(has_ref, AddRef()).Times(1);
+  EXPECT_CALL(has_ref, Release()).Times(1);
+
+  const scoped_refptr<HasRef> refptr(&has_ref);
+  CallbackType<TypeParam, int()> scoped_refptr_const_ref_cb =
+      TypeParam::Bind(&FunctionWithScopedRefptrFirstParam,
+                      base::ConstRef(refptr), 1);
+  EXPECT_EQ(1, std::move(scoped_refptr_const_ref_cb).Run());
+}
+
+TYPED_TEST(BindVariantsTest, UniquePtrReceiver) {
+  std::unique_ptr<StrictMock<NoRef>> no_ref(new StrictMock<NoRef>);
+  EXPECT_CALL(*no_ref, VoidMethod0()).Times(1);
+  TypeParam::Bind(&NoRef::VoidMethod0, std::move(no_ref)).Run();
+}
+
+// Tests for Passed() wrapper support:
+//   - Passed() can be constructed from a pointer to scoper.
+//   - Passed() can be constructed from a scoper rvalue.
+//   - Using Passed() gives the Callback ownership.
+//   - Ownership is transferred from Callback to callee on the first Run().
+//   - Callback supports unbound arguments.
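+//
+// Typical shape, as a sketch (Foo and ConsumeFoo are hypothetical):
+//
+//   std::unique_ptr<Foo> foo = std::make_unique<Foo>();
+//   Callback<void()> cb = Bind(&ConsumeFoo, Passed(&foo));
+//   // |foo| is now null; the callback owns the Foo until Run() or Reset().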
+template <typename T>
+class BindMoveOnlyTypeTest : public ::testing::Test {
+};
+
+struct CustomDeleter {
+  void operator()(DeleteCounter* c) { delete c; }
+};
+
+using MoveOnlyTypesToTest =
+    ::testing::Types<std::unique_ptr<DeleteCounter>,
+                     std::unique_ptr<DeleteCounter, CustomDeleter>>;
+TYPED_TEST_CASE(BindMoveOnlyTypeTest, MoveOnlyTypesToTest);
+
+TYPED_TEST(BindMoveOnlyTypeTest, PassedToBoundCallback) {
+  int deletes = 0;
+
+  TypeParam ptr(new DeleteCounter(&deletes));
+  Callback<TypeParam()> callback = Bind(&PassThru<TypeParam>, Passed(&ptr));
+  EXPECT_FALSE(ptr.get());
+  EXPECT_EQ(0, deletes);
+
+  // If we never invoke the Callback, it retains ownership and deletes the
+  // object.
+  callback.Reset();
+  EXPECT_EQ(1, deletes);
+}
+
+TYPED_TEST(BindMoveOnlyTypeTest, PassedWithRvalue) {
+  int deletes = 0;
+  Callback<TypeParam()> callback = Bind(
+      &PassThru<TypeParam>, Passed(TypeParam(new DeleteCounter(&deletes))));
+  EXPECT_EQ(0, deletes);
+
+  // If we never invoke the Callback, it retains ownership and deletes the
+  // object.
+  callback.Reset();
+  EXPECT_EQ(1, deletes);
+}
+
+// Check that ownership can be transferred back out.
+TYPED_TEST(BindMoveOnlyTypeTest, ReturnMoveOnlyType) {
+  int deletes = 0;
+  DeleteCounter* counter = new DeleteCounter(&deletes);
+  Callback<TypeParam()> callback =
+      Bind(&PassThru<TypeParam>, Passed(TypeParam(counter)));
+  TypeParam result = callback.Run();
+  ASSERT_EQ(counter, result.get());
+  EXPECT_EQ(0, deletes);
+
+  // Resetting does not delete since ownership was transferred.
+  callback.Reset();
+  EXPECT_EQ(0, deletes);
+
+  // Ensure that we actually did get ownership.
+  result.reset();
+  EXPECT_EQ(1, deletes);
+}
+
+TYPED_TEST(BindMoveOnlyTypeTest, UnboundForwarding) {
+  int deletes = 0;
+  TypeParam ptr(new DeleteCounter(&deletes));
+  // Test unbound argument forwarding.
+  Callback<TypeParam(TypeParam)> cb_unbound = Bind(&PassThru<TypeParam>);
+  cb_unbound.Run(std::move(ptr));
+  EXPECT_EQ(1, deletes);
+}
+
+void VerifyVector(const std::vector<std::unique_ptr<int>>& v) {
+  ASSERT_EQ(1u, v.size());
+  EXPECT_EQ(12345, *v[0]);
+}
+
+std::vector<std::unique_ptr<int>> AcceptAndReturnMoveOnlyVector(
+    std::vector<std::unique_ptr<int>> v) {
+  VerifyVector(v);
+  return v;
+}
+
+// Test that a vector containing move-only types can be used with Callback.
+TEST_F(BindTest, BindMoveOnlyVector) {
+  using MoveOnlyVector = std::vector<std::unique_ptr<int>>;
+
+  MoveOnlyVector v;
+  v.push_back(WrapUnique(new int(12345)));
+
+  // Early binding should work:
+  base::Callback<MoveOnlyVector()> bound_cb =
+      base::Bind(&AcceptAndReturnMoveOnlyVector, Passed(&v));
+  MoveOnlyVector intermediate_result = bound_cb.Run();
+  VerifyVector(intermediate_result);
+
+  // As should passing it as an argument to Run():
+  base::Callback<MoveOnlyVector(MoveOnlyVector)> unbound_cb =
+      base::Bind(&AcceptAndReturnMoveOnlyVector);
+  MoveOnlyVector final_result = unbound_cb.Run(std::move(intermediate_result));
+  VerifyVector(final_result);
+}
+
+// Argument copy-constructor usage for non-reference copy-only parameters.
+//   - Bound arguments are only copied once.
+//   - Forwarded arguments are only copied once.
+//   - Forwarded arguments with coercions are only copied twice (once for the
+//     coercion, and once for the final dispatch).
+TEST_F(BindTest, ArgumentCopies) {
+  int copies = 0;
+  int assigns = 0;
+
+  CopyCounter counter(&copies, &assigns);
+  Bind(&VoidPolymorphic<CopyCounter>::Run, counter);
+  EXPECT_EQ(1, copies);
+  EXPECT_EQ(0, assigns);
+
+  copies = 0;
+  assigns = 0;
+  Bind(&VoidPolymorphic<CopyCounter>::Run, CopyCounter(&copies, &assigns));
+  EXPECT_EQ(1, copies);
+  EXPECT_EQ(0, assigns);
+
+  copies = 0;
+  assigns = 0;
+  Bind(&VoidPolymorphic<CopyCounter>::Run).Run(counter);
+  EXPECT_EQ(2, copies);
+  EXPECT_EQ(0, assigns);
+
+  copies = 0;
+  assigns = 0;
+  Bind(&VoidPolymorphic<CopyCounter>::Run).Run(CopyCounter(&copies, &assigns));
+  EXPECT_EQ(1, copies);
+  EXPECT_EQ(0, assigns);
+
+  copies = 0;
+  assigns = 0;
+  DerivedCopyMoveCounter derived(&copies, &assigns, nullptr, nullptr);
+  Bind(&VoidPolymorphic<CopyCounter>::Run).Run(CopyCounter(derived));
+  EXPECT_EQ(2, copies);
+  EXPECT_EQ(0, assigns);
+
+  copies = 0;
+  assigns = 0;
+  Bind(&VoidPolymorphic<CopyCounter>::Run)
+      .Run(CopyCounter(
+          DerivedCopyMoveCounter(&copies, &assigns, nullptr, nullptr)));
+  EXPECT_EQ(2, copies);
+  EXPECT_EQ(0, assigns);
+}
+
+// Argument move-constructor usage for move-only parameters.
+//   - Bound arguments passed by move are not copied.
+TEST_F(BindTest, ArgumentMoves) {
+  int move_constructs = 0;
+  int move_assigns = 0;
+
+  Bind(&VoidPolymorphic<const MoveCounter&>::Run,
+       MoveCounter(&move_constructs, &move_assigns));
+  EXPECT_EQ(1, move_constructs);
+  EXPECT_EQ(0, move_assigns);
+
+  // TODO(tzik): Support binding move-only type into a non-reference parameter
+  // of a variant of Callback.
+
+  move_constructs = 0;
+  move_assigns = 0;
+  Bind(&VoidPolymorphic<MoveCounter>::Run)
+      .Run(MoveCounter(&move_constructs, &move_assigns));
+  EXPECT_EQ(1, move_constructs);
+  EXPECT_EQ(0, move_assigns);
+
+  move_constructs = 0;
+  move_assigns = 0;
+  Bind(&VoidPolymorphic<MoveCounter>::Run)
+      .Run(MoveCounter(DerivedCopyMoveCounter(
+          nullptr, nullptr, &move_constructs, &move_assigns)));
+  EXPECT_EQ(2, move_constructs);
+  EXPECT_EQ(0, move_assigns);
+}
+
+// Argument constructor usage for non-reference movable-copyable
+// parameters.
+//   - Bound arguments passed by move are not copied.
+//   - Forwarded arguments are only copied once.
+//   - Forwarded arguments with coercions are only copied once and moved once.
+TEST_F(BindTest, ArgumentCopiesAndMoves) {
+  int copies = 0;
+  int assigns = 0;
+  int move_constructs = 0;
+  int move_assigns = 0;
+
+  CopyMoveCounter counter(&copies, &assigns, &move_constructs, &move_assigns);
+  Bind(&VoidPolymorphic<CopyMoveCounter>::Run, counter);
+  EXPECT_EQ(1, copies);
+  EXPECT_EQ(0, assigns);
+  EXPECT_EQ(0, move_constructs);
+  EXPECT_EQ(0, move_assigns);
+
+  copies = 0;
+  assigns = 0;
+  move_constructs = 0;
+  move_assigns = 0;
+  Bind(&VoidPolymorphic<CopyMoveCounter>::Run,
+       CopyMoveCounter(&copies, &assigns, &move_constructs, &move_assigns));
+  EXPECT_EQ(0, copies);
+  EXPECT_EQ(0, assigns);
+  EXPECT_EQ(1, move_constructs);
+  EXPECT_EQ(0, move_assigns);
+
+  copies = 0;
+  assigns = 0;
+  move_constructs = 0;
+  move_assigns = 0;
+  Bind(&VoidPolymorphic<CopyMoveCounter>::Run).Run(counter);
+  EXPECT_EQ(1, copies);
+  EXPECT_EQ(0, assigns);
+  EXPECT_EQ(1, move_constructs);
+  EXPECT_EQ(0, move_assigns);
+
+  copies = 0;
+  assigns = 0;
+  move_constructs = 0;
+  move_assigns = 0;
+  Bind(&VoidPolymorphic<CopyMoveCounter>::Run)
+      .Run(CopyMoveCounter(&copies, &assigns, &move_constructs, &move_assigns));
+  EXPECT_EQ(0, copies);
+  EXPECT_EQ(0, assigns);
+  EXPECT_EQ(1, move_constructs);
+  EXPECT_EQ(0, move_assigns);
+
+  DerivedCopyMoveCounter derived_counter(&copies, &assigns, &move_constructs,
+                                         &move_assigns);
+  copies = 0;
+  assigns = 0;
+  move_constructs = 0;
+  move_assigns = 0;
+  Bind(&VoidPolymorphic<CopyMoveCounter>::Run)
+      .Run(CopyMoveCounter(derived_counter));
+  EXPECT_EQ(1, copies);
+  EXPECT_EQ(0, assigns);
+  EXPECT_EQ(1, move_constructs);
+  EXPECT_EQ(0, move_assigns);
+
+  copies = 0;
+  assigns = 0;
+  move_constructs = 0;
+  move_assigns = 0;
+  Bind(&VoidPolymorphic<CopyMoveCounter>::Run)
+      .Run(CopyMoveCounter(DerivedCopyMoveCounter(
+          &copies, &assigns, &move_constructs, &move_assigns)));
+  EXPECT_EQ(0, copies);
+  EXPECT_EQ(0, assigns);
+  EXPECT_EQ(2, move_constructs);
+  EXPECT_EQ(0, move_assigns);
+}
+
+TEST_F(BindTest, CapturelessLambda) {
+  EXPECT_FALSE(internal::IsCallableObject<void>::value);
+  EXPECT_FALSE(internal::IsCallableObject<int>::value);
+  EXPECT_FALSE(internal::IsCallableObject<void (*)()>::value);
+  EXPECT_FALSE(internal::IsCallableObject<void (NoRef::*)()>::value);
+
+  auto f = []() {};
+  EXPECT_TRUE(internal::IsCallableObject<decltype(f)>::value);
+
+  int i = 0;
+  auto g = [i]() { (void)i; };
+  EXPECT_TRUE(internal::IsCallableObject<decltype(g)>::value);
+
+  auto h = [](int, double) { return 'k'; };
+  EXPECT_TRUE((std::is_same<
+      char(int, double),
+      internal::ExtractCallableRunType<decltype(h)>>::value));
+
+  EXPECT_EQ(42, Bind([] { return 42; }).Run());
+  EXPECT_EQ(42, Bind([](int i) { return i * 7; }, 6).Run());
+
+  int x = 1;
+  base::Callback<void(int)> cb =
+      Bind([](int* x, int i) { *x *= i; }, Unretained(&x));
+  cb.Run(6);
+  EXPECT_EQ(6, x);
+  cb.Run(7);
+  EXPECT_EQ(42, x);
+}
+
+TEST_F(BindTest, EmptyFunctor) {
+  struct NonEmptyFunctor {
+    int operator()() const { return x; }
+    int x = 42;
+  };
+
+  struct EmptyFunctor {
+    int operator()() { return 42; }
+  };
+
+  struct EmptyFunctorConst {
+    int operator()() const { return 42; }
+  };
+
+  EXPECT_TRUE(internal::IsCallableObject<NonEmptyFunctor>::value);
+  EXPECT_TRUE(internal::IsCallableObject<EmptyFunctor>::value);
+  EXPECT_TRUE(internal::IsCallableObject<EmptyFunctorConst>::value);
+  EXPECT_EQ(42, BindOnce(EmptyFunctor()).Run());
+  EXPECT_EQ(42, BindOnce(EmptyFunctorConst()).Run());
+  EXPECT_EQ(42, BindRepeating(EmptyFunctorConst()).Run());
+}
+
+TEST_F(BindTest, CapturingLambdaForTesting) {
+  int x = 6;
+  EXPECT_EQ(42, BindLambdaForTesting([=](int y) { return x * y; }).Run(7));
+
+  auto f = [x](std::unique_ptr<int> y) { return x * *y; };
+  EXPECT_EQ(42, BindLambdaForTesting(f).Run(std::make_unique<int>(7)));
+}
+
+TEST_F(BindTest, Cancellation) {
+  EXPECT_CALL(no_ref_, VoidMethodWithIntArg(_)).Times(2);
+
+  WeakPtrFactory<NoRef> weak_factory(&no_ref_);
+  RepeatingCallback<void(int)> cb =
+      BindRepeating(&NoRef::VoidMethodWithIntArg, weak_factory.GetWeakPtr());
+  RepeatingClosure cb2 = BindRepeating(cb, 8);
+  OnceClosure cb3 = BindOnce(cb, 8);
+
+  OnceCallback<void(int)> cb4 =
+      BindOnce(&NoRef::VoidMethodWithIntArg, weak_factory.GetWeakPtr());
+  EXPECT_FALSE(cb4.IsCancelled());
+
+  OnceClosure cb5 = BindOnce(std::move(cb4), 8);
+
+  EXPECT_FALSE(cb.IsCancelled());
+  EXPECT_FALSE(cb2.IsCancelled());
+  EXPECT_FALSE(cb3.IsCancelled());
+  EXPECT_FALSE(cb5.IsCancelled());
+
+  cb.Run(6);
+  cb2.Run();
+
+  weak_factory.InvalidateWeakPtrs();
+
+  EXPECT_TRUE(cb.IsCancelled());
+  EXPECT_TRUE(cb2.IsCancelled());
+  EXPECT_TRUE(cb3.IsCancelled());
+  EXPECT_TRUE(cb5.IsCancelled());
+
+  cb.Run(6);
+  cb2.Run();
+  std::move(cb3).Run();
+  std::move(cb5).Run();
+}
+
+TEST_F(BindTest, OnceCallback) {
+  // Check that the Callback variants declare the expected conversions.
+  // Copy constructor and assignment of RepeatingCallback.
+  static_assert(std::is_constructible<
+      RepeatingClosure, const RepeatingClosure&>::value,
+      "RepeatingClosure should be copyable.");
+  static_assert(
+      std::is_assignable<RepeatingClosure, const RepeatingClosure&>::value,
+      "RepeatingClosure should be copy-assignable.");
+
+  // Move constructor and assignment of RepeatingCallback.
+  static_assert(std::is_constructible<
+      RepeatingClosure, RepeatingClosure&&>::value,
+      "RepeatingClosure should be movable.");
+  static_assert(std::is_assignable<RepeatingClosure, RepeatingClosure&&>::value,
+                "RepeatingClosure should be move-assignable");
+
+  // Conversions from OnceCallback to RepeatingCallback.
+  static_assert(!std::is_constructible<
+      RepeatingClosure, const OnceClosure&>::value,
+      "OnceClosure should not be convertible to RepeatingClosure.");
+  static_assert(
+      !std::is_assignable<RepeatingClosure, const OnceClosure&>::value,
+      "OnceClosure should not be convertible to RepeatingClosure.");
+
+  // Destructive conversions from OnceCallback to RepeatingCallback.
+  static_assert(!std::is_constructible<
+      RepeatingClosure, OnceClosure&&>::value,
+      "OnceClosure should not be convertible to RepeatingClosure.");
+  static_assert(!std::is_assignable<RepeatingClosure, OnceClosure&&>::value,
+                "OnceClosure should not be convertible to RepeatingClosure.");
+
+  // Copy constructor and assignment of OnceCallback.
+  static_assert(!std::is_constructible<
+      OnceClosure, const OnceClosure&>::value,
+      "OnceClosure should not be copyable.");
+  static_assert(!std::is_assignable<OnceClosure, const OnceClosure&>::value,
+                "OnceClosure should not be copy-assignable");
+
+  // Move constructor and assignment of OnceCallback.
+  static_assert(std::is_constructible<
+      OnceClosure, OnceClosure&&>::value,
+      "OnceClosure should be movable.");
+  static_assert(std::is_assignable<OnceClosure, OnceClosure&&>::value,
+                "OnceClosure should be move-assignable.");
+
+  // Conversions from RepeatingCallback to OnceCallback.
+  static_assert(std::is_constructible<
+      OnceClosure, const RepeatingClosure&>::value,
+      "RepeatingClosure should be convertible to OnceClosure.");
+  static_assert(std::is_assignable<OnceClosure, const RepeatingClosure&>::value,
+                "RepeatingClosure should be convertible to OnceClosure.");
+
+  // Destructive conversions from RepeatingCallback to OnceCallback.
+  static_assert(std::is_constructible<
+      OnceClosure, RepeatingClosure&&>::value,
+      "RepeatingClosure should be convertible to OnceClosure.");
+  static_assert(std::is_assignable<OnceClosure, RepeatingClosure&&>::value,
+                "RepeatingClosure should be covretible to OnceClosure.");
+
+  OnceClosure cb = BindOnce(&VoidPolymorphic<>::Run);
+  std::move(cb).Run();
+
+  // RepeatingCallback should be convertible to OnceCallback.
+  OnceClosure cb2 = BindRepeating(&VoidPolymorphic<>::Run);
+  std::move(cb2).Run();
+
+  RepeatingClosure cb3 = BindRepeating(&VoidPolymorphic<>::Run);
+  cb = cb3;
+  std::move(cb).Run();
+
+  cb = std::move(cb2);
+
+  OnceCallback<void(int)> cb4 =
+      BindOnce(&VoidPolymorphic<std::unique_ptr<int>, int>::Run,
+               std::make_unique<int>(0));
+  BindOnce(std::move(cb4), 1).Run();
+}
+
+// Callback construction and assignment tests.
+//   - Construction from an InvokerStorageHolder should not cause ref/deref.
+//   - Assignment from another callback should only cause one ref.
+//
+// TODO(ajwong): Is there actually a way to test this?
+
+#if defined(OS_WIN)
+int __fastcall FastCallFunc(int n) {
+  return n;
+}
+
+int __stdcall StdCallFunc(int n) {
+  return n;
+}
+
+// Windows specific calling convention support.
+//   - Can bind a __fastcall function.
+//   - Can bind a __stdcall function.
+TEST_F(BindTest, WindowsCallingConventions) {
+  Callback<int()> fastcall_cb = Bind(&FastCallFunc, 1);
+  EXPECT_EQ(1, fastcall_cb.Run());
+
+  Callback<int()> stdcall_cb = Bind(&StdCallFunc, 2);
+  EXPECT_EQ(2, stdcall_cb.Run());
+}
+#endif
+
+// Test unwrapping the various wrapping functions.
+
+TEST_F(BindTest, UnwrapUnretained) {
+  int i = 0;
+  auto unretained = Unretained(&i);
+  EXPECT_EQ(&i, internal::Unwrap(unretained));
+  EXPECT_EQ(&i, internal::Unwrap(std::move(unretained)));
+}
+
+TEST_F(BindTest, UnwrapConstRef) {
+  int p = 0;
+  auto const_ref = ConstRef(p);
+  EXPECT_EQ(&p, &internal::Unwrap(const_ref));
+  EXPECT_EQ(&p, &internal::Unwrap(std::move(const_ref)));
+}
+
+TEST_F(BindTest, UnwrapRetainedRef) {
+  auto p = MakeRefCounted<RefCountedData<int>>();
+  auto retained_ref = RetainedRef(p);
+  EXPECT_EQ(p.get(), internal::Unwrap(retained_ref));
+  EXPECT_EQ(p.get(), internal::Unwrap(std::move(retained_ref)));
+}
+
+TEST_F(BindTest, UnwrapOwned) {
+  int* p = new int;
+  auto owned = Owned(p);
+  EXPECT_EQ(p, internal::Unwrap(owned));
+  EXPECT_EQ(p, internal::Unwrap(std::move(owned)));
+}
+
+TEST_F(BindTest, UnwrapPassed) {
+  int* p = new int;
+  auto passed = Passed(WrapUnique(p));
+  EXPECT_EQ(p, internal::Unwrap(passed).get());
+
+  p = new int;
+  EXPECT_EQ(p, internal::Unwrap(Passed(WrapUnique(p))).get());
+}
+
+TEST_F(BindTest, BindNoexcept) {
+  EXPECT_EQ(42, base::BindOnce(&Noexcept).Run());
+  EXPECT_EQ(
+      42,
+      base::BindOnce(&BindTest::NoexceptMethod, base::Unretained(this)).Run());
+  EXPECT_EQ(
+      42, base::BindOnce(&BindTest::ConstNoexceptMethod, base::Unretained(this))
+              .Run());
+}
+
+// Test that null callbacks cause a DCHECK.
+TEST(BindDeathTest, NullCallback) {
+  base::Callback<void(int)> null_cb;
+  ASSERT_TRUE(null_cb.is_null());
+  EXPECT_DCHECK_DEATH(base::Bind(null_cb, 42));
+}
+
+}  // namespace
+}  // namespace base
diff --git a/base/bind_unittest.nc b/base/bind_unittest.nc
new file mode 100644
index 0000000..d549d2e
--- /dev/null
+++ b/base/bind_unittest.nc
@@ -0,0 +1,322 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is a "No Compile Test" suite.
+// http://dev.chromium.org/developers/testing/no-compile-tests
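+//
+// Roughly how it works: this file is compiled once per NCTEST_* macro below,
+// with only that macro defined, and each compile is expected to fail with a
+// diagnostic matching the bracketed regex on the corresponding #if/#elif line.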
+
+#include <memory>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/test/bind_test_util.h"
+
+namespace base {
+
+// Do not put everything inside an anonymous namespace.  If you do, many of the
+// helper function declarations will generate unused definition warnings.
+
+static const int kParentValue = 1;
+static const int kChildValue = 2;
+
+class NoRef {
+ public:
+  void VoidMethod0() {}
+  void VoidConstMethod0() const {}
+  int IntMethod0() { return 1; }
+};
+
+class HasRef : public NoRef, public base::RefCounted<HasRef> {
+};
+
+class Parent {
+ public:
+  void AddRef() const {}
+  void Release() const {}
+  virtual void VirtualSet() { value = kParentValue; }
+  void NonVirtualSet() { value = kParentValue; }
+  int value;
+};
+
+class Child : public Parent {
+ public:
+  virtual void VirtualSet() { value = kChildValue; }
+  void NonVirtualSet() { value = kChildValue; }
+};
+
+class NoRefParent {
+ public:
+  virtual void VirtualSet() { value = kParentValue; }
+  void NonVirtualSet() { value = kParentValue; }
+  int value;
+};
+
+class NoRefChild : public NoRefParent {
+  virtual void VirtualSet() { value = kChildValue; }
+  void NonVirtualSet() { value = kChildValue; }
+};
+
+template <typename T>
+T PolymorphicIdentity(T t) {
+  return t;
+}
+
+int UnwrapParentRef(Parent& p) {
+  return p.value;
+}
+
+template <typename T>
+void VoidPolymorphic1(T t) {
+}
+
+void TakesMoveOnly(std::unique_ptr<int>) {
+}
+
+struct NonEmptyFunctor {
+  int x;
+  void operator()() const {}
+};
+
+// TODO(hans): Remove .* and update the static_assert expectations once we roll
+// past Clang r313315. https://crbug.com/765692.
+
+#if defined(NCTEST_METHOD_ON_CONST_OBJECT)  // [r"fatal error: static_assert failed .*\"Bound argument \|i\| of type \|Arg\| cannot be forwarded as \|Unwrapped\| to the bound functor, which declares it as \|Param\|\.\""]
+
+// Method bound to const-object.
+//
+// Only const methods should be allowed to work with const objects.
+void WontCompile() {
+  HasRef has_ref;
+  const HasRef* const_has_ref_ptr_ = &has_ref;
+  Callback<void()> method_to_const_cb =
+      Bind(&HasRef::VoidMethod0, const_has_ref_ptr_);
+  method_to_const_cb.Run();
+}
+
+#elif defined(NCTEST_METHOD_BIND_NEEDS_REFCOUNTED_OBJECT)  // [r"fatal error: static_assert failed \"Receivers may not be raw pointers\."]
+
+
+// Method bound to non-refcounted object.
+//
+// We require refcounts unless you have Unretained().
+void WontCompile() {
+  NoRef no_ref;
+  Callback<void()> no_ref_cb =
+      Bind(&NoRef::VoidMethod0, &no_ref);
+  no_ref_cb.Run();
+}
+
+#elif defined(NCTEST_CONST_METHOD_NEEDS_REFCOUNTED_OBJECT)  // [r"fatal error: static_assert failed \"Receivers may not be raw pointers\."]
+
+// Const Method bound to non-refcounted object.
+//
+// We require refcounts unless you have Unretained().
+void WontCompile() {
+  NoRef no_ref;
+  Callback<void()> no_ref_const_cb =
+      Bind(&NoRef::VoidConstMethod0, &no_ref);
+  no_ref_const_cb.Run();
+}
+
+#elif defined(NCTEST_CONST_POINTER)  // [r"fatal error: static_assert failed .*\"Bound argument \|i\| of type \|Arg\| cannot be forwarded as \|Unwrapped\| to the bound functor, which declares it as \|Param\|\.\""]
+
+// Const argument used with non-const pointer parameter of same type.
+//
+// This is just a const-correctness check.
+void WontCompile() {
+  const NoRef* const_no_ref_ptr;
+  Callback<NoRef*()> pointer_same_cb =
+      Bind(&PolymorphicIdentity<NoRef*>, const_no_ref_ptr);
+  pointer_same_cb.Run();
+}
+
+#elif defined(NCTEST_CONST_POINTER_SUBTYPE)  // [r"fatal error: static_assert failed .*\"Bound argument \|i\| of type \|Arg\| cannot be forwarded as \|Unwrapped\| to the bound functor, which declares it as \|Param\|\.\""]
+
+// Const argument used with non-const pointer parameter of super type.
+//
+// This is just a const-correctness check.
+void WontCompile() {
+  const NoRefChild* const_child_ptr;
+  Callback<NoRefParent*()> pointer_super_cb =
+    Bind(&PolymorphicIdentity<NoRefParent*>, const_child_ptr);
+  pointer_super_cb.Run();
+}
+
+#elif defined(DISABLED_NCTEST_DISALLOW_NON_CONST_REF_PARAM)  // [r"fatal error: no member named 'AddRef' in 'base::NoRef'"]
+// TODO(dcheng): I think there's a type safety promotion issue here where we can
+// pass a const ref to a non const-ref function, or vice versa accidentally. Or
+// we make a copy accidentally. Check.
+
+// Functions with reference parameters, unsupported.
+//
+// First, non-const reference parameters are disallowed by the Google
+// style guide. Second, since we are doing argument forwarding it becomes
+// very tricky to avoid copies, maintain const correctness, and not
+// accidentally have the function modify a temporary or a copy.
+void WontCompile() {
+  Parent p;
+  Callback<int(Parent&)> ref_arg_cb = Bind(&UnwrapParentRef);
+  ref_arg_cb.Run(p);
+}
+
+#elif defined(NCTEST_DISALLOW_BIND_TO_NON_CONST_REF_PARAM)  // [r"fatal error: static_assert failed .*\"Bound argument \|i\| of type \|Arg\| cannot be forwarded as \|Unwrapped\| to the bound functor, which declares it as \|Param\|\.\""]
+
+// Binding functions with reference parameters, unsupported.
+//
+// See comment in NCTEST_DISALLOW_NON_CONST_REF_PARAM
+void WontCompile() {
+  Parent p;
+  Callback<int()> ref_cb = Bind(&UnwrapParentRef, p);
+  ref_cb.Run();
+}
+
+#elif defined(NCTEST_NO_IMPLICIT_ARRAY_PTR_CONVERSION)  // [r"fatal error: static_assert failed .*\"First bound argument to a method cannot be an array\.\""]
+
+// A method should not be bindable with an array of objects.
+//
+// This is likely not wanted behavior. We specifically check for it though
+// because it is possible, depending on how you implement prebinding, to
+// implicitly convert an array type to a pointer type.
+void WontCompile() {
+  HasRef p[10];
+  Callback<void()> method_bound_to_array_cb =
+      Bind(&HasRef::VoidMethod0, p);
+  method_bound_to_array_cb.Run();
+}
+
+#elif defined(NCTEST_NO_RVALUE_RAW_PTR_FOR_REFCOUNTED_TYPES)  // [r"fatal error: static_assert failed .*\"A parameter is a refcounted type and needs scoped_refptr\.\""]
+
+// Refcounted types should not be bound as a raw pointer.
+void WontCompile() {
+  HasRef for_raw_ptr;
+  int a;
+  Callback<void()> ref_count_as_raw_ptr_a =
+      Bind(&VoidPolymorphic1<int*>, &a);
+  Callback<void()> ref_count_as_raw_ptr =
+      Bind(&VoidPolymorphic1<HasRef*>, &for_raw_ptr);
+}
+
+#elif defined(NCTEST_NO_LVALUE_RAW_PTR_FOR_REFCOUNTED_TYPES)  // [r"fatal error: static_assert failed .*\"A parameter is a refcounted type and needs scoped_refptr\.\""]
+
+// Refcounted types should not be bound as a raw pointer.
+void WontCompile() {
+  HasRef* for_raw_ptr = nullptr;
+  Callback<void()> ref_count_as_raw_ptr =
+      Bind(&VoidPolymorphic1<HasRef*>, for_raw_ptr);
+}
+
+#elif defined(NCTEST_NO_RVALUE_CONST_RAW_PTR_FOR_REFCOUNTED_TYPES)  // [r"fatal error: static_assert failed .*\"A parameter is a refcounted type and needs scoped_refptr\.\""]
+
+// Refcounted types should not be bound as a raw pointer.
+void WontCompile() {
+  const HasRef for_raw_ptr;
+  Callback<void()> ref_count_as_raw_ptr =
+      Bind(&VoidPolymorphic1<const HasRef*>, &for_raw_ptr);
+}
+
+#elif defined(NCTEST_NO_LVALUE_CONST_RAW_PTR_FOR_REFCOUNTED_TYPES)  // [r"fatal error: static_assert failed .*\"A parameter is a refcounted type and needs scoped_refptr\.\""]
+
+// Refcounted types should not be bound as a raw pointer.
+void WontCompile() {
+  const HasRef* for_raw_ptr = nullptr;
+  Callback<void()> ref_count_as_raw_ptr =
+      Bind(&VoidPolymorphic1<const HasRef*>, for_raw_ptr);
+}
+
+#elif defined(NCTEST_WEAKPTR_BIND_MUST_RETURN_VOID)  // [r"fatal error: static_assert failed .*\"weak_ptrs can only bind to methods without return values\""]
+
+// WeakPtrs cannot be bound to methods with return types.
+void WontCompile() {
+  NoRef no_ref;
+  WeakPtrFactory<NoRef> weak_factory(&no_ref);
+  Callback<int()> weak_ptr_with_non_void_return_type =
+      Bind(&NoRef::IntMethod0, weak_factory.GetWeakPtr());
+  weak_ptr_with_non_void_return_type.Run();
+}
+
+#elif defined(NCTEST_DISALLOW_ASSIGN_DIFFERENT_TYPES)  // [r"fatal error: no viable conversion from 'Callback<MakeUnboundRunType<void \(\*\)\(int\)>>' to 'Callback<void \(\)>'"]
+
+// Bind result cannot be assigned to Callbacks with a mismatching type.
+void WontCompile() {
+  Closure callback_mismatches_bind_type = Bind(&VoidPolymorphic1<int>);
+}
+
+#elif defined(NCTEST_DISALLOW_CAPTURING_LAMBDA)  // [r"fatal error: implicit instantiation of undefined template 'base::internal::FunctorTraits<\(lambda at (\.\./)+base/bind_unittest.nc:[0-9]+:[0-9]+\), void>'"]
+
+void WontCompile() {
+  int i = 0, j = 0;
+  Bind([i,&j]() {j = i;});
+}
+
+#elif defined(NCTEST_DISALLOW_ONCECALLBACK_RUN_ON_LVALUE)  // [r"static_assert failed .*\"OnceCallback::Run\(\) may only be invoked on a non-const rvalue, i\.e\. std::move\(callback\)\.Run\(\)\.\""]
+
+void WontCompile() {
+  OnceClosure cb = Bind([] {});
+  cb.Run();
+}
+
+#elif defined(NCTEST_DISALLOW_ONCECALLBACK_RUN_ON_CONST_LVALUE)  // [r"static_assert failed .*\"OnceCallback::Run\(\) may only be invoked on a non-const rvalue, i\.e\. std::move\(callback\)\.Run\(\)\.\""]
+
+void WontCompile() {
+  const OnceClosure cb = Bind([] {});
+  cb.Run();
+}
+
+#elif defined(NCTEST_DISALLOW_ONCECALLBACK_RUN_ON_CONST_RVALUE)  // [r"static_assert failed .*\"OnceCallback::Run\(\) may only be invoked on a non-const rvalue, i\.e\. std::move\(callback\)\.Run\(\)\.\""]
+
+void WontCompile() {
+  const OnceClosure cb = Bind([] {});
+  std::move(cb).Run();
+}
+
+#elif defined(NCTEST_DISALLOW_BIND_ONCECALLBACK)  // [r"fatal error: static_assert failed .*\"BindRepeating cannot bind OnceCallback. Use BindOnce with std::move\(\)\.\""]
+
+void WontCompile() {
+  Bind(BindOnce([](int) {}), 42);
+}
+
+#elif defined(NCTEST_DISALLOW_BINDONCE_LVALUE_ONCECALLBACK)  // [r"fatal error: static_assert failed .*\"BindOnce requires non-const rvalue for OnceCallback binding\."]
+void WontCompile() {
+  auto cb = BindOnce([](int) {});
+  BindOnce(cb, 42);
+}
+
+#elif defined(NCTEST_DISALLOW_BINDONCE_RVALUE_CONST_ONCECALLBACK)  // [r"fatal error: static_assert failed .*\"BindOnce requires non-const rvalue for OnceCallback binding\."]
+
+void WontCompile() {
+  const auto cb = BindOnce([](int) {});
+  BindOnce(std::move(cb), 42);
+}
+
+#elif defined(NCTEST_BINDONCE_MOVEONLY_TYPE_BY_VALUE)  // [r"fatal error: static_assert failed .*\"Bound argument \|i\| is move-only but will be bound by copy\. Ensure \|Arg\| is mutable and bound using std::move\(\)\.\""]
+
+void WontCompile() {
+  std::unique_ptr<int> x;
+  BindOnce(&TakesMoveOnly, x);
+}
+
+#elif defined(NCTEST_BIND_MOVEONLY_TYPE_BY_VALUE)  // [r"Bound argument \|i\| is move-only but will be forwarded by copy\. Ensure \|Arg\| is bound using base::Passed\(\), not std::move\(\)."]
+
+void WontCompile() {
+  std::unique_ptr<int> x;
+  Bind(&TakesMoveOnly, x);
+}
+
+#elif defined(NCTEST_BIND_MOVEONLY_TYPE_WITH_STDMOVE)  // [r"Bound argument \|i\| is move-only but will be forwarded by copy\. Ensure \|Arg\| is bound using base::Passed\(\), not std::move\(\)."]
+
+void WontCompile() {
+  std::unique_ptr<int> x;
+  Bind(&TakesMoveOnly, std::move(x));
+}
+
+#elif defined(NCTEST_BIND_NON_EMPTY_FUNCTOR)  // [r"fatal error: implicit instantiation of undefined template 'base::internal::FunctorTraits<base::NonEmptyFunctor, void>'"]
+
+void WontCompile() {
+  Bind(NonEmptyFunctor());
+}
+
+#endif
+
+}  // namespace base
diff --git a/base/bit_cast.h b/base/bit_cast.h
new file mode 100644
index 0000000..90dd925
--- /dev/null
+++ b/base/bit_cast.h
@@ -0,0 +1,77 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_BIT_CAST_H_
+#define BASE_BIT_CAST_H_
+
+#include <string.h>
+#include <type_traits>
+
+#include "base/compiler_specific.h"
+#include "base/template_util.h"
+#include "build/build_config.h"
+
+// bit_cast<Dest,Source> is a template function that implements the equivalent
+// of "*reinterpret_cast<Dest*>(&source)".  We need this in very low-level
+// functions like the protobuf library and fast math support.
+//
+//   float f = 3.14159265358979;
+//   int i = bit_cast<int32_t>(f);
+//   // i = 0x40490fdb
+//
+// The classical address-casting method is:
+//
+//   // WRONG
+//   float f = 3.14159265358979;            // WRONG
+//   int i = * reinterpret_cast<int*>(&f);  // WRONG
+//
+// The address-casting method actually produces undefined behavior according to
+// the ISO C++98 specification, section 3.10 ("basic.lval"), paragraph 15.
+// (This did not substantially change in C++11.)  Roughly, this section says: if
+// an object in memory has one type, and a program accesses it with a different
+// type, then the result is undefined behavior for most values of "different
+// type".
+//
+// This is true for any cast syntax, either *(int*)&f or
+// *reinterpret_cast<int*>(&f).  And it is particularly true for conversions
+// between integral lvalues and floating-point lvalues.
+//
+// The purpose of this paragraph is to allow optimizing compilers to assume that
+// expressions with different types refer to different memory.  Compilers are
+// known to take advantage of this.  So a non-conforming program quietly
+// produces wildly incorrect output.
+//
+// The problem is not the use of reinterpret_cast.  The problem is type punning:
+// holding an object in memory of one type and reading its bits back using a
+// different type.
+//
+// The C++ standard is more subtle and complex than this, but that is the basic
+// idea.
+//
+// Anyways ...
+//
+// bit_cast<> calls memcpy() which is blessed by the standard, especially by the
+// example in section 3.9.  Also, of course, bit_cast<> wraps up the nasty
+// logic in one place.
+//
+// Fortunately memcpy() is very fast.  In optimized mode, compilers replace
+// calls to memcpy() with inline object code when the size argument is a
+// compile-time constant.  On a 32-bit system, memcpy(d,s,4) compiles to one
+// load and one store, and memcpy(d,s,8) compiles to two loads and two stores.
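+//
+// A minimal round-trip sketch (the bit pattern assumes IEEE 754 doubles):
+//
+//   double d = 1.0;
+//   uint64_t b = bit_cast<uint64_t>(d);  // b == 0x3ff0000000000000
+//   double d2 = bit_cast<double>(b);     // bit-exact round trip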
+
+template <class Dest, class Source>
+inline Dest bit_cast(const Source& source) {
+  static_assert(sizeof(Dest) == sizeof(Source),
+                "bit_cast requires source and destination to be the same size");
+  static_assert(base::is_trivially_copyable<Dest>::value,
+                "bit_cast requires the destination type to be trivially "
+                "copyable");
+  static_assert(base::is_trivially_copyable<Source>::value,
+                "bit_cast requires the source type to be trivially copyable");
+
+  Dest dest;
+  memcpy(&dest, &source, sizeof(dest));
+  return dest;
+}
+
+#endif  // BASE_BIT_CAST_H_
diff --git a/base/bit_cast_unittest.cc b/base/bit_cast_unittest.cc
new file mode 100644
index 0000000..f36d3fe
--- /dev/null
+++ b/base/bit_cast_unittest.cc
@@ -0,0 +1,31 @@
+// Copyright (c) 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+#include "base/bit_cast.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+TEST(BitCastTest, FloatIntFloat) {
+  float f = 3.1415926f;
+  int i = bit_cast<int32_t>(f);
+  float f2 = bit_cast<float>(i);
+  EXPECT_EQ(f, f2);
+}
+
+struct A {
+  int x;
+};
+
+TEST(BitCastTest, StructureInt) {
+  A a = { 1 };
+  int b = bit_cast<int>(a);
+  EXPECT_EQ(1, b);
+}
+
+}  // namespace
+}  // namespace base
diff --git a/base/bits.h b/base/bits.h
new file mode 100644
index 0000000..a1c8b5d
--- /dev/null
+++ b/base/bits.h
@@ -0,0 +1,184 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file defines some bit utilities.
+
+#ifndef BASE_BITS_H_
+#define BASE_BITS_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "build/build_config.h"
+
+#if defined(COMPILER_MSVC)
+#include <intrin.h>
+#endif
+
+namespace base {
+namespace bits {
+
+// Returns true iff |value| is a power of 2.
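+// For example, IsPowerOfTwo(1), IsPowerOfTwo(2) and IsPowerOfTwo(4096) return
+// true, while IsPowerOfTwo(0), IsPowerOfTwo(3) and IsPowerOfTwo(-2) return
+// false.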
+template <typename T,
+          typename =
+              typename std::enable_if<std::is_integral<T>::value>::type>
+constexpr inline bool IsPowerOfTwo(T value) {
+  // From "Hacker's Delight": Section 2.1 Manipulating Rightmost Bits.
+  //
+  // Only positive integers with a single bit set are powers of two. If only one
+  // bit is set in x (e.g. 0b00000100000000) then |x-1| will have that bit set
+  // to zero and all bits to its right set to 1 (e.g. 0b00000011111111). Hence
+  // |x & (x-1)| is 0 iff x is a power of two.
+  return value > 0 && (value & (value - 1)) == 0;
+}
+
+// Round up |size| to a multiple of alignment, which must be a power of two.
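+// For example, Align(13, 8) == 16 and Align(16, 8) == 16.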
+inline size_t Align(size_t size, size_t alignment) {
+  DCHECK(IsPowerOfTwo(alignment));
+  return (size + alignment - 1) & ~(alignment - 1);
+}
+
+// CountLeadingZeroBits(value) returns the number of zero bits preceding the
+// most significant 1 bit in |value| if |value| is non-zero, otherwise it
+// returns {sizeof(T) * 8}.
+// Example: 00100010 -> 2
+//
+// CountTrailingZeroBits(value) returns the number of zero bits following the
+// least significant 1 bit in |value| if |value| is non-zero, otherwise it
+// returns {sizeof(T) * 8}.
+// Example: 00100010 -> 1
+//
+// C does not have an operator to do this, but fortunately the various
+// compilers have built-ins that map to fast underlying processor instructions.
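+//
+// For illustration:
+//   CountLeadingZeroBits(uint32_t{1}) == 31
+//   CountTrailingZeroBits(uint32_t{1} << 31) == 31
+//   CountLeadingZeroBits(uint32_t{0}) == 32  // All bits are zero.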
+#if defined(COMPILER_MSVC)
+
+template <typename T, unsigned bits = sizeof(T) * 8>
+ALWAYS_INLINE
+    typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) <= 4,
+                            unsigned>::type
+    CountLeadingZeroBits(T x) {
+  static_assert(bits > 0, "invalid instantiation");
+  unsigned long index;
+  return LIKELY(_BitScanReverse(&index, static_cast<uint32_t>(x)))
+             ? (31 - index - (32 - bits))
+             : bits;
+}
+
+template <typename T, unsigned bits = sizeof(T) * 8>
+ALWAYS_INLINE
+    typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) == 8,
+                            unsigned>::type
+    CountLeadingZeroBits(T x) {
+  static_assert(bits > 0, "invalid instantiation");
+  unsigned long index;
+  return LIKELY(_BitScanReverse64(&index, static_cast<uint64_t>(x)))
+             ? (63 - index)
+             : 64;
+}
+
+template <typename T, unsigned bits = sizeof(T) * 8>
+ALWAYS_INLINE
+    typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) <= 4,
+                            unsigned>::type
+    CountTrailingZeroBits(T x) {
+  static_assert(bits > 0, "invalid instantiation");
+  unsigned long index;
+  return LIKELY(_BitScanForward(&index, static_cast<uint32_t>(x))) ? index
+                                                                   : bits;
+}
+
+template <typename T, unsigned bits = sizeof(T) * 8>
+ALWAYS_INLINE
+    typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) == 8,
+                            unsigned>::type
+    CountTrailingZeroBits(T x) {
+  static_assert(bits > 0, "invalid instantiation");
+  unsigned long index;
+  return LIKELY(_BitScanForward64(&index, static_cast<uint64_t>(x))) ? index
+                                                                     : 64;
+}
+
+ALWAYS_INLINE uint32_t CountLeadingZeroBits32(uint32_t x) {
+  return CountLeadingZeroBits(x);
+}
+
+#if defined(ARCH_CPU_64_BITS)
+
+// MSVC only supplies the 64-bit bit-scan intrinsics (_BitScanReverse64 and
+// _BitScanForward64) when building for a 64-bit target.
+ALWAYS_INLINE uint64_t CountLeadingZeroBits64(uint64_t x) {
+  return CountLeadingZeroBits(x);
+}
+
+#endif
+
+#elif defined(COMPILER_GCC)
+
+// __builtin_clz has undefined behaviour for an input of 0, even though there's
+// clearly a return value that makes sense, and even though some processor clz
+// instructions have defined behaviour for 0. We could drop to raw __asm__ to
+// do better, but we'll avoid doing that unless we see proof that we need to.
+template <typename T, unsigned bits = sizeof(T) * 8>
+ALWAYS_INLINE
+    typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) <= 8,
+                            unsigned>::type
+    CountLeadingZeroBits(T value) {
+  static_assert(bits > 0, "invalid instantiation");
+  return LIKELY(value)
+             ? bits == 64
+                   ? __builtin_clzll(static_cast<uint64_t>(value))
+                   : __builtin_clz(static_cast<uint32_t>(value)) - (32 - bits)
+             : bits;
+}
+
+template <typename T, unsigned bits = sizeof(T) * 8>
+ALWAYS_INLINE
+    typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) <= 8,
+                            unsigned>::type
+    CountTrailingZeroBits(T value) {
+  return LIKELY(value) ? bits == 64
+                             ? __builtin_ctzll(static_cast<uint64_t>(value))
+                             : __builtin_ctz(static_cast<uint32_t>(value))
+                       : bits;
+}
+
+ALWAYS_INLINE uint32_t CountLeadingZeroBits32(uint32_t x) {
+  return CountLeadingZeroBits(x);
+}
+
+#if defined(ARCH_CPU_64_BITS)
+
+ALWAYS_INLINE uint64_t CountLeadingZeroBits64(uint64_t x) {
+  return CountLeadingZeroBits(x);
+}
+
+#endif
+
+#endif
+
+ALWAYS_INLINE size_t CountLeadingZeroBitsSizeT(size_t x) {
+  return CountLeadingZeroBits(x);
+}
+
+ALWAYS_INLINE size_t CountTrailingZeroBitsSizeT(size_t x) {
+  return CountTrailingZeroBits(x);
+}
+
+// Returns the integer i such that 2^i <= n < 2^(i+1), or -1 if n == 0.
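+// For example, Log2Floor(4) == 2 and Log2Floor(7) == 2.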
+inline int Log2Floor(uint32_t n) {
+  return 31 - CountLeadingZeroBits(n);
+}
+
+// Returns the integer i such that 2^(i-1) < n <= 2^i, or -1 if n == 0.
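+// For example, Log2Ceiling(4) == 2 and Log2Ceiling(7) == 3.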
+inline int Log2Ceiling(uint32_t n) {
+  // When n == 0, we want the function to return -1.
+  // When n == 0, (n - 1) will underflow to 0xFFFFFFFF, which is
+  // why the statement below starts with (n ? 32 : -1).
+  return (n ? 32 : -1) - CountLeadingZeroBits(n - 1);
+}
+
+}  // namespace bits
+}  // namespace base
+
+#endif  // BASE_BITS_H_
diff --git a/base/bits_unittest.cc b/base/bits_unittest.cc
new file mode 100644
index 0000000..98b9c08
--- /dev/null
+++ b/base/bits_unittest.cc
@@ -0,0 +1,197 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the unit tests for the bit utilities.
+
+#include "base/bits.h"
+#include "build/build_config.h"
+
+#include <stddef.h>
+
+#include <limits>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace bits {
+
+TEST(BitsTest, Log2Floor) {
+  EXPECT_EQ(-1, Log2Floor(0));
+  EXPECT_EQ(0, Log2Floor(1));
+  EXPECT_EQ(1, Log2Floor(2));
+  EXPECT_EQ(1, Log2Floor(3));
+  EXPECT_EQ(2, Log2Floor(4));
+  for (int i = 3; i < 31; ++i) {
+    unsigned int value = 1U << i;
+    EXPECT_EQ(i, Log2Floor(value));
+    EXPECT_EQ(i, Log2Floor(value + 1));
+    EXPECT_EQ(i, Log2Floor(value + 2));
+    EXPECT_EQ(i - 1, Log2Floor(value - 1));
+    EXPECT_EQ(i - 1, Log2Floor(value - 2));
+  }
+  EXPECT_EQ(31, Log2Floor(0xffffffffU));
+}
+
+TEST(BitsTest, Log2Ceiling) {
+  EXPECT_EQ(-1, Log2Ceiling(0));
+  EXPECT_EQ(0, Log2Ceiling(1));
+  EXPECT_EQ(1, Log2Ceiling(2));
+  EXPECT_EQ(2, Log2Ceiling(3));
+  EXPECT_EQ(2, Log2Ceiling(4));
+  for (int i = 3; i < 31; ++i) {
+    unsigned int value = 1U << i;
+    EXPECT_EQ(i, Log2Ceiling(value));
+    EXPECT_EQ(i + 1, Log2Ceiling(value + 1));
+    EXPECT_EQ(i + 1, Log2Ceiling(value + 2));
+    EXPECT_EQ(i, Log2Ceiling(value - 1));
+    EXPECT_EQ(i, Log2Ceiling(value - 2));
+  }
+  EXPECT_EQ(32, Log2Ceiling(0xffffffffU));
+}
+
+TEST(BitsTest, Align) {
+  const size_t kSizeTMax = std::numeric_limits<size_t>::max();
+  EXPECT_EQ(0ul, Align(0, 4));
+  EXPECT_EQ(4ul, Align(1, 4));
+  EXPECT_EQ(4096ul, Align(1, 4096));
+  EXPECT_EQ(4096ul, Align(4096, 4096));
+  EXPECT_EQ(4096ul, Align(4095, 4096));
+  EXPECT_EQ(8192ul, Align(4097, 4096));
+  EXPECT_EQ(kSizeTMax - 31, Align(kSizeTMax - 62, 32));
+  EXPECT_EQ(kSizeTMax / 2 + 1, Align(1, kSizeTMax / 2 + 1));
+}
+
+TEST(BitsTest, CountLeadingZeroBits8) {
+  EXPECT_EQ(8u, CountLeadingZeroBits(uint8_t{0}));
+  EXPECT_EQ(7u, CountLeadingZeroBits(uint8_t{1}));
+  for (uint8_t shift = 0; shift <= 7; shift++) {
+    EXPECT_EQ(7u - shift,
+              CountLeadingZeroBits(static_cast<uint8_t>(1 << shift)));
+  }
+  EXPECT_EQ(4u, CountLeadingZeroBits(uint8_t{0x0f}));
+}
+
+TEST(BitsTest, CountLeadingZeroBits16) {
+  EXPECT_EQ(16u, CountLeadingZeroBits(uint16_t{0}));
+  EXPECT_EQ(15u, CountLeadingZeroBits(uint16_t{1}));
+  for (uint16_t shift = 0; shift <= 15; shift++) {
+    EXPECT_EQ(15u - shift,
+              CountLeadingZeroBits(static_cast<uint16_t>(1 << shift)));
+  }
+  EXPECT_EQ(4u, CountLeadingZeroBits(uint16_t{0x0f0f}));
+}
+
+TEST(BitsTest, CountLeadingZeroBits32) {
+  EXPECT_EQ(32u, CountLeadingZeroBits(uint32_t{0}));
+  EXPECT_EQ(31u, CountLeadingZeroBits(uint32_t{1}));
+  for (uint32_t shift = 0; shift <= 31; shift++) {
+    EXPECT_EQ(31u - shift, CountLeadingZeroBits(uint32_t{1} << shift));
+  }
+  EXPECT_EQ(4u, CountLeadingZeroBits(uint32_t{0x0f0f0f0f}));
+}
+
+TEST(BitsTest, CountTrailingZeroBits8) {
+  EXPECT_EQ(8u, CountTrailingZeroBits(uint8_t{0}));
+  EXPECT_EQ(7u, CountTrailingZeroBits(uint8_t{128}));
+  for (uint8_t shift = 0; shift <= 7; shift++) {
+    EXPECT_EQ(shift, CountTrailingZeroBits(static_cast<uint8_t>(1 << shift)));
+  }
+  EXPECT_EQ(4u, CountTrailingZeroBits(uint8_t{0xf0}));
+}
+
+TEST(BitsTest, CountTrailingZeroBits16) {
+  EXPECT_EQ(16u, CountTrailingZeroBits(uint16_t{0}));
+  EXPECT_EQ(15u, CountTrailingZeroBits(uint16_t{32768}));
+  for (uint16_t shift = 0; shift <= 15; shift++) {
+    EXPECT_EQ(shift, CountTrailingZeroBits(static_cast<uint16_t>(1 << shift)));
+  }
+  EXPECT_EQ(4u, CountTrailingZeroBits(uint16_t{0xf0f0}));
+}
+
+TEST(BitsTest, CountTrailingZeroBits32) {
+  EXPECT_EQ(32u, CountTrailingZeroBits(uint32_t{0}));
+  EXPECT_EQ(31u, CountTrailingZeroBits(uint32_t{1} << 31));
+  for (uint32_t shift = 0; shift <= 31; shift++) {
+    EXPECT_EQ(shift, CountTrailingZeroBits(uint32_t{1} << shift));
+  }
+  EXPECT_EQ(4u, CountTrailingZeroBits(uint32_t{0xf0f0f0f0}));
+}
+
+#if defined(ARCH_CPU_64_BITS)
+
+TEST(BitsTest, CountLeadingZeroBits64) {
+  EXPECT_EQ(64u, CountLeadingZeroBits(uint64_t{0}));
+  EXPECT_EQ(63u, CountLeadingZeroBits(uint64_t{1}));
+  for (uint64_t shift = 0; shift <= 63; shift++) {
+    EXPECT_EQ(63u - shift, CountLeadingZeroBits(uint64_t{1} << shift));
+  }
+  EXPECT_EQ(4u, CountLeadingZeroBits(uint64_t{0x0f0f0f0f0f0f0f0f}));
+}
+
+TEST(BitsTest, CountTrailingZeroBits64) {
+  EXPECT_EQ(64u, CountTrailingZeroBits(uint64_t{0}));
+  EXPECT_EQ(63u, CountTrailingZeroBits(uint64_t{1} << 63));
+  for (uint64_t shift = 0; shift <= 63; shift++) {
+    EXPECT_EQ(shift, CountTrailingZeroBits(uint64_t{1} << shift));
+  }
+  EXPECT_EQ(4u, CountTrailingZeroBits(uint64_t{0xf0f0f0f0f0f0f0f0}));
+}
+
+#endif  // ARCH_CPU_64_BITS
+
+TEST(BitsTest, CountLeadingZeroBitsSizeT) {
+#if defined(ARCH_CPU_64_BITS)
+  EXPECT_EQ(64u, CountLeadingZeroBitsSizeT(size_t{0}));
+  EXPECT_EQ(63u, CountLeadingZeroBitsSizeT(size_t{1}));
+  EXPECT_EQ(32u, CountLeadingZeroBitsSizeT(size_t{1} << 31));
+  EXPECT_EQ(1u, CountLeadingZeroBitsSizeT(size_t{1} << 62));
+  EXPECT_EQ(0u, CountLeadingZeroBitsSizeT(size_t{1} << 63));
+#else
+  EXPECT_EQ(32u, CountLeadingZeroBitsSizeT(size_t{0}));
+  EXPECT_EQ(31u, CountLeadingZeroBitsSizeT(size_t{1}));
+  EXPECT_EQ(1u, CountLeadingZeroBitsSizeT(size_t{1} << 30));
+  EXPECT_EQ(0u, CountLeadingZeroBitsSizeT(size_t{1} << 31));
+#endif  // ARCH_CPU_64_BITS
+}
+
+TEST(BitsTest, CountTrailingZeroBitsSizeT) {
+#if defined(ARCH_CPU_64_BITS)
+  EXPECT_EQ(64u, CountTrailingZeroBitsSizeT(size_t{0}));
+  EXPECT_EQ(63u, CountTrailingZeroBitsSizeT(size_t{1} << 63));
+  EXPECT_EQ(31u, CountTrailingZeroBitsSizeT(size_t{1} << 31));
+  EXPECT_EQ(1u, CountTrailingZeroBitsSizeT(size_t{2}));
+  EXPECT_EQ(0u, CountTrailingZeroBitsSizeT(size_t{1}));
+#else
+  EXPECT_EQ(32u, CountTrailingZeroBitsSizeT(size_t{0}));
+  EXPECT_EQ(31u, CountTrailingZeroBitsSizeT(size_t{1} << 31));
+  EXPECT_EQ(1u, CountTrailingZeroBitsSizeT(size_t{2}));
+  EXPECT_EQ(0u, CountTrailingZeroBitsSizeT(size_t{1}));
+#endif  // ARCH_CPU_64_BITS
+}
+
+TEST(BitsTest, PowerOfTwo) {
+  EXPECT_FALSE(IsPowerOfTwo(-1));
+  EXPECT_FALSE(IsPowerOfTwo(0));
+  EXPECT_TRUE(IsPowerOfTwo(1));
+  EXPECT_TRUE(IsPowerOfTwo(2));
+  // Unsigned 64 bit cases.
+  for (uint32_t i = 2; i < 64; i++) {
+    const uint64_t val = uint64_t{1} << i;
+    EXPECT_FALSE(IsPowerOfTwo(val - 1));
+    EXPECT_TRUE(IsPowerOfTwo(val));
+    EXPECT_FALSE(IsPowerOfTwo(val + 1));
+  }
+  // Signed 64 bit cases.
+  for (uint32_t i = 2; i < 63; i++) {
+    const int64_t val = int64_t{1} << i;
+    EXPECT_FALSE(IsPowerOfTwo(val - 1));
+    EXPECT_TRUE(IsPowerOfTwo(val));
+    EXPECT_FALSE(IsPowerOfTwo(val + 1));
+  }
+  // Signed integers with only the last bit set are negative, not powers of two.
+  EXPECT_FALSE(IsPowerOfTwo(int64_t{1} << 63));
+}
+
+}  // namespace bits
+}  // namespace base
diff --git a/base/build_time.cc b/base/build_time.cc
new file mode 100644
index 0000000..834b041
--- /dev/null
+++ b/base/build_time.cc
@@ -0,0 +1,25 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/build_time.h"
+
+// Imports the generated build date, i.e. BUILD_DATE.
+#include "base/generated_build_date.h"
+
+#include "base/logging.h"
+#include "base/time/time.h"
+
+namespace base {
+
+Time GetBuildTime() {
+  Time integral_build_time;
+  // BUILD_DATE is exactly "Mmm DD YYYY HH:MM:SS".
+  // See //build/write_build_date_header.py. "HH:MM:SS" is normally expected to
+  // be "05:00:00" but is not enforced here.
+  bool result = Time::FromUTCString(BUILD_DATE, &integral_build_time);
+  DCHECK(result);
+  return integral_build_time;
+}
+
+}  // namespace base
diff --git a/base/build_time.h b/base/build_time.h
new file mode 100644
index 0000000..83c9875
--- /dev/null
+++ b/base/build_time.h
@@ -0,0 +1,30 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_BUILD_TIME_H_
+#define BASE_BUILD_TIME_H_
+
+#include "base/base_export.h"
+#include "base/time/time.h"
+
+namespace base {
+
+// GetBuildTime returns the time at which the current binary was built,
+// rounded down to 5:00:00am at the start of the day in UTC.
+//
+// This uses a generated file, which doesn't trigger a rebuild when the time
+// changes. It will, however, be updated whenever //build/util/LASTCHANGE
+// changes.
+//
+// This value should only be considered accurate to within a day.
+// It will always be in the past.
+//
+// Note: If the build is not official (i.e. is_official_build = false)
+// this time will be set to 5:00:00am on the most recent first Sunday
+// of a month.
+Time BASE_EXPORT GetBuildTime();
+
+}  // namespace base
+
+#endif  // BASE_BUILD_TIME_H_
diff --git a/base/build_time_unittest.cc b/base/build_time_unittest.cc
new file mode 100644
index 0000000..3a35736
--- /dev/null
+++ b/base/build_time_unittest.cc
@@ -0,0 +1,37 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/build_time.h"
+#include "base/generated_build_date.h"
+#include "base/time/time.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+TEST(BuildTime, DateLooksValid) {
+  char build_date[] = BUILD_DATE;
+
+  EXPECT_EQ(20u, strlen(build_date));
+  EXPECT_EQ(' ', build_date[3]);
+  EXPECT_EQ(' ', build_date[6]);
+  EXPECT_EQ(' ', build_date[11]);
+  EXPECT_EQ('0', build_date[12]);
+  EXPECT_EQ('5', build_date[13]);
+  EXPECT_EQ(':', build_date[14]);
+  EXPECT_EQ('0', build_date[15]);
+  EXPECT_EQ('0', build_date[16]);
+  EXPECT_EQ(':', build_date[17]);
+  EXPECT_EQ('0', build_date[18]);
+  EXPECT_EQ('0', build_date[19]);
+}
+
+TEST(BuildTime, InThePast) {
+  EXPECT_LT(base::GetBuildTime(), base::Time::Now());
+  EXPECT_LT(base::GetBuildTime(), base::Time::NowFromSystemTime());
+}
+
+TEST(BuildTime, NotTooFar) {
+  // BuildTime must be less than 45 days old.
+  base::Time cutoff(base::Time::Now() - base::TimeDelta::FromDays(45));
+  EXPECT_GT(base::GetBuildTime(), cutoff);
+}
diff --git a/base/callback.h b/base/callback.h
new file mode 100644
index 0000000..00675be
--- /dev/null
+++ b/base/callback.h
@@ -0,0 +1,142 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// NOTE: Header files that do not require the full definition of Callback or
+// Closure should #include "base/callback_forward.h" instead of this file.
+
+#ifndef BASE_CALLBACK_H_
+#define BASE_CALLBACK_H_
+
+#include "base/callback_forward.h"
+#include "base/callback_internal.h"
+
+// -----------------------------------------------------------------------------
+// Usage documentation
+// -----------------------------------------------------------------------------
+//
+// Overview:
+// A callback is similar in concept to a function pointer: it wraps a runnable
+// object such as a function, method, lambda, or even another callback, allowing
+// the runnable object to be invoked later via the callback object.
+//
+// Unlike function pointers, callbacks are created with base::BindOnce() or
+// base::BindRepeating() and support partial function application.
+//
+// A base::OnceCallback may be Run() at most once; a base::RepeatingCallback may
+// be Run() any number of times. |is_null()| is guaranteed to return true for a
+// moved-from callback.
+//
+//   // The lambda takes two arguments, but the first argument |x| is bound at
+//   // callback creation.
+//   base::OnceCallback<int(int)> cb = base::BindOnce([] (int x, int y) {
+//     return x + y;
+//   }, 1);
+//   // Run() only needs the remaining unbound argument |y|.
+//   printf("1 + 2 = %d\n", std::move(cb).Run(2));  // Prints 3
+//   printf("cb is null? %s\n",
+//          cb.is_null() ? "true" : "false");  // Prints true
+//   std::move(cb).Run(2);  // Crashes since |cb| has already run.
+//
+// Callbacks also support cancellation. A common use is binding the receiver
+// object as a WeakPtr<T>. If that weak pointer is invalidated, calling Run()
+// will be a no-op. Note that |IsCancelled()| and |is_null()| are distinct:
+// simply cancelling a callback will not also make it null.
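+//
+//   // A sketch of cancellation; assumes a hypothetical class C that owns a
+//   // base::WeakPtrFactory<C> named |weak_factory_| and a method DoStuff().
+//   std::unique_ptr<C> c = std::make_unique<C>();
+//   auto cb = base::BindOnce(&C::DoStuff, c->weak_factory_.GetWeakPtr());
+//   c.reset();            // Invalidates the weak pointer.
+//   std::move(cb).Run();  // No-op: the bound WeakPtr was invalidated.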
+//
+// base::Callback is currently a type alias for base::RepeatingCallback. In the
+// future, we expect to flip this to default to base::OnceCallback.
+//
+// See //docs/callback.md for the full documentation.
+
+namespace base {
+
+template <typename R, typename... Args>
+class OnceCallback<R(Args...)> : public internal::CallbackBase {
+ public:
+  using RunType = R(Args...);
+  using PolymorphicInvoke = R (*)(internal::BindStateBase*,
+                                  internal::PassingTraitsType<Args>...);
+
+  constexpr OnceCallback() = default;
+
+  explicit OnceCallback(internal::BindStateBase* bind_state)
+      : internal::CallbackBase(bind_state) {}
+
+  OnceCallback(const OnceCallback&) = delete;
+  OnceCallback& operator=(const OnceCallback&) = delete;
+
+  OnceCallback(OnceCallback&&) noexcept = default;
+  OnceCallback& operator=(OnceCallback&&) noexcept = default;
+
+  OnceCallback(RepeatingCallback<RunType> other)
+      : internal::CallbackBase(std::move(other)) {}
+
+  OnceCallback& operator=(RepeatingCallback<RunType> other) {
+    static_cast<internal::CallbackBase&>(*this) = std::move(other);
+    return *this;
+  }
+
+  bool Equals(const OnceCallback& other) const { return EqualsInternal(other); }
+
+  R Run(Args... args) const & {
+    static_assert(!sizeof(*this),
+                  "OnceCallback::Run() may only be invoked on a non-const "
+                  "rvalue, i.e. std::move(callback).Run().");
+    NOTREACHED();
+  }
+
+  R Run(Args... args) && {
+    // Move the callback instance into a local variable before the invocation.
+    // This ensures the internal state is cleared after the invocation.
+    // It's not safe to touch |this| after the invocation, since running the
+    // bound function may destroy |this|.
+    OnceCallback cb = std::move(*this);
+    PolymorphicInvoke f =
+        reinterpret_cast<PolymorphicInvoke>(cb.polymorphic_invoke());
+    return f(cb.bind_state_.get(), std::forward<Args>(args)...);
+  }
+};
+
+template <typename R, typename... Args>
+class RepeatingCallback<R(Args...)> : public internal::CallbackBaseCopyable {
+ public:
+  using RunType = R(Args...);
+  using PolymorphicInvoke = R (*)(internal::BindStateBase*,
+                                  internal::PassingTraitsType<Args>...);
+
+  constexpr RepeatingCallback() = default;
+
+  explicit RepeatingCallback(internal::BindStateBase* bind_state)
+      : internal::CallbackBaseCopyable(bind_state) {}
+
+  // Copyable and movable.
+  RepeatingCallback(const RepeatingCallback&) = default;
+  RepeatingCallback& operator=(const RepeatingCallback&) = default;
+  RepeatingCallback(RepeatingCallback&&) noexcept = default;
+  RepeatingCallback& operator=(RepeatingCallback&&) noexcept = default;
+
+  bool Equals(const RepeatingCallback& other) const {
+    return EqualsInternal(other);
+  }
+
+  R Run(Args... args) const & {
+    PolymorphicInvoke f =
+        reinterpret_cast<PolymorphicInvoke>(this->polymorphic_invoke());
+    return f(this->bind_state_.get(), std::forward<Args>(args)...);
+  }
+
+  R Run(Args... args) && {
+    // Move the callback instance into a local variable before the invocation.
+    // This ensures the internal state is cleared after the invocation.
+    // It's not safe to touch |this| after the invocation, since running the
+    // bound function may destroy |this|.
+    RepeatingCallback cb = std::move(*this);
+    PolymorphicInvoke f =
+        reinterpret_cast<PolymorphicInvoke>(cb.polymorphic_invoke());
+    return f(cb.bind_state_.get(), std::forward<Args>(args)...);
+  }
+};
+
+}  // namespace base
+
+#endif  // BASE_CALLBACK_H_
diff --git a/base/callback_forward.h b/base/callback_forward.h
new file mode 100644
index 0000000..f1851c4
--- /dev/null
+++ b/base/callback_forward.h
@@ -0,0 +1,27 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CALLBACK_FORWARD_H_
+#define BASE_CALLBACK_FORWARD_H_
+
+namespace base {
+
+template <typename Signature>
+class OnceCallback;
+
+template <typename Signature>
+class RepeatingCallback;
+
+template <typename Signature>
+using Callback = RepeatingCallback<Signature>;
+
+// Syntactic sugar to make Callback<void()> easier to declare since it
+// will be used in a lot of APIs with delayed execution.
+using OnceClosure = OnceCallback<void()>;
+using RepeatingClosure = RepeatingCallback<void()>;
+using Closure = Callback<void()>;
+
+}  // namespace base
+
+#endif  // BASE_CALLBACK_FORWARD_H_
diff --git a/base/callback_helpers.cc b/base/callback_helpers.cc
new file mode 100644
index 0000000..9086731
--- /dev/null
+++ b/base/callback_helpers.cc
@@ -0,0 +1,40 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/callback_helpers.h"
+
+namespace base {
+
+ScopedClosureRunner::ScopedClosureRunner() = default;
+
+ScopedClosureRunner::ScopedClosureRunner(OnceClosure closure)
+    : closure_(std::move(closure)) {}
+
+ScopedClosureRunner::~ScopedClosureRunner() {
+  if (!closure_.is_null())
+    std::move(closure_).Run();
+}
+
+ScopedClosureRunner::ScopedClosureRunner(ScopedClosureRunner&& other)
+    : closure_(other.Release()) {}
+
+ScopedClosureRunner& ScopedClosureRunner::operator=(
+    ScopedClosureRunner&& other) {
+  ReplaceClosure(other.Release());
+  return *this;
+}
+
+void ScopedClosureRunner::RunAndReset() {
+  std::move(closure_).Run();
+}
+
+void ScopedClosureRunner::ReplaceClosure(OnceClosure closure) {
+  closure_ = std::move(closure);
+}
+
+OnceClosure ScopedClosureRunner::Release() {
+  return std::move(closure_);
+}
+
+}  // namespace base
diff --git a/base/callback_helpers.h b/base/callback_helpers.h
new file mode 100644
index 0000000..0cdda6d
--- /dev/null
+++ b/base/callback_helpers.h
@@ -0,0 +1,104 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This defines helpful methods for dealing with Callbacks.  Because Callbacks
+// are implemented using templates, with a class per callback signature, adding
+// methods to Callback<> itself is unattractive (lots of extra code gets
+// generated).  Instead, consider adding methods here.
+
+#ifndef BASE_CALLBACK_HELPERS_H_
+#define BASE_CALLBACK_HELPERS_H_
+
+#include <memory>
+#include <utility>
+
+#include "base/atomicops.h"
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+
+namespace base {
+
+// Prefer std::move() over ResetAndReturn().
+template <typename CallbackType>
+CallbackType ResetAndReturn(CallbackType* cb) {
+  CallbackType ret(std::move(*cb));
+  DCHECK(!*cb);
+  return ret;
+}
+
+namespace internal {
+
+template <typename... Args>
+class AdaptCallbackForRepeatingHelper final {
+ public:
+  explicit AdaptCallbackForRepeatingHelper(OnceCallback<void(Args...)> callback)
+      : callback_(std::move(callback)) {
+    DCHECK(callback_);
+  }
+
+  void Run(Args... args) {
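+    // Atomically claim the single allowed invocation: the exchange returns
+    // the previous value of |has_run_|, so only the first caller sees 0 and
+    // proceeds; later calls are ignored.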
+    if (subtle::NoBarrier_AtomicExchange(&has_run_, 1))
+      return;
+    DCHECK(callback_);
+    std::move(callback_).Run(std::forward<Args>(args)...);
+  }
+
+ private:
+  volatile subtle::Atomic32 has_run_ = 0;
+  base::OnceCallback<void(Args...)> callback_;
+
+  DISALLOW_COPY_AND_ASSIGN(AdaptCallbackForRepeatingHelper);
+};
+
+}  // namespace internal
+
+// Wraps the given OnceCallback into a RepeatingCallback that relays its
+// invocation to the original OnceCallback on the first invocation. Subsequent
+// invocations are ignored.
+//
+// Note that this deliberately subverts the Once/Repeating paradigm of Callbacks
+// but helps ease the migration from old-style Callbacks. Avoid if possible; use
+// if necessary for migration. TODO(tzik): Remove it. https://crbug.com/730593
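+//
+// Example usage (a sketch):
+//
+//   base::OnceClosure once_cb = base::BindOnce([] { /* do work */ });
+//   base::RepeatingClosure repeating_cb =
+//       base::AdaptCallbackForRepeating(std::move(once_cb));
+//   repeating_cb.Run();  // Runs the original callback.
+//   repeating_cb.Run();  // Ignored; the original callback already ran.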
+template <typename... Args>
+RepeatingCallback<void(Args...)> AdaptCallbackForRepeating(
+    OnceCallback<void(Args...)> callback) {
+  using Helper = internal::AdaptCallbackForRepeatingHelper<Args...>;
+  return base::BindRepeating(&Helper::Run,
+                             std::make_unique<Helper>(std::move(callback)));
+}
+
+// ScopedClosureRunner is akin to std::unique_ptr<> for Closures. It ensures
+// that the Closure is executed no matter how the current scope exits.
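+//
+// A minimal sketch, assuming a hypothetical CloseLog() cleanup helper:
+//
+//   {
+//     base::ScopedClosureRunner runner(base::BindOnce(&CloseLog));
+//     // ... work that may exit early ...
+//   }  // CloseLog() runs here, no matter how the scope was exited.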
+class BASE_EXPORT ScopedClosureRunner {
+ public:
+  ScopedClosureRunner();
+  explicit ScopedClosureRunner(OnceClosure closure);
+  ~ScopedClosureRunner();
+
+  ScopedClosureRunner(ScopedClosureRunner&& other);
+
+  // Releases the current closure if it's set and replaces it with the closure
+  // from |other|.
+  ScopedClosureRunner& operator=(ScopedClosureRunner&& other);
+
+  // Calls the current closure and resets it, so it won't be called again.
+  void RunAndReset();
+
+  // Replaces the current closure with the new one, releasing the old one
+  // without calling it.
+  void ReplaceClosure(OnceClosure closure);
+
+  // Releases the Closure without calling it.
+  OnceClosure Release() WARN_UNUSED_RESULT;
+
+ private:
+  OnceClosure closure_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedClosureRunner);
+};
+
+}  // namespace base
+
+#endif  // BASE_CALLBACK_HELPERS_H_
diff --git a/base/callback_helpers_unittest.cc b/base/callback_helpers_unittest.cc
new file mode 100644
index 0000000..1c1102d
--- /dev/null
+++ b/base/callback_helpers_unittest.cc
@@ -0,0 +1,127 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/callback_helpers.h"
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+void Increment(int* value) {
+  (*value)++;
+}
+
+TEST(CallbackHelpersTest, TestResetAndReturn) {
+  int run_count = 0;
+
+  base::Closure cb = base::Bind(&Increment, &run_count);
+  EXPECT_EQ(0, run_count);
+  base::ResetAndReturn(&cb).Run();
+  EXPECT_EQ(1, run_count);
+  EXPECT_FALSE(cb);
+
+  run_count = 0;
+
+  base::OnceClosure cb2 = base::BindOnce(&Increment, &run_count);
+  EXPECT_EQ(0, run_count);
+  base::ResetAndReturn(&cb2).Run();
+  EXPECT_EQ(1, run_count);
+  EXPECT_FALSE(cb2);
+}
+
+TEST(CallbackHelpersTest, TestScopedClosureRunnerExitScope) {
+  int run_count = 0;
+  {
+    base::ScopedClosureRunner runner(base::Bind(&Increment, &run_count));
+    EXPECT_EQ(0, run_count);
+  }
+  EXPECT_EQ(1, run_count);
+}
+
+TEST(CallbackHelpersTest, TestScopedClosureRunnerRelease) {
+  int run_count = 0;
+  base::OnceClosure c;
+  {
+    base::ScopedClosureRunner runner(base::Bind(&Increment, &run_count));
+    c = runner.Release();
+    EXPECT_EQ(0, run_count);
+  }
+  EXPECT_EQ(0, run_count);
+  std::move(c).Run();
+  EXPECT_EQ(1, run_count);
+}
+
+TEST(CallbackHelpersTest, TestScopedClosureRunnerReplaceClosure) {
+  int run_count_1 = 0;
+  int run_count_2 = 0;
+  {
+    base::ScopedClosureRunner runner;
+    runner.ReplaceClosure(base::Bind(&Increment, &run_count_1));
+    runner.ReplaceClosure(base::Bind(&Increment, &run_count_2));
+    EXPECT_EQ(0, run_count_1);
+    EXPECT_EQ(0, run_count_2);
+  }
+  EXPECT_EQ(0, run_count_1);
+  EXPECT_EQ(1, run_count_2);
+}
+
+TEST(CallbackHelpersTest, TestScopedClosureRunnerRunAndReset) {
+  int run_count_3 = 0;
+  {
+    base::ScopedClosureRunner runner(base::Bind(&Increment, &run_count_3));
+    EXPECT_EQ(0, run_count_3);
+    runner.RunAndReset();
+    EXPECT_EQ(1, run_count_3);
+  }
+  EXPECT_EQ(1, run_count_3);
+}
+
+TEST(CallbackHelpersTest, TestScopedClosureRunnerMoveConstructor) {
+  int run_count = 0;
+  {
+    std::unique_ptr<base::ScopedClosureRunner> runner(
+        new base::ScopedClosureRunner(base::Bind(&Increment, &run_count)));
+    base::ScopedClosureRunner runner2(std::move(*runner));
+    runner.reset();
+    EXPECT_EQ(0, run_count);
+  }
+  EXPECT_EQ(1, run_count);
+}
+
+TEST(CallbackHelpersTest, TestScopedClosureRunnerMoveAssignment) {
+  int run_count_1 = 0;
+  int run_count_2 = 0;
+  {
+    base::ScopedClosureRunner runner(base::Bind(&Increment, &run_count_1));
+    {
+      base::ScopedClosureRunner runner2(base::Bind(&Increment, &run_count_2));
+      runner = std::move(runner2);
+      EXPECT_EQ(0, run_count_1);
+      EXPECT_EQ(0, run_count_2);
+    }
+    EXPECT_EQ(0, run_count_1);
+    EXPECT_EQ(0, run_count_2);
+  }
+  EXPECT_EQ(0, run_count_1);
+  EXPECT_EQ(1, run_count_2);
+}
+
+TEST(CallbackHelpersTest, TestAdaptCallbackForRepeating) {
+  int count = 0;
+  base::OnceCallback<void(int*)> cb =
+      base::BindOnce([](int* count) { ++*count; });
+
+  base::RepeatingCallback<void(int*)> wrapped =
+      base::AdaptCallbackForRepeating(std::move(cb));
+
+  EXPECT_EQ(0, count);
+  wrapped.Run(&count);
+  EXPECT_EQ(1, count);
+  wrapped.Run(&count);
+  EXPECT_EQ(1, count);
+}
+
+}  // namespace
diff --git a/base/callback_internal.cc b/base/callback_internal.cc
new file mode 100644
index 0000000..6cef841
--- /dev/null
+++ b/base/callback_internal.cc
@@ -0,0 +1,94 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/callback_internal.h"
+
+#include "base/logging.h"
+
+namespace base {
+namespace internal {
+
+namespace {
+
+bool ReturnFalse(const BindStateBase*) {
+  return false;
+}
+
+}  // namespace
+
+void BindStateBaseRefCountTraits::Destruct(const BindStateBase* bind_state) {
+  bind_state->destructor_(bind_state);
+}
+
+BindStateBase::BindStateBase(InvokeFuncStorage polymorphic_invoke,
+                             void (*destructor)(const BindStateBase*))
+    : BindStateBase(polymorphic_invoke, destructor, &ReturnFalse) {
+}
+
+BindStateBase::BindStateBase(InvokeFuncStorage polymorphic_invoke,
+                             void (*destructor)(const BindStateBase*),
+                             bool (*is_cancelled)(const BindStateBase*))
+    : polymorphic_invoke_(polymorphic_invoke),
+      destructor_(destructor),
+      is_cancelled_(is_cancelled) {}
+
+CallbackBase::CallbackBase(CallbackBase&& c) noexcept = default;
+CallbackBase& CallbackBase::operator=(CallbackBase&& c) noexcept = default;
+CallbackBase::CallbackBase(const CallbackBaseCopyable& c)
+    : bind_state_(c.bind_state_) {}
+
+CallbackBase& CallbackBase::operator=(const CallbackBaseCopyable& c) {
+  bind_state_ = c.bind_state_;
+  return *this;
+}
+
+CallbackBase::CallbackBase(CallbackBaseCopyable&& c) noexcept
+    : bind_state_(std::move(c.bind_state_)) {}
+
+CallbackBase& CallbackBase::operator=(CallbackBaseCopyable&& c) noexcept {
+  bind_state_ = std::move(c.bind_state_);
+  return *this;
+}
+
+void CallbackBase::Reset() {
+  // NULL the bind_state_ last, since it may be holding the last ref to whatever
+  // object owns us, and we may be deleted after that.
+  bind_state_ = nullptr;
+}
+
+bool CallbackBase::IsCancelled() const {
+  DCHECK(bind_state_);
+  return bind_state_->IsCancelled();
+}
+
+bool CallbackBase::EqualsInternal(const CallbackBase& other) const {
+  return bind_state_ == other.bind_state_;
+}
+
+CallbackBase::CallbackBase(BindStateBase* bind_state)
+    : bind_state_(bind_state ? AdoptRef(bind_state) : nullptr) {
+  DCHECK(!bind_state_.get() || bind_state_->HasOneRef());
+}
+
+CallbackBase::~CallbackBase() = default;
+
+CallbackBaseCopyable::CallbackBaseCopyable(const CallbackBaseCopyable& c)
+    : CallbackBase(nullptr) {
+  bind_state_ = c.bind_state_;
+}
+
+CallbackBaseCopyable::CallbackBaseCopyable(CallbackBaseCopyable&& c) noexcept =
+    default;
+
+CallbackBaseCopyable& CallbackBaseCopyable::operator=(
+    const CallbackBaseCopyable& c) {
+  bind_state_ = c.bind_state_;
+  return *this;
+}
+
+CallbackBaseCopyable& CallbackBaseCopyable::operator=(
+    CallbackBaseCopyable&& c) noexcept = default;
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/callback_internal.h b/base/callback_internal.h
new file mode 100644
index 0000000..bfa5a6a
--- /dev/null
+++ b/base/callback_internal.h
@@ -0,0 +1,175 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains utility functions and classes that help the
+// implementation, and management of the Callback objects.
+
+#ifndef BASE_CALLBACK_INTERNAL_H_
+#define BASE_CALLBACK_INTERNAL_H_
+
+#include "base/base_export.h"
+#include "base/callback_forward.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+
+namespace base {
+
+struct FakeBindState;
+
+namespace internal {
+
+class CallbackBase;
+class CallbackBaseCopyable;
+
+class BindStateBase;
+
+template <typename Functor, typename... BoundArgs>
+struct BindState;
+
+struct BindStateBaseRefCountTraits {
+  static void Destruct(const BindStateBase*);
+};
+
+template <typename T, bool IsScalar = std::is_scalar<T>::value>
+struct PassingTraits;
+
+template <typename T>
+struct PassingTraits<T, false> {
+  using Type = T&&;
+};
+
+template <typename T>
+struct PassingTraits<T, true> {
+  using Type = T;
+};
+
+template <typename T>
+using PassingTraitsType = typename PassingTraits<T>::Type;
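+
+// For example, PassingTraitsType<int> is plain int (scalars are passed by
+// value), while PassingTraitsType<std::string> is std::string&& (non-scalar
+// types are passed by rvalue reference to avoid copies).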
+
+// BindStateBase is used to provide an opaque handle that the Callback
+// class can use to represent a function object with bound arguments.  It
+// behaves as an existential type that is used by a corresponding
+// DoInvoke function to perform the function execution.  This allows
+// us to shield the Callback class from the types of the bound argument via
+// "type erasure."
+// At the base level, the only task is to add reference counting data. Don't
+// use RefCountedThreadSafe since it requires the destructor to be a virtual
+// method. Creating a vtable for every BindState template instantiation
+// results in a lot of bloat. Instead, the destructor is invoked through a
+// plain function pointer.
+class BASE_EXPORT BindStateBase
+    : public RefCountedThreadSafe<BindStateBase, BindStateBaseRefCountTraits> {
+ public:
+  REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE();
+
+  using InvokeFuncStorage = void(*)();
+
+ private:
+  BindStateBase(InvokeFuncStorage polymorphic_invoke,
+                void (*destructor)(const BindStateBase*));
+  BindStateBase(InvokeFuncStorage polymorphic_invoke,
+                void (*destructor)(const BindStateBase*),
+                bool (*is_cancelled)(const BindStateBase*));
+
+  ~BindStateBase() = default;
+
+  friend struct BindStateBaseRefCountTraits;
+  friend class RefCountedThreadSafe<BindStateBase, BindStateBaseRefCountTraits>;
+
+  friend class CallbackBase;
+  friend class CallbackBaseCopyable;
+
+  // Whitelist subclasses that access the destructor of BindStateBase.
+  template <typename Functor, typename... BoundArgs>
+  friend struct BindState;
+  friend struct ::base::FakeBindState;
+
+  bool IsCancelled() const {
+    return is_cancelled_(this);
+  }
+
+  // In C++, it is safe to cast function pointers to function pointers of
+  // another type. It is not okay to use void*. We create an InvokeFuncStorage
+  // that can store our function pointer, and then cast it back to
+  // the original type on usage.
+  InvokeFuncStorage polymorphic_invoke_;
+
+  // Pointer to a function that will properly destroy |this|.
+  void (*destructor_)(const BindStateBase*);
+  bool (*is_cancelled_)(const BindStateBase*);
+
+  DISALLOW_COPY_AND_ASSIGN(BindStateBase);
+};
+
+// Holds the Callback methods that don't require specialization to reduce
+// template bloat.
+// CallbackBase is a direct base class of move-only callbacks, and
+// CallbackBaseCopyable derives from it to implement copyable callbacks.
+class BASE_EXPORT CallbackBase {
+ public:
+  CallbackBase(CallbackBase&& c) noexcept;
+  CallbackBase& operator=(CallbackBase&& c) noexcept;
+
+  explicit CallbackBase(const CallbackBaseCopyable& c);
+  CallbackBase& operator=(const CallbackBaseCopyable& c);
+
+  explicit CallbackBase(CallbackBaseCopyable&& c) noexcept;
+  CallbackBase& operator=(CallbackBaseCopyable&& c) noexcept;
+
+  // Returns true if Callback is null (doesn't refer to anything).
+  bool is_null() const { return !bind_state_; }
+  explicit operator bool() const { return !is_null(); }
+
+  // Returns true if the callback invocation will be a no-op due to
+  // cancellation. It's invalid to call this on an uninitialized callback.
+  bool IsCancelled() const;
+
+  // Resets the Callback to an uninitialized state.
+  void Reset();
+
+ protected:
+  using InvokeFuncStorage = BindStateBase::InvokeFuncStorage;
+
+  // Returns true if this callback equals |other|. |other| may be null.
+  bool EqualsInternal(const CallbackBase& other) const;
+
+  constexpr inline CallbackBase();
+
+  // Allow initializing |bind_state_| via the constructor to avoid default
+  // initialization of the scoped_refptr.
+  explicit CallbackBase(BindStateBase* bind_state);
+
+  InvokeFuncStorage polymorphic_invoke() const {
+    return bind_state_->polymorphic_invoke_;
+  }
+
+  // Force the destructor to be instantiated inside this translation unit so
+  // that our subclasses will not get inlined versions.  Avoids more template
+  // bloat.
+  ~CallbackBase();
+
+  scoped_refptr<BindStateBase> bind_state_;
+};
+
+constexpr CallbackBase::CallbackBase() = default;
+
+// CallbackBase<Copyable> is a direct base class of Copyable Callbacks.
+class BASE_EXPORT CallbackBaseCopyable : public CallbackBase {
+ public:
+  CallbackBaseCopyable(const CallbackBaseCopyable& c);
+  CallbackBaseCopyable(CallbackBaseCopyable&& c) noexcept;
+  CallbackBaseCopyable& operator=(const CallbackBaseCopyable& c);
+  CallbackBaseCopyable& operator=(CallbackBaseCopyable&& c) noexcept;
+
+ protected:
+  constexpr CallbackBaseCopyable() = default;
+  explicit CallbackBaseCopyable(BindStateBase* bind_state)
+      : CallbackBase(bind_state) {}
+  ~CallbackBaseCopyable() = default;
+};
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_CALLBACK_INTERNAL_H_
diff --git a/base/callback_list.h b/base/callback_list.h
new file mode 100644
index 0000000..f455c65
--- /dev/null
+++ b/base/callback_list.h
@@ -0,0 +1,226 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CALLBACK_LIST_H_
+#define BASE_CALLBACK_LIST_H_
+
+#include <list>
+#include <memory>
+
+#include "base/callback.h"
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/macros.h"
+
+// OVERVIEW:
+//
+// A container for a list of (repeating) callbacks. Unlike a normal vector or
+// list, this container can be modified during iteration without invalidating
+// the iterator. It safely handles the case of a callback removing itself or
+// another callback from the list while callbacks are being run.
+//
+// TYPICAL USAGE:
+//
+// class MyWidget {
+//  public:
+//   ...
+//
+//   std::unique_ptr<base::CallbackList<void(const Foo&)>::Subscription>
+//   RegisterCallback(const base::RepeatingCallback<void(const Foo&)>& cb) {
+//     return callback_list_.Add(cb);
+//   }
+//
+//  private:
+//   void NotifyFoo(const Foo& foo) {
+//      callback_list_.Notify(foo);
+//   }
+//
+//   base::CallbackList<void(const Foo&)> callback_list_;
+//
+//   DISALLOW_COPY_AND_ASSIGN(MyWidget);
+// };
+//
+//
+// class MyWidgetListener {
+//  public:
+//   MyWidgetListener() {
+//     foo_subscription_ = MyWidget::GetCurrent()->RegisterCallback(
+//         base::BindRepeating(&MyWidgetListener::OnFoo, this));
+//   }
+//
+//   ~MyWidgetListener() {
+//      // Subscription gets deleted automatically and will deregister
+//      // the callback in the process.
+//   }
+//
+//  private:
+//   void OnFoo(const Foo& foo) {
+//     // Do something.
+//   }
+//
+//   std::unique_ptr<base::CallbackList<void(const Foo&)>::Subscription>
+//       foo_subscription_;
+//
+//   DISALLOW_COPY_AND_ASSIGN(MyWidgetListener);
+// };
+
+namespace base {
+
+namespace internal {
+
+template <typename CallbackType>
+class CallbackListBase {
+ public:
+  class Subscription {
+   public:
+    Subscription(CallbackListBase<CallbackType>* list,
+                 typename std::list<CallbackType>::iterator iter)
+        : list_(list),
+          iter_(iter) {
+    }
+
+    ~Subscription() {
+      if (list_->active_iterator_count_) {
+        iter_->Reset();
+      } else {
+        list_->callbacks_.erase(iter_);
+        if (!list_->removal_callback_.is_null())
+          list_->removal_callback_.Run();
+      }
+    }
+
+   private:
+    CallbackListBase<CallbackType>* list_;
+    typename std::list<CallbackType>::iterator iter_;
+
+    DISALLOW_COPY_AND_ASSIGN(Subscription);
+  };
+
+  // Add a callback to the list. The callback will remain registered until the
+  // returned Subscription is destroyed, which must occur before the
+  // CallbackList is destroyed.
+  std::unique_ptr<Subscription> Add(const CallbackType& cb) WARN_UNUSED_RESULT {
+    DCHECK(!cb.is_null());
+    return std::make_unique<Subscription>(
+        this, callbacks_.insert(callbacks_.end(), cb));
+  }
+
+  // Sets a callback which will be run whenever a callback is removed from
+  // the list.
+  void set_removal_callback(const RepeatingClosure& callback) {
+    removal_callback_ = callback;
+  }
+
+  // Returns true if there are no subscriptions. This is only valid to call when
+  // not looping through the list.
+  bool empty() {
+    DCHECK_EQ(0, active_iterator_count_);
+    return callbacks_.empty();
+  }
+
+ protected:
+  // An iterator class that can be used to access the list of callbacks.
+  class Iterator {
+   public:
+    explicit Iterator(CallbackListBase<CallbackType>* list)
+        : list_(list),
+          list_iter_(list_->callbacks_.begin()) {
+      ++list_->active_iterator_count_;
+    }
+
+    Iterator(const Iterator& iter)
+        : list_(iter.list_),
+          list_iter_(iter.list_iter_) {
+      ++list_->active_iterator_count_;
+    }
+
+    ~Iterator() {
+      if (list_ && --list_->active_iterator_count_ == 0) {
+        list_->Compact();
+      }
+    }
+
+    CallbackType* GetNext() {
+      while ((list_iter_ != list_->callbacks_.end()) && list_iter_->is_null())
+        ++list_iter_;
+
+      CallbackType* cb = nullptr;
+      if (list_iter_ != list_->callbacks_.end()) {
+        cb = &(*list_iter_);
+        ++list_iter_;
+      }
+      return cb;
+    }
+
+   private:
+    CallbackListBase<CallbackType>* list_;
+    typename std::list<CallbackType>::iterator list_iter_;
+  };
+
+  CallbackListBase() : active_iterator_count_(0) {}
+
+  ~CallbackListBase() {
+    DCHECK_EQ(0, active_iterator_count_);
+    DCHECK_EQ(0U, callbacks_.size());
+  }
+
+  // Returns an instance of a CallbackListBase::Iterator which can be used
+  // to run callbacks.
+  Iterator GetIterator() {
+    return Iterator(this);
+  }
+
+  // Compact the list: remove any entries which were nulled out during
+  // iteration.
+  void Compact() {
+    auto it = callbacks_.begin();
+    bool updated = false;
+    while (it != callbacks_.end()) {
+      if ((*it).is_null()) {
+        updated = true;
+        it = callbacks_.erase(it);
+      } else {
+        ++it;
+      }
+    }
+
+    if (updated && !removal_callback_.is_null())
+      removal_callback_.Run();
+  }
+
+ private:
+  std::list<CallbackType> callbacks_;
+  int active_iterator_count_;
+  RepeatingClosure removal_callback_;
+
+  DISALLOW_COPY_AND_ASSIGN(CallbackListBase);
+};
+
+}  // namespace internal
+
+template <typename Sig> class CallbackList;
+
+template <typename... Args>
+class CallbackList<void(Args...)>
+    : public internal::CallbackListBase<RepeatingCallback<void(Args...)>> {
+ public:
+  using CallbackType = RepeatingCallback<void(Args...)>;
+
+  CallbackList() = default;
+
+  template <typename... RunArgs>
+  void Notify(RunArgs&&... args) {
+    auto it = this->GetIterator();
+    CallbackType* cb;
+    while ((cb = it.GetNext()) != nullptr) {
+      cb->Run(args...);
+    }
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(CallbackList);
+};
+
+}  // namespace base
+
+#endif  // BASE_CALLBACK_LIST_H_
diff --git a/base/callback_list_unittest.cc b/base/callback_list_unittest.cc
new file mode 100644
index 0000000..6eb5ff7
--- /dev/null
+++ b/base/callback_list_unittest.cc
@@ -0,0 +1,339 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/callback_list.h"
+
+#include <memory>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/macros.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+class Listener {
+ public:
+  Listener() : total_(0), scaler_(1) {}
+  explicit Listener(int scaler) : total_(0), scaler_(scaler) {}
+  void IncrementTotal() { total_++; }
+  void IncrementByMultipleOfScaler(int x) { total_ += x * scaler_; }
+
+  int total() const { return total_; }
+
+ private:
+  int total_;
+  int scaler_;
+  DISALLOW_COPY_AND_ASSIGN(Listener);
+};
+
+class Remover {
+ public:
+  Remover() : total_(0) {}
+  void IncrementTotalAndRemove() {
+    total_++;
+    removal_subscription_.reset();
+  }
+  void SetSubscriptionToRemove(
+      std::unique_ptr<CallbackList<void(void)>::Subscription> sub) {
+    removal_subscription_ = std::move(sub);
+  }
+
+  int total() const { return total_; }
+
+ private:
+  int total_;
+  std::unique_ptr<CallbackList<void(void)>::Subscription> removal_subscription_;
+  DISALLOW_COPY_AND_ASSIGN(Remover);
+};
+
+class Adder {
+ public:
+  explicit Adder(CallbackList<void(void)>* cb_reg)
+      : added_(false),
+        total_(0),
+        cb_reg_(cb_reg) {
+  }
+  void AddCallback() {
+    if (!added_) {
+      added_ = true;
+      subscription_ =
+          cb_reg_->Add(Bind(&Adder::IncrementTotal, Unretained(this)));
+    }
+  }
+  void IncrementTotal() { total_++; }
+
+  bool added() const { return added_; }
+
+  int total() const { return total_; }
+
+ private:
+  bool added_;
+  int total_;
+  CallbackList<void(void)>* cb_reg_;
+  std::unique_ptr<CallbackList<void(void)>::Subscription> subscription_;
+  DISALLOW_COPY_AND_ASSIGN(Adder);
+};
+
+class Summer {
+ public:
+  Summer() : value_(0) {}
+
+  void AddOneParam(int a) { value_ = a; }
+  void AddTwoParam(int a, int b) { value_ = a + b; }
+  void AddThreeParam(int a, int b, int c) { value_ = a + b + c; }
+  void AddFourParam(int a, int b, int c, int d) { value_ = a + b + c + d; }
+  void AddFiveParam(int a, int b, int c, int d, int e) {
+    value_ = a + b + c + d + e;
+  }
+  void AddSixParam(int a, int b, int c, int d, int e, int f) {
+    value_ = a + b + c + d + e + f;
+  }
+
+  int value() const { return value_; }
+
+ private:
+  int value_;
+  DISALLOW_COPY_AND_ASSIGN(Summer);
+};
+
+class Counter {
+ public:
+  Counter() : value_(0) {}
+
+  void Increment() { value_++; }
+
+  int value() const { return value_; }
+
+ private:
+  int value_;
+  DISALLOW_COPY_AND_ASSIGN(Counter);
+};
+
+// Sanity check that we can instantiate a CallbackList for each arity.
+TEST(CallbackListTest, ArityTest) {
+  Summer s;
+
+  CallbackList<void(int)> c1;
+  std::unique_ptr<CallbackList<void(int)>::Subscription> subscription1 =
+      c1.Add(Bind(&Summer::AddOneParam, Unretained(&s)));
+
+  c1.Notify(1);
+  EXPECT_EQ(1, s.value());
+
+  CallbackList<void(int, int)> c2;
+  std::unique_ptr<CallbackList<void(int, int)>::Subscription> subscription2 =
+      c2.Add(Bind(&Summer::AddTwoParam, Unretained(&s)));
+
+  c2.Notify(1, 2);
+  EXPECT_EQ(3, s.value());
+
+  CallbackList<void(int, int, int)> c3;
+  std::unique_ptr<CallbackList<void(int, int, int)>::Subscription>
+      subscription3 = c3.Add(Bind(&Summer::AddThreeParam, Unretained(&s)));
+
+  c3.Notify(1, 2, 3);
+  EXPECT_EQ(6, s.value());
+
+  CallbackList<void(int, int, int, int)> c4;
+  std::unique_ptr<CallbackList<void(int, int, int, int)>::Subscription>
+      subscription4 = c4.Add(Bind(&Summer::AddFourParam, Unretained(&s)));
+
+  c4.Notify(1, 2, 3, 4);
+  EXPECT_EQ(10, s.value());
+
+  CallbackList<void(int, int, int, int, int)> c5;
+  std::unique_ptr<CallbackList<void(int, int, int, int, int)>::Subscription>
+      subscription5 = c5.Add(Bind(&Summer::AddFiveParam, Unretained(&s)));
+
+  c5.Notify(1, 2, 3, 4, 5);
+  EXPECT_EQ(15, s.value());
+
+  CallbackList<void(int, int, int, int, int, int)> c6;
+  std::unique_ptr<
+      CallbackList<void(int, int, int, int, int, int)>::Subscription>
+      subscription6 = c6.Add(Bind(&Summer::AddSixParam, Unretained(&s)));
+
+  c6.Notify(1, 2, 3, 4, 5, 6);
+  EXPECT_EQ(21, s.value());
+}
+
+// Sanity check that closures added to the list will be run, and those removed
+// from the list will not be run.
+TEST(CallbackListTest, BasicTest) {
+  CallbackList<void(void)> cb_reg;
+  Listener a, b, c;
+
+  std::unique_ptr<CallbackList<void(void)>::Subscription> a_subscription =
+      cb_reg.Add(Bind(&Listener::IncrementTotal, Unretained(&a)));
+  std::unique_ptr<CallbackList<void(void)>::Subscription> b_subscription =
+      cb_reg.Add(Bind(&Listener::IncrementTotal, Unretained(&b)));
+
+  EXPECT_TRUE(a_subscription.get());
+  EXPECT_TRUE(b_subscription.get());
+
+  cb_reg.Notify();
+
+  EXPECT_EQ(1, a.total());
+  EXPECT_EQ(1, b.total());
+
+  b_subscription.reset();
+
+  std::unique_ptr<CallbackList<void(void)>::Subscription> c_subscription =
+      cb_reg.Add(Bind(&Listener::IncrementTotal, Unretained(&c)));
+
+  cb_reg.Notify();
+
+  EXPECT_EQ(2, a.total());
+  EXPECT_EQ(1, b.total());
+  EXPECT_EQ(1, c.total());
+
+  a_subscription.reset();
+  b_subscription.reset();
+  c_subscription.reset();
+}
+
+// Sanity check that callbacks with details added to the list will be run, with
+// the correct details, and those removed from the list will not be run.
+TEST(CallbackListTest, BasicTestWithParams) {
+  CallbackList<void(int)> cb_reg;
+  Listener a(1), b(-1), c(1);
+
+  std::unique_ptr<CallbackList<void(int)>::Subscription> a_subscription =
+      cb_reg.Add(Bind(&Listener::IncrementByMultipleOfScaler, Unretained(&a)));
+  std::unique_ptr<CallbackList<void(int)>::Subscription> b_subscription =
+      cb_reg.Add(Bind(&Listener::IncrementByMultipleOfScaler, Unretained(&b)));
+
+  EXPECT_TRUE(a_subscription.get());
+  EXPECT_TRUE(b_subscription.get());
+
+  cb_reg.Notify(10);
+
+  EXPECT_EQ(10, a.total());
+  EXPECT_EQ(-10, b.total());
+
+  b_subscription.reset();
+
+  std::unique_ptr<CallbackList<void(int)>::Subscription> c_subscription =
+      cb_reg.Add(Bind(&Listener::IncrementByMultipleOfScaler, Unretained(&c)));
+
+  cb_reg.Notify(10);
+
+  EXPECT_EQ(20, a.total());
+  EXPECT_EQ(-10, b.total());
+  EXPECT_EQ(10, c.total());
+
+  a_subscription.reset();
+  b_subscription.reset();
+  c_subscription.reset();
+}
+
+// Test that a callback can remove itself or a different callback from the list
+// during iteration without invalidating the iterator.
+TEST(CallbackListTest, RemoveCallbacksDuringIteration) {
+  CallbackList<void(void)> cb_reg;
+  Listener a, b;
+  Remover remover_1, remover_2;
+
+  std::unique_ptr<CallbackList<void(void)>::Subscription> remover_1_sub =
+      cb_reg.Add(
+          Bind(&Remover::IncrementTotalAndRemove, Unretained(&remover_1)));
+  std::unique_ptr<CallbackList<void(void)>::Subscription> remover_2_sub =
+      cb_reg.Add(
+          Bind(&Remover::IncrementTotalAndRemove, Unretained(&remover_2)));
+  std::unique_ptr<CallbackList<void(void)>::Subscription> a_subscription =
+      cb_reg.Add(Bind(&Listener::IncrementTotal, Unretained(&a)));
+  std::unique_ptr<CallbackList<void(void)>::Subscription> b_subscription =
+      cb_reg.Add(Bind(&Listener::IncrementTotal, Unretained(&b)));
+
+  // |remover_1| will remove itself.
+  remover_1.SetSubscriptionToRemove(std::move(remover_1_sub));
+  // |remover_2| will remove a.
+  remover_2.SetSubscriptionToRemove(std::move(a_subscription));
+
+  cb_reg.Notify();
+
+  // |remover_1| runs once (and removes itself), |remover_2| runs once (and
+  // removes a), |a| never runs, and |b| runs once.
+  EXPECT_EQ(1, remover_1.total());
+  EXPECT_EQ(1, remover_2.total());
+  EXPECT_EQ(0, a.total());
+  EXPECT_EQ(1, b.total());
+
+  cb_reg.Notify();
+
+  // Only |remover_2| and |b| run this time.
+  EXPECT_EQ(1, remover_1.total());
+  EXPECT_EQ(2, remover_2.total());
+  EXPECT_EQ(0, a.total());
+  EXPECT_EQ(2, b.total());
+}
+
+// Test that a callback can add another callback to the list during iteration
+// without invalidating the iterator. The newly added callback should be run on
+// the current iteration, as will all other callbacks in the list.
+TEST(CallbackListTest, AddCallbacksDuringIteration) {
+  CallbackList<void(void)> cb_reg;
+  Adder a(&cb_reg);
+  Listener b;
+  std::unique_ptr<CallbackList<void(void)>::Subscription> a_subscription =
+      cb_reg.Add(Bind(&Adder::AddCallback, Unretained(&a)));
+  std::unique_ptr<CallbackList<void(void)>::Subscription> b_subscription =
+      cb_reg.Add(Bind(&Listener::IncrementTotal, Unretained(&b)));
+
+  cb_reg.Notify();
+
+  EXPECT_EQ(1, a.total());
+  EXPECT_EQ(1, b.total());
+  EXPECT_TRUE(a.added());
+
+  cb_reg.Notify();
+
+  EXPECT_EQ(2, a.total());
+  EXPECT_EQ(2, b.total());
+}
+
+// Sanity check: notifying an empty list is a no-op.
+TEST(CallbackListTest, EmptyList) {
+  CallbackList<void(void)> cb_reg;
+
+  cb_reg.Notify();
+}
+
+TEST(CallbackListTest, RemovalCallback) {
+  Counter remove_count;
+  CallbackList<void(void)> cb_reg;
+  cb_reg.set_removal_callback(
+      Bind(&Counter::Increment, Unretained(&remove_count)));
+
+  std::unique_ptr<CallbackList<void(void)>::Subscription> subscription =
+      cb_reg.Add(DoNothing());
+
+  // Removing a subscription outside of iteration signals the callback.
+  EXPECT_EQ(0, remove_count.value());
+  subscription.reset();
+  EXPECT_EQ(1, remove_count.value());
+
+  // Configure two subscriptions to remove themselves.
+  Remover remover_1, remover_2;
+  std::unique_ptr<CallbackList<void(void)>::Subscription> remover_1_sub =
+      cb_reg.Add(
+          Bind(&Remover::IncrementTotalAndRemove, Unretained(&remover_1)));
+  std::unique_ptr<CallbackList<void(void)>::Subscription> remover_2_sub =
+      cb_reg.Add(
+          Bind(&Remover::IncrementTotalAndRemove, Unretained(&remover_2)));
+  remover_1.SetSubscriptionToRemove(std::move(remover_1_sub));
+  remover_2.SetSubscriptionToRemove(std::move(remover_2_sub));
+
+  // The callback should be signaled exactly once.
+  EXPECT_EQ(1, remove_count.value());
+  cb_reg.Notify();
+  EXPECT_EQ(2, remove_count.value());
+  EXPECT_TRUE(cb_reg.empty());
+}
+
+}  // namespace
+}  // namespace base
diff --git a/base/callback_list_unittest.nc b/base/callback_list_unittest.nc
new file mode 100644
index 0000000..7347f76
--- /dev/null
+++ b/base/callback_list_unittest.nc
@@ -0,0 +1,56 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is a "No Compile Test" suite.
+// http://dev.chromium.org/developers/testing/no-compile-tests
+
+#include "base/callback_list.h"
+
+#include <memory>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/macros.h"
+
+namespace base {
+
+class Foo {
+ public:
+  Foo() {}
+  ~Foo() {}
+};
+
+class FooListener {
+ public:
+  FooListener() {}
+
+  void GotAScopedFoo(std::unique_ptr<Foo> f) { foo_ = std::move(f); }
+
+  std::unique_ptr<Foo> foo_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(FooListener);
+};
+
+
+#if defined(NCTEST_MOVE_ONLY_TYPE_PARAMETER)  // [r"fatal error: call to (implicitly-)?deleted( copy)? constructor"]
+
+// Callbacks run with a move-only typed parameter.
+//
+// CallbackList does not support move-only typed parameters. Notify() is
+// designed to take zero or more parameters, and run each registered callback
+// with them. With a move-only type, the parameter would be left in its
+// moved-from (null) state after the first callback had run.
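+//
+// For contrast, a sketch of a pattern that does compile (illustrative only;
+// GotARawFoo would be a hypothetical FooListener method taking a Foo*): pass
+// the object by pointer, so every callback observes the same instance.
+//   CallbackList<void(Foo*)> list;
+//   auto sub = list.Add(Bind(&FooListener::GotARawFoo, Unretained(&f)));
+//   Foo foo;
+//   list.Notify(&foo);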
+void WontCompile() {
+  FooListener f;
+  CallbackList<void(std::unique_ptr<Foo>)> c1;
+  std::unique_ptr<CallbackList<void(std::unique_ptr<Foo>)>::Subscription> sub =
+      c1.Add(Bind(&FooListener::GotAScopedFoo, Unretained(&f)));
+  c1.Notify(std::unique_ptr<Foo>(new Foo()));
+}
+
+#endif
+
+}  // namespace base
diff --git a/base/callback_unittest.cc b/base/callback_unittest.cc
new file mode 100644
index 0000000..c07d3ee
--- /dev/null
+++ b/base/callback_unittest.cc
@@ -0,0 +1,187 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/callback.h"
+
+#include <memory>
+
+#include "base/bind.h"
+#include "base/callback_helpers.h"
+#include "base/callback_internal.h"
+#include "base/memory/ref_counted.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+void NopInvokeFunc() {}
+
+// White-box test points to inject into a Callback<> object for checking
+// comparators and emptiness APIs.  Use a BindState that is specialized
+// based on a type declared locally in this file to remove any chance of
+// colliding with another instantiation and breaking the one-definition-rule.
+struct FakeBindState : internal::BindStateBase {
+  FakeBindState() : BindStateBase(&NopInvokeFunc, &Destroy, &IsCancelled) {}
+
+ private:
+  ~FakeBindState() = default;
+  static void Destroy(const internal::BindStateBase* self) {
+    delete static_cast<const FakeBindState*>(self);
+  }
+  static bool IsCancelled(const internal::BindStateBase*) {
+    return false;
+  }
+};
+
+namespace {
+
+class CallbackTest : public ::testing::Test {
+ public:
+  CallbackTest()
+      : callback_a_(new FakeBindState()), callback_b_(new FakeBindState()) {}
+
+  ~CallbackTest() override = default;
+
+ protected:
+  Callback<void()> callback_a_;
+  const Callback<void()> callback_b_;  // Ensure APIs work with const.
+  Callback<void()> null_callback_;
+};
+
+// Ensure we can create unbound callbacks. We need this to be able to store
+// them in class members that can be initialized later.
+TEST_F(CallbackTest, DefaultConstruction) {
+  Callback<void()> c0;
+  Callback<void(int)> c1;
+  Callback<void(int,int)> c2;
+  Callback<void(int,int,int)> c3;
+  Callback<void(int,int,int,int)> c4;
+  Callback<void(int,int,int,int,int)> c5;
+  Callback<void(int,int,int,int,int,int)> c6;
+
+  EXPECT_TRUE(c0.is_null());
+  EXPECT_TRUE(c1.is_null());
+  EXPECT_TRUE(c2.is_null());
+  EXPECT_TRUE(c3.is_null());
+  EXPECT_TRUE(c4.is_null());
+  EXPECT_TRUE(c5.is_null());
+  EXPECT_TRUE(c6.is_null());
+}
+
+TEST_F(CallbackTest, IsNull) {
+  EXPECT_TRUE(null_callback_.is_null());
+  EXPECT_FALSE(callback_a_.is_null());
+  EXPECT_FALSE(callback_b_.is_null());
+}
+
+TEST_F(CallbackTest, Equals) {
+  EXPECT_TRUE(callback_a_.Equals(callback_a_));
+  EXPECT_FALSE(callback_a_.Equals(callback_b_));
+  EXPECT_FALSE(callback_b_.Equals(callback_a_));
+
+  // We should compare based on instance, not type.
+  Callback<void()> callback_c(new FakeBindState());
+  Callback<void()> callback_a2 = callback_a_;
+  EXPECT_TRUE(callback_a_.Equals(callback_a2));
+  EXPECT_FALSE(callback_a_.Equals(callback_c));
+
+  // Empty, however, is always equal to empty.
+  Callback<void()> empty2;
+  EXPECT_TRUE(null_callback_.Equals(empty2));
+}
+
+TEST_F(CallbackTest, Reset) {
+  // Resetting should bring us back to empty.
+  ASSERT_FALSE(callback_a_.is_null());
+  ASSERT_FALSE(callback_a_.Equals(null_callback_));
+
+  callback_a_.Reset();
+
+  EXPECT_TRUE(callback_a_.is_null());
+  EXPECT_TRUE(callback_a_.Equals(null_callback_));
+}
+
+TEST_F(CallbackTest, Move) {
+  // Moving should reset the callback.
+  ASSERT_FALSE(callback_a_.is_null());
+  ASSERT_FALSE(callback_a_.Equals(null_callback_));
+
+  auto tmp = std::move(callback_a_);
+
+  EXPECT_TRUE(callback_a_.is_null());
+  EXPECT_TRUE(callback_a_.Equals(null_callback_));
+}
+
+struct TestForReentrancy {
+  TestForReentrancy()
+      : cb_already_run(false),
+        cb(Bind(&TestForReentrancy::AssertCBIsNull, Unretained(this))) {
+  }
+  void AssertCBIsNull() {
+    ASSERT_TRUE(cb.is_null());
+    cb_already_run = true;
+  }
+  bool cb_already_run;
+  Closure cb;
+};
+
+TEST_F(CallbackTest, ResetAndReturn) {
+  TestForReentrancy tfr;
+  ASSERT_FALSE(tfr.cb.is_null());
+  ASSERT_FALSE(tfr.cb_already_run);
+  ResetAndReturn(&tfr.cb).Run();
+  ASSERT_TRUE(tfr.cb.is_null());
+  ASSERT_TRUE(tfr.cb_already_run);
+}
+
+TEST_F(CallbackTest, NullAfterMoveRun) {
+  Closure cb = Bind([] {});
+  ASSERT_TRUE(cb);
+  std::move(cb).Run();
+  ASSERT_FALSE(cb);
+
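+  // |cb2| is const, so std::move() yields a const rvalue; the copying Run()
+  // overload is selected and |cb2| is left intact.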
+  const Closure cb2 = Bind([] {});
+  ASSERT_TRUE(cb2);
+  std::move(cb2).Run();
+  ASSERT_TRUE(cb2);
+
+  OnceClosure cb3 = BindOnce([] {});
+  ASSERT_TRUE(cb3);
+  std::move(cb3).Run();
+  ASSERT_FALSE(cb3);
+}
+
+class CallbackOwner : public base::RefCounted<CallbackOwner> {
+ public:
+  explicit CallbackOwner(bool* deleted) {
+    callback_ = Bind(&CallbackOwner::Unused, this);
+    deleted_ = deleted;
+  }
+  void Reset() {
+    callback_.Reset();
+    // We are deleted here if no-one else had a ref to us.
+  }
+
+ private:
+  friend class base::RefCounted<CallbackOwner>;
+  virtual ~CallbackOwner() {
+    *deleted_ = true;
+  }
+  void Unused() {
+    FAIL() << "Should never be called";
+  }
+
+  Closure callback_;
+  bool* deleted_;
+};
+
+TEST_F(CallbackTest, CallbackHasLastRefOnContainingObject) {
+  bool deleted = false;
+  CallbackOwner* owner = new CallbackOwner(&deleted);
+  owner->Reset();
+  ASSERT_TRUE(deleted);
+}
+
+}  // namespace
+}  // namespace base
diff --git a/base/callback_unittest.nc b/base/callback_unittest.nc
new file mode 100644
index 0000000..3261529
--- /dev/null
+++ b/base/callback_unittest.nc
@@ -0,0 +1,53 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is a "No Compile Test" suite.
+// http://dev.chromium.org/developers/testing/no-compile-tests
+
+#include "base/callback.h"
+
+namespace base {
+
+class Parent {
+};
+
+class Child : Parent {
+};
+
+#if defined(NCTEST_EQUALS_REQUIRES_SAMETYPE)  // [r"fatal error: no viable conversion from 'RepeatingCallback<int \(\)>' to 'const RepeatingCallback<void \(\)>'"]
+
+// Attempting to call comparison function on two callbacks of different type.
+//
+// This should be a compile time failure because each callback type should be
+// considered distinct.
+void WontCompile() {
+  Closure c1;
+  Callback<int()> c2;
+  c1.Equals(c2);
+}
+
+#elif defined(NCTEST_CONSTRUCTION_FROM_SUBTYPE)  // [r"fatal error: no viable conversion from 'Callback<base::Parent \(\)>' to 'Callback<base::Child \(\)>'"]
+
+// Construction of Callback<A> from Callback<B> if A is supertype of B.
+//
+// While this is technically safe, most people aren't used to it when coding
+// C++ so if this is happening, it is almost certainly an error.
+void WontCompile() {
+  Callback<Parent()> cb_a;
+  Callback<Child()> cb_b = cb_a;
+}
+
+#elif defined(NCTEST_ASSIGNMENT_FROM_SUBTYPE)  // [r"fatal error: no viable overloaded '='"]
+
+// Assignment of Callback<A> from Callback<B> if A is supertype of B.
+// See explanation for NCTEST_CONSTRUCTION_FROM_SUBTYPE
+void WontCompile() {
+  Callback<Parent()> cb_a;
+  Callback<Child()> cb_b;
+  cb_a = cb_b;
+}
+
+#endif
+
+}  // namespace base
diff --git a/base/cancelable_callback.h b/base/cancelable_callback.h
new file mode 100644
index 0000000..a98101a
--- /dev/null
+++ b/base/cancelable_callback.h
@@ -0,0 +1,156 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// CancelableCallback is a wrapper around base::Callback that allows
+// cancellation of a callback. CancelableCallback takes a reference on the
+// wrapped callback until this object is destroyed or Reset()/Cancel() are
+// called.
+//
+// NOTE:
+//
+// Calling CancelableCallback::Cancel() brings the object back to its natural,
+// default-constructed state, i.e., CancelableCallback::callback() will return
+// a null callback.
+//
+// THREAD-SAFETY:
+//
+// CancelableCallback objects must be created on, posted to, cancelled on, and
+// destroyed on the same thread.
+//
+//
+// EXAMPLE USAGE:
+//
+// In the following example, the test is verifying that RunIntensiveTest()
+// Quit()s the message loop within 4 seconds. The cancelable callback is posted
+// to the message loop, the intensive test runs, the message loop is run,
+// then the callback is cancelled.
+//
+// RunLoop run_loop;
+//
+// void TimeoutCallback(const std::string& timeout_message) {
+//   FAIL() << timeout_message;
+//   run_loop.QuitWhenIdle();
+// }
+//
+// CancelableClosure timeout(base::Bind(&TimeoutCallback, "Test timed out."));
+// ThreadTaskRunnerHandle::Get()->PostDelayedTask(FROM_HERE, timeout.callback(),
+//                                                TimeDelta::FromSeconds(4));
+// RunIntensiveTest();
+// run_loop.Run();
+// timeout.Cancel();  // Hopefully this is hit before the timeout callback runs.
+//
+
+#ifndef BASE_CANCELABLE_CALLBACK_H_
+#define BASE_CANCELABLE_CALLBACK_H_
+
+#include <utility>
+
+#include "base/base_export.h"
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/callback_internal.h"
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
+
+namespace base {
+namespace internal {
+
+template <typename CallbackType>
+class CancelableCallbackImpl {
+ public:
+  CancelableCallbackImpl() : weak_ptr_factory_(this) {}
+
+  // |callback| must not be null.
+  explicit CancelableCallbackImpl(CallbackType callback)
+      : callback_(std::move(callback)), weak_ptr_factory_(this) {
+    DCHECK(callback_);
+  }
+
+  ~CancelableCallbackImpl() = default;
+
+  // Cancels and drops the reference to the wrapped callback.
+  void Cancel() {
+    weak_ptr_factory_.InvalidateWeakPtrs();
+    callback_.Reset();
+  }
+
+  // Returns true if the wrapped callback has been cancelled.
+  bool IsCancelled() const {
+    return callback_.is_null();
+  }
+
+  // Sets |callback| as the closure that may be cancelled. |callback| may not
+  // be null. Any outstanding or previously wrapped callbacks are cancelled.
+  void Reset(CallbackType callback) {
+    DCHECK(callback);
+    // Outstanding tasks (e.g., posted to a message loop) must not be called.
+    Cancel();
+    callback_ = std::move(callback);
+  }
+
+  // Returns a callback that can be disabled by calling Cancel().
+  CallbackType callback() const {
+    if (!callback_)
+      return CallbackType();
+    CallbackType forwarder;
+    MakeForwarder(&forwarder);
+    return forwarder;
+  }
+
+ private:
+  template <typename... Args>
+  void MakeForwarder(RepeatingCallback<void(Args...)>* out) const {
+    using ForwarderType = void (CancelableCallbackImpl::*)(Args...);
+    ForwarderType forwarder = &CancelableCallbackImpl::ForwardRepeating;
+    *out = BindRepeating(forwarder, weak_ptr_factory_.GetWeakPtr());
+  }
+
+  template <typename... Args>
+  void MakeForwarder(OnceCallback<void(Args...)>* out) const {
+    using ForwarderType = void (CancelableCallbackImpl::*)(Args...);
+    ForwarderType forwarder = &CancelableCallbackImpl::ForwardOnce;
+    *out = BindOnce(forwarder, weak_ptr_factory_.GetWeakPtr());
+  }
+
+  template <typename... Args>
+  void ForwardRepeating(Args... args) {
+    callback_.Run(std::forward<Args>(args)...);
+  }
+
+  template <typename... Args>
+  void ForwardOnce(Args... args) {
+    weak_ptr_factory_.InvalidateWeakPtrs();
+    std::move(callback_).Run(std::forward<Args>(args)...);
+  }
+
+  // The stored closure that may be cancelled.
+  CallbackType callback_;
+  mutable base::WeakPtrFactory<CancelableCallbackImpl> weak_ptr_factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(CancelableCallbackImpl);
+};
+
+}  // namespace internal
+
+// Consider using base::WeakPtr directly instead of base::CancelableCallback
+// for task cancellation.
+template <typename Signature>
+using CancelableOnceCallback =
+    internal::CancelableCallbackImpl<OnceCallback<Signature>>;
+using CancelableOnceClosure = CancelableOnceCallback<void()>;
+
+template <typename Signature>
+using CancelableRepeatingCallback =
+    internal::CancelableCallbackImpl<RepeatingCallback<Signature>>;
+using CancelableRepeatingClosure = CancelableRepeatingCallback<void()>;
+
+template <typename Signature>
+using CancelableCallback = CancelableRepeatingCallback<Signature>;
+using CancelableClosure = CancelableCallback<void()>;
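+
+// For example (a minimal sketch; |task_runner| and RunReport are
+// hypothetical):
+//
+//   CancelableOnceClosure report(BindOnce(&RunReport));
+//   task_runner->PostTask(FROM_HERE, report.callback());
+//   report.Cancel();  // The posted task becomes a no-op when it runs.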
+
+}  // namespace base
+
+#endif  // BASE_CANCELABLE_CALLBACK_H_
diff --git a/base/cancelable_callback_unittest.cc b/base/cancelable_callback_unittest.cc
new file mode 100644
index 0000000..373498c
--- /dev/null
+++ b/base/cancelable_callback_unittest.cc
@@ -0,0 +1,207 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/cancelable_callback.h"
+
+#include <memory>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/location.h"
+#include "base/memory/ptr_util.h"
+#include "base/memory/ref_counted.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+class TestRefCounted : public RefCountedThreadSafe<TestRefCounted> {
+ private:
+  friend class RefCountedThreadSafe<TestRefCounted>;
+  ~TestRefCounted() = default;
+};
+
+void Increment(int* count) { (*count)++; }
+void IncrementBy(int* count, int n) { (*count) += n; }
+void RefCountedParam(const scoped_refptr<TestRefCounted>& ref_counted) {}
+
+void OnMoveOnlyReceived(int* value, std::unique_ptr<int> result) {
+  *value = *result;
+}
+
+// Cancel().
+//  - Callback can be run multiple times.
+//  - After Cancel(), Run() completes but has no effect.
+TEST(CancelableCallbackTest, Cancel) {
+  int count = 0;
+  CancelableClosure cancelable(
+      base::Bind(&Increment, base::Unretained(&count)));
+
+  base::Closure callback = cancelable.callback();
+  callback.Run();
+  EXPECT_EQ(1, count);
+
+  callback.Run();
+  EXPECT_EQ(2, count);
+
+  cancelable.Cancel();
+  callback.Run();
+  EXPECT_EQ(2, count);
+}
+
+// Cancel() called multiple times.
+//  - Cancel() cancels all copies of the wrapped callback.
+//  - Calling Cancel() more than once has no effect.
+//  - After Cancel(), callback() returns a null callback.
+TEST(CancelableCallbackTest, MultipleCancel) {
+  int count = 0;
+  CancelableClosure cancelable(
+      base::Bind(&Increment, base::Unretained(&count)));
+
+  base::Closure callback1 = cancelable.callback();
+  base::Closure callback2 = cancelable.callback();
+  cancelable.Cancel();
+
+  callback1.Run();
+  EXPECT_EQ(0, count);
+
+  callback2.Run();
+  EXPECT_EQ(0, count);
+
+  // Calling Cancel() again has no effect.
+  cancelable.Cancel();
+
+  // callback() of a cancelled callback is null.
+  base::Closure callback3 = cancelable.callback();
+  EXPECT_TRUE(callback3.is_null());
+}
+
+// CancelableCallback destroyed before callback is run.
+//  - Destruction of CancelableCallback cancels outstanding callbacks.
+TEST(CancelableCallbackTest, CallbackCanceledOnDestruction) {
+  int count = 0;
+  base::Closure callback;
+
+  {
+    CancelableClosure cancelable(
+        base::Bind(&Increment, base::Unretained(&count)));
+
+    callback = cancelable.callback();
+    callback.Run();
+    EXPECT_EQ(1, count);
+  }
+
+  callback.Run();
+  EXPECT_EQ(1, count);
+}
+
+// Cancel() called on bound closure with a RefCounted parameter.
+//  - Cancel drops wrapped callback (and, implicitly, its bound arguments).
+TEST(CancelableCallbackTest, CancelDropsCallback) {
+  scoped_refptr<TestRefCounted> ref_counted = new TestRefCounted;
+  EXPECT_TRUE(ref_counted->HasOneRef());
+
+  CancelableClosure cancelable(base::Bind(RefCountedParam, ref_counted));
+  EXPECT_FALSE(cancelable.IsCancelled());
+  EXPECT_TRUE(ref_counted.get());
+  EXPECT_FALSE(ref_counted->HasOneRef());
+
+  // There is only one reference to |ref_counted| after the Cancel().
+  cancelable.Cancel();
+  EXPECT_TRUE(cancelable.IsCancelled());
+  EXPECT_TRUE(ref_counted.get());
+  EXPECT_TRUE(ref_counted->HasOneRef());
+}
+
+// Reset().
+//  - Reset() replaces the existing wrapped callback with a new callback.
+//  - Reset() deactivates outstanding callbacks.
+TEST(CancelableCallbackTest, Reset) {
+  int count = 0;
+  CancelableClosure cancelable(
+      base::Bind(&Increment, base::Unretained(&count)));
+
+  base::Closure callback = cancelable.callback();
+  callback.Run();
+  EXPECT_EQ(1, count);
+
+  callback.Run();
+  EXPECT_EQ(2, count);
+
+  cancelable.Reset(
+      base::Bind(&IncrementBy, base::Unretained(&count), 3));
+  EXPECT_FALSE(cancelable.IsCancelled());
+
+  // The stale copy of the cancelable callback is non-null.
+  ASSERT_FALSE(callback.is_null());
+
+  // The stale copy of the cancelable callback is no longer active.
+  callback.Run();
+  EXPECT_EQ(2, count);
+
+  base::Closure callback2 = cancelable.callback();
+  ASSERT_FALSE(callback2.is_null());
+
+  callback2.Run();
+  EXPECT_EQ(5, count);
+}
+
+// IsCancelled().
+//  - Cancel() transforms the CancelableCallback into a cancelled state.
+TEST(CancelableCallbackTest, IsNull) {
+  CancelableClosure cancelable;
+  EXPECT_TRUE(cancelable.IsCancelled());
+
+  int count = 0;
+  cancelable.Reset(base::Bind(&Increment,
+                              base::Unretained(&count)));
+  EXPECT_FALSE(cancelable.IsCancelled());
+
+  cancelable.Cancel();
+  EXPECT_TRUE(cancelable.IsCancelled());
+}
+
+// CancelableCallback posted to a MessageLoop with PostTask.
+//  - Callbacks posted to a MessageLoop can be cancelled.
+TEST(CancelableCallbackTest, PostTask) {
+  MessageLoop loop;
+
+  int count = 0;
+  CancelableClosure cancelable(
+      base::Bind(&Increment, base::Unretained(&count)));
+
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, cancelable.callback());
+  RunLoop().RunUntilIdle();
+
+  EXPECT_EQ(1, count);
+
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, cancelable.callback());
+
+  // Cancel before running the message loop.
+  cancelable.Cancel();
+  RunLoop().RunUntilIdle();
+
+  // Callback never ran due to cancellation; count is the same.
+  EXPECT_EQ(1, count);
+}
+
+// CancelableCallback can be used with move-only types.
+TEST(CancelableCallbackTest, MoveOnlyType) {
+  const int kExpectedResult = 42;
+
+  int result = 0;
+  CancelableCallback<void(std::unique_ptr<int>)> cb(
+      base::Bind(&OnMoveOnlyReceived, base::Unretained(&result)));
+  cb.callback().Run(base::WrapUnique(new int(kExpectedResult)));
+
+  EXPECT_EQ(kExpectedResult, result);
+}
+
+}  // namespace
+}  // namespace base
diff --git a/base/check_example.cc b/base/check_example.cc
new file mode 100644
index 0000000..7b9d8e6
--- /dev/null
+++ b/base/check_example.cc
@@ -0,0 +1,37 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is meant for analyzing the code generated by the CHECK
+// macros in a small executable file that's easy to disassemble.
+
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+
+// An official build shouldn't generate code to print out messages for
+// the CHECK* macros, nor should it have the strings in the
+// executable. It is also important that the CHECK() function collapse to the
+// same implementation as RELEASE_ASSERT(), in particular on Windows x86.
+// Historically, the stream eating caused additional unnecessary instructions.
+// See https://crbug.com/672699.
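+//
+// One way to inspect the generated code (illustrative; the target name and
+// output directory depend on the local build configuration):
+//   ninja -C out/Release check_example
+//   objdump -d out/Release/check_example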
+
+#define BLINK_RELEASE_ASSERT_EQUIVALENT(assertion) \
+  (UNLIKELY(!(assertion)) ? (IMMEDIATE_CRASH()) : (void)0)
+
+void DoCheck(bool b) {
+  CHECK(b) << "DoCheck " << b;
+}
+
+void DoBlinkReleaseAssert(bool b) {
+  BLINK_RELEASE_ASSERT_EQUIVALENT(b);
+}
+
+void DoCheckEq(int x, int y) {
+  CHECK_EQ(x, y);
+}
+
+int main(int argc, const char* argv[]) {
+  DoCheck(argc > 1);
+  DoCheckEq(argc, 1);
+  DoBlinkReleaseAssert(argc > 1);
+}
diff --git a/base/command_line.cc b/base/command_line.cc
new file mode 100644
index 0000000..aec89f5
--- /dev/null
+++ b/base/command_line.cc
@@ -0,0 +1,487 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/command_line.h"
+
+#include <algorithm>
+#include <ostream>
+
+#include "base/files/file_path.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/stl_util.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_tokenizer.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#include <shellapi.h>
+#endif
+
+namespace base {
+
+CommandLine* CommandLine::current_process_commandline_ = nullptr;
+
+namespace {
+
+const CommandLine::CharType kSwitchTerminator[] = FILE_PATH_LITERAL("--");
+const CommandLine::CharType kSwitchValueSeparator[] = FILE_PATH_LITERAL("=");
+
+// Since we use a lazy match, make sure that longer versions (like "--") are
+// listed before shorter versions (like "-") of similar prefixes.
+#if defined(OS_WIN)
+// By putting slash last, we can control whether it is treated as a switch
+// value by changing the value of switch_prefix_count to be one less than
+// the array size.
+const CommandLine::CharType* const kSwitchPrefixes[] = {L"--", L"-", L"/"};
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+// Unixes don't use slash as a switch.
+const CommandLine::CharType* const kSwitchPrefixes[] = {"--", "-"};
+#endif
+size_t switch_prefix_count = arraysize(kSwitchPrefixes);
+
+size_t GetSwitchPrefixLength(const CommandLine::StringType& string) {
+  for (size_t i = 0; i < switch_prefix_count; ++i) {
+    CommandLine::StringType prefix(kSwitchPrefixes[i]);
+    if (string.compare(0, prefix.length(), prefix) == 0)
+      return prefix.length();
+  }
+  return 0;
+}
+
+// Fills in |switch_string| and |switch_value| if |string| is a switch.
+// This will preserve the input switch prefix in the output |switch_string|.
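+// e.g. (illustrative) "--foo=bar" yields switch_string "--foo" and
+// switch_value "bar"; "-v" yields switch_string "-v" and an empty value.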
+bool IsSwitch(const CommandLine::StringType& string,
+              CommandLine::StringType* switch_string,
+              CommandLine::StringType* switch_value) {
+  switch_string->clear();
+  switch_value->clear();
+  size_t prefix_length = GetSwitchPrefixLength(string);
+  if (prefix_length == 0 || prefix_length == string.length())
+    return false;
+
+  const size_t equals_position = string.find(kSwitchValueSeparator);
+  *switch_string = string.substr(0, equals_position);
+  if (equals_position != CommandLine::StringType::npos)
+    *switch_value = string.substr(equals_position + 1);
+  return true;
+}
+
+// Append switches and arguments, keeping switches before arguments.
+void AppendSwitchesAndArguments(CommandLine* command_line,
+                                const CommandLine::StringVector& argv) {
+  bool parse_switches = true;
+  for (size_t i = 1; i < argv.size(); ++i) {
+    CommandLine::StringType arg = argv[i];
+#if defined(OS_WIN)
+    TrimWhitespace(arg, TRIM_ALL, &arg);
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+    TrimWhitespaceASCII(arg, TRIM_ALL, &arg);
+#endif
+
+    CommandLine::StringType switch_string;
+    CommandLine::StringType switch_value;
+    parse_switches &= (arg != kSwitchTerminator);
+    if (parse_switches && IsSwitch(arg, &switch_string, &switch_value)) {
+#if defined(OS_WIN)
+      command_line->AppendSwitchNative(UTF16ToASCII(switch_string),
+                                       switch_value);
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+      command_line->AppendSwitchNative(switch_string, switch_value);
+#else
+#error Unsupported platform
+#endif
+    } else {
+      command_line->AppendArgNative(arg);
+    }
+  }
+}
+
+#if defined(OS_WIN)
+// Quote a string as necessary for CommandLineToArgvW compatibility *on Windows*.
+string16 QuoteForCommandLineToArgvW(const string16& arg,
+                                    bool quote_placeholders) {
+  // We follow the quoting rules of CommandLineToArgvW.
+  // http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
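+  //
+  // For example (illustrative): the argument  C:\dir with spaces\  is quoted
+  // as  "C:\dir with spaces\\"  -- only the trailing backslash is doubled,
+  // because backslashes are escapes only when followed by a double quote.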
+  string16 quotable_chars(L" \\\"");
+  // We may also be required to quote '%', which is commonly used in a command
+  // line as a placeholder. (It may be substituted for a string with spaces.)
+  if (quote_placeholders)
+    quotable_chars.push_back(L'%');
+  if (arg.find_first_of(quotable_chars) == string16::npos) {
+    // No quoting necessary.
+    return arg;
+  }
+
+  string16 out;
+  out.push_back(L'"');
+  for (size_t i = 0; i < arg.size(); ++i) {
+    if (arg[i] == '\\') {
+      // Find the extent of this run of backslashes.
+      size_t start = i, end = start + 1;
+      for (; end < arg.size() && arg[end] == '\\'; ++end) {}
+      size_t backslash_count = end - start;
+
+      // Backslashes are escapes only if the run is followed by a double quote.
+      // Since we also will end the string with a double quote, we escape for
+      // either a double quote or the end of the string.
+      if (end == arg.size() || arg[end] == '"') {
+        // To quote, we need to output 2x as many backslashes.
+        backslash_count *= 2;
+      }
+      for (size_t j = 0; j < backslash_count; ++j)
+        out.push_back('\\');
+
+      // Advance i to one before the end to balance i++ in loop.
+      i = end - 1;
+    } else if (arg[i] == '"') {
+      out.push_back('\\');
+      out.push_back('"');
+    } else {
+      out.push_back(arg[i]);
+    }
+  }
+  out.push_back('"');
+
+  return out;
+}
+#endif
+
+}  // namespace
+
+CommandLine::CommandLine(NoProgram no_program)
+    : argv_(1),
+      begin_args_(1) {
+}
+
+CommandLine::CommandLine(const FilePath& program)
+    : argv_(1),
+      begin_args_(1) {
+  SetProgram(program);
+}
+
+CommandLine::CommandLine(int argc, const CommandLine::CharType* const* argv)
+    : argv_(1),
+      begin_args_(1) {
+  InitFromArgv(argc, argv);
+}
+
+CommandLine::CommandLine(const StringVector& argv)
+    : argv_(1),
+      begin_args_(1) {
+  InitFromArgv(argv);
+}
+
+CommandLine::CommandLine(const CommandLine& other) = default;
+
+CommandLine& CommandLine::operator=(const CommandLine& other) = default;
+
+CommandLine::~CommandLine() = default;
+
+#if defined(OS_WIN)
+// static
+void CommandLine::set_slash_is_not_a_switch() {
+  // The last switch prefix should be slash, so adjust the size to skip it.
+  DCHECK_EQ(wcscmp(kSwitchPrefixes[arraysize(kSwitchPrefixes) - 1], L"/"), 0);
+  switch_prefix_count = arraysize(kSwitchPrefixes) - 1;
+}
+
+// static
+void CommandLine::InitUsingArgvForTesting(int argc, const char* const* argv) {
+  DCHECK(!current_process_commandline_);
+  current_process_commandline_ = new CommandLine(NO_PROGRAM);
+  // On Windows we need to convert the command line arguments to string16.
+  base::CommandLine::StringVector argv_vector;
+  for (int i = 0; i < argc; ++i)
+    argv_vector.push_back(UTF8ToUTF16(argv[i]));
+  current_process_commandline_->InitFromArgv(argv_vector);
+}
+#endif
+
+// static
+bool CommandLine::Init(int argc, const char* const* argv) {
+  if (current_process_commandline_) {
+    // If this is intentional, Reset() must be called first. If we are using
+    // the shared build mode, we have to share a single object across multiple
+    // shared libraries.
+    return false;
+  }
+
+  current_process_commandline_ = new CommandLine(NO_PROGRAM);
+#if defined(OS_WIN)
+  current_process_commandline_->ParseFromString(::GetCommandLineW());
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  current_process_commandline_->InitFromArgv(argc, argv);
+#else
+#error Unsupported platform
+#endif
+
+  return true;
+}
+
+// static
+void CommandLine::Reset() {
+  DCHECK(current_process_commandline_);
+  delete current_process_commandline_;
+  current_process_commandline_ = nullptr;
+}
+
+// static
+CommandLine* CommandLine::ForCurrentProcess() {
+  DCHECK(current_process_commandline_);
+  return current_process_commandline_;
+}
+
+// static
+bool CommandLine::InitializedForCurrentProcess() {
+  return !!current_process_commandline_;
+}
+
+#if defined(OS_WIN)
+// static
+CommandLine CommandLine::FromString(const string16& command_line) {
+  CommandLine cmd(NO_PROGRAM);
+  cmd.ParseFromString(command_line);
+  return cmd;
+}
+#endif
+
+void CommandLine::InitFromArgv(int argc,
+                               const CommandLine::CharType* const* argv) {
+  StringVector new_argv;
+  for (int i = 0; i < argc; ++i)
+    new_argv.push_back(argv[i]);
+  InitFromArgv(new_argv);
+}
+
+void CommandLine::InitFromArgv(const StringVector& argv) {
+  argv_ = StringVector(1);
+  switches_.clear();
+  begin_args_ = 1;
+  SetProgram(argv.empty() ? FilePath() : FilePath(argv[0]));
+  AppendSwitchesAndArguments(this, argv);
+}
+
+FilePath CommandLine::GetProgram() const {
+  return FilePath(argv_[0]);
+}
+
+void CommandLine::SetProgram(const FilePath& program) {
+#if defined(OS_WIN)
+  TrimWhitespace(program.value(), TRIM_ALL, &argv_[0]);
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  TrimWhitespaceASCII(program.value(), TRIM_ALL, &argv_[0]);
+#else
+#error Unsupported platform
+#endif
+}
+
+bool CommandLine::HasSwitch(const base::StringPiece& switch_string) const {
+  DCHECK_EQ(ToLowerASCII(switch_string), switch_string);
+  return ContainsKey(switches_, switch_string);
+}
+
+bool CommandLine::HasSwitch(const char switch_constant[]) const {
+  return HasSwitch(base::StringPiece(switch_constant));
+}
+
+std::string CommandLine::GetSwitchValueASCII(
+    const base::StringPiece& switch_string) const {
+  StringType value = GetSwitchValueNative(switch_string);
+  if (!IsStringASCII(value)) {
+    DLOG(WARNING) << "Value of switch (" << switch_string << ") must be ASCII.";
+    return std::string();
+  }
+#if defined(OS_WIN)
+  return UTF16ToASCII(value);
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  return value;
+#endif
+}
+
+FilePath CommandLine::GetSwitchValuePath(
+    const base::StringPiece& switch_string) const {
+  return FilePath(GetSwitchValueNative(switch_string));
+}
+
+CommandLine::StringType CommandLine::GetSwitchValueNative(
+    const base::StringPiece& switch_string) const {
+  DCHECK_EQ(ToLowerASCII(switch_string), switch_string);
+  auto result = switches_.find(switch_string);
+  return result == switches_.end() ? StringType() : result->second;
+}
+
+void CommandLine::AppendSwitch(const std::string& switch_string) {
+  AppendSwitchNative(switch_string, StringType());
+}
+
+void CommandLine::AppendSwitchPath(const std::string& switch_string,
+                                   const FilePath& path) {
+  AppendSwitchNative(switch_string, path.value());
+}
+
+void CommandLine::AppendSwitchNative(const std::string& switch_string,
+                                     const CommandLine::StringType& value) {
+#if defined(OS_WIN)
+  const std::string switch_key = ToLowerASCII(switch_string);
+  StringType combined_switch_string(ASCIIToUTF16(switch_key));
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  const std::string& switch_key = switch_string;
+  StringType combined_switch_string(switch_key);
+#endif
+  size_t prefix_length = GetSwitchPrefixLength(combined_switch_string);
+  auto insertion =
+      switches_.insert(make_pair(switch_key.substr(prefix_length), value));
+  if (!insertion.second)
+    insertion.first->second = value;
+  // Preserve existing switch prefixes in |argv_|; only append one if necessary.
+  if (prefix_length == 0)
+    combined_switch_string = kSwitchPrefixes[0] + combined_switch_string;
+  if (!value.empty())
+    combined_switch_string += kSwitchValueSeparator + value;
+  // Append the switch and update the switches/arguments divider |begin_args_|.
+  argv_.insert(argv_.begin() + begin_args_++, combined_switch_string);
+}
+
+void CommandLine::AppendSwitchASCII(const std::string& switch_string,
+                                    const std::string& value_string) {
+#if defined(OS_WIN)
+  AppendSwitchNative(switch_string, ASCIIToUTF16(value_string));
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  AppendSwitchNative(switch_string, value_string);
+#else
+#error Unsupported platform
+#endif
+}
+
+void CommandLine::CopySwitchesFrom(const CommandLine& source,
+                                   const char* const switches[],
+                                   size_t count) {
+  for (size_t i = 0; i < count; ++i) {
+    if (source.HasSwitch(switches[i]))
+      AppendSwitchNative(switches[i], source.GetSwitchValueNative(switches[i]));
+  }
+}
+
+CommandLine::StringVector CommandLine::GetArgs() const {
+  // Gather all arguments after the last switch (may include kSwitchTerminator).
+  StringVector args(argv_.begin() + begin_args_, argv_.end());
+  // Erase only the first kSwitchTerminator (maybe "--" is a legitimate arg).
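+  // e.g. (illustrative) { "a", "--", "b", "--" } becomes { "a", "b", "--" }.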
+  StringVector::iterator switch_terminator =
+      std::find(args.begin(), args.end(), kSwitchTerminator);
+  if (switch_terminator != args.end())
+    args.erase(switch_terminator);
+  return args;
+}
+
+void CommandLine::AppendArg(const std::string& value) {
+#if defined(OS_WIN)
+  DCHECK(IsStringUTF8(value));
+  AppendArgNative(UTF8ToWide(value));
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  AppendArgNative(value);
+#else
+#error Unsupported platform
+#endif
+}
+
+void CommandLine::AppendArgPath(const FilePath& path) {
+  AppendArgNative(path.value());
+}
+
+void CommandLine::AppendArgNative(const CommandLine::StringType& value) {
+  argv_.push_back(value);
+}
+
+void CommandLine::AppendArguments(const CommandLine& other,
+                                  bool include_program) {
+  if (include_program)
+    SetProgram(other.GetProgram());
+  AppendSwitchesAndArguments(this, other.argv());
+}
+
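+// For example (illustrative): given argv { "prog", "--flag" }, calling
+// PrependWrapper(FILE_PATH_LITERAL("gdb --args")) yields
+// { "gdb", "--args", "prog", "--flag" }.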
+void CommandLine::PrependWrapper(const CommandLine::StringType& wrapper) {
+  if (wrapper.empty())
+    return;
+  // Split the wrapper command based on whitespace (with quoting).
+  using CommandLineTokenizer =
+      StringTokenizerT<StringType, StringType::const_iterator>;
+  CommandLineTokenizer tokenizer(wrapper, FILE_PATH_LITERAL(" "));
+  tokenizer.set_quote_chars(FILE_PATH_LITERAL("'\""));
+  std::vector<StringType> wrapper_argv;
+  while (tokenizer.GetNext())
+    wrapper_argv.emplace_back(tokenizer.token());
+
+  // Prepend the wrapper and update the switches/arguments |begin_args_|.
+  argv_.insert(argv_.begin(), wrapper_argv.begin(), wrapper_argv.end());
+  begin_args_ += wrapper_argv.size();
+}
+
+#if defined(OS_WIN)
+void CommandLine::ParseFromString(const string16& command_line) {
+  string16 command_line_string;
+  TrimWhitespace(command_line, TRIM_ALL, &command_line_string);
+  if (command_line_string.empty())
+    return;
+
+  int num_args = 0;
+  wchar_t** args =
+      ::CommandLineToArgvW(command_line_string.c_str(), &num_args);
+
+  DPLOG_IF(FATAL, !args) << "CommandLineToArgvW failed on command line: "
+                         << UTF16ToUTF8(command_line);
+  InitFromArgv(num_args, args);
+  LocalFree(args);
+}
+#endif
+
+CommandLine::StringType CommandLine::GetCommandLineStringInternal(
+    bool quote_placeholders) const {
+  StringType string(argv_[0]);
+#if defined(OS_WIN)
+  string = QuoteForCommandLineToArgvW(string, quote_placeholders);
+#endif
+  StringType params(GetArgumentsStringInternal(quote_placeholders));
+  if (!params.empty()) {
+    string.append(StringType(FILE_PATH_LITERAL(" ")));
+    string.append(params);
+  }
+  return string;
+}
+
+CommandLine::StringType CommandLine::GetArgumentsStringInternal(
+    bool quote_placeholders) const {
+  StringType params;
+  // Append switches and arguments.
+  bool parse_switches = true;
+  for (size_t i = 1; i < argv_.size(); ++i) {
+    StringType arg = argv_[i];
+    StringType switch_string;
+    StringType switch_value;
+    parse_switches &= arg != kSwitchTerminator;
+    if (i > 1)
+      params.append(StringType(FILE_PATH_LITERAL(" ")));
+    if (parse_switches && IsSwitch(arg, &switch_string, &switch_value)) {
+      params.append(switch_string);
+      if (!switch_value.empty()) {
+#if defined(OS_WIN)
+        switch_value =
+            QuoteForCommandLineToArgvW(switch_value, quote_placeholders);
+#endif
+        params.append(kSwitchValueSeparator + switch_value);
+      }
+    } else {
+#if defined(OS_WIN)
+      arg = QuoteForCommandLineToArgvW(arg, quote_placeholders);
+#endif
+      params.append(arg);
+    }
+  }
+  return params;
+}
+
+}  // namespace base
diff --git a/base/command_line.h b/base/command_line.h
new file mode 100644
index 0000000..25fd7d9
--- /dev/null
+++ b/base/command_line.h
@@ -0,0 +1,247 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This class works with command lines: building and parsing.
+// Arguments with prefixes ('--', '-', and on Windows, '/') are switches.
+// Switches will precede all other arguments without switch prefixes.
+// Switches can optionally have values, delimited by '=', e.g., "-switch=value".
+// An argument of "--" will terminate switch parsing during initialization,
+// interpreting subsequent tokens as non-switch arguments, regardless of prefix.
+
+// There is a singleton read-only CommandLine that represents the command line
+// that the current process was started with.  It must be initialized in main().
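+//
+// For example (illustrative), the argv
+//   program --out=a.txt -v -- --literal input.txt
+// parses as switches { out: "a.txt", v: "" } and, via GetArgs(), the
+// arguments { "--literal", "input.txt" }.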
+
+#ifndef BASE_COMMAND_LINE_H_
+#define BASE_COMMAND_LINE_H_
+
+#include <stddef.h>
+#include <map>
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
+#include "build/build_config.h"
+
+namespace base {
+
+class FilePath;
+
+class BASE_EXPORT CommandLine {
+ public:
+#if defined(OS_WIN)
+  // The native command line string type.
+  using StringType = string16;
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  using StringType = std::string;
+#endif
+
+  using CharType = StringType::value_type;
+  using StringVector = std::vector<StringType>;
+  using SwitchMap = std::map<std::string, StringType, std::less<>>;
+
+  // A constructor for CommandLines that only carry switches and arguments.
+  enum NoProgram { NO_PROGRAM };
+  explicit CommandLine(NoProgram no_program);
+
+  // Construct a new command line with |program| as argv[0].
+  explicit CommandLine(const FilePath& program);
+
+  // Construct a new command line from an argument list.
+  CommandLine(int argc, const CharType* const* argv);
+  explicit CommandLine(const StringVector& argv);
+
+  // Copy and assign are supported; a copied CommandLine is fully independent.
+  CommandLine(const CommandLine& other);
+  CommandLine& operator=(const CommandLine& other);
+
+  ~CommandLine();
+
+#if defined(OS_WIN)
+  // By default this class will treat command-line arguments beginning with
+  // slashes as switches on Windows, but not other platforms.
+  //
+  // If this behavior is inappropriate for your application, you can call this
+  // function BEFORE initializing the current process' global command line
+  // object and the behavior will be the same as Posix systems (only hyphens
+  // begin switches, everything else will be an arg).
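+  // e.g. (illustrative) call set_slash_is_not_a_switch() before Init() so a
+  // path argument such as "/data/input.txt" is not misparsed as a switch.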
+  static void set_slash_is_not_a_switch();
+
+  // Normally when the CommandLine singleton is initialized it gets the command
+  // line via the GetCommandLineW API and then uses the shell32 API
+  // CommandLineToArgvW to parse the command line and convert it back to
+  // argc and argv. Tests that don't want this dependency on shell32 and need
+  // to honor the arguments passed in should use this function.
+  static void InitUsingArgvForTesting(int argc, const char* const* argv);
+#endif
+
+  // Initialize the current process CommandLine singleton. On Windows, ignores
+  // its arguments (we instead parse GetCommandLineW() directly) because we
+  // don't trust the CRT's parsing of the command line, but it still must be
+  // called to set up the command line. Returns false if initialization has
+  // already occurred, and true otherwise. Only the caller receiving a 'true'
+  // return value should take responsibility for calling Reset.
+  static bool Init(int argc, const char* const* argv);
+
+  // Destroys the current process CommandLine singleton. This is necessary if
+  // you want to reset the base library to its initial state (for example, in an
+  // outer library that needs to be able to terminate, and be re-initialized).
+  // If Init is called only once, as in main(), Reset() is not necessary.
+  // Do not call this in tests. Use base::test::ScopedCommandLine instead.
+  static void Reset();
+
+  // Get the singleton CommandLine representing the current process's
+  // command line. Note: returned value is mutable, but not thread safe;
+  // only mutate if you know what you're doing!
+  static CommandLine* ForCurrentProcess();
+
+  // Returns true if the CommandLine has been initialized for the given process.
+  static bool InitializedForCurrentProcess();
+
+#if defined(OS_WIN)
+  static CommandLine FromString(const string16& command_line);
+#endif
+
+  // Initialize from an argv vector.
+  void InitFromArgv(int argc, const CharType* const* argv);
+  void InitFromArgv(const StringVector& argv);
+
+  // Constructs and returns the represented command line string.
+  // CAUTION! This should be avoided on POSIX because quoting behavior is
+  // unclear.
+  StringType GetCommandLineString() const {
+    return GetCommandLineStringInternal(false);
+  }
+
+#if defined(OS_WIN)
+  // Constructs and returns the represented command line string. Assumes the
+  // command line contains placeholders (eg, %1) and quotes any program or
+  // argument with a '%' in it. This should be avoided unless the placeholder is
+  // required by an external interface (eg, the Windows registry), because it is
+  // not generally safe to replace it with an arbitrary string. If possible,
+  // placeholders should be replaced *before* converting the command line to a
+  // string.
+  StringType GetCommandLineStringWithPlaceholders() const {
+    return GetCommandLineStringInternal(true);
+  }
+#endif
+
+  // Constructs and returns the represented arguments string.
+  // CAUTION! This should be avoided on POSIX because quoting behavior is
+  // unclear.
+  StringType GetArgumentsString() const {
+    return GetArgumentsStringInternal(false);
+  }
+
+#if defined(OS_WIN)
+  // Constructs and returns the represented arguments string. Assumes the
+  // command line contains placeholders (eg, %1) and quotes any argument with a
+  // '%' in it. This should be avoided unless the placeholder is required by an
+  // external interface (eg, the Windows registry), because it is not generally
+  // safe to replace it with an arbitrary string. If possible, placeholders
+  // should be replaced *before* converting the arguments to a string.
+  StringType GetArgumentsStringWithPlaceholders() const {
+    return GetArgumentsStringInternal(true);
+  }
+#endif
+
+  // Returns the original command line string as a vector of strings.
+  const StringVector& argv() const { return argv_; }
+
+  // Get and Set the program part of the command line string (the first item).
+  FilePath GetProgram() const;
+  void SetProgram(const FilePath& program);
+
+  // Returns true if this command line contains the given switch.
+  // Switch names must be lowercase.
+  // The second overload provides an optimized version to avoid inlining codegen
+  // at every callsite to find the length of the constant and construct a
+  // StringPiece.
+  bool HasSwitch(const StringPiece& switch_string) const;
+  bool HasSwitch(const char switch_constant[]) const;
+
+  // Returns the value associated with the given switch. If the switch has no
+  // value or isn't present, this method returns the empty string.
+  // Switch names must be lowercase.
+  std::string GetSwitchValueASCII(const StringPiece& switch_string) const;
+  FilePath GetSwitchValuePath(const StringPiece& switch_string) const;
+  StringType GetSwitchValueNative(const StringPiece& switch_string) const;
+
+  // Get a copy of all switches, along with their values.
+  const SwitchMap& GetSwitches() const { return switches_; }
+
+  // Append a switch [with optional value] to the command line.
+  // Note: Switches will precede arguments regardless of appending order.
+  void AppendSwitch(const std::string& switch_string);
+  void AppendSwitchPath(const std::string& switch_string,
+                        const FilePath& path);
+  void AppendSwitchNative(const std::string& switch_string,
+                          const StringType& value);
+  void AppendSwitchASCII(const std::string& switch_string,
+                         const std::string& value);
+
+  // Copy a set of switches (and any values) from another command line.
+  // Commonly used when launching a subprocess.
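+  // e.g. (a minimal sketch; the names are hypothetical):
+  //   static const char* const kSwitchesToCopy[] = {"foo", "bar"};
+  //   child_cl.CopySwitchesFrom(parent_cl, kSwitchesToCopy,
+  //                             arraysize(kSwitchesToCopy));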
+  void CopySwitchesFrom(const CommandLine& source,
+                        const char* const switches[],
+                        size_t count);
+
+  // Get the remaining arguments to the command.
+  StringVector GetArgs() const;
+
+  // Append an argument to the command line. Note that the argument is quoted
+  // properly such that it is interpreted as one argument to the target command.
+  // AppendArg is primarily for ASCII; non-ASCII input is interpreted as UTF-8.
+  // Note: Switches will precede arguments regardless of appending order.
+  void AppendArg(const std::string& value);
+  void AppendArgPath(const FilePath& value);
+  void AppendArgNative(const StringType& value);
+
+  // Append the switches and arguments from another command line to this one.
+  // If |include_program| is true, include |other|'s program as well.
+  void AppendArguments(const CommandLine& other, bool include_program);
+
+  // Insert a command before the current command.
+  // Common for debuggers, like "gdb --args".
+  void PrependWrapper(const StringType& wrapper);
+
+#if defined(OS_WIN)
+  // Initialize by parsing the given command line string.
+  // The program name is assumed to be the first item in the string.
+  void ParseFromString(const string16& command_line);
+#endif
+
+ private:
+  // Disallow default constructor; a program name must be explicitly specified.
+  CommandLine() = delete;
+  // Allow the copy constructor. A common pattern is to copy the current
+  // process's command line and then add some flags to it. For example:
+  //   CommandLine cl(*CommandLine::ForCurrentProcess());
+  //   cl.AppendSwitch(...);
+
+  // Internal version of GetCommandLineString. If |quote_placeholders| is true,
+  // also quotes parts with '%' in them.
+  StringType GetCommandLineStringInternal(bool quote_placeholders) const;
+
+  // Internal version of GetArgumentsString. If |quote_placeholders| is true,
+  // also quotes parts with '%' in them.
+  StringType GetArgumentsStringInternal(bool quote_placeholders) const;
+
+  // The singleton CommandLine representing the current process's command line.
+  static CommandLine* current_process_commandline_;
+
+  // The argv array: { program, [(--|-|/)switch[=value]]*, [--], [argument]* }
+  StringVector argv_;
+
+  // Parsed-out switch keys and values.
+  SwitchMap switches_;
+
+  // The index after the program and switches, any arguments start here.
+  size_t begin_args_;
+};
+
+}  // namespace base
+
+#endif  // BASE_COMMAND_LINE_H_
diff --git a/base/command_line_unittest.cc b/base/command_line_unittest.cc
new file mode 100644
index 0000000..3718cd9
--- /dev/null
+++ b/base/command_line_unittest.cc
@@ -0,0 +1,440 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/command_line.h"
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/files/file_path.h"
+#include "base/macros.h"
+#include "base/strings/utf_string_conversions.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+// To test Windows quoting behavior, we use a string that has some backslashes
+// and quotes.
+// Consider the command-line argument: q\"bs1\bs2\\bs3q\\\"
+// Here it is with C-style escapes.
+static const CommandLine::StringType kTrickyQuoted =
+    FILE_PATH_LITERAL("q\\\"bs1\\bs2\\\\bs3q\\\\\\\"");
+// It should be parsed by Windows as: q"bs1\bs2\\bs3q\"
+// Here that is with C-style escapes.
+static const CommandLine::StringType kTricky =
+    FILE_PATH_LITERAL("q\"bs1\\bs2\\\\bs3q\\\"");
+
+TEST(CommandLineTest, CommandLineConstructor) {
+  const CommandLine::CharType* argv[] = {
+      FILE_PATH_LITERAL("program"),
+      FILE_PATH_LITERAL("--foo="),
+      FILE_PATH_LITERAL("-bAr"),
+      FILE_PATH_LITERAL("-spaetzel=pierogi"),
+      FILE_PATH_LITERAL("-baz"),
+      FILE_PATH_LITERAL("flim"),
+      FILE_PATH_LITERAL("--other-switches=--dog=canine --cat=feline"),
+      FILE_PATH_LITERAL("-spaetzle=Crepe"),
+      FILE_PATH_LITERAL("-=loosevalue"),
+      FILE_PATH_LITERAL("-"),
+      FILE_PATH_LITERAL("FLAN"),
+      FILE_PATH_LITERAL("a"),
+      FILE_PATH_LITERAL("--input-translation=45--output-rotation"),
+      FILE_PATH_LITERAL("--"),
+      FILE_PATH_LITERAL("--"),
+      FILE_PATH_LITERAL("--not-a-switch"),
+      FILE_PATH_LITERAL("\"in the time of submarines...\""),
+      FILE_PATH_LITERAL("unquoted arg-with-space")};
+  CommandLine cl(arraysize(argv), argv);
+
+  EXPECT_FALSE(cl.GetCommandLineString().empty());
+  EXPECT_FALSE(cl.HasSwitch("cruller"));
+  EXPECT_FALSE(cl.HasSwitch("flim"));
+  EXPECT_FALSE(cl.HasSwitch("program"));
+  EXPECT_FALSE(cl.HasSwitch("dog"));
+  EXPECT_FALSE(cl.HasSwitch("cat"));
+  EXPECT_FALSE(cl.HasSwitch("output-rotation"));
+  EXPECT_FALSE(cl.HasSwitch("not-a-switch"));
+  EXPECT_FALSE(cl.HasSwitch("--"));
+
+  EXPECT_EQ(FilePath(FILE_PATH_LITERAL("program")).value(),
+            cl.GetProgram().value());
+
+  EXPECT_TRUE(cl.HasSwitch("foo"));
+#if defined(OS_WIN)
+  EXPECT_TRUE(cl.HasSwitch("bar"));
+#else
+  EXPECT_FALSE(cl.HasSwitch("bar"));
+#endif
+  EXPECT_TRUE(cl.HasSwitch("baz"));
+  EXPECT_TRUE(cl.HasSwitch("spaetzle"));
+  EXPECT_TRUE(cl.HasSwitch("other-switches"));
+  EXPECT_TRUE(cl.HasSwitch("input-translation"));
+
+  EXPECT_EQ("Crepe", cl.GetSwitchValueASCII("spaetzle"));
+  EXPECT_EQ("", cl.GetSwitchValueASCII("foo"));
+  EXPECT_EQ("", cl.GetSwitchValueASCII("bar"));
+  EXPECT_EQ("", cl.GetSwitchValueASCII("cruller"));
+  EXPECT_EQ("--dog=canine --cat=feline", cl.GetSwitchValueASCII(
+      "other-switches"));
+  EXPECT_EQ("45--output-rotation", cl.GetSwitchValueASCII("input-translation"));
+
+  const CommandLine::StringVector& args = cl.GetArgs();
+  ASSERT_EQ(8U, args.size());
+
+  std::vector<CommandLine::StringType>::const_iterator iter = args.begin();
+  EXPECT_EQ(FILE_PATH_LITERAL("flim"), *iter);
+  ++iter;
+  EXPECT_EQ(FILE_PATH_LITERAL("-"), *iter);
+  ++iter;
+  EXPECT_EQ(FILE_PATH_LITERAL("FLAN"), *iter);
+  ++iter;
+  EXPECT_EQ(FILE_PATH_LITERAL("a"), *iter);
+  ++iter;
+  EXPECT_EQ(FILE_PATH_LITERAL("--"), *iter);
+  ++iter;
+  EXPECT_EQ(FILE_PATH_LITERAL("--not-a-switch"), *iter);
+  ++iter;
+  EXPECT_EQ(FILE_PATH_LITERAL("\"in the time of submarines...\""), *iter);
+  ++iter;
+  EXPECT_EQ(FILE_PATH_LITERAL("unquoted arg-with-space"), *iter);
+  ++iter;
+  EXPECT_TRUE(iter == args.end());
+}
+
+TEST(CommandLineTest, CommandLineFromString) {
+#if defined(OS_WIN)
+  CommandLine cl = CommandLine::FromString(
+      L"program --foo= -bAr  /Spaetzel=pierogi /Baz flim "
+      L"--other-switches=\"--dog=canine --cat=feline\" "
+      L"-spaetzle=Crepe   -=loosevalue  FLAN "
+      L"--input-translation=\"45\"--output-rotation "
+      L"--quotes=" + kTrickyQuoted + L" "
+      L"-- -- --not-a-switch "
+      L"\"in the time of submarines...\"");
+
+  EXPECT_FALSE(cl.GetCommandLineString().empty());
+  EXPECT_FALSE(cl.HasSwitch("cruller"));
+  EXPECT_FALSE(cl.HasSwitch("flim"));
+  EXPECT_FALSE(cl.HasSwitch("program"));
+  EXPECT_FALSE(cl.HasSwitch("dog"));
+  EXPECT_FALSE(cl.HasSwitch("cat"));
+  EXPECT_FALSE(cl.HasSwitch("output-rotation"));
+  EXPECT_FALSE(cl.HasSwitch("not-a-switch"));
+  EXPECT_FALSE(cl.HasSwitch("--"));
+
+  EXPECT_EQ(FilePath(FILE_PATH_LITERAL("program")).value(),
+            cl.GetProgram().value());
+
+  EXPECT_TRUE(cl.HasSwitch("foo"));
+  EXPECT_TRUE(cl.HasSwitch("bar"));
+  EXPECT_TRUE(cl.HasSwitch("baz"));
+  EXPECT_TRUE(cl.HasSwitch("spaetzle"));
+  EXPECT_TRUE(cl.HasSwitch("other-switches"));
+  EXPECT_TRUE(cl.HasSwitch("input-translation"));
+  EXPECT_TRUE(cl.HasSwitch("quotes"));
+
+  EXPECT_EQ("Crepe", cl.GetSwitchValueASCII("spaetzle"));
+  EXPECT_EQ("", cl.GetSwitchValueASCII("foo"));
+  EXPECT_EQ("", cl.GetSwitchValueASCII("bar"));
+  EXPECT_EQ("", cl.GetSwitchValueASCII("cruller"));
+  EXPECT_EQ("--dog=canine --cat=feline", cl.GetSwitchValueASCII(
+      "other-switches"));
+  EXPECT_EQ("45--output-rotation", cl.GetSwitchValueASCII("input-translation"));
+  EXPECT_EQ(kTricky, cl.GetSwitchValueNative("quotes"));
+
+  const CommandLine::StringVector& args = cl.GetArgs();
+  ASSERT_EQ(5U, args.size());
+
+  std::vector<CommandLine::StringType>::const_iterator iter = args.begin();
+  EXPECT_EQ(FILE_PATH_LITERAL("flim"), *iter);
+  ++iter;
+  EXPECT_EQ(FILE_PATH_LITERAL("FLAN"), *iter);
+  ++iter;
+  EXPECT_EQ(FILE_PATH_LITERAL("--"), *iter);
+  ++iter;
+  EXPECT_EQ(FILE_PATH_LITERAL("--not-a-switch"), *iter);
+  ++iter;
+  EXPECT_EQ(FILE_PATH_LITERAL("in the time of submarines..."), *iter);
+  ++iter;
+  EXPECT_TRUE(iter == args.end());
+
+  // Check that a generated string produces an equivalent command line.
+  CommandLine cl_duplicate = CommandLine::FromString(cl.GetCommandLineString());
+  EXPECT_EQ(cl.GetCommandLineString(), cl_duplicate.GetCommandLineString());
+#endif
+}
+
+// Tests behavior with an empty input string.
+TEST(CommandLineTest, EmptyString) {
+#if defined(OS_WIN)
+  CommandLine cl_from_string = CommandLine::FromString(L"");
+  EXPECT_TRUE(cl_from_string.GetCommandLineString().empty());
+  EXPECT_TRUE(cl_from_string.GetProgram().empty());
+  EXPECT_EQ(1U, cl_from_string.argv().size());
+  EXPECT_TRUE(cl_from_string.GetArgs().empty());
+#endif
+  CommandLine cl_from_argv(0, nullptr);
+  EXPECT_TRUE(cl_from_argv.GetCommandLineString().empty());
+  EXPECT_TRUE(cl_from_argv.GetProgram().empty());
+  EXPECT_EQ(1U, cl_from_argv.argv().size());
+  EXPECT_TRUE(cl_from_argv.GetArgs().empty());
+}
+
+TEST(CommandLineTest, GetArgumentsString) {
+  static const FilePath::CharType kPath1[] =
+      FILE_PATH_LITERAL("C:\\Some File\\With Spaces.ggg");
+  static const FilePath::CharType kPath2[] =
+      FILE_PATH_LITERAL("C:\\no\\spaces.ggg");
+
+  static const char kFirstArgName[] = "first-arg";
+  static const char kSecondArgName[] = "arg2";
+  static const char kThirdArgName[] = "arg with space";
+  static const char kFourthArgName[] = "nospace";
+  static const char kFifthArgName[] = "%1";
+
+  CommandLine cl(CommandLine::NO_PROGRAM);
+  cl.AppendSwitchPath(kFirstArgName, FilePath(kPath1));
+  cl.AppendSwitchPath(kSecondArgName, FilePath(kPath2));
+  cl.AppendArg(kThirdArgName);
+  cl.AppendArg(kFourthArgName);
+  cl.AppendArg(kFifthArgName);
+
+#if defined(OS_WIN)
+  CommandLine::StringType expected_first_arg(UTF8ToUTF16(kFirstArgName));
+  CommandLine::StringType expected_second_arg(UTF8ToUTF16(kSecondArgName));
+  CommandLine::StringType expected_third_arg(UTF8ToUTF16(kThirdArgName));
+  CommandLine::StringType expected_fourth_arg(UTF8ToUTF16(kFourthArgName));
+  CommandLine::StringType expected_fifth_arg(UTF8ToUTF16(kFifthArgName));
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  CommandLine::StringType expected_first_arg(kFirstArgName);
+  CommandLine::StringType expected_second_arg(kSecondArgName);
+  CommandLine::StringType expected_third_arg(kThirdArgName);
+  CommandLine::StringType expected_fourth_arg(kFourthArgName);
+  CommandLine::StringType expected_fifth_arg(kFifthArgName);
+#endif
+
+#if defined(OS_WIN)
+#define QUOTE_ON_WIN FILE_PATH_LITERAL("\"")
+#else
+#define QUOTE_ON_WIN FILE_PATH_LITERAL("")
+#endif  // OS_WIN
+
+  CommandLine::StringType expected_str;
+  expected_str.append(FILE_PATH_LITERAL("--"))
+              .append(expected_first_arg)
+              .append(FILE_PATH_LITERAL("="))
+              .append(QUOTE_ON_WIN)
+              .append(kPath1)
+              .append(QUOTE_ON_WIN)
+              .append(FILE_PATH_LITERAL(" "))
+              .append(FILE_PATH_LITERAL("--"))
+              .append(expected_second_arg)
+              .append(FILE_PATH_LITERAL("="))
+              .append(QUOTE_ON_WIN)
+              .append(kPath2)
+              .append(QUOTE_ON_WIN)
+              .append(FILE_PATH_LITERAL(" "))
+              .append(QUOTE_ON_WIN)
+              .append(expected_third_arg)
+              .append(QUOTE_ON_WIN)
+              .append(FILE_PATH_LITERAL(" "))
+              .append(expected_fourth_arg)
+              .append(FILE_PATH_LITERAL(" "));
+
+  CommandLine::StringType expected_str_no_quote_placeholders(expected_str);
+  expected_str_no_quote_placeholders.append(expected_fifth_arg);
+  EXPECT_EQ(expected_str_no_quote_placeholders, cl.GetArgumentsString());
+
+#if defined(OS_WIN)
+  CommandLine::StringType expected_str_quote_placeholders(expected_str);
+  expected_str_quote_placeholders.append(QUOTE_ON_WIN)
+                                 .append(expected_fifth_arg)
+                                 .append(QUOTE_ON_WIN);
+  EXPECT_EQ(expected_str_quote_placeholders,
+            cl.GetArgumentsStringWithPlaceholders());
+#endif
+}
+
+// Test methods for appending switches to a command line.
+TEST(CommandLineTest, AppendSwitches) {
+  std::string switch1 = "switch1";
+  std::string switch2 = "switch2";
+  std::string value2 = "value";
+  std::string switch3 = "switch3";
+  std::string value3 = "a value with spaces";
+  std::string switch4 = "switch4";
+  std::string value4 = "\"a value with quotes\"";
+  std::string switch5 = "quotes";
+  CommandLine::StringType value5 = kTricky;
+
+  CommandLine cl(FilePath(FILE_PATH_LITERAL("Program")));
+
+  cl.AppendSwitch(switch1);
+  cl.AppendSwitchASCII(switch2, value2);
+  cl.AppendSwitchASCII(switch3, value3);
+  cl.AppendSwitchASCII(switch4, value4);
+  cl.AppendSwitchASCII(switch5, value4);
+  cl.AppendSwitchNative(switch5, value5);
+
+  EXPECT_TRUE(cl.HasSwitch(switch1));
+  EXPECT_TRUE(cl.HasSwitch(switch2));
+  EXPECT_EQ(value2, cl.GetSwitchValueASCII(switch2));
+  EXPECT_TRUE(cl.HasSwitch(switch3));
+  EXPECT_EQ(value3, cl.GetSwitchValueASCII(switch3));
+  EXPECT_TRUE(cl.HasSwitch(switch4));
+  EXPECT_EQ(value4, cl.GetSwitchValueASCII(switch4));
+  EXPECT_TRUE(cl.HasSwitch(switch5));
+  EXPECT_EQ(value5, cl.GetSwitchValueNative(switch5));
+
+#if defined(OS_WIN)
+  EXPECT_EQ(L"Program "
+            L"--switch1 "
+            L"--switch2=value "
+            L"--switch3=\"a value with spaces\" "
+            L"--switch4=\"\\\"a value with quotes\\\"\" "
+            // Even though the switches are unique, appending can add repeat
+            // switches to argv.
+            L"--quotes=\"\\\"a value with quotes\\\"\" "
+            L"--quotes=\"" + kTrickyQuoted + L"\"",
+            cl.GetCommandLineString());
+#endif
+}
+
+TEST(CommandLineTest, AppendSwitchesDashDash) {
+  const CommandLine::CharType* raw_argv[] = { FILE_PATH_LITERAL("prog"),
+                                              FILE_PATH_LITERAL("--"),
+                                              FILE_PATH_LITERAL("--arg1") };
+  CommandLine cl(arraysize(raw_argv), raw_argv);
+
+  cl.AppendSwitch("switch1");
+  cl.AppendSwitchASCII("switch2", "foo");
+
+  cl.AppendArg("--arg2");
+
+  EXPECT_EQ(FILE_PATH_LITERAL("prog --switch1 --switch2=foo -- --arg1 --arg2"),
+            cl.GetCommandLineString());
+  CommandLine::StringVector cl_argv = cl.argv();
+  EXPECT_EQ(FILE_PATH_LITERAL("prog"), cl_argv[0]);
+  EXPECT_EQ(FILE_PATH_LITERAL("--switch1"), cl_argv[1]);
+  EXPECT_EQ(FILE_PATH_LITERAL("--switch2=foo"), cl_argv[2]);
+  EXPECT_EQ(FILE_PATH_LITERAL("--"), cl_argv[3]);
+  EXPECT_EQ(FILE_PATH_LITERAL("--arg1"), cl_argv[4]);
+  EXPECT_EQ(FILE_PATH_LITERAL("--arg2"), cl_argv[5]);
+}
+
+// Tests that when AppendArguments is called that the program is set correctly
+// on the target CommandLine object and the switches from the source
+// CommandLine are added to the target.
+TEST(CommandLineTest, AppendArguments) {
+  CommandLine cl1(FilePath(FILE_PATH_LITERAL("Program")));
+  cl1.AppendSwitch("switch1");
+  cl1.AppendSwitchASCII("switch2", "foo");
+
+  CommandLine cl2(CommandLine::NO_PROGRAM);
+  cl2.AppendArguments(cl1, true);
+  EXPECT_EQ(cl1.GetProgram().value(), cl2.GetProgram().value());
+  EXPECT_EQ(cl1.GetCommandLineString(), cl2.GetCommandLineString());
+
+  CommandLine c1(FilePath(FILE_PATH_LITERAL("Program1")));
+  c1.AppendSwitch("switch1");
+  CommandLine c2(FilePath(FILE_PATH_LITERAL("Program2")));
+  c2.AppendSwitch("switch2");
+
+  c1.AppendArguments(c2, true);
+  EXPECT_EQ(c1.GetProgram().value(), c2.GetProgram().value());
+  EXPECT_TRUE(c1.HasSwitch("switch1"));
+  EXPECT_TRUE(c1.HasSwitch("switch2"));
+}
+
+#if defined(OS_WIN)
+// Make sure that the command line string program paths are quoted as necessary.
+// This only makes sense on Windows and the test is basically here to guard
+// against regressions.
+TEST(CommandLineTest, ProgramQuotes) {
+  // Check that quotes are not added for paths without spaces.
+  const FilePath kProgram(L"Program");
+  CommandLine cl_program(kProgram);
+  EXPECT_EQ(kProgram.value(), cl_program.GetProgram().value());
+  EXPECT_EQ(kProgram.value(), cl_program.GetCommandLineString());
+
+  const FilePath kProgramPath(L"Program Path");
+
+  // Check that quotes are not returned from GetProgram().
+  CommandLine cl_program_path(kProgramPath);
+  EXPECT_EQ(kProgramPath.value(), cl_program_path.GetProgram().value());
+
+  // Check that quotes are added to command line string paths containing spaces.
+  CommandLine::StringType cmd_string(cl_program_path.GetCommandLineString());
+  EXPECT_EQ(L"\"Program Path\"", cmd_string);
+
+  // Check the optional quoting of placeholders in programs.
+  CommandLine cl_quote_placeholder(FilePath(L"%1"));
+  EXPECT_EQ(L"%1", cl_quote_placeholder.GetCommandLineString());
+  EXPECT_EQ(L"\"%1\"",
+            cl_quote_placeholder.GetCommandLineStringWithPlaceholders());
+}
+#endif
+
+// Calling Init multiple times should not modify the previous CommandLine.
+TEST(CommandLineTest, Init) {
+  // Call Init() once without checking the result, so we know it has been
+  // called whether or not the test runner already did so.
+  CommandLine::Init(0, nullptr);
+  CommandLine* initial = CommandLine::ForCurrentProcess();
+  EXPECT_FALSE(CommandLine::Init(0, nullptr));
+  CommandLine* current = CommandLine::ForCurrentProcess();
+  EXPECT_EQ(initial, current);
+}
+
+// Test that copies of CommandLine have a valid StringPiece map.
+TEST(CommandLineTest, Copy) {
+  std::unique_ptr<CommandLine> initial(
+      new CommandLine(CommandLine::NO_PROGRAM));
+  initial->AppendSwitch("a");
+  initial->AppendSwitch("bbbbbbbbbbbbbbb");
+  initial->AppendSwitch("c");
+  CommandLine copy_constructed(*initial);
+  CommandLine assigned = *initial;
+  CommandLine::SwitchMap switch_map = initial->GetSwitches();
+  initial.reset();
+  for (const auto& pair : switch_map)
+    EXPECT_TRUE(copy_constructed.HasSwitch(pair.first));
+  for (const auto& pair : switch_map)
+    EXPECT_TRUE(assigned.HasSwitch(pair.first));
+}
+
+TEST(CommandLineTest, PrependSimpleWrapper) {
+  CommandLine cl(FilePath(FILE_PATH_LITERAL("Program")));
+  cl.AppendSwitch("a");
+  cl.AppendSwitch("b");
+  cl.PrependWrapper(FILE_PATH_LITERAL("wrapper --foo --bar"));
+
+  EXPECT_EQ(6u, cl.argv().size());
+  EXPECT_EQ(FILE_PATH_LITERAL("wrapper"), cl.argv()[0]);
+  EXPECT_EQ(FILE_PATH_LITERAL("--foo"), cl.argv()[1]);
+  EXPECT_EQ(FILE_PATH_LITERAL("--bar"), cl.argv()[2]);
+  EXPECT_EQ(FILE_PATH_LITERAL("Program"), cl.argv()[3]);
+  EXPECT_EQ(FILE_PATH_LITERAL("--a"), cl.argv()[4]);
+  EXPECT_EQ(FILE_PATH_LITERAL("--b"), cl.argv()[5]);
+}
+
+TEST(CommandLineTest, PrependComplexWrapper) {
+  CommandLine cl(FilePath(FILE_PATH_LITERAL("Program")));
+  cl.AppendSwitch("a");
+  cl.AppendSwitch("b");
+  cl.PrependWrapper(
+      FILE_PATH_LITERAL("wrapper --foo='hello world' --bar=\"let's go\""));
+
+  EXPECT_EQ(6u, cl.argv().size());
+  EXPECT_EQ(FILE_PATH_LITERAL("wrapper"), cl.argv()[0]);
+  EXPECT_EQ(FILE_PATH_LITERAL("--foo='hello world'"), cl.argv()[1]);
+  EXPECT_EQ(FILE_PATH_LITERAL("--bar=\"let's go\""), cl.argv()[2]);
+  EXPECT_EQ(FILE_PATH_LITERAL("Program"), cl.argv()[3]);
+  EXPECT_EQ(FILE_PATH_LITERAL("--a"), cl.argv()[4]);
+  EXPECT_EQ(FILE_PATH_LITERAL("--b"), cl.argv()[5]);
+}
+
+}  // namespace base
diff --git a/base/compiler_specific.h b/base/compiler_specific.h
new file mode 100644
index 0000000..88c290f
--- /dev/null
+++ b/base/compiler_specific.h
@@ -0,0 +1,231 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_COMPILER_SPECIFIC_H_
+#define BASE_COMPILER_SPECIFIC_H_
+
+#include "build/build_config.h"
+
+#if defined(COMPILER_MSVC)
+
+// For _Printf_format_string_.
+#include <sal.h>
+
+// Macros for suppressing and disabling warnings on MSVC.
+//
+// Warning numbers are enumerated at:
+// http://msdn.microsoft.com/en-us/library/8x5x43k7(VS.80).aspx
+//
+// The warning pragma:
+// http://msdn.microsoft.com/en-us/library/2c8f766e(VS.80).aspx
+//
+// Using __pragma instead of #pragma inside macros:
+// http://msdn.microsoft.com/en-us/library/d9x1s805.aspx
+
+// MSVC_SUPPRESS_WARNING disables warning |n| for the remainder of the line and
+// for the next line of the source file.
+#define MSVC_SUPPRESS_WARNING(n) __pragma(warning(suppress:n))
+
+// MSVC_PUSH_DISABLE_WARNING pushes |n| onto a stack of warnings to be disabled.
+// The warning remains disabled until popped by MSVC_POP_WARNING.
+#define MSVC_PUSH_DISABLE_WARNING(n) __pragma(warning(push)) \
+                                     __pragma(warning(disable:n))
+
+// MSVC_PUSH_WARNING_LEVEL pushes |n| as the global warning level.  The level
+// remains in effect until popped by MSVC_POP_WARNING().  Use 0 to disable all
+// warnings.
+#define MSVC_PUSH_WARNING_LEVEL(n) __pragma(warning(push, n))
+
+// Pop effects of innermost MSVC_PUSH_* macro.
+#define MSVC_POP_WARNING() __pragma(warning(pop))
+
+#define MSVC_DISABLE_OPTIMIZE() __pragma(optimize("", off))
+#define MSVC_ENABLE_OPTIMIZE() __pragma(optimize("", on))
+
+#else  // Not MSVC
+
+#define _Printf_format_string_
+#define MSVC_SUPPRESS_WARNING(n)
+#define MSVC_PUSH_DISABLE_WARNING(n)
+#define MSVC_PUSH_WARNING_LEVEL(n)
+#define MSVC_POP_WARNING()
+#define MSVC_DISABLE_OPTIMIZE()
+#define MSVC_ENABLE_OPTIMIZE()
+
+#endif  // COMPILER_MSVC
+
+// Annotate a variable indicating it's ok if the variable is not used.
+// (Typically used to silence a compiler warning when the assignment
+// is important for some other reason.)
+// Use like:
+//   int x = ...;
+//   ALLOW_UNUSED_LOCAL(x);
+#define ALLOW_UNUSED_LOCAL(x) (void)x
+
+// Annotate a typedef or function indicating it's ok if it's not used.
+// Use like:
+//   typedef Foo Bar ALLOW_UNUSED_TYPE;
+#if defined(COMPILER_GCC) || defined(__clang__)
+#define ALLOW_UNUSED_TYPE __attribute__((unused))
+#else
+#define ALLOW_UNUSED_TYPE
+#endif
+
+// Annotate a function indicating it should not be inlined.
+// Use like:
+//   NOINLINE void DoStuff() { ... }
+#if defined(COMPILER_GCC)
+#define NOINLINE __attribute__((noinline))
+#elif defined(COMPILER_MSVC)
+#define NOINLINE __declspec(noinline)
+#else
+#define NOINLINE
+#endif
+
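+// Annotate a function indicating it should always be inlined in optimized
+// (NDEBUG) builds; otherwise it is only a regular inline hint.
+// Use like:
+//   ALWAYS_INLINE int Add(int a, int b) { return a + b; }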
+#if defined(COMPILER_GCC) && defined(NDEBUG)
+#define ALWAYS_INLINE inline __attribute__((__always_inline__))
+#elif defined(COMPILER_MSVC) && defined(NDEBUG)
+#define ALWAYS_INLINE __forceinline
+#else
+#define ALWAYS_INLINE inline
+#endif
+
+// Specify memory alignment for structs, classes, etc.
+// Use like:
+//   class ALIGNAS(16) MyClass { ... }
+//   ALIGNAS(16) int array[4];
+//
+// In most places you can use the C++11 keyword "alignas", which is preferred.
+//
+// But compilers have trouble mixing __attribute__((...)) syntax with
+// alignas(...) syntax.
+//
+// Doesn't work in clang or gcc:
+//   struct alignas(16) __attribute__((packed)) S { char c; };
+// Works in clang but not gcc:
+//   struct __attribute__((packed)) alignas(16) S2 { char c; };
+// Works in clang and gcc:
+//   struct alignas(16) S3 { char c; } __attribute__((packed));
+//
+// There are also some attributes that must be specified *before* a class
+// definition: visibility (used for exporting functions/classes) is one of
+// these attributes. This means that it is not possible to use alignas() with a
+// class that is marked as exported.
+#if defined(COMPILER_MSVC)
+#define ALIGNAS(byte_alignment) __declspec(align(byte_alignment))
+#elif defined(COMPILER_GCC)
+#define ALIGNAS(byte_alignment) __attribute__((aligned(byte_alignment)))
+#endif
+
+// Annotate a function indicating the caller must examine the return value.
+// Use like:
+//   int foo() WARN_UNUSED_RESULT;
+// To explicitly ignore a result, see |ignore_result()| in base/macros.h.
+#undef WARN_UNUSED_RESULT
+#if defined(COMPILER_GCC) || defined(__clang__)
+#define WARN_UNUSED_RESULT __attribute__((warn_unused_result))
+#else
+#define WARN_UNUSED_RESULT
+#endif
+
+// Tell the compiler a function is using a printf-style format string.
+// |format_param| is the one-based index of the format string parameter;
+// |dots_param| is the one-based index of the "..." parameter.
+// For v*printf functions (which take a va_list), pass 0 for dots_param.
+// (This is undocumented but matches what the system C headers do.)
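+// Use like:
+//   void LogError(const char* format, ...) PRINTF_FORMAT(1, 2);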
+#if defined(COMPILER_GCC) || defined(__clang__)
+#define PRINTF_FORMAT(format_param, dots_param) \
+    __attribute__((format(printf, format_param, dots_param)))
+#else
+#define PRINTF_FORMAT(format_param, dots_param)
+#endif
+
+// WPRINTF_FORMAT is the same, but for wide format strings.
+// This doesn't appear to yet be implemented in any compiler.
+// See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=38308 .
+#define WPRINTF_FORMAT(format_param, dots_param)
+// If available, it would look like:
+//   __attribute__((format(wprintf, format_param, dots_param)))
+
+// Sanitizers annotations.
+#if defined(__has_attribute)
+#if __has_attribute(no_sanitize)
+#define NO_SANITIZE(what) __attribute__((no_sanitize(what)))
+#endif
+#endif
+#if !defined(NO_SANITIZE)
+#define NO_SANITIZE(what)
+#endif
+
+// MemorySanitizer annotations.
+#if defined(MEMORY_SANITIZER) && !defined(OS_NACL)
+#include <sanitizer/msan_interface.h>
+
+// Mark a memory region fully initialized.
+// Use this to annotate code that deliberately reads uninitialized data, for
+// example a GC scavenging root set pointers from the stack.
+#define MSAN_UNPOISON(p, size)  __msan_unpoison(p, size)
+
+// Check a memory region for initializedness, as if it was being used here.
+// If any bits are uninitialized, crash with an MSan report.
+// Use this to sanitize data which MSan won't be able to track, e.g. before
+// passing data to another process via shared memory.
+#define MSAN_CHECK_MEM_IS_INITIALIZED(p, size) \
+    __msan_check_mem_is_initialized(p, size)
+#else  // MEMORY_SANITIZER
+#define MSAN_UNPOISON(p, size)
+#define MSAN_CHECK_MEM_IS_INITIALIZED(p, size)
+#endif  // MEMORY_SANITIZER
+
+// DISABLE_CFI_PERF -- Disable Control Flow Integrity for perf reasons.
+#if !defined(DISABLE_CFI_PERF)
+#if defined(__clang__) && defined(OFFICIAL_BUILD)
+#define DISABLE_CFI_PERF __attribute__((no_sanitize("cfi")))
+#else
+#define DISABLE_CFI_PERF
+#endif
+#endif
+
+// Macro useful for writing cross-platform function pointers.
+#if !defined(CDECL)
+#if defined(OS_WIN)
+#define CDECL __cdecl
+#else  // defined(OS_WIN)
+#define CDECL
+#endif  // defined(OS_WIN)
+#endif  // !defined(CDECL)
+
+// Macro for hinting that an expression is likely to be false.
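+// Use like:
+//   if (UNLIKELY(size > capacity))
+//     Grow(size);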
+#if !defined(UNLIKELY)
+#if defined(COMPILER_GCC) || defined(__clang__)
+#define UNLIKELY(x) __builtin_expect(!!(x), 0)
+#else
+#define UNLIKELY(x) (x)
+#endif  // defined(COMPILER_GCC)
+#endif  // !defined(UNLIKELY)
+
+#if !defined(LIKELY)
+#if defined(COMPILER_GCC) || defined(__clang__)
+#define LIKELY(x) __builtin_expect(!!(x), 1)
+#else
+#define LIKELY(x) (x)
+#endif  // defined(COMPILER_GCC)
+#endif  // !defined(LIKELY)
+
+// Compiler feature-detection.
+// clang.llvm.org/docs/LanguageExtensions.html#has-feature-and-has-extension
+#if defined(__has_feature)
+#define HAS_FEATURE(FEATURE) __has_feature(FEATURE)
+#else
+#define HAS_FEATURE(FEATURE) 0
+#endif
+
+// Macro for telling -Wimplicit-fallthrough that a fallthrough is intentional.
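+// Use like:
+//   switch (x) {
+//     case 0:
+//       DoFirstThing();
+//       FALLTHROUGH;
+//     case 1:
+//       DoSecondThing();
+//       break;
+//   }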
+#if defined(__clang__)
+#define FALLTHROUGH [[clang::fallthrough]]
+#else
+#define FALLTHROUGH
+#endif
+
+#endif  // BASE_COMPILER_SPECIFIC_H_
diff --git a/base/component_export.h b/base/component_export.h
new file mode 100644
index 0000000..b5cb364
--- /dev/null
+++ b/base/component_export.h
@@ -0,0 +1,87 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_COMPONENT_EXPORT_H_
+#define BASE_COMPONENT_EXPORT_H_
+
+#include "build/build_config.h"
+
+// Used to annotate symbols which are exported by the component named
+// |component|. Note that this only does the right thing if the corresponding
+// component target's sources are compiled with |IS_$component_IMPL| defined
+// as 1. For example:
+//
+//   class COMPONENT_EXPORT(FOO) Bar {};
+//
+// If IS_FOO_IMPL=1 at compile time, then Bar will be annotated using the
+// COMPONENT_EXPORT_ANNOTATION macro defined below. Otherwise it will be
+// annotated using the COMPONENT_IMPORT_ANNOTATION macro.
+#define COMPONENT_EXPORT(component)                         \
+  COMPONENT_MACRO_CONDITIONAL_(IS_##component##_IMPL,       \
+                               COMPONENT_EXPORT_ANNOTATION, \
+                               COMPONENT_IMPORT_ANNOTATION)
+
+// Indicates whether the current compilation unit is being compiled as part of
+// the implementation of the component named |component|. Expands to |1| if
+// |IS_$component_IMPL| is defined as |1|; expands to |0| otherwise.
+//
+// Note in particular that if |IS_$component_IMPL| is not defined at all, it is
+// still fine to test INSIDE_COMPONENT_IMPL(component), which expands to |0| as
+// expected.
+#define INSIDE_COMPONENT_IMPL(component) \
+  COMPONENT_MACRO_CONDITIONAL_(IS_##component##_IMPL, 1, 0)
+
+// Compiler-specific macros to annotate for export or import of a symbol. No-op
+// in non-component builds. These should not see much if any direct use.
+// Instead use the COMPONENT_EXPORT macro defined above.
+#if defined(COMPONENT_BUILD)
+#if defined(WIN32)
+#define COMPONENT_EXPORT_ANNOTATION __declspec(dllexport)
+#define COMPONENT_IMPORT_ANNOTATION __declspec(dllimport)
+#else  // defined(WIN32)
+#define COMPONENT_EXPORT_ANNOTATION __attribute__((visibility("default")))
+#define COMPONENT_IMPORT_ANNOTATION
+#endif  // defined(WIN32)
+#else   // defined(COMPONENT_BUILD)
+#define COMPONENT_EXPORT_ANNOTATION
+#define COMPONENT_IMPORT_ANNOTATION
+#endif  // defined(COMPONENT_BUILD)
+
+// Below this point are several internal utility macros used for the
+// implementation of the above macros. Not intended for external use.
+
+// Helper for conditional expansion to one of two token strings. If |condition|
+// expands to |1| then this macro expands to |consequent|; otherwise it expands
+// to |alternate|.
+#define COMPONENT_MACRO_CONDITIONAL_(condition, consequent, alternate) \
+  COMPONENT_MACRO_SELECT_THIRD_ARGUMENT_(                              \
+      COMPONENT_MACRO_CONDITIONAL_COMMA_(condition), consequent, alternate)
+
+// Expands to a comma (,) iff its first argument expands to |1|. Used in
+// conjunction with |COMPONENT_MACRO_SELECT_THIRD_ARGUMENT_()|, as the presence
+// or absence of an extra comma can be used to conditionally shift subsequent
+// argument positions and thus influence which argument is selected.
+#define COMPONENT_MACRO_CONDITIONAL_COMMA_(...) \
+  COMPONENT_MACRO_CONDITIONAL_COMMA_IMPL_(__VA_ARGS__,)
+#define COMPONENT_MACRO_CONDITIONAL_COMMA_IMPL_(x, ...) \
+  COMPONENT_MACRO_CONDITIONAL_COMMA_##x##_
+#define COMPONENT_MACRO_CONDITIONAL_COMMA_1_ ,
+
+// Helper which simply selects its third argument. Used in conjunction with
+// |COMPONENT_MACRO_CONDITIONAL_COMMA_()| above to implement conditional macro
+// expansion.
+#define COMPONENT_MACRO_SELECT_THIRD_ARGUMENT_(...) \
+  COMPONENT_MACRO_EXPAND_(                          \
+      COMPONENT_MACRO_SELECT_THIRD_ARGUMENT_IMPL_(__VA_ARGS__))
+#define COMPONENT_MACRO_SELECT_THIRD_ARGUMENT_IMPL_(a, b, c, ...) c
+
+// Helper to work around MSVC quirkiness wherein a macro expansion like |,|
+// within a parameter list will be treated as a single macro argument. This is
+// needed to ensure that |COMPONENT_MACRO_CONDITIONAL_COMMA_()| above can expand
+// to multiple separate positional arguments in the affirmative case, thus
+// eliciting the desired conditional behavior with
+// |COMPONENT_MACRO_SELECT_THIRD_ARGUMENT_()|.
+#define COMPONENT_MACRO_EXPAND_(x) x
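+
+// For illustration, when |IS_FOO_IMPL| is defined as 1, COMPONENT_EXPORT(FOO)
+// expands roughly as follows (macro prefixes abbreviated):
+//   COMPONENT_MACRO_CONDITIONAL_(IS_FOO_IMPL, EXPORT, IMPORT)
+//   -> SELECT_THIRD_ARGUMENT_(CONDITIONAL_COMMA_(1), EXPORT, IMPORT)
+//   -> SELECT_THIRD_ARGUMENT_IMPL_(, , EXPORT, IMPORT)  // comma shifts args
+//   -> EXPORT  // the third argument
+// When |IS_FOO_IMPL| is anything else, CONDITIONAL_COMMA_ produces no comma,
+// so the third argument selected is IMPORT.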
+
+#endif  // BASE_COMPONENT_EXPORT_H_
diff --git a/base/component_export_unittest.cc b/base/component_export_unittest.cc
new file mode 100644
index 0000000..e994353
--- /dev/null
+++ b/base/component_export_unittest.cc
@@ -0,0 +1,82 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/component_export.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+using ComponentExportTest = testing::Test;
+
+#define IS_TEST_COMPONENT_A_IMPL 1
+#define IS_TEST_COMPONENT_B_IMPL
+#define IS_TEST_COMPONENT_C_IMPL 0
+#define IS_TEST_COMPONENT_D_IMPL 2
+#define IS_TEST_COMPONENT_E_IMPL xyz
+
+TEST(ComponentExportTest, ImportExport) {
+  // Defined as 1. Treat as export.
+  EXPECT_EQ(1, INSIDE_COMPONENT_IMPL(TEST_COMPONENT_A));
+
+  // Defined, but empty. Treat as import.
+  EXPECT_EQ(0, INSIDE_COMPONENT_IMPL(TEST_COMPONENT_B));
+
+  // Defined, but 0. Treat as import.
+  EXPECT_EQ(0, INSIDE_COMPONENT_IMPL(TEST_COMPONENT_C));
+
+  // Defined, but some other arbitrary thing that isn't 1. Treat as import.
+  EXPECT_EQ(0, INSIDE_COMPONENT_IMPL(TEST_COMPONENT_D));
+  EXPECT_EQ(0, INSIDE_COMPONENT_IMPL(TEST_COMPONENT_E));
+
+  // Undefined. Treat as import.
+  EXPECT_EQ(0, INSIDE_COMPONENT_IMPL(TEST_COMPONENT_F));
+
+  // And just for good measure, ensure that the macros evaluate properly in the
+  // context of preprocessor #if blocks.
+#if INSIDE_COMPONENT_IMPL(TEST_COMPONENT_A)
+  EXPECT_TRUE(true);
+#else
+  EXPECT_TRUE(false);
+#endif
+
+#if !INSIDE_COMPONENT_IMPL(TEST_COMPONENT_B)
+  EXPECT_TRUE(true);
+#else
+  EXPECT_TRUE(false);
+#endif
+
+#if !INSIDE_COMPONENT_IMPL(TEST_COMPONENT_C)
+  EXPECT_TRUE(true);
+#else
+  EXPECT_TRUE(false);
+#endif
+
+#if !INSIDE_COMPONENT_IMPL(TEST_COMPONENT_D)
+  EXPECT_TRUE(true);
+#else
+  EXPECT_TRUE(false);
+#endif
+
+#if !INSIDE_COMPONENT_IMPL(TEST_COMPONENT_E)
+  EXPECT_TRUE(true);
+#else
+  EXPECT_TRUE(false);
+#endif
+
+#if !INSIDE_COMPONENT_IMPL(TEST_COMPONENT_F)
+  EXPECT_TRUE(true);
+#else
+  EXPECT_TRUE(false);
+#endif
+}
+
+#undef IS_TEST_COMPONENT_A_IMPL
+#undef IS_TEST_COMPONENT_B_IMPL
+#undef IS_TEST_COMPONENT_C_IMPL
+#undef IS_TEST_COMPONENT_D_IMPL
+#undef IS_TEST_COMPONENT_E_IMPL
+
+}  // namespace
+}  // namespace base
diff --git a/base/containers/OWNERS b/base/containers/OWNERS
new file mode 100644
index 0000000..cc39b28
--- /dev/null
+++ b/base/containers/OWNERS
@@ -0,0 +1,3 @@
+danakj@chromium.org
+dcheng@chromium.org
+vmpstr@chromium.org
diff --git a/base/containers/README.md b/base/containers/README.md
new file mode 100644
index 0000000..092a264
--- /dev/null
+++ b/base/containers/README.md
@@ -0,0 +1,295 @@
+# base/containers library
+
+## What goes here
+
+This directory contains some STL-like containers.
+
+Things should be moved here that are generally applicable across the code base.
+Don't add things here just because you need them in one place and think others
+may someday want something similar. You can put specialized containers in
+your component's directory and we can promote them here later if we feel there
+is broad applicability.
+
+### Design and naming
+
+Containers should adhere as closely to STL as possible. Functions and behaviors
+not present in STL should only be added when they are related to the specific
+data structure implemented by the container.
+
+For STL-like containers our policy is that they should use STL-like naming even
+when it may conflict with the style guide. So functions and class names should
+be lower case with underscores. Non-STL-like classes and functions should use
+Google naming. Be sure to use the base namespace.
+
+## Map and set selection
+
+### Usage advice
+
+  * Generally avoid **std::unordered\_set** and **std::unordered\_map**. In the
+    common case, query performance is unlikely to be sufficiently higher than
+    std::map to make a difference, insert performance is slightly worse, and
+    the memory overhead is high. This makes sense mostly for large tables where
+    you expect a lot of lookups.
+
+  * Most maps and sets in Chrome are small and contain objects that can be
+    moved efficiently. In this case, consider **base::flat\_map** and
+    **base::flat\_set**. You need to be aware of the maximum expected size of
+    the container since individual inserts and deletes are O(n), giving O(n^2)
+    construction time for the entire map. But because it avoids mallocs in most
+    cases, inserts are better or comparable to other containers even for
+    several dozen items, and efficiently-moved types are unlikely to have
+    performance problems for most cases until you have hundreds of items. If
+    your container can be constructed in one shot, the constructor from vector
+    gives O(n log n) construction times and should be strictly better than a
+    std::map (see the sketch after this list).
+
+  * **base::small\_map** has better runtime memory usage without the poor
+    mutation performance of large containers that base::flat\_map has. But this
+    advantage is partially offset by additional code size. Prefer it in cases
+    where you create many objects so that the code/heap tradeoff is good.
+
+  * Use **std::map** and **std::set** if you can't decide. Even if they're not
+    great, they're unlikely to be bad or surprising.
+
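+As a sketch of the one-shot construction mentioned in the list above
+(`CollectInts()` is a hypothetical data source):
+
+```cpp
+std::vector<int> numbers = CollectInts();
+// One sort at construction: O(n log n) instead of O(n^2) repeated inserts.
+base::flat_set<int> number_set(std::move(numbers), base::KEEP_FIRST_OF_DUPES);
+```
+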
+### Map and set details
+
+Sizes are for 64-bit platforms. "Stable" iterators are ones that are not
+invalidated when the container is mutated.
+
+| Container                                | Empty size            | Per-item overhead | Stable iterators? |
+|:---------------------------------------- |:--------------------- |:----------------- |:----------------- |
+| std::map, std::set                       | 16 bytes              | 32 bytes          | Yes               |
+| std::unordered\_map, std::unordered\_set | 128 bytes             | 16-24 bytes       | No                |
+| base::flat\_map and base::flat\_set      | 24 bytes              | 0 (see notes)     | No                |
+| base::small\_map                         | 24 bytes (see notes)  | 32 bytes          | No                |
+
+**Takeaways:** std::unordered\_map and std::unordered\_set have high
+overhead for small container sizes, so prefer these only for larger workloads.
+
+Code size comparisons for a block of code (see appendix) on Windows using
+strings as keys.
+
+| Container           | Code size  |
+|:------------------- |:---------- |
+| std::unordered\_map | 1646 bytes |
+| std::map            | 1759 bytes |
+| base::flat\_map     | 1872 bytes |
+| base::small\_map    | 2410 bytes |
+
+**Takeaways:** base::small\_map generates more code because of the inlining of
+both brute-force and red-black tree searching. This makes it less attractive
+for random one-off uses. But if your code is called frequently, the runtime
+memory benefits will be more important. The code sizes of the other maps are
+close enough it's not worth worrying about.
+
+### std::map and std::set
+
+A red-black tree. Each inserted item requires the memory allocation of a node
+on the heap. Each node contains a left pointer, a right pointer, a parent
+pointer, and a "color" for the red-black tree (32 bytes per item on 64-bit
+platforms).
+
+### std::unordered\_map and std::unordered\_set
+
+A hash table. Implemented on Windows as a std::vector + std::list and in libc++
+as the equivalent of a std::vector + a std::forward\_list. Both implementations
+allocate an 8-entry hash table (containing iterators into the list) on
+initialization, and grow to 64 entries once 8 items are inserted. Above 64
+items, the size doubles every time the load factor exceeds 1.
+
+The empty size is sizeof(std::unordered\_map) (64 bytes) plus the initial hash
+table, which is 8 pointers. The per-item overhead in the table above counts the
+list node (2 pointers on Windows, 1 pointer in libc++), plus an amortized share
+of the hash table, assuming a 0.5 load factor on average.
+
+In a microbenchmark on Windows, inserts of 1M integers into a
+std::unordered\_set took 1.07x the time of std::set, and queries took 0.67x the
+time of std::set. For a typical 4-entry set (the statistical mode of map sizes
+in the browser), query performance is identical to std::set and base::flat\_set.
+On ARM, unordered\_set performance can be worse because integer division to
+compute the bucket is slow, and a few "less than" operations can be faster than
+computing a hash depending on the key type. The takeaway is that you should not
+default to using unordered maps because "they're faster."
+
+### base::flat\_map and base::flat\_set
+
+A sorted std::vector. Searched via binary search, inserts in the middle require
+moving elements to make room. Good cache locality. For large objects and large
+set sizes, std::vector's doubling-when-full strategy can waste memory.
+
+Supports efficient construction from a vector of items which avoids the O(n^2)
+insertion time of each element separately.
+
+The per-item overhead will depend on the underlying std::vector's reallocation
+strategy and the memory access pattern. Assuming items are being linearly added,
+one would expect it to be 3/4 full, so per-item overhead will be 0.25 *
+sizeof(T).
+
+flat\_set/flat\_map support a notion of transparent comparisons. Therefore you
+can, for example, lookup base::StringPiece in a set of std::strings without
+constructing a temporary std::string. This functionality is based on C++14
+extensions to std::set/std::map interface.
+
+You can find more information about transparent comparisons here:
+http://en.cppreference.com/w/cpp/utility/functional/less_void
+
+Example, smart pointer set:
+
+```cpp
+// Declare a type alias using base::UniquePtrComparator.
+template <typename T>
+using UniquePtrSet = base::flat_set<std::unique_ptr<T>,
+                                    base::UniquePtrComparator>;
+
+// ...
+// Collect data.
+std::vector<std::unique_ptr<int>> ptr_vec;
+ptr_vec.reserve(5);
+std::generate_n(std::back_inserter(ptr_vec), 5, []{
+  return std::make_unique<int>(0);
+});
+
+// Construct a set.
+UniquePtrSet<int> ptr_set(std::move(ptr_vec), base::KEEP_FIRST_OF_DUPES);
+
+// Use raw pointers to lookup keys.
+int* ptr = ptr_set.begin()->get();
+EXPECT_TRUE(ptr_set.find(ptr) == ptr_set.begin());
+```
+
+Example, flat\_map<std::string, int>:
+
+```cpp
+base::flat_map<std::string, int> str_to_int({{"a", 1}, {"c", 2},{"b", 2}},
+                                            base::KEEP_FIRST_OF_DUPES);
+
+// Does not construct temporary strings.
+str_to_int.find("c")->second = 3;
+str_to_int.erase("c");
+EXPECT_EQ(str_to_int.end(), str_to_int.find("c"));
+
+// NOTE: This does construct a temporary string. This happens because if the
+// item is not in the container, it needs to be constructed, which is
+// something that transparent comparators don't have to guarantee.
+str_to_int["c"] = 3;
+```
+
+### base::small\_map
+
+A small inline buffer that is brute-force searched that overflows into a full
+std::map or std::unordered\_map. This gives the memory benefit of
+base::flat\_map for small data sizes without the degenerate insertion
+performance for large container sizes.
+
+Since instantiations require both code for a std::map and a brute-force search
+of the inline container, plus a fancy iterator to cover both cases, code size
+is larger.
+
+The initial size in the above table is assuming a very small inline table. The
+actual size will be sizeof(int) + min(sizeof(std::map), sizeof(T) *
+inline\_size).
+
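+A minimal usage sketch (the second template argument is the inline capacity,
+which defaults to 4):
+
+```cpp
+#include "base/containers/small_map.h"
+
+base::small_map<std::map<std::string, int>, 4> counts;
+counts["a"] = 1;  // Lives in the inline array; no heap allocation yet.
+counts["b"] = 2;
+// Once more than 4 distinct keys are inserted, entries move to a
+// heap-allocated std::map.
+```
+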
+# Deque
+
+### Usage advice
+
+Chromium code should always use `base::circular_deque` or `base::queue` in
+preference to `std::deque` or `std::queue` due to memory usage and platform
+variation.
+
+The `base::circular_deque` implementation (and the `base::queue` which uses it)
+provides performance consistent across platforms that better matches most
+programmers' expectations (it doesn't waste as much space as libc++ and doesn't
+do as many heap allocations as MSVC). It also generates less code than
+`std::queue`: using it across the code base saves several hundred kilobytes.
+
+Since `base::circular_deque` does not have stable iterators and it will move
+the objects it contains, it may not be appropriate for all uses. If you need
+these guarantees, consider using a `std::list`, which provides constant-time
+insert and erase.
+
+### std::deque and std::queue
+
+The implementation of `std::deque` varies considerably, which makes it hard to
+reason about. All implementations use a sequence of data blocks referenced by
+an array of pointers. The standard guarantees random access, amortized
+constant operations at the ends, and linear mutations in the middle.
+
+In Microsoft's implementation, each block is 16 bytes, or the size of one
+element if that is larger. This means in practice that every expansion of a
+deque of non-trivial classes requires a heap allocation. libc++ (on Android
+and Mac) uses 4K blocks, which eliminates the problem of many heap allocations,
+but generally wastes a large amount of space (an Android analysis revealed more
+than 2.5MB wasted space from deque alone, resulting in some optimizations).
+libstdc++ uses an intermediate-size 512-byte buffer.
+
+Microsoft's implementation never shrinks the deque capacity, so the capacity
+will always be the maximum number of elements ever contained. libstdc++
+deallocates blocks as they are freed. libc++ keeps up to two empty blocks.
+
+### base::circular_deque and base::queue
+
+A deque implemented as a circular buffer in an array. The underlying array will
+grow like a `std::vector` while the beginning and end of the deque will move
+around. The items will wrap around the underlying buffer so the storage will
+not be contiguous, but fast random access iterators are still possible.
+
+When the underlying buffer is filled, it will be reallocated and the contents
+moved (like a `std::vector`). The underlying buffer will be shrunk if there is
+too much wasted space (_unlike_ a `std::vector`). As a result, iterators are
+not stable across mutations.
+
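+A minimal sketch of deque-style use (iterators obtained before a push or pop
+must not be reused afterwards):
+
+```cpp
+#include "base/containers/circular_deque.h"
+
+base::circular_deque<int> d;
+d.push_back(1);
+d.push_back(2);
+d.push_front(0);  // d is now {0, 1, 2}.
+d.pop_front();    // Constant time; d is now {1, 2}.
+```
+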
+# Stack
+
+`std::stack` is like `std::queue` in that it is a wrapper around an underlying
+container. The default container is `std::deque` so everything from the deque
+section applies.
+
+Chromium provides `base/containers/stack.h`, which defines a `base::stack`
+that should be used in preference to `std::stack`. This changes the underlying
+container to `base::circular_deque`. The result is very similar to manually
+specifying a `std::vector` as the underlying implementation, except that the
+storage will shrink when it gets too empty (`std::vector` never reallocates to
+a smaller size).
+
+Watch out: with some stack usage patterns it's easy to depend on unstable
+behavior:
+
+```cpp
+base::stack<Foo> stack;
+for (...) {
+  Foo& current = stack.top();
+  DoStuff();  // May call stack.push(), say if writing a parser.
+  current.done = true;  // Current may reference deleted item!
+}
+```
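+
+One way to avoid the problem, when the logic permits, is to finish touching
+the element before any call that can mutate the stack:
+
+```cpp
+base::stack<Foo> stack;
+for (...) {
+  stack.top().done = true;  // Touch the element first...
+  DoStuff();  // ...since this may push and invalidate old references.
+}
+```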
+
+## Appendix
+
+### Code for map code size comparison
+
+This just calls insert and query a number of times, with printfs that prevent
+things from being dead-code eliminated.
+
+```cpp
+TEST(Foo, Bar) {
+  base::small_map<std::map<std::string, Flubber>> foo;
+  foo.insert(std::make_pair("foo", Flubber(8, "bar")));
+  foo.insert(std::make_pair("bar", Flubber(8, "bar")));
+  foo.insert(std::make_pair("foo1", Flubber(8, "bar")));
+  foo.insert(std::make_pair("bar1", Flubber(8, "bar")));
+  foo.insert(std::make_pair("foo", Flubber(8, "bar")));
+  foo.insert(std::make_pair("bar", Flubber(8, "bar")));
+  auto found = foo.find("asdf");
+  printf("Found is %d\n", (int)(found == foo.end()));
+  found = foo.find("foo");
+  printf("Found is %d\n", (int)(found == foo.end()));
+  found = foo.find("bar");
+  printf("Found is %d\n", (int)(found == foo.end()));
+  found = foo.find("asdfhf");
+  printf("Found is %d\n", (int)(found == foo.end()));
+  found = foo.find("bar1");
+  printf("Found is %d\n", (int)(found == foo.end()));
+}
+```
+
diff --git a/base/containers/adapters.h b/base/containers/adapters.h
new file mode 100644
index 0000000..fa671b4
--- /dev/null
+++ b/base/containers/adapters.h
@@ -0,0 +1,73 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_ADAPTERS_H_
+#define BASE_CONTAINERS_ADAPTERS_H_
+
+#include <stddef.h>
+
+#include <iterator>
+
+#include "base/macros.h"
+
+namespace base {
+
+namespace internal {
+
+// Internal adapter class for implementing base::Reversed.
+template <typename T>
+class ReversedAdapter {
+ public:
+  using Iterator = decltype(static_cast<T*>(nullptr)->rbegin());
+
+  explicit ReversedAdapter(T& t) : t_(t) {}
+  ReversedAdapter(const ReversedAdapter& ra) : t_(ra.t_) {}
+
+  // TODO(mdempsky): Once we can use C++14 library features, use std::rbegin
+  // and std::rend instead, so we can remove the specialization below.
+  Iterator begin() const { return t_.rbegin(); }
+  Iterator end() const { return t_.rend(); }
+
+ private:
+  T& t_;
+
+  DISALLOW_ASSIGN(ReversedAdapter);
+};
+
+template <typename T, size_t N>
+class ReversedAdapter<T[N]> {
+ public:
+  using Iterator = std::reverse_iterator<T*>;
+
+  explicit ReversedAdapter(T (&t)[N]) : t_(t) {}
+  ReversedAdapter(const ReversedAdapter& ra) : t_(ra.t_) {}
+
+  Iterator begin() const { return Iterator(&t_[N]); }
+  Iterator end() const { return Iterator(&t_[0]); }
+
+ private:
+  T (&t_)[N];
+
+  DISALLOW_ASSIGN(ReversedAdapter);
+};
+
+}  // namespace internal
+
+// Reversed returns a container adapter usable in a range-based "for" statement
+// for iterating a reversible container in reverse order.
+//
+// Example:
+//
+//   std::vector<int> v = ...;
+//   for (int i : base::Reversed(v)) {
+//     // iterates through v from back to front
+//   }
+template <typename T>
+internal::ReversedAdapter<T> Reversed(T& t) {
+  return internal::ReversedAdapter<T>(t);
+}
+
+}  // namespace base
+
+#endif  // BASE_CONTAINERS_ADAPTERS_H_
diff --git a/base/containers/adapters_unittest.cc b/base/containers/adapters_unittest.cc
new file mode 100644
index 0000000..92554b7
--- /dev/null
+++ b/base/containers/adapters_unittest.cc
@@ -0,0 +1,52 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/containers/adapters.h"
+
+#include <vector>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+TEST(AdaptersTest, Reversed) {
+  std::vector<int> v;
+  v.push_back(3);
+  v.push_back(2);
+  v.push_back(1);
+  int j = 0;
+  for (int& i : base::Reversed(v)) {
+    EXPECT_EQ(++j, i);
+    i += 100;
+  }
+  EXPECT_EQ(103, v[0]);
+  EXPECT_EQ(102, v[1]);
+  EXPECT_EQ(101, v[2]);
+}
+
+TEST(AdaptersTest, ReversedArray) {
+  int v[3] = {3, 2, 1};
+  int j = 0;
+  for (int& i : base::Reversed(v)) {
+    EXPECT_EQ(++j, i);
+    i += 100;
+  }
+  EXPECT_EQ(103, v[0]);
+  EXPECT_EQ(102, v[1]);
+  EXPECT_EQ(101, v[2]);
+}
+
+TEST(AdaptersTest, ReversedConst) {
+  std::vector<int> v;
+  v.push_back(3);
+  v.push_back(2);
+  v.push_back(1);
+  const std::vector<int>& cv = v;
+  int j = 0;
+  for (int i : base::Reversed(cv)) {
+    EXPECT_EQ(++j, i);
+  }
+}
+
+}  // namespace
diff --git a/base/containers/circular_deque.h b/base/containers/circular_deque.h
new file mode 100644
index 0000000..bf42a95
--- /dev/null
+++ b/base/containers/circular_deque.h
@@ -0,0 +1,1111 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_CIRCULAR_DEQUE_H_
+#define BASE_CONTAINERS_CIRCULAR_DEQUE_H_
+
+#include <algorithm>
+#include <cstddef>
+#include <iterator>
+#include <type_traits>
+#include <utility>
+
+#include "base/containers/vector_buffer.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/template_util.h"
+
+// base::circular_deque is similar to std::deque. Unlike std::deque, the
+// storage is provided in a flat circular buffer conceptually similar to a
+// vector. The beginning and end will wrap around as necessary so that
+// pushes and pops will be constant time as long as a capacity expansion is
+// not required.
+//
+// The API should be identical to std::deque with the following differences:
+//
+//  - ITERATORS ARE NOT STABLE. Mutating the container will invalidate all
+//    iterators.
+//
+//  - Insertions may resize the vector and so are not constant time (std::deque
+//    guarantees constant time for insertions at the ends).
+//
+//  - Container-wide comparisons are not implemented. If you want to compare
+//    two containers, use an algorithm so the expensive iteration is explicit.
+//
+// If you want a similar container with only a queue API, use base::queue in
+// base/containers/queue.h.
+//
+// Constructors:
+//   circular_deque();
+//   circular_deque(size_t count);
+//   circular_deque(size_t count, const T& value);
+//   circular_deque(InputIterator first, InputIterator last);
+//   circular_deque(const circular_deque&);
+//   circular_deque(circular_deque&&);
+//   circular_deque(std::initializer_list<value_type>);
+//
+// Assignment functions:
+//   circular_deque& operator=(const circular_deque&);
+//   circular_deque& operator=(circular_deque&&);
+//   circular_deque& operator=(std::initializer_list<T>);
+//   void assign(size_t count, const T& value);
+//   void assign(InputIterator first, InputIterator last);
+//   void assign(std::initializer_list<T> value);
+//
+// Random accessors:
+//   T& at(size_t);
+//   const T& at(size_t) const;
+//   T& operator[](size_t);
+//   const T& operator[](size_t) const;
+//
+// End accessors:
+//   T& front();
+//   const T& front() const;
+//   T& back();
+//   const T& back() const;
+//
+// Iterator functions:
+//   iterator               begin();
+//   const_iterator         begin() const;
+//   const_iterator         cbegin() const;
+//   iterator               end();
+//   const_iterator         end() const;
+//   const_iterator         cend() const;
+//   reverse_iterator       rbegin();
+//   const_reverse_iterator rbegin() const;
+//   const_reverse_iterator crbegin() const;
+//   reverse_iterator       rend();
+//   const_reverse_iterator rend() const;
+//   const_reverse_iterator crend() const;
+//
+// Memory management:
+//   void reserve(size_t);  // SEE IMPLEMENTATION FOR SOME GOTCHAS.
+//   size_t capacity() const;
+//   void shrink_to_fit();
+//
+// Size management:
+//   void clear();
+//   bool empty() const;
+//   size_t size() const;
+//   void resize(size_t);
+//   void resize(size_t count, const T& value);
+//
+// Positional insert and erase:
+//   void insert(const_iterator pos, size_type count, const T& value);
+//   void insert(const_iterator pos,
+//               InputIterator first, InputIterator last);
+//   iterator insert(const_iterator pos, const T& value);
+//   iterator insert(const_iterator pos, T&& value);
+//   iterator emplace(const_iterator pos, Args&&... args);
+//   iterator erase(const_iterator pos);
+//   iterator erase(const_iterator first, const_iterator last);
+//
+// End insert and erase:
+//   void push_front(const T&);
+//   void push_front(T&&);
+//   void push_back(const T&);
+//   void push_back(T&&);
+//   T& emplace_front(Args&&...);
+//   T& emplace_back(Args&&...);
+//   void pop_front();
+//   void pop_back();
+//
+// General:
+//   void swap(circular_deque&);
+
+namespace base {
+
+template <class T>
+class circular_deque;
+
+namespace internal {
+
+// Start allocating nonempty buffers with this many entries. This is the
+// external capacity so the internal buffer will be one larger (= 4) which is
+// more even for the allocator. See the descriptions of internal vs. external
+// capacity on the comment above the buffer_ variable below.
+constexpr size_t kCircularBufferInitialCapacity = 3;
+
+template <typename T>
+class circular_deque_const_iterator {
+ public:
+  using difference_type = std::ptrdiff_t;
+  using value_type = T;
+  using pointer = const T*;
+  using reference = const T&;
+  using iterator_category = std::random_access_iterator_tag;
+
+  circular_deque_const_iterator() : parent_deque_(nullptr), index_(0) {
+#if DCHECK_IS_ON()
+    created_generation_ = 0;
+#endif  // DCHECK_IS_ON()
+  }
+
+  // Dereferencing.
+  const T& operator*() const {
+    CheckUnstableUsage();
+    parent_deque_->CheckValidIndex(index_);
+    return parent_deque_->buffer_[index_];
+  }
+  const T* operator->() const {
+    CheckUnstableUsage();
+    parent_deque_->CheckValidIndex(index_);
+    return &parent_deque_->buffer_[index_];
+  }
+  const value_type& operator[](difference_type i) const { return *(*this + i); }
+
+  // Increment and decrement.
+  circular_deque_const_iterator& operator++() {
+    Increment();
+    return *this;
+  }
+  circular_deque_const_iterator operator++(int) {
+    circular_deque_const_iterator ret = *this;
+    Increment();
+    return ret;
+  }
+  circular_deque_const_iterator& operator--() {
+    Decrement();
+    return *this;
+  }
+  circular_deque_const_iterator operator--(int) {
+    circular_deque_const_iterator ret = *this;
+    Decrement();
+    return ret;
+  }
+
+  // Random access mutation.
+  friend circular_deque_const_iterator operator+(
+      const circular_deque_const_iterator& iter,
+      difference_type offset) {
+    circular_deque_const_iterator ret = iter;
+    ret.Add(offset);
+    return ret;
+  }
+  circular_deque_const_iterator& operator+=(difference_type offset) {
+    Add(offset);
+    return *this;
+  }
+  friend circular_deque_const_iterator operator-(
+      const circular_deque_const_iterator& iter,
+      difference_type offset) {
+    circular_deque_const_iterator ret = iter;
+    ret.Add(-offset);
+    return ret;
+  }
+  circular_deque_const_iterator& operator-=(difference_type offset) {
+    Add(-offset);
+    return *this;
+  }
+
+  friend std::ptrdiff_t operator-(const circular_deque_const_iterator& lhs,
+                                  const circular_deque_const_iterator& rhs) {
+    lhs.CheckComparable(rhs);
+    return lhs.OffsetFromBegin() - rhs.OffsetFromBegin();
+  }
+
+  // Comparisons.
+  friend bool operator==(const circular_deque_const_iterator& lhs,
+                         const circular_deque_const_iterator& rhs) {
+    lhs.CheckComparable(rhs);
+    return lhs.index_ == rhs.index_;
+  }
+  friend bool operator!=(const circular_deque_const_iterator& lhs,
+                         const circular_deque_const_iterator& rhs) {
+    return !(lhs == rhs);
+  }
+  friend bool operator<(const circular_deque_const_iterator& lhs,
+                        const circular_deque_const_iterator& rhs) {
+    lhs.CheckComparable(rhs);
+    return lhs.OffsetFromBegin() < rhs.OffsetFromBegin();
+  }
+  friend bool operator<=(const circular_deque_const_iterator& lhs,
+                         const circular_deque_const_iterator& rhs) {
+    return !(lhs > rhs);
+  }
+  friend bool operator>(const circular_deque_const_iterator& lhs,
+                        const circular_deque_const_iterator& rhs) {
+    lhs.CheckComparable(rhs);
+    return lhs.OffsetFromBegin() > rhs.OffsetFromBegin();
+  }
+  friend bool operator>=(const circular_deque_const_iterator& lhs,
+                         const circular_deque_const_iterator& rhs) {
+    return !(lhs < rhs);
+  }
+
+ protected:
+  friend class circular_deque<T>;
+
+  circular_deque_const_iterator(const circular_deque<T>* parent, size_t index)
+      : parent_deque_(parent), index_(index) {
+#if DCHECK_IS_ON()
+    created_generation_ = parent->generation_;
+#endif  // DCHECK_IS_ON()
+  }
+
+  // Returns the offset from the beginning index of the buffer to the current
+  // item.
+  size_t OffsetFromBegin() const {
+    if (index_ >= parent_deque_->begin_)
+      return index_ - parent_deque_->begin_;  // On the same side as begin.
+    return parent_deque_->buffer_.capacity() - parent_deque_->begin_ + index_;
+  }
+
+  // Most uses will be ++ and -- so use a simplified implementation.
+  void Increment() {
+    CheckUnstableUsage();
+    parent_deque_->CheckValidIndex(index_);
+    index_++;
+    if (index_ == parent_deque_->buffer_.capacity())
+      index_ = 0;
+  }
+  void Decrement() {
+    CheckUnstableUsage();
+    parent_deque_->CheckValidIndexOrEnd(index_);
+    if (index_ == 0)
+      index_ = parent_deque_->buffer_.capacity() - 1;
+    else
+      index_--;
+  }
+  void Add(difference_type delta) {
+    CheckUnstableUsage();
+#if DCHECK_IS_ON()
+    if (delta <= 0)
+      parent_deque_->CheckValidIndexOrEnd(index_);
+    else
+      parent_deque_->CheckValidIndex(index_);
+#endif
+    // It should be valid to add 0 to any iterator, even if the container is
+    // empty and the iterator points to end(). The modulo below will divide
+    // by 0 if the buffer capacity is zero, so it's important to check for
+    // this case explicitly.
+    if (delta == 0)
+      return;
+
+    difference_type new_offset = OffsetFromBegin() + delta;
+    DCHECK(new_offset >= 0 &&
+           new_offset <= static_cast<difference_type>(parent_deque_->size()));
+    index_ = (new_offset + parent_deque_->begin_) %
+             parent_deque_->buffer_.capacity();
+  }
+
+#if DCHECK_IS_ON()
+  void CheckUnstableUsage() const {
+    DCHECK(parent_deque_);
+    // Since circular_deque doesn't guarantee stability, any attempt to
+    // dereference this iterator after a mutation (i.e. the generation doesn't
+    // match the original) in the container is illegal.
+    DCHECK_EQ(created_generation_, parent_deque_->generation_)
+        << "circular_deque iterator dereferenced after mutation.";
+  }
+  void CheckComparable(const circular_deque_const_iterator& other) const {
+    DCHECK_EQ(parent_deque_, other.parent_deque_);
+    // Since circular_deque doesn't guarantee stability, two iterators that
+    // are compared must have been generated without mutating the container.
+    // If this fires, the container was mutated between generating the two
+    // iterators being compared.
+    DCHECK_EQ(created_generation_, other.created_generation_);
+  }
+#else
+  inline void CheckUnstableUsage() const {}
+  inline void CheckComparable(const circular_deque_const_iterator&) const {}
+#endif  // DCHECK_IS_ON()
+
+  const circular_deque<T>* parent_deque_;
+  size_t index_;
+
+#if DCHECK_IS_ON()
+  // The generation of the parent deque when this iterator was created. The
+  // container will update the generation for every modification so we can
+  // test if the container was modified by comparing them.
+  uint64_t created_generation_;
+#endif  // DCHECK_IS_ON()
+};
+
+template <typename T>
+class circular_deque_iterator : public circular_deque_const_iterator<T> {
+  using base = circular_deque_const_iterator<T>;
+
+ public:
+  friend class circular_deque<T>;
+
+  using difference_type = std::ptrdiff_t;
+  using value_type = T;
+  using pointer = T*;
+  using reference = T&;
+  using iterator_category = std::random_access_iterator_tag;
+
+  // Expose the base class' constructor.
+  circular_deque_iterator() : circular_deque_const_iterator<T>() {}
+
+  // Dereferencing.
+  T& operator*() const { return const_cast<T&>(base::operator*()); }
+  T* operator->() const { return const_cast<T*>(base::operator->()); }
+  T& operator[](difference_type i) {
+    return const_cast<T&>(base::operator[](i));
+  }
+
+  // Random access mutation.
+  friend circular_deque_iterator operator+(const circular_deque_iterator& iter,
+                                           difference_type offset) {
+    circular_deque_iterator ret = iter;
+    ret.Add(offset);
+    return ret;
+  }
+  circular_deque_iterator& operator+=(difference_type offset) {
+    base::Add(offset);
+    return *this;
+  }
+  friend circular_deque_iterator operator-(const circular_deque_iterator& iter,
+                                           difference_type offset) {
+    circular_deque_iterator ret = iter;
+    ret.Add(-offset);
+    return ret;
+  }
+  circular_deque_iterator& operator-=(difference_type offset) {
+    base::Add(-offset);
+    return *this;
+  }
+
+  // Increment and decrement.
+  circular_deque_iterator& operator++() {
+    base::Increment();
+    return *this;
+  }
+  circular_deque_iterator operator++(int) {
+    circular_deque_iterator ret = *this;
+    base::Increment();
+    return ret;
+  }
+  circular_deque_iterator& operator--() {
+    base::Decrement();
+    return *this;
+  }
+  circular_deque_iterator operator--(int) {
+    circular_deque_iterator ret = *this;
+    base::Decrement();
+    return ret;
+  }
+
+ private:
+  circular_deque_iterator(const circular_deque<T>* parent, size_t index)
+      : circular_deque_const_iterator<T>(parent, index) {}
+};
+
+}  // namespace internal
+
+template <typename T>
+class circular_deque {
+ private:
+  using VectorBuffer = internal::VectorBuffer<T>;
+
+ public:
+  using value_type = T;
+  using size_type = std::size_t;
+  using difference_type = std::ptrdiff_t;
+  using reference = value_type&;
+  using const_reference = const value_type&;
+  using pointer = value_type*;
+  using const_pointer = const value_type*;
+
+  using iterator = internal::circular_deque_iterator<T>;
+  using const_iterator = internal::circular_deque_const_iterator<T>;
+  using reverse_iterator = std::reverse_iterator<iterator>;
+  using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+
+  // ---------------------------------------------------------------------------
+  // Constructors.
+
+  constexpr circular_deque() = default;
+
+  // Constructs with |count| copies of |value|, or with |count|
+  // default-constructed elements if no value is given.
+  circular_deque(size_type count) { resize(count); }
+  circular_deque(size_type count, const T& value) { resize(count, value); }
+
+  // Range constructor.
+  template <class InputIterator>
+  circular_deque(InputIterator first, InputIterator last) {
+    assign(first, last);
+  }
+
+  // Copy/move.
+  circular_deque(const circular_deque& other) : buffer_(other.size() + 1) {
+    assign(other.begin(), other.end());
+  }
+  circular_deque(circular_deque&& other) noexcept
+      : buffer_(std::move(other.buffer_)),
+        begin_(other.begin_),
+        end_(other.end_) {
+    other.begin_ = 0;
+    other.end_ = 0;
+  }
+
+  circular_deque(std::initializer_list<value_type> init) { assign(init); }
+
+  ~circular_deque() { DestructRange(begin_, end_); }
+
+  // ---------------------------------------------------------------------------
+  // Assignments.
+  //
+  // All of these may invalidate iterators and references.
+
+  circular_deque& operator=(const circular_deque& other) {
+    if (&other == this)
+      return *this;
+
+    reserve(other.size());
+    assign(other.begin(), other.end());
+    return *this;
+  }
+  circular_deque& operator=(circular_deque&& other) noexcept {
+    if (&other == this)
+      return *this;
+
+    // We're about to overwrite the buffer, so don't free it in clear to
+    // avoid doing it twice.
+    ClearRetainCapacity();
+    buffer_ = std::move(other.buffer_);
+    begin_ = other.begin_;
+    end_ = other.end_;
+
+    other.begin_ = 0;
+    other.end_ = 0;
+
+    IncrementGeneration();
+    return *this;
+  }
+  circular_deque& operator=(std::initializer_list<value_type> ilist) {
+    reserve(ilist.size());
+    assign(std::begin(ilist), std::end(ilist));
+    return *this;
+  }
+
+  void assign(size_type count, const value_type& value) {
+    ClearRetainCapacity();
+    reserve(count);
+    for (size_t i = 0; i < count; i++)
+      emplace_back(value);
+    IncrementGeneration();
+  }
+
+  // This variant should be enabled only when InputIterator is an iterator.
+  template <typename InputIterator>
+  typename std::enable_if<::base::internal::is_iterator<InputIterator>::value,
+                          void>::type
+  assign(InputIterator first, InputIterator last) {
+    // Possible future enhancement: dispatch on iterator tag type. For forward
+    // iterators we can use std::distance to preallocate the space required
+    // and only do one copy.
+    ClearRetainCapacity();
+    for (; first != last; ++first)
+      emplace_back(*first);
+    IncrementGeneration();
+  }
+
+  void assign(std::initializer_list<value_type> value) {
+    reserve(std::distance(value.begin(), value.end()));
+    assign(value.begin(), value.end());
+  }
+
+  // ---------------------------------------------------------------------------
+  // Accessors.
+  //
+  // Since this class assumes no exceptions, at() and operator[] are equivalent.
+
+  const value_type& at(size_type i) const {
+    DCHECK(i < size());
+    size_t right_size = buffer_.capacity() - begin_;
+    if (begin_ <= end_ || i < right_size)
+      return buffer_[begin_ + i];
+    return buffer_[i - right_size];
+  }
+  value_type& at(size_type i) {
+    return const_cast<value_type&>(
+        const_cast<const circular_deque*>(this)->at(i));
+  }
+
+  value_type& operator[](size_type i) { return at(i); }
+  const value_type& operator[](size_type i) const {
+    return const_cast<circular_deque*>(this)->at(i);
+  }
+
+  value_type& front() {
+    DCHECK(!empty());
+    return buffer_[begin_];
+  }
+  const value_type& front() const {
+    DCHECK(!empty());
+    return buffer_[begin_];
+  }
+
+  value_type& back() {
+    DCHECK(!empty());
+    return *(--end());
+  }
+  const value_type& back() const {
+    DCHECK(!empty());
+    return *(--end());
+  }
+
+  // ---------------------------------------------------------------------------
+  // Iterators.
+
+  iterator begin() { return iterator(this, begin_); }
+  const_iterator begin() const { return const_iterator(this, begin_); }
+  const_iterator cbegin() const { return const_iterator(this, begin_); }
+
+  iterator end() { return iterator(this, end_); }
+  const_iterator end() const { return const_iterator(this, end_); }
+  const_iterator cend() const { return const_iterator(this, end_); }
+
+  reverse_iterator rbegin() { return reverse_iterator(end()); }
+  const_reverse_iterator rbegin() const {
+    return const_reverse_iterator(end());
+  }
+  const_reverse_iterator crbegin() const { return rbegin(); }
+
+  reverse_iterator rend() { return reverse_iterator(begin()); }
+  const_reverse_iterator rend() const {
+    return const_reverse_iterator(begin());
+  }
+  const_reverse_iterator crend() const { return rend(); }
+
+  // ---------------------------------------------------------------------------
+  // Memory management.
+
+  // IMPORTANT NOTE ON reserve(...): This class implements auto-shrinking of
+  // the buffer when elements are deleted and there is "too much" wasted space.
+  // So if you call reserve() with a large size in anticipation of pushing many
+  // elements, but pop an element before the queue is full, the capacity you
+  // reserved may be lost.
+  //
+  // As a result, it's only worthwhile to call reserve() when you're adding
+  // many things at once with no intermediate operations.
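+  //
+  // A minimal sketch (illustrative only):
+  //
+  //   base::circular_deque<int> q;
+  //   q.reserve(1000);            // One allocation up front.
+  //   for (int i = 0; i < 1000; i++)
+  //     q.push_back(i);           // No reallocation inside the loop.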
+  void reserve(size_type new_capacity) {
+    if (new_capacity > capacity())
+      SetCapacityTo(new_capacity);
+  }
+
+  size_type capacity() const {
+    // One item is wasted to indicate end().
+    return buffer_.capacity() == 0 ? 0 : buffer_.capacity() - 1;
+  }
+
+  void shrink_to_fit() {
+    if (empty()) {
+      // In the empty case, release the buffer entirely if one was ever
+      // allocated.
+      if (buffer_.capacity())
+        buffer_ = VectorBuffer();
+    } else {
+      SetCapacityTo(size());
+    }
+  }
+
+  // ---------------------------------------------------------------------------
+  // Size management.
+
+  // This will additionally reset the capacity() to 0.
+  void clear() {
+    // This can't resize(0) because that requires a default constructor to
+    // compile, which not all contained classes may implement.
+    ClearRetainCapacity();
+    buffer_ = VectorBuffer();
+  }
+
+  bool empty() const { return begin_ == end_; }
+
+  size_type size() const {
+    if (begin_ <= end_)
+      return end_ - begin_;
+    return buffer_.capacity() - begin_ + end_;
+  }
+
+  // When reducing size, the elements are deleted from the end. When expanding
+  // size, elements are added to the end with |value| or the default
+  // constructed version. Even when using resize(count) to shrink, a default
+  // constructor is required for the code to compile, even though it will not
+  // be called.
+  //
+  // There are two versions rather than a single one with a default value to
+  // avoid creating a temporary when shrinking (where it's not needed). In
+  // addition, when expanding with default-constructed elements, calling the
+  // default constructor for each element is usually faster than making one
+  // default-constructed temporary and copying it.
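+  //
+  // A short sketch (illustrative only):
+  //
+  //   base::circular_deque<int> q;
+  //   q.resize(3);      // q is {0, 0, 0}.
+  //   q.resize(5, 7);   // q is {0, 0, 0, 7, 7}.
+  //   q.resize(2);      // q is {0, 0}; the trailing elements are destroyed.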
+  void resize(size_type count) {
+    // SEE BELOW VERSION if you change this. The code is mostly the same.
+    if (count > size()) {
+      // This could be slightly more efficient, but expanding a queue with
+      // identical elements is unusual and the extra computations of emplacing
+      // one-by-one will typically be small relative to calling the constructor
+      // for every item.
+      ExpandCapacityIfNecessary(count - size());
+      while (size() < count)
+        emplace_back();
+    } else if (count < size()) {
+      size_t new_end = (begin_ + count) % buffer_.capacity();
+      DestructRange(new_end, end_);
+      end_ = new_end;
+
+      ShrinkCapacityIfNecessary();
+    }
+    IncrementGeneration();
+  }
+  void resize(size_type count, const value_type& value) {
+    // SEE ABOVE VERSION if you change this. The code is mostly the same.
+    if (count > size()) {
+      ExpandCapacityIfNecessary(count - size());
+      while (size() < count)
+        emplace_back(value);
+    } else if (count < size()) {
+      size_t new_end = (begin_ + count) % buffer_.capacity();
+      DestructRange(new_end, end_);
+      end_ = new_end;
+
+      ShrinkCapacityIfNecessary();
+    }
+    IncrementGeneration();
+  }
+
+  // ---------------------------------------------------------------------------
+  // Insert and erase.
+  //
+  // Insertion and deletion in the middle is O(n) and invalidates all existing
+  // iterators.
+  //
+  // The implementation of insert isn't optimized as much as it could be. If
+  // the insertion requires that the buffer be grown, it will first be grown
+  // and everything moved, and then the items will be inserted, potentially
+  // moving some items twice. This simplifies the implementation substantially
+  // and means less generated templatized code. Since this is an uncommon
+  // operation for deques, and already relatively slow, it doesn't seem worth
+  // the extra complexity to optimize.
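+  //
+  // A small sketch (illustrative only):
+  //
+  //   base::circular_deque<int> q = {1, 2, 4};
+  //   q.insert(q.begin() + 2, 3);  // q is now {1, 2, 3, 4}; all previously
+  //                                // obtained iterators are now invalid.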
+
+  void insert(const_iterator pos, size_type count, const T& value) {
+    ValidateIterator(pos);
+
+    // Optimize insert at the beginning.
+    if (pos == begin()) {
+      ExpandCapacityIfNecessary(count);
+      for (size_t i = 0; i < count; i++)
+        push_front(value);
+      return;
+    }
+
+    iterator insert_cur(this, pos.index_);
+    iterator insert_end;
+    MakeRoomFor(count, &insert_cur, &insert_end);
+    while (insert_cur < insert_end) {
+      new (&buffer_[insert_cur.index_]) T(value);
+      ++insert_cur;
+    }
+
+    IncrementGeneration();
+  }
+
+  // This enable_if keeps this call from getting confused with the (pos, count,
+  // value) version when value is an integer.
+  template <class InputIterator>
+  typename std::enable_if<::base::internal::is_iterator<InputIterator>::value,
+                          void>::type
+  insert(const_iterator pos, InputIterator first, InputIterator last) {
+    ValidateIterator(pos);
+
+    size_t inserted_items = std::distance(first, last);
+    if (inserted_items == 0)
+      return;  // Can divide by 0 when doing modulo below, so return early.
+
+    // Make a hole to copy the items into.
+    iterator insert_cur;
+    iterator insert_end;
+    if (pos == begin()) {
+      // Optimize insert at the beginning, nothing needs to be shifted and the
+      // hole is the |inserted_items| block immediately before |begin_|.
+      ExpandCapacityIfNecessary(inserted_items);
+      insert_end = begin();
+      begin_ =
+          (begin_ + buffer_.capacity() - inserted_items) % buffer_.capacity();
+      insert_cur = begin();
+    } else {
+      insert_cur = iterator(this, pos.index_);
+      MakeRoomFor(inserted_items, &insert_cur, &insert_end);
+    }
+
+    // Copy the items.
+    while (insert_cur < insert_end) {
+      new (&buffer_[insert_cur.index_]) T(*first);
+      ++insert_cur;
+      ++first;
+    }
+
+    IncrementGeneration();
+  }
+
+  // These all return an iterator to the inserted item. Existing iterators will
+  // be invalidated.
+  iterator insert(const_iterator pos, const T& value) {
+    return emplace(pos, value);
+  }
+  iterator insert(const_iterator pos, T&& value) {
+    return emplace(pos, std::move(value));
+  }
+  template <class... Args>
+  iterator emplace(const_iterator pos, Args&&... args) {
+    ValidateIterator(pos);
+
+    // Optimize insert at beginning which doesn't require shifting.
+    if (pos == cbegin()) {
+      emplace_front(std::forward<Args>(args)...);
+      return begin();
+    }
+
+    // Do this before we make the new iterators we return.
+    IncrementGeneration();
+
+    iterator insert_begin(this, pos.index_);
+    iterator insert_end;
+    MakeRoomFor(1, &insert_begin, &insert_end);
+    new (&buffer_[insert_begin.index_]) T(std::forward<Args>(args)...);
+
+    return insert_begin;
+  }
+
+  // Calling erase() won't automatically shrink the buffer the way resize
+  // or the pop functions do. Erase is slow and relatively uncommon, and in
+  // typical deque usage pops happen regularly, which prevents excessive
+  // buffer growth over long periods of time. It's not
+  // worth having the extra code for every template instantiation of erase()
+  // to resize capacity downward to a new buffer.
+  iterator erase(const_iterator pos) { return erase(pos, pos + 1); }
+  iterator erase(const_iterator first, const_iterator last) {
+    ValidateIterator(first);
+    ValidateIterator(last);
+
+    IncrementGeneration();
+
+    // First, call the destructor on the deleted items.
+    if (first.index_ == last.index_) {
+      // Nothing deleted. Need to return early to avoid falling through to
+      // moving items on top of themselves.
+      return iterator(this, first.index_);
+    } else if (first.index_ < last.index_) {
+      // Contiguous range.
+      buffer_.DestructRange(&buffer_[first.index_], &buffer_[last.index_]);
+    } else {
+      // Deleted range wraps around.
+      buffer_.DestructRange(&buffer_[first.index_],
+                            &buffer_[buffer_.capacity()]);
+      buffer_.DestructRange(&buffer_[0], &buffer_[last.index_]);
+    }
+
+    if (first.index_ == begin_) {
+      // This deletion is from the beginning. Nothing needs to be copied, only
+      // begin_ needs to be updated.
+      begin_ = last.index_;
+      return iterator(this, last.index_);
+    }
+
+    // In an erase operation, the shifted items all move logically to the left,
+    // so move them from left-to-right.
+    iterator move_src(this, last.index_);
+    iterator move_src_end = end();
+    iterator move_dest(this, first.index_);
+    for (; move_src < move_src_end; move_src++, move_dest++) {
+      buffer_.MoveRange(&buffer_[move_src.index_],
+                        &buffer_[move_src.index_ + 1],
+                        &buffer_[move_dest.index_]);
+    }
+
+    end_ = move_dest.index_;
+
+    // Since we did not reallocate and only changed things after the erased
+    // element(s), the input iterator's index points to the element following
+    // the deletion.
+    return iterator(this, first.index_);
+  }
+
+  // ---------------------------------------------------------------------------
+  // Begin/end operations.
+
+  void push_front(const T& value) { emplace_front(value); }
+  void push_front(T&& value) { emplace_front(std::move(value)); }
+
+  void push_back(const T& value) { emplace_back(value); }
+  void push_back(T&& value) { emplace_back(std::move(value)); }
+
+  template <class... Args>
+  reference emplace_front(Args&&... args) {
+    ExpandCapacityIfNecessary(1);
+    if (begin_ == 0)
+      begin_ = buffer_.capacity() - 1;
+    else
+      begin_--;
+    IncrementGeneration();
+    new (&buffer_[begin_]) T(std::forward<Args>(args)...);
+    return front();
+  }
+
+  template <class... Args>
+  reference emplace_back(Args&&... args) {
+    ExpandCapacityIfNecessary(1);
+    new (&buffer_[end_]) T(std::forward<Args>(args)...);
+    if (end_ == buffer_.capacity() - 1)
+      end_ = 0;
+    else
+      end_++;
+    IncrementGeneration();
+    return back();
+  }
+
+  void pop_front() {
+    DCHECK(size());
+    buffer_.DestructRange(&buffer_[begin_], &buffer_[begin_ + 1]);
+    begin_++;
+    if (begin_ == buffer_.capacity())
+      begin_ = 0;
+
+    ShrinkCapacityIfNecessary();
+
+    // Popping can shrink the buffer (see ShrinkCapacityIfNecessary() above),
+    // which moves the underlying storage and invalidates outstanding
+    // iterators, so the generation must be bumped here as well.
+    IncrementGeneration();
+  }
+  void pop_back() {
+    DCHECK(size());
+    if (end_ == 0)
+      end_ = buffer_.capacity() - 1;
+    else
+      end_--;
+    buffer_.DestructRange(&buffer_[end_], &buffer_[end_ + 1]);
+
+    ShrinkCapacityIfNecessary();
+
+    // See pop_front comment about why this is here.
+    IncrementGeneration();
+  }
+
+  // ---------------------------------------------------------------------------
+  // General operations.
+
+  void swap(circular_deque& other) {
+    std::swap(buffer_, other.buffer_);
+    std::swap(begin_, other.begin_);
+    std::swap(end_, other.end_);
+    IncrementGeneration();
+  }
+
+  friend void swap(circular_deque& lhs, circular_deque& rhs) { lhs.swap(rhs); }
+
+ private:
+  friend internal::circular_deque_iterator<T>;
+  friend internal::circular_deque_const_iterator<T>;
+
+  // Moves the items in the given circular buffer to the current one. The
+  // source is moved from so will become invalid. The destination buffer must
+  // have already been allocated with enough size.
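+  //
+  // For example (illustrative), moving a wrapped buffer of capacity 8:
+  //
+  //   from: [ 4  5  .  .  0  1  2  3 ]   from_begin == 4, from_end == 2
+  //   to:   [ 0  1  2  3  4  5  .  . ]   *to_begin == 0, *to_end == 6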
+  static void MoveBuffer(VectorBuffer& from_buf,
+                         size_t from_begin,
+                         size_t from_end,
+                         VectorBuffer* to_buf,
+                         size_t* to_begin,
+                         size_t* to_end) {
+    size_t from_capacity = from_buf.capacity();
+
+    *to_begin = 0;
+    if (from_begin < from_end) {
+      // Contiguous.
+      from_buf.MoveRange(&from_buf[from_begin], &from_buf[from_end],
+                         to_buf->begin());
+      *to_end = from_end - from_begin;
+    } else if (from_begin > from_end) {
+      // Discontiguous, copy the right side to the beginning of the new buffer.
+      from_buf.MoveRange(&from_buf[from_begin], &from_buf[from_capacity],
+                         to_buf->begin());
+      size_t right_size = from_capacity - from_begin;
+      // Append the left side.
+      from_buf.MoveRange(&from_buf[0], &from_buf[from_end],
+                         &(*to_buf)[right_size]);
+      *to_end = right_size + from_end;
+    } else {
+      // No items.
+      *to_end = 0;
+    }
+  }
+
+  // Reallocates the buffer to the given capacity. This assumes the new
+  // capacity is at least the current number of elements (it never deletes
+  // any elements).
+  void SetCapacityTo(size_t new_capacity) {
+    // Use the capacity + 1 as the internal buffer size to differentiate
+    // empty and full (see definition of buffer_ below).
+    VectorBuffer new_buffer(new_capacity + 1);
+    MoveBuffer(buffer_, begin_, end_, &new_buffer, &begin_, &end_);
+    buffer_ = std::move(new_buffer);
+  }
+  void ExpandCapacityIfNecessary(size_t additional_elts) {
+    size_t min_new_capacity = size() + additional_elts;
+    if (capacity() >= min_new_capacity)
+      return;  // Already enough room.
+
+    min_new_capacity =
+        std::max(min_new_capacity, internal::kCircularBufferInitialCapacity);
+
+    // std::vector always grows by at least 50%. WTF::Deque grows by at least
+    // 25%. We expect queue workloads to generally stay at a similar size and
+    // grow less than a vector might, so use 25%.
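+    // For example (illustrative), capacity() == 100 grows to 125 unless
+    // min_new_capacity demands more.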
+    size_t new_capacity =
+        std::max(min_new_capacity, capacity() + capacity() / 4);
+    SetCapacityTo(new_capacity);
+  }
+
+  void ShrinkCapacityIfNecessary() {
+    // Don't auto-shrink below this size.
+    if (capacity() <= internal::kCircularBufferInitialCapacity)
+      return;
+
+    // Shrink when 100% of the size() is wasted.
+    size_t sz = size();
+    size_t empty_spaces = capacity() - sz;
+    if (empty_spaces < sz)
+      return;
+
+    // Leave 1/4 the size as free capacity, not going below the initial
+    // capacity.
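+    // For example (illustrative): size() == 100 with capacity() == 250 wastes
+    // 150 slots (>= 100), so the capacity shrinks to 125.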
+    size_t new_capacity =
+        std::max(internal::kCircularBufferInitialCapacity, sz + sz / 4);
+    if (new_capacity < capacity()) {
+      // SetCapacityTo() adds the extra item needed for the internal capacity.
+      SetCapacityTo(new_capacity);
+    }
+  }
+
+  // Backend for clear() but does not resize the internal buffer.
+  void ClearRetainCapacity() {
+    // This can't resize(0) because that requires a default constructor to
+    // compile, which not all contained classes may implement.
+    DestructRange(begin_, end_);
+    begin_ = 0;
+    end_ = 0;
+    IncrementGeneration();
+  }
+
+  // Calls destructors for the given begin->end indices. The indices may wrap
+  // around. The buffer is not resized, and the begin_ and end_ members are
+  // not changed.
+  void DestructRange(size_t begin, size_t end) {
+    if (end == begin) {
+      return;
+    } else if (end > begin) {
+      buffer_.DestructRange(&buffer_[begin], &buffer_[end]);
+    } else {
+      buffer_.DestructRange(&buffer_[begin], &buffer_[buffer_.capacity()]);
+      buffer_.DestructRange(&buffer_[0], &buffer_[end]);
+    }
+  }
+
+  // Makes room for |count| items starting at |*insert_begin|. Since iterators
+  // are not stable across buffer resizes, |*insert_begin| will be updated to
+    // point to the beginning of the newly opened gap in the new array (it's
+    // in/out), and |*insert_end| will be set to the end of the gap (it's
+    // out-only).
+  void MakeRoomFor(size_t count, iterator* insert_begin, iterator* insert_end) {
+    if (count == 0) {
+      *insert_end = *insert_begin;
+      return;
+    }
+
+    // The offset from the beginning will be stable across reallocations.
+    size_t begin_offset = insert_begin->OffsetFromBegin();
+    ExpandCapacityIfNecessary(count);
+
+    insert_begin->index_ = (begin_ + begin_offset) % buffer_.capacity();
+    *insert_end =
+        iterator(this, (insert_begin->index_ + count) % buffer_.capacity());
+
+    // Update the new end and prepare the iterators for copying.
+    iterator src = end();
+    end_ = (end_ + count) % buffer_.capacity();
+    iterator dest = end();
+
+    // Move the elements. This will always involve shifting logically to the
+    // right, so move in a right-to-left order.
+    while (true) {
+      if (src == *insert_begin)
+        break;
+      --src;
+      --dest;
+      buffer_.MoveRange(&buffer_[src.index_], &buffer_[src.index_ + 1],
+                        &buffer_[dest.index_]);
+    }
+  }
+
+#if DCHECK_IS_ON()
+  // Asserts the given index is dereferenceable. The index is an index into the
+  // buffer, not an index used by operator[] or at() which will be offsets from
+  // begin.
+  void CheckValidIndex(size_t i) const {
+    if (begin_ <= end_)
+      DCHECK(i >= begin_ && i < end_);
+    else
+      DCHECK((i >= begin_ && i < buffer_.capacity()) || i < end_);
+  }
+
+  // Asserts the given index is either dereferenceable or points to end().
+  void CheckValidIndexOrEnd(size_t i) const {
+    if (i != end_)
+      CheckValidIndex(i);
+  }
+
+  void ValidateIterator(const const_iterator& i) const {
+    DCHECK(i.parent_deque_ == this);
+    i.CheckUnstableUsage();
+  }
+
+  // See generation_ below.
+  void IncrementGeneration() { generation_++; }
+#else
+  // No-op versions of these functions for release builds.
+  void CheckValidIndex(size_t) const {}
+  void CheckValidIndexOrEnd(size_t) const {}
+  void ValidateIterator(const const_iterator& i) const {}
+  void IncrementGeneration() {}
+#endif
+
+  // Danger: the buffer_.capacity() is the "internal capacity", which is
+  // capacity() + 1 since there is an extra item to indicate the end. Otherwise
+  // being completely empty and completely full are indistinguishable (begin ==
+  // end). We could add a separate flag to avoid it, but that adds significant
+  // extra complexity since every computation will have to check for it. Always
+  // keeping one extra unused element in the buffer makes iterator computations
+  // much simpler.
+  //
+  // Container internal code will want to use buffer_.capacity() for offset
+  // computations rather than capacity().
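+  //
+  // For example (illustrative), with an internal buffer of 4 slots the
+  // external capacity() is 3:
+  //
+  //   empty: [ .  .  .  . ]   begin_ == end_ == 0
+  //   full:  [ a  b  c  . ]   begin_ == 0, end_ == 3 (one slot always unused)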
+  VectorBuffer buffer_;
+  size_type begin_ = 0;
+  size_type end_ = 0;
+
+#if DCHECK_IS_ON()
+  // Incremented every time a modification is made that could affect iterator
+  // invalidations.
+  uint64_t generation_ = 0;
+#endif
+};
+
+// Implementations of base::Erase[If] (see base/stl_util.h).
+template <class T, class Value>
+void Erase(circular_deque<T>& container, const Value& value) {
+  container.erase(std::remove(container.begin(), container.end(), value),
+                  container.end());
+}
+
+template <class T, class Predicate>
+void EraseIf(circular_deque<T>& container, Predicate pred) {
+  container.erase(std::remove_if(container.begin(), container.end(), pred),
+                  container.end());
+}
+
+}  // namespace base
+
+#endif  // BASE_CONTAINERS_CIRCULAR_DEQUE_H_
diff --git a/base/containers/circular_deque_unittest.cc b/base/containers/circular_deque_unittest.cc
new file mode 100644
index 0000000..0c168e0
--- /dev/null
+++ b/base/containers/circular_deque_unittest.cc
@@ -0,0 +1,881 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/containers/circular_deque.h"
+
+#include "base/test/copy_only_int.h"
+#include "base/test/move_only_int.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using base::internal::VectorBuffer;
+
+namespace base {
+
+namespace {
+
+circular_deque<int> MakeSequence(size_t max) {
+  circular_deque<int> ret;
+  for (size_t i = 0; i < max; i++)
+    ret.push_back(i);
+  return ret;
+}
+
+// Cycles through the queue, popping items from the back and pushing items
+// at the front to validate behavior across different configurations of the
+// queue in relation to the underlying buffer. The tester closure is run for
+// each cycle.
+template <class QueueT, class Tester>
+void CycleTest(circular_deque<QueueT>& queue, const Tester& tester) {
+  size_t steps = queue.size() * 2;
+  for (size_t i = 0; i < steps; i++) {
+    tester(queue, i);
+    queue.pop_back();
+    queue.push_front(QueueT());
+  }
+}
+
+class DestructorCounter {
+ public:
+  DestructorCounter(int* counter) : counter_(counter) {}
+  ~DestructorCounter() { ++(*counter_); }
+
+ private:
+  int* counter_;
+};
+
+}  // namespace
+
+TEST(CircularDeque, FillConstructor) {
+  constexpr size_t num_elts = 9;
+
+  std::vector<int> foo(15);
+  EXPECT_EQ(15u, foo.size());
+
+  // Fill with default constructor.
+  {
+    circular_deque<int> buf(num_elts);
+
+    EXPECT_EQ(num_elts, buf.size());
+    EXPECT_EQ(num_elts, static_cast<size_t>(buf.end() - buf.begin()));
+
+    for (size_t i = 0; i < num_elts; i++)
+      EXPECT_EQ(0, buf[i]);
+  }
+
+  // Fill with explicit value.
+  {
+    int value = 199;
+    circular_deque<int> buf(num_elts, value);
+
+    EXPECT_EQ(num_elts, buf.size());
+    EXPECT_EQ(num_elts, static_cast<size_t>(buf.end() - buf.begin()));
+
+    for (size_t i = 0; i < num_elts; i++)
+      EXPECT_EQ(value, buf[i]);
+  }
+}
+
+TEST(CircularDeque, CopyAndRangeConstructor) {
+  int values[] = {1, 2, 3, 4, 5, 6};
+  circular_deque<CopyOnlyInt> first(std::begin(values), std::end(values));
+
+  circular_deque<CopyOnlyInt> second(first);
+  EXPECT_EQ(6u, second.size());
+  for (int i = 0; i < 6; i++)
+    EXPECT_EQ(i + 1, second[i].data());
+}
+
+TEST(CircularDeque, MoveConstructor) {
+  int values[] = {1, 2, 3, 4, 5, 6};
+  circular_deque<MoveOnlyInt> first(std::begin(values), std::end(values));
+
+  circular_deque<MoveOnlyInt> second(std::move(first));
+  EXPECT_TRUE(first.empty());
+  EXPECT_EQ(6u, second.size());
+  for (int i = 0; i < 6; i++)
+    EXPECT_EQ(i + 1, second[i].data());
+}
+
+TEST(CircularDeque, InitializerListConstructor) {
+  circular_deque<int> empty({});
+  ASSERT_TRUE(empty.empty());
+
+  circular_deque<int> first({1, 2, 3, 4, 5, 6});
+  EXPECT_EQ(6u, first.size());
+  for (int i = 0; i < 6; i++)
+    EXPECT_EQ(i + 1, first[i]);
+}
+
+TEST(CircularDeque, Destructor) {
+  int destruct_count = 0;
+
+  // Contiguous buffer.
+  {
+    circular_deque<DestructorCounter> q;
+    q.resize(5, DestructorCounter(&destruct_count));
+
+    EXPECT_EQ(1, destruct_count);  // The temporary in the call to resize().
+    destruct_count = 0;
+  }
+  EXPECT_EQ(5, destruct_count);  // One call for each.
+
+  // Force a wraparound buffer.
+  {
+    circular_deque<DestructorCounter> q;
+    q.reserve(7);
+    q.resize(5, DestructorCounter(&destruct_count));
+
+    // Cycle through some elements in our buffer to force a wraparound.
+    destruct_count = 0;
+    for (int i = 0; i < 4; i++) {
+      q.emplace_back(&destruct_count);
+      q.pop_front();
+    }
+    EXPECT_EQ(4, destruct_count);  // One for each cycle.
+    destruct_count = 0;
+  }
+  EXPECT_EQ(5, destruct_count);  // One call for each.
+}
+
+TEST(CircularDeque, EqualsCopy) {
+  circular_deque<int> first = {1, 2, 3, 4, 5, 6};
+  circular_deque<int> copy;
+  EXPECT_TRUE(copy.empty());
+  copy = first;
+  EXPECT_EQ(6u, copy.size());
+  for (int i = 0; i < 6; i++) {
+    EXPECT_EQ(i + 1, first[i]);
+    EXPECT_EQ(i + 1, copy[i]);
+    EXPECT_NE(&first[i], &copy[i]);
+  }
+}
+
+TEST(CircularDeque, EqualsMove) {
+  circular_deque<int> first = {1, 2, 3, 4, 5, 6};
+  circular_deque<int> move;
+  EXPECT_TRUE(move.empty());
+  move = std::move(first);
+  EXPECT_TRUE(first.empty());
+  EXPECT_EQ(6u, move.size());
+  for (int i = 0; i < 6; i++)
+    EXPECT_EQ(i + 1, move[i]);
+}
+
+// Tests that self-assignment is a no-op.
+TEST(CircularDeque, EqualsSelf) {
+  circular_deque<int> q = {1, 2, 3, 4, 5, 6};
+  q = *&q;  // The *& defeats Clang's -Wself-assign warning.
+  EXPECT_EQ(6u, q.size());
+  for (int i = 0; i < 6; i++)
+    EXPECT_EQ(i + 1, q[i]);
+}
+
+TEST(CircularDeque, EqualsInitializerList) {
+  circular_deque<int> q;
+  EXPECT_TRUE(q.empty());
+  q = {1, 2, 3, 4, 5, 6};
+  EXPECT_EQ(6u, q.size());
+  for (int i = 0; i < 6; i++)
+    EXPECT_EQ(i + 1, q[i]);
+}
+
+TEST(CircularDeque, AssignCountValue) {
+  circular_deque<int> empty;
+  empty.assign(0, 52);
+  EXPECT_EQ(0u, empty.size());
+
+  circular_deque<int> full;
+  size_t count = 13;
+  int value = 12345;
+  full.assign(count, value);
+  EXPECT_EQ(count, full.size());
+
+  for (size_t i = 0; i < count; i++)
+    EXPECT_EQ(value, full[i]);
+}
+
+TEST(CircularDeque, AssignIterator) {
+  int range[8] = {11, 12, 13, 14, 15, 16, 17, 18};
+
+  circular_deque<int> empty;
+  empty.assign(std::begin(range), std::begin(range));
+  EXPECT_TRUE(empty.empty());
+
+  circular_deque<int> full;
+  full.assign(std::begin(range), std::end(range));
+  EXPECT_EQ(8u, full.size());
+  for (size_t i = 0; i < 8; i++)
+    EXPECT_EQ(range[i], full[i]);
+}
+
+TEST(CircularDeque, AssignInitializerList) {
+  circular_deque<int> empty;
+  empty.assign({});
+  EXPECT_TRUE(empty.empty());
+
+  circular_deque<int> full;
+  full.assign({11, 12, 13, 14, 15, 16, 17, 18});
+  EXPECT_EQ(8u, full.size());
+  for (int i = 0; i < 8; i++)
+    EXPECT_EQ(11 + i, full[i]);
+}
+
+// Tests [] and .at().
+TEST(CircularDeque, At) {
+  circular_deque<int> q = MakeSequence(10);
+  CycleTest(q, [](const circular_deque<int>& q, size_t cycle) {
+    size_t expected_size = 10;
+    EXPECT_EQ(expected_size, q.size());
+
+    // A sequence of 0's.
+    size_t index = 0;
+    size_t num_zeros = std::min(expected_size, cycle);
+    for (size_t i = 0; i < num_zeros; i++, index++) {
+      EXPECT_EQ(0, q[index]);
+      EXPECT_EQ(0, q.at(index));
+    }
+
+    // Followed by a sequence of increasing ints.
+    size_t num_ints = expected_size - num_zeros;
+    for (int i = 0; i < static_cast<int>(num_ints); i++, index++) {
+      EXPECT_EQ(i, q[index]);
+      EXPECT_EQ(i, q.at(index));
+    }
+  });
+}
+
+// This also tests the copy constructor with lots of different types of
+// input configurations.
+TEST(CircularDeque, FrontBackPushPop) {
+  circular_deque<int> q = MakeSequence(10);
+
+  int expected_front = 0;
+  int expected_back = 9;
+
+  // Go in one direction.
+  for (int i = 0; i < 100; i++) {
+    const circular_deque<int> const_q(q);
+
+    EXPECT_EQ(expected_front, q.front());
+    EXPECT_EQ(expected_back, q.back());
+    EXPECT_EQ(expected_front, const_q.front());
+    EXPECT_EQ(expected_back, const_q.back());
+
+    expected_front++;
+    expected_back++;
+
+    q.pop_front();
+    q.push_back(expected_back);
+  }
+
+  // Go back in reverse.
+  for (int i = 0; i < 100; i++) {
+    const circular_deque<int> const_q(q);
+
+    EXPECT_EQ(expected_front, q.front());
+    EXPECT_EQ(expected_back, q.back());
+    EXPECT_EQ(expected_front, const_q.front());
+    EXPECT_EQ(expected_back, const_q.back());
+
+    expected_front--;
+    expected_back--;
+
+    q.pop_back();
+    q.push_front(expected_front);
+  }
+}
+
+TEST(CircularDeque, ReallocateWithSplitBuffer) {
+  // Tests reallocating a deque with an internal buffer that looks like this:
+  //   4   5   x   x   0   1   2   3
+  //       end-^       ^-begin
+  circular_deque<int> q;
+  q.reserve(7);  // Internal buffer is always 1 larger than requested.
+  q.push_back(-1);
+  q.push_back(-1);
+  q.push_back(-1);
+  q.push_back(-1);
+  q.push_back(0);
+  q.pop_front();
+  q.pop_front();
+  q.pop_front();
+  q.pop_front();
+  q.push_back(1);
+  q.push_back(2);
+  q.push_back(3);
+  q.push_back(4);
+  q.push_back(5);
+
+  q.shrink_to_fit();
+  EXPECT_EQ(6u, q.size());
+
+  EXPECT_EQ(0, q[0]);
+  EXPECT_EQ(1, q[1]);
+  EXPECT_EQ(2, q[2]);
+  EXPECT_EQ(3, q[3]);
+  EXPECT_EQ(4, q[4]);
+  EXPECT_EQ(5, q[5]);
+}
+
+TEST(CircularDeque, Swap) {
+  circular_deque<int> a = MakeSequence(10);
+  circular_deque<int> b = MakeSequence(100);
+
+  a.swap(b);
+  EXPECT_EQ(100u, a.size());
+  for (int i = 0; i < 100; i++)
+    EXPECT_EQ(i, a[i]);
+
+  EXPECT_EQ(10u, b.size());
+  for (int i = 0; i < 10; i++)
+    EXPECT_EQ(i, b[i]);
+}
+
+TEST(CircularDeque, Iteration) {
+  circular_deque<int> q = MakeSequence(10);
+
+  int expected_front = 0;
+  int expected_back = 9;
+
+  // This loop causes various combinations of begin and end to be tested.
+  for (int i = 0; i < 30; i++) {
+    // Range-based for loop going forward.
+    int current_expected = expected_front;
+    for (int cur : q) {
+      EXPECT_EQ(current_expected, cur);
+      current_expected++;
+    }
+
+    // Manually test reverse iterators.
+    current_expected = expected_back;
+    for (auto cur = q.crbegin(); cur < q.crend(); cur++) {
+      EXPECT_EQ(current_expected, *cur);
+      current_expected--;
+    }
+
+    expected_front++;
+    expected_back++;
+
+    q.pop_front();
+    q.push_back(expected_back);
+  }
+
+  // Go back in reverse.
+  for (int i = 0; i < 100; i++) {
+    const circular_deque<int> const_q(q);
+
+    EXPECT_EQ(expected_front, q.front());
+    EXPECT_EQ(expected_back, q.back());
+    EXPECT_EQ(expected_front, const_q.front());
+    EXPECT_EQ(expected_back, const_q.back());
+
+    expected_front--;
+    expected_back--;
+
+    q.pop_back();
+    q.push_front(expected_front);
+  }
+}
+
+TEST(CircularDeque, IteratorComparisons) {
+  circular_deque<int> q = MakeSequence(10);
+
+  // This loop causes various combinations of begin and end to be tested.
+  for (int i = 0; i < 30; i++) {
+    EXPECT_LT(q.begin(), q.end());
+    EXPECT_LE(q.begin(), q.end());
+    EXPECT_LE(q.begin(), q.begin());
+
+    EXPECT_GT(q.end(), q.begin());
+    EXPECT_GE(q.end(), q.begin());
+    EXPECT_GE(q.end(), q.end());
+
+    EXPECT_EQ(q.begin(), q.begin());
+    EXPECT_NE(q.begin(), q.end());
+
+    q.push_front(10);
+    q.pop_back();
+  }
+}
+
+TEST(CircularDeque, IteratorIncDec) {
+  circular_deque<int> q;
+
+  // No-op offset computations with no capacity.
+  EXPECT_EQ(q.end(), q.end() + 0);
+  EXPECT_EQ(q.end(), q.end() - 0);
+
+  q = MakeSequence(10);
+
+  // Mutable preincrement, predecrement.
+  {
+    circular_deque<int>::iterator it = q.begin();
+    circular_deque<int>::iterator op_result = ++it;
+    EXPECT_EQ(1, *op_result);
+    EXPECT_EQ(1, *it);
+
+    op_result = --it;
+    EXPECT_EQ(0, *op_result);
+    EXPECT_EQ(0, *it);
+  }
+
+  // Const preincrement, predecrement.
+  {
+    circular_deque<int>::const_iterator it = q.begin();
+    circular_deque<int>::const_iterator op_result = ++it;
+    EXPECT_EQ(1, *op_result);
+    EXPECT_EQ(1, *it);
+
+    op_result = --it;
+    EXPECT_EQ(0, *op_result);
+    EXPECT_EQ(0, *it);
+  }
+
+  // Mutable postincrement, postdecrement.
+  {
+    circular_deque<int>::iterator it = q.begin();
+    circular_deque<int>::iterator op_result = it++;
+    EXPECT_EQ(0, *op_result);
+    EXPECT_EQ(1, *it);
+
+    op_result = it--;
+    EXPECT_EQ(1, *op_result);
+    EXPECT_EQ(0, *it);
+  }
+
+  // Const postincrement, postdecrement.
+  {
+    circular_deque<int>::const_iterator it = q.begin();
+    circular_deque<int>::const_iterator op_result = it++;
+    EXPECT_EQ(0, *op_result);
+    EXPECT_EQ(1, *it);
+
+    op_result = it--;
+    EXPECT_EQ(1, *op_result);
+    EXPECT_EQ(0, *it);
+  }
+}
+
+TEST(CircularDeque, IteratorIntegerOps) {
+  circular_deque<int> q = MakeSequence(10);
+
+  int expected_front = 0;
+  int expected_back = 9;
+
+  for (int i = 0; i < 30; i++) {
+    EXPECT_EQ(0, q.begin() - q.begin());
+    EXPECT_EQ(0, q.end() - q.end());
+    EXPECT_EQ(q.size(), static_cast<size_t>(q.end() - q.begin()));
+
+    // +=
+    circular_deque<int>::iterator eight = q.begin();
+    eight += 8;
+    EXPECT_EQ(8, eight - q.begin());
+    EXPECT_EQ(expected_front + 8, *eight);
+
+    // -=
+    eight -= 8;
+    EXPECT_EQ(q.begin(), eight);
+
+    // +
+    eight = eight + 8;
+    EXPECT_EQ(8, eight - q.begin());
+
+    // -
+    eight = eight - 8;
+    EXPECT_EQ(q.begin(), eight);
+
+    expected_front++;
+    expected_back++;
+
+    q.pop_front();
+    q.push_back(expected_back);
+  }
+}
+
+TEST(CircularDeque, IteratorArrayAccess) {
+  circular_deque<int> q = MakeSequence(10);
+
+  circular_deque<int>::iterator begin = q.begin();
+  EXPECT_EQ(0, begin[0]);
+  EXPECT_EQ(9, begin[9]);
+
+  circular_deque<int>::iterator end = q.end();
+  EXPECT_EQ(0, end[-10]);
+  EXPECT_EQ(9, end[-1]);
+
+  begin[0] = 100;
+  EXPECT_EQ(100, end[-10]);
+}
+
+TEST(CircularDeque, ReverseIterator) {
+  circular_deque<int> q;
+  q.push_back(4);
+  q.push_back(3);
+  q.push_back(2);
+  q.push_back(1);
+
+  circular_deque<int>::reverse_iterator iter = q.rbegin();
+  EXPECT_EQ(1, *iter);
+  iter++;
+  EXPECT_EQ(2, *iter);
+  ++iter;
+  EXPECT_EQ(3, *iter);
+  iter++;
+  EXPECT_EQ(4, *iter);
+  ++iter;
+  EXPECT_EQ(q.rend(), iter);
+}
+
+TEST(CircularDeque, CapacityReserveShrink) {
+  circular_deque<int> q;
+
+  // A default constructed queue should have no capacity since it should waste
+  // no space.
+  EXPECT_TRUE(q.empty());
+  EXPECT_EQ(0u, q.size());
+  EXPECT_EQ(0u, q.capacity());
+
+  size_t new_capacity = 100;
+  q.reserve(new_capacity);
+  EXPECT_EQ(new_capacity, q.capacity());
+
+  // Adding that many items should not cause a resize.
+  for (size_t i = 0; i < new_capacity; i++)
+    q.push_back(i);
+  EXPECT_EQ(new_capacity, q.size());
+  EXPECT_EQ(new_capacity, q.capacity());
+
+  // Shrink to fit to a smaller size.
+  size_t capacity_2 = new_capacity / 2;
+  q.resize(capacity_2);
+  q.shrink_to_fit();
+  EXPECT_EQ(capacity_2, q.size());
+  EXPECT_EQ(capacity_2, q.capacity());
+}
+
+TEST(CircularDeque, CapacityAutoShrink) {
+  size_t big_size = 1000u;
+  circular_deque<int> q;
+  q.resize(big_size);
+
+  size_t big_capacity = q.capacity();
+
+  // Delete 3/4 of the items.
+  for (size_t i = 0; i < big_size / 4 * 3; i++)
+    q.pop_back();
+
+  // The capacity should have shrunk by deleting that many items.
+  size_t medium_capacity = q.capacity();
+  EXPECT_GT(big_capacity, medium_capacity);
+
+  // Using resize to shrink should keep some extra capacity.
+  q.resize(1);
+  EXPECT_LT(1u, q.capacity());
+
+  q.resize(0);
+  EXPECT_LT(0u, q.capacity());
+
+  // Using clear() should delete everything.
+  q.clear();
+  EXPECT_EQ(0u, q.capacity());
+}
+
+TEST(CircularDeque, ClearAndEmpty) {
+  circular_deque<int> q;
+  EXPECT_TRUE(q.empty());
+
+  q.resize(10);
+  EXPECT_EQ(10u, q.size());
+  EXPECT_FALSE(q.empty());
+
+  q.clear();
+  EXPECT_EQ(0u, q.size());
+  EXPECT_TRUE(q.empty());
+
+  // clear() also should reset the capacity.
+  EXPECT_EQ(0u, q.capacity());
+}
+
+TEST(CircularDeque, Resize) {
+  circular_deque<int> q;
+
+  // Resize with default constructor.
+  size_t first_size = 10;
+  q.resize(first_size);
+  EXPECT_EQ(first_size, q.size());
+  for (size_t i = 0; i < first_size; i++)
+    EXPECT_EQ(0, q[i]);
+
+  // Resize with different value.
+  size_t second_expand = 10;
+  q.resize(first_size + second_expand, 3);
+  EXPECT_EQ(first_size + second_expand, q.size());
+  for (size_t i = 0; i < first_size; i++)
+    EXPECT_EQ(0, q[i]);
+  for (size_t i = 0; i < second_expand; i++)
+    EXPECT_EQ(3, q[i + first_size]);
+
+  // Erase from the end and add to the beginning so resize is forced to cross
+  // a circular buffer wrap boundary.
+  q.shrink_to_fit();
+  for (int i = 0; i < 5; i++) {
+    q.pop_back();
+    q.push_front(6);
+  }
+  q.resize(10);
+
+  EXPECT_EQ(6, q[0]);
+  EXPECT_EQ(6, q[1]);
+  EXPECT_EQ(6, q[2]);
+  EXPECT_EQ(6, q[3]);
+  EXPECT_EQ(6, q[4]);
+  EXPECT_EQ(0, q[5]);
+  EXPECT_EQ(0, q[6]);
+  EXPECT_EQ(0, q[7]);
+  EXPECT_EQ(0, q[8]);
+  EXPECT_EQ(0, q[9]);
+}
+
+// Tests destructor behavior of resize.
+TEST(CircularDeque, ResizeDelete) {
+  int counter = 0;
+  circular_deque<DestructorCounter> q;
+  q.resize(10, DestructorCounter(&counter));
+  // Only the one temporary created by the resize() call should be deleted.
+  EXPECT_EQ(1, counter);
+
+  // The loops below assume the capacity will be set by resize().
+  EXPECT_EQ(10u, q.capacity());
+
+  // Delete some via resize(). The new size is chosen so that the wasted
+  // items are still fewer than the size(), so auto-shrinking is not
+  // triggered (which would mess up our destructor counting).
+  counter = 0;
+  q.resize(8, DestructorCounter(&counter));
+  // 2 deleted ones + the one temporary in the resize() call.
+  EXPECT_EQ(3, counter);
+
+  // Cycle through some items so two items will cross the boundary in the
+  // 11-item buffer (one more than the capacity).
+  //   Before: x x x x x x x x . . .
+  //   After:  x . . . x x x x x x x
+  counter = 0;
+  for (int i = 0; i < 4; i++) {
+    q.emplace_back(&counter);
+    q.pop_front();
+  }
+  EXPECT_EQ(4, counter);  // Loop should have deleted 4 items.
+
+  // Delete two items with resize, these should be on either side of the
+  // buffer wrap point.
+  counter = 0;
+  q.resize(6, DestructorCounter(&counter));
+  // 2 deleted ones + the one temporary in the resize() call.
+  EXPECT_EQ(3, counter);
+}
+
+TEST(CircularDeque, InsertEraseSingle) {
+  circular_deque<int> q;
+  q.push_back(1);
+  q.push_back(2);
+
+  // Insert at the beginning.
+  auto result = q.insert(q.begin(), 0);
+  EXPECT_EQ(q.begin(), result);
+  EXPECT_EQ(3u, q.size());
+  EXPECT_EQ(0, q[0]);
+  EXPECT_EQ(1, q[1]);
+  EXPECT_EQ(2, q[2]);
+
+  // Erase at the beginning.
+  result = q.erase(q.begin());
+  EXPECT_EQ(q.begin(), result);
+  EXPECT_EQ(2u, q.size());
+  EXPECT_EQ(1, q[0]);
+  EXPECT_EQ(2, q[1]);
+
+  // Insert at the end.
+  result = q.insert(q.end(), 3);
+  EXPECT_EQ(q.end() - 1, result);
+  EXPECT_EQ(1, q[0]);
+  EXPECT_EQ(2, q[1]);
+  EXPECT_EQ(3, q[2]);
+
+  // Erase at the end.
+  result = q.erase(q.end() - 1);
+  EXPECT_EQ(q.end(), result);
+  EXPECT_EQ(1, q[0]);
+  EXPECT_EQ(2, q[1]);
+
+  // Insert in the middle.
+  result = q.insert(q.begin() + 1, 10);
+  EXPECT_EQ(q.begin() + 1, result);
+  EXPECT_EQ(1, q[0]);
+  EXPECT_EQ(10, q[1]);
+  EXPECT_EQ(2, q[2]);
+
+  // Erase in the middle.
+  result = q.erase(q.begin() + 1);
+  EXPECT_EQ(q.begin() + 1, result);
+  EXPECT_EQ(1, q[0]);
+  EXPECT_EQ(2, q[1]);
+}
+
+TEST(CircularDeque, InsertFill) {
+  circular_deque<int> q;
+
+  // Fill when empty.
+  q.insert(q.begin(), 2, 1);
+
+  // 0's at the beginning.
+  q.insert(q.begin(), 2, 0);
+
+  // 50's in the middle (now at offset 3).
+  q.insert(q.begin() + 3, 2, 50);
+
+  // 200's at the end.
+  q.insert(q.end(), 2, 200);
+
+  ASSERT_EQ(8u, q.size());
+  EXPECT_EQ(0, q[0]);
+  EXPECT_EQ(0, q[1]);
+  EXPECT_EQ(1, q[2]);
+  EXPECT_EQ(50, q[3]);
+  EXPECT_EQ(50, q[4]);
+  EXPECT_EQ(1, q[5]);
+  EXPECT_EQ(200, q[6]);
+  EXPECT_EQ(200, q[7]);
+}
+
+TEST(CircularDeque, InsertEraseRange) {
+  circular_deque<int> q;
+
+  // Erasing nothing from an empty deque should work.
+  q.erase(q.begin(), q.end());
+
+  // Loop index used below to shift the used items in the buffer.
+  for (int i = 0; i < 10; i++) {
+    circular_deque<int> source;
+
+    // Fill empty range.
+    q.insert(q.begin(), source.begin(), source.end());
+
+    // Have some stuff to insert.
+    source.push_back(1);
+    source.push_back(2);
+
+    q.insert(q.begin(), source.begin(), source.end());
+
+    // Shift the used items in the buffer by i which will place the two used
+    // elements in different places in the buffer each time through this loop.
+    for (int shift_i = 0; shift_i < i; shift_i++) {
+      q.push_back(0);
+      q.pop_front();
+    }
+
+    // Set the two items to notable values so we can check for them later.
+    ASSERT_EQ(2u, q.size());
+    q[0] = 100;
+    q[1] = 101;
+
+    // Insert at the beginning, middle (now at offset 3), and end.
+    q.insert(q.begin(), source.begin(), source.end());
+    q.insert(q.begin() + 3, source.begin(), source.end());
+    q.insert(q.end(), source.begin(), source.end());
+
+    ASSERT_EQ(8u, q.size());
+    EXPECT_EQ(1, q[0]);
+    EXPECT_EQ(2, q[1]);
+    EXPECT_EQ(100, q[2]);  // First of the two original items.
+    EXPECT_EQ(1, q[3]);
+    EXPECT_EQ(2, q[4]);
+    EXPECT_EQ(101, q[5]);  // Second of the two original items.
+    EXPECT_EQ(1, q[6]);
+    EXPECT_EQ(2, q[7]);
+
+    // Now erase the inserted ranges. Try each subsection also with no items
+    // being erased, which should be a no-op.
+    auto result = q.erase(q.begin(), q.begin());  // No-op.
+    EXPECT_EQ(q.begin(), result);
+    result = q.erase(q.begin(), q.begin() + 2);
+    EXPECT_EQ(q.begin(), result);
+
+    result = q.erase(q.begin() + 1, q.begin() + 1);  // No-op.
+    EXPECT_EQ(q.begin() + 1, result);
+    result = q.erase(q.begin() + 1, q.begin() + 3);
+    EXPECT_EQ(q.begin() + 1, result);
+
+    result = q.erase(q.end() - 2, q.end() - 2);  // No-op.
+    EXPECT_EQ(q.end() - 2, result);
+    result = q.erase(q.end() - 2, q.end());
+    EXPECT_EQ(q.end(), result);
+
+    ASSERT_EQ(2u, q.size());
+    EXPECT_EQ(100, q[0]);
+    EXPECT_EQ(101, q[1]);
+
+    // Erase everything.
+    result = q.erase(q.begin(), q.end());
+    EXPECT_EQ(q.end(), result);
+    EXPECT_TRUE(q.empty());
+  }
+}
+
+TEST(CircularDeque, EmplaceMoveOnly) {
+  int values[] = {1, 3};
+  circular_deque<MoveOnlyInt> q(std::begin(values), std::end(values));
+
+  q.emplace(q.begin(), MoveOnlyInt(0));
+  q.emplace(q.begin() + 2, MoveOnlyInt(2));
+  q.emplace(q.end(), MoveOnlyInt(4));
+
+  ASSERT_EQ(5u, q.size());
+  EXPECT_EQ(0, q[0].data());
+  EXPECT_EQ(1, q[1].data());
+  EXPECT_EQ(2, q[2].data());
+  EXPECT_EQ(3, q[3].data());
+  EXPECT_EQ(4, q[4].data());
+}
+
+TEST(CircularDeque, EmplaceFrontBackReturnsReference) {
+  circular_deque<int> q;
+  q.reserve(2);
+
+  int& front = q.emplace_front(1);
+  int& back = q.emplace_back(2);
+  ASSERT_EQ(2u, q.size());
+  EXPECT_EQ(1, q[0]);
+  EXPECT_EQ(2, q[1]);
+
+  EXPECT_EQ(&front, &q.front());
+  EXPECT_EQ(&back, &q.back());
+
+  front = 3;
+  back = 4;
+
+  ASSERT_EQ(2u, q.size());
+  EXPECT_EQ(3, q[0]);
+  EXPECT_EQ(4, q[1]);
+
+  EXPECT_EQ(&front, &q.front());
+  EXPECT_EQ(&back, &q.back());
+}
+
+/*
+This test should assert in a debug build. It tries to dereference an iterator
+after mutating the container. Uncomment to double-check that this works.
+TEST(CircularDeque, UseIteratorAfterMutate) {
+  circular_deque<int> q;
+  q.push_back(0);
+
+  auto old_begin = q.begin();
+  EXPECT_EQ(0, *old_begin);
+
+  q.push_back(1);
+  EXPECT_EQ(0, *old_begin);  // Should DCHECK.
+}
+*/
+
+}  // namespace base
diff --git a/base/containers/flat_map.h b/base/containers/flat_map.h
new file mode 100644
index 0000000..b4fe519
--- /dev/null
+++ b/base/containers/flat_map.h
@@ -0,0 +1,362 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_FLAT_MAP_H_
+#define BASE_CONTAINERS_FLAT_MAP_H_
+
+#include <functional>
+#include <tuple>
+#include <utility>
+
+#include "base/containers/flat_tree.h"
+#include "base/logging.h"
+#include "base/template_util.h"
+
+namespace base {
+
+namespace internal {
+
+// An implementation of the flat_tree GetKeyFromValue template parameter that
+// extracts the key as the first element of a pair.
+template <class Key, class Mapped>
+struct GetKeyFromValuePairFirst {
+  const Key& operator()(const std::pair<Key, Mapped>& p) const {
+    return p.first;
+  }
+};
+
+}  // namespace internal
+
+// flat_map is a container with a std::map-like interface that stores its
+// contents in a sorted vector.
+//
+// Please see //base/containers/README.md for an overview of which container
+// to select.
+//
+// PROS
+//
+//  - Good memory locality.
+//  - Low overhead, especially for smaller maps.
+//  - Performance is good for more workloads than you might expect (see
+//    overview link above).
+//  - Supports C++14 map interface.
+//
+// CONS
+//
+//  - Inserts and removals are O(n).
+//
+// IMPORTANT NOTES
+//
+//  - Iterators are invalidated across mutations.
+//  - If possible, construct a flat_map in one operation by inserting into
+//    a std::vector and moving that vector into the flat_map constructor.
+//
+// QUICK REFERENCE
+//
+// Most of the core functionality is inherited from flat_tree. Please see
+// flat_tree.h for more details for most of these functions. As a quick
+// reference, the functions available are:
+//
+// Constructors (inputs need not be sorted):
+//   flat_map(InputIterator first, InputIterator last,
+//            FlatContainerDupes = KEEP_FIRST_OF_DUPES,
+//            const Compare& compare = Compare());
+//   flat_map(const flat_map&);
+//   flat_map(flat_map&&);
+//   flat_map(std::vector<value_type>,
+//            FlatContainerDupes = KEEP_FIRST_OF_DUPES,
+//            const Compare& compare = Compare()); // Re-use storage.
+//   flat_map(std::initializer_list<value_type> ilist,
+//            FlatContainerDupes = KEEP_FIRST_OF_DUPES,
+//            const Compare& comp = Compare());
+//
+// Assignment functions:
+//   flat_map& operator=(const flat_map&);
+//   flat_map& operator=(flat_map&&);
+//   flat_map& operator=(initializer_list<value_type>);
+//
+// Memory management functions:
+//   void   reserve(size_t);
+//   size_t capacity() const;
+//   void   shrink_to_fit();
+//
+// Size management functions:
+//   void   clear();
+//   size_t size() const;
+//   size_t max_size() const;
+//   bool   empty() const;
+//
+// Iterator functions:
+//   iterator               begin();
+//   const_iterator         begin() const;
+//   const_iterator         cbegin() const;
+//   iterator               end();
+//   const_iterator         end() const;
+//   const_iterator         cend() const;
+//   reverse_iterator       rbegin();
+//   const_reverse_iterator rbegin() const;
+//   const_reverse_iterator crbegin() const;
+//   reverse_iterator       rend();
+//   const_reverse_iterator rend() const;
+//   const_reverse_iterator crend() const;
+//
+// Insert and accessor functions:
+//   mapped_type&         operator[](const key_type&);
+//   mapped_type&         operator[](key_type&&);
+//   pair<iterator, bool> insert(const value_type&);
+//   pair<iterator, bool> insert(value_type&&);
+//   iterator             insert(const_iterator hint, const value_type&);
+//   iterator             insert(const_iterator hint, value_type&&);
+//   void                 insert(InputIterator first, InputIterator last,
+//                               FlatContainerDupes = KEEP_FIRST_OF_DUPES);
+//   pair<iterator, bool> insert_or_assign(K&&, M&&);
+//   iterator             insert_or_assign(const_iterator hint, K&&, M&&);
+//   pair<iterator, bool> emplace(Args&&...);
+//   iterator             emplace_hint(const_iterator, Args&&...);
+//   pair<iterator, bool> try_emplace(K&&, Args&&...);
+//   iterator             try_emplace(const_iterator hint, K&&, Args&&...);
+//
+// Erase functions:
+//   iterator erase(iterator);
+//   iterator erase(const_iterator);
+//   iterator erase(const_iterator first, const_iterator last);
+//   template <class K> size_t erase(const K& key);
+//
+// Comparators (see std::map documentation).
+//   key_compare   key_comp() const;
+//   value_compare value_comp() const;
+//
+// Search functions:
+//   template <typename K> size_t                   count(const K&) const;
+//   template <typename K> iterator                 find(const K&);
+//   template <typename K> const_iterator           find(const K&) const;
+//   template <typename K> pair<iterator, iterator> equal_range(const K&);
+//   template <typename K> iterator                 lower_bound(const K&);
+//   template <typename K> const_iterator           lower_bound(const K&) const;
+//   template <typename K> iterator                 upper_bound(const K&);
+//   template <typename K> const_iterator           upper_bound(const K&) const;
+//
+// General functions:
+//   void swap(flat_map&);
+//
+// Non-member operators:
+//   bool operator==(const flat_map&, const flat_map&);
+//   bool operator!=(const flat_map&, const flat_map&);
+//   bool operator<(const flat_map&, const flat_map&);
+//   bool operator>(const flat_map&, const flat_map&);
+//   bool operator>=(const flat_map&, const flat_map&);
+//   bool operator<=(const flat_map&, const flat_map&);
+//
+template <class Key, class Mapped, class Compare = std::less<>>
+class flat_map : public ::base::internal::flat_tree<
+                     Key,
+                     std::pair<Key, Mapped>,
+                     ::base::internal::GetKeyFromValuePairFirst<Key, Mapped>,
+                     Compare> {
+ private:
+  using tree = typename ::base::internal::flat_tree<
+      Key,
+      std::pair<Key, Mapped>,
+      ::base::internal::GetKeyFromValuePairFirst<Key, Mapped>,
+      Compare>;
+
+ public:
+  using key_type = typename tree::key_type;
+  using mapped_type = Mapped;
+  using value_type = typename tree::value_type;
+  using iterator = typename tree::iterator;
+  using const_iterator = typename tree::const_iterator;
+
+  // --------------------------------------------------------------------------
+  // Lifetime and assignments.
+  //
+  // Note: we could do away with these constructors, destructor and assignment
+  // operator overloads by inheriting |tree|'s, but this breaks the GCC build
+  // due to https://gcc.gnu.org/bugzilla/show_bug.cgi?id=84782 (see
+  // https://crbug.com/837221).
+
+  flat_map() = default;
+  explicit flat_map(const Compare& comp);
+
+  template <class InputIterator>
+  flat_map(InputIterator first,
+           InputIterator last,
+           FlatContainerDupes dupe_handling = KEEP_FIRST_OF_DUPES,
+           const Compare& comp = Compare());
+
+  flat_map(const flat_map&) = default;
+  flat_map(flat_map&&) noexcept = default;
+
+  flat_map(std::vector<value_type> items,
+           FlatContainerDupes dupe_handling = KEEP_FIRST_OF_DUPES,
+           const Compare& comp = Compare());
+
+  flat_map(std::initializer_list<value_type> ilist,
+           FlatContainerDupes dupe_handling = KEEP_FIRST_OF_DUPES,
+           const Compare& comp = Compare());
+
+  ~flat_map() = default;
+
+  flat_map& operator=(const flat_map&) = default;
+  flat_map& operator=(flat_map&&) = default;
+  // Takes the first if there are duplicates in the initializer list.
+  flat_map& operator=(std::initializer_list<value_type> ilist);
+
+  // --------------------------------------------------------------------------
+  // Map-specific insert operations.
+  //
+  // Normal insert() functions are inherited from flat_tree.
+  //
+  // Assume that every operation invalidates iterators and references.
+  // Insertion of one element can take O(size).
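+  //
+  // A brief sketch (illustrative only):
+  //
+  //   base::flat_map<std::string, int> counts;
+  //   counts["a"] = 1;                  // Inserts {"a", 1}.
+  //   counts.insert_or_assign("a", 2);  // Overwrites the existing value.
+  //   counts.try_emplace("b", 3);       // Inserts {"b", 3}.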
+
+  mapped_type& operator[](const key_type& key);
+  mapped_type& operator[](key_type&& key);
+
+  template <class K, class M>
+  std::pair<iterator, bool> insert_or_assign(K&& key, M&& obj);
+  template <class K, class M>
+  iterator insert_or_assign(const_iterator hint, K&& key, M&& obj);
+
+  template <class K, class... Args>
+  std::enable_if_t<std::is_constructible<key_type, K&&>::value,
+                   std::pair<iterator, bool>>
+  try_emplace(K&& key, Args&&... args);
+
+  template <class K, class... Args>
+  std::enable_if_t<std::is_constructible<key_type, K&&>::value, iterator>
+  try_emplace(const_iterator hint, K&& key, Args&&... args);
+
+  // --------------------------------------------------------------------------
+  // General operations.
+  //
+  // Assume that swap invalidates iterators and references.
+
+  void swap(flat_map& other) noexcept;
+
+  friend void swap(flat_map& lhs, flat_map& rhs) noexcept { lhs.swap(rhs); }
+};
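+
+// Example usage (an illustrative sketch, not part of the API contract):
+//
+//   base::flat_map<std::string, int> counts;
+//   counts["apple"] = 1;                  // default-construct, then assign
+//   counts.insert_or_assign("apple", 2);  // overwrite; second member false
+//   counts.try_emplace("pear", 3);        // no-op if "pear" already present
+//   // Iteration visits entries in key order: ("apple", 2), ("pear", 3).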
+
+// ----------------------------------------------------------------------------
+// Lifetime.
+
+template <class Key, class Mapped, class Compare>
+flat_map<Key, Mapped, Compare>::flat_map(const Compare& comp) : tree(comp) {}
+
+template <class Key, class Mapped, class Compare>
+template <class InputIterator>
+flat_map<Key, Mapped, Compare>::flat_map(InputIterator first,
+                                         InputIterator last,
+                                         FlatContainerDupes dupe_handling,
+                                         const Compare& comp)
+    : tree(first, last, dupe_handling, comp) {}
+
+template <class Key, class Mapped, class Compare>
+flat_map<Key, Mapped, Compare>::flat_map(std::vector<value_type> items,
+                                         FlatContainerDupes dupe_handling,
+                                         const Compare& comp)
+    : tree(std::move(items), dupe_handling, comp) {}
+
+template <class Key, class Mapped, class Compare>
+flat_map<Key, Mapped, Compare>::flat_map(
+    std::initializer_list<value_type> ilist,
+    FlatContainerDupes dupe_handling,
+    const Compare& comp)
+    : flat_map(std::begin(ilist), std::end(ilist), dupe_handling, comp) {}
+
+// ----------------------------------------------------------------------------
+// Assignments.
+
+template <class Key, class Mapped, class Compare>
+auto flat_map<Key, Mapped, Compare>::operator=(
+    std::initializer_list<value_type> ilist) -> flat_map& {
+  // When https://gcc.gnu.org/bugzilla/show_bug.cgi?id=84782 gets fixed, we
+  // need to remember to inherit tree::operator= to prevent
+  //   flat_map<...> x;
+  //   x = {...};
+  // from first creating a flat_map and then move assigning it. This most
+  // likely would be optimized away but still affects our debug builds.
+  tree::operator=(ilist);
+  return *this;
+}
+
+// ----------------------------------------------------------------------------
+// Insert operations.
+
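+// Note: in the implementations below, |key| appears twice in the calls to the
+// flat_tree helpers: once as an lvalue used for the lookup, and once
+// forwarded for construction. The forwarded instance is only consumed when an
+// insertion actually happens, so the lookup never observes a moved-from key.
+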
+template <class Key, class Mapped, class Compare>
+auto flat_map<Key, Mapped, Compare>::operator[](const key_type& key)
+    -> mapped_type& {
+  iterator found = tree::lower_bound(key);
+  if (found == tree::end() || tree::key_comp()(key, found->first))
+    found = tree::unsafe_emplace(found, key, mapped_type());
+  return found->second;
+}
+
+template <class Key, class Mapped, class Compare>
+auto flat_map<Key, Mapped, Compare>::operator[](key_type&& key)
+    -> mapped_type& {
+  iterator found = tree::lower_bound(key);
+  if (found == tree::end() || tree::key_comp()(key, found->first))
+    found = tree::unsafe_emplace(found, std::move(key), mapped_type());
+  return found->second;
+}
+
+template <class Key, class Mapped, class Compare>
+template <class K, class M>
+auto flat_map<Key, Mapped, Compare>::insert_or_assign(K&& key, M&& obj)
+    -> std::pair<iterator, bool> {
+  auto result =
+      tree::emplace_key_args(key, std::forward<K>(key), std::forward<M>(obj));
+  if (!result.second)
+    result.first->second = std::forward<M>(obj);
+  return result;
+}
+
+template <class Key, class Mapped, class Compare>
+template <class K, class M>
+auto flat_map<Key, Mapped, Compare>::insert_or_assign(const_iterator hint,
+                                                      K&& key,
+                                                      M&& obj) -> iterator {
+  auto result = tree::emplace_hint_key_args(hint, key, std::forward<K>(key),
+                                            std::forward<M>(obj));
+  if (!result.second)
+    result.first->second = std::forward<M>(obj);
+  return result.first;
+}
+
+template <class Key, class Mapped, class Compare>
+template <class K, class... Args>
+auto flat_map<Key, Mapped, Compare>::try_emplace(K&& key, Args&&... args)
+    -> std::enable_if_t<std::is_constructible<key_type, K&&>::value,
+                        std::pair<iterator, bool>> {
+  return tree::emplace_key_args(
+      key, std::piecewise_construct,
+      std::forward_as_tuple(std::forward<K>(key)),
+      std::forward_as_tuple(std::forward<Args>(args)...));
+}
+
+template <class Key, class Mapped, class Compare>
+template <class K, class... Args>
+auto flat_map<Key, Mapped, Compare>::try_emplace(const_iterator hint,
+                                                 K&& key,
+                                                 Args&&... args)
+    -> std::enable_if_t<std::is_constructible<key_type, K&&>::value, iterator> {
+  return tree::emplace_hint_key_args(
+             hint, key, std::piecewise_construct,
+             std::forward_as_tuple(std::forward<K>(key)),
+             std::forward_as_tuple(std::forward<Args>(args)...))
+      .first;
+}
+
+// ----------------------------------------------------------------------------
+// General operations.
+
+template <class Key, class Mapped, class Compare>
+void flat_map<Key, Mapped, Compare>::swap(flat_map& other) noexcept {
+  tree::swap(other);
+}
+
+}  // namespace base
+
+#endif  // BASE_CONTAINERS_FLAT_MAP_H_
diff --git a/base/containers/flat_map_unittest.cc b/base/containers/flat_map_unittest.cc
new file mode 100644
index 0000000..87958bd
--- /dev/null
+++ b/base/containers/flat_map_unittest.cc
@@ -0,0 +1,369 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/containers/flat_map.h"
+
+#include <string>
+#include <vector>
+
+#include "base/macros.h"
+#include "base/test/move_only_int.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// A flat_map is basically an interface to flat_tree. Several basic operations
+// are tested here to make sure things are set up properly, but the bulk of
+// the tests are in flat_tree_unittest.cc.
+
+using ::testing::ElementsAre;
+
+namespace base {
+
+TEST(FlatMap, IncompleteType) {
+  struct A {
+    using Map = flat_map<A, A>;
+    int data;
+    Map set_with_incomplete_type;
+    Map::iterator it;
+    Map::const_iterator cit;
+
+    // We do not declare operator< because clang complains that it's unused.
+  };
+
+  A a;
+}
+
+TEST(FlatMap, RangeConstructor) {
+  flat_map<int, int>::value_type input_vals[] = {
+      {1, 1}, {1, 2}, {1, 3}, {2, 1}, {2, 2}, {2, 3}, {3, 1}, {3, 2}, {3, 3}};
+
+  flat_map<int, int> first(std::begin(input_vals), std::end(input_vals));
+  EXPECT_THAT(first, ElementsAre(std::make_pair(1, 1), std::make_pair(2, 1),
+                                 std::make_pair(3, 1)));
+
+  flat_map<int, int> last(std::begin(input_vals), std::end(input_vals),
+                          KEEP_LAST_OF_DUPES);
+  EXPECT_THAT(last, ElementsAre(std::make_pair(1, 3), std::make_pair(2, 3),
+                                std::make_pair(3, 3)));
+}
+
+TEST(FlatMap, MoveConstructor) {
+  using pair = std::pair<MoveOnlyInt, MoveOnlyInt>;
+
+  flat_map<MoveOnlyInt, MoveOnlyInt> original;
+  original.insert(pair(MoveOnlyInt(1), MoveOnlyInt(1)));
+  original.insert(pair(MoveOnlyInt(2), MoveOnlyInt(2)));
+  original.insert(pair(MoveOnlyInt(3), MoveOnlyInt(3)));
+  original.insert(pair(MoveOnlyInt(4), MoveOnlyInt(4)));
+
+  flat_map<MoveOnlyInt, MoveOnlyInt> moved(std::move(original));
+
+  EXPECT_EQ(1U, moved.count(MoveOnlyInt(1)));
+  EXPECT_EQ(1U, moved.count(MoveOnlyInt(2)));
+  EXPECT_EQ(1U, moved.count(MoveOnlyInt(3)));
+  EXPECT_EQ(1U, moved.count(MoveOnlyInt(4)));
+}
+
+TEST(FlatMap, VectorConstructor) {
+  using IntPair = std::pair<int, int>;
+  using IntMap = flat_map<int, int>;
+  {
+    std::vector<IntPair> vect{{1, 1}, {1, 2}, {2, 1}};
+    IntMap map(std::move(vect));
+    EXPECT_THAT(map, ElementsAre(IntPair(1, 1), IntPair(2, 1)));
+  }
+  {
+    std::vector<IntPair> vect{{1, 1}, {1, 2}, {2, 1}};
+    IntMap map(std::move(vect), KEEP_LAST_OF_DUPES);
+    EXPECT_THAT(map, ElementsAre(IntPair(1, 2), IntPair(2, 1)));
+  }
+}
+
+TEST(FlatMap, InitializerListConstructor) {
+  {
+    flat_map<int, int> cont(
+        {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}, {1, 2}, {10, 10}, {8, 8}});
+    EXPECT_THAT(cont, ElementsAre(std::make_pair(1, 1), std::make_pair(2, 2),
+                                  std::make_pair(3, 3), std::make_pair(4, 4),
+                                  std::make_pair(5, 5), std::make_pair(8, 8),
+                                  std::make_pair(10, 10)));
+  }
+  {
+    flat_map<int, int> cont(
+        {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}, {1, 2}, {10, 10}, {8, 8}},
+        KEEP_LAST_OF_DUPES);
+    EXPECT_THAT(cont, ElementsAre(std::make_pair(1, 2), std::make_pair(2, 2),
+                                  std::make_pair(3, 3), std::make_pair(4, 4),
+                                  std::make_pair(5, 5), std::make_pair(8, 8),
+                                  std::make_pair(10, 10)));
+  }
+}
+
+TEST(FlatMap, InitializerListAssignment) {
+  flat_map<int, int> cont;
+  cont = {{1, 1}, {2, 2}};
+  EXPECT_THAT(cont, ElementsAre(std::make_pair(1, 1), std::make_pair(2, 2)));
+}
+
+TEST(FlatMap, InsertFindSize) {
+  base::flat_map<int, int> s;
+  s.insert(std::make_pair(1, 1));
+  s.insert(std::make_pair(1, 1));
+  s.insert(std::make_pair(2, 2));
+
+  EXPECT_EQ(2u, s.size());
+  EXPECT_EQ(std::make_pair(1, 1), *s.find(1));
+  EXPECT_EQ(std::make_pair(2, 2), *s.find(2));
+  EXPECT_EQ(s.end(), s.find(7));
+}
+
+TEST(FlatMap, CopySwap) {
+  base::flat_map<int, int> original;
+  original.insert({1, 1});
+  original.insert({2, 2});
+  EXPECT_THAT(original,
+              ElementsAre(std::make_pair(1, 1), std::make_pair(2, 2)));
+
+  base::flat_map<int, int> copy(original);
+  EXPECT_THAT(copy, ElementsAre(std::make_pair(1, 1), std::make_pair(2, 2)));
+
+  copy.erase(copy.begin());
+  copy.insert({10, 10});
+  EXPECT_THAT(copy, ElementsAre(std::make_pair(2, 2), std::make_pair(10, 10)));
+
+  original.swap(copy);
+  EXPECT_THAT(original,
+              ElementsAre(std::make_pair(2, 2), std::make_pair(10, 10)));
+  EXPECT_THAT(copy, ElementsAre(std::make_pair(1, 1), std::make_pair(2, 2)));
+}
+
+// operator[](const Key&)
+TEST(FlatMap, SubscriptConstKey) {
+  base::flat_map<std::string, int> m;
+
+  // Default construct elements that don't exist yet.
+  int& s = m["a"];
+  EXPECT_EQ(0, s);
+  EXPECT_EQ(1u, m.size());
+
+  // The returned mapped reference should refer into the map.
+  s = 22;
+  EXPECT_EQ(22, m["a"]);
+
+  // Overwrite existing elements.
+  m["a"] = 44;
+  EXPECT_EQ(44, m["a"]);
+}
+
+// operator[](Key&&)
+TEST(FlatMap, SubscriptMoveOnlyKey) {
+  base::flat_map<MoveOnlyInt, int> m;
+
+  // Default construct elements that don't exist yet.
+  int& s = m[MoveOnlyInt(1)];
+  EXPECT_EQ(0, s);
+  EXPECT_EQ(1u, m.size());
+
+  // The returned mapped reference should refer into the map.
+  s = 22;
+  EXPECT_EQ(22, m[MoveOnlyInt(1)]);
+
+  // Overwrite existing elements.
+  m[MoveOnlyInt(1)] = 44;
+  EXPECT_EQ(44, m[MoveOnlyInt(1)]);
+}
+
+// insert_or_assign(K&&, M&&)
+TEST(FlatMap, InsertOrAssignMoveOnlyKey) {
+  base::flat_map<MoveOnlyInt, MoveOnlyInt> m;
+
+  // Initial insertion should return an iterator to the element and set the
+  // second pair member to |true|. The inserted key and value should be moved
+  // from.
+  MoveOnlyInt key(1);
+  MoveOnlyInt val(22);
+  auto result = m.insert_or_assign(std::move(key), std::move(val));
+  EXPECT_EQ(1, result.first->first.data());
+  EXPECT_EQ(22, result.first->second.data());
+  EXPECT_TRUE(result.second);
+  EXPECT_EQ(1u, m.size());
+  EXPECT_EQ(0, key.data());  // moved from
+  EXPECT_EQ(0, val.data());  // moved from
+
+  // Second call with same key should result in an assignment, overwriting the
+  // old value. Assignment should be indicated by setting the second pair member
+  // to |false|. Only the inserted value should be moved from, the key should be
+  // left intact.
+  key = MoveOnlyInt(1);
+  val = MoveOnlyInt(44);
+  result = m.insert_or_assign(std::move(key), std::move(val));
+  EXPECT_EQ(1, result.first->first.data());
+  EXPECT_EQ(44, result.first->second.data());
+  EXPECT_FALSE(result.second);
+  EXPECT_EQ(1u, m.size());
+  EXPECT_EQ(1, key.data());  // not moved from
+  EXPECT_EQ(0, val.data());  // moved from
+
+  // Check that random insertion results in sorted range.
+  base::flat_map<MoveOnlyInt, int> map;
+  for (int i : {3, 1, 5, 6, 8, 7, 0, 9, 4, 2}) {
+    map.insert_or_assign(MoveOnlyInt(i), i);
+    EXPECT_TRUE(std::is_sorted(map.begin(), map.end()));
+  }
+}
+
+// insert_or_assign(const_iterator hint, K&&, M&&)
+TEST(FlatMap, InsertOrAssignMoveOnlyKeyWithHint) {
+  base::flat_map<MoveOnlyInt, MoveOnlyInt> m;
+
+  // Initial insertion should return an iterator to the element. The inserted
+  // key and value should be moved from.
+  MoveOnlyInt key(1);
+  MoveOnlyInt val(22);
+  auto result = m.insert_or_assign(m.end(), std::move(key), std::move(val));
+  EXPECT_EQ(1, result->first.data());
+  EXPECT_EQ(22, result->second.data());
+  EXPECT_EQ(1u, m.size());
+  EXPECT_EQ(0, key.data());  // moved from
+  EXPECT_EQ(0, val.data());  // moved from
+
+  // Second call with same key should result in an assignment, overwriting the
+  // old value. Only the inserted value should be moved from, the key should be
+  // left intact.
+  key = MoveOnlyInt(1);
+  val = MoveOnlyInt(44);
+  result = m.insert_or_assign(m.end(), std::move(key), std::move(val));
+  EXPECT_EQ(1, result->first.data());
+  EXPECT_EQ(44, result->second.data());
+  EXPECT_EQ(1u, m.size());
+  EXPECT_EQ(1, key.data());  // not moved from
+  EXPECT_EQ(0, val.data());  // moved from
+
+  // Check that random insertion results in sorted range.
+  base::flat_map<MoveOnlyInt, int> map;
+  for (int i : {3, 1, 5, 6, 8, 7, 0, 9, 4, 2}) {
+    map.insert_or_assign(map.end(), MoveOnlyInt(i), i);
+    EXPECT_TRUE(std::is_sorted(map.begin(), map.end()));
+  }
+}
+
+// try_emplace(K&&, Args&&...)
+TEST(FlatMap, TryEmplaceMoveOnlyKey) {
+  base::flat_map<MoveOnlyInt, std::pair<MoveOnlyInt, MoveOnlyInt>> m;
+
+  // Trying to emplace into an empty map should succeed. Insertion should return
+  // an iterator to the element and set the second pair member to |true|. The
+  // inserted key and value should be moved from.
+  MoveOnlyInt key(1);
+  MoveOnlyInt val1(22);
+  MoveOnlyInt val2(44);
+  // Test piecewise construction of mapped_type.
+  auto result = m.try_emplace(std::move(key), std::move(val1), std::move(val2));
+  EXPECT_EQ(1, result.first->first.data());
+  EXPECT_EQ(22, result.first->second.first.data());
+  EXPECT_EQ(44, result.first->second.second.data());
+  EXPECT_TRUE(result.second);
+  EXPECT_EQ(1u, m.size());
+  EXPECT_EQ(0, key.data());   // moved from
+  EXPECT_EQ(0, val1.data());  // moved from
+  EXPECT_EQ(0, val2.data());  // moved from
+
+  // Second call with same key should result in a no-op, returning an iterator
+  // to the existing element and returning false as the second pair member.
+  // Key and values that were attempted to be inserted should be left intact.
+  key = MoveOnlyInt(1);
+  auto paired_val = std::make_pair(MoveOnlyInt(33), MoveOnlyInt(55));
+  // Test construction of mapped_type from pair.
+  result = m.try_emplace(std::move(key), std::move(paired_val));
+  EXPECT_EQ(1, result.first->first.data());
+  EXPECT_EQ(22, result.first->second.first.data());
+  EXPECT_EQ(44, result.first->second.second.data());
+  EXPECT_FALSE(result.second);
+  EXPECT_EQ(1u, m.size());
+  EXPECT_EQ(1, key.data());                 // not moved from
+  EXPECT_EQ(33, paired_val.first.data());   // not moved from
+  EXPECT_EQ(55, paired_val.second.data());  // not moved from
+
+  // Check that random insertion results in sorted range.
+  base::flat_map<MoveOnlyInt, int> map;
+  for (int i : {3, 1, 5, 6, 8, 7, 0, 9, 4, 2}) {
+    map.try_emplace(MoveOnlyInt(i), i);
+    EXPECT_TRUE(std::is_sorted(map.begin(), map.end()));
+  }
+}
+
+// try_emplace(const_iterator hint, K&&, Args&&...)
+TEST(FlatMap, TryEmplaceMoveOnlyKeyWithHint) {
+  base::flat_map<MoveOnlyInt, std::pair<MoveOnlyInt, MoveOnlyInt>> m;
+
+  // Trying to emplace into an empty map should succeed. Insertion should return
+  // an iterator to the element. The inserted key and value should be moved
+  // from.
+  MoveOnlyInt key(1);
+  MoveOnlyInt val1(22);
+  MoveOnlyInt val2(44);
+  // Test piecewise construction of mapped_type.
+  auto result =
+      m.try_emplace(m.end(), std::move(key), std::move(val1), std::move(val2));
+  EXPECT_EQ(1, result->first.data());
+  EXPECT_EQ(22, result->second.first.data());
+  EXPECT_EQ(44, result->second.second.data());
+  EXPECT_EQ(1u, m.size());
+  EXPECT_EQ(0, key.data());   // moved from
+  EXPECT_EQ(0, val1.data());  // moved from
+  EXPECT_EQ(0, val2.data());  // moved from
+
+  // Second call with same key should result in a no-op, returning an iterator
+  // to the existing element. Key and values that were attempted to be inserted
+  // should be left intact.
+  key = MoveOnlyInt(1);
+  val1 = MoveOnlyInt(33);
+  val2 = MoveOnlyInt(55);
+  auto paired_val = std::make_pair(MoveOnlyInt(33), MoveOnlyInt(55));
+  // Test construction of mapped_type from pair.
+  result = m.try_emplace(m.end(), std::move(key), std::move(paired_val));
+  EXPECT_EQ(1, result->first.data());
+  EXPECT_EQ(22, result->second.first.data());
+  EXPECT_EQ(44, result->second.second.data());
+  EXPECT_EQ(1u, m.size());
+  EXPECT_EQ(1, key.data());                 // not moved from
+  EXPECT_EQ(33, paired_val.first.data());   // not moved from
+  EXPECT_EQ(55, paired_val.second.data());  // not moved from
+
+  // Check that random insertion results in sorted range.
+  base::flat_map<MoveOnlyInt, int> map;
+  for (int i : {3, 1, 5, 6, 8, 7, 0, 9, 4, 2}) {
+    map.try_emplace(map.end(), MoveOnlyInt(i), i);
+    EXPECT_TRUE(std::is_sorted(map.begin(), map.end()));
+  }
+}
+
+TEST(FlatMap, UsingTransparentCompare) {
+  using ExplicitInt = base::MoveOnlyInt;
+  base::flat_map<ExplicitInt, int> m;
+  const auto& m1 = m;
+  int x = 0;
+
+  // Check if we can use lookup functions without converting to key_type.
+  // Correctness is checked in flat_tree tests.
+  m.count(x);
+  m1.count(x);
+  m.find(x);
+  m1.find(x);
+  m.equal_range(x);
+  m1.equal_range(x);
+  m.lower_bound(x);
+  m1.lower_bound(x);
+  m.upper_bound(x);
+  m1.upper_bound(x);
+  m.erase(x);
+
+  // Check if we broke overload resolution.
+  m.emplace(ExplicitInt(0), 0);
+  m.emplace(ExplicitInt(1), 0);
+  m.erase(m.begin());
+  m.erase(m.cbegin());
+}
+
+}  // namespace base
diff --git a/base/containers/flat_set.h b/base/containers/flat_set.h
new file mode 100644
index 0000000..bf14c36
--- /dev/null
+++ b/base/containers/flat_set.h
@@ -0,0 +1,140 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_FLAT_SET_H_
+#define BASE_CONTAINERS_FLAT_SET_H_
+
+#include <functional>
+
+#include "base/containers/flat_tree.h"
+#include "base/template_util.h"
+
+namespace base {
+
+// flat_set is a container with a std::set-like interface that stores its
+// contents in a sorted vector.
+//
+// Please see //base/containers/README.md for an overview of which container
+// to select.
+//
+// PROS
+//
+//  - Good memory locality.
+//  - Low overhead, especially for smaller sets.
+//  - Performance is good for more workloads than you might expect (see
+//    overview link above).
+//  - Supports C++14 set interface.
+//
+// CONS
+//
+//  - Inserts and removals are O(n).
+//
+// IMPORTANT NOTES
+//
+//  - Iterators are invalidated across mutations.
+//  - If possible, construct a flat_set in one operation by inserting into
+//    a std::vector and moving that vector into the flat_set constructor.
+//  - For multiple removals use base::EraseIf() which is O(n) rather than
+//    O(n * removed_items).
+//
+// QUICK REFERENCE
+//
+// Most of the core functionality is inherited from flat_tree. Please see
+// flat_tree.h for more details for most of these functions. As a quick
+// reference, the functions available are:
+//
+// Constructors (inputs need not be sorted):
+//   flat_set(InputIterator first, InputIterator last,
+//            FlatContainerDupes = KEEP_FIRST_OF_DUPES,
+//            const Compare& compare = Compare());
+//   flat_set(const flat_set&);
+//   flat_set(flat_set&&);
+//   flat_set(std::vector<Key>,
+//            FlatContainerDupes = KEEP_FIRST_OF_DUPES,
+//            const Compare& compare = Compare());  // Re-use storage.
+//   flat_set(std::initializer_list<value_type> ilist,
+//            FlatContainerDupes = KEEP_FIRST_OF_DUPES,
+//            const Compare& comp = Compare());
+//
+// Assignment functions:
+//   flat_set& operator=(const flat_set&);
+//   flat_set& operator=(flat_set&&);
+//   flat_set& operator=(initializer_list<Key>);
+//
+// Memory management functions:
+//   void   reserve(size_t);
+//   size_t capacity() const;
+//   void   shrink_to_fit();
+//
+// Size management functions:
+//   void   clear();
+//   size_t size() const;
+//   size_t max_size() const;
+//   bool   empty() const;
+//
+// Iterator functions:
+//   iterator               begin();
+//   const_iterator         begin() const;
+//   const_iterator         cbegin() const;
+//   iterator               end();
+//   const_iterator         end() const;
+//   const_iterator         cend() const;
+//   reverse_iterator       rbegin();
+//   const_reverse_iterator rbegin() const;
+//   const_reverse_iterator crbegin() const;
+//   reverse_iterator       rend();
+//   const_reverse_iterator rend() const;
+//   const_reverse_iterator crend() const;
+//
+// Insert and accessor functions:
+//   pair<iterator, bool> insert(const key_type&);
+//   pair<iterator, bool> insert(key_type&&);
+//   void                 insert(InputIterator first, InputIterator last,
+//                               FlatContainerDupes = KEEP_FIRST_OF_DUPES);
+//   iterator             insert(const_iterator hint, const key_type&);
+//   iterator             insert(const_iterator hint, key_type&&);
+//   pair<iterator, bool> emplace(Args&&...);
+//   iterator             emplace_hint(const_iterator, Args&&...);
+//
+// Erase functions:
+//   iterator erase(iterator);
+//   iterator erase(const_iterator);
+//   iterator erase(const_iterator first, const_iterator last);
+//   template <typename K> size_t erase(const K& key);
+//
+// Comparators (see std::set documentation).
+//   key_compare   key_comp() const;
+//   value_compare value_comp() const;
+//
+// Search functions:
+//   template <typename K> size_t                   count(const K&) const;
+//   template <typename K> iterator                 find(const K&);
+//   template <typename K> const_iterator           find(const K&) const;
+//   template <typename K> pair<iterator, iterator> equal_range(const K&);
+//   template <typename K> iterator                 lower_bound(const K&);
+//   template <typename K> const_iterator           lower_bound(const K&) const;
+//   template <typename K> iterator                 upper_bound(const K&);
+//   template <typename K> const_iterator           upper_bound(const K&) const;
+//
+// General functions:
+//   void swap(flat_set&);
+//
+// Non-member operators:
+//   bool operator==(const flat_set&, const flat_set&);
+//   bool operator!=(const flat_set&, const flat_set&);
+//   bool operator<(const flat_set&, const flat_set&);
+//   bool operator>(const flat_set&, const flat_set&);
+//   bool operator>=(const flat_set&, const flat_set&);
+//   bool operator<=(const flat_set&, const flat_set&);
+//
+template <class Key, class Compare = std::less<>>
+using flat_set = typename ::base::internal::flat_tree<
+    Key,
+    Key,
+    ::base::internal::GetKeyFromValueIdentity<Key>,
+    Compare>;
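+
+// Example usage (an illustrative sketch):
+//
+//   base::flat_set<int> s({3, 1, 2, 1});  // duplicates removed, kept sorted
+//   // s now contains {1, 2, 3}; lookups are O(log n), mutations O(n).
+//   // Prefer building the elements in a std::vector and moving it into the
+//   // constructor over calling insert() repeatedly.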
+
+}  // namespace base
+
+#endif  // BASE_CONTAINERS_FLAT_SET_H_
diff --git a/base/containers/flat_set_unittest.cc b/base/containers/flat_set_unittest.cc
new file mode 100644
index 0000000..4596975
--- /dev/null
+++ b/base/containers/flat_set_unittest.cc
@@ -0,0 +1,121 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/containers/flat_set.h"
+
+#include <string>
+#include <vector>
+
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/test/move_only_int.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// A flat_set is basically an interface to flat_tree. Several basic operations
+// are tested here to make sure things are set up properly, but the bulk of
+// the tests are in flat_tree_unittest.cc.
+
+using ::testing::ElementsAre;
+
+namespace base {
+
+TEST(FlatSet, IncompleteType) {
+  struct A {
+    using Set = flat_set<A>;
+    int data;
+    Set set_with_incomplete_type;
+    Set::iterator it;
+    Set::const_iterator cit;
+
+    // We do not declare operator< because clang complains that it's unused.
+  };
+
+  A a;
+}
+
+TEST(FlatSet, RangeConstructor) {
+  flat_set<int>::value_type input_vals[] = {1, 1, 1, 2, 2, 2, 3, 3, 3};
+
+  flat_set<int> cont(std::begin(input_vals), std::end(input_vals),
+                     base::KEEP_FIRST_OF_DUPES);
+  EXPECT_THAT(cont, ElementsAre(1, 2, 3));
+}
+
+TEST(FlatSet, MoveConstructor) {
+  int input_range[] = {1, 2, 3, 4};
+
+  flat_set<MoveOnlyInt> original(std::begin(input_range), std::end(input_range),
+                                 base::KEEP_FIRST_OF_DUPES);
+  flat_set<MoveOnlyInt> moved(std::move(original));
+
+  EXPECT_EQ(1U, moved.count(MoveOnlyInt(1)));
+  EXPECT_EQ(1U, moved.count(MoveOnlyInt(2)));
+  EXPECT_EQ(1U, moved.count(MoveOnlyInt(3)));
+  EXPECT_EQ(1U, moved.count(MoveOnlyInt(4)));
+}
+
+TEST(FlatSet, InitializerListConstructor) {
+  flat_set<int> cont({1, 2, 3, 4, 5, 6, 10, 8}, KEEP_FIRST_OF_DUPES);
+  EXPECT_THAT(cont, ElementsAre(1, 2, 3, 4, 5, 6, 8, 10));
+}
+
+TEST(FlatSet, InsertFindSize) {
+  base::flat_set<int> s;
+  s.insert(1);
+  s.insert(1);
+  s.insert(2);
+
+  EXPECT_EQ(2u, s.size());
+  EXPECT_EQ(1, *s.find(1));
+  EXPECT_EQ(2, *s.find(2));
+  EXPECT_EQ(s.end(), s.find(7));
+}
+
+TEST(FlatSet, CopySwap) {
+  base::flat_set<int> original;
+  original.insert(1);
+  original.insert(2);
+  EXPECT_THAT(original, ElementsAre(1, 2));
+
+  base::flat_set<int> copy(original);
+  EXPECT_THAT(copy, ElementsAre(1, 2));
+
+  copy.erase(copy.begin());
+  copy.insert(10);
+  EXPECT_THAT(copy, ElementsAre(2, 10));
+
+  original.swap(copy);
+  EXPECT_THAT(original, ElementsAre(2, 10));
+  EXPECT_THAT(copy, ElementsAre(1, 2));
+}
+
+TEST(FlatSet, UsingTransparentCompare) {
+  using ExplicitInt = base::MoveOnlyInt;
+  base::flat_set<ExplicitInt> s;
+  const auto& s1 = s;
+  int x = 0;
+
+  // Check if we can use lookup functions without converting to key_type.
+  // Correctness is checked in flat_tree tests.
+  s.count(x);
+  s1.count(x);
+  s.find(x);
+  s1.find(x);
+  s.equal_range(x);
+  s1.equal_range(x);
+  s.lower_bound(x);
+  s1.lower_bound(x);
+  s.upper_bound(x);
+  s1.upper_bound(x);
+  s.erase(x);
+
+  // Check if we broke overload resolution.
+  s.emplace(0);
+  s.emplace(1);
+  s.erase(s.begin());
+  s.erase(s.cbegin());
+}
+
+}  // namespace base
diff --git a/base/containers/flat_tree.h b/base/containers/flat_tree.h
new file mode 100644
index 0000000..7856e24
--- /dev/null
+++ b/base/containers/flat_tree.h
@@ -0,0 +1,1004 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_FLAT_TREE_H_
+#define BASE_CONTAINERS_FLAT_TREE_H_
+
+#include <algorithm>
+#include <iterator>
+#include <type_traits>
+#include <vector>
+
+#include "base/template_util.h"
+
+namespace base {
+
+enum FlatContainerDupes {
+  KEEP_FIRST_OF_DUPES,
+  KEEP_LAST_OF_DUPES,
+};
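+
+// For example, constructing a flat_map from {{1, "a"}, {1, "b"}} keeps
+// {1, "a"} with KEEP_FIRST_OF_DUPES and {1, "b"} with KEEP_LAST_OF_DUPES.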
+
+namespace internal {
+
+// This is a convenience method returning true if Iterator is at least a
+// ForwardIterator and thus supports multiple passes over a range.
+template <class Iterator>
+constexpr bool is_multipass() {
+  return std::is_base_of<
+      std::forward_iterator_tag,
+      typename std::iterator_traits<Iterator>::iterator_category>::value;
+}
+
+// This algorithm is like unique() from the standard library except it
+// selects only the last of consecutive values instead of the first.
+template <class Iterator, class BinaryPredicate>
+Iterator LastUnique(Iterator first, Iterator last, BinaryPredicate compare) {
+  Iterator replaceable = std::adjacent_find(first, last, compare);
+
+  // No duplicate elements found.
+  if (replaceable == last)
+    return last;
+
+  first = std::next(replaceable);
+
+  // Last element is a duplicate but all others are unique.
+  if (first == last)
+    return replaceable;
+
+  // This loop resembles std::adjacent_find, but where std::adjacent_find
+  // stops at the first pair of duplicates, this compacts every run of
+  // duplicates, keeping only the last element of each run.
+  for (Iterator next = std::next(first); next != last; ++next, ++first) {
+    if (!compare(*first, *next))
+      *replaceable++ = std::move(*first);
+  }
+
+  // Last element should be moved unconditionally.
+  *replaceable++ = std::move(*first);
+  return replaceable;
+}
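+
+// For example, given the already-sorted pairs (1, a), (1, b), (2, c) and a
+// comparator on the first member, LastUnique() keeps (1, b) and (2, c),
+// whereas std::unique() would keep (1, a) and (2, c).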
+
+// Uses SFINAE to detect whether type has is_transparent member.
+template <typename T, typename = void>
+struct IsTransparentCompare : std::false_type {};
+template <typename T>
+struct IsTransparentCompare<T, void_t<typename T::is_transparent>>
+    : std::true_type {};
+
+// Implementation -------------------------------------------------------------
+
+// Implementation of a sorted vector for backing flat_set and flat_map. Do not
+// use directly.
+//
+// The use of "value" here matches std::map's usage, meaning it's the thing
+// contained (in the case of a map it's a <Key, Mapped> pair). The Key is how
+// things are looked up. In the case of a set, Key == Value. In the case of
+// a map, the Key is a component of a Value.
+//
+// The helper class GetKeyFromValue provides the means to extract a key from a
+// value for comparison purposes. It should implement:
+//   const Key& operator()(const Value&).
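+//
+// For instance, a map-style extractor has roughly this shape (an illustrative
+// sketch; the actual helpers used by flat_map and flat_set are defined
+// separately):
+//
+//   template <class Key, class Mapped>
+//   struct GetKeyFromValuePairFirst {
+//     const Key& operator()(const std::pair<Key, Mapped>& p) const {
+//       return p.first;
+//     }
+//   };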
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+class flat_tree {
+ private:
+  using underlying_type = std::vector<Value>;
+
+ public:
+  // --------------------------------------------------------------------------
+  // Types.
+  //
+  using key_type = Key;
+  using key_compare = KeyCompare;
+  using value_type = Value;
+
+  // Wraps the templated key comparison to compare values.
+  class value_compare : public key_compare {
+   public:
+    value_compare() = default;
+
+    template <class Cmp>
+    explicit value_compare(Cmp&& compare_arg)
+        : KeyCompare(std::forward<Cmp>(compare_arg)) {}
+
+    bool operator()(const value_type& left, const value_type& right) const {
+      GetKeyFromValue extractor;
+      return key_compare::operator()(extractor(left), extractor(right));
+    }
+  };
+
+  using pointer = typename underlying_type::pointer;
+  using const_pointer = typename underlying_type::const_pointer;
+  using reference = typename underlying_type::reference;
+  using const_reference = typename underlying_type::const_reference;
+  using size_type = typename underlying_type::size_type;
+  using difference_type = typename underlying_type::difference_type;
+  using iterator = typename underlying_type::iterator;
+  using const_iterator = typename underlying_type::const_iterator;
+  using reverse_iterator = typename underlying_type::reverse_iterator;
+  using const_reverse_iterator =
+      typename underlying_type::const_reverse_iterator;
+
+  // --------------------------------------------------------------------------
+  // Lifetime.
+  //
+  // Constructors that take a range guarantee O(N * log^2(N)) + O(N)
+  // complexity, and O(N * log(N)) + O(N) if extra memory is available (N is
+  // the length of the range).
+  //
+  // Assume that move constructors invalidate iterators and references.
+  //
+  // The constructors that take ranges, lists, and vectors do not require that
+  // the input be sorted.
+
+  flat_tree();
+  explicit flat_tree(const key_compare& comp);
+
+  template <class InputIterator>
+  flat_tree(InputIterator first,
+            InputIterator last,
+            FlatContainerDupes dupe_handling = KEEP_FIRST_OF_DUPES,
+            const key_compare& comp = key_compare());
+
+  flat_tree(const flat_tree&);
+  flat_tree(flat_tree&&) noexcept = default;
+
+  flat_tree(std::vector<value_type> items,
+            FlatContainerDupes dupe_handling = KEEP_FIRST_OF_DUPES,
+            const key_compare& comp = key_compare());
+
+  flat_tree(std::initializer_list<value_type> ilist,
+            FlatContainerDupes dupe_handling = KEEP_FIRST_OF_DUPES,
+            const key_compare& comp = key_compare());
+
+  ~flat_tree();
+
+  // --------------------------------------------------------------------------
+  // Assignments.
+  //
+  // Assume that move assignment invalidates iterators and references.
+
+  flat_tree& operator=(const flat_tree&);
+  flat_tree& operator=(flat_tree&&);
+  // Takes the first if there are duplicates in the initializer list.
+  flat_tree& operator=(std::initializer_list<value_type> ilist);
+
+  // --------------------------------------------------------------------------
+  // Memory management.
+  //
+  // Beware that shrink_to_fit() simply forwards the request to the
+  // underlying_type, whose implementation is free to ignore it and leave
+  // capacity() greater than size().
+  //
+  // reserve() and shrink_to_fit() invalidate iterators and references.
+
+  void reserve(size_type new_capacity);
+  size_type capacity() const;
+  void shrink_to_fit();
+
+  // --------------------------------------------------------------------------
+  // Size management.
+  //
+  // clear() leaves the capacity() of the flat_tree unchanged.
+
+  void clear();
+
+  size_type size() const;
+  size_type max_size() const;
+  bool empty() const;
+
+  // --------------------------------------------------------------------------
+  // Iterators.
+
+  iterator begin();
+  const_iterator begin() const;
+  const_iterator cbegin() const;
+
+  iterator end();
+  const_iterator end() const;
+  const_iterator cend() const;
+
+  reverse_iterator rbegin();
+  const_reverse_iterator rbegin() const;
+  const_reverse_iterator crbegin() const;
+
+  reverse_iterator rend();
+  const_reverse_iterator rend() const;
+  const_reverse_iterator crend() const;
+
+  // --------------------------------------------------------------------------
+  // Insert operations.
+  //
+  // Assume that every operation invalidates iterators and references.
+  // Insertion of one element can take O(size). Capacity of flat_tree grows in
+  // an implementation-defined manner.
+  //
+  // NOTE: Prefer to build a new flat_tree from a std::vector (or similar)
+  // instead of calling insert() repeatedly.
+
+  std::pair<iterator, bool> insert(const value_type& val);
+  std::pair<iterator, bool> insert(value_type&& val);
+
+  iterator insert(const_iterator position_hint, const value_type& x);
+  iterator insert(const_iterator position_hint, value_type&& x);
+
+  // This method inserts the values from the range [first, last) into the
+  // current tree. In case of KEEP_LAST_OF_DUPES newly added elements can
+  // overwrite existing values.
+  template <class InputIterator>
+  void insert(InputIterator first,
+              InputIterator last,
+              FlatContainerDupes dupes = KEEP_FIRST_OF_DUPES);
+
+  template <class... Args>
+  std::pair<iterator, bool> emplace(Args&&... args);
+
+  template <class... Args>
+  iterator emplace_hint(const_iterator position_hint, Args&&... args);
+
+  // --------------------------------------------------------------------------
+  // Erase operations.
+  //
+  // Assume that every operation invalidates iterators and references.
+  //
+  // erase(position), erase(first, last) can take O(size).
+  // erase(key) may take O(size) + O(log(size)).
+  //
+  // Prefer base::EraseIf() or some other variation on erase(remove(), end())
+  // idiom when deleting multiple non-consecutive elements.
+
+  iterator erase(iterator position);
+  iterator erase(const_iterator position);
+  iterator erase(const_iterator first, const_iterator last);
+  template <typename K>
+  size_type erase(const K& key);
+
+  // --------------------------------------------------------------------------
+  // Comparators.
+
+  key_compare key_comp() const;
+  value_compare value_comp() const;
+
+  // --------------------------------------------------------------------------
+  // Search operations.
+  //
+  // Search operations have O(log(size)) complexity.
+
+  template <typename K>
+  size_type count(const K& key) const;
+
+  template <typename K>
+  iterator find(const K& key);
+
+  template <typename K>
+  const_iterator find(const K& key) const;
+
+  template <typename K>
+  std::pair<iterator, iterator> equal_range(const K& key);
+
+  template <typename K>
+  std::pair<const_iterator, const_iterator> equal_range(const K& key) const;
+
+  template <typename K>
+  iterator lower_bound(const K& key);
+
+  template <typename K>
+  const_iterator lower_bound(const K& key) const;
+
+  template <typename K>
+  iterator upper_bound(const K& key);
+
+  template <typename K>
+  const_iterator upper_bound(const K& key) const;
+
+  // --------------------------------------------------------------------------
+  // General operations.
+  //
+  // Assume that swap invalidates iterators and references.
+  //
+  // Implementation note: currently we use operator==() and operator<() on
+  // std::vector, because they have the same contract we need, so we use them
+  // directly for brevity and in case it is more optimal than calling equal()
+  // and lexicographical_compare(). If the underlying container type is changed,
+  // this code may need to be modified.
+
+  void swap(flat_tree& other) noexcept;
+
+  friend bool operator==(const flat_tree& lhs, const flat_tree& rhs) {
+    return lhs.impl_.body_ == rhs.impl_.body_;
+  }
+
+  friend bool operator!=(const flat_tree& lhs, const flat_tree& rhs) {
+    return !(lhs == rhs);
+  }
+
+  friend bool operator<(const flat_tree& lhs, const flat_tree& rhs) {
+    return lhs.impl_.body_ < rhs.impl_.body_;
+  }
+
+  friend bool operator>(const flat_tree& lhs, const flat_tree& rhs) {
+    return rhs < lhs;
+  }
+
+  friend bool operator>=(const flat_tree& lhs, const flat_tree& rhs) {
+    return !(lhs < rhs);
+  }
+
+  friend bool operator<=(const flat_tree& lhs, const flat_tree& rhs) {
+    return !(lhs > rhs);
+  }
+
+  friend void swap(flat_tree& lhs, flat_tree& rhs) noexcept { lhs.swap(rhs); }
+
+ protected:
+  // Emplaces a new item into the tree that is known not to be in it. This
+  // is for implementing map operator[].
+  template <class... Args>
+  iterator unsafe_emplace(const_iterator position, Args&&... args);
+
+  // Attempts to emplace a new element with key |key|. Only if |key| is not yet
+  // present, construct value_type from |args| and insert it. Returns an
+  // iterator to the element with key |key| and a bool indicating whether an
+  // insertion happened.
+  template <class K, class... Args>
+  std::pair<iterator, bool> emplace_key_args(const K& key, Args&&... args);
+
+  // Similar to |emplace_key_args|, but checks |hint| first as a possible
+  // insertion position.
+  template <class K, class... Args>
+  std::pair<iterator, bool> emplace_hint_key_args(const_iterator hint,
+                                                  const K& key,
+                                                  Args&&... args);
+
+ private:
+  // Helper class for e.g. lower_bound that can compare a value on the left
+  // to a key on the right.
+  struct KeyValueCompare {
+    // The key comparison object must outlive this class.
+    explicit KeyValueCompare(const key_compare& key_comp)
+        : key_comp_(key_comp) {}
+
+    template <typename T, typename U>
+    bool operator()(const T& lhs, const U& rhs) const {
+      return key_comp_(extract_if_value_type(lhs), extract_if_value_type(rhs));
+    }
+
+   private:
+    const key_type& extract_if_value_type(const value_type& v) const {
+      GetKeyFromValue extractor;
+      return extractor(v);
+    }
+
+    template <typename K>
+    const K& extract_if_value_type(const K& k) const {
+      return k;
+    }
+
+    const key_compare& key_comp_;
+  };
+
+  const flat_tree& as_const() { return *this; }
+
+  iterator const_cast_it(const_iterator c_it) {
+    auto distance = std::distance(cbegin(), c_it);
+    return std::next(begin(), distance);
+  }
+
+  // This method is inspired by both std::map::insert(P&&) and
+  // std::map::insert_or_assign(const K&, V&&). It inserts val if an equivalent
+  // element is not present yet, otherwise it overwrites. It returns an iterator
+  // to the modified element and a flag indicating whether insertion or
+  // assignment happened.
+  template <class V>
+  std::pair<iterator, bool> insert_or_assign(V&& val) {
+    auto position = lower_bound(GetKeyFromValue()(val));
+
+    if (position == end() || value_comp()(val, *position))
+      return {impl_.body_.emplace(position, std::forward<V>(val)), true};
+
+    *position = std::forward<V>(val);
+    return {position, false};
+  }
+
+  // This method is similar to insert_or_assign, with the following differences:
+  // - Instead of searching [begin(), end()) it only searches [first, last).
+  // - In case no equivalent element is found, val is appended to the end of the
+  //   underlying body and an iterator to the next bigger element in [first,
+  //   last) is returned.
+  template <class V>
+  std::pair<iterator, bool> append_or_assign(iterator first,
+                                             iterator last,
+                                             V&& val) {
+    auto position = std::lower_bound(first, last, val, value_comp());
+
+    if (position == last || value_comp()(val, *position)) {
+      // emplace_back might invalidate position, which is why distance needs to
+      // be cached.
+      const difference_type distance = std::distance(begin(), position);
+      impl_.body_.emplace_back(std::forward<V>(val));
+      return {std::next(begin(), distance), true};
+    }
+
+    *position = std::forward<V>(val);
+    return {position, false};
+  }
+
+  // This method is similar to insert, with the following differences:
+  // - Instead of searching [begin(), end()) it only searches [first, last).
+  // - In case no equivalent element is found, val is appended to the end of the
+  //   underlying body and an iterator to the next bigger element in [first,
+  //   last) is returned.
+  template <class V>
+  std::pair<iterator, bool> append_unique(iterator first,
+                                          iterator last,
+                                          V&& val) {
+    auto position = std::lower_bound(first, last, val, value_comp());
+
+    if (position == last || value_comp()(val, *position)) {
+      // emplace_back might invalidate position, which is why distance needs to
+      // be cached.
+      const difference_type distance = std::distance(begin(), position);
+      impl_.body_.emplace_back(std::forward<V>(val));
+      return {std::next(begin(), distance), true};
+    }
+
+    return {position, false};
+  }
+
+  void sort_and_unique(iterator first,
+                       iterator last,
+                       FlatContainerDupes dupes) {
+    // Preserve stability for the unique code below.
+    std::stable_sort(first, last, impl_.get_value_comp());
+
+    auto comparator = [this](const value_type& lhs, const value_type& rhs) {
+      // lhs is already <= rhs due to sort, therefore
+      // !(lhs < rhs) <=> lhs == rhs.
+      return !impl_.get_value_comp()(lhs, rhs);
+    };
+
+    iterator erase_after;
+    switch (dupes) {
+      case KEEP_FIRST_OF_DUPES:
+        erase_after = std::unique(first, last, comparator);
+        break;
+      case KEEP_LAST_OF_DUPES:
+        erase_after = LastUnique(first, last, comparator);
+        break;
+    }
+    erase(erase_after, last);
+  }
+
+  // To support comparators that may not be default-constructible, we have to
+  // store an instance of Compare. Since Impl stores all other internal state
+  // of flat_tree as well, deriving it privately from value_compare lets us
+  // use the empty base class optimization to avoid extra space in the common
+  // case when Compare is stateless.
+  struct Impl : private value_compare {
+    Impl() = default;
+
+    template <class Cmp, class... Body>
+    explicit Impl(Cmp&& compare_arg, Body&&... underlying_type_args)
+        : value_compare(std::forward<Cmp>(compare_arg)),
+          body_(std::forward<Body>(underlying_type_args)...) {}
+
+    const value_compare& get_value_comp() const { return *this; }
+    const key_compare& get_key_comp() const { return *this; }
+
+    underlying_type body_;
+  } impl_;
+
+  // If the compare is not transparent we want to construct key_type once.
+  template <typename K>
+  using KeyTypeOrK = typename std::
+      conditional<IsTransparentCompare<key_compare>::value, K, key_type>::type;
+};
+
+// ----------------------------------------------------------------------------
+// Lifetime.
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::flat_tree() = default;
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::flat_tree(
+    const KeyCompare& comp)
+    : impl_(comp) {}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+template <class InputIterator>
+flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::flat_tree(
+    InputIterator first,
+    InputIterator last,
+    FlatContainerDupes dupe_handling,
+    const KeyCompare& comp)
+    : impl_(comp, first, last) {
+  sort_and_unique(begin(), end(), dupe_handling);
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::flat_tree(
+    const flat_tree&) = default;
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::flat_tree(
+    std::vector<value_type> items,
+    FlatContainerDupes dupe_handling,
+    const KeyCompare& comp)
+    : impl_(comp, std::move(items)) {
+  sort_and_unique(begin(), end(), dupe_handling);
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::flat_tree(
+    std::initializer_list<value_type> ilist,
+    FlatContainerDupes dupe_handling,
+    const KeyCompare& comp)
+    : flat_tree(std::begin(ilist), std::end(ilist), dupe_handling, comp) {}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::~flat_tree() = default;
+
+// ----------------------------------------------------------------------------
+// Assignments.
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::operator=(
+    const flat_tree&) -> flat_tree& = default;
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::operator=(flat_tree &&)
+    -> flat_tree& = default;
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::operator=(
+    std::initializer_list<value_type> ilist) -> flat_tree& {
+  impl_.body_ = ilist;
+  sort_and_unique(begin(), end(), KEEP_FIRST_OF_DUPES);
+  return *this;
+}
+
+// ----------------------------------------------------------------------------
+// Memory management.
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+void flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::reserve(
+    size_type new_capacity) {
+  impl_.body_.reserve(new_capacity);
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::capacity() const
+    -> size_type {
+  return impl_.body_.capacity();
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+void flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::shrink_to_fit() {
+  impl_.body_.shrink_to_fit();
+}
+
+// ----------------------------------------------------------------------------
+// Size management.
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+void flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::clear() {
+  impl_.body_.clear();
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::size() const
+    -> size_type {
+  return impl_.body_.size();
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::max_size() const
+    -> size_type {
+  return impl_.body_.max_size();
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+bool flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::empty() const {
+  return impl_.body_.empty();
+}
+
+// ----------------------------------------------------------------------------
+// Iterators.
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::begin() -> iterator {
+  return impl_.body_.begin();
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::begin() const
+    -> const_iterator {
+  return impl_.body_.begin();
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::cbegin() const
+    -> const_iterator {
+  return impl_.body_.cbegin();
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::end() -> iterator {
+  return impl_.body_.end();
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::end() const
+    -> const_iterator {
+  return impl_.body_.end();
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::cend() const
+    -> const_iterator {
+  return impl_.body_.cend();
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::rbegin()
+    -> reverse_iterator {
+  return impl_.body_.rbegin();
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::rbegin() const
+    -> const_reverse_iterator {
+  return impl_.body_.rbegin();
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::crbegin() const
+    -> const_reverse_iterator {
+  return impl_.body_.crbegin();
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::rend()
+    -> reverse_iterator {
+  return impl_.body_.rend();
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::rend() const
+    -> const_reverse_iterator {
+  return impl_.body_.rend();
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::crend() const
+    -> const_reverse_iterator {
+  return impl_.body_.crend();
+}
+
+// ----------------------------------------------------------------------------
+// Insert operations.
+//
+// Currently we use position_hint the same way as eastl or boost:
+// https://github.com/electronicarts/EASTL/blob/master/include/EASTL/vector_set.h#L493
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::insert(
+    const value_type& val) -> std::pair<iterator, bool> {
+  return emplace_key_args(GetKeyFromValue()(val), val);
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::insert(
+    value_type&& val) -> std::pair<iterator, bool> {
+  return emplace_key_args(GetKeyFromValue()(val), std::move(val));
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::insert(
+    const_iterator position_hint,
+    const value_type& val) -> iterator {
+  return emplace_hint_key_args(position_hint, GetKeyFromValue()(val), val)
+      .first;
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::insert(
+    const_iterator position_hint,
+    value_type&& val) -> iterator {
+  return emplace_hint_key_args(position_hint, GetKeyFromValue()(val),
+                               std::move(val))
+      .first;
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+template <class InputIterator>
+void flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::insert(
+    InputIterator first,
+    InputIterator last,
+    FlatContainerDupes dupes) {
+  if (first == last)
+    return;
+
+  // Cache whether existing elements should be overwritten and whether
+  // inserting new elements happens immediately or will be done in a batch.
+  const bool overwrite_existing = dupes == KEEP_LAST_OF_DUPES;
+  const bool insert_inplace =
+      is_multipass<InputIterator>() && std::next(first) == last;
+
+  if (insert_inplace) {
+    if (overwrite_existing) {
+      for (; first != last; ++first)
+        insert_or_assign(*first);
+    } else {
+      std::copy(first, last, std::inserter(*this, end()));
+    }
+    return;
+  }
+
+  // Provide a convenience lambda to obtain an iterator pointing past the last
+  // old element. This needs to be dynamic due to possible re-allocations.
+  const size_type original_size = size();
+  auto middle = [this, original_size]() {
+    return std::next(begin(), original_size);
+  };
+
+  // For batch updates initialize the first insertion point.
+  difference_type pos_first_new = original_size;
+
+  // Loop over the input range while appending new values and overwriting
+  // existing ones, if applicable. Keep track of the first insertion point.
+  if (overwrite_existing) {
+    for (; first != last; ++first) {
+      std::pair<iterator, bool> result =
+          append_or_assign(begin(), middle(), *first);
+      if (result.second) {
+        pos_first_new =
+            std::min(pos_first_new, std::distance(begin(), result.first));
+      }
+    }
+  } else {
+    for (; first != last; ++first) {
+      std::pair<iterator, bool> result =
+          append_unique(begin(), middle(), *first);
+      if (result.second) {
+        pos_first_new =
+            std::min(pos_first_new, std::distance(begin(), result.first));
+      }
+    }
+  }
+
+  // The new elements might be unordered and contain duplicates, so post-process
+  // the just inserted elements and merge them with the rest, inserting them at
+  // the previously found spot.
+  sort_and_unique(middle(), end(), dupes);
+  std::inplace_merge(std::next(begin(), pos_first_new), middle(), end(),
+                     value_comp());
+}
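+
+// For example (illustrative): inserting the range {4, 2, 2} into a flat_tree
+// holding {1, 3} with KEEP_FIRST_OF_DUPES appends the values not present in
+// the old portion (here 4, 2, 2), sorts and uniques the appended tail, and
+// merges it in place, yielding {1, 2, 3, 4}.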
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+template <class... Args>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::emplace(Args&&... args)
+    -> std::pair<iterator, bool> {
+  return insert(value_type(std::forward<Args>(args)...));
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+template <class... Args>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::emplace_hint(
+    const_iterator position_hint,
+    Args&&... args) -> iterator {
+  return insert(position_hint, value_type(std::forward<Args>(args)...));
+}
+
+// ----------------------------------------------------------------------------
+// Erase operations.
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::erase(
+    iterator position) -> iterator {
+  return impl_.body_.erase(position);
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::erase(
+    const_iterator position) -> iterator {
+  return impl_.body_.erase(position);
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+template <typename K>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::erase(const K& val)
+    -> size_type {
+  auto eq_range = equal_range(val);
+  auto res = std::distance(eq_range.first, eq_range.second);
+  erase(eq_range.first, eq_range.second);
+  return res;
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::erase(
+    const_iterator first,
+    const_iterator last) -> iterator {
+  return impl_.body_.erase(first, last);
+}
+
+// ----------------------------------------------------------------------------
+// Comparators.
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::key_comp() const
+    -> key_compare {
+  return impl_.get_key_comp();
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::value_comp() const
+    -> value_compare {
+  return impl_.get_value_comp();
+}
+
+// ----------------------------------------------------------------------------
+// Search operations.
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+template <typename K>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::count(
+    const K& key) const -> size_type {
+  auto eq_range = equal_range(key);
+  return std::distance(eq_range.first, eq_range.second);
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+template <typename K>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::find(const K& key)
+    -> iterator {
+  return const_cast_it(as_const().find(key));
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+template <typename K>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::find(
+    const K& key) const -> const_iterator {
+  auto eq_range = equal_range(key);
+  return (eq_range.first == eq_range.second) ? end() : eq_range.first;
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+template <typename K>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::equal_range(
+    const K& key) -> std::pair<iterator, iterator> {
+  auto res = as_const().equal_range(key);
+  return {const_cast_it(res.first), const_cast_it(res.second)};
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+template <typename K>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::equal_range(
+    const K& key) const -> std::pair<const_iterator, const_iterator> {
+  auto lower = lower_bound(key);
+
+  GetKeyFromValue extractor;
+  if (lower == end() || impl_.get_key_comp()(key, extractor(*lower)))
+    return {lower, lower};
+
+  return {lower, std::next(lower)};
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+template <typename K>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::lower_bound(
+    const K& key) -> iterator {
+  return const_cast_it(as_const().lower_bound(key));
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+template <typename K>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::lower_bound(
+    const K& key) const -> const_iterator {
+  static_assert(std::is_convertible<const KeyTypeOrK<K>&, const K&>::value,
+                "Requested type cannot be bound to the container's key_type "
+                "which is required for a non-transparent compare.");
+
+  const KeyTypeOrK<K>& key_ref = key;
+
+  KeyValueCompare key_value(impl_.get_key_comp());
+  return std::lower_bound(begin(), end(), key_ref, key_value);
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+template <typename K>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::upper_bound(
+    const K& key) -> iterator {
+  return const_cast_it(as_const().upper_bound(key));
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+template <typename K>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::upper_bound(
+    const K& key) const -> const_iterator {
+  static_assert(std::is_convertible<const KeyTypeOrK<K>&, const K&>::value,
+                "Requested type cannot be bound to the container's key_type "
+                "which is required for a non-transparent compare.");
+
+  const KeyTypeOrK<K>& key_ref = key;
+
+  KeyValueCompare key_value(impl_.get_key_comp());
+  return std::upper_bound(begin(), end(), key_ref, key_value);
+}
+
+// ----------------------------------------------------------------------------
+// General operations.
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+void flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::swap(
+    flat_tree& other) noexcept {
+  std::swap(impl_, other.impl_);
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+template <class... Args>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::unsafe_emplace(
+    const_iterator position,
+    Args&&... args) -> iterator {
+  return impl_.body_.emplace(position, std::forward<Args>(args)...);
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+template <class K, class... Args>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::emplace_key_args(
+    const K& key,
+    Args&&... args) -> std::pair<iterator, bool> {
+  auto lower = lower_bound(key);
+  if (lower == end() || key_comp()(key, GetKeyFromValue()(*lower)))
+    return {unsafe_emplace(lower, std::forward<Args>(args)...), true};
+  return {lower, false};
+}
+
+template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
+template <class K, class... Args>
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::emplace_hint_key_args(
+    const_iterator hint,
+    const K& key,
+    Args&&... args) -> std::pair<iterator, bool> {
+  GetKeyFromValue extractor;
+  if ((hint == begin() || key_comp()(extractor(*std::prev(hint)), key))) {
+    if (hint == end() || key_comp()(key, extractor(*hint))) {
+      // *(hint - 1) < key < *hint => key did not exist and hint is correct.
+      return {unsafe_emplace(hint, std::forward<Args>(args)...), true};
+    }
+    if (!key_comp()(extractor(*hint), key)) {
+      // key == *hint => no-op, return correct hint.
+      return {const_cast_it(hint), false};
+    }
+  }
+  // The hint was not helpful; dispatch to the hintless version.
+  return emplace_key_args(key, std::forward<Args>(args)...);
+}
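A correct hint reduces the search to a couple of comparisons before the element shuffle, so appending pre-sorted data with end() as the hint is cheap; an unhelpful hint only costs those comparisons before falling back to the binary search. A hypothetical usage sketch, assuming the base::flat_set alias layered on this tree elsewhere in this change:

```cpp
#include <vector>

#include "base/containers/flat_set.h"

// Inserting already-sorted values with end() as the hint takes the fast path
// above on every call: *prev(hint) < key and hint == end().
void InsertSortedValues(base::flat_set<int>* out,
                        const std::vector<int>& sorted_values) {
  for (int value : sorted_values)
    out->insert(out->end(), value);
}
```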
+
+// For containers like sets, the key is the same as the value. This implements
+// the GetKeyFromValue template parameter to flat_tree for this case.
+template <class Key>
+struct GetKeyFromValueIdentity {
+  const Key& operator()(const Key& k) const { return k; }
+};
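GetKeyFromValueIdentity covers sets, where the key and the value coincide. A map-style instantiation instead supplies an extractor that returns the pair's first member; the unit tests below use exactly this shape in InsertIterIter:

```cpp
#include <functional>
#include <utility>

#include "base/containers/flat_tree.h"

// Sketch of a map-style key extractor: the key is the pair's first member.
struct GetKeyFromIntIntPair {
  const int& operator()(const std::pair<int, int>& p) const { return p.first; }
};

using IntIntMap = base::internal::
    flat_tree<int, std::pair<int, int>, GetKeyFromIntIntPair, std::less<int>>;
```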
+
+}  // namespace internal
+
+// ----------------------------------------------------------------------------
+// Free functions.
+
+// Erases all elements that match the predicate. Runs in O(size()) time.
+template <class Key,
+          class Value,
+          class GetKeyFromValue,
+          class KeyCompare,
+          typename Predicate>
+void EraseIf(base::internal::flat_tree<Key, Value, GetKeyFromValue, KeyCompare>&
+                 container,
+             Predicate pred) {
+  container.erase(std::remove_if(container.begin(), container.end(), pred),
+                  container.end());
+}
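EraseIf packages the erase-remove idiom: std::remove_if compacts the surviving elements to the front of the underlying vector, and a single range erase drops the rest, which is what gives the single-pass O(size()) bound. A usage sketch, again assuming the base::flat_set alias:

```cpp
#include "base/containers/flat_set.h"

// Remove all even values in a single linear pass.
void DropEvenValues(base::flat_set<int>* values) {
  base::EraseIf(*values, [](int v) { return (v & 1) == 0; });
}
```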
+
+}  // namespace base
+
+#endif  // BASE_CONTAINERS_FLAT_TREE_H_
diff --git a/base/containers/flat_tree_unittest.cc b/base/containers/flat_tree_unittest.cc
new file mode 100644
index 0000000..5b788d5
--- /dev/null
+++ b/base/containers/flat_tree_unittest.cc
@@ -0,0 +1,1385 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/containers/flat_tree.h"
+
+// The following tests are ported (and extended) from the libc++ tests for
+// std::set. They can be found here:
+// https://github.com/llvm-mirror/libcxx/tree/master/test/std/containers/associative/set
+//
+// Tests that were not ported:
+// * No tests with PrivateConstructor and std::less<> changed to std::less<T>.
+//   These tests have to do with C++14 std::less<>
+//   http://en.cppreference.com/w/cpp/utility/functional/less_void
+//   which adds support for templated versions of the lookup functions.
+//   Because we use the same implementation, we decided it is OK just to check
+//   compilation, which is what flat_set_unittest/flat_map_unittest do.
+// * No tests for max_size().
+//   These would require allocator support.
+// * No tests with DefaultOnly.
+//   Standard containers allocate each element in a separate node on the heap
+//   and then manipulate these nodes. Flat containers store their elements in
+//   contiguous memory and move them around, so the type is required to be
+//   movable.
+// * No tests for N3644.
+//   This proposal suggests that all default-constructed iterators compare
+//   equal. We currently use std::vector iterators, and they don't implement
+//   this.
+// * No tests with min_allocator and no tests counting allocations.
+//   Flat sets currently don't support allocators.
+
+#include <forward_list>
+#include <functional>
+#include <iterator>
+#include <list>
+#include <string>
+#include <vector>
+
+#include "base/macros.h"
+#include "base/template_util.h"
+#include "base/test/move_only_int.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace internal {
+
+namespace {
+
+template <class It>
+class InputIterator {
+ public:
+  using iterator_category = std::input_iterator_tag;
+  using value_type = typename std::iterator_traits<It>::value_type;
+  using difference_type = typename std::iterator_traits<It>::difference_type;
+  using pointer = It;
+  using reference = typename std::iterator_traits<It>::reference;
+
+  InputIterator() : it_() {}
+  explicit InputIterator(It it) : it_(it) {}
+
+  reference operator*() const { return *it_; }
+  pointer operator->() const { return it_; }
+
+  InputIterator& operator++() {
+    ++it_;
+    return *this;
+  }
+  InputIterator operator++(int) {
+    InputIterator tmp(*this);
+    ++(*this);
+    return tmp;
+  }
+
+  friend bool operator==(const InputIterator& lhs, const InputIterator& rhs) {
+    return lhs.it_ == rhs.it_;
+  }
+  friend bool operator!=(const InputIterator& lhs, const InputIterator& rhs) {
+    return !(lhs == rhs);
+  }
+
+ private:
+  It it_;
+};
+
+template <typename It>
+InputIterator<It> MakeInputIterator(It it) {
+  return InputIterator<It>(it);
+}
+
+class Emplaceable {
+ public:
+  Emplaceable() : Emplaceable(0, 0.0) {}
+  Emplaceable(int i, double d) : int_(i), double_(d) {}
+  Emplaceable(Emplaceable&& other) : int_(other.int_), double_(other.double_) {
+    other.int_ = 0;
+    other.double_ = 0.0;
+  }
+
+  Emplaceable& operator=(Emplaceable&& other) {
+    int_ = other.int_;
+    other.int_ = 0;
+    double_ = other.double_;
+    other.double_ = 0.0;
+    return *this;
+  }
+
+  friend bool operator==(const Emplaceable& lhs, const Emplaceable& rhs) {
+    return std::tie(lhs.int_, lhs.double_) == std::tie(rhs.int_, rhs.double_);
+  }
+
+  friend bool operator<(const Emplaceable& lhs, const Emplaceable& rhs) {
+    return std::tie(lhs.int_, lhs.double_) < std::tie(rhs.int_, rhs.double_);
+  }
+
+ private:
+  int int_;
+  double double_;
+
+  DISALLOW_COPY_AND_ASSIGN(Emplaceable);
+};
+
+struct TemplateConstructor {
+  template <typename T>
+  TemplateConstructor(const T&) {}
+
+  friend bool operator<(const TemplateConstructor&,
+                        const TemplateConstructor&) {
+    return false;
+  }
+};
+
+class NonDefaultConstructibleCompare {
+ public:
+  explicit NonDefaultConstructibleCompare(int) {}
+
+  template <typename T>
+  bool operator()(const T& lhs, const T& rhs) const {
+    return std::less<T>()(lhs, rhs);
+  }
+};
+
+template <class PairType>
+struct LessByFirst {
+  bool operator()(const PairType& lhs, const PairType& rhs) const {
+    return lhs.first < rhs.first;
+  }
+};
+
+// Common test trees.
+using IntTree =
+    flat_tree<int, int, GetKeyFromValueIdentity<int>, std::less<int>>;
+using IntPair = std::pair<int, int>;
+using IntPairTree = flat_tree<IntPair,
+                              IntPair,
+                              GetKeyFromValueIdentity<IntPair>,
+                              LessByFirst<IntPair>>;
+using MoveOnlyTree = flat_tree<MoveOnlyInt,
+                               MoveOnlyInt,
+                               GetKeyFromValueIdentity<MoveOnlyInt>,
+                               std::less<MoveOnlyInt>>;
+using EmplaceableTree = flat_tree<Emplaceable,
+                                  Emplaceable,
+                                  GetKeyFromValueIdentity<Emplaceable>,
+                                  std::less<Emplaceable>>;
+using ReversedTree =
+    flat_tree<int, int, GetKeyFromValueIdentity<int>, std::greater<int>>;
+
+using TreeWithStrangeCompare = flat_tree<int,
+                                         int,
+                                         GetKeyFromValueIdentity<int>,
+                                         NonDefaultConstructibleCompare>;
+
+using ::testing::ElementsAre;
+
+}  // namespace
+
+TEST(FlatTree, IsMultipass) {
+  static_assert(!is_multipass<std::istream_iterator<int>>(),
+                "InputIterator is not multipass");
+  static_assert(!is_multipass<std::ostream_iterator<int>>(),
+                "OutputIterator is not multipass");
+
+  static_assert(is_multipass<std::forward_list<int>::iterator>(),
+                "ForwardIterator is multipass");
+  static_assert(is_multipass<std::list<int>::iterator>(),
+                "BidirectionalIterator is multipass");
+  static_assert(is_multipass<std::vector<int>::iterator>(),
+                "RandomAccessIterator is multipass");
+}
+
+TEST(FlatTree, LastUnique) {
+  using Pair = std::pair<int, int>;
+  using Vect = std::vector<Pair>;
+
+  auto cmp = [](const Pair& lhs, const Pair& rhs) {
+    return lhs.first == rhs.first;
+  };
+
+  // Empty case.
+  Vect empty;
+  EXPECT_EQ(empty.end(), LastUnique(empty.begin(), empty.end(), cmp));
+
+  // Single element.
+  Vect one;
+  one.push_back(Pair(1, 1));
+  EXPECT_EQ(one.end(), LastUnique(one.begin(), one.end(), cmp));
+  ASSERT_EQ(1u, one.size());
+  EXPECT_THAT(one, ElementsAre(Pair(1, 1)));
+
+  // Two elements, already unique.
+  Vect two_u;
+  two_u.push_back(Pair(1, 1));
+  two_u.push_back(Pair(2, 2));
+  EXPECT_EQ(two_u.end(), LastUnique(two_u.begin(), two_u.end(), cmp));
+  EXPECT_THAT(two_u, ElementsAre(Pair(1, 1), Pair(2, 2)));
+
+  // Two elements, dupes.
+  Vect two_d;
+  two_d.push_back(Pair(1, 1));
+  two_d.push_back(Pair(1, 2));
+  auto last = LastUnique(two_d.begin(), two_d.end(), cmp);
+  EXPECT_EQ(two_d.begin() + 1, last);
+  two_d.erase(last, two_d.end());
+  EXPECT_THAT(two_d, ElementsAre(Pair(1, 2)));
+
+  // Non-dupes, dupes, non-dupes.
+  Vect ndn;
+  ndn.push_back(Pair(1, 1));
+  ndn.push_back(Pair(2, 1));
+  ndn.push_back(Pair(2, 2));
+  ndn.push_back(Pair(2, 3));
+  ndn.push_back(Pair(3, 1));
+  last = LastUnique(ndn.begin(), ndn.end(), cmp);
+  EXPECT_EQ(ndn.begin() + 3, last);
+  ndn.erase(last, ndn.end());
+  EXPECT_THAT(ndn, ElementsAre(Pair(1, 1), Pair(2, 3), Pair(3, 1)));
+
+  // Dupes, non-dupes, dupes.
+  Vect dnd;
+  dnd.push_back(Pair(1, 1));
+  dnd.push_back(Pair(1, 2));
+  dnd.push_back(Pair(1, 3));
+  dnd.push_back(Pair(2, 1));
+  dnd.push_back(Pair(3, 1));
+  dnd.push_back(Pair(3, 2));
+  dnd.push_back(Pair(3, 3));
+  last = LastUnique(dnd.begin(), dnd.end(), cmp);
+  EXPECT_EQ(dnd.begin() + 3, last);
+  dnd.erase(last, dnd.end());
+  EXPECT_THAT(dnd, ElementsAre(Pair(1, 3), Pair(2, 1), Pair(3, 3)));
+}
+
+// ----------------------------------------------------------------------------
+// Class.
+
+// Check that flat_tree and its iterators can be instantiated with an
+// incomplete type.
+
+TEST(FlatTree, IncompleteType) {
+  struct A {
+    using Tree = flat_tree<A, A, GetKeyFromValueIdentity<A>, std::less<A>>;
+    int data;
+    Tree set_with_incomplete_type;
+    Tree::iterator it;
+    Tree::const_iterator cit;
+
+    // We do not declare operator< because clang complains that it's unused.
+  };
+
+  A a;
+}
+
+TEST(FlatTree, Stability) {
+  using Pair = std::pair<int, int>;
+
+  using Tree =
+      flat_tree<Pair, Pair, GetKeyFromValueIdentity<Pair>, LessByFirst<Pair>>;
+
+  // Constructors are stable.
+  Tree cont({{0, 0}, {1, 0}, {0, 1}, {2, 0}, {0, 2}, {1, 1}});
+
+  auto AllOfSecondsAreZero = [&cont] {
+    return std::all_of(cont.begin(), cont.end(),
+                       [](const Pair& elem) { return elem.second == 0; });
+  };
+
+  EXPECT_TRUE(AllOfSecondsAreZero()) << "constructor should be stable";
+
+  // Should not replace existing.
+  cont.insert(Pair(0, 2));
+  cont.insert(Pair(1, 2));
+  cont.insert(Pair(2, 2));
+
+  EXPECT_TRUE(AllOfSecondsAreZero()) << "insert should be stable";
+
+  cont.insert(Pair(3, 0));
+  cont.insert(Pair(3, 2));
+
+  EXPECT_TRUE(AllOfSecondsAreZero()) << "insert should be stable";
+}
+
+// ----------------------------------------------------------------------------
+// Types.
+
+// key_type
+// key_compare
+// value_type
+// value_compare
+// pointer
+// const_pointer
+// reference
+// const_reference
+// size_type
+// difference_type
+// iterator
+// const_iterator
+// reverse_iterator
+// const_reverse_iterator
+
+TEST(FlatTree, Types) {
+  // These are guaranteed to be portable.
+  static_assert((std::is_same<int, IntTree::key_type>::value), "");
+  static_assert((std::is_same<int, IntTree::value_type>::value), "");
+  static_assert((std::is_same<std::less<int>, IntTree::key_compare>::value),
+                "");
+  static_assert((std::is_same<int&, IntTree::reference>::value), "");
+  static_assert((std::is_same<const int&, IntTree::const_reference>::value),
+                "");
+  static_assert((std::is_same<int*, IntTree::pointer>::value), "");
+  static_assert((std::is_same<const int*, IntTree::const_pointer>::value), "");
+}
+
+// ----------------------------------------------------------------------------
+// Lifetime.
+
+// flat_tree()
+// flat_tree(const Compare& comp)
+
+TEST(FlatTree, DefaultConstructor) {
+  {
+    IntTree cont;
+    EXPECT_THAT(cont, ElementsAre());
+  }
+
+  {
+    TreeWithStrangeCompare cont(NonDefaultConstructibleCompare(0));
+    EXPECT_THAT(cont, ElementsAre());
+  }
+}
+
+// flat_tree(InputIterator first,
+//           InputIterator last,
+//           FlatContainerDupes dupe_handling,
+//           const Compare& comp = Compare())
+
+TEST(FlatTree, RangeConstructor) {
+  {
+    IntPair input_vals[] = {{1, 1}, {1, 2}, {2, 1}, {2, 2}, {1, 3},
+                            {2, 3}, {3, 1}, {3, 2}, {3, 3}};
+
+    IntPairTree first_of(MakeInputIterator(std::begin(input_vals)),
+                         MakeInputIterator(std::end(input_vals)));
+    EXPECT_THAT(first_of,
+                ElementsAre(IntPair(1, 1), IntPair(2, 1), IntPair(3, 1)));
+
+    IntPairTree last_of(MakeInputIterator(std::begin(input_vals)),
+                        MakeInputIterator(std::end(input_vals)),
+                        KEEP_LAST_OF_DUPES);
+    EXPECT_THAT(last_of,
+                ElementsAre(IntPair(1, 3), IntPair(2, 3), IntPair(3, 3)));
+  }
+  {
+    TreeWithStrangeCompare::value_type input_vals[] = {1, 1, 1, 2, 2,
+                                                       2, 3, 3, 3};
+
+    TreeWithStrangeCompare cont(MakeInputIterator(std::begin(input_vals)),
+                                MakeInputIterator(std::end(input_vals)),
+                                KEEP_FIRST_OF_DUPES,
+                                NonDefaultConstructibleCompare(0));
+    EXPECT_THAT(cont, ElementsAre(1, 2, 3));
+  }
+}
+
+// flat_tree(const flat_tree& x)
+
+TEST(FlatTree, CopyConstructor) {
+  IntTree original({1, 2, 3, 4});
+  IntTree copied(original);
+
+  EXPECT_THAT(copied, ElementsAre(1, 2, 3, 4));
+  EXPECT_THAT(original, ElementsAre(1, 2, 3, 4));
+  EXPECT_EQ(original, copied);
+}
+
+// flat_tree(flat_tree&& x)
+
+TEST(FlatTree, MoveConstructor) {
+  int input_range[] = {1, 2, 3, 4};
+
+  MoveOnlyTree original(std::begin(input_range), std::end(input_range));
+  MoveOnlyTree moved(std::move(original));
+
+  EXPECT_EQ(1U, moved.count(MoveOnlyInt(1)));
+  EXPECT_EQ(1U, moved.count(MoveOnlyInt(2)));
+  EXPECT_EQ(1U, moved.count(MoveOnlyInt(3)));
+  EXPECT_EQ(1U, moved.count(MoveOnlyInt(4)));
+}
+
+// flat_tree(std::vector<value_type>, FlatContainerDupes dupe_handling)
+
+TEST(FlatTree, VectorConstructor) {
+  using Pair = std::pair<int, MoveOnlyInt>;
+
+  // Construct an unsorted vector with a duplicate item in it. Elements are
+  // sorted by the first member; the second member lets us test for stability.
+  // A move-only type is used to ensure the vector is not copied.
+  std::vector<Pair> storage;
+  storage.push_back(Pair(2, MoveOnlyInt(0)));
+  storage.push_back(Pair(1, MoveOnlyInt(0)));
+  storage.push_back(Pair(2, MoveOnlyInt(1)));
+
+  using Tree =
+      flat_tree<Pair, Pair, GetKeyFromValueIdentity<Pair>, LessByFirst<Pair>>;
+  Tree tree(std::move(storage));
+
+  // The list should be two items long, with only the first "2" saved.
+  ASSERT_EQ(2u, tree.size());
+  const Pair& zeroth = *tree.begin();
+  ASSERT_EQ(1, zeroth.first);
+  ASSERT_EQ(0, zeroth.second.data());
+
+  const Pair& first = *(tree.begin() + 1);
+  ASSERT_EQ(2, first.first);
+  ASSERT_EQ(0, first.second.data());
+
+  // Test KEEP_LAST_OF_DUPES with a simple vector constructor.
+  std::vector<IntPair> int_storage{{1, 1}, {1, 2}, {2, 1}};
+  IntPairTree int_tree(std::move(int_storage), KEEP_LAST_OF_DUPES);
+  EXPECT_THAT(int_tree, ElementsAre(IntPair(1, 2), IntPair(2, 1)));
+}
+
+// flat_tree(std::initializer_list<value_type> ilist,
+//           FlatContainerDupes dupe_handling,
+//           const Compare& comp = Compare())
+
+TEST(FlatTree, InitializerListConstructor) {
+  {
+    IntTree cont({1, 2, 3, 4, 5, 6, 10, 8});
+    EXPECT_THAT(cont, ElementsAre(1, 2, 3, 4, 5, 6, 8, 10));
+  }
+  {
+    IntTree cont({1, 2, 3, 4, 5, 6, 10, 8});
+    EXPECT_THAT(cont, ElementsAre(1, 2, 3, 4, 5, 6, 8, 10));
+  }
+  {
+    TreeWithStrangeCompare cont({1, 2, 3, 4, 5, 6, 10, 8}, KEEP_FIRST_OF_DUPES,
+                                NonDefaultConstructibleCompare(0));
+    EXPECT_THAT(cont, ElementsAre(1, 2, 3, 4, 5, 6, 8, 10));
+  }
+  {
+    IntPairTree first_of({{1, 1}, {2, 1}, {1, 2}});
+    EXPECT_THAT(first_of, ElementsAre(IntPair(1, 1), IntPair(2, 1)));
+  }
+  {
+    IntPairTree last_of({{1, 1}, {2, 1}, {1, 2}}, KEEP_LAST_OF_DUPES);
+    EXPECT_THAT(last_of, ElementsAre(IntPair(1, 2), IntPair(2, 1)));
+  }
+}
+
+// ----------------------------------------------------------------------------
+// Assignments.
+
+// flat_tree& operator=(const flat_tree&)
+
+TEST(FlatTree, CopyAssignable) {
+  IntTree original({1, 2, 3, 4});
+  IntTree copied;
+  copied = original;
+
+  EXPECT_THAT(copied, ElementsAre(1, 2, 3, 4));
+  EXPECT_THAT(original, ElementsAre(1, 2, 3, 4));
+  EXPECT_EQ(original, copied);
+}
+
+// flat_tree& operator=(flat_tree&&)
+
+TEST(FlatTree, MoveAssignable) {
+  int input_range[] = {1, 2, 3, 4};
+
+  MoveOnlyTree original(std::begin(input_range), std::end(input_range));
+  MoveOnlyTree moved;
+  moved = std::move(original);
+
+  EXPECT_EQ(1U, moved.count(MoveOnlyInt(1)));
+  EXPECT_EQ(1U, moved.count(MoveOnlyInt(2)));
+  EXPECT_EQ(1U, moved.count(MoveOnlyInt(3)));
+  EXPECT_EQ(1U, moved.count(MoveOnlyInt(4)));
+}
+
+// flat_tree& operator=(std::initializer_list<value_type> ilist)
+
+TEST(FlatTree, InitializerListAssignable) {
+  IntTree cont({0});
+  cont = {1, 2, 3, 4, 5, 6, 10, 8};
+
+  EXPECT_EQ(0U, cont.count(0));
+  EXPECT_THAT(cont, ElementsAre(1, 2, 3, 4, 5, 6, 8, 10));
+}
+
+// --------------------------------------------------------------------------
+// Memory management.
+
+// void reserve(size_type new_capacity)
+
+TEST(FlatTree, Reserve) {
+  IntTree cont({1, 2, 3});
+
+  cont.reserve(5);
+  EXPECT_LE(5U, cont.capacity());
+}
+
+// size_type capacity() const
+
+TEST(FlatTree, Capacity) {
+  IntTree cont({1, 2, 3});
+
+  EXPECT_LE(cont.size(), cont.capacity());
+  cont.reserve(5);
+  EXPECT_LE(cont.size(), cont.capacity());
+}
+
+// void shrink_to_fit()
+
+TEST(FlatTree, ShrinkToFit) {
+  IntTree cont({1, 2, 3});
+
+  IntTree::size_type capacity_before = cont.capacity();
+  cont.shrink_to_fit();
+  EXPECT_GE(capacity_before, cont.capacity());
+}
+
+// ----------------------------------------------------------------------------
+// Size management.
+
+// void clear()
+
+TEST(FlatTree, Clear) {
+  IntTree cont({1, 2, 3, 4, 5, 6, 7, 8});
+  cont.clear();
+  EXPECT_THAT(cont, ElementsAre());
+}
+
+// size_type size() const
+
+TEST(FlatTree, Size) {
+  IntTree cont;
+
+  EXPECT_EQ(0U, cont.size());
+  cont.insert(2);
+  EXPECT_EQ(1U, cont.size());
+  cont.insert(1);
+  EXPECT_EQ(2U, cont.size());
+  cont.insert(3);
+  EXPECT_EQ(3U, cont.size());
+  cont.erase(cont.begin());
+  EXPECT_EQ(2U, cont.size());
+  cont.erase(cont.begin());
+  EXPECT_EQ(1U, cont.size());
+  cont.erase(cont.begin());
+  EXPECT_EQ(0U, cont.size());
+}
+
+// bool empty() const
+
+TEST(FlatTree, Empty) {
+  IntTree cont;
+
+  EXPECT_TRUE(cont.empty());
+  cont.insert(1);
+  EXPECT_FALSE(cont.empty());
+  cont.clear();
+  EXPECT_TRUE(cont.empty());
+}
+
+// ----------------------------------------------------------------------------
+// Iterators.
+
+// iterator begin()
+// const_iterator begin() const
+// iterator end()
+// const_iterator end() const
+//
+// reverse_iterator rbegin()
+// const_reverse_iterator rbegin() const
+// reverse_iterator rend()
+// const_reverse_iterator rend() const
+//
+// const_iterator cbegin() const
+// const_iterator cend() const
+// const_reverse_iterator crbegin() const
+// const_reverse_iterator crend() const
+
+TEST(FlatTree, Iterators) {
+  IntTree cont({1, 2, 3, 4, 5, 6, 7, 8});
+
+  auto size = static_cast<IntTree::difference_type>(cont.size());
+
+  EXPECT_EQ(size, std::distance(cont.begin(), cont.end()));
+  EXPECT_EQ(size, std::distance(cont.cbegin(), cont.cend()));
+  EXPECT_EQ(size, std::distance(cont.rbegin(), cont.rend()));
+  EXPECT_EQ(size, std::distance(cont.crbegin(), cont.crend()));
+
+  {
+    IntTree::iterator it = cont.begin();
+    IntTree::const_iterator c_it = cont.cbegin();
+    EXPECT_EQ(it, c_it);
+    for (int j = 1; it != cont.end(); ++it, ++c_it, ++j) {
+      EXPECT_EQ(j, *it);
+      EXPECT_EQ(j, *c_it);
+    }
+  }
+  {
+    IntTree::reverse_iterator rit = cont.rbegin();
+    IntTree::const_reverse_iterator c_rit = cont.crbegin();
+    EXPECT_EQ(rit, c_rit);
+    for (int j = static_cast<int>(size); rit != cont.rend();
+         ++rit, ++c_rit, --j) {
+      EXPECT_EQ(j, *rit);
+      EXPECT_EQ(j, *c_rit);
+    }
+  }
+}
+
+// ----------------------------------------------------------------------------
+// Insert operations.
+
+// pair<iterator, bool> insert(const value_type& val)
+
+TEST(FlatTree, InsertLValue) {
+  IntTree cont;
+
+  int value = 2;
+  std::pair<IntTree::iterator, bool> result = cont.insert(value);
+  EXPECT_TRUE(result.second);
+  EXPECT_EQ(cont.begin(), result.first);
+  EXPECT_EQ(1U, cont.size());
+  EXPECT_EQ(2, *result.first);
+
+  value = 1;
+  result = cont.insert(value);
+  EXPECT_TRUE(result.second);
+  EXPECT_EQ(cont.begin(), result.first);
+  EXPECT_EQ(2U, cont.size());
+  EXPECT_EQ(1, *result.first);
+
+  value = 3;
+  result = cont.insert(value);
+  EXPECT_TRUE(result.second);
+  EXPECT_EQ(std::prev(cont.end()), result.first);
+  EXPECT_EQ(3U, cont.size());
+  EXPECT_EQ(3, *result.first);
+
+  value = 3;
+  result = cont.insert(value);
+  EXPECT_FALSE(result.second);
+  EXPECT_EQ(std::prev(cont.end()), result.first);
+  EXPECT_EQ(3U, cont.size());
+  EXPECT_EQ(3, *result.first);
+}
+
+// pair<iterator, bool> insert(value_type&& val)
+
+TEST(FlatTree, InsertRValue) {
+  MoveOnlyTree cont;
+
+  std::pair<MoveOnlyTree::iterator, bool> result = cont.insert(MoveOnlyInt(2));
+  EXPECT_TRUE(result.second);
+  EXPECT_EQ(cont.begin(), result.first);
+  EXPECT_EQ(1U, cont.size());
+  EXPECT_EQ(2, result.first->data());
+
+  result = cont.insert(MoveOnlyInt(1));
+  EXPECT_TRUE(result.second);
+  EXPECT_EQ(cont.begin(), result.first);
+  EXPECT_EQ(2U, cont.size());
+  EXPECT_EQ(1, result.first->data());
+
+  result = cont.insert(MoveOnlyInt(3));
+  EXPECT_TRUE(result.second);
+  EXPECT_EQ(std::prev(cont.end()), result.first);
+  EXPECT_EQ(3U, cont.size());
+  EXPECT_EQ(3, result.first->data());
+
+  result = cont.insert(MoveOnlyInt(3));
+  EXPECT_FALSE(result.second);
+  EXPECT_EQ(std::prev(cont.end()), result.first);
+  EXPECT_EQ(3U, cont.size());
+  EXPECT_EQ(3, result.first->data());
+}
+
+// iterator insert(const_iterator position_hint, const value_type& val)
+
+TEST(FlatTree, InsertPositionLValue) {
+  IntTree cont;
+
+  IntTree::iterator result = cont.insert(cont.cend(), 2);
+  EXPECT_EQ(cont.begin(), result);
+  EXPECT_EQ(1U, cont.size());
+  EXPECT_EQ(2, *result);
+
+  result = cont.insert(cont.cend(), 1);
+  EXPECT_EQ(cont.begin(), result);
+  EXPECT_EQ(2U, cont.size());
+  EXPECT_EQ(1, *result);
+
+  result = cont.insert(cont.cend(), 3);
+  EXPECT_EQ(std::prev(cont.end()), result);
+  EXPECT_EQ(3U, cont.size());
+  EXPECT_EQ(3, *result);
+
+  result = cont.insert(cont.cend(), 3);
+  EXPECT_EQ(std::prev(cont.end()), result);
+  EXPECT_EQ(3U, cont.size());
+  EXPECT_EQ(3, *result);
+}
+
+// iterator insert(const_iterator position_hint, value_type&& val)
+
+TEST(FlatTree, InsertPositionRValue) {
+  MoveOnlyTree cont;
+
+  MoveOnlyTree::iterator result = cont.insert(cont.cend(), MoveOnlyInt(2));
+  EXPECT_EQ(cont.begin(), result);
+  EXPECT_EQ(1U, cont.size());
+  EXPECT_EQ(2, result->data());
+
+  result = cont.insert(cont.cend(), MoveOnlyInt(1));
+  EXPECT_EQ(cont.begin(), result);
+  EXPECT_EQ(2U, cont.size());
+  EXPECT_EQ(1, result->data());
+
+  result = cont.insert(cont.cend(), MoveOnlyInt(3));
+  EXPECT_EQ(std::prev(cont.end()), result);
+  EXPECT_EQ(3U, cont.size());
+  EXPECT_EQ(3, result->data());
+
+  result = cont.insert(cont.cend(), MoveOnlyInt(3));
+  EXPECT_EQ(std::prev(cont.end()), result);
+  EXPECT_EQ(3U, cont.size());
+  EXPECT_EQ(3, result->data());
+}
+
+// template <class InputIterator>
+//   void insert(InputIterator first, InputIterator last);
+
+TEST(FlatTree, InsertIterIter) {
+  struct GetKeyFromIntIntPair {
+    const int& operator()(const std::pair<int, int>& p) const {
+      return p.first;
+    }
+  };
+
+  using IntIntMap =
+      flat_tree<int, IntPair, GetKeyFromIntIntPair, std::less<int>>;
+
+  {
+    IntIntMap cont;
+    IntPair int_pairs[] = {{3, 1}, {1, 1}, {4, 1}, {2, 1}};
+    cont.insert(std::begin(int_pairs), std::end(int_pairs));
+    EXPECT_THAT(cont, ElementsAre(IntPair(1, 1), IntPair(2, 1), IntPair(3, 1),
+                                  IntPair(4, 1)));
+  }
+
+  {
+    IntIntMap cont({{1, 1}, {2, 1}, {3, 1}, {4, 1}});
+    std::vector<IntPair> int_pairs;
+    cont.insert(std::begin(int_pairs), std::end(int_pairs));
+    EXPECT_THAT(cont, ElementsAre(IntPair(1, 1), IntPair(2, 1), IntPair(3, 1),
+                                  IntPair(4, 1)));
+  }
+
+  {
+    IntIntMap cont({{1, 1}, {2, 1}, {3, 1}, {4, 1}});
+    IntPair int_pairs[] = {{1, 1}};
+    cont.insert(std::begin(int_pairs), std::end(int_pairs));
+    EXPECT_THAT(cont, ElementsAre(IntPair(1, 1), IntPair(2, 1), IntPair(3, 1),
+                                  IntPair(4, 1)));
+  }
+
+  {
+    IntIntMap cont({{1, 1}, {2, 1}, {3, 1}, {4, 1}});
+    IntPair int_pairs[] = {{1, 2}};
+    cont.insert(std::begin(int_pairs), std::end(int_pairs), KEEP_LAST_OF_DUPES);
+    EXPECT_THAT(cont, ElementsAre(IntPair(1, 2), IntPair(2, 1), IntPair(3, 1),
+                                  IntPair(4, 1)));
+  }
+
+  {
+    IntIntMap cont({{1, 1}, {2, 1}, {3, 1}, {4, 1}});
+    IntPair int_pairs[] = {{5, 1}};
+    cont.insert(std::begin(int_pairs), std::end(int_pairs));
+    EXPECT_THAT(cont, ElementsAre(IntPair(1, 1), IntPair(2, 1), IntPair(3, 1),
+                                  IntPair(4, 1), IntPair(5, 1)));
+  }
+
+  {
+    IntIntMap cont({{1, 1}, {2, 1}, {3, 1}, {4, 1}});
+    IntPair int_pairs[] = {{5, 1}};
+    cont.insert(std::begin(int_pairs), std::end(int_pairs), KEEP_LAST_OF_DUPES);
+    EXPECT_THAT(cont, ElementsAre(IntPair(1, 1), IntPair(2, 1), IntPair(3, 1),
+                                  IntPair(4, 1), IntPair(5, 1)));
+  }
+
+  {
+    IntIntMap cont({{1, 1}, {2, 1}, {3, 1}, {4, 1}});
+    IntPair int_pairs[] = {{3, 2}, {1, 2}, {4, 2}, {2, 2}};
+    cont.insert(std::begin(int_pairs), std::end(int_pairs));
+    EXPECT_THAT(cont, ElementsAre(IntPair(1, 1), IntPair(2, 1), IntPair(3, 1),
+                                  IntPair(4, 1)));
+  }
+
+  {
+    IntIntMap cont({{1, 1}, {2, 1}, {3, 1}, {4, 1}});
+    IntPair int_pairs[] = {{3, 2}, {1, 2}, {4, 2}, {2, 2}};
+    cont.insert(std::begin(int_pairs), std::end(int_pairs), KEEP_LAST_OF_DUPES);
+    EXPECT_THAT(cont, ElementsAre(IntPair(1, 2), IntPair(2, 2), IntPair(3, 2),
+                                  IntPair(4, 2)));
+  }
+
+  {
+    IntIntMap cont({{1, 1}, {2, 1}, {3, 1}, {4, 1}});
+    IntPair int_pairs[] = {{3, 2}, {1, 2}, {4, 2}, {2, 2}, {7, 2}, {6, 2},
+                           {8, 2}, {5, 2}, {5, 3}, {6, 3}, {7, 3}, {8, 3}};
+    cont.insert(std::begin(int_pairs), std::end(int_pairs));
+    EXPECT_THAT(cont, ElementsAre(IntPair(1, 1), IntPair(2, 1), IntPair(3, 1),
+                                  IntPair(4, 1), IntPair(5, 2), IntPair(6, 2),
+                                  IntPair(7, 2), IntPair(8, 2)));
+  }
+
+  {
+    IntIntMap cont({{1, 1}, {2, 1}, {3, 1}, {4, 1}});
+    IntPair int_pairs[] = {{3, 2}, {1, 2}, {4, 2}, {2, 2}, {7, 2}, {6, 2},
+                           {8, 2}, {5, 2}, {5, 3}, {6, 3}, {7, 3}, {8, 3}};
+    cont.insert(std::begin(int_pairs), std::end(int_pairs), KEEP_LAST_OF_DUPES);
+    EXPECT_THAT(cont, ElementsAre(IntPair(1, 2), IntPair(2, 2), IntPair(3, 2),
+                                  IntPair(4, 2), IntPair(5, 3), IntPair(6, 3),
+                                  IntPair(7, 3), IntPair(8, 3)));
+  }
+}
+
+// template <class... Args>
+// pair<iterator, bool> emplace(Args&&... args)
+
+TEST(FlatTree, Emplace) {
+  {
+    EmplaceableTree cont;
+
+    std::pair<EmplaceableTree::iterator, bool> result = cont.emplace();
+    EXPECT_TRUE(result.second);
+    EXPECT_EQ(cont.begin(), result.first);
+    EXPECT_EQ(1U, cont.size());
+    EXPECT_EQ(Emplaceable(), *cont.begin());
+
+    result = cont.emplace(2, 3.5);
+    EXPECT_TRUE(result.second);
+    EXPECT_EQ(std::next(cont.begin()), result.first);
+    EXPECT_EQ(2U, cont.size());
+    EXPECT_EQ(Emplaceable(2, 3.5), *result.first);
+
+    result = cont.emplace(2, 3.5);
+    EXPECT_FALSE(result.second);
+    EXPECT_EQ(std::next(cont.begin()), result.first);
+    EXPECT_EQ(2U, cont.size());
+    EXPECT_EQ(Emplaceable(2, 3.5), *result.first);
+  }
+  {
+    IntTree cont;
+
+    std::pair<IntTree::iterator, bool> result = cont.emplace(2);
+    EXPECT_TRUE(result.second);
+    EXPECT_EQ(cont.begin(), result.first);
+    EXPECT_EQ(1U, cont.size());
+    EXPECT_EQ(2, *result.first);
+  }
+}
+
+// template <class... Args>
+// iterator emplace_hint(const_iterator position_hint, Args&&... args)
+
+TEST(FlatTree, EmplacePosition) {
+  {
+    EmplaceableTree cont;
+
+    EmplaceableTree::iterator result = cont.emplace_hint(cont.cend());
+    EXPECT_EQ(cont.begin(), result);
+    EXPECT_EQ(1U, cont.size());
+    EXPECT_EQ(Emplaceable(), *cont.begin());
+
+    result = cont.emplace_hint(cont.cend(), 2, 3.5);
+    EXPECT_EQ(std::next(cont.begin()), result);
+    EXPECT_EQ(2U, cont.size());
+    EXPECT_EQ(Emplaceable(2, 3.5), *result);
+
+    result = cont.emplace_hint(cont.cbegin(), 2, 3.5);
+    EXPECT_EQ(std::next(cont.begin()), result);
+    EXPECT_EQ(2U, cont.size());
+    EXPECT_EQ(Emplaceable(2, 3.5), *result);
+  }
+  {
+    IntTree cont;
+
+    IntTree::iterator result = cont.emplace_hint(cont.cend(), 2);
+    EXPECT_EQ(cont.begin(), result);
+    EXPECT_EQ(1U, cont.size());
+    EXPECT_EQ(2, *result);
+  }
+}
+
+// ----------------------------------------------------------------------------
+// Erase operations.
+
+// iterator erase(const_iterator position_hint)
+
+TEST(FlatTree, ErasePosition) {
+  {
+    IntTree cont({1, 2, 3, 4, 5, 6, 7, 8});
+
+    IntTree::iterator it = cont.erase(std::next(cont.cbegin(), 3));
+    EXPECT_EQ(std::next(cont.begin(), 3), it);
+    EXPECT_THAT(cont, ElementsAre(1, 2, 3, 5, 6, 7, 8));
+
+    it = cont.erase(std::next(cont.cbegin(), 0));
+    EXPECT_EQ(cont.begin(), it);
+    EXPECT_THAT(cont, ElementsAre(2, 3, 5, 6, 7, 8));
+
+    it = cont.erase(std::next(cont.cbegin(), 5));
+    EXPECT_EQ(cont.end(), it);
+    EXPECT_THAT(cont, ElementsAre(2, 3, 5, 6, 7));
+
+    it = cont.erase(std::next(cont.cbegin(), 1));
+    EXPECT_EQ(std::next(cont.begin()), it);
+    EXPECT_THAT(cont, ElementsAre(2, 5, 6, 7));
+
+    it = cont.erase(std::next(cont.cbegin(), 2));
+    EXPECT_EQ(std::next(cont.begin(), 2), it);
+    EXPECT_THAT(cont, ElementsAre(2, 5, 7));
+
+    it = cont.erase(std::next(cont.cbegin(), 2));
+    EXPECT_EQ(std::next(cont.begin(), 2), it);
+    EXPECT_THAT(cont, ElementsAre(2, 5));
+
+    it = cont.erase(std::next(cont.cbegin(), 0));
+    EXPECT_EQ(std::next(cont.begin(), 0), it);
+    EXPECT_THAT(cont, ElementsAre(5));
+
+    it = cont.erase(cont.cbegin());
+    EXPECT_EQ(cont.begin(), it);
+    EXPECT_EQ(cont.end(), it);
+  }
+  //  This is LWG #2059.
+  //  There is a potential ambiguity between erase with an iterator and erase
+  //  with a key, if key has a templated constructor.
+  {
+    using T = TemplateConstructor;
+
+    flat_tree<T, T, GetKeyFromValueIdentity<T>, std::less<>> cont;
+    T v(0);
+
+    auto it = cont.find(v);
+    if (it != cont.end())
+      cont.erase(it);
+  }
+}
+
+// iterator erase(const_iterator first, const_iterator last)
+
+TEST(FlatTree, EraseRange) {
+  IntTree cont({1, 2, 3, 4, 5, 6, 7, 8});
+
+  IntTree::iterator it =
+      cont.erase(std::next(cont.cbegin(), 5), std::next(cont.cbegin(), 5));
+  EXPECT_EQ(std::next(cont.begin(), 5), it);
+  EXPECT_THAT(cont, ElementsAre(1, 2, 3, 4, 5, 6, 7, 8));
+
+  it = cont.erase(std::next(cont.cbegin(), 3), std::next(cont.cbegin(), 4));
+  EXPECT_EQ(std::next(cont.begin(), 3), it);
+  EXPECT_THAT(cont, ElementsAre(1, 2, 3, 5, 6, 7, 8));
+
+  it = cont.erase(std::next(cont.cbegin(), 2), std::next(cont.cbegin(), 5));
+  EXPECT_EQ(std::next(cont.begin(), 2), it);
+  EXPECT_THAT(cont, ElementsAre(1, 2, 7, 8));
+
+  it = cont.erase(std::next(cont.cbegin(), 0), std::next(cont.cbegin(), 2));
+  EXPECT_EQ(std::next(cont.begin(), 0), it);
+  EXPECT_THAT(cont, ElementsAre(7, 8));
+
+  it = cont.erase(cont.cbegin(), cont.cend());
+  EXPECT_EQ(cont.begin(), it);
+  EXPECT_EQ(cont.end(), it);
+}
+
+// size_type erase(const key_type& key)
+
+TEST(FlatTree, EraseKey) {
+  IntTree cont({1, 2, 3, 4, 5, 6, 7, 8});
+
+  EXPECT_EQ(0U, cont.erase(9));
+  EXPECT_THAT(cont, ElementsAre(1, 2, 3, 4, 5, 6, 7, 8));
+
+  EXPECT_EQ(1U, cont.erase(4));
+  EXPECT_THAT(cont, ElementsAre(1, 2, 3, 5, 6, 7, 8));
+
+  EXPECT_EQ(1U, cont.erase(1));
+  EXPECT_THAT(cont, ElementsAre(2, 3, 5, 6, 7, 8));
+
+  EXPECT_EQ(1U, cont.erase(8));
+  EXPECT_THAT(cont, ElementsAre(2, 3, 5, 6, 7));
+
+  EXPECT_EQ(1U, cont.erase(3));
+  EXPECT_THAT(cont, ElementsAre(2, 5, 6, 7));
+
+  EXPECT_EQ(1U, cont.erase(6));
+  EXPECT_THAT(cont, ElementsAre(2, 5, 7));
+
+  EXPECT_EQ(1U, cont.erase(7));
+  EXPECT_THAT(cont, ElementsAre(2, 5));
+
+  EXPECT_EQ(1U, cont.erase(2));
+  EXPECT_THAT(cont, ElementsAre(5));
+
+  EXPECT_EQ(1U, cont.erase(5));
+  EXPECT_THAT(cont, ElementsAre());
+}
+
+// ----------------------------------------------------------------------------
+// Comparators.
+
+// key_compare key_comp() const
+
+TEST(FlatTree, KeyComp) {
+  ReversedTree cont({1, 2, 3, 4, 5});
+
+  EXPECT_TRUE(std::is_sorted(cont.begin(), cont.end(), cont.key_comp()));
+  int new_elements[] = {6, 7, 8, 9, 10};
+  std::copy(std::begin(new_elements), std::end(new_elements),
+            std::inserter(cont, cont.end()));
+  EXPECT_TRUE(std::is_sorted(cont.begin(), cont.end(), cont.key_comp()));
+}
+
+// value_compare value_comp() const
+
+TEST(FlatTree, ValueComp) {
+  ReversedTree cont({1, 2, 3, 4, 5});
+
+  EXPECT_TRUE(std::is_sorted(cont.begin(), cont.end(), cont.value_comp()));
+  int new_elements[] = {6, 7, 8, 9, 10};
+  std::copy(std::begin(new_elements), std::end(new_elements),
+            std::inserter(cont, cont.end()));
+  EXPECT_TRUE(std::is_sorted(cont.begin(), cont.end(), cont.value_comp()));
+}
+
+// ----------------------------------------------------------------------------
+// Search operations.
+
+// size_type count(const key_type& key) const
+
+TEST(FlatTree, Count) {
+  const IntTree cont({5, 6, 7, 8, 9, 10, 11, 12});
+
+  EXPECT_EQ(1U, cont.count(5));
+  EXPECT_EQ(1U, cont.count(6));
+  EXPECT_EQ(1U, cont.count(7));
+  EXPECT_EQ(1U, cont.count(8));
+  EXPECT_EQ(1U, cont.count(9));
+  EXPECT_EQ(1U, cont.count(10));
+  EXPECT_EQ(1U, cont.count(11));
+  EXPECT_EQ(1U, cont.count(12));
+  EXPECT_EQ(0U, cont.count(4));
+}
+
+// iterator find(const key_type& key)
+// const_iterator find(const key_type& key) const
+
+TEST(FlatTree, Find) {
+  {
+    IntTree cont({5, 6, 7, 8, 9, 10, 11, 12});
+
+    EXPECT_EQ(cont.begin(), cont.find(5));
+    EXPECT_EQ(std::next(cont.begin()), cont.find(6));
+    EXPECT_EQ(std::next(cont.begin(), 2), cont.find(7));
+    EXPECT_EQ(std::next(cont.begin(), 3), cont.find(8));
+    EXPECT_EQ(std::next(cont.begin(), 4), cont.find(9));
+    EXPECT_EQ(std::next(cont.begin(), 5), cont.find(10));
+    EXPECT_EQ(std::next(cont.begin(), 6), cont.find(11));
+    EXPECT_EQ(std::next(cont.begin(), 7), cont.find(12));
+    EXPECT_EQ(std::next(cont.begin(), 8), cont.find(4));
+  }
+  {
+    const IntTree cont({5, 6, 7, 8, 9, 10, 11, 12});
+
+    EXPECT_EQ(cont.begin(), cont.find(5));
+    EXPECT_EQ(std::next(cont.begin()), cont.find(6));
+    EXPECT_EQ(std::next(cont.begin(), 2), cont.find(7));
+    EXPECT_EQ(std::next(cont.begin(), 3), cont.find(8));
+    EXPECT_EQ(std::next(cont.begin(), 4), cont.find(9));
+    EXPECT_EQ(std::next(cont.begin(), 5), cont.find(10));
+    EXPECT_EQ(std::next(cont.begin(), 6), cont.find(11));
+    EXPECT_EQ(std::next(cont.begin(), 7), cont.find(12));
+    EXPECT_EQ(std::next(cont.begin(), 8), cont.find(4));
+  }
+}
+
+// pair<iterator, iterator> equal_range(const key_type& key)
+// pair<const_iterator, const_iterator> equal_range(const key_type& key) const
+
+TEST(FlatTree, EqualRange) {
+  {
+    IntTree cont({5, 7, 9, 11, 13, 15, 17, 19});
+
+    std::pair<IntTree::iterator, IntTree::iterator> result =
+        cont.equal_range(5);
+    EXPECT_EQ(std::next(cont.begin(), 0), result.first);
+    EXPECT_EQ(std::next(cont.begin(), 1), result.second);
+    result = cont.equal_range(7);
+    EXPECT_EQ(std::next(cont.begin(), 1), result.first);
+    EXPECT_EQ(std::next(cont.begin(), 2), result.second);
+    result = cont.equal_range(9);
+    EXPECT_EQ(std::next(cont.begin(), 2), result.first);
+    EXPECT_EQ(std::next(cont.begin(), 3), result.second);
+    result = cont.equal_range(11);
+    EXPECT_EQ(std::next(cont.begin(), 3), result.first);
+    EXPECT_EQ(std::next(cont.begin(), 4), result.second);
+    result = cont.equal_range(13);
+    EXPECT_EQ(std::next(cont.begin(), 4), result.first);
+    EXPECT_EQ(std::next(cont.begin(), 5), result.second);
+    result = cont.equal_range(15);
+    EXPECT_EQ(std::next(cont.begin(), 5), result.first);
+    EXPECT_EQ(std::next(cont.begin(), 6), result.second);
+    result = cont.equal_range(17);
+    EXPECT_EQ(std::next(cont.begin(), 6), result.first);
+    EXPECT_EQ(std::next(cont.begin(), 7), result.second);
+    result = cont.equal_range(19);
+    EXPECT_EQ(std::next(cont.begin(), 7), result.first);
+    EXPECT_EQ(std::next(cont.begin(), 8), result.second);
+    result = cont.equal_range(4);
+    EXPECT_EQ(std::next(cont.begin(), 0), result.first);
+    EXPECT_EQ(std::next(cont.begin(), 0), result.second);
+    result = cont.equal_range(6);
+    EXPECT_EQ(std::next(cont.begin(), 1), result.first);
+    EXPECT_EQ(std::next(cont.begin(), 1), result.second);
+    result = cont.equal_range(8);
+    EXPECT_EQ(std::next(cont.begin(), 2), result.first);
+    EXPECT_EQ(std::next(cont.begin(), 2), result.second);
+    result = cont.equal_range(10);
+    EXPECT_EQ(std::next(cont.begin(), 3), result.first);
+    EXPECT_EQ(std::next(cont.begin(), 3), result.second);
+    result = cont.equal_range(12);
+    EXPECT_EQ(std::next(cont.begin(), 4), result.first);
+    EXPECT_EQ(std::next(cont.begin(), 4), result.second);
+    result = cont.equal_range(14);
+    EXPECT_EQ(std::next(cont.begin(), 5), result.first);
+    EXPECT_EQ(std::next(cont.begin(), 5), result.second);
+    result = cont.equal_range(16);
+    EXPECT_EQ(std::next(cont.begin(), 6), result.first);
+    EXPECT_EQ(std::next(cont.begin(), 6), result.second);
+    result = cont.equal_range(18);
+    EXPECT_EQ(std::next(cont.begin(), 7), result.first);
+    EXPECT_EQ(std::next(cont.begin(), 7), result.second);
+    result = cont.equal_range(20);
+    EXPECT_EQ(std::next(cont.begin(), 8), result.first);
+    EXPECT_EQ(std::next(cont.begin(), 8), result.second);
+  }
+  {
+    const IntTree cont({5, 7, 9, 11, 13, 15, 17, 19});
+
+    std::pair<IntTree::const_iterator, IntTree::const_iterator> result =
+        cont.equal_range(5);
+    EXPECT_EQ(std::next(cont.begin(), 0), result.first);
+    EXPECT_EQ(std::next(cont.begin(), 1), result.second);
+    result = cont.equal_range(7);
+    EXPECT_EQ(std::next(cont.begin(), 1), result.first);
+    EXPECT_EQ(std::next(cont.begin(), 2), result.second);
+    result = cont.equal_range(9);
+    EXPECT_EQ(std::next(cont.begin(), 2), result.first);
+    EXPECT_EQ(std::next(cont.begin(), 3), result.second);
+    result = cont.equal_range(11);
+    EXPECT_EQ(std::next(cont.begin(), 3), result.first);
+    EXPECT_EQ(std::next(cont.begin(), 4), result.second);
+    result = cont.equal_range(13);
+    EXPECT_EQ(std::next(cont.begin(), 4), result.first);
+    EXPECT_EQ(std::next(cont.begin(), 5), result.second);
+    result = cont.equal_range(15);
+    EXPECT_EQ(std::next(cont.begin(), 5), result.first);
+    EXPECT_EQ(std::next(cont.begin(), 6), result.second);
+    result = cont.equal_range(17);
+    EXPECT_EQ(std::next(cont.begin(), 6), result.first);
+    EXPECT_EQ(std::next(cont.begin(), 7), result.second);
+    result = cont.equal_range(19);
+    EXPECT_EQ(std::next(cont.begin(), 7), result.first);
+    EXPECT_EQ(std::next(cont.begin(), 8), result.second);
+    result = cont.equal_range(4);
+    EXPECT_EQ(std::next(cont.begin(), 0), result.first);
+    EXPECT_EQ(std::next(cont.begin(), 0), result.second);
+    result = cont.equal_range(6);
+    EXPECT_EQ(std::next(cont.begin(), 1), result.first);
+    EXPECT_EQ(std::next(cont.begin(), 1), result.second);
+    result = cont.equal_range(8);
+    EXPECT_EQ(std::next(cont.begin(), 2), result.first);
+    EXPECT_EQ(std::next(cont.begin(), 2), result.second);
+    result = cont.equal_range(10);
+    EXPECT_EQ(std::next(cont.begin(), 3), result.first);
+    EXPECT_EQ(std::next(cont.begin(), 3), result.second);
+    result = cont.equal_range(12);
+    EXPECT_EQ(std::next(cont.begin(), 4), result.first);
+    EXPECT_EQ(std::next(cont.begin(), 4), result.second);
+    result = cont.equal_range(14);
+    EXPECT_EQ(std::next(cont.begin(), 5), result.first);
+    EXPECT_EQ(std::next(cont.begin(), 5), result.second);
+    result = cont.equal_range(16);
+    EXPECT_EQ(std::next(cont.begin(), 6), result.first);
+    EXPECT_EQ(std::next(cont.begin(), 6), result.second);
+    result = cont.equal_range(18);
+    EXPECT_EQ(std::next(cont.begin(), 7), result.first);
+    EXPECT_EQ(std::next(cont.begin(), 7), result.second);
+    result = cont.equal_range(20);
+    EXPECT_EQ(std::next(cont.begin(), 8), result.first);
+    EXPECT_EQ(std::next(cont.begin(), 8), result.second);
+  }
+}
+
+//       iterator lower_bound(const key_type& key);
+// const_iterator lower_bound(const key_type& key) const;
+
+TEST(FlatTree, LowerBound) {
+  {
+    IntTree cont({5, 7, 9, 11, 13, 15, 17, 19});
+
+    EXPECT_EQ(cont.begin(), cont.lower_bound(5));
+    EXPECT_EQ(std::next(cont.begin()), cont.lower_bound(7));
+    EXPECT_EQ(std::next(cont.begin(), 2), cont.lower_bound(9));
+    EXPECT_EQ(std::next(cont.begin(), 3), cont.lower_bound(11));
+    EXPECT_EQ(std::next(cont.begin(), 4), cont.lower_bound(13));
+    EXPECT_EQ(std::next(cont.begin(), 5), cont.lower_bound(15));
+    EXPECT_EQ(std::next(cont.begin(), 6), cont.lower_bound(17));
+    EXPECT_EQ(std::next(cont.begin(), 7), cont.lower_bound(19));
+    EXPECT_EQ(std::next(cont.begin(), 0), cont.lower_bound(4));
+    EXPECT_EQ(std::next(cont.begin(), 1), cont.lower_bound(6));
+    EXPECT_EQ(std::next(cont.begin(), 2), cont.lower_bound(8));
+    EXPECT_EQ(std::next(cont.begin(), 3), cont.lower_bound(10));
+    EXPECT_EQ(std::next(cont.begin(), 4), cont.lower_bound(12));
+    EXPECT_EQ(std::next(cont.begin(), 5), cont.lower_bound(14));
+    EXPECT_EQ(std::next(cont.begin(), 6), cont.lower_bound(16));
+    EXPECT_EQ(std::next(cont.begin(), 7), cont.lower_bound(18));
+    EXPECT_EQ(std::next(cont.begin(), 8), cont.lower_bound(20));
+  }
+  {
+    const IntTree cont({5, 7, 9, 11, 13, 15, 17, 19});
+
+    EXPECT_EQ(cont.begin(), cont.lower_bound(5));
+    EXPECT_EQ(std::next(cont.begin()), cont.lower_bound(7));
+    EXPECT_EQ(std::next(cont.begin(), 2), cont.lower_bound(9));
+    EXPECT_EQ(std::next(cont.begin(), 3), cont.lower_bound(11));
+    EXPECT_EQ(std::next(cont.begin(), 4), cont.lower_bound(13));
+    EXPECT_EQ(std::next(cont.begin(), 5), cont.lower_bound(15));
+    EXPECT_EQ(std::next(cont.begin(), 6), cont.lower_bound(17));
+    EXPECT_EQ(std::next(cont.begin(), 7), cont.lower_bound(19));
+    EXPECT_EQ(std::next(cont.begin(), 0), cont.lower_bound(4));
+    EXPECT_EQ(std::next(cont.begin(), 1), cont.lower_bound(6));
+    EXPECT_EQ(std::next(cont.begin(), 2), cont.lower_bound(8));
+    EXPECT_EQ(std::next(cont.begin(), 3), cont.lower_bound(10));
+    EXPECT_EQ(std::next(cont.begin(), 4), cont.lower_bound(12));
+    EXPECT_EQ(std::next(cont.begin(), 5), cont.lower_bound(14));
+    EXPECT_EQ(std::next(cont.begin(), 6), cont.lower_bound(16));
+    EXPECT_EQ(std::next(cont.begin(), 7), cont.lower_bound(18));
+    EXPECT_EQ(std::next(cont.begin(), 8), cont.lower_bound(20));
+  }
+}
+
+// iterator upper_bound(const key_type& key)
+// const_iterator upper_bound(const key_type& key) const
+
+TEST(FlatTree, UpperBound) {
+  {
+    IntTree cont({5, 7, 9, 11, 13, 15, 17, 19});
+
+    EXPECT_EQ(std::next(cont.begin(), 1), cont.upper_bound(5));
+    EXPECT_EQ(std::next(cont.begin(), 2), cont.upper_bound(7));
+    EXPECT_EQ(std::next(cont.begin(), 3), cont.upper_bound(9));
+    EXPECT_EQ(std::next(cont.begin(), 4), cont.upper_bound(11));
+    EXPECT_EQ(std::next(cont.begin(), 5), cont.upper_bound(13));
+    EXPECT_EQ(std::next(cont.begin(), 6), cont.upper_bound(15));
+    EXPECT_EQ(std::next(cont.begin(), 7), cont.upper_bound(17));
+    EXPECT_EQ(std::next(cont.begin(), 8), cont.upper_bound(19));
+    EXPECT_EQ(std::next(cont.begin(), 0), cont.upper_bound(4));
+    EXPECT_EQ(std::next(cont.begin(), 1), cont.upper_bound(6));
+    EXPECT_EQ(std::next(cont.begin(), 2), cont.upper_bound(8));
+    EXPECT_EQ(std::next(cont.begin(), 3), cont.upper_bound(10));
+    EXPECT_EQ(std::next(cont.begin(), 4), cont.upper_bound(12));
+    EXPECT_EQ(std::next(cont.begin(), 5), cont.upper_bound(14));
+    EXPECT_EQ(std::next(cont.begin(), 6), cont.upper_bound(16));
+    EXPECT_EQ(std::next(cont.begin(), 7), cont.upper_bound(18));
+    EXPECT_EQ(std::next(cont.begin(), 8), cont.upper_bound(20));
+  }
+  {
+    const IntTree cont({5, 7, 9, 11, 13, 15, 17, 19});
+
+    EXPECT_EQ(std::next(cont.begin(), 1), cont.upper_bound(5));
+    EXPECT_EQ(std::next(cont.begin(), 2), cont.upper_bound(7));
+    EXPECT_EQ(std::next(cont.begin(), 3), cont.upper_bound(9));
+    EXPECT_EQ(std::next(cont.begin(), 4), cont.upper_bound(11));
+    EXPECT_EQ(std::next(cont.begin(), 5), cont.upper_bound(13));
+    EXPECT_EQ(std::next(cont.begin(), 6), cont.upper_bound(15));
+    EXPECT_EQ(std::next(cont.begin(), 7), cont.upper_bound(17));
+    EXPECT_EQ(std::next(cont.begin(), 8), cont.upper_bound(19));
+    EXPECT_EQ(std::next(cont.begin(), 0), cont.upper_bound(4));
+    EXPECT_EQ(std::next(cont.begin(), 1), cont.upper_bound(6));
+    EXPECT_EQ(std::next(cont.begin(), 2), cont.upper_bound(8));
+    EXPECT_EQ(std::next(cont.begin(), 3), cont.upper_bound(10));
+    EXPECT_EQ(std::next(cont.begin(), 4), cont.upper_bound(12));
+    EXPECT_EQ(std::next(cont.begin(), 5), cont.upper_bound(14));
+    EXPECT_EQ(std::next(cont.begin(), 6), cont.upper_bound(16));
+    EXPECT_EQ(std::next(cont.begin(), 7), cont.upper_bound(18));
+    EXPECT_EQ(std::next(cont.begin(), 8), cont.upper_bound(20));
+  }
+}
+
+// ----------------------------------------------------------------------------
+// General operations.
+
+// void swap(flat_tree& other)
+// void swap(flat_tree& lhs, flat_tree& rhs)
+
+TEST(FlatTree, Swap) {
+  IntTree x({1, 2, 3});
+  IntTree y({4});
+  swap(x, y);
+  EXPECT_THAT(x, ElementsAre(4));
+  EXPECT_THAT(y, ElementsAre(1, 2, 3));
+
+  y.swap(x);
+  EXPECT_THAT(x, ElementsAre(1, 2, 3));
+  EXPECT_THAT(y, ElementsAre(4));
+}
+
+// bool operator==(const flat_tree& lhs, const flat_tree& rhs)
+// bool operator!=(const flat_tree& lhs, const flat_tree& rhs)
+// bool operator<(const flat_tree& lhs, const flat_tree& rhs)
+// bool operator>(const flat_tree& lhs, const flat_tree& rhs)
+// bool operator<=(const flat_tree& lhs, const flat_tree& rhs)
+// bool operator>=(const flat_tree& lhs, const flat_tree& rhs)
+
+TEST(FlatTree, Comparison) {
+  // Provided comparator does not participate in comparison.
+  ReversedTree biggest({3});
+  ReversedTree smallest({1});
+  ReversedTree middle({1, 2});
+
+  EXPECT_EQ(biggest, biggest);
+  EXPECT_NE(biggest, smallest);
+  EXPECT_LT(smallest, middle);
+  EXPECT_LE(smallest, middle);
+  EXPECT_LE(middle, middle);
+  EXPECT_GT(biggest, middle);
+  EXPECT_GE(biggest, middle);
+  EXPECT_GE(biggest, biggest);
+}
+
+TEST(FlatTree, EraseIf) {
+  IntTree x;
+  EraseIf(x, [](int) { return false; });
+  EXPECT_THAT(x, ElementsAre());
+
+  x = {1, 2, 3};
+  EraseIf(x, [](int elem) { return !(elem & 1); });
+  EXPECT_THAT(x, ElementsAre(1, 3));
+
+  x = {1, 2, 3, 4};
+  EraseIf(x, [](int elem) { return elem & 1; });
+  EXPECT_THAT(x, ElementsAre(2, 4));
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/containers/hash_tables.h b/base/containers/hash_tables.h
new file mode 100644
index 0000000..8da7b67
--- /dev/null
+++ b/base/containers/hash_tables.h
@@ -0,0 +1,75 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_HASH_TABLES_H_
+#define BASE_CONTAINERS_HASH_TABLES_H_
+
+#include <cstddef>
+#include <unordered_map>
+#include <unordered_set>
+#include <utility>
+
+#include "base/hash.h"
+
+// This header file is deprecated. Use the corresponding C++11 type
+// instead. https://crbug.com/576864
+
+// Use a custom hasher instead.
+#define BASE_HASH_NAMESPACE base_hash
+
+namespace BASE_HASH_NAMESPACE {
+
+// A separate hasher which, by default, forwards to std::hash. This is so legacy
+// uses of BASE_HASH_NAMESPACE with base::hash_map do not interfere with
+// std::hash mid-transition.
+template<typename T>
+struct hash {
+  std::size_t operator()(const T& value) const { return std::hash<T>()(value); }
+};
+
+// Use base::IntPairHash from base/hash.h as a custom hasher instead.
+template <typename Type1, typename Type2>
+struct hash<std::pair<Type1, Type2>> {
+  std::size_t operator()(std::pair<Type1, Type2> value) const {
+    return base::HashInts(value.first, value.second);
+  }
+};
+
+}  // namespace BASE_HASH_NAMESPACE
+
+namespace base {
+
+// Use std::unordered_map instead.
+template <class Key,
+          class T,
+          class Hash = BASE_HASH_NAMESPACE::hash<Key>,
+          class Pred = std::equal_to<Key>,
+          class Alloc = std::allocator<std::pair<const Key, T>>>
+using hash_map = std::unordered_map<Key, T, Hash, Pred, Alloc>;
+
+// Use std::unordered_multimap instead.
+template <class Key,
+          class T,
+          class Hash = BASE_HASH_NAMESPACE::hash<Key>,
+          class Pred = std::equal_to<Key>,
+          class Alloc = std::allocator<std::pair<const Key, T>>>
+using hash_multimap = std::unordered_multimap<Key, T, Hash, Pred, Alloc>;
+
+// Use std::unordered_multiset instead.
+template <class Key,
+          class Hash = BASE_HASH_NAMESPACE::hash<Key>,
+          class Pred = std::equal_to<Key>,
+          class Alloc = std::allocator<Key>>
+using hash_multiset = std::unordered_multiset<Key, Hash, Pred, Alloc>;
+
+// Use std::unordered_set instead.
+template <class Key,
+          class Hash = BASE_HASH_NAMESPACE::hash<Key>,
+          class Pred = std::equal_to<Key>,
+          class Alloc = std::allocator<Key>>
+using hash_set = std::unordered_set<Key, Hash, Pred, Alloc>;
+
+}  // namespace base
+
+#endif  // BASE_CONTAINERS_HASH_TABLES_H_
diff --git a/base/containers/hash_tables_unittest.cc b/base/containers/hash_tables_unittest.cc
new file mode 100644
index 0000000..6072e5d
--- /dev/null
+++ b/base/containers/hash_tables_unittest.cc
@@ -0,0 +1,67 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/containers/hash_tables.h"
+
+#include <stdint.h>
+#include <string>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+class HashPairTest : public testing::Test {
+};
+
+#define INSERT_PAIR_TEST(Type, value1, value2) \
+  { \
+    Type pair(value1, value2); \
+    base::hash_map<Type, int> map; \
+    map[pair] = 1; \
+  }
+
+// Verify that a hash_map can be constructed for pairs of integers of various
+// sizes.
+TEST_F(HashPairTest, IntegerPairs) {
+  typedef std::pair<int16_t, int16_t> Int16Int16Pair;
+  typedef std::pair<int16_t, int32_t> Int16Int32Pair;
+  typedef std::pair<int16_t, int64_t> Int16Int64Pair;
+
+  INSERT_PAIR_TEST(Int16Int16Pair, 4, 6);
+  INSERT_PAIR_TEST(Int16Int32Pair, 9, (1 << 29) + 378128932);
+  INSERT_PAIR_TEST(Int16Int64Pair, 10,
+                   (INT64_C(1) << 60) + INT64_C(78931732321));
+
+  typedef std::pair<int32_t, int16_t> Int32Int16Pair;
+  typedef std::pair<int32_t, int32_t> Int32Int32Pair;
+  typedef std::pair<int32_t, int64_t> Int32Int64Pair;
+
+  INSERT_PAIR_TEST(Int32Int16Pair, 4, 6);
+  INSERT_PAIR_TEST(Int32Int32Pair, 9, (1 << 29) + 378128932);
+  INSERT_PAIR_TEST(Int32Int64Pair, 10,
+                   (INT64_C(1) << 60) + INT64_C(78931732321));
+
+  typedef std::pair<int64_t, int16_t> Int64Int16Pair;
+  typedef std::pair<int64_t, int32_t> Int64Int32Pair;
+  typedef std::pair<int64_t, int64_t> Int64Int64Pair;
+
+  INSERT_PAIR_TEST(Int64Int16Pair, 4, 6);
+  INSERT_PAIR_TEST(Int64Int32Pair, 9, (1 << 29) + 378128932);
+  INSERT_PAIR_TEST(Int64Int64Pair, 10,
+                   (INT64_C(1) << 60) + INT64_C(78931732321));
+}
+
+// Verify that base::hash_set<const char*> compares by pointer value, not as C
+// strings.
+TEST(HashTableTest, CharPointers) {
+  std::string str1("hello");
+  std::string str2("hello");
+  base::hash_set<const char*> set;
+
+  set.insert(str1.c_str());
+  EXPECT_EQ(1u, set.count(str1.c_str()));
+  EXPECT_EQ(0u, set.count(str2.c_str()));
+}
+
+}  // namespace
diff --git a/base/containers/id_map.h b/base/containers/id_map.h
new file mode 100644
index 0000000..4c816da
--- /dev/null
+++ b/base/containers/id_map.h
@@ -0,0 +1,290 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_ID_MAP_H_
+#define BASE_CONTAINERS_ID_MAP_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <set>
+#include <type_traits>
+#include <unordered_map>
+#include <utility>
+
+#include "base/containers/flat_set.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/sequence_checker.h"
+
+namespace base {
+
+// This object maintains a list of IDs that can be quickly converted to
+// pointers to objects. It is implemented as a hash table, optimized for
+// relatively small data sets (in the common case, there will be exactly one
+// item in the list).
+//
+// Items can be inserted into the container with an arbitrary ID, but the
+// caller must ensure that every ID is unique. Mixing caller-chosen IDs with
+// automatically generated ones is not allowed, because the two can collide.
+//
+// The map's value type (the V param) can be any dereferenceable type, such as
+// a raw pointer or a smart pointer.
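+//
+// A minimal usage sketch (Foo and |foo| are illustrative, not part of the
+// API):
+//
+//   IDMap<Foo*> map;
+//   int32_t id = map.Add(&foo);    // ID generated automatically.
+//   Foo* value = map.Lookup(id);   // Constant-time lookup.
+//   map.Remove(id);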
+template <typename V, typename K = int32_t>
+class IDMap final {
+ public:
+  using KeyType = K;
+
+ private:
+  using T = typename std::remove_reference<decltype(*V())>::type;
+
+  using HashTable = std::unordered_map<KeyType, V>;
+
+ public:
+  IDMap() : iteration_depth_(0), next_id_(1), check_on_null_data_(false) {
+    // A number of consumers of IDMap create it on one thread but always
+    // access it from a different, but consistent, thread (or sequence)
+    // post-construction. The first call to CalledOnValidSequence() will re-bind
+    // it.
+    DETACH_FROM_SEQUENCE(sequence_checker_);
+  }
+
+  ~IDMap() {
+    // Many IDMap's are static, and hence will be destroyed on the main
+    // thread. However, all the accesses may take place on another thread (or
+    // sequence), such as the IO thread. Detaching again to clean this up.
+    DETACH_FROM_SEQUENCE(sequence_checker_);
+  }
+
+  // Sets whether Add and Replace should DCHECK if passed in NULL data.
+  // Default is false.
+  void set_check_on_null_data(bool value) { check_on_null_data_ = value; }
+
+  // Adds a value with an automatically generated unique ID. See AddWithID.
+  KeyType Add(V data) { return AddInternal(std::move(data)); }
+
+  // Adds a new data member with the specified ID. The ID must not be in
+  // the list. The caller either must generate all unique IDs itself and use
+  // this function, or allow this object to generate IDs and call Add. These
+  // two methods may not be mixed, or duplicate IDs may be generated.
+  void AddWithID(V data, KeyType id) { AddWithIDInternal(std::move(data), id); }
+
+  void Remove(KeyType id) {
+    DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+    typename HashTable::iterator i = data_.find(id);
+    if (i == data_.end() || IsRemoved(id)) {
+      NOTREACHED() << "Attempting to remove an item not in the list";
+      return;
+    }
+
+    if (iteration_depth_ == 0) {
+      data_.erase(i);
+    } else {
+      removed_ids_.insert(id);
+    }
+  }
+
+  // Replaces the value for |id| with |new_data| and returns the existing value.
+  // Should only be called with an already added id.
+  V Replace(KeyType id, V new_data) {
+    DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+    DCHECK(!check_on_null_data_ || new_data);
+    typename HashTable::iterator i = data_.find(id);
+    DCHECK(i != data_.end());
+    DCHECK(!IsRemoved(id));
+
+    using std::swap;
+    swap(i->second, new_data);
+    return new_data;
+  }
+
+  void Clear() {
+    DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+    if (iteration_depth_ == 0) {
+      data_.clear();
+    } else {
+      removed_ids_.reserve(data_.size());
+      removed_ids_.insert(KeyIterator(data_.begin()), KeyIterator(data_.end()));
+    }
+  }
+
+  bool IsEmpty() const {
+    DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+    return size() == 0u;
+  }
+
+  T* Lookup(KeyType id) const {
+    DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+    typename HashTable::const_iterator i = data_.find(id);
+    if (i == data_.end() || !i->second || IsRemoved(id))
+      return nullptr;
+    return &*i->second;
+  }
+
+  size_t size() const {
+    DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+    return data_.size() - removed_ids_.size();
+  }
+
+#if defined(UNIT_TEST)
+  int iteration_depth() const {
+    return iteration_depth_;
+  }
+#endif  // defined(UNIT_TEST)
+
+  // It is safe to remove elements from the map during iteration. All iterators
+  // will remain valid.
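+  //
+  // A usage sketch (|map| and Foo are illustrative):
+  //
+  //   for (IDMap<Foo*>::iterator it(&map); !it.IsAtEnd(); it.Advance()) {
+  //     Foo* value = it.GetCurrentValue();
+  //     map.Remove(it.GetCurrentKey());  // Safe while iterating.
+  //   }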
+  template <class ReturnType>
+  class Iterator {
+   public:
+    Iterator(IDMap<V, K>* map) : map_(map), iter_(map_->data_.begin()) {
+      Init();
+    }
+
+    Iterator(const Iterator& iter)
+        : map_(iter.map_),
+          iter_(iter.iter_) {
+      Init();
+    }
+
+    const Iterator& operator=(const Iterator& iter) {
+      // Release the binding on the current map before taking the new one, so
+      // that the iteration depth stays balanced. This mirrors the destructor.
+      if (--map_->iteration_depth_ == 0)
+        map_->Compact();
+      map_ = iter.map_;
+      iter_ = iter.iter_;
+      Init();
+      return *this;
+    }
+
+    ~Iterator() {
+      DCHECK_CALLED_ON_VALID_SEQUENCE(map_->sequence_checker_);
+
+      // We're going to decrement iteration depth. Make sure it's greater than
+      // zero so that it doesn't become negative.
+      DCHECK_LT(0, map_->iteration_depth_);
+
+      if (--map_->iteration_depth_ == 0)
+        map_->Compact();
+    }
+
+    bool IsAtEnd() const {
+      DCHECK_CALLED_ON_VALID_SEQUENCE(map_->sequence_checker_);
+      return iter_ == map_->data_.end();
+    }
+
+    KeyType GetCurrentKey() const {
+      DCHECK_CALLED_ON_VALID_SEQUENCE(map_->sequence_checker_);
+      return iter_->first;
+    }
+
+    ReturnType* GetCurrentValue() const {
+      DCHECK_CALLED_ON_VALID_SEQUENCE(map_->sequence_checker_);
+      if (!iter_->second || map_->IsRemoved(iter_->first))
+        return nullptr;
+      return &*iter_->second;
+    }
+
+    void Advance() {
+      DCHECK_CALLED_ON_VALID_SEQUENCE(map_->sequence_checker_);
+      ++iter_;
+      SkipRemovedEntries();
+    }
+
+   private:
+    void Init() {
+      DCHECK_CALLED_ON_VALID_SEQUENCE(map_->sequence_checker_);
+      ++map_->iteration_depth_;
+      SkipRemovedEntries();
+    }
+
+    void SkipRemovedEntries() {
+      while (iter_ != map_->data_.end() && map_->IsRemoved(iter_->first))
+        ++iter_;
+    }
+
+    IDMap<V, K>* map_;
+    typename HashTable::const_iterator iter_;
+  };
+
+  typedef Iterator<T> iterator;
+  typedef Iterator<const T> const_iterator;
+
+ private:
+  // Transforms a map iterator to an iterator on the keys of the map.
+  // Used by Clear() to populate |removed_ids_| in bulk.
+  struct KeyIterator : std::iterator<std::forward_iterator_tag, KeyType> {
+    using inner_iterator = typename HashTable::iterator;
+    inner_iterator iter_;
+
+    KeyIterator(inner_iterator iter) : iter_(iter) {}
+    KeyType operator*() const { return iter_->first; }
+    KeyIterator& operator++() {
+      ++iter_;
+      return *this;
+    }
+    KeyIterator operator++(int) { return KeyIterator(iter_++); }
+    bool operator==(const KeyIterator& other) const {
+      return iter_ == other.iter_;
+    }
+    bool operator!=(const KeyIterator& other) const {
+      return iter_ != other.iter_;
+    }
+  };
+
+  KeyType AddInternal(V data) {
+    DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+    DCHECK(!check_on_null_data_ || data);
+    KeyType this_id = next_id_;
+    DCHECK(data_.find(this_id) == data_.end()) << "Inserting duplicate item";
+    data_[this_id] = std::move(data);
+    next_id_++;
+    return this_id;
+  }
+
+  void AddWithIDInternal(V data, KeyType id) {
+    DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+    DCHECK(!check_on_null_data_ || data);
+    if (IsRemoved(id)) {
+      removed_ids_.erase(id);
+    } else {
+      DCHECK(data_.find(id) == data_.end()) << "Inserting duplicate item";
+    }
+    data_[id] = std::move(data);
+  }
+
+  bool IsRemoved(KeyType key) const {
+    return removed_ids_.find(key) != removed_ids_.end();
+  }
+
+  void Compact() {
+    DCHECK_EQ(0, iteration_depth_);
+    for (const auto& i : removed_ids_)
+      data_.erase(i);
+    removed_ids_.clear();
+  }
+
+  // Keep track of how many iterators are currently iterating on us to safely
+  // handle removing items during iteration.
+  int iteration_depth_;
+
+  // Keep set of IDs that should be removed after the outermost iteration has
+  // finished. This way we manage to not invalidate the iterator when an element
+  // is removed.
+  base::flat_set<KeyType> removed_ids_;
+
+  // The next ID that we will return from Add().
+  KeyType next_id_;
+
+  HashTable data_;
+
+  // See description above setter.
+  bool check_on_null_data_;
+
+  SEQUENCE_CHECKER(sequence_checker_);
+
+  DISALLOW_COPY_AND_ASSIGN(IDMap);
+};
+
+}  // namespace base
+
+#endif  // BASE_CONTAINERS_ID_MAP_H_
diff --git a/base/containers/id_map_unittest.cc b/base/containers/id_map_unittest.cc
new file mode 100644
index 0000000..346b69f
--- /dev/null
+++ b/base/containers/id_map_unittest.cc
@@ -0,0 +1,399 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/containers/id_map.h"
+
+#include <stdint.h>
+
+#include <memory>
+
+#include "base/test/gtest_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+class TestObject {
+};
+
+class DestructorCounter {
+ public:
+  explicit DestructorCounter(int* counter) : counter_(counter) {}
+  ~DestructorCounter() { ++(*counter_); }
+
+ private:
+  int* counter_;
+};
+
+}  // namespace
+
+TEST(IDMapTest, Basic) {
+  IDMap<TestObject*> map;
+  EXPECT_TRUE(map.IsEmpty());
+  EXPECT_EQ(0U, map.size());
+
+  TestObject obj1;
+  TestObject obj2;
+
+  int32_t id1 = map.Add(&obj1);
+  EXPECT_FALSE(map.IsEmpty());
+  EXPECT_EQ(1U, map.size());
+  EXPECT_EQ(&obj1, map.Lookup(id1));
+
+  int32_t id2 = map.Add(&obj2);
+  EXPECT_FALSE(map.IsEmpty());
+  EXPECT_EQ(2U, map.size());
+
+  EXPECT_EQ(&obj1, map.Lookup(id1));
+  EXPECT_EQ(&obj2, map.Lookup(id2));
+
+  map.Remove(id1);
+  EXPECT_FALSE(map.IsEmpty());
+  EXPECT_EQ(1U, map.size());
+
+  map.Remove(id2);
+  EXPECT_TRUE(map.IsEmpty());
+  EXPECT_EQ(0U, map.size());
+
+  map.AddWithID(&obj1, 1);
+  map.AddWithID(&obj2, 2);
+  EXPECT_EQ(&obj1, map.Lookup(1));
+  EXPECT_EQ(&obj2, map.Lookup(2));
+
+  EXPECT_EQ(&obj2, map.Replace(2, &obj1));
+  EXPECT_EQ(&obj1, map.Lookup(2));
+
+  EXPECT_EQ(0, map.iteration_depth());
+}
+
+TEST(IDMapTest, IteratorRemainsValidWhenRemovingCurrentElement) {
+  IDMap<TestObject*> map;
+
+  TestObject obj1;
+  TestObject obj2;
+  TestObject obj3;
+
+  map.Add(&obj1);
+  map.Add(&obj2);
+  map.Add(&obj3);
+
+  {
+    IDMap<TestObject*>::const_iterator iter(&map);
+
+    EXPECT_EQ(1, map.iteration_depth());
+
+    while (!iter.IsAtEnd()) {
+      map.Remove(iter.GetCurrentKey());
+      iter.Advance();
+    }
+
+    // Test that while an iterator is still in scope, we get the map emptiness
+    // right (http://crbug.com/35571).
+    EXPECT_TRUE(map.IsEmpty());
+    EXPECT_EQ(0U, map.size());
+  }
+
+  EXPECT_TRUE(map.IsEmpty());
+  EXPECT_EQ(0U, map.size());
+
+  EXPECT_EQ(0, map.iteration_depth());
+}
+
+TEST(IDMapTest, IteratorRemainsValidWhenRemovingOtherElements) {
+  IDMap<TestObject*> map;
+
+  const int kCount = 5;
+  TestObject obj[kCount];
+
+  for (int i = 0; i < kCount; i++)
+    map.Add(&obj[i]);
+
+  // IDMap has no predictable iteration order.
+  int32_t ids_in_iteration_order[kCount];
+  const TestObject* objs_in_iteration_order[kCount];
+  int counter = 0;
+  for (IDMap<TestObject*>::const_iterator iter(&map); !iter.IsAtEnd();
+       iter.Advance()) {
+    ids_in_iteration_order[counter] = iter.GetCurrentKey();
+    objs_in_iteration_order[counter] = iter.GetCurrentValue();
+    counter++;
+  }
+
+  counter = 0;
+  for (IDMap<TestObject*>::const_iterator iter(&map); !iter.IsAtEnd();
+       iter.Advance()) {
+    EXPECT_EQ(1, map.iteration_depth());
+
+    switch (counter) {
+      case 0:
+        EXPECT_EQ(ids_in_iteration_order[0], iter.GetCurrentKey());
+        EXPECT_EQ(objs_in_iteration_order[0], iter.GetCurrentValue());
+        map.Remove(ids_in_iteration_order[1]);
+        break;
+      case 1:
+        EXPECT_EQ(ids_in_iteration_order[2], iter.GetCurrentKey());
+        EXPECT_EQ(objs_in_iteration_order[2], iter.GetCurrentValue());
+        map.Remove(ids_in_iteration_order[3]);
+        break;
+      case 2:
+        EXPECT_EQ(ids_in_iteration_order[4], iter.GetCurrentKey());
+        EXPECT_EQ(objs_in_iteration_order[4], iter.GetCurrentValue());
+        map.Remove(ids_in_iteration_order[0]);
+        break;
+      default:
+        FAIL() << "should not have that many elements";
+        break;
+    }
+
+    counter++;
+  }
+
+  EXPECT_EQ(0, map.iteration_depth());
+}
+
+TEST(IDMapTest, CopyIterator) {
+  IDMap<TestObject*> map;
+
+  TestObject obj1;
+  TestObject obj2;
+  TestObject obj3;
+
+  map.Add(&obj1);
+  map.Add(&obj2);
+  map.Add(&obj3);
+
+  EXPECT_EQ(0, map.iteration_depth());
+
+  {
+    IDMap<TestObject*>::const_iterator iter1(&map);
+    EXPECT_EQ(1, map.iteration_depth());
+
+    // Make sure that copying the iterator correctly increments
+    // map's iteration depth.
+    IDMap<TestObject*>::const_iterator iter2(iter1);
+    EXPECT_EQ(2, map.iteration_depth());
+  }
+
+  // Make sure after destroying all iterators the map's iteration depth
+  // returns to initial state.
+  EXPECT_EQ(0, map.iteration_depth());
+}
+
+TEST(IDMapTest, AssignIterator) {
+  IDMap<TestObject*> map;
+
+  TestObject obj1;
+  TestObject obj2;
+  TestObject obj3;
+
+  map.Add(&obj1);
+  map.Add(&obj2);
+  map.Add(&obj3);
+
+  EXPECT_EQ(0, map.iteration_depth());
+
+  {
+    IDMap<TestObject*>::const_iterator iter1(&map);
+    EXPECT_EQ(1, map.iteration_depth());
+
+    IDMap<TestObject*>::const_iterator iter2(&map);
+    EXPECT_EQ(2, map.iteration_depth());
+
+    // Make sure that assigning the iterator keeps the map's iteration depth
+    // balanced (-1 for releasing the old binding, +1 for the new one).
+    iter2 = iter1;
+    EXPECT_EQ(2, map.iteration_depth());
+  }
+
+  // Make sure after destroying all iterators the map's iteration depth
+  // returns to initial state.
+  EXPECT_EQ(0, map.iteration_depth());
+}
+
+TEST(IDMapTest, IteratorRemainsValidWhenClearing) {
+  IDMap<TestObject*> map;
+
+  const int kCount = 5;
+  TestObject obj[kCount];
+
+  for (int i = 0; i < kCount; i++)
+    map.Add(&obj[i]);
+
+  // IDMap has no predictable iteration order.
+  int32_t ids_in_iteration_order[kCount];
+  const TestObject* objs_in_iteration_order[kCount];
+  int counter = 0;
+  for (IDMap<TestObject*>::const_iterator iter(&map); !iter.IsAtEnd();
+       iter.Advance()) {
+    ids_in_iteration_order[counter] = iter.GetCurrentKey();
+    objs_in_iteration_order[counter] = iter.GetCurrentValue();
+    counter++;
+  }
+
+  counter = 0;
+  for (IDMap<TestObject*>::const_iterator iter(&map); !iter.IsAtEnd();
+       iter.Advance()) {
+    switch (counter) {
+      case 0:
+        EXPECT_EQ(ids_in_iteration_order[0], iter.GetCurrentKey());
+        EXPECT_EQ(objs_in_iteration_order[0], iter.GetCurrentValue());
+        break;
+      case 1:
+        EXPECT_EQ(ids_in_iteration_order[1], iter.GetCurrentKey());
+        EXPECT_EQ(objs_in_iteration_order[1], iter.GetCurrentValue());
+        map.Clear();
+        EXPECT_TRUE(map.IsEmpty());
+        EXPECT_EQ(0U, map.size());
+        break;
+      default:
+        FAIL() << "should not have that many elements";
+        break;
+    }
+    counter++;
+  }
+
+  EXPECT_TRUE(map.IsEmpty());
+  EXPECT_EQ(0U, map.size());
+}
+
+TEST(IDMapTest, OwningPointersDeletesThemOnRemove) {
+  const int kCount = 3;
+
+  int external_del_count = 0;
+  DestructorCounter* external_obj[kCount];
+  int map_external_ids[kCount];
+
+  int owned_del_count = 0;
+  int map_owned_ids[kCount];
+
+  IDMap<DestructorCounter*> map_external;
+  IDMap<std::unique_ptr<DestructorCounter>> map_owned;
+
+  for (int i = 0; i < kCount; ++i) {
+    external_obj[i] = new DestructorCounter(&external_del_count);
+    map_external_ids[i] = map_external.Add(external_obj[i]);
+
+    map_owned_ids[i] =
+        map_owned.Add(std::make_unique<DestructorCounter>(&owned_del_count));
+  }
+
+  for (int i = 0; i < kCount; ++i) {
+    EXPECT_EQ(external_del_count, 0);
+    EXPECT_EQ(owned_del_count, i);
+
+    map_external.Remove(map_external_ids[i]);
+    map_owned.Remove(map_owned_ids[i]);
+  }
+
+  for (int i = 0; i < kCount; ++i) {
+    delete external_obj[i];
+  }
+
+  EXPECT_EQ(external_del_count, kCount);
+  EXPECT_EQ(owned_del_count, kCount);
+}
+
+TEST(IDMapTest, OwningPointersDeletesThemOnClear) {
+  const int kCount = 3;
+
+  int external_del_count = 0;
+  DestructorCounter* external_obj[kCount];
+
+  int owned_del_count = 0;
+
+  IDMap<DestructorCounter*> map_external;
+  IDMap<std::unique_ptr<DestructorCounter>> map_owned;
+
+  for (int i = 0; i < kCount; ++i) {
+    external_obj[i] = new DestructorCounter(&external_del_count);
+    map_external.Add(external_obj[i]);
+
+    map_owned.Add(std::make_unique<DestructorCounter>(&owned_del_count));
+  }
+
+  EXPECT_EQ(external_del_count, 0);
+  EXPECT_EQ(owned_del_count, 0);
+
+  map_external.Clear();
+  map_owned.Clear();
+
+  EXPECT_EQ(external_del_count, 0);
+  EXPECT_EQ(owned_del_count, kCount);
+
+  for (int i = 0; i < kCount; ++i) {
+    delete external_obj[i];
+  }
+
+  EXPECT_EQ(external_del_count, kCount);
+  EXPECT_EQ(owned_del_count, kCount);
+}
+
+TEST(IDMapTest, OwningPointersDeletesThemOnDestruct) {
+  const int kCount = 3;
+
+  int external_del_count = 0;
+  DestructorCounter* external_obj[kCount];
+
+  int owned_del_count = 0;
+
+  {
+    IDMap<DestructorCounter*> map_external;
+    IDMap<std::unique_ptr<DestructorCounter>> map_owned;
+
+    for (int i = 0; i < kCount; ++i) {
+      external_obj[i] = new DestructorCounter(&external_del_count);
+      map_external.Add(external_obj[i]);
+
+      map_owned.Add(std::make_unique<DestructorCounter>(&owned_del_count));
+    }
+  }
+
+  EXPECT_EQ(external_del_count, 0);
+
+  for (int i = 0; i < kCount; ++i) {
+    delete external_obj[i];
+  }
+
+  EXPECT_EQ(external_del_count, kCount);
+  EXPECT_EQ(owned_del_count, kCount);
+}
+
+TEST(IDMapTest, Int64KeyType) {
+  IDMap<TestObject*, int64_t> map;
+  TestObject obj1;
+  const int64_t kId1 = 999999999999999999;
+
+  map.AddWithID(&obj1, kId1);
+  EXPECT_EQ(&obj1, map.Lookup(kId1));
+
+  IDMap<TestObject*, int64_t>::const_iterator iter(&map);
+  ASSERT_FALSE(iter.IsAtEnd());
+  EXPECT_EQ(kId1, iter.GetCurrentKey());
+  EXPECT_EQ(&obj1, iter.GetCurrentValue());
+  iter.Advance();
+  ASSERT_TRUE(iter.IsAtEnd());
+
+  map.Remove(kId1);
+  EXPECT_TRUE(map.IsEmpty());
+}
+
+TEST(IDMapTest, RemovedValueHandling) {
+  TestObject obj;
+  IDMap<TestObject*> map;
+  int key = map.Add(&obj);
+
+  IDMap<TestObject*>::iterator itr(&map);
+  map.Clear();
+  EXPECT_DCHECK_DEATH(map.Remove(key));
+  EXPECT_DCHECK_DEATH(map.Replace(key, &obj));
+  EXPECT_FALSE(map.Lookup(key));
+  EXPECT_FALSE(itr.IsAtEnd());
+  EXPECT_FALSE(itr.GetCurrentValue());
+
+  EXPECT_TRUE(map.IsEmpty());
+  map.AddWithID(&obj, key);
+  EXPECT_EQ(1u, map.size());
+}
+
+}  // namespace base
diff --git a/base/containers/linked_list.h b/base/containers/linked_list.h
new file mode 100644
index 0000000..a913bad
--- /dev/null
+++ b/base/containers/linked_list.h
@@ -0,0 +1,190 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_LINKED_LIST_H_
+#define BASE_CONTAINERS_LINKED_LIST_H_
+
+#include "base/macros.h"
+
+// Simple LinkedList type. (See the Q&A section to understand how this
+// differs from std::list).
+//
+// To use, start by declaring the class which will be contained in the linked
+// list, as extending LinkNode (this gives it next/previous pointers).
+//
+//   class MyNodeType : public LinkNode<MyNodeType> {
+//     ...
+//   };
+//
+// Next, to keep track of the list's head/tail, use a LinkedList instance:
+//
+//   LinkedList<MyNodeType> list;
+//
+// To add elements to the list, use any of LinkedList::Append,
+// LinkNode::InsertBefore, or LinkNode::InsertAfter:
+//
+//   LinkNode<MyNodeType>* n1 = ...;
+//   LinkNode<MyNodeType>* n2 = ...;
+//   LinkNode<MyNodeType>* n3 = ...;
+//
+//   list.Append(n1);
+//   list.Append(n3);
+//   n2->InsertBefore(n3);
+//
+// Lastly, to iterate through the linked list forwards:
+//
+//   for (LinkNode<MyNodeType>* node = list.head();
+//        node != list.end();
+//        node = node->next()) {
+//     MyNodeType* value = node->value();
+//     ...
+//   }
+//
+// Or to iterate the linked list backwards:
+//
+//   for (LinkNode<MyNodeType>* node = list.tail();
+//        node != list.end();
+//        node = node->previous()) {
+//     MyNodeType* value = node->value();
+//     ...
+//   }
+//
+// Questions and Answers:
+//
+// Q. Should I use std::list or base::LinkedList?
+//
+// A. The main reason to use base::LinkedList over std::list is
+//    performance. If you don't care about the performance differences
+//    then use an STL container, as it makes for better code readability.
+//
+//    Comparing the performance of base::LinkedList<T> to std::list<T*>:
+//
+//    * Erasing an element of type T* from base::LinkedList<T> is
+//      an O(1) operation. Whereas for std::list<T*> it is O(n).
+//      That is because with std::list<T*> you must obtain an
+//      iterator to the T* element before you can call erase(iterator).
+//
+//    * Insertion operations with base::LinkedList<T> never require
+//      heap allocations.
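+//
+//      For example, removing a node from whichever list contains it is just
+//      (a sketch):
+//
+//        n->RemoveFromList();  // O(1); no iterator lookup required.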
+//
+// Q. How does base::LinkedList implementation differ from std::list?
+//
+// A. Doubly-linked lists are made up of nodes that contain "next" and
+//    "previous" pointers that reference other nodes in the list.
+//
+//    With base::LinkedList<T>, the type being inserted already reserves
+//    space for the "next" and "previous" pointers (base::LinkNode<T>*).
+//    Whereas with std::list<T> the type can be anything, so the implementation
+//    needs to glue on the "next" and "previous" pointers using
+//    some internal node type.
+
+namespace base {
+
+template <typename T>
+class LinkNode {
+ public:
+  LinkNode() : previous_(nullptr), next_(nullptr) {}
+  LinkNode(LinkNode<T>* previous, LinkNode<T>* next)
+      : previous_(previous), next_(next) {}
+
+  LinkNode(LinkNode<T>&& rhs) {
+    next_ = rhs.next_;
+    rhs.next_ = nullptr;
+    previous_ = rhs.previous_;
+    rhs.previous_ = nullptr;
+
+    // If the node belongs to a list, next_ and previous_ are both non-null.
+    // Otherwise, they are both null.
+    if (next_) {
+      next_->previous_ = this;
+      previous_->next_ = this;
+    }
+  }
+
+  // Insert |this| into the linked list, before |e|.
+  void InsertBefore(LinkNode<T>* e) {
+    this->next_ = e;
+    this->previous_ = e->previous_;
+    e->previous_->next_ = this;
+    e->previous_ = this;
+  }
+
+  // Insert |this| into the linked list, after |e|.
+  void InsertAfter(LinkNode<T>* e) {
+    this->next_ = e->next_;
+    this->previous_ = e;
+    e->next_->previous_ = this;
+    e->next_ = this;
+  }
+
+  // Remove |this| from the linked list.
+  void RemoveFromList() {
+    this->previous_->next_ = this->next_;
+    this->next_->previous_ = this->previous_;
+    // next() and previous() return null if and only if this node is not in
+    // any list.
+    this->next_ = nullptr;
+    this->previous_ = nullptr;
+  }
+
+  LinkNode<T>* previous() const {
+    return previous_;
+  }
+
+  LinkNode<T>* next() const {
+    return next_;
+  }
+
+  // Cast from the node-type to the value type.
+  const T* value() const {
+    return static_cast<const T*>(this);
+  }
+
+  T* value() {
+    return static_cast<T*>(this);
+  }
+
+ private:
+  LinkNode<T>* previous_;
+  LinkNode<T>* next_;
+
+  DISALLOW_COPY_AND_ASSIGN(LinkNode);
+};
+
+template <typename T>
+class LinkedList {
+ public:
+  // The "root" node is self-referential, and forms the basis of a circular
+  // list (root_.next() will point back to the start of the list,
+  // and root_->previous() wraps around to the end of the list).
+  LinkedList() : root_(&root_, &root_) {}
+
+  // Appends |e| to the end of the linked list.
+  void Append(LinkNode<T>* e) {
+    e->InsertBefore(&root_);
+  }
+
+  LinkNode<T>* head() const {
+    return root_.next();
+  }
+
+  LinkNode<T>* tail() const {
+    return root_.previous();
+  }
+
+  const LinkNode<T>* end() const {
+    return &root_;
+  }
+
+  bool empty() const { return head() == end(); }
+
+ private:
+  LinkNode<T> root_;
+
+  DISALLOW_COPY_AND_ASSIGN(LinkedList);
+};
+
+}  // namespace base
+
+#endif  // BASE_CONTAINERS_LINKED_LIST_H_
diff --git a/base/containers/linked_list_unittest.cc b/base/containers/linked_list_unittest.cc
new file mode 100644
index 0000000..8e547ba
--- /dev/null
+++ b/base/containers/linked_list_unittest.cc
@@ -0,0 +1,349 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/containers/linked_list.h"
+#include "base/macros.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+class Node : public LinkNode<Node> {
+ public:
+  explicit Node(int id) : id_(id) {}
+
+  int id() const { return id_; }
+
+ private:
+  int id_;
+};
+
+class MultipleInheritanceNodeBase {
+ public:
+  MultipleInheritanceNodeBase() : field_taking_up_space_(0) {}
+  int field_taking_up_space_;
+};
+
+class MultipleInheritanceNode : public MultipleInheritanceNodeBase,
+                                public LinkNode<MultipleInheritanceNode> {
+ public:
+  MultipleInheritanceNode() = default;
+};
+
+class MovableNode : public LinkNode<MovableNode> {
+ public:
+  explicit MovableNode(int id) : id_(id) {}
+
+  MovableNode(MovableNode&&) = default;
+
+  int id() const { return id_; }
+
+ private:
+  int id_;
+};
+
+// Checks that when iterating |list| (either from head to tail, or from
+// tail to head, as determined by |forward|), we get back |node_ids|,
+// which is an array of size |num_nodes|.
+void ExpectListContentsForDirection(const LinkedList<Node>& list,
+                                    int num_nodes,
+                                    const int* node_ids,
+                                    bool forward) {
+  int i = 0;
+  for (const LinkNode<Node>* node = (forward ? list.head() : list.tail());
+       node != list.end();
+       node = (forward ? node->next() : node->previous())) {
+    ASSERT_LT(i, num_nodes);
+    int index_of_id = forward ? i : num_nodes - i - 1;
+    EXPECT_EQ(node_ids[index_of_id], node->value()->id());
+    ++i;
+  }
+  EXPECT_EQ(num_nodes, i);
+}
+
+void ExpectListContents(const LinkedList<Node>& list,
+                        int num_nodes,
+                        const int* node_ids) {
+  {
+    SCOPED_TRACE("Iterating forward (from head to tail)");
+    ExpectListContentsForDirection(list, num_nodes, node_ids, true);
+  }
+  {
+    SCOPED_TRACE("Iterating backward (from tail to head)");
+    ExpectListContentsForDirection(list, num_nodes, node_ids, false);
+  }
+}
+
+TEST(LinkedList, Empty) {
+  LinkedList<Node> list;
+  EXPECT_EQ(list.end(), list.head());
+  EXPECT_EQ(list.end(), list.tail());
+  ExpectListContents(list, 0, nullptr);
+}
+
+TEST(LinkedList, Append) {
+  LinkedList<Node> list;
+  ExpectListContents(list, 0, nullptr);
+
+  Node n1(1);
+  list.Append(&n1);
+
+  EXPECT_EQ(&n1, list.head());
+  EXPECT_EQ(&n1, list.tail());
+  {
+    const int expected[] = {1};
+    ExpectListContents(list, arraysize(expected), expected);
+  }
+
+  Node n2(2);
+  list.Append(&n2);
+
+  EXPECT_EQ(&n1, list.head());
+  EXPECT_EQ(&n2, list.tail());
+  {
+    const int expected[] = {1, 2};
+    ExpectListContents(list, arraysize(expected), expected);
+  }
+
+  Node n3(3);
+  list.Append(&n3);
+
+  EXPECT_EQ(&n1, list.head());
+  EXPECT_EQ(&n3, list.tail());
+  {
+    const int expected[] = {1, 2, 3};
+    ExpectListContents(list, arraysize(expected), expected);
+  }
+}
+
+TEST(LinkedList, RemoveFromList) {
+  LinkedList<Node> list;
+
+  Node n1(1);
+  Node n2(2);
+  Node n3(3);
+  Node n4(4);
+  Node n5(5);
+
+  list.Append(&n1);
+  list.Append(&n2);
+  list.Append(&n3);
+  list.Append(&n4);
+  list.Append(&n5);
+
+  EXPECT_EQ(&n1, list.head());
+  EXPECT_EQ(&n5, list.tail());
+  {
+    const int expected[] = {1, 2, 3, 4, 5};
+    ExpectListContents(list, arraysize(expected), expected);
+  }
+
+  // Remove from the middle.
+  n3.RemoveFromList();
+
+  EXPECT_EQ(&n1, list.head());
+  EXPECT_EQ(&n5, list.tail());
+  {
+    const int expected[] = {1, 2, 4, 5};
+    ExpectListContents(list, arraysize(expected), expected);
+  }
+
+  // Remove from the tail.
+  n5.RemoveFromList();
+
+  EXPECT_EQ(&n1, list.head());
+  EXPECT_EQ(&n4, list.tail());
+  {
+    const int expected[] = {1, 2, 4};
+    ExpectListContents(list, arraysize(expected), expected);
+  }
+
+  // Remove from the head.
+  n1.RemoveFromList();
+
+  EXPECT_EQ(&n2, list.head());
+  EXPECT_EQ(&n4, list.tail());
+  {
+    const int expected[] = {2, 4};
+    ExpectListContents(list, arraysize(expected), expected);
+  }
+
+  // Empty the list.
+  n2.RemoveFromList();
+  n4.RemoveFromList();
+
+  ExpectListContents(list, 0, nullptr);
+  EXPECT_EQ(list.end(), list.head());
+  EXPECT_EQ(list.end(), list.tail());
+
+  // Fill the list once again.
+  list.Append(&n1);
+  list.Append(&n2);
+  list.Append(&n3);
+  list.Append(&n4);
+  list.Append(&n5);
+
+  EXPECT_EQ(&n1, list.head());
+  EXPECT_EQ(&n5, list.tail());
+  {
+    const int expected[] = {1, 2, 3, 4, 5};
+    ExpectListContents(list, arraysize(expected), expected);
+  }
+}
+
+TEST(LinkedList, InsertBefore) {
+  LinkedList<Node> list;
+
+  Node n1(1);
+  Node n2(2);
+  Node n3(3);
+  Node n4(4);
+
+  list.Append(&n1);
+  list.Append(&n2);
+
+  EXPECT_EQ(&n1, list.head());
+  EXPECT_EQ(&n2, list.tail());
+  {
+    const int expected[] = {1, 2};
+    ExpectListContents(list, arraysize(expected), expected);
+  }
+
+  n3.InsertBefore(&n2);
+
+  EXPECT_EQ(&n1, list.head());
+  EXPECT_EQ(&n2, list.tail());
+  {
+    const int expected[] = {1, 3, 2};
+    ExpectListContents(list, arraysize(expected), expected);
+  }
+
+  n4.InsertBefore(&n1);
+
+  EXPECT_EQ(&n4, list.head());
+  EXPECT_EQ(&n2, list.tail());
+  {
+    const int expected[] = {4, 1, 3, 2};
+    ExpectListContents(list, arraysize(expected), expected);
+  }
+}
+
+TEST(LinkedList, InsertAfter) {
+  LinkedList<Node> list;
+
+  Node n1(1);
+  Node n2(2);
+  Node n3(3);
+  Node n4(4);
+
+  list.Append(&n1);
+  list.Append(&n2);
+
+  EXPECT_EQ(&n1, list.head());
+  EXPECT_EQ(&n2, list.tail());
+  {
+    const int expected[] = {1, 2};
+    ExpectListContents(list, arraysize(expected), expected);
+  }
+
+  n3.InsertAfter(&n2);
+
+  EXPECT_EQ(&n1, list.head());
+  EXPECT_EQ(&n3, list.tail());
+  {
+    const int expected[] = {1, 2, 3};
+    ExpectListContents(list, arraysize(expected), expected);
+  }
+
+  n4.InsertAfter(&n1);
+
+  EXPECT_EQ(&n1, list.head());
+  EXPECT_EQ(&n3, list.tail());
+  {
+    const int expected[] = {1, 4, 2, 3};
+    ExpectListContents(list, arraysize(expected), expected);
+  }
+}
+
+TEST(LinkedList, MultipleInheritanceNode) {
+  MultipleInheritanceNode node;
+  EXPECT_EQ(&node, node.value());
+}
+
+TEST(LinkedList, EmptyListIsEmpty) {
+  LinkedList<Node> list;
+  EXPECT_TRUE(list.empty());
+}
+
+TEST(LinkedList, NonEmptyListIsNotEmpty) {
+  LinkedList<Node> list;
+
+  Node n(1);
+  list.Append(&n);
+
+  EXPECT_FALSE(list.empty());
+}
+
+TEST(LinkedList, EmptiedListIsEmptyAgain) {
+  LinkedList<Node> list;
+
+  Node n(1);
+  list.Append(&n);
+  n.RemoveFromList();
+
+  EXPECT_TRUE(list.empty());
+}
+
+TEST(LinkedList, NodesCanBeReused) {
+  LinkedList<Node> list1;
+  LinkedList<Node> list2;
+
+  Node n(1);
+  list1.Append(&n);
+  n.RemoveFromList();
+  list2.Append(&n);
+
+  EXPECT_EQ(list2.head()->value(), &n);
+}
+
+TEST(LinkedList, RemovedNodeHasNullNextPrevious) {
+  LinkedList<Node> list;
+
+  Node n(1);
+  list.Append(&n);
+  n.RemoveFromList();
+
+  EXPECT_EQ(nullptr, n.next());
+  EXPECT_EQ(nullptr, n.previous());
+}
+
+TEST(LinkedList, NodeMoveConstructor) {
+  LinkedList<MovableNode> list;
+
+  MovableNode n1(1);
+  MovableNode n2(2);
+  MovableNode n3(3);
+
+  list.Append(&n1);
+  list.Append(&n2);
+  list.Append(&n3);
+
+  EXPECT_EQ(&n1, n2.previous());
+  EXPECT_EQ(&n2, n1.next());
+  EXPECT_EQ(&n3, n2.next());
+  EXPECT_EQ(&n2, n3.previous());
+  EXPECT_EQ(2, n2.id());
+
+  MovableNode n2_new(std::move(n2));
+
+  EXPECT_EQ(nullptr, n2.next());
+  EXPECT_EQ(nullptr, n2.previous());
+
+  EXPECT_EQ(&n1, n2_new.previous());
+  EXPECT_EQ(&n2_new, n1.next());
+  EXPECT_EQ(&n3, n2_new.next());
+  EXPECT_EQ(&n2_new, n3.previous());
+  EXPECT_EQ(2, n2_new.id());
+}
+
+}  // namespace
+}  // namespace base
diff --git a/base/containers/mru_cache.h b/base/containers/mru_cache.h
new file mode 100644
index 0000000..4a9f44e
--- /dev/null
+++ b/base/containers/mru_cache.h
@@ -0,0 +1,268 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains a template for a Most Recently Used cache that allows
+// constant-time access to items using a key, but easy identification of the
+// least-recently-used items for removal.  Each key can only be associated with
+// one payload item at a time.
+//
+// The key object will be stored twice, so it should support efficient copying.
+//
+// NOTE: While all operations are O(1), this code is written for
+// legibility rather than optimality. If future profiling identifies this as
+// a bottleneck, there is room for smaller values of 1 in the O(1). :]
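+//
+// A minimal usage sketch (values are illustrative):
+//
+//   base::MRUCache<int, std::string> cache(3);  // Evicts once size exceeds 3.
+//   cache.Put(1, std::string("one"));
+//   auto it = cache.Get(1);  // Moves key 1 to the front of the recency list.
+//   if (it != cache.end())
+//     UseValue(it->second);  // UseValue() is illustrative.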
+
+#ifndef BASE_CONTAINERS_MRU_CACHE_H_
+#define BASE_CONTAINERS_MRU_CACHE_H_
+
+#include <stddef.h>
+
+#include <algorithm>
+#include <functional>
+#include <list>
+#include <map>
+#include <unordered_map>
+#include <utility>
+
+#include "base/logging.h"
+#include "base/macros.h"
+
+namespace base {
+namespace trace_event {
+namespace internal {
+
+template <class MruCacheType>
+size_t DoEstimateMemoryUsageForMruCache(const MruCacheType&);
+
+}  // namespace internal
+}  // namespace trace_event
+
+// MRUCacheBase ---------------------------------------------------------------
+
+// This template is used to standardize map type containers that can be used
+// by MRUCacheBase. This level of indirection is necessary because of the way
+// that template template params and default template params interact.
+template <class KeyType, class ValueType, class CompareType>
+struct MRUCacheStandardMap {
+  typedef std::map<KeyType, ValueType, CompareType> Type;
+};
+
+// Base class for the MRU cache specializations defined below.
+template <class KeyType,
+          class PayloadType,
+          class HashOrCompareType,
+          template <typename, typename, typename> class MapType =
+              MRUCacheStandardMap>
+class MRUCacheBase {
+ public:
+  // The payload of the list. This maintains a copy of the key so we can
+  // efficiently delete things given an element of the list.
+  typedef std::pair<KeyType, PayloadType> value_type;
+
+ private:
+  typedef std::list<value_type> PayloadList;
+  typedef typename MapType<KeyType,
+                           typename PayloadList::iterator,
+                           HashOrCompareType>::Type KeyIndex;
+
+ public:
+  typedef typename PayloadList::size_type size_type;
+
+  typedef typename PayloadList::iterator iterator;
+  typedef typename PayloadList::const_iterator const_iterator;
+  typedef typename PayloadList::reverse_iterator reverse_iterator;
+  typedef typename PayloadList::const_reverse_iterator const_reverse_iterator;
+
+  enum { NO_AUTO_EVICT = 0 };
+
+  // The max_size is the size to which the cache prunes its members when a new
+  // item is inserted. If the caller wants to manage this itself (for example,
+  // maybe it has special work to do when something is evicted), it can pass
+  // NO_AUTO_EVICT to leave the cache size unrestricted.
+  explicit MRUCacheBase(size_type max_size) : max_size_(max_size) {}
+
+  virtual ~MRUCacheBase() = default;
+
+  size_type max_size() const { return max_size_; }
+
+  // Inserts a payload item with the given key. If an existing item has
+  // the same key, it is removed prior to insertion. An iterator indicating the
+  // inserted item will be returned (this will always be the front of the list).
+  //
+  // The payload will be forwarded.
+  template <typename Payload>
+  iterator Put(const KeyType& key, Payload&& payload) {
+    // Remove any existing payload with that key.
+    typename KeyIndex::iterator index_iter = index_.find(key);
+    if (index_iter != index_.end()) {
+      // Erase the reference to it. The index reference will be replaced in the
+      // code below.
+      Erase(index_iter->second);
+    } else if (max_size_ != NO_AUTO_EVICT) {
+      // New item is being inserted which might make it larger than the maximum
+      // size: kick the oldest thing out if necessary.
+      ShrinkToSize(max_size_ - 1);
+    }
+
+    ordering_.emplace_front(key, std::forward<Payload>(payload));
+    index_.emplace(key, ordering_.begin());
+    return ordering_.begin();
+  }
+
+  // Retrieves the item associated with the given key, or end() if not found.
+  // This method has the side effect of moving the requested item to the front
+  // of the recency list.
+  iterator Get(const KeyType& key) {
+    typename KeyIndex::iterator index_iter = index_.find(key);
+    if (index_iter == index_.end())
+      return end();
+    typename PayloadList::iterator iter = index_iter->second;
+
+    // Move the touched item to the front of the recency ordering.
+    ordering_.splice(ordering_.begin(), ordering_, iter);
+    return ordering_.begin();
+  }
+
+  // Retrieves the item associated with a given key without affecting the
+  // recency ordering (unlike Get).
+  iterator Peek(const KeyType& key) {
+    typename KeyIndex::const_iterator index_iter = index_.find(key);
+    if (index_iter == index_.end())
+      return end();
+    return index_iter->second;
+  }
+
+  const_iterator Peek(const KeyType& key) const {
+    typename KeyIndex::const_iterator index_iter = index_.find(key);
+    if (index_iter == index_.end())
+      return end();
+    return index_iter->second;
+  }
+
+  // Exchanges the contents of |this| with the contents of |other|.
+  void Swap(MRUCacheBase& other) {
+    ordering_.swap(other.ordering_);
+    index_.swap(other.index_);
+    std::swap(max_size_, other.max_size_);
+  }
+
+  // Erases the item referenced by the given iterator. An iterator to the item
+  // following it will be returned. The iterator must be valid.
+  iterator Erase(iterator pos) {
+    index_.erase(pos->first);
+    return ordering_.erase(pos);
+  }
+
+  // MRUCache entries are often processed in reverse order, so we add this
+  // convenience function (not typically defined by STL containers).
+  reverse_iterator Erase(reverse_iterator pos) {
+    // We have to actually give it the incremented iterator to delete, since
+    // the forward iterator that base() returns is actually one past the item
+    // being iterated over.
+    return reverse_iterator(Erase((++pos).base()));
+  }
+
+  // Shrinks the cache so it only holds |new_size| items. If |new_size| is
+  // bigger or equal to the current number of items, this will do nothing.
+  void ShrinkToSize(size_type new_size) {
+    for (size_type i = size(); i > new_size; i--)
+      Erase(rbegin());
+  }
+
+  // Deletes everything from the cache.
+  void Clear() {
+    index_.clear();
+    ordering_.clear();
+  }
+
+  // Returns the number of elements in the cache.
+  size_type size() const {
+    // We don't use ordering_.size() for the return value because
+    // (as a linked list) it can be O(n).
+    DCHECK(index_.size() == ordering_.size());
+    return index_.size();
+  }
+
+  // Allows iteration over the list. Forward iteration starts with the most
+  // recent item and works backwards.
+  //
+  // Note that since these iterators are actually iterators over a list, you
+  // can keep them as you insert or delete things (as long as you don't delete
+  // the one you are pointing to) and they will still be valid.
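+  //
+  // For example (a sketch), walking from most- to least-recently used:
+  //
+  //   for (auto it = cache.begin(); it != cache.end(); ++it)
+  //     Visit(it->first, it->second);  // Visit() is illustrative.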
+  iterator begin() { return ordering_.begin(); }
+  const_iterator begin() const { return ordering_.begin(); }
+  iterator end() { return ordering_.end(); }
+  const_iterator end() const { return ordering_.end(); }
+
+  reverse_iterator rbegin() { return ordering_.rbegin(); }
+  const_reverse_iterator rbegin() const { return ordering_.rbegin(); }
+  reverse_iterator rend() { return ordering_.rend(); }
+  const_reverse_iterator rend() const { return ordering_.rend(); }
+
+  bool empty() const { return ordering_.empty(); }
+
+ private:
+  template <class MruCacheType>
+  friend size_t trace_event::internal::DoEstimateMemoryUsageForMruCache(
+      const MruCacheType&);
+
+  PayloadList ordering_;
+  KeyIndex index_;
+
+  size_type max_size_;
+
+  DISALLOW_COPY_AND_ASSIGN(MRUCacheBase);
+};
+
+// MRUCache --------------------------------------------------------------------
+
+// A container that does not do anything to free its data. Use this when storing
+// value types (as opposed to pointers) in the list.
+template <class KeyType,
+          class PayloadType,
+          class CompareType = std::less<KeyType>>
+class MRUCache : public MRUCacheBase<KeyType, PayloadType, CompareType> {
+ private:
+  using ParentType = MRUCacheBase<KeyType, PayloadType, CompareType>;
+
+ public:
+  // See MRUCacheBase, noting the possibility of using NO_AUTO_EVICT.
+  explicit MRUCache(typename ParentType::size_type max_size)
+      : ParentType(max_size) {}
+  virtual ~MRUCache() = default;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MRUCache);
+};
+
+// HashingMRUCache ------------------------------------------------------------
+
+template <class KeyType, class ValueType, class HashType>
+struct MRUCacheHashMap {
+  typedef std::unordered_map<KeyType, ValueType, HashType> Type;
+};
+
+// This class is similar to MRUCache, except that it uses std::unordered_map as
+// the map type instead of std::map. Note that your KeyType must be hashable to
+// use this cache or you need to provide a hashing class.
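+//
+// For example (a sketch; std::hash<std::string> exists, so no custom hasher
+// is needed):
+//
+//   base::HashingMRUCache<std::string, int> cache(10);
+//   cache.Put("key", 1);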
+template <class KeyType, class PayloadType, class HashType = std::hash<KeyType>>
+class HashingMRUCache
+    : public MRUCacheBase<KeyType, PayloadType, HashType, MRUCacheHashMap> {
+ private:
+  using ParentType =
+      MRUCacheBase<KeyType, PayloadType, HashType, MRUCacheHashMap>;
+
+ public:
+  // See MRUCacheBase, noting the possibility of using NO_AUTO_EVICT.
+  explicit HashingMRUCache(typename ParentType::size_type max_size)
+      : ParentType(max_size) {}
+  virtual ~HashingMRUCache() = default;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(HashingMRUCache);
+};
+
+}  // namespace base
+
+#endif  // BASE_CONTAINERS_MRU_CACHE_H_
diff --git a/base/containers/mru_cache_unittest.cc b/base/containers/mru_cache_unittest.cc
new file mode 100644
index 0000000..28e6f0d
--- /dev/null
+++ b/base/containers/mru_cache_unittest.cc
@@ -0,0 +1,394 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/containers/mru_cache.h"
+
+#include <cstddef>
+#include <memory>
+
+#include "base/memory/ptr_util.h"
+#include "base/trace_event/memory_usage_estimator.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+int cached_item_live_count = 0;
+
+struct CachedItem {
+  CachedItem() : value(0) {
+    cached_item_live_count++;
+  }
+
+  explicit CachedItem(int new_value) : value(new_value) {
+    cached_item_live_count++;
+  }
+
+  explicit CachedItem(const CachedItem& other) : value(other.value) {
+    cached_item_live_count++;
+  }
+
+  ~CachedItem() {
+    cached_item_live_count--;
+  }
+
+  int value;
+};
+
+}  // namespace
+
+TEST(MRUCacheTest, Basic) {
+  typedef base::MRUCache<int, CachedItem> Cache;
+  Cache cache(Cache::NO_AUTO_EVICT);
+
+  // Check failure conditions
+  {
+    CachedItem test_item;
+    EXPECT_TRUE(cache.Get(0) == cache.end());
+    EXPECT_TRUE(cache.Peek(0) == cache.end());
+  }
+
+  static const int kItem1Key = 5;
+  CachedItem item1(10);
+  Cache::iterator inserted_item = cache.Put(kItem1Key, item1);
+  EXPECT_EQ(1U, cache.size());
+
+  // Check that item1 was properly inserted.
+  {
+    Cache::iterator found = cache.Get(kItem1Key);
+    EXPECT_TRUE(inserted_item == cache.begin());
+    EXPECT_TRUE(found != cache.end());
+
+    found = cache.Peek(kItem1Key);
+    EXPECT_TRUE(found != cache.end());
+
+    EXPECT_EQ(kItem1Key, found->first);
+    EXPECT_EQ(item1.value, found->second.value);
+  }
+
+  static const int kItem2Key = 7;
+  CachedItem item2(12);
+  cache.Put(kItem2Key, item2);
+  EXPECT_EQ(2U, cache.size());
+
+  // Check that item1 is the oldest since item2 was added afterwards.
+  {
+    Cache::reverse_iterator oldest = cache.rbegin();
+    ASSERT_TRUE(oldest != cache.rend());
+    EXPECT_EQ(kItem1Key, oldest->first);
+    EXPECT_EQ(item1.value, oldest->second.value);
+  }
+
+  // Check that item1 is still accessible by key.
+  {
+    Cache::iterator test_item = cache.Get(kItem1Key);
+    ASSERT_TRUE(test_item != cache.end());
+    EXPECT_EQ(kItem1Key, test_item->first);
+    EXPECT_EQ(item1.value, test_item->second.value);
+  }
+
+  // Check that retrieving item1 pushed item2 to oldest.
+  {
+    Cache::reverse_iterator oldest = cache.rbegin();
+    ASSERT_TRUE(oldest != cache.rend());
+    EXPECT_EQ(kItem2Key, oldest->first);
+    EXPECT_EQ(item2.value, oldest->second.value);
+  }
+
+  // Remove the oldest item and check that item1 is now the only member.
+  {
+    Cache::reverse_iterator next = cache.Erase(cache.rbegin());
+
+    EXPECT_EQ(1U, cache.size());
+
+    EXPECT_TRUE(next == cache.rbegin());
+    EXPECT_EQ(kItem1Key, next->first);
+    EXPECT_EQ(item1.value, next->second.value);
+
+    cache.Erase(cache.begin());
+    EXPECT_EQ(0U, cache.size());
+  }
+
+  // Check that Clear() works properly.
+  cache.Put(kItem1Key, item1);
+  cache.Put(kItem2Key, item2);
+  EXPECT_EQ(2U, cache.size());
+  cache.Clear();
+  EXPECT_EQ(0U, cache.size());
+}
+
+TEST(MRUCacheTest, GetVsPeek) {
+  typedef base::MRUCache<int, CachedItem> Cache;
+  Cache cache(Cache::NO_AUTO_EVICT);
+
+  static const int kItem1Key = 1;
+  CachedItem item1(10);
+  cache.Put(kItem1Key, item1);
+
+  static const int kItem2Key = 2;
+  CachedItem item2(20);
+  cache.Put(kItem2Key, item2);
+
+  // This should do nothing since the size is bigger than the number of items.
+  cache.ShrinkToSize(100);
+
+  // Check that item1 starts out as oldest
+  {
+    Cache::reverse_iterator iter = cache.rbegin();
+    ASSERT_TRUE(iter != cache.rend());
+    EXPECT_EQ(kItem1Key, iter->first);
+    EXPECT_EQ(item1.value, iter->second.value);
+  }
+
+  // Check that Peek doesn't change ordering
+  {
+    Cache::iterator peekiter = cache.Peek(kItem1Key);
+    ASSERT_TRUE(peekiter != cache.end());
+
+    Cache::reverse_iterator iter = cache.rbegin();
+    ASSERT_TRUE(iter != cache.rend());
+    EXPECT_EQ(kItem1Key, iter->first);
+    EXPECT_EQ(item1.value, iter->second.value);
+  }
+}
+
+TEST(MRUCacheTest, KeyReplacement) {
+  typedef base::MRUCache<int, CachedItem> Cache;
+  Cache cache(Cache::NO_AUTO_EVICT);
+
+  static const int kItem1Key = 1;
+  CachedItem item1(10);
+  cache.Put(kItem1Key, item1);
+
+  static const int kItem2Key = 2;
+  CachedItem item2(20);
+  cache.Put(kItem2Key, item2);
+
+  static const int kItem3Key = 3;
+  CachedItem item3(30);
+  cache.Put(kItem3Key, item3);
+
+  static const int kItem4Key = 4;
+  CachedItem item4(40);
+  cache.Put(kItem4Key, item4);
+
+  CachedItem item5(50);
+  cache.Put(kItem3Key, item5);
+
+  EXPECT_EQ(4U, cache.size());
+  for (int i = 0; i < 3; ++i) {
+    Cache::reverse_iterator iter = cache.rbegin();
+    ASSERT_TRUE(iter != cache.rend());
+  }
+
+  // Make it so only the most important element is there.
+  cache.ShrinkToSize(1);
+
+  Cache::iterator iter = cache.begin();
+  EXPECT_EQ(kItem3Key, iter->first);
+  EXPECT_EQ(item5.value, iter->second.value);
+}
+
+// Make sure that the owning version releases its pointers properly.
+TEST(MRUCacheTest, Owning) {
+  using Cache = base::MRUCache<int, std::unique_ptr<CachedItem>>;
+  Cache cache(Cache::NO_AUTO_EVICT);
+
+  int initial_count = cached_item_live_count;
+
+  // First insert an item and then overwrite it.
+  static const int kItem1Key = 1;
+  cache.Put(kItem1Key, WrapUnique(new CachedItem(20)));
+  cache.Put(kItem1Key, WrapUnique(new CachedItem(22)));
+
+  // There should still be one item, and one extra live item.
+  Cache::iterator iter = cache.Get(kItem1Key);
+  EXPECT_EQ(1U, cache.size());
+  EXPECT_TRUE(iter != cache.end());
+  EXPECT_EQ(initial_count + 1, cached_item_live_count);
+
+  // Now remove it.
+  cache.Erase(cache.begin());
+  EXPECT_EQ(initial_count, cached_item_live_count);
+
+  // Now try another cache that goes out of scope to make sure its pointers
+  // go away.
+  {
+    Cache cache2(Cache::NO_AUTO_EVICT);
+    cache2.Put(1, WrapUnique(new CachedItem(20)));
+    cache2.Put(2, WrapUnique(new CachedItem(20)));
+  }
+
+  // There should be no objects leaked.
+  EXPECT_EQ(initial_count, cached_item_live_count);
+
+  // Check that Clear() also frees things correctly.
+  {
+    Cache cache2(Cache::NO_AUTO_EVICT);
+    cache2.Put(1, WrapUnique(new CachedItem(20)));
+    cache2.Put(2, WrapUnique(new CachedItem(20)));
+    EXPECT_EQ(initial_count + 2, cached_item_live_count);
+    cache2.Clear();
+    EXPECT_EQ(initial_count, cached_item_live_count);
+  }
+}
+
+TEST(MRUCacheTest, AutoEvict) {
+  using Cache = base::MRUCache<int, std::unique_ptr<CachedItem>>;
+  static const Cache::size_type kMaxSize = 3;
+
+  int initial_count = cached_item_live_count;
+
+  {
+    Cache cache(kMaxSize);
+
+    static const int kItem1Key = 1, kItem2Key = 2, kItem3Key = 3, kItem4Key = 4;
+    cache.Put(kItem1Key, std::make_unique<CachedItem>(20));
+    cache.Put(kItem2Key, std::make_unique<CachedItem>(21));
+    cache.Put(kItem3Key, std::make_unique<CachedItem>(22));
+    cache.Put(kItem4Key, std::make_unique<CachedItem>(23));
+
+    // The cache should only have kMaxSize items in it even though we inserted
+    // more.
+    EXPECT_EQ(kMaxSize, cache.size());
+  }
+
+  // There should be no objects leaked.
+  EXPECT_EQ(initial_count, cached_item_live_count);
+}
+
+TEST(MRUCacheTest, HashingMRUCache) {
+  // Very simple test to make sure that the hashing cache works correctly.
+  typedef base::HashingMRUCache<std::string, CachedItem> Cache;
+  Cache cache(Cache::NO_AUTO_EVICT);
+
+  CachedItem one(1);
+  cache.Put("First", one);
+
+  CachedItem two(2);
+  cache.Put("Second", two);
+
+  EXPECT_EQ(one.value, cache.Get("First")->second.value);
+  EXPECT_EQ(two.value, cache.Get("Second")->second.value);
+  cache.ShrinkToSize(1);
+  EXPECT_EQ(two.value, cache.Get("Second")->second.value);
+  EXPECT_TRUE(cache.Get("First") == cache.end());
+}
+
+TEST(MRUCacheTest, Swap) {
+  typedef base::MRUCache<int, CachedItem> Cache;
+  Cache cache1(Cache::NO_AUTO_EVICT);
+
+  // Insert two items into cache1.
+  static const int kItem1Key = 1;
+  CachedItem item1(2);
+  Cache::iterator inserted_item = cache1.Put(kItem1Key, item1);
+  EXPECT_EQ(1U, cache1.size());
+
+  static const int kItem2Key = 3;
+  CachedItem item2(4);
+  cache1.Put(kItem2Key, item2);
+  EXPECT_EQ(2U, cache1.size());
+
+  // Verify cache1's elements.
+  {
+    Cache::iterator iter = cache1.begin();
+    ASSERT_TRUE(iter != cache1.end());
+    EXPECT_EQ(kItem2Key, iter->first);
+    EXPECT_EQ(item2.value, iter->second.value);
+
+    ++iter;
+    ASSERT_TRUE(iter != cache1.end());
+    EXPECT_EQ(kItem1Key, iter->first);
+    EXPECT_EQ(item1.value, iter->second.value);
+  }
+
+  // Create another cache2.
+  Cache cache2(Cache::NO_AUTO_EVICT);
+
+  // Insert three items into cache2.
+  static const int kItem3Key = 5;
+  CachedItem item3(6);
+  inserted_item = cache2.Put(kItem3Key, item3);
+  EXPECT_EQ(1U, cache2.size());
+
+  static const int kItem4Key = 7;
+  CachedItem item4(8);
+  cache2.Put(kItem4Key, item4);
+  EXPECT_EQ(2U, cache2.size());
+
+  static const int kItem5Key = 9;
+  CachedItem item5(10);
+  cache2.Put(kItem5Key, item5);
+  EXPECT_EQ(3U, cache2.size());
+
+  // Verify cache2's elements.
+  {
+    Cache::iterator iter = cache2.begin();
+    ASSERT_TRUE(iter != cache2.end());
+    EXPECT_EQ(kItem5Key, iter->first);
+    EXPECT_EQ(item5.value, iter->second.value);
+
+    ++iter;
+    ASSERT_TRUE(iter != cache2.end());
+    EXPECT_EQ(kItem4Key, iter->first);
+    EXPECT_EQ(item4.value, iter->second.value);
+
+    ++iter;
+    ASSERT_TRUE(iter != cache2.end());
+    EXPECT_EQ(kItem3Key, iter->first);
+    EXPECT_EQ(item3.value, iter->second.value);
+  }
+
+  // Swap cache1 and cache2 and verify cache2 has cache1's elements and cache1
+  // has cache2's elements.
+  cache2.Swap(cache1);
+
+  EXPECT_EQ(3U, cache1.size());
+  EXPECT_EQ(2U, cache2.size());
+
+  // Verify cache1's elements.
+  {
+    Cache::iterator iter = cache1.begin();
+    ASSERT_TRUE(iter != cache1.end());
+    EXPECT_EQ(kItem5Key, iter->first);
+    EXPECT_EQ(item5.value, iter->second.value);
+
+    ++iter;
+    ASSERT_TRUE(iter != cache1.end());
+    EXPECT_EQ(kItem4Key, iter->first);
+    EXPECT_EQ(item4.value, iter->second.value);
+
+    ++iter;
+    ASSERT_TRUE(iter != cache1.end());
+    EXPECT_EQ(kItem3Key, iter->first);
+    EXPECT_EQ(item3.value, iter->second.value);
+  }
+
+  // Verify cache2's elements.
+  {
+    Cache::iterator iter = cache2.begin();
+    ASSERT_TRUE(iter != cache2.end());
+    EXPECT_EQ(kItem2Key, iter->first);
+    EXPECT_EQ(item2.value, iter->second.value);
+
+    ++iter;
+    ASSERT_TRUE(iter != cache2.end());
+    EXPECT_EQ(kItem1Key, iter->first);
+    EXPECT_EQ(item1.value, iter->second.value);
+  }
+}
+
+TEST(MRUCacheTest, EstimateMemory) {
+  base::MRUCache<std::string, int> cache(10);
+
+  const std::string key(100u, 'a');
+  cache.Put(key, 1);
+
+  EXPECT_GT(trace_event::EstimateMemoryUsage(cache),
+            trace_event::EstimateMemoryUsage(key));
+}
+
+}  // namespace base
diff --git a/base/containers/queue.h b/base/containers/queue.h
new file mode 100644
index 0000000..2d3b480
--- /dev/null
+++ b/base/containers/queue.h
@@ -0,0 +1,23 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_QUEUE_H_
+#define BASE_CONTAINERS_QUEUE_H_
+
+#include <queue>
+
+#include "base/containers/circular_deque.h"
+
+namespace base {
+
+// Provides a definition of base::queue that's like std::queue but uses a
+// base::circular_deque instead of std::deque. Since std::queue is just a
+// wrapper around an underlying container, we can simply provide an alias
+// whose container type defaults to base::circular_deque.
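+//
+// Illustrative usage (the interface is inherited unchanged from std::queue):
+//
+//   base::queue<int> q;
+//   q.push(1);
+//   q.push(2);
+//   int front = q.front();  // 1
+//   q.pop();                // removes 1; q.front() is now 2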
+template <class T, class Container = circular_deque<T>>
+using queue = std::queue<T, Container>;
+
+}  // namespace base
+
+#endif  // BASE_CONTAINERS_QUEUE_H_
diff --git a/base/containers/ring_buffer.h b/base/containers/ring_buffer.h
new file mode 100644
index 0000000..4e48907
--- /dev/null
+++ b/base/containers/ring_buffer.h
@@ -0,0 +1,119 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_RING_BUFFER_H_
+#define BASE_CONTAINERS_RING_BUFFER_H_
+
+#include <stddef.h>
+
+#include "base/logging.h"
+#include "base/macros.h"
+
+namespace base {
+
+// base::RingBuffer uses a fixed-size array, unlike base::circular_deque and
+// std::deque, so only the last |kSize| elements are accessible. You can add
+// elements to the front and read/modify random elements, but you cannot
+// remove elements from the back. Therefore, it does not have a |Size| method,
+// only |BufferSize|, which is a constant, and |CurrentIndex|, which is the
+// number of elements added so far.
+//
+// If the above is sufficient for your use case, base::RingBuffer should be more
+// efficient than base::circular_deque.
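+//
+// Illustrative usage (iteration visits the oldest saved value first):
+//
+//   base::RingBuffer<int, 4> buffer;
+//   buffer.SaveToBuffer(1);
+//   buffer.SaveToBuffer(2);
+//   for (auto it = buffer.Begin(); it; ++it) {
+//     // Visits 1, then 2.
+//   }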
+template <typename T, size_t kSize>
+class RingBuffer {
+ public:
+  RingBuffer() : current_index_(0) {}
+
+  size_t BufferSize() const { return kSize; }
+
+  size_t CurrentIndex() const { return current_index_; }
+
+  // Tests whether a value was saved at this index.
+  bool IsFilledIndex(size_t n) const { return BufferIndex(n) < current_index_; }
+
+  // n = 0 returns the oldest value and
+  // n = BufferSize() - 1 returns the most recent value.
+  const T& ReadBuffer(size_t n) const {
+    DCHECK(IsFilledIndex(n));
+    return buffer_[BufferIndex(n)];
+  }
+
+  T* MutableReadBuffer(size_t n) {
+    DCHECK(IsFilledIndex(n));
+    return &buffer_[BufferIndex(n)];
+  }
+
+  void SaveToBuffer(const T& value) {
+    buffer_[BufferIndex(0)] = value;
+    current_index_++;
+  }
+
+  void Clear() { current_index_ = 0; }
+
+  // Iterator has const access to the RingBuffer it got retrieved from.
+  class Iterator {
+   public:
+    size_t index() const { return index_; }
+
+    const T* operator->() const { return &buffer_.ReadBuffer(index_); }
+    const T* operator*() const { return &buffer_.ReadBuffer(index_); }
+
+    Iterator& operator++() {
+      index_++;
+      if (index_ == kSize)
+        out_of_range_ = true;
+      return *this;
+    }
+
+    Iterator& operator--() {
+      if (index_ == 0)
+        out_of_range_ = true;
+      index_--;
+      return *this;
+    }
+
+    operator bool() const {
+      return buffer_.IsFilledIndex(index_) && !out_of_range_;
+    }
+
+   private:
+    Iterator(const RingBuffer<T, kSize>& buffer, size_t index)
+        : buffer_(buffer), index_(index), out_of_range_(false) {}
+
+    const RingBuffer<T, kSize>& buffer_;
+    size_t index_;
+    bool out_of_range_;
+
+    friend class RingBuffer<T, kSize>;
+  };
+
+  // Returns an Iterator pointing to the oldest value in the buffer.
+  // Example usage (iterate from oldest to newest value):
+  //  for (RingBuffer<T, kSize>::Iterator it = ring_buffer.Begin(); it; ++it) {}
+  Iterator Begin() const {
+    if (current_index_ < kSize)
+      return Iterator(*this, kSize - current_index_);
+    return Iterator(*this, 0);
+  }
+
+  // Returns an Iterator pointing to the newest value in the buffer.
+  // Example usage (iterate backwards from newest to oldest value):
+  //  for (RingBuffer<T, kSize>::Iterator it = ring_buffer.End(); it; --it) {}
+  Iterator End() const { return Iterator(*this, kSize - 1); }
+
+ private:
+  inline size_t BufferIndex(size_t n) const {
+    return (current_index_ + n) % kSize;
+  }
+
+  T buffer_[kSize];
+  size_t current_index_;
+
+  DISALLOW_COPY_AND_ASSIGN(RingBuffer);
+};
+
+}  // namespace base
+
+#endif  // BASE_CONTAINERS_RING_BUFFER_H_
diff --git a/base/containers/small_map.h b/base/containers/small_map.h
new file mode 100644
index 0000000..495332f
--- /dev/null
+++ b/base/containers/small_map.h
@@ -0,0 +1,660 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_SMALL_MAP_H_
+#define BASE_CONTAINERS_SMALL_MAP_H_
+
+#include <stddef.h>
+
+#include <map>
+#include <new>
+#include <string>
+#include <utility>
+
+#include "base/containers/hash_tables.h"
+#include "base/logging.h"
+
+namespace base {
+
+// small_map is a container with a std::map-like interface. It starts out
+// backed by an unsorted array but switches to some other container type if it
+// grows beyond a fixed size.
+//
+// Please see //base/containers/README.md for an overview of which container
+// to select.
+//
+// PROS
+//
+//  - Good memory locality and low overhead for smaller maps.
+//  - Handles large maps without the degenerate performance of flat_map.
+//
+// CONS
+//
+//  - Larger code size than the alternatives.
+//
+// IMPORTANT NOTES
+//
+//  - Iterators are invalidated across mutations.
+//
+// DETAILS
+//
+// base::small_map will pick up the comparator from the underlying map type. In
+// std::map only a "less" operator is defined, which requires us to do two
+// comparisons per element when doing the brute-force search in the simple
+// array. std::unordered_map has a key_equal function which will be used.
+//
+// We define default overrides for the common map types to avoid this
+// double-compare, but you should be aware of this if you use your own
+// operator< for your map and supply your own version of == to the small_map.
+// You can use regular operator== by just doing:
+//
+//   base::small_map<std::map<MyKey, MyValue>, 4, std::equal_to<MyKey>>
+//
+//
+// USAGE
+// -----
+//
+// NormalMap:  The map type to fall back to.  This also defines the key
+//             and value types for the small_map.
+// kArraySize:  The size of the initial array of results. This will be
+//              allocated with the small_map object rather than separately on
+//              the heap. Once the map grows beyond this size, the map type
+//              will be used instead.
+// EqualKey:  A functor which tests two keys for equality.  If the wrapped
+//            map type has a "key_equal" member (hash_map does), then that will
+//            be used by default. If the wrapped map type has a strict weak
+//            ordering "key_compare" (std::map does), that will be used to
+//            implement equality by default.
+// MapInit: A functor that takes a NormalMap* and uses it to initialize the map.
+//          This functor will be called at most once per small_map, when the map
+//          exceeds the threshold of kArraySize and we are about to copy values
+//          from the array to the map. The functor *must* initialize the
+//          NormalMap* argument with placement new, since after it runs we
+//          assume that the NormalMap has been initialized.
+//
+// example:
+//   base::small_map<std::map<string, int>> days;
+//   days["sunday"   ] = 0;
+//   days["monday"   ] = 1;
+//   days["tuesday"  ] = 2;
+//   days["wednesday"] = 3;
+//   days["thursday" ] = 4;
+//   days["friday"   ] = 5;
+//   days["saturday" ] = 6;
+
+namespace internal {
+
+template <typename NormalMap>
+class small_map_default_init {
+ public:
+  void operator()(NormalMap* map) const { new (map) NormalMap(); }
+};
+
+// has_key_equal<M>::value is true iff there exists a type M::key_equal. This is
+// used to dispatch to one of the select_equal_key<> metafunctions below.
+template <typename M>
+struct has_key_equal {
+  typedef char sml;  // "small" is sometimes #defined so we use an abbreviation.
+  typedef struct { char dummy[2]; } big;
+  // Two overloads: one accepts types that have a key_equal member, and one
+  // accepts anything. They each return a value of a different size, so we can
+  // determine at compile-time which function would have been called.
+  template <typename U> static big test(typename U::key_equal*);
+  template <typename> static sml test(...);
+  // Determines if M::key_equal exists by looking at the size of the return
+  // type of the compiler-chosen test() function.
+  static const bool value = (sizeof(test<M>(0)) == sizeof(big));
+};
+template <typename M> const bool has_key_equal<M>::value;
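+//
+// For example, has_key_equal<std::unordered_map<int, int>>::value is true
+// (std::unordered_map exposes a key_equal type), while
+// has_key_equal<std::map<int, int>>::value is false (std::map only provides
+// key_compare).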
+
+// Base template used for map types that do NOT have an M::key_equal member,
+// e.g., std::map<>. These maps have a strict weak ordering comparator rather
+// than an equality functor, so equality will be implemented in terms of that
+// comparator.
+//
+// There's a partial specialization of this template below for map types that do
+// have an M::key_equal member.
+template <typename M, bool has_key_equal_value>
+struct select_equal_key {
+  struct equal_key {
+    bool operator()(const typename M::key_type& left,
+                    const typename M::key_type& right) {
+      // Implements equality in terms of a strict weak ordering comparator.
+      typename M::key_compare comp;
+      return !comp(left, right) && !comp(right, left);
+    }
+  };
+};
+
+// Provide overrides to use operator== for key compare for the "normal" map and
+// hash map types. If you override the default comparator or allocator for a
+// map or hash_map, or use another type of map, this won't get used.
+//
+// If we switch to using std::unordered_map for base::hash_map, then the
+// hash_map specialization can be removed.
+template <typename KeyType, typename ValueType>
+struct select_equal_key<std::map<KeyType, ValueType>, false> {
+  struct equal_key {
+    bool operator()(const KeyType& left, const KeyType& right) {
+      return left == right;
+    }
+  };
+};
+template <typename KeyType, typename ValueType>
+struct select_equal_key<base::hash_map<KeyType, ValueType>, false> {
+  struct equal_key {
+    bool operator()(const KeyType& left, const KeyType& right) {
+      return left == right;
+    }
+  };
+};
+
+// Partial template specialization handles case where M::key_equal exists, e.g.,
+// hash_map<>.
+template <typename M>
+struct select_equal_key<M, true> {
+  typedef typename M::key_equal equal_key;
+};
+
+}  // namespace internal
+
+template <typename NormalMap,
+          int kArraySize = 4,
+          typename EqualKey = typename internal::select_equal_key<
+              NormalMap,
+              internal::has_key_equal<NormalMap>::value>::equal_key,
+          typename MapInit = internal::small_map_default_init<NormalMap>>
+class small_map {
+  // We cannot rely on the compiler to reject arrays of size 0.  In
+  // particular, gcc 2.95.3 rejects them, but later versions allow 0-length
+  // arrays.  Therefore, we explicitly reject a non-positive kArraySize
+  // here.
+  static_assert(kArraySize > 0, "default initial size should be positive");
+
+ public:
+  typedef typename NormalMap::key_type key_type;
+  typedef typename NormalMap::mapped_type data_type;
+  typedef typename NormalMap::mapped_type mapped_type;
+  typedef typename NormalMap::value_type value_type;
+  typedef EqualKey key_equal;
+
+  small_map() : size_(0), functor_(MapInit()) {}
+
+  explicit small_map(const MapInit& functor) : size_(0), functor_(functor) {}
+
+  // Allow copy-constructor and assignment, since STL allows them too.
+  small_map(const small_map& src) {
+    // size_ and functor_ are initialized in InitFrom().
+    InitFrom(src);
+  }
+  void operator=(const small_map& src) {
+    if (&src == this) return;
+
+    // This is not optimal. If src and dest are both using the small
+    // array, we could skip the teardown and reconstruct. One problem
+    // to be resolved is that the value_type itself is pair<const K,
+    // V>, and const K is not assignable.
+    Destroy();
+    InitFrom(src);
+  }
+  ~small_map() { Destroy(); }
+
+  class const_iterator;
+
+  class iterator {
+   public:
+    typedef typename NormalMap::iterator::iterator_category iterator_category;
+    typedef typename NormalMap::iterator::value_type value_type;
+    typedef typename NormalMap::iterator::difference_type difference_type;
+    typedef typename NormalMap::iterator::pointer pointer;
+    typedef typename NormalMap::iterator::reference reference;
+
+    inline iterator(): array_iter_(NULL) {}
+
+    inline iterator& operator++() {
+      if (array_iter_ != NULL) {
+        ++array_iter_;
+      } else {
+        ++hash_iter_;
+      }
+      return *this;
+    }
+    inline iterator operator++(int /*unused*/) {
+      iterator result(*this);
+      ++(*this);
+      return result;
+    }
+    inline iterator& operator--() {
+      if (array_iter_ != NULL) {
+        --array_iter_;
+      } else {
+        --hash_iter_;
+      }
+      return *this;
+    }
+    inline iterator operator--(int /*unused*/) {
+      iterator result(*this);
+      --(*this);
+      return result;
+    }
+    inline value_type* operator->() const {
+      if (array_iter_ != NULL) {
+        return array_iter_;
+      } else {
+        return hash_iter_.operator->();
+      }
+    }
+
+    inline value_type& operator*() const {
+      if (array_iter_ != NULL) {
+        return *array_iter_;
+      } else {
+        return *hash_iter_;
+      }
+    }
+
+    inline bool operator==(const iterator& other) const {
+      if (array_iter_ != NULL) {
+        return array_iter_ == other.array_iter_;
+      } else {
+        return other.array_iter_ == NULL && hash_iter_ == other.hash_iter_;
+      }
+    }
+
+    inline bool operator!=(const iterator& other) const {
+      return !(*this == other);
+    }
+
+    bool operator==(const const_iterator& other) const;
+    bool operator!=(const const_iterator& other) const;
+
+   private:
+    friend class small_map;
+    friend class const_iterator;
+    inline explicit iterator(value_type* init) : array_iter_(init) {}
+    inline explicit iterator(const typename NormalMap::iterator& init)
+      : array_iter_(NULL), hash_iter_(init) {}
+
+    value_type* array_iter_;
+    typename NormalMap::iterator hash_iter_;
+  };
+
+  class const_iterator {
+   public:
+    typedef typename NormalMap::const_iterator::iterator_category
+        iterator_category;
+    typedef typename NormalMap::const_iterator::value_type value_type;
+    typedef typename NormalMap::const_iterator::difference_type difference_type;
+    typedef typename NormalMap::const_iterator::pointer pointer;
+    typedef typename NormalMap::const_iterator::reference reference;
+
+    inline const_iterator(): array_iter_(NULL) {}
+    // Non-explicit ctor lets us convert regular iterators to const iterators
+    inline const_iterator(const iterator& other)
+      : array_iter_(other.array_iter_), hash_iter_(other.hash_iter_) {}
+
+    inline const_iterator& operator++() {
+      if (array_iter_ != NULL) {
+        ++array_iter_;
+      } else {
+        ++hash_iter_;
+      }
+      return *this;
+    }
+    inline const_iterator operator++(int /*unused*/) {
+      const_iterator result(*this);
+      ++(*this);
+      return result;
+    }
+
+    inline const_iterator& operator--() {
+      if (array_iter_ != NULL) {
+        --array_iter_;
+      } else {
+        --hash_iter_;
+      }
+      return *this;
+    }
+    inline const_iterator operator--(int /*unused*/) {
+      const_iterator result(*this);
+      --(*this);
+      return result;
+    }
+
+    inline const value_type* operator->() const {
+      if (array_iter_ != NULL) {
+        return array_iter_;
+      } else {
+        return hash_iter_.operator->();
+      }
+    }
+
+    inline const value_type& operator*() const {
+      if (array_iter_ != NULL) {
+        return *array_iter_;
+      } else {
+        return *hash_iter_;
+      }
+    }
+
+    inline bool operator==(const const_iterator& other) const {
+      if (array_iter_ != NULL) {
+        return array_iter_ == other.array_iter_;
+      } else {
+        return other.array_iter_ == NULL && hash_iter_ == other.hash_iter_;
+      }
+    }
+
+    inline bool operator!=(const const_iterator& other) const {
+      return !(*this == other);
+    }
+
+   private:
+    friend class small_map;
+    inline explicit const_iterator(const value_type* init)
+        : array_iter_(init) {}
+    inline explicit const_iterator(
+        const typename NormalMap::const_iterator& init)
+      : array_iter_(NULL), hash_iter_(init) {}
+
+    const value_type* array_iter_;
+    typename NormalMap::const_iterator hash_iter_;
+  };
+
+  iterator find(const key_type& key) {
+    key_equal compare;
+    if (size_ >= 0) {
+      for (int i = 0; i < size_; i++) {
+        if (compare(array_[i].first, key)) {
+          return iterator(array_ + i);
+        }
+      }
+      return iterator(array_ + size_);
+    } else {
+      return iterator(map()->find(key));
+    }
+  }
+
+  const_iterator find(const key_type& key) const {
+    key_equal compare;
+    if (size_ >= 0) {
+      for (int i = 0; i < size_; i++) {
+        if (compare(array_[i].first, key)) {
+          return const_iterator(array_ + i);
+        }
+      }
+      return const_iterator(array_ + size_);
+    } else {
+      return const_iterator(map()->find(key));
+    }
+  }
+
+  // Invalidates iterators.
+  data_type& operator[](const key_type& key) {
+    key_equal compare;
+
+    if (size_ >= 0) {
+      // operator[] searches backwards, favoring recently-added
+      // elements.
+      for (int i = size_-1; i >= 0; --i) {
+        if (compare(array_[i].first, key)) {
+          return array_[i].second;
+        }
+      }
+      if (size_ == kArraySize) {
+        ConvertToRealMap();
+        return map_[key];
+      } else {
+        new (&array_[size_]) value_type(key, data_type());
+        return array_[size_++].second;
+      }
+    } else {
+      return map_[key];
+    }
+  }
+
+  // Invalidates iterators.
+  std::pair<iterator, bool> insert(const value_type& x) {
+    key_equal compare;
+
+    if (size_ >= 0) {
+      for (int i = 0; i < size_; i++) {
+        if (compare(array_[i].first, x.first)) {
+          return std::make_pair(iterator(array_ + i), false);
+        }
+      }
+      if (size_ == kArraySize) {
+        ConvertToRealMap();  // Invalidates all iterators!
+        std::pair<typename NormalMap::iterator, bool> ret = map_.insert(x);
+        return std::make_pair(iterator(ret.first), ret.second);
+      } else {
+        new (&array_[size_]) value_type(x);
+        return std::make_pair(iterator(array_ + size_++), true);
+      }
+    } else {
+      std::pair<typename NormalMap::iterator, bool> ret = map_.insert(x);
+      return std::make_pair(iterator(ret.first), ret.second);
+    }
+  }
+
+  // Invalidates iterators.
+  template <class InputIterator>
+  void insert(InputIterator f, InputIterator l) {
+    while (f != l) {
+      insert(*f);
+      ++f;
+    }
+  }
+
+  // Invalidates iterators.
+  template <typename... Args>
+  std::pair<iterator, bool> emplace(Args&&... args) {
+    key_equal compare;
+
+    if (size_ >= 0) {
+      value_type x(std::forward<Args>(args)...);
+      for (int i = 0; i < size_; i++) {
+        if (compare(array_[i].first, x.first)) {
+          return std::make_pair(iterator(array_ + i), false);
+        }
+      }
+      if (size_ == kArraySize) {
+        ConvertToRealMap();  // Invalidates all iterators!
+        std::pair<typename NormalMap::iterator, bool> ret =
+            map_.emplace(std::move(x));
+        return std::make_pair(iterator(ret.first), ret.second);
+      } else {
+        new (&array_[size_]) value_type(std::move(x));
+        return std::make_pair(iterator(array_ + size_++), true);
+      }
+    } else {
+      std::pair<typename NormalMap::iterator, bool> ret =
+          map_.emplace(std::forward<Args>(args)...);
+      return std::make_pair(iterator(ret.first), ret.second);
+    }
+  }
+
+  iterator begin() {
+    if (size_ >= 0) {
+      return iterator(array_);
+    } else {
+      return iterator(map_.begin());
+    }
+  }
+  const_iterator begin() const {
+    if (size_ >= 0) {
+      return const_iterator(array_);
+    } else {
+      return const_iterator(map_.begin());
+    }
+  }
+
+  iterator end() {
+    if (size_ >= 0) {
+      return iterator(array_ + size_);
+    } else {
+      return iterator(map_.end());
+    }
+  }
+  const_iterator end() const {
+    if (size_ >= 0) {
+      return const_iterator(array_ + size_);
+    } else {
+      return const_iterator(map_.end());
+    }
+  }
+
+  void clear() {
+    if (size_ >= 0) {
+      for (int i = 0; i < size_; i++) {
+        array_[i].~value_type();
+      }
+    } else {
+      map_.~NormalMap();
+    }
+    size_ = 0;
+  }
+
+  // Invalidates iterators. Returns iterator following the last removed element.
+  iterator erase(const iterator& position) {
+    if (size_ >= 0) {
+      int i = position.array_iter_ - array_;
+      array_[i].~value_type();
+      --size_;
+      if (i != size_) {
+        new (&array_[i]) value_type(std::move(array_[size_]));
+        array_[size_].~value_type();
+        return iterator(array_ + i);
+      }
+      return end();
+    }
+    return iterator(map_.erase(position.hash_iter_));
+  }
+
+  size_t erase(const key_type& key) {
+    iterator iter = find(key);
+    if (iter == end()) return 0u;
+    erase(iter);
+    return 1u;
+  }
+
+  size_t count(const key_type& key) const {
+    return (find(key) == end()) ? 0 : 1;
+  }
+
+  size_t size() const {
+    if (size_ >= 0) {
+      return static_cast<size_t>(size_);
+    } else {
+      return map_.size();
+    }
+  }
+
+  bool empty() const {
+    if (size_ >= 0) {
+      return (size_ == 0);
+    } else {
+      return map_.empty();
+    }
+  }
+
+  // Returns true if we have fallen back to using the underlying map
+  // representation.
+  bool UsingFullMap() const {
+    return size_ < 0;
+  }
+
+  inline NormalMap* map() {
+    CHECK(UsingFullMap());
+    return &map_;
+  }
+  inline const NormalMap* map() const {
+    CHECK(UsingFullMap());
+    return &map_;
+  }
+
+ private:
+  int size_;  // negative = using hash_map
+
+  MapInit functor_;
+
+  // We want to call constructors and destructors manually, but we don't want to
+  // allocate and deallocate the memory used for them separately. Since array_
+  // and map_ are mutually exclusive, we'll put them in a union.
+  union {
+    value_type array_[kArraySize];
+    NormalMap map_;
+  };
+
+  void ConvertToRealMap() {
+    // Storage for the elements in the temporary array. This is intentionally
+    // declared as a union to avoid having to default-construct |kArraySize|
+    // elements, only to move construct over them in the initial loop.
+    union Storage {
+      Storage() {}
+      ~Storage() {}
+      value_type array[kArraySize];
+    } temp;
+
+    // Move the current elements into a temporary array.
+    for (int i = 0; i < kArraySize; i++) {
+      new (&temp.array[i]) value_type(std::move(array_[i]));
+      array_[i].~value_type();
+    }
+
+    // Initialize the map.
+    size_ = -1;
+    functor_(&map_);
+
+    // Insert elements into it.
+    for (int i = 0; i < kArraySize; i++) {
+      map_.insert(std::move(temp.array[i]));
+      temp.array[i].~value_type();
+    }
+  }
+
+  // Helpers for constructors and destructors.
+  void InitFrom(const small_map& src) {
+    functor_ = src.functor_;
+    size_ = src.size_;
+    if (src.size_ >= 0) {
+      for (int i = 0; i < size_; i++) {
+        new (&array_[i]) value_type(src.array_[i]);
+      }
+    } else {
+      functor_(&map_);
+      map_ = src.map_;
+    }
+  }
+  void Destroy() {
+    if (size_ >= 0) {
+      for (int i = 0; i < size_; i++) {
+        array_[i].~value_type();
+      }
+    } else {
+      map_.~NormalMap();
+    }
+  }
+};
+
+template <typename NormalMap,
+          int kArraySize,
+          typename EqualKey,
+          typename Functor>
+inline bool small_map<NormalMap, kArraySize, EqualKey, Functor>::iterator::
+operator==(const const_iterator& other) const {
+  return other == *this;
+}
+template <typename NormalMap,
+          int kArraySize,
+          typename EqualKey,
+          typename Functor>
+inline bool small_map<NormalMap, kArraySize, EqualKey, Functor>::iterator::
+operator!=(const const_iterator& other) const {
+  return other != *this;
+}
+
+}  // namespace base
+
+#endif  // BASE_CONTAINERS_SMALL_MAP_H_
diff --git a/base/containers/small_map_unittest.cc b/base/containers/small_map_unittest.cc
new file mode 100644
index 0000000..6561851
--- /dev/null
+++ b/base/containers/small_map_unittest.cc
@@ -0,0 +1,603 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/containers/small_map.h"
+
+#include <stddef.h>
+
+#include <algorithm>
+#include <functional>
+#include <map>
+#include <unordered_map>
+
+#include "base/logging.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(SmallMap, General) {
+  small_map<std::unordered_map<int, int>> m;
+
+  EXPECT_TRUE(m.empty());
+
+  m[0] = 5;
+
+  EXPECT_FALSE(m.empty());
+  EXPECT_EQ(m.size(), 1u);
+
+  m[9] = 2;
+
+  EXPECT_FALSE(m.empty());
+  EXPECT_EQ(m.size(), 2u);
+
+  EXPECT_EQ(m[9], 2);
+  EXPECT_EQ(m[0], 5);
+  EXPECT_FALSE(m.UsingFullMap());
+
+  small_map<std::unordered_map<int, int>>::iterator iter(m.begin());
+  ASSERT_TRUE(iter != m.end());
+  EXPECT_EQ(iter->first, 0);
+  EXPECT_EQ(iter->second, 5);
+  ++iter;
+  ASSERT_TRUE(iter != m.end());
+  EXPECT_EQ((*iter).first, 9);
+  EXPECT_EQ((*iter).second, 2);
+  ++iter;
+  EXPECT_TRUE(iter == m.end());
+
+  m[8] = 23;
+  m[1234] = 90;
+  m[-5] = 6;
+
+  EXPECT_EQ(m[   9],  2);
+  EXPECT_EQ(m[   0],  5);
+  EXPECT_EQ(m[1234], 90);
+  EXPECT_EQ(m[   8], 23);
+  EXPECT_EQ(m[  -5],  6);
+  EXPECT_EQ(m.size(), 5u);
+  EXPECT_FALSE(m.empty());
+  EXPECT_TRUE(m.UsingFullMap());
+
+  iter = m.begin();
+  for (int i = 0; i < 5; i++) {
+    EXPECT_TRUE(iter != m.end());
+    ++iter;
+  }
+  EXPECT_TRUE(iter == m.end());
+
+  const small_map<std::unordered_map<int, int>>& ref = m;
+  EXPECT_TRUE(ref.find(1234) != m.end());
+  EXPECT_TRUE(ref.find(5678) == m.end());
+}
+
+TEST(SmallMap, PostFixIteratorIncrement) {
+  small_map<std::unordered_map<int, int>> m;
+  m[0] = 5;
+  m[2] = 3;
+
+  {
+    small_map<std::unordered_map<int, int>>::iterator iter(m.begin());
+    small_map<std::unordered_map<int, int>>::iterator last(iter++);
+    ++last;
+    EXPECT_TRUE(last == iter);
+  }
+
+  {
+    small_map<std::unordered_map<int, int>>::const_iterator iter(m.begin());
+    small_map<std::unordered_map<int, int>>::const_iterator last(iter++);
+    ++last;
+    EXPECT_TRUE(last == iter);
+  }
+}
+
+// Based on the General testcase.
+TEST(SmallMap, CopyConstructor) {
+  small_map<std::unordered_map<int, int>> src;
+
+  {
+    small_map<std::unordered_map<int, int>> m(src);
+    EXPECT_TRUE(m.empty());
+  }
+
+  src[0] = 5;
+
+  {
+    small_map<std::unordered_map<int, int>> m(src);
+    EXPECT_FALSE(m.empty());
+    EXPECT_EQ(m.size(), 1u);
+  }
+
+  src[9] = 2;
+
+  {
+    small_map<std::unordered_map<int, int>> m(src);
+    EXPECT_FALSE(m.empty());
+    EXPECT_EQ(m.size(), 2u);
+
+    EXPECT_EQ(m[9], 2);
+    EXPECT_EQ(m[0], 5);
+    EXPECT_FALSE(m.UsingFullMap());
+  }
+
+  src[8] = 23;
+  src[1234] = 90;
+  src[-5] = 6;
+
+  {
+    small_map<std::unordered_map<int, int>> m(src);
+    EXPECT_EQ(m[   9],  2);
+    EXPECT_EQ(m[   0],  5);
+    EXPECT_EQ(m[1234], 90);
+    EXPECT_EQ(m[   8], 23);
+    EXPECT_EQ(m[  -5],  6);
+    EXPECT_EQ(m.size(), 5u);
+    EXPECT_FALSE(m.empty());
+    EXPECT_TRUE(m.UsingFullMap());
+  }
+}
+
+template <class inner>
+static bool SmallMapIsSubset(small_map<inner> const& a,
+                             small_map<inner> const& b) {
+  typename small_map<inner>::const_iterator it;
+  for (it = a.begin(); it != a.end(); ++it) {
+    typename small_map<inner>::const_iterator it_in_b = b.find(it->first);
+    if (it_in_b == b.end() || it_in_b->second != it->second)
+      return false;
+  }
+  return true;
+}
+
+template <class inner>
+static bool SmallMapEqual(small_map<inner> const& a,
+                          small_map<inner> const& b) {
+  return SmallMapIsSubset(a, b) && SmallMapIsSubset(b, a);
+}
+
+TEST(SmallMap, AssignmentOperator) {
+  small_map<std::unordered_map<int, int>> src_small;
+  small_map<std::unordered_map<int, int>> src_large;
+
+  src_small[1] = 20;
+  src_small[2] = 21;
+  src_small[3] = 22;
+  EXPECT_FALSE(src_small.UsingFullMap());
+
+  src_large[1] = 20;
+  src_large[2] = 21;
+  src_large[3] = 22;
+  src_large[5] = 23;
+  src_large[6] = 24;
+  src_large[7] = 25;
+  EXPECT_TRUE(src_large.UsingFullMap());
+
+  // Assignments to empty.
+  small_map<std::unordered_map<int, int>> dest_small;
+  dest_small = src_small;
+  EXPECT_TRUE(SmallMapEqual(dest_small, src_small));
+  EXPECT_EQ(dest_small.UsingFullMap(),
+            src_small.UsingFullMap());
+
+  small_map<std::unordered_map<int, int>> dest_large;
+  dest_large = src_large;
+  EXPECT_TRUE(SmallMapEqual(dest_large, src_large));
+  EXPECT_EQ(dest_large.UsingFullMap(),
+            src_large.UsingFullMap());
+
+  // Assignments which assign from full to small, and vice versa.
+  dest_small = src_large;
+  EXPECT_TRUE(SmallMapEqual(dest_small, src_large));
+  EXPECT_EQ(dest_small.UsingFullMap(),
+            src_large.UsingFullMap());
+
+  dest_large = src_small;
+  EXPECT_TRUE(SmallMapEqual(dest_large, src_small));
+  EXPECT_EQ(dest_large.UsingFullMap(),
+            src_small.UsingFullMap());
+
+  // Double check that SmallMapEqual works:
+  dest_large[42] = 666;
+  EXPECT_FALSE(SmallMapEqual(dest_large, src_small));
+}
+
+TEST(SmallMap, Insert) {
+  small_map<std::unordered_map<int, int>> sm;
+
+  // loop through the transition from small map to map.
+  for (int i = 1; i <= 10; ++i) {
+    VLOG(1) << "Iteration " << i;
+    // insert an element
+    std::pair<small_map<std::unordered_map<int, int>>::iterator, bool> ret;
+    ret = sm.insert(std::make_pair(i, 100*i));
+    EXPECT_TRUE(ret.second);
+    EXPECT_TRUE(ret.first == sm.find(i));
+    EXPECT_EQ(ret.first->first, i);
+    EXPECT_EQ(ret.first->second, 100*i);
+
+    // try to insert it again with different value, fails, but we still get an
+    // iterator back with the original value.
+    ret = sm.insert(std::make_pair(i, -i));
+    EXPECT_FALSE(ret.second);
+    EXPECT_TRUE(ret.first == sm.find(i));
+    EXPECT_EQ(ret.first->first, i);
+    EXPECT_EQ(ret.first->second, 100*i);
+
+    // check the state of the map.
+    for (int j = 1; j <= i; ++j) {
+      small_map<std::unordered_map<int, int>>::iterator it = sm.find(j);
+      EXPECT_TRUE(it != sm.end());
+      EXPECT_EQ(it->first, j);
+      EXPECT_EQ(it->second, j * 100);
+    }
+    EXPECT_EQ(sm.size(), static_cast<size_t>(i));
+    EXPECT_FALSE(sm.empty());
+  }
+}
+
+TEST(SmallMap, InsertRange) {
+  // loop through the transition from small map to map.
+  for (int elements = 0; elements <= 10; ++elements) {
+    VLOG(1) << "Elements " << elements;
+    std::unordered_map<int, int> normal_map;
+    for (int i = 1; i <= elements; ++i) {
+      normal_map.insert(std::make_pair(i, 100*i));
+    }
+
+    small_map<std::unordered_map<int, int>> sm;
+    sm.insert(normal_map.begin(), normal_map.end());
+    EXPECT_EQ(normal_map.size(), sm.size());
+    for (int i = 1; i <= elements; ++i) {
+      VLOG(1) << "Iteration " << i;
+      EXPECT_TRUE(sm.find(i) != sm.end());
+      EXPECT_EQ(sm.find(i)->first, i);
+      EXPECT_EQ(sm.find(i)->second, 100*i);
+    }
+  }
+}
+
+TEST(SmallMap, Erase) {
+  small_map<std::unordered_map<std::string, int>> m;
+  small_map<std::unordered_map<std::string, int>>::iterator iter;
+
+  m["monday"] = 1;
+  m["tuesday"] = 2;
+  m["wednesday"] = 3;
+
+  EXPECT_EQ(m["monday"   ], 1);
+  EXPECT_EQ(m["tuesday"  ], 2);
+  EXPECT_EQ(m["wednesday"], 3);
+  EXPECT_EQ(m.count("tuesday"), 1u);
+  EXPECT_FALSE(m.UsingFullMap());
+
+  iter = m.begin();
+  ASSERT_TRUE(iter != m.end());
+  EXPECT_EQ(iter->first, "monday");
+  EXPECT_EQ(iter->second, 1);
+  ++iter;
+  ASSERT_TRUE(iter != m.end());
+  EXPECT_EQ(iter->first, "tuesday");
+  EXPECT_EQ(iter->second, 2);
+  ++iter;
+  ASSERT_TRUE(iter != m.end());
+  EXPECT_EQ(iter->first, "wednesday");
+  EXPECT_EQ(iter->second, 3);
+  ++iter;
+  EXPECT_TRUE(iter == m.end());
+
+  EXPECT_EQ(m.erase("tuesday"), 1u);
+
+  EXPECT_EQ(m["monday"   ], 1);
+  EXPECT_EQ(m["wednesday"], 3);
+  EXPECT_EQ(m.count("tuesday"), 0u);
+  EXPECT_EQ(m.erase("tuesday"), 0u);
+
+  iter = m.begin();
+  ASSERT_TRUE(iter != m.end());
+  EXPECT_EQ(iter->first, "monday");
+  EXPECT_EQ(iter->second, 1);
+  ++iter;
+  ASSERT_TRUE(iter != m.end());
+  EXPECT_EQ(iter->first, "wednesday");
+  EXPECT_EQ(iter->second, 3);
+  ++iter;
+  EXPECT_TRUE(iter == m.end());
+
+  m["thursday"] = 4;
+  m["friday"] = 5;
+  EXPECT_EQ(m.size(), 4u);
+  EXPECT_FALSE(m.empty());
+  EXPECT_FALSE(m.UsingFullMap());
+
+  m["saturday"] = 6;
+  EXPECT_TRUE(m.UsingFullMap());
+
+  EXPECT_EQ(m.count("friday"), 1u);
+  EXPECT_EQ(m.erase("friday"), 1u);
+  EXPECT_TRUE(m.UsingFullMap());
+  EXPECT_EQ(m.count("friday"), 0u);
+  EXPECT_EQ(m.erase("friday"), 0u);
+
+  EXPECT_EQ(m.size(), 4u);
+  EXPECT_FALSE(m.empty());
+  EXPECT_EQ(m.erase("monday"), 1u);
+  EXPECT_EQ(m.size(), 3u);
+  EXPECT_FALSE(m.empty());
+
+  m.clear();
+  EXPECT_FALSE(m.UsingFullMap());
+  EXPECT_EQ(m.size(), 0u);
+  EXPECT_TRUE(m.empty());
+}
+
+TEST(SmallMap, EraseReturnsIteratorFollowingRemovedElement) {
+  small_map<std::unordered_map<std::string, int>> m;
+  small_map<std::unordered_map<std::string, int>>::iterator iter;
+
+  m["a"] = 0;
+  m["b"] = 1;
+  m["c"] = 2;
+
+  // Erase first item.
+  auto following_iter = m.erase(m.begin());
+  EXPECT_EQ(m.begin(), following_iter);
+  EXPECT_EQ(2u, m.size());
+  EXPECT_EQ(m.count("a"), 0u);
+  EXPECT_EQ(m.count("b"), 1u);
+  EXPECT_EQ(m.count("c"), 1u);
+
+  // Iterate to last item and erase it.
+  ++following_iter;
+  following_iter = m.erase(following_iter);
+  ASSERT_EQ(1u, m.size());
+  EXPECT_EQ(m.end(), following_iter);
+  EXPECT_EQ(m.count("b"), 0u);
+  EXPECT_EQ(m.count("c"), 1u);
+
+  // Erase remaining item.
+  following_iter = m.erase(m.begin());
+  EXPECT_TRUE(m.empty());
+  EXPECT_EQ(m.end(), following_iter);
+}
+
+TEST(SmallMap, NonHashMap) {
+  small_map<std::map<int, int>, 4, std::equal_to<int>> m;
+  EXPECT_TRUE(m.empty());
+
+  m[9] = 2;
+  m[0] = 5;
+
+  EXPECT_EQ(m[9], 2);
+  EXPECT_EQ(m[0], 5);
+  EXPECT_EQ(m.size(), 2u);
+  EXPECT_FALSE(m.empty());
+  EXPECT_FALSE(m.UsingFullMap());
+
+  small_map<std::map<int, int>, 4, std::equal_to<int>>::iterator iter(
+      m.begin());
+  ASSERT_TRUE(iter != m.end());
+  EXPECT_EQ(iter->first, 9);
+  EXPECT_EQ(iter->second, 2);
+  ++iter;
+  ASSERT_TRUE(iter != m.end());
+  EXPECT_EQ(iter->first, 0);
+  EXPECT_EQ(iter->second, 5);
+  ++iter;
+  EXPECT_TRUE(iter == m.end());
+  --iter;
+  ASSERT_TRUE(iter != m.end());
+  EXPECT_EQ(iter->first, 0);
+  EXPECT_EQ(iter->second, 5);
+
+  m[8] = 23;
+  m[1234] = 90;
+  m[-5] = 6;
+
+  EXPECT_EQ(m[   9],  2);
+  EXPECT_EQ(m[   0],  5);
+  EXPECT_EQ(m[1234], 90);
+  EXPECT_EQ(m[   8], 23);
+  EXPECT_EQ(m[  -5],  6);
+  EXPECT_EQ(m.size(), 5u);
+  EXPECT_FALSE(m.empty());
+  EXPECT_TRUE(m.UsingFullMap());
+
+  iter = m.begin();
+  ASSERT_TRUE(iter != m.end());
+  EXPECT_EQ(iter->first, -5);
+  EXPECT_EQ(iter->second, 6);
+  ++iter;
+  ASSERT_TRUE(iter != m.end());
+  EXPECT_EQ(iter->first, 0);
+  EXPECT_EQ(iter->second, 5);
+  ++iter;
+  ASSERT_TRUE(iter != m.end());
+  EXPECT_EQ(iter->first, 8);
+  EXPECT_EQ(iter->second, 23);
+  ++iter;
+  ASSERT_TRUE(iter != m.end());
+  EXPECT_EQ(iter->first, 9);
+  EXPECT_EQ(iter->second, 2);
+  ++iter;
+  ASSERT_TRUE(iter != m.end());
+  EXPECT_EQ(iter->first, 1234);
+  EXPECT_EQ(iter->second, 90);
+  ++iter;
+  EXPECT_TRUE(iter == m.end());
+  --iter;
+  ASSERT_TRUE(iter != m.end());
+  EXPECT_EQ(iter->first, 1234);
+  EXPECT_EQ(iter->second, 90);
+}
+
+TEST(SmallMap, DefaultEqualKeyWorks) {
+  // If these tests compile, they pass. The EXPECT calls are only there to avoid
+  // unused variable warnings.
+  small_map<std::unordered_map<int, int>> hm;
+  EXPECT_EQ(0u, hm.size());
+  small_map<std::map<int, int>> m;
+  EXPECT_EQ(0u, m.size());
+}
+
+namespace {
+
+class unordered_map_add_item : public std::unordered_map<int, int> {
+ public:
+  unordered_map_add_item() = default;
+  explicit unordered_map_add_item(const std::pair<int, int>& item) {
+    insert(item);
+  }
+};
+
+void InitMap(unordered_map_add_item* map_ctor) {
+  new (map_ctor) unordered_map_add_item(std::make_pair(0, 0));
+}
+
+class unordered_map_add_item_initializer {
+ public:
+  explicit unordered_map_add_item_initializer(int item_to_add)
+      : item_(item_to_add) {}
+  unordered_map_add_item_initializer() : item_(0) {}
+  void operator()(unordered_map_add_item* map_ctor) const {
+    new (map_ctor) unordered_map_add_item(std::make_pair(item_, item_));
+  }
+
+  int item_;
+};
+
+}  // anonymous namespace
+
+TEST(SmallMap, SubclassInitializationWithFunctionPointer) {
+  small_map<unordered_map_add_item, 4, std::equal_to<int>,
+            void (&)(unordered_map_add_item*)>
+      m(InitMap);
+
+  EXPECT_TRUE(m.empty());
+
+  m[1] = 1;
+  m[2] = 2;
+  m[3] = 3;
+  m[4] = 4;
+
+  EXPECT_EQ(4u, m.size());
+  EXPECT_EQ(0u, m.count(0));
+
+  m[5] = 5;
+  EXPECT_EQ(6u, m.size());
+  // Our function adds an extra item when we convert to a map.
+  EXPECT_EQ(1u, m.count(0));
+}
+
+TEST(SmallMap, SubclassInitializationWithFunctionObject) {
+  small_map<unordered_map_add_item, 4, std::equal_to<int>,
+            unordered_map_add_item_initializer>
+      m(unordered_map_add_item_initializer(-1));
+
+  EXPECT_TRUE(m.empty());
+
+  m[1] = 1;
+  m[2] = 2;
+  m[3] = 3;
+  m[4] = 4;
+
+  EXPECT_EQ(4u, m.size());
+  EXPECT_EQ(0u, m.count(-1));
+
+  m[5] = 5;
+  EXPECT_EQ(6u, m.size());
+  // Our functor adds an extra item when we convert to a map.
+  EXPECT_EQ(1u, m.count(-1));
+}
+
+namespace {
+
+// This class acts as a basic implementation of a move-only type. The canonical
+// example of such a type is scoped_ptr/unique_ptr.
+template <typename V>
+class MoveOnlyType {
+ public:
+  MoveOnlyType() : value_(0) {}
+  explicit MoveOnlyType(V value) : value_(value) {}
+
+  MoveOnlyType(MoveOnlyType&& other) {
+    *this = std::move(other);
+  }
+
+  MoveOnlyType& operator=(MoveOnlyType&& other) {
+    value_ = other.value_;
+    other.value_ = 0;
+    return *this;
+  }
+
+  MoveOnlyType(const MoveOnlyType&) = delete;
+  MoveOnlyType& operator=(const MoveOnlyType&) = delete;
+
+  V value() const { return value_; }
+
+ private:
+  V value_;
+};
+
+}  // namespace
+
+TEST(SmallMap, MoveOnlyValueType) {
+  small_map<std::map<int, MoveOnlyType<int>>, 2> m;
+
+  m[0] = MoveOnlyType<int>(1);
+  m[1] = MoveOnlyType<int>(2);
+  m.erase(m.begin());
+
+  // small_map will move m[1] to an earlier index in the internal array.
+  EXPECT_EQ(m.size(), 1u);
+  EXPECT_EQ(m[1].value(), 2);
+
+  m[0] = MoveOnlyType<int>(1);
+  // small_map must move the values from the array into the internal std::map.
+  m[2] = MoveOnlyType<int>(3);
+
+  EXPECT_EQ(m.size(), 3u);
+  EXPECT_EQ(m[0].value(), 1);
+  EXPECT_EQ(m[1].value(), 2);
+  EXPECT_EQ(m[2].value(), 3);
+
+  m.erase(m.begin());
+
+  // small_map should also let internal std::map erase with a move-only type.
+  EXPECT_EQ(m.size(), 2u);
+  EXPECT_EQ(m[1].value(), 2);
+  EXPECT_EQ(m[2].value(), 3);
+}
+
+TEST(SmallMap, Emplace) {
+  small_map<std::map<size_t, MoveOnlyType<size_t>>> sm;
+
+  // loop through the transition from small map to map.
+  for (size_t i = 1; i <= 10; ++i) {
+    // insert an element
+    auto ret = sm.emplace(i, MoveOnlyType<size_t>(100 * i));
+    EXPECT_TRUE(ret.second);
+    EXPECT_TRUE(ret.first == sm.find(i));
+    EXPECT_EQ(ret.first->first, i);
+    EXPECT_EQ(ret.first->second.value(), 100 * i);
+
+    // try to insert it again with different value, fails, but we still get an
+    // iterator back with the original value.
+    ret = sm.emplace(i, MoveOnlyType<size_t>(i));
+    EXPECT_FALSE(ret.second);
+    EXPECT_TRUE(ret.first == sm.find(i));
+    EXPECT_EQ(ret.first->first, i);
+    EXPECT_EQ(ret.first->second.value(), 100 * i);
+
+    // check the state of the map.
+    for (size_t j = 1; j <= i; ++j) {
+      const auto it = sm.find(j);
+      EXPECT_TRUE(it != sm.end());
+      EXPECT_EQ(it->first, j);
+      EXPECT_EQ(it->second.value(), j * 100);
+    }
+    EXPECT_EQ(sm.size(), i);
+    EXPECT_FALSE(sm.empty());
+  }
+}
+
+}  // namespace base
diff --git a/base/containers/span.h b/base/containers/span.h
new file mode 100644
index 0000000..f1e0000
--- /dev/null
+++ b/base/containers/span.h
@@ -0,0 +1,453 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_SPAN_H_
+#define BASE_CONTAINERS_SPAN_H_
+
+#include <stddef.h>
+
+#include <algorithm>
+#include <array>
+#include <iterator>
+#include <type_traits>
+#include <utility>
+
+#include "base/logging.h"
+#include "base/stl_util.h"
+
+namespace base {
+
+// [views.constants]
+constexpr size_t dynamic_extent = static_cast<size_t>(-1);
+
+template <typename T, size_t Extent = dynamic_extent>
+class span;
+
+namespace internal {
+
+template <typename T>
+struct IsSpanImpl : std::false_type {};
+
+template <typename T, size_t Extent>
+struct IsSpanImpl<span<T, Extent>> : std::true_type {};
+
+template <typename T>
+using IsSpan = IsSpanImpl<std::decay_t<T>>;
+
+template <typename T>
+struct IsStdArrayImpl : std::false_type {};
+
+template <typename T, size_t N>
+struct IsStdArrayImpl<std::array<T, N>> : std::true_type {};
+
+template <typename T>
+using IsStdArray = IsStdArrayImpl<std::decay_t<T>>;
+
+template <typename T>
+using IsCArray = std::is_array<std::remove_reference_t<T>>;
+
+template <typename From, typename To>
+using IsLegalDataConversion = std::is_convertible<From (*)[], To (*)[]>;
+
+template <typename Container, typename T>
+using ContainerHasConvertibleData = IsLegalDataConversion<
+    std::remove_pointer_t<decltype(base::data(std::declval<Container>()))>,
+    T>;
+
+template <typename Container>
+using ContainerHasIntegralSize =
+    std::is_integral<decltype(base::size(std::declval<Container>()))>;
+
+template <typename From, size_t FromExtent, typename To, size_t ToExtent>
+using EnableIfLegalSpanConversion =
+    std::enable_if_t<(ToExtent == dynamic_extent || ToExtent == FromExtent) &&
+                     IsLegalDataConversion<From, To>::value>;
+
+// SFINAE check if Array can be converted to a span<T>.
+template <typename Array, size_t N, typename T, size_t Extent>
+using EnableIfSpanCompatibleArray =
+    std::enable_if_t<(Extent == dynamic_extent || Extent == N) &&
+                     ContainerHasConvertibleData<Array, T>::value>;
+
+// SFINAE check if Container can be converted to a span<T>.
+template <typename Container, typename T>
+using EnableIfSpanCompatibleContainer =
+    std::enable_if_t<!internal::IsSpan<Container>::value &&
+                     !internal::IsStdArray<Container>::value &&
+                     !internal::IsCArray<Container>::value &&
+                     ContainerHasConvertibleData<Container, T>::value &&
+                     ContainerHasIntegralSize<Container>::value>;
+
+}  // namespace internal
+
+// A span is a value type that represents an array of elements of type T. Since
+// it only consists of a pointer to memory with an associated size, it is very
+// light-weight. It is cheap to construct, copy, move and use spans, so that
+// users are encouraged to use it as a pass-by-value parameter. A span does not
+// own the underlying memory, so care must be taken to ensure that a span does
+// not outlive the backing store.
+//
+// span is somewhat analogous to StringPiece, but with arbitrary element types,
+// allowing mutation if T is non-const.
+//
+// span is implicitly convertible from C++ arrays, as well as most [1]
+// container-like types that provide a data() and size() method (such as
+// std::vector<T>). A mutable span<T> can also be implicitly converted to an
+// immutable span<const T>.
+//
+// Consider using a span for functions that take a data pointer and size
+// parameter: it allows the function to still act on an array-like type, while
+// allowing the caller code to be a bit more concise.
+//
+// For read-only data access pass a span<const T>: the caller can supply either
+// a span<const T> or a span<T>, while the callee will have a read-only view.
+// For read-write access a mutable span<T> is required.
+//
+// Without span:
+//   Read-Only:
+//     // std::string HexEncode(const uint8_t* data, size_t size);
+//     std::vector<uint8_t> data_buffer = GenerateData();
+//     std::string r = HexEncode(data_buffer.data(), data_buffer.size());
+//
+//  Mutable:
+//     // ssize_t SafeSNPrintf(char* buf, size_t N, const char* fmt, Args...);
+//     char str_buffer[100];
+//     SafeSNPrintf(str_buffer, sizeof(str_buffer), "Pi ~= %lf", 3.14);
+//
+// With span:
+//   Read-Only:
+//     // std::string HexEncode(base::span<const uint8_t> data);
+//     std::vector<uint8_t> data_buffer = GenerateData();
+//     std::string r = HexEncode(data_buffer);
+//
+//  Mutable:
+//     // ssize_t SafeSNPrintf(base::span<char>, const char* fmt, Args...);
+//     char str_buffer[100];
+//     SafeSNPrintf(str_buffer, "Pi ~= %lf", 3.14);
+//
+// Spans with "const" and pointers
+// -------------------------------
+//
+// Const and pointers can get confusing. Here are vectors of pointers and their
+// corresponding spans:
+//
+//   const std::vector<int*>        =>  base::span<int* const>
+//   std::vector<const int*>        =>  base::span<const int*>
+//   const std::vector<const int*>  =>  base::span<const int* const>
+//
+// Differences from the working group proposal
+// -------------------------------------------
+//
+// https://wg21.link/P0122 is the latest working group proposal; Chromium
+// currently implements R7. Differences between the proposal and the
+// implementation are documented in subsections below.
+//
+// Differences from [span.objectrep]:
+// - as_bytes() and as_writable_bytes() return spans of uint8_t instead of
+//   std::byte
+//
+// Differences in constants and types:
+// - index_type is aliased to size_t
+//
+// Differences from [span.sub]:
+// - using size_t instead of ptrdiff_t for indexing
+//
+// Differences from [span.obs]:
+// - using size_t instead of ptrdiff_t to represent size()
+//
+// Differences from [span.elem]:
+// - using size_t instead of ptrdiff_t for indexing
+//
+// Furthermore, all constructors and methods are marked noexcept due to the lack
+// of exceptions in Chromium.
+//
+// Due to the lack of class template argument deduction guides in C++14,
+// appropriate make_span() utility functions are provided.
+
+// [span], class template span
+template <typename T, size_t Extent>
+class span {
+ public:
+  using element_type = T;
+  using value_type = std::remove_cv_t<T>;
+  using index_type = size_t;
+  using difference_type = ptrdiff_t;
+  using pointer = T*;
+  using reference = T&;
+  using iterator = T*;
+  using const_iterator = const T*;
+  using reverse_iterator = std::reverse_iterator<iterator>;
+  using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+  static constexpr index_type extent = Extent;
+
+  // [span.cons], span constructors, copy, assignment, and destructor
+  constexpr span() noexcept : data_(nullptr), size_(0) {
+    static_assert(Extent == dynamic_extent || Extent == 0, "Invalid Extent");
+  }
+
+  constexpr span(T* data, size_t size) noexcept : data_(data), size_(size) {
+    CHECK(Extent == dynamic_extent || Extent == size);
+  }
+
+  // Artificially templatized to break ambiguity for span(ptr, 0).
+  template <typename = void>
+  constexpr span(T* begin, T* end) noexcept : span(begin, end - begin) {
+    // Note: CHECK_LE is not constexpr, hence regular CHECK must be used.
+    CHECK(begin <= end);
+  }
+
+  template <
+      size_t N,
+      typename = internal::EnableIfSpanCompatibleArray<T (&)[N], N, T, Extent>>
+  constexpr span(T (&array)[N]) noexcept : span(base::data(array), N) {}
+
+  template <
+      size_t N,
+      typename = internal::
+          EnableIfSpanCompatibleArray<std::array<value_type, N>&, N, T, Extent>>
+  constexpr span(std::array<value_type, N>& array) noexcept
+      : span(base::data(array), N) {}
+
+  template <size_t N,
+            typename = internal::EnableIfSpanCompatibleArray<
+                const std::array<value_type, N>&,
+                N,
+                T,
+                Extent>>
+  constexpr span(const std::array<value_type, N>& array) noexcept
+      : span(base::data(array), N) {}
+
+  // Conversion from a container that has compatible base::data() and integral
+  // base::size().
+  template <typename Container,
+            typename = internal::EnableIfSpanCompatibleContainer<Container&, T>>
+  constexpr span(Container& container) noexcept
+      : span(base::data(container), base::size(container)) {}
+
+  template <
+      typename Container,
+      typename = internal::EnableIfSpanCompatibleContainer<const Container&, T>>
+  span(const Container& container) noexcept
+      : span(base::data(container), base::size(container)) {}
+
+  constexpr span(const span& other) noexcept = default;
+
+  // Conversions from spans of compatible types and extents: this allows a
+  // span<T> to be seamlessly used as a span<const T>, but not the other way
+  // around. If extent is not dynamic, OtherExtent has to be equal to Extent.
+  template <
+      typename U,
+      size_t OtherExtent,
+      typename =
+          internal::EnableIfLegalSpanConversion<U, OtherExtent, T, Extent>>
+  constexpr span(const span<U, OtherExtent>& other)
+      : span(other.data(), other.size()) {}
+
+  constexpr span& operator=(const span& other) noexcept = default;
+  ~span() noexcept = default;
+
+  // [span.sub], span subviews
+  template <size_t Count>
+  constexpr span<T, Count> first() const noexcept {
+    static_assert(Extent == dynamic_extent || Count <= Extent,
+                  "Count must not exceed Extent");
+    CHECK(Extent != dynamic_extent || Count <= size());
+    return {data(), Count};
+  }
+
+  template <size_t Count>
+  constexpr span<T, Count> last() const noexcept {
+    static_assert(Extent == dynamic_extent || Count <= Extent,
+                  "Count must not exceed Extent");
+    CHECK(Extent != dynamic_extent || Count <= size());
+    return {data() + (size() - Count), Count};
+  }
+
+  template <size_t Offset, size_t Count = dynamic_extent>
+  constexpr span<T,
+                 (Count != dynamic_extent
+                     ? Count
+                     : (Extent != dynamic_extent ? Extent - Offset
+                                                 : dynamic_extent))>
+  subspan() const noexcept {
+    static_assert(Extent == dynamic_extent || Offset <= Extent,
+                  "Offset must not exceed Extent");
+    static_assert(Extent == dynamic_extent || Count == dynamic_extent ||
+                      Count <= Extent - Offset,
+                  "Count must not exceed Extent - Offset");
+    CHECK(Extent != dynamic_extent || Offset <= size());
+    CHECK(Extent != dynamic_extent || Count == dynamic_extent ||
+          Count <= size() - Offset);
+    return {data() + Offset, Count != dynamic_extent ? Count : size() - Offset};
+  }
+
+  constexpr span<T, dynamic_extent> first(size_t count) const noexcept {
+    // Note: CHECK_LE is not constexpr, hence regular CHECK must be used.
+    CHECK(count <= size());
+    return {data(), count};
+  }
+
+  constexpr span<T, dynamic_extent> last(size_t count) const noexcept {
+    // Note: CHECK_LE is not constexpr, hence regular CHECK must be used.
+    CHECK(count <= size());
+    return {data() + (size() - count), count};
+  }
+
+  constexpr span<T, dynamic_extent> subspan(size_t offset,
+                                            size_t count = dynamic_extent) const
+      noexcept {
+    // Note: CHECK_LE is not constexpr, hence regular CHECK must be used.
+    CHECK(offset <= size());
+    CHECK(count == dynamic_extent || count <= size() - offset);
+    return {data() + offset, count != dynamic_extent ? count : size() - offset};
+  }
+
+  // [span.obs], span observers
+  constexpr size_t size() const noexcept { return size_; }
+  constexpr size_t size_bytes() const noexcept { return size() * sizeof(T); }
+  constexpr bool empty() const noexcept { return size() == 0; }
+
+  // [span.elem], span element access
+  constexpr T& operator[](size_t idx) const noexcept {
+    // Note: CHECK_LT is not constexpr, hence regular CHECK must be used.
+    CHECK(idx < size());
+    return *(data() + idx);
+  }
+
+  constexpr T& operator()(size_t idx) const noexcept {
+    // Note: CHECK_LT is not constexpr, hence regular CHECK must be used.
+    CHECK(idx < size());
+    return *(data() + idx);
+  }
+
+  constexpr T* data() const noexcept { return data_; }
+
+  // [span.iter], span iterator support
+  constexpr iterator begin() const noexcept { return data(); }
+  constexpr iterator end() const noexcept { return data() + size(); }
+
+  constexpr const_iterator cbegin() const noexcept { return begin(); }
+  constexpr const_iterator cend() const noexcept { return end(); }
+
+  constexpr reverse_iterator rbegin() const noexcept {
+    return reverse_iterator(end());
+  }
+  constexpr reverse_iterator rend() const noexcept {
+    return reverse_iterator(begin());
+  }
+
+  constexpr const_reverse_iterator crbegin() const noexcept {
+    return const_reverse_iterator(cend());
+  }
+  constexpr const_reverse_iterator crend() const noexcept {
+    return const_reverse_iterator(cbegin());
+  }
+
+ private:
+  T* data_;
+  size_t size_;
+};
+
+// span<T, Extent>::extent cannot be declared inline prior to C++17, hence this
+// definition is required.
+template <class T, size_t Extent>
+constexpr size_t span<T, Extent>::extent;
+
+// [span.comparison], span comparison operators
+// Relational operators. Equality is an element-wise comparison.
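+//
+// For example (illustrative):
+//
+//   int a[] = {1, 2, 3};
+//   int b[] = {1, 2, 3};
+//   bool same = make_span(a) == make_span(b);  // true: contents are compared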
+template <typename T, size_t X, typename U, size_t Y>
+constexpr bool operator==(span<T, X> lhs, span<U, Y> rhs) noexcept {
+  return std::equal(lhs.cbegin(), lhs.cend(), rhs.cbegin(), rhs.cend());
+}
+
+template <typename T, size_t X, typename U, size_t Y>
+constexpr bool operator!=(span<T, X> lhs, span<U, Y> rhs) noexcept {
+  return !(lhs == rhs);
+}
+
+template <typename T, size_t X, typename U, size_t Y>
+constexpr bool operator<(span<T, X> lhs, span<U, Y> rhs) noexcept {
+  return std::lexicographical_compare(lhs.cbegin(), lhs.cend(), rhs.cbegin(),
+                                      rhs.cend());
+}
+
+template <typename T, size_t X, typename U, size_t Y>
+constexpr bool operator<=(span<T, X> lhs, span<U, Y> rhs) noexcept {
+  return !(rhs < lhs);
+}
+
+template <typename T, size_t X, typename U, size_t Y>
+constexpr bool operator>(span<T, X> lhs, span<U, Y> rhs) noexcept {
+  return rhs < lhs;
+}
+
+template <typename T, size_t X, typename U, size_t Y>
+constexpr bool operator>=(span<T, X> lhs, span<U, Y> rhs) noexcept {
+  return !(lhs < rhs);
+}
+
+// [span.objectrep], views of object representation
+template <typename T, size_t X>
+span<const uint8_t, (X == dynamic_extent ? dynamic_extent : sizeof(T) * X)>
+as_bytes(span<T, X> s) noexcept {
+  return {reinterpret_cast<const uint8_t*>(s.data()), s.size_bytes()};
+}
+
+template <typename T,
+          size_t X,
+          typename = std::enable_if_t<!std::is_const<T>::value>>
+span<uint8_t, (X == dynamic_extent ? dynamic_extent : sizeof(T) * X)>
+as_writable_bytes(span<T, X> s) noexcept {
+  return {reinterpret_cast<uint8_t*>(s.data()), s.size_bytes()};
+}
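+
+// Illustrative sketch of the byte views above:
+//   uint32_t words[] = {1, 2};
+//   as_bytes(make_span(words));           // span<const uint8_t, 8>
+//   as_writable_bytes(make_span(words));  // span<uint8_t, 8>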
+
+// Type-deducing helpers for constructing a span.
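+//
+// Illustrative sketch (deduced types shown in trailing comments):
+//   int array[] = {1, 2, 3};
+//   auto fixed_span = make_span(array);  // span<int, 3>
+//   std::vector<int> vec = {4, 5, 6};
+//   auto dyn_span = make_span(vec);      // span<int>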
+template <typename T>
+constexpr span<T> make_span(T* data, size_t size) noexcept {
+  return {data, size};
+}
+
+template <typename T>
+constexpr span<T> make_span(T* begin, T* end) noexcept {
+  return {begin, end};
+}
+
+template <typename T, size_t N>
+constexpr span<T, N> make_span(T (&array)[N]) noexcept {
+  return array;
+}
+
+template <typename T, size_t N>
+constexpr span<T, N> make_span(std::array<T, N>& array) noexcept {
+  return array;
+}
+
+template <typename T, size_t N>
+constexpr span<const T, N> make_span(const std::array<T, N>& array) noexcept {
+  return array;
+}
+
+template <typename Container,
+          typename T = typename Container::value_type,
+          typename = internal::EnableIfSpanCompatibleContainer<Container&, T>>
+constexpr span<T> make_span(Container& container) noexcept {
+  return container;
+}
+
+template <
+    typename Container,
+    typename T = const typename Container::value_type,
+    typename = internal::EnableIfSpanCompatibleContainer<const Container&, T>>
+constexpr span<T> make_span(const Container& container) noexcept {
+  return container;
+}
+
+template <typename T, size_t X>
+constexpr span<T, X> make_span(const span<T, X>& span) noexcept {
+  return span;
+}
+
+}  // namespace base
+
+#endif  // BASE_CONTAINERS_SPAN_H_
diff --git a/base/containers/span_unittest.cc b/base/containers/span_unittest.cc
new file mode 100644
index 0000000..de5e401
--- /dev/null
+++ b/base/containers/span_unittest.cc
@@ -0,0 +1,1170 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/containers/span.h"
+
+#include <stdint.h>
+
+#include <algorithm>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/macros.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::ElementsAre;
+using ::testing::Eq;
+using ::testing::Pointwise;
+
+namespace base {
+
+TEST(SpanTest, DefaultConstructor) {
+  span<int> dynamic_span;
+  EXPECT_EQ(nullptr, dynamic_span.data());
+  EXPECT_EQ(0u, dynamic_span.size());
+
+  constexpr span<int, 0> static_span;
+  static_assert(nullptr == static_span.data(), "");
+  static_assert(0u == static_span.size(), "");
+}
+
+TEST(SpanTest, ConstructFromDataAndSize) {
+  constexpr span<int> empty_span(nullptr, 0);
+  EXPECT_TRUE(empty_span.empty());
+  EXPECT_EQ(nullptr, empty_span.data());
+
+  std::vector<int> vector = {1, 1, 2, 3, 5, 8};
+
+  span<int> dynamic_span(vector.data(), vector.size());
+  EXPECT_EQ(vector.data(), dynamic_span.data());
+  EXPECT_EQ(vector.size(), dynamic_span.size());
+
+  for (size_t i = 0; i < dynamic_span.size(); ++i)
+    EXPECT_EQ(vector[i], dynamic_span[i]);
+
+  span<int, 6> static_span(vector.data(), vector.size());
+  EXPECT_EQ(vector.data(), static_span.data());
+  EXPECT_EQ(vector.size(), static_span.size());
+
+  for (size_t i = 0; i < static_span.size(); ++i)
+    EXPECT_EQ(vector[i], static_span[i]);
+}
+
+TEST(SpanTest, ConstructFromPointerPair) {
+  constexpr span<int> empty_span(nullptr, nullptr);
+  EXPECT_TRUE(empty_span.empty());
+  EXPECT_EQ(nullptr, empty_span.data());
+
+  std::vector<int> vector = {1, 1, 2, 3, 5, 8};
+
+  span<int> dynamic_span(vector.data(), vector.data() + vector.size() / 2);
+  EXPECT_EQ(vector.data(), dynamic_span.data());
+  EXPECT_EQ(vector.size() / 2, dynamic_span.size());
+
+  for (size_t i = 0; i < dynamic_span.size(); ++i)
+    EXPECT_EQ(vector[i], dynamic_span[i]);
+
+  span<int, 3> static_span(vector.data(), vector.data() + vector.size() / 2);
+  EXPECT_EQ(vector.data(), static_span.data());
+  EXPECT_EQ(vector.size() / 2, static_span.size());
+
+  for (size_t i = 0; i < static_span.size(); ++i)
+    EXPECT_EQ(vector[i], static_span[i]);
+}
+
+TEST(SpanTest, ConstructFromConstexprArray) {
+  static constexpr int kArray[] = {5, 4, 3, 2, 1};
+
+  constexpr span<const int> dynamic_span(kArray);
+  static_assert(kArray == dynamic_span.data(), "");
+  static_assert(base::size(kArray) == dynamic_span.size(), "");
+
+  static_assert(kArray[0] == dynamic_span[0], "");
+  static_assert(kArray[1] == dynamic_span[1], "");
+  static_assert(kArray[2] == dynamic_span[2], "");
+  static_assert(kArray[3] == dynamic_span[3], "");
+  static_assert(kArray[4] == dynamic_span[4], "");
+
+  constexpr span<const int, base::size(kArray)> static_span(kArray);
+  static_assert(kArray == static_span.data(), "");
+  static_assert(base::size(kArray) == static_span.size(), "");
+
+  static_assert(kArray[0] == static_span[0], "");
+  static_assert(kArray[1] == static_span[1], "");
+  static_assert(kArray[2] == static_span[2], "");
+  static_assert(kArray[3] == static_span[3], "");
+  static_assert(kArray[4] == static_span[4], "");
+}
+
+TEST(SpanTest, ConstructFromArray) {
+  int array[] = {5, 4, 3, 2, 1};
+
+  span<const int> const_span(array);
+  EXPECT_EQ(array, const_span.data());
+  EXPECT_EQ(arraysize(array), const_span.size());
+  for (size_t i = 0; i < const_span.size(); ++i)
+    EXPECT_EQ(array[i], const_span[i]);
+
+  span<int> dynamic_span(array);
+  EXPECT_EQ(array, dynamic_span.data());
+  EXPECT_EQ(base::size(array), dynamic_span.size());
+  for (size_t i = 0; i < dynamic_span.size(); ++i)
+    EXPECT_EQ(array[i], dynamic_span[i]);
+
+  span<int, base::size(array)> static_span(array);
+  EXPECT_EQ(array, static_span.data());
+  EXPECT_EQ(base::size(array), static_span.size());
+  for (size_t i = 0; i < static_span.size(); ++i)
+    EXPECT_EQ(array[i], static_span[i]);
+}
+
+TEST(SpanTest, ConstructFromStdArray) {
+  // Note: Constructing a constexpr span from a constexpr std::array does not
+  // work prior to C++17 due to non-constexpr std::array::data.
+  std::array<int, 5> array = {{5, 4, 3, 2, 1}};
+
+  span<const int> const_span(array);
+  EXPECT_EQ(array.data(), const_span.data());
+  EXPECT_EQ(array.size(), const_span.size());
+  for (size_t i = 0; i < const_span.size(); ++i)
+    EXPECT_EQ(array[i], const_span[i]);
+
+  span<int> dynamic_span(array);
+  EXPECT_EQ(array.data(), dynamic_span.data());
+  EXPECT_EQ(array.size(), dynamic_span.size());
+  for (size_t i = 0; i < dynamic_span.size(); ++i)
+    EXPECT_EQ(array[i], dynamic_span[i]);
+
+  span<int, base::size(array)> static_span(array);
+  EXPECT_EQ(array.data(), static_span.data());
+  EXPECT_EQ(array.size(), static_span.size());
+  for (size_t i = 0; i < static_span.size(); ++i)
+    EXPECT_EQ(array[i], static_span[i]);
+}
+
+TEST(SpanTest, ConstructFromInitializerList) {
+  std::initializer_list<int> il = {1, 1, 2, 3, 5, 8};
+
+  span<const int> const_span(il);
+  EXPECT_EQ(il.begin(), const_span.data());
+  EXPECT_EQ(il.size(), const_span.size());
+
+  for (size_t i = 0; i < const_span.size(); ++i)
+    EXPECT_EQ(il.begin()[i], const_span[i]);
+
+  span<const int, 6> static_span(il);
+  EXPECT_EQ(il.begin(), static_span.data());
+  EXPECT_EQ(il.size(), static_span.size());
+
+  for (size_t i = 0; i < static_span.size(); ++i)
+    EXPECT_EQ(il.begin()[i], static_span[i]);
+}
+
+TEST(SpanTest, ConstructFromStdString) {
+  std::string str = "foobar";
+
+  span<const char> const_span(str);
+  EXPECT_EQ(str.data(), const_span.data());
+  EXPECT_EQ(str.size(), const_span.size());
+
+  for (size_t i = 0; i < const_span.size(); ++i)
+    EXPECT_EQ(str[i], const_span[i]);
+
+  span<char> dynamic_span(str);
+  EXPECT_EQ(str.data(), dynamic_span.data());
+  EXPECT_EQ(str.size(), dynamic_span.size());
+
+  for (size_t i = 0; i < dynamic_span.size(); ++i)
+    EXPECT_EQ(str[i], dynamic_span[i]);
+
+  span<char, 6> static_span(str);
+  EXPECT_EQ(str.data(), static_span.data());
+  EXPECT_EQ(str.size(), static_span.size());
+
+  for (size_t i = 0; i < static_span.size(); ++i)
+    EXPECT_EQ(str[i], static_span[i]);
+}
+
+TEST(SpanTest, ConstructFromConstContainer) {
+  const std::vector<int> vector = {1, 1, 2, 3, 5, 8};
+
+  span<const int> const_span(vector);
+  EXPECT_EQ(vector.data(), const_span.data());
+  EXPECT_EQ(vector.size(), const_span.size());
+
+  for (size_t i = 0; i < const_span.size(); ++i)
+    EXPECT_EQ(vector[i], const_span[i]);
+
+  span<const int, 6> static_span(vector);
+  EXPECT_EQ(vector.data(), static_span.data());
+  EXPECT_EQ(vector.size(), static_span.size());
+
+  for (size_t i = 0; i < static_span.size(); ++i)
+    EXPECT_EQ(vector[i], static_span[i]);
+}
+
+TEST(SpanTest, ConstructFromContainer) {
+  std::vector<int> vector = {1, 1, 2, 3, 5, 8};
+
+  span<const int> const_span(vector);
+  EXPECT_EQ(vector.data(), const_span.data());
+  EXPECT_EQ(vector.size(), const_span.size());
+
+  for (size_t i = 0; i < const_span.size(); ++i)
+    EXPECT_EQ(vector[i], const_span[i]);
+
+  span<int> dynamic_span(vector);
+  EXPECT_EQ(vector.data(), dynamic_span.data());
+  EXPECT_EQ(vector.size(), dynamic_span.size());
+
+  for (size_t i = 0; i < dynamic_span.size(); ++i)
+    EXPECT_EQ(vector[i], dynamic_span[i]);
+
+  span<int, 6> static_span(vector);
+  EXPECT_EQ(vector.data(), static_span.data());
+  EXPECT_EQ(vector.size(), static_span.size());
+
+  for (size_t i = 0; i < static_span.size(); ++i)
+    EXPECT_EQ(vector[i], static_span[i]);
+}
+
+TEST(SpanTest, ConvertNonConstIntegralToConst) {
+  std::vector<int> vector = {1, 1, 2, 3, 5, 8};
+
+  span<int> int_span(vector.data(), vector.size());
+  span<const int> const_span(int_span);
+  EXPECT_THAT(const_span, Pointwise(Eq(), int_span));
+
+  span<int, 6> static_int_span(vector.data(), vector.size());
+  span<const int, 6> static_const_span(static_int_span);
+  EXPECT_THAT(static_const_span, Pointwise(Eq(), static_int_span));
+}
+
+TEST(SpanTest, ConvertNonConstPointerToConst) {
+  auto a = std::make_unique<int>(11);
+  auto b = std::make_unique<int>(22);
+  auto c = std::make_unique<int>(33);
+  std::vector<int*> vector = {a.get(), b.get(), c.get()};
+
+  span<int*> non_const_pointer_span(vector);
+  EXPECT_THAT(non_const_pointer_span, Pointwise(Eq(), vector));
+  span<int* const> const_pointer_span(non_const_pointer_span);
+  EXPECT_THAT(const_pointer_span, Pointwise(Eq(), non_const_pointer_span));
+  // Note: no test for conversion from span<int*> to span<const int*>, since
+  // that would imply a conversion from int** to const int**, which is unsafe.
+  //
+  // Note: no test for conversion from span<int*> to span<const int* const>,
+  // due to CWG Defect 330:
+  // http://open-std.org/JTC1/SC22/WG21/docs/cwg_defects.html#330
+
+  span<int*, 3> static_non_const_pointer_span(vector);
+  EXPECT_THAT(static_non_const_pointer_span, Pointwise(Eq(), vector));
+  span<int* const, 3> static_const_pointer_span(static_non_const_pointer_span);
+  EXPECT_THAT(static_const_pointer_span,
+              Pointwise(Eq(), static_non_const_pointer_span));
+}
+
+TEST(SpanTest, ConvertBetweenEquivalentTypes) {
+  std::vector<int32_t> vector = {2, 4, 8, 16, 32};
+
+  span<int32_t> int32_t_span(vector);
+  span<int> converted_span(int32_t_span);
+  EXPECT_EQ(int32_t_span, converted_span);
+
+  span<int32_t, 5> static_int32_t_span(vector);
+  span<int, 5> static_converted_span(static_int32_t_span);
+  EXPECT_EQ(static_int32_t_span, static_converted_span);
+}
+
+TEST(SpanTest, TemplatedFirst) {
+  static constexpr int array[] = {1, 2, 3};
+  constexpr span<const int, 3> span(array);
+
+  {
+    constexpr auto subspan = span.first<0>();
+    static_assert(span.data() == subspan.data(), "");
+    static_assert(0u == subspan.size(), "");
+    static_assert(0u == decltype(subspan)::extent, "");
+  }
+
+  {
+    constexpr auto subspan = span.first<1>();
+    static_assert(span.data() == subspan.data(), "");
+    static_assert(1u == subspan.size(), "");
+    static_assert(1u == decltype(subspan)::extent, "");
+    static_assert(1 == subspan[0], "");
+  }
+
+  {
+    constexpr auto subspan = span.first<2>();
+    static_assert(span.data() == subspan.data(), "");
+    static_assert(2u == subspan.size(), "");
+    static_assert(2u == decltype(subspan)::extent, "");
+    static_assert(1 == subspan[0], "");
+    static_assert(2 == subspan[1], "");
+  }
+
+  {
+    constexpr auto subspan = span.first<3>();
+    static_assert(span.data() == subspan.data(), "");
+    static_assert(3u == subspan.size(), "");
+    static_assert(3u == decltype(subspan)::extent, "");
+    static_assert(1 == subspan[0], "");
+    static_assert(2 == subspan[1], "");
+    static_assert(3 == subspan[2], "");
+  }
+}
+
+TEST(SpanTest, TemplatedLast) {
+  static constexpr int array[] = {1, 2, 3};
+  constexpr span<const int, 3> span(array);
+
+  {
+    constexpr auto subspan = span.last<0>();
+    static_assert(span.data() + 3 == subspan.data(), "");
+    static_assert(0u == subspan.size(), "");
+    static_assert(0u == decltype(subspan)::extent, "");
+  }
+
+  {
+    constexpr auto subspan = span.last<1>();
+    static_assert(span.data() + 2 == subspan.data(), "");
+    static_assert(1u == subspan.size(), "");
+    static_assert(1u == decltype(subspan)::extent, "");
+    static_assert(3 == subspan[0], "");
+  }
+
+  {
+    constexpr auto subspan = span.last<2>();
+    static_assert(span.data() + 1 == subspan.data(), "");
+    static_assert(2u == subspan.size(), "");
+    static_assert(2u == decltype(subspan)::extent, "");
+    static_assert(2 == subspan[0], "");
+    static_assert(3 == subspan[1], "");
+  }
+
+  {
+    constexpr auto subspan = span.last<3>();
+    static_assert(span.data() == subspan.data(), "");
+    static_assert(3u == subspan.size(), "");
+    static_assert(3u == decltype(subspan)::extent, "");
+    static_assert(1 == subspan[0], "");
+    static_assert(2 == subspan[1], "");
+    static_assert(3 == subspan[2], "");
+  }
+}
+
+TEST(SpanTest, TemplatedSubspan) {
+  static constexpr int array[] = {1, 2, 3};
+  constexpr span<const int, 3> span(array);
+
+  {
+    constexpr auto subspan = span.subspan<0>();
+    static_assert(span.data() == subspan.data(), "");
+    static_assert(3u == subspan.size(), "");
+    static_assert(3u == decltype(subspan)::extent, "");
+    static_assert(1 == subspan[0], "");
+    static_assert(2 == subspan[1], "");
+    static_assert(3 == subspan[2], "");
+  }
+
+  {
+    constexpr auto subspan = span.subspan<1>();
+    static_assert(span.data() + 1 == subspan.data(), "");
+    static_assert(2u == subspan.size(), "");
+    static_assert(2u == decltype(subspan)::extent, "");
+    static_assert(2 == subspan[0], "");
+    static_assert(3 == subspan[1], "");
+  }
+
+  {
+    constexpr auto subspan = span.subspan<2>();
+    static_assert(span.data() + 2 == subspan.data(), "");
+    static_assert(1u == subspan.size(), "");
+    static_assert(1u == decltype(subspan)::extent, "");
+    static_assert(3 == subspan[0], "");
+  }
+
+  {
+    constexpr auto subspan = span.subspan<3>();
+    static_assert(span.data() + 3 == subspan.data(), "");
+    static_assert(0u == subspan.size(), "");
+    static_assert(0u == decltype(subspan)::extent, "");
+  }
+
+  {
+    constexpr auto subspan = span.subspan<0, 0>();
+    static_assert(span.data() == subspan.data(), "");
+    static_assert(0u == subspan.size(), "");
+    static_assert(0u == decltype(subspan)::extent, "");
+  }
+
+  {
+    constexpr auto subspan = span.subspan<1, 0>();
+    static_assert(span.data() + 1 == subspan.data(), "");
+    static_assert(0u == subspan.size(), "");
+    static_assert(0u == decltype(subspan)::extent, "");
+  }
+
+  {
+    constexpr auto subspan = span.subspan<2, 0>();
+    static_assert(span.data() + 2 == subspan.data(), "");
+    static_assert(0u == subspan.size(), "");
+    static_assert(0u == decltype(subspan)::extent, "");
+  }
+
+  {
+    constexpr auto subspan = span.subspan<0, 1>();
+    static_assert(span.data() == subspan.data(), "");
+    static_assert(1u == subspan.size(), "");
+    static_assert(1u == decltype(subspan)::extent, "");
+    static_assert(1 == subspan[0], "");
+  }
+
+  {
+    constexpr auto subspan = span.subspan<1, 1>();
+    static_assert(span.data() + 1 == subspan.data(), "");
+    static_assert(1u == subspan.size(), "");
+    static_assert(1u == decltype(subspan)::extent, "");
+    static_assert(2 == subspan[0], "");
+  }
+
+  {
+    constexpr auto subspan = span.subspan<2, 1>();
+    static_assert(span.data() + 2 == subspan.data(), "");
+    static_assert(1u == subspan.size(), "");
+    static_assert(1u == decltype(subspan)::extent, "");
+    static_assert(3 == subspan[0], "");
+  }
+
+  {
+    constexpr auto subspan = span.subspan<0, 2>();
+    static_assert(span.data() == subspan.data(), "");
+    static_assert(2u == subspan.size(), "");
+    static_assert(2u == decltype(subspan)::extent, "");
+    static_assert(1 == subspan[0], "");
+    static_assert(2 == subspan[1], "");
+  }
+
+  {
+    constexpr auto subspan = span.subspan<1, 2>();
+    static_assert(span.data() + 1 == subspan.data(), "");
+    static_assert(2u == subspan.size(), "");
+    static_assert(2u == decltype(subspan)::extent, "");
+    static_assert(2 == subspan[0], "");
+    static_assert(3 == subspan[1], "");
+  }
+
+  {
+    constexpr auto subspan = span.subspan<0, 3>();
+    static_assert(span.data() == subspan.data(), "");
+    static_assert(3u == subspan.size(), "");
+    static_assert(3u == decltype(subspan)::extent, "");
+    static_assert(1 == subspan[0], "");
+    static_assert(2 == subspan[1], "");
+    static_assert(3 == subspan[2], "");
+  }
+}
+
+TEST(SpanTest, TemplatedFirstOnDynamicSpan) {
+  int array[] = {1, 2, 3};
+  span<const int> span(array);
+
+  {
+    auto subspan = span.first<0>();
+    EXPECT_EQ(span.data(), subspan.data());
+    EXPECT_EQ(0u, subspan.size());
+    static_assert(0u == decltype(subspan)::extent, "");
+  }
+
+  {
+    auto subspan = span.first<1>();
+    EXPECT_EQ(span.data(), subspan.data());
+    EXPECT_EQ(1u, subspan.size());
+    static_assert(1u == decltype(subspan)::extent, "");
+    EXPECT_EQ(1, subspan[0]);
+  }
+
+  {
+    auto subspan = span.first<2>();
+    EXPECT_EQ(span.data(), subspan.data());
+    EXPECT_EQ(2u, subspan.size());
+    static_assert(2u == decltype(subspan)::extent, "");
+    EXPECT_EQ(1, subspan[0]);
+    EXPECT_EQ(2, subspan[1]);
+  }
+
+  {
+    auto subspan = span.first<3>();
+    EXPECT_EQ(span.data(), subspan.data());
+    EXPECT_EQ(3u, subspan.size());
+    static_assert(3u == decltype(subspan)::extent, "");
+    EXPECT_EQ(1, subspan[0]);
+    EXPECT_EQ(2, subspan[1]);
+    EXPECT_EQ(3, subspan[2]);
+  }
+}
+
+TEST(SpanTest, TemplatedLastOnDynamicSpan) {
+  int array[] = {1, 2, 3};
+  span<int> span(array);
+
+  {
+    auto subspan = span.last<0>();
+    EXPECT_EQ(span.data() + 3, subspan.data());
+    EXPECT_EQ(0u, subspan.size());
+    static_assert(0u == decltype(subspan)::extent, "");
+  }
+
+  {
+    auto subspan = span.last<1>();
+    EXPECT_EQ(span.data() + 2, subspan.data());
+    EXPECT_EQ(1u, subspan.size());
+    static_assert(1u == decltype(subspan)::extent, "");
+    EXPECT_EQ(3, subspan[0]);
+  }
+
+  {
+    auto subspan = span.last<2>();
+    EXPECT_EQ(span.data() + 1, subspan.data());
+    EXPECT_EQ(2u, subspan.size());
+    static_assert(2u == decltype(subspan)::extent, "");
+    EXPECT_EQ(2, subspan[0]);
+    EXPECT_EQ(3, subspan[1]);
+  }
+
+  {
+    auto subspan = span.last<3>();
+    EXPECT_EQ(span.data(), subspan.data());
+    EXPECT_EQ(3u, subspan.size());
+    static_assert(3u == decltype(subspan)::extent, "");
+    EXPECT_EQ(1, subspan[0]);
+    EXPECT_EQ(2, subspan[1]);
+    EXPECT_EQ(3, subspan[2]);
+  }
+}
+
+TEST(SpanTest, TemplatedSubspanFromDynamicSpan) {
+  int array[] = {1, 2, 3};
+  span<int, 3> span(array);
+
+  {
+    auto subspan = span.subspan<0>();
+    EXPECT_EQ(span.data(), subspan.data());
+    static_assert(3u == decltype(subspan)::extent, "");
+    EXPECT_EQ(3u, subspan.size());
+    EXPECT_EQ(1, subspan[0]);
+    EXPECT_EQ(2, subspan[1]);
+    EXPECT_EQ(3, subspan[2]);
+  }
+
+  {
+    auto subspan = span.subspan<1>();
+    EXPECT_EQ(span.data() + 1, subspan.data());
+    EXPECT_EQ(2u, subspan.size());
+    static_assert(2u == decltype(subspan)::extent, "");
+    EXPECT_EQ(2, subspan[0]);
+    EXPECT_EQ(3, subspan[1]);
+  }
+
+  {
+    auto subspan = span.subspan<2>();
+    EXPECT_EQ(span.data() + 2, subspan.data());
+    EXPECT_EQ(1u, subspan.size());
+    static_assert(1u == decltype(subspan)::extent, "");
+    EXPECT_EQ(3, subspan[0]);
+  }
+
+  {
+    auto subspan = span.subspan<3>();
+    EXPECT_EQ(span.data() + 3, subspan.data());
+    EXPECT_EQ(0u, subspan.size());
+    static_assert(0u == decltype(subspan)::extent, "");
+  }
+
+  {
+    auto subspan = span.subspan<0, 0>();
+    EXPECT_EQ(span.data(), subspan.data());
+    EXPECT_EQ(0u, subspan.size());
+    static_assert(0u == decltype(subspan)::extent, "");
+  }
+
+  {
+    auto subspan = span.subspan<1, 0>();
+    EXPECT_EQ(span.data() + 1, subspan.data());
+    EXPECT_EQ(0u, subspan.size());
+    static_assert(0u == decltype(subspan)::extent, "");
+  }
+
+  {
+    auto subspan = span.subspan<2, 0>();
+    EXPECT_EQ(span.data() + 2, subspan.data());
+    EXPECT_EQ(0u, subspan.size());
+    static_assert(0u == decltype(subspan)::extent, "");
+  }
+
+  {
+    auto subspan = span.subspan<0, 1>();
+    EXPECT_EQ(span.data(), subspan.data());
+    EXPECT_EQ(1u, subspan.size());
+    static_assert(1u == decltype(subspan)::extent, "");
+    EXPECT_EQ(1, subspan[0]);
+  }
+
+  {
+    auto subspan = span.subspan<1, 1>();
+    EXPECT_EQ(span.data() + 1, subspan.data());
+    EXPECT_EQ(1u, subspan.size());
+    static_assert(1u == decltype(subspan)::extent, "");
+    EXPECT_EQ(2, subspan[0]);
+  }
+
+  {
+    auto subspan = span.subspan<2, 1>();
+    EXPECT_EQ(span.data() + 2, subspan.data());
+    EXPECT_EQ(1u, subspan.size());
+    static_assert(1u == decltype(subspan)::extent, "");
+    EXPECT_EQ(3, subspan[0]);
+  }
+
+  {
+    auto subspan = span.subspan<0, 2>();
+    EXPECT_EQ(span.data(), subspan.data());
+    EXPECT_EQ(2u, subspan.size());
+    static_assert(2u == decltype(subspan)::extent, "");
+    EXPECT_EQ(1, subspan[0]);
+    EXPECT_EQ(2, subspan[1]);
+  }
+
+  {
+    auto subspan = span.subspan<1, 2>();
+    EXPECT_EQ(span.data() + 1, subspan.data());
+    EXPECT_EQ(2u, subspan.size());
+    static_assert(2u == decltype(subspan)::extent, "");
+    EXPECT_EQ(2, subspan[0]);
+    EXPECT_EQ(3, subspan[1]);
+  }
+
+  {
+    auto subspan = span.subspan<0, 3>();
+    EXPECT_EQ(span.data(), subspan.data());
+    EXPECT_EQ(3u, subspan.size());
+    static_assert(3u == decltype(subspan)::extent, "");
+    EXPECT_EQ(1, subspan[0]);
+    EXPECT_EQ(2, subspan[1]);
+    EXPECT_EQ(3, subspan[2]);
+  }
+}
+
+TEST(SpanTest, First) {
+  int array[] = {1, 2, 3};
+  span<int> span(array);
+
+  {
+    auto subspan = span.first(0);
+    EXPECT_EQ(span.data(), subspan.data());
+    EXPECT_EQ(0u, subspan.size());
+  }
+
+  {
+    auto subspan = span.first(1);
+    EXPECT_EQ(span.data(), subspan.data());
+    EXPECT_EQ(1u, subspan.size());
+    EXPECT_EQ(1, subspan[0]);
+  }
+
+  {
+    auto subspan = span.first(2);
+    EXPECT_EQ(span.data(), subspan.data());
+    EXPECT_EQ(2u, subspan.size());
+    EXPECT_EQ(1, subspan[0]);
+    EXPECT_EQ(2, subspan[1]);
+  }
+
+  {
+    auto subspan = span.first(3);
+    EXPECT_EQ(span.data(), subspan.data());
+    EXPECT_EQ(3u, subspan.size());
+    EXPECT_EQ(1, subspan[0]);
+    EXPECT_EQ(2, subspan[1]);
+    EXPECT_EQ(3, subspan[2]);
+  }
+}
+
+TEST(SpanTest, Last) {
+  int array[] = {1, 2, 3};
+  span<int> span(array);
+
+  {
+    auto subspan = span.last(0);
+    EXPECT_EQ(span.data() + 3, subspan.data());
+    EXPECT_EQ(0u, subspan.size());
+  }
+
+  {
+    auto subspan = span.last(1);
+    EXPECT_EQ(span.data() + 2, subspan.data());
+    EXPECT_EQ(1u, subspan.size());
+    EXPECT_EQ(3, subspan[0]);
+  }
+
+  {
+    auto subspan = span.last(2);
+    EXPECT_EQ(span.data() + 1, subspan.data());
+    EXPECT_EQ(2u, subspan.size());
+    EXPECT_EQ(2, subspan[0]);
+    EXPECT_EQ(3, subspan[1]);
+  }
+
+  {
+    auto subspan = span.last(3);
+    EXPECT_EQ(span.data(), subspan.data());
+    EXPECT_EQ(3u, subspan.size());
+    EXPECT_EQ(1, subspan[0]);
+    EXPECT_EQ(2, subspan[1]);
+    EXPECT_EQ(3, subspan[2]);
+  }
+}
+
+TEST(SpanTest, Subspan) {
+  int array[] = {1, 2, 3};
+  span<int> span(array);
+
+  {
+    auto subspan = span.subspan(0);
+    EXPECT_EQ(span.data(), subspan.data());
+    EXPECT_EQ(3u, subspan.size());
+    EXPECT_EQ(1, subspan[0]);
+    EXPECT_EQ(2, subspan[1]);
+    EXPECT_EQ(3, subspan[2]);
+  }
+
+  {
+    auto subspan = span.subspan(1);
+    EXPECT_EQ(span.data() + 1, subspan.data());
+    EXPECT_EQ(2u, subspan.size());
+    EXPECT_EQ(2, subspan[0]);
+    EXPECT_EQ(3, subspan[1]);
+  }
+
+  {
+    auto subspan = span.subspan(2);
+    EXPECT_EQ(span.data() + 2, subspan.data());
+    EXPECT_EQ(1u, subspan.size());
+    EXPECT_EQ(3, subspan[0]);
+  }
+
+  {
+    auto subspan = span.subspan(3);
+    EXPECT_EQ(span.data() + 3, subspan.data());
+    EXPECT_EQ(0u, subspan.size());
+  }
+
+  {
+    auto subspan = span.subspan(0, 0);
+    EXPECT_EQ(span.data(), subspan.data());
+    EXPECT_EQ(0u, subspan.size());
+  }
+
+  {
+    auto subspan = span.subspan(1, 0);
+    EXPECT_EQ(span.data() + 1, subspan.data());
+    EXPECT_EQ(0u, subspan.size());
+  }
+
+  {
+    auto subspan = span.subspan(2, 0);
+    EXPECT_EQ(span.data() + 2, subspan.data());
+    EXPECT_EQ(0u, subspan.size());
+  }
+
+  {
+    auto subspan = span.subspan(0, 1);
+    EXPECT_EQ(span.data(), subspan.data());
+    EXPECT_EQ(1u, subspan.size());
+    EXPECT_EQ(1, subspan[0]);
+  }
+
+  {
+    auto subspan = span.subspan(1, 1);
+    EXPECT_EQ(span.data() + 1, subspan.data());
+    EXPECT_EQ(1u, subspan.size());
+    EXPECT_EQ(2, subspan[0]);
+  }
+
+  {
+    auto subspan = span.subspan(2, 1);
+    EXPECT_EQ(span.data() + 2, subspan.data());
+    EXPECT_EQ(1u, subspan.size());
+    EXPECT_EQ(3, subspan[0]);
+  }
+
+  {
+    auto subspan = span.subspan(0, 2);
+    EXPECT_EQ(span.data(), subspan.data());
+    EXPECT_EQ(2u, subspan.size());
+    EXPECT_EQ(1, subspan[0]);
+    EXPECT_EQ(2, subspan[1]);
+  }
+
+  {
+    auto subspan = span.subspan(1, 2);
+    EXPECT_EQ(span.data() + 1, subspan.data());
+    EXPECT_EQ(2u, subspan.size());
+    EXPECT_EQ(2, subspan[0]);
+    EXPECT_EQ(3, subspan[1]);
+  }
+
+  {
+    auto subspan = span.subspan(0, 3);
+    EXPECT_EQ(span.data(), subspan.data());
+    EXPECT_EQ(span.size(), subspan.size());
+    EXPECT_EQ(1, subspan[0]);
+    EXPECT_EQ(2, subspan[1]);
+    EXPECT_EQ(3, subspan[2]);
+  }
+}
+
+TEST(SpanTest, Size) {
+  {
+    span<int> span;
+    EXPECT_EQ(0u, span.size());
+  }
+
+  {
+    int array[] = {1, 2, 3};
+    span<int> span(array);
+    EXPECT_EQ(3u, span.size());
+  }
+}
+
+TEST(SpanTest, SizeBytes) {
+  {
+    span<int> span;
+    EXPECT_EQ(0u, span.size_bytes());
+  }
+
+  {
+    int array[] = {1, 2, 3};
+    span<int> span(array);
+    EXPECT_EQ(3u * sizeof(int), span.size_bytes());
+  }
+}
+
+TEST(SpanTest, Empty) {
+  {
+    span<int> span;
+    EXPECT_TRUE(span.empty());
+  }
+
+  {
+    int array[] = {1, 2, 3};
+    span<int> span(array);
+    EXPECT_FALSE(span.empty());
+  }
+}
+
+TEST(SpanTest, OperatorAt) {
+  static constexpr int kArray[] = {1, 6, 1, 8, 0};
+  constexpr span<const int> span(kArray);
+
+  static_assert(kArray[0] == span[0], "span[0] does not equal kArray[0]");
+  static_assert(kArray[1] == span[1], "span[1] does not equal kArray[1]");
+  static_assert(kArray[2] == span[2], "span[2] does not equal kArray[2]");
+  static_assert(kArray[3] == span[3], "span[3] does not equal kArray[3]");
+  static_assert(kArray[4] == span[4], "span[4] does not equal kArray[4]");
+
+  static_assert(kArray[0] == span(0), "span(0) does not equal kArray[0]");
+  static_assert(kArray[1] == span(1), "span(1) does not equal kArray[1]");
+  static_assert(kArray[2] == span(2), "span(2) does not equal kArray[2]");
+  static_assert(kArray[3] == span(3), "span(3) does not equal kArray[3]");
+  static_assert(kArray[4] == span(4), "span(4) does not equal kArray[4]");
+}
+
+TEST(SpanTest, Iterator) {
+  static constexpr int kArray[] = {1, 6, 1, 8, 0};
+  constexpr span<const int> span(kArray);
+
+  std::vector<int> results;
+  for (int i : span)
+    results.emplace_back(i);
+  EXPECT_THAT(results, ElementsAre(1, 6, 1, 8, 0));
+}
+
+TEST(SpanTest, ReverseIterator) {
+  static constexpr int kArray[] = {1, 6, 1, 8, 0};
+  constexpr span<const int> span(kArray);
+
+  EXPECT_TRUE(std::equal(std::rbegin(kArray), std::rend(kArray), span.rbegin(),
+                         span.rend()));
+  EXPECT_TRUE(std::equal(std::crbegin(kArray), std::crend(kArray),
+                         span.crbegin(), span.crend()));
+}
+
+TEST(SpanTest, Equality) {
+  static constexpr int kArray1[] = {3, 1, 4, 1, 5};
+  static constexpr int kArray2[] = {3, 1, 4, 1, 5};
+  constexpr span<const int> span1(kArray1);
+  constexpr span<const int, 5> span2(kArray2);
+
+  EXPECT_EQ(span1, span2);
+
+  static constexpr int kArray3[] = {2, 7, 1, 8, 3};
+  constexpr span<const int> span3(kArray3);
+
+  EXPECT_FALSE(span1 == span3);
+
+  static double kArray4[] = {2.0, 7.0, 1.0, 8.0, 3.0};
+  span<double, 5> span4(kArray4);
+
+  EXPECT_EQ(span3, span4);
+}
+
+TEST(SpanTest, Inequality) {
+  static constexpr int kArray1[] = {2, 3, 5, 7, 11};
+  static constexpr int kArray2[] = {1, 4, 6, 8, 9};
+  constexpr span<const int> span1(kArray1);
+  constexpr span<const int, 5> span2(kArray2);
+
+  EXPECT_NE(span1, span2);
+
+  static constexpr int kArray3[] = {2, 3, 5, 7, 11};
+  constexpr span<const int> span3(kArray3);
+
+  EXPECT_FALSE(span1 != span3);
+
+  static double kArray4[] = {1.0, 4.0, 6.0, 8.0, 9.0};
+  span<double, 5> span4(kArray4);
+
+  EXPECT_NE(span3, span4);
+}
+
+TEST(SpanTest, LessThan) {
+  static constexpr int kArray1[] = {2, 3, 5, 7, 11};
+  static constexpr int kArray2[] = {2, 3, 5, 7, 11, 13};
+  constexpr span<const int> span1(kArray1);
+  constexpr span<const int, 6> span2(kArray2);
+
+  EXPECT_LT(span1, span2);
+
+  static constexpr int kArray3[] = {2, 3, 5, 7, 11};
+  constexpr span<const int> span3(kArray3);
+
+  EXPECT_FALSE(span1 < span3);
+
+  static double kArray4[] = {2.0, 3.0, 5.0, 7.0, 11.0, 13.0};
+  span<double, 6> span4(kArray4);
+
+  EXPECT_LT(span3, span4);
+}
+
+TEST(SpanTest, LessEqual) {
+  static constexpr int kArray1[] = {2, 3, 5, 7, 11};
+  static constexpr int kArray2[] = {2, 3, 5, 7, 11, 13};
+  constexpr span<const int> span1(kArray1);
+  constexpr span<const int, 6> span2(kArray2);
+
+  EXPECT_LE(span1, span1);
+  EXPECT_LE(span1, span2);
+
+  static constexpr int kArray3[] = {2, 3, 5, 7, 10};
+  constexpr span<const int> span3(kArray3);
+
+  EXPECT_FALSE(span1 <= span3);
+
+  static double kArray4[] = {2.0, 3.0, 5.0, 7.0, 11.0, 13.0};
+  span<double, 6> span4(kArray4);
+
+  EXPECT_LE(span3, span4);
+}
+
+TEST(SpanTest, GreaterThan) {
+  static constexpr int kArray1[] = {2, 3, 5, 7, 11, 13};
+  static constexpr int kArray2[] = {2, 3, 5, 7, 11};
+  constexpr span<const int> span1(kArray1);
+  constexpr span<const int, 5> span2(kArray2);
+
+  EXPECT_GT(span1, span2);
+
+  static constexpr int kArray3[] = {2, 3, 5, 7, 11, 13};
+  constexpr span<const int> span3(kArray3);
+
+  EXPECT_FALSE(span1 > span3);
+
+  static double kArray4[] = {2.0, 3.0, 5.0, 7.0, 11.0};
+  span<double, 5> span4(kArray4);
+
+  EXPECT_GT(span3, span4);
+}
+
+TEST(SpanTest, GreaterEqual) {
+  static constexpr int kArray1[] = {2, 3, 5, 7, 11, 13};
+  static constexpr int kArray2[] = {2, 3, 5, 7, 11};
+  constexpr span<const int> span1(kArray1);
+  constexpr span<const int, 5> span2(kArray2);
+
+  EXPECT_GE(span1, span1);
+  EXPECT_GE(span1, span2);
+
+  static constexpr int kArray3[] = {2, 3, 5, 7, 12};
+  constexpr span<const int> span3(kArray3);
+
+  EXPECT_FALSE(span1 >= span3);
+
+  static double kArray4[] = {2.0, 3.0, 5.0, 7.0, 11.0};
+  span<double, 5> span4(kArray4);
+
+  EXPECT_GE(span3, span4);
+}
+
+TEST(SpanTest, AsBytes) {
+  {
+    constexpr int kArray[] = {2, 3, 5, 7, 11, 13};
+    span<const uint8_t, sizeof(kArray)> bytes_span =
+        as_bytes(make_span(kArray));
+    EXPECT_EQ(reinterpret_cast<const uint8_t*>(kArray), bytes_span.data());
+    EXPECT_EQ(sizeof(kArray), bytes_span.size());
+    EXPECT_EQ(bytes_span.size(), bytes_span.size_bytes());
+  }
+
+  {
+    std::vector<int> vec = {1, 1, 2, 3, 5, 8};
+    span<int> mutable_span(vec);
+    span<const uint8_t> bytes_span = as_bytes(mutable_span);
+    EXPECT_EQ(reinterpret_cast<const uint8_t*>(vec.data()), bytes_span.data());
+    EXPECT_EQ(sizeof(int) * vec.size(), bytes_span.size());
+    EXPECT_EQ(bytes_span.size(), bytes_span.size_bytes());
+  }
+}
+
+TEST(SpanTest, AsWritableBytes) {
+  std::vector<int> vec = {1, 1, 2, 3, 5, 8};
+  span<int> mutable_span(vec);
+  span<uint8_t> writable_bytes_span = as_writable_bytes(mutable_span);
+  EXPECT_EQ(reinterpret_cast<uint8_t*>(vec.data()), writable_bytes_span.data());
+  EXPECT_EQ(sizeof(int) * vec.size(), writable_bytes_span.size());
+  EXPECT_EQ(writable_bytes_span.size(), writable_bytes_span.size_bytes());
+
+  // Set the first entry of vec to zero while writing through the span.
+  std::fill(writable_bytes_span.data(),
+            writable_bytes_span.data() + sizeof(int), 0);
+  EXPECT_EQ(0, vec[0]);
+}
+
+TEST(SpanTest, MakeSpanFromDataAndSize) {
+  int* nullint = nullptr;
+  auto empty_span = make_span(nullint, 0);
+  EXPECT_TRUE(empty_span.empty());
+  EXPECT_EQ(nullptr, empty_span.data());
+
+  std::vector<int> vector = {1, 1, 2, 3, 5, 8};
+  span<int> span(vector.data(), vector.size());
+  auto made_span = make_span(vector.data(), vector.size());
+  EXPECT_EQ(span, made_span);
+  static_assert(decltype(made_span)::extent == dynamic_extent, "");
+}
+
+TEST(SpanTest, MakeSpanFromPointerPair) {
+  int* nullint = nullptr;
+  auto empty_span = make_span(nullint, nullint);
+  EXPECT_TRUE(empty_span.empty());
+  EXPECT_EQ(nullptr, empty_span.data());
+
+  std::vector<int> vector = {1, 1, 2, 3, 5, 8};
+  span<int> span(vector.data(), vector.size());
+  auto made_span = make_span(vector.data(), vector.data() + vector.size());
+  EXPECT_EQ(span, made_span);
+  static_assert(decltype(made_span)::extent == dynamic_extent, "");
+}
+
+TEST(SpanTest, MakeSpanFromConstexprArray) {
+  static constexpr int kArray[] = {1, 2, 3, 4, 5};
+  constexpr span<const int> span(kArray);
+  EXPECT_EQ(span, make_span(kArray));
+  static_assert(decltype(make_span(kArray))::extent == 5, "");
+}
+
+TEST(SpanTest, MakeSpanFromStdArray) {
+  const std::array<int, 5> kArray = {{1, 2, 3, 4, 5}};
+  span<const int> span(kArray);
+  EXPECT_EQ(span, make_span(kArray));
+  static_assert(decltype(make_span(kArray))::extent == 5, "");
+}
+
+TEST(SpanTest, MakeSpanFromConstContainer) {
+  const std::vector<int> vector = {-1, -2, -3, -4, -5};
+  span<const int> span(vector);
+  EXPECT_EQ(span, make_span(vector));
+  static_assert(decltype(make_span(vector))::extent == dynamic_extent, "");
+}
+
+TEST(SpanTest, MakeSpanFromContainer) {
+  std::vector<int> vector = {-1, -2, -3, -4, -5};
+  span<int> span(vector);
+  EXPECT_EQ(span, make_span(vector));
+  static_assert(decltype(make_span(vector))::extent == dynamic_extent, "");
+}
+
+TEST(SpanTest, MakeSpanFromDynamicSpan) {
+  static constexpr int kArray[] = {1, 2, 3, 4, 5};
+  constexpr span<const int> span(kArray);
+  static_assert(std::is_same<decltype(span)::element_type,
+                             decltype(make_span(span))::element_type>::value,
+                "make_span(span) should have the same element_type as span");
+
+  static_assert(span.data() == make_span(span).data(),
+                "make_span(span) should have the same data() as span");
+
+  static_assert(span.size() == make_span(span).size(),
+                "make_span(span) should have the same size() as span");
+
+  static_assert(decltype(make_span(span))::extent == decltype(span)::extent,
+                "make_span(span) should have the same extent as span");
+}
+
+TEST(SpanTest, MakeSpanFromStaticSpan) {
+  static constexpr int kArray[] = {1, 2, 3, 4, 5};
+  constexpr span<const int, 5> span(kArray);
+  static_assert(std::is_same<decltype(span)::element_type,
+                             decltype(make_span(span))::element_type>::value,
+                "make_span(span) should have the same element_type as span");
+
+  static_assert(span.data() == make_span(span).data(),
+                "make_span(span) should have the same data() as span");
+
+  static_assert(span.size() == make_span(span).size(),
+                "make_span(span) should have the same size() as span");
+
+  static_assert(decltype(make_span(span))::extent == decltype(span)::extent,
+                "make_span(span) should have the same extent as span");
+}
+
+TEST(SpanTest, EnsureConstexprGoodness) {
+  static constexpr int kArray[] = {5, 4, 3, 2, 1};
+  constexpr span<const int> constexpr_span(kArray);
+  const size_t size = 2;
+
+  const size_t start = 1;
+  constexpr span<const int> subspan =
+      constexpr_span.subspan(start, size);
+  for (size_t i = 0; i < subspan.size(); ++i)
+    EXPECT_EQ(kArray[start + i], subspan[i]);
+
+  constexpr span<const int> firsts = constexpr_span.first(size);
+  for (size_t i = 0; i < firsts.size(); ++i)
+    EXPECT_EQ(kArray[i], firsts[i]);
+
+  constexpr span<const int> lasts = constexpr_span.last(size);
+  for (size_t i = 0; i < lasts.size(); ++i) {
+    const size_t j = (arraysize(kArray) - size) + i;
+    EXPECT_EQ(kArray[j], lasts[i]);
+  }
+
+  constexpr int item = constexpr_span[size];
+  EXPECT_EQ(kArray[size], item);
+}
+
+}  // namespace base
diff --git a/base/containers/span_unittest.nc b/base/containers/span_unittest.nc
new file mode 100644
index 0000000..0d2af89
--- /dev/null
+++ b/base/containers/span_unittest.nc
@@ -0,0 +1,167 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is a "No Compile Test" suite.
+// http://dev.chromium.org/developers/testing/no-compile-tests
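+//
+// Each #if/#elif section below is compiled separately with its NCTEST_* macro
+// defined, and the bracketed regex in the section header is expected to match
+// the resulting compiler diagnostic (see the link above for details).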
+
+#include "base/containers/span.h"
+
+#include <array>
+#include <set>
+#include <vector>
+
+namespace base {
+
+class Base {
+};
+
+class Derived : Base {
+};
+
+#if defined(NCTEST_DEFAULT_SPAN_WITH_NON_ZERO_STATIC_EXTENT_DISALLOWED)  // [r"fatal error: static_assert failed \"Invalid Extent\""]
+
+// A default constructed span must have an extent of 0 or dynamic_extent.
+void WontCompile() {
+  span<int, 1> span;
+}
+
+#elif defined(NCTEST_SPAN_FROM_ARRAY_WITH_NON_MATCHING_STATIC_EXTENT_DISALLOWED) // [r"fatal error: no matching constructor for initialization of 'span<int, 1>'"]
+
+// A span with static extent constructed from an array must match the size of
+// the array.
+void WontCompile() {
+  int array[] = {1, 2, 3};
+  span<int, 1> span(array);
+}
+
+#elif defined(NCTEST_SPAN_FROM_STD_ARRAY_WITH_NON_MATCHING_STATIC_EXTENT_DISALLOWED) // [r"fatal error: no matching constructor for initialization of 'span<int, 2>'"]
+
+// A span with static extent constructed from std::array must match the size of
+// the array.
+void WontCompile() {
+  std::array<int, 3> array = {1, 2, 3};
+  span<int, 2> span(array);
+}
+
+#elif defined(NCTEST_SPAN_FROM_CONST_STD_ARRAY_WITH_NON_MATCHING_STATIC_EXTENT_DISALLOWED) // [r"fatal error: no matching constructor for initialization of 'span<const int, 2>'"]
+
+// A span with static extent constructed from std::array must match the size of
+// the array.
+void WontCompile() {
+  const std::array<int, 3> array = {1, 2, 3};
+  span<const int, 2> span(array);
+}
+
+#elif defined(NCTEST_SPAN_FROM_OTHER_SPAN_WITH_MISMATCHING_EXTENT_DISALLOWED) // [r"fatal error: no matching constructor for initialization of 'span<int, 4>'"]
+
+// A span with static extent constructed from another span must match the
+// extent.
+void WontCompile() {
+  std::array<int, 3> array = {1, 2, 3};
+  span<int, 3> span3(array);
+  span<int, 4> span4(span3);
+}
+
+#elif defined(NCTEST_DYNAMIC_SPAN_TO_STATIC_SPAN_DISALLOWED)  // [r"fatal error: no matching constructor for initialization of 'span<int, 3>'"]
+
+// Converting a dynamic span to a static span should not be allowed.
+void WontCompile() {
+  span<int> dynamic_span;
+  span<int, 3> static_span(dynamic_span);
+}
+
+#elif defined(NCTEST_DERIVED_TO_BASE_CONVERSION_DISALLOWED)  // [r"fatal error: no matching constructor for initialization of 'span<base::Base \*>'"]
+
+// Internally, this is represented as a pointer to pointers to Derived. An
+// implicit conversion to a pointer to pointers to Base must not be allowed.
+// If it were allowed, then something like this would be possible.
+//   Cat** cats = GetCats();
+//   Animal** animals = cats;
+//   animals[0] = new Dog();  // Uhoh!
+void WontCompile() {
+  span<Derived*> derived_span;
+  span<Base*> base_span(derived_span);
+}
+
+#elif defined(NCTEST_PTR_TO_CONSTPTR_CONVERSION_DISALLOWED)  // [r"fatal error: no matching constructor for initialization of 'span<const int \*>'"]
+
+// Similarly, converting a span<int*> to span<const int*> requires internally
+// converting T** to const T**. This is also disallowed, as it would allow code
+// to violate the contract of const.
+void WontCompile() {
+  span<int*> non_const_span;
+  span<const int*> const_span(non_const_span);
+}
+
+#elif defined(NCTEST_CONST_CONTAINER_TO_MUTABLE_CONVERSION_DISALLOWED)  // [r"fatal error: no matching constructor for initialization of 'span<int>'"]
+
+// A const container should not be convertible to a mutable span.
+void WontCompile() {
+  const std::vector<int> v = {1, 2, 3};
+  span<int> span(v);
+}
+
+#elif defined(NCTEST_STD_SET_CONVERSION_DISALLOWED)  // [r"fatal error: no matching constructor for initialization of 'span<int>'"]
+
+// A std::set() should not satisfy the requirements for conversion to a span.
+void WontCompile() {
+  std::set<int> set;
+  span<int> span(set);
+}
+
+#elif defined(NCTEST_STATIC_FRONT_WITH_EXCEEDING_COUNT_DISALLOWED)  // [r"fatal error: static_assert failed \"Count must not exceed Extent\""]
+
+// The Count of a static first() must not exceed the span's Extent.
+void WontCompile() {
+  std::array<int, 3> array = {1, 2, 3};
+  span<int, 3> span(array);
+  auto first = span.first<4>();
+}
+
+#elif defined(NCTEST_STATIC_LAST_WITH_EXCEEDING_COUNT_DISALLOWED)  // [r"fatal error: static_assert failed \"Count must not exceed Extent\""]
+
+// The Count of a static last() must not exceed the span's Extent.
+void WontCompile() {
+  std::array<int, 3> array = {1, 2, 3};
+  span<int, 3> span(array);
+  auto last = span.last<4>();
+}
+
+#elif defined(NCTEST_STATIC_SUBSPAN_WITH_EXCEEDING_OFFSET_DISALLOWED)  // [r"fatal error: static_assert failed \"Offset must not exceed Extent\""]
+
+// The Offset of a static subspan() must not exceed the span's Extent.
+void WontCompile() {
+  std::array<int, 3> array = {1, 2, 3};
+  span<int, 3> span(array);
+  auto subspan = span.subspan<4>();
+}
+
+#elif defined(NCTEST_STATIC_SUBSPAN_WITH_EXCEEDING_COUNT_DISALLOWED)  // [r"fatal error: static_assert failed \"Count must not exceed Extent - Offset\""]
+
+// The Count of a static subspan() must not exceed Extent - Offset.
+void WontCompile() {
+  std::array<int, 3> array = {1, 2, 3};
+  span<int, 3> span(array);
+  auto subspan = span.subspan<0, 4>();
+}
+
+#elif defined(NCTEST_AS_WRITABLE_BYTES_WITH_CONST_CONTAINER_DISALLOWED)  // [r"fatal error: no matching function for call to 'as_writable_bytes'"]
+
+// as_writable_bytes should not be possible for a const container.
+void WontCompile() {
+  const std::vector<int> v = {1, 2, 3};
+  span<uint8_t> bytes = as_writable_bytes(make_span(v));
+}
+
+#elif defined(NCTEST_MAKE_SPAN_FROM_SET_CONVERSION_DISALLOWED)  // [r"fatal error: no matching function for call to 'make_span'"]
+
+// A std::set() should not satisfy the requirements for conversion to a span.
+void WontCompile() {
+  std::set<int> set;
+  auto span = make_span(set);
+}
+
+#endif
+
+}  // namespace base
diff --git a/base/containers/stack.h b/base/containers/stack.h
new file mode 100644
index 0000000..1aaa879
--- /dev/null
+++ b/base/containers/stack.h
@@ -0,0 +1,23 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_STACK_H_
+#define BASE_CONTAINERS_STACK_H_
+
+#include <stack>
+
+#include "base/containers/circular_deque.h"
+
+namespace base {
+
+// Provides a definition of base::stack that's like std::stack but uses a
+// base::circular_deque instead of std::deque. Since std::stack is just a
+// wrapper around an underlying container, we can simply provide a type alias
+// for it that defaults to base::circular_deque.
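+//
+// Illustrative usage sketch:
+//   base::stack<int> numbers;
+//   numbers.push(1);
+//   numbers.top();  // 1
+//   numbers.pop();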
+template <class T, class Container = circular_deque<T>>
+using stack = std::stack<T, Container>;
+
+}  // namespace base
+
+#endif  // BASE_CONTAINERS_STACK_H_
diff --git a/base/containers/stack_container.h b/base/containers/stack_container.h
new file mode 100644
index 0000000..c775744
--- /dev/null
+++ b/base/containers/stack_container.h
@@ -0,0 +1,229 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_STACK_CONTAINER_H_
+#define BASE_CONTAINERS_STACK_CONTAINER_H_
+
+#include <stddef.h>
+
+#include <vector>
+
+#include "base/macros.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// This allocator can be used with STL containers to provide a stack buffer
+// from which to allocate memory; allocations that do not fit overflow onto
+// the heap. Because the buffer lives on the stack, this lets us avoid heap
+// operations in some situations.
+//
+// STL likes to make copies of allocators, so the allocator itself can't hold
+// the data. Instead, we make the creator responsible for creating a
+// StackAllocator::Source which contains the data. Copying the allocator
+// merely copies the pointer to this shared source, so all allocators created
+// based on our allocator will share the same stack buffer.
+//
+// This stack buffer implementation is very simple. The first allocation that
+// fits in the stack buffer will use the stack buffer. Any subsequent
+// allocations will not use the stack buffer, even if there is unused room.
+// This makes it appropriate for array-like containers, but the caller should
+// be sure to reserve() in the container up to the stack buffer size. Otherwise
+// the container will allocate a small array which will "use up" the stack
+// buffer.
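+//
+// Illustrative sketch of direct use (most callers should prefer the
+// StackContainer/StackVector wrappers below):
+//   StackAllocator<int, 8>::Source source;
+//   StackAllocator<int, 8> allocator(&source);
+//   std::vector<int, StackAllocator<int, 8> > v(allocator);
+//   v.reserve(8);  // The first allocation of <= 8 ints uses |source|.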
+template<typename T, size_t stack_capacity>
+class StackAllocator : public std::allocator<T> {
+ public:
+  typedef typename std::allocator<T>::pointer pointer;
+  typedef typename std::allocator<T>::size_type size_type;
+
+  // Backing store for the allocator. The container owner is responsible for
+  // maintaining this for as long as any containers using this allocator are
+  // live.
+  struct Source {
+    Source() : used_stack_buffer_(false) {
+    }
+
+    // Casts the buffer to its right type.
+    T* stack_buffer() { return reinterpret_cast<T*>(stack_buffer_); }
+    const T* stack_buffer() const {
+      return reinterpret_cast<const T*>(&stack_buffer_);
+    }
+
+    // The buffer itself. It is not of type T because we don't want the
+    // constructors and destructors to be automatically called. Define a POD
+    // buffer of the right size instead.
+    alignas(T) char stack_buffer_[sizeof(T[stack_capacity])];
+#if defined(__GNUC__) && !defined(ARCH_CPU_X86_FAMILY)
+    static_assert(alignof(T) <= 16, "http://crbug.com/115612");
+#endif
+
+    // Set when the stack buffer is used for an allocation. We do not track
+    // how much of the buffer is used, only that somebody is using it.
+    bool used_stack_buffer_;
+  };
+
+  // Used by containers when they want to refer to an allocator of type U.
+  template<typename U>
+  struct rebind {
+    typedef StackAllocator<U, stack_capacity> other;
+  };
+
+  // For the straight up copy c-tor, we can share storage.
+  StackAllocator(const StackAllocator<T, stack_capacity>& rhs)
+      : std::allocator<T>(), source_(rhs.source_) {
+  }
+
+  // ISO C++ requires the following constructor to be defined,
+  // and std::vector in VC++2008SP1 Release fails with an error
+  // in the class _Container_base_aux_alloc_real (from <xutility>)
+  // if the constructor does not exist.
+  // For this constructor, we cannot share storage; there's
+  // no guarantee that the Source buffer of Ts is large enough
+  // for Us.
+  // TODO: If we were fancy pants, perhaps we could share storage
+  // iff sizeof(T) == sizeof(U).
+  template<typename U, size_t other_capacity>
+  StackAllocator(const StackAllocator<U, other_capacity>& other)
+      : source_(NULL) {
+  }
+
+  // This constructor must exist. It creates a default allocator that doesn't
+  // actually have a stack buffer. glibc's std::string() will compare the
+  // current allocator against the default-constructed allocator, so this
+  // should be fast.
+  StackAllocator() : source_(NULL) {
+  }
+
+  explicit StackAllocator(Source* source) : source_(source) {
+  }
+
+  // Actually do the allocation. Use the stack buffer if nobody has used it yet
+  // and the size requested fits. Otherwise, fall through to the standard
+  // allocator.
+  pointer allocate(size_type n, void* hint = 0) {
+    if (source_ != NULL && !source_->used_stack_buffer_
+        && n <= stack_capacity) {
+      source_->used_stack_buffer_ = true;
+      return source_->stack_buffer();
+    } else {
+      return std::allocator<T>::allocate(n, hint);
+    }
+  }
+
+  // Free: when trying to free the stack buffer, just mark it as free. For
+  // non-stack-buffer pointers, just fall through to the standard allocator.
+  void deallocate(pointer p, size_type n) {
+    if (source_ != NULL && p == source_->stack_buffer())
+      source_->used_stack_buffer_ = false;
+    else
+      std::allocator<T>::deallocate(p, n);
+  }
+
+ private:
+  Source* source_;
+};
+
+// A wrapper around an STL container that maintains a stack buffer on which the
+// container's initial capacity is based. Growing the container beyond the
+// stack capacity will transparently overflow onto the heap. The container must
+// support reserve().
+//
+// This will not work with std::string since some implementations allocate
+// more bytes than requested in calls to reserve(), forcing the allocation onto
+// the heap.  http://crbug.com/709273
+//
+// WATCH OUT: the ContainerType MUST use the proper StackAllocator for this
+// type. This object is really intended to be used only internally. You'll want
+// to use the wrappers below for different types.
+template<typename TContainerType, int stack_capacity>
+class StackContainer {
+ public:
+  typedef TContainerType ContainerType;
+  typedef typename ContainerType::value_type ContainedType;
+  typedef StackAllocator<ContainedType, stack_capacity> Allocator;
+
+  // Allocator must be constructed before the container!
+  StackContainer() : allocator_(&stack_data_), container_(allocator_) {
+    // Make the container use the stack allocation by reserving our buffer size
+    // before doing anything else.
+    container_.reserve(stack_capacity);
+  }
+
+  // Getters for the actual container.
+  //
+  // Danger: any copies of this made using the copy constructor must have
+  // shorter lifetimes than the source. The copy will share the same allocator
+  // and therefore the same stack buffer as the original. Use std::copy to
+  // copy into a "real" container for longer-lived objects.
+  ContainerType& container() { return container_; }
+  const ContainerType& container() const { return container_; }
+
+  // Support operator-> to get to the container. This allows nicer syntax like:
+  //   StackContainer<...> foo;
+  //   std::sort(foo->begin(), foo->end());
+  ContainerType* operator->() { return &container_; }
+  const ContainerType* operator->() const { return &container_; }
+
+#ifdef UNIT_TEST
+  // Retrieves the stack source so that unit tests can verify that the
+  // buffer is being used properly.
+  const typename Allocator::Source& stack_data() const {
+    return stack_data_;
+  }
+#endif
+
+ protected:
+  typename Allocator::Source stack_data_;
+  Allocator allocator_;
+  ContainerType container_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(StackContainer);
+};
+
+// StackVector -----------------------------------------------------------------
+
+// Example:
+//   StackVector<int, 16> foo;
+//   foo->push_back(22);  // we have overloaded operator->
+//   foo[0] = 10;         // as well as operator[]
+template<typename T, size_t stack_capacity>
+class StackVector : public StackContainer<
+    std::vector<T, StackAllocator<T, stack_capacity> >,
+    stack_capacity> {
+ public:
+  StackVector() : StackContainer<
+      std::vector<T, StackAllocator<T, stack_capacity> >,
+      stack_capacity>() {
+  }
+
+  // We need to put this in STL containers sometimes, which requires a copy
+  // constructor. We can't call the regular copy constructor because that will
+  // take the stack buffer from the original. Here, we create an empty object
+  // and make a stack buffer of its own.
+  StackVector(const StackVector<T, stack_capacity>& other)
+      : StackContainer<
+            std::vector<T, StackAllocator<T, stack_capacity> >,
+            stack_capacity>() {
+    this->container().assign(other->begin(), other->end());
+  }
+
+  StackVector<T, stack_capacity>& operator=(
+      const StackVector<T, stack_capacity>& other) {
+    this->container().assign(other->begin(), other->end());
+    return *this;
+  }
+
+  // Vectors are commonly indexed, which isn't very convenient even with
+  // operator-> (using "->at()" does exception stuff we don't want).
+  T& operator[](size_t i) { return this->container().operator[](i); }
+  const T& operator[](size_t i) const {
+    return this->container().operator[](i);
+  }
+};
+
+}  // namespace base
+
+#endif  // BASE_CONTAINERS_STACK_CONTAINER_H_
diff --git a/base/containers/stack_container_unittest.cc b/base/containers/stack_container_unittest.cc
new file mode 100644
index 0000000..b6bb9b6
--- /dev/null
+++ b/base/containers/stack_container_unittest.cc
@@ -0,0 +1,145 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/containers/stack_container.h"
+
+#include <stddef.h>
+#include <string.h>
+
+#include <algorithm>
+
+#include "base/memory/ref_counted.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+class Dummy : public base::RefCounted<Dummy> {
+ public:
+  explicit Dummy(int* alive) : alive_(alive) {
+    ++*alive_;
+  }
+
+ private:
+  friend class base::RefCounted<Dummy>;
+
+  ~Dummy() {
+    --*alive_;
+  }
+
+  int* const alive_;
+};
+
+}  // namespace
+
+TEST(StackContainer, Vector) {
+  const int stack_size = 3;
+  StackVector<int, stack_size> vect;
+  const int* stack_buffer = &vect.stack_data().stack_buffer()[0];
+
+  // The initial |stack_size| elements should appear in the stack buffer.
+  EXPECT_EQ(static_cast<size_t>(stack_size), vect.container().capacity());
+  for (int i = 0; i < stack_size; i++) {
+    vect.container().push_back(i);
+    EXPECT_EQ(stack_buffer, &vect.container()[0]);
+    EXPECT_TRUE(vect.stack_data().used_stack_buffer_);
+  }
+
+  // Adding more elements should push the array onto the heap.
+  for (int i = 0; i < stack_size; i++) {
+    vect.container().push_back(i + stack_size);
+    EXPECT_NE(stack_buffer, &vect.container()[0]);
+    EXPECT_FALSE(vect.stack_data().used_stack_buffer_);
+  }
+
+  // The array should still be in order.
+  for (int i = 0; i < stack_size * 2; i++)
+    EXPECT_EQ(i, vect.container()[i]);
+
+  // Resize to smaller. Our STL implementation won't reallocate in this case,
+  // but if it did it might move back into our stack buffer. We reserve right
+  // after the resize to guarantee it isn't using the stack buffer, even though
+  // it doesn't have much data.
+  vect.container().resize(stack_size);
+  vect.container().reserve(stack_size * 2);
+  EXPECT_FALSE(vect.stack_data().used_stack_buffer_);
+
+  // Copying the small vector to another should use the same allocator and use
+  // the now-unused stack buffer. GENERALLY CALLERS SHOULD NOT DO THIS since
+  // they have to get the template types just right and it can cause errors.
+  std::vector<int, StackAllocator<int, stack_size> > other(vect.container());
+  EXPECT_EQ(stack_buffer, &other.front());
+  EXPECT_TRUE(vect.stack_data().used_stack_buffer_);
+  for (int i = 0; i < stack_size; i++)
+    EXPECT_EQ(i, other[i]);
+}
+
+TEST(StackContainer, VectorDoubleDelete) {
+  // Regression testing for double-delete.
+  typedef StackVector<scoped_refptr<Dummy>, 2> Vector;
+  typedef Vector::ContainerType Container;
+  Vector vect;
+
+  int alive = 0;
+  scoped_refptr<Dummy> dummy(new Dummy(&alive));
+  EXPECT_EQ(alive, 1);
+
+  vect->push_back(dummy);
+  EXPECT_EQ(alive, 1);
+
+  Dummy* dummy_unref = dummy.get();
+  dummy = nullptr;
+  EXPECT_EQ(alive, 1);
+
+  Container::iterator itr = std::find(vect->begin(), vect->end(), dummy_unref);
+  EXPECT_EQ(itr->get(), dummy_unref);
+  vect->erase(itr);
+  EXPECT_EQ(alive, 0);
+
+  // Shouldn't crash at exit.
+}
+
+namespace {
+
+template <size_t alignment>
+class AlignedData {
+ public:
+  AlignedData() { memset(data_, 0, alignment); }
+  ~AlignedData() = default;
+  alignas(alignment) char data_[alignment];
+};
+
+}  // anonymous namespace
+
+#define EXPECT_ALIGNED(ptr, align) \
+    EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(ptr) & (align - 1))
+
+TEST(StackContainer, BufferAlignment) {
+  StackVector<wchar_t, 16> text;
+  text->push_back(L'A');
+  EXPECT_ALIGNED(&text[0], alignof(wchar_t));
+
+  StackVector<double, 1> doubles;
+  doubles->push_back(0.0);
+  EXPECT_ALIGNED(&doubles[0], alignof(double));
+
+  StackVector<AlignedData<16>, 1> aligned16;
+  aligned16->push_back(AlignedData<16>());
+  EXPECT_ALIGNED(&aligned16[0], 16);
+
+#if !defined(__GNUC__) || defined(ARCH_CPU_X86_FAMILY)
+  // It seems that non-X86 gcc doesn't respect greater than 16 byte alignment.
+  // See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=33721 for details.
+  // TODO(sbc):re-enable this if GCC starts respecting higher alignments.
+  StackVector<AlignedData<256>, 1> aligned256;
+  aligned256->push_back(AlignedData<256>());
+  EXPECT_ALIGNED(&aligned256[0], 256);
+#endif
+}
+
+template class StackVector<int, 2>;
+template class StackVector<scoped_refptr<Dummy>, 2>;
+
+}  // namespace base
diff --git a/base/containers/unique_ptr_adapters.h b/base/containers/unique_ptr_adapters.h
new file mode 100644
index 0000000..42fab19
--- /dev/null
+++ b/base/containers/unique_ptr_adapters.h
@@ -0,0 +1,78 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_UNIQUE_PTR_ADAPTERS_H_
+#define BASE_CONTAINERS_UNIQUE_PTR_ADAPTERS_H_
+
+#include <memory>
+
+namespace base {
+
+// This transparent comparator allows lookup by raw pointer in
+// a container of unique pointers. This functionality is based on the C++14
+// extensions to the std::set/std::map interface, and can also be used
+// with base::flat_set/base::flat_map.
+//
+// Example usage:
+//   Foo* foo = ...
+//   std::set<std::unique_ptr<Foo>, base::UniquePtrComparator> set;
+//   set.insert(std::unique_ptr<Foo>(foo));
+//   ...
+//   auto it = set.find(foo);
+//   EXPECT_EQ(foo, it->get());
+//
+// You can find more information about transparent comparisons here:
+// http://en.cppreference.com/w/cpp/utility/functional/less_void
+struct UniquePtrComparator {
+  using is_transparent = int;
+
+  template <typename T>
+  bool operator()(const std::unique_ptr<T>& lhs,
+                  const std::unique_ptr<T>& rhs) const {
+    return lhs < rhs;
+  }
+
+  template <typename T>
+  bool operator()(const T* lhs, const std::unique_ptr<T>& rhs) const {
+    return lhs < rhs.get();
+  }
+
+  template <typename T>
+  bool operator()(const std::unique_ptr<T>& lhs, const T* rhs) const {
+    return lhs.get() < rhs;
+  }
+};
+
+// UniquePtrMatcher is useful for finding an element in a container of
+// unique_ptrs when you have the raw pointer.
+//
+// Example usage:
+//   std::vector<std::unique_ptr<Foo>> vector;
+//   Foo* element = ...
+//   auto iter = std::find_if(vector.begin(), vector.end(),
+//                            MatchesUniquePtr(element));
+//
+// Example of erasing from container:
+//   EraseIf(v, MatchesUniquePtr(element));
+//
+template <class T, class Deleter = std::default_delete<T>>
+struct UniquePtrMatcher {
+  explicit UniquePtrMatcher(T* t) : t_(t) {}
+
+  bool operator()(const std::unique_ptr<T, Deleter>& o) {
+    return o.get() == t_;
+  }
+
+ private:
+  T* const t_;
+};
+
+template <class T, class Deleter = std::default_delete<T>>
+UniquePtrMatcher<T, Deleter> MatchesUniquePtr(T* t) {
+  return UniquePtrMatcher<T, Deleter>(t);
+}
+
+}  // namespace base
+
+#endif  // BASE_CONTAINERS_UNIQUE_PTR_ADAPTERS_H_
diff --git a/base/containers/unique_ptr_adapters_unittest.cc b/base/containers/unique_ptr_adapters_unittest.cc
new file mode 100644
index 0000000..5b8f1fc
--- /dev/null
+++ b/base/containers/unique_ptr_adapters_unittest.cc
@@ -0,0 +1,134 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/containers/unique_ptr_adapters.h"
+
+#include <algorithm>
+#include <memory>
+#include <set>
+#include <vector>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+class Foo {
+ public:
+  Foo() { instance_count++; }
+  ~Foo() { instance_count--; }
+  static int instance_count;
+};
+
+int Foo::instance_count = 0;
+
+TEST(UniquePtrComparatorTest, Basic) {
+  std::set<std::unique_ptr<Foo>, UniquePtrComparator> set;
+  Foo* foo1 = new Foo();
+  Foo* foo2 = new Foo();
+  Foo* foo3 = new Foo();
+  EXPECT_EQ(3, Foo::instance_count);
+
+  set.emplace(foo1);
+  set.emplace(foo2);
+
+  auto it1 = set.find(foo1);
+  EXPECT_TRUE(it1 != set.end());
+  EXPECT_EQ(foo1, it1->get());
+
+  {
+    auto it2 = set.find(foo2);
+    EXPECT_TRUE(it2 != set.end());
+    EXPECT_EQ(foo2, it2->get());
+  }
+
+  EXPECT_TRUE(set.find(foo3) == set.end());
+
+  set.erase(it1);
+  EXPECT_EQ(2, Foo::instance_count);
+
+  EXPECT_TRUE(set.find(foo1) == set.end());
+
+  {
+    auto it2 = set.find(foo2);
+    EXPECT_TRUE(it2 != set.end());
+    EXPECT_EQ(foo2, it2->get());
+  }
+
+  set.clear();
+  EXPECT_EQ(1, Foo::instance_count);
+
+  EXPECT_TRUE(set.find(foo1) == set.end());
+  EXPECT_TRUE(set.find(foo2) == set.end());
+  EXPECT_TRUE(set.find(foo3) == set.end());
+
+  delete foo3;
+  EXPECT_EQ(0, Foo::instance_count);
+}
+
+TEST(UniquePtrMatcherTest, Basic) {
+  std::vector<std::unique_ptr<Foo>> v;
+  auto foo_ptr1 = std::make_unique<Foo>();
+  Foo* foo1 = foo_ptr1.get();
+  v.push_back(std::move(foo_ptr1));
+  auto foo_ptr2 = std::make_unique<Foo>();
+  Foo* foo2 = foo_ptr2.get();
+  v.push_back(std::move(foo_ptr2));
+
+  {
+    auto iter = std::find_if(v.begin(), v.end(), UniquePtrMatcher<Foo>(foo1));
+    ASSERT_TRUE(iter != v.end());
+    EXPECT_EQ(foo1, iter->get());
+  }
+
+  {
+    auto iter = std::find_if(v.begin(), v.end(), UniquePtrMatcher<Foo>(foo2));
+    ASSERT_TRUE(iter != v.end());
+    EXPECT_EQ(foo2, iter->get());
+  }
+
+  {
+    auto iter = std::find_if(v.begin(), v.end(), MatchesUniquePtr(foo2));
+    ASSERT_TRUE(iter != v.end());
+    EXPECT_EQ(foo2, iter->get());
+  }
+}
+
+class TestDeleter {
+ public:
+  void operator()(Foo* foo) { delete foo; }
+};
+
+TEST(UniquePtrMatcherTest, Deleter) {
+  using UniqueFoo = std::unique_ptr<Foo, TestDeleter>;
+  std::vector<UniqueFoo> v;
+  UniqueFoo foo_ptr1(new Foo);
+  Foo* foo1 = foo_ptr1.get();
+  v.push_back(std::move(foo_ptr1));
+  UniqueFoo foo_ptr2(new Foo);
+  Foo* foo2 = foo_ptr2.get();
+  v.push_back(std::move(foo_ptr2));
+
+  {
+    auto iter = std::find_if(v.begin(), v.end(),
+                             UniquePtrMatcher<Foo, TestDeleter>(foo1));
+    ASSERT_TRUE(iter != v.end());
+    EXPECT_EQ(foo1, iter->get());
+  }
+
+  {
+    auto iter = std::find_if(v.begin(), v.end(),
+                             UniquePtrMatcher<Foo, TestDeleter>(foo2));
+    ASSERT_TRUE(iter != v.end());
+    EXPECT_EQ(foo2, iter->get());
+  }
+
+  {
+    auto iter = std::find_if(v.begin(), v.end(),
+                             MatchesUniquePtr<Foo, TestDeleter>(foo2));
+    ASSERT_TRUE(iter != v.end());
+    EXPECT_EQ(foo2, iter->get());
+  }
+}
+
+}  // namespace
+}  // namespace base
diff --git a/base/containers/vector_buffer.h b/base/containers/vector_buffer.h
new file mode 100644
index 0000000..a72c1ed
--- /dev/null
+++ b/base/containers/vector_buffer.h
@@ -0,0 +1,163 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_VECTOR_BUFFER_H_
+#define BASE_CONTAINERS_VECTOR_BUFFER_H_
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <type_traits>
+#include <utility>
+
+#include "base/logging.h"
+#include "base/macros.h"
+
+namespace base {
+namespace internal {
+
+// Internal implementation detail of base/containers.
+//
+// Implements a vector-like buffer that holds a certain capacity of T. Unlike
+// std::vector, VectorBuffer never constructs or destructs its arguments, and
+// can't change sizes. But it does implement templates to assist in efficient
+// moving and destruction of those items manually.
+//
+// In particular, the destructor function does not iterate over the items if
+// there is no destructor. Moves are implemented as a memcpy/memmove for
+// trivially copyable objects (POD); otherwise, the move constructor is used
+// where possible, and as a last resort it falls back to a copy. This behavior
+// is similar to std::vector.
+//
+// No special consideration is given to noexcept move constructors since
+// we compile without exceptions.
+//
+// The current API does not support moving overlapping ranges.
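+//
+// A minimal usage sketch (hypothetical; VectorBuffer is an implementation
+// detail normally used only by other base/containers code):
+//
+//   VectorBuffer<std::string> src(2);
+//   new (src.begin()) std::string("a");       // construct in place
+//   new (src.begin() + 1) std::string("b");
+//   VectorBuffer<std::string> dest(2);
+//   VectorBuffer<std::string>::MoveRange(src.begin(), src.end(),
+//                                        dest.begin());  // moves, destructs
+//   dest.DestructRange(dest.begin(), dest.end());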
+template <typename T>
+class VectorBuffer {
+ public:
+  constexpr VectorBuffer() = default;
+
+#if defined(__clang__) && !defined(__native_client__)
+  // This constructor converts an uninitialized void* to a T* which triggers
+  // clang Control Flow Integrity. Since this is as-designed, disable.
+  __attribute__((no_sanitize("cfi-unrelated-cast", "vptr")))
+#endif
+  VectorBuffer(size_t count)
+      : buffer_(reinterpret_cast<T*>(malloc(sizeof(T) * count))),
+        capacity_(count) {
+  }
+  VectorBuffer(VectorBuffer&& other) noexcept
+      : buffer_(other.buffer_), capacity_(other.capacity_) {
+    other.buffer_ = nullptr;
+    other.capacity_ = 0;
+  }
+
+  ~VectorBuffer() { free(buffer_); }
+
+  VectorBuffer& operator=(VectorBuffer&& other) {
+    free(buffer_);
+    buffer_ = other.buffer_;
+    capacity_ = other.capacity_;
+
+    other.buffer_ = nullptr;
+    other.capacity_ = 0;
+    return *this;
+  }
+
+  size_t capacity() const { return capacity_; }
+
+  T& operator[](size_t i) { return buffer_[i]; }
+  const T& operator[](size_t i) const { return buffer_[i]; }
+  T* begin() { return buffer_; }
+  T* end() { return &buffer_[capacity_]; }
+
+  // DestructRange ------------------------------------------------------------
+
+  // Trivially destructible objects need not have their destructors called.
+  template <typename T2 = T,
+            typename std::enable_if<std::is_trivially_destructible<T2>::value,
+                                    int>::type = 0>
+  void DestructRange(T* begin, T* end) {}
+
+  // Non-trivially destructible objects must have their destructors called
+  // individually.
+  template <typename T2 = T,
+            typename std::enable_if<!std::is_trivially_destructible<T2>::value,
+                                    int>::type = 0>
+  void DestructRange(T* begin, T* end) {
+    while (begin != end) {
+      begin->~T();
+      begin++;
+    }
+  }
+
+  // MoveRange ----------------------------------------------------------------
+  //
+  // The destructor will be called (as necessary) for all moved types. The
+  // ranges must not overlap.
+  //
+  // The parameters are begin and end (one past the last) of the input buffer,
+  // and the address of the first element to copy to. There must be sufficient
+  // room in the destination for all items in the range [begin, end).
+
+  // Trivially copyable types can use memcpy: trivially copyable implies
+  // a trivial destructor, so we don't have to call it.
+  template <typename T2 = T,
+            typename std::enable_if<base::is_trivially_copyable<T2>::value,
+                                    int>::type = 0>
+  static void MoveRange(T* from_begin, T* from_end, T* to) {
+    DCHECK(!RangesOverlap(from_begin, from_end, to));
+    memcpy(to, from_begin, (from_end - from_begin) * sizeof(T));
+  }
+
+  // Not trivially copyable, but movable: call the move constructor and
+  // destruct the original.
+  template <typename T2 = T,
+            typename std::enable_if<std::is_move_constructible<T2>::value &&
+                                        !base::is_trivially_copyable<T2>::value,
+                                    int>::type = 0>
+  static void MoveRange(T* from_begin, T* from_end, T* to) {
+    DCHECK(!RangesOverlap(from_begin, from_end, to));
+    while (from_begin != from_end) {
+      new (to) T(std::move(*from_begin));
+      from_begin->~T();
+      from_begin++;
+      to++;
+    }
+  }
+
+  // Not movable, not trivially copyable: call the copy constructor and
+  // destruct the original.
+  template <typename T2 = T,
+            typename std::enable_if<!std::is_move_constructible<T2>::value &&
+                                        !base::is_trivially_copyable<T2>::value,
+                                    int>::type = 0>
+  static void MoveRange(T* from_begin, T* from_end, T* to) {
+    DCHECK(!RangesOverlap(from_begin, from_end, to));
+    while (from_begin != from_end) {
+      new (to) T(*from_begin);
+      from_begin->~T();
+      from_begin++;
+      to++;
+    }
+  }
+
+ private:
+  static bool RangesOverlap(const T* from_begin,
+                            const T* from_end,
+                            const T* to) {
+    return !(to >= from_end || to + (from_end - from_begin) <= from_begin);
+  }
+
+  T* buffer_ = nullptr;
+  size_t capacity_ = 0;
+
+  DISALLOW_COPY_AND_ASSIGN(VectorBuffer);
+};
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_CONTAINERS_VECTOR_BUFFER_H_
diff --git a/base/containers/vector_buffer_unittest.cc b/base/containers/vector_buffer_unittest.cc
new file mode 100644
index 0000000..6d49505
--- /dev/null
+++ b/base/containers/vector_buffer_unittest.cc
@@ -0,0 +1,89 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/containers/vector_buffer.h"
+
+#include "base/test/copy_only_int.h"
+#include "base/test/move_only_int.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace internal {
+
+TEST(VectorBuffer, DeletePOD) {
+  constexpr int size = 10;
+  VectorBuffer<int> buffer(size);
+  for (int i = 0; i < size; i++)
+    buffer.begin()[i] = i + 1;
+
+  buffer.DestructRange(buffer.begin(), buffer.end());
+
+  // Delete should do nothing.
+  for (int i = 0; i < size; i++)
+    EXPECT_EQ(i + 1, buffer.begin()[i]);
+}
+
+TEST(VectorBuffer, DeleteMoveOnly) {
+  constexpr int size = 10;
+  VectorBuffer<MoveOnlyInt> buffer(size);
+  for (int i = 0; i < size; i++)
+    new (buffer.begin() + i) MoveOnlyInt(i + 1);
+
+  buffer.DestructRange(buffer.begin(), buffer.end());
+
+  // Delete should have reset all of the values to 0.
+  for (int i = 0; i < size; i++)
+    EXPECT_EQ(0, buffer.begin()[i].data());
+}
+
+TEST(VectorBuffer, PODMove) {
+  constexpr int size = 10;
+  VectorBuffer<int> dest(size);
+
+  VectorBuffer<int> original(size);
+  for (int i = 0; i < size; i++)
+    original.begin()[i] = i + 1;
+
+  original.MoveRange(original.begin(), original.end(), dest.begin());
+  for (int i = 0; i < size; i++)
+    EXPECT_EQ(i + 1, dest.begin()[i]);
+}
+
+TEST(VectorBuffer, MovableMove) {
+  constexpr int size = 10;
+  VectorBuffer<MoveOnlyInt> dest(size);
+
+  VectorBuffer<MoveOnlyInt> original(size);
+  for (int i = 0; i < size; i++)
+    new (original.begin() + i) MoveOnlyInt(i + 1);
+
+  original.MoveRange(original.begin(), original.end(), dest.begin());
+
+  // Moving from a MoveOnlyInt resets to 0.
+  for (int i = 0; i < size; i++) {
+    EXPECT_EQ(0, original.begin()[i].data());
+    EXPECT_EQ(i + 1, dest.begin()[i].data());
+  }
+}
+
+TEST(VectorBuffer, CopyToMove) {
+  constexpr int size = 10;
+  VectorBuffer<CopyOnlyInt> dest(size);
+
+  VectorBuffer<CopyOnlyInt> original(size);
+  for (int i = 0; i < size; i++)
+    new (original.begin() + i) CopyOnlyInt(i + 1);
+
+  original.MoveRange(original.begin(), original.end(), dest.begin());
+
+  // The original should have been destructed, which should reset the value to
+  // 0. Technically this dereferences the destructed object.
+  for (int i = 0; i < size; i++) {
+    EXPECT_EQ(0, original.begin()[i].data());
+    EXPECT_EQ(i + 1, dest.begin()[i].data());
+  }
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/cpu.cc b/base/cpu.cc
new file mode 100644
index 0000000..cd9066f
--- /dev/null
+++ b/base/cpu.cc
@@ -0,0 +1,242 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/cpu.h"
+
+#include <limits.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <algorithm>
+#include <utility>
+
+#include "base/macros.h"
+#include "build/build_config.h"
+
+#if defined(ARCH_CPU_ARM_FAMILY) && (defined(OS_ANDROID) || defined(OS_LINUX))
+#include <sstream>
+
+#include "base/files/file_util.h"
+#endif
+
+#if defined(ARCH_CPU_X86_FAMILY)
+#if defined(COMPILER_MSVC)
+#include <intrin.h>
+#include <immintrin.h>  // For _xgetbv()
+#endif
+#endif
+
+namespace base {
+
+CPU::CPU()
+  : signature_(0),
+    type_(0),
+    family_(0),
+    model_(0),
+    stepping_(0),
+    ext_model_(0),
+    ext_family_(0),
+    has_mmx_(false),
+    has_sse_(false),
+    has_sse2_(false),
+    has_sse3_(false),
+    has_ssse3_(false),
+    has_sse41_(false),
+    has_sse42_(false),
+    has_popcnt_(false),
+    has_avx_(false),
+    has_avx2_(false),
+    has_aesni_(false),
+    has_non_stop_time_stamp_counter_(false),
+    cpu_vendor_("unknown") {
+  Initialize();
+}
+
+namespace {
+
+#if defined(ARCH_CPU_X86_FAMILY)
+#if !defined(COMPILER_MSVC)
+
+#if defined(__pic__) && defined(__i386__)
+
+void __cpuid(int cpu_info[4], int info_type) {
+  __asm__ volatile(
+      "mov %%ebx, %%edi\n"
+      "cpuid\n"
+      "xchg %%edi, %%ebx\n"
+      : "=a"(cpu_info[0]), "=D"(cpu_info[1]), "=c"(cpu_info[2]),
+        "=d"(cpu_info[3])
+      : "a"(info_type), "c"(0));
+}
+
+#else
+
+void __cpuid(int cpu_info[4], int info_type) {
+  __asm__ volatile("cpuid\n"
+                   : "=a"(cpu_info[0]), "=b"(cpu_info[1]), "=c"(cpu_info[2]),
+                     "=d"(cpu_info[3])
+                   : "a"(info_type), "c"(0));
+}
+
+#endif
+
+// _xgetbv returns the value of an Intel Extended Control Register (XCR).
+// Currently only XCR0 is defined by Intel so |xcr| should always be zero.
+uint64_t _xgetbv(uint32_t xcr) {
+  uint32_t eax, edx;
+
+  __asm__ volatile (
+    "xgetbv" : "=a"(eax), "=d"(edx) : "c"(xcr));
+  return (static_cast<uint64_t>(edx) << 32) | eax;
+}
+
+#endif  // !defined(COMPILER_MSVC)
+#endif  // ARCH_CPU_X86_FAMILY
+
+#if defined(ARCH_CPU_ARM_FAMILY) && (defined(OS_ANDROID) || defined(OS_LINUX))
+std::string* CpuInfoBrand() {
+  static std::string* brand = []() {
+    // This function finds the value from /proc/cpuinfo under the key "model
+    // name" or "Processor". "model name" is used in Linux 3.8 and later (3.7
+    // and later for arm64) and is shown once per CPU. "Processor" is used in
+    // earlier versions and is shown only once at the top of /proc/cpuinfo
+    // regardless of the number of CPUs.
+    const char kModelNamePrefix[] = "model name\t: ";
+    const char kProcessorPrefix[] = "Processor\t: ";
+
+    std::string contents;
+    ReadFileToString(FilePath("/proc/cpuinfo"), &contents);
+    DCHECK(!contents.empty());
+
+    std::istringstream iss(contents);
+    std::string line;
+    while (std::getline(iss, line)) {
+      if (line.compare(0, strlen(kModelNamePrefix), kModelNamePrefix) == 0)
+        return new std::string(line.substr(strlen(kModelNamePrefix)));
+      if (line.compare(0, strlen(kProcessorPrefix), kProcessorPrefix) == 0)
+        return new std::string(line.substr(strlen(kProcessorPrefix)));
+    }
+
+    return new std::string();
+  }();
+
+  return brand;
+}
+#endif  // defined(ARCH_CPU_ARM_FAMILY) && (defined(OS_ANDROID) ||
+        // defined(OS_LINUX))
+
+}  // namespace
+
+void CPU::Initialize() {
+#if defined(ARCH_CPU_X86_FAMILY)
+  int cpu_info[4] = {-1};
+  // This array is used to temporarily hold the vendor name and then the brand
+  // name. Thus it has to be big enough for both use cases. There are
+  // static_asserts below for each of the use cases to make sure this array is
+  // big enough.
+  char cpu_string[sizeof(cpu_info) * 3 + 1];
+
+  // __cpuid with an InfoType argument of 0 returns the number of
+  // valid Ids in CPUInfo[0] and the CPU identification string in
+  // the other three array elements. The CPU identification string is
+  // not in linear order. The code below arranges the information
+  // in a human readable form. The human readable order is CPUInfo[1] |
+  // CPUInfo[3] | CPUInfo[2]. CPUInfo[2] and CPUInfo[3] are swapped
+  // before using memcpy() to copy these three array elements to |cpu_string|.
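+  // For example, a GenuineIntel part returns EBX="Genu", EDX="ineI" and
+  // ECX="ntel"; after the swap below, the three registers concatenate to the
+  // human-readable "GenuineIntel".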
+  __cpuid(cpu_info, 0);
+  int num_ids = cpu_info[0];
+  std::swap(cpu_info[2], cpu_info[3]);
+  static constexpr size_t kVendorNameSize = 3 * sizeof(cpu_info[1]);
+  static_assert(kVendorNameSize < arraysize(cpu_string),
+                "cpu_string too small");
+  memcpy(cpu_string, &cpu_info[1], kVendorNameSize);
+  cpu_string[kVendorNameSize] = '\0';
+  cpu_vendor_ = cpu_string;
+
+  // Interpret CPU feature information.
+  if (num_ids > 0) {
+    int cpu_info7[4] = {0};
+    __cpuid(cpu_info, 1);
+    if (num_ids >= 7) {
+      __cpuid(cpu_info7, 7);
+    }
+    signature_ = cpu_info[0];
+    stepping_ = cpu_info[0] & 0xf;
+    model_ = ((cpu_info[0] >> 4) & 0xf) + ((cpu_info[0] >> 12) & 0xf0);
+    family_ = (cpu_info[0] >> 8) & 0xf;
+    type_ = (cpu_info[0] >> 12) & 0x3;
+    ext_model_ = (cpu_info[0] >> 16) & 0xf;
+    ext_family_ = (cpu_info[0] >> 20) & 0xff;
+    has_mmx_ =   (cpu_info[3] & 0x00800000) != 0;
+    has_sse_ =   (cpu_info[3] & 0x02000000) != 0;
+    has_sse2_ =  (cpu_info[3] & 0x04000000) != 0;
+    has_sse3_ =  (cpu_info[2] & 0x00000001) != 0;
+    has_ssse3_ = (cpu_info[2] & 0x00000200) != 0;
+    has_sse41_ = (cpu_info[2] & 0x00080000) != 0;
+    has_sse42_ = (cpu_info[2] & 0x00100000) != 0;
+    has_popcnt_ = (cpu_info[2] & 0x00800000) != 0;
+
+    // AVX instructions will generate an illegal instruction exception unless
+    //   a) they are supported by the CPU,
+    //   b) XSAVE is supported by the CPU and
+    //   c) XSAVE is enabled by the kernel.
+    // See http://software.intel.com/en-us/blogs/2011/04/14/is-avx-enabled
+    //
+    // In addition, we have observed some crashes with the xgetbv instruction
+    // even after following Intel's example code. (See crbug.com/375968.)
+    // Because of that, we also test the XSAVE bit because its description in
+    // the CPUID documentation suggests that it signals xgetbv support.
+    has_avx_ =
+        (cpu_info[2] & 0x10000000) != 0 &&
+        (cpu_info[2] & 0x04000000) != 0 /* XSAVE */ &&
+        (cpu_info[2] & 0x08000000) != 0 /* OSXSAVE */ &&
+        (_xgetbv(0) & 6) == 6 /* XSAVE enabled by kernel */;
+    has_aesni_ = (cpu_info[2] & 0x02000000) != 0;
+    has_avx2_ = has_avx_ && (cpu_info7[1] & 0x00000020) != 0;
+  }
+
+  // Get the brand string of the cpu.
+  __cpuid(cpu_info, 0x80000000);
+  const int max_parameter = cpu_info[0];
+
+  static constexpr int kParameterStart = 0x80000002;
+  static constexpr int kParameterEnd = 0x80000004;
+  static constexpr int kParameterSize = kParameterEnd - kParameterStart + 1;
+  static_assert(kParameterSize * sizeof(cpu_info) + 1 == arraysize(cpu_string),
+                "cpu_string has wrong size");
+
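+  // Each of leaves 0x80000002 through 0x80000004 returns 16 bytes (the four
+  // registers) of the NUL-padded 48-byte brand string, e.g.
+  // "Intel(R) Core(TM) i7-6700K CPU @ 4.00GHz".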
+  if (max_parameter >= kParameterEnd) {
+    size_t i = 0;
+    for (int parameter = kParameterStart; parameter <= kParameterEnd;
+         ++parameter) {
+      __cpuid(cpu_info, parameter);
+      memcpy(&cpu_string[i], cpu_info, sizeof(cpu_info));
+      i += sizeof(cpu_info);
+    }
+    cpu_string[i] = '\0';
+    cpu_brand_ = cpu_string;
+  }
+
+  static constexpr int kParameterContainingNonStopTimeStampCounter = 0x80000007;
+  if (max_parameter >= kParameterContainingNonStopTimeStampCounter) {
+    __cpuid(cpu_info, kParameterContainingNonStopTimeStampCounter);
+    has_non_stop_time_stamp_counter_ = (cpu_info[3] & (1 << 8)) != 0;
+  }
+#elif defined(ARCH_CPU_ARM_FAMILY) && (defined(OS_ANDROID) || defined(OS_LINUX))
+  cpu_brand_ = *CpuInfoBrand();
+#endif
+}
+
+CPU::IntelMicroArchitecture CPU::GetIntelMicroArchitecture() const {
+  if (has_avx2()) return AVX2;
+  if (has_avx()) return AVX;
+  if (has_sse42()) return SSE42;
+  if (has_sse41()) return SSE41;
+  if (has_ssse3()) return SSSE3;
+  if (has_sse3()) return SSE3;
+  if (has_sse2()) return SSE2;
+  if (has_sse()) return SSE;
+  return PENTIUM;
+}
+
+}  // namespace base
diff --git a/base/cpu.h b/base/cpu.h
new file mode 100644
index 0000000..2c6caea
--- /dev/null
+++ b/base/cpu.h
@@ -0,0 +1,88 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CPU_H_
+#define BASE_CPU_H_
+
+#include <string>
+
+#include "base/base_export.h"
+
+namespace base {
+
+// Query information about the processor.
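+//
+// A minimal usage sketch:
+//   base::CPU cpu;
+//   if (cpu.has_sse42()) {
+//     // Dispatch to an SSE4.2-optimized implementation.
+//   }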
+class BASE_EXPORT CPU final {
+ public:
+  CPU();
+
+  enum IntelMicroArchitecture {
+    PENTIUM,
+    SSE,
+    SSE2,
+    SSE3,
+    SSSE3,
+    SSE41,
+    SSE42,
+    AVX,
+    AVX2,
+    MAX_INTEL_MICRO_ARCHITECTURE
+  };
+
+  // Accessors for CPU information.
+  const std::string& vendor_name() const { return cpu_vendor_; }
+  int signature() const { return signature_; }
+  int stepping() const { return stepping_; }
+  int model() const { return model_; }
+  int family() const { return family_; }
+  int type() const { return type_; }
+  int extended_model() const { return ext_model_; }
+  int extended_family() const { return ext_family_; }
+  bool has_mmx() const { return has_mmx_; }
+  bool has_sse() const { return has_sse_; }
+  bool has_sse2() const { return has_sse2_; }
+  bool has_sse3() const { return has_sse3_; }
+  bool has_ssse3() const { return has_ssse3_; }
+  bool has_sse41() const { return has_sse41_; }
+  bool has_sse42() const { return has_sse42_; }
+  bool has_popcnt() const { return has_popcnt_; }
+  bool has_avx() const { return has_avx_; }
+  bool has_avx2() const { return has_avx2_; }
+  bool has_aesni() const { return has_aesni_; }
+  bool has_non_stop_time_stamp_counter() const {
+    return has_non_stop_time_stamp_counter_;
+  }
+
+  IntelMicroArchitecture GetIntelMicroArchitecture() const;
+  const std::string& cpu_brand() const { return cpu_brand_; }
+
+ private:
+  // Query the processor for CPUID information.
+  void Initialize();
+
+  int signature_;  // raw form of type, family, model, and stepping
+  int type_;  // processor type
+  int family_;  // family of the processor
+  int model_;  // model of processor
+  int stepping_;  // processor revision number
+  int ext_model_;
+  int ext_family_;
+  bool has_mmx_;
+  bool has_sse_;
+  bool has_sse2_;
+  bool has_sse3_;
+  bool has_ssse3_;
+  bool has_sse41_;
+  bool has_sse42_;
+  bool has_popcnt_;
+  bool has_avx_;
+  bool has_avx2_;
+  bool has_aesni_;
+  bool has_non_stop_time_stamp_counter_;
+  std::string cpu_vendor_;
+  std::string cpu_brand_;
+};
+
+}  // namespace base
+
+#endif  // BASE_CPU_H_
diff --git a/base/cpu_unittest.cc b/base/cpu_unittest.cc
new file mode 100644
index 0000000..8a68ea0
--- /dev/null
+++ b/base/cpu_unittest.cc
@@ -0,0 +1,134 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/cpu.h"
+#include "base/stl_util.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if _MSC_VER >= 1700
+// C4752: found Intel(R) Advanced Vector Extensions; consider using /arch:AVX.
+#pragma warning(disable: 4752)
+#endif
+
+// Tests whether we can run extended instructions represented by the CPU
+// information. This test actually executes some extended instructions (such as
+// MMX, SSE, etc.) supported by the CPU and verifies that they run without
+// raising "undefined instruction" exceptions. That is, the test succeeds if it
+// finishes without crashing.
+TEST(CPU, RunExtendedInstructions) {
+#if defined(ARCH_CPU_X86_FAMILY)
+  // Retrieve the CPU information.
+  base::CPU cpu;
+
+  ASSERT_TRUE(cpu.has_mmx());
+  ASSERT_TRUE(cpu.has_sse());
+  ASSERT_TRUE(cpu.has_sse2());
+
+// GCC and clang instruction test.
+#if defined(COMPILER_GCC)
+  // Execute an MMX instruction.
+  __asm__ __volatile__("emms\n" : : : "mm0");
+
+  // Execute an SSE instruction.
+  __asm__ __volatile__("xorps %%xmm0, %%xmm0\n" : : : "xmm0");
+
+  // Execute an SSE 2 instruction.
+  __asm__ __volatile__("psrldq $0, %%xmm0\n" : : : "xmm0");
+
+  if (cpu.has_sse3()) {
+    // Execute an SSE 3 instruction.
+    __asm__ __volatile__("addsubpd %%xmm0, %%xmm0\n" : : : "xmm0");
+  }
+
+  if (cpu.has_ssse3()) {
+    // Execute a Supplemental SSE 3 instruction.
+    __asm__ __volatile__("psignb %%xmm0, %%xmm0\n" : : : "xmm0");
+  }
+
+  if (cpu.has_sse41()) {
+    // Execute an SSE 4.1 instruction.
+    __asm__ __volatile__("pmuldq %%xmm0, %%xmm0\n" : : : "xmm0");
+  }
+
+  if (cpu.has_sse42()) {
+    // Execute an SSE 4.2 instruction.
+    __asm__ __volatile__("crc32 %%eax, %%eax\n" : : : "eax");
+  }
+
+  if (cpu.has_popcnt()) {
+    // Execute a POPCNT instruction.
+    __asm__ __volatile__("popcnt %%eax, %%eax\n" : : : "eax");
+  }
+
+  if (cpu.has_avx()) {
+    // Execute an AVX instruction.
+    __asm__ __volatile__("vzeroupper\n" : : : "xmm0");
+  }
+
+  if (cpu.has_avx2()) {
+    // Execute an AVX 2 instruction.
+    __asm__ __volatile__("vpunpcklbw %%ymm0, %%ymm0, %%ymm0\n" : : : "xmm0");
+  }
+
+// Visual C 32 bit and ClangCL 32/64 bit test.
+#elif defined(COMPILER_MSVC) && (defined(ARCH_CPU_32_BITS) || \
+      (defined(ARCH_CPU_64_BITS) && defined(__clang__)))
+
+  // Execute an MMX instruction.
+  __asm emms;
+
+  // Execute an SSE instruction.
+  __asm xorps xmm0, xmm0;
+
+  // Execute an SSE 2 instruction.
+  __asm psrldq xmm0, 0;
+
+  if (cpu.has_sse3()) {
+    // Execute an SSE 3 instruction.
+    __asm addsubpd xmm0, xmm0;
+  }
+
+  if (cpu.has_ssse3()) {
+    // Execute a Supplemental SSE 3 instruction.
+    __asm psignb xmm0, xmm0;
+  }
+
+  if (cpu.has_sse41()) {
+    // Execute an SSE 4.1 instruction.
+    __asm pmuldq xmm0, xmm0;
+  }
+
+  if (cpu.has_sse42()) {
+    // Execute an SSE 4.2 instruction.
+    __asm crc32 eax, eax;
+  }
+
+  if (cpu.has_popcnt()) {
+    // Execute a POPCNT instruction.
+    __asm popcnt eax, eax;
+  }
+
+// Visual C 2012 required for AVX.
+#if _MSC_VER >= 1700
+  if (cpu.has_avx()) {
+    // Execute an AVX instruction.
+    __asm vzeroupper;
+  }
+
+  if (cpu.has_avx2()) {
+    // Execute an AVX 2 instruction.
+    __asm vpunpcklbw ymm0, ymm0, ymm0
+  }
+#endif  // _MSC_VER >= 1700
+#endif  // defined(COMPILER_GCC)
+#endif  // defined(ARCH_CPU_X86_FAMILY)
+}
+
+// For https://crbug.com/249713
+TEST(CPU, BrandAndVendorContainsNoNUL) {
+  base::CPU cpu;
+  EXPECT_FALSE(base::ContainsValue(cpu.cpu_brand(), '\0'));
+  EXPECT_FALSE(base::ContainsValue(cpu.vendor_name(), '\0'));
+}
diff --git a/base/critical_closure.h b/base/critical_closure.h
new file mode 100644
index 0000000..94c618d
--- /dev/null
+++ b/base/critical_closure.h
@@ -0,0 +1,76 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CRITICAL_CLOSURE_H_
+#define BASE_CRITICAL_CLOSURE_H_
+
+#include <utility>
+
+#include "base/callback.h"
+#include "base/macros.h"
+#include "build/build_config.h"
+
+#if defined(OS_IOS)
+#include "base/bind.h"
+#include "base/ios/scoped_critical_action.h"
+#endif
+
+namespace base {
+
+namespace internal {
+
+#if defined(OS_IOS)
+// Returns true if multi-tasking is supported on this iOS device.
+bool IsMultiTaskingSupported();
+
+// This class wraps a closure so it can continue to run for a period of time
+// when the application goes to the background by using
+// |ios::ScopedCriticalAction|.
+class CriticalClosure {
+ public:
+  explicit CriticalClosure(OnceClosure closure);
+  ~CriticalClosure();
+  void Run();
+
+ private:
+  ios::ScopedCriticalAction critical_action_;
+  OnceClosure closure_;
+
+  DISALLOW_COPY_AND_ASSIGN(CriticalClosure);
+};
+#endif  // defined(OS_IOS)
+
+}  // namespace internal
+
+// On platforms where applications don't execute while backgrounded, returns a
+// closure that will continue to run for a period of time after the application
+// goes to the background, if possible; on other platforms, the original
+// closure is returned.
+//
+// Example:
+//   file_task_runner_->PostTask(
+//       FROM_HERE,
+//       MakeCriticalClosure(base::Bind(&WriteToDiskTask, path_, data)));
+//
+// Note that new closures might be posted from within this closure. If those
+// closures need background running time, |MakeCriticalClosure| should be
+// applied to them before posting.
+#if defined(OS_IOS)
+inline OnceClosure MakeCriticalClosure(OnceClosure closure) {
+  DCHECK(internal::IsMultiTaskingSupported());
+  return base::BindOnce(
+      &internal::CriticalClosure::Run,
+      Owned(new internal::CriticalClosure(std::move(closure))));
+}
+#else  // defined(OS_IOS)
+inline OnceClosure MakeCriticalClosure(OnceClosure closure) {
+  // No-op for platforms where the application does not need to acquire
+  // background time for closures to finish when it goes into the background.
+  return closure;
+}
+#endif  // defined(OS_IOS)
+
+}  // namespace base
+
+#endif  // BASE_CRITICAL_CLOSURE_H_
diff --git a/base/critical_closure_internal_ios.mm b/base/critical_closure_internal_ios.mm
new file mode 100644
index 0000000..e35eca0
--- /dev/null
+++ b/base/critical_closure_internal_ios.mm
@@ -0,0 +1,26 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/critical_closure.h"
+
+#import <UIKit/UIKit.h>
+
+namespace base {
+namespace internal {
+
+bool IsMultiTaskingSupported() {
+  return [[UIDevice currentDevice] isMultitaskingSupported];
+}
+
+CriticalClosure::CriticalClosure(OnceClosure closure)
+    : closure_(std::move(closure)) {}
+
+CriticalClosure::~CriticalClosure() {}
+
+void CriticalClosure::Run() {
+  std::move(closure_).Run();
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/debug/OWNERS b/base/debug/OWNERS
new file mode 100644
index 0000000..6150257
--- /dev/null
+++ b/base/debug/OWNERS
@@ -0,0 +1,2 @@
+# For activity tracking:
+per-file activity_*=bcwhite@chromium.org
diff --git a/base/debug/activity_analyzer.cc b/base/debug/activity_analyzer.cc
new file mode 100644
index 0000000..d787829
--- /dev/null
+++ b/base/debug/activity_analyzer.cc
@@ -0,0 +1,412 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/activity_analyzer.h"
+
+#include <algorithm>
+#include <utility>
+
+#include "base/files/file.h"
+#include "base/files/file_path.h"
+#include "base/files/memory_mapped_file.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/stl_util.h"
+#include "base/strings/string_util.h"
+
+namespace base {
+namespace debug {
+
+namespace {
+// An empty snapshot that can be returned when there otherwise is none.
+LazyInstance<ActivityUserData::Snapshot>::Leaky g_empty_user_data_snapshot;
+
+// DO NOT CHANGE VALUES. This is logged persistently in a histogram.
+enum AnalyzerCreationError {
+  kInvalidMemoryMappedFile,
+  kPmaBadFile,
+  kPmaUninitialized,
+  kPmaDeleted,
+  kPmaCorrupt,
+  kAnalyzerCreationErrorMax  // Keep this last.
+};
+
+void LogAnalyzerCreationError(AnalyzerCreationError error) {
+  UMA_HISTOGRAM_ENUMERATION("ActivityTracker.Collect.AnalyzerCreationError",
+                            error, kAnalyzerCreationErrorMax);
+}
+
+}  // namespace
+
+ThreadActivityAnalyzer::Snapshot::Snapshot() = default;
+ThreadActivityAnalyzer::Snapshot::~Snapshot() = default;
+
+ThreadActivityAnalyzer::ThreadActivityAnalyzer(
+    const ThreadActivityTracker& tracker)
+    : activity_snapshot_valid_(tracker.CreateSnapshot(&activity_snapshot_)) {}
+
+ThreadActivityAnalyzer::ThreadActivityAnalyzer(void* base, size_t size)
+    : ThreadActivityAnalyzer(ThreadActivityTracker(base, size)) {}
+
+ThreadActivityAnalyzer::ThreadActivityAnalyzer(
+    PersistentMemoryAllocator* allocator,
+    PersistentMemoryAllocator::Reference reference)
+    : ThreadActivityAnalyzer(allocator->GetAsArray<char>(
+                                 reference,
+                                 GlobalActivityTracker::kTypeIdActivityTracker,
+                                 PersistentMemoryAllocator::kSizeAny),
+                             allocator->GetAllocSize(reference)) {}
+
+ThreadActivityAnalyzer::~ThreadActivityAnalyzer() = default;
+
+void ThreadActivityAnalyzer::AddGlobalInformation(
+    GlobalActivityAnalyzer* global) {
+  if (!IsValid())
+    return;
+
+  // User-data is held at the global scope even though it's referenced at the
+  // thread scope.
+  activity_snapshot_.user_data_stack.clear();
+  for (auto& activity : activity_snapshot_.activity_stack) {
+    // The global GetUserDataSnapshot will return an empty snapshot if the ref
+    // or id is not valid.
+    activity_snapshot_.user_data_stack.push_back(global->GetUserDataSnapshot(
+        activity_snapshot_.process_id, activity.user_data_ref,
+        activity.user_data_id));
+  }
+}
+
+GlobalActivityAnalyzer::GlobalActivityAnalyzer(
+    std::unique_ptr<PersistentMemoryAllocator> allocator)
+    : allocator_(std::move(allocator)),
+      analysis_stamp_(0LL),
+      allocator_iterator_(allocator_.get()) {
+  DCHECK(allocator_);
+}
+
+GlobalActivityAnalyzer::~GlobalActivityAnalyzer() = default;
+
+// static
+std::unique_ptr<GlobalActivityAnalyzer>
+GlobalActivityAnalyzer::CreateWithAllocator(
+    std::unique_ptr<PersistentMemoryAllocator> allocator) {
+  if (allocator->GetMemoryState() ==
+      PersistentMemoryAllocator::MEMORY_UNINITIALIZED) {
+    LogAnalyzerCreationError(kPmaUninitialized);
+    return nullptr;
+  }
+  if (allocator->GetMemoryState() ==
+      PersistentMemoryAllocator::MEMORY_DELETED) {
+    LogAnalyzerCreationError(kPmaDeleted);
+    return nullptr;
+  }
+  if (allocator->IsCorrupt()) {
+    LogAnalyzerCreationError(kPmaCorrupt);
+    return nullptr;
+  }
+
+  return WrapUnique(new GlobalActivityAnalyzer(std::move(allocator)));
+}
+
+#if !defined(OS_NACL)
+// static
+std::unique_ptr<GlobalActivityAnalyzer> GlobalActivityAnalyzer::CreateWithFile(
+    const FilePath& file_path) {
+  // Map the file read-write so it can guarantee consistency between
+  // the analyzer and any trackers that may still be active.
+  std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
+  mmfile->Initialize(file_path, MemoryMappedFile::READ_WRITE);
+  if (!mmfile->IsValid()) {
+    LogAnalyzerCreationError(kInvalidMemoryMappedFile);
+    return nullptr;
+  }
+
+  if (!FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, true)) {
+    LogAnalyzerCreationError(kPmaBadFile);
+    return nullptr;
+  }
+
+  return CreateWithAllocator(std::make_unique<FilePersistentMemoryAllocator>(
+      std::move(mmfile), 0, 0, StringPiece(), /*readonly=*/true));
+}
+#endif  // !defined(OS_NACL)
+
+// static
+std::unique_ptr<GlobalActivityAnalyzer>
+GlobalActivityAnalyzer::CreateWithSharedMemory(
+    std::unique_ptr<SharedMemory> shm) {
+  if (shm->mapped_size() == 0 ||
+      !SharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(*shm)) {
+    return nullptr;
+  }
+  return CreateWithAllocator(std::make_unique<SharedPersistentMemoryAllocator>(
+      std::move(shm), 0, StringPiece(), /*readonly=*/true));
+}
+
+// static
+std::unique_ptr<GlobalActivityAnalyzer>
+GlobalActivityAnalyzer::CreateWithSharedMemoryHandle(
+    const SharedMemoryHandle& handle,
+    size_t size) {
+  std::unique_ptr<SharedMemory> shm(
+      new SharedMemory(handle, /*readonly=*/true));
+  if (!shm->Map(size))
+    return nullptr;
+  return CreateWithSharedMemory(std::move(shm));
+}
+
+int64_t GlobalActivityAnalyzer::GetFirstProcess() {
+  PrepareAllAnalyzers();
+  return GetNextProcess();
+}
+
+int64_t GlobalActivityAnalyzer::GetNextProcess() {
+  if (process_ids_.empty())
+    return 0;
+  int64_t pid = process_ids_.back();
+  process_ids_.pop_back();
+  return pid;
+}
+
+ThreadActivityAnalyzer* GlobalActivityAnalyzer::GetFirstAnalyzer(int64_t pid) {
+  analyzers_iterator_ = analyzers_.begin();
+  analyzers_iterator_pid_ = pid;
+  if (analyzers_iterator_ == analyzers_.end())
+    return nullptr;
+  int64_t create_stamp;
+  if (analyzers_iterator_->second->GetProcessId(&create_stamp) == pid &&
+      create_stamp <= analysis_stamp_) {
+    return analyzers_iterator_->second.get();
+  }
+  return GetNextAnalyzer();
+}
+
+ThreadActivityAnalyzer* GlobalActivityAnalyzer::GetNextAnalyzer() {
+  DCHECK(analyzers_iterator_ != analyzers_.end());
+  int64_t create_stamp;
+  do {
+    ++analyzers_iterator_;
+    if (analyzers_iterator_ == analyzers_.end())
+      return nullptr;
+  } while (analyzers_iterator_->second->GetProcessId(&create_stamp) !=
+               analyzers_iterator_pid_ ||
+           create_stamp > analysis_stamp_);
+  return analyzers_iterator_->second.get();
+}
+
+ThreadActivityAnalyzer* GlobalActivityAnalyzer::GetAnalyzerForThread(
+    const ThreadKey& key) {
+  auto found = analyzers_.find(key);
+  if (found == analyzers_.end())
+    return nullptr;
+  return found->second.get();
+}
+
+ActivityUserData::Snapshot GlobalActivityAnalyzer::GetUserDataSnapshot(
+    int64_t pid,
+    uint32_t ref,
+    uint32_t id) {
+  ActivityUserData::Snapshot snapshot;
+
+  void* memory = allocator_->GetAsArray<char>(
+      ref, GlobalActivityTracker::kTypeIdUserDataRecord,
+      PersistentMemoryAllocator::kSizeAny);
+  if (memory) {
+    size_t size = allocator_->GetAllocSize(ref);
+    const ActivityUserData user_data(memory, size);
+    user_data.CreateSnapshot(&snapshot);
+    int64_t process_id;
+    int64_t create_stamp;
+    if (!ActivityUserData::GetOwningProcessId(memory, &process_id,
+                                              &create_stamp) ||
+        process_id != pid || user_data.id() != id) {
+      // This allocation has been overwritten since it was created. Return an
+      // empty snapshot because whatever was captured is incorrect.
+      snapshot.clear();
+    }
+  }
+
+  return snapshot;
+}
+
+const ActivityUserData::Snapshot&
+GlobalActivityAnalyzer::GetProcessDataSnapshot(int64_t pid) {
+  auto iter = process_data_.find(pid);
+  if (iter == process_data_.end())
+    return g_empty_user_data_snapshot.Get();
+  if (iter->second.create_stamp > analysis_stamp_)
+    return g_empty_user_data_snapshot.Get();
+  DCHECK_EQ(pid, iter->second.process_id);
+  return iter->second.data;
+}
+
+std::vector<std::string> GlobalActivityAnalyzer::GetLogMessages() {
+  std::vector<std::string> messages;
+  PersistentMemoryAllocator::Reference ref;
+
+  PersistentMemoryAllocator::Iterator iter(allocator_.get());
+  while ((ref = iter.GetNextOfType(
+              GlobalActivityTracker::kTypeIdGlobalLogMessage)) != 0) {
+    const char* message = allocator_->GetAsArray<char>(
+        ref, GlobalActivityTracker::kTypeIdGlobalLogMessage,
+        PersistentMemoryAllocator::kSizeAny);
+    if (message)
+      messages.push_back(message);
+  }
+
+  return messages;
+}
+
+std::vector<GlobalActivityTracker::ModuleInfo>
+GlobalActivityAnalyzer::GetModules(int64_t pid) {
+  std::vector<GlobalActivityTracker::ModuleInfo> modules;
+
+  PersistentMemoryAllocator::Iterator iter(allocator_.get());
+  const GlobalActivityTracker::ModuleInfoRecord* record;
+  while (
+      (record =
+           iter.GetNextOfObject<GlobalActivityTracker::ModuleInfoRecord>()) !=
+      nullptr) {
+    int64_t process_id;
+    int64_t create_stamp;
+    if (!OwningProcess::GetOwningProcessId(&record->owner, &process_id,
+                                           &create_stamp) ||
+        pid != process_id || create_stamp > analysis_stamp_) {
+      continue;
+    }
+    GlobalActivityTracker::ModuleInfo info;
+    if (record->DecodeTo(&info, allocator_->GetAllocSize(
+                                    allocator_->GetAsReference(record)))) {
+      modules.push_back(std::move(info));
+    }
+  }
+
+  return modules;
+}
+
+GlobalActivityAnalyzer::ProgramLocation
+GlobalActivityAnalyzer::GetProgramLocationFromAddress(uint64_t address) {
+  // TODO(bcwhite): Implement this.
+  return { 0, 0 };
+}
+
+bool GlobalActivityAnalyzer::IsDataComplete() const {
+  DCHECK(allocator_);
+  return !allocator_->IsFull();
+}
+
+GlobalActivityAnalyzer::UserDataSnapshot::UserDataSnapshot() = default;
+GlobalActivityAnalyzer::UserDataSnapshot::UserDataSnapshot(
+    const UserDataSnapshot& rhs) = default;
+GlobalActivityAnalyzer::UserDataSnapshot::UserDataSnapshot(
+    UserDataSnapshot&& rhs) = default;
+GlobalActivityAnalyzer::UserDataSnapshot::~UserDataSnapshot() = default;
+
+void GlobalActivityAnalyzer::PrepareAllAnalyzers() {
+  // Record the time when analysis started.
+  analysis_stamp_ = base::Time::Now().ToInternalValue();
+
+  // Fetch all the records. This will retrieve only the ones created since the
+  // last run because the PMA iterator will continue from where it left off.
+  uint32_t type;
+  PersistentMemoryAllocator::Reference ref;
+  while ((ref = allocator_iterator_.GetNext(&type)) != 0) {
+    switch (type) {
+      case GlobalActivityTracker::kTypeIdActivityTracker:
+      case GlobalActivityTracker::kTypeIdActivityTrackerFree:
+      case GlobalActivityTracker::kTypeIdProcessDataRecord:
+      case GlobalActivityTracker::kTypeIdProcessDataRecordFree:
+      case PersistentMemoryAllocator::kTypeIdTransitioning:
+        // Active, free, or transitioning: add it to the list of references
+        // for later analysis.
+        memory_references_.insert(ref);
+        break;
+    }
+  }
+
+  // Clear out any old information.
+  analyzers_.clear();
+  process_data_.clear();
+  process_ids_.clear();
+  std::set<int64_t> seen_pids;
+
+  // Go through all the known references and create objects for them with
+  // snapshots of the current state.
+  for (PersistentMemoryAllocator::Reference memory_ref : memory_references_) {
+    // Get the actual data segment for the tracker. Any type will do since it
+    // is checked below.
+    void* const base = allocator_->GetAsArray<char>(
+        memory_ref, PersistentMemoryAllocator::kTypeIdAny,
+        PersistentMemoryAllocator::kSizeAny);
+    const size_t size = allocator_->GetAllocSize(memory_ref);
+    if (!base)
+      continue;
+
+    switch (allocator_->GetType(memory_ref)) {
+      case GlobalActivityTracker::kTypeIdActivityTracker: {
+        // Create the analyzer on the data. This will capture a snapshot of the
+        // tracker state. This can fail if the tracker is somehow corrupted or
+        // is in the process of shutting down.
+        std::unique_ptr<ThreadActivityAnalyzer> analyzer(
+            new ThreadActivityAnalyzer(base, size));
+        if (!analyzer->IsValid())
+          continue;
+        analyzer->AddGlobalInformation(this);
+
+        // Track PIDs.
+        int64_t pid = analyzer->GetProcessId();
+        if (seen_pids.find(pid) == seen_pids.end()) {
+          process_ids_.push_back(pid);
+          seen_pids.insert(pid);
+        }
+
+        // Add this analyzer to the map of known ones, indexed by a unique
+        // thread identifier.
+        DCHECK(!base::ContainsKey(analyzers_, analyzer->GetThreadKey()));
+        analyzer->allocator_reference_ = memory_ref;
+        analyzers_[analyzer->GetThreadKey()] = std::move(analyzer);
+      } break;
+
+      case GlobalActivityTracker::kTypeIdProcessDataRecord: {
+        // Get the PID associated with this data record.
+        int64_t process_id;
+        int64_t create_stamp;
+        ActivityUserData::GetOwningProcessId(base, &process_id, &create_stamp);
+        DCHECK(!base::ContainsKey(process_data_, process_id));
+
+        // Create a snapshot of the data. This can fail if the data is somehow
+        // corrupted or the process shut down and the memory was released.
+        UserDataSnapshot& snapshot = process_data_[process_id];
+        snapshot.process_id = process_id;
+        snapshot.create_stamp = create_stamp;
+        const ActivityUserData process_data(base, size);
+        if (!process_data.CreateSnapshot(&snapshot.data))
+          break;
+
+        // Check that nothing changed. If it did, forget what was recorded.
+        ActivityUserData::GetOwningProcessId(base, &process_id, &create_stamp);
+        if (process_id != snapshot.process_id ||
+            create_stamp != snapshot.create_stamp) {
+          process_data_.erase(process_id);
+          break;
+        }
+
+        // Track PIDs.
+        if (seen_pids.find(process_id) == seen_pids.end()) {
+          process_ids_.push_back(process_id);
+          seen_pids.insert(process_id);
+        }
+      } break;
+    }
+  }
+
+  // Reverse the list of PIDs so that they get popped in the order found.
+  std::reverse(process_ids_.begin(), process_ids_.end());
+}
+
+}  // namespace debug
+}  // namespace base
diff --git a/base/debug/activity_analyzer.h b/base/debug/activity_analyzer.h
new file mode 100644
index 0000000..9add85a
--- /dev/null
+++ b/base/debug/activity_analyzer.h
@@ -0,0 +1,262 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_DEBUG_ACTIVITY_ANALYZER_H_
+#define BASE_DEBUG_ACTIVITY_ANALYZER_H_
+
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/debug/activity_tracker.h"
+
+namespace base {
+namespace debug {
+
+class GlobalActivityAnalyzer;
+
+// This class provides analysis of data captured from a ThreadActivityTracker.
+// When created, it takes a snapshot of the data held by the tracker and
+// makes that information available to other code.
+class BASE_EXPORT ThreadActivityAnalyzer {
+ public:
+  struct BASE_EXPORT Snapshot : ThreadActivityTracker::Snapshot {
+    Snapshot();
+    ~Snapshot();
+
+    // The user-data snapshot for an activity, matching the |activity_stack|
+    // of ThreadActivityTracker::Snapshot, if any.
+    std::vector<ActivityUserData::Snapshot> user_data_stack;
+  };
+
+  // This class provides keys that uniquely identify a thread, even across
+  // multiple processes.
+  class ThreadKey {
+   public:
+    ThreadKey(int64_t pid, int64_t tid) : pid_(pid), tid_(tid) {}
+
+    bool operator<(const ThreadKey& rhs) const {
+      if (pid_ != rhs.pid_)
+        return pid_ < rhs.pid_;
+      return tid_ < rhs.tid_;
+    }
+
+    bool operator==(const ThreadKey& rhs) const {
+      return (pid_ == rhs.pid_ && tid_ == rhs.tid_);
+    }
+
+   private:
+    int64_t pid_;
+    int64_t tid_;
+  };
+
+  // Creates an analyzer for an existing activity |tracker|. A snapshot is taken
+  // immediately and the tracker is not referenced again.
+  explicit ThreadActivityAnalyzer(const ThreadActivityTracker& tracker);
+
+  // Creates an analyzer for a block of memory currently or previously in-use
+  // by an activity-tracker. A snapshot is taken immediately and the memory
+  // is not referenced again.
+  ThreadActivityAnalyzer(void* base, size_t size);
+
+  // Creates an analyzer for a block of memory held within a persistent-memory
+  // |allocator| at the given |reference|. A snapshot is taken immediately and
+  // the memory is not referenced again.
+  ThreadActivityAnalyzer(PersistentMemoryAllocator* allocator,
+                         PersistentMemoryAllocator::Reference reference);
+
+  ~ThreadActivityAnalyzer();
+
+  // Adds information from the global analyzer.
+  void AddGlobalInformation(GlobalActivityAnalyzer* global);
+
+  // Returns true iff the contained data is valid. Results from all other
+  // methods are undefined if this returns false.
+  bool IsValid() { return activity_snapshot_valid_; }
+
+  // Gets the process id and its creation stamp.
+  int64_t GetProcessId(int64_t* out_stamp = nullptr) {
+    if (out_stamp)
+      *out_stamp = activity_snapshot_.create_stamp;
+    return activity_snapshot_.process_id;
+  }
+
+  // Gets the name of the thread.
+  const std::string& GetThreadName() {
+    return activity_snapshot_.thread_name;
+  }
+
+  // Gets the ThreadKey for this thread.
+  ThreadKey GetThreadKey() {
+    return ThreadKey(activity_snapshot_.process_id,
+                     activity_snapshot_.thread_id);
+  }
+
+  const Snapshot& activity_snapshot() { return activity_snapshot_; }
+
+ private:
+  friend class GlobalActivityAnalyzer;
+
+  // The snapshot of the activity tracker taken at the moment of construction.
+  Snapshot activity_snapshot_;
+
+  // Flag indicating if the snapshot data is valid.
+  bool activity_snapshot_valid_;
+
+  // A reference into a persistent memory allocator, used by the global
+  // analyzer to know where this tracker came from.
+  PersistentMemoryAllocator::Reference allocator_reference_ = 0;
+
+  DISALLOW_COPY_AND_ASSIGN(ThreadActivityAnalyzer);
+};
+
+
+// This class manages analyzers for all known processes and threads as stored
+// in a persistent memory allocator. It supports retrieval of them through
+// iteration and directly using a ThreadKey, which allows for cross-references
+// to be resolved.
+// Note that though atomic snapshots are used and everything has its snapshot
+// taken at the same time, the multi-snapshot itself is not atomic and thus may
+// show small inconsistencies between threads if attempted on a live system.
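+//
+// Example (hypothetical usage; assumes |path| is a FilePath naming a
+// persistent-memory file written by a GlobalActivityTracker):
+//   auto global = GlobalActivityAnalyzer::CreateWithFile(path);
+//   for (int64_t pid = global->GetFirstProcess(); pid != 0;
+//        pid = global->GetNextProcess()) {
+//     for (ThreadActivityAnalyzer* thread = global->GetFirstAnalyzer(pid);
+//          thread; thread = global->GetNextAnalyzer()) {
+//       // Examine thread->activity_snapshot() here.
+//     }
+//   }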
+class BASE_EXPORT GlobalActivityAnalyzer {
+ public:
+  struct ProgramLocation {
+    int module;
+    uintptr_t offset;
+  };
+
+  using ThreadKey = ThreadActivityAnalyzer::ThreadKey;
+
+  // Creates a global analyzer from a persistent memory allocator.
+  explicit GlobalActivityAnalyzer(
+      std::unique_ptr<PersistentMemoryAllocator> allocator);
+
+  ~GlobalActivityAnalyzer();
+
+  // Creates a global analyzer using a given persistent-memory |allocator|.
+  static std::unique_ptr<GlobalActivityAnalyzer> CreateWithAllocator(
+      std::unique_ptr<PersistentMemoryAllocator> allocator);
+
+#if !defined(OS_NACL)
+  // Creates a global analyzer using the contents of a file given in
+  // |file_path|.
+  static std::unique_ptr<GlobalActivityAnalyzer> CreateWithFile(
+      const FilePath& file_path);
+#endif  // !defined(OS_NACL)
+
+  // Like above but accesses an allocator in a mapped shared-memory segment.
+  static std::unique_ptr<GlobalActivityAnalyzer> CreateWithSharedMemory(
+      std::unique_ptr<SharedMemory> shm);
+
+  // Like above but takes a handle to an existing shared memory segment and
+  // maps it before creating the tracker.
+  static std::unique_ptr<GlobalActivityAnalyzer> CreateWithSharedMemoryHandle(
+      const SharedMemoryHandle& handle,
+      size_t size);
+
+  // Iterates over all known valid processes and returns their PIDs or zero
+  // if there are no more. Calls to GetFirstProcess() will perform a global
+  // snapshot in order to provide a relatively consistent state across
+  // future calls to GetNextProcess() and GetFirst/NextAnalyzer(). PIDs are
+  // returned in the order they're found, meaning that a first-launched
+  // controlling process will be found first. Note, however, that space
+  // freed by an exiting process may be re-used by a later process.
+  int64_t GetFirstProcess();
+  int64_t GetNextProcess();
+
+  // Iterates over all known valid analyzers for a given process, or returns
+  // null if there are no more.
+  //
+  // GetFirstProcess() must be called first in order to capture a global
+  // snapshot! Ownership stays with the global analyzer object and all existing
+  // analyzer pointers are invalidated when GetFirstProcess() is called.
+  ThreadActivityAnalyzer* GetFirstAnalyzer(int64_t pid);
+  ThreadActivityAnalyzer* GetNextAnalyzer();
+
+  // Gets the analyzer for a specific thread or null if there is none.
+  // Ownership stays with the global analyzer object.
+  ThreadActivityAnalyzer* GetAnalyzerForThread(const ThreadKey& key);
+
+  // Extract user data based on a reference and its identifier.
+  ActivityUserData::Snapshot GetUserDataSnapshot(int64_t pid,
+                                                 uint32_t ref,
+                                                 uint32_t id);
+
+  // Extract the data for a specific process. An empty snapshot will be
+  // returned if the process is not known.
+  const ActivityUserData::Snapshot& GetProcessDataSnapshot(int64_t pid);
+
+  // Gets all log messages stored within.
+  std::vector<std::string> GetLogMessages();
+
+  // Gets modules corresponding to a pid. This pid must come from a call to
+  // GetFirst/NextProcess. Only modules that were first registered prior to
+  // GetFirstProcess's snapshot are returned.
+  std::vector<GlobalActivityTracker::ModuleInfo> GetModules(int64_t pid);
+
+  // Gets the corresponding "program location" for a given "program counter".
+  // This will return {0,0} if no mapping could be found.
+  ProgramLocation GetProgramLocationFromAddress(uint64_t address);
+
+  // Returns whether the data is complete. Data can be incomplete if the
+  // recording size quota is hit.
+  bool IsDataComplete() const;
+
+ private:
+  using AnalyzerMap =
+      std::map<ThreadKey, std::unique_ptr<ThreadActivityAnalyzer>>;
+
+  struct UserDataSnapshot {
+    // Complex class needs out-of-line ctor/dtor.
+    UserDataSnapshot();
+    UserDataSnapshot(const UserDataSnapshot& rhs);
+    UserDataSnapshot(UserDataSnapshot&& rhs);
+    ~UserDataSnapshot();
+
+    int64_t process_id;
+    int64_t create_stamp;
+    ActivityUserData::Snapshot data;
+  };
+
+  // Finds, creates, and indexes analyzers for all known processes and threads.
+  void PrepareAllAnalyzers();
+
+  // The persistent memory allocator holding all tracking data.
+  std::unique_ptr<PersistentMemoryAllocator> allocator_;
+
+  // The time stamp when analysis began. This is used to prevent looking into
+  // process IDs that get reused when analyzing a live system.
+  int64_t analysis_stamp_;
+
+  // The iterator for finding tracking information in the allocator.
+  PersistentMemoryAllocator::Iterator allocator_iterator_;
+
+  // A set of all interesting memory references found within the allocator.
+  std::set<PersistentMemoryAllocator::Reference> memory_references_;
+
+  // Process-data snapshots found within the allocator, keyed by process id.
+  std::map<int64_t, UserDataSnapshot> process_data_;
+
+  // A set of all process IDs collected during PrepareAllAnalyzers. These are
+  // popped and returned one-by-one with calls to GetFirst/NextProcess().
+  std::vector<int64_t> process_ids_;
+
+  // A map, keyed by ThreadKey, of all valid activity analyzers.
+  AnalyzerMap analyzers_;
+
+  // The iterator within the analyzers_ map for returning analyzers through
+  // first/next iteration.
+  AnalyzerMap::iterator analyzers_iterator_;
+  int64_t analyzers_iterator_pid_;
+
+  DISALLOW_COPY_AND_ASSIGN(GlobalActivityAnalyzer);
+};
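+
+// A minimal iteration sketch (for illustration only; |allocator| is assumed
+// to be a std::unique_ptr<PersistentMemoryAllocator> over the tracked
+// memory). GetFirstProcess() takes the global snapshot and invalidates any
+// analyzer pointers from a previous pass.
+//
+//   GlobalActivityAnalyzer global(std::move(allocator));
+//   for (int64_t pid = global.GetFirstProcess(); pid != 0;
+//        pid = global.GetNextProcess()) {
+//     for (ThreadActivityAnalyzer* analyzer = global.GetFirstAnalyzer(pid);
+//          analyzer; analyzer = global.GetNextAnalyzer()) {
+//       // Examine analyzer->activity_snapshot() here.
+//     }
+//   }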
+
+}  // namespace debug
+}  // namespace base
+
+#endif  // BASE_DEBUG_ACTIVITY_ANALYZER_H_
diff --git a/base/debug/activity_analyzer_unittest.cc b/base/debug/activity_analyzer_unittest.cc
new file mode 100644
index 0000000..e08b43a
--- /dev/null
+++ b/base/debug/activity_analyzer_unittest.cc
@@ -0,0 +1,546 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/activity_analyzer.h"
+
+#include <atomic>
+#include <memory>
+
+#include "base/auto_reset.h"
+#include "base/bind.h"
+#include "base/debug/activity_tracker.h"
+#include "base/files/file.h"
+#include "base/files/file_util.h"
+#include "base/files/memory_mapped_file.h"
+#include "base/files/scoped_temp_dir.h"
+#include "base/memory/ptr_util.h"
+#include "base/pending_task.h"
+#include "base/process/process.h"
+#include "base/stl_util.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/spin_wait.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/simple_thread.h"
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace debug {
+
+namespace {
+
+class TestActivityTracker : public ThreadActivityTracker {
+ public:
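+  // memset() returns its first argument, so the initializer below zeroes the
+  // memory block and passes it to the base class in a single expression.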
+  TestActivityTracker(std::unique_ptr<char[]> memory, size_t mem_size)
+      : ThreadActivityTracker(memset(memory.get(), 0, mem_size), mem_size),
+        mem_segment_(std::move(memory)) {}
+
+  ~TestActivityTracker() override = default;
+
+ private:
+  std::unique_ptr<char[]> mem_segment_;
+};
+
+}  // namespace
+
+
+class ActivityAnalyzerTest : public testing::Test {
+ public:
+  const int kMemorySize = 1 << 20;  // 1 MiB
+  const int kStackSize  = 1 << 10;  // 1 KiB
+
+  ActivityAnalyzerTest() = default;
+
+  ~ActivityAnalyzerTest() override {
+    GlobalActivityTracker* global_tracker = GlobalActivityTracker::Get();
+    if (global_tracker) {
+      global_tracker->ReleaseTrackerForCurrentThreadForTesting();
+      delete global_tracker;
+    }
+  }
+
+  std::unique_ptr<ThreadActivityTracker> CreateActivityTracker() {
+    std::unique_ptr<char[]> memory(new char[kStackSize]);
+    return std::make_unique<TestActivityTracker>(std::move(memory), kStackSize);
+  }
+
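+  // Simulates execution in a different process: temporarily releases the
+  // current global tracker, creates a new one with |pid| on top of the same
+  // persistent memory, runs |function| in that context, and then restores
+  // the original tracker.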
+  template <typename Function>
+  void AsOtherProcess(int64_t pid, Function function) {
+    std::unique_ptr<GlobalActivityTracker> old_global =
+        GlobalActivityTracker::ReleaseForTesting();
+    ASSERT_TRUE(old_global);
+
+    PersistentMemoryAllocator* old_allocator = old_global->allocator();
+    std::unique_ptr<PersistentMemoryAllocator> new_allocator(
+        std::make_unique<PersistentMemoryAllocator>(
+            const_cast<void*>(old_allocator->data()), old_allocator->size(), 0,
+            0, "", false));
+    GlobalActivityTracker::CreateWithAllocator(std::move(new_allocator), 3,
+                                               pid);
+
+    function();
+
+    GlobalActivityTracker::ReleaseForTesting();
+    GlobalActivityTracker::SetForTesting(std::move(old_global));
+  }
+
+  static void DoNothing() {}
+};
+
+TEST_F(ActivityAnalyzerTest, ThreadAnalyzerConstruction) {
+  std::unique_ptr<ThreadActivityTracker> tracker = CreateActivityTracker();
+  {
+    ThreadActivityAnalyzer analyzer(*tracker);
+    EXPECT_TRUE(analyzer.IsValid());
+    EXPECT_EQ(PlatformThread::GetName(), analyzer.GetThreadName());
+  }
+
+  // TODO(bcwhite): More tests once Analyzer does more.
+}
+
+
+// GlobalActivityAnalyzer tests below.
+
+namespace {
+
+class SimpleActivityThread : public SimpleThread {
+ public:
+  SimpleActivityThread(const std::string& name,
+                       const void* source,
+                       Activity::Type activity,
+                       const ActivityData& data)
+      : SimpleThread(name, Options()),
+        source_(source),
+        activity_(activity),
+        data_(data),
+        ready_(false),
+        exit_(false),
+        exit_condition_(&lock_) {}
+
+  ~SimpleActivityThread() override = default;
+
+  void Run() override {
+    ThreadActivityTracker::ActivityId id =
+        GlobalActivityTracker::Get()
+            ->GetOrCreateTrackerForCurrentThread()
+            ->PushActivity(source_, activity_, data_);
+
+    {
+      AutoLock auto_lock(lock_);
+      ready_.store(true, std::memory_order_release);
+      while (!exit_.load(std::memory_order_relaxed))
+        exit_condition_.Wait();
+    }
+
+    GlobalActivityTracker::Get()->GetTrackerForCurrentThread()->PopActivity(id);
+  }
+
+  void Exit() {
+    AutoLock auto_lock(lock_);
+    exit_.store(true, std::memory_order_relaxed);
+    exit_condition_.Signal();
+  }
+
+  void WaitReady() {
+    SPIN_FOR_1_SECOND_OR_UNTIL_TRUE(ready_.load(std::memory_order_acquire));
+  }
+
+ private:
+  const void* source_;
+  Activity::Type activity_;
+  ActivityData data_;
+
+  std::atomic<bool> ready_;
+  std::atomic<bool> exit_;
+  Lock lock_;
+  ConditionVariable exit_condition_;
+
+  DISALLOW_COPY_AND_ASSIGN(SimpleActivityThread);
+};
+
+}  // namespace
+
+TEST_F(ActivityAnalyzerTest, GlobalAnalyzerConstruction) {
+  GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3, 0);
+  GlobalActivityTracker::Get()->process_data().SetString("foo", "bar");
+
+  PersistentMemoryAllocator* allocator =
+      GlobalActivityTracker::Get()->allocator();
+  GlobalActivityAnalyzer analyzer(std::make_unique<PersistentMemoryAllocator>(
+      const_cast<void*>(allocator->data()), allocator->size(), 0, 0, "", true));
+
+  // The only thread at this point is the test thread of this process.
+  const int64_t pid = analyzer.GetFirstProcess();
+  ASSERT_NE(0, pid);
+  ThreadActivityAnalyzer* ta1 = analyzer.GetFirstAnalyzer(pid);
+  ASSERT_TRUE(ta1);
+  EXPECT_FALSE(analyzer.GetNextAnalyzer());
+  ThreadActivityAnalyzer::ThreadKey tk1 = ta1->GetThreadKey();
+  EXPECT_EQ(ta1, analyzer.GetAnalyzerForThread(tk1));
+  EXPECT_EQ(0, analyzer.GetNextProcess());
+
+  // Create a second thread that will do something.
+  SimpleActivityThread t2("t2", nullptr, Activity::ACT_TASK,
+                          ActivityData::ForTask(11));
+  t2.Start();
+  t2.WaitReady();
+
+  // Now there should be two. Calling GetFirstProcess invalidates any
+  // previously returned analyzer pointers.
+  ASSERT_EQ(pid, analyzer.GetFirstProcess());
+  EXPECT_TRUE(analyzer.GetFirstAnalyzer(pid));
+  EXPECT_TRUE(analyzer.GetNextAnalyzer());
+  EXPECT_FALSE(analyzer.GetNextAnalyzer());
+  EXPECT_EQ(0, analyzer.GetNextProcess());
+
+  // Let thread exit.
+  t2.Exit();
+  t2.Join();
+
+  // Now there should be only one again.
+  ASSERT_EQ(pid, analyzer.GetFirstProcess());
+  ThreadActivityAnalyzer* ta2 = analyzer.GetFirstAnalyzer(pid);
+  ASSERT_TRUE(ta2);
+  EXPECT_FALSE(analyzer.GetNextAnalyzer());
+  ThreadActivityAnalyzer::ThreadKey tk2 = ta2->GetThreadKey();
+  EXPECT_EQ(ta2, analyzer.GetAnalyzerForThread(tk2));
+  EXPECT_EQ(tk1, tk2);
+  EXPECT_EQ(0, analyzer.GetNextProcess());
+
+  // Verify that there is process data.
+  const ActivityUserData::Snapshot& data_snapshot =
+      analyzer.GetProcessDataSnapshot(pid);
+  ASSERT_LE(1U, data_snapshot.size());
+  EXPECT_EQ("bar", data_snapshot.at("foo").GetString());
+}
+
+TEST_F(ActivityAnalyzerTest, GlobalAnalyzerFromSharedMemory) {
+  SharedMemoryHandle handle1;
+  SharedMemoryHandle handle2;
+
+  {
+    std::unique_ptr<SharedMemory> shmem(new SharedMemory());
+    ASSERT_TRUE(shmem->CreateAndMapAnonymous(kMemorySize));
+    handle1 = shmem->handle().Duplicate();
+    ASSERT_TRUE(handle1.IsValid());
+    handle2 = shmem->handle().Duplicate();
+    ASSERT_TRUE(handle2.IsValid());
+  }
+
+  GlobalActivityTracker::CreateWithSharedMemoryHandle(handle1, kMemorySize, 0,
+                                                      "", 3);
+  GlobalActivityTracker::Get()->process_data().SetString("foo", "bar");
+
+  std::unique_ptr<GlobalActivityAnalyzer> analyzer =
+      GlobalActivityAnalyzer::CreateWithSharedMemoryHandle(handle2,
+                                                           kMemorySize);
+
+  const int64_t pid = analyzer->GetFirstProcess();
+  ASSERT_NE(0, pid);
+  const ActivityUserData::Snapshot& data_snapshot =
+      analyzer->GetProcessDataSnapshot(pid);
+  ASSERT_LE(1U, data_snapshot.size());
+  EXPECT_EQ("bar", data_snapshot.at("foo").GetString());
+}
+
+TEST_F(ActivityAnalyzerTest, UserDataSnapshotTest) {
+  GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3, 0);
+  ThreadActivityAnalyzer::Snapshot tracker_snapshot;
+
+  const char string1a[] = "string1a";
+  const char string1b[] = "string1b";
+  const char string2a[] = "string2a";
+  const char string2b[] = "string2b";
+
+  PersistentMemoryAllocator* allocator =
+      GlobalActivityTracker::Get()->allocator();
+  GlobalActivityAnalyzer global_analyzer(
+      std::make_unique<PersistentMemoryAllocator>(
+          const_cast<void*>(allocator->data()), allocator->size(), 0, 0, "",
+          true));
+
+  ThreadActivityTracker* tracker =
+      GlobalActivityTracker::Get()->GetOrCreateTrackerForCurrentThread();
+
+  {
+    ScopedActivity activity1(1, 11, 111);
+    ActivityUserData& user_data1 = activity1.user_data();
+    user_data1.Set("raw1", "foo1", 4);
+    user_data1.SetString("string1", "bar1");
+    user_data1.SetChar("char1", '1');
+    user_data1.SetInt("int1", -1111);
+    user_data1.SetUint("uint1", 1111);
+    user_data1.SetBool("bool1", true);
+    user_data1.SetReference("ref1", string1a, sizeof(string1a));
+    user_data1.SetStringReference("sref1", string1b);
+
+    {
+      ScopedActivity activity2(2, 22, 222);
+      ActivityUserData& user_data2 = activity2.user_data();
+      user_data2.Set("raw2", "foo2", 4);
+      user_data2.SetString("string2", "bar2");
+      user_data2.SetChar("char2", '2');
+      user_data2.SetInt("int2", -2222);
+      user_data2.SetUint("uint2", 2222);
+      user_data2.SetBool("bool2", false);
+      user_data2.SetReference("ref2", string2a, sizeof(string2a));
+      user_data2.SetStringReference("sref2", string2b);
+
+      ASSERT_TRUE(tracker->CreateSnapshot(&tracker_snapshot));
+      ASSERT_EQ(2U, tracker_snapshot.activity_stack.size());
+
+      ThreadActivityAnalyzer analyzer(*tracker);
+      analyzer.AddGlobalInformation(&global_analyzer);
+      const ThreadActivityAnalyzer::Snapshot& analyzer_snapshot =
+          analyzer.activity_snapshot();
+      ASSERT_EQ(2U, analyzer_snapshot.user_data_stack.size());
+      const ActivityUserData::Snapshot& user_data =
+          analyzer_snapshot.user_data_stack.at(1);
+      EXPECT_EQ(8U, user_data.size());
+      ASSERT_TRUE(ContainsKey(user_data, "raw2"));
+      EXPECT_EQ("foo2", user_data.at("raw2").Get().as_string());
+      ASSERT_TRUE(ContainsKey(user_data, "string2"));
+      EXPECT_EQ("bar2", user_data.at("string2").GetString().as_string());
+      ASSERT_TRUE(ContainsKey(user_data, "char2"));
+      EXPECT_EQ('2', user_data.at("char2").GetChar());
+      ASSERT_TRUE(ContainsKey(user_data, "int2"));
+      EXPECT_EQ(-2222, user_data.at("int2").GetInt());
+      ASSERT_TRUE(ContainsKey(user_data, "uint2"));
+      EXPECT_EQ(2222U, user_data.at("uint2").GetUint());
+      ASSERT_TRUE(ContainsKey(user_data, "bool2"));
+      EXPECT_FALSE(user_data.at("bool2").GetBool());
+      ASSERT_TRUE(ContainsKey(user_data, "ref2"));
+      EXPECT_EQ(string2a, user_data.at("ref2").GetReference().data());
+      EXPECT_EQ(sizeof(string2a), user_data.at("ref2").GetReference().size());
+      ASSERT_TRUE(ContainsKey(user_data, "sref2"));
+      EXPECT_EQ(string2b, user_data.at("sref2").GetStringReference().data());
+      EXPECT_EQ(strlen(string2b),
+                user_data.at("sref2").GetStringReference().size());
+    }
+
+    ASSERT_TRUE(tracker->CreateSnapshot(&tracker_snapshot));
+    ASSERT_EQ(1U, tracker_snapshot.activity_stack.size());
+
+    ThreadActivityAnalyzer analyzer(*tracker);
+    analyzer.AddGlobalInformation(&global_analyzer);
+    const ThreadActivityAnalyzer::Snapshot& analyzer_snapshot =
+        analyzer.activity_snapshot();
+    ASSERT_EQ(1U, analyzer_snapshot.user_data_stack.size());
+    const ActivityUserData::Snapshot& user_data =
+        analyzer_snapshot.user_data_stack.at(0);
+    EXPECT_EQ(8U, user_data.size());
+    EXPECT_EQ("foo1", user_data.at("raw1").Get().as_string());
+    EXPECT_EQ("bar1", user_data.at("string1").GetString().as_string());
+    EXPECT_EQ('1', user_data.at("char1").GetChar());
+    EXPECT_EQ(-1111, user_data.at("int1").GetInt());
+    EXPECT_EQ(1111U, user_data.at("uint1").GetUint());
+    EXPECT_TRUE(user_data.at("bool1").GetBool());
+    EXPECT_EQ(string1a, user_data.at("ref1").GetReference().data());
+    EXPECT_EQ(sizeof(string1a), user_data.at("ref1").GetReference().size());
+    EXPECT_EQ(string1b, user_data.at("sref1").GetStringReference().data());
+    EXPECT_EQ(strlen(string1b),
+              user_data.at("sref1").GetStringReference().size());
+  }
+
+  ASSERT_TRUE(tracker->CreateSnapshot(&tracker_snapshot));
+  ASSERT_EQ(0U, tracker_snapshot.activity_stack.size());
+}
+
+TEST_F(ActivityAnalyzerTest, GlobalUserDataTest) {
+  const int64_t pid = GetCurrentProcId();
+  GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3, 0);
+
+  const char string1[] = "foo";
+  const char string2[] = "bar";
+
+  PersistentMemoryAllocator* allocator =
+      GlobalActivityTracker::Get()->allocator();
+  GlobalActivityAnalyzer global_analyzer(
+      std::make_unique<PersistentMemoryAllocator>(
+          const_cast<void*>(allocator->data()), allocator->size(), 0, 0, "",
+          true));
+
+  ActivityUserData& process_data = GlobalActivityTracker::Get()->process_data();
+  ASSERT_NE(0U, process_data.id());
+  process_data.Set("raw", "foo", 3);
+  process_data.SetString("string", "bar");
+  process_data.SetChar("char", '9');
+  process_data.SetInt("int", -9999);
+  process_data.SetUint("uint", 9999);
+  process_data.SetBool("bool", true);
+  process_data.SetReference("ref", string1, sizeof(string1));
+  process_data.SetStringReference("sref", string2);
+
+  int64_t first_pid = global_analyzer.GetFirstProcess();
+  DCHECK_EQ(pid, first_pid);
+  const ActivityUserData::Snapshot& snapshot =
+      global_analyzer.GetProcessDataSnapshot(pid);
+  ASSERT_TRUE(ContainsKey(snapshot, "raw"));
+  EXPECT_EQ("foo", snapshot.at("raw").Get().as_string());
+  ASSERT_TRUE(ContainsKey(snapshot, "string"));
+  EXPECT_EQ("bar", snapshot.at("string").GetString().as_string());
+  ASSERT_TRUE(ContainsKey(snapshot, "char"));
+  EXPECT_EQ('9', snapshot.at("char").GetChar());
+  ASSERT_TRUE(ContainsKey(snapshot, "int"));
+  EXPECT_EQ(-9999, snapshot.at("int").GetInt());
+  ASSERT_TRUE(ContainsKey(snapshot, "uint"));
+  EXPECT_EQ(9999U, snapshot.at("uint").GetUint());
+  ASSERT_TRUE(ContainsKey(snapshot, "bool"));
+  EXPECT_TRUE(snapshot.at("bool").GetBool());
+  ASSERT_TRUE(ContainsKey(snapshot, "ref"));
+  EXPECT_EQ(string1, snapshot.at("ref").GetReference().data());
+  EXPECT_EQ(sizeof(string1), snapshot.at("ref").GetReference().size());
+  ASSERT_TRUE(ContainsKey(snapshot, "sref"));
+  EXPECT_EQ(string2, snapshot.at("sref").GetStringReference().data());
+  EXPECT_EQ(strlen(string2), snapshot.at("sref").GetStringReference().size());
+}
+
+TEST_F(ActivityAnalyzerTest, GlobalModulesTest) {
+  GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3, 0);
+  GlobalActivityTracker* global = GlobalActivityTracker::Get();
+
+  PersistentMemoryAllocator* allocator = global->allocator();
+  GlobalActivityAnalyzer global_analyzer(
+      std::make_unique<PersistentMemoryAllocator>(
+          const_cast<void*>(allocator->data()), allocator->size(), 0, 0, "",
+          true));
+
+  GlobalActivityTracker::ModuleInfo info1;
+  info1.is_loaded = true;
+  info1.address = 0x12345678;
+  info1.load_time = 1111;
+  info1.size = 0xABCDEF;
+  info1.timestamp = 111;
+  info1.age = 11;
+  info1.identifier[0] = 1;
+  info1.file = "anything";
+  info1.debug_file = "elsewhere";
+
+  global->RecordModuleInfo(info1);
+  std::vector<GlobalActivityTracker::ModuleInfo> modules1;
+  modules1 = global_analyzer.GetModules(global_analyzer.GetFirstProcess());
+  ASSERT_EQ(1U, modules1.size());
+  GlobalActivityTracker::ModuleInfo& stored1a = modules1[0];
+  EXPECT_EQ(info1.is_loaded, stored1a.is_loaded);
+  EXPECT_EQ(info1.address, stored1a.address);
+  EXPECT_NE(info1.load_time, stored1a.load_time);
+  EXPECT_EQ(info1.size, stored1a.size);
+  EXPECT_EQ(info1.timestamp, stored1a.timestamp);
+  EXPECT_EQ(info1.age, stored1a.age);
+  EXPECT_EQ(info1.identifier[0], stored1a.identifier[0]);
+  EXPECT_EQ(info1.file, stored1a.file);
+  EXPECT_EQ(info1.debug_file, stored1a.debug_file);
+
+  info1.is_loaded = false;
+  global->RecordModuleInfo(info1);
+  modules1 = global_analyzer.GetModules(global_analyzer.GetFirstProcess());
+  ASSERT_EQ(1U, modules1.size());
+  GlobalActivityTracker::ModuleInfo& stored1b = modules1[0];
+  EXPECT_EQ(info1.is_loaded, stored1b.is_loaded);
+  EXPECT_EQ(info1.address, stored1b.address);
+  EXPECT_NE(info1.load_time, stored1b.load_time);
+  EXPECT_EQ(info1.size, stored1b.size);
+  EXPECT_EQ(info1.timestamp, stored1b.timestamp);
+  EXPECT_EQ(info1.age, stored1b.age);
+  EXPECT_EQ(info1.identifier[0], stored1b.identifier[0]);
+  EXPECT_EQ(info1.file, stored1b.file);
+  EXPECT_EQ(info1.debug_file, stored1b.debug_file);
+
+  GlobalActivityTracker::ModuleInfo info2;
+  info2.is_loaded = true;
+  info2.address = 0x87654321;
+  info2.load_time = 2222;
+  info2.size = 0xFEDCBA;
+  info2.timestamp = 222;
+  info2.age = 22;
+  info2.identifier[0] = 2;
+  info2.file = "nothing";
+  info2.debug_file = "farewell";
+
+  global->RecordModuleInfo(info2);
+  std::vector<GlobalActivityTracker::ModuleInfo> modules2;
+  modules2 = global_analyzer.GetModules(global_analyzer.GetFirstProcess());
+  ASSERT_EQ(2U, modules2.size());
+  GlobalActivityTracker::ModuleInfo& stored2 = modules2[1];
+  EXPECT_EQ(info2.is_loaded, stored2.is_loaded);
+  EXPECT_EQ(info2.address, stored2.address);
+  EXPECT_NE(info2.load_time, stored2.load_time);
+  EXPECT_EQ(info2.size, stored2.size);
+  EXPECT_EQ(info2.timestamp, stored2.timestamp);
+  EXPECT_EQ(info2.age, stored2.age);
+  EXPECT_EQ(info2.identifier[0], stored2.identifier[0]);
+  EXPECT_EQ(info2.file, stored2.file);
+  EXPECT_EQ(info2.debug_file, stored2.debug_file);
+}
+
+TEST_F(ActivityAnalyzerTest, GlobalLogMessages) {
+  GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3, 0);
+
+  PersistentMemoryAllocator* allocator =
+      GlobalActivityTracker::Get()->allocator();
+  GlobalActivityAnalyzer analyzer(std::make_unique<PersistentMemoryAllocator>(
+      const_cast<void*>(allocator->data()), allocator->size(), 0, 0, "", true));
+
+  GlobalActivityTracker::Get()->RecordLogMessage("hello world");
+  GlobalActivityTracker::Get()->RecordLogMessage("foo bar");
+
+  std::vector<std::string> messages = analyzer.GetLogMessages();
+  ASSERT_EQ(2U, messages.size());
+  EXPECT_EQ("hello world", messages[0]);
+  EXPECT_EQ("foo bar", messages[1]);
+}
+
+TEST_F(ActivityAnalyzerTest, GlobalMultiProcess) {
+  GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3, 1001);
+  GlobalActivityTracker* global = GlobalActivityTracker::Get();
+  PersistentMemoryAllocator* allocator = global->allocator();
+  EXPECT_EQ(1001, global->process_id());
+
+  int64_t process_id;
+  int64_t create_stamp;
+  ActivityUserData::GetOwningProcessId(
+      GlobalActivityTracker::Get()->process_data().GetBaseAddress(),
+      &process_id, &create_stamp);
+  ASSERT_EQ(1001, process_id);
+
+  GlobalActivityTracker::Get()->process_data().SetInt("pid",
+                                                      global->process_id());
+
+  GlobalActivityAnalyzer analyzer(std::make_unique<PersistentMemoryAllocator>(
+      const_cast<void*>(allocator->data()), allocator->size(), 0, 0, "", true));
+
+  AsOtherProcess(2002, [&global]() {
+    ASSERT_NE(global, GlobalActivityTracker::Get());
+    EXPECT_EQ(2002, GlobalActivityTracker::Get()->process_id());
+
+    int64_t process_id;
+    int64_t create_stamp;
+    ActivityUserData::GetOwningProcessId(
+        GlobalActivityTracker::Get()->process_data().GetBaseAddress(),
+        &process_id, &create_stamp);
+    ASSERT_EQ(2002, process_id);
+
+    GlobalActivityTracker::Get()->process_data().SetInt(
+        "pid", GlobalActivityTracker::Get()->process_id());
+  });
+  ASSERT_EQ(global, GlobalActivityTracker::Get());
+  EXPECT_EQ(1001, GlobalActivityTracker::Get()->process_id());
+
+  const int64_t pid1 = analyzer.GetFirstProcess();
+  ASSERT_EQ(1001, pid1);
+  const int64_t pid2 = analyzer.GetNextProcess();
+  ASSERT_EQ(2002, pid2);
+  EXPECT_EQ(0, analyzer.GetNextProcess());
+
+  const ActivityUserData::Snapshot& pdata1 =
+      analyzer.GetProcessDataSnapshot(pid1);
+  const ActivityUserData::Snapshot& pdata2 =
+      analyzer.GetProcessDataSnapshot(pid2);
+  EXPECT_EQ(1001, pdata1.at("pid").GetInt());
+  EXPECT_EQ(2002, pdata2.at("pid").GetInt());
+}
+
+}  // namespace debug
+}  // namespace base
diff --git a/base/debug/activity_tracker.cc b/base/debug/activity_tracker.cc
new file mode 100644
index 0000000..362013e
--- /dev/null
+++ b/base/debug/activity_tracker.cc
@@ -0,0 +1,1828 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/activity_tracker.h"
+
+#include <algorithm>
+#include <limits>
+#include <utility>
+
+#include "base/atomic_sequence_num.h"
+#include "base/debug/stack_trace.h"
+#include "base/files/file.h"
+#include "base/files/file_path.h"
+#include "base/files/memory_mapped_file.h"
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/metrics/field_trial.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/pending_task.h"
+#include "base/pickle.h"
+#include "base/process/process.h"
+#include "base/process/process_handle.h"
+#include "base/stl_util.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/threading/platform_thread.h"
+#include "build/build_config.h"
+
+namespace base {
+namespace debug {
+
+namespace {
+
+// The minimum depth a stack should support.
+const int kMinStackDepth = 2;
+
+// The amount of memory set aside for holding arbitrary user data (key/value
+// pairs) globally or associated with ActivityData entries.
+const size_t kUserDataSize = 1 << 10;     // 1 KiB
+const size_t kProcessDataSize = 4 << 10;  // 4 KiB
+const size_t kMaxUserDataNameLength =
+    static_cast<size_t>(std::numeric_limits<uint8_t>::max());
+
+// A constant used to indicate that module information is changing.
+const uint32_t kModuleInformationChanging = 0x80000000;
+
+// The key used to record process information.
+const char kProcessPhaseDataKey[] = "process-phase";
+
+// An atomically incrementing number, used to check for recreations of objects
+// in the same memory space.
+AtomicSequenceNumber g_next_id;
+
+union ThreadRef {
+  int64_t as_id;
+#if defined(OS_WIN)
+  // On Windows, the handle itself is often a pseudo-handle with a common
+  // value meaning "this thread" and so the thread-id is used. The former
+  // can be converted to a thread-id with a system call.
+  PlatformThreadId as_tid;
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  // On Posix and Fuchsia, the handle is always a unique identifier so no
+  // conversion needs to be done. However, its value is officially opaque so
+  // there is no one correct way to convert it to a numerical identifier.
+  PlatformThreadHandle::Handle as_handle;
+#endif
+};
+
+// Gets the next non-zero identifier. It is only unique within a process.
+uint32_t GetNextDataId() {
+  uint32_t id;
+  while ((id = g_next_id.GetNext()) == 0)
+    ;
+  return id;
+}
+
+// Gets the current process-id, either from the GlobalActivityTracker if it
+// exists (where the PID can be defined for testing) or from the system if
+// no tracker exists.
+int64_t GetProcessId() {
+  GlobalActivityTracker* global = GlobalActivityTracker::Get();
+  if (global)
+    return global->process_id();
+  return GetCurrentProcId();
+}
+
+// Finds and reuses a specific allocation or creates a new one.
+PersistentMemoryAllocator::Reference AllocateFrom(
+    PersistentMemoryAllocator* allocator,
+    uint32_t from_type,
+    size_t size,
+    uint32_t to_type) {
+  PersistentMemoryAllocator::Iterator iter(allocator);
+  PersistentMemoryAllocator::Reference ref;
+  while ((ref = iter.GetNextOfType(from_type)) != 0) {
+    DCHECK_LE(size, allocator->GetAllocSize(ref));
+    // This can fail if another thread has just taken it. It is assumed that
+    // the memory is cleared during the "free" operation.
+    if (allocator->ChangeType(ref, to_type, from_type, /*clear=*/false))
+      return ref;
+  }
+
+  return allocator->Allocate(size, to_type);
+}
+
+// Determines the previous aligned index.
+size_t RoundDownToAlignment(size_t index, size_t alignment) {
+  return index & (0 - alignment);
+}
+
+// Determines the next aligned index.
+size_t RoundUpToAlignment(size_t index, size_t alignment) {
+  return (index + (alignment - 1)) & (0 - alignment);
+}
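+
+// Both helpers assume |alignment| is a power of two: in unsigned arithmetic,
+// (0 - alignment) equals ~(alignment - 1), a mask that clears the low bits.
+// For example, RoundDownToAlignment(13, 8) == 8, RoundUpToAlignment(13, 8)
+// == 16, and RoundUpToAlignment(16, 8) == 16.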
+
+// Converts "tick" timing into wall time.
+Time WallTimeFromTickTime(int64_t ticks_start, int64_t ticks, Time time_start) {
+  return time_start + TimeDelta::FromInternalValue(ticks - ticks_start);
+}
+
+}  // namespace
+
+OwningProcess::OwningProcess() = default;
+OwningProcess::~OwningProcess() = default;
+
+void OwningProcess::Release_Initialize(int64_t pid) {
+  uint32_t old_id = data_id.load(std::memory_order_acquire);
+  DCHECK_EQ(0U, old_id);
+  process_id = pid != 0 ? pid : GetProcessId();
+  create_stamp = Time::Now().ToInternalValue();
+  data_id.store(GetNextDataId(), std::memory_order_release);
+}
+
+void OwningProcess::SetOwningProcessIdForTesting(int64_t pid, int64_t stamp) {
+  DCHECK_NE(0U, data_id);
+  process_id = pid;
+  create_stamp = stamp;
+}
+
+// static
+bool OwningProcess::GetOwningProcessId(const void* memory,
+                                       int64_t* out_id,
+                                       int64_t* out_stamp) {
+  const OwningProcess* info = reinterpret_cast<const OwningProcess*>(memory);
+  uint32_t id = info->data_id.load(std::memory_order_acquire);
+  if (id == 0)
+    return false;
+
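+  // Read the values, then re-check |data_id|: if the memory was
+  // re-initialized for a different owner in the meantime, the ids won't
+  // match and the values just read are reported as invalid.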
+  *out_id = info->process_id;
+  *out_stamp = info->create_stamp;
+  return id == info->data_id.load(std::memory_order_seq_cst);
+}
+
+// It doesn't matter what is contained in this (though it will be all zeros)
+// as only the address of it is important.
+const ActivityData kNullActivityData = {};
+
+ActivityData ActivityData::ForThread(const PlatformThreadHandle& handle) {
+  ThreadRef thread_ref;
+  thread_ref.as_id = 0;  // Zero the union in case the other member is smaller.
+#if defined(OS_WIN)
+  thread_ref.as_tid = ::GetThreadId(handle.platform_handle());
+#elif defined(OS_POSIX)
+  thread_ref.as_handle = handle.platform_handle();
+#endif
+  return ForThread(thread_ref.as_id);
+}
+
+ActivityTrackerMemoryAllocator::ActivityTrackerMemoryAllocator(
+    PersistentMemoryAllocator* allocator,
+    uint32_t object_type,
+    uint32_t object_free_type,
+    size_t object_size,
+    size_t cache_size,
+    bool make_iterable)
+    : allocator_(allocator),
+      object_type_(object_type),
+      object_free_type_(object_free_type),
+      object_size_(object_size),
+      cache_size_(cache_size),
+      make_iterable_(make_iterable),
+      iterator_(allocator),
+      cache_values_(new Reference[cache_size]),
+      cache_used_(0) {
+  DCHECK(allocator);
+}
+
+ActivityTrackerMemoryAllocator::~ActivityTrackerMemoryAllocator() = default;
+
+ActivityTrackerMemoryAllocator::Reference
+ActivityTrackerMemoryAllocator::GetObjectReference() {
+  // First see if there is a cached value that can be returned. This is much
+  // faster than searching the memory system for free blocks.
+  while (cache_used_ > 0) {
+    Reference cached = cache_values_[--cache_used_];
+    // Change the type of the cached object to the proper type and return it.
+    // If the type-change fails that means another thread has taken this from
+    // under us (via the search below) so ignore it and keep trying. Don't
+    // clear the memory because that was done when the type was made "free".
+    if (allocator_->ChangeType(cached, object_type_, object_free_type_, false))
+      return cached;
+  }
+
+  // Fetch the next "free" object from persistent memory. Rather than restart
+  // the iterator at the head each time and likely waste time going again
+  // through objects that aren't relevant, the iterator continues from where
+  // it last left off and is only reset when the end is reached. If the
+  // returned reference matches |last|, then it has wrapped without finding
+  // anything.
+  const Reference last = iterator_.GetLast();
+  while (true) {
+    uint32_t type;
+    Reference found = iterator_.GetNext(&type);
+    if (found && type == object_free_type_) {
+      // Found a free object. Change it to the proper type and return it. If
+      // the type-change fails that means another thread has taken this from
+      // under us so ignore it and keep trying.
+      if (allocator_->ChangeType(found, object_type_, object_free_type_, false))
+        return found;
+    }
+    if (found == last) {
+      // Wrapped. No desired object was found.
+      break;
+    }
+    if (!found) {
+      // Reached end; start over at the beginning.
+      iterator_.Reset();
+    }
+  }
+
+  // No free block was found so instead allocate a new one.
+  Reference allocated = allocator_->Allocate(object_size_, object_type_);
+  if (allocated && make_iterable_)
+    allocator_->MakeIterable(allocated);
+  return allocated;
+}
+
+void ActivityTrackerMemoryAllocator::ReleaseObjectReference(Reference ref) {
+  // Mark object as free.
+  bool success = allocator_->ChangeType(ref, object_free_type_, object_type_,
+                                        /*clear=*/true);
+  DCHECK(success);
+
+  // Add this reference to our "free" cache if there is space. If not, the type
+  // has still been changed to indicate that it is free so this (or another)
+  // thread can find it, albeit more slowly, using the iteration method above.
+  if (cache_used_ < cache_size_)
+    cache_values_[cache_used_++] = ref;
+}
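+
+// A usage sketch (for illustration only; the type ids, object size and cache
+// size below are hypothetical):
+//
+//   ActivityTrackerMemoryAllocator pool(allocator, /*object_type=*/0x1001,
+//                                       /*object_free_type=*/0x1002,
+//                                       /*object_size=*/256,
+//                                       /*cache_size=*/10,
+//                                       /*make_iterable=*/false);
+//   ActivityTrackerMemoryAllocator::Reference ref = pool.GetObjectReference();
+//   if (ref) {
+//     // ... use the allocation at |ref| ...
+//     pool.ReleaseObjectReference(ref);  // Cleared and cached for reuse.
+//   }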
+
+// static
+void Activity::FillFrom(Activity* activity,
+                        const void* program_counter,
+                        const void* origin,
+                        Type type,
+                        const ActivityData& data) {
+  activity->time_internal = base::TimeTicks::Now().ToInternalValue();
+  activity->calling_address = reinterpret_cast<uintptr_t>(program_counter);
+  activity->origin_address = reinterpret_cast<uintptr_t>(origin);
+  activity->activity_type = type;
+  activity->data = data;
+
+#if (!defined(OS_NACL) && DCHECK_IS_ON()) || defined(ADDRESS_SANITIZER)
+  // Create a stacktrace from the current location and get the addresses for
+  // improved debuggability.
+  StackTrace stack_trace;
+  size_t stack_depth;
+  const void* const* stack_addrs = stack_trace.Addresses(&stack_depth);
+  // Copy the stack addresses, ignoring the first one (here).
+  size_t i;
+  for (i = 1; i < stack_depth && i < kActivityCallStackSize; ++i) {
+    activity->call_stack[i - 1] = reinterpret_cast<uintptr_t>(stack_addrs[i]);
+  }
+  activity->call_stack[i - 1] = 0;
+#else
+  activity->call_stack[0] = 0;
+#endif
+}
+
+ActivityUserData::TypedValue::TypedValue() = default;
+ActivityUserData::TypedValue::TypedValue(const TypedValue& other) = default;
+ActivityUserData::TypedValue::~TypedValue() = default;
+
+StringPiece ActivityUserData::TypedValue::Get() const {
+  DCHECK_EQ(RAW_VALUE, type_);
+  return long_value_;
+}
+
+StringPiece ActivityUserData::TypedValue::GetString() const {
+  DCHECK_EQ(STRING_VALUE, type_);
+  return long_value_;
+}
+
+bool ActivityUserData::TypedValue::GetBool() const {
+  DCHECK_EQ(BOOL_VALUE, type_);
+  return short_value_ != 0;
+}
+
+char ActivityUserData::TypedValue::GetChar() const {
+  DCHECK_EQ(CHAR_VALUE, type_);
+  return static_cast<char>(short_value_);
+}
+
+int64_t ActivityUserData::TypedValue::GetInt() const {
+  DCHECK_EQ(SIGNED_VALUE, type_);
+  return static_cast<int64_t>(short_value_);
+}
+
+uint64_t ActivityUserData::TypedValue::GetUint() const {
+  DCHECK_EQ(UNSIGNED_VALUE, type_);
+  return static_cast<uint64_t>(short_value_);
+}
+
+StringPiece ActivityUserData::TypedValue::GetReference() const {
+  DCHECK_EQ(RAW_VALUE_REFERENCE, type_);
+  return ref_value_;
+}
+
+StringPiece ActivityUserData::TypedValue::GetStringReference() const {
+  DCHECK_EQ(STRING_VALUE_REFERENCE, type_);
+  return ref_value_;
+}
+
+// These are required because std::atomic is (currently) not a POD type and
+// thus clang requires explicit out-of-line constructors and destructors even
+// when they do nothing.
+ActivityUserData::ValueInfo::ValueInfo() = default;
+ActivityUserData::ValueInfo::ValueInfo(ValueInfo&&) = default;
+ActivityUserData::ValueInfo::~ValueInfo() = default;
+ActivityUserData::MemoryHeader::MemoryHeader() = default;
+ActivityUserData::MemoryHeader::~MemoryHeader() = default;
+ActivityUserData::FieldHeader::FieldHeader() = default;
+ActivityUserData::FieldHeader::~FieldHeader() = default;
+
+ActivityUserData::ActivityUserData() : ActivityUserData(nullptr, 0, -1) {}
+
+ActivityUserData::ActivityUserData(void* memory, size_t size, int64_t pid)
+    : memory_(reinterpret_cast<char*>(memory)),
+      available_(RoundDownToAlignment(size, kMemoryAlignment)),
+      header_(reinterpret_cast<MemoryHeader*>(memory)),
+      orig_data_id(0),
+      orig_process_id(0),
+      orig_create_stamp(0) {
+  // It's possible that no user data is being stored.
+  if (!memory_)
+    return;
+
+  static_assert(0 == sizeof(MemoryHeader) % kMemoryAlignment, "invalid header");
+  DCHECK_LT(sizeof(MemoryHeader), available_);
+  if (header_->owner.data_id.load(std::memory_order_acquire) == 0)
+    header_->owner.Release_Initialize(pid);
+  memory_ += sizeof(MemoryHeader);
+  available_ -= sizeof(MemoryHeader);
+
+  // Make a copy of identifying information for later comparison.
+  *const_cast<uint32_t*>(&orig_data_id) =
+      header_->owner.data_id.load(std::memory_order_acquire);
+  *const_cast<int64_t*>(&orig_process_id) = header_->owner.process_id;
+  *const_cast<int64_t*>(&orig_create_stamp) = header_->owner.create_stamp;
+
+  // If there is already data present, load that. This allows the same class
+  // to be used for analysis through snapshots.
+  ImportExistingData();
+}
+
+ActivityUserData::~ActivityUserData() = default;
+
+bool ActivityUserData::CreateSnapshot(Snapshot* output_snapshot) const {
+  DCHECK(output_snapshot);
+  DCHECK(output_snapshot->empty());
+
+  // Find any new data that may have been added by an active instance of this
+  // class that is adding records.
+  ImportExistingData();
+
+  // Add all the values to the snapshot.
+  for (const auto& entry : values_) {
+    TypedValue value;
+    const size_t size = entry.second.size_ptr->load(std::memory_order_acquire);
+    value.type_ = entry.second.type;
+    DCHECK_GE(entry.second.extent, size);
+
+    switch (entry.second.type) {
+      case RAW_VALUE:
+      case STRING_VALUE:
+        value.long_value_ =
+            std::string(reinterpret_cast<char*>(entry.second.memory), size);
+        break;
+      case RAW_VALUE_REFERENCE:
+      case STRING_VALUE_REFERENCE: {
+        ReferenceRecord* ref =
+            reinterpret_cast<ReferenceRecord*>(entry.second.memory);
+        value.ref_value_ = StringPiece(
+            reinterpret_cast<char*>(static_cast<uintptr_t>(ref->address)),
+            static_cast<size_t>(ref->size));
+      } break;
+      case BOOL_VALUE:
+      case CHAR_VALUE:
+        value.short_value_ = *reinterpret_cast<char*>(entry.second.memory);
+        break;
+      case SIGNED_VALUE:
+      case UNSIGNED_VALUE:
+        value.short_value_ = *reinterpret_cast<uint64_t*>(entry.second.memory);
+        break;
+      case END_OF_VALUES:  // Included for completeness purposes.
+        NOTREACHED();
+    }
+    auto inserted = output_snapshot->insert(
+        std::make_pair(entry.second.name.as_string(), std::move(value)));
+    DCHECK(inserted.second);  // True if inserted, false if existed.
+  }
+
+  // Another import attempt will validate that the underlying memory has not
+  // been reused for another purpose. Entries added since the first import
+  // will be ignored here but will be returned if another snapshot is created.
+  ImportExistingData();
+  if (!memory_) {
+    output_snapshot->clear();
+    return false;
+  }
+
+  // Successful snapshot.
+  return true;
+}
+
+const void* ActivityUserData::GetBaseAddress() const {
+  // The |memory_| pointer advances as elements are written but the |header_|
+  // value is always at the start of the block so just return that.
+  return header_;
+}
+
+void ActivityUserData::SetOwningProcessIdForTesting(int64_t pid,
+                                                    int64_t stamp) {
+  if (!header_)
+    return;
+  header_->owner.SetOwningProcessIdForTesting(pid, stamp);
+}
+
+// static
+bool ActivityUserData::GetOwningProcessId(const void* memory,
+                                          int64_t* out_id,
+                                          int64_t* out_stamp) {
+  const MemoryHeader* header = reinterpret_cast<const MemoryHeader*>(memory);
+  return OwningProcess::GetOwningProcessId(&header->owner, out_id, out_stamp);
+}
+
+void ActivityUserData::Set(StringPiece name,
+                           ValueType type,
+                           const void* memory,
+                           size_t size) {
+  DCHECK_GE(std::numeric_limits<uint8_t>::max(), name.length());
+  size = std::min(std::numeric_limits<uint16_t>::max() - (kMemoryAlignment - 1),
+                  size);
+
+  // It's possible that no user data is being stored.
+  if (!memory_)
+    return;
+
+  // The storage of a name is limited so use that limit during lookup.
+  if (name.length() > kMaxUserDataNameLength)
+    name.set(name.data(), kMaxUserDataNameLength);
+
+  ValueInfo* info;
+  auto existing = values_.find(name);
+  if (existing != values_.end()) {
+    info = &existing->second;
+  } else {
+    // The name size is limited to what can be held in a single byte but,
+    // because there are no alignment constraints on strings, it's set tight
+    // against the header. Its extent (the reserved space, even if it's not
+    // all used) is calculated so that, when pressed against the header, the
+    // following field will be aligned properly.
+    size_t name_size = name.length();
+    size_t name_extent =
+        RoundUpToAlignment(sizeof(FieldHeader) + name_size, kMemoryAlignment) -
+        sizeof(FieldHeader);
+    size_t value_extent = RoundUpToAlignment(size, kMemoryAlignment);
+
+    // The "base size" is the size of the header and (padded) string key. Stop
+    // now if there's not room enough for even this.
+    size_t base_size = sizeof(FieldHeader) + name_extent;
+    if (base_size > available_)
+      return;
+
+    // The "full size" is the size for storing the entire value.
+    size_t full_size = std::min(base_size + value_extent, available_);
+
+    // If the value is actually a single byte, see if it can be stuffed at the
+    // end of the name extent rather than wasting kMemoryAlignment bytes.
+    if (size == 1 && name_extent > name_size) {
+      full_size = base_size;
+      --name_extent;
+      --base_size;
+    }
+
+    // Truncate the stored size to the amount of available memory. Stop now if
+    // there's no room for even part of the value.
+    if (size != 0) {
+      size = std::min(full_size - base_size, size);
+      if (size == 0)
+        return;
+    }
+
+    // Allocate a chunk of memory.
+    FieldHeader* header = reinterpret_cast<FieldHeader*>(memory_);
+    memory_ += full_size;
+    available_ -= full_size;
+
+    // Datafill the header and name records. Memory must be zeroed. The |type|
+    // is written last, atomically, to release all the other values.
+    DCHECK_EQ(END_OF_VALUES, header->type.load(std::memory_order_relaxed));
+    DCHECK_EQ(0, header->value_size.load(std::memory_order_relaxed));
+    header->name_size = static_cast<uint8_t>(name_size);
+    header->record_size = full_size;
+    char* name_memory = reinterpret_cast<char*>(header) + sizeof(FieldHeader);
+    void* value_memory =
+        reinterpret_cast<char*>(header) + sizeof(FieldHeader) + name_extent;
+    memcpy(name_memory, name.data(), name_size);
+    header->type.store(type, std::memory_order_release);
+
+    // Create an entry in |values_| so that this field can be found and changed
+    // later on without having to allocate new entries.
+    StringPiece persistent_name(name_memory, name_size);
+    auto inserted =
+        values_.insert(std::make_pair(persistent_name, ValueInfo()));
+    DCHECK(inserted.second);  // True if inserted, false if existed.
+    info = &inserted.first->second;
+    info->name = persistent_name;
+    info->memory = value_memory;
+    info->size_ptr = &header->value_size;
+    info->extent = full_size - sizeof(FieldHeader) - name_extent;
+    info->type = type;
+  }
+
+  // Copy the value data to storage. The |size| is written last, atomically, to
+  // release the copied data. Until then, a parallel reader will just ignore
+  // records with a zero size.
+  DCHECK_EQ(type, info->type);
+  size = std::min(size, info->extent);
+  info->size_ptr->store(0, std::memory_order_seq_cst);
+  memcpy(info->memory, memory, size);
+  info->size_ptr->store(size, std::memory_order_release);
+}
+
+void ActivityUserData::SetReference(StringPiece name,
+                                    ValueType type,
+                                    const void* memory,
+                                    size_t size) {
+  ReferenceRecord rec;
+  rec.address = reinterpret_cast<uintptr_t>(memory);
+  rec.size = size;
+  Set(name, type, &rec, sizeof(rec));
+}
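+
+// Example (sketch): the typed setters declared in the header all funnel into
+// Set() or SetReference() above. Names are truncated to
+// kMaxUserDataNameLength and values to the space remaining in the block.
+// |buffer| and |buffer_size| here are hypothetical.
+//
+//   user_data.SetString("url", "https://example.com/");   // value is copied
+//   user_data.SetInt("count", 42);                        // 64-bit signed
+//   user_data.SetReference("big", buffer, buffer_size);   // address+size only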
+
+void ActivityUserData::ImportExistingData() const {
+  // It's possible that no user data is being stored.
+  if (!memory_)
+    return;
+
+  while (available_ > sizeof(FieldHeader)) {
+    FieldHeader* header = reinterpret_cast<FieldHeader*>(memory_);
+    ValueType type =
+        static_cast<ValueType>(header->type.load(std::memory_order_acquire));
+    if (type == END_OF_VALUES)
+      return;
+    if (header->record_size > available_)
+      return;
+
+    size_t value_offset = RoundUpToAlignment(
+        sizeof(FieldHeader) + header->name_size, kMemoryAlignment);
+    if (header->record_size == value_offset &&
+        header->value_size.load(std::memory_order_relaxed) == 1) {
+      value_offset -= 1;
+    }
+    if (value_offset + header->value_size > header->record_size)
+      return;
+
+    ValueInfo info;
+    info.name = StringPiece(memory_ + sizeof(FieldHeader), header->name_size);
+    info.type = type;
+    info.memory = memory_ + value_offset;
+    info.size_ptr = &header->value_size;
+    info.extent = header->record_size - value_offset;
+
+    StringPiece key(info.name);
+    values_.insert(std::make_pair(key, std::move(info)));
+
+    memory_ += header->record_size;
+    available_ -= header->record_size;
+  }
+
+  // Check if memory has been completely reused.
+  if (header_->owner.data_id.load(std::memory_order_acquire) != orig_data_id ||
+      header_->owner.process_id != orig_process_id ||
+      header_->owner.create_stamp != orig_create_stamp) {
+    memory_ = nullptr;
+    values_.clear();
+  }
+}
+
+// This information is kept for every thread that is tracked. It is filled
+// the very first time the thread is seen. All fields must be of exact sizes
+// so there is no issue moving between 32- and 64-bit builds.
+struct ThreadActivityTracker::Header {
+  // Defined in .h for analyzer access. Increment this if structure changes!
+  static constexpr uint32_t kPersistentTypeId =
+      GlobalActivityTracker::kTypeIdActivityTracker;
+
+  // Expected size for 32/64-bit check.
+  static constexpr size_t kExpectedInstanceSize =
+      OwningProcess::kExpectedInstanceSize + Activity::kExpectedInstanceSize +
+      72;
+
+  // This information uniquely identifies a process.
+  OwningProcess owner;
+
+  // The thread-id (thread_ref.as_id) to which this data belongs. This number
+  // is not guaranteed to mean anything but combined with the process-id from
+  // OwningProcess is unique among all active trackers.
+  ThreadRef thread_ref;
+
+  // The start-time and start-ticks when the data was created. Each activity
+  // record has a |time_internal| value that can be converted to a "wall time"
+  // with these two values.
+  int64_t start_time;
+  int64_t start_ticks;
+
+  // The number of Activity slots (spaces that can hold an Activity) that
+  // immediately follow this structure in memory.
+  uint32_t stack_slots;
+
+  // Some padding to keep everything 64-bit aligned.
+  uint32_t padding;
+
+  // The current depth of the stack. This may be greater than the number of
+  // slots. If the depth exceeds the number of slots, the newest entries
+  // won't be recorded.
+  std::atomic<uint32_t> current_depth;
+
+  // A memory location used to indicate if changes have been made to the data
+  // that would invalidate an in-progress read of its contents. The active
+  // tracker will increment the value whenever something gets popped from the
+  // stack. A monitoring tracker can check the value before and after access
+  // to know, if it's still the same, that the contents didn't change while
+  // being copied.
+  std::atomic<uint32_t> data_version;
+
+  // The last "exception" activity. This can't be stored on the stack because
+  // that could get popped as things unwind.
+  Activity last_exception;
+
+  // The name of the thread (up to a maximum length). Dynamic-length names
+  // are not practical since the memory has to come from the same persistent
+  // allocator that holds this structure and to which this object has no
+  // reference.
+  char thread_name[32];
+};
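+
+// Memory layout managed by ThreadActivityTracker (see the constructor below):
+// the Header is followed immediately by |stack_slots| fixed-size Activity
+// records.
+//
+//   [ Header | Activity[0] | Activity[1] | ... | Activity[stack_slots-1] ]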
+
+ThreadActivityTracker::Snapshot::Snapshot() = default;
+ThreadActivityTracker::Snapshot::~Snapshot() = default;
+
+ThreadActivityTracker::ScopedActivity::ScopedActivity(
+    ThreadActivityTracker* tracker,
+    const void* program_counter,
+    const void* origin,
+    Activity::Type type,
+    const ActivityData& data)
+    : tracker_(tracker) {
+  if (tracker_)
+    activity_id_ = tracker_->PushActivity(program_counter, origin, type, data);
+}
+
+ThreadActivityTracker::ScopedActivity::~ScopedActivity() {
+  if (tracker_)
+    tracker_->PopActivity(activity_id_);
+}
+
+void ThreadActivityTracker::ScopedActivity::ChangeTypeAndData(
+    Activity::Type type,
+    const ActivityData& data) {
+  if (tracker_)
+    tracker_->ChangeActivity(activity_id_, type, data);
+}
+
+ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size)
+    : header_(static_cast<Header*>(base)),
+      stack_(reinterpret_cast<Activity*>(reinterpret_cast<char*>(base) +
+                                         sizeof(Header))),
+#if DCHECK_IS_ON()
+      thread_id_(PlatformThreadRef()),
+#endif
+      stack_slots_(
+          static_cast<uint32_t>((size - sizeof(Header)) / sizeof(Activity))) {
+
+  // Verify the parameters but fail gracefully if they're not valid so that
+  // production code based on external inputs will not crash.  IsValid() will
+  // return false in this case.
+  if (!base ||
+      // Ensure there is enough space for the header and at least a few records.
+      size < sizeof(Header) + kMinStackDepth * sizeof(Activity) ||
+      // Ensure that the |stack_slots_| calculation didn't overflow.
+      (size - sizeof(Header)) / sizeof(Activity) >
+          std::numeric_limits<uint32_t>::max()) {
+    NOTREACHED();
+    return;
+  }
+
+  // Ensure that the thread reference doesn't exceed the size of the ID number.
+  // This won't compile at the global scope because Header is a private struct.
+  static_assert(
+      sizeof(header_->thread_ref) == sizeof(header_->thread_ref.as_id),
+      "PlatformThreadHandle::Handle is too big to hold in 64-bit ID");
+
+  // Ensure that Activity.data is aligned to a 64-bit boundary so there are
+  // no interoperability issues across CPU architectures.
+  static_assert(offsetof(Activity, data) % sizeof(uint64_t) == 0,
+                "ActivityData.data is not 64-bit aligned");
+
+  // Provided memory should either be completely initialized or all zeros.
+  if (header_->owner.data_id.load(std::memory_order_relaxed) == 0) {
+    // This is a new file. Double-check other fields and then initialize.
+    DCHECK_EQ(0, header_->owner.process_id);
+    DCHECK_EQ(0, header_->owner.create_stamp);
+    DCHECK_EQ(0, header_->thread_ref.as_id);
+    DCHECK_EQ(0, header_->start_time);
+    DCHECK_EQ(0, header_->start_ticks);
+    DCHECK_EQ(0U, header_->stack_slots);
+    DCHECK_EQ(0U, header_->current_depth.load(std::memory_order_relaxed));
+    DCHECK_EQ(0U, header_->data_version.load(std::memory_order_relaxed));
+    DCHECK_EQ(0, stack_[0].time_internal);
+    DCHECK_EQ(0U, stack_[0].origin_address);
+    DCHECK_EQ(0U, stack_[0].call_stack[0]);
+    DCHECK_EQ(0U, stack_[0].data.task.sequence_id);
+
+#if defined(OS_WIN)
+    header_->thread_ref.as_tid = PlatformThread::CurrentId();
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+    header_->thread_ref.as_handle =
+        PlatformThread::CurrentHandle().platform_handle();
+#endif
+
+    header_->start_time = base::Time::Now().ToInternalValue();
+    header_->start_ticks = base::TimeTicks::Now().ToInternalValue();
+    header_->stack_slots = stack_slots_;
+    strlcpy(header_->thread_name, PlatformThread::GetName(),
+            sizeof(header_->thread_name));
+
+    // This is done last so as to guarantee that everything above is "released"
+    // by the time this value gets written.
+    header_->owner.Release_Initialize();
+
+    valid_ = true;
+    DCHECK(IsValid());
+  } else {
+    // This is a file with existing data. Perform basic consistency checks.
+    valid_ = true;
+    valid_ = IsValid();
+  }
+}
+
+ThreadActivityTracker::~ThreadActivityTracker() = default;
+
+ThreadActivityTracker::ActivityId ThreadActivityTracker::PushActivity(
+    const void* program_counter,
+    const void* origin,
+    Activity::Type type,
+    const ActivityData& data) {
+  // A thread-checker creates a lock to check the thread-id, which means
+  // re-entry into this code if lock acquisitions are being tracked.
+  DCHECK(type == Activity::ACT_LOCK_ACQUIRE || CalledOnValidThread());
+
+  // Get the current depth of the stack. No access to other memory guarded
+  // by this variable is done here so a "relaxed" load is acceptable.
+  uint32_t depth = header_->current_depth.load(std::memory_order_relaxed);
+
+  // Handle the case where the stack depth has exceeded the storage capacity.
+  // Extra entries will be lost, leaving only the base of the stack.
+  if (depth >= stack_slots_) {
+    // Since no other threads modify the data, no compare/exchange is needed.
+    // Since no other memory is being modified, a "relaxed" store is acceptable.
+    header_->current_depth.store(depth + 1, std::memory_order_relaxed);
+    return depth;
+  }
+
+  // Get a pointer to the next activity and load it. No atomicity is required
+  // here because the memory is known only to this thread. It will be made
+  // known to other threads once the depth is incremented.
+  Activity::FillFrom(&stack_[depth], program_counter, origin, type, data);
+
+  // Save the incremented depth. Because this guards |activity| memory filled
+  // above that may be read by another thread once the recorded depth changes,
+  // a "release" store is required.
+  header_->current_depth.store(depth + 1, std::memory_order_release);
+
+  // The current depth is used as the activity ID because it simply identifies
+  // an entry. Once an entry is popped, it's okay to reuse the ID.
+  return depth;
+}
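+
+// Illustrative sketch of the behavior above (|tracker|, |pc| and |origin| are
+// placeholders): for a tracker created with four stack slots,
+//
+//   ThreadActivityTracker::ActivityId id = tracker->PushActivity(
+//       pc, origin, Activity::ACT_GENERIC, ActivityData::ForGeneric(1, 0));
+//
+// returns 0 for the first push and 4, 5, ... for pushes beyond the slot
+// limit; those overflow entries are counted in |current_depth| but never
+// stored, so only the base of the stack appears in a snapshot.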
+
+void ThreadActivityTracker::ChangeActivity(ActivityId id,
+                                           Activity::Type type,
+                                           const ActivityData& data) {
+  DCHECK(CalledOnValidThread());
+  DCHECK(type != Activity::ACT_NULL || &data != &kNullActivityData);
+  DCHECK_LT(id, header_->current_depth.load(std::memory_order_acquire));
+
+  // Update the information if it is being recorded (i.e. within slot limit).
+  if (id < stack_slots_) {
+    Activity* activity = &stack_[id];
+
+    if (type != Activity::ACT_NULL) {
+      DCHECK_EQ(activity->activity_type & Activity::ACT_CATEGORY_MASK,
+                type & Activity::ACT_CATEGORY_MASK);
+      activity->activity_type = type;
+    }
+
+    if (&data != &kNullActivityData)
+      activity->data = data;
+  }
+}
+
+void ThreadActivityTracker::PopActivity(ActivityId id) {
+  // Do an atomic decrement of the depth. No changes to stack entries guarded
+  // by this variable are done here so a "relaxed" operation is acceptable.
+  // |depth| will receive the value BEFORE it was modified which means the
+  // return value must also be decremented. The slot will be "free" after
+  // this call but since only a single thread can access this object, the
+  // data will remain valid until this method returns or calls outside.
+  uint32_t depth =
+      header_->current_depth.fetch_sub(1, std::memory_order_relaxed) - 1;
+
+  // Validate that everything is running correctly.
+  DCHECK_EQ(id, depth);
+
+  // A thread-checker creates a lock to check the thread-id which means
+  // re-entry into this code if lock acquisitions are being tracked.
+  DCHECK(stack_[depth].activity_type == Activity::ACT_LOCK_ACQUIRE ||
+         CalledOnValidThread());
+
+  // The stack has shrunk, meaning that some other thread trying to copy the
+  // contents for reporting purposes could get bad data. Increment the data
+  // version so that it can tell that things have changed. This needs to
+  // happen after the atomic |depth| operation above so a "release" store
+  // is required.
+  header_->data_version.fetch_add(1, std::memory_order_release);
+}
+
+std::unique_ptr<ActivityUserData> ThreadActivityTracker::GetUserData(
+    ActivityId id,
+    ActivityTrackerMemoryAllocator* allocator) {
+  // Don't allow user data for lock acquisition as recursion may occur.
+  if (stack_[id].activity_type == Activity::ACT_LOCK_ACQUIRE) {
+    NOTREACHED();
+    return std::make_unique<ActivityUserData>();
+  }
+
+  // User-data is only stored for activities actually held in the stack.
+  if (id >= stack_slots_)
+    return std::make_unique<ActivityUserData>();
+
+  // Create and return a real UserData object.
+  return CreateUserDataForActivity(&stack_[id], allocator);
+}
+
+bool ThreadActivityTracker::HasUserData(ActivityId id) {
+  // User-data is only stored for activities actually held in the stack.
+  return (id < stack_slots_ && stack_[id].user_data_ref);
+}
+
+void ThreadActivityTracker::ReleaseUserData(
+    ActivityId id,
+    ActivityTrackerMemoryAllocator* allocator) {
+  // User-data is only stored for activities actually held in the stack.
+  if (id < stack_slots_ && stack_[id].user_data_ref) {
+    allocator->ReleaseObjectReference(stack_[id].user_data_ref);
+    stack_[id].user_data_ref = 0;
+  }
+}
+
+void ThreadActivityTracker::RecordExceptionActivity(const void* program_counter,
+                                                    const void* origin,
+                                                    Activity::Type type,
+                                                    const ActivityData& data) {
+  // A thread-checker creates a lock to check the thread-id which means
+  // re-entry into this code if lock acquisitions are being tracked.
+  DCHECK(CalledOnValidThread());
+
+  // Fill the reusable exception activity.
+  Activity::FillFrom(&header_->last_exception, program_counter, origin, type,
+                     data);
+
+  // The data has changed meaning that some other thread trying to copy the
+  // contents for reporting purposes could get bad data.
+  header_->data_version.fetch_add(1, std::memory_order_relaxed);
+}
+
+bool ThreadActivityTracker::IsValid() const {
+  if (header_->owner.data_id.load(std::memory_order_acquire) == 0 ||
+      header_->owner.process_id == 0 || header_->thread_ref.as_id == 0 ||
+      header_->start_time == 0 || header_->start_ticks == 0 ||
+      header_->stack_slots != stack_slots_ ||
+      header_->thread_name[sizeof(header_->thread_name) - 1] != '\0') {
+    return false;
+  }
+
+  return valid_;
+}
+
+bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const {
+  DCHECK(output_snapshot);
+
+  // There is no "called on valid thread" check for this method as it can be
+  // called from other threads or even other processes. It is also the reason
+  // why atomic operations must be used in certain places above.
+
+  // It's possible for the data to change while reading it in such a way that it
+  // invalidates the read. Make several attempts but don't try forever.
+  const int kMaxAttempts = 10;
+  uint32_t depth;
+
+  // Stop here if the data isn't valid.
+  if (!IsValid())
+    return false;
+
+  // Allocate the maximum size for the stack so it doesn't have to be done
+  // during the time-sensitive snapshot operation. It is shrunk once the
+  // actual size is known.
+  output_snapshot->activity_stack.reserve(stack_slots_);
+
+  for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
+    // Remember the data IDs to ensure nothing is replaced during the snapshot
+    // operation. Use "acquire" so that all the non-atomic fields of the
+    // structure are valid (at least at the current moment in time).
+    const uint32_t starting_id =
+        header_->owner.data_id.load(std::memory_order_acquire);
+    const int64_t starting_create_stamp = header_->owner.create_stamp;
+    const int64_t starting_process_id = header_->owner.process_id;
+    const int64_t starting_thread_id = header_->thread_ref.as_id;
+
+    // Note the current |data_version| so it's possible to detect at the end
+    // that nothing has changed since copying the data began. A "cst" operation
+    // is required to ensure it occurs before everything else. Using "cst"
+    // memory ordering is relatively expensive but this is only done during
+    // analysis so doesn't directly affect the worker threads.
+    const uint32_t pre_version =
+        header_->data_version.load(std::memory_order_seq_cst);
+
+    // Fetching the current depth also "acquires" the contents of the stack.
+    depth = header_->current_depth.load(std::memory_order_acquire);
+    uint32_t count = std::min(depth, stack_slots_);
+    output_snapshot->activity_stack.resize(count);
+    if (count > 0) {
+      // Copy the existing contents. Memcpy is used for speed.
+      memcpy(&output_snapshot->activity_stack[0], stack_,
+             count * sizeof(Activity));
+    }
+
+    // Capture the last exception.
+    memcpy(&output_snapshot->last_exception, &header_->last_exception,
+           sizeof(Activity));
+
+    // TODO(bcwhite): Snapshot other things here.
+
+    // Retry if something changed during the copy. A "cst" operation ensures
+    // it must happen after all the above operations.
+    if (header_->data_version.load(std::memory_order_seq_cst) != pre_version)
+      continue;
+
+    // Stack copied. Record its full depth.
+    output_snapshot->activity_stack_depth = depth;
+
+    // Get the general thread information.
+    output_snapshot->thread_name =
+        std::string(header_->thread_name, sizeof(header_->thread_name) - 1);
+    output_snapshot->create_stamp = header_->owner.create_stamp;
+    output_snapshot->thread_id = header_->thread_ref.as_id;
+    output_snapshot->process_id = header_->owner.process_id;
+
+    // All characters of the thread-name buffer were copied so as to not break
+    // if the trailing NUL is missing. Now limit the length if the actual
+    // name is shorter.
+    output_snapshot->thread_name.resize(
+        strlen(output_snapshot->thread_name.c_str()));
+
+    // If the data ID has changed then the tracker has exited and the memory
+    // reused by a new one. Try again.
+    if (header_->owner.data_id.load(std::memory_order_seq_cst) != starting_id ||
+        output_snapshot->create_stamp != starting_create_stamp ||
+        output_snapshot->process_id != starting_process_id ||
+        output_snapshot->thread_id != starting_thread_id) {
+      continue;
+    }
+
+    // Only successful if the data is still valid once everything is done since
+    // it's possible for the thread to end somewhere in the middle and all its
+    // values become garbage.
+    if (!IsValid())
+      return false;
+
+    // Change all the timestamps in the activities from "ticks" to "wall" time.
+    const Time start_time = Time::FromInternalValue(header_->start_time);
+    const int64_t start_ticks = header_->start_ticks;
+    for (Activity& activity : output_snapshot->activity_stack) {
+      activity.time_internal =
+          WallTimeFromTickTime(start_ticks, activity.time_internal, start_time)
+              .ToInternalValue();
+    }
+    output_snapshot->last_exception.time_internal =
+        WallTimeFromTickTime(start_ticks,
+                             output_snapshot->last_exception.time_internal,
+                             start_time)
+            .ToInternalValue();
+
+    // Success!
+    return true;
+  }
+
+  // Too many attempts.
+  return false;
+}
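+
+// Distilled sketch of the optimistic-read protocol used above (|version|
+// stands in for |data_version|; not a complete implementation):
+//
+//   uint32_t before = version.load(std::memory_order_seq_cst);
+//   CopyData();  // May race with a writer on the tracked thread.
+//   if (version.load(std::memory_order_seq_cst) == before)
+//     return true;  // Nothing changed mid-copy; the data is consistent.
+//   // Otherwise retry, giving up after kMaxAttempts tries.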
+
+const void* ThreadActivityTracker::GetBaseAddress() {
+  return header_;
+}
+
+uint32_t ThreadActivityTracker::GetDataVersionForTesting() {
+  return header_->data_version.load(std::memory_order_relaxed);
+}
+
+void ThreadActivityTracker::SetOwningProcessIdForTesting(int64_t pid,
+                                                         int64_t stamp) {
+  header_->owner.SetOwningProcessIdForTesting(pid, stamp);
+}
+
+// static
+bool ThreadActivityTracker::GetOwningProcessId(const void* memory,
+                                               int64_t* out_id,
+                                               int64_t* out_stamp) {
+  const Header* header = reinterpret_cast<const Header*>(memory);
+  return OwningProcess::GetOwningProcessId(&header->owner, out_id, out_stamp);
+}
+
+// static
+size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) {
+  return static_cast<size_t>(stack_depth) * sizeof(Activity) + sizeof(Header);
+}
+
+bool ThreadActivityTracker::CalledOnValidThread() {
+#if DCHECK_IS_ON()
+  return thread_id_ == PlatformThreadRef();
+#else
+  return true;
+#endif
+}
+
+std::unique_ptr<ActivityUserData>
+ThreadActivityTracker::CreateUserDataForActivity(
+    Activity* activity,
+    ActivityTrackerMemoryAllocator* allocator) {
+  DCHECK_EQ(0U, activity->user_data_ref);
+
+  PersistentMemoryAllocator::Reference ref = allocator->GetObjectReference();
+  void* memory = allocator->GetAsArray<char>(ref, kUserDataSize);
+  if (memory) {
+    std::unique_ptr<ActivityUserData> user_data =
+        std::make_unique<ActivityUserData>(memory, kUserDataSize);
+    activity->user_data_ref = ref;
+    activity->user_data_id = user_data->id();
+    return user_data;
+  }
+
+  // Return a dummy object that will still accept (but ignore) Set() calls.
+  return std::make_unique<ActivityUserData>();
+}
+
+// The instantiation of the GlobalActivityTracker object.
+// The object held here will obviously not be destructed at process exit,
+// but that's best since PersistentMemoryAllocator objects (which underlie
+// GlobalActivityTracker objects) are explicitly forbidden from doing anything
+// essential at exit anyway because they depend on data managed elsewhere
+// that could be destructed first. An AtomicWord is used instead
+// of std::atomic because the latter can create global ctors and dtors.
+subtle::AtomicWord GlobalActivityTracker::g_tracker_ = 0;
+
+GlobalActivityTracker::ModuleInfo::ModuleInfo() = default;
+GlobalActivityTracker::ModuleInfo::ModuleInfo(ModuleInfo&& rhs) = default;
+GlobalActivityTracker::ModuleInfo::ModuleInfo(const ModuleInfo& rhs) = default;
+GlobalActivityTracker::ModuleInfo::~ModuleInfo() = default;
+
+GlobalActivityTracker::ModuleInfo& GlobalActivityTracker::ModuleInfo::operator=(
+    ModuleInfo&& rhs) = default;
+GlobalActivityTracker::ModuleInfo& GlobalActivityTracker::ModuleInfo::operator=(
+    const ModuleInfo& rhs) = default;
+
+GlobalActivityTracker::ModuleInfoRecord::ModuleInfoRecord() = default;
+GlobalActivityTracker::ModuleInfoRecord::~ModuleInfoRecord() = default;
+
+bool GlobalActivityTracker::ModuleInfoRecord::DecodeTo(
+    GlobalActivityTracker::ModuleInfo* info,
+    size_t record_size) const {
+  // Get the current "changes" indicator, acquiring all the other values.
+  uint32_t current_changes = changes.load(std::memory_order_acquire);
+
+  // Copy out the dynamic information.
+  info->is_loaded = loaded != 0;
+  info->address = static_cast<uintptr_t>(address);
+  info->load_time = load_time;
+
+  // Check to make sure no information changed while being read. A "seq-cst"
+  // operation is expensive but is only done during analysis and it's the only
+  // way to ensure this occurs after all the accesses above. If changes did
+  // occur then return a "not loaded" result so that |size| and |address|
+  // aren't expected to be accurate.
+  if ((current_changes & kModuleInformationChanging) != 0 ||
+      changes.load(std::memory_order_seq_cst) != current_changes) {
+    info->is_loaded = false;
+  }
+
+  // Copy out the static information. These never change so don't have to be
+  // protected by the atomic |current_changes| operations.
+  info->size = static_cast<size_t>(size);
+  info->timestamp = timestamp;
+  info->age = age;
+  memcpy(info->identifier, identifier, sizeof(info->identifier));
+
+  if (offsetof(ModuleInfoRecord, pickle) + pickle_size > record_size)
+    return false;
+  Pickle pickler(pickle, pickle_size);
+  PickleIterator iter(pickler);
+  return iter.ReadString(&info->file) && iter.ReadString(&info->debug_file);
+}
+
+GlobalActivityTracker::ModuleInfoRecord*
+GlobalActivityTracker::ModuleInfoRecord::CreateFrom(
+    const GlobalActivityTracker::ModuleInfo& info,
+    PersistentMemoryAllocator* allocator) {
+  Pickle pickler;
+  pickler.WriteString(info.file);
+  pickler.WriteString(info.debug_file);
+  size_t required_size = offsetof(ModuleInfoRecord, pickle) + pickler.size();
+  ModuleInfoRecord* record = allocator->New<ModuleInfoRecord>(required_size);
+  if (!record)
+    return nullptr;
+
+  // These fields never change and are set before the record is made
+  // iterable, so no thread protection is necessary.
+  record->size = info.size;
+  record->timestamp = info.timestamp;
+  record->age = info.age;
+  memcpy(record->identifier, info.identifier, sizeof(identifier));
+  memcpy(record->pickle, pickler.data(), pickler.size());
+  record->pickle_size = pickler.size();
+  record->changes.store(0, std::memory_order_relaxed);
+
+  // Initialize the owner info.
+  record->owner.Release_Initialize();
+
+  // Now set those fields that can change.
+  bool success = record->UpdateFrom(info);
+  DCHECK(success);
+  return record;
+}
+
+bool GlobalActivityTracker::ModuleInfoRecord::UpdateFrom(
+    const GlobalActivityTracker::ModuleInfo& info) {
+  // Updates can occur after the record is made visible so make changes atomic.
+  // A "strong" exchange ensures no false failures.
+  uint32_t old_changes = changes.load(std::memory_order_relaxed);
+  uint32_t new_changes = old_changes | kModuleInformationChanging;
+  if ((old_changes & kModuleInformationChanging) != 0 ||
+      !changes.compare_exchange_strong(old_changes, new_changes,
+                                       std::memory_order_acquire,
+                                       std::memory_order_acquire)) {
+    NOTREACHED() << "Multiple sources are updating module information.";
+    return false;
+  }
+
+  loaded = info.is_loaded ? 1 : 0;
+  address = info.address;
+  load_time = Time::Now().ToInternalValue();
+
+  bool success = changes.compare_exchange_strong(new_changes, old_changes + 1,
+                                                 std::memory_order_release,
+                                                 std::memory_order_relaxed);
+  DCHECK(success);
+  return true;
+}
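+
+// Together, UpdateFrom() and DecodeTo() act like a seqlock (summary sketch;
+// |X| is a placeholder for the counter value):
+//
+//   writer: changes = X | kModuleInformationChanging; <mutate>; changes = X+1;
+//   reader: c = changes; <copy>; data valid iff !(c & bit) && changes == c;
+//
+// A reader that observes the "changing" bit, or a counter that moved during
+// its copy, reports the module as not loaded rather than trusting the fields.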
+
+GlobalActivityTracker::ScopedThreadActivity::ScopedThreadActivity(
+    const void* program_counter,
+    const void* origin,
+    Activity::Type type,
+    const ActivityData& data,
+    bool lock_allowed)
+    : ThreadActivityTracker::ScopedActivity(GetOrCreateTracker(lock_allowed),
+                                            program_counter,
+                                            origin,
+                                            type,
+                                            data) {}
+
+GlobalActivityTracker::ScopedThreadActivity::~ScopedThreadActivity() {
+  if (tracker_ && tracker_->HasUserData(activity_id_)) {
+    GlobalActivityTracker* global = GlobalActivityTracker::Get();
+    AutoLock lock(global->user_data_allocator_lock_);
+    tracker_->ReleaseUserData(activity_id_, &global->user_data_allocator_);
+  }
+}
+
+ActivityUserData& GlobalActivityTracker::ScopedThreadActivity::user_data() {
+  if (!user_data_) {
+    if (tracker_) {
+      GlobalActivityTracker* global = GlobalActivityTracker::Get();
+      AutoLock lock(global->user_data_allocator_lock_);
+      user_data_ =
+          tracker_->GetUserData(activity_id_, &global->user_data_allocator_);
+    } else {
+      user_data_ = std::make_unique<ActivityUserData>();
+    }
+  }
+  return *user_data_;
+}
+
+GlobalActivityTracker::ThreadSafeUserData::ThreadSafeUserData(void* memory,
+                                                              size_t size,
+                                                              int64_t pid)
+    : ActivityUserData(memory, size, pid) {}
+
+GlobalActivityTracker::ThreadSafeUserData::~ThreadSafeUserData() = default;
+
+void GlobalActivityTracker::ThreadSafeUserData::Set(StringPiece name,
+                                                    ValueType type,
+                                                    const void* memory,
+                                                    size_t size) {
+  AutoLock lock(data_lock_);
+  ActivityUserData::Set(name, type, memory, size);
+}
+
+GlobalActivityTracker::ManagedActivityTracker::ManagedActivityTracker(
+    PersistentMemoryAllocator::Reference mem_reference,
+    void* base,
+    size_t size)
+    : ThreadActivityTracker(base, size),
+      mem_reference_(mem_reference),
+      mem_base_(base) {}
+
+GlobalActivityTracker::ManagedActivityTracker::~ManagedActivityTracker() {
+  // The global |g_tracker_| must point to the owner of this class since all
+  // objects of this type must be destructed before |g_tracker_| can be changed
+  // (something that only occurs in tests).
+  DCHECK(g_tracker_);
+  GlobalActivityTracker::Get()->ReturnTrackerMemory(this);
+}
+
+void GlobalActivityTracker::CreateWithAllocator(
+    std::unique_ptr<PersistentMemoryAllocator> allocator,
+    int stack_depth,
+    int64_t process_id) {
+  // There's no need to do anything with the result. It is self-managing.
+  GlobalActivityTracker* global_tracker =
+      new GlobalActivityTracker(std::move(allocator), stack_depth, process_id);
+  // Create a tracker for this thread since it is known.
+  global_tracker->CreateTrackerForCurrentThread();
+}
+
+#if !defined(OS_NACL)
+// static
+bool GlobalActivityTracker::CreateWithFile(const FilePath& file_path,
+                                           size_t size,
+                                           uint64_t id,
+                                           StringPiece name,
+                                           int stack_depth) {
+  DCHECK(!file_path.empty());
+  DCHECK_GE(static_cast<uint64_t>(std::numeric_limits<int64_t>::max()), size);
+
+  // Create and map the file into memory and make it globally available.
+  std::unique_ptr<MemoryMappedFile> mapped_file(new MemoryMappedFile());
+  bool success = mapped_file->Initialize(
+      File(file_path, File::FLAG_CREATE_ALWAYS | File::FLAG_READ |
+                          File::FLAG_WRITE | File::FLAG_SHARE_DELETE),
+      {0, size}, MemoryMappedFile::READ_WRITE_EXTEND);
+  if (!success)
+    return false;
+  if (!FilePersistentMemoryAllocator::IsFileAcceptable(*mapped_file, false))
+    return false;
+  CreateWithAllocator(std::make_unique<FilePersistentMemoryAllocator>(
+                          std::move(mapped_file), size, id, name, false),
+                      stack_depth, 0);
+  return true;
+}
+#endif  // !defined(OS_NACL)
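+
+// Usage sketch for the factory above (the path, size and depth shown are
+// hypothetical):
+//
+//   bool ok = GlobalActivityTracker::CreateWithFile(
+//       FilePath(FILE_PATH_LITERAL("activity.pma")), 2 << 20 /* 2 MiB */,
+//       0 /* id */, "ActivityTracker", 10 /* stack_depth */);
+//
+// On success the global tracker exists and a tracker for the calling thread
+// has already been created via CreateTrackerForCurrentThread().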
+
+// static
+bool GlobalActivityTracker::CreateWithLocalMemory(size_t size,
+                                                  uint64_t id,
+                                                  StringPiece name,
+                                                  int stack_depth,
+                                                  int64_t process_id) {
+  CreateWithAllocator(
+      std::make_unique<LocalPersistentMemoryAllocator>(size, id, name),
+      stack_depth, process_id);
+  return true;
+}
+
+// static
+bool GlobalActivityTracker::CreateWithSharedMemory(
+    std::unique_ptr<SharedMemory> shm,
+    uint64_t id,
+    StringPiece name,
+    int stack_depth) {
+  if (shm->mapped_size() == 0 ||
+      !SharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(*shm)) {
+    return false;
+  }
+  CreateWithAllocator(std::make_unique<SharedPersistentMemoryAllocator>(
+                          std::move(shm), id, name, false),
+                      stack_depth, 0);
+  return true;
+}
+
+// static
+bool GlobalActivityTracker::CreateWithSharedMemoryHandle(
+    const SharedMemoryHandle& handle,
+    size_t size,
+    uint64_t id,
+    StringPiece name,
+    int stack_depth) {
+  std::unique_ptr<SharedMemory> shm(
+      new SharedMemory(handle, /*readonly=*/false));
+  if (!shm->Map(size))
+    return false;
+  return CreateWithSharedMemory(std::move(shm), id, name, stack_depth);
+}
+
+// static
+void GlobalActivityTracker::SetForTesting(
+    std::unique_ptr<GlobalActivityTracker> tracker) {
+  CHECK(!subtle::NoBarrier_Load(&g_tracker_));
+  subtle::Release_Store(&g_tracker_,
+                        reinterpret_cast<uintptr_t>(tracker.release()));
+}
+
+// static
+std::unique_ptr<GlobalActivityTracker>
+GlobalActivityTracker::ReleaseForTesting() {
+  GlobalActivityTracker* tracker = Get();
+  if (!tracker)
+    return nullptr;
+
+  // Thread trackers assume that the global tracker is present for some
+  // operations so ensure that there aren't any.
+  tracker->ReleaseTrackerForCurrentThreadForTesting();
+  DCHECK_EQ(0, tracker->thread_tracker_count_.load(std::memory_order_relaxed));
+
+  subtle::Release_Store(&g_tracker_, 0);
+  return WrapUnique(tracker);
+}
+
+ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() {
+  DCHECK(!this_thread_tracker_.Get());
+
+  PersistentMemoryAllocator::Reference mem_reference;
+
+  {
+    base::AutoLock autolock(thread_tracker_allocator_lock_);
+    mem_reference = thread_tracker_allocator_.GetObjectReference();
+  }
+
+  if (!mem_reference) {
+    // Failure. This shouldn't happen. But be graceful if it does, probably
+    // because the underlying allocator wasn't given enough memory to satisfy
+    // all possible requests.
+    NOTREACHED();
+    // Report the thread-count at which the allocator was full so that the
+    // failure can be seen and underlying memory resized appropriately.
+    UMA_HISTOGRAM_COUNTS_1000(
+        "ActivityTracker.ThreadTrackers.MemLimitTrackerCount",
+        thread_tracker_count_.load(std::memory_order_relaxed));
+    // Return null, just as if tracking wasn't enabled.
+    return nullptr;
+  }
+
+  // Convert the memory block found above into an actual memory address.
+  // Doing the conversion as a Header object enacts the 32/64-bit size
+  // consistency checks which would not otherwise be done. Unfortunately,
+  // some older compilers and MSVC don't have standard-conforming definitions
+  // of std::atomic, which causes it not to be plain-old-data. Don't check on
+  // those platforms, assuming that the checks on other platforms will be
+  // sufficient.
+  // TODO(bcwhite): Review this after major compiler releases.
+  DCHECK(mem_reference);
+  void* mem_base =
+      allocator_->GetAsObject<ThreadActivityTracker::Header>(mem_reference);
+
+  DCHECK(mem_base);
+  DCHECK_LE(stack_memory_size_, allocator_->GetAllocSize(mem_reference));
+
+  // Create a tracker with the acquired memory and set it as the tracker
+  // for this particular thread in thread-local-storage.
+  ManagedActivityTracker* tracker =
+      new ManagedActivityTracker(mem_reference, mem_base, stack_memory_size_);
+  DCHECK(tracker->IsValid());
+  this_thread_tracker_.Set(tracker);
+  int old_count = thread_tracker_count_.fetch_add(1, std::memory_order_relaxed);
+
+  UMA_HISTOGRAM_EXACT_LINEAR("ActivityTracker.ThreadTrackers.Count",
+                             old_count + 1, static_cast<int>(kMaxThreadCount));
+  return tracker;
+}
+
+void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() {
+  ThreadActivityTracker* tracker =
+      reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get());
+  if (tracker) {
+    this_thread_tracker_.Set(nullptr);
+    delete tracker;
+  }
+}
+
+void GlobalActivityTracker::SetBackgroundTaskRunner(
+    const scoped_refptr<TaskRunner>& runner) {
+  AutoLock lock(global_tracker_lock_);
+  background_task_runner_ = runner;
+}
+
+void GlobalActivityTracker::SetProcessExitCallback(
+    ProcessExitCallback callback) {
+  AutoLock lock(global_tracker_lock_);
+  process_exit_callback_ = callback;
+}
+
+void GlobalActivityTracker::RecordProcessLaunch(
+    ProcessId process_id,
+    const FilePath::StringType& cmd) {
+  const int64_t pid = process_id;
+  DCHECK_NE(GetProcessId(), pid);
+  DCHECK_NE(0, pid);
+
+  base::AutoLock lock(global_tracker_lock_);
+  if (base::ContainsKey(known_processes_, pid)) {
+    // TODO(bcwhite): Measure this in UMA.
+    NOTREACHED() << "Process #" << process_id
+                 << " was previously recorded as \"launched\""
+                 << " with no corresponding exit.\n"
+                 << known_processes_[pid];
+    known_processes_.erase(pid);
+  }
+
+#if defined(OS_WIN)
+  known_processes_.insert(std::make_pair(pid, UTF16ToUTF8(cmd)));
+#else
+  known_processes_.insert(std::make_pair(pid, cmd));
+#endif
+}
+
+void GlobalActivityTracker::RecordProcessLaunch(
+    ProcessId process_id,
+    const FilePath::StringType& exe,
+    const FilePath::StringType& args) {
+  // Quote the executable path if it contains a space. Note that find()
+  // returns a position, so compare against npos rather than testing the
+  // result for truth.
+  if (exe.find(FILE_PATH_LITERAL(" ")) != FilePath::StringType::npos) {
+    RecordProcessLaunch(process_id,
+                        FilePath::StringType(FILE_PATH_LITERAL("\"")) + exe +
+                            FILE_PATH_LITERAL("\" ") + args);
+  } else {
+    RecordProcessLaunch(process_id, exe + FILE_PATH_LITERAL(' ') + args);
+  }
+}
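+
+// Illustrative example of the quoting above (paths are hypothetical): an
+// |exe| of C:\Program Files\app.exe with |args| of --type=renderer is
+// recorded as
+//   "C:\Program Files\app.exe" --type=renderer
+// while a space-free |exe| is joined with |args| without quotes.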
+
+void GlobalActivityTracker::RecordProcessExit(ProcessId process_id,
+                                              int exit_code) {
+  const int64_t pid = process_id;
+  DCHECK_NE(GetProcessId(), pid);
+  DCHECK_NE(0, pid);
+
+  scoped_refptr<TaskRunner> task_runner;
+  std::string command_line;
+  {
+    base::AutoLock lock(global_tracker_lock_);
+    task_runner = background_task_runner_;
+    auto found = known_processes_.find(pid);
+    if (found != known_processes_.end()) {
+      command_line = std::move(found->second);
+      known_processes_.erase(found);
+    } else {
+      DLOG(ERROR) << "Recording exit of unknown process #" << process_id;
+    }
+  }
+
+  // Use the current time to differentiate the process that just exited
+  // from any that might be created in the future with the same ID.
+  int64_t now_stamp = Time::Now().ToInternalValue();
+
+  // The persistent allocator is thread-safe so run the iteration and
+  // adjustments on a worker thread if one was provided.
+  if (task_runner && !task_runner->RunsTasksInCurrentSequence()) {
+    task_runner->PostTask(
+        FROM_HERE,
+        BindOnce(&GlobalActivityTracker::CleanupAfterProcess, Unretained(this),
+                 pid, now_stamp, exit_code, std::move(command_line)));
+    return;
+  }
+
+  CleanupAfterProcess(pid, now_stamp, exit_code, std::move(command_line));
+}
+
+void GlobalActivityTracker::SetProcessPhase(ProcessPhase phase) {
+  process_data().SetInt(kProcessPhaseDataKey, phase);
+}
+
+void GlobalActivityTracker::CleanupAfterProcess(int64_t process_id,
+                                                int64_t exit_stamp,
+                                                int exit_code,
+                                                std::string&& command_line) {
+  // The process may not have exited cleanly so it's necessary to go through
+  // all the data structures it may have allocated in the persistent memory
+  // segment and mark them as "released". This will allow them to be reused
+  // later on.
+
+  PersistentMemoryAllocator::Iterator iter(allocator_.get());
+  PersistentMemoryAllocator::Reference ref;
+
+  ProcessExitCallback process_exit_callback;
+  {
+    AutoLock lock(global_tracker_lock_);
+    process_exit_callback = process_exit_callback_;
+  }
+  if (process_exit_callback) {
+    // Find the process's user-data record so the process phase can be passed
+    // to the callback.
+    ActivityUserData::Snapshot process_data_snapshot;
+    while ((ref = iter.GetNextOfType(kTypeIdProcessDataRecord)) != 0) {
+      const void* memory = allocator_->GetAsArray<char>(
+          ref, kTypeIdProcessDataRecord, PersistentMemoryAllocator::kSizeAny);
+      if (!memory)
+        continue;
+      int64_t found_id;
+      int64_t create_stamp;
+      if (ActivityUserData::GetOwningProcessId(memory, &found_id,
+                                               &create_stamp)) {
+        if (found_id == process_id && create_stamp < exit_stamp) {
+          const ActivityUserData process_data(const_cast<void*>(memory),
+                                              allocator_->GetAllocSize(ref));
+          process_data.CreateSnapshot(&process_data_snapshot);
+          break;  // No need to look for any others.
+        }
+      }
+    }
+    iter.Reset();  // So it starts anew when used below.
+
+    // Record the process's phase at exit so the callback doesn't need to go
+    // searching based on a private key value.
+    ProcessPhase exit_phase = PROCESS_PHASE_UNKNOWN;
+    auto phase = process_data_snapshot.find(kProcessPhaseDataKey);
+    if (phase != process_data_snapshot.end())
+      exit_phase = static_cast<ProcessPhase>(phase->second.GetInt());
+
+    // Perform the callback.
+    process_exit_callback.Run(process_id, exit_stamp, exit_code, exit_phase,
+                              std::move(command_line),
+                              std::move(process_data_snapshot));
+  }
+
+  // Find all allocations associated with the exited process and free them.
+  uint32_t type;
+  while ((ref = iter.GetNext(&type)) != 0) {
+    switch (type) {
+      case kTypeIdActivityTracker:
+      case kTypeIdUserDataRecord:
+      case kTypeIdProcessDataRecord:
+      case ModuleInfoRecord::kPersistentTypeId: {
+        const void* memory = allocator_->GetAsArray<char>(
+            ref, type, PersistentMemoryAllocator::kSizeAny);
+        if (!memory)
+          continue;
+        int64_t found_id;
+        int64_t create_stamp;
+
+        // By convention, the OwningProcess structure is always the first
+        // field of the structure so there's no need to handle all the
+        // cases separately.
+        if (OwningProcess::GetOwningProcessId(memory, &found_id,
+                                              &create_stamp)) {
+          // Only change the type to be "free" if the process ID matches and
+          // the creation time is before the exit time (so PID re-use doesn't
+          // cause the erasure of something that is in-use). Memory is cleared
+          // here, rather than when it's needed, so as to limit the impact at
+          // that critical time.
+          if (found_id == process_id && create_stamp < exit_stamp)
+            allocator_->ChangeType(ref, ~type, type, /*clear=*/true);
+        }
+      } break;
+    }
+  }
+}
+
+void GlobalActivityTracker::RecordLogMessage(StringPiece message) {
+  // Allocate at least one extra byte so the string is NUL terminated. All
+  // memory returned by the allocator is guaranteed to be zeroed.
+  PersistentMemoryAllocator::Reference ref =
+      allocator_->Allocate(message.size() + 1, kTypeIdGlobalLogMessage);
+  char* memory = allocator_->GetAsArray<char>(ref, kTypeIdGlobalLogMessage,
+                                              message.size() + 1);
+  if (memory) {
+    memcpy(memory, message.data(), message.size());
+    allocator_->MakeIterable(ref);
+  }
+}
+
+void GlobalActivityTracker::RecordModuleInfo(const ModuleInfo& info) {
+  AutoLock lock(modules_lock_);
+  auto found = modules_.find(info.file);
+  if (found != modules_.end()) {
+    ModuleInfoRecord* record = found->second;
+    DCHECK(record);
+
+    // Update the basic state of module information that has already been
+    // recorded. It is assumed that the string information (identifier,
+    // version, etc.) remains unchanged, which means that there's no need
+    // to create a new record to accommodate a possibly longer length.
+    record->UpdateFrom(info);
+    return;
+  }
+
+  ModuleInfoRecord* record =
+      ModuleInfoRecord::CreateFrom(info, allocator_.get());
+  if (!record)
+    return;
+  allocator_->MakeIterable(record);
+  modules_.emplace(info.file, record);
+}
+
+void GlobalActivityTracker::RecordFieldTrial(const std::string& trial_name,
+                                             StringPiece group_name) {
+  const std::string key = std::string("FieldTrial.") + trial_name;
+  process_data_.SetString(key, group_name);
+}
+
+void GlobalActivityTracker::RecordException(const void* pc,
+                                            const void* origin,
+                                            uint32_t code) {
+  RecordExceptionImpl(pc, origin, code);
+}
+
+void GlobalActivityTracker::MarkDeleted() {
+  allocator_->SetMemoryState(PersistentMemoryAllocator::MEMORY_DELETED);
+}
+
+GlobalActivityTracker::GlobalActivityTracker(
+    std::unique_ptr<PersistentMemoryAllocator> allocator,
+    int stack_depth,
+    int64_t process_id)
+    : allocator_(std::move(allocator)),
+      stack_memory_size_(ThreadActivityTracker::SizeForStackDepth(stack_depth)),
+      process_id_(process_id == 0 ? GetCurrentProcId() : process_id),
+      this_thread_tracker_(&OnTLSDestroy),
+      thread_tracker_count_(0),
+      thread_tracker_allocator_(allocator_.get(),
+                                kTypeIdActivityTracker,
+                                kTypeIdActivityTrackerFree,
+                                stack_memory_size_,
+                                kCachedThreadMemories,
+                                /*make_iterable=*/true),
+      user_data_allocator_(allocator_.get(),
+                           kTypeIdUserDataRecord,
+                           kTypeIdUserDataRecordFree,
+                           kUserDataSize,
+                           kCachedUserDataMemories,
+                           /*make_iterable=*/true),
+      process_data_(allocator_->GetAsArray<char>(
+                        AllocateFrom(allocator_.get(),
+                                     kTypeIdProcessDataRecordFree,
+                                     kProcessDataSize,
+                                     kTypeIdProcessDataRecord),
+                        kTypeIdProcessDataRecord,
+                        kProcessDataSize),
+                    kProcessDataSize,
+                    process_id_) {
+  DCHECK_NE(0, process_id_);
+
+  // Ensure that there is no other global object and then make this one such.
+  DCHECK(!g_tracker_);
+  subtle::Release_Store(&g_tracker_, reinterpret_cast<uintptr_t>(this));
+
+  // The data records must be iterable in order to be found by an analyzer.
+  allocator_->MakeIterable(allocator_->GetAsReference(
+      process_data_.GetBaseAddress(), kTypeIdProcessDataRecord));
+
+  // Note that this process has launched.
+  SetProcessPhase(PROCESS_LAUNCHED);
+
+  // Fetch and record all activated field trials.
+  FieldTrial::ActiveGroups active_groups;
+  FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
+  for (auto& group : active_groups)
+    RecordFieldTrial(group.trial_name, group.group_name);
+}
+
+GlobalActivityTracker::~GlobalActivityTracker() {
+  DCHECK(Get() == nullptr || Get() == this);
+  DCHECK_EQ(0, thread_tracker_count_.load(std::memory_order_relaxed));
+  subtle::Release_Store(&g_tracker_, 0);
+}
+
+void GlobalActivityTracker::ReturnTrackerMemory(
+    ManagedActivityTracker* tracker) {
+  PersistentMemoryAllocator::Reference mem_reference = tracker->mem_reference_;
+  void* mem_base = tracker->mem_base_;
+  DCHECK(mem_reference);
+  DCHECK(mem_base);
+
+  // Remove the destructed tracker from the set of known ones.
+  DCHECK_LE(1, thread_tracker_count_.load(std::memory_order_relaxed));
+  thread_tracker_count_.fetch_sub(1, std::memory_order_relaxed);
+
+  // Release this memory for re-use at a later time.
+  base::AutoLock autolock(thread_tracker_allocator_lock_);
+  thread_tracker_allocator_.ReleaseObjectReference(mem_reference);
+}
+
+void GlobalActivityTracker::RecordExceptionImpl(const void* pc,
+                                                const void* origin,
+                                                uint32_t code) {
+  // Get an existing tracker for this thread. It's not possible to create
+  // one at this point because doing so would involve memory allocations and
+  // other potentially complex operations that can cause failures if done
+  // within an exception handler. In most cases various operations will
+  // have already created the tracker so this shouldn't generally be a
+  // problem.
+  ThreadActivityTracker* tracker = GetTrackerForCurrentThread();
+  if (!tracker)
+    return;
+
+  tracker->RecordExceptionActivity(pc, origin, Activity::ACT_EXCEPTION,
+                                   ActivityData::ForException(code));
+}
+
+// static
+void GlobalActivityTracker::OnTLSDestroy(void* value) {
+  delete reinterpret_cast<ManagedActivityTracker*>(value);
+}
+
+ScopedActivity::ScopedActivity(const void* program_counter,
+                               uint8_t action,
+                               uint32_t id,
+                               int32_t info)
+    : GlobalActivityTracker::ScopedThreadActivity(
+          program_counter,
+          nullptr,
+          static_cast<Activity::Type>(Activity::ACT_GENERIC | action),
+          ActivityData::ForGeneric(id, info),
+          /*lock_allowed=*/true),
+      id_(id) {
+  // The action must not affect the category bits of the activity type.
+  DCHECK_EQ(0, action & Activity::ACT_CATEGORY_MASK);
+}
+
+void ScopedActivity::ChangeAction(uint8_t action) {
+  DCHECK_EQ(0, action & Activity::ACT_CATEGORY_MASK);
+  ChangeTypeAndData(static_cast<Activity::Type>(Activity::ACT_GENERIC | action),
+                    kNullActivityData);
+}
+
+void ScopedActivity::ChangeInfo(int32_t info) {
+  ChangeTypeAndData(Activity::ACT_NULL, ActivityData::ForGeneric(id_, info));
+}
+
+void ScopedActivity::ChangeActionAndInfo(uint8_t action, int32_t info) {
+  DCHECK_EQ(0, action & Activity::ACT_CATEGORY_MASK);
+  ChangeTypeAndData(static_cast<Activity::Type>(Activity::ACT_GENERIC | action),
+                    ActivityData::ForGeneric(id_, info));
+}
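+
+// Usage sketch (the function and values are hypothetical; a real caller
+// would normally pass its program counter rather than null):
+//
+//   void DoWork() {
+//     ScopedActivity activity(/*program_counter=*/nullptr, /*action=*/0,
+//                             /*id=*/42, /*info=*/0);
+//     // ... work ...
+//     activity.ChangeInfo(1);  // Update progress without a new activity.
+//   }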
+
+ScopedTaskRunActivity::ScopedTaskRunActivity(
+    const void* program_counter,
+    const base::PendingTask& task)
+    : GlobalActivityTracker::ScopedThreadActivity(
+          program_counter,
+          task.posted_from.program_counter(),
+          Activity::ACT_TASK_RUN,
+          ActivityData::ForTask(task.sequence_num),
+          /*lock_allowed=*/true) {}
+
+ScopedLockAcquireActivity::ScopedLockAcquireActivity(
+    const void* program_counter,
+    const base::internal::LockImpl* lock)
+    : GlobalActivityTracker::ScopedThreadActivity(
+          program_counter,
+          nullptr,
+          Activity::ACT_LOCK_ACQUIRE,
+          ActivityData::ForLock(lock),
+          /*lock_allowed=*/false) {}
+
+ScopedEventWaitActivity::ScopedEventWaitActivity(
+    const void* program_counter,
+    const base::WaitableEvent* event)
+    : GlobalActivityTracker::ScopedThreadActivity(
+          program_counter,
+          nullptr,
+          Activity::ACT_EVENT_WAIT,
+          ActivityData::ForEvent(event),
+          /*lock_allowed=*/true) {}
+
+ScopedThreadJoinActivity::ScopedThreadJoinActivity(
+    const void* program_counter,
+    const base::PlatformThreadHandle* thread)
+    : GlobalActivityTracker::ScopedThreadActivity(
+          program_counter,
+          nullptr,
+          Activity::ACT_THREAD_JOIN,
+          ActivityData::ForThread(*thread),
+          /*lock_allowed=*/true) {}
+
+#if !defined(OS_NACL) && !defined(OS_IOS)
+ScopedProcessWaitActivity::ScopedProcessWaitActivity(
+    const void* program_counter,
+    const base::Process* process)
+    : GlobalActivityTracker::ScopedThreadActivity(
+          program_counter,
+          nullptr,
+          Activity::ACT_PROCESS_WAIT,
+          ActivityData::ForProcess(process->Pid()),
+          /*lock_allowed=*/true) {}
+#endif
+
+}  // namespace debug
+}  // namespace base
diff --git a/base/debug/activity_tracker.h b/base/debug/activity_tracker.h
new file mode 100644
index 0000000..bfd9f9d
--- /dev/null
+++ b/base/debug/activity_tracker.h
@@ -0,0 +1,1360 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Activity tracking provides a low-overhead method of collecting information
+// about the state of the application for analysis both while it is running
+// and after it has terminated unexpectedly. Its primary purpose is to help
+// locate reasons the browser becomes unresponsive by providing insight into
+// what all the various threads and processes are (or were) doing.
+
+#ifndef BASE_DEBUG_ACTIVITY_TRACKER_H_
+#define BASE_DEBUG_ACTIVITY_TRACKER_H_
+
+// std::atomic is undesired due to performance issues when used as global
+// variables. There are no such instances here. This module uses the
+// PersistentMemoryAllocator which also uses std::atomic and is written
+// by the same author.
+#include <atomic>
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/compiler_specific.h"
+#include "base/gtest_prod_util.h"
+#include "base/location.h"
+#include "base/memory/shared_memory.h"
+#include "base/metrics/persistent_memory_allocator.h"
+#include "base/process/process_handle.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/task_runner.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread_local_storage.h"
+
+namespace base {
+
+struct PendingTask;
+
+class FilePath;
+class Lock;
+class PlatformThreadHandle;
+class Process;
+class WaitableEvent;
+
+namespace debug {
+
+class ThreadActivityTracker;
+
+
+enum : int {
+  // The maximum number of call-stack addresses stored per activity. This
+  // cannot be changed without also changing the version number of the
+  // structure. See kTypeIdActivityTracker in GlobalActivityTracker.
+  kActivityCallStackSize = 10,
+};
+
+// A class for keeping all information needed to verify that a structure is
+// associated with a given process.
+struct OwningProcess {
+  OwningProcess();
+  ~OwningProcess();
+
+  // Initializes structure with the current process id and the current time.
+  // These can uniquely identify a process. A unique non-zero data_id will be
+  // set making it possible to tell using atomic reads if the data has changed.
+  void Release_Initialize(int64_t pid = 0);
+
+  // Explicitly sets the process ID.
+  void SetOwningProcessIdForTesting(int64_t pid, int64_t stamp);
+
+  // Gets the associated process ID, in native form, and the creation timestamp
+  // from memory without loading the entire structure for analysis. This will
+  // return false if no valid process ID is available.
+  static bool GetOwningProcessId(const void* memory,
+                                 int64_t* out_id,
+                                 int64_t* out_stamp);
+
+  // SHA1(base::debug::OwningProcess): Increment this if structure changes!
+  static constexpr uint32_t kPersistentTypeId = 0xB1179672 + 1;
+
+  // Expected size for 32/64-bit check by PersistentMemoryAllocator.
+  static constexpr size_t kExpectedInstanceSize = 24;
+
+  std::atomic<uint32_t> data_id;
+  uint32_t padding;
+  int64_t process_id;
+  int64_t create_stamp;
+};
+
+// The data associated with an activity is dependent upon the activity type.
+// This union defines all of the various fields. All fields must be explicitly
+// sized types to ensure no interoperability problems between 32-bit and
+// 64-bit systems.
+union ActivityData {
+  // Expected size for 32/64-bit check.
+  // TODO(bcwhite): VC2015 doesn't allow statics in unions. Fix when it does.
+  // static constexpr size_t kExpectedInstanceSize = 8;
+
+  // Generic activities don't have any defined structure.
+  struct {
+    uint32_t id;   // An arbitrary identifier used for association.
+    int32_t info;  // An arbitrary value used for information purposes.
+  } generic;
+  struct {
+    uint64_t sequence_id;  // The sequence identifier of the posted task.
+  } task;
+  struct {
+    uint64_t lock_address;  // The memory address of the lock object.
+  } lock;
+  struct {
+    uint64_t event_address;  // The memory address of the event object.
+  } event;
+  struct {
+    int64_t thread_id;  // A unique identifier for a thread within a process.
+  } thread;
+  struct {
+    int64_t process_id;  // A unique identifier for a process.
+  } process;
+  struct {
+    uint32_t code;  // An "exception code" number.
+  } exception;
+
+  // These methods create an ActivityData object from the appropriate
+  // parameters. Objects of this type should always be created this way to
+  // ensure that no fields remain unpopulated should the set of recorded
+  // fields change. They're defined inline where practical because they
+  // reduce to loading a small local structure with a few values, roughly
+  // the same as loading all those values into parameters.
+
+  static ActivityData ForGeneric(uint32_t id, int32_t info) {
+    ActivityData data;
+    data.generic.id = id;
+    data.generic.info = info;
+    return data;
+  }
+
+  static ActivityData ForTask(uint64_t sequence) {
+    ActivityData data;
+    data.task.sequence_id = sequence;
+    return data;
+  }
+
+  static ActivityData ForLock(const void* lock) {
+    ActivityData data;
+    data.lock.lock_address = reinterpret_cast<uintptr_t>(lock);
+    return data;
+  }
+
+  static ActivityData ForEvent(const void* event) {
+    ActivityData data;
+    data.event.event_address = reinterpret_cast<uintptr_t>(event);
+    return data;
+  }
+
+  static ActivityData ForThread(const PlatformThreadHandle& handle);
+  static ActivityData ForThread(const int64_t id) {
+    ActivityData data;
+    data.thread.thread_id = id;
+    return data;
+  }
+
+  static ActivityData ForProcess(const int64_t id) {
+    ActivityData data;
+    data.process.process_id = id;
+    return data;
+  }
+
+  static ActivityData ForException(const uint32_t code) {
+    ActivityData data;
+    data.exception.code = code;
+    return data;
+  }
+};
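+
+// Example use of the factories above (values are arbitrary): always build
+// objects this way rather than assigning fields directly, e.g.
+//
+//   ActivityData generic = ActivityData::ForGeneric(/*id=*/42, /*info=*/7);
+//   ActivityData task = ActivityData::ForTask(/*sequence=*/1234);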
+
+// A "null" activity-data that can be passed to indicate "do not change".
+extern const ActivityData kNullActivityData;
+
+
+// A helper class that is used for managing memory allocations within a
+// persistent memory allocator. Instances of this class are NOT thread-safe.
+// Use from a single thread or protect access with a lock.
+class BASE_EXPORT ActivityTrackerMemoryAllocator {
+ public:
+  using Reference = PersistentMemoryAllocator::Reference;
+
+  // Creates an instance for allocating objects of a fixed |object_type|, a
+  // corresponding |object_free| type, and the |object_size|. An internal
+  // cache of the last |cache_size| released references will be kept for
+  // quick future fetches. If |make_iterable| then allocated objects will
+  // be marked "iterable" in the allocator.
+  ActivityTrackerMemoryAllocator(PersistentMemoryAllocator* allocator,
+                                 uint32_t object_type,
+                                 uint32_t object_free_type,
+                                 size_t object_size,
+                                 size_t cache_size,
+                                 bool make_iterable);
+  ~ActivityTrackerMemoryAllocator();
+
+  // Gets a reference to an object of the configured type. This can return
+  // a null reference if it was not possible to allocate the memory.
+  Reference GetObjectReference();
+
+  // Returns an object to the "free" pool.
+  void ReleaseObjectReference(Reference ref);
+
+  // Helper function to access an object allocated using this instance.
+  template <typename T>
+  T* GetAsObject(Reference ref) {
+    return allocator_->GetAsObject<T>(ref);
+  }
+
+  // Similar to GetAsObject() but converts references to arrays of objects.
+  template <typename T>
+  T* GetAsArray(Reference ref, size_t count) {
+    return allocator_->GetAsArray<T>(ref, object_type_, count);
+  }
+
+  // The current "used size" of the internal cache, visible for testing.
+  size_t cache_used() const { return cache_used_; }
+
+ private:
+  PersistentMemoryAllocator* const allocator_;
+  const uint32_t object_type_;
+  const uint32_t object_free_type_;
+  const size_t object_size_;
+  const size_t cache_size_;
+  const bool make_iterable_;
+
+  // An iterator for going through persistent memory looking for free'd objects.
+  PersistentMemoryAllocator::Iterator iterator_;
+
+  // The cache of released object memories.
+  std::unique_ptr<Reference[]> cache_values_;
+  size_t cache_used_;
+
+  DISALLOW_COPY_AND_ASSIGN(ActivityTrackerMemoryAllocator);
+};
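+
+// Usage sketch (|allocator| is an existing PersistentMemoryAllocator; the
+// type ids and the Foo type are hypothetical):
+//
+//   ActivityTrackerMemoryAllocator foos(allocator, kTypeIdFoo, kTypeIdFooFree,
+//                                       sizeof(Foo), /*cache_size=*/10,
+//                                       /*make_iterable=*/true);
+//   ActivityTrackerMemoryAllocator::Reference ref = foos.GetObjectReference();
+//   if (ref) {
+//     Foo* foo = foos.GetAsObject<Foo>(ref);
+//     // ... use |foo| ...
+//     foos.ReleaseObjectReference(ref);  // Return it to the "free" pool.
+//   }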
+
+
+// This structure is the full contents recorded for every activity pushed
+// onto the stack. The |activity_type| indicates what is actually stored in
+// the |data| field. All fields must be explicitly sized types to ensure no
+// interoperability problems between 32-bit and 64-bit systems.
+struct Activity {
+  // SHA1(base::debug::Activity): Increment this if structure changes!
+  static constexpr uint32_t kPersistentTypeId = 0x99425159 + 1;
+  // Expected size for 32/64-bit check. Update this if structure changes!
+  static constexpr size_t kExpectedInstanceSize =
+      48 + 8 * kActivityCallStackSize;
+
+  // The type of an activity on the stack. Activities are broken into
+  // categories with the category ID taking the top 4 bits and the lower
+  // bits representing an action within that category. This combination
+  // makes it easy to "switch" based on the type during analysis.
+  enum Type : uint8_t {
+    // This "null" constant is used to indicate "do not change" in calls.
+    ACT_NULL = 0,
+
+    // Task activities involve callbacks posted to a thread or thread-pool
+    // using the PostTask() method or any of its friends.
+    ACT_TASK = 1 << 4,
+    ACT_TASK_RUN = ACT_TASK,
+
+    // Lock activities involve the acquisition of "mutex" locks.
+    ACT_LOCK = 2 << 4,
+    ACT_LOCK_ACQUIRE = ACT_LOCK,
+    ACT_LOCK_RELEASE,
+
+    // Event activities involve operations on a WaitableEvent.
+    ACT_EVENT = 3 << 4,
+    ACT_EVENT_WAIT = ACT_EVENT,
+    ACT_EVENT_SIGNAL,
+
+    // Thread activities involve the life management of threads.
+    ACT_THREAD = 4 << 4,
+    ACT_THREAD_START = ACT_THREAD,
+    ACT_THREAD_JOIN,
+
+    // Process activities involve the life management of processes.
+    ACT_PROCESS = 5 << 4,
+    ACT_PROCESS_START = ACT_PROCESS,
+    ACT_PROCESS_WAIT,
+
+    // Exception activities indicate the occurrence of something unexpected.
+    ACT_EXCEPTION = 14 << 4,
+
+    // Generic activities are user defined and can be anything.
+    ACT_GENERIC = 15 << 4,
+
+    // These constants can be used to separate the category and action from
+    // a combined activity type.
+    ACT_CATEGORY_MASK = 0xF << 4,
+    ACT_ACTION_MASK = 0xF
+  };
+
+  // Internal representation of time. During collection, this is in "ticks"
+  // but when returned in a snapshot, it is "wall time".
+  int64_t time_internal;
+
+  // The address that pushed the activity onto the stack as a raw number.
+  uint64_t calling_address;
+
+  // The address that is the origin of the activity if it is not obvious from
+  // the call stack. This is useful for things like tasks that are posted
+  // from a completely different thread though most activities will leave
+  // it null.
+  uint64_t origin_address;
+
+  // Array of program-counters that make up the top of the call stack.
+  // Despite the fixed size, this list is always null-terminated. Entries
+  // after the terminator have no meaning and may or may not also be null.
+  // The list will be completely empty if call-stack collection is not
+  // enabled.
+  uint64_t call_stack[kActivityCallStackSize];
+
+  // Reference to arbitrary user data within the persistent memory segment
+  // and a unique identifier for it.
+  uint32_t user_data_ref;
+  uint32_t user_data_id;
+
+  // The (enumerated) type of the activity. This defines what fields of the
+  // |data| record are valid.
+  uint8_t activity_type;
+
+  // Padding to ensure that the next member begins on a 64-bit boundary
+  // even on 32-bit builds which ensures inter-operability between CPU
+  // architectures. New fields can be taken from this space.
+  uint8_t padding[7];
+
+  // Information specific to the |activity_type|.
+  ActivityData data;
+
+  static void FillFrom(Activity* activity,
+                       const void* program_counter,
+                       const void* origin,
+                       Type type,
+                       const ActivityData& data);
+};
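+
+// Example (the |slot|, |pc| and |seq| names are placeholders): an entry is
+// populated in place via FillFrom() rather than by constructing an Activity:
+//
+//   Activity::FillFrom(&slot, pc, /*origin=*/nullptr,
+//                      Activity::ACT_TASK_RUN, ActivityData::ForTask(seq));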
+
+// This class manages arbitrary user data that can be associated with the
+// activities of a thread, stored as key/value pairs of any type. This can
+// provide additional information during debugging. It is also used to store
+// arbitrary global data. All updates must be done from the same thread,
+// though other threads can read it concurrently if they create new objects
+// using the same memory.
+class BASE_EXPORT ActivityUserData {
+ public:
+  // List of known value types. REFERENCE types must immediately follow the
+  // corresponding non-reference types.
+  enum ValueType : uint8_t {
+    END_OF_VALUES = 0,
+    RAW_VALUE,
+    RAW_VALUE_REFERENCE,
+    STRING_VALUE,
+    STRING_VALUE_REFERENCE,
+    CHAR_VALUE,
+    BOOL_VALUE,
+    SIGNED_VALUE,
+    UNSIGNED_VALUE,
+  };
+
+  class BASE_EXPORT TypedValue {
+   public:
+    TypedValue();
+    TypedValue(const TypedValue& other);
+    ~TypedValue();
+
+    ValueType type() const { return type_; }
+
+    // These methods return the extracted value in the correct format.
+    StringPiece Get() const;
+    StringPiece GetString() const;
+    bool GetBool() const;
+    char GetChar() const;
+    int64_t GetInt() const;
+    uint64_t GetUint() const;
+
+    // These methods return references to process memory as originally provided
+    // to corresponding Set calls. USE WITH CAUTION! There is no guarantee that
+    // the referenced memory is accessible or useful. It's possible that:
+    //  - the memory was free'd and reallocated for a different purpose
+    //  - the memory has been released back to the OS
+    //  - the memory belongs to a different process's address space
+    // Dereferencing the returned StringPiece when the memory is not accessible
+    // will cause the program to SEGV!
+    StringPiece GetReference() const;
+    StringPiece GetStringReference() const;
+
+   private:
+    friend class ActivityUserData;
+
+    ValueType type_ = END_OF_VALUES;
+    uint64_t short_value_;    // Used to hold copy of numbers, etc.
+    std::string long_value_;  // Used to hold copy of raw/string data.
+    StringPiece ref_value_;   // Used to hold reference to external data.
+  };
+
+  using Snapshot = std::map<std::string, TypedValue>;
+
+  // Initialize the object either as a "sink" that just accepts and discards
+  // data or an active one that writes to a given (zeroed) memory block.
+  ActivityUserData();
+  ActivityUserData(void* memory, size_t size, int64_t pid = 0);
+  virtual ~ActivityUserData();
+
+  // Gets the unique ID number for this user data. If this changes then the
+  // contents have been overwritten by another thread. The return value is
+  // always non-zero unless it's actually just a data "sink".
+  uint32_t id() const {
+    return header_ ? header_->owner.data_id.load(std::memory_order_relaxed) : 0;
+  }
+
+  // Writes a |value| (as part of a key/value pair) that will be included with
+  // the activity in any reports. The same |name| can be written multiple times
+  // with each successive call overwriting the previously stored |value|. For
+  // raw and string values, the maximum size of successive writes is limited by
+  // the first call. The length of "name" is limited to 255 characters.
+  //
+  // This information is stored on a "best effort" basis. It may be dropped if
+  // the memory buffer is full or the associated activity is beyond the maximum
+  // recording depth.
+  void Set(StringPiece name, const void* memory, size_t size) {
+    Set(name, RAW_VALUE, memory, size);
+  }
+  void SetString(StringPiece name, StringPiece value) {
+    Set(name, STRING_VALUE, value.data(), value.length());
+  }
+  void SetString(StringPiece name, StringPiece16 value) {
+    SetString(name, UTF16ToUTF8(value));
+  }
+  void SetBool(StringPiece name, bool value) {
+    char cvalue = value ? 1 : 0;
+    Set(name, BOOL_VALUE, &cvalue, sizeof(cvalue));
+  }
+  void SetChar(StringPiece name, char value) {
+    Set(name, CHAR_VALUE, &value, sizeof(value));
+  }
+  void SetInt(StringPiece name, int64_t value) {
+    Set(name, SIGNED_VALUE, &value, sizeof(value));
+  }
+  void SetUint(StringPiece name, uint64_t value) {
+    Set(name, UNSIGNED_VALUE, &value, sizeof(value));
+  }
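+
+  // For example, a minimal usage sketch (the names and values here are
+  // illustrative only):
+  //
+  //   user_data.SetInt("request_id", 42);
+  //   user_data.SetString("url", "https://example.com/");
+  //   user_data.SetBool("cached", false);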
+
+  // These function as above but don't actually copy the data into the
+  // persistent memory. They store unaltered pointers along with a size. These
+  // can be used in conjunction with a memory dump to find certain large pieces
+  // of information.
+  void SetReference(StringPiece name, const void* memory, size_t size) {
+    SetReference(name, RAW_VALUE_REFERENCE, memory, size);
+  }
+  void SetStringReference(StringPiece name, StringPiece value) {
+    SetReference(name, STRING_VALUE_REFERENCE, value.data(), value.length());
+  }
+
+  // Creates a snapshot of the key/value pairs contained within. The returned
+  // data will be fixed, independent of whatever changes afterward. There is
+  // some protection against concurrent modification. This will return false
+  // if the data is invalid or if a complete overwrite of the contents is
+  // detected.
+  bool CreateSnapshot(Snapshot* output_snapshot) const;
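+
+  // For example, a sketch of reading values back (the getter used must match
+  // the stored type()):
+  //
+  //   ActivityUserData::Snapshot snapshot;
+  //   if (user_data.CreateSnapshot(&snapshot)) {
+  //     for (const auto& entry : snapshot) {
+  //       if (entry.second.type() == SIGNED_VALUE)
+  //         LOG(INFO) << entry.first << "=" << entry.second.GetInt();
+  //     }
+  //   }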
+
+  // Gets the base memory address used for storing data.
+  const void* GetBaseAddress() const;
+
+  // Explicitly sets the process ID.
+  void SetOwningProcessIdForTesting(int64_t pid, int64_t stamp);
+
+  // Gets the associated process ID, in native form, and the creation timestamp
+  // from tracker memory without loading the entire structure for analysis. This
+  // will return false if no valid process ID is available.
+  static bool GetOwningProcessId(const void* memory,
+                                 int64_t* out_id,
+                                 int64_t* out_stamp);
+
+ protected:
+  virtual void Set(StringPiece name,
+                   ValueType type,
+                   const void* memory,
+                   size_t size);
+
+ private:
+  FRIEND_TEST_ALL_PREFIXES(ActivityTrackerTest, UserDataTest);
+
+  enum : size_t { kMemoryAlignment = sizeof(uint64_t) };
+
+  // A structure that defines the structure header in memory.
+  struct MemoryHeader {
+    MemoryHeader();
+    ~MemoryHeader();
+
+    OwningProcess owner;  // Information about the creating process.
+  };
+
+  // Header to a key/value record held in persistent memory.
+  struct FieldHeader {
+    FieldHeader();
+    ~FieldHeader();
+
+    std::atomic<uint8_t> type;         // Encoded ValueType
+    uint8_t name_size;                 // Length of "name" key.
+    std::atomic<uint16_t> value_size;  // Actual size of the stored value.
+    uint16_t record_size;              // Total storage of name, value, header.
+  };
+
+  // A structure used to reference data held outside of persistent memory.
+  struct ReferenceRecord {
+    uint64_t address;
+    uint64_t size;
+  };
+
+  // This record is used to hold a known value in a map so that it can be
+  // found and overwritten later.
+  struct ValueInfo {
+    ValueInfo();
+    ValueInfo(ValueInfo&&);
+    ~ValueInfo();
+
+    StringPiece name;                 // The "key" of the record.
+    ValueType type;                   // The type of the value.
+    void* memory;                     // Where the "value" is held.
+    std::atomic<uint16_t>* size_ptr;  // Address of the actual size of value.
+    size_t extent;                    // The total storage of the value,
+  };                                  // typically rounded up for alignment.
+
+  void SetReference(StringPiece name,
+                    ValueType type,
+                    const void* memory,
+                    size_t size);
+
+  // Loads any data already in the memory segment. This allows for accessing
+  // records created previously. If this detects that the underlying data has
+  // gone away (cleared by another thread/process), it will invalidate all the
+  // data in this object and turn it into a simple "sink" with no values to
+  // return.
+  void ImportExistingData() const;
+
+  // A map of all the values within the memory block, keyed by name for quick
+  // updates of the values. This is "mutable" because it changes on "const"
+  // objects even when the actual data values can't change.
+  mutable std::map<StringPiece, ValueInfo> values_;
+
+  // Information about the memory block in which new data can be stored. These
+  // are "mutable" because they change even on "const" objects that are just
+  // skipping already set values.
+  mutable char* memory_;
+  mutable size_t available_;
+
+  // A pointer to the memory header for this instance.
+  MemoryHeader* const header_;
+
+  // These hold values used when initially creating the object. They are
+  // compared against current header values to check for outside changes.
+  const uint32_t orig_data_id;
+  const int64_t orig_process_id;
+  const int64_t orig_create_stamp;
+
+  DISALLOW_COPY_AND_ASSIGN(ActivityUserData);
+};
+
+// This class manages tracking a stack of activities for a single thread in
+// a persistent manner, implementing a bounded-size stack in a fixed-size
+// memory allocation. In order to support an operational mode where another
+// thread is analyzing this data in real-time, atomic operations are used
+// where necessary to guarantee a consistent view from the outside.
+//
+// This class is not generally used directly but instead managed by the
+// GlobalActivityTracker instance and updated using Scoped*Activity local
+// objects.
+class BASE_EXPORT ThreadActivityTracker {
+ public:
+  using ActivityId = uint32_t;
+
+  // This structure contains all the common information about the thread so
+  // it doesn't have to be repeated in every entry on the stack. It is defined
+  // and used completely within the .cc file.
+  struct Header;
+
+  // This structure holds a copy of all the internal data at the moment the
+  // "snapshot" operation is done. It is disconnected from the live tracker
+  // so that continued operation of the thread will not cause changes here.
+  struct BASE_EXPORT Snapshot {
+    // Explicit constructor/destructor are needed because of complex types
+    // with non-trivial default constructors and destructors.
+    Snapshot();
+    ~Snapshot();
+
+    // The name of the thread as set when it was created. The name may be
+    // truncated due to internal length limitations.
+    std::string thread_name;
+
+    // The timestamp at which this process was created.
+    int64_t create_stamp;
+
+    // The process and thread IDs. These values have no meaning other than
+    // they uniquely identify a running process and a running thread within
+    // that process.  Thread-IDs can be re-used across different processes
+    // and both can be re-used after the process/thread exits.
+    int64_t process_id = 0;
+    int64_t thread_id = 0;
+
+    // The current stack of activities that are underway for this thread. It
+    // is limited in its maximum size with later entries being left off.
+    std::vector<Activity> activity_stack;
+
+    // The current total depth of the activity stack, including those later
+    // entries not recorded in the |activity_stack| vector.
+    uint32_t activity_stack_depth = 0;
+
+    // The last recorded "exception" activity.
+    Activity last_exception;
+  };
+
+  // This is the base class for having the compiler manage an activity on the
+  // tracker's stack. It does nothing but call methods on the passed |tracker|
+  // if it is not null, making it safe (and cheap) to create these objects
+  // even if activity tracking is not enabled.
+  class BASE_EXPORT ScopedActivity {
+   public:
+    ScopedActivity(ThreadActivityTracker* tracker,
+                   const void* program_counter,
+                   const void* origin,
+                   Activity::Type type,
+                   const ActivityData& data);
+    ~ScopedActivity();
+
+    // Changes some basic metadata about the activity.
+    void ChangeTypeAndData(Activity::Type type, const ActivityData& data);
+
+   protected:
+    // The thread tracker to which this object reports. It can be null if
+    // activity tracking is not (yet) enabled.
+    ThreadActivityTracker* const tracker_;
+
+    // An identifier that indicates a specific activity on the stack.
+    ActivityId activity_id_;
+
+   private:
+    DISALLOW_COPY_AND_ASSIGN(ScopedActivity);
+  };
+
+  // A ThreadActivityTracker runs on top of memory that is managed externally.
+  // It must be large enough for the internal header and a few Activity
+  // blocks. See SizeForStackDepth().
+  ThreadActivityTracker(void* base, size_t size);
+  virtual ~ThreadActivityTracker();
+
+  // Indicates that an activity has started from a given |origin| address in
+  // the code, though it can be null if the creator's address is not known.
+  // The |type| and |data| describe the activity. |program_counter| should be
+  // the result of GetProgramCounter() where push is called. Returned is an
+  // ID that can be used to adjust the pushed activity.
+  ActivityId PushActivity(const void* program_counter,
+                          const void* origin,
+                          Activity::Type type,
+                          const ActivityData& data);
+
+  // An inlined version of the above that gets the program counter where it
+  // is called.
+  ALWAYS_INLINE
+  ActivityId PushActivity(const void* origin,
+                          Activity::Type type,
+                          const ActivityData& data) {
+    return PushActivity(GetProgramCounter(), origin, type, data);
+  }
+
+  // Changes the activity |type| and |data| of the top-most entry on the stack.
+  // This is useful if the information has changed and it is desirable to
+  // track that change without creating a new stack entry. If the type is
+  // ACT_NULL or the data is kNullActivityData then that value will remain
+  // unchanged. The type, if changed, must remain in the same category.
+  // Changing both is not atomic so a snapshot operation could occur between
+  // the update of |type| and |data| or between update of |data| fields.
+  void ChangeActivity(ActivityId id,
+                      Activity::Type type,
+                      const ActivityData& data);
+
+  // Indicates that an activity has completed.
+  void PopActivity(ActivityId id);
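+
+  // For example (a sketch; most callers use ScopedActivity or the Scoped*
+  // classes at the end of this file rather than pushing and popping
+  // directly, and |SomeFunction| is illustrative):
+  //
+  //   ActivityId id = tracker->PushActivity(&SomeFunction, Activity::ACT_TASK,
+  //                                         ActivityData::ForTask(42));
+  //   ...
+  //   tracker->PopActivity(id);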
+
+  // Gets the user-data object for an activity, creating it if necessary.
+  std::unique_ptr<ActivityUserData> GetUserData(
+      ActivityId id,
+      ActivityTrackerMemoryAllocator* allocator);
+
+  // Returns whether there is true user-data associated with a given
+  // ActivityId, since it's possible that any returned object is just a sink.
+  bool HasUserData(ActivityId id);
+
+  // Release the user-data information for an activity.
+  void ReleaseUserData(ActivityId id,
+                       ActivityTrackerMemoryAllocator* allocator);
+
+  // Save an exception. |origin| is the location of the exception.
+  void RecordExceptionActivity(const void* program_counter,
+                               const void* origin,
+                               Activity::Type type,
+                               const ActivityData& data);
+
+  // Returns whether the current data is valid or not. It is not valid if
+  // corruption has been detected in the header or other data structures.
+  bool IsValid() const;
+
+  // Gets a copy of the tracker contents for analysis. Returns false if a
+  // snapshot was not possible, perhaps because the data is not valid; the
+  // contents of |output_snapshot| are undefined in that case. The current
+  // implementation does not support concurrent snapshot operations.
+  bool CreateSnapshot(Snapshot* output_snapshot) const;
+
+  // Gets the base memory address used for storing data.
+  const void* GetBaseAddress();
+
+  // Access the "data version" value so tests can determine if an activity
+  // was pushed and popped in a single call.
+  uint32_t GetDataVersionForTesting();
+
+  // Explicitly sets the process ID.
+  void SetOwningProcessIdForTesting(int64_t pid, int64_t stamp);
+
+  // Gets the associated process ID, in native form, and the creation timestamp
+  // from tracker memory without loading the entire structure for analysis. This
+  // will return false if no valid process ID is available.
+  static bool GetOwningProcessId(const void* memory,
+                                 int64_t* out_id,
+                                 int64_t* out_stamp);
+
+  // Calculates the memory size required for a given stack depth, including
+  // the internal header structure for the stack.
+  static size_t SizeForStackDepth(int stack_depth);
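+
+  // For example, constructing a tracker over caller-owned memory (a sketch
+  // mirroring the unit tests; production trackers get their memory from the
+  // GlobalActivityTracker):
+  //
+  //   size_t size = ThreadActivityTracker::SizeForStackDepth(4);
+  //   std::unique_ptr<char[]> memory(new char[size]);
+  //   memset(memory.get(), 0, size);
+  //   ThreadActivityTracker tracker(memory.get(), size);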
+
+ private:
+  friend class ActivityTrackerTest;
+
+  bool CalledOnValidThread();
+
+  std::unique_ptr<ActivityUserData> CreateUserDataForActivity(
+      Activity* activity,
+      ActivityTrackerMemoryAllocator* allocator);
+
+  Header* const header_;        // Pointer to the Header structure.
+  Activity* const stack_;       // The stack of activities.
+
+#if DCHECK_IS_ON()
+  // The ActivityTracker is thread bound, and will be invoked across all the
+  // sequences that run on the thread. A ThreadChecker does not work here, as it
+  // asserts on running in the same sequence each time.
+  const PlatformThreadRef thread_id_;  // The thread this instance is bound to.
+#endif
+  const uint32_t stack_slots_;  // The total number of stack slots.
+
+  bool valid_ = false;          // Tracks whether the data is valid or not.
+
+  DISALLOW_COPY_AND_ASSIGN(ThreadActivityTracker);
+};
+
+
+// The global tracker manages all the individual thread trackers. Memory for
+// the thread trackers is taken from a PersistentMemoryAllocator which allows
+// for the data to be analyzed by a parallel process or even post-mortem.
+class BASE_EXPORT GlobalActivityTracker {
+ public:
+  // Type identifiers used when storing in persistent memory so they can be
+  // identified during extraction; the first 4 bytes of the SHA1 of the name
+  // are used as a unique integer. A "version number" is added to the base
+  // so that, if the structure of that object changes, stored older versions
+  // will be safely ignored. These are public so that an external process
+  // can recognize records of this type within an allocator.
+  enum : uint32_t {
+    kTypeIdActivityTracker = 0x5D7381AF + 4,   // SHA1(ActivityTracker) v4
+    kTypeIdUserDataRecord = 0x615EDDD7 + 3,    // SHA1(UserDataRecord) v3
+    kTypeIdGlobalLogMessage = 0x4CF434F9 + 1,  // SHA1(GlobalLogMessage) v1
+    kTypeIdProcessDataRecord = kTypeIdUserDataRecord + 0x100,
+
+    kTypeIdActivityTrackerFree = ~kTypeIdActivityTracker,
+    kTypeIdUserDataRecordFree = ~kTypeIdUserDataRecord,
+    kTypeIdProcessDataRecordFree = ~kTypeIdProcessDataRecord,
+  };
+
+  // An enumeration of common process life stages. All entries are given an
+  // explicit number so they are known and remain constant; this allows for
+  // cross-version analysis either locally or on a server.
+  enum ProcessPhase : int {
+    // The phases are generic and may have meaning to the tracker.
+    PROCESS_PHASE_UNKNOWN = 0,
+    PROCESS_LAUNCHED = 1,
+    PROCESS_LAUNCH_FAILED = 2,
+    PROCESS_EXITED_CLEANLY = 10,
+    PROCESS_EXITED_WITH_CODE = 11,
+
+    // Add here whatever is useful for analysis.
+    PROCESS_SHUTDOWN_STARTED = 100,
+    PROCESS_MAIN_LOOP_STARTED = 101,
+  };
+
+  // A callback made when a process exits to allow immediate analysis of its
+  // data. Note that the system may reuse the |process_id| so when fetching
+  // records it's important to ensure that what is returned was created before
+  // the |exit_stamp|. Movement of |process_data| information is allowed.
+  using ProcessExitCallback =
+      Callback<void(int64_t process_id,
+                    int64_t exit_stamp,
+                    int exit_code,
+                    ProcessPhase exit_phase,
+                    std::string&& command_line,
+                    ActivityUserData::Snapshot&& process_data)>;
+
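+  // For example, a callback might be bound like this (a sketch; |MyHandler|
+  // and |handler| are hypothetical):
+  //
+  //   global->SetProcessExitCallback(
+  //       Bind(&MyHandler::OnProcessExit, Unretained(handler)));
+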
+  // This structure contains information about a loaded module, as shown to
+  // users of the tracker.
+  struct BASE_EXPORT ModuleInfo {
+    ModuleInfo();
+    ModuleInfo(ModuleInfo&& rhs);
+    ModuleInfo(const ModuleInfo& rhs);
+    ~ModuleInfo();
+
+    ModuleInfo& operator=(ModuleInfo&& rhs);
+    ModuleInfo& operator=(const ModuleInfo& rhs);
+
+    // Information about where and when the module was loaded/unloaded.
+    bool is_loaded = false;  // Was the last operation a load or unload?
+    uintptr_t address = 0;   // Address of the last load operation.
+    int64_t load_time = 0;   // Time of last change; set automatically.
+
+    // Information about the module itself. These never change no matter how
+    // many times a module may be loaded and unloaded.
+    size_t size = 0;         // The size of the loaded module.
+    uint32_t timestamp = 0;  // Opaque "timestamp" for the module.
+    uint32_t age = 0;        // Opaque "age" for the module.
+    uint8_t identifier[16];  // Opaque identifier (GUID, etc.) for the module.
+    std::string file;        // The full path to the file. (UTF-8)
+    std::string debug_file;  // The full path to the debug file.
+  };
+
+  // This is a thin wrapper around the thread-tracker's ScopedActivity that
+  // allows thread-safe access to data values. It is safe to use even if
+  // activity tracking is not enabled.
+  class BASE_EXPORT ScopedThreadActivity
+      : public ThreadActivityTracker::ScopedActivity {
+   public:
+    ScopedThreadActivity(const void* program_counter,
+                         const void* origin,
+                         Activity::Type type,
+                         const ActivityData& data,
+                         bool lock_allowed);
+    ~ScopedThreadActivity();
+
+    // Returns an object for manipulating user data.
+    ActivityUserData& user_data();
+
+   private:
+    // Gets (or creates) a tracker for the current thread. If locking is not
+    // allowed (because a lock is being tracked, which would cause recursion)
+    // then the attempt to create one, if none is found, will be skipped. Once
+    // the tracker for this thread has been created for other reasons, locks
+    // will be tracked. The thread-tracker uses locks.
+    static ThreadActivityTracker* GetOrCreateTracker(bool lock_allowed) {
+      GlobalActivityTracker* global_tracker = Get();
+      if (!global_tracker)
+        return nullptr;
+      if (lock_allowed)
+        return global_tracker->GetOrCreateTrackerForCurrentThread();
+      else
+        return global_tracker->GetTrackerForCurrentThread();
+    }
+
+    // An object that manages additional user data, created only upon request.
+    std::unique_ptr<ActivityUserData> user_data_;
+
+    DISALLOW_COPY_AND_ASSIGN(ScopedThreadActivity);
+  };
+
+  ~GlobalActivityTracker();
+
+  // Creates a global tracker using a given persistent-memory |allocator| and
+  // providing the given |stack_depth| to each thread tracker it manages. The
+  // created object is activated so tracking will begin immediately upon return.
+  // The |process_id| can be zero to get it from the OS; an explicit value is
+  // accepted for testing purposes.
+  static void CreateWithAllocator(
+      std::unique_ptr<PersistentMemoryAllocator> allocator,
+      int stack_depth,
+      int64_t process_id);
+
+#if !defined(OS_NACL)
+  // Like above but internally creates an allocator around a disk file with
+  // the specified |size| at the given |file_path|. Any existing file will be
+  // overwritten. The |id| and |name| are arbitrary and stored in the allocator
+  // for reference by whatever process reads it. Returns true if successful.
+  static bool CreateWithFile(const FilePath& file_path,
+                             size_t size,
+                             uint64_t id,
+                             StringPiece name,
+                             int stack_depth);
+#endif  // !defined(OS_NACL)
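+
+  // For example (a sketch; the file name and sizes are illustrative):
+  //
+  //   GlobalActivityTracker::CreateWithFile(
+  //       FilePath(FILE_PATH_LITERAL("activity.dat")), 64 << 10, 0, "debug", 4);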
+
+  // Like above but internally creates an allocator using local heap memory of
+  // the specified size. This is used primarily for unit tests. The |process_id|
+  // can be zero to get it from the OS; an explicit value is accepted for
+  // testing purposes.
+  static bool CreateWithLocalMemory(size_t size,
+                                    uint64_t id,
+                                    StringPiece name,
+                                    int stack_depth,
+                                    int64_t process_id);
+
+  // Like above but internally creates an allocator using a shared-memory
+  // segment. The segment must already be mapped into the local memory space.
+  static bool CreateWithSharedMemory(std::unique_ptr<SharedMemory> shm,
+                                     uint64_t id,
+                                     StringPiece name,
+                                     int stack_depth);
+
+  // Like above but takes a handle to an existing shared memory segment and
+  // maps it before creating the tracker.
+  static bool CreateWithSharedMemoryHandle(const SharedMemoryHandle& handle,
+                                           size_t size,
+                                           uint64_t id,
+                                           StringPiece name,
+                                           int stack_depth);
+
+  // Gets the global activity-tracker or null if none exists.
+  static GlobalActivityTracker* Get() {
+    return reinterpret_cast<GlobalActivityTracker*>(
+        subtle::Acquire_Load(&g_tracker_));
+  }
+
+  // Sets the global activity-tracker for testing purposes.
+  static void SetForTesting(std::unique_ptr<GlobalActivityTracker> tracker);
+
+  // Extracts the global tracker, completely removing it; this exists only
+  // for testing. All tracked threads must exit before calling this. Tracking
+  // for the current thread will be automatically stopped.
+  static std::unique_ptr<GlobalActivityTracker> ReleaseForTesting();
+
+  // Convenience method for determining if a global tracker is active.
+  static bool IsEnabled() { return Get() != nullptr; }
+
+  // Gets the persistent-memory-allocator in which data is stored. Callers
+  // can store additional records here to pass more information to the
+  // analysis process.
+  PersistentMemoryAllocator* allocator() { return allocator_.get(); }
+
+  // Gets the thread's activity-tracker if it exists. This is inline for
+  // performance reasons and it uses thread-local-storage (TLS) so that there
+  // is no significant lookup time required to find the one for the calling
+  // thread. Ownership remains with the global tracker.
+  ThreadActivityTracker* GetTrackerForCurrentThread() {
+    return reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get());
+  }
+
+  // Gets the thread's activity-tracker or creates one if none exists. This
+  // is inline for performance reasons. Ownership remains with the global
+  // tracker.
+  ThreadActivityTracker* GetOrCreateTrackerForCurrentThread() {
+    ThreadActivityTracker* tracker = GetTrackerForCurrentThread();
+    if (tracker)
+      return tracker;
+    return CreateTrackerForCurrentThread();
+  }
+
+  // Creates an activity-tracker for the current thread.
+  ThreadActivityTracker* CreateTrackerForCurrentThread();
+
+  // Releases the activity-tracker for the current thread (for testing only).
+  void ReleaseTrackerForCurrentThreadForTesting();
+
+  // Sets a task-runner that can be used for background work.
+  void SetBackgroundTaskRunner(const scoped_refptr<TaskRunner>& runner);
+
+  // Sets an optional callback to be called when a process exits.
+  void SetProcessExitCallback(ProcessExitCallback callback);
+
+  // Manages process lifetimes. These are called by the process that launched
+  // and reaped the subprocess, not the subprocess itself. If it is expensive
+  // to generate the parameters, Get() the global tracker and call these
+  // conditionally rather than using the static versions.
+  void RecordProcessLaunch(ProcessId process_id,
+                           const FilePath::StringType& cmd);
+  void RecordProcessLaunch(ProcessId process_id,
+                           const FilePath::StringType& exe,
+                           const FilePath::StringType& args);
+  void RecordProcessExit(ProcessId process_id, int exit_code);
+  static void RecordProcessLaunchIfEnabled(ProcessId process_id,
+                                           const FilePath::StringType& cmd) {
+    GlobalActivityTracker* tracker = Get();
+    if (tracker)
+      tracker->RecordProcessLaunch(process_id, cmd);
+  }
+  static void RecordProcessLaunchIfEnabled(ProcessId process_id,
+                                           const FilePath::StringType& exe,
+                                           const FilePath::StringType& args) {
+    GlobalActivityTracker* tracker = Get();
+    if (tracker)
+      tracker->RecordProcessLaunch(process_id, exe, args);
+  }
+  static void RecordProcessExitIfEnabled(ProcessId process_id, int exit_code) {
+    GlobalActivityTracker* tracker = Get();
+    if (tracker)
+      tracker->RecordProcessExit(process_id, exit_code);
+  }
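+
+  // For example, a launching process might bracket a subprocess like this
+  // (a sketch; |pid|, |command_line|, and |exit_code| are illustrative):
+  //
+  //   GlobalActivityTracker::RecordProcessLaunchIfEnabled(pid, command_line);
+  //   ...  // Launch, wait for, and reap the subprocess.
+  //   GlobalActivityTracker::RecordProcessExitIfEnabled(pid, exit_code);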
+
+  // Sets the "phase" of the current process, useful for knowing what it was
+  // doing when it last reported.
+  void SetProcessPhase(ProcessPhase phase);
+  static void SetProcessPhaseIfEnabled(ProcessPhase phase) {
+    GlobalActivityTracker* tracker = Get();
+    if (tracker)
+      tracker->SetProcessPhase(phase);
+  }
+
+  // Records a log message. The current implementation does NOT recycle these,
+  // so only store critical messages such as FATAL ones.
+  void RecordLogMessage(StringPiece message);
+  static void RecordLogMessageIfEnabled(StringPiece message) {
+    GlobalActivityTracker* tracker = Get();
+    if (tracker)
+      tracker->RecordLogMessage(message);
+  }
+
+  // Records a module load/unload event. This is safe to call multiple times
+  // even with the same information.
+  void RecordModuleInfo(const ModuleInfo& info);
+  static void RecordModuleInfoIfEnabled(const ModuleInfo& info) {
+    GlobalActivityTracker* tracker = Get();
+    if (tracker)
+      tracker->RecordModuleInfo(info);
+  }
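+
+  // For example (a sketch; the field values are illustrative):
+  //
+  //   GlobalActivityTracker::ModuleInfo info;
+  //   info.is_loaded = true;
+  //   info.address = reinterpret_cast<uintptr_t>(module_base);
+  //   info.size = module_size;
+  //   info.file = "/path/to/module.so";
+  //   GlobalActivityTracker::RecordModuleInfoIfEnabled(info);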
+
+  // Record field trial information. This call is thread-safe. In addition to
+  // this, construction of a GlobalActivityTracker will cause all existing
+  // active field trials to be fetched and recorded.
+  void RecordFieldTrial(const std::string& trial_name, StringPiece group_name);
+  static void RecordFieldTrialIfEnabled(const std::string& trial_name,
+                                        StringPiece group_name) {
+    GlobalActivityTracker* tracker = Get();
+    if (tracker)
+      tracker->RecordFieldTrial(trial_name, group_name);
+  }
+
+  // Record exception information for the current thread.
+  ALWAYS_INLINE
+  void RecordException(const void* origin, uint32_t code) {
+    return RecordExceptionImpl(GetProgramCounter(), origin, code);
+  }
+  void RecordException(const void* pc, const void* origin, uint32_t code);
+
+  // Marks the tracked data as deleted.
+  void MarkDeleted();
+
+  // Gets the process ID used for tracking. This is typically the same as what
+  // the OS thinks is the current process but can be overridden for testing.
+  int64_t process_id() { return process_id_; }
+
+  // Accesses the process data record for storing arbitrary key/value pairs.
+  // Updates to this are thread-safe.
+  ActivityUserData& process_data() { return process_data_; }
+
+ private:
+  friend class GlobalActivityAnalyzer;
+  friend class ScopedThreadActivity;
+  friend class ActivityTrackerTest;
+
+  enum : int {
+    // The maximum number of threads that can be tracked within a process. If
+    // more than this number run concurrently, tracking of new ones may cease.
+    kMaxThreadCount = 100,
+    kCachedThreadMemories = 10,
+    kCachedUserDataMemories = 10,
+  };
+
+  // A wrapper around ActivityUserData that is thread-safe and thus can be used
+  // in the global scope without requiring that it be called from only one
+  // thread.
+  class ThreadSafeUserData : public ActivityUserData {
+   public:
+    ThreadSafeUserData(void* memory, size_t size, int64_t pid = 0);
+    ~ThreadSafeUserData() override;
+
+   private:
+    void Set(StringPiece name,
+             ValueType type,
+             const void* memory,
+             size_t size) override;
+
+    Lock data_lock_;
+
+    DISALLOW_COPY_AND_ASSIGN(ThreadSafeUserData);
+  };
+
+  // State of a module as stored in persistent memory. This supports a single
+  // loading of a module only. If modules are loaded multiple times at
+  // different addresses, only the last will be recorded and an unload will
+  // not revert to the information of any other addresses.
+  struct BASE_EXPORT ModuleInfoRecord {
+    // SHA1(ModuleInfoRecord): Increment this if structure changes!
+    static constexpr uint32_t kPersistentTypeId = 0x05DB5F41 + 1;
+
+    // Expected size for 32/64-bit check by PersistentMemoryAllocator.
+    static constexpr size_t kExpectedInstanceSize =
+        OwningProcess::kExpectedInstanceSize + 56;
+
+    // The atomic unfortunately makes this a "complex" class on some compilers
+    // and thus requires an out-of-line constructor & destructor even though
+    // they do nothing.
+    ModuleInfoRecord();
+    ~ModuleInfoRecord();
+
+    OwningProcess owner;            // The process that created this record.
+    uint64_t address;               // The base address of the module.
+    uint64_t load_time;             // Time of last load/unload.
+    uint64_t size;                  // The size of the module in bytes.
+    uint32_t timestamp;             // Opaque timestamp of the module.
+    uint32_t age;                   // Opaque "age" associated with the module.
+    uint8_t identifier[16];         // Opaque identifier for the module.
+    std::atomic<uint32_t> changes;  // Number of load/unload actions.
+    uint16_t pickle_size;           // The size of the following pickle.
+    uint8_t loaded;                 // Flag if module is loaded or not.
+    char pickle[1];                 // Other strings; may allocate larger.
+
+    // Decodes/encodes storage structure from more generic info structure.
+    bool DecodeTo(GlobalActivityTracker::ModuleInfo* info,
+                  size_t record_size) const;
+    static ModuleInfoRecord* CreateFrom(
+        const GlobalActivityTracker::ModuleInfo& info,
+        PersistentMemoryAllocator* allocator);
+
+    // Updates the core information without changing the encoded strings. This
+    // is useful when a known module changes state (i.e. new load or unload).
+    bool UpdateFrom(const GlobalActivityTracker::ModuleInfo& info);
+
+   private:
+    DISALLOW_COPY_AND_ASSIGN(ModuleInfoRecord);
+  };
+
+  // A thin wrapper around the main thread-tracker that keeps additional
+  // information that the global tracker needs to handle joined threads.
+  class ManagedActivityTracker : public ThreadActivityTracker {
+   public:
+    ManagedActivityTracker(PersistentMemoryAllocator::Reference mem_reference,
+                           void* base,
+                           size_t size);
+    ~ManagedActivityTracker() override;
+
+    // The reference into persistent memory from which the thread-tracker's
+    // memory was created.
+    const PersistentMemoryAllocator::Reference mem_reference_;
+
+    // The physical address used for the thread-tracker's memory.
+    void* const mem_base_;
+
+   private:
+    DISALLOW_COPY_AND_ASSIGN(ManagedActivityTracker);
+  };
+
+  // Creates a global tracker using a given persistent-memory |allocator| and
+  // providing the given |stack_depth| to each thread tracker it manages. The
+  // created object is activated so tracking has already started upon return.
+  // The |process_id| can be zero to get it from the OS; an explicit value is
+  // accepted for testing purposes.
+  GlobalActivityTracker(std::unique_ptr<PersistentMemoryAllocator> allocator,
+                        int stack_depth,
+                        int64_t process_id);
+
+  // Returns the memory used by an activity-tracker managed by this class.
+  // It is called during the destruction of a ManagedActivityTracker object.
+  void ReturnTrackerMemory(ManagedActivityTracker* tracker);
+
+  // Records exception information.
+  void RecordExceptionImpl(const void* pc, const void* origin, uint32_t code);
+
+  // Releases the activity-tracker associated with a thread. It is called
+  // automatically when a thread is joined and thus there is nothing more to
+  // be tracked. |value| is a pointer to a ManagedActivityTracker.
+  static void OnTLSDestroy(void* value);
+
+  // Does process-exit work. This can be run on any thread.
+  void CleanupAfterProcess(int64_t process_id,
+                           int64_t exit_stamp,
+                           int exit_code,
+                           std::string&& command_line);
+
+  // The persistent-memory allocator from which the memory for all trackers
+  // is taken.
+  std::unique_ptr<PersistentMemoryAllocator> allocator_;
+
+  // The size (in bytes) of memory required by a ThreadActivityTracker to
+  // provide the stack-depth requested during construction.
+  const size_t stack_memory_size_;
+
+  // The process-id of the current process. This is kept as a member variable,
+  // defined during initialization, for testing purposes.
+  const int64_t process_id_;
+
+  // The activity tracker for the currently executing thread.
+  ThreadLocalStorage::Slot this_thread_tracker_;
+
+  // The number of thread trackers currently active.
+  std::atomic<int> thread_tracker_count_;
+
+  // A caching memory allocator for thread-tracker objects.
+  ActivityTrackerMemoryAllocator thread_tracker_allocator_;
+  Lock thread_tracker_allocator_lock_;
+
+  // A caching memory allocator for user data attached to activity data.
+  ActivityTrackerMemoryAllocator user_data_allocator_;
+  Lock user_data_allocator_lock_;
+
+  // An object for holding arbitrary key/value pairs with thread-safe access.
+  ThreadSafeUserData process_data_;
+
+  // A map of global module information, keyed by module path.
+  std::map<const std::string, ModuleInfoRecord*> modules_;
+  Lock modules_lock_;
+
+  // The active global activity tracker.
+  static subtle::AtomicWord g_tracker_;
+
+  // A lock that is used to protect access to the following fields.
+  Lock global_tracker_lock_;
+
+  // The collection of processes being tracked and their command-lines.
+  std::map<int64_t, std::string> known_processes_;
+
+  // A task-runner that can be used for doing background processing.
+  scoped_refptr<TaskRunner> background_task_runner_;
+
+  // A callback performed when a subprocess exits, including its exit-code
+  // and the phase it was in when that occurred. This will be called via
+  // the |background_task_runner_| if one is set or whatever thread reaped
+  // the process otherwise.
+  ProcessExitCallback process_exit_callback_;
+
+  DISALLOW_COPY_AND_ASSIGN(GlobalActivityTracker);
+};
+
+
+// Records entry into and out of an arbitrary block of code.
+class BASE_EXPORT ScopedActivity
+    : public GlobalActivityTracker::ScopedThreadActivity {
+ public:
+  // Track activity at the specified FROM_HERE location for an arbitrary
+  // 4-bit |action|, an arbitrary 32-bit |id|, and 32-bits of arbitrary
+  // |info|. None of these values affect operation; they're all purely
+  // for association and analysis. To have unique identifiers across a
+  // diverse code-base, create the number by taking the first 8 characters
+  // of the hash of the activity being tracked.
+  //
+  // For example:
+  //   Tracking method: void MayNeverExit(int32_t foo) {...}
+  //   echo -n "MayNeverExit" | sha1sum   =>   e44873ccab21e2b71270da24aa1...
+  //
+  //   void MayNeverExit(int32_t foo) {
+  //     base::debug::ScopedActivity track_me(0, 0xE44873CC, foo);
+  //     ...
+  //   }
+  ALWAYS_INLINE
+  ScopedActivity(uint8_t action, uint32_t id, int32_t info)
+      : ScopedActivity(GetProgramCounter(), action, id, info) {}
+  ScopedActivity() : ScopedActivity(0, 0, 0) {}
+
+  // Changes the |action| and/or |info| of this activity on the stack. This
+  // is useful for tracking progress through a function, updating the action
+  // to indicate "milestones" in the block (max 16 milestones: 0-15) or the
+  // info to reflect other changes. Changing both is not atomic so a snapshot
+  // operation could occur between the update of |action| and |info|.
+  void ChangeAction(uint8_t action);
+  void ChangeInfo(int32_t info);
+  void ChangeActionAndInfo(uint8_t action, int32_t info);
+
+ private:
+  // Constructs the object using a passed-in program-counter.
+  ScopedActivity(const void* program_counter,
+                 uint8_t action,
+                 uint32_t id,
+                 int32_t info);
+
+  // A copy of the ID code so it doesn't have to be passed by the caller when
+  // changing the |info| field.
+  uint32_t id_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedActivity);
+};
+
+
+// These "scoped" classes provide easy tracking of various blocking actions.
+
+class BASE_EXPORT ScopedTaskRunActivity
+    : public GlobalActivityTracker::ScopedThreadActivity {
+ public:
+  ALWAYS_INLINE
+  explicit ScopedTaskRunActivity(const PendingTask& task)
+      : ScopedTaskRunActivity(GetProgramCounter(), task) {}
+
+ private:
+  ScopedTaskRunActivity(const void* program_counter, const PendingTask& task);
+  DISALLOW_COPY_AND_ASSIGN(ScopedTaskRunActivity);
+};
+
+class BASE_EXPORT ScopedLockAcquireActivity
+    : public GlobalActivityTracker::ScopedThreadActivity {
+ public:
+  ALWAYS_INLINE
+  explicit ScopedLockAcquireActivity(const base::internal::LockImpl* lock)
+      : ScopedLockAcquireActivity(GetProgramCounter(), lock) {}
+
+ private:
+  ScopedLockAcquireActivity(const void* program_counter,
+                            const base::internal::LockImpl* lock);
+  DISALLOW_COPY_AND_ASSIGN(ScopedLockAcquireActivity);
+};
+
+class BASE_EXPORT ScopedEventWaitActivity
+    : public GlobalActivityTracker::ScopedThreadActivity {
+ public:
+  ALWAYS_INLINE
+  explicit ScopedEventWaitActivity(const WaitableEvent* event)
+      : ScopedEventWaitActivity(GetProgramCounter(), event) {}
+
+ private:
+  ScopedEventWaitActivity(const void* program_counter,
+                          const WaitableEvent* event);
+  DISALLOW_COPY_AND_ASSIGN(ScopedEventWaitActivity);
+};
+
+class BASE_EXPORT ScopedThreadJoinActivity
+    : public GlobalActivityTracker::ScopedThreadActivity {
+ public:
+  ALWAYS_INLINE
+  explicit ScopedThreadJoinActivity(const PlatformThreadHandle* thread)
+      : ScopedThreadJoinActivity(GetProgramCounter(), thread) {}
+
+ private:
+  ScopedThreadJoinActivity(const void* program_counter,
+                           const PlatformThreadHandle* thread);
+  DISALLOW_COPY_AND_ASSIGN(ScopedThreadJoinActivity);
+};
+
+// Some systems don't have base::Process
+#if !defined(OS_NACL) && !defined(OS_IOS)
+class BASE_EXPORT ScopedProcessWaitActivity
+    : public GlobalActivityTracker::ScopedThreadActivity {
+ public:
+  ALWAYS_INLINE
+  explicit ScopedProcessWaitActivity(const Process* process)
+      : ScopedProcessWaitActivity(GetProgramCounter(), process) {}
+
+ private:
+  ScopedProcessWaitActivity(const void* program_counter,
+                            const Process* process);
+  DISALLOW_COPY_AND_ASSIGN(ScopedProcessWaitActivity);
+};
+#endif
+
+}  // namespace debug
+}  // namespace base
+
+#endif  // BASE_DEBUG_ACTIVITY_TRACKER_H_
diff --git a/base/debug/activity_tracker_unittest.cc b/base/debug/activity_tracker_unittest.cc
new file mode 100644
index 0000000..e2b61a9
--- /dev/null
+++ b/base/debug/activity_tracker_unittest.cc
@@ -0,0 +1,585 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/activity_tracker.h"
+
+#include <memory>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/files/file.h"
+#include "base/files/file_util.h"
+#include "base/files/memory_mapped_file.h"
+#include "base/files/scoped_temp_dir.h"
+#include "base/memory/ptr_util.h"
+#include "base/pending_task.h"
+#include "base/rand_util.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/spin_wait.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/simple_thread.h"
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace debug {
+
+namespace {
+
+class TestActivityTracker : public ThreadActivityTracker {
+ public:
+  TestActivityTracker(std::unique_ptr<char[]> memory, size_t mem_size)
+      : ThreadActivityTracker(memset(memory.get(), 0, mem_size), mem_size),
+        mem_segment_(std::move(memory)) {}
+
+  ~TestActivityTracker() override = default;
+
+ private:
+  std::unique_ptr<char[]> mem_segment_;
+};
+
+}  // namespace
+
+
+class ActivityTrackerTest : public testing::Test {
+ public:
+  const int kMemorySize = 1 << 20;  // 1MiB
+  const int kStackSize  = 1 << 10;  // 1KiB
+
+  using ActivityId = ThreadActivityTracker::ActivityId;
+
+  ActivityTrackerTest() = default;
+
+  ~ActivityTrackerTest() override {
+    GlobalActivityTracker* global_tracker = GlobalActivityTracker::Get();
+    if (global_tracker) {
+      global_tracker->ReleaseTrackerForCurrentThreadForTesting();
+      delete global_tracker;
+    }
+  }
+
+  std::unique_ptr<ThreadActivityTracker> CreateActivityTracker() {
+    std::unique_ptr<char[]> memory(new char[kStackSize]);
+    return std::make_unique<TestActivityTracker>(std::move(memory), kStackSize);
+  }
+
+  size_t GetGlobalActiveTrackerCount() {
+    GlobalActivityTracker* global_tracker = GlobalActivityTracker::Get();
+    if (!global_tracker)
+      return 0;
+    return global_tracker->thread_tracker_count_.load(
+        std::memory_order_relaxed);
+  }
+
+  size_t GetGlobalInactiveTrackerCount() {
+    GlobalActivityTracker* global_tracker = GlobalActivityTracker::Get();
+    if (!global_tracker)
+      return 0;
+    AutoLock autolock(global_tracker->thread_tracker_allocator_lock_);
+    return global_tracker->thread_tracker_allocator_.cache_used();
+  }
+
+  size_t GetGlobalUserDataMemoryCacheUsed() {
+    return GlobalActivityTracker::Get()->user_data_allocator_.cache_used();
+  }
+
+  void HandleProcessExit(int64_t id,
+                         int64_t stamp,
+                         int code,
+                         GlobalActivityTracker::ProcessPhase phase,
+                         std::string&& command,
+                         ActivityUserData::Snapshot&& data) {
+    exit_id_ = id;
+    exit_stamp_ = stamp;
+    exit_code_ = code;
+    exit_phase_ = phase;
+    exit_command_ = std::move(command);
+    exit_data_ = std::move(data);
+  }
+
+  int64_t exit_id_ = 0;
+  int64_t exit_stamp_;
+  int exit_code_;
+  GlobalActivityTracker::ProcessPhase exit_phase_;
+  std::string exit_command_;
+  ActivityUserData::Snapshot exit_data_;
+};
+
+TEST_F(ActivityTrackerTest, UserDataTest) {
+  char buffer[256];
+  memset(buffer, 0, sizeof(buffer));
+  ActivityUserData data(buffer, sizeof(buffer));
+  size_t space = sizeof(buffer) - sizeof(ActivityUserData::MemoryHeader);
+  ASSERT_EQ(space, data.available_);
+
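+  // Each stored record appears to occupy an 8-byte-aligned block: a 6-byte
+  // FieldHeader plus the name, rounded up to the alignment, then the value
+  // storage. Hence "foo" plus an int64 consumes 16 + 8 = 24 bytes below,
+  // while a one-character name fits beside the header in the first word.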
+  data.SetInt("foo", 1);
+  space -= 24;
+  ASSERT_EQ(space, data.available_);
+
+  data.SetUint("b", 1U);  // Small names fit beside header in a word.
+  space -= 16;
+  ASSERT_EQ(space, data.available_);
+
+  data.Set("c", buffer, 10);
+  space -= 24;
+  ASSERT_EQ(space, data.available_);
+
+  data.SetString("dear john", "it's been fun");
+  space -= 32;
+  ASSERT_EQ(space, data.available_);
+
+  data.Set("c", buffer, 20);
+  ASSERT_EQ(space, data.available_);
+
+  data.SetString("dear john", "but we're done together");
+  ASSERT_EQ(space, data.available_);
+
+  data.SetString("dear john", "bye");
+  ASSERT_EQ(space, data.available_);
+
+  data.SetChar("d", 'x');
+  space -= 8;
+  ASSERT_EQ(space, data.available_);
+
+  data.SetBool("ee", true);
+  space -= 16;
+  ASSERT_EQ(space, data.available_);
+
+  data.SetString("f", "");
+  space -= 8;
+  ASSERT_EQ(space, data.available_);
+}
+
+TEST_F(ActivityTrackerTest, PushPopTest) {
+  std::unique_ptr<ThreadActivityTracker> tracker = CreateActivityTracker();
+  ThreadActivityTracker::Snapshot snapshot;
+
+  ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
+  ASSERT_EQ(0U, snapshot.activity_stack_depth);
+  ASSERT_EQ(0U, snapshot.activity_stack.size());
+
+  char origin1;
+  ActivityId id1 = tracker->PushActivity(&origin1, Activity::ACT_TASK,
+                                         ActivityData::ForTask(11));
+  ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
+  ASSERT_EQ(1U, snapshot.activity_stack_depth);
+  ASSERT_EQ(1U, snapshot.activity_stack.size());
+  EXPECT_NE(0, snapshot.activity_stack[0].time_internal);
+  EXPECT_EQ(Activity::ACT_TASK, snapshot.activity_stack[0].activity_type);
+  EXPECT_EQ(reinterpret_cast<uintptr_t>(&origin1),
+            snapshot.activity_stack[0].origin_address);
+  EXPECT_EQ(11U, snapshot.activity_stack[0].data.task.sequence_id);
+
+  char origin2;
+  char lock2;
+  ActivityId id2 = tracker->PushActivity(&origin2, Activity::ACT_LOCK,
+                                         ActivityData::ForLock(&lock2));
+  ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
+  ASSERT_EQ(2U, snapshot.activity_stack_depth);
+  ASSERT_EQ(2U, snapshot.activity_stack.size());
+  EXPECT_LE(snapshot.activity_stack[0].time_internal,
+            snapshot.activity_stack[1].time_internal);
+  EXPECT_EQ(Activity::ACT_LOCK, snapshot.activity_stack[1].activity_type);
+  EXPECT_EQ(reinterpret_cast<uintptr_t>(&origin2),
+            snapshot.activity_stack[1].origin_address);
+  EXPECT_EQ(reinterpret_cast<uintptr_t>(&lock2),
+            snapshot.activity_stack[1].data.lock.lock_address);
+
+  tracker->PopActivity(id2);
+  ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
+  ASSERT_EQ(1U, snapshot.activity_stack_depth);
+  ASSERT_EQ(1U, snapshot.activity_stack.size());
+  EXPECT_EQ(Activity::ACT_TASK, snapshot.activity_stack[0].activity_type);
+  EXPECT_EQ(reinterpret_cast<uintptr_t>(&origin1),
+            snapshot.activity_stack[0].origin_address);
+  EXPECT_EQ(11U, snapshot.activity_stack[0].data.task.sequence_id);
+
+  tracker->PopActivity(id1);
+  ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
+  ASSERT_EQ(0U, snapshot.activity_stack_depth);
+  ASSERT_EQ(0U, snapshot.activity_stack.size());
+}
+
+TEST_F(ActivityTrackerTest, ScopedTaskTest) {
+  GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3, 0);
+
+  ThreadActivityTracker* tracker =
+      GlobalActivityTracker::Get()->GetOrCreateTrackerForCurrentThread();
+  ThreadActivityTracker::Snapshot snapshot;
+  ASSERT_EQ(0U, GetGlobalUserDataMemoryCacheUsed());
+
+  ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
+  ASSERT_EQ(0U, snapshot.activity_stack_depth);
+  ASSERT_EQ(0U, snapshot.activity_stack.size());
+
+  {
+    PendingTask task1(FROM_HERE, DoNothing());
+    ScopedTaskRunActivity activity1(task1);
+    ActivityUserData& user_data1 = activity1.user_data();
+    (void)user_data1;  // Tell compiler it's been used.
+
+    ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
+    ASSERT_EQ(1U, snapshot.activity_stack_depth);
+    ASSERT_EQ(1U, snapshot.activity_stack.size());
+    EXPECT_EQ(Activity::ACT_TASK, snapshot.activity_stack[0].activity_type);
+
+    {
+      PendingTask task2(FROM_HERE, DoNothing());
+      ScopedTaskRunActivity activity2(task2);
+      ActivityUserData& user_data2 = activity2.user_data();
+      (void)user_data2;  // Tell compiler it's been used.
+
+      ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
+      ASSERT_EQ(2U, snapshot.activity_stack_depth);
+      ASSERT_EQ(2U, snapshot.activity_stack.size());
+      EXPECT_EQ(Activity::ACT_TASK, snapshot.activity_stack[1].activity_type);
+    }
+
+    ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
+    ASSERT_EQ(1U, snapshot.activity_stack_depth);
+    ASSERT_EQ(1U, snapshot.activity_stack.size());
+    EXPECT_EQ(Activity::ACT_TASK, snapshot.activity_stack[0].activity_type);
+  }
+
+  ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
+  ASSERT_EQ(0U, snapshot.activity_stack_depth);
+  ASSERT_EQ(0U, snapshot.activity_stack.size());
+  ASSERT_EQ(2U, GetGlobalUserDataMemoryCacheUsed());
+}
+
+namespace {
+
+class SimpleLockThread : public SimpleThread {
+ public:
+  SimpleLockThread(const std::string& name, Lock* lock)
+      : SimpleThread(name, Options()),
+        lock_(lock),
+        data_changed_(false),
+        is_running_(false) {}
+
+  ~SimpleLockThread() override = default;
+
+  void Run() override {
+    ThreadActivityTracker* tracker =
+        GlobalActivityTracker::Get()->GetOrCreateTrackerForCurrentThread();
+    uint32_t pre_version = tracker->GetDataVersionForTesting();
+
+    is_running_.store(true, std::memory_order_relaxed);
+    lock_->Acquire();
+    data_changed_ = tracker->GetDataVersionForTesting() != pre_version;
+    lock_->Release();
+    is_running_.store(false, std::memory_order_relaxed);
+  }
+
+  bool IsRunning() { return is_running_.load(std::memory_order_relaxed); }
+
+  bool WasDataChanged() { return data_changed_; }
+
+ private:
+  Lock* lock_;
+  bool data_changed_;
+  std::atomic<bool> is_running_;
+
+  DISALLOW_COPY_AND_ASSIGN(SimpleLockThread);
+};
+
+}  // namespace
+
+TEST_F(ActivityTrackerTest, LockTest) {
+  GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3, 0);
+
+  ThreadActivityTracker* tracker =
+      GlobalActivityTracker::Get()->GetOrCreateTrackerForCurrentThread();
+  ThreadActivityTracker::Snapshot snapshot;
+  ASSERT_EQ(0U, GetGlobalUserDataMemoryCacheUsed());
+
+  Lock lock;
+  uint32_t pre_version = tracker->GetDataVersionForTesting();
+
+  // Check no activity when only "trying" a lock.
+  EXPECT_TRUE(lock.Try());
+  EXPECT_EQ(pre_version, tracker->GetDataVersionForTesting());
+  lock.Release();
+  EXPECT_EQ(pre_version, tracker->GetDataVersionForTesting());
+
+  // Check no activity when acquiring a free lock.
+  SimpleLockThread t1("locker1", &lock);
+  t1.Start();
+  t1.Join();
+  EXPECT_FALSE(t1.WasDataChanged());
+
+  // Check that activity is recorded when acquiring a busy lock.
+  SimpleLockThread t2("locker2", &lock);
+  lock.Acquire();
+  t2.Start();
+  while (!t2.IsRunning())
+    PlatformThread::Sleep(TimeDelta::FromMilliseconds(10));
+  // t2 can't join until the lock is released, but we have to give t2 time to
+  // actually block on the lock before releasing it or the results will not
+  // be correct.
+  PlatformThread::Sleep(TimeDelta::FromMilliseconds(200));
+  lock.Release();
+  // Now the results will be valid.
+  t2.Join();
+  EXPECT_TRUE(t2.WasDataChanged());
+}
+
+TEST_F(ActivityTrackerTest, ExceptionTest) {
+  GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3, 0);
+  GlobalActivityTracker* global = GlobalActivityTracker::Get();
+
+  ThreadActivityTracker* tracker =
+      GlobalActivityTracker::Get()->GetOrCreateTrackerForCurrentThread();
+  ThreadActivityTracker::Snapshot snapshot;
+  ASSERT_EQ(0U, GetGlobalUserDataMemoryCacheUsed());
+
+  ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
+  ASSERT_EQ(0U, snapshot.last_exception.activity_type);
+
+  char origin;
+  global->RecordException(&origin, 42);
+
+  ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
+  EXPECT_EQ(Activity::ACT_EXCEPTION, snapshot.last_exception.activity_type);
+  EXPECT_EQ(reinterpret_cast<uintptr_t>(&origin),
+            snapshot.last_exception.origin_address);
+  EXPECT_EQ(42U, snapshot.last_exception.data.exception.code);
+}
+
+TEST_F(ActivityTrackerTest, CreateWithFileTest) {
+  const char temp_name[] = "CreateWithFileTest";
+  ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath temp_file = temp_dir.GetPath().AppendASCII(temp_name);
+  const size_t temp_size = 64 << 10;  // 64 KiB
+
+  // Create a global tracker on a new file.
+  ASSERT_FALSE(PathExists(temp_file));
+  GlobalActivityTracker::CreateWithFile(temp_file, temp_size, 0, "foo", 3);
+  GlobalActivityTracker* global = GlobalActivityTracker::Get();
+  EXPECT_EQ(std::string("foo"), global->allocator()->Name());
+  global->ReleaseTrackerForCurrentThreadForTesting();
+  delete global;
+
+  // Create a global tracker over an existing file, replacing it. If the
+  // replacement doesn't work, the name will remain as it was first created.
+  ASSERT_TRUE(PathExists(temp_file));
+  GlobalActivityTracker::CreateWithFile(temp_file, temp_size, 0, "bar", 3);
+  global = GlobalActivityTracker::Get();
+  EXPECT_EQ(std::string("bar"), global->allocator()->Name());
+  global->ReleaseTrackerForCurrentThreadForTesting();
+  delete global;
+}
+
+
+// GlobalActivityTracker tests below.
+
+TEST_F(ActivityTrackerTest, BasicTest) {
+  GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3, 0);
+  GlobalActivityTracker* global = GlobalActivityTracker::Get();
+
+  // Ensure the data repositories have backing store, indicated by non-zero ID.
+  EXPECT_NE(0U, global->process_data().id());
+}
+
+namespace {
+
+class SimpleActivityThread : public SimpleThread {
+ public:
+  SimpleActivityThread(const std::string& name,
+                       const void* origin,
+                       Activity::Type activity,
+                       const ActivityData& data)
+      : SimpleThread(name, Options()),
+        origin_(origin),
+        activity_(activity),
+        data_(data),
+        ready_(false),
+        exit_(false),
+        exit_condition_(&lock_) {}
+
+  ~SimpleActivityThread() override = default;
+
+  void Run() override {
+    ThreadActivityTracker::ActivityId id =
+        GlobalActivityTracker::Get()
+            ->GetOrCreateTrackerForCurrentThread()
+            ->PushActivity(origin_, activity_, data_);
+
+    {
+      AutoLock auto_lock(lock_);
+      ready_.store(true, std::memory_order_release);
+      while (!exit_.load(std::memory_order_relaxed))
+        exit_condition_.Wait();
+    }
+
+    GlobalActivityTracker::Get()->GetTrackerForCurrentThread()->PopActivity(id);
+  }
+
+  void Exit() {
+    AutoLock auto_lock(lock_);
+    exit_.store(true, std::memory_order_relaxed);
+    exit_condition_.Signal();
+  }
+
+  void WaitReady() {
+    SPIN_FOR_1_SECOND_OR_UNTIL_TRUE(ready_.load(std::memory_order_acquire));
+  }
+
+ private:
+  const void* origin_;
+  Activity::Type activity_;
+  ActivityData data_;
+
+  std::atomic<bool> ready_;
+  std::atomic<bool> exit_;
+  Lock lock_;
+  ConditionVariable exit_condition_;
+
+  DISALLOW_COPY_AND_ASSIGN(SimpleActivityThread);
+};
+
+}  // namespace
+
+TEST_F(ActivityTrackerTest, ThreadDeathTest) {
+  GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3, 0);
+  GlobalActivityTracker::Get()->GetOrCreateTrackerForCurrentThread();
+  const size_t starting_active = GetGlobalActiveTrackerCount();
+  const size_t starting_inactive = GetGlobalInactiveTrackerCount();
+
+  SimpleActivityThread t1("t1", nullptr, Activity::ACT_TASK,
+                          ActivityData::ForTask(11));
+  t1.Start();
+  t1.WaitReady();
+  EXPECT_EQ(starting_active + 1, GetGlobalActiveTrackerCount());
+  EXPECT_EQ(starting_inactive, GetGlobalInactiveTrackerCount());
+
+  t1.Exit();
+  t1.Join();
+  EXPECT_EQ(starting_active, GetGlobalActiveTrackerCount());
+  EXPECT_EQ(starting_inactive + 1, GetGlobalInactiveTrackerCount());
+
+  // Start another thread and ensure it re-uses the existing memory.
+
+  SimpleActivityThread t2("t2", nullptr, Activity::ACT_TASK,
+                          ActivityData::ForTask(22));
+  t2.Start();
+  t2.WaitReady();
+  EXPECT_EQ(starting_active + 1, GetGlobalActiveTrackerCount());
+  EXPECT_EQ(starting_inactive, GetGlobalInactiveTrackerCount());
+
+  t2.Exit();
+  t2.Join();
+  EXPECT_EQ(starting_active, GetGlobalActiveTrackerCount());
+  EXPECT_EQ(starting_inactive + 1, GetGlobalInactiveTrackerCount());
+}
+
+TEST_F(ActivityTrackerTest, ProcessDeathTest) {
+  // This doesn't actually create and destroy a process. Instead, it uses for-
+  // testing interfaces to simulate data created by other processes.
+  const int64_t other_process_id = GetCurrentProcId() + 1;
+
+  GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3, 0);
+  GlobalActivityTracker* global = GlobalActivityTracker::Get();
+  ThreadActivityTracker* thread = global->GetOrCreateTrackerForCurrentThread();
+
+  // Get callbacks for process exit.
+  global->SetProcessExitCallback(
+      Bind(&ActivityTrackerTest::HandleProcessExit, Unretained(this)));
+
+  // Pretend that another process has started.
+  global->RecordProcessLaunch(other_process_id, FILE_PATH_LITERAL("foo --bar"));
+
+  // Do some activities.
+  PendingTask task(FROM_HERE, DoNothing());
+  ScopedTaskRunActivity activity(task);
+  ActivityUserData& user_data = activity.user_data();
+  ASSERT_NE(0U, user_data.id());
+
+  // Get the memory-allocator references to that data.
+  PersistentMemoryAllocator::Reference proc_data_ref =
+      global->allocator()->GetAsReference(
+          global->process_data().GetBaseAddress(),
+          GlobalActivityTracker::kTypeIdProcessDataRecord);
+  ASSERT_TRUE(proc_data_ref);
+  PersistentMemoryAllocator::Reference tracker_ref =
+      global->allocator()->GetAsReference(
+          thread->GetBaseAddress(),
+          GlobalActivityTracker::kTypeIdActivityTracker);
+  ASSERT_TRUE(tracker_ref);
+  PersistentMemoryAllocator::Reference user_data_ref =
+      global->allocator()->GetAsReference(
+          user_data.GetBaseAddress(),
+          GlobalActivityTracker::kTypeIdUserDataRecord);
+  ASSERT_TRUE(user_data_ref);
+
+  // Make a copy of the thread-tracker state so it can be restored later.
+  const size_t tracker_size = global->allocator()->GetAllocSize(tracker_ref);
+  std::unique_ptr<char[]> tracker_copy(new char[tracker_size]);
+  memcpy(tracker_copy.get(), thread->GetBaseAddress(), tracker_size);
+
+  // Change the objects to appear to be owned by another process. Use a "past"
+  // time so that exit-time is always later than create-time.
+  const int64_t past_stamp = Time::Now().ToInternalValue() - 1;
+  int64_t owning_id;
+  int64_t stamp;
+  ASSERT_TRUE(ActivityUserData::GetOwningProcessId(
+      global->process_data().GetBaseAddress(), &owning_id, &stamp));
+  EXPECT_NE(other_process_id, owning_id);
+  ASSERT_TRUE(ThreadActivityTracker::GetOwningProcessId(
+      thread->GetBaseAddress(), &owning_id, &stamp));
+  EXPECT_NE(other_process_id, owning_id);
+  ASSERT_TRUE(ActivityUserData::GetOwningProcessId(user_data.GetBaseAddress(),
+                                                   &owning_id, &stamp));
+  EXPECT_NE(other_process_id, owning_id);
+  global->process_data().SetOwningProcessIdForTesting(other_process_id,
+                                                      past_stamp);
+  thread->SetOwningProcessIdForTesting(other_process_id, past_stamp);
+  user_data.SetOwningProcessIdForTesting(other_process_id, past_stamp);
+  ASSERT_TRUE(ActivityUserData::GetOwningProcessId(
+      global->process_data().GetBaseAddress(), &owning_id, &stamp));
+  EXPECT_EQ(other_process_id, owning_id);
+  ASSERT_TRUE(ThreadActivityTracker::GetOwningProcessId(
+      thread->GetBaseAddress(), &owning_id, &stamp));
+  EXPECT_EQ(other_process_id, owning_id);
+  ASSERT_TRUE(ActivityUserData::GetOwningProcessId(user_data.GetBaseAddress(),
+                                                   &owning_id, &stamp));
+  EXPECT_EQ(other_process_id, owning_id);
+
+  // Check that process exit will perform callback and free the allocations.
+  ASSERT_EQ(0, exit_id_);
+  ASSERT_EQ(GlobalActivityTracker::kTypeIdProcessDataRecord,
+            global->allocator()->GetType(proc_data_ref));
+  ASSERT_EQ(GlobalActivityTracker::kTypeIdActivityTracker,
+            global->allocator()->GetType(tracker_ref));
+  ASSERT_EQ(GlobalActivityTracker::kTypeIdUserDataRecord,
+            global->allocator()->GetType(user_data_ref));
+  global->RecordProcessExit(other_process_id, 0);
+  EXPECT_EQ(other_process_id, exit_id_);
+  EXPECT_EQ("foo --bar", exit_command_);
+  EXPECT_EQ(GlobalActivityTracker::kTypeIdProcessDataRecordFree,
+            global->allocator()->GetType(proc_data_ref));
+  EXPECT_EQ(GlobalActivityTracker::kTypeIdActivityTrackerFree,
+            global->allocator()->GetType(tracker_ref));
+  EXPECT_EQ(GlobalActivityTracker::kTypeIdUserDataRecordFree,
+            global->allocator()->GetType(user_data_ref));
+
+  // Restore memory contents and types so things don't crash when doing real
+  // process clean-up.
+  memcpy(const_cast<void*>(thread->GetBaseAddress()), tracker_copy.get(),
+         tracker_size);
+  global->allocator()->ChangeType(
+      proc_data_ref, GlobalActivityTracker::kTypeIdProcessDataRecord,
+      GlobalActivityTracker::kTypeIdProcessDataRecordFree, false);
+  global->allocator()->ChangeType(
+      tracker_ref, GlobalActivityTracker::kTypeIdActivityTracker,
+      GlobalActivityTracker::kTypeIdActivityTrackerFree, false);
+  global->allocator()->ChangeType(
+      user_data_ref, GlobalActivityTracker::kTypeIdUserDataRecord,
+      GlobalActivityTracker::kTypeIdUserDataRecordFree, false);
+}
+
+}  // namespace debug
+}  // namespace base
diff --git a/base/debug/alias.cc b/base/debug/alias.cc
new file mode 100644
index 0000000..6b0caaa
--- /dev/null
+++ b/base/debug/alias.cc
@@ -0,0 +1,23 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/alias.h"
+#include "build/build_config.h"
+
+namespace base {
+namespace debug {
+
+#if defined(COMPILER_MSVC)
+#pragma optimize("", off)
+#endif
+
+void Alias(const void* var) {
+}
+
+#if defined(COMPILER_MSVC)
+#pragma optimize("", on)
+#endif
+
+}  // namespace debug
+}  // namespace base
diff --git a/base/debug/alias.h b/base/debug/alias.h
new file mode 100644
index 0000000..128fdaa
--- /dev/null
+++ b/base/debug/alias.h
@@ -0,0 +1,43 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_DEBUG_ALIAS_H_
+#define BASE_DEBUG_ALIAS_H_
+
+#include "base/base_export.h"
+#include "base/strings/string_util.h"
+
+namespace base {
+namespace debug {
+
+// Make the optimizer think that var is aliased. This is to prevent it from
+// optimizing out local variables that would not otherwise be live at the point
+// of a potential crash.
+// base::debug::Alias should only be used for local variables, not globals,
+// object members, or function return values - these must be copied to locals if
+// you want to ensure they are recorded in crash dumps.
+// Note that if the local variable is a pointer then its value will be retained
+// but the memory that it points to will probably not be saved in the crash
+// dump - by default only stack memory is saved. Therefore the aliasing
+// technique is usually only worthwhile with non-pointer variables. If you have
+// a pointer to an object and you want to retain the object's state you need to
+// copy the object or its fields to local variables. Example usage:
+//   int last_error = err_;
+//   base::debug::Alias(&last_error);
+//   DEBUG_ALIAS_FOR_CSTR(name_copy, p->name, 16);
+//   CHECK(false);
+void BASE_EXPORT Alias(const void* var);
+
+}  // namespace debug
+}  // namespace base
+
+// Convenience macro that copies the null-terminated string from |c_str| into a
+// stack-allocated char array named |var_name| that holds up to |char_count|
+// characters and should be preserved in memory dumps.
+#define DEBUG_ALIAS_FOR_CSTR(var_name, c_str, char_count)  \
+  char var_name[char_count];                               \
+  ::base::strlcpy(var_name, (c_str), arraysize(var_name)); \
+  ::base::debug::Alias(var_name);
+
+#endif  // BASE_DEBUG_ALIAS_H_
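
For illustration, a minimal sketch of how these helpers are typically used on
a crash path. `ReportFatalState` and the 64-character budget are hypothetical,
not part of this change:

#include <string>

#include "base/debug/alias.h"
#include "base/logging.h"
#include "base/macros.h"

// Hypothetical fatal-error path: copy interesting state into stack locals so
// it lands in the minidump's stack memory, then crash deliberately.
void ReportFatalState(int error_code, const std::string& context) {
  // A scalar can be aliased directly; the copy keeps it live and on-stack.
  int aliased_error = error_code;
  base::debug::Alias(&aliased_error);

  // String bytes must be copied onto the stack; aliasing the heap pointer
  // alone would not get the characters into the dump.
  DEBUG_ALIAS_FOR_CSTR(aliased_context, context.c_str(), 64);

  CHECK(false);
}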
diff --git a/base/debug/alias_unittest.cc b/base/debug/alias_unittest.cc
new file mode 100644
index 0000000..66682f1
--- /dev/null
+++ b/base/debug/alias_unittest.cc
@@ -0,0 +1,28 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <memory>
+#include <string>
+
+#include "base/debug/alias.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+TEST(DebugAlias, Test) {
+  std::unique_ptr<std::string> input =
+      std::make_unique<std::string>("string contents");
+
+  // Verify the contents get copied + the new local variable has the right type.
+  DEBUG_ALIAS_FOR_CSTR(copy1, input->c_str(), 100 /* > input->size() */);
+  static_assert(sizeof(copy1) == 100,
+                "Verification that copy1 has expected size");
+  EXPECT_STREQ("string contents", copy1);
+
+  // Verify that the copy is properly null-terminated even when it is smaller
+  // than the input string.
+  DEBUG_ALIAS_FOR_CSTR(copy2, input->c_str(), 3 /* < input->size() */);
+  static_assert(sizeof(copy2) == 3,
+                "Verification that copy2 has expected size");
+  EXPECT_STREQ("st", copy2);
+  EXPECT_EQ('\0', copy2[2]);
+}
diff --git a/base/debug/asan_invalid_access.cc b/base/debug/asan_invalid_access.cc
new file mode 100644
index 0000000..07c19db
--- /dev/null
+++ b/base/debug/asan_invalid_access.cc
@@ -0,0 +1,101 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/asan_invalid_access.h"
+
+#include <stddef.h>
+
+#include <memory>
+
+#include "base/debug/alias.h"
+#include "base/logging.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#endif
+
+namespace base {
+namespace debug {
+
+namespace {
+
+#if defined(OS_WIN) && defined(ADDRESS_SANITIZER)
+// Corrupt a memory block and make sure that the corruption gets detected either
+// when we free it or when another crash happens (if |induce_crash| is set to
+// true).
+NOINLINE void CorruptMemoryBlock(bool induce_crash) {
+  // NOTE(sebmarchand): We intentionally corrupt a memory block here in order to
+  //     trigger an Address Sanitizer (ASAN) error report.
+  static const int kArraySize = 5;
+  LONG* array = new LONG[kArraySize];
+
+  // Explicitly call out to a kernel32 function to perform the memory access.
+  // This way the underflow won't be detected but the corruption will (as the
+  // allocator will still be hooked).
+  auto InterlockedIncrementFn =
+      reinterpret_cast<LONG (*)(LONG volatile * addend)>(
+          GetProcAddress(GetModuleHandle(L"kernel32"), "InterlockedIncrement"));
+  CHECK(InterlockedIncrementFn);
+
+  LONG volatile dummy = InterlockedIncrementFn(array - 1);
+  base::debug::Alias(const_cast<LONG*>(&dummy));
+
+  if (induce_crash)
+    CHECK(false);
+  delete[] array;
+}
+#endif  // OS_WIN && ADDRESS_SANITIZER
+
+}  // namespace
+
+#if defined(ADDRESS_SANITIZER)
+// NOTE(sebmarchand): We intentionally perform some invalid heap access here in
+//     order to trigger an AddressSanitizer (ASan) error report.
+
+static const size_t kArraySize = 5;
+
+void AsanHeapOverflow() {
+  // Declares the array as volatile to make sure it doesn't get optimized away.
+  std::unique_ptr<volatile int[]> array(
+      const_cast<volatile int*>(new int[kArraySize]));
+  int dummy = array[kArraySize];
+  base::debug::Alias(&dummy);
+}
+
+void AsanHeapUnderflow() {
+  // Declares the array as volatile to make sure it doesn't get optimized away.
+  std::unique_ptr<volatile int[]> array(
+      const_cast<volatile int*>(new int[kArraySize]));
+  // We need to store the underflow address in a temporary variable as trying to
+  // access array[-1] will trigger a warning C4245: "conversion from 'int' to
+  // 'size_t', signed/unsigned mismatch".
+  volatile int* underflow_address = &array[0] - 1;
+  int dummy = *underflow_address;
+  base::debug::Alias(&dummy);
+}
+
+void AsanHeapUseAfterFree() {
+  // Declares the array as volatile to make sure it doesn't get optimized away.
+  std::unique_ptr<volatile int[]> array(
+      const_cast<volatile int*>(new int[kArraySize]));
+  volatile int* dangling = array.get();
+  array.reset();
+  int dummy = dangling[kArraySize / 2];
+  base::debug::Alias(&dummy);
+}
+
+#if defined(OS_WIN)
+void AsanCorruptHeapBlock() {
+  CorruptMemoryBlock(false);
+}
+
+void AsanCorruptHeap() {
+  CorruptMemoryBlock(true);
+}
+#endif  // OS_WIN
+#endif  // ADDRESS_SANITIZER
+
+}  // namespace debug
+}  // namespace base
diff --git a/base/debug/asan_invalid_access.h b/base/debug/asan_invalid_access.h
new file mode 100644
index 0000000..dc9a7ee
--- /dev/null
+++ b/base/debug/asan_invalid_access.h
@@ -0,0 +1,46 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Defines some functions that intentionally do an invalid memory access in
+// order to trigger an AddressSanitizer (ASan) error report.
+
+#ifndef BASE_DEBUG_ASAN_INVALID_ACCESS_H_
+#define BASE_DEBUG_ASAN_INVALID_ACCESS_H_
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "build/build_config.h"
+
+namespace base {
+namespace debug {
+
+#if defined(ADDRESS_SANITIZER)
+
+// Generates a heap buffer overflow.
+BASE_EXPORT NOINLINE void AsanHeapOverflow();
+
+// Generates a heap buffer underflow.
+BASE_EXPORT NOINLINE void AsanHeapUnderflow();
+
+// Generates a use-after-free.
+BASE_EXPORT NOINLINE void AsanHeapUseAfterFree();
+
+// The "corrupt-block" and "corrupt-heap" classes of bugs is specific to
+// Windows.
+#if defined(OS_WIN)
+// Corrupts a memory block and makes sure that the corruption gets detected when
+// we try to free this block.
+BASE_EXPORT NOINLINE void AsanCorruptHeapBlock();
+
+// Corrupts the heap and makes sure that the corruption gets detected when a
+// crash occurs.
+BASE_EXPORT NOINLINE void AsanCorruptHeap();
+
+#endif  // OS_WIN
+#endif  // ADDRESS_SANITIZER
+
+}  // namespace debug
+}  // namespace base
+
+#endif  // BASE_DEBUG_ASAN_INVALID_ACCESS_H_
diff --git a/base/debug/close_handle_hook_win.cc b/base/debug/close_handle_hook_win.cc
new file mode 100644
index 0000000..1f1f432
--- /dev/null
+++ b/base/debug/close_handle_hook_win.cc
@@ -0,0 +1,263 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/close_handle_hook_win.h"
+
+#include <Windows.h>
+#include <psapi.h>
+#include <stddef.h>
+
+#include <algorithm>
+#include <memory>
+#include <vector>
+
+#include "base/macros.h"
+#include "base/win/iat_patch_function.h"
+#include "base/win/pe_image.h"
+#include "base/win/scoped_handle.h"
+#include "build/build_config.h"
+
+namespace {
+
+typedef BOOL (WINAPI* CloseHandleType) (HANDLE handle);
+
+typedef BOOL (WINAPI* DuplicateHandleType)(HANDLE source_process,
+                                           HANDLE source_handle,
+                                           HANDLE target_process,
+                                           HANDLE* target_handle,
+                                           DWORD desired_access,
+                                           BOOL inherit_handle,
+                                           DWORD options);
+
+CloseHandleType g_close_function = NULL;
+DuplicateHandleType g_duplicate_function = NULL;
+
+// The entry point for CloseHandle interception. This function notifies the
+// verifier about the handle that is being closed, and calls the original
+// function.
+BOOL WINAPI CloseHandleHook(HANDLE handle) {
+  base::win::OnHandleBeingClosed(handle);
+  return g_close_function(handle);
+}
+
+BOOL WINAPI DuplicateHandleHook(HANDLE source_process,
+                                HANDLE source_handle,
+                                HANDLE target_process,
+                                HANDLE* target_handle,
+                                DWORD desired_access,
+                                BOOL inherit_handle,
+                                DWORD options) {
+  if ((options & DUPLICATE_CLOSE_SOURCE) &&
+      (GetProcessId(source_process) == ::GetCurrentProcessId())) {
+    base::win::OnHandleBeingClosed(source_handle);
+  }
+
+  return g_duplicate_function(source_process, source_handle, target_process,
+                              target_handle, desired_access, inherit_handle,
+                              options);
+}
+
+}  // namespace
+
+namespace base {
+namespace debug {
+
+namespace {
+
+// Provides a simple way to temporarily change the protection of a memory page.
+class AutoProtectMemory {
+ public:
+  AutoProtectMemory()
+      : changed_(false), address_(NULL), bytes_(0), old_protect_(0) {}
+
+  ~AutoProtectMemory() {
+    RevertProtection();
+  }
+
+  // Grants write access to a given memory range.
+  bool ChangeProtection(void* address, size_t bytes);
+
+  // Restores the original page protection.
+  void RevertProtection();
+
+ private:
+  bool changed_;
+  void* address_;
+  size_t bytes_;
+  DWORD old_protect_;
+
+  DISALLOW_COPY_AND_ASSIGN(AutoProtectMemory);
+};
+
+bool AutoProtectMemory::ChangeProtection(void* address, size_t bytes) {
+  DCHECK(!changed_);
+  DCHECK(address);
+
+  // Change the page protection so that we can write.
+  MEMORY_BASIC_INFORMATION memory_info;
+  if (!VirtualQuery(address, &memory_info, sizeof(memory_info)))
+    return false;
+
+  DWORD is_executable = (PAGE_EXECUTE | PAGE_EXECUTE_READ |
+                        PAGE_EXECUTE_READWRITE | PAGE_EXECUTE_WRITECOPY) &
+                        memory_info.Protect;
+
+  DWORD protect = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
+  if (!VirtualProtect(address, bytes, protect, &old_protect_))
+    return false;
+
+  changed_ = true;
+  address_ = address;
+  bytes_ = bytes;
+  return true;
+}
+
+void AutoProtectMemory::RevertProtection() {
+  if (!changed_)
+    return;
+
+  DCHECK(address_);
+  DCHECK(bytes_);
+
+  VirtualProtect(address_, bytes_, old_protect_, &old_protect_);
+  changed_ = false;
+  address_ = NULL;
+  bytes_ = 0;
+  old_protect_ = 0;
+}
+
+// Performs an EAT interception.
+void EATPatch(HMODULE module, const char* function_name,
+              void* new_function, void** old_function) {
+  if (!module)
+    return;
+
+  base::win::PEImage pe(module);
+  if (!pe.VerifyMagic())
+    return;
+
+  DWORD* eat_entry = pe.GetExportEntry(function_name);
+  if (!eat_entry)
+    return;
+
+  if (!(*old_function))
+    *old_function = pe.RVAToAddr(*eat_entry);
+
+  AutoProtectMemory memory;
+  if (!memory.ChangeProtection(eat_entry, sizeof(DWORD)))
+    return;
+
+  // Perform the patch.
+#pragma warning(push)
+#pragma warning(disable : 4311 4302)
+  // These casts generate truncation warnings because they are 32 bit specific.
+  *eat_entry = reinterpret_cast<DWORD>(new_function) -
+               reinterpret_cast<DWORD>(module);
+#pragma warning(pop)
+}
+
+// Performs an IAT interception.
+base::win::IATPatchFunction* IATPatch(HMODULE module, const char* function_name,
+                                      void* new_function, void** old_function) {
+  if (!module)
+    return NULL;
+
+  base::win::IATPatchFunction* patch = new base::win::IATPatchFunction;
+  __try {
+    // There is no guarantee that |module| is still loaded at this point.
+    if (patch->PatchFromModule(module, "kernel32.dll", function_name,
+                               new_function)) {
+      delete patch;
+      return NULL;
+    }
+  } __except((GetExceptionCode() == EXCEPTION_ACCESS_VIOLATION ||
+              GetExceptionCode() == EXCEPTION_GUARD_PAGE ||
+              GetExceptionCode() == EXCEPTION_IN_PAGE_ERROR) ?
+             EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+    // Leak the patch.
+    return NULL;
+  }
+
+  if (!(*old_function)) {
+    // Things are probably messed up if each intercepted function points to
+    // a different place, but we need only one function to call.
+    *old_function = patch->original_function();
+  }
+  return patch;
+}
+
+// Keeps track of all the hooks needed to intercept functions which could
+// possibly close handles.
+class HandleHooks {
+ public:
+  HandleHooks() {}
+  ~HandleHooks() {}
+
+  void AddIATPatch(HMODULE module);
+  void AddEATPatch();
+
+ private:
+  std::vector<base::win::IATPatchFunction*> hooks_;
+  DISALLOW_COPY_AND_ASSIGN(HandleHooks);
+};
+
+void HandleHooks::AddIATPatch(HMODULE module) {
+  if (!module)
+    return;
+
+  base::win::IATPatchFunction* patch = NULL;
+  patch =
+      IATPatch(module, "CloseHandle", reinterpret_cast<void*>(&CloseHandleHook),
+               reinterpret_cast<void**>(&g_close_function));
+  if (!patch)
+    return;
+  hooks_.push_back(patch);
+
+  patch = IATPatch(module, "DuplicateHandle",
+                   reinterpret_cast<void*>(&DuplicateHandleHook),
+                   reinterpret_cast<void**>(&g_duplicate_function));
+  if (!patch)
+    return;
+  hooks_.push_back(patch);
+}
+
+void HandleHooks::AddEATPatch() {
+  // An attempt to restore the entry on the table at destruction is not safe.
+  EATPatch(GetModuleHandleA("kernel32.dll"), "CloseHandle",
+           reinterpret_cast<void*>(&CloseHandleHook),
+           reinterpret_cast<void**>(&g_close_function));
+  EATPatch(GetModuleHandleA("kernel32.dll"), "DuplicateHandle",
+           reinterpret_cast<void*>(&DuplicateHandleHook),
+           reinterpret_cast<void**>(&g_duplicate_function));
+}
+
+void PatchLoadedModules(HandleHooks* hooks) {
+  const DWORD kSize = 256;
+  DWORD returned;
+  std::unique_ptr<HMODULE[]> modules(new HMODULE[kSize]);
+  if (!EnumProcessModules(GetCurrentProcess(), modules.get(),
+                          kSize * sizeof(HMODULE), &returned)) {
+    return;
+  }
+  returned /= sizeof(HMODULE);
+  returned = std::min(kSize, returned);
+
+  for (DWORD current = 0; current < returned; current++) {
+    hooks->AddIATPatch(modules[current]);
+  }
+}
+
+}  // namespace
+
+void InstallHandleHooks() {
+  static HandleHooks* hooks = new HandleHooks();
+
+  // Performing EAT interception first is safer in the presence of other
+  // threads attempting to call CloseHandle.
+  hooks->AddEATPatch();
+  PatchLoadedModules(hooks);
+}
+
+}  // namespace debug
+}  // namespace base
diff --git a/base/debug/close_handle_hook_win.h b/base/debug/close_handle_hook_win.h
new file mode 100644
index 0000000..c775d75
--- /dev/null
+++ b/base/debug/close_handle_hook_win.h
@@ -0,0 +1,19 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_DEBUG_CLOSE_HANDLE_HOOK_WIN_H_
+#define BASE_DEBUG_CLOSE_HANDLE_HOOK_WIN_H_
+
+#include "base/base_export.h"
+
+namespace base {
+namespace debug {
+
+// Installs the hooks required to debug improper use of handles.
+BASE_EXPORT void InstallHandleHooks();
+
+}  // namespace debug
+}  // namespace base
+
+#endif  // BASE_DEBUG_CLOSE_HANDLE_HOOK_WIN_H_
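
A short usage sketch, assuming a single-threaded early-startup call site; the
function name is illustrative only:

#include "base/debug/close_handle_hook_win.h"

// Hypothetical early-startup hook: install the CloseHandle/DuplicateHandle
// interceptions once, before other threads can race with the IAT/EAT
// patching performed above.
void EarlyProcessSetup() {
  base::debug::InstallHandleHooks();
}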
diff --git a/base/debug/crash_logging.cc b/base/debug/crash_logging.cc
new file mode 100644
index 0000000..1dabb6b
--- /dev/null
+++ b/base/debug/crash_logging.cc
@@ -0,0 +1,54 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/crash_logging.h"
+
+namespace base {
+namespace debug {
+
+namespace {
+
+CrashKeyImplementation* g_crash_key_impl = nullptr;
+
+}  // namespace
+
+CrashKeyString* AllocateCrashKeyString(const char name[],
+                                       CrashKeySize value_length) {
+  if (!g_crash_key_impl)
+    return nullptr;
+
+  return g_crash_key_impl->Allocate(name, value_length);
+}
+
+void SetCrashKeyString(CrashKeyString* crash_key, base::StringPiece value) {
+  if (!g_crash_key_impl || !crash_key)
+    return;
+
+  g_crash_key_impl->Set(crash_key, value);
+}
+
+void ClearCrashKeyString(CrashKeyString* crash_key) {
+  if (!g_crash_key_impl || !crash_key)
+    return;
+
+  g_crash_key_impl->Clear(crash_key);
+}
+
+ScopedCrashKeyString::ScopedCrashKeyString(CrashKeyString* crash_key,
+                                           base::StringPiece value)
+    : crash_key_(crash_key) {
+  SetCrashKeyString(crash_key_, value);
+}
+
+ScopedCrashKeyString::~ScopedCrashKeyString() {
+  ClearCrashKeyString(crash_key_);
+}
+
+void SetCrashKeyImplementation(std::unique_ptr<CrashKeyImplementation> impl) {
+  delete g_crash_key_impl;
+  g_crash_key_impl = impl.release();
+}
+
+}  // namespace debug
+}  // namespace base
diff --git a/base/debug/crash_logging.h b/base/debug/crash_logging.h
new file mode 100644
index 0000000..9c6cd75
--- /dev/null
+++ b/base/debug/crash_logging.h
@@ -0,0 +1,104 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_DEBUG_CRASH_LOGGING_H_
+#define BASE_DEBUG_CRASH_LOGGING_H_
+
+#include <stddef.h>
+
+#include <memory>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+namespace debug {
+
+// A crash key is an annotation that is carried along with a crash report, to
+// provide additional debugging information beyond a stack trace. Crash keys
+// have a name and a string value.
+//
+// The preferred API is //components/crash/core/common:crash_key; however,
+// not all clients can hold a direct dependency on that target. The API
+// provided in this file indirects the dependency.
+//
+// Example usage:
+//   static CrashKeyString* crash_key =
+//       AllocateCrashKeyString("name", CrashKeySize::Size32);
+//   SetCrashKeyString(crash_key, "value");
+//   ClearCrashKeyString(crash_key);
+
+// The maximum length for a crash key's value must be one of the following
+// pre-determined values.
+enum class CrashKeySize {
+  Size32 = 32,
+  Size64 = 64,
+  Size256 = 256,
+};
+
+struct CrashKeyString;
+
+// Allocates a new crash key with the specified |name| and storage for a
+// value up to length |size|. This will return null if the crash key system is
+// not initialized.
+BASE_EXPORT CrashKeyString* AllocateCrashKeyString(const char name[],
+                                                   CrashKeySize size);
+
+// Stores |value| into the specified |crash_key|. The |crash_key| may be null
+// if AllocateCrashKeyString() returned null. If |value| is longer than the
+// size with which the key was allocated, it will be truncated.
+BASE_EXPORT void SetCrashKeyString(CrashKeyString* crash_key,
+                                   base::StringPiece value);
+
+// Clears any value that was stored in |crash_key|. The |crash_key| may be
+// null.
+BASE_EXPORT void ClearCrashKeyString(CrashKeyString* crash_key);
+
+// A scoper that sets the specified key to value for the lifetime of the
+// object, and clears it on destruction.
+class BASE_EXPORT ScopedCrashKeyString {
+ public:
+  ScopedCrashKeyString(CrashKeyString* crash_key, base::StringPiece value);
+  ~ScopedCrashKeyString();
+
+ private:
+  CrashKeyString* const crash_key_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedCrashKeyString);
+};
+
+////////////////////////////////////////////////////////////////////////////////
+// The following declarations are used to initialize the crash key system
+// in //base by providing implementations for the above functions.
+
+// The virtual interface that provides the implementation for the crash key
+// API. This is implemented by a higher-layer component, and the instance is
+// set using the function below.
+class CrashKeyImplementation {
+ public:
+  virtual ~CrashKeyImplementation() = default;
+
+  virtual CrashKeyString* Allocate(const char name[], CrashKeySize size) = 0;
+  virtual void Set(CrashKeyString* crash_key, base::StringPiece value) = 0;
+  virtual void Clear(CrashKeyString* crash_key) = 0;
+};
+
+// Initializes the crash key system in base by replacing the existing
+// implementation, if it exists, with |impl|. Ownership of |impl| is
+// transferred into base.
+BASE_EXPORT void SetCrashKeyImplementation(
+    std::unique_ptr<CrashKeyImplementation> impl);
+
+// The base structure for a crash key, storing the allocation metadata.
+struct CrashKeyString {
+  constexpr CrashKeyString(const char name[], CrashKeySize size)
+      : name(name), size(size) {}
+  const char* const name;
+  const CrashKeySize size;
+};
+
+}  // namespace debug
+}  // namespace base
+
+#endif  // BASE_DEBUG_CRASH_LOGGING_H_
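
To make the layering concrete, a toy in-memory implementation of the
interface above. This is a sketch for illustration only, not the
crash-reporter-backed implementation that higher layers actually install:

#include <map>
#include <memory>
#include <string>
#include <vector>

#include "base/debug/crash_logging.h"

namespace {

// Toy backend: stores values in a process-local map so they can be inspected
// in tests. A real backend writes into crash-report annotation storage.
class MapCrashKeyImplementation : public base::debug::CrashKeyImplementation {
 public:
  base::debug::CrashKeyString* Allocate(
      const char name[], base::debug::CrashKeySize size) override {
    keys_.push_back(
        std::make_unique<base::debug::CrashKeyString>(name, size));
    return keys_.back().get();
  }

  void Set(base::debug::CrashKeyString* crash_key,
           base::StringPiece value) override {
    // Truncate to the size the key was allocated with, as the API promises.
    values_[crash_key] =
        value.substr(0, static_cast<size_t>(crash_key->size)).as_string();
  }

  void Clear(base::debug::CrashKeyString* crash_key) override {
    values_.erase(crash_key);
  }

 private:
  std::vector<std::unique_ptr<base::debug::CrashKeyString>> keys_;
  std::map<base::debug::CrashKeyString*, std::string> values_;
};

}  // namespace

// Installed once at startup:
//   base::debug::SetCrashKeyImplementation(
//       std::make_unique<MapCrashKeyImplementation>());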
diff --git a/base/debug/crash_logging_unittest.cc b/base/debug/crash_logging_unittest.cc
new file mode 100644
index 0000000..c10d36e
--- /dev/null
+++ b/base/debug/crash_logging_unittest.cc
@@ -0,0 +1,17 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/crash_logging.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+TEST(CrashLoggingTest, UninitializedCrashKeyStringSupport) {
+  auto* crash_key = base::debug::AllocateCrashKeyString(
+      "test", base::debug::CrashKeySize::Size32);
+  EXPECT_FALSE(crash_key);
+
+  base::debug::SetCrashKeyString(crash_key, "value");
+
+  base::debug::ClearCrashKeyString(crash_key);
+}
diff --git a/base/debug/debugger.cc b/base/debug/debugger.cc
new file mode 100644
index 0000000..1ccee1c
--- /dev/null
+++ b/base/debug/debugger.cc
@@ -0,0 +1,42 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/debugger.h"
+#include "base/logging.h"
+#include "base/threading/platform_thread.h"
+#include "build/build_config.h"
+
+namespace base {
+namespace debug {
+
+static bool is_debug_ui_suppressed = false;
+
+bool WaitForDebugger(int wait_seconds, bool silent) {
+#if defined(OS_ANDROID)
+  // The pid of the process to attach to is not output by Android ddms, so we
+  // have to print it out explicitly.
+  DLOG(INFO) << "DebugUtil::WaitForDebugger(pid=" << static_cast<int>(getpid())
+             << ")";
+#endif
+  for (int i = 0; i < wait_seconds * 10; ++i) {
+    if (BeingDebugged()) {
+      if (!silent)
+        BreakDebugger();
+      return true;
+    }
+    PlatformThread::Sleep(TimeDelta::FromMilliseconds(100));
+  }
+  return false;
+}
+
+void SetSuppressDebugUI(bool suppress) {
+  is_debug_ui_suppressed = suppress;
+}
+
+bool IsDebugUISuppressed() {
+  return is_debug_ui_suppressed;
+}
+
+}  // namespace debug
+}  // namespace base
diff --git a/base/debug/debugger.h b/base/debug/debugger.h
new file mode 100644
index 0000000..8680e28
--- /dev/null
+++ b/base/debug/debugger.h
@@ -0,0 +1,44 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is a cross platform interface for helper functions related to
+// debuggers.  You should use this to test if you're running under a debugger,
+// and if you would like to yield (breakpoint) into the debugger.
+
+#ifndef BASE_DEBUG_DEBUGGER_H_
+#define BASE_DEBUG_DEBUGGER_H_
+
+#include "base/base_export.h"
+
+namespace base {
+namespace debug {
+
+// Waits wait_seconds seconds for a debugger to attach to the current process.
+// When silent is false, an exception is thrown when a debugger is detected.
+BASE_EXPORT bool WaitForDebugger(int wait_seconds, bool silent);
+
+// Returns true if the given process is being run under a debugger.
+//
+// On OS X, the underlying mechanism doesn't work when the sandbox is enabled.
+// To get around this, this function caches its value.
+//
+// WARNING: Because of this, on OS X, a call MUST be made to this function
+// BEFORE the sandbox is enabled.
+BASE_EXPORT bool BeingDebugged();
+
+// Breaks into the debugger; assumes a debugger is present.
+BASE_EXPORT void BreakDebugger();
+
+// Used in test code, this controls whether showing dialogs and breaking into
+// the debugger is suppressed for debug errors, even in debug mode (normally
+// release mode doesn't do this stuff -- this is controlled separately).
+// Normally UI is not suppressed.  This is normally used when running automated
+// tests where we want a crash rather than a dialog or a debugger.
+BASE_EXPORT void SetSuppressDebugUI(bool suppress);
+BASE_EXPORT bool IsDebugUISuppressed();
+
+}  // namespace debug
+}  // namespace base
+
+#endif  // BASE_DEBUG_DEBUGGER_H_
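
A short usage sketch; the command-line plumbing and the 60-second budget are
assumptions for illustration:

#include "base/debug/debugger.h"

// Hypothetical early-startup helper: give a developer time to attach before
// the interesting code runs.
void MaybeWaitForDebugger(bool wait_requested) {
  if (wait_requested) {
    // Polls for up to 60 seconds; with silent == false this breaks into the
    // debugger as soon as one is detected.
    base::debug::WaitForDebugger(60, /*silent=*/false);
  }
}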
diff --git a/base/debug/debugger_posix.cc b/base/debug/debugger_posix.cc
new file mode 100644
index 0000000..b62bf01
--- /dev/null
+++ b/base/debug/debugger_posix.cc
@@ -0,0 +1,272 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/debugger.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/param.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <memory>
+#include <vector>
+
+#include "base/macros.h"
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+
+#if defined(__GLIBCXX__)
+#include <cxxabi.h>
+#endif
+
+#if defined(OS_MACOSX)
+#include <AvailabilityMacros.h>
+#endif
+
+#if defined(OS_MACOSX) || defined(OS_BSD)
+#include <sys/sysctl.h>
+#endif
+
+#if defined(OS_FREEBSD)
+#include <sys/user.h>
+#endif
+
+#include <ostream>
+
+#include "base/debug/alias.h"
+#include "base/logging.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/strings/string_piece.h"
+
+#if defined(USE_SYMBOLIZE)
+#include "base/third_party/symbolize/symbolize.h"
+#endif
+
+#if defined(OS_ANDROID)
+#include "base/threading/platform_thread.h"
+#endif
+
+namespace base {
+namespace debug {
+
+#if defined(OS_MACOSX) || defined(OS_BSD)
+
+// Based on Apple's recommended method as described in
+// http://developer.apple.com/qa/qa2004/qa1361.html
+bool BeingDebugged() {
+  // NOTE: This code MUST be async-signal safe (it's used by in-process
+  // stack dumping signal handler). NO malloc or stdio is allowed here.
+  //
+  // While some code used below may be async-signal unsafe, note how
+  // the result is cached (see |is_set| and |being_debugged| static variables
+  // right below). If this code is properly warmed-up early
+  // in the start-up process, it should be safe to use later.
+
+  // If the process is sandboxed then we can't use the sysctl, so cache the
+  // value.
+  static bool is_set = false;
+  static bool being_debugged = false;
+
+  if (is_set)
+    return being_debugged;
+
+  // Initialize mib, which tells sysctl what info we want.  In this case,
+  // we're looking for information about a specific process ID.
+  int mib[] = {
+    CTL_KERN,
+    KERN_PROC,
+    KERN_PROC_PID,
+    getpid()
+#if defined(OS_OPENBSD)
+    , sizeof(struct kinfo_proc),
+    0
+#endif
+  };
+
+  // Caution: struct kinfo_proc is marked __APPLE_API_UNSTABLE.  The source and
+  // binary interfaces may change.
+  struct kinfo_proc info;
+  size_t info_size = sizeof(info);
+
+#if defined(OS_OPENBSD)
+  if (sysctl(mib, arraysize(mib), NULL, &info_size, NULL, 0) < 0)
+    return false;
+
+  mib[5] = (info_size / sizeof(struct kinfo_proc));
+#endif
+
+  int sysctl_result = sysctl(mib, arraysize(mib), &info, &info_size, NULL, 0);
+  DCHECK_EQ(sysctl_result, 0);
+  if (sysctl_result != 0) {
+    is_set = true;
+    being_debugged = false;
+    return being_debugged;
+  }
+
+  // This process is being debugged if the P_TRACED flag is set.
+  is_set = true;
+#if defined(OS_FREEBSD)
+  being_debugged = (info.ki_flag & P_TRACED) != 0;
+#elif defined(OS_BSD)
+  being_debugged = (info.p_flag & P_TRACED) != 0;
+#else
+  being_debugged = (info.kp_proc.p_flag & P_TRACED) != 0;
+#endif
+  return being_debugged;
+}
+
+#elif defined(OS_LINUX) || defined(OS_ANDROID) || defined(OS_AIX)
+
+// We can look in /proc/self/status for TracerPid.  We are likely used in crash
+// handling, so we are careful not to use the heap or have side effects.
+// Another option that is common is to try to ptrace yourself, but then we
+// can't detach without forking(), and that's not so great.
+// static
+bool BeingDebugged() {
+  // NOTE: This code MUST be async-signal safe (it's used by in-process
+  // stack dumping signal handler). NO malloc or stdio is allowed here.
+
+  int status_fd = open("/proc/self/status", O_RDONLY);
+  if (status_fd == -1)
+    return false;
+
+  // We assume our line will be in the first 1024 characters and that we can
+  // read this much all at once.  In practice this will generally be true.
+  // This simplifies and speeds up things considerably.
+  char buf[1024];
+
+  ssize_t num_read = HANDLE_EINTR(read(status_fd, buf, sizeof(buf)));
+  if (IGNORE_EINTR(close(status_fd)) < 0)
+    return false;
+
+  if (num_read <= 0)
+    return false;
+
+  StringPiece status(buf, num_read);
+  StringPiece tracer("TracerPid:\t");
+
+  StringPiece::size_type pid_index = status.find(tracer);
+  if (pid_index == StringPiece::npos)
+    return false;
+
+  // The TracerPid is 0 when no debugger is attached; assume this for any
+  // pid starting with 0.
+  pid_index += tracer.size();
+  return pid_index < status.size() && status[pid_index] != '0';
+}
+
+#elif defined(OS_FUCHSIA)
+
+bool BeingDebugged() {
+  // TODO(fuchsia): No gdb/gdbserver in the SDK yet.
+  return false;
+}
+
+#else
+
+bool BeingDebugged() {
+  NOTIMPLEMENTED();
+  return false;
+}
+
+#endif
+
+// We want to break into the debugger in Debug mode, and cause a crash dump in
+// Release mode. Breakpad behaves as follows:
+//
+// +-------+-----------------+-----------------+
+// | OS    | Dump on SIGTRAP | Dump on SIGABRT |
+// +-------+-----------------+-----------------+
+// | Linux |       N         |        Y        |
+// | Mac   |       Y         |        N        |
+// +-------+-----------------+-----------------+
+//
+// Thus we do the following:
+// Linux: Debug mode if a debugger is attached, send SIGTRAP; otherwise send
+//        SIGABRT
+// Mac: Always send SIGTRAP.
+
+#if defined(ARCH_CPU_ARMEL)
+#define DEBUG_BREAK_ASM() asm("bkpt 0")
+#elif defined(ARCH_CPU_ARM64)
+#define DEBUG_BREAK_ASM() asm("brk 0")
+#elif defined(ARCH_CPU_MIPS_FAMILY)
+#define DEBUG_BREAK_ASM() asm("break 2")
+#elif defined(ARCH_CPU_X86_FAMILY)
+#define DEBUG_BREAK_ASM() asm("int3")
+#endif
+
+#if defined(NDEBUG) && !defined(OS_MACOSX) && !defined(OS_ANDROID)
+#define DEBUG_BREAK() abort()
+#elif defined(OS_NACL)
+// The NaCl verifier doesn't let us use int3.  For now, we call abort().  We
+// should ask for advice from some NaCl experts about the optimum thing here.
+// http://code.google.com/p/nativeclient/issues/detail?id=645
+#define DEBUG_BREAK() abort()
+#elif !defined(OS_MACOSX)
+// Android has a "helpful" process called debuggerd that catches native
+// signals on the general assumption that they are fatal errors. If no
+// debugger is attached, we call abort() since Breakpad needs SIGABRT to
+// create a dump. When a debugger is attached, on ARM the bkpt instruction
+// appears to cause SIGBUS, which is trapped by debuggerd, and we've had
+// great difficulty continuing in a debugger once we stop on a signal raised
+// by native code; use GDB to set |go| to 1 to resume execution. On x86, we
+// use "int3" to set a breakpoint and raise SIGTRAP.
+//
+// On other POSIX architectures, except Mac OS X, we use the same logic to
+// ensure that breakpad creates a dump on crashes while it is still possible to
+// use a debugger.
+namespace {
+void DebugBreak() {
+  if (!BeingDebugged()) {
+    abort();
+  } else {
+#if defined(DEBUG_BREAK_ASM)
+    DEBUG_BREAK_ASM();
+#else
+    volatile int go = 0;
+    while (!go) {
+      base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(100));
+    }
+#endif
+  }
+}
+}  // namespace
+#define DEBUG_BREAK() DebugBreak()
+#elif defined(DEBUG_BREAK_ASM)
+#define DEBUG_BREAK() DEBUG_BREAK_ASM()
+#else
+#error "Don't know how to debug break on this architecture/OS"
+#endif
+
+void BreakDebugger() {
+  // NOTE: This code MUST be async-signal safe (it's used by in-process
+  // stack dumping signal handler). NO malloc or stdio is allowed here.
+
+  // Linker's ICF feature may merge this function with other functions with the
+  // same definition (e.g. any function whose sole job is to call abort()) and
+  // it may confuse the crash report processing system. http://crbug.com/508489
+  static int static_variable_to_make_this_function_unique = 0;
+  base::debug::Alias(&static_variable_to_make_this_function_unique);
+
+  DEBUG_BREAK();
+#if defined(OS_ANDROID) && !defined(OFFICIAL_BUILD)
+  // For Android development we always build release (debug builds are
+  // unmanageably large), so the unofficial build is used for debugging. It is
+  // helpful to be able to insert BreakDebugger() statements in the source,
+  // attach the debugger, inspect the state of the program and then resume it by
+  // setting the 'go' variable above.
+#elif defined(NDEBUG)
+  // Terminate the program after signaling the debug break.
+  _exit(1);
+#endif
+}
+
+}  // namespace debug
+}  // namespace base
diff --git a/base/debug/debugger_unittest.cc b/base/debug/debugger_unittest.cc
new file mode 100644
index 0000000..0a5a039
--- /dev/null
+++ b/base/debug/debugger_unittest.cc
@@ -0,0 +1,43 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/debugger.h"
+
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+#if defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
+void CrashWithBreakDebugger() {
+  base::debug::SetSuppressDebugUI(false);
+  base::debug::BreakDebugger();
+
+#if defined(OS_WIN)
+  // This should not be executed.
+  _exit(125);
+#endif
+}
+#endif  // defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
+
+}  // namespace
+
+// Death tests misbehave on Android.
+#if defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
+
+TEST(Debugger, CrashAtBreakpoint) {
+  EXPECT_DEATH(CrashWithBreakDebugger(), "");
+}
+
+#if defined(OS_WIN)
+TEST(Debugger, DoesntExecuteBeyondBreakpoint) {
+  EXPECT_EXIT(CrashWithBreakDebugger(),
+              ::testing::ExitedWithCode(0x80000003), "");
+}
+#endif  // defined(OS_WIN)
+
+#else  // defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
+TEST(Debugger, NoTest) {
+}
+#endif  // defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
diff --git a/base/debug/debugger_win.cc b/base/debug/debugger_win.cc
new file mode 100644
index 0000000..a1d86e4
--- /dev/null
+++ b/base/debug/debugger_win.cc
@@ -0,0 +1,25 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/debugger.h"
+
+#include <stdlib.h>
+#include <windows.h>
+
+namespace base {
+namespace debug {
+
+bool BeingDebugged() {
+  return ::IsDebuggerPresent() != 0;
+}
+
+void BreakDebugger() {
+  if (IsDebugUISuppressed())
+    _exit(1);
+
+  __debugbreak();
+}
+
+}  // namespace debug
+}  // namespace base
diff --git a/base/debug/dump_without_crashing.cc b/base/debug/dump_without_crashing.cc
new file mode 100644
index 0000000..1ab8c9c
--- /dev/null
+++ b/base/debug/dump_without_crashing.cc
@@ -0,0 +1,41 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/dump_without_crashing.h"
+
+#include "base/logging.h"
+
+namespace {
+
+// Pointer to the function that's called by DumpWithoutCrashing() to dump the
+// process's memory.
+void(CDECL* dump_without_crashing_function_)() = nullptr;
+
+}  // namespace
+
+namespace base {
+
+namespace debug {
+
+bool DumpWithoutCrashing() {
+  if (dump_without_crashing_function_) {
+    (*dump_without_crashing_function_)();
+    return true;
+  }
+  return false;
+}
+
+void SetDumpWithoutCrashingFunction(void (CDECL *function)()) {
+#if !defined(COMPONENT_BUILD)
+  // In component builds, the same base is shared between modules so it
+  // might be initialized several times. However, in non-component builds
+  // this should never happen.
+  DCHECK(!dump_without_crashing_function_);
+#endif
+  dump_without_crashing_function_ = function;
+}
+
+}  // namespace debug
+
+}  // namespace base
diff --git a/base/debug/dump_without_crashing.h b/base/debug/dump_without_crashing.h
new file mode 100644
index 0000000..913f6c4
--- /dev/null
+++ b/base/debug/dump_without_crashing.h
@@ -0,0 +1,37 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_DEBUG_DUMP_WITHOUT_CRASHING_H_
+#define BASE_DEBUG_DUMP_WITHOUT_CRASHING_H_
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace debug {
+
+// Handler to silently dump the current process without crashing.
+// Before calling this function, call SetDumpWithoutCrashingFunction to pass a
+// function pointer.
+// Windows:
+// This must be done for each instance of base (i.e. module) and is normally
+// chrome_elf!DumpProcessWithoutCrash. See example code in chrome_main.cc that
+// does this for chrome.dll and chrome_child.dll. Note: Crashpad sets this up
+// for main chrome.exe as part of calling crash_reporter::InitializeCrashpad.
+// Mac/Linux:
+// Crashpad does this as part of crash_reporter::InitializeCrashpad.
+// Returns false if called before SetDumpWithoutCrashingFunction.
+BASE_EXPORT bool DumpWithoutCrashing();
+
+// Sets a function that'll be invoked to dump the current process when
+// DumpWithoutCrashing() is called.
+BASE_EXPORT void SetDumpWithoutCrashingFunction(void (CDECL *function)());
+
+}  // namespace debug
+
+}  // namespace base
+
+#endif  // BASE_DEBUG_DUMP_WITHOUT_CRASHING_H_
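
A sketch of the expected wiring, assuming the crash reporter exports a dumper
as described in the comment above; the call sites here are illustrative:

#include "base/debug/dump_without_crashing.h"
#include "base/logging.h"

// Assumed to be provided by the crash reporter, e.g.
// chrome_elf!DumpProcessWithoutCrash on Windows.
void CDECL DumpProcessWithoutCrash();

void InitCrashReporting() {
  base::debug::SetDumpWithoutCrashingFunction(&DumpProcessWithoutCrash);
}

void OnUnexpectedButRecoverableState() {
  // Captures a dump for later analysis without killing the process.
  if (!base::debug::DumpWithoutCrashing())
    DLOG(WARNING) << "No dump function registered; skipping dump.";
}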
diff --git a/base/debug/elf_reader_linux.cc b/base/debug/elf_reader_linux.cc
new file mode 100644
index 0000000..cdf8193
--- /dev/null
+++ b/base/debug/elf_reader_linux.cc
@@ -0,0 +1,132 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/elf_reader_linux.h"
+
+#include <arpa/inet.h>
+#include <elf.h>
+
+#include <vector>
+
+#include "base/bits.h"
+#include "base/containers/span.h"
+#include "base/sha1.h"
+#include "base/strings/stringprintf.h"
+
+namespace base {
+namespace debug {
+
+namespace {
+
+#if __SIZEOF_POINTER__ == 4
+using Ehdr = Elf32_Ehdr;
+using Dyn = Elf32_Dyn;
+using Half = Elf32_Half;
+using Nhdr = Elf32_Nhdr;
+using Phdr = Elf32_Phdr;
+using Word = Elf32_Word;
+#else
+using Ehdr = Elf64_Ehdr;
+using Dyn = Elf64_Dyn;
+using Half = Elf64_Half;
+using Nhdr = Elf64_Nhdr;
+using Phdr = Elf64_Phdr;
+using Word = Elf64_Word;
+#endif
+
+using ElfSegment = span<const char>;
+
+Optional<std::string> ElfSegmentBuildIDNoteAsString(const ElfSegment& segment) {
+  const void* section_end = segment.data() + segment.size_bytes();
+  const Nhdr* note_header = reinterpret_cast<const Nhdr*>(segment.data());
+  while (note_header < section_end) {
+    if (note_header->n_type == NT_GNU_BUILD_ID)
+      break;
+    note_header = reinterpret_cast<const Nhdr*>(
+        reinterpret_cast<const char*>(note_header) + sizeof(Nhdr) +
+        bits::Align(note_header->n_namesz, 4) +
+        bits::Align(note_header->n_descsz, 4));
+  }
+
+  if (note_header >= section_end || note_header->n_descsz != kSHA1Length)
+    return nullopt;
+
+  const uint8_t* guid = reinterpret_cast<const uint8_t*>(note_header) +
+                        sizeof(Nhdr) + bits::Align(note_header->n_namesz, 4);
+
+  uint32_t dword = htonl(*reinterpret_cast<const int32_t*>(guid));
+  uint16_t word1 = htons(*reinterpret_cast<const int16_t*>(guid + 4));
+  uint16_t word2 = htons(*reinterpret_cast<const int16_t*>(guid + 6));
+  std::string identifier;
+  identifier.reserve(kSHA1Length * 2);  // as hex string
+  SStringPrintf(&identifier, "%08X%04X%04X", dword, word1, word2);
+  for (size_t i = 8; i < note_header->n_descsz; ++i)
+    StringAppendF(&identifier, "%02X", guid[i]);
+
+  return identifier;
+}
+
+std::vector<ElfSegment> FindElfSegments(const void* elf_mapped_base,
+                                        uint32_t segment_type) {
+  const char* elf_base = reinterpret_cast<const char*>(elf_mapped_base);
+  if (strncmp(elf_base, ELFMAG, SELFMAG) != 0)
+    return std::vector<ElfSegment>();
+
+  const Ehdr* elf_header = reinterpret_cast<const Ehdr*>(elf_base);
+  const Phdr* phdrs =
+      reinterpret_cast<const Phdr*>(elf_base + elf_header->e_phoff);
+  std::vector<ElfSegment> segments;
+  for (Half i = 0; i < elf_header->e_phnum; ++i) {
+    if (phdrs[i].p_type == segment_type)
+      segments.push_back({elf_base + phdrs[i].p_offset, phdrs[i].p_filesz});
+  }
+  return segments;
+}
+
+}  // namespace
+
+Optional<std::string> ReadElfBuildId(const void* elf_base) {
+  // An ELF file can have multiple PT_NOTE segments.
+  std::vector<ElfSegment> segs = FindElfSegments(elf_base, PT_NOTE);
+  if (segs.empty())
+    return nullopt;
+  Optional<std::string> id;
+  for (const ElfSegment& seg : segs) {
+    id = ElfSegmentBuildIDNoteAsString(seg);
+    if (id)
+      return id;
+  }
+
+  return nullopt;
+}
+
+Optional<std::string> ReadElfLibraryName(const void* elf_base) {
+  std::vector<ElfSegment> segs = FindElfSegments(elf_base, PT_DYNAMIC);
+  if (segs.empty())
+    return nullopt;
+  DCHECK_EQ(1u, segs.size());
+
+  const ElfSegment& dynamic_seg = segs.front();
+  const Dyn* dynamic_start = reinterpret_cast<const Dyn*>(dynamic_seg.data());
+  const Dyn* dynamic_end = reinterpret_cast<const Dyn*>(
+      dynamic_seg.data() + dynamic_seg.size_bytes());
+  Optional<std::string> soname;
+  Word soname_strtab_offset = 0;
+  const char* strtab_addr = 0;
+  for (const Dyn* dynamic_iter = dynamic_start; dynamic_iter < dynamic_end;
+       ++dynamic_iter) {
+    if (dynamic_iter->d_tag == DT_STRTAB) {
+      strtab_addr =
+          dynamic_iter->d_un.d_ptr + reinterpret_cast<const char*>(elf_base);
+    } else if (dynamic_iter->d_tag == DT_SONAME) {
+      soname_strtab_offset = dynamic_iter->d_un.d_val;
+    }
+  }
+  if (soname_strtab_offset && strtab_addr)
+    return std::string(strtab_addr + soname_strtab_offset);
+  return nullopt;
+}
+
+}  // namespace debug
+}  // namespace base
diff --git a/base/debug/elf_reader_linux.h b/base/debug/elf_reader_linux.h
new file mode 100644
index 0000000..4086dfb
--- /dev/null
+++ b/base/debug/elf_reader_linux.h
@@ -0,0 +1,28 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_DEBUG_ELF_READER_LINUX_H_
+#define BASE_DEBUG_ELF_READER_LINUX_H_
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/optional.h"
+
+namespace base {
+namespace debug {
+
+// Returns the ELF section .note.gnu.build-id from the ELF file mapped at
+// |elf_base|, if present. The caller must ensure that the file is fully mapped
+// in memory.
+Optional<std::string> BASE_EXPORT ReadElfBuildId(const void* elf_base);
+
+// Returns the library name from the ELF file mapped at |elf_base|, if present.
+// The caller must ensure that the file is fully mapped in memory.
+Optional<std::string> BASE_EXPORT ReadElfLibraryName(const void* elf_base);
+
+}  // namespace debug
+}  // namespace base
+
+#endif  // BASE_DEBUG_ELF_READER_LINUX_H_
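
A minimal usage sketch for the main executable, relying on the standard
linker-provided `__executable_start` symbol (as the unit test below also
does); the logging is illustrative:

#include <string>

#include "base/debug/elf_reader_linux.h"
#include "base/logging.h"
#include "base/optional.h"

// Standard linker-provided symbol marking the start of the mapped executable.
extern char __executable_start;

void LogOwnBuildId() {
  base::Optional<std::string> build_id =
      base::debug::ReadElfBuildId(&__executable_start);
  if (build_id)
    LOG(INFO) << "build-id: " << *build_id;  // Uppercase hex string.
  else
    LOG(INFO) << "no .note.gnu.build-id section present.";
}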
diff --git a/base/debug/elf_reader_linux_unittest.cc b/base/debug/elf_reader_linux_unittest.cc
new file mode 100644
index 0000000..5510418
--- /dev/null
+++ b/base/debug/elf_reader_linux_unittest.cc
@@ -0,0 +1,70 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/elf_reader_linux.h"
+
+#include <dlfcn.h>
+
+#include "base/files/memory_mapped_file.h"
+#include "base/strings/string_util.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+extern char __executable_start;
+
+namespace base {
+namespace debug {
+
+// The linker flag --build-id is passed only on official builds. Clang does not
+// enable it by default and we do not have a build-id section in non-official
+// builds.
+#if defined(OFFICIAL_BUILD)
+TEST(ElfReaderTest, ReadElfBuildId) {
+  Optional<std::string> build_id = ReadElfBuildId(&__executable_start);
+  ASSERT_TRUE(build_id);
+  const size_t kGuidBytes = 20;
+  EXPECT_EQ(2 * kGuidBytes, build_id.value().size());
+  for (char c : *build_id) {
+    EXPECT_TRUE(IsHexDigit(c));
+    EXPECT_FALSE(IsAsciiLower(c));
+  }
+}
+#endif
+
+TEST(ElfReaderTest, ReadElfLibraryName) {
+#if defined(OS_ANDROID)
+  // On Android the library loader memory-maps the full .so file.
+  const char kLibraryName[] = "lib_base_unittests__library";
+  const void* addr = &__executable_start;
+#else
+  // On Linux the executable does not contain a soname and is not mapped up to
+  // its dynamic segment, so use the malloc wrapper .so file, on which the
+  // test already depends.
+  const char kLibraryName[] = MALLOC_WRAPPER_LIB;
+  // Find any symbol in the loaded file.
+  void* handle = dlopen(kLibraryName, RTLD_NOW | RTLD_LOCAL);
+  const void* init_addr = dlsym(handle, "_init");
+  // Use this symbol to get full path to the loaded library.
+  Dl_info info;
+  int res = dladdr(init_addr, &info);
+  ASSERT_NE(0, res);
+  std::string filename(info.dli_fname);
+  EXPECT_FALSE(filename.empty());
+  EXPECT_NE(std::string::npos, filename.find(kLibraryName));
+
+  // Memory map the so file and use it to test reading so name.
+  MemoryMappedFile file;
+  file.Initialize(FilePath(filename));
+  const void* addr = file.data();
+#endif
+
+  auto name = ReadElfLibraryName(addr);
+  ASSERT_TRUE(name);
+  EXPECT_NE(std::string::npos, name->find(kLibraryName))
+      << "Library name " << *name << " doesn't contain expected "
+      << kLibraryName;
+}
+
+}  // namespace debug
+}  // namespace base
diff --git a/base/debug/gdi_debug_util_win.cc b/base/debug/gdi_debug_util_win.cc
new file mode 100644
index 0000000..bf9827c
--- /dev/null
+++ b/base/debug/gdi_debug_util_win.cc
@@ -0,0 +1,141 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "base/debug/gdi_debug_util_win.h"
+
+#include <algorithm>
+#include <cmath>
+
+#include <psapi.h>
+#include <stddef.h>
+#include <TlHelp32.h>
+
+#include "base/debug/alias.h"
+#include "base/logging.h"
+#include "base/win/scoped_handle.h"
+#include "base/win/win_util.h"
+
+namespace {
+
+void CollectChildGDIUsageAndDie(DWORD parent_pid) {
+  HANDLE snapshot = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0);
+  CHECK_NE(INVALID_HANDLE_VALUE, snapshot);
+
+  int total_process_count = 0;
+  base::debug::Alias(&total_process_count);
+  int total_peak_gdi_count = 0;
+  base::debug::Alias(&total_peak_gdi_count);
+  int total_gdi_count = 0;
+  base::debug::Alias(&total_gdi_count);
+  int total_user_count = 0;
+  base::debug::Alias(&total_user_count);
+
+  int child_count = 0;
+  base::debug::Alias(&child_count);
+  int peak_gdi_count = 0;
+  base::debug::Alias(&peak_gdi_count);
+  int sum_gdi_count = 0;
+  base::debug::Alias(&sum_gdi_count);
+  int sum_user_count = 0;
+  base::debug::Alias(&sum_user_count);
+
+  PROCESSENTRY32 proc_entry = {0};
+  proc_entry.dwSize = sizeof(PROCESSENTRY32);
+  CHECK(Process32First(snapshot, &proc_entry));
+
+  do {
+    base::win::ScopedHandle process(
+        OpenProcess(PROCESS_QUERY_INFORMATION,
+                    FALSE,
+                    proc_entry.th32ProcessID));
+    if (!process.IsValid())
+      continue;
+
+    int num_gdi_handles = GetGuiResources(process.Get(), GR_GDIOBJECTS);
+    int num_user_handles = GetGuiResources(process.Get(), GR_USEROBJECTS);
+
+    // Compute sum and peak counts for all processes.
+    ++total_process_count;
+    total_user_count += num_user_handles;
+    total_gdi_count += num_gdi_handles;
+    total_peak_gdi_count = std::max(total_peak_gdi_count, num_gdi_handles);
+
+    if (parent_pid != proc_entry.th32ParentProcessID)
+      continue;
+
+    // Compute sum and peak counts for child processes.
+    ++child_count;
+    sum_user_count += num_user_handles;
+    sum_gdi_count += num_gdi_handles;
+    peak_gdi_count = std::max(peak_gdi_count, num_gdi_handles);
+
+  } while (Process32Next(snapshot, &proc_entry));
+
+  CloseHandle(snapshot);
+  CHECK(false);
+}
+
+}  // namespace
+
+namespace base {
+namespace debug {
+
+void CollectGDIUsageAndDie(BITMAPINFOHEADER* header, HANDLE shared_section) {
+  // Make sure parameters are saved in the minidump.
+  DWORD last_error = GetLastError();
+  bool is_gdi_available = base::win::IsUser32AndGdi32Available();
+
+  LONG width = header ? header->biWidth : 0;
+  LONG height = header ? header->biHeight : 0;
+
+  base::debug::Alias(&last_error);
+  base::debug::Alias(&is_gdi_available);
+  base::debug::Alias(&width);
+  base::debug::Alias(&height);
+  base::debug::Alias(&shared_section);
+
+  DWORD num_user_handles = GetGuiResources(GetCurrentProcess(), GR_USEROBJECTS);
+
+  DWORD num_gdi_handles = GetGuiResources(GetCurrentProcess(), GR_GDIOBJECTS);
+  if (num_gdi_handles == 0) {
+    DWORD get_gui_resources_error = GetLastError();
+    base::debug::Alias(&get_gui_resources_error);
+    CHECK(false);
+  }
+
+  base::debug::Alias(&num_gdi_handles);
+  base::debug::Alias(&num_user_handles);
+
+  const DWORD kLotsOfHandles = 9990;
+  CHECK_LE(num_gdi_handles, kLotsOfHandles);
+
+  PROCESS_MEMORY_COUNTERS_EX pmc;
+  pmc.cb = sizeof(pmc);
+  CHECK(GetProcessMemoryInfo(GetCurrentProcess(),
+                             reinterpret_cast<PROCESS_MEMORY_COUNTERS*>(&pmc),
+                             sizeof(pmc)));
+  const size_t kLotsOfMemory = 1500 * 1024 * 1024;  // 1.5 GB
+  CHECK_LE(pmc.PagefileUsage, kLotsOfMemory);
+  CHECK_LE(pmc.PrivateUsage, kLotsOfMemory);
+
+  void* small_data = nullptr;
+  base::debug::Alias(&small_data);
+
+  if (std::abs(height) * width > 100) {
+    // Huh, that's weird.  We don't have crazy handle count, we don't have
+    // ridiculous memory usage. Try to allocate a small bitmap and see if that
+    // fails too.
+    header->biWidth = 5;
+    header->biHeight = -5;
+    // |header| is already a BITMAPINFOHEADER*; cast the pointer itself rather
+    // than its address, which would pass a pointer-to-pointer.
+    HBITMAP small_bitmap = CreateDIBSection(
+        nullptr, reinterpret_cast<BITMAPINFO*>(header),
+        0, &small_data, shared_section, 0);
+    CHECK(small_bitmap != nullptr);
+    DeleteObject(small_bitmap);
+  }
+  // Maybe the child processes are the ones leaking GDI or USER resources.
+  CollectChildGDIUsageAndDie(GetCurrentProcessId());
+}
+
+}  // namespace debug
+}  // namespace base
diff --git a/base/debug/gdi_debug_util_win.h b/base/debug/gdi_debug_util_win.h
new file mode 100644
index 0000000..3383a4d
--- /dev/null
+++ b/base/debug/gdi_debug_util_win.h
@@ -0,0 +1,25 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_DEBUG_GDI_DEBUG_UTIL_WIN_H_
+#define BASE_DEBUG_GDI_DEBUG_UTIL_WIN_H_
+
+#include <windows.h>
+
+#include "base/base_export.h"
+
+namespace base {
+namespace debug {
+
+// Crashes the process, using base::debug::Alias to leave valuable debugging
+// information in the crash dump. Pass values for |header| and |shared_section|
+// in the event of a bitmap allocation failure, to gather information about
+// those as well.
+void BASE_EXPORT CollectGDIUsageAndDie(BITMAPINFOHEADER* header = nullptr,
+                                       HANDLE shared_section = nullptr);
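+
+// A sketch of a typical call site (the surrounding bitmap-allocation code is
+// assumed, not part of this header):
+//
+//   HBITMAP bitmap =
+//       CreateDIBSection(nullptr, &info, DIB_RGB_COLORS, &data, section, 0);
+//   if (!bitmap)
+//     base::debug::CollectGDIUsageAndDie(&info.bmiHeader, section);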
+
+}  // namespace debug
+}  // namespace base
+
+#endif  // BASE_DEBUG_GDI_DEBUG_UTIL_WIN_H_
diff --git a/base/debug/leak_annotations.h b/base/debug/leak_annotations.h
new file mode 100644
index 0000000..dc50246
--- /dev/null
+++ b/base/debug/leak_annotations.h
@@ -0,0 +1,46 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_DEBUG_LEAK_ANNOTATIONS_H_
+#define BASE_DEBUG_LEAK_ANNOTATIONS_H_
+
+#include "base/macros.h"
+#include "build/build_config.h"
+
+// This file defines macros which can be used to annotate intentional memory
+// leaks. Support for annotations is implemented in LeakSanitizer. Annotated
+// objects will be treated as a source of live pointers, i.e. any heap objects
+// reachable by following pointers from an annotated object will not be
+// reported as leaks.
+//
+// ANNOTATE_SCOPED_MEMORY_LEAK: all allocations made in the current scope
+// will be annotated as leaks.
+// ANNOTATE_LEAKING_OBJECT_PTR(X): the heap object referenced by pointer X will
+// be annotated as a leak.
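+//
+// A minimal usage sketch (illustrative only; |g_singleton|, |MakeSingleton|
+// and |Info| are hypothetical names, not part of this API):
+//
+//   void InitGlobalState() {
+//     ANNOTATE_SCOPED_MEMORY_LEAK;      // Allocations below are intentional.
+//     g_singleton = MakeSingleton();    // Never freed; lives until exit.
+//   }
+//
+//   Info* info = new Info;              // Intentionally leaked one-off.
+//   ANNOTATE_LEAKING_OBJECT_PTR(info);  // LSan won't report |info| or
+//                                       // anything reachable from it.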
+
+#if defined(LEAK_SANITIZER) && !defined(OS_NACL)
+
+#include <sanitizer/lsan_interface.h>
+
+class ScopedLeakSanitizerDisabler {
+ public:
+  ScopedLeakSanitizerDisabler() { __lsan_disable(); }
+  ~ScopedLeakSanitizerDisabler() { __lsan_enable(); }
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ScopedLeakSanitizerDisabler);
+};
+
+#define ANNOTATE_SCOPED_MEMORY_LEAK \
+    ScopedLeakSanitizerDisabler leak_sanitizer_disabler; static_cast<void>(0)
+
+#define ANNOTATE_LEAKING_OBJECT_PTR(X) __lsan_ignore_object(X);
+
+#else
+
+#define ANNOTATE_SCOPED_MEMORY_LEAK ((void)0)
+#define ANNOTATE_LEAKING_OBJECT_PTR(X) ((void)0)
+
+#endif
+
+#endif  // BASE_DEBUG_LEAK_ANNOTATIONS_H_
diff --git a/base/debug/leak_tracker.h b/base/debug/leak_tracker.h
new file mode 100644
index 0000000..7ddd5b6
--- /dev/null
+++ b/base/debug/leak_tracker.h
@@ -0,0 +1,142 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_DEBUG_LEAK_TRACKER_H_
+#define BASE_DEBUG_LEAK_TRACKER_H_
+
+#include <stddef.h>
+
+#include "build/build_config.h"
+
+// Only enable leak tracking in non-uClibc debug builds.
+#if !defined(NDEBUG) && !defined(__UCLIBC__)
+#define ENABLE_LEAK_TRACKER
+#endif
+
+#ifdef ENABLE_LEAK_TRACKER
+#include "base/containers/linked_list.h"
+#include "base/debug/stack_trace.h"
+#include "base/logging.h"
+#endif  // ENABLE_LEAK_TRACKER
+
+// LeakTracker is a helper to verify that all instances of a class
+// have been destroyed.
+//
+// It is particularly useful for classes that are bound to a single thread --
+// before destroying that thread, one can check that there are no remaining
+// instances of that class.
+//
+// For example, to enable leak tracking for class net::URLRequest, start by
+// adding a member variable of type LeakTracker<net::URLRequest>.
+//
+//   class URLRequest {
+//     ...
+//    private:
+//     base::LeakTracker<URLRequest> leak_tracker_;
+//   };
+//
+//
+// Next, when we believe all instances of net::URLRequest have been deleted:
+//
+//   LeakTracker<net::URLRequest>::CheckForLeaks();
+//
+// Should the check fail (because there are live instances of net::URLRequest),
+// the allocation callstack for each leaked instance is dumped to the error
+// log.
+//
+// If ENABLE_LEAK_TRACKER is not defined, then the check has no effect.
+
+namespace base {
+namespace debug {
+
+#ifndef ENABLE_LEAK_TRACKER
+
+// If leak tracking is disabled, do nothing.
+template<typename T>
+class LeakTracker {
+ public:
+  // This destructor suppresses warnings about instances of this class not being
+  // used.
+  ~LeakTracker() {}
+  static void CheckForLeaks() {}
+  static int NumLiveInstances() { return -1; }
+};
+
+#else
+
+// If leak tracking is enabled, we track where the object was allocated from.
+
+template<typename T>
+class LeakTracker : public LinkNode<LeakTracker<T> > {
+ public:
+  LeakTracker() {
+    instances()->Append(this);
+  }
+
+  ~LeakTracker() {
+    this->RemoveFromList();
+  }
+
+  static void CheckForLeaks() {
+    // Walk the allocation list and print each entry it contains.
+    size_t count = 0;
+
+    // Copy the first 3 leak allocation callstacks onto the stack.
+    // This way if we hit the CHECK() in a release build, the leak
+    // information will be available in the mini-dump.
+    const size_t kMaxStackTracesToCopyOntoStack = 3;
+    StackTrace stacktraces[kMaxStackTracesToCopyOntoStack];
+
+    for (LinkNode<LeakTracker<T> >* node = instances()->head();
+         node != instances()->end();
+         node = node->next()) {
+      StackTrace& allocation_stack = node->value()->allocation_stack_;
+
+      if (count < kMaxStackTracesToCopyOntoStack)
+        stacktraces[count] = allocation_stack;
+
+      ++count;
+      if (LOG_IS_ON(ERROR)) {
+        LOG_STREAM(ERROR) << "Leaked " << node << " which was allocated by:";
+        allocation_stack.OutputToStream(&LOG_STREAM(ERROR));
+      }
+    }
+
+    CHECK_EQ(0u, count);
+
+    // Hack to keep |stacktraces| and |count| alive (so the compiler doesn't
+    // optimize them out, and they will appear in mini-dumps).
+    if (count == 0x1234) {
+      for (size_t i = 0; i < kMaxStackTracesToCopyOntoStack; ++i)
+        stacktraces[i].Print();
+    }
+  }
+
+  static int NumLiveInstances() {
+    // Walk the allocation list and count how many entries it has.
+    int count = 0;
+    for (LinkNode<LeakTracker<T> >* node = instances()->head();
+         node != instances()->end();
+         node = node->next()) {
+      ++count;
+    }
+    return count;
+  }
+
+ private:
+  // Each specialization of LeakTracker gets its own static storage.
+  static LinkedList<LeakTracker<T> >* instances() {
+    static LinkedList<LeakTracker<T> > list;
+    return &list;
+  }
+
+  StackTrace allocation_stack_;
+};
+
+#endif  // ENABLE_LEAK_TRACKER
+
+}  // namespace debug
+}  // namespace base
+
+#endif  // BASE_DEBUG_LEAK_TRACKER_H_
diff --git a/base/debug/leak_tracker_unittest.cc b/base/debug/leak_tracker_unittest.cc
new file mode 100644
index 0000000..b9ecdcf
--- /dev/null
+++ b/base/debug/leak_tracker_unittest.cc
@@ -0,0 +1,115 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/leak_tracker.h"
+
+#include <memory>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace debug {
+
+namespace {
+
+class ClassA {
+ private:
+  LeakTracker<ClassA> leak_tracker_;
+};
+
+class ClassB {
+ private:
+  LeakTracker<ClassB> leak_tracker_;
+};
+
+#ifndef ENABLE_LEAK_TRACKER
+
+// If leak tracking is disabled, we should do nothing.
+TEST(LeakTrackerTest, NotEnabled) {
+  EXPECT_EQ(-1, LeakTracker<ClassA>::NumLiveInstances());
+  EXPECT_EQ(-1, LeakTracker<ClassB>::NumLiveInstances());
+
+  // Use unique_ptr so compiler doesn't complain about unused variables.
+  std::unique_ptr<ClassA> a1(new ClassA);
+  std::unique_ptr<ClassB> b1(new ClassB);
+  std::unique_ptr<ClassB> b2(new ClassB);
+
+  EXPECT_EQ(-1, LeakTracker<ClassA>::NumLiveInstances());
+  EXPECT_EQ(-1, LeakTracker<ClassB>::NumLiveInstances());
+}
+
+#else
+
+TEST(LeakTrackerTest, Basic) {
+  {
+    ClassA a1;
+
+    EXPECT_EQ(1, LeakTracker<ClassA>::NumLiveInstances());
+    EXPECT_EQ(0, LeakTracker<ClassB>::NumLiveInstances());
+
+    ClassB b1;
+    ClassB b2;
+
+    EXPECT_EQ(1, LeakTracker<ClassA>::NumLiveInstances());
+    EXPECT_EQ(2, LeakTracker<ClassB>::NumLiveInstances());
+
+    std::unique_ptr<ClassA> a2(new ClassA);
+
+    EXPECT_EQ(2, LeakTracker<ClassA>::NumLiveInstances());
+    EXPECT_EQ(2, LeakTracker<ClassB>::NumLiveInstances());
+
+    a2.reset();
+
+    EXPECT_EQ(1, LeakTracker<ClassA>::NumLiveInstances());
+    EXPECT_EQ(2, LeakTracker<ClassB>::NumLiveInstances());
+  }
+
+  EXPECT_EQ(0, LeakTracker<ClassA>::NumLiveInstances());
+  EXPECT_EQ(0, LeakTracker<ClassB>::NumLiveInstances());
+}
+
+// Try some orderings of create/remove to hit different cases in how the
+// linked list is assembled.
+TEST(LeakTrackerTest, LinkedList) {
+  EXPECT_EQ(0, LeakTracker<ClassB>::NumLiveInstances());
+
+  std::unique_ptr<ClassA> a1(new ClassA);
+  std::unique_ptr<ClassA> a2(new ClassA);
+  std::unique_ptr<ClassA> a3(new ClassA);
+  std::unique_ptr<ClassA> a4(new ClassA);
+
+  EXPECT_EQ(4, LeakTracker<ClassA>::NumLiveInstances());
+
+  // Remove the head of the list (a1).
+  a1.reset();
+  EXPECT_EQ(3, LeakTracker<ClassA>::NumLiveInstances());
+
+  // Remove the tail of the list (a4).
+  a4.reset();
+  EXPECT_EQ(2, LeakTracker<ClassA>::NumLiveInstances());
+
+  // Append to the new tail of the list (a3).
+  std::unique_ptr<ClassA> a5(new ClassA);
+  EXPECT_EQ(3, LeakTracker<ClassA>::NumLiveInstances());
+
+  a2.reset();
+  a3.reset();
+
+  EXPECT_EQ(1, LeakTracker<ClassA>::NumLiveInstances());
+
+  a5.reset();
+  EXPECT_EQ(0, LeakTracker<ClassA>::NumLiveInstances());
+}
+
+TEST(LeakTrackerTest, NoOpCheckForLeaks) {
+  // There are no live instances of ClassA, so this should do nothing.
+  LeakTracker<ClassA>::CheckForLeaks();
+}
+
+#endif  // ENABLE_LEAK_TRACKER
+
+}  // namespace
+
+}  // namespace debug
+}  // namespace base
diff --git a/base/debug/proc_maps_linux.cc b/base/debug/proc_maps_linux.cc
new file mode 100644
index 0000000..0bb44b4
--- /dev/null
+++ b/base/debug/proc_maps_linux.cc
@@ -0,0 +1,169 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/proc_maps_linux.h"
+
+#include <fcntl.h>
+#include <stddef.h>
+
+#include "base/files/file_util.h"
+#include "base/files/scoped_file.h"
+#include "base/strings/string_split.h"
+#include "build/build_config.h"
+
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+#include <inttypes.h>
+#endif
+
+#if defined(OS_ANDROID) && !defined(__LP64__)
+// In 32-bit mode, Bionic's inttypes.h defines PRI/SCNxPTR as an
+// unsigned long int, which is incompatible with Bionic's stdint.h
+// defining uintptr_t as an unsigned int:
+// https://code.google.com/p/android/issues/detail?id=57218
+#undef SCNxPTR
+#define SCNxPTR "x"
+#endif
+
+namespace base {
+namespace debug {
+
+// Scans |proc_maps| starting from |pos|, returning true if the gate VMA was
+// found and false otherwise.
+static bool ContainsGateVMA(std::string* proc_maps, size_t pos) {
+#if defined(ARCH_CPU_ARM_FAMILY)
+  // The gate VMA on ARM kernels is the interrupt vectors page.
+  return proc_maps->find(" [vectors]\n", pos) != std::string::npos;
+#elif defined(ARCH_CPU_X86_64)
+  // The gate VMA on x86 64-bit kernels is the virtual system call page.
+  return proc_maps->find(" [vsyscall]\n", pos) != std::string::npos;
+#else
+  // Otherwise assume there is no gate VMA, in which case we shouldn't
+  // get duplicate entries.
+  return false;
+#endif
+}
+
+bool ReadProcMaps(std::string* proc_maps) {
+  // seq_file only writes out a page-sized amount on each call. Refer to the
+  // header file for details.
+  const long kReadSize = sysconf(_SC_PAGESIZE);
+
+  base::ScopedFD fd(HANDLE_EINTR(open("/proc/self/maps", O_RDONLY)));
+  if (!fd.is_valid()) {
+    DPLOG(ERROR) << "Couldn't open /proc/self/maps";
+    return false;
+  }
+  proc_maps->clear();
+
+  while (true) {
+    // To avoid a copy, resize |proc_maps| so read() can write directly into it.
+    // Compute |buffer| afterwards since resize() may reallocate.
+    size_t pos = proc_maps->size();
+    proc_maps->resize(pos + kReadSize);
+    void* buffer = &(*proc_maps)[pos];
+
+    ssize_t bytes_read = HANDLE_EINTR(read(fd.get(), buffer, kReadSize));
+    if (bytes_read < 0) {
+      DPLOG(ERROR) << "Couldn't read /proc/self/maps";
+      proc_maps->clear();
+      return false;
+    }
+
+    // ... and don't forget to trim off excess bytes.
+    proc_maps->resize(pos + bytes_read);
+
+    if (bytes_read == 0)
+      break;
+
+    // The gate VMA is handled as a special case after seq_file has finished
+    // iterating through all entries in the virtual memory table.
+    //
+    // Unfortunately, if additional entries are added at this point in time
+    // seq_file gets confused and the next call to read() will return duplicate
+    // entries including the gate VMA again.
+    //
+    // Avoid this by searching for the gate VMA and breaking early.
+    if (ContainsGateVMA(proc_maps, pos))
+      break;
+  }
+
+  return true;
+}
+
+bool ParseProcMaps(const std::string& input,
+                   std::vector<MappedMemoryRegion>* regions_out) {
+  CHECK(regions_out);
+  std::vector<MappedMemoryRegion> regions;
+
+  // This isn't async-safe or terribly efficient, but it doesn't need to be at
+  // this point in time.
+  std::vector<std::string> lines = SplitString(
+      input, "\n", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
+
+  for (size_t i = 0; i < lines.size(); ++i) {
+    // Due to splitting on '\n' the last line should be empty.
+    if (i == lines.size() - 1) {
+      if (!lines[i].empty()) {
+        DLOG(WARNING) << "Last line not empty";
+        return false;
+      }
+      break;
+    }
+
+    MappedMemoryRegion region;
+    const char* line = lines[i].c_str();
+    char permissions[5] = {'\0'};  // Ensure NUL-terminated string.
+    uint8_t dev_major = 0;
+    uint8_t dev_minor = 0;
+    long inode = 0;
+    int path_index = 0;
+
+    // Sample format from man 5 proc:
+    //
+    // address           perms offset  dev   inode   pathname
+    // 08048000-08056000 r-xp 00000000 03:0c 64593   /usr/sbin/gpm
+    //
+    // The final %n term captures the offset in the input string, which is used
+    // to determine the path name. It *does not* increment the return value.
+    // Refer to man 3 sscanf for details.
+    if (sscanf(line, "%" SCNxPTR "-%" SCNxPTR " %4c %llx %hhx:%hhx %ld %n",
+               &region.start, &region.end, permissions, &region.offset,
+               &dev_major, &dev_minor, &inode, &path_index) < 7) {
+      DPLOG(WARNING) << "sscanf failed for line: " << line;
+      return false;
+    }
+
+    region.permissions = 0;
+
+    if (permissions[0] == 'r')
+      region.permissions |= MappedMemoryRegion::READ;
+    else if (permissions[0] != '-')
+      return false;
+
+    if (permissions[1] == 'w')
+      region.permissions |= MappedMemoryRegion::WRITE;
+    else if (permissions[1] != '-')
+      return false;
+
+    if (permissions[2] == 'x')
+      region.permissions |= MappedMemoryRegion::EXECUTE;
+    else if (permissions[2] != '-')
+      return false;
+
+    if (permissions[3] == 'p')
+      region.permissions |= MappedMemoryRegion::PRIVATE;
+    else if (permissions[3] != 's' && permissions[3] != 'S')  // Shared memory.
+      return false;
+
+    // Pushing then assigning saves us a string copy.
+    regions.push_back(region);
+    regions.back().path.assign(line + path_index);
+  }
+
+  regions_out->swap(regions);
+  return true;
+}
+
+}  // namespace debug
+}  // namespace base
diff --git a/base/debug/proc_maps_linux.h b/base/debug/proc_maps_linux.h
new file mode 100644
index 0000000..f5f8a59
--- /dev/null
+++ b/base/debug/proc_maps_linux.h
@@ -0,0 +1,94 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_DEBUG_PROC_MAPS_LINUX_H_
+#define BASE_DEBUG_PROC_MAPS_LINUX_H_
+
+#include <stdint.h>
+
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+
+namespace base {
+namespace debug {
+
+// Describes a region of mapped memory and the path of the file mapped.
+struct MappedMemoryRegion {
+  enum Permission {
+    READ = 1 << 0,
+    WRITE = 1 << 1,
+    EXECUTE = 1 << 2,
+    PRIVATE = 1 << 3,  // If set, region is private, otherwise it is shared.
+  };
+
+  // The address range [start,end) of mapped memory.
+  uintptr_t start;
+  uintptr_t end;
+
+  // Byte offset into |path| of the range mapped into memory.
+  unsigned long long offset;
+
+  // Image base, if this mapping corresponds to an ELF image.
+  uintptr_t base;
+
+  // Bitmask of read/write/execute/private/shared permissions.
+  uint8_t permissions;
+
+  // Name of the file mapped into memory.
+  //
+  // NOTE: path names aren't guaranteed to point at valid files. For example,
+  // "[heap]" and "[stack]" are used to represent the location of the process'
+  // heap and stack, respectively.
+  std::string path;
+};
+
+// Reads the data from /proc/self/maps and stores the result in |proc_maps|.
+// Returns true if successful, false otherwise.
+//
+// There is *NO* guarantee that the resulting contents will be free of
+// duplicates or even contain valid entries by time the method returns.
+//
+//
+// THE GORY DETAILS
+//
+// Did you know it's next-to-impossible to atomically read the whole contents
+// of /proc/<pid>/maps? You would think that if we passed in a large-enough
+// buffer to read() that It Should Just Work(tm), but sadly that's not the case.
+//
+// Linux's procfs uses seq_file [1] for handling iteration, text formatting,
+// and dealing with resulting data that is larger than the size of a page. That
+// last bit is especially important because it means that seq_file will never
+// return more than the size of a page in a single call to read().
+//
+// Unfortunately for a program like Chrome the size of /proc/self/maps is
+// larger than the size of a page, so we're forced to call read() multiple
+// times.
+// If the virtual memory table changed in any way between calls to read() (e.g.,
+// a different thread calling mprotect()), it can make seq_file generate
+// duplicate entries or skip entries.
+//
+// Even if seq_file was changed to keep flushing the contents of its page-sized
+// buffer to the usermode buffer inside a single call to read(), it has to
+// release its lock on the virtual memory table to handle page faults while
+// copying data to usermode. This puts us in the same situation where the table
+// can change while we're copying data.
+//
+// Alternatives such as fork()-and-suspend-the-parent-while-child-reads were
+// attempted, but they present more subtle problems than it's worth. Depending
+// on your use case your best bet may be to read /proc/<pid>/maps prior to
+// starting other threads.
+//
+// [1] http://kernelnewbies.org/Documents/SeqFileHowTo
+BASE_EXPORT bool ReadProcMaps(std::string* proc_maps);
+
+// Parses /proc/<pid>/maps input data and stores in |regions|. Returns true
+// and updates |regions| if and only if all of |input| was successfully parsed.
+BASE_EXPORT bool ParseProcMaps(const std::string& input,
+                               std::vector<MappedMemoryRegion>* regions);
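+
+// An illustrative sketch pairing the two calls above and walking the parsed
+// regions (the local names here are arbitrary):
+//
+//   std::string maps;
+//   std::vector<base::debug::MappedMemoryRegion> regions;
+//   if (base::debug::ReadProcMaps(&maps) &&
+//       base::debug::ParseProcMaps(maps, &regions)) {
+//     for (const auto& region : regions) {
+//       if (region.permissions & base::debug::MappedMemoryRegion::EXECUTE) {
+//         // |region.path| names the mapped file, e.g. a loaded library.
+//       }
+//     }
+//   }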
+
+}  // namespace debug
+}  // namespace base
+
+#endif  // BASE_DEBUG_PROC_MAPS_LINUX_H_
diff --git a/base/debug/proc_maps_linux_unittest.cc b/base/debug/proc_maps_linux_unittest.cc
new file mode 100644
index 0000000..7abf152
--- /dev/null
+++ b/base/debug/proc_maps_linux_unittest.cc
@@ -0,0 +1,328 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/debug/proc_maps_linux.h"
+#include "base/files/file_path.h"
+#include "base/macros.h"
+#include "base/path_service.h"
+#include "base/strings/stringprintf.h"
+#include "base/threading/platform_thread.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace debug {
+
+TEST(ProcMapsTest, Empty) {
+  std::vector<MappedMemoryRegion> regions;
+  EXPECT_TRUE(ParseProcMaps("", &regions));
+  EXPECT_EQ(0u, regions.size());
+}
+
+TEST(ProcMapsTest, NoSpaces) {
+  static const char kNoSpaces[] =
+      "00400000-0040b000 r-xp 00002200 fc:00 794418 /bin/cat\n";
+
+  std::vector<MappedMemoryRegion> regions;
+  ASSERT_TRUE(ParseProcMaps(kNoSpaces, &regions));
+  ASSERT_EQ(1u, regions.size());
+
+  EXPECT_EQ(0x00400000u, regions[0].start);
+  EXPECT_EQ(0x0040b000u, regions[0].end);
+  EXPECT_EQ(0x00002200u, regions[0].offset);
+  EXPECT_EQ("/bin/cat", regions[0].path);
+}
+
+TEST(ProcMapsTest, Spaces) {
+  static const char kSpaces[] =
+      "00400000-0040b000 r-xp 00002200 fc:00 794418 /bin/space cat\n";
+
+  std::vector<MappedMemoryRegion> regions;
+  ASSERT_TRUE(ParseProcMaps(kSpaces, &regions));
+  ASSERT_EQ(1u, regions.size());
+
+  EXPECT_EQ(0x00400000u, regions[0].start);
+  EXPECT_EQ(0x0040b000u, regions[0].end);
+  EXPECT_EQ(0x00002200u, regions[0].offset);
+  EXPECT_EQ("/bin/space cat", regions[0].path);
+}
+
+TEST(ProcMapsTest, NoNewline) {
+  static const char kNoSpaces[] =
+      "00400000-0040b000 r-xp 00002200 fc:00 794418 /bin/cat";
+
+  std::vector<MappedMemoryRegion> regions;
+  ASSERT_FALSE(ParseProcMaps(kNoSpaces, &regions));
+}
+
+TEST(ProcMapsTest, NoPath) {
+  static const char kNoPath[] =
+      "00400000-0040b000 rw-p 00000000 00:00 0 \n";
+
+  std::vector<MappedMemoryRegion> regions;
+  ASSERT_TRUE(ParseProcMaps(kNoPath, &regions));
+  ASSERT_EQ(1u, regions.size());
+
+  EXPECT_EQ(0x00400000u, regions[0].start);
+  EXPECT_EQ(0x0040b000u, regions[0].end);
+  EXPECT_EQ(0x00000000u, regions[0].offset);
+  EXPECT_EQ("", regions[0].path);
+}
+
+TEST(ProcMapsTest, Heap) {
+  static const char kHeap[] =
+      "022ac000-022cd000 rw-p 00000000 00:00 0 [heap]\n";
+
+  std::vector<MappedMemoryRegion> regions;
+  ASSERT_TRUE(ParseProcMaps(kHeap, &regions));
+  ASSERT_EQ(1u, regions.size());
+
+  EXPECT_EQ(0x022ac000u, regions[0].start);
+  EXPECT_EQ(0x022cd000u, regions[0].end);
+  EXPECT_EQ(0x00000000u, regions[0].offset);
+  EXPECT_EQ("[heap]", regions[0].path);
+}
+
+#if defined(ARCH_CPU_32_BITS)
+TEST(ProcMapsTest, Stack32) {
+  static const char kStack[] =
+      "beb04000-beb25000 rw-p 00000000 00:00 0 [stack]\n";
+
+  std::vector<MappedMemoryRegion> regions;
+  ASSERT_TRUE(ParseProcMaps(kStack, &regions));
+  ASSERT_EQ(1u, regions.size());
+
+  EXPECT_EQ(0xbeb04000u, regions[0].start);
+  EXPECT_EQ(0xbeb25000u, regions[0].end);
+  EXPECT_EQ(0x00000000u, regions[0].offset);
+  EXPECT_EQ("[stack]", regions[0].path);
+}
+#elif defined(ARCH_CPU_64_BITS)
+TEST(ProcMapsTest, Stack64) {
+  static const char kStack[] =
+      "7fff69c5b000-7fff69c7d000 rw-p 00000000 00:00 0 [stack]\n";
+
+  std::vector<MappedMemoryRegion> regions;
+  ASSERT_TRUE(ParseProcMaps(kStack, &regions));
+  ASSERT_EQ(1u, regions.size());
+
+  EXPECT_EQ(0x7fff69c5b000u, regions[0].start);
+  EXPECT_EQ(0x7fff69c7d000u, regions[0].end);
+  EXPECT_EQ(0x00000000u, regions[0].offset);
+  EXPECT_EQ("[stack]", regions[0].path);
+}
+#endif
+
+TEST(ProcMapsTest, Multiple) {
+  static const char kMultiple[] =
+      "00400000-0040b000 r-xp 00000000 fc:00 794418 /bin/cat\n"
+      "0060a000-0060b000 r--p 0000a000 fc:00 794418 /bin/cat\n"
+      "0060b000-0060c000 rw-p 0000b000 fc:00 794418 /bin/cat\n";
+
+  std::vector<MappedMemoryRegion> regions;
+  ASSERT_TRUE(ParseProcMaps(kMultiple, &regions));
+  ASSERT_EQ(3u, regions.size());
+
+  EXPECT_EQ(0x00400000u, regions[0].start);
+  EXPECT_EQ(0x0040b000u, regions[0].end);
+  EXPECT_EQ(0x00000000u, regions[0].offset);
+  EXPECT_EQ("/bin/cat", regions[0].path);
+
+  EXPECT_EQ(0x0060a000u, regions[1].start);
+  EXPECT_EQ(0x0060b000u, regions[1].end);
+  EXPECT_EQ(0x0000a000u, regions[1].offset);
+  EXPECT_EQ("/bin/cat", regions[1].path);
+
+  EXPECT_EQ(0x0060b000u, regions[2].start);
+  EXPECT_EQ(0x0060c000u, regions[2].end);
+  EXPECT_EQ(0x0000b000u, regions[2].offset);
+  EXPECT_EQ("/bin/cat", regions[2].path);
+}
+
+TEST(ProcMapsTest, Permissions) {
+  static struct {
+    const char* input;
+    uint8_t permissions;
+  } kTestCases[] = {
+    {"00400000-0040b000 ---s 00000000 fc:00 794418 /bin/cat\n", 0},
+    {"00400000-0040b000 ---S 00000000 fc:00 794418 /bin/cat\n", 0},
+    {"00400000-0040b000 r--s 00000000 fc:00 794418 /bin/cat\n",
+     MappedMemoryRegion::READ},
+    {"00400000-0040b000 -w-s 00000000 fc:00 794418 /bin/cat\n",
+     MappedMemoryRegion::WRITE},
+    {"00400000-0040b000 --xs 00000000 fc:00 794418 /bin/cat\n",
+     MappedMemoryRegion::EXECUTE},
+    {"00400000-0040b000 rwxs 00000000 fc:00 794418 /bin/cat\n",
+     MappedMemoryRegion::READ | MappedMemoryRegion::WRITE |
+         MappedMemoryRegion::EXECUTE},
+    {"00400000-0040b000 ---p 00000000 fc:00 794418 /bin/cat\n",
+     MappedMemoryRegion::PRIVATE},
+    {"00400000-0040b000 r--p 00000000 fc:00 794418 /bin/cat\n",
+     MappedMemoryRegion::READ | MappedMemoryRegion::PRIVATE},
+    {"00400000-0040b000 -w-p 00000000 fc:00 794418 /bin/cat\n",
+     MappedMemoryRegion::WRITE | MappedMemoryRegion::PRIVATE},
+    {"00400000-0040b000 --xp 00000000 fc:00 794418 /bin/cat\n",
+     MappedMemoryRegion::EXECUTE | MappedMemoryRegion::PRIVATE},
+    {"00400000-0040b000 rwxp 00000000 fc:00 794418 /bin/cat\n",
+     MappedMemoryRegion::READ | MappedMemoryRegion::WRITE |
+         MappedMemoryRegion::EXECUTE | MappedMemoryRegion::PRIVATE},
+  };
+
+  for (size_t i = 0; i < arraysize(kTestCases); ++i) {
+    SCOPED_TRACE(
+        base::StringPrintf("kTestCases[%zu] = %s", i, kTestCases[i].input));
+
+    std::vector<MappedMemoryRegion> regions;
+    EXPECT_TRUE(ParseProcMaps(kTestCases[i].input, &regions));
+    EXPECT_EQ(1u, regions.size());
+    if (regions.empty())
+      continue;
+    EXPECT_EQ(kTestCases[i].permissions, regions[0].permissions);
+  }
+}
+
+#if defined(ADDRESS_SANITIZER)
+// AddressSanitizer may move local variables to a dedicated "fake stack" which
+// is outside the stack region listed in /proc/self/maps. We disable ASan
+// instrumentation for this function to force the variable to be local.
+__attribute__((no_sanitize_address))
+#endif
+void CheckProcMapsRegions(const std::vector<MappedMemoryRegion>& regions) {
+  // We should be able to find both the current executable as well as the stack
+  // mapped into memory. Use the address of |exe_path| as a way of finding the
+  // stack.
+  FilePath exe_path;
+  EXPECT_TRUE(PathService::Get(FILE_EXE, &exe_path));
+  uintptr_t address = reinterpret_cast<uintptr_t>(&exe_path);
+  bool found_exe = false;
+  bool found_stack = false;
+  bool found_address = false;
+
+  for (size_t i = 0; i < regions.size(); ++i) {
+    if (regions[i].path == exe_path.value()) {
+      // It's OK to find the executable mapped multiple times as there'll be
+      // multiple sections (e.g., text, data).
+      found_exe = true;
+    }
+
+    if (regions[i].path == "[stack]") {
+// On Android the test is run on a background thread; since [stack] refers to
+// the main thread's stack, we cannot test this there.
+#if !defined(OS_ANDROID)
+      EXPECT_GE(address, regions[i].start);
+      EXPECT_LT(address, regions[i].end);
+#endif
+      EXPECT_TRUE(regions[i].permissions & MappedMemoryRegion::READ);
+      EXPECT_TRUE(regions[i].permissions & MappedMemoryRegion::WRITE);
+      EXPECT_FALSE(regions[i].permissions & MappedMemoryRegion::EXECUTE);
+      EXPECT_TRUE(regions[i].permissions & MappedMemoryRegion::PRIVATE);
+      EXPECT_FALSE(found_stack) << "Found duplicate stacks";
+      found_stack = true;
+    }
+
+    if (address >= regions[i].start && address < regions[i].end) {
+      EXPECT_FALSE(found_address) << "Found same address in multiple regions";
+      found_address = true;
+    }
+  }
+
+  EXPECT_TRUE(found_exe);
+  EXPECT_TRUE(found_stack);
+  EXPECT_TRUE(found_address);
+}
+
+TEST(ProcMapsTest, ReadProcMaps) {
+  std::string proc_maps;
+  ASSERT_TRUE(ReadProcMaps(&proc_maps));
+
+  std::vector<MappedMemoryRegion> regions;
+  ASSERT_TRUE(ParseProcMaps(proc_maps, &regions));
+  ASSERT_FALSE(regions.empty());
+
+  CheckProcMapsRegions(regions);
+}
+
+TEST(ProcMapsTest, ReadProcMapsNonEmptyString) {
+  std::string old_string("I forgot to clear the string");
+  std::string proc_maps(old_string);
+  ASSERT_TRUE(ReadProcMaps(&proc_maps));
+  EXPECT_EQ(std::string::npos, proc_maps.find(old_string));
+}
+
+TEST(ProcMapsTest, MissingFields) {
+  static const char* const kTestCases[] = {
+    "00400000\n",                               // Missing end + beyond.
+    "00400000-0040b000\n",                      // Missing perms + beyond.
+    "00400000-0040b000 r-xp\n",                 // Missing offset + beyond.
+    "00400000-0040b000 r-xp 00000000\n",        // Missing device + beyond.
+    "00400000-0040b000 r-xp 00000000 fc:00\n",  // Missing inode + beyond.
+    "00400000-0040b000 00000000 fc:00 794418 /bin/cat\n",  // Missing perms.
+    "00400000-0040b000 r-xp fc:00 794418 /bin/cat\n",      // Missing offset.
+    "00400000-0040b000 r-xp 00000000 fc:00 /bin/cat\n",    // Missing inode.
+    "00400000 r-xp 00000000 fc:00 794418 /bin/cat\n",      // Missing end.
+    "-0040b000 r-xp 00000000 fc:00 794418 /bin/cat\n",     // Missing start.
+    "00400000-0040b000 r-xp 00000000 794418 /bin/cat\n",   // Missing device.
+  };
+
+  for (size_t i = 0; i < arraysize(kTestCases); ++i) {
+    SCOPED_TRACE(base::StringPrintf("kTestCases[%zu] = %s", i, kTestCases[i]));
+    std::vector<MappedMemoryRegion> regions;
+    EXPECT_FALSE(ParseProcMaps(kTestCases[i], &regions));
+  }
+}
+
+TEST(ProcMapsTest, InvalidInput) {
+  static const char* const kTestCases[] = {
+    "thisisal-0040b000 rwxp 00000000 fc:00 794418 /bin/cat\n",
+    "0040000d-linvalid rwxp 00000000 fc:00 794418 /bin/cat\n",
+    "00400000-0040b000 inpu 00000000 fc:00 794418 /bin/cat\n",
+    "00400000-0040b000 rwxp tforproc fc:00 794418 /bin/cat\n",
+    "00400000-0040b000 rwxp 00000000 ma:ps 794418 /bin/cat\n",
+    "00400000-0040b000 rwxp 00000000 fc:00 parse! /bin/cat\n",
+  };
+
+  for (size_t i = 0; i < arraysize(kTestCases); ++i) {
+    SCOPED_TRACE(base::StringPrintf("kTestCases[%zu] = %s", i, kTestCases[i]));
+    std::vector<MappedMemoryRegion> regions;
+    EXPECT_FALSE(ParseProcMaps(kTestCases[i], &regions));
+  }
+}
+
+TEST(ProcMapsTest, ParseProcMapsEmptyString) {
+  std::vector<MappedMemoryRegion> regions;
+  EXPECT_TRUE(ParseProcMaps("", &regions));
+  EXPECT_EQ(0ULL, regions.size());
+}
+
+// Testing a couple of remotely possible weird things in the input:
+// - Line ending with \r\n or \n\r.
+// - File name contains quotes.
+// - File name contains whitespace.
+TEST(ProcMapsTest, ParseProcMapsWeirdCorrectInput) {
+  std::vector<MappedMemoryRegion> regions;
+  const std::string kContents =
+    "00400000-0040b000 r-xp 00000000 fc:00 2106562 "
+      "               /bin/cat\r\n"
+    "7f53b7dad000-7f53b7f62000 r-xp 00000000 fc:00 263011 "
+      "       /lib/x86_64-linux-gnu/libc-2.15.so\n\r"
+    "7f53b816d000-7f53b818f000 r-xp 00000000 fc:00 264284 "
+      "        /lib/x86_64-linux-gnu/ld-2.15.so\n"
+    "7fff9c7ff000-7fff9c800000 r-xp 00000000 00:00 0 "
+      "               \"vd so\"\n"
+    "ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0 "
+      "               [vsys call]\n";
+  EXPECT_TRUE(ParseProcMaps(kContents, &regions));
+  EXPECT_EQ(5ULL, regions.size());
+  EXPECT_EQ("/bin/cat", regions[0].path);
+  EXPECT_EQ("/lib/x86_64-linux-gnu/libc-2.15.so", regions[1].path);
+  EXPECT_EQ("/lib/x86_64-linux-gnu/ld-2.15.so", regions[2].path);
+  EXPECT_EQ("\"vd so\"", regions[3].path);
+  EXPECT_EQ("[vsys call]", regions[4].path);
+}
+
+}  // namespace debug
+}  // namespace base
diff --git a/base/debug/profiler.cc b/base/debug/profiler.cc
new file mode 100644
index 0000000..1ee9483
--- /dev/null
+++ b/base/debug/profiler.cc
@@ -0,0 +1,188 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/profiler.h"
+
+#include <string>
+
+#include "base/debug/debugging_buildflags.h"
+#include "base/process/process_handle.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_util.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include "base/win/current_module.h"
+#include "base/win/pe_image.h"
+#endif  // defined(OS_WIN)
+
+// TODO(peria): Enable profiling on Windows.
+#if BUILDFLAG(ENABLE_PROFILING) && !defined(NO_TCMALLOC) && !defined(OS_WIN)
+#include "third_party/tcmalloc/chromium/src/gperftools/profiler.h"
+#endif
+
+namespace base {
+namespace debug {
+
+// TODO(peria): Enable profiling on Windows.
+#if BUILDFLAG(ENABLE_PROFILING) && !defined(NO_TCMALLOC) && !defined(OS_WIN)
+
+static int profile_count = 0;
+
+void StartProfiling(const std::string& name) {
+  ++profile_count;
+  std::string full_name(name);
+  std::string pid = IntToString(GetCurrentProcId());
+  std::string count = IntToString(profile_count);
+  ReplaceSubstringsAfterOffset(&full_name, 0, "{pid}", pid);
+  ReplaceSubstringsAfterOffset(&full_name, 0, "{count}", count);
+  ProfilerStart(full_name.c_str());
+}
+
+void StopProfiling() {
+  ProfilerFlush();
+  ProfilerStop();
+}
+
+void FlushProfiling() {
+  ProfilerFlush();
+}
+
+bool BeingProfiled() {
+  return ProfilingIsEnabledForAllThreads();
+}
+
+void RestartProfilingAfterFork() {
+  ProfilerRegisterThread();
+}
+
+bool IsProfilingSupported() {
+  return true;
+}
+
+#else
+
+void StartProfiling(const std::string& name) {
+}
+
+void StopProfiling() {
+}
+
+void FlushProfiling() {
+}
+
+bool BeingProfiled() {
+  return false;
+}
+
+void RestartProfilingAfterFork() {
+}
+
+bool IsProfilingSupported() {
+  return false;
+}
+
+#endif
+
+#if !defined(OS_WIN)
+
+ReturnAddressLocationResolver GetProfilerReturnAddrResolutionFunc() {
+  return nullptr;
+}
+
+DynamicFunctionEntryHook GetProfilerDynamicFunctionEntryHookFunc() {
+  return nullptr;
+}
+
+AddDynamicSymbol GetProfilerAddDynamicSymbolFunc() {
+  return nullptr;
+}
+
+MoveDynamicSymbol GetProfilerMoveDynamicSymbolFunc() {
+  return nullptr;
+}
+
+#else  // defined(OS_WIN)
+
+namespace {
+
+struct FunctionSearchContext {
+  const char* name;
+  FARPROC function;
+};
+
+// Callback function to PEImage::EnumImportChunks.
+bool FindResolutionFunctionInImports(
+    const base::win::PEImage &image, const char* module_name,
+    PIMAGE_THUNK_DATA unused_name_table, PIMAGE_THUNK_DATA import_address_table,
+    PVOID cookie) {
+  FunctionSearchContext* context =
+      reinterpret_cast<FunctionSearchContext*>(cookie);
+
+  DCHECK(context);
+  DCHECK(!context->function);
+
+  // Our import address table contains pointers to the functions we import
+  // at this point. Let's retrieve the first such function and use it to
+  // find the module this import was resolved to by the loader.
+  const wchar_t* function_in_module =
+      reinterpret_cast<const wchar_t*>(import_address_table->u1.Function);
+
+  // Retrieve the module by a function in the module.
+  const DWORD kFlags = GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS |
+                       GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT;
+  HMODULE module = NULL;
+  if (!::GetModuleHandleEx(kFlags, function_in_module, &module)) {
+    // This can happen if someone IAT patches us to a thunk.
+    return true;
+  }
+
+  // See whether this module exports the function we're looking for.
+  FARPROC exported_func = ::GetProcAddress(module, context->name);
+  if (exported_func != NULL) {
+    // We found it, return the function and terminate the enumeration.
+    context->function = exported_func;
+    return false;
+  }
+
+  // Keep going.
+  return true;
+}
+
+template <typename FunctionType>
+FunctionType FindFunctionInImports(const char* function_name) {
+  base::win::PEImage image(CURRENT_MODULE());
+
+  FunctionSearchContext ctx = { function_name, NULL };
+  image.EnumImportChunks(FindResolutionFunctionInImports, &ctx);
+
+  return reinterpret_cast<FunctionType>(ctx.function);
+}
+
+}  // namespace
+
+ReturnAddressLocationResolver GetProfilerReturnAddrResolutionFunc() {
+  return FindFunctionInImports<ReturnAddressLocationResolver>(
+      "ResolveReturnAddressLocation");
+}
+
+DynamicFunctionEntryHook GetProfilerDynamicFunctionEntryHookFunc() {
+  return FindFunctionInImports<DynamicFunctionEntryHook>(
+      "OnDynamicFunctionEntry");
+}
+
+AddDynamicSymbol GetProfilerAddDynamicSymbolFunc() {
+  return FindFunctionInImports<AddDynamicSymbol>(
+      "AddDynamicSymbol");
+}
+
+MoveDynamicSymbol GetProfilerMoveDynamicSymbolFunc() {
+  return FindFunctionInImports<MoveDynamicSymbol>(
+      "MoveDynamicSymbol");
+}
+
+#endif  // defined(OS_WIN)
+
+}  // namespace debug
+}  // namespace base
diff --git a/base/debug/profiler.h b/base/debug/profiler.h
new file mode 100644
index 0000000..f706a1a
--- /dev/null
+++ b/base/debug/profiler.h
@@ -0,0 +1,91 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_DEBUG_PROFILER_H_
+#define BASE_DEBUG_PROFILER_H_
+
+#include <stddef.h>
+
+#include <string>
+
+#include "base/base_export.h"
+
+// The Profiler functions allow usage of the underlying sampling based
+// profiler. If the application has not been built with the necessary
+// flags (-DENABLE_PROFILING and not -DNO_TCMALLOC) then these functions
+// are noops.
+namespace base {
+namespace debug {
+
+// Start profiling with the supplied name.
+// {pid} will be replaced by the process' pid and {count} will be replaced
+// by the count of the profile run (starts at 1 with each process).
+BASE_EXPORT void StartProfiling(const std::string& name);
+
+// Stop profiling and write out data.
+BASE_EXPORT void StopProfiling();
+
+// Force data to be written to file.
+BASE_EXPORT void FlushProfiling();
+
+// Returns true if process is being profiled.
+BASE_EXPORT bool BeingProfiled();
+
+// Reset profiling after a fork, which disables timers.
+BASE_EXPORT void RestartProfilingAfterFork();
+
+// Returns true iff this executable supports profiling.
+BASE_EXPORT bool IsProfilingSupported();
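+
+// A minimal usage sketch (the {pid}/{count} tokens follow StartProfiling's
+// contract above; the file name and workload are illustrative only):
+//
+//   if (base::debug::IsProfilingSupported()) {
+//     base::debug::StartProfiling("myapp-{pid}-{count}.prof");
+//     DoExpensiveWork();             // Hypothetical workload.
+//     base::debug::StopProfiling();  // Writes out the collected samples.
+//   }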
+
+// There's a class of profilers that use "return address swizzling" to get a
+// hook on function exits. This class of profilers uses some form of entry hook,
+// like e.g. binary instrumentation, or a compiler flag, that calls a hook each
+// time a function is invoked. The hook then switches the return address on the
+// stack for the address of an exit hook function, and pushes the original
+// return address to a shadow stack of some type. When in due course the CPU
+// executes a return to the exit hook, the exit hook will do whatever work it
+// does on function exit, then arrange to return to the original return address.
+// This class of profiler does not play well with programs that look at the
+// return address, as does e.g. V8. V8 uses the return address passed to
+// certain runtime functions to find the JIT code that called them, and from
+// there finds the V8 data structures associated with the JS function involved.
+// A return address resolution function is used to fix this. It allows such
+// programs to resolve a location on stack where a return address originally
+// resided, to the shadow stack location where the profiler stashed it.
+typedef uintptr_t (*ReturnAddressLocationResolver)(
+    uintptr_t return_addr_location);
+
+// The functions below here are to support profiling V8-generated code.
+// V8 has provisions for generating a call to an entry hook for newly generated
+// JIT code, and it can push symbol information on code generation and advise
+// when the garbage collector moves code. The function declarations below
+// make the glue between V8's facilities and a profiler.
+
+// This type declaration must match V8's FunctionEntryHook.
+typedef void (*DynamicFunctionEntryHook)(uintptr_t function,
+                                         uintptr_t return_addr_location);
+
+typedef void (*AddDynamicSymbol)(const void* address,
+                                 size_t length,
+                                 const char* name,
+                                 size_t name_len);
+typedef void (*MoveDynamicSymbol)(const void* address, const void* new_address);
+
+// If this binary is instrumented and the instrumentation supplies a function
+// for each of those purposes, find and return the function in question.
+// Otherwise returns NULL.
+BASE_EXPORT ReturnAddressLocationResolver GetProfilerReturnAddrResolutionFunc();
+BASE_EXPORT DynamicFunctionEntryHook GetProfilerDynamicFunctionEntryHookFunc();
+BASE_EXPORT AddDynamicSymbol GetProfilerAddDynamicSymbolFunc();
+BASE_EXPORT MoveDynamicSymbol GetProfilerMoveDynamicSymbolFunc();
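+
+// Sketch of how a client might wire one of these up (the V8 setter name is an
+// assumption based on the comments above, not something this header provides):
+//
+//   ReturnAddressLocationResolver resolve_func =
+//       base::debug::GetProfilerReturnAddrResolutionFunc();
+//   if (resolve_func)
+//     v8::V8::SetReturnAddressLocationResolver(resolve_func);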
+
+}  // namespace debug
+}  // namespace base
+
+#endif  // BASE_DEBUG_PROFILER_H_
diff --git a/base/debug/stack_trace.cc b/base/debug/stack_trace.cc
new file mode 100644
index 0000000..7715121
--- /dev/null
+++ b/base/debug/stack_trace.cc
@@ -0,0 +1,277 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/stack_trace.h"
+
+#include <string.h>
+
+#include <algorithm>
+#include <sstream>
+
+#include "base/logging.h"
+#include "base/macros.h"
+
+#if BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
+
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+#include <pthread.h>
+#include "base/process/process_handle.h"
+#include "base/threading/platform_thread.h"
+#endif
+
+#if defined(OS_MACOSX)
+#include <pthread.h>
+#endif
+
+#if defined(OS_LINUX) && defined(__GLIBC__)
+extern "C" void* __libc_stack_end;
+#endif
+
+#endif  // BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
+
+namespace base {
+namespace debug {
+
+namespace {
+
+#if BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
+
+#if defined(__arm__) && defined(__GNUC__) && !defined(__clang__)
+// GCC and LLVM generate slightly different frames on ARM, see
+// https://llvm.org/bugs/show_bug.cgi?id=18505 - LLVM generates
+// x86-compatible frame, while GCC needs adjustment.
+constexpr size_t kStackFrameAdjustment = sizeof(uintptr_t);
+#else
+constexpr size_t kStackFrameAdjustment = 0;
+#endif
+
+uintptr_t GetNextStackFrame(uintptr_t fp) {
+  return reinterpret_cast<const uintptr_t*>(fp)[0] - kStackFrameAdjustment;
+}
+
+uintptr_t GetStackFramePC(uintptr_t fp) {
+  return reinterpret_cast<const uintptr_t*>(fp)[1];
+}
+
+bool IsStackFrameValid(uintptr_t fp, uintptr_t prev_fp, uintptr_t stack_end) {
+  // With the stack growing downwards, an older stack frame must be
+  // at a greater address than the current one.
+  if (fp <= prev_fp) return false;
+
+  // Assume huge stack frames are bogus.
+  if (fp - prev_fp > 100000) return false;
+
+  // Check alignment.
+  if (fp & (sizeof(uintptr_t) - 1)) return false;
+
+  if (stack_end) {
+    // Both fp[0] and fp[1] must be within the stack.
+    if (fp > stack_end - 2 * sizeof(uintptr_t)) return false;
+
+    // Additional check to filter out false positives.
+    if (GetStackFramePC(fp) < 32768) return false;
+  }
+
+  return true;
+}
+
+// ScanStackForNextFrame() scans the stack for a valid frame to allow unwinding
+// past system libraries. Only supported on Linux where system libraries are
+// usually in the middle of the trace:
+//
+//   TraceStackFramePointers
+//   <more frames from Chrome>
+//   base::WorkSourceDispatch   <-- unwinding stops (next frame is invalid),
+//   g_main_context_dispatch        ScanStackForNextFrame() is called
+//   <more frames from glib>
+//   g_main_context_iteration
+//   base::MessagePumpGlib::Run <-- ScanStackForNextFrame() finds valid frame,
+//   base::RunLoop::Run             unwinding resumes
+//   <more frames from Chrome>
+//   __libc_start_main
+//
+// For stack scanning to be efficient it's very important for the thread to
+// be started by Chrome. In that case we naturally terminate unwinding once
+// we reach the origin of the stack (i.e. GetStackEnd()). If the thread is
+// not started by Chrome (e.g. Android's main thread), then we end up always
+// scanning area at the origin of the stack, wasting time and not finding any
+// frames (since Android libraries don't have frame pointers).
+//
+// ScanStackForNextFrame() returns 0 if it couldn't find a valid frame
+// (or if stack scanning is not supported on the current platform).
+uintptr_t ScanStackForNextFrame(uintptr_t fp, uintptr_t stack_end) {
+#if defined(OS_LINUX)
+  // Enough to resume almost all prematurely terminated traces.
+  constexpr size_t kMaxStackScanArea = 8192;
+
+  if (!stack_end) {
+    // Too dangerous to scan without knowing where the stack ends.
+    return 0;
+  }
+
+  fp += sizeof(uintptr_t);  // current frame is known to be invalid
+  uintptr_t last_fp_to_scan =
+      std::min(fp + kMaxStackScanArea, stack_end) - sizeof(uintptr_t);
+  for (; fp <= last_fp_to_scan; fp += sizeof(uintptr_t)) {
+    uintptr_t next_fp = GetNextStackFrame(fp);
+    if (IsStackFrameValid(next_fp, fp, stack_end)) {
+      // Check two frames deep. Since stack frame is just a pointer to
+      // a higher address on the stack, it's relatively easy to find
+      // something that looks like one. However two linked frames are
+      // far less likely to be bogus.
+      uintptr_t next2_fp = GetNextStackFrame(next_fp);
+      if (IsStackFrameValid(next2_fp, next_fp, stack_end)) {
+        return fp;
+      }
+    }
+  }
+#endif  // defined(OS_LINUX)
+
+  return 0;
+}
+
+// Links stack frame |fp| to |parent_fp|, so that during stack unwinding
+// TraceStackFramePointers() visits |parent_fp| after visiting |fp|.
+// Both frame pointers must come from __builtin_frame_address().
+// Returns previous stack frame |fp| was linked to.
+void* LinkStackFrames(void* fpp, void* parent_fp) {
+  uintptr_t fp = reinterpret_cast<uintptr_t>(fpp) - kStackFrameAdjustment;
+  void* prev_parent_fp = reinterpret_cast<void**>(fp)[0];
+  reinterpret_cast<void**>(fp)[0] = parent_fp;
+  return prev_parent_fp;
+}
+
+#endif  // BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
+
+}  // namespace
+
+#if BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
+uintptr_t GetStackEnd() {
+#if defined(OS_ANDROID)
+  // Bionic reads /proc/maps on every call to pthread_getattr_np() when called
+  // from the main thread, so we need to cache the end of the stack in that
+  // case to get acceptable performance.
+  // For all other threads pthread_getattr_np() is fast enough as it just reads
+  // values from its pthread_t argument.
+  static uintptr_t main_stack_end = 0;
+
+  bool is_main_thread = GetCurrentProcId() == PlatformThread::CurrentId();
+  if (is_main_thread && main_stack_end) {
+    return main_stack_end;
+  }
+
+  uintptr_t stack_begin = 0;
+  size_t stack_size = 0;
+  pthread_attr_t attributes;
+  int error = pthread_getattr_np(pthread_self(), &attributes);
+  if (!error) {
+    error = pthread_attr_getstack(
+        &attributes, reinterpret_cast<void**>(&stack_begin), &stack_size);
+    pthread_attr_destroy(&attributes);
+  }
+  DCHECK(!error);
+
+  uintptr_t stack_end = stack_begin + stack_size;
+  if (is_main_thread) {
+    main_stack_end = stack_end;
+  }
+  return stack_end;  // 0 in case of error
+
+#elif defined(OS_LINUX) && defined(__GLIBC__)
+
+  if (GetCurrentProcId() == PlatformThread::CurrentId()) {
+    // For the main thread we have a shortcut.
+    return reinterpret_cast<uintptr_t>(__libc_stack_end);
+  }
+
+// No easy way to get the end of the stack for non-main threads,
+// see crbug.com/617730.
+#elif defined(OS_MACOSX)
+  return reinterpret_cast<uintptr_t>(pthread_get_stackaddr_np(pthread_self()));
+#endif
+
+  // Don't know how to get end of the stack.
+  return 0;
+}
+#endif  // BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
+
+StackTrace::StackTrace() : StackTrace(arraysize(trace_)) {}
+
+StackTrace::StackTrace(const void* const* trace, size_t count) {
+  count = std::min(count, arraysize(trace_));
+  if (count)
+    memcpy(trace_, trace, count * sizeof(trace_[0]));
+  count_ = count;
+}
+
+const void* const* StackTrace::Addresses(size_t* count) const {
+  *count = count_;
+  if (count_)
+    return trace_;
+  return nullptr;
+}
+
+std::string StackTrace::ToString() const {
+  std::stringstream stream;
+#if !defined(__UCLIBC__) && !defined(_AIX)
+  OutputToStream(&stream);
+#endif
+  return stream.str();
+}
+
+#if BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
+
+size_t TraceStackFramePointers(const void** out_trace,
+                               size_t max_depth,
+                               size_t skip_initial) {
+  // Usage of __builtin_frame_address() enables frame pointers in this
+  // function even if they are not enabled globally. So 'fp' will always
+  // be valid.
+  uintptr_t fp = reinterpret_cast<uintptr_t>(__builtin_frame_address(0)) -
+                 kStackFrameAdjustment;
+
+  uintptr_t stack_end = GetStackEnd();
+
+  size_t depth = 0;
+  while (depth < max_depth) {
+    if (skip_initial != 0) {
+      skip_initial--;
+    } else {
+      out_trace[depth++] = reinterpret_cast<const void*>(GetStackFramePC(fp));
+    }
+
+    uintptr_t next_fp = GetNextStackFrame(fp);
+    if (IsStackFrameValid(next_fp, fp, stack_end)) {
+      fp = next_fp;
+      continue;
+    }
+
+    next_fp = ScanStackForNextFrame(fp, stack_end);
+    if (next_fp) {
+      fp = next_fp;
+      continue;
+    }
+
+    // Failed to find next frame.
+    break;
+  }
+
+  return depth;
+}
+
+ScopedStackFrameLinker::ScopedStackFrameLinker(void* fp, void* parent_fp)
+    : fp_(fp),
+      parent_fp_(parent_fp),
+      original_parent_fp_(LinkStackFrames(fp, parent_fp)) {}
+
+ScopedStackFrameLinker::~ScopedStackFrameLinker() {
+  void* previous_parent_fp = LinkStackFrames(fp_, original_parent_fp_);
+  CHECK_EQ(parent_fp_, previous_parent_fp)
+      << "Stack frame's parent pointer has changed!";
+}
+
+#endif  // BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
+
+}  // namespace debug
+}  // namespace base
diff --git a/base/debug/stack_trace.h b/base/debug/stack_trace.h
new file mode 100644
index 0000000..81e6672
--- /dev/null
+++ b/base/debug/stack_trace.h
@@ -0,0 +1,197 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_DEBUG_STACK_TRACE_H_
+#define BASE_DEBUG_STACK_TRACE_H_
+
+#include <stddef.h>
+
+#include <iosfwd>
+#include <string>
+
+#include "base/base_export.h"
+#include "base/debug/debugging_buildflags.h"
+#include "base/macros.h"
+#include "build/build_config.h"
+
+#if defined(OS_POSIX)
+#include <unistd.h>
+#endif
+
+#if defined(OS_WIN)
+struct _EXCEPTION_POINTERS;
+struct _CONTEXT;
+#endif
+
+namespace base {
+namespace debug {
+
+// Enables stack dump to console output on exception and signals.
+// When enabled, the process will quit immediately. This is meant to be used in
+// unit_tests only! This is not thread-safe: only call from main thread.
+// In sandboxed processes, this has to be called before the sandbox is turned
+// on.
+// Calling this function on Linux opens /proc/self/maps and caches its
+// contents. In non-official builds, this function also opens the object files
+// that are loaded in memory and caches their file descriptors (this cannot be
+// done in official builds because it has security implications).
+BASE_EXPORT bool EnableInProcessStackDumping();
+
+#if defined(OS_POSIX)
+BASE_EXPORT void SetStackDumpFirstChanceCallback(bool (*handler)(int,
+                                                                 void*,
+                                                                 void*));
+#endif
+
+// Returns end of the stack, or 0 if we couldn't get it.
+#if BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
+BASE_EXPORT uintptr_t GetStackEnd();
+#endif
+
+// A stacktrace can be helpful in debugging. For example, you can include a
+// stacktrace member in an object (probably around #ifndef NDEBUG) so that you
+// can later see where the given object was created from.
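+//
+// A sketch of that suggestion (|Widget| is a hypothetical class):
+//
+//   class Widget {
+//     ...
+//    private:
+//   #ifndef NDEBUG
+//     base::debug::StackTrace creation_stack_;  // Captured at construction.
+//   #endif
+//   };
+//
+//   // Later, e.g. from a debugger: widget->creation_stack_.Print();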
+class BASE_EXPORT StackTrace {
+ public:
+  // Creates a stacktrace from the current location.
+  StackTrace();
+
+  // Creates a stacktrace from the current location, of up to |count| entries.
+  // |count| will be limited to at most |kMaxTraces|.
+  explicit StackTrace(size_t count);
+
+  // Creates a stacktrace from an existing array of instruction
+  // pointers (such as returned by Addresses()).  |count| will be
+  // limited to at most |kMaxTraces|.
+  StackTrace(const void* const* trace, size_t count);
+
+#if defined(OS_WIN)
+  // Creates a stacktrace for an exception.
+  // Note: this function will throw an import not found (StackWalk64) exception
+  // on systems without dbghelp 5.1.
+  StackTrace(_EXCEPTION_POINTERS* exception_pointers);
+  StackTrace(const _CONTEXT* context);
+#endif
+
+  // Copying and assignment are allowed with the default functions.
+
+  // Gets an array of instruction pointer values. |*count| will be set to the
+  // number of elements in the returned array.
+  const void* const* Addresses(size_t* count) const;
+
+  // Prints the stack trace to stderr.
+  void Print() const;
+
+#if !defined(__UCLIBC__) && !defined(_AIX)
+  // Resolves backtrace to symbols and writes to stream.
+  void OutputToStream(std::ostream* os) const;
+#endif
+
+  // Resolves backtrace to symbols and returns as string.
+  std::string ToString() const;
+
+ private:
+#if defined(OS_WIN)
+  void InitTrace(const _CONTEXT* context_record);
+#endif
+
+  // From http://msdn.microsoft.com/en-us/library/bb204633.aspx,
+  // the sum of FramesToSkip and FramesToCapture must be less than 63,
+  // so set it to 62. Even though POSIX would allow a larger value, a longer
+  // trace usually doesn't add much more information.
+  static const int kMaxTraces = 62;
+
+  void* trace_[kMaxTraces];
+
+  // The number of valid frames in |trace_|.
+  size_t count_;
+};
+
+#if BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
+// Traces the stack by using frame pointers. This function is faster but less
+// reliable than StackTrace. It should work for debug and profiling builds,
+// but not for release builds (although there are some exceptions).
+//
+// Writes at most |max_depth| frames (instruction pointers) into |out_trace|
+// after skipping |skip_initial| frames. Note that the function itself is not
+// added to the trace so |skip_initial| should be 0 in most cases.
+// Returns number of frames written.
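+//
+// A minimal usage sketch (the depth of 16 is arbitrary):
+//
+//   const void* frames[16];
+//   size_t depth = TraceStackFramePointers(frames, 16, 0);
+//   // frames[0..depth) now hold instruction pointers, innermost first.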
+BASE_EXPORT size_t TraceStackFramePointers(const void** out_trace,
+                                           size_t max_depth,
+                                           size_t skip_initial);
+
+// Links stack frame |fp| to |parent_fp|, so that during stack unwinding
+// TraceStackFramePointers() visits |parent_fp| after visiting |fp|.
+// Both frame pointers must come from __builtin_frame_address().
+// Destructor restores original linkage of |fp| to avoid corrupting caller's
+// frame register on return.
+//
+// This class can be used to repair a broken stack frame chain in cases
+// where execution flow goes into code built without frame pointers:
+//
+// void DoWork() {
+//   Call_SomeLibrary();
+// }
+// static __thread void*  g_saved_fp;
+// void Call_SomeLibrary() {
+//   g_saved_fp = __builtin_frame_address(0);
+//   some_library_call(...); // indirectly calls SomeLibrary_Callback()
+// }
+// void SomeLibrary_Callback() {
+//   ScopedStackFrameLinker linker(__builtin_frame_address(0), g_saved_fp);
+//   ...
+//   TraceStackFramePointers(...);
+// }
+//
+// This produces the following trace:
+//
+// #0 SomeLibrary_Callback()
+// #1 <address of the code inside SomeLibrary that called #0>
+// #2 DoWork()
+// ...rest of the trace...
+//
+// SomeLibrary doesn't use frame pointers, so when SomeLibrary_Callback()
+// is called, the stack frame register contains a bogus value that becomes the
+// callback's parent frame address. Without ScopedStackFrameLinker, unwinding
+// would've stopped at that bogus frame address, yielding just the first two
+// frames (#0, #1). ScopedStackFrameLinker overwrites the callback's parent
+// frame address with Call_SomeLibrary's frame, so the unwinder produces the
+// full trace without even noticing that the stack frame chain was broken.
+class BASE_EXPORT ScopedStackFrameLinker {
+ public:
+  ScopedStackFrameLinker(void* fp, void* parent_fp);
+  ~ScopedStackFrameLinker();
+
+ private:
+  void* fp_;
+  void* parent_fp_;
+  void* original_parent_fp_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedStackFrameLinker);
+};
+
+#endif  // BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
+
+namespace internal {
+
+#if defined(OS_POSIX) && !defined(OS_ANDROID)
+// POSIX doesn't define any async-signal safe function for converting
+// an integer to ASCII. We'll have to define our own version.
+// itoa_r() converts a (signed) integer to ASCII. It returns "buf" if the
+// conversion was successful, or NULL otherwise. It never writes more than
+// "sz" bytes. Output will be truncated as needed, and a NUL character is
+// always appended.
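+//
+// Examples (mirroring the unit tests in stack_trace_unittest.cc):
+//   itoa_r(-1, buf, sz, 10, 0)     -> "-1"
+//   itoa_r(0x688, buf, sz, 16, 4)  -> "0688"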
+BASE_EXPORT char *itoa_r(intptr_t i,
+                         char *buf,
+                         size_t sz,
+                         int base,
+                         size_t padding);
+#endif  // defined(OS_POSIX) && !defined(OS_ANDROID)
+
+}  // namespace internal
+
+}  // namespace debug
+}  // namespace base
+
+#endif  // BASE_DEBUG_STACK_TRACE_H_
diff --git a/base/debug/stack_trace_android.cc b/base/debug/stack_trace_android.cc
new file mode 100644
index 0000000..329204c
--- /dev/null
+++ b/base/debug/stack_trace_android.cc
@@ -0,0 +1,134 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/stack_trace.h"
+
+#include <android/log.h>
+#include <signal.h>
+#include <stddef.h>
+#include <string.h>
+#include <unwind.h>
+
+#include <algorithm>
+#include <ostream>
+
+#include "base/debug/proc_maps_linux.h"
+#include "base/strings/stringprintf.h"
+#include "base/threading/thread_restrictions.h"
+
+#ifdef __LP64__
+#define FMT_ADDR  "0x%016lx"
+#else
+#define FMT_ADDR  "0x%08x"
+#endif
+
+namespace {
+
+struct StackCrawlState {
+  StackCrawlState(uintptr_t* frames, size_t max_depth)
+      : frames(frames),
+        frame_count(0),
+        max_depth(max_depth),
+        have_skipped_self(false) {}
+
+  uintptr_t* frames;
+  size_t frame_count;
+  size_t max_depth;
+  bool have_skipped_self;
+};
+
+_Unwind_Reason_Code TraceStackFrame(_Unwind_Context* context, void* arg) {
+  StackCrawlState* state = static_cast<StackCrawlState*>(arg);
+  uintptr_t ip = _Unwind_GetIP(context);
+
+  // The first stack frame is this function itself.  Skip it.
+  if (ip != 0 && !state->have_skipped_self) {
+    state->have_skipped_self = true;
+    return _URC_NO_REASON;
+  }
+
+  state->frames[state->frame_count++] = ip;
+  if (state->frame_count >= state->max_depth)
+    return _URC_END_OF_STACK;
+  return _URC_NO_REASON;
+}
+
+}  // namespace
+
+namespace base {
+namespace debug {
+
+bool EnableInProcessStackDumping() {
+  // When running in an application, our code typically expects SIGPIPE
+  // to be ignored.  Therefore, when testing that same code, it should run
+  // with SIGPIPE ignored as well.
+  // TODO(phajdan.jr): De-duplicate this SIGPIPE code.
+  struct sigaction action;
+  memset(&action, 0, sizeof(action));
+  action.sa_handler = SIG_IGN;
+  sigemptyset(&action.sa_mask);
+  return (sigaction(SIGPIPE, &action, NULL) == 0);
+}
+
+StackTrace::StackTrace(size_t count) {
+  count = std::min(arraysize(trace_), count);
+
+  StackCrawlState state(reinterpret_cast<uintptr_t*>(trace_), count);
+  _Unwind_Backtrace(&TraceStackFrame, &state);
+  count_ = state.frame_count;
+}
+
+void StackTrace::Print() const {
+  std::string backtrace = ToString();
+  __android_log_write(ANDROID_LOG_ERROR, "chromium", backtrace.c_str());
+}
+
+// NOTE: Native libraries in APKs are stripped before installing. Print out the
+// relocatable address and library names so host computers can use tools to
+// symbolize and demangle (e.g., addr2line, c++filt).
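+//
+// A line of output looks like (hypothetical values):
+//   #01 0x00123456 /system/lib/libchrome.so+0x000e3456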
+void StackTrace::OutputToStream(std::ostream* os) const {
+  std::string proc_maps;
+  std::vector<MappedMemoryRegion> regions;
+  // Allow IO to read /proc/self/maps. Reading this file doesn't hit the disk
+  // since it lives in procfs, and this is currently used to print a stack trace
+  // on fatal log messages in debug builds only. If the restriction is enabled
+  // then it will recursively trigger fatal failures when this runs on the
+  // UI thread.
+  base::ThreadRestrictions::ScopedAllowIO allow_io;
+  if (!ReadProcMaps(&proc_maps)) {
+    __android_log_write(
+        ANDROID_LOG_ERROR, "chromium", "Failed to read /proc/self/maps");
+  } else if (!ParseProcMaps(proc_maps, &regions)) {
+    __android_log_write(
+        ANDROID_LOG_ERROR, "chromium", "Failed to parse /proc/self/maps");
+  }
+
+  for (size_t i = 0; i < count_; ++i) {
+    // Subtract one, as the return address of a function may be in the next
+    // function when a function is annotated as noreturn.
+    uintptr_t address = reinterpret_cast<uintptr_t>(trace_[i]) - 1;
+
+    std::vector<MappedMemoryRegion>::iterator iter = regions.begin();
+    while (iter != regions.end()) {
+      if (address >= iter->start && address < iter->end &&
+          !iter->path.empty()) {
+        break;
+      }
+      ++iter;
+    }
+
+    *os << base::StringPrintf("#%02zd " FMT_ADDR " ", i, address);
+
+    if (iter != regions.end()) {
+      uintptr_t rel_pc = address - iter->start + iter->offset;
+      const char* path = iter->path.c_str();
+      *os << base::StringPrintf("%s+" FMT_ADDR, path, rel_pc);
+    } else {
+      *os << "<unknown>";
+    }
+
+    *os << "\n";
+  }
+}
+
+}  // namespace debug
+}  // namespace base
diff --git a/base/debug/stack_trace_fuchsia.cc b/base/debug/stack_trace_fuchsia.cc
new file mode 100644
index 0000000..f996933
--- /dev/null
+++ b/base/debug/stack_trace_fuchsia.cc
@@ -0,0 +1,213 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/stack_trace.h"
+
+#include <link.h>
+#include <stddef.h>
+#include <string.h>
+#include <threads.h>
+#include <unwind.h>
+#include <zircon/crashlogger.h>
+#include <zircon/process.h>
+#include <zircon/syscalls.h>
+#include <zircon/syscalls/definitions.h>
+#include <zircon/syscalls/port.h>
+#include <zircon/types.h>
+
+#include <algorithm>
+#include <iomanip>
+#include <iostream>
+
+#include "base/logging.h"
+
+namespace base {
+namespace debug {
+
+namespace {
+
+const char kProcessNamePrefix[] = "app:";
+const size_t kProcessNamePrefixLen = arraysize(kProcessNamePrefix) - 1;
+
+struct BacktraceData {
+  void** trace_array;
+  size_t* count;
+  size_t max;
+};
+
+_Unwind_Reason_Code UnwindStore(struct _Unwind_Context* context,
+                                void* user_data) {
+  BacktraceData* data = reinterpret_cast<BacktraceData*>(user_data);
+  uintptr_t pc = _Unwind_GetIP(context);
+  data->trace_array[*data->count] = reinterpret_cast<void*>(pc);
+  *data->count += 1;
+  if (*data->count == data->max)
+    return _URC_END_OF_STACK;
+  return _URC_NO_REASON;
+}
+
+// Stores and queries debugging symbol map info for the current process.
+class SymbolMap {
+ public:
+  struct Entry {
+    void* addr;
+    char name[ZX_MAX_NAME_LEN + kProcessNamePrefixLen];
+  };
+
+  SymbolMap();
+  ~SymbolMap() = default;
+
+  // Gets the symbol map entry for |address|. Returns null if no entry could be
+  // found for the address, or if the symbol map could not be queried.
+  Entry* GetForAddress(void* address);
+
+ private:
+  // Component builds of Chrome pull in about 250 shared libraries (on Linux),
+  // so 512 entries should be enough in most cases.
+  static const size_t kMaxMapEntries = 512;
+
+  void Populate();
+
+  // Sorted in descending order by address, for lookup purposes.
+  Entry entries_[kMaxMapEntries];
+
+  size_t count_ = 0;
+  bool valid_ = false;
+
+  DISALLOW_COPY_AND_ASSIGN(SymbolMap);
+};
+
+SymbolMap::SymbolMap() {
+  Populate();
+}
+
+SymbolMap::Entry* SymbolMap::GetForAddress(void* address) {
+  if (!valid_) {
+    return nullptr;
+  }
+
+  // Working backwards in the address space, return the first map entry whose
+  // address comes before |address| (thereby enclosing it).
+  for (size_t i = 0; i < count_; ++i) {
+    if (address >= entries_[i].addr) {
+      return &entries_[i];
+    }
+  }
+  return nullptr;
+}
+
+void SymbolMap::Populate() {
+  zx_handle_t process = zx_process_self();
+
+  // Try to fetch the name of the process' main executable, which was set as the
+  // name of the |process| kernel object.
+  // TODO(wez): Object names can only have up to ZX_MAX_NAME_LEN characters, so
+  // if we keep hitting problems with truncation, find a way to plumb argv[0]
+  // through to here instead, e.g. using CommandLine::GetProgramName().
+  char app_name[arraysize(SymbolMap::Entry::name)];
+  strcpy(app_name, kProcessNamePrefix);
+  zx_status_t status = zx_object_get_property(
+      process, ZX_PROP_NAME, app_name + kProcessNamePrefixLen,
+      sizeof(app_name) - kProcessNamePrefixLen);
+  if (status != ZX_OK) {
+    DPLOG(WARNING)
+        << "Couldn't get name, falling back to 'app' for program name: "
+        << status;
+    strlcat(app_name, "app", sizeof(app_name));
+  }
+
+  // Retrieve the debug info struct.
+  uintptr_t debug_addr;
+  status = zx_object_get_property(process, ZX_PROP_PROCESS_DEBUG_ADDR,
+                                  &debug_addr, sizeof(debug_addr));
+  if (status != ZX_OK) {
+    DPLOG(ERROR) << "Couldn't get symbol map for process: " << status;
+    return;
+  }
+  r_debug* debug_info = reinterpret_cast<r_debug*>(debug_addr);
+
+  // Get the link map from the debug info struct.
+  link_map* lmap = reinterpret_cast<link_map*>(debug_info->r_map);
+  if (!lmap) {
+    DPLOG(ERROR) << "Null link_map for process.";
+    return;
+  }
+
+  // Copy the contents of the link map linked list to |entries_|.
+  while (lmap != nullptr) {
+    if (count_ >= arraysize(entries_)) {
+      break;
+    }
+    SymbolMap::Entry* next_entry = &entries_[count_];
+    count_++;
+
+    next_entry->addr = reinterpret_cast<void*>(lmap->l_addr);
+    char* name_to_use = lmap->l_name[0] ? lmap->l_name : app_name;
+    strlcpy(next_entry->name, name_to_use, sizeof(next_entry->name));
+    lmap = lmap->l_next;
+  }
+
+  std::sort(
+      entries_, entries_ + count_,
+      [](const Entry& a, const Entry& b) -> bool { return a.addr > b.addr; });
+
+  valid_ = true;
+}
+
+}  // namespace
+
+bool EnableInProcessStackDumping() {
+  // StackTrace can capture the current stack (e.g. for diagnostics added to
+  // code), but for local capture and printing of backtraces, we just let the
+  // system crashlogger take over. It prints a nicely formatted backtrace
+  // with DSO information, relative offsets, etc., which we can then
+  // filter with addr2line in the run script to get file/line info.
+  return true;
+}
+
+StackTrace::StackTrace(size_t count) : count_(0) {
+  BacktraceData data = {&trace_[0], &count_,
+                        std::min(count, static_cast<size_t>(kMaxTraces))};
+  _Unwind_Backtrace(&UnwindStore, &data);
+}
+
+void StackTrace::Print() const {
+  OutputToStream(&std::cerr);
+}
+
+// Sample stack trace output is designed to be similar to Fuchsia's crashlogger:
+// bt#00: pc 0x1527a058aa00 (app:/system/base_unittests,0x18bda00)
+// bt#01: pc 0x1527a0254b5c (app:/system/base_unittests,0x1587b5c)
+// bt#02: pc 0x15279f446ece (app:/system/base_unittests,0x779ece)
+// ...
+// bt#21: pc 0x1527a05b51b4 (app:/system/base_unittests,0x18e81b4)
+// bt#22: pc 0x54fdbf3593de (libc.so,0x1c3de)
+// bt#23: end
+void StackTrace::OutputToStream(std::ostream* os) const {
+  SymbolMap map;
+
+  size_t i = 0;
+  for (; (i < count_) && os->good(); ++i) {
+    SymbolMap::Entry* entry = map.GetForAddress(trace_[i]);
+    if (entry) {
+      size_t offset = reinterpret_cast<uintptr_t>(trace_[i]) -
+                      reinterpret_cast<uintptr_t>(entry->addr);
+      *os << "bt#" << std::setw(2) << std::setfill('0') << i << std::setw(0)
+          << ": pc " << trace_[i] << " (" << entry->name << ",0x" << std::hex
+          << offset << std::dec << std::setw(0) << ")\n";
+    } else {
+      // Fallback if the DSO map isn't available.
+      // Logged PC values are absolute memory addresses, and the shared object
+      // name is not emitted.
+      *os << "bt#" << std::setw(2) << std::setfill('0') << i << std::setw(0)
+          << ": pc " << trace_[i] << "\n";
+    }
+  }
+
+  (*os) << "bt#" << std::setw(2) << i << ": end\n";
+}
+
+}  // namespace debug
+}  // namespace base
diff --git a/base/debug/stack_trace_posix.cc b/base/debug/stack_trace_posix.cc
new file mode 100644
index 0000000..f3f05da
--- /dev/null
+++ b/base/debug/stack_trace_posix.cc
@@ -0,0 +1,898 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/stack_trace.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/param.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <algorithm>
+#include <map>
+#include <memory>
+#include <ostream>
+#include <string>
+#include <vector>
+
+#if !defined(USE_SYMBOLIZE)
+#include <cxxabi.h>
+#endif
+#if !defined(__UCLIBC__) && !defined(_AIX)
+#include <execinfo.h>
+#endif
+
+#if defined(OS_MACOSX)
+#include <AvailabilityMacros.h>
+#endif
+
+#if defined(OS_LINUX)
+#include "base/debug/proc_maps_linux.h"
+#endif
+
+#include "base/cfi_buildflags.h"
+#include "base/debug/debugger.h"
+#include "base/files/scoped_file.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/free_deleter.h"
+#include "base/memory/singleton.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/strings/string_number_conversions.h"
+#include "build/build_config.h"
+
+#if defined(USE_SYMBOLIZE)
+#include "base/third_party/symbolize/symbolize.h"
+#endif
+
+namespace base {
+namespace debug {
+
+namespace {
+
+volatile sig_atomic_t in_signal_handler = 0;
+
+bool (*try_handle_signal)(int, void*, void*) = nullptr;
+
+#if !defined(USE_SYMBOLIZE)
+// The prefix used for mangled symbols, per the Itanium C++ ABI:
+// http://www.codesourcery.com/cxx-abi/abi.html#mangling
+const char kMangledSymbolPrefix[] = "_Z";
+
+// Characters that can be used for symbols, generated by Ruby:
+// (('a'..'z').to_a+('A'..'Z').to_a+('0'..'9').to_a + ['_']).join
+const char kSymbolCharacters[] =
+    "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_";
+#endif  // !defined(USE_SYMBOLIZE)
+
+#if !defined(USE_SYMBOLIZE)
+// Demangles C++ symbols in the given text. Example:
+//
+// "out/Debug/base_unittests(_ZN10StackTraceC1Ev+0x20) [0x817778c]"
+// =>
+// "out/Debug/base_unittests(StackTrace::StackTrace()+0x20) [0x817778c]"
+void DemangleSymbols(std::string* text) {
+  // Note: code in this function is NOT async-signal safe (std::string uses
+  // malloc internally).
+
+#if !defined(__UCLIBC__) && !defined(_AIX)
+  std::string::size_type search_from = 0;
+  while (search_from < text->size()) {
+    // Look for the start of a mangled symbol, from search_from.
+    std::string::size_type mangled_start =
+        text->find(kMangledSymbolPrefix, search_from);
+    if (mangled_start == std::string::npos) {
+      break;  // Mangled symbol not found.
+    }
+
+    // Look for the end of the mangled symbol.
+    std::string::size_type mangled_end =
+        text->find_first_not_of(kSymbolCharacters, mangled_start);
+    if (mangled_end == std::string::npos) {
+      mangled_end = text->size();
+    }
+    std::string mangled_symbol =
+        text->substr(mangled_start, mangled_end - mangled_start);
+
+    // Try to demangle the mangled symbol candidate.
+    int status = 0;
+    std::unique_ptr<char, base::FreeDeleter> demangled_symbol(
+        abi::__cxa_demangle(mangled_symbol.c_str(), nullptr, 0, &status));
+    if (status == 0) {  // Demangling is successful.
+      // Remove the mangled symbol.
+      text->erase(mangled_start, mangled_end - mangled_start);
+      // Insert the demangled symbol.
+      text->insert(mangled_start, demangled_symbol.get());
+      // Next time, we'll start right after the demangled symbol we inserted.
+      search_from = mangled_start + strlen(demangled_symbol.get());
+    } else {
+      // Failed to demangle.  Retry after the "_Z" we just found.
+      search_from = mangled_start + 2;
+    }
+  }
+#endif  // !defined(__UCLIBC__) && !defined(_AIX)
+}
+#endif  // !defined(USE_SYMBOLIZE)
+
+class BacktraceOutputHandler {
+ public:
+  virtual void HandleOutput(const char* output) = 0;
+
+ protected:
+  virtual ~BacktraceOutputHandler() = default;
+};
+
+#if !defined(__UCLIBC__) && !defined(_AIX)
+void OutputPointer(void* pointer, BacktraceOutputHandler* handler) {
+  // This should be more than enough to store a 64-bit number in hex:
+  // 16 hex digits + 1 for null-terminator.
+  char buf[17] = { '\0' };
+  handler->HandleOutput("0x");
+  internal::itoa_r(reinterpret_cast<intptr_t>(pointer),
+                   buf, sizeof(buf), 16, 12);
+  handler->HandleOutput(buf);
+}
+
+#if defined(USE_SYMBOLIZE)
+void OutputFrameId(intptr_t frame_id, BacktraceOutputHandler* handler) {
+  // Max unsigned 64-bit number in decimal has 20 digits (18446744073709551615).
+  // Hence, 30 digits should be more than enough to represent it in decimal
+  // (including the null-terminator).
+  char buf[30] = { '\0' };
+  handler->HandleOutput("#");
+  internal::itoa_r(frame_id, buf, sizeof(buf), 10, 1);
+  handler->HandleOutput(buf);
+}
+#endif  // defined(USE_SYMBOLIZE)
+
+void ProcessBacktrace(void *const *trace,
+                      size_t size,
+                      BacktraceOutputHandler* handler) {
+  // NOTE: This code MUST be async-signal safe (it's used by in-process
+  // stack dumping signal handler). NO malloc or stdio is allowed here.
+
+#if defined(USE_SYMBOLIZE)
+  for (size_t i = 0; i < size; ++i) {
+    OutputFrameId(i, handler);
+    handler->HandleOutput(" ");
+    OutputPointer(trace[i], handler);
+    handler->HandleOutput(" ");
+
+    char buf[1024] = { '\0' };
+
+    // Subtract one, as the return address of a function may be in the next
+    // function when a function is annotated as noreturn.
+    void* address = static_cast<char*>(trace[i]) - 1;
+    if (google::Symbolize(address, buf, sizeof(buf)))
+      handler->HandleOutput(buf);
+    else
+      handler->HandleOutput("<unknown>");
+
+    handler->HandleOutput("\n");
+  }
+#else
+  bool printed = false;
+
+  // Below part is async-signal unsafe (uses malloc), so execute it only
+  // when we are not executing the signal handler.
+  if (in_signal_handler == 0) {
+    std::unique_ptr<char*, FreeDeleter> trace_symbols(
+        backtrace_symbols(trace, size));
+    if (trace_symbols.get()) {
+      for (size_t i = 0; i < size; ++i) {
+        std::string trace_symbol = trace_symbols.get()[i];
+        DemangleSymbols(&trace_symbol);
+        handler->HandleOutput(trace_symbol.c_str());
+        handler->HandleOutput("\n");
+      }
+
+      printed = true;
+    }
+  }
+
+  if (!printed) {
+    for (size_t i = 0; i < size; ++i) {
+      handler->HandleOutput(" [");
+      OutputPointer(trace[i], handler);
+      handler->HandleOutput("]\n");
+    }
+  }
+#endif  // defined(USE_SYMBOLIZE)
+}
+#endif  // !defined(__UCLIBC__) && !defined(_AIX)
+
+void PrintToStderr(const char* output) {
+  // NOTE: This code MUST be async-signal safe (it's used by in-process
+  // stack dumping signal handler). NO malloc or stdio is allowed here.
+  ignore_result(HANDLE_EINTR(write(STDERR_FILENO, output, strlen(output))));
+}
+
+void StackDumpSignalHandler(int signal, siginfo_t* info, void* void_context) {
+  // NOTE: This code MUST be async-signal safe.
+  // NO malloc or stdio is allowed here.
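+  //
+  // On Linux, a null dereference typically produces output starting with
+  // (hypothetical address):
+  //   Received signal 11 SEGV_MAPERR 000000000000
+  // followed by the backtrace and, on x86, a register dump.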
+
+  // Give a registered callback a chance to recover from this signal
+  //
+  // V8 uses guard regions to guarantee memory safety in WebAssembly. This means
+  // some signals might be expected if they originate from Wasm code while
+  // accessing the guard region. We give V8 the chance to handle and recover
+  // from these signals first.
+  if (try_handle_signal != nullptr &&
+      try_handle_signal(signal, info, void_context)) {
+    // The first-chance handler took care of this. The SA_RESETHAND flag
+    // reset this signal handler to the default upon entry, but we want to
+    // stay installed. Thus, we reinstall ourselves before returning.
+    struct sigaction action;
+    memset(&action, 0, sizeof(action));
+    action.sa_flags = SA_RESETHAND | SA_SIGINFO;
+    action.sa_sigaction = &StackDumpSignalHandler;
+    sigemptyset(&action.sa_mask);
+
+    sigaction(signal, &action, nullptr);
+    return;
+  }
+
+// Do not take the "in signal handler" code path on Mac in a DCHECK-enabled
+// build, as this prevents seeing a useful (symbolized) stack trace on a crash
+// or DCHECK() failure. While it may not be fully safe to run the stack symbol
+// printing code, in practice it's better to provide meaningful stack traces -
+// and the risk is low given we're likely crashing already.
+#if !defined(OS_MACOSX) || !DCHECK_IS_ON()
+  // Record the fact that we are in the signal handler now, so that the rest
+  // of StackTrace can behave in an async-signal-safe manner.
+  in_signal_handler = 1;
+#endif
+
+  if (BeingDebugged())
+    BreakDebugger();
+
+  PrintToStderr("Received signal ");
+  char buf[1024] = { 0 };
+  internal::itoa_r(signal, buf, sizeof(buf), 10, 0);
+  PrintToStderr(buf);
+  if (signal == SIGBUS) {
+    if (info->si_code == BUS_ADRALN)
+      PrintToStderr(" BUS_ADRALN ");
+    else if (info->si_code == BUS_ADRERR)
+      PrintToStderr(" BUS_ADRERR ");
+    else if (info->si_code == BUS_OBJERR)
+      PrintToStderr(" BUS_OBJERR ");
+    else
+      PrintToStderr(" <unknown> ");
+  } else if (signal == SIGFPE) {
+    if (info->si_code == FPE_FLTDIV)
+      PrintToStderr(" FPE_FLTDIV ");
+    else if (info->si_code == FPE_FLTINV)
+      PrintToStderr(" FPE_FLTINV ");
+    else if (info->si_code == FPE_FLTOVF)
+      PrintToStderr(" FPE_FLTOVF ");
+    else if (info->si_code == FPE_FLTRES)
+      PrintToStderr(" FPE_FLTRES ");
+    else if (info->si_code == FPE_FLTSUB)
+      PrintToStderr(" FPE_FLTSUB ");
+    else if (info->si_code == FPE_FLTUND)
+      PrintToStderr(" FPE_FLTUND ");
+    else if (info->si_code == FPE_INTDIV)
+      PrintToStderr(" FPE_INTDIV ");
+    else if (info->si_code == FPE_INTOVF)
+      PrintToStderr(" FPE_INTOVF ");
+    else
+      PrintToStderr(" <unknown> ");
+  } else if (signal == SIGILL) {
+    if (info->si_code == ILL_BADSTK)
+      PrintToStderr(" ILL_BADSTK ");
+    else if (info->si_code == ILL_COPROC)
+      PrintToStderr(" ILL_COPROC ");
+    else if (info->si_code == ILL_ILLOPN)
+      PrintToStderr(" ILL_ILLOPN ");
+    else if (info->si_code == ILL_ILLADR)
+      PrintToStderr(" ILL_ILLADR ");
+    else if (info->si_code == ILL_ILLTRP)
+      PrintToStderr(" ILL_ILLTRP ");
+    else if (info->si_code == ILL_PRVOPC)
+      PrintToStderr(" ILL_PRVOPC ");
+    else if (info->si_code == ILL_PRVREG)
+      PrintToStderr(" ILL_PRVREG ");
+    else
+      PrintToStderr(" <unknown> ");
+  } else if (signal == SIGSEGV) {
+    if (info->si_code == SEGV_MAPERR)
+      PrintToStderr(" SEGV_MAPERR ");
+    else if (info->si_code == SEGV_ACCERR)
+      PrintToStderr(" SEGV_ACCERR ");
+    else
+      PrintToStderr(" <unknown> ");
+  }
+  if (signal == SIGBUS || signal == SIGFPE ||
+      signal == SIGILL || signal == SIGSEGV) {
+    internal::itoa_r(reinterpret_cast<intptr_t>(info->si_addr),
+                     buf, sizeof(buf), 16, 12);
+    PrintToStderr(buf);
+  }
+  PrintToStderr("\n");
+
+#if BUILDFLAG(CFI_ENFORCEMENT_TRAP)
+  if (signal == SIGILL && info->si_code == ILL_ILLOPN) {
+    PrintToStderr(
+        "CFI: Most likely a control flow integrity violation; for more "
+        "information see:\n");
+    PrintToStderr(
+        "https://www.chromium.org/developers/testing/control-flow-integrity\n");
+  }
+#endif  // BUILDFLAG(CFI_ENFORCEMENT_TRAP)
+
+  debug::StackTrace().Print();
+
+#if defined(OS_LINUX)
+#if ARCH_CPU_X86_FAMILY
+  ucontext_t* context = reinterpret_cast<ucontext_t*>(void_context);
+  const struct {
+    const char* label;
+    greg_t value;
+  } registers[] = {
+#if ARCH_CPU_32_BITS
+    { "  gs: ", context->uc_mcontext.gregs[REG_GS] },
+    { "  fs: ", context->uc_mcontext.gregs[REG_FS] },
+    { "  es: ", context->uc_mcontext.gregs[REG_ES] },
+    { "  ds: ", context->uc_mcontext.gregs[REG_DS] },
+    { " edi: ", context->uc_mcontext.gregs[REG_EDI] },
+    { " esi: ", context->uc_mcontext.gregs[REG_ESI] },
+    { " ebp: ", context->uc_mcontext.gregs[REG_EBP] },
+    { " esp: ", context->uc_mcontext.gregs[REG_ESP] },
+    { " ebx: ", context->uc_mcontext.gregs[REG_EBX] },
+    { " edx: ", context->uc_mcontext.gregs[REG_EDX] },
+    { " ecx: ", context->uc_mcontext.gregs[REG_ECX] },
+    { " eax: ", context->uc_mcontext.gregs[REG_EAX] },
+    { " trp: ", context->uc_mcontext.gregs[REG_TRAPNO] },
+    { " err: ", context->uc_mcontext.gregs[REG_ERR] },
+    { "  ip: ", context->uc_mcontext.gregs[REG_EIP] },
+    { "  cs: ", context->uc_mcontext.gregs[REG_CS] },
+    { " efl: ", context->uc_mcontext.gregs[REG_EFL] },
+    { " usp: ", context->uc_mcontext.gregs[REG_UESP] },
+    { "  ss: ", context->uc_mcontext.gregs[REG_SS] },
+#elif ARCH_CPU_64_BITS
+    { "  r8: ", context->uc_mcontext.gregs[REG_R8] },
+    { "  r9: ", context->uc_mcontext.gregs[REG_R9] },
+    { " r10: ", context->uc_mcontext.gregs[REG_R10] },
+    { " r11: ", context->uc_mcontext.gregs[REG_R11] },
+    { " r12: ", context->uc_mcontext.gregs[REG_R12] },
+    { " r13: ", context->uc_mcontext.gregs[REG_R13] },
+    { " r14: ", context->uc_mcontext.gregs[REG_R14] },
+    { " r15: ", context->uc_mcontext.gregs[REG_R15] },
+    { "  di: ", context->uc_mcontext.gregs[REG_RDI] },
+    { "  si: ", context->uc_mcontext.gregs[REG_RSI] },
+    { "  bp: ", context->uc_mcontext.gregs[REG_RBP] },
+    { "  bx: ", context->uc_mcontext.gregs[REG_RBX] },
+    { "  dx: ", context->uc_mcontext.gregs[REG_RDX] },
+    { "  ax: ", context->uc_mcontext.gregs[REG_RAX] },
+    { "  cx: ", context->uc_mcontext.gregs[REG_RCX] },
+    { "  sp: ", context->uc_mcontext.gregs[REG_RSP] },
+    { "  ip: ", context->uc_mcontext.gregs[REG_RIP] },
+    { " efl: ", context->uc_mcontext.gregs[REG_EFL] },
+    { " cgf: ", context->uc_mcontext.gregs[REG_CSGSFS] },
+    { " erf: ", context->uc_mcontext.gregs[REG_ERR] },
+    { " trp: ", context->uc_mcontext.gregs[REG_TRAPNO] },
+    { " msk: ", context->uc_mcontext.gregs[REG_OLDMASK] },
+    { " cr2: ", context->uc_mcontext.gregs[REG_CR2] },
+#endif  // ARCH_CPU_32_BITS
+  };
+
+#if ARCH_CPU_32_BITS
+  const int kRegisterPadding = 8;
+#elif ARCH_CPU_64_BITS
+  const int kRegisterPadding = 16;
+#endif
+
+  for (size_t i = 0; i < arraysize(registers); i++) {
+    PrintToStderr(registers[i].label);
+    internal::itoa_r(registers[i].value, buf, sizeof(buf),
+                     16, kRegisterPadding);
+    PrintToStderr(buf);
+
+    if ((i + 1) % 4 == 0)
+      PrintToStderr("\n");
+  }
+  PrintToStderr("\n");
+#endif  // ARCH_CPU_X86_FAMILY
+#endif  // defined(OS_LINUX)
+
+  PrintToStderr("[end of stack trace]\n");
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+  if (::signal(signal, SIG_DFL) == SIG_ERR)
+    _exit(1);
+#else
+  // Non-Mac OSes should probably reraise the signal as well, but the Linux
+  // sandbox tests break on CrOS devices.
+  // https://code.google.com/p/chromium/issues/detail?id=551681
+  PrintToStderr("Calling _exit(1). Core file will not be generated.\n");
+  _exit(1);
+#endif  // defined(OS_MACOSX) && !defined(OS_IOS)
+}
+
+class PrintBacktraceOutputHandler : public BacktraceOutputHandler {
+ public:
+  PrintBacktraceOutputHandler() = default;
+
+  void HandleOutput(const char* output) override {
+    // NOTE: This code MUST be async-signal safe (it's used by in-process
+    // stack dumping signal handler). NO malloc or stdio is allowed here.
+    PrintToStderr(output);
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(PrintBacktraceOutputHandler);
+};
+
+class StreamBacktraceOutputHandler : public BacktraceOutputHandler {
+ public:
+  explicit StreamBacktraceOutputHandler(std::ostream* os) : os_(os) {
+  }
+
+  void HandleOutput(const char* output) override { (*os_) << output; }
+
+ private:
+  std::ostream* os_;
+
+  DISALLOW_COPY_AND_ASSIGN(StreamBacktraceOutputHandler);
+};
+
+void WarmUpBacktrace() {
+  // Warm up stack trace infrastructure. It turns out that on the first
+  // call glibc initializes some internal data structures using pthread_once,
+  // and even backtrace() can call malloc(), leading to hangs.
+  //
+  // Example stack trace snippet (with tcmalloc):
+  //
+  // #8  0x0000000000a173b5 in tc_malloc
+  //             at ./third_party/tcmalloc/chromium/src/debugallocation.cc:1161
+  // #9  0x00007ffff7de7900 in _dl_map_object_deps at dl-deps.c:517
+  // #10 0x00007ffff7ded8a9 in dl_open_worker at dl-open.c:262
+  // #11 0x00007ffff7de9176 in _dl_catch_error at dl-error.c:178
+  // #12 0x00007ffff7ded31a in _dl_open (file=0x7ffff625e298 "libgcc_s.so.1")
+  //             at dl-open.c:639
+  // #13 0x00007ffff6215602 in do_dlopen at dl-libc.c:89
+  // #14 0x00007ffff7de9176 in _dl_catch_error at dl-error.c:178
+  // #15 0x00007ffff62156c4 in dlerror_run at dl-libc.c:48
+  // #16 __GI___libc_dlopen_mode at dl-libc.c:165
+  // #17 0x00007ffff61ef8f5 in init
+  //             at ../sysdeps/x86_64/../ia64/backtrace.c:53
+  // #18 0x00007ffff6aad400 in pthread_once
+  //             at ../nptl/sysdeps/unix/sysv/linux/x86_64/pthread_once.S:104
+  // #19 0x00007ffff61efa14 in __GI___backtrace
+  //             at ../sysdeps/x86_64/../ia64/backtrace.c:104
+  // #20 0x0000000000752a54 in base::debug::StackTrace::StackTrace
+  //             at base/debug/stack_trace_posix.cc:175
+  // #21 0x00000000007a4ae5 in
+  //             base::(anonymous namespace)::StackDumpSignalHandler
+  //             at base/process_util_posix.cc:172
+  // #22 <signal handler called>
+  StackTrace stack_trace;
+}
+
+#if defined(USE_SYMBOLIZE)
+
+// class SandboxSymbolizeHelper.
+//
+// The purpose of this class is to prepare and install a "file open" callback
+// needed by the stack trace symbolization code
+// (base/third_party/symbolize/symbolize.h) so that it can function properly
+// in a sandboxed process.  The caveat is that this class must be instantiated
+// before the sandboxing is enabled so that it can get the chance to open all
+// the object files that are loaded in the virtual address space of the current
+// process.
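+//
+// In this file the singleton is created (via GetInstance()) from
+// EnableInProcessStackDumping(), so that function must run before sandbox
+// initialization.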
+class SandboxSymbolizeHelper {
+ public:
+  // Returns the singleton instance.
+  static SandboxSymbolizeHelper* GetInstance() {
+    return Singleton<SandboxSymbolizeHelper,
+                     LeakySingletonTraits<SandboxSymbolizeHelper>>::get();
+  }
+
+ private:
+  friend struct DefaultSingletonTraits<SandboxSymbolizeHelper>;
+
+  SandboxSymbolizeHelper()
+      : is_initialized_(false) {
+    Init();
+  }
+
+  ~SandboxSymbolizeHelper() {
+    UnregisterCallback();
+    CloseObjectFiles();
+  }
+
+  // Returns an O_RDONLY file descriptor for |file_path| if it was opened
+  // successfully during the initialization.  The file is repositioned at
+  // offset 0.
+  // IMPORTANT: This function must be async-signal-safe because it can be
+  // called from a signal handler (symbolizing stack frames for a crash).
+  int GetFileDescriptor(const char* file_path) {
+    int fd = -1;
+
+#if !defined(OFFICIAL_BUILD)
+    if (file_path) {
+      // The assumption here is that iterating over std::map<std::string, int>
+      // using a const_iterator does not allocate dynamic memory, hence it is
+      // async-signal-safe.
+      std::map<std::string, int>::const_iterator it;
+      for (it = modules_.begin(); it != modules_.end(); ++it) {
+        if (strcmp((it->first).c_str(), file_path) == 0) {
+          // POSIX.1-2004 requires an implementation to guarantee that dup()
+          // is async-signal-safe.
+          fd = HANDLE_EINTR(dup(it->second));
+          break;
+        }
+      }
+      // POSIX.1-2004 requires an implementation to guarantee that lseek()
+      // is async-signal-safe.
+      if (fd >= 0 && lseek(fd, 0, SEEK_SET) < 0) {
+        // Failed to seek.
+        fd = -1;
+      }
+    }
+#endif  // !defined(OFFICIAL_BUILD)
+
+    return fd;
+  }
+
+  // Searches for the object file (from /proc/self/maps) that contains
+  // the specified pc.  If found, sets |start_address| to the start address
+  // of where this object file is mapped in memory, sets the module base
+  // address into |base_address|, copies the object file name into
+  // |out_file_name|, and attempts to open the object file.  If the object
+  // file is opened successfully, returns the file descriptor.  Otherwise,
+  // returns -1.  |out_file_name_size| is the size of the file name buffer
+  // (including the null terminator).
+  // IMPORTANT: This function must be async-signal-safe because it can be
+  // called from a signal handler (symbolizing stack frames for a crash).
+  static int OpenObjectFileContainingPc(uint64_t pc, uint64_t& start_address,
+                                        uint64_t& base_address, char* file_path,
+                                        int file_path_size) {
+    // This method can only be called after the singleton is instantiated.
+    // This is ensured by the following facts:
+    // * This is the only static method in this class, it is private, and
+    //   the class has no friends (except for the DefaultSingletonTraits).
+    //   The compiler guarantees that it can only be called after the
+    //   singleton is instantiated.
+    // * This method is used as a callback for the stack tracing code and
+    //   the callback registration is done in the constructor, so logically
+    //   it cannot be called before the singleton is created.
+    SandboxSymbolizeHelper* instance = GetInstance();
+
+    // The assumption here is that iterating over
+    // std::vector<MappedMemoryRegion> using a const_iterator does not allocate
+    // dynamic memory, hence it is async-signal-safe.
+    for (const MappedMemoryRegion& region : instance->regions_) {
+      if (region.start <= pc && pc < region.end) {
+        start_address = region.start;
+        base_address = region.base;
+        if (file_path && file_path_size > 0) {
+          strncpy(file_path, region.path.c_str(), file_path_size);
+          // Ensure null termination.
+          file_path[file_path_size - 1] = '\0';
+        }
+        return instance->GetFileDescriptor(region.path.c_str());
+      }
+    }
+    return -1;
+  }
+
+  // Set the base address for each memory region by reading ELF headers in
+  // process memory.
+  void SetBaseAddressesForMemoryRegions() {
+    base::ScopedFD mem_fd(
+        HANDLE_EINTR(open("/proc/self/mem", O_RDONLY | O_CLOEXEC)));
+    if (!mem_fd.is_valid())
+      return;
+
+    auto safe_memcpy = [&mem_fd](void* dst, uintptr_t src, size_t size) {
+      return HANDLE_EINTR(pread(mem_fd.get(), dst, size, src)) == ssize_t(size);
+    };
+
+    uintptr_t cur_base = 0;
+    for (auto& r : regions_) {
+      ElfW(Ehdr) ehdr;
+      static_assert(SELFMAG <= sizeof(ElfW(Ehdr)), "SELFMAG too large");
+      if ((r.permissions & MappedMemoryRegion::READ) &&
+          safe_memcpy(&ehdr, r.start, sizeof(ElfW(Ehdr))) &&
+          memcmp(ehdr.e_ident, ELFMAG, SELFMAG) == 0) {
+        switch (ehdr.e_type) {
+          case ET_EXEC:
+            cur_base = 0;
+            break;
+          case ET_DYN:
+            // Find the segment containing file offset 0. This will correspond
+            // to the ELF header that we just read. Normally this will have
+            // virtual address 0, but this is not guaranteed. We must subtract
+            // the virtual address from the address where the ELF header was
+            // mapped to get the base address.
+            //
+            // If we fail to find a segment for file offset 0, use the address
+            // of the ELF header as the base address.
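+            // For example (hypothetical values): a DSO whose ELF header is
+            // mapped at 0x7f0000001000 and whose PT_LOAD for file offset 0
+            // has p_vaddr 0x1000 gets base 0x7f0000000000.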
+            cur_base = r.start;
+            for (unsigned i = 0; i != ehdr.e_phnum; ++i) {
+              ElfW(Phdr) phdr;
+              if (safe_memcpy(&phdr, r.start + ehdr.e_phoff + i * sizeof(phdr),
+                              sizeof(phdr)) &&
+                  phdr.p_type == PT_LOAD && phdr.p_offset == 0) {
+                cur_base = r.start - phdr.p_vaddr;
+                break;
+              }
+            }
+            break;
+          default:
+            // ET_REL or ET_CORE. These aren't directly executable, so they
+            // don't affect the base address.
+            break;
+        }
+      }
+
+      r.base = cur_base;
+    }
+  }
+
+  // Parses /proc/self/maps in order to compile a list of all object file names
+  // for the modules that are loaded in the current process.
+  // Returns true on success.
+  bool CacheMemoryRegions() {
+    // Reads /proc/self/maps.
+    std::string contents;
+    if (!ReadProcMaps(&contents)) {
+      LOG(ERROR) << "Failed to read /proc/self/maps";
+      return false;
+    }
+
+    // Parses /proc/self/maps.
+    if (!ParseProcMaps(contents, &regions_)) {
+      LOG(ERROR) << "Failed to parse the contents of /proc/self/maps";
+      return false;
+    }
+
+    SetBaseAddressesForMemoryRegions();
+
+    is_initialized_ = true;
+    return true;
+  }
+
+  // Opens all object files and caches their file descriptors.
+  void OpenSymbolFiles() {
+    // Pre-opening and caching the file descriptors of all loaded modules is
+    // not safe for production builds.  Hence it is only done in non-official
+    // builds.  For more details, take a look at: http://crbug.com/341966.
+#if !defined(OFFICIAL_BUILD)
+    // Open the object files for all read-only executable regions and cache
+    // their file descriptors.
+    std::vector<MappedMemoryRegion>::const_iterator it;
+    for (it = regions_.begin(); it != regions_.end(); ++it) {
+      const MappedMemoryRegion& region = *it;
+      // Only interested in read-only executable regions.
+      if ((region.permissions & MappedMemoryRegion::READ) ==
+              MappedMemoryRegion::READ &&
+          (region.permissions & MappedMemoryRegion::WRITE) == 0 &&
+          (region.permissions & MappedMemoryRegion::EXECUTE) ==
+              MappedMemoryRegion::EXECUTE) {
+        if (region.path.empty()) {
+          // Skip regions with empty file names.
+          continue;
+        }
+        if (region.path[0] == '[') {
+          // Skip pseudo-paths, like [stack], [vdso], [heap], etc ...
+          continue;
+        }
+        // Avoid duplicates.
+        if (modules_.find(region.path) == modules_.end()) {
+          int fd = open(region.path.c_str(), O_RDONLY | O_CLOEXEC);
+          if (fd >= 0) {
+            modules_.insert(std::make_pair(region.path, fd));
+          } else {
+            LOG(WARNING) << "Failed to open file: " << region.path
+                         << "\n  Error: " << strerror(errno);
+          }
+        }
+      }
+    }
+#endif  // !defined(OFFICIAL_BUILD)
+  }
+
+  // Initializes and installs the symbolization callback.
+  void Init() {
+    if (CacheMemoryRegions()) {
+      OpenSymbolFiles();
+      google::InstallSymbolizeOpenObjectFileCallback(
+          &OpenObjectFileContainingPc);
+    }
+  }
+
+  // Unregister symbolization callback.
+  void UnregisterCallback() {
+    if (is_initialized_) {
+      google::InstallSymbolizeOpenObjectFileCallback(nullptr);
+      is_initialized_ = false;
+    }
+  }
+
+  // Closes all file descriptors owned by this instance.
+  void CloseObjectFiles() {
+#if !defined(OFFICIAL_BUILD)
+    std::map<std::string, int>::iterator it;
+    for (it = modules_.begin(); it != modules_.end(); ++it) {
+      int ret = IGNORE_EINTR(close(it->second));
+      DCHECK(!ret);
+      it->second = -1;
+    }
+    modules_.clear();
+#endif  // !defined(OFFICIAL_BUILD)
+  }
+
+  // Set to true upon successful initialization.
+  bool is_initialized_;
+
+#if !defined(OFFICIAL_BUILD)
+  // Mapping from file name to file descriptor.  Includes file descriptors
+  // for all successfully opened object files and the file descriptor for
+  // /proc/self/maps.  This code is not safe for production builds.
+  std::map<std::string, int> modules_;
+#endif  // !defined(OFFICIAL_BUILD)
+
+  // Cache for the process memory regions.  Produced by parsing the contents
+  // of /proc/self/maps.
+  std::vector<MappedMemoryRegion> regions_;
+
+  DISALLOW_COPY_AND_ASSIGN(SandboxSymbolizeHelper);
+};
+#endif  // USE_SYMBOLIZE
+
+}  // namespace
+
+bool EnableInProcessStackDumping() {
+#if defined(USE_SYMBOLIZE)
+  SandboxSymbolizeHelper::GetInstance();
+#endif  // USE_SYMBOLIZE
+
+  // When running in an application, our code typically expects SIGPIPE
+  // to be ignored.  Therefore, when testing that same code, it should run
+  // with SIGPIPE ignored as well.
+  struct sigaction sigpipe_action;
+  memset(&sigpipe_action, 0, sizeof(sigpipe_action));
+  sigpipe_action.sa_handler = SIG_IGN;
+  sigemptyset(&sigpipe_action.sa_mask);
+  bool success = (sigaction(SIGPIPE, &sigpipe_action, nullptr) == 0);
+
+  // Avoid hangs during backtrace initialization, see above.
+  WarmUpBacktrace();
+
+  struct sigaction action;
+  memset(&action, 0, sizeof(action));
+  action.sa_flags = SA_RESETHAND | SA_SIGINFO;
+  action.sa_sigaction = &StackDumpSignalHandler;
+  sigemptyset(&action.sa_mask);
+
+  success &= (sigaction(SIGILL, &action, nullptr) == 0);
+  success &= (sigaction(SIGABRT, &action, nullptr) == 0);
+  success &= (sigaction(SIGFPE, &action, nullptr) == 0);
+  success &= (sigaction(SIGBUS, &action, nullptr) == 0);
+  success &= (sigaction(SIGSEGV, &action, nullptr) == 0);
+// On Linux, SIGSYS is reserved by the kernel for seccomp-bpf sandboxing.
+#if !defined(OS_LINUX)
+  success &= (sigaction(SIGSYS, &action, nullptr) == 0);
+#endif  // !defined(OS_LINUX)
+
+  return success;
+}
+
+void SetStackDumpFirstChanceCallback(bool (*handler)(int, void*, void*)) {
+  DCHECK(try_handle_signal == nullptr || handler == nullptr);
+  try_handle_signal = handler;
+}
+
+StackTrace::StackTrace(size_t count) {
+// NOTE: This code MUST be async-signal safe (it's used by in-process
+// stack dumping signal handler). NO malloc or stdio is allowed here.
+
+#if !defined(__UCLIBC__) && !defined(_AIX)
+  count = std::min(arraysize(trace_), count);
+
+  // Though the backtrace API man page does not list any possible negative
+  // return values, we take no chances.
+  count_ = base::saturated_cast<size_t>(backtrace(trace_, count));
+#else
+  count_ = 0;
+#endif
+}
+
+void StackTrace::Print() const {
+  // NOTE: This code MUST be async-signal safe (it's used by in-process
+  // stack dumping signal handler). NO malloc or stdio is allowed here.
+
+#if !defined(__UCLIBC__) && !defined(_AIX)
+  PrintBacktraceOutputHandler handler;
+  ProcessBacktrace(trace_, count_, &handler);
+#endif
+}
+
+#if !defined(__UCLIBC__) && !defined(_AIX)
+void StackTrace::OutputToStream(std::ostream* os) const {
+  StreamBacktraceOutputHandler handler(os);
+  ProcessBacktrace(trace_, count_, &handler);
+}
+#endif
+
+namespace internal {
+
+// NOTE: code from sandbox/linux/seccomp-bpf/demo.cc.
+char* itoa_r(intptr_t i, char* buf, size_t sz, int base, size_t padding) {
+  // Make sure we can write at least one NUL byte.
+  size_t n = 1;
+  if (n > sz)
+    return nullptr;
+
+  if (base < 2 || base > 16) {
+    buf[0] = '\000';
+    return nullptr;
+  }
+
+  char* start = buf;
+
+  uintptr_t j = i;
+
+  // Handle negative numbers (only for base 10).
+  if (i < 0 && base == 10) {
+    // This does "j = -i" while avoiding integer overflow.
+    j = static_cast<uintptr_t>(-(i + 1)) + 1;
+
+    // Make sure we can write the '-' character.
+    if (++n > sz) {
+      buf[0] = '\000';
+      return nullptr;
+    }
+    *start++ = '-';
+  }
+
+  // Loop until we have converted the entire number. Output at least one
+  // character (i.e. '0').
+  char* ptr = start;
+  do {
+    // Make sure there is still enough space left in our output buffer.
+    if (++n > sz) {
+      buf[0] = '\000';
+      return nullptr;
+    }
+
+    // Output the next digit.
+    *ptr++ = "0123456789abcdef"[j % base];
+    j /= base;
+
+    if (padding > 0)
+      padding--;
+  } while (j > 0 || padding > 0);
+
+  // Terminate the output with a NUL character.
+  *ptr = '\000';
+
+  // Conversion to ASCII actually resulted in the digits being in reverse
+  // order. We can't easily generate them in forward order, as we can't tell
+  // the number of characters needed until we are done converting.
+  // So, now, we reverse the string (except for the possible "-" sign).
+  while (--ptr > start) {
+    char ch = *ptr;
+    *ptr = *start;
+    *start++ = ch;
+  }
+  return buf;
+}
+
+}  // namespace internal
+
+}  // namespace debug
+}  // namespace base
diff --git a/base/debug/stack_trace_unittest.cc b/base/debug/stack_trace_unittest.cc
new file mode 100644
index 0000000..959cd53
--- /dev/null
+++ b/base/debug/stack_trace_unittest.cc
@@ -0,0 +1,320 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+
+#include <limits>
+#include <sstream>
+#include <string>
+
+#include "base/debug/debugging_buildflags.h"
+#include "base/debug/stack_trace.h"
+#include "base/logging.h"
+#include "base/process/kill.h"
+#include "base/process/process_handle.h"
+#include "base/test/test_timeouts.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/multiprocess_func_list.h"
+
+#if defined(OS_POSIX) && !defined(OS_ANDROID) && !defined(OS_IOS)
+#include "base/test/multiprocess_test.h"
+#endif
+
+namespace base {
+namespace debug {
+
+#if defined(OS_POSIX) && !defined(OS_ANDROID) && !defined(OS_IOS)
+typedef MultiProcessTest StackTraceTest;
+#else
+typedef testing::Test StackTraceTest;
+#endif
+
+// Note: On Linux, this test currently only fully works on Debug builds.
+// See comments in the #ifdef soup if you intend to change this.
+#if defined(OS_WIN)
+// Always fails on Windows: crbug.com/32070
+#define MAYBE_OutputToStream DISABLED_OutputToStream
+#else
+#define MAYBE_OutputToStream OutputToStream
+#endif
+#if !defined(__UCLIBC__) && !defined(_AIX)
+TEST_F(StackTraceTest, MAYBE_OutputToStream) {
+  StackTrace trace;
+
+  // Dump the trace into a string.
+  std::ostringstream os;
+  trace.OutputToStream(&os);
+  std::string backtrace_message = os.str();
+
+  // ToString() should produce the same output.
+  EXPECT_EQ(backtrace_message, trace.ToString());
+
+#if defined(OS_POSIX) && !defined(OS_MACOSX) && NDEBUG
+  // Stack traces require an extra data table that bloats our binaries,
+  // so they're turned off for release builds.  We stop the test here,
+  // at least letting us verify that the calls don't crash.
+  return;
+#endif  // defined(OS_POSIX) && !defined(OS_MACOSX) && NDEBUG
+
+  size_t frames_found = 0;
+  trace.Addresses(&frames_found);
+  ASSERT_GE(frames_found, 5u) <<
+      "No stack frames found.  Skipping rest of test.";
+
+  // Check if the output has symbol initialization warning.  If it does, fail.
+  ASSERT_EQ(backtrace_message.find("Dumping unresolved backtrace"),
+            std::string::npos) <<
+      "Unable to resolve symbols.  Skipping rest of test.";
+
+#if defined(OS_MACOSX)
+#if 0
+  // Disabled due to -fvisibility=hidden in build config.
+
+  // Symbol resolution via the backtrace_symbol function does not work well
+  // in OS X.
+  // See this thread:
+  //
+  //    http://lists.apple.com/archives/darwin-dev/2009/Mar/msg00111.html
+  //
+  // Just check instead that we find our way back to the "start" symbol
+  // which should be the first symbol in the trace.
+  //
+  // TODO(port): Find a more reliable way to resolve symbols.
+
+  // Expect to at least find main.
+  EXPECT_TRUE(backtrace_message.find("start") != std::string::npos)
+      << "Expected to find start in backtrace:\n"
+      << backtrace_message;
+
+#endif
+#elif defined(USE_SYMBOLIZE)
+  // This branch is for gcc-compiled code, but not Mac due to the
+  // above #if.
+  // Expect a demangled symbol.
+  EXPECT_TRUE(backtrace_message.find("testing::Test::Run()") !=
+              std::string::npos)
+      << "Expected a demangled symbol in backtrace:\n"
+      << backtrace_message;
+
+#elif 0
+  // This is the fall-through case; it used to cover Windows.
+  // But it's disabled because of varying buildbot configs;
+  // some lack symbols.
+
+  // Expect to at least find main.
+  EXPECT_TRUE(backtrace_message.find("main") != std::string::npos)
+      << "Expected to find main in backtrace:\n"
+      << backtrace_message;
+
+#if defined(OS_WIN)
+// MSVC doesn't allow the use of C99's __func__ within C++, so we fake it with
+// MSVC's __FUNCTION__ macro.
+#define __func__ __FUNCTION__
+#endif
+
+  // Expect to find this function as well.
+  // Note: This will fail if not linked with -rdynamic (aka -export_dynamic)
+  EXPECT_TRUE(backtrace_message.find(__func__) != std::string::npos)
+      << "Expected to find " << __func__ << " in backtrace:\n"
+      << backtrace_message;
+
+#endif  // defined(OS_MACOSX)
+}
+
+#if !defined(OFFICIAL_BUILD) && !defined(NO_UNWIND_TABLES)
+// Disabled in Official builds, where Link-Time Optimization can result in two
+// or fewer stack frames being available, causing the test to fail.
+TEST_F(StackTraceTest, TruncatedTrace) {
+  StackTrace trace;
+
+  size_t count = 0;
+  trace.Addresses(&count);
+  ASSERT_LT(2u, count);
+
+  StackTrace truncated(2);
+  truncated.Addresses(&count);
+  EXPECT_EQ(2u, count);
+}
+#endif  // !defined(OFFICIAL_BUILD) && !defined(NO_UNWIND_TABLES)
+
+// The test is used for manual testing, e.g., to see the raw output.
+TEST_F(StackTraceTest, DebugOutputToStream) {
+  StackTrace trace;
+  std::ostringstream os;
+  trace.OutputToStream(&os);
+  VLOG(1) << os.str();
+}
+
+// The test is used for manual testing, e.g., to see the raw output.
+TEST_F(StackTraceTest, DebugPrintBacktrace) {
+  StackTrace().Print();
+}
+#endif  // !defined(__UCLIBC__) && !defined(_AIX)
+
+#if defined(OS_POSIX) && !defined(OS_ANDROID) && !defined(OS_FUCHSIA)
+#if !defined(OS_IOS)
+static char* newArray() {
+  // Clang warns about the mismatched new[]/delete if they occur in the same
+  // function.
+  return new char[10];
+}
+
+MULTIPROCESS_TEST_MAIN(MismatchedMallocChildProcess) {
+  char* pointer = newArray();
+  delete pointer;
+  return 2;
+}
+
+// Regression test for StackDumpingSignalHandler async-signal unsafety.
+// Combined with tcmalloc's debugallocation, that signal handler
+// and e.g. mismatched new[]/delete would cause a hang because
+// of re-entering malloc.
+TEST_F(StackTraceTest, AsyncSignalUnsafeSignalHandlerHang) {
+  Process child = SpawnChild("MismatchedMallocChildProcess");
+  ASSERT_TRUE(child.IsValid());
+  int exit_code;
+  ASSERT_TRUE(
+      child.WaitForExitWithTimeout(TestTimeouts::action_timeout(), &exit_code));
+}
+#endif  // !defined(OS_IOS)
+
+namespace {
+
+std::string itoa_r_wrapper(intptr_t i, size_t sz, int base, size_t padding) {
+  char buffer[1024];
+  CHECK_LE(sz, sizeof(buffer));
+
+  char* result = internal::itoa_r(i, buffer, sz, base, padding);
+  EXPECT_TRUE(result);
+  return std::string(buffer);
+}
+
+}  // namespace
+
+TEST_F(StackTraceTest, itoa_r) {
+  EXPECT_EQ("0", itoa_r_wrapper(0, 128, 10, 0));
+  EXPECT_EQ("-1", itoa_r_wrapper(-1, 128, 10, 0));
+
+  // Test edge cases.
+  if (sizeof(intptr_t) == 4) {
+    EXPECT_EQ("ffffffff", itoa_r_wrapper(-1, 128, 16, 0));
+    EXPECT_EQ("-2147483648",
+              itoa_r_wrapper(std::numeric_limits<intptr_t>::min(), 128, 10, 0));
+    EXPECT_EQ("2147483647",
+              itoa_r_wrapper(std::numeric_limits<intptr_t>::max(), 128, 10, 0));
+
+    EXPECT_EQ("80000000",
+              itoa_r_wrapper(std::numeric_limits<intptr_t>::min(), 128, 16, 0));
+    EXPECT_EQ("7fffffff",
+              itoa_r_wrapper(std::numeric_limits<intptr_t>::max(), 128, 16, 0));
+  } else if (sizeof(intptr_t) == 8) {
+    EXPECT_EQ("ffffffffffffffff", itoa_r_wrapper(-1, 128, 16, 0));
+    EXPECT_EQ("-9223372036854775808",
+              itoa_r_wrapper(std::numeric_limits<intptr_t>::min(), 128, 10, 0));
+    EXPECT_EQ("9223372036854775807",
+              itoa_r_wrapper(std::numeric_limits<intptr_t>::max(), 128, 10, 0));
+
+    EXPECT_EQ("8000000000000000",
+              itoa_r_wrapper(std::numeric_limits<intptr_t>::min(), 128, 16, 0));
+    EXPECT_EQ("7fffffffffffffff",
+              itoa_r_wrapper(std::numeric_limits<intptr_t>::max(), 128, 16, 0));
+  } else {
+    ADD_FAILURE() << "Missing test case for your size of intptr_t ("
+                  << sizeof(intptr_t) << ")";
+  }
+
+  // Test hex output.
+  EXPECT_EQ("688", itoa_r_wrapper(0x688, 128, 16, 0));
+  EXPECT_EQ("deadbeef", itoa_r_wrapper(0xdeadbeef, 128, 16, 0));
+
+  // Check that itoa_r respects passed buffer size limit.
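+  // "deadbeef" is 8 hex digits plus a terminating NUL, so only buffers of at
+  // least 9 bytes succeed; likewise a 5-byte buffer fits at most 4 padded
+  // digits.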
+  char buffer[1024];
+  EXPECT_TRUE(internal::itoa_r(0xdeadbeef, buffer, 10, 16, 0));
+  EXPECT_TRUE(internal::itoa_r(0xdeadbeef, buffer, 9, 16, 0));
+  EXPECT_FALSE(internal::itoa_r(0xdeadbeef, buffer, 8, 16, 0));
+  EXPECT_FALSE(internal::itoa_r(0xdeadbeef, buffer, 7, 16, 0));
+  EXPECT_TRUE(internal::itoa_r(0xbeef, buffer, 5, 16, 4));
+  EXPECT_FALSE(internal::itoa_r(0xbeef, buffer, 5, 16, 5));
+  EXPECT_FALSE(internal::itoa_r(0xbeef, buffer, 5, 16, 6));
+
+  // Test padding.
+  EXPECT_EQ("1", itoa_r_wrapper(1, 128, 10, 0));
+  EXPECT_EQ("1", itoa_r_wrapper(1, 128, 10, 1));
+  EXPECT_EQ("01", itoa_r_wrapper(1, 128, 10, 2));
+  EXPECT_EQ("001", itoa_r_wrapper(1, 128, 10, 3));
+  EXPECT_EQ("0001", itoa_r_wrapper(1, 128, 10, 4));
+  EXPECT_EQ("00001", itoa_r_wrapper(1, 128, 10, 5));
+  EXPECT_EQ("688", itoa_r_wrapper(0x688, 128, 16, 0));
+  EXPECT_EQ("688", itoa_r_wrapper(0x688, 128, 16, 1));
+  EXPECT_EQ("688", itoa_r_wrapper(0x688, 128, 16, 2));
+  EXPECT_EQ("688", itoa_r_wrapper(0x688, 128, 16, 3));
+  EXPECT_EQ("0688", itoa_r_wrapper(0x688, 128, 16, 4));
+  EXPECT_EQ("00688", itoa_r_wrapper(0x688, 128, 16, 5));
+}
+#endif  // defined(OS_POSIX) && !defined(OS_ANDROID) && !defined(OS_FUCHSIA)
+
+#if BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
+
+template <size_t Depth>
+void NOINLINE ExpectStackFramePointers(const void** frames,
+                                       size_t max_depth) {
+  code_start:
+  // Calling __builtin_frame_address() forces compiler to emit
+  // frame pointers, even if they are not enabled.
+  EXPECT_NE(nullptr, __builtin_frame_address(0));
+  ExpectStackFramePointers<Depth - 1>(frames, max_depth);
+
+  constexpr size_t frame_index = Depth - 1;
+  const void* frame = frames[frame_index];
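+  // &&code_start / &&code_end use the GCC/Clang labels-as-values extension
+  // to bracket this function's code range, so each captured program counter
+  // can be range-checked below.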
+  EXPECT_GE(frame, &&code_start) << "For frame at index " << frame_index;
+  EXPECT_LE(frame, &&code_end) << "For frame at index " << frame_index;
+  code_end: return;
+}
+
+template <>
+void NOINLINE ExpectStackFramePointers<1>(const void** frames,
+                                          size_t max_depth) {
+  code_start:
+  // Calling __builtin_frame_address() forces compiler to emit
+  // frame pointers, even if they are not enabled.
+  EXPECT_NE(nullptr, __builtin_frame_address(0));
+  size_t count = TraceStackFramePointers(frames, max_depth, 0);
+  ASSERT_EQ(max_depth, count);
+
+  const void* frame = frames[0];
+  EXPECT_GE(frame, &&code_start) << "For the top frame";
+  EXPECT_LE(frame, &&code_end) << "For the top frame";
+  code_end: return;
+}
+
+#if defined(MEMORY_SANITIZER)
+// The test triggers use-of-uninitialized-value errors on MSan bots.
+// This is expected because we're walking and reading the stack, and
+// sometimes we read fp / pc from the place that previously held
+// uninitialized value.
+#define MAYBE_TraceStackFramePointers DISABLED_TraceStackFramePointers
+#else
+#define MAYBE_TraceStackFramePointers TraceStackFramePointers
+#endif
+TEST_F(StackTraceTest, MAYBE_TraceStackFramePointers) {
+  constexpr size_t kDepth = 5;
+  const void* frames[kDepth];
+  ExpectStackFramePointers<kDepth>(frames, kDepth);
+}
+
+#if defined(OS_ANDROID) || defined(OS_MACOSX)
+#define MAYBE_StackEnd StackEnd
+#else
+#define MAYBE_StackEnd DISABLED_StackEnd
+#endif
+
+TEST_F(StackTraceTest, MAYBE_StackEnd) {
+  EXPECT_NE(0u, GetStackEnd());
+}
+
+#endif  // BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
+
+}  // namespace debug
+}  // namespace base
diff --git a/base/debug/stack_trace_win.cc b/base/debug/stack_trace_win.cc
new file mode 100644
index 0000000..1ef2a06
--- /dev/null
+++ b/base/debug/stack_trace_win.cc
@@ -0,0 +1,365 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/stack_trace.h"
+
+#include <windows.h>
+#include <dbghelp.h>
+#include <stddef.h>
+
+#include <algorithm>
+#include <iostream>
+#include <memory>
+
+#include "base/files/file_path.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/singleton.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+namespace debug {
+
+namespace {
+
+// Previous unhandled filter. Will be called if not NULL when we intercept an
+// exception. Only used in unit tests.
+LPTOP_LEVEL_EXCEPTION_FILTER g_previous_filter = NULL;
+
+bool g_initialized_symbols = false;
+DWORD g_init_error = ERROR_SUCCESS;
+
+// Prints the exception call stack.
+// This is the exception filter used in unit tests.
+long WINAPI StackDumpExceptionFilter(EXCEPTION_POINTERS* info) {
+  DWORD exc_code = info->ExceptionRecord->ExceptionCode;
+  std::cerr << "Received fatal exception ";
+  switch (exc_code) {
+    case EXCEPTION_ACCESS_VIOLATION:
+      std::cerr << "EXCEPTION_ACCESS_VIOLATION";
+      break;
+    case EXCEPTION_ARRAY_BOUNDS_EXCEEDED:
+      std::cerr << "EXCEPTION_ARRAY_BOUNDS_EXCEEDED";
+      break;
+    case EXCEPTION_BREAKPOINT:
+      std::cerr << "EXCEPTION_BREAKPOINT";
+      break;
+    case EXCEPTION_DATATYPE_MISALIGNMENT:
+      std::cerr << "EXCEPTION_DATATYPE_MISALIGNMENT";
+      break;
+    case EXCEPTION_FLT_DENORMAL_OPERAND:
+      std::cerr << "EXCEPTION_FLT_DENORMAL_OPERAND";
+      break;
+    case EXCEPTION_FLT_DIVIDE_BY_ZERO:
+      std::cerr << "EXCEPTION_FLT_DIVIDE_BY_ZERO";
+      break;
+    case EXCEPTION_FLT_INEXACT_RESULT:
+      std::cerr << "EXCEPTION_FLT_INEXACT_RESULT";
+      break;
+    case EXCEPTION_FLT_INVALID_OPERATION:
+      std::cerr << "EXCEPTION_FLT_INVALID_OPERATION";
+      break;
+    case EXCEPTION_FLT_OVERFLOW:
+      std::cerr << "EXCEPTION_FLT_OVERFLOW";
+      break;
+    case EXCEPTION_FLT_STACK_CHECK:
+      std::cerr << "EXCEPTION_FLT_STACK_CHECK";
+      break;
+    case EXCEPTION_FLT_UNDERFLOW:
+      std::cerr << "EXCEPTION_FLT_UNDERFLOW";
+      break;
+    case EXCEPTION_ILLEGAL_INSTRUCTION:
+      std::cerr << "EXCEPTION_ILLEGAL_INSTRUCTION";
+      break;
+    case EXCEPTION_IN_PAGE_ERROR:
+      std::cerr << "EXCEPTION_IN_PAGE_ERROR";
+      break;
+    case EXCEPTION_INT_DIVIDE_BY_ZERO:
+      std::cerr << "EXCEPTION_INT_DIVIDE_BY_ZERO";
+      break;
+    case EXCEPTION_INT_OVERFLOW:
+      std::cerr << "EXCEPTION_INT_OVERFLOW";
+      break;
+    case EXCEPTION_INVALID_DISPOSITION:
+      std::cerr << "EXCEPTION_INVALID_DISPOSITION";
+      break;
+    case EXCEPTION_NONCONTINUABLE_EXCEPTION:
+      std::cerr << "EXCEPTION_NONCONTINUABLE_EXCEPTION";
+      break;
+    case EXCEPTION_PRIV_INSTRUCTION:
+      std::cerr << "EXCEPTION_PRIV_INSTRUCTION";
+      break;
+    case EXCEPTION_SINGLE_STEP:
+      std::cerr << "EXCEPTION_SINGLE_STEP";
+      break;
+    case EXCEPTION_STACK_OVERFLOW:
+      std::cerr << "EXCEPTION_STACK_OVERFLOW";
+      break;
+    default:
+      std::cerr << "0x" << std::hex << exc_code;
+      break;
+  }
+  std::cerr << "\n";
+
+  debug::StackTrace(info).Print();
+  if (g_previous_filter)
+    return g_previous_filter(info);
+  return EXCEPTION_CONTINUE_SEARCH;
+}
+
+FilePath GetExePath() {
+  wchar_t system_buffer[MAX_PATH];
+  GetModuleFileName(NULL, system_buffer, MAX_PATH);
+  system_buffer[MAX_PATH - 1] = L'\0';
+  return FilePath(system_buffer);
+}
+
+bool InitializeSymbols() {
+  if (g_initialized_symbols)
+    return g_init_error == ERROR_SUCCESS;
+  g_initialized_symbols = true;
+  // Defer symbol load until they're needed, use undecorated names, and get line
+  // numbers.
+  SymSetOptions(SYMOPT_DEFERRED_LOADS |
+                SYMOPT_UNDNAME |
+                SYMOPT_LOAD_LINES);
+  if (!SymInitialize(GetCurrentProcess(), NULL, TRUE)) {
+    g_init_error = GetLastError();
+    // TODO(awong): Handle error: SymInitialize can fail with
+    // ERROR_INVALID_PARAMETER.
+    // When it fails, we should not call debugbreak since it kills the current
+    // process (prevents future tests from running or kills the browser
+    // process).
+    DLOG(ERROR) << "SymInitialize failed: " << g_init_error;
+    return false;
+  }
+
+  // When binaries are transferred, e.g. between bots, the symbol paths
+  // embedded in the executable become stale. To still resolve symbols
+  // correctly, add the executable's own directory to the symbol search path.
+  // All following errors are non-fatal.
+  const size_t kSymbolsArraySize = 1024;
+  std::unique_ptr<wchar_t[]> symbols_path(new wchar_t[kSymbolsArraySize]);
+
+  // Note: The below function takes buffer size as number of characters,
+  // not number of bytes!
+  if (!SymGetSearchPathW(GetCurrentProcess(),
+                         symbols_path.get(),
+                         kSymbolsArraySize)) {
+    g_init_error = GetLastError();
+    DLOG(WARNING) << "SymGetSearchPath failed: " << g_init_error;
+    return false;
+  }
+
+  std::wstring new_path(std::wstring(symbols_path.get()) +
+                        L";" + GetExePath().DirName().value());
+  if (!SymSetSearchPathW(GetCurrentProcess(), new_path.c_str())) {
+    g_init_error = GetLastError();
+    DLOG(WARNING) << "SymSetSearchPath failed." << g_init_error;
+    return false;
+  }
+
+  g_init_error = ERROR_SUCCESS;
+  return true;
+}
+
+// SymbolContext is a threadsafe singleton that wraps the DbgHelp Sym* family
+// of functions.  The Sym* family of functions may only be invoked by one
+// thread at a time.  SymbolContext code may access a symbol server over the
+// network while holding the lock for this singleton.  In the case of high
+// latency, this code will adversely affect performance.
+//
+// There is also a known issue where this backtrace code can interact
+// badly with breakpad if breakpad is invoked in a separate thread while
+// we are using the Sym* functions.  This is because breakpad does not
+// share a lock with this function.  See this related bug:
+//
+//   https://crbug.com/google-breakpad/311
+//
+// This is a very unlikely edge case, and the current solution is to
+// just ignore it.
+class SymbolContext {
+ public:
+  static SymbolContext* GetInstance() {
+    // We use a leaky singleton because code may call this during process
+    // termination.
+    return
+      Singleton<SymbolContext, LeakySingletonTraits<SymbolContext> >::get();
+  }
+
+  // For the given trace, attempts to resolve the symbols and outputs a trace
+  // to the ostream os.  The format for each line of the backtrace is:
+  //
+  //    <tab>SymbolName[0xAddress+Offset] (FileName:LineNo)
+  //
+  // This function should only be called if Init() has been called.  We do not
+  // LOG(FATAL) here because this code might be triggered by a LOG(FATAL)
+  // itself. Also, it should not call complex, extensible code like
+  // PathService, since that can in turn fire CHECKs.
+  void OutputTraceToStream(const void* const* trace,
+                           size_t count,
+                           std::ostream* os) {
+    base::AutoLock lock(lock_);
+
+    for (size_t i = 0; (i < count) && os->good(); ++i) {
+      const int kMaxNameLength = 256;
+      DWORD_PTR frame = reinterpret_cast<DWORD_PTR>(trace[i]);
+
+      // Code adapted from MSDN example:
+      // http://msdn.microsoft.com/en-us/library/ms680578(VS.85).aspx
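+      // The buffer is sized in ULONG64 units (rounding up) so that it is
+      // suitably aligned for SYMBOL_INFO while leaving room for
+      // kMaxNameLength characters of symbol name after the fixed header.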
+      ULONG64 buffer[
+        (sizeof(SYMBOL_INFO) +
+          kMaxNameLength * sizeof(wchar_t) +
+          sizeof(ULONG64) - 1) /
+        sizeof(ULONG64)];
+      memset(buffer, 0, sizeof(buffer));
+
+      // Initialize symbol information retrieval structures.
+      DWORD64 sym_displacement = 0;
+      PSYMBOL_INFO symbol = reinterpret_cast<PSYMBOL_INFO>(&buffer[0]);
+      symbol->SizeOfStruct = sizeof(SYMBOL_INFO);
+      symbol->MaxNameLen = kMaxNameLength - 1;
+      BOOL has_symbol = SymFromAddr(GetCurrentProcess(), frame,
+                                    &sym_displacement, symbol);
+
+      // Attempt to retrieve line number information.
+      DWORD line_displacement = 0;
+      IMAGEHLP_LINE64 line = {};
+      line.SizeOfStruct = sizeof(IMAGEHLP_LINE64);
+      BOOL has_line = SymGetLineFromAddr64(GetCurrentProcess(), frame,
+                                           &line_displacement, &line);
+
+      // Output the backtrace line.
+      (*os) << "\t";
+      if (has_symbol) {
+        (*os) << symbol->Name << " [0x" << trace[i] << "+"
+              << sym_displacement << "]";
+      } else {
+        // If there is no symbol information, add a spacer.
+        (*os) << "(No symbol) [0x" << trace[i] << "]";
+      }
+      if (has_line) {
+        (*os) << " (" << line.FileName << ":" << line.LineNumber << ")";
+      }
+      (*os) << "\n";
+    }
+  }
+
+ private:
+  friend struct DefaultSingletonTraits<SymbolContext>;
+
+  SymbolContext() {
+    InitializeSymbols();
+  }
+
+  base::Lock lock_;
+  DISALLOW_COPY_AND_ASSIGN(SymbolContext);
+};
+
+}  // namespace
+
+bool EnableInProcessStackDumping() {
+  // Add stack dump support for unhandled exceptions on Windows. Similar to
+  // the OS_POSIX signal() handling in process_util_posix.cc.
+  g_previous_filter = SetUnhandledExceptionFilter(&StackDumpExceptionFilter);
+
+  // Need to initialize symbols early in the process, or else this fails on
+  // swarming (since symbols are in a different directory than the exes) and
+  // also on release x64.
+  return InitializeSymbols();
+}
+
+// Disable optimizations for the StackTrace::StackTrace function. It is
+// important to disable at least frame pointer optimization ("y"), since
+// that breaks CaptureStackBackTrace() and prevents StackTrace from working
+// in Release builds (it may still be janky if other frames are using FPO,
+// but at least it will make it further).
+#if defined(COMPILER_MSVC)
+#pragma optimize("", off)
+#endif
+
+StackTrace::StackTrace(size_t count) {
+  count = std::min(arraysize(trace_), count);
+
+  // When walking our own stack, use CaptureStackBackTrace().
+  count_ = CaptureStackBackTrace(0, count, trace_, NULL);
+}
+
+#if defined(COMPILER_MSVC)
+#pragma optimize("", on)
+#endif
+
+StackTrace::StackTrace(EXCEPTION_POINTERS* exception_pointers) {
+  InitTrace(exception_pointers->ContextRecord);
+}
+
+StackTrace::StackTrace(const CONTEXT* context) {
+  InitTrace(context);
+}
+
+void StackTrace::InitTrace(const CONTEXT* context_record) {
+  // StackWalk64 modifies the register context in place, so we have to copy it
+  // so that downstream exception handlers get the right context.  The incoming
+  // context may have had more register state (YMM, etc) than we need to unwind
+  // the stack. Typically StackWalk64 only needs integer and control registers.
+  CONTEXT context_copy;
+  memcpy(&context_copy, context_record, sizeof(context_copy));
+  context_copy.ContextFlags = CONTEXT_INTEGER | CONTEXT_CONTROL;
+
+  // When walking an exception stack, we need to use StackWalk64().
+  count_ = 0;
+  // Initialize stack walking.
+  STACKFRAME64 stack_frame;
+  memset(&stack_frame, 0, sizeof(stack_frame));
+#if defined(_WIN64)
+  int machine_type = IMAGE_FILE_MACHINE_AMD64;
+  stack_frame.AddrPC.Offset = context_record->Rip;
+  stack_frame.AddrFrame.Offset = context_record->Rbp;
+  stack_frame.AddrStack.Offset = context_record->Rsp;
+#else
+  int machine_type = IMAGE_FILE_MACHINE_I386;
+  stack_frame.AddrPC.Offset = context_record->Eip;
+  stack_frame.AddrFrame.Offset = context_record->Ebp;
+  stack_frame.AddrStack.Offset = context_record->Esp;
+#endif
+  stack_frame.AddrPC.Mode = AddrModeFlat;
+  stack_frame.AddrFrame.Mode = AddrModeFlat;
+  stack_frame.AddrStack.Mode = AddrModeFlat;
+  while (StackWalk64(machine_type,
+                     GetCurrentProcess(),
+                     GetCurrentThread(),
+                     &stack_frame,
+                     &context_copy,
+                     NULL,
+                     &SymFunctionTableAccess64,
+                     &SymGetModuleBase64,
+                     NULL) &&
+         count_ < arraysize(trace_)) {
+    trace_[count_++] = reinterpret_cast<void*>(stack_frame.AddrPC.Offset);
+  }
+
+  for (size_t i = count_; i < arraysize(trace_); ++i)
+    trace_[i] = NULL;
+}
+
+void StackTrace::Print() const {
+  OutputToStream(&std::cerr);
+}
+
+void StackTrace::OutputToStream(std::ostream* os) const {
+  SymbolContext* context = SymbolContext::GetInstance();
+  if (g_init_error != ERROR_SUCCESS) {
+    (*os) << "Error initializing symbols (" << g_init_error
+          << ").  Dumping unresolved backtrace:\n";
+    for (size_t i = 0; (i < count_) && os->good(); ++i) {
+      (*os) << "\t" << trace_[i] << "\n";
+    }
+  } else {
+    (*os) << "Backtrace:\n";
+    context->OutputTraceToStream(trace_, count_, os);
+  }
+}
+
+}  // namespace debug
+}  // namespace base
diff --git a/base/debug/task_annotator.cc b/base/debug/task_annotator.cc
new file mode 100644
index 0000000..2197b85
--- /dev/null
+++ b/base/debug/task_annotator.cc
@@ -0,0 +1,124 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/task_annotator.h"
+
+#include <array>
+
+#include "base/debug/activity_tracker.h"
+#include "base/debug/alias.h"
+#include "base/no_destructor.h"
+#include "base/pending_task.h"
+#include "base/threading/thread_local.h"
+#include "base/trace_event/trace_event.h"
+
+namespace base {
+namespace debug {
+
+namespace {
+
+TaskAnnotator::ObserverForTesting* g_task_annotator_observer = nullptr;
+
+// Returns the TLS slot that stores the PendingTask currently in progress on
+// each thread. Used to allow creating a breadcrumb of program counters on the
+// stack to help identify a task's origin in crashes.
+ThreadLocalPointer<const PendingTask>* GetTLSForCurrentPendingTask() {
+  static NoDestructor<ThreadLocalPointer<const PendingTask>>
+      tls_for_current_pending_task;
+  return tls_for_current_pending_task.get();
+}
+
+}  // namespace
+
+TaskAnnotator::TaskAnnotator() = default;
+
+TaskAnnotator::~TaskAnnotator() = default;
+
+void TaskAnnotator::DidQueueTask(const char* queue_function,
+                                 const PendingTask& pending_task) {
+  if (queue_function) {
+    TRACE_EVENT_WITH_FLOW0(TRACE_DISABLED_BY_DEFAULT("toplevel.flow"),
+                           queue_function,
+                           TRACE_ID_MANGLE(GetTaskTraceID(pending_task)),
+                           TRACE_EVENT_FLAG_FLOW_OUT);
+  }
+
+  // TODO(https://crbug.com/826902): Fix callers that invoke DidQueueTask()
+  // twice for the same PendingTask.
+  // DCHECK(!pending_task.task_backtrace[0])
+  //     << "Task backtrace was already set, task posted twice??";
+  if (!pending_task.task_backtrace[0]) {
+    const PendingTask* parent_task = GetTLSForCurrentPendingTask()->Get();
+    if (parent_task) {
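+      // Record the parent's posting location in slot 0 and shift the
+      // parent's own chain down one slot, dropping its oldest entry to keep
+      // the breadcrumb a fixed size.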
+      pending_task.task_backtrace[0] =
+          parent_task->posted_from.program_counter();
+      std::copy(parent_task->task_backtrace.begin(),
+                parent_task->task_backtrace.end() - 1,
+                pending_task.task_backtrace.begin() + 1);
+    }
+  }
+}
+
+void TaskAnnotator::RunTask(const char* queue_function,
+                            PendingTask* pending_task) {
+  ScopedTaskRunActivity task_activity(*pending_task);
+
+  if (queue_function) {
+    TRACE_EVENT_WITH_FLOW0(TRACE_DISABLED_BY_DEFAULT("toplevel.flow"),
+                           queue_function,
+                           TRACE_ID_MANGLE(GetTaskTraceID(*pending_task)),
+                           TRACE_EVENT_FLAG_FLOW_IN);
+  }
+
+  // Before running the task, store the task backtrace with the chain of
+  // PostTasks that resulted in this call and deliberately alias it to ensure
+  // it is on the stack if the task crashes. Be careful not to assume that the
+  // variable itself will have the expected value when displayed by the
+  // optimizer in an optimized build. Look at a memory dump of the stack.
+  static constexpr int kStackTaskTraceSnapshotSize =
+      std::tuple_size<decltype(pending_task->task_backtrace)>::value + 3;
+  std::array<const void*, kStackTaskTraceSnapshotSize> task_backtrace;
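+  // Layout: [front marker, posted_from PC, task_backtrace..., back marker],
+  // hence the +3 above.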
+
+  // Store a marker to locate |task_backtrace| content easily on a memory
+  // dump.
+  task_backtrace.front() = reinterpret_cast<void*>(0xefefefefefefefef);
+  task_backtrace.back() = reinterpret_cast<void*>(0xfefefefefefefefe);
+
+  task_backtrace[1] = pending_task->posted_from.program_counter();
+  std::copy(pending_task->task_backtrace.begin(),
+            pending_task->task_backtrace.end(), task_backtrace.begin() + 2);
+  debug::Alias(&task_backtrace);
+
+  ThreadLocalPointer<const PendingTask>* tls_for_current_pending_task =
+      GetTLSForCurrentPendingTask();
+  const PendingTask* previous_pending_task =
+      tls_for_current_pending_task->Get();
+  tls_for_current_pending_task->Set(pending_task);
+
+  if (g_task_annotator_observer)
+    g_task_annotator_observer->BeforeRunTask(pending_task);
+  std::move(pending_task->task).Run();
+
+  tls_for_current_pending_task->Set(previous_pending_task);
+}
+
+uint64_t TaskAnnotator::GetTaskTraceID(const PendingTask& task) const {
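+  // High 32 bits: the task's sequence number. Low 32 bits: the low half of
+  // this TaskAnnotator's address (the shift-left-then-right pair masks off
+  // the pointer's upper half).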
+  return (static_cast<uint64_t>(task.sequence_num) << 32) |
+         ((static_cast<uint64_t>(reinterpret_cast<intptr_t>(this)) << 32) >>
+          32);
+}
+
+// static
+void TaskAnnotator::RegisterObserverForTesting(ObserverForTesting* observer) {
+  DCHECK(!g_task_annotator_observer);
+  g_task_annotator_observer = observer;
+}
+
+// static
+void TaskAnnotator::ClearObserverForTesting() {
+  g_task_annotator_observer = nullptr;
+}
+
+}  // namespace debug
+}  // namespace base
diff --git a/base/debug/task_annotator.h b/base/debug/task_annotator.h
new file mode 100644
index 0000000..f53d02c
--- /dev/null
+++ b/base/debug/task_annotator.h
@@ -0,0 +1,64 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_DEBUG_TASK_ANNOTATOR_H_
+#define BASE_DEBUG_TASK_ANNOTATOR_H_
+
+#include <stdint.h>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+
+namespace base {
+struct PendingTask;
+namespace debug {
+
+// Implements common debug annotations for posted tasks. This includes data
+// such as task origins, queueing durations and memory usage.
+class BASE_EXPORT TaskAnnotator {
+ public:
+  class ObserverForTesting {
+   public:
+    // Invoked just before RunTask() in the scope in which the task is about to
+    // be executed.
+    virtual void BeforeRunTask(const PendingTask* pending_task) = 0;
+  };
+
+  TaskAnnotator();
+  ~TaskAnnotator();
+
+  // Called to indicate that a task has been queued to run in the future.
+  // |queue_function| is used as the trace flow event name. |queue_function| can
+  // be null if the caller doesn't want trace flow events logged to
+  // toplevel.flow.
+  void DidQueueTask(const char* queue_function,
+                    const PendingTask& pending_task);
+
+  // Run a previously queued task. |queue_function| should match what was
+  // passed into |DidQueueTask| for this task.
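+  // Illustrative pairing (mirroring task_annotator_unittest.cc in this
+  // patch): call DidQueueTask("MyQueue", task) at post time, then
+  // RunTask("MyQueue", &task) at execution time; "MyQueue" is just a
+  // caller-chosen trace name.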
+  void RunTask(const char* queue_function, PendingTask* pending_task);
+
+  // Creates a process-wide unique ID to represent this task in trace events.
+  // This will be mangled with a Process ID hash to reduce the likelihood of
+  // colliding with TaskAnnotator pointers in other processes. Callers may use
+  // this when generating their own flow events (i.e. when passing
+  // |queue_function == nullptr| in above methods).
+  uint64_t GetTaskTraceID(const PendingTask& task) const;
+
+ private:
+  friend class TaskAnnotatorBacktraceIntegrationTest;
+
+  // Registers an ObserverForTesting that will be invoked by all TaskAnnotators'
+  // RunTask(). This registration and the implementation of BeforeRunTask()
+  // are responsible for ensuring thread-safety.
+  static void RegisterObserverForTesting(ObserverForTesting* observer);
+  static void ClearObserverForTesting();
+
+  DISALLOW_COPY_AND_ASSIGN(TaskAnnotator);
+};
+
+}  // namespace debug
+}  // namespace base
+
+#endif  // BASE_DEBUG_TASK_ANNOTATOR_H_
diff --git a/base/debug/task_annotator_unittest.cc b/base/debug/task_annotator_unittest.cc
new file mode 100644
index 0000000..51a5d32
--- /dev/null
+++ b/base/debug/task_annotator_unittest.cc
@@ -0,0 +1,371 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/task_annotator.h"
+
+#include <algorithm>
+#include <vector>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/message_loop/message_loop.h"
+#include "base/pending_task.h"
+#include "base/run_loop.h"
+#include "base/strings/stringprintf.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/task_scheduler/post_task.h"
+#include "base/test/scoped_task_environment.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace debug {
+namespace {
+
+void TestTask(int* result) {
+  *result = 123;
+}
+
+}  // namespace
+
+TEST(TaskAnnotatorTest, QueueAndRunTask) {
+  int result = 0;
+  PendingTask pending_task(FROM_HERE, BindOnce(&TestTask, &result));
+
+  TaskAnnotator annotator;
+  annotator.DidQueueTask("TaskAnnotatorTest::Queue", pending_task);
+  EXPECT_EQ(0, result);
+  annotator.RunTask("TaskAnnotatorTest::Queue", &pending_task);
+  EXPECT_EQ(123, result);
+}
+
+// Test task annotator integration in base APIs and ensuing support for
+// backtraces. Tasks posted across multiple threads in this test fixture should
+// be synchronized, as BeforeRunTask() and VerifyTraceAndPost() assume tasks
+// are observed in lock step, one at a time.
+class TaskAnnotatorBacktraceIntegrationTest
+    : public ::testing::Test,
+      public TaskAnnotator::ObserverForTesting {
+ public:
+  using ExpectedTrace = std::vector<const void*>;
+
+  TaskAnnotatorBacktraceIntegrationTest() = default;
+
+  ~TaskAnnotatorBacktraceIntegrationTest() override = default;
+
+  // TaskAnnotator::ObserverForTesting:
+  void BeforeRunTask(const PendingTask* pending_task) override {
+    AutoLock auto_lock(on_before_run_task_lock_);
+    last_posted_from_ = pending_task->posted_from;
+    last_task_backtrace_ = pending_task->task_backtrace;
+  }
+
+  void SetUp() override { TaskAnnotator::RegisterObserverForTesting(this); }
+
+  void TearDown() override { TaskAnnotator::ClearObserverForTesting(); }
+
+  void VerifyTraceAndPost(const scoped_refptr<SequencedTaskRunner>& task_runner,
+                          const Location& posted_from,
+                          const Location& next_from_here,
+                          const ExpectedTrace& expected_trace,
+                          OnceClosure task) {
+    SCOPED_TRACE(StringPrintf("Callback Depth: %zu", expected_trace.size()));
+
+    EXPECT_EQ(posted_from, last_posted_from_);
+    for (size_t i = 0; i < last_task_backtrace_.size(); i++) {
+      SCOPED_TRACE(StringPrintf("Trace frame: %zu", i));
+      if (i < expected_trace.size())
+        EXPECT_EQ(expected_trace[i], last_task_backtrace_[i]);
+      else
+        EXPECT_EQ(nullptr, last_task_backtrace_[i]);
+    }
+
+    task_runner->PostTask(next_from_here, std::move(task));
+  }
+
+  // Same as VerifyTraceAndPost() with the exception that it also posts a task
+  // that will prevent |task| from running until |wait_before_next_task| is
+  // signaled.
+  void VerifyTraceAndPostWithBlocker(
+      const scoped_refptr<SequencedTaskRunner>& task_runner,
+      const Location& posted_from,
+      const Location& next_from_here,
+      const ExpectedTrace& expected_trace,
+      OnceClosure task,
+      WaitableEvent* wait_before_next_task) {
+    DCHECK(wait_before_next_task);
+
+    // Need to lock to ensure the upcoming VerifyTraceAndPost() runs before the
+    // BeforeRunTask() hook for the posted WaitableEvent::Wait(). Otherwise the
+    // upcoming VerifyTraceAndPost() will race to read the state saved in the
+    // BeforeRunTask() hook preceding the current task.
+    AutoLock auto_lock(on_before_run_task_lock_);
+    task_runner->PostTask(
+        FROM_HERE,
+        BindOnce(&WaitableEvent::Wait, Unretained(wait_before_next_task)));
+    VerifyTraceAndPost(task_runner, posted_from, next_from_here, expected_trace,
+                       std::move(task));
+  }
+
+ protected:
+  static void RunTwo(OnceClosure c1, OnceClosure c2) {
+    std::move(c1).Run();
+    std::move(c2).Run();
+  }
+
+ private:
+  // While calls to VerifyTraceAndPost() are strictly ordered in tests below
+  // (and hence non-racy), some helper methods (e.g. Wait/Signal) do racily call
+  // into BeforeRunTask(). This Lock ensures these unobserved writes are not
+  // racing. Locking isn't required on reads because the VerifyTraceAndPost()
+  // calls themselves are ordered.
+  Lock on_before_run_task_lock_;
+
+  Location last_posted_from_ = {};
+  std::array<const void*, 4> last_task_backtrace_ = {};
+
+  DISALLOW_COPY_AND_ASSIGN(TaskAnnotatorBacktraceIntegrationTest);
+};
+
+// Ensure the task backtrace populates correctly.
+TEST_F(TaskAnnotatorBacktraceIntegrationTest, SingleThreadedSimple) {
+  MessageLoop loop;
+  const Location location0 = FROM_HERE;
+  const Location location1 = FROM_HERE;
+  const Location location2 = FROM_HERE;
+  const Location location3 = FROM_HERE;
+  const Location location4 = FROM_HERE;
+  const Location location5 = FROM_HERE;
+
+  RunLoop run_loop;
+
+  // Task 5 has tasks 4/3/2/1 as parents (task 0 isn't visible as only the
+  // last 4 parents are kept).
+  OnceClosure task5 = BindOnce(
+      &TaskAnnotatorBacktraceIntegrationTest::VerifyTraceAndPost,
+      Unretained(this), loop.task_runner(), location5, FROM_HERE,
+      ExpectedTrace({location4.program_counter(), location3.program_counter(),
+                     location2.program_counter(), location1.program_counter()}),
+      run_loop.QuitClosure());
+
+  // Tasks i=4/3/2/1/0 each have tasks [0, i) as parents.
+  OnceClosure task4 = BindOnce(
+      &TaskAnnotatorBacktraceIntegrationTest::VerifyTraceAndPost,
+      Unretained(this), loop.task_runner(), location4, location5,
+      ExpectedTrace({location3.program_counter(), location2.program_counter(),
+                     location1.program_counter(), location0.program_counter()}),
+      std::move(task5));
+  OnceClosure task3 = BindOnce(
+      &TaskAnnotatorBacktraceIntegrationTest::VerifyTraceAndPost,
+      Unretained(this), loop.task_runner(), location3, location4,
+      ExpectedTrace({location2.program_counter(), location1.program_counter(),
+                     location0.program_counter()}),
+      std::move(task4));
+  OnceClosure task2 = BindOnce(
+      &TaskAnnotatorBacktraceIntegrationTest::VerifyTraceAndPost,
+      Unretained(this), loop.task_runner(), location2, location3,
+      ExpectedTrace({location1.program_counter(), location0.program_counter()}),
+      std::move(task3));
+  OnceClosure task1 =
+      BindOnce(&TaskAnnotatorBacktraceIntegrationTest::VerifyTraceAndPost,
+               Unretained(this), loop.task_runner(), location1, location2,
+               ExpectedTrace({location0.program_counter()}), std::move(task2));
+  OnceClosure task0 =
+      BindOnce(&TaskAnnotatorBacktraceIntegrationTest::VerifyTraceAndPost,
+               Unretained(this), loop.task_runner(), location0, location1,
+               ExpectedTrace({}), std::move(task1));
+
+  loop.task_runner()->PostTask(location0, std::move(task0));
+
+  run_loop.Run();
+}
+
+// Ensure it works when posting tasks across multiple threads managed by //base.
+TEST_F(TaskAnnotatorBacktraceIntegrationTest, MultipleThreads) {
+  test::ScopedTaskEnvironment scoped_task_environment;
+
+  // Use diverse task runners (a MessageLoop on the main thread, a TaskScheduler
+  // based SequencedTaskRunner, and a TaskScheduler based
+  // SingleThreadTaskRunner) to verify that TaskAnnotator can capture backtraces
+  // for PostTasks back-and-forth between these.
+  auto main_thread_a = ThreadTaskRunnerHandle::Get();
+  auto task_runner_b = CreateSingleThreadTaskRunnerWithTraits({});
+  auto task_runner_c = CreateSequencedTaskRunnerWithTraits(
+      {base::MayBlock(), base::WithBaseSyncPrimitives()});
+
+  const Location& location_a0 = FROM_HERE;
+  const Location& location_a1 = FROM_HERE;
+  const Location& location_a2 = FROM_HERE;
+  const Location& location_a3 = FROM_HERE;
+
+  const Location& location_b0 = FROM_HERE;
+  const Location& location_b1 = FROM_HERE;
+
+  const Location& location_c0 = FROM_HERE;
+
+  RunLoop run_loop;
+
+  // All tasks below happen in lock step by nature of being posted by the
+  // previous one (plus the synchronous nature of RunTwo()) with the exception
+  // of the follow-up local task to |task_b0_local|. This WaitableEvent ensures
+  // it completes before |task_c0| runs to avoid racy invocations of
+  // BeforeRunTask()+VerifyTraceAndPost().
+  WaitableEvent lock_step(WaitableEvent::ResetPolicy::AUTOMATIC,
+                          WaitableEvent::InitialState::NOT_SIGNALED);
+
+  // Here is the execution order generated below:
+  //  A: TA0 -> TA1 \                                    TA2
+  //  B:            TB0L \ + TB0F \  Signal \           /
+  //                      ---------\--/      \         /
+  //                                \         \       /
+  //  C:                            Wait........ TC0 /
+
+  // On task runner c, post a task back to main thread that verifies its trace
+  // and terminates after one more self-post.
+  OnceClosure task_a2 = BindOnce(
+      &TaskAnnotatorBacktraceIntegrationTest::VerifyTraceAndPost,
+      Unretained(this), main_thread_a, location_a2, location_a3,
+      ExpectedTrace(
+          {location_c0.program_counter(), location_b0.program_counter(),
+           location_a1.program_counter(), location_a0.program_counter()}),
+      run_loop.QuitClosure());
+  OnceClosure task_c0 =
+      BindOnce(&TaskAnnotatorBacktraceIntegrationTest::VerifyTraceAndPost,
+               Unretained(this), main_thread_a, location_c0, location_a2,
+               ExpectedTrace({location_b0.program_counter(),
+                              location_a1.program_counter(),
+                              location_a0.program_counter()}),
+               std::move(task_a2));
+
+  // On task runner b, run two tasks that conceptually come from the same
+  // location (managed via RunTwo()). One will post back to task runner b and
+  // another will post to task runner c to test spawning multiple tasks on
+  // different message loops. The task posted to task runner c will not get
+  // location b1 whereas the one posted back to task runner b will.
+  OnceClosure task_b0_fork = BindOnce(
+      &TaskAnnotatorBacktraceIntegrationTest::VerifyTraceAndPostWithBlocker,
+      Unretained(this), task_runner_c, location_b0, location_c0,
+      ExpectedTrace(
+          {location_a1.program_counter(), location_a0.program_counter()}),
+      std::move(task_c0), &lock_step);
+  OnceClosure task_b0_local =
+      BindOnce(&TaskAnnotatorBacktraceIntegrationTest::VerifyTraceAndPost,
+               Unretained(this), task_runner_b, location_b0, location_b1,
+               ExpectedTrace({location_a1.program_counter(),
+                              location_a0.program_counter()}),
+               BindOnce(&WaitableEvent::Signal, Unretained(&lock_step)));
+
+  OnceClosure task_a1 =
+      BindOnce(&TaskAnnotatorBacktraceIntegrationTest::VerifyTraceAndPost,
+               Unretained(this), task_runner_b, location_a1, location_b0,
+               ExpectedTrace({location_a0.program_counter()}),
+               BindOnce(&TaskAnnotatorBacktraceIntegrationTest::RunTwo,
+                        std::move(task_b0_local), std::move(task_b0_fork)));
+  OnceClosure task_a0 =
+      BindOnce(&TaskAnnotatorBacktraceIntegrationTest::VerifyTraceAndPost,
+               Unretained(this), main_thread_a, location_a0, location_a1,
+               ExpectedTrace({}), std::move(task_a1));
+
+  main_thread_a->PostTask(location_a0, std::move(task_a0));
+
+  run_loop.Run();
+}
+
+// Ensure nesting doesn't break the chain.
+TEST_F(TaskAnnotatorBacktraceIntegrationTest, SingleThreadedNested) {
+  MessageLoop loop;
+  const Location location0 = FROM_HERE;
+  const Location location1 = FROM_HERE;
+  const Location location2 = FROM_HERE;
+  const Location location3 = FROM_HERE;
+  const Location location4 = FROM_HERE;
+  const Location location5 = FROM_HERE;
+
+  RunLoop run_loop;
+
+  // Task execution below looks like this, w.r.t. RunLoop depths:
+  // 1 : T0 \ + NRL1 \                                 ---------> T4 -> T5
+  // 2 :     ---------> T1 \ -> NRL2 \ ----> T2 -> T3 / + Quit /
+  // 3 :                    ---------> DN /
+
+  // NRL1 tests that tasks that occur at a different nesting depth than their
+  // parent have a sane backtrace nonetheless (both ways).
+
+  // NRL2 tests that posting T2 right after exiting the RunLoop (from the same
+  // task) results in NRL2 being its parent (and not the DoNothing() task that
+  // just ran -- which would have been the case if the "current task" wasn't
+  // restored properly when returning from a task within a task).
+
+  // In other words, this is a regression test for a bug in the previous
+  // implementation. In the current implementation, replacing
+  //   tls_for_current_pending_task->Set(previous_pending_task);
+  // by
+  //   tls_for_current_pending_task->Set(nullptr);
+  // at the end of TaskAnnotator::RunTask() makes this test fail.
+
+  RunLoop nested_run_loop1(RunLoop::Type::kNestableTasksAllowed);
+
+  // Expectations are the same as in the SingleThreadedSimple test despite the
+  // nested loop starting between tasks 0 and 1 and stopping between tasks 3 and
+  // 4.
+  OnceClosure task5 = BindOnce(
+      &TaskAnnotatorBacktraceIntegrationTest::VerifyTraceAndPost,
+      Unretained(this), loop.task_runner(), location5, FROM_HERE,
+      ExpectedTrace({location4.program_counter(), location3.program_counter(),
+                     location2.program_counter(), location1.program_counter()}),
+      run_loop.QuitClosure());
+  OnceClosure task4 = BindOnce(
+      &TaskAnnotatorBacktraceIntegrationTest::VerifyTraceAndPost,
+      Unretained(this), loop.task_runner(), location4, location5,
+      ExpectedTrace({location3.program_counter(), location2.program_counter(),
+                     location1.program_counter(), location0.program_counter()}),
+      std::move(task5));
+  OnceClosure task3 = BindOnce(
+      &TaskAnnotatorBacktraceIntegrationTest::VerifyTraceAndPost,
+      Unretained(this), loop.task_runner(), location3, location4,
+      ExpectedTrace({location2.program_counter(), location1.program_counter(),
+                     location0.program_counter()}),
+      std::move(task4));
+
+  OnceClosure run_task_3_then_quit_nested_loop1 =
+      BindOnce(&TaskAnnotatorBacktraceIntegrationTest::RunTwo, std::move(task3),
+               nested_run_loop1.QuitClosure());
+
+  OnceClosure task2 = BindOnce(
+      &TaskAnnotatorBacktraceIntegrationTest::VerifyTraceAndPost,
+      Unretained(this), loop.task_runner(), location2, location3,
+      ExpectedTrace({location1.program_counter(), location0.program_counter()}),
+      std::move(run_task_3_then_quit_nested_loop1));
+
+  // Task 1 is custom. It enters another nested RunLoop, has it do work and exit
+  // before posting the next task. This confirms that |task1| is restored as the
+  // current task before posting |task2| after returning from the nested loop.
+  RunLoop nested_run_loop2(RunLoop::Type::kNestableTasksAllowed);
+  OnceClosure task1 = BindOnce(
+      [](RunLoop* nested_run_loop, const Location& location2,
+         OnceClosure task2) {
+        ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, DoNothing());
+        nested_run_loop->RunUntilIdle();
+        ThreadTaskRunnerHandle::Get()->PostTask(location2, std::move(task2));
+      },
+      Unretained(&nested_run_loop2), location2, std::move(task2));
+
+  OnceClosure task0 =
+      BindOnce(&TaskAnnotatorBacktraceIntegrationTest::VerifyTraceAndPost,
+               Unretained(this), loop.task_runner(), location0, location1,
+               ExpectedTrace({}), std::move(task1));
+
+  loop.task_runner()->PostTask(location0, std::move(task0));
+  loop.task_runner()->PostTask(
+      FROM_HERE, BindOnce(&RunLoop::Run, Unretained(&nested_run_loop1)));
+
+  run_loop.Run();
+}
+
+}  // namespace debug
+}  // namespace base
diff --git a/base/debug/thread_heap_usage_tracker.cc b/base/debug/thread_heap_usage_tracker.cc
new file mode 100644
index 0000000..6d00b1c
--- /dev/null
+++ b/base/debug/thread_heap_usage_tracker.cc
@@ -0,0 +1,340 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/thread_heap_usage_tracker.h"
+
+#include <stdint.h>
+#include <algorithm>
+#include <limits>
+#include <new>
+#include <type_traits>
+
+#include "base/allocator/allocator_shim.h"
+#include "base/allocator/buildflags.h"
+#include "base/logging.h"
+#include "base/no_destructor.h"
+#include "base/threading/thread_local_storage.h"
+#include "build/build_config.h"
+
+#if defined(OS_MACOSX) || defined(OS_IOS)
+#include <malloc/malloc.h>
+#else
+#include <malloc.h>
+#endif
+
+namespace base {
+namespace debug {
+
+namespace {
+
+using base::allocator::AllocatorDispatch;
+
+const uintptr_t kSentinelMask = std::numeric_limits<uintptr_t>::max() - 1;
+ThreadHeapUsage* const kInitializationSentinel =
+    reinterpret_cast<ThreadHeapUsage*>(kSentinelMask);
+ThreadHeapUsage* const kTeardownSentinel =
+    reinterpret_cast<ThreadHeapUsage*>(kSentinelMask | 1);
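+// Both sentinels share every bit of kSentinelMask (...1110) and differ only
+// in the lowest bit, so GetOrCreateThreadUsage() can detect either one with
+// a single masked comparison.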
+
+ThreadLocalStorage::Slot& ThreadAllocationUsage() {
+  static NoDestructor<ThreadLocalStorage::Slot> thread_allocator_usage(
+      [](void* thread_heap_usage) {
+        // This destructor will be called twice. Once to destroy the actual
+        // ThreadHeapUsage instance and a second time, immediately after, for
+        // the sentinel. Re-setting the TLS slot (below) re-initializes the
+        // slot. The ThreadLocalStorage code is designed to deal with this
+        // use case and will re-call the destructor with the kTeardownSentinel
+        // as arg.
+        if (thread_heap_usage == kTeardownSentinel)
+          return;
+        DCHECK_NE(thread_heap_usage, kInitializationSentinel);
+
+        // Deleting the ThreadHeapUsage TLS object will re-enter the shim and
+        // hit RecordFree() (see below). The sentinel prevents RecordFree() from
+        // re-creating another ThreadHeapUsage object.
+        ThreadAllocationUsage().Set(kTeardownSentinel);
+        delete static_cast<ThreadHeapUsage*>(thread_heap_usage);
+      });
+  return *thread_allocator_usage;
+}
+
+bool g_heap_tracking_enabled = false;
+
+// Forward declared as it needs to delegate memory allocation to the next
+// lower shim.
+ThreadHeapUsage* GetOrCreateThreadUsage();
+
+size_t GetAllocSizeEstimate(const AllocatorDispatch* next,
+                            void* ptr,
+                            void* context) {
+  if (ptr == nullptr)
+    return 0U;
+
+  return next->get_size_estimate_function(next, ptr, context);
+}
+
+void RecordAlloc(const AllocatorDispatch* next,
+                 void* ptr,
+                 size_t size,
+                 void* context) {
+  ThreadHeapUsage* usage = GetOrCreateThreadUsage();
+  if (usage == nullptr)
+    return;
+
+  usage->alloc_ops++;
+  size_t estimate = GetAllocSizeEstimate(next, ptr, context);
+  if (size && estimate) {
+    // Only keep track of the net number of bytes allocated in the scope if the
+    // size estimate function returns sane values, i.e. non-zero.
+    usage->alloc_bytes += estimate;
+    usage->alloc_overhead_bytes += estimate - size;
+
+    // Record the max outstanding number of bytes, but only if the difference
+    // is net positive (i.e. more bytes allocated than freed in the scope).
+    if (usage->alloc_bytes > usage->free_bytes) {
+      uint64_t allocated_bytes = usage->alloc_bytes - usage->free_bytes;
+      if (allocated_bytes > usage->max_allocated_bytes)
+        usage->max_allocated_bytes = allocated_bytes;
+    }
+  } else {
+    usage->alloc_bytes += size;
+  }
+}
+
+void RecordFree(const AllocatorDispatch* next, void* ptr, void* context) {
+  ThreadHeapUsage* usage = GetOrCreateThreadUsage();
+  if (usage == nullptr)
+    return;
+
+  size_t estimate = GetAllocSizeEstimate(next, ptr, context);
+  usage->free_ops++;
+  usage->free_bytes += estimate;
+}
+
+void* AllocFn(const AllocatorDispatch* self, size_t size, void* context) {
+  void* ret = self->next->alloc_function(self->next, size, context);
+  if (ret != nullptr)
+    RecordAlloc(self->next, ret, size, context);
+
+  return ret;
+}
+
+void* AllocZeroInitializedFn(const AllocatorDispatch* self,
+                             size_t n,
+                             size_t size,
+                             void* context) {
+  void* ret =
+      self->next->alloc_zero_initialized_function(self->next, n, size, context);
+  if (ret != nullptr)
+    RecordAlloc(self->next, ret, size, context);
+
+  return ret;
+}
+
+void* AllocAlignedFn(const AllocatorDispatch* self,
+                     size_t alignment,
+                     size_t size,
+                     void* context) {
+  void* ret =
+      self->next->alloc_aligned_function(self->next, alignment, size, context);
+  if (ret != nullptr)
+    RecordAlloc(self->next, ret, size, context);
+
+  return ret;
+}
+
+void* ReallocFn(const AllocatorDispatch* self,
+                void* address,
+                size_t size,
+                void* context) {
+  if (address != nullptr)
+    RecordFree(self->next, address, context);
+
+  void* ret = self->next->realloc_function(self->next, address, size, context);
+  if (ret != nullptr && size != 0)
+    RecordAlloc(self->next, ret, size, context);
+
+  return ret;
+}
+
+void FreeFn(const AllocatorDispatch* self, void* address, void* context) {
+  if (address != nullptr)
+    RecordFree(self->next, address, context);
+  self->next->free_function(self->next, address, context);
+}
+
+size_t GetSizeEstimateFn(const AllocatorDispatch* self,
+                         void* address,
+                         void* context) {
+  return self->next->get_size_estimate_function(self->next, address, context);
+}
+
+unsigned BatchMallocFn(const AllocatorDispatch* self,
+                       size_t size,
+                       void** results,
+                       unsigned num_requested,
+                       void* context) {
+  unsigned count = self->next->batch_malloc_function(self->next, size, results,
+                                                     num_requested, context);
+  for (unsigned i = 0; i < count; ++i) {
+    RecordAlloc(self->next, results[i], size, context);
+  }
+  return count;
+}
+
+void BatchFreeFn(const AllocatorDispatch* self,
+                 void** to_be_freed,
+                 unsigned num_to_be_freed,
+                 void* context) {
+  for (unsigned i = 0; i < num_to_be_freed; ++i) {
+    if (to_be_freed[i] != nullptr) {
+      RecordFree(self->next, to_be_freed[i], context);
+    }
+  }
+  self->next->batch_free_function(self->next, to_be_freed, num_to_be_freed,
+                                  context);
+}
+
+void FreeDefiniteSizeFn(const AllocatorDispatch* self,
+                        void* ptr,
+                        size_t size,
+                        void* context) {
+  if (ptr != nullptr)
+    RecordFree(self->next, ptr, context);
+  self->next->free_definite_size_function(self->next, ptr, size, context);
+}
+
+// The allocator dispatch used to intercept heap operations.
+AllocatorDispatch allocator_dispatch = {&AllocFn,
+                                        &AllocZeroInitializedFn,
+                                        &AllocAlignedFn,
+                                        &ReallocFn,
+                                        &FreeFn,
+                                        &GetSizeEstimateFn,
+                                        &BatchMallocFn,
+                                        &BatchFreeFn,
+                                        &FreeDefiniteSizeFn,
+                                        nullptr};
+
+ThreadHeapUsage* GetOrCreateThreadUsage() {
+  auto tls_ptr = reinterpret_cast<uintptr_t>(ThreadAllocationUsage().Get());
+  if ((tls_ptr & kSentinelMask) == kSentinelMask)
+    return nullptr;  // Re-entrancy case.
+
+  auto* allocator_usage = reinterpret_cast<ThreadHeapUsage*>(tls_ptr);
+  if (allocator_usage == nullptr) {
+    // Prevent reentrancy due to the allocation below.
+    ThreadAllocationUsage().Set(kInitializationSentinel);
+
+    allocator_usage = new ThreadHeapUsage();
+    static_assert(std::is_pod<ThreadHeapUsage>::value,
+                  "AllocatorDispatch must be POD");
+    memset(allocator_usage, 0, sizeof(*allocator_usage));
+    ThreadAllocationUsage().Set(allocator_usage);
+  }
+
+  return allocator_usage;
+}
+
+}  // namespace
+
+ThreadHeapUsageTracker::ThreadHeapUsageTracker() : thread_usage_(nullptr) {
+  static_assert(std::is_pod<ThreadHeapUsage>::value, "Must be POD.");
+}
+
+ThreadHeapUsageTracker::~ThreadHeapUsageTracker() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  if (thread_usage_ != nullptr) {
+    // If this tracker wasn't stopped, make it inclusive so that the
+    // usage isn't lost.
+    Stop(false);
+  }
+}
+
+void ThreadHeapUsageTracker::Start() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  thread_usage_ = GetOrCreateThreadUsage();
+  usage_ = *thread_usage_;
+
+  // Reset the stats for our current scope.
+  // The per-thread usage instance now tracks this scope's usage, while this
+  // instance persists the outer scope's usage stats. On destruction, this
+  // instance will restore the outer scope's usage stats with this scope's
+  // usage added.
+  memset(thread_usage_, 0, sizeof(*thread_usage_));
+}
+
+void ThreadHeapUsageTracker::Stop(bool usage_is_exclusive) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  DCHECK_NE(nullptr, thread_usage_);
+
+  ThreadHeapUsage current = *thread_usage_;
+  if (usage_is_exclusive) {
+    // Restore the outer scope.
+    *thread_usage_ = usage_;
+  } else {
+    // Update the outer scope with the accrued inner usage.
+    if (thread_usage_->max_allocated_bytes) {
+      uint64_t outer_net_alloc_bytes = usage_.alloc_bytes - usage_.free_bytes;
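+      // The inner watermark was measured from a zeroed baseline, so re-base
+      // it against the bytes the outer scope had outstanding at Start().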
+
+      thread_usage_->max_allocated_bytes =
+          std::max(usage_.max_allocated_bytes,
+                   outer_net_alloc_bytes + thread_usage_->max_allocated_bytes);
+    }
+
+    thread_usage_->alloc_ops += usage_.alloc_ops;
+    thread_usage_->alloc_bytes += usage_.alloc_bytes;
+    thread_usage_->alloc_overhead_bytes += usage_.alloc_overhead_bytes;
+    thread_usage_->free_ops += usage_.free_ops;
+    thread_usage_->free_bytes += usage_.free_bytes;
+  }
+
+  thread_usage_ = nullptr;
+  usage_ = current;
+}
+
+ThreadHeapUsage ThreadHeapUsageTracker::GetUsageSnapshot() {
+  ThreadHeapUsage* usage = GetOrCreateThreadUsage();
+  DCHECK_NE(nullptr, usage);
+  return *usage;
+}
+
+void ThreadHeapUsageTracker::EnableHeapTracking() {
+  EnsureTLSInitialized();
+
+  CHECK_EQ(false, g_heap_tracking_enabled) << "No double-enabling.";
+  g_heap_tracking_enabled = true;
+#if BUILDFLAG(USE_ALLOCATOR_SHIM)
+  base::allocator::InsertAllocatorDispatch(&allocator_dispatch);
+#else
+  CHECK(false) << "Can't enable heap tracking without the shim.";
+#endif  // BUILDFLAG(USE_ALLOCATOR_SHIM)
+}
+
+bool ThreadHeapUsageTracker::IsHeapTrackingEnabled() {
+  return g_heap_tracking_enabled;
+}
+
+void ThreadHeapUsageTracker::DisableHeapTrackingForTesting() {
+#if BUILDFLAG(USE_ALLOCATOR_SHIM)
+  base::allocator::RemoveAllocatorDispatchForTesting(&allocator_dispatch);
+#else
+  CHECK(false) << "Can't disable heap tracking without the shim.";
+#endif  // BUILDFLAG(USE_ALLOCATOR_SHIM)
+  DCHECK_EQ(true, g_heap_tracking_enabled) << "Heap tracking not enabled.";
+  g_heap_tracking_enabled = false;
+}
+
+base::allocator::AllocatorDispatch*
+ThreadHeapUsageTracker::GetDispatchForTesting() {
+  return &allocator_dispatch;
+}
+
+void ThreadHeapUsageTracker::EnsureTLSInitialized() {
+  ignore_result(ThreadAllocationUsage());
+}
+
+}  // namespace debug
+}  // namespace base
diff --git a/base/debug/thread_heap_usage_tracker.h b/base/debug/thread_heap_usage_tracker.h
new file mode 100644
index 0000000..eb03b3f
--- /dev/null
+++ b/base/debug/thread_heap_usage_tracker.h
@@ -0,0 +1,117 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_DEBUG_THREAD_HEAP_USAGE_TRACKER_H_
+#define BASE_DEBUG_THREAD_HEAP_USAGE_TRACKER_H_
+
+#include <stdint.h>
+
+#include "base/allocator/buildflags.h"
+#include "base/base_export.h"
+#include "base/threading/thread_checker.h"
+
+namespace base {
+namespace allocator {
+struct AllocatorDispatch;
+}  // namespace allocator
+
+namespace debug {
+
+// Used to store the heap allocator usage in a scope.
+struct ThreadHeapUsage {
+  // The cumulative number of allocation operations.
+  uint64_t alloc_ops;
+
+  // The cumulative number of allocated bytes. Where available, this is
+  // inclusive of heap padding and estimated or actual heap overhead.
+  uint64_t alloc_bytes;
+
+  // Where available, cumulative number of heap padding and overhead bytes.
+  uint64_t alloc_overhead_bytes;
+
+  // The cumulative number of free operations.
+  uint64_t free_ops;
+
+  // The cumulative number of bytes freed.
+  // Only recorded if the underlying heap shim can return the size of an
+  // allocation.
+  uint64_t free_bytes;
+
+  // The maximal value of |alloc_bytes| - |free_bytes| seen for this thread.
+  // Only recorded if the underlying heap shim supports returning the size of
+  // an allocation.
+  uint64_t max_allocated_bytes;
+};
+
+// By keeping a tally on heap operations, it's possible to track:
+// - the number of alloc/free operations, where a realloc is zero or one
+//   of each, depending on the input parameters (see man realloc).
+// - the number of bytes allocated/freed.
+// - the number of estimated bytes of heap overhead used.
+// - the high-watermark amount of bytes allocated in the scope.
+// This in turn allows measuring the memory usage and memory usage churn over
+// a scope. Scopes must be cleanly nested, and each scope must be
+// destroyed on the thread where it's created.
+//
+// Note that this depends on the capabilities of the underlying heap shim. If
+// that shim cannot yield a size estimate for an allocation, it's not possible
+// to keep track of overhead, freed bytes, and the allocation high-water mark.
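+//
+// A minimal usage sketch (illustrative; DoWorkThatAllocates() stands in for
+// arbitrary allocating code):
+//
+//   ThreadHeapUsageTracker tracker;
+//   tracker.Start();
+//   DoWorkThatAllocates();
+//   tracker.Stop(false /* usage_is_exclusive */);
+//   uint64_t net_bytes =
+//       tracker.usage().alloc_bytes - tracker.usage().free_bytes;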
+class BASE_EXPORT ThreadHeapUsageTracker {
+ public:
+  ThreadHeapUsageTracker();
+  ~ThreadHeapUsageTracker();
+
+  // Start tracking heap usage on this thread.
+  // This may only be called on the thread where the instance is created.
+  // Note IsHeapTrackingEnabled() must be true.
+  void Start();
+
+  // Stop tracking heap usage on this thread and store the usage tallied.
+  // If |usage_is_exclusive| is true, the usage tallied won't be added to the
+  // outer scope's usage. If |usage_is_exclusive| is false, the usage tallied
+  // in this scope will also tally to any outer scope.
+  // This may only be called on the thread where the instance is created.
+  void Stop(bool usage_is_exclusive);
+
+  // After Stop(), returns the usage tallied from Start() to Stop().
+  const ThreadHeapUsage& usage() const { return usage_; }
+
+  // Returns this thread's heap usage from the start of the innermost
+  // enclosing ThreadHeapUsageTracker instance, if any.
+  static ThreadHeapUsage GetUsageSnapshot();
+
+  // Enables the heap intercept. May only be called once, and only if the heap
+  // shim is available, e.g. if BUILDFLAG(USE_ALLOCATOR_SHIM) is
+  // true.
+  static void EnableHeapTracking();
+
+  // Returns true iff heap tracking is enabled.
+  static bool IsHeapTrackingEnabled();
+
+ protected:
+  // Exposed for testing only - note that it's safe to re-EnableHeapTracking()
+  // after calling this function in tests.
+  static void DisableHeapTrackingForTesting();
+
+  // Exposed for testing only.
+  static void EnsureTLSInitialized();
+
+  // Exposed to allow testing the shim without inserting it in the allocator
+  // shim chain.
+  static base::allocator::AllocatorDispatch* GetDispatchForTesting();
+
+ private:
+  ThreadChecker thread_checker_;
+
+  // The heap usage at Start(), or the difference from Start() to Stop().
+  ThreadHeapUsage usage_;
+
+  // Points at this thread's heap usage tally, non-null from Start() to
+  // Stop().
+  ThreadHeapUsage* thread_usage_;
+};
+
+}  // namespace debug
+}  // namespace base
+
+#endif  // BASE_DEBUG_THREAD_HEAP_USAGE_TRACKER_H_
\ No newline at end of file
diff --git a/base/debug/thread_heap_usage_tracker_unittest.cc b/base/debug/thread_heap_usage_tracker_unittest.cc
new file mode 100644
index 0000000..b99576c
--- /dev/null
+++ b/base/debug/thread_heap_usage_tracker_unittest.cc
@@ -0,0 +1,607 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/thread_heap_usage_tracker.h"
+
+#include <map>
+
+#include "base/allocator/allocator_shim.h"
+#include "base/allocator/buildflags.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_MACOSX)
+#include "base/allocator/allocator_interception_mac.h"
+#endif
+
+namespace base {
+namespace debug {
+
+namespace {
+
+class TestingThreadHeapUsageTracker : public ThreadHeapUsageTracker {
+ public:
+  using ThreadHeapUsageTracker::DisableHeapTrackingForTesting;
+  using ThreadHeapUsageTracker::EnsureTLSInitialized;
+  using ThreadHeapUsageTracker::GetDispatchForTesting;
+};
+
+// A fixture class that allows testing the AllocatorDispatch associated with
+// the ThreadHeapUsageTracker class in isolation against a mocked underlying
+// heap implementation.
+class ThreadHeapUsageTrackerTest : public testing::Test {
+ public:
+  using AllocatorDispatch = base::allocator::AllocatorDispatch;
+
+  static const size_t kAllocationPadding;
+  enum SizeFunctionKind {
+    EXACT_SIZE_FUNCTION,
+    PADDING_SIZE_FUNCTION,
+    ZERO_SIZE_FUNCTION,
+  };
+
+  ThreadHeapUsageTrackerTest() : size_function_kind_(EXACT_SIZE_FUNCTION) {
+    EXPECT_EQ(nullptr, g_self);
+    g_self = this;
+  }
+
+  ~ThreadHeapUsageTrackerTest() override {
+    EXPECT_EQ(this, g_self);
+    g_self = nullptr;
+  }
+
+  void set_size_function_kind(SizeFunctionKind kind) {
+    size_function_kind_ = kind;
+  }
+
+  void SetUp() override {
+    TestingThreadHeapUsageTracker::EnsureTLSInitialized();
+
+    dispatch_under_test_ =
+        TestingThreadHeapUsageTracker::GetDispatchForTesting();
+    ASSERT_EQ(nullptr, dispatch_under_test_->next);
+
+    dispatch_under_test_->next = &g_mock_dispatch;
+  }
+
+  void TearDown() override {
+    ASSERT_EQ(&g_mock_dispatch, dispatch_under_test_->next);
+
+    dispatch_under_test_->next = nullptr;
+  }
+
+  void* MockMalloc(size_t size) {
+    return dispatch_under_test_->alloc_function(dispatch_under_test_, size,
+                                                nullptr);
+  }
+
+  void* MockCalloc(size_t n, size_t size) {
+    return dispatch_under_test_->alloc_zero_initialized_function(
+        dispatch_under_test_, n, size, nullptr);
+  }
+
+  void* MockAllocAligned(size_t alignment, size_t size) {
+    return dispatch_under_test_->alloc_aligned_function(
+        dispatch_under_test_, alignment, size, nullptr);
+  }
+
+  void* MockRealloc(void* address, size_t size) {
+    return dispatch_under_test_->realloc_function(dispatch_under_test_, address,
+                                                  size, nullptr);
+  }
+
+  void MockFree(void* address) {
+    dispatch_under_test_->free_function(dispatch_under_test_, address, nullptr);
+  }
+
+  size_t MockGetSizeEstimate(void* address) {
+    return dispatch_under_test_->get_size_estimate_function(
+        dispatch_under_test_, address, nullptr);
+  }
+
+ private:
+  void RecordAlloc(void* address, size_t size) {
+    if (address != nullptr)
+      allocation_size_map_[address] = size;
+  }
+
+  void DeleteAlloc(void* address) {
+    if (address != nullptr)
+      EXPECT_EQ(1U, allocation_size_map_.erase(address));
+  }
+
+  size_t GetSizeEstimate(void* address) {
+    auto it = allocation_size_map_.find(address);
+    if (it == allocation_size_map_.end())
+      return 0;
+
+    size_t ret = it->second;
+    switch (size_function_kind_) {
+      case EXACT_SIZE_FUNCTION:
+        break;
+      case PADDING_SIZE_FUNCTION:
+        ret += kAllocationPadding;
+        break;
+      case ZERO_SIZE_FUNCTION:
+        ret = 0;
+        break;
+    }
+
+    return ret;
+  }
+
+  static void* OnAllocFn(const AllocatorDispatch* self,
+                         size_t size,
+                         void* context) {
+    EXPECT_EQ(&g_mock_dispatch, self);
+
+    void* ret = malloc(size);
+    g_self->RecordAlloc(ret, size);
+    return ret;
+  }
+
+  static void* OnAllocZeroInitializedFn(const AllocatorDispatch* self,
+                                        size_t n,
+                                        size_t size,
+                                        void* context) {
+    EXPECT_EQ(&g_mock_dispatch, self);
+
+    void* ret = calloc(n, size);
+    g_self->RecordAlloc(ret, n * size);
+    return ret;
+  }
+
+  static void* OnAllocAlignedFn(const AllocatorDispatch* self,
+                                size_t alignment,
+                                size_t size,
+                                void* context) {
+    EXPECT_EQ(&g_mock_dispatch, self);
+
+    // This is a cheat as it doesn't return aligned allocations. This has the
+    // advantage of working for all platforms for this test.
+    void* ret = malloc(size);
+    g_self->RecordAlloc(ret, size);
+    return ret;
+  }
+
+  static void* OnReallocFn(const AllocatorDispatch* self,
+                           void* address,
+                           size_t size,
+                           void* context) {
+    EXPECT_EQ(&g_mock_dispatch, self);
+
+    g_self->DeleteAlloc(address);
+    void* ret = realloc(address, size);
+    g_self->RecordAlloc(ret, size);
+    return ret;
+  }
+
+  static void OnFreeFn(const AllocatorDispatch* self,
+                       void* address,
+                       void* context) {
+    EXPECT_EQ(&g_mock_dispatch, self);
+
+    g_self->DeleteAlloc(address);
+    free(address);
+  }
+
+  static size_t OnGetSizeEstimateFn(const AllocatorDispatch* self,
+                                    void* address,
+                                    void* context) {
+    EXPECT_EQ(&g_mock_dispatch, self);
+
+    return g_self->GetSizeEstimate(address);
+  }
+
+  using AllocationSizeMap = std::map<void*, size_t>;
+
+  SizeFunctionKind size_function_kind_;
+  AllocationSizeMap allocation_size_map_;
+  AllocatorDispatch* dispatch_under_test_;
+
+  static base::allocator::AllocatorDispatch g_mock_dispatch;
+  static ThreadHeapUsageTrackerTest* g_self;
+};
+
+const size_t ThreadHeapUsageTrackerTest::kAllocationPadding = 23;
+
+ThreadHeapUsageTrackerTest* ThreadHeapUsageTrackerTest::g_self = nullptr;
+
+base::allocator::AllocatorDispatch ThreadHeapUsageTrackerTest::g_mock_dispatch =
+    {
+        &ThreadHeapUsageTrackerTest::OnAllocFn,  // alloc_function
+        &ThreadHeapUsageTrackerTest::
+            OnAllocZeroInitializedFn,  // alloc_zero_initialized_function
+        &ThreadHeapUsageTrackerTest::
+            OnAllocAlignedFn,                      // alloc_aligned_function
+        &ThreadHeapUsageTrackerTest::OnReallocFn,  // realloc_function
+        &ThreadHeapUsageTrackerTest::OnFreeFn,     // free_function
+        &ThreadHeapUsageTrackerTest::
+            OnGetSizeEstimateFn,  // get_size_estimate_function
+        nullptr,                  // batch_malloc
+        nullptr,                  // batch_free
+        nullptr,                  // free_definite_size_function
+        nullptr,                  // next
+};
+
+}  // namespace
+
+TEST_F(ThreadHeapUsageTrackerTest, SimpleUsageWithExactSizeFunction) {
+  set_size_function_kind(EXACT_SIZE_FUNCTION);
+
+  ThreadHeapUsageTracker usage_tracker;
+  usage_tracker.Start();
+
+  ThreadHeapUsage u1 = ThreadHeapUsageTracker::GetUsageSnapshot();
+
+  EXPECT_EQ(0U, u1.alloc_ops);
+  EXPECT_EQ(0U, u1.alloc_bytes);
+  EXPECT_EQ(0U, u1.alloc_overhead_bytes);
+  EXPECT_EQ(0U, u1.free_ops);
+  EXPECT_EQ(0U, u1.free_bytes);
+  EXPECT_EQ(0U, u1.max_allocated_bytes);
+
+  const size_t kAllocSize = 1029U;
+  void* ptr = MockMalloc(kAllocSize);
+  MockFree(ptr);
+
+  usage_tracker.Stop(false);
+  ThreadHeapUsage u2 = usage_tracker.usage();
+
+  EXPECT_EQ(1U, u2.alloc_ops);
+  EXPECT_EQ(kAllocSize, u2.alloc_bytes);
+  EXPECT_EQ(0U, u2.alloc_overhead_bytes);
+  EXPECT_EQ(1U, u2.free_ops);
+  EXPECT_EQ(kAllocSize, u2.free_bytes);
+  EXPECT_EQ(kAllocSize, u2.max_allocated_bytes);
+}
+
+TEST_F(ThreadHeapUsageTrackerTest, SimpleUsageWithPaddingSizeFunction) {
+  set_size_function_kind(PADDING_SIZE_FUNCTION);
+
+  ThreadHeapUsageTracker usage_tracker;
+  usage_tracker.Start();
+
+  ThreadHeapUsage u1 = ThreadHeapUsageTracker::GetUsageSnapshot();
+
+  EXPECT_EQ(0U, u1.alloc_ops);
+  EXPECT_EQ(0U, u1.alloc_bytes);
+  EXPECT_EQ(0U, u1.alloc_overhead_bytes);
+  EXPECT_EQ(0U, u1.free_ops);
+  EXPECT_EQ(0U, u1.free_bytes);
+  EXPECT_EQ(0U, u1.max_allocated_bytes);
+
+  const size_t kAllocSize = 1029U;
+  void* ptr = MockMalloc(kAllocSize);
+  MockFree(ptr);
+
+  usage_tracker.Stop(false);
+  ThreadHeapUsage u2 = usage_tracker.usage();
+
+  EXPECT_EQ(1U, u2.alloc_ops);
+  EXPECT_EQ(kAllocSize + kAllocationPadding, u2.alloc_bytes);
+  EXPECT_EQ(kAllocationPadding, u2.alloc_overhead_bytes);
+  EXPECT_EQ(1U, u2.free_ops);
+  EXPECT_EQ(kAllocSize + kAllocationPadding, u2.free_bytes);
+  EXPECT_EQ(kAllocSize + kAllocationPadding, u2.max_allocated_bytes);
+}
+
+TEST_F(ThreadHeapUsageTrackerTest, SimpleUsageWithZeroSizeFunction) {
+  set_size_function_kind(ZERO_SIZE_FUNCTION);
+
+  ThreadHeapUsageTracker usage_tracker;
+  usage_tracker.Start();
+
+  ThreadHeapUsage u1 = ThreadHeapUsageTracker::GetUsageSnapshot();
+  EXPECT_EQ(0U, u1.alloc_ops);
+  EXPECT_EQ(0U, u1.alloc_bytes);
+  EXPECT_EQ(0U, u1.alloc_overhead_bytes);
+  EXPECT_EQ(0U, u1.free_ops);
+  EXPECT_EQ(0U, u1.free_bytes);
+  EXPECT_EQ(0U, u1.max_allocated_bytes);
+
+  const size_t kAllocSize = 1029U;
+  void* ptr = MockMalloc(kAllocSize);
+  MockFree(ptr);
+
+  usage_tracker.Stop(false);
+  ThreadHeapUsage u2 = usage_tracker.usage();
+
+  // With a get-size function that returns zero, there's no way to get the size
+  // of an allocation that's being freed, so the shim can't tally freed bytes
+  // or the high-watermark allocated bytes.
+  EXPECT_EQ(1U, u2.alloc_ops);
+  EXPECT_EQ(kAllocSize, u2.alloc_bytes);
+  EXPECT_EQ(0U, u2.alloc_overhead_bytes);
+  EXPECT_EQ(1U, u2.free_ops);
+  EXPECT_EQ(0U, u2.free_bytes);
+  EXPECT_EQ(0U, u2.max_allocated_bytes);
+}
+
+TEST_F(ThreadHeapUsageTrackerTest, ReallocCorrectlyTallied) {
+  const size_t kAllocSize = 237U;
+
+  {
+    ThreadHeapUsageTracker usage_tracker;
+    usage_tracker.Start();
+
+    // Reallocating nullptr should count as a single alloc.
+    void* ptr = MockRealloc(nullptr, kAllocSize);
+    ThreadHeapUsage usage = ThreadHeapUsageTracker::GetUsageSnapshot();
+    EXPECT_EQ(1U, usage.alloc_ops);
+    EXPECT_EQ(kAllocSize, usage.alloc_bytes);
+    EXPECT_EQ(0U, usage.alloc_overhead_bytes);
+    EXPECT_EQ(0U, usage.free_ops);
+    EXPECT_EQ(0U, usage.free_bytes);
+    EXPECT_EQ(kAllocSize, usage.max_allocated_bytes);
+
+    // Reallocating a valid pointer to a zero size should count as a single
+    // free.
+    ptr = MockRealloc(ptr, 0U);
+
+    usage_tracker.Stop(false);
+    EXPECT_EQ(1U, usage_tracker.usage().alloc_ops);
+    EXPECT_EQ(kAllocSize, usage_tracker.usage().alloc_bytes);
+    EXPECT_EQ(0U, usage_tracker.usage().alloc_overhead_bytes);
+    EXPECT_EQ(1U, usage_tracker.usage().free_ops);
+    EXPECT_EQ(kAllocSize, usage_tracker.usage().free_bytes);
+    EXPECT_EQ(kAllocSize, usage_tracker.usage().max_allocated_bytes);
+
+    // Realloc to a zero size may or may not return nullptr - if it returns a
+    // non-null pointer, make sure to free the zero-size allocation.
+    if (ptr != nullptr)
+      MockFree(ptr);
+  }
+
+  {
+    ThreadHeapUsageTracker usage_tracker;
+    usage_tracker.Start();
+
+    void* ptr = MockMalloc(kAllocSize);
+    ThreadHeapUsage usage = ThreadHeapUsageTracker::GetUsageSnapshot();
+    EXPECT_EQ(1U, usage.alloc_ops);
+
+    // Now try reallocating a valid pointer to a larger size, this should count
+    // as one free and one alloc.
+    const size_t kLargerAllocSize = kAllocSize + 928U;
+    ptr = MockRealloc(ptr, kLargerAllocSize);
+
+    usage_tracker.Stop(false);
+    EXPECT_EQ(2U, usage_tracker.usage().alloc_ops);
+    EXPECT_EQ(kAllocSize + kLargerAllocSize, usage_tracker.usage().alloc_bytes);
+    EXPECT_EQ(0U, usage_tracker.usage().alloc_overhead_bytes);
+    EXPECT_EQ(1U, usage_tracker.usage().free_ops);
+    EXPECT_EQ(kAllocSize, usage_tracker.usage().free_bytes);
+    EXPECT_EQ(kLargerAllocSize, usage_tracker.usage().max_allocated_bytes);
+
+    MockFree(ptr);
+  }
+}
+
+TEST_F(ThreadHeapUsageTrackerTest, NestedMaxWorks) {
+  ThreadHeapUsageTracker usage_tracker;
+  usage_tracker.Start();
+
+  const size_t kOuterAllocSize = 1029U;
+  void* ptr = MockMalloc(kOuterAllocSize);
+  MockFree(ptr);
+
+  EXPECT_EQ(kOuterAllocSize,
+            ThreadHeapUsageTracker::GetUsageSnapshot().max_allocated_bytes);
+
+  {
+    ThreadHeapUsageTracker inner_usage_tracker;
+    inner_usage_tracker.Start();
+
+    const size_t kInnerAllocSize = 673U;
+    ptr = MockMalloc(kInnerAllocSize);
+    MockFree(ptr);
+
+    inner_usage_tracker.Stop(false);
+
+    EXPECT_EQ(kInnerAllocSize, inner_usage_tracker.usage().max_allocated_bytes);
+  }
+
+  // The greater, outer allocation size should have been restored.
+  EXPECT_EQ(kOuterAllocSize,
+            ThreadHeapUsageTracker::GetUsageSnapshot().max_allocated_bytes);
+
+  const size_t kLargerInnerAllocSize = kOuterAllocSize + 673U;
+  {
+    ThreadHeapUsageTracker inner_usage_tracker;
+    inner_usage_tracker.Start();
+
+    ptr = MockMalloc(kLargerInnerAllocSize);
+    MockFree(ptr);
+
+    inner_usage_tracker.Stop(false);
+    EXPECT_EQ(kLargerInnerAllocSize,
+              inner_usage_tracker.usage().max_allocated_bytes);
+  }
+
+  // The greater, inner allocation size should have been preserved.
+  EXPECT_EQ(kLargerInnerAllocSize,
+            ThreadHeapUsageTracker::GetUsageSnapshot().max_allocated_bytes);
+
+  // Now try the case with an outstanding net alloc size when entering the
+  // inner scope.
+  void* outer_ptr = MockMalloc(kOuterAllocSize);
+  EXPECT_EQ(kLargerInnerAllocSize,
+            ThreadHeapUsageTracker::GetUsageSnapshot().max_allocated_bytes);
+  {
+    ThreadHeapUsageTracker inner_usage_tracker;
+    inner_usage_tracker.Start();
+
+    ptr = MockMalloc(kLargerInnerAllocSize);
+    MockFree(ptr);
+
+    inner_usage_tracker.Stop(false);
+    EXPECT_EQ(kLargerInnerAllocSize,
+              inner_usage_tracker.usage().max_allocated_bytes);
+  }
+
+  // While the inner scope saw only the inner net outstanding allocation size,
+  // the outer scope saw both outstanding at the same time.
+  EXPECT_EQ(kOuterAllocSize + kLargerInnerAllocSize,
+            ThreadHeapUsageTracker::GetUsageSnapshot().max_allocated_bytes);
+
+  MockFree(outer_ptr);
+
+  // Test a net-negative scope.
+  ptr = MockMalloc(kLargerInnerAllocSize);
+  {
+    ThreadHeapUsageTracker inner_usage_tracker;
+    inner_usage_tracker.Start();
+
+    MockFree(ptr);
+
+    const size_t kInnerAllocSize = 1;
+    ptr = MockMalloc(kInnerAllocSize);
+
+    inner_usage_tracker.Stop(false);
+    // Since the scope is still net-negative, the max is clamped at zero.
+    EXPECT_EQ(0U, inner_usage_tracker.usage().max_allocated_bytes);
+  }
+
+  MockFree(ptr);
+}
+
+TEST_F(ThreadHeapUsageTrackerTest, NoStopImpliesInclusive) {
+  ThreadHeapUsageTracker usage_tracker;
+  usage_tracker.Start();
+
+  const size_t kOuterAllocSize = 1029U;
+  void* ptr = MockMalloc(kOuterAllocSize);
+  MockFree(ptr);
+
+  ThreadHeapUsage usage = ThreadHeapUsageTracker::GetUsageSnapshot();
+  EXPECT_EQ(kOuterAllocSize, usage.max_allocated_bytes);
+
+  const size_t kInnerLargerAllocSize = kOuterAllocSize + 673U;
+
+  {
+    ThreadHeapUsageTracker inner_usage_tracker;
+    inner_usage_tracker.Start();
+
+    // Make a larger allocation than the outer scope.
+    ptr = MockMalloc(kInnerLargerAllocSize);
+    MockFree(ptr);
+
+    // inner_usage_tracker goes out of scope without a Stop().
+  }
+
+  ThreadHeapUsage current = ThreadHeapUsageTracker::GetUsageSnapshot();
+  EXPECT_EQ(usage.alloc_ops + 1, current.alloc_ops);
+  EXPECT_EQ(usage.alloc_bytes + kInnerLargerAllocSize, current.alloc_bytes);
+  EXPECT_EQ(usage.free_ops + 1, current.free_ops);
+  EXPECT_EQ(usage.free_bytes + kInnerLargerAllocSize, current.free_bytes);
+  EXPECT_EQ(kInnerLargerAllocSize, current.max_allocated_bytes);
+}
+
+TEST_F(ThreadHeapUsageTrackerTest, ExclusiveScopesWork) {
+  ThreadHeapUsageTracker usage_tracker;
+  usage_tracker.Start();
+
+  const size_t kOuterAllocSize = 1029U;
+  void* ptr = MockMalloc(kOuterAllocSize);
+  MockFree(ptr);
+
+  ThreadHeapUsage usage = ThreadHeapUsageTracker::GetUsageSnapshot();
+  EXPECT_EQ(kOuterAllocSize, usage.max_allocated_bytes);
+
+  {
+    ThreadHeapUsageTracker inner_usage_tracker;
+    inner_usage_tracker.Start();
+
+    // Make a larger allocation than the outer scope.
+    ptr = MockMalloc(kOuterAllocSize + 673U);
+    MockFree(ptr);
+
+    // This tracker is exclusive; all activity should be private to this scope.
+    inner_usage_tracker.Stop(true);
+  }
+
+  ThreadHeapUsage current = ThreadHeapUsageTracker::GetUsageSnapshot();
+  EXPECT_EQ(usage.alloc_ops, current.alloc_ops);
+  EXPECT_EQ(usage.alloc_bytes, current.alloc_bytes);
+  EXPECT_EQ(usage.alloc_overhead_bytes, current.alloc_overhead_bytes);
+  EXPECT_EQ(usage.free_ops, current.free_ops);
+  EXPECT_EQ(usage.free_bytes, current.free_bytes);
+  EXPECT_EQ(usage.max_allocated_bytes, current.max_allocated_bytes);
+}
+
+TEST_F(ThreadHeapUsageTrackerTest, AllShimFunctionsAreProvided) {
+  const size_t kAllocSize = 100;
+  void* alloc = MockMalloc(kAllocSize);
+  size_t estimate = MockGetSizeEstimate(alloc);
+  ASSERT_TRUE(estimate == 0 || estimate >= kAllocSize);
+  MockFree(alloc);
+
+  alloc = MockCalloc(kAllocSize, 1);
+  estimate = MockGetSizeEstimate(alloc);
+  ASSERT_TRUE(estimate == 0 || estimate >= kAllocSize);
+  MockFree(alloc);
+
+  alloc = MockAllocAligned(1, kAllocSize);
+  estimate = MockGetSizeEstimate(alloc);
+  ASSERT_TRUE(estimate == 0 || estimate >= kAllocSize);
+
+  alloc = MockRealloc(alloc, kAllocSize);
+  estimate = MockGetSizeEstimate(alloc);
+  ASSERT_TRUE(estimate == 0 || estimate >= kAllocSize);
+  MockFree(alloc);
+}
+
+#if BUILDFLAG(USE_ALLOCATOR_SHIM)
+class ThreadHeapUsageShimTest : public testing::Test {
+#if defined(OS_MACOSX)
+  void SetUp() override { allocator::InitializeAllocatorShim(); }
+  void TearDown() override { allocator::UninterceptMallocZonesForTesting(); }
+#endif
+};
+
+TEST_F(ThreadHeapUsageShimTest, HooksIntoMallocWhenShimAvailable) {
+  ASSERT_FALSE(ThreadHeapUsageTracker::IsHeapTrackingEnabled());
+
+  ThreadHeapUsageTracker::EnableHeapTracking();
+
+  ASSERT_TRUE(ThreadHeapUsageTracker::IsHeapTrackingEnabled());
+
+  const size_t kAllocSize = 9993;
+  // This test verifies that the scoped heap data is affected by malloc &
+  // free only when the shim is available.
+  ThreadHeapUsageTracker usage_tracker;
+  usage_tracker.Start();
+
+  ThreadHeapUsage u1 = ThreadHeapUsageTracker::GetUsageSnapshot();
+  void* ptr = malloc(kAllocSize);
+  // Prevent the compiler from optimizing out the malloc/free pair.
+  ASSERT_NE(nullptr, ptr);
+
+  ThreadHeapUsage u2 = ThreadHeapUsageTracker::GetUsageSnapshot();
+  free(ptr);
+
+  usage_tracker.Stop(false);
+  ThreadHeapUsage u3 = usage_tracker.usage();
+
+  // Verify that at least one allocation operation was recorded, and that the
+  // operation counts grow monotonically.
+  EXPECT_LE(0U, u1.alloc_ops);
+  EXPECT_LE(u1.alloc_ops + 1, u2.alloc_ops);
+  EXPECT_LE(u1.alloc_ops + 1, u3.alloc_ops);
+
+  // Verify that at least the bytes above were recorded.
+  EXPECT_LE(u1.alloc_bytes + kAllocSize, u2.alloc_bytes);
+
+  // Verify that at least the one free operation above was recorded.
+  EXPECT_LE(u2.free_ops + 1, u3.free_ops);
+
+  TestingThreadHeapUsageTracker::DisableHeapTrackingForTesting();
+
+  ASSERT_FALSE(ThreadHeapUsageTracker::IsHeapTrackingEnabled());
+}
+#endif  // BUILDFLAG(USE_ALLOCATOR_SHIM)
+
+}  // namespace debug
+}  // namespace base
diff --git a/base/deferred_sequenced_task_runner.cc b/base/deferred_sequenced_task_runner.cc
new file mode 100644
index 0000000..f88170c
--- /dev/null
+++ b/base/deferred_sequenced_task_runner.cc
@@ -0,0 +1,129 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/deferred_sequenced_task_runner.h"
+
+#include <utility>
+
+#include "base/bind.h"
+#include "base/logging.h"
+
+namespace base {
+
+DeferredSequencedTaskRunner::DeferredTask::DeferredTask()
+    : is_non_nestable(false) {
+}
+
+DeferredSequencedTaskRunner::DeferredTask::DeferredTask(DeferredTask&& other) =
+    default;
+
+DeferredSequencedTaskRunner::DeferredTask::~DeferredTask() = default;
+
+DeferredSequencedTaskRunner::DeferredTask&
+DeferredSequencedTaskRunner::DeferredTask::operator=(DeferredTask&& other) =
+    default;
+
+DeferredSequencedTaskRunner::DeferredSequencedTaskRunner(
+    scoped_refptr<SequencedTaskRunner> target_task_runner)
+    : DeferredSequencedTaskRunner() {
+  DCHECK(target_task_runner);
+  target_task_runner_ = std::move(target_task_runner);
+}
+
+DeferredSequencedTaskRunner::DeferredSequencedTaskRunner()
+    : created_thread_id_(PlatformThread::CurrentId()) {}
+
+bool DeferredSequencedTaskRunner::PostDelayedTask(const Location& from_here,
+                                                  OnceClosure task,
+                                                  TimeDelta delay) {
+  AutoLock lock(lock_);
+  if (started_) {
+    DCHECK(deferred_tasks_queue_.empty());
+    return target_task_runner_->PostDelayedTask(from_here, std::move(task),
+                                                delay);
+  }
+
+  QueueDeferredTask(from_here, std::move(task), delay,
+                    false /* is_non_nestable */);
+  return true;
+}
+
+bool DeferredSequencedTaskRunner::RunsTasksInCurrentSequence() const {
+  AutoLock lock(lock_);
+  if (target_task_runner_)
+    return target_task_runner_->RunsTasksInCurrentSequence();
+
+  return created_thread_id_ == PlatformThread::CurrentId();
+}
+
+bool DeferredSequencedTaskRunner::PostNonNestableDelayedTask(
+    const Location& from_here,
+    OnceClosure task,
+    TimeDelta delay) {
+  AutoLock lock(lock_);
+  if (started_) {
+    DCHECK(deferred_tasks_queue_.empty());
+    return target_task_runner_->PostNonNestableDelayedTask(
+        from_here, std::move(task), delay);
+  }
+  QueueDeferredTask(from_here, std::move(task), delay,
+                    true /* is_non_nestable */);
+  return true;
+}
+
+void DeferredSequencedTaskRunner::Start() {
+  AutoLock lock(lock_);
+  StartImpl();
+}
+
+void DeferredSequencedTaskRunner::StartWithTaskRunner(
+    scoped_refptr<SequencedTaskRunner> target_task_runner) {
+  AutoLock lock(lock_);
+  DCHECK(!target_task_runner_);
+  DCHECK(target_task_runner);
+  target_task_runner_ = std::move(target_task_runner);
+  StartImpl();
+}
+
+DeferredSequencedTaskRunner::~DeferredSequencedTaskRunner() = default;
+
+void DeferredSequencedTaskRunner::QueueDeferredTask(const Location& from_here,
+                                                    OnceClosure task,
+                                                    TimeDelta delay,
+                                                    bool is_non_nestable) {
+  lock_.AssertAcquired();
+
+  // Use CHECK instead of DCHECK to crash earlier. See http://crbug.com/711167
+  // for details.
+  CHECK(task);
+
+  DeferredTask deferred_task;
+  deferred_task.posted_from = from_here;
+  deferred_task.task = std::move(task);
+  deferred_task.delay = delay;
+  deferred_task.is_non_nestable = is_non_nestable;
+  deferred_tasks_queue_.push_back(std::move(deferred_task));
+}
+
+void DeferredSequencedTaskRunner::StartImpl() {
+  lock_.AssertAcquired();  // Callers should have grabbed the lock.
+  DCHECK(!started_);
+  started_ = true;
+  DCHECK(target_task_runner_);
+  for (DeferredTask& task : deferred_tasks_queue_) {
+    if (task.is_non_nestable) {
+      target_task_runner_->PostNonNestableDelayedTask(
+          task.posted_from, std::move(task.task), task.delay);
+    } else {
+      target_task_runner_->PostDelayedTask(task.posted_from,
+                                           std::move(task.task), task.delay);
+    }
+  }
+  deferred_tasks_queue_.clear();
+}
+
+}  // namespace base
diff --git a/base/deferred_sequenced_task_runner.h b/base/deferred_sequenced_task_runner.h
new file mode 100644
index 0000000..2805f47
--- /dev/null
+++ b/base/deferred_sequenced_task_runner.h
@@ -0,0 +1,97 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_DEFERRED_SEQUENCED_TASK_RUNNER_H_
+#define BASE_DEFERRED_SEQUENCED_TASK_RUNNER_H_
+
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/sequenced_task_runner.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
+
+namespace base {
+
+// A DeferredSequencedTaskRunner is a subclass of SequencedTaskRunner that
+// queues up all requests until the first call to Start() is issued.
+// DeferredSequencedTaskRunner may be created in two ways:
+// . with an explicit SequencedTaskRunner that the events are flushed to
+// . without a SequencedTaskRunner. In this configuration the
+//   SequencedTaskRunner is supplied in StartWithTaskRunner().
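+//
+// Illustrative usage (DoWork and |target| are hypothetical):
+//
+//   auto runner = base::MakeRefCounted<base::DeferredSequencedTaskRunner>();
+//   runner->PostTask(FROM_HERE, base::BindOnce(&DoWork));  // Queued for now.
+//   runner->StartWithTaskRunner(target);  // DoWork is posted to |target|.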
+class BASE_EXPORT DeferredSequencedTaskRunner : public SequencedTaskRunner {
+ public:
+  explicit DeferredSequencedTaskRunner(
+      scoped_refptr<SequencedTaskRunner> target_task_runner);
+
+  // Use this constructor when you don't have the target SequencedTaskRunner.
+  // When using this constructor, call StartWithTaskRunner() instead of Start().
+  DeferredSequencedTaskRunner();
+
+  // TaskRunner implementation
+  bool PostDelayedTask(const Location& from_here,
+                       OnceClosure task,
+                       TimeDelta delay) override;
+  bool RunsTasksInCurrentSequence() const override;
+
+  // SequencedTaskRunner implementation
+  bool PostNonNestableDelayedTask(const Location& from_here,
+                                  OnceClosure task,
+                                  TimeDelta delay) override;
+
+  // Starts execution: posts all queued tasks to the target task runner. The
+  // deferred tasks are posted with their initial delay, meaning that the task
+  // execution delay is actually measured from Start().
+  // Must not be called more than once.
+  void Start();
+
+  // Same as Start(), but must be used with the no-arg constructor.
+  void StartWithTaskRunner(
+      scoped_refptr<SequencedTaskRunner> target_task_runner);
+
+ private:
+  struct DeferredTask {
+    DeferredTask();
+    DeferredTask(DeferredTask&& other);
+    ~DeferredTask();
+    DeferredTask& operator=(DeferredTask&& other);
+
+    Location posted_from;
+    OnceClosure task;
+    // The delay this task was initially posted with.
+    TimeDelta delay;
+    bool is_non_nestable;
+  };
+
+  ~DeferredSequencedTaskRunner() override;
+
+  // Both variants of Start() call into this.
+  void StartImpl();
+
+  // Creates a |DeferredTask| object and adds it to |deferred_tasks_queue_|.
+  void QueueDeferredTask(const Location& from_here,
+                         OnceClosure task,
+                         TimeDelta delay,
+                         bool is_non_nestable);
+
+  // Protects |started_| and |deferred_tasks_queue_|.
+  mutable Lock lock_;
+
+  const PlatformThreadId created_thread_id_;
+
+  bool started_ = false;
+  scoped_refptr<SequencedTaskRunner> target_task_runner_;
+  std::vector<DeferredTask> deferred_tasks_queue_;
+
+  DISALLOW_COPY_AND_ASSIGN(DeferredSequencedTaskRunner);
+};
+
+}  // namespace base
+
+#endif  // BASE_DEFERRED_SEQUENCED_TASK_RUNNER_H_
diff --git a/base/deferred_sequenced_task_runner_unittest.cc b/base/deferred_sequenced_task_runner_unittest.cc
new file mode 100644
index 0000000..5cb220f
--- /dev/null
+++ b/base/deferred_sequenced_task_runner_unittest.cc
@@ -0,0 +1,214 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/deferred_sequenced_task_runner.h"
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/callback_forward.h"
+#include "base/location.h"
+#include "base/memory/ref_counted.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
+#include "base/threading/thread.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+class DeferredSequencedTaskRunnerTest : public testing::Test {
+ public:
+  class ExecuteTaskOnDestructor : public RefCounted<ExecuteTaskOnDestructor> {
+   public:
+    ExecuteTaskOnDestructor(
+        DeferredSequencedTaskRunnerTest* executor,
+        int task_id)
+        : executor_(executor),
+          task_id_(task_id) {
+    }
+   private:
+    friend class RefCounted<ExecuteTaskOnDestructor>;
+    virtual ~ExecuteTaskOnDestructor() { executor_->ExecuteTask(task_id_); }
+    DeferredSequencedTaskRunnerTest* executor_;
+    int task_id_;
+  };
+
+  void ExecuteTask(int task_id) {
+    AutoLock lock(lock_);
+    executed_task_ids_.push_back(task_id);
+  }
+
+  void PostExecuteTask(int task_id) {
+    runner_->PostTask(FROM_HERE,
+                      BindOnce(&DeferredSequencedTaskRunnerTest::ExecuteTask,
+                               Unretained(this), task_id));
+  }
+
+  void StartRunner() {
+    runner_->Start();
+  }
+
+  void DoNothing(ExecuteTaskOnDestructor* object) {
+  }
+
+ protected:
+  DeferredSequencedTaskRunnerTest()
+      : loop_(),
+        runner_(new DeferredSequencedTaskRunner(loop_.task_runner())) {}
+
+  MessageLoop loop_;
+  scoped_refptr<DeferredSequencedTaskRunner> runner_;
+  mutable Lock lock_;
+  std::vector<int> executed_task_ids_;
+};
+
+TEST_F(DeferredSequencedTaskRunnerTest, Stopped) {
+  PostExecuteTask(1);
+  RunLoop().RunUntilIdle();
+  EXPECT_THAT(executed_task_ids_, testing::ElementsAre());
+}
+
+TEST_F(DeferredSequencedTaskRunnerTest, Start) {
+  StartRunner();
+  PostExecuteTask(1);
+  RunLoop().RunUntilIdle();
+  EXPECT_THAT(executed_task_ids_, testing::ElementsAre(1));
+}
+
+TEST_F(DeferredSequencedTaskRunnerTest, StartWithMultipleElements) {
+  StartRunner();
+  for (int i = 1; i < 5; ++i)
+    PostExecuteTask(i);
+
+  RunLoop().RunUntilIdle();
+  EXPECT_THAT(executed_task_ids_, testing::ElementsAre(1, 2, 3, 4));
+}
+
+TEST_F(DeferredSequencedTaskRunnerTest, DeferredStart) {
+  PostExecuteTask(1);
+  RunLoop().RunUntilIdle();
+  EXPECT_THAT(executed_task_ids_, testing::ElementsAre());
+
+  StartRunner();
+  RunLoop().RunUntilIdle();
+  EXPECT_THAT(executed_task_ids_, testing::ElementsAre(1));
+
+  PostExecuteTask(2);
+  RunLoop().RunUntilIdle();
+  EXPECT_THAT(executed_task_ids_, testing::ElementsAre(1, 2));
+}
+
+TEST_F(DeferredSequencedTaskRunnerTest, DeferredStartWithMultipleElements) {
+  for (int i = 1; i < 5; ++i)
+    PostExecuteTask(i);
+  RunLoop().RunUntilIdle();
+  EXPECT_THAT(executed_task_ids_, testing::ElementsAre());
+
+  StartRunner();
+  for (int i = 5; i < 9; ++i)
+    PostExecuteTask(i);
+  RunLoop().RunUntilIdle();
+  EXPECT_THAT(executed_task_ids_, testing::ElementsAre(1, 2, 3, 4, 5, 6, 7, 8));
+}
+
+TEST_F(DeferredSequencedTaskRunnerTest, DeferredStartWithMultipleThreads) {
+  {
+    Thread thread1("DeferredSequencedTaskRunnerTestThread1");
+    Thread thread2("DeferredSequencedTaskRunnerTestThread2");
+    thread1.Start();
+    thread2.Start();
+    for (int i = 0; i < 5; ++i) {
+      thread1.task_runner()->PostTask(
+          FROM_HERE, BindOnce(&DeferredSequencedTaskRunnerTest::PostExecuteTask,
+                              Unretained(this), 2 * i));
+      thread2.task_runner()->PostTask(
+          FROM_HERE, BindOnce(&DeferredSequencedTaskRunnerTest::PostExecuteTask,
+                              Unretained(this), 2 * i + 1));
+      if (i == 2) {
+        thread1.task_runner()->PostTask(
+            FROM_HERE, BindOnce(&DeferredSequencedTaskRunnerTest::StartRunner,
+                                Unretained(this)));
+      }
+    }
+  }
+
+  RunLoop().RunUntilIdle();
+  EXPECT_THAT(executed_task_ids_,
+      testing::WhenSorted(testing::ElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)));
+}
+
+TEST_F(DeferredSequencedTaskRunnerTest, ObjectDestructionOrder) {
+  {
+    Thread thread("DeferredSequencedTaskRunnerTestThread");
+    thread.Start();
+    runner_ = new DeferredSequencedTaskRunner(thread.task_runner());
+    for (int i = 0; i < 5; ++i) {
+      {
+        // Use a block to ensure that no reference to |short_lived_object|
+        // is kept on the main thread after it is posted to |runner_|.
+        scoped_refptr<ExecuteTaskOnDestructor> short_lived_object =
+            new ExecuteTaskOnDestructor(this, 2 * i);
+        runner_->PostTask(
+            FROM_HERE,
+            BindOnce(&DeferredSequencedTaskRunnerTest::DoNothing,
+                     Unretained(this), RetainedRef(short_lived_object)));
+      }
+      // |short_lived_object| with id |2 * i| should be destroyed before the
+      // task |2 * i + 1| is executed.
+      PostExecuteTask(2 * i + 1);
+    }
+    StartRunner();
+  }
+
+  // Each |short_lived_object| with id |2 * i| is destroyed before the task
+  // with id |2 * i + 1| is executed.
+  EXPECT_THAT(executed_task_ids_,
+              testing::ElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8, 9));
+}
+
+void GetRunsTasksInCurrentSequence(bool* result,
+                                   scoped_refptr<SequencedTaskRunner> runner,
+                                   OnceClosure quit) {
+  *result = runner->RunsTasksInCurrentSequence();
+  std::move(quit).Run();
+}
+
+TEST_F(DeferredSequencedTaskRunnerTest, RunsTasksInCurrentSequence) {
+  scoped_refptr<DeferredSequencedTaskRunner> runner =
+      MakeRefCounted<DeferredSequencedTaskRunner>();
+  EXPECT_TRUE(runner->RunsTasksInCurrentSequence());
+
+  Thread thread1("DeferredSequencedTaskRunnerTestThread1");
+  thread1.Start();
+  bool runs_task_in_current_thread = true;
+  base::RunLoop run_loop;
+  thread1.task_runner()->PostTask(
+      FROM_HERE,
+      BindOnce(&GetRunsTasksInCurrentSequence, &runs_task_in_current_thread,
+               runner, run_loop.QuitClosure()));
+  run_loop.Run();
+  EXPECT_FALSE(runs_task_in_current_thread);
+}
+
+TEST_F(DeferredSequencedTaskRunnerTest, StartWithTaskRunner) {
+  scoped_refptr<DeferredSequencedTaskRunner> runner =
+      MakeRefCounted<DeferredSequencedTaskRunner>();
+  bool run_called = false;
+  base::RunLoop run_loop;
+  runner->PostTask(FROM_HERE,
+                   BindOnce(
+                       [](bool* run_called, base::Closure quit_closure) {
+                         *run_called = true;
+                         std::move(quit_closure).Run();
+                       },
+                       &run_called, run_loop.QuitClosure()));
+  runner->StartWithTaskRunner(loop_.task_runner());
+  run_loop.Run();
+  EXPECT_TRUE(run_called);
+}
+
+}  // namespace
+}  // namespace base
diff --git a/base/environment.cc b/base/environment.cc
new file mode 100644
index 0000000..cdea53c
--- /dev/null
+++ b/base/environment.cc
@@ -0,0 +1,238 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/environment.h"
+
+#include <stddef.h>
+
+#include <vector>
+
+#include "base/memory/ptr_util.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include <stdlib.h>
+#endif
+
+namespace base {
+
+namespace {
+
+class EnvironmentImpl : public Environment {
+ public:
+  bool GetVar(StringPiece variable_name, std::string* result) override {
+    if (GetVarImpl(variable_name, result))
+      return true;
+
+    // Some commonly used variable names are uppercase while others
+    // are lowercase, which is inconsistent. Let's try to be helpful
+    // and look for a variable name with the reverse case.
+    // E.g. HTTP_PROXY may be http_proxy for some users/systems.
+    char first_char = variable_name[0];
+    std::string alternate_case_var;
+    if (IsAsciiLower(first_char))
+      alternate_case_var = ToUpperASCII(variable_name);
+    else if (IsAsciiUpper(first_char))
+      alternate_case_var = ToLowerASCII(variable_name);
+    else
+      return false;
+    return GetVarImpl(alternate_case_var, result);
+  }
+
+  bool SetVar(StringPiece variable_name,
+              const std::string& new_value) override {
+    return SetVarImpl(variable_name, new_value);
+  }
+
+  bool UnSetVar(StringPiece variable_name) override {
+    return UnSetVarImpl(variable_name);
+  }
+
+ private:
+  bool GetVarImpl(StringPiece variable_name, std::string* result) {
+#if defined(OS_WIN)
+    DWORD value_length =
+        ::GetEnvironmentVariable(UTF8ToWide(variable_name).c_str(), nullptr, 0);
+    if (value_length == 0)
+      return false;
+    if (result) {
+      std::unique_ptr<wchar_t[]> value(new wchar_t[value_length]);
+      ::GetEnvironmentVariable(UTF8ToWide(variable_name).c_str(), value.get(),
+                               value_length);
+      *result = WideToUTF8(value.get());
+    }
+    return true;
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+    const char* env_value = getenv(variable_name.data());
+    if (!env_value)
+      return false;
+    // Note that the variable may be defined but empty.
+    if (result)
+      *result = env_value;
+    return true;
+#endif
+  }
+
+  bool SetVarImpl(StringPiece variable_name, const std::string& new_value) {
+#if defined(OS_WIN)
+    // On success, a nonzero value is returned.
+    return !!SetEnvironmentVariable(UTF8ToWide(variable_name).c_str(),
+                                    UTF8ToWide(new_value).c_str());
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+    // On success, zero is returned.
+    return !setenv(variable_name.data(), new_value.c_str(), 1);
+#endif
+  }
+
+  bool UnSetVarImpl(StringPiece variable_name) {
+#if defined(OS_WIN)
+    // On success, a nonzero value is returned.
+    return !!SetEnvironmentVariable(UTF8ToWide(variable_name).c_str(), nullptr);
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+    // On success, zero is returned.
+    return !unsetenv(variable_name.data());
+#endif
+  }
+};
+
+// Parses a null-terminated input string of an environment block. The key is
+// placed into the given string, and the total length of the line, including
+// the terminating null, is returned.
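+//
+// For example, given the input "KEY=value\0", |*key| becomes "KEY" and the
+// returned length is 10 (nine characters plus the terminating null).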
+size_t ParseEnvLine(const NativeEnvironmentString::value_type* input,
+                    NativeEnvironmentString* key) {
+  // Skip to the equals or end of the string, this is the key.
+  size_t cur = 0;
+  while (input[cur] && input[cur] != '=')
+    cur++;
+  *key = NativeEnvironmentString(&input[0], cur);
+
+  // Now just skip to the end of the string.
+  while (input[cur])
+    cur++;
+  return cur + 1;
+}
+
+}  // namespace
+
+namespace env_vars {
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+// On Posix systems, this variable contains the location of the user's home
+// directory (e.g., /home/username/).
+const char kHome[] = "HOME";
+#endif
+
+}  // namespace env_vars
+
+Environment::~Environment() = default;
+
+// static
+std::unique_ptr<Environment> Environment::Create() {
+  return std::make_unique<EnvironmentImpl>();
+}
+
+bool Environment::HasVar(StringPiece variable_name) {
+  return GetVar(variable_name, nullptr);
+}
+
+#if defined(OS_WIN)
+
+string16 AlterEnvironment(const wchar_t* env,
+                          const EnvironmentMap& changes) {
+  string16 result;
+
+  // First copy all unmodified values to the output.
+  size_t cur_env = 0;
+  string16 key;
+  while (env[cur_env]) {
+    const wchar_t* line = &env[cur_env];
+    size_t line_length = ParseEnvLine(line, &key);
+
+    // Keep only values not specified in the change vector.
+    EnvironmentMap::const_iterator found_change = changes.find(key);
+    if (found_change == changes.end())
+      result.append(line, line_length);
+
+    cur_env += line_length;
+  }
+
+  // Now append all modified and new values.
+  for (EnvironmentMap::const_iterator i = changes.begin();
+       i != changes.end(); ++i) {
+    if (!i->second.empty()) {
+      result.append(i->first);
+      result.push_back('=');
+      result.append(i->second);
+      result.push_back(0);
+    }
+  }
+
+  // An additional null marks the end of the list. We always need a double-null
+  // in case nothing was added above.
+  if (result.empty())
+    result.push_back(0);
+  result.push_back(0);
+  return result;
+}
+
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+
+std::unique_ptr<char* []> AlterEnvironment(const char* const* const env,
+                                           const EnvironmentMap& changes) {
+  std::string value_storage;  // Holds concatenated null-terminated strings.
+  std::vector<size_t> result_indices;  // Line indices into value_storage.
+
+  // First build up all of the unchanged environment strings. These are
+  // null-terminated of the form "key=value".
+  std::string key;
+  for (size_t i = 0; env[i]; i++) {
+    size_t line_length = ParseEnvLine(env[i], &key);
+
+    // Keep only values not specified in the change vector.
+    EnvironmentMap::const_iterator found_change = changes.find(key);
+    if (found_change == changes.end()) {
+      result_indices.push_back(value_storage.size());
+      value_storage.append(env[i], line_length);
+    }
+  }
+
+  // Now append all modified and new values.
+  for (EnvironmentMap::const_iterator i = changes.begin();
+       i != changes.end(); ++i) {
+    if (!i->second.empty()) {
+      result_indices.push_back(value_storage.size());
+      value_storage.append(i->first);
+      value_storage.push_back('=');
+      value_storage.append(i->second);
+      value_storage.push_back(0);
+    }
+  }
+
+  size_t pointer_count_required =
+      result_indices.size() + 1 +  // Null-terminated array of pointers.
+      (value_storage.size() + sizeof(char*) - 1) / sizeof(char*);  // Buffer.
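+  // Resulting layout: [ptr 0, ..., ptr N-1, nullptr, "key=value\0"...], so a
+  // single allocation owns both the pointer array and the string storage.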
+  std::unique_ptr<char* []> result(new char*[pointer_count_required]);
+
+  // The string storage goes after the array of pointers.
+  char* storage_data = reinterpret_cast<char*>(
+      &result.get()[result_indices.size() + 1]);
+  if (!value_storage.empty())
+    memcpy(storage_data, value_storage.data(), value_storage.size());
+
+  // Fill array of pointers at the beginning of the result.
+  for (size_t i = 0; i < result_indices.size(); i++)
+    result[i] = &storage_data[result_indices[i]];
+  result[result_indices.size()] = 0;  // Null terminator.
+
+  return result;
+}
+
+#endif  // OS_WIN
+
+}  // namespace base
diff --git a/base/environment.h b/base/environment.h
new file mode 100644
index 0000000..e842ab0
--- /dev/null
+++ b/base/environment.h
@@ -0,0 +1,90 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ENVIRONMENT_H_
+#define BASE_ENVIRONMENT_H_
+
+#include <map>
+#include <memory>
+#include <string>
+
+#include "base/base_export.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace env_vars {
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+BASE_EXPORT extern const char kHome[];
+#endif
+
+}  // namespace env_vars
+
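+// Wraps read/write access to the process environment. Illustrative usage:
+//
+//   std::unique_ptr<base::Environment> env = base::Environment::Create();
+//   std::string path;
+//   if (env->GetVar("PATH", &path)) {
+//     // Use |path|.
+//   }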
+class BASE_EXPORT Environment {
+ public:
+  virtual ~Environment();
+
+  // Returns the appropriate platform-specific instance.
+  static std::unique_ptr<Environment> Create();
+
+  // Gets an environment variable's value and stores it in |result|.
+  // Returns false if the key is unset.
+  virtual bool GetVar(StringPiece variable_name, std::string* result) = 0;
+
+  // Syntactic sugar for GetVar(variable_name, nullptr);
+  virtual bool HasVar(StringPiece variable_name);
+
+  // Returns true on success, otherwise returns false.
+  virtual bool SetVar(StringPiece variable_name,
+                      const std::string& new_value) = 0;
+
+  // Returns true on success, otherwise returns false.
+  virtual bool UnSetVar(StringPiece variable_name) = 0;
+};
+
+#if defined(OS_WIN)
+
+typedef string16 NativeEnvironmentString;
+typedef std::map<NativeEnvironmentString, NativeEnvironmentString>
+    EnvironmentMap;
+
+// Returns a modified environment vector constructed from the given environment
+// and the list of changes given in |changes|. Each key in the environment is
+// matched against the first element of the pairs. In the event of a match, the
+// value is replaced by the second of the pair, unless the second is empty, in
+// which case the key-value is removed.
+//
+// This Windows version takes and returns a Windows-style environment block
+// which is a concatenated list of null-terminated 16-bit strings. The end is
+// marked by a double-null terminator. The size of the returned string will
+// include the terminators.
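+//
+// For example, a block containing A=1 and B=2 is laid out as
+// L"A=1\0B=2\0\0" (with the extra null terminating the whole block).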
+BASE_EXPORT string16 AlterEnvironment(const wchar_t* env,
+                                      const EnvironmentMap& changes);
+
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+
+typedef std::string NativeEnvironmentString;
+typedef std::map<NativeEnvironmentString, NativeEnvironmentString>
+    EnvironmentMap;
+
+// See general comments for the Windows version above.
+//
+// This Posix version takes and returns a Posix-style environment block, which
+// is a null-terminated list of pointers to null-terminated strings. The
+// returned array will have appended to it the storage for the array itself so
+// there is only one pointer to manage, but this means that you can't copy the
+// array without keeping the original around.
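+//
+// Illustrative usage (|environ| is the POSIX global from <unistd.h>):
+//
+//   base::EnvironmentMap changes;
+//   changes["PATH"] = "/custom/bin";    // Added or replaced.
+//   changes["TMPDIR"] = std::string();  // An empty value removes the key.
+//   std::unique_ptr<char*[]> env = base::AlterEnvironment(environ, changes);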
+BASE_EXPORT std::unique_ptr<char* []> AlterEnvironment(
+    const char* const* env,
+    const EnvironmentMap& changes);
+
+#endif
+
+}  // namespace base
+
+#endif  // BASE_ENVIRONMENT_H_
diff --git a/base/environment_unittest.cc b/base/environment_unittest.cc
new file mode 100644
index 0000000..23aec51
--- /dev/null
+++ b/base/environment_unittest.cc
@@ -0,0 +1,171 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/environment.h"
+
+#include <memory>
+
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/platform_test.h"
+
+typedef PlatformTest EnvironmentTest;
+
+namespace base {
+
+namespace {
+
+constexpr char kValidEnvironmentVariable[] = "PATH";
+
+}  // namespace
+
+TEST_F(EnvironmentTest, GetVar) {
+  std::unique_ptr<Environment> env(Environment::Create());
+  std::string env_value;
+  EXPECT_TRUE(env->GetVar(kValidEnvironmentVariable, &env_value));
+  EXPECT_NE(env_value, "");
+}
+
+TEST_F(EnvironmentTest, GetVarReverse) {
+  std::unique_ptr<Environment> env(Environment::Create());
+  const char kFooUpper[] = "FOO";
+  const char kFooLower[] = "foo";
+
+  // Set a variable in UPPER case.
+  EXPECT_TRUE(env->SetVar(kFooUpper, kFooLower));
+
+  // And then try to get this variable passing the lower case.
+  std::string env_value;
+  EXPECT_TRUE(env->GetVar(kFooLower, &env_value));
+
+  EXPECT_STREQ(env_value.c_str(), kFooLower);
+
+  EXPECT_TRUE(env->UnSetVar(kFooUpper));
+
+  const char kBar[] = "bar";
+  // Now do the opposite, set the variable in the lower case.
+  EXPECT_TRUE(env->SetVar(kFooLower, kBar));
+
+  // And then try to get this variable passing the UPPER case.
+  EXPECT_TRUE(env->GetVar(kFooUpper, &env_value));
+
+  EXPECT_STREQ(env_value.c_str(), kBar);
+
+  EXPECT_TRUE(env->UnSetVar(kFooLower));
+}
+
+TEST_F(EnvironmentTest, HasVar) {
+  std::unique_ptr<Environment> env(Environment::Create());
+  EXPECT_TRUE(env->HasVar(kValidEnvironmentVariable));
+}
+
+TEST_F(EnvironmentTest, SetVar) {
+  std::unique_ptr<Environment> env(Environment::Create());
+
+  const char kFooUpper[] = "FOO";
+  const char kFooLower[] = "foo";
+  EXPECT_TRUE(env->SetVar(kFooUpper, kFooLower));
+
+  // Now verify that the environment has the new variable.
+  EXPECT_TRUE(env->HasVar(kFooUpper));
+
+  std::string var_value;
+  EXPECT_TRUE(env->GetVar(kFooUpper, &var_value));
+  EXPECT_EQ(var_value, kFooLower);
+}
+
+TEST_F(EnvironmentTest, UnSetVar) {
+  std::unique_ptr<Environment> env(Environment::Create());
+
+  const char kFooUpper[] = "FOO";
+  const char kFooLower[] = "foo";
+  // First set some environment variable.
+  EXPECT_TRUE(env->SetVar(kFooUpper, kFooLower));
+
+  // Now verify that the environment has the new variable.
+  EXPECT_TRUE(env->HasVar(kFooUpper));
+
+  // Finally verify that the environment variable was erased.
+  EXPECT_TRUE(env->UnSetVar(kFooUpper));
+
+  // And check that the variable has been unset.
+  EXPECT_FALSE(env->HasVar(kFooUpper));
+}
+
+#if defined(OS_WIN)
+
+TEST_F(EnvironmentTest, AlterEnvironment) {
+  const wchar_t empty[] = L"\0";
+  const wchar_t a2[] = L"A=2\0";
+  EnvironmentMap changes;
+  string16 e;
+
+  e = AlterEnvironment(empty, changes);
+  EXPECT_EQ(0, e[0]);
+
+  changes[L"A"] = L"1";
+  e = AlterEnvironment(empty, changes);
+  EXPECT_EQ(string16(L"A=1\0\0", 5), e);
+
+  changes.clear();
+  changes[L"A"] = string16();
+  e = AlterEnvironment(empty, changes);
+  EXPECT_EQ(string16(L"\0\0", 2), e);
+
+  changes.clear();
+  e = AlterEnvironment(a2, changes);
+  EXPECT_EQ(string16(L"A=2\0\0", 5), e);
+
+  changes.clear();
+  changes[L"A"] = L"1";
+  e = AlterEnvironment(a2, changes);
+  EXPECT_EQ(string16(L"A=1\0\0", 5), e);
+
+  changes.clear();
+  changes[L"A"] = string16();
+  e = AlterEnvironment(a2, changes);
+  EXPECT_EQ(string16(L"\0\0", 2), e);
+}
+
+#else
+
+TEST_F(EnvironmentTest, AlterEnvironment) {
+  const char* const empty[] = {nullptr};
+  const char* const a2[] = {"A=2", nullptr};
+  EnvironmentMap changes;
+  std::unique_ptr<char* []> e;
+
+  e = AlterEnvironment(empty, changes);
+  EXPECT_TRUE(e[0] == nullptr);
+
+  changes["A"] = "1";
+  e = AlterEnvironment(empty, changes);
+  EXPECT_EQ(std::string("A=1"), e[0]);
+  EXPECT_TRUE(e[1] == nullptr);
+
+  changes.clear();
+  changes["A"] = std::string();
+  e = AlterEnvironment(empty, changes);
+  EXPECT_TRUE(e[0] == nullptr);
+
+  changes.clear();
+  e = AlterEnvironment(a2, changes);
+  EXPECT_EQ(std::string("A=2"), e[0]);
+  EXPECT_TRUE(e[1] == nullptr);
+
+  changes.clear();
+  changes["A"] = "1";
+  e = AlterEnvironment(a2, changes);
+  EXPECT_EQ(std::string("A=1"), e[0]);
+  EXPECT_TRUE(e[1] == nullptr);
+
+  changes.clear();
+  changes["A"] = std::string();
+  e = AlterEnvironment(a2, changes);
+  EXPECT_TRUE(e[0] == nullptr);
+}
+
+#endif
+
+}  // namespace base
diff --git a/base/export_template.h b/base/export_template.h
new file mode 100644
index 0000000..aac8b7c
--- /dev/null
+++ b/base/export_template.h
@@ -0,0 +1,163 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_EXPORT_TEMPLATE_H_
+#define BASE_EXPORT_TEMPLATE_H_
+
+// Synopsis
+//
+// This header provides macros for using FOO_EXPORT macros with explicit
+// template instantiation declarations and definitions.
+// Generally, the FOO_EXPORT macros are used at declarations,
+// and GCC requires them to be used at explicit instantiation declarations,
+// but MSVC requires __declspec(dllexport) to be used at the explicit
+// instantiation definitions instead.
+
+// Usage
+//
+// In a header file, write:
+//
+//   extern template class EXPORT_TEMPLATE_DECLARE(FOO_EXPORT) foo<bar>;
+//
+// In a source file, write:
+//
+//   template class EXPORT_TEMPLATE_DEFINE(FOO_EXPORT) foo<bar>;
+
+// Implementation notes
+//
+// The implementation of this header uses some subtle macro semantics to
+// detect what the provided FOO_EXPORT value was defined as and then
+// to dispatch to appropriate macro definitions.  Unfortunately,
+// MSVC's C preprocessor is rather non-compliant and requires special
+// care to make it work.
+//
+// Issue 1.
+//
+//   #define F(x)
+//   F()
+//
+// MSVC emits warning C4003 ("not enough actual parameters for macro
+// 'F'"), even though it's a valid macro invocation.  This affects the
+// macros below that take just an "export" parameter, because export
+// may be empty.
+//
+// As a workaround, we can add a dummy parameter and arguments:
+//
+//   #define F(x,_)
+//   F(,)
+//
+// Issue 2.
+//
+//   #define F(x) G##x
+//   #define Gj() ok
+//   F(j())
+//
+// The correct replacement for "F(j())" is "ok", but MSVC replaces it
+// with "Gj()".  As a workaround, we can pass the result to an
+// identity macro to force MSVC to look for replacements again.  (This
+// is why EXPORT_TEMPLATE_STYLE_3 exists.)
+
+#define EXPORT_TEMPLATE_DECLARE(export) \
+  EXPORT_TEMPLATE_INVOKE(DECLARE, EXPORT_TEMPLATE_STYLE(export, ), export)
+#define EXPORT_TEMPLATE_DEFINE(export) \
+  EXPORT_TEMPLATE_INVOKE(DEFINE, EXPORT_TEMPLATE_STYLE(export, ), export)
+
+// INVOKE is an internal helper macro to perform parameter replacements
+// and token pasting to chain invoke another macro.  E.g.,
+//     EXPORT_TEMPLATE_INVOKE(DECLARE, DEFAULT, FOO_EXPORT)
+// will expand to call
+//     EXPORT_TEMPLATE_DECLARE_DEFAULT(FOO_EXPORT, )
+// (but with FOO_EXPORT expanded too).
+#define EXPORT_TEMPLATE_INVOKE(which, style, export) \
+  EXPORT_TEMPLATE_INVOKE_2(which, style, export)
+#define EXPORT_TEMPLATE_INVOKE_2(which, style, export) \
+  EXPORT_TEMPLATE_##which##_##style(export, )
+
+// Default style is to apply the FOO_EXPORT macro at declaration sites.
+#define EXPORT_TEMPLATE_DECLARE_DEFAULT(export, _) export
+#define EXPORT_TEMPLATE_DEFINE_DEFAULT(export, _)
+
+// The "MSVC hack" style is used when FOO_EXPORT is defined
+// as __declspec(dllexport), which MSVC requires to be used at
+// definition sites instead.
+#define EXPORT_TEMPLATE_DECLARE_MSVC_HACK(export, _)
+#define EXPORT_TEMPLATE_DEFINE_MSVC_HACK(export, _) export
+
+// EXPORT_TEMPLATE_STYLE is an internal helper macro that identifies which
+// export style needs to be used for the provided FOO_EXPORT macro definition.
+// "", "__attribute__(...)", and "__declspec(dllimport)" are mapped
+// to "DEFAULT"; while "__declspec(dllexport)" is mapped to "MSVC_HACK".
+//
+// It's implemented with token pasting to transform the __attribute__ and
+// __declspec annotations into macro invocations.  E.g., if FOO_EXPORT is
+// defined as "__declspec(dllimport)", it undergoes the following sequence of
+// macro substitutions:
+//     EXPORT_TEMPLATE_STYLE(FOO_EXPORT, )
+//     EXPORT_TEMPLATE_STYLE_2(__declspec(dllimport), )
+//     EXPORT_TEMPLATE_STYLE_3(EXPORT_TEMPLATE_STYLE_MATCH__declspec(dllimport))
+//     EXPORT_TEMPLATE_STYLE_MATCH__declspec(dllimport)
+//     EXPORT_TEMPLATE_STYLE_MATCH_DECLSPEC_dllimport
+//     DEFAULT
+#define EXPORT_TEMPLATE_STYLE(export, _) EXPORT_TEMPLATE_STYLE_2(export, )
+#define EXPORT_TEMPLATE_STYLE_2(export, _) \
+  EXPORT_TEMPLATE_STYLE_3(                 \
+      EXPORT_TEMPLATE_STYLE_MATCH_foj3FJo5StF0OvIzl7oMxA##export)
+#define EXPORT_TEMPLATE_STYLE_3(style) style
+
+// Internal helper macros for EXPORT_TEMPLATE_STYLE.
+//
+// XXX: C++ reserves all identifiers containing "__" for the implementation,
+// but "__attribute__" and "__declspec" already contain "__" and the token-paste
+// operator can only add characters, not remove them.  To minimize the risk of
+// conflict with implementations, we include "foj3FJo5StF0OvIzl7oMxA" (a random
+// 128-bit string, encoded in Base64) in the macro name.
+#define EXPORT_TEMPLATE_STYLE_MATCH_foj3FJo5StF0OvIzl7oMxA DEFAULT
+#define EXPORT_TEMPLATE_STYLE_MATCH_foj3FJo5StF0OvIzl7oMxA__attribute__(...) \
+  DEFAULT
+#define EXPORT_TEMPLATE_STYLE_MATCH_foj3FJo5StF0OvIzl7oMxA__declspec(arg) \
+  EXPORT_TEMPLATE_STYLE_MATCH_DECLSPEC_##arg
+
+// Internal helper macros for EXPORT_TEMPLATE_STYLE.
+#define EXPORT_TEMPLATE_STYLE_MATCH_DECLSPEC_dllexport MSVC_HACK
+#define EXPORT_TEMPLATE_STYLE_MATCH_DECLSPEC_dllimport DEFAULT
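+
+// For the MSVC_HACK case, the analogous expansion (a sketch) when FOO_EXPORT
+// is defined as "__declspec(dllexport)" ends at MSVC_HACK, so the annotation
+// is emitted at definition sites:
+//     EXPORT_TEMPLATE_STYLE(FOO_EXPORT, )
+//     EXPORT_TEMPLATE_STYLE_2(__declspec(dllexport), )
+//     EXPORT_TEMPLATE_STYLE_MATCH_foj3FJo5StF0OvIzl7oMxA__declspec(dllexport)
+//     EXPORT_TEMPLATE_STYLE_MATCH_DECLSPEC_dllexport
+//     MSVC_HACK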
+
+// Sanity checks.
+//
+// EXPORT_TEMPLATE_TEST uses the same macro invocation pattern as
+// EXPORT_TEMPLATE_DECLARE and EXPORT_TEMPLATE_DEFINE do to check that they're
+// working correctly.  When they're working correctly, the sequence of macro
+// replacements should go something like:
+//
+//     EXPORT_TEMPLATE_TEST(DEFAULT, __declspec(dllimport));
+//
+//     static_assert(EXPORT_TEMPLATE_INVOKE(TEST_DEFAULT,
+//         EXPORT_TEMPLATE_STYLE(__declspec(dllimport), ),
+//         __declspec(dllimport)), "__declspec(dllimport)");
+//
+//     static_assert(EXPORT_TEMPLATE_INVOKE(TEST_DEFAULT,
+//         DEFAULT, __declspec(dllimport)), "__declspec(dllimport)");
+//
+//     static_assert(EXPORT_TEMPLATE_TEST_DEFAULT_DEFAULT(
+//         __declspec(dllimport)), "__declspec(dllimport)");
+//
+//     static_assert(true, "__declspec(dllimport)");
+//
+// When they're not working correctly, a syntax error should occur instead.
+#define EXPORT_TEMPLATE_TEST(want, export)                                 \
+  static_assert(EXPORT_TEMPLATE_INVOKE(                                    \
+                    TEST_##want, EXPORT_TEMPLATE_STYLE(export, ), export), \
+                #export)
+#define EXPORT_TEMPLATE_TEST_DEFAULT_DEFAULT(...) true
+#define EXPORT_TEMPLATE_TEST_MSVC_HACK_MSVC_HACK(...) true
+
+EXPORT_TEMPLATE_TEST(DEFAULT, );
+EXPORT_TEMPLATE_TEST(DEFAULT, __attribute__((visibility("default"))));
+EXPORT_TEMPLATE_TEST(MSVC_HACK, __declspec(dllexport));
+EXPORT_TEMPLATE_TEST(DEFAULT, __declspec(dllimport));
+
+#undef EXPORT_TEMPLATE_TEST
+#undef EXPORT_TEMPLATE_TEST_DEFAULT_DEFAULT
+#undef EXPORT_TEMPLATE_TEST_MSVC_HACK_MSVC_HACK
+
+#endif  // BASE_EXPORT_TEMPLATE_H_
diff --git a/base/feature_list.cc b/base/feature_list.cc
new file mode 100644
index 0000000..1610eec
--- /dev/null
+++ b/base/feature_list.cc
@@ -0,0 +1,438 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/feature_list.h"
+
+#include <stddef.h>
+
+#include <utility>
+#include <vector>
+
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/metrics/field_trial.h"
+#include "base/pickle.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_util.h"
+
+namespace base {
+
+namespace {
+
+// Pointer to the FeatureList instance singleton that was set via
+// FeatureList::SetInstance(). Does not use base/memory/singleton.h in order to
+// have more control over initialization timing. Leaky.
+FeatureList* g_feature_list_instance = nullptr;
+
+// Tracks whether the FeatureList instance was initialized via an accessor.
+bool g_initialized_from_accessor = false;
+
+// An allocator entry for a feature in shared memory. The FeatureEntry is
+// followed by a base::Pickle object that contains the feature and trial name.
+struct FeatureEntry {
+  // SHA1(FeatureEntry): Increment this if structure changes!
+  static constexpr uint32_t kPersistentTypeId = 0x06567CA6 + 1;
+
+  // Expected size for 32/64-bit check.
+  static constexpr size_t kExpectedInstanceSize = 8;
+
+  // Specifies whether a feature override enables or disables the feature. Same
+  // values as the OverrideState enum in feature_list.h
+  uint32_t override_state;
+
+  // Size of the pickled structure, NOT the total size of this entry.
+  uint32_t pickle_size;
+
+  // Reads the feature and trial name from the pickle. Calling this is only
+  // valid on an initialized entry that's in shared memory.
+  bool GetFeatureAndTrialName(StringPiece* feature_name,
+                              StringPiece* trial_name) const {
+    const char* src =
+        reinterpret_cast<const char*>(this) + sizeof(FeatureEntry);
+
+    Pickle pickle(src, pickle_size);
+    PickleIterator pickle_iter(pickle);
+
+    if (!pickle_iter.ReadStringPiece(feature_name))
+      return false;
+
+    // Return true because we are not guaranteed to have a trial name anyway.
+    auto sink = pickle_iter.ReadStringPiece(trial_name);
+    ALLOW_UNUSED_LOCAL(sink);
+    return true;
+  }
+};
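+
+// Layout sketch (illustrative) of one entry in shared memory: the two uint32_t
+// fields above form the 8-byte header, followed immediately by the pickled
+// payload that GetFeatureAndTrialName() reads back:
+//
+//   | override_state (4) | pickle_size (4) | Pickle: feature name[, trial name] |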
+
+// Some characters are not allowed to appear in feature names or the associated
+// field trial names, as they are used as special characters for command-line
+// serialization. This function checks that the strings are ASCII (since they
+// are used in command-line API functions that require ASCII) and whether there
+// are any reserved characters present, returning true if the string is valid.
+// Only called in DCHECKs.
+bool IsValidFeatureOrFieldTrialName(const std::string& name) {
+  return IsStringASCII(name) && name.find_first_of(",<*") == std::string::npos;
+}
+
+}  // namespace
+
+#if DCHECK_IS_CONFIGURABLE
+const Feature kDCheckIsFatalFeature{"DcheckIsFatal",
+                                    base::FEATURE_DISABLED_BY_DEFAULT};
+#endif  // DCHECK_IS_CONFIGURABLE
+
+FeatureList::FeatureList() = default;
+
+FeatureList::~FeatureList() = default;
+
+void FeatureList::InitializeFromCommandLine(
+    const std::string& enable_features,
+    const std::string& disable_features) {
+  DCHECK(!initialized_);
+
+  // Process disabled features first, so that disabled ones take precedence over
+  // enabled ones (since RegisterOverride() uses insert()).
+  RegisterOverridesFromCommandLine(disable_features, OVERRIDE_DISABLE_FEATURE);
+  RegisterOverridesFromCommandLine(enable_features, OVERRIDE_ENABLE_FEATURE);
+
+  initialized_from_command_line_ = true;
+}
+
+void FeatureList::InitializeFromSharedMemory(
+    PersistentMemoryAllocator* allocator) {
+  DCHECK(!initialized_);
+
+  PersistentMemoryAllocator::Iterator iter(allocator);
+  const FeatureEntry* entry;
+  while ((entry = iter.GetNextOfObject<FeatureEntry>()) != nullptr) {
+    OverrideState override_state =
+        static_cast<OverrideState>(entry->override_state);
+
+    StringPiece feature_name;
+    StringPiece trial_name;
+    if (!entry->GetFeatureAndTrialName(&feature_name, &trial_name))
+      continue;
+
+    FieldTrial* trial = FieldTrialList::Find(trial_name.as_string());
+    RegisterOverride(feature_name, override_state, trial);
+  }
+}
+
+bool FeatureList::IsFeatureOverriddenFromCommandLine(
+    const std::string& feature_name,
+    OverrideState state) const {
+  auto it = overrides_.find(feature_name);
+  return it != overrides_.end() && it->second.overridden_state == state &&
+         !it->second.overridden_by_field_trial;
+}
+
+void FeatureList::AssociateReportingFieldTrial(
+    const std::string& feature_name,
+    OverrideState for_overridden_state,
+    FieldTrial* field_trial) {
+  DCHECK(
+      IsFeatureOverriddenFromCommandLine(feature_name, for_overridden_state));
+
+  // Only one associated field trial is supported per feature. This is generally
+  // enforced server-side.
+  OverrideEntry* entry = &overrides_.find(feature_name)->second;
+  if (entry->field_trial) {
+    NOTREACHED() << "Feature " << feature_name
+                 << " already has trial: " << entry->field_trial->trial_name()
+                 << ", associating trial: " << field_trial->trial_name();
+    return;
+  }
+
+  entry->field_trial = field_trial;
+}
+
+void FeatureList::RegisterFieldTrialOverride(const std::string& feature_name,
+                                             OverrideState override_state,
+                                             FieldTrial* field_trial) {
+  DCHECK(field_trial);
+  DCHECK(!ContainsKey(overrides_, feature_name) ||
+         !overrides_.find(feature_name)->second.field_trial)
+      << "Feature " << feature_name
+      << " has conflicting field trial overrides: "
+      << overrides_.find(feature_name)->second.field_trial->trial_name()
+      << " / " << field_trial->trial_name();
+
+  RegisterOverride(feature_name, override_state, field_trial);
+}
+
+void FeatureList::AddFeaturesToAllocator(PersistentMemoryAllocator* allocator) {
+  DCHECK(initialized_);
+
+  for (const auto& override : overrides_) {
+    Pickle pickle;
+    pickle.WriteString(override.first);
+    if (override.second.field_trial)
+      pickle.WriteString(override.second.field_trial->trial_name());
+
+    size_t total_size = sizeof(FeatureEntry) + pickle.size();
+    FeatureEntry* entry = allocator->New<FeatureEntry>(total_size);
+    if (!entry)
+      return;
+
+    entry->override_state = override.second.overridden_state;
+    entry->pickle_size = pickle.size();
+
+    char* dst = reinterpret_cast<char*>(entry) + sizeof(FeatureEntry);
+    memcpy(dst, pickle.data(), pickle.size());
+
+    allocator->MakeIterable(entry);
+  }
+}
+
+void FeatureList::GetFeatureOverrides(std::string* enable_overrides,
+                                      std::string* disable_overrides) {
+  GetFeatureOverridesImpl(enable_overrides, disable_overrides, false);
+}
+
+void FeatureList::GetCommandLineFeatureOverrides(
+    std::string* enable_overrides,
+    std::string* disable_overrides) {
+  GetFeatureOverridesImpl(enable_overrides, disable_overrides, true);
+}
+
+// static
+bool FeatureList::IsEnabled(const Feature& feature) {
+  if (!g_feature_list_instance) {
+    g_initialized_from_accessor = true;
+    return feature.default_state == FEATURE_ENABLED_BY_DEFAULT;
+  }
+  return g_feature_list_instance->IsFeatureEnabled(feature);
+}
+
+// static
+FieldTrial* FeatureList::GetFieldTrial(const Feature& feature) {
+  if (!g_feature_list_instance) {
+    g_initialized_from_accessor = true;
+    return nullptr;
+  }
+  return g_feature_list_instance->GetAssociatedFieldTrial(feature);
+}
+
+// static
+std::vector<base::StringPiece> FeatureList::SplitFeatureListString(
+    base::StringPiece input) {
+  return SplitStringPiece(input, ",", TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
+}
+
+// static
+bool FeatureList::InitializeInstance(const std::string& enable_features,
+                                     const std::string& disable_features) {
+  // We want to initialize a new instance here to support command-line features
+  // in testing better. For example, we initialize a dummy instance in
+  // base/test/test_suite.cc, and override it in content/browser/
+  // browser_main_loop.cc.
+  // On the other hand, we want to avoid re-initialization from command line.
+  // For example, we initialize an instance in chrome/browser/
+  // chrome_browser_main.cc and do not override it in content/browser/
+  // browser_main_loop.cc.
+  // If the singleton was previously initialized from within an accessor, we
+  // want to prevent callers from reinitializing the singleton and masking the
+  // accessor call(s) which likely returned incorrect information.
+  CHECK(!g_initialized_from_accessor);
+  bool instance_existed_before = false;
+  if (g_feature_list_instance) {
+    if (g_feature_list_instance->initialized_from_command_line_)
+      return false;
+
+    delete g_feature_list_instance;
+    g_feature_list_instance = nullptr;
+    instance_existed_before = true;
+  }
+
+  std::unique_ptr<base::FeatureList> feature_list(new base::FeatureList);
+  feature_list->InitializeFromCommandLine(enable_features, disable_features);
+  base::FeatureList::SetInstance(std::move(feature_list));
+  return !instance_existed_before;
+}
+
+// static
+FeatureList* FeatureList::GetInstance() {
+  return g_feature_list_instance;
+}
+
+// static
+void FeatureList::SetInstance(std::unique_ptr<FeatureList> instance) {
+  DCHECK(!g_feature_list_instance);
+  instance->FinalizeInitialization();
+
+  // Note: Intentional leak of global singleton.
+  g_feature_list_instance = instance.release();
+
+#if DCHECK_IS_CONFIGURABLE
+  // Update the behaviour of LOG_DCHECK to match the Feature configuration.
+  // DCHECK is also forced to be FATAL if we are running a death-test.
+  // TODO(asvitkine): If we find other use-cases that need integrating here
+  // then define a proper API/hook for the purpose.
+  if (base::FeatureList::IsEnabled(kDCheckIsFatalFeature) ||
+      base::CommandLine::ForCurrentProcess()->HasSwitch(
+          "gtest_internal_run_death_test")) {
+    logging::LOG_DCHECK = logging::LOG_FATAL;
+  } else {
+    logging::LOG_DCHECK = logging::LOG_INFO;
+  }
+#endif  // DCHECK_IS_CONFIGURABLE
+}
+
+// static
+std::unique_ptr<FeatureList> FeatureList::ClearInstanceForTesting() {
+  FeatureList* old_instance = g_feature_list_instance;
+  g_feature_list_instance = nullptr;
+  g_initialized_from_accessor = false;
+  return base::WrapUnique(old_instance);
+}
+
+// static
+void FeatureList::RestoreInstanceForTesting(
+    std::unique_ptr<FeatureList> instance) {
+  DCHECK(!g_feature_list_instance);
+  // Note: Intentional leak of global singleton.
+  g_feature_list_instance = instance.release();
+}
+
+void FeatureList::FinalizeInitialization() {
+  DCHECK(!initialized_);
+  initialized_ = true;
+}
+
+bool FeatureList::IsFeatureEnabled(const Feature& feature) {
+  DCHECK(initialized_);
+  DCHECK(IsValidFeatureOrFieldTrialName(feature.name)) << feature.name;
+  DCHECK(CheckFeatureIdentity(feature)) << feature.name;
+
+  auto it = overrides_.find(feature.name);
+  if (it != overrides_.end()) {
+    const OverrideEntry& entry = it->second;
+
+    // Activate the corresponding field trial, if necessary.
+    if (entry.field_trial)
+      entry.field_trial->group();
+
+    // TODO(asvitkine): Expand this section as more support is added.
+
+    // If marked as OVERRIDE_USE_DEFAULT, simply return the default state below.
+    if (entry.overridden_state != OVERRIDE_USE_DEFAULT)
+      return entry.overridden_state == OVERRIDE_ENABLE_FEATURE;
+  }
+  // Otherwise, return the default state.
+  return feature.default_state == FEATURE_ENABLED_BY_DEFAULT;
+}
+
+FieldTrial* FeatureList::GetAssociatedFieldTrial(const Feature& feature) {
+  DCHECK(initialized_);
+  DCHECK(IsValidFeatureOrFieldTrialName(feature.name)) << feature.name;
+  DCHECK(CheckFeatureIdentity(feature)) << feature.name;
+
+  auto it = overrides_.find(feature.name);
+  if (it != overrides_.end()) {
+    const OverrideEntry& entry = it->second;
+    return entry.field_trial;
+  }
+
+  return nullptr;
+}
+
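+// For illustration (a sketch of expected behavior): given feature_list
+// "A,B<Trial1" and OVERRIDE_ENABLE_FEATURE, this registers an enable override
+// for "A" with no associated trial, and one for "B" associated with the
+// already-created field trial "Trial1" (looked up via FieldTrialList::Find()).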
+void FeatureList::RegisterOverridesFromCommandLine(
+    const std::string& feature_list,
+    OverrideState overridden_state) {
+  for (const auto& value : SplitFeatureListString(feature_list)) {
+    StringPiece feature_name = value;
+    base::FieldTrial* trial = nullptr;
+
+    // The entry may be of the form FeatureName<FieldTrialName - in which case,
+    // this splits off the field trial name and associates it with the override.
+    std::string::size_type pos = feature_name.find('<');
+    if (pos != std::string::npos) {
+      feature_name.set(value.data(), pos);
+      trial = base::FieldTrialList::Find(value.substr(pos + 1).as_string());
+    }
+
+    RegisterOverride(feature_name, overridden_state, trial);
+  }
+}
+
+void FeatureList::RegisterOverride(StringPiece feature_name,
+                                   OverrideState overridden_state,
+                                   FieldTrial* field_trial) {
+  DCHECK(!initialized_);
+  if (field_trial) {
+    DCHECK(IsValidFeatureOrFieldTrialName(field_trial->trial_name()))
+        << field_trial->trial_name();
+  }
+  if (feature_name.starts_with("*")) {
+    feature_name = feature_name.substr(1);
+    overridden_state = OVERRIDE_USE_DEFAULT;
+  }
+
+  // Note: The semantics of insert() is that it does not overwrite the entry if
+  // one already exists for the key. Thus, only the first override for a given
+  // feature name takes effect.
+  overrides_.insert(std::make_pair(
+      feature_name.as_string(), OverrideEntry(overridden_state, field_trial)));
+}
+
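+// Serialization sketch (illustrative): overrides {A: enabled with trial T,
+// B: disabled, C: OVERRIDE_USE_DEFAULT with trial U} yield, in alphabetical
+// map order, enable_overrides = "A<T,*C<U" and disable_overrides = "B".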
+void FeatureList::GetFeatureOverridesImpl(std::string* enable_overrides,
+                                          std::string* disable_overrides,
+                                          bool command_line_only) {
+  DCHECK(initialized_);
+
+  enable_overrides->clear();
+  disable_overrides->clear();
+
+  // Note: Since |overrides_| is a std::map, iteration will be in alphabetical
+  // order. This ordering is not guaranteed to users of this function, but it
+  // lets tests assume the order.
+  for (const auto& entry : overrides_) {
+    if (command_line_only &&
+        (entry.second.field_trial != nullptr ||
+         entry.second.overridden_state == OVERRIDE_USE_DEFAULT)) {
+      continue;
+    }
+
+    std::string* target_list = nullptr;
+    switch (entry.second.overridden_state) {
+      case OVERRIDE_USE_DEFAULT:
+      case OVERRIDE_ENABLE_FEATURE:
+        target_list = enable_overrides;
+        break;
+      case OVERRIDE_DISABLE_FEATURE:
+        target_list = disable_overrides;
+        break;
+    }
+
+    if (!target_list->empty())
+      target_list->push_back(',');
+    if (entry.second.overridden_state == OVERRIDE_USE_DEFAULT)
+      target_list->push_back('*');
+    target_list->append(entry.first);
+    if (entry.second.field_trial) {
+      target_list->push_back('<');
+      target_list->append(entry.second.field_trial->trial_name());
+    }
+  }
+}
+
+bool FeatureList::CheckFeatureIdentity(const Feature& feature) {
+  AutoLock auto_lock(feature_identity_tracker_lock_);
+
+  auto it = feature_identity_tracker_.find(feature.name);
+  if (it == feature_identity_tracker_.end()) {
+    // If it's not tracked yet, register it.
+    feature_identity_tracker_[feature.name] = &feature;
+    return true;
+  }
+  // Compare address of |feature| to the existing tracked entry.
+  return it->second == &feature;
+}
+
+FeatureList::OverrideEntry::OverrideEntry(OverrideState overridden_state,
+                                          FieldTrial* field_trial)
+    : overridden_state(overridden_state),
+      field_trial(field_trial),
+      overridden_by_field_trial(field_trial != nullptr) {}
+
+}  // namespace base
diff --git a/base/feature_list.h b/base/feature_list.h
new file mode 100644
index 0000000..2237507
--- /dev/null
+++ b/base/feature_list.h
@@ -0,0 +1,310 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FEATURE_LIST_H_
+#define BASE_FEATURE_LIST_H_
+
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/gtest_prod_util.h"
+#include "base/macros.h"
+#include "base/metrics/persistent_memory_allocator.h"
+#include "base/strings/string_piece.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+
+class FieldTrial;
+
+// Specifies whether a given feature is enabled or disabled by default.
+enum FeatureState {
+  FEATURE_DISABLED_BY_DEFAULT,
+  FEATURE_ENABLED_BY_DEFAULT,
+};
+
+// The Feature struct is used to define the default state for a feature. See
+// comment below for more details. There must only ever be one struct instance
+// for a given feature name - generally defined as a constant global variable or
+// file static. It should never be declared constexpr, as that breaks
+// pointer-based identity lookup.
+struct BASE_EXPORT Feature {
+  // The name of the feature. This should be unique to each feature and is used
+  // for enabling/disabling features via command line flags and experiments.
+  // It is strongly recommended to use CamelCase style for feature names, e.g.
+  // "MyGreatFeature".
+  const char* const name;
+
+  // The default state (i.e. enabled or disabled) for this feature.
+  const FeatureState default_state;
+};
+
+#if DCHECK_IS_CONFIGURABLE
+// DCHECKs have been built-in, and are configurable at run-time to be fatal, or
+// not, via a DcheckIsFatal feature. We define the Feature here since it is
+// checked in FeatureList::SetInstance(). See https://crbug.com/596231.
+extern BASE_EXPORT const Feature kDCheckIsFatalFeature;
+#endif  // DCHECK_IS_CONFIGURABLE
+
+// The FeatureList class is used to determine whether a given feature is on or
+// off. It provides an authoritative answer, taking into account command-line
+// overrides and experimental control.
+//
+// The basic use case is for any feature that can be toggled (e.g. through
+// command-line or an experiment) to have a defined Feature struct, e.g.:
+//
+//   const base::Feature kMyGreatFeature {
+//     "MyGreatFeature", base::FEATURE_ENABLED_BY_DEFAULT
+//   };
+//
+// Then, client code that wishes to query the state of the feature would check:
+//
+//   if (base::FeatureList::IsEnabled(kMyGreatFeature)) {
+//     // Feature code goes here.
+//   }
+//
+// Behind the scenes, the above call would take into account any command-line
+// flags to enable or disable the feature, any experiments that may control it
+// and finally its default state (in that order of priority), to determine
+// whether the feature is on.
+//
+// Features can be explicitly forced on or off by specifying a list of comma-
+// separated feature names via the following command-line flags:
+//
+//   --enable-features=Feature5,Feature7
+//   --disable-features=Feature1,Feature2,Feature3
+//
+// To enable/disable features in a test, do NOT append --enable-features or
+// --disable-features to the command-line directly. Instead, use
+// ScopedFeatureList. See base/test/scoped_feature_list.h for details.
+//
+// After initialization (which should be done single-threaded), the FeatureList
+// API is thread safe.
+//
+// Note: This class is a singleton, but does not use base/memory/singleton.h in
+// order to have control over its initialization sequence. Specifically, the
+// intended use is to create an instance of this class and fully initialize it,
+// before setting it as the singleton for a process, via SetInstance().
+class BASE_EXPORT FeatureList {
+ public:
+  FeatureList();
+  ~FeatureList();
+
+  // Initializes feature overrides via command-line flags |enable_features| and
+  // |disable_features|, each of which is a comma-separated list of features to
+  // enable or disable, respectively. If a feature appears on both lists, then
+  // it will be disabled. If a list entry has the format "FeatureName<TrialName"
+  // then this initialization will also associate the feature state override
+  // with the named field trial, if it exists. If a feature name is prefixed
+  // with the '*' character, it will be created with OVERRIDE_USE_DEFAULT -
+  // which is useful for associating with a trial while using the default state.
+  // Must only be invoked during the initialization phase (before
+  // FinalizeInitialization() has been called).
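+  // Example (illustrative): InitializeFromCommandLine("A,*B<Trial1", "C")
+  // enables feature A, registers an OVERRIDE_USE_DEFAULT override for B
+  // associated with field trial "Trial1", and disables feature C.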
+  void InitializeFromCommandLine(const std::string& enable_features,
+                                 const std::string& disable_features);
+
+  // Initializes feature overrides through the field trial allocator, which
+  // we're using to store the feature names, their override state, and the name
+  // of the associated field trial.
+  void InitializeFromSharedMemory(PersistentMemoryAllocator* allocator);
+
+  // Specifies whether a feature override enables or disables the feature.
+  enum OverrideState {
+    OVERRIDE_USE_DEFAULT,
+    OVERRIDE_DISABLE_FEATURE,
+    OVERRIDE_ENABLE_FEATURE,
+  };
+
+  // Returns true if the state of |feature_name| has been overridden via
+  // |InitializeFromCommandLine()|.
+  bool IsFeatureOverriddenFromCommandLine(const std::string& feature_name,
+                                          OverrideState state) const;
+
+  // Associates a field trial for reporting purposes corresponding to the
+  // command-line setting the feature state to |for_overridden_state|. The trial
+  // will be activated when the state of the feature is first queried. This
+  // should be called during registration, after InitializeFromCommandLine() has
+  // been called but before the instance is registered via SetInstance().
+  void AssociateReportingFieldTrial(const std::string& feature_name,
+                                    OverrideState for_overridden_state,
+                                    FieldTrial* field_trial);
+
+  // Registers a field trial to override the enabled state of the specified
+  // feature to |override_state|. Command-line overrides still take precedence
+  // over field trials, so this will have no effect if the feature is being
+  // overridden from the command-line. The associated field trial will be
+  // activated when the feature state for this feature is queried. This should
+  // be called during registration, after InitializeFromCommandLine() has been
+  // called but before the instance is registered via SetInstance().
+  void RegisterFieldTrialOverride(const std::string& feature_name,
+                                  OverrideState override_state,
+                                  FieldTrial* field_trial);
+
+  // Loops through feature overrides and serializes them all into |allocator|.
+  void AddFeaturesToAllocator(PersistentMemoryAllocator* allocator);
+
+  // Returns comma-separated lists of feature names (in the same format that is
+  // accepted by InitializeFromCommandLine()) corresponding to features that
+  // have been overridden - either through command-line or via FieldTrials. For
+  // those features that have an associated FieldTrial, the output entry will be
+  // of the format "FeatureName<TrialName", where "TrialName" is the name of the
+  // FieldTrial. Features that have overrides with OVERRIDE_USE_DEFAULT will be
+  // added to |enable_overrides| with a '*' character prefix. Must be called
+  // only after the instance has been initialized and registered.
+  void GetFeatureOverrides(std::string* enable_overrides,
+                           std::string* disable_overrides);
+
+  // Like GetFeatureOverrides(), but only returns overrides that were specified
+  // explicitly on the command-line, omitting the ones from field trials.
+  void GetCommandLineFeatureOverrides(std::string* enable_overrides,
+                                      std::string* disable_overrides);
+
+  // Returns whether the given |feature| is enabled. Must only be called after
+  // the singleton instance has been registered via SetInstance(). Additionally,
+  // a feature with a given name must only have a single corresponding Feature
+  // struct, which is checked in builds with DCHECKs enabled.
+  static bool IsEnabled(const Feature& feature);
+
+  // Returns the field trial associated with the given |feature|. Must only be
+  // called after the singleton instance has been registered via SetInstance().
+  static FieldTrial* GetFieldTrial(const Feature& feature);
+
+  // Splits a comma-separated string containing feature names into a vector. The
+  // resulting pieces point to parts of |input|.
+  static std::vector<base::StringPiece> SplitFeatureListString(
+      base::StringPiece input);
+
+  // Initializes and sets an instance of FeatureList with feature overrides via
+  // command-line flags |enable_features| and |disable_features| if one has not
+  // already been set from command-line flags. Returns true if an instance did
+  // not previously exist. See InitializeFromCommandLine() for more details
+  // about |enable_features| and |disable_features| parameters.
+  static bool InitializeInstance(const std::string& enable_features,
+                                 const std::string& disable_features);
+
+  // Returns the singleton instance of FeatureList. Will return null until an
+  // instance is registered via SetInstance().
+  static FeatureList* GetInstance();
+
+  // Registers the given |instance| to be the singleton feature list for this
+  // process. This should only be called once and |instance| must not be null.
+  // Note: If you are considering using this for the purposes of testing, take
+  // a look at using base/test/scoped_feature_list.h instead.
+  static void SetInstance(std::unique_ptr<FeatureList> instance);
+
+  // Clears the previously-registered singleton instance for tests and returns
+  // the old instance.
+  // Note: Most tests should never call this directly. Instead consider using
+  // base::test::ScopedFeatureList.
+  static std::unique_ptr<FeatureList> ClearInstanceForTesting();
+
+  // Sets a given (initialized) |instance| to be the singleton feature list,
+  // for testing. Existing instance must be null. This is primarily intended
+  // to support base::test::ScopedFeatureList helper class.
+  static void RestoreInstanceForTesting(std::unique_ptr<FeatureList> instance);
+
+ private:
+  FRIEND_TEST_ALL_PREFIXES(FeatureListTest, CheckFeatureIdentity);
+  FRIEND_TEST_ALL_PREFIXES(FeatureListTest,
+                           StoreAndRetrieveFeaturesFromSharedMemory);
+  FRIEND_TEST_ALL_PREFIXES(FeatureListTest,
+                           StoreAndRetrieveAssociatedFeaturesFromSharedMemory);
+
+  struct OverrideEntry {
+    // The overridden enable (on/off) state of the feature.
+    const OverrideState overridden_state;
+
+    // An optional associated field trial, which will be activated when the
+    // state of the feature is queried for the first time. Weak pointer to the
+    // FieldTrial object that is owned by the FieldTrialList singleton.
+    base::FieldTrial* field_trial;
+
+    // Specifies whether the feature's state is overridden by |field_trial|.
+    // If it's not, and |field_trial| is not null, it means it is simply an
+    // associated field trial for reporting purposes (and |overridden_state|
+    // came from the command-line).
+    const bool overridden_by_field_trial;
+
+    // TODO(asvitkine): Expand this as more support is added.
+
+    // Constructs an OverrideEntry for the given |overridden_state|. If
+    // |field_trial| is not null, it implies that |overridden_state| comes from
+    // the trial, so |overridden_by_field_trial| will be set to true.
+    OverrideEntry(OverrideState overridden_state, FieldTrial* field_trial);
+  };
+
+  // Finalizes the initialization state of the FeatureList, so that no further
+  // overrides can be registered. This is called by SetInstance() on the
+  // singleton feature list that is being registered.
+  void FinalizeInitialization();
+
+  // Returns whether the given |feature| is enabled. This is invoked by the
+  // public FeatureList::IsEnabled() static function on the global singleton.
+  // Requires the FeatureList to have already been fully initialized.
+  bool IsFeatureEnabled(const Feature& feature);
+
+  // Returns the field trial associated with the given |feature|. This is
+  // invoked by the public FeatureList::GetFieldTrial() static function on the
+  // global singleton. Requires the FeatureList to have already been fully
+  // initialized.
+  base::FieldTrial* GetAssociatedFieldTrial(const Feature& feature);
+
+  // For each feature name in comma-separated list of strings |feature_list|,
+  // registers an override with the specified |overridden_state|. Also, will
+  // associate an optional named field trial if the entry is of the format
+  // "FeatureName<TrialName".
+  void RegisterOverridesFromCommandLine(const std::string& feature_list,
+                                        OverrideState overridden_state);
+
+  // Registers an override for feature |feature_name|. The override specifies
+  // whether the feature should be on or off (via |overridden_state|), which
+  // will take precedence over the feature's default state. If |field_trial| is
+  // not null, registers the specified field trial object to be associated with
+  // the feature, which will activate the field trial when the feature state is
+  // queried. If an override is already registered for the given feature, it
+  // will not be changed.
+  void RegisterOverride(StringPiece feature_name,
+                        OverrideState overridden_state,
+                        FieldTrial* field_trial);
+
+  // Implementation of GetFeatureOverrides() with a parameter that specifies
+  // whether only command-line enabled overrides should be emitted. See that
+  // function's comments for more details.
+  void GetFeatureOverridesImpl(std::string* enable_overrides,
+                               std::string* disable_overrides,
+                               bool command_line_only);
+
+  // Verifies that there's only a single definition of a Feature struct for a
+  // given feature name. Keeps track of the first seen Feature struct for each
+  // feature. Returns false when called on a Feature struct with a different
+  // address than the first one it saw for that feature name. Used only from
+  // DCHECKs and tests.
+  bool CheckFeatureIdentity(const Feature& feature);
+
+  // Map from feature name to an OverrideEntry struct for the feature, if it
+  // exists.
+  std::map<std::string, OverrideEntry> overrides_;
+
+  // Locked map that keeps track of seen features, to ensure a single feature is
+  // only defined once. This verification is only done in builds with DCHECKs
+  // enabled.
+  Lock feature_identity_tracker_lock_;
+  std::map<std::string, const Feature*> feature_identity_tracker_;
+
+  // Whether this object has been fully initialized. This gets set to true as a
+  // result of FinalizeInitialization().
+  bool initialized_ = false;
+
+  // Whether this object has been initialized from command line.
+  bool initialized_from_command_line_ = false;
+
+  DISALLOW_COPY_AND_ASSIGN(FeatureList);
+};
+
+}  // namespace base
+
+#endif  // BASE_FEATURE_LIST_H_
diff --git a/base/feature_list_unittest.cc b/base/feature_list_unittest.cc
new file mode 100644
index 0000000..164997a
--- /dev/null
+++ b/base/feature_list_unittest.cc
@@ -0,0 +1,542 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/feature_list.h"
+
+#include <stddef.h>
+
+#include <algorithm>
+#include <utility>
+
+#include "base/format_macros.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/metrics/field_trial.h"
+#include "base/metrics/persistent_memory_allocator.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+constexpr char kFeatureOnByDefaultName[] = "OnByDefault";
+struct Feature kFeatureOnByDefault {
+  kFeatureOnByDefaultName, FEATURE_ENABLED_BY_DEFAULT
+};
+
+constexpr char kFeatureOffByDefaultName[] = "OffByDefault";
+struct Feature kFeatureOffByDefault {
+  kFeatureOffByDefaultName, FEATURE_DISABLED_BY_DEFAULT
+};
+
+std::string SortFeatureListString(const std::string& feature_list) {
+  std::vector<base::StringPiece> features =
+      FeatureList::SplitFeatureListString(feature_list);
+  std::sort(features.begin(), features.end());
+  return JoinString(features, ",");
+}
+
+}  // namespace
+
+class FeatureListTest : public testing::Test {
+ public:
+  FeatureListTest() : feature_list_(nullptr) {
+    RegisterFeatureListInstance(WrapUnique(new FeatureList));
+  }
+  ~FeatureListTest() override { ClearFeatureListInstance(); }
+
+  void RegisterFeatureListInstance(std::unique_ptr<FeatureList> feature_list) {
+    FeatureList::ClearInstanceForTesting();
+    feature_list_ = feature_list.get();
+    FeatureList::SetInstance(std::move(feature_list));
+  }
+  void ClearFeatureListInstance() {
+    FeatureList::ClearInstanceForTesting();
+    feature_list_ = nullptr;
+  }
+
+  FeatureList* feature_list() { return feature_list_; }
+
+ private:
+  // Weak. Owned by the FeatureList singleton registered via SetInstance().
+  FeatureList* feature_list_;
+
+  DISALLOW_COPY_AND_ASSIGN(FeatureListTest);
+};
+
+TEST_F(FeatureListTest, DefaultStates) {
+  EXPECT_TRUE(FeatureList::IsEnabled(kFeatureOnByDefault));
+  EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOffByDefault));
+}
+
+TEST_F(FeatureListTest, InitializeFromCommandLine) {
+  struct {
+    const char* enable_features;
+    const char* disable_features;
+    bool expected_feature_on_state;
+    bool expected_feature_off_state;
+  } test_cases[] = {
+      {"", "", true, false},
+      {"OffByDefault", "", true, true},
+      {"OffByDefault", "OnByDefault", false, true},
+      {"OnByDefault,OffByDefault", "", true, true},
+      {"", "OnByDefault,OffByDefault", false, false},
+      // If an entry appears in both lists, disabling takes precedence.
+      {"OnByDefault", "OnByDefault,OffByDefault", false, false},
+  };
+
+  for (size_t i = 0; i < arraysize(test_cases); ++i) {
+    const auto& test_case = test_cases[i];
+    SCOPED_TRACE(base::StringPrintf("Test[%" PRIuS "]: [%s] [%s]", i,
+                                    test_case.enable_features,
+                                    test_case.disable_features));
+
+    ClearFeatureListInstance();
+    std::unique_ptr<FeatureList> feature_list(new FeatureList);
+    feature_list->InitializeFromCommandLine(test_case.enable_features,
+                                            test_case.disable_features);
+    RegisterFeatureListInstance(std::move(feature_list));
+
+    EXPECT_EQ(test_case.expected_feature_on_state,
+              FeatureList::IsEnabled(kFeatureOnByDefault))
+        << i;
+    EXPECT_EQ(test_case.expected_feature_off_state,
+              FeatureList::IsEnabled(kFeatureOffByDefault))
+        << i;
+  }
+}
+
+TEST_F(FeatureListTest, CheckFeatureIdentity) {
+  // Tests that CheckFeatureIdentity() correctly detects when two different
+  // structs with the same feature name are passed to it.
+
+  // Call it twice for each feature at the top of the file, since the first call
+  // makes it remember the entry and the second call will verify it.
+  EXPECT_TRUE(feature_list()->CheckFeatureIdentity(kFeatureOnByDefault));
+  EXPECT_TRUE(feature_list()->CheckFeatureIdentity(kFeatureOnByDefault));
+  EXPECT_TRUE(feature_list()->CheckFeatureIdentity(kFeatureOffByDefault));
+  EXPECT_TRUE(feature_list()->CheckFeatureIdentity(kFeatureOffByDefault));
+
+  // Now, call it with a distinct struct for |kFeatureOnByDefaultName|, which
+  // should return false.
+  struct Feature kFeatureOnByDefault2 {
+    kFeatureOnByDefaultName, FEATURE_ENABLED_BY_DEFAULT
+  };
+  EXPECT_FALSE(feature_list()->CheckFeatureIdentity(kFeatureOnByDefault2));
+}
+
+TEST_F(FeatureListTest, FieldTrialOverrides) {
+  struct {
+    FeatureList::OverrideState trial1_state;
+    FeatureList::OverrideState trial2_state;
+  } test_cases[] = {
+      {FeatureList::OVERRIDE_DISABLE_FEATURE,
+       FeatureList::OVERRIDE_DISABLE_FEATURE},
+      {FeatureList::OVERRIDE_DISABLE_FEATURE,
+       FeatureList::OVERRIDE_ENABLE_FEATURE},
+      {FeatureList::OVERRIDE_ENABLE_FEATURE,
+       FeatureList::OVERRIDE_DISABLE_FEATURE},
+      {FeatureList::OVERRIDE_ENABLE_FEATURE,
+       FeatureList::OVERRIDE_ENABLE_FEATURE},
+  };
+
+  FieldTrial::ActiveGroup active_group;
+  for (size_t i = 0; i < arraysize(test_cases); ++i) {
+    const auto& test_case = test_cases[i];
+    SCOPED_TRACE(base::StringPrintf("Test[%" PRIuS "]", i));
+
+    ClearFeatureListInstance();
+
+    FieldTrialList field_trial_list(nullptr);
+    std::unique_ptr<FeatureList> feature_list(new FeatureList);
+
+    FieldTrial* trial1 = FieldTrialList::CreateFieldTrial("TrialExample1", "A");
+    FieldTrial* trial2 = FieldTrialList::CreateFieldTrial("TrialExample2", "B");
+    feature_list->RegisterFieldTrialOverride(kFeatureOnByDefaultName,
+                                             test_case.trial1_state, trial1);
+    feature_list->RegisterFieldTrialOverride(kFeatureOffByDefaultName,
+                                             test_case.trial2_state, trial2);
+    RegisterFeatureListInstance(std::move(feature_list));
+
+    // Initially, neither trial should be active.
+    EXPECT_FALSE(FieldTrialList::IsTrialActive(trial1->trial_name()));
+    EXPECT_FALSE(FieldTrialList::IsTrialActive(trial2->trial_name()));
+
+    const bool expected_enabled_1 =
+        (test_case.trial1_state == FeatureList::OVERRIDE_ENABLE_FEATURE);
+    EXPECT_EQ(expected_enabled_1, FeatureList::IsEnabled(kFeatureOnByDefault));
+    // The above should have activated |trial1|.
+    EXPECT_TRUE(FieldTrialList::IsTrialActive(trial1->trial_name()));
+    EXPECT_FALSE(FieldTrialList::IsTrialActive(trial2->trial_name()));
+
+    const bool expected_enabled_2 =
+        (test_case.trial2_state == FeatureList::OVERRIDE_ENABLE_FEATURE);
+    EXPECT_EQ(expected_enabled_2, FeatureList::IsEnabled(kFeatureOffByDefault));
+    // The above should have activated |trial2|.
+    EXPECT_TRUE(FieldTrialList::IsTrialActive(trial1->trial_name()));
+    EXPECT_TRUE(FieldTrialList::IsTrialActive(trial2->trial_name()));
+  }
+}
+
+TEST_F(FeatureListTest, FieldTrialAssociateUseDefault) {
+  FieldTrialList field_trial_list(nullptr);
+  std::unique_ptr<FeatureList> feature_list(new FeatureList);
+
+  FieldTrial* trial1 = FieldTrialList::CreateFieldTrial("TrialExample1", "A");
+  FieldTrial* trial2 = FieldTrialList::CreateFieldTrial("TrialExample2", "B");
+  feature_list->RegisterFieldTrialOverride(
+      kFeatureOnByDefaultName, FeatureList::OVERRIDE_USE_DEFAULT, trial1);
+  feature_list->RegisterFieldTrialOverride(
+      kFeatureOffByDefaultName, FeatureList::OVERRIDE_USE_DEFAULT, trial2);
+  RegisterFeatureListInstance(std::move(feature_list));
+
+  // Initially, neither trial should be active.
+  EXPECT_FALSE(FieldTrialList::IsTrialActive(trial1->trial_name()));
+  EXPECT_FALSE(FieldTrialList::IsTrialActive(trial2->trial_name()));
+
+  // Check the feature enabled state is its default.
+  EXPECT_TRUE(FeatureList::IsEnabled(kFeatureOnByDefault));
+  // The above should have activated |trial1|.
+  EXPECT_TRUE(FieldTrialList::IsTrialActive(trial1->trial_name()));
+  EXPECT_FALSE(FieldTrialList::IsTrialActive(trial2->trial_name()));
+
+  // Check the feature enabled state is its default.
+  EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOffByDefault));
+  // The above should have activated |trial2|.
+  EXPECT_TRUE(FieldTrialList::IsTrialActive(trial1->trial_name()));
+  EXPECT_TRUE(FieldTrialList::IsTrialActive(trial2->trial_name()));
+}
+
+TEST_F(FeatureListTest, CommandLineTakesPrecedenceOverFieldTrial) {
+  ClearFeatureListInstance();
+
+  FieldTrialList field_trial_list(nullptr);
+  std::unique_ptr<FeatureList> feature_list(new FeatureList);
+
+  // The feature is explicitly enabled on the command-line.
+  feature_list->InitializeFromCommandLine(kFeatureOffByDefaultName, "");
+
+  // But the FieldTrial would set the feature to disabled.
+  FieldTrial* trial = FieldTrialList::CreateFieldTrial("TrialExample2", "A");
+  feature_list->RegisterFieldTrialOverride(
+      kFeatureOffByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE, trial);
+  RegisterFeatureListInstance(std::move(feature_list));
+
+  EXPECT_FALSE(FieldTrialList::IsTrialActive(trial->trial_name()));
+  // Command-line should take precedence.
+  EXPECT_TRUE(FeatureList::IsEnabled(kFeatureOffByDefault));
+  // Since the feature is on due to the command-line, and not as a result of the
+  // field trial, the field trial should not be activated (since the Associate*
+  // API wasn't used).
+  EXPECT_FALSE(FieldTrialList::IsTrialActive(trial->trial_name()));
+}
+
+TEST_F(FeatureListTest, IsFeatureOverriddenFromCommandLine) {
+  ClearFeatureListInstance();
+
+  FieldTrialList field_trial_list(nullptr);
+  std::unique_ptr<FeatureList> feature_list(new FeatureList);
+
+  // No features are overridden from the command line yet.
+  EXPECT_FALSE(feature_list->IsFeatureOverriddenFromCommandLine(
+      kFeatureOnByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE));
+  EXPECT_FALSE(feature_list->IsFeatureOverriddenFromCommandLine(
+      kFeatureOnByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE));
+  EXPECT_FALSE(feature_list->IsFeatureOverriddenFromCommandLine(
+      kFeatureOffByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE));
+  EXPECT_FALSE(feature_list->IsFeatureOverriddenFromCommandLine(
+      kFeatureOffByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE));
+
+  // Now, enable |kFeatureOffByDefaultName| via the command-line.
+  feature_list->InitializeFromCommandLine(kFeatureOffByDefaultName, "");
+
+  // It should now be overridden for the enabled group.
+  EXPECT_FALSE(feature_list->IsFeatureOverriddenFromCommandLine(
+      kFeatureOffByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE));
+  EXPECT_TRUE(feature_list->IsFeatureOverriddenFromCommandLine(
+      kFeatureOffByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE));
+
+  // Register a field trial to associate with the feature and ensure that the
+  // results are still the same.
+  feature_list->AssociateReportingFieldTrial(
+      kFeatureOffByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE,
+      FieldTrialList::CreateFieldTrial("Trial1", "A"));
+  EXPECT_FALSE(feature_list->IsFeatureOverriddenFromCommandLine(
+      kFeatureOffByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE));
+  EXPECT_TRUE(feature_list->IsFeatureOverriddenFromCommandLine(
+      kFeatureOffByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE));
+
+  // Now, register a field trial to override |kFeatureOnByDefaultName| state
+  // and check that the function still returns false for that feature.
+  feature_list->RegisterFieldTrialOverride(
+      kFeatureOnByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE,
+      FieldTrialList::CreateFieldTrial("Trial2", "A"));
+  EXPECT_FALSE(feature_list->IsFeatureOverriddenFromCommandLine(
+      kFeatureOnByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE));
+  EXPECT_FALSE(feature_list->IsFeatureOverriddenFromCommandLine(
+      kFeatureOnByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE));
+  RegisterFeatureListInstance(std::move(feature_list));
+
+  // Check the expected feature states for good measure.
+  EXPECT_TRUE(FeatureList::IsEnabled(kFeatureOffByDefault));
+  EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOnByDefault));
+}
+
+TEST_F(FeatureListTest, AssociateReportingFieldTrial) {
+  struct {
+    const char* enable_features;
+    const char* disable_features;
+    bool expected_enable_trial_created;
+    bool expected_disable_trial_created;
+  } test_cases[] = {
+      // If no enable/disable flags are specified, no trials should be created.
+      {"", "", false, false},
+      // Enabling the feature should result in the enable trial created.
+      {kFeatureOffByDefaultName, "", true, false},
+      // Disabling the feature should result in the disable trial created.
+      {"", kFeatureOffByDefaultName, false, true},
+  };
+
+  const char kTrialName[] = "ForcingTrial";
+  const char kForcedOnGroupName[] = "ForcedOn";
+  const char kForcedOffGroupName[] = "ForcedOff";
+
+  for (size_t i = 0; i < arraysize(test_cases); ++i) {
+    const auto& test_case = test_cases[i];
+    SCOPED_TRACE(base::StringPrintf("Test[%" PRIuS "]: [%s] [%s]", i,
+                                    test_case.enable_features,
+                                    test_case.disable_features));
+
+    ClearFeatureListInstance();
+
+    FieldTrialList field_trial_list(nullptr);
+    std::unique_ptr<FeatureList> feature_list(new FeatureList);
+    feature_list->InitializeFromCommandLine(test_case.enable_features,
+                                            test_case.disable_features);
+
+    FieldTrial* enable_trial = nullptr;
+    if (feature_list->IsFeatureOverriddenFromCommandLine(
+            kFeatureOffByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE)) {
+      enable_trial = base::FieldTrialList::CreateFieldTrial(kTrialName,
+                                                            kForcedOnGroupName);
+      feature_list->AssociateReportingFieldTrial(
+          kFeatureOffByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE,
+          enable_trial);
+    }
+    FieldTrial* disable_trial = nullptr;
+    if (feature_list->IsFeatureOverriddenFromCommandLine(
+            kFeatureOffByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE)) {
+      disable_trial = base::FieldTrialList::CreateFieldTrial(
+          kTrialName, kForcedOffGroupName);
+      feature_list->AssociateReportingFieldTrial(
+          kFeatureOffByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE,
+          disable_trial);
+    }
+    EXPECT_EQ(test_case.expected_enable_trial_created, enable_trial != nullptr);
+    EXPECT_EQ(test_case.expected_disable_trial_created,
+              disable_trial != nullptr);
+    RegisterFeatureListInstance(std::move(feature_list));
+
+    EXPECT_FALSE(FieldTrialList::IsTrialActive(kTrialName));
+    if (disable_trial) {
+      EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOffByDefault));
+      EXPECT_TRUE(FieldTrialList::IsTrialActive(kTrialName));
+      EXPECT_EQ(kForcedOffGroupName, disable_trial->group_name());
+    } else if (enable_trial) {
+      EXPECT_TRUE(FeatureList::IsEnabled(kFeatureOffByDefault));
+      EXPECT_TRUE(FieldTrialList::IsTrialActive(kTrialName));
+      EXPECT_EQ(kForcedOnGroupName, enable_trial->group_name());
+    }
+  }
+}
+
+TEST_F(FeatureListTest, GetFeatureOverrides) {
+  ClearFeatureListInstance();
+  FieldTrialList field_trial_list(nullptr);
+  std::unique_ptr<FeatureList> feature_list(new FeatureList);
+  feature_list->InitializeFromCommandLine("A,X", "D");
+
+  FieldTrial* trial = FieldTrialList::CreateFieldTrial("Trial", "Group");
+  feature_list->RegisterFieldTrialOverride(kFeatureOffByDefaultName,
+                                           FeatureList::OVERRIDE_ENABLE_FEATURE,
+                                           trial);
+
+  RegisterFeatureListInstance(std::move(feature_list));
+
+  std::string enable_features;
+  std::string disable_features;
+  FeatureList::GetInstance()->GetFeatureOverrides(&enable_features,
+                                                  &disable_features);
+  EXPECT_EQ("A,OffByDefault<Trial,X", SortFeatureListString(enable_features));
+  EXPECT_EQ("D", SortFeatureListString(disable_features));
+
+  FeatureList::GetInstance()->GetCommandLineFeatureOverrides(&enable_features,
+                                                             &disable_features);
+  EXPECT_EQ("A,X", SortFeatureListString(enable_features));
+  EXPECT_EQ("D", SortFeatureListString(disable_features));
+}
+
+TEST_F(FeatureListTest, GetFeatureOverrides_UseDefault) {
+  ClearFeatureListInstance();
+  FieldTrialList field_trial_list(nullptr);
+  std::unique_ptr<FeatureList> feature_list(new FeatureList);
+  feature_list->InitializeFromCommandLine("A,X", "D");
+
+  FieldTrial* trial = FieldTrialList::CreateFieldTrial("Trial", "Group");
+  feature_list->RegisterFieldTrialOverride(
+      kFeatureOffByDefaultName, FeatureList::OVERRIDE_USE_DEFAULT, trial);
+
+  RegisterFeatureListInstance(std::move(feature_list));
+
+  std::string enable_features;
+  std::string disable_features;
+  FeatureList::GetInstance()->GetFeatureOverrides(&enable_features,
+                                                  &disable_features);
+  EXPECT_EQ("*OffByDefault<Trial,A,X", SortFeatureListString(enable_features));
+  EXPECT_EQ("D", SortFeatureListString(disable_features));
+}
+
+TEST_F(FeatureListTest, GetFieldTrial) {
+  ClearFeatureListInstance();
+  FieldTrialList field_trial_list(nullptr);
+  FieldTrial* trial = FieldTrialList::CreateFieldTrial("Trial", "Group");
+  std::unique_ptr<FeatureList> feature_list(new FeatureList);
+  feature_list->RegisterFieldTrialOverride(
+      kFeatureOnByDefaultName, FeatureList::OVERRIDE_USE_DEFAULT, trial);
+  RegisterFeatureListInstance(std::move(feature_list));
+
+  EXPECT_EQ(trial, FeatureList::GetFieldTrial(kFeatureOnByDefault));
+  EXPECT_EQ(nullptr, FeatureList::GetFieldTrial(kFeatureOffByDefault));
+}
+
+TEST_F(FeatureListTest, InitializeFromCommandLine_WithFieldTrials) {
+  ClearFeatureListInstance();
+  FieldTrialList field_trial_list(nullptr);
+  FieldTrialList::CreateFieldTrial("Trial", "Group");
+  std::unique_ptr<FeatureList> feature_list(new FeatureList);
+  feature_list->InitializeFromCommandLine("A,OffByDefault<Trial,X", "D");
+  RegisterFeatureListInstance(std::move(feature_list));
+
+  EXPECT_FALSE(FieldTrialList::IsTrialActive("Trial"));
+  EXPECT_TRUE(FeatureList::IsEnabled(kFeatureOffByDefault));
+  EXPECT_TRUE(FieldTrialList::IsTrialActive("Trial"));
+}
+
+TEST_F(FeatureListTest, InitializeFromCommandLine_UseDefault) {
+  ClearFeatureListInstance();
+  FieldTrialList field_trial_list(nullptr);
+  FieldTrialList::CreateFieldTrial("T1", "Group");
+  FieldTrialList::CreateFieldTrial("T2", "Group");
+  std::unique_ptr<FeatureList> feature_list(new FeatureList);
+  feature_list->InitializeFromCommandLine(
+      "A,*OffByDefault<T1,*OnByDefault<T2,X", "D");
+  RegisterFeatureListInstance(std::move(feature_list));
+
+  EXPECT_FALSE(FieldTrialList::IsTrialActive("T1"));
+  EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOffByDefault));
+  EXPECT_TRUE(FieldTrialList::IsTrialActive("T1"));
+
+  EXPECT_FALSE(FieldTrialList::IsTrialActive("T2"));
+  EXPECT_TRUE(FeatureList::IsEnabled(kFeatureOnByDefault));
+  EXPECT_TRUE(FieldTrialList::IsTrialActive("T2"));
+}
+
+TEST_F(FeatureListTest, InitializeInstance) {
+  ClearFeatureListInstance();
+
+  std::unique_ptr<base::FeatureList> feature_list(new base::FeatureList);
+  FeatureList::SetInstance(std::move(feature_list));
+  EXPECT_TRUE(FeatureList::IsEnabled(kFeatureOnByDefault));
+  EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOffByDefault));
+
+  // Initialize from command line if we haven't yet.
+  FeatureList::InitializeInstance("", kFeatureOnByDefaultName);
+  EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOnByDefault));
+  EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOffByDefault));
+
+  // Do not initialize from commandline if we have already.
+  FeatureList::InitializeInstance(kFeatureOffByDefaultName, "");
+  EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOnByDefault));
+  EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOffByDefault));
+}
+
+TEST_F(FeatureListTest, UninitializedInstance_IsEnabledReturnsFalse) {
+  ClearFeatureListInstance();
+  // This test case simulates the calling pattern found in code which does not
+  // explicitly initialize the features list.
+  // All IsEnabled() calls should return the default value in this scenario.
+  EXPECT_EQ(nullptr, FeatureList::GetInstance());
+  EXPECT_TRUE(FeatureList::IsEnabled(kFeatureOnByDefault));
+  EXPECT_EQ(nullptr, FeatureList::GetInstance());
+  EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOffByDefault));
+}
+
+TEST_F(FeatureListTest, StoreAndRetrieveFeaturesFromSharedMemory) {
+  std::unique_ptr<base::FeatureList> feature_list(new base::FeatureList);
+
+  // Create some overrides.
+  feature_list->RegisterOverride(kFeatureOffByDefaultName,
+                                 FeatureList::OVERRIDE_ENABLE_FEATURE, nullptr);
+  feature_list->RegisterOverride(
+      kFeatureOnByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE, nullptr);
+  feature_list->FinalizeInitialization();
+
+  // Create an allocator and store the overrides.
+  std::unique_ptr<SharedMemory> shm(new SharedMemory());
+  shm->CreateAndMapAnonymous(4 << 10);
+  SharedPersistentMemoryAllocator allocator(std::move(shm), 1, "", false);
+  feature_list->AddFeaturesToAllocator(&allocator);
+
+  std::unique_ptr<base::FeatureList> feature_list2(new base::FeatureList);
+
+  // Check that the new feature list is empty.
+  EXPECT_FALSE(feature_list2->IsFeatureOverriddenFromCommandLine(
+      kFeatureOffByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE));
+  EXPECT_FALSE(feature_list2->IsFeatureOverriddenFromCommandLine(
+      kFeatureOnByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE));
+
+  feature_list2->InitializeFromSharedMemory(&allocator);
+  // Check that the new feature list now has 2 overrides.
+  EXPECT_TRUE(feature_list2->IsFeatureOverriddenFromCommandLine(
+      kFeatureOffByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE));
+  EXPECT_TRUE(feature_list2->IsFeatureOverriddenFromCommandLine(
+      kFeatureOnByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE));
+}
+
+TEST_F(FeatureListTest, StoreAndRetrieveAssociatedFeaturesFromSharedMemory) {
+  FieldTrialList field_trial_list(nullptr);
+  std::unique_ptr<base::FeatureList> feature_list(new base::FeatureList);
+
+  // Create some overrides.
+  FieldTrial* trial1 = FieldTrialList::CreateFieldTrial("TrialExample1", "A");
+  FieldTrial* trial2 = FieldTrialList::CreateFieldTrial("TrialExample2", "B");
+  feature_list->RegisterFieldTrialOverride(
+      kFeatureOnByDefaultName, FeatureList::OVERRIDE_USE_DEFAULT, trial1);
+  feature_list->RegisterFieldTrialOverride(
+      kFeatureOffByDefaultName, FeatureList::OVERRIDE_USE_DEFAULT, trial2);
+  feature_list->FinalizeInitialization();
+
+  // Create an allocator and store the overrides.
+  std::unique_ptr<SharedMemory> shm(new SharedMemory());
+  shm->CreateAndMapAnonymous(4 << 10);
+  SharedPersistentMemoryAllocator allocator(std::move(shm), 1, "", false);
+  feature_list->AddFeaturesToAllocator(&allocator);
+
+  std::unique_ptr<base::FeatureList> feature_list2(new base::FeatureList);
+  feature_list2->InitializeFromSharedMemory(&allocator);
+  feature_list2->FinalizeInitialization();
+
+  // Check that the field trials are still associated.
+  FieldTrial* associated_trial1 =
+      feature_list2->GetAssociatedFieldTrial(kFeatureOnByDefault);
+  FieldTrial* associated_trial2 =
+      feature_list2->GetAssociatedFieldTrial(kFeatureOffByDefault);
+  EXPECT_EQ(associated_trial1, trial1);
+  EXPECT_EQ(associated_trial2, trial2);
+}
+
+}  // namespace base
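
For orientation, a minimal sketch of how client code consumes the API these tests exercise once a FeatureList instance is in place; the feature name and helper function below are hypothetical and not part of this change:

    #include "base/feature_list.h"

    // Hypothetical feature; the string is what --enable-features /
    // --disable-features overrides match against.
    const base::Feature kMyExperiment{"MyExperiment",
                                      base::FEATURE_DISABLED_BY_DEFAULT};

    bool ShouldUseExperimentalPath() {
      // Returns the default state when no FeatureList instance exists, as
      // UninitializedInstance_IsEnabledReturnsFalse above verifies.
      return base::FeatureList::IsEnabled(kMyExperiment);
    }
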
diff --git a/base/file_descriptor_posix.h b/base/file_descriptor_posix.h
new file mode 100644
index 0000000..2a36611
--- /dev/null
+++ b/base/file_descriptor_posix.h
@@ -0,0 +1,59 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FILE_DESCRIPTOR_POSIX_H_
+#define BASE_FILE_DESCRIPTOR_POSIX_H_
+
+#include "base/files/file.h"
+#include "base/files/scoped_file.h"
+
+namespace base {
+
+// -----------------------------------------------------------------------------
+// We introduce a special structure for file descriptors so that we can use
+// template specialisation to special-case their handling.
+//
+// IMPORTANT: This is primarily intended for use when sending file descriptors
+// over IPC. Even if |auto_close| is true, base::FileDescriptor does NOT close()
+// |fd| when going out of scope. Instead, a consumer of a base::FileDescriptor
+// must invoke close() on |fd| if |auto_close| is true.
+//
+// In the case of IPC, the IPC subsystem knows to close() |fd| after sending
+// a message that contains a base::FileDescriptor if auto_close == true. On the
+// other end, the receiver must make sure to close() |fd| after it has finished
+// processing the IPC message. See the IPC::ParamTraits<> specialization in
+// ipc/ipc_message_utils.h for all the details.
+// -----------------------------------------------------------------------------
+struct FileDescriptor {
+  FileDescriptor() : fd(-1), auto_close(false) {}
+
+  FileDescriptor(int ifd, bool iauto_close) : fd(ifd), auto_close(iauto_close) {
+  }
+
+  FileDescriptor(File file) : fd(file.TakePlatformFile()), auto_close(true) {}
+  explicit FileDescriptor(ScopedFD fd) : fd(fd.release()), auto_close(true) {}
+
+  bool operator==(const FileDescriptor& other) const {
+    return (fd == other.fd && auto_close == other.auto_close);
+  }
+
+  bool operator!=(const FileDescriptor& other) const {
+    return !operator==(other);
+  }
+
+  // A comparison operator so that we can use these as keys in a std::map.
+  bool operator<(const FileDescriptor& other) const {
+    return fd < other.fd;
+  }
+
+  int fd;
+  // If true, this file descriptor should be closed after it has been used. For
+  // example an IPC system might interpret this flag as indicating that the
+  // file descriptor it has been given should be closed after use.
+  bool auto_close;
+};
+
+}  // namespace base
+
+#endif  // BASE_FILE_DESCRIPTOR_POSIX_H_
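
A short sketch of the ownership rule spelled out in the header comment: the consumer, not the struct, performs the close() when auto_close is set. ConsumeDescriptor is an illustrative name only:

    #include <unistd.h>

    #include "base/file_descriptor_posix.h"

    // Illustrative consumer: base::FileDescriptor never closes |fd| itself,
    // so a consumer must honor |auto_close| manually.
    void ConsumeDescriptor(const base::FileDescriptor& descriptor) {
      // ... use descriptor.fd ...
      if (descriptor.auto_close)
        close(descriptor.fd);
    }
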
diff --git a/base/file_descriptor_store.cc b/base/file_descriptor_store.cc
new file mode 100644
index 0000000..71cf2b3
--- /dev/null
+++ b/base/file_descriptor_store.cc
@@ -0,0 +1,73 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/file_descriptor_store.h"
+
+#include <utility>
+
+#include "base/logging.h"
+
+namespace base {
+
+FileDescriptorStore::Descriptor::Descriptor(const std::string& key,
+                                            base::ScopedFD fd)
+    : key(key),
+      fd(std::move(fd)),
+      region(base::MemoryMappedFile::Region::kWholeFile) {}
+
+FileDescriptorStore::Descriptor::Descriptor(
+    const std::string& key,
+    base::ScopedFD fd,
+    base::MemoryMappedFile::Region region)
+    : key(key), fd(std::move(fd)), region(region) {}
+
+FileDescriptorStore::Descriptor::Descriptor(
+    FileDescriptorStore::Descriptor&& other)
+    : key(other.key), fd(std::move(other.fd)), region(other.region) {}
+
+FileDescriptorStore::Descriptor::~Descriptor() = default;
+
+// static
+FileDescriptorStore& FileDescriptorStore::GetInstance() {
+  static FileDescriptorStore* store = new FileDescriptorStore;
+  return *store;
+}
+
+base::ScopedFD FileDescriptorStore::TakeFD(
+    const std::string& key,
+    base::MemoryMappedFile::Region* region) {
+  base::ScopedFD fd = MaybeTakeFD(key, region);
+  if (!fd.is_valid())
+    DLOG(DCHECK) << "Unknown global descriptor: " << key;
+  return fd;
+}
+
+base::ScopedFD FileDescriptorStore::MaybeTakeFD(
+    const std::string& key,
+    base::MemoryMappedFile::Region* region) {
+  auto iter = descriptors_.find(key);
+  if (iter == descriptors_.end())
+    return base::ScopedFD();
+  *region = iter->second.region;
+  base::ScopedFD result = std::move(iter->second.fd);
+  descriptors_.erase(iter);
+  return result;
+}
+
+void FileDescriptorStore::Set(const std::string& key, base::ScopedFD fd) {
+  Set(key, std::move(fd), base::MemoryMappedFile::Region::kWholeFile);
+}
+
+void FileDescriptorStore::Set(const std::string& key,
+                              base::ScopedFD fd,
+                              base::MemoryMappedFile::Region region) {
+  Descriptor descriptor(key, std::move(fd), region);
+  descriptors_.insert(std::make_pair(key, std::move(descriptor)));
+}
+
+FileDescriptorStore::FileDescriptorStore() = default;
+
+FileDescriptorStore::~FileDescriptorStore() = default;
+
+}  // namespace base
diff --git a/base/file_descriptor_store.h b/base/file_descriptor_store.h
new file mode 100644
index 0000000..b6bd079
--- /dev/null
+++ b/base/file_descriptor_store.h
@@ -0,0 +1,73 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FILE_DESCRIPTOR_STORE_H_
+#define BASE_FILE_DESCRIPTOR_STORE_H_
+
+#include <map>
+#include <string>
+
+#include "base/files/memory_mapped_file.h"
+#include "base/files/scoped_file.h"
+#include "base/macros.h"
+
+namespace base {
+
+// The file descriptor store is used to associate file descriptors with keys
+// that must be unique.
+// It is used to share file descriptors from a process to its child.
+class BASE_EXPORT FileDescriptorStore {
+ public:
+  struct Descriptor {
+    Descriptor(const std::string& key, base::ScopedFD fd);
+    Descriptor(const std::string& key,
+               base::ScopedFD fd,
+               base::MemoryMappedFile::Region region);
+    Descriptor(Descriptor&& other);
+    ~Descriptor();
+
+    Descriptor& operator=(Descriptor&& other) = default;
+
+    // Globally unique key.
+    std::string key;
+    // Actual FD.
+    base::ScopedFD fd;
+    // Optional region, defaults to kWholeFile.
+    base::MemoryMappedFile::Region region;
+  };
+  using Mapping = std::map<std::string, Descriptor>;
+
+  // Returns the singleton instance of FileDescriptorStore.
+  static FileDescriptorStore& GetInstance();
+
+  // Gets a descriptor given a key and also populates |region|.
+  // It is a fatal error if the key is not known.
+  base::ScopedFD TakeFD(const std::string& key,
+                        base::MemoryMappedFile::Region* region);
+
+  // Gets a descriptor given a key. Returns an empty ScopedFD on error.
+  base::ScopedFD MaybeTakeFD(const std::string& key,
+                             base::MemoryMappedFile::Region* region);
+
+  // Sets the descriptor for the given |key|. This sets the region associated
+  // with |key| to kWholeFile.
+  void Set(const std::string& key, base::ScopedFD fd);
+
+  // Sets the descriptor and |region| for the given |key|.
+  void Set(const std::string& key,
+           base::ScopedFD fd,
+           base::MemoryMappedFile::Region region);
+
+ private:
+  FileDescriptorStore();
+  ~FileDescriptorStore();
+
+  Mapping descriptors_;
+
+  DISALLOW_COPY_AND_ASSIGN(FileDescriptorStore);
+};
+
+}  // namespace base
+
+#endif  // BASE_FILE_DESCRIPTOR_STORE_H_
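
The two halves of the store pair up as in the following sketch: early startup code registers a descriptor inherited from the launcher under a well-known key, and a later consumer retrieves it by that key. The key string "config-file" is made up for illustration:

    #include <utility>

    #include "base/file_descriptor_store.h"
    #include "base/files/memory_mapped_file.h"
    #include "base/files/scoped_file.h"

    // Early in child startup: publish an inherited FD under a well-known key.
    void Publish(base::ScopedFD fd) {
      base::FileDescriptorStore::GetInstance().Set("config-file",
                                                   std::move(fd));
    }

    // Later, in the component that needs it. MaybeTakeFD returns an invalid
    // ScopedFD instead of logging when the key is unknown.
    base::ScopedFD Retrieve(base::MemoryMappedFile::Region* region) {
      return base::FileDescriptorStore::GetInstance().MaybeTakeFD(
          "config-file", region);
    }
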
diff --git a/base/file_version_info.h b/base/file_version_info.h
new file mode 100644
index 0000000..3b9457c
--- /dev/null
+++ b/base/file_version_info.h
@@ -0,0 +1,73 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FILE_VERSION_INFO_H_
+#define BASE_FILE_VERSION_INFO_H_
+
+#include <string>
+
+#include "build/build_config.h"
+#include "base/base_export.h"
+#include "base/strings/string16.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#endif
+
+namespace base {
+class FilePath;
+}
+
+// Provides an interface for accessing the version information for a file. This
+// is the information you access when you select a file in the Windows Explorer,
+// right-click and select Properties, then click the Version tab, and on the
+// Mac
+// when you select a file in the Finder and do a Get Info.
+//
+// This list of properties is straight out of Win32's VerQueryValue
+// <http://msdn.microsoft.com/en-us/library/ms647464.aspx> and the Mac
+// version returns values from the Info.plist as appropriate. TODO(avi): make
+// this a less-obvious Windows-ism.
+
+class BASE_EXPORT FileVersionInfo {
+ public:
+  virtual ~FileVersionInfo() {}
+#if defined(OS_WIN) || defined(OS_MACOSX)
+  // Creates a FileVersionInfo for the specified path. Returns NULL if something
+  // goes wrong (typically the file does not exist or cannot be opened). The
+  // returned object should be deleted when you are done with it.
+  static FileVersionInfo* CreateFileVersionInfo(
+      const base::FilePath& file_path);
+#endif  // OS_WIN || OS_MACOSX
+
+#if defined(OS_WIN)
+  // Creates a FileVersionInfo for the specified module. Returns NULL in case
+  // of error. The returned object should be deleted when you are done with it.
+  static FileVersionInfo* CreateFileVersionInfoForModule(HMODULE module);
+#else
+  // Creates a FileVersionInfo for the current module. Returns NULL in case
+  // of error. The returned object should be deleted when you are done with it.
+  static FileVersionInfo* CreateFileVersionInfoForCurrentModule();
+#endif  // OS_WIN
+
+  // Accessors to the different version properties.
+  // Returns an empty string if the property is not found.
+  virtual base::string16 company_name() = 0;
+  virtual base::string16 company_short_name() = 0;
+  virtual base::string16 product_name() = 0;
+  virtual base::string16 product_short_name() = 0;
+  virtual base::string16 internal_name() = 0;
+  virtual base::string16 product_version() = 0;
+  virtual base::string16 private_build() = 0;
+  virtual base::string16 special_build() = 0;
+  virtual base::string16 comments() = 0;
+  virtual base::string16 original_filename() = 0;
+  virtual base::string16 file_description() = 0;
+  virtual base::string16 file_version() = 0;
+  virtual base::string16 legal_copyright() = 0;
+  virtual base::string16 legal_trademarks() = 0;
+  virtual base::string16 last_change() = 0;
+  virtual bool is_official_build() = 0;
+};
+
+#endif  // BASE_FILE_VERSION_INFO_H_
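
Given the raw-pointer ownership contract above ("the returned object should be deleted when you are done with it"), the intended calling pattern on Windows and Mac comes out roughly as below; the helper name is hypothetical:

    #include <memory>

    #include "base/file_version_info.h"
    #include "base/files/file_path.h"

    base::string16 GetProductVersionOrEmpty(const base::FilePath& path) {
      // Wrapping the raw pointer keeps the delete-when-done contract explicit.
      std::unique_ptr<FileVersionInfo> info(
          FileVersionInfo::CreateFileVersionInfo(path));
      if (!info)
        return base::string16();  // File missing or unreadable.
      return info->product_version();
    }
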
diff --git a/base/file_version_info_mac.h b/base/file_version_info_mac.h
new file mode 100644
index 0000000..9cc4b10
--- /dev/null
+++ b/base/file_version_info_mac.h
@@ -0,0 +1,51 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FILE_VERSION_INFO_MAC_H_
+#define BASE_FILE_VERSION_INFO_MAC_H_
+
+#include <CoreFoundation/CoreFoundation.h>
+#include <string>
+
+#include "base/file_version_info.h"
+#include "base/mac/scoped_nsobject.h"
+#include "base/macros.h"
+
+@class NSBundle;
+
+class FileVersionInfoMac : public FileVersionInfo {
+ public:
+  explicit FileVersionInfoMac(NSBundle *bundle);
+  ~FileVersionInfoMac() override;
+
+  // Accessors to the different version properties.
+  // Returns an empty string if the property is not found.
+  base::string16 company_name() override;
+  base::string16 company_short_name() override;
+  base::string16 product_name() override;
+  base::string16 product_short_name() override;
+  base::string16 internal_name() override;
+  base::string16 product_version() override;
+  base::string16 private_build() override;
+  base::string16 special_build() override;
+  base::string16 comments() override;
+  base::string16 original_filename() override;
+  base::string16 file_description() override;
+  base::string16 file_version() override;
+  base::string16 legal_copyright() override;
+  base::string16 legal_trademarks() override;
+  base::string16 last_change() override;
+  bool is_official_build() override;
+
+ private:
+  // Returns a base::string16 value for a property name.
+  // Returns the empty string if the property does not exist.
+  base::string16 GetString16Value(CFStringRef name);
+
+  base::scoped_nsobject<NSBundle> bundle_;
+
+  DISALLOW_COPY_AND_ASSIGN(FileVersionInfoMac);
+};
+
+#endif  // BASE_FILE_VERSION_INFO_MAC_H_
diff --git a/base/file_version_info_mac.mm b/base/file_version_info_mac.mm
new file mode 100644
index 0000000..ce42924
--- /dev/null
+++ b/base/file_version_info_mac.mm
@@ -0,0 +1,124 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/file_version_info_mac.h"
+
+#import <Foundation/Foundation.h>
+
+#include "base/files/file_path.h"
+#include "base/logging.h"
+#include "base/mac/bundle_locations.h"
+#include "base/mac/foundation_util.h"
+#include "base/strings/sys_string_conversions.h"
+#include "build/build_config.h"
+
+FileVersionInfoMac::FileVersionInfoMac(NSBundle *bundle)
+    : bundle_([bundle retain]) {
+}
+
+FileVersionInfoMac::~FileVersionInfoMac() {}
+
+// static
+FileVersionInfo* FileVersionInfo::CreateFileVersionInfoForCurrentModule() {
+  return CreateFileVersionInfo(base::mac::FrameworkBundlePath());
+}
+
+// static
+FileVersionInfo* FileVersionInfo::CreateFileVersionInfo(
+    const base::FilePath& file_path) {
+  NSString* path = base::SysUTF8ToNSString(file_path.value());
+  NSBundle* bundle = [NSBundle bundleWithPath:path];
+  return new FileVersionInfoMac(bundle);
+}
+
+base::string16 FileVersionInfoMac::company_name() {
+  return base::string16();
+}
+
+base::string16 FileVersionInfoMac::company_short_name() {
+  return base::string16();
+}
+
+base::string16 FileVersionInfoMac::internal_name() {
+  return base::string16();
+}
+
+base::string16 FileVersionInfoMac::product_name() {
+  return GetString16Value(kCFBundleNameKey);
+}
+
+base::string16 FileVersionInfoMac::product_short_name() {
+  return GetString16Value(kCFBundleNameKey);
+}
+
+base::string16 FileVersionInfoMac::comments() {
+  return base::string16();
+}
+
+base::string16 FileVersionInfoMac::legal_copyright() {
+  return GetString16Value(CFSTR("CFBundleGetInfoString"));
+}
+
+base::string16 FileVersionInfoMac::product_version() {
+  // On OS X, CFBundleVersion is used by LaunchServices, and must follow
+  // specific formatting rules, so the four-part Chrome version is in
+  // CFBundleShortVersionString. On iOS, both have a policy-enforced limit
+  // of three version components, so the full version is stored in a custom
+  // key (CrBundleVersion) falling back to CFBundleVersion if not present.
+#if defined(OS_IOS)
+  base::string16 version(GetString16Value(CFSTR("CrBundleVersion")));
+  if (version.length() > 0)
+    return version;
+  return GetString16Value(CFSTR("CFBundleVersion"));
+#else
+  return GetString16Value(CFSTR("CFBundleShortVersionString"));
+#endif  // defined(OS_IOS)
+}
+
+base::string16 FileVersionInfoMac::file_description() {
+  return base::string16();
+}
+
+base::string16 FileVersionInfoMac::legal_trademarks() {
+  return base::string16();
+}
+
+base::string16 FileVersionInfoMac::private_build() {
+  return base::string16();
+}
+
+base::string16 FileVersionInfoMac::file_version() {
+  return product_version();
+}
+
+base::string16 FileVersionInfoMac::original_filename() {
+  return GetString16Value(kCFBundleNameKey);
+}
+
+base::string16 FileVersionInfoMac::special_build() {
+  return base::string16();
+}
+
+base::string16 FileVersionInfoMac::last_change() {
+  return GetString16Value(CFSTR("SCMRevision"));
+}
+
+bool FileVersionInfoMac::is_official_build() {
+#if defined (GOOGLE_CHROME_BUILD)
+  return true;
+#else
+  return false;
+#endif
+}
+
+base::string16 FileVersionInfoMac::GetString16Value(CFStringRef name) {
+  if (bundle_) {
+    NSString *ns_name = base::mac::CFToNSCast(name);
+    NSString* value = [bundle_ objectForInfoDictionaryKey:ns_name];
+    if (value) {
+      return base::SysNSStringToUTF16(value);
+    }
+  }
+  return base::string16();
+}
diff --git a/base/file_version_info_win.cc b/base/file_version_info_win.cc
new file mode 100644
index 0000000..4affd81
--- /dev/null
+++ b/base/file_version_info_win.cc
@@ -0,0 +1,218 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/file_version_info_win.h"
+
+#include <windows.h>
+#include <stddef.h>
+
+#include "base/files/file_path.h"
+#include "base/logging.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/win/resource_util.h"
+
+using base::FilePath;
+
+namespace {
+
+struct LanguageAndCodePage {
+  WORD language;
+  WORD code_page;
+};
+
+// Returns the \\VarFileInfo\\Translation value extracted from the
+// VS_VERSION_INFO resource in |data|.
+LanguageAndCodePage* GetTranslate(const void* data) {
+  LanguageAndCodePage* translate = nullptr;
+  UINT length;
+  if (::VerQueryValue(data, L"\\VarFileInfo\\Translation",
+                      reinterpret_cast<void**>(&translate), &length)) {
+    return translate;
+  }
+  return nullptr;
+}
+
+VS_FIXEDFILEINFO* GetVsFixedFileInfo(const void* data) {
+  VS_FIXEDFILEINFO* fixed_file_info = nullptr;
+  UINT length;
+  if (::VerQueryValue(data, L"\\", reinterpret_cast<void**>(&fixed_file_info),
+                      &length)) {
+    return fixed_file_info;
+  }
+  return nullptr;
+}
+
+}  // namespace
+
+FileVersionInfoWin::~FileVersionInfoWin() = default;
+
+// static
+FileVersionInfo* FileVersionInfo::CreateFileVersionInfoForModule(
+    HMODULE module) {
+  void* data;
+  size_t version_info_length;
+  const bool has_version_resource = base::win::GetResourceFromModule(
+      module, VS_VERSION_INFO, RT_VERSION, &data, &version_info_length);
+  if (!has_version_resource)
+    return nullptr;
+
+  const LanguageAndCodePage* translate = GetTranslate(data);
+  if (!translate)
+    return nullptr;
+
+  return new FileVersionInfoWin(data, translate->language,
+                                translate->code_page);
+}
+
+// static
+FileVersionInfo* FileVersionInfo::CreateFileVersionInfo(
+    const FilePath& file_path) {
+  base::AssertBlockingAllowed();
+
+  DWORD dummy;
+  const wchar_t* path = file_path.value().c_str();
+  const DWORD length = ::GetFileVersionInfoSize(path, &dummy);
+  if (length == 0)
+    return nullptr;
+
+  std::vector<uint8_t> data(length, 0);
+
+  if (!::GetFileVersionInfo(path, dummy, length, data.data()))
+    return nullptr;
+
+  const LanguageAndCodePage* translate = GetTranslate(data.data());
+  if (!translate)
+    return nullptr;
+
+  return new FileVersionInfoWin(std::move(data), translate->language,
+                                translate->code_page);
+}
+
+base::string16 FileVersionInfoWin::company_name() {
+  return GetStringValue(L"CompanyName");
+}
+
+base::string16 FileVersionInfoWin::company_short_name() {
+  return GetStringValue(L"CompanyShortName");
+}
+
+base::string16 FileVersionInfoWin::internal_name() {
+  return GetStringValue(L"InternalName");
+}
+
+base::string16 FileVersionInfoWin::product_name() {
+  return GetStringValue(L"ProductName");
+}
+
+base::string16 FileVersionInfoWin::product_short_name() {
+  return GetStringValue(L"ProductShortName");
+}
+
+base::string16 FileVersionInfoWin::comments() {
+  return GetStringValue(L"Comments");
+}
+
+base::string16 FileVersionInfoWin::legal_copyright() {
+  return GetStringValue(L"LegalCopyright");
+}
+
+base::string16 FileVersionInfoWin::product_version() {
+  return GetStringValue(L"ProductVersion");
+}
+
+base::string16 FileVersionInfoWin::file_description() {
+  return GetStringValue(L"FileDescription");
+}
+
+base::string16 FileVersionInfoWin::legal_trademarks() {
+  return GetStringValue(L"LegalTrademarks");
+}
+
+base::string16 FileVersionInfoWin::private_build() {
+  return GetStringValue(L"PrivateBuild");
+}
+
+base::string16 FileVersionInfoWin::file_version() {
+  return GetStringValue(L"FileVersion");
+}
+
+base::string16 FileVersionInfoWin::original_filename() {
+  return GetStringValue(L"OriginalFilename");
+}
+
+base::string16 FileVersionInfoWin::special_build() {
+  return GetStringValue(L"SpecialBuild");
+}
+
+base::string16 FileVersionInfoWin::last_change() {
+  return GetStringValue(L"LastChange");
+}
+
+bool FileVersionInfoWin::is_official_build() {
+  return (GetStringValue(L"Official Build").compare(L"1") == 0);
+}
+
+bool FileVersionInfoWin::GetValue(const wchar_t* name,
+                                  std::wstring* value_str) {
+  WORD lang_codepage[8];
+  size_t i = 0;
+  // Use the language and codepage from the DLL.
+  lang_codepage[i++] = language_;
+  lang_codepage[i++] = code_page_;
+  // Use the user's default language with the codepage from the DLL.
+  lang_codepage[i++] = ::GetUserDefaultLangID();
+  lang_codepage[i++] = code_page_;
+  // Use the language from the DLL and Latin codepage (most common).
+  lang_codepage[i++] = language_;
+  lang_codepage[i++] = 1252;
+  // Use the user's default language and the Latin codepage (most common).
+  lang_codepage[i++] = ::GetUserDefaultLangID();
+  lang_codepage[i++] = 1252;
+
+  i = 0;
+  while (i < arraysize(lang_codepage)) {
+    wchar_t sub_block[MAX_PATH];
+    WORD language = lang_codepage[i++];
+    WORD code_page = lang_codepage[i++];
+    _snwprintf_s(sub_block, MAX_PATH, MAX_PATH,
+                 L"\\StringFileInfo\\%04x%04x\\%ls", language, code_page, name);
+    LPVOID value = NULL;
+    uint32_t size;
+    BOOL r = ::VerQueryValue(data_, sub_block, &value, &size);
+    if (r && value) {
+      value_str->assign(static_cast<wchar_t*>(value));
+      return true;
+    }
+  }
+  return false;
+}
+
+std::wstring FileVersionInfoWin::GetStringValue(const wchar_t* name) {
+  std::wstring str;
+  if (GetValue(name, &str))
+    return str;
+  else
+    return L"";
+}
+
+FileVersionInfoWin::FileVersionInfoWin(std::vector<uint8_t>&& data,
+                                       WORD language,
+                                       WORD code_page)
+    : owned_data_(std::move(data)),
+      data_(owned_data_.data()),
+      language_(language),
+      code_page_(code_page),
+      fixed_file_info_(GetVsFixedFileInfo(data_)) {
+  DCHECK(!owned_data_.empty());
+}
+
+FileVersionInfoWin::FileVersionInfoWin(void* data,
+                                       WORD language,
+                                       WORD code_page)
+    : data_(data),
+      language_(language),
+      code_page_(code_page),
+      fixed_file_info_(GetVsFixedFileInfo(data)) {
+  DCHECK(data_);
+}
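
For concreteness about the probe string GetValue() builds: with the common US-English language ID 0x0409 and the Unicode codepage 0x04b0, the first sub-block queried is

    \StringFileInfo\040904b0\CompanyName

and the Latin-codepage fallbacks substitute 04e4 (codepage 1252) in the middle field.
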
diff --git a/base/file_version_info_win.h b/base/file_version_info_win.h
new file mode 100644
index 0000000..d91b67f
--- /dev/null
+++ b/base/file_version_info_win.h
@@ -0,0 +1,77 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FILE_VERSION_INFO_WIN_H_
+#define BASE_FILE_VERSION_INFO_WIN_H_
+
+#include <windows.h>
+
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/file_version_info.h"
+#include "base/macros.h"
+
+struct tagVS_FIXEDFILEINFO;
+typedef tagVS_FIXEDFILEINFO VS_FIXEDFILEINFO;
+
+class BASE_EXPORT FileVersionInfoWin : public FileVersionInfo {
+ public:
+  ~FileVersionInfoWin() override;
+
+  // Accessors to the different version properties.
+  // Returns an empty string if the property is not found.
+  base::string16 company_name() override;
+  base::string16 company_short_name() override;
+  base::string16 product_name() override;
+  base::string16 product_short_name() override;
+  base::string16 internal_name() override;
+  base::string16 product_version() override;
+  base::string16 private_build() override;
+  base::string16 special_build() override;
+  base::string16 comments() override;
+  base::string16 original_filename() override;
+  base::string16 file_description() override;
+  base::string16 file_version() override;
+  base::string16 legal_copyright() override;
+  base::string16 legal_trademarks() override;
+  base::string16 last_change() override;
+  bool is_official_build() override;
+
+  // Lets you access other properties not covered above.
+  bool GetValue(const wchar_t* name, std::wstring* value);
+
+  // Similar to GetValue but returns a wstring (empty string if the property
+  // does not exist).
+  std::wstring GetStringValue(const wchar_t* name);
+
+  // Returns the fixed file info if it exists, otherwise NULL.
+  const VS_FIXEDFILEINFO* fixed_file_info() const { return fixed_file_info_; }
+
+ private:
+  friend FileVersionInfo;
+
+  // |data| is a VS_VERSION_INFO resource. |language| and |code_page| are
+  // extracted from the \VarFileInfo\Translation value of |data|.
+  FileVersionInfoWin(std::vector<uint8_t>&& data,
+                     WORD language,
+                     WORD code_page);
+  FileVersionInfoWin(void* data, WORD language, WORD code_page);
+
+  const std::vector<uint8_t> owned_data_;
+  const void* const data_;
+  const WORD language_;
+  const WORD code_page_;
+
+  // This is a pointer into |data_| if it exists. Otherwise nullptr.
+  const VS_FIXEDFILEINFO* const fixed_file_info_;
+
+  DISALLOW_COPY_AND_ASSIGN(FileVersionInfoWin);
+};
+
+#endif  // BASE_FILE_VERSION_INFO_WIN_H_
diff --git a/base/file_version_info_win_unittest.cc b/base/file_version_info_win_unittest.cc
new file mode 100644
index 0000000..a4acc4c
--- /dev/null
+++ b/base/file_version_info_win_unittest.cc
@@ -0,0 +1,175 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/file_version_info_win.h"
+
+#include <windows.h>
+
+#include <stddef.h>
+
+#include <memory>
+
+#include "base/file_version_info.h"
+#include "base/files/file_path.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/path_service.h"
+#include "base/scoped_native_library.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using base::FilePath;
+
+namespace {
+
+FilePath GetTestDataPath() {
+  FilePath path;
+  base::PathService::Get(base::DIR_SOURCE_ROOT, &path);
+  path = path.AppendASCII("base");
+  path = path.AppendASCII("test");
+  path = path.AppendASCII("data");
+  path = path.AppendASCII("file_version_info_unittest");
+  return path;
+}
+
+class FileVersionInfoFactory {
+ public:
+  explicit FileVersionInfoFactory(const FilePath& path) : path_(path) {}
+
+  std::unique_ptr<FileVersionInfo> Create() const {
+    return base::WrapUnique(FileVersionInfo::CreateFileVersionInfo(path_));
+  }
+
+ private:
+  const FilePath path_;
+
+  DISALLOW_COPY_AND_ASSIGN(FileVersionInfoFactory);
+};
+
+class FileVersionInfoForModuleFactory {
+ public:
+  explicit FileVersionInfoForModuleFactory(const FilePath& path)
+      // Load the library with LOAD_LIBRARY_AS_IMAGE_RESOURCE since it shouldn't
+      // be executed.
+      : library_(::LoadLibraryEx(path.value().c_str(),
+                                 nullptr,
+                                 LOAD_LIBRARY_AS_IMAGE_RESOURCE)) {
+    EXPECT_TRUE(library_.is_valid());
+  }
+
+  std::unique_ptr<FileVersionInfo> Create() const {
+    return base::WrapUnique(
+        FileVersionInfo::CreateFileVersionInfoForModule(library_.get()));
+  }
+
+ private:
+  const base::ScopedNativeLibrary library_;
+
+  DISALLOW_COPY_AND_ASSIGN(FileVersionInfoForModuleFactory);
+};
+
+template <typename T>
+class FileVersionInfoTest : public testing::Test {};
+
+using FileVersionInfoFactories =
+    ::testing::Types<FileVersionInfoFactory, FileVersionInfoForModuleFactory>;
+
+}  // namespace
+
+TYPED_TEST_CASE(FileVersionInfoTest, FileVersionInfoFactories);
+
+TYPED_TEST(FileVersionInfoTest, HardCodedProperties) {
+  const wchar_t kDLLName[] = {L"FileVersionInfoTest1.dll"};
+
+  const wchar_t* const kExpectedValues[15] = {
+      // FileVersionInfoTest.dll
+      L"Goooooogle",                                  // company_name
+      L"Google",                                      // company_short_name
+      L"This is the product name",                    // product_name
+      L"This is the product short name",              // product_short_name
+      L"The Internal Name",                           // internal_name
+      L"4.3.2.1",                                     // product_version
+      L"Private build property",                      // private_build
+      L"Special build property",                      // special_build
+      L"This is a particularly interesting comment",  // comments
+      L"This is the original filename",               // original_filename
+      L"This is my file description",                 // file_description
+      L"1.2.3.4",                                     // file_version
+      L"This is the legal copyright",                 // legal_copyright
+      L"This is the legal trademarks",                // legal_trademarks
+      L"This is the last change",                     // last_change
+  };
+
+  FilePath dll_path = GetTestDataPath();
+  dll_path = dll_path.Append(kDLLName);
+
+  TypeParam factory(dll_path);
+  std::unique_ptr<FileVersionInfo> version_info(factory.Create());
+  ASSERT_TRUE(version_info);
+
+  int j = 0;
+  EXPECT_EQ(kExpectedValues[j++], version_info->company_name());
+  EXPECT_EQ(kExpectedValues[j++], version_info->company_short_name());
+  EXPECT_EQ(kExpectedValues[j++], version_info->product_name());
+  EXPECT_EQ(kExpectedValues[j++], version_info->product_short_name());
+  EXPECT_EQ(kExpectedValues[j++], version_info->internal_name());
+  EXPECT_EQ(kExpectedValues[j++], version_info->product_version());
+  EXPECT_EQ(kExpectedValues[j++], version_info->private_build());
+  EXPECT_EQ(kExpectedValues[j++], version_info->special_build());
+  EXPECT_EQ(kExpectedValues[j++], version_info->comments());
+  EXPECT_EQ(kExpectedValues[j++], version_info->original_filename());
+  EXPECT_EQ(kExpectedValues[j++], version_info->file_description());
+  EXPECT_EQ(kExpectedValues[j++], version_info->file_version());
+  EXPECT_EQ(kExpectedValues[j++], version_info->legal_copyright());
+  EXPECT_EQ(kExpectedValues[j++], version_info->legal_trademarks());
+  EXPECT_EQ(kExpectedValues[j++], version_info->last_change());
+}
+
+TYPED_TEST(FileVersionInfoTest, IsOfficialBuild) {
+  constexpr struct {
+    const wchar_t* const dll_name;
+    const bool is_official_build;
+  } kTestItems[]{
+      {L"FileVersionInfoTest1.dll", true}, {L"FileVersionInfoTest2.dll", false},
+  };
+
+  for (const auto& test_item : kTestItems) {
+    const FilePath dll_path = GetTestDataPath().Append(test_item.dll_name);
+
+    TypeParam factory(dll_path);
+    std::unique_ptr<FileVersionInfo> version_info(factory.Create());
+    ASSERT_TRUE(version_info);
+
+    EXPECT_EQ(test_item.is_official_build, version_info->is_official_build());
+  }
+}
+
+TYPED_TEST(FileVersionInfoTest, CustomProperties) {
+  FilePath dll_path = GetTestDataPath();
+  dll_path = dll_path.AppendASCII("FileVersionInfoTest1.dll");
+
+  TypeParam factory(dll_path);
+  std::unique_ptr<FileVersionInfo> version_info(factory.Create());
+  ASSERT_TRUE(version_info);
+
+  // Test a few existing properties.
+  std::wstring str;
+  FileVersionInfoWin* version_info_win =
+      static_cast<FileVersionInfoWin*>(version_info.get());
+  EXPECT_TRUE(version_info_win->GetValue(L"Custom prop 1", &str));
+  EXPECT_EQ(L"Un", str);
+  EXPECT_EQ(L"Un", version_info_win->GetStringValue(L"Custom prop 1"));
+
+  EXPECT_TRUE(version_info_win->GetValue(L"Custom prop 2", &str));
+  EXPECT_EQ(L"Deux", str);
+  EXPECT_EQ(L"Deux", version_info_win->GetStringValue(L"Custom prop 2"));
+
+  EXPECT_TRUE(version_info_win->GetValue(L"Custom prop 3", &str));
+  EXPECT_EQ(L"1600 Amphitheatre Parkway Mountain View, CA 94043", str);
+  EXPECT_EQ(L"1600 Amphitheatre Parkway Mountain View, CA 94043",
+            version_info_win->GetStringValue(L"Custom prop 3"));
+
+  // Test a non-existing property.
+  EXPECT_FALSE(version_info_win->GetValue(L"Unknown property", &str));
+  EXPECT_EQ(L"", version_info_win->GetStringValue(L"Unknown property"));
+}
diff --git a/base/files/dir_reader_fallback.h b/base/files/dir_reader_fallback.h
new file mode 100644
index 0000000..d44c227
--- /dev/null
+++ b/base/files/dir_reader_fallback.h
@@ -0,0 +1,35 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FILES_DIR_READER_FALLBACK_H_
+#define BASE_FILES_DIR_READER_FALLBACK_H_
+
+namespace base {
+
+class DirReaderFallback {
+ public:
+  // Open a directory. If |IsValid| is true, then |Next| can be called to start
+  // the iteration at the beginning of the directory.
+  explicit DirReaderFallback(const char* directory_path) {}
+
+  // After construction, IsValid returns true iff the directory was
+  // successfully opened.
+  bool IsValid() const { return false; }
+
+  // Move to the next entry returning false if the iteration is complete.
+  bool Next() { return false; }
+
+  // Return the name of the current directory entry.
+  const char* name() { return nullptr; }
+
+  // Return the file descriptor which is being used.
+  int fd() const { return -1; }
+
+  // Returns true if this is a no-op fallback class (for testing).
+  static bool IsFallback() { return true; }
+};
+
+}  // namespace base
+
+#endif  // BASE_FILES_DIR_READER_FALLBACK_H_
diff --git a/base/files/dir_reader_linux.h b/base/files/dir_reader_linux.h
new file mode 100644
index 0000000..259bcfe
--- /dev/null
+++ b/base/files/dir_reader_linux.h
@@ -0,0 +1,101 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FILES_DIR_READER_LINUX_H_
+#define BASE_FILES_DIR_READER_LINUX_H_
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/posix/eintr_wrapper.h"
+
+// See the comments in dir_reader_posix.h about this.
+
+namespace base {
+
+struct linux_dirent {
+  uint64_t        d_ino;
+  int64_t         d_off;
+  unsigned short  d_reclen;
+  unsigned char   d_type;
+  char            d_name[0];
+};
+
+class DirReaderLinux {
+ public:
+  explicit DirReaderLinux(const char* directory_path)
+      : fd_(open(directory_path, O_RDONLY | O_DIRECTORY)),
+        offset_(0),
+        size_(0) {
+    memset(buf_, 0, sizeof(buf_));
+  }
+
+  ~DirReaderLinux() {
+    if (fd_ >= 0) {
+      if (IGNORE_EINTR(close(fd_)))
+        RAW_LOG(ERROR, "Failed to close directory handle");
+    }
+  }
+
+  bool IsValid() const {
+    return fd_ >= 0;
+  }
+
+  // Move to the next entry returning false if the iteration is complete.
+  bool Next() {
+    if (size_) {
+      linux_dirent* dirent = reinterpret_cast<linux_dirent*>(&buf_[offset_]);
+      offset_ += dirent->d_reclen;
+    }
+
+    if (offset_ != size_)
+      return true;
+
+    const int r = syscall(__NR_getdents64, fd_, buf_, sizeof(buf_));
+    if (r == 0)
+      return false;
+    if (r == -1) {
+      DPLOG(FATAL) << "getdents64 returned an error: " << errno;
+      return false;
+    }
+    size_ = r;
+    offset_ = 0;
+    return true;
+  }
+
+  const char* name() const {
+    if (!size_)
+      return nullptr;
+
+    const linux_dirent* dirent =
+        reinterpret_cast<const linux_dirent*>(&buf_[offset_]);
+    return dirent->d_name;
+  }
+
+  int fd() const {
+    return fd_;
+  }
+
+  static bool IsFallback() {
+    return false;
+  }
+
+ private:
+  const int fd_;
+  alignas(linux_dirent) unsigned char buf_[512];
+  size_t offset_;
+  size_t size_;
+
+  DISALLOW_COPY_AND_ASSIGN(DirReaderLinux);
+};
+
+}  // namespace base
+
+#endif  // BASE_FILES_DIR_READER_LINUX_H_
diff --git a/base/files/dir_reader_posix.h b/base/files/dir_reader_posix.h
new file mode 100644
index 0000000..15fc744
--- /dev/null
+++ b/base/files/dir_reader_posix.h
@@ -0,0 +1,36 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FILES_DIR_READER_POSIX_H_
+#define BASE_FILES_DIR_READER_POSIX_H_
+
+#include "build/build_config.h"
+
+// This header provides a class, DirReaderPosix, which allows one to open and
+// read from directories without allocating memory. For the interface, see
+// the generic fallback in dir_reader_fallback.h.
+
+// Mac note: OS X has getdirentries, but it only works if we restrict Chrome to
+// 32-bit inodes. There is a getdirentries64 syscall in 10.6, but it's not
+// wrapped and the direct syscall interface is unstable. Using an unstable API
+// seems worse than falling back to enumerating all file descriptors so we will
+// probably never implement this on the Mac.
+
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+#include "base/files/dir_reader_linux.h"
+#else
+#include "base/files/dir_reader_fallback.h"
+#endif
+
+namespace base {
+
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+typedef DirReaderLinux DirReaderPosix;
+#else
+typedef DirReaderFallback DirReaderPosix;
+#endif
+
+}  // namespace base
+
+#endif  // BASE_FILES_DIR_READER_POSIX_H_
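
Iteration looks roughly like the sketch below; everything lives on the stack, which is the point of the allocation-free design, and skipping "." and ".." is the caller's job (the unittest that follows does the same). CountEntries is illustrative only:

    #include <string.h>

    #include "base/files/dir_reader_posix.h"

    // Counts entries in |path|, skipping "." and "..". Returns -1 if the
    // directory cannot be opened.
    int CountEntries(const char* path) {
      base::DirReaderPosix reader(path);
      if (!reader.IsValid())
        return -1;
      int count = 0;
      while (reader.Next()) {
        if (strcmp(reader.name(), ".") == 0 ||
            strcmp(reader.name(), "..") == 0)
          continue;
        ++count;
      }
      return count;
    }
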
diff --git a/base/files/dir_reader_posix_unittest.cc b/base/files/dir_reader_posix_unittest.cc
new file mode 100644
index 0000000..1954cb2
--- /dev/null
+++ b/base/files/dir_reader_posix_unittest.cc
@@ -0,0 +1,95 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/dir_reader_posix.h"
+
+#include <fcntl.h>
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <set>
+
+#include "base/files/scoped_temp_dir.h"
+#include "base/logging.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_ANDROID)
+#include "base/os_compat_android.h"
+#endif
+
+namespace base {
+
+TEST(DirReaderPosixUnittest, Read) {
+  static const unsigned kNumFiles = 100;
+
+  if (DirReaderPosix::IsFallback())
+    return;
+
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  const char* dir = temp_dir.GetPath().value().c_str();
+  ASSERT_TRUE(dir);
+
+  char wdbuf[PATH_MAX];
+  PCHECK(getcwd(wdbuf, PATH_MAX));
+
+  PCHECK(chdir(dir) == 0);
+
+  for (unsigned i = 0; i < kNumFiles; i++) {
+    char buf[16];
+    snprintf(buf, sizeof(buf), "%d", i);
+    const int fd = open(buf, O_CREAT | O_RDONLY | O_EXCL, 0600);
+    PCHECK(fd >= 0);
+    PCHECK(close(fd) == 0);
+  }
+
+  std::set<unsigned> seen;
+
+  DirReaderPosix reader(dir);
+  EXPECT_TRUE(reader.IsValid());
+
+  if (!reader.IsValid())
+    return;
+
+  bool seen_dot = false, seen_dotdot = false;
+
+  while (reader.Next()) {
+    if (strcmp(reader.name(), ".") == 0) {
+      seen_dot = true;
+      continue;
+    }
+    if (strcmp(reader.name(), "..") == 0) {
+      seen_dotdot = true;
+      continue;
+    }
+
+    SCOPED_TRACE(testing::Message() << "reader.name(): " << reader.name());
+
+    char* endptr;
+    const unsigned long value = strtoul(reader.name(), &endptr, 10);
+
+    EXPECT_FALSE(*endptr);
+    EXPECT_LT(value, kNumFiles);
+    EXPECT_EQ(0u, seen.count(value));
+    seen.insert(value);
+  }
+
+  for (unsigned i = 0; i < kNumFiles; i++) {
+    char buf[16];
+    snprintf(buf, sizeof(buf), "%d", i);
+    PCHECK(unlink(buf) == 0);
+  }
+
+  PCHECK(rmdir(dir) == 0);
+
+  PCHECK(chdir(wdbuf) == 0);
+
+  EXPECT_TRUE(seen_dot);
+  EXPECT_TRUE(seen_dotdot);
+  EXPECT_EQ(kNumFiles, seen.size());
+}
+
+}  // namespace base
diff --git a/base/files/file.cc b/base/files/file.cc
new file mode 100644
index 0000000..1a4ee37
--- /dev/null
+++ b/base/files/file.cc
@@ -0,0 +1,150 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file.h"
+#include "base/files/file_path.h"
+#include "base/files/file_tracing.h"
+#include "base/metrics/histogram.h"
+#include "base/timer/elapsed_timer.h"
+#include "build/build_config.h"
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include <errno.h>
+#endif
+
+namespace base {
+
+File::Info::Info()
+    : size(0),
+      is_directory(false),
+      is_symbolic_link(false) {
+}
+
+File::Info::~Info() = default;
+
+File::File()
+    : error_details_(FILE_ERROR_FAILED),
+      created_(false),
+      async_(false) {
+}
+
+#if !defined(OS_NACL)
+File::File(const FilePath& path, uint32_t flags)
+    : error_details_(FILE_OK), created_(false), async_(false) {
+  Initialize(path, flags);
+}
+#endif
+
+File::File(PlatformFile platform_file)
+    : file_(platform_file),
+      error_details_(FILE_OK),
+      created_(false),
+      async_(false) {
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+  DCHECK_GE(platform_file, -1);
+#endif
+}
+
+File::File(Error error_details)
+    : error_details_(error_details),
+      created_(false),
+      async_(false) {
+}
+
+File::File(File&& other)
+    : file_(other.TakePlatformFile()),
+      tracing_path_(other.tracing_path_),
+      error_details_(other.error_details()),
+      created_(other.created()),
+      async_(other.async_) {}
+
+File::~File() {
+  // Go through the AssertIOAllowed logic.
+  Close();
+}
+
+// static
+File File::CreateForAsyncHandle(PlatformFile platform_file) {
+  File file(platform_file);
+  // It would be nice if we could validate that |platform_file| was opened with
+  // FILE_FLAG_OVERLAPPED on Windows but this doesn't appear to be possible.
+  file.async_ = true;
+  return file;
+}
+
+File& File::operator=(File&& other) {
+  Close();
+  SetPlatformFile(other.TakePlatformFile());
+  tracing_path_ = other.tracing_path_;
+  error_details_ = other.error_details();
+  created_ = other.created();
+  async_ = other.async_;
+  return *this;
+}
+
+#if !defined(OS_NACL)
+void File::Initialize(const FilePath& path, uint32_t flags) {
+  if (path.ReferencesParent()) {
+#if defined(OS_WIN)
+    ::SetLastError(ERROR_ACCESS_DENIED);
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+    errno = EACCES;
+#else
+#error Unsupported platform
+#endif
+    error_details_ = FILE_ERROR_ACCESS_DENIED;
+    return;
+  }
+  if (FileTracing::IsCategoryEnabled())
+    tracing_path_ = path;
+  SCOPED_FILE_TRACE("Initialize");
+  DoInitialize(path, flags);
+}
+#endif
+
+std::string File::ErrorToString(Error error) {
+  switch (error) {
+    case FILE_OK:
+      return "FILE_OK";
+    case FILE_ERROR_FAILED:
+      return "FILE_ERROR_FAILED";
+    case FILE_ERROR_IN_USE:
+      return "FILE_ERROR_IN_USE";
+    case FILE_ERROR_EXISTS:
+      return "FILE_ERROR_EXISTS";
+    case FILE_ERROR_NOT_FOUND:
+      return "FILE_ERROR_NOT_FOUND";
+    case FILE_ERROR_ACCESS_DENIED:
+      return "FILE_ERROR_ACCESS_DENIED";
+    case FILE_ERROR_TOO_MANY_OPENED:
+      return "FILE_ERROR_TOO_MANY_OPENED";
+    case FILE_ERROR_NO_MEMORY:
+      return "FILE_ERROR_NO_MEMORY";
+    case FILE_ERROR_NO_SPACE:
+      return "FILE_ERROR_NO_SPACE";
+    case FILE_ERROR_NOT_A_DIRECTORY:
+      return "FILE_ERROR_NOT_A_DIRECTORY";
+    case FILE_ERROR_INVALID_OPERATION:
+      return "FILE_ERROR_INVALID_OPERATION";
+    case FILE_ERROR_SECURITY:
+      return "FILE_ERROR_SECURITY";
+    case FILE_ERROR_ABORT:
+      return "FILE_ERROR_ABORT";
+    case FILE_ERROR_NOT_A_FILE:
+      return "FILE_ERROR_NOT_A_FILE";
+    case FILE_ERROR_NOT_EMPTY:
+      return "FILE_ERROR_NOT_EMPTY";
+    case FILE_ERROR_INVALID_URL:
+      return "FILE_ERROR_INVALID_URL";
+    case FILE_ERROR_IO:
+      return "FILE_ERROR_IO";
+    case FILE_ERROR_MAX:
+      break;
+  }
+
+  NOTREACHED();
+  return "";
+}
+
+}  // namespace base
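
The open-check-use pattern that the header below prescribes (check IsValid(), consult error_details() only for diagnostics) comes out roughly as follows; the function name and file contents are hypothetical:

    #include "base/files/file.h"
    #include "base/files/file_path.h"
    #include "base/logging.h"

    bool WriteGreeting(const base::FilePath& path) {
      base::File file(path, base::File::FLAG_CREATE_ALWAYS |
                                base::File::FLAG_WRITE);
      if (!file.IsValid()) {
        DLOG(ERROR) << "Open failed: "
                    << base::File::ErrorToString(file.error_details());
        return false;
      }
      // Write() seeks to |offset| and makes a best effort to write all bytes.
      static const char kData[] = "hello";
      return file.Write(0, kData, 5) == 5;
    }
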
diff --git a/base/files/file.h b/base/files/file.h
new file mode 100644
index 0000000..c3a31d84
--- /dev/null
+++ b/base/files/file.h
@@ -0,0 +1,375 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FILES_FILE_H_
+#define BASE_FILES_FILE_H_
+
+#include <stdint.h>
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/files/file_path.h"
+#include "base/files/file_tracing.h"
+#include "base/files/platform_file.h"
+#include "base/files/scoped_file.h"
+#include "base/macros.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include <sys/stat.h>
+#endif
+
+namespace base {
+
+#if defined(OS_BSD) || defined(OS_MACOSX) || defined(OS_NACL) || \
+    (defined(OS_ANDROID) && __ANDROID_API__ < 21)
+typedef struct stat stat_wrapper_t;
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+typedef struct stat64 stat_wrapper_t;
+#endif
+
+// Thin wrapper around an OS-level file.
+// Note that this class does not provide any support for asynchronous IO, other
+// than the ability to create asynchronous handles on Windows.
+//
+// Note about const: this class does not attempt to determine if the underlying
+// file system object is affected by a particular method in order to consider
+// that method const or not. Only methods that deal with member variables in an
+// obvious non-modifying way are marked as const. Any method that forward calls
+// to the OS is not considered const, even if there is no apparent change to
+// member variables.
+class BASE_EXPORT File {
+ public:
+  // FLAG_(OPEN|CREATE).* are mutually exclusive. You should specify exactly one
+  // of the five (possibly combining with other flags) when opening or creating
+  // a file.
+  // FLAG_(WRITE|APPEND) are mutually exclusive. This is so that APPEND behavior
+  // will be consistent with O_APPEND on POSIX.
+  // FLAG_EXCLUSIVE_(READ|WRITE) only grant exclusive access to the file on
+  // creation on POSIX; for existing files, consider using Lock().
+  enum Flags {
+    FLAG_OPEN = 1 << 0,            // Opens a file, only if it exists.
+    FLAG_CREATE = 1 << 1,          // Creates a new file, only if it does not
+                                   // already exist.
+    FLAG_OPEN_ALWAYS = 1 << 2,     // May create a new file.
+    FLAG_CREATE_ALWAYS = 1 << 3,   // May overwrite an old file.
+    FLAG_OPEN_TRUNCATED = 1 << 4,  // Opens a file and truncates it, only if it
+                                   // exists.
+    FLAG_READ = 1 << 5,
+    FLAG_WRITE = 1 << 6,
+    FLAG_APPEND = 1 << 7,
+    FLAG_EXCLUSIVE_READ = 1 << 8,  // EXCLUSIVE is opposite of Windows SHARE.
+    FLAG_EXCLUSIVE_WRITE = 1 << 9,
+    FLAG_ASYNC = 1 << 10,
+    FLAG_TEMPORARY = 1 << 11,  // Used on Windows only.
+    FLAG_HIDDEN = 1 << 12,     // Used on Windows only.
+    FLAG_DELETE_ON_CLOSE = 1 << 13,
+    FLAG_WRITE_ATTRIBUTES = 1 << 14,     // Used on Windows only.
+    FLAG_SHARE_DELETE = 1 << 15,         // Used on Windows only.
+    FLAG_TERMINAL_DEVICE = 1 << 16,      // Serial port flags.
+    FLAG_BACKUP_SEMANTICS = 1 << 17,     // Used on Windows only.
+    FLAG_EXECUTE = 1 << 18,              // Used on Windows only.
+    FLAG_SEQUENTIAL_SCAN = 1 << 19,      // Used on Windows only.
+    FLAG_CAN_DELETE_ON_CLOSE = 1 << 20,  // Requests permission to delete a file
+                                         // via DeleteOnClose() (Windows only).
+                                         // See DeleteOnClose() for details.
+  };
+
+  // This enum has been recorded in multiple histograms using PlatformFileError
+  // enum. If the order of the fields needs to change, please ensure that those
+  // histograms are obsolete or have been moved to a different enum.
+  //
+  // FILE_ERROR_ACCESS_DENIED is returned when a call fails because of a
+  // filesystem restriction. FILE_ERROR_SECURITY is returned when a browser
+  // policy doesn't allow the operation to be executed.
+  enum Error {
+    FILE_OK = 0,
+    FILE_ERROR_FAILED = -1,
+    FILE_ERROR_IN_USE = -2,
+    FILE_ERROR_EXISTS = -3,
+    FILE_ERROR_NOT_FOUND = -4,
+    FILE_ERROR_ACCESS_DENIED = -5,
+    FILE_ERROR_TOO_MANY_OPENED = -6,
+    FILE_ERROR_NO_MEMORY = -7,
+    FILE_ERROR_NO_SPACE = -8,
+    FILE_ERROR_NOT_A_DIRECTORY = -9,
+    FILE_ERROR_INVALID_OPERATION = -10,
+    FILE_ERROR_SECURITY = -11,
+    FILE_ERROR_ABORT = -12,
+    FILE_ERROR_NOT_A_FILE = -13,
+    FILE_ERROR_NOT_EMPTY = -14,
+    FILE_ERROR_INVALID_URL = -15,
+    FILE_ERROR_IO = -16,
+    // Put new entries here and increment FILE_ERROR_MAX.
+    FILE_ERROR_MAX = -17
+  };
+
+  // This explicit mapping matches both FILE_ on Windows and SEEK_ on Linux.
+  enum Whence {
+    FROM_BEGIN   = 0,
+    FROM_CURRENT = 1,
+    FROM_END     = 2
+  };
+
+  // Used to hold information about a given file.
+  // If you add more fields to this structure (platform-specific fields are OK),
+  // make sure to update all functions that use it in file_util_{win|posix}.cc,
+  // too, and the ParamTraits<base::File::Info> implementation in
+  // ipc/ipc_message_utils.cc.
+  struct BASE_EXPORT Info {
+    Info();
+    ~Info();
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+    // Fills this struct with values from |stat_info|.
+    void FromStat(const stat_wrapper_t& stat_info);
+#endif
+
+    // The size of the file in bytes.  Undefined when is_directory is true.
+    int64_t size;
+
+    // True if the file corresponds to a directory.
+    bool is_directory;
+
+    // True if the file corresponds to a symbolic link.  For Windows currently
+    // not supported and thus always false.
+    bool is_symbolic_link;
+
+    // The last modified time of a file.
+    Time last_modified;
+
+    // The last accessed time of a file.
+    Time last_accessed;
+
+    // The creation time of a file.
+    Time creation_time;
+  };
+
+  File();
+
+  // Creates or opens the given file. This will fail with 'access denied' if the
+  // |path| contains path traversal ('..') components.
+  File(const FilePath& path, uint32_t flags);
+
+  // Takes ownership of |platform_file|.
+  explicit File(PlatformFile platform_file);
+
+  // Creates an object with a specific error_details code.
+  explicit File(Error error_details);
+
+  File(File&& other);
+
+  ~File();
+
+  // Takes ownership of |platform_file|.
+  static File CreateForAsyncHandle(PlatformFile platform_file);
+
+  File& operator=(File&& other);
+
+  // Creates or opens the given file.
+  void Initialize(const FilePath& path, uint32_t flags);
+
+  // Returns |true| if the handle / fd wrapped by this object is valid.  This
+  // method doesn't interact with the file system (and is safe to be called from
+  // ThreadRestrictions::SetIOAllowed(false) threads).
+  bool IsValid() const;
+
+  // Returns true if a new file was created (or an old one truncated to zero
+  // length to simulate a new file, which can happen with
+  // FLAG_CREATE_ALWAYS), and false otherwise.
+  bool created() const { return created_; }
+
+  // Returns the OS result of opening this file. Note that the way to verify
+  // the success of the operation is to use IsValid(), not this method:
+  //   File file(path, flags);
+  //   if (!file.IsValid())
+  //     return;
+  Error error_details() const { return error_details_; }
+
+  PlatformFile GetPlatformFile() const;
+  PlatformFile TakePlatformFile();
+
+  // Destroying this object closes the file automatically.
+  void Close();
+
+  // Changes current position in the file to an |offset| relative to an origin
+  // defined by |whence|. Returns the resultant current position in the file
+  // (relative to the start) or -1 in case of error.
+  int64_t Seek(Whence whence, int64_t offset);
+
+  // Reads the given number of bytes (or until EOF is reached) starting with the
+  // given offset. Returns the number of bytes read, or -1 on error. Note that
+  // this function makes a best effort to read all data on all platforms, so it
+  // is not intended for stream-oriented files but instead for cases when the
+  // normal expectation is that actually |size| bytes are read unless there is
+  // an error.
+  int Read(int64_t offset, char* data, int size);
+
+  // Same as above but without seek.
+  int ReadAtCurrentPos(char* data, int size);
+
+  // Reads the given number of bytes (or until EOF is reached) starting with the
+  // given offset, but does not make any effort to read all data on all
+  // platforms. Returns the number of bytes read, or -1 on error.
+  int ReadNoBestEffort(int64_t offset, char* data, int size);
+
+  // Same as above but without seek.
+  int ReadAtCurrentPosNoBestEffort(char* data, int size);
+
+  // Writes the given buffer into the file at the given offset, overwriting any
+  // data that was previously there. Returns the number of bytes written, or -1
+  // on error. Note that this function makes a best effort to write all data on
+  // all platforms. |data| can be nullptr when |size| is 0.
+  // Ignores the offset and writes to the end of the file if the file was opened
+  // with FLAG_APPEND.
+  int Write(int64_t offset, const char* data, int size);
+
+  // Same as above but without seek.
+  int WriteAtCurrentPos(const char* data, int size);
+
+  // Same as above but does not make any effort to write all data on all
+  // platforms. Returns the number of bytes written, or -1 on error.
+  int WriteAtCurrentPosNoBestEffort(const char* data, int size);
+
+  // Returns the current size of this file, or a negative number on failure.
+  int64_t GetLength();
+
+  // Truncates the file to the given length. If |length| is greater than the
+  // current size of the file, the file is extended with zeros. If the file
+  // doesn't exist, |false| is returned.
+  bool SetLength(int64_t length);
+
+  // Instructs the filesystem to flush the file to disk. (POSIX: fsync, Windows:
+  // FlushFileBuffers).
+  // Calling Flush() does not guarantee file integrity and thus is not a valid
+  // substitute for file integrity checks and recovery codepaths for malformed
+  // files. It can also be *really* slow, so avoid blocking on Flush(),
+  // especially please don't block shutdown on Flush().
+  // Latency percentiles of Flush() across all platforms as of July 2016:
+  // 50 %     > 5 ms
+  // 10 %     > 58 ms
+  //  1 %     > 357 ms
+  //  0.1 %   > 1.8 seconds
+  //  0.01 %  > 7.6 seconds
+  bool Flush();
+
+  // Updates the file times.
+  bool SetTimes(Time last_access_time, Time last_modified_time);
+
+  // Returns some basic information for the given file.
+  bool GetInfo(Info* info);
+
+#if !defined(OS_FUCHSIA)  // Fuchsia's POSIX API does not support file locking.
+
+  // Attempts to take an exclusive write lock on the file. Returns immediately
+  // (i.e. does not wait for another process to unlock the file). If the lock
+  // was obtained, the result will be FILE_OK. A lock only guarantees
+  // that other processes may not also take a lock on the same file with the
+  // same API - it may still be opened, renamed, unlinked, etc.
+  //
+  // Common semantics:
+  //  * Locks are held by processes, but not inherited by child processes.
+  //  * Locks are released by the OS on file close or process termination.
+  //  * Locks are reliable only on local filesystems.
+  //  * Duplicated file handles may also write to locked files.
+  // Windows-specific semantics:
+  //  * Locks are mandatory for read/write APIs, advisory for mapping APIs.
+  //  * Within a process, locking the same file (by the same or new handle)
+  //    will fail.
+  // POSIX-specific semantics:
+  //  * Locks are advisory only.
+  //  * Within a process, locking the same file (by the same or new handle)
+  //    will succeed.
+  //  * Closing any descriptor on a given file releases the lock.
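+  //
+  // Example (a sketch; only meaningful between cooperating processes, per the
+  // caveats above):
+  //   if (file.Lock() == base::File::FILE_OK) {
+  //     ...  // Exclusive access among processes using the same API.
+  //     file.Unlock();
+  //   }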
+  Error Lock();
+
+  // Unlock a file previously locked.
+  Error Unlock();
+
+#endif  // !defined(OS_FUCHSIA)
+
+  // Returns a new object referencing this file for use within the current
+  // process. Handling of FLAG_DELETE_ON_CLOSE varies by OS. On POSIX, the File
+  // object that was created or initialized with this flag will have unlinked
+  // the underlying file when it was created or opened. On Windows, the
+  // underlying file is deleted when the last handle to it is closed.
+  File Duplicate() const;
+
+  bool async() const { return async_; }
+
+#if defined(OS_WIN)
+  // Sets or clears the DeleteFile disposition on the handle. Returns true if
+  // the disposition was set or cleared, as indicated by |delete_on_close|.
+  //
+  // Microsoft Windows deletes a file only when the DeleteFile disposition has
+  // been set by some handle holder and the last handle to the underlying
+  // kernel object is closed. This disposition can be set by:
+  // - Calling the Win32 DeleteFile function with the path to a file.
+  // - Opening/creating a file with FLAG_DELETE_ON_CLOSE.
+  // - Opening/creating a file with FLAG_CAN_DELETE_ON_CLOSE and subsequently
+  //   calling DeleteOnClose(true).
+  //
+  // In all cases, all pre-existing handles to the file must have been opened
+  // with FLAG_SHARE_DELETE.
+  //
+  // So:
+  // - Use FLAG_SHARE_DELETE when creating/opening a file to allow another
+  //   entity on the system to cause it to be deleted when it is closed. (Note:
+  //   another entity can delete the file the moment after it is closed, so not
+  //   using this permission doesn't provide any protections.)
+  // - Use FLAG_DELETE_ON_CLOSE for any file that is to be deleted after use.
+  //   The OS will ensure it is deleted even in the face of process termination.
+  // - Use FLAG_CAN_DELETE_ON_CLOSE in conjunction with DeleteOnClose() to alter
+  //   the DeleteFile disposition on an open handle. This fine-grained control
+  //   allows for marking a file for deletion during processing so that it is
+  //   deleted in the event of untimely process termination, and then clearing
+  //   this state once the file is suitable for persistence.
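+  //
+  // Example (a sketch; WriteData() is a hypothetical processing step, and
+  // |file| is assumed to have been opened with FLAG_CAN_DELETE_ON_CLOSE and
+  // FLAG_SHARE_DELETE):
+  //   file.DeleteOnClose(true);   // Deleted if the process dies mid-write.
+  //   WriteData(&file);           // Hypothetical; partial output is unsafe.
+  //   file.DeleteOnClose(false);  // Keep the file now that it is complete.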
+  bool DeleteOnClose(bool delete_on_close);
+#endif
+
+#if defined(OS_WIN)
+  static Error OSErrorToFileError(DWORD last_error);
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  static Error OSErrorToFileError(int saved_errno);
+#endif
+
+  // Gets the last global error (errno or GetLastError()) and converts it to the
+  // closest base::File::Error equivalent via OSErrorToFileError(). The returned
+  // value is only trustworthy immediately after another base::File method
+  // fails. base::File never resets the global error to zero.
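+  //
+  // Example (a sketch; only valid immediately after a failed base::File call):
+  //   int rv = file.ReadAtCurrentPos(buffer, size);
+  //   if (rv < 0)
+  //     LOG(WARNING) << File::ErrorToString(File::GetLastFileError());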
+  static Error GetLastFileError();
+
+  // Converts an error value to a human-readable form. Used for logging.
+  static std::string ErrorToString(Error error);
+
+ private:
+  friend class FileTracing::ScopedTrace;
+
+  // Creates or opens the given file. Only called if |path| has no
+  // traversal ('..') components.
+  void DoInitialize(const FilePath& path, uint32_t flags);
+
+  void SetPlatformFile(PlatformFile file);
+
+  ScopedPlatformFile file_;
+
+  // A path to use for tracing purposes. Set if file tracing is enabled during
+  // |Initialize()|.
+  FilePath tracing_path_;
+
+  // Object tied to the lifetime of |this| that enables/disables tracing.
+  FileTracing::ScopedEnabler trace_enabler_;
+
+  Error error_details_;
+  bool created_;
+  bool async_;
+
+  DISALLOW_COPY_AND_ASSIGN(File);
+};
+
+}  // namespace base
+
+#endif  // BASE_FILES_FILE_H_
+
diff --git a/base/files/file_descriptor_watcher_posix.cc b/base/files/file_descriptor_watcher_posix.cc
new file mode 100644
index 0000000..b26bf6c
--- /dev/null
+++ b/base/files/file_descriptor_watcher_posix.cc
@@ -0,0 +1,217 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file_descriptor_watcher_posix.h"
+
+#include "base/bind.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/message_loop/message_loop_current.h"
+#include "base/message_loop/message_pump_for_io.h"
+#include "base/sequenced_task_runner.h"
+#include "base/single_thread_task_runner.h"
+#include "base/threading/sequenced_task_runner_handle.h"
+#include "base/threading/thread_checker.h"
+#include "base/threading/thread_local.h"
+
+namespace base {
+
+namespace {
+
+// MessageLoopForIO used to watch file descriptors for which callbacks are
+// registered from a given thread.
+LazyInstance<ThreadLocalPointer<MessageLoopForIO>>::Leaky
+    tls_message_loop_for_io = LAZY_INSTANCE_INITIALIZER;
+
+}  // namespace
+
+FileDescriptorWatcher::Controller::~Controller() {
+  DCHECK(sequence_checker_.CalledOnValidSequence());
+
+  // Delete |watcher_| on the MessageLoopForIO.
+  //
+  // If the MessageLoopForIO is deleted before Watcher::StartWatching() runs,
+  // |watcher_| is leaked. If the MessageLoopForIO is deleted after
+  // Watcher::StartWatching() runs but before the DeleteSoon task runs,
+  // |watcher_| is deleted from Watcher::WillDestroyCurrentMessageLoop().
+  message_loop_for_io_task_runner_->DeleteSoon(FROM_HERE, watcher_.release());
+
+  // Since WeakPtrs are invalidated by the destructor, RunCallback() won't be
+  // invoked after this returns.
+}
+
+class FileDescriptorWatcher::Controller::Watcher
+    : public MessagePumpForIO::FdWatcher,
+      public MessageLoopCurrent::DestructionObserver {
+ public:
+  Watcher(WeakPtr<Controller> controller, MessagePumpForIO::Mode mode, int fd);
+  ~Watcher() override;
+
+  void StartWatching();
+
+ private:
+  friend class FileDescriptorWatcher;
+
+  // MessagePumpForIO::FdWatcher:
+  void OnFileCanReadWithoutBlocking(int fd) override;
+  void OnFileCanWriteWithoutBlocking(int fd) override;
+
+  // MessageLoopCurrent::DestructionObserver:
+  void WillDestroyCurrentMessageLoop() override;
+
+  // The MessageLoopForIO's watch handle (stops the watch when destroyed).
+  MessagePumpForIO::FdWatchController fd_watch_controller_;
+
+  // Runs tasks on the sequence on which this was instantiated (i.e. the
+  // sequence on which the callback must run).
+  const scoped_refptr<SequencedTaskRunner> callback_task_runner_ =
+      SequencedTaskRunnerHandle::Get();
+
+  // The Controller that created this Watcher.
+  WeakPtr<Controller> controller_;
+
+  // Whether this Watcher is notified when |fd_| becomes readable or writable
+  // without blocking.
+  const MessagePumpForIO::Mode mode_;
+
+  // The watched file descriptor.
+  const int fd_;
+
+  // Except for the constructor, every method of this class must run on the same
+  // MessageLoopForIO thread.
+  ThreadChecker thread_checker_;
+
+  // Whether this Watcher was registered as a DestructionObserver on the
+  // MessageLoopForIO thread.
+  bool registered_as_destruction_observer_ = false;
+
+  DISALLOW_COPY_AND_ASSIGN(Watcher);
+};
+
+FileDescriptorWatcher::Controller::Watcher::Watcher(
+    WeakPtr<Controller> controller,
+    MessagePumpForIO::Mode mode,
+    int fd)
+    : fd_watch_controller_(FROM_HERE),
+      controller_(controller),
+      mode_(mode),
+      fd_(fd) {
+  DCHECK(callback_task_runner_);
+  thread_checker_.DetachFromThread();
+}
+
+FileDescriptorWatcher::Controller::Watcher::~Watcher() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  MessageLoopCurrentForIO::Get()->RemoveDestructionObserver(this);
+}
+
+void FileDescriptorWatcher::Controller::Watcher::StartWatching() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  if (!MessageLoopCurrentForIO::Get()->WatchFileDescriptor(
+          fd_, false, mode_, &fd_watch_controller_, this)) {
+    // TODO(wez): Ideally we would [D]CHECK here, or propagate the failure back
+    // to the caller, but there is no guarantee that they haven't already
+    // closed |fd_| on another thread, so the best we can do is Debug-log.
+    DLOG(ERROR) << "Failed to watch fd=" << fd_;
+  }
+
+  if (!registered_as_destruction_observer_) {
+    MessageLoopCurrentForIO::Get()->AddDestructionObserver(this);
+    registered_as_destruction_observer_ = true;
+  }
+}
+
+void FileDescriptorWatcher::Controller::Watcher::OnFileCanReadWithoutBlocking(
+    int fd) {
+  DCHECK_EQ(fd_, fd);
+  DCHECK_EQ(MessagePumpForIO::WATCH_READ, mode_);
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  // Run the callback on the sequence on which the watch was initiated.
+  callback_task_runner_->PostTask(
+      FROM_HERE, BindOnce(&Controller::RunCallback, controller_));
+}
+
+void FileDescriptorWatcher::Controller::Watcher::OnFileCanWriteWithoutBlocking(
+    int fd) {
+  DCHECK_EQ(fd_, fd);
+  DCHECK_EQ(MessagePumpForIO::WATCH_WRITE, mode_);
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  // Run the callback on the sequence on which the watch was initiated.
+  callback_task_runner_->PostTask(
+      FROM_HERE, BindOnce(&Controller::RunCallback, controller_));
+}
+
+void FileDescriptorWatcher::Controller::Watcher::
+    WillDestroyCurrentMessageLoop() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  // A Watcher is owned by a Controller. When the Controller is deleted, it
+  // transfers ownership of the Watcher to a delete task posted to the
+  // MessageLoopForIO. If the MessageLoopForIO is deleted before the delete task
+  // runs, the following line takes care of deleting the Watcher.
+  delete this;
+}
+
+FileDescriptorWatcher::Controller::Controller(MessagePumpForIO::Mode mode,
+                                              int fd,
+                                              const Closure& callback)
+    : callback_(callback),
+      message_loop_for_io_task_runner_(
+          tls_message_loop_for_io.Get().Get()->task_runner()),
+      weak_factory_(this) {
+  DCHECK(!callback_.is_null());
+  DCHECK(message_loop_for_io_task_runner_);
+  watcher_ = std::make_unique<Watcher>(weak_factory_.GetWeakPtr(), mode, fd);
+  StartWatching();
+}
+
+void FileDescriptorWatcher::Controller::StartWatching() {
+  DCHECK(sequence_checker_.CalledOnValidSequence());
+  // It is safe to use Unretained() below because |watcher_| can only be deleted
+  // by a delete task posted to |message_loop_for_io_task_runner_| by this
+  // Controller's destructor. Since this delete task hasn't been posted yet, it
+  // can't run before the task posted below.
+  message_loop_for_io_task_runner_->PostTask(
+      FROM_HERE, BindOnce(&Watcher::StartWatching, Unretained(watcher_.get())));
+}
+
+void FileDescriptorWatcher::Controller::RunCallback() {
+  DCHECK(sequence_checker_.CalledOnValidSequence());
+
+  WeakPtr<Controller> weak_this = weak_factory_.GetWeakPtr();
+
+  callback_.Run();
+
+  // If |this| wasn't deleted, re-enable the watch.
+  if (weak_this)
+    StartWatching();
+}
+
+FileDescriptorWatcher::FileDescriptorWatcher(
+    MessageLoopForIO* message_loop_for_io) {
+  DCHECK(message_loop_for_io);
+  DCHECK(!tls_message_loop_for_io.Get().Get());
+  tls_message_loop_for_io.Get().Set(message_loop_for_io);
+}
+
+FileDescriptorWatcher::~FileDescriptorWatcher() {
+  tls_message_loop_for_io.Get().Set(nullptr);
+}
+
+std::unique_ptr<FileDescriptorWatcher::Controller>
+FileDescriptorWatcher::WatchReadable(int fd, const Closure& callback) {
+  return WrapUnique(new Controller(MessagePumpForIO::WATCH_READ, fd, callback));
+}
+
+std::unique_ptr<FileDescriptorWatcher::Controller>
+FileDescriptorWatcher::WatchWritable(int fd, const Closure& callback) {
+  return WrapUnique(
+      new Controller(MessagePumpForIO::WATCH_WRITE, fd, callback));
+}
+
+}  // namespace base
diff --git a/base/files/file_descriptor_watcher_posix.h b/base/files/file_descriptor_watcher_posix.h
new file mode 100644
index 0000000..aa44579
--- /dev/null
+++ b/base/files/file_descriptor_watcher_posix.h
@@ -0,0 +1,110 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FILES_FILE_DESCRIPTOR_WATCHER_POSIX_H_
+#define BASE_FILES_FILE_DESCRIPTOR_WATCHER_POSIX_H_
+
+#include <memory>
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/weak_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "base/message_loop/message_pump_for_io.h"
+#include "base/sequence_checker.h"
+
+namespace base {
+
+class SingleThreadTaskRunner;
+
+// The FileDescriptorWatcher API allows callbacks to be invoked when file
+// descriptors are readable or writable without blocking.
+//
+// To enable this API in unit tests, use a ScopedTaskEnvironment with
+// MainThreadType::IO.
+//
+// Note: Prefer FileDescriptorWatcher to MessageLoopForIO::WatchFileDescriptor()
+// for non-critical IO. FileDescriptorWatcher works on threads/sequences without
+// MessagePumps but involves going through the task queue after being notified
+// by the OS (a desirable property for non-critical IO that shouldn't preempt
+// the main queue).
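+//
+// Example usage (a sketch; |fd| is a file descriptor owned by the caller and
+// OnReadable() is a hypothetical free function):
+//   std::unique_ptr<FileDescriptorWatcher::Controller> controller =
+//       FileDescriptorWatcher::WatchReadable(fd, Bind(&OnReadable));
+//   // The callback runs on the current sequence; deleting |controller| stops
+//   // the watch.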
+class BASE_EXPORT FileDescriptorWatcher {
+ public:
+  // Instantiated and returned by WatchReadable() or WatchWritable(). The
+  // constructor registers a callback to be invoked when a file descriptor is
+  // readable or writable without blocking and the destructor unregisters it.
+  class Controller {
+   public:
+    // Unregisters the callback registered by the constructor.
+    ~Controller();
+
+   private:
+    friend class FileDescriptorWatcher;
+    class Watcher;
+
+    // Registers |callback| to be invoked when |fd| is readable or writable
+    // without blocking (depending on |mode|).
+    Controller(MessagePumpForIO::Mode mode, int fd, const Closure& callback);
+
+    // Starts watching the file descriptor.
+    void StartWatching();
+
+    // Runs |callback_|.
+    void RunCallback();
+
+    // The callback to run when the watched file descriptor is readable or
+    // writable without blocking.
+    Closure callback_;
+
+    // TaskRunner associated with the MessageLoopForIO that watches the file
+    // descriptor.
+    const scoped_refptr<SingleThreadTaskRunner>
+        message_loop_for_io_task_runner_;
+
+    // Notified by the MessageLoopForIO associated with
+    // |message_loop_for_io_task_runner_| when the watched file descriptor is
+    // readable or writable without blocking. Posts a task to run RunCallback()
+    // on the sequence on which the Controller was instantiated. When the
+    // Controller is deleted, ownership of |watcher_| is transferred to a delete
+    // task posted to the MessageLoopForIO. This ensures that |watcher_| isn't
+    // deleted while it is being used by the MessageLoopForIO.
+    std::unique_ptr<Watcher> watcher_;
+
+    // Validates that the Controller is used on the sequence on which it was
+    // instantiated.
+    SequenceChecker sequence_checker_;
+
+    WeakPtrFactory<Controller> weak_factory_;
+
+    DISALLOW_COPY_AND_ASSIGN(Controller);
+  };
+
+  // Registers |message_loop_for_io| to watch file descriptors for which
+  // callbacks are registered from the current thread via WatchReadable() or
+  // WatchWritable(). |message_loop_for_io| may run on another thread. The
+  // constructed FileDescriptorWatcher must not outlive |message_loop_for_io|.
+  explicit FileDescriptorWatcher(MessageLoopForIO* message_loop_for_io);
+  ~FileDescriptorWatcher();
+
+  // Registers |callback| to be posted on the current sequence when |fd| is
+  // readable or writable without blocking. |callback| is unregistered when the
+  // returned Controller is deleted (deletion must happen on the current
+  // sequence). To call these methods, a FileDescriptorWatcher must have been
+  // instantiated on the current thread and SequencedTaskRunnerHandle::IsSet()
+  // must return true (these conditions are met at least on all TaskScheduler
+  // threads as well as on threads backed by a MessageLoopForIO).
+  static std::unique_ptr<Controller> WatchReadable(int fd,
+                                                   const Closure& callback);
+  static std::unique_ptr<Controller> WatchWritable(int fd,
+                                                   const Closure& callback);
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(FileDescriptorWatcher);
+};
+
+}  // namespace base
+
+#endif  // BASE_FILES_FILE_DESCRIPTOR_WATCHER_POSIX_H_
diff --git a/base/files/file_descriptor_watcher_posix_unittest.cc b/base/files/file_descriptor_watcher_posix_unittest.cc
new file mode 100644
index 0000000..4ed044b
--- /dev/null
+++ b/base/files/file_descriptor_watcher_posix_unittest.cc
@@ -0,0 +1,318 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file_descriptor_watcher_posix.h"
+
+#include <unistd.h>
+
+#include <memory>
+
+#include "base/bind.h"
+#include "base/files/file_util.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/message_loop/message_loop.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/run_loop.h"
+#include "base/test/test_timeouts.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_checker_impl.h"
+#include "build/build_config.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+class Mock {
+ public:
+  Mock() = default;
+
+  MOCK_METHOD0(ReadableCallback, void());
+  MOCK_METHOD0(WritableCallback, void());
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(Mock);
+};
+
+enum class FileDescriptorWatcherTestType {
+  MESSAGE_LOOP_FOR_IO_ON_MAIN_THREAD,
+  MESSAGE_LOOP_FOR_IO_ON_OTHER_THREAD,
+};
+
+class FileDescriptorWatcherTest
+    : public testing::TestWithParam<FileDescriptorWatcherTestType> {
+ public:
+  FileDescriptorWatcherTest()
+      : message_loop_(GetParam() == FileDescriptorWatcherTestType::
+                                        MESSAGE_LOOP_FOR_IO_ON_MAIN_THREAD
+                          ? new MessageLoopForIO
+                          : new MessageLoop),
+        other_thread_("FileDescriptorWatcherTest_OtherThread") {}
+  ~FileDescriptorWatcherTest() override = default;
+
+  void SetUp() override {
+    ASSERT_EQ(0, pipe(pipe_fds_));
+
+    MessageLoop* message_loop_for_io;
+    if (GetParam() ==
+        FileDescriptorWatcherTestType::MESSAGE_LOOP_FOR_IO_ON_OTHER_THREAD) {
+      Thread::Options options;
+      options.message_loop_type = MessageLoop::TYPE_IO;
+      ASSERT_TRUE(other_thread_.StartWithOptions(options));
+      message_loop_for_io = other_thread_.message_loop();
+    } else {
+      message_loop_for_io = message_loop_.get();
+    }
+
+    ASSERT_TRUE(message_loop_for_io->IsType(MessageLoop::TYPE_IO));
+    file_descriptor_watcher_ = std::make_unique<FileDescriptorWatcher>(
+        static_cast<MessageLoopForIO*>(message_loop_for_io));
+  }
+
+  void TearDown() override {
+    if (GetParam() ==
+            FileDescriptorWatcherTestType::MESSAGE_LOOP_FOR_IO_ON_MAIN_THREAD &&
+        message_loop_) {
+      // Allow the delete task posted by the Controller's destructor to run.
+      base::RunLoop().RunUntilIdle();
+    }
+
+    // Ensure that OtherThread is done processing before closing fds.
+    other_thread_.Stop();
+
+    EXPECT_EQ(0, IGNORE_EINTR(close(pipe_fds_[0])));
+    EXPECT_EQ(0, IGNORE_EINTR(close(pipe_fds_[1])));
+  }
+
+ protected:
+  int read_file_descriptor() const { return pipe_fds_[0]; }
+  int write_file_descriptor() const { return pipe_fds_[1]; }
+
+  // Waits for a short delay and runs pending tasks.
+  void WaitAndRunPendingTasks() {
+    PlatformThread::Sleep(TestTimeouts::tiny_timeout());
+    RunLoop().RunUntilIdle();
+  }
+
+  // Registers ReadableCallback() to be called on |mock_| when
+  // read_file_descriptor() is readable without blocking.
+  std::unique_ptr<FileDescriptorWatcher::Controller> WatchReadable() {
+    std::unique_ptr<FileDescriptorWatcher::Controller> controller =
+        FileDescriptorWatcher::WatchReadable(
+            read_file_descriptor(),
+            Bind(&Mock::ReadableCallback, Unretained(&mock_)));
+    EXPECT_TRUE(controller);
+
+    // Unless read_file_descriptor() was readable before the callback was
+    // registered, this shouldn't do anything.
+    WaitAndRunPendingTasks();
+
+    return controller;
+  }
+
+  // Registers WritableCallback() to be called on |mock_| when
+  // write_file_descriptor() is writable without blocking.
+  std::unique_ptr<FileDescriptorWatcher::Controller> WatchWritable() {
+    std::unique_ptr<FileDescriptorWatcher::Controller> controller =
+        FileDescriptorWatcher::WatchWritable(
+            write_file_descriptor(),
+            Bind(&Mock::WritableCallback, Unretained(&mock_)));
+    EXPECT_TRUE(controller);
+    return controller;
+  }
+
+  void WriteByte() {
+    constexpr char kByte = '!';
+    ASSERT_TRUE(
+        WriteFileDescriptor(write_file_descriptor(), &kByte, sizeof(kByte)));
+  }
+
+  void ReadByte() {
+    // This is always called as part of the WatchReadable() callback, which
+    // should run on the main thread.
+    EXPECT_TRUE(thread_checker_.CalledOnValidThread());
+
+    char buffer;
+    ASSERT_TRUE(ReadFromFD(read_file_descriptor(), &buffer, sizeof(buffer)));
+  }
+
+  // Mock on which callbacks are invoked.
+  testing::StrictMock<Mock> mock_;
+
+  // MessageLoop bound to the main thread.
+  std::unique_ptr<MessageLoop> message_loop_;
+
+  // Thread running a MessageLoopForIO. Used when the test type is
+  // MESSAGE_LOOP_FOR_IO_ON_OTHER_THREAD.
+  Thread other_thread_;
+
+ private:
+  // Determines which MessageLoopForIO is used to watch file descriptors for
+  // which callbacks are registered on the main thread.
+  std::unique_ptr<FileDescriptorWatcher> file_descriptor_watcher_;
+
+  // Watched file descriptors.
+  int pipe_fds_[2];
+
+  // Used to verify that callbacks run on the thread on which they are
+  // registered.
+  ThreadCheckerImpl thread_checker_;
+
+  DISALLOW_COPY_AND_ASSIGN(FileDescriptorWatcherTest);
+};
+
+}  // namespace
+
+TEST_P(FileDescriptorWatcherTest, WatchWritable) {
+  auto controller = WatchWritable();
+
+  // The write end of a newly created pipe is immediately writable.
+  RunLoop run_loop;
+  EXPECT_CALL(mock_, WritableCallback())
+      .WillOnce(testing::Invoke(&run_loop, &RunLoop::Quit));
+  run_loop.Run();
+}
+
+TEST_P(FileDescriptorWatcherTest, WatchReadableOneByte) {
+  auto controller = WatchReadable();
+
+  // Write 1 byte to the pipe, making it readable without blocking. Expect one
+  // call to ReadableCallback() which will read 1 byte from the pipe.
+  WriteByte();
+  RunLoop run_loop;
+  EXPECT_CALL(mock_, ReadableCallback())
+      .WillOnce(testing::Invoke([this, &run_loop]() {
+        ReadByte();
+        run_loop.Quit();
+      }));
+  run_loop.Run();
+  testing::Mock::VerifyAndClear(&mock_);
+
+  // No more call to ReadableCallback() is expected.
+  WaitAndRunPendingTasks();
+}
+
+TEST_P(FileDescriptorWatcherTest, WatchReadableTwoBytes) {
+  auto controller = WatchReadable();
+
+  // Write 2 bytes to the pipe. Expect two calls to ReadableCallback() which
+  // will each read 1 byte from the pipe.
+  WriteByte();
+  WriteByte();
+  RunLoop run_loop;
+  EXPECT_CALL(mock_, ReadableCallback())
+      .WillOnce(testing::Invoke([this]() { ReadByte(); }))
+      .WillOnce(testing::Invoke([this, &run_loop]() {
+        ReadByte();
+        run_loop.Quit();
+      }));
+  run_loop.Run();
+  testing::Mock::VerifyAndClear(&mock_);
+
+  // No more call to ReadableCallback() is expected.
+  WaitAndRunPendingTasks();
+}
+
+TEST_P(FileDescriptorWatcherTest, WatchReadableByteWrittenFromCallback) {
+  auto controller = WatchReadable();
+
+  // Write 1 byte to the pipe. Expect one call to ReadableCallback() from which
+  // 1 byte is read and 1 byte is written to the pipe. Then, expect another call
+  // to ReadableCallback() from which the remaining byte is read from the pipe.
+  WriteByte();
+  RunLoop run_loop;
+  EXPECT_CALL(mock_, ReadableCallback())
+      .WillOnce(testing::Invoke([this]() {
+        ReadByte();
+        WriteByte();
+      }))
+      .WillOnce(testing::Invoke([this, &run_loop]() {
+        ReadByte();
+        run_loop.Quit();
+      }));
+  run_loop.Run();
+  testing::Mock::VerifyAndClear(&mock_);
+
+  // No more call to ReadableCallback() is expected.
+  WaitAndRunPendingTasks();
+}
+
+TEST_P(FileDescriptorWatcherTest, DeleteControllerFromCallback) {
+  auto controller = WatchReadable();
+
+  // Write 1 byte to the pipe. Expect one call to ReadableCallback() from which
+  // |controller| is deleted.
+  WriteByte();
+  RunLoop run_loop;
+  EXPECT_CALL(mock_, ReadableCallback())
+      .WillOnce(testing::Invoke([&run_loop, &controller]() {
+        controller = nullptr;
+        run_loop.Quit();
+      }));
+  run_loop.Run();
+  testing::Mock::VerifyAndClear(&mock_);
+
+  // Since |controller| has been deleted, no call to ReadableCallback() is
+  // expected even though the pipe is still readable without blocking.
+  WaitAndRunPendingTasks();
+}
+
+TEST_P(FileDescriptorWatcherTest,
+       DeleteControllerBeforeFileDescriptorReadable) {
+  auto controller = WatchReadable();
+
+  // Cancel the watch.
+  controller = nullptr;
+
+  // Write 1 byte to the pipe to make it readable without blocking.
+  WriteByte();
+
+  // No call to ReadableCallback() is expected.
+  WaitAndRunPendingTasks();
+}
+
+TEST_P(FileDescriptorWatcherTest, DeleteControllerAfterFileDescriptorReadable) {
+  auto controller = WatchReadable();
+
+  // Write 1 byte to the pipe to make it readable without blocking.
+  WriteByte();
+
+  // Cancel the watch.
+  controller = nullptr;
+
+  // No call to ReadableCallback() is expected.
+  WaitAndRunPendingTasks();
+}
+
+TEST_P(FileDescriptorWatcherTest, DeleteControllerAfterDeleteMessageLoopForIO) {
+  auto controller = WatchReadable();
+
+  // Delete the MessageLoopForIO.
+  if (GetParam() ==
+      FileDescriptorWatcherTestType::MESSAGE_LOOP_FOR_IO_ON_MAIN_THREAD) {
+    message_loop_ = nullptr;
+  } else {
+    other_thread_.Stop();
+  }
+
+  // Deleting |controller| shouldn't crash even though that causes a task to be
+  // posted to the MessageLoopForIO thread.
+  controller = nullptr;
+}
+
+INSTANTIATE_TEST_CASE_P(
+    MessageLoopForIOOnMainThread,
+    FileDescriptorWatcherTest,
+    ::testing::Values(
+        FileDescriptorWatcherTestType::MESSAGE_LOOP_FOR_IO_ON_MAIN_THREAD));
+INSTANTIATE_TEST_CASE_P(
+    MessageLoopForIOOnOtherThread,
+    FileDescriptorWatcherTest,
+    ::testing::Values(
+        FileDescriptorWatcherTestType::MESSAGE_LOOP_FOR_IO_ON_OTHER_THREAD));
+
+}  // namespace base
diff --git a/base/files/file_enumerator.cc b/base/files/file_enumerator.cc
new file mode 100644
index 0000000..9dfb2ba
--- /dev/null
+++ b/base/files/file_enumerator.cc
@@ -0,0 +1,25 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file_enumerator.h"
+
+#include "base/files/file_util.h"
+
+namespace base {
+
+FileEnumerator::FileInfo::~FileInfo() = default;
+
+bool FileEnumerator::ShouldSkip(const FilePath& path) {
+  FilePath::StringType basename = path.BaseName().value();
+  return basename == FILE_PATH_LITERAL(".") ||
+         (basename == FILE_PATH_LITERAL("..") &&
+          !(INCLUDE_DOT_DOT & file_type_));
+}
+
+bool FileEnumerator::IsTypeMatched(bool is_dir) const {
+  return (file_type_ &
+          (is_dir ? FileEnumerator::DIRECTORIES : FileEnumerator::FILES)) != 0;
+}
+
+}  // namespace base
diff --git a/base/files/file_enumerator.h b/base/files/file_enumerator.h
new file mode 100644
index 0000000..0fa99a6
--- /dev/null
+++ b/base/files/file_enumerator.h
@@ -0,0 +1,174 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FILES_FILE_ENUMERATOR_H_
+#define BASE_FILES_FILE_ENUMERATOR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/containers/stack.h"
+#include "base/files/file_path.h"
+#include "base/macros.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include <sys/stat.h>
+#include <unistd.h>
+#endif
+
+namespace base {
+
+// A class for enumerating the files in a provided path. The order of the
+// results is not guaranteed.
+//
+// This is blocking. Do not use on critical threads.
+//
+// Example:
+//
+//   base::FileEnumerator e(my_dir, false, base::FileEnumerator::FILES,
+//                          FILE_PATH_LITERAL("*.txt"));
+//   for (base::FilePath name = e.Next(); !name.empty(); name = e.Next())
+//     ...
+class BASE_EXPORT FileEnumerator {
+ public:
+  // Note: copy & assign supported.
+  class BASE_EXPORT FileInfo {
+   public:
+    FileInfo();
+    ~FileInfo();
+
+    bool IsDirectory() const;
+
+    // The name of the file. This will not include any path information. This
+    // is in contrast to the value returned by FileEnumerator::Next(), which
+    // includes the |root_path| passed into the FileEnumerator constructor.
+    FilePath GetName() const;
+
+    int64_t GetSize() const;
+    Time GetLastModifiedTime() const;
+
+#if defined(OS_WIN)
+    // Note that the cAlternateFileName (used to hold the "short" 8.3 name)
+    // of the WIN32_FIND_DATA will be empty. Since we don't use short file
+    // names, we tell Windows to omit it which speeds up the query slightly.
+    const WIN32_FIND_DATA& find_data() const { return find_data_; }
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+    const struct stat& stat() const { return stat_; }
+#endif
+
+   private:
+    friend class FileEnumerator;
+
+#if defined(OS_WIN)
+    WIN32_FIND_DATA find_data_;
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+    struct stat stat_;
+    FilePath filename_;
+#endif
+  };
+
+  enum FileType {
+    FILES = 1 << 0,
+    DIRECTORIES = 1 << 1,
+    INCLUDE_DOT_DOT = 1 << 2,
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+    SHOW_SYM_LINKS = 1 << 4,
+#endif
+  };
+
+  // Search policy for intermediate folders.
+  enum class FolderSearchPolicy {
+    // Recursive search will pass through folders whose names match the
+    // pattern. Inside each one, all files will be returned. Folders whose
+    // names do not match the pattern are not recursed into at all.
+    MATCH_ONLY,
+    // Recursive search will pass through every folder and perform pattern
+    // matching inside each one.
+    ALL,
+  };
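+
+  // For example, a recursive search with pattern "foo*" over the tree
+  // {foo_dir/bar.txt, other/foo.txt} yields foo_dir and foo_dir/bar.txt under
+  // MATCH_ONLY, but foo_dir and other/foo.txt under ALL (a sketch of the
+  // behavior exercised in file_enumerator_unittest.cc).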
+
+  // |root_path| is the starting directory to search in. It may or may not end
+  // in a slash.
+  //
+  // If |recursive| is true, this will enumerate all matches in any
+  // subdirectories matched as well. It does a breadth-first search, so all
+  // files in one directory will be returned before any files in a
+  // subdirectory.
+  //
+  // |file_type|, a bit mask of FileType, specifies whether the enumerator
+  // should match files, directories, or both.
+  //
+  // |pattern| is an optional pattern for which files to match. This
+  // works like shell globbing. For example, "*.txt" or "Foo???.doc".
+  // However, be careful in specifying patterns that aren't cross platform
+  // since the underlying code uses OS-specific matching routines.  In general,
+  // Windows matching is less featureful than others, so test there first.
+  // If unspecified, this will match all files.
+  FileEnumerator(const FilePath& root_path,
+                 bool recursive,
+                 int file_type);
+  FileEnumerator(const FilePath& root_path,
+                 bool recursive,
+                 int file_type,
+                 const FilePath::StringType& pattern);
+  FileEnumerator(const FilePath& root_path,
+                 bool recursive,
+                 int file_type,
+                 const FilePath::StringType& pattern,
+                 FolderSearchPolicy folder_search_policy);
+  ~FileEnumerator();
+
+  // Returns the next file, or an empty path if there are no more results.
+  //
+  // The returned path will incorporate the |root_path| passed in the
+  // constructor: "<root_path>/file_name.txt". If the |root_path| is absolute,
+  // then so will be the result of Next().
+  FilePath Next();
+
+  // Returns info about the file last returned by Next().
+  FileInfo GetInfo() const;
+
+ private:
+  // Returns true if the given path should be skipped in enumeration.
+  bool ShouldSkip(const FilePath& path);
+
+  bool IsTypeMatched(bool is_dir) const;
+
+  bool IsPatternMatched(const FilePath& src) const;
+
+#if defined(OS_WIN)
+  // True when find_data_ is valid.
+  bool has_find_data_ = false;
+  WIN32_FIND_DATA find_data_;
+  HANDLE find_handle_ = INVALID_HANDLE_VALUE;
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  // The files in the current directory
+  std::vector<FileInfo> directory_entries_;
+
+  // The next entry to use from the directory_entries_ vector
+  size_t current_directory_entry_;
+#endif
+  FilePath root_path_;
+  const bool recursive_;
+  const int file_type_;
+  FilePath::StringType pattern_;
+  const FolderSearchPolicy folder_search_policy_;
+
+  // A stack that keeps track of which subdirectories we still need to
+  // enumerate in the breadth-first search.
+  base::stack<FilePath> pending_paths_;
+
+  DISALLOW_COPY_AND_ASSIGN(FileEnumerator);
+};
+
+}  // namespace base
+
+#endif  // BASE_FILES_FILE_ENUMERATOR_H_
diff --git a/base/files/file_enumerator_posix.cc b/base/files/file_enumerator_posix.cc
new file mode 100644
index 0000000..4b429c6
--- /dev/null
+++ b/base/files/file_enumerator_posix.cc
@@ -0,0 +1,188 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file_enumerator.h"
+
+#include <dirent.h>
+#include <errno.h>
+#include <fnmatch.h>
+#include <stdint.h>
+#include <string.h>
+
+#include "base/logging.h"
+#include "base/threading/thread_restrictions.h"
+#include "build/build_config.h"
+
+namespace base {
+namespace {
+
+void GetStat(const FilePath& path, bool show_links, struct stat* st) {
+  DCHECK(st);
+  const int res = show_links ? lstat(path.value().c_str(), st)
+                             : stat(path.value().c_str(), st);
+  if (res < 0) {
+    // Print the stat() error message unless it was ENOENT and we're following
+    // symlinks.
+    if (!(errno == ENOENT && !show_links))
+      DPLOG(ERROR) << "Couldn't stat " << path.value();
+    memset(st, 0, sizeof(*st));
+  }
+}
+
+}  // namespace
+
+// FileEnumerator::FileInfo ----------------------------------------------------
+
+FileEnumerator::FileInfo::FileInfo() {
+  memset(&stat_, 0, sizeof(stat_));
+}
+
+bool FileEnumerator::FileInfo::IsDirectory() const {
+  return S_ISDIR(stat_.st_mode);
+}
+
+FilePath FileEnumerator::FileInfo::GetName() const {
+  return filename_;
+}
+
+int64_t FileEnumerator::FileInfo::GetSize() const {
+  return stat_.st_size;
+}
+
+base::Time FileEnumerator::FileInfo::GetLastModifiedTime() const {
+  return base::Time::FromTimeT(stat_.st_mtime);
+}
+
+// FileEnumerator --------------------------------------------------------------
+
+FileEnumerator::FileEnumerator(const FilePath& root_path,
+                               bool recursive,
+                               int file_type)
+    : FileEnumerator(root_path,
+                     recursive,
+                     file_type,
+                     FilePath::StringType(),
+                     FolderSearchPolicy::MATCH_ONLY) {}
+
+FileEnumerator::FileEnumerator(const FilePath& root_path,
+                               bool recursive,
+                               int file_type,
+                               const FilePath::StringType& pattern)
+    : FileEnumerator(root_path,
+                     recursive,
+                     file_type,
+                     pattern,
+                     FolderSearchPolicy::MATCH_ONLY) {}
+
+FileEnumerator::FileEnumerator(const FilePath& root_path,
+                               bool recursive,
+                               int file_type,
+                               const FilePath::StringType& pattern,
+                               FolderSearchPolicy folder_search_policy)
+    : current_directory_entry_(0),
+      root_path_(root_path),
+      recursive_(recursive),
+      file_type_(file_type),
+      pattern_(pattern),
+      folder_search_policy_(folder_search_policy) {
+  // INCLUDE_DOT_DOT must not be specified if recursive.
+  DCHECK(!(recursive && (INCLUDE_DOT_DOT & file_type_)));
+
+  pending_paths_.push(root_path);
+}
+
+FileEnumerator::~FileEnumerator() = default;
+
+FilePath FileEnumerator::Next() {
+  AssertBlockingAllowed();
+
+  ++current_directory_entry_;
+
+  // While we've exhausted the entries in the current directory, move on to
+  // the next pending one.
+  while (current_directory_entry_ >= directory_entries_.size()) {
+    if (pending_paths_.empty())
+      return FilePath();
+
+    root_path_ = pending_paths_.top();
+    root_path_ = root_path_.StripTrailingSeparators();
+    pending_paths_.pop();
+
+    DIR* dir = opendir(root_path_.value().c_str());
+    if (!dir)
+      continue;
+
+    directory_entries_.clear();
+
+#if defined(OS_FUCHSIA)
+    // Fuchsia does not support .. on the file system server side, see
+    // https://fuchsia.googlesource.com/docs/+/master/dotdot.md and
+    // https://crbug.com/735540. However, for UI purposes, having the parent
+    // directory show up in directory listings makes sense, so we add it here to
+    // match the expectation on other operating systems. In cases where this
+    // is useful it should be resolvable locally.
+    FileInfo dotdot;
+    dotdot.stat_.st_mode = S_IFDIR;
+    dotdot.filename_ = FilePath("..");
+    if (!ShouldSkip(dotdot.filename_)) {
+      directory_entries_.push_back(std::move(dotdot));
+    }
+#endif  // OS_FUCHSIA
+
+    current_directory_entry_ = 0;
+    struct dirent* dent;
+    while ((dent = readdir(dir))) {
+      FileInfo info;
+      info.filename_ = FilePath(dent->d_name);
+
+      if (ShouldSkip(info.filename_))
+        continue;
+
+      const bool is_pattern_matched = IsPatternMatched(info.filename_);
+
+      // MATCH_ONLY policy enumerates only files and directories that match
+      // the pattern, so we can skip further checks early.
+      if (folder_search_policy_ == FolderSearchPolicy::MATCH_ONLY &&
+          !is_pattern_matched)
+        continue;
+
+      // Do not call OS stat/lstat when there is no need. If the pattern is not
+      // matched (the file will not appear in the results) and the search is
+      // not recursive (a matching directory would not be added to the pending
+      // paths), there is no point in statting the entry below.
+      if (!recursive_ && !is_pattern_matched)
+        continue;
+
+      const FilePath full_path = root_path_.Append(info.filename_);
+      GetStat(full_path, file_type_ & SHOW_SYM_LINKS, &info.stat_);
+
+      const bool is_dir = info.IsDirectory();
+
+      if (recursive_ && is_dir)
+        pending_paths_.push(full_path);
+
+      if (is_pattern_matched && IsTypeMatched(is_dir))
+        directory_entries_.push_back(std::move(info));
+    }
+    closedir(dir);
+
+    // MATCH_ONLY policy enumerates files in matched subfolders with the "*"
+    // pattern. ALL policy enumerates files in all subfolders with the original
+    // pattern.
+    if (folder_search_policy_ == FolderSearchPolicy::MATCH_ONLY)
+      pattern_.clear();
+  }
+
+  return root_path_.Append(
+      directory_entries_[current_directory_entry_].filename_);
+}
+
+FileEnumerator::FileInfo FileEnumerator::GetInfo() const {
+  return directory_entries_[current_directory_entry_];
+}
+
+bool FileEnumerator::IsPatternMatched(const FilePath& path) const {
+  return pattern_.empty() ||
+         !fnmatch(pattern_.c_str(), path.value().c_str(), FNM_NOESCAPE);
+}
+
+}  // namespace base
diff --git a/base/files/file_enumerator_unittest.cc b/base/files/file_enumerator_unittest.cc
new file mode 100644
index 0000000..11df075
--- /dev/null
+++ b/base/files/file_enumerator_unittest.cc
@@ -0,0 +1,312 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file_enumerator.h"
+
+#include <utility>
+#include <vector>
+
+#include "base/containers/circular_deque.h"
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/files/scoped_temp_dir.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using testing::ElementsAre;
+using testing::IsEmpty;
+using testing::UnorderedElementsAre;
+
+namespace base {
+namespace {
+
+const FilePath::StringType kEmptyPattern;
+
+const std::vector<FileEnumerator::FolderSearchPolicy> kFolderSearchPolicies{
+    FileEnumerator::FolderSearchPolicy::MATCH_ONLY,
+    FileEnumerator::FolderSearchPolicy::ALL};
+
+circular_deque<FilePath> RunEnumerator(
+    const FilePath& root_path,
+    bool recursive,
+    int file_type,
+    const FilePath::StringType& pattern,
+    FileEnumerator::FolderSearchPolicy folder_search_policy) {
+  circular_deque<FilePath> rv;
+  FileEnumerator enumerator(root_path, recursive, file_type, pattern,
+                            folder_search_policy);
+  for (auto file = enumerator.Next(); !file.empty(); file = enumerator.Next())
+    rv.emplace_back(std::move(file));
+  return rv;
+}
+
+bool CreateDummyFile(const FilePath& path) {
+  return WriteFile(path, "42", sizeof("42")) == sizeof("42");
+}
+
+}  // namespace
+
+TEST(FileEnumerator, NotExistingPath) {
+  const FilePath path = FilePath::FromUTF8Unsafe("some_not_existing_path");
+  ASSERT_FALSE(PathExists(path));
+
+  for (auto policy : kFolderSearchPolicies) {
+    const auto files = RunEnumerator(
+        path, true, FileEnumerator::FILES | FileEnumerator::DIRECTORIES,
+        FILE_PATH_LITERAL(""), policy);
+    EXPECT_THAT(files, IsEmpty());
+  }
+}
+
+TEST(FileEnumerator, EmptyFolder) {
+  ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+
+  for (auto policy : kFolderSearchPolicies) {
+    const auto files =
+        RunEnumerator(temp_dir.GetPath(), true,
+                      FileEnumerator::FILES | FileEnumerator::DIRECTORIES,
+                      kEmptyPattern, policy);
+    EXPECT_THAT(files, IsEmpty());
+  }
+}
+
+TEST(FileEnumerator, SingleFileInFolderForFileSearch) {
+  ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+
+  const FilePath& path = temp_dir.GetPath();
+  const FilePath file = path.AppendASCII("test.txt");
+  ASSERT_TRUE(CreateDummyFile(file));
+
+  for (auto policy : kFolderSearchPolicies) {
+    const auto files = RunEnumerator(
+        temp_dir.GetPath(), true, FileEnumerator::FILES, kEmptyPattern, policy);
+    EXPECT_THAT(files, ElementsAre(file));
+  }
+}
+
+TEST(FileEnumerator, SingleFileInFolderForDirSearch) {
+  ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+
+  const FilePath& path = temp_dir.GetPath();
+  ASSERT_TRUE(CreateDummyFile(path.AppendASCII("test.txt")));
+
+  for (auto policy : kFolderSearchPolicies) {
+    const auto files = RunEnumerator(path, true, FileEnumerator::DIRECTORIES,
+                                     kEmptyPattern, policy);
+    EXPECT_THAT(files, IsEmpty());
+  }
+}
+
+TEST(FileEnumerator, SingleFileInFolderWithFiltering) {
+  ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+
+  const FilePath& path = temp_dir.GetPath();
+  const FilePath file = path.AppendASCII("test.txt");
+  ASSERT_TRUE(CreateDummyFile(file));
+
+  for (auto policy : kFolderSearchPolicies) {
+    auto files = RunEnumerator(path, true, FileEnumerator::FILES,
+                               FILE_PATH_LITERAL("*.txt"), policy);
+    EXPECT_THAT(files, ElementsAre(file));
+
+    files = RunEnumerator(path, true, FileEnumerator::FILES,
+                          FILE_PATH_LITERAL("*.pdf"), policy);
+    EXPECT_THAT(files, IsEmpty());
+  }
+}
+
+TEST(FileEnumerator, TwoFilesInFolder) {
+  ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+
+  const FilePath& path = temp_dir.GetPath();
+  const FilePath foo_txt = path.AppendASCII("foo.txt");
+  const FilePath bar_txt = path.AppendASCII("bar.txt");
+  ASSERT_TRUE(CreateDummyFile(foo_txt));
+  ASSERT_TRUE(CreateDummyFile(bar_txt));
+
+  for (auto policy : kFolderSearchPolicies) {
+    auto files = RunEnumerator(path, true, FileEnumerator::FILES,
+                               FILE_PATH_LITERAL("*.txt"), policy);
+    EXPECT_THAT(files, UnorderedElementsAre(foo_txt, bar_txt));
+
+    files = RunEnumerator(path, true, FileEnumerator::FILES,
+                          FILE_PATH_LITERAL("foo*"), policy);
+    EXPECT_THAT(files, ElementsAre(foo_txt));
+
+    files = RunEnumerator(path, true, FileEnumerator::FILES,
+                          FILE_PATH_LITERAL("*.pdf"), policy);
+    EXPECT_THAT(files, IsEmpty());
+
+    files =
+        RunEnumerator(path, true, FileEnumerator::FILES, kEmptyPattern, policy);
+    EXPECT_THAT(files, UnorderedElementsAre(foo_txt, bar_txt));
+  }
+}
+
+TEST(FileEnumerator, SingleFolderInFolderForFileSearch) {
+  ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+
+  const FilePath& path = temp_dir.GetPath();
+
+  ScopedTempDir temp_subdir;
+  ASSERT_TRUE(temp_subdir.CreateUniqueTempDirUnderPath(path));
+
+  for (auto policy : kFolderSearchPolicies) {
+    const auto files =
+        RunEnumerator(path, true, FileEnumerator::FILES, kEmptyPattern, policy);
+    EXPECT_THAT(files, IsEmpty());
+  }
+}
+
+TEST(FileEnumerator, SingleFolderInFolderForDirSearch) {
+  ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+
+  const FilePath& path = temp_dir.GetPath();
+
+  ScopedTempDir temp_subdir;
+  ASSERT_TRUE(temp_subdir.CreateUniqueTempDirUnderPath(path));
+
+  for (auto policy : kFolderSearchPolicies) {
+    const auto files = RunEnumerator(path, true, FileEnumerator::DIRECTORIES,
+                                     kEmptyPattern, policy);
+    EXPECT_THAT(files, ElementsAre(temp_subdir.GetPath()));
+  }
+}
+
+TEST(FileEnumerator, TwoFoldersInFolder) {
+  ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+
+  const FilePath& path = temp_dir.GetPath();
+
+  const FilePath subdir_foo = path.AppendASCII("foo");
+  const FilePath subdir_bar = path.AppendASCII("bar");
+  ASSERT_TRUE(CreateDirectory(subdir_foo));
+  ASSERT_TRUE(CreateDirectory(subdir_bar));
+
+  for (auto policy : kFolderSearchPolicies) {
+    auto files = RunEnumerator(path, true, FileEnumerator::DIRECTORIES,
+                               kEmptyPattern, policy);
+    EXPECT_THAT(files, UnorderedElementsAre(subdir_foo, subdir_bar));
+
+    files = RunEnumerator(path, true, FileEnumerator::DIRECTORIES,
+                          FILE_PATH_LITERAL("foo"), policy);
+    EXPECT_THAT(files, ElementsAre(subdir_foo));
+  }
+}
+
+TEST(FileEnumerator, FolderAndFileInFolder) {
+  ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+
+  const FilePath& path = temp_dir.GetPath();
+
+  ScopedTempDir temp_subdir;
+  ASSERT_TRUE(temp_subdir.CreateUniqueTempDirUnderPath(path));
+  const FilePath file = path.AppendASCII("test.txt");
+  ASSERT_TRUE(CreateDummyFile(file));
+
+  for (auto policy : kFolderSearchPolicies) {
+    auto files =
+        RunEnumerator(path, true, FileEnumerator::FILES, kEmptyPattern, policy);
+    EXPECT_THAT(files, ElementsAre(file));
+
+    files = RunEnumerator(path, true, FileEnumerator::DIRECTORIES,
+                          kEmptyPattern, policy);
+    EXPECT_THAT(files, ElementsAre(temp_subdir.GetPath()));
+
+    files = RunEnumerator(path, true,
+                          FileEnumerator::FILES | FileEnumerator::DIRECTORIES,
+                          kEmptyPattern, policy);
+    EXPECT_THAT(files, UnorderedElementsAre(file, temp_subdir.GetPath()));
+  }
+}
+
+TEST(FileEnumerator, FilesInParentFolderAlwaysFirst) {
+  ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+
+  const FilePath& path = temp_dir.GetPath();
+
+  ScopedTempDir temp_subdir;
+  ASSERT_TRUE(temp_subdir.CreateUniqueTempDirUnderPath(path));
+  const FilePath foo_txt = path.AppendASCII("foo.txt");
+  const FilePath bar_txt = temp_subdir.GetPath().AppendASCII("bar.txt");
+  ASSERT_TRUE(CreateDummyFile(foo_txt));
+  ASSERT_TRUE(CreateDummyFile(bar_txt));
+
+  for (auto policy : kFolderSearchPolicies) {
+    const auto files =
+        RunEnumerator(path, true, FileEnumerator::FILES, kEmptyPattern, policy);
+    EXPECT_THAT(files, ElementsAre(foo_txt, bar_txt));
+  }
+}
+
+TEST(FileEnumerator, FileInSubfolder) {
+  ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+
+  const FilePath subdir = temp_dir.GetPath().AppendASCII("subdir");
+  ASSERT_TRUE(CreateDirectory(subdir));
+
+  const FilePath file = subdir.AppendASCII("test.txt");
+  ASSERT_TRUE(CreateDummyFile(file));
+
+  for (auto policy : kFolderSearchPolicies) {
+    auto files = RunEnumerator(temp_dir.GetPath(), true, FileEnumerator::FILES,
+                               kEmptyPattern, policy);
+    EXPECT_THAT(files, ElementsAre(file));
+
+    files = RunEnumerator(temp_dir.GetPath(), false, FileEnumerator::FILES,
+                          kEmptyPattern, policy);
+    EXPECT_THAT(files, IsEmpty());
+  }
+}
+
+TEST(FileEnumerator, FilesInSubfoldersWithFiltering) {
+  ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+
+  const FilePath test_txt = temp_dir.GetPath().AppendASCII("test.txt");
+  const FilePath subdir_foo = temp_dir.GetPath().AppendASCII("foo_subdir");
+  const FilePath subdir_bar = temp_dir.GetPath().AppendASCII("bar_subdir");
+  const FilePath foo_test = subdir_foo.AppendASCII("test.txt");
+  const FilePath foo_foo = subdir_foo.AppendASCII("foo.txt");
+  const FilePath foo_bar = subdir_foo.AppendASCII("bar.txt");
+  const FilePath bar_test = subdir_bar.AppendASCII("test.txt");
+  const FilePath bar_foo = subdir_bar.AppendASCII("foo.txt");
+  const FilePath bar_bar = subdir_bar.AppendASCII("bar.txt");
+  ASSERT_TRUE(CreateDummyFile(test_txt));
+  ASSERT_TRUE(CreateDirectory(subdir_foo));
+  ASSERT_TRUE(CreateDirectory(subdir_bar));
+  ASSERT_TRUE(CreateDummyFile(foo_test));
+  ASSERT_TRUE(CreateDummyFile(foo_foo));
+  ASSERT_TRUE(CreateDummyFile(foo_bar));
+  ASSERT_TRUE(CreateDummyFile(bar_test));
+  ASSERT_TRUE(CreateDummyFile(bar_foo));
+  ASSERT_TRUE(CreateDummyFile(bar_bar));
+
+  auto files =
+      RunEnumerator(temp_dir.GetPath(), true,
+                    FileEnumerator::FILES | FileEnumerator::DIRECTORIES,
+                    FILE_PATH_LITERAL("foo*"),
+                    FileEnumerator::FolderSearchPolicy::MATCH_ONLY);
+  EXPECT_THAT(files,
+              UnorderedElementsAre(subdir_foo, foo_test, foo_foo, foo_bar));
+
+  files = RunEnumerator(temp_dir.GetPath(), true,
+                        FileEnumerator::FILES | FileEnumerator::DIRECTORIES,
+                        FILE_PATH_LITERAL("foo*"),
+                        FileEnumerator::FolderSearchPolicy::ALL);
+  EXPECT_THAT(files, UnorderedElementsAre(subdir_foo, foo_foo, bar_foo));
+}
+
+}  // namespace base
diff --git a/base/files/file_enumerator_win.cc b/base/files/file_enumerator_win.cc
new file mode 100644
index 0000000..f96074c
--- /dev/null
+++ b/base/files/file_enumerator_win.cc
@@ -0,0 +1,195 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file_enumerator.h"
+
+#include <shlwapi.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <limits>
+
+#include "base/logging.h"
+#include "base/threading/thread_restrictions.h"
+
+namespace base {
+
+namespace {
+
+FilePath BuildSearchFilter(FileEnumerator::FolderSearchPolicy policy,
+                           const FilePath& root_path,
+                           const FilePath::StringType& pattern) {
+  // MATCH_ONLY policy filters incoming files by pattern on the OS side. ALL
+  // policy collects all files and filters them manually.
+  switch (policy) {
+    case FileEnumerator::FolderSearchPolicy::MATCH_ONLY:
+      return root_path.Append(pattern);
+    case FileEnumerator::FolderSearchPolicy::ALL:
+      return root_path.Append(L"*");
+  }
+  NOTREACHED();
+  return {};
+}
+
+}  // namespace
+
+// FileEnumerator::FileInfo ----------------------------------------------------
+
+FileEnumerator::FileInfo::FileInfo() {
+  memset(&find_data_, 0, sizeof(find_data_));
+}
+
+bool FileEnumerator::FileInfo::IsDirectory() const {
+  return (find_data_.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0;
+}
+
+FilePath FileEnumerator::FileInfo::GetName() const {
+  return FilePath(find_data_.cFileName);
+}
+
+int64_t FileEnumerator::FileInfo::GetSize() const {
+  ULARGE_INTEGER size;
+  size.HighPart = find_data_.nFileSizeHigh;
+  size.LowPart = find_data_.nFileSizeLow;
+  DCHECK_LE(size.QuadPart,
+            static_cast<ULONGLONG>(std::numeric_limits<int64_t>::max()));
+  return static_cast<int64_t>(size.QuadPart);
+}
+
+base::Time FileEnumerator::FileInfo::GetLastModifiedTime() const {
+  return base::Time::FromFileTime(find_data_.ftLastWriteTime);
+}
+
+// FileEnumerator --------------------------------------------------------------
+
+FileEnumerator::FileEnumerator(const FilePath& root_path,
+                               bool recursive,
+                               int file_type)
+    : FileEnumerator(root_path,
+                     recursive,
+                     file_type,
+                     FilePath::StringType(),
+                     FolderSearchPolicy::MATCH_ONLY) {}
+
+FileEnumerator::FileEnumerator(const FilePath& root_path,
+                               bool recursive,
+                               int file_type,
+                               const FilePath::StringType& pattern)
+    : FileEnumerator(root_path,
+                     recursive,
+                     file_type,
+                     pattern,
+                     FolderSearchPolicy::MATCH_ONLY) {}
+
+FileEnumerator::FileEnumerator(const FilePath& root_path,
+                               bool recursive,
+                               int file_type,
+                               const FilePath::StringType& pattern,
+                               FolderSearchPolicy folder_search_policy)
+    : recursive_(recursive),
+      file_type_(file_type),
+      pattern_(!pattern.empty() ? pattern : L"*"),
+      folder_search_policy_(folder_search_policy) {
+  // INCLUDE_DOT_DOT must not be specified if recursive.
+  DCHECK(!(recursive && (INCLUDE_DOT_DOT & file_type_)));
+  memset(&find_data_, 0, sizeof(find_data_));
+  pending_paths_.push(root_path);
+}
+
+FileEnumerator::~FileEnumerator() {
+  if (find_handle_ != INVALID_HANDLE_VALUE)
+    FindClose(find_handle_);
+}
+
+FileEnumerator::FileInfo FileEnumerator::GetInfo() const {
+  if (!has_find_data_) {
+    NOTREACHED();
+    return FileInfo();
+  }
+  FileInfo ret;
+  memcpy(&ret.find_data_, &find_data_, sizeof(find_data_));
+  return ret;
+}
+
+FilePath FileEnumerator::Next() {
+  AssertBlockingAllowed();
+
+  while (has_find_data_ || !pending_paths_.empty()) {
+    if (!has_find_data_) {
+      // The last FindFirstFile operation is done; prepare a new one.
+      root_path_ = pending_paths_.top();
+      pending_paths_.pop();
+
+      // Start a new find operation.
+      const FilePath src =
+          BuildSearchFilter(folder_search_policy_, root_path_, pattern_);
+      find_handle_ = FindFirstFileEx(src.value().c_str(),
+                                     FindExInfoBasic,  // Omit short name.
+                                     &find_data_, FindExSearchNameMatch,
+                                     nullptr, FIND_FIRST_EX_LARGE_FETCH);
+      has_find_data_ = true;
+    } else {
+      // Search for the next file/directory.
+      if (!FindNextFile(find_handle_, &find_data_)) {
+        FindClose(find_handle_);
+        find_handle_ = INVALID_HANDLE_VALUE;
+      }
+    }
+
+    if (INVALID_HANDLE_VALUE == find_handle_) {
+      has_find_data_ = false;
+
+      // MATCH_ONLY policy clears the pattern for matched subfolders; ALL
+      // policy applies the pattern to all subfolders.
+      if (folder_search_policy_ == FolderSearchPolicy::MATCH_ONLY) {
+        // This is reached when we have finished a directory and are advancing
+        // to the next one in the queue. We applied the pattern (if any) to the
+        // files in the root search directory, but for the directories that
+        // matched, we want to enumerate all files inside them. That happens on
+        // the next pass through the loop, once the find handle is invalid.
+        pattern_ = L"*";
+      }
+
+      continue;
+    }
+
+    const FilePath filename(find_data_.cFileName);
+    if (ShouldSkip(filename))
+      continue;
+
+    const bool is_dir =
+        (find_data_.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0;
+    const FilePath abs_path = root_path_.Append(filename);
+
+    // Check if the directory should be processed recursively.
+    if (is_dir && recursive_) {
+      // If |abs_path| is a directory and we are doing recursive searching,
+      // add it to pending_paths_ so we scan it after we finish scanning this
+      // directory. However, don't do recursion through reparse points or we
+      // may end up with an infinite cycle.
+      DWORD attributes = GetFileAttributes(abs_path.value().c_str());
+      if (!(attributes & FILE_ATTRIBUTE_REPARSE_POINT))
+        pending_paths_.push(abs_path);
+    }
+
+    if (IsTypeMatched(is_dir) && IsPatternMatched(filename))
+      return abs_path;
+  }
+  return FilePath();
+}
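+
+// Illustrative usage (a sketch, not part of this change): callers drive
+// Next() until it returns an empty path, and may query GetInfo() for the
+// most recently returned entry.
+//
+//   base::FileEnumerator e(root, /*recursive=*/true,
+//                          base::FileEnumerator::FILES);
+//   for (base::FilePath path = e.Next(); !path.empty(); path = e.Next()) {
+//     base::FileEnumerator::FileInfo info = e.GetInfo();
+//     // ... use |path|, info.GetSize(), info.GetLastModifiedTime(), ...
+//   }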
+
+bool FileEnumerator::IsPatternMatched(const FilePath& src) const {
+  switch (folder_search_policy_) {
+    case FolderSearchPolicy::MATCH_ONLY:
+      // MATCH_ONLY policy filters by pattern on the search request, so all
+      // found files already fit the pattern.
+      return true;
+    case FolderSearchPolicy::ALL:
+      // ALL policy enumerates all files, so the pattern match must be
+      // checked manually.
+      return PathMatchSpec(src.value().c_str(), pattern_.c_str()) == TRUE;
+  }
+  NOTREACHED();
+  return false;
+}
+
+}  // namespace base
diff --git a/base/files/file_locking_unittest.cc b/base/files/file_locking_unittest.cc
new file mode 100644
index 0000000..e158b7d
--- /dev/null
+++ b/base/files/file_locking_unittest.cc
@@ -0,0 +1,232 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/command_line.h"
+#include "base/files/file.h"
+#include "base/files/file_util.h"
+#include "base/files/scoped_temp_dir.h"
+#include "base/macros.h"
+#include "base/test/multiprocess_test.h"
+#include "base/test/test_timeouts.h"
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/multiprocess_func_list.h"
+
+using base::File;
+using base::FilePath;
+
+namespace {
+
+// Flag for the parent to share a temp dir with the child.
+const char kTempDirFlag[] = "temp-dir";
+
+// Flags to control how the subprocess unlocks the file.
+const char kFileUnlock[] = "file-unlock";
+const char kCloseUnlock[] = "close-unlock";
+const char kExitUnlock[] = "exit-unlock";
+
+// File to lock in temp dir.
+const char kLockFile[] = "lockfile";
+
+// Constants for various requests and responses, used as the |signal_file|
+// parameter to the signal/wait helpers.
+const char kSignalLockFileLocked[] = "locked.signal";
+const char kSignalLockFileClose[] = "close.signal";
+const char kSignalLockFileClosed[] = "closed.signal";
+const char kSignalLockFileUnlock[] = "unlock.signal";
+const char kSignalLockFileUnlocked[] = "unlocked.signal";
+const char kSignalExit[] = "exit.signal";
+
+// Signal an event by creating a file which didn't previously exist.
+bool SignalEvent(const FilePath& signal_dir, const char* signal_file) {
+  File file(signal_dir.AppendASCII(signal_file),
+            File::FLAG_CREATE | File::FLAG_WRITE);
+  return file.IsValid();
+}
+
+// Check whether an event was signaled.
+bool CheckEvent(const FilePath& signal_dir, const char* signal_file) {
+  File file(signal_dir.AppendASCII(signal_file),
+            File::FLAG_OPEN | File::FLAG_READ);
+  return file.IsValid();
+}
+
+// Busy-wait for an event to be signaled, returning false for timeout.
+bool WaitForEventWithTimeout(const FilePath& signal_dir,
+                             const char* signal_file,
+                             const base::TimeDelta& timeout) {
+  const base::Time finish_by = base::Time::Now() + timeout;
+  while (!CheckEvent(signal_dir, signal_file)) {
+    if (base::Time::Now() > finish_by)
+      return false;
+    base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(10));
+  }
+  return true;
+}
+
+// Wait forever for the event to be signaled (should never return false).
+bool WaitForEvent(const FilePath& signal_dir, const char* signal_file) {
+  return WaitForEventWithTimeout(signal_dir, signal_file,
+                                 base::TimeDelta::Max());
+}
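+
+// The signal files above implement a one-shot handshake between the parent
+// test and the child process. A sketch of the kFileUnlock sequence (the other
+// modes differ only in the middle steps):
+//
+//   parent                                 child
+//   ------                                 -----
+//                                          Lock() the lockfile
+//                                          SignalEvent(kSignalLockFileLocked)
+//   WaitForEvent(kSignalLockFileLocked)
+//   SignalEvent(kSignalLockFileUnlock)
+//                                          WaitForEvent(kSignalLockFileUnlock)
+//                                          Unlock(); SignalEvent(...Unlocked)
+//   WaitForEvent(kSignalLockFileUnlocked)
+//   SignalEvent(kSignalExit)
+//                                          WaitForEvent(kSignalExit); exit(0)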
+
+// Keep these in sync so StartChild*() can refer to the correct test main.
+#define ChildMain ChildLockUnlock
+#define ChildMainString "ChildLockUnlock"
+
+// Subprocess to test taking a file lock and then releasing it.  |kTempDirFlag|
+// must name an existing temporary directory for the lockfile and signal
+// files.  One of the following flags must be passed to determine how to unlock
+// the lock file:
+// - |kFileUnlock| calls Unlock() to unlock.
+// - |kCloseUnlock| calls Close() while the lock is held.
+// - |kExitUnlock| exits while the lock is held.
+MULTIPROCESS_TEST_MAIN(ChildMain) {
+  base::CommandLine* command_line = base::CommandLine::ForCurrentProcess();
+  const FilePath temp_path = command_line->GetSwitchValuePath(kTempDirFlag);
+  CHECK(base::DirectoryExists(temp_path));
+
+  // Immediately lock the file.
+  File file(temp_path.AppendASCII(kLockFile),
+            File::FLAG_OPEN | File::FLAG_READ | File::FLAG_WRITE);
+  CHECK(file.IsValid());
+  CHECK_EQ(File::FILE_OK, file.Lock());
+  CHECK(SignalEvent(temp_path, kSignalLockFileLocked));
+
+  if (command_line->HasSwitch(kFileUnlock)) {
+    // Wait for signal to unlock, then unlock the file.
+    CHECK(WaitForEvent(temp_path, kSignalLockFileUnlock));
+    CHECK_EQ(File::FILE_OK, file.Unlock());
+    CHECK(SignalEvent(temp_path, kSignalLockFileUnlocked));
+  } else if (command_line->HasSwitch(kCloseUnlock)) {
+    // Wait for the signal to close, then close the file.
+    CHECK(WaitForEvent(temp_path, kSignalLockFileClose));
+    file.Close();
+    CHECK(!file.IsValid());
+    CHECK(SignalEvent(temp_path, kSignalLockFileClosed));
+  } else {
+    CHECK(command_line->HasSwitch(kExitUnlock));
+  }
+
+  // Wait for signal to exit, so that unlock or close can be distinguished from
+  // exit.
+  CHECK(WaitForEvent(temp_path, kSignalExit));
+  return 0;
+}
+
+}  // namespace
+
+class FileLockingTest : public testing::Test {
+ public:
+  FileLockingTest() = default;
+
+ protected:
+  void SetUp() override {
+    testing::Test::SetUp();
+
+    // Set up the temp dir and the lock file.
+    ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
+    lock_file_.Initialize(
+        temp_dir_.GetPath().AppendASCII(kLockFile),
+        File::FLAG_CREATE | File::FLAG_READ | File::FLAG_WRITE);
+    ASSERT_TRUE(lock_file_.IsValid());
+  }
+
+  bool SignalEvent(const char* signal_file) {
+    return ::SignalEvent(temp_dir_.GetPath(), signal_file);
+  }
+
+  bool WaitForEventOrTimeout(const char* signal_file) {
+    return ::WaitForEventWithTimeout(temp_dir_.GetPath(), signal_file,
+                                     TestTimeouts::action_timeout());
+  }
+
+  // Start a child process set to use the specified unlock action, and wait for
+  // it to lock the file.
+  void StartChildAndSignalLock(const char* unlock_action) {
+    // Spin up a ChildMain subprocess against the temp dir created in SetUp().
+    const FilePath temp_path = temp_dir_.GetPath();
+    base::CommandLine child_command_line(
+        base::GetMultiProcessTestChildBaseCommandLine());
+    child_command_line.AppendSwitchPath(kTempDirFlag, temp_path);
+    child_command_line.AppendSwitch(unlock_action);
+    lock_child_ = base::SpawnMultiProcessTestChild(
+        ChildMainString, child_command_line, base::LaunchOptions());
+    ASSERT_TRUE(lock_child_.IsValid());
+
+    // Wait for the child to lock the file.
+    ASSERT_TRUE(WaitForEventOrTimeout(kSignalLockFileLocked));
+  }
+
+  // Signal the child to exit cleanly.
+  void ExitChildCleanly() {
+    ASSERT_TRUE(SignalEvent(kSignalExit));
+    int rv = -1;
+    ASSERT_TRUE(WaitForMultiprocessTestChildExit(
+        lock_child_, TestTimeouts::action_timeout(), &rv));
+    ASSERT_EQ(0, rv);
+  }
+
+  base::ScopedTempDir temp_dir_;
+  base::File lock_file_;
+  base::Process lock_child_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(FileLockingTest);
+};
+
+// Test that locks are released by Unlock().
+TEST_F(FileLockingTest, LockAndUnlock) {
+  StartChildAndSignalLock(kFileUnlock);
+
+  ASSERT_NE(File::FILE_OK, lock_file_.Lock());
+  ASSERT_TRUE(SignalEvent(kSignalLockFileUnlock));
+  ASSERT_TRUE(WaitForEventOrTimeout(kSignalLockFileUnlocked));
+  ASSERT_EQ(File::FILE_OK, lock_file_.Lock());
+  ASSERT_EQ(File::FILE_OK, lock_file_.Unlock());
+
+  ExitChildCleanly();
+}
+
+// Test that locks are released on Close().
+TEST_F(FileLockingTest, UnlockOnClose) {
+  StartChildAndSignalLock(kCloseUnlock);
+
+  ASSERT_NE(File::FILE_OK, lock_file_.Lock());
+  ASSERT_TRUE(SignalEvent(kSignalLockFileClose));
+  ASSERT_TRUE(WaitForEventOrTimeout(kSignalLockFileClosed));
+  ASSERT_EQ(File::FILE_OK, lock_file_.Lock());
+  ASSERT_EQ(File::FILE_OK, lock_file_.Unlock());
+
+  ExitChildCleanly();
+}
+
+// Test that locks are released on exit.
+TEST_F(FileLockingTest, UnlockOnExit) {
+  StartChildAndSignalLock(kExitUnlock);
+
+  ASSERT_NE(File::FILE_OK, lock_file_.Lock());
+  ExitChildCleanly();
+  ASSERT_EQ(File::FILE_OK, lock_file_.Lock());
+  ASSERT_EQ(File::FILE_OK, lock_file_.Unlock());
+}
+
+// Test that killing the process releases the lock.  This should cover crashing.
+// Flaky on Android (http://crbug.com/747518)
+#if defined(OS_ANDROID)
+#define MAYBE_UnlockOnTerminate DISABLED_UnlockOnTerminate
+#else
+#define MAYBE_UnlockOnTerminate UnlockOnTerminate
+#endif
+TEST_F(FileLockingTest, MAYBE_UnlockOnTerminate) {
+  // The child will wait for an exit which never arrives.
+  StartChildAndSignalLock(kExitUnlock);
+
+  ASSERT_NE(File::FILE_OK, lock_file_.Lock());
+  ASSERT_TRUE(TerminateMultiProcessTestChild(lock_child_, 0, true));
+  ASSERT_EQ(File::FILE_OK, lock_file_.Lock());
+  ASSERT_EQ(File::FILE_OK, lock_file_.Unlock());
+}
diff --git a/base/files/file_path.cc b/base/files/file_path.cc
new file mode 100644
index 0000000..14f9251
--- /dev/null
+++ b/base/files/file_path.cc
@@ -0,0 +1,1342 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file_path.h"
+
+#include <string.h>
+#include <algorithm>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/pickle.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/string_util.h"
+#include "base/strings/sys_string_conversions.h"
+#include "base/strings/utf_string_conversions.h"
+#include "build/build_config.h"
+
+#if defined(OS_MACOSX)
+#include "base/mac/scoped_cftyperef.h"
+#include "base/third_party/icu/icu_utf.h"
+#endif
+
+#if defined(OS_WIN)
+#include <windows.h>
+#elif defined(OS_MACOSX)
+#include <CoreFoundation/CoreFoundation.h>
+#endif
+
+namespace base {
+
+using StringType = FilePath::StringType;
+using StringPieceType = FilePath::StringPieceType;
+
+namespace {
+
+const char* const kCommonDoubleExtensionSuffixes[] = { "gz", "z", "bz2", "bz" };
+const char* const kCommonDoubleExtensions[] = { "user.js" };
+
+const FilePath::CharType kStringTerminator = FILE_PATH_LITERAL('\0');
+
+// If this FilePath contains a drive letter specification, returns the
+// position of the last character of the drive letter specification,
+// otherwise returns npos.  This can only be true on Windows, when a pathname
+// begins with a letter followed by a colon.  On other platforms, this always
+// returns npos.
+StringPieceType::size_type FindDriveLetter(StringPieceType path) {
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+  // This is dependent on an ASCII-based character set, but that's a
+  // reasonable assumption.  iswalpha can be too inclusive here.
+  if (path.length() >= 2 && path[1] == L':' &&
+      ((path[0] >= L'A' && path[0] <= L'Z') ||
+       (path[0] >= L'a' && path[0] <= L'z'))) {
+    return 1;
+  }
+#endif  // FILE_PATH_USES_DRIVE_LETTERS
+  return StringType::npos;
+}
+
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+bool EqualDriveLetterCaseInsensitive(StringPieceType a, StringPieceType b) {
+  size_t a_letter_pos = FindDriveLetter(a);
+  size_t b_letter_pos = FindDriveLetter(b);
+
+  if (a_letter_pos == StringType::npos || b_letter_pos == StringType::npos)
+    return a == b;
+
+  StringPieceType a_letter(a.substr(0, a_letter_pos + 1));
+  StringPieceType b_letter(b.substr(0, b_letter_pos + 1));
+  if (!StartsWith(a_letter, b_letter, CompareCase::INSENSITIVE_ASCII))
+    return false;
+
+  StringPieceType a_rest(a.substr(a_letter_pos + 1));
+  StringPieceType b_rest(b.substr(b_letter_pos + 1));
+  return a_rest == b_rest;
+}
+#endif  // defined(FILE_PATH_USES_DRIVE_LETTERS)
+
+bool IsPathAbsolute(StringPieceType path) {
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+  StringType::size_type letter = FindDriveLetter(path);
+  if (letter != StringType::npos) {
+    // Look for a separator right after the drive specification.
+    return path.length() > letter + 1 &&
+        FilePath::IsSeparator(path[letter + 1]);
+  }
+  // Look for a pair of leading separators.
+  return path.length() > 1 &&
+      FilePath::IsSeparator(path[0]) && FilePath::IsSeparator(path[1]);
+#else  // FILE_PATH_USES_DRIVE_LETTERS
+  // Look for a separator in the first position.
+  return path.length() > 0 && FilePath::IsSeparator(path[0]);
+#endif  // FILE_PATH_USES_DRIVE_LETTERS
+}
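+
+// A few illustrative inputs for IsPathAbsolute() (derived from the logic
+// above; not an exhaustive specification):
+//   Windows: "c:\foo" -> true,  "c:foo" -> false (no separator after the
+//            colon),  "\\server\share" -> true (leading separator pair).
+//   POSIX:   "/foo"   -> true,  "foo/bar" -> false.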
+
+bool AreAllSeparators(const StringType& input) {
+  for (StringType::const_iterator it = input.begin();
+      it != input.end(); ++it) {
+    if (!FilePath::IsSeparator(*it))
+      return false;
+  }
+
+  return true;
+}
+
+// Find the position of the '.' that separates the extension from the rest
+// of the file name. The position is relative to BaseName(), not value().
+// Returns npos if it can't find an extension.
+StringType::size_type FinalExtensionSeparatorPosition(const StringType& path) {
+  // Special case "." and ".."
+  if (path == FilePath::kCurrentDirectory || path == FilePath::kParentDirectory)
+    return StringType::npos;
+
+  return path.rfind(FilePath::kExtensionSeparator);
+}
+
+// Same as above, but allow a second extension component of up to 4
+// characters when the rightmost extension component is a common double
+// extension (gz, z, bz2, bz; compared case-insensitively).  For example,
+// foo.tar.gz or foo.tar.Z would have extension components of '.tar.gz' and
+// '.tar.Z' respectively.
+StringType::size_type ExtensionSeparatorPosition(const StringType& path) {
+  const StringType::size_type last_dot = FinalExtensionSeparatorPosition(path);
+
+  // No extension, or the extension is the whole filename.
+  if (last_dot == StringType::npos || last_dot == 0U)
+    return last_dot;
+
+  const StringType::size_type penultimate_dot =
+      path.rfind(FilePath::kExtensionSeparator, last_dot - 1);
+  const StringType::size_type last_separator =
+      path.find_last_of(FilePath::kSeparators, last_dot - 1,
+                        FilePath::kSeparatorsLength - 1);
+
+  if (penultimate_dot == StringType::npos ||
+      (last_separator != StringType::npos &&
+       penultimate_dot < last_separator)) {
+    return last_dot;
+  }
+
+  for (size_t i = 0; i < arraysize(kCommonDoubleExtensions); ++i) {
+    StringType extension(path, penultimate_dot + 1);
+    if (LowerCaseEqualsASCII(extension, kCommonDoubleExtensions[i]))
+      return penultimate_dot;
+  }
+
+  StringType extension(path, last_dot + 1);
+  for (size_t i = 0; i < arraysize(kCommonDoubleExtensionSuffixes); ++i) {
+    if (LowerCaseEqualsASCII(extension, kCommonDoubleExtensionSuffixes[i])) {
+      if ((last_dot - penultimate_dot) <= 5U &&
+          (last_dot - penultimate_dot) > 1U) {
+        return penultimate_dot;
+      }
+    }
+  }
+
+  return last_dot;
+}
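+
+// Worked examples for the double-extension logic above (illustrative only):
+//   "foo.tar.gz"     -> the dot before "tar" (extension ".tar.gz").
+//   "chrome.user.js" -> the dot before "user" ("user.js" is listed above).
+//   "foo.bar.baz"    -> the dot before "baz" ("baz" is not a recognized
+//                       double-extension suffix).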
+
+// Returns true if path is "", ".", or "..".
+bool IsEmptyOrSpecialCase(const StringType& path) {
+  // Special cases "", ".", and ".."
+  if (path.empty() || path == FilePath::kCurrentDirectory ||
+      path == FilePath::kParentDirectory) {
+    return true;
+  }
+
+  return false;
+}
+
+}  // namespace
+
+FilePath::FilePath() = default;
+
+FilePath::FilePath(const FilePath& that) = default;
+FilePath::FilePath(FilePath&& that) noexcept = default;
+
+FilePath::FilePath(StringPieceType path) {
+  path.CopyToString(&path_);
+  StringType::size_type nul_pos = path_.find(kStringTerminator);
+  if (nul_pos != StringType::npos)
+    path_.erase(nul_pos, StringType::npos);
+}
+
+FilePath::~FilePath() = default;
+
+FilePath& FilePath::operator=(const FilePath& that) = default;
+
+FilePath& FilePath::operator=(FilePath&& that) = default;
+
+bool FilePath::operator==(const FilePath& that) const {
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+  return EqualDriveLetterCaseInsensitive(this->path_, that.path_);
+#else  // defined(FILE_PATH_USES_DRIVE_LETTERS)
+  return path_ == that.path_;
+#endif  // defined(FILE_PATH_USES_DRIVE_LETTERS)
+}
+
+bool FilePath::operator!=(const FilePath& that) const {
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+  return !EqualDriveLetterCaseInsensitive(this->path_, that.path_);
+#else  // defined(FILE_PATH_USES_DRIVE_LETTERS)
+  return path_ != that.path_;
+#endif  // defined(FILE_PATH_USES_DRIVE_LETTERS)
+}
+
+std::ostream& operator<<(std::ostream& out, const FilePath& file_path) {
+  return out << file_path.value();
+}
+
+// static
+bool FilePath::IsSeparator(CharType character) {
+  for (size_t i = 0; i < kSeparatorsLength - 1; ++i) {
+    if (character == kSeparators[i]) {
+      return true;
+    }
+  }
+
+  return false;
+}
+
+void FilePath::GetComponents(std::vector<StringType>* components) const {
+  DCHECK(components);
+  if (!components)
+    return;
+  components->clear();
+  if (value().empty())
+    return;
+
+  std::vector<StringType> ret_val;
+  FilePath current = *this;
+  FilePath base;
+
+  // Capture path components.
+  while (current != current.DirName()) {
+    base = current.BaseName();
+    if (!AreAllSeparators(base.value()))
+      ret_val.push_back(base.value());
+    current = current.DirName();
+  }
+
+  // Capture root, if any.
+  base = current.BaseName();
+  if (!base.value().empty() && base.value() != kCurrentDirectory)
+    ret_val.push_back(current.BaseName().value());
+
+  // Capture drive letter, if any.
+  FilePath dir = current.DirName();
+  StringType::size_type letter = FindDriveLetter(dir.value());
+  if (letter != StringType::npos) {
+    ret_val.push_back(StringType(dir.value(), 0, letter + 1));
+  }
+
+  *components = std::vector<StringType>(ret_val.rbegin(), ret_val.rend());
+}
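+
+// Illustrative decompositions (a sketch; consistent with the logic above):
+//   "C:\foo\bar" -> { "C:", "\", "foo", "bar" }   (Windows)
+//   "/foo/bar"   -> { "/", "foo", "bar" }         (POSIX)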
+
+bool FilePath::IsParent(const FilePath& child) const {
+  return AppendRelativePath(child, nullptr);
+}
+
+bool FilePath::AppendRelativePath(const FilePath& child,
+                                  FilePath* path) const {
+  std::vector<StringType> parent_components;
+  std::vector<StringType> child_components;
+  GetComponents(&parent_components);
+  child.GetComponents(&child_components);
+
+  if (parent_components.empty() ||
+      parent_components.size() >= child_components.size())
+    return false;
+
+  std::vector<StringType>::const_iterator parent_comp =
+      parent_components.begin();
+  std::vector<StringType>::const_iterator child_comp =
+      child_components.begin();
+
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+  // Windows can access case sensitive filesystems, so component
+  // comparisons must be case sensitive, but drive letters are
+  // never case sensitive.
+  if ((FindDriveLetter(*parent_comp) != StringType::npos) &&
+      (FindDriveLetter(*child_comp) != StringType::npos)) {
+    if (!StartsWith(*parent_comp, *child_comp, CompareCase::INSENSITIVE_ASCII))
+      return false;
+    ++parent_comp;
+    ++child_comp;
+  }
+#endif  // defined(FILE_PATH_USES_DRIVE_LETTERS)
+
+  while (parent_comp != parent_components.end()) {
+    if (*parent_comp != *child_comp)
+      return false;
+    ++parent_comp;
+    ++child_comp;
+  }
+
+  if (path != nullptr) {
+    for (; child_comp != child_components.end(); ++child_comp) {
+      *path = path->Append(*child_comp);
+    }
+  }
+  return true;
+}
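+
+// Example (a sketch of the intended use):
+//   FilePath parent(FILE_PATH_LITERAL("/a/b"));
+//   FilePath child(FILE_PATH_LITERAL("/a/b/c"));
+//   FilePath out(FILE_PATH_LITERAL("/x"));
+//   parent.AppendRelativePath(child, &out);  // Returns true; |out| is "/x/c".
+//   parent.IsParent(child);                  // Also true.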
+
+// libgen's dirname and basename aren't guaranteed to be thread-safe and may
+// modify their input strings; in fact they are implemented differently in
+// these regards on different platforms.  Don't use them, but adhere to their
+// behavior.
+FilePath FilePath::DirName() const {
+  FilePath new_path(path_);
+  new_path.StripTrailingSeparatorsInternal();
+
+  // The drive letter, if any, always needs to remain in the output.  If there
+  // is no drive letter, as will always be the case on platforms which do not
+  // support drive letters, letter will be npos, or -1, so the comparisons and
+  // resizes below using letter will still be valid.
+  StringType::size_type letter = FindDriveLetter(new_path.path_);
+
+  StringType::size_type last_separator =
+      new_path.path_.find_last_of(kSeparators, StringType::npos,
+                                  kSeparatorsLength - 1);
+  if (last_separator == StringType::npos) {
+    // path_ is in the current directory.
+    new_path.path_.resize(letter + 1);
+  } else if (last_separator == letter + 1) {
+    // path_ is in the root directory.
+    new_path.path_.resize(letter + 2);
+  } else if (last_separator == letter + 2 &&
+             IsSeparator(new_path.path_[letter + 1])) {
+    // path_ is in "//" (possibly with a drive letter); leave the double
+    // separator intact indicating alternate root.
+    new_path.path_.resize(letter + 3);
+  } else if (last_separator != 0) {
+    // path_ is somewhere else, trim the basename.
+    new_path.path_.resize(last_separator);
+  }
+
+  new_path.StripTrailingSeparatorsInternal();
+  if (!new_path.path_.length())
+    new_path.path_ = kCurrentDirectory;
+
+  return new_path;
+}
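+
+// Illustrative results (each case exercises one branch above):
+//   "/foo/bar" -> "/foo"    "/foo"  -> "/"      "foo" -> "."
+//   "//foo"    -> "//"      "c:\a"  -> "c:\"    "c:"  -> "c:"   (Windows)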
+
+FilePath FilePath::BaseName() const {
+  FilePath new_path(path_);
+  new_path.StripTrailingSeparatorsInternal();
+
+  // The drive letter, if any, is always stripped.
+  StringType::size_type letter = FindDriveLetter(new_path.path_);
+  if (letter != StringType::npos) {
+    new_path.path_.erase(0, letter + 1);
+  }
+
+  // Keep everything after the final separator, but if the pathname is only
+  // one character and it's a separator, leave it alone.
+  StringType::size_type last_separator =
+      new_path.path_.find_last_of(kSeparators, StringType::npos,
+                                  kSeparatorsLength - 1);
+  if (last_separator != StringType::npos &&
+      last_separator < new_path.path_.length() - 1) {
+    new_path.path_.erase(0, last_separator + 1);
+  }
+
+  return new_path;
+}
+
+StringType FilePath::Extension() const {
+  FilePath base(BaseName());
+  const StringType::size_type dot = ExtensionSeparatorPosition(base.path_);
+  if (dot == StringType::npos)
+    return StringType();
+
+  return base.path_.substr(dot, StringType::npos);
+}
+
+StringType FilePath::FinalExtension() const {
+  FilePath base(BaseName());
+  const StringType::size_type dot = FinalExtensionSeparatorPosition(base.path_);
+  if (dot == StringType::npos)
+    return StringType();
+
+  return base.path_.substr(dot, StringType::npos);
+}
+
+FilePath FilePath::RemoveExtension() const {
+  if (Extension().empty())
+    return *this;
+
+  const StringType::size_type dot = ExtensionSeparatorPosition(path_);
+  if (dot == StringType::npos)
+    return *this;
+
+  return FilePath(path_.substr(0, dot));
+}
+
+FilePath FilePath::RemoveFinalExtension() const {
+  if (FinalExtension().empty())
+    return *this;
+
+  const StringType::size_type dot = FinalExtensionSeparatorPosition(path_);
+  if (dot == StringType::npos)
+    return *this;
+
+  return FilePath(path_.substr(0, dot));
+}
+
+FilePath FilePath::InsertBeforeExtension(StringPieceType suffix) const {
+  if (suffix.empty())
+    return FilePath(path_);
+
+  if (IsEmptyOrSpecialCase(BaseName().value()))
+    return FilePath();
+
+  StringType ext = Extension();
+  StringType ret = RemoveExtension().value();
+  suffix.AppendToString(&ret);
+  ret.append(ext);
+  return FilePath(ret);
+}
+
+FilePath FilePath::InsertBeforeExtensionASCII(StringPiece suffix) const {
+  DCHECK(IsStringASCII(suffix));
+#if defined(OS_WIN)
+  return InsertBeforeExtension(ASCIIToUTF16(suffix));
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  return InsertBeforeExtension(suffix);
+#endif
+}
+
+FilePath FilePath::AddExtension(StringPieceType extension) const {
+  if (IsEmptyOrSpecialCase(BaseName().value()))
+    return FilePath();
+
+  // If the new extension is "" or ".", then just return the current FilePath.
+  if (extension.empty() ||
+      (extension.size() == 1 && extension[0] == kExtensionSeparator))
+    return *this;
+
+  StringType str = path_;
+  if (extension[0] != kExtensionSeparator &&
+      *(str.end() - 1) != kExtensionSeparator) {
+    str.append(1, kExtensionSeparator);
+  }
+  extension.AppendToString(&str);
+  return FilePath(str);
+}
+
+FilePath FilePath::ReplaceExtension(StringPieceType extension) const {
+  if (IsEmptyOrSpecialCase(BaseName().value()))
+    return FilePath();
+
+  FilePath no_ext = RemoveExtension();
+  // If the new extension is "" or ".", then just remove the current extension.
+  if (extension.empty() ||
+      (extension.size() == 1 && extension[0] == kExtensionSeparator))
+    return no_ext;
+
+  StringType str = no_ext.value();
+  if (extension[0] != kExtensionSeparator)
+    str.append(1, kExtensionSeparator);
+  extension.AppendToString(&str);
+  return FilePath(str);
+}
+
+bool FilePath::MatchesExtension(StringPieceType extension) const {
+  DCHECK(extension.empty() || extension[0] == kExtensionSeparator);
+
+  StringType current_extension = Extension();
+
+  if (current_extension.length() != extension.length())
+    return false;
+
+  return FilePath::CompareEqualIgnoreCase(extension, current_extension);
+}
+
+FilePath FilePath::Append(StringPieceType component) const {
+  StringPieceType appended = component;
+  StringType without_nuls;
+
+  StringType::size_type nul_pos = component.find(kStringTerminator);
+  if (nul_pos != StringPieceType::npos) {
+    component.substr(0, nul_pos).CopyToString(&without_nuls);
+    appended = StringPieceType(without_nuls);
+  }
+
+  DCHECK(!IsPathAbsolute(appended));
+
+  if (path_.compare(kCurrentDirectory) == 0 && !appended.empty()) {
+    // Append normally doesn't do any normalization, but as a special case,
+    // when appending to kCurrentDirectory, just return a new path for the
+    // component argument.  Appending component to kCurrentDirectory would
+    // serve no purpose other than needlessly lengthening the path, and
+    // it's likely in practice to wind up with FilePath objects containing
+    // only kCurrentDirectory when calling DirName on a single relative path
+    // component.
+    return FilePath(appended);
+  }
+
+  FilePath new_path(path_);
+  new_path.StripTrailingSeparatorsInternal();
+
+  // Don't append a separator if the path is empty (indicating the current
+  // directory) or if the path component is empty (indicating nothing to
+  // append).
+  if (!appended.empty() && !new_path.path_.empty()) {
+    // Don't append a separator if the path still ends with a trailing
+    // separator after stripping (indicating the root directory).
+    if (!IsSeparator(new_path.path_.back())) {
+      // Don't append a separator if the path is just a drive letter.
+      if (FindDriveLetter(new_path.path_) + 1 != new_path.path_.length()) {
+        new_path.path_.append(1, kSeparators[0]);
+      }
+    }
+  }
+
+  appended.AppendToString(&new_path.path_);
+  return new_path;
+}
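+
+// Example behavior (a sketch; matches the special cases above):
+//   FilePath(FILE_PATH_LITERAL(".")).Append(FILE_PATH_LITERAL("foo"))
+//       yields "foo" (the kCurrentDirectory special case);
+//   FilePath(FILE_PATH_LITERAL("/")).Append(FILE_PATH_LITERAL("foo"))
+//       yields "/foo" (no separator is doubled after the root).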
+
+FilePath FilePath::Append(const FilePath& component) const {
+  return Append(component.value());
+}
+
+FilePath FilePath::AppendASCII(StringPiece component) const {
+  DCHECK(base::IsStringASCII(component));
+#if defined(OS_WIN)
+  return Append(ASCIIToUTF16(component));
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  return Append(component);
+#endif
+}
+
+bool FilePath::IsAbsolute() const {
+  return IsPathAbsolute(path_);
+}
+
+bool FilePath::EndsWithSeparator() const {
+  if (empty())
+    return false;
+  return IsSeparator(path_.back());
+}
+
+FilePath FilePath::AsEndingWithSeparator() const {
+  if (EndsWithSeparator() || path_.empty())
+    return *this;
+
+  StringType path_str;
+  path_str.reserve(path_.length() + 1);  // Only allocate string once.
+
+  path_str = path_;
+  path_str.append(&kSeparators[0], 1);
+  return FilePath(path_str);
+}
+
+FilePath FilePath::StripTrailingSeparators() const {
+  FilePath new_path(path_);
+  new_path.StripTrailingSeparatorsInternal();
+
+  return new_path;
+}
+
+bool FilePath::ReferencesParent() const {
+  if (path_.find(kParentDirectory) == StringType::npos) {
+    // GetComponents is quite expensive, so avoid calling it in the majority
+    // of cases where there isn't a kParentDirectory anywhere in the path.
+    return false;
+  }
+
+  std::vector<StringType> components;
+  GetComponents(&components);
+
+  std::vector<StringType>::const_iterator it = components.begin();
+  for (; it != components.end(); ++it) {
+    const StringType& component = *it;
+    // Windows has odd, undocumented behavior with path components containing
+    // only whitespace and . characters. So, if all we see is . and
+    // whitespace, then we treat any .. sequence as referencing parent.
+    // For simplicity we enforce this on all platforms.
+    if (component.find_first_not_of(FILE_PATH_LITERAL(". \n\r\t")) ==
+            std::string::npos &&
+        component.find(kParentDirectory) != std::string::npos) {
+      return true;
+    }
+  }
+  return false;
+}
+
+#if defined(OS_WIN)
+
+string16 FilePath::LossyDisplayName() const {
+  return path_;
+}
+
+std::string FilePath::MaybeAsASCII() const {
+  if (base::IsStringASCII(path_))
+    return UTF16ToASCII(path_);
+  return std::string();
+}
+
+std::string FilePath::AsUTF8Unsafe() const {
+  return WideToUTF8(value());
+}
+
+string16 FilePath::AsUTF16Unsafe() const {
+  return value();
+}
+
+// static
+FilePath FilePath::FromUTF8Unsafe(StringPiece utf8) {
+  return FilePath(UTF8ToWide(utf8));
+}
+
+// static
+FilePath FilePath::FromUTF16Unsafe(StringPiece16 utf16) {
+  return FilePath(utf16);
+}
+
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+
+// See file_path.h for a discussion of the encoding of paths on POSIX
+// platforms.  These encoding conversion functions are not quite correct.
+
+string16 FilePath::LossyDisplayName() const {
+  return WideToUTF16(SysNativeMBToWide(path_));
+}
+
+std::string FilePath::MaybeAsASCII() const {
+  if (base::IsStringASCII(path_))
+    return path_;
+  return std::string();
+}
+
+std::string FilePath::AsUTF8Unsafe() const {
+#if defined(SYSTEM_NATIVE_UTF8)
+  return value();
+#else
+  return WideToUTF8(SysNativeMBToWide(value()));
+#endif
+}
+
+string16 FilePath::AsUTF16Unsafe() const {
+#if defined(SYSTEM_NATIVE_UTF8)
+  return UTF8ToUTF16(value());
+#else
+  return WideToUTF16(SysNativeMBToWide(value()));
+#endif
+}
+
+// static
+FilePath FilePath::FromUTF8Unsafe(StringPiece utf8) {
+#if defined(SYSTEM_NATIVE_UTF8)
+  return FilePath(utf8);
+#else
+  return FilePath(SysWideToNativeMB(UTF8ToWide(utf8)));
+#endif
+}
+
+// static
+FilePath FilePath::FromUTF16Unsafe(StringPiece16 utf16) {
+#if defined(SYSTEM_NATIVE_UTF8)
+  return FilePath(UTF16ToUTF8(utf16));
+#else
+  return FilePath(SysWideToNativeMB(UTF16ToWide(utf16.as_string())));
+#endif
+}
+
+#endif  // defined(OS_WIN)
+
+void FilePath::WriteToPickle(Pickle* pickle) const {
+#if defined(OS_WIN)
+  pickle->WriteString16(path_);
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  pickle->WriteString(path_);
+#else
+#error Unsupported platform
+#endif
+}
+
+bool FilePath::ReadFromPickle(PickleIterator* iter) {
+#if defined(OS_WIN)
+  if (!iter->ReadString16(&path_))
+    return false;
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  if (!iter->ReadString(&path_))
+    return false;
+#else
+#error Unsupported platform
+#endif
+
+  if (path_.find(kStringTerminator) != StringType::npos)
+    return false;
+
+  return true;
+}
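+
+// Round-trip sketch (hypothetical usage, not part of this change):
+//   base::Pickle pickle;
+//   base::FilePath(FILE_PATH_LITERAL("/in")).WriteToPickle(&pickle);
+//   base::PickleIterator iter(pickle);
+//   base::FilePath out;
+//   bool ok = out.ReadFromPickle(&iter);  // ok == true and out == "/in".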
+
+#if defined(OS_WIN)
+// Windows specific implementation of file string comparisons.
+
+int FilePath::CompareIgnoreCase(StringPieceType string1,
+                                StringPieceType string2) {
+  static decltype(::CharUpperW)* const char_upper_api =
+      reinterpret_cast<decltype(::CharUpperW)*>(
+          ::GetProcAddress(::GetModuleHandle(L"user32.dll"), "CharUpperW"));
+  CHECK(char_upper_api);
+  // Perform character-wise upper case comparison rather than using the
+  // fully Unicode-aware CompareString(). For details see:
+  // http://blogs.msdn.com/michkap/archive/2005/10/17/481600.aspx
+  StringPieceType::const_iterator i1 = string1.begin();
+  StringPieceType::const_iterator i2 = string2.begin();
+  StringPieceType::const_iterator string1end = string1.end();
+  StringPieceType::const_iterator string2end = string2.end();
+  for ( ; i1 != string1end && i2 != string2end; ++i1, ++i2) {
+    wchar_t c1 =
+        (wchar_t)LOWORD(char_upper_api((LPWSTR)(DWORD_PTR)MAKELONG(*i1, 0)));
+    wchar_t c2 =
+        (wchar_t)LOWORD(char_upper_api((LPWSTR)(DWORD_PTR)MAKELONG(*i2, 0)));
+    if (c1 < c2)
+      return -1;
+    if (c1 > c2)
+      return 1;
+  }
+  if (i1 != string1end)
+    return 1;
+  if (i2 != string2end)
+    return -1;
+  return 0;
+}
+
+#elif defined(OS_MACOSX)
+// Mac OS X specific implementation of file string comparisons.
+
+// cf. http://developer.apple.com/mac/library/technotes/tn/tn1150.html#UnicodeSubtleties
+//
+// "When using CreateTextEncoding to create a text encoding, you should set
+// the TextEncodingBase to kTextEncodingUnicodeV2_0, set the
+// TextEncodingVariant to kUnicodeCanonicalDecompVariant, and set the
+// TextEncodingFormat to kUnicode16BitFormat. Using these values ensures that
+// the Unicode will be in the same form as on an HFS Plus volume, even as the
+// Unicode standard evolves."
+//
+// Another technical article for X 10.4 updates this: one should use
+// the new (unambiguous) kUnicodeHFSPlusDecompVariant.
+// cf. http://developer.apple.com/mac/library/releasenotes/TextFonts/RN-TEC/index.html
+//
+// This implementation uses CFStringGetFileSystemRepresentation() to get the
+// decomposed form, and an adapted version of the FastUnicodeCompare as
+// described in the tech note to compare the strings.
+
+// Character conversion table for FastUnicodeCompare()
+//
+// The lower case table consists of a 256-entry high-byte table followed by
+// some number of 256-entry subtables. The high-byte table contains either an
+// offset to the subtable for characters with that high byte or zero, which
+// means that there are no case mappings or ignored characters in that block.
+// Ignored characters are mapped to zero.
+//
+// cf. downloadable file linked in
+// http://developer.apple.com/mac/library/technotes/tn/tn1150.html#StringComparisonAlgorithm
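+
+// Lookup sketch for one UTF-16 code unit |c| against the table below
+// (consistent with the layout just described):
+//   UInt16 entry = lower_case_table[c >> 8];           // High-byte index.
+//   if (entry != 0)
+//     entry = lower_case_table[entry + (c & 0x00FF)];  // Subtable lookup.
+//   else
+//     entry = c;  // No case mapping and nothing ignored in this block.
+//   // entry == 0 now means |c| is ignored entirely by the comparison.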
+
+namespace {
+
+const UInt16 lower_case_table[] = {
+  // High-byte indices ( == 0 iff no case mapping and no ignorables )
+
+  /* 0 */ 0x0100, 0x0200, 0x0000, 0x0300, 0x0400, 0x0500, 0x0000, 0x0000,
+          0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+  /* 1 */ 0x0600, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+          0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+  /* 2 */ 0x0700, 0x0800, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+          0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+  /* 3 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+          0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+  /* 4 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+          0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+  /* 5 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+          0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+  /* 6 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+          0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+  /* 7 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+          0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+  /* 8 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+          0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+  /* 9 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+          0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+  /* A */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+          0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+  /* B */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+          0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+  /* C */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+          0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+  /* D */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+          0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+  /* E */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+          0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+  /* F */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+          0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0900, 0x0A00,
+
+  // Table 1 (for high byte 0x00)
+
+  /* 0 */ 0xFFFF, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
+          0x0008, 0x0009, 0x000A, 0x000B, 0x000C, 0x000D, 0x000E, 0x000F,
+  /* 1 */ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017,
+          0x0018, 0x0019, 0x001A, 0x001B, 0x001C, 0x001D, 0x001E, 0x001F,
+  /* 2 */ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027,
+          0x0028, 0x0029, 0x002A, 0x002B, 0x002C, 0x002D, 0x002E, 0x002F,
+  /* 3 */ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037,
+          0x0038, 0x0039, 0x003A, 0x003B, 0x003C, 0x003D, 0x003E, 0x003F,
+  /* 4 */ 0x0040, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067,
+          0x0068, 0x0069, 0x006A, 0x006B, 0x006C, 0x006D, 0x006E, 0x006F,
+  /* 5 */ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077,
+          0x0078, 0x0079, 0x007A, 0x005B, 0x005C, 0x005D, 0x005E, 0x005F,
+  /* 6 */ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067,
+          0x0068, 0x0069, 0x006A, 0x006B, 0x006C, 0x006D, 0x006E, 0x006F,
+  /* 7 */ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077,
+          0x0078, 0x0079, 0x007A, 0x007B, 0x007C, 0x007D, 0x007E, 0x007F,
+  /* 8 */ 0x0080, 0x0081, 0x0082, 0x0083, 0x0084, 0x0085, 0x0086, 0x0087,
+          0x0088, 0x0089, 0x008A, 0x008B, 0x008C, 0x008D, 0x008E, 0x008F,
+  /* 9 */ 0x0090, 0x0091, 0x0092, 0x0093, 0x0094, 0x0095, 0x0096, 0x0097,
+          0x0098, 0x0099, 0x009A, 0x009B, 0x009C, 0x009D, 0x009E, 0x009F,
+  /* A */ 0x00A0, 0x00A1, 0x00A2, 0x00A3, 0x00A4, 0x00A5, 0x00A6, 0x00A7,
+          0x00A8, 0x00A9, 0x00AA, 0x00AB, 0x00AC, 0x00AD, 0x00AE, 0x00AF,
+  /* B */ 0x00B0, 0x00B1, 0x00B2, 0x00B3, 0x00B4, 0x00B5, 0x00B6, 0x00B7,
+          0x00B8, 0x00B9, 0x00BA, 0x00BB, 0x00BC, 0x00BD, 0x00BE, 0x00BF,
+  /* C */ 0x00C0, 0x00C1, 0x00C2, 0x00C3, 0x00C4, 0x00C5, 0x00E6, 0x00C7,
+          0x00C8, 0x00C9, 0x00CA, 0x00CB, 0x00CC, 0x00CD, 0x00CE, 0x00CF,
+  /* D */ 0x00F0, 0x00D1, 0x00D2, 0x00D3, 0x00D4, 0x00D5, 0x00D6, 0x00D7,
+          0x00F8, 0x00D9, 0x00DA, 0x00DB, 0x00DC, 0x00DD, 0x00FE, 0x00DF,
+  /* E */ 0x00E0, 0x00E1, 0x00E2, 0x00E3, 0x00E4, 0x00E5, 0x00E6, 0x00E7,
+          0x00E8, 0x00E9, 0x00EA, 0x00EB, 0x00EC, 0x00ED, 0x00EE, 0x00EF,
+  /* F */ 0x00F0, 0x00F1, 0x00F2, 0x00F3, 0x00F4, 0x00F5, 0x00F6, 0x00F7,
+          0x00F8, 0x00F9, 0x00FA, 0x00FB, 0x00FC, 0x00FD, 0x00FE, 0x00FF,
+
+  // Table 2 (for high byte 0x01)
+
+  /* 0 */ 0x0100, 0x0101, 0x0102, 0x0103, 0x0104, 0x0105, 0x0106, 0x0107,
+          0x0108, 0x0109, 0x010A, 0x010B, 0x010C, 0x010D, 0x010E, 0x010F,
+  /* 1 */ 0x0111, 0x0111, 0x0112, 0x0113, 0x0114, 0x0115, 0x0116, 0x0117,
+          0x0118, 0x0119, 0x011A, 0x011B, 0x011C, 0x011D, 0x011E, 0x011F,
+  /* 2 */ 0x0120, 0x0121, 0x0122, 0x0123, 0x0124, 0x0125, 0x0127, 0x0127,
+          0x0128, 0x0129, 0x012A, 0x012B, 0x012C, 0x012D, 0x012E, 0x012F,
+  /* 3 */ 0x0130, 0x0131, 0x0133, 0x0133, 0x0134, 0x0135, 0x0136, 0x0137,
+          0x0138, 0x0139, 0x013A, 0x013B, 0x013C, 0x013D, 0x013E, 0x0140,
+  /* 4 */ 0x0140, 0x0142, 0x0142, 0x0143, 0x0144, 0x0145, 0x0146, 0x0147,
+          0x0148, 0x0149, 0x014B, 0x014B, 0x014C, 0x014D, 0x014E, 0x014F,
+  /* 5 */ 0x0150, 0x0151, 0x0153, 0x0153, 0x0154, 0x0155, 0x0156, 0x0157,
+          0x0158, 0x0159, 0x015A, 0x015B, 0x015C, 0x015D, 0x015E, 0x015F,
+  /* 6 */ 0x0160, 0x0161, 0x0162, 0x0163, 0x0164, 0x0165, 0x0167, 0x0167,
+          0x0168, 0x0169, 0x016A, 0x016B, 0x016C, 0x016D, 0x016E, 0x016F,
+  /* 7 */ 0x0170, 0x0171, 0x0172, 0x0173, 0x0174, 0x0175, 0x0176, 0x0177,
+          0x0178, 0x0179, 0x017A, 0x017B, 0x017C, 0x017D, 0x017E, 0x017F,
+  /* 8 */ 0x0180, 0x0253, 0x0183, 0x0183, 0x0185, 0x0185, 0x0254, 0x0188,
+          0x0188, 0x0256, 0x0257, 0x018C, 0x018C, 0x018D, 0x01DD, 0x0259,
+  /* 9 */ 0x025B, 0x0192, 0x0192, 0x0260, 0x0263, 0x0195, 0x0269, 0x0268,
+          0x0199, 0x0199, 0x019A, 0x019B, 0x026F, 0x0272, 0x019E, 0x0275,
+  /* A */ 0x01A0, 0x01A1, 0x01A3, 0x01A3, 0x01A5, 0x01A5, 0x01A6, 0x01A8,
+          0x01A8, 0x0283, 0x01AA, 0x01AB, 0x01AD, 0x01AD, 0x0288, 0x01AF,
+  /* B */ 0x01B0, 0x028A, 0x028B, 0x01B4, 0x01B4, 0x01B6, 0x01B6, 0x0292,
+          0x01B9, 0x01B9, 0x01BA, 0x01BB, 0x01BD, 0x01BD, 0x01BE, 0x01BF,
+  /* C */ 0x01C0, 0x01C1, 0x01C2, 0x01C3, 0x01C6, 0x01C6, 0x01C6, 0x01C9,
+          0x01C9, 0x01C9, 0x01CC, 0x01CC, 0x01CC, 0x01CD, 0x01CE, 0x01CF,
+  /* D */ 0x01D0, 0x01D1, 0x01D2, 0x01D3, 0x01D4, 0x01D5, 0x01D6, 0x01D7,
+          0x01D8, 0x01D9, 0x01DA, 0x01DB, 0x01DC, 0x01DD, 0x01DE, 0x01DF,
+  /* E */ 0x01E0, 0x01E1, 0x01E2, 0x01E3, 0x01E5, 0x01E5, 0x01E6, 0x01E7,
+          0x01E8, 0x01E9, 0x01EA, 0x01EB, 0x01EC, 0x01ED, 0x01EE, 0x01EF,
+  /* F */ 0x01F0, 0x01F3, 0x01F3, 0x01F3, 0x01F4, 0x01F5, 0x01F6, 0x01F7,
+          0x01F8, 0x01F9, 0x01FA, 0x01FB, 0x01FC, 0x01FD, 0x01FE, 0x01FF,
+
+  // Table 3 (for high byte 0x03)
+
+  /* 0 */ 0x0300, 0x0301, 0x0302, 0x0303, 0x0304, 0x0305, 0x0306, 0x0307,
+          0x0308, 0x0309, 0x030A, 0x030B, 0x030C, 0x030D, 0x030E, 0x030F,
+  /* 1 */ 0x0310, 0x0311, 0x0312, 0x0313, 0x0314, 0x0315, 0x0316, 0x0317,
+          0x0318, 0x0319, 0x031A, 0x031B, 0x031C, 0x031D, 0x031E, 0x031F,
+  /* 2 */ 0x0320, 0x0321, 0x0322, 0x0323, 0x0324, 0x0325, 0x0326, 0x0327,
+          0x0328, 0x0329, 0x032A, 0x032B, 0x032C, 0x032D, 0x032E, 0x032F,
+  /* 3 */ 0x0330, 0x0331, 0x0332, 0x0333, 0x0334, 0x0335, 0x0336, 0x0337,
+          0x0338, 0x0339, 0x033A, 0x033B, 0x033C, 0x033D, 0x033E, 0x033F,
+  /* 4 */ 0x0340, 0x0341, 0x0342, 0x0343, 0x0344, 0x0345, 0x0346, 0x0347,
+          0x0348, 0x0349, 0x034A, 0x034B, 0x034C, 0x034D, 0x034E, 0x034F,
+  /* 5 */ 0x0350, 0x0351, 0x0352, 0x0353, 0x0354, 0x0355, 0x0356, 0x0357,
+          0x0358, 0x0359, 0x035A, 0x035B, 0x035C, 0x035D, 0x035E, 0x035F,
+  /* 6 */ 0x0360, 0x0361, 0x0362, 0x0363, 0x0364, 0x0365, 0x0366, 0x0367,
+          0x0368, 0x0369, 0x036A, 0x036B, 0x036C, 0x036D, 0x036E, 0x036F,
+  /* 7 */ 0x0370, 0x0371, 0x0372, 0x0373, 0x0374, 0x0375, 0x0376, 0x0377,
+          0x0378, 0x0379, 0x037A, 0x037B, 0x037C, 0x037D, 0x037E, 0x037F,
+  /* 8 */ 0x0380, 0x0381, 0x0382, 0x0383, 0x0384, 0x0385, 0x0386, 0x0387,
+          0x0388, 0x0389, 0x038A, 0x038B, 0x038C, 0x038D, 0x038E, 0x038F,
+  /* 9 */ 0x0390, 0x03B1, 0x03B2, 0x03B3, 0x03B4, 0x03B5, 0x03B6, 0x03B7,
+          0x03B8, 0x03B9, 0x03BA, 0x03BB, 0x03BC, 0x03BD, 0x03BE, 0x03BF,
+  /* A */ 0x03C0, 0x03C1, 0x03A2, 0x03C3, 0x03C4, 0x03C5, 0x03C6, 0x03C7,
+          0x03C8, 0x03C9, 0x03AA, 0x03AB, 0x03AC, 0x03AD, 0x03AE, 0x03AF,
+  /* B */ 0x03B0, 0x03B1, 0x03B2, 0x03B3, 0x03B4, 0x03B5, 0x03B6, 0x03B7,
+          0x03B8, 0x03B9, 0x03BA, 0x03BB, 0x03BC, 0x03BD, 0x03BE, 0x03BF,
+  /* C */ 0x03C0, 0x03C1, 0x03C2, 0x03C3, 0x03C4, 0x03C5, 0x03C6, 0x03C7,
+          0x03C8, 0x03C9, 0x03CA, 0x03CB, 0x03CC, 0x03CD, 0x03CE, 0x03CF,
+  /* D */ 0x03D0, 0x03D1, 0x03D2, 0x03D3, 0x03D4, 0x03D5, 0x03D6, 0x03D7,
+          0x03D8, 0x03D9, 0x03DA, 0x03DB, 0x03DC, 0x03DD, 0x03DE, 0x03DF,
+  /* E */ 0x03E0, 0x03E1, 0x03E3, 0x03E3, 0x03E5, 0x03E5, 0x03E7, 0x03E7,
+          0x03E9, 0x03E9, 0x03EB, 0x03EB, 0x03ED, 0x03ED, 0x03EF, 0x03EF,
+  /* F */ 0x03F0, 0x03F1, 0x03F2, 0x03F3, 0x03F4, 0x03F5, 0x03F6, 0x03F7,
+          0x03F8, 0x03F9, 0x03FA, 0x03FB, 0x03FC, 0x03FD, 0x03FE, 0x03FF,
+
+  // Table 4 (for high byte 0x04)
+
+  /* 0 */ 0x0400, 0x0401, 0x0452, 0x0403, 0x0454, 0x0455, 0x0456, 0x0407,
+          0x0458, 0x0459, 0x045A, 0x045B, 0x040C, 0x040D, 0x040E, 0x045F,
+  /* 1 */ 0x0430, 0x0431, 0x0432, 0x0433, 0x0434, 0x0435, 0x0436, 0x0437,
+          0x0438, 0x0419, 0x043A, 0x043B, 0x043C, 0x043D, 0x043E, 0x043F,
+  /* 2 */ 0x0440, 0x0441, 0x0442, 0x0443, 0x0444, 0x0445, 0x0446, 0x0447,
+          0x0448, 0x0449, 0x044A, 0x044B, 0x044C, 0x044D, 0x044E, 0x044F,
+  /* 3 */ 0x0430, 0x0431, 0x0432, 0x0433, 0x0434, 0x0435, 0x0436, 0x0437,
+          0x0438, 0x0439, 0x043A, 0x043B, 0x043C, 0x043D, 0x043E, 0x043F,
+  /* 4 */ 0x0440, 0x0441, 0x0442, 0x0443, 0x0444, 0x0445, 0x0446, 0x0447,
+          0x0448, 0x0449, 0x044A, 0x044B, 0x044C, 0x044D, 0x044E, 0x044F,
+  /* 5 */ 0x0450, 0x0451, 0x0452, 0x0453, 0x0454, 0x0455, 0x0456, 0x0457,
+          0x0458, 0x0459, 0x045A, 0x045B, 0x045C, 0x045D, 0x045E, 0x045F,
+  /* 6 */ 0x0461, 0x0461, 0x0463, 0x0463, 0x0465, 0x0465, 0x0467, 0x0467,
+          0x0469, 0x0469, 0x046B, 0x046B, 0x046D, 0x046D, 0x046F, 0x046F,
+  /* 7 */ 0x0471, 0x0471, 0x0473, 0x0473, 0x0475, 0x0475, 0x0476, 0x0477,
+          0x0479, 0x0479, 0x047B, 0x047B, 0x047D, 0x047D, 0x047F, 0x047F,
+  /* 8 */ 0x0481, 0x0481, 0x0482, 0x0483, 0x0484, 0x0485, 0x0486, 0x0487,
+          0x0488, 0x0489, 0x048A, 0x048B, 0x048C, 0x048D, 0x048E, 0x048F,
+  /* 9 */ 0x0491, 0x0491, 0x0493, 0x0493, 0x0495, 0x0495, 0x0497, 0x0497,
+          0x0499, 0x0499, 0x049B, 0x049B, 0x049D, 0x049D, 0x049F, 0x049F,
+  /* A */ 0x04A1, 0x04A1, 0x04A3, 0x04A3, 0x04A5, 0x04A5, 0x04A7, 0x04A7,
+          0x04A9, 0x04A9, 0x04AB, 0x04AB, 0x04AD, 0x04AD, 0x04AF, 0x04AF,
+  /* B */ 0x04B1, 0x04B1, 0x04B3, 0x04B3, 0x04B5, 0x04B5, 0x04B7, 0x04B7,
+          0x04B9, 0x04B9, 0x04BB, 0x04BB, 0x04BD, 0x04BD, 0x04BF, 0x04BF,
+  /* C */ 0x04C0, 0x04C1, 0x04C2, 0x04C4, 0x04C4, 0x04C5, 0x04C6, 0x04C8,
+          0x04C8, 0x04C9, 0x04CA, 0x04CC, 0x04CC, 0x04CD, 0x04CE, 0x04CF,
+  /* D */ 0x04D0, 0x04D1, 0x04D2, 0x04D3, 0x04D4, 0x04D5, 0x04D6, 0x04D7,
+          0x04D8, 0x04D9, 0x04DA, 0x04DB, 0x04DC, 0x04DD, 0x04DE, 0x04DF,
+  /* E */ 0x04E0, 0x04E1, 0x04E2, 0x04E3, 0x04E4, 0x04E5, 0x04E6, 0x04E7,
+          0x04E8, 0x04E9, 0x04EA, 0x04EB, 0x04EC, 0x04ED, 0x04EE, 0x04EF,
+  /* F */ 0x04F0, 0x04F1, 0x04F2, 0x04F3, 0x04F4, 0x04F5, 0x04F6, 0x04F7,
+          0x04F8, 0x04F9, 0x04FA, 0x04FB, 0x04FC, 0x04FD, 0x04FE, 0x04FF,
+
+  // Table 5 (for high byte 0x05)
+
+  /* 0 */ 0x0500, 0x0501, 0x0502, 0x0503, 0x0504, 0x0505, 0x0506, 0x0507,
+          0x0508, 0x0509, 0x050A, 0x050B, 0x050C, 0x050D, 0x050E, 0x050F,
+  /* 1 */ 0x0510, 0x0511, 0x0512, 0x0513, 0x0514, 0x0515, 0x0516, 0x0517,
+          0x0518, 0x0519, 0x051A, 0x051B, 0x051C, 0x051D, 0x051E, 0x051F,
+  /* 2 */ 0x0520, 0x0521, 0x0522, 0x0523, 0x0524, 0x0525, 0x0526, 0x0527,
+          0x0528, 0x0529, 0x052A, 0x052B, 0x052C, 0x052D, 0x052E, 0x052F,
+  /* 3 */ 0x0530, 0x0561, 0x0562, 0x0563, 0x0564, 0x0565, 0x0566, 0x0567,
+          0x0568, 0x0569, 0x056A, 0x056B, 0x056C, 0x056D, 0x056E, 0x056F,
+  /* 4 */ 0x0570, 0x0571, 0x0572, 0x0573, 0x0574, 0x0575, 0x0576, 0x0577,
+          0x0578, 0x0579, 0x057A, 0x057B, 0x057C, 0x057D, 0x057E, 0x057F,
+  /* 5 */ 0x0580, 0x0581, 0x0582, 0x0583, 0x0584, 0x0585, 0x0586, 0x0557,
+          0x0558, 0x0559, 0x055A, 0x055B, 0x055C, 0x055D, 0x055E, 0x055F,
+  /* 6 */ 0x0560, 0x0561, 0x0562, 0x0563, 0x0564, 0x0565, 0x0566, 0x0567,
+          0x0568, 0x0569, 0x056A, 0x056B, 0x056C, 0x056D, 0x056E, 0x056F,
+  /* 7 */ 0x0570, 0x0571, 0x0572, 0x0573, 0x0574, 0x0575, 0x0576, 0x0577,
+          0x0578, 0x0579, 0x057A, 0x057B, 0x057C, 0x057D, 0x057E, 0x057F,
+  /* 8 */ 0x0580, 0x0581, 0x0582, 0x0583, 0x0584, 0x0585, 0x0586, 0x0587,
+          0x0588, 0x0589, 0x058A, 0x058B, 0x058C, 0x058D, 0x058E, 0x058F,
+  /* 9 */ 0x0590, 0x0591, 0x0592, 0x0593, 0x0594, 0x0595, 0x0596, 0x0597,
+          0x0598, 0x0599, 0x059A, 0x059B, 0x059C, 0x059D, 0x059E, 0x059F,
+  /* A */ 0x05A0, 0x05A1, 0x05A2, 0x05A3, 0x05A4, 0x05A5, 0x05A6, 0x05A7,
+          0x05A8, 0x05A9, 0x05AA, 0x05AB, 0x05AC, 0x05AD, 0x05AE, 0x05AF,
+  /* B */ 0x05B0, 0x05B1, 0x05B2, 0x05B3, 0x05B4, 0x05B5, 0x05B6, 0x05B7,
+          0x05B8, 0x05B9, 0x05BA, 0x05BB, 0x05BC, 0x05BD, 0x05BE, 0x05BF,
+  /* C */ 0x05C0, 0x05C1, 0x05C2, 0x05C3, 0x05C4, 0x05C5, 0x05C6, 0x05C7,
+          0x05C8, 0x05C9, 0x05CA, 0x05CB, 0x05CC, 0x05CD, 0x05CE, 0x05CF,
+  /* D */ 0x05D0, 0x05D1, 0x05D2, 0x05D3, 0x05D4, 0x05D5, 0x05D6, 0x05D7,
+          0x05D8, 0x05D9, 0x05DA, 0x05DB, 0x05DC, 0x05DD, 0x05DE, 0x05DF,
+  /* E */ 0x05E0, 0x05E1, 0x05E2, 0x05E3, 0x05E4, 0x05E5, 0x05E6, 0x05E7,
+          0x05E8, 0x05E9, 0x05EA, 0x05EB, 0x05EC, 0x05ED, 0x05EE, 0x05EF,
+  /* F */ 0x05F0, 0x05F1, 0x05F2, 0x05F3, 0x05F4, 0x05F5, 0x05F6, 0x05F7,
+          0x05F8, 0x05F9, 0x05FA, 0x05FB, 0x05FC, 0x05FD, 0x05FE, 0x05FF,
+
+  // Table 6 (for high byte 0x10)
+
+  /* 0 */ 0x1000, 0x1001, 0x1002, 0x1003, 0x1004, 0x1005, 0x1006, 0x1007,
+          0x1008, 0x1009, 0x100A, 0x100B, 0x100C, 0x100D, 0x100E, 0x100F,
+  /* 1 */ 0x1010, 0x1011, 0x1012, 0x1013, 0x1014, 0x1015, 0x1016, 0x1017,
+          0x1018, 0x1019, 0x101A, 0x101B, 0x101C, 0x101D, 0x101E, 0x101F,
+  /* 2 */ 0x1020, 0x1021, 0x1022, 0x1023, 0x1024, 0x1025, 0x1026, 0x1027,
+          0x1028, 0x1029, 0x102A, 0x102B, 0x102C, 0x102D, 0x102E, 0x102F,
+  /* 3 */ 0x1030, 0x1031, 0x1032, 0x1033, 0x1034, 0x1035, 0x1036, 0x1037,
+          0x1038, 0x1039, 0x103A, 0x103B, 0x103C, 0x103D, 0x103E, 0x103F,
+  /* 4 */ 0x1040, 0x1041, 0x1042, 0x1043, 0x1044, 0x1045, 0x1046, 0x1047,
+          0x1048, 0x1049, 0x104A, 0x104B, 0x104C, 0x104D, 0x104E, 0x104F,
+  /* 5 */ 0x1050, 0x1051, 0x1052, 0x1053, 0x1054, 0x1055, 0x1056, 0x1057,
+          0x1058, 0x1059, 0x105A, 0x105B, 0x105C, 0x105D, 0x105E, 0x105F,
+  /* 6 */ 0x1060, 0x1061, 0x1062, 0x1063, 0x1064, 0x1065, 0x1066, 0x1067,
+          0x1068, 0x1069, 0x106A, 0x106B, 0x106C, 0x106D, 0x106E, 0x106F,
+  /* 7 */ 0x1070, 0x1071, 0x1072, 0x1073, 0x1074, 0x1075, 0x1076, 0x1077,
+          0x1078, 0x1079, 0x107A, 0x107B, 0x107C, 0x107D, 0x107E, 0x107F,
+  /* 8 */ 0x1080, 0x1081, 0x1082, 0x1083, 0x1084, 0x1085, 0x1086, 0x1087,
+          0x1088, 0x1089, 0x108A, 0x108B, 0x108C, 0x108D, 0x108E, 0x108F,
+  /* 9 */ 0x1090, 0x1091, 0x1092, 0x1093, 0x1094, 0x1095, 0x1096, 0x1097,
+          0x1098, 0x1099, 0x109A, 0x109B, 0x109C, 0x109D, 0x109E, 0x109F,
+  /* A */ 0x10D0, 0x10D1, 0x10D2, 0x10D3, 0x10D4, 0x10D5, 0x10D6, 0x10D7,
+          0x10D8, 0x10D9, 0x10DA, 0x10DB, 0x10DC, 0x10DD, 0x10DE, 0x10DF,
+  /* B */ 0x10E0, 0x10E1, 0x10E2, 0x10E3, 0x10E4, 0x10E5, 0x10E6, 0x10E7,
+          0x10E8, 0x10E9, 0x10EA, 0x10EB, 0x10EC, 0x10ED, 0x10EE, 0x10EF,
+  /* C */ 0x10F0, 0x10F1, 0x10F2, 0x10F3, 0x10F4, 0x10F5, 0x10C6, 0x10C7,
+          0x10C8, 0x10C9, 0x10CA, 0x10CB, 0x10CC, 0x10CD, 0x10CE, 0x10CF,
+  /* D */ 0x10D0, 0x10D1, 0x10D2, 0x10D3, 0x10D4, 0x10D5, 0x10D6, 0x10D7,
+          0x10D8, 0x10D9, 0x10DA, 0x10DB, 0x10DC, 0x10DD, 0x10DE, 0x10DF,
+  /* E */ 0x10E0, 0x10E1, 0x10E2, 0x10E3, 0x10E4, 0x10E5, 0x10E6, 0x10E7,
+          0x10E8, 0x10E9, 0x10EA, 0x10EB, 0x10EC, 0x10ED, 0x10EE, 0x10EF,
+  /* F */ 0x10F0, 0x10F1, 0x10F2, 0x10F3, 0x10F4, 0x10F5, 0x10F6, 0x10F7,
+          0x10F8, 0x10F9, 0x10FA, 0x10FB, 0x10FC, 0x10FD, 0x10FE, 0x10FF,
+
+  // Table 7 (for high byte 0x20)
+
+  /* 0 */ 0x2000, 0x2001, 0x2002, 0x2003, 0x2004, 0x2005, 0x2006, 0x2007,
+          0x2008, 0x2009, 0x200A, 0x200B, 0x0000, 0x0000, 0x0000, 0x0000,
+  /* 1 */ 0x2010, 0x2011, 0x2012, 0x2013, 0x2014, 0x2015, 0x2016, 0x2017,
+          0x2018, 0x2019, 0x201A, 0x201B, 0x201C, 0x201D, 0x201E, 0x201F,
+  /* 2 */ 0x2020, 0x2021, 0x2022, 0x2023, 0x2024, 0x2025, 0x2026, 0x2027,
+          0x2028, 0x2029, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x202F,
+  /* 3 */ 0x2030, 0x2031, 0x2032, 0x2033, 0x2034, 0x2035, 0x2036, 0x2037,
+          0x2038, 0x2039, 0x203A, 0x203B, 0x203C, 0x203D, 0x203E, 0x203F,
+  /* 4 */ 0x2040, 0x2041, 0x2042, 0x2043, 0x2044, 0x2045, 0x2046, 0x2047,
+          0x2048, 0x2049, 0x204A, 0x204B, 0x204C, 0x204D, 0x204E, 0x204F,
+  /* 5 */ 0x2050, 0x2051, 0x2052, 0x2053, 0x2054, 0x2055, 0x2056, 0x2057,
+          0x2058, 0x2059, 0x205A, 0x205B, 0x205C, 0x205D, 0x205E, 0x205F,
+  /* 6 */ 0x2060, 0x2061, 0x2062, 0x2063, 0x2064, 0x2065, 0x2066, 0x2067,
+          0x2068, 0x2069, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+  /* 7 */ 0x2070, 0x2071, 0x2072, 0x2073, 0x2074, 0x2075, 0x2076, 0x2077,
+          0x2078, 0x2079, 0x207A, 0x207B, 0x207C, 0x207D, 0x207E, 0x207F,
+  /* 8 */ 0x2080, 0x2081, 0x2082, 0x2083, 0x2084, 0x2085, 0x2086, 0x2087,
+          0x2088, 0x2089, 0x208A, 0x208B, 0x208C, 0x208D, 0x208E, 0x208F,
+  /* 9 */ 0x2090, 0x2091, 0x2092, 0x2093, 0x2094, 0x2095, 0x2096, 0x2097,
+          0x2098, 0x2099, 0x209A, 0x209B, 0x209C, 0x209D, 0x209E, 0x209F,
+  /* A */ 0x20A0, 0x20A1, 0x20A2, 0x20A3, 0x20A4, 0x20A5, 0x20A6, 0x20A7,
+          0x20A8, 0x20A9, 0x20AA, 0x20AB, 0x20AC, 0x20AD, 0x20AE, 0x20AF,
+  /* B */ 0x20B0, 0x20B1, 0x20B2, 0x20B3, 0x20B4, 0x20B5, 0x20B6, 0x20B7,
+          0x20B8, 0x20B9, 0x20BA, 0x20BB, 0x20BC, 0x20BD, 0x20BE, 0x20BF,
+  /* C */ 0x20C0, 0x20C1, 0x20C2, 0x20C3, 0x20C4, 0x20C5, 0x20C6, 0x20C7,
+          0x20C8, 0x20C9, 0x20CA, 0x20CB, 0x20CC, 0x20CD, 0x20CE, 0x20CF,
+  /* D */ 0x20D0, 0x20D1, 0x20D2, 0x20D3, 0x20D4, 0x20D5, 0x20D6, 0x20D7,
+          0x20D8, 0x20D9, 0x20DA, 0x20DB, 0x20DC, 0x20DD, 0x20DE, 0x20DF,
+  /* E */ 0x20E0, 0x20E1, 0x20E2, 0x20E3, 0x20E4, 0x20E5, 0x20E6, 0x20E7,
+          0x20E8, 0x20E9, 0x20EA, 0x20EB, 0x20EC, 0x20ED, 0x20EE, 0x20EF,
+  /* F */ 0x20F0, 0x20F1, 0x20F2, 0x20F3, 0x20F4, 0x20F5, 0x20F6, 0x20F7,
+          0x20F8, 0x20F9, 0x20FA, 0x20FB, 0x20FC, 0x20FD, 0x20FE, 0x20FF,
+
+  // Table 8 (for high byte 0x21)
+
+  /* 0 */ 0x2100, 0x2101, 0x2102, 0x2103, 0x2104, 0x2105, 0x2106, 0x2107,
+          0x2108, 0x2109, 0x210A, 0x210B, 0x210C, 0x210D, 0x210E, 0x210F,
+  /* 1 */ 0x2110, 0x2111, 0x2112, 0x2113, 0x2114, 0x2115, 0x2116, 0x2117,
+          0x2118, 0x2119, 0x211A, 0x211B, 0x211C, 0x211D, 0x211E, 0x211F,
+  /* 2 */ 0x2120, 0x2121, 0x2122, 0x2123, 0x2124, 0x2125, 0x2126, 0x2127,
+          0x2128, 0x2129, 0x212A, 0x212B, 0x212C, 0x212D, 0x212E, 0x212F,
+  /* 3 */ 0x2130, 0x2131, 0x2132, 0x2133, 0x2134, 0x2135, 0x2136, 0x2137,
+          0x2138, 0x2139, 0x213A, 0x213B, 0x213C, 0x213D, 0x213E, 0x213F,
+  /* 4 */ 0x2140, 0x2141, 0x2142, 0x2143, 0x2144, 0x2145, 0x2146, 0x2147,
+          0x2148, 0x2149, 0x214A, 0x214B, 0x214C, 0x214D, 0x214E, 0x214F,
+  /* 5 */ 0x2150, 0x2151, 0x2152, 0x2153, 0x2154, 0x2155, 0x2156, 0x2157,
+          0x2158, 0x2159, 0x215A, 0x215B, 0x215C, 0x215D, 0x215E, 0x215F,
+  /* 6 */ 0x2170, 0x2171, 0x2172, 0x2173, 0x2174, 0x2175, 0x2176, 0x2177,
+          0x2178, 0x2179, 0x217A, 0x217B, 0x217C, 0x217D, 0x217E, 0x217F,
+  /* 7 */ 0x2170, 0x2171, 0x2172, 0x2173, 0x2174, 0x2175, 0x2176, 0x2177,
+          0x2178, 0x2179, 0x217A, 0x217B, 0x217C, 0x217D, 0x217E, 0x217F,
+  /* 8 */ 0x2180, 0x2181, 0x2182, 0x2183, 0x2184, 0x2185, 0x2186, 0x2187,
+          0x2188, 0x2189, 0x218A, 0x218B, 0x218C, 0x218D, 0x218E, 0x218F,
+  /* 9 */ 0x2190, 0x2191, 0x2192, 0x2193, 0x2194, 0x2195, 0x2196, 0x2197,
+          0x2198, 0x2199, 0x219A, 0x219B, 0x219C, 0x219D, 0x219E, 0x219F,
+  /* A */ 0x21A0, 0x21A1, 0x21A2, 0x21A3, 0x21A4, 0x21A5, 0x21A6, 0x21A7,
+          0x21A8, 0x21A9, 0x21AA, 0x21AB, 0x21AC, 0x21AD, 0x21AE, 0x21AF,
+  /* B */ 0x21B0, 0x21B1, 0x21B2, 0x21B3, 0x21B4, 0x21B5, 0x21B6, 0x21B7,
+          0x21B8, 0x21B9, 0x21BA, 0x21BB, 0x21BC, 0x21BD, 0x21BE, 0x21BF,
+  /* C */ 0x21C0, 0x21C1, 0x21C2, 0x21C3, 0x21C4, 0x21C5, 0x21C6, 0x21C7,
+          0x21C8, 0x21C9, 0x21CA, 0x21CB, 0x21CC, 0x21CD, 0x21CE, 0x21CF,
+  /* D */ 0x21D0, 0x21D1, 0x21D2, 0x21D3, 0x21D4, 0x21D5, 0x21D6, 0x21D7,
+          0x21D8, 0x21D9, 0x21DA, 0x21DB, 0x21DC, 0x21DD, 0x21DE, 0x21DF,
+  /* E */ 0x21E0, 0x21E1, 0x21E2, 0x21E3, 0x21E4, 0x21E5, 0x21E6, 0x21E7,
+          0x21E8, 0x21E9, 0x21EA, 0x21EB, 0x21EC, 0x21ED, 0x21EE, 0x21EF,
+  /* F */ 0x21F0, 0x21F1, 0x21F2, 0x21F3, 0x21F4, 0x21F5, 0x21F6, 0x21F7,
+          0x21F8, 0x21F9, 0x21FA, 0x21FB, 0x21FC, 0x21FD, 0x21FE, 0x21FF,
+
+  // Table 9 (for high byte 0xFE)
+
+  /* 0 */ 0xFE00, 0xFE01, 0xFE02, 0xFE03, 0xFE04, 0xFE05, 0xFE06, 0xFE07,
+          0xFE08, 0xFE09, 0xFE0A, 0xFE0B, 0xFE0C, 0xFE0D, 0xFE0E, 0xFE0F,
+  /* 1 */ 0xFE10, 0xFE11, 0xFE12, 0xFE13, 0xFE14, 0xFE15, 0xFE16, 0xFE17,
+          0xFE18, 0xFE19, 0xFE1A, 0xFE1B, 0xFE1C, 0xFE1D, 0xFE1E, 0xFE1F,
+  /* 2 */ 0xFE20, 0xFE21, 0xFE22, 0xFE23, 0xFE24, 0xFE25, 0xFE26, 0xFE27,
+          0xFE28, 0xFE29, 0xFE2A, 0xFE2B, 0xFE2C, 0xFE2D, 0xFE2E, 0xFE2F,
+  /* 3 */ 0xFE30, 0xFE31, 0xFE32, 0xFE33, 0xFE34, 0xFE35, 0xFE36, 0xFE37,
+          0xFE38, 0xFE39, 0xFE3A, 0xFE3B, 0xFE3C, 0xFE3D, 0xFE3E, 0xFE3F,
+  /* 4 */ 0xFE40, 0xFE41, 0xFE42, 0xFE43, 0xFE44, 0xFE45, 0xFE46, 0xFE47,
+          0xFE48, 0xFE49, 0xFE4A, 0xFE4B, 0xFE4C, 0xFE4D, 0xFE4E, 0xFE4F,
+  /* 5 */ 0xFE50, 0xFE51, 0xFE52, 0xFE53, 0xFE54, 0xFE55, 0xFE56, 0xFE57,
+          0xFE58, 0xFE59, 0xFE5A, 0xFE5B, 0xFE5C, 0xFE5D, 0xFE5E, 0xFE5F,
+  /* 6 */ 0xFE60, 0xFE61, 0xFE62, 0xFE63, 0xFE64, 0xFE65, 0xFE66, 0xFE67,
+          0xFE68, 0xFE69, 0xFE6A, 0xFE6B, 0xFE6C, 0xFE6D, 0xFE6E, 0xFE6F,
+  /* 7 */ 0xFE70, 0xFE71, 0xFE72, 0xFE73, 0xFE74, 0xFE75, 0xFE76, 0xFE77,
+          0xFE78, 0xFE79, 0xFE7A, 0xFE7B, 0xFE7C, 0xFE7D, 0xFE7E, 0xFE7F,
+  /* 8 */ 0xFE80, 0xFE81, 0xFE82, 0xFE83, 0xFE84, 0xFE85, 0xFE86, 0xFE87,
+          0xFE88, 0xFE89, 0xFE8A, 0xFE8B, 0xFE8C, 0xFE8D, 0xFE8E, 0xFE8F,
+  /* 9 */ 0xFE90, 0xFE91, 0xFE92, 0xFE93, 0xFE94, 0xFE95, 0xFE96, 0xFE97,
+          0xFE98, 0xFE99, 0xFE9A, 0xFE9B, 0xFE9C, 0xFE9D, 0xFE9E, 0xFE9F,
+  /* A */ 0xFEA0, 0xFEA1, 0xFEA2, 0xFEA3, 0xFEA4, 0xFEA5, 0xFEA6, 0xFEA7,
+          0xFEA8, 0xFEA9, 0xFEAA, 0xFEAB, 0xFEAC, 0xFEAD, 0xFEAE, 0xFEAF,
+  /* B */ 0xFEB0, 0xFEB1, 0xFEB2, 0xFEB3, 0xFEB4, 0xFEB5, 0xFEB6, 0xFEB7,
+          0xFEB8, 0xFEB9, 0xFEBA, 0xFEBB, 0xFEBC, 0xFEBD, 0xFEBE, 0xFEBF,
+  /* C */ 0xFEC0, 0xFEC1, 0xFEC2, 0xFEC3, 0xFEC4, 0xFEC5, 0xFEC6, 0xFEC7,
+          0xFEC8, 0xFEC9, 0xFECA, 0xFECB, 0xFECC, 0xFECD, 0xFECE, 0xFECF,
+  /* D */ 0xFED0, 0xFED1, 0xFED2, 0xFED3, 0xFED4, 0xFED5, 0xFED6, 0xFED7,
+          0xFED8, 0xFED9, 0xFEDA, 0xFEDB, 0xFEDC, 0xFEDD, 0xFEDE, 0xFEDF,
+  /* E */ 0xFEE0, 0xFEE1, 0xFEE2, 0xFEE3, 0xFEE4, 0xFEE5, 0xFEE6, 0xFEE7,
+          0xFEE8, 0xFEE9, 0xFEEA, 0xFEEB, 0xFEEC, 0xFEED, 0xFEEE, 0xFEEF,
+  /* F */ 0xFEF0, 0xFEF1, 0xFEF2, 0xFEF3, 0xFEF4, 0xFEF5, 0xFEF6, 0xFEF7,
+          0xFEF8, 0xFEF9, 0xFEFA, 0xFEFB, 0xFEFC, 0xFEFD, 0xFEFE, 0x0000,
+
+  // Table 10 (for high byte 0xFF)
+
+  /* 0 */ 0xFF00, 0xFF01, 0xFF02, 0xFF03, 0xFF04, 0xFF05, 0xFF06, 0xFF07,
+          0xFF08, 0xFF09, 0xFF0A, 0xFF0B, 0xFF0C, 0xFF0D, 0xFF0E, 0xFF0F,
+  /* 1 */ 0xFF10, 0xFF11, 0xFF12, 0xFF13, 0xFF14, 0xFF15, 0xFF16, 0xFF17,
+          0xFF18, 0xFF19, 0xFF1A, 0xFF1B, 0xFF1C, 0xFF1D, 0xFF1E, 0xFF1F,
+  /* 2 */ 0xFF20, 0xFF41, 0xFF42, 0xFF43, 0xFF44, 0xFF45, 0xFF46, 0xFF47,
+          0xFF48, 0xFF49, 0xFF4A, 0xFF4B, 0xFF4C, 0xFF4D, 0xFF4E, 0xFF4F,
+  /* 3 */ 0xFF50, 0xFF51, 0xFF52, 0xFF53, 0xFF54, 0xFF55, 0xFF56, 0xFF57,
+          0xFF58, 0xFF59, 0xFF5A, 0xFF3B, 0xFF3C, 0xFF3D, 0xFF3E, 0xFF3F,
+  /* 4 */ 0xFF40, 0xFF41, 0xFF42, 0xFF43, 0xFF44, 0xFF45, 0xFF46, 0xFF47,
+          0xFF48, 0xFF49, 0xFF4A, 0xFF4B, 0xFF4C, 0xFF4D, 0xFF4E, 0xFF4F,
+  /* 5 */ 0xFF50, 0xFF51, 0xFF52, 0xFF53, 0xFF54, 0xFF55, 0xFF56, 0xFF57,
+          0xFF58, 0xFF59, 0xFF5A, 0xFF5B, 0xFF5C, 0xFF5D, 0xFF5E, 0xFF5F,
+  /* 6 */ 0xFF60, 0xFF61, 0xFF62, 0xFF63, 0xFF64, 0xFF65, 0xFF66, 0xFF67,
+          0xFF68, 0xFF69, 0xFF6A, 0xFF6B, 0xFF6C, 0xFF6D, 0xFF6E, 0xFF6F,
+  /* 7 */ 0xFF70, 0xFF71, 0xFF72, 0xFF73, 0xFF74, 0xFF75, 0xFF76, 0xFF77,
+          0xFF78, 0xFF79, 0xFF7A, 0xFF7B, 0xFF7C, 0xFF7D, 0xFF7E, 0xFF7F,
+  /* 8 */ 0xFF80, 0xFF81, 0xFF82, 0xFF83, 0xFF84, 0xFF85, 0xFF86, 0xFF87,
+          0xFF88, 0xFF89, 0xFF8A, 0xFF8B, 0xFF8C, 0xFF8D, 0xFF8E, 0xFF8F,
+  /* 9 */ 0xFF90, 0xFF91, 0xFF92, 0xFF93, 0xFF94, 0xFF95, 0xFF96, 0xFF97,
+          0xFF98, 0xFF99, 0xFF9A, 0xFF9B, 0xFF9C, 0xFF9D, 0xFF9E, 0xFF9F,
+  /* A */ 0xFFA0, 0xFFA1, 0xFFA2, 0xFFA3, 0xFFA4, 0xFFA5, 0xFFA6, 0xFFA7,
+          0xFFA8, 0xFFA9, 0xFFAA, 0xFFAB, 0xFFAC, 0xFFAD, 0xFFAE, 0xFFAF,
+  /* B */ 0xFFB0, 0xFFB1, 0xFFB2, 0xFFB3, 0xFFB4, 0xFFB5, 0xFFB6, 0xFFB7,
+          0xFFB8, 0xFFB9, 0xFFBA, 0xFFBB, 0xFFBC, 0xFFBD, 0xFFBE, 0xFFBF,
+  /* C */ 0xFFC0, 0xFFC1, 0xFFC2, 0xFFC3, 0xFFC4, 0xFFC5, 0xFFC6, 0xFFC7,
+          0xFFC8, 0xFFC9, 0xFFCA, 0xFFCB, 0xFFCC, 0xFFCD, 0xFFCE, 0xFFCF,
+  /* D */ 0xFFD0, 0xFFD1, 0xFFD2, 0xFFD3, 0xFFD4, 0xFFD5, 0xFFD6, 0xFFD7,
+          0xFFD8, 0xFFD9, 0xFFDA, 0xFFDB, 0xFFDC, 0xFFDD, 0xFFDE, 0xFFDF,
+  /* E */ 0xFFE0, 0xFFE1, 0xFFE2, 0xFFE3, 0xFFE4, 0xFFE5, 0xFFE6, 0xFFE7,
+          0xFFE8, 0xFFE9, 0xFFEA, 0xFFEB, 0xFFEC, 0xFFED, 0xFFEE, 0xFFEF,
+  /* F */ 0xFFF0, 0xFFF1, 0xFFF2, 0xFFF3, 0xFFF4, 0xFFF5, 0xFFF6, 0xFFF7,
+          0xFFF8, 0xFFF9, 0xFFFA, 0xFFFB, 0xFFFC, 0xFFFD, 0xFFFE, 0xFFFF,
+};
+
+// Returns the next non-ignorable codepoint within string starting from the
+// position indicated by index, or zero if there are no more.
+// The passed-in index is automatically advanced as the characters in the input
+// HFS-decomposed UTF-8 string are read.
+inline int HFSReadNextNonIgnorableCodepoint(const char* string,
+                                            int length,
+                                            int* index) {
+  int codepoint = 0;
+  while (*index < length && codepoint == 0) {
+    // CBU8_NEXT returns a value < 0 in error cases. For purposes of string
+    // comparison, we just use that value and flag it with DCHECK.
+    CBU8_NEXT(string, *index, length, codepoint);
+    DCHECK_GT(codepoint, 0);
+    if (codepoint > 0) {
+      // Check if there is a subtable for this upper byte.
+      int lookup_offset = lower_case_table[codepoint >> 8];
+      if (lookup_offset != 0)
+        codepoint = lower_case_table[lookup_offset + (codepoint & 0x00FF)];
+      // Note: |codepoint| may be 0 again at this point if the character was
+      // an ignorable.
+    }
+  }
+  return codepoint;
+}
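+// Worked example (editorial sketch, not part of the original file): for
+// U+FF21 (FULLWIDTH LATIN CAPITAL LETTER A), lower_case_table[0xFF] yields a
+// nonzero offset for Table 10 above, and the entry at offset + 0x21 is
+// 0xFF41, the fullwidth lower-case 'a'.  For U+200D (ZERO WIDTH JOINER), the
+// entry in the subtable for high byte 0x20 is 0x0000, marking the character
+// as ignorable, so the loop above skips it and reads the next codepoint.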
+
+}  // namespace
+
+// Special UTF-8 version of FastUnicodeCompare. Cf:
+// http://developer.apple.com/mac/library/technotes/tn/tn1150.html#StringComparisonAlgorithm
+// The input strings must be in the special HFS decomposed form.
+int FilePath::HFSFastUnicodeCompare(StringPieceType string1,
+                                    StringPieceType string2) {
+  int length1 = string1.length();
+  int length2 = string2.length();
+  int index1 = 0;
+  int index2 = 0;
+
+  for (;;) {
+    int codepoint1 = HFSReadNextNonIgnorableCodepoint(string1.data(),
+                                                      length1,
+                                                      &index1);
+    int codepoint2 = HFSReadNextNonIgnorableCodepoint(string2.data(),
+                                                      length2,
+                                                      &index2);
+    if (codepoint1 != codepoint2)
+      return (codepoint1 < codepoint2) ? -1 : 1;
+    if (codepoint1 == 0) {
+      DCHECK_EQ(index1, length1);
+      DCHECK_EQ(index2, length2);
+      return 0;
+    }
+  }
+}
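+// Usage sketch (editorial, not part of the original file); both arguments
+// must already be in HFS-decomposed form, e.g. from GetHFSDecomposedForm():
+//
+//   // "re" + U+0301 (combining acute) + "sume" vs. precomposed "RÉSUME".
+//   FilePath::StringType a = FilePath::GetHFSDecomposedForm("re\xCC\x81sume");
+//   FilePath::StringType b = FilePath::GetHFSDecomposedForm("R\xC3\x89SUME");
+//   DCHECK_EQ(0, FilePath::HFSFastUnicodeCompare(a, b));  // equal, case-folded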
+
+StringType FilePath::GetHFSDecomposedForm(StringPieceType string) {
+  StringType result;
+  ScopedCFTypeRef<CFStringRef> cfstring(
+      CFStringCreateWithBytesNoCopy(
+          NULL,
+          reinterpret_cast<const UInt8*>(string.data()),
+          string.length(),
+          kCFStringEncodingUTF8,
+          false,
+          kCFAllocatorNull));
+  if (cfstring) {
+    // Query the maximum length needed to store the result. In most cases this
+    // will overestimate the required space. The return value also already
+    // includes the space needed for a terminating 0.
+    CFIndex length = CFStringGetMaximumSizeOfFileSystemRepresentation(cfstring);
+    DCHECK_GT(length, 0);  // should be at least 1 for the 0-terminator.
+    // Reserve enough space for CFStringGetFileSystemRepresentation to write
+    // into. Also set the length to the maximum so that we can shrink it later.
+    // (Increasing rather than decreasing it would clobber the string contents!)
+    result.reserve(length);
+    result.resize(length - 1);
+    Boolean success = CFStringGetFileSystemRepresentation(cfstring,
+                                                          &result[0],
+                                                          length);
+    if (success) {
+      // Reduce result.length() to actual string length.
+      result.resize(strlen(result.c_str()));
+    } else {
+      // An error occurred -> clear result.
+      result.clear();
+    }
+  }
+  return result;
+}
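+// Example (editorial note, not part of the original file): the precomposed
+// input U+00E9 ("\xC3\xA9", e with acute) comes back as the two-codepoint
+// sequence U+0065 'e' followed by U+0301 (combining acute accent), the form
+// HFS+ stores on disk and the form HFSFastUnicodeCompare() expects.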
+
+int FilePath::CompareIgnoreCase(StringPieceType string1,
+                                StringPieceType string2) {
+  // Quick checks for empty strings - these speed things up a bit and make the
+  // following code cleaner.
+  if (string1.empty())
+    return string2.empty() ? 0 : -1;
+  if (string2.empty())
+    return 1;
+
+  StringType hfs1 = GetHFSDecomposedForm(string1);
+  StringType hfs2 = GetHFSDecomposedForm(string2);
+
+  // GetHFSDecomposedForm() returns an empty string in an error case.
+  if (hfs1.empty() || hfs2.empty()) {
+    NOTREACHED();
+    ScopedCFTypeRef<CFStringRef> cfstring1(
+        CFStringCreateWithBytesNoCopy(
+            NULL,
+            reinterpret_cast<const UInt8*>(string1.data()),
+            string1.length(),
+            kCFStringEncodingUTF8,
+            false,
+            kCFAllocatorNull));
+    ScopedCFTypeRef<CFStringRef> cfstring2(
+        CFStringCreateWithBytesNoCopy(
+            NULL,
+            reinterpret_cast<const UInt8*>(string2.data()),
+            string2.length(),
+            kCFStringEncodingUTF8,
+            false,
+            kCFAllocatorNull));
+    return CFStringCompare(cfstring1,
+                           cfstring2,
+                           kCFCompareCaseInsensitive);
+  }
+
+  return HFSFastUnicodeCompare(hfs1, hfs2);
+}
+
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+
+// Generic Posix system comparisons.
+int FilePath::CompareIgnoreCase(StringPieceType string1,
+                                StringPieceType string2) {
+  // Specifically need null-terminated strings for this API call.
+  int comparison = strcasecmp(string1.as_string().c_str(),
+                              string2.as_string().c_str());
+  if (comparison < 0)
+    return -1;
+  if (comparison > 0)
+    return 1;
+  return 0;
+}
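+// Editorial note: StringPiece is not guaranteed to be null-terminated, so
+// as_string() above materializes std::string copies before calling
+// strcasecmp(), which requires C strings.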
+
+#endif  // OS versions of CompareIgnoreCase()
+
+void FilePath::StripTrailingSeparatorsInternal() {
+  // If there is no drive letter, start will be 1, which will prevent stripping
+  // the leading separator if there is only one separator.  If there is a drive
+  // letter, start will be set appropriately to prevent stripping the first
+  // separator following the drive letter, if a separator immediately follows
+  // the drive letter.
+  StringType::size_type start = FindDriveLetter(path_) + 2;
+
+  StringType::size_type last_stripped = StringType::npos;
+  for (StringType::size_type pos = path_.length();
+       pos > start && IsSeparator(path_[pos - 1]);
+       --pos) {
+    // If the string only has two separators and they're at the beginning,
+    // don't strip them, unless the string began with more than two separators.
+    if (pos != start + 1 || last_stripped == start + 2 ||
+        !IsSeparator(path_[start - 1])) {
+      path_.resize(pos - 1);
+      last_stripped = pos;
+    }
+  }
+}
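+// Behavior sketch (editorial, mirroring cases in file_path_unittest.cc):
+//   "/aa/bb//" -> "/aa/bb"  (trailing separators stripped)
+//   "////"     -> "/"       (never stripped past the absolute root)
+//   "//"       -> "//"      (leading separator pair kept for alternate roots)
+//   "c://"     -> "c://"    (separator pair after a drive letter kept)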
+
+FilePath FilePath::NormalizePathSeparators() const {
+  return NormalizePathSeparatorsTo(kSeparators[0]);
+}
+
+FilePath FilePath::NormalizePathSeparatorsTo(CharType separator) const {
+#if defined(FILE_PATH_USES_WIN_SEPARATORS)
+  DCHECK_NE(kSeparators + kSeparatorsLength,
+            std::find(kSeparators, kSeparators + kSeparatorsLength, separator));
+  StringType copy = path_;
+  for (size_t i = 0; i < kSeparatorsLength; ++i) {
+    std::replace(copy.begin(), copy.end(), kSeparators[i], separator);
+  }
+  return FilePath(copy);
+#else
+  return *this;
+#endif
+}
+
+#if defined(OS_ANDROID)
+bool FilePath::IsContentUri() const {
+  return StartsWith(path_, "content://", base::CompareCase::INSENSITIVE_ASCII);
+}
+#endif
+
+}  // namespace base
diff --git a/base/files/file_path.h b/base/files/file_path.h
new file mode 100644
index 0000000..2dc15f9
--- /dev/null
+++ b/base/files/file_path.h
@@ -0,0 +1,481 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// FilePath is a container for pathnames stored in a platform's native string
+// type, providing containers for manipulation in accordance with the
+// platform's conventions for pathnames.  It supports the following path
+// types:
+//
+//                   POSIX            Windows
+//                   ---------------  ----------------------------------
+// Fundamental type  char[]           wchar_t[]
+// Encoding          unspecified*     UTF-16
+// Separator         /                \, tolerant of /
+// Drive letters     no               case-insensitive A-Z followed by :
+// Alternate root    // (surprise!)   \\, for UNC paths
+//
+// * The encoding need not be specified on POSIX systems, although some
+//   POSIX-compliant systems do specify an encoding.  Mac OS X uses UTF-8.
+//   Chrome OS also uses UTF-8.
+//   Linux does not specify an encoding, but in practice, the locale's
+//   character set may be used.
+//
+// For more arcane bits of path trivia, see below.
+//
+// FilePath objects are intended to be used anywhere paths are.  An
+// application may pass FilePath objects around internally, masking the
+// underlying differences between systems, only differing in implementation
+// where interfacing directly with the system.  For example, a single
+// OpenFile(const FilePath &) function may be made available, allowing all
+// callers to operate without regard to the underlying implementation.  On
+// POSIX-like platforms, OpenFile might wrap fopen, and on Windows, it might
+// wrap _wfopen_s, perhaps both by calling file_path.value().c_str().  This
+// allows each platform to pass pathnames around without requiring conversions
+// between encodings, which has an impact on performance, but more importantly,
+// has an impact on correctness on platforms that do not have well-defined
+// encodings for pathnames.
+//
+// Several methods are available to perform common operations on a FilePath
+// object, such as determining the parent directory (DirName), isolating the
+// final path component (BaseName), and appending a relative pathname string
+// to an existing FilePath object (Append).  These methods are highly
+// recommended over attempting to split and concatenate strings directly.
+// These methods are based purely on string manipulation and knowledge of
+// platform-specific pathname conventions, and do not consult the filesystem
+// at all, making them safe to use without fear of blocking on I/O operations.
+// These methods do not function as mutators but instead return distinct
+// instances of FilePath objects, and are therefore safe to use on const
+// objects.  The objects themselves are safe to share between threads.
+//
+// To aid in initialization of FilePath objects from string literals, a
+// FILE_PATH_LITERAL macro is provided, which accounts for the difference
+// between char[]-based pathnames on POSIX systems and wchar_t[]-based
+// pathnames on Windows.
+//
+// As a precaution against premature truncation, paths can't contain NULs.
+//
+// Because a FilePath object should not be instantiated at the global scope,
+// use a FilePath::CharType[] instead and initialize it with
+// FILE_PATH_LITERAL.  At runtime, a FilePath object can be created from the
+// character array.  Example:
+//
+// | const FilePath::CharType kLogFileName[] = FILE_PATH_LITERAL("log.txt");
+// |
+// | void Function() {
+// |   FilePath log_file_path(kLogFileName);
+// |   [...]
+// | }
+//
+// WARNING: FilePaths should ALWAYS be displayed with LTR directionality, even
+// when the UI language is RTL. This means you always need to pass filepaths
+// through base::i18n::WrapPathWithLTRFormatting() before displaying them in the
+// RTL UI.
+//
+// This is a very common source of bugs, please try to keep this in mind.
+//
+// ARCANE BITS OF PATH TRIVIA
+//
+//  - A double leading slash is actually part of the POSIX standard.  Systems
+//    are allowed to treat // as an alternate root, as Windows does for UNC
+//    (network share) paths.  Most POSIX systems don't do anything special
+//    with two leading slashes, but FilePath handles this case properly
+//    in case it ever comes across such a system.  FilePath needs this support
+//    for Windows UNC paths, anyway.
+//    References:
+//    The Open Group Base Specifications Issue 7, sections 3.267 ("Pathname")
+//    and 4.12 ("Pathname Resolution"), available at:
+//    http://www.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap03.html#tag_03_267
+//    http://www.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap04.html#tag_04_12
+//
+//  - Windows treats c:\\ the same way it treats \\.  This was intended to
+//    allow older applications that require drive letters to support UNC paths
+//    like \\server\share\path, by permitting c:\\server\share\path as an
+//    equivalent.  Since the OS treats these paths specially, FilePath needs
+//    to do the same.  Since Windows can use either / or \ as the separator,
+//    FilePath treats c://, c:\\, //, and \\ all equivalently.
+//    Reference:
+//    The Old New Thing, "Why is a drive letter permitted in front of UNC
+//    paths (sometimes)?", available at:
+//    http://blogs.msdn.com/oldnewthing/archive/2005/11/22/495740.aspx
+
+#ifndef BASE_FILES_FILE_PATH_H_
+#define BASE_FILES_FILE_PATH_H_
+
+#include <stddef.h>
+
+#include <iosfwd>
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
+#include "build/build_config.h"
+
+// Windows-style drive letter support and pathname separator characters can be
+// enabled and disabled independently, to aid testing.  These #defines are
+// here so that the same setting can be used in both the implementation and
+// in the unit test.
+#if defined(OS_WIN)
+#define FILE_PATH_USES_DRIVE_LETTERS
+#define FILE_PATH_USES_WIN_SEPARATORS
+#endif  // OS_WIN
+
+// To print path names portably use PRIsFP (based on PRIuS and friends from
+// C99 and format_macros.h) like this:
+// base::StringPrintf("Path is %" PRIsFP ".\n", path.value().c_str());
+#if defined(OS_WIN)
+#define PRIsFP "ls"
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+#define PRIsFP "s"
+#endif  // OS_WIN
+
+namespace base {
+
+class Pickle;
+class PickleIterator;
+
+// An abstraction to isolate users from the differences between native
+// pathnames on different platforms.
+class BASE_EXPORT FilePath {
+ public:
+#if defined(OS_WIN)
+  // On Windows, for Unicode-aware applications, native pathnames are wchar_t
+  // arrays encoded in UTF-16.
+  typedef std::wstring StringType;
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  // On most platforms, native pathnames are char arrays, and the encoding
+  // may or may not be specified.  On Mac OS X, native pathnames are encoded
+  // in UTF-8.
+  typedef std::string StringType;
+#endif  // OS_WIN
+
+  typedef BasicStringPiece<StringType> StringPieceType;
+  typedef StringType::value_type CharType;
+
+  // Null-terminated array of separators used to separate components in
+  // hierarchical paths.  Each character in this array is a valid separator,
+  // but kSeparators[0] is treated as the canonical separator and will be used
+  // when composing pathnames.
+  static const CharType kSeparators[];
+
+  // arraysize(kSeparators).
+  static const size_t kSeparatorsLength;
+
+  // A special path component meaning "this directory."
+  static const CharType kCurrentDirectory[];
+
+  // A special path component meaning "the parent directory."
+  static const CharType kParentDirectory[];
+
+  // The character used to identify a file extension.
+  static const CharType kExtensionSeparator;
+
+  FilePath();
+  FilePath(const FilePath& that);
+  explicit FilePath(StringPieceType path);
+  ~FilePath();
+  FilePath& operator=(const FilePath& that);
+
+  // Constructs FilePath with the contents of |that|, which is left in valid but
+  // unspecified state.
+  FilePath(FilePath&& that) noexcept;
+  // Replaces the contents with those of |that|, which is left in valid but
+  // unspecified state.
+  FilePath& operator=(FilePath&& that);
+
+  bool operator==(const FilePath& that) const;
+
+  bool operator!=(const FilePath& that) const;
+
+  // Required for some STL containers and operations
+  bool operator<(const FilePath& that) const {
+    return path_ < that.path_;
+  }
+
+  const StringType& value() const { return path_; }
+
+  bool empty() const { return path_.empty(); }
+
+  void clear() { path_.clear(); }
+
+  // Returns true if |character| is in kSeparators.
+  static bool IsSeparator(CharType character);
+
+  // Returns a vector of all of the components of the provided path. It is
+  // equivalent to calling DirName().value() on the path's root component,
+  // and BaseName().value() on each child component.
+  //
+  // To make sure this is lossless so we can differentiate absolute and
+  // relative paths, the root slash will be included even though no other
+  // slashes will be. The precise behavior is:
+  //
+  // Posix:  "/foo/bar"  ->  [ "/", "foo", "bar" ]
+  // Windows:  "C:\foo\bar"  ->  [ "C:", "\\", "foo", "bar" ]
+  void GetComponents(std::vector<FilePath::StringType>* components) const;
+
+  // Returns true if this FilePath is a strict parent of the |child|. Absolute
+  // and relative paths are accepted, i.e. /foo is a parent of /foo/bar and
+  // foo is a parent of foo/bar. Does not convert paths to absolute, follow
+  // symlinks or directory navigation (e.g. ".."). A path is *NOT* its own
+  // parent.
+  bool IsParent(const FilePath& child) const;
+
+  // If IsParent(child) holds, appends to path (if non-NULL) the
+  // relative path to child and returns true.  For example, if parent
+  // holds "/Users/johndoe/Library/Application Support", child holds
+  // "/Users/johndoe/Library/Application Support/Google/Chrome/Default", and
+  // *path holds "/Users/johndoe/Library/Caches", then after
+  // parent.AppendRelativePath(child, path) is called *path will hold
+  // "/Users/johndoe/Library/Caches/Google/Chrome/Default".  Otherwise,
+  // returns false.
+  bool AppendRelativePath(const FilePath& child, FilePath* path) const;
+
+  // Returns a FilePath corresponding to the directory containing the path
+  // named by this object, stripping away the file component.  If this object
+  // only contains one component, returns a FilePath identifying
+  // kCurrentDirectory.  If this object already refers to the root directory,
+  // returns a FilePath identifying the root directory. Please note that this
+  // doesn't resolve directory navigation, e.g. the result for "../a" is "..".
+  FilePath DirName() const WARN_UNUSED_RESULT;
+
+  // Returns a FilePath corresponding to the last path component of this
+  // object, either a file or a directory.  If this object already refers to
+  // the root directory, returns a FilePath identifying the root directory;
+  // this is the only situation in which BaseName will return an absolute path.
+  FilePath BaseName() const WARN_UNUSED_RESULT;
+
+  // Returns ".jpg" for path "C:\pics\jojo.jpg", or an empty string if
+  // the file has no extension.  If non-empty, Extension() will always start
+  // with precisely one ".".  The following code should always work regardless
+  // of the value of path:
+  // new_path = path.RemoveExtension().value().append(path.Extension());
+  // ASSERT(new_path == path.value());
+  // For common double-extensions like .tar.gz and .user.js, this method
+  // returns the combined extension.  For a single component, use
+  // FinalExtension().
+  // NOTE: this is different from the original file_util implementation which
+  // returned the extension without a leading "." ("jpg" instead of ".jpg")
+  StringType Extension() const WARN_UNUSED_RESULT;
+
+  // Returns the path's file extension, as in Extension(), but will
+  // never return a double extension.
+  //
+  // TODO(davidben): Check all our extension-sensitive code to see if
+  // we can rename this to Extension() and the other to something like
+  // LongExtension(), defaulting to short extensions and leaving the
+  // long "extensions" to logic like base::GetUniquePathNumber().
+  StringType FinalExtension() const WARN_UNUSED_RESULT;
+
+  // Returns "C:\pics\jojo" for path "C:\pics\jojo.jpg"
+  // NOTE: this is slightly different from the similar file_util implementation
+  // which returned simply 'jojo'.
+  FilePath RemoveExtension() const WARN_UNUSED_RESULT;
+
+  // Removes the path's file extension, as in RemoveExtension(), but
+  // ignores double extensions.
+  FilePath RemoveFinalExtension() const WARN_UNUSED_RESULT;
+
+  // Inserts |suffix| after the file name portion of |path| but before the
+  // extension.  Returns "" if BaseName() == "." or "..".
+  // Examples:
+  // path == "C:\pics\jojo.jpg" suffix == " (1)", returns "C:\pics\jojo (1).jpg"
+  // path == "jojo.jpg"         suffix == " (1)", returns "jojo (1).jpg"
+  // path == "C:\pics\jojo"     suffix == " (1)", returns "C:\pics\jojo (1)"
+  // path == "C:\pics.old\jojo" suffix == " (1)", returns "C:\pics.old\jojo (1)"
+  FilePath InsertBeforeExtension(
+      StringPieceType suffix) const WARN_UNUSED_RESULT;
+  FilePath InsertBeforeExtensionASCII(
+      StringPiece suffix) const WARN_UNUSED_RESULT;
+
+  // Adds |extension| to |file_name|. Returns the current FilePath if
+  // |extension| is empty. Returns "" if BaseName() == "." or "..".
+  FilePath AddExtension(StringPieceType extension) const WARN_UNUSED_RESULT;
+
+  // Replaces the extension of |file_name| with |extension|.  If |file_name|
+  // does not have an extension, then |extension| is added.  If |extension| is
+  // empty, then the extension is removed from |file_name|.
+  // Returns "" if BaseName() == "." or "..".
+  FilePath ReplaceExtension(StringPieceType extension) const WARN_UNUSED_RESULT;
+
+  // Returns true if the file path matches the specified extension. The test is
+  // case insensitive. Don't forget the leading period if appropriate.
+  bool MatchesExtension(StringPieceType extension) const;
+
+  // Returns a FilePath by appending a separator and the supplied path
+  // component to this object's path.  Append takes care to avoid adding
+  // excessive separators if this object's path already ends with a separator.
+  // If this object's path is kCurrentDirectory, a new FilePath corresponding
+  // only to |component| is returned.  |component| must be a relative path;
+  // it is an error to pass an absolute path.
+  FilePath Append(StringPieceType component) const WARN_UNUSED_RESULT;
+  FilePath Append(const FilePath& component) const WARN_UNUSED_RESULT;
+
+  // Although Windows StringType is std::wstring, since the encoding it uses for
+  // paths is well defined, it can handle ASCII path components as well.
+  // Mac uses UTF8, and since ASCII is a subset of that, it works there as well.
+  // On Linux, although it can use any 8-bit encoding for paths, we assume that
+  // ASCII is a valid subset, regardless of the encoding, since many operating
+  // system paths will always be ASCII.
+  FilePath AppendASCII(StringPiece component) const WARN_UNUSED_RESULT;
+
+  // Returns true if this FilePath contains an absolute path.  On Windows, an
+  // absolute path begins with either a drive letter specification followed by
+  // a separator character, or with two separator characters.  On POSIX
+  // platforms, an absolute path begins with a separator character.
+  bool IsAbsolute() const;
+
+  // Returns true if the path ends with a path separator character.
+  bool EndsWithSeparator() const WARN_UNUSED_RESULT;
+
+  // Returns a copy of this FilePath that ends with a trailing separator. If
+  // the input path is empty, an empty FilePath will be returned.
+  FilePath AsEndingWithSeparator() const WARN_UNUSED_RESULT;
+
+  // Returns a copy of this FilePath that does not end with a trailing
+  // separator.
+  FilePath StripTrailingSeparators() const WARN_UNUSED_RESULT;
+
+  // Returns true if this FilePath contains an attempt to reference a parent
+  // directory (e.g. has a path component that is "..").
+  bool ReferencesParent() const;
+
+  // Return a Unicode human-readable version of this path.
+  // Warning: you can *not*, in general, go from a display name back to a real
+  // path.  Only use this when displaying paths to users, not just when you
+  // want to stuff a string16 into some other API.
+  string16 LossyDisplayName() const;
+
+  // Return the path as ASCII, or the empty string if the path is not ASCII.
+  // This should only be used for cases where the FilePath is representing a
+  // known-ASCII filename.
+  std::string MaybeAsASCII() const;
+
+  // Return the path as UTF-8.
+  //
+  // This function is *unsafe* as there is no way to tell what encoding is
+  // used in file names on POSIX systems other than Mac and Chrome OS,
+  // although UTF-8 is practically used everywhere these days. To mitigate
+  // the encoding issue, this function internally calls
+  // SysNativeMBToWide() on POSIX systems other than Mac and Chrome OS,
+  // per assumption that the current locale's encoding is used in file
+  // names, but this isn't a perfect solution.
+  //
+  // Once it becomes safe to stop caring about non-UTF-8 file names,
+  // the SysNativeMBToWide() hack will be removed from the code, along
+  // with "Unsafe" in the function name.
+  std::string AsUTF8Unsafe() const;
+
+  // Similar to AsUTF8Unsafe, but returns UTF-16 instead.
+  string16 AsUTF16Unsafe() const;
+
+  // Returns a FilePath object from a path name in UTF-8. This function
+  // should only be used for cases where you are sure that the input
+  // string is UTF-8.
+  //
+  // Like AsUTF8Unsafe(), this function is unsafe. This function
+  // internally calls SysWideToNativeMB() on POSIX systems other than Mac
+  // and Chrome OS, to mitigate the encoding issue. See the comment at
+  // AsUTF8Unsafe() for details.
+  static FilePath FromUTF8Unsafe(StringPiece utf8);
+
+  // Similar to FromUTF8Unsafe, but accepts UTF-16 instead.
+  static FilePath FromUTF16Unsafe(StringPiece16 utf16);
+
+  void WriteToPickle(Pickle* pickle) const;
+  bool ReadFromPickle(PickleIterator* iter);
+
+  // Normalize all path separators to backslash on Windows
+  // (if FILE_PATH_USES_WIN_SEPARATORS is true), or do nothing on POSIX systems.
+  FilePath NormalizePathSeparators() const;
+
+  // Normalize all path separators to the given type on Windows
+  // (if FILE_PATH_USES_WIN_SEPARATORS is true), or do nothing on POSIX systems.
+  FilePath NormalizePathSeparatorsTo(CharType separator) const;
+
+  // Compare two strings in the same way the file system does.
+  // Note that these always ignore case, even on file systems that are case-
+  // sensitive. If case-sensitive comparison is ever needed, add corresponding
+  // methods here.
+  // The methods are written as a static method so that they can also be used
+  // on parts of a file path, e.g., just the extension.
+  // CompareIgnoreCase() returns -1, 0 or 1 for less-than, equal-to and
+  // greater-than respectively.
+  static int CompareIgnoreCase(StringPieceType string1,
+                               StringPieceType string2);
+  static bool CompareEqualIgnoreCase(StringPieceType string1,
+                                     StringPieceType string2) {
+    return CompareIgnoreCase(string1, string2) == 0;
+  }
+  static bool CompareLessIgnoreCase(StringPieceType string1,
+                                    StringPieceType string2) {
+    return CompareIgnoreCase(string1, string2) < 0;
+  }
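+  // Example (illustrative): being static, these can compare just part of a
+  // path, e.g. the extension:
+  // | if (FilePath::CompareEqualIgnoreCase(path.Extension(),
+  // |                                      FILE_PATH_LITERAL(".jpg")))
+  // |   ...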
+
+#if defined(OS_MACOSX)
+  // Returns the string in the special canonical decomposed form as defined for
+  // HFS, which is close to, but not quite, decomposition form D. See
+  // http://developer.apple.com/mac/library/technotes/tn/tn1150.html#UnicodeSubtleties
+  // for further comments.
+  // Returns the empty string if the conversion failed.
+  static StringType GetHFSDecomposedForm(StringPieceType string);
+
+  // Special UTF-8 version of FastUnicodeCompare. Cf:
+  // http://developer.apple.com/mac/library/technotes/tn/tn1150.html#StringComparisonAlgorithm
+  // IMPORTANT: The input strings must be in the special HFS decomposed form!
+  // (cf. above GetHFSDecomposedForm method)
+  static int HFSFastUnicodeCompare(StringPieceType string1,
+                                   StringPieceType string2);
+#endif
+
+#if defined(OS_ANDROID)
+  // On Android, the file selection dialog can return a file with a content
+  // URI scheme (starting with content://). A content URI needs to be opened
+  // with a ContentResolver to guarantee that the app has the appropriate
+  // permissions to access it.
+  // Returns true if the path is a content URI, or false otherwise.
+  bool IsContentUri() const;
+#endif
+
+ private:
+  // Remove trailing separators from this object.  If the path is absolute, it
+  // will never be stripped any more than to refer to the absolute root
+  // directory, so "////" will become "/", not "".  A leading pair of
+  // separators is never stripped, to support alternate roots.  This is used to
+  // support UNC paths on Windows.
+  void StripTrailingSeparatorsInternal();
+
+  StringType path_;
+};
+
+BASE_EXPORT std::ostream& operator<<(std::ostream& out,
+                                     const FilePath& file_path);
+
+}  // namespace base
+
+// Macros for string literal initialization of FilePath::CharType[], and for
+// using a FilePath::CharType[] in a printf-style format string.
+#if defined(OS_WIN)
+#define FILE_PATH_LITERAL(x) L ## x
+#define PRFilePath "ls"
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+#define FILE_PATH_LITERAL(x) x
+#define PRFilePath "s"
+#endif  // OS_WIN
+
+namespace std {
+
+template <>
+struct hash<base::FilePath> {
+  typedef base::FilePath argument_type;
+  typedef std::size_t result_type;
+  result_type operator()(argument_type const& f) const {
+    return hash<base::FilePath::StringType>()(f.value());
+  }
+};
+
+}  // namespace std
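+
+// Usage sketch (illustrative, not part of the original header): the hash
+// specialization above lets FilePath serve directly as a key in unordered
+// containers:
+//
+// | std::unordered_set<base::FilePath> visited;
+// | visited.insert(base::FilePath(FILE_PATH_LITERAL("/tmp/log.txt")));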
+
+#endif  // BASE_FILES_FILE_PATH_H_
diff --git a/base/files/file_path_constants.cc b/base/files/file_path_constants.cc
new file mode 100644
index 0000000..0b74846
--- /dev/null
+++ b/base/files/file_path_constants.cc
@@ -0,0 +1,25 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+
+#include "base/files/file_path.h"
+#include "base/macros.h"
+
+namespace base {
+
+#if defined(FILE_PATH_USES_WIN_SEPARATORS)
+const FilePath::CharType FilePath::kSeparators[] = FILE_PATH_LITERAL("\\/");
+#else  // FILE_PATH_USES_WIN_SEPARATORS
+const FilePath::CharType FilePath::kSeparators[] = FILE_PATH_LITERAL("/");
+#endif  // FILE_PATH_USES_WIN_SEPARATORS
+
+const size_t FilePath::kSeparatorsLength = arraysize(kSeparators);
+
+const FilePath::CharType FilePath::kCurrentDirectory[] = FILE_PATH_LITERAL(".");
+const FilePath::CharType FilePath::kParentDirectory[] = FILE_PATH_LITERAL("..");
+
+const FilePath::CharType FilePath::kExtensionSeparator = FILE_PATH_LITERAL('.');
+
+}  // namespace base
diff --git a/base/files/file_path_unittest.cc b/base/files/file_path_unittest.cc
new file mode 100644
index 0000000..e722c68
--- /dev/null
+++ b/base/files/file_path_unittest.cc
@@ -0,0 +1,1319 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+
+#include <sstream>
+
+#include "base/files/file_path.h"
+#include "base/macros.h"
+#include "base/strings/utf_string_conversions.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/platform_test.h"
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include "base/test/scoped_locale.h"
+#endif
+
+// This macro helps avoid wrapped lines in the test structs.
+#define FPL(x) FILE_PATH_LITERAL(x)
+
+// This macro constructs strings which can contain NULs.
+#define FPS(x) FilePath::StringType(FPL(x), arraysize(FPL(x)) - 1)
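+// For example, FPS("a\0b") yields a three-character string with an embedded
+// NUL, whereas FPL("a\0b") used as a plain C string would be truncated at the
+// NUL.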
+
+namespace base {
+
+struct UnaryTestData {
+  const FilePath::CharType* input;
+  const FilePath::CharType* expected;
+};
+
+struct UnaryBooleanTestData {
+  const FilePath::CharType* input;
+  bool expected;
+};
+
+struct BinaryTestData {
+  const FilePath::CharType* inputs[2];
+  const FilePath::CharType* expected;
+};
+
+struct BinaryBooleanTestData {
+  const FilePath::CharType* inputs[2];
+  bool expected;
+};
+
+struct BinaryIntTestData {
+  const FilePath::CharType* inputs[2];
+  int expected;
+};
+
+struct UTF8TestData {
+  const FilePath::CharType* native;
+  const char* utf8;
+};
+
+// file_util winds up using autoreleased objects on the Mac, so this needs
+// to be a PlatformTest.
+typedef PlatformTest FilePathTest;
+
+TEST_F(FilePathTest, DirName) {
+  const struct UnaryTestData cases[] = {
+    { FPL(""),              FPL(".") },
+    { FPL("aa"),            FPL(".") },
+    { FPL("/aa/bb"),        FPL("/aa") },
+    { FPL("/aa/bb/"),       FPL("/aa") },
+    { FPL("/aa/bb//"),      FPL("/aa") },
+    { FPL("/aa/bb/ccc"),    FPL("/aa/bb") },
+    { FPL("/aa"),           FPL("/") },
+    { FPL("/aa/"),          FPL("/") },
+    { FPL("/"),             FPL("/") },
+    { FPL("//"),            FPL("//") },
+    { FPL("///"),           FPL("/") },
+    { FPL("aa/"),           FPL(".") },
+    { FPL("aa/bb"),         FPL("aa") },
+    { FPL("aa/bb/"),        FPL("aa") },
+    { FPL("aa/bb//"),       FPL("aa") },
+    { FPL("aa//bb//"),      FPL("aa") },
+    { FPL("aa//bb/"),       FPL("aa") },
+    { FPL("aa//bb"),        FPL("aa") },
+    { FPL("//aa/bb"),       FPL("//aa") },
+    { FPL("//aa/"),         FPL("//") },
+    { FPL("//aa"),          FPL("//") },
+    { FPL("0:"),            FPL(".") },
+    { FPL("@:"),            FPL(".") },
+    { FPL("[:"),            FPL(".") },
+    { FPL("`:"),            FPL(".") },
+    { FPL("{:"),            FPL(".") },
+    { FPL("\xB3:"),         FPL(".") },
+    { FPL("\xC5:"),         FPL(".") },
+    { FPL("/aa/../bb/cc"),  FPL("/aa/../bb")},
+#if defined(OS_WIN)
+    { FPL("\x0143:"),       FPL(".") },
+#endif  // OS_WIN
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+    { FPL("c:"),            FPL("c:") },
+    { FPL("C:"),            FPL("C:") },
+    { FPL("A:"),            FPL("A:") },
+    { FPL("Z:"),            FPL("Z:") },
+    { FPL("a:"),            FPL("a:") },
+    { FPL("z:"),            FPL("z:") },
+    { FPL("c:aa"),          FPL("c:") },
+    { FPL("c:/"),           FPL("c:/") },
+    { FPL("c://"),          FPL("c://") },
+    { FPL("c:///"),         FPL("c:/") },
+    { FPL("c:/aa"),         FPL("c:/") },
+    { FPL("c:/aa/"),        FPL("c:/") },
+    { FPL("c:/aa/bb"),      FPL("c:/aa") },
+    { FPL("c:aa/bb"),       FPL("c:aa") },
+#endif  // FILE_PATH_USES_DRIVE_LETTERS
+#if defined(FILE_PATH_USES_WIN_SEPARATORS)
+    { FPL("\\aa\\bb"),      FPL("\\aa") },
+    { FPL("\\aa\\bb\\"),    FPL("\\aa") },
+    { FPL("\\aa\\bb\\\\"),  FPL("\\aa") },
+    { FPL("\\aa\\bb\\ccc"), FPL("\\aa\\bb") },
+    { FPL("\\aa"),          FPL("\\") },
+    { FPL("\\aa\\"),        FPL("\\") },
+    { FPL("\\"),            FPL("\\") },
+    { FPL("\\\\"),          FPL("\\\\") },
+    { FPL("\\\\\\"),        FPL("\\") },
+    { FPL("aa\\"),          FPL(".") },
+    { FPL("aa\\bb"),        FPL("aa") },
+    { FPL("aa\\bb\\"),      FPL("aa") },
+    { FPL("aa\\bb\\\\"),    FPL("aa") },
+    { FPL("aa\\\\bb\\\\"),  FPL("aa") },
+    { FPL("aa\\\\bb\\"),    FPL("aa") },
+    { FPL("aa\\\\bb"),      FPL("aa") },
+    { FPL("\\\\aa\\bb"),    FPL("\\\\aa") },
+    { FPL("\\\\aa\\"),      FPL("\\\\") },
+    { FPL("\\\\aa"),        FPL("\\\\") },
+    { FPL("aa\\..\\bb\\c"), FPL("aa\\..\\bb")},
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+    { FPL("c:\\"),          FPL("c:\\") },
+    { FPL("c:\\\\"),        FPL("c:\\\\") },
+    { FPL("c:\\\\\\"),      FPL("c:\\") },
+    { FPL("c:\\aa"),        FPL("c:\\") },
+    { FPL("c:\\aa\\"),      FPL("c:\\") },
+    { FPL("c:\\aa\\bb"),    FPL("c:\\aa") },
+    { FPL("c:aa\\bb"),      FPL("c:aa") },
+#endif  // FILE_PATH_USES_DRIVE_LETTERS
+#endif  // FILE_PATH_USES_WIN_SEPARATORS
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    FilePath input(cases[i].input);
+    FilePath observed = input.DirName();
+    EXPECT_EQ(FilePath::StringType(cases[i].expected), observed.value()) <<
+              "i: " << i << ", input: " << input.value();
+  }
+}
+
+TEST_F(FilePathTest, BaseName) {
+  const struct UnaryTestData cases[] = {
+    { FPL(""),              FPL("") },
+    { FPL("aa"),            FPL("aa") },
+    { FPL("/aa/bb"),        FPL("bb") },
+    { FPL("/aa/bb/"),       FPL("bb") },
+    { FPL("/aa/bb//"),      FPL("bb") },
+    { FPL("/aa/bb/ccc"),    FPL("ccc") },
+    { FPL("/aa"),           FPL("aa") },
+    { FPL("/"),             FPL("/") },
+    { FPL("//"),            FPL("//") },
+    { FPL("///"),           FPL("/") },
+    { FPL("aa/"),           FPL("aa") },
+    { FPL("aa/bb"),         FPL("bb") },
+    { FPL("aa/bb/"),        FPL("bb") },
+    { FPL("aa/bb//"),       FPL("bb") },
+    { FPL("aa//bb//"),      FPL("bb") },
+    { FPL("aa//bb/"),       FPL("bb") },
+    { FPL("aa//bb"),        FPL("bb") },
+    { FPL("//aa/bb"),       FPL("bb") },
+    { FPL("//aa/"),         FPL("aa") },
+    { FPL("//aa"),          FPL("aa") },
+    { FPL("0:"),            FPL("0:") },
+    { FPL("@:"),            FPL("@:") },
+    { FPL("[:"),            FPL("[:") },
+    { FPL("`:"),            FPL("`:") },
+    { FPL("{:"),            FPL("{:") },
+    { FPL("\xB3:"),         FPL("\xB3:") },
+    { FPL("\xC5:"),         FPL("\xC5:") },
+#if defined(OS_WIN)
+    { FPL("\x0143:"),       FPL("\x0143:") },
+#endif  // OS_WIN
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+    { FPL("c:"),            FPL("") },
+    { FPL("C:"),            FPL("") },
+    { FPL("A:"),            FPL("") },
+    { FPL("Z:"),            FPL("") },
+    { FPL("a:"),            FPL("") },
+    { FPL("z:"),            FPL("") },
+    { FPL("c:aa"),          FPL("aa") },
+    { FPL("c:/"),           FPL("/") },
+    { FPL("c://"),          FPL("//") },
+    { FPL("c:///"),         FPL("/") },
+    { FPL("c:/aa"),         FPL("aa") },
+    { FPL("c:/aa/"),        FPL("aa") },
+    { FPL("c:/aa/bb"),      FPL("bb") },
+    { FPL("c:aa/bb"),       FPL("bb") },
+#endif  // FILE_PATH_USES_DRIVE_LETTERS
+#if defined(FILE_PATH_USES_WIN_SEPARATORS)
+    { FPL("\\aa\\bb"),      FPL("bb") },
+    { FPL("\\aa\\bb\\"),    FPL("bb") },
+    { FPL("\\aa\\bb\\\\"),  FPL("bb") },
+    { FPL("\\aa\\bb\\ccc"), FPL("ccc") },
+    { FPL("\\aa"),          FPL("aa") },
+    { FPL("\\"),            FPL("\\") },
+    { FPL("\\\\"),          FPL("\\\\") },
+    { FPL("\\\\\\"),        FPL("\\") },
+    { FPL("aa\\"),          FPL("aa") },
+    { FPL("aa\\bb"),        FPL("bb") },
+    { FPL("aa\\bb\\"),      FPL("bb") },
+    { FPL("aa\\bb\\\\"),    FPL("bb") },
+    { FPL("aa\\\\bb\\\\"),  FPL("bb") },
+    { FPL("aa\\\\bb\\"),    FPL("bb") },
+    { FPL("aa\\\\bb"),      FPL("bb") },
+    { FPL("\\\\aa\\bb"),    FPL("bb") },
+    { FPL("\\\\aa\\"),      FPL("aa") },
+    { FPL("\\\\aa"),        FPL("aa") },
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+    { FPL("c:\\"),          FPL("\\") },
+    { FPL("c:\\\\"),        FPL("\\\\") },
+    { FPL("c:\\\\\\"),      FPL("\\") },
+    { FPL("c:\\aa"),        FPL("aa") },
+    { FPL("c:\\aa\\"),      FPL("aa") },
+    { FPL("c:\\aa\\bb"),    FPL("bb") },
+    { FPL("c:aa\\bb"),      FPL("bb") },
+#endif  // FILE_PATH_USES_DRIVE_LETTERS
+#endif  // FILE_PATH_USES_WIN_SEPARATORS
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    FilePath input(cases[i].input);
+    FilePath observed = input.BaseName();
+    EXPECT_EQ(FilePath::StringType(cases[i].expected), observed.value()) <<
+              "i: " << i << ", input: " << input.value();
+  }
+}
+
+TEST_F(FilePathTest, Append) {
+  const struct BinaryTestData cases[] = {
+    { { FPL(""),           FPL("cc") }, FPL("cc") },
+    { { FPL("."),          FPL("ff") }, FPL("ff") },
+    { { FPL("."),          FPL("") },   FPL(".") },
+    { { FPL("/"),          FPL("cc") }, FPL("/cc") },
+    { { FPL("/aa"),        FPL("") },   FPL("/aa") },
+    { { FPL("/aa/"),       FPL("") },   FPL("/aa") },
+    { { FPL("//aa"),       FPL("") },   FPL("//aa") },
+    { { FPL("//aa/"),      FPL("") },   FPL("//aa") },
+    { { FPL("//"),         FPL("aa") }, FPL("//aa") },
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+    { { FPL("c:"),         FPL("a") },  FPL("c:a") },
+    { { FPL("c:"),         FPL("") },   FPL("c:") },
+    { { FPL("c:/"),        FPL("a") },  FPL("c:/a") },
+    { { FPL("c://"),       FPL("a") },  FPL("c://a") },
+    { { FPL("c:///"),      FPL("a") },  FPL("c:/a") },
+#endif  // FILE_PATH_USES_DRIVE_LETTERS
+#if defined(FILE_PATH_USES_WIN_SEPARATORS)
+    // Append introduces the default separator character, so these test cases
+    // need to be defined with different expected results on platforms that use
+    // different default separator characters.
+    { { FPL("\\"),         FPL("cc") }, FPL("\\cc") },
+    { { FPL("\\aa"),       FPL("") },   FPL("\\aa") },
+    { { FPL("\\aa\\"),     FPL("") },   FPL("\\aa") },
+    { { FPL("\\\\aa"),     FPL("") },   FPL("\\\\aa") },
+    { { FPL("\\\\aa\\"),   FPL("") },   FPL("\\\\aa") },
+    { { FPL("\\\\"),       FPL("aa") }, FPL("\\\\aa") },
+    { { FPL("/aa/bb"),     FPL("cc") }, FPL("/aa/bb\\cc") },
+    { { FPL("/aa/bb/"),    FPL("cc") }, FPL("/aa/bb\\cc") },
+    { { FPL("aa/bb/"),     FPL("cc") }, FPL("aa/bb\\cc") },
+    { { FPL("aa/bb"),      FPL("cc") }, FPL("aa/bb\\cc") },
+    { { FPL("a/b"),        FPL("c") },  FPL("a/b\\c") },
+    { { FPL("a/b/"),       FPL("c") },  FPL("a/b\\c") },
+    { { FPL("//aa"),       FPL("bb") }, FPL("//aa\\bb") },
+    { { FPL("//aa/"),      FPL("bb") }, FPL("//aa\\bb") },
+    { { FPL("\\aa\\bb"),   FPL("cc") }, FPL("\\aa\\bb\\cc") },
+    { { FPL("\\aa\\bb\\"), FPL("cc") }, FPL("\\aa\\bb\\cc") },
+    { { FPL("aa\\bb\\"),   FPL("cc") }, FPL("aa\\bb\\cc") },
+    { { FPL("aa\\bb"),     FPL("cc") }, FPL("aa\\bb\\cc") },
+    { { FPL("a\\b"),       FPL("c") },  FPL("a\\b\\c") },
+    { { FPL("a\\b\\"),     FPL("c") },  FPL("a\\b\\c") },
+    { { FPL("\\\\aa"),     FPL("bb") }, FPL("\\\\aa\\bb") },
+    { { FPL("\\\\aa\\"),   FPL("bb") }, FPL("\\\\aa\\bb") },
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+    { { FPL("c:\\"),       FPL("a") },  FPL("c:\\a") },
+    { { FPL("c:\\\\"),     FPL("a") },  FPL("c:\\\\a") },
+    { { FPL("c:\\\\\\"),   FPL("a") },  FPL("c:\\a") },
+    { { FPL("c:\\"),       FPL("") },   FPL("c:\\") },
+    { { FPL("c:\\a"),      FPL("b") },  FPL("c:\\a\\b") },
+    { { FPL("c:\\a\\"),    FPL("b") },  FPL("c:\\a\\b") },
+#endif  // FILE_PATH_USES_DRIVE_LETTERS
+#else  // FILE_PATH_USES_WIN_SEPARATORS
+    { { FPL("/aa/bb"),     FPL("cc") }, FPL("/aa/bb/cc") },
+    { { FPL("/aa/bb/"),    FPL("cc") }, FPL("/aa/bb/cc") },
+    { { FPL("aa/bb/"),     FPL("cc") }, FPL("aa/bb/cc") },
+    { { FPL("aa/bb"),      FPL("cc") }, FPL("aa/bb/cc") },
+    { { FPL("a/b"),        FPL("c") },  FPL("a/b/c") },
+    { { FPL("a/b/"),       FPL("c") },  FPL("a/b/c") },
+    { { FPL("//aa"),       FPL("bb") }, FPL("//aa/bb") },
+    { { FPL("//aa/"),      FPL("bb") }, FPL("//aa/bb") },
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+    { { FPL("c:/"),        FPL("a") },  FPL("c:/a") },
+    { { FPL("c:/"),        FPL("") },   FPL("c:/") },
+    { { FPL("c:/a"),       FPL("b") },  FPL("c:/a/b") },
+    { { FPL("c:/a/"),      FPL("b") },  FPL("c:/a/b") },
+#endif  // FILE_PATH_USES_DRIVE_LETTERS
+#endif  // FILE_PATH_USES_WIN_SEPARATORS
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    FilePath root(cases[i].inputs[0]);
+    FilePath::StringType leaf(cases[i].inputs[1]);
+    FilePath observed_str = root.Append(leaf);
+    EXPECT_EQ(FilePath::StringType(cases[i].expected), observed_str.value()) <<
+              "i: " << i << ", root: " << root.value() << ", leaf: " << leaf;
+    FilePath observed_path = root.Append(FilePath(leaf));
+    EXPECT_EQ(FilePath::StringType(cases[i].expected), observed_path.value()) <<
+              "i: " << i << ", root: " << root.value() << ", leaf: " << leaf;
+
+    // TODO(erikkay): It would be nice to have a unicode test append value to
+    // handle the case when AppendASCII is passed UTF8
+#if defined(OS_WIN)
+    std::string ascii = WideToUTF8(leaf);
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+    std::string ascii = leaf;
+#endif
+    observed_str = root.AppendASCII(ascii);
+    EXPECT_EQ(FilePath::StringType(cases[i].expected), observed_str.value()) <<
+              "i: " << i << ", root: " << root.value() << ", leaf: " << leaf;
+  }
+}
+
+TEST_F(FilePathTest, StripTrailingSeparators) {
+  const struct UnaryTestData cases[] = {
+    { FPL(""),              FPL("") },
+    { FPL("/"),             FPL("/") },
+    { FPL("//"),            FPL("//") },
+    { FPL("///"),           FPL("/") },
+    { FPL("////"),          FPL("/") },
+    { FPL("a/"),            FPL("a") },
+    { FPL("a//"),           FPL("a") },
+    { FPL("a///"),          FPL("a") },
+    { FPL("a////"),         FPL("a") },
+    { FPL("/a"),            FPL("/a") },
+    { FPL("/a/"),           FPL("/a") },
+    { FPL("/a//"),          FPL("/a") },
+    { FPL("/a///"),         FPL("/a") },
+    { FPL("/a////"),        FPL("/a") },
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+    { FPL("c:"),            FPL("c:") },
+    { FPL("c:/"),           FPL("c:/") },
+    { FPL("c://"),          FPL("c://") },
+    { FPL("c:///"),         FPL("c:/") },
+    { FPL("c:////"),        FPL("c:/") },
+    { FPL("c:/a"),          FPL("c:/a") },
+    { FPL("c:/a/"),         FPL("c:/a") },
+    { FPL("c:/a//"),        FPL("c:/a") },
+    { FPL("c:/a///"),       FPL("c:/a") },
+    { FPL("c:/a////"),      FPL("c:/a") },
+#endif  // FILE_PATH_USES_DRIVE_LETTERS
+#if defined(FILE_PATH_USES_WIN_SEPARATORS)
+    { FPL("\\"),            FPL("\\") },
+    { FPL("\\\\"),          FPL("\\\\") },
+    { FPL("\\\\\\"),        FPL("\\") },
+    { FPL("\\\\\\\\"),      FPL("\\") },
+    { FPL("a\\"),           FPL("a") },
+    { FPL("a\\\\"),         FPL("a") },
+    { FPL("a\\\\\\"),       FPL("a") },
+    { FPL("a\\\\\\\\"),     FPL("a") },
+    { FPL("\\a"),           FPL("\\a") },
+    { FPL("\\a\\"),         FPL("\\a") },
+    { FPL("\\a\\\\"),       FPL("\\a") },
+    { FPL("\\a\\\\\\"),     FPL("\\a") },
+    { FPL("\\a\\\\\\\\"),   FPL("\\a") },
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+    { FPL("c:\\"),          FPL("c:\\") },
+    { FPL("c:\\\\"),        FPL("c:\\\\") },
+    { FPL("c:\\\\\\"),      FPL("c:\\") },
+    { FPL("c:\\\\\\\\"),    FPL("c:\\") },
+    { FPL("c:\\a"),         FPL("c:\\a") },
+    { FPL("c:\\a\\"),       FPL("c:\\a") },
+    { FPL("c:\\a\\\\"),     FPL("c:\\a") },
+    { FPL("c:\\a\\\\\\"),   FPL("c:\\a") },
+    { FPL("c:\\a\\\\\\\\"), FPL("c:\\a") },
+#endif  // FILE_PATH_USES_DRIVE_LETTERS
+#endif  // FILE_PATH_USES_WIN_SEPARATORS
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    FilePath input(cases[i].input);
+    FilePath observed = input.StripTrailingSeparators();
+    EXPECT_EQ(FilePath::StringType(cases[i].expected), observed.value()) <<
+              "i: " << i << ", input: " << input.value();
+  }
+}
+
+TEST_F(FilePathTest, IsAbsolute) {
+  const struct UnaryBooleanTestData cases[] = {
+    { FPL(""),       false },
+    { FPL("a"),      false },
+    { FPL("c:"),     false },
+    { FPL("c:a"),    false },
+    { FPL("a/b"),    false },
+    { FPL("//"),     true },
+    { FPL("//a"),    true },
+    { FPL("c:a/b"),  false },
+    { FPL("?:/a"),   false },
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+    { FPL("/"),      false },
+    { FPL("/a"),     false },
+    { FPL("/."),     false },
+    { FPL("/.."),    false },
+    { FPL("c:/"),    true },
+    { FPL("c:/a"),   true },
+    { FPL("c:/."),   true },
+    { FPL("c:/.."),  true },
+    { FPL("C:/a"),   true },
+    { FPL("d:/a"),   true },
+#else  // FILE_PATH_USES_DRIVE_LETTERS
+    { FPL("/"),      true },
+    { FPL("/a"),     true },
+    { FPL("/."),     true },
+    { FPL("/.."),    true },
+    { FPL("c:/"),    false },
+#endif  // FILE_PATH_USES_DRIVE_LETTERS
+#if defined(FILE_PATH_USES_WIN_SEPARATORS)
+    { FPL("a\\b"),   false },
+    { FPL("\\\\"),   true },
+    { FPL("\\\\a"),  true },
+    { FPL("a\\b"),   false },
+    { FPL("\\\\"),   true },
+    { FPL("//a"),    true },
+    { FPL("c:a\\b"), false },
+    { FPL("?:\\a"),  false },
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+    { FPL("\\"),     false },
+    { FPL("\\a"),    false },
+    { FPL("\\."),    false },
+    { FPL("\\.."),   false },
+    { FPL("c:\\"),   true },
+    { FPL("c:\\"),   true },
+    { FPL("c:\\a"),  true },
+    { FPL("c:\\."),  true },
+    { FPL("c:\\.."), true },
+    { FPL("C:\\a"),  true },
+    { FPL("d:\\a"),  true },
+#else  // FILE_PATH_USES_DRIVE_LETTERS
+    { FPL("\\"),     true },
+    { FPL("\\a"),    true },
+    { FPL("\\."),    true },
+    { FPL("\\.."),   true },
+    { FPL("c:\\"),   false },
+#endif  // FILE_PATH_USES_DRIVE_LETTERS
+#endif  // FILE_PATH_USES_WIN_SEPARATORS
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    FilePath input(cases[i].input);
+    bool observed = input.IsAbsolute();
+    EXPECT_EQ(cases[i].expected, observed) <<
+              "i: " << i << ", input: " << input.value();
+  }
+}
+
+TEST_F(FilePathTest, PathComponentsTest) {
+  const struct UnaryTestData cases[] = {
+    { FPL("//foo/bar/baz/"),          FPL("|//|foo|bar|baz")},
+    { FPL("///"),                     FPL("|/")},
+    { FPL("/foo//bar//baz/"),         FPL("|/|foo|bar|baz")},
+    { FPL("/foo/bar/baz/"),           FPL("|/|foo|bar|baz")},
+    { FPL("/foo/bar/baz//"),          FPL("|/|foo|bar|baz")},
+    { FPL("/foo/bar/baz///"),         FPL("|/|foo|bar|baz")},
+    { FPL("/foo/bar/baz"),            FPL("|/|foo|bar|baz")},
+    { FPL("/foo/bar.bot/baz.txt"),    FPL("|/|foo|bar.bot|baz.txt")},
+    { FPL("//foo//bar/baz"),          FPL("|//|foo|bar|baz")},
+    { FPL("/"),                       FPL("|/")},
+    { FPL("foo"),                     FPL("|foo")},
+    { FPL(""),                        FPL("")},
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+    { FPL("e:/foo"),                  FPL("|e:|/|foo")},
+    { FPL("e:/"),                     FPL("|e:|/")},
+    { FPL("e:"),                      FPL("|e:")},
+#endif  // FILE_PATH_USES_DRIVE_LETTERS
+#if defined(FILE_PATH_USES_WIN_SEPARATORS)
+    { FPL("../foo"),                  FPL("|..|foo")},
+    { FPL("./foo"),                   FPL("|foo")},
+    { FPL("../foo/bar/"),             FPL("|..|foo|bar") },
+    { FPL("\\\\foo\\bar\\baz\\"),     FPL("|\\\\|foo|bar|baz")},
+    { FPL("\\\\\\"),                  FPL("|\\")},
+    { FPL("\\foo\\\\bar\\\\baz\\"),   FPL("|\\|foo|bar|baz")},
+    { FPL("\\foo\\bar\\baz\\"),       FPL("|\\|foo|bar|baz")},
+    { FPL("\\foo\\bar\\baz\\\\"),     FPL("|\\|foo|bar|baz")},
+    { FPL("\\foo\\bar\\baz\\\\\\"),   FPL("|\\|foo|bar|baz")},
+    { FPL("\\foo\\bar\\baz"),         FPL("|\\|foo|bar|baz")},
+    { FPL("\\foo\\bar/baz\\\\\\"),    FPL("|\\|foo|bar|baz")},
+    { FPL("/foo\\bar\\baz"),          FPL("|/|foo|bar|baz")},
+    { FPL("\\foo\\bar.bot\\baz.txt"), FPL("|\\|foo|bar.bot|baz.txt")},
+    { FPL("\\\\foo\\\\bar\\baz"),     FPL("|\\\\|foo|bar|baz")},
+    { FPL("\\"),                      FPL("|\\")},
+#endif  // FILE_PATH_USES_WIN_SEPARATORS
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    FilePath input(cases[i].input);
+    std::vector<FilePath::StringType> comps;
+    input.GetComponents(&comps);
+
+    FilePath::StringType observed;
+    for (size_t j = 0; j < comps.size(); ++j) {
+      observed.append(FILE_PATH_LITERAL("|"), 1);
+      observed.append(comps[j]);
+    }
+    EXPECT_EQ(FilePath::StringType(cases[i].expected), observed) <<
+              "i: " << i << ", input: " << input.value();
+  }
+}
+
+TEST_F(FilePathTest, IsParentTest) {
+  const struct BinaryBooleanTestData cases[] = {
+    { { FPL("/"),             FPL("/foo/bar/baz") },      true},
+    { { FPL("/foo/bar"),      FPL("/foo/bar/baz") },      true},
+    { { FPL("/foo/bar/"),     FPL("/foo/bar/baz") },      true},
+    { { FPL("//foo/bar/"),    FPL("//foo/bar/baz") },     true},
+    { { FPL("/foo/bar"),      FPL("/foo2/bar/baz") },     false},
+    { { FPL("/foo/bar.txt"),  FPL("/foo/bar/baz") },      false},
+    { { FPL("/foo/bar"),      FPL("/foo/bar2/baz") },     false},
+    { { FPL("/foo/bar"),      FPL("/foo/bar") },          false},
+    { { FPL("/foo/bar/baz"),  FPL("/foo/bar") },          false},
+    { { FPL("foo/bar"),       FPL("foo/bar/baz") },       true},
+    { { FPL("foo/bar"),       FPL("foo2/bar/baz") },      false},
+    { { FPL("foo/bar"),       FPL("foo/bar2/baz") },      false},
+    { { FPL(""),              FPL("foo") },               false},
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+    { { FPL("c:/foo/bar"),    FPL("c:/foo/bar/baz") },    true},
+    { { FPL("E:/foo/bar"),    FPL("e:/foo/bar/baz") },    true},
+    { { FPL("f:/foo/bar"),    FPL("F:/foo/bar/baz") },    true},
+    { { FPL("E:/Foo/bar"),    FPL("e:/foo/bar/baz") },    false},
+    { { FPL("f:/foo/bar"),    FPL("F:/foo/Bar/baz") },    false},
+    { { FPL("c:/"),           FPL("c:/foo/bar/baz") },    true},
+    { { FPL("c:"),            FPL("c:/foo/bar/baz") },    true},
+    { { FPL("c:/foo/bar"),    FPL("d:/foo/bar/baz") },    false},
+    { { FPL("c:/foo/bar"),    FPL("D:/foo/bar/baz") },    false},
+    { { FPL("C:/foo/bar"),    FPL("d:/foo/bar/baz") },    false},
+    { { FPL("c:/foo/bar"),    FPL("c:/foo2/bar/baz") },   false},
+    { { FPL("e:/foo/bar"),    FPL("E:/foo2/bar/baz") },   false},
+    { { FPL("F:/foo/bar"),    FPL("f:/foo2/bar/baz") },   false},
+    { { FPL("c:/foo/bar"),    FPL("c:/foo/bar2/baz") },   false},
+#endif  // FILE_PATH_USES_DRIVE_LETTERS
+#if defined(FILE_PATH_USES_WIN_SEPARATORS)
+    { { FPL("\\foo\\bar"),    FPL("\\foo\\bar\\baz") },   true},
+    { { FPL("\\foo/bar"),     FPL("\\foo\\bar\\baz") },   true},
+    { { FPL("\\foo/bar"),     FPL("\\foo/bar/baz") },     true},
+    { { FPL("\\"),            FPL("\\foo\\bar\\baz") },   true},
+    { { FPL(""),              FPL("\\foo\\bar\\baz") },   false},
+    { { FPL("\\foo\\bar"),    FPL("\\foo2\\bar\\baz") },  false},
+    { { FPL("\\foo\\bar"),    FPL("\\foo\\bar2\\baz") },  false},
+#endif  // FILE_PATH_USES_WIN_SEPARATORS
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    FilePath parent(cases[i].inputs[0]);
+    FilePath child(cases[i].inputs[1]);
+
+    EXPECT_EQ(parent.IsParent(child), cases[i].expected) <<
+        "i: " << i << ", parent: " << parent.value() << ", child: " <<
+        child.value();
+  }
+}
+
+TEST_F(FilePathTest, AppendRelativePathTest) {
+  const struct BinaryTestData cases[] = {
+#if defined(FILE_PATH_USES_WIN_SEPARATORS)
+    { { FPL("/"),             FPL("/foo/bar/baz") },      FPL("foo\\bar\\baz")},
+#else  // FILE_PATH_USES_WIN_SEPARATORS
+    { { FPL("/"),             FPL("/foo/bar/baz") },      FPL("foo/bar/baz")},
+#endif  // FILE_PATH_USES_WIN_SEPARATORS
+    { { FPL("/foo/bar"),      FPL("/foo/bar/baz") },      FPL("baz")},
+    { { FPL("/foo/bar/"),     FPL("/foo/bar/baz") },      FPL("baz")},
+    { { FPL("//foo/bar/"),    FPL("//foo/bar/baz") },     FPL("baz")},
+    { { FPL("/foo/bar"),      FPL("/foo2/bar/baz") },     FPL("")},
+    { { FPL("/foo/bar.txt"),  FPL("/foo/bar/baz") },      FPL("")},
+    { { FPL("/foo/bar"),      FPL("/foo/bar2/baz") },     FPL("")},
+    { { FPL("/foo/bar"),      FPL("/foo/bar") },          FPL("")},
+    { { FPL("/foo/bar/baz"),  FPL("/foo/bar") },          FPL("")},
+    { { FPL("foo/bar"),       FPL("foo/bar/baz") },       FPL("baz")},
+    { { FPL("foo/bar"),       FPL("foo2/bar/baz") },      FPL("")},
+    { { FPL("foo/bar"),       FPL("foo/bar2/baz") },      FPL("")},
+    { { FPL(""),              FPL("foo") },               FPL("")},
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+    { { FPL("c:/foo/bar"),    FPL("c:/foo/bar/baz") },    FPL("baz")},
+    { { FPL("E:/foo/bar"),    FPL("e:/foo/bar/baz") },    FPL("baz")},
+    { { FPL("f:/foo/bar"),    FPL("F:/foo/bar/baz") },    FPL("baz")},
+    { { FPL("E:/Foo/bar"),    FPL("e:/foo/bar/baz") },    FPL("")},
+    { { FPL("f:/foo/bar"),    FPL("F:/foo/Bar/baz") },    FPL("")},
+#if defined(FILE_PATH_USES_WIN_SEPARATORS)
+    { { FPL("c:/"),           FPL("c:/foo/bar/baz") },    FPL("foo\\bar\\baz")},
+    // TODO(akalin): Figure out how to handle the corner case in the
+    // commented-out test case below.  Appending to an empty path gives
+    // /foo\bar\baz but appending to a nonempty path "blah" gives
+    // blah\foo\bar\baz.
+    // { { FPL("c:"),            FPL("c:/foo/bar/baz") }, FPL("foo\\bar\\baz")},
+#endif  // FILE_PATH_USES_WIN_SEPARATORS
+    { { FPL("c:/foo/bar"),    FPL("d:/foo/bar/baz") },    FPL("")},
+    { { FPL("c:/foo/bar"),    FPL("D:/foo/bar/baz") },    FPL("")},
+    { { FPL("C:/foo/bar"),    FPL("d:/foo/bar/baz") },    FPL("")},
+    { { FPL("c:/foo/bar"),    FPL("c:/foo2/bar/baz") },   FPL("")},
+    { { FPL("e:/foo/bar"),    FPL("E:/foo2/bar/baz") },   FPL("")},
+    { { FPL("F:/foo/bar"),    FPL("f:/foo2/bar/baz") },   FPL("")},
+    { { FPL("c:/foo/bar"),    FPL("c:/foo/bar2/baz") },   FPL("")},
+#endif  // FILE_PATH_USES_DRIVE_LETTERS
+#if defined(FILE_PATH_USES_WIN_SEPARATORS)
+    { { FPL("\\foo\\bar"),    FPL("\\foo\\bar\\baz") },   FPL("baz")},
+    { { FPL("\\foo/bar"),     FPL("\\foo\\bar\\baz") },   FPL("baz")},
+    { { FPL("\\foo/bar"),     FPL("\\foo/bar/baz") },     FPL("baz")},
+    { { FPL("\\"),            FPL("\\foo\\bar\\baz") },   FPL("foo\\bar\\baz")},
+    { { FPL(""),              FPL("\\foo\\bar\\baz") },   FPL("")},
+    { { FPL("\\foo\\bar"),    FPL("\\foo2\\bar\\baz") },  FPL("")},
+    { { FPL("\\foo\\bar"),    FPL("\\foo\\bar2\\baz") },  FPL("")},
+#endif  // FILE_PATH_USES_WIN_SEPARATORS
+  };
+
+  const FilePath base(FPL("blah"));
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    FilePath parent(cases[i].inputs[0]);
+    FilePath child(cases[i].inputs[1]);
+    {
+      FilePath result;
+      bool success = parent.AppendRelativePath(child, &result);
+      EXPECT_EQ(cases[i].expected[0] != '\0', success) <<
+        "i: " << i << ", parent: " << parent.value() << ", child: " <<
+        child.value();
+      EXPECT_STREQ(cases[i].expected, result.value().c_str()) <<
+        "i: " << i << ", parent: " << parent.value() << ", child: " <<
+        child.value();
+    }
+    {
+      FilePath result(base);
+      bool success = parent.AppendRelativePath(child, &result);
+      EXPECT_EQ(cases[i].expected[0] != '\0', success) <<
+        "i: " << i << ", parent: " << parent.value() << ", child: " <<
+        child.value();
+      EXPECT_EQ(base.Append(cases[i].expected).value(), result.value()) <<
+        "i: " << i << ", parent: " << parent.value() << ", child: " <<
+        child.value();
+    }
+  }
+}
+
+TEST_F(FilePathTest, EqualityTest) {
+  const struct BinaryBooleanTestData cases[] = {
+    { { FPL("/foo/bar/baz"),  FPL("/foo/bar/baz") },      true},
+    { { FPL("/foo/bar"),      FPL("/foo/bar/baz") },      false},
+    { { FPL("/foo/bar/baz"),  FPL("/foo/bar") },          false},
+    { { FPL("//foo/bar/"),    FPL("//foo/bar/") },        true},
+    { { FPL("/foo/bar"),      FPL("/foo2/bar") },         false},
+    { { FPL("/foo/bar.txt"),  FPL("/foo/bar") },          false},
+    { { FPL("foo/bar"),       FPL("foo/bar") },           true},
+    { { FPL("foo/bar"),       FPL("foo/bar/baz") },       false},
+    { { FPL(""),              FPL("foo") },               false},
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+    { { FPL("c:/foo/bar"),    FPL("c:/foo/bar") },        true},
+    { { FPL("E:/foo/bar"),    FPL("e:/foo/bar") },        true},
+    { { FPL("f:/foo/bar"),    FPL("F:/foo/bar") },        true},
+    { { FPL("E:/Foo/bar"),    FPL("e:/foo/bar") },        false},
+    { { FPL("f:/foo/bar"),    FPL("F:/foo/Bar") },        false},
+    { { FPL("c:/"),           FPL("c:/") },               true},
+    { { FPL("c:"),            FPL("c:") },                true},
+    { { FPL("c:/foo/bar"),    FPL("d:/foo/bar") },        false},
+    { { FPL("c:/foo/bar"),    FPL("D:/foo/bar") },        false},
+    { { FPL("C:/foo/bar"),    FPL("d:/foo/bar") },        false},
+    { { FPL("c:/foo/bar"),    FPL("c:/foo2/bar") },       false},
+#endif  // FILE_PATH_USES_DRIVE_LETTERS
+#if defined(FILE_PATH_USES_WIN_SEPARATORS)
+    { { FPL("\\foo\\bar"),    FPL("\\foo\\bar") },        true},
+    { { FPL("\\foo/bar"),     FPL("\\foo/bar") },         true},
+    { { FPL("\\foo/bar"),     FPL("\\foo\\bar") },        false},
+    { { FPL("\\"),            FPL("\\") },                true},
+    { { FPL("\\"),            FPL("/") },                 false},
+    { { FPL(""),              FPL("\\") },                false},
+    { { FPL("\\foo\\bar"),    FPL("\\foo2\\bar") },       false},
+    { { FPL("\\foo\\bar"),    FPL("\\foo\\bar2") },       false},
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+    { { FPL("c:\\foo\\bar"),    FPL("c:\\foo\\bar") },    true},
+    { { FPL("E:\\foo\\bar"),    FPL("e:\\foo\\bar") },    true},
+    { { FPL("f:\\foo\\bar"),    FPL("F:\\foo/bar") },     false},
+#endif  // FILE_PATH_USES_DRIVE_LETTERS
+#endif  // FILE_PATH_USES_WIN_SEPARATORS
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    FilePath a(cases[i].inputs[0]);
+    FilePath b(cases[i].inputs[1]);
+
+    EXPECT_EQ(a == b, cases[i].expected) <<
+      "equality i: " << i << ", a: " << a.value() << ", b: " <<
+      b.value();
+  }
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    FilePath a(cases[i].inputs[0]);
+    FilePath b(cases[i].inputs[1]);
+
+    EXPECT_EQ(a != b, !cases[i].expected) <<
+      "inequality i: " << i << ", a: " << a.value() << ", b: " <<
+      b.value();
+  }
+}
+
+TEST_F(FilePathTest, Extension) {
+  FilePath base_dir(FILE_PATH_LITERAL("base_dir"));
+
+  FilePath jpg = base_dir.Append(FILE_PATH_LITERAL("foo.jpg"));
+  EXPECT_EQ(FILE_PATH_LITERAL(".jpg"), jpg.Extension());
+  EXPECT_EQ(FILE_PATH_LITERAL(".jpg"), jpg.FinalExtension());
+
+  FilePath base = jpg.BaseName().RemoveExtension();
+  EXPECT_EQ(FILE_PATH_LITERAL("foo"), base.value());
+
+  FilePath path_no_ext = base_dir.Append(base);
+  EXPECT_EQ(path_no_ext.value(), jpg.RemoveExtension().value());
+
+  EXPECT_EQ(path_no_ext.value(), path_no_ext.RemoveExtension().value());
+  EXPECT_EQ(FILE_PATH_LITERAL(""), path_no_ext.Extension());
+  EXPECT_EQ(FILE_PATH_LITERAL(""), path_no_ext.FinalExtension());
+}
+
+TEST_F(FilePathTest, Extension2) {
+  const struct UnaryTestData cases[] = {
+#if defined(FILE_PATH_USES_WIN_SEPARATORS)
+    { FPL("C:\\a\\b\\c.ext"),        FPL(".ext") },
+    { FPL("C:\\a\\b\\c."),           FPL(".") },
+    { FPL("C:\\a\\b\\c"),            FPL("") },
+    { FPL("C:\\a\\b\\"),             FPL("") },
+    { FPL("C:\\a\\b.\\"),            FPL(".") },
+    { FPL("C:\\a\\b\\c.ext1.ext2"),  FPL(".ext2") },
+    { FPL("C:\\foo.bar\\\\\\"),      FPL(".bar") },
+    { FPL("C:\\foo.bar\\.."),        FPL("") },
+    { FPL("C:\\foo.bar\\..\\\\"),    FPL("") },
+#endif
+    { FPL("/foo/bar/baz.ext"),       FPL(".ext") },
+    { FPL("/foo/bar/baz."),          FPL(".") },
+    { FPL("/foo/bar/baz.."),         FPL(".") },
+    { FPL("/foo/bar/baz"),           FPL("") },
+    { FPL("/foo/bar/"),              FPL("") },
+    { FPL("/foo/bar./"),             FPL(".") },
+    { FPL("/foo/bar/baz.ext1.ext2"), FPL(".ext2") },
+    { FPL("/subversion-1.6.12.zip"), FPL(".zip") },
+    { FPL("/foo.12345.gz"),          FPL(".gz") },
+    { FPL("/foo..gz"),               FPL(".gz") },
+    { FPL("."),                      FPL("") },
+    { FPL(".."),                     FPL("") },
+    { FPL("./foo"),                  FPL("") },
+    { FPL("./foo.ext"),              FPL(".ext") },
+    { FPL("/foo.ext1/bar.ext2"),     FPL(".ext2") },
+    { FPL("/foo.bar////"),           FPL(".bar") },
+    { FPL("/foo.bar/.."),            FPL("") },
+    { FPL("/foo.bar/..////"),        FPL("") },
+    { FPL("/foo.1234.luser.js"),     FPL(".js") },
+    { FPL("/user.js"),               FPL(".js") },
+  };
+  const struct UnaryTestData double_extension_cases[] = {
+    { FPL("/foo.tar.gz"),            FPL(".tar.gz") },
+    { FPL("/foo.tar.Z"),             FPL(".tar.Z") },
+    { FPL("/foo.tar.bz2"),           FPL(".tar.bz2") },
+    { FPL("/foo.1234.gz"),           FPL(".1234.gz") },
+    { FPL("/foo.1234.tar.gz"),       FPL(".tar.gz") },
+    { FPL("/foo.tar.tar.gz"),        FPL(".tar.gz") },
+    { FPL("/foo.tar.gz.gz"),         FPL(".gz.gz") },
+    { FPL("/foo.1234.user.js"),      FPL(".user.js") },
+    { FPL("foo.user.js"),            FPL(".user.js") },
+    { FPL("/foo.tar.bz"),            FPL(".tar.bz") },
+  };
+  for (unsigned int i = 0; i < arraysize(cases); ++i) {
+    FilePath path(cases[i].input);
+    FilePath::StringType extension = path.Extension();
+    FilePath::StringType final_extension = path.FinalExtension();
+    EXPECT_STREQ(cases[i].expected, extension.c_str())
+        << "i: " << i << ", path: " << path.value();
+    EXPECT_STREQ(cases[i].expected, final_extension.c_str())
+        << "i: " << i << ", path: " << path.value();
+  }
+  for (unsigned int i = 0; i < arraysize(double_extension_cases); ++i) {
+    FilePath path(double_extension_cases[i].input);
+    FilePath::StringType extension = path.Extension();
+    EXPECT_STREQ(double_extension_cases[i].expected, extension.c_str())
+        << "i: " << i << ", path: " << path.value();
+  }
+}
+
+TEST_F(FilePathTest, InsertBeforeExtension) {
+  const struct BinaryTestData cases[] = {
+    { { FPL(""),                FPL("") },        FPL("") },
+    { { FPL(""),                FPL("txt") },     FPL("") },
+    { { FPL("."),               FPL("txt") },     FPL("") },
+    { { FPL(".."),              FPL("txt") },     FPL("") },
+    { { FPL("foo.dll"),         FPL("txt") },     FPL("footxt.dll") },
+    { { FPL("."),               FPL("") },        FPL(".") },
+    { { FPL("foo.dll"),         FPL(".txt") },    FPL("foo.txt.dll") },
+    { { FPL("foo"),             FPL("txt") },     FPL("footxt") },
+    { { FPL("foo"),             FPL(".txt") },    FPL("foo.txt") },
+    { { FPL("foo.baz.dll"),     FPL("txt") },     FPL("foo.baztxt.dll") },
+    { { FPL("foo.baz.dll"),     FPL(".txt") },    FPL("foo.baz.txt.dll") },
+    { { FPL("foo.dll"),         FPL("") },        FPL("foo.dll") },
+    { { FPL("foo.dll"),         FPL(".") },       FPL("foo..dll") },
+    { { FPL("foo"),             FPL("") },        FPL("foo") },
+    { { FPL("foo"),             FPL(".") },       FPL("foo.") },
+    { { FPL("foo.baz.dll"),     FPL("") },        FPL("foo.baz.dll") },
+    { { FPL("foo.baz.dll"),     FPL(".") },       FPL("foo.baz..dll") },
+#if defined(FILE_PATH_USES_WIN_SEPARATORS)
+    { { FPL("\\"),              FPL("") },        FPL("\\") },
+    { { FPL("\\"),              FPL("txt") },     FPL("\\txt") },
+    { { FPL("\\."),             FPL("txt") },     FPL("") },
+    { { FPL("\\.."),            FPL("txt") },     FPL("") },
+    { { FPL("\\."),             FPL("") },        FPL("\\.") },
+    { { FPL("C:\\bar\\foo.dll"), FPL("txt") },
+        FPL("C:\\bar\\footxt.dll") },
+    { { FPL("C:\\bar.baz\\foodll"), FPL("txt") },
+        FPL("C:\\bar.baz\\foodlltxt") },
+    { { FPL("C:\\bar.baz\\foo.dll"), FPL("txt") },
+        FPL("C:\\bar.baz\\footxt.dll") },
+    { { FPL("C:\\bar.baz\\foo.dll.exe"), FPL("txt") },
+        FPL("C:\\bar.baz\\foo.dlltxt.exe") },
+    { { FPL("C:\\bar.baz\\foo"), FPL("") },
+        FPL("C:\\bar.baz\\foo") },
+    { { FPL("C:\\bar.baz\\foo.exe"), FPL("") },
+        FPL("C:\\bar.baz\\foo.exe") },
+    { { FPL("C:\\bar.baz\\foo.dll.exe"), FPL("") },
+        FPL("C:\\bar.baz\\foo.dll.exe") },
+    { { FPL("C:\\bar\\baz\\foo.exe"), FPL(" (1)") },
+        FPL("C:\\bar\\baz\\foo (1).exe") },
+    { { FPL("C:\\foo.baz\\\\"), FPL(" (1)") },    FPL("C:\\foo (1).baz") },
+    { { FPL("C:\\foo.baz\\..\\"), FPL(" (1)") },  FPL("") },
+#endif
+    { { FPL("/"),               FPL("") },        FPL("/") },
+    { { FPL("/"),               FPL("txt") },     FPL("/txt") },
+    { { FPL("/."),              FPL("txt") },     FPL("") },
+    { { FPL("/.."),             FPL("txt") },     FPL("") },
+    { { FPL("/."),              FPL("") },        FPL("/.") },
+    { { FPL("/bar/foo.dll"),    FPL("txt") },     FPL("/bar/footxt.dll") },
+    { { FPL("/bar.baz/foodll"), FPL("txt") },     FPL("/bar.baz/foodlltxt") },
+    { { FPL("/bar.baz/foo.dll"), FPL("txt") },    FPL("/bar.baz/footxt.dll") },
+    { { FPL("/bar.baz/foo.dll.exe"), FPL("txt") },
+        FPL("/bar.baz/foo.dlltxt.exe") },
+    { { FPL("/bar.baz/foo"),    FPL("") },        FPL("/bar.baz/foo") },
+    { { FPL("/bar.baz/foo.exe"), FPL("") },       FPL("/bar.baz/foo.exe") },
+    { { FPL("/bar.baz/foo.dll.exe"), FPL("") },   FPL("/bar.baz/foo.dll.exe") },
+    { { FPL("/bar/baz/foo.exe"), FPL(" (1)") },   FPL("/bar/baz/foo (1).exe") },
+    { { FPL("/bar/baz/..////"), FPL(" (1)") },    FPL("") },
+  };
+  for (unsigned int i = 0; i < arraysize(cases); ++i) {
+    FilePath path(cases[i].inputs[0]);
+    FilePath result = path.InsertBeforeExtension(cases[i].inputs[1]);
+    EXPECT_EQ(cases[i].expected, result.value()) << "i: " << i <<
+        ", path: " << path.value() << ", insert: " << cases[i].inputs[1];
+  }
+}
+
+TEST_F(FilePathTest, RemoveExtension) {
+  const struct UnaryTestData cases[] = {
+    { FPL(""),                    FPL("") },
+    { FPL("."),                   FPL(".") },
+    { FPL(".."),                  FPL("..") },
+    { FPL("foo.dll"),             FPL("foo") },
+    { FPL("./foo.dll"),           FPL("./foo") },
+    { FPL("foo..dll"),            FPL("foo.") },
+    { FPL("foo"),                 FPL("foo") },
+    { FPL("foo."),                FPL("foo") },
+    { FPL("foo.."),               FPL("foo.") },
+    { FPL("foo.baz.dll"),         FPL("foo.baz") },
+#if defined(FILE_PATH_USES_WIN_SEPARATORS)
+    { FPL("C:\\foo.bar\\foo"),    FPL("C:\\foo.bar\\foo") },
+    { FPL("C:\\foo.bar\\..\\\\"), FPL("C:\\foo.bar\\..\\\\") },
+#endif
+    { FPL("/foo.bar/foo"),        FPL("/foo.bar/foo") },
+    { FPL("/foo.bar/..////"),     FPL("/foo.bar/..////") },
+  };
+  for (unsigned int i = 0; i < arraysize(cases); ++i) {
+    FilePath path(cases[i].input);
+    FilePath removed = path.RemoveExtension();
+    FilePath removed_final = path.RemoveFinalExtension();
+    EXPECT_EQ(cases[i].expected, removed.value()) << "i: " << i <<
+        ", path: " << path.value();
+    EXPECT_EQ(cases[i].expected, removed_final.value()) << "i: " << i <<
+        ", path: " << path.value();
+  }
+  {
+    FilePath path(FPL("foo.tar.gz"));
+    FilePath removed = path.RemoveExtension();
+    FilePath removed_final = path.RemoveFinalExtension();
+    EXPECT_EQ(FPL("foo"), removed.value()) << ", path: " << path.value();
+    EXPECT_EQ(FPL("foo.tar"), removed_final.value()) << ", path: "
+                                                     << path.value();
+  }
+}
+
+TEST_F(FilePathTest, ReplaceExtension) {
+  const struct BinaryTestData cases[] = {
+    { { FPL(""),              FPL("") },      FPL("") },
+    { { FPL(""),              FPL("txt") },   FPL("") },
+    { { FPL("."),             FPL("txt") },   FPL("") },
+    { { FPL(".."),            FPL("txt") },   FPL("") },
+    { { FPL("."),             FPL("") },      FPL("") },
+    { { FPL("foo.dll"),       FPL("txt") },   FPL("foo.txt") },
+    { { FPL("./foo.dll"),     FPL("txt") },   FPL("./foo.txt") },
+    { { FPL("foo..dll"),      FPL("txt") },   FPL("foo..txt") },
+    { { FPL("foo.dll"),       FPL(".txt") },  FPL("foo.txt") },
+    { { FPL("foo"),           FPL("txt") },   FPL("foo.txt") },
+    { { FPL("foo."),          FPL("txt") },   FPL("foo.txt") },
+    { { FPL("foo.."),         FPL("txt") },   FPL("foo..txt") },
+    { { FPL("foo"),           FPL(".txt") },  FPL("foo.txt") },
+    { { FPL("foo.baz.dll"),   FPL("txt") },   FPL("foo.baz.txt") },
+    { { FPL("foo.baz.dll"),   FPL(".txt") },  FPL("foo.baz.txt") },
+    { { FPL("foo.dll"),       FPL("") },      FPL("foo") },
+    { { FPL("foo.dll"),       FPL(".") },     FPL("foo") },
+    { { FPL("foo"),           FPL("") },      FPL("foo") },
+    { { FPL("foo"),           FPL(".") },     FPL("foo") },
+    { { FPL("foo.baz.dll"),   FPL("") },      FPL("foo.baz") },
+    { { FPL("foo.baz.dll"),   FPL(".") },     FPL("foo.baz") },
+#if defined(FILE_PATH_USES_WIN_SEPARATORS)
+    { { FPL("C:\\foo.bar\\foo"),    FPL("baz") }, FPL("C:\\foo.bar\\foo.baz") },
+    { { FPL("C:\\foo.bar\\..\\\\"), FPL("baz") }, FPL("") },
+#endif
+    { { FPL("/foo.bar/foo"),        FPL("baz") }, FPL("/foo.bar/foo.baz") },
+    { { FPL("/foo.bar/..////"),     FPL("baz") }, FPL("") },
+  };
+  for (unsigned int i = 0; i < arraysize(cases); ++i) {
+    FilePath path(cases[i].inputs[0]);
+    FilePath replaced = path.ReplaceExtension(cases[i].inputs[1]);
+    EXPECT_EQ(cases[i].expected, replaced.value()) << "i: " << i <<
+        ", path: " << path.value() << ", replace: " << cases[i].inputs[1];
+  }
+}
+
+TEST_F(FilePathTest, AddExtension) {
+  const struct BinaryTestData cases[] = {
+    { { FPL(""),              FPL("") },      FPL("") },
+    { { FPL(""),              FPL("txt") },   FPL("") },
+    { { FPL("."),             FPL("txt") },   FPL("") },
+    { { FPL(".."),            FPL("txt") },   FPL("") },
+    { { FPL("."),             FPL("") },      FPL("") },
+    { { FPL("foo.dll"),       FPL("txt") },   FPL("foo.dll.txt") },
+    { { FPL("./foo.dll"),     FPL("txt") },   FPL("./foo.dll.txt") },
+    { { FPL("foo..dll"),      FPL("txt") },   FPL("foo..dll.txt") },
+    { { FPL("foo.dll"),       FPL(".txt") },  FPL("foo.dll.txt") },
+    { { FPL("foo"),           FPL("txt") },   FPL("foo.txt") },
+    { { FPL("foo."),          FPL("txt") },   FPL("foo.txt") },
+    { { FPL("foo.."),         FPL("txt") },   FPL("foo..txt") },
+    { { FPL("foo"),           FPL(".txt") },  FPL("foo.txt") },
+    { { FPL("foo.baz.dll"),   FPL("txt") },   FPL("foo.baz.dll.txt") },
+    { { FPL("foo.baz.dll"),   FPL(".txt") },  FPL("foo.baz.dll.txt") },
+    { { FPL("foo.dll"),       FPL("") },      FPL("foo.dll") },
+    { { FPL("foo.dll"),       FPL(".") },     FPL("foo.dll") },
+    { { FPL("foo"),           FPL("") },      FPL("foo") },
+    { { FPL("foo"),           FPL(".") },     FPL("foo") },
+    { { FPL("foo.baz.dll"),   FPL("") },      FPL("foo.baz.dll") },
+    { { FPL("foo.baz.dll"),   FPL(".") },     FPL("foo.baz.dll") },
+#if defined(FILE_PATH_USES_WIN_SEPARATORS)
+    { { FPL("C:\\foo.bar\\foo"),    FPL("baz") }, FPL("C:\\foo.bar\\foo.baz") },
+    { { FPL("C:\\foo.bar\\..\\\\"), FPL("baz") }, FPL("") },
+#endif
+    { { FPL("/foo.bar/foo"),        FPL("baz") }, FPL("/foo.bar/foo.baz") },
+    { { FPL("/foo.bar/..////"),     FPL("baz") }, FPL("") },
+  };
+  for (unsigned int i = 0; i < arraysize(cases); ++i) {
+    FilePath path(cases[i].inputs[0]);
+    FilePath added = path.AddExtension(cases[i].inputs[1]);
+    EXPECT_EQ(cases[i].expected, added.value()) << "i: " << i <<
+        ", path: " << path.value() << ", add: " << cases[i].inputs[1];
+  }
+}
+
+TEST_F(FilePathTest, MatchesExtension) {
+  const struct BinaryBooleanTestData cases[] = {
+    { { FPL("foo"),                     FPL("") },                    true},
+    { { FPL("foo"),                     FPL(".") },                   false},
+    { { FPL("foo."),                    FPL("") },                    false},
+    { { FPL("foo."),                    FPL(".") },                   true},
+    { { FPL("foo.txt"),                 FPL(".dll") },                false},
+    { { FPL("foo.txt"),                 FPL(".txt") },                true},
+    { { FPL("foo.txt.dll"),             FPL(".txt") },                false},
+    { { FPL("foo.txt.dll"),             FPL(".dll") },                true},
+    { { FPL("foo.TXT"),                 FPL(".txt") },                true},
+    { { FPL("foo.txt"),                 FPL(".TXT") },                true},
+    { { FPL("foo.tXt"),                 FPL(".txt") },                true},
+    { { FPL("foo.txt"),                 FPL(".tXt") },                true},
+    { { FPL("foo.tXt"),                 FPL(".TXT") },                true},
+    { { FPL("foo.tXt"),                 FPL(".tXt") },                true},
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+    { { FPL("c:/foo.txt.dll"),          FPL(".txt") },                false},
+    { { FPL("c:/foo.txt"),              FPL(".txt") },                true},
+#endif  // FILE_PATH_USES_DRIVE_LETTERS
+#if defined(FILE_PATH_USES_WIN_SEPARATORS)
+    { { FPL("c:\\bar\\foo.txt.dll"),    FPL(".txt") },                false},
+    { { FPL("c:\\bar\\foo.txt"),        FPL(".txt") },                true},
+#endif  // FILE_PATH_USES_WIN_SEPARATORS
+    { { FPL("/bar/foo.txt.dll"),        FPL(".txt") },                false},
+    { { FPL("/bar/foo.txt"),            FPL(".txt") },                true},
+#if defined(OS_WIN) || defined(OS_MACOSX)
+    // Umlauts A, O, U: direct comparison, and upper case vs. lower case
+    { { FPL("foo.\u00E4\u00F6\u00FC"),  FPL(".\u00E4\u00F6\u00FC") }, true},
+    { { FPL("foo.\u00C4\u00D6\u00DC"),  FPL(".\u00E4\u00F6\u00FC") }, true},
+    // C with circumflex: direct comparison, and upper case vs. lower case
+    { { FPL("foo.\u0109"),              FPL(".\u0109") },             true},
+    { { FPL("foo.\u0108"),              FPL(".\u0109") },             true},
+#endif
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    FilePath path(cases[i].inputs[0]);
+    FilePath::StringType ext(cases[i].inputs[1]);
+
+    EXPECT_EQ(cases[i].expected, path.MatchesExtension(ext)) <<
+        "i: " << i << ", path: " << path.value() << ", ext: " << ext;
+  }
+}
+
+TEST_F(FilePathTest, CompareIgnoreCase) {
+  const struct BinaryIntTestData cases[] = {
+    { { FPL("foo"),                          FPL("foo") },                  0},
+    { { FPL("FOO"),                          FPL("foo") },                  0},
+    { { FPL("foo.ext"),                      FPL("foo.ext") },              0},
+    { { FPL("FOO.EXT"),                      FPL("foo.ext") },              0},
+    { { FPL("Foo.Ext"),                      FPL("foo.ext") },              0},
+    { { FPL("foO"),                          FPL("foo") },                  0},
+    { { FPL("foo"),                          FPL("foO") },                  0},
+    { { FPL("fOo"),                          FPL("foo") },                  0},
+    { { FPL("foo"),                          FPL("fOo") },                  0},
+    { { FPL("bar"),                          FPL("foo") },                 -1},
+    { { FPL("foo"),                          FPL("bar") },                  1},
+    { { FPL("BAR"),                          FPL("foo") },                 -1},
+    { { FPL("FOO"),                          FPL("bar") },                  1},
+    { { FPL("bar"),                          FPL("FOO") },                 -1},
+    { { FPL("foo"),                          FPL("BAR") },                  1},
+    { { FPL("BAR"),                          FPL("FOO") },                 -1},
+    { { FPL("FOO"),                          FPL("BAR") },                  1},
+    // German "Eszett" (lower case and the new-fangled upper case)
+    // Note that uc(<lowercase eszett>) => "SS", NOT <uppercase eszett>!
+    // However, neither Windows nor Mac OS X converts these
+    // (or even has glyphs for <uppercase eszett>).
+    { { FPL("\u00DF"),                       FPL("\u00DF") },               0},
+    { { FPL("\u1E9E"),                       FPL("\u1E9E") },               0},
+    { { FPL("\u00DF"),                       FPL("\u1E9E") },              -1},
+    { { FPL("SS"),                           FPL("\u00DF") },              -1},
+    { { FPL("SS"),                           FPL("\u1E9E") },              -1},
+#if defined(OS_WIN) || defined(OS_MACOSX)
+    // Umlauts A, O, U: direct comparison, and upper case vs. lower case
+    { { FPL("\u00E4\u00F6\u00FC"),           FPL("\u00E4\u00F6\u00FC") },   0},
+    { { FPL("\u00C4\u00D6\u00DC"),           FPL("\u00E4\u00F6\u00FC") },   0},
+    // C with circumflex: direct comparison, and upper case vs. lower case
+    { { FPL("\u0109"),                       FPL("\u0109") },               0},
+    { { FPL("\u0108"),                       FPL("\u0109") },               0},
+    // Cyrillic letter SHA: direct comparison, and upper case vs. lower case
+    { { FPL("\u0428"),                       FPL("\u0428") },               0},
+    { { FPL("\u0428"),                       FPL("\u0448") },               0},
+    // Greek letter DELTA: direct comparison, and upper case vs. lower case
+    { { FPL("\u0394"),                       FPL("\u0394") },               0},
+    { { FPL("\u0394"),                       FPL("\u03B4") },               0},
+    // Japanese full-width A: direct comparison, and upper case vs. lower case
+    // Note that full-width and standard characters are considered different.
+    { { FPL("\uFF21"),                       FPL("\uFF21") },               0},
+    { { FPL("\uFF21"),                       FPL("\uFF41") },               0},
+    { { FPL("A"),                            FPL("\uFF21") },              -1},
+    { { FPL("A"),                            FPL("\uFF41") },              -1},
+    { { FPL("a"),                            FPL("\uFF21") },              -1},
+    { { FPL("a"),                            FPL("\uFF41") },              -1},
+#endif
+#if defined(OS_MACOSX)
+    // Codepoints > 0x1000
+    // Georgian letter DON: direct comparison, and upper case vs. lower case
+    { { FPL("\u10A3"),                       FPL("\u10A3") },               0},
+    { { FPL("\u10A3"),                       FPL("\u10D3") },               0},
+    // Combining characters vs. pre-composed characters, upper and lower case
+    { { FPL("k\u0301u\u032Do\u0304\u0301n"), FPL("\u1E31\u1E77\u1E53n") },  0},
+    { { FPL("k\u0301u\u032Do\u0304\u0301n"), FPL("kuon") },                 1},
+    { { FPL("kuon"), FPL("k\u0301u\u032Do\u0304\u0301n") },                -1},
+    { { FPL("K\u0301U\u032DO\u0304\u0301N"), FPL("KUON") },                 1},
+    { { FPL("KUON"), FPL("K\u0301U\u032DO\u0304\u0301N") },                -1},
+    { { FPL("k\u0301u\u032Do\u0304\u0301n"), FPL("KUON") },                 1},
+    { { FPL("K\u0301U\u032DO\u0304\u0301N"), FPL("\u1E31\u1E77\u1E53n") },  0},
+    { { FPL("k\u0301u\u032Do\u0304\u0301n"), FPL("\u1E30\u1E76\u1E52n") },  0},
+    { { FPL("k\u0301u\u032Do\u0304\u0302n"), FPL("\u1E30\u1E76\u1E52n") },  1},
+#endif
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    FilePath::StringType s1(cases[i].inputs[0]);
+    FilePath::StringType s2(cases[i].inputs[1]);
+    int result = FilePath::CompareIgnoreCase(s1, s2);
+    EXPECT_EQ(cases[i].expected, result) <<
+        "i: " << i << ", s1: " << s1 << ", s2: " << s2;
+  }
+}
+
+TEST_F(FilePathTest, ReferencesParent) {
+  const struct UnaryBooleanTestData cases[] = {
+    { FPL("."),        false },
+    { FPL(".."),       true },
+    { FPL(".. "),      true },
+    { FPL(" .."),      true },
+    { FPL("..."),      true },
+    { FPL("a.."),      false },
+    { FPL("..a"),      false },
+    { FPL("../"),      true },
+    { FPL("/.."),      true },
+    { FPL("/../"),     true },
+    { FPL("/a../"),    false },
+    { FPL("/..a/"),    false },
+    { FPL("//.."),     true },
+    { FPL("..//"),     true },
+    { FPL("//..//"),   true },
+    { FPL("a//..//c"), true },
+    { FPL("../b/c"),   true },
+    { FPL("/../b/c"),  true },
+    { FPL("a/b/.."),   true },
+    { FPL("a/b/../"),  true },
+    { FPL("a/../c"),   true },
+    { FPL("a/b/c"),    false },
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    FilePath input(cases[i].input);
+    bool observed = input.ReferencesParent();
+    EXPECT_EQ(cases[i].expected, observed) <<
+              "i: " << i << ", input: " << input.value();
+  }
+}
+
+TEST_F(FilePathTest, FromUTF8Unsafe_And_AsUTF8Unsafe) {
+  const struct UTF8TestData cases[] = {
+    { FPL("foo.txt"), "foo.txt" },
+    // "aeo" with accents. Use http://0xcc.net/jsescape/ to decode them.
+    { FPL("\u00E0\u00E8\u00F2.txt"), "\xC3\xA0\xC3\xA8\xC3\xB2.txt" },
+    // Full-width "ABC".
+    { FPL("\uFF21\uFF22\uFF23.txt"),
+      "\xEF\xBC\xA1\xEF\xBC\xA2\xEF\xBC\xA3.txt" },
+  };
+
+#if !defined(SYSTEM_NATIVE_UTF8) && defined(OS_LINUX)
+  ScopedLocale locale("en_US.UTF-8");
+#endif
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    // Test FromUTF8Unsafe() works.
+    FilePath from_utf8 = FilePath::FromUTF8Unsafe(cases[i].utf8);
+    EXPECT_EQ(cases[i].native, from_utf8.value())
+        << "i: " << i << ", input: " << cases[i].native;
+    // Test AsUTF8Unsafe() works.
+    FilePath from_native = FilePath(cases[i].native);
+    EXPECT_EQ(cases[i].utf8, from_native.AsUTF8Unsafe())
+        << "i: " << i << ", input: " << cases[i].native;
+    // Test the two file paths are identical.
+    EXPECT_EQ(from_utf8.value(), from_native.value());
+  }
+}
+
+TEST_F(FilePathTest, ConstructWithNUL) {
+  // Assert FPS() works.
+  ASSERT_EQ(3U, FPS("a\0b").length());
+
+  // Test constructor strips '\0'
+  FilePath path(FPS("a\0b"));
+  EXPECT_EQ(1U, path.value().length());
+  EXPECT_EQ(FPL("a"), path.value());
+}
+
+TEST_F(FilePathTest, AppendWithNUL) {
+  // Assert FPS() works.
+  ASSERT_EQ(3U, FPS("b\0b").length());
+
+  // Test Append() strips '\0'
+  FilePath path(FPL("a"));
+  path = path.Append(FPS("b\0b"));
+  EXPECT_EQ(3U, path.value().length());
+#if defined(FILE_PATH_USES_WIN_SEPARATORS)
+  EXPECT_EQ(FPL("a\\b"), path.value());
+#else
+  EXPECT_EQ(FPL("a/b"), path.value());
+#endif
+}
+
+TEST_F(FilePathTest, ReferencesParentWithNUL) {
+  // Assert FPS() works.
+  ASSERT_EQ(3U, FPS("..\0").length());
+
+  // Test ReferencesParent() doesn't break with "..\0"
+  FilePath path(FPS("..\0"));
+  EXPECT_TRUE(path.ReferencesParent());
+}
+
+#if defined(FILE_PATH_USES_WIN_SEPARATORS)
+TEST_F(FilePathTest, NormalizePathSeparators) {
+  const struct UnaryTestData cases[] = {
+    { FPL("foo/bar"), FPL("foo\\bar") },
+    { FPL("foo/bar\\betz"), FPL("foo\\bar\\betz") },
+    { FPL("foo\\bar"), FPL("foo\\bar") },
+    { FPL("foo\\bar/betz"), FPL("foo\\bar\\betz") },
+    { FPL("foo"), FPL("foo") },
+    // Trailing slashes don't automatically get stripped.  That's what
+    // StripTrailingSeparators() is for.
+    { FPL("foo\\"), FPL("foo\\") },
+    { FPL("foo/"), FPL("foo\\") },
+    { FPL("foo/bar\\"), FPL("foo\\bar\\") },
+    { FPL("foo\\bar/"), FPL("foo\\bar\\") },
+    { FPL("foo/bar/"), FPL("foo\\bar\\") },
+    { FPL("foo\\bar\\"), FPL("foo\\bar\\") },
+    { FPL("\\foo/bar"), FPL("\\foo\\bar") },
+    { FPL("/foo\\bar"), FPL("\\foo\\bar") },
+    { FPL("c:/foo/bar/"), FPL("c:\\foo\\bar\\") },
+    { FPL("/foo/bar/"), FPL("\\foo\\bar\\") },
+    { FPL("\\foo\\bar\\"), FPL("\\foo\\bar\\") },
+    { FPL("c:\\foo/bar"), FPL("c:\\foo\\bar") },
+    { FPL("//foo\\bar\\"), FPL("\\\\foo\\bar\\") },
+    { FPL("\\\\foo\\bar\\"), FPL("\\\\foo\\bar\\") },
+    { FPL("//foo\\bar\\"), FPL("\\\\foo\\bar\\") },
+    // This method does not normalize the number of path separators.
+    { FPL("foo\\\\bar"), FPL("foo\\\\bar") },
+    { FPL("foo//bar"), FPL("foo\\\\bar") },
+    { FPL("foo/\\bar"), FPL("foo\\\\bar") },
+    { FPL("foo\\/bar"), FPL("foo\\\\bar") },
+    { FPL("///foo\\\\bar"), FPL("\\\\\\foo\\\\bar") },
+    { FPL("foo//bar///"), FPL("foo\\\\bar\\\\\\") },
+    { FPL("foo/\\bar/\\"), FPL("foo\\\\bar\\\\") },
+    { FPL("/\\foo\\/bar"), FPL("\\\\foo\\\\bar") },
+  };
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    FilePath input(cases[i].input);
+    FilePath observed = input.NormalizePathSeparators();
+    EXPECT_EQ(FilePath::StringType(cases[i].expected), observed.value()) <<
+              "i: " << i << ", input: " << input.value();
+  }
+}
+#endif
+
+TEST_F(FilePathTest, EndsWithSeparator) {
+  const UnaryBooleanTestData cases[] = {
+    { FPL(""), false },
+    { FPL("/"), true },
+    { FPL("foo/"), true },
+    { FPL("bar"), false },
+    { FPL("/foo/bar"), false },
+  };
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    FilePath input = FilePath(cases[i].input).NormalizePathSeparators();
+    EXPECT_EQ(cases[i].expected, input.EndsWithSeparator());
+  }
+}
+
+TEST_F(FilePathTest, AsEndingWithSeparator) {
+  const UnaryTestData cases[] = {
+    { FPL(""), FPL("") },
+    { FPL("/"), FPL("/") },
+    { FPL("foo"), FPL("foo/") },
+    { FPL("foo/"), FPL("foo/") }
+  };
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    FilePath input = FilePath(cases[i].input).NormalizePathSeparators();
+    FilePath expected = FilePath(cases[i].expected).NormalizePathSeparators();
+    EXPECT_EQ(expected.value(), input.AsEndingWithSeparator().value());
+  }
+}
+
+#if defined(OS_ANDROID)
+TEST_F(FilePathTest, ContentUriTest) {
+  const struct UnaryBooleanTestData cases[] = {
+    { FPL("content://foo.bar"),    true },
+    { FPL("content://foo.bar/"),   true },
+    { FPL("content://foo/bar"),    true },
+    { FPL("CoNTenT://foo.bar"),    true },
+    { FPL("content://"),           true },
+    { FPL("content:///foo.bar"),   true },
+    { FPL("content://3foo/bar"),   true },
+    { FPL("content://_foo/bar"),   true },
+    { FPL(".. "),                  false },
+    { FPL("foo.bar"),              false },
+    { FPL("content:foo.bar"),      false },
+    { FPL("content:/foo.ba"),      false },
+    { FPL("content:/dir/foo.bar"), false },
+    { FPL("content: //foo.bar"),   false },
+    { FPL("content%2a%2f%2f"),     false },
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    FilePath input(cases[i].input);
+    bool observed = input.IsContentUri();
+    EXPECT_EQ(cases[i].expected, observed) <<
+              "i: " << i << ", input: " << input.value();
+  }
+}
+#endif
+
+// Test the operator<<(ostream, FilePath).
+TEST_F(FilePathTest, PrintToOstream) {
+  std::stringstream ss;
+  FilePath fp(FPL("foo"));
+  ss << fp;
+  EXPECT_EQ("foo", ss.str());
+}
+
+// Test that GetHFSDecomposedForm() returns an empty result for invalid UTF-8
+// strings.
+#if defined(OS_MACOSX)
+TEST_F(FilePathTest, GetHFSDecomposedFormWithInvalidInput) {
+  const FilePath::CharType* cases[] = {
+    FPL("\xc3\x28"),
+    FPL("\xe2\x82\x28"),
+    FPL("\xe2\x28\xa1"),
+    FPL("\xf0\x28\x8c\xbc"),
+    FPL("\xf0\x28\x8c\x28"),
+  };
+  for (auto* invalid_input : cases) {
+    FilePath::StringType observed = FilePath::GetHFSDecomposedForm(
+        invalid_input);
+    EXPECT_TRUE(observed.empty());
+  }
+}
+#endif
+
+}  // namespace base
diff --git a/base/files/file_path_watcher.cc b/base/files/file_path_watcher.cc
new file mode 100644
index 0000000..af40346
--- /dev/null
+++ b/base/files/file_path_watcher.cc
@@ -0,0 +1,46 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Cross platform methods for FilePathWatcher. See the various platform
+// specific implementation files, too.
+
+#include "base/files/file_path_watcher.h"
+
+#include "base/logging.h"
+#include "build/build_config.h"
+
+namespace base {
+
+FilePathWatcher::~FilePathWatcher() {
+  DCHECK(sequence_checker_.CalledOnValidSequence());
+  impl_->Cancel();
+}
+
+// static
+bool FilePathWatcher::RecursiveWatchAvailable() {
+#if (defined(OS_MACOSX) && !defined(OS_IOS)) || defined(OS_WIN) || \
+    defined(OS_LINUX) || defined(OS_ANDROID) || defined(OS_AIX)
+  return true;
+#else
+  // FSEvents isn't available on iOS.
+  return false;
+#endif
+}
+
+FilePathWatcher::PlatformDelegate::PlatformDelegate() : cancelled_(false) {}
+
+FilePathWatcher::PlatformDelegate::~PlatformDelegate() {
+  DCHECK(is_cancelled());
+}
+
+bool FilePathWatcher::Watch(const FilePath& path,
+                            bool recursive,
+                            const Callback& callback) {
+  DCHECK(sequence_checker_.CalledOnValidSequence());
+  DCHECK(path.IsAbsolute());
+  return impl_->Watch(path, recursive, callback);
+}
+
+}  // namespace base
diff --git a/base/files/file_path_watcher.h b/base/files/file_path_watcher.h
new file mode 100644
index 0000000..9e29d0a
--- /dev/null
+++ b/base/files/file_path_watcher.h
@@ -0,0 +1,110 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This module provides a way to monitor a file or directory for changes.
+
+#ifndef BASE_FILES_FILE_PATH_WATCHER_H_
+#define BASE_FILES_FILE_PATH_WATCHER_H_
+
+#include <memory>
+#include <utility>
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/files/file_path.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/sequence_checker.h"
+#include "base/sequenced_task_runner.h"
+
+namespace base {
+
+// This class lets you register interest in changes on a FilePath.
+// The callback will get called whenever the file or directory referenced by
+// the FilePath is changed, including when it is created or deleted. Due to
+// limitations in the
+// underlying OS APIs, FilePathWatcher has slightly different semantics on OS X
+// than on Windows or Linux. FilePathWatcher on Linux and Windows will detect
+// modifications to files in a watched directory. FilePathWatcher on Mac will
+// detect the creation and deletion of files in a watched directory, but will
+// not detect modifications to those files. See file_path_watcher_kqueue.cc for
+// details.
+//
+// Must be destroyed on the sequence that invokes Watch().
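+//
+// A minimal usage sketch (illustrative only; the observer class and callback
+// names here are hypothetical, not part of this API):
+//
+//   class FooWatcher {
+//    public:
+//     bool Start(const base::FilePath& path) {
+//       return watcher_.Watch(
+//           path, false /* recursive */,
+//           base::Bind(&FooWatcher::OnPathChanged, base::Unretained(this)));
+//     }
+//
+//    private:
+//     void OnPathChanged(const base::FilePath& path, bool error) {
+//       // React to the change; if |error| is true, no further notifications
+//       // will arrive for this watch.
+//     }
+//
+//     base::FilePathWatcher watcher_;
+//   };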
+class BASE_EXPORT FilePathWatcher {
+ public:
+  // Callback type for Watch(). |path| points to the file that was updated,
+  // and |error| is true if the platform specific code detected an error. In
+  // that case, the callback won't be invoked again.
+  typedef base::Callback<void(const FilePath& path, bool error)> Callback;
+
+  // Used internally to encapsulate different members on different platforms.
+  class PlatformDelegate {
+   public:
+    PlatformDelegate();
+    virtual ~PlatformDelegate();
+
+    // Start watching the given |path| and report changes via |callback|.
+    virtual bool Watch(const FilePath& path,
+                       bool recursive,
+                       const Callback& callback) WARN_UNUSED_RESULT = 0;
+
+    // Stop watching. This is called from FilePathWatcher's dtor in order to
+    // allow the implementation to shut down properly while the object is
+    // still alive.
+    virtual void Cancel() = 0;
+
+   protected:
+    friend class FilePathWatcher;
+
+    scoped_refptr<SequencedTaskRunner> task_runner() const {
+      return task_runner_;
+    }
+
+    void set_task_runner(scoped_refptr<SequencedTaskRunner> runner) {
+      task_runner_ = std::move(runner);
+    }
+
+    // Must be called before the PlatformDelegate is deleted.
+    void set_cancelled() {
+      cancelled_ = true;
+    }
+
+    bool is_cancelled() const {
+      return cancelled_;
+    }
+
+   private:
+    scoped_refptr<SequencedTaskRunner> task_runner_;
+    bool cancelled_;
+
+    DISALLOW_COPY_AND_ASSIGN(PlatformDelegate);
+  };
+
+  FilePathWatcher();
+  ~FilePathWatcher();
+
+  // Returns true if the platform and OS version support recursive watches.
+  static bool RecursiveWatchAvailable();
+
+  // Invokes |callback| whenever updates to |path| are detected. This should be
+  // called at most once. Set |recursive| to true to watch |path| and its
+  // children. The callback will be invoked on the same sequence. Returns true
+  // on success.
+  //
+  // On POSIX, this must be called from a thread that supports
+  // FileDescriptorWatcher.
+  //
+  // Recursive watch is not supported on all platforms and file systems.
+  // Watch() will return false in the case of failure.
+  bool Watch(const FilePath& path, bool recursive, const Callback& callback);
+
+ private:
+  std::unique_ptr<PlatformDelegate> impl_;
+
+  SequenceChecker sequence_checker_;
+
+  DISALLOW_COPY_AND_ASSIGN(FilePathWatcher);
+};
+
+}  // namespace base
+
+#endif  // BASE_FILES_FILE_PATH_WATCHER_H_
diff --git a/base/files/file_path_watcher_fsevents.cc b/base/files/file_path_watcher_fsevents.cc
new file mode 100644
index 0000000..49ed36b
--- /dev/null
+++ b/base/files/file_path_watcher_fsevents.cc
@@ -0,0 +1,282 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file_path_watcher_fsevents.h"
+
+#include <dispatch/dispatch.h>
+
+#include <algorithm>
+#include <list>
+
+#include "base/bind.h"
+#include "base/files/file_util.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/mac/scoped_cftyperef.h"
+#include "base/strings/stringprintf.h"
+#include "base/threading/sequenced_task_runner_handle.h"
+
+namespace base {
+
+namespace {
+
+// The latency parameter passed to FSEventStreamCreate().
+const CFAbsoluteTime kEventLatencySeconds = 0.3;
+
+// Resolve any symlinks in the path.
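+// For example (hypothetical filesystem layout): if /a/b is a symlink to /x,
+// resolving "/a/b/c" walks the components "/", "a", "b", "c"; when "/a/b"
+// reads back as a link, its target's components replace it in the work list,
+// so the result becomes "/x/c". Resolution gives up (returning an empty path)
+// after kMaxLinksToResolve link expansions, which also guards against cycles.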
+FilePath ResolvePath(const FilePath& path) {
+  const unsigned kMaxLinksToResolve = 255;
+
+  std::vector<FilePath::StringType> component_vector;
+  path.GetComponents(&component_vector);
+  std::list<FilePath::StringType>
+      components(component_vector.begin(), component_vector.end());
+
+  FilePath result;
+  unsigned resolve_count = 0;
+  while (resolve_count < kMaxLinksToResolve && !components.empty()) {
+    FilePath component(*components.begin());
+    components.pop_front();
+
+    FilePath current;
+    if (component.IsAbsolute()) {
+      current = component;
+    } else {
+      current = result.Append(component);
+    }
+
+    FilePath target;
+    if (ReadSymbolicLink(current, &target)) {
+      if (target.IsAbsolute())
+        result.clear();
+      std::vector<FilePath::StringType> target_components;
+      target.GetComponents(&target_components);
+      components.insert(components.begin(), target_components.begin(),
+                        target_components.end());
+      resolve_count++;
+    } else {
+      result = current;
+    }
+  }
+
+  if (resolve_count >= kMaxLinksToResolve)
+    result.clear();
+  return result;
+}
+
+}  // namespace
+
+FilePathWatcherFSEvents::FilePathWatcherFSEvents()
+    : queue_(dispatch_queue_create(
+          base::StringPrintf("org.chromium.base.FilePathWatcher.%p", this)
+              .c_str(),
+          DISPATCH_QUEUE_SERIAL)),
+      fsevent_stream_(nullptr),
+      weak_factory_(this) {}
+
+FilePathWatcherFSEvents::~FilePathWatcherFSEvents() {
+  DCHECK(!task_runner() || task_runner()->RunsTasksInCurrentSequence());
+  DCHECK(callback_.is_null())
+      << "Cancel() must be called before FilePathWatcher is destroyed.";
+}
+
+bool FilePathWatcherFSEvents::Watch(const FilePath& path,
+                                    bool recursive,
+                                    const FilePathWatcher::Callback& callback) {
+  DCHECK(!callback.is_null());
+  DCHECK(callback_.is_null());
+
+  // This class could support non-recursive watches, but that is currently
+  // left to FilePathWatcherKQueue.
+  if (!recursive)
+    return false;
+
+  set_task_runner(SequencedTaskRunnerHandle::Get());
+  callback_ = callback;
+
+  FSEventStreamEventId start_event = FSEventsGetCurrentEventId();
+  // The block runtime would implicitly capture the reference, not the object
+  // it's referencing. Copy the path into a local, so that the value is
+  // captured by the block's scope.
+  const FilePath path_copy(path);
+
+  dispatch_async(queue_, ^{
+      StartEventStream(start_event, path_copy);
+  });
+  return true;
+}
+
+void FilePathWatcherFSEvents::Cancel() {
+  set_cancelled();
+  callback_.Reset();
+
+  // Switch to the dispatch queue to tear down the event stream. As the queue is
+  // owned by |this|, and this method is called from the destructor, execute the
+  // block synchronously.
+  dispatch_sync(queue_, ^{
+    if (fsevent_stream_) {
+      DestroyEventStream();
+      target_.clear();
+      resolved_target_.clear();
+    }
+  });
+}
+
+// static
+void FilePathWatcherFSEvents::FSEventsCallback(
+    ConstFSEventStreamRef stream,
+    void* event_watcher,
+    size_t num_events,
+    void* event_paths,
+    const FSEventStreamEventFlags flags[],
+    const FSEventStreamEventId event_ids[]) {
+  FilePathWatcherFSEvents* watcher =
+      reinterpret_cast<FilePathWatcherFSEvents*>(event_watcher);
+  bool root_changed = watcher->ResolveTargetPath();
+  std::vector<FilePath> paths;
+  FSEventStreamEventId root_change_at = FSEventStreamGetLatestEventId(stream);
+  for (size_t i = 0; i < num_events; i++) {
+    if (flags[i] & kFSEventStreamEventFlagRootChanged)
+      root_changed = true;
+    if (event_ids[i])
+      root_change_at = std::min(root_change_at, event_ids[i]);
+    paths.push_back(FilePath(
+        reinterpret_cast<char**>(event_paths)[i]).StripTrailingSeparators());
+  }
+
+  // Reinitialize the event stream if we find changes to the root. This is
+  // necessary since FSEvents doesn't report any events for the subtree after
+  // the directory to be watched gets created.
+  if (root_changed) {
+    // Resetting the event stream from within the callback fails (FSEvents spews
+    // bad file descriptor errors), so do the reset asynchronously.
+    //
+    // We can't dispatch_async a call to UpdateEventStream() directly because
+    // there would be no guarantee that |watcher| still exists when it runs.
+    //
+    // Instead, bounce on task_runner() and use a WeakPtr to verify that
+    // |watcher| still exists. If it does, dispatch_async a call to
+    // UpdateEventStream(). Because the destructor of |watcher| runs on
+    // task_runner() and calls dispatch_sync, it is guaranteed that |watcher|
+    // still exists when UpdateEventStream() runs.
+    watcher->task_runner()->PostTask(
+        FROM_HERE, Bind(
+                       [](WeakPtr<FilePathWatcherFSEvents> weak_watcher,
+                          FSEventStreamEventId root_change_at) {
+                         if (!weak_watcher)
+                           return;
+                         FilePathWatcherFSEvents* watcher = weak_watcher.get();
+                         dispatch_async(watcher->queue_, ^{
+                           watcher->UpdateEventStream(root_change_at);
+                         });
+                       },
+                       watcher->weak_factory_.GetWeakPtr(), root_change_at));
+  }
+
+  watcher->OnFilePathsChanged(paths);
+}
+
+void FilePathWatcherFSEvents::OnFilePathsChanged(
+    const std::vector<FilePath>& paths) {
+  DCHECK(!resolved_target_.empty());
+  task_runner()->PostTask(
+      FROM_HERE,
+      Bind(&FilePathWatcherFSEvents::DispatchEvents, weak_factory_.GetWeakPtr(),
+           paths, target_, resolved_target_));
+}
+
+void FilePathWatcherFSEvents::DispatchEvents(const std::vector<FilePath>& paths,
+                                             const FilePath& target,
+                                             const FilePath& resolved_target) {
+  DCHECK(task_runner()->RunsTasksInCurrentSequence());
+
+  // Don't issue callbacks after Cancel() has been called.
+  if (is_cancelled() || callback_.is_null()) {
+    return;
+  }
+
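+  // Only report events that touch the watched target or one of its
+  // descendants; the callback receives the originally requested |target|
+  // path, not the symlink-resolved path the event was matched against.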
+  for (const FilePath& path : paths) {
+    if (resolved_target.IsParent(path) || resolved_target == path) {
+      callback_.Run(target, false);
+      return;
+    }
+  }
+}
+
+void FilePathWatcherFSEvents::UpdateEventStream(
+    FSEventStreamEventId start_event) {
+  // It can happen that the watcher gets canceled while tasks that call this
+  // function are still in flight, so abort if this situation is detected.
+  if (resolved_target_.empty())
+    return;
+
+  if (fsevent_stream_)
+    DestroyEventStream();
+
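+  // Watch both the resolved target and its parent directory (together with
+  // kFSEventStreamCreateFlagWatchRoot below) so that changes to the target
+  // itself, such as its creation or deletion, are also observed.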
+  ScopedCFTypeRef<CFStringRef> cf_path(CFStringCreateWithCString(
+      NULL, resolved_target_.value().c_str(), kCFStringEncodingMacHFS));
+  ScopedCFTypeRef<CFStringRef> cf_dir_path(CFStringCreateWithCString(
+      NULL, resolved_target_.DirName().value().c_str(),
+      kCFStringEncodingMacHFS));
+  CFStringRef paths_array[] = { cf_path.get(), cf_dir_path.get() };
+  ScopedCFTypeRef<CFArrayRef> watched_paths(CFArrayCreate(
+      NULL, reinterpret_cast<const void**>(paths_array), arraysize(paths_array),
+      &kCFTypeArrayCallBacks));
+
+  FSEventStreamContext context;
+  context.version = 0;
+  context.info = this;
+  context.retain = NULL;
+  context.release = NULL;
+  context.copyDescription = NULL;
+
+  fsevent_stream_ = FSEventStreamCreate(NULL, &FSEventsCallback, &context,
+                                        watched_paths,
+                                        start_event,
+                                        kEventLatencySeconds,
+                                        kFSEventStreamCreateFlagWatchRoot);
+  FSEventStreamSetDispatchQueue(fsevent_stream_, queue_);
+
+  if (!FSEventStreamStart(fsevent_stream_)) {
+    task_runner()->PostTask(FROM_HERE,
+                            Bind(&FilePathWatcherFSEvents::ReportError,
+                                 weak_factory_.GetWeakPtr(), target_));
+  }
+}
+
+bool FilePathWatcherFSEvents::ResolveTargetPath() {
+  FilePath resolved = ResolvePath(target_).StripTrailingSeparators();
+  bool changed = resolved != resolved_target_;
+  resolved_target_ = resolved;
+  if (resolved_target_.empty()) {
+    task_runner()->PostTask(FROM_HERE,
+                            Bind(&FilePathWatcherFSEvents::ReportError,
+                                 weak_factory_.GetWeakPtr(), target_));
+  }
+  return changed;
+}
+
+void FilePathWatcherFSEvents::ReportError(const FilePath& target) {
+  DCHECK(task_runner()->RunsTasksInCurrentSequence());
+  if (!callback_.is_null()) {
+    callback_.Run(target, true);
+  }
+}
+
+void FilePathWatcherFSEvents::DestroyEventStream() {
+  FSEventStreamStop(fsevent_stream_);
+  FSEventStreamInvalidate(fsevent_stream_);
+  FSEventStreamRelease(fsevent_stream_);
+  fsevent_stream_ = NULL;
+}
+
+void FilePathWatcherFSEvents::StartEventStream(FSEventStreamEventId start_event,
+                                               const FilePath& path) {
+  DCHECK(resolved_target_.empty());
+
+  target_ = path;
+  ResolveTargetPath();
+  UpdateEventStream(start_event);
+}
+
+}  // namespace base
diff --git a/base/files/file_path_watcher_fsevents.h b/base/files/file_path_watcher_fsevents.h
new file mode 100644
index 0000000..dcdf2fb
--- /dev/null
+++ b/base/files/file_path_watcher_fsevents.h
@@ -0,0 +1,99 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FILES_FILE_PATH_WATCHER_FSEVENTS_H_
+#define BASE_FILES_FILE_PATH_WATCHER_FSEVENTS_H_
+
+#include <CoreServices/CoreServices.h>
+#include <stddef.h>
+
+#include <vector>
+
+#include "base/files/file_path.h"
+#include "base/files/file_path_watcher.h"
+#include "base/mac/scoped_dispatch_object.h"
+#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
+
+namespace base {
+
+// Mac-specific file watcher implementation based on FSEvents.
+// There are trade-offs between the FSEvents implementation and a kqueue
+// implementation. The biggest issues are that FSEvents on 10.6 sometimes drops
+// events and kqueue does not trigger for modifications to a file in a watched
+// directory. See file_path_watcher_mac.cc for the code that decides when to
+// use which one.
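+//
+// A minimal usage sketch (illustrative only: the path and callback here are
+// hypothetical, and callers normally go through FilePathWatcher rather than
+// instantiating this class directly):
+//
+//   FilePathWatcher watcher;
+//   watcher.Watch(FilePath("/some/dir"), true /* recursive */,
+//                 Bind(&OnPathChanged));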
+class FilePathWatcherFSEvents : public FilePathWatcher::PlatformDelegate {
+ public:
+  FilePathWatcherFSEvents();
+  ~FilePathWatcherFSEvents() override;
+
+  // FilePathWatcher::PlatformDelegate overrides.
+  bool Watch(const FilePath& path,
+             bool recursive,
+             const FilePathWatcher::Callback& callback) override;
+  void Cancel() override;
+
+ private:
+  static void FSEventsCallback(ConstFSEventStreamRef stream,
+                               void* event_watcher,
+                               size_t num_events,
+                               void* event_paths,
+                               const FSEventStreamEventFlags flags[],
+                               const FSEventStreamEventId event_ids[]);
+
+  // Called from FSEventsCallback whenever there is a change to the paths.
+  void OnFilePathsChanged(const std::vector<FilePath>& paths);
+
+  // Called on the message_loop() thread to dispatch path events. Can't access
+  // target_ and resolved_target_ directly as those are modified on the
+  // libdispatch thread.
+  void DispatchEvents(const std::vector<FilePath>& paths,
+                      const FilePath& target,
+                      const FilePath& resolved_target);
+
+  // (Re-)Initialize the event stream to start reporting events from
+  // |start_event|.
+  void UpdateEventStream(FSEventStreamEventId start_event);
+
+  // Returns true if resolving the target path got a different result than
+  // last time it was done.
+  bool ResolveTargetPath();
+
+  // Report an error watching the given target.
+  void ReportError(const FilePath& target);
+
+  // Destroy the event stream.
+  void DestroyEventStream();
+
+  // Start watching the FSEventStream.
+  void StartEventStream(FSEventStreamEventId start_event, const FilePath& path);
+
+  // Callback to notify upon changes.
+  // (Only accessed from the message_loop() thread.)
+  FilePathWatcher::Callback callback_;
+
+  // The dispatch queue on which the event stream is scheduled.
+  ScopedDispatchObject<dispatch_queue_t> queue_;
+
+  // Target path to watch (passed to callback).
+  // (Only accessed from the libdispatch queue.)
+  FilePath target_;
+
+  // Target path with all symbolic links resolved.
+  // (Only accessed from the libdispatch queue.)
+  FilePath resolved_target_;
+
+  // Backend stream we receive event callbacks from (strong reference).
+  // (Only accessed from the libdispatch queue.)
+  FSEventStreamRef fsevent_stream_;
+
+  WeakPtrFactory<FilePathWatcherFSEvents> weak_factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(FilePathWatcherFSEvents);
+};
+
+}  // namespace base
+
+#endif  // BASE_FILES_FILE_PATH_WATCHER_FSEVENTS_H_
diff --git a/base/files/file_path_watcher_fuchsia.cc b/base/files/file_path_watcher_fuchsia.cc
new file mode 100644
index 0000000..53c927e
--- /dev/null
+++ b/base/files/file_path_watcher_fuchsia.cc
@@ -0,0 +1,55 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file_path_watcher.h"
+
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/threading/sequenced_task_runner_handle.h"
+
+namespace base {
+
+namespace {
+
+class FilePathWatcherImpl : public FilePathWatcher::PlatformDelegate {
+ public:
+  FilePathWatcherImpl() {}
+  ~FilePathWatcherImpl() override {}
+
+  bool Watch(const FilePath& path,
+             bool recursive,
+             const FilePathWatcher::Callback& callback) override;
+
+  void Cancel() override;
+
+ private:
+  FilePathWatcher::Callback callback_;
+  FilePath target_;
+
+  DISALLOW_COPY_AND_ASSIGN(FilePathWatcherImpl);
+};
+
+bool FilePathWatcherImpl::Watch(const FilePath& path,
+                                bool recursive,
+                                const FilePathWatcher::Callback& callback) {
+  DCHECK(!callback.is_null());
+  DCHECK(callback_.is_null());
+
+  callback_ = callback;
+  NOTIMPLEMENTED();
+  return false;
+}
+
+void FilePathWatcherImpl::Cancel() {
+  NOTIMPLEMENTED();
+}
+
+}  // namespace
+
+FilePathWatcher::FilePathWatcher() {
+  sequence_checker_.DetachFromSequence();
+  impl_ = std::make_unique<FilePathWatcherImpl>();
+}
+
+}  // namespace base
diff --git a/base/files/file_path_watcher_kqueue.cc b/base/files/file_path_watcher_kqueue.cc
new file mode 100644
index 0000000..036809d
--- /dev/null
+++ b/base/files/file_path_watcher_kqueue.cc
@@ -0,0 +1,372 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file_path_watcher_kqueue.h"
+
+#include <fcntl.h>
+#include <stddef.h>
+#include <sys/param.h>
+
+#include <algorithm>
+
+#include "base/bind.h"
+#include "base/files/file_util.h"
+#include "base/logging.h"
+#include "base/strings/stringprintf.h"
+#include "base/threading/sequenced_task_runner_handle.h"
+
+// On some platforms these are not defined.
+#if !defined(EV_RECEIPT)
+#define EV_RECEIPT 0
+#endif
+#if !defined(O_EVTONLY)
+#define O_EVTONLY O_RDONLY
+#endif
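+
+// O_EVTONLY (where available) opens the file for event notification only, so
+// the descriptor does not prevent the underlying volume from being unmounted;
+// the O_RDONLY fallback keeps this building elsewhere at the cost of that
+// property.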
+
+namespace base {
+
+FilePathWatcherKQueue::FilePathWatcherKQueue() : kqueue_(-1) {}
+
+FilePathWatcherKQueue::~FilePathWatcherKQueue() {
+  DCHECK(!task_runner() || task_runner()->RunsTasksInCurrentSequence());
+}
+
+void FilePathWatcherKQueue::ReleaseEvent(struct kevent& event) {
+  CloseFileDescriptor(&event.ident);
+  EventData* entry = EventDataForKevent(event);
+  delete entry;
+  event.udata = NULL;
+}
+
+int FilePathWatcherKQueue::EventsForPath(FilePath path, EventVector* events) {
+  // Make sure that we are working with a clean slate.
+  DCHECK(events->empty());
+
+  std::vector<FilePath::StringType> components;
+  path.GetComponents(&components);
+
+  if (components.empty()) {
+    return -1;
+  }
+
+  int last_existing_entry = 0;
+  FilePath built_path;
+  bool path_still_exists = true;
+  for (std::vector<FilePath::StringType>::iterator i = components.begin();
+      i != components.end(); ++i) {
+    if (i == components.begin()) {
+      built_path = FilePath(*i);
+    } else {
+      built_path = built_path.Append(*i);
+    }
+    uintptr_t fd = kNoFileDescriptor;
+    if (path_still_exists) {
+      fd = FileDescriptorForPath(built_path);
+      if (fd == kNoFileDescriptor) {
+        path_still_exists = false;
+      } else {
+        ++last_existing_entry;
+      }
+    }
+    FilePath::StringType subdir = (i != (components.end() - 1)) ? *(i + 1) : "";
+    EventData* data = new EventData(built_path, subdir);
+    struct kevent event;
+    EV_SET(&event, fd, EVFILT_VNODE, (EV_ADD | EV_CLEAR | EV_RECEIPT),
+           (NOTE_DELETE | NOTE_WRITE | NOTE_ATTRIB |
+            NOTE_RENAME | NOTE_REVOKE | NOTE_EXTEND), 0, data);
+    events->push_back(event);
+  }
+  return last_existing_entry;
+}
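+
+// As an illustrative example: for a target of "/foo/bar", EventsForPath()
+// builds three kevents, one each for "/", "/foo" and "/foo/bar". Each
+// kevent's |udata| holds an EventData whose |subdir_| names the next
+// component ("foo", "bar", and "" for the leaf), which is how the handlers
+// below walk back down the chain when a parent directory changes.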
+
+uintptr_t FilePathWatcherKQueue::FileDescriptorForPath(const FilePath& path) {
+  int fd = HANDLE_EINTR(open(path.value().c_str(), O_EVTONLY));
+  if (fd == -1)
+    return kNoFileDescriptor;
+  return fd;
+}
+
+void FilePathWatcherKQueue::CloseFileDescriptor(uintptr_t* fd) {
+  if (*fd == kNoFileDescriptor) {
+    return;
+  }
+
+  if (IGNORE_EINTR(close(*fd)) != 0) {
+    DPLOG(ERROR) << "close";
+  }
+  *fd = kNoFileDescriptor;
+}
+
+bool FilePathWatcherKQueue::AreKeventValuesValid(struct kevent* kevents,
+                                                 int count) {
+  if (count < 0) {
+    DPLOG(ERROR) << "kevent";
+    return false;
+  }
+  bool valid = true;
+  for (int i = 0; i < count; ++i) {
+    if (kevents[i].flags & EV_ERROR && kevents[i].data) {
+      // Find the kevent in |events_| that matches the kevent with the error.
+      EventVector::iterator event = events_.begin();
+      for (; event != events_.end(); ++event) {
+        if (event->ident == kevents[i].ident) {
+          break;
+        }
+      }
+      std::string path_name;
+      if (event != events_.end()) {
+        EventData* event_data = EventDataForKevent(*event);
+        if (event_data != NULL) {
+          path_name = event_data->path_.value();
+        }
+      }
+      if (path_name.empty()) {
+        path_name = base::StringPrintf(
+            "fd %ld", static_cast<long>(kevents[i].ident));
+      }
+      DLOG(ERROR) << "Error: " << kevents[i].data << " for " << path_name;
+      valid = false;
+    }
+  }
+  return valid;
+}
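+
+// Where EV_RECEIPT is supported, kevent() acknowledges each registered event
+// in-band: the returned record carries EV_ERROR in |flags| and an errno-style
+// code in |data|, which is what the scan above checks.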
+
+void FilePathWatcherKQueue::HandleAttributesChange(
+    const EventVector::iterator& event,
+    bool* target_file_affected,
+    bool* update_watches) {
+  EventVector::iterator next_event = event + 1;
+  EventData* next_event_data = EventDataForKevent(*next_event);
+  // Check to see if the next item in path is still accessible.
+  uintptr_t have_access = FileDescriptorForPath(next_event_data->path_);
+  if (have_access == kNoFileDescriptor) {
+    *target_file_affected = true;
+    *update_watches = true;
+    EventVector::iterator local_event(event);
+    for (; local_event != events_.end(); ++local_event) {
+      // Close all nodes from the event down. This has the side effect of
+      // potentially rendering other events in |updates| invalid.
+      // There is no need to remove the events from |kqueue_| because this
+      // happens as a side effect of closing the file descriptor.
+      CloseFileDescriptor(&local_event->ident);
+    }
+  } else {
+    CloseFileDescriptor(&have_access);
+  }
+}
+
+void FilePathWatcherKQueue::HandleDeleteOrMoveChange(
+    const EventVector::iterator& event,
+    bool* target_file_affected,
+    bool* update_watches) {
+  *target_file_affected = true;
+  *update_watches = true;
+  EventVector::iterator local_event(event);
+  for (; local_event != events_.end(); ++local_event) {
+    // Close all nodes from the event down. This has the side effect of
+    // potentially rendering other events in |updates| invalid.
+    // There is no need to remove the events from |kqueue_| because this
+    // happens as a side effect of closing the file descriptor.
+    CloseFileDescriptor(&local_event->ident);
+  }
+}
+
+void FilePathWatcherKQueue::HandleCreateItemChange(
+    const EventVector::iterator& event,
+    bool* target_file_affected,
+    bool* update_watches) {
+  // Get the next item in the path.
+  EventVector::iterator next_event = event + 1;
+  // Check to see if it already has a valid file descriptor.
+  if (!IsKeventFileDescriptorOpen(*next_event)) {
+    EventData* next_event_data = EventDataForKevent(*next_event);
+    // If not, attempt to open a file descriptor for it.
+    next_event->ident = FileDescriptorForPath(next_event_data->path_);
+    if (IsKeventFileDescriptorOpen(*next_event)) {
+      *update_watches = true;
+      if (next_event_data->subdir_.empty()) {
+        *target_file_affected = true;
+      }
+    }
+  }
+}
+
+bool FilePathWatcherKQueue::UpdateWatches(bool* target_file_affected) {
+  // Iterate over events adding kevents for items that exist to the kqueue.
+  // Then check to see if new components in the path have been created.
+  // Repeat until no new components in the path are detected.
+  // This is to get around races in directory creation in a watched path.
+  bool update_watches = true;
+  while (update_watches) {
+    size_t valid;
+    for (valid = 0; valid < events_.size(); ++valid) {
+      if (!IsKeventFileDescriptorOpen(events_[valid])) {
+        break;
+      }
+    }
+    if (valid == 0) {
+      // Not even the root of the watched path is accessible, so there is
+      // nothing left to watch.
+      return false;
+    }
+
+    EventVector updates(valid);
+    int count = HANDLE_EINTR(kevent(kqueue_, &events_[0], valid, &updates[0],
+                                    valid, NULL));
+    if (!AreKeventValuesValid(&updates[0], count)) {
+      return false;
+    }
+    update_watches = false;
+    for (; valid < events_.size(); ++valid) {
+      EventData* event_data = EventDataForKevent(events_[valid]);
+      events_[valid].ident = FileDescriptorForPath(event_data->path_);
+      if (IsKeventFileDescriptorOpen(events_[valid])) {
+        update_watches = true;
+        if (event_data->subdir_.empty()) {
+          *target_file_affected = true;
+        }
+      } else {
+        break;
+      }
+    }
+  }
+  return true;
+}
+
+bool FilePathWatcherKQueue::Watch(const FilePath& path,
+                                  bool recursive,
+                                  const FilePathWatcher::Callback& callback) {
+  DCHECK(target_.value().empty());  // Can only watch one path.
+  DCHECK(!callback.is_null());
+  DCHECK_EQ(kqueue_, -1);
+  // Recursive watch is not supported using kqueue.
+  DCHECK(!recursive);
+
+  callback_ = callback;
+  target_ = path;
+
+  set_task_runner(SequencedTaskRunnerHandle::Get());
+
+  kqueue_ = kqueue();
+  if (kqueue_ == -1) {
+    DPLOG(ERROR) << "kqueue";
+    return false;
+  }
+
+  int last_entry = EventsForPath(target_, &events_);
+  DCHECK_NE(last_entry, 0);
+
+  EventVector responses(last_entry);
+
+  int count = HANDLE_EINTR(kevent(kqueue_, &events_[0], last_entry,
+                                  &responses[0], last_entry, NULL));
+  if (!AreKeventValuesValid(&responses[0], count)) {
+    // Calling Cancel() here to close any file descriptors that were opened.
+    // This would happen in the destructor anyways, but FilePathWatchers tend to
+    // be long lived, and if an error has occurred, there is no reason to waste
+    // the file descriptors.
+    Cancel();
+    return false;
+  }
+
+  // It's safe to use Unretained() because the watch is cancelled and the
+  // callback cannot be invoked after |kqueue_watch_controller_| (which is a
+  // member of |this|) has been deleted.
+  kqueue_watch_controller_ = FileDescriptorWatcher::WatchReadable(
+      kqueue_,
+      Bind(&FilePathWatcherKQueue::OnKQueueReadable, Unretained(this)));
+
+  return true;
+}
+
+void FilePathWatcherKQueue::Cancel() {
+  if (!task_runner()) {
+    set_cancelled();
+    return;
+  }
+
+  DCHECK(task_runner()->RunsTasksInCurrentSequence());
+  if (!is_cancelled()) {
+    set_cancelled();
+    kqueue_watch_controller_.reset();
+    if (IGNORE_EINTR(close(kqueue_)) != 0) {
+      DPLOG(ERROR) << "close kqueue";
+    }
+    kqueue_ = -1;
+    std::for_each(events_.begin(), events_.end(), ReleaseEvent);
+    events_.clear();
+    callback_.Reset();
+  }
+}
+
+void FilePathWatcherKQueue::OnKQueueReadable() {
+  DCHECK(task_runner()->RunsTasksInCurrentSequence());
+  DCHECK(events_.size());
+
+  // Request the file system update notifications that have occurred and return
+  // them in |updates|. |count| will contain the number of updates that have
+  // occurred.
+  EventVector updates(events_.size());
+  struct timespec timeout = {0, 0};
+  int count = HANDLE_EINTR(kevent(kqueue_, NULL, 0, &updates[0], updates.size(),
+                                  &timeout));
+
+  // Error values are stored within updates, so check to make sure that no
+  // errors occurred.
+  if (!AreKeventValuesValid(&updates[0], count)) {
+    callback_.Run(target_, true /* error */);
+    Cancel();
+    return;
+  }
+
+  bool update_watches = false;
+  bool send_notification = false;
+
+  // Iterate through each of the updates and react to them.
+  for (int i = 0; i < count; ++i) {
+    // Find our kevent record that matches the update notification.
+    EventVector::iterator event = events_.begin();
+    for (; event != events_.end(); ++event) {
+      if (!IsKeventFileDescriptorOpen(*event) ||
+          event->ident == updates[i].ident) {
+        break;
+      }
+    }
+    if (event == events_.end() || !IsKeventFileDescriptorOpen(*event)) {
+      // The event may no longer exist in |events_| because another event
+      // modified |events_| in such a way to make it invalid. For example if
+      // the path is /foo/bar/bam and foo is deleted, NOTE_DELETE events for
+      // foo, bar and bam will be sent. If foo is processed first, then
+      // the file descriptors for bar and bam will already be closed and set
+      // to -1 before they get a chance to be processed.
+      continue;
+    }
+
+    EventData* event_data = EventDataForKevent(*event);
+
+    // If the subdir is empty, this is the last item on the path and is the
+    // target file.
+    bool target_file_affected = event_data->subdir_.empty();
+    if ((updates[i].fflags & NOTE_ATTRIB) && !target_file_affected) {
+      HandleAttributesChange(event, &target_file_affected, &update_watches);
+    }
+    if (updates[i].fflags & (NOTE_DELETE | NOTE_REVOKE | NOTE_RENAME)) {
+      HandleDeleteOrMoveChange(event, &target_file_affected, &update_watches);
+    }
+    if ((updates[i].fflags & NOTE_WRITE) && !target_file_affected) {
+      HandleCreateItemChange(event, &target_file_affected, &update_watches);
+    }
+    send_notification |= target_file_affected;
+  }
+
+  if (update_watches) {
+    if (!UpdateWatches(&send_notification)) {
+      callback_.Run(target_, true /* error */);
+      Cancel();
+    }
+  }
+
+  if (send_notification) {
+    callback_.Run(target_, false);
+  }
+}
+
+}  // namespace base
diff --git a/base/files/file_path_watcher_kqueue.h b/base/files/file_path_watcher_kqueue.h
new file mode 100644
index 0000000..ef79be5
--- /dev/null
+++ b/base/files/file_path_watcher_kqueue.h
@@ -0,0 +1,125 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FILES_FILE_PATH_WATCHER_KQUEUE_H_
+#define BASE_FILES_FILE_PATH_WATCHER_KQUEUE_H_
+
+#include <sys/event.h>
+
+#include <memory>
+#include <vector>
+
+#include "base/files/file_descriptor_watcher_posix.h"
+#include "base/files/file_path.h"
+#include "base/files/file_path_watcher.h"
+#include "base/macros.h"
+
+namespace base {
+
+// Mac-specific file watcher implementation based on kqueue.
+// The Linux and Windows versions are able to detect:
+// - file creation/deletion/modification in a watched directory
+// - file creation/deletion/modification for a watched file
+// - modifications to the paths to a watched object that would affect the
+//   object such as renaming/attribute changes, etc.
+// The kqueue implementation will handle all of the items in the list above
+// except for detecting modifications to files in a watched directory. It will
+// detect the creation and deletion of files, just not the modification of
+// files. It does however detect the attribute changes that the FSEvents impl
+// would miss.
+class FilePathWatcherKQueue : public FilePathWatcher::PlatformDelegate {
+ public:
+  FilePathWatcherKQueue();
+  ~FilePathWatcherKQueue() override;
+
+  // FilePathWatcher::PlatformDelegate overrides.
+  bool Watch(const FilePath& path,
+             bool recursive,
+             const FilePathWatcher::Callback& callback) override;
+  void Cancel() override;
+
+ private:
+  class EventData {
+   public:
+    EventData(const FilePath& path, const FilePath::StringType& subdir)
+        : path_(path), subdir_(subdir) { }
+    FilePath path_;  // Full path to this item.
+    FilePath::StringType subdir_;  // Path to any sub item.
+  };
+
+  typedef std::vector<struct kevent> EventVector;
+
+  // Called when data is available in |kqueue_|.
+  void OnKQueueReadable();
+
+  // Returns true if the kevent values are error free.
+  bool AreKeventValuesValid(struct kevent* kevents, int count);
+
+  // Respond to a change of attributes of the path component represented by
+  // |event|. Sets |target_file_affected| to true if |target_| is affected.
+  // Sets |update_watches| to true if |events_| need to be updated.
+  void HandleAttributesChange(const EventVector::iterator& event,
+                              bool* target_file_affected,
+                              bool* update_watches);
+
+  // Respond to a move or deletion of the path component represented by
+  // |event|. Sets |target_file_affected| to true if |target_| is affected.
+  // Sets |update_watches| to true if |events_| need to be updated.
+  void HandleDeleteOrMoveChange(const EventVector::iterator& event,
+                                bool* target_file_affected,
+                                bool* update_watches);
+
+  // Respond to a creation of an item in the path component represented by
+  // |event|. Sets |target_file_affected| to true if |target_| is affected.
+  // Sets |update_watches| to true if |events_| need to be updated.
+  void HandleCreateItemChange(const EventVector::iterator& event,
+                              bool* target_file_affected,
+                              bool* update_watches);
+
+  // Update |events_| with the current status of the system.
+  // Sets |target_file_affected| to true if |target_| is affected.
+  // Returns false if an error occurs.
+  bool UpdateWatches(bool* target_file_affected);
+
+  // Fills |events| with one kevent per component in |path|.
+  // Returns the number of valid events created, where a valid event is
+  // defined as one whose ident (file descriptor) field is != -1.
+  static int EventsForPath(FilePath path, EventVector* events);
+
+  // Release a kevent generated by EventsForPath.
+  static void ReleaseEvent(struct kevent& event);
+
+  // Returns a file descriptor that will not block the system from deleting
+  // the file it references.
+  static uintptr_t FileDescriptorForPath(const FilePath& path);
+
+  static const uintptr_t kNoFileDescriptor = static_cast<uintptr_t>(-1);
+
+  // Closes |*fd| and sets |*fd| to -1.
+  static void CloseFileDescriptor(uintptr_t* fd);
+
+  // Returns true if the kevent has an open file descriptor.
+  static bool IsKeventFileDescriptorOpen(const struct kevent& event) {
+    return event.ident != kNoFileDescriptor;
+  }
+
+  static EventData* EventDataForKevent(const struct kevent& event) {
+    return reinterpret_cast<EventData*>(event.udata);
+  }
+
+  EventVector events_;
+  FilePathWatcher::Callback callback_;
+  FilePath target_;
+  int kqueue_;
+
+  // Throughout the lifetime of this, OnKQueueReadable() will be called when
+  // data is available in |kqueue_|.
+  std::unique_ptr<FileDescriptorWatcher::Controller> kqueue_watch_controller_;
+
+  DISALLOW_COPY_AND_ASSIGN(FilePathWatcherKQueue);
+};
+
+}  // namespace base
+
+#endif  // BASE_FILES_FILE_PATH_WATCHER_KQUEUE_H_
diff --git a/base/files/file_path_watcher_linux.cc b/base/files/file_path_watcher_linux.cc
new file mode 100644
index 0000000..c58d686
--- /dev/null
+++ b/base/files/file_path_watcher_linux.cc
@@ -0,0 +1,680 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file_path_watcher.h"
+
+#include <errno.h>
+#include <stddef.h>
+#include <string.h>
+#include <sys/inotify.h>
+#include <sys/ioctl.h>
+#include <sys/select.h>
+#include <unistd.h>
+
+#include <algorithm>
+#include <map>
+#include <memory>
+#include <set>
+#include <unordered_map>
+#include <utility>
+#include <vector>
+
+#include "base/bind.h"
+#include "base/files/file_enumerator.h"
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/lazy_instance.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/memory/weak_ptr.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/single_thread_task_runner.h"
+#include "base/stl_util.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/sequenced_task_runner_handle.h"
+#include "base/trace_event/trace_event.h"
+
+namespace base {
+
+namespace {
+
+class FilePathWatcherImpl;
+class InotifyReader;
+
+class InotifyReaderThreadDelegate final : public PlatformThread::Delegate {
+ public:
+  explicit InotifyReaderThreadDelegate(int inotify_fd)
+      : inotify_fd_(inotify_fd) {}
+
+  ~InotifyReaderThreadDelegate() override = default;
+
+ private:
+  void ThreadMain() override;
+
+  int inotify_fd_;
+
+  DISALLOW_COPY_AND_ASSIGN(InotifyReaderThreadDelegate);
+};
+
+// Singleton to manage all inotify watches.
+// TODO(tony): It would be nice if this wasn't a singleton.
+// http://crbug.com/38174
+class InotifyReader {
+ public:
+  typedef int Watch;  // Watch descriptor used by AddWatch and RemoveWatch.
+  static const Watch kInvalidWatch = -1;
+
+  // Watch directory |path| for changes. |watcher| will be notified on each
+  // change. Returns kInvalidWatch on failure.
+  Watch AddWatch(const FilePath& path, FilePathWatcherImpl* watcher);
+
+  // Remove |watch| if it's valid.
+  void RemoveWatch(Watch watch, FilePathWatcherImpl* watcher);
+
+  // Callback for InotifyReaderTask.
+  void OnInotifyEvent(const inotify_event* event);
+
+ private:
+  friend struct LazyInstanceTraitsBase<InotifyReader>;
+
+  typedef std::set<FilePathWatcherImpl*> WatcherSet;
+
+  InotifyReader();
+  // There is no destructor because |g_inotify_reader| is a
+  // base::LazyInstance::Leaky object. Having a destructor causes build
+  // issues with GCC 6 (http://crbug.com/636346).
+
+  // Returns true on successful thread creation.
+  bool StartThread();
+
+  // We keep track of which delegates want to be notified on which watches.
+  std::unordered_map<Watch, WatcherSet> watchers_;
+
+  // Lock to protect watchers_.
+  Lock lock_;
+
+  // File descriptor returned by inotify_init.
+  const int inotify_fd_;
+
+  // Thread delegate for the Inotify thread.
+  InotifyReaderThreadDelegate thread_delegate_;
+
+  // Flag set to true when startup was successful.
+  bool valid_;
+
+  DISALLOW_COPY_AND_ASSIGN(InotifyReader);
+};
+
+class FilePathWatcherImpl : public FilePathWatcher::PlatformDelegate {
+ public:
+  FilePathWatcherImpl();
+  ~FilePathWatcherImpl() override;
+
+  // Called for each event coming from the watch. |fired_watch| identifies the
+  // watch that fired, |child| indicates what has changed, and is relative to
+  // the currently watched path for |fired_watch|.
+  //
+  // |created| is true if the object appears.
+  // |deleted| is true if the object disappears.
+  // |is_dir| is true if the object is a directory.
+  void OnFilePathChanged(InotifyReader::Watch fired_watch,
+                         const FilePath::StringType& child,
+                         bool created,
+                         bool deleted,
+                         bool is_dir);
+
+ private:
+  void OnFilePathChangedOnOriginSequence(InotifyReader::Watch fired_watch,
+                                         const FilePath::StringType& child,
+                                         bool created,
+                                         bool deleted,
+                                         bool is_dir);
+
+  // Start watching |path| for changes and notify |delegate| on each change.
+  // Returns true if watch for |path| has been added successfully.
+  bool Watch(const FilePath& path,
+             bool recursive,
+             const FilePathWatcher::Callback& callback) override;
+
+  // Cancel the watch. This unregisters the instance with InotifyReader.
+  void Cancel() override;
+
+  // Inotify watches are installed for all directory components of |target_|.
+  // A WatchEntry instance holds:
+  // - |watch|: the watch descriptor for a component.
+  // - |subdir|: the subdirectory that identifies the next component.
+  //   - For the last component, there is no next component, so it is empty.
+  // - |linkname|: the target of the symlink.
+  //   - Only if the target being watched is a symbolic link.
+  struct WatchEntry {
+    explicit WatchEntry(const FilePath::StringType& dirname)
+        : watch(InotifyReader::kInvalidWatch),
+          subdir(dirname) {}
+
+    InotifyReader::Watch watch;
+    FilePath::StringType subdir;
+    FilePath::StringType linkname;
+  };
+  typedef std::vector<WatchEntry> WatchVector;
+
+  // Reconfigure to watch for the most specific parent directory of |target_|
+  // that exists. Also calls UpdateRecursiveWatches() below.
+  void UpdateWatches();
+
+  // Reconfigure to recursively watch |target_| and all its sub-directories.
+  // - This is a no-op if the watch is not recursive.
+  // - If |target_| does not exist, then clear all the recursive watches.
+  // - Assuming |target_| exists, passing kInvalidWatch as |fired_watch| forces
+  //   addition of recursive watches for |target_|.
+  // - Otherwise, only the directory associated with |fired_watch| and its
+  //   sub-directories will be reconfigured.
+  void UpdateRecursiveWatches(InotifyReader::Watch fired_watch, bool is_dir);
+
+  // Enumerate recursively through |path| and add / update watches.
+  void UpdateRecursiveWatchesForPath(const FilePath& path);
+
+  // Do internal bookkeeping to update mappings between |watch| and its
+  // associated full path |path|.
+  void TrackWatchForRecursion(InotifyReader::Watch watch, const FilePath& path);
+
+  // Remove all the recursive watches.
+  void RemoveRecursiveWatches();
+
+  // |path| is a symlink to a non-existent target. Attempt to add a watch to
+  // the link target's parent directory. Update |watch_entry| on success.
+  void AddWatchForBrokenSymlink(const FilePath& path, WatchEntry* watch_entry);
+
+  bool HasValidWatchVector() const;
+
+  // Callback to notify upon changes.
+  FilePathWatcher::Callback callback_;
+
+  // The file or directory we're supposed to watch.
+  FilePath target_;
+
+  bool recursive_;
+
+  // The vector of watches and next component names for all path components,
+  // starting at the root directory. The last entry corresponds to the watch for
+  // |target_| and always stores an empty next component name in |subdir|.
+  WatchVector watches_;
+
+  std::unordered_map<InotifyReader::Watch, FilePath> recursive_paths_by_watch_;
+  std::map<FilePath, InotifyReader::Watch> recursive_watches_by_path_;
+
+  // Read only while INotifyReader::lock_ is held, and used to post asynchronous
+  // notifications to the Watcher on its home task_runner(). Ideally this should
+  // be const, but since it is initialized from |weak_factory_|, which must
+  // appear after it, that is not possible.
+  WeakPtr<FilePathWatcherImpl> weak_ptr_;
+
+  WeakPtrFactory<FilePathWatcherImpl> weak_factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(FilePathWatcherImpl);
+};
+
+LazyInstance<InotifyReader>::Leaky g_inotify_reader = LAZY_INSTANCE_INITIALIZER;
+
+void InotifyReaderThreadDelegate::ThreadMain() {
+  PlatformThread::SetName("inotify_reader");
+
+  // Make sure the file descriptors are good for use with select().
+  CHECK_LE(0, inotify_fd_);
+  CHECK_GT(FD_SETSIZE, inotify_fd_);
+
+  while (true) {
+    fd_set rfds;
+    FD_ZERO(&rfds);
+    FD_SET(inotify_fd_, &rfds);
+
+    // Wait until some inotify events are available.
+    int select_result =
+        HANDLE_EINTR(select(inotify_fd_ + 1, &rfds, nullptr, nullptr, nullptr));
+    if (select_result < 0) {
+      DPLOG(WARNING) << "select failed";
+      return;
+    }
+
+    // Adjust buffer size to current event queue size.
+    int buffer_size;
+    int ioctl_result = HANDLE_EINTR(ioctl(inotify_fd_, FIONREAD, &buffer_size));
+
+    if (ioctl_result != 0) {
+      DPLOG(WARNING) << "ioctl failed";
+      return;
+    }
+
+    std::vector<char> buffer(buffer_size);
+
+    ssize_t bytes_read =
+        HANDLE_EINTR(read(inotify_fd_, &buffer[0], buffer_size));
+
+    if (bytes_read < 0) {
+      DPLOG(WARNING) << "read from inotify fd failed";
+      return;
+    }
+
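+    // inotify events are variable-length records: a fixed inotify_event
+    // header followed by |len| bytes of NUL-padded name, so the buffer is
+    // walked record by record rather than indexed as an array.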
+    ssize_t i = 0;
+    while (i < bytes_read) {
+      inotify_event* event = reinterpret_cast<inotify_event*>(&buffer[i]);
+      size_t event_size = sizeof(inotify_event) + event->len;
+      DCHECK(i + event_size <= static_cast<size_t>(bytes_read));
+      g_inotify_reader.Get().OnInotifyEvent(event);
+      i += event_size;
+    }
+  }
+}
+
+InotifyReader::InotifyReader()
+    : inotify_fd_(inotify_init()),
+      thread_delegate_(inotify_fd_),
+      valid_(false) {
+  if (inotify_fd_ < 0) {
+    PLOG(ERROR) << "inotify_init() failed";
+    return;
+  }
+
+  if (!StartThread())
+    return;
+
+  valid_ = true;
+}
+
+bool InotifyReader::StartThread() {
+  // This object is LazyInstance::Leaky, so thread_delegate_ will outlive the
+  // thread.
+  return PlatformThread::CreateNonJoinable(0, &thread_delegate_);
+}
+
+InotifyReader::Watch InotifyReader::AddWatch(
+    const FilePath& path, FilePathWatcherImpl* watcher) {
+  if (!valid_)
+    return kInvalidWatch;
+
+  AutoLock auto_lock(lock_);
+
+  Watch watch = inotify_add_watch(inotify_fd_, path.value().c_str(),
+                                  IN_ATTRIB | IN_CREATE | IN_DELETE |
+                                  IN_CLOSE_WRITE | IN_MOVE |
+                                  IN_ONLYDIR);
+
+  if (watch == kInvalidWatch)
+    return kInvalidWatch;
+
+  watchers_[watch].insert(watcher);
+
+  return watch;
+}
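+
+// Note: inotify returns the same watch descriptor when the same path is
+// watched again, which is why |watchers_| maps a single Watch to a set of
+// FilePathWatcherImpl* rather than to one watcher.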
+
+void InotifyReader::RemoveWatch(Watch watch, FilePathWatcherImpl* watcher) {
+  if (!valid_ || (watch == kInvalidWatch))
+    return;
+
+  AutoLock auto_lock(lock_);
+
+  watchers_[watch].erase(watcher);
+
+  if (watchers_[watch].empty()) {
+    watchers_.erase(watch);
+    inotify_rm_watch(inotify_fd_, watch);
+  }
+}
+
+void InotifyReader::OnInotifyEvent(const inotify_event* event) {
+  if (event->mask & IN_IGNORED)
+    return;
+
+  FilePath::StringType child(event->len ? event->name : FILE_PATH_LITERAL(""));
+  AutoLock auto_lock(lock_);
+
+  for (WatcherSet::iterator watcher = watchers_[event->wd].begin();
+       watcher != watchers_[event->wd].end();
+       ++watcher) {
+    (*watcher)->OnFilePathChanged(event->wd,
+                                  child,
+                                  event->mask & (IN_CREATE | IN_MOVED_TO),
+                                  event->mask & (IN_DELETE | IN_MOVED_FROM),
+                                  event->mask & IN_ISDIR);
+  }
+}
+
+FilePathWatcherImpl::FilePathWatcherImpl()
+    : recursive_(false), weak_factory_(this) {
+  weak_ptr_ = weak_factory_.GetWeakPtr();
+}
+
+FilePathWatcherImpl::~FilePathWatcherImpl() {
+  DCHECK(!task_runner() || task_runner()->RunsTasksInCurrentSequence());
+}
+
+void FilePathWatcherImpl::OnFilePathChanged(InotifyReader::Watch fired_watch,
+                                            const FilePath::StringType& child,
+                                            bool created,
+                                            bool deleted,
+                                            bool is_dir) {
+  DCHECK(!task_runner()->RunsTasksInCurrentSequence());
+
+  // This method is invoked on the Inotify thread. Switch to task_runner() to
+  // access |watches_| safely. Use a WeakPtr to prevent the callback from
+  // running after |this| is destroyed (i.e. after the watch is cancelled).
+  task_runner()->PostTask(
+      FROM_HERE,
+      BindOnce(&FilePathWatcherImpl::OnFilePathChangedOnOriginSequence,
+               weak_ptr_, fired_watch, child, created, deleted, is_dir));
+}
+
+void FilePathWatcherImpl::OnFilePathChangedOnOriginSequence(
+    InotifyReader::Watch fired_watch,
+    const FilePath::StringType& child,
+    bool created,
+    bool deleted,
+    bool is_dir) {
+  DCHECK(task_runner()->RunsTasksInCurrentSequence());
+  DCHECK(!watches_.empty());
+  DCHECK(HasValidWatchVector());
+
+  // Used below to avoid multiple recursive updates.
+  bool did_update = false;
+
+  // Find the entry in |watches_| that corresponds to |fired_watch|.
+  for (size_t i = 0; i < watches_.size(); ++i) {
+    const WatchEntry& watch_entry = watches_[i];
+    if (fired_watch != watch_entry.watch)
+      continue;
+
+    // Check whether a path component of |target_| changed.
+    bool change_on_target_path =
+        child.empty() ||
+        (child == watch_entry.linkname) ||
+        (child == watch_entry.subdir);
+
+    // Check if the change references |target_| or a direct child of |target_|.
+    bool target_changed;
+    if (watch_entry.subdir.empty()) {
+      // The fired watch is for a WatchEntry without a subdir. Thus for a given
+      // |target_| = "/path/to/foo", this is for "foo". Here, check either:
+      // - the target has no symlink: it is the target and it changed.
+      // - the target has a symlink, and it matches |child|.
+      target_changed = (watch_entry.linkname.empty() ||
+                        child == watch_entry.linkname);
+    } else {
+      // The fired watch is for a WatchEntry with a subdir. Thus for a given
+      // |target_| = "/path/to/foo", this is for {"/", "/path", "/path/to"}.
+      // So we can safely access the next WatchEntry since we have not reached
+      // the end yet. Check |watch_entry| is for "/path/to", i.e. the next
+      // element is "foo".
+      bool next_watch_may_be_for_target = watches_[i + 1].subdir.empty();
+      if (next_watch_may_be_for_target) {
+        // The current |watch_entry| is for "/path/to", so check if the |child|
+        // that changed is "foo".
+        target_changed = watch_entry.subdir == child;
+      } else {
+        // The current |watch_entry| is not for "/path/to", so the next entry
+        // cannot be "foo". Thus |target_| has not changed.
+        target_changed = false;
+      }
+    }
+
+    // Update watches if a directory component of the |target_| path
+    // (dis)appears. Note that we don't add the additional restriction of
+    // checking the event mask to see if it is for a directory here as changes
+    // to symlinks on the target path will not have IN_ISDIR set in the event
+    // masks. As a result we may sometimes call UpdateWatches() unnecessarily.
+    if (change_on_target_path && (created || deleted) && !did_update) {
+      UpdateWatches();
+      did_update = true;
+    }
+
+    // Report the following events:
+    //  - The target or a direct child of the target got changed (in case the
+    //    watched path refers to a directory).
+    //  - One of the parent directories got moved or deleted, since the target
+    //    disappears in this case.
+    //  - One of the parent directories appears. The event corresponding to
+    //    the target appearing might have been missed in this case, so recheck.
+    if (target_changed ||
+        (change_on_target_path && deleted) ||
+        (change_on_target_path && created && PathExists(target_))) {
+      if (!did_update) {
+        UpdateRecursiveWatches(fired_watch, is_dir);
+        did_update = true;
+      }
+      callback_.Run(target_, false /* error */);
+      return;
+    }
+  }
+
+  if (ContainsKey(recursive_paths_by_watch_, fired_watch)) {
+    if (!did_update)
+      UpdateRecursiveWatches(fired_watch, is_dir);
+    callback_.Run(target_, false /* error */);
+  }
+}
+
+bool FilePathWatcherImpl::Watch(const FilePath& path,
+                                bool recursive,
+                                const FilePathWatcher::Callback& callback) {
+  DCHECK(target_.empty());
+
+  set_task_runner(SequencedTaskRunnerHandle::Get());
+  callback_ = callback;
+  target_ = path;
+  recursive_ = recursive;
+
+  std::vector<FilePath::StringType> comps;
+  target_.GetComponents(&comps);
+  DCHECK(!comps.empty());
+  for (size_t i = 1; i < comps.size(); ++i)
+    watches_.push_back(WatchEntry(comps[i]));
+  watches_.push_back(WatchEntry(FilePath::StringType()));
+  UpdateWatches();
+  return true;
+}
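+
+// As an illustrative example: watching "/path/to/foo" produces WatchEntry
+// |subdir| values {"path", "to", "foo", ""}: one entry per component after
+// the root, plus a trailing entry for the target itself, matching the
+// invariant checked by HasValidWatchVector().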
+
+void FilePathWatcherImpl::Cancel() {
+  if (!callback_) {
+    // Watch() was never called.
+    set_cancelled();
+    return;
+  }
+
+  DCHECK(task_runner()->RunsTasksInCurrentSequence());
+  DCHECK(!is_cancelled());
+
+  set_cancelled();
+  callback_.Reset();
+
+  for (size_t i = 0; i < watches_.size(); ++i)
+    g_inotify_reader.Get().RemoveWatch(watches_[i].watch, this);
+  watches_.clear();
+  target_.clear();
+  RemoveRecursiveWatches();
+}
+
+void FilePathWatcherImpl::UpdateWatches() {
+  // Ensure this runs on the task_runner() exclusively in order to avoid
+  // concurrency issues.
+  DCHECK(task_runner()->RunsTasksInCurrentSequence());
+  DCHECK(HasValidWatchVector());
+
+  // Walk the list of watches and update them as we go.
+  FilePath path(FILE_PATH_LITERAL("/"));
+  for (size_t i = 0; i < watches_.size(); ++i) {
+    WatchEntry& watch_entry = watches_[i];
+    InotifyReader::Watch old_watch = watch_entry.watch;
+    watch_entry.watch = InotifyReader::kInvalidWatch;
+    watch_entry.linkname.clear();
+    watch_entry.watch = g_inotify_reader.Get().AddWatch(path, this);
+    if (watch_entry.watch == InotifyReader::kInvalidWatch) {
+      // Ignore the error code (beyond symlink handling) to attempt to add
+      // watches on accessible children of unreadable directories. Note that
+      // this is a best-effort attempt; we may not catch events in this
+      // scenario.
+      if (IsLink(path))
+        AddWatchForBrokenSymlink(path, &watch_entry);
+    }
+    if (old_watch != watch_entry.watch)
+      g_inotify_reader.Get().RemoveWatch(old_watch, this);
+    path = path.Append(watch_entry.subdir);
+  }
+
+  UpdateRecursiveWatches(InotifyReader::kInvalidWatch,
+                         false /* is directory? */);
+}
+
+void FilePathWatcherImpl::UpdateRecursiveWatches(
+    InotifyReader::Watch fired_watch,
+    bool is_dir) {
+  DCHECK(HasValidWatchVector());
+
+  if (!recursive_)
+    return;
+
+  if (!DirectoryExists(target_)) {
+    RemoveRecursiveWatches();
+    return;
+  }
+
+  // Check to see if this is a forced update or if some component of |target_|
+  // has changed. For these cases, redo the watches for |target_| and below.
+  if (!ContainsKey(recursive_paths_by_watch_, fired_watch) &&
+      fired_watch != watches_.back().watch) {
+    UpdateRecursiveWatchesForPath(target_);
+    return;
+  }
+
+  // Underneath |target_|, only directory changes trigger watch updates.
+  if (!is_dir)
+    return;
+
+  const FilePath& changed_dir =
+      ContainsKey(recursive_paths_by_watch_, fired_watch) ?
+      recursive_paths_by_watch_[fired_watch] :
+      target_;
+
+  std::map<FilePath, InotifyReader::Watch>::iterator start_it =
+      recursive_watches_by_path_.lower_bound(changed_dir);
+  std::map<FilePath, InotifyReader::Watch>::iterator end_it = start_it;
+  for (; end_it != recursive_watches_by_path_.end(); ++end_it) {
+    const FilePath& cur_path = end_it->first;
+    if (!changed_dir.IsParent(cur_path))
+      break;
+    if (!DirectoryExists(cur_path))
+      g_inotify_reader.Get().RemoveWatch(end_it->second, this);
+  }
+  recursive_watches_by_path_.erase(start_it, end_it);
+  UpdateRecursiveWatchesForPath(changed_dir);
+}
+
+void FilePathWatcherImpl::UpdateRecursiveWatchesForPath(const FilePath& path) {
+  DCHECK(recursive_);
+  DCHECK(!path.empty());
+  DCHECK(DirectoryExists(path));
+
+  // Note: SHOW_SYM_LINKS exposes symlinks as symlinks, so they are ignored
+  // rather than followed. Following symlinks can easily lead to the undesirable
+  // situation where the entire file system is being watched.
+  FileEnumerator enumerator(
+      path,
+      true /* recursive enumeration */,
+      FileEnumerator::DIRECTORIES | FileEnumerator::SHOW_SYM_LINKS);
+  for (FilePath current = enumerator.Next();
+       !current.empty();
+       current = enumerator.Next()) {
+    DCHECK(enumerator.GetInfo().IsDirectory());
+
+    if (!ContainsKey(recursive_watches_by_path_, current)) {
+      // Add new watches.
+      InotifyReader::Watch watch =
+          g_inotify_reader.Get().AddWatch(current, this);
+      TrackWatchForRecursion(watch, current);
+    } else {
+      // Update existing watches.
+      InotifyReader::Watch old_watch = recursive_watches_by_path_[current];
+      DCHECK_NE(InotifyReader::kInvalidWatch, old_watch);
+      InotifyReader::Watch watch =
+          g_inotify_reader.Get().AddWatch(current, this);
+      if (watch != old_watch) {
+        g_inotify_reader.Get().RemoveWatch(old_watch, this);
+        recursive_paths_by_watch_.erase(old_watch);
+        recursive_watches_by_path_.erase(current);
+        TrackWatchForRecursion(watch, current);
+      }
+    }
+  }
+}
+
+void FilePathWatcherImpl::TrackWatchForRecursion(InotifyReader::Watch watch,
+                                                 const FilePath& path) {
+  DCHECK(recursive_);
+  DCHECK(!path.empty());
+  DCHECK(target_.IsParent(path));
+
+  if (watch == InotifyReader::kInvalidWatch)
+    return;
+
+  DCHECK(!ContainsKey(recursive_paths_by_watch_, watch));
+  DCHECK(!ContainsKey(recursive_watches_by_path_, path));
+  recursive_paths_by_watch_[watch] = path;
+  recursive_watches_by_path_[path] = watch;
+}
+
+void FilePathWatcherImpl::RemoveRecursiveWatches() {
+  if (!recursive_)
+    return;
+
+  for (const auto& it : recursive_paths_by_watch_)
+    g_inotify_reader.Get().RemoveWatch(it.first, this);
+
+  recursive_paths_by_watch_.clear();
+  recursive_watches_by_path_.clear();
+}
+
+void FilePathWatcherImpl::AddWatchForBrokenSymlink(const FilePath& path,
+                                                   WatchEntry* watch_entry) {
+  DCHECK_EQ(InotifyReader::kInvalidWatch, watch_entry->watch);
+  FilePath link;
+  if (!ReadSymbolicLink(path, &link))
+    return;
+
+  if (!link.IsAbsolute())
+    link = path.DirName().Append(link);
+
+  // Try watching symlink target directory. If the link target is "/", then we
+  // shouldn't get here in normal situations and if we do, we'd watch "/" for
+  // changes to a component "/" which is harmless so no special treatment of
+  // this case is required.
+  InotifyReader::Watch watch =
+      g_inotify_reader.Get().AddWatch(link.DirName(), this);
+  if (watch == InotifyReader::kInvalidWatch) {
+    // TODO(craig): Symlinks only work if the parent directory for the target
+    // exists. Ideally we should make sure we've watched all the components of
+    // the symlink path for changes. See crbug.com/91561 for details.
+    DPLOG(WARNING) << "Watch failed for " << link.DirName().value();
+    return;
+  }
+  watch_entry->watch = watch;
+  watch_entry->linkname = link.BaseName().value();
+}
+
+bool FilePathWatcherImpl::HasValidWatchVector() const {
+  if (watches_.empty())
+    return false;
+  for (size_t i = 0; i < watches_.size() - 1; ++i) {
+    if (watches_[i].subdir.empty())
+      return false;
+  }
+  return watches_.back().subdir.empty();
+}
+
+}  // namespace
+
+FilePathWatcher::FilePathWatcher() {
+  sequence_checker_.DetachFromSequence();
+  impl_ = std::make_unique<FilePathWatcherImpl>();
+}
+
+}  // namespace base
diff --git a/base/files/file_path_watcher_mac.cc b/base/files/file_path_watcher_mac.cc
new file mode 100644
index 0000000..4dcf90b
--- /dev/null
+++ b/base/files/file_path_watcher_mac.cc
@@ -0,0 +1,63 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <memory>
+
+#include "base/files/file_path_watcher.h"
+#include "base/files/file_path_watcher_kqueue.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "build/build_config.h"
+
+#if !defined(OS_IOS)
+#include "base/files/file_path_watcher_fsevents.h"
+#endif
+
+namespace base {
+
+namespace {
+
+class FilePathWatcherImpl : public FilePathWatcher::PlatformDelegate {
+ public:
+  FilePathWatcherImpl() = default;
+  ~FilePathWatcherImpl() override = default;
+
+  bool Watch(const FilePath& path,
+             bool recursive,
+             const FilePathWatcher::Callback& callback) override {
+    // Use kqueue for non-recursive watches and FSEvents for recursive ones.
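+    // (kqueue needs one file descriptor per watched node, which does not
+    // scale to whole directory trees, while FSEvents is recursive by design;
+    // hence the split.)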
+    DCHECK(!impl_.get());
+    if (recursive) {
+      if (!FilePathWatcher::RecursiveWatchAvailable())
+        return false;
+#if !defined(OS_IOS)
+      impl_ = std::make_unique<FilePathWatcherFSEvents>();
+#endif  // !defined(OS_IOS)
+    } else {
+      impl_ = std::make_unique<FilePathWatcherKQueue>();
+    }
+    DCHECK(impl_.get());
+    return impl_->Watch(path, recursive, callback);
+  }
+
+  void Cancel() override {
+    if (impl_.get())
+      impl_->Cancel();
+    set_cancelled();
+  }
+
+ private:
+  std::unique_ptr<PlatformDelegate> impl_;
+
+  DISALLOW_COPY_AND_ASSIGN(FilePathWatcherImpl);
+};
+
+}  // namespace
+
+FilePathWatcher::FilePathWatcher() {
+  sequence_checker_.DetachFromSequence();
+  impl_ = std::make_unique<FilePathWatcherImpl>();
+}
+
+}  // namespace base
diff --git a/base/files/file_path_watcher_stub.cc b/base/files/file_path_watcher_stub.cc
new file mode 100644
index 0000000..93a5bab
--- /dev/null
+++ b/base/files/file_path_watcher_stub.cc
@@ -0,0 +1,41 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file exists for Unix systems which don't have the inotify headers, and
+// thus cannot build the inotify-based implementation in
+// file_path_watcher_linux.cc.
+
+#include "base/files/file_path_watcher.h"
+
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+
+namespace base {
+
+namespace {
+
+class FilePathWatcherImpl : public FilePathWatcher::PlatformDelegate {
+ public:
+  FilePathWatcherImpl() = default;
+  ~FilePathWatcherImpl() override = default;
+
+  bool Watch(const FilePath& path,
+             bool recursive,
+             const FilePathWatcher::Callback& callback) override {
+    return false;
+  }
+
+  void Cancel() override {}
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(FilePathWatcherImpl);
+};
+
+}  // namespace
+
+FilePathWatcher::FilePathWatcher() {
+  sequence_checker_.DetachFromSequence();
+  impl_ = std::make_unique<FilePathWatcherImpl>();
+}
+
+}  // namespace base
diff --git a/base/files/file_path_watcher_unittest.cc b/base/files/file_path_watcher_unittest.cc
new file mode 100644
index 0000000..2cc2e58
--- /dev/null
+++ b/base/files/file_path_watcher_unittest.cc
@@ -0,0 +1,873 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file_path_watcher.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#include <aclapi.h>
+#elif defined(OS_POSIX)
+#include <sys/stat.h>
+#endif
+
+#include <set>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/compiler_specific.h"
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/files/scoped_temp_dir.h"
+#include "base/location.h"
+#include "base/macros.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
+#include "base/stl_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/test/test_file_util.h"
+#include "base/test/test_timeouts.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_ANDROID)
+#include "base/android/path_utils.h"
+#endif  // defined(OS_ANDROID)
+
+#if defined(OS_POSIX)
+#include "base/files/file_descriptor_watcher_posix.h"
+#endif  // defined(OS_POSIX)
+
+namespace base {
+
+namespace {
+
+class TestDelegate;
+
+// Aggregates notifications from the test delegates and breaks the message loop
+// the test thread is waiting on once they have all come in.
+class NotificationCollector
+    : public base::RefCountedThreadSafe<NotificationCollector> {
+ public:
+  NotificationCollector() : task_runner_(base::ThreadTaskRunnerHandle::Get()) {}
+
+  // Called from the file thread by the delegates.
+  void OnChange(TestDelegate* delegate) {
+    task_runner_->PostTask(
+        FROM_HERE, base::BindOnce(&NotificationCollector::RecordChange, this,
+                                  base::Unretained(delegate)));
+  }
+
+  void Register(TestDelegate* delegate) {
+    delegates_.insert(delegate);
+  }
+
+  void Reset() {
+    signaled_.clear();
+  }
+
+  bool Success() {
+    return signaled_ == delegates_;
+  }
+
+ private:
+  friend class base::RefCountedThreadSafe<NotificationCollector>;
+  ~NotificationCollector() = default;
+
+  void RecordChange(TestDelegate* delegate) {
+    // Warning: |delegate| is Unretained. Do not dereference.
+    ASSERT_TRUE(task_runner_->BelongsToCurrentThread());
+    ASSERT_TRUE(delegates_.count(delegate));
+    signaled_.insert(delegate);
+
+    // Check whether all delegates have been signaled.
+    if (signaled_ == delegates_)
+      task_runner_->PostTask(FROM_HERE,
+                             RunLoop::QuitCurrentWhenIdleClosureDeprecated());
+  }
+
+  // Set of registered delegates.
+  std::set<TestDelegate*> delegates_;
+
+  // Set of signaled delegates.
+  std::set<TestDelegate*> signaled_;
+
+  // The loop we should break after all delegates signaled.
+  scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+};
+
+class TestDelegateBase : public SupportsWeakPtr<TestDelegateBase> {
+ public:
+  TestDelegateBase() = default;
+  virtual ~TestDelegateBase() = default;
+
+  virtual void OnFileChanged(const FilePath& path, bool error) = 0;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(TestDelegateBase);
+};
+
+// A mock class for testing. Gmock is not appropriate because it is not
+// thread-safe for setting expectations. Thus the test code cannot safely
+// reset expectations while the file watcher is running.
+// Instead, TestDelegate gets the notifications from FilePathWatcher and uses
+// NotificationCollector to aggregate the results.
+class TestDelegate : public TestDelegateBase {
+ public:
+  explicit TestDelegate(NotificationCollector* collector)
+      : collector_(collector) {
+    collector_->Register(this);
+  }
+  ~TestDelegate() override = default;
+
+  void OnFileChanged(const FilePath& path, bool error) override {
+    if (error)
+      ADD_FAILURE() << "Error " << path.value();
+    else
+      collector_->OnChange(this);
+  }
+
+ private:
+  scoped_refptr<NotificationCollector> collector_;
+
+  DISALLOW_COPY_AND_ASSIGN(TestDelegate);
+};
+
+class FilePathWatcherTest : public testing::Test {
+ public:
+  FilePathWatcherTest()
+#if defined(OS_POSIX)
+      : file_descriptor_watcher_(&loop_)
+#endif
+  {
+  }
+
+  ~FilePathWatcherTest() override = default;
+
+ protected:
+  void SetUp() override {
+#if defined(OS_ANDROID)
+    // Watching files is only permitted when all parent directories are
+    // accessible, which is not the case for the default temp directory
+    // on Android which is under /data/data.  Use /sdcard instead.
+    // TODO(pauljensen): Remove this when crbug.com/475568 is fixed.
+    FilePath parent_dir;
+    ASSERT_TRUE(android::GetExternalStorageDirectory(&parent_dir));
+    ASSERT_TRUE(temp_dir_.CreateUniqueTempDirUnderPath(parent_dir));
+#else   // defined(OS_ANDROID)
+    ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
+#endif  // defined(OS_ANDROID)
+    collector_ = new NotificationCollector();
+  }
+
+  void TearDown() override { RunLoop().RunUntilIdle(); }
+
+  FilePath test_file() {
+    return temp_dir_.GetPath().AppendASCII("FilePathWatcherTest");
+  }
+
+  FilePath test_link() {
+    return temp_dir_.GetPath().AppendASCII("FilePathWatcherTest.lnk");
+  }
+
+  // Write |content| to |file|. Returns true on success.
+  bool WriteFile(const FilePath& file, const std::string& content) {
+    int write_size = ::base::WriteFile(file, content.c_str(), content.length());
+    return write_size == static_cast<int>(content.length());
+  }
+
+  bool SetupWatch(const FilePath& target,
+                  FilePathWatcher* watcher,
+                  TestDelegateBase* delegate,
+                  bool recursive_watch) WARN_UNUSED_RESULT;
+
+  bool WaitForEvents() WARN_UNUSED_RESULT {
+    collector_->Reset();
+
+    RunLoop run_loop;
+    // Make sure we timeout if we don't get notified.
+    ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+        FROM_HERE, run_loop.QuitWhenIdleClosure(),
+        TestTimeouts::action_timeout());
+    run_loop.Run();
+    return collector_->Success();
+  }
+
+  NotificationCollector* collector() { return collector_.get(); }
+
+  MessageLoopForIO loop_;
+#if defined(OS_POSIX)
+  FileDescriptorWatcher file_descriptor_watcher_;
+#endif
+
+  ScopedTempDir temp_dir_;
+  scoped_refptr<NotificationCollector> collector_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(FilePathWatcherTest);
+};
+
+bool FilePathWatcherTest::SetupWatch(const FilePath& target,
+                                     FilePathWatcher* watcher,
+                                     TestDelegateBase* delegate,
+                                     bool recursive_watch) {
+  return watcher->Watch(
+      target, recursive_watch,
+      base::Bind(&TestDelegateBase::OnFileChanged, delegate->AsWeakPtr()));
+}
+
+// Basic test: Create the file and verify that we notice.
+TEST_F(FilePathWatcherTest, NewFile) {
+  FilePathWatcher watcher;
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  ASSERT_TRUE(SetupWatch(test_file(), &watcher, delegate.get(), false));
+
+  ASSERT_TRUE(WriteFile(test_file(), "content"));
+  ASSERT_TRUE(WaitForEvents());
+}
+
+// Verify that modifying the file is caught.
+TEST_F(FilePathWatcherTest, ModifiedFile) {
+  ASSERT_TRUE(WriteFile(test_file(), "content"));
+
+  FilePathWatcher watcher;
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  ASSERT_TRUE(SetupWatch(test_file(), &watcher, delegate.get(), false));
+
+  // Now make sure we get notified if the file is modified.
+  ASSERT_TRUE(WriteFile(test_file(), "new content"));
+  ASSERT_TRUE(WaitForEvents());
+}
+
+// Verify that moving the file into place is caught.
+TEST_F(FilePathWatcherTest, MovedFile) {
+  FilePath source_file(temp_dir_.GetPath().AppendASCII("source"));
+  ASSERT_TRUE(WriteFile(source_file, "content"));
+
+  FilePathWatcher watcher;
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  ASSERT_TRUE(SetupWatch(test_file(), &watcher, delegate.get(), false));
+
+  // Now make sure we get notified if the file is moved into place.
+  ASSERT_TRUE(base::Move(source_file, test_file()));
+  ASSERT_TRUE(WaitForEvents());
+}
+
+TEST_F(FilePathWatcherTest, DeletedFile) {
+  ASSERT_TRUE(WriteFile(test_file(), "content"));
+
+  FilePathWatcher watcher;
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  ASSERT_TRUE(SetupWatch(test_file(), &watcher, delegate.get(), false));
+
+  // Now make sure we get notified if the file is deleted.
+  base::DeleteFile(test_file(), false);
+  ASSERT_TRUE(WaitForEvents());
+}
+
+// Used by the DeleteDuringNotify test below.
+// Deletes the FilePathWatcher when it's notified.
+class Deleter : public TestDelegateBase {
+ public:
+  Deleter(FilePathWatcher* watcher, MessageLoop* loop)
+      : watcher_(watcher),
+        loop_(loop) {
+  }
+  ~Deleter() override = default;
+
+  void OnFileChanged(const FilePath&, bool) override {
+    watcher_.reset();
+    loop_->task_runner()->PostTask(
+        FROM_HERE, RunLoop::QuitCurrentWhenIdleClosureDeprecated());
+  }
+
+  FilePathWatcher* watcher() const { return watcher_.get(); }
+
+ private:
+  std::unique_ptr<FilePathWatcher> watcher_;
+  MessageLoop* loop_;
+
+  DISALLOW_COPY_AND_ASSIGN(Deleter);
+};
+
+// Verify that deleting a watcher during the callback doesn't crash.
+TEST_F(FilePathWatcherTest, DeleteDuringNotify) {
+  FilePathWatcher* watcher = new FilePathWatcher;
+  // Takes ownership of watcher.
+  std::unique_ptr<Deleter> deleter(new Deleter(watcher, &loop_));
+  ASSERT_TRUE(SetupWatch(test_file(), watcher, deleter.get(), false));
+
+  ASSERT_TRUE(WriteFile(test_file(), "content"));
+  ASSERT_TRUE(WaitForEvents());
+
+  // We win if we haven't crashed yet.
+  // Might as well double-check it got deleted, too.
+  ASSERT_TRUE(deleter->watcher() == nullptr);
+}
+
+// Verify that deleting the watcher works even if there is a pending
+// notification.
+// Flaky on macOS (and ARM Linux): http://crbug.com/85930
+TEST_F(FilePathWatcherTest, DISABLED_DestroyWithPendingNotification) {
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  FilePathWatcher watcher;
+  ASSERT_TRUE(SetupWatch(test_file(), &watcher, delegate.get(), false));
+  ASSERT_TRUE(WriteFile(test_file(), "content"));
+}
+
+TEST_F(FilePathWatcherTest, MultipleWatchersSingleFile) {
+  FilePathWatcher watcher1, watcher2;
+  std::unique_ptr<TestDelegate> delegate1(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate2(new TestDelegate(collector()));
+  ASSERT_TRUE(SetupWatch(test_file(), &watcher1, delegate1.get(), false));
+  ASSERT_TRUE(SetupWatch(test_file(), &watcher2, delegate2.get(), false));
+
+  ASSERT_TRUE(WriteFile(test_file(), "content"));
+  ASSERT_TRUE(WaitForEvents());
+}
+
+// Verify that watching a file whose parent directory doesn't exist yet works if
+// the directory and file are created eventually.
+TEST_F(FilePathWatcherTest, NonExistentDirectory) {
+  FilePathWatcher watcher;
+  FilePath dir(temp_dir_.GetPath().AppendASCII("dir"));
+  FilePath file(dir.AppendASCII("file"));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  ASSERT_TRUE(SetupWatch(file, &watcher, delegate.get(), false));
+
+  ASSERT_TRUE(base::CreateDirectory(dir));
+
+  ASSERT_TRUE(WriteFile(file, "content"));
+
+  VLOG(1) << "Waiting for file creation";
+  ASSERT_TRUE(WaitForEvents());
+
+  ASSERT_TRUE(WriteFile(file, "content v2"));
+  VLOG(1) << "Waiting for file change";
+  ASSERT_TRUE(WaitForEvents());
+
+  ASSERT_TRUE(base::DeleteFile(file, false));
+  VLOG(1) << "Waiting for file deletion";
+  ASSERT_TRUE(WaitForEvents());
+}
+
+// Exercises watch reconfiguration for the case that directories on the path
+// are rapidly created.
+TEST_F(FilePathWatcherTest, DirectoryChain) {
+  FilePath path(temp_dir_.GetPath());
+  std::vector<std::string> dir_names;
+  for (int i = 0; i < 20; i++) {
+    std::string dir(base::StringPrintf("d%d", i));
+    dir_names.push_back(dir);
+    path = path.AppendASCII(dir);
+  }
+
+  FilePathWatcher watcher;
+  FilePath file(path.AppendASCII("file"));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  ASSERT_TRUE(SetupWatch(file, &watcher, delegate.get(), false));
+
+  FilePath sub_path(temp_dir_.GetPath());
+  for (std::vector<std::string>::const_iterator d(dir_names.begin());
+       d != dir_names.end(); ++d) {
+    sub_path = sub_path.AppendASCII(*d);
+    ASSERT_TRUE(base::CreateDirectory(sub_path));
+  }
+  VLOG(1) << "Create File";
+  ASSERT_TRUE(WriteFile(file, "content"));
+  VLOG(1) << "Waiting for file creation";
+  ASSERT_TRUE(WaitForEvents());
+
+  ASSERT_TRUE(WriteFile(file, "content v2"));
+  VLOG(1) << "Waiting for file modification";
+  ASSERT_TRUE(WaitForEvents());
+}
+
+#if defined(OS_MACOSX)
+// http://crbug.com/85930
+#define DisappearingDirectory DISABLED_DisappearingDirectory
+#endif
+TEST_F(FilePathWatcherTest, DisappearingDirectory) {
+  FilePathWatcher watcher;
+  FilePath dir(temp_dir_.GetPath().AppendASCII("dir"));
+  FilePath file(dir.AppendASCII("file"));
+  ASSERT_TRUE(base::CreateDirectory(dir));
+  ASSERT_TRUE(WriteFile(file, "content"));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  ASSERT_TRUE(SetupWatch(file, &watcher, delegate.get(), false));
+
+  ASSERT_TRUE(base::DeleteFile(dir, true));
+  ASSERT_TRUE(WaitForEvents());
+}
+
+// Tests that a file that is deleted and reappears is tracked correctly.
+TEST_F(FilePathWatcherTest, DeleteAndRecreate) {
+  ASSERT_TRUE(WriteFile(test_file(), "content"));
+  FilePathWatcher watcher;
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  ASSERT_TRUE(SetupWatch(test_file(), &watcher, delegate.get(), false));
+
+  ASSERT_TRUE(base::DeleteFile(test_file(), false));
+  VLOG(1) << "Waiting for file deletion";
+  ASSERT_TRUE(WaitForEvents());
+
+  ASSERT_TRUE(WriteFile(test_file(), "content"));
+  VLOG(1) << "Waiting for file creation";
+  ASSERT_TRUE(WaitForEvents());
+}
+
+TEST_F(FilePathWatcherTest, WatchDirectory) {
+  FilePathWatcher watcher;
+  FilePath dir(temp_dir_.GetPath().AppendASCII("dir"));
+  FilePath file1(dir.AppendASCII("file1"));
+  FilePath file2(dir.AppendASCII("file2"));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  ASSERT_TRUE(SetupWatch(dir, &watcher, delegate.get(), false));
+
+  ASSERT_TRUE(base::CreateDirectory(dir));
+  VLOG(1) << "Waiting for directory creation";
+  ASSERT_TRUE(WaitForEvents());
+
+  ASSERT_TRUE(WriteFile(file1, "content"));
+  VLOG(1) << "Waiting for file1 creation";
+  ASSERT_TRUE(WaitForEvents());
+
+#if !defined(OS_MACOSX)
+  // Mac implementation does not detect files modified in a directory.
+  ASSERT_TRUE(WriteFile(file1, "content v2"));
+  VLOG(1) << "Waiting for file1 modification";
+  ASSERT_TRUE(WaitForEvents());
+#endif  // !OS_MACOSX
+
+  ASSERT_TRUE(base::DeleteFile(file1, false));
+  VLOG(1) << "Waiting for file1 deletion";
+  ASSERT_TRUE(WaitForEvents());
+
+  ASSERT_TRUE(WriteFile(file2, "content"));
+  VLOG(1) << "Waiting for file2 creation";
+  ASSERT_TRUE(WaitForEvents());
+}
+
+TEST_F(FilePathWatcherTest, MoveParent) {
+  FilePathWatcher file_watcher;
+  FilePathWatcher subdir_watcher;
+  FilePath dir(temp_dir_.GetPath().AppendASCII("dir"));
+  FilePath dest(temp_dir_.GetPath().AppendASCII("dest"));
+  FilePath subdir(dir.AppendASCII("subdir"));
+  FilePath file(subdir.AppendASCII("file"));
+  std::unique_ptr<TestDelegate> file_delegate(new TestDelegate(collector()));
+  ASSERT_TRUE(SetupWatch(file, &file_watcher, file_delegate.get(), false));
+  std::unique_ptr<TestDelegate> subdir_delegate(new TestDelegate(collector()));
+  ASSERT_TRUE(SetupWatch(subdir, &subdir_watcher, subdir_delegate.get(),
+                         false));
+
+  // Set up a directory hierarchy.
+  ASSERT_TRUE(base::CreateDirectory(subdir));
+  ASSERT_TRUE(WriteFile(file, "content"));
+  VLOG(1) << "Waiting for file creation";
+  ASSERT_TRUE(WaitForEvents());
+
+  // Move the parent directory.
+  base::Move(dir, dest);
+  VLOG(1) << "Waiting for directory move";
+  ASSERT_TRUE(WaitForEvents());
+}
+
+TEST_F(FilePathWatcherTest, RecursiveWatch) {
+  FilePathWatcher watcher;
+  FilePath dir(temp_dir_.GetPath().AppendASCII("dir"));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  bool setup_result = SetupWatch(dir, &watcher, delegate.get(), true);
+  if (!FilePathWatcher::RecursiveWatchAvailable()) {
+    ASSERT_FALSE(setup_result);
+    return;
+  }
+  ASSERT_TRUE(setup_result);
+
+  // Main directory ("dir") creation.
+  ASSERT_TRUE(base::CreateDirectory(dir));
+  ASSERT_TRUE(WaitForEvents());
+
+  // Create "$dir/file1".
+  FilePath file1(dir.AppendASCII("file1"));
+  ASSERT_TRUE(WriteFile(file1, "content"));
+  ASSERT_TRUE(WaitForEvents());
+
+  // Create "$dir/subdir".
+  FilePath subdir(dir.AppendASCII("subdir"));
+  ASSERT_TRUE(base::CreateDirectory(subdir));
+  ASSERT_TRUE(WaitForEvents());
+
+  // Create "$dir/subdir/subdir_file1".
+  FilePath subdir_file1(subdir.AppendASCII("subdir_file1"));
+  ASSERT_TRUE(WriteFile(subdir_file1, "content"));
+  ASSERT_TRUE(WaitForEvents());
+
+  // Create "$dir/subdir/subdir_child_dir".
+  FilePath subdir_child_dir(subdir.AppendASCII("subdir_child_dir"));
+  ASSERT_TRUE(base::CreateDirectory(subdir_child_dir));
+  ASSERT_TRUE(WaitForEvents());
+
+  // Create "$dir/subdir/subdir_child_dir/child_dir_file1".
+  FilePath child_dir_file1(subdir_child_dir.AppendASCII("child_dir_file1"));
+  ASSERT_TRUE(WriteFile(child_dir_file1, "content v2"));
+  ASSERT_TRUE(WaitForEvents());
+
+  // Write into "$dir/subdir/subdir_child_dir/child_dir_file1".
+  ASSERT_TRUE(WriteFile(child_dir_file1, "content"));
+  ASSERT_TRUE(WaitForEvents());
+
+// Apps cannot change file attributes on Android in /sdcard as /sdcard uses the
+// "fuse" file system, while /data uses "ext4".  Running these tests in /data
+// would be preferable and allow testing file attributes and symlinks.
+// TODO(pauljensen): Re-enable when crbug.com/475568 is fixed and SetUp() places
+// the |temp_dir_| in /data.
+#if !defined(OS_ANDROID)
+  // Modify "$dir/subdir/subdir_child_dir/child_dir_file1" attributes.
+  ASSERT_TRUE(base::MakeFileUnreadable(child_dir_file1));
+  ASSERT_TRUE(WaitForEvents());
+#endif
+
+  // Delete "$dir/subdir/subdir_file1".
+  ASSERT_TRUE(base::DeleteFile(subdir_file1, false));
+  ASSERT_TRUE(WaitForEvents());
+
+  // Delete "$dir/subdir/subdir_child_dir/child_dir_file1".
+  ASSERT_TRUE(base::DeleteFile(child_dir_file1, false));
+  ASSERT_TRUE(WaitForEvents());
+}
+
+#if defined(OS_POSIX) && !defined(OS_ANDROID) && !defined(OS_FUCHSIA)
+// Apps cannot create symlinks on Android in /sdcard as /sdcard uses the
+// "fuse" file system, while /data uses "ext4".  Running these tests in /data
+// would be preferable and allow testing file attributes and symlinks.
+// TODO(pauljensen): Re-enable when crbug.com/475568 is fixed and SetUp() places
+// the |temp_dir_| in /data.
+//
+// This test is disabled on Fuchsia since it doesn't support symlinking.
+TEST_F(FilePathWatcherTest, RecursiveWithSymLink) {
+  if (!FilePathWatcher::RecursiveWatchAvailable())
+    return;
+
+  FilePathWatcher watcher;
+  FilePath test_dir(temp_dir_.GetPath().AppendASCII("test_dir"));
+  ASSERT_TRUE(base::CreateDirectory(test_dir));
+  FilePath symlink(test_dir.AppendASCII("symlink"));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  ASSERT_TRUE(SetupWatch(symlink, &watcher, delegate.get(), true));
+
+  // Link creation.
+  FilePath target1(temp_dir_.GetPath().AppendASCII("target1"));
+  ASSERT_TRUE(base::CreateSymbolicLink(target1, symlink));
+  ASSERT_TRUE(WaitForEvents());
+
+  // Target1 creation.
+  ASSERT_TRUE(base::CreateDirectory(target1));
+  ASSERT_TRUE(WaitForEvents());
+
+  // Create a file in target1.
+  FilePath target1_file(target1.AppendASCII("file"));
+  ASSERT_TRUE(WriteFile(target1_file, "content"));
+  ASSERT_TRUE(WaitForEvents());
+
+  // Link change.
+  FilePath target2(temp_dir_.GetPath().AppendASCII("target2"));
+  ASSERT_TRUE(base::CreateDirectory(target2));
+  ASSERT_TRUE(base::DeleteFile(symlink, false));
+  ASSERT_TRUE(base::CreateSymbolicLink(target2, symlink));
+  ASSERT_TRUE(WaitForEvents());
+
+  // Create a file in target2.
+  FilePath target2_file(target2.AppendASCII("file"));
+  ASSERT_TRUE(WriteFile(target2_file, "content"));
+  ASSERT_TRUE(WaitForEvents());
+}
+#endif  // defined(OS_POSIX) && !defined(OS_ANDROID) && !defined(OS_FUCHSIA)
+
+TEST_F(FilePathWatcherTest, MoveChild) {
+  FilePathWatcher file_watcher;
+  FilePathWatcher subdir_watcher;
+  FilePath source_dir(temp_dir_.GetPath().AppendASCII("source"));
+  FilePath source_subdir(source_dir.AppendASCII("subdir"));
+  FilePath source_file(source_subdir.AppendASCII("file"));
+  FilePath dest_dir(temp_dir_.GetPath().AppendASCII("dest"));
+  FilePath dest_subdir(dest_dir.AppendASCII("subdir"));
+  FilePath dest_file(dest_subdir.AppendASCII("file"));
+
+  // Set up a directory hierarchy.
+  ASSERT_TRUE(base::CreateDirectory(source_subdir));
+  ASSERT_TRUE(WriteFile(source_file, "content"));
+
+  std::unique_ptr<TestDelegate> file_delegate(new TestDelegate(collector()));
+  ASSERT_TRUE(SetupWatch(dest_file, &file_watcher, file_delegate.get(), false));
+  std::unique_ptr<TestDelegate> subdir_delegate(new TestDelegate(collector()));
+  ASSERT_TRUE(SetupWatch(dest_subdir, &subdir_watcher, subdir_delegate.get(),
+                         false));
+
+  // Move the directory into place, such that the watched file appears.
+  ASSERT_TRUE(base::Move(source_dir, dest_dir));
+  ASSERT_TRUE(WaitForEvents());
+}
+
+// Verify that changing attributes on a file is caught.
+#if defined(OS_ANDROID)
+// Apps cannot change file attributes on Android in /sdcard as /sdcard uses the
+// "fuse" file system, while /data uses "ext4".  Running these tests in /data
+// would be preferable and allow testing file attributes and symlinks.
+// TODO(pauljensen): Re-enable when crbug.com/475568 is fixed and SetUp() places
+// the |temp_dir_| in /data.
+#define FileAttributesChanged DISABLED_FileAttributesChanged
+#endif  // defined(OS_ANDROID)
+TEST_F(FilePathWatcherTest, FileAttributesChanged) {
+  ASSERT_TRUE(WriteFile(test_file(), "content"));
+  FilePathWatcher watcher;
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  ASSERT_TRUE(SetupWatch(test_file(), &watcher, delegate.get(), false));
+
+  // Now make sure we get notified if the file is modified.
+  ASSERT_TRUE(base::MakeFileUnreadable(test_file()));
+  ASSERT_TRUE(WaitForEvents());
+}
+
+#if defined(OS_LINUX)
+
+// Verify that creating a symlink is caught.
+TEST_F(FilePathWatcherTest, CreateLink) {
+  FilePathWatcher watcher;
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  // Note that we are watching the symlink.
+  ASSERT_TRUE(SetupWatch(test_link(), &watcher, delegate.get(), false));
+
+  // Now make sure we get notified if the link is created.
+  // Note that test_file() doesn't have to exist.
+  ASSERT_TRUE(CreateSymbolicLink(test_file(), test_link()));
+  ASSERT_TRUE(WaitForEvents());
+}
+
+// Verify that deleting a symlink is caught.
+TEST_F(FilePathWatcherTest, DeleteLink) {
+  // Unfortunately this test case only works if the link target exists.
+  // TODO(craig) fix this as part of crbug.com/91561.
+  ASSERT_TRUE(WriteFile(test_file(), "content"));
+  ASSERT_TRUE(CreateSymbolicLink(test_file(), test_link()));
+  FilePathWatcher watcher;
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  ASSERT_TRUE(SetupWatch(test_link(), &watcher, delegate.get(), false));
+
+  // Now make sure we get notified if the link is deleted.
+  ASSERT_TRUE(base::DeleteFile(test_link(), false));
+  ASSERT_TRUE(WaitForEvents());
+}
+
+// Verify that modifying a target file that a link is pointing to
+// when we are watching the link is caught.
+TEST_F(FilePathWatcherTest, ModifiedLinkedFile) {
+  ASSERT_TRUE(WriteFile(test_file(), "content"));
+  ASSERT_TRUE(CreateSymbolicLink(test_file(), test_link()));
+  FilePathWatcher watcher;
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  // Note that we are watching the symlink.
+  ASSERT_TRUE(SetupWatch(test_link(), &watcher, delegate.get(), false));
+
+  // Now make sure we get notified if the file is modified.
+  ASSERT_TRUE(WriteFile(test_file(), "new content"));
+  ASSERT_TRUE(WaitForEvents());
+}
+
+// Verify that creating a target file that a link is pointing to
+// when we are watching the link is caught.
+TEST_F(FilePathWatcherTest, CreateTargetLinkedFile) {
+  ASSERT_TRUE(CreateSymbolicLink(test_file(), test_link()));
+  FilePathWatcher watcher;
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  // Note that we are watching the symlink.
+  ASSERT_TRUE(SetupWatch(test_link(), &watcher, delegate.get(), false));
+
+  // Now make sure we get notified if the target file is created.
+  ASSERT_TRUE(WriteFile(test_file(), "content"));
+  ASSERT_TRUE(WaitForEvents());
+}
+
+// Verify that deleting a target file that a link is pointing to
+// when we are watching the link is caught.
+TEST_F(FilePathWatcherTest, DeleteTargetLinkedFile) {
+  ASSERT_TRUE(WriteFile(test_file(), "content"));
+  ASSERT_TRUE(CreateSymbolicLink(test_file(), test_link()));
+  FilePathWatcher watcher;
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  // Note that we are watching the symlink.
+  ASSERT_TRUE(SetupWatch(test_link(), &watcher, delegate.get(), false));
+
+  // Now make sure we get notified if the target file is deleted.
+  ASSERT_TRUE(base::DeleteFile(test_file(), false));
+  ASSERT_TRUE(WaitForEvents());
+}
+
+// Verify that watching a file whose parent directory is a link that
+// doesn't exist yet works if the symlink is created eventually.
+TEST_F(FilePathWatcherTest, LinkedDirectoryPart1) {
+  FilePathWatcher watcher;
+  FilePath dir(temp_dir_.GetPath().AppendASCII("dir"));
+  FilePath link_dir(temp_dir_.GetPath().AppendASCII("dir.lnk"));
+  FilePath file(dir.AppendASCII("file"));
+  FilePath linkfile(link_dir.AppendASCII("file"));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  // dir/file should exist.
+  ASSERT_TRUE(base::CreateDirectory(dir));
+  ASSERT_TRUE(WriteFile(file, "content"));
+  // Note that we are watching dir.lnk/file which doesn't exist yet.
+  ASSERT_TRUE(SetupWatch(linkfile, &watcher, delegate.get(), false));
+
+  ASSERT_TRUE(CreateSymbolicLink(dir, link_dir));
+  VLOG(1) << "Waiting for link creation";
+  ASSERT_TRUE(WaitForEvents());
+
+  ASSERT_TRUE(WriteFile(file, "content v2"));
+  VLOG(1) << "Waiting for file change";
+  ASSERT_TRUE(WaitForEvents());
+
+  ASSERT_TRUE(base::DeleteFile(file, false));
+  VLOG(1) << "Waiting for file deletion";
+  ASSERT_TRUE(WaitForEvents());
+}
+
+// Verify that watching a file whose parent directory is a
+// dangling symlink works if the directory is created eventually.
+TEST_F(FilePathWatcherTest, LinkedDirectoryPart2) {
+  FilePathWatcher watcher;
+  FilePath dir(temp_dir_.GetPath().AppendASCII("dir"));
+  FilePath link_dir(temp_dir_.GetPath().AppendASCII("dir.lnk"));
+  FilePath file(dir.AppendASCII("file"));
+  FilePath linkfile(link_dir.AppendASCII("file"));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  // Now create the link from dir.lnk pointing to dir but
+  // neither dir nor dir/file exist yet.
+  ASSERT_TRUE(CreateSymbolicLink(dir, link_dir));
+  // Note that we are watching dir.lnk/file.
+  ASSERT_TRUE(SetupWatch(linkfile, &watcher, delegate.get(), false));
+
+  ASSERT_TRUE(base::CreateDirectory(dir));
+  ASSERT_TRUE(WriteFile(file, "content"));
+  VLOG(1) << "Waiting for dir/file creation";
+  ASSERT_TRUE(WaitForEvents());
+
+  ASSERT_TRUE(WriteFile(file, "content v2"));
+  VLOG(1) << "Waiting for file change";
+  ASSERT_TRUE(WaitForEvents());
+
+  ASSERT_TRUE(base::DeleteFile(file, false));
+  VLOG(1) << "Waiting for file deletion";
+  ASSERT_TRUE(WaitForEvents());
+}
+
+// Verify that watching a file with a symlink on the path
+// to the file works.
+TEST_F(FilePathWatcherTest, LinkedDirectoryPart3) {
+  FilePathWatcher watcher;
+  FilePath dir(temp_dir_.GetPath().AppendASCII("dir"));
+  FilePath link_dir(temp_dir_.GetPath().AppendASCII("dir.lnk"));
+  FilePath file(dir.AppendASCII("file"));
+  FilePath linkfile(link_dir.AppendASCII("file"));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  ASSERT_TRUE(base::CreateDirectory(dir));
+  ASSERT_TRUE(CreateSymbolicLink(dir, link_dir));
+  // Note that we are watching dir.lnk/file but the file doesn't exist yet.
+  ASSERT_TRUE(SetupWatch(linkfile, &watcher, delegate.get(), false));
+
+  ASSERT_TRUE(WriteFile(file, "content"));
+  VLOG(1) << "Waiting for file creation";
+  ASSERT_TRUE(WaitForEvents());
+
+  ASSERT_TRUE(WriteFile(file, "content v2"));
+  VLOG(1) << "Waiting for file change";
+  ASSERT_TRUE(WaitForEvents());
+
+  ASSERT_TRUE(base::DeleteFile(file, false));
+  VLOG(1) << "Waiting for file deletion";
+  ASSERT_TRUE(WaitForEvents());
+}
+
+#endif  // OS_LINUX
+
+enum Permission {
+  Read,
+  Write,
+  Execute
+};
+
+#if defined(OS_MACOSX)
+bool ChangeFilePermissions(const FilePath& path, Permission perm, bool allow) {
+  struct stat stat_buf;
+
+  if (stat(path.value().c_str(), &stat_buf) != 0)
+    return false;
+
+  mode_t mode = 0;
+  switch (perm) {
+    case Read:
+      mode = S_IRUSR | S_IRGRP | S_IROTH;
+      break;
+    case Write:
+      mode = S_IWUSR | S_IWGRP | S_IWOTH;
+      break;
+    case Execute:
+      mode = S_IXUSR | S_IXGRP | S_IXOTH;
+      break;
+    default:
+      ADD_FAILURE() << "unknown perm " << perm;
+      return false;
+  }
+  if (allow) {
+    stat_buf.st_mode |= mode;
+  } else {
+    stat_buf.st_mode &= ~mode;
+  }
+  return chmod(path.value().c_str(), stat_buf.st_mode) == 0;
+}
+#endif  // defined(OS_MACOSX)
+
+#if defined(OS_MACOSX)
+// Linux implementation of FilePathWatcher doesn't catch attribute changes.
+// http://crbug.com/78043
+// Windows implementation of FilePathWatcher catches attribute changes that
+// don't affect the path being watched.
+// http://crbug.com/78045
+
+// Verify that changing attributes on a directory works.
+TEST_F(FilePathWatcherTest, DirAttributesChanged) {
+  FilePath test_dir1(
+      temp_dir_.GetPath().AppendASCII("DirAttributesChangedDir1"));
+  FilePath test_dir2(test_dir1.AppendASCII("DirAttributesChangedDir2"));
+  FilePath test_file(test_dir2.AppendASCII("DirAttributesChangedFile"));
+  // Set up a directory hierarchy.
+  ASSERT_TRUE(base::CreateDirectory(test_dir1));
+  ASSERT_TRUE(base::CreateDirectory(test_dir2));
+  ASSERT_TRUE(WriteFile(test_file, "content"));
+
+  FilePathWatcher watcher;
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  ASSERT_TRUE(SetupWatch(test_file, &watcher, delegate.get(), false));
+
+  // We should not get notified in this case as it hasn't affected our ability
+  // to access the file.
+  ASSERT_TRUE(ChangeFilePermissions(test_dir1, Read, false));
+  loop_.task_runner()->PostDelayedTask(
+      FROM_HERE, RunLoop::QuitCurrentWhenIdleClosureDeprecated(),
+      TestTimeouts::tiny_timeout());
+  ASSERT_FALSE(WaitForEvents());
+  ASSERT_TRUE(ChangeFilePermissions(test_dir1, Read, true));
+
+  // We should get notified in this case because FilePathWatcher can no
+  // longer access the file.
+  ASSERT_TRUE(ChangeFilePermissions(test_dir1, Execute, false));
+  ASSERT_TRUE(WaitForEvents());
+  ASSERT_TRUE(ChangeFilePermissions(test_dir1, Execute, true));
+}
+
+#endif  // OS_MACOSX
+}  // namespace
+
+}  // namespace base
diff --git a/base/files/file_path_watcher_win.cc b/base/files/file_path_watcher_win.cc
new file mode 100644
index 0000000..4614750
--- /dev/null
+++ b/base/files/file_path_watcher_win.cc
@@ -0,0 +1,290 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file_path_watcher.h"
+
+#include "base/bind.h"
+#include "base/files/file.h"
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/threading/sequenced_task_runner_handle.h"
+#include "base/time/time.h"
+#include "base/win/object_watcher.h"
+
+#include <windows.h>
+
+namespace base {
+
+namespace {
+
+class FilePathWatcherImpl : public FilePathWatcher::PlatformDelegate,
+                            public base::win::ObjectWatcher::Delegate {
+ public:
+  FilePathWatcherImpl()
+      : handle_(INVALID_HANDLE_VALUE),
+        recursive_watch_(false) {}
+  ~FilePathWatcherImpl() override;
+
+  // FilePathWatcher::PlatformDelegate:
+  bool Watch(const FilePath& path,
+             bool recursive,
+             const FilePathWatcher::Callback& callback) override;
+  void Cancel() override;
+
+  // base::win::ObjectWatcher::Delegate:
+  void OnObjectSignaled(HANDLE object) override;
+
+ private:
+  // Set up a watch handle for directory |dir|. Set |recursive| to true to
+  // watch
+  // the directory sub trees. Returns true if no fatal error occurs. |handle|
+  // will receive the handle value if |dir| is watchable, otherwise
+  // INVALID_HANDLE_VALUE.
+  static bool SetupWatchHandle(const FilePath& dir,
+                               bool recursive,
+                               HANDLE* handle) WARN_UNUSED_RESULT;
+
+  // (Re-)Initialize the watch handle.
+  bool UpdateWatch() WARN_UNUSED_RESULT;
+
+  // Destroy the watch handle.
+  void DestroyWatch();
+
+  // Callback to notify upon changes.
+  FilePathWatcher::Callback callback_;
+
+  // Path we're supposed to watch (passed to callback).
+  FilePath target_;
+
+  // Set to true in the destructor.
+  bool* was_deleted_ptr_ = nullptr;
+
+  // Handle for FindFirstChangeNotification.
+  HANDLE handle_;
+
+  // ObjectWatcher to watch handle_ for events.
+  base::win::ObjectWatcher watcher_;
+
+  // Set to true to watch the sub trees of the specified directory file path.
+  bool recursive_watch_;
+
+  // Keep track of the last modified time of the file. A null Time represents
+  // a file that does not exist.
+  Time last_modified_;
+
+  // The time at which we processed the first notification with the
+  // |last_modified_| time stamp.
+  Time first_notification_;
+
+  DISALLOW_COPY_AND_ASSIGN(FilePathWatcherImpl);
+};
+
+FilePathWatcherImpl::~FilePathWatcherImpl() {
+  DCHECK(!task_runner() || task_runner()->RunsTasksInCurrentSequence());
+  if (was_deleted_ptr_)
+    *was_deleted_ptr_ = true;
+}
+
+bool FilePathWatcherImpl::Watch(const FilePath& path,
+                                bool recursive,
+                                const FilePathWatcher::Callback& callback) {
+  DCHECK(target_.value().empty());  // Can only watch one path.
+
+  set_task_runner(SequencedTaskRunnerHandle::Get());
+  callback_ = callback;
+  target_ = path;
+  recursive_watch_ = recursive;
+
+  File::Info file_info;
+  if (GetFileInfo(target_, &file_info)) {
+    last_modified_ = file_info.last_modified;
+    first_notification_ = Time::Now();
+  }
+
+  if (!UpdateWatch())
+    return false;
+
+  watcher_.StartWatchingOnce(handle_, this);
+
+  return true;
+}
+
+void FilePathWatcherImpl::Cancel() {
+  if (callback_.is_null()) {
+    // Watch was never called, or the |task_runner_| has already quit.
+    set_cancelled();
+    return;
+  }
+
+  DCHECK(task_runner()->RunsTasksInCurrentSequence());
+  set_cancelled();
+
+  if (handle_ != INVALID_HANDLE_VALUE)
+    DestroyWatch();
+
+  callback_.Reset();
+}
+
+void FilePathWatcherImpl::OnObjectSignaled(HANDLE object) {
+  DCHECK(task_runner()->RunsTasksInCurrentSequence());
+  DCHECK_EQ(object, handle_);
+  DCHECK(!was_deleted_ptr_);
+
+  bool was_deleted = false;
+  was_deleted_ptr_ = &was_deleted;
+
+  if (!UpdateWatch()) {
+    callback_.Run(target_, true /* error */);
+    return;
+  }
+
+  // Check whether the event applies to |target_| and notify the callback.
+  File::Info file_info;
+  bool file_exists = GetFileInfo(target_, &file_info);
+  if (recursive_watch_) {
+    // Only the mtime of |target_| is tracked but in a recursive watch,
+    // some other file or directory may have changed so all notifications
+    // are passed through. It is possible to figure out which file changed
+    // using ReadDirectoryChangesW() instead of FindFirstChangeNotification(),
+    // but that function is quite complicated:
+    // http://qualapps.blogspot.com/2010/05/understanding-readdirectorychangesw.html
+    callback_.Run(target_, false);
+  } else if (file_exists && (last_modified_.is_null() ||
+             last_modified_ != file_info.last_modified)) {
+    last_modified_ = file_info.last_modified;
+    first_notification_ = Time::Now();
+    callback_.Run(target_, false);
+  } else if (file_exists && last_modified_ == file_info.last_modified &&
+             !first_notification_.is_null()) {
+    // The target's last modification time is equal to what's on record. This
+    // means that either an unrelated event occurred, or the target changed
+    // again (file modification times only have a resolution of 1s). Comparing
+    // file modification times against the wall clock is not reliable to find
+    // out whether the change is recent, since this code might just run too
+    // late. Moreover, there's no guarantee that file modification time and wall
+    // clock times come from the same source.
+    //
+    // Instead, the time at which the first notification carrying the current
+    // |last_modified_| time stamp arrived is recorded. Later notifications
+    // that find the same file modification time only need to be forwarded
+    // until the wall clock has advanced one second past the initial
+    // notification. After that interval, client code is guaranteed to have
+    // seen the current revision of the file.
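+    // Illustrative timeline: a write at t=10.0s stamps mtime 10s and the
+    // first notification records |first_notification_| at t=10.2s. A second
+    // write at t=10.5s reuses mtime 10s, so its notification is forwarded
+    // too. Once a notification arrives after t=11.2s, |first_notification_|
+    // is cleared and later same-mtime events are no longer forwarded.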
+    if (Time::Now() - first_notification_ > TimeDelta::FromSeconds(1)) {
+      // Stop further notifications for this |last_modified_| time stamp.
+      first_notification_ = Time();
+    }
+    callback_.Run(target_, false);
+  } else if (!file_exists && !last_modified_.is_null()) {
+    last_modified_ = Time();
+    callback_.Run(target_, false);
+  }
+
+  // The watch may have been cancelled by the callback.
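+  // StartWatchingOnce() only delivers a single signal, so the watch must be
+  // re-armed here to receive further notifications.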
+  if (!was_deleted) {
+    watcher_.StartWatchingOnce(handle_, this);
+    was_deleted_ptr_ = nullptr;
+  }
+}
+
+// static
+bool FilePathWatcherImpl::SetupWatchHandle(const FilePath& dir,
+                                           bool recursive,
+                                           HANDLE* handle) {
+  *handle = FindFirstChangeNotification(
+      dir.value().c_str(),
+      recursive,
+      FILE_NOTIFY_CHANGE_FILE_NAME | FILE_NOTIFY_CHANGE_SIZE |
+      FILE_NOTIFY_CHANGE_LAST_WRITE | FILE_NOTIFY_CHANGE_DIR_NAME |
+      FILE_NOTIFY_CHANGE_ATTRIBUTES | FILE_NOTIFY_CHANGE_SECURITY);
+  if (*handle != INVALID_HANDLE_VALUE) {
+    // Make sure the handle we got points to an existing directory. It seems
+    // that Windows sometimes hands out watches to directories that are
+    // about to go away, but doesn't send notifications if that happens.
+    if (!DirectoryExists(dir)) {
+      FindCloseChangeNotification(*handle);
+      *handle = INVALID_HANDLE_VALUE;
+    }
+    return true;
+  }
+
+  // If FindFirstChangeNotification failed because the target directory
+  // doesn't exist, access is denied (happens if the file is already gone but
+  // there are still handles open), or the target is not a directory, try the
+  // immediate parent directory instead.
+  DWORD error_code = GetLastError();
+  if (error_code != ERROR_FILE_NOT_FOUND &&
+      error_code != ERROR_PATH_NOT_FOUND &&
+      error_code != ERROR_ACCESS_DENIED &&
+      error_code != ERROR_SHARING_VIOLATION &&
+      error_code != ERROR_DIRECTORY) {
+    DPLOG(ERROR) << "FindFirstChangeNotification failed for "
+                 << dir.value();
+    return false;
+  }
+
+  return true;
+}
+
+bool FilePathWatcherImpl::UpdateWatch() {
+  if (handle_ != INVALID_HANDLE_VALUE)
+    DestroyWatch();
+
+  // Start at the target and walk up the directory chain until we successfully
+  // create a watch handle in |handle_|. |child_dirs| keeps a stack of child
+  // directories stripped from target, in reverse order.
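+  // For example, watching "c:\a\b\file" when only "c:\a" exists tries
+  // "c:\a\b\file" and "c:\a\b" without success, succeeds on "c:\a", and
+  // leaves |child_dirs| = {"file", "b"}.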
+  std::vector<FilePath> child_dirs;
+  FilePath watched_path(target_);
+  while (true) {
+    if (!SetupWatchHandle(watched_path, recursive_watch_, &handle_))
+      return false;
+
+    // Break if a valid handle is returned. Try the parent directory otherwise.
+    if (handle_ != INVALID_HANDLE_VALUE)
+      break;
+
+    // Abort if we hit the root directory.
+    child_dirs.push_back(watched_path.BaseName());
+    FilePath parent(watched_path.DirName());
+    if (parent == watched_path) {
+      DLOG(ERROR) << "Reached the root directory";
+      return false;
+    }
+    watched_path = parent;
+  }
+
+  // At this point, |handle_| is valid. However, the bottom-up search above
+  // races against directory creation, so walk back down and check whether any
+  // children appeared in the meantime.
+  while (!child_dirs.empty()) {
+    watched_path = watched_path.Append(child_dirs.back());
+    child_dirs.pop_back();
+    HANDLE temp_handle = INVALID_HANDLE_VALUE;
+    if (!SetupWatchHandle(watched_path, recursive_watch_, &temp_handle))
+      return false;
+    if (temp_handle == INVALID_HANDLE_VALUE)
+      break;
+    FindCloseChangeNotification(handle_);
+    handle_ = temp_handle;
+  }
+
+  return true;
+}
+
+void FilePathWatcherImpl::DestroyWatch() {
+  watcher_.StopWatching();
+  FindCloseChangeNotification(handle_);
+  handle_ = INVALID_HANDLE_VALUE;
+}
+
+}  // namespace
+
+FilePathWatcher::FilePathWatcher() {
+  sequence_checker_.DetachFromSequence();
+  impl_ = std::make_unique<FilePathWatcherImpl>();
+}
+
+}  // namespace base
diff --git a/base/files/file_posix.cc b/base/files/file_posix.cc
new file mode 100644
index 0000000..d538b66
--- /dev/null
+++ b/base/files/file_posix.cc
@@ -0,0 +1,547 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdint.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include "base/logging.h"
+#include "base/metrics/histogram_functions.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/threading/thread_restrictions.h"
+#include "build/build_config.h"
+
+#if defined(OS_ANDROID)
+#include "base/os_compat_android.h"
+#endif
+
+namespace base {
+
+// Make sure our Whence mappings match the system headers.
+static_assert(File::FROM_BEGIN == SEEK_SET && File::FROM_CURRENT == SEEK_CUR &&
+                  File::FROM_END == SEEK_END,
+              "whence mapping must match the system headers");
+
+namespace {
+
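+// Pick the fstat() variant per platform: the first branch covers systems
+// whose plain fstat() already handles 64-bit sizes (or, for Android before
+// API 21, where fstat64() is presumably unavailable); everything else uses
+// the explicit 64-bit call.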
+#if defined(OS_BSD) || defined(OS_MACOSX) || defined(OS_NACL) || \
+    (defined(OS_ANDROID) && __ANDROID_API__ < 21)
+int CallFstat(int fd, stat_wrapper_t *sb) {
+  AssertBlockingAllowed();
+  return fstat(fd, sb);
+}
+#else
+int CallFstat(int fd, stat_wrapper_t *sb) {
+  AssertBlockingAllowed();
+  return fstat64(fd, sb);
+}
+#endif
+
+// NaCl doesn't provide the following system calls, so either simulate them or
+// wrap them in order to minimize the number of #ifdef's in this file.
+#if !defined(OS_NACL) && !defined(OS_AIX)
+bool IsOpenAppend(PlatformFile file) {
+  return (fcntl(file, F_GETFL) & O_APPEND) != 0;
+}
+
+int CallFtruncate(PlatformFile file, int64_t length) {
+  return HANDLE_EINTR(ftruncate(file, length));
+}
+
+int CallFutimes(PlatformFile file, const struct timeval times[2]) {
+#ifdef __USE_XOPEN2K8
+  // futimens should be available, but futimes might not be
+  // http://pubs.opengroup.org/onlinepubs/9699919799/
+
+  timespec ts_times[2];
+  ts_times[0].tv_sec  = times[0].tv_sec;
+  ts_times[0].tv_nsec = times[0].tv_usec * 1000;
+  ts_times[1].tv_sec  = times[1].tv_sec;
+  ts_times[1].tv_nsec = times[1].tv_usec * 1000;
+
+  return futimens(file, ts_times);
+#else
+  return futimes(file, times);
+#endif
+}
+
+#if !defined(OS_FUCHSIA)
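+// Takes or releases an advisory, whole-file write lock. F_SETLK fails
+// immediately instead of blocking when a conflicting lock is held elsewhere.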
+File::Error CallFcntlFlock(PlatformFile file, bool do_lock) {
+  struct flock lock;
+  lock.l_type = do_lock ? F_WRLCK : F_UNLCK;
+  lock.l_whence = SEEK_SET;
+  lock.l_start = 0;
+  lock.l_len = 0;  // Lock entire file.
+  if (HANDLE_EINTR(fcntl(file, F_SETLK, &lock)) == -1)
+    return File::GetLastFileError();
+  return File::FILE_OK;
+}
+#endif
+
+#else   // defined(OS_NACL) || defined(OS_AIX)
+
+bool IsOpenAppend(PlatformFile file) {
+  // NaCl doesn't implement fcntl. Since NaCl's write conforms to the POSIX
+  // standard and always appends if the file is opened with O_APPEND, just
+  // return false here.
+  return false;
+}
+
+int CallFtruncate(PlatformFile file, int64_t length) {
+  NOTIMPLEMENTED();  // NaCl doesn't implement ftruncate.
+  return 0;
+}
+
+int CallFutimes(PlatformFile file, const struct timeval times[2]) {
+  NOTIMPLEMENTED();  // NaCl doesn't implement futimes.
+  return 0;
+}
+
+File::Error CallFcntlFlock(PlatformFile file, bool do_lock) {
+  NOTIMPLEMENTED();  // NaCl doesn't implement flock struct.
+  return File::FILE_ERROR_INVALID_OPERATION;
+}
+#endif  // defined(OS_NACL) || defined(OS_AIX)
+
+}  // namespace
+
+void File::Info::FromStat(const stat_wrapper_t& stat_info) {
+  is_directory = S_ISDIR(stat_info.st_mode);
+  is_symbolic_link = S_ISLNK(stat_info.st_mode);
+  size = stat_info.st_size;
+
+#if defined(OS_LINUX)
+  time_t last_modified_sec = stat_info.st_mtim.tv_sec;
+  int64_t last_modified_nsec = stat_info.st_mtim.tv_nsec;
+  time_t last_accessed_sec = stat_info.st_atim.tv_sec;
+  int64_t last_accessed_nsec = stat_info.st_atim.tv_nsec;
+  time_t creation_time_sec = stat_info.st_ctim.tv_sec;
+  int64_t creation_time_nsec = stat_info.st_ctim.tv_nsec;
+#elif defined(OS_ANDROID)
+  time_t last_modified_sec = stat_info.st_mtime;
+  int64_t last_modified_nsec = stat_info.st_mtime_nsec;
+  time_t last_accessed_sec = stat_info.st_atime;
+  int64_t last_accessed_nsec = stat_info.st_atime_nsec;
+  time_t creation_time_sec = stat_info.st_ctime;
+  int64_t creation_time_nsec = stat_info.st_ctime_nsec;
+#elif defined(OS_MACOSX) || defined(OS_IOS) || defined(OS_BSD)
+  time_t last_modified_sec = stat_info.st_mtimespec.tv_sec;
+  int64_t last_modified_nsec = stat_info.st_mtimespec.tv_nsec;
+  time_t last_accessed_sec = stat_info.st_atimespec.tv_sec;
+  int64_t last_accessed_nsec = stat_info.st_atimespec.tv_nsec;
+  time_t creation_time_sec = stat_info.st_ctimespec.tv_sec;
+  int64_t creation_time_nsec = stat_info.st_ctimespec.tv_nsec;
+#else
+  time_t last_modified_sec = stat_info.st_mtime;
+  int64_t last_modified_nsec = 0;
+  time_t last_accessed_sec = stat_info.st_atime;
+  int64_t last_accessed_nsec = 0;
+  time_t creation_time_sec = stat_info.st_ctime;
+  int64_t creation_time_nsec = 0;
+#endif
+
+  last_modified =
+      Time::FromTimeT(last_modified_sec) +
+      TimeDelta::FromMicroseconds(last_modified_nsec /
+                                  Time::kNanosecondsPerMicrosecond);
+
+  last_accessed =
+      Time::FromTimeT(last_accessed_sec) +
+      TimeDelta::FromMicroseconds(last_accessed_nsec /
+                                  Time::kNanosecondsPerMicrosecond);
+
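+  // Note: POSIX st_ctime is the inode status-change time, not a true creation
+  // time, so |creation_time| is only an approximation on these platforms.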
+  creation_time =
+      Time::FromTimeT(creation_time_sec) +
+      TimeDelta::FromMicroseconds(creation_time_nsec /
+                                  Time::kNanosecondsPerMicrosecond);
+}
+
+bool File::IsValid() const {
+  return file_.is_valid();
+}
+
+PlatformFile File::GetPlatformFile() const {
+  return file_.get();
+}
+
+PlatformFile File::TakePlatformFile() {
+  return file_.release();
+}
+
+void File::Close() {
+  if (!IsValid())
+    return;
+
+  SCOPED_FILE_TRACE("Close");
+  AssertBlockingAllowed();
+  file_.reset();
+}
+
+int64_t File::Seek(Whence whence, int64_t offset) {
+  AssertBlockingAllowed();
+  DCHECK(IsValid());
+
+  SCOPED_FILE_TRACE_WITH_SIZE("Seek", offset);
+
+#if defined(OS_ANDROID)
+  static_assert(sizeof(int64_t) == sizeof(off64_t), "off64_t must be 64 bits");
+  return lseek64(file_.get(), static_cast<off64_t>(offset),
+                 static_cast<int>(whence));
+#else
+  static_assert(sizeof(int64_t) == sizeof(off_t), "off_t must be 64 bits");
+  return lseek(file_.get(), static_cast<off_t>(offset),
+               static_cast<int>(whence));
+#endif
+}
+
+int File::Read(int64_t offset, char* data, int size) {
+  AssertBlockingAllowed();
+  DCHECK(IsValid());
+  if (size < 0)
+    return -1;
+
+  SCOPED_FILE_TRACE_WITH_SIZE("Read", size);
+
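+  // pread() may return fewer bytes than requested, so loop until |size| bytes
+  // have been read or the call reports EOF (0) or an error (-1).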
+  int bytes_read = 0;
+  int rv;
+  do {
+    rv = HANDLE_EINTR(pread(file_.get(), data + bytes_read,
+                            size - bytes_read, offset + bytes_read));
+    if (rv <= 0)
+      break;
+
+    bytes_read += rv;
+  } while (bytes_read < size);
+
+  return bytes_read ? bytes_read : rv;
+}
+
+int File::ReadAtCurrentPos(char* data, int size) {
+  AssertBlockingAllowed();
+  DCHECK(IsValid());
+  if (size < 0)
+    return -1;
+
+  SCOPED_FILE_TRACE_WITH_SIZE("ReadAtCurrentPos", size);
+
+  int bytes_read = 0;
+  int rv;
+  do {
+    rv = HANDLE_EINTR(read(file_.get(), data + bytes_read, size - bytes_read));
+    if (rv <= 0)
+      break;
+
+    bytes_read += rv;
+  } while (bytes_read < size);
+
+  return bytes_read ? bytes_read : rv;
+}
+
+int File::ReadNoBestEffort(int64_t offset, char* data, int size) {
+  AssertBlockingAllowed();
+  DCHECK(IsValid());
+  SCOPED_FILE_TRACE_WITH_SIZE("ReadNoBestEffort", size);
+  return HANDLE_EINTR(pread(file_.get(), data, size, offset));
+}
+
+int File::ReadAtCurrentPosNoBestEffort(char* data, int size) {
+  AssertBlockingAllowed();
+  DCHECK(IsValid());
+  if (size < 0)
+    return -1;
+
+  SCOPED_FILE_TRACE_WITH_SIZE("ReadAtCurrentPosNoBestEffort", size);
+  return HANDLE_EINTR(read(file_.get(), data, size));
+}
+
+int File::Write(int64_t offset, const char* data, int size) {
+  AssertBlockingAllowed();
+
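+  // On some systems (notably Linux), pwrite() on a descriptor opened with
+  // O_APPEND appends and ignores |offset|, so route such files through
+  // WriteAtCurrentPos().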
+  if (IsOpenAppend(file_.get()))
+    return WriteAtCurrentPos(data, size);
+
+  DCHECK(IsValid());
+  if (size < 0)
+    return -1;
+
+  SCOPED_FILE_TRACE_WITH_SIZE("Write", size);
+
+  int bytes_written = 0;
+  int rv;
+  do {
+    rv = HANDLE_EINTR(pwrite(file_.get(), data + bytes_written,
+                             size - bytes_written, offset + bytes_written));
+    if (rv <= 0)
+      break;
+
+    bytes_written += rv;
+  } while (bytes_written < size);
+
+  return bytes_written ? bytes_written : rv;
+}
+
+int File::WriteAtCurrentPos(const char* data, int size) {
+  AssertBlockingAllowed();
+  DCHECK(IsValid());
+  if (size < 0)
+    return -1;
+
+  SCOPED_FILE_TRACE_WITH_SIZE("WriteAtCurrentPos", size);
+
+  int bytes_written = 0;
+  int rv;
+  do {
+    rv = HANDLE_EINTR(write(file_.get(), data + bytes_written,
+                            size - bytes_written));
+    if (rv <= 0)
+      break;
+
+    bytes_written += rv;
+  } while (bytes_written < size);
+
+  return bytes_written ? bytes_written : rv;
+}
+
+int File::WriteAtCurrentPosNoBestEffort(const char* data, int size) {
+  AssertBlockingAllowed();
+  DCHECK(IsValid());
+  if (size < 0)
+    return -1;
+
+  SCOPED_FILE_TRACE_WITH_SIZE("WriteAtCurrentPosNoBestEffort", size);
+  return HANDLE_EINTR(write(file_.get(), data, size));
+}
+
+int64_t File::GetLength() {
+  DCHECK(IsValid());
+
+  SCOPED_FILE_TRACE("GetLength");
+
+  stat_wrapper_t file_info;
+  if (CallFstat(file_.get(), &file_info))
+    return -1;
+
+  return file_info.st_size;
+}
+
+bool File::SetLength(int64_t length) {
+  AssertBlockingAllowed();
+  DCHECK(IsValid());
+
+  SCOPED_FILE_TRACE_WITH_SIZE("SetLength", length);
+  return !CallFtruncate(file_.get(), length);
+}
+
+bool File::SetTimes(Time last_access_time, Time last_modified_time) {
+  AssertBlockingAllowed();
+  DCHECK(IsValid());
+
+  SCOPED_FILE_TRACE("SetTimes");
+
+  timeval times[2];
+  times[0] = last_access_time.ToTimeVal();
+  times[1] = last_modified_time.ToTimeVal();
+
+  return !CallFutimes(file_.get(), times);
+}
+
+bool File::GetInfo(Info* info) {
+  DCHECK(IsValid());
+
+  SCOPED_FILE_TRACE("GetInfo");
+
+  stat_wrapper_t file_info;
+  if (CallFstat(file_.get(), &file_info))
+    return false;
+
+  info->FromStat(file_info);
+  return true;
+}
+
+#if !defined(OS_FUCHSIA)
+File::Error File::Lock() {
+  SCOPED_FILE_TRACE("Lock");
+  return CallFcntlFlock(file_.get(), true);
+}
+
+File::Error File::Unlock() {
+  SCOPED_FILE_TRACE("Unlock");
+  return CallFcntlFlock(file_.get(), false);
+}
+#endif
+
+File File::Duplicate() const {
+  if (!IsValid())
+    return File();
+
+  SCOPED_FILE_TRACE("Duplicate");
+
+  PlatformFile other_fd = HANDLE_EINTR(dup(GetPlatformFile()));
+  if (other_fd == -1)
+    return File(File::GetLastFileError());
+
+  File other(other_fd);
+  if (async())
+    other.async_ = true;
+  return other;
+}
+
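+// Translates a saved errno value into the platform-independent File::Error
+// space. Callers should capture errno immediately after the failing call,
+// before any other library call can clobber it.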
+// static
+File::Error File::OSErrorToFileError(int saved_errno) {
+  switch (saved_errno) {
+    case EACCES:
+    case EISDIR:
+    case EROFS:
+    case EPERM:
+      return FILE_ERROR_ACCESS_DENIED;
+    case EBUSY:
+#if !defined(OS_NACL)  // ETXTBSY not defined by NaCl.
+    case ETXTBSY:
+#endif
+      return FILE_ERROR_IN_USE;
+    case EEXIST:
+      return FILE_ERROR_EXISTS;
+    case EIO:
+      return FILE_ERROR_IO;
+    case ENOENT:
+      return FILE_ERROR_NOT_FOUND;
+    case ENFILE:  // fallthrough
+    case EMFILE:
+      return FILE_ERROR_TOO_MANY_OPENED;
+    case ENOMEM:
+      return FILE_ERROR_NO_MEMORY;
+    case ENOSPC:
+      return FILE_ERROR_NO_SPACE;
+    case ENOTDIR:
+      return FILE_ERROR_NOT_A_DIRECTORY;
+    default:
+#if !defined(OS_NACL)  // NaCl build has no metrics code.
+      UmaHistogramSparse("PlatformFile.UnknownErrors.Posix", saved_errno);
+#endif
+      // This function should only be called for errors.
+      DCHECK_NE(0, saved_errno);
+      return FILE_ERROR_FAILED;
+  }
+}
+
+// NaCl doesn't implement system calls to open files directly.
+#if !defined(OS_NACL)
+// TODO(erikkay): does it make sense to support FLAG_EXCLUSIVE_* here?
+void File::DoInitialize(const FilePath& path, uint32_t flags) {
+  AssertBlockingAllowed();
+  DCHECK(!IsValid());
+
+  int open_flags = 0;
+  if (flags & FLAG_CREATE)
+    open_flags = O_CREAT | O_EXCL;
+
+  created_ = false;
+
+  if (flags & FLAG_CREATE_ALWAYS) {
+    DCHECK(!open_flags);
+    DCHECK(flags & FLAG_WRITE);
+    open_flags = O_CREAT | O_TRUNC;
+  }
+
+  if (flags & FLAG_OPEN_TRUNCATED) {
+    DCHECK(!open_flags);
+    DCHECK(flags & FLAG_WRITE);
+    open_flags = O_TRUNC;
+  }
+
+  if (!open_flags && !(flags & FLAG_OPEN) && !(flags & FLAG_OPEN_ALWAYS)) {
+    NOTREACHED();
+    errno = EOPNOTSUPP;
+    error_details_ = FILE_ERROR_FAILED;
+    return;
+  }
+
+  if (flags & FLAG_WRITE && flags & FLAG_READ) {
+    open_flags |= O_RDWR;
+  } else if (flags & FLAG_WRITE) {
+    open_flags |= O_WRONLY;
+  } else if (!(flags & FLAG_READ) &&
+             !(flags & FLAG_WRITE_ATTRIBUTES) &&
+             !(flags & FLAG_APPEND) &&
+             !(flags & FLAG_OPEN_ALWAYS)) {
+    NOTREACHED();
+  }
+
+  if (flags & FLAG_TERMINAL_DEVICE)
+    open_flags |= O_NOCTTY | O_NDELAY;
+
+  if (flags & FLAG_APPEND && flags & FLAG_READ)
+    open_flags |= O_APPEND | O_RDWR;
+  else if (flags & FLAG_APPEND)
+    open_flags |= O_APPEND | O_WRONLY;
+
+  static_assert(O_RDONLY == 0, "O_RDONLY must equal zero");
+
+  int mode = S_IRUSR | S_IWUSR;
+#if defined(OS_CHROMEOS)
+  mode |= S_IRGRP | S_IROTH;
+#endif
+
+  int descriptor = HANDLE_EINTR(open(path.value().c_str(), open_flags, mode));
+
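+  // FLAG_OPEN_ALWAYS first tries to open an existing file; only if that fails
+  // is the open retried with O_CREAT, and |created_| is set when the retry
+  // succeeds.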
+  if (flags & FLAG_OPEN_ALWAYS) {
+    if (descriptor < 0) {
+      open_flags |= O_CREAT;
+      if (flags & FLAG_EXCLUSIVE_READ || flags & FLAG_EXCLUSIVE_WRITE)
+        open_flags |= O_EXCL;   // together with O_CREAT implies O_NOFOLLOW
+
+      descriptor = HANDLE_EINTR(open(path.value().c_str(), open_flags, mode));
+      if (descriptor >= 0)
+        created_ = true;
+    }
+  }
+
+  if (descriptor < 0) {
+    error_details_ = File::GetLastFileError();
+    return;
+  }
+
+  if (flags & (FLAG_CREATE_ALWAYS | FLAG_CREATE))
+    created_ = true;
+
+  if (flags & FLAG_DELETE_ON_CLOSE)
+    unlink(path.value().c_str());
+
+  async_ = ((flags & FLAG_ASYNC) == FLAG_ASYNC);
+  error_details_ = FILE_OK;
+  file_.reset(descriptor);
+}
+#endif  // !defined(OS_NACL)
+
+bool File::Flush() {
+  AssertBlockingAllowed();
+  DCHECK(IsValid());
+  SCOPED_FILE_TRACE("Flush");
+
+#if defined(OS_NACL)
+  NOTIMPLEMENTED();  // NaCl doesn't implement fsync.
+  return true;
+#elif defined(OS_LINUX) || defined(OS_ANDROID)
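+  // fdatasync() flushes file data without forcing out unneeded metadata such
+  // as timestamps, making it cheaper than a full fsync().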
+  return !HANDLE_EINTR(fdatasync(file_.get()));
+#else
+  return !HANDLE_EINTR(fsync(file_.get()));
+#endif
+}
+
+void File::SetPlatformFile(PlatformFile file) {
+  DCHECK(!file_.is_valid());
+  file_.reset(file);
+}
+
+// static
+File::Error File::GetLastFileError() {
+  return base::File::OSErrorToFileError(errno);
+}
+
+}  // namespace base
diff --git a/base/files/file_proxy.cc b/base/files/file_proxy.cc
new file mode 100644
index 0000000..f16e594
--- /dev/null
+++ b/base/files/file_proxy.cc
@@ -0,0 +1,358 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file_proxy.h"
+
+#include <utility>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/files/file.h"
+#include "base/files/file_util.h"
+#include "base/location.h"
+#include "base/macros.h"
+#include "base/task_runner.h"
+#include "base/task_runner_util.h"
+
+namespace {
+
+void FileDeleter(base::File file) {
+}
+
+}  // namespace
+
+namespace base {
+
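+// Shuttles a File between the proxy's sequence and its |task_runner_|. If the
+// owning FileProxy has been destroyed by the time a task replies, PassFile()
+// hands the file back to the task runner via FileDeleter, so it is closed
+// there rather than leaked.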
+class FileHelper {
+ public:
+  FileHelper(FileProxy* proxy, File file)
+      : file_(std::move(file)),
+        error_(File::FILE_ERROR_FAILED),
+        task_runner_(proxy->task_runner()),
+        proxy_(AsWeakPtr(proxy)) {}
+
+  void PassFile() {
+    if (proxy_)
+      proxy_->SetFile(std::move(file_));
+    else if (file_.IsValid())
+      task_runner_->PostTask(FROM_HERE,
+                             BindOnce(&FileDeleter, std::move(file_)));
+  }
+
+ protected:
+  File file_;
+  File::Error error_;
+
+ private:
+  scoped_refptr<TaskRunner> task_runner_;
+  WeakPtr<FileProxy> proxy_;
+  DISALLOW_COPY_AND_ASSIGN(FileHelper);
+};
+
+namespace {
+
+class GenericFileHelper : public FileHelper {
+ public:
+  GenericFileHelper(FileProxy* proxy, File file)
+      : FileHelper(proxy, std::move(file)) {
+  }
+
+  void Close() {
+    file_.Close();
+    error_ = File::FILE_OK;
+  }
+
+  void SetTimes(Time last_access_time, Time last_modified_time) {
+    bool rv = file_.SetTimes(last_access_time, last_modified_time);
+    error_ = rv ? File::FILE_OK : File::FILE_ERROR_FAILED;
+  }
+
+  void SetLength(int64_t length) {
+    if (file_.SetLength(length))
+      error_ = File::FILE_OK;
+  }
+
+  void Flush() {
+    if (file_.Flush())
+      error_ = File::FILE_OK;
+  }
+
+  void Reply(FileProxy::StatusCallback callback) {
+    PassFile();
+    if (!callback.is_null())
+      std::move(callback).Run(error_);
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(GenericFileHelper);
+};
+
+class CreateOrOpenHelper : public FileHelper {
+ public:
+  CreateOrOpenHelper(FileProxy* proxy, File file)
+      : FileHelper(proxy, std::move(file)) {
+  }
+
+  void RunWork(const FilePath& file_path, int file_flags) {
+    file_.Initialize(file_path, file_flags);
+    error_ = file_.IsValid() ? File::FILE_OK : file_.error_details();
+  }
+
+  void Reply(FileProxy::StatusCallback callback) {
+    DCHECK(!callback.is_null());
+    PassFile();
+    std::move(callback).Run(error_);
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(CreateOrOpenHelper);
+};
+
+class CreateTemporaryHelper : public FileHelper {
+ public:
+  CreateTemporaryHelper(FileProxy* proxy, File file)
+      : FileHelper(proxy, std::move(file)) {
+  }
+
+  void RunWork(uint32_t additional_file_flags) {
+    // TODO(darin): file_util should have a variant of CreateTemporaryFile
+    // that returns a FilePath and a File.
+    if (!CreateTemporaryFile(&file_path_)) {
+      // TODO(davidben): base::CreateTemporaryFile should preserve the error
+      // code.
+      error_ = File::FILE_ERROR_FAILED;
+      return;
+    }
+
+    uint32_t file_flags = File::FLAG_WRITE | File::FLAG_TEMPORARY |
+                          File::FLAG_CREATE_ALWAYS | additional_file_flags;
+
+    file_.Initialize(file_path_, file_flags);
+    if (file_.IsValid()) {
+      error_ = File::FILE_OK;
+    } else {
+      error_ = file_.error_details();
+      DeleteFile(file_path_, false);
+      file_path_.clear();
+    }
+  }
+
+  void Reply(FileProxy::CreateTemporaryCallback callback) {
+    DCHECK(!callback.is_null());
+    PassFile();
+    std::move(callback).Run(error_, file_path_);
+  }
+
+ private:
+  FilePath file_path_;
+  DISALLOW_COPY_AND_ASSIGN(CreateTemporaryHelper);
+};
+
+class GetInfoHelper : public FileHelper {
+ public:
+  GetInfoHelper(FileProxy* proxy, File file)
+      : FileHelper(proxy, std::move(file)) {
+  }
+
+  void RunWork() {
+    if (file_.GetInfo(&file_info_))
+      error_  = File::FILE_OK;
+  }
+
+  void Reply(FileProxy::GetFileInfoCallback callback) {
+    PassFile();
+    DCHECK(!callback.is_null());
+    std::move(callback).Run(error_, file_info_);
+  }
+
+ private:
+  File::Info file_info_;
+  DISALLOW_COPY_AND_ASSIGN(GetInfoHelper);
+};
+
+class ReadHelper : public FileHelper {
+ public:
+  ReadHelper(FileProxy* proxy, File file, int bytes_to_read)
+      : FileHelper(proxy, std::move(file)),
+        buffer_(new char[bytes_to_read]),
+        bytes_to_read_(bytes_to_read),
+        bytes_read_(0) {
+  }
+
+  void RunWork(int64_t offset) {
+    bytes_read_ = file_.Read(offset, buffer_.get(), bytes_to_read_);
+    error_ = (bytes_read_ < 0) ? File::FILE_ERROR_FAILED : File::FILE_OK;
+  }
+
+  void Reply(FileProxy::ReadCallback callback) {
+    PassFile();
+    DCHECK(!callback.is_null());
+    std::move(callback).Run(error_, buffer_.get(), bytes_read_);
+  }
+
+ private:
+  std::unique_ptr<char[]> buffer_;
+  int bytes_to_read_;
+  int bytes_read_;
+  DISALLOW_COPY_AND_ASSIGN(ReadHelper);
+};
+
+class WriteHelper : public FileHelper {
+ public:
+  WriteHelper(FileProxy* proxy,
+              File file,
+              const char* buffer, int bytes_to_write)
+      : FileHelper(proxy, std::move(file)),
+        buffer_(new char[bytes_to_write]),
+        bytes_to_write_(bytes_to_write),
+        bytes_written_(0) {
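+    // The write runs asynchronously, so take a copy of |buffer| up front; the
+    // caller's buffer may be gone by the time RunWork() executes.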
+    memcpy(buffer_.get(), buffer, bytes_to_write);
+  }
+
+  void RunWork(int64_t offset) {
+    bytes_written_ = file_.Write(offset, buffer_.get(), bytes_to_write_);
+    error_ = (bytes_written_ < 0) ? File::FILE_ERROR_FAILED : File::FILE_OK;
+  }
+
+  void Reply(FileProxy::WriteCallback callback) {
+    PassFile();
+    if (!callback.is_null())
+      std::move(callback).Run(error_, bytes_written_);
+  }
+
+ private:
+  std::unique_ptr<char[]> buffer_;
+  int bytes_to_write_;
+  int bytes_written_;
+  DISALLOW_COPY_AND_ASSIGN(WriteHelper);
+};
+
+}  // namespace
+
+FileProxy::FileProxy(TaskRunner* task_runner) : task_runner_(task_runner) {
+}
+
+FileProxy::~FileProxy() {
+  if (file_.IsValid())
+    task_runner_->PostTask(FROM_HERE, BindOnce(&FileDeleter, std::move(file_)));
+}
+
+bool FileProxy::CreateOrOpen(const FilePath& file_path,
+                             uint32_t file_flags,
+                             StatusCallback callback) {
+  DCHECK(!file_.IsValid());
+  CreateOrOpenHelper* helper = new CreateOrOpenHelper(this, File());
+  return task_runner_->PostTaskAndReply(
+      FROM_HERE,
+      BindOnce(&CreateOrOpenHelper::RunWork, Unretained(helper), file_path,
+               file_flags),
+      BindOnce(&CreateOrOpenHelper::Reply, Owned(helper), std::move(callback)));
+}
+
+bool FileProxy::CreateTemporary(uint32_t additional_file_flags,
+                                CreateTemporaryCallback callback) {
+  DCHECK(!file_.IsValid());
+  CreateTemporaryHelper* helper = new CreateTemporaryHelper(this, File());
+  return task_runner_->PostTaskAndReply(
+      FROM_HERE,
+      BindOnce(&CreateTemporaryHelper::RunWork, Unretained(helper),
+               additional_file_flags),
+      BindOnce(&CreateTemporaryHelper::Reply, Owned(helper),
+               std::move(callback)));
+}
+
+bool FileProxy::IsValid() const {
+  return file_.IsValid();
+}
+
+void FileProxy::SetFile(File file) {
+  DCHECK(!file_.IsValid());
+  file_ = std::move(file);
+}
+
+File FileProxy::TakeFile() {
+  return std::move(file_);
+}
+
+File FileProxy::DuplicateFile() {
+  return file_.Duplicate();
+}
+
+PlatformFile FileProxy::GetPlatformFile() const {
+  return file_.GetPlatformFile();
+}
+
+bool FileProxy::Close(StatusCallback callback) {
+  DCHECK(file_.IsValid());
+  GenericFileHelper* helper = new GenericFileHelper(this, std::move(file_));
+  return task_runner_->PostTaskAndReply(
+      FROM_HERE, BindOnce(&GenericFileHelper::Close, Unretained(helper)),
+      BindOnce(&GenericFileHelper::Reply, Owned(helper), std::move(callback)));
+}
+
+bool FileProxy::GetInfo(GetFileInfoCallback callback) {
+  DCHECK(file_.IsValid());
+  GetInfoHelper* helper = new GetInfoHelper(this, std::move(file_));
+  return task_runner_->PostTaskAndReply(
+      FROM_HERE, BindOnce(&GetInfoHelper::RunWork, Unretained(helper)),
+      BindOnce(&GetInfoHelper::Reply, Owned(helper), std::move(callback)));
+}
+
+bool FileProxy::Read(int64_t offset, int bytes_to_read, ReadCallback callback) {
+  DCHECK(file_.IsValid());
+  if (bytes_to_read < 0)
+    return false;
+
+  ReadHelper* helper = new ReadHelper(this, std::move(file_), bytes_to_read);
+  return task_runner_->PostTaskAndReply(
+      FROM_HERE, BindOnce(&ReadHelper::RunWork, Unretained(helper), offset),
+      BindOnce(&ReadHelper::Reply, Owned(helper), std::move(callback)));
+}
+
+bool FileProxy::Write(int64_t offset,
+                      const char* buffer,
+                      int bytes_to_write,
+                      WriteCallback callback) {
+  DCHECK(file_.IsValid());
+  if (bytes_to_write <= 0 || buffer == nullptr)
+    return false;
+
+  WriteHelper* helper =
+      new WriteHelper(this, std::move(file_), buffer, bytes_to_write);
+  return task_runner_->PostTaskAndReply(
+      FROM_HERE, BindOnce(&WriteHelper::RunWork, Unretained(helper), offset),
+      BindOnce(&WriteHelper::Reply, Owned(helper), std::move(callback)));
+}
+
+bool FileProxy::SetTimes(Time last_access_time,
+                         Time last_modified_time,
+                         StatusCallback callback) {
+  DCHECK(file_.IsValid());
+  GenericFileHelper* helper = new GenericFileHelper(this, std::move(file_));
+  return task_runner_->PostTaskAndReply(
+      FROM_HERE,
+      BindOnce(&GenericFileHelper::SetTimes, Unretained(helper),
+               last_access_time, last_modified_time),
+      BindOnce(&GenericFileHelper::Reply, Owned(helper), std::move(callback)));
+}
+
+bool FileProxy::SetLength(int64_t length, StatusCallback callback) {
+  DCHECK(file_.IsValid());
+  GenericFileHelper* helper = new GenericFileHelper(this, std::move(file_));
+  return task_runner_->PostTaskAndReply(
+      FROM_HERE,
+      BindOnce(&GenericFileHelper::SetLength, Unretained(helper), length),
+      BindOnce(&GenericFileHelper::Reply, Owned(helper), std::move(callback)));
+}
+
+bool FileProxy::Flush(StatusCallback callback) {
+  DCHECK(file_.IsValid());
+  GenericFileHelper* helper = new GenericFileHelper(this, std::move(file_));
+  return task_runner_->PostTaskAndReply(
+      FROM_HERE, BindOnce(&GenericFileHelper::Flush, Unretained(helper)),
+      BindOnce(&GenericFileHelper::Reply, Owned(helper), std::move(callback)));
+}
+
+}  // namespace base
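Note: every proxying method above uses the same ownership pattern: the work
closure gets a raw pointer to the helper (Unretained), while the reply closure
owns it (Owned), so the helper stays alive through both phases and is deleted
exactly once, after Reply runs. A minimal stand-alone sketch of the idea in
plain C++ (PostTaskAndReply here is a hypothetical stand-in that just runs
both closures in order; in base they run on different sequences but with the
same ordering guarantee):

  #include <memory>

  template <typename Work, typename Reply>
  void PostTaskAndReply(Work work, Reply reply) {
    work();   // First phase; in base this runs on the target TaskRunner.
    reply();  // Second phase; in base this runs back on the posting sequence.
  }

  struct Helper {
    int result = 0;
    void RunWork() { result = 42; }        // May do blocking file I/O.
    void Reply() { /* consume result */ }  // Reports back to the caller.
  };

  int main() {
    auto* helper = new Helper;
    PostTaskAndReply(
        [helper] { helper->RunWork(); },  // Raw pointer, like Unretained().
        [owned = std::unique_ptr<Helper>(helper)] {  // Like Owned().
          owned->Reply();
        });  // |helper| is destroyed here, after Reply, never earlier.
  }

FileProxy adds one more wrinkle: methods like Read and Write move |file_| into
the helper, so a second operation posted before the first reply arrives finds
an invalid file.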
diff --git a/base/files/file_proxy.h b/base/files/file_proxy.h
new file mode 100644
index 0000000..d17e4d3
--- /dev/null
+++ b/base/files/file_proxy.h
@@ -0,0 +1,142 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FILES_FILE_PROXY_H_
+#define BASE_FILES_FILE_PROXY_H_
+
+#include <stdint.h>
+
+#include "base/base_export.h"
+#include "base/callback_forward.h"
+#include "base/files/file.h"
+#include "base/files/file_path.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/weak_ptr.h"
+
+namespace base {
+
+class TaskRunner;
+class Time;
+
+// This class provides asynchronous access to a File. All methods follow the
+// same rules as the equivalent File method, as they are implemented by
+// bouncing the operation to File using a TaskRunner.
+//
+// This class performs automatic proxying to close the underlying file at
+// destruction.
+//
+// The TaskRunner is in charge of any sequencing of the operations, but only
+// a single operation can be proxied at a time, regardless of whether a
+// callback is used.
+// In other words, having a sequence like
+//
+//   proxy.Write(...);
+//   proxy.Write(...);
+//
+// means the second Write will always fail.
+class BASE_EXPORT FileProxy : public SupportsWeakPtr<FileProxy> {
+ public:
+  // This callback is used by methods that report only an error code. It is
+  // valid to pass a null callback to some functions that take a
+  // StatusCallback, in which case the operation will complete silently.
+  using StatusCallback = OnceCallback<void(File::Error)>;
+  using CreateTemporaryCallback =
+      OnceCallback<void(File::Error, const FilePath&)>;
+  using GetFileInfoCallback =
+      OnceCallback<void(File::Error, const File::Info&)>;
+  using ReadCallback =
+      OnceCallback<void(File::Error, const char* data, int bytes_read)>;
+  using WriteCallback = OnceCallback<void(File::Error, int bytes_written)>;
+
+  FileProxy();
+  explicit FileProxy(TaskRunner* task_runner);
+  ~FileProxy();
+
+  // Creates or opens a file with the given flags. It is invalid to pass a null
+  // callback. If File::FLAG_CREATE is set in |file_flags|, it always tries to
+  // create a new file at the given |file_path| and fails if the file already
+  // exists.
+  //
+  // This returns false if task posting to |task_runner| has failed.
+  bool CreateOrOpen(const FilePath& file_path,
+                    uint32_t file_flags,
+                    StatusCallback callback);
+
+  // Creates a temporary file for writing. The path and an open file are
+  // returned. It is invalid to pass a null callback. The additional file flags
+  // will be added on top of the default file flags which are:
+  //   File::FLAG_CREATE_ALWAYS
+  //   File::FLAG_WRITE
+  //   File::FLAG_TEMPORARY.
+  //
+  // This returns false if task posting to |task_runner| has failed.
+  bool CreateTemporary(uint32_t additional_file_flags,
+                       CreateTemporaryCallback callback);
+
+  // Returns true if the underlying |file_| is valid.
+  bool IsValid() const;
+
+  // Returns true if a new file was created (or an old one truncated to zero
+  // length to simulate a new file), and false otherwise.
+  bool created() const { return file_.created(); }
+
+  // Claims ownership of |file|. It is an error to call this method when
+  // IsValid() returns true.
+  void SetFile(File file);
+
+  File TakeFile();
+
+  // Returns a new File object that is a duplicate of the underlying |file_|.
+  // See the comment at File::Duplicate for caveats.
+  File DuplicateFile();
+
+  PlatformFile GetPlatformFile() const;
+
+  // Proxies File::Close. The callback can be null.
+  // This returns false if task posting to |task_runner| has failed.
+  bool Close(StatusCallback callback);
+
+  // Proxies File::GetInfo. The callback can't be null.
+  // This returns false if task posting to |task_runner| has failed.
+  bool GetInfo(GetFileInfoCallback callback);
+
+  // Proxies File::Read. The callback can't be null.
+  // This returns false if |bytes_to_read| is less than zero, or
+  // if task posting to |task_runner| has failed.
+  bool Read(int64_t offset, int bytes_to_read, ReadCallback callback);
+
+  // Proxies File::Write. The callback can be null.
+  // This returns false if |bytes_to_write| is less than or equal to zero,
+  // if |buffer| is NULL, or if task posting to |task_runner| has failed.
+  bool Write(int64_t offset,
+             const char* buffer,
+             int bytes_to_write,
+             WriteCallback callback);
+
+  // Proxies File::SetTimes. The callback can be null.
+  // This returns false if task posting to |task_runner| has failed.
+  bool SetTimes(Time last_access_time,
+                Time last_modified_time,
+                StatusCallback callback);
+
+  // Proxies File::SetLength. The callback can be null.
+  // This returns false if task posting to |task_runner| has failed.
+  bool SetLength(int64_t length, StatusCallback callback);
+
+  // Proxies File::Flush. The callback can be null.
+  // This returns false if task posting to |task_runner| has failed.
+  bool Flush(StatusCallback callback);
+
+ private:
+  friend class FileHelper;
+  TaskRunner* task_runner() { return task_runner_.get(); }
+
+  scoped_refptr<TaskRunner> task_runner_;
+  File file_;
+  DISALLOW_COPY_AND_ASSIGN(FileProxy);
+};
+
+}  // namespace base
+
+#endif  // BASE_FILES_FILE_PROXY_H_
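Note: a typical call site for this header, as a hedged sketch (the wiring
mirrors the unit test below; |file_thread|, MyClass and DidCreateOrOpen are
hypothetical names):

  // Assumes a running base::Thread |file_thread| and that |this| outlives the
  // callback or is bound through a WeakPtr, as in the test fixture below.
  FileProxy proxy(file_thread.task_runner().get());
  proxy.CreateOrOpen(
      path, File::FLAG_CREATE | File::FLAG_WRITE,
      BindOnce(&MyClass::DidCreateOrOpen, weak_factory_.GetWeakPtr()));
  // Until the callback fires, |proxy| holds no valid File yet; operations
  // that need |file_|, such as Write(), would hit a DCHECK.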
diff --git a/base/files/file_proxy_unittest.cc b/base/files/file_proxy_unittest.cc
new file mode 100644
index 0000000..20bb489
--- /dev/null
+++ b/base/files/file_proxy_unittest.cc
@@ -0,0 +1,399 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file_proxy.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <utility>
+
+#include "base/bind.h"
+#include "base/files/file.h"
+#include "base/files/file_util.h"
+#include "base/files/scoped_temp_dir.h"
+#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_restrictions.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+class FileProxyTest : public testing::Test {
+ public:
+  FileProxyTest()
+      : file_thread_("FileProxyTestFileThread"),
+        error_(File::FILE_OK),
+        bytes_written_(-1),
+        weak_factory_(this) {}
+
+  void SetUp() override {
+    ASSERT_TRUE(dir_.CreateUniqueTempDir());
+    ASSERT_TRUE(file_thread_.Start());
+  }
+
+  void DidFinish(File::Error error) {
+    error_ = error;
+    RunLoop::QuitCurrentWhenIdleDeprecated();
+  }
+
+  void DidCreateOrOpen(File::Error error) {
+    error_ = error;
+    RunLoop::QuitCurrentWhenIdleDeprecated();
+  }
+
+  void DidCreateTemporary(File::Error error,
+                          const FilePath& path) {
+    error_ = error;
+    path_ = path;
+    RunLoop::QuitCurrentWhenIdleDeprecated();
+  }
+
+  void DidGetFileInfo(File::Error error,
+                      const File::Info& file_info) {
+    error_ = error;
+    file_info_ = file_info;
+    RunLoop::QuitCurrentWhenIdleDeprecated();
+  }
+
+  void DidRead(File::Error error,
+               const char* data,
+               int bytes_read) {
+    error_ = error;
+    // assign() handles a zero-length read safely; taking &buffer_[0] on an
+    // empty vector would be undefined behavior.
+    buffer_.assign(data, data + bytes_read);
+    RunLoop::QuitCurrentWhenIdleDeprecated();
+  }
+
+  void DidWrite(File::Error error,
+                int bytes_written) {
+    error_ = error;
+    bytes_written_ = bytes_written;
+    RunLoop::QuitCurrentWhenIdleDeprecated();
+  }
+
+ protected:
+  void CreateProxy(uint32_t flags, FileProxy* proxy) {
+    proxy->CreateOrOpen(
+        TestPath(), flags,
+        BindOnce(&FileProxyTest::DidCreateOrOpen, weak_factory_.GetWeakPtr()));
+    RunLoop().Run();
+    EXPECT_TRUE(proxy->IsValid());
+  }
+
+  TaskRunner* file_task_runner() const {
+    return file_thread_.task_runner().get();
+  }
+  const FilePath& TestDirPath() const { return dir_.GetPath(); }
+  const FilePath TestPath() const { return dir_.GetPath().AppendASCII("test"); }
+
+  ScopedTempDir dir_;
+  MessageLoopForIO message_loop_;
+  Thread file_thread_;
+
+  File::Error error_;
+  FilePath path_;
+  File::Info file_info_;
+  std::vector<char> buffer_;
+  int bytes_written_;
+  WeakPtrFactory<FileProxyTest> weak_factory_;
+};
+
+TEST_F(FileProxyTest, CreateOrOpen_Create) {
+  FileProxy proxy(file_task_runner());
+  proxy.CreateOrOpen(
+      TestPath(), File::FLAG_CREATE | File::FLAG_READ,
+      BindOnce(&FileProxyTest::DidCreateOrOpen, weak_factory_.GetWeakPtr()));
+  RunLoop().Run();
+
+  EXPECT_EQ(File::FILE_OK, error_);
+  EXPECT_TRUE(proxy.IsValid());
+  EXPECT_TRUE(proxy.created());
+  EXPECT_TRUE(PathExists(TestPath()));
+}
+
+TEST_F(FileProxyTest, CreateOrOpen_Open) {
+  // Creates a file.
+  base::WriteFile(TestPath(), nullptr, 0);
+  ASSERT_TRUE(PathExists(TestPath()));
+
+  // Opens the created file.
+  FileProxy proxy(file_task_runner());
+  proxy.CreateOrOpen(
+      TestPath(), File::FLAG_OPEN | File::FLAG_READ,
+      BindOnce(&FileProxyTest::DidCreateOrOpen, weak_factory_.GetWeakPtr()));
+  RunLoop().Run();
+
+  EXPECT_EQ(File::FILE_OK, error_);
+  EXPECT_TRUE(proxy.IsValid());
+  EXPECT_FALSE(proxy.created());
+}
+
+TEST_F(FileProxyTest, CreateOrOpen_OpenNonExistent) {
+  FileProxy proxy(file_task_runner());
+  proxy.CreateOrOpen(
+      TestPath(), File::FLAG_OPEN | File::FLAG_READ,
+      BindOnce(&FileProxyTest::DidCreateOrOpen, weak_factory_.GetWeakPtr()));
+  RunLoop().Run();
+  EXPECT_EQ(File::FILE_ERROR_NOT_FOUND, error_);
+  EXPECT_FALSE(proxy.IsValid());
+  EXPECT_FALSE(proxy.created());
+  EXPECT_FALSE(PathExists(TestPath()));
+}
+
+TEST_F(FileProxyTest, CreateOrOpen_AbandonedCreate) {
+  bool prev = ThreadRestrictions::SetIOAllowed(false);
+  {
+    FileProxy proxy(file_task_runner());
+    proxy.CreateOrOpen(
+        TestPath(), File::FLAG_CREATE | File::FLAG_READ,
+        BindOnce(&FileProxyTest::DidCreateOrOpen, weak_factory_.GetWeakPtr()));
+  }
+  RunLoop().Run();
+  ThreadRestrictions::SetIOAllowed(prev);
+
+  EXPECT_TRUE(PathExists(TestPath()));
+}
+
+TEST_F(FileProxyTest, Close) {
+  // Creates a file.
+  FileProxy proxy(file_task_runner());
+  CreateProxy(File::FLAG_CREATE | File::FLAG_WRITE, &proxy);
+
+#if defined(OS_WIN)
+  // This fails on Windows if the file is not closed.
+  EXPECT_FALSE(base::Move(TestPath(), TestDirPath().AppendASCII("new")));
+#endif
+
+  proxy.Close(BindOnce(&FileProxyTest::DidFinish, weak_factory_.GetWeakPtr()));
+  RunLoop().Run();
+  EXPECT_EQ(File::FILE_OK, error_);
+  EXPECT_FALSE(proxy.IsValid());
+
+  // Now it should pass on all platforms.
+  EXPECT_TRUE(base::Move(TestPath(), TestDirPath().AppendASCII("new")));
+}
+
+TEST_F(FileProxyTest, CreateTemporary) {
+  {
+    FileProxy proxy(file_task_runner());
+    proxy.CreateTemporary(0 /* additional_file_flags */,
+                          BindOnce(&FileProxyTest::DidCreateTemporary,
+                                   weak_factory_.GetWeakPtr()));
+    RunLoop().Run();
+
+    EXPECT_TRUE(proxy.IsValid());
+    EXPECT_EQ(File::FILE_OK, error_);
+    EXPECT_TRUE(PathExists(path_));
+
+    // The file should be writable.
+    proxy.Write(0, "test", 4,
+                BindOnce(&FileProxyTest::DidWrite, weak_factory_.GetWeakPtr()));
+    RunLoop().Run();
+    EXPECT_EQ(File::FILE_OK, error_);
+    EXPECT_EQ(4, bytes_written_);
+  }
+
+  // Make sure the written data can be read from the returned path.
+  std::string data;
+  EXPECT_TRUE(ReadFileToString(path_, &data));
+  EXPECT_EQ("test", data);
+
+  // Make sure we can & do delete the created file to prevent leaks on the bots.
+  EXPECT_TRUE(base::DeleteFile(path_, false));
+}
+
+TEST_F(FileProxyTest, SetAndTake) {
+  File file(TestPath(), File::FLAG_CREATE | File::FLAG_READ);
+  ASSERT_TRUE(file.IsValid());
+  FileProxy proxy(file_task_runner());
+  EXPECT_FALSE(proxy.IsValid());
+  proxy.SetFile(std::move(file));
+  EXPECT_TRUE(proxy.IsValid());
+  EXPECT_FALSE(file.IsValid());
+
+  file = proxy.TakeFile();
+  EXPECT_FALSE(proxy.IsValid());
+  EXPECT_TRUE(file.IsValid());
+}
+
+TEST_F(FileProxyTest, DuplicateFile) {
+  FileProxy proxy(file_task_runner());
+  CreateProxy(File::FLAG_CREATE | File::FLAG_WRITE, &proxy);
+  ASSERT_TRUE(proxy.IsValid());
+
+  base::File duplicate = proxy.DuplicateFile();
+  EXPECT_TRUE(proxy.IsValid());
+  EXPECT_TRUE(duplicate.IsValid());
+
+  FileProxy invalid_proxy(file_task_runner());
+  ASSERT_FALSE(invalid_proxy.IsValid());
+
+  base::File invalid_duplicate = invalid_proxy.DuplicateFile();
+  EXPECT_FALSE(invalid_proxy.IsValid());
+  EXPECT_FALSE(invalid_duplicate.IsValid());
+}
+
+TEST_F(FileProxyTest, GetInfo) {
+  // Setup.
+  ASSERT_EQ(4, base::WriteFile(TestPath(), "test", 4));
+  File::Info expected_info;
+  GetFileInfo(TestPath(), &expected_info);
+
+  // Run.
+  FileProxy proxy(file_task_runner());
+  CreateProxy(File::FLAG_OPEN | File::FLAG_READ, &proxy);
+  proxy.GetInfo(
+      BindOnce(&FileProxyTest::DidGetFileInfo, weak_factory_.GetWeakPtr()));
+  RunLoop().Run();
+
+  // Verify.
+  EXPECT_EQ(File::FILE_OK, error_);
+  EXPECT_EQ(expected_info.size, file_info_.size);
+  EXPECT_EQ(expected_info.is_directory, file_info_.is_directory);
+  EXPECT_EQ(expected_info.is_symbolic_link, file_info_.is_symbolic_link);
+  EXPECT_EQ(expected_info.last_modified, file_info_.last_modified);
+  EXPECT_EQ(expected_info.creation_time, file_info_.creation_time);
+}
+
+TEST_F(FileProxyTest, Read) {
+  // Setup.
+  const char expected_data[] = "bleh";
+  int expected_bytes = arraysize(expected_data);
+  ASSERT_EQ(expected_bytes,
+            base::WriteFile(TestPath(), expected_data, expected_bytes));
+
+  // Run.
+  FileProxy proxy(file_task_runner());
+  CreateProxy(File::FLAG_OPEN | File::FLAG_READ, &proxy);
+
+  proxy.Read(0, 128,
+             BindOnce(&FileProxyTest::DidRead, weak_factory_.GetWeakPtr()));
+  RunLoop().Run();
+
+  // Verify.
+  EXPECT_EQ(File::FILE_OK, error_);
+  EXPECT_EQ(expected_bytes, static_cast<int>(buffer_.size()));
+  for (size_t i = 0; i < buffer_.size(); ++i) {
+    EXPECT_EQ(expected_data[i], buffer_[i]);
+  }
+}
+
+TEST_F(FileProxyTest, WriteAndFlush) {
+  FileProxy proxy(file_task_runner());
+  CreateProxy(File::FLAG_CREATE | File::FLAG_WRITE, &proxy);
+
+  const char data[] = "foo!";
+  int data_bytes = arraysize(data);
+  proxy.Write(0, data, data_bytes,
+              BindOnce(&FileProxyTest::DidWrite, weak_factory_.GetWeakPtr()));
+  RunLoop().Run();
+  EXPECT_EQ(File::FILE_OK, error_);
+  EXPECT_EQ(data_bytes, bytes_written_);
+
+  // Flush the written data so that the following read always succeeds.
+  // (On some platforms the read may work even without this flush.)
+  proxy.Flush(BindOnce(&FileProxyTest::DidFinish, weak_factory_.GetWeakPtr()));
+  RunLoop().Run();
+  EXPECT_EQ(File::FILE_OK, error_);
+
+  // Verify the written data.
+  char buffer[10];
+  EXPECT_EQ(data_bytes, base::ReadFile(TestPath(), buffer, data_bytes));
+  for (int i = 0; i < data_bytes; ++i) {
+    EXPECT_EQ(data[i], buffer[i]);
+  }
+}
+
+#if defined(OS_ANDROID)
+// Flaky on Android, see http://crbug.com/489602
+#define MAYBE_SetTimes DISABLED_SetTimes
+#else
+#define MAYBE_SetTimes SetTimes
+#endif
+TEST_F(FileProxyTest, MAYBE_SetTimes) {
+  FileProxy proxy(file_task_runner());
+  CreateProxy(
+      File::FLAG_CREATE | File::FLAG_WRITE | File::FLAG_WRITE_ATTRIBUTES,
+      &proxy);
+
+  Time last_accessed_time = Time::Now() - TimeDelta::FromDays(12345);
+  Time last_modified_time = Time::Now() - TimeDelta::FromHours(98765);
+
+  proxy.SetTimes(
+      last_accessed_time, last_modified_time,
+      BindOnce(&FileProxyTest::DidFinish, weak_factory_.GetWeakPtr()));
+  RunLoop().Run();
+  EXPECT_EQ(File::FILE_OK, error_);
+
+  File::Info info;
+  GetFileInfo(TestPath(), &info);
+
+  // The returned values may only have second precision, so we cast the
+  // double values to int here.
+  EXPECT_EQ(static_cast<int>(last_modified_time.ToDoubleT()),
+            static_cast<int>(info.last_modified.ToDoubleT()));
+  EXPECT_EQ(static_cast<int>(last_accessed_time.ToDoubleT()),
+            static_cast<int>(info.last_accessed.ToDoubleT()));
+}
+
+TEST_F(FileProxyTest, SetLength_Shrink) {
+  // Setup.
+  const char kTestData[] = "0123456789";
+  ASSERT_EQ(10, base::WriteFile(TestPath(), kTestData, 10));
+  File::Info info;
+  GetFileInfo(TestPath(), &info);
+  ASSERT_EQ(10, info.size);
+
+  // Run.
+  FileProxy proxy(file_task_runner());
+  CreateProxy(File::FLAG_OPEN | File::FLAG_WRITE, &proxy);
+  proxy.SetLength(
+      7, BindOnce(&FileProxyTest::DidFinish, weak_factory_.GetWeakPtr()));
+  RunLoop().Run();
+
+  // Verify.
+  GetFileInfo(TestPath(), &info);
+  ASSERT_EQ(7, info.size);
+
+  char buffer[7];
+  EXPECT_EQ(7, base::ReadFile(TestPath(), buffer, 7));
+  int i = 0;
+  for (; i < 7; ++i)
+    EXPECT_EQ(kTestData[i], buffer[i]);
+}
+
+TEST_F(FileProxyTest, SetLength_Expand) {
+  // Setup.
+  const char kTestData[] = "9876543210";
+  ASSERT_EQ(10, base::WriteFile(TestPath(), kTestData, 10));
+  File::Info info;
+  GetFileInfo(TestPath(), &info);
+  ASSERT_EQ(10, info.size);
+
+  // Run.
+  FileProxy proxy(file_task_runner());
+  CreateProxy(File::FLAG_OPEN | File::FLAG_WRITE, &proxy);
+  proxy.SetLength(
+      53, BindOnce(&FileProxyTest::DidFinish, weak_factory_.GetWeakPtr()));
+  RunLoop().Run();
+
+  // Verify.
+  GetFileInfo(TestPath(), &info);
+  ASSERT_EQ(53, info.size);
+
+  char buffer[53];
+  EXPECT_EQ(53, base::ReadFile(TestPath(), buffer, 53));
+  int i = 0;
+  for (; i < 10; ++i)
+    EXPECT_EQ(kTestData[i], buffer[i]);
+  for (; i < 53; ++i)
+    EXPECT_EQ(0, buffer[i]);
+}
+
+}  // namespace base
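Note on the fixture above: every callback is bound through WeakPtrFactory
because a reply can arrive after the object that posted it is gone (this is
exactly what CreateOrOpen_AbandonedCreate exercises). A WeakPtr-bound method
callback is silently dropped once the factory's owner is destroyed, instead of
touching freed memory. A small sketch of the idiom (Listener is hypothetical):

  class Listener {
   public:
    void OnDone(base::File::Error error) { last_error_ = error; }
    base::WeakPtr<Listener> AsWeakPtr() { return weak_factory_.GetWeakPtr(); }

   private:
    base::File::Error last_error_ = base::File::FILE_OK;
    base::WeakPtrFactory<Listener> weak_factory_{this};  // Keep last.
  };

  // BindOnce(&Listener::OnDone, listener->AsWeakPtr()) runs OnDone only while
  // |listener| is alive; after destruction, invoking the callback is a no-op.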
diff --git a/base/files/file_tracing.cc b/base/files/file_tracing.cc
new file mode 100644
index 0000000..48f5741
--- /dev/null
+++ b/base/files/file_tracing.cc
@@ -0,0 +1,65 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file_tracing.h"
+
+#include "base/atomicops.h"
+#include "base/files/file.h"
+
+using base::subtle::AtomicWord;
+
+namespace base {
+
+namespace {
+AtomicWord g_provider;
+}
+
+FileTracing::Provider* GetProvider() {
+  AtomicWord provider = base::subtle::Acquire_Load(&g_provider);
+  return reinterpret_cast<FileTracing::Provider*>(provider);
+}
+
+// static
+bool FileTracing::IsCategoryEnabled() {
+  FileTracing::Provider* provider = GetProvider();
+  return provider && provider->FileTracingCategoryIsEnabled();
+}
+
+// static
+void FileTracing::SetProvider(FileTracing::Provider* provider) {
+  base::subtle::Release_Store(&g_provider,
+                              reinterpret_cast<AtomicWord>(provider));
+}
+
+FileTracing::ScopedEnabler::ScopedEnabler() {
+  FileTracing::Provider* provider = GetProvider();
+  if (provider)
+    provider->FileTracingEnable(this);
+}
+
+FileTracing::ScopedEnabler::~ScopedEnabler() {
+  FileTracing::Provider* provider = GetProvider();
+  if (provider)
+    provider->FileTracingDisable(this);
+}
+
+FileTracing::ScopedTrace::ScopedTrace() : id_(nullptr) {}
+
+FileTracing::ScopedTrace::~ScopedTrace() {
+  if (id_) {
+    FileTracing::Provider* provider = GetProvider();
+    if (provider)
+      provider->FileTracingEventEnd(name_, id_);
+  }
+}
+
+void FileTracing::ScopedTrace::Initialize(const char* name,
+                                          const File* file,
+                                          int64_t size) {
+  id_ = &file->trace_enabler_;
+  name_ = name;
+  GetProvider()->FileTracingEventBegin(name_, id_, file->tracing_path_, size);
+}
+
+}  // namespace base
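Note: the g_provider handshake above is the standard publish-once pattern:
Release_Store publishes the fully constructed provider and Acquire_Load
observes it, so any thread that sees a non-null pointer also sees the
provider's initialized state. The same idea in stand-alone C++ with
std::atomic:

  #include <atomic>

  class Provider { /* ... */ };

  std::atomic<Provider*> g_provider{nullptr};

  void SetProvider(Provider* provider) {
    // Everything written before this store (the provider's construction) is
    // visible to any thread that later acquire-loads the pointer.
    g_provider.store(provider, std::memory_order_release);
  }

  Provider* GetProvider() {
    return g_provider.load(std::memory_order_acquire);  // May still be null.
  }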
diff --git a/base/files/file_tracing.h b/base/files/file_tracing.h
new file mode 100644
index 0000000..1fbfcd4
--- /dev/null
+++ b/base/files/file_tracing.h
@@ -0,0 +1,95 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FILES_FILE_TRACING_H_
+#define BASE_FILES_FILE_TRACING_H_
+
+#include <stdint.h>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+
+#define FILE_TRACING_PREFIX "File"
+
+#define SCOPED_FILE_TRACE_WITH_SIZE(name, size) \
+    FileTracing::ScopedTrace scoped_file_trace; \
+    if (FileTracing::IsCategoryEnabled()) \
+      scoped_file_trace.Initialize(FILE_TRACING_PREFIX "::" name, this, size)
+
+#define SCOPED_FILE_TRACE(name) SCOPED_FILE_TRACE_WITH_SIZE(name, 0)
+
+namespace base {
+
+class File;
+class FilePath;
+
+class BASE_EXPORT FileTracing {
+ public:
+  // Whether the file tracing category is enabled.
+  static bool IsCategoryEnabled();
+
+  class Provider {
+   public:
+    virtual ~Provider() = default;
+
+    // Whether the file tracing category is currently enabled.
+    virtual bool FileTracingCategoryIsEnabled() const = 0;
+
+    // Enables file tracing for |id|. Must be called before recording events.
+    virtual void FileTracingEnable(const void* id) = 0;
+
+    // Disables file tracing for |id|.
+    virtual void FileTracingDisable(const void* id) = 0;
+
+    // Begins an event for |id| with |name|. |path| tells where in the directory
+    // structure the event is happening (and may be blank). |size| is the number
+    // of bytes involved in the event.
+    virtual void FileTracingEventBegin(const char* name,
+                                       const void* id,
+                                       const FilePath& path,
+                                       int64_t size) = 0;
+
+    // Ends an event for |id| with |name|.
+    virtual void FileTracingEventEnd(const char* name, const void* id) = 0;
+  };
+
+  // Sets a global file tracing provider to query categories and record events.
+  static void SetProvider(Provider* provider);
+
+  // Enables file tracing while in scope.
+  class ScopedEnabler {
+   public:
+    ScopedEnabler();
+    ~ScopedEnabler();
+  };
+
+  class ScopedTrace {
+   public:
+    ScopedTrace();
+    ~ScopedTrace();
+
+    // Called only if the tracing category is enabled. |name| is the name of the
+    // event to trace (e.g. "Read", "Write") and must have an application
+    // lifetime (e.g. static or literal). |file| is the file being traced; must
+    // outlive this class. |size| is the size (in bytes) of this event.
+    void Initialize(const char* name, const File* file, int64_t size);
+
+   private:
+    // The ID of this trace. Based on the |file| passed to |Initialize()|. Must
+    // outlive this class.
+    const void* id_;
+
+    // The name of the event to trace (e.g. "Read", "Write"). Prefixed with
+    // "File".
+    const char* name_;
+
+    DISALLOW_COPY_AND_ASSIGN(ScopedTrace);
+  };
+
+  DISALLOW_COPY_AND_ASSIGN(FileTracing);
+};
+
+}  // namespace base
+
+#endif  // BASE_FILES_FILE_TRACING_H_
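Note: a hypothetical provider, sketched against the interface above; it only
counts begin/end pairs, where a real implementation would forward events to
the tracing backend:

  #include <stdint.h>

  #include "base/files/file_tracing.h"

  class CountingProvider : public base::FileTracing::Provider {
   public:
    bool FileTracingCategoryIsEnabled() const override { return true; }
    void FileTracingEnable(const void* id) override {}
    void FileTracingDisable(const void* id) override {}
    void FileTracingEventBegin(const char* name,
                               const void* id,
                               const base::FilePath& path,
                               int64_t size) override {
      ++begins_;
    }
    void FileTracingEventEnd(const char* name, const void* id) override {
      ++ends_;
    }

   private:
    int begins_ = 0;
    int ends_ = 0;
  };

Once installed via base::FileTracing::SetProvider(), the SCOPED_FILE_TRACE
macros in File's methods start emitting begin/end pairs whenever
FileTracingCategoryIsEnabled() returns true.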
diff --git a/base/files/file_unittest.cc b/base/files/file_unittest.cc
new file mode 100644
index 0000000..112b90d
--- /dev/null
+++ b/base/files/file_unittest.cc
@@ -0,0 +1,721 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file.h"
+
+#include <stdint.h>
+
+#include <utility>
+
+#include "base/files/file_util.h"
+#include "base/files/memory_mapped_file.h"
+#include "base/files/scoped_temp_dir.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using base::File;
+using base::FilePath;
+
+TEST(FileTest, Create) {
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath file_path = temp_dir.GetPath().AppendASCII("create_file_1");
+
+  {
+    // Don't create a File at all.
+    File file;
+    EXPECT_FALSE(file.IsValid());
+    EXPECT_EQ(base::File::FILE_ERROR_FAILED, file.error_details());
+
+    File file2(base::File::FILE_ERROR_TOO_MANY_OPENED);
+    EXPECT_FALSE(file2.IsValid());
+    EXPECT_EQ(base::File::FILE_ERROR_TOO_MANY_OPENED, file2.error_details());
+  }
+
+  {
+    // Open a file that doesn't exist.
+    File file(file_path, base::File::FLAG_OPEN | base::File::FLAG_READ);
+    EXPECT_FALSE(file.IsValid());
+    EXPECT_EQ(base::File::FILE_ERROR_NOT_FOUND, file.error_details());
+    EXPECT_EQ(base::File::FILE_ERROR_NOT_FOUND, base::File::GetLastFileError());
+  }
+
+  {
+    // Open or create a file.
+    File file(file_path, base::File::FLAG_OPEN_ALWAYS | base::File::FLAG_READ);
+    EXPECT_TRUE(file.IsValid());
+    EXPECT_TRUE(file.created());
+    EXPECT_EQ(base::File::FILE_OK, file.error_details());
+  }
+
+  {
+    // Open an existing file.
+    File file(file_path, base::File::FLAG_OPEN | base::File::FLAG_READ);
+    EXPECT_TRUE(file.IsValid());
+    EXPECT_FALSE(file.created());
+    EXPECT_EQ(base::File::FILE_OK, file.error_details());
+
+    // This time verify closing the file.
+    file.Close();
+    EXPECT_FALSE(file.IsValid());
+  }
+
+  {
+    // Open an existing file through Initialize
+    File file;
+    file.Initialize(file_path, base::File::FLAG_OPEN | base::File::FLAG_READ);
+    EXPECT_TRUE(file.IsValid());
+    EXPECT_FALSE(file.created());
+    EXPECT_EQ(base::File::FILE_OK, file.error_details());
+
+    // This time verify closing the file.
+    file.Close();
+    EXPECT_FALSE(file.IsValid());
+  }
+
+  {
+    // Create a file that exists.
+    File file(file_path, base::File::FLAG_CREATE | base::File::FLAG_READ);
+    EXPECT_FALSE(file.IsValid());
+    EXPECT_FALSE(file.created());
+    EXPECT_EQ(base::File::FILE_ERROR_EXISTS, file.error_details());
+    EXPECT_EQ(base::File::FILE_ERROR_EXISTS, base::File::GetLastFileError());
+  }
+
+  {
+    // Create or overwrite a file.
+    File file(file_path,
+              base::File::FLAG_CREATE_ALWAYS | base::File::FLAG_WRITE);
+    EXPECT_TRUE(file.IsValid());
+    EXPECT_TRUE(file.created());
+    EXPECT_EQ(base::File::FILE_OK, file.error_details());
+  }
+
+  {
+    // Create a delete-on-close file.
+    file_path = temp_dir.GetPath().AppendASCII("create_file_2");
+    File file(file_path,
+              base::File::FLAG_OPEN_ALWAYS | base::File::FLAG_READ |
+                  base::File::FLAG_DELETE_ON_CLOSE);
+    EXPECT_TRUE(file.IsValid());
+    EXPECT_TRUE(file.created());
+    EXPECT_EQ(base::File::FILE_OK, file.error_details());
+  }
+
+  EXPECT_FALSE(base::PathExists(file_path));
+}
+
+TEST(FileTest, SelfSwap) {
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath file_path = temp_dir.GetPath().AppendASCII("create_file_1");
+  File file(file_path,
+            base::File::FLAG_OPEN_ALWAYS | base::File::FLAG_DELETE_ON_CLOSE);
+  std::swap(file, file);
+  EXPECT_TRUE(file.IsValid());
+}
+
+TEST(FileTest, Async) {
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath file_path = temp_dir.GetPath().AppendASCII("create_file");
+
+  {
+    File file(file_path, base::File::FLAG_OPEN_ALWAYS | base::File::FLAG_ASYNC);
+    EXPECT_TRUE(file.IsValid());
+    EXPECT_TRUE(file.async());
+  }
+
+  {
+    File file(file_path, base::File::FLAG_OPEN_ALWAYS);
+    EXPECT_TRUE(file.IsValid());
+    EXPECT_FALSE(file.async());
+  }
+}
+
+TEST(FileTest, DeleteOpenFile) {
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath file_path = temp_dir.GetPath().AppendASCII("create_file_1");
+
+  // Create a file.
+  File file(file_path,
+            base::File::FLAG_OPEN_ALWAYS | base::File::FLAG_READ |
+                base::File::FLAG_SHARE_DELETE);
+  EXPECT_TRUE(file.IsValid());
+  EXPECT_TRUE(file.created());
+  EXPECT_EQ(base::File::FILE_OK, file.error_details());
+
+  // Open an existing file and mark it as delete on close.
+  File same_file(file_path,
+                 base::File::FLAG_OPEN | base::File::FLAG_DELETE_ON_CLOSE |
+                     base::File::FLAG_READ);
+  EXPECT_TRUE(file.IsValid());
+  EXPECT_FALSE(same_file.created());
+  EXPECT_EQ(base::File::FILE_OK, same_file.error_details());
+
+  // Close both handles and check that the file is gone.
+  file.Close();
+  same_file.Close();
+  EXPECT_FALSE(base::PathExists(file_path));
+}
+
+TEST(FileTest, ReadWrite) {
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath file_path = temp_dir.GetPath().AppendASCII("read_write_file");
+  File file(file_path,
+            base::File::FLAG_CREATE | base::File::FLAG_READ |
+                base::File::FLAG_WRITE);
+  ASSERT_TRUE(file.IsValid());
+
+  char data_to_write[] = "test";
+  const int kTestDataSize = 4;
+
+  // Write 0 bytes to the file.
+  int bytes_written = file.Write(0, data_to_write, 0);
+  EXPECT_EQ(0, bytes_written);
+
+  // Write 0 bytes, with buf=nullptr.
+  bytes_written = file.Write(0, nullptr, 0);
+  EXPECT_EQ(0, bytes_written);
+
+  // Write "test" to the file.
+  bytes_written = file.Write(0, data_to_write, kTestDataSize);
+  EXPECT_EQ(kTestDataSize, bytes_written);
+
+  // Read from EOF.
+  char data_read_1[32];
+  int bytes_read = file.Read(kTestDataSize, data_read_1, kTestDataSize);
+  EXPECT_EQ(0, bytes_read);
+
+  // Read from somewhere in the middle of the file.
+  const int kPartialReadOffset = 1;
+  bytes_read = file.Read(kPartialReadOffset, data_read_1, kTestDataSize);
+  EXPECT_EQ(kTestDataSize - kPartialReadOffset, bytes_read);
+  for (int i = 0; i < bytes_read; i++)
+    EXPECT_EQ(data_to_write[i + kPartialReadOffset], data_read_1[i]);
+
+  // Read 0 bytes.
+  bytes_read = file.Read(0, data_read_1, 0);
+  EXPECT_EQ(0, bytes_read);
+
+  // Read the entire file.
+  bytes_read = file.Read(0, data_read_1, kTestDataSize);
+  EXPECT_EQ(kTestDataSize, bytes_read);
+  for (int i = 0; i < bytes_read; i++)
+    EXPECT_EQ(data_to_write[i], data_read_1[i]);
+
+  // Read again, but using the trivial native wrapper.
+  bytes_read = file.ReadNoBestEffort(0, data_read_1, kTestDataSize);
+  EXPECT_LE(bytes_read, kTestDataSize);
+  for (int i = 0; i < bytes_read; i++)
+    EXPECT_EQ(data_to_write[i], data_read_1[i]);
+
+  // Write past the end of the file.
+  const int kOffsetBeyondEndOfFile = 10;
+  const int kPartialWriteLength = 2;
+  bytes_written = file.Write(kOffsetBeyondEndOfFile,
+                             data_to_write, kPartialWriteLength);
+  EXPECT_EQ(kPartialWriteLength, bytes_written);
+
+  // Make sure the file was extended.
+  int64_t file_size = 0;
+  EXPECT_TRUE(GetFileSize(file_path, &file_size));
+  EXPECT_EQ(kOffsetBeyondEndOfFile + kPartialWriteLength, file_size);
+
+  // Make sure the file was zero-padded.
+  char data_read_2[32];
+  bytes_read = file.Read(0, data_read_2, static_cast<int>(file_size));
+  EXPECT_EQ(file_size, bytes_read);
+  for (int i = 0; i < kTestDataSize; i++)
+    EXPECT_EQ(data_to_write[i], data_read_2[i]);
+  for (int i = kTestDataSize; i < kOffsetBeyondEndOfFile; i++)
+    EXPECT_EQ(0, data_read_2[i]);
+  for (int i = kOffsetBeyondEndOfFile; i < file_size; i++)
+    EXPECT_EQ(data_to_write[i - kOffsetBeyondEndOfFile], data_read_2[i]);
+}
+
+TEST(FileTest, GetLastFileError) {
+#if defined(OS_WIN)
+  ::SetLastError(ERROR_ACCESS_DENIED);
+#else
+  errno = EACCES;
+#endif
+  EXPECT_EQ(File::FILE_ERROR_ACCESS_DENIED, File::GetLastFileError());
+
+  base::ScopedTempDir temp_dir;
+  EXPECT_TRUE(temp_dir.CreateUniqueTempDir());
+
+  FilePath nonexistent_path(temp_dir.GetPath().AppendASCII("nonexistent"));
+  File file(nonexistent_path, File::FLAG_OPEN | File::FLAG_READ);
+  File::Error last_error = File::GetLastFileError();
+  EXPECT_FALSE(file.IsValid());
+  EXPECT_EQ(File::FILE_ERROR_NOT_FOUND, file.error_details());
+  EXPECT_EQ(File::FILE_ERROR_NOT_FOUND, last_error);
+}
+
+TEST(FileTest, Append) {
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath file_path = temp_dir.GetPath().AppendASCII("append_file");
+  File file(file_path, base::File::FLAG_CREATE | base::File::FLAG_APPEND);
+  ASSERT_TRUE(file.IsValid());
+
+  char data_to_write[] = "test";
+  const int kTestDataSize = 4;
+
+  // Write 0 bytes to the file.
+  int bytes_written = file.Write(0, data_to_write, 0);
+  EXPECT_EQ(0, bytes_written);
+
+  // Write 0 bytes, with buf=nullptr.
+  bytes_written = file.Write(0, nullptr, 0);
+  EXPECT_EQ(0, bytes_written);
+
+  // Write "test" to the file.
+  bytes_written = file.Write(0, data_to_write, kTestDataSize);
+  EXPECT_EQ(kTestDataSize, bytes_written);
+
+  file.Close();
+  File file2(file_path,
+             base::File::FLAG_OPEN | base::File::FLAG_READ |
+                 base::File::FLAG_APPEND);
+  ASSERT_TRUE(file2.IsValid());
+
+  // Test passing the file around.
+  file = std::move(file2);
+  EXPECT_FALSE(file2.IsValid());
+  ASSERT_TRUE(file.IsValid());
+
+  char append_data_to_write[] = "78";
+  const int kAppendDataSize = 2;
+
+  // Append "78" to the file.
+  bytes_written = file.Write(0, append_data_to_write, kAppendDataSize);
+  EXPECT_EQ(kAppendDataSize, bytes_written);
+
+  // Read the entire file.
+  char data_read_1[32];
+  int bytes_read = file.Read(0, data_read_1,
+                             kTestDataSize + kAppendDataSize);
+  EXPECT_EQ(kTestDataSize + kAppendDataSize, bytes_read);
+  for (int i = 0; i < kTestDataSize; i++)
+    EXPECT_EQ(data_to_write[i], data_read_1[i]);
+  for (int i = 0; i < kAppendDataSize; i++)
+    EXPECT_EQ(append_data_to_write[i], data_read_1[kTestDataSize + i]);
+}
+
+TEST(FileTest, Length) {
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath file_path = temp_dir.GetPath().AppendASCII("truncate_file");
+  File file(file_path,
+            base::File::FLAG_CREATE | base::File::FLAG_READ |
+                base::File::FLAG_WRITE);
+  ASSERT_TRUE(file.IsValid());
+  EXPECT_EQ(0, file.GetLength());
+
+  // Write "test" to the file.
+  char data_to_write[] = "test";
+  int kTestDataSize = 4;
+  int bytes_written = file.Write(0, data_to_write, kTestDataSize);
+  EXPECT_EQ(kTestDataSize, bytes_written);
+
+  // Extend the file.
+  const int kExtendedFileLength = 10;
+  int64_t file_size = 0;
+  EXPECT_TRUE(file.SetLength(kExtendedFileLength));
+  EXPECT_EQ(kExtendedFileLength, file.GetLength());
+  EXPECT_TRUE(GetFileSize(file_path, &file_size));
+  EXPECT_EQ(kExtendedFileLength, file_size);
+
+  // Make sure the file was zero-padded.
+  char data_read[32];
+  int bytes_read = file.Read(0, data_read, static_cast<int>(file_size));
+  EXPECT_EQ(file_size, bytes_read);
+  for (int i = 0; i < kTestDataSize; i++)
+    EXPECT_EQ(data_to_write[i], data_read[i]);
+  for (int i = kTestDataSize; i < file_size; i++)
+    EXPECT_EQ(0, data_read[i]);
+
+  // Truncate the file.
+  const int kTruncatedFileLength = 2;
+  EXPECT_TRUE(file.SetLength(kTruncatedFileLength));
+  EXPECT_EQ(kTruncatedFileLength, file.GetLength());
+  EXPECT_TRUE(GetFileSize(file_path, &file_size));
+  EXPECT_EQ(kTruncatedFileLength, file_size);
+
+  // Make sure the file was truncated.
+  bytes_read = file.Read(0, data_read, kTestDataSize);
+  EXPECT_EQ(file_size, bytes_read);
+  for (int i = 0; i < file_size; i++)
+    EXPECT_EQ(data_to_write[i], data_read[i]);
+
+  // Close the file and reopen with base::File::FLAG_CREATE_ALWAYS, and make
+  // sure the file is empty (the old file was overwritten).
+  file.Close();
+  file.Initialize(file_path,
+                  base::File::FLAG_CREATE_ALWAYS | base::File::FLAG_WRITE);
+  EXPECT_EQ(0, file.GetLength());
+}
+
+// Flakily fails: http://crbug.com/86494
+#if defined(OS_ANDROID)
+TEST(FileTest, TouchGetInfo) {
+#else
+TEST(FileTest, DISABLED_TouchGetInfo) {
+#endif
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  File file(temp_dir.GetPath().AppendASCII("touch_get_info_file"),
+            base::File::FLAG_CREATE | base::File::FLAG_WRITE |
+                base::File::FLAG_WRITE_ATTRIBUTES);
+  ASSERT_TRUE(file.IsValid());
+
+  // Get info for a newly created file.
+  base::File::Info info;
+  EXPECT_TRUE(file.GetInfo(&info));
+
+  // Add 2 seconds to account for possible rounding errors on
+  // filesystems that use a 1s or 2s timestamp granularity.
+  base::Time now = base::Time::Now() + base::TimeDelta::FromSeconds(2);
+  EXPECT_EQ(0, info.size);
+  EXPECT_FALSE(info.is_directory);
+  EXPECT_FALSE(info.is_symbolic_link);
+  EXPECT_LE(info.last_accessed.ToInternalValue(), now.ToInternalValue());
+  EXPECT_LE(info.last_modified.ToInternalValue(), now.ToInternalValue());
+  EXPECT_LE(info.creation_time.ToInternalValue(), now.ToInternalValue());
+  base::Time creation_time = info.creation_time;
+
+  // Write "test" to the file.
+  char data[] = "test";
+  const int kTestDataSize = 4;
+  int bytes_written = file.Write(0, data, kTestDataSize);
+  EXPECT_EQ(kTestDataSize, bytes_written);
+
+  // Change the last_accessed and last_modified dates.
+  // It's best to add values that are multiples of 2 (in seconds)
+  // to the current last_accessed and last_modified times, because
+  // FATxx uses a 2s timestamp granularity.
+  base::Time new_last_accessed =
+      info.last_accessed + base::TimeDelta::FromSeconds(234);
+  base::Time new_last_modified =
+      info.last_modified + base::TimeDelta::FromMinutes(567);
+
+  EXPECT_TRUE(file.SetTimes(new_last_accessed, new_last_modified));
+
+  // Make sure the file info was updated accordingly.
+  EXPECT_TRUE(file.GetInfo(&info));
+  EXPECT_EQ(info.size, kTestDataSize);
+  EXPECT_FALSE(info.is_directory);
+  EXPECT_FALSE(info.is_symbolic_link);
+
+  // ext2/ext3 and HFS/HFS+ seem to have a timestamp granularity of 1s.
+#if defined(OS_POSIX)
+  EXPECT_EQ(info.last_accessed.ToTimeVal().tv_sec,
+            new_last_accessed.ToTimeVal().tv_sec);
+  EXPECT_EQ(info.last_modified.ToTimeVal().tv_sec,
+            new_last_modified.ToTimeVal().tv_sec);
+#else
+  EXPECT_EQ(info.last_accessed.ToInternalValue(),
+            new_last_accessed.ToInternalValue());
+  EXPECT_EQ(info.last_modified.ToInternalValue(),
+            new_last_modified.ToInternalValue());
+#endif
+
+  EXPECT_EQ(info.creation_time.ToInternalValue(),
+            creation_time.ToInternalValue());
+}
+
+TEST(FileTest, ReadAtCurrentPosition) {
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath file_path =
+      temp_dir.GetPath().AppendASCII("read_at_current_position");
+  File file(file_path,
+            base::File::FLAG_CREATE | base::File::FLAG_READ |
+                base::File::FLAG_WRITE);
+  EXPECT_TRUE(file.IsValid());
+
+  const char kData[] = "test";
+  const int kDataSize = sizeof(kData) - 1;
+  EXPECT_EQ(kDataSize, file.Write(0, kData, kDataSize));
+
+  EXPECT_EQ(0, file.Seek(base::File::FROM_BEGIN, 0));
+
+  char buffer[kDataSize];
+  int first_chunk_size = kDataSize / 2;
+  EXPECT_EQ(first_chunk_size, file.ReadAtCurrentPos(buffer, first_chunk_size));
+  EXPECT_EQ(kDataSize - first_chunk_size,
+            file.ReadAtCurrentPos(buffer + first_chunk_size,
+                                  kDataSize - first_chunk_size));
+  EXPECT_EQ(std::string(buffer, buffer + kDataSize), std::string(kData));
+}
+
+TEST(FileTest, WriteAtCurrentPosition) {
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath file_path =
+      temp_dir.GetPath().AppendASCII("write_at_current_position");
+  File file(file_path,
+            base::File::FLAG_CREATE | base::File::FLAG_READ |
+                base::File::FLAG_WRITE);
+  EXPECT_TRUE(file.IsValid());
+
+  const char kData[] = "test";
+  const int kDataSize = sizeof(kData) - 1;
+
+  int first_chunk_size = kDataSize / 2;
+  EXPECT_EQ(first_chunk_size, file.WriteAtCurrentPos(kData, first_chunk_size));
+  EXPECT_EQ(kDataSize - first_chunk_size,
+            file.WriteAtCurrentPos(kData + first_chunk_size,
+                                   kDataSize - first_chunk_size));
+
+  char buffer[kDataSize];
+  EXPECT_EQ(kDataSize, file.Read(0, buffer, kDataSize));
+  EXPECT_EQ(std::string(buffer, buffer + kDataSize), std::string(kData));
+}
+
+TEST(FileTest, Seek) {
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath file_path = temp_dir.GetPath().AppendASCII("seek_file");
+  File file(file_path,
+            base::File::FLAG_CREATE | base::File::FLAG_READ |
+                base::File::FLAG_WRITE);
+  ASSERT_TRUE(file.IsValid());
+
+  const int64_t kOffset = 10;
+  EXPECT_EQ(kOffset, file.Seek(base::File::FROM_BEGIN, kOffset));
+  EXPECT_EQ(2 * kOffset, file.Seek(base::File::FROM_CURRENT, kOffset));
+  EXPECT_EQ(kOffset, file.Seek(base::File::FROM_CURRENT, -kOffset));
+  EXPECT_TRUE(file.SetLength(kOffset * 2));
+  EXPECT_EQ(kOffset, file.Seek(base::File::FROM_END, -kOffset));
+}
+
+TEST(FileTest, Duplicate) {
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath file_path = temp_dir.GetPath().AppendASCII("file");
+  File file(file_path,(base::File::FLAG_CREATE |
+                       base::File::FLAG_READ |
+                       base::File::FLAG_WRITE));
+  ASSERT_TRUE(file.IsValid());
+
+  File file2(file.Duplicate());
+  ASSERT_TRUE(file2.IsValid());
+
+  // Write through one handle, close it, read through the other.
+  static const char kData[] = "now is a good time.";
+  static const int kDataLen = sizeof(kData) - 1;
+
+  ASSERT_EQ(0, file.Seek(base::File::FROM_CURRENT, 0));
+  ASSERT_EQ(0, file2.Seek(base::File::FROM_CURRENT, 0));
+  ASSERT_EQ(kDataLen, file.WriteAtCurrentPos(kData, kDataLen));
+  ASSERT_EQ(kDataLen, file.Seek(base::File::FROM_CURRENT, 0));
+  ASSERT_EQ(kDataLen, file2.Seek(base::File::FROM_CURRENT, 0));
+  file.Close();
+  char buf[kDataLen];
+  ASSERT_EQ(kDataLen, file2.Read(0, &buf[0], kDataLen));
+  ASSERT_EQ(std::string(kData, kDataLen), std::string(&buf[0], kDataLen));
+}
+
+TEST(FileTest, DuplicateDeleteOnClose) {
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath file_path = temp_dir.GetPath().AppendASCII("file");
+  File file(file_path,(base::File::FLAG_CREATE |
+                       base::File::FLAG_READ |
+                       base::File::FLAG_WRITE |
+                       base::File::FLAG_DELETE_ON_CLOSE));
+  ASSERT_TRUE(file.IsValid());
+  File file2(file.Duplicate());
+  ASSERT_TRUE(file2.IsValid());
+  file.Close();
+  file2.Close();
+  ASSERT_FALSE(base::PathExists(file_path));
+}
+
+#if defined(OS_WIN)
+TEST(FileTest, GetInfoForDirectory) {
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath empty_dir =
+      temp_dir.GetPath().Append(FILE_PATH_LITERAL("gpfi_test"));
+  ASSERT_TRUE(CreateDirectory(empty_dir));
+
+  base::File dir(
+      ::CreateFile(empty_dir.value().c_str(),
+                   GENERIC_READ | GENERIC_WRITE,
+                   FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
+                   NULL,
+                   OPEN_EXISTING,
+                   FILE_FLAG_BACKUP_SEMANTICS,  // Needed to open a directory.
+                   NULL));
+  ASSERT_TRUE(dir.IsValid());
+
+  base::File::Info info;
+  EXPECT_TRUE(dir.GetInfo(&info));
+  EXPECT_TRUE(info.is_directory);
+  EXPECT_FALSE(info.is_symbolic_link);
+  EXPECT_EQ(0, info.size);
+}
+
+TEST(FileTest, DeleteNoop) {
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath file_path = temp_dir.GetPath().AppendASCII("file");
+
+  // Creating and closing a file with DELETE perms should do nothing special.
+  File file(file_path,
+            (base::File::FLAG_CREATE | base::File::FLAG_READ |
+             base::File::FLAG_WRITE | base::File::FLAG_CAN_DELETE_ON_CLOSE));
+  ASSERT_TRUE(file.IsValid());
+  file.Close();
+  ASSERT_TRUE(base::PathExists(file_path));
+}
+
+TEST(FileTest, Delete) {
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath file_path = temp_dir.GetPath().AppendASCII("file");
+
+  // Creating a file with DELETE and then marking for delete on close should
+  // delete it.
+  File file(file_path,
+            (base::File::FLAG_CREATE | base::File::FLAG_READ |
+             base::File::FLAG_WRITE | base::File::FLAG_CAN_DELETE_ON_CLOSE));
+  ASSERT_TRUE(file.IsValid());
+  ASSERT_TRUE(file.DeleteOnClose(true));
+  file.Close();
+  ASSERT_FALSE(base::PathExists(file_path));
+}
+
+TEST(FileTest, DeleteThenRevoke) {
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath file_path = temp_dir.GetPath().AppendASCII("file");
+
+  // Creating a file with DELETE, marking it for delete, then clearing delete on
+  // close should not delete it.
+  File file(file_path,
+            (base::File::FLAG_CREATE | base::File::FLAG_READ |
+             base::File::FLAG_WRITE | base::File::FLAG_CAN_DELETE_ON_CLOSE));
+  ASSERT_TRUE(file.IsValid());
+  ASSERT_TRUE(file.DeleteOnClose(true));
+  ASSERT_TRUE(file.DeleteOnClose(false));
+  file.Close();
+  ASSERT_TRUE(base::PathExists(file_path));
+}
+
+TEST(FileTest, IrrevokableDeleteOnClose) {
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath file_path = temp_dir.GetPath().AppendASCII("file");
+
+  // DELETE_ON_CLOSE cannot be revoked by this opener.
+  File file(
+      file_path,
+      (base::File::FLAG_CREATE | base::File::FLAG_READ |
+       base::File::FLAG_WRITE | base::File::FLAG_DELETE_ON_CLOSE |
+       base::File::FLAG_SHARE_DELETE | base::File::FLAG_CAN_DELETE_ON_CLOSE));
+  ASSERT_TRUE(file.IsValid());
+  // https://msdn.microsoft.com/library/windows/desktop/aa364221.aspx says that
+  // setting the disposition has no effect if the handle was opened with
+  // FLAG_DELETE_ON_CLOSE. Do not make the test's success dependent on whether
+  // SetFileInformationByHandle indicates success or failure. (It happens
+  // to indicate success on Windows 10.)
+  file.DeleteOnClose(false);
+  file.Close();
+  ASSERT_FALSE(base::PathExists(file_path));
+}
+
+TEST(FileTest, IrrevokableDeleteOnCloseOther) {
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath file_path = temp_dir.GetPath().AppendASCII("file");
+
+  // DELETE_ON_CLOSE cannot be revoked by another opener.
+  File file(
+      file_path,
+      (base::File::FLAG_CREATE | base::File::FLAG_READ |
+       base::File::FLAG_WRITE | base::File::FLAG_DELETE_ON_CLOSE |
+       base::File::FLAG_SHARE_DELETE | base::File::FLAG_CAN_DELETE_ON_CLOSE));
+  ASSERT_TRUE(file.IsValid());
+
+  File file2(
+      file_path,
+      (base::File::FLAG_OPEN | base::File::FLAG_READ | base::File::FLAG_WRITE |
+       base::File::FLAG_SHARE_DELETE | base::File::FLAG_CAN_DELETE_ON_CLOSE));
+  ASSERT_TRUE(file2.IsValid());
+
+  file2.DeleteOnClose(false);
+  file2.Close();
+  ASSERT_TRUE(base::PathExists(file_path));
+  file.Close();
+  ASSERT_FALSE(base::PathExists(file_path));
+}
+
+TEST(FileTest, DeleteWithoutPermission) {
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath file_path = temp_dir.GetPath().AppendASCII("file");
+
+  // It should not be possible to mark a file for deletion when it was not
+  // created/opened with DELETE.
+  File file(file_path, (base::File::FLAG_CREATE | base::File::FLAG_READ |
+                        base::File::FLAG_WRITE));
+  ASSERT_TRUE(file.IsValid());
+  ASSERT_FALSE(file.DeleteOnClose(true));
+  file.Close();
+  ASSERT_TRUE(base::PathExists(file_path));
+}
+
+TEST(FileTest, UnsharedDeleteOnClose) {
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath file_path = temp_dir.GetPath().AppendASCII("file");
+
+  // Opening with DELETE_ON_CLOSE when a previous opener hasn't enabled sharing
+  // will fail.
+  File file(file_path, (base::File::FLAG_CREATE | base::File::FLAG_READ |
+                        base::File::FLAG_WRITE));
+  ASSERT_TRUE(file.IsValid());
+  File file2(
+      file_path,
+      (base::File::FLAG_OPEN | base::File::FLAG_READ | base::File::FLAG_WRITE |
+       base::File::FLAG_DELETE_ON_CLOSE | base::File::FLAG_SHARE_DELETE));
+  ASSERT_FALSE(file2.IsValid());
+
+  file.Close();
+  ASSERT_TRUE(base::PathExists(file_path));
+}
+
+TEST(FileTest, NoDeleteOnCloseWithMappedFile) {
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath file_path = temp_dir.GetPath().AppendASCII("file");
+
+  // Mapping a file into memory blocks DeleteOnClose.
+  File file(file_path,
+            (base::File::FLAG_CREATE | base::File::FLAG_READ |
+             base::File::FLAG_WRITE | base::File::FLAG_CAN_DELETE_ON_CLOSE));
+  ASSERT_TRUE(file.IsValid());
+  ASSERT_EQ(5, file.WriteAtCurrentPos("12345", 5));
+
+  {
+    base::MemoryMappedFile mapping;
+    ASSERT_TRUE(mapping.Initialize(file.Duplicate()));
+    ASSERT_EQ(5U, mapping.length());
+
+    EXPECT_FALSE(file.DeleteOnClose(true));
+  }
+
+  file.Close();
+  ASSERT_TRUE(base::PathExists(file_path));
+}
+#endif  // defined(OS_WIN)
diff --git a/base/files/file_util.cc b/base/files/file_util.cc
new file mode 100644
index 0000000..109cb22
--- /dev/null
+++ b/base/files/file_util.cc
@@ -0,0 +1,288 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file_util.h"
+
+#if defined(OS_WIN)
+#include <io.h>
+#endif
+#include <stdio.h>
+
+#include <fstream>
+#include <limits>
+
+#include "base/files/file_enumerator.h"
+#include "base/files/file_path.h"
+#include "base/logging.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/strings/utf_string_conversions.h"
+#include "build/build_config.h"
+
+namespace base {
+
+#if !defined(OS_NACL_NONSFI)
+namespace {
+
+// The maximum number of 'uniquified' files we will try to create.
+// This is used when the filename we're trying to download is already in use,
+// so we create a new unique filename by appending " (nnn)" before the
+// extension, where 1 <= nnn <= kMaxUniqueFiles.
+// Also used by code that cleans up said files.
+static const int kMaxUniqueFiles = 100;
+
+}  // namespace
+
+int64_t ComputeDirectorySize(const FilePath& root_path) {
+  int64_t running_size = 0;
+  FileEnumerator file_iter(root_path, true, FileEnumerator::FILES);
+  while (!file_iter.Next().empty())
+    running_size += file_iter.GetInfo().GetSize();
+  return running_size;
+}
+
+bool Move(const FilePath& from_path, const FilePath& to_path) {
+  if (from_path.ReferencesParent() || to_path.ReferencesParent())
+    return false;
+  return internal::MoveUnsafe(from_path, to_path);
+}
+
+bool ContentsEqual(const FilePath& filename1, const FilePath& filename2) {
+  // We open the files in binary mode even if they are text files, because we
+  // are just checking that the bytes are exactly the same in both files; we
+  // are not doing anything smart with text formatting.
+  std::ifstream file1(filename1.value().c_str(),
+                      std::ios::in | std::ios::binary);
+  std::ifstream file2(filename2.value().c_str(),
+                      std::ios::in | std::ios::binary);
+
+  // Even if both files aren't openable (and thus, in some sense, "equal"),
+  // any unusable file yields a result of "false".
+  if (!file1.is_open() || !file2.is_open())
+    return false;
+
+  const int BUFFER_SIZE = 2056;
+  char buffer1[BUFFER_SIZE], buffer2[BUFFER_SIZE];
+  do {
+    file1.read(buffer1, BUFFER_SIZE);
+    file2.read(buffer2, BUFFER_SIZE);
+
+    if ((file1.eof() != file2.eof()) ||
+        (file1.gcount() != file2.gcount()) ||
+        (memcmp(buffer1, buffer2, static_cast<size_t>(file1.gcount())))) {
+      file1.close();
+      file2.close();
+      return false;
+    }
+  } while (!file1.eof() || !file2.eof());
+
+  file1.close();
+  file2.close();
+  return true;
+}
+
+bool TextContentsEqual(const FilePath& filename1, const FilePath& filename2) {
+  std::ifstream file1(filename1.value().c_str(), std::ios::in);
+  std::ifstream file2(filename2.value().c_str(), std::ios::in);
+
+  // Even if both files aren't openable (and thus, in some sense, "equal"),
+  // any unusable file yields a result of "false".
+  if (!file1.is_open() || !file2.is_open())
+    return false;
+
+  do {
+    std::string line1, line2;
+    getline(file1, line1);
+    getline(file2, line2);
+
+    // Check for mismatched EOF states, or any error state.
+    if ((file1.eof() != file2.eof()) ||
+        file1.bad() || file2.bad()) {
+      return false;
+    }
+
+    // Trim all '\r' and '\n' characters from the end of the line.
+    std::string::size_type end1 = line1.find_last_not_of("\r\n");
+    if (end1 == std::string::npos)
+      line1.clear();
+    else if (end1 + 1 < line1.length())
+      line1.erase(end1 + 1);
+
+    std::string::size_type end2 = line2.find_last_not_of("\r\n");
+    if (end2 == std::string::npos)
+      line2.clear();
+    else if (end2 + 1 < line2.length())
+      line2.erase(end2 + 1);
+
+    if (line1 != line2)
+      return false;
+  } while (!file1.eof() || !file2.eof());
+
+  return true;
+}
+#endif  // !defined(OS_NACL_NONSFI)
+
+bool ReadFileToStringWithMaxSize(const FilePath& path,
+                                 std::string* contents,
+                                 size_t max_size) {
+  if (contents)
+    contents->clear();
+  if (path.ReferencesParent())
+    return false;
+  FILE* file = OpenFile(path, "rb");
+  if (!file) {
+    return false;
+  }
+
+  // Many files supplied in |path| have incorrect size (proc files etc).
+  // Hence, the file is read sequentially as opposed to a one-shot read, using
+  // file size as a hint for chunk size if available.
+  constexpr int64_t kDefaultChunkSize = 1 << 16;
+  int64_t chunk_size;
+#if !defined(OS_NACL_NONSFI)
+  if (!GetFileSize(path, &chunk_size) || chunk_size <= 0)
+    chunk_size = kDefaultChunkSize - 1;
+  // We need to attempt a read at EOF for the feof flag to be set, so we
+  // use |chunk_size| + 1 here.
+  chunk_size = std::min<uint64_t>(chunk_size, max_size) + 1;
+#else
+  chunk_size = kDefaultChunkSize;
+#endif  // !defined(OS_NACL_NONSFI)
+  size_t bytes_read_this_pass;
+  size_t bytes_read_so_far = 0;
+  bool read_status = true;
+  std::string local_contents;
+  local_contents.resize(chunk_size);
+
+  while ((bytes_read_this_pass = fread(&local_contents[bytes_read_so_far], 1,
+                                       chunk_size, file)) > 0) {
+    if ((max_size - bytes_read_so_far) < bytes_read_this_pass) {
+      // Read more than max_size bytes, bail out.
+      bytes_read_so_far = max_size;
+      read_status = false;
+      break;
+    }
+    // In case EOF was not reached, iterate again but revert to the default
+    // chunk size.
+    if (bytes_read_so_far == 0)
+      chunk_size = kDefaultChunkSize;
+
+    bytes_read_so_far += bytes_read_this_pass;
+    // Last fread syscall (after EOF) can be avoided via feof, which is just a
+    // flag check.
+    if (feof(file))
+      break;
+    local_contents.resize(bytes_read_so_far + chunk_size);
+  }
+  read_status = read_status && !ferror(file);
+  CloseFile(file);
+  if (contents) {
+    contents->swap(local_contents);
+    contents->resize(bytes_read_so_far);
+  }
+
+  return read_status;
+}
+
+bool ReadFileToString(const FilePath& path, std::string* contents) {
+  return ReadFileToStringWithMaxSize(path, contents,
+                                     std::numeric_limits<size_t>::max());
+}
+
+#if !defined(OS_NACL_NONSFI)
+bool IsDirectoryEmpty(const FilePath& dir_path) {
+  FileEnumerator files(dir_path, false,
+      FileEnumerator::FILES | FileEnumerator::DIRECTORIES);
+  if (files.Next().empty())
+    return true;
+  return false;
+}
+
+FILE* CreateAndOpenTemporaryFile(FilePath* path) {
+  FilePath directory;
+  if (!GetTempDir(&directory))
+    return nullptr;
+
+  return CreateAndOpenTemporaryFileInDir(directory, path);
+}
+
+bool CreateDirectory(const FilePath& full_path) {
+  return CreateDirectoryAndGetError(full_path, nullptr);
+}
+
+bool GetFileSize(const FilePath& file_path, int64_t* file_size) {
+  File::Info info;
+  if (!GetFileInfo(file_path, &info))
+    return false;
+  *file_size = info.size;
+  return true;
+}
+
+bool TouchFile(const FilePath& path,
+               const Time& last_accessed,
+               const Time& last_modified) {
+  int flags = File::FLAG_OPEN | File::FLAG_WRITE_ATTRIBUTES;
+
+#if defined(OS_WIN)
+  // On Windows, FILE_FLAG_BACKUP_SEMANTICS is needed to open a directory.
+  if (DirectoryExists(path))
+    flags |= File::FLAG_BACKUP_SEMANTICS;
+#endif  // OS_WIN
+
+  File file(path, flags);
+  if (!file.IsValid())
+    return false;
+
+  return file.SetTimes(last_accessed, last_modified);
+}
+#endif  // !defined(OS_NACL_NONSFI)
+
+bool CloseFile(FILE* file) {
+  if (file == nullptr)
+    return true;
+  return fclose(file) == 0;
+}
+
+#if !defined(OS_NACL_NONSFI)
+bool TruncateFile(FILE* file) {
+  if (file == nullptr)
+    return false;
+  long current_offset = ftell(file);
+  if (current_offset == -1)
+    return false;
+#if defined(OS_WIN)
+  int fd = _fileno(file);
+  if (_chsize(fd, current_offset) != 0)
+    return false;
+#else
+  int fd = fileno(file);
+  if (ftruncate(fd, current_offset) != 0)
+    return false;
+#endif
+  return true;
+}
+
+int GetUniquePathNumber(const FilePath& path,
+                        const FilePath::StringType& suffix) {
+  bool have_suffix = !suffix.empty();
+  if (!PathExists(path) &&
+      (!have_suffix || !PathExists(FilePath(path.value() + suffix)))) {
+    return 0;
+  }
+
+  FilePath new_path;
+  for (int count = 1; count <= kMaxUniqueFiles; ++count) {
+    new_path = path.InsertBeforeExtensionASCII(StringPrintf(" (%d)", count));
+    if (!PathExists(new_path) &&
+        (!have_suffix || !PathExists(FilePath(new_path.value() + suffix)))) {
+      return count;
+    }
+  }
+
+  return -1;
+}
+#endif  // !defined(OS_NACL_NONSFI)
+
+}  // namespace base
diff --git a/base/files/file_util.h b/base/files/file_util.h
new file mode 100644
index 0000000..1ba9368
--- /dev/null
+++ b/base/files/file_util.h
@@ -0,0 +1,489 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains utility functions for dealing with the local
+// filesystem.
+
+#ifndef BASE_FILES_FILE_UTIL_H_
+#define BASE_FILES_FILE_UTIL_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+
+#include <set>
+#include <string>
+#include <vector>
+
+// build/build_config.h defines the OS_* macros tested below, so include it
+// before the platform checks.
+#include "build/build_config.h"
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include <sys/stat.h>
+#include <unistd.h>
+#endif
+
+#include "base/base_export.h"
+#include "base/files/file.h"
+#include "base/files/file_path.h"
+#include "base/strings/string16.h"
+
+#if defined(OS_WIN)
+#include "base/win/windows_types.h"
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include "base/file_descriptor_posix.h"
+#include "base/logging.h"
+#include "base/posix/eintr_wrapper.h"
+#endif
+
+namespace base {
+
+class Environment;
+class Time;
+
+//-----------------------------------------------------------------------------
+// Functions that involve filesystem access or modification:
+
+// Returns an absolute version of a relative path. Returns an empty path on
+// error. On POSIX, this function fails if the path does not exist. This
+// function can result in I/O so it can be slow.
+BASE_EXPORT FilePath MakeAbsoluteFilePath(const FilePath& input);
+
+// Returns the total number of bytes used by all the files under |root_path|.
+// If the path does not exist, the function returns 0.
+//
+// This function is implemented using the FileEnumerator class, so it is not
+// particularly speedy on any platform.
+BASE_EXPORT int64_t ComputeDirectorySize(const FilePath& root_path);
+
+// Deletes the given path, whether it's a file or a directory.
+// If it's a directory, it's perfectly happy to delete all of the
+// directory's contents.  Passing true for |recursive| deletes
+// subdirectories and their contents as well.
+// Returns true if successful, false otherwise. It is considered successful
+// to attempt to delete a file that does not exist.
+//
+// In a POSIX environment, if |path| is a symbolic link, this deletes only
+// the symlink (even if the symlink points to a non-existent file).
+//
+// WARNING: USING THIS WITH recursive==true IS EQUIVALENT
+//          TO "rm -rf", SO USE WITH CAUTION.
+BASE_EXPORT bool DeleteFile(const FilePath& path, bool recursive);
+
+#if defined(OS_WIN)
+// Schedules the given path, whether it's a file or a directory, to be deleted
+// when the operating system is next restarted.
+// Note:
+// 1) The file/directory to be deleted should exist in a temp folder.
+// 2) The directory to be deleted must be empty.
+BASE_EXPORT bool DeleteFileAfterReboot(const FilePath& path);
+#endif
+
+// Moves the given path, whether it's a file or a directory.
+// If a simple rename is not possible, such as in the case where the paths are
+// on different volumes, this will attempt to copy and delete. Returns
+// true for success.
+// This function fails if either path contains traversal components ('..').
+BASE_EXPORT bool Move(const FilePath& from_path, const FilePath& to_path);
+
+// Renames file |from_path| to |to_path|. Both paths must be on the same
+// volume, or the function will fail. The destination file will be created
+// if it doesn't exist. Prefer this function over Move when dealing with
+// temporary files. On Windows it preserves attributes of the target file.
+// Returns true on success, leaving *error unchanged.
+// Returns false on failure and sets *error appropriately, if it is non-NULL.
+BASE_EXPORT bool ReplaceFile(const FilePath& from_path,
+                             const FilePath& to_path,
+                             File::Error* error);
+
+// Copies a single file. Use CopyDirectory() to copy directories.
+// This function fails if either path contains traversal components ('..').
+// This function also fails if |to_path| is a directory.
+//
+// On POSIX, if |to_path| is a symlink, CopyFile() will follow the symlink. This
+// may have security implications. Use with care.
+//
+// If |to_path| already exists and is a regular file, it will be overwritten,
+// though its permissions will stay the same.
+//
+// If |to_path| does not exist, it will be created. The new file's permissions
+// vary per platform:
+//
+// - On Windows, this function keeps the metadata. The read-only bit is not
+//   kept.
+// - On Mac and iOS, |to_path| retains |from_path|'s permissions, except user
+//   read/write permissions are always set.
+// - On Linux and Android, |to_path| has user read/write permissions only. i.e.
+//   Always 0600.
+// - On ChromeOS, |to_path| has user read/write permissions and group/others
+//   read permissions. i.e. Always 0644.
+BASE_EXPORT bool CopyFile(const FilePath& from_path, const FilePath& to_path);
+
+// Copies the given path, and optionally all subdirectories and their contents
+// as well.
+//
+// If files already exist under |to_path|, they are always overwritten. Returns
+// true if successful, false otherwise. Wildcards in the names are not
+// supported.
+//
+// This function has the same metadata behavior as CopyFile().
+//
+// If you only need to copy a file, use CopyFile(); it's faster.
+BASE_EXPORT bool CopyDirectory(const FilePath& from_path,
+                               const FilePath& to_path,
+                               bool recursive);
+
+// Like CopyDirectory(), except that an attempt to overwrite an existing file
+// will not work and will return false.
+BASE_EXPORT bool CopyDirectoryExcl(const FilePath& from_path,
+                                   const FilePath& to_path,
+                                   bool recursive);
+
+// Returns true if the given path exists on the local filesystem,
+// false otherwise.
+BASE_EXPORT bool PathExists(const FilePath& path);
+
+// Returns true if the given path is writable by the user, false otherwise.
+BASE_EXPORT bool PathIsWritable(const FilePath& path);
+
+// Returns true if the given path exists and is a directory, false otherwise.
+BASE_EXPORT bool DirectoryExists(const FilePath& path);
+
+// Returns true if the contents of the two files given are equal, false
+// otherwise.  If either file can't be read, returns false.
+BASE_EXPORT bool ContentsEqual(const FilePath& filename1,
+                               const FilePath& filename2);
+
+// Returns true if the contents of the two text files given are equal, false
+// otherwise.  This routine treats "\r\n" and "\n" as equivalent.
+BASE_EXPORT bool TextContentsEqual(const FilePath& filename1,
+                                   const FilePath& filename2);
+
+// Reads the file at |path| into |contents| and returns true on success and
+// false on error.  For security reasons, a |path| containing path traversal
+// components ('..') is treated as a read error and |contents| is set to empty.
+// In case of I/O error, |contents| holds the data that could be read from the
+// file before the error occurred.
+// |contents| may be NULL, in which case this function is useful for its side
+// effect of priming the disk cache (could be used for unit tests).
+BASE_EXPORT bool ReadFileToString(const FilePath& path, std::string* contents);
+
+// Reads the file at |path| into |contents| and returns true on success and
+// false on error.  For security reasons, a |path| containing path traversal
+// components ('..') is treated as a read error and |contents| is set to empty.
+// In case of I/O error, |contents| holds the data that could be read from the
+// file before the error occurred.  When the file size exceeds |max_size|, the
+// function returns false with |contents| holding the file truncated to
+// |max_size|.
+// |contents| may be NULL, in which case this function is useful for its side
+// effect of priming the disk cache (could be used for unit tests).
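+//
+// A usage sketch (the path and the 1 MiB cap are illustrative):
+//   std::string contents;
+//   if (base::ReadFileToStringWithMaxSize(
+//           base::FilePath(FILE_PATH_LITERAL("/var/log/messages")), &contents,
+//           1 << 20)) {
+//     // Success: |contents| holds the entire file, at most 1 MiB.
+//   }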
+BASE_EXPORT bool ReadFileToStringWithMaxSize(const FilePath& path,
+                                             std::string* contents,
+                                             size_t max_size);
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+
+// Read exactly |bytes| bytes from file descriptor |fd|, storing the result
+// in |buffer|. This function is protected against EINTR and partial reads.
+// Returns true iff |bytes| bytes have been successfully read from |fd|.
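+//
+// A sketch (|fd| is an already-open descriptor; the size is illustrative):
+//   char header[16];
+//   if (base::ReadFromFD(fd, header, sizeof(header))) {
+//     // Exactly 16 bytes were read into |header|.
+//   }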
+BASE_EXPORT bool ReadFromFD(int fd, char* buffer, size_t bytes);
+
+// Performs the same function as CreateAndOpenTemporaryFileInDir(), but returns
+// the file-descriptor directly, rather than wrapping it into a FILE. Returns
+// -1 on failure.
+BASE_EXPORT int CreateAndOpenFdForTemporaryFileInDir(const FilePath& dir,
+                                                     FilePath* path);
+
+#endif  // OS_POSIX || OS_FUCHSIA
+
+#if defined(OS_POSIX)
+
+// Creates a symbolic link at |symlink| pointing to |target|.  Returns
+// false on failure.
+BASE_EXPORT bool CreateSymbolicLink(const FilePath& target,
+                                    const FilePath& symlink);
+
+// Reads the given |symlink| and returns where it points to in |target|.
+// Returns false upon failure.
+BASE_EXPORT bool ReadSymbolicLink(const FilePath& symlink, FilePath* target);
+
+// Bits and masks of the file permission.
+enum FilePermissionBits {
+  FILE_PERMISSION_MASK              = S_IRWXU | S_IRWXG | S_IRWXO,
+  FILE_PERMISSION_USER_MASK         = S_IRWXU,
+  FILE_PERMISSION_GROUP_MASK        = S_IRWXG,
+  FILE_PERMISSION_OTHERS_MASK       = S_IRWXO,
+
+  FILE_PERMISSION_READ_BY_USER      = S_IRUSR,
+  FILE_PERMISSION_WRITE_BY_USER     = S_IWUSR,
+  FILE_PERMISSION_EXECUTE_BY_USER   = S_IXUSR,
+  FILE_PERMISSION_READ_BY_GROUP     = S_IRGRP,
+  FILE_PERMISSION_WRITE_BY_GROUP    = S_IWGRP,
+  FILE_PERMISSION_EXECUTE_BY_GROUP  = S_IXGRP,
+  FILE_PERMISSION_READ_BY_OTHERS    = S_IROTH,
+  FILE_PERMISSION_WRITE_BY_OTHERS   = S_IWOTH,
+  FILE_PERMISSION_EXECUTE_BY_OTHERS = S_IXOTH,
+};
+
+// Reads the permission of the given |path|, storing the file permission
+// bits in |mode|. If |path| is a symbolic link, |mode| is the permission of
+// the file the symlink points to.
+BASE_EXPORT bool GetPosixFilePermissions(const FilePath& path, int* mode);
+// Sets the permission of the given |path|. If |path| is a symbolic link, sets
+// the permission of the file the symlink points to.
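+//
+// For example, a sketch of setting mode 0640 (user rw, group r):
+//   SetPosixFilePermissions(path, FILE_PERMISSION_READ_BY_USER |
+//                                     FILE_PERMISSION_WRITE_BY_USER |
+//                                     FILE_PERMISSION_READ_BY_GROUP);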
+BASE_EXPORT bool SetPosixFilePermissions(const FilePath& path, int mode);
+
+// Returns true iff |executable| can be found in any directory specified by the
+// PATH environment variable in |env|.
+BASE_EXPORT bool ExecutableExistsInPath(Environment* env,
+                                        const FilePath::StringType& executable);
+
+#endif  // OS_POSIX
+
+// Returns true if the given directory is empty.
+BASE_EXPORT bool IsDirectoryEmpty(const FilePath& dir_path);
+
+// Get the temporary directory provided by the system.
+//
+// WARNING: In general, you should use CreateTemporaryFile variants below
+// instead of this function. Those variants will ensure that the proper
+// permissions are set so that other users on the system can't edit them while
+// they're open (which can lead to security issues).
+BASE_EXPORT bool GetTempDir(FilePath* path);
+
+// Get the home directory. This is more complicated than just getenv("HOME")
+// as it knows to fall back on getpwent() etc.
+//
+// You should not generally call this directly. Instead use DIR_HOME with the
+// path service which will use this function but cache the value.
+// Path service may also override DIR_HOME.
+BASE_EXPORT FilePath GetHomeDir();
+
+// Creates a temporary file. The full path is placed in |path|, and the
+// function returns true if it was successful in creating the file. The file
+// will be empty and all handles closed after this function returns.
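+//
+// A usage sketch (|data| and |size| are placeholders):
+//   base::FilePath temp_path;
+//   if (base::CreateTemporaryFile(&temp_path))
+//     base::WriteFile(temp_path, data, size);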
+BASE_EXPORT bool CreateTemporaryFile(FilePath* path);
+
+// Same as CreateTemporaryFile but the file is created in |dir|.
+BASE_EXPORT bool CreateTemporaryFileInDir(const FilePath& dir,
+                                          FilePath* temp_file);
+
+// Create and open a temporary file.  File is opened for read/write.
+// The full path is placed in |path|.
+// Returns a handle to the opened file or NULL if an error occurred.
+BASE_EXPORT FILE* CreateAndOpenTemporaryFile(FilePath* path);
+
+// Similar to CreateAndOpenTemporaryFile, but the file is created in |dir|.
+BASE_EXPORT FILE* CreateAndOpenTemporaryFileInDir(const FilePath& dir,
+                                                  FilePath* path);
+
+// Create a new directory. If |prefix| is provided, the new directory name is
+// of the form "prefixyyyy", where "yyyy" is a generated suffix.
+// NOTE: |prefix| is ignored in the POSIX implementation.
+// On success, returns true and outputs the full path of the created directory.
+BASE_EXPORT bool CreateNewTempDirectory(const FilePath::StringType& prefix,
+                                        FilePath* new_temp_path);
+
+// Create a directory within another directory.
+// Extra characters will be appended to |prefix| to ensure that the
+// new directory does not have the same name as an existing directory.
+BASE_EXPORT bool CreateTemporaryDirInDir(const FilePath& base_dir,
+                                         const FilePath::StringType& prefix,
+                                         FilePath* new_dir);
+
+// Creates a directory, as well as creating any parent directories, if they
+// don't exist. Returns 'true' on successful creation, or if the directory
+// already exists.  The directory is only readable by the current user.
+// Returns true on success, leaving *error unchanged.
+// Returns false on failure and sets *error appropriately, if it is non-NULL.
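+//
+// A sketch:
+//   base::File::Error error = base::File::FILE_OK;
+//   if (!base::CreateDirectoryAndGetError(path, &error))
+//     DLOG(ERROR) << "Failed to create directory, error code " << error;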
+BASE_EXPORT bool CreateDirectoryAndGetError(const FilePath& full_path,
+                                            File::Error* error);
+
+// Backward-compatible convenience method for the above.
+BASE_EXPORT bool CreateDirectory(const FilePath& full_path);
+
+// Returns the file size. Returns true on success.
+BASE_EXPORT bool GetFileSize(const FilePath& file_path, int64_t* file_size);
+
+// Sets |real_path| to |path| with symbolic links and junctions expanded.
+// On Windows, make sure the path starts with a lettered drive.
+// |path| must reference a file.  The function will fail if |path| points to
+// a directory or to a nonexistent path.  On Windows, this function will
+// fail if |path| is a junction or symlink that points to an empty file,
+// or if |real_path| would be longer than MAX_PATH characters.
+BASE_EXPORT bool NormalizeFilePath(const FilePath& path, FilePath* real_path);
+
+#if defined(OS_WIN)
+
+// Given a path in NT native form ("\Device\HarddiskVolumeXX\..."),
+// return in |drive_letter_path| the equivalent path that starts with
+// a drive letter ("C:\...").  Return false if no such path exists.
+BASE_EXPORT bool DevicePathToDriveLetterPath(const FilePath& device_path,
+                                             FilePath* drive_letter_path);
+
+// Given an existing file in |path|, set |real_path| to the path
+// in native NT format, of the form "\Device\HarddiskVolumeXX\..".
+// Returns false if the path cannot be found. Empty files cannot
+// be resolved with this function.
+BASE_EXPORT bool NormalizeToNativeFilePath(const FilePath& path,
+                                           FilePath* nt_path);
+#endif
+
+// Returns whether the given file is a symlink.
+BASE_EXPORT bool IsLink(const FilePath& file_path);
+
+// Returns information about the given file path.
+BASE_EXPORT bool GetFileInfo(const FilePath& file_path, File::Info* info);
+
+// Sets the time of the last access and the time of the last modification.
+BASE_EXPORT bool TouchFile(const FilePath& path,
+                           const Time& last_accessed,
+                           const Time& last_modified);
+
+// Wrapper for fopen-like calls. Returns non-NULL FILE* on success. The
+// underlying file descriptor (POSIX) or handle (Windows) is unconditionally
+// configured to not be propagated to child processes.
+BASE_EXPORT FILE* OpenFile(const FilePath& filename, const char* mode);
+
+// Closes file opened by OpenFile. Returns true on success.
+BASE_EXPORT bool CloseFile(FILE* file);
+
+// Associates a standard FILE stream with an existing File. Note that this
+// function takes ownership of the existing File.
+BASE_EXPORT FILE* FileToFILE(File file, const char* mode);
+
+// Truncates an open file to end at the location of the current file pointer.
+// This is a cross-platform analog to Windows' SetEndOfFile() function.
+BASE_EXPORT bool TruncateFile(FILE* file);
+
+// Reads at most the given number of bytes from the file into the buffer.
+// Returns the number of read bytes, or -1 on error.
+BASE_EXPORT int ReadFile(const FilePath& filename, char* data, int max_size);
+
+// Writes the given buffer into the file, overwriting any data that was
+// previously there.  Returns the number of bytes written, or -1 on error.
+BASE_EXPORT int WriteFile(const FilePath& filename, const char* data,
+                          int size);
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+// Appends |data| to |fd|. Does not close |fd| when done.  Returns true iff
+// |size| bytes of |data| were written to |fd|.
+BASE_EXPORT bool WriteFileDescriptor(const int fd, const char* data, int size);
+#endif
+
+// Appends |data| to |filename|.  Returns true iff |size| bytes of |data| were
+// written to |filename|.
+BASE_EXPORT bool AppendToFile(const FilePath& filename,
+                              const char* data,
+                              int size);
+
+// Gets the current working directory for the process.
+BASE_EXPORT bool GetCurrentDirectory(FilePath* path);
+
+// Sets the current working directory for the process.
+BASE_EXPORT bool SetCurrentDirectory(const FilePath& path);
+
+// Attempts to find a number that can be appended to |path| to make it unique.
+// If |path| does not exist, 0 is returned.  If it fails to find such
+// a number, -1 is returned. If |suffix| is not empty, also checks the
+// existence of it with the given suffix.
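+//
+// A sketch of typical use when uniquifying a file name (StringPrintf is from
+// base/strings/stringprintf.h):
+//   int uniquifier = GetUniquePathNumber(path, FILE_PATH_LITERAL(""));
+//   if (uniquifier > 0)
+//     path = path.InsertBeforeExtensionASCII(
+//         StringPrintf(" (%d)", uniquifier));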
+BASE_EXPORT int GetUniquePathNumber(const FilePath& path,
+                                    const FilePath::StringType& suffix);
+
+// Sets the given |fd| to non-blocking mode.
+// Returns true if it was able to set it in the non-blocking mode, otherwise
+// false.
+BASE_EXPORT bool SetNonBlocking(int fd);
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+// Creates a non-blocking, close-on-exec pipe.
+// This creates a non-blocking pipe that is not intended to be shared with any
+// child process. This will be done atomically if the operating system supports
+// it. Returns true if it was able to create the pipe, otherwise false.
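+//
+// A sketch (ScopedFD is from base/files/scoped_file.h):
+//   int fds[2];
+//   if (base::CreateLocalNonBlockingPipe(fds)) {
+//     base::ScopedFD read_end(fds[0]);
+//     base::ScopedFD write_end(fds[1]);
+//   }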
+BASE_EXPORT bool CreateLocalNonBlockingPipe(int fds[2]);
+
+// Sets the given |fd| to close-on-exec mode.
+// Returns true if it was able to set it in the close-on-exec mode, otherwise
+// false.
+BASE_EXPORT bool SetCloseOnExec(int fd);
+
+// Test that |path| can only be changed by a given user and members of
+// a given set of groups.
+// Specifically, test that all parts of |path| under (and including) |base|:
+// * Exist.
+// * Are owned by a specific user.
+// * Are not writable by all users.
+// * Are owned by a member of a given set of groups, or are not writable by
+//   their group.
+// * Are not symbolic links.
+// This is useful for checking that a config file is administrator-controlled.
+// |base| must contain |path|.
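+//
+// For example (a sketch; the uid and gid values are illustrative):
+//   std::set<gid_t> admin_gids = {0};
+//   bool safe = base::VerifyPathControlledByUser(
+//       base::FilePath("/etc"), base::FilePath("/etc/myapp/config"),
+//       0 /* root uid */, admin_gids);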
+BASE_EXPORT bool VerifyPathControlledByUser(const base::FilePath& base,
+                                            const base::FilePath& path,
+                                            uid_t owner_uid,
+                                            const std::set<gid_t>& group_gids);
+#endif  // defined(OS_POSIX) || defined(OS_FUCHSIA)
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+// Is |path| writable only by a user with administrator privileges?
+// This function uses Mac OS conventions.  The super user is assumed to have
+// uid 0, and the administrator group is assumed to be named "admin".
+// Testing that |path|, and every parent directory including the root of
+// the filesystem, are owned by the superuser, controlled by the group
+// "admin", are not writable by all users, and contain no symbolic links.
+// Will return false if |path| does not exist.
+BASE_EXPORT bool VerifyPathControlledByAdmin(const base::FilePath& path);
+#endif  // defined(OS_MACOSX) && !defined(OS_IOS)
+
+// Returns the maximum length of a path component on the volume containing
+// the directory |path|, as a number of FilePath::CharType, or -1 on failure.
+BASE_EXPORT int GetMaximumPathComponentLength(const base::FilePath& path);
+
+#if defined(OS_LINUX) || defined(OS_AIX)
+// Broad categories of file systems as returned by statfs() on Linux.
+enum FileSystemType {
+  FILE_SYSTEM_UNKNOWN,  // statfs failed.
+  FILE_SYSTEM_0,        // statfs.f_type == 0 means unknown, may indicate AFS.
+  FILE_SYSTEM_ORDINARY,       // on-disk filesystem like ext2
+  FILE_SYSTEM_NFS,
+  FILE_SYSTEM_SMB,
+  FILE_SYSTEM_CODA,
+  FILE_SYSTEM_MEMORY,         // in-memory file system
+  FILE_SYSTEM_CGROUP,         // cgroup control.
+  FILE_SYSTEM_OTHER,          // any other value.
+  FILE_SYSTEM_TYPE_COUNT
+};
+
+// Attempts to determine the FileSystemType for |path|.
+// Returns false if |path| doesn't exist.
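+//
+// A sketch, e.g. checking whether /dev/shm is memory-backed:
+//   base::FileSystemType type;
+//   if (base::GetFileSystemType(base::FilePath("/dev/shm"), &type) &&
+//       type == base::FILE_SYSTEM_MEMORY) {
+//     // /dev/shm is a tmpfs/ramfs-style filesystem.
+//   }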
+BASE_EXPORT bool GetFileSystemType(const FilePath& path, FileSystemType* type);
+#endif
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+// Get a temporary directory for shared memory files. The directory may depend
+// on whether the destination is intended for executable files, which in turn
+// depends on how /dev/shm was mounted. As a result, you must supply whether
+// you intend to create executable shmem segments so this function can find
+// an appropriate location.
+BASE_EXPORT bool GetShmemTempDir(bool executable, FilePath* path);
+#endif
+
+// Internal --------------------------------------------------------------------
+
+namespace internal {
+
+// Same as Move but allows paths with traversal components.
+// Use only with extreme care.
+BASE_EXPORT bool MoveUnsafe(const FilePath& from_path,
+                            const FilePath& to_path);
+
+#if defined(OS_WIN)
+// Copy from_path to to_path recursively and then delete from_path recursively.
+// Returns true if all operations succeed.
+// This function simulates Move(), but unlike Move() it works across volumes.
+// This function is not transactional.
+BASE_EXPORT bool CopyAndDeleteDirectory(const FilePath& from_path,
+                                        const FilePath& to_path);
+#endif  // defined(OS_WIN)
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_FILES_FILE_UTIL_H_
diff --git a/base/files/file_util_android.cc b/base/files/file_util_android.cc
new file mode 100644
index 0000000..b8b3b37
--- /dev/null
+++ b/base/files/file_util_android.cc
@@ -0,0 +1,16 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file_util.h"
+
+#include "base/files/file_path.h"
+#include "base/path_service.h"
+
+namespace base {
+
+bool GetShmemTempDir(bool executable, base::FilePath* path) {
+  return PathService::Get(base::DIR_CACHE, path);
+}
+
+}  // namespace base
diff --git a/base/files/file_util_linux.cc b/base/files/file_util_linux.cc
new file mode 100644
index 0000000..b230fd9
--- /dev/null
+++ b/base/files/file_util_linux.cc
@@ -0,0 +1,63 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file_util.h"
+
+#include <errno.h>
+#include <linux/magic.h>
+#include <sys/vfs.h>
+
+#include "base/files/file_path.h"
+
+namespace base {
+
+bool GetFileSystemType(const FilePath& path, FileSystemType* type) {
+  struct statfs statfs_buf;
+  if (statfs(path.value().c_str(), &statfs_buf) < 0) {
+    if (errno == ENOENT)
+      return false;
+    *type = FILE_SYSTEM_UNKNOWN;
+    return true;
+  }
+
+  // Not all possible |statfs_buf.f_type| values are in linux/magic.h.
+  // Missing values are copied from the statfs man page.
+  switch (statfs_buf.f_type) {
+    case 0:
+      *type = FILE_SYSTEM_0;
+      break;
+    case EXT2_SUPER_MAGIC:  // Also ext3 and ext4
+    case MSDOS_SUPER_MAGIC:
+    case REISERFS_SUPER_MAGIC:
+    case BTRFS_SUPER_MAGIC:
+    case 0x5346544E:  // NTFS
+    case 0x58465342:  // XFS
+    case 0x3153464A:  // JFS
+      *type = FILE_SYSTEM_ORDINARY;
+      break;
+    case NFS_SUPER_MAGIC:
+      *type = FILE_SYSTEM_NFS;
+      break;
+    case SMB_SUPER_MAGIC:
+    case 0xFF534D42:  // CIFS
+      *type = FILE_SYSTEM_SMB;
+      break;
+    case CODA_SUPER_MAGIC:
+      *type = FILE_SYSTEM_CODA;
+      break;
+    case HUGETLBFS_MAGIC:
+    case RAMFS_MAGIC:
+    case TMPFS_MAGIC:
+      *type = FILE_SYSTEM_MEMORY;
+      break;
+    case CGROUP_SUPER_MAGIC:
+      *type = FILE_SYSTEM_CGROUP;
+      break;
+    default:
+      *type = FILE_SYSTEM_OTHER;
+  }
+  return true;
+}
+
+}  // namespace base
diff --git a/base/files/file_util_mac.mm b/base/files/file_util_mac.mm
new file mode 100644
index 0000000..392fbce
--- /dev/null
+++ b/base/files/file_util_mac.mm
@@ -0,0 +1,66 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file_util.h"
+
+#import <Foundation/Foundation.h>
+#include <copyfile.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "base/files/file_path.h"
+#include "base/logging.h"
+#include "base/mac/foundation_util.h"
+#include "base/strings/string_util.h"
+#include "base/threading/thread_restrictions.h"
+
+namespace base {
+
+bool CopyFile(const FilePath& from_path, const FilePath& to_path) {
+  AssertBlockingAllowed();
+  if (from_path.ReferencesParent() || to_path.ReferencesParent())
+    return false;
+  return (copyfile(from_path.value().c_str(),
+                   to_path.value().c_str(), NULL, COPYFILE_DATA) == 0);
+}
+
+bool GetTempDir(base::FilePath* path) {
+  // In order to facilitate hermetic runs on macOS, first check
+  // $MAC_CHROMIUM_TMPDIR. We check this instead of $TMPDIR because external
+  // programs currently set $TMPDIR with no effect, but when we respect it
+  // directly it can cause crashes (like crbug.com/698759).
+  const char* env_tmpdir = getenv("MAC_CHROMIUM_TMPDIR");
+  if (env_tmpdir) {
+    DCHECK_LT(strlen(env_tmpdir), 50u)
+        << "too-long TMPDIR causes socket name length issues.";
+    *path = base::FilePath(env_tmpdir);
+    return true;
+  }
+
+  // If we didn't find it, fall back to the native function.
+  NSString* tmp = NSTemporaryDirectory();
+  if (tmp == nil)
+    return false;
+  *path = base::mac::NSStringToFilePath(tmp);
+  return true;
+}
+
+FilePath GetHomeDir() {
+  NSString* tmp = NSHomeDirectory();
+  if (tmp != nil) {
+    FilePath mac_home_dir = base::mac::NSStringToFilePath(tmp);
+    if (!mac_home_dir.empty())
+      return mac_home_dir;
+  }
+
+  // Fall back on temp dir if no home directory is defined.
+  FilePath rv;
+  if (GetTempDir(&rv))
+    return rv;
+
+  // Last resort.
+  return FilePath("/tmp");
+}
+
+}  // namespace base
diff --git a/base/files/file_util_posix.cc b/base/files/file_util_posix.cc
new file mode 100644
index 0000000..d8a0ae0
--- /dev/null
+++ b/base/files/file_util_posix.cc
@@ -0,0 +1,1086 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file_util.h"
+
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <libgen.h>
+#include <limits.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/param.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <time.h>
+#include <unistd.h>
+
+#include "base/base_switches.h"
+#include "base/command_line.h"
+#include "base/containers/stack.h"
+#include "base/environment.h"
+#include "base/files/file_enumerator.h"
+#include "base/files/file_path.h"
+#include "base/files/scoped_file.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/singleton.h"
+#include "base/path_service.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/stl_util.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/strings/sys_string_conversions.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/sys_info.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+
+#if defined(OS_MACOSX)
+#include <AvailabilityMacros.h>
+#include "base/mac/foundation_util.h"
+#endif
+
+#if defined(OS_ANDROID)
+#include "base/android/content_uri_utils.h"
+#include "base/os_compat_android.h"
+#endif
+
+#if !defined(OS_IOS)
+#include <grp.h>
+#endif
+
+// We need to do this on AIX due to some inconsistencies in how AIX
+// handles XOPEN_SOURCE and ALL_SOURCE.
+#if defined(OS_AIX)
+extern "C" char* mkdtemp(char* path);
+#endif
+
+namespace base {
+
+namespace {
+
+#if defined(OS_BSD) || defined(OS_MACOSX) || defined(OS_NACL) || \
+    defined(OS_ANDROID) && __ANDROID_API__ < 21
+int CallStat(const char* path, stat_wrapper_t* sb) {
+  AssertBlockingAllowed();
+  return stat(path, sb);
+}
+int CallLstat(const char* path, stat_wrapper_t* sb) {
+  AssertBlockingAllowed();
+  return lstat(path, sb);
+}
+#else
+int CallStat(const char* path, stat_wrapper_t* sb) {
+  AssertBlockingAllowed();
+  return stat64(path, sb);
+}
+int CallLstat(const char* path, stat_wrapper_t* sb) {
+  AssertBlockingAllowed();
+  return lstat64(path, sb);
+}
+#endif
+
+#if !defined(OS_NACL_NONSFI)
+// Helper for VerifyPathControlledByUser.
+bool VerifySpecificPathControlledByUser(const FilePath& path,
+                                        uid_t owner_uid,
+                                        const std::set<gid_t>& group_gids) {
+  stat_wrapper_t stat_info;
+  if (CallLstat(path.value().c_str(), &stat_info) != 0) {
+    DPLOG(ERROR) << "Failed to get information on path "
+                 << path.value();
+    return false;
+  }
+
+  if (S_ISLNK(stat_info.st_mode)) {
+    DLOG(ERROR) << "Path " << path.value() << " is a symbolic link.";
+    return false;
+  }
+
+  if (stat_info.st_uid != owner_uid) {
+    DLOG(ERROR) << "Path " << path.value() << " is owned by the wrong user.";
+    return false;
+  }
+
+  if ((stat_info.st_mode & S_IWGRP) &&
+      !ContainsKey(group_gids, stat_info.st_gid)) {
+    DLOG(ERROR) << "Path " << path.value()
+                << " is writable by an unprivileged group.";
+    return false;
+  }
+
+  if (stat_info.st_mode & S_IWOTH) {
+    DLOG(ERROR) << "Path " << path.value() << " is writable by any user.";
+    return false;
+  }
+
+  return true;
+}
+
+std::string TempFileName() {
+#if defined(OS_MACOSX)
+  return StringPrintf(".%s.XXXXXX", base::mac::BaseBundleID());
+#endif
+
+#if defined(GOOGLE_CHROME_BUILD)
+  return std::string(".com.google.Chrome.XXXXXX");
+#else
+  return std::string(".org.chromium.Chromium.XXXXXX");
+#endif
+}
+
+#if defined(OS_LINUX) || defined(OS_AIX)
+// Determine if /dev/shm files can be mapped and then mprotect'd PROT_EXEC.
+// This depends on the mount options used for /dev/shm, which vary among
+// different Linux distributions and possibly local configuration.  It also
+// depends on kernel details: Chrome OS uses the noexec option for /dev/shm,
+// but its kernel allows mprotect with PROT_EXEC anyway.
+bool DetermineDevShmExecutable() {
+  bool result = false;
+  FilePath path;
+
+  ScopedFD fd(
+      CreateAndOpenFdForTemporaryFileInDir(FilePath("/dev/shm"), &path));
+  if (fd.is_valid()) {
+    DeleteFile(path, false);
+    long sysconf_result = sysconf(_SC_PAGESIZE);
+    CHECK_GE(sysconf_result, 0);
+    size_t pagesize = static_cast<size_t>(sysconf_result);
+    CHECK_GE(sizeof(pagesize), sizeof(sysconf_result));
+    void* mapping = mmap(nullptr, pagesize, PROT_READ, MAP_SHARED, fd.get(), 0);
+    if (mapping != MAP_FAILED) {
+      if (mprotect(mapping, pagesize, PROT_READ | PROT_EXEC) == 0)
+        result = true;
+      munmap(mapping, pagesize);
+    }
+  }
+  return result;
+}
+#endif  // defined(OS_LINUX) || defined(OS_AIX)
+
+bool AdvanceEnumeratorWithStat(FileEnumerator* traversal,
+                               FilePath* out_next_path,
+                               struct stat* out_next_stat) {
+  DCHECK(out_next_path);
+  DCHECK(out_next_stat);
+  *out_next_path = traversal->Next();
+  if (out_next_path->empty())
+    return false;
+
+  *out_next_stat = traversal->GetInfo().stat();
+  return true;
+}
+
+bool CopyFileContents(File* infile, File* outfile) {
+  static constexpr size_t kBufferSize = 32768;
+  std::vector<char> buffer(kBufferSize);
+
+  for (;;) {
+    ssize_t bytes_read = infile->ReadAtCurrentPos(buffer.data(), buffer.size());
+    if (bytes_read < 0)
+      return false;
+    if (bytes_read == 0)
+      return true;
+    // Allow for partial writes
+    ssize_t bytes_written_per_read = 0;
+    do {
+      ssize_t bytes_written_partial = outfile->WriteAtCurrentPos(
+          &buffer[bytes_written_per_read], bytes_read - bytes_written_per_read);
+      if (bytes_written_partial < 0)
+        return false;
+
+      bytes_written_per_read += bytes_written_partial;
+    } while (bytes_written_per_read < bytes_read);
+  }
+
+  NOTREACHED();
+  return false;
+}
+
+bool DoCopyDirectory(const FilePath& from_path,
+                     const FilePath& to_path,
+                     bool recursive,
+                     bool open_exclusive) {
+  AssertBlockingAllowed();
+  // Some old callers of CopyDirectory want it to support wildcards.
+  // After some discussion, we decided to fix those callers.
+  // Break loudly here if anyone tries to do this.
+  DCHECK(to_path.value().find('*') == std::string::npos);
+  DCHECK(from_path.value().find('*') == std::string::npos);
+
+  if (from_path.value().size() >= PATH_MAX) {
+    return false;
+  }
+
+  // This function does not properly handle destinations within the source.
+  FilePath real_to_path = to_path;
+  if (PathExists(real_to_path))
+    real_to_path = MakeAbsoluteFilePath(real_to_path);
+  else
+    real_to_path = MakeAbsoluteFilePath(real_to_path.DirName());
+  if (real_to_path.empty())
+    return false;
+
+  FilePath real_from_path = MakeAbsoluteFilePath(from_path);
+  if (real_from_path.empty())
+    return false;
+  if (real_to_path == real_from_path || real_from_path.IsParent(real_to_path))
+    return false;
+
+  int traverse_type = FileEnumerator::FILES | FileEnumerator::SHOW_SYM_LINKS;
+  if (recursive)
+    traverse_type |= FileEnumerator::DIRECTORIES;
+  FileEnumerator traversal(from_path, recursive, traverse_type);
+
+  // We have to mimic Windows behavior here. |to_path| may not exist yet, so
+  // start the loop with |to_path|.
+  struct stat from_stat;
+  FilePath current = from_path;
+  if (stat(from_path.value().c_str(), &from_stat) < 0) {
+    DPLOG(ERROR) << "CopyDirectory() couldn't stat source directory: "
+                 << from_path.value();
+    return false;
+  }
+  FilePath from_path_base = from_path;
+  if (recursive && DirectoryExists(to_path)) {
+    // If the destination already exists and is a directory, then the
+    // top level of source needs to be copied.
+    from_path_base = from_path.DirName();
+  }
+
+  // The Windows version of this function assumes that non-recursive calls
+  // will always have a directory for from_path.
+  // TODO(maruel): This is not necessary anymore.
+  DCHECK(recursive || S_ISDIR(from_stat.st_mode));
+
+  do {
+    // current is the source path, including from_path, so append
+    // the suffix after from_path to to_path to create the target_path.
+    FilePath target_path(to_path);
+    if (from_path_base != current &&
+        !from_path_base.AppendRelativePath(current, &target_path)) {
+      return false;
+    }
+
+    if (S_ISDIR(from_stat.st_mode)) {
+      mode_t mode = (from_stat.st_mode & 01777) | S_IRUSR | S_IXUSR | S_IWUSR;
+      if (mkdir(target_path.value().c_str(), mode) == 0)
+        continue;
+      if (errno == EEXIST && !open_exclusive)
+        continue;
+
+      DPLOG(ERROR) << "CopyDirectory() couldn't create directory: "
+                   << target_path.value();
+      return false;
+    }
+
+    if (!S_ISREG(from_stat.st_mode)) {
+      DLOG(WARNING) << "CopyDirectory() skipping non-regular file: "
+                    << current.value();
+      continue;
+    }
+
+    // Add O_NONBLOCK so we can't block opening a pipe.
+    File infile(open(current.value().c_str(), O_RDONLY | O_NONBLOCK));
+    if (!infile.IsValid()) {
+      DPLOG(ERROR) << "CopyDirectory() couldn't open file: " << current.value();
+      return false;
+    }
+
+    struct stat stat_at_use;
+    if (fstat(infile.GetPlatformFile(), &stat_at_use) < 0) {
+      DPLOG(ERROR) << "CopyDirectory() couldn't stat file: " << current.value();
+      return false;
+    }
+
+    if (!S_ISREG(stat_at_use.st_mode)) {
+      DLOG(WARNING) << "CopyDirectory() skipping non-regular file: "
+                    << current.value();
+      continue;
+    }
+
+    int open_flags = O_WRONLY | O_CREAT;
+    // If |open_exclusive| is set then we should always create the destination
+    // file, so O_NONBLOCK is not necessary to ensure we don't block on the
+    // open call for the target file below, and since the destination will
+    // always be a regular file it wouldn't affect the behavior of the
+    // subsequent write calls anyway.
+    if (open_exclusive)
+      open_flags |= O_EXCL;
+    else
+      open_flags |= O_TRUNC | O_NONBLOCK;
+    // Each platform has different default file opening modes for CopyFile which
+    // we want to replicate here. On OS X, we use copyfile(3) which takes the
+    // source file's permissions into account. On the other platforms, we just
+    // use the base::File constructor. On Chrome OS, base::File uses a different
+    // set of permissions than it does on other POSIX platforms.
+#if defined(OS_MACOSX)
+    int mode = 0600 | (stat_at_use.st_mode & 0177);
+#elif defined(OS_CHROMEOS)
+    int mode = 0644;
+#else
+    int mode = 0600;
+#endif
+    File outfile(open(target_path.value().c_str(), open_flags, mode));
+    if (!outfile.IsValid()) {
+      DPLOG(ERROR) << "CopyDirectory() couldn't create file: "
+                   << target_path.value();
+      return false;
+    }
+
+    if (!CopyFileContents(&infile, &outfile)) {
+      DLOG(ERROR) << "CopyDirectory() couldn't copy file: " << current.value();
+      return false;
+    }
+  } while (AdvanceEnumeratorWithStat(&traversal, &current, &from_stat));
+
+  return true;
+}
+#endif  // !defined(OS_NACL_NONSFI)
+
+#if !defined(OS_MACOSX)
+// Appends |mode_char| to |mode| before the optional character set encoding; see
+// https://www.gnu.org/software/libc/manual/html_node/Opening-Streams.html for
+// details.
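+// For example, following the insertion logic below:
+//   AppendModeCharacter("w,ccs=UTF-8", 'e') returns "we,ccs=UTF-8";
+//   AppendModeCharacter("rb", 'e') returns "rbe".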
+std::string AppendModeCharacter(StringPiece mode, char mode_char) {
+  std::string result(mode.as_string());
+  size_t comma_pos = result.find(',');
+  result.insert(comma_pos == std::string::npos ? result.length() : comma_pos, 1,
+                mode_char);
+  return result;
+}
+#endif
+
+}  // namespace
+
+#if !defined(OS_NACL_NONSFI)
+FilePath MakeAbsoluteFilePath(const FilePath& input) {
+  AssertBlockingAllowed();
+  char full_path[PATH_MAX];
+  if (realpath(input.value().c_str(), full_path) == nullptr)
+    return FilePath();
+  return FilePath(full_path);
+}
+
+// TODO(erikkay): The Windows version of this accepts paths like "foo/bar/*"
+// which works both with and without the recursive flag.  I'm not sure we need
+// that functionality. If not, remove from file_util_win.cc, otherwise add it
+// here.
+bool DeleteFile(const FilePath& path, bool recursive) {
+  AssertBlockingAllowed();
+  const char* path_str = path.value().c_str();
+  stat_wrapper_t file_info;
+  if (CallLstat(path_str, &file_info) != 0) {
+    // The Windows version defines this condition as success.
+    return (errno == ENOENT || errno == ENOTDIR);
+  }
+  if (!S_ISDIR(file_info.st_mode))
+    return (unlink(path_str) == 0);
+  if (!recursive)
+    return (rmdir(path_str) == 0);
+
+  bool success = true;
+  stack<std::string> directories;
+  directories.push(path.value());
+  FileEnumerator traversal(path, true,
+      FileEnumerator::FILES | FileEnumerator::DIRECTORIES |
+      FileEnumerator::SHOW_SYM_LINKS);
+  for (FilePath current = traversal.Next(); !current.empty();
+       current = traversal.Next()) {
+    if (traversal.GetInfo().IsDirectory())
+      directories.push(current.value());
+    else
+      success &= (unlink(current.value().c_str()) == 0);
+  }
+
+  while (!directories.empty()) {
+    FilePath dir = FilePath(directories.top());
+    directories.pop();
+    success &= (rmdir(dir.value().c_str()) == 0);
+  }
+  return success;
+}
+
+bool ReplaceFile(const FilePath& from_path,
+                 const FilePath& to_path,
+                 File::Error* error) {
+  AssertBlockingAllowed();
+  if (rename(from_path.value().c_str(), to_path.value().c_str()) == 0)
+    return true;
+  if (error)
+    *error = File::GetLastFileError();
+  return false;
+}
+
+bool CopyDirectory(const FilePath& from_path,
+                   const FilePath& to_path,
+                   bool recursive) {
+  return DoCopyDirectory(from_path, to_path, recursive, false);
+}
+
+bool CopyDirectoryExcl(const FilePath& from_path,
+                       const FilePath& to_path,
+                       bool recursive) {
+  return DoCopyDirectory(from_path, to_path, recursive, true);
+}
+#endif  // !defined(OS_NACL_NONSFI)
+
+bool CreateLocalNonBlockingPipe(int fds[2]) {
+#if defined(OS_LINUX)
+  return pipe2(fds, O_CLOEXEC | O_NONBLOCK) == 0;
+#else
+  int raw_fds[2];
+  if (pipe(raw_fds) != 0)
+    return false;
+  ScopedFD fd_out(raw_fds[0]);
+  ScopedFD fd_in(raw_fds[1]);
+  if (!SetCloseOnExec(fd_out.get()))
+    return false;
+  if (!SetCloseOnExec(fd_in.get()))
+    return false;
+  if (!SetNonBlocking(fd_out.get()))
+    return false;
+  if (!SetNonBlocking(fd_in.get()))
+    return false;
+  fds[0] = fd_out.release();
+  fds[1] = fd_in.release();
+  return true;
+#endif
+}
+
+bool SetNonBlocking(int fd) {
+  const int flags = fcntl(fd, F_GETFL);
+  if (flags == -1)
+    return false;
+  if (flags & O_NONBLOCK)
+    return true;
+  if (HANDLE_EINTR(fcntl(fd, F_SETFL, flags | O_NONBLOCK)) == -1)
+    return false;
+  return true;
+}
+
+bool SetCloseOnExec(int fd) {
+#if defined(OS_NACL_NONSFI)
+  const int flags = 0;
+#else
+  const int flags = fcntl(fd, F_GETFD);
+  if (flags == -1)
+    return false;
+  if (flags & FD_CLOEXEC)
+    return true;
+#endif  // defined(OS_NACL_NONSFI)
+  if (HANDLE_EINTR(fcntl(fd, F_SETFD, flags | FD_CLOEXEC)) == -1)
+    return false;
+  return true;
+}
+
+bool PathExists(const FilePath& path) {
+  AssertBlockingAllowed();
+#if defined(OS_ANDROID)
+  if (path.IsContentUri()) {
+    return ContentUriExists(path);
+  }
+#endif
+  return access(path.value().c_str(), F_OK) == 0;
+}
+
+#if !defined(OS_NACL_NONSFI)
+bool PathIsWritable(const FilePath& path) {
+  AssertBlockingAllowed();
+  return access(path.value().c_str(), W_OK) == 0;
+}
+#endif  // !defined(OS_NACL_NONSFI)
+
+bool DirectoryExists(const FilePath& path) {
+  AssertBlockingAllowed();
+  stat_wrapper_t file_info;
+  if (CallStat(path.value().c_str(), &file_info) != 0)
+    return false;
+  return S_ISDIR(file_info.st_mode);
+}
+
+bool ReadFromFD(int fd, char* buffer, size_t bytes) {
+  size_t total_read = 0;
+  while (total_read < bytes) {
+    ssize_t bytes_read =
+        HANDLE_EINTR(read(fd, buffer + total_read, bytes - total_read));
+    if (bytes_read <= 0)
+      break;
+    total_read += bytes_read;
+  }
+  return total_read == bytes;
+}
+
+#if !defined(OS_NACL_NONSFI)
+
+int CreateAndOpenFdForTemporaryFileInDir(const FilePath& directory,
+                                         FilePath* path) {
+  AssertBlockingAllowed();  // For call to mkstemp().
+  *path = directory.Append(TempFileName());
+  const std::string& tmpdir_string = path->value();
+  // This should be OK since mkstemp() just replaces characters in place.
+  char* buffer = const_cast<char*>(tmpdir_string.c_str());
+
+  return HANDLE_EINTR(mkstemp(buffer));
+}
+
+#if !defined(OS_FUCHSIA)
+bool CreateSymbolicLink(const FilePath& target_path,
+                        const FilePath& symlink_path) {
+  DCHECK(!symlink_path.empty());
+  DCHECK(!target_path.empty());
+  return ::symlink(target_path.value().c_str(),
+                   symlink_path.value().c_str()) != -1;
+}
+
+bool ReadSymbolicLink(const FilePath& symlink_path, FilePath* target_path) {
+  DCHECK(!symlink_path.empty());
+  DCHECK(target_path);
+  char buf[PATH_MAX];
+  ssize_t count = ::readlink(symlink_path.value().c_str(), buf, arraysize(buf));
+
+  if (count <= 0) {
+    target_path->clear();
+    return false;
+  }
+
+  *target_path = FilePath(FilePath::StringType(buf, count));
+  return true;
+}
+
+bool GetPosixFilePermissions(const FilePath& path, int* mode) {
+  AssertBlockingAllowed();
+  DCHECK(mode);
+
+  stat_wrapper_t file_info;
+  // Uses stat() because, for a symbolic link, lstat() does not return valid
+  // permission bits in st_mode.
+  if (CallStat(path.value().c_str(), &file_info) != 0)
+    return false;
+
+  *mode = file_info.st_mode & FILE_PERMISSION_MASK;
+  return true;
+}
+
+bool SetPosixFilePermissions(const FilePath& path,
+                             int mode) {
+  AssertBlockingAllowed();
+  DCHECK_EQ(mode & ~FILE_PERMISSION_MASK, 0);
+
+  // Calls stat() so that we can preserve the higher bits like S_ISGID.
+  stat_wrapper_t stat_buf;
+  if (CallStat(path.value().c_str(), &stat_buf) != 0)
+    return false;
+
+  // Clears the existing permission bits, and adds the new ones.
+  mode_t updated_mode_bits = stat_buf.st_mode & ~FILE_PERMISSION_MASK;
+  updated_mode_bits |= mode & FILE_PERMISSION_MASK;
+
+  if (HANDLE_EINTR(chmod(path.value().c_str(), updated_mode_bits)) != 0)
+    return false;
+
+  return true;
+}
+
+bool ExecutableExistsInPath(Environment* env,
+                            const FilePath::StringType& executable) {
+  std::string path;
+  if (!env->GetVar("PATH", &path)) {
+    LOG(ERROR) << "No $PATH variable. Assuming no " << executable << ".";
+    return false;
+  }
+
+  for (const StringPiece& cur_path :
+       SplitStringPiece(path, ":", KEEP_WHITESPACE, SPLIT_WANT_NONEMPTY)) {
+    FilePath file(cur_path);
+    int permissions;
+    if (GetPosixFilePermissions(file.Append(executable), &permissions) &&
+        (permissions & FILE_PERMISSION_EXECUTE_BY_USER))
+      return true;
+  }
+  return false;
+}
+
+#endif  // !OS_FUCHSIA
+
+#if !defined(OS_MACOSX)
+// This is implemented in file_util_mac.mm for Mac.
+bool GetTempDir(FilePath* path) {
+  const char* tmp = getenv("TMPDIR");
+  if (tmp) {
+    *path = FilePath(tmp);
+    return true;
+  }
+
+#if defined(OS_ANDROID)
+  return PathService::Get(DIR_CACHE, path);
+#else
+  *path = FilePath("/tmp");
+  return true;
+#endif
+}
+#endif  // !defined(OS_MACOSX)
+
+#if !defined(OS_MACOSX)  // Mac implementation is in file_util_mac.mm.
+FilePath GetHomeDir() {
+#if defined(OS_CHROMEOS)
+  if (SysInfo::IsRunningOnChromeOS()) {
+    // On Chrome OS chrome::DIR_USER_DATA is overridden with a primary user
+    // homedir once it becomes available. Return / as the safe option.
+    return FilePath("/");
+  }
+#endif
+
+  const char* home_dir = getenv("HOME");
+  if (home_dir && home_dir[0])
+    return FilePath(home_dir);
+
+#if defined(OS_ANDROID)
+  DLOG(WARNING) << "OS_ANDROID: Home directory lookup not yet implemented.";
+#endif
+
+  FilePath rv;
+  if (GetTempDir(&rv))
+    return rv;
+
+  // Last resort.
+  return FilePath("/tmp");
+}
+#endif  // !defined(OS_MACOSX)
+
+bool CreateTemporaryFile(FilePath* path) {
+  AssertBlockingAllowed();  // For call to close().
+  FilePath directory;
+  if (!GetTempDir(&directory))
+    return false;
+  int fd = CreateAndOpenFdForTemporaryFileInDir(directory, path);
+  if (fd < 0)
+    return false;
+  close(fd);
+  return true;
+}
+
+FILE* CreateAndOpenTemporaryFileInDir(const FilePath& dir, FilePath* path) {
+  int fd = CreateAndOpenFdForTemporaryFileInDir(dir, path);
+  if (fd < 0)
+    return nullptr;
+
+  FILE* file = fdopen(fd, "a+");
+  if (!file)
+    close(fd);
+  return file;
+}
+
+bool CreateTemporaryFileInDir(const FilePath& dir, FilePath* temp_file) {
+  AssertBlockingAllowed();  // For call to close().
+  int fd = CreateAndOpenFdForTemporaryFileInDir(dir, temp_file);
+  return ((fd >= 0) && !IGNORE_EINTR(close(fd)));
+}
+
+static bool CreateTemporaryDirInDirImpl(const FilePath& base_dir,
+                                        const FilePath::StringType& name_tmpl,
+                                        FilePath* new_dir) {
+  AssertBlockingAllowed();  // For call to mkdtemp().
+  DCHECK(name_tmpl.find("XXXXXX") != FilePath::StringType::npos)
+      << "Directory name template must contain \"XXXXXX\".";
+
+  FilePath sub_dir = base_dir.Append(name_tmpl);
+  std::string sub_dir_string = sub_dir.value();
+
+  // This should be OK since mkdtemp() just replaces characters in place.
+  char* buffer = const_cast<char*>(sub_dir_string.c_str());
+  char* dtemp = mkdtemp(buffer);
+  if (!dtemp) {
+    DPLOG(ERROR) << "mkdtemp";
+    return false;
+  }
+  *new_dir = FilePath(dtemp);
+  return true;
+}
+
+bool CreateTemporaryDirInDir(const FilePath& base_dir,
+                             const FilePath::StringType& prefix,
+                             FilePath* new_dir) {
+  FilePath::StringType mkdtemp_template = prefix;
+  mkdtemp_template.append(FILE_PATH_LITERAL("XXXXXX"));
+  return CreateTemporaryDirInDirImpl(base_dir, mkdtemp_template, new_dir);
+}
+
+bool CreateNewTempDirectory(const FilePath::StringType& prefix,
+                            FilePath* new_temp_path) {
+  FilePath tmpdir;
+  if (!GetTempDir(&tmpdir))
+    return false;
+
+  return CreateTemporaryDirInDirImpl(tmpdir, TempFileName(), new_temp_path);
+}
+
+bool CreateDirectoryAndGetError(const FilePath& full_path,
+                                File::Error* error) {
+  AssertBlockingAllowed();  // For call to mkdir().
+  std::vector<FilePath> subpaths;
+
+  // Collect a list of all parent directories.
+  FilePath last_path = full_path;
+  subpaths.push_back(full_path);
+  for (FilePath path = full_path.DirName();
+       path.value() != last_path.value(); path = path.DirName()) {
+    subpaths.push_back(path);
+    last_path = path;
+  }
+
+  // Iterate through the parents and create the missing ones.
+  for (std::vector<FilePath>::reverse_iterator i = subpaths.rbegin();
+       i != subpaths.rend(); ++i) {
+    if (DirectoryExists(*i))
+      continue;
+    if (mkdir(i->value().c_str(), 0700) == 0)
+      continue;
+    // Mkdir failed, but it might have failed with EEXIST, or some other error
+    // due to the directory appearing out of thin air. This can occur if
+    // two processes are trying to create the same file system tree at the same
+    // time. Check to see if it exists and make sure it is a directory.
+    int saved_errno = errno;
+    if (!DirectoryExists(*i)) {
+      if (error)
+        *error = File::OSErrorToFileError(saved_errno);
+      return false;
+    }
+  }
+  return true;
+}
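+
+// Usage sketch (illustrative only); File::ErrorToString() renders the
+// reported error as a readable string:
+//   File::Error error = File::FILE_OK;
+//   if (!CreateDirectoryAndGetError(path, &error))
+//     LOG(ERROR) << "mkdir failed: " << File::ErrorToString(error);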
+
+bool NormalizeFilePath(const FilePath& path, FilePath* normalized_path) {
+  FilePath real_path_result = MakeAbsoluteFilePath(path);
+  if (real_path_result.empty())
+    return false;
+
+  // To be consistent with Windows, fail if |real_path_result| is a
+  // directory.
+  if (DirectoryExists(real_path_result))
+    return false;
+
+  *normalized_path = real_path_result;
+  return true;
+}
+
+// TODO(rkc): Refactor GetFileInfo and FileEnumerator to handle symlinks
+// correctly. http://code.google.com/p/chromium-os/issues/detail?id=15948
+bool IsLink(const FilePath& file_path) {
+  stat_wrapper_t st;
+  // If we can't lstat() the file, it's safe to assume that it isn't a
+  // 'followable' link.
+  if (CallLstat(file_path.value().c_str(), &st) != 0)
+    return false;
+  return S_ISLNK(st.st_mode);
+}
+
+bool GetFileInfo(const FilePath& file_path, File::Info* results) {
+  stat_wrapper_t file_info;
+#if defined(OS_ANDROID)
+  if (file_path.IsContentUri()) {
+    File file = OpenContentUriForRead(file_path);
+    if (!file.IsValid())
+      return false;
+    return file.GetInfo(results);
+  } else {
+#endif  // defined(OS_ANDROID)
+    if (CallStat(file_path.value().c_str(), &file_info) != 0)
+      return false;
+#if defined(OS_ANDROID)
+  }
+#endif  // defined(OS_ANDROID)
+
+  results->FromStat(file_info);
+  return true;
+}
+#endif  // !defined(OS_NACL_NONSFI)
+
+FILE* OpenFile(const FilePath& filename, const char* mode) {
+  // 'e' is unconditionally added below, so be sure there is not one already
+  // present before a comma in |mode|.
+  DCHECK(
+      strchr(mode, 'e') == nullptr ||
+      (strchr(mode, ',') != nullptr && strchr(mode, 'e') > strchr(mode, ',')));
+  AssertBlockingAllowed();
+  FILE* result = nullptr;
+#if defined(OS_MACOSX)
+  // macOS does not provide a mode character to set O_CLOEXEC; see
+  // https://developer.apple.com/legacy/library/documentation/Darwin/Reference/ManPages/man3/fopen.3.html.
+  const char* the_mode = mode;
+#else
+  std::string mode_with_e(AppendModeCharacter(mode, 'e'));
+  const char* the_mode = mode_with_e.c_str();
+#endif
+  do {
+    result = fopen(filename.value().c_str(), the_mode);
+  } while (!result && errno == EINTR);
+#if defined(OS_MACOSX)
+  // Mark the descriptor as close-on-exec.
+  if (result)
+    SetCloseOnExec(fileno(result));
+#endif
+  return result;
+}
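+
+// Illustrative note (not in the original): a caller passing "rb" gets an
+// effective mode with 'e' added on POSIX, so the descriptor is close-on-exec;
+// passing a mode that already contains 'e', e.g. "rbe", would trip the
+// DCHECK above.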
+
+// NaCl doesn't implement system calls to open files directly.
+#if !defined(OS_NACL)
+FILE* FileToFILE(File file, const char* mode) {
+  FILE* stream = fdopen(file.GetPlatformFile(), mode);
+  if (stream)
+    file.TakePlatformFile();
+  return stream;
+}
+#endif  // !defined(OS_NACL)
+
+int ReadFile(const FilePath& filename, char* data, int max_size) {
+  AssertBlockingAllowed();
+  int fd = HANDLE_EINTR(open(filename.value().c_str(), O_RDONLY));
+  if (fd < 0)
+    return -1;
+
+  ssize_t bytes_read = HANDLE_EINTR(read(fd, data, max_size));
+  if (IGNORE_EINTR(close(fd)) < 0)
+    return -1;
+  return bytes_read;
+}
+
+int WriteFile(const FilePath& filename, const char* data, int size) {
+  AssertBlockingAllowed();
+  int fd = HANDLE_EINTR(creat(filename.value().c_str(), 0666));
+  if (fd < 0)
+    return -1;
+
+  int bytes_written = WriteFileDescriptor(fd, data, size) ? size : -1;
+  if (IGNORE_EINTR(close(fd)) < 0)
+    return -1;
+  return bytes_written;
+}
+
+bool WriteFileDescriptor(const int fd, const char* data, int size) {
+  // Allow for partial writes.
+  ssize_t bytes_written_total = 0;
+  for (ssize_t bytes_written_partial = 0; bytes_written_total < size;
+       bytes_written_total += bytes_written_partial) {
+    bytes_written_partial =
+        HANDLE_EINTR(write(fd, data + bytes_written_total,
+                           size - bytes_written_total));
+    if (bytes_written_partial < 0)
+      return false;
+  }
+
+  return true;
+}
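+
+// Usage sketch (illustrative only): callers own the descriptor and typically
+// wrap the surrounding syscalls in the EINTR helpers, as WriteFile() does:
+//   int fd = HANDLE_EINTR(open(path.value().c_str(), O_WRONLY | O_APPEND));
+//   if (fd >= 0) {
+//     bool ok = WriteFileDescriptor(fd, data, size);
+//     ok = (IGNORE_EINTR(close(fd)) == 0) && ok;
+//   }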
+
+#if !defined(OS_NACL_NONSFI)
+
+bool AppendToFile(const FilePath& filename, const char* data, int size) {
+  AssertBlockingAllowed();
+  bool ret = true;
+  int fd = HANDLE_EINTR(open(filename.value().c_str(), O_WRONLY | O_APPEND));
+  if (fd < 0) {
+    VPLOG(1) << "Unable to create file " << filename.value();
+    return false;
+  }
+
+  // This call will either write all of the data or return false.
+  if (!WriteFileDescriptor(fd, data, size)) {
+    VPLOG(1) << "Error while writing to file " << filename.value();
+    ret = false;
+  }
+
+  if (IGNORE_EINTR(close(fd)) < 0) {
+    VPLOG(1) << "Error while closing file " << filename.value();
+    return false;
+  }
+
+  return ret;
+}
+
+bool GetCurrentDirectory(FilePath* dir) {
+  // getcwd() can fail with ENOENT, which implies it checks against the disk.
+  AssertBlockingAllowed();
+
+  char system_buffer[PATH_MAX] = "";
+  if (!getcwd(system_buffer, sizeof(system_buffer))) {
+    NOTREACHED();
+    return false;
+  }
+  *dir = FilePath(system_buffer);
+  return true;
+}
+
+bool SetCurrentDirectory(const FilePath& path) {
+  AssertBlockingAllowed();
+  return chdir(path.value().c_str()) == 0;
+}
+
+bool VerifyPathControlledByUser(const FilePath& base,
+                                const FilePath& path,
+                                uid_t owner_uid,
+                                const std::set<gid_t>& group_gids) {
+  if (base != path && !base.IsParent(path)) {
+     DLOG(ERROR) << "|base| must be a subdirectory of |path|.  base = \""
+                 << base.value() << "\", path = \"" << path.value() << "\"";
+     return false;
+  }
+
+  std::vector<FilePath::StringType> base_components;
+  std::vector<FilePath::StringType> path_components;
+
+  base.GetComponents(&base_components);
+  path.GetComponents(&path_components);
+
+  std::vector<FilePath::StringType>::const_iterator ib, ip;
+  for (ib = base_components.begin(), ip = path_components.begin();
+       ib != base_components.end(); ++ib, ++ip) {
+    // |base| is a prefix of |path|, so all of its components should match.
+    // If these DCHECKs fail, look at the test that base is a parent of
+    // path at the top of this function.
+    DCHECK(ip != path_components.end());
+    DCHECK(*ip == *ib);
+  }
+
+  FilePath current_path = base;
+  if (!VerifySpecificPathControlledByUser(current_path, owner_uid, group_gids))
+    return false;
+
+  for (; ip != path_components.end(); ++ip) {
+    current_path = current_path.Append(*ip);
+    if (!VerifySpecificPathControlledByUser(
+            current_path, owner_uid, group_gids))
+      return false;
+  }
+  return true;
+}
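+
+// Usage sketch (illustrative only): to check that /etc/hosts, and every
+// component from /etc down to it, is owned by root and writable only by root
+// or a trusted group:
+//   std::set<gid_t> groups;
+//   groups.insert(0);  // gid 0 is "root" (or "wheel" on some systems).
+//   bool safe = VerifyPathControlledByUser(
+//       FilePath("/etc"), FilePath("/etc/hosts"), 0 /* root */, groups);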
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+bool VerifyPathControlledByAdmin(const FilePath& path) {
+  const unsigned kRootUid = 0;
+  const FilePath kFileSystemRoot("/");
+
+  // The names of the administrator groups on macOS.
+  const char* const kAdminGroupNames[] = {
+    "admin",
+    "wheel"
+  };
+
+  // Reading the groups database may touch the file system.
+  AssertBlockingAllowed();
+
+  std::set<gid_t> allowed_group_ids;
+  for (int i = 0, ie = arraysize(kAdminGroupNames); i < ie; ++i) {
+    struct group *group_record = getgrnam(kAdminGroupNames[i]);
+    if (!group_record) {
+      DPLOG(ERROR) << "Could not get the group ID of group \""
+                   << kAdminGroupNames[i] << "\".";
+      continue;
+    }
+
+    allowed_group_ids.insert(group_record->gr_gid);
+  }
+
+  return VerifyPathControlledByUser(
+      kFileSystemRoot, path, kRootUid, allowed_group_ids);
+}
+#endif  // defined(OS_MACOSX) && !defined(OS_IOS)
+
+int GetMaximumPathComponentLength(const FilePath& path) {
+#if defined(OS_FUCHSIA)
+  // Return a value we do not expect anyone ever to reach, but which is small
+  // enough to guard against e.g. bugs causing multi-megabyte paths.
+  return 1024;
+#else
+  AssertBlockingAllowed();
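+  // Note: pathconf() returns -1 on error, or when the limit is indeterminate,
+  // so callers should be prepared for a negative result.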
+  return pathconf(path.value().c_str(), _PC_NAME_MAX);
+#endif
+}
+
+#if !defined(OS_ANDROID)
+// This is implemented in file_util_android.cc for that platform.
+bool GetShmemTempDir(bool executable, FilePath* path) {
+#if defined(OS_LINUX) || defined(OS_AIX)
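+  // /dev/shm is a tmpfs, so files created there live in memory. Whether it
+  // can back executable mappings depends on how it is mounted (e.g. noexec),
+  // which is why DetermineDevShmExecutable() is consulted for the executable
+  // case below.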
+  bool disable_dev_shm = false;
+#if !defined(OS_CHROMEOS)
+  disable_dev_shm = CommandLine::ForCurrentProcess()->HasSwitch(
+      switches::kDisableDevShmUsage);
+#endif
+  bool use_dev_shm = true;
+  if (executable) {
+    static const bool s_dev_shm_executable = DetermineDevShmExecutable();
+    use_dev_shm = s_dev_shm_executable;
+  }
+  if (use_dev_shm && !disable_dev_shm) {
+    *path = FilePath("/dev/shm");
+    return true;
+  }
+#endif  // defined(OS_LINUX) || defined(OS_AIX)
+  return GetTempDir(path);
+}
+#endif  // !defined(OS_ANDROID)
+
+#if !defined(OS_MACOSX)
+// Mac has its own implementation, this is for all other Posix systems.
+bool CopyFile(const FilePath& from_path, const FilePath& to_path) {
+  AssertBlockingAllowed();
+  File infile;
+#if defined(OS_ANDROID)
+  if (from_path.IsContentUri()) {
+    infile = OpenContentUriForRead(from_path);
+  } else {
+    infile = File(from_path, File::FLAG_OPEN | File::FLAG_READ);
+  }
+#else
+  infile = File(from_path, File::FLAG_OPEN | File::FLAG_READ);
+#endif
+  if (!infile.IsValid())
+    return false;
+
+  File outfile(to_path, File::FLAG_WRITE | File::FLAG_CREATE_ALWAYS);
+  if (!outfile.IsValid())
+    return false;
+
+  return CopyFileContents(&infile, &outfile);
+}
+#endif  // !defined(OS_MACOSX)
+
+// -----------------------------------------------------------------------------
+
+namespace internal {
+
+bool MoveUnsafe(const FilePath& from_path, const FilePath& to_path) {
+  AssertBlockingAllowed();
+  // Windows compatibility: if |to_path| exists, |from_path| and |to_path|
+  // must be the same type, either both files, or both directories.
+  stat_wrapper_t to_file_info;
+  if (CallStat(to_path.value().c_str(), &to_file_info) == 0) {
+    stat_wrapper_t from_file_info;
+    if (CallStat(from_path.value().c_str(), &from_file_info) != 0)
+      return false;
+    if (S_ISDIR(to_file_info.st_mode) != S_ISDIR(from_file_info.st_mode))
+      return false;
+  }
+
+  if (rename(from_path.value().c_str(), to_path.value().c_str()) == 0)
+    return true;
+
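+  // rename(2) cannot cross file system boundaries (it fails with EXDEV), so
+  // fall back to a recursive copy followed by deleting the source.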
+  if (!CopyDirectory(from_path, to_path, true))
+    return false;
+
+  DeleteFile(from_path, true);
+  return true;
+}
+
+}  // namespace internal
+
+#endif  // !defined(OS_NACL_NONSFI)
+}  // namespace base
diff --git a/base/files/file_util_unittest.cc b/base/files/file_util_unittest.cc
new file mode 100644
index 0000000..a89e1b3
--- /dev/null
+++ b/base/files/file_util_unittest.cc
@@ -0,0 +1,3683 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <algorithm>
+#include <fstream>
+#include <initializer_list>
+#include <memory>
+#include <set>
+#include <utility>
+#include <vector>
+
+#include "base/base_paths.h"
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/callback_helpers.h"
+#include "base/command_line.h"
+#include "base/environment.h"
+#include "base/files/file.h"
+#include "base/files/file_enumerator.h"
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/files/scoped_file.h"
+#include "base/files/scoped_temp_dir.h"
+#include "base/macros.h"
+#include "base/path_service.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/test/multiprocess_test.h"
+#include "base/test/scoped_environment_variable_override.h"
+#include "base/test/test_file_util.h"
+#include "base/test/test_timeouts.h"
+#include "base/threading/platform_thread.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/multiprocess_func_list.h"
+#include "testing/platform_test.h"
+
+#if defined(OS_WIN)
+#include <shellapi.h>
+#include <shlobj.h>
+#include <tchar.h>
+#include <windows.h>
+#include <winioctl.h>
+#include "base/strings/string_number_conversions.h"
+#include "base/win/scoped_handle.h"
+#include "base/win/win_util.h"
+#endif
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#endif
+
+#if defined(OS_LINUX)
+#include <linux/fs.h>
+#endif
+
+#if defined(OS_ANDROID)
+#include "base/android/content_uri_utils.h"
+#endif
+
+// This macro helps avoid wrapped lines in the test structs.
+#define FPL(x) FILE_PATH_LITERAL(x)
+
+namespace base {
+
+namespace {
+
+const size_t kLargeFileSize = (1 << 16) + 3;
+
+// To test that NormalizeFilePath() deals with NTFS reparse points correctly,
+// we need functions to create and delete reparse points.
+#if defined(OS_WIN)
+typedef struct _REPARSE_DATA_BUFFER {
+  ULONG  ReparseTag;
+  USHORT  ReparseDataLength;
+  USHORT  Reserved;
+  union {
+    struct {
+      USHORT SubstituteNameOffset;
+      USHORT SubstituteNameLength;
+      USHORT PrintNameOffset;
+      USHORT PrintNameLength;
+      ULONG Flags;
+      WCHAR PathBuffer[1];
+    } SymbolicLinkReparseBuffer;
+    struct {
+      USHORT SubstituteNameOffset;
+      USHORT SubstituteNameLength;
+      USHORT PrintNameOffset;
+      USHORT PrintNameLength;
+      WCHAR PathBuffer[1];
+    } MountPointReparseBuffer;
+    struct {
+      UCHAR DataBuffer[1];
+    } GenericReparseBuffer;
+  };
+} REPARSE_DATA_BUFFER, *PREPARSE_DATA_BUFFER;
+
+// Sets a reparse point. |source| will now point to |target|. Returns true if
+// the call succeeds, false otherwise.
+bool SetReparsePoint(HANDLE source, const FilePath& target_path) {
+  std::wstring kPathPrefix = L"\\??\\";
+  std::wstring target_str;
+  // The junction will not work if the target path does not start with \??\ .
+  if (kPathPrefix != target_path.value().substr(0, kPathPrefix.size()))
+    target_str += kPathPrefix;
+  target_str += target_path.value();
+  const wchar_t* target = target_str.c_str();
+  USHORT size_target = static_cast<USHORT>(wcslen(target)) * sizeof(target[0]);
+  char buffer[2000] = {0};
+  DWORD returned;
+
+  REPARSE_DATA_BUFFER* data = reinterpret_cast<REPARSE_DATA_BUFFER*>(buffer);
+
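+  // 0xa0000003 is IO_REPARSE_TAG_MOUNT_POINT, i.e. a junction.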
+  data->ReparseTag = 0xa0000003;
+  memcpy(data->MountPointReparseBuffer.PathBuffer, target, size_target + 2);
+
+  data->MountPointReparseBuffer.SubstituteNameLength = size_target;
+  data->MountPointReparseBuffer.PrintNameOffset = size_target + 2;
+  data->ReparseDataLength = size_target + 4 + 8;
+
+  int data_size = data->ReparseDataLength + 8;
+
+  if (!DeviceIoControl(source, FSCTL_SET_REPARSE_POINT, &buffer, data_size,
+                       NULL, 0, &returned, NULL)) {
+    return false;
+  }
+  return true;
+}
+
+// Delete the reparse point referenced by |source|. Returns true if the call
+// succeeds, false otherwise.
+bool DeleteReparsePoint(HANDLE source) {
+  DWORD returned;
+  REPARSE_DATA_BUFFER data = {0};
+  data.ReparseTag = 0xa0000003;
+  if (!DeviceIoControl(source, FSCTL_DELETE_REPARSE_POINT, &data, 8, NULL, 0,
+                       &returned, NULL)) {
+    return false;
+  }
+  return true;
+}
+
+// Manages a reparse point for a test.
+class ReparsePoint {
+ public:
+  // Creates a reparse point from |source| (an empty directory) to |target|.
+  ReparsePoint(const FilePath& source, const FilePath& target) {
+    dir_.Set(
+      ::CreateFile(source.value().c_str(),
+                   GENERIC_READ | GENERIC_WRITE,
+                   FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
+                   NULL,
+                   OPEN_EXISTING,
+                   FILE_FLAG_BACKUP_SEMANTICS,  // Needed to open a directory.
+                   NULL));
+    created_ = dir_.IsValid() && SetReparsePoint(dir_.Get(), target);
+  }
+
+  ~ReparsePoint() {
+    if (created_)
+      DeleteReparsePoint(dir_.Get());
+  }
+
+  bool IsValid() { return created_; }
+
+ private:
+  win::ScopedHandle dir_;
+  bool created_;
+  DISALLOW_COPY_AND_ASSIGN(ReparsePoint);
+};
+
+#endif  // defined(OS_WIN)
+
+// Fuchsia doesn't support file permissions.
+#if !defined(OS_FUCHSIA)
+#if defined(OS_POSIX)
+// Provide a simple way to change the permissions bits on |path| in tests.
+// ASSERT failures will return, but not stop the test.  Caller should wrap
+// calls to this function in ASSERT_NO_FATAL_FAILURE().
+void ChangePosixFilePermissions(const FilePath& path,
+                                int mode_bits_to_set,
+                                int mode_bits_to_clear) {
+  ASSERT_FALSE(mode_bits_to_set & mode_bits_to_clear)
+      << "Can't set and clear the same bits.";
+
+  int mode = 0;
+  ASSERT_TRUE(GetPosixFilePermissions(path, &mode));
+  mode |= mode_bits_to_set;
+  mode &= ~mode_bits_to_clear;
+  ASSERT_TRUE(SetPosixFilePermissions(path, mode));
+}
+#endif  // defined(OS_POSIX)
+
+// Sets the source file to read-only.
+void SetReadOnly(const FilePath& path, bool read_only) {
+#if defined(OS_WIN)
+  // On Windows, it involves setting/removing the 'readonly' bit.
+  DWORD attrs = GetFileAttributes(path.value().c_str());
+  ASSERT_NE(INVALID_FILE_ATTRIBUTES, attrs);
+  ASSERT_TRUE(SetFileAttributes(
+      path.value().c_str(), read_only ? (attrs | FILE_ATTRIBUTE_READONLY)
+                                      : (attrs & ~FILE_ATTRIBUTE_READONLY)));
+
+  DWORD expected =
+      read_only
+          ? ((attrs & (FILE_ATTRIBUTE_ARCHIVE | FILE_ATTRIBUTE_DIRECTORY)) |
+             FILE_ATTRIBUTE_READONLY)
+          : (attrs & (FILE_ATTRIBUTE_ARCHIVE | FILE_ATTRIBUTE_DIRECTORY));
+
+  // Ignore FILE_ATTRIBUTE_NOT_CONTENT_INDEXED if present.
+  attrs = GetFileAttributes(path.value().c_str()) &
+          ~FILE_ATTRIBUTE_NOT_CONTENT_INDEXED;
+  ASSERT_EQ(expected, attrs);
+#else
+  // On all other platforms, it involves removing/setting the write bit.
+  mode_t mode = read_only ? S_IRUSR : (S_IRUSR | S_IWUSR);
+  EXPECT_TRUE(SetPosixFilePermissions(
+      path, DirectoryExists(path) ? (mode | S_IXUSR) : mode));
+#endif  // defined(OS_WIN)
+}
+
+bool IsReadOnly(const FilePath& path) {
+#if defined(OS_WIN)
+  DWORD attrs = GetFileAttributes(path.value().c_str());
+  EXPECT_NE(INVALID_FILE_ATTRIBUTES, attrs);
+  return attrs & FILE_ATTRIBUTE_READONLY;
+#else
+  int mode = 0;
+  EXPECT_TRUE(GetPosixFilePermissions(path, &mode));
+  return !(mode & S_IWUSR);
+#endif  // defined(OS_WIN)
+}
+
+#endif  // !defined(OS_FUCHSIA)
+
+const wchar_t bogus_content[] = L"I'm cannon fodder.";
+
+const int FILES_AND_DIRECTORIES =
+    FileEnumerator::FILES | FileEnumerator::DIRECTORIES;
+
+// file_util winds up using autoreleased objects on the Mac, so this needs
+// to be a PlatformTest.
+class FileUtilTest : public PlatformTest {
+ protected:
+  void SetUp() override {
+    PlatformTest::SetUp();
+    ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
+  }
+
+  ScopedTempDir temp_dir_;
+};
+
+// Collects all the results from the given file enumerator, and provides an
+// interface to query whether a given file is present.
+class FindResultCollector {
+ public:
+  explicit FindResultCollector(FileEnumerator* enumerator) {
+    FilePath cur_file;
+    while (!(cur_file = enumerator->Next()).value().empty()) {
+      FilePath::StringType path = cur_file.value();
+      // The file should not be returned twice.
+      EXPECT_TRUE(files_.end() == files_.find(path))
+          << "Same file returned twice";
+
+      // Save for later.
+      files_.insert(path);
+    }
+  }
+
+  // Returns true if the enumerator found the file.
+  bool HasFile(const FilePath& file) const {
+    return files_.find(file.value()) != files_.end();
+  }
+
+  int size() {
+    return static_cast<int>(files_.size());
+  }
+
+ private:
+  std::set<FilePath::StringType> files_;
+};
+
+// Simple function to dump some text into a new file.
+void CreateTextFile(const FilePath& filename,
+                    const std::wstring& contents) {
+  std::wofstream file;
+  file.open(filename.value().c_str());
+  ASSERT_TRUE(file.is_open());
+  file << contents;
+  file.close();
+}
+
+// Simple function to read some text back out of a file.
+std::wstring ReadTextFile(const FilePath& filename) {
+  wchar_t contents[64];
+  std::wifstream file;
+  file.open(filename.value().c_str());
+  EXPECT_TRUE(file.is_open());
+  file.getline(contents, arraysize(contents));
+  file.close();
+  return std::wstring(contents);
+}
+
+// Sets |is_inheritable| to indicate whether or not |stream| is set up to be
+// inherited into child processes (i.e., HANDLE_FLAG_INHERIT is set on the
+// underlying handle on Windows, or FD_CLOEXEC is not set on the underlying file
+// descriptor on POSIX). Calls to this function must be wrapped with
+// ASSERT_NO_FATAL_FAILURE to properly abort tests in case of fatal failure.
+void GetIsInheritable(FILE* stream, bool* is_inheritable) {
+#if defined(OS_WIN)
+  HANDLE handle = reinterpret_cast<HANDLE>(_get_osfhandle(_fileno(stream)));
+  ASSERT_NE(INVALID_HANDLE_VALUE, handle);
+
+  DWORD info = 0;
+  ASSERT_EQ(TRUE, ::GetHandleInformation(handle, &info));
+  *is_inheritable = ((info & HANDLE_FLAG_INHERIT) != 0);
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  int fd = fileno(stream);
+  ASSERT_NE(-1, fd);
+  int flags = fcntl(fd, F_GETFD, 0);
+  ASSERT_NE(-1, flags);
+  *is_inheritable = ((flags & FD_CLOEXEC) == 0);
+#else
+#error Not implemented
+#endif
+}
+
+TEST_F(FileUtilTest, FileAndDirectorySize) {
+  // Create three files of 20, 30 and 3 chars (utf8). ComputeDirectorySize
+  // should return 53 bytes.
+  FilePath file_01 = temp_dir_.GetPath().Append(FPL("The file 01.txt"));
+  CreateTextFile(file_01, L"12345678901234567890");
+  int64_t size_f1 = 0;
+  ASSERT_TRUE(GetFileSize(file_01, &size_f1));
+  EXPECT_EQ(20ll, size_f1);
+
+  FilePath subdir_path = temp_dir_.GetPath().Append(FPL("Level2"));
+  CreateDirectory(subdir_path);
+
+  FilePath file_02 = subdir_path.Append(FPL("The file 02.txt"));
+  CreateTextFile(file_02, L"123456789012345678901234567890");
+  int64_t size_f2 = 0;
+  ASSERT_TRUE(GetFileSize(file_02, &size_f2));
+  EXPECT_EQ(30ll, size_f2);
+
+  FilePath subsubdir_path = subdir_path.Append(FPL("Level3"));
+  CreateDirectory(subsubdir_path);
+
+  FilePath file_03 = subsubdir_path.Append(FPL("The file 03.txt"));
+  CreateTextFile(file_03, L"123");
+
+  int64_t computed_size = ComputeDirectorySize(temp_dir_.GetPath());
+  EXPECT_EQ(size_f1 + size_f2 + 3, computed_size);
+}
+
+TEST_F(FileUtilTest, NormalizeFilePathBasic) {
+  // Create a directory under the test dir.  Because we create it,
+  // we know it is not a link.
+  FilePath file_a_path = temp_dir_.GetPath().Append(FPL("file_a"));
+  FilePath dir_path = temp_dir_.GetPath().Append(FPL("dir"));
+  FilePath file_b_path = dir_path.Append(FPL("file_b"));
+  CreateDirectory(dir_path);
+
+  FilePath normalized_file_a_path, normalized_file_b_path;
+  ASSERT_FALSE(PathExists(file_a_path));
+  ASSERT_FALSE(NormalizeFilePath(file_a_path, &normalized_file_a_path))
+    << "NormalizeFilePath() should fail on nonexistent paths.";
+
+  CreateTextFile(file_a_path, bogus_content);
+  ASSERT_TRUE(PathExists(file_a_path));
+  ASSERT_TRUE(NormalizeFilePath(file_a_path, &normalized_file_a_path));
+
+  CreateTextFile(file_b_path, bogus_content);
+  ASSERT_TRUE(PathExists(file_b_path));
+  ASSERT_TRUE(NormalizeFilePath(file_b_path, &normalized_file_b_path));
+
+  // Because this test created |dir_path|, we know it is not a link
+  // or junction.  So, the real path of the directory holding file a
+  // must be the parent of the path holding file b.
+  ASSERT_TRUE(normalized_file_a_path.DirName()
+      .IsParent(normalized_file_b_path.DirName()));
+}
+
+#if defined(OS_WIN)
+
+TEST_F(FileUtilTest, NormalizeFilePathReparsePoints) {
+  // Build the following directory structure:
+  //
+  // temp_dir
+  // |-> base_a
+  // |   |-> sub_a
+  // |       |-> file.txt
+  // |       |-> long_name___... (Very long name.)
+  // |           |-> sub_long
+  // |              |-> deep.txt
+  // |-> base_b
+  //     |-> to_sub_a (reparse point to temp_dir\base_a\sub_a)
+  //     |-> to_base_b (reparse point to temp_dir\base_b)
+  //     |-> to_sub_long (reparse point to temp_dir\sub_a\long_name_\sub_long)
+
+  FilePath base_a = temp_dir_.GetPath().Append(FPL("base_a"));
+#if defined(OS_WIN)
+  // TEMP can have a lower case drive letter.
+  string16 temp_base_a = base_a.value();
+  ASSERT_FALSE(temp_base_a.empty());
+  *temp_base_a.begin() = ToUpperASCII(*temp_base_a.begin());
+  base_a = FilePath(temp_base_a);
+#endif
+  ASSERT_TRUE(CreateDirectory(base_a));
+
+  FilePath sub_a = base_a.Append(FPL("sub_a"));
+  ASSERT_TRUE(CreateDirectory(sub_a));
+
+  FilePath file_txt = sub_a.Append(FPL("file.txt"));
+  CreateTextFile(file_txt, bogus_content);
+
+  // Want a directory whose name is long enough to make the path to the file
+  // inside just under MAX_PATH chars.  This will be used to test that when
+  // a junction expands to a path over MAX_PATH chars in length,
+  // NormalizeFilePath() fails without crashing.
+  FilePath sub_long_rel(FPL("sub_long"));
+  FilePath deep_txt(FPL("deep.txt"));
+
+  int target_length = MAX_PATH;
+  target_length -= (sub_a.value().length() + 1);  // +1 for the separator '\'.
+  target_length -= (sub_long_rel.Append(deep_txt).value().length() + 1);
+  // Without making the path a bit shorter, CreateDirectory() fails.
+  // However, the resulting path is still long enough to hit the failing case
+  // in NormalizePath().
+  const int kCreateDirLimit = 4;
+  target_length -= kCreateDirLimit;
+  FilePath::StringType long_name_str = FPL("long_name_");
+  long_name_str.resize(target_length, '_');
+
+  FilePath long_name = sub_a.Append(FilePath(long_name_str));
+  FilePath deep_file = long_name.Append(sub_long_rel).Append(deep_txt);
+  ASSERT_EQ(static_cast<size_t>(MAX_PATH - kCreateDirLimit),
+            deep_file.value().length());
+
+  FilePath sub_long = deep_file.DirName();
+  ASSERT_TRUE(CreateDirectory(sub_long));
+  CreateTextFile(deep_file, bogus_content);
+
+  FilePath base_b = temp_dir_.GetPath().Append(FPL("base_b"));
+  ASSERT_TRUE(CreateDirectory(base_b));
+
+  FilePath to_sub_a = base_b.Append(FPL("to_sub_a"));
+  ASSERT_TRUE(CreateDirectory(to_sub_a));
+  FilePath normalized_path;
+  {
+    ReparsePoint reparse_to_sub_a(to_sub_a, sub_a);
+    ASSERT_TRUE(reparse_to_sub_a.IsValid());
+
+    FilePath to_base_b = base_b.Append(FPL("to_base_b"));
+    ASSERT_TRUE(CreateDirectory(to_base_b));
+    ReparsePoint reparse_to_base_b(to_base_b, base_b);
+    ASSERT_TRUE(reparse_to_base_b.IsValid());
+
+    FilePath to_sub_long = base_b.Append(FPL("to_sub_long"));
+    ASSERT_TRUE(CreateDirectory(to_sub_long));
+    ReparsePoint reparse_to_sub_long(to_sub_long, sub_long);
+    ASSERT_TRUE(reparse_to_sub_long.IsValid());
+
+    // Normalize a junction free path: base_a\sub_a\file.txt .
+    ASSERT_TRUE(NormalizeFilePath(file_txt, &normalized_path));
+    ASSERT_STREQ(file_txt.value().c_str(), normalized_path.value().c_str());
+
+    // Check that the path base_b\to_sub_a\file.txt can be normalized to exclude
+    // the junction to_sub_a.
+    ASSERT_TRUE(NormalizeFilePath(to_sub_a.Append(FPL("file.txt")),
+                                             &normalized_path));
+    ASSERT_STREQ(file_txt.value().c_str(), normalized_path.value().c_str());
+
+    // Check that the path base_b\to_base_b\to_base_b\to_sub_a\file.txt can be
+    // normalized to exclude junctions to_base_b and to_sub_a .
+    ASSERT_TRUE(NormalizeFilePath(base_b.Append(FPL("to_base_b"))
+                                                   .Append(FPL("to_base_b"))
+                                                   .Append(FPL("to_sub_a"))
+                                                   .Append(FPL("file.txt")),
+                                             &normalized_path));
+    ASSERT_STREQ(file_txt.value().c_str(), normalized_path.value().c_str());
+
+    // A long enough path will cause NormalizeFilePath() to fail.  Make a long
+    // path using to_base_b many times, and check that paths long enough to fail
+    // do not cause a crash.
+    FilePath long_path = base_b;
+    const int kLengthLimit = MAX_PATH + 200;
+    while (long_path.value().length() <= kLengthLimit) {
+      long_path = long_path.Append(FPL("to_base_b"));
+    }
+    long_path = long_path.Append(FPL("to_sub_a"))
+                         .Append(FPL("file.txt"));
+
+    ASSERT_FALSE(NormalizeFilePath(long_path, &normalized_path));
+
+    // Normalizing the junction to deep.txt should fail, because the expanded
+    // path to deep.txt is longer than MAX_PATH.
+    ASSERT_FALSE(NormalizeFilePath(to_sub_long.Append(deep_txt),
+                                              &normalized_path));
+
+    // Delete the reparse points, and see that NormalizeFilePath() fails
+    // to traverse them.
+  }
+
+  ASSERT_FALSE(NormalizeFilePath(to_sub_a.Append(FPL("file.txt")),
+                                            &normalized_path));
+}
+
+TEST_F(FileUtilTest, DevicePathToDriveLetter) {
+  // Get a drive letter.
+  string16 real_drive_letter =
+      ToUpperASCII(temp_dir_.GetPath().value().substr(0, 2));
+  if (!isalpha(real_drive_letter[0]) || ':' != real_drive_letter[1]) {
+    LOG(ERROR) << "Can't get a drive letter to test with.";
+    return;
+  }
+
+  // Get the NT style path to that drive.
+  wchar_t device_path[MAX_PATH] = {'\0'};
+  ASSERT_TRUE(
+      ::QueryDosDevice(real_drive_letter.c_str(), device_path, MAX_PATH));
+  FilePath actual_device_path(device_path);
+  FilePath win32_path;
+
+  // Run DevicePathToDriveLetterPath() on the NT style path we got from
+  // QueryDosDevice().  Expect the drive letter we started with.
+  ASSERT_TRUE(DevicePathToDriveLetterPath(actual_device_path, &win32_path));
+  ASSERT_EQ(real_drive_letter, win32_path.value());
+
+  // Add some directories to the path.  Expect those extra path components
+  // to be preserved.
+  FilePath kRelativePath(FPL("dir1\\dir2\\file.txt"));
+  ASSERT_TRUE(DevicePathToDriveLetterPath(
+      actual_device_path.Append(kRelativePath),
+      &win32_path));
+  EXPECT_EQ(FilePath(real_drive_letter + L"\\").Append(kRelativePath).value(),
+            win32_path.value());
+
+  // Deform the real path so that it is invalid by removing the last four
+  // characters.  The way Windows names devices that are hard disks
+  // (\Device\HardDiskVolume${NUMBER}) guarantees that the string is longer
+  // than three characters.  The only way the truncated string could be a
+  // real drive is if 10^4 or more disks are mounted:
+  // \Device\HardDiskVolume10000 would be truncated to \Device\HardDiskVolume1
+  // Check that DevicePathToDriveLetterPath fails.
+  int path_length = actual_device_path.value().length();
+  int new_length = path_length - 4;
+  ASSERT_LT(0, new_length);
+  FilePath prefix_of_real_device_path(
+      actual_device_path.value().substr(0, new_length));
+  ASSERT_FALSE(DevicePathToDriveLetterPath(prefix_of_real_device_path,
+                                           &win32_path));
+
+  ASSERT_FALSE(DevicePathToDriveLetterPath(
+      prefix_of_real_device_path.Append(kRelativePath),
+      &win32_path));
+
+  // Deform the real path so that it is invalid by adding some characters. For
+  // example, if C: maps to \Device\HardDiskVolume8, then we simulate a
+  // request for the drive letter whose native path is
+  // \Device\HardDiskVolume812345 .  We assume such a device does not exist,
+  // because drives are numbered in order and mounting 812345 hard disks will
+  // never happen.
+  const FilePath::StringType kExtraChars = FPL("12345");
+
+  FilePath real_device_path_plus_numbers(
+      actual_device_path.value() + kExtraChars);
+
+  ASSERT_FALSE(DevicePathToDriveLetterPath(
+      real_device_path_plus_numbers,
+      &win32_path));
+
+  ASSERT_FALSE(DevicePathToDriveLetterPath(
+      real_device_path_plus_numbers.Append(kRelativePath),
+      &win32_path));
+}
+
+TEST_F(FileUtilTest, CreateTemporaryFileInDirLongPathTest) {
+  // Test that CreateTemporaryFileInDir() creates a path and returns a long path
+  // if it is available. This test requires that:
+  // - the filesystem at |temp_dir_| supports long filenames.
+  // - the account has FILE_LIST_DIRECTORY permission for all ancestor
+  //   directories of |temp_dir_|.
+  const FilePath::CharType kLongDirName[] = FPL("A long path");
+  const FilePath::CharType kTestSubDirName[] = FPL("test");
+  FilePath long_test_dir = temp_dir_.GetPath().Append(kLongDirName);
+  ASSERT_TRUE(CreateDirectory(long_test_dir));
+
+  // kLongDirName is not an 8.3 component. So GetShortName() should give us a
+  // different short name.
+  WCHAR path_buffer[MAX_PATH];
+  DWORD path_buffer_length = GetShortPathName(long_test_dir.value().c_str(),
+                                              path_buffer, MAX_PATH);
+  ASSERT_LT(path_buffer_length, DWORD(MAX_PATH));
+  ASSERT_NE(DWORD(0), path_buffer_length);
+  FilePath short_test_dir(path_buffer);
+  ASSERT_STRNE(kLongDirName, short_test_dir.BaseName().value().c_str());
+
+  FilePath temp_file;
+  ASSERT_TRUE(CreateTemporaryFileInDir(short_test_dir, &temp_file));
+  EXPECT_STREQ(kLongDirName, temp_file.DirName().BaseName().value().c_str());
+  EXPECT_TRUE(PathExists(temp_file));
+
+  // Create a subdirectory of |long_test_dir| and make |long_test_dir|
+  // unreadable. We should still be able to create a temp file in the
+  // subdirectory, but we won't be able to determine the long path for it. This
+  // mimics the environment that some users run in, where their user profiles
+  // reside in a location where they don't have full access to the higher-level
+  // directories. (Note that this assumption is true for NTFS, but not for some
+  // network file systems, e.g. AFS.)
+  FilePath access_test_dir = long_test_dir.Append(kTestSubDirName);
+  ASSERT_TRUE(CreateDirectory(access_test_dir));
+  FilePermissionRestorer long_test_dir_restorer(long_test_dir);
+  ASSERT_TRUE(MakeFileUnreadable(long_test_dir));
+
+  // Use the short form of the directory to create a temporary filename.
+  ASSERT_TRUE(CreateTemporaryFileInDir(
+      short_test_dir.Append(kTestSubDirName), &temp_file));
+  EXPECT_TRUE(PathExists(temp_file));
+  EXPECT_TRUE(short_test_dir.IsParent(temp_file.DirName()));
+
+  // Check that the long path can't be determined for |temp_file|.
+  path_buffer_length = GetLongPathName(temp_file.value().c_str(),
+                                       path_buffer, MAX_PATH);
+  EXPECT_EQ(DWORD(0), path_buffer_length);
+}
+
+#endif  // defined(OS_WIN)
+
+#if defined(OS_POSIX)
+
+TEST_F(FileUtilTest, CreateAndReadSymlinks) {
+  FilePath link_from = temp_dir_.GetPath().Append(FPL("from_file"));
+  FilePath link_to = temp_dir_.GetPath().Append(FPL("to_file"));
+  CreateTextFile(link_to, bogus_content);
+
+  ASSERT_TRUE(CreateSymbolicLink(link_to, link_from))
+    << "Failed to create file symlink.";
+
+  // If we created the link properly, we should be able to read the contents
+  // through it.
+  EXPECT_EQ(bogus_content, ReadTextFile(link_from));
+
+  FilePath result;
+  ASSERT_TRUE(ReadSymbolicLink(link_from, &result));
+  EXPECT_EQ(link_to.value(), result.value());
+
+  // Link to a directory.
+  link_from = temp_dir_.GetPath().Append(FPL("from_dir"));
+  link_to = temp_dir_.GetPath().Append(FPL("to_dir"));
+  ASSERT_TRUE(CreateDirectory(link_to));
+  ASSERT_TRUE(CreateSymbolicLink(link_to, link_from))
+    << "Failed to create directory symlink.";
+
+  // Test failures.
+  EXPECT_FALSE(CreateSymbolicLink(link_to, link_to));
+  EXPECT_FALSE(ReadSymbolicLink(link_to, &result));
+  FilePath missing = temp_dir_.GetPath().Append(FPL("missing"));
+  EXPECT_FALSE(ReadSymbolicLink(missing, &result));
+}
+
+// The following test of NormalizeFilePath() requires that we create a symlink.
+// This cannot be done on Windows before Vista.  On Vista, creating a symlink
+// requires privilege "SeCreateSymbolicLinkPrivilege".
+// TODO(skerner): Investigate the possibility of giving base_unittests the
+// privileges required to create a symlink.
+TEST_F(FileUtilTest, NormalizeFilePathSymlinks) {
+  // Link one file to another.
+  FilePath link_from = temp_dir_.GetPath().Append(FPL("from_file"));
+  FilePath link_to = temp_dir_.GetPath().Append(FPL("to_file"));
+  CreateTextFile(link_to, bogus_content);
+
+  ASSERT_TRUE(CreateSymbolicLink(link_to, link_from))
+    << "Failed to create file symlink.";
+
+  // Check that NormalizeFilePath sees the link.
+  FilePath normalized_path;
+  ASSERT_TRUE(NormalizeFilePath(link_from, &normalized_path));
+  EXPECT_NE(link_from, link_to);
+  EXPECT_EQ(link_to.BaseName().value(), normalized_path.BaseName().value());
+
+  // Link to a directory.
+  link_from = temp_dir_.GetPath().Append(FPL("from_dir"));
+  link_to = temp_dir_.GetPath().Append(FPL("to_dir"));
+  ASSERT_TRUE(CreateDirectory(link_to));
+  ASSERT_TRUE(CreateSymbolicLink(link_to, link_from))
+    << "Failed to create directory symlink.";
+
+  EXPECT_FALSE(NormalizeFilePath(link_from, &normalized_path))
+    << "Links to directories should return false.";
+
+  // Test that a loop in the links causes NormalizeFilePath() to return false.
+  link_from = temp_dir_.GetPath().Append(FPL("link_a"));
+  link_to = temp_dir_.GetPath().Append(FPL("link_b"));
+  ASSERT_TRUE(CreateSymbolicLink(link_to, link_from))
+    << "Failed to create loop symlink a.";
+  ASSERT_TRUE(CreateSymbolicLink(link_from, link_to))
+    << "Failed to create loop symlink b.";
+
+  // Infinite loop!
+  EXPECT_FALSE(NormalizeFilePath(link_from, &normalized_path));
+}
+
+TEST_F(FileUtilTest, DeleteSymlinkToExistentFile) {
+  // Create a file.
+  FilePath file_name = temp_dir_.GetPath().Append(FPL("Test DeleteFile 2.txt"));
+  CreateTextFile(file_name, bogus_content);
+  ASSERT_TRUE(PathExists(file_name));
+
+  // Create a symlink to the file.
+  FilePath file_link = temp_dir_.GetPath().Append("file_link_2");
+  ASSERT_TRUE(CreateSymbolicLink(file_name, file_link))
+      << "Failed to create symlink.";
+
+  // Delete the symbolic link.
+  EXPECT_TRUE(DeleteFile(file_link, false));
+
+  // Make sure the symlink is gone but the original file is not deleted.
+  EXPECT_FALSE(PathExists(file_link));
+  EXPECT_TRUE(PathExists(file_name));
+}
+
+TEST_F(FileUtilTest, DeleteSymlinkToNonExistentFile) {
+  // Create a non-existent file path.
+  FilePath non_existent =
+      temp_dir_.GetPath().Append(FPL("Test DeleteFile 3.txt"));
+  EXPECT_FALSE(PathExists(non_existent));
+
+  // Create a symlink to the non-existent file.
+  FilePath file_link = temp_dir_.GetPath().Append("file_link_3");
+  ASSERT_TRUE(CreateSymbolicLink(non_existent, file_link))
+      << "Failed to create symlink.";
+
+  // Make sure the symbolic link exists.
+  EXPECT_TRUE(IsLink(file_link));
+  EXPECT_FALSE(PathExists(file_link));
+
+  // Delete the symbolic link.
+  EXPECT_TRUE(DeleteFile(file_link, false));
+
+  // Make sure the symbolic link is deleted.
+  EXPECT_FALSE(IsLink(file_link));
+}
+
+TEST_F(FileUtilTest, CopyFileFollowsSymlinks) {
+  FilePath link_from = temp_dir_.GetPath().Append(FPL("from_file"));
+  FilePath link_to = temp_dir_.GetPath().Append(FPL("to_file"));
+  CreateTextFile(link_to, bogus_content);
+
+  ASSERT_TRUE(CreateSymbolicLink(link_to, link_from));
+
+  // If we created the link properly, we should be able to read the contents
+  // through it.
+  EXPECT_EQ(bogus_content, ReadTextFile(link_from));
+
+  FilePath result;
+  ASSERT_TRUE(ReadSymbolicLink(link_from, &result));
+  EXPECT_EQ(link_to.value(), result.value());
+
+  // Create another file and copy it to |link_from|.
+  FilePath src_file = temp_dir_.GetPath().Append(FPL("src.txt"));
+  const std::wstring file_contents(L"Gooooooooooooooooooooogle");
+  CreateTextFile(src_file, file_contents);
+  ASSERT_TRUE(CopyFile(src_file, link_from));
+
+  // Make sure |link_from| is still a symlink, and |link_to| has been written to
+  // by CopyFile().
+  EXPECT_TRUE(IsLink(link_from));
+  EXPECT_EQ(file_contents, ReadTextFile(link_from));
+  EXPECT_EQ(file_contents, ReadTextFile(link_to));
+}
+
+TEST_F(FileUtilTest, ChangeFilePermissionsAndRead) {
+  // Create a file path.
+  FilePath file_name =
+      temp_dir_.GetPath().Append(FPL("Test Readable File.txt"));
+  EXPECT_FALSE(PathExists(file_name));
+
+  static constexpr char kData[] = "hello";
+  static constexpr int kDataSize = sizeof(kData) - 1;
+  char buffer[kDataSize];
+
+  // Write file.
+  EXPECT_EQ(kDataSize, WriteFile(file_name, kData, kDataSize));
+  EXPECT_TRUE(PathExists(file_name));
+
+  // Make sure the file is readable.
+  int32_t mode = 0;
+  EXPECT_TRUE(GetPosixFilePermissions(file_name, &mode));
+  EXPECT_TRUE(mode & FILE_PERMISSION_READ_BY_USER);
+
+  // Get rid of the read permission.
+  EXPECT_TRUE(SetPosixFilePermissions(file_name, 0u));
+  EXPECT_TRUE(GetPosixFilePermissions(file_name, &mode));
+  EXPECT_FALSE(mode & FILE_PERMISSION_READ_BY_USER);
+  // Make sure the file can't be read.
+  EXPECT_EQ(-1, ReadFile(file_name, buffer, kDataSize));
+
+  // Give the read permission.
+  EXPECT_TRUE(SetPosixFilePermissions(file_name, FILE_PERMISSION_READ_BY_USER));
+  EXPECT_TRUE(GetPosixFilePermissions(file_name, &mode));
+  EXPECT_TRUE(mode & FILE_PERMISSION_READ_BY_USER);
+  // Make sure the file can be read.
+  EXPECT_EQ(kDataSize, ReadFile(file_name, buffer, kDataSize));
+
+  // Delete the file.
+  EXPECT_TRUE(DeleteFile(file_name, false));
+  EXPECT_FALSE(PathExists(file_name));
+}
+
+TEST_F(FileUtilTest, ChangeFilePermissionsAndWrite) {
+  // Create a file path.
+  FilePath file_name =
+      temp_dir_.GetPath().Append(FPL("Test Readable File.txt"));
+  EXPECT_FALSE(PathExists(file_name));
+
+  const std::string kData("hello");
+
+  // Write file.
+  EXPECT_EQ(static_cast<int>(kData.length()),
+            WriteFile(file_name, kData.data(), kData.length()));
+  EXPECT_TRUE(PathExists(file_name));
+
+  // Make sure the file is writable.
+  int mode = 0;
+  EXPECT_TRUE(GetPosixFilePermissions(file_name, &mode));
+  EXPECT_TRUE(mode & FILE_PERMISSION_WRITE_BY_USER);
+  EXPECT_TRUE(PathIsWritable(file_name));
+
+  // Get rid of the write permission.
+  EXPECT_TRUE(SetPosixFilePermissions(file_name, 0u));
+  EXPECT_TRUE(GetPosixFilePermissions(file_name, &mode));
+  EXPECT_FALSE(mode & FILE_PERMISSION_WRITE_BY_USER);
+  // Make sure the file can't be written to.
+  EXPECT_EQ(-1, WriteFile(file_name, kData.data(), kData.length()));
+  EXPECT_FALSE(PathIsWritable(file_name));
+
+  // Give the write permission.
+  EXPECT_TRUE(SetPosixFilePermissions(file_name,
+                                      FILE_PERMISSION_WRITE_BY_USER));
+  EXPECT_TRUE(GetPosixFilePermissions(file_name, &mode));
+  EXPECT_TRUE(mode & FILE_PERMISSION_WRITE_BY_USER);
+  // Make sure the file can be written to.
+  EXPECT_EQ(static_cast<int>(kData.length()),
+            WriteFile(file_name, kData.data(), kData.length()));
+  EXPECT_TRUE(PathIsWritable(file_name));
+
+  // Delete the file.
+  EXPECT_TRUE(DeleteFile(file_name, false));
+  EXPECT_FALSE(PathExists(file_name));
+}
+
+TEST_F(FileUtilTest, ChangeDirectoryPermissionsAndEnumerate) {
+  // Create a directory path.
+  FilePath subdir_path = temp_dir_.GetPath().Append(FPL("PermissionTest1"));
+  CreateDirectory(subdir_path);
+  ASSERT_TRUE(PathExists(subdir_path));
+
+  // Create a dummy file to enumerate.
+  FilePath file_name = subdir_path.Append(FPL("Test Readable File.txt"));
+  EXPECT_FALSE(PathExists(file_name));
+  const std::string kData("hello");
+  EXPECT_EQ(static_cast<int>(kData.length()),
+            WriteFile(file_name, kData.data(), kData.length()));
+  EXPECT_TRUE(PathExists(file_name));
+
+  // Make sure the directory has all the user permissions.
+  int mode = 0;
+  EXPECT_TRUE(GetPosixFilePermissions(subdir_path, &mode));
+  EXPECT_EQ(FILE_PERMISSION_USER_MASK, mode & FILE_PERMISSION_USER_MASK);
+
+  // Get rid of the permissions from the directory.
+  EXPECT_TRUE(SetPosixFilePermissions(subdir_path, 0u));
+  EXPECT_TRUE(GetPosixFilePermissions(subdir_path, &mode));
+  EXPECT_FALSE(mode & FILE_PERMISSION_USER_MASK);
+
+  // Make sure the file in the directory can't be enumerated.
+  FileEnumerator f1(subdir_path, true, FileEnumerator::FILES);
+  EXPECT_TRUE(PathExists(subdir_path));
+  FindResultCollector c1(&f1);
+  EXPECT_EQ(0, c1.size());
+  EXPECT_FALSE(GetPosixFilePermissions(file_name, &mode));
+
+  // Give the permissions to the directory.
+  EXPECT_TRUE(SetPosixFilePermissions(subdir_path, FILE_PERMISSION_USER_MASK));
+  EXPECT_TRUE(GetPosixFilePermissions(subdir_path, &mode));
+  EXPECT_EQ(FILE_PERMISSION_USER_MASK, mode & FILE_PERMISSION_USER_MASK);
+
+  // Make sure the file in the directory can be enumerated.
+  FileEnumerator f2(subdir_path, true, FileEnumerator::FILES);
+  FindResultCollector c2(&f2);
+  EXPECT_TRUE(c2.HasFile(file_name));
+  EXPECT_EQ(1, c2.size());
+
+  // Delete the file.
+  EXPECT_TRUE(DeleteFile(subdir_path, true));
+  EXPECT_FALSE(PathExists(subdir_path));
+}
+
+TEST_F(FileUtilTest, ExecutableExistsInPath) {
+  // Create two directories that we will put in our PATH
+  const FilePath::CharType kDir1[] = FPL("dir1");
+  const FilePath::CharType kDir2[] = FPL("dir2");
+
+  FilePath dir1 = temp_dir_.GetPath().Append(kDir1);
+  FilePath dir2 = temp_dir_.GetPath().Append(kDir2);
+  ASSERT_TRUE(CreateDirectory(dir1));
+  ASSERT_TRUE(CreateDirectory(dir2));
+
+  test::ScopedEnvironmentVariableOverride scoped_env(
+      "PATH", dir1.value() + ":" + dir2.value());
+  ASSERT_TRUE(scoped_env.IsOverridden());
+
+  const FilePath::CharType kRegularFileName[] = FPL("regular_file");
+  const FilePath::CharType kExeFileName[] = FPL("exe");
+  const FilePath::CharType kDneFileName[] = FPL("does_not_exist");
+
+  const FilePath kExePath = dir1.Append(kExeFileName);
+  const FilePath kRegularFilePath = dir2.Append(kRegularFileName);
+
+  // Write file.
+  const std::string kData("hello");
+  ASSERT_EQ(static_cast<int>(kData.length()),
+            WriteFile(kExePath, kData.data(), kData.length()));
+  ASSERT_TRUE(PathExists(kExePath));
+  ASSERT_EQ(static_cast<int>(kData.length()),
+            WriteFile(kRegularFilePath, kData.data(), kData.length()));
+  ASSERT_TRUE(PathExists(kRegularFilePath));
+
+  ASSERT_TRUE(SetPosixFilePermissions(dir1.Append(kExeFileName),
+                                      FILE_PERMISSION_EXECUTE_BY_USER));
+
+  EXPECT_TRUE(ExecutableExistsInPath(scoped_env.GetEnv(), kExeFileName));
+  EXPECT_FALSE(ExecutableExistsInPath(scoped_env.GetEnv(), kRegularFileName));
+  EXPECT_FALSE(ExecutableExistsInPath(scoped_env.GetEnv(), kDneFileName));
+}
+
+TEST_F(FileUtilTest, CopyDirectoryPermissions) {
+  // Create a directory.
+  FilePath dir_name_from =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_From_Subdir"));
+  CreateDirectory(dir_name_from);
+  ASSERT_TRUE(PathExists(dir_name_from));
+
+  // Create some regular files under the directory with various permissions.
+  FilePath file_name_from =
+      dir_name_from.Append(FILE_PATH_LITERAL("Reggy-1.txt"));
+  CreateTextFile(file_name_from, L"Mordecai");
+  ASSERT_TRUE(PathExists(file_name_from));
+  ASSERT_TRUE(SetPosixFilePermissions(file_name_from, 0755));
+
+  FilePath file2_name_from =
+      dir_name_from.Append(FILE_PATH_LITERAL("Reggy-2.txt"));
+  CreateTextFile(file2_name_from, L"Rigby");
+  ASSERT_TRUE(PathExists(file2_name_from));
+  ASSERT_TRUE(SetPosixFilePermissions(file2_name_from, 0777));
+
+  FilePath file3_name_from =
+      dir_name_from.Append(FILE_PATH_LITERAL("Reggy-3.txt"));
+  CreateTextFile(file3_name_from, L"Benson");
+  ASSERT_TRUE(PathExists(file3_name_from));
+  ASSERT_TRUE(SetPosixFilePermissions(file3_name_from, 0400));
+
+  // Copy the directory recursively.
+  FilePath dir_name_to =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_To_Subdir"));
+  FilePath file_name_to =
+      dir_name_to.Append(FILE_PATH_LITERAL("Reggy-1.txt"));
+  FilePath file2_name_to =
+      dir_name_to.Append(FILE_PATH_LITERAL("Reggy-2.txt"));
+  FilePath file3_name_to =
+      dir_name_to.Append(FILE_PATH_LITERAL("Reggy-3.txt"));
+
+  ASSERT_FALSE(PathExists(dir_name_to));
+
+  EXPECT_TRUE(CopyDirectory(dir_name_from, dir_name_to, true));
+  ASSERT_TRUE(PathExists(file_name_to));
+  ASSERT_TRUE(PathExists(file2_name_to));
+  ASSERT_TRUE(PathExists(file3_name_to));
+
+  int mode = 0;
+  int expected_mode;
+  ASSERT_TRUE(GetPosixFilePermissions(file_name_to, &mode));
+#if defined(OS_MACOSX)
+  expected_mode = 0755;
+#elif defined(OS_CHROMEOS)
+  expected_mode = 0644;
+#else
+  expected_mode = 0600;
+#endif
+  EXPECT_EQ(expected_mode, mode);
+
+  ASSERT_TRUE(GetPosixFilePermissions(file2_name_to, &mode));
+#if defined(OS_MACOSX)
+  expected_mode = 0755;
+#elif defined(OS_CHROMEOS)
+  expected_mode = 0644;
+#else
+  expected_mode = 0600;
+#endif
+  EXPECT_EQ(expected_mode, mode);
+
+  ASSERT_TRUE(GetPosixFilePermissions(file3_name_to, &mode));
+#if defined(OS_MACOSX)
+  expected_mode = 0600;
+#elif defined(OS_CHROMEOS)
+  expected_mode = 0644;
+#else
+  expected_mode = 0600;
+#endif
+  EXPECT_EQ(expected_mode, mode);
+}
+
+TEST_F(FileUtilTest, CopyDirectoryPermissionsOverExistingFile) {
+  // Create a directory.
+  FilePath dir_name_from =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_From_Subdir"));
+  CreateDirectory(dir_name_from);
+  ASSERT_TRUE(PathExists(dir_name_from));
+
+  // Create a file under the directory.
+  FilePath file_name_from =
+      dir_name_from.Append(FILE_PATH_LITERAL("Reggy-1.txt"));
+  CreateTextFile(file_name_from, L"Mordecai");
+  ASSERT_TRUE(PathExists(file_name_from));
+  ASSERT_TRUE(SetPosixFilePermissions(file_name_from, 0644));
+
+  // Create a directory.
+  FilePath dir_name_to =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_To_Subdir"));
+  CreateDirectory(dir_name_to);
+  ASSERT_TRUE(PathExists(dir_name_to));
+
+  // Create a file under the directory with wider permissions.
+  FilePath file_name_to =
+      dir_name_to.Append(FILE_PATH_LITERAL("Reggy-1.txt"));
+  CreateTextFile(file_name_to, L"Rigby");
+  ASSERT_TRUE(PathExists(file_name_to));
+  ASSERT_TRUE(SetPosixFilePermissions(file_name_to, 0777));
+
+  // Ensure that when we copy the directory, the file contents are copied
+  // but the permissions on the destination are left alone.
+  EXPECT_TRUE(CopyDirectory(dir_name_from, dir_name_to, false));
+  ASSERT_TRUE(PathExists(file_name_to));
+  ASSERT_EQ(L"Mordecai", ReadTextFile(file_name_to));
+
+  int mode = 0;
+  ASSERT_TRUE(GetPosixFilePermissions(file_name_to, &mode));
+  EXPECT_EQ(0777, mode);
+}
+
+TEST_F(FileUtilTest, CopyDirectoryExclDoesNotOverwrite) {
+  // Create source directory.
+  FilePath dir_name_from =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_From_Subdir"));
+  CreateDirectory(dir_name_from);
+  ASSERT_TRUE(PathExists(dir_name_from));
+
+  // Create a file under the directory.
+  FilePath file_name_from =
+      dir_name_from.Append(FILE_PATH_LITERAL("Reggy-1.txt"));
+  CreateTextFile(file_name_from, L"Mordecai");
+  ASSERT_TRUE(PathExists(file_name_from));
+
+  // Create destination directory.
+  FilePath dir_name_to =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_To_Subdir"));
+  CreateDirectory(dir_name_to);
+  ASSERT_TRUE(PathExists(dir_name_to));
+
+  // Create a file under the directory with the same name.
+  FilePath file_name_to = dir_name_to.Append(FILE_PATH_LITERAL("Reggy-1.txt"));
+  CreateTextFile(file_name_to, L"Rigby");
+  ASSERT_TRUE(PathExists(file_name_to));
+
+  // Ensure that copying failed and the file was not overwritten.
+  EXPECT_FALSE(CopyDirectoryExcl(dir_name_from, dir_name_to, false));
+  ASSERT_TRUE(PathExists(file_name_to));
+  ASSERT_EQ(L"Rigby", ReadTextFile(file_name_to));
+}
+
+TEST_F(FileUtilTest, CopyDirectoryExclDirectoryOverExistingFile) {
+  // Create source directory.
+  FilePath dir_name_from =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_From_Subdir"));
+  CreateDirectory(dir_name_from);
+  ASSERT_TRUE(PathExists(dir_name_from));
+
+  // Create a subdirectory.
+  FilePath subdir_name_from = dir_name_from.Append(FILE_PATH_LITERAL("Subsub"));
+  CreateDirectory(subdir_name_from);
+  ASSERT_TRUE(PathExists(subdir_name_from));
+
+  // Create destination directory.
+  FilePath dir_name_to =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_To_Subdir"));
+  CreateDirectory(dir_name_to);
+  ASSERT_TRUE(PathExists(dir_name_to));
+
+  // Create a regular file under the directory with the same name.
+  FilePath file_name_to = dir_name_to.Append(FILE_PATH_LITERAL("Subsub"));
+  CreateTextFile(file_name_to, L"Rigby");
+  ASSERT_TRUE(PathExists(file_name_to));
+
+  // Ensure that copying failed and the file was not overwritten.
+  EXPECT_FALSE(CopyDirectoryExcl(dir_name_from, dir_name_to, false));
+  ASSERT_TRUE(PathExists(file_name_to));
+  ASSERT_EQ(L"Rigby", ReadTextFile(file_name_to));
+}
+
+TEST_F(FileUtilTest, CopyDirectoryExclDirectoryOverExistingDirectory) {
+  // Create source directory.
+  FilePath dir_name_from =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_From_Subdir"));
+  CreateDirectory(dir_name_from);
+  ASSERT_TRUE(PathExists(dir_name_from));
+
+  // Create a subdirectory.
+  FilePath subdir_name_from = dir_name_from.Append(FILE_PATH_LITERAL("Subsub"));
+  CreateDirectory(subdir_name_from);
+  ASSERT_TRUE(PathExists(subdir_name_from));
+
+  // Create destination directory.
+  FilePath dir_name_to =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_To_Subdir"));
+  CreateDirectory(dir_name_to);
+  ASSERT_TRUE(PathExists(dir_name_to));
+
+  // Create a subdirectory under the directory with the same name.
+  FilePath subdir_name_to = dir_name_to.Append(FILE_PATH_LITERAL("Subsub"));
+  CreateDirectory(subdir_name_to);
+  ASSERT_TRUE(PathExists(subdir_name_to));
+
+  // Ensure that copying failed because the destination already contains an
+  // entry with the same name.
+  EXPECT_FALSE(CopyDirectoryExcl(dir_name_from, dir_name_to, false));
+}
+
+TEST_F(FileUtilTest, CopyFileExecutablePermission) {
+  FilePath src = temp_dir_.GetPath().Append(FPL("src.txt"));
+  const std::wstring file_contents(L"Gooooooooooooooooooooogle");
+  CreateTextFile(src, file_contents);
+
+  ASSERT_TRUE(SetPosixFilePermissions(src, 0755));
+  int mode = 0;
+  ASSERT_TRUE(GetPosixFilePermissions(src, &mode));
+  EXPECT_EQ(0755, mode);
+
+  FilePath dst = temp_dir_.GetPath().Append(FPL("dst.txt"));
+  ASSERT_TRUE(CopyFile(src, dst));
+  EXPECT_EQ(file_contents, ReadTextFile(dst));
+
+  ASSERT_TRUE(GetPosixFilePermissions(dst, &mode));
+  int expected_mode;
+#if defined(OS_MACOSX)
+  expected_mode = 0755;
+#elif defined(OS_CHROMEOS)
+  expected_mode = 0644;
+#else
+  expected_mode = 0600;
+#endif
+  EXPECT_EQ(expected_mode, mode);
+  ASSERT_TRUE(DeleteFile(dst, false));
+
+  ASSERT_TRUE(SetPosixFilePermissions(src, 0777));
+  ASSERT_TRUE(GetPosixFilePermissions(src, &mode));
+  EXPECT_EQ(0777, mode);
+
+  ASSERT_TRUE(CopyFile(src, dst));
+  EXPECT_EQ(file_contents, ReadTextFile(dst));
+
+  ASSERT_TRUE(GetPosixFilePermissions(dst, &mode));
+#if defined(OS_MACOSX)
+  expected_mode = 0755;
+#elif defined(OS_CHROMEOS)
+  expected_mode = 0644;
+#else
+  expected_mode = 0600;
+#endif
+  EXPECT_EQ(expected_mode, mode);
+  ASSERT_TRUE(DeleteFile(dst, false));
+
+  ASSERT_TRUE(SetPosixFilePermissions(src, 0400));
+  ASSERT_TRUE(GetPosixFilePermissions(src, &mode));
+  EXPECT_EQ(0400, mode);
+
+  ASSERT_TRUE(CopyFile(src, dst));
+  EXPECT_EQ(file_contents, ReadTextFile(dst));
+
+  ASSERT_TRUE(GetPosixFilePermissions(dst, &mode));
+#if defined(OS_MACOSX)
+  expected_mode = 0600;
+#elif defined(OS_CHROMEOS)
+  expected_mode = 0644;
+#else
+  expected_mode = 0600;
+#endif
+  EXPECT_EQ(expected_mode, mode);
+
+  // This time, do not delete |dst|. Instead set its permissions to 0777.
+  ASSERT_TRUE(SetPosixFilePermissions(dst, 0777));
+  ASSERT_TRUE(GetPosixFilePermissions(dst, &mode));
+  EXPECT_EQ(0777, mode);
+
+  // Overwrite it and check the permissions again.
+  ASSERT_TRUE(CopyFile(src, dst));
+  EXPECT_EQ(file_contents, ReadTextFile(dst));
+  ASSERT_TRUE(GetPosixFilePermissions(dst, &mode));
+  EXPECT_EQ(0777, mode);
+}
+
+#endif  // defined(OS_POSIX)
+
+#if !defined(OS_FUCHSIA)
+
+TEST_F(FileUtilTest, CopyFileACL) {
+  // While FileUtilTest.CopyFile asserts that the content is correctly copied
+  // over, this test case asserts that the access control bits set by
+  // CopyFile() meet expectations.
+  FilePath src = temp_dir_.GetPath().Append(FILE_PATH_LITERAL("src.txt"));
+  const std::wstring file_contents(L"Gooooooooooooooooooooogle");
+  CreateTextFile(src, file_contents);
+
+  // Set the source file to read-only.
+  ASSERT_FALSE(IsReadOnly(src));
+  SetReadOnly(src, true);
+  ASSERT_TRUE(IsReadOnly(src));
+
+  // Copy the file.
+  FilePath dst = temp_dir_.GetPath().Append(FILE_PATH_LITERAL("dst.txt"));
+  ASSERT_TRUE(CopyFile(src, dst));
+  EXPECT_EQ(file_contents, ReadTextFile(dst));
+
+  ASSERT_FALSE(IsReadOnly(dst));
+}
+
+TEST_F(FileUtilTest, CopyDirectoryACL) {
+  // Create source directories.
+  FilePath src = temp_dir_.GetPath().Append(FILE_PATH_LITERAL("src"));
+  FilePath src_subdir = src.Append(FILE_PATH_LITERAL("subdir"));
+  CreateDirectory(src_subdir);
+  ASSERT_TRUE(PathExists(src_subdir));
+
+  // Create a file under the directory.
+  FilePath src_file = src.Append(FILE_PATH_LITERAL("src.txt"));
+  CreateTextFile(src_file, L"Gooooooooooooooooooooogle");
+  SetReadOnly(src_file, true);
+  ASSERT_TRUE(IsReadOnly(src_file));
+
+  // Make directory read-only.
+  SetReadOnly(src_subdir, true);
+  ASSERT_TRUE(IsReadOnly(src_subdir));
+
+  // Copy the directory recursively.
+  FilePath dst = temp_dir_.GetPath().Append(FILE_PATH_LITERAL("dst"));
+  FilePath dst_file = dst.Append(FILE_PATH_LITERAL("src.txt"));
+  EXPECT_TRUE(CopyDirectory(src, dst, true));
+
+  FilePath dst_subdir = dst.Append(FILE_PATH_LITERAL("subdir"));
+  ASSERT_FALSE(IsReadOnly(dst_subdir));
+  ASSERT_FALSE(IsReadOnly(dst_file));
+
+  // Give write permissions to allow deletion.
+  SetReadOnly(src_subdir, false);
+  ASSERT_FALSE(IsReadOnly(src_subdir));
+}
+
+#endif  // !defined(OS_FUCHSIA)
+
+TEST_F(FileUtilTest, DeleteNonExistent) {
+  FilePath non_existent =
+      temp_dir_.GetPath().AppendASCII("bogus_file_dne.foobar");
+  ASSERT_FALSE(PathExists(non_existent));
+
+  EXPECT_TRUE(DeleteFile(non_existent, false));
+  ASSERT_FALSE(PathExists(non_existent));
+  EXPECT_TRUE(DeleteFile(non_existent, true));
+  ASSERT_FALSE(PathExists(non_existent));
+}
+
+TEST_F(FileUtilTest, DeleteNonExistentWithNonExistentParent) {
+  FilePath non_existent = temp_dir_.GetPath().AppendASCII("bogus_topdir");
+  non_existent = non_existent.AppendASCII("bogus_subdir");
+  ASSERT_FALSE(PathExists(non_existent));
+
+  EXPECT_TRUE(DeleteFile(non_existent, false));
+  ASSERT_FALSE(PathExists(non_existent));
+  EXPECT_TRUE(DeleteFile(non_existent, true));
+  ASSERT_FALSE(PathExists(non_existent));
+}
+
+TEST_F(FileUtilTest, DeleteFile) {
+  // Create a file
+  FilePath file_name = temp_dir_.GetPath().Append(FPL("Test DeleteFile 1.txt"));
+  CreateTextFile(file_name, bogus_content);
+  ASSERT_TRUE(PathExists(file_name));
+
+  // Make sure it's deleted
+  EXPECT_TRUE(DeleteFile(file_name, false));
+  EXPECT_FALSE(PathExists(file_name));
+
+  // Test recursive case, create a new file
+  file_name = temp_dir_.GetPath().Append(FPL("Test DeleteFile 2.txt"));
+  CreateTextFile(file_name, bogus_content);
+  ASSERT_TRUE(PathExists(file_name));
+
+  // Make sure it's deleted
+  EXPECT_TRUE(DeleteFile(file_name, true));
+  EXPECT_FALSE(PathExists(file_name));
+}
+
+#if defined(OS_WIN)
+// Tests that the Delete function works for wild cards, especially
+// with the recursion flag.  Also coincidentally tests PathExists.
+// TODO(erikkay): see if anyone's actually using this feature of the API
+TEST_F(FileUtilTest, DeleteWildCard) {
+  // Create a file and a directory
+  FilePath file_name =
+      temp_dir_.GetPath().Append(FPL("Test DeleteWildCard.txt"));
+  CreateTextFile(file_name, bogus_content);
+  ASSERT_TRUE(PathExists(file_name));
+
+  FilePath subdir_path = temp_dir_.GetPath().Append(FPL("DeleteWildCardDir"));
+  CreateDirectory(subdir_path);
+  ASSERT_TRUE(PathExists(subdir_path));
+
+  // Create the wildcard path
+  FilePath directory_contents = temp_dir_.GetPath();
+  directory_contents = directory_contents.Append(FPL("*"));
+
+  // Delete non-recursively and check that only the file is deleted
+  EXPECT_TRUE(DeleteFile(directory_contents, false));
+  EXPECT_FALSE(PathExists(file_name));
+  EXPECT_TRUE(PathExists(subdir_path));
+
+  // Delete recursively and make sure all contents are deleted
+  EXPECT_TRUE(DeleteFile(directory_contents, true));
+  EXPECT_FALSE(PathExists(file_name));
+  EXPECT_FALSE(PathExists(subdir_path));
+}
+
+// TODO(erikkay): see if anyone's actually using this feature of the API
+TEST_F(FileUtilTest, DeleteNonExistentWildCard) {
+  // Create a file and a directory
+  FilePath subdir_path =
+      temp_dir_.GetPath().Append(FPL("DeleteNonExistentWildCard"));
+  CreateDirectory(subdir_path);
+  ASSERT_TRUE(PathExists(subdir_path));
+
+  // Create the wildcard path
+  FilePath directory_contents = subdir_path;
+  directory_contents = directory_contents.Append(FPL("*"));
+
+  // Delete non-recursively and check nothing got deleted
+  EXPECT_TRUE(DeleteFile(directory_contents, false));
+  EXPECT_TRUE(PathExists(subdir_path));
+
+  // Delete recursively and check nothing got deleted
+  EXPECT_TRUE(DeleteFile(directory_contents, true));
+  EXPECT_TRUE(PathExists(subdir_path));
+}
+#endif
+
+// Tests non-recursive Delete() for a directory.
+TEST_F(FileUtilTest, DeleteDirNonRecursive) {
+  // Create a subdirectory and put a file and two directories inside.
+  FilePath test_subdir =
+      temp_dir_.GetPath().Append(FPL("DeleteDirNonRecursive"));
+  CreateDirectory(test_subdir);
+  ASSERT_TRUE(PathExists(test_subdir));
+
+  FilePath file_name = test_subdir.Append(FPL("Test DeleteDir.txt"));
+  CreateTextFile(file_name, bogus_content);
+  ASSERT_TRUE(PathExists(file_name));
+
+  FilePath subdir_path1 = test_subdir.Append(FPL("TestSubDir1"));
+  CreateDirectory(subdir_path1);
+  ASSERT_TRUE(PathExists(subdir_path1));
+
+  FilePath subdir_path2 = test_subdir.Append(FPL("TestSubDir2"));
+  CreateDirectory(subdir_path2);
+  ASSERT_TRUE(PathExists(subdir_path2));
+
+  // Delete non-recursively and check that the empty dir got deleted
+  EXPECT_TRUE(DeleteFile(subdir_path2, false));
+  EXPECT_FALSE(PathExists(subdir_path2));
+
+  // Delete non-recursively and check that nothing got deleted
+  EXPECT_FALSE(DeleteFile(test_subdir, false));
+  EXPECT_TRUE(PathExists(test_subdir));
+  EXPECT_TRUE(PathExists(file_name));
+  EXPECT_TRUE(PathExists(subdir_path1));
+}
+
+// Tests recursive Delete() for a directory.
+TEST_F(FileUtilTest, DeleteDirRecursive) {
+  // Create a subdirectory and put a file and two directories inside.
+  FilePath test_subdir = temp_dir_.GetPath().Append(FPL("DeleteDirRecursive"));
+  CreateDirectory(test_subdir);
+  ASSERT_TRUE(PathExists(test_subdir));
+
+  FilePath file_name = test_subdir.Append(FPL("Test DeleteDirRecursive.txt"));
+  CreateTextFile(file_name, bogus_content);
+  ASSERT_TRUE(PathExists(file_name));
+
+  FilePath subdir_path1 = test_subdir.Append(FPL("TestSubDir1"));
+  CreateDirectory(subdir_path1);
+  ASSERT_TRUE(PathExists(subdir_path1));
+
+  FilePath subdir_path2 = test_subdir.Append(FPL("TestSubDir2"));
+  CreateDirectory(subdir_path2);
+  ASSERT_TRUE(PathExists(subdir_path2));
+
+  // Delete recursively and check that the empty dir got deleted
+  EXPECT_TRUE(DeleteFile(subdir_path2, true));
+  EXPECT_FALSE(PathExists(subdir_path2));
+
+  // Delete recursively and check that everything got deleted
+  EXPECT_TRUE(DeleteFile(test_subdir, true));
+  EXPECT_FALSE(PathExists(file_name));
+  EXPECT_FALSE(PathExists(subdir_path1));
+  EXPECT_FALSE(PathExists(test_subdir));
+}
+
+// Tests recursive Delete() for a directory that contains open files.
+TEST_F(FileUtilTest, DeleteDirRecursiveWithOpenFile) {
+  // Create a subdirectory and put a file and two directories inside.
+  FilePath test_subdir = temp_dir_.GetPath().Append(FPL("DeleteWithOpenFile"));
+  CreateDirectory(test_subdir);
+  ASSERT_TRUE(PathExists(test_subdir));
+
+  FilePath file_name1 = test_subdir.Append(FPL("Undeletable File1.txt"));
+  File file1(file_name1,
+             File::FLAG_CREATE | File::FLAG_READ | File::FLAG_WRITE);
+  ASSERT_TRUE(PathExists(file_name1));
+
+  FilePath file_name2 = test_subdir.Append(FPL("Deletable File2.txt"));
+  CreateTextFile(file_name2, bogus_content);
+  ASSERT_TRUE(PathExists(file_name2));
+
+  FilePath file_name3 = test_subdir.Append(FPL("Undeletable File3.txt"));
+  File file3(file_name3,
+             File::FLAG_CREATE | File::FLAG_READ | File::FLAG_WRITE);
+  ASSERT_TRUE(PathExists(file_name3));
+
+#if defined(OS_LINUX)
+  // On Windows, holding the file open is sufficient to make it un-deletable.
+  // The POSIX code is verifiable on Linux by creating an "immutable" file but
+  // this is best-effort because it's not supported by all file systems. Both
+  // files will have the same flags so no need to get them individually.
+  int flags;
+  bool file_attrs_supported =
+      ioctl(file1.GetPlatformFile(), FS_IOC_GETFLAGS, &flags) == 0;
+  // Some filesystems (e.g. tmpfs) don't support file attributes.
+  if (file_attrs_supported) {
+    flags |= FS_IMMUTABLE_FL;
+    ioctl(file1.GetPlatformFile(), FS_IOC_SETFLAGS, &flags);
+    ioctl(file3.GetPlatformFile(), FS_IOC_SETFLAGS, &flags);
+  }
+#endif
+
+  // Delete recursively and check that at least the second file got deleted.
+  // This ensures that un-deletable files don't impact those that can be.
+  DeleteFile(test_subdir, true);
+  EXPECT_FALSE(PathExists(file_name2));
+
+#if defined(OS_LINUX)
+  // Make sure that the test can clean up after itself.
+  if (file_attrs_supported) {
+    flags &= ~FS_IMMUTABLE_FL;
+    ioctl(file1.GetPlatformFile(), FS_IOC_SETFLAGS, &flags);
+    ioctl(file3.GetPlatformFile(), FS_IOC_SETFLAGS, &flags);
+  }
+#endif
+}
+
+TEST_F(FileUtilTest, MoveFileNew) {
+  // Create a file
+  FilePath file_name_from =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Move_Test_File.txt"));
+  CreateTextFile(file_name_from, L"Gooooooooooooooooooooogle");
+  ASSERT_TRUE(PathExists(file_name_from));
+
+  // The destination.
+  FilePath file_name_to = temp_dir_.GetPath().Append(
+      FILE_PATH_LITERAL("Move_Test_File_Destination.txt"));
+  ASSERT_FALSE(PathExists(file_name_to));
+
+  EXPECT_TRUE(Move(file_name_from, file_name_to));
+
+  // Check everything has been moved.
+  EXPECT_FALSE(PathExists(file_name_from));
+  EXPECT_TRUE(PathExists(file_name_to));
+}
+
+TEST_F(FileUtilTest, MoveFileExists) {
+  // Create a file
+  FilePath file_name_from =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Move_Test_File.txt"));
+  CreateTextFile(file_name_from, L"Gooooooooooooooooooooogle");
+  ASSERT_TRUE(PathExists(file_name_from));
+
+  // The destination name.
+  FilePath file_name_to = temp_dir_.GetPath().Append(
+      FILE_PATH_LITERAL("Move_Test_File_Destination.txt"));
+  CreateTextFile(file_name_to, L"Old file content");
+  ASSERT_TRUE(PathExists(file_name_to));
+
+  EXPECT_TRUE(Move(file_name_from, file_name_to));
+
+  // Check everything has been moved.
+  EXPECT_FALSE(PathExists(file_name_from));
+  EXPECT_TRUE(PathExists(file_name_to));
+  EXPECT_TRUE(L"Gooooooooooooooooooooogle" == ReadTextFile(file_name_to));
+}
+
+TEST_F(FileUtilTest, MoveFileDirExists) {
+  // Create a file
+  FilePath file_name_from =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Move_Test_File.txt"));
+  CreateTextFile(file_name_from, L"Gooooooooooooooooooooogle");
+  ASSERT_TRUE(PathExists(file_name_from));
+
+  // The destination directory
+  FilePath dir_name_to =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Destination"));
+  CreateDirectory(dir_name_to);
+  ASSERT_TRUE(PathExists(dir_name_to));
+
+  EXPECT_FALSE(Move(file_name_from, dir_name_to));
+}
+
+TEST_F(FileUtilTest, MoveNew) {
+  // Create a directory
+  FilePath dir_name_from =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Move_From_Subdir"));
+  CreateDirectory(dir_name_from);
+  ASSERT_TRUE(PathExists(dir_name_from));
+
+  // Create a file under the directory
+  FilePath txt_file_name(FILE_PATH_LITERAL("Move_Test_File.txt"));
+  FilePath file_name_from = dir_name_from.Append(txt_file_name);
+  CreateTextFile(file_name_from, L"Gooooooooooooooooooooogle");
+  ASSERT_TRUE(PathExists(file_name_from));
+
+  // Move the directory.
+  FilePath dir_name_to =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Move_To_Subdir"));
+  FilePath file_name_to =
+      dir_name_to.Append(FILE_PATH_LITERAL("Move_Test_File.txt"));
+
+  ASSERT_FALSE(PathExists(dir_name_to));
+
+  EXPECT_TRUE(Move(dir_name_from, dir_name_to));
+
+  // Check everything has been moved.
+  EXPECT_FALSE(PathExists(dir_name_from));
+  EXPECT_FALSE(PathExists(file_name_from));
+  EXPECT_TRUE(PathExists(dir_name_to));
+  EXPECT_TRUE(PathExists(file_name_to));
+
+  // Test path traversal.
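+  // Move() rejects destination paths that contain parent ("..") references;
+  // internal::MoveUnsafe() performs the same move without that check.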
+  file_name_from = dir_name_to.Append(txt_file_name);
+  file_name_to = dir_name_to.Append(FILE_PATH_LITERAL(".."));
+  file_name_to = file_name_to.Append(txt_file_name);
+  EXPECT_FALSE(Move(file_name_from, file_name_to));
+  EXPECT_TRUE(PathExists(file_name_from));
+  EXPECT_FALSE(PathExists(file_name_to));
+  EXPECT_TRUE(internal::MoveUnsafe(file_name_from, file_name_to));
+  EXPECT_FALSE(PathExists(file_name_from));
+  EXPECT_TRUE(PathExists(file_name_to));
+}
+
+TEST_F(FileUtilTest, MoveExist) {
+  // Create a directory
+  FilePath dir_name_from =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Move_From_Subdir"));
+  CreateDirectory(dir_name_from);
+  ASSERT_TRUE(PathExists(dir_name_from));
+
+  // Create a file under the directory
+  FilePath file_name_from =
+      dir_name_from.Append(FILE_PATH_LITERAL("Move_Test_File.txt"));
+  CreateTextFile(file_name_from, L"Gooooooooooooooooooooogle");
+  ASSERT_TRUE(PathExists(file_name_from));
+
+  // Move the directory
+  FilePath dir_name_exists =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Destination"));
+
+  FilePath dir_name_to =
+      dir_name_exists.Append(FILE_PATH_LITERAL("Move_To_Subdir"));
+  FilePath file_name_to =
+      dir_name_to.Append(FILE_PATH_LITERAL("Move_Test_File.txt"));
+
+  // Create the destination directory.
+  CreateDirectory(dir_name_exists);
+  ASSERT_TRUE(PathExists(dir_name_exists));
+
+  EXPECT_TRUE(Move(dir_name_from, dir_name_to));
+
+  // Check everything has been moved.
+  EXPECT_FALSE(PathExists(dir_name_from));
+  EXPECT_FALSE(PathExists(file_name_from));
+  EXPECT_TRUE(PathExists(dir_name_to));
+  EXPECT_TRUE(PathExists(file_name_to));
+}
+
+TEST_F(FileUtilTest, CopyDirectoryRecursivelyNew) {
+  // Create a directory.
+  FilePath dir_name_from =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_From_Subdir"));
+  CreateDirectory(dir_name_from);
+  ASSERT_TRUE(PathExists(dir_name_from));
+
+  // Create a file under the directory.
+  FilePath file_name_from =
+      dir_name_from.Append(FILE_PATH_LITERAL("Copy_Test_File.txt"));
+  CreateTextFile(file_name_from, L"Gooooooooooooooooooooogle");
+  ASSERT_TRUE(PathExists(file_name_from));
+
+  // Create a subdirectory.
+  FilePath subdir_name_from =
+      dir_name_from.Append(FILE_PATH_LITERAL("Subdir"));
+  CreateDirectory(subdir_name_from);
+  ASSERT_TRUE(PathExists(subdir_name_from));
+
+  // Create a file under the subdirectory.
+  FilePath file_name2_from =
+      subdir_name_from.Append(FILE_PATH_LITERAL("Copy_Test_File.txt"));
+  CreateTextFile(file_name2_from, L"Gooooooooooooooooooooogle");
+  ASSERT_TRUE(PathExists(file_name2_from));
+
+  // Copy the directory recursively.
+  FilePath dir_name_to =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_To_Subdir"));
+  FilePath file_name_to =
+      dir_name_to.Append(FILE_PATH_LITERAL("Copy_Test_File.txt"));
+  FilePath subdir_name_to =
+      dir_name_to.Append(FILE_PATH_LITERAL("Subdir"));
+  FilePath file_name2_to =
+      subdir_name_to.Append(FILE_PATH_LITERAL("Copy_Test_File.txt"));
+
+  ASSERT_FALSE(PathExists(dir_name_to));
+
+  EXPECT_TRUE(CopyDirectory(dir_name_from, dir_name_to, true));
+
+  // Check everything has been copied.
+  EXPECT_TRUE(PathExists(dir_name_from));
+  EXPECT_TRUE(PathExists(file_name_from));
+  EXPECT_TRUE(PathExists(subdir_name_from));
+  EXPECT_TRUE(PathExists(file_name2_from));
+  EXPECT_TRUE(PathExists(dir_name_to));
+  EXPECT_TRUE(PathExists(file_name_to));
+  EXPECT_TRUE(PathExists(subdir_name_to));
+  EXPECT_TRUE(PathExists(file_name2_to));
+}
+
+TEST_F(FileUtilTest, CopyDirectoryRecursivelyExists) {
+  // Create a directory.
+  FilePath dir_name_from =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_From_Subdir"));
+  CreateDirectory(dir_name_from);
+  ASSERT_TRUE(PathExists(dir_name_from));
+
+  // Create a file under the directory.
+  FilePath file_name_from =
+      dir_name_from.Append(FILE_PATH_LITERAL("Copy_Test_File.txt"));
+  CreateTextFile(file_name_from, L"Gooooooooooooooooooooogle");
+  ASSERT_TRUE(PathExists(file_name_from));
+
+  // Create a subdirectory.
+  FilePath subdir_name_from =
+      dir_name_from.Append(FILE_PATH_LITERAL("Subdir"));
+  CreateDirectory(subdir_name_from);
+  ASSERT_TRUE(PathExists(subdir_name_from));
+
+  // Create a file under the subdirectory.
+  FilePath file_name2_from =
+      subdir_name_from.Append(FILE_PATH_LITERAL("Copy_Test_File.txt"));
+  CreateTextFile(file_name2_from, L"Gooooooooooooooooooooogle");
+  ASSERT_TRUE(PathExists(file_name2_from));
+
+  // Copy the directory recursively.
+  FilePath dir_name_exists =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Destination"));
+
+  FilePath dir_name_to =
+      dir_name_exists.Append(FILE_PATH_LITERAL("Copy_From_Subdir"));
+  FilePath file_name_to =
+      dir_name_to.Append(FILE_PATH_LITERAL("Copy_Test_File.txt"));
+  FilePath subdir_name_to =
+      dir_name_to.Append(FILE_PATH_LITERAL("Subdir"));
+  FilePath file_name2_to =
+      subdir_name_to.Append(FILE_PATH_LITERAL("Copy_Test_File.txt"));
+
+  // Create the destination directory.
+  CreateDirectory(dir_name_exists);
+  ASSERT_TRUE(PathExists(dir_name_exists));
+
+  EXPECT_TRUE(CopyDirectory(dir_name_from, dir_name_exists, true));
+
+  // Check everything has been copied.
+  EXPECT_TRUE(PathExists(dir_name_from));
+  EXPECT_TRUE(PathExists(file_name_from));
+  EXPECT_TRUE(PathExists(subdir_name_from));
+  EXPECT_TRUE(PathExists(file_name2_from));
+  EXPECT_TRUE(PathExists(dir_name_to));
+  EXPECT_TRUE(PathExists(file_name_to));
+  EXPECT_TRUE(PathExists(subdir_name_to));
+  EXPECT_TRUE(PathExists(file_name2_to));
+}
+
+TEST_F(FileUtilTest, CopyDirectoryNew) {
+  // Create a directory.
+  FilePath dir_name_from =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_From_Subdir"));
+  CreateDirectory(dir_name_from);
+  ASSERT_TRUE(PathExists(dir_name_from));
+
+  // Create a file under the directory.
+  FilePath file_name_from =
+      dir_name_from.Append(FILE_PATH_LITERAL("Copy_Test_File.txt"));
+  CreateTextFile(file_name_from, L"Gooooooooooooooooooooogle");
+  ASSERT_TRUE(PathExists(file_name_from));
+
+  // Create a subdirectory.
+  FilePath subdir_name_from =
+      dir_name_from.Append(FILE_PATH_LITERAL("Subdir"));
+  CreateDirectory(subdir_name_from);
+  ASSERT_TRUE(PathExists(subdir_name_from));
+
+  // Create a file under the subdirectory.
+  FilePath file_name2_from =
+      subdir_name_from.Append(FILE_PATH_LITERAL("Copy_Test_File.txt"));
+  CreateTextFile(file_name2_from, L"Gooooooooooooooooooooogle");
+  ASSERT_TRUE(PathExists(file_name2_from));
+
+  // Copy the directory not recursively.
+  FilePath dir_name_to =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_To_Subdir"));
+  FilePath file_name_to =
+      dir_name_to.Append(FILE_PATH_LITERAL("Copy_Test_File.txt"));
+  FilePath subdir_name_to =
+      dir_name_to.Append(FILE_PATH_LITERAL("Subdir"));
+
+  ASSERT_FALSE(PathExists(dir_name_to));
+
+  EXPECT_TRUE(CopyDirectory(dir_name_from, dir_name_to, false));
+
+  // Check everything has been copied.
+  EXPECT_TRUE(PathExists(dir_name_from));
+  EXPECT_TRUE(PathExists(file_name_from));
+  EXPECT_TRUE(PathExists(subdir_name_from));
+  EXPECT_TRUE(PathExists(file_name2_from));
+  EXPECT_TRUE(PathExists(dir_name_to));
+  EXPECT_TRUE(PathExists(file_name_to));
+  EXPECT_FALSE(PathExists(subdir_name_to));
+}
+
+TEST_F(FileUtilTest, CopyDirectoryExists) {
+  // Create a directory.
+  FilePath dir_name_from =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_From_Subdir"));
+  CreateDirectory(dir_name_from);
+  ASSERT_TRUE(PathExists(dir_name_from));
+
+  // Create a file under the directory.
+  FilePath file_name_from =
+      dir_name_from.Append(FILE_PATH_LITERAL("Copy_Test_File.txt"));
+  CreateTextFile(file_name_from, L"Gooooooooooooooooooooogle");
+  ASSERT_TRUE(PathExists(file_name_from));
+
+  // Create a subdirectory.
+  FilePath subdir_name_from =
+      dir_name_from.Append(FILE_PATH_LITERAL("Subdir"));
+  CreateDirectory(subdir_name_from);
+  ASSERT_TRUE(PathExists(subdir_name_from));
+
+  // Create a file under the subdirectory.
+  FilePath file_name2_from =
+      subdir_name_from.Append(FILE_PATH_LITERAL("Copy_Test_File.txt"));
+  CreateTextFile(file_name2_from, L"Gooooooooooooooooooooogle");
+  ASSERT_TRUE(PathExists(file_name2_from));
+
+  // Copy the directory not recursively.
+  FilePath dir_name_to =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_To_Subdir"));
+  FilePath file_name_to =
+      dir_name_to.Append(FILE_PATH_LITERAL("Copy_Test_File.txt"));
+  FilePath subdir_name_to =
+      dir_name_to.Append(FILE_PATH_LITERAL("Subdir"));
+
+  // Create the destination directory.
+  CreateDirectory(dir_name_to);
+  ASSERT_TRUE(PathExists(dir_name_to));
+
+  EXPECT_TRUE(CopyDirectory(dir_name_from, dir_name_to, false));
+
+  // Check everything has been copied.
+  EXPECT_TRUE(PathExists(dir_name_from));
+  EXPECT_TRUE(PathExists(file_name_from));
+  EXPECT_TRUE(PathExists(subdir_name_from));
+  EXPECT_TRUE(PathExists(file_name2_from));
+  EXPECT_TRUE(PathExists(dir_name_to));
+  EXPECT_TRUE(PathExists(file_name_to));
+  EXPECT_FALSE(PathExists(subdir_name_to));
+}
+
+TEST_F(FileUtilTest, CopyFileWithCopyDirectoryRecursiveToNew) {
+  // Create a file
+  FilePath file_name_from =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_Test_File.txt"));
+  CreateTextFile(file_name_from, L"Gooooooooooooooooooooogle");
+  ASSERT_TRUE(PathExists(file_name_from));
+
+  // The destination name
+  FilePath file_name_to = temp_dir_.GetPath().Append(
+      FILE_PATH_LITERAL("Copy_Test_File_Destination.txt"));
+  ASSERT_FALSE(PathExists(file_name_to));
+
+  EXPECT_TRUE(CopyDirectory(file_name_from, file_name_to, true));
+
+  // Check the file has been copied.
+  EXPECT_TRUE(PathExists(file_name_to));
+}
+
+TEST_F(FileUtilTest, CopyFileWithCopyDirectoryRecursiveToExisting) {
+  // Create a file
+  FilePath file_name_from =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_Test_File.txt"));
+  CreateTextFile(file_name_from, L"Gooooooooooooooooooooogle");
+  ASSERT_TRUE(PathExists(file_name_from));
+
+  // The destination name
+  FilePath file_name_to = temp_dir_.GetPath().Append(
+      FILE_PATH_LITERAL("Copy_Test_File_Destination.txt"));
+  CreateTextFile(file_name_to, L"Old file content");
+  ASSERT_TRUE(PathExists(file_name_to));
+
+  EXPECT_TRUE(CopyDirectory(file_name_from, file_name_to, true));
+
+  // Check the file has been copied.
+  EXPECT_TRUE(PathExists(file_name_to));
+  EXPECT_TRUE(L"Gooooooooooooooooooooogle" == ReadTextFile(file_name_to));
+}
+
+TEST_F(FileUtilTest, CopyFileWithCopyDirectoryRecursiveToExistingDirectory) {
+  // Create a file
+  FilePath file_name_from =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_Test_File.txt"));
+  CreateTextFile(file_name_from, L"Gooooooooooooooooooooogle");
+  ASSERT_TRUE(PathExists(file_name_from));
+
+  // The destination
+  FilePath dir_name_to =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Destination"));
+  CreateDirectory(dir_name_to);
+  ASSERT_TRUE(PathExists(dir_name_to));
+  FilePath file_name_to =
+      dir_name_to.Append(FILE_PATH_LITERAL("Copy_Test_File.txt"));
+
+  EXPECT_TRUE(CopyDirectory(file_name_from, dir_name_to, true));
+
+  // Check the file has been copied.
+  EXPECT_TRUE(PathExists(file_name_to));
+}
+
+TEST_F(FileUtilTest, CopyFileFailureWithCopyDirectoryExcl) {
+  // Create a file
+  FilePath file_name_from =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_Test_File.txt"));
+  CreateTextFile(file_name_from, L"Gooooooooooooooooooooogle");
+  ASSERT_TRUE(PathExists(file_name_from));
+
+  // Make a destination file.
+  FilePath file_name_to = temp_dir_.GetPath().Append(
+      FILE_PATH_LITERAL("Copy_Test_File_Destination.txt"));
+  CreateTextFile(file_name_to, L"Old file content");
+  ASSERT_TRUE(PathExists(file_name_to));
+
+  // Overwriting the destination should fail.
+  EXPECT_FALSE(CopyDirectoryExcl(file_name_from, file_name_to, true));
+  EXPECT_EQ(L"Old file content", ReadTextFile(file_name_to));
+}
+
+TEST_F(FileUtilTest, CopyDirectoryWithTrailingSeparators) {
+  // Create a directory.
+  FilePath dir_name_from =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_From_Subdir"));
+  CreateDirectory(dir_name_from);
+  ASSERT_TRUE(PathExists(dir_name_from));
+
+  // Create a file under the directory.
+  FilePath file_name_from =
+      dir_name_from.Append(FILE_PATH_LITERAL("Copy_Test_File.txt"));
+  CreateTextFile(file_name_from, L"Gooooooooooooooooooooogle");
+  ASSERT_TRUE(PathExists(file_name_from));
+
+  // Copy the directory recursively.
+  FilePath dir_name_to =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_To_Subdir"));
+  FilePath file_name_to =
+      dir_name_to.Append(FILE_PATH_LITERAL("Copy_Test_File.txt"));
+
+  // Create |from_path| with trailing separators.
+#if defined(OS_WIN)
+  FilePath from_path =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_From_Subdir\\\\\\"));
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  FilePath from_path =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_From_Subdir///"));
+#endif
+
+  EXPECT_TRUE(CopyDirectory(from_path, dir_name_to, true));
+
+  // Check everything has been copied.
+  EXPECT_TRUE(PathExists(dir_name_from));
+  EXPECT_TRUE(PathExists(file_name_from));
+  EXPECT_TRUE(PathExists(dir_name_to));
+  EXPECT_TRUE(PathExists(file_name_to));
+}
+
+#if defined(OS_POSIX)
+TEST_F(FileUtilTest, CopyDirectoryWithNonRegularFiles) {
+  // Create a directory.
+  FilePath dir_name_from =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_From_Subdir"));
+  ASSERT_TRUE(CreateDirectory(dir_name_from));
+  ASSERT_TRUE(PathExists(dir_name_from));
+
+  // Create a file under the directory.
+  FilePath file_name_from =
+      dir_name_from.Append(FILE_PATH_LITERAL("Copy_Test_File.txt"));
+  CreateTextFile(file_name_from, L"Gooooooooooooooooooooogle");
+  ASSERT_TRUE(PathExists(file_name_from));
+
+  // Create a symbolic link under the directory pointing to that file.
+  FilePath symlink_name_from =
+      dir_name_from.Append(FILE_PATH_LITERAL("Symlink"));
+  ASSERT_TRUE(CreateSymbolicLink(file_name_from, symlink_name_from));
+  ASSERT_TRUE(PathExists(symlink_name_from));
+
+  // Create a fifo under the directory.
+  FilePath fifo_name_from =
+      dir_name_from.Append(FILE_PATH_LITERAL("Fifo"));
+  ASSERT_EQ(0, mkfifo(fifo_name_from.value().c_str(), 0644));
+  ASSERT_TRUE(PathExists(fifo_name_from));
+
+  // Copy the directory.
+  FilePath dir_name_to =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_To_Subdir"));
+  FilePath file_name_to =
+      dir_name_to.Append(FILE_PATH_LITERAL("Copy_Test_File.txt"));
+  FilePath symlink_name_to =
+      dir_name_to.Append(FILE_PATH_LITERAL("Symlink"));
+  FilePath fifo_name_to =
+      dir_name_to.Append(FILE_PATH_LITERAL("Fifo"));
+
+  ASSERT_FALSE(PathExists(dir_name_to));
+
+  EXPECT_TRUE(CopyDirectory(dir_name_from, dir_name_to, false));
+
+  // Check that only directories and regular files are copied.
+  EXPECT_TRUE(PathExists(dir_name_from));
+  EXPECT_TRUE(PathExists(file_name_from));
+  EXPECT_TRUE(PathExists(symlink_name_from));
+  EXPECT_TRUE(PathExists(fifo_name_from));
+  EXPECT_TRUE(PathExists(dir_name_to));
+  EXPECT_TRUE(PathExists(file_name_to));
+  EXPECT_FALSE(PathExists(symlink_name_to));
+  EXPECT_FALSE(PathExists(fifo_name_to));
+}
+
+TEST_F(FileUtilTest, CopyDirectoryExclFileOverSymlink) {
+  // Create a directory.
+  FilePath dir_name_from =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_From_Subdir"));
+  ASSERT_TRUE(CreateDirectory(dir_name_from));
+  ASSERT_TRUE(PathExists(dir_name_from));
+
+  // Create a file under the directory.
+  FilePath file_name_from =
+      dir_name_from.Append(FILE_PATH_LITERAL("Copy_Test_File.txt"));
+  CreateTextFile(file_name_from, L"Gooooooooooooooooooooogle");
+  ASSERT_TRUE(PathExists(file_name_from));
+
+  // Create a destination directory with a symlink of the same name.
+  FilePath dir_name_to =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_To_Subdir"));
+  ASSERT_TRUE(CreateDirectory(dir_name_to));
+  ASSERT_TRUE(PathExists(dir_name_to));
+
+  FilePath symlink_target =
+      dir_name_to.Append(FILE_PATH_LITERAL("Symlink_Target.txt"));
+  CreateTextFile(symlink_target, L"asdf");
+  ASSERT_TRUE(PathExists(symlink_target));
+
+  FilePath symlink_name_to =
+      dir_name_to.Append(FILE_PATH_LITERAL("Copy_Test_File.txt"));
+  ASSERT_TRUE(CreateSymbolicLink(symlink_target, symlink_name_to));
+  ASSERT_TRUE(PathExists(symlink_name_to));
+
+  // Check that copying fails.
+  EXPECT_FALSE(CopyDirectoryExcl(dir_name_from, dir_name_to, false));
+}
+
+TEST_F(FileUtilTest, CopyDirectoryExclDirectoryOverSymlink) {
+  // Create a directory.
+  FilePath dir_name_from =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_From_Subdir"));
+  ASSERT_TRUE(CreateDirectory(dir_name_from));
+  ASSERT_TRUE(PathExists(dir_name_from));
+
+  // Create a subdirectory.
+  FilePath subdir_name_from = dir_name_from.Append(FILE_PATH_LITERAL("Subsub"));
+  CreateDirectory(subdir_name_from);
+  ASSERT_TRUE(PathExists(subdir_name_from));
+
+  // Create a destination directory with a symlink of the same name.
+  FilePath dir_name_to =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_To_Subdir"));
+  ASSERT_TRUE(CreateDirectory(dir_name_to));
+  ASSERT_TRUE(PathExists(dir_name_to));
+
+  FilePath symlink_target = dir_name_to.Append(FILE_PATH_LITERAL("Subsub"));
+  CreateTextFile(symlink_target, L"asdf");
+  ASSERT_TRUE(PathExists(symlink_target));
+
+  FilePath symlink_name_to =
+      dir_name_to.Append(FILE_PATH_LITERAL("Copy_Test_File.txt"));
+  ASSERT_TRUE(CreateSymbolicLink(symlink_target, symlink_name_to));
+  ASSERT_TRUE(PathExists(symlink_name_to));
+
+  // Check that copying fails.
+  EXPECT_FALSE(CopyDirectoryExcl(dir_name_from, dir_name_to, false));
+}
+
+TEST_F(FileUtilTest, CopyDirectoryExclFileOverDanglingSymlink) {
+  // Create a directory.
+  FilePath dir_name_from =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_From_Subdir"));
+  ASSERT_TRUE(CreateDirectory(dir_name_from));
+  ASSERT_TRUE(PathExists(dir_name_from));
+
+  // Create a file under the directory.
+  FilePath file_name_from =
+      dir_name_from.Append(FILE_PATH_LITERAL("Copy_Test_File.txt"));
+  CreateTextFile(file_name_from, L"Gooooooooooooooooooooogle");
+  ASSERT_TRUE(PathExists(file_name_from));
+
+  // Create a destination directory with a dangling symlink of the same name.
+  FilePath dir_name_to =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_To_Subdir"));
+  ASSERT_TRUE(CreateDirectory(dir_name_to));
+  ASSERT_TRUE(PathExists(dir_name_to));
+
+  FilePath symlink_target =
+      dir_name_to.Append(FILE_PATH_LITERAL("Symlink_Target.txt"));
+  CreateTextFile(symlink_target, L"asdf");
+  ASSERT_TRUE(PathExists(symlink_target));
+
+  FilePath symlink_name_to =
+      dir_name_to.Append(FILE_PATH_LITERAL("Copy_Test_File.txt"));
+  ASSERT_TRUE(CreateSymbolicLink(symlink_target, symlink_name_to));
+  ASSERT_TRUE(PathExists(symlink_name_to));
+  ASSERT_TRUE(DeleteFile(symlink_target, false));
+
+  // Check that copying fails and that no file was created for the symlink's
+  // referent.
+  EXPECT_FALSE(CopyDirectoryExcl(dir_name_from, dir_name_to, false));
+  EXPECT_FALSE(PathExists(symlink_target));
+}
+
+TEST_F(FileUtilTest, CopyDirectoryExclDirectoryOverDanglingSymlink) {
+  // Create a directory.
+  FilePath dir_name_from =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_From_Subdir"));
+  ASSERT_TRUE(CreateDirectory(dir_name_from));
+  ASSERT_TRUE(PathExists(dir_name_from));
+
+  // Create a subdirectory.
+  FilePath subdir_name_from = dir_name_from.Append(FILE_PATH_LITERAL("Subsub"));
+  CreateDirectory(subdir_name_from);
+  ASSERT_TRUE(PathExists(subdir_name_from));
+
+  // Create a destination directory with a dangling symlink of the same name.
+  FilePath dir_name_to =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_To_Subdir"));
+  ASSERT_TRUE(CreateDirectory(dir_name_to));
+  ASSERT_TRUE(PathExists(dir_name_to));
+
+  FilePath symlink_target =
+      dir_name_to.Append(FILE_PATH_LITERAL("Symlink_Target.txt"));
+  CreateTextFile(symlink_target, L"asdf");
+  ASSERT_TRUE(PathExists(symlink_target));
+
+  FilePath symlink_name_to =
+      dir_name_to.Append(FILE_PATH_LITERAL("Copy_Test_File.txt"));
+  ASSERT_TRUE(CreateSymbolicLink(symlink_target, symlink_name_to));
+  ASSERT_TRUE(PathExists(symlink_name_to));
+  ASSERT_TRUE(DeleteFile(symlink_target, false));
+
+  // Check that copying fails and that no directory was created for the
+  // symlink's referent.
+  EXPECT_FALSE(CopyDirectoryExcl(dir_name_from, dir_name_to, false));
+  EXPECT_FALSE(PathExists(symlink_target));
+}
+
+TEST_F(FileUtilTest, CopyDirectoryExclFileOverFifo) {
+  // Create a directory.
+  FilePath dir_name_from =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_From_Subdir"));
+  ASSERT_TRUE(CreateDirectory(dir_name_from));
+  ASSERT_TRUE(PathExists(dir_name_from));
+
+  // Create a file under the directory.
+  FilePath file_name_from =
+      dir_name_from.Append(FILE_PATH_LITERAL("Copy_Test_File.txt"));
+  CreateTextFile(file_name_from, L"Gooooooooooooooooooooogle");
+  ASSERT_TRUE(PathExists(file_name_from));
+
+  // Create a destination directory with a fifo of the same name.
+  FilePath dir_name_to =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_To_Subdir"));
+  ASSERT_TRUE(CreateDirectory(dir_name_to));
+  ASSERT_TRUE(PathExists(dir_name_to));
+
+  FilePath fifo_name_to =
+      dir_name_to.Append(FILE_PATH_LITERAL("Copy_Test_File.txt"));
+  ASSERT_EQ(0, mkfifo(fifo_name_to.value().c_str(), 0644));
+  ASSERT_TRUE(PathExists(fifo_name_to));
+
+  // Check that copying fails.
+  EXPECT_FALSE(CopyDirectoryExcl(dir_name_from, dir_name_to, false));
+}
+#endif  // defined(OS_POSIX)
+
+TEST_F(FileUtilTest, CopyFile) {
+  // Create a directory
+  FilePath dir_name_from =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_From_Subdir"));
+  ASSERT_TRUE(CreateDirectory(dir_name_from));
+  ASSERT_TRUE(DirectoryExists(dir_name_from));
+
+  // Create a file under the directory
+  FilePath file_name_from =
+      dir_name_from.Append(FILE_PATH_LITERAL("Copy_Test_File.txt"));
+  const std::wstring file_contents(L"Gooooooooooooooooooooogle");
+  CreateTextFile(file_name_from, file_contents);
+  ASSERT_TRUE(PathExists(file_name_from));
+
+  // Copy the file.
+  FilePath dest_file = dir_name_from.Append(FILE_PATH_LITERAL("DestFile.txt"));
+  ASSERT_TRUE(CopyFile(file_name_from, dest_file));
+
+  // Try to copy the file to another location using '..' in the path.
+  FilePath dest_file2(dir_name_from);
+  dest_file2 = dest_file2.AppendASCII("..");
+  dest_file2 = dest_file2.AppendASCII("DestFile.txt");
+  ASSERT_FALSE(CopyFile(file_name_from, dest_file2));
+
+  FilePath dest_file2_test(dir_name_from);
+  dest_file2_test = dest_file2_test.DirName();
+  dest_file2_test = dest_file2_test.AppendASCII("DestFile.txt");
+
+  // Check expected copy results.
+  EXPECT_TRUE(PathExists(file_name_from));
+  EXPECT_TRUE(PathExists(dest_file));
+  EXPECT_EQ(file_contents, ReadTextFile(dest_file));
+  EXPECT_FALSE(PathExists(dest_file2_test));
+  EXPECT_FALSE(PathExists(dest_file2));
+
+  // Change |file_name_from| contents.
+  const std::wstring new_file_contents(L"Moogle");
+  CreateTextFile(file_name_from, new_file_contents);
+  ASSERT_TRUE(PathExists(file_name_from));
+  EXPECT_EQ(new_file_contents, ReadTextFile(file_name_from));
+
+  // Overwrite |dest_file|.
+  ASSERT_TRUE(CopyFile(file_name_from, dest_file));
+  EXPECT_TRUE(PathExists(dest_file));
+  EXPECT_EQ(new_file_contents, ReadTextFile(dest_file));
+
+  // Create another directory.
+  FilePath dest_dir = temp_dir_.GetPath().Append(FPL("dest_dir"));
+  ASSERT_TRUE(CreateDirectory(dest_dir));
+  EXPECT_TRUE(DirectoryExists(dest_dir));
+  EXPECT_TRUE(IsDirectoryEmpty(dest_dir));
+
+  // Make sure CopyFile() cannot overwrite a directory.
+  ASSERT_FALSE(CopyFile(file_name_from, dest_dir));
+  EXPECT_TRUE(DirectoryExists(dest_dir));
+  EXPECT_TRUE(IsDirectoryEmpty(dest_dir));
+}
+
+// file_util winds up using autoreleased objects on the Mac, so this needs
+// to be a PlatformTest.
+typedef PlatformTest ReadOnlyFileUtilTest;
+
+TEST_F(ReadOnlyFileUtilTest, ContentsEqual) {
+  FilePath data_dir;
+  ASSERT_TRUE(PathService::Get(DIR_TEST_DATA, &data_dir));
+  data_dir = data_dir.AppendASCII("file_util");
+  ASSERT_TRUE(PathExists(data_dir));
+
+  FilePath original_file =
+      data_dir.Append(FILE_PATH_LITERAL("original.txt"));
+  FilePath same_file =
+      data_dir.Append(FILE_PATH_LITERAL("same.txt"));
+  FilePath same_length_file =
+      data_dir.Append(FILE_PATH_LITERAL("same_length.txt"));
+  FilePath different_file =
+      data_dir.Append(FILE_PATH_LITERAL("different.txt"));
+  FilePath different_first_file =
+      data_dir.Append(FILE_PATH_LITERAL("different_first.txt"));
+  FilePath different_last_file =
+      data_dir.Append(FILE_PATH_LITERAL("different_last.txt"));
+  FilePath empty1_file =
+      data_dir.Append(FILE_PATH_LITERAL("empty1.txt"));
+  FilePath empty2_file =
+      data_dir.Append(FILE_PATH_LITERAL("empty2.txt"));
+  FilePath shortened_file =
+      data_dir.Append(FILE_PATH_LITERAL("shortened.txt"));
+  FilePath binary_file =
+      data_dir.Append(FILE_PATH_LITERAL("binary_file.bin"));
+  FilePath binary_file_same =
+      data_dir.Append(FILE_PATH_LITERAL("binary_file_same.bin"));
+  FilePath binary_file_diff =
+      data_dir.Append(FILE_PATH_LITERAL("binary_file_diff.bin"));
+
+  EXPECT_TRUE(ContentsEqual(original_file, original_file));
+  EXPECT_TRUE(ContentsEqual(original_file, same_file));
+  EXPECT_FALSE(ContentsEqual(original_file, same_length_file));
+  EXPECT_FALSE(ContentsEqual(original_file, different_file));
+  EXPECT_FALSE(ContentsEqual(FilePath(FILE_PATH_LITERAL("bogusname")),
+                             FilePath(FILE_PATH_LITERAL("bogusname"))));
+  EXPECT_FALSE(ContentsEqual(original_file, different_first_file));
+  EXPECT_FALSE(ContentsEqual(original_file, different_last_file));
+  EXPECT_TRUE(ContentsEqual(empty1_file, empty2_file));
+  EXPECT_FALSE(ContentsEqual(original_file, shortened_file));
+  EXPECT_FALSE(ContentsEqual(shortened_file, original_file));
+  EXPECT_TRUE(ContentsEqual(binary_file, binary_file_same));
+  EXPECT_FALSE(ContentsEqual(binary_file, binary_file_diff));
+}
+
+TEST_F(ReadOnlyFileUtilTest, TextContentsEqual) {
+  FilePath data_dir;
+  ASSERT_TRUE(PathService::Get(DIR_TEST_DATA, &data_dir));
+  data_dir = data_dir.AppendASCII("file_util");
+  ASSERT_TRUE(PathExists(data_dir));
+
+  FilePath original_file =
+      data_dir.Append(FILE_PATH_LITERAL("original.txt"));
+  FilePath same_file =
+      data_dir.Append(FILE_PATH_LITERAL("same.txt"));
+  FilePath crlf_file =
+      data_dir.Append(FILE_PATH_LITERAL("crlf.txt"));
+  FilePath shortened_file =
+      data_dir.Append(FILE_PATH_LITERAL("shortened.txt"));
+  FilePath different_file =
+      data_dir.Append(FILE_PATH_LITERAL("different.txt"));
+  FilePath different_first_file =
+      data_dir.Append(FILE_PATH_LITERAL("different_first.txt"));
+  FilePath different_last_file =
+      data_dir.Append(FILE_PATH_LITERAL("different_last.txt"));
+  FilePath first1_file =
+      data_dir.Append(FILE_PATH_LITERAL("first1.txt"));
+  FilePath first2_file =
+      data_dir.Append(FILE_PATH_LITERAL("first2.txt"));
+  FilePath empty1_file =
+      data_dir.Append(FILE_PATH_LITERAL("empty1.txt"));
+  FilePath empty2_file =
+      data_dir.Append(FILE_PATH_LITERAL("empty2.txt"));
+  FilePath blank_line_file =
+      data_dir.Append(FILE_PATH_LITERAL("blank_line.txt"));
+  FilePath blank_line_crlf_file =
+      data_dir.Append(FILE_PATH_LITERAL("blank_line_crlf.txt"));
+
+  EXPECT_TRUE(TextContentsEqual(original_file, same_file));
+  EXPECT_TRUE(TextContentsEqual(original_file, crlf_file));
+  EXPECT_FALSE(TextContentsEqual(original_file, shortened_file));
+  EXPECT_FALSE(TextContentsEqual(original_file, different_file));
+  EXPECT_FALSE(TextContentsEqual(original_file, different_first_file));
+  EXPECT_FALSE(TextContentsEqual(original_file, different_last_file));
+  EXPECT_FALSE(TextContentsEqual(first1_file, first2_file));
+  EXPECT_TRUE(TextContentsEqual(empty1_file, empty2_file));
+  EXPECT_FALSE(TextContentsEqual(original_file, empty1_file));
+  EXPECT_TRUE(TextContentsEqual(blank_line_file, blank_line_crlf_file));
+}
+
+// We don't need equivalent functionality outside of Windows.
+#if defined(OS_WIN)
+TEST_F(FileUtilTest, CopyAndDeleteDirectoryTest) {
+  // Create a directory
+  FilePath dir_name_from = temp_dir_.GetPath().Append(
+      FILE_PATH_LITERAL("CopyAndDelete_From_Subdir"));
+  CreateDirectory(dir_name_from);
+  ASSERT_TRUE(PathExists(dir_name_from));
+
+  // Create a file under the directory
+  FilePath file_name_from =
+      dir_name_from.Append(FILE_PATH_LITERAL("CopyAndDelete_Test_File.txt"));
+  CreateTextFile(file_name_from, L"Gooooooooooooooooooooogle");
+  ASSERT_TRUE(PathExists(file_name_from));
+
+  // Move the directory by using CopyAndDeleteDirectory
+  FilePath dir_name_to =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("CopyAndDelete_To_Subdir"));
+  FilePath file_name_to =
+      dir_name_to.Append(FILE_PATH_LITERAL("CopyAndDelete_Test_File.txt"));
+
+  ASSERT_FALSE(PathExists(dir_name_to));
+
+  EXPECT_TRUE(internal::CopyAndDeleteDirectory(dir_name_from, dir_name_to));
+
+  // Check everything has been moved.
+  EXPECT_FALSE(PathExists(dir_name_from));
+  EXPECT_FALSE(PathExists(file_name_from));
+  EXPECT_TRUE(PathExists(dir_name_to));
+  EXPECT_TRUE(PathExists(file_name_to));
+}
+
+TEST_F(FileUtilTest, GetTempDirTest) {
+  static const TCHAR* kTmpKey = _T("TMP");
+  static const TCHAR* kTmpValues[] = {
+    _T(""), _T("C:"), _T("C:\\"), _T("C:\\tmp"), _T("C:\\tmp\\")
+  };
+  // Save the original $TMP.
+  size_t original_tmp_size;
+  TCHAR* original_tmp;
+  ASSERT_EQ(0, ::_tdupenv_s(&original_tmp, &original_tmp_size, kTmpKey));
+  // original_tmp may be NULL.
+
+  for (unsigned int i = 0; i < arraysize(kTmpValues); ++i) {
+    FilePath path;
+    ::_tputenv_s(kTmpKey, kTmpValues[i]);
+    GetTempDir(&path);
+    EXPECT_TRUE(path.IsAbsolute()) << "$TMP=" << kTmpValues[i] <<
+        " result=" << path.value();
+  }
+
+  // Restore the original $TMP.
+  if (original_tmp) {
+    ::_tputenv_s(kTmpKey, original_tmp);
+    free(original_tmp);
+  } else {
+    ::_tputenv_s(kTmpKey, _T(""));
+  }
+}
+#endif  // OS_WIN
+
+// Test that files opened by OpenFile are not set up for inheritance into child
+// procs.
+TEST_F(FileUtilTest, OpenFileNoInheritance) {
+  FilePath file_path(temp_dir_.GetPath().Append(FPL("a_file")));
+
+  for (const char* mode : {"wb", "r,ccs=UTF-8"}) {
+    SCOPED_TRACE(mode);
+    ASSERT_NO_FATAL_FAILURE(CreateTextFile(file_path, L"Geepers"));
+    FILE* file = OpenFile(file_path, mode);
+    ASSERT_NE(nullptr, file);
+    {
+      ScopedClosureRunner file_closer(Bind(IgnoreResult(&CloseFile), file));
+      bool is_inheritable = true;
+      ASSERT_NO_FATAL_FAILURE(GetIsInheritable(file, &is_inheritable));
+      EXPECT_FALSE(is_inheritable);
+    }
+    ASSERT_TRUE(DeleteFile(file_path, false));
+  }
+}
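+
+// A minimal sketch (not part of the original test helpers) of how a check
+// like the GetIsInheritable() helper used above could be implemented; the
+// function name is a hypothetical placeholder and the block is compiled out.
+#if 0
+void GetIsInheritableSketch(FILE* stream, bool* is_inheritable) {
+#if defined(OS_WIN)
+  HANDLE handle = reinterpret_cast<HANDLE>(_get_osfhandle(_fileno(stream)));
+  DWORD flags = 0;
+  ASSERT_TRUE(::GetHandleInformation(handle, &flags));
+  *is_inheritable = (flags & HANDLE_FLAG_INHERIT) != 0;
+#else
+  // On POSIX, a descriptor is inherited across exec unless FD_CLOEXEC is set.
+  int flags = fcntl(fileno(stream), F_GETFD);
+  ASSERT_NE(-1, flags);
+  *is_inheritable = (flags & FD_CLOEXEC) == 0;
+#endif
+}
+#endif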
+
+TEST_F(FileUtilTest, CreateTemporaryFileTest) {
+  FilePath temp_files[3];
+  for (int i = 0; i < 3; i++) {
+    ASSERT_TRUE(CreateTemporaryFile(&(temp_files[i])));
+    EXPECT_TRUE(PathExists(temp_files[i]));
+    EXPECT_FALSE(DirectoryExists(temp_files[i]));
+  }
+  for (int i = 0; i < 3; i++)
+    EXPECT_NE(temp_files[i], temp_files[(i+1)%3]);
+  for (int i = 0; i < 3; i++)
+    EXPECT_TRUE(DeleteFile(temp_files[i], false));
+}
+
+TEST_F(FileUtilTest, CreateAndOpenTemporaryFileTest) {
+  FilePath names[3];
+  FILE* fps[3];
+  int i;
+
+  // Create; make sure they are open and exist.
+  for (i = 0; i < 3; ++i) {
+    fps[i] = CreateAndOpenTemporaryFile(&(names[i]));
+    ASSERT_TRUE(fps[i]);
+    EXPECT_TRUE(PathExists(names[i]));
+  }
+
+  // Make sure all names are unique.
+  for (i = 0; i < 3; ++i) {
+    EXPECT_NE(names[i], names[(i+1)%3]);
+  }
+
+  // Close and delete.
+  for (i = 0; i < 3; ++i) {
+    EXPECT_TRUE(CloseFile(fps[i]));
+    EXPECT_TRUE(DeleteFile(names[i], false));
+  }
+}
+
+TEST_F(FileUtilTest, FileToFILE) {
+  File file;
+  FILE* stream = FileToFILE(std::move(file), "w");
+  EXPECT_FALSE(stream);
+
+  FilePath file_name = temp_dir_.GetPath().Append(FPL("The file.txt"));
+  file = File(file_name, File::FLAG_CREATE | File::FLAG_WRITE);
+  EXPECT_TRUE(file.IsValid());
+
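+  // FileToFILE() takes ownership of |file|, leaving it invalid on success.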
+  stream = FileToFILE(std::move(file), "w");
+  EXPECT_TRUE(stream);
+  EXPECT_FALSE(file.IsValid());
+  EXPECT_TRUE(CloseFile(stream));
+}
+
+TEST_F(FileUtilTest, CreateNewTempDirectoryTest) {
+  FilePath temp_dir;
+  ASSERT_TRUE(CreateNewTempDirectory(FilePath::StringType(), &temp_dir));
+  EXPECT_TRUE(PathExists(temp_dir));
+  EXPECT_TRUE(DeleteFile(temp_dir, false));
+}
+
+TEST_F(FileUtilTest, CreateNewTemporaryDirInDirTest) {
+  FilePath new_dir;
+  ASSERT_TRUE(CreateTemporaryDirInDir(
+      temp_dir_.GetPath(), FILE_PATH_LITERAL("CreateNewTemporaryDirInDirTest"),
+      &new_dir));
+  EXPECT_TRUE(PathExists(new_dir));
+  EXPECT_TRUE(temp_dir_.GetPath().IsParent(new_dir));
+  EXPECT_TRUE(DeleteFile(new_dir, false));
+}
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+TEST_F(FileUtilTest, GetShmemTempDirTest) {
+  FilePath dir;
+  EXPECT_TRUE(GetShmemTempDir(false, &dir));
+  EXPECT_TRUE(DirectoryExists(dir));
+}
+#endif
+
+TEST_F(FileUtilTest, GetHomeDirTest) {
+#if !defined(OS_ANDROID)  // Not implemented on Android.
+  // We don't actually know what the home directory is supposed to be without
+  // calling some OS functions which would just duplicate the implementation.
+  // So here we just test that it returns something "reasonable".
+  FilePath home = GetHomeDir();
+  ASSERT_FALSE(home.empty());
+  ASSERT_TRUE(home.IsAbsolute());
+#endif
+}
+
+TEST_F(FileUtilTest, CreateDirectoryTest) {
+  FilePath test_root =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("create_directory_test"));
+#if defined(OS_WIN)
+  FilePath test_path =
+      test_root.Append(FILE_PATH_LITERAL("dir\\tree\\likely\\doesnt\\exist\\"));
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  FilePath test_path =
+      test_root.Append(FILE_PATH_LITERAL("dir/tree/likely/doesnt/exist/"));
+#endif
+
+  EXPECT_FALSE(PathExists(test_path));
+  EXPECT_TRUE(CreateDirectory(test_path));
+  EXPECT_TRUE(PathExists(test_path));
+  // CreateDirectory() returns true when the directory already exists.
+  EXPECT_TRUE(CreateDirectory(test_path));
+
+  // Creating a directory on top of an existing non-directory fails.
+  test_path = test_path.Append(FILE_PATH_LITERAL("foobar.txt"));
+  EXPECT_FALSE(PathExists(test_path));
+  CreateTextFile(test_path, L"test file");
+  EXPECT_TRUE(PathExists(test_path));
+  EXPECT_FALSE(CreateDirectory(test_path));
+
+  EXPECT_TRUE(DeleteFile(test_root, true));
+  EXPECT_FALSE(PathExists(test_root));
+  EXPECT_FALSE(PathExists(test_path));
+
+  // Verify assumptions made by the Windows implementation:
+  // 1. The current directory always exists.
+  // 2. The root directory always exists.
+  ASSERT_TRUE(DirectoryExists(FilePath(FilePath::kCurrentDirectory)));
+  FilePath top_level = test_root;
+  while (top_level != top_level.DirName()) {
+    top_level = top_level.DirName();
+  }
+  ASSERT_TRUE(DirectoryExists(top_level));
+
+  // Given these assumptions hold, it should be safe to
+  // test that "creating" these directories succeeds.
+  EXPECT_TRUE(CreateDirectory(
+      FilePath(FilePath::kCurrentDirectory)));
+  EXPECT_TRUE(CreateDirectory(top_level));
+
+#if defined(OS_WIN)
+  FilePath invalid_drive(FILE_PATH_LITERAL("o:\\"));
+  FilePath invalid_path =
+      invalid_drive.Append(FILE_PATH_LITERAL("some\\inaccessible\\dir"));
+  if (!PathExists(invalid_drive)) {
+    EXPECT_FALSE(CreateDirectory(invalid_path));
+  }
+#endif
+}
+
+TEST_F(FileUtilTest, DetectDirectoryTest) {
+  // Check a directory
+  FilePath test_root =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("detect_directory_test"));
+  EXPECT_FALSE(PathExists(test_root));
+  EXPECT_TRUE(CreateDirectory(test_root));
+  EXPECT_TRUE(PathExists(test_root));
+  EXPECT_TRUE(DirectoryExists(test_root));
+  // Check a file
+  FilePath test_path =
+      test_root.Append(FILE_PATH_LITERAL("foobar.txt"));
+  EXPECT_FALSE(PathExists(test_path));
+  CreateTextFile(test_path, L"test file");
+  EXPECT_TRUE(PathExists(test_path));
+  EXPECT_FALSE(DirectoryExists(test_path));
+  EXPECT_TRUE(DeleteFile(test_path, false));
+
+  EXPECT_TRUE(DeleteFile(test_root, true));
+}
+
+TEST_F(FileUtilTest, FileEnumeratorTest) {
+  // Test an empty directory.
+  FileEnumerator f0(temp_dir_.GetPath(), true, FILES_AND_DIRECTORIES);
+  EXPECT_EQ(FPL(""), f0.Next().value());
+  EXPECT_EQ(FPL(""), f0.Next().value());
+
+  // Test an empty directory, non-recursively, including "..".
+  FileEnumerator f0_dotdot(
+      temp_dir_.GetPath(), false,
+      FILES_AND_DIRECTORIES | FileEnumerator::INCLUDE_DOT_DOT);
+  EXPECT_EQ(temp_dir_.GetPath().Append(FPL("..")).value(),
+            f0_dotdot.Next().value());
+  EXPECT_EQ(FPL(""), f0_dotdot.Next().value());
+
+  // create the directories
+  FilePath dir1 = temp_dir_.GetPath().Append(FPL("dir1"));
+  EXPECT_TRUE(CreateDirectory(dir1));
+  FilePath dir2 = temp_dir_.GetPath().Append(FPL("dir2"));
+  EXPECT_TRUE(CreateDirectory(dir2));
+  FilePath dir2inner = dir2.Append(FPL("inner"));
+  EXPECT_TRUE(CreateDirectory(dir2inner));
+
+  // create the files
+  FilePath dir2file = dir2.Append(FPL("dir2file.txt"));
+  CreateTextFile(dir2file, std::wstring());
+  FilePath dir2innerfile = dir2inner.Append(FPL("innerfile.txt"));
+  CreateTextFile(dir2innerfile, std::wstring());
+  FilePath file1 = temp_dir_.GetPath().Append(FPL("file1.txt"));
+  CreateTextFile(file1, std::wstring());
+  FilePath file2_rel = dir2.Append(FilePath::kParentDirectory)
+      .Append(FPL("file2.txt"));
+  CreateTextFile(file2_rel, std::wstring());
+  FilePath file2_abs = temp_dir_.GetPath().Append(FPL("file2.txt"));
+
+  // Only enumerate files.
+  FileEnumerator f1(temp_dir_.GetPath(), true, FileEnumerator::FILES);
+  FindResultCollector c1(&f1);
+  EXPECT_TRUE(c1.HasFile(file1));
+  EXPECT_TRUE(c1.HasFile(file2_abs));
+  EXPECT_TRUE(c1.HasFile(dir2file));
+  EXPECT_TRUE(c1.HasFile(dir2innerfile));
+  EXPECT_EQ(4, c1.size());
+
+  // Only enumerate directories.
+  FileEnumerator f2(temp_dir_.GetPath(), true, FileEnumerator::DIRECTORIES);
+  FindResultCollector c2(&f2);
+  EXPECT_TRUE(c2.HasFile(dir1));
+  EXPECT_TRUE(c2.HasFile(dir2));
+  EXPECT_TRUE(c2.HasFile(dir2inner));
+  EXPECT_EQ(3, c2.size());
+
+  // Only enumerate directories non-recursively.
+  FileEnumerator f2_non_recursive(temp_dir_.GetPath(), false,
+                                  FileEnumerator::DIRECTORIES);
+  FindResultCollector c2_non_recursive(&f2_non_recursive);
+  EXPECT_TRUE(c2_non_recursive.HasFile(dir1));
+  EXPECT_TRUE(c2_non_recursive.HasFile(dir2));
+  EXPECT_EQ(2, c2_non_recursive.size());
+
+  // Only enumerate directories, non-recursively, including "..".
+  FileEnumerator f2_dotdot(
+      temp_dir_.GetPath(), false,
+      FileEnumerator::DIRECTORIES | FileEnumerator::INCLUDE_DOT_DOT);
+  FindResultCollector c2_dotdot(&f2_dotdot);
+  EXPECT_TRUE(c2_dotdot.HasFile(dir1));
+  EXPECT_TRUE(c2_dotdot.HasFile(dir2));
+  EXPECT_TRUE(c2_dotdot.HasFile(temp_dir_.GetPath().Append(FPL(".."))));
+  EXPECT_EQ(3, c2_dotdot.size());
+
+  // Enumerate files and directories.
+  FileEnumerator f3(temp_dir_.GetPath(), true, FILES_AND_DIRECTORIES);
+  FindResultCollector c3(&f3);
+  EXPECT_TRUE(c3.HasFile(dir1));
+  EXPECT_TRUE(c3.HasFile(dir2));
+  EXPECT_TRUE(c3.HasFile(file1));
+  EXPECT_TRUE(c3.HasFile(file2_abs));
+  EXPECT_TRUE(c3.HasFile(dir2file));
+  EXPECT_TRUE(c3.HasFile(dir2inner));
+  EXPECT_TRUE(c3.HasFile(dir2innerfile));
+  EXPECT_EQ(7, c3.size());
+
+  // Non-recursive operation.
+  FileEnumerator f4(temp_dir_.GetPath(), false, FILES_AND_DIRECTORIES);
+  FindResultCollector c4(&f4);
+  EXPECT_TRUE(c4.HasFile(dir1));
+  EXPECT_TRUE(c4.HasFile(dir2));
+  EXPECT_TRUE(c4.HasFile(file1));
+  EXPECT_TRUE(c4.HasFile(file2_abs));
+  EXPECT_EQ(4, c4.size());
+
+  // Enumerate with a pattern.
+  FileEnumerator f5(temp_dir_.GetPath(), true, FILES_AND_DIRECTORIES,
+                    FPL("dir*"));
+  FindResultCollector c5(&f5);
+  EXPECT_TRUE(c5.HasFile(dir1));
+  EXPECT_TRUE(c5.HasFile(dir2));
+  EXPECT_TRUE(c5.HasFile(dir2file));
+  EXPECT_TRUE(c5.HasFile(dir2inner));
+  EXPECT_TRUE(c5.HasFile(dir2innerfile));
+  EXPECT_EQ(5, c5.size());
+
+#if defined(OS_WIN)
+  {
+    // Make dir1 point to dir2.
+    ReparsePoint reparse_point(dir1, dir2);
+    EXPECT_TRUE(reparse_point.IsValid());
+
+    // There can be a delay for the enumeration code to see the change on
+    // the file system so skip this test for XP.
+    // Enumerate the reparse point.
+    FileEnumerator f6(dir1, true, FILES_AND_DIRECTORIES);
+    FindResultCollector c6(&f6);
+    FilePath inner2 = dir1.Append(FPL("inner"));
+    EXPECT_TRUE(c6.HasFile(inner2));
+    EXPECT_TRUE(c6.HasFile(inner2.Append(FPL("innerfile.txt"))));
+    EXPECT_TRUE(c6.HasFile(dir1.Append(FPL("dir2file.txt"))));
+    EXPECT_EQ(3, c6.size());
+
+    // No changes for non recursive operation.
+    FileEnumerator f7(temp_dir_.GetPath(), false, FILES_AND_DIRECTORIES);
+    FindResultCollector c7(&f7);
+    EXPECT_TRUE(c7.HasFile(dir1));
+    EXPECT_TRUE(c7.HasFile(dir2));
+    EXPECT_TRUE(c7.HasFile(file1));
+    EXPECT_TRUE(c7.HasFile(file2_abs));
+    EXPECT_EQ(4, c7.size());
+
+    // Should not enumerate inside dir1 when using recursion.
+    FileEnumerator f8(temp_dir_.GetPath(), true, FILES_AND_DIRECTORIES);
+    FindResultCollector c8(&f8);
+    EXPECT_TRUE(c8.HasFile(dir1));
+    EXPECT_TRUE(c8.HasFile(dir2));
+    EXPECT_TRUE(c8.HasFile(file1));
+    EXPECT_TRUE(c8.HasFile(file2_abs));
+    EXPECT_TRUE(c8.HasFile(dir2file));
+    EXPECT_TRUE(c8.HasFile(dir2inner));
+    EXPECT_TRUE(c8.HasFile(dir2innerfile));
+    EXPECT_EQ(7, c8.size());
+  }
+#endif
+
+  // Make sure the destructor closes the find handle while in the middle of a
+  // query to allow TearDown to delete the directory.
+  FileEnumerator f9(temp_dir_.GetPath(), true, FILES_AND_DIRECTORIES);
+  EXPECT_FALSE(f9.Next().value().empty());  // Should have found something
+                                            // (we don't care what).
+}
+
+TEST_F(FileUtilTest, AppendToFile) {
+  FilePath data_dir =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("FilePathTest"));
+
+  // Create a fresh, empty copy of this directory.
+  if (PathExists(data_dir)) {
+    ASSERT_TRUE(DeleteFile(data_dir, true));
+  }
+  ASSERT_TRUE(CreateDirectory(data_dir));
+
+  FilePath foobar(data_dir.Append(FILE_PATH_LITERAL("foobar.txt")));
+
+  std::string data("hello");
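+  // AppendToFile() does not create missing files, so the first call fails;
+  // once WriteFile() has created the file, appending succeeds.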
+  EXPECT_FALSE(AppendToFile(foobar, data.c_str(), data.size()));
+  EXPECT_EQ(static_cast<int>(data.length()),
+            WriteFile(foobar, data.c_str(), data.length()));
+  EXPECT_TRUE(AppendToFile(foobar, data.c_str(), data.size()));
+
+  const std::wstring read_content = ReadTextFile(foobar);
+  EXPECT_EQ(L"hellohello", read_content);
+}
+
+TEST_F(FileUtilTest, ReadFile) {
+  // Create a test file to be read.
+  const std::string kTestData("The quick brown fox jumps over the lazy dog.");
+  FilePath file_path =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("ReadFileTest"));
+
+  ASSERT_EQ(static_cast<int>(kTestData.size()),
+            WriteFile(file_path, kTestData.data(), kTestData.size()));
+
+  // Make buffers of various sizes.
+  std::vector<char> small_buffer(kTestData.size() / 2);
+  std::vector<char> exact_buffer(kTestData.size());
+  std::vector<char> large_buffer(kTestData.size() * 2);
+
+  // Read the file with a smaller buffer.
+  int bytes_read_small = ReadFile(
+      file_path, &small_buffer[0], static_cast<int>(small_buffer.size()));
+  EXPECT_EQ(static_cast<int>(small_buffer.size()), bytes_read_small);
+  EXPECT_EQ(
+      std::string(kTestData.begin(), kTestData.begin() + small_buffer.size()),
+      std::string(small_buffer.begin(), small_buffer.end()));
+
+  // Read the file with a buffer of exactly the same size.
+  int bytes_read_exact = ReadFile(
+      file_path, &exact_buffer[0], static_cast<int>(exact_buffer.size()));
+  EXPECT_EQ(static_cast<int>(kTestData.size()), bytes_read_exact);
+  EXPECT_EQ(kTestData, std::string(exact_buffer.begin(), exact_buffer.end()));
+
+  // Read the file with a larger buffer.
+  int bytes_read_large = ReadFile(
+      file_path, &large_buffer[0], static_cast<int>(large_buffer.size()));
+  EXPECT_EQ(static_cast<int>(kTestData.size()), bytes_read_large);
+  EXPECT_EQ(kTestData, std::string(large_buffer.begin(),
+                                   large_buffer.begin() + kTestData.size()));
+
+  // Make sure the return value is -1 if the file doesn't exist.
+  FilePath file_path_not_exist =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("ReadFileNotExistTest"));
+  EXPECT_EQ(-1,
+            ReadFile(file_path_not_exist,
+                     &exact_buffer[0],
+                     static_cast<int>(exact_buffer.size())));
+}
+
+TEST_F(FileUtilTest, ReadFileToString) {
+  const char kTestData[] = "0123";
+  std::string data;
+
+  FilePath file_path =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("ReadFileToStringTest"));
+  FilePath file_path_dangerous =
+      temp_dir_.GetPath()
+          .Append(FILE_PATH_LITERAL(".."))
+          .Append(temp_dir_.GetPath().BaseName())
+          .Append(FILE_PATH_LITERAL("ReadFileToStringTest"));
+
+  // Create test file.
+  ASSERT_EQ(static_cast<int>(strlen(kTestData)),
+            WriteFile(file_path, kTestData, strlen(kTestData)));
+
+  EXPECT_TRUE(ReadFileToString(file_path, &data));
+  EXPECT_EQ(kTestData, data);
+
+  data = "temp";
+  EXPECT_FALSE(ReadFileToStringWithMaxSize(file_path, &data, 0));
+  EXPECT_EQ(0u, data.length());
+
+  data = "temp";
+  EXPECT_FALSE(ReadFileToStringWithMaxSize(file_path, &data, 2));
+  EXPECT_EQ("01", data);
+
+  data = "temp";
+  EXPECT_FALSE(ReadFileToStringWithMaxSize(file_path, &data, 3));
+  EXPECT_EQ("012", data);
+
+  data = "temp";
+  EXPECT_TRUE(ReadFileToStringWithMaxSize(file_path, &data, 4));
+  EXPECT_EQ("0123", data);
+
+  data = "temp";
+  EXPECT_TRUE(ReadFileToStringWithMaxSize(file_path, &data, 6));
+  EXPECT_EQ("0123", data);
+
+  EXPECT_TRUE(ReadFileToStringWithMaxSize(file_path, nullptr, 6));
+
+  EXPECT_TRUE(ReadFileToString(file_path, nullptr));
+
+  data = "temp";
+  EXPECT_FALSE(ReadFileToString(file_path_dangerous, &data));
+  EXPECT_EQ(0u, data.length());
+
+  // Delete test file.
+  EXPECT_TRUE(DeleteFile(file_path, false));
+
+  data = "temp";
+  EXPECT_FALSE(ReadFileToString(file_path, &data));
+  EXPECT_EQ(0u, data.length());
+
+  data = "temp";
+  EXPECT_FALSE(ReadFileToStringWithMaxSize(file_path, &data, 6));
+  EXPECT_EQ(0u, data.length());
+}
+
+#if !defined(OS_WIN)
+TEST_F(FileUtilTest, ReadFileToStringWithUnknownFileSize) {
+  FilePath file_path("/dev/zero");
+  std::string data = "temp";
+
+  EXPECT_FALSE(ReadFileToStringWithMaxSize(file_path, &data, 0));
+  EXPECT_EQ(0u, data.length());
+
+  data = "temp";
+  EXPECT_FALSE(ReadFileToStringWithMaxSize(file_path, &data, 2));
+  EXPECT_EQ(std::string(2, '\0'), data);
+
+  EXPECT_FALSE(ReadFileToStringWithMaxSize(file_path, nullptr, 6));
+
+  // Read more than the buffer size.
+  data = "temp";
+  EXPECT_FALSE(ReadFileToStringWithMaxSize(file_path, &data, kLargeFileSize));
+  EXPECT_EQ(kLargeFileSize, data.length());
+  EXPECT_EQ(std::string(kLargeFileSize, '\0'), data);
+
+  EXPECT_FALSE(ReadFileToStringWithMaxSize(file_path, nullptr, kLargeFileSize));
+}
+#endif  // !defined(OS_WIN)
+
+#if !defined(OS_WIN) && !defined(OS_NACL) && !defined(OS_FUCHSIA) && \
+    !defined(OS_IOS)
+#define ChildMain WriteToPipeChildMain
+#define ChildMainString "WriteToPipeChildMain"
+
+MULTIPROCESS_TEST_MAIN(ChildMain) {
+  const char kTestData[] = "0123";
+  base::CommandLine* command_line = base::CommandLine::ForCurrentProcess();
+  const FilePath pipe_path = command_line->GetSwitchValuePath("pipe-path");
+
+  int fd = open(pipe_path.value().c_str(), O_WRONLY);
+  CHECK_NE(-1, fd);
+  size_t written = 0;
+  while (written < strlen(kTestData)) {
+    ssize_t res = write(fd, kTestData + written, strlen(kTestData) - written);
+    if (res == -1)
+      break;
+    written += res;
+  }
+  CHECK_EQ(strlen(kTestData), written);
+  CHECK_EQ(0, close(fd));
+  return 0;
+}
+
+#define MoreThanBufferSizeChildMain WriteToPipeMoreThanBufferSizeChildMain
+#define MoreThanBufferSizeChildMainString \
+  "WriteToPipeMoreThanBufferSizeChildMain"
+
+MULTIPROCESS_TEST_MAIN(MoreThanBufferSizeChildMain) {
+  std::string data(kLargeFileSize, 'c');
+  base::CommandLine* command_line = base::CommandLine::ForCurrentProcess();
+  const FilePath pipe_path = command_line->GetSwitchValuePath("pipe-path");
+
+  int fd = open(pipe_path.value().c_str(), O_WRONLY);
+  CHECK_NE(-1, fd);
+
+  size_t written = 0;
+  while (written < data.size()) {
+    ssize_t res = write(fd, data.c_str() + written, data.size() - written);
+    if (res == -1) {
+      // We are unable to write because the reading process has already read
+      // the requested number of bytes and closed the pipe.
+      break;
+    }
+    written += res;
+  }
+  CHECK_EQ(0, close(fd));
+  return 0;
+}
+
+TEST_F(FileUtilTest, ReadFileToStringWithNamedPipe) {
+  FilePath pipe_path =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("test_pipe"));
+  ASSERT_EQ(0, mkfifo(pipe_path.value().c_str(), 0600));
+
+  base::CommandLine child_command_line(
+      base::GetMultiProcessTestChildBaseCommandLine());
+  child_command_line.AppendSwitchPath("pipe-path", pipe_path);
+
+  {
+    base::Process child_process = base::SpawnMultiProcessTestChild(
+        ChildMainString, child_command_line, base::LaunchOptions());
+    ASSERT_TRUE(child_process.IsValid());
+
+    std::string data = "temp";
+    EXPECT_FALSE(ReadFileToStringWithMaxSize(pipe_path, &data, 2));
+    EXPECT_EQ("01", data);
+
+    int rv = -1;
+    ASSERT_TRUE(WaitForMultiprocessTestChildExit(
+        child_process, TestTimeouts::action_timeout(), &rv));
+    ASSERT_EQ(0, rv);
+  }
+  {
+    base::Process child_process = base::SpawnMultiProcessTestChild(
+        ChildMainString, child_command_line, base::LaunchOptions());
+    ASSERT_TRUE(child_process.IsValid());
+
+    std::string data = "temp";
+    EXPECT_TRUE(ReadFileToStringWithMaxSize(pipe_path, &data, 6));
+    EXPECT_EQ("0123", data);
+
+    int rv = -1;
+    ASSERT_TRUE(WaitForMultiprocessTestChildExit(
+        child_process, TestTimeouts::action_timeout(), &rv));
+    ASSERT_EQ(0, rv);
+  }
+  {
+    base::Process child_process = base::SpawnMultiProcessTestChild(
+        MoreThanBufferSizeChildMainString, child_command_line,
+        base::LaunchOptions());
+    ASSERT_TRUE(child_process.IsValid());
+
+    std::string data = "temp";
+    EXPECT_FALSE(ReadFileToStringWithMaxSize(pipe_path, &data, 6));
+    EXPECT_EQ("cccccc", data);
+
+    int rv = -1;
+    ASSERT_TRUE(WaitForMultiprocessTestChildExit(
+        child_process, TestTimeouts::action_timeout(), &rv));
+    ASSERT_EQ(0, rv);
+  }
+  {
+    base::Process child_process = base::SpawnMultiProcessTestChild(
+        MoreThanBufferSizeChildMainString, child_command_line,
+        base::LaunchOptions());
+    ASSERT_TRUE(child_process.IsValid());
+
+    std::string data = "temp";
+    EXPECT_FALSE(
+        ReadFileToStringWithMaxSize(pipe_path, &data, kLargeFileSize - 1));
+    EXPECT_EQ(std::string(kLargeFileSize - 1, 'c'), data);
+
+    int rv = -1;
+    ASSERT_TRUE(WaitForMultiprocessTestChildExit(
+        child_process, TestTimeouts::action_timeout(), &rv));
+    ASSERT_EQ(0, rv);
+  }
+  {
+    base::Process child_process = base::SpawnMultiProcessTestChild(
+        MoreThanBufferSizeChildMainString, child_command_line,
+        base::LaunchOptions());
+    ASSERT_TRUE(child_process.IsValid());
+
+    std::string data = "temp";
+    EXPECT_TRUE(ReadFileToStringWithMaxSize(pipe_path, &data, kLargeFileSize));
+    EXPECT_EQ(std::string(kLargeFileSize, 'c'), data);
+
+    int rv = -1;
+    ASSERT_TRUE(WaitForMultiprocessTestChildExit(
+        child_process, TestTimeouts::action_timeout(), &rv));
+    ASSERT_EQ(0, rv);
+  }
+  {
+    base::Process child_process = base::SpawnMultiProcessTestChild(
+        MoreThanBufferSizeChildMainString, child_command_line,
+        base::LaunchOptions());
+    ASSERT_TRUE(child_process.IsValid());
+
+    std::string data = "temp";
+    EXPECT_TRUE(
+        ReadFileToStringWithMaxSize(pipe_path, &data, kLargeFileSize * 5));
+    EXPECT_EQ(std::string(kLargeFileSize, 'c'), data);
+
+    int rv = -1;
+    ASSERT_TRUE(WaitForMultiprocessTestChildExit(
+        child_process, TestTimeouts::action_timeout(), &rv));
+    ASSERT_EQ(0, rv);
+  }
+
+  ASSERT_EQ(0, unlink(pipe_path.value().c_str()));
+}
+#endif  // !defined(OS_WIN) && !defined(OS_NACL) && !defined(OS_FUCHSIA) &&
+        // !defined(OS_IOS)
+
+#if defined(OS_WIN)
+#define ChildMain WriteToPipeChildMain
+#define ChildMainString "WriteToPipeChildMain"
+
+MULTIPROCESS_TEST_MAIN(ChildMain) {
+  const char kTestData[] = "0123";
+  base::CommandLine* command_line = base::CommandLine::ForCurrentProcess();
+  const FilePath pipe_path = command_line->GetSwitchValuePath("pipe-path");
+  std::string switch_string = command_line->GetSwitchValueASCII("sync_event");
+  EXPECT_FALSE(switch_string.empty());
+  unsigned int switch_uint = 0;
+  EXPECT_TRUE(StringToUint(switch_string, &switch_uint));
+  win::ScopedHandle sync_event(win::Uint32ToHandle(switch_uint));
+
+  HANDLE ph = CreateNamedPipe(pipe_path.value().c_str(), PIPE_ACCESS_OUTBOUND,
+                              PIPE_WAIT, 1, 0, 0, 0, NULL);
+  EXPECT_NE(ph, INVALID_HANDLE_VALUE);
+  EXPECT_TRUE(SetEvent(sync_event.Get()));
+  EXPECT_TRUE(ConnectNamedPipe(ph, NULL));
+
+  DWORD written;
+  EXPECT_TRUE(::WriteFile(ph, kTestData, strlen(kTestData), &written, NULL));
+  EXPECT_EQ(strlen(kTestData), written);
+  CloseHandle(ph);
+  return 0;
+}
+
+#define MoreThanBufferSizeChildMain WriteToPipeMoreThanBufferSizeChildMain
+#define MoreThanBufferSizeChildMainString \
+  "WriteToPipeMoreThanBufferSizeChildMain"
+
+MULTIPROCESS_TEST_MAIN(MoreThanBufferSizeChildMain) {
+  std::string data(kLargeFileSize, 'c');
+  base::CommandLine* command_line = base::CommandLine::ForCurrentProcess();
+  const FilePath pipe_path = command_line->GetSwitchValuePath("pipe-path");
+  std::string switch_string = command_line->GetSwitchValueASCII("sync_event");
+  EXPECT_FALSE(switch_string.empty());
+  unsigned int switch_uint = 0;
+  EXPECT_TRUE(StringToUint(switch_string, &switch_uint));
+  win::ScopedHandle sync_event(win::Uint32ToHandle(switch_uint));
+
+  HANDLE ph = CreateNamedPipe(pipe_path.value().c_str(), PIPE_ACCESS_OUTBOUND,
+                              PIPE_WAIT, 1, data.size(), data.size(), 0, NULL);
+  EXPECT_NE(ph, INVALID_HANDLE_VALUE);
+  EXPECT_TRUE(SetEvent(sync_event.Get()));
+  EXPECT_TRUE(ConnectNamedPipe(ph, NULL));
+
+  DWORD written;
+  EXPECT_TRUE(::WriteFile(ph, data.c_str(), data.size(), &written, NULL));
+  EXPECT_EQ(data.size(), written);
+  CloseHandle(ph);
+  return 0;
+}
+
+TEST_F(FileUtilTest, ReadFileToStringWithNamedPipe) {
+  FilePath pipe_path(FILE_PATH_LITERAL("\\\\.\\pipe\\test_pipe"));
+  win::ScopedHandle sync_event(CreateEvent(0, false, false, nullptr));
+
+  base::CommandLine child_command_line(
+      base::GetMultiProcessTestChildBaseCommandLine());
+  child_command_line.AppendSwitchPath("pipe-path", pipe_path);
+  child_command_line.AppendSwitchASCII(
+      "sync_event", UintToString(win::HandleToUint32(sync_event.Get())));
+
+  base::LaunchOptions options;
+  options.handles_to_inherit.push_back(sync_event.Get());
+
+  {
+    base::Process child_process = base::SpawnMultiProcessTestChild(
+        ChildMainString, child_command_line, options);
+    ASSERT_TRUE(child_process.IsValid());
+    // Wait for pipe creation in child process.
+    EXPECT_EQ(WAIT_OBJECT_0, WaitForSingleObject(sync_event.Get(), INFINITE));
+
+    std::string data = "temp";
+    EXPECT_FALSE(ReadFileToStringWithMaxSize(pipe_path, &data, 2));
+    EXPECT_EQ("01", data);
+
+    int rv = -1;
+    ASSERT_TRUE(WaitForMultiprocessTestChildExit(
+        child_process, TestTimeouts::action_timeout(), &rv));
+    ASSERT_EQ(0, rv);
+  }
+  {
+    base::Process child_process = base::SpawnMultiProcessTestChild(
+        ChildMainString, child_command_line, options);
+    ASSERT_TRUE(child_process.IsValid());
+    // Wait for pipe creation in child process.
+    EXPECT_EQ(WAIT_OBJECT_0, WaitForSingleObject(sync_event.Get(), INFINITE));
+
+    std::string data = "temp";
+    EXPECT_TRUE(ReadFileToStringWithMaxSize(pipe_path, &data, 6));
+    EXPECT_EQ("0123", data);
+
+    int rv = -1;
+    ASSERT_TRUE(WaitForMultiprocessTestChildExit(
+        child_process, TestTimeouts::action_timeout(), &rv));
+    ASSERT_EQ(0, rv);
+  }
+  {
+    base::Process child_process = base::SpawnMultiProcessTestChild(
+        MoreThanBufferSizeChildMainString, child_command_line, options);
+    ASSERT_TRUE(child_process.IsValid());
+    // Wait for pipe creation in child process.
+    EXPECT_EQ(WAIT_OBJECT_0, WaitForSingleObject(sync_event.Get(), INFINITE));
+
+    std::string data = "temp";
+    EXPECT_FALSE(ReadFileToStringWithMaxSize(pipe_path, &data, 6));
+    EXPECT_EQ("cccccc", data);
+
+    int rv = -1;
+    ASSERT_TRUE(WaitForMultiprocessTestChildExit(
+        child_process, TestTimeouts::action_timeout(), &rv));
+    ASSERT_EQ(0, rv);
+  }
+  {
+    base::Process child_process = base::SpawnMultiProcessTestChild(
+        MoreThanBufferSizeChildMainString, child_command_line, options);
+    ASSERT_TRUE(child_process.IsValid());
+    // Wait for pipe creation in child process.
+    EXPECT_EQ(WAIT_OBJECT_0, WaitForSingleObject(sync_event.Get(), INFINITE));
+
+    std::string data = "temp";
+    EXPECT_FALSE(
+        ReadFileToStringWithMaxSize(pipe_path, &data, kLargeFileSize - 1));
+    EXPECT_EQ(std::string(kLargeFileSize - 1, 'c'), data);
+
+    int rv = -1;
+    ASSERT_TRUE(WaitForMultiprocessTestChildExit(
+        child_process, TestTimeouts::action_timeout(), &rv));
+    ASSERT_EQ(0, rv);
+  }
+  {
+    base::Process child_process = base::SpawnMultiProcessTestChild(
+        MoreThanBufferSizeChildMainString, child_command_line, options);
+    ASSERT_TRUE(child_process.IsValid());
+    // Wait for pipe creation in child process.
+    EXPECT_EQ(WAIT_OBJECT_0, WaitForSingleObject(sync_event.Get(), INFINITE));
+
+    std::string data = "temp";
+    EXPECT_TRUE(ReadFileToStringWithMaxSize(pipe_path, &data, kLargeFileSize));
+    EXPECT_EQ(std::string(kLargeFileSize, 'c'), data);
+
+    int rv = -1;
+    ASSERT_TRUE(WaitForMultiprocessTestChildExit(
+        child_process, TestTimeouts::action_timeout(), &rv));
+    ASSERT_EQ(0, rv);
+  }
+  {
+    base::Process child_process = base::SpawnMultiProcessTestChild(
+        MoreThanBufferSizeChildMainString, child_command_line, options);
+    ASSERT_TRUE(child_process.IsValid());
+    // Wait for pipe creation in child process.
+    EXPECT_EQ(WAIT_OBJECT_0, WaitForSingleObject(sync_event.Get(), INFINITE));
+
+    std::string data = "temp";
+    EXPECT_TRUE(
+        ReadFileToStringWithMaxSize(pipe_path, &data, kLargeFileSize * 5));
+    EXPECT_EQ(std::string(kLargeFileSize, 'c'), data);
+
+    int rv = -1;
+    ASSERT_TRUE(WaitForMultiprocessTestChildExit(
+        child_process, TestTimeouts::action_timeout(), &rv));
+    ASSERT_EQ(0, rv);
+  }
+}
+#endif  // defined(OS_WIN)
+
+#if defined(OS_POSIX) && !defined(OS_MACOSX) && !defined(OS_FUCHSIA)
+TEST_F(FileUtilTest, ReadFileToStringWithProcFileSystem) {
+  FilePath file_path("/proc/cpuinfo");
+  std::string data = "temp";
+
+  EXPECT_FALSE(ReadFileToStringWithMaxSize(file_path, &data, 0));
+  EXPECT_EQ(0u, data.length());
+
+  data = "temp";
+  EXPECT_FALSE(ReadFileToStringWithMaxSize(file_path, &data, 2));
+#if defined(OS_ANDROID)
+  EXPECT_EQ("Pr", data);
+#else
+  EXPECT_EQ("pr", data);
+#endif
+
+  data = "temp";
+  EXPECT_FALSE(ReadFileToStringWithMaxSize(file_path, &data, 4));
+#if defined(OS_ANDROID)
+  EXPECT_EQ("Proc", data);
+#else
+  EXPECT_EQ("proc", data);
+#endif
+
+  EXPECT_FALSE(ReadFileToStringWithMaxSize(file_path, nullptr, 4));
+}
+#endif  // defined(OS_POSIX) && !defined(OS_MACOSX) && !defined(OS_FUCHSIA)
+
+TEST_F(FileUtilTest, ReadFileToStringWithLargeFile) {
+  std::string data(kLargeFileSize, 'c');
+
+  FilePath file_path =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("ReadFileToStringTest"));
+
+  // Create test file.
+  ASSERT_EQ(static_cast<int>(kLargeFileSize),
+            WriteFile(file_path, data.c_str(), kLargeFileSize));
+
+  std::string actual_data = "temp";
+  EXPECT_TRUE(ReadFileToString(file_path, &actual_data));
+  EXPECT_EQ(data, actual_data);
+
+  actual_data = "temp";
+  EXPECT_FALSE(ReadFileToStringWithMaxSize(file_path, &actual_data, 0));
+  EXPECT_EQ(0u, actual_data.length());
+
+  // Read more than the buffer size.
+  actual_data = "temp";
+  EXPECT_FALSE(
+      ReadFileToStringWithMaxSize(file_path, &actual_data, kLargeFileSize - 1));
+  EXPECT_EQ(std::string(kLargeFileSize - 1, 'c'), actual_data);
+}
+
+TEST_F(FileUtilTest, TouchFile) {
+  FilePath data_dir =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("FilePathTest"));
+
+  // Create a fresh, empty copy of this directory.
+  if (PathExists(data_dir)) {
+    ASSERT_TRUE(DeleteFile(data_dir, true));
+  }
+  ASSERT_TRUE(CreateDirectory(data_dir));
+
+  FilePath foobar(data_dir.Append(FILE_PATH_LITERAL("foobar.txt")));
+  std::string data("hello");
+  ASSERT_EQ(static_cast<int>(data.length()),
+            WriteFile(foobar, data.c_str(), data.length()));
+
+  Time access_time;
+  // This timestamp is divisible by one day (in the local timezone)
+  // so that it works on FAT too.
+  ASSERT_TRUE(Time::FromString("Wed, 16 Nov 1994, 00:00:00",
+                               &access_time));
+
+  Time modification_time;
+  // Note that this timestamp is divisible by two (seconds) - FAT stores
+  // modification times with 2s resolution.
+  ASSERT_TRUE(Time::FromString("Tue, 15 Nov 1994, 12:45:26 GMT",
+              &modification_time));
+
+  ASSERT_TRUE(TouchFile(foobar, access_time, modification_time));
+  File::Info file_info;
+  ASSERT_TRUE(GetFileInfo(foobar, &file_info));
+#if !defined(OS_FUCHSIA)
+  // Access time is not supported on Fuchsia, see https://crbug.com/735233.
+  EXPECT_EQ(access_time.ToInternalValue(),
+            file_info.last_accessed.ToInternalValue());
+#endif
+  EXPECT_EQ(modification_time.ToInternalValue(),
+            file_info.last_modified.ToInternalValue());
+}
+
+TEST_F(FileUtilTest, IsDirectoryEmpty) {
+  FilePath empty_dir =
+      temp_dir_.GetPath().Append(FILE_PATH_LITERAL("EmptyDir"));
+
+  ASSERT_FALSE(PathExists(empty_dir));
+
+  ASSERT_TRUE(CreateDirectory(empty_dir));
+
+  EXPECT_TRUE(IsDirectoryEmpty(empty_dir));
+
+  FilePath foo(empty_dir.Append(FILE_PATH_LITERAL("foo.txt")));
+  std::string bar("baz");
+  ASSERT_EQ(static_cast<int>(bar.length()),
+            WriteFile(foo, bar.c_str(), bar.length()));
+
+  EXPECT_FALSE(IsDirectoryEmpty(empty_dir));
+}
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+
+TEST_F(FileUtilTest, SetNonBlocking) {
+  const int kInvalidFd = 99999;
+  EXPECT_FALSE(SetNonBlocking(kInvalidFd));
+
+  base::FilePath path;
+  ASSERT_TRUE(PathService::Get(base::DIR_TEST_DATA, &path));
+  path = path.Append(FPL("file_util")).Append(FPL("original.txt"));
+  ScopedFD fd(open(path.value().c_str(), O_RDONLY));
+  ASSERT_GE(fd.get(), 0);
+  EXPECT_TRUE(SetNonBlocking(fd.get()));
+}
+
+TEST_F(FileUtilTest, SetCloseOnExec) {
+  const int kInvalidFd = 99999;
+  EXPECT_FALSE(SetCloseOnExec(kInvalidFd));
+
+  base::FilePath path;
+  ASSERT_TRUE(PathService::Get(base::DIR_TEST_DATA, &path));
+  path = path.Append(FPL("file_util")).Append(FPL("original.txt"));
+  ScopedFD fd(open(path.value().c_str(), O_RDONLY));
+  ASSERT_GE(fd.get(), 0);
+  EXPECT_TRUE(SetCloseOnExec(fd.get()));
+}
+
+#endif
+
+#if defined(OS_POSIX)
+
+// Testing VerifyPathControlledByAdmin() is hard, because there is no
+// way a test can make a file owned by root, or change file paths
+// at the root of the file system.  VerifyPathControlledByAdmin()
+// is implemented as a call to VerifyPathControlledByUser, which gives
+// us the ability to test with paths under the test's temp directory,
+// using a user id we control.
+// Pull tests of VerifyPathControlledByUser() into a separate test class
+// with a common SetUp() method.
+class VerifyPathControlledByUserTest : public FileUtilTest {
+ protected:
+  void SetUp() override {
+    FileUtilTest::SetUp();
+
+    // Create a basic structure used by each test.
+    // base_dir_
+    //  |-> sub_dir_
+    //       |-> text_file_
+
+    base_dir_ = temp_dir_.GetPath().AppendASCII("base_dir");
+    ASSERT_TRUE(CreateDirectory(base_dir_));
+
+    sub_dir_ = base_dir_.AppendASCII("sub_dir");
+    ASSERT_TRUE(CreateDirectory(sub_dir_));
+
+    text_file_ = sub_dir_.AppendASCII("file.txt");
+    CreateTextFile(text_file_, L"This text file has some text in it.");
+
+    // Get the user and group that files are created with from |base_dir_|.
+    struct stat stat_buf;
+    ASSERT_EQ(0, stat(base_dir_.value().c_str(), &stat_buf));
+    uid_ = stat_buf.st_uid;
+    ok_gids_.insert(stat_buf.st_gid);
+    bad_gids_.insert(stat_buf.st_gid + 1);
+
+    ASSERT_EQ(uid_, getuid());  // This process should be the owner.
+
+    // To ensure that umask settings do not cause the initial state
+    // of permissions to be different from what we expect, explicitly
+    // set permissions on the directories we create.
+    // Make all files and directories non-world-writable.
+
+    // User and group can read, write, and traverse.
+    int enabled_permissions =
+        FILE_PERMISSION_USER_MASK | FILE_PERMISSION_GROUP_MASK;
+    // Other users can't read, write, or traverse.
+    int disabled_permissions = FILE_PERMISSION_OTHERS_MASK;
+
+    ASSERT_NO_FATAL_FAILURE(
+        ChangePosixFilePermissions(
+            base_dir_, enabled_permissions, disabled_permissions));
+    ASSERT_NO_FATAL_FAILURE(
+        ChangePosixFilePermissions(
+            sub_dir_, enabled_permissions, disabled_permissions));
+  }
+
+  FilePath base_dir_;
+  FilePath sub_dir_;
+  FilePath text_file_;
+  uid_t uid_;
+
+  std::set<gid_t> ok_gids_;
+  std::set<gid_t> bad_gids_;
+};
+
+TEST_F(VerifyPathControlledByUserTest, BadPaths) {
+  // File does not exist.
+  FilePath does_not_exist = base_dir_.AppendASCII("does")
+                                     .AppendASCII("not")
+                                     .AppendASCII("exist");
+  EXPECT_FALSE(
+      VerifyPathControlledByUser(base_dir_, does_not_exist, uid_, ok_gids_));
+
+  // |base| is not a parent path of |path|.
+  EXPECT_FALSE(VerifyPathControlledByUser(sub_dir_, base_dir_, uid_, ok_gids_));
+
+  // An empty base path will fail to be a prefix for any path.
+  FilePath empty;
+  EXPECT_FALSE(VerifyPathControlledByUser(empty, base_dir_, uid_, ok_gids_));
+
+  // Finding that a bad call fails proves nothing unless a good call succeeds.
+  EXPECT_TRUE(VerifyPathControlledByUser(base_dir_, sub_dir_, uid_, ok_gids_));
+}
+
+TEST_F(VerifyPathControlledByUserTest, Symlinks) {
+  // Symlinks in the path should cause failure.
+
+  // Symlink to the file at the end of the path.
+  FilePath file_link = base_dir_.AppendASCII("file_link");
+  ASSERT_TRUE(CreateSymbolicLink(text_file_, file_link))
+      << "Failed to create symlink.";
+
+  EXPECT_FALSE(
+      VerifyPathControlledByUser(base_dir_, file_link, uid_, ok_gids_));
+  EXPECT_FALSE(
+      VerifyPathControlledByUser(file_link, file_link, uid_, ok_gids_));
+
+  // Symlink from one directory to another within the path.
+  FilePath link_to_sub_dir = base_dir_.AppendASCII("link_to_sub_dir");
+  ASSERT_TRUE(CreateSymbolicLink(sub_dir_, link_to_sub_dir))
+      << "Failed to create symlink.";
+
+  FilePath file_path_with_link = link_to_sub_dir.AppendASCII("file.txt");
+  ASSERT_TRUE(PathExists(file_path_with_link));
+
+  EXPECT_FALSE(VerifyPathControlledByUser(base_dir_, file_path_with_link, uid_,
+                                          ok_gids_));
+
+  EXPECT_FALSE(VerifyPathControlledByUser(link_to_sub_dir, file_path_with_link,
+                                          uid_, ok_gids_));
+
+  // Symlinks in parents of base path are allowed.
+  EXPECT_TRUE(VerifyPathControlledByUser(file_path_with_link,
+                                         file_path_with_link, uid_, ok_gids_));
+}
+
+TEST_F(VerifyPathControlledByUserTest, OwnershipChecks) {
+  // Get a uid that is not the uid of files we create.
+  uid_t bad_uid = uid_ + 1;
+
+  // Make all files and directories non-world-writable.
+  ASSERT_NO_FATAL_FAILURE(
+      ChangePosixFilePermissions(base_dir_, 0u, S_IWOTH));
+  ASSERT_NO_FATAL_FAILURE(
+      ChangePosixFilePermissions(sub_dir_, 0u, S_IWOTH));
+  ASSERT_NO_FATAL_FAILURE(
+      ChangePosixFilePermissions(text_file_, 0u, S_IWOTH));
+
+  // We control these paths.
+  EXPECT_TRUE(VerifyPathControlledByUser(base_dir_, sub_dir_, uid_, ok_gids_));
+  EXPECT_TRUE(
+      VerifyPathControlledByUser(base_dir_, text_file_, uid_, ok_gids_));
+  EXPECT_TRUE(VerifyPathControlledByUser(sub_dir_, text_file_, uid_, ok_gids_));
+
+  // Another user does not control these paths.
+  EXPECT_FALSE(
+      VerifyPathControlledByUser(base_dir_, sub_dir_, bad_uid, ok_gids_));
+  EXPECT_FALSE(
+      VerifyPathControlledByUser(base_dir_, text_file_, bad_uid, ok_gids_));
+  EXPECT_FALSE(
+      VerifyPathControlledByUser(sub_dir_, text_file_, bad_uid, ok_gids_));
+
+  // Another group does not control the paths.
+  EXPECT_FALSE(
+      VerifyPathControlledByUser(base_dir_, sub_dir_, uid_, bad_gids_));
+  EXPECT_FALSE(
+      VerifyPathControlledByUser(base_dir_, text_file_, uid_, bad_gids_));
+  EXPECT_FALSE(
+      VerifyPathControlledByUser(sub_dir_, text_file_, uid_, bad_gids_));
+}
+
+TEST_F(VerifyPathControlledByUserTest, GroupWriteTest) {
+  // Make all files and directories writable only by their owner.
+  ASSERT_NO_FATAL_FAILURE(
+      ChangePosixFilePermissions(base_dir_, 0u, S_IWOTH|S_IWGRP));
+  ASSERT_NO_FATAL_FAILURE(
+      ChangePosixFilePermissions(sub_dir_, 0u, S_IWOTH|S_IWGRP));
+  ASSERT_NO_FATAL_FAILURE(
+      ChangePosixFilePermissions(text_file_, 0u, S_IWOTH|S_IWGRP));
+
+  // Any group is okay because the path is not group-writable.
+  EXPECT_TRUE(VerifyPathControlledByUser(base_dir_, sub_dir_, uid_, ok_gids_));
+  EXPECT_TRUE(
+      VerifyPathControlledByUser(base_dir_, text_file_, uid_, ok_gids_));
+  EXPECT_TRUE(VerifyPathControlledByUser(sub_dir_, text_file_, uid_, ok_gids_));
+
+  EXPECT_TRUE(VerifyPathControlledByUser(base_dir_, sub_dir_, uid_, bad_gids_));
+  EXPECT_TRUE(
+      VerifyPathControlledByUser(base_dir_, text_file_, uid_, bad_gids_));
+  EXPECT_TRUE(
+      VerifyPathControlledByUser(sub_dir_, text_file_, uid_, bad_gids_));
+
+  // No group is okay, because we don't check the group
+  // if no group can write.
+  std::set<gid_t> no_gids;  // Empty set of gids.
+  EXPECT_TRUE(VerifyPathControlledByUser(base_dir_, sub_dir_, uid_, no_gids));
+  EXPECT_TRUE(VerifyPathControlledByUser(base_dir_, text_file_, uid_, no_gids));
+  EXPECT_TRUE(VerifyPathControlledByUser(sub_dir_, text_file_, uid_, no_gids));
+
+  // Make all files and directories writable by their group.
+  ASSERT_NO_FATAL_FAILURE(ChangePosixFilePermissions(base_dir_, S_IWGRP, 0u));
+  ASSERT_NO_FATAL_FAILURE(ChangePosixFilePermissions(sub_dir_, S_IWGRP, 0u));
+  ASSERT_NO_FATAL_FAILURE(ChangePosixFilePermissions(text_file_, S_IWGRP, 0u));
+
+  // Now |ok_gids_| works, but |bad_gids_| fails.
+  EXPECT_TRUE(VerifyPathControlledByUser(base_dir_, sub_dir_, uid_, ok_gids_));
+  EXPECT_TRUE(
+      VerifyPathControlledByUser(base_dir_, text_file_, uid_, ok_gids_));
+  EXPECT_TRUE(VerifyPathControlledByUser(sub_dir_, text_file_, uid_, ok_gids_));
+
+  EXPECT_FALSE(
+      VerifyPathControlledByUser(base_dir_, sub_dir_, uid_, bad_gids_));
+  EXPECT_FALSE(
+      VerifyPathControlledByUser(base_dir_, text_file_, uid_, bad_gids_));
+  EXPECT_FALSE(
+      VerifyPathControlledByUser(sub_dir_, text_file_, uid_, bad_gids_));
+
+  // Because any group in the group set is allowed,
+  // the union of good and bad gids passes.
+
+  std::set<gid_t> multiple_gids;
+  std::set_union(
+      ok_gids_.begin(), ok_gids_.end(),
+      bad_gids_.begin(), bad_gids_.end(),
+      std::inserter(multiple_gids, multiple_gids.begin()));
+
+  EXPECT_TRUE(
+      VerifyPathControlledByUser(base_dir_, sub_dir_, uid_, multiple_gids));
+  EXPECT_TRUE(
+      VerifyPathControlledByUser(base_dir_, text_file_, uid_, multiple_gids));
+  EXPECT_TRUE(
+      VerifyPathControlledByUser(sub_dir_, text_file_, uid_, multiple_gids));
+}
+
+TEST_F(VerifyPathControlledByUserTest, WriteBitChecks) {
+  // Make all files and directories non-world-writable.
+  ASSERT_NO_FATAL_FAILURE(
+      ChangePosixFilePermissions(base_dir_, 0u, S_IWOTH));
+  ASSERT_NO_FATAL_FAILURE(
+      ChangePosixFilePermissions(sub_dir_, 0u, S_IWOTH));
+  ASSERT_NO_FATAL_FAILURE(
+      ChangePosixFilePermissions(text_file_, 0u, S_IWOTH));
+
+  // Initially, we control all parts of the path.
+  EXPECT_TRUE(VerifyPathControlledByUser(base_dir_, sub_dir_, uid_, ok_gids_));
+  EXPECT_TRUE(
+      VerifyPathControlledByUser(base_dir_, text_file_, uid_, ok_gids_));
+  EXPECT_TRUE(VerifyPathControlledByUser(sub_dir_, text_file_, uid_, ok_gids_));
+
+  // Make base_dir_ world-writable.
+  ASSERT_NO_FATAL_FAILURE(
+      ChangePosixFilePermissions(base_dir_, S_IWOTH, 0u));
+  EXPECT_FALSE(VerifyPathControlledByUser(base_dir_, sub_dir_, uid_, ok_gids_));
+  EXPECT_FALSE(
+      VerifyPathControlledByUser(base_dir_, text_file_, uid_, ok_gids_));
+  EXPECT_TRUE(VerifyPathControlledByUser(sub_dir_, text_file_, uid_, ok_gids_));
+
+  // Make sub_dir_ world-writable.
+  ASSERT_NO_FATAL_FAILURE(
+      ChangePosixFilePermissions(sub_dir_, S_IWOTH, 0u));
+  EXPECT_FALSE(VerifyPathControlledByUser(base_dir_, sub_dir_, uid_, ok_gids_));
+  EXPECT_FALSE(
+      VerifyPathControlledByUser(base_dir_, text_file_, uid_, ok_gids_));
+  EXPECT_FALSE(
+      VerifyPathControlledByUser(sub_dir_, text_file_, uid_, ok_gids_));
+
+  // Make text_file_ world-writable.
+  ASSERT_NO_FATAL_FAILURE(
+      ChangePosixFilePermissions(text_file_, S_IWOTH, 0u));
+  EXPECT_FALSE(VerifyPathControlledByUser(base_dir_, sub_dir_, uid_, ok_gids_));
+  EXPECT_FALSE(
+      VerifyPathControlledByUser(base_dir_, text_file_, uid_, ok_gids_));
+  EXPECT_FALSE(
+      VerifyPathControlledByUser(sub_dir_, text_file_, uid_, ok_gids_));
+
+  // Make sub_dir_ non-world-writable.
+  ASSERT_NO_FATAL_FAILURE(
+      ChangePosixFilePermissions(sub_dir_, 0u, S_IWOTH));
+  EXPECT_FALSE(VerifyPathControlledByUser(base_dir_, sub_dir_, uid_, ok_gids_));
+  EXPECT_FALSE(
+      VerifyPathControlledByUser(base_dir_, text_file_, uid_, ok_gids_));
+  EXPECT_FALSE(
+      VerifyPathControlledByUser(sub_dir_, text_file_, uid_, ok_gids_));
+
+  // Make base_dir_ non-world-writable.
+  ASSERT_NO_FATAL_FAILURE(
+      ChangePosixFilePermissions(base_dir_, 0u, S_IWOTH));
+  EXPECT_TRUE(VerifyPathControlledByUser(base_dir_, sub_dir_, uid_, ok_gids_));
+  EXPECT_FALSE(
+      VerifyPathControlledByUser(base_dir_, text_file_, uid_, ok_gids_));
+  EXPECT_FALSE(
+      VerifyPathControlledByUser(sub_dir_, text_file_, uid_, ok_gids_));
+
+  // Back to the initial state: Nothing is writable, so every path
+  // should pass.
+  ASSERT_NO_FATAL_FAILURE(
+      ChangePosixFilePermissions(text_file_, 0u, S_IWOTH));
+  EXPECT_TRUE(VerifyPathControlledByUser(base_dir_, sub_dir_, uid_, ok_gids_));
+  EXPECT_TRUE(
+      VerifyPathControlledByUser(base_dir_, text_file_, uid_, ok_gids_));
+  EXPECT_TRUE(VerifyPathControlledByUser(sub_dir_, text_file_, uid_, ok_gids_));
+}
+
+#endif  // defined(OS_POSIX)
+
+#if defined(OS_ANDROID)
+TEST_F(FileUtilTest, ValidContentUriTest) {
+  // Get the test image path.
+  FilePath data_dir;
+  ASSERT_TRUE(PathService::Get(DIR_TEST_DATA, &data_dir));
+  data_dir = data_dir.AppendASCII("file_util");
+  ASSERT_TRUE(PathExists(data_dir));
+  FilePath image_file = data_dir.Append(FILE_PATH_LITERAL("red.png"));
+  int64_t image_size;
+  ASSERT_TRUE(GetFileSize(image_file, &image_size));
+  ASSERT_GT(image_size, 0);
+
+  // Insert the image into MediaStore. MediaStore will do some conversions, and
+  // return the content URI.
+  FilePath path = InsertImageIntoMediaStore(image_file);
+  EXPECT_TRUE(path.IsContentUri());
+  EXPECT_TRUE(PathExists(path));
+  // The file size may not equal that of the input image, as MediaStore may
+  // convert the image.
+  int64_t content_uri_size;
+  EXPECT_TRUE(GetFileSize(path, &content_uri_size));
+  EXPECT_EQ(image_size, content_uri_size);
+
+  // We should be able to read the file.
+  File file = OpenContentUriForRead(path);
+  EXPECT_TRUE(file.IsValid());
+  auto buffer = std::make_unique<char[]>(image_size);
+  EXPECT_TRUE(file.ReadAtCurrentPos(buffer.get(), image_size));
+}
+
+TEST_F(FileUtilTest, NonExistentContentUriTest) {
+  FilePath path("content://foo.bar");
+  EXPECT_TRUE(path.IsContentUri());
+  EXPECT_FALSE(PathExists(path));
+  // Getting the file size should fail.
+  int64_t size;
+  EXPECT_FALSE(GetFileSize(path, &size));
+
+  // We should not be able to read the file.
+  File file = OpenContentUriForRead(path);
+  EXPECT_FALSE(file.IsValid());
+}
+#endif
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+
+TEST(ScopedFD, ScopedFDDoesClose) {
+  int fds[2];
+  char c = 0;
+  ASSERT_EQ(0, pipe(fds));
+  const int write_end = fds[1];
+  ScopedFD read_end_closer(fds[0]);
+  {
+    ScopedFD write_end_closer(fds[1]);
+  }
+  // This is the only thread. This file descriptor should no longer be valid.
+  int ret = close(write_end);
+  EXPECT_EQ(-1, ret);
+  EXPECT_EQ(EBADF, errno);
+  // Make sure read(2) won't block.
+  ASSERT_EQ(0, fcntl(fds[0], F_SETFL, O_NONBLOCK));
+  // Reading the pipe should EOF.
+  EXPECT_EQ(0, read(fds[0], &c, 1));
+}
+
+#if defined(GTEST_HAS_DEATH_TEST)
+void CloseWithScopedFD(int fd) {
+  ScopedFD fd_closer(fd);
+}
+#endif
+
+TEST(ScopedFD, ScopedFDCrashesOnCloseFailure) {
+  int fds[2];
+  ASSERT_EQ(0, pipe(fds));
+  ScopedFD read_end_closer(fds[0]);
+  EXPECT_EQ(0, IGNORE_EINTR(close(fds[1])));
+#if defined(GTEST_HAS_DEATH_TEST)
+  // This is the only thread. This file descriptor should no longer be valid.
+  // Trying to close it should crash. This is important for security.
+  EXPECT_DEATH(CloseWithScopedFD(fds[1]), "");
+#endif
+}
+
+#endif  // defined(OS_POSIX) || defined(OS_FUCHSIA)
+
+}  // namespace
+
+}  // namespace base
diff --git a/base/files/file_util_win.cc b/base/files/file_util_win.cc
new file mode 100644
index 0000000..794584c
--- /dev/null
+++ b/base/files/file_util_win.cc
@@ -0,0 +1,992 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file_util.h"
+
+#include <windows.h>
+#include <io.h>
+#include <psapi.h>
+#include <shellapi.h>
+#include <shlobj.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <time.h>
+#include <winsock2.h>
+
+#include <algorithm>
+#include <limits>
+#include <string>
+
+#include "base/files/file_enumerator.h"
+#include "base/files/file_path.h"
+#include "base/guid.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/metrics/histogram_functions.h"
+#include "base/process/process_handle.h"
+#include "base/rand_util.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/time/time.h"
+#include "base/win/scoped_handle.h"
+#include "base/win/windows_version.h"
+
+namespace base {
+
+namespace {
+
+const DWORD kFileShareAll =
+    FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE;
+
+// Records a sample in a histogram named
+// "Windows.PostOperationState.|operation|" indicating the state of |path|
+// following the named operation. If |operation_succeeded| is true, the
+// "operation succeeded" sample is recorded. Otherwise, the state of |path| is
+// queried and the most meaningful sample is recorded.
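+// For example (an illustrative pairing taken from this file): DeleteFile()
+// below passes "DeleteFile.Recursive", so its samples land in
+// "Windows.PostOperationState.DeleteFile.Recursive".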
+void RecordPostOperationState(const FilePath& path,
+                              StringPiece operation,
+                              bool operation_succeeded) {
+  // The state of a filesystem item after an operation.
+  // These values are persisted to logs. Entries should not be renumbered and
+  // numeric values should never be reused.
+  enum class PostOperationState {
+    kOperationSucceeded = 0,
+    kFileNotFoundAfterFailure = 1,
+    kPathNotFoundAfterFailure = 2,
+    kAccessDeniedAfterFailure = 3,
+    kNoAttributesAfterFailure = 4,
+    kEmptyDirectoryAfterFailure = 5,
+    kNonEmptyDirectoryAfterFailure = 6,
+    kNotDirectoryAfterFailure = 7,
+    kCount
+  } metric = PostOperationState::kOperationSucceeded;
+
+  if (!operation_succeeded) {
+    const DWORD attributes = ::GetFileAttributes(path.value().c_str());
+    if (attributes == INVALID_FILE_ATTRIBUTES) {
+      // On failure to delete, one might expect the file/directory to still be
+      // in place. Slice a failure to get its attributes into a few common error
+      // buckets.
+      const DWORD error_code = ::GetLastError();
+      if (error_code == ERROR_FILE_NOT_FOUND)
+        metric = PostOperationState::kFileNotFoundAfterFailure;
+      else if (error_code == ERROR_PATH_NOT_FOUND)
+        metric = PostOperationState::kPathNotFoundAfterFailure;
+      else if (error_code == ERROR_ACCESS_DENIED)
+        metric = PostOperationState::kAccessDeniedAfterFailure;
+      else
+        metric = PostOperationState::kNoAttributesAfterFailure;
+    } else if (attributes & FILE_ATTRIBUTE_DIRECTORY) {
+      if (IsDirectoryEmpty(path))
+        metric = PostOperationState::kEmptyDirectoryAfterFailure;
+      else
+        metric = PostOperationState::kNonEmptyDirectoryAfterFailure;
+    } else {
+      metric = PostOperationState::kNotDirectoryAfterFailure;
+    }
+  }
+
+  std::string histogram_name = "Windows.PostOperationState.";
+  operation.AppendToString(&histogram_name);
+  UmaHistogramEnumeration(histogram_name, metric, PostOperationState::kCount);
+}
+
+// Records the sample |error| in a histogram named
+// "Windows.FilesystemError.|operation|".
+void RecordFilesystemError(StringPiece operation, DWORD error) {
+  std::string histogram_name = "Windows.FilesystemError.";
+  operation.AppendToString(&histogram_name);
+  UmaHistogramSparse(histogram_name, error);
+}
+
+// Deletes all files and directories in a path.
+// Returns ERROR_SUCCESS on success or the Windows error code corresponding to
+// the first error encountered.
+DWORD DeleteFileRecursive(const FilePath& path,
+                          const FilePath::StringType& pattern,
+                          bool recursive) {
+  FileEnumerator traversal(path, false,
+                           FileEnumerator::FILES | FileEnumerator::DIRECTORIES,
+                           pattern);
+  DWORD result = ERROR_SUCCESS;
+  for (FilePath current = traversal.Next(); !current.empty();
+       current = traversal.Next()) {
+    // Try to clear the read-only bit if we find it.
+    FileEnumerator::FileInfo info = traversal.GetInfo();
+    if ((info.find_data().dwFileAttributes & FILE_ATTRIBUTE_READONLY) &&
+        (recursive || !info.IsDirectory())) {
+      ::SetFileAttributes(
+          current.value().c_str(),
+          info.find_data().dwFileAttributes & ~FILE_ATTRIBUTE_READONLY);
+    }
+
+    DWORD this_result = ERROR_SUCCESS;
+    if (info.IsDirectory()) {
+      if (recursive) {
+        this_result = DeleteFileRecursive(current, pattern, true);
+        if (this_result == ERROR_SUCCESS &&
+            !::RemoveDirectory(current.value().c_str())) {
+          this_result = ::GetLastError();
+        }
+      }
+    } else if (!::DeleteFile(current.value().c_str())) {
+      this_result = ::GetLastError();
+    }
+    if (result == ERROR_SUCCESS)
+      result = this_result;
+  }
+  return result;
+}
+
+// Appends |mode_char| to |mode| before the optional character set encoding; see
+// https://msdn.microsoft.com/library/yeby3zcb.aspx for details.
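+// For example (illustrative): inserting L'N' turns L"w,ccs=UTF-8" into
+// L"wN,ccs=UTF-8", and L"rb" (no encoding suffix) into L"rbN".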
+void AppendModeCharacter(base::char16 mode_char, base::string16* mode) {
+  size_t comma_pos = mode->find(L',');
+  mode->insert(comma_pos == base::string16::npos ? mode->length() : comma_pos,
+               1, mode_char);
+}
+
+bool DoCopyFile(const FilePath& from_path,
+                const FilePath& to_path,
+                bool fail_if_exists) {
+  AssertBlockingAllowed();
+  if (from_path.ReferencesParent() || to_path.ReferencesParent())
+    return false;
+
+  // NOTE: I suspect we could support longer paths, but that would involve
+  // analyzing all our usage of files.
+  if (from_path.value().length() >= MAX_PATH ||
+      to_path.value().length() >= MAX_PATH) {
+    return false;
+  }
+
+  // Unlike the posix implementation that copies the file manually and discards
+  // the ACL bits, CopyFile() copies the complete SECURITY_DESCRIPTOR and access
+  // bits, which is usually not what we want. We can't do much about the
+  // SECURITY_DESCRIPTOR but at least remove the read only bit.
+  const wchar_t* dest = to_path.value().c_str();
+  if (!::CopyFile(from_path.value().c_str(), dest, fail_if_exists)) {
+    // Copy failed.
+    return false;
+  }
+  DWORD attrs = GetFileAttributes(dest);
+  if (attrs == INVALID_FILE_ATTRIBUTES) {
+    return false;
+  }
+  if (attrs & FILE_ATTRIBUTE_READONLY) {
+    SetFileAttributes(dest, attrs & ~FILE_ATTRIBUTE_READONLY);
+  }
+  return true;
+}
+
+bool DoCopyDirectory(const FilePath& from_path,
+                     const FilePath& to_path,
+                     bool recursive,
+                     bool fail_if_exists) {
+  // NOTE(maruel): Previous version of this function used to call
+  // SHFileOperation().  This used to copy the file attributes and extended
+  // attributes, OLE structured storage, NTFS file system alternate data
+  // streams, SECURITY_DESCRIPTOR. In practice, this is not what we want, we
+  // want the containing directory to propagate its SECURITY_DESCRIPTOR.
+  AssertBlockingAllowed();
+
+  // NOTE: I suspect we could support longer paths, but that would involve
+  // analyzing all our usage of files.
+  if (from_path.value().length() >= MAX_PATH ||
+      to_path.value().length() >= MAX_PATH) {
+    return false;
+  }
+
+  // This function does not properly handle destinations within the source.
+  FilePath real_to_path = to_path;
+  if (PathExists(real_to_path)) {
+    real_to_path = MakeAbsoluteFilePath(real_to_path);
+    if (real_to_path.empty())
+      return false;
+  } else {
+    real_to_path = MakeAbsoluteFilePath(real_to_path.DirName());
+    if (real_to_path.empty())
+      return false;
+  }
+  FilePath real_from_path = MakeAbsoluteFilePath(from_path);
+  if (real_from_path.empty())
+    return false;
+  if (real_to_path == real_from_path || real_from_path.IsParent(real_to_path))
+    return false;
+
+  int traverse_type = FileEnumerator::FILES;
+  if (recursive)
+    traverse_type |= FileEnumerator::DIRECTORIES;
+  FileEnumerator traversal(from_path, recursive, traverse_type);
+
+  if (!PathExists(from_path)) {
+    DLOG(ERROR) << "CopyDirectory() couldn't stat source directory: "
+                << from_path.value().c_str();
+    return false;
+  }
+  // TODO(maruel): This is not necessary anymore.
+  DCHECK(recursive || DirectoryExists(from_path));
+
+  FilePath current = from_path;
+  bool from_is_dir = DirectoryExists(from_path);
+  bool success = true;
+  FilePath from_path_base = from_path;
+  if (recursive && DirectoryExists(to_path)) {
+    // If the destination already exists and is a directory, then the
+    // top level of source needs to be copied.
+    from_path_base = from_path.DirName();
+  }
+
+  while (success && !current.empty()) {
+    // current is the source path, including from_path, so append
+    // the suffix after from_path to to_path to create the target_path.
+    FilePath target_path(to_path);
+    if (from_path_base != current) {
+      if (!from_path_base.AppendRelativePath(current, &target_path)) {
+        success = false;
+        break;
+      }
+    }
+
+    if (from_is_dir) {
+      if (!DirectoryExists(target_path) &&
+          !::CreateDirectory(target_path.value().c_str(), NULL)) {
+        DLOG(ERROR) << "CopyDirectory() couldn't create directory: "
+                    << target_path.value().c_str();
+        success = false;
+      }
+    } else if (!DoCopyFile(current, target_path, fail_if_exists)) {
+      DLOG(ERROR) << "CopyDirectory() couldn't create file: "
+                  << target_path.value().c_str();
+      success = false;
+    }
+
+    current = traversal.Next();
+    if (!current.empty())
+      from_is_dir = traversal.GetInfo().IsDirectory();
+  }
+
+  return success;
+}
+
+// Returns ERROR_SUCCESS on success, or a Windows error code on failure.
+DWORD DoDeleteFile(const FilePath& path, bool recursive) {
+  AssertBlockingAllowed();
+
+  if (path.empty())
+    return ERROR_SUCCESS;
+
+  if (path.value().length() >= MAX_PATH)
+    return ERROR_BAD_PATHNAME;
+
+  // Handle any path with wildcards.
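+  // For example (hypothetical path): "C:\foo\*.tmp" enumerates "C:\foo" with
+  // the pattern "*.tmp" and deletes each match.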
+  if (path.BaseName().value().find_first_of(L"*?") !=
+      FilePath::StringType::npos) {
+    return DeleteFileRecursive(path.DirName(), path.BaseName().value(),
+                               recursive);
+  }
+
+  // Report success if the file or path does not exist.
+  const DWORD attr = ::GetFileAttributes(path.value().c_str());
+  if (attr == INVALID_FILE_ATTRIBUTES) {
+    const DWORD error_code = ::GetLastError();
+    return (error_code == ERROR_FILE_NOT_FOUND ||
+            error_code == ERROR_PATH_NOT_FOUND)
+               ? ERROR_SUCCESS
+               : error_code;
+  }
+
+  // Clear the read-only bit if it is set.
+  if ((attr & FILE_ATTRIBUTE_READONLY) &&
+      !::SetFileAttributes(path.value().c_str(),
+                           attr & ~FILE_ATTRIBUTE_READONLY)) {
+    return ::GetLastError();
+  }
+
+  // Perform a simple delete on anything that isn't a directory.
+  if (!(attr & FILE_ATTRIBUTE_DIRECTORY)) {
+    return ::DeleteFile(path.value().c_str()) ? ERROR_SUCCESS
+                                              : ::GetLastError();
+  }
+
+  if (recursive) {
+    const DWORD error_code = DeleteFileRecursive(path, L"*", true);
+    if (error_code != ERROR_SUCCESS)
+      return error_code;
+  }
+  return ::RemoveDirectory(path.value().c_str()) ? ERROR_SUCCESS
+                                                 : ::GetLastError();
+}
+
+}  // namespace
+
+FilePath MakeAbsoluteFilePath(const FilePath& input) {
+  AssertBlockingAllowed();
+  wchar_t file_path[MAX_PATH];
+  if (!_wfullpath(file_path, input.value().c_str(), MAX_PATH))
+    return FilePath();
+  return FilePath(file_path);
+}
+
+bool DeleteFile(const FilePath& path, bool recursive) {
+  static constexpr char kRecursive[] = "DeleteFile.Recursive";
+  static constexpr char kNonRecursive[] = "DeleteFile.NonRecursive";
+  const StringPiece operation(recursive ? kRecursive : kNonRecursive);
+
+  AssertBlockingAllowed();
+
+  // Metrics for delete failures tracked in https://crbug.com/599084. Delete may
+  // fail for a number of reasons. Log some metrics relating to failures in the
+  // current code so that any improvements or regressions resulting from
+  // subsequent code changes can be detected.
+  const DWORD error = DoDeleteFile(path, recursive);
+  RecordPostOperationState(path, operation, error == ERROR_SUCCESS);
+  if (error == ERROR_SUCCESS)
+    return true;
+
+  RecordFilesystemError(operation, error);
+  return false;
+}
+
+bool DeleteFileAfterReboot(const FilePath& path) {
+  AssertBlockingAllowed();
+
+  if (path.value().length() >= MAX_PATH)
+    return false;
+
+  return MoveFileEx(path.value().c_str(), NULL,
+                    MOVEFILE_DELAY_UNTIL_REBOOT |
+                        MOVEFILE_REPLACE_EXISTING) != FALSE;
+}
+
+bool ReplaceFile(const FilePath& from_path,
+                 const FilePath& to_path,
+                 File::Error* error) {
+  AssertBlockingAllowed();
+  // Try a simple move first.  It will only succeed when |to_path| doesn't
+  // already exist.
+  if (::MoveFile(from_path.value().c_str(), to_path.value().c_str()))
+    return true;
+  File::Error move_error = File::OSErrorToFileError(GetLastError());
+
+  // Try the full-blown replace if the move fails, as ReplaceFile will only
+  // succeed when |to_path| does exist. When writing to a network share, we may
+  // not be able to change the ACLs; in that case, ignore ACL errors
+  // (REPLACEFILE_IGNORE_MERGE_ERRORS).
+  if (::ReplaceFile(to_path.value().c_str(), from_path.value().c_str(), NULL,
+                    REPLACEFILE_IGNORE_MERGE_ERRORS, NULL, NULL)) {
+    return true;
+  }
+  // In the case of FILE_ERROR_NOT_FOUND from ReplaceFile, it is likely that
+  // |to_path| does not exist. In this case, the more relevant error comes
+  // from the call to MoveFile.
+  if (error) {
+    File::Error replace_error = File::OSErrorToFileError(GetLastError());
+    *error = replace_error == File::FILE_ERROR_NOT_FOUND ? move_error
+                                                         : replace_error;
+  }
+  return false;
+}
+
+bool CopyDirectory(const FilePath& from_path,
+                   const FilePath& to_path,
+                   bool recursive) {
+  return DoCopyDirectory(from_path, to_path, recursive, false);
+}
+
+bool CopyDirectoryExcl(const FilePath& from_path,
+                       const FilePath& to_path,
+                       bool recursive) {
+  return DoCopyDirectory(from_path, to_path, recursive, true);
+}
+
+bool PathExists(const FilePath& path) {
+  AssertBlockingAllowed();
+  return (GetFileAttributes(path.value().c_str()) != INVALID_FILE_ATTRIBUTES);
+}
+
+bool PathIsWritable(const FilePath& path) {
+  AssertBlockingAllowed();
+  HANDLE dir =
+      CreateFile(path.value().c_str(), FILE_ADD_FILE, kFileShareAll,
+                 NULL, OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, NULL);
+
+  if (dir == INVALID_HANDLE_VALUE)
+    return false;
+
+  CloseHandle(dir);
+  return true;
+}
+
+bool DirectoryExists(const FilePath& path) {
+  AssertBlockingAllowed();
+  DWORD fileattr = GetFileAttributes(path.value().c_str());
+  if (fileattr != INVALID_FILE_ATTRIBUTES)
+    return (fileattr & FILE_ATTRIBUTE_DIRECTORY) != 0;
+  return false;
+}
+
+bool GetTempDir(FilePath* path) {
+  wchar_t temp_path[MAX_PATH + 1];
+  DWORD path_len = ::GetTempPath(MAX_PATH, temp_path);
+  if (path_len >= MAX_PATH || path_len <= 0)
+    return false;
+  // TODO(evanm): the old behavior of this function was to always strip the
+  // trailing slash.  We duplicate this here, but it shouldn't be necessary
+  // when everyone is using the appropriate FilePath APIs.
+  *path = FilePath(temp_path).StripTrailingSeparators();
+  return true;
+}
+
+FilePath GetHomeDir() {
+  char16 result[MAX_PATH];
+  if (SUCCEEDED(SHGetFolderPath(NULL, CSIDL_PROFILE, NULL, SHGFP_TYPE_CURRENT,
+                                result)) &&
+      result[0]) {
+    return FilePath(result);
+  }
+
+  // Fall back to the temporary directory on failure.
+  FilePath temp;
+  if (GetTempDir(&temp))
+    return temp;
+
+  // Last resort.
+  return FilePath(L"C:\\");
+}
+
+bool CreateTemporaryFile(FilePath* path) {
+  AssertBlockingAllowed();
+
+  FilePath temp_file;
+
+  if (!GetTempDir(path))
+    return false;
+
+  if (CreateTemporaryFileInDir(*path, &temp_file)) {
+    *path = temp_file;
+    return true;
+  }
+
+  return false;
+}
+
+// On POSIX we have semantics to create and open a temporary file
+// atomically.
+// TODO(jrg): is there equivalent call to use on Windows instead of
+// going 2-step?
+FILE* CreateAndOpenTemporaryFileInDir(const FilePath& dir, FilePath* path) {
+  AssertBlockingAllowed();
+  if (!CreateTemporaryFileInDir(dir, path)) {
+    return NULL;
+  }
+  // Open file in binary mode, to avoid problems with fwrite. On Windows
+  // it replaces \n's with \r\n's, which may surprise you.
+  // Reference: http://msdn.microsoft.com/en-us/library/h9t88zwz(VS.71).aspx
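+  // For example, in text mode an fwrite of "a\nb" would land on disk as
+  // "a\r\nb"; the "b" in "wb+" below avoids that translation.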
+  return OpenFile(*path, "wb+");
+}
+
+bool CreateTemporaryFileInDir(const FilePath& dir, FilePath* temp_file) {
+  AssertBlockingAllowed();
+
+  // Use GUID instead of ::GetTempFileName() to generate unique file names.
+  // "Due to the algorithm used to generate file names, GetTempFileName can
+  // perform poorly when creating a large number of files with the same prefix.
+  // In such cases, it is recommended that you construct unique file names based
+  // on GUIDs."
+  // https://msdn.microsoft.com/library/windows/desktop/aa364991.aspx
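+  // An illustrative (hypothetical) result:
+  //   C:\Temp\0f8fad5b-d9cb-469f-a165-70867728950e.tmp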
+
+  FilePath temp_name;
+  bool create_file_success = false;
+
+  // Although it is nearly impossible to get a duplicate name with a GUID, we
+  // still use a loop here in case it happens.
+  for (int i = 0; i < 100; ++i) {
+    temp_name = dir.Append(ASCIIToUTF16(base::GenerateGUID()) + L".tmp");
+    File file(temp_name,
+              File::FLAG_CREATE | File::FLAG_READ | File::FLAG_WRITE);
+    if (file.IsValid()) {
+      file.Close();
+      create_file_success = true;
+      break;
+    }
+  }
+
+  if (!create_file_success) {
+    DPLOG(WARNING) << "Failed to get temporary file name in "
+                   << UTF16ToUTF8(dir.value());
+    return false;
+  }
+
+  wchar_t long_temp_name[MAX_PATH + 1];
+  DWORD long_name_len =
+      GetLongPathName(temp_name.value().c_str(), long_temp_name, MAX_PATH);
+  if (long_name_len > MAX_PATH || long_name_len == 0) {
+    // GetLongPathName() failed, but we still have a temporary file.
+    *temp_file = std::move(temp_name);
+    return true;
+  }
+
+  FilePath::StringType long_temp_name_str;
+  long_temp_name_str.assign(long_temp_name, long_name_len);
+  *temp_file = FilePath(std::move(long_temp_name_str));
+  return true;
+}
+
+bool CreateTemporaryDirInDir(const FilePath& base_dir,
+                             const FilePath::StringType& prefix,
+                             FilePath* new_dir) {
+  AssertBlockingAllowed();
+
+  FilePath path_to_create;
+
+  for (int count = 0; count < 50; ++count) {
+    // Try to create a new temporary directory with a randomly generated name.
+    // If it already exists, keep trying new names until we reach some limit.
+    string16 new_dir_name;
+    new_dir_name.assign(prefix);
+    new_dir_name.append(IntToString16(GetCurrentProcId()));
+    new_dir_name.push_back('_');
+    new_dir_name.append(
+        IntToString16(RandInt(0, std::numeric_limits<int16_t>::max())));
+
+    path_to_create = base_dir.Append(new_dir_name);
+    if (::CreateDirectory(path_to_create.value().c_str(), NULL)) {
+      *new_dir = path_to_create;
+      return true;
+    }
+  }
+
+  return false;
+}
+
+bool CreateNewTempDirectory(const FilePath::StringType& prefix,
+                            FilePath* new_temp_path) {
+  AssertBlockingAllowed();
+
+  FilePath system_temp_dir;
+  if (!GetTempDir(&system_temp_dir))
+    return false;
+
+  return CreateTemporaryDirInDir(system_temp_dir, prefix, new_temp_path);
+}
+
+bool CreateDirectoryAndGetError(const FilePath& full_path,
+                                File::Error* error) {
+  AssertBlockingAllowed();
+
+  // If the path exists, we've succeeded if it's a directory, failed otherwise.
+  const wchar_t* full_path_str = full_path.value().c_str();
+  DWORD fileattr = ::GetFileAttributes(full_path_str);
+  if (fileattr != INVALID_FILE_ATTRIBUTES) {
+    if ((fileattr & FILE_ATTRIBUTE_DIRECTORY) != 0) {
+      DVLOG(1) << "CreateDirectory(" << full_path_str << "), "
+               << "directory already exists.";
+      return true;
+    }
+    DLOG(WARNING) << "CreateDirectory(" << full_path_str << "), "
+                  << "conflicts with existing file.";
+    if (error) {
+      *error = File::FILE_ERROR_NOT_A_DIRECTORY;
+    }
+    return false;
+  }
+
+  // Invariant:  Path does not exist as file or directory.
+
+  // Attempt to create the parent recursively.  This will immediately return
+  // true if it already exists, otherwise it will create all required parent
+  // directories starting with the highest-level missing parent.
+  FilePath parent_path(full_path.DirName());
+  if (parent_path.value() == full_path.value()) {
+    if (error) {
+      *error = File::FILE_ERROR_NOT_FOUND;
+    }
+    return false;
+  }
+  if (!CreateDirectoryAndGetError(parent_path, error)) {
+    DLOG(WARNING) << "Failed to create one of the parent directories.";
+    if (error) {
+      DCHECK(*error != File::FILE_OK);
+    }
+    return false;
+  }
+
+  if (!::CreateDirectory(full_path_str, NULL)) {
+    DWORD error_code = ::GetLastError();
+    if (error_code == ERROR_ALREADY_EXISTS && DirectoryExists(full_path)) {
+      // ERROR_ALREADY_EXISTS alone doesn't indicate whether we were racing
+      // with someone creating the same directory, or with a file with the
+      // same path.  If DirectoryExists() returns true, we lost the race to
+      // create the same directory.
+      return true;
+    } else {
+      if (error)
+        *error = File::OSErrorToFileError(error_code);
+      DLOG(WARNING) << "Failed to create directory " << full_path_str
+                    << ", last error is " << error_code << ".";
+      return false;
+    }
+  } else {
+    return true;
+  }
+}
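+
+// Illustrative usage sketch (caller code and |some_deep_path| assumed for
+// this example):
+//
+//   base::File::Error error = base::File::FILE_OK;
+//   if (!base::CreateDirectoryAndGetError(some_deep_path, &error)) {
+//     LOG(ERROR) << "Failed to create directory, error = " << error;
+//   }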
+
+bool NormalizeFilePath(const FilePath& path, FilePath* real_path) {
+  AssertBlockingAllowed();
+  FilePath mapped_file;
+  if (!NormalizeToNativeFilePath(path, &mapped_file))
+    return false;
+  // NormalizeToNativeFilePath() will return a path that starts with
+  // "\Device\Harddisk...".  Helper DevicePathToDriveLetterPath()
+  // will find a drive letter which maps to the path's device, so
+  // that we return a path starting with a drive letter.
+  return DevicePathToDriveLetterPath(mapped_file, real_path);
+}
+
+bool DevicePathToDriveLetterPath(const FilePath& nt_device_path,
+                                 FilePath* out_drive_letter_path) {
+  AssertBlockingAllowed();
+
+  // Get the mapping of drive letters to device paths.
+  const int kDriveMappingSize = 1024;
+  wchar_t drive_mapping[kDriveMappingSize] = {'\0'};
+  if (!::GetLogicalDriveStrings(kDriveMappingSize - 1, drive_mapping)) {
+    DLOG(ERROR) << "Failed to get drive mapping.";
+    return false;
+  }
+
+  // The drive mapping is a sequence of null terminated strings.
+  // The last string is empty.
+  wchar_t* drive_map_ptr = drive_mapping;
+  wchar_t device_path_as_string[MAX_PATH];
+  wchar_t drive[] = L" :";
+
+  // For each string in the drive mapping, get the junction that links
+  // to it.  If that junction is a prefix of |device_path|, then we
+  // know that |drive| is the real path prefix.
+  while (*drive_map_ptr) {
+    drive[0] = drive_map_ptr[0];  // Copy the drive letter.
+
+    if (QueryDosDevice(drive, device_path_as_string, MAX_PATH)) {
+      FilePath device_path(device_path_as_string);
+      if (device_path == nt_device_path ||
+          device_path.IsParent(nt_device_path)) {
+        *out_drive_letter_path = FilePath(drive +
+            nt_device_path.value().substr(wcslen(device_path_as_string)));
+        return true;
+      }
+    }
+    // Move to the next drive letter string, which starts one
+    // increment after the '\0' that terminates the current string.
+    while (*drive_map_ptr++) {}
+  }
+
+  // No drive matched.  The path does not start with a device junction
+  // that is mounted as a drive letter.  This means there is no drive
+  // letter path to the volume that holds |device_path|, so fail.
+  return false;
+}
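+
+// Worked example with made-up values: if QueryDosDevice("C:") reports
+// \Device\HarddiskVolume1 and |nt_device_path| is
+// \Device\HarddiskVolume1\Temp\foo.txt, then |out_drive_letter_path|
+// becomes C:\Temp\foo.txt.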
+
+bool NormalizeToNativeFilePath(const FilePath& path, FilePath* nt_path) {
+  AssertBlockingAllowed();
+  // On Vista and later, GetFinalPathNameByHandle() would give us the real
+  // path from a file handle.  If we ever drop XP support, consider changing
+  // the code below to a call to GetFinalPathNameByHandle().  The method this
+  // function uses is explained in the following MSDN article:
+  // http://msdn.microsoft.com/en-us/library/aa366789(VS.85).aspx
+  win::ScopedHandle file_handle(
+      ::CreateFile(path.value().c_str(),
+                   GENERIC_READ,
+                   kFileShareAll,
+                   NULL,
+                   OPEN_EXISTING,
+                   FILE_ATTRIBUTE_NORMAL,
+                   NULL));
+  if (!file_handle.IsValid())
+    return false;
+
+  // Create a file mapping object.  Can't easily use MemoryMappedFile, because
+  // we only map the first byte, and need direct access to the handle.  You
+  // cannot map an empty file; this call fails in that case.
+  win::ScopedHandle file_map_handle(
+      ::CreateFileMapping(file_handle.Get(),
+                          NULL,
+                          PAGE_READONLY,
+                          0,
+                          1,  // Just one byte.  No need to look at the data.
+                          NULL));
+  if (!file_map_handle.IsValid())
+    return false;
+
+  // Use a view of the file to get the path to the file.
+  void* file_view = MapViewOfFile(file_map_handle.Get(),
+                                  FILE_MAP_READ, 0, 0, 1);
+  if (!file_view)
+    return false;
+
+  // The expansion of |path| into a full path may make it longer.
+  // GetMappedFileName() will fail if the result is longer than MAX_PATH.
+  // Pad a bit to be safe.  If kMaxPathLength is ever changed to be less
+  // than MAX_PATH, it would be necessary to test that GetMappedFileName()
+  // does not return kMaxPathLength.  That would mean that only part of the
+  // path fit in |mapped_file_path|.
+  const int kMaxPathLength = MAX_PATH + 10;
+  wchar_t mapped_file_path[kMaxPathLength];
+  bool success = false;
+  HANDLE cp = GetCurrentProcess();
+  if (::GetMappedFileNameW(cp, file_view, mapped_file_path, kMaxPathLength)) {
+    *nt_path = FilePath(mapped_file_path);
+    success = true;
+  }
+  ::UnmapViewOfFile(file_view);
+  return success;
+}
+
+// TODO(rkc): Work out if we want to handle NTFS junctions here or not, handle
+// them if we do decide to.
+bool IsLink(const FilePath& file_path) {
+  return false;
+}
+
+bool GetFileInfo(const FilePath& file_path, File::Info* results) {
+  AssertBlockingAllowed();
+
+  WIN32_FILE_ATTRIBUTE_DATA attr;
+  if (!GetFileAttributesEx(file_path.value().c_str(),
+                           GetFileExInfoStandard, &attr)) {
+    return false;
+  }
+
+  ULARGE_INTEGER size;
+  size.HighPart = attr.nFileSizeHigh;
+  size.LowPart = attr.nFileSizeLow;
+  results->size = size.QuadPart;
+
+  results->is_directory =
+      (attr.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0;
+  results->last_modified = Time::FromFileTime(attr.ftLastWriteTime);
+  results->last_accessed = Time::FromFileTime(attr.ftLastAccessTime);
+  results->creation_time = Time::FromFileTime(attr.ftCreationTime);
+
+  return true;
+}
+
+FILE* OpenFile(const FilePath& filename, const char* mode) {
+  // 'N' is unconditionally added below, so be sure there is not one already
+  // present before a comma in |mode|.
+  DCHECK(
+      strchr(mode, 'N') == nullptr ||
+      (strchr(mode, ',') != nullptr && strchr(mode, 'N') > strchr(mode, ',')));
+  AssertBlockingAllowed();
+  string16 w_mode = ASCIIToUTF16(mode);
+  AppendModeCharacter(L'N', &w_mode);
+  return _wfsopen(filename.value().c_str(), w_mode.c_str(), _SH_DENYNO);
+}
+
+FILE* FileToFILE(File file, const char* mode) {
+  if (!file.IsValid())
+    return NULL;
+  int fd =
+      _open_osfhandle(reinterpret_cast<intptr_t>(file.GetPlatformFile()), 0);
+  if (fd < 0)
+    return NULL;
+  file.TakePlatformFile();
+  FILE* stream = _fdopen(fd, mode);
+  if (!stream)
+    _close(fd);
+  return stream;
+}
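+
+// Illustrative usage sketch (caller code assumed for this example):
+//
+//   base::File file(path, base::File::FLAG_OPEN | base::File::FLAG_READ);
+//   FILE* stream = base::FileToFILE(std::move(file), "rb");
+//   if (stream) {
+//     // |stream| now owns the underlying handle; release it with fclose().
+//     fclose(stream);
+//   }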
+
+int ReadFile(const FilePath& filename, char* data, int max_size) {
+  AssertBlockingAllowed();
+  win::ScopedHandle file(CreateFile(filename.value().c_str(),
+                                    GENERIC_READ,
+                                    FILE_SHARE_READ | FILE_SHARE_WRITE,
+                                    NULL,
+                                    OPEN_EXISTING,
+                                    FILE_FLAG_SEQUENTIAL_SCAN,
+                                    NULL));
+  if (!file.IsValid())
+    return -1;
+
+  DWORD read;
+  if (::ReadFile(file.Get(), data, max_size, &read, NULL))
+    return read;
+
+  return -1;
+}
+
+int WriteFile(const FilePath& filename, const char* data, int size) {
+  AssertBlockingAllowed();
+  win::ScopedHandle file(CreateFile(filename.value().c_str(), GENERIC_WRITE, 0,
+                                    NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL,
+                                    NULL));
+  if (!file.IsValid()) {
+    DPLOG(WARNING) << "CreateFile failed for path "
+                   << UTF16ToUTF8(filename.value());
+    return -1;
+  }
+
+  DWORD written;
+  BOOL result = ::WriteFile(file.Get(), data, size, &written, NULL);
+  if (result && static_cast<int>(written) == size)
+    return written;
+
+  if (!result) {
+    // WriteFile failed.
+    DPLOG(WARNING) << "writing file " << UTF16ToUTF8(filename.value())
+                   << " failed";
+  } else {
+    // Didn't write all the bytes.
+    DLOG(WARNING) << "wrote" << written << " bytes to "
+                  << UTF16ToUTF8(filename.value()) << " expected " << size;
+  }
+  return -1;
+}
+
+bool AppendToFile(const FilePath& filename, const char* data, int size) {
+  AssertBlockingAllowed();
+  win::ScopedHandle file(CreateFile(filename.value().c_str(),
+                                    FILE_APPEND_DATA,
+                                    0,
+                                    NULL,
+                                    OPEN_EXISTING,
+                                    0,
+                                    NULL));
+  if (!file.IsValid()) {
+    VPLOG(1) << "CreateFile failed for path " << UTF16ToUTF8(filename.value());
+    return false;
+  }
+
+  DWORD written;
+  BOOL result = ::WriteFile(file.Get(), data, size, &written, NULL);
+  if (result && static_cast<int>(written) == size)
+    return true;
+
+  if (!result) {
+    // WriteFile failed.
+    VPLOG(1) << "Writing file " << UTF16ToUTF8(filename.value()) << " failed";
+  } else {
+    // Didn't write all the bytes.
+    VPLOG(1) << "Only wrote " << written << " out of " << size << " byte(s) to "
+             << UTF16ToUTF8(filename.value());
+  }
+  return false;
+}
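+
+// Illustrative sketch contrasting the two helpers above (caller code and
+// |log_path| assumed for this example):
+//
+//   const char kData[] = "hello";
+//   base::WriteFile(log_path, kData, 5);     // Replaces existing contents.
+//   base::AppendToFile(log_path, kData, 5);  // Appends; the file must
+//                                            // already exist (OPEN_EXISTING).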
+
+bool GetCurrentDirectory(FilePath* dir) {
+  AssertBlockingAllowed();
+
+  wchar_t system_buffer[MAX_PATH];
+  system_buffer[0] = 0;
+  DWORD len = ::GetCurrentDirectory(MAX_PATH, system_buffer);
+  if (len == 0 || len > MAX_PATH)
+    return false;
+  // TODO(evanm): the old behavior of this function was to always strip the
+  // trailing slash.  We duplicate this here, but it shouldn't be necessary
+  // when everyone is using the appropriate FilePath APIs.
+  std::wstring dir_str(system_buffer);
+  *dir = FilePath(dir_str).StripTrailingSeparators();
+  return true;
+}
+
+bool SetCurrentDirectory(const FilePath& directory) {
+  AssertBlockingAllowed();
+  return ::SetCurrentDirectory(directory.value().c_str()) != 0;
+}
+
+int GetMaximumPathComponentLength(const FilePath& path) {
+  AssertBlockingAllowed();
+
+  wchar_t volume_path[MAX_PATH];
+  if (!GetVolumePathNameW(path.NormalizePathSeparators().value().c_str(),
+                          volume_path,
+                          arraysize(volume_path))) {
+    return -1;
+  }
+
+  DWORD max_length = 0;
+  if (!GetVolumeInformationW(volume_path, NULL, 0, NULL, &max_length, NULL,
+                             NULL, 0)) {
+    return -1;
+  }
+
+  // Length of |path| with path separator appended.
+  size_t prefix = path.StripTrailingSeparators().value().size() + 1;
+  // The whole path string must be shorter than MAX_PATH. That is, it must be
+  // prefix + component_length < MAX_PATH (or equivalently, <= MAX_PATH - 1).
+  int whole_path_limit = std::max(0, MAX_PATH - 1 - static_cast<int>(prefix));
+  return std::min(whole_path_limit, static_cast<int>(max_length));
+}
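+
+// Worked example with illustrative values: for |path| = C:\Temp on NTFS,
+// |prefix| is wcslen(L"C:\\Temp") + 1 = 8, the volume reports
+// max_length = 255, and whole_path_limit = 260 - 1 - 8 = 251, so the
+// function returns min(251, 255) = 251.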
+
+bool CopyFile(const FilePath& from_path, const FilePath& to_path) {
+  return DoCopyFile(from_path, to_path, false);
+}
+
+bool SetNonBlocking(int fd) {
+  unsigned long nonblocking = 1;
+  if (ioctlsocket(fd, FIONBIO, &nonblocking) == 0)
+    return true;
+  return false;
+}
+
+// -----------------------------------------------------------------------------
+
+namespace internal {
+
+bool MoveUnsafe(const FilePath& from_path, const FilePath& to_path) {
+  AssertBlockingAllowed();
+
+  // NOTE: I suspect we could support longer paths, but that would involve
+  // analyzing all our usage of files.
+  if (from_path.value().length() >= MAX_PATH ||
+      to_path.value().length() >= MAX_PATH) {
+    return false;
+  }
+  if (MoveFileEx(from_path.value().c_str(), to_path.value().c_str(),
+                 MOVEFILE_COPY_ALLOWED | MOVEFILE_REPLACE_EXISTING) != 0)
+    return true;
+
+  // Keep the last error value from MoveFileEx around in case the below
+  // fails.
+  bool ret = false;
+  DWORD last_error = ::GetLastError();
+
+  if (DirectoryExists(from_path)) {
+    // MoveFileEx fails when moving a directory across volumes.  We simulate
+    // the move with a copy followed by a delete.  Ideally we would first
+    // check whether from_path and to_path really are on different volumes.
+    ret = internal::CopyAndDeleteDirectory(from_path, to_path);
+  }
+
+  if (!ret) {
+    // Leave a clue about what went wrong so that it can be (at least) picked
+    // up by a PLOG entry.
+    ::SetLastError(last_error);
+  }
+
+  return ret;
+}
+
+bool CopyAndDeleteDirectory(const FilePath& from_path,
+                            const FilePath& to_path) {
+  AssertBlockingAllowed();
+  if (CopyDirectory(from_path, to_path, true)) {
+    if (DeleteFile(from_path, true))
+      return true;
+
+    // Like Move, this function is not transactional, so we just
+    // leave the copied bits behind if deleting from_path fails.
+    // If to_path existed beforehand, it has already been overwritten
+    // by now; deleting the new bits would not improve matters.
+  }
+  return false;
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/files/file_win.cc b/base/files/file_win.cc
new file mode 100644
index 0000000..d7bffc3
--- /dev/null
+++ b/base/files/file_win.cc
@@ -0,0 +1,429 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file.h"
+
+#include <io.h>
+#include <stdint.h>
+
+#include "base/logging.h"
+#include "base/metrics/histogram_functions.h"
+#include "base/threading/thread_restrictions.h"
+
+#include <windows.h>
+
+namespace base {
+
+// Make sure our Whence mappings match the system headers.
+static_assert(File::FROM_BEGIN == FILE_BEGIN &&
+                  File::FROM_CURRENT == FILE_CURRENT &&
+                  File::FROM_END == FILE_END,
+              "whence mapping must match the system headers");
+
+bool File::IsValid() const {
+  return file_.IsValid();
+}
+
+PlatformFile File::GetPlatformFile() const {
+  return file_.Get();
+}
+
+PlatformFile File::TakePlatformFile() {
+  return file_.Take();
+}
+
+void File::Close() {
+  if (!file_.IsValid())
+    return;
+
+  AssertBlockingAllowed();
+  SCOPED_FILE_TRACE("Close");
+  file_.Close();
+}
+
+int64_t File::Seek(Whence whence, int64_t offset) {
+  AssertBlockingAllowed();
+  DCHECK(IsValid());
+
+  SCOPED_FILE_TRACE_WITH_SIZE("Seek", offset);
+
+  LARGE_INTEGER distance, res;
+  distance.QuadPart = offset;
+  DWORD move_method = static_cast<DWORD>(whence);
+  if (!SetFilePointerEx(file_.Get(), distance, &res, move_method))
+    return -1;
+  return res.QuadPart;
+}
+
+int File::Read(int64_t offset, char* data, int size) {
+  AssertBlockingAllowed();
+  DCHECK(IsValid());
+  DCHECK(!async_);
+  if (size < 0)
+    return -1;
+
+  SCOPED_FILE_TRACE_WITH_SIZE("Read", size);
+
+  LARGE_INTEGER offset_li;
+  offset_li.QuadPart = offset;
+
+  OVERLAPPED overlapped = {0};
+  overlapped.Offset = offset_li.LowPart;
+  overlapped.OffsetHigh = offset_li.HighPart;
+
+  DWORD bytes_read;
+  if (::ReadFile(file_.Get(), data, size, &bytes_read, &overlapped))
+    return bytes_read;
+  if (ERROR_HANDLE_EOF == GetLastError())
+    return 0;
+
+  return -1;
+}
+
+int File::ReadAtCurrentPos(char* data, int size) {
+  AssertBlockingAllowed();
+  DCHECK(IsValid());
+  DCHECK(!async_);
+  if (size < 0)
+    return -1;
+
+  SCOPED_FILE_TRACE_WITH_SIZE("ReadAtCurrentPos", size);
+
+  DWORD bytes_read;
+  if (::ReadFile(file_.Get(), data, size, &bytes_read, NULL))
+    return bytes_read;
+  if (ERROR_HANDLE_EOF == GetLastError())
+    return 0;
+
+  return -1;
+}
+
+int File::ReadNoBestEffort(int64_t offset, char* data, int size) {
+  // TODO(dbeam): trace this separately?
+  return Read(offset, data, size);
+}
+
+int File::ReadAtCurrentPosNoBestEffort(char* data, int size) {
+  // TODO(dbeam): trace this separately?
+  return ReadAtCurrentPos(data, size);
+}
+
+int File::Write(int64_t offset, const char* data, int size) {
+  AssertBlockingAllowed();
+  DCHECK(IsValid());
+  DCHECK(!async_);
+
+  SCOPED_FILE_TRACE_WITH_SIZE("Write", size);
+
+  LARGE_INTEGER offset_li;
+  offset_li.QuadPart = offset;
+
+  OVERLAPPED overlapped = {0};
+  overlapped.Offset = offset_li.LowPart;
+  overlapped.OffsetHigh = offset_li.HighPart;
+
+  DWORD bytes_written;
+  if (::WriteFile(file_.Get(), data, size, &bytes_written, &overlapped))
+    return bytes_written;
+
+  return -1;
+}
+
+int File::WriteAtCurrentPos(const char* data, int size) {
+  AssertBlockingAllowed();
+  DCHECK(IsValid());
+  DCHECK(!async_);
+  if (size < 0)
+    return -1;
+
+  SCOPED_FILE_TRACE_WITH_SIZE("WriteAtCurrentPos", size);
+
+  DWORD bytes_written;
+  if (::WriteFile(file_.Get(), data, size, &bytes_written, NULL))
+    return bytes_written;
+
+  return -1;
+}
+
+int File::WriteAtCurrentPosNoBestEffort(const char* data, int size) {
+  return WriteAtCurrentPos(data, size);
+}
+
+int64_t File::GetLength() {
+  AssertBlockingAllowed();
+  DCHECK(IsValid());
+
+  SCOPED_FILE_TRACE("GetLength");
+
+  LARGE_INTEGER size;
+  if (!::GetFileSizeEx(file_.Get(), &size))
+    return -1;
+
+  return static_cast<int64_t>(size.QuadPart);
+}
+
+bool File::SetLength(int64_t length) {
+  AssertBlockingAllowed();
+  DCHECK(IsValid());
+
+  SCOPED_FILE_TRACE_WITH_SIZE("SetLength", length);
+
+  // Get the current file pointer.
+  LARGE_INTEGER file_pointer;
+  LARGE_INTEGER zero;
+  zero.QuadPart = 0;
+  if (!::SetFilePointerEx(file_.Get(), zero, &file_pointer, FILE_CURRENT))
+    return false;
+
+  LARGE_INTEGER length_li;
+  length_li.QuadPart = length;
+  // If length > file size, the SetEndOfFile() call below extends the file
+  // with zeroes on all standard Windows file systems (NTFS, FATxx).
+  if (!::SetFilePointerEx(file_.Get(), length_li, NULL, FILE_BEGIN))
+    return false;
+
+  // Set the new file length and move the file pointer to its old position.
+  // This is consistent with ftruncate()'s behavior, even when the file
+  // pointer points to a location beyond the end of the file.
+  // TODO(rvargas): Emulating ftruncate's details seems suspicious and is not
+  // promised by the interface (nor was it promised by PlatformFile). See if
+  // this implementation detail can be removed.
+  return ((::SetEndOfFile(file_.Get()) != FALSE) &&
+          (::SetFilePointerEx(file_.Get(), file_pointer, NULL, FILE_BEGIN) !=
+           FALSE));
+}
+
+bool File::SetTimes(Time last_access_time, Time last_modified_time) {
+  AssertBlockingAllowed();
+  DCHECK(IsValid());
+
+  SCOPED_FILE_TRACE("SetTimes");
+
+  FILETIME last_access_filetime = last_access_time.ToFileTime();
+  FILETIME last_modified_filetime = last_modified_time.ToFileTime();
+  return (::SetFileTime(file_.Get(), NULL, &last_access_filetime,
+                        &last_modified_filetime) != FALSE);
+}
+
+bool File::GetInfo(Info* info) {
+  AssertBlockingAllowed();
+  DCHECK(IsValid());
+
+  SCOPED_FILE_TRACE("GetInfo");
+
+  BY_HANDLE_FILE_INFORMATION file_info;
+  if (!GetFileInformationByHandle(file_.Get(), &file_info))
+    return false;
+
+  LARGE_INTEGER size;
+  size.HighPart = file_info.nFileSizeHigh;
+  size.LowPart = file_info.nFileSizeLow;
+  info->size = size.QuadPart;
+  info->is_directory =
+      (file_info.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0;
+  info->is_symbolic_link = false;  // Symbolic links are not detected here.
+  info->last_modified = Time::FromFileTime(file_info.ftLastWriteTime);
+  info->last_accessed = Time::FromFileTime(file_info.ftLastAccessTime);
+  info->creation_time = Time::FromFileTime(file_info.ftCreationTime);
+  return true;
+}
+
+File::Error File::Lock() {
+  DCHECK(IsValid());
+
+  SCOPED_FILE_TRACE("Lock");
+
+  BOOL result = LockFile(file_.Get(), 0, 0, MAXDWORD, MAXDWORD);
+  if (!result)
+    return GetLastFileError();
+  return FILE_OK;
+}
+
+File::Error File::Unlock() {
+  DCHECK(IsValid());
+
+  SCOPED_FILE_TRACE("Unlock");
+
+  BOOL result = UnlockFile(file_.Get(), 0, 0, MAXDWORD, MAXDWORD);
+  if (!result)
+    return GetLastFileError();
+  return FILE_OK;
+}
+
+File File::Duplicate() const {
+  if (!IsValid())
+    return File();
+
+  SCOPED_FILE_TRACE("Duplicate");
+
+  HANDLE other_handle = nullptr;
+
+  if (!::DuplicateHandle(GetCurrentProcess(),  // hSourceProcessHandle
+                         GetPlatformFile(),
+                         GetCurrentProcess(),  // hTargetProcessHandle
+                         &other_handle,
+                         0,  // dwDesiredAccess ignored due to SAME_ACCESS
+                         FALSE,  // !bInheritHandle
+                         DUPLICATE_SAME_ACCESS)) {
+    return File(GetLastFileError());
+  }
+
+  File other(other_handle);
+  if (async())
+    other.async_ = true;
+  return other;
+}
+
+bool File::DeleteOnClose(bool delete_on_close) {
+  FILE_DISPOSITION_INFO disposition = {delete_on_close ? TRUE : FALSE};
+  return ::SetFileInformationByHandle(GetPlatformFile(), FileDispositionInfo,
+                                      &disposition, sizeof(disposition)) != 0;
+}
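+
+// Illustrative usage sketch (caller code assumed for this example):
+//
+//   base::File temp(path, base::File::FLAG_CREATE | base::File::FLAG_WRITE |
+//                             base::File::FLAG_CAN_DELETE_ON_CLOSE);
+//   if (temp.IsValid() && temp.DeleteOnClose(true)) {
+//     // The OS removes the file once the last handle to it is closed.
+//   }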
+
+// Static.
+File::Error File::OSErrorToFileError(DWORD last_error) {
+  switch (last_error) {
+    case ERROR_SHARING_VIOLATION:
+      return FILE_ERROR_IN_USE;
+    case ERROR_ALREADY_EXISTS:
+    case ERROR_FILE_EXISTS:
+      return FILE_ERROR_EXISTS;
+    case ERROR_FILE_NOT_FOUND:
+    case ERROR_PATH_NOT_FOUND:
+      return FILE_ERROR_NOT_FOUND;
+    case ERROR_ACCESS_DENIED:
+      return FILE_ERROR_ACCESS_DENIED;
+    case ERROR_TOO_MANY_OPEN_FILES:
+      return FILE_ERROR_TOO_MANY_OPENED;
+    case ERROR_OUTOFMEMORY:
+    case ERROR_NOT_ENOUGH_MEMORY:
+      return FILE_ERROR_NO_MEMORY;
+    case ERROR_HANDLE_DISK_FULL:
+    case ERROR_DISK_FULL:
+    case ERROR_DISK_RESOURCES_EXHAUSTED:
+      return FILE_ERROR_NO_SPACE;
+    case ERROR_USER_MAPPED_FILE:
+      return FILE_ERROR_INVALID_OPERATION;
+    case ERROR_NOT_READY:
+    case ERROR_SECTOR_NOT_FOUND:
+    case ERROR_DEV_NOT_EXIST:
+    case ERROR_IO_DEVICE:
+    case ERROR_FILE_CORRUPT:
+    case ERROR_DISK_CORRUPT:
+      return FILE_ERROR_IO;
+    default:
+      UmaHistogramSparse("PlatformFile.UnknownErrors.Windows", last_error);
+      // This function should only be called for errors.
+      DCHECK_NE(static_cast<DWORD>(ERROR_SUCCESS), last_error);
+      return FILE_ERROR_FAILED;
+  }
+}
+
+void File::DoInitialize(const FilePath& path, uint32_t flags) {
+  AssertBlockingAllowed();
+  DCHECK(!IsValid());
+
+  DWORD disposition = 0;
+
+  if (flags & FLAG_OPEN)
+    disposition = OPEN_EXISTING;
+
+  if (flags & FLAG_CREATE) {
+    DCHECK(!disposition);
+    disposition = CREATE_NEW;
+  }
+
+  if (flags & FLAG_OPEN_ALWAYS) {
+    DCHECK(!disposition);
+    disposition = OPEN_ALWAYS;
+  }
+
+  if (flags & FLAG_CREATE_ALWAYS) {
+    DCHECK(!disposition);
+    DCHECK(flags & FLAG_WRITE);
+    disposition = CREATE_ALWAYS;
+  }
+
+  if (flags & FLAG_OPEN_TRUNCATED) {
+    DCHECK(!disposition);
+    DCHECK(flags & FLAG_WRITE);
+    disposition = TRUNCATE_EXISTING;
+  }
+
+  if (!disposition) {
+    ::SetLastError(ERROR_INVALID_PARAMETER);
+    error_details_ = FILE_ERROR_FAILED;
+    NOTREACHED();
+    return;
+  }
+
+  DWORD access = 0;
+  if (flags & FLAG_WRITE)
+    access = GENERIC_WRITE;
+  if (flags & FLAG_APPEND) {
+    DCHECK(!access);
+    access = FILE_APPEND_DATA;
+  }
+  if (flags & FLAG_READ)
+    access |= GENERIC_READ;
+  if (flags & FLAG_WRITE_ATTRIBUTES)
+    access |= FILE_WRITE_ATTRIBUTES;
+  if (flags & FLAG_EXECUTE)
+    access |= GENERIC_EXECUTE;
+  if (flags & FLAG_CAN_DELETE_ON_CLOSE)
+    access |= DELETE;
+
+  DWORD sharing = (flags & FLAG_EXCLUSIVE_READ) ? 0 : FILE_SHARE_READ;
+  if (!(flags & FLAG_EXCLUSIVE_WRITE))
+    sharing |= FILE_SHARE_WRITE;
+  if (flags & FLAG_SHARE_DELETE)
+    sharing |= FILE_SHARE_DELETE;
+
+  DWORD create_flags = 0;
+  if (flags & FLAG_ASYNC)
+    create_flags |= FILE_FLAG_OVERLAPPED;
+  if (flags & FLAG_TEMPORARY)
+    create_flags |= FILE_ATTRIBUTE_TEMPORARY;
+  if (flags & FLAG_HIDDEN)
+    create_flags |= FILE_ATTRIBUTE_HIDDEN;
+  if (flags & FLAG_DELETE_ON_CLOSE)
+    create_flags |= FILE_FLAG_DELETE_ON_CLOSE;
+  if (flags & FLAG_BACKUP_SEMANTICS)
+    create_flags |= FILE_FLAG_BACKUP_SEMANTICS;
+  if (flags & FLAG_SEQUENTIAL_SCAN)
+    create_flags |= FILE_FLAG_SEQUENTIAL_SCAN;
+
+  file_.Set(CreateFile(path.value().c_str(), access, sharing, NULL,
+                       disposition, create_flags, NULL));
+
+  if (file_.IsValid()) {
+    error_details_ = FILE_OK;
+    async_ = ((flags & FLAG_ASYNC) == FLAG_ASYNC);
+
+    if (flags & (FLAG_OPEN_ALWAYS))
+      created_ = (ERROR_ALREADY_EXISTS != GetLastError());
+    else if (flags & (FLAG_CREATE_ALWAYS | FLAG_CREATE))
+      created_ = true;
+  } else {
+    error_details_ = GetLastFileError();
+  }
+}
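+
+// For example (illustrative): FLAG_CREATE_ALWAYS | FLAG_WRITE | FLAG_ASYNC
+// translates above to CreateFile(path, GENERIC_WRITE,
+// FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, CREATE_ALWAYS,
+// FILE_FLAG_OVERLAPPED, NULL).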
+
+bool File::Flush() {
+  AssertBlockingAllowed();
+  DCHECK(IsValid());
+  SCOPED_FILE_TRACE("Flush");
+  return ::FlushFileBuffers(file_.Get()) != FALSE;
+}
+
+void File::SetPlatformFile(PlatformFile file) {
+  file_.Set(file);
+}
+
+// static
+File::Error File::GetLastFileError() {
+  return File::OSErrorToFileError(GetLastError());
+}
+
+}  // namespace base
diff --git a/base/files/important_file_writer.cc b/base/files/important_file_writer.cc
new file mode 100644
index 0000000..235bb8d
--- /dev/null
+++ b/base/files/important_file_writer.cc
@@ -0,0 +1,315 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/important_file_writer.h"
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/callback_helpers.h"
+#include "base/critical_closure.h"
+#include "base/debug/alias.h"
+#include "base/files/file.h"
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/metrics/histogram_functions.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_util.h"
+#include "base/task_runner.h"
+#include "base/task_runner_util.h"
+#include "base/threading/thread.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace {
+
+constexpr auto kDefaultCommitInterval = TimeDelta::FromSeconds(10);
+
+// This enum is used to define the buckets for an enumerated UMA histogram.
+// Hence,
+//   (a) existing enumerated constants should never be deleted or reordered, and
+//   (b) new constants should only be appended at the end of the enumeration.
+enum TempFileFailure {
+  FAILED_CREATING,
+  FAILED_OPENING,
+  FAILED_CLOSING,  // Unused.
+  FAILED_WRITING,
+  FAILED_RENAMING,
+  FAILED_FLUSHING,
+  TEMP_FILE_FAILURE_MAX
+};
+
+// Helper function to write samples to a histogram with a dynamically assigned
+// histogram name.  Works with different error code types convertible to int
+// which is the actual argument type of UmaHistogramExactLinear.
+template <typename SampleType>
+void UmaHistogramExactLinearWithSuffix(const char* histogram_name,
+                                       StringPiece histogram_suffix,
+                                       SampleType add_sample,
+                                       SampleType max_sample) {
+  static_assert(std::is_convertible<SampleType, int>::value,
+                "SampleType should be convertible to int");
+  DCHECK(histogram_name);
+  std::string histogram_full_name(histogram_name);
+  if (!histogram_suffix.empty()) {
+    histogram_full_name.append(".");
+    histogram_full_name.append(histogram_suffix.data(),
+                               histogram_suffix.length());
+  }
+  UmaHistogramExactLinear(histogram_full_name, static_cast<int>(add_sample),
+                          static_cast<int>(max_sample));
+}
+
+// Helper function to write samples to a histogram with a dynamically assigned
+// histogram name.  Works with short timings from 1 ms up to 10 seconds (50
+// buckets) which is the actual argument type of UmaHistogramTimes.
+void UmaHistogramTimesWithSuffix(const char* histogram_name,
+                                 StringPiece histogram_suffix,
+                                 TimeDelta sample) {
+  DCHECK(histogram_name);
+  std::string histogram_full_name(histogram_name);
+  if (!histogram_suffix.empty()) {
+    histogram_full_name.append(".");
+    histogram_full_name.append(histogram_suffix.data(),
+                               histogram_suffix.length());
+  }
+  UmaHistogramTimes(histogram_full_name, sample);
+}
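+
+// For example (illustrative values): histogram_name =
+// "ImportantFile.TimeToWrite" with histogram_suffix = "Preferences" records
+// under "ImportantFile.TimeToWrite.Preferences"; an empty suffix records
+// under the unsuffixed base name.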
+
+void LogFailure(const FilePath& path,
+                StringPiece histogram_suffix,
+                TempFileFailure failure_code,
+                StringPiece message) {
+  UmaHistogramExactLinearWithSuffix("ImportantFile.TempFileFailures",
+                                    histogram_suffix, failure_code,
+                                    TEMP_FILE_FAILURE_MAX);
+  DPLOG(WARNING) << "temp file failure: " << path.value() << " : " << message;
+}
+
+// Helper function to call WriteFileAtomically() with a
+// std::unique_ptr<std::string>.
+void WriteScopedStringToFileAtomically(
+    const FilePath& path,
+    std::unique_ptr<std::string> data,
+    Closure before_write_callback,
+    Callback<void(bool success)> after_write_callback,
+    const std::string& histogram_suffix) {
+  if (!before_write_callback.is_null())
+    before_write_callback.Run();
+
+  TimeTicks start_time = TimeTicks::Now();
+  bool result =
+      ImportantFileWriter::WriteFileAtomically(path, *data, histogram_suffix);
+  if (result) {
+    UmaHistogramTimesWithSuffix("ImportantFile.TimeToWrite", histogram_suffix,
+                                TimeTicks::Now() - start_time);
+  }
+
+  if (!after_write_callback.is_null())
+    after_write_callback.Run(result);
+}
+
+void DeleteTmpFile(const FilePath& tmp_file_path,
+                   StringPiece histogram_suffix) {
+  if (!DeleteFile(tmp_file_path, false)) {
+    UmaHistogramExactLinearWithSuffix(
+        "ImportantFile.FileDeleteError", histogram_suffix,
+        -base::File::GetLastFileError(), -base::File::FILE_ERROR_MAX);
+  }
+}
+
+}  // namespace
+
+// static
+bool ImportantFileWriter::WriteFileAtomically(const FilePath& path,
+                                              StringPiece data,
+                                              StringPiece histogram_suffix) {
+#if defined(OS_CHROMEOS)
+  // On Chrome OS, chrome gets killed when it cannot finish shutdown quickly,
+  // and this function seems to be one of the slowest shutdown steps.
+  // Include some info in the crash report for investigation. crbug.com/418627
+  // TODO(hashimoto): Remove this.
+  struct {
+    size_t data_size;
+    char path[128];
+  } file_info;
+  file_info.data_size = data.size();
+  strlcpy(file_info.path, path.value().c_str(), arraysize(file_info.path));
+  debug::Alias(&file_info);
+#endif
+
+  // Write the data to a temp file, then rename it, to avoid data loss if we
+  // crash while writing the file.  Ensure that the temp file is on the same
+  // volume as the target file so it can be moved in one step, and that the
+  // temp file is securely created.
+  FilePath tmp_file_path;
+  if (!CreateTemporaryFileInDir(path.DirName(), &tmp_file_path)) {
+    UmaHistogramExactLinearWithSuffix(
+        "ImportantFile.FileCreateError", histogram_suffix,
+        -base::File::GetLastFileError(), -base::File::FILE_ERROR_MAX);
+    LogFailure(path, histogram_suffix, FAILED_CREATING,
+               "could not create temporary file");
+    return false;
+  }
+
+  File tmp_file(tmp_file_path, File::FLAG_OPEN | File::FLAG_WRITE);
+  if (!tmp_file.IsValid()) {
+    UmaHistogramExactLinearWithSuffix(
+        "ImportantFile.FileOpenError", histogram_suffix,
+        -tmp_file.error_details(), -base::File::FILE_ERROR_MAX);
+    LogFailure(path, histogram_suffix, FAILED_OPENING,
+               "could not open temporary file");
+    DeleteFile(tmp_file_path, false);
+    return false;
+  }
+
+  // If this fails in the wild, something really bad is going on.
+  const int data_length = checked_cast<int32_t>(data.length());
+  int bytes_written = tmp_file.Write(0, data.data(), data_length);
+  if (bytes_written < data_length) {
+    UmaHistogramExactLinearWithSuffix(
+        "ImportantFile.FileWriteError", histogram_suffix,
+        -base::File::GetLastFileError(), -base::File::FILE_ERROR_MAX);
+  }
+  bool flush_success = tmp_file.Flush();
+  tmp_file.Close();
+
+  if (bytes_written < data_length) {
+    LogFailure(path, histogram_suffix, FAILED_WRITING,
+               "error writing, bytes_written=" + IntToString(bytes_written));
+    DeleteTmpFile(tmp_file_path, histogram_suffix);
+    return false;
+  }
+
+  if (!flush_success) {
+    LogFailure(path, histogram_suffix, FAILED_FLUSHING, "error flushing");
+    DeleteTmpFile(tmp_file_path, histogram_suffix);
+    return false;
+  }
+
+  base::File::Error replace_file_error = base::File::FILE_OK;
+  if (!ReplaceFile(tmp_file_path, path, &replace_file_error)) {
+    UmaHistogramExactLinearWithSuffix("ImportantFile.FileRenameError",
+                                      histogram_suffix, -replace_file_error,
+                                      -base::File::FILE_ERROR_MAX);
+    LogFailure(path, histogram_suffix, FAILED_RENAMING,
+               "could not rename temporary file");
+    DeleteTmpFile(tmp_file_path, histogram_suffix);
+    return false;
+  }
+
+  return true;
+}
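+
+// Illustrative usage sketch (caller code, |prefs_path| and |serialized_prefs|
+// assumed for this example):
+//
+//   if (!base::ImportantFileWriter::WriteFileAtomically(
+//           prefs_path, serialized_prefs, "Preferences")) {
+//     // The target file is untouched and the temp file has been cleaned up.
+//   }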
+
+ImportantFileWriter::ImportantFileWriter(
+    const FilePath& path,
+    scoped_refptr<SequencedTaskRunner> task_runner,
+    const char* histogram_suffix)
+    : ImportantFileWriter(path,
+                          std::move(task_runner),
+                          kDefaultCommitInterval,
+                          histogram_suffix) {}
+
+ImportantFileWriter::ImportantFileWriter(
+    const FilePath& path,
+    scoped_refptr<SequencedTaskRunner> task_runner,
+    TimeDelta interval,
+    const char* histogram_suffix)
+    : path_(path),
+      task_runner_(std::move(task_runner)),
+      serializer_(nullptr),
+      commit_interval_(interval),
+      histogram_suffix_(histogram_suffix ? histogram_suffix : ""),
+      weak_factory_(this) {
+  DCHECK(task_runner_);
+}
+
+ImportantFileWriter::~ImportantFileWriter() {
+  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+  // We're usually a member variable of some other object, which also tends
+  // to be our serializer. It may not be safe to call back to the parent object
+  // being destructed.
+  DCHECK(!HasPendingWrite());
+}
+
+bool ImportantFileWriter::HasPendingWrite() const {
+  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+  return timer().IsRunning();
+}
+
+void ImportantFileWriter::WriteNow(std::unique_ptr<std::string> data) {
+  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+  if (!IsValueInRangeForNumericType<int32_t>(data->length())) {
+    NOTREACHED();
+    return;
+  }
+
+  Closure task = AdaptCallbackForRepeating(
+      BindOnce(&WriteScopedStringToFileAtomically, path_, std::move(data),
+               std::move(before_next_write_callback_),
+               std::move(after_next_write_callback_), histogram_suffix_));
+
+  if (!task_runner_->PostTask(FROM_HERE, MakeCriticalClosure(task))) {
+    // Posting the task to the background message loop is not expected to
+    // fail, but if it does, avoid losing data by writing to disk on the
+    // current thread.
+    NOTREACHED();
+
+    task.Run();
+  }
+  ClearPendingWrite();
+}
+
+void ImportantFileWriter::ScheduleWrite(DataSerializer* serializer) {
+  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+  DCHECK(serializer);
+  serializer_ = serializer;
+
+  if (!timer().IsRunning()) {
+    timer().Start(
+        FROM_HERE, commit_interval_,
+        Bind(&ImportantFileWriter::DoScheduledWrite, Unretained(this)));
+  }
+}
+
+void ImportantFileWriter::DoScheduledWrite() {
+  DCHECK(serializer_);
+  std::unique_ptr<std::string> data(new std::string);
+  if (serializer_->SerializeData(data.get())) {
+    WriteNow(std::move(data));
+  } else {
+    DLOG(WARNING) << "failed to serialize data to be saved in "
+                  << path_.value();
+  }
+  ClearPendingWrite();
+}
+
+void ImportantFileWriter::RegisterOnNextWriteCallbacks(
+    const Closure& before_next_write_callback,
+    const Callback<void(bool success)>& after_next_write_callback) {
+  before_next_write_callback_ = before_next_write_callback;
+  after_next_write_callback_ = after_next_write_callback;
+}
+
+void ImportantFileWriter::ClearPendingWrite() {
+  timer().Stop();
+  serializer_ = nullptr;
+}
+
+void ImportantFileWriter::SetTimerForTesting(Timer* timer_override) {
+  timer_override_ = timer_override;
+}
+
+}  // namespace base
diff --git a/base/files/important_file_writer.h b/base/files/important_file_writer.h
new file mode 100644
index 0000000..08a7ee3
--- /dev/null
+++ b/base/files/important_file_writer.h
@@ -0,0 +1,162 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FILES_IMPORTANT_FILE_WRITER_H_
+#define BASE_FILES_IMPORTANT_FILE_WRITER_H_
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/files/file_path.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/sequence_checker.h"
+#include "base/strings/string_piece.h"
+#include "base/time/time.h"
+#include "base/timer/timer.h"
+
+namespace base {
+
+class SequencedTaskRunner;
+
+// Helper for atomically writing a file to ensure that it won't be corrupted by
+// *application* crash during write (implemented as create, flush, rename).
+//
+// As an added benefit, ImportantFileWriter makes it less likely that the file
+// is corrupted by *system* crash, though even if the ImportantFileWriter call
+// has already returned at the time of the crash it is not specified which
+// version of the file (old or new) is preserved. And depending on system
+// configuration (hardware and software) a significant likelihood of file
+// corruption may remain, thus using ImportantFileWriter is not a valid
+// substitute for file integrity checks and recovery codepaths for malformed
+// files.
+//
+// Also note that ImportantFileWriter can be *really* slow (cf. File::Flush()
+// for details) and thus please don't block shutdown on ImportantFileWriter.
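+//
+// Illustrative usage sketch (caller code assumed, not part of this change):
+//
+//   class PrefStore : public base::ImportantFileWriter::DataSerializer {
+//    public:
+//     bool SerializeData(std::string* output) override {
+//       *output = current_state_;  // |current_state_| is hypothetical.
+//       return true;
+//     }
+//   };
+//
+//   // Writes scheduled before the commit interval elapses are coalesced;
+//   // only the most recently registered serializer is written out.
+//   writer.ScheduleWrite(&pref_store);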
+class BASE_EXPORT ImportantFileWriter {
+ public:
+  // Used by ScheduleWrite() to lazily provide the data to be saved. Allows us
+  // to also batch data serializations.
+  class BASE_EXPORT DataSerializer {
+   public:
+    // Should put serialized string in |data| and return true on successful
+    // serialization. Will be called on the same thread on which
+    // ImportantFileWriter has been created.
+    virtual bool SerializeData(std::string* data) = 0;
+
+   protected:
+    virtual ~DataSerializer() = default;
+  };
+
+  // Save |data| to |path| in an atomic manner. Blocks and writes data on the
+  // current thread. Does not guarantee file integrity across system crash (see
+  // the class comment above).
+  static bool WriteFileAtomically(const FilePath& path,
+                                  StringPiece data,
+                                  StringPiece histogram_suffix = StringPiece());
+
+  // Initialize the writer.
+  // |path| is the name of the file to write.
+  // |task_runner| is the SequencedTaskRunner instance on which all file I/O
+  // operations will be executed.
+  // All non-const methods, the ctor and the dtor must be called on the same
+  // thread.
+  ImportantFileWriter(const FilePath& path,
+                      scoped_refptr<SequencedTaskRunner> task_runner,
+                      const char* histogram_suffix = nullptr);
+
+  // Same as above, but with a custom commit interval.
+  ImportantFileWriter(const FilePath& path,
+                      scoped_refptr<SequencedTaskRunner> task_runner,
+                      TimeDelta interval,
+                      const char* histogram_suffix = nullptr);
+
+  // You have to ensure that there are no pending writes at the moment
+  // of destruction.
+  ~ImportantFileWriter();
+
+  const FilePath& path() const { return path_; }
+
+  // Returns true if there is a scheduled write pending which has not yet
+  // been started.
+  bool HasPendingWrite() const;
+
+  // Save |data| to target filename. Does not block. If there is a pending write
+  // scheduled by ScheduleWrite(), it is cancelled.
+  void WriteNow(std::unique_ptr<std::string> data);
+
+  // Schedule a save to target filename. Data will be serialized and saved
+  // to disk after the commit interval. If another ScheduleWrite is issued
+  // before that, only one serialization and write to disk will happen, and
+  // the most recent |serializer| will be used. This operation does not block.
+  // |serializer| should remain valid through the lifetime of
+  // ImportantFileWriter.
+  void ScheduleWrite(DataSerializer* serializer);
+
+  // Serialize data pending to be saved and execute write on backend thread.
+  void DoScheduledWrite();
+
+  // Registers |before_next_write_callback| and |after_next_write_callback| to
+  // be synchronously invoked from WriteFileAtomically() before its next write
+  // and after its next write, respectively. The boolean passed to
+  // |after_next_write_callback| indicates whether the write was successful.
+  // Both callbacks must be thread safe as they will be called on |task_runner_|
+  // and may be called during Chrome shutdown.
+  // If called more than once before a write is scheduled on |task_runner|, the
+  // latest callbacks clobber the others.
+  void RegisterOnNextWriteCallbacks(
+      const Closure& before_next_write_callback,
+      const Callback<void(bool success)>& after_next_write_callback);
+
+  TimeDelta commit_interval() const {
+    return commit_interval_;
+  }
+
+  // Overrides the timer to use for scheduling writes with |timer_override|.
+  void SetTimerForTesting(Timer* timer_override);
+
+ private:
+  const Timer& timer() const {
+    return timer_override_ ? const_cast<const Timer&>(*timer_override_)
+                           : timer_;
+  }
+  Timer& timer() { return timer_override_ ? *timer_override_ : timer_; }
+
+  void ClearPendingWrite();
+
+  // Invoked synchronously on the next write event.
+  Closure before_next_write_callback_;
+  Callback<void(bool success)> after_next_write_callback_;
+
+  // Path being written to.
+  const FilePath path_;
+
+  // TaskRunner for the thread on which file I/O can be done.
+  const scoped_refptr<SequencedTaskRunner> task_runner_;
+
+  // Timer used to schedule commit after ScheduleWrite.
+  OneShotTimer timer_;
+
+  // An override for |timer_| used for testing.
+  Timer* timer_override_ = nullptr;
+
+  // Serializer which will provide the data to be saved.
+  DataSerializer* serializer_;
+
+  // Time delta after which scheduled data will be written to disk.
+  const TimeDelta commit_interval_;
+
+  // Custom histogram suffix.
+  const std::string histogram_suffix_;
+
+  SEQUENCE_CHECKER(sequence_checker_);
+
+  WeakPtrFactory<ImportantFileWriter> weak_factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(ImportantFileWriter);
+};
+
+}  // namespace base
+
+#endif  // BASE_FILES_IMPORTANT_FILE_WRITER_H_
diff --git a/base/files/important_file_writer_unittest.cc b/base/files/important_file_writer_unittest.cc
new file mode 100644
index 0000000..493fb36
--- /dev/null
+++ b/base/files/important_file_writer_unittest.cc
@@ -0,0 +1,351 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/important_file_writer.h"
+
+#include "base/bind.h"
+#include "base/compiler_specific.h"
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/files/scoped_temp_dir.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
+#include "base/test/histogram_tester.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/time/time.h"
+#include "base/timer/mock_timer.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+std::string GetFileContent(const FilePath& path) {
+  std::string content;
+  if (!ReadFileToString(path, &content)) {
+    NOTREACHED();
+  }
+  return content;
+}
+
+class DataSerializer : public ImportantFileWriter::DataSerializer {
+ public:
+  explicit DataSerializer(const std::string& data) : data_(data) {
+  }
+
+  bool SerializeData(std::string* output) override {
+    output->assign(data_);
+    return true;
+  }
+
+ private:
+  const std::string data_;
+};
+
+class FailingDataSerializer : public ImportantFileWriter::DataSerializer {
+ public:
+  bool SerializeData(std::string* output) override { return false; }
+};
+
+enum WriteCallbackObservationState {
+  NOT_CALLED,
+  CALLED_WITH_ERROR,
+  CALLED_WITH_SUCCESS,
+};
+
+class WriteCallbacksObserver {
+ public:
+  WriteCallbacksObserver() = default;
+
+  // Register OnBeforeWrite() and OnAfterWrite() to be called on the next write
+  // of |writer|.
+  void ObserveNextWriteCallbacks(ImportantFileWriter* writer);
+
+  // Returns the |WriteCallbackObservationState| which was observed, then resets
+  // it to |NOT_CALLED|.
+  WriteCallbackObservationState GetAndResetObservationState();
+
+ private:
+  void OnBeforeWrite() {
+    EXPECT_FALSE(before_write_called_);
+    before_write_called_ = true;
+  }
+
+  void OnAfterWrite(bool success) {
+    EXPECT_EQ(NOT_CALLED, after_write_observation_state_);
+    after_write_observation_state_ =
+        success ? CALLED_WITH_SUCCESS : CALLED_WITH_ERROR;
+  }
+
+  bool before_write_called_ = false;
+  WriteCallbackObservationState after_write_observation_state_ = NOT_CALLED;
+
+  DISALLOW_COPY_AND_ASSIGN(WriteCallbacksObserver);
+};
+
+void WriteCallbacksObserver::ObserveNextWriteCallbacks(
+    ImportantFileWriter* writer) {
+  writer->RegisterOnNextWriteCallbacks(
+      base::Bind(&WriteCallbacksObserver::OnBeforeWrite,
+                 base::Unretained(this)),
+      base::Bind(&WriteCallbacksObserver::OnAfterWrite,
+                 base::Unretained(this)));
+}
+
+WriteCallbackObservationState
+WriteCallbacksObserver::GetAndResetObservationState() {
+  EXPECT_EQ(after_write_observation_state_ != NOT_CALLED, before_write_called_)
+      << "The before-write callback should always be called before the "
+         "after-write callback";
+
+  WriteCallbackObservationState state = after_write_observation_state_;
+  before_write_called_ = false;
+  after_write_observation_state_ = NOT_CALLED;
+  return state;
+}
+
+}  // namespace
+
+class ImportantFileWriterTest : public testing::Test {
+ public:
+  ImportantFileWriterTest() = default;
+  void SetUp() override {
+    ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
+    file_ = temp_dir_.GetPath().AppendASCII("test-file");
+  }
+
+ protected:
+  WriteCallbacksObserver write_callback_observer_;
+  FilePath file_;
+  MessageLoop loop_;
+
+ private:
+  ScopedTempDir temp_dir_;
+};
+
+TEST_F(ImportantFileWriterTest, Basic) {
+  ImportantFileWriter writer(file_, ThreadTaskRunnerHandle::Get());
+  EXPECT_FALSE(PathExists(writer.path()));
+  EXPECT_EQ(NOT_CALLED, write_callback_observer_.GetAndResetObservationState());
+  writer.WriteNow(std::make_unique<std::string>("foo"));
+  RunLoop().RunUntilIdle();
+
+  EXPECT_EQ(NOT_CALLED, write_callback_observer_.GetAndResetObservationState());
+  ASSERT_TRUE(PathExists(writer.path()));
+  EXPECT_EQ("foo", GetFileContent(writer.path()));
+}
+
+TEST_F(ImportantFileWriterTest, WriteWithObserver) {
+  ImportantFileWriter writer(file_, ThreadTaskRunnerHandle::Get());
+  EXPECT_FALSE(PathExists(writer.path()));
+  EXPECT_EQ(NOT_CALLED, write_callback_observer_.GetAndResetObservationState());
+
+  // Confirm that the observer is invoked.
+  write_callback_observer_.ObserveNextWriteCallbacks(&writer);
+  writer.WriteNow(std::make_unique<std::string>("foo"));
+  RunLoop().RunUntilIdle();
+
+  EXPECT_EQ(CALLED_WITH_SUCCESS,
+            write_callback_observer_.GetAndResetObservationState());
+  ASSERT_TRUE(PathExists(writer.path()));
+  EXPECT_EQ("foo", GetFileContent(writer.path()));
+
+  // Confirm that re-installing the observer works for another write.
+  EXPECT_EQ(NOT_CALLED, write_callback_observer_.GetAndResetObservationState());
+  write_callback_observer_.ObserveNextWriteCallbacks(&writer);
+  writer.WriteNow(std::make_unique<std::string>("bar"));
+  RunLoop().RunUntilIdle();
+
+  EXPECT_EQ(CALLED_WITH_SUCCESS,
+            write_callback_observer_.GetAndResetObservationState());
+  ASSERT_TRUE(PathExists(writer.path()));
+  EXPECT_EQ("bar", GetFileContent(writer.path()));
+
+  // Confirm that writing again without re-installing the observer doesn't
+  // result in a notification.
+  EXPECT_EQ(NOT_CALLED, write_callback_observer_.GetAndResetObservationState());
+  writer.WriteNow(std::make_unique<std::string>("baz"));
+  RunLoop().RunUntilIdle();
+
+  EXPECT_EQ(NOT_CALLED, write_callback_observer_.GetAndResetObservationState());
+  ASSERT_TRUE(PathExists(writer.path()));
+  EXPECT_EQ("baz", GetFileContent(writer.path()));
+}
+
+TEST_F(ImportantFileWriterTest, FailedWriteWithObserver) {
+  // Use an invalid file path (relative paths are invalid) to get a
+  // FILE_ERROR_ACCESS_DENIED error when trying to write the file.
+  ImportantFileWriter writer(FilePath().AppendASCII("bad/../path"),
+                             ThreadTaskRunnerHandle::Get());
+  EXPECT_FALSE(PathExists(writer.path()));
+  EXPECT_EQ(NOT_CALLED, write_callback_observer_.GetAndResetObservationState());
+  write_callback_observer_.ObserveNextWriteCallbacks(&writer);
+  writer.WriteNow(std::make_unique<std::string>("foo"));
+  RunLoop().RunUntilIdle();
+
+  // Confirm that the write observer was invoked with its boolean parameter set
+  // to false.
+  EXPECT_EQ(CALLED_WITH_ERROR,
+            write_callback_observer_.GetAndResetObservationState());
+  EXPECT_FALSE(PathExists(writer.path()));
+}
+
+TEST_F(ImportantFileWriterTest, CallbackRunsOnWriterThread) {
+  base::Thread file_writer_thread("ImportantFileWriter test thread");
+  file_writer_thread.Start();
+  ImportantFileWriter writer(file_, file_writer_thread.task_runner());
+  EXPECT_EQ(NOT_CALLED, write_callback_observer_.GetAndResetObservationState());
+
+  // Block execution on |file_writer_thread| to verify that callbacks are
+  // executed on it.
+  base::WaitableEvent wait_helper(
+      base::WaitableEvent::ResetPolicy::MANUAL,
+      base::WaitableEvent::InitialState::NOT_SIGNALED);
+  file_writer_thread.task_runner()->PostTask(
+      FROM_HERE, base::BindOnce(&base::WaitableEvent::Wait,
+                                base::Unretained(&wait_helper)));
+
+  write_callback_observer_.ObserveNextWriteCallbacks(&writer);
+  writer.WriteNow(std::make_unique<std::string>("foo"));
+  RunLoop().RunUntilIdle();
+
+  // Expect the callback to not have been executed before the
+  // |file_writer_thread| is unblocked.
+  EXPECT_EQ(NOT_CALLED, write_callback_observer_.GetAndResetObservationState());
+
+  wait_helper.Signal();
+  file_writer_thread.FlushForTesting();
+
+  EXPECT_EQ(CALLED_WITH_SUCCESS,
+            write_callback_observer_.GetAndResetObservationState());
+  ASSERT_TRUE(PathExists(writer.path()));
+  EXPECT_EQ("foo", GetFileContent(writer.path()));
+}
+
+TEST_F(ImportantFileWriterTest, ScheduleWrite) {
+  constexpr TimeDelta kCommitInterval = TimeDelta::FromSeconds(12345);
+  MockTimer timer(true, false);
+  ImportantFileWriter writer(file_, ThreadTaskRunnerHandle::Get(),
+                             kCommitInterval);
+  writer.SetTimerForTesting(&timer);
+  EXPECT_FALSE(writer.HasPendingWrite());
+  DataSerializer serializer("foo");
+  writer.ScheduleWrite(&serializer);
+  EXPECT_TRUE(writer.HasPendingWrite());
+  ASSERT_TRUE(timer.IsRunning());
+  EXPECT_EQ(kCommitInterval, timer.GetCurrentDelay());
+  timer.Fire();
+  EXPECT_FALSE(writer.HasPendingWrite());
+  EXPECT_FALSE(timer.IsRunning());
+  RunLoop().RunUntilIdle();
+  ASSERT_TRUE(PathExists(writer.path()));
+  EXPECT_EQ("foo", GetFileContent(writer.path()));
+}
+
+TEST_F(ImportantFileWriterTest, DoScheduledWrite) {
+  MockTimer timer(true, false);
+  ImportantFileWriter writer(file_, ThreadTaskRunnerHandle::Get());
+  writer.SetTimerForTesting(&timer);
+  EXPECT_FALSE(writer.HasPendingWrite());
+  DataSerializer serializer("foo");
+  writer.ScheduleWrite(&serializer);
+  EXPECT_TRUE(writer.HasPendingWrite());
+  writer.DoScheduledWrite();
+  EXPECT_FALSE(writer.HasPendingWrite());
+  RunLoop().RunUntilIdle();
+  ASSERT_TRUE(PathExists(writer.path()));
+  EXPECT_EQ("foo", GetFileContent(writer.path()));
+}
+
+TEST_F(ImportantFileWriterTest, BatchingWrites) {
+  MockTimer timer(true, false);
+  ImportantFileWriter writer(file_, ThreadTaskRunnerHandle::Get());
+  writer.SetTimerForTesting(&timer);
+  DataSerializer foo("foo"), bar("bar"), baz("baz");
+  writer.ScheduleWrite(&foo);
+  writer.ScheduleWrite(&bar);
+  writer.ScheduleWrite(&baz);
+  ASSERT_TRUE(timer.IsRunning());
+  timer.Fire();
+  RunLoop().RunUntilIdle();
+  ASSERT_TRUE(PathExists(writer.path()));
+  EXPECT_EQ("baz", GetFileContent(writer.path()));
+}
+
+TEST_F(ImportantFileWriterTest, ScheduleWrite_FailToSerialize) {
+  MockTimer timer(true, false);
+  ImportantFileWriter writer(file_, ThreadTaskRunnerHandle::Get());
+  writer.SetTimerForTesting(&timer);
+  EXPECT_FALSE(writer.HasPendingWrite());
+  FailingDataSerializer serializer;
+  writer.ScheduleWrite(&serializer);
+  EXPECT_TRUE(writer.HasPendingWrite());
+  ASSERT_TRUE(timer.IsRunning());
+  timer.Fire();
+  EXPECT_FALSE(writer.HasPendingWrite());
+  RunLoop().RunUntilIdle();
+  EXPECT_FALSE(PathExists(writer.path()));
+}
+
+TEST_F(ImportantFileWriterTest, ScheduleWrite_WriteNow) {
+  MockTimer timer(true, false);
+  ImportantFileWriter writer(file_, ThreadTaskRunnerHandle::Get());
+  writer.SetTimerForTesting(&timer);
+  EXPECT_FALSE(writer.HasPendingWrite());
+  DataSerializer serializer("foo");
+  writer.ScheduleWrite(&serializer);
+  EXPECT_TRUE(writer.HasPendingWrite());
+  writer.WriteNow(std::make_unique<std::string>("bar"));
+  EXPECT_FALSE(writer.HasPendingWrite());
+  EXPECT_FALSE(timer.IsRunning());
+
+  RunLoop().RunUntilIdle();
+  ASSERT_TRUE(PathExists(writer.path()));
+  EXPECT_EQ("bar", GetFileContent(writer.path()));
+}
+
+TEST_F(ImportantFileWriterTest, DoScheduledWrite_FailToSerialize) {
+  MockTimer timer(true, false);
+  ImportantFileWriter writer(file_, ThreadTaskRunnerHandle::Get());
+  writer.SetTimerForTesting(&timer);
+  EXPECT_FALSE(writer.HasPendingWrite());
+  FailingDataSerializer serializer;
+  writer.ScheduleWrite(&serializer);
+  EXPECT_TRUE(writer.HasPendingWrite());
+
+  writer.DoScheduledWrite();
+  EXPECT_FALSE(timer.IsRunning());
+  EXPECT_FALSE(writer.HasPendingWrite());
+  RunLoop().RunUntilIdle();
+  EXPECT_FALSE(PathExists(writer.path()));
+}
+
+TEST_F(ImportantFileWriterTest, WriteFileAtomicallyHistogramSuffixTest) {
+  base::HistogramTester histogram_tester;
+  EXPECT_FALSE(PathExists(file_));
+  EXPECT_TRUE(ImportantFileWriter::WriteFileAtomically(file_, "baz", "test"));
+  EXPECT_TRUE(PathExists(file_));
+  EXPECT_EQ("baz", GetFileContent(file_));
+  histogram_tester.ExpectTotalCount("ImportantFile.FileCreateError", 0);
+  histogram_tester.ExpectTotalCount("ImportantFile.FileCreateError.test", 0);
+
+  FilePath invalid_file_ = FilePath().AppendASCII("bad/../non_existent/path");
+  EXPECT_FALSE(PathExists(invalid_file_));
+  EXPECT_FALSE(
+      ImportantFileWriter::WriteFileAtomically(invalid_file_, nullptr));
+  histogram_tester.ExpectTotalCount("ImportantFile.FileCreateError", 1);
+  histogram_tester.ExpectTotalCount("ImportantFile.FileCreateError.test", 0);
+  EXPECT_FALSE(
+      ImportantFileWriter::WriteFileAtomically(invalid_file_, nullptr, "test"));
+  histogram_tester.ExpectTotalCount("ImportantFile.FileCreateError", 1);
+  histogram_tester.ExpectTotalCount("ImportantFile.FileCreateError.test", 1);
+}
+
+}  // namespace base
diff --git a/base/files/memory_mapped_file.cc b/base/files/memory_mapped_file.cc
new file mode 100644
index 0000000..ccd9e23
--- /dev/null
+++ b/base/files/memory_mapped_file.cc
@@ -0,0 +1,130 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/memory_mapped_file.h"
+
+#include <utility>
+
+#include "base/files/file_path.h"
+#include "base/logging.h"
+#include "base/numerics/safe_math.h"
+#include "base/sys_info.h"
+#include "build/build_config.h"
+
+namespace base {
+
+const MemoryMappedFile::Region MemoryMappedFile::Region::kWholeFile = {0, 0};
+
+bool MemoryMappedFile::Region::operator==(
+    const MemoryMappedFile::Region& other) const {
+  return other.offset == offset && other.size == size;
+}
+
+bool MemoryMappedFile::Region::operator!=(
+    const MemoryMappedFile::Region& other) const {
+  return other.offset != offset || other.size != size;
+}
+
+MemoryMappedFile::~MemoryMappedFile() {
+  CloseHandles();
+}
+
+#if !defined(OS_NACL)
+bool MemoryMappedFile::Initialize(const FilePath& file_name, Access access) {
+  if (IsValid())
+    return false;
+
+  uint32_t flags = 0;
+  switch (access) {
+    case READ_ONLY:
+      flags = File::FLAG_OPEN | File::FLAG_READ;
+      break;
+    case READ_WRITE:
+      flags = File::FLAG_OPEN | File::FLAG_READ | File::FLAG_WRITE;
+      break;
+    case READ_WRITE_EXTEND:
+      // Can't open with "extend" because no maximum size is known.
+      NOTREACHED();
+  }
+  file_.Initialize(file_name, flags);
+
+  if (!file_.IsValid()) {
+    DLOG(ERROR) << "Couldn't open " << file_name.AsUTF8Unsafe();
+    return false;
+  }
+
+  if (!MapFileRegionToMemory(Region::kWholeFile, access)) {
+    CloseHandles();
+    return false;
+  }
+
+  return true;
+}
+
+bool MemoryMappedFile::Initialize(File file, Access access) {
+  DCHECK_NE(READ_WRITE_EXTEND, access);
+  return Initialize(std::move(file), Region::kWholeFile, access);
+}
+
+bool MemoryMappedFile::Initialize(File file,
+                                  const Region& region,
+                                  Access access) {
+  switch (access) {
+    case READ_WRITE_EXTEND:
+      DCHECK(Region::kWholeFile != region);
+      {
+        CheckedNumeric<int64_t> region_end(region.offset);
+        region_end += region.size;
+        if (!region_end.IsValid()) {
+          DLOG(ERROR) << "Region bounds exceed maximum for base::File.";
+          return false;
+        }
+      }
+      FALLTHROUGH;
+    case READ_ONLY:
+    case READ_WRITE:
+      // Ensure that the region values are valid.
+      if (region.offset < 0) {
+        DLOG(ERROR) << "Region bounds are not valid.";
+        return false;
+      }
+      break;
+  }
+
+  if (IsValid())
+    return false;
+
+  if (region != Region::kWholeFile)
+    DCHECK_GE(region.offset, 0);
+
+  file_ = std::move(file);
+
+  if (!MapFileRegionToMemory(region, access)) {
+    CloseHandles();
+    return false;
+  }
+
+  return true;
+}
+
+bool MemoryMappedFile::IsValid() const {
+  return data_ != nullptr;
+}
+
+// static
+void MemoryMappedFile::CalculateVMAlignedBoundaries(int64_t start,
+                                                    size_t size,
+                                                    int64_t* aligned_start,
+                                                    size_t* aligned_size,
+                                                    int32_t* offset) {
+  // Sadly, on Windows, the mmap alignment is not just equal to the page size.
+  auto mask = SysInfo::VMAllocationGranularity() - 1;
+  DCHECK(IsValueInRangeForNumericType<int32_t>(mask));
+  *offset = start & mask;
+  *aligned_start = start & ~mask;
+  *aligned_size = (size + *offset + mask) & ~mask;
+}
+#endif  // !defined(OS_NACL)
+
+}  // namespace base
diff --git a/base/files/memory_mapped_file.h b/base/files/memory_mapped_file.h
new file mode 100644
index 0000000..04f4336
--- /dev/null
+++ b/base/files/memory_mapped_file.h
@@ -0,0 +1,136 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FILES_MEMORY_MAPPED_FILE_H_
+#define BASE_FILES_MEMORY_MAPPED_FILE_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/base_export.h"
+#include "base/files/file.h"
+#include "base/macros.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#endif
+
+namespace base {
+
+class FilePath;
+
+class BASE_EXPORT MemoryMappedFile {
+ public:
+  enum Access {
+    // Mapping a file into memory effectively allows for file I/O on any thread.
+    // The accessing thread could be paused while data from the file is paged
+    // into memory. Worse, a corrupted filesystem could cause a SEGV within the
+    // program instead of just an I/O error.
+    READ_ONLY,
+
+    // This provides read/write access to a file and must be used with care
+    // because of the additional subtleties involved. Though the OS will write
+    // the data out on its own time, too many dirty pages can cause
+    // the OS to pause the thread while it writes them out. The pause can
+    // be as much as 1s on some systems.
+    READ_WRITE,
+
+    // This provides read/write access but with the ability to write beyond
+    // the end of the existing file up to a maximum size specified as the
+    // "region". Depending on the OS, the file may or may not be immediately
+    // extended to the maximum size though it won't be loaded in RAM until
+    // needed. Note, however, that the maximum size will still be reserved
+    // in the process address space.
+    READ_WRITE_EXTEND,
+  };
+
+  // The default constructor sets all members to invalid/null values.
+  MemoryMappedFile();
+  ~MemoryMappedFile();
+
+  // Used to hold information about a region [offset, offset + size) of a file.
+  struct BASE_EXPORT Region {
+    static const Region kWholeFile;
+
+    bool operator==(const Region& other) const;
+    bool operator!=(const Region& other) const;
+
+    // Start of the region (measured in bytes from the beginning of the file).
+    int64_t offset;
+
+    // Length of the region in bytes.
+    size_t size;
+  };
+
+  // Opens an existing file and maps it into memory. |access| can be read-only
+  // or read/write but not read/write+extend. If this object already points
+  // to a valid memory mapped file then this method will fail and return
+  // false. If it cannot open the file, the file does not exist, or the
+  // memory mapping fails, it will return false.
+  bool Initialize(const FilePath& file_name, Access access);
+  bool Initialize(const FilePath& file_name) {
+    return Initialize(file_name, READ_ONLY);
+  }
+
+  // As above, but works with an already-opened file. |access| can be read-only
+  // or read/write but not read/write+extend. MemoryMappedFile takes ownership
+  // of |file| and closes it when done. |file| must have been opened with
+  // permissions suitable for |access|. If the memory mapping fails, it will
+  // return false.
+  bool Initialize(File file, Access access);
+  bool Initialize(File file) {
+    return Initialize(std::move(file), READ_ONLY);
+  }
+
+  // As above, but works with a region of an already-opened file. All forms of
+  // |access| are allowed. If READ_WRITE_EXTEND is specified then |region|
+  // provides the maximum size of the file. If the memory mapping fails, it
+  // will return false.
+  bool Initialize(File file, const Region& region, Access access);
+  bool Initialize(File file, const Region& region) {
+    return Initialize(std::move(file), region, READ_ONLY);
+  }
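+
+  // A minimal usage sketch (illustrative only; ProcessBytes() stands in for
+  // whatever the caller does with the mapped bytes):
+  //
+  //   base::MemoryMappedFile map;
+  //   if (map.Initialize(base::FilePath(FILE_PATH_LITERAL("data.bin"))))
+  //     ProcessBytes(map.data(), map.length());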
+
+  const uint8_t* data() const { return data_; }
+  uint8_t* data() { return data_; }
+  size_t length() const { return length_; }
+
+  // Is file_ a valid file handle that points to an open, memory mapped file?
+  bool IsValid() const;
+
+ private:
+  // Given the arbitrarily aligned memory region [start, size], returns the
+  // boundaries of the region aligned to the granularity specified by the OS,
+  // (a page on Linux, ~32k on Windows) as follows:
+  // - |aligned_start| is page aligned and <= |start|.
+  // - |aligned_size| is a multiple of the VM granularity and >= |size|.
+  // - |offset| is the displacement of |start| w.r.t |aligned_start|.
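+  // For example, with a (hypothetical) 4096-byte granularity, start = 10000
+  // and size = 100 give offset = 1808, aligned_start = 8192 and
+  // aligned_size = 4096, since [8192, 12288) covers [10000, 10100).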
+  static void CalculateVMAlignedBoundaries(int64_t start,
+                                           size_t size,
+                                           int64_t* aligned_start,
+                                           size_t* aligned_size,
+                                           int32_t* offset);
+
+  // Map the file to memory, set data_ to that memory address. Return true on
+  // success, false on any kind of failure. This is a helper for Initialize().
+  bool MapFileRegionToMemory(const Region& region, Access access);
+
+  // Closes all open handles.
+  void CloseHandles();
+
+  File file_;
+  uint8_t* data_;
+  size_t length_;
+
+#if defined(OS_WIN)
+  win::ScopedHandle file_mapping_;
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(MemoryMappedFile);
+};
+
+}  // namespace base
+
+#endif  // BASE_FILES_MEMORY_MAPPED_FILE_H_
diff --git a/base/files/memory_mapped_file_posix.cc b/base/files/memory_mapped_file_posix.cc
new file mode 100644
index 0000000..45a0aea
--- /dev/null
+++ b/base/files/memory_mapped_file_posix.cc
@@ -0,0 +1,185 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/memory_mapped_file.h"
+
+#include <fcntl.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include "base/logging.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/threading/thread_restrictions.h"
+#include "build/build_config.h"
+
+#if defined(OS_ANDROID)
+#include <android/api-level.h>
+#endif
+
+namespace base {
+
+MemoryMappedFile::MemoryMappedFile() : data_(nullptr), length_(0) {}
+
+#if !defined(OS_NACL)
+bool MemoryMappedFile::MapFileRegionToMemory(
+    const MemoryMappedFile::Region& region,
+    Access access) {
+  AssertBlockingAllowed();
+
+  off_t map_start = 0;
+  size_t map_size = 0;
+  int32_t data_offset = 0;
+
+  if (region == MemoryMappedFile::Region::kWholeFile) {
+    int64_t file_len = file_.GetLength();
+    if (file_len < 0) {
+      DPLOG(ERROR) << "fstat " << file_.GetPlatformFile();
+      return false;
+    }
+    if (!IsValueInRangeForNumericType<size_t>(file_len))
+      return false;
+    map_size = static_cast<size_t>(file_len);
+    length_ = map_size;
+  } else {
+    // The region can be arbitrarily aligned. mmap, instead, requires both the
+    // start and size to be page-aligned. Hence, we map here the page-aligned
+    // outer region [|aligned_start|, |aligned_start| + |aligned_size|] which
+    // contains |region| and then add up the |data_offset| displacement.
+    int64_t aligned_start = 0;
+    size_t aligned_size = 0;
+    CalculateVMAlignedBoundaries(region.offset,
+                                 region.size,
+                                 &aligned_start,
+                                 &aligned_size,
+                                 &data_offset);
+
+    // Ensure that the casts in the mmap call below are sane.
+    if (aligned_start < 0 ||
+        !IsValueInRangeForNumericType<off_t>(aligned_start)) {
+      DLOG(ERROR) << "Region bounds are not valid for mmap";
+      return false;
+    }
+
+    map_start = static_cast<off_t>(aligned_start);
+    map_size = aligned_size;
+    length_ = region.size;
+  }
+
+  int flags = 0;
+  switch (access) {
+    case READ_ONLY:
+      flags |= PROT_READ;
+      break;
+
+    case READ_WRITE:
+      flags |= PROT_READ | PROT_WRITE;
+      break;
+
+    case READ_WRITE_EXTEND:
+      flags |= PROT_READ | PROT_WRITE;
+
+      const int64_t new_file_len = region.offset + region.size;
+
+      // POSIX won't auto-extend the file when it is written so it must first
+      // be explicitly extended to the maximum size. Zeros will fill the new
+      // space. It is assumed that the existing file is fully realized as
+      // otherwise the entire file would have to be read and possibly written.
+      const int64_t original_file_len = file_.GetLength();
+      if (original_file_len < 0) {
+        DPLOG(ERROR) << "fstat " << file_.GetPlatformFile();
+        return false;
+      }
+
+      // Increase the actual length of the file, if necessary. This can fail if
+      // the disk is full and the OS doesn't support sparse files.
+      if (!file_.SetLength(std::max(original_file_len, new_file_len))) {
+        DPLOG(ERROR) << "ftruncate " << file_.GetPlatformFile();
+        return false;
+      }
+
+      // Realize the extent of the file so that it can't fail (and crash) later
+      // when trying to write to a memory page that can't be created. This can
+      // fail if the disk is full and the file is sparse.
+      bool do_manual_extension = false;
+
+#if defined(OS_ANDROID) && __ANDROID_API__ < 21
+      // Only Android API>=21 supports the fallocate call. Older versions need
+      // to manually extend the file by writing zeros at block intervals.
+      do_manual_extension = true;
+#elif defined(OS_MACOSX)
+      // MacOS doesn't support fallocate even though their new APFS filesystem
+      // does support sparse files. It does, however, have the functionality
+      // available via fcntl.
+      // See also: https://openradar.appspot.com/32720223
+      fstore_t params = {F_ALLOCATEALL, F_PEOFPOSMODE, region.offset,
+                         region.size, 0};
+      if (fcntl(file_.GetPlatformFile(), F_PREALLOCATE, &params) != 0) {
+        DPLOG(ERROR) << "F_PREALLOCATE";
+        // This can fail because the filesystem doesn't support it so don't
+        // give up just yet. Try the manual method below.
+        do_manual_extension = true;
+      }
+#else
+      if (posix_fallocate(file_.GetPlatformFile(), region.offset,
+                          region.size) != 0) {
+        DPLOG(ERROR) << "posix_fallocate";
+        // This can fail because the filesystem doesn't support it so don't
+        // give up just yet. Try the manual method below.
+        do_manual_extension = true;
+      }
+#endif
+
+      // Manually realize the extended file by writing bytes to it at intervals.
+      if (do_manual_extension) {
+        int64_t block_size = 512;  // Start with something safe.
+        struct stat statbuf;
+        if (fstat(file_.GetPlatformFile(), &statbuf) == 0 &&
+            statbuf.st_blksize > 0) {
+          block_size = statbuf.st_blksize;
+        }
+
+        // Write starting at the next block boundary after the old file length.
+        const int64_t extension_start =
+            (original_file_len + block_size - 1) & ~(block_size - 1);
+        for (int64_t i = extension_start; i < new_file_len; i += block_size) {
+          char existing_byte;
+          if (pread(file_.GetPlatformFile(), &existing_byte, 1, i) != 1)
+            return false;  // Can't read? Not viable.
+          if (existing_byte != 0)
+            continue;  // Block has data so must already exist.
+          if (pwrite(file_.GetPlatformFile(), &existing_byte, 1, i) != 1)
+            return false;  // Can't write? Not viable.
+        }
+      }
+
+      break;
+  }
+
+  data_ = static_cast<uint8_t*>(mmap(nullptr, map_size, flags, MAP_SHARED,
+                                     file_.GetPlatformFile(), map_start));
+  if (data_ == MAP_FAILED) {
+    DPLOG(ERROR) << "mmap " << file_.GetPlatformFile();
+    return false;
+  }
+
+  data_ += data_offset;
+  return true;
+}
+#endif
+
+void MemoryMappedFile::CloseHandles() {
+  AssertBlockingAllowed();
+
+  if (data_ != nullptr)
+    munmap(data_, length_);
+  file_.Close();
+
+  data_ = nullptr;
+  length_ = 0;
+}
+
+}  // namespace base
diff --git a/base/files/memory_mapped_file_unittest.cc b/base/files/memory_mapped_file_unittest.cc
new file mode 100644
index 0000000..b7acc61
--- /dev/null
+++ b/base/files/memory_mapped_file_unittest.cc
@@ -0,0 +1,243 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/memory_mapped_file.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <utility>
+
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/platform_test.h"
+
+namespace base {
+
+namespace {
+
+// Create a temporary buffer and fill it with a watermark sequence.
+std::unique_ptr<uint8_t[]> CreateTestBuffer(size_t size, size_t offset) {
+  std::unique_ptr<uint8_t[]> buf(new uint8_t[size]);
+  for (size_t i = 0; i < size; ++i)
+    buf.get()[i] = static_cast<uint8_t>((offset + i) % 253);
+  return buf;
+}
+
+// Check that the watermark sequence is consistent with the |offset| provided.
+bool CheckBufferContents(const uint8_t* data, size_t size, size_t offset) {
+  std::unique_ptr<uint8_t[]> test_data(CreateTestBuffer(size, offset));
+  return memcmp(test_data.get(), data, size) == 0;
+}
+
+class MemoryMappedFileTest : public PlatformTest {
+ protected:
+  void SetUp() override {
+    PlatformTest::SetUp();
+    CreateTemporaryFile(&temp_file_path_);
+  }
+
+  void TearDown() override { EXPECT_TRUE(DeleteFile(temp_file_path_, false)); }
+
+  void CreateTemporaryTestFile(size_t size) {
+    File file(temp_file_path_,
+              File::FLAG_CREATE_ALWAYS | File::FLAG_READ | File::FLAG_WRITE);
+    EXPECT_TRUE(file.IsValid());
+
+    std::unique_ptr<uint8_t[]> test_data(CreateTestBuffer(size, 0));
+    size_t bytes_written =
+        file.Write(0, reinterpret_cast<char*>(test_data.get()), size);
+    EXPECT_EQ(size, bytes_written);
+    file.Close();
+  }
+
+  const FilePath temp_file_path() const { return temp_file_path_; }
+
+ private:
+  FilePath temp_file_path_;
+};
+
+TEST_F(MemoryMappedFileTest, MapWholeFileByPath) {
+  const size_t kFileSize = 68 * 1024;
+  CreateTemporaryTestFile(kFileSize);
+  MemoryMappedFile map;
+  map.Initialize(temp_file_path());
+  ASSERT_EQ(kFileSize, map.length());
+  ASSERT_TRUE(map.data() != nullptr);
+  EXPECT_TRUE(map.IsValid());
+  ASSERT_TRUE(CheckBufferContents(map.data(), kFileSize, 0));
+}
+
+TEST_F(MemoryMappedFileTest, MapWholeFileByFD) {
+  const size_t kFileSize = 68 * 1024;
+  CreateTemporaryTestFile(kFileSize);
+  MemoryMappedFile map;
+  map.Initialize(File(temp_file_path(), File::FLAG_OPEN | File::FLAG_READ));
+  ASSERT_EQ(kFileSize, map.length());
+  ASSERT_TRUE(map.data() != nullptr);
+  EXPECT_TRUE(map.IsValid());
+  ASSERT_TRUE(CheckBufferContents(map.data(), kFileSize, 0));
+}
+
+TEST_F(MemoryMappedFileTest, MapSmallFile) {
+  const size_t kFileSize = 127;
+  CreateTemporaryTestFile(kFileSize);
+  MemoryMappedFile map;
+  map.Initialize(temp_file_path());
+  ASSERT_EQ(kFileSize, map.length());
+  ASSERT_TRUE(map.data() != nullptr);
+  EXPECT_TRUE(map.IsValid());
+  ASSERT_TRUE(CheckBufferContents(map.data(), kFileSize, 0));
+}
+
+TEST_F(MemoryMappedFileTest, MapWholeFileUsingRegion) {
+  const size_t kFileSize = 157 * 1024;
+  CreateTemporaryTestFile(kFileSize);
+  MemoryMappedFile map;
+
+  File file(temp_file_path(), File::FLAG_OPEN | File::FLAG_READ);
+  map.Initialize(std::move(file), MemoryMappedFile::Region::kWholeFile);
+  ASSERT_EQ(kFileSize, map.length());
+  ASSERT_TRUE(map.data() != nullptr);
+  EXPECT_TRUE(map.IsValid());
+  ASSERT_TRUE(CheckBufferContents(map.data(), kFileSize, 0));
+}
+
+TEST_F(MemoryMappedFileTest, MapPartialRegionAtBeginning) {
+  const size_t kFileSize = 157 * 1024;
+  const size_t kPartialSize = 4 * 1024 + 32;
+  CreateTemporaryTestFile(kFileSize);
+  MemoryMappedFile map;
+
+  File file(temp_file_path(), File::FLAG_OPEN | File::FLAG_READ);
+  MemoryMappedFile::Region region = {0, kPartialSize};
+  map.Initialize(std::move(file), region);
+  ASSERT_EQ(kPartialSize, map.length());
+  ASSERT_TRUE(map.data() != nullptr);
+  EXPECT_TRUE(map.IsValid());
+  ASSERT_TRUE(CheckBufferContents(map.data(), kPartialSize, 0));
+}
+
+TEST_F(MemoryMappedFileTest, MapPartialRegionAtEnd) {
+  const size_t kFileSize = 157 * 1024;
+  const size_t kPartialSize = 5 * 1024 - 32;
+  const size_t kOffset = kFileSize - kPartialSize;
+  CreateTemporaryTestFile(kFileSize);
+  MemoryMappedFile map;
+
+  File file(temp_file_path(), File::FLAG_OPEN | File::FLAG_READ);
+  MemoryMappedFile::Region region = {kOffset, kPartialSize};
+  map.Initialize(std::move(file), region);
+  ASSERT_EQ(kPartialSize, map.length());
+  ASSERT_TRUE(map.data() != nullptr);
+  EXPECT_TRUE(map.IsValid());
+  ASSERT_TRUE(CheckBufferContents(map.data(), kPartialSize, kOffset));
+}
+
+TEST_F(MemoryMappedFileTest, MapSmallPartialRegionInTheMiddle) {
+  const size_t kFileSize = 157 * 1024;
+  const size_t kOffset = 1024 * 5 + 32;
+  const size_t kPartialSize = 8;
+
+  CreateTemporaryTestFile(kFileSize);
+  MemoryMappedFile map;
+
+  File file(temp_file_path(), File::FLAG_OPEN | File::FLAG_READ);
+  MemoryMappedFile::Region region = {kOffset, kPartialSize};
+  map.Initialize(std::move(file), region);
+  ASSERT_EQ(kPartialSize, map.length());
+  ASSERT_TRUE(map.data() != nullptr);
+  EXPECT_TRUE(map.IsValid());
+  ASSERT_TRUE(CheckBufferContents(map.data(), kPartialSize, kOffset));
+}
+
+TEST_F(MemoryMappedFileTest, MapLargePartialRegionInTheMiddle) {
+  const size_t kFileSize = 157 * 1024;
+  const size_t kOffset = 1024 * 5 + 32;
+  const size_t kPartialSize = 16 * 1024 - 32;
+
+  CreateTemporaryTestFile(kFileSize);
+  MemoryMappedFile map;
+
+  File file(temp_file_path(), File::FLAG_OPEN | File::FLAG_READ);
+  MemoryMappedFile::Region region = {kOffset, kPartialSize};
+  map.Initialize(std::move(file), region);
+  ASSERT_EQ(kPartialSize, map.length());
+  ASSERT_TRUE(map.data() != nullptr);
+  EXPECT_TRUE(map.IsValid());
+  ASSERT_TRUE(CheckBufferContents(map.data(), kPartialSize, kOffset));
+}
+
+TEST_F(MemoryMappedFileTest, WriteableFile) {
+  const size_t kFileSize = 127;
+  CreateTemporaryTestFile(kFileSize);
+
+  {
+    MemoryMappedFile map;
+    map.Initialize(temp_file_path(), MemoryMappedFile::READ_WRITE);
+    ASSERT_EQ(kFileSize, map.length());
+    ASSERT_TRUE(map.data() != nullptr);
+    EXPECT_TRUE(map.IsValid());
+    ASSERT_TRUE(CheckBufferContents(map.data(), kFileSize, 0));
+
+    uint8_t* bytes = map.data();
+    bytes[0] = 'B';
+    bytes[1] = 'a';
+    bytes[2] = 'r';
+    bytes[kFileSize - 1] = '!';
+    EXPECT_FALSE(CheckBufferContents(map.data(), kFileSize, 0));
+    EXPECT_TRUE(CheckBufferContents(map.data() + 3, kFileSize - 4, 3));
+  }
+
+  int64_t file_size;
+  ASSERT_TRUE(GetFileSize(temp_file_path(), &file_size));
+  EXPECT_EQ(static_cast<int64_t>(kFileSize), file_size);
+
+  std::string contents;
+  ASSERT_TRUE(ReadFileToString(temp_file_path(), &contents));
+  EXPECT_EQ("Bar", contents.substr(0, 3));
+  EXPECT_EQ("!", contents.substr(kFileSize - 1, 1));
+}
+
+TEST_F(MemoryMappedFileTest, ExtendableFile) {
+  const size_t kFileSize = 127;
+  const size_t kFileExtend = 100;
+  CreateTemporaryTestFile(kFileSize);
+
+  {
+    File file(temp_file_path(),
+              File::FLAG_OPEN | File::FLAG_READ | File::FLAG_WRITE);
+    MemoryMappedFile::Region region = {0, kFileSize + kFileExtend};
+    MemoryMappedFile map;
+    map.Initialize(std::move(file), region,
+                   MemoryMappedFile::READ_WRITE_EXTEND);
+    EXPECT_EQ(kFileSize + kFileExtend, map.length());
+    ASSERT_TRUE(map.data() != nullptr);
+    EXPECT_TRUE(map.IsValid());
+    ASSERT_TRUE(CheckBufferContents(map.data(), kFileSize, 0));
+
+    uint8_t* bytes = map.data();
+    EXPECT_EQ(0, bytes[kFileSize + 0]);
+    EXPECT_EQ(0, bytes[kFileSize + 1]);
+    EXPECT_EQ(0, bytes[kFileSize + 2]);
+    bytes[kFileSize + 0] = 'B';
+    bytes[kFileSize + 1] = 'A';
+    bytes[kFileSize + 2] = 'Z';
+    EXPECT_TRUE(CheckBufferContents(map.data(), kFileSize, 0));
+  }
+
+  int64_t file_size;
+  ASSERT_TRUE(GetFileSize(temp_file_path(), &file_size));
+  EXPECT_LE(static_cast<int64_t>(kFileSize + 3), file_size);
+  EXPECT_GE(static_cast<int64_t>(kFileSize + kFileExtend), file_size);
+
+  std::string contents;
+  ASSERT_TRUE(ReadFileToString(temp_file_path(), &contents));
+  EXPECT_EQ("BAZ", contents.substr(kFileSize, 3));
+}
+
+}  // namespace
+
+}  // namespace base
diff --git a/base/files/memory_mapped_file_win.cc b/base/files/memory_mapped_file_win.cc
new file mode 100644
index 0000000..26869f6
--- /dev/null
+++ b/base/files/memory_mapped_file_win.cc
@@ -0,0 +1,109 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/memory_mapped_file.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <limits>
+
+#include "base/files/file_path.h"
+#include "base/strings/string16.h"
+#include "base/threading/thread_restrictions.h"
+
+#include <windows.h>
+
+namespace base {
+
+MemoryMappedFile::MemoryMappedFile() : data_(NULL), length_(0) {
+}
+
+bool MemoryMappedFile::MapFileRegionToMemory(
+    const MemoryMappedFile::Region& region,
+    Access access) {
+  AssertBlockingAllowed();
+
+  if (!file_.IsValid())
+    return false;
+
+  int flags = 0;
+  ULARGE_INTEGER size = {};
+  switch (access) {
+    case READ_ONLY:
+      flags |= PAGE_READONLY;
+      break;
+    case READ_WRITE:
+      flags |= PAGE_READWRITE;
+      break;
+    case READ_WRITE_EXTEND:
+      flags |= PAGE_READWRITE;
+      size.QuadPart = region.size;
+      break;
+  }
+
+  file_mapping_.Set(::CreateFileMapping(file_.GetPlatformFile(), NULL, flags,
+                                        size.HighPart, size.LowPart, NULL));
+  if (!file_mapping_.IsValid())
+    return false;
+
+  LARGE_INTEGER map_start = {};
+  SIZE_T map_size = 0;
+  int32_t data_offset = 0;
+
+  if (region == MemoryMappedFile::Region::kWholeFile) {
+    DCHECK_NE(READ_WRITE_EXTEND, access);
+    int64_t file_len = file_.GetLength();
+    if (file_len <= 0 || !IsValueInRangeForNumericType<size_t>(file_len))
+      return false;
+    length_ = static_cast<size_t>(file_len);
+  } else {
+    // The region can be arbitrarily aligned. MapViewOfFile, instead, requires
+    // that the start address is aligned to the VM granularity (which is
+    // typically larger than a page size, for instance 32k).
+    // Also, unlike POSIX's mmap, the |map_size| doesn't have to be aligned
+    // and must be less than or equal to the mapped file size. We map here the
+    // outer region [|aligned_start|, |aligned_start| + |full_map_size|]
+    // which contains |region| and then add up the |data_offset| displacement.
+    int64_t aligned_start = 0;
+    size_t ignored = 0U;
+    CalculateVMAlignedBoundaries(
+        region.offset, region.size, &aligned_start, &ignored, &data_offset);
+    int64_t full_map_size = region.size + data_offset;
+
+    // Ensure that the casts below in the MapViewOfFile call are sane.
+    if (aligned_start < 0 || full_map_size < 0 ||
+        !IsValueInRangeForNumericType<SIZE_T>(
+            static_cast<uint64_t>(full_map_size))) {
+      DLOG(ERROR) << "Region bounds are not valid for MapViewOfFile";
+      return false;
+    }
+    map_start.QuadPart = aligned_start;
+    map_size = static_cast<SIZE_T>(full_map_size);
+    length_ = region.size;
+  }
+
+  data_ = static_cast<uint8_t*>(
+      ::MapViewOfFile(file_mapping_.Get(),
+                      (flags & PAGE_READONLY) ? FILE_MAP_READ : FILE_MAP_WRITE,
+                      map_start.HighPart, map_start.LowPart, map_size));
+  if (data_ == NULL)
+    return false;
+  data_ += data_offset;
+  return true;
+}
+
+void MemoryMappedFile::CloseHandles() {
+  if (data_)
+    ::UnmapViewOfFile(data_);
+  if (file_mapping_.IsValid())
+    file_mapping_.Close();
+  if (file_.IsValid())
+    file_.Close();
+
+  data_ = NULL;
+  length_ = 0;
+}
+
+}  // namespace base
diff --git a/base/files/platform_file.h b/base/files/platform_file.h
new file mode 100644
index 0000000..3929a0d
--- /dev/null
+++ b/base/files/platform_file.h
@@ -0,0 +1,43 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FILES_PLATFORM_FILE_H_
+#define BASE_FILES_PLATFORM_FILE_H_
+
+#include "base/files/scoped_file.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include "base/win/scoped_handle.h"
+#include "base/win/windows_types.h"
+#endif
+
+// This file defines platform-independent types for dealing with
+// platform-dependent files. If possible, use the higher-level base::File class
+// rather than these primitives.
+
+namespace base {
+
+#if defined(OS_WIN)
+
+using PlatformFile = HANDLE;
+using ScopedPlatformFile = ::base::win::ScopedHandle;
+
+// It would be nice to make this constexpr but INVALID_HANDLE_VALUE is a
+// ((void*)(-1)) which Clang rejects since reinterpret_cast is technically
+// disallowed in constexpr. Visual Studio accepts this, however.
+const PlatformFile kInvalidPlatformFile = INVALID_HANDLE_VALUE;
+
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+
+using PlatformFile = int;
+using ScopedPlatformFile = ::base::ScopedFD;
+
+constexpr PlatformFile kInvalidPlatformFile = -1;
+
+#endif
+
+}  // namespace base
+
+#endif  // BASE_FILES_PLATFORM_FILE_H_
diff --git a/base/files/scoped_file.cc b/base/files/scoped_file.cc
new file mode 100644
index 0000000..1b9227d
--- /dev/null
+++ b/base/files/scoped_file.cc
@@ -0,0 +1,49 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/scoped_file.h"
+
+#include "base/logging.h"
+#include "build/build_config.h"
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include <errno.h>
+#include <unistd.h>
+
+#include "base/posix/eintr_wrapper.h"
+#endif
+
+namespace base {
+namespace internal {
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+
+// static
+void ScopedFDCloseTraits::Free(int fd) {
+  // It's important to crash here.
+  // There are security implications to not closing a file descriptor
+  // properly. As file descriptors are "capabilities", keeping them open
+  // would make the current process keep access to a resource. Much of
+  // Chrome relies on being able to "drop" such access.
+  // It's especially problematic on Linux with the setuid sandbox, where
+  // a single open directory would bypass the entire security model.
+  int ret = IGNORE_EINTR(close(fd));
+
+#if defined(OS_LINUX) || defined(OS_MACOSX) || defined(OS_FUCHSIA) || \
+    defined(OS_ANDROID)
+  // NB: Some file descriptors can return errors from close() e.g. network
+  // filesystems such as NFS and Linux input devices. On Linux, macOS, and
+  // Fuchsia's POSIX layer, errors from close other than EBADF do not indicate
+  // failure to actually close the fd.
+  if (ret != 0 && errno != EBADF)
+    ret = 0;
+#endif
+
+  PCHECK(0 == ret);
+}
+
+#endif  // OS_POSIX || OS_FUCHSIA
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/files/scoped_file.h b/base/files/scoped_file.h
new file mode 100644
index 0000000..e32a603
--- /dev/null
+++ b/base/files/scoped_file.h
@@ -0,0 +1,62 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FILES_SCOPED_FILE_H_
+#define BASE_FILES_SCOPED_FILE_H_
+
+#include <stdio.h>
+
+#include <memory>
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/scoped_generic.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace internal {
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+struct BASE_EXPORT ScopedFDCloseTraits {
+  static int InvalidValue() {
+    return -1;
+  }
+  static void Free(int fd);
+};
+#endif
+
+// Functor for |ScopedFILE| (below).
+struct ScopedFILECloser {
+  inline void operator()(FILE* x) const {
+    if (x)
+      fclose(x);
+  }
+};
+
+}  // namespace internal
+
+// -----------------------------------------------------------------------------
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+// A low-level Posix file descriptor closer class. Use this when writing
+// platform-specific code, especially that does non-file-like things with the
+// FD (like sockets).
+//
+// If you're writing low-level Windows code, see base/win/scoped_handle.h
+// which provides some additional functionality.
+//
+// If you're writing cross-platform code that deals with actual files, you
+// should generally use base::File instead which can be constructed with a
+// handle, and in addition to handling ownership, has convenient cross-platform
+// file manipulation functions on it.
+typedef ScopedGeneric<int, internal::ScopedFDCloseTraits> ScopedFD;
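+
+// A minimal usage sketch (illustrative; assumes <fcntl.h> and
+// "base/posix/eintr_wrapper.h" are also included):
+//
+//   base::ScopedFD fd(HANDLE_EINTR(open("/dev/null", O_RDONLY)));
+//   if (fd.is_valid())
+//     DoSomethingWith(fd.get());  // DoSomethingWith() is hypothetical.
+//   // The descriptor is close()d when |fd| goes out of scope.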
+#endif
+
+// Automatically closes |FILE*|s.
+typedef std::unique_ptr<FILE, internal::ScopedFILECloser> ScopedFILE;
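+
+// A similar sketch for ScopedFILE (illustrative only):
+//
+//   char line[256];
+//   base::ScopedFILE file(fopen("/tmp/config", "r"));
+//   if (file)
+//     fgets(line, sizeof(line), file.get());
+//   // fclose() runs automatically when |file| goes out of scope.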
+
+}  // namespace base
+
+#endif  // BASE_FILES_SCOPED_FILE_H_
diff --git a/base/files/scoped_temp_dir.cc b/base/files/scoped_temp_dir.cc
new file mode 100644
index 0000000..01ec0f0
--- /dev/null
+++ b/base/files/scoped_temp_dir.cc
@@ -0,0 +1,97 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/scoped_temp_dir.h"
+
+#include "base/files/file_util.h"
+#include "base/logging.h"
+
+namespace base {
+
+namespace {
+
+constexpr FilePath::CharType kScopedDirPrefix[] =
+    FILE_PATH_LITERAL("scoped_dir");
+
+}  // namespace
+
+ScopedTempDir::ScopedTempDir() = default;
+
+ScopedTempDir::~ScopedTempDir() {
+  if (!path_.empty() && !Delete())
+    DLOG(WARNING) << "Could not delete temp dir in dtor.";
+}
+
+bool ScopedTempDir::CreateUniqueTempDir() {
+  if (!path_.empty())
+    return false;
+
+  // This "scoped_dir" prefix is only used on Windows and serves as a template
+  // for the unique name.
+  if (!base::CreateNewTempDirectory(kScopedDirPrefix, &path_))
+    return false;
+
+  return true;
+}
+
+bool ScopedTempDir::CreateUniqueTempDirUnderPath(const FilePath& base_path) {
+  if (!path_.empty())
+    return false;
+
+  // If |base_path| does not exist, create it.
+  if (!base::CreateDirectory(base_path))
+    return false;
+
+  // Create a new, uniquely named directory under |base_path|.
+  if (!base::CreateTemporaryDirInDir(base_path, kScopedDirPrefix, &path_))
+    return false;
+
+  return true;
+}
+
+bool ScopedTempDir::Set(const FilePath& path) {
+  if (!path_.empty())
+    return false;
+
+  if (!DirectoryExists(path) && !base::CreateDirectory(path))
+    return false;
+
+  path_ = path;
+  return true;
+}
+
+bool ScopedTempDir::Delete() {
+  if (path_.empty())
+    return false;
+
+  bool ret = base::DeleteFile(path_, true);
+  if (ret) {
+    // We only clear the path if we deleted the directory.
+    path_.clear();
+  }
+
+  return ret;
+}
+
+FilePath ScopedTempDir::Take() {
+  FilePath ret = path_;
+  path_ = FilePath();
+  return ret;
+}
+
+const FilePath& ScopedTempDir::GetPath() const {
+  DCHECK(!path_.empty()) << "Did you call CreateUniqueTempDir* before?";
+  return path_;
+}
+
+bool ScopedTempDir::IsValid() const {
+  return !path_.empty() && DirectoryExists(path_);
+}
+
+// static
+const FilePath::CharType* ScopedTempDir::GetTempDirPrefix() {
+  return kScopedDirPrefix;
+}
+
+}  // namespace base
diff --git a/base/files/scoped_temp_dir.h b/base/files/scoped_temp_dir.h
new file mode 100644
index 0000000..872f6f8
--- /dev/null
+++ b/base/files/scoped_temp_dir.h
@@ -0,0 +1,71 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FILES_SCOPED_TEMP_DIR_H_
+#define BASE_FILES_SCOPED_TEMP_DIR_H_
+
+// An object representing a temporary / scratch directory that should be
+// cleaned up (recursively) when this object goes out of scope.  Since deletion
+// occurs during the destructor, no further error handling is possible if the
+// directory fails to be deleted.  As a result, deletion is not guaranteed by
+// this class.  (However note that, whenever possible, by default
+// CreateUniqueTempDir creates the directory in a location that is
+// automatically cleaned up on reboot, or at other appropriate times.)
+//
+// Multiple calls to the methods which establish a temporary directory
+// (CreateUniqueTempDir, CreateUniqueTempDirUnderPath, and Set) must have
+// intervening calls to Delete or Take, or the calls will fail.
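+//
+// A minimal usage sketch (illustrative only):
+//
+//   base::ScopedTempDir dir;
+//   if (dir.CreateUniqueTempDir())
+//     WriteScratchFiles(dir.GetPath());  // WriteScratchFiles() is made up.
+//   // The directory and everything in it is deleted when |dir| goes away.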
+
+#include "base/base_export.h"
+#include "base/files/file_path.h"
+#include "base/macros.h"
+
+namespace base {
+
+class BASE_EXPORT ScopedTempDir {
+ public:
+  // No directory is owned/created initially.
+  ScopedTempDir();
+
+  // Recursively delete path.
+  ~ScopedTempDir();
+
+  // Creates a unique directory in TempPath, and takes ownership of it.
+  // See base::CreateNewTempDirectory().
+  bool CreateUniqueTempDir() WARN_UNUSED_RESULT;
+
+  // Creates a unique directory under a given path, and takes ownership of it.
+  bool CreateUniqueTempDirUnderPath(const FilePath& path) WARN_UNUSED_RESULT;
+
+  // Takes ownership of directory at |path|, creating it if necessary.
+  // Don't call multiple times unless Take() has been called first.
+  bool Set(const FilePath& path) WARN_UNUSED_RESULT;
+
+  // Deletes the temporary directory wrapped by this object.
+  bool Delete() WARN_UNUSED_RESULT;
+
+  // Caller takes ownership of the temporary directory so it won't be destroyed
+  // when this object goes out of scope.
+  FilePath Take();
+
+  // Returns the path to the created directory. Call one of the
+  // CreateUniqueTempDir* methods before getting the path.
+  const FilePath& GetPath() const;
+
+  // Returns true if path_ is non-empty and exists.
+  bool IsValid() const;
+
+  // Returns the prefix used for temp directory names generated by
+  // ScopedTempDirs.
+  static const FilePath::CharType* GetTempDirPrefix();
+
+ private:
+  FilePath path_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedTempDir);
+};
+
+}  // namespace base
+
+#endif  // BASE_FILES_SCOPED_TEMP_DIR_H_
diff --git a/base/files/scoped_temp_dir_unittest.cc b/base/files/scoped_temp_dir_unittest.cc
new file mode 100644
index 0000000..024b438
--- /dev/null
+++ b/base/files/scoped_temp_dir_unittest.cc
@@ -0,0 +1,114 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string>
+
+#include "base/files/file.h"
+#include "base/files/file_util.h"
+#include "base/files/scoped_temp_dir.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(ScopedTempDir, FullPath) {
+  FilePath test_path;
+  base::CreateNewTempDirectory(FILE_PATH_LITERAL("scoped_temp_dir"),
+                               &test_path);
+
+  // Against an existing dir, it should get destroyed when leaving scope.
+  EXPECT_TRUE(DirectoryExists(test_path));
+  {
+    ScopedTempDir dir;
+    EXPECT_TRUE(dir.Set(test_path));
+    EXPECT_TRUE(dir.IsValid());
+  }
+  EXPECT_FALSE(DirectoryExists(test_path));
+
+  {
+    ScopedTempDir dir;
+    EXPECT_TRUE(dir.Set(test_path));
+    // Now the dir doesn't exist, so ensure that it gets created.
+    EXPECT_TRUE(DirectoryExists(test_path));
+    // When we call Take(), it shouldn't get destroyed when leaving scope.
+    FilePath path = dir.Take();
+    EXPECT_EQ(path.value(), test_path.value());
+    EXPECT_FALSE(dir.IsValid());
+  }
+  EXPECT_TRUE(DirectoryExists(test_path));
+
+  // Clean up.
+  {
+    ScopedTempDir dir;
+    EXPECT_TRUE(dir.Set(test_path));
+  }
+  EXPECT_FALSE(DirectoryExists(test_path));
+}
+
+TEST(ScopedTempDir, TempDir) {
+  // In this case, just verify that a directory was created and that it's a
+  // child of TempDir.
+  FilePath test_path;
+  {
+    ScopedTempDir dir;
+    EXPECT_TRUE(dir.CreateUniqueTempDir());
+    test_path = dir.GetPath();
+    EXPECT_TRUE(DirectoryExists(test_path));
+    FilePath tmp_dir;
+    EXPECT_TRUE(base::GetTempDir(&tmp_dir));
+    EXPECT_TRUE(test_path.value().find(tmp_dir.value()) != std::string::npos);
+  }
+  EXPECT_FALSE(DirectoryExists(test_path));
+}
+
+TEST(ScopedTempDir, UniqueTempDirUnderPath) {
+  // Create a path which will contain a unique temp path.
+  FilePath base_path;
+  ASSERT_TRUE(base::CreateNewTempDirectory(FILE_PATH_LITERAL("base_dir"),
+                                           &base_path));
+
+  FilePath test_path;
+  {
+    ScopedTempDir dir;
+    EXPECT_TRUE(dir.CreateUniqueTempDirUnderPath(base_path));
+    test_path = dir.GetPath();
+    EXPECT_TRUE(DirectoryExists(test_path));
+    EXPECT_TRUE(base_path.IsParent(test_path));
+    EXPECT_TRUE(test_path.value().find(base_path.value()) != std::string::npos);
+  }
+  EXPECT_FALSE(DirectoryExists(test_path));
+  base::DeleteFile(base_path, true);
+}
+
+TEST(ScopedTempDir, MultipleInvocations) {
+  ScopedTempDir dir;
+  EXPECT_TRUE(dir.CreateUniqueTempDir());
+  EXPECT_FALSE(dir.CreateUniqueTempDir());
+  EXPECT_TRUE(dir.Delete());
+  EXPECT_TRUE(dir.CreateUniqueTempDir());
+  EXPECT_FALSE(dir.CreateUniqueTempDir());
+  ScopedTempDir other_dir;
+  EXPECT_TRUE(other_dir.Set(dir.Take()));
+  EXPECT_TRUE(dir.CreateUniqueTempDir());
+  EXPECT_FALSE(dir.CreateUniqueTempDir());
+  EXPECT_FALSE(other_dir.CreateUniqueTempDir());
+}
+
+#if defined(OS_WIN)
+TEST(ScopedTempDir, LockedTempDir) {
+  ScopedTempDir dir;
+  EXPECT_TRUE(dir.CreateUniqueTempDir());
+  base::File file(dir.GetPath().Append(FILE_PATH_LITERAL("temp")),
+                  base::File::FLAG_CREATE_ALWAYS | base::File::FLAG_WRITE);
+  EXPECT_TRUE(file.IsValid());
+  EXPECT_EQ(base::File::FILE_OK, file.error_details());
+  EXPECT_FALSE(dir.Delete());  // We should not be able to delete.
+  EXPECT_FALSE(dir.GetPath().empty());  // We should still have a valid path.
+  file.Close();
+  // Now, we should be able to delete.
+  EXPECT_TRUE(dir.Delete());
+}
+#endif  // defined(OS_WIN)
+
+}  // namespace base
diff --git a/base/format_macros.h b/base/format_macros.h
new file mode 100644
index 0000000..1279ff7
--- /dev/null
+++ b/base/format_macros.h
@@ -0,0 +1,97 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FORMAT_MACROS_H_
+#define BASE_FORMAT_MACROS_H_
+
+// This file defines the format macros for some integer types.
+
+// To print a 64-bit value in a portable way:
+//   int64_t value;
+//   printf("xyz:%" PRId64, value);
+// The "d" in the macro corresponds to %d; you can also use PRIu64 etc.
+//
+// For wide strings, prepend "Wide" to the macro:
+//   int64_t value;
+//   StringPrintf(L"xyz: %" WidePRId64, value);
+//
+// To print a size_t value in a portable way:
+//   size_t size;
+//   printf("xyz: %" PRIuS, size);
+// The "u" in the macro corresponds to %u, and S is for "size".
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "build/build_config.h"
+
+#if (defined(OS_POSIX) || defined(OS_FUCHSIA)) && \
+    (defined(_INTTYPES_H) || defined(_INTTYPES_H_)) && !defined(PRId64)
+#error "inttypes.h has already been included before this header file, but "
+#error "without __STDC_FORMAT_MACROS defined."
+#endif
+
+#if (defined(OS_POSIX) || defined(OS_FUCHSIA)) && !defined(__STDC_FORMAT_MACROS)
+#define __STDC_FORMAT_MACROS
+#endif
+
+#include <inttypes.h>
+
+#if defined(OS_WIN)
+
+#if !defined(PRId64) || !defined(PRIu64) || !defined(PRIx64)
+#error "inttypes.h provided by win toolchain should define these."
+#endif
+
+#define WidePRId64 L"I64d"
+#define WidePRIu64 L"I64u"
+#define WidePRIx64 L"I64x"
+
+#if !defined(PRIuS)
+#define PRIuS "Iu"
+#endif
+
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+
+// GCC will concatenate wide and narrow strings correctly, so nothing needs to
+// be done here.
+#define WidePRId64 PRId64
+#define WidePRIu64 PRIu64
+#define WidePRIx64 PRIx64
+
+#if !defined(PRIuS)
+#define PRIuS "zu"
+#endif
+
+#endif  // defined(OS_WIN)
+
+// The size of NSInteger and NSUInteger varies between 32-bit and 64-bit
+// architectures and Apple does not provide standard format macros and
+// recommends casting. This has many drawbacks, so instead define macros
+// for formatting those types.
+#if defined(OS_MACOSX)
+#if defined(ARCH_CPU_64_BITS)
+#if !defined(PRIdNS)
+#define PRIdNS "ld"
+#endif
+#if !defined(PRIuNS)
+#define PRIuNS "lu"
+#endif
+#if !defined(PRIxNS)
+#define PRIxNS "lx"
+#endif
+#else  // defined(ARCH_CPU_64_BITS)
+#if !defined(PRIdNS)
+#define PRIdNS "d"
+#endif
+#if !defined(PRIuNS)
+#define PRIuNS "u"
+#endif
+#if !defined(PRIxNS)
+#define PRIxNS "x"
+#endif
+#endif
+#endif  // defined(OS_MACOSX)
+
+#endif  // BASE_FORMAT_MACROS_H_
diff --git a/base/gmock_unittest.cc b/base/gmock_unittest.cc
new file mode 100644
index 0000000..5c16728
--- /dev/null
+++ b/base/gmock_unittest.cc
@@ -0,0 +1,135 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// This test is a simple sanity check to make sure gmock is able to build/link
+// correctly.  It just instantiates a mock object and runs through a couple of
+// the basic mock features.
+
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// Gmock matchers and actions that we use below.
+using testing::AnyOf;
+using testing::Eq;
+using testing::Return;
+using testing::SetArgPointee;
+using testing::WithArg;
+using testing::_;
+
+namespace {
+
+// Simple class that we can mock out the behavior for.  Everything is virtual
+// for easy mocking.
+class SampleClass {
+ public:
+  SampleClass() = default;
+  virtual ~SampleClass() = default;
+
+  virtual int ReturnSomething() {
+    return -1;
+  }
+
+  virtual void ReturnNothingConstly() const {
+  }
+
+  virtual void OutputParam(int* a) {
+  }
+
+  virtual int ReturnSecond(int a, int b) {
+    return b;
+  }
+};
+
+// Declare a mock for the class.
+class MockSampleClass : public SampleClass {
+ public:
+  MOCK_METHOD0(ReturnSomething, int());
+  MOCK_CONST_METHOD0(ReturnNothingConstly, void());
+  MOCK_METHOD1(OutputParam, void(int* a));
+  MOCK_METHOD2(ReturnSecond, int(int a, int b));
+};
+
+// Create a couple of custom actions.  Custom actions can be used for adding
+// more complex behavior into your mock...though if you start needing these, ask
+// if you're asking your mock to do too much.
+ACTION(ReturnVal) {
+  // Return the first argument received.
+  return arg0;
+}
+ACTION(ReturnSecond) {
+  // Returns the second argument.  This basically implements ReturnSecond.
+  return arg1;
+}
+
+TEST(GmockTest, SimpleMatchAndActions) {
+  // Basic test of some simple gmock matchers, actions, and cardinality
+  // expectations.
+  MockSampleClass mock;
+
+  EXPECT_CALL(mock, ReturnSomething())
+      .WillOnce(Return(1))
+      .WillOnce(Return(2))
+      .WillOnce(Return(3));
+  EXPECT_EQ(1, mock.ReturnSomething());
+  EXPECT_EQ(2, mock.ReturnSomething());
+  EXPECT_EQ(3, mock.ReturnSomething());
+
+  EXPECT_CALL(mock, ReturnNothingConstly()).Times(2);
+  mock.ReturnNothingConstly();
+  mock.ReturnNothingConstly();
+}
+
+TEST(GmockTest, AssignArgument) {
+  // Capture an argument for examination.
+  MockSampleClass mock;
+
+  EXPECT_CALL(mock, OutputParam(_)).WillRepeatedly(SetArgPointee<0>(5));
+
+  int arg = 0;
+  mock.OutputParam(&arg);
+  EXPECT_EQ(5, arg);
+}
+
+TEST(GmockTest, SideEffects) {
+  // Exercise an action's side effect on an output argument.
+  MockSampleClass mock;
+
+  EXPECT_CALL(mock, OutputParam(_)).WillRepeatedly(SetArgPointee<0>(5));
+
+  int arg = 0;
+  mock.OutputParam(&arg);
+  EXPECT_EQ(5, arg);
+}
+
+TEST(GmockTest, CustomAction_ReturnSecond) {
+  // Test a mock of the ReturnSecond behavior using an action that provides an
+  // alternate implementation of the function.  Danger here though, this is
+  // starting to add too much behavior of the mock, which means the mock
+  // implementation might start to have bugs itself.
+  MockSampleClass mock;
+
+  EXPECT_CALL(mock, ReturnSecond(_, AnyOf(Eq(4), Eq(5))))
+      .WillRepeatedly(ReturnSecond());
+  EXPECT_EQ(4, mock.ReturnSecond(-1, 4));
+  EXPECT_EQ(5, mock.ReturnSecond(0, 5));
+  EXPECT_EQ(4, mock.ReturnSecond(0xdeadbeef, 4));
+  EXPECT_EQ(4, mock.ReturnSecond(112358, 4));
+  EXPECT_EQ(5, mock.ReturnSecond(1337, 5));
+}
+
+TEST(GmockTest, CustomAction_ReturnVal) {
+  // Alternate implementation of ReturnSecond using a more general custom action,
+  // and a WithArg adapter to bridge the interfaces.
+  MockSampleClass mock;
+
+  EXPECT_CALL(mock, ReturnSecond(_, AnyOf(Eq(4), Eq(5))))
+      .WillRepeatedly(WithArg<1>(ReturnVal()));
+  EXPECT_EQ(4, mock.ReturnSecond(-1, 4));
+  EXPECT_EQ(5, mock.ReturnSecond(0, 5));
+  EXPECT_EQ(4, mock.ReturnSecond(0xdeadbeef, 4));
+  EXPECT_EQ(4, mock.ReturnSecond(112358, 4));
+  EXPECT_EQ(5, mock.ReturnSecond(1337, 5));
+}
+
+}  // namespace
diff --git a/base/gtest_prod_util.h b/base/gtest_prod_util.h
new file mode 100644
index 0000000..2ca267e
--- /dev/null
+++ b/base/gtest_prod_util.h
@@ -0,0 +1,66 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_GTEST_PROD_UTIL_H_
+#define BASE_GTEST_PROD_UTIL_H_
+
+#include "testing/gtest/include/gtest/gtest_prod.h"  // nogncheck
+
+// This is a wrapper for gtest's FRIEND_TEST macro that friends
+// test with all possible prefixes. This is very helpful when changing the test
+// prefix, because the friend declarations don't need to be updated.
+//
+// Example usage:
+//
+// class MyClass {
+//  private:
+//   void MyMethod();
+//   FRIEND_TEST_ALL_PREFIXES(MyClassTest, MyMethod);
+// };
+#define FRIEND_TEST_ALL_PREFIXES(test_case_name, test_name) \
+  FRIEND_TEST(test_case_name, test_name); \
+  FRIEND_TEST(test_case_name, DISABLED_##test_name); \
+  FRIEND_TEST(test_case_name, FLAKY_##test_name)
+
+// C++ compilers will refuse to compile the following code:
+//
+// namespace foo {
+// class MyClass {
+//  private:
+//   FRIEND_TEST_ALL_PREFIXES(MyClassTest, TestMethod);
+//   bool private_var;
+// };
+// }  // namespace foo
+//
+// void MyClassTest::TestMethod() {
+//   foo::MyClass foo_class;
+//   foo_class.private_var = true;
+// }
+//
+// Unless you forward declare MyClassTest::TestMethod outside of namespace foo.
+// Use FORWARD_DECLARE_TEST to do so for all possible prefixes.
+//
+// Example usage:
+//
+// FORWARD_DECLARE_TEST(MyClassTest, TestMethod);
+//
+// namespace foo {
+// class MyClass {
+//  private:
+//   FRIEND_TEST_ALL_PREFIXES(::MyClassTest, TestMethod);  // NOTE use of ::
+//   bool private_var;
+// };
+// }  // namespace foo
+//
+// void MyClassTest::TestMethod() {
+//   foo::MyClass foo_class;
+//   foo_class.private_var = true;
+// }
+
+#define FORWARD_DECLARE_TEST(test_case_name, test_name) \
+  class test_case_name##_##test_name##_Test; \
+  class test_case_name##_##DISABLED_##test_name##_Test; \
+  class test_case_name##_##FLAKY_##test_name##_Test
+
+#endif  // BASE_GTEST_PROD_UTIL_H_
diff --git a/base/guid.cc b/base/guid.cc
new file mode 100644
index 0000000..2a23658
--- /dev/null
+++ b/base/guid.cc
@@ -0,0 +1,82 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/guid.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/rand_util.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+
+namespace base {
+
+namespace {
+
+bool IsLowerHexDigit(char c) {
+  return (c >= '0' && c <= '9') || (c >= 'a' && c <= 'f');
+}
+
+bool IsValidGUIDInternal(const base::StringPiece& guid, bool strict) {
+  const size_t kGUIDLength = 36U;
+  if (guid.length() != kGUIDLength)
+    return false;
+
+  for (size_t i = 0; i < guid.length(); ++i) {
+    char current = guid[i];
+    if (i == 8 || i == 13 || i == 18 || i == 23) {
+      if (current != '-')
+        return false;
+    } else {
+      if ((strict && !IsLowerHexDigit(current)) || !IsHexDigit(current))
+        return false;
+    }
+  }
+
+  return true;
+}
+
+}  // namespace
+
+std::string GenerateGUID() {
+  uint64_t sixteen_bytes[2];
+  // Use base::RandBytes instead of crypto::RandBytes, because crypto calls the
+  // base version directly, and to prevent the dependency from base/ to crypto/.
+  base::RandBytes(&sixteen_bytes, sizeof(sixteen_bytes));
+
+  // Set the GUID to version 4 as described in RFC 4122, section 4.4.
+  // The format of GUID version 4 must be xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx,
+  // where y is one of [8, 9, A, B].
+
+  // Clear the version bits and set the version to 4:
+  sixteen_bytes[0] &= 0xffffffff'ffff0fffULL;
+  sixteen_bytes[0] |= 0x00000000'00004000ULL;
+
+  // Set the two most significant bits (bits 6 and 7) of the
+  // clock_seq_hi_and_reserved to zero and one, respectively:
+  sixteen_bytes[1] &= 0x3fffffff'ffffffffULL;
+  sixteen_bytes[1] |= 0x80000000'00000000ULL;
+
+  return RandomDataToGUIDString(sixteen_bytes);
+}
+
+bool IsValidGUID(const base::StringPiece& guid) {
+  return IsValidGUIDInternal(guid, false /* strict */);
+}
+
+bool IsValidGUIDOutputString(const base::StringPiece& guid) {
+  return IsValidGUIDInternal(guid, true /* strict */);
+}
+
+std::string RandomDataToGUIDString(const uint64_t bytes[2]) {
+  return StringPrintf("%08x-%04x-%04x-%04x-%012llx",
+                      static_cast<unsigned int>(bytes[0] >> 32),
+                      static_cast<unsigned int>((bytes[0] >> 16) & 0x0000ffff),
+                      static_cast<unsigned int>(bytes[0] & 0x0000ffff),
+                      static_cast<unsigned int>(bytes[1] >> 48),
+                      bytes[1] & 0x0000ffff'ffffffffULL);
+}
+
+}  // namespace base
diff --git a/base/guid.h b/base/guid.h
new file mode 100644
index 0000000..c6937a1
--- /dev/null
+++ b/base/guid.h
@@ -0,0 +1,45 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_GUID_H_
+#define BASE_GUID_H_
+
+#include <stdint.h>
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/strings/string_piece.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// Generate a 128-bit random GUID in the form of version 4 as described in
+// RFC 4122, section 4.4.
+// The format of GUID version 4 must be xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx,
+// where y is one of [8, 9, A, B].
+// The hexadecimal values "a" through "f" are output as lower case characters.
+//
+// A cryptographically secure random source will be used, but consider using
+// UnguessableToken for greater type-safety if GUID format is unnecessary.
+BASE_EXPORT std::string GenerateGUID();
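+//
+// Illustrative call (the value shown is made up):
+//   std::string guid = base::GenerateGUID();
+//   // guid == "372c04d4-8a2b-4b39-a3f1-1f0f2a9b6e8d", for example.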
+
+// Returns true if the input string conforms to the version 4 GUID format.
+// Note that this does NOT check if the hexadecimal values "a" through "f"
+// are in lower case characters, as the version 4 RFC says that on input
+// they're case insensitive. (Use IsValidGUIDOutputString for checking if the
+// given string is a valid output string.)
+BASE_EXPORT bool IsValidGUID(const base::StringPiece& guid);
+
+// Returns true if the input string is valid version 4 GUID output string.
+// This also checks if the hexadecimal values "a" through "f" are in lower
+// case characters.
+BASE_EXPORT bool IsValidGUIDOutputString(const base::StringPiece& guid);
+
+// For unit testing purposes only.  Do not use outside of tests.
+BASE_EXPORT std::string RandomDataToGUIDString(const uint64_t bytes[2]);
+
+}  // namespace base
+
+#endif  // BASE_GUID_H_
diff --git a/base/guid_unittest.cc b/base/guid_unittest.cc
new file mode 100644
index 0000000..70dad67
--- /dev/null
+++ b/base/guid_unittest.cc
@@ -0,0 +1,65 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/guid.h"
+
+#include <stdint.h>
+
+#include <limits>
+
+#include "base/strings/string_util.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+bool IsGUIDv4(const std::string& guid) {
+  // The format of GUID version 4 must be xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx,
+  // where y is one of [8, 9, A, B].
+  return IsValidGUID(guid) && guid[14] == '4' &&
+         (guid[19] == '8' || guid[19] == '9' || guid[19] == 'A' ||
+          guid[19] == 'a' || guid[19] == 'B' || guid[19] == 'b');
+}
+
+}  // namespace
+
+TEST(GUIDTest, GUIDGeneratesAllZeroes) {
+  uint64_t bytes[] = {0, 0};
+  std::string clientid = RandomDataToGUIDString(bytes);
+  EXPECT_EQ("00000000-0000-0000-0000-000000000000", clientid);
+}
+
+TEST(GUIDTest, GUIDGeneratesCorrectly) {
+  uint64_t bytes[] = {0x0123456789ABCDEFULL, 0xFEDCBA9876543210ULL};
+  std::string clientid = RandomDataToGUIDString(bytes);
+  EXPECT_EQ("01234567-89ab-cdef-fedc-ba9876543210", clientid);
+}
+
+TEST(GUIDTest, GUIDCorrectlyFormatted) {
+  const int kIterations = 10;
+  for (int it = 0; it < kIterations; ++it) {
+    std::string guid = GenerateGUID();
+    EXPECT_TRUE(IsValidGUID(guid));
+    EXPECT_TRUE(IsValidGUIDOutputString(guid));
+    EXPECT_TRUE(IsValidGUID(ToLowerASCII(guid)));
+    EXPECT_TRUE(IsValidGUID(ToUpperASCII(guid)));
+  }
+}
+
+TEST(GUIDTest, GUIDBasicUniqueness) {
+  const int kIterations = 10;
+  for (int it = 0; it < kIterations; ++it) {
+    std::string guid1 = GenerateGUID();
+    std::string guid2 = GenerateGUID();
+    EXPECT_EQ(36U, guid1.length());
+    EXPECT_EQ(36U, guid2.length());
+    EXPECT_NE(guid1, guid2);
+    EXPECT_TRUE(IsGUIDv4(guid1));
+    EXPECT_TRUE(IsGUIDv4(guid2));
+  }
+}
+
+}  // namespace base
diff --git a/base/hash.cc b/base/hash.cc
new file mode 100644
index 0000000..ab5cebc
--- /dev/null
+++ b/base/hash.cc
@@ -0,0 +1,104 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/hash.h"
+
+// Definition in base/third_party/superfasthash/superfasthash.c. (Third-party
+// code did not come with its own header file, so declaring the function here.)
+// Note: This algorithm is also in Blink under Source/wtf/StringHasher.h.
+extern "C" uint32_t SuperFastHash(const char* data, int len);
+
+namespace base {
+
+uint32_t Hash(const void* data, size_t length) {
+  // Currently our in-memory hash is the same as the persistent hash. The
+  // split between in-memory and persistent hash functions is maintained to
+  // allow the in-memory hash function to be updated in the future.
+  return PersistentHash(data, length);
+}
+
+uint32_t Hash(const std::string& str) {
+  return PersistentHash(str.data(), str.size());
+}
+
+uint32_t Hash(const string16& str) {
+  return PersistentHash(str.data(), str.size() * sizeof(char16));
+}
+
+uint32_t PersistentHash(const void* data, size_t length) {
+  // This hash function must not change, since it is designed to be persistable
+  // to disk.
+  if (length > static_cast<size_t>(std::numeric_limits<int>::max())) {
+    NOTREACHED();
+    return 0;
+  }
+  return ::SuperFastHash(reinterpret_cast<const char*>(data),
+                         static_cast<int>(length));
+}
+
+uint32_t PersistentHash(const std::string& str) {
+  return PersistentHash(str.data(), str.size());
+}
+
+// Implement hashing for pairs of at most 32-bit integer values.
+// When size_t is 32 bits, we turn the 64-bit hash code into 32 bits by using
+// multiply-add hashing. This algorithm, as described in
+// Theorem 4.3.3 of the thesis "Über die Komplexität der Multiplikation in
+// eingeschränkten Branchingprogrammmodellen" by Woelfel, is:
+//
+//   h32(x32, y32) = (h64(x32, y32) * rand_odd64 + rand16 * 2^16) % 2^64 / 2^32
+//
+// Contact danakj@chromium.org for any questions.
+size_t HashInts32(uint32_t value1, uint32_t value2) {
+  uint64_t value1_64 = value1;
+  uint64_t hash64 = (value1_64 << 32) | value2;
+
+  if (sizeof(size_t) >= sizeof(uint64_t))
+    return static_cast<size_t>(hash64);
+
+  uint64_t odd_random = 481046412LL << 32 | 1025306955LL;
+  uint32_t shift_random = 10121U << 16;
+
+  hash64 = hash64 * odd_random + shift_random;
+  size_t high_bits =
+      static_cast<size_t>(hash64 >> (8 * (sizeof(uint64_t) - sizeof(size_t))));
+  return high_bits;
+}
+
+// Implement hashing for pairs of up to 64-bit integer values.
+// We use the compound integer hash method to produce a 64-bit hash code, by
+// breaking the two 64-bit inputs into 4 32-bit values:
+// http://opendatastructures.org/versions/edition-0.1d/ods-java/node33.html#SECTION00832000000000000000
+// Then we reduce our result to 32 bits if required, similar to above.
+size_t HashInts64(uint64_t value1, uint64_t value2) {
+  uint32_t short_random1 = 842304669U;
+  uint32_t short_random2 = 619063811U;
+  uint32_t short_random3 = 937041849U;
+  uint32_t short_random4 = 3309708029U;
+
+  uint32_t value1a = static_cast<uint32_t>(value1 & 0xffffffff);
+  uint32_t value1b = static_cast<uint32_t>((value1 >> 32) & 0xffffffff);
+  uint32_t value2a = static_cast<uint32_t>(value2 & 0xffffffff);
+  uint32_t value2b = static_cast<uint32_t>((value2 >> 32) & 0xffffffff);
+
+  uint64_t product1 = static_cast<uint64_t>(value1a) * short_random1;
+  uint64_t product2 = static_cast<uint64_t>(value1b) * short_random2;
+  uint64_t product3 = static_cast<uint64_t>(value2a) * short_random3;
+  uint64_t product4 = static_cast<uint64_t>(value2b) * short_random4;
+
+  uint64_t hash64 = product1 + product2 + product3 + product4;
+
+  if (sizeof(size_t) >= sizeof(uint64_t))
+    return static_cast<size_t>(hash64);
+
+  uint64_t odd_random = 1578233944LL << 32 | 194370989LL;
+  uint32_t shift_random = 20591U << 16;
+
+  hash64 = hash64 * odd_random + shift_random;
+  size_t high_bits =
+      static_cast<size_t>(hash64 >> (8 * (sizeof(uint64_t) - sizeof(size_t))));
+  return high_bits;
+}
+
+}  // namespace base
diff --git a/base/hash.h b/base/hash.h
new file mode 100644
index 0000000..165899e
--- /dev/null
+++ b/base/hash.h
@@ -0,0 +1,69 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_HASH_H_
+#define BASE_HASH_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <limits>
+#include <string>
+#include <utility>
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/strings/string16.h"
+
+namespace base {
+
+// Computes a hash of a memory buffer. This hash function is subject to change
+// in the future, so use only for temporary in-memory structures. If you need
+// to persist a hash on disk or across computers, use PersistentHash().
+//
+// WARNING: This hash function should not be used for any cryptographic purpose.
+BASE_EXPORT uint32_t Hash(const void* data, size_t length);
+BASE_EXPORT uint32_t Hash(const std::string& str);
+BASE_EXPORT uint32_t Hash(const string16& str);
+
+// Computes a hash of a memory buffer. This hash function must not change so
+// that code can use the hashed values for persistent storage purposes or
+// sending across the network. If a new persistent hash function is desired,
+// it will have to be added alongside this one rather than replacing it.
+//
+// WARNING: This hash function should not be used for any cryptographic purpose.
+BASE_EXPORT uint32_t PersistentHash(const void* data, size_t length);
+BASE_EXPORT uint32_t PersistentHash(const std::string& str);
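+//
+// Example (a sketch; the concrete values depend on the underlying algorithm):
+//
+//   uint32_t in_memory = base::Hash("key");          // May change in future.
+//   uint32_t on_disk = base::PersistentHash("key");  // Must stay stable.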
+
+// Hash pairs of 32-bit or 64-bit numbers.
+BASE_EXPORT size_t HashInts32(uint32_t value1, uint32_t value2);
+BASE_EXPORT size_t HashInts64(uint64_t value1, uint64_t value2);
+
+template <typename T1, typename T2>
+inline size_t HashInts(T1 value1, T2 value2) {
+  // This condition is expected to be compile-time evaluated and optimized away
+  // in release builds.
+  if (sizeof(T1) > sizeof(uint32_t) || sizeof(T2) > sizeof(uint32_t))
+    return HashInts64(value1, value2);
+
+  return HashInts32(value1, value2);
+}
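+
+// For example (a sketch): HashInts(uint16_t{1}, uint32_t{2}) resolves to
+// HashInts32(), while HashInts(uint64_t{1}, uint32_t{2}) resolves to
+// HashInts64().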
+
+// A templated hasher for pairs of integer types. Example:
+//
+//   using MyPair = std::pair<int32_t, int32_t>;
+//   std::unordered_set<MyPair, base::IntPairHash<MyPair>> set;
+template <typename T>
+struct IntPairHash;
+
+template <typename Type1, typename Type2>
+struct IntPairHash<std::pair<Type1, Type2>> {
+  size_t operator()(std::pair<Type1, Type2> value) const {
+    return HashInts(value.first, value.second);
+  }
+};
+
+}  // namespace base
+
+#endif  // BASE_HASH_H_
diff --git a/base/hash_unittest.cc b/base/hash_unittest.cc
new file mode 100644
index 0000000..fc8a751
--- /dev/null
+++ b/base/hash_unittest.cc
@@ -0,0 +1,82 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/hash.h"
+
+#include <string.h>
+
+#include <string>
+#include <vector>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(HashTest, String) {
+  std::string str;
+  // Empty string (should hash to 0).
+  str = "";
+  EXPECT_EQ(0u, Hash(str));
+
+  // Simple test.
+  str = "hello world";
+  EXPECT_EQ(2794219650u, Hash(str));
+
+  // Change one bit.
+  str = "helmo world";
+  EXPECT_EQ(1006697176u, Hash(str));
+
+  // Insert a null byte.
+  str = "hello  world";
+  str[5] = '\0';
+  EXPECT_EQ(2319902537u, Hash(str));
+
+  // Test that the bytes after the null contribute to the hash.
+  str = "hello  worle";
+  str[5] = '\0';
+  EXPECT_EQ(553904462u, Hash(str));
+
+  // Extremely long string.
+  // Also tests strings with high bit set, and null byte.
+  std::vector<char> long_string_buffer;
+  for (int i = 0; i < 4096; ++i)
+    long_string_buffer.push_back((i % 256) - 128);
+  str.assign(&long_string_buffer.front(), long_string_buffer.size());
+  EXPECT_EQ(2797962408u, Hash(str));
+
+  // All possible lengths (mod 4). Tests separate code paths. Also tests with
+  // the final byte's high bit set (regression test for http://crbug.com/90659).
+  // Note that the 1 and 3 cases have a weird bug where the final byte is
+  // treated as a signed char. It was decided in the above bug discussion to
+  // enshrine that behavior as "correct" to avoid invalidating existing hashes.
+
+  // Length mod 4 == 0.
+  str = "hello w\xab";
+  EXPECT_EQ(615571198u, Hash(str));
+  // Length mod 4 == 1.
+  str = "hello wo\xab";
+  EXPECT_EQ(623474296u, Hash(str));
+  // Length mod 4 == 2.
+  str = "hello wor\xab";
+  EXPECT_EQ(4278562408u, Hash(str));
+  // Length mod 4 == 3.
+  str = "hello worl\xab";
+  EXPECT_EQ(3224633008u, Hash(str));
+}
+
+TEST(HashTest, CString) {
+  const char* str;
+  // Empty string (should hash to 0).
+  str = "";
+  EXPECT_EQ(0u, Hash(str, strlen(str)));
+
+  // Simple test.
+  str = "hello world";
+  EXPECT_EQ(2794219650u, Hash(str, strlen(str)));
+
+  // Ensure that it stops reading after the given length, and does not expect a
+  // null byte.
+  str = "hello world; don't read this part";
+  EXPECT_EQ(2794219650u, Hash(str, strlen("hello world")));
+}
+
+}  // namespace base
diff --git a/base/i18n/OWNERS b/base/i18n/OWNERS
new file mode 100644
index 0000000..d717b8d
--- /dev/null
+++ b/base/i18n/OWNERS
@@ -0,0 +1 @@
+jshin@chromium.org
diff --git a/base/i18n/base_i18n_export.h b/base/i18n/base_i18n_export.h
new file mode 100644
index 0000000..e8a2add
--- /dev/null
+++ b/base/i18n/base_i18n_export.h
@@ -0,0 +1,29 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_I18N_BASE_I18N_EXPORT_H_
+#define BASE_I18N_BASE_I18N_EXPORT_H_
+
+#if defined(COMPONENT_BUILD)
+#if defined(WIN32)
+
+#if defined(BASE_I18N_IMPLEMENTATION)
+#define BASE_I18N_EXPORT __declspec(dllexport)
+#else
+#define BASE_I18N_EXPORT __declspec(dllimport)
+#endif  // defined(BASE_I18N_IMPLEMENTATION)
+
+#else  // defined(WIN32)
+#if defined(BASE_I18N_IMPLEMENTATION)
+#define BASE_I18N_EXPORT __attribute__((visibility("default")))
+#else
+#define BASE_I18N_EXPORT
+#endif  // defined(BASE_I18N_IMPLEMENTATION)
+#endif  // defined(WIN32)
+
+#else  // defined(COMPONENT_BUILD)
+#define BASE_I18N_EXPORT
+#endif  // defined(COMPONENT_BUILD)
+
+#endif  // BASE_I18N_BASE_I18N_EXPORT_H_
diff --git a/base/i18n/base_i18n_switches.cc b/base/i18n/base_i18n_switches.cc
new file mode 100644
index 0000000..103d665
--- /dev/null
+++ b/base/i18n/base_i18n_switches.cc
@@ -0,0 +1,21 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/i18n/base_i18n_switches.h"
+
+namespace switches {
+
+// Force the UI to a specific direction. Valid values are "ltr" (left-to-right)
+// and "rtl" (right-to-left).
+const char kForceUIDirection[]   = "force-ui-direction";
+
+// Force the text rendering to a specific direction. Valid values are "ltr"
+// (left-to-right) and "rtl" (right-to-left). Only tested meaningfully with
+// RTL.
+const char kForceTextDirection[] = "force-text-direction";
+
+const char kForceDirectionLTR[]  = "ltr";
+const char kForceDirectionRTL[]  = "rtl";
+
+}  // namespace switches
diff --git a/base/i18n/base_i18n_switches.h b/base/i18n/base_i18n_switches.h
new file mode 100644
index 0000000..d1ba690
--- /dev/null
+++ b/base/i18n/base_i18n_switches.h
@@ -0,0 +1,21 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_I18N_BASE_I18N_SWITCHES_H_
+#define BASE_I18N_BASE_I18N_SWITCHES_H_
+
+#include "base/i18n/base_i18n_export.h"
+
+namespace switches {
+
+BASE_I18N_EXPORT extern const char kForceUIDirection[];
+BASE_I18N_EXPORT extern const char kForceTextDirection[];
+
+// kForce*Direction choices for the switches above.
+BASE_I18N_EXPORT extern const char kForceDirectionLTR[];
+BASE_I18N_EXPORT extern const char kForceDirectionRTL[];
+
+}  // namespace switches
+
+#endif  // BASE_I18N_BASE_I18N_SWITCHES_H_
diff --git a/base/i18n/bidi_line_iterator.cc b/base/i18n/bidi_line_iterator.cc
new file mode 100644
index 0000000..3f7f868
--- /dev/null
+++ b/base/i18n/bidi_line_iterator.cc
@@ -0,0 +1,119 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/i18n/bidi_line_iterator.h"
+
+#include "base/logging.h"
+
+namespace base {
+namespace i18n {
+
+namespace {
+
+UBiDiLevel GetParagraphLevelForDirection(TextDirection direction) {
+  switch (direction) {
+    case UNKNOWN_DIRECTION:
+      return UBIDI_DEFAULT_LTR;
+    case RIGHT_TO_LEFT:
+      return 1;  // Highest RTL level.
+    case LEFT_TO_RIGHT:
+      return 0;  // Highest LTR level.
+    default:
+      NOTREACHED();
+      return 0;
+  }
+}
+
+// Overrides the default bidi class for a given character, for the custom
+// "AS_URL" behavior. Returns U_BIDI_CLASS_DEFAULT to defer to the default ICU
+// behavior.
+//
+// Matches the C callback interface of ICU's UBiDiClassCallback type (which is
+// why there is an unused argument).
+UCharDirection GetURLBiDiClassCallback(const void* /*unused*/, UChar32 c) {
+  // Note: Use a switch statement instead of strchr() to avoid iterating over a
+  // string for each character (the switch allows for much better compiler
+  // optimization).
+  switch (c) {
+    // The set of characters that delimit URL components (separating the scheme,
+    // username, password, domain labels, host, path segments, query
+    // names/values and fragment).
+    case '#':
+    case '&':
+    case '.':
+    case '/':
+    case ':':
+    case '=':
+    case '?':
+    case '@':
+      // Treat all of these characters as strong LTR, which effectively
+      // surrounds all of the text components of a URL (e.g., the domain labels
+      // and path segments) in a left-to-right embedding. This ensures that the
+      // URL components read from left to right, regardless of any RTL
+      // characters. (Within each component, RTL sequences are rendered from
+      // right to left as expected.)
+      return U_LEFT_TO_RIGHT;
+    default:
+      return U_BIDI_CLASS_DEFAULT;
+  }
+}
+
+}  // namespace
+
+BiDiLineIterator::BiDiLineIterator() : bidi_(nullptr) {}
+
+BiDiLineIterator::~BiDiLineIterator() {
+  if (bidi_) {
+    ubidi_close(bidi_);
+    bidi_ = nullptr;
+  }
+}
+
+bool BiDiLineIterator::Open(const string16& text,
+                            TextDirection direction,
+                            CustomBehavior behavior) {
+  DCHECK(!bidi_);
+  UErrorCode error = U_ZERO_ERROR;
+  bidi_ = ubidi_openSized(static_cast<int>(text.length()), 0, &error);
+  if (U_FAILURE(error))
+    return false;
+
+  if (behavior == CustomBehavior::AS_URL) {
+    ubidi_setClassCallback(bidi_, GetURLBiDiClassCallback, nullptr, nullptr,
+                           nullptr, &error);
+    if (U_FAILURE(error))
+      return false;
+  }
+
+  ubidi_setPara(bidi_, text.data(), static_cast<int>(text.length()),
+                GetParagraphLevelForDirection(direction), nullptr, &error);
+  return U_SUCCESS(error);
+}
+
+int BiDiLineIterator::CountRuns() const {
+  DCHECK(bidi_ != nullptr);
+  UErrorCode error = U_ZERO_ERROR;
+  const int runs = ubidi_countRuns(bidi_, &error);
+  return U_SUCCESS(error) ? runs : 0;
+}
+
+UBiDiDirection BiDiLineIterator::GetVisualRun(int index,
+                                              int* start,
+                                              int* length) const {
+  DCHECK(bidi_ != nullptr);
+  return ubidi_getVisualRun(bidi_, index, start, length);
+}
+
+void BiDiLineIterator::GetLogicalRun(int start,
+                                     int* end,
+                                     UBiDiLevel* level) const {
+  DCHECK(bidi_ != nullptr);
+  ubidi_getLogicalRun(bidi_, start, end, level);
+}
+
+}  // namespace i18n
+}  // namespace base
diff --git a/base/i18n/bidi_line_iterator.h b/base/i18n/bidi_line_iterator.h
new file mode 100644
index 0000000..d840f61
--- /dev/null
+++ b/base/i18n/bidi_line_iterator.h
@@ -0,0 +1,60 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_I18N_BIDI_LINE_ITERATOR_H_
+#define BASE_I18N_BIDI_LINE_ITERATOR_H_
+
+#include "base/i18n/base_i18n_export.h"
+#include "base/i18n/rtl.h"
+#include "base/macros.h"
+#include "base/strings/string16.h"
+#include "third_party/icu/source/common/unicode/ubidi.h"
+#include "third_party/icu/source/common/unicode/uchar.h"
+
+namespace base {
+namespace i18n {
+
+// A simple wrapper class for the bidirectional iterator of ICU.
+// This class uses the bidirectional iterator of ICU to split a line of
+// bidirectional texts into visual runs in its display order.
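+//
+// Typical use (a minimal sketch):
+//
+//   BiDiLineIterator iter;
+//   if (iter.Open(text, UNKNOWN_DIRECTION,
+//                 BiDiLineIterator::CustomBehavior::NONE)) {
+//     for (int i = 0; i < iter.CountRuns(); ++i) {
+//       int start, length;
+//       UBiDiDirection direction = iter.GetVisualRun(i, &start, &length);
+//       // Lay out text.substr(start, length) in |direction|.
+//     }
+//   }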
+class BASE_I18N_EXPORT BiDiLineIterator {
+ public:
+  // Specifies some alternative iteration behavior.
+  enum class CustomBehavior {
+    // No special behavior.
+    NONE,
+    // Treat URL delimiter characters as strong LTR. This is a special treatment
+    // for URLs that purposefully violates the URL Standard, as an experiment.
+    // It should only be used behind a flag.
+    AS_URL
+  };
+
+  BiDiLineIterator();
+  ~BiDiLineIterator();
+
+  // Initializes the bidirectional iterator with the specified text.  Returns
+  // whether initialization succeeded.
+  bool Open(const string16& text,
+            TextDirection direction,
+            CustomBehavior behavior);
+
+  // Returns the number of visual runs in the text, or zero on error.
+  int CountRuns() const;
+
+  // Gets the logical offset, length, and direction of the specified visual run.
+  UBiDiDirection GetVisualRun(int index, int* start, int* length) const;
+
+  // Given a start position, figure out where the run ends (and the BiDiLevel).
+  void GetLogicalRun(int start, int* end, UBiDiLevel* level) const;
+
+ private:
+  UBiDi* bidi_;
+
+  DISALLOW_COPY_AND_ASSIGN(BiDiLineIterator);
+};
+
+}  // namespace i18n
+}  // namespace base
+
+#endif  // BASE_I18N_BIDI_LINE_ITERATOR_H_
diff --git a/base/i18n/bidi_line_iterator_unittest.cc b/base/i18n/bidi_line_iterator_unittest.cc
new file mode 100644
index 0000000..d531313
--- /dev/null
+++ b/base/i18n/bidi_line_iterator_unittest.cc
@@ -0,0 +1,209 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/i18n/bidi_line_iterator.h"
+
+#include "base/macros.h"
+#include "base/strings/utf_string_conversions.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace i18n {
+namespace {
+
+class BiDiLineIteratorTest : public testing::TestWithParam<TextDirection> {
+ public:
+  BiDiLineIteratorTest() = default;
+
+  BiDiLineIterator* iterator() { return &iterator_; }
+
+ private:
+  BiDiLineIterator iterator_;
+
+  DISALLOW_COPY_AND_ASSIGN(BiDiLineIteratorTest);
+};
+
+TEST_P(BiDiLineIteratorTest, OnlyLTR) {
+  iterator()->Open(UTF8ToUTF16("abc 😁 测试"), GetParam(),
+                   BiDiLineIterator::CustomBehavior::NONE);
+  ASSERT_EQ(1, iterator()->CountRuns());
+
+  int start, length;
+  EXPECT_EQ(UBIDI_LTR, iterator()->GetVisualRun(0, &start, &length));
+  EXPECT_EQ(0, start);
+  EXPECT_EQ(9, length);
+
+  int end;
+  UBiDiLevel level;
+  iterator()->GetLogicalRun(0, &end, &level);
+  EXPECT_EQ(9, end);
+  if (GetParam() == TextDirection::RIGHT_TO_LEFT)
+    EXPECT_EQ(2, level);
+  else
+    EXPECT_EQ(0, level);
+}
+
+TEST_P(BiDiLineIteratorTest, OnlyRTL) {
+  iterator()->Open(UTF8ToUTF16("מה השעה"), GetParam(),
+                   BiDiLineIterator::CustomBehavior::NONE);
+  ASSERT_EQ(1, iterator()->CountRuns());
+
+  int start, length;
+  EXPECT_EQ(UBIDI_RTL, iterator()->GetVisualRun(0, &start, &length));
+  EXPECT_EQ(0, start);
+  EXPECT_EQ(7, length);
+
+  int end;
+  UBiDiLevel level;
+  iterator()->GetLogicalRun(0, &end, &level);
+  EXPECT_EQ(7, end);
+  EXPECT_EQ(1, level);
+}
+
+TEST_P(BiDiLineIteratorTest, Mixed) {
+  iterator()->Open(UTF8ToUTF16("אני משתמש ב- Chrome כדפדפן האינטרנט שלי"),
+                   GetParam(), BiDiLineIterator::CustomBehavior::NONE);
+  ASSERT_EQ(3, iterator()->CountRuns());
+
+  // We'll get completely different results depending on the top-level paragraph
+  // direction.
+  if (GetParam() == TextDirection::RIGHT_TO_LEFT) {
+    // If para direction is RTL, expect the LTR substring "Chrome" to be nested
+    // within the surrounding RTL text.
+    int start, length;
+    EXPECT_EQ(UBIDI_RTL, iterator()->GetVisualRun(0, &start, &length));
+    EXPECT_EQ(19, start);
+    EXPECT_EQ(20, length);
+    EXPECT_EQ(UBIDI_LTR, iterator()->GetVisualRun(1, &start, &length));
+    EXPECT_EQ(13, start);
+    EXPECT_EQ(6, length);
+    EXPECT_EQ(UBIDI_RTL, iterator()->GetVisualRun(2, &start, &length));
+    EXPECT_EQ(0, start);
+    EXPECT_EQ(13, length);
+
+    int end;
+    UBiDiLevel level;
+    iterator()->GetLogicalRun(0, &end, &level);
+    EXPECT_EQ(13, end);
+    EXPECT_EQ(1, level);
+    iterator()->GetLogicalRun(13, &end, &level);
+    EXPECT_EQ(19, end);
+    EXPECT_EQ(2, level);
+    iterator()->GetLogicalRun(19, &end, &level);
+    EXPECT_EQ(39, end);
+    EXPECT_EQ(1, level);
+  } else {
+    // If the para direction is LTR, expect the LTR substring "- Chrome " to be
+    // at the top level, with two nested RTL runs on either side.
+    int start, length;
+    EXPECT_EQ(UBIDI_RTL, iterator()->GetVisualRun(0, &start, &length));
+    EXPECT_EQ(0, start);
+    EXPECT_EQ(11, length);
+    EXPECT_EQ(UBIDI_LTR, iterator()->GetVisualRun(1, &start, &length));
+    EXPECT_EQ(11, start);
+    EXPECT_EQ(9, length);
+    EXPECT_EQ(UBIDI_RTL, iterator()->GetVisualRun(2, &start, &length));
+    EXPECT_EQ(20, start);
+    EXPECT_EQ(19, length);
+
+    int end;
+    UBiDiLevel level;
+    iterator()->GetLogicalRun(0, &end, &level);
+    EXPECT_EQ(11, end);
+    EXPECT_EQ(1, level);
+    iterator()->GetLogicalRun(11, &end, &level);
+    EXPECT_EQ(20, end);
+    EXPECT_EQ(0, level);
+    iterator()->GetLogicalRun(20, &end, &level);
+    EXPECT_EQ(39, end);
+    EXPECT_EQ(1, level);
+  }
+}
+
+TEST_P(BiDiLineIteratorTest, RTLPunctuationNoCustomBehavior) {
+  // This string features Hebrew characters interleaved with ASCII punctuation.
+  iterator()->Open(UTF8ToUTF16("א!ב\"ג#ד$ה%ו&ז'ח(ט)י*ך+כ,ל-ם.מ/"
+                               "ן:נ;ס<ע=ף>פ?ץ@צ[ק\\ר]ש^ת_א`ב{ג|ד}ה~ו"),
+                   GetParam(), BiDiLineIterator::CustomBehavior::NONE);
+
+  // Expect a single RTL run.
+  ASSERT_EQ(1, iterator()->CountRuns());
+
+  int start, length;
+  EXPECT_EQ(UBIDI_RTL, iterator()->GetVisualRun(0, &start, &length));
+  EXPECT_EQ(0, start);
+  EXPECT_EQ(65, length);
+
+  int end;
+  UBiDiLevel level;
+  iterator()->GetLogicalRun(0, &end, &level);
+  EXPECT_EQ(65, end);
+  EXPECT_EQ(1, level);
+}
+
+TEST_P(BiDiLineIteratorTest, RTLPunctuationAsURL) {
+  // This string features Hebrew characters interleaved with ASCII punctuation.
+  iterator()->Open(UTF8ToUTF16("א!ב\"ג#ד$ה%ו&ז'ח(ט)י*ך+כ,ל-ם.מ/"
+                               "ן:נ;ס<ע=ף>פ?ץ@צ[ק\\ר]ש^ת_א`ב{ג|ד}ה~ו"),
+                   GetParam(), BiDiLineIterator::CustomBehavior::AS_URL);
+
+  const int kStringSize = 65;
+
+  // Expect a primary RTL run, broken up by each of the 8 punctuation marks that
+  // are considered strong LTR (17 runs total).
+  struct {
+    int start;
+    UBiDiDirection dir;
+  } expected_runs[] = {
+      {0, UBIDI_RTL},  {5, UBIDI_LTR},   // '#'
+      {6, UBIDI_RTL},  {11, UBIDI_LTR},  // '&'
+      {12, UBIDI_RTL}, {27, UBIDI_LTR},  // '.'
+      {28, UBIDI_RTL}, {29, UBIDI_LTR},  // '/'
+      {30, UBIDI_RTL}, {31, UBIDI_LTR},  // ':'
+      {32, UBIDI_RTL}, {37, UBIDI_LTR},  // '='
+      {38, UBIDI_RTL}, {41, UBIDI_LTR},  // '?'
+      {42, UBIDI_RTL}, {43, UBIDI_LTR},  // '@'
+      {44, UBIDI_RTL},
+  };
+
+  ASSERT_EQ(arraysize(expected_runs),
+            static_cast<size_t>(iterator()->CountRuns()));
+
+  for (size_t i = 0; i < arraysize(expected_runs); ++i) {
+    const auto& expected_run = expected_runs[i];
+    int expected_run_end = i >= arraysize(expected_runs) - 1
+                               ? kStringSize
+                               : expected_runs[i + 1].start;
+
+    size_t visual_index = GetParam() == TextDirection::RIGHT_TO_LEFT
+                              ? arraysize(expected_runs) - 1 - i
+                              : i;
+    int start, length;
+    EXPECT_EQ(expected_run.dir,
+              iterator()->GetVisualRun(visual_index, &start, &length))
+        << "(i = " << i << ")";
+    EXPECT_EQ(expected_run.start, start) << "(i = " << i << ")";
+    EXPECT_EQ(expected_run_end - expected_run.start, length)
+        << "(i = " << i << ")";
+
+    int expected_level =
+        expected_run.dir == UBIDI_RTL
+            ? 1
+            : (GetParam() == TextDirection::RIGHT_TO_LEFT ? 2 : 0);
+    int end;
+    UBiDiLevel level;
+    iterator()->GetLogicalRun(expected_run.start, &end, &level);
+    EXPECT_EQ(expected_run_end, end) << "(i = " << i << ")";
+    EXPECT_EQ(expected_level, level) << "(i = " << i << ")";
+  }
+}
+
+INSTANTIATE_TEST_CASE_P(,
+                        BiDiLineIteratorTest,
+                        ::testing::Values(TextDirection::LEFT_TO_RIGHT,
+                                          TextDirection::RIGHT_TO_LEFT));
+
+}  // namespace
+}  // namespace i18n
+}  // namespace base
diff --git a/base/i18n/break_iterator.cc b/base/i18n/break_iterator.cc
new file mode 100644
index 0000000..251cd00
--- /dev/null
+++ b/base/i18n/break_iterator.cc
@@ -0,0 +1,191 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/i18n/break_iterator.h"
+
+#include <stdint.h>
+
+#include "base/logging.h"
+#include "third_party/icu/source/common/unicode/ubrk.h"
+#include "third_party/icu/source/common/unicode/uchar.h"
+#include "third_party/icu/source/common/unicode/ustring.h"
+
+namespace base {
+namespace i18n {
+
+const size_t npos = static_cast<size_t>(-1);
+
+BreakIterator::BreakIterator(const StringPiece16& str, BreakType break_type)
+    : iter_(nullptr),
+      string_(str),
+      break_type_(break_type),
+      prev_(npos),
+      pos_(0) {}
+
+BreakIterator::BreakIterator(const StringPiece16& str, const string16& rules)
+    : iter_(nullptr),
+      string_(str),
+      rules_(rules),
+      break_type_(RULE_BASED),
+      prev_(npos),
+      pos_(0) {}
+
+BreakIterator::~BreakIterator() {
+  if (iter_)
+    ubrk_close(static_cast<UBreakIterator*>(iter_));
+}
+
+bool BreakIterator::Init() {
+  UErrorCode status = U_ZERO_ERROR;
+  UParseError parse_error;
+  UBreakIteratorType break_type;
+  switch (break_type_) {
+    case BREAK_CHARACTER:
+      break_type = UBRK_CHARACTER;
+      break;
+    case BREAK_WORD:
+      break_type = UBRK_WORD;
+      break;
+    case BREAK_LINE:
+    case BREAK_NEWLINE:
+    case RULE_BASED:  // (Keep the compiler happy; break_type is unused here.)
+      break_type = UBRK_LINE;
+      break;
+    default:
+      NOTREACHED() << "invalid break_type_";
+      return false;
+  }
+  if (break_type_ == RULE_BASED) {
+    iter_ = ubrk_openRules(rules_.c_str(),
+                           static_cast<int32_t>(rules_.length()),
+                           string_.data(),
+                           static_cast<int32_t>(string_.size()),
+                           &parse_error,
+                           &status);
+    if (U_FAILURE(status)) {
+      NOTREACHED() << "ubrk_openRules failed to parse rule string at line "
+          << parse_error.line << ", offset " << parse_error.offset;
+    }
+  } else {
+    iter_ = ubrk_open(break_type, nullptr, string_.data(),
+                      static_cast<int32_t>(string_.size()), &status);
+    if (U_FAILURE(status)) {
+      NOTREACHED() << "ubrk_open failed for type " << break_type
+          << " with error " << status;
+    }
+  }
+
+  if (U_FAILURE(status)) {
+    return false;
+  }
+
+  // Move the iterator to the beginning of the string.
+  ubrk_first(static_cast<UBreakIterator*>(iter_));
+  return true;
+}
+
+bool BreakIterator::Advance() {
+  int32_t pos;
+  int32_t status;
+  prev_ = pos_;
+  switch (break_type_) {
+    case BREAK_CHARACTER:
+    case BREAK_WORD:
+    case BREAK_LINE:
+    case RULE_BASED:
+      pos = ubrk_next(static_cast<UBreakIterator*>(iter_));
+      if (pos == UBRK_DONE) {
+        pos_ = npos;
+        return false;
+      }
+      pos_ = static_cast<size_t>(pos);
+      return true;
+    case BREAK_NEWLINE:
+      do {
+        pos = ubrk_next(static_cast<UBreakIterator*>(iter_));
+        if (pos == UBRK_DONE)
+          break;
+        pos_ = static_cast<size_t>(pos);
+        status = ubrk_getRuleStatus(static_cast<UBreakIterator*>(iter_));
+      } while (status >= UBRK_LINE_SOFT && status < UBRK_LINE_SOFT_LIMIT);
+      if (pos == UBRK_DONE && prev_ == pos_) {
+        pos_ = npos;
+        return false;
+      }
+      return true;
+    default:
+      NOTREACHED() << "invalid break_type_";
+      return false;
+  }
+}
+
+bool BreakIterator::SetText(const base::char16* text, const size_t length) {
+  UErrorCode status = U_ZERO_ERROR;
+  ubrk_setText(static_cast<UBreakIterator*>(iter_), text,
+               static_cast<int32_t>(length), &status);
+  pos_ = 0;  // ubrk_setText() implicitly resets the position to the start.
+  prev_ = npos;
+  if (U_FAILURE(status)) {
+    NOTREACHED() << "ubrk_setText failed";
+    return false;
+  }
+  string_ = StringPiece16(text, length);
+  return true;
+}
+
+bool BreakIterator::IsWord() const {
+  return GetWordBreakStatus() == IS_WORD_BREAK;
+}
+
+BreakIterator::WordBreakStatus BreakIterator::GetWordBreakStatus() const {
+  int32_t status = ubrk_getRuleStatus(static_cast<UBreakIterator*>(iter_));
+  if (break_type_ != BREAK_WORD && break_type_ != RULE_BASED)
+    return IS_LINE_OR_CHAR_BREAK;
+  // In ICU 60, trying to advance past the end of the text does not change
+  // |status|, so |pos_| has to be checked as well as |status|.
+  // See http://bugs.icu-project.org/trac/ticket/13447 .
+  return (status == UBRK_WORD_NONE || pos_ == npos) ? IS_SKIPPABLE_WORD
+                                                    : IS_WORD_BREAK;
+}
+
+bool BreakIterator::IsEndOfWord(size_t position) const {
+  if (break_type_ != BREAK_WORD && break_type_ != RULE_BASED)
+    return false;
+
+  UBreakIterator* iter = static_cast<UBreakIterator*>(iter_);
+  UBool boundary = ubrk_isBoundary(iter, static_cast<int32_t>(position));
+  int32_t status = ubrk_getRuleStatus(iter);
+  return (!!boundary && status != UBRK_WORD_NONE);
+}
+
+bool BreakIterator::IsStartOfWord(size_t position) const {
+  if (break_type_ != BREAK_WORD && break_type_ != RULE_BASED)
+    return false;
+
+  UBreakIterator* iter = static_cast<UBreakIterator*>(iter_);
+  UBool boundary = ubrk_isBoundary(iter, static_cast<int32_t>(position));
+  ubrk_next(iter);
+  int32_t next_status = ubrk_getRuleStatus(iter);
+  return (!!boundary && next_status != UBRK_WORD_NONE);
+}
+
+bool BreakIterator::IsGraphemeBoundary(size_t position) const {
+  if (break_type_ != BREAK_CHARACTER)
+    return false;
+
+  UBreakIterator* iter = static_cast<UBreakIterator*>(iter_);
+  return !!ubrk_isBoundary(iter, static_cast<int32_t>(position));
+}
+
+string16 BreakIterator::GetString() const {
+  return GetStringPiece().as_string();
+}
+
+StringPiece16 BreakIterator::GetStringPiece() const {
+  DCHECK(prev_ != npos && pos_ != npos);
+  return string_.substr(prev_, pos_ - prev_);
+}
+
+}  // namespace i18n
+}  // namespace base
diff --git a/base/i18n/break_iterator.h b/base/i18n/break_iterator.h
new file mode 100644
index 0000000..dc30b64
--- /dev/null
+++ b/base/i18n/break_iterator.h
@@ -0,0 +1,182 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_I18N_BREAK_ITERATOR_H_
+#define BASE_I18N_BREAK_ITERATOR_H_
+
+#include <stddef.h>
+
+#include "base/i18n/base_i18n_export.h"
+#include "base/macros.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
+
+// The BreakIterator class iterates through the words, word breaks, and
+// line breaks in a UTF-16 string.
+//
+// It provides several modes, BREAK_WORD, BREAK_LINE, and BREAK_NEWLINE,
+// which modify how characters are aggregated into the returned string.
+//
+// Under BREAK_WORD mode, once a word is encountered, any non-word
+// characters are not included in the returned string (e.g. in the
+// UTF-16 equivalent of the string " foo bar! ", the word breaks are at
+// the periods in ". .foo. .bar.!. .").
+// Note that Chinese/Japanese/Thai do not use spaces between words, so
+// boundaries can fall in the middle of a continuous run of non-space /
+// non-punctuation characters.
+//
+// Under BREAK_LINE mode, once a line breaking opportunity is encountered,
+// any non-word characters are included in the returned string, breaking
+// only when a space-equivalent character or a line breaking opportunity
+// is encountered (e.g. in the UTF-16 equivalent of the string " foo bar! ",
+// the breaks are at the periods in ". .foo .bar! .").
+//
+// Note that lines can be broken at any character/syllable/grapheme cluster
+// boundary in Chinese/Japanese/Korean and at word boundaries in Thai
+// (Thai does not use spaces between words). Therefore, this is NOT the same
+// as breaking only at space-equivalent characters, as its former
+// name (BREAK_SPACE) implied.
+//
+// Under BREAK_NEWLINE mode, all characters are included in the returned
+// string, breaking only when a newline-equivalent character is encountered
+// (e.g. in the UTF-16 equivalent of the string "foo\nbar!\n\n", the line
+// breaks are at the periods in ".foo\n.bar!\n.\n.").
+//
+// To extract the words from a string, move a BREAK_WORD BreakIterator
+// through the string and test whether IsWord() is true. E.g.,
+//   BreakIterator iter(str, BreakIterator::BREAK_WORD);
+//   if (!iter.Init())
+//     return false;
+//   while (iter.Advance()) {
+//     if (iter.IsWord()) {
+//       // Region [iter.prev(), iter.pos()) contains a word.
+//       VLOG(1) << "word: " << iter.GetString();
+//     }
+//   }
+
+namespace base {
+namespace i18n {
+
+class BASE_I18N_EXPORT BreakIterator {
+ public:
+  enum BreakType {
+    BREAK_WORD,
+    BREAK_LINE,
+    // TODO(jshin): Remove this after reviewing call sites.
+    // If call sites really need break only on space-like characters
+    // implement it separately.
+    BREAK_SPACE = BREAK_LINE,
+    BREAK_NEWLINE,
+    BREAK_CHARACTER,
+    // But don't remove this one!
+    RULE_BASED,
+  };
+
+  enum WordBreakStatus {
+    // The end of text that the iterator recognizes as word characters.
+    // Non-word characters are things like punctuation and spaces.
+    IS_WORD_BREAK,
+    // Characters that the iterator can skip past, such as punctuation,
+    // whitespace, and, if using RULE_BASED mode, characters from another
+    // character set.
+    IS_SKIPPABLE_WORD,
+    // Only used if not in BREAK_WORD or RULE_BASED mode. This is returned for
+    // newlines, line breaks, and character breaks.
+    IS_LINE_OR_CHAR_BREAK
+  };
+
+  // Requires |str| to live as long as the BreakIterator does.
+  BreakIterator(const StringPiece16& str, BreakType break_type);
+  // Make a rule-based iterator. BreakType == RULE_BASED is implied.
+  // TODO(andrewhayden): This signature could easily be misinterpreted as
+  // "(const string16& str, const string16& locale)". We should do something
+  // better.
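+  // |rules| uses ICU break-rule syntax; an illustrative (hypothetical) rule
+  // set would be "$Letter = [:letter:]; $Letter+;".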
+  BreakIterator(const StringPiece16& str, const string16& rules);
+  ~BreakIterator();
+
+  // Init() must be called before any of the iterators are valid.
+  // Returns false if ICU failed to initialize.
+  bool Init();
+
+  // Advance to the next break.  Returns false if we've run past the end of
+  // the string.  (Note that the very last "break" is after the final
+  // character in the string, and when we advance to that position it's the
+  // last time Advance() returns true.)
+  bool Advance();
+
+  // Updates the text used by the iterator, resetting the iterator as if
+  // Init() had been called again. Any old state is lost. Returns true
+  // unless there is an error setting the text.
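+  // After a successful SetText(), pos() is reset to 0 and prev() to npos, so
+  // Advance() must be called again before GetString() yields anything useful.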
+  bool SetText(const base::char16* text, const size_t length);
+
+  // Under BREAK_WORD mode, returns true if the break we just hit is the
+  // end of a word. (Otherwise, the break iterator just skipped over e.g.
+  // whitespace or punctuation.)  Under BREAK_LINE and BREAK_NEWLINE modes,
+  // this distinction doesn't apply and it always returns false.
+  bool IsWord() const;
+
+  // Under BREAK_WORD mode:
+  //  - Returns IS_SKIPPABLE_WORD if non-word characters, such as punctuation or
+  //    spaces, are found.
+  //  - Returns IS_WORD_BREAK if the break we just hit is the end of a sequence
+  //    of word characters.
+  // Under RULE_BASED mode:
+  //  - Returns IS_SKIPPABLE_WORD if characters outside the rules' character set
+  //    or non-word characters, such as punctuation or spaces, are found.
+  //  - Returns IS_WORD_BREAK if the break we just hit is the end of a sequence
+  //    of word characters that are in the rules' character set.
+  // Not under BREAK_WORD or RULE_BASED mode:
+  //  - Returns IS_LINE_OR_CHAR_BREAK.
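+  //
+  // For example (a sketch), iterating " foo bar" in BREAK_WORD mode yields
+  // " " (IS_SKIPPABLE_WORD), "foo" (IS_WORD_BREAK), " " (IS_SKIPPABLE_WORD),
+  // and "bar" (IS_WORD_BREAK).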
+  BreakIterator::WordBreakStatus GetWordBreakStatus() const;
+
+  // Under BREAK_WORD mode, returns true if |position| is at the end of word or
+  // at the start of word. It always returns false under BREAK_LINE and
+  // BREAK_NEWLINE modes.
+  bool IsEndOfWord(size_t position) const;
+  bool IsStartOfWord(size_t position) const;
+
+  // Under BREAK_CHARACTER mode, returns whether |position| is a Unicode
+  // grapheme boundary.
+  bool IsGraphemeBoundary(size_t position) const;
+
+  // Returns the string between prev() and pos().
+  // Advance() must have been called successfully at least once for pos() to
+  // have advanced to somewhere useful.
+  string16 GetString() const;
+
+  StringPiece16 GetStringPiece() const;
+
+  // Returns the value of pos() returned before Advance() was last called.
+  size_t prev() const { return prev_; }
+
+  // Returns the current break position within the string,
+  // or BreakIterator::npos when done.
+  size_t pos() const { return pos_; }
+
+ private:
+  // ICU iterator, avoiding ICU ubrk.h dependence.
+  // This is actually an ICU UBreakIterator* type, which turns out to be
+  // a typedef for a void* in the ICU headers. Using void* directly prevents
+  // callers from needing access to the ICU public headers directory.
+  void* iter_;
+
+  // The string we're iterating over. Can be changed with SetText().
+  StringPiece16 string_;
+
+  // Rules for our iterator. Mutually exclusive with break_type_.
+  const string16 rules_;
+
+  // The breaking style (word/space/newline). Mutually exclusive with rules_.
+  BreakType break_type_;
+
+  // Previous and current iterator positions.
+  size_t prev_, pos_;
+
+  DISALLOW_COPY_AND_ASSIGN(BreakIterator);
+};
+
+}  // namespace i18n
+}  // namespace base
+
+#endif  // BASE_I18N_BREAK_ITERATOR_H_
diff --git a/base/i18n/break_iterator_unittest.cc b/base/i18n/break_iterator_unittest.cc
new file mode 100644
index 0000000..6137e02
--- /dev/null
+++ b/base/i18n/break_iterator_unittest.cc
@@ -0,0 +1,461 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/i18n/break_iterator.h"
+
+#include <stddef.h>
+
+#include "base/macros.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/stringprintf.h"
+#include "base/strings/utf_string_conversions.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace i18n {
+
+TEST(BreakIteratorTest, BreakWordEmpty) {
+  string16 empty;
+  BreakIterator iter(empty, BreakIterator::BREAK_WORD);
+  ASSERT_TRUE(iter.Init());
+  EXPECT_FALSE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_FALSE(iter.Advance());  // Test unexpected advance after end.
+  EXPECT_FALSE(iter.IsWord());
+}
+
+TEST(BreakIteratorTest, BreakWord) {
+  string16 space(UTF8ToUTF16(" "));
+  string16 str(UTF8ToUTF16(" foo bar! \npouet boom"));
+  BreakIterator iter(str, BreakIterator::BREAK_WORD);
+  ASSERT_TRUE(iter.Init());
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_EQ(space, iter.GetString());
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_TRUE(iter.IsWord());
+  EXPECT_EQ(UTF8ToUTF16("foo"), iter.GetString());
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_EQ(space, iter.GetString());
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_TRUE(iter.IsWord());
+  EXPECT_EQ(UTF8ToUTF16("bar"), iter.GetString());
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_EQ(UTF8ToUTF16("!"), iter.GetString());
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_EQ(space, iter.GetString());
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_EQ(UTF8ToUTF16("\n"), iter.GetString());
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_TRUE(iter.IsWord());
+  EXPECT_EQ(UTF8ToUTF16("pouet"), iter.GetString());
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_EQ(space, iter.GetString());
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_TRUE(iter.IsWord());
+  EXPECT_EQ(UTF8ToUTF16("boom"), iter.GetString());
+  EXPECT_FALSE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_FALSE(iter.Advance());  // Test unexpected advance after end.
+  EXPECT_FALSE(iter.IsWord());
+}
+
+TEST(BreakIteratorTest, BreakWide16) {
+  // Two Greek words separated by a space.
+  const string16 str(WideToUTF16(
+      L"\x03a0\x03b1\x03b3\x03ba\x03cc\x03c3\x03bc\x03b9"
+      L"\x03bf\x03c2\x0020\x0399\x03c3\x03c4\x03cc\x03c2"));
+  const string16 word1(str.substr(0, 10));
+  const string16 word2(str.substr(11, 5));
+  BreakIterator iter(str, BreakIterator::BREAK_WORD);
+  ASSERT_TRUE(iter.Init());
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_TRUE(iter.IsWord());
+  EXPECT_EQ(word1, iter.GetString());
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_EQ(UTF8ToUTF16(" "), iter.GetString());
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_TRUE(iter.IsWord());
+  EXPECT_EQ(word2, iter.GetString());
+  EXPECT_FALSE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_FALSE(iter.Advance());  // Test unexpected advance after end.
+  EXPECT_FALSE(iter.IsWord());
+}
+
+TEST(BreakIteratorTest, BreakWide32) {
+  // U+1D49C MATHEMATICAL SCRIPT CAPITAL A
+  const char very_wide_char[] = "\xF0\x9D\x92\x9C";
+  const string16 str(
+      UTF8ToUTF16(base::StringPrintf("%s a", very_wide_char)));
+  const string16 very_wide_word(str.substr(0, 2));
+
+  BreakIterator iter(str, BreakIterator::BREAK_WORD);
+  ASSERT_TRUE(iter.Init());
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_TRUE(iter.IsWord());
+  EXPECT_EQ(very_wide_word, iter.GetString());
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_EQ(UTF8ToUTF16(" "), iter.GetString());
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_TRUE(iter.IsWord());
+  EXPECT_EQ(UTF8ToUTF16("a"), iter.GetString());
+  EXPECT_FALSE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_FALSE(iter.Advance());  // Test unexpected advance after end.
+  EXPECT_FALSE(iter.IsWord());
+}
+
+TEST(BreakIteratorTest, BreakSpaceEmpty) {
+  string16 empty;
+  BreakIterator iter(empty, BreakIterator::BREAK_SPACE);
+  ASSERT_TRUE(iter.Init());
+  EXPECT_FALSE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_FALSE(iter.Advance());  // Test unexpected advance after end.
+  EXPECT_FALSE(iter.IsWord());
+}
+
+TEST(BreakIteratorTest, BreakSpace) {
+  string16 str(UTF8ToUTF16(" foo bar! \npouet boom"));
+  BreakIterator iter(str, BreakIterator::BREAK_SPACE);
+  ASSERT_TRUE(iter.Init());
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_EQ(UTF8ToUTF16(" "), iter.GetString());
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_EQ(UTF8ToUTF16("foo "), iter.GetString());
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_EQ(UTF8ToUTF16("bar! \n"), iter.GetString());
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_EQ(UTF8ToUTF16("pouet "), iter.GetString());
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_EQ(UTF8ToUTF16("boom"), iter.GetString());
+  EXPECT_FALSE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_FALSE(iter.Advance());  // Test unexpected advance after end.
+  EXPECT_FALSE(iter.IsWord());
+}
+
+TEST(BreakIteratorTest, BreakSpaceSP) {
+  string16 str(UTF8ToUTF16(" foo bar! \npouet boom "));
+  BreakIterator iter(str, BreakIterator::BREAK_SPACE);
+  ASSERT_TRUE(iter.Init());
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_EQ(UTF8ToUTF16(" "), iter.GetString());
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_EQ(UTF8ToUTF16("foo "), iter.GetString());
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_EQ(UTF8ToUTF16("bar! \n"), iter.GetString());
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_EQ(UTF8ToUTF16("pouet "), iter.GetString());
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_EQ(UTF8ToUTF16("boom "), iter.GetString());
+  EXPECT_FALSE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_FALSE(iter.Advance());  // Test unexpected advance after end.
+  EXPECT_FALSE(iter.IsWord());
+}
+
+TEST(BreakIteratorTest, BreakSpaceWide16) {
+  // Two Greek words.
+  const string16 str(WideToUTF16(
+      L"\x03a0\x03b1\x03b3\x03ba\x03cc\x03c3\x03bc\x03b9"
+      L"\x03bf\x03c2\x0020\x0399\x03c3\x03c4\x03cc\x03c2"));
+  const string16 word1(str.substr(0, 11));
+  const string16 word2(str.substr(11, 5));
+  BreakIterator iter(str, BreakIterator::BREAK_SPACE);
+  ASSERT_TRUE(iter.Init());
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_EQ(word1, iter.GetString());
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_EQ(word2, iter.GetString());
+  EXPECT_FALSE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_FALSE(iter.Advance());  // Test unexpected advance after end.
+  EXPECT_FALSE(iter.IsWord());
+}
+
+TEST(BreakIteratorTest, BreakSpaceWide32) {
+  // U+1D49C MATHEMATICAL SCRIPT CAPITAL A
+  const char very_wide_char[] = "\xF0\x9D\x92\x9C";
+  const string16 str(
+      UTF8ToUTF16(base::StringPrintf("%s a", very_wide_char)));
+  const string16 very_wide_word(str.substr(0, 3));
+
+  BreakIterator iter(str, BreakIterator::BREAK_SPACE);
+  ASSERT_TRUE(iter.Init());
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_EQ(very_wide_word, iter.GetString());
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_EQ(UTF8ToUTF16("a"), iter.GetString());
+  EXPECT_FALSE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_FALSE(iter.Advance());  // Test unexpected advance after end.
+  EXPECT_FALSE(iter.IsWord());
+}
+
+TEST(BreakIteratorTest, BreakLineEmpty) {
+  string16 empty;
+  BreakIterator iter(empty, BreakIterator::BREAK_NEWLINE);
+  ASSERT_TRUE(iter.Init());
+  EXPECT_FALSE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_FALSE(iter.Advance());   // Test unexpected advance after end.
+  EXPECT_FALSE(iter.IsWord());
+}
+
+TEST(BreakIteratorTest, BreakLine) {
+  string16 nl(UTF8ToUTF16("\n"));
+  string16 str(UTF8ToUTF16("\nfoo bar!\n\npouet boom"));
+  BreakIterator iter(str, BreakIterator::BREAK_NEWLINE);
+  ASSERT_TRUE(iter.Init());
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_EQ(nl, iter.GetString());
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_EQ(UTF8ToUTF16("foo bar!\n"), iter.GetString());
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_EQ(nl, iter.GetString());
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_EQ(UTF8ToUTF16("pouet boom"), iter.GetString());
+  EXPECT_FALSE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_FALSE(iter.Advance());   // Test unexpected advance after end.
+  EXPECT_FALSE(iter.IsWord());
+}
+
+TEST(BreakIteratorTest, BreakLineNL) {
+  string16 nl(UTF8ToUTF16("\n"));
+  string16 str(UTF8ToUTF16("\nfoo bar!\n\npouet boom\n"));
+  BreakIterator iter(str, BreakIterator::BREAK_NEWLINE);
+  ASSERT_TRUE(iter.Init());
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_EQ(nl, iter.GetString());
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_EQ(UTF8ToUTF16("foo bar!\n"), iter.GetString());
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_EQ(nl, iter.GetString());
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_EQ(UTF8ToUTF16("pouet boom\n"), iter.GetString());
+  EXPECT_FALSE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_FALSE(iter.Advance());   // Test unexpected advance after end.
+  EXPECT_FALSE(iter.IsWord());
+}
+
+TEST(BreakIteratorTest, BreakLineWide16) {
+  // Two Greek words separated by newline.
+  const string16 str(WideToUTF16(
+      L"\x03a0\x03b1\x03b3\x03ba\x03cc\x03c3\x03bc\x03b9"
+      L"\x03bf\x03c2\x000a\x0399\x03c3\x03c4\x03cc\x03c2"));
+  const string16 line1(str.substr(0, 11));
+  const string16 line2(str.substr(11, 5));
+  BreakIterator iter(str, BreakIterator::BREAK_NEWLINE);
+  ASSERT_TRUE(iter.Init());
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_EQ(line1, iter.GetString());
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_EQ(line2, iter.GetString());
+  EXPECT_FALSE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_FALSE(iter.Advance());   // Test unexpected advance after end.
+  EXPECT_FALSE(iter.IsWord());
+}
+
+TEST(BreakIteratorTest, BreakLineWide32) {
+  // U+1D49C MATHEMATICAL SCRIPT CAPITAL A
+  const char very_wide_char[] = "\xF0\x9D\x92\x9C";
+  const string16 str(
+      UTF8ToUTF16(base::StringPrintf("%s\na", very_wide_char)));
+  const string16 very_wide_line(str.substr(0, 3));
+  BreakIterator iter(str, BreakIterator::BREAK_NEWLINE);
+  ASSERT_TRUE(iter.Init());
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_EQ(very_wide_line, iter.GetString());
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_EQ(UTF8ToUTF16("a"), iter.GetString());
+  EXPECT_FALSE(iter.Advance());
+  EXPECT_FALSE(iter.IsWord());
+  EXPECT_FALSE(iter.Advance());   // Test unexpected advance after end.
+  EXPECT_FALSE(iter.IsWord());
+}
+
+TEST(BreakIteratorTest, BreakCharacter) {
+  static const wchar_t* kCharacters[] = {
+    // An English word consisting of four ASCII characters.
+    L"w", L"o", L"r", L"d", L" ",
+    // A Hindi word (which means "Hindi") consisting of three Devanagari
+    // characters.
+    L"\x0939\x093F", L"\x0928\x094D", L"\x0926\x0940", L" ",
+    // A Thai word (which means "feel") consisting of three Thai characters.
+    L"\x0E23\x0E39\x0E49", L"\x0E2A\x0E36", L"\x0E01", L" ",
+  };
+  std::vector<string16> characters;
+  string16 text;
+  for (size_t i = 0; i < arraysize(kCharacters); ++i) {
+    characters.push_back(WideToUTF16(kCharacters[i]));
+    text.append(characters.back());
+  }
+  BreakIterator iter(text, BreakIterator::BREAK_CHARACTER);
+  ASSERT_TRUE(iter.Init());
+  for (size_t i = 0; i < arraysize(kCharacters); ++i) {
+    EXPECT_TRUE(iter.Advance());
+    EXPECT_EQ(characters[i], iter.GetString());
+  }
+}
+
+// Test for https://code.google.com/p/chromium/issues/detail?id=411213
+// We should be able to get valid substrings with GetString() function
+// after setting new content by calling SetText().
+TEST(BreakIteratorTest, GetStringAfterSetText) {
+  const string16 initial_string(ASCIIToUTF16("str"));
+  BreakIterator iter(initial_string, BreakIterator::BREAK_WORD);
+  ASSERT_TRUE(iter.Init());
+
+  const string16 long_string(ASCIIToUTF16("another,string"));
+  EXPECT_TRUE(iter.SetText(long_string.c_str(), long_string.size()));
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_TRUE(iter.Advance());  // Advance to ',' in |long_string|
+
+  // Check that the current position is out of bounds of the |initial_string|.
+  EXPECT_LT(initial_string.size(), iter.pos());
+
+  // Check that we can get a valid substring of |long_string|.
+  EXPECT_EQ(ASCIIToUTF16(","), iter.GetString());
+}
+
+TEST(BreakIteratorTest, GetStringPiece) {
+  const string16 initial_string(ASCIIToUTF16("some string"));
+  BreakIterator iter(initial_string, BreakIterator::BREAK_WORD);
+  ASSERT_TRUE(iter.Init());
+
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_EQ(iter.GetString(), iter.GetStringPiece().as_string());
+  EXPECT_EQ(StringPiece16(ASCIIToUTF16("some")), iter.GetStringPiece());
+
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_EQ(iter.GetString(), iter.GetStringPiece().as_string());
+  EXPECT_EQ(StringPiece16(ASCIIToUTF16("string")), iter.GetStringPiece());
+}
+
+// Make sure that when not in RULE_BASED or BREAK_WORD mode we're getting
+// IS_LINE_OR_CHAR_BREAK.
+TEST(BreakIteratorTest, GetWordBreakStatusBreakLine) {
+  // A string containing the English word "foo", followed by two Khmer
+  // characters, the English word "Can", and then two Russian characters and
+  // punctuation.
+  base::string16 text(
+      base::WideToUTF16(L"foo \x1791\x17C1 \nCan \x041C\x0438..."));
+  BreakIterator iter(text, BreakIterator::BREAK_LINE);
+  ASSERT_TRUE(iter.Init());
+
+  EXPECT_TRUE(iter.Advance());
+  // Finds "foo" and the space.
+  EXPECT_EQ(base::UTF8ToUTF16("foo "), iter.GetString());
+  EXPECT_EQ(iter.GetWordBreakStatus(), BreakIterator::IS_LINE_OR_CHAR_BREAK);
+  EXPECT_TRUE(iter.Advance());
+  // Finds the Khmer characters, the next space, and the newline.
+  EXPECT_EQ(base::WideToUTF16(L"\x1791\x17C1 \n"), iter.GetString());
+  EXPECT_EQ(iter.GetWordBreakStatus(), BreakIterator::IS_LINE_OR_CHAR_BREAK);
+  EXPECT_TRUE(iter.Advance());
+  // Finds "Can" and the space.
+  EXPECT_EQ(base::UTF8ToUTF16("Can "), iter.GetString());
+  EXPECT_EQ(iter.GetWordBreakStatus(), BreakIterator::IS_LINE_OR_CHAR_BREAK);
+  EXPECT_TRUE(iter.Advance());
+  // Finds the Russian characters and periods.
+  EXPECT_EQ(base::WideToUTF16(L"\x041C\x0438..."), iter.GetString());
+  EXPECT_EQ(iter.GetWordBreakStatus(), BreakIterator::IS_LINE_OR_CHAR_BREAK);
+  EXPECT_FALSE(iter.Advance());
+}
+
+// Make sure that in BREAK_WORD mode we're getting IS_WORD_BREAK and
+// IS_SKIPPABLE_WORD when we should be. IS_WORD_BREAK should be returned when
+// we finish going over non-punctuation characters, while IS_SKIPPABLE_WORD
+// should be returned on punctuation and spaces.
+TEST(BreakIteratorTest, GetWordBreakStatusBreakWord) {
+  // A string containing the English word "foo", followed by two Khmer
+  // characters, the English word "Can", and then two Russian characters and
+  // punctuation.
+  base::string16 text(
+      base::WideToUTF16(L"foo \x1791\x17C1 \nCan \x041C\x0438..."));
+  BreakIterator iter(text, BreakIterator::BREAK_WORD);
+  ASSERT_TRUE(iter.Init());
+
+  EXPECT_TRUE(iter.Advance());
+  // Finds "foo".
+  EXPECT_EQ(base::UTF8ToUTF16("foo"), iter.GetString());
+  EXPECT_EQ(iter.GetWordBreakStatus(), BreakIterator::IS_WORD_BREAK);
+  EXPECT_TRUE(iter.Advance());
+  // Finds the space, and the Khmer characters.
+  EXPECT_EQ(base::UTF8ToUTF16(" "), iter.GetString());
+  EXPECT_EQ(iter.GetWordBreakStatus(), BreakIterator::IS_SKIPPABLE_WORD);
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_EQ(base::WideToUTF16(L"\x1791\x17C1"), iter.GetString());
+  EXPECT_EQ(iter.GetWordBreakStatus(), BreakIterator::IS_WORD_BREAK);
+  EXPECT_TRUE(iter.Advance());
+  // Finds the space and the newline.
+  EXPECT_EQ(base::UTF8ToUTF16(" "), iter.GetString());
+  EXPECT_EQ(iter.GetWordBreakStatus(), BreakIterator::IS_SKIPPABLE_WORD);
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_EQ(base::UTF8ToUTF16("\n"), iter.GetString());
+  EXPECT_EQ(iter.GetWordBreakStatus(), BreakIterator::IS_SKIPPABLE_WORD);
+  EXPECT_TRUE(iter.Advance());
+  // Finds "Can".
+  EXPECT_EQ(base::UTF8ToUTF16("Can"), iter.GetString());
+  EXPECT_EQ(iter.GetWordBreakStatus(), BreakIterator::IS_WORD_BREAK);
+  EXPECT_TRUE(iter.Advance());
+  // Finds the space and the Russian characters.
+  EXPECT_EQ(base::UTF8ToUTF16(" "), iter.GetString());
+  EXPECT_EQ(iter.GetWordBreakStatus(), BreakIterator::IS_SKIPPABLE_WORD);
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_EQ(base::WideToUTF16(L"\x041C\x0438"), iter.GetString());
+  EXPECT_EQ(iter.GetWordBreakStatus(), BreakIterator::IS_WORD_BREAK);
+  EXPECT_TRUE(iter.Advance());
+  // Finds the trailing periods.
+  EXPECT_EQ(base::UTF8ToUTF16("."), iter.GetString());
+  EXPECT_EQ(iter.GetWordBreakStatus(), BreakIterator::IS_SKIPPABLE_WORD);
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_EQ(base::UTF8ToUTF16("."), iter.GetString());
+  EXPECT_EQ(iter.GetWordBreakStatus(), BreakIterator::IS_SKIPPABLE_WORD);
+  EXPECT_TRUE(iter.Advance());
+  EXPECT_EQ(base::UTF8ToUTF16("."), iter.GetString());
+  EXPECT_EQ(iter.GetWordBreakStatus(), BreakIterator::IS_SKIPPABLE_WORD);
+  EXPECT_FALSE(iter.Advance());
+}
+
+}  // namespace i18n
+}  // namespace base
diff --git a/base/i18n/build_utf8_validator_tables.cc b/base/i18n/build_utf8_validator_tables.cc
new file mode 100644
index 0000000..0cdcc35
--- /dev/null
+++ b/base/i18n/build_utf8_validator_tables.cc
@@ -0,0 +1,470 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Create a state machine for validating UTF-8. The algorithm in brief:
+// 1. Convert the complete Unicode range of code points, except for the
+//    surrogate code points, to an ordered array of sequences of bytes in
+//    UTF-8.
+// 2. Convert individual bytes to ranges, starting from the right of each byte
+//    sequence. For each range, ensure the bytes on the left and the ranges
+//    on the right are identical.
+// 3. Convert the resulting list of ranges into a state machine, collapsing
+//    identical states.
+// 4. Convert the state machine to an array of bytes.
+// 5. Output as a C++ file.
+//
+// To use:
+//  $ ninja -C out/Release build_utf8_validator_tables
+//  $ out/Release/build_utf8_validator_tables
+//                                   --output=base/i18n/utf8_validator_tables.cc
+//  $ git add base/i18n/utf8_validator_tables.cc
+//
+// Because the table is not expected to ever change, it is checked into the
+// repository rather than being regenerated at build time.
+//
+// This code uses type uint8_t throughout to represent bytes, to avoid
+// signed/unsigned char confusion.
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <algorithm>
+#include <map>
+#include <string>
+#include <vector>
+
+#include "base/command_line.h"
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/strings/stringprintf.h"
+#include "third_party/icu/source/common/unicode/utf8.h"
+
+namespace {
+
+const char kHelpText[] =
+    "Usage: build_utf8_validator_tables [ --help ] [ --output=<file> ]\n";
+
+const char kProlog[] =
+    "// Copyright 2013 The Chromium Authors. All rights reserved.\n"
+    "// Use of this source code is governed by a BSD-style license that can "
+    "be\n"
+    "// found in the LICENSE file.\n"
+    "\n"
+    "// This file is auto-generated by build_utf8_validator_tables.\n"
+    "// DO NOT EDIT.\n"
+    "\n"
+    "#include \"base/i18n/utf8_validator_tables.h\"\n"
+    "\n"
+    "namespace base {\n"
+    "namespace internal {\n"
+    "\n"
+    "const uint8_t kUtf8ValidatorTables[] = {\n";
+
+const char kEpilog[] =
+    "};\n"
+    "\n"
+    "const size_t kUtf8ValidatorTablesSize = arraysize(kUtf8ValidatorTables);\n"
+    "\n"
+    "}  // namespace internal\n"
+    "}  // namespace base\n";
+
+// Ranges are inclusive at both ends; they represent [from, to].
+class Range {
+ public:
+  // Ranges always start with just one byte.
+  explicit Range(uint8_t value) : from_(value), to_(value) {}
+
+  // Range objects are copyable and assignable to be used in STL
+  // containers. Since they only contain non-pointer POD types, the default copy
+  // constructor, assignment operator and destructor will work.
+
+  // Add a byte to the range. We intentionally only support adding a byte at the
+  // end, since that is the only operation the code needs.
+  void AddByte(uint8_t to) {
+    CHECK(to == to_ + 1);
+    to_ = to;
+  }
+
+  uint8_t from() const { return from_; }
+  uint8_t to() const { return to_; }
+
+  bool operator<(const Range& rhs) const {
+    return (from() < rhs.from() || (from() == rhs.from() && to() < rhs.to()));
+  }
+
+  bool operator==(const Range& rhs) const {
+    return from() == rhs.from() && to() == rhs.to();
+  }
+
+ private:
+  uint8_t from_;
+  uint8_t to_;
+};
+
+// A vector of Ranges is like a simple regular expression--it corresponds to
+// a set of strings of the same length that have bytes in each position in
+// the appropriate range.
+typedef std::vector<Range> StringSet;
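+//
+// For example (an illustrative sketch, not used by the code below), the
+// StringSet
+//   { [\xc2-\xdf], [\x80-\xbf] }
+// describes exactly the well-formed two-byte UTF-8 sequences: a lead byte in
+// [\xc2-\xdf] followed by a continuation byte in [\x80-\xbf].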
+
+// A UTF-8 "character" is represented by a sequence of bytes.
+typedef std::vector<uint8_t> Character;
+
+// In the second stage of the algorithm, we want to convert a large list of
+// Characters into a small list of StringSets.
+struct Pair {
+  Character character;
+  StringSet set;
+};
+
+typedef std::vector<Pair> PairVector;
+
+// A class to print a table of numbers in the same style as clang-format.
+class TablePrinter {
+ public:
+  explicit TablePrinter(FILE* stream)
+      : stream_(stream), values_on_this_line_(0), current_offset_(0) {}
+
+  void PrintValue(uint8_t value) {
+    if (values_on_this_line_ == 0) {
+      fputs("   ", stream_);
+    } else if (values_on_this_line_ == kMaxValuesPerLine) {
+      fprintf(stream_, "  // 0x%02x\n   ", current_offset_);
+      values_on_this_line_ = 0;
+    }
+    fprintf(stream_, " 0x%02x,", static_cast<int>(value));
+    ++values_on_this_line_;
+    ++current_offset_;
+  }
+
+  void NewLine() {
+    while (values_on_this_line_ < kMaxValuesPerLine) {
+      fputs("      ", stream_);
+      ++values_on_this_line_;
+    }
+    fprintf(stream_, "  // 0x%02x\n", current_offset_);
+    values_on_this_line_ = 0;
+  }
+
+ private:
+  // stdio stream. Not owned.
+  FILE* stream_;
+
+  // Number of values so far printed on this line.
+  int values_on_this_line_;
+
+  // Total values printed so far.
+  int current_offset_;
+
+  static const int kMaxValuesPerLine = 8;
+
+  DISALLOW_COPY_AND_ASSIGN(TablePrinter);
+};
+
+// Start by filling a PairVector with characters. The resulting vector goes from
+// "\x00" to "\xf4\x8f\xbf\xbf".
+PairVector InitializeCharacters() {
+  PairVector vector;
+  for (int i = 0; i <= 0x10FFFF; ++i) {
+    if (i >= 0xD800 && i < 0xE000) {
+      // Surrogate codepoints are not permitted. Non-character code points are
+      // explicitly permitted.
+      continue;
+    }
+    uint8_t bytes[4];
+    unsigned int offset = 0;
+    UBool is_error = false;
+    U8_APPEND(bytes, offset, arraysize(bytes), i, is_error);
+    DCHECK(!is_error);
+    DCHECK_GT(offset, 0u);
+    DCHECK_LE(offset, arraysize(bytes));
+    Pair pair = {Character(bytes, bytes + offset), StringSet()};
+    vector.push_back(pair);
+  }
+  return vector;
+}
+
+// Construct a new Pair from |character| and the concatenation of |new_range|
+// and |existing_set|, and append it to |pairs|.
+void ConstructPairAndAppend(const Character& character,
+                            const Range& new_range,
+                            const StringSet& existing_set,
+                            PairVector* pairs) {
+  Pair new_pair = {character, StringSet(1, new_range)};
+  new_pair.set.insert(
+      new_pair.set.end(), existing_set.begin(), existing_set.end());
+  pairs->push_back(new_pair);
+}
+
+// Each pass over the PairVector strips one byte off the right-hand-side of the
+// characters and adds a range to the set on the right. For example, the first
+// pass converts the range from "\xe0\xa0\x80" to "\xe0\xa0\xbf" to ("\xe0\xa0",
+// [\x80-\xbf]), then the second pass converts the range from ("\xe0\xa0",
+// [\x80-\xbf]) to ("\xe0\xbf", [\x80-\xbf]) to ("\xe0",
+// [\xa0-\xbf][\x80-\xbf]).
+void MoveRightMostCharToSet(PairVector* pairs) {
+  PairVector new_pairs;
+  PairVector::const_iterator it = pairs->begin();
+  while (it != pairs->end() && it->character.empty()) {
+    new_pairs.push_back(*it);
+    ++it;
+  }
+  CHECK(it != pairs->end());
+  Character unconverted_bytes(it->character.begin(), it->character.end() - 1);
+  Range new_range(it->character.back());
+  StringSet converted = it->set;
+  ++it;
+  while (it != pairs->end()) {
+    const Pair& current_pair = *it++;
+    if (current_pair.character.size() == unconverted_bytes.size() + 1 &&
+        std::equal(unconverted_bytes.begin(),
+                   unconverted_bytes.end(),
+                   current_pair.character.begin()) &&
+        converted == current_pair.set) {
+      // The particular set of UTF-8 codepoints we are validating guarantees
+      // that each byte range will be contiguous. This would not necessarily be
+      // true for an arbitrary set of UTF-8 codepoints.
+      DCHECK_EQ(new_range.to() + 1, current_pair.character.back());
+      new_range.AddByte(current_pair.character.back());
+      continue;
+    }
+    ConstructPairAndAppend(unconverted_bytes, new_range, converted, &new_pairs);
+    unconverted_bytes = Character(current_pair.character.begin(),
+                                  current_pair.character.end() - 1);
+    new_range = Range(current_pair.character.back());
+    converted = current_pair.set;
+  }
+  ConstructPairAndAppend(unconverted_bytes, new_range, converted, &new_pairs);
+  new_pairs.swap(*pairs);
+}
+
+void MoveAllCharsToSets(PairVector* pairs) {
+  // Since each pass of the function moves one byte, and UTF-8 sequences
+  // are at most 4 bytes long, this simply runs the algorithm four times.
+  for (int i = 0; i < 4; ++i) {
+    MoveRightMostCharToSet(pairs);
+  }
+#if DCHECK_IS_ON()
+  for (PairVector::const_iterator it = pairs->begin(); it != pairs->end();
+       ++it) {
+    DCHECK(it->character.empty());
+  }
+#endif
+}
+
+// Logs the generated string sets in regular-expression style, i.e. [\x00-\x7f],
+// [\xc2-\xdf][\x80-\xbf], etc. This can be a useful sanity-check that the
+// algorithm is working. Use the command-line option
+// --vmodule=build_utf8_validator_tables=1 to see this output.
+void LogStringSets(const PairVector& pairs) {
+  for (PairVector::const_iterator pair_it = pairs.begin();
+       pair_it != pairs.end();
+       ++pair_it) {
+    std::string set_as_string;
+    for (StringSet::const_iterator set_it = pair_it->set.begin();
+         set_it != pair_it->set.end();
+         ++set_it) {
+      set_as_string += base::StringPrintf("[\\x%02x-\\x%02x]",
+                                          static_cast<int>(set_it->from()),
+                                          static_cast<int>(set_it->to()));
+    }
+    VLOG(1) << set_as_string;
+  }
+}
+
+// A single state in the state machine is represented by a sorted vector of
+// start bytes and target states. All input bytes in the range between the start
+// byte and the next entry in the vector (or 0xFF) result in a transition to the
+// target state.
+struct StateRange {
+  uint8_t from;
+  uint8_t target_state;
+};
+
+typedef std::vector<StateRange> State;
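+//
+// For example (an illustrative sketch only), the three-entry state
+//   {{0x00, 1}, {0x80, 2}, {0xc0, 1}}
+// sends bytes 0x00-0x7f to state 1, bytes 0x80-0xbf to state 2, and bytes
+// 0xc0-0xff back to state 1.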
+
+// Generates a state where all bytes go to state 1 (invalid). This is also used
+// as an initializer for other states (since bytes from outside the desired
+// range are invalid).
+State GenerateInvalidState() {
+  const StateRange range = {0, 1};
+  return State(1, range);
+}
+
+// A map from a state (i.e. a set of strings which will match from this state)
+// to a number (which is an index into the array of states).
+typedef std::map<StringSet, uint8_t> StateMap;
+
+// Create a new state corresponding to |set|, add it to |states| and
+// |state_map|, and return the index it was given in |states|.
+uint8_t MakeState(const StringSet& set,
+                  std::vector<State>* states,
+                  StateMap* state_map) {
+  DCHECK(!set.empty());
+  const Range& range = set.front();
+  const StringSet rest(set.begin() + 1, set.end());
+  const StateMap::const_iterator where = state_map->find(rest);
+  const uint8_t target_state = where == state_map->end()
+                                   ? MakeState(rest, states, state_map)
+                                   : where->second;
+  DCHECK_LT(0, range.from());
+  DCHECK_LT(range.to(), 0xFF);
+  const StateRange new_state_initializer[] = {
+      {0, 1},
+      {range.from(), target_state},
+      {static_cast<uint8_t>(range.to() + 1), 1}};
+  states->push_back(
+      State(new_state_initializer,
+            new_state_initializer + arraysize(new_state_initializer)));
+  const uint8_t new_state_number =
+      base::checked_cast<uint8_t>(states->size() - 1);
+  CHECK(state_map->insert(std::make_pair(set, new_state_number)).second);
+  return new_state_number;
+}
+
+std::vector<State> GenerateStates(const PairVector& pairs) {
+  // States 0 and 1 are the initial/valid state and invalid state, respectively.
+  std::vector<State> states(2, GenerateInvalidState());
+  StateMap state_map;
+  state_map.insert(std::make_pair(StringSet(), 0));
+  for (PairVector::const_iterator it = pairs.begin(); it != pairs.end(); ++it) {
+    DCHECK(it->character.empty());
+    DCHECK(!it->set.empty());
+    const Range& range = it->set.front();
+    const StringSet rest(it->set.begin() + 1, it->set.end());
+    const StateMap::const_iterator where = state_map.find(rest);
+    const uint8_t target_state = where == state_map.end()
+                                     ? MakeState(rest, &states, &state_map)
+                                     : where->second;
+    if (states[0].back().from == range.from()) {
+      DCHECK_EQ(1, states[0].back().target_state);
+      states[0].back().target_state = target_state;
+      DCHECK_LT(range.to(), 0xFF);
+      const StateRange new_range = {static_cast<uint8_t>(range.to() + 1), 1};
+      states[0].push_back(new_range);
+    } else {
+      DCHECK_LT(range.to(), 0xFF);
+      const StateRange new_range_initializer[] = {
+          {range.from(), target_state},
+          {static_cast<uint8_t>(range.to() + 1), 1}};
+      states[0]
+          .insert(states[0].end(),
+                  new_range_initializer,
+                  new_range_initializer + arraysize(new_range_initializer));
+    }
+  }
+  return states;
+}
+
+// Output the generated states as a C++ table. Two tricks are used to compact
+// the table. First, each state in the table starts with a shift value which
+// indicates how many bits we can discard from the right-hand-side of the byte
+// before doing the table lookup. Second, only the state transitions for bytes
+// with the top-bit set are included in the table; bytes without the top-bit
+// set are just ASCII and are handled directly by the code.
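+//
+// As a worked example (illustrative arithmetic only): a state whose ranges
+// all start on multiples of 0x20 gets shift 5, so its row occupies
+// 1 + (0x100 - 0x80) / 0x20 = 5 bytes: the shift value itself followed by one
+// entry for each of the buckets 0x80-0x9f, 0xa0-0xbf, 0xc0-0xdf and
+// 0xe0-0xff. See PrintStates() below for the authoritative layout.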
+void PrintStates(const std::vector<State>& states, FILE* stream) {
+  // First calculate the start-offset of each state. This allows the state
+  // machine to jump directly to the correct offset, avoiding an extra
+  // indirection. State 0 starts at offset 0.
+  std::vector<uint8_t> state_offset(1, 0);
+  std::vector<uint8_t> shifts;
+  uint8_t pos = 0;
+
+  for (std::vector<State>::const_iterator state_it = states.begin();
+       state_it != states.end();
+       ++state_it) {
+    // We want to set |shift| to the (0-based) index of the least-significant
+    // set bit in any of the ranges for this state, since this tells us how many
+    // bits we can discard and still determine what range a byte lies in. Sadly
+    // it appears that ffs() is not portable, so we do it clumsily.
+    uint8_t shift = 7;
+    for (State::const_iterator range_it = state_it->begin();
+         range_it != state_it->end();
+         ++range_it) {
+      while (shift > 0 && range_it->from % (1 << shift) != 0) {
+        --shift;
+      }
+    }
+    shifts.push_back(shift);
+    pos += 1 + (1 << (7 - shift));
+    state_offset.push_back(pos);
+  }
+
+  DCHECK_EQ(129, state_offset[1]);
+
+  fputs(kProlog, stream);
+  TablePrinter table_printer(stream);
+
+  for (uint8_t state_index = 0; state_index < states.size(); ++state_index) {
+    const uint8_t shift = shifts[state_index];
+    uint8_t next_range = 0;
+    uint8_t target_state = 1;
+    fprintf(stream,
+            "    // State %d, offset 0x%02x\n",
+            static_cast<int>(state_index),
+            static_cast<int>(state_offset[state_index]));
+    table_printer.PrintValue(shift);
+    for (int i = 0; i < 0x100; i += (1 << shift)) {
+      if (next_range < states[state_index].size() &&
+          states[state_index][next_range].from == i) {
+        target_state = states[state_index][next_range].target_state;
+        ++next_range;
+      }
+      if (i >= 0x80) {
+        table_printer.PrintValue(state_offset[target_state]);
+      }
+    }
+    table_printer.NewLine();
+  }
+
+  fputs(kEpilog, stream);
+}
+
+}  // namespace
+
+int main(int argc, char* argv[]) {
+  base::CommandLine::Init(argc, argv);
+  logging::LoggingSettings settings;
+  settings.logging_dest = logging::LOG_TO_SYSTEM_DEBUG_LOG;
+  logging::InitLogging(settings);
+  if (base::CommandLine::ForCurrentProcess()->HasSwitch("help")) {
+    fwrite(kHelpText, 1, arraysize(kHelpText), stdout);
+    exit(EXIT_SUCCESS);
+  }
+  base::FilePath filename =
+      base::CommandLine::ForCurrentProcess()->GetSwitchValuePath("output");
+
+  FILE* output = stdout;
+  if (!filename.empty()) {
+    output = base::OpenFile(filename, "wb");
+    if (!output)
+      PLOG(FATAL) << "Couldn't open '" << filename.AsUTF8Unsafe()
+                  << "' for writing";
+  }
+
+  // Step 1: Enumerate the characters
+  PairVector pairs = InitializeCharacters();
+  // Step 2: Convert to sets.
+  MoveAllCharsToSets(&pairs);
+  if (VLOG_IS_ON(1)) {
+    LogStringSets(pairs);
+  }
+  // Step 3: Generate states.
+  std::vector<State> states = GenerateStates(pairs);
+  // Step 4/5: Print output
+  PrintStates(states, output);
+
+  if (!filename.empty()) {
+    if (!base::CloseFile(output))
+      PLOG(FATAL) << "Couldn't finish writing '" << filename.AsUTF8Unsafe()
+                  << "'";
+  }
+
+  return EXIT_SUCCESS;
+}
diff --git a/base/i18n/case_conversion.cc b/base/i18n/case_conversion.cc
new file mode 100644
index 0000000..a4a104c
--- /dev/null
+++ b/base/i18n/case_conversion.cc
@@ -0,0 +1,90 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/i18n/case_conversion.h"
+
+#include <stdint.h>
+
+#include "base/numerics/safe_conversions.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_util.h"
+#include "third_party/icu/source/common/unicode/uchar.h"
+#include "third_party/icu/source/common/unicode/unistr.h"
+#include "third_party/icu/source/common/unicode/ustring.h"
+
+namespace base {
+namespace i18n {
+
+namespace {
+
+// Provides a uniform interface for upper/lower/folding, which take
+// slightly varying parameters.
+typedef int32_t (*CaseMapperFunction)(UChar* dest, int32_t dest_capacity,
+                                      const UChar* src, int32_t src_length,
+                                      UErrorCode* error);
+
+int32_t ToUpperMapper(UChar* dest, int32_t dest_capacity,
+                      const UChar* src, int32_t src_length,
+                      UErrorCode* error) {
+  // Use default locale.
+  return u_strToUpper(dest, dest_capacity, src, src_length, nullptr, error);
+}
+
+int32_t ToLowerMapper(UChar* dest, int32_t dest_capacity,
+                      const UChar* src, int32_t src_length,
+                      UErrorCode* error) {
+  // Use default locale.
+  return u_strToLower(dest, dest_capacity, src, src_length, nullptr, error);
+}
+
+int32_t FoldCaseMapper(UChar* dest, int32_t dest_capacity,
+                       const UChar* src, int32_t src_length,
+                       UErrorCode* error) {
+  return u_strFoldCase(dest, dest_capacity, src, src_length,
+                       U_FOLD_CASE_DEFAULT, error);
+}
+
+// Provides functionality similar to UnicodeString::caseMap, but on string16.
+string16 CaseMap(StringPiece16 string, CaseMapperFunction case_mapper) {
+  string16 dest;
+  if (string.empty())
+    return dest;
+
+  // Provide an initial guess that the string length won't change. The typical
+  // strings we use will very rarely change length in this process, so don't
+  // optimize for the case where they do.
+  dest.resize(string.size());
+
+  UErrorCode error;
+  do {
+    error = U_ZERO_ERROR;
+
+    // ICU null-terminates the result only when there is enough room for the
+    // terminator, so we don't need to save room for it.
+    // Don't use WriteInto, which assumes null terminators.
+    int32_t new_length = case_mapper(
+        &dest[0], saturated_cast<int32_t>(dest.size()),
+        string.data(), saturated_cast<int32_t>(string.size()),
+        &error);
+    dest.resize(new_length);
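+    // On U_BUFFER_OVERFLOW_ERROR, ICU reports the length it actually needs,
+    // so the resize above grows |dest| and the next pass runs with enough
+    // capacity.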
+  } while (error == U_BUFFER_OVERFLOW_ERROR);
+  return dest;
+}
+
+}  // namespace
+
+string16 ToLower(StringPiece16 string) {
+  return CaseMap(string, &ToLowerMapper);
+}
+
+string16 ToUpper(StringPiece16 string) {
+  return CaseMap(string, &ToUpperMapper);
+}
+
+string16 FoldCase(StringPiece16 string) {
+  return CaseMap(string, &FoldCaseMapper);
+}
+
+}  // namespace i18n
+}  // namespace base
diff --git a/base/i18n/case_conversion.h b/base/i18n/case_conversion.h
new file mode 100644
index 0000000..0631a80
--- /dev/null
+++ b/base/i18n/case_conversion.h
@@ -0,0 +1,48 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_I18N_CASE_CONVERSION_H_
+#define BASE_I18N_CASE_CONVERSION_H_
+
+#include "base/i18n/base_i18n_export.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+namespace i18n {
+
+// UNICODE CASE-HANDLING ADVICE
+//
+// In English it's always safe to convert to upper-case or lower-case text
+// and get a good answer. But some languages have rules specific to those
+// locales. One example is the Turkish I:
+//   http://www.i18nguy.com/unicode/turkish-i18n.html
+//
+// ToLower/ToUpper use the current ICU locale which will take into account
+// the user language preference. Use this when dealing with user typing.
+//
+// FoldCase canonicalizes to a standardized form independent of the current
+// locale. Use this when comparing general Unicode strings that don't
+// necessarily belong in the user's current locale (like commands, protocol
+// names, other strings from the web) for case-insensitive equality.
+//
+// Note that case conversions will change the length of the string in some
+// not-uncommon cases. Never assume that the output is the same length as
+// the input.
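+//
+// A minimal sketch of the difference (assumes the ICU default locale has been
+// set to Turkish, e.g. via base::i18n::SetICUDefaultLocale("tr")):
+//
+//   // Locale-sensitive: in Turkish, ToLower maps ASCII 'I' to U+0131
+//   // (dotless i).
+//   string16 lower = base::i18n::ToLower(ASCIIToUTF16("I"));
+//   // Locale-independent: FoldCase maps 'I' to 'i' regardless of locale.
+//   string16 folded = base::i18n::FoldCase(ASCIIToUTF16("I"));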
+
+// Returns the lower case equivalent of string. Uses ICU's current locale.
+BASE_I18N_EXPORT string16 ToLower(StringPiece16 string);
+
+// Returns the upper case equivalent of string. Uses ICU's current locale.
+BASE_I18N_EXPORT string16 ToUpper(StringPiece16 string);
+
+// Convert the given string to a canonical case, independent of the current
+// locale. For ASCII the canonical form is lower case.
+// See http://unicode.org/faq/casemap_charprop.html#2
+BASE_I18N_EXPORT string16 FoldCase(StringPiece16 string);
+
+}  // namespace i18n
+}  // namespace base
+
+#endif  // BASE_I18N_CASE_CONVERSION_H_
diff --git a/base/i18n/case_conversion_unittest.cc b/base/i18n/case_conversion_unittest.cc
new file mode 100644
index 0000000..ee795bc
--- /dev/null
+++ b/base/i18n/case_conversion_unittest.cc
@@ -0,0 +1,119 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/i18n/case_conversion.h"
+#include "base/i18n/rtl.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/test/icu_test_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/icu/source/i18n/unicode/usearch.h"
+
+namespace base {
+namespace i18n {
+
+namespace {
+
+const wchar_t kNonASCIIMixed[] =
+    L"\xC4\xD6\xE4\xF6\x20\xCF\xEF\x20\xF7\x25"
+    L"\xA4\x23\x2A\x5E\x60\x40\xA3\x24\x2030\x201A\x7E\x20\x1F07\x1F0F"
+    L"\x20\x1E00\x1E01";
+const wchar_t kNonASCIILower[] =
+    L"\xE4\xF6\xE4\xF6\x20\xEF\xEF"
+    L"\x20\xF7\x25\xA4\x23\x2A\x5E\x60\x40\xA3\x24\x2030\x201A\x7E\x20\x1F07"
+    L"\x1F07\x20\x1E01\x1E01";
+const wchar_t kNonASCIIUpper[] =
+    L"\xC4\xD6\xC4\xD6\x20\xCF\xCF"
+    L"\x20\xF7\x25\xA4\x23\x2A\x5E\x60\x40\xA3\x24\x2030\x201A\x7E\x20\x1F0F"
+    L"\x1F0F\x20\x1E00\x1E00";
+
+}  // namespace
+
+// Test upper and lower case string conversion.
+TEST(CaseConversionTest, UpperLower) {
+  const string16 mixed(ASCIIToUTF16("Text with UPPer & lowER casE."));
+  const string16 expected_lower(ASCIIToUTF16("text with upper & lower case."));
+  const string16 expected_upper(ASCIIToUTF16("TEXT WITH UPPER & LOWER CASE."));
+
+  string16 result = ToLower(mixed);
+  EXPECT_EQ(expected_lower, result);
+
+  result = ToUpper(mixed);
+  EXPECT_EQ(expected_upper, result);
+}
+
+TEST(CaseConversionTest, NonASCII) {
+  const string16 mixed(WideToUTF16(kNonASCIIMixed));
+  const string16 expected_lower(WideToUTF16(kNonASCIILower));
+  const string16 expected_upper(WideToUTF16(kNonASCIIUpper));
+
+  string16 result = ToLower(mixed);
+  EXPECT_EQ(expected_lower, result);
+
+  result = ToUpper(mixed);
+  EXPECT_EQ(expected_upper, result);
+}
+
+TEST(CaseConversionTest, TurkishLocaleConversion) {
+  const string16 mixed(WideToUTF16(L"\x49\x131"));
+  const string16 expected_lower(WideToUTF16(L"\x69\x131"));
+  const string16 expected_upper(WideToUTF16(L"\x49\x49"));
+
+  test::ScopedRestoreICUDefaultLocale restore_locale;
+  i18n::SetICUDefaultLocale("en_US");
+
+  string16 result = ToLower(mixed);
+  EXPECT_EQ(expected_lower, result);
+
+  result = ToUpper(mixed);
+  EXPECT_EQ(expected_upper, result);
+
+  i18n::SetICUDefaultLocale("tr");
+
+  const string16 expected_lower_turkish(WideToUTF16(L"\x131\x131"));
+  const string16 expected_upper_turkish(WideToUTF16(L"\x49\x49"));
+
+  result = ToLower(mixed);
+  EXPECT_EQ(expected_lower_turkish, result);
+
+  result = ToUpper(mixed);
+  EXPECT_EQ(expected_upper_turkish, result);
+}
+
+TEST(CaseConversionTest, FoldCase) {
+  // Simple ASCII, should lower-case.
+  EXPECT_EQ(ASCIIToUTF16("hello, world"),
+            FoldCase(ASCIIToUTF16("Hello, World")));
+
+  // Non-ASCII cases from above. They should all fold to the same result.
+  EXPECT_EQ(FoldCase(WideToUTF16(kNonASCIIMixed)),
+            FoldCase(WideToUTF16(kNonASCIILower)));
+  EXPECT_EQ(FoldCase(WideToUTF16(kNonASCIIMixed)),
+            FoldCase(WideToUTF16(kNonASCIIUpper)));
+
+  // Turkish cases from above. This is the lower-case expected result from the
+  // US locale. It should be the same even when the current locale is Turkish.
+  const string16 turkish(WideToUTF16(L"\x49\x131"));
+  const string16 turkish_expected(WideToUTF16(L"\x69\x131"));
+
+  test::ScopedRestoreICUDefaultLocale restore_locale;
+  i18n::SetICUDefaultLocale("en_US");
+  EXPECT_EQ(turkish_expected, FoldCase(turkish));
+
+  i18n::SetICUDefaultLocale("tr");
+  EXPECT_EQ(turkish_expected, FoldCase(turkish));
+
+  // Test a case that gets bigger when processed.
+  // U+0130 (LATIN CAPITAL LETTER I WITH DOT ABOVE) gets folded to a lower
+  // case "i" followed by U+0307 COMBINING DOT ABOVE.
+  EXPECT_EQ(WideToUTF16(L"i\u0307j"), FoldCase(WideToUTF16(L"\u0130j")));
+
+  // U+00DF (SHARP S) and U+1E9E (CAPITAL SHARP S) are both folded to "ss".
+  EXPECT_EQ(ASCIIToUTF16("ssss"), FoldCase(WideToUTF16(L"\u00DF\u1E9E")));
+}
+
+}  // namespace i18n
+}  // namespace base
diff --git a/base/i18n/char_iterator.cc b/base/i18n/char_iterator.cc
new file mode 100644
index 0000000..d80b8b6
--- /dev/null
+++ b/base/i18n/char_iterator.cc
@@ -0,0 +1,80 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/i18n/char_iterator.h"
+
+#include "third_party/icu/source/common/unicode/utf8.h"
+#include "third_party/icu/source/common/unicode/utf16.h"
+
+namespace base {
+namespace i18n {
+
+UTF8CharIterator::UTF8CharIterator(const std::string* str)
+    : str_(reinterpret_cast<const uint8_t*>(str->data())),
+      len_(str->size()),
+      array_pos_(0),
+      next_pos_(0),
+      char_pos_(0),
+      char_(0) {
+  if (len_)
+    U8_NEXT(str_, next_pos_, len_, char_);
+}
+
+UTF8CharIterator::~UTF8CharIterator() = default;
+
+bool UTF8CharIterator::Advance() {
+  if (array_pos_ >= len_)
+    return false;
+
+  array_pos_ = next_pos_;
+  char_pos_++;
+  if (next_pos_ < len_)
+    U8_NEXT(str_, next_pos_, len_, char_);
+
+  return true;
+}
+
+UTF16CharIterator::UTF16CharIterator(const string16* str)
+    : str_(reinterpret_cast<const char16*>(str->data())),
+      len_(str->size()),
+      array_pos_(0),
+      next_pos_(0),
+      char_pos_(0),
+      char_(0) {
+  if (len_)
+    ReadChar();
+}
+
+UTF16CharIterator::UTF16CharIterator(const char16* str, size_t str_len)
+    : str_(str),
+      len_(str_len),
+      array_pos_(0),
+      next_pos_(0),
+      char_pos_(0),
+      char_(0) {
+  if (len_)
+    ReadChar();
+}
+
+UTF16CharIterator::~UTF16CharIterator() = default;
+
+bool UTF16CharIterator::Advance() {
+  if (array_pos_ >= len_)
+    return false;
+
+  array_pos_ = next_pos_;
+  char_pos_++;
+  if (next_pos_ < len_)
+    ReadChar();
+
+  return true;
+}
+
+void UTF16CharIterator::ReadChar() {
+  // This is actually a huge macro, so it is worth having in a separate
+  // function.
+  U16_NEXT(str_, next_pos_, len_, char_);
+}
+
+}  // namespace i18n
+}  // namespace base
diff --git a/base/i18n/char_iterator.h b/base/i18n/char_iterator.h
new file mode 100644
index 0000000..24024d4
--- /dev/null
+++ b/base/i18n/char_iterator.h
@@ -0,0 +1,134 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_I18N_CHAR_ITERATOR_H_
+#define BASE_I18N_CHAR_ITERATOR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <string>
+
+#include "base/i18n/base_i18n_export.h"
+#include "base/macros.h"
+#include "base/strings/string16.h"
+#include "build/build_config.h"
+
+// The CharIterator classes iterate through the characters in UTF8 and
+// UTF16 strings.  Example usage:
+//
+//   UTF8CharIterator iter(&str);
+//   while (!iter.end()) {
+//     VLOG(1) << iter.get();
+//     iter.Advance();
+//   }
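+//
+// UTF16CharIterator walks a string16 the same way, one code point at a time
+// (a surrogate pair counts as a single character), e.g.:
+//
+//   UTF16CharIterator iter(&str16);
+//   while (!iter.end()) {
+//     VLOG(1) << iter.get();
+//     iter.Advance();
+//   }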
+
+#if defined(OS_WIN)
+typedef unsigned char uint8_t;
+#endif
+
+namespace base {
+namespace i18n {
+
+class BASE_I18N_EXPORT UTF8CharIterator {
+ public:
+  // Requires |str| to live as long as the UTF8CharIterator does.
+  explicit UTF8CharIterator(const std::string* str);
+  ~UTF8CharIterator();
+
+  // Return the starting array index of the current character within the
+  // string.
+  int32_t array_pos() const { return array_pos_; }
+
+  // Return the logical index of the current character, independent of the
+  // number of bytes each character takes.
+  int32_t char_pos() const { return char_pos_; }
+
+  // Return the current char.
+  int32_t get() const { return char_; }
+
+  // Returns true if we're at the end of the string.
+  bool end() const { return array_pos_ == len_; }
+
+  // Advance to the next actual character.  Returns false if we're at the
+  // end of the string.
+  bool Advance();
+
+ private:
+  // The string we're iterating over.
+  const uint8_t* str_;
+
+  // The length of the encoded string.
+  int32_t len_;
+
+  // Array index.
+  int32_t array_pos_;
+
+  // The next array index.
+  int32_t next_pos_;
+
+  // Character index.
+  int32_t char_pos_;
+
+  // The current character.
+  int32_t char_;
+
+  DISALLOW_COPY_AND_ASSIGN(UTF8CharIterator);
+};
+
+class BASE_I18N_EXPORT UTF16CharIterator {
+ public:
+  // Requires |str| to live as long as the UTF16CharIterator does.
+  explicit UTF16CharIterator(const string16* str);
+  UTF16CharIterator(const char16* str, size_t str_len);
+  ~UTF16CharIterator();
+
+  // Return the starting array index of the current character within the
+  // string.
+  int32_t array_pos() const { return array_pos_; }
+
+  // Return the logical index of the current character, independent of the
+  // number of codewords each character takes.
+  int32_t char_pos() const { return char_pos_; }
+
+  // Return the current char.
+  int32_t get() const { return char_; }
+
+  // Returns true if we're at the end of the string.
+  bool end() const { return array_pos_ == len_; }
+
+  // Advance to the next actual character.  Returns false if we're at the
+  // end of the string.
+  bool Advance();
+
+ private:
+  // Fills in the current character we found and advances to the next
+  // character, updating all flags as necessary.
+  void ReadChar();
+
+  // The string we're iterating over.
+  const char16* str_;
+
+  // The length of the encoded string.
+  int32_t len_;
+
+  // Array index.
+  int32_t array_pos_;
+
+  // The next array index.
+  int32_t next_pos_;
+
+  // Character index.
+  int32_t char_pos_;
+
+  // The current character.
+  int32_t char_;
+
+  DISALLOW_COPY_AND_ASSIGN(UTF16CharIterator);
+};
+
+}  // namespace i18n
+}  // namespace base
+
+#endif  // BASE_I18N_CHAR_ITERATOR_H_
diff --git a/base/i18n/char_iterator_unittest.cc b/base/i18n/char_iterator_unittest.cc
new file mode 100644
index 0000000..0cf8e6c
--- /dev/null
+++ b/base/i18n/char_iterator_unittest.cc
@@ -0,0 +1,101 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/i18n/char_iterator.h"
+
+#include "base/strings/utf_string_conversions.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace i18n {
+
+TEST(CharIteratorsTest, TestUTF8) {
+  std::string empty;
+  UTF8CharIterator empty_iter(&empty);
+  ASSERT_TRUE(empty_iter.end());
+  ASSERT_EQ(0, empty_iter.array_pos());
+  ASSERT_EQ(0, empty_iter.char_pos());
+  ASSERT_FALSE(empty_iter.Advance());
+
+  std::string str("s\303\273r");  // [u with circumflex]
+  UTF8CharIterator iter(&str);
+  ASSERT_FALSE(iter.end());
+  ASSERT_EQ(0, iter.array_pos());
+  ASSERT_EQ(0, iter.char_pos());
+  ASSERT_EQ('s', iter.get());
+  ASSERT_TRUE(iter.Advance());
+
+  ASSERT_FALSE(iter.end());
+  ASSERT_EQ(1, iter.array_pos());
+  ASSERT_EQ(1, iter.char_pos());
+  ASSERT_EQ(251, iter.get());
+  ASSERT_TRUE(iter.Advance());
+
+  ASSERT_FALSE(iter.end());
+  ASSERT_EQ(3, iter.array_pos());
+  ASSERT_EQ(2, iter.char_pos());
+  ASSERT_EQ('r', iter.get());
+  ASSERT_TRUE(iter.Advance());
+
+  ASSERT_TRUE(iter.end());
+  ASSERT_EQ(4, iter.array_pos());
+  ASSERT_EQ(3, iter.char_pos());
+
+  // Don't care what it returns, but this shouldn't crash.
+  iter.get();
+
+  ASSERT_FALSE(iter.Advance());
+}
+
+TEST(CharIteratorsTest, TestUTF16) {
+  string16 empty = UTF8ToUTF16("");
+  UTF16CharIterator empty_iter(&empty);
+  ASSERT_TRUE(empty_iter.end());
+  ASSERT_EQ(0, empty_iter.array_pos());
+  ASSERT_EQ(0, empty_iter.char_pos());
+  ASSERT_FALSE(empty_iter.Advance());
+
+  // This test string contains 4 characters:
+  //   x
+  //   u with circumflex - 2 bytes in UTF8, 1 codeword in UTF16
+  //   math double-struck A - 4 bytes in UTF8, 2 codewords in UTF16
+  //   z
+  string16 str = UTF8ToUTF16("x\303\273\360\235\224\270z");
+  UTF16CharIterator iter(&str);
+  ASSERT_FALSE(iter.end());
+  ASSERT_EQ(0, iter.array_pos());
+  ASSERT_EQ(0, iter.char_pos());
+  ASSERT_EQ('x', iter.get());
+  ASSERT_TRUE(iter.Advance());
+
+  ASSERT_FALSE(iter.end());
+  ASSERT_EQ(1, iter.array_pos());
+  ASSERT_EQ(1, iter.char_pos());
+  ASSERT_EQ(251, iter.get());
+  ASSERT_TRUE(iter.Advance());
+
+  ASSERT_FALSE(iter.end());
+  ASSERT_EQ(2, iter.array_pos());
+  ASSERT_EQ(2, iter.char_pos());
+  ASSERT_EQ(120120, iter.get());
+  ASSERT_TRUE(iter.Advance());
+
+  ASSERT_FALSE(iter.end());
+  ASSERT_EQ(4, iter.array_pos());
+  ASSERT_EQ(3, iter.char_pos());
+  ASSERT_EQ('z', iter.get());
+  ASSERT_TRUE(iter.Advance());
+
+  ASSERT_TRUE(iter.end());
+  ASSERT_EQ(5, iter.array_pos());
+  ASSERT_EQ(4, iter.char_pos());
+
+  // Don't care what it returns, but this shouldn't crash.
+  iter.get();
+
+  ASSERT_FALSE(iter.Advance());
+}
+
+}  // namespace i18n
+}  // namespace base
diff --git a/base/i18n/character_encoding.cc b/base/i18n/character_encoding.cc
new file mode 100644
index 0000000..a1068c3
--- /dev/null
+++ b/base/i18n/character_encoding.cc
@@ -0,0 +1,42 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/i18n/character_encoding.h"
+
+#include "base/macros.h"
+#include "third_party/icu/source/common/unicode/ucnv.h"
+
+namespace base {
+namespace {
+
+// An array of all supported canonical encoding names.
+const char* const kCanonicalEncodingNames[] = {
+    "Big5",         "EUC-JP",       "EUC-KR",       "gb18030",
+    "GBK",          "IBM866",       "ISO-2022-JP",  "ISO-8859-10",
+    "ISO-8859-13",  "ISO-8859-14",  "ISO-8859-15",  "ISO-8859-16",
+    "ISO-8859-2",   "ISO-8859-3",   "ISO-8859-4",   "ISO-8859-5",
+    "ISO-8859-6",   "ISO-8859-7",   "ISO-8859-8",   "ISO-8859-8-I",
+    "KOI8-R",       "KOI8-U",       "macintosh",    "Shift_JIS",
+    "UTF-16LE",     "UTF-8",        "windows-1250", "windows-1251",
+    "windows-1252", "windows-1253", "windows-1254", "windows-1255",
+    "windows-1256", "windows-1257", "windows-1258", "windows-874"};
+
+}  // namespace
+
+std::string GetCanonicalEncodingNameByAliasName(const std::string& alias_name) {
+  for (auto* encoding_name : kCanonicalEncodingNames) {
+    if (alias_name == encoding_name)
+      return alias_name;
+  }
+  static const char* kStandards[3] = {"HTML", "MIME", "IANA"};
+  for (auto* standard : kStandards) {
+    UErrorCode error_code = U_ZERO_ERROR;
+    const char* canonical_name =
+        ucnv_getStandardName(alias_name.c_str(), standard, &error_code);
+    if (U_SUCCESS(error_code) && canonical_name)
+      return canonical_name;
+  }
+  return std::string();
+}
+}  // namespace base
diff --git a/base/i18n/character_encoding.h b/base/i18n/character_encoding.h
new file mode 100644
index 0000000..974cb5a
--- /dev/null
+++ b/base/i18n/character_encoding.h
@@ -0,0 +1,20 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_I18N_CHARACTER_ENCODING_H_
+#define BASE_I18N_CHARACTER_ENCODING_H_
+
+#include <string>
+
+#include "base/i18n/base_i18n_export.h"
+
+namespace base {
+
+// Return canonical encoding name according to the encoding alias name.
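+//
+// For example (values taken from the unit tests):
+//   GetCanonicalEncodingNameByAliasName("utf8");           // "UTF-8"
+//   GetCanonicalEncodingNameByAliasName("ks_c_5601-1987"); // "EUC-KR"
+// An alias that cannot be resolved yields an empty string.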
+BASE_I18N_EXPORT std::string GetCanonicalEncodingNameByAliasName(
+    const std::string& alias_name);
+
+}  // namespace base
+
+#endif  // BASE_I18N_CHARACTER_ENCODING_H_
diff --git a/base/i18n/character_encoding_unittest.cc b/base/i18n/character_encoding_unittest.cc
new file mode 100644
index 0000000..3c11ba3
--- /dev/null
+++ b/base/i18n/character_encoding_unittest.cc
@@ -0,0 +1,23 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/i18n/character_encoding.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(CharacterEncodingTest, GetCanonicalEncodingNameByAliasName) {
+  EXPECT_EQ("Big5", GetCanonicalEncodingNameByAliasName("Big5"));
+  EXPECT_EQ("windows-874", GetCanonicalEncodingNameByAliasName("windows-874"));
+  EXPECT_EQ("ISO-8859-8", GetCanonicalEncodingNameByAliasName("ISO-8859-8"));
+
+  // Non-canonical alias names should be converted to canonical ones.
+  EXPECT_EQ("UTF-8", GetCanonicalEncodingNameByAliasName("utf8"));
+  EXPECT_EQ("gb18030", GetCanonicalEncodingNameByAliasName("GB18030"));
+  EXPECT_EQ("windows-874", GetCanonicalEncodingNameByAliasName("tis-620"));
+  EXPECT_EQ("EUC-KR", GetCanonicalEncodingNameByAliasName("ks_c_5601-1987"));
+}
+
+}  // namespace base
diff --git a/base/i18n/encoding_detection.cc b/base/i18n/encoding_detection.cc
new file mode 100644
index 0000000..fef34e4
--- /dev/null
+++ b/base/i18n/encoding_detection.cc
@@ -0,0 +1,40 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/i18n/encoding_detection.h"
+
+#include "build/build_config.h"
+#include "third_party/ced/src/compact_enc_det/compact_enc_det.h"
+
+// third_party/ced/src/util/encodings/encodings.h, which is included
+// by the include above, undefs UNICODE because that is a macro used
+// internally in ced. If anything later in the same translation unit uses
+// Windows or Windows headers, it would then get the ASCII versions, which we
+// do not want. To avoid that happening in jumbo builds, we redefine UNICODE
+// again here.
+#if defined(OS_WIN)
+#define UNICODE 1
+#endif  // OS_WIN
+
+namespace base {
+
+bool DetectEncoding(const std::string& text, std::string* encoding) {
+  int consumed_bytes;
+  bool is_reliable;
+  Encoding enc = CompactEncDet::DetectEncoding(
+      text.c_str(), text.length(), nullptr, nullptr, nullptr,
+      UNKNOWN_ENCODING,
+      UNKNOWN_LANGUAGE,
+      CompactEncDet::QUERY_CORPUS,  // plain text
+      false,  // Include 7-bit encodings
+      &consumed_bytes,
+      &is_reliable);
+
+  if (enc == UNKNOWN_ENCODING)
+    return false;
+
+  *encoding = MimeEncodingName(enc);
+  return true;
+}
+}  // namespace base
diff --git a/base/i18n/encoding_detection.h b/base/i18n/encoding_detection.h
new file mode 100644
index 0000000..c8e660c
--- /dev/null
+++ b/base/i18n/encoding_detection.h
@@ -0,0 +1,21 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_I18N_ENCODING_DETECTION_H_
+#define BASE_I18N_ENCODING_DETECTION_H_
+
+#include <string>
+
+#include "base/compiler_specific.h"
+#include "base/i18n/base_i18n_export.h"
+
+namespace base {
+
+// Detects the encoding of |text| and puts the name of the encoding in
+// |encoding|. Returns true on success.
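+//
+// An illustrative sketch (|raw_bytes| is a hypothetical input string; the
+// detector needs enough text to make a confident guess):
+//
+//   std::string encoding;
+//   if (base::DetectEncoding(raw_bytes, &encoding)) {
+//     // |encoding| now holds a MIME encoding name such as "UTF-8".
+//   }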
+BASE_I18N_EXPORT bool DetectEncoding(const std::string& text,
+                                     std::string* encoding) WARN_UNUSED_RESULT;
+}  // namespace base
+
+#endif  // BASE_I18N_ENCODING_DETECTION_H_
diff --git a/base/i18n/file_util_icu.cc b/base/i18n/file_util_icu.cc
new file mode 100644
index 0000000..c91aea1
--- /dev/null
+++ b/base/i18n/file_util_icu.cc
@@ -0,0 +1,179 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// File utilities that use the ICU library go in this file.
+
+#include "base/i18n/file_util_icu.h"
+
+#include <stdint.h>
+
+#include <memory>
+
+#include "base/files/file_path.h"
+#include "base/i18n/icu_string_conversions.h"
+#include "base/i18n/string_compare.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/singleton.h"
+#include "base/strings/string_util.h"
+#include "base/strings/sys_string_conversions.h"
+#include "base/strings/utf_string_conversions.h"
+#include "build/build_config.h"
+#include "third_party/icu/source/common/unicode/uniset.h"
+#include "third_party/icu/source/i18n/unicode/coll.h"
+
+namespace base {
+namespace i18n {
+
+namespace {
+
+class IllegalCharacters {
+ public:
+  static IllegalCharacters* GetInstance() {
+    return Singleton<IllegalCharacters>::get();
+  }
+
+  bool DisallowedEverywhere(UChar32 ucs4) {
+    return !!illegal_anywhere_->contains(ucs4);
+  }
+
+  bool DisallowedLeadingOrTrailing(UChar32 ucs4) {
+    return !!illegal_at_ends_->contains(ucs4);
+  }
+
+  bool IsAllowedName(const string16& s) {
+    return s.empty() || (!!illegal_anywhere_->containsNone(
+                             icu::UnicodeString(s.c_str(), s.size())) &&
+                         !illegal_at_ends_->contains(*s.begin()) &&
+                         !illegal_at_ends_->contains(*s.rbegin()));
+  }
+
+ private:
+  friend class Singleton<IllegalCharacters>;
+  friend struct DefaultSingletonTraits<IllegalCharacters>;
+
+  IllegalCharacters();
+  ~IllegalCharacters() = default;
+
+  // set of characters considered invalid anywhere inside a filename.
+  std::unique_ptr<icu::UnicodeSet> illegal_anywhere_;
+
+  // set of characters considered invalid at either end of a filename.
+  std::unique_ptr<icu::UnicodeSet> illegal_at_ends_;
+
+  DISALLOW_COPY_AND_ASSIGN(IllegalCharacters);
+};
+
+IllegalCharacters::IllegalCharacters() {
+  UErrorCode everywhere_status = U_ZERO_ERROR;
+  UErrorCode ends_status = U_ZERO_ERROR;
+  // Control characters, formatting characters, non-characters, path separators,
+  // and some printable ASCII characters regarded as dangerous ('"*/:<>?\\').
+  // See  http://blogs.msdn.com/michkap/archive/2006/11/03/941420.aspx
+  // and http://msdn2.microsoft.com/en-us/library/Aa365247.aspx
+  // Note that code points in the "Other, Format" (Cf) category are ignored on
+  // HFS+ despite the ZERO_WIDTH_JOINER and ZERO_WIDTH_NON-JOINER being
+  // legitimate in Arabic and some S/SE Asian scripts. In addition tilde (~) is
+  // also excluded due to the possibility of interacting poorly with short
+  // filenames on VFAT. (Related to CVE-2014-9390)
+  illegal_anywhere_.reset(new icu::UnicodeSet(
+      UNICODE_STRING_SIMPLE("[[\"~*/:<>?\\\\|][:Cc:][:Cf:]]"),
+      everywhere_status));
+  illegal_at_ends_.reset(new icu::UnicodeSet(
+      UNICODE_STRING_SIMPLE("[[:WSpace:][.]]"), ends_status));
+  DCHECK(U_SUCCESS(everywhere_status));
+  DCHECK(U_SUCCESS(ends_status));
+
+  // Add non-characters. If this becomes a performance bottleneck by
+  // any chance, do not add these to |illegal_anywhere_| and change
+  // IsFilenameLegal() to check |(ucs4 & 0xFFFEu) == 0xFFFEu|, in addition to
+  // calling IsAllowedName().
+  illegal_anywhere_->add(0xFDD0, 0xFDEF);
+  for (int i = 0; i <= 0x10; ++i) {
+    int plane_base = 0x10000 * i;
+    illegal_anywhere_->add(plane_base + 0xFFFE, plane_base + 0xFFFF);
+  }
+  illegal_anywhere_->freeze();
+  illegal_at_ends_->freeze();
+}
+
+}  // namespace
+
+bool IsFilenameLegal(const string16& file_name) {
+  return IllegalCharacters::GetInstance()->IsAllowedName(file_name);
+}
+
+void ReplaceIllegalCharactersInPath(FilePath::StringType* file_name,
+                                    char replace_char) {
+  IllegalCharacters* illegal = IllegalCharacters::GetInstance();
+
+  DCHECK(!(illegal->DisallowedEverywhere(replace_char)));
+  DCHECK(!(illegal->DisallowedLeadingOrTrailing(replace_char)));
+
+  int cursor = 0;  // The ICU macros expect an int.
+  while (cursor < static_cast<int>(file_name->size())) {
+    int char_begin = cursor;
+    uint32_t code_point;
+#if defined(OS_WIN)
+    // Windows uses UTF-16 encoding for filenames.
+    U16_NEXT(file_name->data(), cursor, static_cast<int>(file_name->length()),
+             code_point);
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+    // Mac and Chrome OS use UTF-8 encoding for filenames.
+    // Linux doesn't actually define file system encoding. Try to parse as
+    // UTF-8.
+    U8_NEXT(file_name->data(), cursor, static_cast<int>(file_name->length()),
+            code_point);
+#else
+#error Unsupported platform
+#endif
+
+    if (illegal->DisallowedEverywhere(code_point) ||
+        ((char_begin == 0 || cursor == static_cast<int>(file_name->length())) &&
+         illegal->DisallowedLeadingOrTrailing(code_point))) {
+      file_name->replace(char_begin, cursor - char_begin, 1, replace_char);
+      // We just made the potentially multi-byte/word char into one that only
+      // takes one byte/word, so need to adjust the cursor to point to the next
+      // character again.
+      cursor = char_begin + 1;
+    }
+  }
+}
+
+bool LocaleAwareCompareFilenames(const FilePath& a, const FilePath& b) {
+  UErrorCode error_code = U_ZERO_ERROR;
+  // Use the default collator. The default locale should have been properly
+  // set by the time this function is called.
+  std::unique_ptr<icu::Collator> collator(
+      icu::Collator::createInstance(error_code));
+  DCHECK(U_SUCCESS(error_code));
+  // Make it case-sensitive.
+  collator->setStrength(icu::Collator::TERTIARY);
+
+#if defined(OS_WIN)
+  return CompareString16WithCollator(*collator, WideToUTF16(a.value()),
+                                     WideToUTF16(b.value())) == UCOL_LESS;
+
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  // On Linux, the file system encoding is not defined. We assume
+  // SysNativeMBToWide takes care of it.
+  return CompareString16WithCollator(
+             *collator, WideToUTF16(SysNativeMBToWide(a.value())),
+             WideToUTF16(SysNativeMBToWide(b.value()))) == UCOL_LESS;
+#endif
+}
+
+void NormalizeFileNameEncoding(FilePath* file_name) {
+#if defined(OS_CHROMEOS)
+  std::string normalized_str;
+  if (ConvertToUtf8AndNormalize(file_name->BaseName().value(), kCodepageUTF8,
+                                &normalized_str) &&
+      !normalized_str.empty()) {
+    *file_name = file_name->DirName().Append(FilePath(normalized_str));
+  }
+#endif
+}
+
+}  // namespace i18n
+}  // namespace base
diff --git a/base/i18n/file_util_icu.h b/base/i18n/file_util_icu.h
new file mode 100644
index 0000000..f8bd9f4
--- /dev/null
+++ b/base/i18n/file_util_icu.h
@@ -0,0 +1,58 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_I18N_FILE_UTIL_ICU_H_
+#define BASE_I18N_FILE_UTIL_ICU_H_
+
+// File utilities that use the ICU library go in this file.
+
+#include "base/files/file_path.h"
+#include "base/i18n/base_i18n_export.h"
+#include "base/strings/string16.h"
+
+namespace base {
+namespace i18n {
+
+// Returns true if |file_name| does not contain any illegal characters. The
+// input param has the same restriction as that for
+// ReplaceIllegalCharactersInPath.
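+//
+// For example, IsFilenameLegal(UTF8ToUTF16("a:b")) is false, since ':' is in
+// the illegal set, while IsFilenameLegal(UTF8ToUTF16("a_b.txt")) is true.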
+BASE_I18N_EXPORT bool IsFilenameLegal(const string16& file_name);
+
+// Replaces characters in |file_name| that are illegal for file names with
+// |replace_char|. |file_name| must not be a full or relative path, but just the
+// file name component (since slashes are considered illegal). Any leading or
+// trailing whitespace or periods in |file_name| are also replaced with
+// |replace_char|.
+//
+// Example:
+//   "bad:file*name?.txt" will be turned into "bad_file_name_.txt" when
+//   |replace_char| is '_'.
+//
+// Warning: Do not use this function as the sole means of sanitizing a filename.
+//   While the resulting filename itself would be legal, it doesn't necessarily
+//   mean that the file will behave safely. On Windows, certain reserved names
+//   refer to devices rather than files (e.g. LPT1), and some filenames could
+//   be interpreted as shell namespace extensions (e.g. Foo.{<GUID>}).
+//
+// On Windows, Chrome OS and Mac, the file system encoding is already known,
+// and file names are parsed as UTF-16 or UTF-8 accordingly.
+// On Linux, the file name will be parsed as UTF-8.
+// TODO(asanka): Move full filename sanitization logic here.
+BASE_I18N_EXPORT void ReplaceIllegalCharactersInPath(
+    FilePath::StringType* file_name,
+    char replace_char);
+
+// Compares two filenames using the current locale information. This can be
+// used to sort directory listings. It behaves like "operator<" for use in
+// std::sort.
+BASE_I18N_EXPORT bool LocaleAwareCompareFilenames(const FilePath& a,
+                                                  const FilePath& b);
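+//
+// A minimal usage sketch (assumes |files| is a std::vector<FilePath>):
+//
+//   std::sort(files.begin(), files.end(),
+//             base::i18n::LocaleAwareCompareFilenames);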
+
+// Calculates the canonical file-system representation of |file_name|'s base
+// name. Modifies |file_name| in place. No-op if not on ChromeOS.
+BASE_I18N_EXPORT void NormalizeFileNameEncoding(FilePath* file_name);
+
+}  // namespace i18n
+}  // namespace base
+
+#endif  // BASE_I18N_FILE_UTIL_ICU_H_
diff --git a/base/i18n/file_util_icu_unittest.cc b/base/i18n/file_util_icu_unittest.cc
new file mode 100644
index 0000000..062d29b
--- /dev/null
+++ b/base/i18n/file_util_icu_unittest.cc
@@ -0,0 +1,140 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/i18n/file_util_icu.h"
+
+#include <stddef.h>
+
+#include "base/files/file_util.h"
+#include "base/macros.h"
+#include "base/strings/utf_string_conversions.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/platform_test.h"
+
+namespace base {
+namespace i18n {
+
+// file_util winds up using autoreleased objects on the Mac, so this needs
+// to be a PlatformTest.
+class FileUtilICUTest : public PlatformTest {};
+
+#if defined(OS_POSIX) && !defined(OS_MACOSX)
+
+// On Linux, file paths are parsed and filtered as UTF-8.
+static const struct GoodBadPairLinux {
+  const char* bad_name;
+  const char* good_name;
+} kLinuxIllegalCharacterCases[] = {
+  {"bad*\\/file:name?.jpg", "bad---file-name-.jpg"},
+  {"**********::::.txt", "--------------.txt"},
+  {"\xe9\xf0zzzz.\xff", "\xe9\xf0zzzz.\xff"},
+  {" _ ", "-_-"},
+  {".", "-"},
+  {" .( ). ", "-.( ).-"},
+  {"     ", "-   -"},
+};
+
+TEST_F(FileUtilICUTest, ReplaceIllegalCharactersInPathLinuxTest) {
+  for (size_t i = 0; i < arraysize(kLinuxIllegalCharacterCases); ++i) {
+    std::string bad_name(kLinuxIllegalCharacterCases[i].bad_name);
+    ReplaceIllegalCharactersInPath(&bad_name, '-');
+    EXPECT_EQ(kLinuxIllegalCharacterCases[i].good_name, bad_name);
+  }
+}
+
+#endif
+
+// Cases for Mac and Windows, which both do Unicode validation on filenames.
+// These characters are given as wide strings since it's more convenient to
+// specify Unicode characters that way. For Mac they should be converted to
+// UTF-8.
+static const struct goodbad_pair {
+  const wchar_t* bad_name;
+  const wchar_t* good_name;
+} kIllegalCharacterCases[] = {
+    {L"bad*file:name?.jpg", L"bad-file-name-.jpg"},
+    {L"**********::::.txt", L"--------------.txt"},
+    // We can't use UCNs (universal character names) for C0/C1 characters and
+    // U+007F, but \x escape is interpreted by MSVC and gcc as we intend.
+    {L"bad\x0003\x0091 file\u200E\u200Fname.png", L"bad-- file--name.png"},
+    {L"bad*file\\?name.jpg", L"bad-file--name.jpg"},
+    {L"\t  bad*file\\name/.jpg", L"-  bad-file-name-.jpg"},
+    {L"this_file_name is okay!.mp3", L"this_file_name is okay!.mp3"},
+    {L"\u4E00\uAC00.mp3", L"\u4E00\uAC00.mp3"},
+    {L"\u0635\u200C\u0644.mp3", L"\u0635-\u0644.mp3"},
+    {L"\U00010330\U00010331.mp3", L"\U00010330\U00010331.mp3"},
+    // Unassigned codepoints are ok.
+    {L"\u0378\U00040001.mp3", L"\u0378\U00040001.mp3"},
+    // Non-characters are not allowed.
+    {L"bad\uFFFFfile\U0010FFFEname.jpg", L"bad-file-name.jpg"},
+    {L"bad\uFDD0file\uFDEFname.jpg", L"bad-file-name.jpg"},
+    // CVE-2014-9390
+    {L"(\u200C.\u200D.\u200E.\u200F.\u202A.\u202B.\u202C.\u202D.\u202E.\u206A."
+     L"\u206B.\u206C.\u206D.\u206F.\uFEFF)",
+     L"(-.-.-.-.-.-.-.-.-.-.-.-.-.-.-)"},
+    {L"config~1", L"config-1"},
+    {L" _ ", L"-_-"},
+    {L" ", L"-"},
+    {L"\u2008.(\u2007).\u3000", L"-.(\u2007).-"},
+    {L"     ", L"-   -"},
+    {L".    ", L"-   -"}
+};
+
+#if defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_POSIX)
+
+TEST_F(FileUtilICUTest, ReplaceIllegalCharactersInPathTest) {
+  for (size_t i = 0; i < arraysize(kIllegalCharacterCases); ++i) {
+#if defined(OS_WIN)
+    std::wstring bad_name(kIllegalCharacterCases[i].bad_name);
+    ReplaceIllegalCharactersInPath(&bad_name, '-');
+    EXPECT_EQ(kIllegalCharacterCases[i].good_name, bad_name);
+#else
+    std::string bad_name(WideToUTF8(kIllegalCharacterCases[i].bad_name));
+    ReplaceIllegalCharactersInPath(&bad_name, '-');
+    EXPECT_EQ(WideToUTF8(kIllegalCharacterCases[i].good_name), bad_name);
+#endif
+  }
+}
+
+#endif
+
+TEST_F(FileUtilICUTest, IsFilenameLegalTest) {
+  EXPECT_TRUE(IsFilenameLegal(string16()));
+
+  for (const auto& test_case : kIllegalCharacterCases) {
+    string16 bad_name = WideToUTF16(test_case.bad_name);
+    string16 good_name = WideToUTF16(test_case.good_name);
+
+    EXPECT_TRUE(IsFilenameLegal(good_name)) << good_name;
+    if (good_name != bad_name)
+      EXPECT_FALSE(IsFilenameLegal(bad_name)) << bad_name;
+  }
+}
+
+#if defined(OS_CHROMEOS)
+static const struct normalize_name_encoding_test_cases {
+  const char* original_path;
+  const char* normalized_path;
+} kNormalizeFileNameEncodingTestCases[] = {
+  { "foo_na\xcc\x88me.foo", "foo_n\xc3\xa4me.foo"},
+  { "foo_dir_na\xcc\x88me/foo_na\xcc\x88me.foo",
+    "foo_dir_na\xcc\x88me/foo_n\xc3\xa4me.foo"},
+  { "", ""},
+  { "foo_dir_na\xcc\x88me/", "foo_dir_n\xc3\xa4me"}
+};
+
+TEST_F(FileUtilICUTest, NormalizeFileNameEncoding) {
+  for (size_t i = 0; i < arraysize(kNormalizeFileNameEncodingTestCases); i++) {
+    FilePath path(kNormalizeFileNameEncodingTestCases[i].original_path);
+    NormalizeFileNameEncoding(&path);
+    EXPECT_EQ(FilePath(kNormalizeFileNameEncodingTestCases[i].normalized_path),
+              path);
+  }
+}
+
+#endif
+
+}  // namespace i18n
+}  // namespace base
diff --git a/base/i18n/i18n_constants.cc b/base/i18n/i18n_constants.cc
new file mode 100644
index 0000000..7d2f5fc
--- /dev/null
+++ b/base/i18n/i18n_constants.cc
@@ -0,0 +1,13 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/i18n/i18n_constants.h"
+
+namespace base {
+
+const char kCodepageLatin1[] = "ISO-8859-1";
+const char kCodepageUTF8[] = "UTF-8";
+
+}  // namespace base
+
diff --git a/base/i18n/i18n_constants.h b/base/i18n/i18n_constants.h
new file mode 100644
index 0000000..c1bd87d
--- /dev/null
+++ b/base/i18n/i18n_constants.h
@@ -0,0 +1,21 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_I18N_I18N_CONSTANTS_H_
+#define BASE_I18N_I18N_CONSTANTS_H_
+
+#include "base/i18n/base_i18n_export.h"
+
+namespace base {
+
+// Names of codepages (charsets) understood by ICU.
+BASE_I18N_EXPORT extern const char kCodepageLatin1[];  // a.k.a. ISO 8859-1
+BASE_I18N_EXPORT extern const char kCodepageUTF8[];
+
+// The other possible options are UTF-16BE and UTF-16LE, but they are unused in
+// Chromium as of this writing.
+
+}  // namespace base
+
+#endif  // BASE_I18N_I18N_CONSTANTS_H_
diff --git a/base/i18n/icu_string_conversions.cc b/base/i18n/icu_string_conversions.cc
new file mode 100644
index 0000000..6ec9980
--- /dev/null
+++ b/base/i18n/icu_string_conversions.cc
@@ -0,0 +1,223 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/i18n/icu_string_conversions.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <vector>
+
+#include "base/logging.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
+#include "third_party/icu/source/common/unicode/normalizer2.h"
+#include "third_party/icu/source/common/unicode/ucnv.h"
+#include "third_party/icu/source/common/unicode/ucnv_cb.h"
+#include "third_party/icu/source/common/unicode/ucnv_err.h"
+#include "third_party/icu/source/common/unicode/ustring.h"
+
+namespace base {
+
+namespace {
+// ToUnicodeCallbackSubstitute() is based on UCNV_TO_U_CALLBACK_SUBSTITUTE
+// in source/common/ucnv_err.c.
+
+// Copyright (c) 1995-2006 International Business Machines Corporation
+// and others
+//
+// All rights reserved.
+//
+
+// Permission is hereby granted, free of charge, to any person obtaining a
+// copy of this software and associated documentation files (the "Software"),
+// to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, and/or
+// sell copies of the Software, and to permit persons to whom the Software
+// is furnished to do so, provided that the above copyright notice(s) and
+// this permission notice appear in all copies of the Software and that
+// both the above copyright notice(s) and this permission notice appear in
+// supporting documentation.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
+// OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+// INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+// OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
+// OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
+// OR PERFORMANCE OF THIS SOFTWARE.
+//
+// Except as contained in this notice, the name of a copyright holder
+// shall not be used in advertising or otherwise to promote the sale, use
+// or other dealings in this Software without prior written authorization
+// of the copyright holder.
+
+//  ___________________________________________________________________________
+//
+// All trademarks and registered trademarks mentioned herein are the property
+// of their respective owners.
+
+void ToUnicodeCallbackSubstitute(const void* context,
+                                 UConverterToUnicodeArgs *to_args,
+                                 const char* code_units,
+                                 int32_t length,
+                                 UConverterCallbackReason reason,
+                                 UErrorCode * err) {
+  static const UChar kReplacementChar = 0xFFFD;
+  if (reason <= UCNV_IRREGULAR) {
+    if (context == nullptr ||
+        (*(reinterpret_cast<const char*>(context)) == 'i' &&
+         reason == UCNV_UNASSIGNED)) {
+      *err = U_ZERO_ERROR;
+      ucnv_cbToUWriteUChars(to_args, &kReplacementChar, 1, 0, err);
+    }
+    // else the caller must have set the error code accordingly.
+  }
+  // else ignore the reset, close and clone calls.
+}
+
+bool ConvertFromUTF16(UConverter* converter, const UChar* uchar_src,
+                      int uchar_len, OnStringConversionError::Type on_error,
+                      std::string* encoded) {
+  int encoded_max_length = UCNV_GET_MAX_BYTES_FOR_STRING(uchar_len,
+      ucnv_getMaxCharSize(converter));
+  encoded->resize(encoded_max_length);
+
+  UErrorCode status = U_ZERO_ERROR;
+
+  // Set up our error handler.
+  switch (on_error) {
+    case OnStringConversionError::FAIL:
+      ucnv_setFromUCallBack(converter, UCNV_FROM_U_CALLBACK_STOP, nullptr,
+                            nullptr, nullptr, &status);
+      break;
+    case OnStringConversionError::SKIP:
+    case OnStringConversionError::SUBSTITUTE:
+      ucnv_setFromUCallBack(converter, UCNV_FROM_U_CALLBACK_SKIP, nullptr,
+                            nullptr, nullptr, &status);
+      break;
+    default:
+      NOTREACHED();
+  }
+
+  // ucnv_fromUChars returns the size, not including the terminating null.
+  int actual_size = ucnv_fromUChars(converter, &(*encoded)[0],
+      encoded_max_length, uchar_src, uchar_len, &status);
+  encoded->resize(actual_size);
+  ucnv_close(converter);
+  if (U_SUCCESS(status))
+    return true;
+  encoded->clear();  // Make sure the output is empty on error.
+  return false;
+}
+
+// Sets up the error handler for converters to UTF-16.
+void SetUpErrorHandlerForToUChars(OnStringConversionError::Type on_error,
+                                  UConverter* converter, UErrorCode* status) {
+  switch (on_error) {
+    case OnStringConversionError::FAIL:
+      ucnv_setToUCallBack(converter, UCNV_TO_U_CALLBACK_STOP, nullptr, nullptr,
+                          nullptr, status);
+      break;
+    case OnStringConversionError::SKIP:
+      ucnv_setToUCallBack(converter, UCNV_TO_U_CALLBACK_SKIP, nullptr, nullptr,
+                          nullptr, status);
+      break;
+    case OnStringConversionError::SUBSTITUTE:
+      ucnv_setToUCallBack(converter, ToUnicodeCallbackSubstitute, nullptr,
+                          nullptr, nullptr, status);
+      break;
+    default:
+      NOTREACHED();
+  }
+}
+
+}  // namespace
+
+// Codepage <-> UTF-16  -------------------------------------------------------
+
+bool UTF16ToCodepage(const string16& utf16,
+                     const char* codepage_name,
+                     OnStringConversionError::Type on_error,
+                     std::string* encoded) {
+  encoded->clear();
+
+  UErrorCode status = U_ZERO_ERROR;
+  UConverter* converter = ucnv_open(codepage_name, &status);
+  if (!U_SUCCESS(status))
+    return false;
+
+  return ConvertFromUTF16(converter, utf16.c_str(),
+                          static_cast<int>(utf16.length()), on_error, encoded);
+}
+
+bool CodepageToUTF16(const std::string& encoded,
+                     const char* codepage_name,
+                     OnStringConversionError::Type on_error,
+                     string16* utf16) {
+  utf16->clear();
+
+  UErrorCode status = U_ZERO_ERROR;
+  UConverter* converter = ucnv_open(codepage_name, &status);
+  if (!U_SUCCESS(status))
+    return false;
+
+  // Even in the worst case, the maximum length in 2-byte units of UTF-16
+  // output would be at most the same as the number of bytes in input. There
+  // is no single-byte encoding in which a character is mapped to a
+  // non-BMP character requiring two 2-byte units.
+  //
+  // Moreover, non-BMP characters in legacy multibyte encodings
+  // (e.g. EUC-JP, GB18030) take at least 2 bytes. The only exceptions are
+  // BOCU and SCSU, but we don't care about them.
+  size_t uchar_max_length = encoded.length() + 1;
+
+  SetUpErrorHandlerForToUChars(on_error, converter, &status);
+  std::unique_ptr<char16[]> buffer(new char16[uchar_max_length]);
+  int actual_size = ucnv_toUChars(converter, buffer.get(),
+      static_cast<int>(uchar_max_length), encoded.data(),
+      static_cast<int>(encoded.length()), &status);
+  ucnv_close(converter);
+  if (!U_SUCCESS(status)) {
+    utf16->clear();  // Make sure the output is empty on error.
+    return false;
+  }
+
+  utf16->assign(buffer.get(), actual_size);
+  return true;
+}
+
+bool ConvertToUtf8AndNormalize(const std::string& text,
+                               const std::string& charset,
+                               std::string* result) {
+  result->clear();
+  string16 utf16;
+  if (!CodepageToUTF16(
+      text, charset.c_str(), OnStringConversionError::FAIL, &utf16))
+    return false;
+
+  UErrorCode status = U_ZERO_ERROR;
+  const icu::Normalizer2* normalizer = icu::Normalizer2::getNFCInstance(status);
+  DCHECK(U_SUCCESS(status));
+  if (U_FAILURE(status))
+    return false;
+  int32_t utf16_length = static_cast<int32_t>(utf16.length());
+  icu::UnicodeString normalized(utf16.data(), utf16_length);
+  int32_t normalized_prefix_length =
+      normalizer->spanQuickCheckYes(normalized, status);
+  if (normalized_prefix_length < utf16_length) {
+    icu::UnicodeString un_normalized(normalized, normalized_prefix_length);
+    normalized.truncate(normalized_prefix_length);
+    normalizer->normalizeSecondAndAppend(normalized, un_normalized, status);
+  }
+  if (U_FAILURE(status))
+    return false;
+  normalized.toUTF8String(*result);
+  return true;
+}
+
+}  // namespace base
diff --git a/base/i18n/icu_string_conversions.h b/base/i18n/icu_string_conversions.h
new file mode 100644
index 0000000..cbdcb99
--- /dev/null
+++ b/base/i18n/icu_string_conversions.h
@@ -0,0 +1,57 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_I18N_ICU_STRING_CONVERSIONS_H_
+#define BASE_I18N_ICU_STRING_CONVERSIONS_H_
+
+#include <string>
+
+#include "base/i18n/base_i18n_export.h"
+#include "base/i18n/i18n_constants.h"
+#include "base/strings/string16.h"
+
+namespace base {
+
+// Defines the error handling modes of UTF16ToCodepage and CodepageToUTF16.
+class OnStringConversionError {
+ public:
+  enum Type {
+    // The function will return failure. The output buffer will be empty.
+    FAIL,
+
+    // The offending characters are skipped and the conversion will proceed as
+    // if they did not exist.
+    SKIP,
+
+    // When converting to Unicode, the offending byte sequences are substituted
+    // by Unicode replacement character (U+FFFD). When converting from Unicode,
+    // this is the same as SKIP.
+    SUBSTITUTE,
+  };
+
+ private:
+  OnStringConversionError() = delete;
+};
+
+// Converts between UTF-16 strings and the encoding specified.  If the
+// encoding doesn't exist or the encoding fails (when on_error is FAIL),
+// returns false.
+BASE_I18N_EXPORT bool UTF16ToCodepage(const string16& utf16,
+                                      const char* codepage_name,
+                                      OnStringConversionError::Type on_error,
+                                      std::string* encoded);
+BASE_I18N_EXPORT bool CodepageToUTF16(const std::string& encoded,
+                                      const char* codepage_name,
+                                      OnStringConversionError::Type on_error,
+                                      string16* utf16);
+
+// Converts from any codepage to UTF-8 and ensures the resulting UTF-8 is
+// normalized.
+BASE_I18N_EXPORT bool ConvertToUtf8AndNormalize(const std::string& text,
+                                                const std::string& charset,
+                                                std::string* result);
+
+}  // namespace base
+
+#endif  // BASE_I18N_ICU_STRING_CONVERSIONS_H_
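A short usage sketch for the conversions declared above (hypothetical snippet;
assumes ICU data is initialized). The Shift-JIS bytes below decode to U+3053
U+3093, and Latin-1 0xE9 is 'é':

  #include <string>

  #include "base/i18n/icu_string_conversions.h"
  #include "base/strings/string16.h"

  void ConversionSketch() {
    // Decode Shift-JIS; with SUBSTITUTE, undecodable byte sequences become
    // U+FFFD instead of failing the whole conversion.
    base::string16 utf16;
    bool ok = base::CodepageToUTF16(
        "\x82\xB1\x82\xF1", "sjis",
        base::OnStringConversionError::SUBSTITUTE, &utf16);

    // Decode Latin-1 and NFC-normalize in one step; on success |utf8| holds
    // "caf\xc3\xa9" (UTF-8 for "café").
    std::string utf8;
    ok = base::ConvertToUtf8AndNormalize("caf\xe9", "iso-8859-1", &utf8);
    (void)ok;
  }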
diff --git a/base/i18n/icu_string_conversions_unittest.cc b/base/i18n/icu_string_conversions_unittest.cc
new file mode 100644
index 0000000..d155986
--- /dev/null
+++ b/base/i18n/icu_string_conversions_unittest.cc
@@ -0,0 +1,235 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <math.h>
+#include <stdarg.h>
+#include <stddef.h>
+
+#include <limits>
+#include <sstream>
+
+#include "base/format_macros.h"
+#include "base/i18n/icu_string_conversions.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/stringprintf.h"
+#include "base/strings/utf_string_conversions.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+// Given a null-terminated string of wchar_t with each wchar_t representing
+// a UTF-16 code unit, returns a string16 made up of wchar_t's in the input.
+// Each wchar_t should be <= 0xFFFF and a non-BMP character (> U+FFFF)
+// should be represented as a surrogate pair (two UTF-16 units)
+// *even* where wchar_t is 32-bit (Linux and Mac).
+//
+// This is to help write tests for functions with string16 params until
+// the C++0x UTF-16 string literal is well-supported by compilers.
+string16 BuildString16(const wchar_t* s) {
+#if defined(WCHAR_T_IS_UTF16)
+  return string16(s);
+#elif defined(WCHAR_T_IS_UTF32)
+  string16 u16;
+  while (*s != 0) {
+    DCHECK_LE(static_cast<unsigned int>(*s), 0xFFFFu);
+    u16.push_back(*s++);
+  }
+  return u16;
+#endif
+}
+
+}  // namespace
+
+// kConverterCodepageCases is not comprehensive. There are a number of cases
+// to add if we really want to have a comprehensive coverage of various
+// codepages and their 'idiosyncrasies'. Currently, the only implementation
+// for CodepageTo* and *ToCodepage uses ICU, which has a very extensive
+// set of tests for the charset conversion. So, we can get away with a
+// relatively small number of cases listed below.
+//
+// Note about |u16_wide| in the following struct.
+// On Windows, the field is always identical to |wide|. On Mac and Linux,
+// it's identical as long as there's no character outside the
+// BMP (<= U+FFFF). When there is, it is different from |wide| and
+// is not a real wide string (UTF-32 string) in that each wchar_t in
+// the string is a UTF-16 code unit zero-extended to be 32-bit
+// even when the code unit belongs to a surrogate pair.
+// For instance, a Unicode string (U+0041 U+010000) is represented as
+// L"\x0041\xD800\xDC00" instead of L"\x0041\x10000".
+// To avoid the clutter, |u16_wide| will be set to NULL
+// if it's identical to |wide| on *all* platforms.
+
+static const struct {
+  const char* codepage_name;
+  const char* encoded;
+  OnStringConversionError::Type on_error;
+  bool success;
+  const wchar_t* wide;
+  const wchar_t* u16_wide;
+} kConvertCodepageCases[] = {
+    // Test a case where the input cannot be decoded, using SKIP, FAIL
+    // and SUBSTITUTE error handling rules. "A7 41" is valid, but "A6" isn't.
+    {"big5", "\xA7\x41\xA6", OnStringConversionError::FAIL, false, L"",
+     nullptr},
+    {"big5", "\xA7\x41\xA6", OnStringConversionError::SKIP, true, L"\x4F60",
+     nullptr},
+    {"big5", "\xA7\x41\xA6", OnStringConversionError::SUBSTITUTE, true,
+     L"\x4F60\xFFFD", nullptr},
+    // Arabic (ISO-8859)
+    {"iso-8859-6",
+     "\xC7\xEE\xE4\xD3\xF1\xEE\xE4\xC7\xE5\xEF"
+     " "
+     "\xD9\xEE\xE4\xEE\xEA\xF2\xE3\xEF\xE5\xF2",
+     OnStringConversionError::FAIL, true,
+     L"\x0627\x064E\x0644\x0633\x0651\x064E\x0644\x0627\x0645\x064F"
+     L" "
+     L"\x0639\x064E\x0644\x064E\x064A\x0652\x0643\x064F\x0645\x0652",
+     nullptr},
+    // Chinese Simplified (GB2312)
+    {"gb2312", "\xC4\xE3\xBA\xC3", OnStringConversionError::FAIL, true,
+     L"\x4F60\x597D", nullptr},
+    // Chinese (GB18030) : 4 byte sequences mapped to BMP characters
+    {"gb18030", "\x81\x30\x84\x36\xA1\xA7", OnStringConversionError::FAIL, true,
+     L"\x00A5\x00A8", nullptr},
+    // Chinese (GB18030) : A 4 byte sequence mapped to plane 2 (U+20000)
+    {"gb18030", "\x95\x32\x82\x36\xD2\xBB", OnStringConversionError::FAIL, true,
+#if defined(WCHAR_T_IS_UTF16)
+     L"\xD840\xDC00\x4E00",
+#elif defined(WCHAR_T_IS_UTF32)
+     L"\x20000\x4E00",
+#endif
+     L"\xD840\xDC00\x4E00"},
+    {"big5", "\xA7\x41\xA6\x6E", OnStringConversionError::FAIL, true,
+     L"\x4F60\x597D", nullptr},
+    // Greek (ISO-8859)
+    {"iso-8859-7",
+     "\xE3\xE5\xE9\xDC"
+     " "
+     "\xF3\xEF\xF5",
+     OnStringConversionError::FAIL, true,
+     L"\x03B3\x03B5\x03B9\x03AC"
+     L" "
+     L"\x03C3\x03BF\x03C5",
+     nullptr},
+    // Hebrew (Windows)
+    {"windows-1255", "\xF9\xD1\xC8\xEC\xE5\xC9\xED",
+     OnStringConversionError::FAIL, true,
+     L"\x05E9\x05C1\x05B8\x05DC\x05D5\x05B9\x05DD", nullptr},
+    // Korean (EUC)
+    {"euc-kr", "\xBE\xC8\xB3\xE7\xC7\xCF\xBC\xBC\xBF\xE4",
+     OnStringConversionError::FAIL, true, L"\xC548\xB155\xD558\xC138\xC694",
+     nullptr},
+    // Japanese (EUC)
+    {"euc-jp", "\xA4\xB3\xA4\xF3\xA4\xCB\xA4\xC1\xA4\xCF\xB0\xEC\x8E\xA6",
+     OnStringConversionError::FAIL, true,
+     L"\x3053\x3093\x306B\x3061\x306F\x4E00\xFF66", nullptr},
+    // Japanese (ISO-2022)
+    {"iso-2022-jp",
+     "\x1B$B"
+     "\x24\x33\x24\x73\x24\x4B\x24\x41\x24\x4F\x30\x6C"
+     "\x1B(B"
+     "ab"
+     "\x1B(J"
+     "\x5C\x7E#$"
+     "\x1B(B",
+     OnStringConversionError::FAIL, true,
+     L"\x3053\x3093\x306B\x3061\x306F\x4E00"
+     L"ab\x00A5\x203E#$",
+     nullptr},
+    // Japanese (Shift-JIS)
+    {"sjis", "\x82\xB1\x82\xF1\x82\xC9\x82\xBF\x82\xCD\x88\xEA\xA6",
+     OnStringConversionError::FAIL, true,
+     L"\x3053\x3093\x306B\x3061\x306F\x4E00\xFF66", nullptr},
+    // Russian (KOI8)
+    {"koi8-r", "\xDA\xC4\xD2\xC1\xD7\xD3\xD4\xD7\xD5\xCA\xD4\xC5",
+     OnStringConversionError::FAIL, true,
+     L"\x0437\x0434\x0440\x0430\x0432\x0441\x0442\x0432"
+     L"\x0443\x0439\x0442\x0435",
+     nullptr},
+    // Thai (windows-874)
+    {"windows-874",
+     "\xCA\xC7\xD1\xCA\xB4\xD5"
+     "\xA4\xC3\xD1\xBA",
+     OnStringConversionError::FAIL, true,
+     L"\x0E2A\x0E27\x0E31\x0E2A\x0E14\x0E35"
+     L"\x0E04\x0E23\x0e31\x0E1A",
+     nullptr},
+};
+
+TEST(ICUStringConversionsTest, ConvertBetweenCodepageAndUTF16) {
+  for (size_t i = 0; i < arraysize(kConvertCodepageCases); ++i) {
+    SCOPED_TRACE(base::StringPrintf(
+                     "Test[%" PRIuS "]: <encoded: %s> <codepage: %s>", i,
+                     kConvertCodepageCases[i].encoded,
+                     kConvertCodepageCases[i].codepage_name));
+
+    string16 utf16;
+    bool success = CodepageToUTF16(kConvertCodepageCases[i].encoded,
+                                   kConvertCodepageCases[i].codepage_name,
+                                   kConvertCodepageCases[i].on_error,
+                                   &utf16);
+    string16 utf16_expected;
+    if (kConvertCodepageCases[i].u16_wide == nullptr)
+      utf16_expected = BuildString16(kConvertCodepageCases[i].wide);
+    else
+      utf16_expected = BuildString16(kConvertCodepageCases[i].u16_wide);
+    EXPECT_EQ(kConvertCodepageCases[i].success, success);
+    EXPECT_EQ(utf16_expected, utf16);
+
+    // When decoding was successful and nothing was skipped, we also check the
+    // reverse conversion.
+    if (success &&
+        kConvertCodepageCases[i].on_error == OnStringConversionError::FAIL) {
+      std::string encoded;
+      success = UTF16ToCodepage(utf16, kConvertCodepageCases[i].codepage_name,
+                                kConvertCodepageCases[i].on_error, &encoded);
+      EXPECT_EQ(kConvertCodepageCases[i].success, success);
+      EXPECT_EQ(kConvertCodepageCases[i].encoded, encoded);
+    }
+  }
+}
+
+static const struct {
+  const char* encoded;
+  const char* codepage_name;
+  bool expected_success;
+  const char* expected_value;
+} kConvertAndNormalizeCases[] = {
+  {"foo-\xe4.html", "iso-8859-1", true, "foo-\xc3\xa4.html"},
+  {"foo-\xe4.html", "iso-8859-7", true, "foo-\xce\xb4.html"},
+  {"foo-\xe4.html", "foo-bar", false, ""},
+  // HTML Encoding spec treats US-ASCII as synonymous with windows-1252
+  {"foo-\xff.html", "ascii", true, "foo-\xc3\xbf.html"},
+  {"foo.html", "ascii", true, "foo.html"},
+  {"foo-a\xcc\x88.html", "utf-8", true, "foo-\xc3\xa4.html"},
+  {"\x95\x32\x82\x36\xD2\xBB", "gb18030", true, "\xF0\xA0\x80\x80\xE4\xB8\x80"},
+  {"\xA7\x41\xA6\x6E", "big5", true, "\xE4\xBD\xA0\xE5\xA5\xBD"},
+  // Windows-1258 does have a combining character at xD2 (which is U+0309).
+  // The sequence of (U+00E2, U+0309) is also encoded as U+1EA9.
+  {"foo\xE2\xD2", "windows-1258", true, "foo\xE1\xBA\xA9"},
+  {"", "iso-8859-1", true, ""},
+};
+TEST(ICUStringConversionsTest, ConvertToUtf8AndNormalize) {
+  std::string result;
+  for (size_t i = 0; i < arraysize(kConvertAndNormalizeCases); ++i) {
+    SCOPED_TRACE(base::StringPrintf(
+                     "Test[%" PRIuS "]: <encoded: %s> <codepage: %s>", i,
+                     kConvertAndNormalizeCases[i].encoded,
+                     kConvertAndNormalizeCases[i].codepage_name));
+
+    bool success = ConvertToUtf8AndNormalize(
+        kConvertAndNormalizeCases[i].encoded,
+        kConvertAndNormalizeCases[i].codepage_name, &result);
+    EXPECT_EQ(kConvertAndNormalizeCases[i].expected_success, success);
+    EXPECT_EQ(kConvertAndNormalizeCases[i].expected_value, result);
+  }
+}
+
+}  // namespace base
diff --git a/base/i18n/icu_util.cc b/base/i18n/icu_util.cc
new file mode 100644
index 0000000..bc08ecb
--- /dev/null
+++ b/base/i18n/icu_util.cc
@@ -0,0 +1,275 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/i18n/icu_util.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#endif
+
+#include <string>
+
+#include "base/debug/alias.h"
+#include "base/files/file_path.h"
+#include "base/files/memory_mapped_file.h"
+#include "base/logging.h"
+#include "base/path_service.h"
+#include "base/strings/string_util.h"
+#include "base/strings/sys_string_conversions.h"
+#include "build/build_config.h"
+#include "third_party/icu/source/common/unicode/putil.h"
+#include "third_party/icu/source/common/unicode/udata.h"
+#if (defined(OS_LINUX) && !defined(OS_CHROMEOS)) || defined(OS_ANDROID)
+#include "third_party/icu/source/i18n/unicode/timezone.h"
+#endif
+
+#if defined(OS_ANDROID)
+#include "base/android/apk_assets.h"
+#include "base/android/timezone_utils.h"
+#endif
+
+#if defined(OS_IOS)
+#include "base/ios/ios_util.h"
+#endif
+
+#if defined(OS_MACOSX)
+#include "base/mac/foundation_util.h"
+#endif
+
+#if defined(OS_FUCHSIA)
+#include "base/base_paths_fuchsia.h"
+#endif
+
+namespace base {
+namespace i18n {
+
+#if ICU_UTIL_DATA_IMPL == ICU_UTIL_DATA_SHARED
+#define ICU_UTIL_DATA_SYMBOL "icudt" U_ICU_VERSION_SHORT "_dat"
+#if defined(OS_WIN)
+#define ICU_UTIL_DATA_SHARED_MODULE_NAME "icudt.dll"
+#endif
+#endif
+
+namespace {
+#if !defined(OS_NACL)
+#if DCHECK_IS_ON()
+// Assert that we are not called more than once.  Even though calling this
+// function isn't harmful (ICU can handle it), being called twice probably
+// indicates a programming error.
+bool g_check_called_once = true;
+bool g_called_once = false;
+#endif  // DCHECK_IS_ON()
+
+#if ICU_UTIL_DATA_IMPL == ICU_UTIL_DATA_FILE
+
+// Use an unversioned file name to simplify an ICU version update down the
+// road. No need to change the filename in multiple places (gyp files, Windows
+// build pkg configurations, etc.). 'l' stands for Little Endian.
+// This variable is exported through the header file.
+const char kIcuDataFileName[] = "icudtl.dat";
+#if defined(OS_ANDROID)
+const char kAndroidAssetsIcuDataFileName[] = "assets/icudtl.dat";
+#endif
+
+// File handle intentionally never closed. Not using File here because its
+// Windows implementation guards against two instances owning the same
+// PlatformFile (which we allow since we know it is never freed).
+PlatformFile g_icudtl_pf = kInvalidPlatformFile;
+MemoryMappedFile* g_icudtl_mapped_file = nullptr;
+MemoryMappedFile::Region g_icudtl_region;
+
+void LazyInitIcuDataFile() {
+  if (g_icudtl_pf != kInvalidPlatformFile) {
+    return;
+  }
+#if defined(OS_ANDROID)
+  int fd = base::android::OpenApkAsset(kAndroidAssetsIcuDataFileName,
+                                       &g_icudtl_region);
+  g_icudtl_pf = fd;
+  if (fd != -1) {
+    return;
+  }
+// For unit tests, the data file is located on disk, so try there as a fallback.
+#endif  // defined(OS_ANDROID)
+#if !defined(OS_MACOSX)
+  FilePath data_path;
+  if (!PathService::Get(DIR_ASSETS, &data_path)) {
+    LOG(ERROR) << "Can't find " << kIcuDataFileName;
+    return;
+  }
+  data_path = data_path.AppendASCII(kIcuDataFileName);
+#else
+  // Assume it is in the framework bundle's Resources directory.
+  ScopedCFTypeRef<CFStringRef> data_file_name(
+      SysUTF8ToCFStringRef(kIcuDataFileName));
+  FilePath data_path = mac::PathForFrameworkBundleResource(data_file_name);
+#if defined(OS_IOS)
+  FilePath override_data_path = base::ios::FilePathOfEmbeddedICU();
+  if (!override_data_path.empty()) {
+    data_path = override_data_path;
+  }
+#endif  // defined(OS_IOS)
+  if (data_path.empty()) {
+    LOG(ERROR) << kIcuDataFileName << " not found in bundle";
+    return;
+  }
+#endif  // !defined(OS_MACOSX)
+  File file(data_path, File::FLAG_OPEN | File::FLAG_READ);
+  if (file.IsValid()) {
+    g_icudtl_pf = file.TakePlatformFile();
+    g_icudtl_region = MemoryMappedFile::Region::kWholeFile;
+  }
+}
+
+bool InitializeICUWithFileDescriptorInternal(
+    PlatformFile data_fd,
+    const MemoryMappedFile::Region& data_region) {
+  // This can be called multiple times in tests.
+  if (g_icudtl_mapped_file) {
+    return true;
+  }
+  if (data_fd == kInvalidPlatformFile) {
+    LOG(ERROR) << "Invalid file descriptor to ICU data received.";
+    return false;
+  }
+
+  std::unique_ptr<MemoryMappedFile> icudtl_mapped_file(new MemoryMappedFile());
+  if (!icudtl_mapped_file->Initialize(File(data_fd), data_region)) {
+    LOG(ERROR) << "Couldn't mmap icu data file";
+    return false;
+  }
+  g_icudtl_mapped_file = icudtl_mapped_file.release();
+
+  UErrorCode err = U_ZERO_ERROR;
+  udata_setCommonData(const_cast<uint8_t*>(g_icudtl_mapped_file->data()), &err);
+#if defined(OS_ANDROID)
+  if (err == U_ZERO_ERROR) {
+    // On Android, we can't leave it up to ICU to set the default timezone
+    // because ICU's timezone detection does not work in many timezones (e.g.
+    // Australia/Sydney, Asia/Seoul, Europe/Paris). Use JNI to detect the host
+    // timezone and set the ICU default timezone accordingly in advance of
+    // actual use. See crbug.com/722821 and
+    // https://ssl.icu-project.org/trac/ticket/13208 .
+    base::string16 timezone_id = base::android::GetDefaultTimeZoneId();
+    icu::TimeZone::adoptDefault(icu::TimeZone::createTimeZone(
+        icu::UnicodeString(FALSE, timezone_id.data(), timezone_id.length())));
+  }
+#endif
+  // Never try to load ICU data from files.
+  udata_setFileAccess(UDATA_ONLY_PACKAGES, &err);
+  return err == U_ZERO_ERROR;
+}
+#endif  // ICU_UTIL_DATA_IMPL == ICU_UTIL_DATA_FILE
+#endif  // !defined(OS_NACL)
+
+}  // namespace
+
+#if !defined(OS_NACL)
+#if ICU_UTIL_DATA_IMPL == ICU_UTIL_DATA_FILE
+#if defined(OS_ANDROID)
+bool InitializeICUWithFileDescriptor(
+    PlatformFile data_fd,
+    const MemoryMappedFile::Region& data_region) {
+#if DCHECK_IS_ON()
+  DCHECK(!g_check_called_once || !g_called_once);
+  g_called_once = true;
+#endif
+  return InitializeICUWithFileDescriptorInternal(data_fd, data_region);
+}
+
+PlatformFile GetIcuDataFileHandle(MemoryMappedFile::Region* out_region) {
+  CHECK_NE(g_icudtl_pf, kInvalidPlatformFile);
+  *out_region = g_icudtl_region;
+  return g_icudtl_pf;
+}
+#endif
+
+const uint8_t* GetRawIcuMemory() {
+  CHECK(g_icudtl_mapped_file);
+  return g_icudtl_mapped_file->data();
+}
+
+bool InitializeICUFromRawMemory(const uint8_t* raw_memory) {
+#if !defined(COMPONENT_BUILD)
+#if DCHECK_IS_ON()
+  DCHECK(!g_check_called_once || !g_called_once);
+  g_called_once = true;
+#endif
+
+  UErrorCode err = U_ZERO_ERROR;
+  udata_setCommonData(const_cast<uint8_t*>(raw_memory), &err);
+  // Never try to load ICU data from files.
+  udata_setFileAccess(UDATA_ONLY_PACKAGES, &err);
+  return err == U_ZERO_ERROR;
+#else
+  return true;
+#endif
+}
+
+#endif  // ICU_UTIL_DATA_IMPL == ICU_UTIL_DATA_FILE
+
+bool InitializeICU() {
+#if DCHECK_IS_ON()
+  DCHECK(!g_check_called_once || !g_called_once);
+  g_called_once = true;
+#endif
+
+  bool result;
+#if (ICU_UTIL_DATA_IMPL == ICU_UTIL_DATA_SHARED)
+  FilePath data_path;
+  PathService::Get(DIR_ASSETS, &data_path);
+  data_path = data_path.AppendASCII(ICU_UTIL_DATA_SHARED_MODULE_NAME);
+
+  HMODULE module = LoadLibrary(data_path.value().c_str());
+  if (!module) {
+    LOG(ERROR) << "Failed to load " << ICU_UTIL_DATA_SHARED_MODULE_NAME;
+    return false;
+  }
+
+  FARPROC addr = GetProcAddress(module, ICU_UTIL_DATA_SYMBOL);
+  if (!addr) {
+    LOG(ERROR) << ICU_UTIL_DATA_SYMBOL << ": not found in "
+               << ICU_UTIL_DATA_SHARED_MODULE_NAME;
+    return false;
+  }
+
+  UErrorCode err = U_ZERO_ERROR;
+  udata_setCommonData(reinterpret_cast<void*>(addr), &err);
+  // Never try to load ICU data from files.
+  udata_setFileAccess(UDATA_ONLY_PACKAGES, &err);
+  result = (err == U_ZERO_ERROR);
+#elif (ICU_UTIL_DATA_IMPL == ICU_UTIL_DATA_STATIC)
+  // The ICU data is statically linked.
+  result = true;
+#elif (ICU_UTIL_DATA_IMPL == ICU_UTIL_DATA_FILE)
+  // If the ICU data directory is set, ICU won't actually load the data until
+  // it is needed.  This can fail if the process is sandboxed at that time.
+  // Instead, we map the file in and hand off the data so the sandbox won't
+  // cause any problems.
+  LazyInitIcuDataFile();
+  result =
+      InitializeICUWithFileDescriptorInternal(g_icudtl_pf, g_icudtl_region);
+#endif
+
+// To respond to timezone changes properly, the default timezone cache in ICU
+// has to be populated on startup.
+// TODO(jungshik): Some callers do not care about tz at all. If necessary,
+// add a boolean argument to this function to initialize the default tz only
+// when requested.
+#if defined(OS_LINUX) && !defined(OS_CHROMEOS)
+  if (result)
+    std::unique_ptr<icu::TimeZone> zone(icu::TimeZone::createDefault());
+#endif
+  return result;
+}
+#endif  // !defined(OS_NACL)
+
+void AllowMultipleInitializeCallsForTesting() {
+#if DCHECK_IS_ON() && !defined(OS_NACL)
+  g_check_called_once = false;
+#endif
+}
+
+}  // namespace i18n
+}  // namespace base
diff --git a/base/i18n/icu_util.h b/base/i18n/icu_util.h
new file mode 100644
index 0000000..5f9948f
--- /dev/null
+++ b/base/i18n/icu_util.h
@@ -0,0 +1,67 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_I18N_ICU_UTIL_H_
+#define BASE_I18N_ICU_UTIL_H_
+
+#include <stdint.h>
+
+#include "base/files/memory_mapped_file.h"
+#include "base/i18n/base_i18n_export.h"
+#include "build/build_config.h"
+
+#define ICU_UTIL_DATA_FILE   0
+#define ICU_UTIL_DATA_SHARED 1
+#define ICU_UTIL_DATA_STATIC 2
+
+namespace base {
+namespace i18n {
+
+#if !defined(OS_NACL)
+// Call this function to load ICU's data tables for the current process.  This
+// function should be called before ICU is used.
+BASE_I18N_EXPORT bool InitializeICU();
+
+#if ICU_UTIL_DATA_IMPL == ICU_UTIL_DATA_FILE
+#if defined(OS_ANDROID)
+// Returns the PlatformFile and Region that were initialized by
+// InitializeICU(). Use with InitializeICUWithFileDescriptor().
+BASE_I18N_EXPORT PlatformFile GetIcuDataFileHandle(
+    MemoryMappedFile::Region* out_region);
+
+// Android uses a file descriptor passed by the browser process to initialize
+// ICU in render processes.
+BASE_I18N_EXPORT bool InitializeICUWithFileDescriptor(
+    PlatformFile data_fd,
+    const MemoryMappedFile::Region& data_region);
+#endif
+
+// Returns a pointer to the memory mapped ICU data file.
+//
+// There are cases on Android where we would be unsafely reusing a file
+// descriptor within the same process when initializing two copies of ICU from
+// different binaries in the same address space. This returns an unowned
+// pointer to the memory mapped ICU data file; consumers' copies of base must
+// not outlive the copy of base that owns the memory mapped file.
+BASE_I18N_EXPORT const uint8_t* GetRawIcuMemory();
+
+// Initializes ICU from a raw memory pointer.
+//
+// This does nothing in component builds; this initialization should only be
+// done in cases where there could be two copies of base in a single process in
+// non-component builds. (The big example is standalone service libraries: the
+// Service Manager will have a copy of base linked in, and the majority of
+// service libraries will have base linked in but in non-component builds,
+// these will be separate copies of base.)
+BASE_I18N_EXPORT bool InitializeICUFromRawMemory(const uint8_t* raw_memory);
+#endif  // ICU_UTIL_DATA_IMPL == ICU_UTIL_DATA_FILE
+#endif  // !defined(OS_NACL)
+
+// In a test binary, the call above might occur twice.
+BASE_I18N_EXPORT void AllowMultipleInitializeCallsForTesting();
+
+}  // namespace i18n
+}  // namespace base
+
+#endif  // BASE_I18N_ICU_UTIL_H_
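A minimal sketch of the expected call order at process startup, for the common
ICU_UTIL_DATA_FILE configuration (the main() wrapper is illustrative):

  #include "base/i18n/icu_util.h"

  int main(int argc, char** argv) {
    // Map icudtl.dat and hand the data to ICU before any ICU-backed
    // base/i18n call (conversion, formatting, collation, ...).
    if (!base::i18n::InitializeICU())
      return 1;
    // ... rest of startup ...
    return 0;
  }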
diff --git a/base/i18n/message_formatter.cc b/base/i18n/message_formatter.cc
new file mode 100644
index 0000000..c69dd07
--- /dev/null
+++ b/base/i18n/message_formatter.cc
@@ -0,0 +1,142 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/i18n/message_formatter.h"
+
+#include "base/i18n/unicodestring.h"
+#include "base/logging.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/time/time.h"
+#include "third_party/icu/source/common/unicode/unistr.h"
+#include "third_party/icu/source/common/unicode/utypes.h"
+#include "third_party/icu/source/i18n/unicode/fmtable.h"
+#include "third_party/icu/source/i18n/unicode/msgfmt.h"
+
+using icu::UnicodeString;
+
+namespace base {
+namespace i18n {
+namespace {
+UnicodeString UnicodeStringFromStringPiece(StringPiece str) {
+  return UnicodeString::fromUTF8(
+      icu::StringPiece(str.data(), base::checked_cast<int32_t>(str.size())));
+}
+}  // anonymous namespace
+
+namespace internal {
+MessageArg::MessageArg() : formattable(nullptr) {}
+
+MessageArg::MessageArg(const char* s)
+    : formattable(new icu::Formattable(UnicodeStringFromStringPiece(s))) {}
+
+MessageArg::MessageArg(StringPiece s)
+    : formattable(new icu::Formattable(UnicodeStringFromStringPiece(s))) {}
+
+MessageArg::MessageArg(const std::string& s)
+    : formattable(new icu::Formattable(UnicodeString::fromUTF8(s))) {}
+
+MessageArg::MessageArg(const string16& s)
+    : formattable(new icu::Formattable(UnicodeString(s.data(), s.size()))) {}
+
+MessageArg::MessageArg(int i) : formattable(new icu::Formattable(i)) {}
+
+MessageArg::MessageArg(int64_t i) : formattable(new icu::Formattable(i)) {}
+
+MessageArg::MessageArg(double d) : formattable(new icu::Formattable(d)) {}
+
+MessageArg::MessageArg(const Time& t)
+    : formattable(new icu::Formattable(static_cast<UDate>(t.ToJsTime()))) {}
+
+MessageArg::~MessageArg() = default;
+
+// Tests if this argument has a value, and if so increments *count.
+bool MessageArg::has_value(int *count) const {
+  if (formattable == nullptr)
+    return false;
+
+  ++*count;
+  return true;
+}
+
+}  // namespace internal
+
+string16 MessageFormatter::FormatWithNumberedArgs(
+    StringPiece16 msg,
+    const internal::MessageArg& arg0,
+    const internal::MessageArg& arg1,
+    const internal::MessageArg& arg2,
+    const internal::MessageArg& arg3,
+    const internal::MessageArg& arg4,
+    const internal::MessageArg& arg5,
+    const internal::MessageArg& arg6) {
+  int32_t args_count = 0;
+  icu::Formattable args[] = {
+      arg0.has_value(&args_count) ? *arg0.formattable : icu::Formattable(),
+      arg1.has_value(&args_count) ? *arg1.formattable : icu::Formattable(),
+      arg2.has_value(&args_count) ? *arg2.formattable : icu::Formattable(),
+      arg3.has_value(&args_count) ? *arg3.formattable : icu::Formattable(),
+      arg4.has_value(&args_count) ? *arg4.formattable : icu::Formattable(),
+      arg5.has_value(&args_count) ? *arg5.formattable : icu::Formattable(),
+      arg6.has_value(&args_count) ? *arg6.formattable : icu::Formattable(),
+  };
+
+  UnicodeString msg_string(msg.data(), msg.size());
+  UErrorCode error = U_ZERO_ERROR;
+  icu::MessageFormat format(msg_string, error);
+  icu::UnicodeString formatted;
+  icu::FieldPosition ignore(icu::FieldPosition::DONT_CARE);
+  format.format(args, args_count, formatted, ignore, error);
+  if (U_FAILURE(error)) {
+    LOG(ERROR) << "MessageFormat(" << msg.as_string() << ") failed with "
+               << u_errorName(error);
+    return string16();
+  }
+  return i18n::UnicodeStringToString16(formatted);
+}
+
+string16 MessageFormatter::FormatWithNamedArgs(
+    StringPiece16 msg,
+    StringPiece name0, const internal::MessageArg& arg0,
+    StringPiece name1, const internal::MessageArg& arg1,
+    StringPiece name2, const internal::MessageArg& arg2,
+    StringPiece name3, const internal::MessageArg& arg3,
+    StringPiece name4, const internal::MessageArg& arg4,
+    StringPiece name5, const internal::MessageArg& arg5,
+    StringPiece name6, const internal::MessageArg& arg6) {
+  icu::UnicodeString names[] = {
+      UnicodeStringFromStringPiece(name0),
+      UnicodeStringFromStringPiece(name1),
+      UnicodeStringFromStringPiece(name2),
+      UnicodeStringFromStringPiece(name3),
+      UnicodeStringFromStringPiece(name4),
+      UnicodeStringFromStringPiece(name5),
+      UnicodeStringFromStringPiece(name6),
+  };
+  int32_t args_count = 0;
+  icu::Formattable args[] = {
+      arg0.has_value(&args_count) ? *arg0.formattable : icu::Formattable(),
+      arg1.has_value(&args_count) ? *arg1.formattable : icu::Formattable(),
+      arg2.has_value(&args_count) ? *arg2.formattable : icu::Formattable(),
+      arg3.has_value(&args_count) ? *arg3.formattable : icu::Formattable(),
+      arg4.has_value(&args_count) ? *arg4.formattable : icu::Formattable(),
+      arg5.has_value(&args_count) ? *arg5.formattable : icu::Formattable(),
+      arg6.has_value(&args_count) ? *arg6.formattable : icu::Formattable(),
+  };
+
+  UnicodeString msg_string(msg.data(), msg.size());
+  UErrorCode error = U_ZERO_ERROR;
+  icu::MessageFormat format(msg_string, error);
+
+  icu::UnicodeString formatted;
+  format.format(names, args, args_count, formatted, error);
+  if (U_FAILURE(error)) {
+    LOG(ERROR) << "MessageFormat(" << msg.as_string() << ") failed with "
+               << u_errorName(error);
+    return string16();
+  }
+  return i18n::UnicodeStringToString16(formatted);
+}
+
+}  // namespace i18n
+}  // namespace base
diff --git a/base/i18n/message_formatter.h b/base/i18n/message_formatter.h
new file mode 100644
index 0000000..36a656d
--- /dev/null
+++ b/base/i18n/message_formatter.h
@@ -0,0 +1,128 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_I18N_MESSAGE_FORMATTER_H_
+#define BASE_I18N_MESSAGE_FORMATTER_H_
+
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+
+#include "base/i18n/base_i18n_export.h"
+#include "base/macros.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
+#include "third_party/icu/source/common/unicode/uversion.h"
+
+U_NAMESPACE_BEGIN
+class Formattable;
+U_NAMESPACE_END
+
+namespace base {
+
+class Time;
+
+namespace i18n {
+
+class MessageFormatter;
+
+namespace internal {
+
+class BASE_I18N_EXPORT MessageArg {
+ public:
+  MessageArg(const char* s);
+  MessageArg(StringPiece s);
+  MessageArg(const std::string& s);
+  MessageArg(const string16& s);
+  MessageArg(int i);
+  MessageArg(int64_t i);
+  MessageArg(double d);
+  MessageArg(const Time& t);
+  ~MessageArg();
+
+ private:
+  friend class base::i18n::MessageFormatter;
+  MessageArg();
+  // Tests if this argument has a value, and if so increments *count.
+  bool has_value(int* count) const;
+  std::unique_ptr<icu::Formattable> formattable;
+  DISALLOW_COPY_AND_ASSIGN(MessageArg);
+};
+
+}  // namespace internal
+
+// Message Formatter with the ICU message format syntax support.
+// It can format strings (UTF-8 and UTF-16), numbers and base::Time with
+// plural, gender and other 'selectors' support. This is handy if you
+// have multiple parameters of different types and some of them require
+// plural or gender/selector support.
+//
+// To use this API for locale-sensitive formatting, retrieve a 'message
+// template' in the ICU message format from a message bundle (e.g. with
+// l10n_util::GetStringUTF16()) and pass it to FormatWith{Named,Numbered}Args.
+//
+// MessageFormat specs:
+//   http://icu-project.org/apiref/icu4j/com/ibm/icu/text/MessageFormat.html
+//   http://icu-project.org/apiref/icu4c/classicu_1_1DecimalFormat.html#details
+// Examples:
+//   http://userguide.icu-project.org/formatparse/messages
+//   message_formatter_unittest.cc
+//   go/plurals inside Google.
+//   TODO(jshin): Document this API in md format docs.
+// Caveat:
+//   When plural/select/gender is used along with other format specifiers such
+//   as date or number, plural/select/gender should be at the top level. It's
+//   not an ICU restriction but a constraint imposed by Google's translation
+//   infrastructure. Message A does not work. It must be revised to Message B.
+//
+//     A.
+//       Rated <ph name="RATING">{0, number,0.0}<ex>3.2</ex></ph>
+//       by {1, plural, =1{a user} other{# users}}
+//
+//     B.
+//       {1, plural,
+//         =1{Rated <ph name="RATING">{0, number,0.0}<ex>3.2</ex></ph>
+//             by a user.}
+//         other{Rated <ph name="RATING">{0, number,0.0}<ex>3.2</ex></ph>
+//               by # users.}}
+
+class BASE_I18N_EXPORT MessageFormatter {
+ public:
+  static string16 FormatWithNamedArgs(
+      StringPiece16 msg,
+      StringPiece name0 = StringPiece(),
+      const internal::MessageArg& arg0 = internal::MessageArg(),
+      StringPiece name1 = StringPiece(),
+      const internal::MessageArg& arg1 = internal::MessageArg(),
+      StringPiece name2 = StringPiece(),
+      const internal::MessageArg& arg2 = internal::MessageArg(),
+      StringPiece name3 = StringPiece(),
+      const internal::MessageArg& arg3 = internal::MessageArg(),
+      StringPiece name4 = StringPiece(),
+      const internal::MessageArg& arg4 = internal::MessageArg(),
+      StringPiece name5 = StringPiece(),
+      const internal::MessageArg& arg5 = internal::MessageArg(),
+      StringPiece name6 = StringPiece(),
+      const internal::MessageArg& arg6 = internal::MessageArg());
+
+  static string16 FormatWithNumberedArgs(
+      StringPiece16 msg,
+      const internal::MessageArg& arg0 = internal::MessageArg(),
+      const internal::MessageArg& arg1 = internal::MessageArg(),
+      const internal::MessageArg& arg2 = internal::MessageArg(),
+      const internal::MessageArg& arg3 = internal::MessageArg(),
+      const internal::MessageArg& arg4 = internal::MessageArg(),
+      const internal::MessageArg& arg5 = internal::MessageArg(),
+      const internal::MessageArg& arg6 = internal::MessageArg());
+
+ private:
+  MessageFormatter() = delete;
+  DISALLOW_COPY_AND_ASSIGN(MessageFormatter);
+};
+
+}  // namespace i18n
+}  // namespace base
+
+#endif  // BASE_I18N_MESSAGE_FORMATTER_H_
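A small sketch of the named-argument form, keeping the plural selector at the
top level as the caveat above requires. The pattern is inlined here for
illustration; production code would fetch it from a message bundle:

  #include "base/i18n/message_formatter.h"
  #include "base/strings/string16.h"
  #include "base/strings/utf_string_conversions.h"

  // Hypothetical helper; assumes ICU is initialized.
  base::string16 DownloadStatus(int count) {
    const base::string16 pattern = base::ASCIIToUTF16(
        "{count, plural, "
        "=0 {No downloads.} "
        "=1 {One download.} "
        "other {# downloads.}}");
    return base::i18n::MessageFormatter::FormatWithNamedArgs(
        pattern, "count", count);
  }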
diff --git a/base/i18n/message_formatter_unittest.cc b/base/i18n/message_formatter_unittest.cc
new file mode 100644
index 0000000..a6f4613
--- /dev/null
+++ b/base/i18n/message_formatter_unittest.cc
@@ -0,0 +1,185 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/i18n/message_formatter.h"
+
+#include <memory>
+
+#include "base/i18n/rtl.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/icu/source/common/unicode/unistr.h"
+#include "third_party/icu/source/i18n/unicode/datefmt.h"
+#include "third_party/icu/source/i18n/unicode/msgfmt.h"
+
+namespace base {
+namespace i18n {
+
+class MessageFormatterTest : public testing::Test {
+ protected:
+  MessageFormatterTest() {
+    original_locale_ = GetConfiguredLocale();
+    SetICUDefaultLocale("en-US");
+  }
+  ~MessageFormatterTest() override {
+    SetICUDefaultLocale(original_locale_);
+  }
+
+ private:
+  std::string original_locale_;
+};
+
+namespace {
+
+void AppendFormattedDateTime(const std::unique_ptr<icu::DateFormat>& df,
+                             const Time& now,
+                             std::string* result) {
+  icu::UnicodeString formatted;
+  df->format(static_cast<UDate>(now.ToJsTime()), formatted).
+      toUTF8String(*result);
+}
+
+}  // namespace
+
+TEST_F(MessageFormatterTest, PluralNamedArgs) {
+  const string16 pattern = ASCIIToUTF16(
+      "{num_people, plural, "
+      "=0 {I met nobody in {place}.}"
+      "=1 {I met a person in {place}.}"
+      "other {I met # people in {place}.}}");
+
+  std::string result = UTF16ToASCII(MessageFormatter::FormatWithNamedArgs(
+      pattern, "num_people", 0, "place", "Paris"));
+  EXPECT_EQ("I met nobody in Paris.", result);
+  result = UTF16ToASCII(MessageFormatter::FormatWithNamedArgs(
+      pattern, "num_people", 1, "place", "Paris"));
+  EXPECT_EQ("I met a person in Paris.", result);
+  result = UTF16ToASCII(MessageFormatter::FormatWithNamedArgs(
+      pattern, "num_people", 5, "place", "Paris"));
+  EXPECT_EQ("I met 5 people in Paris.", result);
+}
+
+TEST_F(MessageFormatterTest, PluralNamedArgsWithOffset) {
+  const string16 pattern = ASCIIToUTF16(
+      "{num_people, plural, offset:1 "
+      "=0 {I met nobody in {place}.}"
+      "=1 {I met {person} in {place}.}"
+      "=2 {I met {person} and one other person in {place}.}"
+      "=13 {I met {person} and a dozen other people in {place}.}"
+      "other {I met {person} and # other people in {place}.}}");
+
+  std::string result = UTF16ToASCII(MessageFormatter::FormatWithNamedArgs(
+      pattern, "num_people", 0, "place", "Paris"));
+  EXPECT_EQ("I met nobody in Paris.", result);
+  // {person} is ignored if {num_people} is 0.
+  result = UTF16ToASCII(MessageFormatter::FormatWithNamedArgs(
+      pattern, "num_people", 0, "place", "Paris", "person", "Peter"));
+  EXPECT_EQ("I met nobody in Paris.", result);
+  result = UTF16ToASCII(MessageFormatter::FormatWithNamedArgs(
+      pattern, "num_people", 1, "place", "Paris", "person", "Peter"));
+  EXPECT_EQ("I met Peter in Paris.", result);
+  result = UTF16ToASCII(MessageFormatter::FormatWithNamedArgs(
+      pattern, "num_people", 2, "place", "Paris", "person", "Peter"));
+  EXPECT_EQ("I met Peter and one other person in Paris.", result);
+  result = UTF16ToASCII(MessageFormatter::FormatWithNamedArgs(
+      pattern, "num_people", 13, "place", "Paris", "person", "Peter"));
+  EXPECT_EQ("I met Peter and a dozen other people in Paris.", result);
+  result = UTF16ToASCII(MessageFormatter::FormatWithNamedArgs(
+      pattern, "num_people", 50, "place", "Paris", "person", "Peter"));
+  EXPECT_EQ("I met Peter and 49 other people in Paris.", result);
+}
+
+TEST_F(MessageFormatterTest, PluralNumberedArgs) {
+  const string16 pattern = ASCIIToUTF16(
+      "{1, plural, "
+      "=1 {The cert for {0} expired yesterday.}"
+      "=7 {The cert for {0} expired a week ago.}"
+      "other {The cert for {0} expired # days ago.}}");
+
+  std::string result = UTF16ToASCII(MessageFormatter::FormatWithNumberedArgs(
+      pattern, "example.com", 1));
+  EXPECT_EQ("The cert for example.com expired yesterday.", result);
+  result = UTF16ToASCII(MessageFormatter::FormatWithNumberedArgs(
+      pattern, "example.com", 7));
+  EXPECT_EQ("The cert for example.com expired a week ago.", result);
+  result = UTF16ToASCII(MessageFormatter::FormatWithNumberedArgs(
+      pattern, "example.com", 15));
+  EXPECT_EQ("The cert for example.com expired 15 days ago.", result);
+}
+
+TEST_F(MessageFormatterTest, PluralNumberedArgsWithDate) {
+  const string16 pattern = ASCIIToUTF16(
+      "{1, plural, "
+      "=1 {The cert for {0} expired yesterday. Today is {2,date,full}}"
+      "other {The cert for {0} expired # days ago. Today is {2,date,full}}}");
+
+  base::Time now = base::Time::Now();
+  using icu::DateFormat;
+  std::unique_ptr<DateFormat> df(
+      DateFormat::createDateInstance(DateFormat::FULL));
+  std::string second_sentence = " Today is ";
+  AppendFormattedDateTime(df, now, &second_sentence);
+
+  std::string result = UTF16ToASCII(MessageFormatter::FormatWithNumberedArgs(
+      pattern, "example.com", 1, now));
+  EXPECT_EQ("The cert for example.com expired yesterday." + second_sentence,
+            result);
+  result = UTF16ToASCII(MessageFormatter::FormatWithNumberedArgs(
+      pattern, "example.com", 15, now));
+  EXPECT_EQ("The cert for example.com expired 15 days ago." + second_sentence,
+            result);
+}
+
+TEST_F(MessageFormatterTest, DateTimeAndNumber) {
+  // Note that using 'mph' for all locales is not a good i18n practice.
+  const string16 pattern = ASCIIToUTF16(
+      "At {0,time, short} on {0,date, medium}, "
+      "there was {1} at building {2,number,integer}. "
+      "The speed of the wind was {3,number,###.#} mph.");
+
+  using icu::DateFormat;
+  std::unique_ptr<DateFormat> tf(
+      DateFormat::createTimeInstance(DateFormat::SHORT));
+  std::unique_ptr<DateFormat> df(
+      DateFormat::createDateInstance(DateFormat::MEDIUM));
+
+  base::Time now = base::Time::Now();
+  std::string expected = "At ";
+  AppendFormattedDateTime(tf, now, &expected);
+  expected.append(" on ");
+  AppendFormattedDateTime(df, now, &expected);
+  expected.append(", there was an explosion at building 3. "
+                  "The speed of the wind was 37.4 mph.");
+
+  EXPECT_EQ(expected, UTF16ToASCII(MessageFormatter::FormatWithNumberedArgs(
+      pattern, now, "an explosion", 3, 37.413)));
+}
+
+TEST_F(MessageFormatterTest, SelectorSingleOrMultiple) {
+  const string16 pattern = ASCIIToUTF16(
+      "{0, select,"
+      "single {Select a file to upload.}"
+      "multiple {Select files to upload.}"
+      "other {UNUSED}}");
+
+  std::string result = UTF16ToASCII(MessageFormatter::FormatWithNumberedArgs(
+      pattern, "single"));
+  EXPECT_EQ("Select a file to upload.", result);
+  result = UTF16ToASCII(MessageFormatter::FormatWithNumberedArgs(
+      pattern, "multiple"));
+  EXPECT_EQ("Select files to upload.", result);
+
+  // Fall back to 'other' if the argument does not match any of the selectors
+  // specified in the message pattern.
+  result = UTF16ToASCII(MessageFormatter::FormatWithNumberedArgs(
+      pattern, "foobar"));
+  EXPECT_EQ("UNUSED", result);
+}
+
+}  // namespace i18n
+}  // namespace base
diff --git a/base/i18n/number_formatting.cc b/base/i18n/number_formatting.cc
new file mode 100644
index 0000000..0ab031e
--- /dev/null
+++ b/base/i18n/number_formatting.cc
@@ -0,0 +1,97 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/i18n/number_formatting.h"
+
+#include <stddef.h>
+
+#include <memory>
+
+#include "base/format_macros.h"
+#include "base/i18n/message_formatter.h"
+#include "base/i18n/unicodestring.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/strings/utf_string_conversions.h"
+#include "third_party/icu/source/common/unicode/ustring.h"
+#include "third_party/icu/source/i18n/unicode/numfmt.h"
+
+namespace base {
+
+namespace {
+
+// A simple wrapper around icu::NumberFormat that allows for resetting it
+// (as LazyInstance does not).
+struct NumberFormatWrapper {
+  NumberFormatWrapper() {
+    Reset();
+  }
+
+  void Reset() {
+    // There's no ICU call to destroy a NumberFormat object other than
+    // operator delete, so use the default Delete, which calls operator delete.
+    // This can cause problems if a different allocator is used by this file
+    // than by ICU.
+    UErrorCode status = U_ZERO_ERROR;
+    number_format.reset(icu::NumberFormat::createInstance(status));
+    DCHECK(U_SUCCESS(status));
+  }
+
+  std::unique_ptr<icu::NumberFormat> number_format;
+};
+
+LazyInstance<NumberFormatWrapper>::DestructorAtExit g_number_format_int =
+    LAZY_INSTANCE_INITIALIZER;
+LazyInstance<NumberFormatWrapper>::DestructorAtExit g_number_format_float =
+    LAZY_INSTANCE_INITIALIZER;
+
+}  // namespace
+
+string16 FormatNumber(int64_t number) {
+  icu::NumberFormat* number_format =
+      g_number_format_int.Get().number_format.get();
+
+  if (!number_format) {
+    // As a fallback, just return the raw number in a string.
+    return ASCIIToUTF16(StringPrintf("%" PRId64, number));
+  }
+  icu::UnicodeString ustr;
+  number_format->format(number, ustr);
+
+  return i18n::UnicodeStringToString16(ustr);
+}
+
+string16 FormatDouble(double number, int fractional_digits) {
+  icu::NumberFormat* number_format =
+      g_number_format_float.Get().number_format.get();
+
+  if (!number_format) {
+    // As a fallback, just return the raw number in a string.
+    return ASCIIToUTF16(StringPrintf("%f", number));
+  }
+  number_format->setMaximumFractionDigits(fractional_digits);
+  number_format->setMinimumFractionDigits(fractional_digits);
+  icu::UnicodeString ustr;
+  number_format->format(number, ustr);
+
+  return i18n::UnicodeStringToString16(ustr);
+}
+
+string16 FormatPercent(int number) {
+  return i18n::MessageFormatter::FormatWithNumberedArgs(
+      ASCIIToUTF16("{0,number,percent}"), static_cast<double>(number) / 100.0);
+}
+
+namespace testing {
+
+void ResetFormatters() {
+  g_number_format_int.Get().Reset();
+  g_number_format_float.Get().Reset();
+}
+
+}  // namespace testing
+
+}  // namespace base
diff --git a/base/i18n/number_formatting.h b/base/i18n/number_formatting.h
new file mode 100644
index 0000000..9636bf4
--- /dev/null
+++ b/base/i18n/number_formatting.h
@@ -0,0 +1,38 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_I18N_NUMBER_FORMATTING_H_
+#define BASE_I18N_NUMBER_FORMATTING_H_
+
+#include <stdint.h>
+
+#include "base/i18n/base_i18n_export.h"
+#include "base/strings/string16.h"
+
+namespace base {
+
+// Return a number formatted with separators in the user's locale.
+// Ex: FormatNumber(1234567) => "1,234,567" in English, "1.234.567" in German
+BASE_I18N_EXPORT string16 FormatNumber(int64_t number);
+
+// Return a number formatted with separators in the user's locale.
+// Ex: FormatDouble(1234567.8, 1)
+//         => "1,234,567.8" in English, "1.234.567,8" in German
+BASE_I18N_EXPORT string16 FormatDouble(double number, int fractional_digits);
+
+// Return a percentage formatted with space and symbol in the user's locale.
+// Ex: FormatPercent(12) => "12%" in English, "12 %" in Romanian
+BASE_I18N_EXPORT string16 FormatPercent(int number);
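+
+// Illustrative usage sketch (editorial note, not part of this change). With
+// ICU initialized and the default locale set to "en", the calls above behave
+// roughly as follows; exact output depends on the ICU data in use:
+//
+//   string16 n = FormatNumber(1234567);       // "1,234,567"
+//   string16 d = FormatDouble(1234567.8, 1);  // "1,234,567.8"
+//   string16 p = FormatPercent(12);           // "12%"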
+
+namespace testing {
+
+// Causes cached formatters to be discarded and recreated. Only useful for
+// testing.
+BASE_I18N_EXPORT void ResetFormatters();
+
+}  // namespace testing
+
+}  // namespace base
+
+#endif  // BASE_I18N_NUMBER_FORMATTING_H_
diff --git a/base/i18n/number_formatting_unittest.cc b/base/i18n/number_formatting_unittest.cc
new file mode 100644
index 0000000..045bc0e
--- /dev/null
+++ b/base/i18n/number_formatting_unittest.cc
@@ -0,0 +1,135 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <limits>
+
+#include "base/i18n/number_formatting.h"
+#include "base/i18n/rtl.h"
+#include "base/macros.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/test/icu_test_util.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/icu/source/i18n/unicode/usearch.h"
+
+namespace base {
+namespace {
+
+TEST(NumberFormattingTest, FormatNumber) {
+  static const struct {
+    int64_t number;
+    const char* expected_english;
+    const char* expected_german;
+  } cases[] = {
+    {0, "0", "0"},
+    {1024, "1,024", "1.024"},
+    {std::numeric_limits<int64_t>::max(),
+        "9,223,372,036,854,775,807", "9.223.372.036.854.775.807"},
+    {std::numeric_limits<int64_t>::min(),
+        "-9,223,372,036,854,775,808", "-9.223.372.036.854.775.808"},
+    {-42, "-42", "-42"},
+  };
+
+  test::ScopedRestoreICUDefaultLocale restore_locale;
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    i18n::SetICUDefaultLocale("en");
+    testing::ResetFormatters();
+    EXPECT_EQ(cases[i].expected_english,
+              UTF16ToUTF8(FormatNumber(cases[i].number)));
+    i18n::SetICUDefaultLocale("de");
+    testing::ResetFormatters();
+    EXPECT_EQ(cases[i].expected_german,
+              UTF16ToUTF8(FormatNumber(cases[i].number)));
+  }
+}
+
+TEST(NumberFormattingTest, FormatDouble) {
+  static const struct {
+    double number;
+    int frac_digits;
+    const char* expected_english;
+    const char* expected_german;
+  } cases[] = {
+    {0.0, 0, "0", "0"},
+#if !defined(OS_ANDROID)
+    // Bionic can't printf negative zero correctly.
+    {-0.0, 4, "-0.0000", "-0,0000"},
+#endif
+    {1024.2, 0, "1,024", "1.024"},
+    {-1024.223, 2, "-1,024.22", "-1.024,22"},
+    {std::numeric_limits<double>::max(), 6,
+     "179,769,313,486,231,570,000,000,000,000,000,000,000,000,000,000,000,"
+     "000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,"
+     "000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,"
+     "000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,"
+     "000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,"
+     "000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,"
+     "000.000000",
+     "179.769.313.486.231.570.000.000.000.000.000.000.000.000.000.000.000."
+     "000.000.000.000.000.000.000.000.000.000.000.000.000.000.000.000.000."
+     "000.000.000.000.000.000.000.000.000.000.000.000.000.000.000.000.000."
+     "000.000.000.000.000.000.000.000.000.000.000.000.000.000.000.000.000."
+     "000.000.000.000.000.000.000.000.000.000.000.000.000.000.000.000.000."
+     "000.000.000.000.000.000.000.000.000.000.000.000.000.000.000.000.000."
+     "000,000000"},
+    {std::numeric_limits<double>::min(), 2, "0.00", "0,00"},
+    {-42.7, 3, "-42.700", "-42,700"},
+  };
+
+  test::ScopedRestoreICUDefaultLocale restore_locale;
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    i18n::SetICUDefaultLocale("en");
+    testing::ResetFormatters();
+    EXPECT_EQ(cases[i].expected_english,
+              UTF16ToUTF8(FormatDouble(cases[i].number, cases[i].frac_digits)));
+    i18n::SetICUDefaultLocale("de");
+    testing::ResetFormatters();
+    EXPECT_EQ(cases[i].expected_german,
+              UTF16ToUTF8(FormatDouble(cases[i].number, cases[i].frac_digits)));
+  }
+}
+
+TEST(NumberFormattingTest, FormatPercent) {
+  static const struct {
+    int64_t number;
+    const char* expected_english;
+    const wchar_t* expected_german;   // Note: space before '%' is U+00A0.
+    // Note: Eastern Arabic-Indic digits (U+06Fx) for Persian and
+    // Arabic-Indic digits (U+066x) for Arabic.
+    // See https://unicode.org/cldr/trac/ticket/9040 for details.
+    // See also https://unicode.org/cldr/trac/ticket/10176 .
+    // For now, take what CLDR 32 has (percent sign to the right of
+    // a number in Persian).
+    const wchar_t* expected_persian;
+    const wchar_t* expected_arabic;
+  } cases[] = {
+      {0, "0%", L"0\xa0%", L"\x6f0\x66a", L"\x660\x66a\x61c"},
+      {42, "42%", L"42\xa0%", L"\x6f4\x6f2\x66a", L"\x664\x662\x66a\x61c"},
+      {1024, "1,024%", L"1.024\xa0%", L"\x6f1\x66c\x6f0\x6f2\x6f4\x66a",
+       L"\x661\x66c\x660\x662\x664\x66a\x61c"},
+  };
+
+  test::ScopedRestoreICUDefaultLocale restore_locale;
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    i18n::SetICUDefaultLocale("en");
+    EXPECT_EQ(ASCIIToUTF16(cases[i].expected_english),
+              FormatPercent(cases[i].number));
+    i18n::SetICUDefaultLocale("de");
+    EXPECT_EQ(WideToUTF16(cases[i].expected_german),
+              FormatPercent(cases[i].number));
+    i18n::SetICUDefaultLocale("fa");
+    EXPECT_EQ(WideToUTF16(cases[i].expected_persian),
+              FormatPercent(cases[i].number));
+    i18n::SetICUDefaultLocale("ar");
+    EXPECT_EQ(WideToUTF16(cases[i].expected_arabic),
+              FormatPercent(cases[i].number));
+  }
+}
+
+}  // namespace
+}  // namespace base
diff --git a/base/i18n/rtl.cc b/base/i18n/rtl.cc
new file mode 100644
index 0000000..bba0d44
--- /dev/null
+++ b/base/i18n/rtl.cc
@@ -0,0 +1,491 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/i18n/rtl.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <algorithm>
+
+#include "base/command_line.h"
+#include "base/files/file_path.h"
+#include "base/i18n/base_i18n_switches.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_util.h"
+#include "base/strings/sys_string_conversions.h"
+#include "base/strings/utf_string_conversions.h"
+#include "build/build_config.h"
+#include "third_party/icu/source/common/unicode/locid.h"
+#include "third_party/icu/source/common/unicode/uchar.h"
+#include "third_party/icu/source/common/unicode/uscript.h"
+#include "third_party/icu/source/i18n/unicode/coll.h"
+
+#if defined(OS_IOS)
+#include "base/debug/crash_logging.h"
+#include "base/ios/ios_util.h"
+#endif
+
+namespace {
+
+// Extract language, country and variant, but ignore keywords.  For example,
+// en-US, ca@valencia, ca-ES@valencia.
+std::string GetLocaleString(const icu::Locale& locale) {
+  const char* language = locale.getLanguage();
+  const char* country = locale.getCountry();
+  const char* variant = locale.getVariant();
+
+  std::string result =
+      (language != nullptr && *language != '\0') ? language : "und";
+
+  if (country != nullptr && *country != '\0') {
+    result += '-';
+    result += country;
+  }
+
+  if (variant != nullptr && *variant != '\0')
+    result += '@' + base::ToLowerASCII(variant);
+
+  return result;
+}
+
+// Returns LEFT_TO_RIGHT or RIGHT_TO_LEFT if |character| has strong
+// directionality, or UNKNOWN_DIRECTION if it doesn't. Please refer to
+// http://unicode.org/reports/tr9/ for more information.
+base::i18n::TextDirection GetCharacterDirection(UChar32 character) {
+  static bool has_switch = base::CommandLine::ForCurrentProcess()->HasSwitch(
+      switches::kForceTextDirection);
+  if (has_switch) {
+    base::CommandLine* command_line = base::CommandLine::ForCurrentProcess();
+    std::string force_flag =
+        command_line->GetSwitchValueASCII(switches::kForceTextDirection);
+
+    if (force_flag == switches::kForceDirectionRTL)
+      return base::i18n::RIGHT_TO_LEFT;
+    if (force_flag == switches::kForceDirectionLTR)
+      return base::i18n::LEFT_TO_RIGHT;
+  }
+  // Now that we have the character, we use ICU in order to query for the
+  // appropriate Unicode BiDi character type.
+  int32_t property = u_getIntPropertyValue(character, UCHAR_BIDI_CLASS);
+  if ((property == U_RIGHT_TO_LEFT) ||
+      (property == U_RIGHT_TO_LEFT_ARABIC) ||
+      (property == U_RIGHT_TO_LEFT_EMBEDDING) ||
+      (property == U_RIGHT_TO_LEFT_OVERRIDE)) {
+    return base::i18n::RIGHT_TO_LEFT;
+  } else if ((property == U_LEFT_TO_RIGHT) ||
+             (property == U_LEFT_TO_RIGHT_EMBEDDING) ||
+             (property == U_LEFT_TO_RIGHT_OVERRIDE)) {
+    return base::i18n::LEFT_TO_RIGHT;
+  }
+  return base::i18n::UNKNOWN_DIRECTION;
+}
+
+}  // namespace
+
+namespace base {
+namespace i18n {
+
+// Represents the locale-specific ICU text direction.
+static TextDirection g_icu_text_direction = UNKNOWN_DIRECTION;
+
+// Convert the ICU default locale to a string.
+std::string GetConfiguredLocale() {
+  return GetLocaleString(icu::Locale::getDefault());
+}
+
+// Convert the ICU canonicalized locale to a string.
+std::string GetCanonicalLocale(const std::string& locale) {
+  return GetLocaleString(icu::Locale::createCanonical(locale.c_str()));
+}
+
+// Convert Chrome locale name to ICU locale name
+std::string ICULocaleName(const std::string& locale_string) {
+  // If not Spanish, just return it.
+  if (locale_string.substr(0, 2) != "es")
+    return locale_string;
+  // Expand es to es-ES.
+  if (LowerCaseEqualsASCII(locale_string, "es"))
+    return "es-ES";
+  // Map es-419 (Latin American Spanish) to es-FOO depending on the system
+  // locale.  If it's es-RR other than es-ES, map to es-RR. Otherwise, map
+  // to es-MX (the most populous in Spanish-speaking Latin America).
+  if (LowerCaseEqualsASCII(locale_string, "es-419")) {
+    const icu::Locale& locale = icu::Locale::getDefault();
+    std::string language = locale.getLanguage();
+    const char* country = locale.getCountry();
+    if (LowerCaseEqualsASCII(language, "es") &&
+        !LowerCaseEqualsASCII(country, "es")) {
+      language += '-';
+      language += country;
+      return language;
+    }
+    return "es-MX";
+  }
+  // Currently, Chrome has only "es" and "es-419", but later we may have
+  // more specific "es-RR".
+  return locale_string;
+}
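+
+// Illustrative examples (editorial note, not part of this change):
+//
+//   ICULocaleName("en-GB");   // "en-GB": non-Spanish locales pass through.
+//   ICULocaleName("es");      // "es-ES"
+//   ICULocaleName("es-419");  // "es-MX", unless the system locale is a
+//                             // Spanish locale outside Spain (e.g. es-AR),
+//                             // in which case that locale is returned.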
+
+void SetICUDefaultLocale(const std::string& locale_string) {
+#if defined(OS_IOS)
+  static base::debug::CrashKeyString* crash_key_locale =
+      base::debug::AllocateCrashKeyString("icu_locale_input",
+                                          base::debug::CrashKeySize::Size256);
+  base::debug::SetCrashKeyString(crash_key_locale, locale_string);
+#endif
+  icu::Locale locale(ICULocaleName(locale_string).c_str());
+  UErrorCode error_code = U_ZERO_ERROR;
+  const char* lang = locale.getLanguage();
+  if (lang != nullptr && *lang != '\0') {
+    icu::Locale::setDefault(locale, error_code);
+  } else {
+    LOG(ERROR) << "Failed to set the ICU default locale to " << locale_string
+               << ". Falling back to en-US.";
+    icu::Locale::setDefault(icu::Locale::getUS(), error_code);
+  }
+  g_icu_text_direction = UNKNOWN_DIRECTION;
+}
+
+bool IsRTL() {
+  return ICUIsRTL();
+}
+
+bool ICUIsRTL() {
+  if (g_icu_text_direction == UNKNOWN_DIRECTION) {
+    const icu::Locale& locale = icu::Locale::getDefault();
+    g_icu_text_direction = GetTextDirectionForLocaleInStartUp(locale.getName());
+  }
+  return g_icu_text_direction == RIGHT_TO_LEFT;
+}
+
+TextDirection GetForcedTextDirection() {
+// On iOS, check for RTL forcing.
+#if defined(OS_IOS)
+  if (base::ios::IsInForcedRTL())
+    return base::i18n::RIGHT_TO_LEFT;
+#endif
+
+  base::CommandLine* command_line = base::CommandLine::ForCurrentProcess();
+  if (command_line->HasSwitch(switches::kForceUIDirection)) {
+    std::string force_flag =
+        command_line->GetSwitchValueASCII(switches::kForceUIDirection);
+
+    if (force_flag == switches::kForceDirectionLTR)
+      return base::i18n::LEFT_TO_RIGHT;
+
+    if (force_flag == switches::kForceDirectionRTL)
+      return base::i18n::RIGHT_TO_LEFT;
+  }
+
+  return base::i18n::UNKNOWN_DIRECTION;
+}
+
+TextDirection GetTextDirectionForLocaleInStartUp(const char* locale_name) {
+  // Check for direction forcing.
+  TextDirection forced_direction = GetForcedTextDirection();
+  if (forced_direction != UNKNOWN_DIRECTION)
+    return forced_direction;
+
+  // This list must be kept in alphabetical order (it is searched with
+  // std::binary_search) and updated if we add more RTL locales.
+  static const char kRTLLanguageCodes[][3] = {"ar", "fa", "he", "iw", "ur"};
+  std::vector<StringPiece> locale_split =
+      SplitStringPiece(locale_name, "-_", KEEP_WHITESPACE, SPLIT_WANT_ALL);
+  const StringPiece& language_code = locale_split[0];
+  if (std::binary_search(kRTLLanguageCodes,
+                         kRTLLanguageCodes + arraysize(kRTLLanguageCodes),
+                         language_code))
+    return RIGHT_TO_LEFT;
+  return LEFT_TO_RIGHT;
+}
+
+TextDirection GetTextDirectionForLocale(const char* locale_name) {
+  // Check for direction forcing.
+  TextDirection forced_direction = GetForcedTextDirection();
+  if (forced_direction != UNKNOWN_DIRECTION)
+    return forced_direction;
+
+  UErrorCode status = U_ZERO_ERROR;
+  ULayoutType layout_dir = uloc_getCharacterOrientation(locale_name, &status);
+  DCHECK(U_SUCCESS(status));
+  // Treat anything other than RTL as LTR.
+  return (layout_dir != ULOC_LAYOUT_RTL) ? LEFT_TO_RIGHT : RIGHT_TO_LEFT;
+}
+
+TextDirection GetFirstStrongCharacterDirection(const string16& text) {
+  const UChar* string = text.c_str();
+  size_t length = text.length();
+  size_t position = 0;
+  while (position < length) {
+    UChar32 character;
+    size_t next_position = position;
+    U16_NEXT(string, next_position, length, character);
+    TextDirection direction = GetCharacterDirection(character);
+    if (direction != UNKNOWN_DIRECTION)
+      return direction;
+    position = next_position;
+  }
+  return LEFT_TO_RIGHT;
+}
+
+TextDirection GetLastStrongCharacterDirection(const string16& text) {
+  const UChar* string = text.c_str();
+  size_t position = text.length();
+  while (position > 0) {
+    UChar32 character;
+    size_t prev_position = position;
+    U16_PREV(string, 0, prev_position, character);
+    TextDirection direction = GetCharacterDirection(character);
+    if (direction != UNKNOWN_DIRECTION)
+      return direction;
+    position = prev_position;
+  }
+  return LEFT_TO_RIGHT;
+}
+
+TextDirection GetStringDirection(const string16& text) {
+  const UChar* string = text.c_str();
+  size_t length = text.length();
+  size_t position = 0;
+
+  TextDirection result(UNKNOWN_DIRECTION);
+  while (position < length) {
+    UChar32 character;
+    size_t next_position = position;
+    U16_NEXT(string, next_position, length, character);
+    TextDirection direction = GetCharacterDirection(character);
+    if (direction != UNKNOWN_DIRECTION) {
+      if (result != UNKNOWN_DIRECTION && result != direction)
+        return UNKNOWN_DIRECTION;
+      result = direction;
+    }
+    position = next_position;
+  }
+
+  // Handle the case of a string not containing any strong directionality
+  // characters defaulting to LEFT_TO_RIGHT.
+  if (result == UNKNOWN_DIRECTION)
+    return LEFT_TO_RIGHT;
+
+  return result;
+}
+
+#if defined(OS_WIN)
+bool AdjustStringForLocaleDirection(string16* text) {
+  if (!IsRTL() || text->empty())
+    return false;
+
+  // Mark the string as LTR if the locale is RTL and the string does not
+  // contain strong RTL characters; otherwise, mark the string as RTL.
+  bool has_rtl_chars = StringContainsStrongRTLChars(*text);
+  if (!has_rtl_chars)
+    WrapStringWithLTRFormatting(text);
+  else
+    WrapStringWithRTLFormatting(text);
+
+  return true;
+}
+
+bool UnadjustStringForLocaleDirection(string16* text) {
+  if (!IsRTL() || text->empty())
+    return false;
+
+  *text = StripWrappingBidiControlCharacters(*text);
+  return true;
+}
+#else
+bool AdjustStringForLocaleDirection(string16* text) {
+  // On OS X & GTK the directionality of a label is determined by the first
+  // strongly directional character.
+  // However, we want to make sure that in an LTR-language-UI all strings are
+  // left aligned and vice versa.
+  // A problem can arise if we display a string which starts with user input.
+  // User input may be of the opposite directionality to the UI. So the whole
+  // string will be displayed in the opposite directionality, e.g. if we want to
+  // display in an LTR UI [such as US English]:
+  //
+  // EMAN_NOISNETXE is now installed.
+  //
+  // Since EXTENSION_NAME begins with a strong RTL char, the label's
+  // directionality will be set to RTL and the string will be displayed visually
+  // as:
+  //
+  // .is now installed EMAN_NOISNETXE
+  //
+  // In order to solve this issue, we prepend an LRM to the string. An LRM is a
+  // strongly directional LTR char.
+  // We also append an LRM at the end, which ensures that we're in an LTR
+  // context.
+
+  // Unlike Windows, Linux and OS X can correctly display RTL glyphs out of
+  // the box, so there is no issue with displaying zero-width bidi control
+  // characters on any system. Thus there is no need for the !IsRTL() check
+  // here.
+  if (text->empty())
+    return false;
+
+  bool ui_direction_is_rtl = IsRTL();
+
+  bool has_rtl_chars = StringContainsStrongRTLChars(*text);
+  if (!ui_direction_is_rtl && has_rtl_chars) {
+    WrapStringWithRTLFormatting(text);
+    text->insert(static_cast<size_t>(0), static_cast<size_t>(1),
+                 kLeftToRightMark);
+    text->push_back(kLeftToRightMark);
+  } else if (ui_direction_is_rtl && has_rtl_chars) {
+    WrapStringWithRTLFormatting(text);
+    text->insert(static_cast<size_t>(0), static_cast<size_t>(1),
+                 kRightToLeftMark);
+    text->push_back(kRightToLeftMark);
+  } else if (ui_direction_is_rtl) {
+    WrapStringWithLTRFormatting(text);
+    text->insert(static_cast<size_t>(0), static_cast<size_t>(1),
+                 kRightToLeftMark);
+    text->push_back(kRightToLeftMark);
+  } else {
+    return false;
+  }
+
+  return true;
+}
+
+bool UnadjustStringForLocaleDirection(string16* text) {
+  if (text->empty())
+    return false;
+
+  size_t begin_index = 0;
+  char16 begin = text->at(begin_index);
+  if (begin == kLeftToRightMark ||
+      begin == kRightToLeftMark) {
+    ++begin_index;
+  }
+
+  size_t end_index = text->length() - 1;
+  char16 end = text->at(end_index);
+  if (end == kLeftToRightMark ||
+      end == kRightToLeftMark) {
+    --end_index;
+  }
+
+  string16 unmarked_text =
+      text->substr(begin_index, end_index - begin_index + 1);
+  *text = StripWrappingBidiControlCharacters(unmarked_text);
+  return true;
+}
+
+#endif  // !OS_WIN
+
+void EnsureTerminatedDirectionalFormatting(string16* text) {
+  int count = 0;
+  for (auto c : *text) {
+    if (c == kLeftToRightEmbeddingMark || c == kRightToLeftEmbeddingMark ||
+        c == kLeftToRightOverride || c == kRightToLeftOverride) {
+      ++count;
+    } else if (c == kPopDirectionalFormatting && count > 0) {
+      --count;
+    }
+  }
+  for (int j = 0; j < count; j++)
+    text->push_back(kPopDirectionalFormatting);
+}
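+
+// Illustrative example (editorial note, not part of this change): the input
+// L"A\x202e" L"B" contains one unterminated RLO, so exactly one U+202C is
+// appended.
+//
+//   string16 s = WideToUTF16(L"A\x202e" L"B");
+//   EnsureTerminatedDirectionalFormatting(&s);  // s == L"A\x202e" L"B\x202c"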
+
+void SanitizeUserSuppliedString(string16* text) {
+  EnsureTerminatedDirectionalFormatting(text);
+  AdjustStringForLocaleDirection(text);
+}
+
+bool StringContainsStrongRTLChars(const string16& text) {
+  const UChar* string = text.c_str();
+  size_t length = text.length();
+  size_t position = 0;
+  while (position < length) {
+    UChar32 character;
+    size_t next_position = position;
+    U16_NEXT(string, next_position, length, character);
+
+    // Now that we have the character, we use ICU in order to query for the
+    // appropriate Unicode BiDi character type.
+    int32_t property = u_getIntPropertyValue(character, UCHAR_BIDI_CLASS);
+    if ((property == U_RIGHT_TO_LEFT) || (property == U_RIGHT_TO_LEFT_ARABIC))
+      return true;
+
+    position = next_position;
+  }
+
+  return false;
+}
+
+void WrapStringWithLTRFormatting(string16* text) {
+  if (text->empty())
+    return;
+
+  // Inserting an LRE (Left-To-Right Embedding) mark as the first character.
+  text->insert(static_cast<size_t>(0), static_cast<size_t>(1),
+               kLeftToRightEmbeddingMark);
+
+  // Inserting a PDF (Pop Directional Formatting) mark as the last character.
+  text->push_back(kPopDirectionalFormatting);
+}
+
+void WrapStringWithRTLFormatting(string16* text) {
+  if (text->empty())
+    return;
+
+  // Inserting an RLE (Right-To-Left Embedding) mark as the first character.
+  text->insert(static_cast<size_t>(0), static_cast<size_t>(1),
+               kRightToLeftEmbeddingMark);
+
+  // Inserting a PDF (Pop Directional Formatting) mark as the last character.
+  text->push_back(kPopDirectionalFormatting);
+}
+
+void WrapPathWithLTRFormatting(const FilePath& path,
+                               string16* rtl_safe_path) {
+  // Wrap the overall path with an LRE-PDF pair, which essentially marks the
+  // string as a Left-To-Right string.
+  // Inserting an LRE (Left-To-Right Embedding) mark as the first character.
+  rtl_safe_path->push_back(kLeftToRightEmbeddingMark);
+#if defined(OS_MACOSX)
+  rtl_safe_path->append(UTF8ToUTF16(path.value()));
+#elif defined(OS_WIN)
+  rtl_safe_path->append(path.value());
+#else  // defined(OS_POSIX) && !defined(OS_MACOSX)
+  std::wstring wide_path = base::SysNativeMBToWide(path.value());
+  rtl_safe_path->append(WideToUTF16(wide_path));
+#endif
+  // Inserting a PDF (Pop Directional Formatting) mark as the last character.
+  rtl_safe_path->push_back(kPopDirectionalFormatting);
+}
+
+string16 GetDisplayStringInLTRDirectionality(const string16& text) {
+  // Always wrap the string in RTL UI (it may be appended to an RTL string).
+  // Also wrap strings whose first strong character is RTL in LTR UI.
+  if (IsRTL() || GetFirstStrongCharacterDirection(text) == RIGHT_TO_LEFT) {
+    string16 text_mutable(text);
+    WrapStringWithLTRFormatting(&text_mutable);
+    return text_mutable;
+  }
+  return text;
+}
+
+string16 StripWrappingBidiControlCharacters(const string16& text) {
+  if (text.empty())
+    return text;
+  size_t begin_index = 0;
+  char16 begin = text[begin_index];
+  if (begin == kLeftToRightEmbeddingMark ||
+      begin == kRightToLeftEmbeddingMark ||
+      begin == kLeftToRightOverride ||
+      begin == kRightToLeftOverride)
+    ++begin_index;
+  size_t end_index = text.length() - 1;
+  if (text[end_index] == kPopDirectionalFormatting)
+    --end_index;
+  return text.substr(begin_index, end_index - begin_index + 1);
+}
+
+}  // namespace i18n
+}  // namespace base
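+
+// Illustrative usage sketch (editorial note, not part of this change):
+// wrapping forces directionality regardless of content.
+//
+//   string16 text = ASCIIToUTF16("Foo!");
+//   base::i18n::WrapStringWithLTRFormatting(&text);
+//   // |text| is now U+202A "Foo!" U+202C and is rendered left-to-right even
+//   // in RTL surroundings; StripWrappingBidiControlCharacters(text) recovers
+//   // the original string.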
diff --git a/base/i18n/rtl.h b/base/i18n/rtl.h
new file mode 100644
index 0000000..5325970
--- /dev/null
+++ b/base/i18n/rtl.h
@@ -0,0 +1,168 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_I18N_RTL_H_
+#define BASE_I18N_RTL_H_
+
+#include <string>
+
+#include "base/compiler_specific.h"
+#include "base/i18n/base_i18n_export.h"
+#include "base/strings/string16.h"
+#include "build/build_config.h"
+
+namespace base {
+
+class FilePath;
+
+namespace i18n {
+
+const char16 kRightToLeftMark = 0x200F;
+const char16 kLeftToRightMark = 0x200E;
+const char16 kLeftToRightEmbeddingMark = 0x202A;
+const char16 kRightToLeftEmbeddingMark = 0x202B;
+const char16 kPopDirectionalFormatting = 0x202C;
+const char16 kLeftToRightOverride = 0x202D;
+const char16 kRightToLeftOverride = 0x202E;
+
+// This enum is mirrored in Locale.java as TextDirection. Please keep in sync.
+enum TextDirection {
+  UNKNOWN_DIRECTION = 0,
+  RIGHT_TO_LEFT = 1,
+  LEFT_TO_RIGHT = 2,
+  TEXT_DIRECTION_MAX = LEFT_TO_RIGHT,
+};
+
+// Get the locale that the currently running process has been configured to use.
+// The return value is of the form language[-country] (e.g., en-US) where the
+// language is the 2 or 3 letter code from ISO-639.
+BASE_I18N_EXPORT std::string GetConfiguredLocale();
+
+// Canonicalize a string (eg. a POSIX locale string) to a Chrome locale name.
+BASE_I18N_EXPORT std::string GetCanonicalLocale(const std::string& locale);
+
+// Sets the default locale of ICU.
+// Once the application locale of Chrome is determined in
+// GetApplicationLocale, the default locale of ICU needs to be changed to
+// match it so that ICU functions work correctly in a locale-dependent manner.
+// This is handy in that we don't have to call GetApplicationLocale() every
+// time we call a locale-dependent ICU API, as long as this is called before
+// any such API.
+BASE_I18N_EXPORT void SetICUDefaultLocale(const std::string& locale_string);
+
+// Returns true if the application text direction is right-to-left.
+BASE_I18N_EXPORT bool IsRTL();
+
+// Returns whether the text direction for the default ICU locale is RTL.  This
+// assumes that SetICUDefaultLocale has been called to set the default locale to
+// the UI locale of Chrome.
+// NOTE: Generally, you should call IsRTL() instead of this.
+BASE_I18N_EXPORT bool ICUIsRTL();
+
+// Gets the explicitly forced text direction for debugging. If no forcing is
+// applied, returns UNKNOWN_DIRECTION.
+BASE_I18N_EXPORT TextDirection GetForcedTextDirection();
+
+// Returns the text direction for |locale_name|.
+// As a startup optimization, this method checks the locale against a list of
+// Chrome-supported RTL locales.
+BASE_I18N_EXPORT TextDirection
+GetTextDirectionForLocaleInStartUp(const char* locale_name);
+
+// Returns the text direction for |locale_name|.
+BASE_I18N_EXPORT TextDirection GetTextDirectionForLocale(
+    const char* locale_name);
+
+// Given the string in |text|, returns the directionality of the first or last
+// character with strong directionality in the string. If no character in the
+// text has strong directionality, LEFT_TO_RIGHT is returned. The Bidi
+// character types L, LRE, LRO, R, AL, RLE, and RLO are considered as strong
+// directionality characters. Please refer to http://unicode.org/reports/tr9/
+// for more information.
+BASE_I18N_EXPORT TextDirection GetFirstStrongCharacterDirection(
+    const string16& text);
+BASE_I18N_EXPORT TextDirection GetLastStrongCharacterDirection(
+    const string16& text);
+
+// Given the string in |text|, returns LEFT_TO_RIGHT or RIGHT_TO_LEFT if all the
+// strong directionality characters in the string are of the same
+// directionality. It returns UNKNOWN_DIRECTION if the string contains a mix of
+// LTR and RTL strong directionality characters. Defaults to LEFT_TO_RIGHT if
+// the string does not contain directionality characters. Please refer to
+// http://unicode.org/reports/tr9/ for more information.
+BASE_I18N_EXPORT TextDirection GetStringDirection(const string16& text);
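+
+// Illustrative examples (editorial note, not part of this change), using
+// U+05D0 (Hebrew Alef) as a strong RTL character:
+//
+//   GetStringDirection(ASCIIToUTF16("abc"));       // LEFT_TO_RIGHT
+//   GetStringDirection(WideToUTF16(L"\x05d0"));    // RIGHT_TO_LEFT
+//   GetStringDirection(WideToUTF16(L"a \x05d0"));  // UNKNOWN_DIRECTION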
+
+// Given the string in |text|, this function modifies the string in place with
+// the appropriate Unicode formatting marks that mark the string direction
+// (either left-to-right or right-to-left). The function checks both the current
+// locale and the contents of the string in order to determine the direction of
+// the returned string. The function returns true if the string in |text| was
+// properly adjusted.
+//
+// Certain LTR strings are not rendered correctly when the context is RTL. For
+// example, the string "Foo!" will appear as "!Foo" if it is rendered as is in
+// an RTL context. Calling this function will make sure the returned localized
+// string is always treated as a right-to-left string. This is done by
+// inserting certain Unicode formatting marks into the returned string.
+//
+// ** Notes about the Windows version of this function:
+// TODO(idana) bug 6806: this function adjusts the string in question only
+// if the current locale is right-to-left. The function does not take care of
+// the opposite case (an RTL string displayed in an LTR context) since
+// adjusting the string involves inserting Unicode formatting characters that
+// Windows does not handle well unless right-to-left language support is
+// installed. Since the English version of Windows doesn't have right-to-left
+// language support installed by default, inserting the direction Unicode mark
+// results in Windows displaying squares.
+BASE_I18N_EXPORT bool AdjustStringForLocaleDirection(string16* text);
+
+// Undoes the actions of the above function (AdjustStringForLocaleDirection).
+BASE_I18N_EXPORT bool UnadjustStringForLocaleDirection(string16* text);
+
+// Ensures |text| contains no unterminated directional formatting characters, by
+// appending the appropriate pop-directional-formatting characters to the end of
+// |text|.
+BASE_I18N_EXPORT void EnsureTerminatedDirectionalFormatting(string16* text);
+
+// Sanitizes the |text| by terminating any directional override/embedding
+// characters and then adjusting the string for locale direction.
+BASE_I18N_EXPORT void SanitizeUserSuppliedString(string16* text);
+
+// Returns true if the string contains at least one character with strong right
+// to left directionality; that is, a character with either R or AL Unicode
+// BiDi character type.
+BASE_I18N_EXPORT bool StringContainsStrongRTLChars(const string16& text);
+
+// Wraps a string with an LRE-PDF pair, which essentially marks the string as
+// Left-To-Right string. Doing this is useful in order to make sure LTR
+// strings are rendered properly in an RTL context.
+BASE_I18N_EXPORT void WrapStringWithLTRFormatting(string16* text);
+
+// Wraps a string with an RLE-PDF pair, which essentially marks the string as
+// Right-To-Left string. Doing this is useful in order to make sure RTL
+// strings are rendered properly in an LTR context.
+BASE_I18N_EXPORT void WrapStringWithRTLFormatting(string16* text);
+
+// Wraps a file path so that it displays correctly in RTL UI. All file paths
+// should be passed through this function before being displayed in UI for
+// RTL locales.
+BASE_I18N_EXPORT void WrapPathWithLTRFormatting(const FilePath& path,
+                                                string16* rtl_safe_path);
+
+// Return the string in |text| wrapped with LRE (Left-To-Right Embedding) and
+// PDF (Pop Directional Formatting) marks, if needed for UI display purposes.
+BASE_I18N_EXPORT string16 GetDisplayStringInLTRDirectionality(
+    const string16& text) WARN_UNUSED_RESULT;
+
+// Strip the beginning (U+202A..U+202B, U+202D..U+202E) and/or ending (U+202C)
+// explicit bidi control characters from |text|, if there are any. Otherwise,
+// return the text itself. Explicit bidi control characters affect display and
+// have semantic effect. They can be deleted, so they might not always appear
+// in a pair.
+BASE_I18N_EXPORT string16 StripWrappingBidiControlCharacters(
+    const string16& text) WARN_UNUSED_RESULT;
+
+}  // namespace i18n
+}  // namespace base
+
+#endif  // BASE_I18N_RTL_H_
diff --git a/base/i18n/rtl_unittest.cc b/base/i18n/rtl_unittest.cc
new file mode 100644
index 0000000..313d2b4
--- /dev/null
+++ b/base/i18n/rtl_unittest.cc
@@ -0,0 +1,567 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/i18n/rtl.h"
+
+#include <stddef.h>
+
+#include <algorithm>
+
+#include "base/files/file_path.h"
+#include "base/macros.h"
+#include "base/strings/string_util.h"
+#include "base/strings/sys_string_conversions.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/test/icu_test_util.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/platform_test.h"
+#include "third_party/icu/source/common/unicode/locid.h"
+#include "third_party/icu/source/i18n/unicode/usearch.h"
+
+namespace base {
+namespace i18n {
+
+namespace {
+
+// A test utility function to set the application default text direction.
+void SetRTL(bool rtl) {
+  // Override the current locale/direction.
+  SetICUDefaultLocale(rtl ? "he" : "en");
+  EXPECT_EQ(rtl, IsRTL());
+}
+
+}  // namespace
+
+class RTLTest : public PlatformTest {
+};
+
+TEST_F(RTLTest, GetFirstStrongCharacterDirection) {
+  struct {
+    const wchar_t* text;
+    TextDirection direction;
+  } cases[] = {
+    // Test pure LTR string.
+    { L"foo bar", LEFT_TO_RIGHT },
+    // Test pure RTL string.
+    { L"\x05d0\x05d1\x05d2 \x05d3\x0d4\x05d5", RIGHT_TO_LEFT},
+    // Test bidi string in which the first character with strong directionality
+    // is a character with type L.
+    { L"foo \x05d0 bar", LEFT_TO_RIGHT },
+    // Test bidi string in which the first character with strong directionality
+    // is a character with type R.
+    { L"\x05d0 foo bar", RIGHT_TO_LEFT },
+    // Test bidi string which starts with a character with weak directionality
+    // and in which the first character with strong directionality is a
+    // character with type L.
+    { L"!foo \x05d0 bar", LEFT_TO_RIGHT },
+    // Test bidi string which starts with a character with weak directionality
+    // and in which the first character with strong directionality is a
+    // character with type R.
+    { L",\x05d0 foo bar", RIGHT_TO_LEFT },
+    // Test bidi string in which the first character with strong directionality
+    // is a character with type LRE.
+    { L"\x202a \x05d0 foo  bar", LEFT_TO_RIGHT },
+    // Test bidi string in which the first character with strong directionality
+    // is a character with type LRO.
+    { L"\x202d \x05d0 foo  bar", LEFT_TO_RIGHT },
+    // Test bidi string in which the first character with strong directionality
+    // is a character with type RLE.
+    { L"\x202b foo \x05d0 bar", RIGHT_TO_LEFT },
+    // Test bidi string in which the first character with strong directionality
+    // is a character with type RLO.
+    { L"\x202e foo \x05d0 bar", RIGHT_TO_LEFT },
+    // Test bidi string in which the first character with strong directionality
+    // is a character with type AL.
+    { L"\x0622 foo \x05d0 bar", RIGHT_TO_LEFT },
+    // Test a string without strong directionality characters.
+    { L",!.{}", LEFT_TO_RIGHT },
+    // Test empty string.
+    { L"", LEFT_TO_RIGHT },
+    // Test characters outside the BMP (e.g. Phoenician letters; please refer
+    // to http://demo.icu-project.org/icu-bin/ubrowse?scr=151&b=10910 for more
+    // information).
+    {
+#if defined(WCHAR_T_IS_UTF32)
+      L" ! \x10910" L"abc 123",
+#elif defined(WCHAR_T_IS_UTF16)
+      L" ! \xd802\xdd10" L"abc 123",
+#else
+#error wchar_t should be either UTF-16 or UTF-32
+#endif
+      RIGHT_TO_LEFT },
+    {
+#if defined(WCHAR_T_IS_UTF32)
+      L" ! \x10401" L"abc 123",
+#elif defined(WCHAR_T_IS_UTF16)
+      L" ! \xd801\xdc01" L"abc 123",
+#else
+#error wchar_t should be either UTF-16 or UTF-32
+#endif
+      LEFT_TO_RIGHT },
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i)
+    EXPECT_EQ(cases[i].direction,
+              GetFirstStrongCharacterDirection(WideToUTF16(cases[i].text)));
+}
+
+
+// Note that the cases with LRE, LRO, RLE and RLO are invalid for
+// GetLastStrongCharacterDirection because they should be followed by a PDF
+// character.
+TEST_F(RTLTest, GetLastStrongCharacterDirection) {
+  struct {
+    const wchar_t* text;
+    TextDirection direction;
+  } cases[] = {
+    // Test pure LTR string.
+    { L"foo bar", LEFT_TO_RIGHT },
+    // Test pure RTL string.
+    { L"\x05d0\x05d1\x05d2 \x05d3\x0d4\x05d5", RIGHT_TO_LEFT},
+    // Test bidi string in which the last character with strong directionality
+    // is a character with type L.
+    { L"foo \x05d0 bar", LEFT_TO_RIGHT },
+    // Test bidi string in which the last character with strong directionality
+    // is a character with type R.
+    { L"\x05d0 foo bar \x05d3", RIGHT_TO_LEFT },
+    // Test bidi string which ends with a character with weak directionality
+    // and in which the last character with strong directionality is a
+    // character with type L.
+    { L"!foo \x05d0 bar!", LEFT_TO_RIGHT },
+    // Test bidi string which ends with a character with weak directionality
+    // and in which the last character with strong directionality is a
+    // character with type R.
+    { L",\x05d0 foo bar \x05d1,", RIGHT_TO_LEFT },
+    // Test bidi string in which the last character with strong directionality
+    // is a character with type AL.
+    { L"\x0622 foo \x05d0 bar \x0622", RIGHT_TO_LEFT },
+    // Test a string without strong directionality characters.
+    { L",!.{}", LEFT_TO_RIGHT },
+    // Test empty string.
+    { L"", LEFT_TO_RIGHT },
+    // Test characters outside the BMP (e.g. Phoenician letters; please refer
+    // to http://demo.icu-project.org/icu-bin/ubrowse?scr=151&b=10910 for more
+    // information).
+    {
+#if defined(WCHAR_T_IS_UTF32)
+       L"abc 123" L" ! \x10910 !",
+#elif defined(WCHAR_T_IS_UTF16)
+       L"abc 123" L" ! \xd802\xdd10 !",
+#else
+#error wchar_t should be either UTF-16 or UTF-32
+#endif
+      RIGHT_TO_LEFT },
+    {
+#if defined(WCHAR_T_IS_UTF32)
+       L"abc 123" L" ! \x10401 !",
+#elif defined(WCHAR_T_IS_UTF16)
+       L"abc 123" L" ! \xd801\xdc01 !",
+#else
+#error wchar_t should be either UTF-16 or UTF-32
+#endif
+      LEFT_TO_RIGHT },
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i)
+    EXPECT_EQ(cases[i].direction,
+              GetLastStrongCharacterDirection(WideToUTF16(cases[i].text)));
+}
+
+TEST_F(RTLTest, GetStringDirection) {
+  struct {
+    const wchar_t* text;
+    TextDirection direction;
+  } cases[] = {
+    // Test pure LTR string.
+    { L"foobar", LEFT_TO_RIGHT },
+    { L".foobar", LEFT_TO_RIGHT },
+    { L"foo, bar", LEFT_TO_RIGHT },
+    // Test pure LTR with strong directionality characters of type LRE.
+    { L"\x202a\x202a", LEFT_TO_RIGHT },
+    { L".\x202a\x202a", LEFT_TO_RIGHT },
+    { L"\x202a, \x202a", LEFT_TO_RIGHT },
+    // Test pure LTR with strong directionality characters of type LRO.
+    { L"\x202d\x202d", LEFT_TO_RIGHT },
+    { L".\x202d\x202d", LEFT_TO_RIGHT },
+    { L"\x202d, \x202d", LEFT_TO_RIGHT },
+    // Test pure LTR with various types of strong directionality characters.
+    { L"foo \x202a\x202d", LEFT_TO_RIGHT },
+    { L".\x202d foo \x202a", LEFT_TO_RIGHT },
+    { L"\x202a, \x202d foo", LEFT_TO_RIGHT },
+    // Test pure RTL with strong directionality characters of type R.
+    { L"\x05d0\x05d0", RIGHT_TO_LEFT },
+    { L".\x05d0\x05d0", RIGHT_TO_LEFT },
+    { L"\x05d0, \x05d0", RIGHT_TO_LEFT },
+    // Test pure RTL with strong directionality characters of type RLE.
+    { L"\x202b\x202b", RIGHT_TO_LEFT },
+    { L".\x202b\x202b", RIGHT_TO_LEFT },
+    { L"\x202b, \x202b", RIGHT_TO_LEFT },
+    // Test pure RTL with strong directionality characters of type RLO.
+    { L"\x202e\x202e", RIGHT_TO_LEFT },
+    { L".\x202e\x202e", RIGHT_TO_LEFT },
+    { L"\x202e, \x202e", RIGHT_TO_LEFT },
+    // Test pure RTL with strong directionality characters of type AL.
+    { L"\x0622\x0622", RIGHT_TO_LEFT },
+    { L".\x0622\x0622", RIGHT_TO_LEFT },
+    { L"\x0622, \x0622", RIGHT_TO_LEFT },
+    // Test pure RTL with various types of strong directionality characters.
+    { L"\x05d0\x202b\x202e\x0622", RIGHT_TO_LEFT },
+    { L".\x202b\x202e\x0622\x05d0", RIGHT_TO_LEFT },
+    { L"\x0622\x202e, \x202b\x05d0", RIGHT_TO_LEFT },
+    // Test bidi strings.
+    { L"foo \x05d0 bar", UNKNOWN_DIRECTION },
+    { L"\x202b foo bar", UNKNOWN_DIRECTION },
+    { L"!foo \x0622 bar", UNKNOWN_DIRECTION },
+    { L"\x202a\x202b", UNKNOWN_DIRECTION },
+    { L"\x202e\x202d", UNKNOWN_DIRECTION },
+    { L"\x0622\x202a", UNKNOWN_DIRECTION },
+    { L"\x202d\x05d0", UNKNOWN_DIRECTION },
+    // Test a string without strong directionality characters.
+    { L",!.{}", LEFT_TO_RIGHT },
+    // Test empty string.
+    { L"", LEFT_TO_RIGHT },
+    {
+#if defined(WCHAR_T_IS_UTF32)
+      L" ! \x10910" L"abc 123",
+#elif defined(WCHAR_T_IS_UTF16)
+      L" ! \xd802\xdd10" L"abc 123",
+#else
+#error wchar_t should be either UTF-16 or UTF-32
+#endif
+      UNKNOWN_DIRECTION },
+    {
+#if defined(WCHAR_T_IS_UTF32)
+      L" ! \x10401" L"abc 123",
+#elif defined(WCHAR_T_IS_UTF16)
+      L" ! \xd801\xdc01" L"abc 123",
+#else
+#error wchar_t should be either UTF-16 or UTF-32
+#endif
+      LEFT_TO_RIGHT },
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i)
+    EXPECT_EQ(cases[i].direction,
+              GetStringDirection(WideToUTF16(cases[i].text)));
+}
+
+TEST_F(RTLTest, WrapPathWithLTRFormatting) {
+  const wchar_t* cases[] = {
+    // Test common path, such as "c:\foo\bar".
+    L"c:/foo/bar",
+    // Test path with file name, such as "c:\foo\bar\test.jpg".
+    L"c:/foo/bar/test.jpg",
+    // Test path ending with punctuation, such as "c:\(foo)\bar.".
+    L"c:/(foo)/bar.",
+    // Test path ending with separator, such as "c:\foo\bar\".
+    L"c:/foo/bar/",
+    // Test path with RTL character.
+    L"c:/\x05d0",
+    // Test path with 2 level RTL directory names.
+    L"c:/\x05d0/\x0622",
+    // Test path with mixed RTL/LTR directory names and ending with punctuation.
+    L"c:/\x05d0/\x0622/(foo)/b.a.r.",
+    // Test path without driver name, such as "/foo/bar/test/jpg".
+    L"/foo/bar/test.jpg",
+    // Test path start with current directory, such as "./foo".
+    L"./foo",
+    // Test path start with parent directory, such as "../foo/bar.jpg".
+    L"../foo/bar.jpg",
+    // Test absolute path, such as "//foo/bar.jpg".
+    L"//foo/bar.jpg",
+    // Test path with mixed RTL/LTR directory names.
+    L"c:/foo/\x05d0/\x0622/\x05d1.jpg",
+    // Test empty path.
+    L""
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    FilePath path;
+#if defined(OS_WIN)
+    std::wstring win_path(cases[i]);
+    std::replace(win_path.begin(), win_path.end(), '/', '\\');
+    path = FilePath(win_path);
+    std::wstring wrapped_expected =
+        std::wstring(L"\x202a") + win_path + L"\x202c";
+#else
+    path = FilePath(base::SysWideToNativeMB(cases[i]));
+    std::wstring wrapped_expected =
+        std::wstring(L"\x202a") + cases[i] + L"\x202c";
+#endif
+    string16 localized_file_path_string;
+    WrapPathWithLTRFormatting(path, &localized_file_path_string);
+
+    std::wstring wrapped_actual = UTF16ToWide(localized_file_path_string);
+    EXPECT_EQ(wrapped_expected, wrapped_actual);
+  }
+}
+
+TEST_F(RTLTest, WrapString) {
+  const wchar_t* cases[] = {
+    L" . ",
+    L"abc",
+    L"a" L"\x5d0\x5d1",
+    L"a" L"\x5d1" L"b",
+    L"\x5d0\x5d1\x5d2",
+    L"\x5d0\x5d1" L"a",
+    L"\x5d0" L"a" L"\x5d1",
+  };
+
+  const bool was_rtl = IsRTL();
+
+  test::ScopedRestoreICUDefaultLocale restore_locale;
+  for (size_t i = 0; i < 2; ++i) {
+    // Toggle the application default text direction (to try each direction).
+    SetRTL(!IsRTL());
+
+    string16 empty;
+    WrapStringWithLTRFormatting(&empty);
+    EXPECT_TRUE(empty.empty());
+    WrapStringWithRTLFormatting(&empty);
+    EXPECT_TRUE(empty.empty());
+
+    for (size_t i = 0; i < arraysize(cases); ++i) {
+      string16 input = WideToUTF16(cases[i]);
+      string16 ltr_wrap = input;
+      WrapStringWithLTRFormatting(&ltr_wrap);
+      EXPECT_EQ(ltr_wrap[0], kLeftToRightEmbeddingMark);
+      EXPECT_EQ(ltr_wrap.substr(1, ltr_wrap.length() - 2), input);
+      EXPECT_EQ(ltr_wrap[ltr_wrap.length() -1], kPopDirectionalFormatting);
+
+      string16 rtl_wrap = input;
+      WrapStringWithRTLFormatting(&rtl_wrap);
+      EXPECT_EQ(rtl_wrap[0], kRightToLeftEmbeddingMark);
+      EXPECT_EQ(rtl_wrap.substr(1, rtl_wrap.length() - 2), input);
+      EXPECT_EQ(rtl_wrap[rtl_wrap.length() -1], kPopDirectionalFormatting);
+    }
+  }
+
+  EXPECT_EQ(was_rtl, IsRTL());
+}
+
+TEST_F(RTLTest, GetDisplayStringInLTRDirectionality) {
+  struct {
+    const wchar_t* path;
+    bool wrap_ltr;
+    bool wrap_rtl;
+  } cases[] = {
+    { L"test",                   false, true },
+    { L"test.html",              false, true },
+    { L"\x05d0\x05d1\x05d2",     true,  true },
+    { L"\x05d0\x05d1\x05d2.txt", true,  true },
+    { L"\x05d0" L"abc",          true,  true },
+    { L"\x05d0" L"abc.txt",      true,  true },
+    { L"abc\x05d0\x05d1",        false, true },
+    { L"abc\x05d0\x05d1.jpg",    false, true },
+  };
+
+  const bool was_rtl = IsRTL();
+
+  test::ScopedRestoreICUDefaultLocale restore_locale;
+  for (size_t i = 0; i < 2; ++i) {
+    // Toggle the application default text direction (to try each direction).
+    SetRTL(!IsRTL());
+    for (size_t i = 0; i < arraysize(cases); ++i) {
+      string16 input = WideToUTF16(cases[i].path);
+      string16 output = GetDisplayStringInLTRDirectionality(input);
+      // Test the expected wrapping behavior for the current UI directionality.
+      if (IsRTL() ? cases[i].wrap_rtl : cases[i].wrap_ltr)
+        EXPECT_NE(output, input);
+      else
+        EXPECT_EQ(output, input);
+    }
+  }
+
+  EXPECT_EQ(was_rtl, IsRTL());
+}
+
+TEST_F(RTLTest, GetTextDirection) {
+  EXPECT_EQ(RIGHT_TO_LEFT, GetTextDirectionForLocale("ar"));
+  EXPECT_EQ(RIGHT_TO_LEFT, GetTextDirectionForLocale("ar_EG"));
+  EXPECT_EQ(RIGHT_TO_LEFT, GetTextDirectionForLocale("he"));
+  EXPECT_EQ(RIGHT_TO_LEFT, GetTextDirectionForLocale("he_IL"));
+  // iw is an obsolete code for Hebrew.
+  EXPECT_EQ(RIGHT_TO_LEFT, GetTextDirectionForLocale("iw"));
+  // Although we're not yet localized to Farsi and Urdu, we
+  // do have the text layout direction information for them.
+  EXPECT_EQ(RIGHT_TO_LEFT, GetTextDirectionForLocale("fa"));
+  EXPECT_EQ(RIGHT_TO_LEFT, GetTextDirectionForLocale("ur"));
+#if 0
+  // Enable these when we include the minimal locale data for Azerbaijani
+  // written in Arabic and Dhivehi. At the moment, our copy of
+  // ICU data does not have entries for them.
+  EXPECT_EQ(RIGHT_TO_LEFT, GetTextDirectionForLocale("az_Arab"));
+  // Dhivehi that uses Thaana script.
+  EXPECT_EQ(RIGHT_TO_LEFT, GetTextDirectionForLocale("dv"));
+#endif
+  EXPECT_EQ(LEFT_TO_RIGHT, GetTextDirectionForLocale("en"));
+  // Chinese in China with '-'.
+  EXPECT_EQ(LEFT_TO_RIGHT, GetTextDirectionForLocale("zh-CN"));
+  // Filipino : 3-letter code
+  EXPECT_EQ(LEFT_TO_RIGHT, GetTextDirectionForLocale("fil"));
+  // Russian
+  EXPECT_EQ(LEFT_TO_RIGHT, GetTextDirectionForLocale("ru"));
+  // Japanese that uses multiple scripts
+  EXPECT_EQ(LEFT_TO_RIGHT, GetTextDirectionForLocale("ja"));
+}
+
+TEST_F(RTLTest, GetTextDirectionForLocaleInStartUp) {
+  EXPECT_EQ(RIGHT_TO_LEFT, GetTextDirectionForLocaleInStartUp("ar"));
+  EXPECT_EQ(RIGHT_TO_LEFT, GetTextDirectionForLocaleInStartUp("ar_EG"));
+  EXPECT_EQ(RIGHT_TO_LEFT, GetTextDirectionForLocaleInStartUp("he"));
+  EXPECT_EQ(RIGHT_TO_LEFT, GetTextDirectionForLocaleInStartUp("he_IL"));
+  // iw is an obsolete code for Hebrew.
+  EXPECT_EQ(RIGHT_TO_LEFT, GetTextDirectionForLocaleInStartUp("iw"));
+  // Although we're not yet localized to Farsi and Urdu, we
+  // do have the text layout direction information for them.
+  EXPECT_EQ(RIGHT_TO_LEFT, GetTextDirectionForLocaleInStartUp("fa"));
+  EXPECT_EQ(RIGHT_TO_LEFT, GetTextDirectionForLocaleInStartUp("ur"));
+  EXPECT_EQ(LEFT_TO_RIGHT, GetTextDirectionForLocaleInStartUp("en"));
+  // Chinese in China with '-'.
+  EXPECT_EQ(LEFT_TO_RIGHT, GetTextDirectionForLocaleInStartUp("zh-CN"));
+  // Filipino : 3-letter code
+  EXPECT_EQ(LEFT_TO_RIGHT, GetTextDirectionForLocaleInStartUp("fil"));
+  // Russian
+  EXPECT_EQ(LEFT_TO_RIGHT, GetTextDirectionForLocaleInStartUp("ru"));
+  // Japanese that uses multiple scripts
+  EXPECT_EQ(LEFT_TO_RIGHT, GetTextDirectionForLocaleInStartUp("ja"));
+}
+
+TEST_F(RTLTest, UnadjustStringForLocaleDirection) {
+  // These test strings are borrowed from WrapPathWithLTRFormatting.
+  const wchar_t* cases[] = {
+    L"foo bar",
+    L"foo \x05d0 bar",
+    L"\x05d0 foo bar",
+    L"!foo \x05d0 bar",
+    L",\x05d0 foo bar",
+    L"\x202a \x05d0 foo  bar",
+    L"\x202d \x05d0 foo  bar",
+    L"\x202b foo \x05d0 bar",
+    L"\x202e foo \x05d0 bar",
+    L"\x0622 foo \x05d0 bar",
+  };
+
+  const bool was_rtl = IsRTL();
+
+  test::ScopedRestoreICUDefaultLocale restore_locale;
+  for (size_t i = 0; i < 2; ++i) {
+    // Toggle the application default text direction (to try each direction).
+    SetRTL(!IsRTL());
+
+    for (size_t i = 0; i < arraysize(cases); ++i) {
+      string16 test_case = WideToUTF16(cases[i]);
+      string16 adjusted_string = test_case;
+
+      if (!AdjustStringForLocaleDirection(&adjusted_string))
+        continue;
+
+      EXPECT_NE(test_case, adjusted_string);
+      EXPECT_TRUE(UnadjustStringForLocaleDirection(&adjusted_string));
+      EXPECT_EQ(test_case, adjusted_string) << " for test case [" << test_case
+                                            << "] with IsRTL() == " << IsRTL();
+    }
+  }
+
+  EXPECT_EQ(was_rtl, IsRTL());
+}
+
+TEST_F(RTLTest, EnsureTerminatedDirectionalFormatting) {
+  struct {
+    const wchar_t* unformatted_text;
+    const wchar_t* formatted_text;
+  } cases[] = {
+      // Tests string without any dir-formatting characters.
+      {L"google.com", L"google.com"},
+      // Tests string with properly terminated dir-formatting character.
+      {L"\x202egoogle.com\x202c", L"\x202egoogle.com\x202c"},
+      // Tests string with over-terminated dir-formatting characters.
+      {L"\x202egoogle\x202c.com\x202c", L"\x202egoogle\x202c.com\x202c"},
+      // Tests string beginning with a dir-formatting character.
+      {L"\x202emoc.elgoog", L"\x202emoc.elgoog\x202c"},
+      // Tests string that over-terminates then re-opens.
+      {L"\x202egoogle\x202c\x202c.\x202eom",
+       L"\x202egoogle\x202c\x202c.\x202eom\x202c"},
+      // Tests string containing a dir-formatting character in the middle.
+      {L"google\x202e.com", L"google\x202e.com\x202c"},
+      // Tests string with multiple dir-formatting characters.
+      {L"\x202egoogle\x202e.com/\x202eguest",
+       L"\x202egoogle\x202e.com/\x202eguest\x202c\x202c\x202c"},
+      // Test the other dir-formatting characters (U+202A, U+202B, and U+202D).
+      {L"\x202agoogle.com", L"\x202agoogle.com\x202c"},
+      {L"\x202bgoogle.com", L"\x202bgoogle.com\x202c"},
+      {L"\x202dgoogle.com", L"\x202dgoogle.com\x202c"},
+  };
+
+  const bool was_rtl = IsRTL();
+
+  test::ScopedRestoreICUDefaultLocale restore_locale;
+  for (size_t i = 0; i < 2; ++i) {
+    // Toggle the application default text direction (to try each direction).
+    SetRTL(!IsRTL());
+    for (size_t i = 0; i < arraysize(cases); ++i) {
+      string16 unsanitized_text = WideToUTF16(cases[i].unformatted_text);
+      string16 sanitized_text = WideToUTF16(cases[i].formatted_text);
+      EnsureTerminatedDirectionalFormatting(&unsanitized_text);
+      EXPECT_EQ(sanitized_text, unsanitized_text);
+    }
+  }
+  EXPECT_EQ(was_rtl, IsRTL());
+}
+
+TEST_F(RTLTest, SanitizeUserSuppliedString) {
+  struct {
+    const wchar_t* unformatted_text;
+    const wchar_t* formatted_text;
+  } cases[] = {
+      // Tests RTL string with properly terminated dir-formatting character.
+      {L"\x202eكبير Google التطبيق\x202c", L"\x202eكبير Google التطبيق\x202c"},
+      // Tests RTL string with over-terminated dir-formatting characters.
+      {L"\x202eكبير Google\x202cالتطبيق\x202c",
+       L"\x202eكبير Google\x202cالتطبيق\x202c"},
+      // Tests RTL string that over-terminates then re-opens.
+      {L"\x202eكبير Google\x202c\x202cالتطبيق\x202e",
+       L"\x202eكبير Google\x202c\x202cالتطبيق\x202e\x202c"},
+      // Tests RTL string with multiple dir-formatting characters.
+      {L"\x202eك\x202eبير Google الت\x202eطبيق",
+       L"\x202eك\x202eبير Google الت\x202eطبيق\x202c\x202c\x202c"},
+      // Test the other dir-formatting characters (U+202A, U+202B, and U+202D).
+      {L"\x202aكبير Google التطبيق", L"\x202aكبير Google التطبيق\x202c"},
+      {L"\x202bكبير Google التطبيق", L"\x202bكبير Google التطبيق\x202c"},
+      {L"\x202dكبير Google التطبيق", L"\x202dكبير Google التطبيق\x202c"},
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    // On Windows for an LTR locale, no changes to the string are made.
+    string16 prefix, suffix;
+#if !defined(OS_WIN)
+    prefix = WideToUTF16(L"\x200e\x202b");
+    suffix = WideToUTF16(L"\x202c\x200e");
+#endif  // !OS_WIN
+    string16 unsanitized_text = WideToUTF16(cases[i].unformatted_text);
+    string16 sanitized_text =
+        prefix + WideToUTF16(cases[i].formatted_text) + suffix;
+    SanitizeUserSuppliedString(&unsanitized_text);
+    EXPECT_EQ(sanitized_text, unsanitized_text);
+  }
+}
+
+class SetICULocaleTest : public PlatformTest {};
+
+TEST_F(SetICULocaleTest, OverlongLocaleId) {
+  test::ScopedRestoreICUDefaultLocale restore_locale;
+  std::string id("fr-ca-x-foo");
+  while (id.length() < 152)
+    id.append("-x-foo");
+  SetICUDefaultLocale(id);
+  EXPECT_STRNE("en_US", icu::Locale::getDefault().getName());
+  id.append("zzz");
+  SetICUDefaultLocale(id);
+  EXPECT_STREQ("en_US", icu::Locale::getDefault().getName());
+}
+
+}  // namespace i18n
+}  // namespace base
diff --git a/base/i18n/streaming_utf8_validator.cc b/base/i18n/streaming_utf8_validator.cc
new file mode 100644
index 0000000..19c86a3
--- /dev/null
+++ b/base/i18n/streaming_utf8_validator.cc
@@ -0,0 +1,59 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This implementation doesn't use ICU. The ICU macros are oriented towards
+// character-at-a-time processing, whereas byte-at-a-time processing is easier
+// with streaming input.
+
+#include "base/i18n/streaming_utf8_validator.h"
+
+#include "base/i18n/utf8_validator_tables.h"
+#include "base/logging.h"
+
+namespace base {
+namespace {
+
+uint8_t StateTableLookup(uint8_t offset) {
+  DCHECK_LT(offset, internal::kUtf8ValidatorTablesSize);
+  return internal::kUtf8ValidatorTables[offset];
+}
+
+}  // namespace
+
+StreamingUtf8Validator::State StreamingUtf8Validator::AddBytes(const char* data,
+                                                               size_t size) {
+  // Copy |state_| into a local variable so that the compiler doesn't have to be
+  // careful of aliasing.
+  uint8_t state = state_;
+  for (const char* p = data; p != data + size; ++p) {
+    if ((*p & 0x80) == 0) {
+      if (state == 0)
+        continue;
+      state = internal::I18N_UTF8_VALIDATOR_INVALID_INDEX;
+      break;
+    }
+    const uint8_t shift_amount = StateTableLookup(state);
+    const uint8_t shifted_char = (*p & 0x7F) >> shift_amount;
+    state = StateTableLookup(state + shifted_char + 1);
+    // State may be INVALID here, but this code is optimised for the case of
+    // valid UTF-8 and it is more efficient (by about 2%) to not attempt an
+    // early loop exit unless we hit an ASCII character.
+  }
+  state_ = state;
+  return state == 0 ? VALID_ENDPOINT
+      : state == internal::I18N_UTF8_VALIDATOR_INVALID_INDEX
+      ? INVALID
+      : VALID_MIDPOINT;
+}
+
+void StreamingUtf8Validator::Reset() {
+  state_ = 0u;
+}
+
+bool StreamingUtf8Validator::Validate(const std::string& string) {
+  return StreamingUtf8Validator().AddBytes(string.data(), string.size()) ==
+         VALID_ENDPOINT;
+}
+
+}  // namespace base
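A minimal usage sketch of the validator above; the chunk boundaries are
illustrative, since any split of the same bytes produces the same states:

#include "base/i18n/streaming_utf8_validator.h"

void ExampleStreamingValidation() {
  base::StreamingUtf8Validator validator;
  // U+3042 ("\xe3\x81\x82") arriving split across two reads.
  validator.AddBytes("\xe3\x81", 2);  // VALID_MIDPOINT: a valid prefix so far.
  validator.AddBytes("\x82", 1);      // VALID_ENDPOINT: sequence completed.
  validator.AddBytes("\xff", 1);      // INVALID, permanently...
  validator.Reset();                  // ...until the validator is reset.
}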
diff --git a/base/i18n/streaming_utf8_validator.h b/base/i18n/streaming_utf8_validator.h
new file mode 100644
index 0000000..ebf38a6
--- /dev/null
+++ b/base/i18n/streaming_utf8_validator.h
@@ -0,0 +1,66 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// A streaming validator for UTF-8. Validation is based on the definition in
+// RFC-3629. In particular, it does not reject the noncharacter code points
+// that base::IsStringUTF8() rejects.
+//
+// The implementation detects errors on the first possible byte.
+
+#ifndef BASE_I18N_STREAMING_UTF8_VALIDATOR_H_
+#define BASE_I18N_STREAMING_UTF8_VALIDATOR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <string>
+
+#include "base/i18n/base_i18n_export.h"
+#include "base/macros.h"
+
+namespace base {
+
+class BASE_I18N_EXPORT StreamingUtf8Validator {
+ public:
+  // The validator exposes 3 states. It starts in state VALID_ENDPOINT. As it
+  // processes characters it alternates between VALID_ENDPOINT and
+  // VALID_MIDPOINT. If it encounters an invalid byte or UTF-8 sequence the
+  // state changes permanently to INVALID.
+  enum State {
+    VALID_ENDPOINT,
+    VALID_MIDPOINT,
+    INVALID
+  };
+
+  StreamingUtf8Validator() : state_(0u) {}
+  // Trivial destructor intentionally omitted.
+
+  // Validate |size| bytes starting at |data|. If the concatenation of all calls
+  // to AddBytes() since this object was constructed or reset is a valid UTF-8
+  // string, returns VALID_ENDPOINT. If it could be the prefix of a valid UTF-8
+  // string, returns VALID_MIDPOINT. If an invalid byte or UTF-8 sequence was
+  // present, returns INVALID.
+  State AddBytes(const char* data, size_t size);
+
+  // Return the object to a freshly-constructed state so that it can be re-used.
+  void Reset();
+
+  // Validate a complete string using the same criteria. Returns true if the
+  // string only contains complete, valid UTF-8 codepoints.
+  static bool Validate(const std::string& string);
+
+ private:
+  // The current state of the validator. Value 0 is the initial/valid state.
+  // The state is stored as an offset into |kUtf8ValidatorTables|. The special
+  // state |I18N_UTF8_VALIDATOR_INVALID_INDEX| is invalid.
+  uint8_t state_;
+
+  // This type could be made copyable but there is currently no use-case for
+  // it.
+  DISALLOW_COPY_AND_ASSIGN(StreamingUtf8Validator);
+};
+
+}  // namespace base
+
+#endif  // BASE_I18N_STREAMING_UTF8_VALIDATOR_H_
diff --git a/base/i18n/streaming_utf8_validator_perftest.cc b/base/i18n/streaming_utf8_validator_perftest.cc
new file mode 100644
index 0000000..ad328f8
--- /dev/null
+++ b/base/i18n/streaming_utf8_validator_perftest.cc
@@ -0,0 +1,240 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// All data that is passed through a WebSocket with type "Text" needs to be
+// validated as UTF8. Since this is done on the IO thread, it needs to be
+// reasonably fast.
+
+// We are only interested in the performance on valid UTF8. Invalid UTF8 will
+// result in a connection failure, so is unlikely to become a source of
+// performance issues.
+
+#include "base/i18n/streaming_utf8_validator.h"
+
+#include <stddef.h>
+
+#include <string>
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/test/perf_time_logger.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+// We want to test ranges of valid UTF-8 sequences. These ranges are inclusive.
+// They are intended to be large enough that the validator needs to do
+// meaningful work while being in some sense "realistic" (e.g. control characters
+// are not included).
+const char kOneByteSeqRangeStart[] = " ";  // U+0020
+const char kOneByteSeqRangeEnd[] = "~";    // U+007E
+
+const char kTwoByteSeqRangeStart[] = "\xc2\xa0";  // U+00A0 non-breaking space
+const char kTwoByteSeqRangeEnd[] = "\xc9\x8f";    // U+024F small y with stroke
+
+const char kThreeByteSeqRangeStart[] = "\xe3\x81\x82";  // U+3042 Hiragana "a"
+const char kThreeByteSeqRangeEnd[] = "\xe9\xbf\x83";    // U+9FC3 "to blink"
+
+const char kFourByteSeqRangeStart[] = "\xf0\xa0\x80\x8b";  // U+2000B
+const char kFourByteSeqRangeEnd[] = "\xf0\xaa\x9a\xb2";    // U+2A6B2
+
+// The different lengths of strings to test.
+const size_t kTestLengths[] = {1, 32, 256, 32768, 1 << 20};
+
+// Simplest possible byte-at-a-time validator, to provide a baseline
+// for comparison. This is only tried on 1-byte UTF-8 sequences, as
+// the results will not be meaningful with sequences containing
+// top-bit-set bytes.
+bool IsString7Bit(const std::string& s) {
+  for (std::string::const_iterator it = s.begin(); it != s.end(); ++it) {
+    if (*it & 0x80)
+      return false;
+  }
+  return true;
+}
+
+// Assumes that |previous| is a valid UTF-8 sequence, and attempts to return
+// the next one. It is just barely smart enough to iterate through the ranges
+// defined above.
+std::string NextUtf8Sequence(const std::string& previous) {
+  DCHECK(StreamingUtf8Validator::Validate(previous));
+  std::string next = previous;
+  for (int i = static_cast<int>(previous.length() - 1); i >= 0; --i) {
+    // All bytes in a UTF-8 sequence except the first one are
+    // constrained to the range 0x80 to 0xbf, inclusive. When we
+    // increment past 0xbf, we carry into the previous byte.
+    if (i > 0 && next[i] == '\xbf') {
+      next[i] = '\x80';
+      continue;  // carry
+    }
+    ++next[i];
+    break;  // no carry
+  }
+  DCHECK(StreamingUtf8Validator::Validate(next))
+      << "Result \"" << next << "\" failed validation";
+  return next;
+}
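Two illustrative evaluations of the carry logic, consistent with the ranges
defined above:

  NextUtf8Sequence("a")        == "b"         // the last byte is incremented
  NextUtf8Sequence("\xc2\xbf") == "\xc3\x80"  // 0xbf wraps to 0x80 and the
                                              // carry increments the lead byte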
+
+typedef bool (*TestTargetType)(const std::string&);
+
+// Run function |target| over |test_string| |times| times, and report the results
+// using |description|.
+bool RunTest(const std::string& description,
+             TestTargetType target,
+             const std::string& test_string,
+             int times) {
+  base::PerfTimeLogger timer(description.c_str());
+  bool result = true;
+  for (int i = 0; i < times; ++i) {
+    result = target(test_string) && result;
+  }
+  timer.Done();
+  return result;
+}
+
+// Construct a string by repeating |input| enough times to equal or exceed
+// |length|.
+std::string ConstructRepeatedTestString(const std::string& input,
+                                        size_t length) {
+  std::string output = input;
+  while (output.length() * 2 < length) {
+    output += output;
+  }
+  if (output.length() < length) {
+    output += ConstructRepeatedTestString(input, length - output.length());
+  }
+  return output;
+}
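For instance, ConstructRepeatedTestString("ab", 5) doubles "ab" to "abab",
then recurses for the one remaining byte and returns "ababab": six bytes,
which satisfies the equal-or-exceed contract using only whole copies of
|input|.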
+
+// Construct a string by expanding the range of UTF-8 sequences
+// between |input_start| and |input_end|, inclusive, and then
+// repeating the resulting string until it equals or exceeds |length|
+// bytes. |input_start| and |input_end| must be valid UTF-8
+// sequences.
+std::string ConstructRangedTestString(const std::string& input_start,
+                                      const std::string& input_end,
+                                      size_t length) {
+  std::string output = input_start;
+  std::string input = input_start;
+  while (output.length() < length && input != input_end) {
+    input = NextUtf8Sequence(input);
+    output += input;
+  }
+  if (output.length() < length) {
+    output = ConstructRepeatedTestString(output, length);
+  }
+  return output;
+}
+
+struct TestFunctionDescription {
+  TestTargetType function;
+  const char* function_name;
+};
+
+bool IsStringUTF8(const std::string& str) {
+  return base::IsStringUTF8(base::StringPiece(str));
+}
+
+// IsString7Bit is intentionally placed last so it can be excluded easily.
+const TestFunctionDescription kTestFunctions[] = {
+    {&StreamingUtf8Validator::Validate, "StreamingUtf8Validator"},
+    {&IsStringUTF8, "IsStringUTF8"}, {&IsString7Bit, "IsString7Bit"}};
+
+// Construct a test string from |construct_test_string| for each of the lengths
+// in |kTestLengths| in turn. For each string, run each test in |test_functions|
+// for a number of iterations such that the total number of bytes validated
+// is around 16MB.
+void RunSomeTests(
+    const char format[],
+    base::Callback<std::string(size_t length)> construct_test_string,
+    const TestFunctionDescription* test_functions,
+    size_t test_count) {
+  for (size_t i = 0; i < arraysize(kTestLengths); ++i) {
+    const size_t length = kTestLengths[i];
+    const std::string test_string = construct_test_string.Run(length);
+    const int real_length = static_cast<int>(test_string.length());
+    const int times = (1 << 24) / real_length;
+    for (size_t test_index = 0; test_index < test_count; ++test_index) {
+      EXPECT_TRUE(RunTest(StringPrintf(format,
+                                       test_functions[test_index].function_name,
+                                       real_length,
+                                       times),
+                          test_functions[test_index].function,
+                          test_string,
+                          times));
+    }
+  }
+}
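Concretely, for a 256-byte test string the inner loop runs
(1 << 24) / 256 = 65536 times, so each function under test processes 2^24
bytes (16 MiB) in total whichever entry of |kTestLengths| is being used.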
+
+TEST(StreamingUtf8ValidatorPerfTest, OneByteRepeated) {
+  RunSomeTests("%s: bytes=1 repeated length=%d repeat=%d",
+               base::Bind(ConstructRepeatedTestString, kOneByteSeqRangeStart),
+               kTestFunctions,
+               3);
+}
+
+TEST(StreamingUtf8ValidatorPerfTest, OneByteRange) {
+  RunSomeTests("%s: bytes=1 ranged length=%d repeat=%d",
+               base::Bind(ConstructRangedTestString,
+                          kOneByteSeqRangeStart,
+                          kOneByteSeqRangeEnd),
+               kTestFunctions,
+               3);
+}
+
+TEST(StreamingUtf8ValidatorPerfTest, TwoByteRepeated) {
+  RunSomeTests("%s: bytes=2 repeated length=%d repeat=%d",
+               base::Bind(ConstructRepeatedTestString, kTwoByteSeqRangeStart),
+               kTestFunctions,
+               2);
+}
+
+TEST(StreamingUtf8ValidatorPerfTest, TwoByteRange) {
+  RunSomeTests("%s: bytes=2 ranged length=%d repeat=%d",
+               base::Bind(ConstructRangedTestString,
+                          kTwoByteSeqRangeStart,
+                          kTwoByteSeqRangeEnd),
+               kTestFunctions,
+               2);
+}
+
+TEST(StreamingUtf8ValidatorPerfTest, ThreeByteRepeated) {
+  RunSomeTests(
+      "%s: bytes=3 repeated length=%d repeat=%d",
+      base::Bind(ConstructRepeatedTestString, kThreeByteSeqRangeStart),
+      kTestFunctions,
+      2);
+}
+
+TEST(StreamingUtf8ValidatorPerfTest, ThreeByteRange) {
+  RunSomeTests("%s: bytes=3 ranged length=%d repeat=%d",
+               base::Bind(ConstructRangedTestString,
+                          kThreeByteSeqRangeStart,
+                          kThreeByteSeqRangeEnd),
+               kTestFunctions,
+               2);
+}
+
+TEST(StreamingUtf8ValidatorPerfTest, FourByteRepeated) {
+  RunSomeTests("%s: bytes=4 repeated length=%d repeat=%d",
+               base::Bind(ConstructRepeatedTestString, kFourByteSeqRangeStart),
+               kTestFunctions,
+               2);
+}
+
+TEST(StreamingUtf8ValidatorPerfTest, FourByteRange) {
+  RunSomeTests("%s: bytes=4 ranged length=%d repeat=%d",
+               base::Bind(ConstructRangedTestString,
+                          kFourByteSeqRangeStart,
+                          kFourByteSeqRangeEnd),
+               kTestFunctions,
+               2);
+}
+
+}  // namespace
+}  // namespace base
diff --git a/base/i18n/streaming_utf8_validator_unittest.cc b/base/i18n/streaming_utf8_validator_unittest.cc
new file mode 100644
index 0000000..f9772d0
--- /dev/null
+++ b/base/i18n/streaming_utf8_validator_unittest.cc
@@ -0,0 +1,412 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/i18n/streaming_utf8_validator.h"
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <string>
+
+#include "base/macros.h"
+#include "base/strings/string_piece.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// Define BASE_I18N_UTF8_VALIDATOR_THOROUGH_TEST to verify that this class
+// accepts exactly the same set of 4-byte strings as ICU-based validation. This
+// tests every possible 4-byte string, so it is too slow to run routinely on
+// low-powered machines.
+//
+// #define BASE_I18N_UTF8_VALIDATOR_THOROUGH_TEST
+
+#ifdef BASE_I18N_UTF8_VALIDATOR_THOROUGH_TEST
+
+#include "base/bind.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/strings/utf_string_conversion_utils.h"
+#include "base/synchronization/lock.h"
+#include "base/task_scheduler/post_task.h"
+#include "base/task_scheduler/task_scheduler.h"
+#include "third_party/icu/source/common/unicode/utf8.h"
+
+#endif  // BASE_I18N_UTF8_VALIDATOR_THOROUGH_TEST
+
+namespace base {
+namespace {
+
+// Avoid having to qualify the enum values in the tests.
+const StreamingUtf8Validator::State VALID_ENDPOINT =
+    StreamingUtf8Validator::VALID_ENDPOINT;
+const StreamingUtf8Validator::State VALID_MIDPOINT =
+    StreamingUtf8Validator::VALID_MIDPOINT;
+const StreamingUtf8Validator::State INVALID = StreamingUtf8Validator::INVALID;
+
+#ifdef BASE_I18N_UTF8_VALIDATOR_THOROUGH_TEST
+
+const uint32_t kThoroughTestChunkSize = 1 << 24;
+
+class StreamingUtf8ValidatorThoroughTest : public ::testing::Test {
+ protected:
+  StreamingUtf8ValidatorThoroughTest()
+      : tasks_dispatched_(0), tasks_finished_(0) {}
+
+  // This uses the same logic as base::IsStringUTF8 except it considers
+  // non-characters valid (and doesn't require a string as input).
+  static bool IsStringUtf8(const char* src, int32_t src_len) {
+    int32_t char_index = 0;
+
+    while (char_index < src_len) {
+      int32_t code_point;
+      U8_NEXT(src, char_index, src_len, code_point);
+      if (!base::IsValidCodepoint(code_point))
+        return false;
+    }
+    return true;
+  }
+
+  // Converts the passed-in integer to a 4 byte string and then
+  // verifies that IsStringUtf8 and StreamingUtf8Validator agree on
+  // whether it is valid UTF-8 or not.
+  void TestNumber(uint32_t n) const {
+    char test[sizeof n];
+    memcpy(test, &n, sizeof n);
+    StreamingUtf8Validator validator;
+    EXPECT_EQ(IsStringUtf8(test, sizeof n),
+              validator.AddBytes(test, sizeof n) == VALID_ENDPOINT)
+        << "Difference of opinion for \""
+        << base::StringPrintf("\\x%02X\\x%02X\\x%02X\\x%02X",
+                              test[0] & 0xFF,
+                              test[1] & 0xFF,
+                              test[2] & 0xFF,
+                              test[3] & 0xFF) << "\"";
+  }
+
+ public:
+  // Tests the 4-byte sequences corresponding to the |size| integers
+  // starting at |begin|. This is intended to be run from a worker
+  // pool. Signals |all_done_| at the end if it thinks all tasks are
+  // finished.
+  void TestRange(uint32_t begin, uint32_t size) {
+    for (uint32_t i = 0; i < size; ++i) {
+      TestNumber(begin + i);
+    }
+    base::AutoLock al(lock_);
+    ++tasks_finished_;
+    LOG(INFO) << tasks_finished_ << " / " << tasks_dispatched_
+              << " tasks done\n";
+  }
+
+ protected:
+  base::Lock lock_;
+  int tasks_dispatched_;
+  int tasks_finished_;
+};
+
+TEST_F(StreamingUtf8ValidatorThoroughTest, TestEverything) {
+  base::TaskScheduler::CreateAndStartWithDefaultParams(
+      "StreamingUtf8ValidatorThoroughTest");
+  {
+    base::AutoLock al(lock_);
+    uint32_t begin = 0;
+    do {
+      base::PostTaskWithTraits(
+          FROM_HERE, {base::TaskShutdownBehavior::BLOCK_SHUTDOWN},
+          base::BindOnce(&StreamingUtf8ValidatorThoroughTest::TestRange,
+                         base::Unretained(this), begin,
+                         kThoroughTestChunkSize));
+      ++tasks_dispatched_;
+      begin += kThoroughTestChunkSize;
+    } while (begin != 0);
+  }
+  base::TaskScheduler::GetInstance()->Shutdown();
+  base::TaskScheduler::GetInstance()->JoinForTesting();
+  base::TaskScheduler::SetInstance(nullptr);
+}
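Since kThoroughTestChunkSize is 1 << 24, the do/while loop dispatches
2^32 / 2^24 = 256 tasks before |begin| wraps back to zero, covering each of
the 2^32 possible 4-byte strings exactly once.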
+
+#endif  // BASE_I18N_UTF8_VALIDATOR_THOROUGH_TEST
+
+// These valid and invalid UTF-8 sequences are based on the tests from
+// base/strings/string_util_unittest.cc
+
+// All of the strings in |valid| must represent a single codepoint, because
+// partial sequences are constructed by taking non-empty prefixes of these
+// strings.
+const char* const valid[] = {"\r",           "\n",           "a",
+                             "\xc2\x81",     "\xe1\x80\xbf", "\xf1\x80\xa0\xbf",
+                             "\xef\xbb\xbf",  // UTF-8 BOM
+};
+
+const char* const* const valid_end = valid + arraysize(valid);
+
+const char* const invalid[] = {
+    // always invalid bytes
+    "\xc0", "\xc1",
+    "\xf5", "\xf6", "\xf7",
+    "\xf8", "\xf9", "\xfa", "\xfb", "\xfc", "\xfd", "\xfe", "\xff",
+    // surrogate code points
+    "\xed\xa0\x80", "\xed\x0a\x8f", "\xed\xbf\xbf",
+    //
+    // overlong sequences
+    "\xc0\x80",              // U+0000
+    "\xc1\x80",              // "A"
+    "\xc1\x81",              // "B"
+    "\xe0\x80\x80",          // U+0000
+    "\xe0\x82\x80",          // U+0080
+    "\xe0\x9f\xbf",          // U+07ff
+    "\xf0\x80\x80\x8D",      // U+000D
+    "\xf0\x80\x82\x91",      // U+0091
+    "\xf0\x80\xa0\x80",      // U+0800
+    "\xf0\x8f\xbb\xbf",      // U+FEFF (BOM)
+    "\xf8\x80\x80\x80\xbf",  // U+003F
+    "\xfc\x80\x80\x80\xa0\xa5",
+    //
+    // Beyond U+10FFFF
+    "\xf4\x90\x80\x80",          // U+110000
+    "\xf8\xa0\xbf\x80\xbf",      // 5 bytes
+    "\xfc\x9c\xbf\x80\xbf\x80",  // 6 bytes
+    //
+    // BOMs in UTF-16(BE|LE)
+    "\xfe\xff", "\xff\xfe",
+};
+
+const char* const* const invalid_end = invalid + arraysize(invalid);
+
+// A ForwardIterator which returns all the non-empty proper prefixes of the
+// elements of "valid", ie. every incomplete sequence but not the complete
+// sequences themselves.
+class PartialIterator {
+ public:
+  // The constructor returns the first iterator, ie. it is equivalent to
+  // begin().
+  PartialIterator() : index_(0), prefix_length_(0) { Advance(); }
+  // The trivial destructor is intentionally omitted.
+  // This is a value type; the default copy constructor and assignment operator
+  // generated by the compiler are used.
+
+  static PartialIterator end() { return PartialIterator(arraysize(valid), 1); }
+
+  PartialIterator& operator++() {
+    Advance();
+    return *this;
+  }
+
+  base::StringPiece operator*() const {
+    return base::StringPiece(valid[index_], prefix_length_);
+  }
+
+  bool operator==(const PartialIterator& rhs) const {
+    return index_ == rhs.index_ && prefix_length_ == rhs.prefix_length_;
+  }
+
+  bool operator!=(const PartialIterator& rhs) const { return !(rhs == *this); }
+
+ private:
+  // This constructor is used by the end() method.
+  PartialIterator(size_t index, size_t prefix_length)
+      : index_(index), prefix_length_(prefix_length) {}
+
+  void Advance() {
+    if (index_ < arraysize(valid) && prefix_length_ < strlen(valid[index_]))
+      ++prefix_length_;
+    while (index_ < arraysize(valid) &&
+           prefix_length_ == strlen(valid[index_])) {
+      ++index_;
+      prefix_length_ = 1;
+    }
+  }
+
+  // The UTF-8 sequence, as an offset into the |valid| array.
+  size_t index_;
+  size_t prefix_length_;
+};
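Spelled out against the |valid| array above, the traversal visits:

  "\xc2",
  "\xe1", "\xe1\x80",
  "\xf1", "\xf1\x80", "\xf1\x80\xa0",
  "\xef", "\xef\xbb"

The one-byte entries contribute nothing, because their only non-empty prefix
is the complete sequence itself.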
+
+// A test fixture for tests which test one UTF-8 sequence (or invalid
+// byte sequence) at a time.
+class StreamingUtf8ValidatorSingleSequenceTest : public ::testing::Test {
+ protected:
+  // When de-referenced, Iterator must be convertible to StringPiece.
+  template <typename Iterator>
+  void CheckRange(Iterator begin,
+                  Iterator end,
+                  StreamingUtf8Validator::State expected) {
+    for (Iterator it = begin; it != end; ++it) {
+      StreamingUtf8Validator validator;
+      base::StringPiece sequence = *it;
+      EXPECT_EQ(expected,
+                validator.AddBytes(sequence.data(), sequence.size()))
+          << "Failed for \"" << sequence << "\"";
+    }
+  }
+
+  // Adding input a byte at a time should make absolutely no difference.
+  template <typename Iterator>
+  void CheckRangeByteAtATime(Iterator begin,
+                             Iterator end,
+                             StreamingUtf8Validator::State expected) {
+    for (Iterator it = begin; it != end; ++it) {
+      StreamingUtf8Validator validator;
+      base::StringPiece sequence = *it;
+      StreamingUtf8Validator::State state = VALID_ENDPOINT;
+      for (base::StringPiece::const_iterator cit = sequence.begin();
+           cit != sequence.end();
+           ++cit) {
+        state = validator.AddBytes(&*cit, 1);
+      }
+      EXPECT_EQ(expected, state) << "Failed for \"" << sequence << "\"";
+    }
+  }
+};
+
+// A test fixture for tests which test the concatenation of byte sequences.
+class StreamingUtf8ValidatorDoubleSequenceTest : public ::testing::Test {
+ protected:
+  // Check every possible concatenation of byte sequences from two
+  // ranges, and verify that the combination matches the expected
+  // state.
+  template <typename Iterator1, typename Iterator2>
+  void CheckCombinations(Iterator1 begin1,
+                         Iterator1 end1,
+                         Iterator2 begin2,
+                         Iterator2 end2,
+                         StreamingUtf8Validator::State expected) {
+    StreamingUtf8Validator validator;
+    for (Iterator1 it1 = begin1; it1 != end1; ++it1) {
+      base::StringPiece c1 = *it1;
+      for (Iterator2 it2 = begin2; it2 != end2; ++it2) {
+        base::StringPiece c2 = *it2;
+        validator.AddBytes(c1.data(), c1.size());
+        EXPECT_EQ(expected, validator.AddBytes(c2.data(), c2.size()))
+            << "Failed for \"" << c1 << c2 << "\"";
+        validator.Reset();
+      }
+    }
+  }
+};
+
+TEST(StreamingUtf8ValidatorTest, NothingIsValid) {
+  static const char kNothing[] = "";
+  EXPECT_EQ(VALID_ENDPOINT, StreamingUtf8Validator().AddBytes(kNothing, 0));
+}
+
+// Because the members of the |valid| array need to be non-zero length
+// sequences and are measured with strlen(), |valid| cannot be used to test
+// the NUL character '\0', so the NUL character gets its own test.
+TEST(StreamingUtf8ValidatorTest, NulIsValid) {
+  static const char kNul[] = "\x00";
+  EXPECT_EQ(VALID_ENDPOINT, StreamingUtf8Validator().AddBytes(kNul, 1));
+}
+
+// Just a basic sanity test before we start getting fancy.
+TEST(StreamingUtf8ValidatorTest, HelloWorld) {
+  static const char kHelloWorld[] = "Hello, World!";
+  EXPECT_EQ(
+      VALID_ENDPOINT,
+      StreamingUtf8Validator().AddBytes(kHelloWorld, strlen(kHelloWorld)));
+}
+
+// Check that the Reset() method works.
+TEST(StreamingUtf8ValidatorTest, ResetWorks) {
+  StreamingUtf8Validator validator;
+  EXPECT_EQ(INVALID, validator.AddBytes("\xC0", 1));
+  EXPECT_EQ(INVALID, validator.AddBytes("a", 1));
+  validator.Reset();
+  EXPECT_EQ(VALID_ENDPOINT, validator.AddBytes("a", 1));
+}
+
+TEST_F(StreamingUtf8ValidatorSingleSequenceTest, Valid) {
+  CheckRange(valid, valid_end, VALID_ENDPOINT);
+}
+
+TEST_F(StreamingUtf8ValidatorSingleSequenceTest, Partial) {
+  CheckRange(PartialIterator(), PartialIterator::end(), VALID_MIDPOINT);
+}
+
+TEST_F(StreamingUtf8ValidatorSingleSequenceTest, Invalid) {
+  CheckRange(invalid, invalid_end, INVALID);
+}
+
+TEST_F(StreamingUtf8ValidatorSingleSequenceTest, ValidByByte) {
+  CheckRangeByteAtATime(valid, valid_end, VALID_ENDPOINT);
+}
+
+TEST_F(StreamingUtf8ValidatorSingleSequenceTest, PartialByByte) {
+  CheckRangeByteAtATime(
+      PartialIterator(), PartialIterator::end(), VALID_MIDPOINT);
+}
+
+TEST_F(StreamingUtf8ValidatorSingleSequenceTest, InvalidByByte) {
+  CheckRangeByteAtATime(invalid, invalid_end, INVALID);
+}
+
+TEST_F(StreamingUtf8ValidatorDoubleSequenceTest, ValidPlusValidIsValid) {
+  CheckCombinations(valid, valid_end, valid, valid_end, VALID_ENDPOINT);
+}
+
+TEST_F(StreamingUtf8ValidatorDoubleSequenceTest, ValidPlusPartialIsPartial) {
+  CheckCombinations(valid,
+                    valid_end,
+                    PartialIterator(),
+                    PartialIterator::end(),
+                    VALID_MIDPOINT);
+}
+
+TEST_F(StreamingUtf8ValidatorDoubleSequenceTest, PartialPlusValidIsInvalid) {
+  CheckCombinations(
+      PartialIterator(), PartialIterator::end(), valid, valid_end, INVALID);
+}
+
+TEST_F(StreamingUtf8ValidatorDoubleSequenceTest, PartialPlusPartialIsInvalid) {
+  CheckCombinations(PartialIterator(),
+                    PartialIterator::end(),
+                    PartialIterator(),
+                    PartialIterator::end(),
+                    INVALID);
+}
+
+TEST_F(StreamingUtf8ValidatorDoubleSequenceTest, ValidPlusInvalidIsInvalid) {
+  CheckCombinations(valid, valid_end, invalid, invalid_end, INVALID);
+}
+
+TEST_F(StreamingUtf8ValidatorDoubleSequenceTest, InvalidPlusValidIsInvalid) {
+  CheckCombinations(invalid, invalid_end, valid, valid_end, INVALID);
+}
+
+TEST_F(StreamingUtf8ValidatorDoubleSequenceTest, InvalidPlusInvalidIsInvalid) {
+  CheckCombinations(invalid, invalid_end, invalid, invalid_end, INVALID);
+}
+
+TEST_F(StreamingUtf8ValidatorDoubleSequenceTest, InvalidPlusPartialIsInvalid) {
+  CheckCombinations(
+      invalid, invalid_end, PartialIterator(), PartialIterator::end(), INVALID);
+}
+
+TEST_F(StreamingUtf8ValidatorDoubleSequenceTest, PartialPlusInvalidIsInvalid) {
+  CheckCombinations(
+      PartialIterator(), PartialIterator::end(), invalid, invalid_end, INVALID);
+}
+
+TEST(StreamingUtf8ValidatorValidateTest, EmptyIsValid) {
+  EXPECT_TRUE(StreamingUtf8Validator::Validate(std::string()));
+}
+
+TEST(StreamingUtf8ValidatorValidateTest, SimpleValidCase) {
+  EXPECT_TRUE(StreamingUtf8Validator::Validate("\xc2\x81"));
+}
+
+TEST(StreamingUtf8ValidatorValidateTest, SimpleInvalidCase) {
+  EXPECT_FALSE(StreamingUtf8Validator::Validate("\xc0\x80"));
+}
+
+TEST(StreamingUtf8ValidatorValidateTest, TruncatedIsInvalid) {
+  EXPECT_FALSE(StreamingUtf8Validator::Validate("\xc2"));
+}
+
+}  // namespace
+}  // namespace base
diff --git a/base/i18n/string_compare.cc b/base/i18n/string_compare.cc
new file mode 100644
index 0000000..649c281
--- /dev/null
+++ b/base/i18n/string_compare.cc
@@ -0,0 +1,29 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/i18n/string_compare.h"
+
+#include "base/logging.h"
+#include "base/strings/utf_string_conversions.h"
+#include "third_party/icu/source/common/unicode/unistr.h"
+
+namespace base {
+namespace i18n {
+
+// Compares the character data stored in two different string16 strings using
+// the specified Collator instance.
+UCollationResult CompareString16WithCollator(const icu::Collator& collator,
+                                             const string16& lhs,
+                                             const string16& rhs) {
+  UErrorCode error = U_ZERO_ERROR;
+  UCollationResult result = collator.compare(
+      icu::UnicodeString(FALSE, lhs.c_str(), static_cast<int>(lhs.length())),
+      icu::UnicodeString(FALSE, rhs.c_str(), static_cast<int>(rhs.length())),
+      error);
+  DCHECK(U_SUCCESS(error));
+  return result;
+}
+
+}  // namespace i18n
+}  // namespace base
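A hedged usage sketch; constructing the collator is the caller's job, and the
locale and strings here are only for illustration:

#include <memory>

#include "base/i18n/string_compare.h"
#include "base/logging.h"
#include "base/strings/utf_string_conversions.h"
#include "third_party/icu/source/i18n/unicode/coll.h"

void ExampleCollatedCompare() {
  UErrorCode error = U_ZERO_ERROR;
  std::unique_ptr<icu::Collator> collator(
      icu::Collator::createInstance(icu::Locale::getDefault(), error));
  if (U_FAILURE(error))
    return;
  // Collation compares letters before case, so "apple" < "Banana" here, even
  // though a byte comparison would put 'B' (0x42) before 'a' (0x61).
  UCollationResult result = base::i18n::CompareString16WithCollator(
      *collator, base::ASCIIToUTF16("apple"), base::ASCIIToUTF16("Banana"));
  DCHECK_EQ(UCOL_LESS, result);
}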
diff --git a/base/i18n/string_compare.h b/base/i18n/string_compare.h
new file mode 100644
index 0000000..5fcc5fe
--- /dev/null
+++ b/base/i18n/string_compare.h
@@ -0,0 +1,28 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_I18N_STRING_COMPARE_H_
+#define BASE_I18N_STRING_COMPARE_H_
+
+#include <algorithm>
+#include <string>
+#include <vector>
+
+#include "base/i18n/base_i18n_export.h"
+#include "base/strings/string16.h"
+#include "third_party/icu/source/i18n/unicode/coll.h"
+
+namespace base {
+namespace i18n {
+
+// Compares the two strings using the specified collator.
+BASE_I18N_EXPORT UCollationResult
+CompareString16WithCollator(const icu::Collator& collator,
+                            const string16& lhs,
+                            const string16& rhs);
+
+}  // namespace i18n
+}  // namespace base
+
+#endif  // BASE_I18N_STRING_COMPARE_H_
diff --git a/base/i18n/string_search.cc b/base/i18n/string_search.cc
new file mode 100644
index 0000000..2f6fee4
--- /dev/null
+++ b/base/i18n/string_search.cc
@@ -0,0 +1,81 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdint.h>
+
+#include "base/i18n/string_search.h"
+#include "base/logging.h"
+
+#include "third_party/icu/source/i18n/unicode/usearch.h"
+
+namespace base {
+namespace i18n {
+
+FixedPatternStringSearchIgnoringCaseAndAccents::
+FixedPatternStringSearchIgnoringCaseAndAccents(const string16& find_this)
+    : find_this_(find_this) {
+  // usearch_open requires a valid string argument to be searched, even if we
+  // are going to replace it via usearch_setText() afterwards, so we supply a
+  // dummy text here.
+  const string16& dummy = find_this_;
+
+  UErrorCode status = U_ZERO_ERROR;
+  search_ = usearch_open(find_this_.data(), find_this_.size(), dummy.data(),
+                         dummy.size(), uloc_getDefault(),
+                         nullptr,  // breakiter
+                         &status);
+  if (U_SUCCESS(status)) {
+    UCollator* collator = usearch_getCollator(search_);
+    ucol_setStrength(collator, UCOL_PRIMARY);
+    usearch_reset(search_);
+  }
+}
+
+FixedPatternStringSearchIgnoringCaseAndAccents::
+~FixedPatternStringSearchIgnoringCaseAndAccents() {
+  if (search_)
+    usearch_close(search_);
+}
+
+bool FixedPatternStringSearchIgnoringCaseAndAccents::Search(
+    const string16& in_this, size_t* match_index, size_t* match_length) {
+  UErrorCode status = U_ZERO_ERROR;
+  usearch_setText(search_, in_this.data(), in_this.size(), &status);
+
+  // Default to basic substring search if usearch fails. According to
+  // http://icu-project.org/apiref/icu4c/usearch_8h.html, usearch_open will fail
+  // if either |find_this| or |in_this| are empty. In either case basic
+  // substring search will give the correct return value.
+  if (!U_SUCCESS(status)) {
+    size_t index = in_this.find(find_this_);
+    if (index == string16::npos) {
+      return false;
+    } else {
+      if (match_index)
+        *match_index = index;
+      if (match_length)
+        *match_length = find_this_.size();
+      return true;
+    }
+  }
+
+  int32_t index = usearch_first(search_, &status);
+  if (!U_SUCCESS(status) || index == USEARCH_DONE)
+    return false;
+  if (match_index)
+    *match_index = static_cast<size_t>(index);
+  if (match_length)
+    *match_length = static_cast<size_t>(usearch_getMatchedLength(search_));
+  return true;
+}
+
+bool StringSearchIgnoringCaseAndAccents(const string16& find_this,
+                                        const string16& in_this,
+                                        size_t* match_index,
+                                        size_t* match_length) {
+  return FixedPatternStringSearchIgnoringCaseAndAccents(find_this).Search(
+      in_this, match_index, match_length);
+}
+
+}  // namespace i18n
+}  // namespace base
diff --git a/base/i18n/string_search.h b/base/i18n/string_search.h
new file mode 100644
index 0000000..07a29c1
--- /dev/null
+++ b/base/i18n/string_search.h
@@ -0,0 +1,55 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_I18N_STRING_SEARCH_H_
+#define BASE_I18N_STRING_SEARCH_H_
+
+#include <stddef.h>
+
+#include "base/i18n/base_i18n_export.h"
+#include "base/strings/string16.h"
+
+struct UStringSearch;
+
+namespace base {
+namespace i18n {
+
+// Returns true if |in_this| contains |find_this|. If |match_index| or
+// |match_length| are non-NULL, they are assigned the start position and total
+// length of the match.
+//
+// Only differences between base letters are taken into consideration. Case and
+// accent differences are ignored. Please refer to 'primary level' in
+// http://userguide.icu-project.org/collation/concepts for additional details.
+BASE_I18N_EXPORT
+    bool StringSearchIgnoringCaseAndAccents(const string16& find_this,
+                                            const string16& in_this,
+                                            size_t* match_index,
+                                            size_t* match_length);
+
+// This class is for speeding up multiple StringSearchIgnoringCaseAndAccents()
+// with the same |find_this| argument. |find_this| is passed as the constructor
+// argument, and the precomputation for searching is done only once, at
+// construction time.
+class BASE_I18N_EXPORT FixedPatternStringSearchIgnoringCaseAndAccents {
+ public:
+  explicit FixedPatternStringSearchIgnoringCaseAndAccents(
+      const string16& find_this);
+  ~FixedPatternStringSearchIgnoringCaseAndAccents();
+
+  // Returns true if |in_this| contains |find_this|. If |match_index| or
+  // |match_length| are non-NULL, they are assigned the start position and total
+  // length of the match.
+  bool Search(const string16& in_this,
+              size_t* match_index,
+              size_t* match_length);
+
+ private:
+  string16 find_this_;
+  UStringSearch* search_;
+};
+
+}  // namespace i18n
+}  // namespace base
+
+#endif  // BASE_I18N_STRING_SEARCH_H_
diff --git a/base/i18n/string_search_unittest.cc b/base/i18n/string_search_unittest.cc
new file mode 100644
index 0000000..69501d6
--- /dev/null
+++ b/base/i18n/string_search_unittest.cc
@@ -0,0 +1,228 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+
+#include <string>
+
+#include "base/i18n/rtl.h"
+#include "base/i18n/string_search.h"
+#include "base/strings/string16.h"
+#include "base/strings/utf_string_conversions.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/icu/source/i18n/unicode/usearch.h"
+
+namespace base {
+namespace i18n {
+
+// Note on setting default locale for testing: The current default locale on
+// the Mac trybot is en_US_POSIX, with which primary-level collation strength
+// string search is case-sensitive, when normally it should be
+// case-insensitive. In other locales (including en_US which English speakers
+// in the U.S. use), this search would be case-insensitive as expected.
+
+TEST(StringSearchTest, ASCII) {
+  std::string default_locale(uloc_getDefault());
+  bool locale_is_posix = (default_locale == "en_US_POSIX");
+  if (locale_is_posix)
+    SetICUDefaultLocale("en_US");
+
+  size_t index = 0;
+  size_t length = 0;
+
+  EXPECT_TRUE(StringSearchIgnoringCaseAndAccents(
+      ASCIIToUTF16("hello"), ASCIIToUTF16("hello world"), &index, &length));
+  EXPECT_EQ(0U, index);
+  EXPECT_EQ(5U, length);
+
+  EXPECT_FALSE(StringSearchIgnoringCaseAndAccents(
+      ASCIIToUTF16("h    e l l o"), ASCIIToUTF16("h   e l l o"),
+      &index, &length));
+
+  EXPECT_TRUE(StringSearchIgnoringCaseAndAccents(
+      ASCIIToUTF16("aabaaa"), ASCIIToUTF16("aaabaabaaa"), &index, &length));
+  EXPECT_EQ(4U, index);
+  EXPECT_EQ(6U, length);
+
+  EXPECT_FALSE(StringSearchIgnoringCaseAndAccents(
+      ASCIIToUTF16("searching within empty string"), string16(),
+      &index, &length));
+
+  EXPECT_TRUE(StringSearchIgnoringCaseAndAccents(
+      string16(), ASCIIToUTF16("searching for empty string"), &index, &length));
+  EXPECT_EQ(0U, index);
+  EXPECT_EQ(0U, length);
+
+  EXPECT_TRUE(StringSearchIgnoringCaseAndAccents(
+      ASCIIToUTF16("case insensitivity"), ASCIIToUTF16("CaSe InSeNsItIvItY"),
+      &index, &length));
+  EXPECT_EQ(0U, index);
+  EXPECT_EQ(18U, length);
+
+  if (locale_is_posix)
+    SetICUDefaultLocale(default_locale.data());
+}
+
+TEST(StringSearchTest, UnicodeLocaleIndependent) {
+  // Base characters
+  const string16 e_base = WideToUTF16(L"e");
+  const string16 E_base = WideToUTF16(L"E");
+  const string16 a_base = WideToUTF16(L"a");
+
+  // Composed characters
+  const string16 e_with_acute_accent = WideToUTF16(L"\u00e9");
+  const string16 E_with_acute_accent = WideToUTF16(L"\u00c9");
+  const string16 e_with_grave_accent = WideToUTF16(L"\u00e8");
+  const string16 E_with_grave_accent = WideToUTF16(L"\u00c8");
+  const string16 a_with_acute_accent = WideToUTF16(L"\u00e1");
+
+  // Decomposed characters
+  const string16 e_with_acute_combining_mark = WideToUTF16(L"e\u0301");
+  const string16 E_with_acute_combining_mark = WideToUTF16(L"E\u0301");
+  const string16 e_with_grave_combining_mark = WideToUTF16(L"e\u0300");
+  const string16 E_with_grave_combining_mark = WideToUTF16(L"E\u0300");
+  const string16 a_with_acute_combining_mark = WideToUTF16(L"a\u0301");
+
+  std::string default_locale(uloc_getDefault());
+  bool locale_is_posix = (default_locale == "en_US_POSIX");
+  if (locale_is_posix)
+    SetICUDefaultLocale("en_US");
+
+  size_t index = 0;
+  size_t length = 0;
+
+  EXPECT_TRUE(StringSearchIgnoringCaseAndAccents(
+      e_base, e_with_acute_accent, &index, &length));
+  EXPECT_EQ(0U, index);
+  EXPECT_EQ(e_with_acute_accent.size(), length);
+
+  EXPECT_TRUE(StringSearchIgnoringCaseAndAccents(
+      e_with_acute_accent, e_base, &index, &length));
+  EXPECT_EQ(0U, index);
+  EXPECT_EQ(e_base.size(), length);
+
+  EXPECT_TRUE(StringSearchIgnoringCaseAndAccents(
+      e_base, e_with_acute_combining_mark, &index, &length));
+  EXPECT_EQ(0U, index);
+  EXPECT_EQ(e_with_acute_combining_mark.size(), length);
+
+  EXPECT_TRUE(StringSearchIgnoringCaseAndAccents(
+      e_with_acute_combining_mark, e_base, &index, &length));
+  EXPECT_EQ(0U, index);
+  EXPECT_EQ(e_base.size(), length);
+
+  EXPECT_TRUE(StringSearchIgnoringCaseAndAccents(
+      e_with_acute_combining_mark, e_with_acute_accent,
+      &index, &length));
+  EXPECT_EQ(0U, index);
+  EXPECT_EQ(e_with_acute_accent.size(), length);
+
+  EXPECT_TRUE(StringSearchIgnoringCaseAndAccents(
+      e_with_acute_accent, e_with_acute_combining_mark,
+      &index, &length));
+  EXPECT_EQ(0U, index);
+  EXPECT_EQ(e_with_acute_combining_mark.size(), length);
+
+  EXPECT_TRUE(StringSearchIgnoringCaseAndAccents(
+      e_with_acute_combining_mark, e_with_grave_combining_mark,
+      &index, &length));
+  EXPECT_EQ(0U, index);
+  EXPECT_EQ(e_with_grave_combining_mark.size(), length);
+
+  EXPECT_TRUE(StringSearchIgnoringCaseAndAccents(
+      e_with_grave_combining_mark, e_with_acute_combining_mark,
+      &index, &length));
+  EXPECT_EQ(0U, index);
+  EXPECT_EQ(e_with_acute_combining_mark.size(), length);
+
+  EXPECT_TRUE(StringSearchIgnoringCaseAndAccents(
+      e_with_acute_combining_mark, e_with_grave_accent, &index, &length));
+  EXPECT_EQ(0U, index);
+  EXPECT_EQ(e_with_grave_accent.size(), length);
+
+  EXPECT_TRUE(StringSearchIgnoringCaseAndAccents(
+      e_with_grave_accent, e_with_acute_combining_mark, &index, &length));
+  EXPECT_EQ(0U, index);
+  EXPECT_EQ(e_with_acute_combining_mark.size(), length);
+
+  EXPECT_TRUE(StringSearchIgnoringCaseAndAccents(
+      E_with_acute_accent, e_with_acute_accent, &index, &length));
+  EXPECT_EQ(0U, index);
+  EXPECT_EQ(e_with_acute_accent.size(), length);
+
+  EXPECT_TRUE(StringSearchIgnoringCaseAndAccents(
+      E_with_grave_accent, e_with_acute_accent, &index, &length));
+  EXPECT_EQ(0U, index);
+  EXPECT_EQ(e_with_acute_accent.size(), length);
+
+  EXPECT_TRUE(StringSearchIgnoringCaseAndAccents(
+      E_with_acute_combining_mark, e_with_grave_accent, &index, &length));
+  EXPECT_EQ(0U, index);
+  EXPECT_EQ(e_with_grave_accent.size(), length);
+
+  EXPECT_TRUE(StringSearchIgnoringCaseAndAccents(
+      E_with_grave_combining_mark, e_with_acute_accent, &index, &length));
+  EXPECT_EQ(0U, index);
+  EXPECT_EQ(e_with_acute_accent.size(), length);
+
+  EXPECT_TRUE(StringSearchIgnoringCaseAndAccents(
+      E_base, e_with_grave_accent, &index, &length));
+  EXPECT_EQ(0U, index);
+  EXPECT_EQ(e_with_grave_accent.size(), length);
+
+  EXPECT_FALSE(StringSearchIgnoringCaseAndAccents(
+      a_with_acute_accent, e_with_acute_accent, &index, &length));
+
+  EXPECT_FALSE(StringSearchIgnoringCaseAndAccents(
+      a_with_acute_combining_mark, e_with_acute_combining_mark,
+      &index, &length));
+
+  if (locale_is_posix)
+    SetICUDefaultLocale(default_locale.data());
+}
+
+TEST(StringSearchTest, UnicodeLocaleDependent) {
+  // Base characters
+  const string16 a_base = WideToUTF16(L"a");
+
+  // Composed characters
+  const string16 a_with_ring = WideToUTF16(L"\u00e5");
+
+  EXPECT_TRUE(StringSearchIgnoringCaseAndAccents(a_base, a_with_ring, nullptr,
+                                                 nullptr));
+
+  // Copy the locale name, since SetICUDefaultLocale() below may invalidate
+  // the storage that uloc_getDefault() points into.
+  std::string default_locale(uloc_getDefault());
+  SetICUDefaultLocale("da");
+
+  EXPECT_FALSE(StringSearchIgnoringCaseAndAccents(a_base, a_with_ring, nullptr,
+                                                  nullptr));
+
+  SetICUDefaultLocale(default_locale.data());
+}
+
+TEST(StringSearchTest, FixedPatternMultipleSearch) {
+  std::string default_locale(uloc_getDefault());
+  bool locale_is_posix = (default_locale == "en_US_POSIX");
+  if (locale_is_posix)
+    SetICUDefaultLocale("en_US");
+
+  size_t index = 0;
+  size_t length = 0;
+
+  // Search "hello" over multiple texts.
+  FixedPatternStringSearchIgnoringCaseAndAccents query(ASCIIToUTF16("hello"));
+  EXPECT_TRUE(query.Search(ASCIIToUTF16("12hello34"), &index, &length));
+  EXPECT_EQ(2U, index);
+  EXPECT_EQ(5U, length);
+  EXPECT_FALSE(query.Search(ASCIIToUTF16("bye"), &index, &length));
+  EXPECT_TRUE(query.Search(ASCIIToUTF16("hELLo"), &index, &length));
+  EXPECT_EQ(0U, index);
+  EXPECT_EQ(5U, length);
+
+  if (locale_is_posix)
+    SetICUDefaultLocale(default_locale.data());
+}
+
+}  // namespace i18n
+}  // namespace base
diff --git a/base/i18n/time_formatting.cc b/base/i18n/time_formatting.cc
new file mode 100644
index 0000000..3a5394a
--- /dev/null
+++ b/base/i18n/time_formatting.cc
@@ -0,0 +1,301 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/i18n/time_formatting.h"
+
+#include <stddef.h>
+
+#include <memory>
+
+#include "base/i18n/unicodestring.h"
+#include "base/logging.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/time/time.h"
+#include "third_party/icu/source/common/unicode/utypes.h"
+#include "third_party/icu/source/i18n/unicode/datefmt.h"
+#include "third_party/icu/source/i18n/unicode/dtitvfmt.h"
+#include "third_party/icu/source/i18n/unicode/dtptngen.h"
+#include "third_party/icu/source/i18n/unicode/fmtable.h"
+#include "third_party/icu/source/i18n/unicode/measfmt.h"
+#include "third_party/icu/source/i18n/unicode/smpdtfmt.h"
+
+namespace base {
+namespace {
+
+string16 TimeFormat(const icu::DateFormat* formatter,
+                    const Time& time) {
+  DCHECK(formatter);
+  icu::UnicodeString date_string;
+
+  formatter->format(static_cast<UDate>(time.ToDoubleT() * 1000), date_string);
+  return i18n::UnicodeStringToString16(date_string);
+}
+
+string16 TimeFormatWithoutAmPm(const icu::DateFormat* formatter,
+                               const Time& time) {
+  DCHECK(formatter);
+  icu::UnicodeString time_string;
+
+  icu::FieldPosition ampm_field(icu::DateFormat::kAmPmField);
+  formatter->format(
+      static_cast<UDate>(time.ToDoubleT() * 1000), time_string, ampm_field);
+  int ampm_length = ampm_field.getEndIndex() - ampm_field.getBeginIndex();
+  if (ampm_length) {
+    int begin = ampm_field.getBeginIndex();
+    // The field position doesn't include any spacing before the field, so
+    // strip the preceding space as well, if there is one.
+    if (begin)
+      begin--;
+    time_string.removeBetween(begin, ampm_field.getEndIndex());
+  }
+  return i18n::UnicodeStringToString16(time_string);
+}
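For example, with an en_US 12-hour pattern the formatter first produces
"3:07 PM"; |ampm_field| then covers "PM", and the begin-- adjustment removes
the preceding space as well, leaving "3:07".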
+
+icu::SimpleDateFormat CreateSimpleDateFormatter(const char* pattern) {
+  // Generate a locale-dependent format pattern. The generator will take
+  // care of locale-dependent formatting issues like which separator to
+  // use (some locales use '.' instead of ':'), and where to put the am/pm
+  // marker.
+  UErrorCode status = U_ZERO_ERROR;
+  std::unique_ptr<icu::DateTimePatternGenerator> generator(
+      icu::DateTimePatternGenerator::createInstance(status));
+  DCHECK(U_SUCCESS(status));
+  icu::UnicodeString generated_pattern =
+      generator->getBestPattern(icu::UnicodeString(pattern), status);
+  DCHECK(U_SUCCESS(status));
+
+  // Then, format the time using the generated pattern.
+  icu::SimpleDateFormat formatter(generated_pattern, status);
+  DCHECK(U_SUCCESS(status));
+
+  return formatter;
+}
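Illustrative expansions (the exact patterns depend on the ICU locale data):
the skeleton "Hm" typically becomes "HH:mm" in en_US but "HH.mm" in a locale
whose time separator is '.', and "ahm" becomes "h:mm a", with the am/pm
marker placed wherever the locale expects it.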
+
+UMeasureFormatWidth DurationWidthToMeasureWidth(DurationFormatWidth width) {
+  switch (width) {
+    case DURATION_WIDTH_WIDE: return UMEASFMT_WIDTH_WIDE;
+    case DURATION_WIDTH_SHORT: return UMEASFMT_WIDTH_SHORT;
+    case DURATION_WIDTH_NARROW: return UMEASFMT_WIDTH_NARROW;
+    case DURATION_WIDTH_NUMERIC: return UMEASFMT_WIDTH_NUMERIC;
+  }
+  NOTREACHED();
+  return UMEASFMT_WIDTH_COUNT;
+}
+
+const char* DateFormatToString(DateFormat format) {
+  switch (format) {
+    case DATE_FORMAT_YEAR_MONTH:
+      return UDAT_YEAR_MONTH;
+    case DATE_FORMAT_MONTH_WEEKDAY_DAY:
+      return UDAT_MONTH_WEEKDAY_DAY;
+  }
+  NOTREACHED();
+  return UDAT_YEAR_MONTH_DAY;
+}
+
+}  // namespace
+
+string16 TimeFormatTimeOfDay(const Time& time) {
+  // We can omit the locale parameter because the default should match
+  // Chrome's application locale.
+  std::unique_ptr<icu::DateFormat> formatter(
+      icu::DateFormat::createTimeInstance(icu::DateFormat::kShort));
+  return TimeFormat(formatter.get(), time);
+}
+
+string16 TimeFormatTimeOfDayWithMilliseconds(const Time& time) {
+  icu::SimpleDateFormat formatter = CreateSimpleDateFormatter("HmsSSS");
+  return TimeFormatWithoutAmPm(&formatter, time);
+}
+
+string16 TimeFormatTimeOfDayWithHourClockType(const Time& time,
+                                              HourClockType type,
+                                              AmPmClockType ampm) {
+  // Just redirect to the normal function if the default type matches the
+  // given type.
+  HourClockType default_type = GetHourClockType();
+  if (default_type == type && (type == k24HourClock || ampm == kKeepAmPm)) {
+    return TimeFormatTimeOfDay(time);
+  }
+
+  const char* base_pattern = (type == k12HourClock ? "ahm" : "Hm");
+  icu::SimpleDateFormat formatter = CreateSimpleDateFormatter(base_pattern);
+
+  if (ampm == kKeepAmPm) {
+    return TimeFormat(&formatter, time);
+  } else {
+    return TimeFormatWithoutAmPm(&formatter, time);
+  }
+}
+
+string16 TimeFormatShortDate(const Time& time) {
+  std::unique_ptr<icu::DateFormat> formatter(
+      icu::DateFormat::createDateInstance(icu::DateFormat::kMedium));
+  return TimeFormat(formatter.get(), time);
+}
+
+string16 TimeFormatShortDateNumeric(const Time& time) {
+  std::unique_ptr<icu::DateFormat> formatter(
+      icu::DateFormat::createDateInstance(icu::DateFormat::kShort));
+  return TimeFormat(formatter.get(), time);
+}
+
+string16 TimeFormatShortDateAndTime(const Time& time) {
+  std::unique_ptr<icu::DateFormat> formatter(
+      icu::DateFormat::createDateTimeInstance(icu::DateFormat::kShort));
+  return TimeFormat(formatter.get(), time);
+}
+
+string16 TimeFormatShortDateAndTimeWithTimeZone(const Time& time) {
+  std::unique_ptr<icu::DateFormat> formatter(
+      icu::DateFormat::createDateTimeInstance(icu::DateFormat::kShort,
+                                              icu::DateFormat::kLong));
+  return TimeFormat(formatter.get(), time);
+}
+
+string16 TimeFormatMonthAndYear(const Time& time) {
+  icu::SimpleDateFormat formatter =
+      CreateSimpleDateFormatter(DateFormatToString(DATE_FORMAT_YEAR_MONTH));
+  return TimeFormat(&formatter, time);
+}
+
+string16 TimeFormatFriendlyDateAndTime(const Time& time) {
+  std::unique_ptr<icu::DateFormat> formatter(
+      icu::DateFormat::createDateTimeInstance(icu::DateFormat::kFull));
+  return TimeFormat(formatter.get(), time);
+}
+
+string16 TimeFormatFriendlyDate(const Time& time) {
+  std::unique_ptr<icu::DateFormat> formatter(
+      icu::DateFormat::createDateInstance(icu::DateFormat::kFull));
+  return TimeFormat(formatter.get(), time);
+}
+
+string16 TimeFormatWithPattern(const Time& time, const char* pattern) {
+  icu::SimpleDateFormat formatter = CreateSimpleDateFormatter(pattern);
+  return TimeFormat(&formatter, time);
+}
+
+bool TimeDurationFormat(const TimeDelta time,
+                        const DurationFormatWidth width,
+                        string16* out) {
+  DCHECK(out);
+  UErrorCode status = U_ZERO_ERROR;
+  const int total_minutes = static_cast<int>(time.InSecondsF() / 60 + 0.5);
+  const int hours = total_minutes / 60;
+  const int minutes = total_minutes % 60;
+  UMeasureFormatWidth u_width = DurationWidthToMeasureWidth(width);
+
+  // TODO(derat): Delete the |status| checks and LOG(ERROR) calls throughout
+  // this function once the cause of http://crbug.com/677043 is tracked down.
+  const icu::Measure measures[] = {
+      icu::Measure(hours, icu::MeasureUnit::createHour(status), status),
+      icu::Measure(minutes, icu::MeasureUnit::createMinute(status), status)};
+  if (U_FAILURE(status)) {
+    LOG(ERROR) << "Creating MeasureUnit or Measure for " << hours << "h"
+               << minutes << "m failed: " << u_errorName(status);
+    return false;
+  }
+
+  icu::MeasureFormat measure_format(icu::Locale::getDefault(), u_width, status);
+  if (U_FAILURE(status)) {
+    LOG(ERROR) << "Creating MeasureFormat for "
+               << icu::Locale::getDefault().getName()
+               << " failed: " << u_errorName(status);
+    return false;
+  }
+
+  icu::UnicodeString formatted;
+  icu::FieldPosition ignore(icu::FieldPosition::DONT_CARE);
+  measure_format.formatMeasures(measures, 2, formatted, ignore, status);
+  if (U_FAILURE(status)) {
+    LOG(ERROR) << "formatMeasures failed: " << u_errorName(status);
+    return false;
+  }
+
+  *out = i18n::UnicodeStringToString16(formatted);
+  return true;
+}
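A short usage sketch; the outputs in the comments assume the en_US locale and
initialized ICU data:

#include "base/i18n/time_formatting.h"
#include "base/strings/string16.h"
#include "base/time/time.h"

void ExampleDurationFormat() {
  base::string16 out;
  if (base::TimeDurationFormat(base::TimeDelta::FromMinutes(187),
                               base::DURATION_WIDTH_NARROW, &out)) {
    // out == "3h 7m". DURATION_WIDTH_WIDE would give "3 hours, 7 minutes",
    // and DURATION_WIDTH_NUMERIC "3:07".
  }
}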
+
+bool TimeDurationFormatWithSeconds(const TimeDelta time,
+                                   const DurationFormatWidth width,
+                                   string16* out) {
+  DCHECK(out);
+  UErrorCode status = U_ZERO_ERROR;
+  const int64_t total_seconds = static_cast<int64_t>(time.InSecondsF() + 0.5);
+  const int hours = total_seconds / 3600;
+  const int minutes = (total_seconds - hours * 3600) / 60;
+  const int seconds = total_seconds % 60;
+  UMeasureFormatWidth u_width = DurationWidthToMeasureWidth(width);
+
+  const icu::Measure measures[] = {
+      icu::Measure(hours, icu::MeasureUnit::createHour(status), status),
+      icu::Measure(minutes, icu::MeasureUnit::createMinute(status), status),
+      icu::Measure(seconds, icu::MeasureUnit::createSecond(status), status)};
+  icu::MeasureFormat measure_format(icu::Locale::getDefault(), u_width, status);
+  icu::UnicodeString formatted;
+  icu::FieldPosition ignore(icu::FieldPosition::DONT_CARE);
+  measure_format.formatMeasures(measures, 3, formatted, ignore, status);
+  *out = i18n::UnicodeStringToString16(formatted);
+  return U_SUCCESS(status);
+}
+
+string16 DateIntervalFormat(const Time& begin_time,
+                            const Time& end_time,
+                            DateFormat format) {
+  UErrorCode status = U_ZERO_ERROR;
+
+  std::unique_ptr<icu::DateIntervalFormat> formatter(
+      icu::DateIntervalFormat::createInstance(DateFormatToString(format),
+                                              status));
+
+  icu::FieldPosition pos = 0;
+  UDate start_date = static_cast<UDate>(begin_time.ToDoubleT() * 1000);
+  UDate end_date = static_cast<UDate>(end_time.ToDoubleT() * 1000);
+  icu::DateInterval interval(start_date, end_date);
+  icu::UnicodeString formatted;
+  formatter->format(&interval, formatted, pos, status);
+  return i18n::UnicodeStringToString16(formatted);
+}
+
+HourClockType GetHourClockType() {
+  // TODO(satorux,jshin): Rework this with ures_getByKeyWithFallback()
+  // once it becomes public. The short time format can be found at
+  // "calendar/gregorian/DateTimePatterns/3" in the resources.
+  std::unique_ptr<icu::SimpleDateFormat> formatter(
+      static_cast<icu::SimpleDateFormat*>(
+          icu::DateFormat::createTimeInstance(icu::DateFormat::kShort)));
+  // Retrieve the short time format.
+  icu::UnicodeString pattern_unicode;
+  formatter->toPattern(pattern_unicode);
+
+  // Determine what hour clock type the current locale uses, by checking
+  // "a" (am/pm marker) in the short time format. This is reliable as "a"
+  // is used by all of 12-hour clock formats, but not any of 24-hour clock
+  // formats, as shown below.
+  //
+  // % grep -A4 DateTimePatterns third_party/icu/source/data/locales/*.txt |
+  //   grep -B1 -- -- |grep -v -- '--' |
+  //   perl -nle 'print $1 if /^\S+\s+"(.*)"/' |sort -u
+  //
+  // H.mm
+  // H:mm
+  // HH.mm
+  // HH:mm
+  // a h:mm
+  // ah:mm
+  // ahh:mm
+  // h-mm a
+  // h:mm a
+  // hh:mm a
+  //
+  // See http://userguide.icu-project.org/formatparse/datetime for details
+  // about the date/time format syntax.
+  if (pattern_unicode.indexOf('a') == -1) {
+    return k24HourClock;
+  } else {
+    return k12HourClock;
+  }
+}
+
+}  // namespace base
diff --git a/base/i18n/time_formatting.h b/base/i18n/time_formatting.h
new file mode 100644
index 0000000..41793b3
--- /dev/null
+++ b/base/i18n/time_formatting.h
@@ -0,0 +1,142 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Basic time formatting methods.  These methods use the current locale
+// formatting for displaying the time.
+
+#ifndef BASE_I18N_TIME_FORMATTING_H_
+#define BASE_I18N_TIME_FORMATTING_H_
+
+#include "base/compiler_specific.h"
+#include "base/i18n/base_i18n_export.h"
+#include "base/strings/string16.h"
+
+namespace base {
+
+class Time;
+class TimeDelta;
+
+// Argument type used to specify the hour clock type.
+enum HourClockType {
+  k12HourClock,  // Uses 1-12. e.g., "3:07 PM"
+  k24HourClock,  // Uses 0-23. e.g., "15:07"
+};
+
+// Argument type used to specify whether or not to include AM/PM sign.
+enum AmPmClockType {
+  kDropAmPm,  // Drops AM/PM sign. e.g., "3:07"
+  kKeepAmPm,  // Keeps AM/PM sign. e.g., "3:07 PM"
+};
+
+// Should match UMeasureFormatWidth in measfmt.h; replicated here to avoid
+// making this file depend on third_party/icu.
+enum DurationFormatWidth {
+  DURATION_WIDTH_WIDE,    // "3 hours, 7 minutes"
+  DURATION_WIDTH_SHORT,   // "3 hr, 7 min"
+  DURATION_WIDTH_NARROW,  // "3h 7m"
+  DURATION_WIDTH_NUMERIC  // "3:07"
+};
+
+// Date formats from third_party/icu/source/i18n/unicode/udat.h. Add more as
+// necessary.
+enum DateFormat {
+  // November 2007
+  DATE_FORMAT_YEAR_MONTH,
+  // Tuesday, 7 November
+  DATE_FORMAT_MONTH_WEEKDAY_DAY,
+};
+
+// TODO(derat@chromium.org): Update all of these functions to return boolean
+// "success" values and use out-params for formatted strings:
+// http://crbug.com/698802
+
+// Returns the time of day, e.g., "3:07 PM".
+BASE_I18N_EXPORT string16 TimeFormatTimeOfDay(const Time& time);
+
+// Returns the time of day in 24-hour clock format with millisecond accuracy,
+// e.g., "15:07:30.568"
+BASE_I18N_EXPORT string16 TimeFormatTimeOfDayWithMilliseconds(const Time& time);
+
+// Returns the time of day in the specified hour clock type. e.g.
+// "3:07 PM" (type == k12HourClock, ampm == kKeepAmPm).
+// "3:07"    (type == k12HourClock, ampm == kDropAmPm).
+// "15:07"   (type == k24HourClock).
+BASE_I18N_EXPORT string16 TimeFormatTimeOfDayWithHourClockType(
+    const Time& time,
+    HourClockType type,
+    AmPmClockType ampm);
+
+// Returns a shortened date, e.g. "Nov 7, 2007"
+BASE_I18N_EXPORT string16 TimeFormatShortDate(const Time& time);
+
+// Returns a numeric date such as 12/13/52.
+BASE_I18N_EXPORT string16 TimeFormatShortDateNumeric(const Time& time);
+
+// Returns a numeric date and time such as "12/13/52 2:44:30 PM".
+BASE_I18N_EXPORT string16 TimeFormatShortDateAndTime(const Time& time);
+
+// Returns a month and year, e.g. "November 2007"
+BASE_I18N_EXPORT string16 TimeFormatMonthAndYear(const Time& time);
+
+// Returns a numeric date and time with time zone such as
+// "12/13/52 2:44:30 PM PST".
+BASE_I18N_EXPORT string16
+TimeFormatShortDateAndTimeWithTimeZone(const Time& time);
+
+// Formats a time in a friendly sentence format, e.g.
+// "Monday, March 6, 2008 2:44:30 PM".
+BASE_I18N_EXPORT string16 TimeFormatFriendlyDateAndTime(const Time& time);
+
+// Formats a time in a friendly sentence format, e.g.
+// "Monday, March 6, 2008".
+BASE_I18N_EXPORT string16 TimeFormatFriendlyDate(const Time& time);
+
+// Formats a time using a skeleton to produce a format for different locales
+// when an unusual time format is needed, e.g. "Feb. 2, 18:00".
+//
+// See http://userguide.icu-project.org/formatparse/datetime for details.
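+//
+// Illustrative usage (a sketch; |time| is a base::Time value and the output
+// assumes the en_US locale):
+//
+//   base::string16 s = base::TimeFormatWithPattern(time, "yMMMd");
+//   // s is e.g. "Apr 30, 2011".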
+BASE_I18N_EXPORT string16 TimeFormatWithPattern(const Time& time,
+                                                const char* pattern);
+
+// Formats a time duration of hours and minutes into various formats, e.g.,
+// "3:07" or "3 hours, 7 minutes", and returns true on success. See
+// DurationFormatWidth for details.
+//
+// Please don't use width = DURATION_WIDTH_NUMERIC when the time duration
+// may be larger than 24 hours, as the hour value is truncated to below 24
+// after formatting.
+// TODO(chengx): fix function output when width = DURATION_WIDTH_NUMERIC
+// (http://crbug.com/675791)
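+//
+// Illustrative usage (a sketch; the output assumes the en_US locale):
+//
+//   base::string16 str;
+//   if (base::TimeDurationFormat(base::TimeDelta::FromMinutes(187),
+//                                base::DURATION_WIDTH_WIDE, &str)) {
+//     // str is "3 hours, 7 minutes".
+//   }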
+BASE_I18N_EXPORT bool TimeDurationFormat(const TimeDelta time,
+                                         const DurationFormatWidth width,
+                                         string16* out) WARN_UNUSED_RESULT;
+
+// Formats a time duration of hours, minutes and seconds into various formats,
+// e.g., "3:07:30" or "3 hours, 7 minutes, 30 seconds", and returns true on
+// success. See DurationFormatWidth for details.
+//
+// Please don't use width = DURATION_WIDTH_NUMERIC when the time duration
+// may be larger than 24 hours, as the hour value is truncated to below 24
+// after formatting.
+// TODO(chengx): fix function output when width = DURATION_WIDTH_NUMERIC
+// (http://crbug.com/675791)
+BASE_I18N_EXPORT bool TimeDurationFormatWithSeconds(
+    const TimeDelta time,
+    const DurationFormatWidth width,
+    string16* out) WARN_UNUSED_RESULT;
+
+// Formats a date interval into various formats, e.g. "2 December - 4 December"
+// or "March 2016 - December 2016". See DateFormat for details.
+BASE_I18N_EXPORT string16 DateIntervalFormat(const Time& begin_time,
+                                             const Time& end_time,
+                                             DateFormat format);
+
+// Gets the hour clock type of the current locale. e.g.
+// k12HourClock (en-US).
+// k24HourClock (en-GB).
+BASE_I18N_EXPORT HourClockType GetHourClockType();
+
+}  // namespace base
+
+#endif  // BASE_I18N_TIME_FORMATTING_H_
diff --git a/base/i18n/time_formatting_unittest.cc b/base/i18n/time_formatting_unittest.cc
new file mode 100644
index 0000000..027b7c9
--- /dev/null
+++ b/base/i18n/time_formatting_unittest.cc
@@ -0,0 +1,434 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/i18n/time_formatting.h"
+
+#include <memory>
+
+#include "base/i18n/rtl.h"
+#include "base/i18n/unicodestring.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/test/icu_test_util.h"
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/icu/source/common/unicode/uversion.h"
+#include "third_party/icu/source/i18n/unicode/calendar.h"
+#include "third_party/icu/source/i18n/unicode/timezone.h"
+#include "third_party/icu/source/i18n/unicode/tzfmt.h"
+
+namespace base {
+namespace {
+
+const Time::Exploded kTestDateTimeExploded = {
+    2011, 4,  6, 30,  // Sat, Apr 30, 2011
+    22,   42, 7, 0    // 22:42:07.000 in UTC = 15:42:07 in US PDT.
+};
+
+// Returns difference between the local time and GMT formatted as string.
+// This function gets |time| because the difference depends on time,
+// see https://en.wikipedia.org/wiki/Daylight_saving_time for details.
+string16 GetShortTimeZone(const Time& time) {
+  UErrorCode status = U_ZERO_ERROR;
+  std::unique_ptr<icu::TimeZone> zone(icu::TimeZone::createDefault());
+  std::unique_ptr<icu::TimeZoneFormat> zone_formatter(
+      icu::TimeZoneFormat::createInstance(icu::Locale::getDefault(), status));
+  EXPECT_TRUE(U_SUCCESS(status));
+  icu::UnicodeString name;
+  zone_formatter->format(UTZFMT_STYLE_SPECIFIC_SHORT, *zone,
+                         static_cast<UDate>(time.ToDoubleT() * 1000),
+                         name, nullptr);
+  return i18n::UnicodeStringToString16(name);
+}
+
+// Calls TimeDurationFormat() with |delta| and |width| and returns the resulting
+// string. On failure, adds a failed expectation and returns an empty string.
+string16 TimeDurationFormatString(const TimeDelta& delta,
+                                  DurationFormatWidth width) {
+  string16 str;
+  EXPECT_TRUE(TimeDurationFormat(delta, width, &str))
+      << "Failed to format " << delta.ToInternalValue() << " with width "
+      << width;
+  return str;
+}
+
+// Calls TimeDurationFormatWithSeconds() with |delta| and |width| and returns
+// the resulting string. On failure, adds a failed expectation and returns an
+// empty string.
+string16 TimeDurationFormatWithSecondsString(const TimeDelta& delta,
+                                             DurationFormatWidth width) {
+  string16 str;
+  EXPECT_TRUE(TimeDurationFormatWithSeconds(delta, width, &str))
+      << "Failed to format " << delta.ToInternalValue() << " with width "
+      << width;
+  return str;
+}
+
+class ScopedRestoreDefaultTimezone {
+ public:
+  explicit ScopedRestoreDefaultTimezone(const char* zoneid) {
+    original_zone_.reset(icu::TimeZone::createDefault());
+    icu::TimeZone::adoptDefault(icu::TimeZone::createTimeZone(zoneid));
+  }
+  ~ScopedRestoreDefaultTimezone() {
+    icu::TimeZone::adoptDefault(original_zone_.release());
+  }
+
+  ScopedRestoreDefaultTimezone(const ScopedRestoreDefaultTimezone&) = delete;
+  ScopedRestoreDefaultTimezone& operator=(const ScopedRestoreDefaultTimezone&) =
+      delete;
+
+ private:
+  std::unique_ptr<icu::TimeZone> original_zone_;
+};
+
+TEST(TimeFormattingTest, TimeFormatTimeOfDayDefault12h) {
+  // Test for a locale that defaults to the 12h clock.
+  // As an example, we use third_party/icu/source/data/locales/en.txt.
+  test::ScopedRestoreICUDefaultLocale restore_locale;
+  i18n::SetICUDefaultLocale("en_US");
+  ScopedRestoreDefaultTimezone la_time("America/Los_Angeles");
+
+  Time time;
+  EXPECT_TRUE(Time::FromUTCExploded(kTestDateTimeExploded, &time));
+  string16 clock24h(ASCIIToUTF16("15:42"));
+  string16 clock12h_pm(ASCIIToUTF16("3:42 PM"));
+  string16 clock12h(ASCIIToUTF16("3:42"));
+  string16 clock24h_millis(ASCIIToUTF16("15:42:07.000"));
+
+  // The default is 12h clock.
+  EXPECT_EQ(clock12h_pm, TimeFormatTimeOfDay(time));
+  EXPECT_EQ(clock24h_millis, TimeFormatTimeOfDayWithMilliseconds(time));
+  EXPECT_EQ(k12HourClock, GetHourClockType());
+  // k{Keep,Drop}AmPm should not affect the 24h clock.
+  EXPECT_EQ(clock24h,
+            TimeFormatTimeOfDayWithHourClockType(time,
+                                                 k24HourClock,
+                                                 kKeepAmPm));
+  EXPECT_EQ(clock24h,
+            TimeFormatTimeOfDayWithHourClockType(time,
+                                                 k24HourClock,
+                                                 kDropAmPm));
+  // k{Keep,Drop}AmPm affects the 12h clock.
+  EXPECT_EQ(clock12h_pm,
+            TimeFormatTimeOfDayWithHourClockType(time,
+                                                 k12HourClock,
+                                                 kKeepAmPm));
+  EXPECT_EQ(clock12h,
+            TimeFormatTimeOfDayWithHourClockType(time,
+                                                 k12HourClock,
+                                                 kDropAmPm));
+}
+
+TEST(TimeFormattingTest, TimeFormatTimeOfDayDefault24h) {
+  // Test for a locale that defaults to the 24h clock.
+  // As an example, we use third_party/icu/source/data/locales/en_GB.txt.
+  test::ScopedRestoreICUDefaultLocale restore_locale;
+  i18n::SetICUDefaultLocale("en_GB");
+  ScopedRestoreDefaultTimezone la_time("America/Los_Angeles");
+
+  Time time;
+  EXPECT_TRUE(Time::FromUTCExploded(kTestDateTimeExploded, &time));
+  string16 clock24h(ASCIIToUTF16("15:42"));
+  string16 clock12h_pm(ASCIIToUTF16("3:42 pm"));
+  string16 clock12h(ASCIIToUTF16("3:42"));
+  string16 clock24h_millis(ASCIIToUTF16("15:42:07.000"));
+
+  // The default is 24h clock.
+  EXPECT_EQ(clock24h, TimeFormatTimeOfDay(time));
+  EXPECT_EQ(clock24h_millis, TimeFormatTimeOfDayWithMilliseconds(time));
+  EXPECT_EQ(k24HourClock, GetHourClockType());
+  // k{Keep,Drop}AmPm should not affect the 24h clock.
+  EXPECT_EQ(clock24h,
+            TimeFormatTimeOfDayWithHourClockType(time,
+                                                 k24HourClock,
+                                                 kKeepAmPm));
+  EXPECT_EQ(clock24h,
+            TimeFormatTimeOfDayWithHourClockType(time,
+                                                 k24HourClock,
+                                                 kDropAmPm));
+  // k{Keep,Drop}AmPm affects the 12h clock.
+  EXPECT_EQ(clock12h_pm,
+            TimeFormatTimeOfDayWithHourClockType(time,
+                                                 k12HourClock,
+                                                 kKeepAmPm));
+  EXPECT_EQ(clock12h,
+            TimeFormatTimeOfDayWithHourClockType(time,
+                                                 k12HourClock,
+                                                 kDropAmPm));
+}
+
+TEST(TimeFormattingTest, TimeFormatTimeOfDayJP) {
+  // Test for a locale that uses marks other than "AM" and "PM".
+  // As an example, we use third_party/icu/source/data/locales/ja.txt.
+  test::ScopedRestoreICUDefaultLocale restore_locale;
+  i18n::SetICUDefaultLocale("ja_JP");
+  ScopedRestoreDefaultTimezone la_time("America/Los_Angeles");
+
+  Time time;
+  EXPECT_TRUE(Time::FromUTCExploded(kTestDateTimeExploded, &time));
+  string16 clock24h(ASCIIToUTF16("15:42"));
+  string16 clock12h_pm(UTF8ToUTF16(u8"午後3:42"));
+  string16 clock12h(ASCIIToUTF16("3:42"));
+
+  // The default is 24h clock.
+  EXPECT_EQ(clock24h, TimeFormatTimeOfDay(time));
+  EXPECT_EQ(k24HourClock, GetHourClockType());
+  // k{Keep,Drop}AmPm should not affect the 24h clock.
+  EXPECT_EQ(clock24h, TimeFormatTimeOfDayWithHourClockType(time, k24HourClock,
+                                                           kKeepAmPm));
+  EXPECT_EQ(clock24h, TimeFormatTimeOfDayWithHourClockType(time, k24HourClock,
+                                                           kDropAmPm));
+  // k{Keep,Drop}AmPm affects the 12h clock.
+  EXPECT_EQ(clock12h_pm, TimeFormatTimeOfDayWithHourClockType(
+                             time, k12HourClock, kKeepAmPm));
+  EXPECT_EQ(clock12h, TimeFormatTimeOfDayWithHourClockType(time, k12HourClock,
+                                                           kDropAmPm));
+}
+
+TEST(TimeFormattingTest, TimeFormatTimeOfDayDE) {
+  // Test for a locale that uses marks other than "AM" and "PM".
+  // As an example, we use third_party/icu/source/data/locales/de.txt.
+  test::ScopedRestoreICUDefaultLocale restore_locale;
+  i18n::SetICUDefaultLocale("de");
+  ScopedRestoreDefaultTimezone la_time("America/Los_Angeles");
+
+  Time time;
+  EXPECT_TRUE(Time::FromUTCExploded(kTestDateTimeExploded, &time));
+  string16 clock24h(ASCIIToUTF16("15:42"));
+  string16 clock12h_pm(UTF8ToUTF16("3:42 nachm."));
+  string16 clock12h(ASCIIToUTF16("3:42"));
+
+  // The default is 24h clock.
+  EXPECT_EQ(clock24h, TimeFormatTimeOfDay(time));
+  EXPECT_EQ(k24HourClock, GetHourClockType());
+  // k{Keep,Drop}AmPm should not affect the 24h clock.
+  EXPECT_EQ(clock24h,
+            TimeFormatTimeOfDayWithHourClockType(time,
+                                                 k24HourClock,
+                                                 kKeepAmPm));
+  EXPECT_EQ(clock24h,
+            TimeFormatTimeOfDayWithHourClockType(time,
+                                                 k24HourClock,
+                                                 kDropAmPm));
+  // k{Keep,Drop}AmPm affects the 12h clock.
+  EXPECT_EQ(clock12h_pm,
+            TimeFormatTimeOfDayWithHourClockType(time,
+                                                 k12HourClock,
+                                                 kKeepAmPm));
+  EXPECT_EQ(clock12h,
+            TimeFormatTimeOfDayWithHourClockType(time,
+                                                 k12HourClock,
+                                                 kDropAmPm));
+}
+
+TEST(TimeFormattingTest, TimeFormatDateUS) {
+  // See third_party/icu/source/data/locales/en.txt.
+  // The date patterns are "EEEE, MMMM d, y", "MMM d, y", and "M/d/yy".
+  test::ScopedRestoreICUDefaultLocale restore_locale;
+  i18n::SetICUDefaultLocale("en_US");
+  ScopedRestoreDefaultTimezone la_time("America/Los_Angeles");
+
+  Time time;
+  EXPECT_TRUE(Time::FromUTCExploded(kTestDateTimeExploded, &time));
+
+  EXPECT_EQ(ASCIIToUTF16("Apr 30, 2011"), TimeFormatShortDate(time));
+  EXPECT_EQ(ASCIIToUTF16("4/30/11"), TimeFormatShortDateNumeric(time));
+
+  EXPECT_EQ(ASCIIToUTF16("4/30/11, 3:42:07 PM"),
+            TimeFormatShortDateAndTime(time));
+  EXPECT_EQ(ASCIIToUTF16("4/30/11, 3:42:07 PM ") + GetShortTimeZone(time),
+            TimeFormatShortDateAndTimeWithTimeZone(time));
+
+  EXPECT_EQ(ASCIIToUTF16("April 2011"), TimeFormatMonthAndYear(time));
+
+  EXPECT_EQ(ASCIIToUTF16("Saturday, April 30, 2011 at 3:42:07 PM"),
+            TimeFormatFriendlyDateAndTime(time));
+
+  EXPECT_EQ(ASCIIToUTF16("Saturday, April 30, 2011"),
+            TimeFormatFriendlyDate(time));
+}
+
+TEST(TimeFormattingTest, TimeFormatDateGB) {
+  // See third_party/icu/source/data/locales/en_GB.txt.
+  // The date patterns are "EEEE, d MMMM y", "d MMM y", and "dd/MM/yyyy".
+  test::ScopedRestoreICUDefaultLocale restore_locale;
+  i18n::SetICUDefaultLocale("en_GB");
+  ScopedRestoreDefaultTimezone la_time("America/Los_Angeles");
+
+  Time time;
+  EXPECT_TRUE(Time::FromUTCExploded(kTestDateTimeExploded, &time));
+
+  EXPECT_EQ(ASCIIToUTF16("30 Apr 2011"), TimeFormatShortDate(time));
+  EXPECT_EQ(ASCIIToUTF16("30/04/2011"), TimeFormatShortDateNumeric(time));
+  EXPECT_EQ(ASCIIToUTF16("30/04/2011, 15:42:07"),
+            TimeFormatShortDateAndTime(time));
+  EXPECT_EQ(ASCIIToUTF16("30/04/2011, 15:42:07 ") + GetShortTimeZone(time),
+            TimeFormatShortDateAndTimeWithTimeZone(time));
+  EXPECT_EQ(ASCIIToUTF16("April 2011"), TimeFormatMonthAndYear(time));
+  EXPECT_EQ(ASCIIToUTF16("Saturday, 30 April 2011 at 15:42:07"),
+            TimeFormatFriendlyDateAndTime(time));
+  EXPECT_EQ(ASCIIToUTF16("Saturday, 30 April 2011"),
+            TimeFormatFriendlyDate(time));
+}
+
+TEST(TimeFormattingTest, TimeFormatWithPattern) {
+  test::ScopedRestoreICUDefaultLocale restore_locale;
+  ScopedRestoreDefaultTimezone la_time("America/Los_Angeles");
+
+  Time time;
+  EXPECT_TRUE(Time::FromUTCExploded(kTestDateTimeExploded, &time));
+
+  i18n::SetICUDefaultLocale("en_US");
+  EXPECT_EQ(ASCIIToUTF16("Apr 30, 2011"), TimeFormatWithPattern(time, "yMMMd"));
+  EXPECT_EQ(ASCIIToUTF16("April 30, 3:42:07 PM"),
+            TimeFormatWithPattern(time, "MMMMdjmmss"));
+
+  i18n::SetICUDefaultLocale("en_GB");
+  EXPECT_EQ(ASCIIToUTF16("30 Apr 2011"), TimeFormatWithPattern(time, "yMMMd"));
+  EXPECT_EQ(ASCIIToUTF16("30 April, 15:42:07"),
+            TimeFormatWithPattern(time, "MMMMdjmmss"));
+
+  i18n::SetICUDefaultLocale("ja_JP");
+  EXPECT_EQ(UTF8ToUTF16(u8"2011年4月30日"),
+            TimeFormatWithPattern(time, "yMMMd"));
+  EXPECT_EQ(UTF8ToUTF16(u8"4月30日 15:42:07"),
+            TimeFormatWithPattern(time, "MMMMdjmmss"));
+}
+
+TEST(TimeFormattingTest, TimeDurationFormat) {
+  test::ScopedRestoreICUDefaultLocale restore_locale;
+  TimeDelta delta = TimeDelta::FromMinutes(15 * 60 + 42);
+
+  // US English.
+  i18n::SetICUDefaultLocale("en_US");
+  EXPECT_EQ(ASCIIToUTF16("15 hours, 42 minutes"),
+            TimeDurationFormatString(delta, DURATION_WIDTH_WIDE));
+  EXPECT_EQ(ASCIIToUTF16("15 hr, 42 min"),
+            TimeDurationFormatString(delta, DURATION_WIDTH_SHORT));
+  EXPECT_EQ(ASCIIToUTF16("15h 42m"),
+            TimeDurationFormatString(delta, DURATION_WIDTH_NARROW));
+  EXPECT_EQ(ASCIIToUTF16("15:42"),
+            TimeDurationFormatString(delta, DURATION_WIDTH_NUMERIC));
+
+  // Danish, with Latin alphabet but different abbreviations and punctuation.
+  i18n::SetICUDefaultLocale("da");
+  EXPECT_EQ(ASCIIToUTF16("15 timer og 42 minutter"),
+            TimeDurationFormatString(delta, DURATION_WIDTH_WIDE));
+  EXPECT_EQ(ASCIIToUTF16("15 t og 42 min."),
+            TimeDurationFormatString(delta, DURATION_WIDTH_SHORT));
+  EXPECT_EQ(ASCIIToUTF16("15 t og 42 min"),
+            TimeDurationFormatString(delta, DURATION_WIDTH_NARROW));
+  EXPECT_EQ(ASCIIToUTF16("15.42"),
+            TimeDurationFormatString(delta, DURATION_WIDTH_NUMERIC));
+
+  // Persian, with non-Arabic numbers.
+  i18n::SetICUDefaultLocale("fa");
+  string16 fa_wide = UTF8ToUTF16(
+      u8"\u06f1\u06f5 \u0633\u0627\u0639\u062a \u0648 \u06f4\u06f2 \u062f\u0642"
+      u8"\u06cc\u0642\u0647");
+  string16 fa_short = UTF8ToUTF16(
+      u8"\u06f1\u06f5 \u0633\u0627\u0639\u062a\u060c\u200f \u06f4\u06f2 \u062f"
+      u8"\u0642\u06cc\u0642\u0647");
+  string16 fa_narrow = UTF8ToUTF16(
+      u8"\u06f1\u06f5 \u0633\u0627\u0639\u062a \u06f4\u06f2 \u062f\u0642\u06cc"
+      u8"\u0642\u0647");
+  string16 fa_numeric = UTF8ToUTF16(u8"\u06f1\u06f5:\u06f4\u06f2");
+  EXPECT_EQ(fa_wide, TimeDurationFormatString(delta, DURATION_WIDTH_WIDE));
+  EXPECT_EQ(fa_short, TimeDurationFormatString(delta, DURATION_WIDTH_SHORT));
+  EXPECT_EQ(fa_narrow, TimeDurationFormatString(delta, DURATION_WIDTH_NARROW));
+  EXPECT_EQ(fa_numeric,
+            TimeDurationFormatString(delta, DURATION_WIDTH_NUMERIC));
+}
+
+TEST(TimeFormattingTest, TimeDurationFormatWithSeconds) {
+  test::ScopedRestoreICUDefaultLocale restore_locale;
+
+  // US English.
+  i18n::SetICUDefaultLocale("en_US");
+
+  // Test different formats.
+  TimeDelta delta = TimeDelta::FromSeconds(15 * 3600 + 42 * 60 + 30);
+  EXPECT_EQ(ASCIIToUTF16("15 hours, 42 minutes, 30 seconds"),
+            TimeDurationFormatWithSecondsString(delta, DURATION_WIDTH_WIDE));
+  EXPECT_EQ(ASCIIToUTF16("15 hr, 42 min, 30 sec"),
+            TimeDurationFormatWithSecondsString(delta, DURATION_WIDTH_SHORT));
+  EXPECT_EQ(ASCIIToUTF16("15h 42m 30s"),
+            TimeDurationFormatWithSecondsString(delta, DURATION_WIDTH_NARROW));
+  EXPECT_EQ(ASCIIToUTF16("15:42:30"),
+            TimeDurationFormatWithSecondsString(delta, DURATION_WIDTH_NUMERIC));
+
+  // Test edge case when hour >= 100.
+  delta = TimeDelta::FromSeconds(125 * 3600 + 42 * 60 + 30);
+  EXPECT_EQ(ASCIIToUTF16("125 hours, 42 minutes, 30 seconds"),
+            TimeDurationFormatWithSecondsString(delta, DURATION_WIDTH_WIDE));
+  EXPECT_EQ(ASCIIToUTF16("125 hr, 42 min, 30 sec"),
+            TimeDurationFormatWithSecondsString(delta, DURATION_WIDTH_SHORT));
+  EXPECT_EQ(ASCIIToUTF16("125h 42m 30s"),
+            TimeDurationFormatWithSecondsString(delta, DURATION_WIDTH_NARROW));
+
+  // Test edge case when minute = 0.
+  delta = TimeDelta::FromSeconds(15 * 3600 + 0 * 60 + 30);
+  EXPECT_EQ(ASCIIToUTF16("15 hours, 0 minutes, 30 seconds"),
+            TimeDurationFormatWithSecondsString(delta, DURATION_WIDTH_WIDE));
+  EXPECT_EQ(ASCIIToUTF16("15 hr, 0 min, 30 sec"),
+            TimeDurationFormatWithSecondsString(delta, DURATION_WIDTH_SHORT));
+  EXPECT_EQ(ASCIIToUTF16("15h 0m 30s"),
+            TimeDurationFormatWithSecondsString(delta, DURATION_WIDTH_NARROW));
+  EXPECT_EQ(ASCIIToUTF16("15:00:30"),
+            TimeDurationFormatWithSecondsString(delta, DURATION_WIDTH_NUMERIC));
+
+  // Test edge case when second = 0.
+  delta = TimeDelta::FromSeconds(15 * 3600 + 42 * 60 + 0);
+  EXPECT_EQ(ASCIIToUTF16("15 hours, 42 minutes, 0 seconds"),
+            TimeDurationFormatWithSecondsString(delta, DURATION_WIDTH_WIDE));
+  EXPECT_EQ(ASCIIToUTF16("15 hr, 42 min, 0 sec"),
+            TimeDurationFormatWithSecondsString(delta, DURATION_WIDTH_SHORT));
+  EXPECT_EQ(ASCIIToUTF16("15h 42m 0s"),
+            TimeDurationFormatWithSecondsString(delta, DURATION_WIDTH_NARROW));
+  EXPECT_EQ(ASCIIToUTF16("15:42:00"),
+            TimeDurationFormatWithSecondsString(delta, DURATION_WIDTH_NUMERIC));
+}
+
+TEST(TimeFormattingTest, TimeIntervalFormat) {
+  test::ScopedRestoreICUDefaultLocale restore_locale;
+  i18n::SetICUDefaultLocale("en_US");
+  ScopedRestoreDefaultTimezone la_time("America/Los_Angeles");
+
+  const Time::Exploded kTestIntervalEndTimeExploded = {
+      2011, 5,  6, 28,  // Sat, May 28, 2011
+      22,   42, 7, 0    // 22:42:07.000
+  };
+
+  Time begin_time;
+  EXPECT_TRUE(Time::FromUTCExploded(kTestDateTimeExploded, &begin_time));
+  Time end_time;
+  EXPECT_TRUE(Time::FromUTCExploded(kTestIntervalEndTimeExploded, &end_time));
+
+  EXPECT_EQ(
+      UTF8ToUTF16(u8"Saturday, April 30 – Saturday, May 28"),
+      DateIntervalFormat(begin_time, end_time, DATE_FORMAT_MONTH_WEEKDAY_DAY));
+
+  const Time::Exploded kTestIntervalBeginTimeExploded = {
+      2011, 5,  1, 16,  // Mon, May 16, 2011
+      22,   42, 7, 0    // 22:42:07.000
+  };
+  EXPECT_TRUE(
+      Time::FromUTCExploded(kTestIntervalBeginTimeExploded, &begin_time));
+  EXPECT_EQ(
+      UTF8ToUTF16(u8"Monday, May 16 – Saturday, May 28"),
+      DateIntervalFormat(begin_time, end_time, DATE_FORMAT_MONTH_WEEKDAY_DAY));
+
+  i18n::SetICUDefaultLocale("en_GB");
+  EXPECT_EQ(
+      UTF8ToUTF16(u8"Monday 16 – Saturday 28 May"),
+      DateIntervalFormat(begin_time, end_time, DATE_FORMAT_MONTH_WEEKDAY_DAY));
+
+  i18n::SetICUDefaultLocale("ja");
+  EXPECT_EQ(
+      UTF8ToUTF16(u8"5月16日(月曜日)~28日(土曜日)"),
+      DateIntervalFormat(begin_time, end_time, DATE_FORMAT_MONTH_WEEKDAY_DAY));
+}
+
+}  // namespace
+}  // namespace base
diff --git a/base/i18n/timezone.cc b/base/i18n/timezone.cc
new file mode 100644
index 0000000..8624e07
--- /dev/null
+++ b/base/i18n/timezone.cc
@@ -0,0 +1,34 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/i18n/timezone.h"
+
+#include <memory>
+#include <string>
+
+#include "third_party/icu/source/common/unicode/unistr.h"
+#include "third_party/icu/source/i18n/unicode/timezone.h"
+
+namespace base {
+
+std::string CountryCodeForCurrentTimezone() {
+  std::unique_ptr<icu::TimeZone> zone(icu::TimeZone::createDefault());
+  icu::UnicodeString id;
+  // ICU returns '001' (world) for Etc/GMT. Preserve the old behavior
+  // only for Etc/GMT while returning an empty string for Etc/UTC and
+  // Etc/UCT, because they're less likely to be chosen by mistake in the
+  // UK in place of Europe/London (British Time).
+  if (zone->getID(id) == UNICODE_STRING_SIMPLE("Etc/GMT"))
+    return "GB";
+  char region_code[4];
+  UErrorCode status = U_ZERO_ERROR;
+  int length = zone->getRegion(id, region_code, 4, status);
+  // Return an empty string if region_code is a 3-digit numeric code such
+  // as 001 (World) for Etc/UTC, Etc/UCT.
+  return (U_SUCCESS(status) && length == 2)
+             ? std::string(region_code, static_cast<size_t>(length))
+             : std::string();
+}
+
+}  // namespace base
diff --git a/base/i18n/timezone.h b/base/i18n/timezone.h
new file mode 100644
index 0000000..7557d44
--- /dev/null
+++ b/base/i18n/timezone.h
@@ -0,0 +1,24 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_I18N_TIMEZONE_H_
+#define BASE_I18N_TIMEZONE_H_
+
+#include <string>
+
+#include "base/i18n/base_i18n_export.h"
+
+namespace base {
+
+// Checks the system timezone and turns it into a two-character ISO 3166
+// country code. This may fail (for example, it used to always fail on
+// Android), in which case it returns an empty string. It also returns an
+// empty string when the timezone is Etc/UTC or Etc/UCT, but returns "GB"
+// for Etc/GMT, because people in the UK tend to select Etc/GMT by mistake
+// instead of Europe/London (British Time).
+BASE_I18N_EXPORT std::string CountryCodeForCurrentTimezone();
+
+}  // namespace base
+
+#endif  // BASE_I18N_TIMEZONE_H_
diff --git a/base/i18n/timezone_unittest.cc b/base/i18n/timezone_unittest.cc
new file mode 100644
index 0000000..57467dc
--- /dev/null
+++ b/base/i18n/timezone_unittest.cc
@@ -0,0 +1,27 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/i18n/timezone.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+TEST(TimezoneTest, CountryCodeForCurrentTimezone) {
+  std::string country_code = CountryCodeForCurrentTimezone();
+  // On some systems (such as Android or some flavors of Linux), ICU may come up
+  // empty. With https://chromium-review.googlesource.com/c/512282/ , ICU will
+  // not fail any more. See also http://bugs.icu-project.org/trac/ticket/13208 .
+  // Even with that, ICU returns '001' (world) for region-agnostic timezones
+  // such as Etc/UTC, and |CountryCodeForCurrentTimezone| returns an empty
+  // string so that callers can try the next fallback.
+  // TODO(jshin): Revise this to test for actual timezones using
+  // ScopedRestoreICUDefaultTimezone.
+  if (!country_code.empty())
+    EXPECT_EQ(2U, country_code.size()) << "country_code = " << country_code;
+}
+
+}  // namespace
+}  // namespace base
diff --git a/base/i18n/unicodestring.h b/base/i18n/unicodestring.h
new file mode 100644
index 0000000..b62c526
--- /dev/null
+++ b/base/i18n/unicodestring.h
@@ -0,0 +1,32 @@
+// Copyright (c) 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_I18N_UNICODESTRING_H_
+#define BASE_I18N_UNICODESTRING_H_
+
+#include "base/strings/string16.h"
+#include "third_party/icu/source/common/unicode/unistr.h"
+#include "third_party/icu/source/common/unicode/uvernum.h"
+
+#if U_ICU_VERSION_MAJOR_NUM >= 59
+#include "third_party/icu/source/common/unicode/char16ptr.h"
+#endif
+
+namespace base {
+namespace i18n {
+
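+// Copies the contents of an icu::UnicodeString into a base::string16.
+// Illustrative usage (a sketch; assumes ICU is linked and initialized):
+//
+//   icu::UnicodeString unistr = icu::UnicodeString::fromUTF8("hello");
+//   base::string16 s = base::i18n::UnicodeStringToString16(unistr);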
+inline string16 UnicodeStringToString16(const icu::UnicodeString& unistr) {
+#if U_ICU_VERSION_MAJOR_NUM >= 59
+  return base::string16(icu::toUCharPtr(unistr.getBuffer()),
+                        static_cast<size_t>(unistr.length()));
+#else
+  return base::string16(unistr.getBuffer(),
+                        static_cast<size_t>(unistr.length()));
+#endif
+}
+
+}  // namespace i18n
+}  // namespace base
+
+#endif  // BASE_I18N_UNICODESTRING_H_
diff --git a/base/i18n/utf8_validator_tables.cc b/base/i18n/utf8_validator_tables.cc
new file mode 100644
index 0000000..913afc7
--- /dev/null
+++ b/base/i18n/utf8_validator_tables.cc
@@ -0,0 +1,55 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated by build_utf8_validator_tables.
+// DO NOT EDIT.
+
+#include "base/i18n/utf8_validator_tables.h"
+
+namespace base {
+namespace internal {
+
+const uint8_t kUtf8ValidatorTables[] = {
+    // State 0, offset 0x00
+    0x00, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81,  // 0x08
+    0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81,  // 0x10
+    0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81,  // 0x18
+    0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81,  // 0x20
+    0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81,  // 0x28
+    0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81,  // 0x30
+    0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81,  // 0x38
+    0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81,  // 0x40
+    0x81, 0x81, 0x81, 0x83, 0x83, 0x83, 0x83, 0x83,  // 0x48
+    0x83, 0x83, 0x83, 0x83, 0x83, 0x83, 0x83, 0x83,  // 0x50
+    0x83, 0x83, 0x83, 0x83, 0x83, 0x83, 0x83, 0x83,  // 0x58
+    0x83, 0x83, 0x83, 0x83, 0x83, 0x83, 0x83, 0x83,  // 0x60
+    0x83, 0x86, 0x8b, 0x8b, 0x8b, 0x8b, 0x8b, 0x8b,  // 0x68
+    0x8b, 0x8b, 0x8b, 0x8b, 0x8b, 0x8b, 0x8e, 0x8b,  // 0x70
+    0x8b, 0x93, 0x9c, 0x9c, 0x9c, 0x9f, 0x81, 0x81,  // 0x78
+    0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81,  // 0x80
+    0x81,                                            // 0x81
+    // State 1, offset 0x81
+    0x07, 0x81,                                      // 0x83
+    // State 2, offset 0x83
+    0x06, 0x00, 0x81,                                // 0x86
+    // State 3, offset 0x86
+    0x05, 0x81, 0x83, 0x81, 0x81,                    // 0x8b
+    // State 4, offset 0x8b
+    0x06, 0x83, 0x81,                                // 0x8e
+    // State 5, offset 0x8e
+    0x05, 0x83, 0x81, 0x81, 0x81,                    // 0x93
+    // State 6, offset 0x93
+    0x04, 0x81, 0x8b, 0x8b, 0x8b, 0x81, 0x81, 0x81,  // 0x9b
+    0x81,                                            // 0x9c
+    // State 7, offset 0x9c
+    0x06, 0x8b, 0x81,                                // 0x9f
+    // State 8, offset 0x9f
+    0x04, 0x8b, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81,  // 0xa7
+    0x81,                                            // 0xa8
+};
+
+const size_t kUtf8ValidatorTablesSize = arraysize(kUtf8ValidatorTables);
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/i18n/utf8_validator_tables.h b/base/i18n/utf8_validator_tables.h
new file mode 100644
index 0000000..939616b
--- /dev/null
+++ b/base/i18n/utf8_validator_tables.h
@@ -0,0 +1,32 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_I18N_UTF8_VALIDATOR_TABLES_H_
+#define BASE_I18N_UTF8_VALIDATOR_TABLES_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/macros.h"
+
+namespace base {
+namespace internal {
+
+// The tables for all states; a list of entries of the form (right_shift,
+// next_state, next_state, ....). The right_shifts are used to reduce the
+// overall size of the table. The table only covers bytes in the range
+// [0x80, 0xFF] to save space.
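+//
+// A sketch of a single validation step (illustrative only; |state| is a byte
+// offset into the table, initially 0, and |byte| is in [0x80, 0xFF]):
+//
+//   uint8_t shift = kUtf8ValidatorTables[state];
+//   state = kUtf8ValidatorTables[state + 1 + ((byte & 0x7F) >> shift)];
+//   // A resulting state of 0 means "accept"; a state equal to
+//   // I18N_UTF8_VALIDATOR_INVALID_INDEX means the input is invalid.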
+extern const uint8_t kUtf8ValidatorTables[];
+
+extern const size_t kUtf8ValidatorTablesSize;
+
+// The offset of the INVALID state in kUtf8ValidatorTables.
+enum {
+  I18N_UTF8_VALIDATOR_INVALID_INDEX = 129
+};
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_I18N_UTF8_VALIDATOR_TABLES_H_
diff --git a/base/ios/OWNERS b/base/ios/OWNERS
new file mode 100644
index 0000000..bdb59ec
--- /dev/null
+++ b/base/ios/OWNERS
@@ -0,0 +1,3 @@
+eugenebut@chromium.org
+rohitrao@chromium.org
+sdefresne@chromium.org
diff --git a/base/ios/block_types.h b/base/ios/block_types.h
new file mode 100644
index 0000000..e4dde79
--- /dev/null
+++ b/base/ios/block_types.h
@@ -0,0 +1,14 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_IOS_BLOCK_TYPES_H_
+#define BASE_IOS_BLOCK_TYPES_H_
+
+// A generic procedural block type that takes no arguments and returns nothing.
+typedef void (^ProceduralBlock)(void);
+
+// A block that takes no arguments and returns a bool.
+typedef bool (^ConditionBlock)(void);
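+
+// Illustrative usage (a sketch):
+//
+//   ProceduralBlock cleanup = ^{
+//     // Release resources here.
+//   };
+//   ConditionBlock can_proceed = ^{ return true; };
+//   if (can_proceed())
+//     cleanup();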
+
+#endif  // BASE_IOS_BLOCK_TYPES_H_
diff --git a/base/ios/crb_protocol_observers.h b/base/ios/crb_protocol_observers.h
new file mode 100644
index 0000000..8ff5878
--- /dev/null
+++ b/base/ios/crb_protocol_observers.h
@@ -0,0 +1,43 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_IOS_CRB_PROTOCOL_OBSERVERS_H_
+#define BASE_IOS_CRB_PROTOCOL_OBSERVERS_H_
+
+#import <Foundation/Foundation.h>
+
+typedef void (^ExecutionWithObserverBlock)(id);
+
+// Implements a container for observers that implement a specific Objective-C
+// protocol. The container forwards method invocations to its contained
+// observers, so that sending a message to all the observers is as simple as
+// sending the message to the container.
+// It is safe for an observer to remove itself or another observer while being
+// notified. It is also safe to add another observer while being notified, but
+// the newly added observer will not be notified as part of the current
+// notification dispatch.
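+//
+// Illustrative usage (a sketch; |SomeProtocol| and |someMethod| are
+// hypothetical):
+//
+//   CRBProtocolObservers* observers =
+//       [CRBProtocolObservers observersWithProtocol:@protocol(SomeProtocol)];
+//   [observers addObserver:anObserver];
+//   // The message is forwarded to every registered observer.
+//   [(id<SomeProtocol>)observers someMethod];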
+@interface CRBProtocolObservers : NSObject
+
+// The Objective-C protocol that the observers in this container conform to.
+@property(nonatomic, readonly) Protocol* protocol;
+
+// Returns a CRBProtocolObservers container for observers that conform to
+// |protocol|.
++ (instancetype)observersWithProtocol:(Protocol*)protocol;
+
+// Adds |observer| to this container.
+- (void)addObserver:(id)observer;
+
+// Removes |observer| from this container.
+- (void)removeObserver:(id)observer;
+
+// Returns true if there are currently no observers.
+- (BOOL)empty;
+
+// Executes callback on every observer. |callback| cannot be nil.
+- (void)executeOnObservers:(ExecutionWithObserverBlock)callback;
+
+@end
+
+#endif  // BASE_IOS_CRB_PROTOCOL_OBSERVERS_H_
diff --git a/base/ios/crb_protocol_observers.mm b/base/ios/crb_protocol_observers.mm
new file mode 100644
index 0000000..1a3b9f7
--- /dev/null
+++ b/base/ios/crb_protocol_observers.mm
@@ -0,0 +1,191 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import "base/ios/crb_protocol_observers.h"
+
+#include <objc/runtime.h>
+#include <stddef.h>
+#include <algorithm>
+#include <vector>
+
+#include "base/logging.h"
+#include "base/mac/scoped_nsobject.h"
+#include "base/stl_util.h"
+
+@interface CRBProtocolObservers () {
+  base::scoped_nsobject<Protocol> _protocol;
+  // The ivars below are private to the implementation but must be public to
+  // allow the C++ |Iterator| class access to them.
+ @public
+  // Vector of weak pointers to observers.
+  std::vector<__unsafe_unretained id> _observers;
+  // The nested level of observer iteration.
+  // A depth of 0 means nobody is currently iterating on the list of observers.
+  int _invocationDepth;
+}
+
+// Removes nil observers from the list and is called when the
+// |_invocationDepth| reaches 0.
+- (void)compact;
+
+@end
+
+namespace {
+
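+// Iterates over the |_observers| vector of a CRBProtocolObservers instance
+// while tracking the iteration depth via |_invocationDepth|. Observers
+// removed during iteration are only nilled out; the vector is compacted once
+// the outermost iteration finishes.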
+class Iterator {
+ public:
+  explicit Iterator(CRBProtocolObservers* protocol_observers);
+  ~Iterator();
+  id GetNext();
+
+ private:
+  CRBProtocolObservers* protocol_observers_;
+  size_t index_;
+  size_t max_index_;
+};
+
+Iterator::Iterator(CRBProtocolObservers* protocol_observers)
+    : protocol_observers_(protocol_observers),
+      index_(0),
+      max_index_(protocol_observers->_observers.size()) {
+  DCHECK(protocol_observers_);
+  ++protocol_observers->_invocationDepth;
+}
+
+Iterator::~Iterator() {
+  if (protocol_observers_ && --protocol_observers_->_invocationDepth == 0)
+    [protocol_observers_ compact];
+}
+
+id Iterator::GetNext() {
+  if (!protocol_observers_)
+    return nil;
+  auto& observers = protocol_observers_->_observers;
+  // Skip nil elements.
+  size_t max_index = std::min(max_index_, observers.size());
+  while (index_ < max_index && !observers[index_])
+    ++index_;
+  return index_ < max_index ? observers[index_++] : nil;
+}
+
+}  // namespace
+
+@interface CRBProtocolObservers ()
+
+// Designated initializer.
+- (id)initWithProtocol:(Protocol*)protocol;
+
+@end
+
+@implementation CRBProtocolObservers
+
++ (instancetype)observersWithProtocol:(Protocol*)protocol {
+  return [[[self alloc] initWithProtocol:protocol] autorelease];
+}
+
+- (id)init {
+  NOTREACHED();
+  return nil;
+}
+
+- (id)initWithProtocol:(Protocol*)protocol {
+  self = [super init];
+  if (self) {
+    _protocol.reset([protocol retain]);
+  }
+  return self;
+}
+
+- (Protocol*)protocol {
+  return _protocol.get();
+}
+
+- (void)addObserver:(id)observer {
+  DCHECK(observer);
+  DCHECK([observer conformsToProtocol:self.protocol]);
+
+  if (base::ContainsValue(_observers, observer))
+    return;
+
+  _observers.push_back(observer);
+}
+
+- (void)removeObserver:(id)observer {
+  DCHECK(observer);
+  auto it = std::find(_observers.begin(), _observers.end(), observer);
+  if (it != _observers.end()) {
+    if (_invocationDepth)
+      *it = nil;
+    else
+      _observers.erase(it);
+  }
+}
+
+- (BOOL)empty {
+  int count = 0;
+  for (id observer : _observers) {
+    if (observer != nil)
+      ++count;
+  }
+  return count == 0;
+}
+
+#pragma mark - NSObject
+
+- (NSMethodSignature*)methodSignatureForSelector:(SEL)selector {
+  NSMethodSignature* signature = [super methodSignatureForSelector:selector];
+  if (signature)
+    return signature;
+
+  // Look for a required method in the protocol. protocol_getMethodDescription
+  // returns a struct whose fields are null if a method for the selector was
+  // not found.
+  struct objc_method_description description =
+      protocol_getMethodDescription(self.protocol, selector, YES, YES);
+  if (description.types)
+    return [NSMethodSignature signatureWithObjCTypes:description.types];
+
+  // Look for an optional method in the protocol.
+  description = protocol_getMethodDescription(self.protocol, selector, NO, YES);
+  if (description.types)
+    return [NSMethodSignature signatureWithObjCTypes:description.types];
+
+  // There is neither a required nor optional method with this selector in the
+  // protocol, so invoke -[NSObject doesNotRecognizeSelector:] to raise
+  // NSInvalidArgumentException.
+  [self doesNotRecognizeSelector:selector];
+  return nil;
+}
+
+- (void)forwardInvocation:(NSInvocation*)invocation {
+  DCHECK(invocation);
+  if (_observers.empty())
+    return;
+  SEL selector = [invocation selector];
+  Iterator it(self);
+  id observer;
+  while ((observer = it.GetNext()) != nil) {
+    if ([observer respondsToSelector:selector])
+      [invocation invokeWithTarget:observer];
+  }
+}
+
+- (void)executeOnObservers:(ExecutionWithObserverBlock)callback {
+  DCHECK(callback);
+  if (_observers.empty())
+    return;
+  Iterator it(self);
+  id observer;
+  while ((observer = it.GetNext()) != nil)
+    callback(observer);
+}
+
+#pragma mark - Private
+
+- (void)compact {
+  DCHECK(!_invocationDepth);
+  _observers.erase(std::remove(_observers.begin(), _observers.end(), nil),
+                   _observers.end());
+}
+
+@end
diff --git a/base/ios/crb_protocol_observers_unittest.mm b/base/ios/crb_protocol_observers_unittest.mm
new file mode 100644
index 0000000..07f5cff
--- /dev/null
+++ b/base/ios/crb_protocol_observers_unittest.mm
@@ -0,0 +1,290 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import "base/ios/crb_protocol_observers.h"
+#include "base/ios/weak_nsobject.h"
+#include "base/logging.h"
+#include "base/mac/scoped_nsautorelease_pool.h"
+#include "base/mac/scoped_nsobject.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/gtest_mac.h"
+#include "testing/platform_test.h"
+
+@protocol TestObserver
+
+@required
+- (void)requiredMethod;
+- (void)reset;
+
+@optional
+- (void)optionalMethod;
+- (void)mutateByAddingObserver:(id<TestObserver>)observer;
+- (void)mutateByRemovingObserver:(id<TestObserver>)observer;
+- (void)nestedMutateByAddingObserver:(id<TestObserver>)observer;
+- (void)nestedMutateByRemovingObserver:(id<TestObserver>)observer;
+
+@end
+
+// Implements only the required methods in the TestObserver protocol.
+@interface TestPartialObserver : NSObject<TestObserver>
+@property(nonatomic, readonly) BOOL requiredMethodInvoked;
+@end
+
+// Implements all the methods in the TestObserver protocol.
+@interface TestCompleteObserver : TestPartialObserver<TestObserver>
+@property(nonatomic, readonly) BOOL optionalMethodInvoked;
+@end
+
+@interface TestMutateObserver : TestCompleteObserver
+- (instancetype)initWithObserver:(CRBProtocolObservers*)observer
+    NS_DESIGNATED_INITIALIZER;
+- (instancetype)init NS_UNAVAILABLE;
+@end
+
+namespace {
+
+class CRBProtocolObserversTest : public PlatformTest {
+ public:
+  CRBProtocolObserversTest() {}
+
+ protected:
+  void SetUp() override {
+    PlatformTest::SetUp();
+
+    observers_.reset([[CRBProtocolObservers observersWithProtocol:
+        @protocol(TestObserver)] retain]);
+
+    partial_observer_.reset([[TestPartialObserver alloc] init]);
+    EXPECT_FALSE([partial_observer_ requiredMethodInvoked]);
+
+    complete_observer_.reset([[TestCompleteObserver alloc] init]);
+    EXPECT_FALSE([complete_observer_ requiredMethodInvoked]);
+    EXPECT_FALSE([complete_observer_ optionalMethodInvoked]);
+
+    mutate_observer_.reset(
+        [[TestMutateObserver alloc] initWithObserver:observers_.get()]);
+    EXPECT_FALSE([mutate_observer_ requiredMethodInvoked]);
+  }
+
+  base::scoped_nsobject<id> observers_;
+  base::scoped_nsobject<TestPartialObserver> partial_observer_;
+  base::scoped_nsobject<TestCompleteObserver> complete_observer_;
+  base::scoped_nsobject<TestMutateObserver> mutate_observer_;
+};
+
+// Verifies basic functionality of -[CRBProtocolObservers addObserver:] and
+// -[CRBProtocolObservers removeObserver:].
+TEST_F(CRBProtocolObserversTest, AddRemoveObserver) {
+  // Add an observer and verify that the CRBProtocolObservers instance forwards
+  // an invocation to it.
+  [observers_ addObserver:partial_observer_];
+  [observers_ requiredMethod];
+  EXPECT_TRUE([partial_observer_ requiredMethodInvoked]);
+
+  [partial_observer_ reset];
+  EXPECT_FALSE([partial_observer_ requiredMethodInvoked]);
+
+  // Remove the observer and verify that the CRBProtocolObservers instance no
+  // longer forwards an invocation to it.
+  [observers_ removeObserver:partial_observer_];
+  [observers_ requiredMethod];
+  EXPECT_FALSE([partial_observer_ requiredMethodInvoked]);
+}
+
+// Verifies that CRBProtocolObservers correctly forwards the invocation of a
+// required method in the protocol.
+TEST_F(CRBProtocolObserversTest, RequiredMethods) {
+  [observers_ addObserver:partial_observer_];
+  [observers_ addObserver:complete_observer_];
+  [observers_ requiredMethod];
+  EXPECT_TRUE([partial_observer_ requiredMethodInvoked]);
+  EXPECT_TRUE([complete_observer_ requiredMethodInvoked]);
+}
+
+// Verifies that CRBProtocolObservers correctly forwards the invocation of an
+// optional method in the protocol.
+TEST_F(CRBProtocolObserversTest, OptionalMethods) {
+  [observers_ addObserver:partial_observer_];
+  [observers_ addObserver:complete_observer_];
+  [observers_ optionalMethod];
+  EXPECT_FALSE([partial_observer_ requiredMethodInvoked]);
+  EXPECT_FALSE([complete_observer_ requiredMethodInvoked]);
+  EXPECT_TRUE([complete_observer_ optionalMethodInvoked]);
+}
+
+// Verifies that CRBProtocolObservers only holds a weak reference to an
+// observer.
+TEST_F(CRBProtocolObserversTest, WeakReference) {
+  base::WeakNSObject<TestPartialObserver> weak_observer(
+      partial_observer_);
+  EXPECT_TRUE(weak_observer);
+
+  [observers_ addObserver:partial_observer_];
+
+  {
+    // Need an autorelease pool here, because
+    // -[CRBProtocolObservers forwardInvocation:] creates a temporary
+    // autoreleased array that holds all the observers.
+    base::mac::ScopedNSAutoreleasePool pool;
+    [observers_ requiredMethod];
+    EXPECT_TRUE([partial_observer_ requiredMethodInvoked]);
+  }
+
+  partial_observer_.reset();
+  EXPECT_FALSE(weak_observer.get());
+}
+
+// Verifies that an observer can safely remove itself as observer while being
+// notified.
+TEST_F(CRBProtocolObserversTest, SelfMutateObservers) {
+  [observers_ addObserver:mutate_observer_];
+  EXPECT_FALSE([observers_ empty]);
+
+  [observers_ requiredMethod];
+  EXPECT_TRUE([mutate_observer_ requiredMethodInvoked]);
+
+  [mutate_observer_ reset];
+
+  [observers_ nestedMutateByRemovingObserver:mutate_observer_];
+  EXPECT_FALSE([mutate_observer_ requiredMethodInvoked]);
+
+  [observers_ addObserver:partial_observer_];
+
+  [observers_ requiredMethod];
+  EXPECT_FALSE([mutate_observer_ requiredMethodInvoked]);
+  EXPECT_TRUE([partial_observer_ requiredMethodInvoked]);
+
+  [observers_ removeObserver:partial_observer_];
+  EXPECT_TRUE([observers_ empty]);
+}
+
+// Verifies that -[CRBProtocolObservers addObserver:] and
+// -[CRBProtocolObservers removeObserver:] can be called while methods are
+// being forwarded.
+TEST_F(CRBProtocolObserversTest, MutateObservers) {
+  // Indirectly add an observer while forwarding an observer method.
+  [observers_ addObserver:mutate_observer_];
+
+  [observers_ mutateByAddingObserver:partial_observer_];
+  EXPECT_FALSE([partial_observer_ requiredMethodInvoked]);
+
+  // Check that methods are correctly forwarded to the indirectly added
+  // observer.
+  [mutate_observer_ reset];
+  [observers_ requiredMethod];
+  EXPECT_TRUE([mutate_observer_ requiredMethodInvoked]);
+  EXPECT_TRUE([partial_observer_ requiredMethodInvoked]);
+
+  [mutate_observer_ reset];
+  [partial_observer_ reset];
+
+  // Indirectly remove an observer while forwarding an observer method.
+  [observers_ mutateByRemovingObserver:partial_observer_];
+
+  // Check that the method is not forwarded to the indirectly removed
+  // observer.
+  [observers_ requiredMethod];
+  EXPECT_TRUE([mutate_observer_ requiredMethodInvoked]);
+  EXPECT_FALSE([partial_observer_ requiredMethodInvoked]);
+}
+
+// Verifies that -[CRBProtocolObservers addObserver:] and
+// -[CRBProtocolObservers removeObserver:] can be called while methods are
+// being forwarded with a nested invocation depth > 0.
+TEST_F(CRBProtocolObserversTest, NestedMutateObservers) {
+  // Indirectly add an observer while forwarding an observer method.
+  [observers_ addObserver:mutate_observer_];
+
+  [observers_ nestedMutateByAddingObserver:partial_observer_];
+  EXPECT_FALSE([partial_observer_ requiredMethodInvoked]);
+
+  // Check that methods are correctly forwarded to the indirectly added
+  // observer.
+  [mutate_observer_ reset];
+  [observers_ requiredMethod];
+  EXPECT_TRUE([mutate_observer_ requiredMethodInvoked]);
+  EXPECT_TRUE([partial_observer_ requiredMethodInvoked]);
+
+  [mutate_observer_ reset];
+  [partial_observer_ reset];
+
+  // Indirectly remove an observer while forwarding an observer method.
+  [observers_ nestedMutateByRemovingObserver:partial_observer_];
+
+  // Check that the method is not forwarded to the indirectly removed
+  // observer.
+  [observers_ requiredMethod];
+  EXPECT_TRUE([mutate_observer_ requiredMethodInvoked]);
+  EXPECT_FALSE([partial_observer_ requiredMethodInvoked]);
+}
+
+}  // namespace
+
+@implementation TestPartialObserver {
+  BOOL _requiredMethodInvoked;
+}
+
+- (BOOL)requiredMethodInvoked {
+  return _requiredMethodInvoked;
+}
+
+- (void)requiredMethod {
+  _requiredMethodInvoked = YES;
+}
+
+- (void)reset {
+  _requiredMethodInvoked = NO;
+}
+
+@end
+
+@implementation TestCompleteObserver {
+  BOOL _optionalMethodInvoked;
+}
+
+- (BOOL)optionalMethodInvoked {
+  return _optionalMethodInvoked;
+}
+
+- (void)optionalMethod {
+  _optionalMethodInvoked = YES;
+}
+
+- (void)reset {
+  [super reset];
+  _optionalMethodInvoked = NO;
+}
+
+@end
+
+@implementation TestMutateObserver {
+  id _observers;  // weak
+}
+
+- (instancetype)initWithObserver:(CRBProtocolObservers*)observers {
+  self = [super init];
+  if (self) {
+    _observers = observers;
+  }
+  return self;
+}
+
+- (instancetype)init {
+  NOTREACHED();
+  return nil;
+}
+
+- (void)mutateByAddingObserver:(id<TestObserver>)observer {
+  [_observers addObserver:observer];
+}
+
+- (void)mutateByRemovingObserver:(id<TestObserver>)observer {
+  [_observers removeObserver:observer];
+}
+
+- (void)nestedMutateByAddingObserver:(id<TestObserver>)observer {
+  [_observers mutateByAddingObserver:observer];
+}
+
+- (void)nestedMutateByRemovingObserver:(id<TestObserver>)observer {
+  [_observers mutateByRemovingObserver:observer];
+}
+
+@end
diff --git a/base/ios/device_util.h b/base/ios/device_util.h
new file mode 100644
index 0000000..b1bed5c
--- /dev/null
+++ b/base/ios/device_util.h
@@ -0,0 +1,86 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_IOS_DEVICE_UTIL_H_
+#define BASE_IOS_DEVICE_UTIL_H_
+
+#include <stdint.h>
+
+#include <string>
+
+namespace ios {
+namespace device_util {
+
+// Returns the hardware version of the device the app is running on.
+//
+// The returned string is the string returned by sysctlbyname() with name
+// "hw.machine". Possible (known) values include:
+//
+// iPhone1,1 -> iPhone 1G
+// iPhone1,2 -> iPhone 3G
+// iPhone2,1 -> iPhone 3GS
+// iPhone3,1 -> iPhone 4/AT&T
+// iPhone3,2 -> iPhone 4/Other Carrier?
+// iPhone3,3 -> iPhone 4/Other Carrier?
+// iPhone4,1 -> iPhone 4S
+//
+// iPod1,1   -> iPod touch 1G
+// iPod2,1   -> iPod touch 2G
+// iPod2,2   -> ?
+// iPod3,1   -> iPod touch 3G
+// iPod4,1   -> iPod touch 4G
+// iPod5,1   -> ?
+//
+// iPad1,1   -> iPad 1G, WiFi
+// iPad1,?   -> iPad 1G, 3G <- needs 3G owner to test
+// iPad2,1   -> iPad 2G, WiFi
+//
+// AppleTV2,1 -> AppleTV 2
+//
+// i386       -> Simulator
+// x86_64     -> Simulator
+std::string GetPlatform();
+
+// Returns true if the application is running on a device with 512MB or more
+// RAM.
+bool RamIsAtLeast512Mb();
+
+// Returns true if the application is running on a device with 1024MB or more
+// RAM.
+bool RamIsAtLeast1024Mb();
+
+// Returns true if the application is running on a device with |ram_in_mb| MB or
+// more RAM.
+// Use with caution! Actual RAM reported by devices is less than the commonly
+// used powers-of-two values. For example, a 512MB device may report only 502MB
+// RAM. The convenience methods above should be used in most cases because they
+// correctly handle this issue.
+bool RamIsAtLeast(uint64_t ram_in_mb);
+
+// Returns true if the device has only one core.
+bool IsSingleCoreDevice();
+
+// Returns the MAC address of the interface with name |interface_name|.
+std::string GetMacAddress(const std::string& interface_name);
+
+// Returns a random UUID.
+std::string GetRandomId();
+
+// Returns an identifier for the device, using the given |salt|. A global
+// identifier is generated the first time this method is called, and the salt
+// is used to be able to generate distinct identifiers for the same device. If
+// |salt| is NULL, a default value is used. Unless you are using this value for
+// something that should be anonymous, you should probably pass NULL.
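+//
+// Illustrative usage (a sketch; the salt strings are arbitrary):
+//
+//   std::string default_id = ios::device_util::GetDeviceIdentifier(nullptr);
+//   std::string other_id = ios::device_util::GetDeviceIdentifier("MySalt");
+//   // The two ids differ, but each is stable for this device.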
+std::string GetDeviceIdentifier(const char* salt);
+
+// Returns a hashed version of |in_string| using |salt| (which must not be
+// zero-length). Different salt values should result in differently hashed
+// strings.
+std::string GetSaltedString(const std::string& in_string,
+                            const std::string& salt);
+
+}  // namespace device_util
+}  // namespace ios
+
+#endif  // BASE_IOS_DEVICE_UTIL_H_
diff --git a/base/ios/device_util.mm b/base/ios/device_util.mm
new file mode 100644
index 0000000..5ec1e69
--- /dev/null
+++ b/base/ios/device_util.mm
@@ -0,0 +1,178 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/ios/device_util.h"
+
+#include <CommonCrypto/CommonDigest.h>
+#import <UIKit/UIKit.h>
+#include <ifaddrs.h>
+#include <net/if_dl.h>
+#include <stddef.h>
+#include <string.h>
+#include <sys/socket.h>
+#include <sys/sysctl.h>
+
+#include <memory>
+
+#include "base/logging.h"
+#include "base/mac/scoped_cftyperef.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/strings/sys_string_conversions.h"
+
+namespace {
+
+// Client ID key in the user preferences.
+NSString* const kLegacyClientIdPreferenceKey = @"ChromiumClientID";
+NSString* const kClientIdPreferenceKey = @"ChromeClientID";
+// Current hardware type. This is used to detect that a device has been backed
+// up and restored to another device, and allows regenerating a new device id.
+NSString* const kHardwareTypePreferenceKey = @"ClientIDGenerationHardwareType";
+// Default salt for device ids.
+const char kDefaultSalt[] = "Salt";
+// Zero UUID returned on buggy iOS devices.
+NSString* const kZeroUUID = @"00000000-0000-0000-0000-000000000000";
+
+NSString* GenerateClientId() {
+  NSUserDefaults* defaults = [NSUserDefaults standardUserDefaults];
+
+  // Try to migrate from legacy client id.
+  NSString* client_id = [defaults stringForKey:kLegacyClientIdPreferenceKey];
+
+  // Some iOS6 devices return a buggy identifierForVendor:
+  // http://openradar.appspot.com/12377282. If this is the case, revert to
+  // generating a new one.
+  if (!client_id || [client_id isEqualToString:kZeroUUID]) {
+    client_id = [[[UIDevice currentDevice] identifierForVendor] UUIDString];
+    if ([client_id isEqualToString:kZeroUUID])
+      client_id = base::SysUTF8ToNSString(ios::device_util::GetRandomId());
+  }
+  return client_id;
+}
+
+}  // namespace
+
+namespace ios {
+namespace device_util {
+
+std::string GetPlatform() {
+  std::string platform;
+  size_t size = 0;
+  sysctlbyname("hw.machine", NULL, &size, NULL, 0);
+  sysctlbyname("hw.machine", base::WriteInto(&platform, size), &size, NULL, 0);
+  return platform;
+}
+
+bool RamIsAtLeast512Mb() {
+  // 512MB devices report anywhere from 502-504 MB, use 450 MB just to be safe.
+  return RamIsAtLeast(450);
+}
+
+bool RamIsAtLeast1024Mb() {
+  // 1GB devices report anywhere from 975-999 MB, use 900 MB just to be safe.
+  return RamIsAtLeast(900);
+}
+
+bool RamIsAtLeast(uint64_t ram_in_mb) {
+  uint64_t memory_size = 0;
+  size_t size = sizeof(memory_size);
+  if (sysctlbyname("hw.memsize", &memory_size, &size, NULL, 0) == 0) {
+    // Compare the reported memory size against the requested amount in bytes.
+    return memory_size >= ram_in_mb * 1024 * 1024;
+  }
+  return false;
+}
+
+bool IsSingleCoreDevice() {
+  uint64_t cpu_number = 0;
+  size_t sizes = sizeof(cpu_number);
+  sysctlbyname("hw.physicalcpu", &cpu_number, &sizes, NULL, 0);
+  return cpu_number == 1;
+}
+
+std::string GetMacAddress(const std::string& interface_name) {
+  std::string mac_string;
+  struct ifaddrs* addresses;
+  if (getifaddrs(&addresses) == 0) {
+    for (struct ifaddrs* address = addresses; address;
+         address = address->ifa_next) {
+      if ((address->ifa_addr->sa_family == AF_LINK) &&
+          strcmp(interface_name.c_str(), address->ifa_name) == 0) {
+        const struct sockaddr_dl* found_address_struct =
+            reinterpret_cast<const struct sockaddr_dl*>(address->ifa_addr);
+
+        // |found_address_struct->sdl_data| contains the interface name followed
+        // by the interface address. The address part can be accessed based on
+        // the length of the name, that is, |found_address_struct->sdl_nlen|.
+        const unsigned char* found_address =
+            reinterpret_cast<const unsigned char*>(
+                &found_address_struct->sdl_data[
+                    found_address_struct->sdl_nlen]);
+
+        int found_address_length = found_address_struct->sdl_alen;
+        for (int i = 0; i < found_address_length; ++i) {
+          if (i != 0)
+            mac_string.push_back(':');
+          base::StringAppendF(&mac_string, "%02X", found_address[i]);
+        }
+        break;
+      }
+    }
+    freeifaddrs(addresses);
+  }
+  return mac_string;
+}
+
+std::string GetRandomId() {
+  base::ScopedCFTypeRef<CFUUIDRef> uuid_object(
+      CFUUIDCreate(kCFAllocatorDefault));
+  base::ScopedCFTypeRef<CFStringRef> uuid_string(
+      CFUUIDCreateString(kCFAllocatorDefault, uuid_object));
+  return base::SysCFStringRefToUTF8(uuid_string);
+}
+
+std::string GetDeviceIdentifier(const char* salt) {
+  NSUserDefaults* defaults = [NSUserDefaults standardUserDefaults];
+
+  NSString* last_seen_hardware =
+      [defaults stringForKey:kHardwareTypePreferenceKey];
+  NSString* current_hardware = base::SysUTF8ToNSString(GetPlatform());
+  if (!last_seen_hardware) {
+    last_seen_hardware = current_hardware;
+    [defaults setObject:current_hardware forKey:kHardwareTypePreferenceKey];
+    [defaults synchronize];
+  }
+
+  NSString* client_id = [defaults stringForKey:kClientIdPreferenceKey];
+
+  if (!client_id || ![last_seen_hardware isEqualToString:current_hardware]) {
+    client_id = GenerateClientId();
+    [defaults setObject:client_id forKey:kClientIdPreferenceKey];
+    [defaults setObject:current_hardware forKey:kHardwareTypePreferenceKey];
+    [defaults synchronize];
+  }
+
+  return GetSaltedString(base::SysNSStringToUTF8(client_id),
+                         salt ? salt : kDefaultSalt);
+}
+
+std::string GetSaltedString(const std::string& in_string,
+                            const std::string& salt) {
+  DCHECK(salt.length());
+  NSData* hash_data = [base::SysUTF8ToNSString(in_string + salt)
+      dataUsingEncoding:NSUTF8StringEncoding];
+
+  unsigned char hash[CC_SHA256_DIGEST_LENGTH];
+  CC_SHA256([hash_data bytes], [hash_data length], hash);
+  CFUUIDBytes* uuid_bytes = reinterpret_cast<CFUUIDBytes*>(hash);
+
+  base::ScopedCFTypeRef<CFUUIDRef> uuid_object(
+      CFUUIDCreateFromUUIDBytes(kCFAllocatorDefault, *uuid_bytes));
+  base::ScopedCFTypeRef<CFStringRef> device_id(
+      CFUUIDCreateString(kCFAllocatorDefault, uuid_object));
+  return base::SysCFStringRefToUTF8(device_id);
+}
+
+}  // namespace device_util
+}  // namespace ios
diff --git a/base/ios/device_util_unittest.mm b/base/ios/device_util_unittest.mm
new file mode 100644
index 0000000..82d4217
--- /dev/null
+++ b/base/ios/device_util_unittest.mm
@@ -0,0 +1,143 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import <UIKit/UIKit.h>
+
+#include "base/ios/device_util.h"
+#include "base/strings/sys_string_conversions.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/gtest_mac.h"
+#include "testing/platform_test.h"
+
+namespace {
+// The behavior of most of these utility functions depends on the device they
+// run on, so there is not much to unit test; the APIs are simply exercised to
+// make sure they don't choke. Additional checks are added for particular APIs
+// when needed.
+
+typedef PlatformTest DeviceUtilTest;
+
+void CleanNSUserDefaultsForDeviceId() {
+  NSUserDefaults* defaults = [NSUserDefaults standardUserDefaults];
+  [defaults removeObjectForKey:@"ChromeClientID"];
+  [defaults removeObjectForKey:@"ChromiumClientID"];
+  [defaults removeObjectForKey:@"ClientIDGenerationHardwareType"];
+  [defaults synchronize];
+}
+
+TEST_F(DeviceUtilTest, GetPlatform) {
+  GTEST_ASSERT_GT(ios::device_util::GetPlatform().length(), 0U);
+}
+
+TEST_F(DeviceUtilTest, IsSingleCoreDevice) {
+  ios::device_util::IsSingleCoreDevice();
+}
+
+TEST_F(DeviceUtilTest, GetMacAddress) {
+  GTEST_ASSERT_GT(ios::device_util::GetMacAddress("en0").length(), 0U);
+}
+
+TEST_F(DeviceUtilTest, GetRandomId) {
+  GTEST_ASSERT_GT(ios::device_util::GetRandomId().length(), 0U);
+}
+
+TEST_F(DeviceUtilTest, GetDeviceIdentifier) {
+  CleanNSUserDefaultsForDeviceId();
+
+  std::string default_id = ios::device_util::GetDeviceIdentifier(NULL);
+  std::string other_id = ios::device_util::GetDeviceIdentifier("ForTest");
+  EXPECT_NE(default_id, other_id);
+
+  CleanNSUserDefaultsForDeviceId();
+
+  std::string new_default_id = ios::device_util::GetDeviceIdentifier(NULL);
+  if (![[[[UIDevice currentDevice] identifierForVendor] UUIDString]
+          isEqualToString:@"00000000-0000-0000-0000-000000000000"]) {
+    EXPECT_EQ(default_id, new_default_id);
+  } else {
+    EXPECT_NE(default_id, new_default_id);
+  }
+
+  CleanNSUserDefaultsForDeviceId();
+}
+
+TEST_F(DeviceUtilTest, CheckMigration) {
+  CleanNSUserDefaultsForDeviceId();
+
+  NSUserDefaults* defaults = [NSUserDefaults standardUserDefaults];
+  [defaults setObject:@"10000000-0000-0000-0000-000000000000"
+               forKey:@"ChromeClientID"];
+  [defaults synchronize];
+  std::string expected_id = ios::device_util::GetDeviceIdentifier(NULL);
+  [defaults removeObjectForKey:@"ChromeClientID"];
+  [defaults setObject:@"10000000-0000-0000-0000-000000000000"
+               forKey:@"ChromiumClientID"];
+  [defaults synchronize];
+  std::string new_id = ios::device_util::GetDeviceIdentifier(NULL);
+  EXPECT_EQ(expected_id, new_id);
+
+  CleanNSUserDefaultsForDeviceId();
+}
+
+TEST_F(DeviceUtilTest, CheckMigrationFromZero) {
+  CleanNSUserDefaultsForDeviceId();
+
+  NSUserDefaults* defaults = [NSUserDefaults standardUserDefaults];
+  [defaults setObject:@"00000000-0000-0000-0000-000000000000"
+               forKey:@"ChromeClientID"];
+  [defaults synchronize];
+  std::string zero_id = ios::device_util::GetDeviceIdentifier(NULL);
+  [defaults removeObjectForKey:@"ChromeClientID"];
+  [defaults setObject:@"00000000-0000-0000-0000-000000000000"
+               forKey:@"ChromiumClientID"];
+  [defaults synchronize];
+  std::string new_id = ios::device_util::GetDeviceIdentifier(NULL);
+  EXPECT_NE(zero_id, new_id);
+
+  CleanNSUserDefaultsForDeviceId();
+}
+
+TEST_F(DeviceUtilTest, GetSaltedStringEquals) {
+  std::string string1("The quick brown fox jumps over the lazy dog");
+  std::string string2("The quick brown fox jumps over the lazy dog");
+  std::string salt("salt");
+  // Same string and same salt should result in the same salted string.
+  EXPECT_EQ(ios::device_util::GetSaltedString(string1, salt),
+            ios::device_util::GetSaltedString(string2, salt));
+}
+
+TEST_F(DeviceUtilTest, GetSaltedStringNotEquals) {
+  std::string string1("The quick brown fox jumps over the lazy dog");
+  std::string string2("The lazy brown fox jumps over the quick dog");
+  std::string salt("salt");
+  // Different string and same salt should result in different salted strings.
+  EXPECT_NE(ios::device_util::GetSaltedString(string1, salt),
+            ios::device_util::GetSaltedString(string2, salt));
+}
+
+TEST_F(DeviceUtilTest, GetSaltedStringDifferentSalt) {
+  std::string string1("The quick brown fox jumps over the lazy dog");
+  std::string salt1("salt");
+  std::string salt2("pepper");
+  // Same string with different salt should result in different salted strings.
+  EXPECT_NE(ios::device_util::GetSaltedString(string1, salt1),
+            ios::device_util::GetSaltedString(string1, salt2));
+}
+
+TEST_F(DeviceUtilTest, CheckDeviceMigration) {
+  CleanNSUserDefaultsForDeviceId();
+
+  NSUserDefaults* defaults = [NSUserDefaults standardUserDefaults];
+  [defaults setObject:@"10000000-0000-0000-0000-000000000000"
+               forKey:@"ChromeClientID"];
+  [defaults synchronize];
+  std::string base_id = ios::device_util::GetDeviceIdentifier(NULL);
+  [defaults setObject:@"Foo" forKey:@"ClientIDGenerationHardwareType"];
+  [defaults synchronize];
+  std::string new_id = ios::device_util::GetDeviceIdentifier(NULL);
+  EXPECT_NE(new_id, base_id);
+
+  CleanNSUserDefaultsForDeviceId();
+}
+
+}  // namespace
diff --git a/base/ios/ios_util.h b/base/ios/ios_util.h
new file mode 100644
index 0000000..2464b1c
--- /dev/null
+++ b/base/ios/ios_util.h
@@ -0,0 +1,45 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_IOS_IOS_UTIL_H_
+#define BASE_IOS_IOS_UTIL_H_
+
+#include <stdint.h>
+
+#include "base/base_export.h"
+#include "base/files/file_path.h"
+
+namespace base {
+namespace ios {
+
+// Returns whether the operating system is iOS 10 or later.
+BASE_EXPORT bool IsRunningOnIOS10OrLater();
+
+// Returns whether the operating system is iOS 11 or later.
+BASE_EXPORT bool IsRunningOnIOS11OrLater();
+
+// Returns whether the operating system is at the given version or later.
+BASE_EXPORT bool IsRunningOnOrLater(int32_t major,
+                                    int32_t minor,
+                                    int32_t bug_fix);
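+//
+// Example (illustrative): gate a code path on iOS 10.3 or later:
+//   if (base::ios::IsRunningOnOrLater(10, 3, 0)) {
+//     // Use an API introduced in iOS 10.3.
+//   }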
+
+// Returns whether iOS is signaling that an RTL text direction should be used
+// regardless of the current locale. This should not return true if the current
+// language is a "real" RTL language such as Arabic or Urdu; it should only
+// return true in cases where the RTL text direction has been forced (for
+// example by using the "RTL Pseudolanguage" option when launching from Xcode).
+BASE_EXPORT bool IsInForcedRTL();
+
+// Stores the |path| of the ICU dat file in a global to be referenced later by
+// FilePathOfEmbeddedICU().  This should only be called once.
+BASE_EXPORT void OverridePathOfEmbeddedICU(const char* path);
+
+// Returns the overridden path set by OverridePathOfEmbeddedICU(); otherwise
+// returns an invalid FilePath.
+BASE_EXPORT FilePath FilePathOfEmbeddedICU();
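+//
+// Example (illustrative; the path below is hypothetical):
+//   base::ios::OverridePathOfEmbeddedICU("/path/to/icudtl.dat");
+//   base::FilePath icu_path = base::ios::FilePathOfEmbeddedICU();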
+
+}  // namespace ios
+}  // namespace base
+
+#endif  // BASE_IOS_IOS_UTIL_H_
diff --git a/base/ios/ios_util.mm b/base/ios/ios_util.mm
new file mode 100644
index 0000000..2402d30
--- /dev/null
+++ b/base/ios/ios_util.mm
@@ -0,0 +1,69 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/ios/ios_util.h"
+
+#import <Foundation/Foundation.h>
+#include <stddef.h>
+
+#include "base/macros.h"
+#include "base/sys_info.h"
+
+namespace {
+
+// Returns a 3-element array containing the major, minor, and bug fix versions
+// of the OS.
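+// The returned array is intentionally never freed: the caller caches the
+// pointer in a function-local static for the lifetime of the process.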
+const int32_t* OSVersionAsArray() {
+  int32_t* digits = new int32_t[3];
+  base::SysInfo::OperatingSystemVersionNumbers(
+      &digits[0], &digits[1], &digits[2]);
+  return digits;
+}
+
+std::string* g_icudtl_path_override = nullptr;
+
+}  // namespace
+
+namespace base {
+namespace ios {
+
+bool IsRunningOnIOS10OrLater() {
+  static const bool is_running_on_or_later = IsRunningOnOrLater(10, 0, 0);
+  return is_running_on_or_later;
+}
+
+bool IsRunningOnIOS11OrLater() {
+  static const bool is_running_on_or_later = IsRunningOnOrLater(11, 0, 0);
+  return is_running_on_or_later;
+}
+
+bool IsRunningOnOrLater(int32_t major, int32_t minor, int32_t bug_fix) {
+  static const int32_t* current_version = OSVersionAsArray();
+  int32_t version[] = {major, minor, bug_fix};
+  for (size_t i = 0; i < arraysize(version); i++) {
+    if (current_version[i] != version[i])
+      return current_version[i] > version[i];
+  }
+  return true;
+}
+
+bool IsInForcedRTL() {
+  NSUserDefaults* defaults = [NSUserDefaults standardUserDefaults];
+  return [defaults boolForKey:@"NSForceRightToLeftWritingDirection"];
+}
+
+void OverridePathOfEmbeddedICU(const char* path) {
+  DCHECK(!g_icudtl_path_override);
+  g_icudtl_path_override = new std::string(path);
+}
+
+FilePath FilePathOfEmbeddedICU() {
+  if (g_icudtl_path_override) {
+    return FilePath(*g_icudtl_path_override);
+  }
+  return FilePath();
+}
+
+}  // namespace ios
+}  // namespace base
diff --git a/base/ios/ns_error_util.h b/base/ios/ns_error_util.h
new file mode 100644
index 0000000..1012292
--- /dev/null
+++ b/base/ios/ns_error_util.h
@@ -0,0 +1,25 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_IOS_NS_ERROR_UTIL_H_
+#define BASE_IOS_NS_ERROR_UTIL_H_
+
+@class NSError;
+
+namespace base {
+namespace ios {
+
+// Iterates through |error|'s underlying errors and returns the first error for
+// which there is no underlying error.
+NSError* GetFinalUnderlyingErrorFromError(NSError* error);
+
+// Returns a copy of |original_error| with |underlying_error| appended to the
+// end of its underlying error chain.
+NSError* ErrorWithAppendedUnderlyingError(NSError* original_error,
+                                          NSError* underlying_error);
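+//
+// Example (an illustrative sketch; |outer| and |inner| are hypothetical
+// NSError instances):
+//   NSError* chained =
+//       base::ios::ErrorWithAppendedUnderlyingError(outer, inner);
+//   // GetFinalUnderlyingErrorFromError(chained) now returns |inner|.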
+
+}  // namespace ios
+}  // namespace base
+
+#endif  // BASE_IOS_NS_ERROR_UTIL_H_
diff --git a/base/ios/ns_error_util.mm b/base/ios/ns_error_util.mm
new file mode 100644
index 0000000..c44d9ee
--- /dev/null
+++ b/base/ios/ns_error_util.mm
@@ -0,0 +1,53 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import "base/ios/ns_error_util.h"
+
+#import <Foundation/Foundation.h>
+
+#include "base/logging.h"
+#include "base/mac/scoped_nsobject.h"
+
+namespace base {
+namespace ios {
+
+namespace {
+// Iterates through |error|'s underlying errors and returns them in an array.
+NSArray* GetFullErrorChainForError(NSError* error) {
+  NSMutableArray* error_chain = [NSMutableArray array];
+  NSError* current_error = error;
+  while (current_error) {
+    DCHECK([current_error isKindOfClass:[NSError class]]);
+    [error_chain addObject:current_error];
+    current_error = current_error.userInfo[NSUnderlyingErrorKey];
+  }
+  return error_chain;
+}
+}  // namespace
+
+NSError* GetFinalUnderlyingErrorFromError(NSError* error) {
+  DCHECK(error);
+  return [GetFullErrorChainForError(error) lastObject];
+}
+
+NSError* ErrorWithAppendedUnderlyingError(NSError* original_error,
+                                          NSError* underlying_error) {
+  DCHECK(original_error);
+  DCHECK(underlying_error);
+  NSArray* error_chain = GetFullErrorChainForError(original_error);
+  NSError* current_error = underlying_error;
+  for (NSInteger idx = error_chain.count - 1; idx >= 0; --idx) {
+    NSError* error = error_chain[idx];
+    scoped_nsobject<NSMutableDictionary> user_info(
+        [error.userInfo mutableCopy]);
+    [user_info setObject:current_error forKey:NSUnderlyingErrorKey];
+    current_error = [NSError errorWithDomain:error.domain
+                                        code:error.code
+                                    userInfo:user_info];
+  }
+  return current_error;
+}
+
+}  // namespace ios
+}  // namespace base
diff --git a/base/ios/scoped_critical_action.h b/base/ios/scoped_critical_action.h
new file mode 100644
index 0000000..2f7d16c
--- /dev/null
+++ b/base/ios/scoped_critical_action.h
@@ -0,0 +1,73 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_IOS_SCOPED_CRITICAL_ACTION_H_
+#define BASE_IOS_SCOPED_CRITICAL_ACTION_H_
+
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+namespace ios {
+
+// This class attempts to allow the application to continue to run for a period
+// of time after it transitions to the background. The construction of an
+// instance of this class marks the beginning of a task that needs background
+// running time when the application is moved to the background and the
+// destruction marks the end of such a task.
+//
+// Note there is no guarantee that the task will continue to finish when the
+// application is moved to the background.
+//
+// This class should be used at times where leaving a task unfinished might be
+// detrimental to user experience. For example, it should be used to ensure that
+// the application has enough time to save important data or at least attempt to
+// save such data.
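+//
+// Example usage (an illustrative sketch; SaveImportantData() is hypothetical):
+//   void FlushCriticalState() {
+//     base::ios::ScopedCriticalAction keep_alive;
+//     SaveImportantData();  // May keep running after the app is backgrounded.
+//   }  // |keep_alive| is destroyed here, ending the background task.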
+class ScopedCriticalAction {
+ public:
+  ScopedCriticalAction();
+  ~ScopedCriticalAction();
+
+ private:
+  // Core logic; ScopedCriticalAction should not be reference counted so
+  // that it follows the normal pattern of stack-allocating ScopedFoo objects,
+  // but the expiration handler needs to have a reference counted object to
+  // refer to.
+  class Core : public base::RefCountedThreadSafe<Core> {
+   public:
+    Core();
+
+    // Informs the OS that the background task has started. This is a
+    // static method to ensure that the instance has a non-zero refcount.
+    static void StartBackgroundTask(scoped_refptr<Core> core);
+    // Informs the OS that the background task has completed. This is a
+    // static method to ensure that the instance has a non-zero refcount.
+    static void EndBackgroundTask(scoped_refptr<Core> core);
+
+   private:
+    friend base::RefCountedThreadSafe<Core>;
+    ~Core();
+
+    // |UIBackgroundTaskIdentifier| returned by
+    // |beginBackgroundTaskWithExpirationHandler:| when marking the beginning of
+    // a long-running background task. It is defined as an |unsigned int|
+    // instead of a |UIBackgroundTaskIdentifier| so this class can be used in
+    // .cc files.
+    unsigned int background_task_id_;
+    Lock background_task_id_lock_;
+
+    DISALLOW_COPY_AND_ASSIGN(Core);
+  };
+
+  // The instance of the core that drives the background task.
+  scoped_refptr<Core> core_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedCriticalAction);
+};
+
+}  // namespace ios
+}  // namespace base
+
+#endif  // BASE_IOS_SCOPED_CRITICAL_ACTION_H_
diff --git a/base/ios/scoped_critical_action.mm b/base/ios/scoped_critical_action.mm
new file mode 100644
index 0000000..dbfbd45
--- /dev/null
+++ b/base/ios/scoped_critical_action.mm
@@ -0,0 +1,77 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/ios/scoped_critical_action.h"
+
+#import <UIKit/UIKit.h>
+
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+namespace ios {
+
+ScopedCriticalAction::ScopedCriticalAction()
+    : core_(MakeRefCounted<ScopedCriticalAction::Core>()) {
+  ScopedCriticalAction::Core::StartBackgroundTask(core_);
+}
+
+ScopedCriticalAction::~ScopedCriticalAction() {
+  ScopedCriticalAction::Core::EndBackgroundTask(core_);
+}
+
+ScopedCriticalAction::Core::Core()
+    : background_task_id_(UIBackgroundTaskInvalid) {}
+
+ScopedCriticalAction::Core::~Core() {
+  DCHECK_EQ(background_task_id_, UIBackgroundTaskInvalid);
+}
+
+// This implementation calls |beginBackgroundTaskWithExpirationHandler:| when
+// instantiated and |endBackgroundTask:| when destroyed, creating a scope whose
+// execution will continue (temporarily) even after the app is backgrounded.
+// static
+void ScopedCriticalAction::Core::StartBackgroundTask(scoped_refptr<Core> core) {
+  UIApplication* application = [UIApplication sharedApplication];
+  if (!application) {
+    return;
+  }
+
+  core->background_task_id_ =
+      [application beginBackgroundTaskWithExpirationHandler:^{
+        DLOG(WARNING) << "Background task with id " << core->background_task_id_
+                      << " expired.";
+        // Note if |endBackgroundTask:| is not called for each task before time
+        // expires, the system kills the application.
+        EndBackgroundTask(core);
+      }];
+
+  if (core->background_task_id_ == UIBackgroundTaskInvalid) {
+    DLOG(WARNING)
+        << "beginBackgroundTaskWithExpirationHandler: returned an invalid ID";
+  } else {
+    VLOG(3) << "Beginning background task with id "
+            << core->background_task_id_;
+  }
+}
+
+// static
+void ScopedCriticalAction::Core::EndBackgroundTask(scoped_refptr<Core> core) {
+  UIBackgroundTaskIdentifier task_id;
+  {
+    AutoLock lock_scope(core->background_task_id_lock_);
+    if (core->background_task_id_ == UIBackgroundTaskInvalid) {
+      return;
+    }
+    task_id = core->background_task_id_;
+    core->background_task_id_ = UIBackgroundTaskInvalid;
+  }
+
+  VLOG(3) << "Ending background task with id " << task_id;
+  [[UIApplication sharedApplication] endBackgroundTask:task_id];
+}
+
+}  // namespace ios
+}  // namespace base
diff --git a/base/ios/weak_nsobject.h b/base/ios/weak_nsobject.h
new file mode 100644
index 0000000..498cdee
--- /dev/null
+++ b/base/ios/weak_nsobject.h
@@ -0,0 +1,187 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_IOS_WEAK_NSOBJECT_H_
+#define BASE_IOS_WEAK_NSOBJECT_H_
+
+#import <Foundation/Foundation.h>
+#import <objc/runtime.h>
+
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/threading/thread_checker.h"
+
+// WeakNSObject<> is patterned after scoped_nsobject<>, but instead of
+// maintaining ownership of an NSObject subclass object, it will nil itself out
+// when the object is deallocated.
+//
+// WeakNSProtocol<> has the same behavior as WeakNSObject, but can be used
+// with protocols.
+//
+// Example usage (base::WeakNSObject<T>):
+//   scoped_nsobject<Foo> foo([[Foo alloc] init]);
+//   WeakNSObject<Foo> weak_foo;  // No pointer
+//   weak_foo.reset(foo);  // Now a weak reference is kept.
+//   [weak_foo description];  // Returns [foo description].
+//   foo.reset();  // The reference is released.
+//   [weak_foo description];  // Returns nil, as weak_foo is pointing to nil.
+//
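+// Example usage (base::WeakNSProtocol<P>, an illustrative sketch; assume
+// |delegate| conforms to NSCacheDelegate):
+//   WeakNSProtocol<id<NSCacheDelegate>> weak_delegate(delegate);
+//   weak_delegate.get();  // Returns nil once |delegate| is deallocated.
+//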
+//
+// Implementation-wise, a WeakNSObject keeps a reference to a refcounted
+// WeakContainer. There is one unique instance of a WeakContainer per watched
+// NSObject; this relationship is maintained via the Objective-C associated
+// object API, indirectly through an Objective-C CRBWeakNSProtocolSentinel
+// class.
+//
+// Threading restrictions:
+// - Several WeakNSObject pointing to the same underlying object must all be
+//   created and dereferenced on the same thread;
+// - thread safety is enforced by the implementation, except in two cases:
+//   (1) it is allowed to copy a WeakNSObject on a different thread. However,
+//       that copy must return to the original thread before being dereferenced,
+//   (2) it is allowed to destroy a WeakNSObject on any thread;
+// - the implementation assumes that the tracked object will be released on the
+//   same thread that the WeakNSObject is created on.
+namespace base {
+
+// WeakContainer keeps a weak pointer to an object and clears it when it
+// receives nullify() from the object's sentinel.
+class WeakContainer : public base::RefCountedThreadSafe<WeakContainer> {
+ public:
+  explicit WeakContainer(id object);
+
+  id object() {
+    DCHECK(checker_.CalledOnValidThread());
+    return object_;
+  }
+
+  void nullify() {
+    DCHECK(checker_.CalledOnValidThread());
+    object_ = nil;
+  }
+
+ private:
+  friend base::RefCountedThreadSafe<WeakContainer>;
+  ~WeakContainer();
+  base::ThreadChecker checker_;
+  __unsafe_unretained id object_;
+};
+
+}  // namespace base
+
+// Sentinel for observing the object contained in the weak pointer. The object
+// will be deleted when the weak object is deleted and will notify its
+// container.
+@interface CRBWeakNSProtocolSentinel : NSObject
+// Returns the only associated container for this object. There can be only
+// one. Will return null if |object| is nil.
++ (scoped_refptr<base::WeakContainer>)containerForObject:(id)object;
+@end
+
+namespace base {
+
+// Base class for all WeakNSObject derivatives.
+template <typename NST>
+class WeakNSProtocol {
+ public:
+  explicit WeakNSProtocol(NST object = nil) {
+    container_ = [CRBWeakNSProtocolSentinel containerForObject:object];
+  }
+
+  WeakNSProtocol(const WeakNSProtocol<NST>& that) {
+    // A WeakNSProtocol object can be copied on one thread and used on
+    // another.
+    checker_.DetachFromThread();
+    container_ = that.container_;
+  }
+
+  ~WeakNSProtocol() {
+    // A WeakNSProtocol object can be used on one thread and released on
+    // another. This is not the case for the contained object.
+    checker_.DetachFromThread();
+  }
+
+  void reset(NST object = nil) {
+    DCHECK(checker_.CalledOnValidThread());
+    container_ = [CRBWeakNSProtocolSentinel containerForObject:object];
+  }
+
+  NST get() const {
+    DCHECK(checker_.CalledOnValidThread());
+    if (!container_.get())
+      return nil;
+    return container_->object();
+  }
+
+  WeakNSProtocol& operator=(const WeakNSProtocol<NST>& that) {
+    // A WeakNSProtocol object can be copied on one thread and used on
+    // another.
+    checker_.DetachFromThread();
+    container_ = that.container_;
+    return *this;
+  }
+
+  bool operator==(NST that) const {
+    DCHECK(checker_.CalledOnValidThread());
+    return get() == that;
+  }
+
+  bool operator!=(NST that) const {
+    DCHECK(checker_.CalledOnValidThread());
+    return get() != that;
+  }
+
+  operator NST() const {
+    DCHECK(checker_.CalledOnValidThread());
+    return get();
+  }
+
+ private:
+  // Refcounted reference to the container tracking the Objective-C object
+  // this class encapsulates.
+  scoped_refptr<base::WeakContainer> container_;
+  base::ThreadChecker checker_;
+};
+
+// Free functions
+template <class NST>
+bool operator==(NST p1, const WeakNSProtocol<NST>& p2) {
+  return p1 == p2.get();
+}
+
+template <class NST>
+bool operator!=(NST p1, const WeakNSProtocol<NST>& p2) {
+  return p1 != p2.get();
+}
+
+template <typename NST>
+class WeakNSObject : public WeakNSProtocol<NST*> {
+ public:
+  explicit WeakNSObject(NST* object = nil) : WeakNSProtocol<NST*>(object) {}
+
+  WeakNSObject(const WeakNSObject<NST>& that) : WeakNSProtocol<NST*>(that) {}
+
+  WeakNSObject& operator=(const WeakNSObject<NST>& that) {
+    WeakNSProtocol<NST*>::operator=(that);
+    return *this;
+  }
+};
+
+// Specialization to make WeakNSObject<id> work.
+template <>
+class WeakNSObject<id> : public WeakNSProtocol<id> {
+ public:
+  explicit WeakNSObject(id object = nil) : WeakNSProtocol<id>(object) {}
+
+  WeakNSObject(const WeakNSObject<id>& that) : WeakNSProtocol<id>(that) {}
+
+  WeakNSObject& operator=(const WeakNSObject<id>& that) {
+    WeakNSProtocol<id>::operator=(that);
+    return *this;
+  }
+};
+
+}  // namespace base
+
+#endif  // BASE_IOS_WEAK_NSOBJECT_H_
diff --git a/base/ios/weak_nsobject.mm b/base/ios/weak_nsobject.mm
new file mode 100644
index 0000000..c017b1d
--- /dev/null
+++ b/base/ios/weak_nsobject.mm
@@ -0,0 +1,69 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/ios/weak_nsobject.h"
+
+#include "base/mac/scoped_nsautorelease_pool.h"
+#include "base/mac/scoped_nsobject.h"
+
+namespace {
+// The key needed by objc_setAssociatedObject.
+char sentinelObserverKey_;
+}  // namespace
+
+namespace base {
+
+WeakContainer::WeakContainer(id object) : object_(object) {}
+
+WeakContainer::~WeakContainer() {}
+
+}  // namespace base
+
+@interface CRBWeakNSProtocolSentinel ()
+// Container to notify on dealloc.
+@property(readonly, assign) scoped_refptr<base::WeakContainer> container;
+// Designated initializer.
+- (id)initWithContainer:(scoped_refptr<base::WeakContainer>)container;
+@end
+
+@implementation CRBWeakNSProtocolSentinel
+
+@synthesize container = container_;
+
++ (scoped_refptr<base::WeakContainer>)containerForObject:(id)object {
+  if (object == nil)
+    return nullptr;
+  // The autoreleasePool is needed here as the call to objc_getAssociatedObject
+  // returns an autoreleased object which is better released sooner than later.
+  base::mac::ScopedNSAutoreleasePool pool;
+  CRBWeakNSProtocolSentinel* sentinel =
+      objc_getAssociatedObject(object, &sentinelObserverKey_);
+  if (!sentinel) {
+    base::scoped_nsobject<CRBWeakNSProtocolSentinel> newSentinel(
+        [[CRBWeakNSProtocolSentinel alloc]
+            initWithContainer:new base::WeakContainer(object)]);
+    sentinel = newSentinel;
+    objc_setAssociatedObject(object, &sentinelObserverKey_, sentinel,
+                             OBJC_ASSOCIATION_RETAIN);
+    // The retain count is 2. One retain is due to the alloc, the other to the
+    // association with the weak object.
+    DCHECK_EQ(2u, [sentinel retainCount]);
+  }
+  return [sentinel container];
+}
+
+- (id)initWithContainer:(scoped_refptr<base::WeakContainer>)container {
+  DCHECK(container.get());
+  self = [super init];
+  if (self)
+    container_ = container;
+  return self;
+}
+
+- (void)dealloc {
+  self.container->nullify();
+  [super dealloc];
+}
+
+@end
diff --git a/base/ios/weak_nsobject_unittest.mm b/base/ios/weak_nsobject_unittest.mm
new file mode 100644
index 0000000..ba85217
--- /dev/null
+++ b/base/ios/weak_nsobject_unittest.mm
@@ -0,0 +1,140 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/bind.h"
+#include "base/ios/weak_nsobject.h"
+#include "base/mac/scoped_nsobject.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
+#include "base/threading/thread.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+TEST(WeakNSObjectTest, WeakNSObject) {
+  scoped_nsobject<NSObject> p1([[NSObject alloc] init]);
+  WeakNSObject<NSObject> w1(p1);
+  EXPECT_TRUE(w1);
+  p1.reset();
+  EXPECT_FALSE(w1);
+}
+
+TEST(WeakNSObjectTest, MultipleWeakNSObject) {
+  scoped_nsobject<NSObject> p1([[NSObject alloc] init]);
+  WeakNSObject<NSObject> w1(p1);
+  WeakNSObject<NSObject> w2(w1);
+  EXPECT_TRUE(w1);
+  EXPECT_TRUE(w2);
+  EXPECT_TRUE(w1.get() == w2.get());
+  p1.reset();
+  EXPECT_FALSE(w1);
+  EXPECT_FALSE(w2);
+}
+
+TEST(WeakNSObjectTest, WeakNSObjectDies) {
+  scoped_nsobject<NSObject> p1([[NSObject alloc] init]);
+  {
+    WeakNSObject<NSObject> w1(p1);
+    EXPECT_TRUE(w1);
+  }
+}
+
+TEST(WeakNSObjectTest, WeakNSObjectReset) {
+  scoped_nsobject<NSObject> p1([[NSObject alloc] init]);
+  WeakNSObject<NSObject> w1(p1);
+  EXPECT_TRUE(w1);
+  w1.reset();
+  EXPECT_FALSE(w1);
+  EXPECT_TRUE(p1);
+  EXPECT_TRUE([p1 description]);
+}
+
+TEST(WeakNSObjectTest, WeakNSObjectResetWithObject) {
+  scoped_nsobject<NSObject> p1([[NSObject alloc] init]);
+  scoped_nsobject<NSObject> p2([[NSObject alloc] init]);
+  WeakNSObject<NSObject> w1(p1);
+  EXPECT_TRUE(w1);
+  w1.reset(p2);
+  EXPECT_TRUE(w1);
+  EXPECT_TRUE([p1 description]);
+  EXPECT_TRUE([p2 description]);
+}
+
+TEST(WeakNSObjectTest, WeakNSObjectEmpty) {
+  scoped_nsobject<NSObject> p1([[NSObject alloc] init]);
+  WeakNSObject<NSObject> w1;
+  EXPECT_FALSE(w1);
+  w1.reset(p1);
+  EXPECT_TRUE(w1);
+  p1.reset();
+  EXPECT_FALSE(w1);
+}
+
+TEST(WeakNSObjectTest, WeakNSObjectCopy) {
+  scoped_nsobject<NSObject> p1([[NSObject alloc] init]);
+  WeakNSObject<NSObject> w1(p1);
+  WeakNSObject<NSObject> w2(w1);
+  EXPECT_TRUE(w1);
+  EXPECT_TRUE(w2);
+  p1.reset();
+  EXPECT_FALSE(w1);
+  EXPECT_FALSE(w2);
+}
+
+TEST(WeakNSObjectTest, WeakNSObjectAssignment) {
+  scoped_nsobject<NSObject> p1([[NSObject alloc] init]);
+  WeakNSObject<NSObject> w1(p1);
+  WeakNSObject<NSObject> w2;
+  EXPECT_FALSE(w2);
+  w2 = w1;
+  EXPECT_TRUE(w1);
+  EXPECT_TRUE(w2);
+  p1.reset();
+  EXPECT_FALSE(w1);
+  EXPECT_FALSE(w2);
+}
+
+// Touches |weak_data| by increasing its length by 1. Used to check that the
+// weak object can be dereferenced.
+void TouchWeakData(const WeakNSObject<NSMutableData>& weak_data) {
+  if (!weak_data)
+    return;
+  [weak_data increaseLengthBy:1];
+}
+
+// Makes a copy of |weak_object| on the current thread and posts a task to touch
+// the weak object on its original thread.
+void CopyWeakNSObjectAndPost(const WeakNSObject<NSMutableData>& weak_object,
+                             scoped_refptr<SingleThreadTaskRunner> runner) {
+  // Copy using constructor.
+  WeakNSObject<NSMutableData> weak_copy1(weak_object);
+  runner->PostTask(FROM_HERE, Bind(&TouchWeakData, weak_copy1));
+  // Copy using assignment operator.
+  WeakNSObject<NSMutableData> weak_copy2 = weak_object;
+  runner->PostTask(FROM_HERE, Bind(&TouchWeakData, weak_copy2));
+}
+
+// Tests that the weak object can be copied on a different thread.
+TEST(WeakNSObjectTest, WeakNSObjectCopyOnOtherThread) {
+  MessageLoop loop;
+  Thread other_thread("WeakNSObjectCopyOnOtherThread");
+  other_thread.Start();
+
+  scoped_nsobject<NSMutableData> data([[NSMutableData alloc] init]);
+  WeakNSObject<NSMutableData> weak(data);
+
+  scoped_refptr<SingleThreadTaskRunner> runner = loop.task_runner();
+  other_thread.task_runner()->PostTask(
+      FROM_HERE, Bind(&CopyWeakNSObjectAndPost, weak, runner));
+  other_thread.Stop();
+  RunLoop().RunUntilIdle();
+
+  // Check that TouchWeakData was called and the object touched twice.
+  EXPECT_EQ(2u, [data length]);
+}
+
+}  // namespace
+}  // namespace base
diff --git a/base/json/OWNERS b/base/json/OWNERS
new file mode 100644
index 0000000..14fce2a
--- /dev/null
+++ b/base/json/OWNERS
@@ -0,0 +1 @@
+rsesek@chromium.org
diff --git a/base/json/json_correctness_fuzzer.cc b/base/json/json_correctness_fuzzer.cc
new file mode 100644
index 0000000..1f32d8c
--- /dev/null
+++ b/base/json/json_correctness_fuzzer.cc
@@ -0,0 +1,63 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// A fuzzer that checks the correctness of the JSON parser/writer.
+// The fuzzer input is parsed and re-serialized twice, so that output written
+// from the first pass (presumably valid JSON) is parsed and written again;
+// the two written forms must match.
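+//
+// Informally, the property checked is (a pseudocode sketch):
+//   value1 = Read(input);  out1 = Write(value1);
+//   value2 = Read(out1);   out2 = Write(value2);
+//   assert(out1 == out2);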
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <string>
+
+#include "base/json/json_reader.h"
+#include "base/json/json_writer.h"
+#include "base/json/string_escape.h"
+#include "base/logging.h"
+#include "base/values.h"
+
+// Entry point for libFuzzer.
+// We will use the last byte of data as parsing options.
+// The rest will be used as text input to the parser.
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+  if (size < 2)
+    return 0;
+
+  int error_code, error_line, error_column;
+  std::string error_message;
+
+  // Create a copy of input buffer, as otherwise we don't catch
+  // overflow that touches the last byte (which is used in options).
+  std::unique_ptr<char[]> input(new char[size - 1]);
+  memcpy(input.get(), data, size - 1);
+
+  base::StringPiece input_string(input.get(), size - 1);
+
+  const int options = data[size - 1];
+  auto parsed_value = base::JSONReader::ReadAndReturnError(
+      input_string, options, &error_code, &error_message, &error_line,
+      &error_column);
+  if (!parsed_value)
+    return 0;
+
+  std::string parsed_output;
+  bool b = base::JSONWriter::Write(*parsed_value, &parsed_output);
+  LOG_ASSERT(b);
+
+  auto double_parsed_value = base::JSONReader::ReadAndReturnError(
+      parsed_output, options, &error_code, &error_message, &error_line,
+      &error_column);
+  LOG_ASSERT(double_parsed_value);
+  std::string double_parsed_output;
+  bool b2 =
+      base::JSONWriter::Write(*double_parsed_value, &double_parsed_output);
+  LOG_ASSERT(b2);
+
+  LOG_ASSERT(parsed_output == double_parsed_output)
+      << "Parser/Writer mismatch."
+      << "\nInput=" << base::GetQuotedJSONString(parsed_output)
+      << "\nOutput=" << base::GetQuotedJSONString(double_parsed_output);
+
+  return 0;
+}
diff --git a/base/json/json_file_value_serializer.cc b/base/json/json_file_value_serializer.cc
new file mode 100644
index 0000000..a7c68c5
--- /dev/null
+++ b/base/json/json_file_value_serializer.cc
@@ -0,0 +1,115 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/json/json_file_value_serializer.h"
+
+#include "base/files/file_util.h"
+#include "base/json/json_string_value_serializer.h"
+#include "base/logging.h"
+#include "build/build_config.h"
+
+using base::FilePath;
+
+const char JSONFileValueDeserializer::kAccessDenied[] = "Access denied.";
+const char JSONFileValueDeserializer::kCannotReadFile[] = "Can't read file.";
+const char JSONFileValueDeserializer::kFileLocked[] = "File locked.";
+const char JSONFileValueDeserializer::kNoSuchFile[] = "File doesn't exist.";
+
+JSONFileValueSerializer::JSONFileValueSerializer(
+    const base::FilePath& json_file_path)
+    : json_file_path_(json_file_path) {
+}
+
+JSONFileValueSerializer::~JSONFileValueSerializer() = default;
+
+bool JSONFileValueSerializer::Serialize(const base::Value& root) {
+  return SerializeInternal(root, false);
+}
+
+bool JSONFileValueSerializer::SerializeAndOmitBinaryValues(
+    const base::Value& root) {
+  return SerializeInternal(root, true);
+}
+
+bool JSONFileValueSerializer::SerializeInternal(const base::Value& root,
+                                                bool omit_binary_values) {
+  std::string json_string;
+  JSONStringValueSerializer serializer(&json_string);
+  serializer.set_pretty_print(true);
+  bool result = omit_binary_values ?
+      serializer.SerializeAndOmitBinaryValues(root) :
+      serializer.Serialize(root);
+  if (!result)
+    return false;
+
+  int data_size = static_cast<int>(json_string.size());
+  if (base::WriteFile(json_file_path_, json_string.data(), data_size) !=
+      data_size)
+    return false;
+
+  return true;
+}
+
+JSONFileValueDeserializer::JSONFileValueDeserializer(
+    const base::FilePath& json_file_path,
+    int options)
+    : json_file_path_(json_file_path), options_(options), last_read_size_(0U) {}
+
+JSONFileValueDeserializer::~JSONFileValueDeserializer() = default;
+
+int JSONFileValueDeserializer::ReadFileToString(std::string* json_string) {
+  DCHECK(json_string);
+  if (!base::ReadFileToString(json_file_path_, json_string)) {
+#if defined(OS_WIN)
+    int error = ::GetLastError();
+    if (error == ERROR_SHARING_VIOLATION || error == ERROR_LOCK_VIOLATION) {
+      return JSON_FILE_LOCKED;
+    } else if (error == ERROR_ACCESS_DENIED) {
+      return JSON_ACCESS_DENIED;
+    }
+#endif
+    if (!base::PathExists(json_file_path_))
+      return JSON_NO_SUCH_FILE;
+    else
+      return JSON_CANNOT_READ_FILE;
+  }
+
+  last_read_size_ = json_string->size();
+  return JSON_NO_ERROR;
+}
+
+const char* JSONFileValueDeserializer::GetErrorMessageForCode(int error_code) {
+  switch (error_code) {
+    case JSON_NO_ERROR:
+      return "";
+    case JSON_ACCESS_DENIED:
+      return kAccessDenied;
+    case JSON_CANNOT_READ_FILE:
+      return kCannotReadFile;
+    case JSON_FILE_LOCKED:
+      return kFileLocked;
+    case JSON_NO_SUCH_FILE:
+      return kNoSuchFile;
+    default:
+      NOTREACHED();
+      return "";
+  }
+}
+
+std::unique_ptr<base::Value> JSONFileValueDeserializer::Deserialize(
+    int* error_code,
+    std::string* error_str) {
+  std::string json_string;
+  int error = ReadFileToString(&json_string);
+  if (error != JSON_NO_ERROR) {
+    if (error_code)
+      *error_code = error;
+    if (error_str)
+      *error_str = GetErrorMessageForCode(error);
+    return nullptr;
+  }
+
+  JSONStringValueDeserializer deserializer(json_string, options_);
+  return deserializer.Deserialize(error_code, error_str);
+}
diff --git a/base/json/json_file_value_serializer.h b/base/json/json_file_value_serializer.h
new file mode 100644
index 0000000..a93950a
--- /dev/null
+++ b/base/json/json_file_value_serializer.h
@@ -0,0 +1,103 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_JSON_JSON_FILE_VALUE_SERIALIZER_H_
+#define BASE_JSON_JSON_FILE_VALUE_SERIALIZER_H_
+
+#include <stddef.h>
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/files/file_path.h"
+#include "base/macros.h"
+#include "base/values.h"
+
+class BASE_EXPORT JSONFileValueSerializer : public base::ValueSerializer {
+ public:
+  // |json_file_path_| is the path of a file that will be destination of the
+  // serialization. The serializer will attempt to create the file at the
+  // specified location.
+  explicit JSONFileValueSerializer(const base::FilePath& json_file_path);
+
+  ~JSONFileValueSerializer() override;
+
+  // DO NOT USE except in unit tests to verify the file was written properly.
+  // We should never serialize directly to a file since this will block the
+  // thread. Instead, serialize to a string and write to the file you want on
+  // the file thread.
+  //
+  // Attempt to serialize the data structure represented by Value into
+  // JSON.  If the return value is true, the result will have been written
+  // into the file whose name was passed into the constructor.
+  bool Serialize(const base::Value& root) override;
+
+  // Equivalent to Serialize(root) except binary values are omitted from the
+  // output.
+  bool SerializeAndOmitBinaryValues(const base::Value& root);
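+  //
+  // Example usage (illustrative; |path| and |root| are hypothetical; per the
+  // warning above, serializing to a file is only appropriate in tests):
+  //   JSONFileValueSerializer serializer(path);
+  //   bool ok = serializer.Serialize(root);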
+
+ private:
+  bool SerializeInternal(const base::Value& root, bool omit_binary_values);
+
+  const base::FilePath json_file_path_;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(JSONFileValueSerializer);
+};
+
+class BASE_EXPORT JSONFileValueDeserializer : public base::ValueDeserializer {
+ public:
+  // |json_file_path_| is the path of a file that will be source of the
+  // deserialization. |options| is a bitmask of JSONParserOptions.
+  explicit JSONFileValueDeserializer(const base::FilePath& json_file_path,
+                                     int options = 0);
+
+  ~JSONFileValueDeserializer() override;
+
+  // Attempt to deserialize the data structure encoded in the file passed
+  // in to the constructor into a structure of Value objects.  If the return
+  // value is NULL, and if |error_code| is non-null, |error_code| will
+  // contain an integer error code (either JsonFileError or JsonParseError).
+  // If |error_message| is non-null, it will be filled in with a formatted
+  // error message including the location of the error if appropriate.
+  // The caller takes ownership of the returned value.
+  std::unique_ptr<base::Value> Deserialize(int* error_code,
+                                           std::string* error_message) override;
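+  //
+  // Example usage (illustrative; |path| is a hypothetical base::FilePath):
+  //   JSONFileValueDeserializer deserializer(path);
+  //   int error_code = 0;
+  //   std::string error_message;
+  //   std::unique_ptr<base::Value> value =
+  //       deserializer.Deserialize(&error_code, &error_message);
+  //   // |value| is null on failure; |error_code| then holds a JsonFileError
+  //   // or a JSONReader::JsonParseError.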
+
+  // This enum is designed to safely overlap with JSONReader::JsonParseError.
+  enum JsonFileError {
+    JSON_NO_ERROR = 0,
+    JSON_ACCESS_DENIED = 1000,
+    JSON_CANNOT_READ_FILE,
+    JSON_FILE_LOCKED,
+    JSON_NO_SUCH_FILE
+  };
+
+  // File-specific error messages that can be returned.
+  static const char kAccessDenied[];
+  static const char kCannotReadFile[];
+  static const char kFileLocked[];
+  static const char kNoSuchFile[];
+
+  // Convert an error code into an error message.  |error_code| is assumed to
+  // be a JsonFileError.
+  static const char* GetErrorMessageForCode(int error_code);
+
+  // Returns the size (in bytes) of the JSON string read from disk in the last
+  // successful |Deserialize()| call.
+  size_t get_last_read_size() const { return last_read_size_; }
+
+ private:
+  // A wrapper for ReadFileToString which returns a non-zero JsonFileError if
+  // there were file errors.
+  int ReadFileToString(std::string* json_string);
+
+  const base::FilePath json_file_path_;
+  const int options_;
+  size_t last_read_size_;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(JSONFileValueDeserializer);
+};
+
+#endif  // BASE_JSON_JSON_FILE_VALUE_SERIALIZER_H_
diff --git a/base/json/json_parser.cc b/base/json/json_parser.cc
new file mode 100644
index 0000000..dfe246c
--- /dev/null
+++ b/base/json/json_parser.cc
@@ -0,0 +1,755 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/json/json_parser.h"
+
+#include <cmath>
+#include <utility>
+#include <vector>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/strings/utf_string_conversion_utils.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/third_party/icu/icu_utf.h"
+#include "base/values.h"
+
+namespace base {
+namespace internal {
+
+namespace {
+
+const int32_t kExtendedASCIIStart = 0x80;
+
+// Simple class that checks for maximum recursion/"stack overflow."
+class StackMarker {
+ public:
+  StackMarker(int max_depth, int* depth)
+      : max_depth_(max_depth), depth_(depth) {
+    ++(*depth_);
+    DCHECK_LE(*depth_, max_depth_);
+  }
+  ~StackMarker() {
+    --(*depth_);
+  }
+
+  bool IsTooDeep() const { return *depth_ >= max_depth_; }
+
+ private:
+  const int max_depth_;
+  int* const depth_;
+
+  DISALLOW_COPY_AND_ASSIGN(StackMarker);
+};
+
+constexpr uint32_t kUnicodeReplacementPoint = 0xFFFD;
+
+}  // namespace
+
+// This is U+FFFD.
+const char kUnicodeReplacementString[] = "\xEF\xBF\xBD";
+
+JSONParser::JSONParser(int options, int max_depth)
+    : options_(options),
+      max_depth_(max_depth),
+      index_(0),
+      stack_depth_(0),
+      line_number_(0),
+      index_last_line_(0),
+      error_code_(JSONReader::JSON_NO_ERROR),
+      error_line_(0),
+      error_column_(0) {
+  CHECK_LE(max_depth, JSONReader::kStackMaxDepth);
+}
+
+JSONParser::~JSONParser() = default;
+
+Optional<Value> JSONParser::Parse(StringPiece input) {
+  input_ = input;
+  index_ = 0;
+  line_number_ = 1;
+  index_last_line_ = 0;
+
+  error_code_ = JSONReader::JSON_NO_ERROR;
+  error_line_ = 0;
+  error_column_ = 0;
+
+  // ICU and ReadUnicodeCharacter() use int32_t for lengths, so ensure
+  // that the index_ will not overflow when parsing.
+  if (!base::IsValueInRangeForNumericType<int32_t>(input.length())) {
+    ReportError(JSONReader::JSON_TOO_LARGE, 0);
+    return nullopt;
+  }
+
+  // When the input JSON string starts with a UTF-8 Byte-Order-Mark,
+  // advance the start position to avoid the ParseNextToken function
+  // mistreating a Unicode BOM as an invalid character and failing the parse.
+  ConsumeIfMatch("\xEF\xBB\xBF");
+
+  // Parse the first and any nested tokens.
+  Optional<Value> root(ParseNextToken());
+  if (!root)
+    return nullopt;
+
+  // Make sure the input stream is at an end.
+  if (GetNextToken() != T_END_OF_INPUT) {
+    ReportError(JSONReader::JSON_UNEXPECTED_DATA_AFTER_ROOT, 1);
+    return nullopt;
+  }
+
+  return root;
+}
+
+JSONReader::JsonParseError JSONParser::error_code() const {
+  return error_code_;
+}
+
+std::string JSONParser::GetErrorMessage() const {
+  return FormatErrorMessage(error_line_, error_column_,
+      JSONReader::ErrorCodeToString(error_code_));
+}
+
+int JSONParser::error_line() const {
+  return error_line_;
+}
+
+int JSONParser::error_column() const {
+  return error_column_;
+}
+
+// StringBuilder ///////////////////////////////////////////////////////////////
+
+JSONParser::StringBuilder::StringBuilder() : StringBuilder(nullptr) {}
+
+JSONParser::StringBuilder::StringBuilder(const char* pos)
+    : pos_(pos), length_(0) {}
+
+JSONParser::StringBuilder::~StringBuilder() = default;
+
+JSONParser::StringBuilder& JSONParser::StringBuilder::operator=(
+    StringBuilder&& other) = default;
+
+void JSONParser::StringBuilder::Append(uint32_t point) {
+  DCHECK(IsValidCharacter(point));
+
+  if (point < kExtendedASCIIStart && !string_) {
+    DCHECK_EQ(static_cast<char>(point), pos_[length_]);
+    ++length_;
+  } else {
+    Convert();
+    if (UNLIKELY(point == kUnicodeReplacementPoint)) {
+      string_->append(kUnicodeReplacementString);
+    } else {
+      WriteUnicodeCharacter(point, &*string_);
+    }
+  }
+}
+
+void JSONParser::StringBuilder::Convert() {
+  if (string_)
+    return;
+  string_.emplace(pos_, length_);
+}
+
+std::string JSONParser::StringBuilder::DestructiveAsString() {
+  if (string_)
+    return std::move(*string_);
+  return std::string(pos_, length_);
+}
+
+// JSONParser private //////////////////////////////////////////////////////////
+
+Optional<StringPiece> JSONParser::PeekChars(int count) {
+  if (static_cast<size_t>(index_) + count > input_.length())
+    return nullopt;
+  // Using StringPiece::substr() is significantly slower (according to
+  // base_perftests) than constructing a substring manually.
+  return StringPiece(input_.data() + index_, count);
+}
+
+Optional<char> JSONParser::PeekChar() {
+  Optional<StringPiece> chars = PeekChars(1);
+  if (chars)
+    return (*chars)[0];
+  return nullopt;
+}
+
+Optional<StringPiece> JSONParser::ConsumeChars(int count) {
+  Optional<StringPiece> chars = PeekChars(count);
+  if (chars)
+    index_ += count;
+  return chars;
+}
+
+Optional<char> JSONParser::ConsumeChar() {
+  Optional<StringPiece> chars = ConsumeChars(1);
+  if (chars)
+    return (*chars)[0];
+  return nullopt;
+}
+
+const char* JSONParser::pos() {
+  CHECK_LE(static_cast<size_t>(index_), input_.length());
+  return input_.data() + index_;
+}
+
+JSONParser::Token JSONParser::GetNextToken() {
+  EatWhitespaceAndComments();
+
+  Optional<char> c = PeekChar();
+  if (!c)
+    return T_END_OF_INPUT;
+
+  switch (*c) {
+    case '{':
+      return T_OBJECT_BEGIN;
+    case '}':
+      return T_OBJECT_END;
+    case '[':
+      return T_ARRAY_BEGIN;
+    case ']':
+      return T_ARRAY_END;
+    case '"':
+      return T_STRING;
+    case '0':
+    case '1':
+    case '2':
+    case '3':
+    case '4':
+    case '5':
+    case '6':
+    case '7':
+    case '8':
+    case '9':
+    case '-':
+      return T_NUMBER;
+    case 't':
+      return T_BOOL_TRUE;
+    case 'f':
+      return T_BOOL_FALSE;
+    case 'n':
+      return T_NULL;
+    case ',':
+      return T_LIST_SEPARATOR;
+    case ':':
+      return T_OBJECT_PAIR_SEPARATOR;
+    default:
+      return T_INVALID_TOKEN;
+  }
+}
+
+void JSONParser::EatWhitespaceAndComments() {
+  while (Optional<char> c = PeekChar()) {
+    switch (*c) {
+      case '\r':
+      case '\n':
+        index_last_line_ = index_;
+        // Don't increment line_number_ twice for "\r\n".
+        if (!(c == '\n' && index_ > 0 && input_[index_ - 1] == '\r')) {
+          ++line_number_;
+        }
+        FALLTHROUGH;
+      case ' ':
+      case '\t':
+        ConsumeChar();
+        break;
+      case '/':
+        if (!EatComment())
+          return;
+        break;
+      default:
+        return;
+    }
+  }
+}
+
+bool JSONParser::EatComment() {
+  Optional<StringPiece> comment_start = ConsumeChars(2);
+  if (!comment_start)
+    return false;
+
+  if (comment_start == "//") {
+    // Single line comment, read to newline.
+    while (Optional<char> c = PeekChar()) {
+      if (c == '\n' || c == '\r')
+        return true;
+      ConsumeChar();
+    }
+  } else if (comment_start == "/*") {
+    char previous_char = '\0';
+    // Block comment, read until end marker.
+    while (Optional<char> c = PeekChar()) {
+      if (previous_char == '*' && c == '/') {
+        // EatWhitespaceAndComments will inspect pos(), which will still be on
+        // the last / of the comment, so advance once more (which may also be
+        // end of input).
+        ConsumeChar();
+        return true;
+      }
+      previous_char = *ConsumeChar();
+    }
+
+    // If the comment is unterminated, GetNextToken will report T_END_OF_INPUT.
+  }
+
+  return false;
+}
+
+Optional<Value> JSONParser::ParseNextToken() {
+  return ParseToken(GetNextToken());
+}
+
+Optional<Value> JSONParser::ParseToken(Token token) {
+  switch (token) {
+    case T_OBJECT_BEGIN:
+      return ConsumeDictionary();
+    case T_ARRAY_BEGIN:
+      return ConsumeList();
+    case T_STRING:
+      return ConsumeString();
+    case T_NUMBER:
+      return ConsumeNumber();
+    case T_BOOL_TRUE:
+    case T_BOOL_FALSE:
+    case T_NULL:
+      return ConsumeLiteral();
+    default:
+      ReportError(JSONReader::JSON_UNEXPECTED_TOKEN, 1);
+      return nullopt;
+  }
+}
+
+Optional<Value> JSONParser::ConsumeDictionary() {
+  if (ConsumeChar() != '{') {
+    ReportError(JSONReader::JSON_UNEXPECTED_TOKEN, 1);
+    return nullopt;
+  }
+
+  StackMarker depth_check(max_depth_, &stack_depth_);
+  if (depth_check.IsTooDeep()) {
+    ReportError(JSONReader::JSON_TOO_MUCH_NESTING, 0);
+    return nullopt;
+  }
+
+  std::vector<Value::DictStorage::value_type> dict_storage;
+
+  Token token = GetNextToken();
+  while (token != T_OBJECT_END) {
+    if (token != T_STRING) {
+      ReportError(JSONReader::JSON_UNQUOTED_DICTIONARY_KEY, 1);
+      return nullopt;
+    }
+
+    // First consume the key.
+    StringBuilder key;
+    if (!ConsumeStringRaw(&key)) {
+      return nullopt;
+    }
+
+    // Read the separator.
+    token = GetNextToken();
+    if (token != T_OBJECT_PAIR_SEPARATOR) {
+      ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
+      return nullopt;
+    }
+
+    // The next token is the value. Ownership transfers to |dict|.
+    ConsumeChar();
+    Optional<Value> value = ParseNextToken();
+    if (!value) {
+      // ReportError from deeper level.
+      return nullopt;
+    }
+
+    dict_storage.emplace_back(key.DestructiveAsString(),
+                              std::make_unique<Value>(std::move(*value)));
+
+    token = GetNextToken();
+    if (token == T_LIST_SEPARATOR) {
+      ConsumeChar();
+      token = GetNextToken();
+      if (token == T_OBJECT_END && !(options_ & JSON_ALLOW_TRAILING_COMMAS)) {
+        ReportError(JSONReader::JSON_TRAILING_COMMA, 1);
+        return nullopt;
+      }
+    } else if (token != T_OBJECT_END) {
+      ReportError(JSONReader::JSON_SYNTAX_ERROR, 0);
+      return nullopt;
+    }
+  }
+
+  ConsumeChar();  // Closing '}'.
+
+  return Value(Value::DictStorage(std::move(dict_storage), KEEP_LAST_OF_DUPES));
+}
+
+Optional<Value> JSONParser::ConsumeList() {
+  if (ConsumeChar() != '[') {
+    ReportError(JSONReader::JSON_UNEXPECTED_TOKEN, 1);
+    return nullopt;
+  }
+
+  StackMarker depth_check(max_depth_, &stack_depth_);
+  if (depth_check.IsTooDeep()) {
+    ReportError(JSONReader::JSON_TOO_MUCH_NESTING, 0);
+    return nullopt;
+  }
+
+  Value::ListStorage list_storage;
+
+  Token token = GetNextToken();
+  while (token != T_ARRAY_END) {
+    Optional<Value> item = ParseToken(token);
+    if (!item) {
+      // ReportError from deeper level.
+      return nullopt;
+    }
+
+    list_storage.push_back(std::move(*item));
+
+    token = GetNextToken();
+    if (token == T_LIST_SEPARATOR) {
+      ConsumeChar();
+      token = GetNextToken();
+      if (token == T_ARRAY_END && !(options_ & JSON_ALLOW_TRAILING_COMMAS)) {
+        ReportError(JSONReader::JSON_TRAILING_COMMA, 1);
+        return nullopt;
+      }
+    } else if (token != T_ARRAY_END) {
+      ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
+      return nullopt;
+    }
+  }
+
+  ConsumeChar();  // Closing ']'.
+
+  return Value(std::move(list_storage));
+}
+
+Optional<Value> JSONParser::ConsumeString() {
+  StringBuilder string;
+  if (!ConsumeStringRaw(&string))
+    return nullopt;
+
+  return Value(string.DestructiveAsString());
+}
+
+bool JSONParser::ConsumeStringRaw(StringBuilder* out) {
+  if (ConsumeChar() != '"') {
+    ReportError(JSONReader::JSON_UNEXPECTED_TOKEN, 1);
+    return false;
+  }
+
+  // StringBuilder will internally build a StringPiece unless a UTF-16
+  // conversion occurs, at which point it will perform a copy into a
+  // std::string.
+  StringBuilder string(pos());
+
+  while (PeekChar()) {
+    uint32_t next_char = 0;
+    if (!ReadUnicodeCharacter(input_.data(),
+                              static_cast<int32_t>(input_.length()),
+                              &index_,
+                              &next_char) ||
+        !IsValidCharacter(next_char)) {
+      if ((options_ & JSON_REPLACE_INVALID_CHARACTERS) == 0) {
+        ReportError(JSONReader::JSON_UNSUPPORTED_ENCODING, 1);
+        return false;
+      }
+      ConsumeChar();
+      string.Append(kUnicodeReplacementPoint);
+      continue;
+    }
+
+    if (next_char == '"') {
+      ConsumeChar();
+      *out = std::move(string);
+      return true;
+    } else if (next_char != '\\') {
+      // If this character is not an escape sequence...
+      ConsumeChar();
+      string.Append(next_char);
+    } else {
+      // And if it is an escape sequence, the input string will be adjusted
+      // (either by combining the two characters of an encoded escape sequence,
+      // or with a UTF conversion), so using StringPiece isn't possible -- force
+      // a conversion.
+      string.Convert();
+
+      // Read past the escape '\' and ensure there's a character following.
+      Optional<StringPiece> escape_sequence = ConsumeChars(2);
+      if (!escape_sequence) {
+        ReportError(JSONReader::JSON_INVALID_ESCAPE, 0);
+        return false;
+      }
+
+      switch ((*escape_sequence)[1]) {
+        // Allowed escape sequences:
+        case 'x': {  // UTF-8 sequence.
+          // UTF-8 \x escape sequences are not allowed in the spec, but they
+          // are supported here for backwards-compatibility with the old parser.
+          escape_sequence = ConsumeChars(2);
+          if (!escape_sequence) {
+            ReportError(JSONReader::JSON_INVALID_ESCAPE, -2);
+            return false;
+          }
+
+          int hex_digit = 0;
+          if (!HexStringToInt(*escape_sequence, &hex_digit) ||
+              !IsValidCharacter(hex_digit)) {
+            ReportError(JSONReader::JSON_INVALID_ESCAPE, -2);
+            return false;
+          }
+
+          string.Append(hex_digit);
+          break;
+        }
+        case 'u': {  // UTF-16 sequence.
+          // UTF-16 escape sequences are of the form \uXXXX.
+          uint32_t code_point;
+          if (!DecodeUTF16(&code_point)) {
+            ReportError(JSONReader::JSON_INVALID_ESCAPE, 0);
+            return false;
+          }
+          string.Append(code_point);
+          break;
+        }
+        case '"':
+          string.Append('"');
+          break;
+        case '\\':
+          string.Append('\\');
+          break;
+        case '/':
+          string.Append('/');
+          break;
+        case 'b':
+          string.Append('\b');
+          break;
+        case 'f':
+          string.Append('\f');
+          break;
+        case 'n':
+          string.Append('\n');
+          break;
+        case 'r':
+          string.Append('\r');
+          break;
+        case 't':
+          string.Append('\t');
+          break;
+        case 'v':  // Not listed as a valid escape sequence in the RFC.
+          string.Append('\v');
+          break;
+        // All other escape sequences are illegal.
+        default:
+          ReportError(JSONReader::JSON_INVALID_ESCAPE, 0);
+          return false;
+      }
+    }
+  }
+
+  ReportError(JSONReader::JSON_SYNTAX_ERROR, 0);
+  return false;
+}
+
+// Entry is at the first X in \uXXXX.
+bool JSONParser::DecodeUTF16(uint32_t* out_code_point) {
+  Optional<StringPiece> escape_sequence = ConsumeChars(4);
+  if (!escape_sequence)
+    return false;
+
+  // Consume the UTF-16 code unit, which may be a high surrogate.
+  int code_unit16_high = 0;
+  if (!HexStringToInt(*escape_sequence, &code_unit16_high))
+    return false;
+
+  // If this is a high surrogate, consume the next code unit to get the
+  // low surrogate.
+  if (CBU16_IS_SURROGATE(code_unit16_high)) {
+    // Make sure this is the high surrogate. If not, it's an encoding
+    // error.
+    if (!CBU16_IS_SURROGATE_LEAD(code_unit16_high))
+      return false;
+
+    // Make sure the input has another |\u| escape from which to consume the
+    // low surrogate.
+    if (!ConsumeIfMatch("\\u"))
+      return false;
+
+    escape_sequence = ConsumeChars(4);
+    if (!escape_sequence)
+      return false;
+
+    int code_unit16_low = 0;
+    if (!HexStringToInt(*escape_sequence, &code_unit16_low))
+      return false;
+
+    if (!CBU16_IS_TRAIL(code_unit16_low))
+      return false;
+
+    uint32_t code_point =
+        CBU16_GET_SUPPLEMENTARY(code_unit16_high, code_unit16_low);
+    if (!IsValidCharacter(code_point))
+      return false;
+
+    *out_code_point = code_point;
+  } else {
+    // Not a surrogate.
+    DCHECK(CBU16_IS_SINGLE(code_unit16_high));
+    if (!IsValidCharacter(code_unit16_high)) {
+      if ((options_ & JSON_REPLACE_INVALID_CHARACTERS) == 0) {
+        return false;
+      }
+      *out_code_point = kUnicodeReplacementPoint;
+      return true;
+    }
+
+    *out_code_point = code_unit16_high;
+  }
+
+  return true;
+}
+
+Optional<Value> JSONParser::ConsumeNumber() {
+  const char* num_start = pos();
+  const int start_index = index_;
+  int end_index = start_index;
+
+  if (PeekChar() == '-')
+    ConsumeChar();
+
+  if (!ReadInt(false)) {
+    ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
+    return nullopt;
+  }
+  end_index = index_;
+
+  // The optional fraction part.
+  if (PeekChar() == '.') {
+    ConsumeChar();
+    if (!ReadInt(true)) {
+      ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
+      return nullopt;
+    }
+    end_index = index_;
+  }
+
+  // Optional exponent part.
+  Optional<char> c = PeekChar();
+  if (c == 'e' || c == 'E') {
+    ConsumeChar();
+    if (PeekChar() == '-' || PeekChar() == '+') {
+      ConsumeChar();
+    }
+    if (!ReadInt(true)) {
+      ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
+      return nullopt;
+    }
+    end_index = index_;
+  }
+
+  // ReadInt is greedy because numbers have no easily detectable sentinel,
+  // so save off where the parser should be on exit (see Consume invariant at
+  // the top of the header), then make sure the next token is one which is
+  // valid.
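+  // For example, in |[1,2]| the digits of "1" end at the ',', which is itself
+  // the next token; the parser must exit wound to that comma.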
+  int exit_index = index_;
+
+  switch (GetNextToken()) {
+    case T_OBJECT_END:
+    case T_ARRAY_END:
+    case T_LIST_SEPARATOR:
+    case T_END_OF_INPUT:
+      break;
+    default:
+      ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
+      return nullopt;
+  }
+
+  index_ = exit_index;
+
+  StringPiece num_string(num_start, end_index - start_index);
+
+  int num_int;
+  if (StringToInt(num_string, &num_int))
+    return Value(num_int);
+
+  double num_double;
+  if (StringToDouble(num_string.as_string(), &num_double) &&
+      std::isfinite(num_double)) {
+    return Value(num_double);
+  }
+
+  return nullopt;
+}
+
+bool JSONParser::ReadInt(bool allow_leading_zeros) {
+  size_t len = 0;
+  char first = 0;
+
+  while (Optional<char> c = PeekChar()) {
+    if (!IsAsciiDigit(c))
+      break;
+
+    if (len == 0)
+      first = *c;
+
+    ++len;
+    ConsumeChar();
+  }
+
+  if (len == 0)
+    return false;
+
+  if (!allow_leading_zeros && len > 1 && first == '0')
+    return false;
+
+  return true;
+}
+
+Optional<Value> JSONParser::ConsumeLiteral() {
+  if (ConsumeIfMatch("true")) {
+    return Value(true);
+  } else if (ConsumeIfMatch("false")) {
+    return Value(false);
+  } else if (ConsumeIfMatch("null")) {
+    return Value(Value::Type::NONE);
+  } else {
+    ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
+    return nullopt;
+  }
+}
+
+bool JSONParser::ConsumeIfMatch(StringPiece match) {
+  if (match == PeekChars(match.size())) {
+    ConsumeChars(match.size());
+    return true;
+  }
+  return false;
+}
+
+void JSONParser::ReportError(JSONReader::JsonParseError code,
+                             int column_adjust) {
+  error_code_ = code;
+  error_line_ = line_number_;
+  error_column_ = index_ - index_last_line_ + column_adjust;
+}
+
+// static
+std::string JSONParser::FormatErrorMessage(int line, int column,
+                                           const std::string& description) {
+  if (line || column) {
+    return StringPrintf("Line: %i, column: %i, %s",
+        line, column, description.c_str());
+  }
+  return description;
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/json/json_parser.h b/base/json/json_parser.h
new file mode 100644
index 0000000..a4dd2ba
--- /dev/null
+++ b/base/json/json_parser.h
@@ -0,0 +1,260 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_JSON_JSON_PARSER_H_
+#define BASE_JSON_JSON_PARSER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/gtest_prod_util.h"
+#include "base/json/json_reader.h"
+#include "base/macros.h"
+#include "base/optional.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+class Value;
+
+namespace internal {
+
+class JSONParserTest;
+
+// The implementation behind the JSONReader interface. This class is not meant
+// to be used directly; it encapsulates logic that need not be exposed publicly.
+//
+// This parser guarantees O(n) time through the input string. Iteration happens
+// on the byte level, with the functions ConsumeChars() and ConsumeChar(). The
+// conversion from byte to JSON token happens without advancing the parser in
+// GetNextToken()/ParseToken(); that is, tokenization operates on the current
+// parser position without advancing it.
+//
+// Built on top of these are a family of Consume functions that iterate
+// internally. Invariant: on entry of a Consume function, the parser is wound
+// to the first byte of a valid JSON token. On exit, it is on the first byte
+// after the token that was just consumed, which would likely be the first byte
+// of the next token.
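+//
+// For example (illustrative): when ConsumeDictionary() is entered on the
+// input |{"a":1}|, the parser is wound to '{'; when it returns, the parser
+// is wound to the first byte past the closing '}'.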
+class BASE_EXPORT JSONParser {
+ public:
+  JSONParser(int options, int max_depth = JSONReader::kStackMaxDepth);
+  ~JSONParser();
+
+  // Parses the input string according to the set options and returns the
+  // result as a Value.
+  // Wrap this in base::FooValue::From() to check the Value is of type Foo and
+  // convert to a FooValue at the same time.
+  Optional<Value> Parse(StringPiece input);
+
+  // Returns the error code.
+  JSONReader::JsonParseError error_code() const;
+
+  // Returns the human-friendly error message.
+  std::string GetErrorMessage() const;
+
+  // Returns the error line number if a parse error occurred. Otherwise always
+  // returns 0.
+  int error_line() const;
+
+  // Returns the error column number if a parse error occurred. Otherwise
+  // always returns 0.
+  int error_column() const;
+
+ private:
+  enum Token {
+    T_OBJECT_BEGIN,           // {
+    T_OBJECT_END,             // }
+    T_ARRAY_BEGIN,            // [
+    T_ARRAY_END,              // ]
+    T_STRING,
+    T_NUMBER,
+    T_BOOL_TRUE,              // true
+    T_BOOL_FALSE,             // false
+    T_NULL,                   // null
+    T_LIST_SEPARATOR,         // ,
+    T_OBJECT_PAIR_SEPARATOR,  // :
+    T_END_OF_INPUT,
+    T_INVALID_TOKEN,
+  };
+
+  // A helper class used for parsing strings. One optimization performed is to
+  // create base::Value with a StringPiece to avoid unnecessary std::string
+  // copies. This is not possible if the input string needs to be decoded from
+  // UTF-16 to UTF-8, or if an escape sequence causes characters to be skipped.
+  // This class centralizes that logic.
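+  // For example (illustrative): the contents of |"abc"| can be carried as a
+  // StringPiece over the original input buffer, while |"a\u00e9b"| forces a
+  // Convert() because the decoded UTF-8 bytes differ from the raw input.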
+  class StringBuilder {
+   public:
+    // Default constructor, used for creating an empty builder to assign to.
+    StringBuilder();
+
+    // |pos| is the beginning of an input string, excluding the |"|.
+    explicit StringBuilder(const char* pos);
+
+    ~StringBuilder();
+
+    StringBuilder& operator=(StringBuilder&& other);
+
+    // Appends the Unicode code point |point| to the string, either by
+    // increasing the |length_| of the string if the string has not been
+    // converted, or by appending the UTF8 bytes for the code point.
+    void Append(uint32_t point);
+
+    // Converts the builder from its default StringPiece to a full std::string,
+    // performing a copy. Once a builder is converted, it cannot be made a
+    // StringPiece again.
+    void Convert();
+
+    // Returns the builder as a string, invalidating all state. This allows
+    // the internal string buffer representation to be destructively moved
+    // in cases where the builder will not be needed any more.
+    std::string DestructiveAsString();
+
+   private:
+    // The beginning of the input string.
+    const char* pos_;
+
+    // Number of bytes in |pos_| that make up the string being built.
+    size_t length_;
+
+    // The copied string representation. Will be unset until Convert() is
+    // called.
+    base::Optional<std::string> string_;
+  };
+
+  // Returns the next |count| bytes of the input stream, or nullopt if fewer
+  // than |count| bytes remain.
+  Optional<StringPiece> PeekChars(int count);
+
+  // Calls PeekChars() with a |count| of 1.
+  Optional<char> PeekChar();
+
+  // Returns the next |count| bytes of the input stream, or nullopt if fewer
+  // than |count| bytes remain; on success, advances the parser position by
+  // |count|.
+  Optional<StringPiece> ConsumeChars(int count);
+
+  // Calls ConsumeChars() with a |count| of 1.
+  Optional<char> ConsumeChar();
+
+  // Returns a pointer to the current character position.
+  const char* pos();
+
+  // Skips over whitespace and comments to find the next token in the stream.
+  // The parser is not advanced past the first byte of the token itself.
+  Token GetNextToken();
+
+  // Consumes whitespace characters and comments until the next character
+  // that is neither whitespace nor part of a comment.
+  void EatWhitespaceAndComments();
+  // Helper function that consumes a comment, assuming that the parser is
+  // currently wound to a '/'.
+  bool EatComment();
+
+  // Calls GetNextToken() and then ParseToken().
+  Optional<Value> ParseNextToken();
+
+  // Takes a token that represents the start of a Value ("a structural token"
+  // in RFC terms) and consumes it, returning the result as a Value.
+  Optional<Value> ParseToken(Token token);
+
+  // Assuming that the parser is currently wound to '{', this parses a JSON
+  // object into a Value.
+  Optional<Value> ConsumeDictionary();
+
+  // Assuming that the parser is wound to '[', this parses a JSON list into a
+  // Value.
+  Optional<Value> ConsumeList();
+
+  // Calls through to ConsumeStringRaw() and wraps the result in a Value.
+  Optional<Value> ConsumeString();
+
+  // Assuming that the parser is wound to a double quote, this parses a string,
+  // decoding any escape sequences and converting UTF-16 to UTF-8. Returns true
+  // on success and places the result into |out|. Returns false on failure with
+  // error information set.
+  bool ConsumeStringRaw(StringBuilder* out);
+  // Helper function for ConsumeStringRaw() that consumes the next 4 or 10
+  // bytes (the parser is wound to the first character of a hex sequence, with
+  // the potential for consuming another \uXXXX for a surrogate pair). Returns
+  // true on success and places the code point in |out_code_point|, and false
+  // on failure.
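+  // For example (illustrative): |\u00E9| consumes 4 bytes and yields U+00E9,
+  // while the surrogate pair |\uD83D\uDE00| consumes 10 bytes and yields
+  // U+1F600.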
+  bool DecodeUTF16(uint32_t* out_code_point);
+
+  // Assuming that the parser is wound to the start of a valid JSON number,
+  // this parses and converts it to either an int or double value.
+  Optional<Value> ConsumeNumber();
+  // Helper that reads a run of ASCII digits. Returns true if a number was
+  // read and false on error.
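+  // For example, the integer part of |042| is rejected (leading zero with
+  // |allow_leading_zeros| false), while the fractional digits of |0.042| are
+  // read with |allow_leading_zeros| true.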
+  bool ReadInt(bool allow_leading_zeros);
+
+  // Consumes the literal values of |true|, |false|, and |null|, assuming the
+  // parser is wound to the first character of any of those.
+  Optional<Value> ConsumeLiteral();
+
+  // Helper function that returns true if the byte sequence |match| can be
+  // consumed at the current parser position. Returns false if there are fewer
+  // than |match|-length bytes or if the sequence does not match, and the
+  // parser state is unchanged.
+  bool ConsumeIfMatch(StringPiece match);
+
+  // Sets the error information to |code| at the current column, based on
+  // |index_| and |index_last_line_|, with an optional positive/negative
+  // adjustment by |column_adjust|.
+  void ReportError(JSONReader::JsonParseError code, int column_adjust);
+
+  // Given the line and column number of an error, formats one of the error
+  // message constants from json_reader.h for human display.
+  static std::string FormatErrorMessage(int line, int column,
+                                        const std::string& description);
+
+  // base::JSONParserOptions that control parsing.
+  const int options_;
+
+  // Maximum depth to parse.
+  const int max_depth_;
+
+  // The input stream being parsed. Note: not guaranteed to be NUL-terminated.
+  StringPiece input_;
+
+  // The index in the input stream to which the parser is wound.
+  int index_;
+
+  // The number of times the parser has recursed (current stack depth).
+  int stack_depth_;
+
+  // The line number that the parser is at currently.
+  int line_number_;
+
+  // The last value of |index_| on the previous line.
+  int index_last_line_;
+
+  // Error information.
+  JSONReader::JsonParseError error_code_;
+  int error_line_;
+  int error_column_;
+
+  friend class JSONParserTest;
+  FRIEND_TEST_ALL_PREFIXES(JSONParserTest, NextChar);
+  FRIEND_TEST_ALL_PREFIXES(JSONParserTest, ConsumeDictionary);
+  FRIEND_TEST_ALL_PREFIXES(JSONParserTest, ConsumeList);
+  FRIEND_TEST_ALL_PREFIXES(JSONParserTest, ConsumeString);
+  FRIEND_TEST_ALL_PREFIXES(JSONParserTest, ConsumeLiterals);
+  FRIEND_TEST_ALL_PREFIXES(JSONParserTest, ConsumeNumbers);
+  FRIEND_TEST_ALL_PREFIXES(JSONParserTest, ErrorMessages);
+  FRIEND_TEST_ALL_PREFIXES(JSONParserTest, ReplaceInvalidCharacters);
+  FRIEND_TEST_ALL_PREFIXES(JSONParserTest, ReplaceInvalidUTF16EscapeSequence);
+
+  DISALLOW_COPY_AND_ASSIGN(JSONParser);
+};
+
+// Used when an invalid UTF-8 sequence is encountered while decoding.
+BASE_EXPORT extern const char kUnicodeReplacementString[];
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_JSON_JSON_PARSER_H_
diff --git a/base/json/json_parser_unittest.cc b/base/json/json_parser_unittest.cc
new file mode 100644
index 0000000..0da3db8
--- /dev/null
+++ b/base/json/json_parser_unittest.cc
@@ -0,0 +1,462 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/json/json_parser.h"
+
+#include <stddef.h>
+
+#include <memory>
+
+#include "base/json/json_reader.h"
+#include "base/memory/ptr_util.h"
+#include "base/optional.h"
+#include "base/strings/stringprintf.h"
+#include "base/values.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace internal {
+
+class JSONParserTest : public testing::Test {
+ public:
+  JSONParser* NewTestParser(const std::string& input,
+                            int options = JSON_PARSE_RFC) {
+    JSONParser* parser = new JSONParser(options);
+    parser->input_ = input;
+    parser->index_ = 0;
+    return parser;
+  }
+
+  // MSan will do a better job detecting over-read errors if the input is
+  // not NUL-terminated on the heap. This copies |input| to a new buffer
+  // owned by |owner|, returning a StringPiece into |owner|'s buffer.
+  StringPiece MakeNotNullTerminatedInput(const char* input,
+                                         std::unique_ptr<char[]>* owner) {
+    size_t str_len = strlen(input);
+    owner->reset(new char[str_len]);
+    memcpy(owner->get(), input, str_len);
+    return StringPiece(owner->get(), str_len);
+  }
+
+  void TestLastThree(JSONParser* parser) {
+    EXPECT_EQ(',', *parser->PeekChar());
+    parser->ConsumeChar();
+    EXPECT_EQ('|', *parser->PeekChar());
+    parser->ConsumeChar();
+    EXPECT_EQ('\0', *parser->pos());
+    EXPECT_EQ(static_cast<size_t>(parser->index_), parser->input_.length());
+  }
+};
+
+TEST_F(JSONParserTest, NextChar) {
+  std::string input("Hello world");
+  std::unique_ptr<JSONParser> parser(NewTestParser(input));
+
+  EXPECT_EQ('H', *parser->pos());
+  for (size_t i = 1; i < input.length(); ++i) {
+    parser->ConsumeChar();
+    EXPECT_EQ(input[i], *parser->PeekChar());
+  }
+  parser->ConsumeChar();
+  EXPECT_EQ('\0', *parser->pos());
+  EXPECT_EQ(static_cast<size_t>(parser->index_), parser->input_.length());
+}
+
+TEST_F(JSONParserTest, ConsumeString) {
+  std::string input("\"test\",|");
+  std::unique_ptr<JSONParser> parser(NewTestParser(input));
+  Optional<Value> value(parser->ConsumeString());
+  EXPECT_EQ(',', *parser->pos());
+
+  TestLastThree(parser.get());
+
+  ASSERT_TRUE(value);
+  std::string str;
+  EXPECT_TRUE(value->GetAsString(&str));
+  EXPECT_EQ("test", str);
+}
+
+TEST_F(JSONParserTest, ConsumeList) {
+  std::string input("[true, false],|");
+  std::unique_ptr<JSONParser> parser(NewTestParser(input));
+  Optional<Value> value(parser->ConsumeList());
+  EXPECT_EQ(',', *parser->pos());
+
+  TestLastThree(parser.get());
+
+  ASSERT_TRUE(value);
+  base::ListValue* list;
+  EXPECT_TRUE(value->GetAsList(&list));
+  EXPECT_EQ(2u, list->GetSize());
+}
+
+TEST_F(JSONParserTest, ConsumeDictionary) {
+  std::string input("{\"abc\":\"def\"},|");
+  std::unique_ptr<JSONParser> parser(NewTestParser(input));
+  Optional<Value> value(parser->ConsumeDictionary());
+  EXPECT_EQ(',', *parser->pos());
+
+  TestLastThree(parser.get());
+
+  ASSERT_TRUE(value);
+  base::DictionaryValue* dict;
+  EXPECT_TRUE(value->GetAsDictionary(&dict));
+  std::string str;
+  EXPECT_TRUE(dict->GetString("abc", &str));
+  EXPECT_EQ("def", str);
+}
+
+TEST_F(JSONParserTest, ConsumeLiterals) {
+  // Literal |true|.
+  std::string input("true,|");
+  std::unique_ptr<JSONParser> parser(NewTestParser(input));
+  Optional<Value> value(parser->ConsumeLiteral());
+  EXPECT_EQ(',', *parser->pos());
+
+  TestLastThree(parser.get());
+
+  ASSERT_TRUE(value);
+  bool bool_value = false;
+  EXPECT_TRUE(value->GetAsBoolean(&bool_value));
+  EXPECT_TRUE(bool_value);
+
+  // Literal |false|.
+  input = "false,|";
+  parser.reset(NewTestParser(input));
+  value = parser->ConsumeLiteral();
+  EXPECT_EQ(',', *parser->pos());
+
+  TestLastThree(parser.get());
+
+  ASSERT_TRUE(value);
+  EXPECT_TRUE(value->GetAsBoolean(&bool_value));
+  EXPECT_FALSE(bool_value);
+
+  // Literal |null|.
+  input = "null,|";
+  parser.reset(NewTestParser(input));
+  value = parser->ConsumeLiteral();
+  EXPECT_EQ(',', *parser->pos());
+
+  TestLastThree(parser.get());
+
+  ASSERT_TRUE(value);
+  EXPECT_TRUE(value->is_none());
+}
+
+TEST_F(JSONParserTest, ConsumeNumbers) {
+  // Integer.
+  std::string input("1234,|");
+  std::unique_ptr<JSONParser> parser(NewTestParser(input));
+  Optional<Value> value(parser->ConsumeNumber());
+  EXPECT_EQ(',', *parser->pos());
+
+  TestLastThree(parser.get());
+
+  ASSERT_TRUE(value);
+  int number_i;
+  EXPECT_TRUE(value->GetAsInteger(&number_i));
+  EXPECT_EQ(1234, number_i);
+
+  // Negative integer.
+  input = "-1234,|";
+  parser.reset(NewTestParser(input));
+  value = parser->ConsumeNumber();
+  EXPECT_EQ(',', *parser->pos());
+
+  TestLastThree(parser.get());
+
+  ASSERT_TRUE(value);
+  EXPECT_TRUE(value->GetAsInteger(&number_i));
+  EXPECT_EQ(-1234, number_i);
+
+  // Double.
+  input = "12.34,|";
+  parser.reset(NewTestParser(input));
+  value = parser->ConsumeNumber();
+  EXPECT_EQ(',', *parser->pos());
+
+  TestLastThree(parser.get());
+
+  ASSERT_TRUE(value);
+  double number_d;
+  EXPECT_TRUE(value->GetAsDouble(&number_d));
+  EXPECT_EQ(12.34, number_d);
+
+  // Scientific.
+  input = "42e3,|";
+  parser.reset(NewTestParser(input));
+  value = parser->ConsumeNumber();
+  EXPECT_EQ(',', *parser->pos());
+
+  TestLastThree(parser.get());
+
+  ASSERT_TRUE(value);
+  EXPECT_TRUE(value->GetAsDouble(&number_d));
+  EXPECT_EQ(42000, number_d);
+
+  // Negative scientific.
+  input = "314159e-5,|";
+  parser.reset(NewTestParser(input));
+  value = parser->ConsumeNumber();
+  EXPECT_EQ(',', *parser->pos());
+
+  TestLastThree(parser.get());
+
+  ASSERT_TRUE(value);
+  EXPECT_TRUE(value->GetAsDouble(&number_d));
+  EXPECT_EQ(3.14159, number_d);
+
+  // Positive scientific.
+  input = "0.42e+3,|";
+  parser.reset(NewTestParser(input));
+  value = parser->ConsumeNumber();
+  EXPECT_EQ(',', *parser->pos());
+
+  TestLastThree(parser.get());
+
+  ASSERT_TRUE(value);
+  EXPECT_TRUE(value->GetAsDouble(&number_d));
+  EXPECT_EQ(420, number_d);
+}
+
+TEST_F(JSONParserTest, ErrorMessages) {
+  // Error strings should not be modified in case of success.
+  std::string error_message;
+  int error_code = 0;
+  std::unique_ptr<Value> root = JSONReader::ReadAndReturnError(
+      "[42]", JSON_PARSE_RFC, &error_code, &error_message);
+  EXPECT_TRUE(error_message.empty());
+  EXPECT_EQ(0, error_code);
+
+  // Test line and column counting
+  const char big_json[] = "[\n0,\n1,\n2,\n3,4,5,6 7,\n8,\n9\n]";
+  // error here ----------------------------------^
+  root = JSONReader::ReadAndReturnError(big_json, JSON_PARSE_RFC, &error_code,
+                                        &error_message);
+  EXPECT_FALSE(root.get());
+  EXPECT_EQ(JSONParser::FormatErrorMessage(5, 10, JSONReader::kSyntaxError),
+            error_message);
+  EXPECT_EQ(JSONReader::JSON_SYNTAX_ERROR, error_code);
+
+  error_code = 0;
+  error_message = "";
+  // Test line and column counting with "\r\n" line ending
+  const char big_json_crlf[] =
+      "[\r\n0,\r\n1,\r\n2,\r\n3,4,5,6 7,\r\n8,\r\n9\r\n]";
+  // error here ----------------------^
+  root = JSONReader::ReadAndReturnError(big_json_crlf, JSON_PARSE_RFC,
+                                        &error_code, &error_message);
+  EXPECT_FALSE(root.get());
+  EXPECT_EQ(JSONParser::FormatErrorMessage(5, 10, JSONReader::kSyntaxError),
+            error_message);
+  EXPECT_EQ(JSONReader::JSON_SYNTAX_ERROR, error_code);
+
+  // Test each of the error conditions
+  root = JSONReader::ReadAndReturnError("{},{}", JSON_PARSE_RFC, &error_code,
+                                        &error_message);
+  EXPECT_FALSE(root.get());
+  EXPECT_EQ(JSONParser::FormatErrorMessage(1, 3,
+      JSONReader::kUnexpectedDataAfterRoot), error_message);
+  EXPECT_EQ(JSONReader::JSON_UNEXPECTED_DATA_AFTER_ROOT, error_code);
+
+  std::string nested_json;
+  for (int i = 0; i < 201; ++i) {
+    nested_json.insert(nested_json.begin(), '[');
+    nested_json.append(1, ']');
+  }
+  root = JSONReader::ReadAndReturnError(nested_json, JSON_PARSE_RFC,
+                                        &error_code, &error_message);
+  EXPECT_FALSE(root.get());
+  EXPECT_EQ(JSONParser::FormatErrorMessage(1, 200, JSONReader::kTooMuchNesting),
+            error_message);
+  EXPECT_EQ(JSONReader::JSON_TOO_MUCH_NESTING, error_code);
+
+  root = JSONReader::ReadAndReturnError("[1,]", JSON_PARSE_RFC, &error_code,
+                                        &error_message);
+  EXPECT_FALSE(root.get());
+  EXPECT_EQ(JSONParser::FormatErrorMessage(1, 4, JSONReader::kTrailingComma),
+            error_message);
+  EXPECT_EQ(JSONReader::JSON_TRAILING_COMMA, error_code);
+
+  root = JSONReader::ReadAndReturnError("{foo:\"bar\"}", JSON_PARSE_RFC,
+                                        &error_code, &error_message);
+  EXPECT_FALSE(root.get());
+  EXPECT_EQ(JSONParser::FormatErrorMessage(1, 2,
+      JSONReader::kUnquotedDictionaryKey), error_message);
+  EXPECT_EQ(JSONReader::JSON_UNQUOTED_DICTIONARY_KEY, error_code);
+
+  root = JSONReader::ReadAndReturnError("{\"foo\":\"bar\",}", JSON_PARSE_RFC,
+                                        &error_code, &error_message);
+  EXPECT_FALSE(root.get());
+  EXPECT_EQ(JSONParser::FormatErrorMessage(1, 14, JSONReader::kTrailingComma),
+            error_message);
+
+  root = JSONReader::ReadAndReturnError("[nu]", JSON_PARSE_RFC, &error_code,
+                                        &error_message);
+  EXPECT_FALSE(root.get());
+  EXPECT_EQ(JSONParser::FormatErrorMessage(1, 2, JSONReader::kSyntaxError),
+            error_message);
+  EXPECT_EQ(JSONReader::JSON_SYNTAX_ERROR, error_code);
+
+  root = JSONReader::ReadAndReturnError("[\"xxx\\xq\"]", JSON_PARSE_RFC,
+                                        &error_code, &error_message);
+  EXPECT_FALSE(root.get());
+  EXPECT_EQ(JSONParser::FormatErrorMessage(1, 7, JSONReader::kInvalidEscape),
+            error_message);
+  EXPECT_EQ(JSONReader::JSON_INVALID_ESCAPE, error_code);
+
+  root = JSONReader::ReadAndReturnError("[\"xxx\\uq\"]", JSON_PARSE_RFC,
+                                        &error_code, &error_message);
+  EXPECT_FALSE(root.get());
+  EXPECT_EQ(JSONParser::FormatErrorMessage(1, 7, JSONReader::kInvalidEscape),
+            error_message);
+  EXPECT_EQ(JSONReader::JSON_INVALID_ESCAPE, error_code);
+
+  root = JSONReader::ReadAndReturnError("[\"xxx\\q\"]", JSON_PARSE_RFC,
+                                        &error_code, &error_message);
+  EXPECT_FALSE(root.get());
+  EXPECT_EQ(JSONParser::FormatErrorMessage(1, 7, JSONReader::kInvalidEscape),
+            error_message);
+  EXPECT_EQ(JSONReader::JSON_INVALID_ESCAPE, error_code);
+
+  root = JSONReader::ReadAndReturnError(("[\"\\ufffe\"]"), JSON_PARSE_RFC,
+                                        &error_code, &error_message);
+  EXPECT_EQ(JSONParser::FormatErrorMessage(1, 8, JSONReader::kInvalidEscape),
+            error_message);
+  EXPECT_EQ(JSONReader::JSON_INVALID_ESCAPE, error_code);
+}
+
+TEST_F(JSONParserTest, Decode4ByteUtf8Char) {
+  // This test string contains a 4-byte Unicode character (a smiley!) that the
+  // reader should be able to handle (the character is \xf0\x9f\x98\x87).
+  const char kUtf8Data[] =
+      "[\"😇\",[],[],[],{\"google:suggesttype\":[]}]";
+  std::string error_message;
+  int error_code = 0;
+  std::unique_ptr<Value> root = JSONReader::ReadAndReturnError(
+      kUtf8Data, JSON_PARSE_RFC, &error_code, &error_message);
+  EXPECT_TRUE(root.get()) << error_message;
+}
+
+TEST_F(JSONParserTest, DecodeUnicodeNonCharacter) {
+  // Tests Unicode code points (encoded as escaped UTF-16) that are not valid
+  // characters.
+  EXPECT_FALSE(JSONReader::Read("[\"\\ufdd0\"]"));
+  EXPECT_FALSE(JSONReader::Read("[\"\\ufffe\"]"));
+  EXPECT_FALSE(JSONReader::Read("[\"\\ud83f\\udffe\"]"));
+
+  EXPECT_TRUE(
+      JSONReader::Read("[\"\\ufdd0\"]", JSON_REPLACE_INVALID_CHARACTERS));
+  EXPECT_TRUE(
+      JSONReader::Read("[\"\\ufffe\"]", JSON_REPLACE_INVALID_CHARACTERS));
+}
+
+TEST_F(JSONParserTest, DecodeNegativeEscapeSequence) {
+  EXPECT_FALSE(JSONReader::Read("[\"\\x-A\"]"));
+  EXPECT_FALSE(JSONReader::Read("[\"\\u-00A\"]"));
+}
+
+// Verifies invalid utf-8 characters are replaced.
+TEST_F(JSONParserTest, ReplaceInvalidCharacters) {
+  const std::string bogus_char = "󿿿";
+  const std::string quoted_bogus_char = "\"" + bogus_char + "\"";
+  std::unique_ptr<JSONParser> parser(
+      NewTestParser(quoted_bogus_char, JSON_REPLACE_INVALID_CHARACTERS));
+  Optional<Value> value(parser->ConsumeString());
+  ASSERT_TRUE(value);
+  std::string str;
+  EXPECT_TRUE(value->GetAsString(&str));
+  EXPECT_EQ(kUnicodeReplacementString, str);
+}
+
+TEST_F(JSONParserTest, ReplaceInvalidUTF16EscapeSequence) {
+  const std::string invalid = "\"\\ufffe\"";
+  std::unique_ptr<JSONParser> parser(
+      NewTestParser(invalid, JSON_REPLACE_INVALID_CHARACTERS));
+  Optional<Value> value(parser->ConsumeString());
+  ASSERT_TRUE(value);
+  std::string str;
+  EXPECT_TRUE(value->GetAsString(&str));
+  EXPECT_EQ(kUnicodeReplacementString, str);
+}
+
+TEST_F(JSONParserTest, ParseNumberErrors) {
+  const struct {
+    const char* input;
+    bool parse_success;
+    double value;
+  } kCases[] = {
+      // clang-format off
+      {"1", true, 1},
+      {"2.", false, 0},
+      {"42", true, 42},
+      {"6e", false, 0},
+      {"43e2", true, 4300},
+      {"43e-", false, 0},
+      {"9e-3", true, 0.009},
+      {"2e+", false, 0},
+      {"2e+2", true, 200},
+      // clang-format on
+  };
+
+  for (unsigned int i = 0; i < arraysize(kCases); ++i) {
+    auto test_case = kCases[i];
+    SCOPED_TRACE(StringPrintf("case %u: \"%s\"", i, test_case.input));
+
+    std::unique_ptr<char[]> input_owner;
+    StringPiece input =
+        MakeNotNullTerminatedInput(test_case.input, &input_owner);
+
+    std::unique_ptr<Value> result = JSONReader::Read(input);
+    if (test_case.parse_success) {
+      EXPECT_TRUE(result);
+    } else {
+      EXPECT_FALSE(result);
+    }
+
+    if (!result)
+      continue;
+
+    double double_value = 0;
+    EXPECT_TRUE(result->GetAsDouble(&double_value));
+    EXPECT_EQ(test_case.value, double_value);
+  }
+}
+
+TEST_F(JSONParserTest, UnterminatedInputs) {
+  const char* kCases[] = {
+      // clang-format off
+      "/",
+      "//",
+      "/*",
+      "\"xxxxxx",
+      "\"",
+      "{   ",
+      "[\t",
+      "tru",
+      "fals",
+      "nul",
+      "\"\\x",
+      "\"\\x2",
+      "\"\\u123",
+      "\"\\uD803\\u",
+      "\"\\",
+      "\"\\/",
+      // clang-format on
+  };
+
+  for (unsigned int i = 0; i < arraysize(kCases); ++i) {
+    auto* test_case = kCases[i];
+    SCOPED_TRACE(StringPrintf("case %u: \"%s\"", i, test_case));
+
+    std::unique_ptr<char[]> input_owner;
+    StringPiece input = MakeNotNullTerminatedInput(test_case, &input_owner);
+
+    EXPECT_FALSE(JSONReader::Read(input));
+  }
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/json/json_perftest.cc b/base/json/json_perftest.cc
new file mode 100644
index 0000000..fc05bdc
--- /dev/null
+++ b/base/json/json_perftest.cc
@@ -0,0 +1,84 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/json/json_reader.h"
+#include "base/json/json_writer.h"
+#include "base/memory/ptr_util.h"
+#include "base/time/time.h"
+#include "base/values.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/perf/perf_test.h"
+
+namespace base {
+
+namespace {
+// Generates a simple dictionary value with simple data types, a string, and
+// a list.
+std::unique_ptr<DictionaryValue> GenerateDict() {
+  auto root = std::make_unique<DictionaryValue>();
+  root->SetDouble("Double", 3.141);
+  root->SetBoolean("Bool", true);
+  root->SetInteger("Int", 42);
+  root->SetString("String", "Foo");
+
+  auto list = std::make_unique<ListValue>();
+  list->Set(0, std::make_unique<Value>(2.718));
+  list->Set(1, std::make_unique<Value>(false));
+  list->Set(2, std::make_unique<Value>(123));
+  list->Set(3, std::make_unique<Value>("Bar"));
+  root->Set("List", std::move(list));
+
+  return root;
+}
+
+// Generates a tree-like dictionary value with a size of O(breadth ** depth).
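+// For example, breadth 2 and depth 3 yields 1 + 2 + 4 = 7 copies of the base
+// dictionary.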
+std::unique_ptr<DictionaryValue> GenerateLayeredDict(int breadth, int depth) {
+  if (depth == 1)
+    return GenerateDict();
+
+  auto root = GenerateDict();
+  auto next = GenerateLayeredDict(breadth, depth - 1);
+
+  for (int i = 0; i < breadth; ++i) {
+    root->Set("Dict" + std::to_string(i), next->CreateDeepCopy());
+  }
+
+  return root;
+}
+
+}  // namespace
+
+class JSONPerfTest : public testing::Test {
+ public:
+  void TestWriteAndRead(int breadth, int depth) {
+    std::string description = "Breadth: " + std::to_string(breadth) +
+                              ", Depth: " + std::to_string(depth);
+    auto dict = GenerateLayeredDict(breadth, depth);
+    std::string json;
+
+    TimeTicks start_write = TimeTicks::Now();
+    JSONWriter::Write(*dict, &json);
+    TimeTicks end_write = TimeTicks::Now();
+    perf_test::PrintResult("Write", "", description,
+                           (end_write - start_write).InMillisecondsF(), "ms",
+                           true);
+
+    TimeTicks start_read = TimeTicks::Now();
+    JSONReader::Read(json);
+    TimeTicks end_read = TimeTicks::Now();
+    perf_test::PrintResult("Read", "", description,
+                           (end_read - start_read).InMillisecondsF(), "ms",
+                           true);
+  }
+};
+
+TEST_F(JSONPerfTest, StressTest) {
+  for (int i = 0; i < 4; ++i) {
+    for (int j = 0; j < 12; ++j) {
+      TestWriteAndRead(i + 1, j + 1);
+    }
+  }
+}
+
+}  // namespace base
diff --git a/base/json/json_reader.cc b/base/json/json_reader.cc
new file mode 100644
index 0000000..bf2a18a
--- /dev/null
+++ b/base/json/json_reader.cc
@@ -0,0 +1,126 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/json/json_reader.h"
+
+#include <utility>
+#include <vector>
+
+#include "base/json/json_parser.h"
+#include "base/logging.h"
+#include "base/optional.h"
+#include "base/values.h"
+
+namespace base {
+
+// Chosen to support 99.9% of documents found in the wild in late 2016.
+// http://crbug.com/673263
+const int JSONReader::kStackMaxDepth = 200;
+
+// Values 1000 and above are used by JSONFileValueSerializer::JsonFileError.
+static_assert(JSONReader::JSON_PARSE_ERROR_COUNT < 1000,
+              "JSONReader error out of bounds");
+
+const char JSONReader::kInvalidEscape[] =
+    "Invalid escape sequence.";
+const char JSONReader::kSyntaxError[] =
+    "Syntax error.";
+const char JSONReader::kUnexpectedToken[] =
+    "Unexpected token.";
+const char JSONReader::kTrailingComma[] =
+    "Trailing comma not allowed.";
+const char JSONReader::kTooMuchNesting[] =
+    "Too much nesting.";
+const char JSONReader::kUnexpectedDataAfterRoot[] =
+    "Unexpected data after root element.";
+const char JSONReader::kUnsupportedEncoding[] =
+    "Unsupported encoding. JSON must be UTF-8.";
+const char JSONReader::kUnquotedDictionaryKey[] =
+    "Dictionary keys must be quoted.";
+const char JSONReader::kInputTooLarge[] =
+    "Input string is too large (>2GB).";
+
+JSONReader::JSONReader(int options, int max_depth)
+    : parser_(new internal::JSONParser(options, max_depth)) {}
+
+JSONReader::~JSONReader() = default;
+
+// static
+std::unique_ptr<Value> JSONReader::Read(StringPiece json,
+                                        int options,
+                                        int max_depth) {
+  internal::JSONParser parser(options, max_depth);
+  Optional<Value> root = parser.Parse(json);
+  return root ? std::make_unique<Value>(std::move(*root)) : nullptr;
+}
+
+// static
+std::unique_ptr<Value> JSONReader::ReadAndReturnError(
+    StringPiece json,
+    int options,
+    int* error_code_out,
+    std::string* error_msg_out,
+    int* error_line_out,
+    int* error_column_out) {
+  internal::JSONParser parser(options);
+  Optional<Value> root = parser.Parse(json);
+  if (!root) {
+    if (error_code_out)
+      *error_code_out = parser.error_code();
+    if (error_msg_out)
+      *error_msg_out = parser.GetErrorMessage();
+    if (error_line_out)
+      *error_line_out = parser.error_line();
+    if (error_column_out)
+      *error_column_out = parser.error_column();
+  }
+
+  return root ? std::make_unique<Value>(std::move(*root)) : nullptr;
+}
+
+// static
+std::string JSONReader::ErrorCodeToString(JsonParseError error_code) {
+  switch (error_code) {
+    case JSON_NO_ERROR:
+      return std::string();
+    case JSON_INVALID_ESCAPE:
+      return kInvalidEscape;
+    case JSON_SYNTAX_ERROR:
+      return kSyntaxError;
+    case JSON_UNEXPECTED_TOKEN:
+      return kUnexpectedToken;
+    case JSON_TRAILING_COMMA:
+      return kTrailingComma;
+    case JSON_TOO_MUCH_NESTING:
+      return kTooMuchNesting;
+    case JSON_UNEXPECTED_DATA_AFTER_ROOT:
+      return kUnexpectedDataAfterRoot;
+    case JSON_UNSUPPORTED_ENCODING:
+      return kUnsupportedEncoding;
+    case JSON_UNQUOTED_DICTIONARY_KEY:
+      return kUnquotedDictionaryKey;
+    case JSON_TOO_LARGE:
+      return kInputTooLarge;
+    case JSON_PARSE_ERROR_COUNT:
+      break;
+  }
+  NOTREACHED();
+  return std::string();
+}
+
+std::unique_ptr<Value> JSONReader::ReadToValue(StringPiece json) {
+  Optional<Value> value = parser_->Parse(json);
+  return value ? std::make_unique<Value>(std::move(*value)) : nullptr;
+}
+
+JSONReader::JsonParseError JSONReader::error_code() const {
+  return parser_->error_code();
+}
+
+std::string JSONReader::GetErrorMessage() const {
+  return parser_->GetErrorMessage();
+}
+
+}  // namespace base
diff --git a/base/json/json_reader.h b/base/json/json_reader.h
new file mode 100644
index 0000000..2c6bd3e
--- /dev/null
+++ b/base/json/json_reader.h
@@ -0,0 +1,135 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// A JSON parser.  Converts strings of JSON into a Value object (see
+// base/values.h).
+// http://www.ietf.org/rfc/rfc4627.txt?number=4627
+//
+// Known limitations/deviations from the RFC:
+// - Only knows how to parse ints within the range of a signed 32-bit int and
+//   decimal numbers within a double.
+// - Assumes input is encoded as UTF-8.  The spec says we should allow UTF-16
+//   (BE or LE) and UTF-32 (BE or LE) as well.
+// - We limit nesting to 200 levels (kStackMaxDepth) to prevent stack overflow
+//   (this is allowed by the RFC).
+// - A Unicode FAQ ("http://unicode.org/faq/utf_bom.html") notes that a data
+//   stream may start with a Unicode Byte-Order-Mark (U+FEFF), i.e. the input
+//   UTF-8 string for the JSONReader::JsonToValue() function may start with a
+//   UTF-8 BOM (0xEF, 0xBB, 0xBF).
+//   To keep the function from mistreating a UTF-8 BOM as an invalid
+//   character, it skips a Unicode BOM at the beginning of the Unicode string
+//   (converted from the input UTF-8 string) before parsing it.
+//
+// TODO(tc): Add a parsing option to relax object keys being wrapped in
+//   double quotes
+// TODO(tc): Add an option to disable comment stripping
+
+#ifndef BASE_JSON_JSON_READER_H_
+#define BASE_JSON_JSON_READER_H_
+
+#include <memory>
+#include <string>
+
+#include "base/base_export.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+class Value;
+
+namespace internal {
+class JSONParser;
+}
+
+enum JSONParserOptions {
+  // Parses the input strictly according to RFC 4627, except for where noted
+  // above.
+  JSON_PARSE_RFC = 0,
+
+  // Allows commas to exist after the last element in structures.
+  JSON_ALLOW_TRAILING_COMMAS = 1 << 0,
+
+  // If set, the parser replaces invalid characters with the Unicode
+  // replacement character (U+FFFD). If not set, invalid characters trigger
+  // a hard error and parsing fails.
+  JSON_REPLACE_INVALID_CHARACTERS = 1 << 1,
+};
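+
+// Example (illustrative): the options form a bitmask and may be combined:
+//   JSONReader::Read(json, JSON_ALLOW_TRAILING_COMMAS |
+//                              JSON_REPLACE_INVALID_CHARACTERS);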
+
+class BASE_EXPORT JSONReader {
+ public:
+  static const int kStackMaxDepth;
+
+  // Error codes during parsing.
+  enum JsonParseError {
+    JSON_NO_ERROR = 0,
+    JSON_INVALID_ESCAPE,
+    JSON_SYNTAX_ERROR,
+    JSON_UNEXPECTED_TOKEN,
+    JSON_TRAILING_COMMA,
+    JSON_TOO_MUCH_NESTING,
+    JSON_UNEXPECTED_DATA_AFTER_ROOT,
+    JSON_UNSUPPORTED_ENCODING,
+    JSON_UNQUOTED_DICTIONARY_KEY,
+    JSON_TOO_LARGE,
+    JSON_PARSE_ERROR_COUNT
+  };
+
+  // String versions of parse error codes.
+  static const char kInvalidEscape[];
+  static const char kSyntaxError[];
+  static const char kUnexpectedToken[];
+  static const char kTrailingComma[];
+  static const char kTooMuchNesting[];
+  static const char kUnexpectedDataAfterRoot[];
+  static const char kUnsupportedEncoding[];
+  static const char kUnquotedDictionaryKey[];
+  static const char kInputTooLarge[];
+
+  // Constructs a reader.
+  JSONReader(int options = JSON_PARSE_RFC, int max_depth = kStackMaxDepth);
+
+  ~JSONReader();
+
+  // Reads and parses |json|, returning a Value.
+  // If |json| is not a properly formed JSON string, returns nullptr.
+  // Wrap this in base::FooValue::From() to check the Value is of type Foo and
+  // convert to a FooValue at the same time.
+  static std::unique_ptr<Value> Read(StringPiece json,
+                                     int options = JSON_PARSE_RFC,
+                                     int max_depth = kStackMaxDepth);
+
+  // Reads and parses |json| like Read(). |error_code_out| and |error_msg_out|
+  // are optional. If specified and nullptr is returned, they will be populated
+  // with an error code and a formatted error message (including error location
+  // if appropriate). Otherwise, they will be unmodified.
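+  //
+  // Example (illustrative):
+  //   int error_code;
+  //   std::string error_message;
+  //   std::unique_ptr<Value> value = JSONReader::ReadAndReturnError(
+  //       "[1,]", JSON_PARSE_RFC, &error_code, &error_message);
+  //   // |value| is null; |error_message| reports a trailing comma at
+  //   // line 1, column 4.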
+  static std::unique_ptr<Value> ReadAndReturnError(
+      StringPiece json,
+      int options,  // JSONParserOptions
+      int* error_code_out,
+      std::string* error_msg_out,
+      int* error_line_out = nullptr,
+      int* error_column_out = nullptr);
+
+  // Converts a JSON parse error code into a human-readable message.
+  // Returns an empty string if error_code is JSON_NO_ERROR.
+  static std::string ErrorCodeToString(JsonParseError error_code);
+
+  // Non-static version of Read() above.
+  std::unique_ptr<Value> ReadToValue(StringPiece json);
+
+  // Returns the error code if the last call to ReadToValue() failed.
+  // Returns JSON_NO_ERROR otherwise.
+  JsonParseError error_code() const;
+
+  // Converts error_code_ to a human-readable string, including line and column
+  // numbers if appropriate.
+  std::string GetErrorMessage() const;
+
+ private:
+  std::unique_ptr<internal::JSONParser> parser_;
+};
+
+}  // namespace base
+
+#endif  // BASE_JSON_JSON_READER_H_
diff --git a/base/json/json_reader_fuzzer.cc b/base/json/json_reader_fuzzer.cc
new file mode 100644
index 0000000..a8490da
--- /dev/null
+++ b/base/json/json_reader_fuzzer.cc
@@ -0,0 +1,29 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/json/json_reader.h"
+#include "base/values.h"
+
+int error_code, error_line, error_column;
+std::string error_message;
+
+// Entry point for LibFuzzer.
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+  if (size < 2)
+    return 0;
+
+  // Create a copy of the input buffer, as otherwise we don't catch an
+  // overflow that touches the last byte (which is used for the options).
+  std::unique_ptr<char[]> input(new char[size - 1]);
+  memcpy(input.get(), data, size - 1);
+
+  base::StringPiece input_string(input.get(), size - 1);
+
+  const int options = data[size - 1];
+  base::JSONReader::ReadAndReturnError(input_string, options, &error_code,
+                                       &error_message, &error_line,
+                                       &error_column);
+
+  return 0;
+}
diff --git a/base/json/json_reader_unittest.cc b/base/json/json_reader_unittest.cc
new file mode 100644
index 0000000..faaf43e
--- /dev/null
+++ b/base/json/json_reader_unittest.cc
@@ -0,0 +1,665 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/json/json_reader.h"
+
+#include <stddef.h>
+
+#include <memory>
+
+#include "base/base_paths.h"
+#include "base/files/file_util.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/path_service.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/values.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(JSONReaderTest, Whitespace) {
+  std::unique_ptr<Value> root = JSONReader().ReadToValue("   null   ");
+  ASSERT_TRUE(root);
+  EXPECT_TRUE(root->is_none());
+}
+
+TEST(JSONReaderTest, InvalidString) {
+  EXPECT_FALSE(JSONReader().ReadToValue("nu"));
+}
+
+TEST(JSONReaderTest, SimpleBool) {
+  std::unique_ptr<Value> root = JSONReader().ReadToValue("true  ");
+  ASSERT_TRUE(root);
+  EXPECT_TRUE(root->is_bool());
+}
+
+TEST(JSONReaderTest, EmbeddedComments) {
+  std::unique_ptr<Value> root = JSONReader().ReadToValue("/* comment */null");
+  ASSERT_TRUE(root);
+  EXPECT_TRUE(root->is_none());
+  root = JSONReader().ReadToValue("40 /* comment */");
+  ASSERT_TRUE(root);
+  EXPECT_TRUE(root->is_int());
+  root = JSONReader().ReadToValue("true // comment");
+  ASSERT_TRUE(root);
+  EXPECT_TRUE(root->is_bool());
+  root = JSONReader().ReadToValue("/* comment */\"sample string\"");
+  ASSERT_TRUE(root);
+  EXPECT_TRUE(root->is_string());
+  std::string value;
+  EXPECT_TRUE(root->GetAsString(&value));
+  EXPECT_EQ("sample string", value);
+  std::unique_ptr<ListValue> list =
+      ListValue::From(JSONReader().ReadToValue("[1, /* comment, 2 ] */ \n 3]"));
+  ASSERT_TRUE(list);
+  EXPECT_EQ(2u, list->GetSize());
+  int int_val = 0;
+  EXPECT_TRUE(list->GetInteger(0, &int_val));
+  EXPECT_EQ(1, int_val);
+  EXPECT_TRUE(list->GetInteger(1, &int_val));
+  EXPECT_EQ(3, int_val);
+  list = ListValue::From(JSONReader().ReadToValue("[1, /*a*/2, 3]"));
+  ASSERT_TRUE(list);
+  EXPECT_EQ(3u, list->GetSize());
+  root = JSONReader().ReadToValue("/* comment **/42");
+  ASSERT_TRUE(root);
+  EXPECT_TRUE(root->is_int());
+  EXPECT_TRUE(root->GetAsInteger(&int_val));
+  EXPECT_EQ(42, int_val);
+  root = JSONReader().ReadToValue(
+      "/* comment **/\n"
+      "// */ 43\n"
+      "44");
+  ASSERT_TRUE(root);
+  EXPECT_TRUE(root->is_int());
+  EXPECT_TRUE(root->GetAsInteger(&int_val));
+  EXPECT_EQ(44, int_val);
+}
+
+TEST(JSONReaderTest, Ints) {
+  std::unique_ptr<Value> root = JSONReader().ReadToValue("43");
+  ASSERT_TRUE(root);
+  EXPECT_TRUE(root->is_int());
+  int int_val = 0;
+  EXPECT_TRUE(root->GetAsInteger(&int_val));
+  EXPECT_EQ(43, int_val);
+}
+
+TEST(JSONReaderTest, NonDecimalNumbers) {
+  // According to RFC4627, oct, hex, and leading zeros are invalid JSON.
+  EXPECT_FALSE(JSONReader().ReadToValue("043"));
+  EXPECT_FALSE(JSONReader().ReadToValue("0x43"));
+  EXPECT_FALSE(JSONReader().ReadToValue("00"));
+}
+
+TEST(JSONReaderTest, NumberZero) {
+  // Test 0 (which needs to be special cased because of the leading zero
+  // clause).
+  std::unique_ptr<Value> root = JSONReader().ReadToValue("0");
+  ASSERT_TRUE(root);
+  EXPECT_TRUE(root->is_int());
+  int int_val = 1;
+  EXPECT_TRUE(root->GetAsInteger(&int_val));
+  EXPECT_EQ(0, int_val);
+}
+
+TEST(JSONReaderTest, LargeIntPromotion) {
+  // Numbers that overflow ints should succeed, being internally promoted to
+  // storage as doubles
+  std::unique_ptr<Value> root = JSONReader().ReadToValue("2147483648");
+  ASSERT_TRUE(root);
+  double double_val;
+  EXPECT_TRUE(root->is_double());
+  double_val = 0.0;
+  EXPECT_TRUE(root->GetAsDouble(&double_val));
+  EXPECT_DOUBLE_EQ(2147483648.0, double_val);
+  root = JSONReader().ReadToValue("-2147483649");
+  ASSERT_TRUE(root);
+  EXPECT_TRUE(root->is_double());
+  double_val = 0.0;
+  EXPECT_TRUE(root->GetAsDouble(&double_val));
+  EXPECT_DOUBLE_EQ(-2147483649.0, double_val);
+}
+
+TEST(JSONReaderTest, Doubles) {
+  std::unique_ptr<Value> root = JSONReader().ReadToValue("43.1");
+  ASSERT_TRUE(root);
+  EXPECT_TRUE(root->is_double());
+  double double_val = 0.0;
+  EXPECT_TRUE(root->GetAsDouble(&double_val));
+  EXPECT_DOUBLE_EQ(43.1, double_val);
+
+  root = JSONReader().ReadToValue("4.3e-1");
+  ASSERT_TRUE(root);
+  EXPECT_TRUE(root->is_double());
+  double_val = 0.0;
+  EXPECT_TRUE(root->GetAsDouble(&double_val));
+  EXPECT_DOUBLE_EQ(.43, double_val);
+
+  root = JSONReader().ReadToValue("2.1e0");
+  ASSERT_TRUE(root);
+  EXPECT_TRUE(root->is_double());
+  double_val = 0.0;
+  EXPECT_TRUE(root->GetAsDouble(&double_val));
+  EXPECT_DOUBLE_EQ(2.1, double_val);
+
+  root = JSONReader().ReadToValue("2.1e+0001");
+  ASSERT_TRUE(root);
+  EXPECT_TRUE(root->is_double());
+  double_val = 0.0;
+  EXPECT_TRUE(root->GetAsDouble(&double_val));
+  EXPECT_DOUBLE_EQ(21.0, double_val);
+
+  root = JSONReader().ReadToValue("0.01");
+  ASSERT_TRUE(root);
+  EXPECT_TRUE(root->is_double());
+  double_val = 0.0;
+  EXPECT_TRUE(root->GetAsDouble(&double_val));
+  EXPECT_DOUBLE_EQ(0.01, double_val);
+
+  root = JSONReader().ReadToValue("1.00");
+  ASSERT_TRUE(root);
+  EXPECT_TRUE(root->is_double());
+  double_val = 0.0;
+  EXPECT_TRUE(root->GetAsDouble(&double_val));
+  EXPECT_DOUBLE_EQ(1.0, double_val);
+}
+
+TEST(JSONReaderTest, FractionalNumbers) {
+  // Fractional parts must have a digit before and after the decimal point.
+  EXPECT_FALSE(JSONReader().ReadToValue("1."));
+  EXPECT_FALSE(JSONReader().ReadToValue(".1"));
+  EXPECT_FALSE(JSONReader().ReadToValue("1.e10"));
+}
+
+TEST(JSONReaderTest, ExponentialNumbers) {
+  // Exponent must have a digit following the 'e'.
+  EXPECT_FALSE(JSONReader().ReadToValue("1e"));
+  EXPECT_FALSE(JSONReader().ReadToValue("1E"));
+  EXPECT_FALSE(JSONReader().ReadToValue("1e1."));
+  EXPECT_FALSE(JSONReader().ReadToValue("1e1.0"));
+}
+
+TEST(JSONReaderTest, InvalidNAN) {
+  EXPECT_FALSE(JSONReader().ReadToValue("1e1000"));
+  EXPECT_FALSE(JSONReader().ReadToValue("-1e1000"));
+  EXPECT_FALSE(JSONReader().ReadToValue("NaN"));
+  EXPECT_FALSE(JSONReader().ReadToValue("nan"));
+  EXPECT_FALSE(JSONReader().ReadToValue("inf"));
+}
+
+TEST(JSONReaderTest, InvalidNumbers) {
+  EXPECT_FALSE(JSONReader().ReadToValue("4.3.1"));
+  EXPECT_FALSE(JSONReader().ReadToValue("4e3.1"));
+  EXPECT_FALSE(JSONReader().ReadToValue("4.a"));
+}
+
+TEST(JSONReader, SimpleString) {
+  std::unique_ptr<Value> root = JSONReader().ReadToValue("\"hello world\"");
+  ASSERT_TRUE(root);
+  EXPECT_TRUE(root->is_string());
+  std::string str_val;
+  EXPECT_TRUE(root->GetAsString(&str_val));
+  EXPECT_EQ("hello world", str_val);
+}
+
+TEST(JSONReaderTest, EmptyString) {
+  std::unique_ptr<Value> root = JSONReader().ReadToValue("\"\"");
+  ASSERT_TRUE(root);
+  EXPECT_TRUE(root->is_string());
+  std::string str_val;
+  EXPECT_TRUE(root->GetAsString(&str_val));
+  EXPECT_EQ("", str_val);
+}
+
+TEST(JSONReaderTest, BasicStringEscapes) {
+  std::unique_ptr<Value> root =
+      JSONReader().ReadToValue("\" \\\"\\\\\\/\\b\\f\\n\\r\\t\\v\"");
+  ASSERT_TRUE(root);
+  EXPECT_TRUE(root->is_string());
+  std::string str_val;
+  EXPECT_TRUE(root->GetAsString(&str_val));
+  EXPECT_EQ(" \"\\/\b\f\n\r\t\v", str_val);
+}
+
+TEST(JSONReaderTest, UnicodeEscapes) {
+  // Test hex and unicode escapes including the null character.
+  std::unique_ptr<Value> root =
+      JSONReader().ReadToValue("\"\\x41\\x00\\u1234\\u0000\"");
+  ASSERT_TRUE(root);
+  EXPECT_TRUE(root->is_string());
+  std::string str_val;
+  EXPECT_TRUE(root->GetAsString(&str_val));
+  EXPECT_EQ(std::wstring(L"A\0\x1234\0", 4), UTF8ToWide(str_val));
+}
+
+TEST(JSONReaderTest, InvalidStrings) {
+  EXPECT_FALSE(JSONReader().ReadToValue("\"no closing quote"));
+  EXPECT_FALSE(JSONReader().ReadToValue("\"\\z invalid escape char\""));
+  EXPECT_FALSE(JSONReader().ReadToValue("\"\\xAQ invalid hex code\""));
+  EXPECT_FALSE(JSONReader().ReadToValue("not enough hex chars\\x1\""));
+  EXPECT_FALSE(JSONReader().ReadToValue("\"not enough escape chars\\u123\""));
+  EXPECT_FALSE(
+      JSONReader().ReadToValue("\"extra backslash at end of input\\\""));
+}
+
+TEST(JSONReaderTest, BasicArray) {
+  std::unique_ptr<ListValue> list =
+      ListValue::From(JSONReader::Read("[true, false, null]"));
+  ASSERT_TRUE(list);
+  EXPECT_EQ(3U, list->GetSize());
+
+  // Test with trailing comma.  Should be parsed the same as above.
+  std::unique_ptr<Value> root2 =
+      JSONReader::Read("[true, false, null, ]", JSON_ALLOW_TRAILING_COMMAS);
+  EXPECT_TRUE(list->Equals(root2.get()));
+}
+
+TEST(JSONReaderTest, EmptyArray) {
+  std::unique_ptr<ListValue> list = ListValue::From(JSONReader::Read("[]"));
+  ASSERT_TRUE(list);
+  EXPECT_EQ(0U, list->GetSize());
+}
+
+TEST(JSONReaderTest, NestedArrays) {
+  std::unique_ptr<ListValue> list = ListValue::From(
+      JSONReader::Read("[[true], [], [false, [], [null]], null]"));
+  ASSERT_TRUE(list);
+  EXPECT_EQ(4U, list->GetSize());
+
+  // Lots of trailing commas.
+  std::unique_ptr<Value> root2 =
+      JSONReader::Read("[[true], [], [false, [], [null, ]  , ], null,]",
+                       JSON_ALLOW_TRAILING_COMMAS);
+  EXPECT_TRUE(list->Equals(root2.get()));
+}
+
+TEST(JSONReaderTest, InvalidArrays) {
+  // Missing close brace.
+  EXPECT_FALSE(JSONReader::Read("[[true], [], [false, [], [null]], null"));
+
+  // Too many commas.
+  EXPECT_FALSE(JSONReader::Read("[true,, null]"));
+  EXPECT_FALSE(JSONReader::Read("[true,, null]", JSON_ALLOW_TRAILING_COMMAS));
+
+  // No commas.
+  EXPECT_FALSE(JSONReader::Read("[true null]"));
+
+  // Trailing comma.
+  EXPECT_FALSE(JSONReader::Read("[true,]"));
+}
+
+TEST(JSONReaderTest, ArrayTrailingComma) {
+  // Valid if we set |allow_trailing_comma| to true.
+  std::unique_ptr<ListValue> list =
+      ListValue::From(JSONReader::Read("[true,]", JSON_ALLOW_TRAILING_COMMAS));
+  ASSERT_TRUE(list);
+  EXPECT_EQ(1U, list->GetSize());
+  Value* tmp_value = nullptr;
+  ASSERT_TRUE(list->Get(0, &tmp_value));
+  EXPECT_TRUE(tmp_value->is_bool());
+  bool bool_value = false;
+  EXPECT_TRUE(tmp_value->GetAsBoolean(&bool_value));
+  EXPECT_TRUE(bool_value);
+}
+
+TEST(JSONReaderTest, ArrayTrailingCommaNoEmptyElements) {
+  // Don't allow empty elements, even if |allow_trailing_comma| is
+  // true.
+  EXPECT_FALSE(JSONReader::Read("[,]", JSON_ALLOW_TRAILING_COMMAS));
+  EXPECT_FALSE(JSONReader::Read("[true,,]", JSON_ALLOW_TRAILING_COMMAS));
+  EXPECT_FALSE(JSONReader::Read("[,true,]", JSON_ALLOW_TRAILING_COMMAS));
+  EXPECT_FALSE(JSONReader::Read("[true,,false]", JSON_ALLOW_TRAILING_COMMAS));
+}
+
+TEST(JSONReaderTest, EmptyDictionary) {
+  std::unique_ptr<DictionaryValue> dict_val =
+      DictionaryValue::From(JSONReader::Read("{}"));
+  ASSERT_TRUE(dict_val);
+}
+
+TEST(JSONReaderTest, CompleteDictionary) {
+  auto dict_val = DictionaryValue::From(JSONReader::Read(
+      "{\"number\":9.87654321, \"null\":null , \"\\x53\" : \"str\" }"));
+  ASSERT_TRUE(dict_val);
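+  // Note: "\x53" in the JSON input is the hex escape for 'S', which is why
+  // the key is looked up as "S" below.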
+  double double_val = 0.0;
+  EXPECT_TRUE(dict_val->GetDouble("number", &double_val));
+  EXPECT_DOUBLE_EQ(9.87654321, double_val);
+  Value* null_val = nullptr;
+  ASSERT_TRUE(dict_val->Get("null", &null_val));
+  EXPECT_TRUE(null_val->is_none());
+  std::string str_val;
+  EXPECT_TRUE(dict_val->GetString("S", &str_val));
+  EXPECT_EQ("str", str_val);
+
+  std::unique_ptr<Value> root2 = JSONReader::Read(
+      "{\"number\":9.87654321, \"null\":null , \"\\x53\" : \"str\", }",
+      JSON_ALLOW_TRAILING_COMMAS);
+  ASSERT_TRUE(root2);
+  EXPECT_TRUE(dict_val->Equals(root2.get()));
+
+  // Test newline equivalence.
+  root2 = JSONReader::Read(
+      "{\n"
+      "  \"number\":9.87654321,\n"
+      "  \"null\":null,\n"
+      "  \"\\x53\":\"str\",\n"
+      "}\n",
+      JSON_ALLOW_TRAILING_COMMAS);
+  ASSERT_TRUE(root2);
+  EXPECT_TRUE(dict_val->Equals(root2.get()));
+
+  root2 = JSONReader::Read(
+      "{\r\n"
+      "  \"number\":9.87654321,\r\n"
+      "  \"null\":null,\r\n"
+      "  \"\\x53\":\"str\",\r\n"
+      "}\r\n",
+      JSON_ALLOW_TRAILING_COMMAS);
+  ASSERT_TRUE(root2);
+  EXPECT_TRUE(dict_val->Equals(root2.get()));
+}
+
+TEST(JSONReaderTest, NestedDictionaries) {
+  std::unique_ptr<DictionaryValue> dict_val =
+      DictionaryValue::From(JSONReader::Read(
+          "{\"inner\":{\"array\":[true]},\"false\":false,\"d\":{}}"));
+  ASSERT_TRUE(dict_val);
+  DictionaryValue* inner_dict = nullptr;
+  ASSERT_TRUE(dict_val->GetDictionary("inner", &inner_dict));
+  ListValue* inner_array = nullptr;
+  ASSERT_TRUE(inner_dict->GetList("array", &inner_array));
+  EXPECT_EQ(1U, inner_array->GetSize());
+  bool bool_value = true;
+  EXPECT_TRUE(dict_val->GetBoolean("false", &bool_value));
+  EXPECT_FALSE(bool_value);
+  inner_dict = nullptr;
+  EXPECT_TRUE(dict_val->GetDictionary("d", &inner_dict));
+
+  std::unique_ptr<Value> root2 = JSONReader::Read(
+      "{\"inner\": {\"array\":[true] , },\"false\":false,\"d\":{},}",
+      JSON_ALLOW_TRAILING_COMMAS);
+  EXPECT_TRUE(dict_val->Equals(root2.get()));
+}
+
+TEST(JSONReaderTest, DictionaryKeysWithPeriods) {
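+  // DictionaryValue::Get*() treats '.' in a key as a path separator; the
+  // *WithoutPathExpansion variants look up the literal key instead.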
+  std::unique_ptr<DictionaryValue> dict_val = DictionaryValue::From(
+      JSONReader::Read("{\"a.b\":3,\"c\":2,\"d.e.f\":{\"g.h.i.j\":1}}"));
+  ASSERT_TRUE(dict_val);
+  int integer_value = 0;
+  EXPECT_TRUE(dict_val->GetIntegerWithoutPathExpansion("a.b", &integer_value));
+  EXPECT_EQ(3, integer_value);
+  EXPECT_TRUE(dict_val->GetIntegerWithoutPathExpansion("c", &integer_value));
+  EXPECT_EQ(2, integer_value);
+  DictionaryValue* inner_dict = nullptr;
+  ASSERT_TRUE(
+      dict_val->GetDictionaryWithoutPathExpansion("d.e.f", &inner_dict));
+  EXPECT_EQ(1U, inner_dict->size());
+  EXPECT_TRUE(
+      inner_dict->GetIntegerWithoutPathExpansion("g.h.i.j", &integer_value));
+  EXPECT_EQ(1, integer_value);
+
+  dict_val =
+      DictionaryValue::From(JSONReader::Read("{\"a\":{\"b\":2},\"a.b\":1}"));
+  ASSERT_TRUE(dict_val);
+  EXPECT_TRUE(dict_val->GetInteger("a.b", &integer_value));
+  EXPECT_EQ(2, integer_value);
+  EXPECT_TRUE(dict_val->GetIntegerWithoutPathExpansion("a.b", &integer_value));
+  EXPECT_EQ(1, integer_value);
+}
+
+TEST(JSONReaderTest, InvalidDictionaries) {
+  // No closing brace.
+  EXPECT_FALSE(JSONReader::Read("{\"a\": true"));
+
+  // Keys must be quoted strings.
+  EXPECT_FALSE(JSONReader::Read("{foo:true}"));
+  EXPECT_FALSE(JSONReader::Read("{1234: false}"));
+  EXPECT_FALSE(JSONReader::Read("{:false}"));
+
+  // Trailing comma.
+  EXPECT_FALSE(JSONReader::Read("{\"a\":true,}"));
+
+  // Too many commas.
+  EXPECT_FALSE(JSONReader::Read("{\"a\":true,,\"b\":false}"));
+  EXPECT_FALSE(JSONReader::Read("{\"a\":true,,\"b\":false}",
+                                JSON_ALLOW_TRAILING_COMMAS));
+
+  // No separator.
+  EXPECT_FALSE(JSONReader::Read("{\"a\" \"b\"}"));
+
+  // Lone comma.
+  EXPECT_FALSE(JSONReader::Read("{,}"));
+  EXPECT_FALSE(JSONReader::Read("{,}", JSON_ALLOW_TRAILING_COMMAS));
+  EXPECT_FALSE(JSONReader::Read("{\"a\":true,,}", JSON_ALLOW_TRAILING_COMMAS));
+  EXPECT_FALSE(JSONReader::Read("{,\"a\":true}", JSON_ALLOW_TRAILING_COMMAS));
+  EXPECT_FALSE(JSONReader::Read("{\"a\":true,,\"b\":false}",
+                                JSON_ALLOW_TRAILING_COMMAS));
+}
+
+TEST(JSONReaderTest, StackOverflow) {
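+  // Deeply nested brackets must be rejected (the parser enforces a maximum
+  // nesting depth) instead of recursing until the stack overflows.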
+  std::string evil(1000000, '[');
+  evil.append(std::string(1000000, ']'));
+  EXPECT_FALSE(JSONReader::Read(evil));
+
+  // A few thousand adjacent lists are fine.
+  std::string not_evil("[");
+  not_evil.reserve(15010);
+  for (int i = 0; i < 5000; ++i)
+    not_evil.append("[],");
+  not_evil.append("[]]");
+  std::unique_ptr<ListValue> list = ListValue::From(JSONReader::Read(not_evil));
+  ASSERT_TRUE(list);
+  EXPECT_EQ(5001U, list->GetSize());
+}
+
+TEST(JSONReaderTest, UTF8Input) {
+  std::unique_ptr<Value> root =
+      JSONReader().ReadToValue("\"\xe7\xbd\x91\xe9\xa1\xb5\"");
+  ASSERT_TRUE(root);
+  EXPECT_TRUE(root->is_string());
+  std::string str_val;
+  EXPECT_TRUE(root->GetAsString(&str_val));
+  EXPECT_EQ(L"\x7f51\x9875", UTF8ToWide(str_val));
+
+  std::unique_ptr<DictionaryValue> dict_val =
+      DictionaryValue::From(JSONReader().ReadToValue(
+          "{\"path\": \"/tmp/\xc3\xa0\xc3\xa8\xc3\xb2.png\"}"));
+  ASSERT_TRUE(dict_val);
+  EXPECT_TRUE(dict_val->GetString("path", &str_val));
+  EXPECT_EQ("/tmp/\xC3\xA0\xC3\xA8\xC3\xB2.png", str_val);
+}
+
+TEST(JSONReaderTest, InvalidUTF8Input) {
+  EXPECT_FALSE(JSONReader().ReadToValue("\"345\xb0\xa1\xb0\xa2\""));
+  EXPECT_FALSE(JSONReader().ReadToValue("\"123\xc0\x81\""));
+  EXPECT_FALSE(JSONReader().ReadToValue("\"abc\xc0\xae\""));
+}
+
+TEST(JSONReaderTest, UTF16Escapes) {
+  std::unique_ptr<Value> root = JSONReader().ReadToValue("\"\\u20ac3,14\"");
+  ASSERT_TRUE(root);
+  EXPECT_TRUE(root->is_string());
+  std::string str_val;
+  EXPECT_TRUE(root->GetAsString(&str_val));
+  EXPECT_EQ(
+      "\xe2\x82\xac"
+      "3,14",
+      str_val);
+
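+  // Each pair below is a UTF-16 surrogate pair: \ud83d\udca9 decodes to
+  // U+1F4A9 and \ud83d\udc6c to U+1F46C, each written out as a four-byte
+  // UTF-8 sequence.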
+  root = JSONReader().ReadToValue("\"\\ud83d\\udca9\\ud83d\\udc6c\"");
+  ASSERT_TRUE(root);
+  EXPECT_TRUE(root->is_string());
+  str_val.clear();
+  EXPECT_TRUE(root->GetAsString(&str_val));
+  EXPECT_EQ("\xf0\x9f\x92\xa9\xf0\x9f\x91\xac", str_val);
+}
+
+TEST(JSONReaderTest, InvalidUTF16Escapes) {
+  const char* const cases[] = {
+      "\"\\u123\"",          // Invalid scalar.
+      "\"\\ud83d\"",         // Invalid scalar.
+      "\"\\u$%@!\"",         // Invalid scalar.
+      "\"\\uzz89\"",         // Invalid scalar.
+      "\"\\ud83d\\udca\"",   // Invalid lower surrogate.
+      "\"\\ud83d\\ud83d\"",  // Invalid lower surrogate.
+      "\"\\ud83d\\uaaaZ\"",  // Invalid lower surrogate.
+      "\"\\ud83foo\"",       // No lower surrogate.
+      "\"\\ud83d\\foo\"",    // No lower surrogate.
+      "\"\\ud83\\foo\"",     // Invalid upper surrogate.
+      "\"\\ud83d\\u1\"",     // No lower surrogate.
+      "\"\\ud83\\u1\"",      // Invalid upper surrogate.
+  };
+  std::unique_ptr<Value> root;
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    root = JSONReader().ReadToValue(cases[i]);
+    EXPECT_FALSE(root) << cases[i];
+  }
+}
+
+TEST(JSONReaderTest, LiteralRoots) {
+  std::unique_ptr<Value> root = JSONReader::Read("null");
+  ASSERT_TRUE(root);
+  EXPECT_TRUE(root->is_none());
+
+  root = JSONReader::Read("true");
+  ASSERT_TRUE(root);
+  bool bool_value;
+  EXPECT_TRUE(root->GetAsBoolean(&bool_value));
+  EXPECT_TRUE(bool_value);
+
+  root = JSONReader::Read("10");
+  ASSERT_TRUE(root);
+  int integer_value;
+  EXPECT_TRUE(root->GetAsInteger(&integer_value));
+  EXPECT_EQ(10, integer_value);
+
+  root = JSONReader::Read("\"root\"");
+  ASSERT_TRUE(root);
+  std::string str_val;
+  EXPECT_TRUE(root->GetAsString(&str_val));
+  EXPECT_EQ("root", str_val);
+}
+
+TEST(JSONReaderTest, ReadFromFile) {
+  FilePath path;
+  ASSERT_TRUE(PathService::Get(base::DIR_TEST_DATA, &path));
+  path = path.AppendASCII("json");
+  ASSERT_TRUE(base::PathExists(path));
+
+  std::string input;
+  ASSERT_TRUE(ReadFileToString(path.AppendASCII("bom_feff.json"), &input));
+
+  JSONReader reader;
+  std::unique_ptr<Value> root(reader.ReadToValue(input));
+  ASSERT_TRUE(root) << reader.GetErrorMessage();
+  EXPECT_TRUE(root->is_dict());
+}
+
+// Tests that the root of a JSON object can be deleted safely while its
+// children outlive it.
+TEST(JSONReaderTest, StringOptimizations) {
+  std::unique_ptr<Value> dict_literal_0;
+  std::unique_ptr<Value> dict_literal_1;
+  std::unique_ptr<Value> dict_string_0;
+  std::unique_ptr<Value> dict_string_1;
+  std::unique_ptr<Value> list_value_0;
+  std::unique_ptr<Value> list_value_1;
+
+  {
+    std::unique_ptr<Value> root = JSONReader::Read(
+        "{"
+        "  \"test\": {"
+        "    \"foo\": true,"
+        "    \"bar\": 3.14,"
+        "    \"baz\": \"bat\","
+        "    \"moo\": \"cow\""
+        "  },"
+        "  \"list\": ["
+        "    \"a\","
+        "    \"b\""
+        "  ]"
+        "}",
+        JSON_PARSE_RFC);
+    ASSERT_TRUE(root);
+
+    DictionaryValue* root_dict = nullptr;
+    ASSERT_TRUE(root->GetAsDictionary(&root_dict));
+
+    DictionaryValue* dict = nullptr;
+    ListValue* list = nullptr;
+
+    ASSERT_TRUE(root_dict->GetDictionary("test", &dict));
+    ASSERT_TRUE(root_dict->GetList("list", &list));
+
+    ASSERT_TRUE(dict->Remove("foo", &dict_literal_0));
+    ASSERT_TRUE(dict->Remove("bar", &dict_literal_1));
+    ASSERT_TRUE(dict->Remove("baz", &dict_string_0));
+    ASSERT_TRUE(dict->Remove("moo", &dict_string_1));
+
+    ASSERT_EQ(2u, list->GetSize());
+    ASSERT_TRUE(list->Remove(0, &list_value_0));
+    ASSERT_TRUE(list->Remove(0, &list_value_1));
+  }
+
+  bool b = false;
+  double d = 0;
+  std::string s;
+
+  EXPECT_TRUE(dict_literal_0->GetAsBoolean(&b));
+  EXPECT_TRUE(b);
+
+  EXPECT_TRUE(dict_literal_1->GetAsDouble(&d));
+  EXPECT_EQ(3.14, d);
+
+  EXPECT_TRUE(dict_string_0->GetAsString(&s));
+  EXPECT_EQ("bat", s);
+
+  EXPECT_TRUE(dict_string_1->GetAsString(&s));
+  EXPECT_EQ("cow", s);
+
+  EXPECT_TRUE(list_value_0->GetAsString(&s));
+  EXPECT_EQ("a", s);
+  EXPECT_TRUE(list_value_1->GetAsString(&s));
+  EXPECT_EQ("b", s);
+}
+
+// A smattering of invalid JSON designed to test specific portions of the
+// parser implementation against buffer overflow. Best run with DCHECKs
+// enabled so that the DCHECK in NextChar() fires.
+TEST(JSONReaderTest, InvalidSanity) {
+  const char* const kInvalidJson[] = {
+      "/* test *", "{\"foo\"", "{\"foo\":", "  [", "\"\\u123g\"", "{\n\"eh:\n}",
+  };
+
+  for (size_t i = 0; i < arraysize(kInvalidJson); ++i) {
+    JSONReader reader;
+    LOG(INFO) << "Sanity test " << i << ": <" << kInvalidJson[i] << ">";
+    EXPECT_FALSE(reader.ReadToValue(kInvalidJson[i]));
+    EXPECT_NE(JSONReader::JSON_NO_ERROR, reader.error_code());
+    EXPECT_NE("", reader.GetErrorMessage());
+  }
+}
+
+TEST(JSONReaderTest, IllegalTrailingNull) {
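+  // sizeof(json) includes the terminating '\0', so the parser sees an
+  // embedded NUL after the closing quote and must report it as unexpected
+  // data after the root element.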
+  const char json[] = { '"', 'n', 'u', 'l', 'l', '"', '\0' };
+  std::string json_string(json, sizeof(json));
+  JSONReader reader;
+  EXPECT_FALSE(reader.ReadToValue(json_string));
+  EXPECT_EQ(JSONReader::JSON_UNEXPECTED_DATA_AFTER_ROOT, reader.error_code());
+}
+
+TEST(JSONReaderTest, MaxNesting) {
+  std::string json(R"({"outer": { "inner": {"foo": true}}})");
+  std::unique_ptr<Value> root;
+  root = JSONReader::Read(json, JSON_PARSE_RFC, 3);
+  ASSERT_FALSE(root);
+  root = JSONReader::Read(json, JSON_PARSE_RFC, 4);
+  ASSERT_TRUE(root);
+}
+
+}  // namespace base
diff --git a/base/json/json_string_value_serializer.cc b/base/json/json_string_value_serializer.cc
new file mode 100644
index 0000000..f9c45a4
--- /dev/null
+++ b/base/json/json_string_value_serializer.cc
@@ -0,0 +1,55 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/json/json_string_value_serializer.h"
+
+#include "base/json/json_reader.h"
+#include "base/json/json_writer.h"
+#include "base/logging.h"
+
+using base::Value;
+
+JSONStringValueSerializer::JSONStringValueSerializer(std::string* json_string)
+    : json_string_(json_string),
+      pretty_print_(false) {
+}
+
+JSONStringValueSerializer::~JSONStringValueSerializer() = default;
+
+bool JSONStringValueSerializer::Serialize(const Value& root) {
+  return SerializeInternal(root, false);
+}
+
+bool JSONStringValueSerializer::SerializeAndOmitBinaryValues(
+    const Value& root) {
+  return SerializeInternal(root, true);
+}
+
+bool JSONStringValueSerializer::SerializeInternal(const Value& root,
+                                                  bool omit_binary_values) {
+  if (!json_string_)
+    return false;
+
+  int options = 0;
+  if (omit_binary_values)
+    options |= base::JSONWriter::OPTIONS_OMIT_BINARY_VALUES;
+  if (pretty_print_)
+    options |= base::JSONWriter::OPTIONS_PRETTY_PRINT;
+
+  return base::JSONWriter::WriteWithOptions(root, options, json_string_);
+}
+
+JSONStringValueDeserializer::JSONStringValueDeserializer(
+    const base::StringPiece& json_string,
+    int options)
+    : json_string_(json_string), options_(options) {}
+
+JSONStringValueDeserializer::~JSONStringValueDeserializer() = default;
+
+std::unique_ptr<Value> JSONStringValueDeserializer::Deserialize(
+    int* error_code,
+    std::string* error_str) {
+  return base::JSONReader::ReadAndReturnError(json_string_, options_,
+                                              error_code, error_str);
+}
diff --git a/base/json/json_string_value_serializer.h b/base/json/json_string_value_serializer.h
new file mode 100644
index 0000000..55a53e2
--- /dev/null
+++ b/base/json/json_string_value_serializer.h
@@ -0,0 +1,75 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_JSON_JSON_STRING_VALUE_SERIALIZER_H_
+#define BASE_JSON_JSON_STRING_VALUE_SERIALIZER_H_
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/files/file_path.h"
+#include "base/macros.h"
+#include "base/strings/string_piece.h"
+#include "base/values.h"
+
+class BASE_EXPORT JSONStringValueSerializer : public base::ValueSerializer {
+ public:
+  // |json_string| is the string that will be the destination of the
+  // serialization.  The caller of the constructor retains ownership of the
+  // string. |json_string| must not be null.
+  explicit JSONStringValueSerializer(std::string* json_string);
+
+  ~JSONStringValueSerializer() override;
+
+  // Attempt to serialize the data structure represented by Value into
+  // JSON.  If the return value is true, the result will have been written
+  // into the string passed into the constructor.
+  bool Serialize(const base::Value& root) override;
+
+  // Equivalent to Serialize(root) except binary values are omitted from the
+  // output.
+  bool SerializeAndOmitBinaryValues(const base::Value& root);
+
+  void set_pretty_print(bool new_value) { pretty_print_ = new_value; }
+  bool pretty_print() { return pretty_print_; }
+
+ private:
+  bool SerializeInternal(const base::Value& root, bool omit_binary_values);
+
+  // Owned by the caller of the constructor.
+  std::string* json_string_;
+  bool pretty_print_;  // If true, serialization will span multiple lines.
+
+  DISALLOW_COPY_AND_ASSIGN(JSONStringValueSerializer);
+};
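+
+// Example usage (an illustrative sketch; the key and value are arbitrary):
+//
+//   std::string json;
+//   JSONStringValueSerializer serializer(&json);
+//   serializer.set_pretty_print(true);
+//   base::DictionaryValue dict;
+//   dict.SetString("name", "value");
+//   if (serializer.Serialize(dict)) {
+//     // |json| now holds the pretty-printed serialization of |dict|.
+//   }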
+
+class BASE_EXPORT JSONStringValueDeserializer : public base::ValueDeserializer {
+ public:
+  // This retains a reference to the contents of |json_string|, so the data
+  // must outlive the JSONStringValueDeserializer. |options| is a bitmask of
+  // JSONParserOptions.
+  explicit JSONStringValueDeserializer(const base::StringPiece& json_string,
+                                       int options = 0);
+
+  ~JSONStringValueDeserializer() override;
+
+  // Attempt to deserialize the data structure encoded in the string passed
+  // in to the constructor into a structure of Value objects.  If the return
+  // value is null, and if |error_code| is non-null, |error_code| will
+  // contain an integer error code (a JsonParseError in this case).
+  // If |error_message| is non-null, it will be filled in with a formatted
+  // error message including the location of the error if appropriate.
+  // The caller takes ownership of the returned value.
+  std::unique_ptr<base::Value> Deserialize(int* error_code,
+                                           std::string* error_message) override;
+
+ private:
+  // Data is owned by the caller of the constructor.
+  base::StringPiece json_string_;
+  const int options_;
+
+  DISALLOW_COPY_AND_ASSIGN(JSONStringValueDeserializer);
+};
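+
+// Example usage (an illustrative sketch; the input literal is arbitrary):
+//
+//   JSONStringValueDeserializer deserializer("{\"ok\": true,}",
+//                                            base::JSON_ALLOW_TRAILING_COMMAS);
+//   int error_code = 0;
+//   std::string error_message;
+//   std::unique_ptr<base::Value> value =
+//       deserializer.Deserialize(&error_code, &error_message);
+//   if (!value)
+//     LOG(ERROR) << "Parse failed: " << error_message;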
+
+#endif  // BASE_JSON_JSON_STRING_VALUE_SERIALIZER_H_
diff --git a/base/json/json_value_converter.cc b/base/json/json_value_converter.cc
new file mode 100644
index 0000000..6f772f3
--- /dev/null
+++ b/base/json/json_value_converter.cc
@@ -0,0 +1,37 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/json/json_value_converter.h"
+
+namespace base {
+namespace internal {
+
+bool BasicValueConverter<int>::Convert(
+    const base::Value& value, int* field) const {
+  return value.GetAsInteger(field);
+}
+
+bool BasicValueConverter<std::string>::Convert(
+    const base::Value& value, std::string* field) const {
+  return value.GetAsString(field);
+}
+
+bool BasicValueConverter<string16>::Convert(
+    const base::Value& value, string16* field) const {
+  return value.GetAsString(field);
+}
+
+bool BasicValueConverter<double>::Convert(
+    const base::Value& value, double* field) const {
+  return value.GetAsDouble(field);
+}
+
+bool BasicValueConverter<bool>::Convert(
+    const base::Value& value, bool* field) const {
+  return value.GetAsBoolean(field);
+}
+
+}  // namespace internal
+}  // namespace base
+
diff --git a/base/json/json_value_converter.h b/base/json/json_value_converter.h
new file mode 100644
index 0000000..ef08115
--- /dev/null
+++ b/base/json/json_value_converter.h
@@ -0,0 +1,524 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_JSON_JSON_VALUE_CONVERTER_H_
+#define BASE_JSON_JSON_VALUE_CONVERTER_H_
+
+#include <stddef.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
+#include "base/values.h"
+
+// JSONValueConverter converts a JSON value into a C++ struct in a
+// lightweight way.
+//
+// Usage:
+// For real examples, you may want to refer to the _unittest.cc file.
+//
+// Assume that you have a struct like this:
+//   struct Message {
+//     int foo;
+//     std::string bar;
+//     static void RegisterJSONConverter(
+//         JSONValueConverter<Message>* converter);
+//   };
+//
+// Suppose you want to parse JSON data into this struct.  First, you
+// need to declare a RegisterJSONConverter() method in your struct.
+//   // static
+//   void Message::RegisterJSONConverter(
+//       JSONValueConverter<Message>* converter) {
+//     converter->RegisterIntField("foo", &Message::foo);
+//     converter->RegisterStringField("bar", &Message::bar);
+//   }
+//
+// Then instantiate a JSONValueConverter for your type and call its
+// Convert() method.
+//   Message message;
+//   JSONValueConverter<Message> converter;
+//   converter.Convert(json, &message);
+//
+// Convert() returns false when it fails.  Here "fail" means that the value is
+// structurally different from what is expected, such as a string value
+// appearing for an int field.  Missing fields are not reported as failures.
+// Also note that, for performance reasons, Convert() may modify the passed
+// |message| even when it fails.
+//
+// For a nested field, the inner message type also has to implement the
+// registration method.  Then just call RegisterNestedField() from the
+// containing struct's RegisterJSONConverter() method.
+//   struct Nested {
+//     Message foo;
+//     static void RegisterJSONConverter(...) {
+//       ...
+//       converter->RegisterNestedField("foo", &Nested::foo);
+//     }
+//   };
+//
+// For a repeated field, the container is assumed to be
+// std::vector<std::unique_ptr<ElementType>>, and you can register it with
+// RegisterRepeatedInt() or one of the other RegisterRepeated* methods.
+// Use RegisterRepeatedMessage() for repeated nested fields, as in the
+// sketch below.
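+//   For example (an illustrative sketch; |Messages| is a hypothetical
+//   container struct):
+//     struct Messages {
+//       std::vector<std::unique_ptr<Message>> messages;
+//       static void RegisterJSONConverter(
+//           JSONValueConverter<Messages>* converter) {
+//         converter->RegisterRepeatedMessage("messages",
+//                                            &Messages::messages);
+//       }
+//     };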
+//
+// Sometimes a JSON format uses string representations for other types, such
+// as enums, timestamps, or URLs.  You can use the RegisterCustomField method
+// and specify a function that converts a StringPiece to your type.
+//   bool ConvertFunc(StringPiece s, YourEnum* result) {
+//     // do something and return true on success...
+//   }
+//   struct Message {
+//     YourEnum ye;
+//     ...
+//     static void RegisterJSONConverter(...) {
+//       ...
+//       converter->RegisterCustomField<YourEnum>(
+//           "your_enum", &Message::ye, &ConvertFunc);
+//     }
+//   };
+
+namespace base {
+
+template <typename StructType>
+class JSONValueConverter;
+
+namespace internal {
+
+template<typename StructType>
+class FieldConverterBase {
+ public:
+  explicit FieldConverterBase(const std::string& path) : field_path_(path) {}
+  virtual ~FieldConverterBase() = default;
+  virtual bool ConvertField(const base::Value& value, StructType* obj)
+      const = 0;
+  const std::string& field_path() const { return field_path_; }
+
+ private:
+  std::string field_path_;
+  DISALLOW_COPY_AND_ASSIGN(FieldConverterBase);
+};
+
+template <typename FieldType>
+class ValueConverter {
+ public:
+  virtual ~ValueConverter() = default;
+  virtual bool Convert(const base::Value& value, FieldType* field) const = 0;
+};
+
+template <typename StructType, typename FieldType>
+class FieldConverter : public FieldConverterBase<StructType> {
+ public:
+  explicit FieldConverter(const std::string& path,
+                          FieldType StructType::* field,
+                          ValueConverter<FieldType>* converter)
+      : FieldConverterBase<StructType>(path),
+        field_pointer_(field),
+        value_converter_(converter) {
+  }
+
+  bool ConvertField(const base::Value& value, StructType* dst) const override {
+    return value_converter_->Convert(value, &(dst->*field_pointer_));
+  }
+
+ private:
+  FieldType StructType::* field_pointer_;
+  std::unique_ptr<ValueConverter<FieldType>> value_converter_;
+  DISALLOW_COPY_AND_ASSIGN(FieldConverter);
+};
+
+template <typename FieldType>
+class BasicValueConverter;
+
+template <>
+class BASE_EXPORT BasicValueConverter<int> : public ValueConverter<int> {
+ public:
+  BasicValueConverter() = default;
+
+  bool Convert(const base::Value& value, int* field) const override;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(BasicValueConverter);
+};
+
+template <>
+class BASE_EXPORT BasicValueConverter<std::string>
+    : public ValueConverter<std::string> {
+ public:
+  BasicValueConverter() = default;
+
+  bool Convert(const base::Value& value, std::string* field) const override;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(BasicValueConverter);
+};
+
+template <>
+class BASE_EXPORT BasicValueConverter<string16>
+    : public ValueConverter<string16> {
+ public:
+  BasicValueConverter() = default;
+
+  bool Convert(const base::Value& value, string16* field) const override;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(BasicValueConverter);
+};
+
+template <>
+class BASE_EXPORT BasicValueConverter<double> : public ValueConverter<double> {
+ public:
+  BasicValueConverter() = default;
+
+  bool Convert(const base::Value& value, double* field) const override;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(BasicValueConverter);
+};
+
+template <>
+class BASE_EXPORT BasicValueConverter<bool> : public ValueConverter<bool> {
+ public:
+  BasicValueConverter() = default;
+
+  bool Convert(const base::Value& value, bool* field) const override;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(BasicValueConverter);
+};
+
+template <typename FieldType>
+class ValueFieldConverter : public ValueConverter<FieldType> {
+ public:
+  typedef bool(*ConvertFunc)(const base::Value* value, FieldType* field);
+
+  explicit ValueFieldConverter(ConvertFunc convert_func)
+      : convert_func_(convert_func) {}
+
+  bool Convert(const base::Value& value, FieldType* field) const override {
+    return convert_func_(&value, field);
+  }
+
+ private:
+  ConvertFunc convert_func_;
+
+  DISALLOW_COPY_AND_ASSIGN(ValueFieldConverter);
+};
+
+template <typename FieldType>
+class CustomFieldConverter : public ValueConverter<FieldType> {
+ public:
+  typedef bool (*ConvertFunc)(StringPiece value, FieldType* field);
+
+  explicit CustomFieldConverter(ConvertFunc convert_func)
+      : convert_func_(convert_func) {}
+
+  bool Convert(const base::Value& value, FieldType* field) const override {
+    std::string string_value;
+    return value.GetAsString(&string_value) &&
+        convert_func_(string_value, field);
+  }
+
+ private:
+  ConvertFunc convert_func_;
+
+  DISALLOW_COPY_AND_ASSIGN(CustomFieldConverter);
+};
+
+template <typename NestedType>
+class NestedValueConverter : public ValueConverter<NestedType> {
+ public:
+  NestedValueConverter() = default;
+
+  bool Convert(const base::Value& value, NestedType* field) const override {
+    return converter_.Convert(value, field);
+  }
+
+ private:
+  JSONValueConverter<NestedType> converter_;
+  DISALLOW_COPY_AND_ASSIGN(NestedValueConverter);
+};
+
+template <typename Element>
+class RepeatedValueConverter
+    : public ValueConverter<std::vector<std::unique_ptr<Element>>> {
+ public:
+  RepeatedValueConverter() = default;
+
+  bool Convert(const base::Value& value,
+               std::vector<std::unique_ptr<Element>>* field) const override {
+    const base::ListValue* list = NULL;
+    if (!value.GetAsList(&list)) {
+      // The field is not a list.
+      return false;
+    }
+
+    field->reserve(list->GetSize());
+    for (size_t i = 0; i < list->GetSize(); ++i) {
+      const base::Value* element = NULL;
+      if (!list->Get(i, &element))
+        continue;
+
+      std::unique_ptr<Element> e(new Element);
+      if (basic_converter_.Convert(*element, e.get())) {
+        field->push_back(std::move(e));
+      } else {
+        DVLOG(1) << "failure at " << i << "-th element";
+        return false;
+      }
+    }
+    return true;
+  }
+
+ private:
+  BasicValueConverter<Element> basic_converter_;
+  DISALLOW_COPY_AND_ASSIGN(RepeatedValueConverter);
+};
+
+template <typename NestedType>
+class RepeatedMessageConverter
+    : public ValueConverter<std::vector<std::unique_ptr<NestedType>>> {
+ public:
+  RepeatedMessageConverter() = default;
+
+  bool Convert(const base::Value& value,
+               std::vector<std::unique_ptr<NestedType>>* field) const override {
+    const base::ListValue* list = NULL;
+    if (!value.GetAsList(&list))
+      return false;
+
+    field->reserve(list->GetSize());
+    for (size_t i = 0; i < list->GetSize(); ++i) {
+      const base::Value* element = NULL;
+      if (!list->Get(i, &element))
+        continue;
+
+      std::unique_ptr<NestedType> nested(new NestedType);
+      if (converter_.Convert(*element, nested.get())) {
+        field->push_back(std::move(nested));
+      } else {
+        DVLOG(1) << "failure at " << i << "-th element";
+        return false;
+      }
+    }
+    return true;
+  }
+
+ private:
+  JSONValueConverter<NestedType> converter_;
+  DISALLOW_COPY_AND_ASSIGN(RepeatedMessageConverter);
+};
+
+template <typename NestedType>
+class RepeatedCustomValueConverter
+    : public ValueConverter<std::vector<std::unique_ptr<NestedType>>> {
+ public:
+  typedef bool(*ConvertFunc)(const base::Value* value, NestedType* field);
+
+  explicit RepeatedCustomValueConverter(ConvertFunc convert_func)
+      : convert_func_(convert_func) {}
+
+  bool Convert(const base::Value& value,
+               std::vector<std::unique_ptr<NestedType>>* field) const override {
+    const base::ListValue* list = NULL;
+    if (!value.GetAsList(&list))
+      return false;
+
+    field->reserve(list->GetSize());
+    for (size_t i = 0; i < list->GetSize(); ++i) {
+      const base::Value* element = NULL;
+      if (!list->Get(i, &element))
+        continue;
+
+      std::unique_ptr<NestedType> nested(new NestedType);
+      if ((*convert_func_)(element, nested.get())) {
+        field->push_back(std::move(nested));
+      } else {
+        DVLOG(1) << "failure at " << i << "-th element";
+        return false;
+      }
+    }
+    return true;
+  }
+
+ private:
+  ConvertFunc convert_func_;
+  DISALLOW_COPY_AND_ASSIGN(RepeatedCustomValueConverter);
+};
+
+
+}  // namespace internal
+
+template <class StructType>
+class JSONValueConverter {
+ public:
+  JSONValueConverter() {
+    StructType::RegisterJSONConverter(this);
+  }
+
+  void RegisterIntField(const std::string& field_name,
+                        int StructType::* field) {
+    fields_.push_back(
+        std::make_unique<internal::FieldConverter<StructType, int>>(
+            field_name, field, new internal::BasicValueConverter<int>));
+  }
+
+  void RegisterStringField(const std::string& field_name,
+                           std::string StructType::* field) {
+    fields_.push_back(
+        std::make_unique<internal::FieldConverter<StructType, std::string>>(
+            field_name, field, new internal::BasicValueConverter<std::string>));
+  }
+
+  void RegisterStringField(const std::string& field_name,
+                           string16 StructType::* field) {
+    fields_.push_back(
+        std::make_unique<internal::FieldConverter<StructType, string16>>(
+            field_name, field, new internal::BasicValueConverter<string16>));
+  }
+
+  void RegisterBoolField(const std::string& field_name,
+                         bool StructType::* field) {
+    fields_.push_back(
+        std::make_unique<internal::FieldConverter<StructType, bool>>(
+            field_name, field, new internal::BasicValueConverter<bool>));
+  }
+
+  void RegisterDoubleField(const std::string& field_name,
+                           double StructType::* field) {
+    fields_.push_back(
+        std::make_unique<internal::FieldConverter<StructType, double>>(
+            field_name, field, new internal::BasicValueConverter<double>));
+  }
+
+  template <class NestedType>
+  void RegisterNestedField(
+      const std::string& field_name, NestedType StructType::* field) {
+    fields_.push_back(
+        std::make_unique<internal::FieldConverter<StructType, NestedType>>(
+            field_name, field, new internal::NestedValueConverter<NestedType>));
+  }
+
+  template <typename FieldType>
+  void RegisterCustomField(const std::string& field_name,
+                           FieldType StructType::*field,
+                           bool (*convert_func)(StringPiece, FieldType*)) {
+    fields_.push_back(
+        std::make_unique<internal::FieldConverter<StructType, FieldType>>(
+            field_name, field,
+            new internal::CustomFieldConverter<FieldType>(convert_func)));
+  }
+
+  template <typename FieldType>
+  void RegisterCustomValueField(
+      const std::string& field_name,
+      FieldType StructType::* field,
+      bool (*convert_func)(const base::Value*, FieldType*)) {
+    fields_.push_back(
+        std::make_unique<internal::FieldConverter<StructType, FieldType>>(
+            field_name, field,
+            new internal::ValueFieldConverter<FieldType>(convert_func)));
+  }
+
+  void RegisterRepeatedInt(
+      const std::string& field_name,
+      std::vector<std::unique_ptr<int>> StructType::*field) {
+    fields_.push_back(std::make_unique<internal::FieldConverter<
+                          StructType, std::vector<std::unique_ptr<int>>>>(
+        field_name, field, new internal::RepeatedValueConverter<int>));
+  }
+
+  void RegisterRepeatedString(
+      const std::string& field_name,
+      std::vector<std::unique_ptr<std::string>> StructType::*field) {
+    fields_.push_back(
+        std::make_unique<internal::FieldConverter<
+            StructType, std::vector<std::unique_ptr<std::string>>>>(
+            field_name, field,
+            new internal::RepeatedValueConverter<std::string>));
+  }
+
+  void RegisterRepeatedString(
+      const std::string& field_name,
+      std::vector<std::unique_ptr<string16>> StructType::*field) {
+    fields_.push_back(std::make_unique<internal::FieldConverter<
+                          StructType, std::vector<std::unique_ptr<string16>>>>(
+        field_name, field, new internal::RepeatedValueConverter<string16>));
+  }
+
+  void RegisterRepeatedDouble(
+      const std::string& field_name,
+      std::vector<std::unique_ptr<double>> StructType::*field) {
+    fields_.push_back(std::make_unique<internal::FieldConverter<
+                          StructType, std::vector<std::unique_ptr<double>>>>(
+        field_name, field, new internal::RepeatedValueConverter<double>));
+  }
+
+  void RegisterRepeatedBool(
+      const std::string& field_name,
+      std::vector<std::unique_ptr<bool>> StructType::*field) {
+    fields_.push_back(std::make_unique<internal::FieldConverter<
+                          StructType, std::vector<std::unique_ptr<bool>>>>(
+        field_name, field, new internal::RepeatedValueConverter<bool>));
+  }
+
+  template <class NestedType>
+  void RegisterRepeatedCustomValue(
+      const std::string& field_name,
+      std::vector<std::unique_ptr<NestedType>> StructType::*field,
+      bool (*convert_func)(const base::Value*, NestedType*)) {
+    fields_.push_back(
+        std::make_unique<internal::FieldConverter<
+            StructType, std::vector<std::unique_ptr<NestedType>>>>(
+            field_name, field,
+            new internal::RepeatedCustomValueConverter<NestedType>(
+                convert_func)));
+  }
+
+  template <class NestedType>
+  void RegisterRepeatedMessage(
+      const std::string& field_name,
+      std::vector<std::unique_ptr<NestedType>> StructType::*field) {
+    fields_.push_back(
+        std::make_unique<internal::FieldConverter<
+            StructType, std::vector<std::unique_ptr<NestedType>>>>(
+            field_name, field,
+            new internal::RepeatedMessageConverter<NestedType>));
+  }
+
+  bool Convert(const base::Value& value, StructType* output) const {
+    const DictionaryValue* dictionary_value = NULL;
+    if (!value.GetAsDictionary(&dictionary_value))
+      return false;
+
+    for (size_t i = 0; i < fields_.size(); ++i) {
+      const internal::FieldConverterBase<StructType>* field_converter =
+          fields_[i].get();
+      const base::Value* field = NULL;
+      if (dictionary_value->Get(field_converter->field_path(), &field)) {
+        if (!field_converter->ConvertField(*field, output)) {
+          DVLOG(1) << "failure at field " << field_converter->field_path();
+          return false;
+        }
+      }
+    }
+    return true;
+  }
+
+ private:
+  std::vector<std::unique_ptr<internal::FieldConverterBase<StructType>>>
+      fields_;
+
+  DISALLOW_COPY_AND_ASSIGN(JSONValueConverter);
+};
+
+}  // namespace base
+
+#endif  // BASE_JSON_JSON_VALUE_CONVERTER_H_
diff --git a/base/json/json_value_converter_unittest.cc b/base/json/json_value_converter_unittest.cc
new file mode 100644
index 0000000..322f5f0
--- /dev/null
+++ b/base/json/json_value_converter_unittest.cc
@@ -0,0 +1,255 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/json/json_value_converter.h"
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/json/json_reader.h"
+#include "base/strings/string_piece.h"
+#include "base/values.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+// Very simple messages.
+struct SimpleMessage {
+  enum SimpleEnum {
+    FOO, BAR,
+  };
+  int foo;
+  std::string bar;
+  bool baz;
+  bool bstruct;
+  SimpleEnum simple_enum;
+  std::vector<std::unique_ptr<int>> ints;
+  std::vector<std::unique_ptr<std::string>> string_values;
+  SimpleMessage() : foo(0), baz(false), bstruct(false), simple_enum(FOO) {}
+
+  static bool ParseSimpleEnum(StringPiece value, SimpleEnum* field) {
+    if (value == "foo") {
+      *field = FOO;
+      return true;
+    } else if (value == "bar") {
+      *field = BAR;
+      return true;
+    }
+    return false;
+  }
+
+  static bool HasFieldPresent(const base::Value* value, bool* result) {
+    *result = value != nullptr;
+    return true;
+  }
+
+  static bool GetValueString(const base::Value* value, std::string* result) {
+    const base::DictionaryValue* dict = nullptr;
+    if (!value->GetAsDictionary(&dict))
+      return false;
+
+    if (!dict->GetString("val", result))
+      return false;
+
+    return true;
+  }
+
+  static void RegisterJSONConverter(
+      base::JSONValueConverter<SimpleMessage>* converter) {
+    converter->RegisterIntField("foo", &SimpleMessage::foo);
+    converter->RegisterStringField("bar", &SimpleMessage::bar);
+    converter->RegisterBoolField("baz", &SimpleMessage::baz);
+    converter->RegisterCustomField<SimpleEnum>(
+        "simple_enum", &SimpleMessage::simple_enum, &ParseSimpleEnum);
+    converter->RegisterRepeatedInt("ints", &SimpleMessage::ints);
+    converter->RegisterCustomValueField<bool>("bstruct",
+                                              &SimpleMessage::bstruct,
+                                              &HasFieldPresent);
+    converter->RegisterRepeatedCustomValue<std::string>(
+        "string_values",
+        &SimpleMessage::string_values,
+        &GetValueString);
+  }
+};
+
+// For nested messages.
+struct NestedMessage {
+  double foo;
+  SimpleMessage child;
+  std::vector<std::unique_ptr<SimpleMessage>> children;
+
+  NestedMessage() : foo(0) {}
+
+  static void RegisterJSONConverter(
+      base::JSONValueConverter<NestedMessage>* converter) {
+    converter->RegisterDoubleField("foo", &NestedMessage::foo);
+    converter->RegisterNestedField("child", &NestedMessage::child);
+    converter->RegisterRepeatedMessage("children", &NestedMessage::children);
+  }
+};
+
+}  // namespace
+
+TEST(JSONValueConverterTest, ParseSimpleMessage) {
+  const char normal_data[] =
+      "{\n"
+      "  \"foo\": 1,\n"
+      "  \"bar\": \"bar\",\n"
+      "  \"baz\": true,\n"
+      "  \"bstruct\": {},\n"
+      "  \"string_values\": [{\"val\": \"value_1\"}, {\"val\": \"value_2\"}],"
+      "  \"simple_enum\": \"foo\","
+      "  \"ints\": [1, 2]"
+      "}\n";
+
+  std::unique_ptr<Value> value = base::JSONReader::Read(normal_data);
+  SimpleMessage message;
+  base::JSONValueConverter<SimpleMessage> converter;
+  EXPECT_TRUE(converter.Convert(*value.get(), &message));
+
+  EXPECT_EQ(1, message.foo);
+  EXPECT_EQ("bar", message.bar);
+  EXPECT_TRUE(message.baz);
+  EXPECT_EQ(SimpleMessage::FOO, message.simple_enum);
+  EXPECT_EQ(2, static_cast<int>(message.ints.size()));
+  ASSERT_EQ(2U, message.string_values.size());
+  EXPECT_EQ("value_1", *message.string_values[0]);
+  EXPECT_EQ("value_2", *message.string_values[1]);
+  EXPECT_EQ(1, *(message.ints[0]));
+  EXPECT_EQ(2, *(message.ints[1]));
+}
+
+TEST(JSONValueConverterTest, ParseNestedMessage) {
+  const char normal_data[] =
+      "{\n"
+      "  \"foo\": 1.0,\n"
+      "  \"child\": {\n"
+      "    \"foo\": 1,\n"
+      "    \"bar\": \"bar\",\n"
+      "    \"bstruct\": {},\n"
+      "    \"string_values\": [{\"val\": \"value_1\"}, {\"val\": \"value_2\"}],"
+      "    \"baz\": true\n"
+      "  },\n"
+      "  \"children\": [{\n"
+      "    \"foo\": 2,\n"
+      "    \"bar\": \"foobar\",\n"
+      "    \"bstruct\": \"\",\n"
+      "    \"string_values\": [{\"val\": \"value_1\"}],"
+      "    \"baz\": true\n"
+      "  },\n"
+      "  {\n"
+      "    \"foo\": 3,\n"
+      "    \"bar\": \"barbaz\",\n"
+      "    \"baz\": false\n"
+      "  }]\n"
+      "}\n";
+
+  std::unique_ptr<Value> value = base::JSONReader::Read(normal_data);
+  NestedMessage message;
+  base::JSONValueConverter<NestedMessage> converter;
+  EXPECT_TRUE(converter.Convert(*value.get(), &message));
+
+  EXPECT_EQ(1.0, message.foo);
+  EXPECT_EQ(1, message.child.foo);
+  EXPECT_EQ("bar", message.child.bar);
+  EXPECT_TRUE(message.child.baz);
+  EXPECT_TRUE(message.child.bstruct);
+  ASSERT_EQ(2U, message.child.string_values.size());
+  EXPECT_EQ("value_1", *message.child.string_values[0]);
+  EXPECT_EQ("value_2", *message.child.string_values[1]);
+
+  EXPECT_EQ(2, static_cast<int>(message.children.size()));
+  const SimpleMessage* first_child = message.children[0].get();
+  ASSERT_TRUE(first_child);
+  EXPECT_EQ(2, first_child->foo);
+  EXPECT_EQ("foobar", first_child->bar);
+  EXPECT_TRUE(first_child->baz);
+  EXPECT_TRUE(first_child->bstruct);
+  ASSERT_EQ(1U, first_child->string_values.size());
+  EXPECT_EQ("value_1", *first_child->string_values[0]);
+
+  const SimpleMessage* second_child = message.children[1].get();
+  ASSERT_TRUE(second_child);
+  EXPECT_EQ(3, second_child->foo);
+  EXPECT_EQ("barbaz", second_child->bar);
+  EXPECT_FALSE(second_child->baz);
+  EXPECT_FALSE(second_child->bstruct);
+  EXPECT_EQ(0U, second_child->string_values.size());
+}
+
+TEST(JSONValueConverterTest, ParseFailures) {
+  const char normal_data[] =
+      "{\n"
+      "  \"foo\": 1,\n"
+      "  \"bar\": 2,\n" // "bar" is an integer here.
+      "  \"baz\": true,\n"
+      "  \"ints\": [1, 2]"
+      "}\n";
+
+  std::unique_ptr<Value> value = base::JSONReader::Read(normal_data);
+  SimpleMessage message;
+  base::JSONValueConverter<SimpleMessage> converter;
+  EXPECT_FALSE(converter.Convert(*value.get(), &message));
+  // Do not check the values below.  |message| may be modified during
+  // Convert() even if it fails.
+}
+
+TEST(JSONValueConverterTest, ParseWithMissingFields) {
+  const char normal_data[] =
+      "{\n"
+      "  \"foo\": 1,\n"
+      "  \"baz\": true,\n"
+      "  \"ints\": [1, 2]"
+      "}\n";
+
+  std::unique_ptr<Value> value = base::JSONReader::Read(normal_data);
+  SimpleMessage message;
+  base::JSONValueConverter<SimpleMessage> converter;
+  // Convert() still succeeds even if the input doesn't have a "bar" field.
+  EXPECT_TRUE(converter.Convert(*value.get(), &message));
+
+  EXPECT_EQ(1, message.foo);
+  EXPECT_TRUE(message.baz);
+  EXPECT_EQ(2, static_cast<int>(message.ints.size()));
+  EXPECT_EQ(1, *(message.ints[0]));
+  EXPECT_EQ(2, *(message.ints[1]));
+}
+
+TEST(JSONValueConverterTest, EnumParserFails) {
+  const char normal_data[] =
+      "{\n"
+      "  \"foo\": 1,\n"
+      "  \"bar\": \"bar\",\n"
+      "  \"baz\": true,\n"
+      "  \"simple_enum\": \"baz\","
+      "  \"ints\": [1, 2]"
+      "}\n";
+
+  std::unique_ptr<Value> value = base::JSONReader::Read(normal_data);
+  SimpleMessage message;
+  base::JSONValueConverter<SimpleMessage> converter;
+  EXPECT_FALSE(converter.Convert(*value.get(), &message));
+  // Do not check the values, as mentioned above.
+}
+
+TEST(JSONValueConverterTest, RepeatedValueErrorInTheMiddle) {
+  const char normal_data[] =
+      "{\n"
+      "  \"foo\": 1,\n"
+      "  \"bar\": \"bar\",\n"
+      "  \"baz\": true,\n"
+      "  \"simple_enum\": \"baz\","
+      "  \"ints\": [1, false]"
+      "}\n";
+
+  std::unique_ptr<Value> value = base::JSONReader::Read(normal_data);
+  SimpleMessage message;
+  base::JSONValueConverter<SimpleMessage> converter;
+  EXPECT_FALSE(converter.Convert(*value.get(), &message));
+  // Do not check the values, as mentioned above.
+}
+
+}  // namespace base
diff --git a/base/json/json_value_serializer_unittest.cc b/base/json/json_value_serializer_unittest.cc
new file mode 100644
index 0000000..d25f950
--- /dev/null
+++ b/base/json/json_value_serializer_unittest.cc
@@ -0,0 +1,487 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <memory>
+#include <string>
+
+#include "base/files/file_util.h"
+#include "base/files/scoped_temp_dir.h"
+#include "base/json/json_file_value_serializer.h"
+#include "base/json/json_reader.h"
+#include "base/json/json_string_value_serializer.h"
+#include "base/json/json_writer.h"
+#include "base/path_service.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/values.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+// Some proper JSON to test with:
+const char kProperJSON[] =
+    "{\n"
+    "   \"compound\": {\n"
+    "      \"a\": 1,\n"
+    "      \"b\": 2\n"
+    "   },\n"
+    "   \"some_String\": \"1337\",\n"
+    "   \"some_int\": 42,\n"
+    "   \"the_list\": [ \"val1\", \"val2\" ]\n"
+    "}\n";
+
+// Some proper JSON with trailing commas:
+const char kProperJSONWithCommas[] =
+    "{\n"
+    "\t\"some_int\": 42,\n"
+    "\t\"some_String\": \"1337\",\n"
+    "\t\"the_list\": [\"val1\", \"val2\", ],\n"
+    "\t\"compound\": { \"a\": 1, \"b\": 2, },\n"
+    "}\n";
+
+// kProperJSON with a few misc characters at the beginning and end.
+const char kProperJSONPadded[] =
+    ")]}'\n"
+    "{\n"
+    "   \"compound\": {\n"
+    "      \"a\": 1,\n"
+    "      \"b\": 2\n"
+    "   },\n"
+    "   \"some_String\": \"1337\",\n"
+    "   \"some_int\": 42,\n"
+    "   \"the_list\": [ \"val1\", \"val2\" ]\n"
+    "}\n"
+    "?!ab\n";
+
+const char kWinLineEnds[] = "\r\n";
+const char kLinuxLineEnds[] = "\n";
+
+// Verifies the generated JSON against the expected output.
+void CheckJSONIsStillTheSame(const Value& value) {
+  // Serialize back the output.
+  std::string serialized_json;
+  JSONStringValueSerializer str_serializer(&serialized_json);
+  str_serializer.set_pretty_print(true);
+  ASSERT_TRUE(str_serializer.Serialize(value));
+  // Unify line endings between platforms.
+  ReplaceSubstringsAfterOffset(&serialized_json, 0,
+                               kWinLineEnds, kLinuxLineEnds);
+  // Now compare the input with the output.
+  ASSERT_EQ(kProperJSON, serialized_json);
+}
+
+void ValidateJsonList(const std::string& json) {
+  std::unique_ptr<ListValue> list = ListValue::From(JSONReader::Read(json));
+  ASSERT_TRUE(list);
+  ASSERT_EQ(1U, list->GetSize());
+  Value* elt = nullptr;
+  ASSERT_TRUE(list->Get(0, &elt));
+  int value = 0;
+  ASSERT_TRUE(elt && elt->GetAsInteger(&value));
+  ASSERT_EQ(1, value);
+}
+
+// Test that proper JSON deserialization from a string works.
+TEST(JSONValueDeserializerTest, ReadProperJSONFromString) {
+  // Try to deserialize it through the deserializer.
+  JSONStringValueDeserializer str_deserializer(kProperJSON);
+
+  int error_code = 0;
+  std::string error_message;
+  std::unique_ptr<Value> value =
+      str_deserializer.Deserialize(&error_code, &error_message);
+  ASSERT_TRUE(value);
+  ASSERT_EQ(0, error_code);
+  ASSERT_TRUE(error_message.empty());
+  // Verify that the same JSON is still there.
+  CheckJSONIsStillTheSame(*value);
+}
+
+// Test proper JSON deserialization from a StringPiece substring.
+TEST(JSONValueDeserializerTest, ReadProperJSONFromStringPiece) {
+  // Create a StringPiece for the substring of kProperJSONPadded that matches
+  // kProperJSON.
+  StringPiece proper_json(kProperJSONPadded);
+  proper_json = proper_json.substr(5, proper_json.length() - 10);
+  JSONStringValueDeserializer str_deserializer(proper_json);
+
+  int error_code = 0;
+  std::string error_message;
+  std::unique_ptr<Value> value =
+      str_deserializer.Deserialize(&error_code, &error_message);
+  ASSERT_TRUE(value);
+  ASSERT_EQ(0, error_code);
+  ASSERT_TRUE(error_message.empty());
+  // Verify that the same JSON is still there.
+  CheckJSONIsStillTheSame(*value);
+}
+
+// Test that trailing commas are only properly deserialized from a string
+// when the proper flag for that is set.
+TEST(JSONValueDeserializerTest, ReadJSONWithTrailingCommasFromString) {
+  // Try to deserialize it through the deserializer.
+  JSONStringValueDeserializer str_deserializer(kProperJSONWithCommas);
+
+  int error_code = 0;
+  std::string error_message;
+  std::unique_ptr<Value> value =
+      str_deserializer.Deserialize(&error_code, &error_message);
+  ASSERT_FALSE(value);
+  ASSERT_NE(0, error_code);
+  ASSERT_FALSE(error_message.empty());
+  // Repeat with commas allowed.
+  JSONStringValueDeserializer str_deserializer2(kProperJSONWithCommas,
+                                                JSON_ALLOW_TRAILING_COMMAS);
+  value = str_deserializer2.Deserialize(&error_code, &error_message);
+  ASSERT_TRUE(value);
+  ASSERT_EQ(JSONReader::JSON_TRAILING_COMMA, error_code);
+  // Verify that the same JSON is still there.
+  CheckJSONIsStillTheSame(*value);
+}
+
+// Test that proper JSON deserialization from a file works.
+TEST(JSONValueDeserializerTest, ReadProperJSONFromFile) {
+  ScopedTempDir tempdir;
+  ASSERT_TRUE(tempdir.CreateUniqueTempDir());
+  // Write the JSON to a temp file.
+  FilePath temp_file(tempdir.GetPath().AppendASCII("test.json"));
+  ASSERT_EQ(static_cast<int>(strlen(kProperJSON)),
+            WriteFile(temp_file, kProperJSON, strlen(kProperJSON)));
+
+  // Try to deserialize it through the deserializer.
+  JSONFileValueDeserializer file_deserializer(temp_file);
+
+  int error_code = 0;
+  std::string error_message;
+  std::unique_ptr<Value> value =
+      file_deserializer.Deserialize(&error_code, &error_message);
+  ASSERT_TRUE(value);
+  ASSERT_EQ(0, error_code);
+  ASSERT_TRUE(error_message.empty());
+  // Verify that the same JSON is still there.
+  CheckJSONIsStillTheSame(*value);
+}
+
+// Test that trailing commas are only properly deserialized from a file
+// when the proper flag for that is set.
+TEST(JSONValueDeserializerTest, ReadJSONWithCommasFromFile) {
+  ScopedTempDir tempdir;
+  ASSERT_TRUE(tempdir.CreateUniqueTempDir());
+  // Write the JSON to a temp file.
+  FilePath temp_file(tempdir.GetPath().AppendASCII("test.json"));
+  ASSERT_EQ(static_cast<int>(strlen(kProperJSONWithCommas)),
+            WriteFile(temp_file, kProperJSONWithCommas,
+                      strlen(kProperJSONWithCommas)));
+
+  // Try to deserialize it through the deserializer.
+  JSONFileValueDeserializer file_deserializer(temp_file);
+  // This must fail without the proper flag.
+  int error_code = 0;
+  std::string error_message;
+  std::unique_ptr<Value> value =
+      file_deserializer.Deserialize(&error_code, &error_message);
+  ASSERT_FALSE(value);
+  ASSERT_NE(0, error_code);
+  ASSERT_FALSE(error_message.empty());
+  // Repeat with commas allowed.
+  JSONFileValueDeserializer file_deserializer2(temp_file,
+                                               JSON_ALLOW_TRAILING_COMMAS);
+  value = file_deserializer2.Deserialize(&error_code, &error_message);
+  ASSERT_TRUE(value);
+  ASSERT_EQ(JSONReader::JSON_TRAILING_COMMA, error_code);
+  // Verify that the same JSON is still there.
+  CheckJSONIsStillTheSame(*value);
+}
+
+TEST(JSONValueDeserializerTest, AllowTrailingComma) {
+  static const char kTestWithCommas[] = "{\"key\": [true,],}";
+  static const char kTestNoCommas[] = "{\"key\": [true]}";
+
+  JSONStringValueDeserializer deserializer(kTestWithCommas,
+                                           JSON_ALLOW_TRAILING_COMMAS);
+  JSONStringValueDeserializer deserializer_expected(kTestNoCommas);
+  std::unique_ptr<Value> root = deserializer.Deserialize(nullptr, nullptr);
+  ASSERT_TRUE(root);
+  std::unique_ptr<Value> root_expected;
+  root_expected = deserializer_expected.Deserialize(nullptr, nullptr);
+  ASSERT_TRUE(root_expected);
+  ASSERT_TRUE(root->Equals(root_expected.get()));
+}
+
+TEST(JSONValueSerializerTest, Roundtrip) {
+  static const char kOriginalSerialization[] =
+    "{\"bool\":true,\"double\":3.14,\"int\":42,\"list\":[1,2],\"null\":null}";
+  JSONStringValueDeserializer deserializer(kOriginalSerialization);
+  std::unique_ptr<DictionaryValue> root_dict =
+      DictionaryValue::From(deserializer.Deserialize(nullptr, nullptr));
+  ASSERT_TRUE(root_dict);
+
+  Value* null_value = nullptr;
+  ASSERT_TRUE(root_dict->Get("null", &null_value));
+  ASSERT_TRUE(null_value);
+  ASSERT_TRUE(null_value->is_none());
+
+  bool bool_value = false;
+  ASSERT_TRUE(root_dict->GetBoolean("bool", &bool_value));
+  ASSERT_TRUE(bool_value);
+
+  int int_value = 0;
+  ASSERT_TRUE(root_dict->GetInteger("int", &int_value));
+  ASSERT_EQ(42, int_value);
+
+  double double_value = 0.0;
+  ASSERT_TRUE(root_dict->GetDouble("double", &double_value));
+  ASSERT_DOUBLE_EQ(3.14, double_value);
+
+  std::string test_serialization;
+  JSONStringValueSerializer mutable_serializer(&test_serialization);
+  ASSERT_TRUE(mutable_serializer.Serialize(*root_dict));
+  ASSERT_EQ(kOriginalSerialization, test_serialization);
+
+  mutable_serializer.set_pretty_print(true);
+  ASSERT_TRUE(mutable_serializer.Serialize(*root_dict));
+  // JSON output uses a different newline style on Windows than on other
+  // platforms.
+#if defined(OS_WIN)
+#define JSON_NEWLINE "\r\n"
+#else
+#define JSON_NEWLINE "\n"
+#endif
+  const std::string pretty_serialization =
+    "{" JSON_NEWLINE
+    "   \"bool\": true," JSON_NEWLINE
+    "   \"double\": 3.14," JSON_NEWLINE
+    "   \"int\": 42," JSON_NEWLINE
+    "   \"list\": [ 1, 2 ]," JSON_NEWLINE
+    "   \"null\": null" JSON_NEWLINE
+    "}" JSON_NEWLINE;
+#undef JSON_NEWLINE
+  ASSERT_EQ(pretty_serialization, test_serialization);
+}
+
+TEST(JSONValueSerializerTest, StringEscape) {
+  string16 all_chars;
+  for (int i = 1; i < 256; ++i) {
+    all_chars += static_cast<char16>(i);
+  }
+  // Generated in Firefox using the following JS (with an extra backslash
+  // for the double quote):
+  // var s = '';
+  // for (var i = 1; i < 256; ++i) { s += String.fromCharCode(i); }
+  // uneval(s).replace(/\\/g, "\\\\");
+  std::string all_chars_expected =
+      "\\u0001\\u0002\\u0003\\u0004\\u0005\\u0006\\u0007\\b\\t\\n\\u000B\\f\\r"
+      "\\u000E\\u000F\\u0010\\u0011\\u0012\\u0013\\u0014\\u0015\\u0016\\u0017"
+      "\\u0018\\u0019\\u001A\\u001B\\u001C\\u001D\\u001E\\u001F !\\\"#$%&'()*+,"
+      "-./0123456789:;\\u003C=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]^_`abcde"
+      "fghijklmnopqrstuvwxyz{|}~\x7F\xC2\x80\xC2\x81\xC2\x82\xC2\x83\xC2\x84"
+      "\xC2\x85\xC2\x86\xC2\x87\xC2\x88\xC2\x89\xC2\x8A\xC2\x8B\xC2\x8C\xC2\x8D"
+      "\xC2\x8E\xC2\x8F\xC2\x90\xC2\x91\xC2\x92\xC2\x93\xC2\x94\xC2\x95\xC2\x96"
+      "\xC2\x97\xC2\x98\xC2\x99\xC2\x9A\xC2\x9B\xC2\x9C\xC2\x9D\xC2\x9E\xC2\x9F"
+      "\xC2\xA0\xC2\xA1\xC2\xA2\xC2\xA3\xC2\xA4\xC2\xA5\xC2\xA6\xC2\xA7\xC2\xA8"
+      "\xC2\xA9\xC2\xAA\xC2\xAB\xC2\xAC\xC2\xAD\xC2\xAE\xC2\xAF\xC2\xB0\xC2\xB1"
+      "\xC2\xB2\xC2\xB3\xC2\xB4\xC2\xB5\xC2\xB6\xC2\xB7\xC2\xB8\xC2\xB9\xC2\xBA"
+      "\xC2\xBB\xC2\xBC\xC2\xBD\xC2\xBE\xC2\xBF\xC3\x80\xC3\x81\xC3\x82\xC3\x83"
+      "\xC3\x84\xC3\x85\xC3\x86\xC3\x87\xC3\x88\xC3\x89\xC3\x8A\xC3\x8B\xC3\x8C"
+      "\xC3\x8D\xC3\x8E\xC3\x8F\xC3\x90\xC3\x91\xC3\x92\xC3\x93\xC3\x94\xC3\x95"
+      "\xC3\x96\xC3\x97\xC3\x98\xC3\x99\xC3\x9A\xC3\x9B\xC3\x9C\xC3\x9D\xC3\x9E"
+      "\xC3\x9F\xC3\xA0\xC3\xA1\xC3\xA2\xC3\xA3\xC3\xA4\xC3\xA5\xC3\xA6\xC3\xA7"
+      "\xC3\xA8\xC3\xA9\xC3\xAA\xC3\xAB\xC3\xAC\xC3\xAD\xC3\xAE\xC3\xAF\xC3\xB0"
+      "\xC3\xB1\xC3\xB2\xC3\xB3\xC3\xB4\xC3\xB5\xC3\xB6\xC3\xB7\xC3\xB8\xC3\xB9"
+      "\xC3\xBA\xC3\xBB\xC3\xBC\xC3\xBD\xC3\xBE\xC3\xBF";
+
+  std::string expected_output = "{\"all_chars\":\"" + all_chars_expected +
+                                 "\"}";
+  // Test JSONWriter interface
+  std::string output_js;
+  DictionaryValue valueRoot;
+  valueRoot.SetString("all_chars", all_chars);
+  JSONWriter::Write(valueRoot, &output_js);
+  ASSERT_EQ(expected_output, output_js);
+
+  // Test JSONValueSerializer interface (uses JSONWriter).
+  JSONStringValueSerializer serializer(&output_js);
+  ASSERT_TRUE(serializer.Serialize(valueRoot));
+  ASSERT_EQ(expected_output, output_js);
+}
+
+TEST(JSONValueSerializerTest, UnicodeStrings) {
+  // unicode string json -> escaped ascii text
+  DictionaryValue root;
+  string16 test(WideToUTF16(L"\x7F51\x9875"));
+  root.SetString("web", test);
+
+  static const char kExpected[] = "{\"web\":\"\xE7\xBD\x91\xE9\xA1\xB5\"}";
+
+  std::string actual;
+  JSONStringValueSerializer serializer(&actual);
+  ASSERT_TRUE(serializer.Serialize(root));
+  ASSERT_EQ(kExpected, actual);
+
+  // escaped ascii text -> json
+  JSONStringValueDeserializer deserializer(kExpected);
+  std::unique_ptr<Value> deserial_root =
+      deserializer.Deserialize(nullptr, nullptr);
+  ASSERT_TRUE(deserial_root);
+  DictionaryValue* dict_root =
+      static_cast<DictionaryValue*>(deserial_root.get());
+  string16 web_value;
+  ASSERT_TRUE(dict_root->GetString("web", &web_value));
+  ASSERT_EQ(test, web_value);
+}
+
+TEST(JSONValueSerializerTest, HexStrings) {
+  // hex string json -> escaped ascii text
+  DictionaryValue root;
+  string16 test(WideToUTF16(L"\x01\x02"));
+  root.SetString("test", test);
+
+  static const char kExpected[] = "{\"test\":\"\\u0001\\u0002\"}";
+
+  std::string actual;
+  JSONStringValueSerializer serializer(&actual);
+  ASSERT_TRUE(serializer.Serialize(root));
+  ASSERT_EQ(kExpected, actual);
+
+  // escaped ascii text -> json
+  JSONStringValueDeserializer deserializer(kExpected);
+  std::unique_ptr<Value> deserial_root =
+      deserializer.Deserialize(nullptr, nullptr);
+  ASSERT_TRUE(deserial_root);
+  DictionaryValue* dict_root =
+      static_cast<DictionaryValue*>(deserial_root.get());
+  string16 test_value;
+  ASSERT_TRUE(dict_root->GetString("test", &test_value));
+  ASSERT_EQ(test, test_value);
+
+  // Test converting escaped regular chars
+  static const char kEscapedChars[] = "{\"test\":\"\\u0067\\u006f\"}";
+  JSONStringValueDeserializer deserializer2(kEscapedChars);
+  deserial_root = deserializer2.Deserialize(nullptr, nullptr);
+  ASSERT_TRUE(deserial_root);
+  dict_root = static_cast<DictionaryValue*>(deserial_root.get());
+  ASSERT_TRUE(dict_root->GetString("test", &test_value));
+  ASSERT_EQ(ASCIIToUTF16("go"), test_value);
+}
+
+TEST(JSONValueSerializerTest, JSONReaderComments) {
+  ValidateJsonList("[ // 2, 3, ignore me ] \n1 ]");
+  ValidateJsonList("[ /* 2, \n3, ignore me ]*/ \n1 ]");
+  ValidateJsonList("//header\n[ // 2, \n// 3, \n1 ]// footer");
+  ValidateJsonList("/*\n[ // 2, \n// 3, \n1 ]*/[1]");
+  ValidateJsonList("[ 1 /* one */ ] /* end */");
+  ValidateJsonList("[ 1 //// ,2\r\n ]");
+
+  // It's ok to have a comment in a string.
+  std::unique_ptr<ListValue> list =
+      ListValue::From(JSONReader::Read("[\"// ok\\n /* foo */ \"]"));
+  ASSERT_TRUE(list);
+  ASSERT_EQ(1U, list->GetSize());
+  Value* elt = nullptr;
+  ASSERT_TRUE(list->Get(0, &elt));
+  std::string value;
+  ASSERT_TRUE(elt && elt->GetAsString(&value));
+  ASSERT_EQ("// ok\n /* foo */ ", value);
+
+  // You can't nest comments.
+  ASSERT_FALSE(JSONReader::Read("/* /* inner */ outer */ [ 1 ]"));
+
+  // Not an open comment token.
+  ASSERT_FALSE(JSONReader::Read("/ * * / [1]"));
+}
+
+class JSONFileValueSerializerTest : public testing::Test {
+ protected:
+  void SetUp() override { ASSERT_TRUE(temp_dir_.CreateUniqueTempDir()); }
+
+  ScopedTempDir temp_dir_;
+};
+
+TEST_F(JSONFileValueSerializerTest, Roundtrip) {
+  FilePath original_file_path;
+  ASSERT_TRUE(PathService::Get(DIR_TEST_DATA, &original_file_path));
+  original_file_path = original_file_path.AppendASCII("serializer_test.json");
+
+  ASSERT_TRUE(PathExists(original_file_path));
+
+  JSONFileValueDeserializer deserializer(original_file_path);
+  std::unique_ptr<DictionaryValue> root_dict =
+      DictionaryValue::From(deserializer.Deserialize(nullptr, nullptr));
+  ASSERT_TRUE(root_dict);
+
+  Value* null_value = nullptr;
+  ASSERT_TRUE(root_dict->Get("null", &null_value));
+  ASSERT_TRUE(null_value);
+  ASSERT_TRUE(null_value->is_none());
+
+  bool bool_value = false;
+  ASSERT_TRUE(root_dict->GetBoolean("bool", &bool_value));
+  ASSERT_TRUE(bool_value);
+
+  int int_value = 0;
+  ASSERT_TRUE(root_dict->GetInteger("int", &int_value));
+  ASSERT_EQ(42, int_value);
+
+  std::string string_value;
+  ASSERT_TRUE(root_dict->GetString("string", &string_value));
+  ASSERT_EQ("hello", string_value);
+
+  // Now try writing.
+  const FilePath written_file_path =
+      temp_dir_.GetPath().AppendASCII("test_output.js");
+
+  ASSERT_FALSE(PathExists(written_file_path));
+  JSONFileValueSerializer serializer(written_file_path);
+  ASSERT_TRUE(serializer.Serialize(*root_dict));
+  ASSERT_TRUE(PathExists(written_file_path));
+
+  // Now compare file contents.
+  EXPECT_TRUE(TextContentsEqual(original_file_path, written_file_path));
+  EXPECT_TRUE(DeleteFile(written_file_path, false));
+}
+
+TEST_F(JSONFileValueSerializerTest, RoundtripNested) {
+  FilePath original_file_path;
+  ASSERT_TRUE(PathService::Get(DIR_TEST_DATA, &original_file_path));
+  original_file_path =
+      original_file_path.AppendASCII("serializer_nested_test.json");
+
+  ASSERT_TRUE(PathExists(original_file_path));
+
+  JSONFileValueDeserializer deserializer(original_file_path);
+  std::unique_ptr<Value> root = deserializer.Deserialize(nullptr, nullptr);
+  ASSERT_TRUE(root);
+
+  // Now try writing.
+  FilePath written_file_path =
+      temp_dir_.GetPath().AppendASCII("test_output.json");
+
+  ASSERT_FALSE(PathExists(written_file_path));
+  JSONFileValueSerializer serializer(written_file_path);
+  ASSERT_TRUE(serializer.Serialize(*root));
+  ASSERT_TRUE(PathExists(written_file_path));
+
+  // Now compare file contents.
+  EXPECT_TRUE(TextContentsEqual(original_file_path, written_file_path));
+  EXPECT_TRUE(DeleteFile(written_file_path, false));
+}
+
+TEST_F(JSONFileValueSerializerTest, NoWhitespace) {
+  FilePath source_file_path;
+  ASSERT_TRUE(PathService::Get(DIR_TEST_DATA, &source_file_path));
+  source_file_path =
+      source_file_path.AppendASCII("serializer_test_nowhitespace.json");
+  ASSERT_TRUE(PathExists(source_file_path));
+  JSONFileValueDeserializer deserializer(source_file_path);
+  std::unique_ptr<Value> root = deserializer.Deserialize(nullptr, nullptr);
+  ASSERT_TRUE(root);
+}
+
+}  // namespace
+
+}  // namespace base
diff --git a/base/json/json_writer.cc b/base/json/json_writer.cc
new file mode 100644
index 0000000..e4f1e3c
--- /dev/null
+++ b/base/json/json_writer.cc
@@ -0,0 +1,209 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/json/json_writer.h"
+
+#include <stdint.h>
+
+#include <cmath>
+#include <limits>
+
+#include "base/json/string_escape.h"
+#include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/values.h"
+#include "build/build_config.h"
+
+namespace base {
+
+#if defined(OS_WIN)
+const char kPrettyPrintLineEnding[] = "\r\n";
+#else
+const char kPrettyPrintLineEnding[] = "\n";
+#endif
+
+// static
+bool JSONWriter::Write(const Value& node, std::string* json) {
+  return WriteWithOptions(node, 0, json);
+}
+
+// static
+bool JSONWriter::WriteWithOptions(const Value& node,
+                                  int options,
+                                  std::string* json) {
+  json->clear();
+  // Is there a better way to estimate the size of the output?
+  json->reserve(1024);
+
+  JSONWriter writer(options, json);
+  bool result = writer.BuildJSONString(node, 0U);
+
+  if (options & OPTIONS_PRETTY_PRINT)
+    json->append(kPrettyPrintLineEnding);
+
+  return result;
+}
+
+JSONWriter::JSONWriter(int options, std::string* json)
+    : omit_binary_values_((options & OPTIONS_OMIT_BINARY_VALUES) != 0),
+      omit_double_type_preservation_(
+          (options & OPTIONS_OMIT_DOUBLE_TYPE_PRESERVATION) != 0),
+      pretty_print_((options & OPTIONS_PRETTY_PRINT) != 0),
+      json_string_(json) {
+  DCHECK(json);
+}
+
+bool JSONWriter::BuildJSONString(const Value& node, size_t depth) {
+  switch (node.type()) {
+    case Value::Type::NONE: {
+      json_string_->append("null");
+      return true;
+    }
+
+    case Value::Type::BOOLEAN: {
+      bool value;
+      bool result = node.GetAsBoolean(&value);
+      DCHECK(result);
+      json_string_->append(value ? "true" : "false");
+      return result;
+    }
+
+    case Value::Type::INTEGER: {
+      int value;
+      bool result = node.GetAsInteger(&value);
+      DCHECK(result);
+      json_string_->append(IntToString(value));
+      return result;
+    }
+
+    case Value::Type::DOUBLE: {
+      double value;
+      bool result = node.GetAsDouble(&value);
+      DCHECK(result);
+      if (omit_double_type_preservation_ &&
+          value <= std::numeric_limits<int64_t>::max() &&
+          value >= std::numeric_limits<int64_t>::min() &&
+          std::floor(value) == value) {
+        json_string_->append(Int64ToString(static_cast<int64_t>(value)));
+        return result;
+      }
+      std::string real = NumberToString(value);
+      // Ensure that the number has a .0 if there's no decimal or 'e'.  This
+      // makes sure that when we read the JSON back, it's interpreted as a
+      // real rather than an int.
+      if (real.find('.') == std::string::npos &&
+          real.find('e') == std::string::npos &&
+          real.find('E') == std::string::npos) {
+        real.append(".0");
+      }
+      // The JSON spec requires that non-integer values in the range (-1,1)
+      // have a zero before the decimal point - ".52" is not valid, "0.52" is.
+      if (real[0] == '.') {
+        real.insert(static_cast<size_t>(0), static_cast<size_t>(1), '0');
+      } else if (real.length() > 1 && real[0] == '-' && real[1] == '.') {
+        // "-.1" bad "-0.1" good
+        real.insert(static_cast<size_t>(1), static_cast<size_t>(1), '0');
+      }
+      json_string_->append(real);
+      return result;
+    }
+
+    case Value::Type::STRING: {
+      std::string value;
+      bool result = node.GetAsString(&value);
+      DCHECK(result);
+      EscapeJSONString(value, true, json_string_);
+      return result;
+    }
+
+    case Value::Type::LIST: {
+      json_string_->push_back('[');
+      if (pretty_print_)
+        json_string_->push_back(' ');
+
+      const ListValue* list = nullptr;
+      bool first_value_has_been_output = false;
+      bool result = node.GetAsList(&list);
+      DCHECK(result);
+      for (const auto& value : *list) {
+        if (omit_binary_values_ && value.type() == Value::Type::BINARY)
+          continue;
+
+        if (first_value_has_been_output) {
+          json_string_->push_back(',');
+          if (pretty_print_)
+            json_string_->push_back(' ');
+        }
+
+        if (!BuildJSONString(value, depth))
+          result = false;
+
+        first_value_has_been_output = true;
+      }
+
+      if (pretty_print_)
+        json_string_->push_back(' ');
+      json_string_->push_back(']');
+      return result;
+    }
+
+    case Value::Type::DICTIONARY: {
+      json_string_->push_back('{');
+      if (pretty_print_)
+        json_string_->append(kPrettyPrintLineEnding);
+
+      const DictionaryValue* dict = nullptr;
+      bool first_value_has_been_output = false;
+      bool result = node.GetAsDictionary(&dict);
+      DCHECK(result);
+      for (DictionaryValue::Iterator itr(*dict); !itr.IsAtEnd();
+           itr.Advance()) {
+        if (omit_binary_values_ && itr.value().type() == Value::Type::BINARY) {
+          continue;
+        }
+
+        if (first_value_has_been_output) {
+          json_string_->push_back(',');
+          if (pretty_print_)
+            json_string_->append(kPrettyPrintLineEnding);
+        }
+
+        if (pretty_print_)
+          IndentLine(depth + 1U);
+
+        EscapeJSONString(itr.key(), true, json_string_);
+        json_string_->push_back(':');
+        if (pretty_print_)
+          json_string_->push_back(' ');
+
+        if (!BuildJSONString(itr.value(), depth + 1U))
+          result = false;
+
+        first_value_has_been_output = true;
+      }
+
+      if (pretty_print_) {
+        json_string_->append(kPrettyPrintLineEnding);
+        IndentLine(depth);
+      }
+
+      json_string_->push_back('}');
+      return result;
+    }
+
+    case Value::Type::BINARY:
+      // Successful only if we're allowed to omit it.
+      DLOG_IF(ERROR, !omit_binary_values_) << "Cannot serialize binary value.";
+      return omit_binary_values_;
+  }
+  NOTREACHED();
+  return false;
+}
+
+void JSONWriter::IndentLine(size_t depth) {
+  json_string_->append(depth * 3U, ' ');
+}
+
+}  // namespace base
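
A minimal sketch of the double-formatting rules implemented above; the exact
output strings match the JSONWriterTest cases later in this patch, while the
main() scaffolding is illustrative only.

    #include <iostream>
    #include <string>

    #include "base/json/json_writer.h"
    #include "base/values.h"

    int main() {
      std::string out;

      // A whole-number double keeps a trailing ".0" so it reads back as a real.
      base::JSONWriter::Write(base::Value(1.0), &out);
      std::cout << out << "\n";  // Prints: 1.0

      // Values in (-1, 1) get a leading zero before the decimal point.
      base::JSONWriter::Write(base::Value(-0.8), &out);
      std::cout << out << "\n";  // Prints: -0.8

      // With OPTIONS_OMIT_DOUBLE_TYPE_PRESERVATION, an integral double is
      // written as a plain integer instead.
      base::JSONWriter::WriteWithOptions(
          base::Value(1e10),
          base::JSONWriter::OPTIONS_OMIT_DOUBLE_TYPE_PRESERVATION, &out);
      std::cout << out << "\n";  // Prints: 10000000000
      return 0;
    }
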
diff --git a/base/json/json_writer.h b/base/json/json_writer.h
new file mode 100644
index 0000000..57cb8c1
--- /dev/null
+++ b/base/json/json_writer.h
@@ -0,0 +1,75 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_JSON_JSON_WRITER_H_
+#define BASE_JSON_JSON_WRITER_H_
+
+#include <stddef.h>
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+
+namespace base {
+
+class Value;
+
+class BASE_EXPORT JSONWriter {
+ public:
+  enum Options {
+    // This option instructs the writer that if a Binary value is encountered,
+    // the value (and key if within a dictionary) will be omitted from the
+    // output, and success will be returned. Otherwise, if a binary value is
+    // encountered, failure will be returned.
+    OPTIONS_OMIT_BINARY_VALUES = 1 << 0,
+
+    // This option instructs the writer to write doubles that have no fractional
+    // part as a normal integer (i.e., without using exponential notation
+    // or appending a '.0') as long as the value is within the range of a
+    // 64-bit int.
+    OPTIONS_OMIT_DOUBLE_TYPE_PRESERVATION = 1 << 1,
+
+    // Returns a slightly nicer formatted JSON string (pads with whitespace
+    // to help with readability).
+    OPTIONS_PRETTY_PRINT = 1 << 2,
+  };
+
+  // Given a root node, generates a JSON string and puts it into |json|.
+  // The output string is overwritten and not appended.
+  //
+  // TODO(tc): Should we generate json if it would be invalid json (e.g.,
+  // |node| is not a DictionaryValue/ListValue or if there are inf/-inf float
+  // values)? Return true on success and false on failure.
+  static bool Write(const Value& node, std::string* json);
+
+  // Same as above but with |options| which is a bunch of JSONWriter::Options
+  // bitwise ORed together. Return true on success and false on failure.
+  static bool WriteWithOptions(const Value& node,
+                               int options,
+                               std::string* json);
+
+ private:
+  JSONWriter(int options, std::string* json);
+
+  // Called recursively to build the JSON string. When completed,
+  // |json_string_| will contain the JSON.
+  bool BuildJSONString(const Value& node, size_t depth);
+
+  // Adds space to json_string_ for the indent level.
+  void IndentLine(size_t depth);
+
+  bool omit_binary_values_;
+  bool omit_double_type_preservation_;
+  bool pretty_print_;
+
+  // Where we write JSON data as we generate it.
+  std::string* json_string_;
+
+  DISALLOW_COPY_AND_ASSIGN(JSONWriter);
+};
+
+}  // namespace base
+
+#endif  // BASE_JSON_JSON_WRITER_H_
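
Since the Options values are distinct bits, callers can OR them together in a
single WriteWithOptions() call. A minimal sketch using only the API declared
above (the helper name is made up):

    #include <string>

    #include "base/json/json_writer.h"
    #include "base/values.h"

    // Hypothetical helper: pretty-prints |node|, silently dropping any binary
    // values instead of failing.
    std::string PrettyPrintSkippingBinary(const base::Value& node) {
      std::string json;
      base::JSONWriter::WriteWithOptions(
          node,
          base::JSONWriter::OPTIONS_PRETTY_PRINT |
              base::JSONWriter::OPTIONS_OMIT_BINARY_VALUES,
          &json);
      return json;
    }
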
diff --git a/base/json/json_writer_unittest.cc b/base/json/json_writer_unittest.cc
new file mode 100644
index 0000000..2d81af3
--- /dev/null
+++ b/base/json/json_writer_unittest.cc
@@ -0,0 +1,154 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/json/json_writer.h"
+
+#include "base/memory/ptr_util.h"
+#include "base/values.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(JSONWriterTest, BasicTypes) {
+  std::string output_js;
+
+  // Test null.
+  EXPECT_TRUE(JSONWriter::Write(Value(), &output_js));
+  EXPECT_EQ("null", output_js);
+
+  // Test empty dict.
+  EXPECT_TRUE(JSONWriter::Write(DictionaryValue(), &output_js));
+  EXPECT_EQ("{}", output_js);
+
+  // Test empty list.
+  EXPECT_TRUE(JSONWriter::Write(ListValue(), &output_js));
+  EXPECT_EQ("[]", output_js);
+
+  // Test integer values.
+  EXPECT_TRUE(JSONWriter::Write(Value(42), &output_js));
+  EXPECT_EQ("42", output_js);
+
+  // Test boolean values.
+  EXPECT_TRUE(JSONWriter::Write(Value(true), &output_js));
+  EXPECT_EQ("true", output_js);
+
+  // Test that real values always have a decimal or an 'e'.
+  EXPECT_TRUE(JSONWriter::Write(Value(1.0), &output_js));
+  EXPECT_EQ("1.0", output_js);
+
+  // Test that real values in the range (-1, 1) have a leading zero.
+  EXPECT_TRUE(JSONWriter::Write(Value(0.2), &output_js));
+  EXPECT_EQ("0.2", output_js);
+
+  // Test that real values in the range (-1, 1) have a leading zero.
+  EXPECT_TRUE(JSONWriter::Write(Value(-0.8), &output_js));
+  EXPECT_EQ("-0.8", output_js);
+
+  // Test String values.
+  EXPECT_TRUE(JSONWriter::Write(Value("foo"), &output_js));
+  EXPECT_EQ("\"foo\"", output_js);
+}
+
+TEST(JSONWriterTest, NestedTypes) {
+  std::string output_js;
+
+  // Exercise nested types: a dict inside a list, an empty list inside a
+  // list, etc.
+  DictionaryValue root_dict;
+  std::unique_ptr<ListValue> list(new ListValue());
+  std::unique_ptr<DictionaryValue> inner_dict(new DictionaryValue());
+  inner_dict->SetInteger("inner int", 10);
+  list->Append(std::move(inner_dict));
+  list->Append(std::make_unique<ListValue>());
+  list->AppendBoolean(true);
+  root_dict.Set("list", std::move(list));
+
+  // Test the pretty-printer.
+  EXPECT_TRUE(JSONWriter::Write(root_dict, &output_js));
+  EXPECT_EQ("{\"list\":[{\"inner int\":10},[],true]}", output_js);
+  EXPECT_TRUE(JSONWriter::WriteWithOptions(
+      root_dict, JSONWriter::OPTIONS_PRETTY_PRINT, &output_js));
+
+  // The pretty-printer uses a different newline style on Windows than on
+  // other platforms.
+#if defined(OS_WIN)
+#define JSON_NEWLINE "\r\n"
+#else
+#define JSON_NEWLINE "\n"
+#endif
+  EXPECT_EQ("{" JSON_NEWLINE
+            "   \"list\": [ {" JSON_NEWLINE
+            "      \"inner int\": 10" JSON_NEWLINE
+            "   }, [  ], true ]" JSON_NEWLINE
+            "}" JSON_NEWLINE,
+            output_js);
+#undef JSON_NEWLINE
+}
+
+TEST(JSONWriterTest, KeysWithPeriods) {
+  std::string output_js;
+
+  DictionaryValue period_dict;
+  period_dict.SetKey("a.b", base::Value(3));
+  period_dict.SetKey("c", base::Value(2));
+  std::unique_ptr<DictionaryValue> period_dict2(new DictionaryValue());
+  period_dict2->SetKey("g.h.i.j", base::Value(1));
+  period_dict.SetWithoutPathExpansion("d.e.f", std::move(period_dict2));
+  EXPECT_TRUE(JSONWriter::Write(period_dict, &output_js));
+  EXPECT_EQ("{\"a.b\":3,\"c\":2,\"d.e.f\":{\"g.h.i.j\":1}}", output_js);
+
+  DictionaryValue period_dict3;
+  period_dict3.SetInteger("a.b", 2);
+  period_dict3.SetKey("a.b", base::Value(1));
+  EXPECT_TRUE(JSONWriter::Write(period_dict3, &output_js));
+  EXPECT_EQ("{\"a\":{\"b\":2},\"a.b\":1}", output_js);
+}
+
+TEST(JSONWriterTest, BinaryValues) {
+  std::string output_js;
+
+  // Binary values should return errors unless suppressed via the
+  // OPTIONS_OMIT_BINARY_VALUES flag.
+  std::unique_ptr<Value> root(Value::CreateWithCopiedBuffer("asdf", 4));
+  EXPECT_FALSE(JSONWriter::Write(*root, &output_js));
+  EXPECT_TRUE(JSONWriter::WriteWithOptions(
+      *root, JSONWriter::OPTIONS_OMIT_BINARY_VALUES, &output_js));
+  EXPECT_TRUE(output_js.empty());
+
+  ListValue binary_list;
+  binary_list.Append(Value::CreateWithCopiedBuffer("asdf", 4));
+  binary_list.Append(std::make_unique<Value>(5));
+  binary_list.Append(Value::CreateWithCopiedBuffer("asdf", 4));
+  binary_list.Append(std::make_unique<Value>(2));
+  binary_list.Append(Value::CreateWithCopiedBuffer("asdf", 4));
+  EXPECT_FALSE(JSONWriter::Write(binary_list, &output_js));
+  EXPECT_TRUE(JSONWriter::WriteWithOptions(
+      binary_list, JSONWriter::OPTIONS_OMIT_BINARY_VALUES, &output_js));
+  EXPECT_EQ("[5,2]", output_js);
+
+  DictionaryValue binary_dict;
+  binary_dict.Set("a", Value::CreateWithCopiedBuffer("asdf", 4));
+  binary_dict.SetInteger("b", 5);
+  binary_dict.Set("c", Value::CreateWithCopiedBuffer("asdf", 4));
+  binary_dict.SetInteger("d", 2);
+  binary_dict.Set("e", Value::CreateWithCopiedBuffer("asdf", 4));
+  EXPECT_FALSE(JSONWriter::Write(binary_dict, &output_js));
+  EXPECT_TRUE(JSONWriter::WriteWithOptions(
+      binary_dict, JSONWriter::OPTIONS_OMIT_BINARY_VALUES, &output_js));
+  EXPECT_EQ("{\"b\":5,\"d\":2}", output_js);
+}
+
+TEST(JSONWriterTest, DoublesAsInts) {
+  std::string output_js;
+
+  // Test allowing a double with no fractional part to be written as an integer.
+  Value double_value(1e10);
+  EXPECT_TRUE(JSONWriter::WriteWithOptions(
+      double_value, JSONWriter::OPTIONS_OMIT_DOUBLE_TYPE_PRESERVATION,
+      &output_js));
+  EXPECT_EQ("10000000000", output_js);
+}
+
+}  // namespace base
diff --git a/base/json/string_escape.cc b/base/json/string_escape.cc
new file mode 100644
index 0000000..471a9d3
--- /dev/null
+++ b/base/json/string_escape.cc
@@ -0,0 +1,167 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/json/string_escape.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <limits>
+#include <string>
+
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/strings/utf_string_conversion_utils.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/third_party/icu/icu_utf.h"
+
+namespace base {
+
+namespace {
+
+// Format string for printing a \uXXXX escape sequence.
+const char kU16EscapeFormat[] = "\\u%04X";
+
+// The code point to output for an invalid input code unit.
+const uint32_t kReplacementCodePoint = 0xFFFD;
+
+// Used below in EscapeSpecialCodePoint().
+static_assert('<' == 0x3C, "less than sign must be 0x3c");
+
+// Try to escape the |code_point| if it is a known special character. If
+// successful, returns true and appends the escape sequence to |dest|. This
+// isn't required by the spec, but it makes the output more readable to humans.
+bool EscapeSpecialCodePoint(uint32_t code_point, std::string* dest) {
+  // WARNING: if you add a new case here, you need to update the reader as well.
+  // Note: \v is in the reader, but not here since the JSON spec doesn't
+  // allow it.
+  switch (code_point) {
+    case '\b':
+      dest->append("\\b");
+      break;
+    case '\f':
+      dest->append("\\f");
+      break;
+    case '\n':
+      dest->append("\\n");
+      break;
+    case '\r':
+      dest->append("\\r");
+      break;
+    case '\t':
+      dest->append("\\t");
+      break;
+    case '\\':
+      dest->append("\\\\");
+      break;
+    case '"':
+      dest->append("\\\"");
+      break;
+    // Escape < to prevent script execution; escaping > is not necessary and
+    // not doing so saves a few bytes.
+    case '<':
+      dest->append("\\u003C");
+      break;
+    // Escape the "Line Separator" and "Paragraph Separator" characters, since
+    // they should be treated like a new line \r or \n.
+    case 0x2028:
+      dest->append("\\u2028");
+      break;
+    case 0x2029:
+      dest->append("\\u2029");
+      break;
+    default:
+      return false;
+  }
+  return true;
+}
+
+template <typename S>
+bool EscapeJSONStringImpl(const S& str, bool put_in_quotes, std::string* dest) {
+  bool did_replacement = false;
+
+  if (put_in_quotes)
+    dest->push_back('"');
+
+  // Casting is necessary because ICU uses int32_t. Try to do so safely.
+  CHECK_LE(str.length(),
+           static_cast<size_t>(std::numeric_limits<int32_t>::max()));
+  const int32_t length = static_cast<int32_t>(str.length());
+
+  for (int32_t i = 0; i < length; ++i) {
+    uint32_t code_point;
+    if (!ReadUnicodeCharacter(str.data(), length, &i, &code_point) ||
+        code_point == static_cast<decltype(code_point)>(CBU_SENTINEL) ||
+        !IsValidCharacter(code_point)) {
+      code_point = kReplacementCodePoint;
+      did_replacement = true;
+    }
+
+    if (EscapeSpecialCodePoint(code_point, dest))
+      continue;
+
+    // Escape non-printing characters.
+    if (code_point < 32)
+      base::StringAppendF(dest, kU16EscapeFormat, code_point);
+    else
+      WriteUnicodeCharacter(code_point, dest);
+  }
+
+  if (put_in_quotes)
+    dest->push_back('"');
+
+  return !did_replacement;
+}
+
+}  // namespace
+
+bool EscapeJSONString(StringPiece str, bool put_in_quotes, std::string* dest) {
+  return EscapeJSONStringImpl(str, put_in_quotes, dest);
+}
+
+bool EscapeJSONString(StringPiece16 str,
+                      bool put_in_quotes,
+                      std::string* dest) {
+  return EscapeJSONStringImpl(str, put_in_quotes, dest);
+}
+
+std::string GetQuotedJSONString(StringPiece str) {
+  std::string dest;
+  bool ok = EscapeJSONStringImpl(str, true, &dest);
+  DCHECK(ok);
+  return dest;
+}
+
+std::string GetQuotedJSONString(StringPiece16 str) {
+  std::string dest;
+  bool ok = EscapeJSONStringImpl(str, true, &dest);
+  DCHECK(ok);
+  return dest;
+}
+
+std::string EscapeBytesAsInvalidJSONString(StringPiece str,
+                                           bool put_in_quotes) {
+  std::string dest;
+
+  if (put_in_quotes)
+    dest.push_back('"');
+
+  for (StringPiece::const_iterator it = str.begin(); it != str.end(); ++it) {
+    unsigned char c = *it;
+    if (EscapeSpecialCodePoint(c, &dest))
+      continue;
+
+    if (c < 32 || c > 126)
+      base::StringAppendF(&dest, kU16EscapeFormat, c);
+    else
+      dest.push_back(*it);
+  }
+
+  if (put_in_quotes)
+    dest.push_back('"');
+
+  return dest;
+}
+
+}  // namespace base
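
A short sketch of the lossy-replacement contract implemented above: invalid
code units become U+FFFD and the return value reports the loss. The expected
strings mirror the string_escape unittests later in this patch; the harness
function itself is illustrative.

    #include <cassert>
    #include <string>

    #include "base/json/string_escape.h"

    void EscapeExamples() {
      std::string out;

      // Valid input passes through escaped; the function returns true.
      assert(base::EscapeJSONString("c<>d", /*put_in_quotes=*/false, &out));
      assert(out == "c\\u003C>d");

      // Invalid UTF-8 (the "\xf0\xff" pair) is replaced with U+FFFD
      // (EF BF BD), and the function reports the loss by returning false.
      out.clear();
      assert(!base::EscapeJSONString("b\x0f\x7f\xf0\xff!", false, &out));
      assert(out == "b\\u000F\x7F\xEF\xBF\xBD\xEF\xBF\xBD!");
    }
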
diff --git a/base/json/string_escape.h b/base/json/string_escape.h
new file mode 100644
index 0000000..f75f475
--- /dev/null
+++ b/base/json/string_escape.h
@@ -0,0 +1,61 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file defines utility functions for escaping strings suitable for JSON.
+
+#ifndef BASE_JSON_STRING_ESCAPE_H_
+#define BASE_JSON_STRING_ESCAPE_H_
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+// Appends to |dest| an escaped version of |str|. Valid UTF-8 code units and
+// characters will pass through from the input to the output. Invalid code
+// units and characters will be replaced with the U+FFFD replacement character.
+// This function returns true if no replacement was necessary and false if
+// there was a lossy replacement. On return, |dest| will contain a valid UTF-8
+// JSON string.
+//
+// Non-printing control characters will be escaped as \uXXXX sequences for
+// readability.
+//
+// If |put_in_quotes| is true, then a leading and trailing double-quote mark
+// will be appended to |dest| as well.
+BASE_EXPORT bool EscapeJSONString(StringPiece str,
+                                  bool put_in_quotes,
+                                  std::string* dest);
+
+// Performs a similar function to the UTF-8 StringPiece version above,
+// converting UTF-16 code units to UTF-8 code units and escaping non-printing
+// control characters. On return, |dest| will contain a valid UTF-8 JSON string.
+BASE_EXPORT bool EscapeJSONString(StringPiece16 str,
+                                  bool put_in_quotes,
+                                  std::string* dest);
+
+// Helper functions that wrap the above two functions but return the value
+// instead of appending. |put_in_quotes| is always true.
+BASE_EXPORT std::string GetQuotedJSONString(StringPiece str);
+BASE_EXPORT std::string GetQuotedJSONString(StringPiece16 str);
+
+// Given an arbitrary byte string |str|, this will escape all non-ASCII bytes
+// as \uXXXX escape sequences. This function is *NOT* meant to be used with
+// Unicode strings and does not validate |str| as one.
+//
+// CAVEAT CALLER: The output of this function may not be valid JSON, since
+// JSON requires escape sequences to be valid UTF-16 code units. This output
+// will be mangled if passed to the base::JSONReader, since the reader will
+// interpret it as UTF-16 and convert it to UTF-8.
+//
+// The output of this function takes the *appearance* of JSON but is not in
+// fact valid according to RFC 4627.
+BASE_EXPORT std::string EscapeBytesAsInvalidJSONString(StringPiece str,
+                                                       bool put_in_quotes);
+
+}  // namespace base
+
+#endif  // BASE_JSON_STRING_ESCAPE_H_
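
A sketch contrasting the two entry points for text versus raw bytes; the
expected outputs mirror the EscapeBytes unittest later in this patch, and the
main() wrapper is illustrative only.

    #include <iostream>
    #include <string>

    #include "base/json/string_escape.h"

    int main() {
      // GetQuotedJSONString(): for genuine (UTF-8) text; quotes are added.
      // Prints: "c\u003C>d"
      std::cout << base::GetQuotedJSONString("c<>d") << "\n";

      // EscapeBytesAsInvalidJSONString(): for arbitrary bytes; every
      // non-ASCII byte becomes a \u00XX escape, so the result merely *looks*
      // like JSON and must not be fed back through base::JSONReader.
      // Prints: b\u000F\u007F\u00F0\u00FF!
      std::cout << base::EscapeBytesAsInvalidJSONString(
                       "b\x0f\x7f\xf0\xff!", /*put_in_quotes=*/false)
                << "\n";
      return 0;
    }
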
diff --git a/base/json/string_escape_fuzzer.cc b/base/json/string_escape_fuzzer.cc
new file mode 100644
index 0000000..e44bd4f
--- /dev/null
+++ b/base/json/string_escape_fuzzer.cc
@@ -0,0 +1,37 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/json/string_escape.h"
+
+#include <memory>
+
+std::string escaped_string;
+
+// Entry point for LibFuzzer.
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+  if (size < 2)
+    return 0;
+
+  const bool put_in_quotes = data[size - 1];
+
+  // Create a copy of the input buffer, as otherwise we wouldn't catch an
+  // overflow that touches the last byte (which is used for put_in_quotes).
+  size_t actual_size_char8 = size - 1;
+  std::unique_ptr<char[]> input(new char[actual_size_char8]);
+  memcpy(input.get(), data, actual_size_char8);
+
+  base::StringPiece input_string(input.get(), actual_size_char8);
+  base::EscapeJSONString(input_string, put_in_quotes, &escaped_string);
+
+  // Test the wide-string variant only if the remaining size is even.
+  if (actual_size_char8 & 1)
+    return 0;
+
+  size_t actual_size_char16 = actual_size_char8 / 2;
+  base::StringPiece16 input_string16(
+      reinterpret_cast<base::char16*>(input.get()), actual_size_char16);
+  base::EscapeJSONString(input_string16, put_in_quotes, &escaped_string);
+
+  return 0;
+}
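
For replaying a crash input outside of libFuzzer, a tiny standalone driver can
feed a file's bytes to the entry point. This harness is an illustrative
sketch, not part of the patch:

    #include <stdint.h>

    #include <fstream>
    #include <iterator>
    #include <vector>

    extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size);

    int main(int argc, char** argv) {
      if (argc < 2)
        return 1;
      // Read the reproducer file verbatim and hand it to the fuzzer target.
      std::ifstream file(argv[1], std::ios::binary);
      std::vector<char> bytes((std::istreambuf_iterator<char>(file)),
                              std::istreambuf_iterator<char>());
      return LLVMFuzzerTestOneInput(
          reinterpret_cast<const uint8_t*>(bytes.data()), bytes.size());
    }
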
diff --git a/base/json/string_escape_unittest.cc b/base/json/string_escape_unittest.cc
new file mode 100644
index 0000000..1e962c6
--- /dev/null
+++ b/base/json/string_escape_unittest.cc
@@ -0,0 +1,189 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/json/string_escape.h"
+
+#include <stddef.h>
+
+#include "base/macros.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(JSONStringEscapeTest, EscapeUTF8) {
+  const struct {
+    const char* to_escape;
+    const char* escaped;
+  } cases[] = {
+      {"\b\001aZ\"\\wee", "\\b\\u0001aZ\\\"\\\\wee"},
+      {"a\b\f\n\r\t\v\1\\.\"z", "a\\b\\f\\n\\r\\t\\u000B\\u0001\\\\.\\\"z"},
+      {"b\x0f\x7f\xf0\xff!",  // \xf0\xff is not a valid UTF-8 unit.
+       "b\\u000F\x7F\xEF\xBF\xBD\xEF\xBF\xBD!"},
+      {"c<>d", "c\\u003C>d"},
+      {"Hello\xe2\x80\xa8world", "Hello\\u2028world"},
+      {"\xe2\x80\xa9purple", "\\u2029purple"},
+      {"\xF3\xBF\xBF\xBF", "\xEF\xBF\xBD"},
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    const char* in_ptr = cases[i].to_escape;
+    std::string in_str = in_ptr;
+
+    std::string out;
+    EscapeJSONString(in_ptr, false, &out);
+    EXPECT_EQ(std::string(cases[i].escaped), out);
+    EXPECT_TRUE(IsStringUTF8(out));
+
+    out.erase();
+    bool convert_ok = EscapeJSONString(in_str, false, &out);
+    EXPECT_EQ(std::string(cases[i].escaped), out);
+    EXPECT_TRUE(IsStringUTF8(out));
+
+    if (convert_ok) {
+      std::string fooout = GetQuotedJSONString(in_str);
+      EXPECT_EQ("\"" + std::string(cases[i].escaped) + "\"", fooout);
+      EXPECT_TRUE(IsStringUTF8(out));
+    }
+  }
+
+  std::string in = cases[0].to_escape;
+  std::string out;
+  EscapeJSONString(in, false, &out);
+  EXPECT_TRUE(IsStringUTF8(out));
+
+  // test quoting
+  std::string out_quoted;
+  EscapeJSONString(in, true, &out_quoted);
+  EXPECT_EQ(out.length() + 2, out_quoted.length());
+  EXPECT_EQ(out_quoted.find(out), 1U);
+  EXPECT_TRUE(IsStringUTF8(out_quoted));
+
+  // now try with a NULL in the string
+  std::string null_prepend = "test";
+  null_prepend.push_back(0);
+  in = null_prepend + in;
+  std::string expected = "test\\u0000";
+  expected += cases[0].escaped;
+  out.clear();
+  EscapeJSONString(in, false, &out);
+  EXPECT_EQ(expected, out);
+  EXPECT_TRUE(IsStringUTF8(out));
+}
+
+TEST(JSONStringEscapeTest, EscapeUTF16) {
+  const struct {
+    const wchar_t* to_escape;
+    const char* escaped;
+  } cases[] = {
+    {L"b\uffb1\u00ff", "b\xEF\xBE\xB1\xC3\xBF"},
+    {L"\b\001aZ\"\\wee", "\\b\\u0001aZ\\\"\\\\wee"},
+    {L"a\b\f\n\r\t\v\1\\.\"z",
+        "a\\b\\f\\n\\r\\t\\u000B\\u0001\\\\.\\\"z"},
+    {L"b\x0f\x7f\xf0\xff!", "b\\u000F\x7F\xC3\xB0\xC3\xBF!"},
+    {L"c<>d", "c\\u003C>d"},
+    {L"Hello\u2028world", "Hello\\u2028world"},
+    {L"\u2029purple", "\\u2029purple"},
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    string16 in = WideToUTF16(cases[i].to_escape);
+
+    std::string out;
+    EscapeJSONString(in, false, &out);
+    EXPECT_EQ(std::string(cases[i].escaped), out);
+    EXPECT_TRUE(IsStringUTF8(out));
+
+    out = GetQuotedJSONString(in);
+    EXPECT_EQ("\"" + std::string(cases[i].escaped) + "\"", out);
+    EXPECT_TRUE(IsStringUTF8(out));
+  }
+
+  string16 in = WideToUTF16(cases[0].to_escape);
+  std::string out;
+  EscapeJSONString(in, false, &out);
+  EXPECT_TRUE(IsStringUTF8(out));
+
+  // test quoting
+  std::string out_quoted;
+  EscapeJSONString(in, true, &out_quoted);
+  EXPECT_EQ(out.length() + 2, out_quoted.length());
+  EXPECT_EQ(out_quoted.find(out), 1U);
+  EXPECT_TRUE(IsStringUTF8(out));
+
+  // now try with a NULL in the string
+  string16 null_prepend = WideToUTF16(L"test");
+  null_prepend.push_back(0);
+  in = null_prepend + in;
+  std::string expected = "test\\u0000";
+  expected += cases[0].escaped;
+  out.clear();
+  EscapeJSONString(in, false, &out);
+  EXPECT_EQ(expected, out);
+  EXPECT_TRUE(IsStringUTF8(out));
+}
+
+TEST(JSONStringEscapeTest, EscapeUTF16OutsideBMP) {
+  {
+    // {a, U+10300, !}, SMP.
+    string16 test;
+    test.push_back('a');
+    test.push_back(0xD800);
+    test.push_back(0xDF00);
+    test.push_back('!');
+    std::string actual;
+    EXPECT_TRUE(EscapeJSONString(test, false, &actual));
+    EXPECT_EQ("a\xF0\x90\x8C\x80!", actual);
+  }
+  {
+    // {U+20021, U+2002B}, SIP.
+    string16 test;
+    test.push_back(0xD840);
+    test.push_back(0xDC21);
+    test.push_back(0xD840);
+    test.push_back(0xDC2B);
+    std::string actual;
+    EXPECT_TRUE(EscapeJSONString(test, false, &actual));
+    EXPECT_EQ("\xF0\xA0\x80\xA1\xF0\xA0\x80\xAB", actual);
+  }
+  {
+    // {?, U+D800, @}, lone surrogate.
+    string16 test;
+    test.push_back('?');
+    test.push_back(0xD800);
+    test.push_back('@');
+    std::string actual;
+    EXPECT_FALSE(EscapeJSONString(test, false, &actual));
+    EXPECT_EQ("?\xEF\xBF\xBD@", actual);
+  }
+}
+
+TEST(JSONStringEscapeTest, EscapeBytes) {
+  const struct {
+    const char* to_escape;
+    const char* escaped;
+  } cases[] = {
+    {"b\x0f\x7f\xf0\xff!", "b\\u000F\\u007F\\u00F0\\u00FF!"},
+    {"\xe5\xc4\x4f\x05\xb6\xfd", "\\u00E5\\u00C4O\\u0005\\u00B6\\u00FD"},
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    std::string in = std::string(cases[i].to_escape);
+    EXPECT_FALSE(IsStringUTF8(in));
+
+    EXPECT_EQ(std::string(cases[i].escaped),
+        EscapeBytesAsInvalidJSONString(in, false));
+    EXPECT_EQ("\"" + std::string(cases[i].escaped) + "\"",
+        EscapeBytesAsInvalidJSONString(in, true));
+  }
+
+  const char kEmbedNull[] = { '\xab', '\x39', '\0', '\x9f', '\xab' };
+  std::string in(kEmbedNull, arraysize(kEmbedNull));
+  EXPECT_FALSE(IsStringUTF8(in));
+  EXPECT_EQ(std::string("\\u00AB9\\u0000\\u009F\\u00AB"),
+            EscapeBytesAsInvalidJSONString(in, false));
+}
+
+}  // namespace base
diff --git a/base/lazy_instance.h b/base/lazy_instance.h
new file mode 100644
index 0000000..36d3158
--- /dev/null
+++ b/base/lazy_instance.h
@@ -0,0 +1,210 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! DEPRECATED !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+// Please don't introduce new instances of LazyInstance<T>. Use a function-local
+// static of type base::NoDestructor<T> instead:
+//
+// Factory& Factory::GetInstance() {
+//   static base::NoDestructor<Factory> instance;
+//   return *instance;
+// }
+// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+//
+// The LazyInstance<Type, Traits> class manages a single instance of Type,
+// which will be lazily created on the first time it's accessed.  This class is
+// useful for places you would normally use a function-level static, but you
+// need to have guaranteed thread-safety.  The Type constructor will only ever
+// be called once, even if two threads are racing to create the object.  Get()
+// and Pointer() will always return the same, completely initialized instance.
+// When the instance is constructed it is registered with AtExitManager.  The
+// destructor will be called on program exit.
+//
+// LazyInstance is completely thread safe, assuming that you create it safely.
+// The class was designed to be POD initialized, so it shouldn't require a
+// static constructor.  It really only makes sense to declare a LazyInstance as
+// a global variable using the LAZY_INSTANCE_INITIALIZER initializer.
+//
+// LazyInstance is similar to Singleton, except it does not have the singleton
+// property.  You can have multiple LazyInstance's of the same type, and each
+// will manage a unique instance.  It also preallocates the space for Type, so
+// as to avoid allocating the Type instance on the heap.  This may help with the
+// performance of creating the instance, and reducing heap fragmentation.  This
+// requires that Type be a complete type so we can determine the size.
+//
+// Example usage:
+//   static LazyInstance<MyClass>::Leaky inst = LAZY_INSTANCE_INITIALIZER;
+//   void SomeMethod() {
+//     inst.Get().SomeMethod();  // MyClass::SomeMethod()
+//
+//     MyClass* ptr = inst.Pointer();
+//     ptr->DoDoDo();  // MyClass::DoDoDo
+//   }
+
+#ifndef BASE_LAZY_INSTANCE_H_
+#define BASE_LAZY_INSTANCE_H_
+
+#include <new>  // For placement new.
+
+#include "base/atomicops.h"
+#include "base/debug/leak_annotations.h"
+#include "base/lazy_instance_helpers.h"
+#include "base/logging.h"
+#include "base/threading/thread_restrictions.h"
+
+// LazyInstance uses its own struct initializer-list style static
+// initialization, which does not require a constructor.
+#define LAZY_INSTANCE_INITIALIZER {0}
+
+namespace base {
+
+template <typename Type>
+struct LazyInstanceTraitsBase {
+  static Type* New(void* instance) {
+    DCHECK_EQ(reinterpret_cast<uintptr_t>(instance) & (alignof(Type) - 1), 0u);
+    // Use placement new to initialize our instance in our preallocated space.
+    // The parentheses are very important here to force POD type initialization.
+    return new (instance) Type();
+  }
+
+  static void CallDestructor(Type* instance) {
+    // Explicitly call the destructor.
+    instance->~Type();
+  }
+};
+
+// We pull out some of the functionality into non-templated functions, so we
+// can implement the more complicated pieces out of line in the .cc file.
+namespace internal {
+
+// This traits class causes destruction of the contained Type at process exit
+// via AtExitManager. This is generally not what you want. Instead, prefer
+// Leaky below.
+template <typename Type>
+struct DestructorAtExitLazyInstanceTraits {
+  static const bool kRegisterOnExit = true;
+#if DCHECK_IS_ON()
+  static const bool kAllowedToAccessOnNonjoinableThread = false;
+#endif
+
+  static Type* New(void* instance) {
+    return LazyInstanceTraitsBase<Type>::New(instance);
+  }
+
+  static void Delete(Type* instance) {
+    LazyInstanceTraitsBase<Type>::CallDestructor(instance);
+  }
+};
+
+// Use LazyInstance<T>::Leaky for a less-verbose call-site typedef; e.g.:
+// base::LazyInstance<T>::Leaky my_leaky_lazy_instance;
+// instead of:
+// base::LazyInstance<T, base::internal::LeakyLazyInstanceTraits<T> >
+// my_leaky_lazy_instance;
+// (especially when T is MyLongTypeNameImplClientHolderFactory).
+// Only use this internal::-qualified verbose form to extend this traits class
+// (depending on its implementation details).
+template <typename Type>
+struct LeakyLazyInstanceTraits {
+  static const bool kRegisterOnExit = false;
+#if DCHECK_IS_ON()
+  static const bool kAllowedToAccessOnNonjoinableThread = true;
+#endif
+
+  static Type* New(void* instance) {
+    ANNOTATE_SCOPED_MEMORY_LEAK;
+    return LazyInstanceTraitsBase<Type>::New(instance);
+  }
+  static void Delete(Type* instance) {
+  }
+};
+
+template <typename Type>
+struct ErrorMustSelectLazyOrDestructorAtExitForLazyInstance {};
+
+}  // namespace internal
+
+template <
+    typename Type,
+    typename Traits =
+        internal::ErrorMustSelectLazyOrDestructorAtExitForLazyInstance<Type>>
+class LazyInstance {
+ public:
+  // Do not define a destructor, as doing so makes LazyInstance a
+  // non-POD-struct. We don't want that because then a static initializer will
+  // be created to register the (empty) destructor with atexit() under MSVC, for
+  // example. We handle destruction of the contained Type class explicitly via
+  // the OnExit member function, where needed.
+  // ~LazyInstance() {}
+
+  // Convenience typedef to avoid having to repeat Type for leaky lazy
+  // instances.
+  typedef LazyInstance<Type, internal::LeakyLazyInstanceTraits<Type>> Leaky;
+  typedef LazyInstance<Type, internal::DestructorAtExitLazyInstanceTraits<Type>>
+      DestructorAtExit;
+
+  Type& Get() {
+    return *Pointer();
+  }
+
+  Type* Pointer() {
+#if DCHECK_IS_ON()
+    if (!Traits::kAllowedToAccessOnNonjoinableThread)
+      ThreadRestrictions::AssertSingletonAllowed();
+#endif
+
+    return subtle::GetOrCreateLazyPointer(
+        &private_instance_, &Traits::New, private_buf_,
+        Traits::kRegisterOnExit ? OnExit : nullptr, this);
+  }
+
+  // Returns true if the lazy instance has been created.  Unlike Get() and
+  // Pointer(), calling IsCreated() will not instantiate the object of Type.
+  bool IsCreated() {
+    // Return true (i.e. "created") if |private_instance_| is either being
+    // created right now (i.e. |private_instance_| has value of
+    // internal::kLazyInstanceStateCreating) or was already created (i.e.
+    // |private_instance_| has any other non-zero value).
+    return 0 != subtle::NoBarrier_Load(&private_instance_);
+  }
+
+  // MSVC gives a warning that the alignment expands the size of the
+  // LazyInstance struct to make the size a multiple of the alignment. This
+  // is expected in this case.
+#if defined(OS_WIN)
+#pragma warning(push)
+#pragma warning(disable: 4324)
+#endif
+
+  // Effectively private: member data is only public to allow the linker to
+  // statically initialize it and to maintain a POD class. DO NOT USE FROM
+  // OUTSIDE THIS CLASS.
+  subtle::AtomicWord private_instance_;
+
+  // Preallocated space for the Type instance.
+  alignas(Type) char private_buf_[sizeof(Type)];
+
+#if defined(OS_WIN)
+#pragma warning(pop)
+#endif
+
+ private:
+  Type* instance() {
+    return reinterpret_cast<Type*>(subtle::NoBarrier_Load(&private_instance_));
+  }
+
+  // Adapter function for use with AtExit.  This should only be called from a
+  // single thread, so we don't synchronize across threads.
+  // Calling OnExit while the instance is in use by other threads is a mistake.
+  static void OnExit(void* lazy_instance) {
+    LazyInstance<Type, Traits>* me =
+        reinterpret_cast<LazyInstance<Type, Traits>*>(lazy_instance);
+    Traits::Delete(me->instance());
+    subtle::NoBarrier_Store(&me->private_instance_, 0);
+  }
+};
+
+}  // namespace base
+
+#endif  // BASE_LAZY_INSTANCE_H_
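
A usage sketch of the two trait choices declared above (MyClass is a
placeholder; the DestructorAtExit variant additionally requires a live
base::AtExitManager):

    #include "base/lazy_instance.h"

    class MyClass {
     public:
      void DoSomething() {}
    };

    // Leaky: never destroyed. Preferred for most global state since it avoids
    // shutdown-ordering problems.
    base::LazyInstance<MyClass>::Leaky g_leaky = LAZY_INSTANCE_INITIALIZER;

    // DestructorAtExit: the destructor is registered with AtExitManager and
    // runs at program exit.
    base::LazyInstance<MyClass>::DestructorAtExit g_at_exit =
        LAZY_INSTANCE_INITIALIZER;

    void UseThem() {
      g_leaky.Get().DoSomething();         // Constructed on first access.
      g_at_exit.Pointer()->DoSomething();  // Same instance on every call.
    }
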
diff --git a/base/lazy_instance_helpers.cc b/base/lazy_instance_helpers.cc
new file mode 100644
index 0000000..7b9e0de
--- /dev/null
+++ b/base/lazy_instance_helpers.cc
@@ -0,0 +1,64 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/lazy_instance_helpers.h"
+
+#include "base/at_exit.h"
+#include "base/atomicops.h"
+#include "base/threading/platform_thread.h"
+
+namespace base {
+namespace internal {
+
+bool NeedsLazyInstance(subtle::AtomicWord* state) {
+  // Try to create the instance: if we're first, |state| will go from 0 to
+  // kLazyInstanceStateCreating; otherwise another thread has beaten us here.
+  // The memory access has no memory ordering as state 0 and
+  // kLazyInstanceStateCreating have no associated data (memory barriers are
+  // all about ordering of memory accesses to *associated* data).
+  if (subtle::NoBarrier_CompareAndSwap(state, 0, kLazyInstanceStateCreating) ==
+      0) {
+    // Caller must create instance
+    return true;
+  }
+
+  // It's either in the process of being created, or already created. Spin.
+  // The load has acquire memory ordering as a thread which sees
+  // state_ == STATE_CREATED needs to acquire visibility over
+  // the associated data (buf_). Pairing Release_Store is in
+  // CompleteLazyInstance().
+  if (subtle::Acquire_Load(state) == kLazyInstanceStateCreating) {
+    const base::TimeTicks start = base::TimeTicks::Now();
+    do {
+      const base::TimeDelta elapsed = base::TimeTicks::Now() - start;
+      // Spin with YieldCurrentThread for at most one ms - this ensures maximum
+      // responsiveness. After that spin with Sleep(1ms) so that we don't burn
+      // excessive CPU time - this also avoids infinite loops due to priority
+      // inversions (https://crbug.com/797129).
+      if (elapsed < TimeDelta::FromMilliseconds(1))
+        PlatformThread::YieldCurrentThread();
+      else
+        PlatformThread::Sleep(TimeDelta::FromMilliseconds(1));
+    } while (subtle::Acquire_Load(state) == kLazyInstanceStateCreating);
+  }
+  // Someone else created the instance.
+  return false;
+}
+
+void CompleteLazyInstance(subtle::AtomicWord* state,
+                          subtle::AtomicWord new_instance,
+                          void (*destructor)(void*),
+                          void* destructor_arg) {
+  // Instance is created, go from CREATING to CREATED (or reset it if
+  // |new_instance| is null). Releases visibility over |private_buf_| to
+  // readers. Pairing Acquire_Load is in NeedsLazyInstance().
+  subtle::Release_Store(state, new_instance);
+
+  // Make sure that the lazily instantiated object will get destroyed at exit.
+  if (new_instance && destructor)
+    AtExitManager::RegisterCallback(destructor, destructor_arg);
+}
+
+}  // namespace internal
+}  // namespace base
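
Taken together, the two helpers above form a two-phase publish protocol. A
hedged sketch of using them directly (Foo is a placeholder, and real code
should prefer the GetOrCreateLazyPointer() wrapper declared in the header
below):

    #include "base/atomicops.h"
    #include "base/lazy_instance_helpers.h"

    struct Foo {};

    base::subtle::AtomicWord g_state = 0;

    Foo* GetFoo() {
      if (base::internal::NeedsLazyInstance(&g_state)) {
        // This thread won the race: construct, then publish with release
        // semantics. Passing a null destructor leaks the instance on purpose.
        base::internal::CompleteLazyInstance(
            &g_state, reinterpret_cast<base::subtle::AtomicWord>(new Foo()),
            /*destructor=*/nullptr, /*destructor_arg=*/nullptr);
      }
      // Either we published above, or NeedsLazyInstance() returned only after
      // another thread did; this acquire load pairs with that Release_Store.
      return reinterpret_cast<Foo*>(base::subtle::Acquire_Load(&g_state));
    }
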
diff --git a/base/lazy_instance_helpers.h b/base/lazy_instance_helpers.h
new file mode 100644
index 0000000..5a43d8b
--- /dev/null
+++ b/base/lazy_instance_helpers.h
@@ -0,0 +1,101 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_LAZY_INSTANCE_HELPERS_H_
+#define BASE_LAZY_INSTANCE_HELPERS_H_
+
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/logging.h"
+
+// Helper methods used by LazyInstance and a few other base APIs for thread-safe
+// lazy construction.
+
+namespace base {
+namespace internal {
+
+// Our AtomicWord doubles as a spinlock, where a value of
+// kLazyInstanceStateCreating means the spinlock is being held for creation.
+constexpr subtle::AtomicWord kLazyInstanceStateCreating = 1;
+
+// Helper for GetOrCreateLazyPointer(). Checks if instance needs to be created.
+// If so, returns true; otherwise, if another thread has beaten us to it,
+// waits for the instance to be created and returns false.
+BASE_EXPORT bool NeedsLazyInstance(subtle::AtomicWord* state);
+
+// Helper for GetOrCreateLazyPointer(). After creating an instance, this is
+// called to register the dtor to be called at program exit and to update the
+// atomic state to hold the |new_instance|.
+BASE_EXPORT void CompleteLazyInstance(subtle::AtomicWord* state,
+                                      subtle::AtomicWord new_instance,
+                                      void (*destructor)(void*),
+                                      void* destructor_arg);
+
+}  // namespace internal
+
+namespace subtle {
+
+// If |state| is uninitialized (zero), constructs a value using
+// |creator_func(creator_arg)|, stores it into |state| and registers
+// |destructor(destructor_arg)| to be called when the current AtExitManager goes
+// out of scope. Then, returns the value stored in |state|. It is safe to have
+// concurrent calls to this function with the same |state|. |creator_func| may
+// return nullptr if it doesn't want to create an instance anymore (e.g. on
+// shutdown), it is from then on required to return nullptr to all callers (ref.
+// StaticMemorySingletonTraits). In that case, callers need to synchronize
+// before |creator_func| may return a non-null instance again (ref.
+// StaticMemorySingletonTraits::ResurrectForTesting()).
+// Implementation note on |creator_func/creator_arg|. It makes for ugly adapters
+// but it avoids redundant template instantiations (e.g. saves 27KB in
+// chrome.dll) because the linker is able to fold these for multiple Types but
+// couldn't with the more advanced CreatorFunc template type which in turn
+// improves code locality (and application startup) -- ref.
+// https://chromium-review.googlesource.com/c/chromium/src/+/530984/5/base/lazy_instance.h#140,
+// worsened by https://chromium-review.googlesource.com/c/chromium/src/+/868013
+// and caught then as https://crbug.com/804034.
+template <typename Type>
+Type* GetOrCreateLazyPointer(subtle::AtomicWord* state,
+                             Type* (*creator_func)(void*),
+                             void* creator_arg,
+                             void (*destructor)(void*),
+                             void* destructor_arg) {
+  DCHECK(state);
+  DCHECK(creator_func);
+
+  // If any bit in the created mask is true, the instance has already been
+  // fully constructed.
+  constexpr subtle::AtomicWord kLazyInstanceCreatedMask =
+      ~internal::kLazyInstanceStateCreating;
+
+  // We will hopefully have fast access when the instance is already created.
+  // Since a thread sees |state| == 0 or kLazyInstanceStateCreating at most
+  // once, the load is taken out of NeedsLazyInstance() as a fast-path. The load
+  // has acquire memory ordering as a thread which sees |state| > creating needs
+  // to acquire visibility over the associated data. Pairing Release_Store is in
+  // CompleteLazyInstance().
+  subtle::AtomicWord instance = subtle::Acquire_Load(state);
+  if (!(instance & kLazyInstanceCreatedMask)) {
+    if (internal::NeedsLazyInstance(state)) {
+      // This thread won the race and is now responsible for creating the
+      // instance and storing it back into |state|.
+      instance =
+          reinterpret_cast<subtle::AtomicWord>((*creator_func)(creator_arg));
+      internal::CompleteLazyInstance(state, instance, destructor,
+                                     destructor_arg);
+    } else {
+      // This thread lost the race but now has visibility over the constructed
+      // instance (NeedsLazyInstance() doesn't return until the constructing
+      // thread releases the instance via CompleteLazyInstance()).
+      instance = subtle::Acquire_Load(state);
+      DCHECK(instance & kLazyInstanceCreatedMask);
+    }
+  }
+  return reinterpret_cast<Type*>(instance);
+}
+
+}  // namespace subtle
+
+}  // namespace base
+
+#endif  // BASE_LAZY_INSTANCE_HELPERS_H_
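
And the templated wrapper in use; a minimal sketch in the same shape as
LazyInstance::Pointer() above (Widget and its creator function are made up):

    #include "base/atomicops.h"
    #include "base/lazy_instance_helpers.h"

    namespace {

    struct Widget {
      int value = 42;
    };

    base::subtle::AtomicWord g_widget_state = 0;

    Widget* CreateWidget(void* /*creator_arg*/) {
      return new Widget();
    }

    }  // namespace

    Widget* GetWidget() {
      // Thread-safe: exactly one caller runs CreateWidget(); everyone else
      // waits, then observes the fully constructed instance. With a null
      // destructor the Widget is deliberately leaked.
      return base::subtle::GetOrCreateLazyPointer(
          &g_widget_state, &CreateWidget, /*creator_arg=*/nullptr,
          /*destructor=*/nullptr, /*destructor_arg=*/nullptr);
    }
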
diff --git a/base/lazy_instance_unittest.cc b/base/lazy_instance_unittest.cc
new file mode 100644
index 0000000..a5f024c
--- /dev/null
+++ b/base/lazy_instance_unittest.cc
@@ -0,0 +1,322 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+
+#include <memory>
+#include <vector>
+
+#include "base/at_exit.h"
+#include "base/atomic_sequence_num.h"
+#include "base/atomicops.h"
+#include "base/barrier_closure.h"
+#include "base/bind.h"
+#include "base/lazy_instance.h"
+#include "base/sys_info.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/simple_thread.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+base::AtomicSequenceNumber constructed_seq_;
+base::AtomicSequenceNumber destructed_seq_;
+
+class ConstructAndDestructLogger {
+ public:
+  ConstructAndDestructLogger() {
+    constructed_seq_.GetNext();
+  }
+  ~ConstructAndDestructLogger() {
+    destructed_seq_.GetNext();
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ConstructAndDestructLogger);
+};
+
+class SlowConstructor {
+ public:
+  SlowConstructor() : some_int_(0) {
+    // Sleep for 1 second to try to cause a race.
+    base::PlatformThread::Sleep(base::TimeDelta::FromSeconds(1));
+    ++constructed;
+    some_int_ = 12;
+  }
+  int some_int() const { return some_int_; }
+
+  static int constructed;
+ private:
+  int some_int_;
+
+  DISALLOW_COPY_AND_ASSIGN(SlowConstructor);
+};
+
+// static
+int SlowConstructor::constructed = 0;
+
+class SlowDelegate : public base::DelegateSimpleThread::Delegate {
+ public:
+  explicit SlowDelegate(
+      base::LazyInstance<SlowConstructor>::DestructorAtExit* lazy)
+      : lazy_(lazy) {}
+
+  void Run() override {
+    EXPECT_EQ(12, lazy_->Get().some_int());
+    EXPECT_EQ(12, lazy_->Pointer()->some_int());
+  }
+
+ private:
+  base::LazyInstance<SlowConstructor>::DestructorAtExit* lazy_;
+
+  DISALLOW_COPY_AND_ASSIGN(SlowDelegate);
+};
+
+}  // namespace
+
+base::LazyInstance<ConstructAndDestructLogger>::DestructorAtExit lazy_logger =
+    LAZY_INSTANCE_INITIALIZER;
+
+TEST(LazyInstanceTest, Basic) {
+  {
+    base::ShadowingAtExitManager shadow;
+
+    EXPECT_FALSE(lazy_logger.IsCreated());
+    EXPECT_EQ(0, constructed_seq_.GetNext());
+    EXPECT_EQ(0, destructed_seq_.GetNext());
+
+    lazy_logger.Get();
+    EXPECT_TRUE(lazy_logger.IsCreated());
+    EXPECT_EQ(2, constructed_seq_.GetNext());
+    EXPECT_EQ(1, destructed_seq_.GetNext());
+
+    lazy_logger.Pointer();
+    EXPECT_TRUE(lazy_logger.IsCreated());
+    EXPECT_EQ(3, constructed_seq_.GetNext());
+    EXPECT_EQ(2, destructed_seq_.GetNext());
+  }
+  EXPECT_FALSE(lazy_logger.IsCreated());
+  EXPECT_EQ(4, constructed_seq_.GetNext());
+  EXPECT_EQ(4, destructed_seq_.GetNext());
+}
+
+base::LazyInstance<SlowConstructor>::DestructorAtExit lazy_slow =
+    LAZY_INSTANCE_INITIALIZER;
+
+TEST(LazyInstanceTest, ConstructorThreadSafety) {
+  {
+    base::ShadowingAtExitManager shadow;
+
+    SlowDelegate delegate(&lazy_slow);
+    EXPECT_EQ(0, SlowConstructor::constructed);
+
+    base::DelegateSimpleThreadPool pool("lazy_instance_cons", 5);
+    pool.AddWork(&delegate, 20);
+    EXPECT_EQ(0, SlowConstructor::constructed);
+
+    pool.Start();
+    pool.JoinAll();
+    EXPECT_EQ(1, SlowConstructor::constructed);
+  }
+}
+
+namespace {
+
+// DeleteLogger is an object which sets a flag when it's destroyed.
+// It accepts a bool* and sets the bool to true when the dtor runs.
+class DeleteLogger {
+ public:
+  DeleteLogger() : deleted_(nullptr) {}
+  ~DeleteLogger() { *deleted_ = true; }
+
+  void SetDeletedPtr(bool* deleted) {
+    deleted_ = deleted;
+  }
+
+ private:
+  bool* deleted_;
+};
+
+}  // anonymous namespace
+
+TEST(LazyInstanceTest, LeakyLazyInstance) {
+  // Check that using a plain LazyInstance causes the dtor to run
+  // when the AtExitManager finishes.
+  bool deleted1 = false;
+  {
+    base::ShadowingAtExitManager shadow;
+    static base::LazyInstance<DeleteLogger>::DestructorAtExit test =
+        LAZY_INSTANCE_INITIALIZER;
+    test.Get().SetDeletedPtr(&deleted1);
+  }
+  EXPECT_TRUE(deleted1);
+
+  // Check that using a *leaky* LazyInstance makes the dtor not run
+  // when the AtExitManager finishes.
+  bool deleted2 = false;
+  {
+    base::ShadowingAtExitManager shadow;
+    static base::LazyInstance<DeleteLogger>::Leaky
+        test = LAZY_INSTANCE_INITIALIZER;
+    test.Get().SetDeletedPtr(&deleted2);
+  }
+  EXPECT_FALSE(deleted2);
+}
+
+namespace {
+
+template <size_t alignment>
+class AlignedData {
+ public:
+  AlignedData() = default;
+  ~AlignedData() = default;
+  alignas(alignment) char data_[alignment];
+};
+
+}  // namespace
+
+#define EXPECT_ALIGNED(ptr, align) \
+    EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(ptr) & (align - 1))
+
+TEST(LazyInstanceTest, Alignment) {
+  using base::LazyInstance;
+
+  // Create some static instances with increasing sizes and alignment
+  // requirements. By ordering this way, the linker will need to do some work to
+  // ensure proper alignment of the static data.
+  static LazyInstance<AlignedData<4>>::DestructorAtExit align4 =
+      LAZY_INSTANCE_INITIALIZER;
+  static LazyInstance<AlignedData<32>>::DestructorAtExit align32 =
+      LAZY_INSTANCE_INITIALIZER;
+  static LazyInstance<AlignedData<4096>>::DestructorAtExit align4096 =
+      LAZY_INSTANCE_INITIALIZER;
+
+  EXPECT_ALIGNED(align4.Pointer(), 4);
+  EXPECT_ALIGNED(align32.Pointer(), 32);
+  EXPECT_ALIGNED(align4096.Pointer(), 4096);
+}
+
+namespace {
+
+// A class whose constructor busy-loops until it is told to complete
+// construction.
+class BlockingConstructor {
+ public:
+  BlockingConstructor() {
+    EXPECT_FALSE(WasConstructorCalled());
+    base::subtle::NoBarrier_Store(&constructor_called_, 1);
+    EXPECT_TRUE(WasConstructorCalled());
+    while (!base::subtle::NoBarrier_Load(&complete_construction_))
+      base::PlatformThread::YieldCurrentThread();
+    done_construction_ = true;
+  }
+
+  ~BlockingConstructor() {
+    // Restore static state for the next test.
+    base::subtle::NoBarrier_Store(&constructor_called_, 0);
+    base::subtle::NoBarrier_Store(&complete_construction_, 0);
+  }
+
+  // Returns true if BlockingConstructor() was entered.
+  static bool WasConstructorCalled() {
+    return base::subtle::NoBarrier_Load(&constructor_called_);
+  }
+
+  // Instructs BlockingConstructor() that it may now unblock its construction.
+  static void CompleteConstructionNow() {
+    base::subtle::NoBarrier_Store(&complete_construction_, 1);
+  }
+
+  bool done_construction() { return done_construction_; }
+
+ private:
+  // Use Atomic32 instead of AtomicFlag so that they are trivially initialized.
+  static base::subtle::Atomic32 constructor_called_;
+  static base::subtle::Atomic32 complete_construction_;
+
+  bool done_construction_ = false;
+
+  DISALLOW_COPY_AND_ASSIGN(BlockingConstructor);
+};
+
+// A SimpleThread running at |thread_priority| which invokes |before_get|
+// (optional) and then invokes Get() on the LazyInstance it's assigned.
+class BlockingConstructorThread : public base::SimpleThread {
+ public:
+  BlockingConstructorThread(
+      base::ThreadPriority thread_priority,
+      base::LazyInstance<BlockingConstructor>::DestructorAtExit* lazy,
+      base::OnceClosure before_get)
+      : SimpleThread("BlockingConstructorThread", Options(thread_priority)),
+        lazy_(lazy),
+        before_get_(std::move(before_get)) {}
+
+  void Run() override {
+    if (before_get_)
+      std::move(before_get_).Run();
+    EXPECT_TRUE(lazy_->Get().done_construction());
+  }
+
+ private:
+  base::LazyInstance<BlockingConstructor>::DestructorAtExit* lazy_;
+  base::OnceClosure before_get_;
+
+  DISALLOW_COPY_AND_ASSIGN(BlockingConstructorThread);
+};
+
+// static
+base::subtle::Atomic32 BlockingConstructor::constructor_called_ = 0;
+// static
+base::subtle::Atomic32 BlockingConstructor::complete_construction_ = 0;
+
+base::LazyInstance<BlockingConstructor>::DestructorAtExit lazy_blocking =
+    LAZY_INSTANCE_INITIALIZER;
+
+}  // namespace
+
+// Tests that if the thread assigned to construct the LazyInstance runs at
+// background priority, the foreground threads will yield to it enough for it
+// to eventually complete construction.
+// This is a regression test for https://crbug.com/797129.
+TEST(LazyInstanceTest, PriorityInversionAtInitializationResolves) {
+  base::TimeTicks test_begin = base::TimeTicks::Now();
+
+  // Construct BlockingConstructor from a background thread.
+  BlockingConstructorThread background_getter(
+      base::ThreadPriority::BACKGROUND, &lazy_blocking, base::OnceClosure());
+  background_getter.Start();
+
+  while (!BlockingConstructor::WasConstructorCalled())
+    base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(1));
+
+  // Spin up 4 foreground threads per core contending to get the LazyInstance
+  // that is already under construction. Once they are all running and poking
+  // at it, allow the background thread to complete its work.
+  const int kNumForegroundThreads = 4 * base::SysInfo::NumberOfProcessors();
+  std::vector<std::unique_ptr<base::SimpleThread>> foreground_threads;
+  base::RepeatingClosure foreground_thread_ready_callback =
+      base::BarrierClosure(
+          kNumForegroundThreads,
+          base::BindOnce(&BlockingConstructor::CompleteConstructionNow));
+  for (int i = 0; i < kNumForegroundThreads; ++i) {
+    foreground_threads.push_back(std::make_unique<BlockingConstructorThread>(
+        base::ThreadPriority::NORMAL, &lazy_blocking,
+        foreground_thread_ready_callback));
+    foreground_threads.back()->Start();
+  }
+
+  // This test will hang if the foreground threads become stuck in
+  // LazyInstance::Get() because the background thread is never scheduled to
+  // complete construction.
+  for (auto& foreground_thread : foreground_threads)
+    foreground_thread->Join();
+  background_getter.Join();
+
+  // Fail if this test takes more than 5 seconds (it took 5-10 seconds on a
+  // Z840 without r527445, but is expected to be fast, ~30ms, with the fix).
+  EXPECT_LT(base::TimeTicks::Now() - test_begin,
+            base::TimeDelta::FromSeconds(5));
+}
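
For readers following the tests above: base::LazyInstance guarantees exactly-one
construction even under contention, which is what ConstructorThreadSafety and
PriorityInversionAtInitializationResolves exercise. Below is a minimal sketch of
that protocol using standard atomics rather than the actual base/lazy_instance.h
internals; the class and state names are illustrative only.

  #include <atomic>
  #include <thread>

  enum State { kEmpty, kCreating, kCreated };

  template <typename T>
  class NaiveLazy {
   public:
    T* Get() {
      State expected = kEmpty;
      // The first caller wins the race and constructs; everyone else spins,
      // yielding so a lower-priority creator can still make progress.
      if (state_.compare_exchange_strong(expected, kCreating)) {
        instance_ = new T();  // Intentionally leaky in this sketch.
        state_.store(kCreated, std::memory_order_release);
      } else {
        while (state_.load(std::memory_order_acquire) != kCreated)
          std::this_thread::yield();
      }
      return instance_;
    }

   private:
    std::atomic<State> state_{kEmpty};
    T* instance_ = nullptr;
  };
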
diff --git a/base/linux_util.cc b/base/linux_util.cc
new file mode 100644
index 0000000..ddf848e
--- /dev/null
+++ b/base/linux_util.cc
@@ -0,0 +1,226 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/linux_util.h"
+
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <memory>
+#include <vector>
+
+#include "base/command_line.h"
+#include "base/files/file_util.h"
+#include "base/memory/singleton.h"
+#include "base/process/launch.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_tokenizer.h"
+#include "base/strings/string_util.h"
+#include "base/synchronization/lock.h"
+#include "build/build_config.h"
+
+namespace {
+
+// Not needed for OS_CHROMEOS.
+#if defined(OS_LINUX)
+enum LinuxDistroState {
+  STATE_DID_NOT_CHECK  = 0,
+  STATE_CHECK_STARTED  = 1,
+  STATE_CHECK_FINISHED = 2,
+};
+
+// Helper class for GetLinuxDistro().
+class LinuxDistroHelper {
+ public:
+  // Retrieves the Singleton.
+  static LinuxDistroHelper* GetInstance() {
+    return base::Singleton<LinuxDistroHelper>::get();
+  }
+
+  // The simple state machine goes from:
+  // STATE_DID_NOT_CHECK -> STATE_CHECK_STARTED -> STATE_CHECK_FINISHED.
+  LinuxDistroHelper() : state_(STATE_DID_NOT_CHECK) {}
+  ~LinuxDistroHelper() = default;
+
+  // Retrieve the current state. If we're in STATE_DID_NOT_CHECK, we
+  // automatically move to STATE_CHECK_STARTED so nobody else will do the
+  // check.
+  LinuxDistroState State() {
+    base::AutoLock scoped_lock(lock_);
+    if (STATE_DID_NOT_CHECK == state_) {
+      state_ = STATE_CHECK_STARTED;
+      return STATE_DID_NOT_CHECK;
+    }
+    return state_;
+  }
+
+  // Indicate the check finished, move to STATE_CHECK_FINISHED.
+  void CheckFinished() {
+    base::AutoLock scoped_lock(lock_);
+    DCHECK_EQ(STATE_CHECK_STARTED, state_);
+    state_ = STATE_CHECK_FINISHED;
+  }
+
+ private:
+  base::Lock lock_;
+  LinuxDistroState state_;
+};
+#endif  // if defined(OS_LINUX)
+
+bool GetTasksForProcess(pid_t pid, std::vector<pid_t>* tids) {
+  char buf[256];
+  snprintf(buf, sizeof(buf), "/proc/%d/task", pid);
+
+  DIR* task = opendir(buf);
+  if (!task) {
+    DLOG(WARNING) << "Cannot open " << buf;
+    return false;
+  }
+
+  struct dirent* dent;
+  while ((dent = readdir(task))) {
+    char* endptr;
+    const unsigned long int tid_ul = strtoul(dent->d_name, &endptr, 10);
+    if (tid_ul == ULONG_MAX || *endptr)
+      continue;
+    tids->push_back(tid_ul);
+  }
+  closedir(task);
+  return true;
+}
+
+}  // namespace
+
+namespace base {
+
+// Account for the terminating null character.
+static const int kDistroSize = 128 + 1;
+
+// We use this static string to hold the Linux distro info. If we
+// crash, the crash handler code will send this in the crash dump.
+char g_linux_distro[kDistroSize] =
+#if defined(OS_CHROMEOS)
+    "CrOS";
+#elif defined(OS_ANDROID)
+    "Android";
+#else  // if defined(OS_LINUX)
+    "Unknown";
+#endif
+
+std::string GetLinuxDistro() {
+#if defined(OS_CHROMEOS) || defined(OS_ANDROID)
+  return g_linux_distro;
+#elif defined(OS_LINUX)
+  LinuxDistroHelper* distro_state_singleton = LinuxDistroHelper::GetInstance();
+  LinuxDistroState state = distro_state_singleton->State();
+  if (STATE_CHECK_FINISHED == state)
+    return g_linux_distro;
+  if (STATE_CHECK_STARTED == state)
+    return "Unknown"; // Don't wait for other thread to finish.
+  DCHECK_EQ(state, STATE_DID_NOT_CHECK);
+  // We do this check only once per process. If it fails, there's
+  // little reason to believe it will work if we attempt to run
+  // lsb_release again.
+  std::vector<std::string> argv;
+  argv.push_back("lsb_release");
+  argv.push_back("-d");
+  std::string output;
+  GetAppOutput(CommandLine(argv), &output);
+  if (output.length() > 0) {
+    // lsb_release -d should return: Description:<tab>Distro Info
+    const char field[] = "Description:\t";
+    if (output.compare(0, strlen(field), field) == 0) {
+      SetLinuxDistro(output.substr(strlen(field)));
+    }
+  }
+  distro_state_singleton->CheckFinished();
+  return g_linux_distro;
+#else
+  NOTIMPLEMENTED();
+  return "Unknown";
+#endif
+}
+
+void SetLinuxDistro(const std::string& distro) {
+  std::string trimmed_distro;
+  TrimWhitespaceASCII(distro, TRIM_ALL, &trimmed_distro);
+  strlcpy(g_linux_distro, trimmed_distro.c_str(), kDistroSize);
+}
+
+pid_t FindThreadIDWithSyscall(pid_t pid, const std::string& expected_data,
+                              bool* syscall_supported) {
+  if (syscall_supported != nullptr)
+    *syscall_supported = false;
+
+  std::vector<pid_t> tids;
+  if (!GetTasksForProcess(pid, &tids))
+    return -1;
+
+  std::unique_ptr<char[]> syscall_data(new char[expected_data.length()]);
+  for (pid_t tid : tids) {
+    char buf[256];
+    snprintf(buf, sizeof(buf), "/proc/%d/task/%d/syscall", pid, tid);
+    int fd = open(buf, O_RDONLY);
+    if (fd < 0)
+      continue;
+    if (syscall_supported != nullptr)
+      *syscall_supported = true;
+    bool read_ret = ReadFromFD(fd, syscall_data.get(), expected_data.length());
+    close(fd);
+    if (!read_ret)
+      continue;
+
+    if (0 == strncmp(expected_data.c_str(), syscall_data.get(),
+                     expected_data.length())) {
+      return tid;
+    }
+  }
+  return -1;
+}
+
+pid_t FindThreadID(pid_t pid, pid_t ns_tid, bool* ns_pid_supported) {
+  if (ns_pid_supported)
+    *ns_pid_supported = false;
+
+  std::vector<pid_t> tids;
+  if (!GetTasksForProcess(pid, &tids))
+    return -1;
+
+  for (pid_t tid : tids) {
+    char buf[256];
+    snprintf(buf, sizeof(buf), "/proc/%d/task/%d/status", pid, tid);
+    std::string status;
+    if (!ReadFileToString(FilePath(buf), &status))
+      return -1;
+    StringTokenizer tokenizer(status, "\n");
+    while (tokenizer.GetNext()) {
+      StringPiece value_str(tokenizer.token_piece());
+      if (!value_str.starts_with("NSpid"))
+        continue;
+      if (ns_pid_supported)
+        *ns_pid_supported = true;
+      std::vector<StringPiece> split_value_str = SplitStringPiece(
+          value_str, "\t", TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
+      DCHECK_GE(split_value_str.size(), 2u);
+      int value;
+      // The last value in the list is the PID in the namespace.
+      if (StringToInt(split_value_str.back(), &value) && value == ns_tid) {
+        // The second value in the list is the real PID.
+        if (StringToInt(split_value_str[1], &value))
+          return value;
+      }
+      break;
+    }
+  }
+  return -1;
+}
+
+}  // namespace base
diff --git a/base/linux_util.h b/base/linux_util.h
new file mode 100644
index 0000000..272e06b
--- /dev/null
+++ b/base/linux_util.h
@@ -0,0 +1,44 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_LINUX_UTIL_H_
+#define BASE_LINUX_UTIL_H_
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <string>
+
+#include "base/base_export.h"
+
+namespace base {
+
+// This is declared here so the crash reporter can access the memory directly
+// in a compromised context without going through the standard library.
+BASE_EXPORT extern char g_linux_distro[];
+
+// Get the Linux Distro if we can, or return "Unknown".
+BASE_EXPORT std::string GetLinuxDistro();
+
+// Set the Linux Distro string.
+BASE_EXPORT void SetLinuxDistro(const std::string& distro);
+
+// For a given process |pid|, look through all its threads and find the first
+// thread with /proc/[pid]/task/[thread_id]/syscall whose first N bytes matches
+// |expected_data|, where N is the length of |expected_data|.
+// Returns the thread id or -1 on error. If |syscall_supported| is set to
+// false, the kernel does not support the syscall file in procfs.
+BASE_EXPORT pid_t FindThreadIDWithSyscall(pid_t pid,
+                                          const std::string& expected_data,
+                                          bool* syscall_supported);
+
+// For a given process |pid|, look through all its threads and find the first
+// thread with /proc/[pid]/task/[thread_id]/status where NSpid matches |ns_tid|.
+// Returns the thread id or -1 on error. If |ns_pid_supported| is set to
+// false, the kernel does not support NSpid in procfs.
+BASE_EXPORT pid_t FindThreadID(pid_t pid, pid_t ns_tid, bool* ns_pid_supported);
+
+}  // namespace base
+
+#endif  // BASE_LINUX_UTIL_H_
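
A caller-side sketch of this API follows; the pid and syscall data are
hypothetical inputs (in Chromium they come from the browser's crash handling of
a sandboxed process), and ResolveCrashingThread is not part of this patch.

  #include <string>

  #include "base/linux_util.h"

  // Returns the global tid of the thread in |pid| whose /proc syscall file
  // matches |syscall_data|, or -1. |supported| distinguishes "no match" from
  // "the kernel can't tell us".
  pid_t ResolveCrashingThread(pid_t pid, const std::string& syscall_data) {
    bool supported = false;
    pid_t tid = base::FindThreadIDWithSyscall(pid, syscall_data, &supported);
    if (tid == -1 && !supported) {
      // Hypothetical fallback: map a namespace-local tid (here 2) reported by
      // the process itself, via the NSpid lines in /proc/.../status.
      bool ns_supported = false;
      tid = base::FindThreadID(pid, /*ns_tid=*/2, &ns_supported);
    }
    return tid;
  }
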
diff --git a/base/location.cc b/base/location.cc
new file mode 100644
index 0000000..8bbf6ed
--- /dev/null
+++ b/base/location.cc
@@ -0,0 +1,77 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/location.h"
+
+#if defined(COMPILER_MSVC)
+#include <intrin.h>
+#endif
+
+#include "base/compiler_specific.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/stringprintf.h"
+#include "build/build_config.h"
+
+namespace base {
+
+Location::Location() = default;
+Location::Location(const Location& other) = default;
+
+Location::Location(const char* file_name, const void* program_counter)
+    : file_name_(file_name), program_counter_(program_counter) {}
+
+Location::Location(const char* function_name,
+                   const char* file_name,
+                   int line_number,
+                   const void* program_counter)
+    : function_name_(function_name),
+      file_name_(file_name),
+      line_number_(line_number),
+      program_counter_(program_counter) {
+#if !defined(OS_NACL)
+  // The program counter should not be null except in a default constructed
+  // (empty) Location object. This value is used for identity, so if it doesn't
+  // uniquely identify a location, things will break.
+  //
+  // The program counter isn't supported in NaCl so location objects won't work
+  // properly in that context.
+  DCHECK(program_counter);
+#endif
+}
+
+std::string Location::ToString() const {
+  if (has_source_info()) {
+    return std::string(function_name_) + "@" + file_name_ + ":" +
+           IntToString(line_number_);
+  }
+  return StringPrintf("pc:%p", program_counter_);
+}
+
+#if defined(COMPILER_MSVC)
+#define RETURN_ADDRESS() _ReturnAddress()
+#elif defined(COMPILER_GCC) && !defined(OS_NACL)
+#define RETURN_ADDRESS() \
+  __builtin_extract_return_addr(__builtin_return_address(0))
+#else
+#define RETURN_ADDRESS() nullptr
+#endif
+
+// static
+NOINLINE Location Location::CreateFromHere(const char* file_name) {
+  return Location(file_name, RETURN_ADDRESS());
+}
+
+// static
+NOINLINE Location Location::CreateFromHere(const char* function_name,
+                                           const char* file_name,
+                                           int line_number) {
+  return Location(function_name, file_name, line_number, RETURN_ADDRESS());
+}
+
+//------------------------------------------------------------------------------
+NOINLINE const void* GetProgramCounter() {
+  return RETURN_ADDRESS();
+}
+
+}  // namespace base
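
Why CreateFromHere must be NOINLINE: the resulting Location is keyed on the
caller's return address, so two distinct call sites must yield two distinct
program counters. A small sketch (Report, SiteA, and SiteB are illustrative;
the output format follows ToString() above):

  #include <cstdio>

  #include "base/location.h"

  void Report(const base::Location& loc) {
    // With no function/line info, ToString() prints "pc:<hex address>".
    std::printf("%s\n", loc.ToString().c_str());
  }

  // Each call site has its own return address, hence its own identity.
  void SiteA() { Report(base::Location::CreateFromHere(__FILE__)); }
  void SiteB() { Report(base::Location::CreateFromHere(__FILE__)); }
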
diff --git a/base/location.h b/base/location.h
new file mode 100644
index 0000000..14fe2fa
--- /dev/null
+++ b/base/location.h
@@ -0,0 +1,117 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_LOCATION_H_
+#define BASE_LOCATION_H_
+
+#include <stddef.h>
+
+#include <cassert>
+#include <string>
+
+#include "base/base_export.h"
+#include "base/debug/debugging_buildflags.h"
+#include "base/hash.h"
+
+namespace base {
+
+// Location provides basic info about where an object was constructed or was
+// significantly brought to life.
+class BASE_EXPORT Location {
+ public:
+  Location();
+  Location(const Location& other);
+
+  // Only initializes the file name and program counter; the source information
+  // will be null for the strings, and -1 for the line number.
+  // TODO(http://crbug.com/760702) remove file name from this constructor.
+  Location(const char* file_name, const void* program_counter);
+
+  // Constructor should be called with a long-lived char*, such as __FILE__.
+  // It assumes the provided value will persist as a global constant, and it
+  // will not make a copy of it.
+  Location(const char* function_name,
+           const char* file_name,
+           int line_number,
+           const void* program_counter);
+
+  // Comparator for hash map insertion. The program counter should uniquely
+  // identify a location.
+  bool operator==(const Location& other) const {
+    return program_counter_ == other.program_counter_;
+  }
+
+  // Returns true if there is source code location info. If this is false,
+  // the Location object only contains a program counter or is
+  // default-initialized (the program counter is also null).
+  bool has_source_info() const { return function_name_ && file_name_; }
+
+  // Will be nullptr for default initialized Location objects and when source
+  // names are disabled.
+  const char* function_name() const { return function_name_; }
+
+  // Will be nullptr for default initialized Location objects and when source
+  // names are disabled.
+  const char* file_name() const { return file_name_; }
+
+  // Will be -1 for default initialized Location objects and when source names
+  // are disabled.
+  int line_number() const { return line_number_; }
+
+  // The address of the code generating this Location object. Should always be
+  // valid except for default initialized Location objects, which will be
+  // nullptr.
+  const void* program_counter() const { return program_counter_; }
+
+  // Converts to the most user-readable form possible. If function and filename
+  // are not available, this will return "pc:<hex address>".
+  std::string ToString() const;
+
+  static Location CreateFromHere(const char* file_name);
+  static Location CreateFromHere(const char* function_name,
+                                 const char* file_name,
+                                 int line_number);
+
+ private:
+  const char* function_name_ = nullptr;
+  const char* file_name_ = nullptr;
+  int line_number_ = -1;
+  const void* program_counter_ = nullptr;
+};
+
+BASE_EXPORT const void* GetProgramCounter();
+
+// The macros defined here will expand to the current function.
+#if BUILDFLAG(ENABLE_LOCATION_SOURCE)
+
+// Full source information should be included.
+#define FROM_HERE FROM_HERE_WITH_EXPLICIT_FUNCTION(__func__)
+#define FROM_HERE_WITH_EXPLICIT_FUNCTION(function_name) \
+  ::base::Location::CreateFromHere(function_name, __FILE__, __LINE__)
+
+#else
+
+// TODO(http://crbug.com/760702) remove the __FILE__ argument from these calls.
+#define FROM_HERE ::base::Location::CreateFromHere(__FILE__)
+#define FROM_HERE_WITH_EXPLICIT_FUNCTION(function_name) \
+  ::base::Location::CreateFromHere(function_name, __FILE__, -1)
+
+#endif
+
+}  // namespace base
+
+namespace std {
+
+// Specialization for using Location in hash tables.
+template <>
+struct hash<::base::Location> {
+  std::size_t operator()(const ::base::Location& loc) const {
+    const void* program_counter = loc.program_counter();
+    return base::Hash(&program_counter, sizeof(void*));
+  }
+};
+
+}  // namespace std
+
+#endif  // BASE_LOCATION_H_
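
Since the header supplies operator== and a std::hash specialization keyed on
the program counter, a Location can be used directly as a hash-map key. A usage
sketch (SiteCounts and RecordCall are illustrative, not part of this patch):

  #include <unordered_map>

  #include "base/location.h"

  std::unordered_map<base::Location, int>& SiteCounts() {
    static std::unordered_map<base::Location, int> counts;
    return counts;
  }

  void RecordCall(const base::Location& from_here) {
    ++SiteCounts()[from_here];  // Buckets by call site (program counter).
  }

  void Caller() {
    RecordCall(FROM_HERE);  // Function/file/line when ENABLE_LOCATION_SOURCE.
  }
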
diff --git a/base/logging.cc b/base/logging.cc
new file mode 100644
index 0000000..8eabda0
--- /dev/null
+++ b/base/logging.cc
@@ -0,0 +1,1054 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/logging.h"
+
+#include <limits.h>
+#include <stdint.h>
+
+#include "base/macros.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include <io.h>
+#include <windows.h>
+typedef HANDLE FileHandle;
+typedef HANDLE MutexHandle;
+// Windows warns on using write().  It prefers _write().
+#define write(fd, buf, count) _write(fd, buf, static_cast<unsigned int>(count))
+// Windows doesn't define STDERR_FILENO.  Define it here.
+#define STDERR_FILENO 2
+
+#elif defined(OS_MACOSX)
+// In macOS 10.12 and iOS 10.0 and later, ASL (Apple System Log) was deprecated
+// in favor of OS_LOG (Unified Logging).
+#include <AvailabilityMacros.h>
+#if defined(OS_IOS)
+#if !defined(__IPHONE_10_0) || __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_10_0
+#define USE_ASL
+#endif
+#else  // !defined(OS_IOS)
+#if !defined(MAC_OS_X_VERSION_10_12) || \
+    MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_12
+#define USE_ASL
+#endif
+#endif  // defined(OS_IOS)
+
+#if defined(USE_ASL)
+#include <asl.h>
+#else
+#include <os/log.h>
+#endif
+
+#include <CoreFoundation/CoreFoundation.h>
+#include <mach/mach.h>
+#include <mach/mach_time.h>
+#include <mach-o/dyld.h>
+
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+#if defined(OS_NACL)
+#include <sys/time.h>  // timespec doesn't seem to be in <time.h>
+#endif
+#include <time.h>
+#endif
+
+#if defined(OS_FUCHSIA)
+#include <zircon/process.h>
+#include <zircon/syscalls.h>
+#endif
+
+#if defined(OS_ANDROID)
+#include <android/log.h>
+#endif
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include <errno.h>
+#include <paths.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#define MAX_PATH PATH_MAX
+typedef FILE* FileHandle;
+typedef pthread_mutex_t* MutexHandle;
+#endif
+
+#include <algorithm>
+#include <cstring>
+#include <ctime>
+#include <iomanip>
+#include <ostream>
+#include <string>
+#include <utility>
+
+#include "base/base_switches.h"
+#include "base/callback.h"
+#include "base/command_line.h"
+#include "base/containers/stack.h"
+#include "base/debug/activity_tracker.h"
+#include "base/debug/alias.h"
+#include "base/debug/debugger.h"
+#include "base/debug/stack_trace.h"
+#include "base/lazy_instance.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/strings/sys_string_conversions.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/synchronization/lock_impl.h"
+#include "base/threading/platform_thread.h"
+#include "base/vlog.h"
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include "base/posix/safe_strerror.h"
+#endif
+
+namespace logging {
+
+namespace {
+
+VlogInfo* g_vlog_info = nullptr;
+VlogInfo* g_vlog_info_prev = nullptr;
+
+const char* const log_severity_names[] = {"INFO", "WARNING", "ERROR", "FATAL"};
+static_assert(LOG_NUM_SEVERITIES == arraysize(log_severity_names),
+              "Incorrect number of log_severity_names");
+
+const char* log_severity_name(int severity) {
+  if (severity >= 0 && severity < LOG_NUM_SEVERITIES)
+    return log_severity_names[severity];
+  return "UNKNOWN";
+}
+
+int g_min_log_level = 0;
+
+LoggingDestination g_logging_destination = LOG_DEFAULT;
+
+// For LOG_ERROR and above, always print to stderr.
+const int kAlwaysPrintErrorLevel = LOG_ERROR;
+
+// Which log file to use? This is initialized by InitLogging or
+// will be lazily initialized to the default value when it is
+// first needed.
+#if defined(OS_WIN)
+typedef std::wstring PathString;
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+typedef std::string PathString;
+#endif
+PathString* g_log_file_name = nullptr;
+
+// This file is lazily opened and the handle may be nullptr.
+FileHandle g_log_file = nullptr;
+
+// What should be prepended to each message?
+bool g_log_process_id = false;
+bool g_log_thread_id = false;
+bool g_log_timestamp = true;
+bool g_log_tickcount = false;
+
+// Should we pop up fatal debug messages in a dialog?
+bool show_error_dialogs = false;
+
+// An assert handler override specified by the client to be called instead of
+// the debug message dialog and process termination. Assert handlers are stored
+// in stack to allow overriding and restoring.
+base::LazyInstance<base::stack<LogAssertHandlerFunction>>::Leaky
+    log_assert_handler_stack = LAZY_INSTANCE_INITIALIZER;
+
+// A log message handler that gets notified of every log message we process.
+LogMessageHandlerFunction log_message_handler = nullptr;
+
+// Helper functions to wrap platform differences.
+
+int32_t CurrentProcessId() {
+#if defined(OS_WIN)
+  return GetCurrentProcessId();
+#elif defined(OS_FUCHSIA)
+  zx_info_handle_basic_t basic = {};
+  zx_object_get_info(zx_process_self(), ZX_INFO_HANDLE_BASIC, &basic,
+                     sizeof(basic), nullptr, nullptr);
+  return basic.koid;
+#elif defined(OS_POSIX)
+  return getpid();
+#endif
+}
+
+uint64_t TickCount() {
+#if defined(OS_WIN)
+  return GetTickCount();
+#elif defined(OS_FUCHSIA)
+  return zx_clock_get(ZX_CLOCK_MONOTONIC) /
+         static_cast<zx_time_t>(base::Time::kNanosecondsPerMicrosecond);
+#elif defined(OS_MACOSX)
+  return mach_absolute_time();
+#elif defined(OS_NACL)
+  // NaCl sadly does not have _POSIX_TIMERS enabled in sys/features.h,
+  // so we have to use clock() for now.
+  return clock();
+#elif defined(OS_POSIX)
+  struct timespec ts;
+  clock_gettime(CLOCK_MONOTONIC, &ts);
+
+  uint64_t absolute_micro = static_cast<int64_t>(ts.tv_sec) * 1000000 +
+                            static_cast<int64_t>(ts.tv_nsec) / 1000;
+
+  return absolute_micro;
+#endif
+}
+
+void DeleteFilePath(const PathString& log_name) {
+#if defined(OS_WIN)
+  DeleteFile(log_name.c_str());
+#elif defined(OS_NACL)
+  // Do nothing; unlink() isn't supported on NaCl.
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  unlink(log_name.c_str());
+#else
+#error Unsupported platform
+#endif
+}
+
+PathString GetDefaultLogFile() {
+#if defined(OS_WIN)
+  // On Windows we use the same path as the exe.
+  wchar_t module_name[MAX_PATH];
+  GetModuleFileName(nullptr, module_name, MAX_PATH);
+
+  PathString log_name = module_name;
+  PathString::size_type last_backslash = log_name.rfind('\\', log_name.size());
+  if (last_backslash != PathString::npos)
+    log_name.erase(last_backslash + 1);
+  log_name += L"debug.log";
+  return log_name;
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  // On other platforms we just use the current directory.
+  return PathString("debug.log");
+#endif
+}
+
+// We don't need locks on Windows for atomically appending to files. The OS
+// provides this functionality.
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+// This class acts as a wrapper for locking the logging files.
+// LoggingLock::Init() should be called from the main thread before any logging
+// is done. Then whenever logging, be sure to have a local LoggingLock
+// instance on the stack. This will ensure that the lock is unlocked upon
+// exiting the frame.
+// LoggingLocks cannot be nested.
+class LoggingLock {
+ public:
+  LoggingLock() {
+    LockLogging();
+  }
+
+  ~LoggingLock() {
+    UnlockLogging();
+  }
+
+  static void Init(LogLockingState lock_log, const PathChar* new_log_file) {
+    if (initialized)
+      return;
+    lock_log_file = lock_log;
+
+    if (lock_log_file != LOCK_LOG_FILE)
+      log_lock = new base::internal::LockImpl();
+
+    initialized = true;
+  }
+
+ private:
+  static void LockLogging() {
+    if (lock_log_file == LOCK_LOG_FILE) {
+      pthread_mutex_lock(&log_mutex);
+    } else {
+      // use the lock
+      log_lock->Lock();
+    }
+  }
+
+  static void UnlockLogging() {
+    if (lock_log_file == LOCK_LOG_FILE) {
+      pthread_mutex_unlock(&log_mutex);
+    } else {
+      log_lock->Unlock();
+    }
+  }
+
+  // The lock is used when log file locking is disabled. It helps us avoid
+  // problems with multiple threads writing to the log file at the same time.
+  // Use LockImpl directly instead of Lock, because Lock makes logging calls.
+  static base::internal::LockImpl* log_lock;
+
+  // When file locking is requested, we use this global mutex to serialize
+  // writers within the process.
+  static pthread_mutex_t log_mutex;
+
+  static bool initialized;
+  static LogLockingState lock_log_file;
+};
+
+// static
+bool LoggingLock::initialized = false;
+// static
+base::internal::LockImpl* LoggingLock::log_lock = nullptr;
+// static
+LogLockingState LoggingLock::lock_log_file = LOCK_LOG_FILE;
+
+pthread_mutex_t LoggingLock::log_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+#endif  // OS_POSIX || OS_FUCHSIA
+
+// Called by logging functions to ensure that |g_log_file| is initialized
+// and can be used for writing. Returns false if the file could not be
+// initialized. |g_log_file| will be nullptr in this case.
+bool InitializeLogFileHandle() {
+  if (g_log_file)
+    return true;
+
+  if (!g_log_file_name) {
+    // Nobody has called InitLogging to specify a debug log file, so here we
+    // initialize the log file name to a default.
+    g_log_file_name = new PathString(GetDefaultLogFile());
+  }
+
+  if ((g_logging_destination & LOG_TO_FILE) != 0) {
+#if defined(OS_WIN)
+    // The FILE_APPEND_DATA access mask ensures that the file is atomically
+    // appended to across accesses from multiple threads.
+    // https://msdn.microsoft.com/en-us/library/windows/desktop/aa364399(v=vs.85).aspx
+    // https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx
+    g_log_file = CreateFile(g_log_file_name->c_str(), FILE_APPEND_DATA,
+                            FILE_SHARE_READ | FILE_SHARE_WRITE, nullptr,
+                            OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, nullptr);
+    if (g_log_file == INVALID_HANDLE_VALUE || g_log_file == nullptr) {
+      // We are intentionally not using FilePath or FileUtil here to reduce the
+      // dependencies of the logging implementation. For example, FilePath and
+      // FileUtil depend on shell32 and user32.dll. This is not acceptable for
+      // some consumers of base logging, like chrome_elf.
+      // Please don't change the code below to use FilePath.
+      // try the current directory
+      wchar_t system_buffer[MAX_PATH];
+      system_buffer[0] = 0;
+      DWORD len = ::GetCurrentDirectory(arraysize(system_buffer),
+                                        system_buffer);
+      if (len == 0 || len > arraysize(system_buffer))
+        return false;
+
+      *g_log_file_name = system_buffer;
+      // Append a trailing backslash if needed.
+      if (g_log_file_name->back() != L'\\')
+        *g_log_file_name += L"\\";
+      *g_log_file_name += L"debug.log";
+
+      g_log_file = CreateFile(g_log_file_name->c_str(), FILE_APPEND_DATA,
+                              FILE_SHARE_READ | FILE_SHARE_WRITE, nullptr,
+                              OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, nullptr);
+      if (g_log_file == INVALID_HANDLE_VALUE || g_log_file == nullptr) {
+        g_log_file = nullptr;
+        return false;
+      }
+    }
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+    g_log_file = fopen(g_log_file_name->c_str(), "a");
+    if (g_log_file == nullptr)
+      return false;
+#else
+#error Unsupported platform
+#endif
+  }
+
+  return true;
+}
+
+void CloseFile(FileHandle log) {
+#if defined(OS_WIN)
+  CloseHandle(log);
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  fclose(log);
+#else
+#error Unsupported platform
+#endif
+}
+
+void CloseLogFileUnlocked() {
+  if (!g_log_file)
+    return;
+
+  CloseFile(g_log_file);
+  g_log_file = nullptr;
+}
+
+}  // namespace
+
+#if DCHECK_IS_CONFIGURABLE
+// In DCHECK-enabled Chrome builds, allow the meaning of LOG_DCHECK to be
+// determined at run-time. We default it to INFO, to avoid it triggering
+// crashes before the run-time has explicitly chosen the behaviour.
+BASE_EXPORT logging::LogSeverity LOG_DCHECK = LOG_INFO;
+#endif  // DCHECK_IS_CONFIGURABLE
+
+// This is never instantiated; it's just used for EAT_STREAM_PARAMETERS to have
+// an object of the correct type on the LHS of the unused part of the ternary
+// operator.
+std::ostream* g_swallow_stream;
+
+LoggingSettings::LoggingSettings()
+    : logging_dest(LOG_DEFAULT),
+      log_file(nullptr),
+      lock_log(LOCK_LOG_FILE),
+      delete_old(APPEND_TO_OLD_LOG_FILE) {}
+
+bool BaseInitLoggingImpl(const LoggingSettings& settings) {
+#if defined(OS_NACL)
+  // Can log only to the system debug log.
+  CHECK_EQ(settings.logging_dest & ~LOG_TO_SYSTEM_DEBUG_LOG, 0);
+#endif
+  base::CommandLine* command_line = base::CommandLine::ForCurrentProcess();
+  // Don't bother initializing |g_vlog_info| unless we use one of the
+  // vlog switches.
+  if (command_line->HasSwitch(switches::kV) ||
+      command_line->HasSwitch(switches::kVModule)) {
+    // NOTE: If |g_vlog_info| has already been initialized, it might be in use
+    // by another thread. Don't delete the old VLogInfo, just create a second
+    // one. We keep track of both to avoid memory leak warnings.
+    CHECK(!g_vlog_info_prev);
+    g_vlog_info_prev = g_vlog_info;
+
+    g_vlog_info =
+        new VlogInfo(command_line->GetSwitchValueASCII(switches::kV),
+                     command_line->GetSwitchValueASCII(switches::kVModule),
+                     &g_min_log_level);
+  }
+
+  g_logging_destination = settings.logging_dest;
+
+  // Ignore file options unless logging to a file is set.
+  if ((g_logging_destination & LOG_TO_FILE) == 0)
+    return true;
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+  LoggingLock::Init(settings.lock_log, settings.log_file);
+  LoggingLock logging_lock;
+#endif
+
+  // Calling InitLogging twice or after some log call has already opened the
+  // default log file will re-initialize to the new options.
+  CloseLogFileUnlocked();
+
+  if (!g_log_file_name)
+    g_log_file_name = new PathString();
+  *g_log_file_name = settings.log_file;
+  if (settings.delete_old == DELETE_OLD_LOG_FILE)
+    DeleteFilePath(*g_log_file_name);
+
+  return InitializeLogFileHandle();
+}
+
+void SetMinLogLevel(int level) {
+  g_min_log_level = std::min(LOG_FATAL, level);
+}
+
+int GetMinLogLevel() {
+  return g_min_log_level;
+}
+
+bool ShouldCreateLogMessage(int severity) {
+  if (severity < g_min_log_level)
+    return false;
+
+  // Return true here unless we know ~LogMessage won't do anything. Note that
+  // ~LogMessage writes to stderr if severity_ >= kAlwaysPrintErrorLevel, even
+  // when g_logging_destination is LOG_NONE.
+  return g_logging_destination != LOG_NONE || log_message_handler ||
+         severity >= kAlwaysPrintErrorLevel;
+}
+
+int GetVlogVerbosity() {
+  return std::max(-1, LOG_INFO - GetMinLogLevel());
+}
+
+int GetVlogLevelHelper(const char* file, size_t N) {
+  DCHECK_GT(N, 0U);
+  // Note: |g_vlog_info| may change on a different thread during startup
+  // (but will always be valid or nullptr).
+  VlogInfo* vlog_info = g_vlog_info;
+  return vlog_info ?
+      vlog_info->GetVlogLevel(base::StringPiece(file, N - 1)) :
+      GetVlogVerbosity();
+}
+
+void SetLogItems(bool enable_process_id, bool enable_thread_id,
+                 bool enable_timestamp, bool enable_tickcount) {
+  g_log_process_id = enable_process_id;
+  g_log_thread_id = enable_thread_id;
+  g_log_timestamp = enable_timestamp;
+  g_log_tickcount = enable_tickcount;
+}
+
+void SetShowErrorDialogs(bool enable_dialogs) {
+  show_error_dialogs = enable_dialogs;
+}
+
+ScopedLogAssertHandler::ScopedLogAssertHandler(
+    LogAssertHandlerFunction handler) {
+  log_assert_handler_stack.Get().push(std::move(handler));
+}
+
+ScopedLogAssertHandler::~ScopedLogAssertHandler() {
+  log_assert_handler_stack.Get().pop();
+}
+
+void SetLogMessageHandler(LogMessageHandlerFunction handler) {
+  log_message_handler = handler;
+}
+
+LogMessageHandlerFunction GetLogMessageHandler() {
+  return log_message_handler;
+}
+
+// Explicit instantiations for commonly used comparisons.
+template std::string* MakeCheckOpString<int, int>(
+    const int&, const int&, const char* names);
+template std::string* MakeCheckOpString<unsigned long, unsigned long>(
+    const unsigned long&, const unsigned long&, const char* names);
+template std::string* MakeCheckOpString<unsigned long, unsigned int>(
+    const unsigned long&, const unsigned int&, const char* names);
+template std::string* MakeCheckOpString<unsigned int, unsigned long>(
+    const unsigned int&, const unsigned long&, const char* names);
+template std::string* MakeCheckOpString<std::string, std::string>(
+    const std::string&, const std::string&, const char* names);
+
+void MakeCheckOpValueString(std::ostream* os, std::nullptr_t p) {
+  (*os) << "nullptr";
+}
+
+#if !defined(NDEBUG)
+// Displays a message box to the user with the error message in it.
+// Used for fatal messages, where we close the app simultaneously.
+// This is for developers only; we don't use this in circumstances
+// (like release builds) where users could see it, since users don't
+// understand these messages anyway.
+void DisplayDebugMessageInDialog(const std::string& str) {
+  if (str.empty())
+    return;
+
+  if (!show_error_dialogs)
+    return;
+
+#if defined(OS_WIN)
+  // We intentionally don't implement a dialog on other platforms.
+  // You can just look at stderr.
+  MessageBoxW(nullptr, base::UTF8ToUTF16(str).c_str(), L"Fatal error",
+              MB_OK | MB_ICONHAND | MB_TOPMOST);
+#endif  // defined(OS_WIN)
+}
+#endif  // !defined(NDEBUG)
+
+#if defined(OS_WIN)
+LogMessage::SaveLastError::SaveLastError() : last_error_(::GetLastError()) {
+}
+
+LogMessage::SaveLastError::~SaveLastError() {
+  ::SetLastError(last_error_);
+}
+#endif  // defined(OS_WIN)
+
+LogMessage::LogMessage(const char* file, int line, LogSeverity severity)
+    : severity_(severity), file_(file), line_(line) {
+  Init(file, line);
+}
+
+LogMessage::LogMessage(const char* file, int line, const char* condition)
+    : severity_(LOG_FATAL), file_(file), line_(line) {
+  Init(file, line);
+  stream_ << "Check failed: " << condition << ". ";
+}
+
+LogMessage::LogMessage(const char* file, int line, std::string* result)
+    : severity_(LOG_FATAL), file_(file), line_(line) {
+  Init(file, line);
+  stream_ << "Check failed: " << *result;
+  delete result;
+}
+
+LogMessage::LogMessage(const char* file, int line, LogSeverity severity,
+                       std::string* result)
+    : severity_(severity), file_(file), line_(line) {
+  Init(file, line);
+  stream_ << "Check failed: " << *result;
+  delete result;
+}
+
+LogMessage::~LogMessage() {
+  size_t stack_start = stream_.tellp();
+#if !defined(OFFICIAL_BUILD) && !defined(OS_NACL) && !defined(__UCLIBC__) && \
+    !defined(OS_AIX)
+  if (severity_ == LOG_FATAL && !base::debug::BeingDebugged()) {
+    // Include a stack trace on a fatal, unless a debugger is attached.
+    base::debug::StackTrace trace;
+    stream_ << std::endl;  // Newline to separate from log message.
+    trace.OutputToStream(&stream_);
+  }
+#endif
+  stream_ << std::endl;
+  std::string str_newline(stream_.str());
+
+  // Give any log message handler first dibs on the message.
+  if (log_message_handler &&
+      log_message_handler(severity_, file_, line_,
+                          message_start_, str_newline)) {
+    // The handler took care of it, no further processing.
+    return;
+  }
+
+  if ((g_logging_destination & LOG_TO_SYSTEM_DEBUG_LOG) != 0) {
+#if defined(OS_WIN)
+    OutputDebugStringA(str_newline.c_str());
+#elif defined(OS_MACOSX)
+    // In LOG_TO_SYSTEM_DEBUG_LOG mode, log messages are always written to
+    // stderr. If stderr is /dev/null, also log via ASL (Apple System Log) or
+    // its successor OS_LOG. If there's something weird about stderr, assume
+    // that log messages are going nowhere and log via ASL/OS_LOG too.
+    // Messages logged via ASL/OS_LOG show up in Console.app.
+    //
+    // Programs started by launchd, as UI applications normally are, have had
+    // stderr connected to /dev/null since OS X 10.8. Prior to that, stderr was
+    // a pipe to launchd, which logged what it received (see log_redirect_fd in
+    // 10.7.5 launchd-392.39/launchd/src/launchd_core_logic.c).
+    //
+    // Another alternative would be to determine whether stderr is a pipe to
+    // launchd and avoid logging via ASL only in that case. See 10.7.5
+    // CF-635.21/CFUtilities.c also_do_stderr(). This would result in logging to
+    // both stderr and ASL/OS_LOG even in tests, where it's undesirable to log
+    // to the system log at all.
+    //
+    // Note that the ASL client by default discards messages whose levels are
+    // below ASL_LEVEL_NOTICE. It's possible to change that with
+    // asl_set_filter(), but this is pointless because syslogd normally applies
+    // the same filter.
+    const bool log_to_system = []() {
+      struct stat stderr_stat;
+      if (fstat(fileno(stderr), &stderr_stat) == -1) {
+        return true;
+      }
+      if (!S_ISCHR(stderr_stat.st_mode)) {
+        return false;
+      }
+
+      struct stat dev_null_stat;
+      if (stat(_PATH_DEVNULL, &dev_null_stat) == -1) {
+        return true;
+      }
+
+      return !S_ISCHR(dev_null_stat.st_mode) ||
+             stderr_stat.st_rdev == dev_null_stat.st_rdev;
+    }();
+
+    if (log_to_system) {
+      // Log roughly the same way that CFLog() and NSLog() would. See 10.10.5
+      // CF-1153.18/CFUtilities.c __CFLogCString().
+      CFBundleRef main_bundle = CFBundleGetMainBundle();
+      CFStringRef main_bundle_id_cf =
+          main_bundle ? CFBundleGetIdentifier(main_bundle) : nullptr;
+      std::string main_bundle_id =
+          main_bundle_id_cf ? base::SysCFStringRefToUTF8(main_bundle_id_cf)
+                            : std::string("");
+#if defined(USE_ASL)
+      // The facility is set to the main bundle ID if available. Otherwise,
+      // "com.apple.console" is used.
+      const class ASLClient {
+       public:
+        explicit ASLClient(const std::string& facility)
+            : client_(asl_open(nullptr, facility.c_str(), ASL_OPT_NO_DELAY)) {}
+        ~ASLClient() { asl_close(client_); }
+
+        aslclient get() const { return client_; }
+
+       private:
+        aslclient client_;
+        DISALLOW_COPY_AND_ASSIGN(ASLClient);
+      } asl_client(main_bundle_id.empty() ? "com.apple.console"
+                                          : main_bundle_id);
+
+      const class ASLMessage {
+       public:
+        ASLMessage() : message_(asl_new(ASL_TYPE_MSG)) {}
+        ~ASLMessage() { asl_free(message_); }
+
+        aslmsg get() const { return message_; }
+
+       private:
+        aslmsg message_;
+        DISALLOW_COPY_AND_ASSIGN(ASLMessage);
+      } asl_message;
+
+      // By default, messages are only readable by the admin group. Explicitly
+      // make them readable by the user generating the messages.
+      char euid_string[12];
+      snprintf(euid_string, arraysize(euid_string), "%d", geteuid());
+      asl_set(asl_message.get(), ASL_KEY_READ_UID, euid_string);
+
+      // Map Chrome log severities to ASL log levels.
+      const char* const asl_level_string = [](LogSeverity severity) {
+        // ASL_LEVEL_* are ints, but ASL needs equivalent strings. This
+        // non-obvious two-step macro trick achieves what's needed.
+        // https://gcc.gnu.org/onlinedocs/cpp/Stringification.html
+#define ASL_LEVEL_STR(level) ASL_LEVEL_STR_X(level)
+#define ASL_LEVEL_STR_X(level) #level
+        switch (severity) {
+          case LOG_INFO:
+            return ASL_LEVEL_STR(ASL_LEVEL_INFO);
+          case LOG_WARNING:
+            return ASL_LEVEL_STR(ASL_LEVEL_WARNING);
+          case LOG_ERROR:
+            return ASL_LEVEL_STR(ASL_LEVEL_ERR);
+          case LOG_FATAL:
+            return ASL_LEVEL_STR(ASL_LEVEL_CRIT);
+          default:
+            return severity < 0 ? ASL_LEVEL_STR(ASL_LEVEL_DEBUG)
+                                : ASL_LEVEL_STR(ASL_LEVEL_NOTICE);
+        }
+#undef ASL_LEVEL_STR
+#undef ASL_LEVEL_STR_X
+      }(severity_);
+      asl_set(asl_message.get(), ASL_KEY_LEVEL, asl_level_string);
+
+      asl_set(asl_message.get(), ASL_KEY_MSG, str_newline.c_str());
+
+      asl_send(asl_client.get(), asl_message.get());
+#else   // !defined(USE_ASL)
+      const class OSLog {
+       public:
+        explicit OSLog(const char* subsystem)
+            : os_log_(subsystem ? os_log_create(subsystem, "chromium_logging")
+                                : OS_LOG_DEFAULT) {}
+        ~OSLog() {
+          if (os_log_ != OS_LOG_DEFAULT) {
+            os_release(os_log_);
+          }
+        }
+        os_log_t get() const { return os_log_; }
+
+       private:
+        os_log_t os_log_;
+        DISALLOW_COPY_AND_ASSIGN(OSLog);
+      } log(main_bundle_id.empty() ? nullptr : main_bundle_id.c_str());
+      const os_log_type_t os_log_type = [](LogSeverity severity) {
+        switch (severity) {
+          case LOG_INFO:
+            return OS_LOG_TYPE_INFO;
+          case LOG_WARNING:
+            return OS_LOG_TYPE_DEFAULT;
+          case LOG_ERROR:
+            return OS_LOG_TYPE_ERROR;
+          case LOG_FATAL:
+            return OS_LOG_TYPE_FAULT;
+          default:
+            return severity < 0 ? OS_LOG_TYPE_DEBUG : OS_LOG_TYPE_DEFAULT;
+        }
+      }(severity_);
+      os_log_with_type(log.get(), os_log_type, "%{public}s",
+                       str_newline.c_str());
+#endif  // defined(USE_ASL)
+    }
+#elif defined(OS_ANDROID)
+    android_LogPriority priority =
+        (severity_ < 0) ? ANDROID_LOG_VERBOSE : ANDROID_LOG_UNKNOWN;
+    switch (severity_) {
+      case LOG_INFO:
+        priority = ANDROID_LOG_INFO;
+        break;
+      case LOG_WARNING:
+        priority = ANDROID_LOG_WARN;
+        break;
+      case LOG_ERROR:
+        priority = ANDROID_LOG_ERROR;
+        break;
+      case LOG_FATAL:
+        priority = ANDROID_LOG_FATAL;
+        break;
+    }
+    __android_log_write(priority, "chromium", str_newline.c_str());
+#endif
+    ignore_result(fwrite(str_newline.data(), str_newline.size(), 1, stderr));
+    fflush(stderr);
+  } else if (severity_ >= kAlwaysPrintErrorLevel) {
+    // When we're only outputting to a log file, above a certain log level, we
+    // should still output to stderr so that we can better detect and diagnose
+    // problems with unit tests, especially on the buildbots.
+    ignore_result(fwrite(str_newline.data(), str_newline.size(), 1, stderr));
+    fflush(stderr);
+  }
+
+  // Write to the log file.
+  if ((g_logging_destination & LOG_TO_FILE) != 0) {
+    // We can have multiple threads and/or processes, so try to prevent them
+    // from clobbering each other's writes.
+    // If the client app did not call InitLogging, and the lock has not
+    // been created do it now. We do this on demand, but if two threads try
+    // to do this at the same time, there will be a race condition to create
+    // the lock. This is why InitLogging should be called from the main
+    // thread at the beginning of execution.
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+    LoggingLock::Init(LOCK_LOG_FILE, nullptr);
+    LoggingLock logging_lock;
+#endif
+    if (InitializeLogFileHandle()) {
+#if defined(OS_WIN)
+      DWORD num_written;
+      WriteFile(g_log_file,
+                static_cast<const void*>(str_newline.c_str()),
+                static_cast<DWORD>(str_newline.length()),
+                &num_written,
+                nullptr);
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+      ignore_result(fwrite(
+          str_newline.data(), str_newline.size(), 1, g_log_file));
+      fflush(g_log_file);
+#else
+#error Unsupported platform
+#endif
+    }
+  }
+
+  if (severity_ == LOG_FATAL) {
+    // Write the log message to the global activity tracker, if running.
+    base::debug::GlobalActivityTracker* tracker =
+        base::debug::GlobalActivityTracker::Get();
+    if (tracker)
+      tracker->RecordLogMessage(str_newline);
+
+    // Ensure the first characters of the string are on the stack so they
+    // are contained in minidumps for diagnostic purposes.
+    DEBUG_ALIAS_FOR_CSTR(str_stack, str_newline.c_str(), 1024);
+
+    if (log_assert_handler_stack.IsCreated() &&
+        !log_assert_handler_stack.Get().empty()) {
+      LogAssertHandlerFunction log_assert_handler =
+          log_assert_handler_stack.Get().top();
+
+      if (log_assert_handler) {
+        log_assert_handler.Run(
+            file_, line_,
+            base::StringPiece(str_newline.c_str() + message_start_,
+                              stack_start - message_start_),
+            base::StringPiece(str_newline.c_str() + stack_start));
+      }
+    } else {
+      // Don't use the string with the newline; get a fresh version to send to
+      // the debug message process. We also don't display assertions to the
+      // user in release mode. The end user can't do anything with this
+      // information, and displaying message boxes when the application is
+      // hosed can cause additional problems.
+#ifndef NDEBUG
+      if (!base::debug::BeingDebugged()) {
+        // Displaying a dialog is unnecessary when debugging and can complicate
+        // debugging.
+        DisplayDebugMessageInDialog(stream_.str());
+      }
+#endif
+      // Crash the process to generate a dump.
+      base::debug::BreakDebugger();
+    }
+  }
+}
+
+// Writes the common header info to the stream.
+void LogMessage::Init(const char* file, int line) {
+  base::StringPiece filename(file);
+  size_t last_slash_pos = filename.find_last_of("\\/");
+  if (last_slash_pos != base::StringPiece::npos)
+    filename.remove_prefix(last_slash_pos + 1);
+
+  // TODO(darin): It might be nice if the columns were fixed width.
+
+  stream_ << '[';
+  if (g_log_process_id)
+    stream_ << CurrentProcessId() << ':';
+  if (g_log_thread_id)
+    stream_ << base::PlatformThread::CurrentId() << ':';
+  if (g_log_timestamp) {
+#if defined(OS_WIN)
+    SYSTEMTIME local_time;
+    GetLocalTime(&local_time);
+    stream_ << std::setfill('0')
+            << std::setw(2) << local_time.wMonth
+            << std::setw(2) << local_time.wDay
+            << '/'
+            << std::setw(2) << local_time.wHour
+            << std::setw(2) << local_time.wMinute
+            << std::setw(2) << local_time.wSecond
+            << '.'
+            << std::setw(3)
+            << local_time.wMilliseconds
+            << ':';
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+    timeval tv;
+    gettimeofday(&tv, nullptr);
+    time_t t = tv.tv_sec;
+    struct tm local_time;
+    localtime_r(&t, &local_time);
+    struct tm* tm_time = &local_time;
+    stream_ << std::setfill('0')
+            << std::setw(2) << 1 + tm_time->tm_mon
+            << std::setw(2) << tm_time->tm_mday
+            << '/'
+            << std::setw(2) << tm_time->tm_hour
+            << std::setw(2) << tm_time->tm_min
+            << std::setw(2) << tm_time->tm_sec
+            << '.'
+            << std::setw(6) << tv.tv_usec
+            << ':';
+#else
+#error Unsupported platform
+#endif
+  }
+  if (g_log_tickcount)
+    stream_ << TickCount() << ':';
+  if (severity_ >= 0)
+    stream_ << log_severity_name(severity_);
+  else
+    stream_ << "VERBOSE" << -severity_;
+
+  stream_ << ":" << filename << "(" << line << ")] ";
+
+  message_start_ = stream_.str().length();
+}
+
+#if defined(OS_WIN)
+// This has already been defined in the header, but defining it again as DWORD
+// ensures that the type used in the header is equivalent to DWORD. If not,
+// the redefinition is a compile error.
+typedef DWORD SystemErrorCode;
+#endif
+
+SystemErrorCode GetLastSystemErrorCode() {
+#if defined(OS_WIN)
+  return ::GetLastError();
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  return errno;
+#endif
+}
+
+BASE_EXPORT std::string SystemErrorCodeToString(SystemErrorCode error_code) {
+#if defined(OS_WIN)
+  const int kErrorMessageBufferSize = 256;
+  char msgbuf[kErrorMessageBufferSize];
+  DWORD flags = FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS;
+  DWORD len = FormatMessageA(flags, nullptr, error_code, 0, msgbuf,
+                             arraysize(msgbuf), nullptr);
+  if (len) {
+    // Messages returned by the system end with line breaks.
+    return base::CollapseWhitespaceASCII(msgbuf, true) +
+           base::StringPrintf(" (0x%lX)", error_code);
+  }
+  return base::StringPrintf("Error (0x%lX) while retrieving error. (0x%lX)",
+                            GetLastError(), error_code);
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  return base::safe_strerror(error_code) +
+         base::StringPrintf(" (%d)", error_code);
+#endif  // defined(OS_WIN)
+}
+
+
+#if defined(OS_WIN)
+Win32ErrorLogMessage::Win32ErrorLogMessage(const char* file,
+                                           int line,
+                                           LogSeverity severity,
+                                           SystemErrorCode err)
+    : err_(err),
+      log_message_(file, line, severity) {
+}
+
+Win32ErrorLogMessage::~Win32ErrorLogMessage() {
+  stream() << ": " << SystemErrorCodeToString(err_);
+  // We're about to crash (CHECK). Put |err_| on the stack (by placing it in a
+  // field) and use Alias in hopes that it makes it into crash dumps.
+  DWORD last_error = err_;
+  base::debug::Alias(&last_error);
+}
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+ErrnoLogMessage::ErrnoLogMessage(const char* file,
+                                 int line,
+                                 LogSeverity severity,
+                                 SystemErrorCode err)
+    : err_(err),
+      log_message_(file, line, severity) {
+}
+
+ErrnoLogMessage::~ErrnoLogMessage() {
+  stream() << ": " << SystemErrorCodeToString(err_);
+  // We're about to crash (CHECK). Put |err_| on the stack (by placing it in a
+  // field) and use Alias in hopes that it makes it into crash dumps.
+  int last_error = err_;
+  base::debug::Alias(&last_error);
+}
+#endif  // defined(OS_WIN)
+
+void CloseLogFile() {
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+  LoggingLock logging_lock;
+#endif
+  CloseLogFileUnlocked();
+}
+
+void RawLog(int level, const char* message) {
+  if (level >= g_min_log_level && message) {
+    size_t bytes_written = 0;
+    const size_t message_len = strlen(message);
+    int rv;
+    while (bytes_written < message_len) {
+      rv = HANDLE_EINTR(
+          write(STDERR_FILENO, message + bytes_written,
+                message_len - bytes_written));
+      if (rv < 0) {
+        // Give up, nothing we can do now.
+        break;
+      }
+      bytes_written += rv;
+    }
+
+    if (message_len > 0 && message[message_len - 1] != '\n') {
+      do {
+        rv = HANDLE_EINTR(write(STDERR_FILENO, "\n", 1));
+        if (rv < 0) {
+          // Give up, nothing we can do now.
+          break;
+        }
+      } while (rv != 1);
+    }
+  }
+
+  if (level == LOG_FATAL)
+    base::debug::BreakDebugger();
+}
+
+// This was defined at the beginning of this file.
+#undef write
+
+#if defined(OS_WIN)
+bool IsLoggingToFileEnabled() {
+  return g_logging_destination & LOG_TO_FILE;
+}
+
+std::wstring GetLogFileFullPath() {
+  if (g_log_file_name)
+    return *g_log_file_name;
+  return std::wstring();
+}
+#endif
+
+BASE_EXPORT void LogErrorNotReached(const char* file, int line) {
+  LogMessage(file, line, LOG_ERROR).stream()
+      << "NOTREACHED() hit.";
+}
+
+}  // namespace logging
+
+std::ostream& std::operator<<(std::ostream& out, const wchar_t* wstr) {
+  return out << (wstr ? base::WideToUTF8(wstr) : std::string());
+}
diff --git a/base/logging.h b/base/logging.h
new file mode 100644
index 0000000..2996059
--- /dev/null
+++ b/base/logging.h
@@ -0,0 +1,1180 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_LOGGING_H_
+#define BASE_LOGGING_H_
+
+#include <stddef.h>
+
+#include <cassert>
+#include <cstring>
+#include <sstream>
+#include <string>
+#include <type_traits>
+#include <utility>
+
+#include "base/base_export.h"
+#include "base/callback_forward.h"
+#include "base/compiler_specific.h"
+#include "base/debug/debugger.h"
+#include "base/macros.h"
+#include "base/strings/string_piece_forward.h"
+#include "base/template_util.h"
+#include "build/build_config.h"
+
+//
+// Optional message capabilities
+// -----------------------------
+// Assertion failed messages and fatal errors are displayed in a dialog box
+// before the application exits. However, running this UI creates a message
+// loop, which causes application messages to be processed and potentially
+// dispatched to existing application windows. Since the application is in a
+// bad state when this assertion dialog is displayed, these messages may not
+// get processed and hang the dialog, or the application might go crazy.
+//
+// Therefore, it can be beneficial to display the error dialog in a separate
+// process from the main application. When the logging system needs to display
+// a fatal error dialog box, it will look for a program called
+// "DebugMessage.exe" in the same directory as the application executable. It
+// will run this program with the message as the command line; to make
+// parsing easier, it will not prepend the application name as is
+// traditional.
+//
+// The code for DebugMessage.exe is only one line. In WinMain, do:
+//   MessageBox(NULL, GetCommandLineW(), L"Fatal Error", 0);
+//
+// If DebugMessage.exe is not found, the logging code will use a normal
+// MessageBox, potentially causing the problems discussed above.
+
+
+// Instructions
+// ------------
+//
+// Make a bunch of macros for logging.  The way to log things is to stream
+// things to LOG(<a particular severity level>).  E.g.,
+//
+//   LOG(INFO) << "Found " << num_cookies << " cookies";
+//
+// You can also do conditional logging:
+//
+//   LOG_IF(INFO, num_cookies > 10) << "Got lots of cookies";
+//
+// The CHECK(condition) macro is active in both debug and release builds and
+// effectively performs a LOG(FATAL) which terminates the process and
+// generates a crashdump unless a debugger is attached.
+//
+// There are also "debug mode" logging macros like the ones above:
+//
+//   DLOG(INFO) << "Found cookies";
+//
+//   DLOG_IF(INFO, num_cookies > 10) << "Got lots of cookies";
+//
+// All "debug mode" logging is compiled away to nothing for non-debug mode
+// compiles.  LOG_IF also combines well with development flags, since the
+// compiler can eliminate the logging code when the condition is known to be
+// false at compile time.
+//
+// We also have
+//
+//   LOG_ASSERT(assertion);
+//   DLOG_ASSERT(assertion);
+//
+// which is syntactic sugar for {,D}LOG_IF(FATAL, assert fails) << assertion;
+//
+// There are "verbose level" logging macros.  They look like
+//
+//   VLOG(1) << "I'm printed when you run the program with --v=1 or more";
+//   VLOG(2) << "I'm printed when you run the program with --v=2 or more";
+//
+// These always log at the INFO log level (when they log at all).
+// The verbose logging can also be turned on module-by-module.  For instance,
+//    --vmodule=profile=2,icon_loader=1,browser_*=3,*/chromeos/*=4 --v=0
+// will cause:
+//   a. VLOG(2) and lower messages to be printed from profile.{h,cc}
+//   b. VLOG(1) and lower messages to be printed from icon_loader.{h,cc}
+//   c. VLOG(3) and lower messages to be printed from files prefixed with
+//      "browser"
+//   d. VLOG(4) and lower messages to be printed from files under a
+//     "chromeos" directory.
+//   e. VLOG(0) and lower messages to be printed from elsewhere
+//
+// The wildcarding functionality shown by (c) supports both '*' (match
+// 0 or more characters) and '?' (match any single character)
+// wildcards.  Any pattern containing a forward or backward slash will
+// be tested against the whole pathname and not just the module.
+// E.g., "*/foo/bar/*=2" would change the logging level for all code
+// in source files under a "foo/bar" directory.
+//
+// There's also a VLOG_IS_ON(n) "verbose level" condition macro, to be used as
+//
+//   if (VLOG_IS_ON(2)) {
+//     // do some logging preparation and logging
+//     // that can't be accomplished with just VLOG(2) << ...;
+//   }
+//
+// There is also a VLOG_IF "verbose level" condition macro for the simple
+// cases where the extra computation and preparation for logging is not
+// needed.
+//
+//   VLOG_IF(1, (size > 1024))
+//      << "I'm printed when size is more than 1024 and when you run the "
+//         "program with --v=1 or more";
+//
+// We also override the standard 'assert' to use 'DLOG_ASSERT'.
+//
+// Lastly, there is:
+//
+//   PLOG(ERROR) << "Couldn't do foo";
+//   DPLOG(ERROR) << "Couldn't do foo";
+//   PLOG_IF(ERROR, cond) << "Couldn't do foo";
+//   DPLOG_IF(ERROR, cond) << "Couldn't do foo";
+//   PCHECK(condition) << "Couldn't do foo";
+//   DPCHECK(condition) << "Couldn't do foo";
+//
+// which append the last system error to the message in string form (taken from
+// GetLastError() on Windows and errno on POSIX).
+//
+// The supported severity levels for macros that allow you to specify one
+// are (in increasing order of severity) INFO, WARNING, ERROR, and FATAL.
+//
+// Very important: logging a message at the FATAL severity level causes
+// the program to terminate (after the message is logged).
+//
+// There is the special severity of DFATAL, which logs FATAL in debug mode,
+// ERROR in normal mode.
+
+namespace logging {
+
+// TODO(avi): do we want to do a unification of character types here?
+#if defined(OS_WIN)
+typedef wchar_t PathChar;
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+typedef char PathChar;
+#endif
+
+// Where to record logging output? A flat file and/or system debug log
+// via OutputDebugString.
+enum LoggingDestination {
+  LOG_NONE                = 0,
+  LOG_TO_FILE             = 1 << 0,
+  LOG_TO_SYSTEM_DEBUG_LOG = 1 << 1,
+
+  LOG_TO_ALL = LOG_TO_FILE | LOG_TO_SYSTEM_DEBUG_LOG,
+
+  // On Windows, use a file next to the exe; on POSIX platforms, where
+  // it may not even be possible to locate the executable on disk, use
+  // stderr.
+#if defined(OS_WIN)
+  LOG_DEFAULT = LOG_TO_FILE,
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  LOG_DEFAULT = LOG_TO_SYSTEM_DEBUG_LOG,
+#endif
+};
+
+// Indicates that the log file should be locked when being written to.
+// Unless there is only one single-threaded process that is logging to
+// the log file, the file should be locked during writes to make each
+// log output atomic. Other writers will block.
+//
+// All processes writing to the log file must have their locking set for it to
+// work properly. Defaults to LOCK_LOG_FILE.
+enum LogLockingState { LOCK_LOG_FILE, DONT_LOCK_LOG_FILE };
+
+// On startup, should we delete or append to an existing log file (if any)?
+// Defaults to APPEND_TO_OLD_LOG_FILE.
+enum OldFileDeletionState { DELETE_OLD_LOG_FILE, APPEND_TO_OLD_LOG_FILE };
+
+struct BASE_EXPORT LoggingSettings {
+  // The default values are:
+  //
+  //  logging_dest: LOG_DEFAULT
+  //  log_file:     NULL
+  //  lock_log:     LOCK_LOG_FILE
+  //  delete_old:   APPEND_TO_OLD_LOG_FILE
+  LoggingSettings();
+
+  LoggingDestination logging_dest;
+
+  // The three settings below have an effect only when LOG_TO_FILE is
+  // set in |logging_dest|.
+  const PathChar* log_file;
+  LogLockingState lock_log;
+  OldFileDeletionState delete_old;
+};
+
+// Define different names for the BaseInitLoggingImpl() function depending on
+// whether NDEBUG is defined or not so that we'll fail to link if someone tries
+// to compile logging.cc with NDEBUG but includes logging.h without defining it,
+// or vice versa.
+#if defined(NDEBUG)
+#define BaseInitLoggingImpl BaseInitLoggingImpl_built_with_NDEBUG
+#else
+#define BaseInitLoggingImpl BaseInitLoggingImpl_built_without_NDEBUG
+#endif
+
+// Implementation of the InitLogging() method declared below.  We use a
+// more-specific name so we can #define it above without affecting other code
+// that has named stuff "InitLogging".
+BASE_EXPORT bool BaseInitLoggingImpl(const LoggingSettings& settings);
+
+// Sets the log file name and other global logging state. Calling this function
+// is recommended, and is normally done at the beginning of application init.
+// If you don't call it, all the flags will be initialized to their default
+// values, and there is a race condition that may leak a critical section
+// object if two threads try to do the first log at the same time.
+// See the definition of the enums above for descriptions and default values.
+//
+// The default log file is initialized to "debug.log" in the application
+// directory. You probably don't want this, especially since the program
+// directory may not be writable on an enduser's system.
+//
+// This function may be called a second time to re-direct logging (e.g. after
+// logging in to a user partition), but it should never be called more than
+// twice.
+inline bool InitLogging(const LoggingSettings& settings) {
+  return BaseInitLoggingImpl(settings);
+}
+
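+// A minimal sketch of typical startup code (the destination and file name
+// below are purely illustrative):
+//
+//   logging::LoggingSettings settings;
+//   settings.logging_dest = logging::LOG_TO_FILE;
+//   settings.log_file = ...;  // A PathChar* path of your choosing.
+//   logging::InitLogging(settings);
+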
+// Sets the log level. Anything at or above this level will be written to the
+// log file/displayed to the user (if applicable). Anything below this level
+// will be silently ignored. The log level defaults to 0 (everything is logged
+// up to level INFO) if this function is not called.
+// Note that log messages for VLOG(x) are logged at level -x, so setting
+// the min log level to negative values enables verbose logging.
+BASE_EXPORT void SetMinLogLevel(int level);
+
+// Gets the current log level.
+BASE_EXPORT int GetMinLogLevel();
+
+// Used by LOG_IS_ON to lazy-evaluate stream arguments.
+BASE_EXPORT bool ShouldCreateLogMessage(int severity);
+
+// Gets the VLOG default verbosity level.
+BASE_EXPORT int GetVlogVerbosity();
+
+// Note that |N| is the size *with* the null terminator.
+BASE_EXPORT int GetVlogLevelHelper(const char* file_start, size_t N);
+
+// Gets the current vlog level for the given file (usually taken from __FILE__).
+template <size_t N>
+int GetVlogLevel(const char (&file)[N]) {
+  return GetVlogLevelHelper(file, N);
+}
+
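+// For example, VLOG_IS_ON (defined below) calls GetVlogLevel(__FILE__); |N|
+// is then deduced from the string literal, including its terminating '\0',
+// which is what GetVlogLevelHelper expects.
+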
+// Sets the common items you want to be prepended to each log message.
+// Process and thread IDs default to off; the timestamp defaults to on.
+// If this function is not called, logging defaults to writing the timestamp
+// only.
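+//
+// For example, SetLogItems(true, true, true, false) turns on the process ID,
+// thread ID, and timestamp prefixes, but leaves the tick count off.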
+BASE_EXPORT void SetLogItems(bool enable_process_id, bool enable_thread_id,
+                             bool enable_timestamp, bool enable_tickcount);
+
+// Sets whether fatal debug messages are popped up in a dialog box.
+// Dialogs are not shown by default.
+BASE_EXPORT void SetShowErrorDialogs(bool enable_dialogs);
+
+// Sets the Log Assert Handler that will be used to notify of check failures.
+// The handler is reset when the ScopedLogAssertHandler is destroyed.
+// The default handler shows a dialog box and then terminates the process;
+// clients can use this class to override that with their own handling
+// (e.g. a silent one for unit tests).
+using LogAssertHandlerFunction =
+    base::Callback<void(const char* file,
+                        int line,
+                        const base::StringPiece message,
+                        const base::StringPiece stack_trace)>;
+
+class BASE_EXPORT ScopedLogAssertHandler {
+ public:
+  explicit ScopedLogAssertHandler(LogAssertHandlerFunction handler);
+  ~ScopedLogAssertHandler();
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ScopedLogAssertHandler);
+};
+
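+// A sketch of a silent handler for tests, mirroring the usage in
+// logging_unittest.cc (LogSink is a free function you supply):
+//
+//   void LogSink(const char* file, int line,
+//                const base::StringPiece message,
+//                const base::StringPiece stack_trace) {}
+//   ...
+//   logging::ScopedLogAssertHandler handler(base::Bind(LogSink));
+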
+// Sets the Log Message Handler that gets passed every log message before
+// it's sent to other log destinations (if any).
+// Returns true to signal that it handled the message and the message
+// should not be sent to other log destinations.
+typedef bool (*LogMessageHandlerFunction)(int severity,
+    const char* file, int line, size_t message_start, const std::string& str);
+BASE_EXPORT void SetLogMessageHandler(LogMessageHandlerFunction handler);
+BASE_EXPORT LogMessageHandlerFunction GetLogMessageHandler();
+
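+// A sketch of a handler that swallows every message; the name
+// HandleLogMessage is illustrative. |message_start| is the offset of the
+// message body, past the bracketed prefix that LogMessage prepends:
+//
+//   bool HandleLogMessage(int severity, const char* file, int line,
+//                         size_t message_start, const std::string& str) {
+//     return true;  // Handled; suppress other destinations.
+//   }
+//   ...
+//   logging::SetLogMessageHandler(&HandleLogMessage);
+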
+// The ANALYZER_ASSUME_TRUE(bool arg) macro adds compiler-specific hints
+// to Clang which control what code paths are statically analyzed,
+// and is meant to be used in conjunction with assert & assert-like functions.
+// The expression is passed straight through if analysis isn't enabled.
+//
+// ANALYZER_SKIP_THIS_PATH() suppresses static analysis for the current
+// codepath and any other branching codepaths that might follow.
+#if defined(__clang_analyzer__)
+
+inline constexpr bool AnalyzerNoReturn() __attribute__((analyzer_noreturn)) {
+  return false;
+}
+
+inline constexpr bool AnalyzerAssumeTrue(bool arg) {
+  // AnalyzerNoReturn() is invoked and analysis is terminated if |arg| is
+  // false.
+  return arg || AnalyzerNoReturn();
+}
+
+#define ANALYZER_ASSUME_TRUE(arg) logging::AnalyzerAssumeTrue(!!(arg))
+#define ANALYZER_SKIP_THIS_PATH() \
+  static_cast<void>(::logging::AnalyzerNoReturn())
+#define ANALYZER_ALLOW_UNUSED(var) static_cast<void>(var);
+
+#else  // !defined(__clang_analyzer__)
+
+#define ANALYZER_ASSUME_TRUE(arg) (arg)
+#define ANALYZER_SKIP_THIS_PATH()
+#define ANALYZER_ALLOW_UNUSED(var) static_cast<void>(var);
+
+#endif  // defined(__clang_analyzer__)
+
+typedef int LogSeverity;
+const LogSeverity LOG_VERBOSE = -1;  // This is level 1 verbosity
+// Note: the log severities are used to index into the array of names,
+// see log_severity_names.
+const LogSeverity LOG_INFO = 0;
+const LogSeverity LOG_WARNING = 1;
+const LogSeverity LOG_ERROR = 2;
+const LogSeverity LOG_FATAL = 3;
+const LogSeverity LOG_NUM_SEVERITIES = 4;
+
+// LOG_DFATAL is LOG_FATAL in debug mode, ERROR in normal mode
+#if defined(NDEBUG)
+const LogSeverity LOG_DFATAL = LOG_ERROR;
+#else
+const LogSeverity LOG_DFATAL = LOG_FATAL;
+#endif
+
+// A few definitions of macros that don't generate much code. These are used
+// by LOG() and LOG_IF, etc. Since these are used all over our code, it's
+// better to have compact code for these operations.
+#define COMPACT_GOOGLE_LOG_EX_INFO(ClassName, ...) \
+  ::logging::ClassName(__FILE__, __LINE__, ::logging::LOG_INFO, ##__VA_ARGS__)
+#define COMPACT_GOOGLE_LOG_EX_WARNING(ClassName, ...)              \
+  ::logging::ClassName(__FILE__, __LINE__, ::logging::LOG_WARNING, \
+                       ##__VA_ARGS__)
+#define COMPACT_GOOGLE_LOG_EX_ERROR(ClassName, ...) \
+  ::logging::ClassName(__FILE__, __LINE__, ::logging::LOG_ERROR, ##__VA_ARGS__)
+#define COMPACT_GOOGLE_LOG_EX_FATAL(ClassName, ...) \
+  ::logging::ClassName(__FILE__, __LINE__, ::logging::LOG_FATAL, ##__VA_ARGS__)
+#define COMPACT_GOOGLE_LOG_EX_DFATAL(ClassName, ...) \
+  ::logging::ClassName(__FILE__, __LINE__, ::logging::LOG_DFATAL, ##__VA_ARGS__)
+#define COMPACT_GOOGLE_LOG_EX_DCHECK(ClassName, ...) \
+  ::logging::ClassName(__FILE__, __LINE__, ::logging::LOG_DCHECK, ##__VA_ARGS__)
+
+#define COMPACT_GOOGLE_LOG_INFO COMPACT_GOOGLE_LOG_EX_INFO(LogMessage)
+#define COMPACT_GOOGLE_LOG_WARNING COMPACT_GOOGLE_LOG_EX_WARNING(LogMessage)
+#define COMPACT_GOOGLE_LOG_ERROR COMPACT_GOOGLE_LOG_EX_ERROR(LogMessage)
+#define COMPACT_GOOGLE_LOG_FATAL COMPACT_GOOGLE_LOG_EX_FATAL(LogMessage)
+#define COMPACT_GOOGLE_LOG_DFATAL COMPACT_GOOGLE_LOG_EX_DFATAL(LogMessage)
+#define COMPACT_GOOGLE_LOG_DCHECK COMPACT_GOOGLE_LOG_EX_DCHECK(LogMessage)
+
+#if defined(OS_WIN)
+// wingdi.h defines ERROR to be 0. When we call LOG(ERROR), it gets
+// substituted with 0, and it expands to COMPACT_GOOGLE_LOG_0. To allow us
+// to keep using this syntax, we define this macro to do the same thing
+// as COMPACT_GOOGLE_LOG_ERROR, and also define ERROR the same way that
+// the Windows SDK does for consistency.
+#define ERROR 0
+#define COMPACT_GOOGLE_LOG_EX_0(ClassName, ...) \
+  COMPACT_GOOGLE_LOG_EX_ERROR(ClassName , ##__VA_ARGS__)
+#define COMPACT_GOOGLE_LOG_0 COMPACT_GOOGLE_LOG_ERROR
+// Needed for LOG_IS_ON(ERROR).
+const LogSeverity LOG_0 = LOG_ERROR;
+#endif
+
+// As special cases, we can assume that LOG_IS_ON(FATAL) always holds. Also,
+// LOG_IS_ON(DFATAL) always holds in debug mode. In particular, CHECK()s will
+// always fire if they fail.
+#define LOG_IS_ON(severity) \
+  (::logging::ShouldCreateLogMessage(::logging::LOG_##severity))
+
+// We can't do any caching tricks with VLOG_IS_ON() like the
+// google-glog version since it requires GCC extensions.  This means
+// that using the v-logging functions in conjunction with --vmodule
+// may be slow.
+#define VLOG_IS_ON(verboselevel) \
+  ((verboselevel) <= ::logging::GetVlogLevel(__FILE__))
+
+// Helper macro which avoids evaluating the arguments to a stream if
+// the condition doesn't hold. Condition is evaluated once and only once.
+#define LAZY_STREAM(stream, condition)                                  \
+  !(condition) ? (void) 0 : ::logging::LogMessageVoidify() & (stream)
+
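+// For example, LOG(INFO) below is built on LAZY_STREAM, so in
+// LOG(INFO) << ExpensiveToFormat(); the (hypothetical) ExpensiveToFormat()
+// is never called when LOG_IS_ON(INFO) is false: the stream expression sits
+// on the unevaluated arm of the ternary.
+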
+// We use the preprocessor's merging operator, "##", so that, e.g.,
+// LOG(INFO) becomes the token COMPACT_GOOGLE_LOG_INFO.  There's a subtle
+// difference between ostream member streaming functions (e.g.,
+// ostream::operator<<(int)) and ostream non-member streaming functions
+// (e.g., ::operator<<(ostream&, string&)): it turns out that it's
+// impossible to stream something like a string directly to an unnamed
+// ostream. We employ a neat hack: calling the stream() member
+// function of LogMessage avoids the problem.
+#define LOG_STREAM(severity) COMPACT_GOOGLE_LOG_ ## severity.stream()
+
+#define LOG(severity) LAZY_STREAM(LOG_STREAM(severity), LOG_IS_ON(severity))
+#define LOG_IF(severity, condition) \
+  LAZY_STREAM(LOG_STREAM(severity), LOG_IS_ON(severity) && (condition))
+
+// The VLOG macros log with negative verbosities.
+#define VLOG_STREAM(verbose_level) \
+  ::logging::LogMessage(__FILE__, __LINE__, -verbose_level).stream()
+
+#define VLOG(verbose_level) \
+  LAZY_STREAM(VLOG_STREAM(verbose_level), VLOG_IS_ON(verbose_level))
+
+#define VLOG_IF(verbose_level, condition) \
+  LAZY_STREAM(VLOG_STREAM(verbose_level), \
+      VLOG_IS_ON(verbose_level) && (condition))
+
+#if defined(OS_WIN)
+#define VPLOG_STREAM(verbose_level) \
+  ::logging::Win32ErrorLogMessage(__FILE__, __LINE__, -verbose_level, \
+    ::logging::GetLastSystemErrorCode()).stream()
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+#define VPLOG_STREAM(verbose_level) \
+  ::logging::ErrnoLogMessage(__FILE__, __LINE__, -verbose_level, \
+    ::logging::GetLastSystemErrorCode()).stream()
+#endif
+
+#define VPLOG(verbose_level) \
+  LAZY_STREAM(VPLOG_STREAM(verbose_level), VLOG_IS_ON(verbose_level))
+
+#define VPLOG_IF(verbose_level, condition) \
+  LAZY_STREAM(VPLOG_STREAM(verbose_level), \
+    VLOG_IS_ON(verbose_level) && (condition))
+
+// TODO(akalin): Add more VLOG variants.
+
+#define LOG_ASSERT(condition)                       \
+  LOG_IF(FATAL, !(ANALYZER_ASSUME_TRUE(condition))) \
+      << "Assert failed: " #condition ". "
+
+#if defined(OS_WIN)
+#define PLOG_STREAM(severity) \
+  COMPACT_GOOGLE_LOG_EX_ ## severity(Win32ErrorLogMessage, \
+      ::logging::GetLastSystemErrorCode()).stream()
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+#define PLOG_STREAM(severity) \
+  COMPACT_GOOGLE_LOG_EX_ ## severity(ErrnoLogMessage, \
+      ::logging::GetLastSystemErrorCode()).stream()
+#endif
+
+#define PLOG(severity)                                          \
+  LAZY_STREAM(PLOG_STREAM(severity), LOG_IS_ON(severity))
+
+#define PLOG_IF(severity, condition) \
+  LAZY_STREAM(PLOG_STREAM(severity), LOG_IS_ON(severity) && (condition))
+
+BASE_EXPORT extern std::ostream* g_swallow_stream;
+
+// Note that g_swallow_stream is used instead of an arbitrary LOG() stream to
+// avoid the creation of an object with a non-trivial destructor (LogMessage).
+// On MSVC x86 (checked on 2015 Update 3), this causes a few additional
+// pointless instructions to be emitted even at full optimization level, even
+// though the : arm of the ternary operator is clearly never executed. Using a
+// simpler object to be &'d with Voidify() avoids these extra instructions.
+// Using a simpler POD object with a templated operator<< also works to avoid
+// these instructions. However, this causes warnings on statically defined
+// implementations of operator<<(std::ostream, ...) in some .cc files, because
+// they become defined-but-unreferenced functions. A reinterpret_cast of 0 to an
+// ostream* also is not suitable, because some compilers warn of undefined
+// behavior.
+#define EAT_STREAM_PARAMETERS \
+  true ? (void)0              \
+       : ::logging::LogMessageVoidify() & (*::logging::g_swallow_stream)
+
+// Captures the result of a CHECK_EQ (for example) and facilitates testing as a
+// boolean.
+class CheckOpResult {
+ public:
+  // |message| must be non-null if and only if the check failed.
+  CheckOpResult(std::string* message) : message_(message) {}
+  // Returns true if the check succeeded.
+  operator bool() const { return !message_; }
+  // Returns the message.
+  std::string* message() { return message_; }
+
+ private:
+  std::string* message_;
+};
+
+// Crashes in the fastest possible way with no attempt at logging.
+// There are different constraints to satisfy here, see http://crbug.com/664209
+// for more context:
+// - The trap instructions, and hence the PC value at crash time, have to be
+//   distinct and not get folded into the same opcode by the compiler.
+//   On Linux/Android this is tricky because GCC still folds identical
+//   asm volatile blocks. The workaround is generating distinct opcodes for
+//   each CHECK using the __COUNTER__ macro.
+// - The debug info for the trap instruction has to be attributed to the source
+//   line that has the CHECK(), to make crash reports actionable. This rules
+//   out using an inline function, at least as long as clang
+//   doesn't support attribute(artificial).
+// - Failed CHECKs should produce a signal that is distinguishable from an
+//   invalid memory access, to improve the actionability of crash reports.
+// - The compiler should treat the CHECK as no-return instructions, so that the
+//   trap code can be efficiently packed in the prologue of the function and
+//   doesn't interfere with the main execution flow.
+// - When debugging, developers shouldn't be able to accidentally step over a
+//   CHECK. This is achieved by putting opcodes that will cause a
+//   non-continuable exception after the actual trap instruction.
+// - Don't cause too much binary bloat.
+#if defined(COMPILER_GCC)
+
+#if defined(ARCH_CPU_X86_FAMILY) && !defined(OS_NACL)
+// int 3 will generate a SIGTRAP.
+#define TRAP_SEQUENCE() \
+  asm volatile(         \
+      "int3; ud2; push %0;" ::"i"(static_cast<unsigned char>(__COUNTER__)))
+
+#elif defined(ARCH_CPU_ARMEL) && !defined(OS_NACL)
+// bkpt will generate a SIGBUS when running on armv7 and a SIGTRAP when running
+// as a 32 bit userspace app on arm64. There doesn't seem to be any way to
+// cause a SIGTRAP from userspace without using a syscall (which would be a
+// problem for sandboxing).
+#define TRAP_SEQUENCE() \
+  asm volatile("bkpt #0; udf %0;" ::"i"(__COUNTER__ % 256))
+
+#elif defined(ARCH_CPU_ARM64) && !defined(OS_NACL)
+// This will always generate a SIGTRAP on arm64.
+#define TRAP_SEQUENCE() \
+  asm volatile("brk #0; hlt %0;" ::"i"(__COUNTER__ % 65536))
+
+#else
+// Crash report accuracy will not be guaranteed on other architectures, but at
+// least this will crash as expected.
+#define TRAP_SEQUENCE() __builtin_trap()
+#endif  // ARCH_CPU_*
+
+// CHECK() and the trap sequence can be invoked from a constexpr function.
+// This could make compilation fail on GCC, as it forbids directly using inline
+// asm inside a constexpr function. However, it allows calling a lambda
+// expression including the same asm.
+// The side effect is that the top of the stacktrace will not point to the
+// calling function, but to this anonymous lambda. This is still useful as the
+// full name of the lambda will typically include the name of the function that
+// calls CHECK() and the debugger will still break at the right line of code.
+#if !defined(__clang__)
+#define WRAPPED_TRAP_SEQUENCE() \
+  do {                          \
+    [] { TRAP_SEQUENCE(); }();  \
+  } while (false)
+#else
+#define WRAPPED_TRAP_SEQUENCE() TRAP_SEQUENCE()
+#endif
+
+#define IMMEDIATE_CRASH()    \
+  ({                         \
+    WRAPPED_TRAP_SEQUENCE(); \
+    __builtin_unreachable(); \
+  })
+
+#elif defined(COMPILER_MSVC)
+
+// Clang is cleverer about coalescing int3s, so we need to add a unique-ish
+// instruction following the __debugbreak() to have it emit distinct locations
+// for CHECKs rather than collapsing them all together. It would be nice to use
+// a short intrinsic to do this (and perhaps have only one implementation for
+// both clang and MSVC), however clang-cl currently does not support intrinsics.
+// On the flip side, MSVC x64 doesn't support inline asm. So, we have to have
+// two implementations. Normally clang-cl's version will be 5 bytes (1 for
+// `int3`, 2 for `ud2`, 2 for `push byte imm`); however, TODO(scottmg):
+// https://crbug.com/694670 clang-cl doesn't currently support %'ing
+// __COUNTER__, so eventually it will emit the dword form of push.
+// TODO(scottmg): Reinvestigate a short sequence that will work on both
+// compilers once clang supports more intrinsics. See https://crbug.com/693713.
+#if defined(__clang__)
+#define IMMEDIATE_CRASH()                           \
+  ({                                                \
+    {__asm int 3 __asm ud2 __asm push __COUNTER__}; \
+    __builtin_unreachable();                        \
+  })
+#else
+#define IMMEDIATE_CRASH() __debugbreak()
+#endif  // __clang__
+
+#else
+#error Port
+#endif
+
+// CHECK dies with a fatal error if condition is not true.  It is *not*
+// controlled by NDEBUG, so the check will be executed regardless of
+// compilation mode.
+//
+// We make sure CHECK et al. always evaluate their arguments, as
+// doing CHECK(FunctionWithSideEffect()) is a common idiom.
+
+#if defined(OFFICIAL_BUILD) && defined(NDEBUG)
+
+// Make all CHECK functions discard their log strings to reduce code bloat, and
+// improve performance, for official release builds.
+//
+// This is not calling BreakDebugger since this is called frequently, and
+// calling an out-of-line function instead of a noreturn inline macro prevents
+// compiler optimizations.
+#define CHECK(condition) \
+  UNLIKELY(!(condition)) ? IMMEDIATE_CRASH() : EAT_STREAM_PARAMETERS
+
+// PCHECK includes the system error code, which is useful for determining
+// why the condition failed. In official builds, preserve only the error code
+// message so that it is available in crash reports. The stringified
+// condition and any additional stream parameters are dropped.
+#define PCHECK(condition)                                  \
+  LAZY_STREAM(PLOG_STREAM(FATAL), UNLIKELY(!(condition))); \
+  EAT_STREAM_PARAMETERS
+
+#define CHECK_OP(name, op, val1, val2) CHECK((val1) op (val2))
+
+#else  // !(OFFICIAL_BUILD && NDEBUG)
+
+#if defined(_PREFAST_) && defined(OS_WIN)
+// Use __analysis_assume to tell the VC++ static analysis engine that
+// assert conditions are true, to suppress warnings.  The LAZY_STREAM
+// parameter doesn't reference 'condition' in /analyze builds because
+// this evaluation confuses /analyze. The !! before condition is because
+// __analysis_assume gets confused on some conditions:
+// http://randomascii.wordpress.com/2011/09/13/analyze-for-visual-studio-the-ugly-part-5/
+
+#define CHECK(condition)                    \
+  __analysis_assume(!!(condition)),         \
+      LAZY_STREAM(LOG_STREAM(FATAL), false) \
+          << "Check failed: " #condition ". "
+
+#define PCHECK(condition)                    \
+  __analysis_assume(!!(condition)),          \
+      LAZY_STREAM(PLOG_STREAM(FATAL), false) \
+          << "Check failed: " #condition ". "
+
+#else  // _PREFAST_
+
+// Do as much work as possible out of line to reduce inline code size.
+#define CHECK(condition)                                                      \
+  LAZY_STREAM(::logging::LogMessage(__FILE__, __LINE__, #condition).stream(), \
+              !ANALYZER_ASSUME_TRUE(condition))
+
+#define PCHECK(condition)                                           \
+  LAZY_STREAM(PLOG_STREAM(FATAL), !ANALYZER_ASSUME_TRUE(condition)) \
+      << "Check failed: " #condition ". "
+
+#endif  // _PREFAST_
+
+// Helper macro for binary operators.
+// Don't use this macro directly in your code, use CHECK_EQ et al below.
+// The 'switch' is used to prevent the 'else' from being ambiguous when the
+// macro is used in an 'if' clause such as:
+// if (a == 1)
+//   CHECK_EQ(2, a);
+#define CHECK_OP(name, op, val1, val2)                                         \
+  switch (0) case 0: default:                                                  \
+  if (::logging::CheckOpResult true_if_passed =                                \
+      ::logging::Check##name##Impl((val1), (val2),                             \
+                                   #val1 " " #op " " #val2))                   \
+   ;                                                                           \
+  else                                                                         \
+    ::logging::LogMessage(__FILE__, __LINE__, true_if_passed.message()).stream()
+
+#endif  // !(OFFICIAL_BUILD && NDEBUG)
+
+// This formats a value for a failing CHECK_XX statement.  Ordinarily,
+// it uses the definition for operator<<, with a few special cases below.
+template <typename T>
+inline typename std::enable_if<
+    base::internal::SupportsOstreamOperator<const T&>::value &&
+        !std::is_function<typename std::remove_pointer<T>::type>::value,
+    void>::type
+MakeCheckOpValueString(std::ostream* os, const T& v) {
+  (*os) << v;
+}
+
+// Provide an overload for functions and function pointers. Function pointers
+// don't implicitly convert to void* but do implicitly convert to bool, so
+// without this function pointers are always printed as 1 or 0. (MSVC isn't
+// standards-conforming here and converts function pointers to regular
+// pointers, so this is a no-op for MSVC.)
+template <typename T>
+inline typename std::enable_if<
+    std::is_function<typename std::remove_pointer<T>::type>::value,
+    void>::type
+MakeCheckOpValueString(std::ostream* os, const T& v) {
+  (*os) << reinterpret_cast<const void*>(v);
+}
+
+// We need overloads for enums that don't support operator<<.
+// (i.e. scoped enums where no operator<< overload was declared).
+template <typename T>
+inline typename std::enable_if<
+    !base::internal::SupportsOstreamOperator<const T&>::value &&
+        std::is_enum<T>::value,
+    void>::type
+MakeCheckOpValueString(std::ostream* os, const T& v) {
+  (*os) << static_cast<typename std::underlying_type<T>::type>(v);
+}
+
+// We need an explicit overload for std::nullptr_t.
+BASE_EXPORT void MakeCheckOpValueString(std::ostream* os, std::nullptr_t p);
+
+// Build the error message string.  This is separate from the "Impl"
+// function template because it is not performance critical and so can
+// be out of line, while the "Impl" code should be inline.  Caller
+// takes ownership of the returned string.
+template<class t1, class t2>
+std::string* MakeCheckOpString(const t1& v1, const t2& v2, const char* names) {
+  std::ostringstream ss;
+  ss << names << " (";
+  MakeCheckOpValueString(&ss, v1);
+  ss << " vs. ";
+  MakeCheckOpValueString(&ss, v2);
+  ss << ")";
+  std::string* msg = new std::string(ss.str());
+  return msg;
+}
+
+// Commonly used instantiations of MakeCheckOpString<>. Explicitly instantiated
+// in logging.cc.
+extern template BASE_EXPORT std::string* MakeCheckOpString<int, int>(
+    const int&, const int&, const char* names);
+extern template BASE_EXPORT
+std::string* MakeCheckOpString<unsigned long, unsigned long>(
+    const unsigned long&, const unsigned long&, const char* names);
+extern template BASE_EXPORT
+std::string* MakeCheckOpString<unsigned long, unsigned int>(
+    const unsigned long&, const unsigned int&, const char* names);
+extern template BASE_EXPORT
+std::string* MakeCheckOpString<unsigned int, unsigned long>(
+    const unsigned int&, const unsigned long&, const char* names);
+extern template BASE_EXPORT
+std::string* MakeCheckOpString<std::string, std::string>(
+    const std::string&, const std::string&, const char* names);
+
+// Helper functions for CHECK_OP macro.
+// The (int, int) specialization works around the issue that the compiler
+// will not instantiate the template version of the function on values of
+// unnamed enum type - see comment below.
+//
+// The checked condition is wrapped with ANALYZER_ASSUME_TRUE, which under
+// static analysis builds, blocks analysis of the current path if the
+// condition is false.
+#define DEFINE_CHECK_OP_IMPL(name, op)                                       \
+  template <class t1, class t2>                                              \
+  inline std::string* Check##name##Impl(const t1& v1, const t2& v2,          \
+                                        const char* names) {                 \
+    if (ANALYZER_ASSUME_TRUE(v1 op v2))                                      \
+      return NULL;                                                           \
+    else                                                                     \
+      return ::logging::MakeCheckOpString(v1, v2, names);                    \
+  }                                                                          \
+  inline std::string* Check##name##Impl(int v1, int v2, const char* names) { \
+    if (ANALYZER_ASSUME_TRUE(v1 op v2))                                      \
+      return NULL;                                                           \
+    else                                                                     \
+      return ::logging::MakeCheckOpString(v1, v2, names);                    \
+  }
+DEFINE_CHECK_OP_IMPL(EQ, ==)
+DEFINE_CHECK_OP_IMPL(NE, !=)
+DEFINE_CHECK_OP_IMPL(LE, <=)
+DEFINE_CHECK_OP_IMPL(LT, < )
+DEFINE_CHECK_OP_IMPL(GE, >=)
+DEFINE_CHECK_OP_IMPL(GT, > )
+#undef DEFINE_CHECK_OP_IMPL
+
+#define CHECK_EQ(val1, val2) CHECK_OP(EQ, ==, val1, val2)
+#define CHECK_NE(val1, val2) CHECK_OP(NE, !=, val1, val2)
+#define CHECK_LE(val1, val2) CHECK_OP(LE, <=, val1, val2)
+#define CHECK_LT(val1, val2) CHECK_OP(LT, < , val1, val2)
+#define CHECK_GE(val1, val2) CHECK_OP(GE, >=, val1, val2)
+#define CHECK_GT(val1, val2) CHECK_OP(GT, > , val1, val2)
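+
+// As an illustration, a failed CHECK_EQ(x, y) with x == 1 and y == 2 logs a
+// FATAL message of the form "Check failed: x == y (1 vs. 2)", where the
+// "(1 vs. 2)" part is built by MakeCheckOpString above.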
+
+#if defined(NDEBUG) && !defined(DCHECK_ALWAYS_ON)
+#define DCHECK_IS_ON() 0
+#else
+#define DCHECK_IS_ON() 1
+#endif
+
+// Definitions for DLOG et al.
+
+#if DCHECK_IS_ON()
+
+#define DLOG_IS_ON(severity) LOG_IS_ON(severity)
+#define DLOG_IF(severity, condition) LOG_IF(severity, condition)
+#define DLOG_ASSERT(condition) LOG_ASSERT(condition)
+#define DPLOG_IF(severity, condition) PLOG_IF(severity, condition)
+#define DVLOG_IF(verboselevel, condition) VLOG_IF(verboselevel, condition)
+#define DVPLOG_IF(verboselevel, condition) VPLOG_IF(verboselevel, condition)
+
+#else  // DCHECK_IS_ON()
+
+// If !DCHECK_IS_ON(), we want to avoid emitting any references to |condition|
+// (which may reference a variable defined only if DCHECK_IS_ON()).
+// Contrast this with DCHECK et al., which have different behavior.
+
+#define DLOG_IS_ON(severity) false
+#define DLOG_IF(severity, condition) EAT_STREAM_PARAMETERS
+#define DLOG_ASSERT(condition) EAT_STREAM_PARAMETERS
+#define DPLOG_IF(severity, condition) EAT_STREAM_PARAMETERS
+#define DVLOG_IF(verboselevel, condition) EAT_STREAM_PARAMETERS
+#define DVPLOG_IF(verboselevel, condition) EAT_STREAM_PARAMETERS
+
+#endif  // DCHECK_IS_ON()
+
+#define DLOG(severity)                                          \
+  LAZY_STREAM(LOG_STREAM(severity), DLOG_IS_ON(severity))
+
+#define DPLOG(severity)                                         \
+  LAZY_STREAM(PLOG_STREAM(severity), DLOG_IS_ON(severity))
+
+#define DVLOG(verboselevel) DVLOG_IF(verboselevel, VLOG_IS_ON(verboselevel))
+
+#define DVPLOG(verboselevel) DVPLOG_IF(verboselevel, VLOG_IS_ON(verboselevel))
+
+// Definitions for DCHECK et al.
+
+#if DCHECK_IS_ON()
+
+#if DCHECK_IS_CONFIGURABLE
+BASE_EXPORT extern LogSeverity LOG_DCHECK;
+#else
+const LogSeverity LOG_DCHECK = LOG_FATAL;
+#endif
+
+#else  // DCHECK_IS_ON()
+
+// There may be users of LOG_DCHECK that are enabled independently
+// of DCHECK_IS_ON(), so default to FATAL logging for those.
+const LogSeverity LOG_DCHECK = LOG_FATAL;
+
+#endif  // DCHECK_IS_ON()
+
+// DCHECK et al. make sure to reference |condition| regardless of
+// whether DCHECKs are enabled; this is so that we don't get unused
+// variable warnings if the only use of a variable is in a DCHECK.
+// This behavior is different from DLOG_IF et al.
+//
+// Note that the definition of the DCHECK macros depends on whether or not
+// DCHECK_IS_ON() is true. When DCHECK_IS_ON() is false, the macros use
+// EAT_STREAM_PARAMETERS to avoid expressions that would create temporaries.
+
+#if defined(_PREFAST_) && defined(OS_WIN)
+// See comments on the previous use of __analysis_assume.
+
+#define DCHECK(condition)                    \
+  __analysis_assume(!!(condition)),          \
+      LAZY_STREAM(LOG_STREAM(DCHECK), false) \
+          << "Check failed: " #condition ". "
+
+#define DPCHECK(condition)                    \
+  __analysis_assume(!!(condition)),           \
+      LAZY_STREAM(PLOG_STREAM(DCHECK), false) \
+          << "Check failed: " #condition ". "
+
+#else  // !(defined(_PREFAST_) && defined(OS_WIN))
+
+#if DCHECK_IS_ON()
+
+#define DCHECK(condition)                                           \
+  LAZY_STREAM(LOG_STREAM(DCHECK), !ANALYZER_ASSUME_TRUE(condition)) \
+      << "Check failed: " #condition ". "
+#define DPCHECK(condition)                                           \
+  LAZY_STREAM(PLOG_STREAM(DCHECK), !ANALYZER_ASSUME_TRUE(condition)) \
+      << "Check failed: " #condition ". "
+
+#else  // DCHECK_IS_ON()
+
+#define DCHECK(condition) EAT_STREAM_PARAMETERS << !(condition)
+#define DPCHECK(condition) EAT_STREAM_PARAMETERS << !(condition)
+
+#endif  // DCHECK_IS_ON()
+
+#endif  // defined(_PREFAST_) && defined(OS_WIN)
+
+// Helper macro for binary operators.
+// Don't use this macro directly in your code, use DCHECK_EQ et al below.
+// The 'switch' is used to prevent the 'else' from being ambiguous when the
+// macro is used in an 'if' clause such as:
+// if (a == 1)
+//   DCHECK_EQ(2, a);
+#if DCHECK_IS_ON()
+
+#define DCHECK_OP(name, op, val1, val2)                                \
+  switch (0) case 0: default:                                          \
+  if (::logging::CheckOpResult true_if_passed =                        \
+      DCHECK_IS_ON() ?                                                 \
+      ::logging::Check##name##Impl((val1), (val2),                     \
+                                   #val1 " " #op " " #val2) : nullptr) \
+   ;                                                                   \
+  else                                                                 \
+    ::logging::LogMessage(__FILE__, __LINE__, ::logging::LOG_DCHECK,   \
+                          true_if_passed.message()).stream()
+
+#else  // DCHECK_IS_ON()
+
+// When DCHECKs aren't enabled, DCHECK_OP still needs to reference operator<<
+// overloads for |val1| and |val2| to avoid potential compiler warnings about
+// unused functions. For the same reason, it also compares |val1| and |val2|
+// using |op|.
+//
+// Note that the contract of DCHECK_EQ, etc is that arguments are only evaluated
+// once. Even though |val1| and |val2| appear twice in this version of the macro
+// expansion, this is OK, since the expression is never actually evaluated.
+#define DCHECK_OP(name, op, val1, val2)                             \
+  EAT_STREAM_PARAMETERS << (::logging::MakeCheckOpValueString(      \
+                                ::logging::g_swallow_stream, val1), \
+                            ::logging::MakeCheckOpValueString(      \
+                                ::logging::g_swallow_stream, val2), \
+                            (val1)op(val2))
+
+#endif  // DCHECK_IS_ON()
+
+// Equality/Inequality checks - compare two values, and log a
+// LOG_DCHECK message including the two values when the result is not
+// as expected.  The values must have operator<<(ostream, ...)
+// defined.
+//
+// You may append to the error message like so:
+//   DCHECK_NE(1, 2) << "The world must be ending!";
+//
+// We are very careful to ensure that each argument is evaluated exactly
+// once, and that anything which is legal to pass as a function argument is
+// legal here.  In particular, the arguments may be temporary expressions
+// which will end up being destroyed at the end of the apparent statement,
+// for example:
+//   DCHECK_EQ(string("abc")[1], 'b');
+//
+// WARNING: These don't compile correctly if one of the arguments is a pointer
+// and the other is NULL.  In new code, prefer nullptr instead.  To
+// work around this for C++98, simply static_cast NULL to the type of the
+// desired pointer.
+
+#define DCHECK_EQ(val1, val2) DCHECK_OP(EQ, ==, val1, val2)
+#define DCHECK_NE(val1, val2) DCHECK_OP(NE, !=, val1, val2)
+#define DCHECK_LE(val1, val2) DCHECK_OP(LE, <=, val1, val2)
+#define DCHECK_LT(val1, val2) DCHECK_OP(LT, < , val1, val2)
+#define DCHECK_GE(val1, val2) DCHECK_OP(GE, >=, val1, val2)
+#define DCHECK_GT(val1, val2) DCHECK_OP(GT, > , val1, val2)
+
+#if !DCHECK_IS_ON() && defined(OS_CHROMEOS)
+// Implement logging of NOTREACHED() as a dedicated function to get function
+// call overhead down to a minimum.
+void LogErrorNotReached(const char* file, int line);
+#define NOTREACHED()                                       \
+  true ? ::logging::LogErrorNotReached(__FILE__, __LINE__) \
+       : EAT_STREAM_PARAMETERS
+#else
+#define NOTREACHED() DCHECK(false)
+#endif
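+
+// For example: NOTREACHED() << "Unexpected type " << type; streams like a
+// DCHECK, with |type| here being a hypothetical variable.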
+
+// Redefine the standard assert to use our nice log files
+#undef assert
+#define assert(x) DLOG_ASSERT(x)
+
+// This class more or less represents a particular log message.  You
+// create an instance of LogMessage and then stream stuff to it.
+// When you finish streaming to it, ~LogMessage is called and the
+// full message gets streamed to the appropriate destination.
+//
+// You shouldn't actually use LogMessage's constructor to log things,
+// though.  You should use the LOG() macro (and variants thereof)
+// above.
+class BASE_EXPORT LogMessage {
+ public:
+  // Used for LOG(severity).
+  LogMessage(const char* file, int line, LogSeverity severity);
+
+  // Used for CHECK().  Implied severity = LOG_FATAL.
+  LogMessage(const char* file, int line, const char* condition);
+
+  // Used for CHECK_EQ(), etc. Takes ownership of the given string.
+  // Implied severity = LOG_FATAL.
+  LogMessage(const char* file, int line, std::string* result);
+
+  // Used for DCHECK_EQ(), etc. Takes ownership of the given string.
+  LogMessage(const char* file, int line, LogSeverity severity,
+             std::string* result);
+
+  ~LogMessage();
+
+  std::ostream& stream() { return stream_; }
+
+  LogSeverity severity() { return severity_; }
+  std::string str() { return stream_.str(); }
+
+ private:
+  void Init(const char* file, int line);
+
+  LogSeverity severity_;
+  std::ostringstream stream_;
+  size_t message_start_;  // Offset of the start of the message (past prefix
+                          // info).
+  // The file and line information passed in to the constructor.
+  const char* file_;
+  const int line_;
+
+#if defined(OS_WIN)
+  // Stores the current value of GetLastError in the constructor and restores
+  // it in the destructor by calling SetLastError.
+  // This is useful since the LogMessage class uses a lot of Win32 calls
+  // that overwrite the last-error value; without this, the code that called
+  // the log function would lose the thread's error value when the log call
+  // returns.
+  class SaveLastError {
+   public:
+    SaveLastError();
+    ~SaveLastError();
+
+    unsigned long get_error() const { return last_error_; }
+
+   protected:
+    unsigned long last_error_;
+  };
+
+  SaveLastError last_error_;
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(LogMessage);
+};
+
+// This class is used to explicitly ignore values in the conditional
+// logging macros.  This avoids compiler warnings like "value computed
+// is not used" and "statement has no effect".
+class LogMessageVoidify {
+ public:
+  LogMessageVoidify() = default;
+  // This has to be an operator with a precedence lower than << but
+  // higher than ?:
+  void operator&(std::ostream&) { }
+};
+
+#if defined(OS_WIN)
+typedef unsigned long SystemErrorCode;
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+typedef int SystemErrorCode;
+#endif
+
+// Alias for ::GetLastError() on Windows and errno on POSIX. Avoids having to
+// pull in windows.h just for GetLastError() and DWORD.
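+// For example, on POSIX a failed open() typically makes PLOG(ERROR) << "open"
+// end with ": No such file or directory (2)", since SystemErrorCodeToString
+// formats errno via safe_strerror plus the numeric code.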
+BASE_EXPORT SystemErrorCode GetLastSystemErrorCode();
+BASE_EXPORT std::string SystemErrorCodeToString(SystemErrorCode error_code);
+
+#if defined(OS_WIN)
+// Appends a formatted system message of the GetLastError() type.
+class BASE_EXPORT Win32ErrorLogMessage {
+ public:
+  Win32ErrorLogMessage(const char* file,
+                       int line,
+                       LogSeverity severity,
+                       SystemErrorCode err);
+
+  // Appends the error message before destructing the encapsulated class.
+  ~Win32ErrorLogMessage();
+
+  std::ostream& stream() { return log_message_.stream(); }
+
+ private:
+  SystemErrorCode err_;
+  LogMessage log_message_;
+
+  DISALLOW_COPY_AND_ASSIGN(Win32ErrorLogMessage);
+};
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+// Appends a formatted system message of the errno type.
+class BASE_EXPORT ErrnoLogMessage {
+ public:
+  ErrnoLogMessage(const char* file,
+                  int line,
+                  LogSeverity severity,
+                  SystemErrorCode err);
+
+  // Appends the error message before destructing the encapsulated class.
+  ~ErrnoLogMessage();
+
+  std::ostream& stream() { return log_message_.stream(); }
+
+ private:
+  SystemErrorCode err_;
+  LogMessage log_message_;
+
+  DISALLOW_COPY_AND_ASSIGN(ErrnoLogMessage);
+};
+#endif  // OS_WIN
+
+// Closes the log file explicitly if open.
+// NOTE: Since the log file is opened as necessary by the action of logging
+//       statements, there's no guarantee that it will stay closed
+//       after this call.
+BASE_EXPORT void CloseLogFile();
+
+// Async signal safe logging mechanism.
+BASE_EXPORT void RawLog(int level, const char* message);
+
+#define RAW_LOG(level, message) \
+  ::logging::RawLog(::logging::LOG_##level, message)
+
+#define RAW_CHECK(condition)                               \
+  do {                                                     \
+    if (!(condition))                                      \
+      ::logging::RawLog(::logging::LOG_FATAL,              \
+                        "Check failed: " #condition "\n"); \
+  } while (0)
+
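+// For example, RAW_CHECK(fd >= 0); (with a hypothetical |fd|) is safe inside
+// a signal handler: RawLog writes straight to STDERR_FILENO with write(2) and
+// performs no allocation, as its definition in logging.cc shows.
+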
+#if defined(OS_WIN)
+// Returns true if logging to file is enabled.
+BASE_EXPORT bool IsLoggingToFileEnabled();
+
+// Returns the default log file path.
+BASE_EXPORT std::wstring GetLogFileFullPath();
+#endif
+
+}  // namespace logging
+
+// Note that "The behavior of a C++ program is undefined if it adds declarations
+// or definitions to namespace std or to a namespace within namespace std unless
+// otherwise specified." --C++11[namespace.std]
+//
+// We've checked that this particular definition has the intended behavior on
+// our implementations, but it's prone to breaking in the future, and please
+// don't imitate this in your own definitions without checking with some
+// standard library experts.
+namespace std {
+// These functions are provided as a convenience for logging, which is where we
+// use streams (it is against Google style to use streams in other places). They
+// are designed to allow you to emit non-ASCII Unicode strings to the log file,
+// which is normally ASCII. They are relatively slow, so try not to use them for
+// common cases. Non-ASCII characters will be converted to UTF-8 by these
+// operators.
+BASE_EXPORT std::ostream& operator<<(std::ostream& out, const wchar_t* wstr);
+inline std::ostream& operator<<(std::ostream& out, const std::wstring& wstr) {
+  return out << wstr.c_str();
+}
+}  // namespace std
+
+// The NOTIMPLEMENTED() macro annotates codepaths which have not been
+// implemented yet. If output spam is a serious concern,
+// NOTIMPLEMENTED_LOG_ONCE can be used.
+
+#if defined(COMPILER_GCC)
+// On Linux, with GCC, we can use __PRETTY_FUNCTION__ to get the demangled name
+// of the current function in the NOTIMPLEMENTED message.
+#define NOTIMPLEMENTED_MSG "Not implemented reached in " << __PRETTY_FUNCTION__
+#else
+#define NOTIMPLEMENTED_MSG "NOT IMPLEMENTED"
+#endif
+
+#if defined(OS_ANDROID) && defined(OFFICIAL_BUILD)
+#define NOTIMPLEMENTED() EAT_STREAM_PARAMETERS
+#define NOTIMPLEMENTED_LOG_ONCE() EAT_STREAM_PARAMETERS
+#else
+#define NOTIMPLEMENTED() LOG(ERROR) << NOTIMPLEMENTED_MSG
+#define NOTIMPLEMENTED_LOG_ONCE()                      \
+  do {                                                 \
+    static bool logged_once = false;                   \
+    LOG_IF(ERROR, !logged_once) << NOTIMPLEMENTED_MSG; \
+    logged_once = true;                                \
+  } while (0);                                         \
+  EAT_STREAM_PARAMETERS
+#endif
+
+#endif  // BASE_LOGGING_H_
diff --git a/base/logging_unittest.cc b/base/logging_unittest.cc
new file mode 100644
index 0000000..9025aaf
--- /dev/null
+++ b/base/logging_unittest.cc
@@ -0,0 +1,676 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/logging.h"
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/strings/string_piece.h"
+#include "base/test/scoped_feature_list.h"
+#include "build/build_config.h"
+
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_POSIX)
+#include <signal.h>
+#include <unistd.h>
+#include "base/posix/eintr_wrapper.h"
+#endif  // OS_POSIX
+
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+#include <ucontext.h>
+#endif
+
+#if defined(OS_WIN)
+#include <excpt.h>
+#include <windows.h>
+#endif  // OS_WIN
+
+#if defined(OS_FUCHSIA)
+#include "base/fuchsia/fuchsia_logging.h"
+#endif
+
+namespace logging {
+
+namespace {
+
+using ::testing::Return;
+using ::testing::_;
+
+// Needs to be global since log assert handlers can't maintain state.
+int g_log_sink_call_count = 0;
+
+#if !defined(OFFICIAL_BUILD) || defined(DCHECK_ALWAYS_ON) || !defined(NDEBUG)
+void LogSink(const char* file,
+             int line,
+             const base::StringPiece message,
+             const base::StringPiece stack_trace) {
+  ++g_log_sink_call_count;
+}
+#endif
+
+// Class to make sure any manipulations we do to the min log level are
+// contained (i.e., do not affect other unit tests).
+class LogStateSaver {
+ public:
+  LogStateSaver() : old_min_log_level_(GetMinLogLevel()) {}
+
+  ~LogStateSaver() {
+    SetMinLogLevel(old_min_log_level_);
+    g_log_sink_call_count = 0;
+  }
+
+ private:
+  int old_min_log_level_;
+
+  DISALLOW_COPY_AND_ASSIGN(LogStateSaver);
+};
+
+class LoggingTest : public testing::Test {
+ private:
+  LogStateSaver log_state_saver_;
+};
+
+class MockLogSource {
+ public:
+  MOCK_METHOD0(Log, const char*());
+};
+
+class MockLogAssertHandler {
+ public:
+  MOCK_METHOD4(
+      HandleLogAssert,
+      void(const char*, int, const base::StringPiece, const base::StringPiece));
+};
+
+TEST_F(LoggingTest, BasicLogging) {
+  MockLogSource mock_log_source;
+  EXPECT_CALL(mock_log_source, Log())
+      .Times(DCHECK_IS_ON() ? 16 : 8)
+      .WillRepeatedly(Return("log message"));
+
+  SetMinLogLevel(LOG_INFO);
+
+  EXPECT_TRUE(LOG_IS_ON(INFO));
+  EXPECT_TRUE((DCHECK_IS_ON() != 0) == DLOG_IS_ON(INFO));
+  EXPECT_TRUE(VLOG_IS_ON(0));
+
+  LOG(INFO) << mock_log_source.Log();
+  LOG_IF(INFO, true) << mock_log_source.Log();
+  PLOG(INFO) << mock_log_source.Log();
+  PLOG_IF(INFO, true) << mock_log_source.Log();
+  VLOG(0) << mock_log_source.Log();
+  VLOG_IF(0, true) << mock_log_source.Log();
+  VPLOG(0) << mock_log_source.Log();
+  VPLOG_IF(0, true) << mock_log_source.Log();
+
+  DLOG(INFO) << mock_log_source.Log();
+  DLOG_IF(INFO, true) << mock_log_source.Log();
+  DPLOG(INFO) << mock_log_source.Log();
+  DPLOG_IF(INFO, true) << mock_log_source.Log();
+  DVLOG(0) << mock_log_source.Log();
+  DVLOG_IF(0, true) << mock_log_source.Log();
+  DVPLOG(0) << mock_log_source.Log();
+  DVPLOG_IF(0, true) << mock_log_source.Log();
+}
+
+TEST_F(LoggingTest, LogIsOn) {
+#if defined(NDEBUG)
+  const bool kDfatalIsFatal = false;
+#else  // defined(NDEBUG)
+  const bool kDfatalIsFatal = true;
+#endif  // defined(NDEBUG)
+
+  SetMinLogLevel(LOG_INFO);
+  EXPECT_TRUE(LOG_IS_ON(INFO));
+  EXPECT_TRUE(LOG_IS_ON(WARNING));
+  EXPECT_TRUE(LOG_IS_ON(ERROR));
+  EXPECT_TRUE(LOG_IS_ON(FATAL));
+  EXPECT_TRUE(LOG_IS_ON(DFATAL));
+
+  SetMinLogLevel(LOG_WARNING);
+  EXPECT_FALSE(LOG_IS_ON(INFO));
+  EXPECT_TRUE(LOG_IS_ON(WARNING));
+  EXPECT_TRUE(LOG_IS_ON(ERROR));
+  EXPECT_TRUE(LOG_IS_ON(FATAL));
+  EXPECT_TRUE(LOG_IS_ON(DFATAL));
+
+  SetMinLogLevel(LOG_ERROR);
+  EXPECT_FALSE(LOG_IS_ON(INFO));
+  EXPECT_FALSE(LOG_IS_ON(WARNING));
+  EXPECT_TRUE(LOG_IS_ON(ERROR));
+  EXPECT_TRUE(LOG_IS_ON(FATAL));
+  EXPECT_TRUE(LOG_IS_ON(DFATAL));
+
+  // LOG_IS_ON(FATAL) should always be true.
+  SetMinLogLevel(LOG_FATAL + 1);
+  EXPECT_FALSE(LOG_IS_ON(INFO));
+  EXPECT_FALSE(LOG_IS_ON(WARNING));
+  EXPECT_FALSE(LOG_IS_ON(ERROR));
+  EXPECT_TRUE(LOG_IS_ON(FATAL));
+  EXPECT_EQ(kDfatalIsFatal, LOG_IS_ON(DFATAL));
+}
+
+TEST_F(LoggingTest, LoggingIsLazyBySeverity) {
+  MockLogSource mock_log_source;
+  EXPECT_CALL(mock_log_source, Log()).Times(0);
+
+  SetMinLogLevel(LOG_WARNING);
+
+  EXPECT_FALSE(LOG_IS_ON(INFO));
+  EXPECT_FALSE(DLOG_IS_ON(INFO));
+  EXPECT_FALSE(VLOG_IS_ON(1));
+
+  LOG(INFO) << mock_log_source.Log();
+  LOG_IF(INFO, false) << mock_log_source.Log();
+  PLOG(INFO) << mock_log_source.Log();
+  PLOG_IF(INFO, false) << mock_log_source.Log();
+  VLOG(1) << mock_log_source.Log();
+  VLOG_IF(1, true) << mock_log_source.Log();
+  VPLOG(1) << mock_log_source.Log();
+  VPLOG_IF(1, true) << mock_log_source.Log();
+
+  DLOG(INFO) << mock_log_source.Log();
+  DLOG_IF(INFO, true) << mock_log_source.Log();
+  DPLOG(INFO) << mock_log_source.Log();
+  DPLOG_IF(INFO, true) << mock_log_source.Log();
+  DVLOG(1) << mock_log_source.Log();
+  DVLOG_IF(1, true) << mock_log_source.Log();
+  DVPLOG(1) << mock_log_source.Log();
+  DVPLOG_IF(1, true) << mock_log_source.Log();
+}
+
+TEST_F(LoggingTest, LoggingIsLazyByDestination) {
+  MockLogSource mock_log_source;
+  MockLogSource mock_log_source_error;
+  EXPECT_CALL(mock_log_source, Log()).Times(0);
+
+  // Severity >= ERROR is always printed to stderr.
+  EXPECT_CALL(mock_log_source_error, Log()).Times(1).
+      WillRepeatedly(Return("log message"));
+
+  LoggingSettings settings;
+  settings.logging_dest = LOG_NONE;
+  InitLogging(settings);
+
+  LOG(INFO) << mock_log_source.Log();
+  LOG(WARNING) << mock_log_source.Log();
+  LOG(ERROR) << mock_log_source_error.Log();
+}
+
+// Official builds have CHECKs directly call BreakDebugger.
+#if !defined(OFFICIAL_BUILD)
+
+// https://crbug.com/709067 tracks test flakiness on iOS.
+#if defined(OS_IOS)
+#define MAYBE_CheckStreamsAreLazy DISABLED_CheckStreamsAreLazy
+#else
+#define MAYBE_CheckStreamsAreLazy CheckStreamsAreLazy
+#endif
+TEST_F(LoggingTest, MAYBE_CheckStreamsAreLazy) {
+  MockLogSource mock_log_source, uncalled_mock_log_source;
+  EXPECT_CALL(mock_log_source, Log()).Times(8).
+      WillRepeatedly(Return("check message"));
+  EXPECT_CALL(uncalled_mock_log_source, Log()).Times(0);
+
+  ScopedLogAssertHandler scoped_assert_handler(base::Bind(LogSink));
+
+  CHECK(mock_log_source.Log()) << uncalled_mock_log_source.Log();
+  PCHECK(!mock_log_source.Log()) << mock_log_source.Log();
+  CHECK_EQ(mock_log_source.Log(), mock_log_source.Log())
+      << uncalled_mock_log_source.Log();
+  CHECK_NE(mock_log_source.Log(), mock_log_source.Log())
+      << mock_log_source.Log();
+}
+
+#endif
+
+#if defined(OFFICIAL_BUILD) && defined(OS_WIN)
+NOINLINE void CheckContainingFunc(int death_location) {
+  CHECK(death_location != 1);
+  CHECK(death_location != 2);
+  CHECK(death_location != 3);
+}
+
+int GetCheckExceptionData(EXCEPTION_POINTERS* p, DWORD* code, void** addr) {
+  *code = p->ExceptionRecord->ExceptionCode;
+  *addr = p->ExceptionRecord->ExceptionAddress;
+  return EXCEPTION_EXECUTE_HANDLER;
+}
+
+TEST_F(LoggingTest, CheckCausesDistinctBreakpoints) {
+  DWORD code1 = 0;
+  DWORD code2 = 0;
+  DWORD code3 = 0;
+  void* addr1 = nullptr;
+  void* addr2 = nullptr;
+  void* addr3 = nullptr;
+
+  // Record the exception code and addresses.
+  __try {
+    CheckContainingFunc(1);
+  } __except (
+      GetCheckExceptionData(GetExceptionInformation(), &code1, &addr1)) {
+  }
+
+  __try {
+    CheckContainingFunc(2);
+  } __except (
+      GetCheckExceptionData(GetExceptionInformation(), &code2, &addr2)) {
+  }
+
+  __try {
+    CheckContainingFunc(3);
+  } __except (
+      GetCheckExceptionData(GetExceptionInformation(), &code3, &addr3)) {
+  }
+
+  // Ensure that the exception codes are correct (in particular, breakpoints,
+  // not access violations).
+  EXPECT_EQ(STATUS_BREAKPOINT, code1);
+  EXPECT_EQ(STATUS_BREAKPOINT, code2);
+  EXPECT_EQ(STATUS_BREAKPOINT, code3);
+
+  // Ensure that none of the CHECKs are colocated.
+  EXPECT_NE(addr1, addr2);
+  EXPECT_NE(addr1, addr3);
+  EXPECT_NE(addr2, addr3);
+}
+
+#elif defined(OS_POSIX) && !defined(OS_NACL) && !defined(OS_IOS) && \
+    !defined(OS_FUCHSIA) &&                                         \
+    (defined(ARCH_CPU_X86_FAMILY) || defined(ARCH_CPU_ARM_FAMILY))
+
+int g_child_crash_pipe;
+
+void CheckCrashTestSighandler(int, siginfo_t* info, void* context_ptr) {
+  // Contrary to what is clearly stated in "man 2 sigaction", some Linux
+  // kernels do NOT populate |info->si_addr| in the case of a SIGTRAP. Hence we
+  // need the arch-specific boilerplate below, which is inspired by breakpad.
+  // At the same time, on OSX, ucontext.h is deprecated but si_addr works fine.
+  uintptr_t crash_addr = 0;
+#if defined(OS_MACOSX)
+  crash_addr = reinterpret_cast<uintptr_t>(info->si_addr);
+#else  // OS_POSIX && !OS_MACOSX
+  ucontext_t* context = reinterpret_cast<ucontext_t*>(context_ptr);
+#if defined(ARCH_CPU_X86)
+  crash_addr = static_cast<uintptr_t>(context->uc_mcontext.gregs[REG_EIP]);
+#elif defined(ARCH_CPU_X86_64)
+  crash_addr = static_cast<uintptr_t>(context->uc_mcontext.gregs[REG_RIP]);
+#elif defined(ARCH_CPU_ARMEL)
+  crash_addr = static_cast<uintptr_t>(context->uc_mcontext.arm_pc);
+#elif defined(ARCH_CPU_ARM64)
+  crash_addr = static_cast<uintptr_t>(context->uc_mcontext.pc);
+#endif  // ARCH_*
+#endif  // OS_POSIX && !OS_MACOSX
+  HANDLE_EINTR(write(g_child_crash_pipe, &crash_addr, sizeof(uintptr_t)));
+  _exit(0);
+}
+
+// CHECK causes a direct crash (without jumping to another function) only in
+// official builds. Unfortunately, continuous test coverage on official builds
+// is lower. DO_CHECK here falls back on a home-brewed implementation in
+// non-official builds, to catch regressions earlier in the CQ.
+#if defined(OFFICIAL_BUILD)
+#define DO_CHECK CHECK
+#else
+#define DO_CHECK(cond) \
+  if (!(cond))         \
+  IMMEDIATE_CRASH()
+#endif
+
+void CrashChildMain(int death_location) {
+  struct sigaction act = {};
+  act.sa_sigaction = CheckCrashTestSighandler;
+  act.sa_flags = SA_SIGINFO;
+  ASSERT_EQ(0, sigaction(SIGTRAP, &act, nullptr));
+  ASSERT_EQ(0, sigaction(SIGBUS, &act, nullptr));
+  ASSERT_EQ(0, sigaction(SIGILL, &act, nullptr));
+  DO_CHECK(death_location != 1);
+  DO_CHECK(death_location != 2);
+  printf("\n");
+  DO_CHECK(death_location != 3);
+
+  // Should never reach this point.
+  const uintptr_t failed = 0;
+  HANDLE_EINTR(write(g_child_crash_pipe, &failed, sizeof(uintptr_t)));
+}
+
+void SpawnChildAndCrash(int death_location, uintptr_t* child_crash_addr) {
+  int pipefd[2];
+  ASSERT_EQ(0, pipe(pipefd));
+
+  int pid = fork();
+  ASSERT_GE(pid, 0);
+
+  if (pid == 0) {      // child process.
+    close(pipefd[0]);  // Close reader (parent) end.
+    g_child_crash_pipe = pipefd[1];
+    CrashChildMain(death_location);
+    FAIL() << "The child process was supposed to crash. It didn't.";
+  }
+
+  close(pipefd[1]);  // Close writer (child) end.
+  DCHECK(child_crash_addr);
+  int res = HANDLE_EINTR(read(pipefd[0], child_crash_addr, sizeof(uintptr_t)));
+  ASSERT_EQ(static_cast<int>(sizeof(uintptr_t)), res);
+}
+
+TEST_F(LoggingTest, CheckCausesDistinctBreakpoints) {
+  uintptr_t child_crash_addr_1 = 0;
+  uintptr_t child_crash_addr_2 = 0;
+  uintptr_t child_crash_addr_3 = 0;
+
+  SpawnChildAndCrash(1, &child_crash_addr_1);
+  SpawnChildAndCrash(2, &child_crash_addr_2);
+  SpawnChildAndCrash(3, &child_crash_addr_3);
+
+  ASSERT_NE(0u, child_crash_addr_1);
+  ASSERT_NE(0u, child_crash_addr_2);
+  ASSERT_NE(0u, child_crash_addr_3);
+  ASSERT_NE(child_crash_addr_1, child_crash_addr_2);
+  ASSERT_NE(child_crash_addr_1, child_crash_addr_3);
+  ASSERT_NE(child_crash_addr_2, child_crash_addr_3);
+}
+#endif  // OS_POSIX
+
+TEST_F(LoggingTest, DebugLoggingReleaseBehavior) {
+#if DCHECK_IS_ON()
+  int debug_only_variable = 1;
+#endif
+  // These should avoid emitting references to |debug_only_variable|
+  // in release mode.
+  DLOG_IF(INFO, debug_only_variable) << "test";
+  DLOG_ASSERT(debug_only_variable) << "test";
+  DPLOG_IF(INFO, debug_only_variable) << "test";
+  DVLOG_IF(1, debug_only_variable) << "test";
+}
+
+TEST_F(LoggingTest, DcheckStreamsAreLazy) {
+  MockLogSource mock_log_source;
+  EXPECT_CALL(mock_log_source, Log()).Times(0);
+#if DCHECK_IS_ON()
+  DCHECK(true) << mock_log_source.Log();
+  DCHECK_EQ(0, 0) << mock_log_source.Log();
+#else
+  DCHECK(mock_log_source.Log()) << mock_log_source.Log();
+  DPCHECK(mock_log_source.Log()) << mock_log_source.Log();
+  DCHECK_EQ(0, 0) << mock_log_source.Log();
+  DCHECK_EQ(mock_log_source.Log(), static_cast<const char*>(nullptr))
+      << mock_log_source.Log();
+#endif
+}
+
+void DcheckEmptyFunction1() {
+  // Provide a body so that Release builds do not cause the compiler to
+  // optimize DcheckEmptyFunction1 and DcheckEmptyFunction2 as a single
+  // function, which breaks the Dcheck tests below.
+  LOG(INFO) << "DcheckEmptyFunction1";
+}
+void DcheckEmptyFunction2() {}
+
+#if DCHECK_IS_CONFIGURABLE
+class ScopedDcheckSeverity {
+ public:
+  ScopedDcheckSeverity(LogSeverity new_severity) : old_severity_(LOG_DCHECK) {
+    LOG_DCHECK = new_severity;
+  }
+
+  ~ScopedDcheckSeverity() { LOG_DCHECK = old_severity_; }
+
+ private:
+  LogSeverity old_severity_;
+};
+#endif  // DCHECK_IS_CONFIGURABLE
+
+// https://crbug.com/709067 tracks test flakiness on iOS.
+#if defined(OS_IOS)
+#define MAYBE_Dcheck DISABLED_Dcheck
+#else
+#define MAYBE_Dcheck Dcheck
+#endif
+TEST_F(LoggingTest, MAYBE_Dcheck) {
+#if DCHECK_IS_CONFIGURABLE
+  // DCHECKs are enabled, and LOG_DCHECK is mutable, but defaults to non-fatal.
+  // Set it to LOG_FATAL to get the expected behavior from the rest of this
+  // test.
+  ScopedDcheckSeverity dcheck_severity(LOG_FATAL);
+#endif  // DCHECK_IS_CONFIGURABLE
+
+#if defined(NDEBUG) && !defined(DCHECK_ALWAYS_ON)
+  // Release build.
+  EXPECT_FALSE(DCHECK_IS_ON());
+  EXPECT_FALSE(DLOG_IS_ON(DCHECK));
+#elif defined(NDEBUG) && defined(DCHECK_ALWAYS_ON)
+  // Release build with real DCHECKS.
+  ScopedLogAssertHandler scoped_assert_handler(base::Bind(LogSink));
+  EXPECT_TRUE(DCHECK_IS_ON());
+  EXPECT_TRUE(DLOG_IS_ON(DCHECK));
+#else
+  // Debug build.
+  ScopedLogAssertHandler scoped_assert_handler(base::Bind(LogSink));
+  EXPECT_TRUE(DCHECK_IS_ON());
+  EXPECT_TRUE(DLOG_IS_ON(DCHECK));
+#endif
+
+  // DCHECKs are fatal iff they're compiled in DCHECK_IS_ON() and the DCHECK
+  // log level is set to fatal.
+  const bool dchecks_are_fatal = DCHECK_IS_ON() && LOG_DCHECK == LOG_FATAL;
+  EXPECT_EQ(0, g_log_sink_call_count);
+  DCHECK(false);
+  EXPECT_EQ(dchecks_are_fatal ? 1 : 0, g_log_sink_call_count);
+  DPCHECK(false);
+  EXPECT_EQ(dchecks_are_fatal ? 2 : 0, g_log_sink_call_count);
+  DCHECK_EQ(0, 1);
+  EXPECT_EQ(dchecks_are_fatal ? 3 : 0, g_log_sink_call_count);
+
+  // Test DCHECK on std::nullptr_t
+  g_log_sink_call_count = 0;
+  const void* p_null = nullptr;
+  const void* p_not_null = &p_null;
+  DCHECK_EQ(p_null, nullptr);
+  DCHECK_EQ(nullptr, p_null);
+  DCHECK_NE(p_not_null, nullptr);
+  DCHECK_NE(nullptr, p_not_null);
+  EXPECT_EQ(0, g_log_sink_call_count);
+
+  // Test DCHECK on a scoped enum.
+  enum class Animal { DOG, CAT };
+  DCHECK_EQ(Animal::DOG, Animal::DOG);
+  EXPECT_EQ(0, g_log_sink_call_count);
+  DCHECK_EQ(Animal::DOG, Animal::CAT);
+  EXPECT_EQ(dchecks_are_fatal ? 1 : 0, g_log_sink_call_count);
+
+  // Test DCHECK on functions and function pointers.
+  g_log_sink_call_count = 0;
+  struct MemberFunctions {
+    void MemberFunction1() {
+      // See the comment in DcheckEmptyFunction1().
+      LOG(INFO) << "Do not merge with MemberFunction2.";
+    }
+    void MemberFunction2() {}
+  };
+  void (MemberFunctions::*mp1)() = &MemberFunctions::MemberFunction1;
+  void (MemberFunctions::*mp2)() = &MemberFunctions::MemberFunction2;
+  void (*fp1)() = DcheckEmptyFunction1;
+  void (*fp2)() = DcheckEmptyFunction2;
+  void (*fp3)() = DcheckEmptyFunction1;
+  DCHECK_EQ(fp1, fp3);
+  EXPECT_EQ(0, g_log_sink_call_count);
+  DCHECK_EQ(mp1, &MemberFunctions::MemberFunction1);
+  EXPECT_EQ(0, g_log_sink_call_count);
+  DCHECK_EQ(mp2, &MemberFunctions::MemberFunction2);
+  EXPECT_EQ(0, g_log_sink_call_count);
+  DCHECK_EQ(fp1, fp2);
+  EXPECT_EQ(dchecks_are_fatal ? 1 : 0, g_log_sink_call_count);
+  DCHECK_EQ(mp2, &MemberFunctions::MemberFunction1);
+  EXPECT_EQ(dchecks_are_fatal ? 2 : 0, g_log_sink_call_count);
+}
+
+TEST_F(LoggingTest, DcheckReleaseBehavior) {
+  int some_variable = 1;
+  // These should still reference |some_variable| so we don't get
+  // unused variable warnings.
+  DCHECK(some_variable) << "test";
+  DPCHECK(some_variable) << "test";
+  DCHECK_EQ(some_variable, 1) << "test";
+}
+
+TEST_F(LoggingTest, DCheckEqStatements) {
+  bool reached = false;
+  if (false)
+    DCHECK_EQ(false, true);           // Unreached.
+  else
+    DCHECK_EQ(true, reached = true);  // Reached, passed.
+  ASSERT_EQ(DCHECK_IS_ON() ? true : false, reached);
+
+  if (false)
+    DCHECK_EQ(false, true);           // Unreached.
+}
+
+TEST_F(LoggingTest, CheckEqStatements) {
+  bool reached = false;
+  if (false)
+    CHECK_EQ(false, true);           // Unreached.
+  else
+    CHECK_EQ(true, reached = true);  // Reached, passed.
+  ASSERT_TRUE(reached);
+
+  if (false)
+    CHECK_EQ(false, true);           // Unreached.
+}
+
+TEST_F(LoggingTest, NestedLogAssertHandlers) {
+  ::testing::InSequence dummy;
+  ::testing::StrictMock<MockLogAssertHandler> handler_a, handler_b;
+
+  EXPECT_CALL(
+      handler_a,
+      HandleLogAssert(
+          _, _, base::StringPiece("First assert must be caught by handler_a"),
+          _));
+  EXPECT_CALL(
+      handler_b,
+      HandleLogAssert(
+          _, _, base::StringPiece("Second assert must be caught by handler_b"),
+          _));
+  EXPECT_CALL(
+      handler_a,
+      HandleLogAssert(
+          _, _,
+          base::StringPiece("Last assert must be caught by handler_a again"),
+          _));
+
+  logging::ScopedLogAssertHandler scoped_handler_a(base::Bind(
+      &MockLogAssertHandler::HandleLogAssert, base::Unretained(&handler_a)));
+
+  // Using LOG(FATAL) rather than CHECK(false) here since log messages aren't
+  // preserved for CHECKs in official builds.
+  LOG(FATAL) << "First assert must be caught by handler_a";
+
+  {
+    logging::ScopedLogAssertHandler scoped_handler_b(base::Bind(
+        &MockLogAssertHandler::HandleLogAssert, base::Unretained(&handler_b)));
+    LOG(FATAL) << "Second assert must be caught by handler_b";
+  }
+
+  LOG(FATAL) << "Last assert must be caught by handler_a again";
+}
+
+// Test that defining an operator<< for a type in a namespace doesn't prevent
+// other code in that namespace from calling the operator<<(ostream, wstring)
+// defined by logging.h. This can fail if operator<<(ostream, wstring) can't be
+// found by ADL, since defining another operator<< prevents name lookup from
+// looking in the global namespace.
+namespace nested_test {
+  class Streamable {};
+  ALLOW_UNUSED_TYPE std::ostream& operator<<(std::ostream& out,
+                                             const Streamable&) {
+    return out << "Streamable";
+  }
+  TEST_F(LoggingTest, StreamingWstringFindsCorrectOperator) {
+    std::wstring wstr = L"Hello World";
+    std::ostringstream ostr;
+    ostr << wstr;
+    EXPECT_EQ("Hello World", ostr.str());
+  }
+}  // namespace nested_test
+
+#if DCHECK_IS_CONFIGURABLE
+TEST_F(LoggingTest, ConfigurableDCheck) {
+  // Verify that DCHECKs default to non-fatal in configurable-DCHECK builds.
+  // Note that we require only that DCHECK is non-fatal by default, rather
+  // than requiring that it be exactly INFO, ERROR, etc level.
+  EXPECT_LT(LOG_DCHECK, LOG_FATAL);
+  DCHECK(false);
+
+  // Verify that DCHECK* aren't hard-wired to crash on failure.
+  LOG_DCHECK = LOG_INFO;
+  DCHECK(false);
+  DCHECK_EQ(1, 2);
+
+  // Verify that DCHECK does crash if LOG_DCHECK is set to LOG_FATAL.
+  LOG_DCHECK = LOG_FATAL;
+
+  ::testing::StrictMock<MockLogAssertHandler> handler;
+  EXPECT_CALL(handler, HandleLogAssert(_, _, _, _)).Times(2);
+  {
+    logging::ScopedLogAssertHandler scoped_handler_b(base::Bind(
+        &MockLogAssertHandler::HandleLogAssert, base::Unretained(&handler)));
+    DCHECK(false);
+    DCHECK_EQ(1, 2);
+  }
+}
+
+TEST_F(LoggingTest, ConfigurableDCheckFeature) {
+  // Initialize FeatureList with and without DcheckIsFatal, and verify the
+  // value of LOG_DCHECK. Note that we don't require that DCHECK take a
+  // specific value when the feature is off, only that it is non-fatal.
+
+  {
+    base::test::ScopedFeatureList feature_list;
+    feature_list.InitFromCommandLine("DcheckIsFatal", "");
+    EXPECT_EQ(LOG_DCHECK, LOG_FATAL);
+  }
+
+  {
+    base::test::ScopedFeatureList feature_list;
+    feature_list.InitFromCommandLine("", "DcheckIsFatal");
+    EXPECT_LT(LOG_DCHECK, LOG_FATAL);
+  }
+
+  // The default case is last, so we leave LOG_DCHECK in the default state.
+  {
+    base::test::ScopedFeatureList feature_list;
+    feature_list.InitFromCommandLine("", "");
+    EXPECT_LT(LOG_DCHECK, LOG_FATAL);
+  }
+}
+#endif  // DCHECK_IS_CONFIGURABLE
+
+#if defined(OS_FUCHSIA)
+TEST_F(LoggingTest, FuchsiaLogging) {
+  MockLogSource mock_log_source;
+  EXPECT_CALL(mock_log_source, Log())
+      .Times(DCHECK_IS_ON() ? 2 : 1)
+      .WillRepeatedly(Return("log message"));
+
+  SetMinLogLevel(LOG_INFO);
+
+  EXPECT_TRUE(LOG_IS_ON(INFO));
+  EXPECT_TRUE((DCHECK_IS_ON() != 0) == DLOG_IS_ON(INFO));
+
+  ZX_LOG(INFO, ZX_ERR_INTERNAL) << mock_log_source.Log();
+  ZX_DLOG(INFO, ZX_ERR_INTERNAL) << mock_log_source.Log();
+
+  ZX_CHECK(true, ZX_ERR_INTERNAL);
+  ZX_DCHECK(true, ZX_ERR_INTERNAL);
+}
+#endif  // defined(OS_FUCHSIA)
+
+}  // namespace
+
+}  // namespace logging
diff --git a/base/logging_win.cc b/base/logging_win.cc
new file mode 100644
index 0000000..319ae8a
--- /dev/null
+++ b/base/logging_win.cc
@@ -0,0 +1,138 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/logging_win.h"
+#include "base/memory/singleton.h"
+#include <initguid.h>  // NOLINT
+
+namespace logging {
+
+using base::win::EtwEventLevel;
+using base::win::EtwMofEvent;
+
+DEFINE_GUID(kLogEventId,
+    0x7fe69228, 0x633e, 0x4f06, 0x80, 0xc1, 0x52, 0x7f, 0xea, 0x23, 0xe3, 0xa7);
+
+LogEventProvider::LogEventProvider() : old_log_level_(LOG_NONE) {
+}
+
+LogEventProvider* LogEventProvider::GetInstance() {
+  return base::Singleton<LogEventProvider, base::StaticMemorySingletonTraits<
+                                               LogEventProvider>>::get();
+}
+
+bool LogEventProvider::LogMessage(logging::LogSeverity severity,
+    const char* file, int line, size_t message_start,
+    const std::string& message) {
+  EtwEventLevel level = TRACE_LEVEL_NONE;
+
+  // Convert the log severity to the most appropriate ETW trace level.
+  if (severity >= 0) {
+    switch (severity) {
+      case LOG_INFO:
+        level = TRACE_LEVEL_INFORMATION;
+        break;
+      case LOG_WARNING:
+        level = TRACE_LEVEL_WARNING;
+        break;
+      case LOG_ERROR:
+        level = TRACE_LEVEL_ERROR;
+        break;
+      case LOG_FATAL:
+        level = TRACE_LEVEL_FATAL;
+        break;
+    }
+  } else {  // severity < 0 is VLOG verbosity levels.
+    level = static_cast<EtwEventLevel>(TRACE_LEVEL_INFORMATION - severity);
+  }
+
+  // Bail if we're not logging, not enabled at that level, or if we're being
+  // called post-atexit (in which case GetInstance() returns NULL).
+  LogEventProvider* provider = LogEventProvider::GetInstance();
+  if (provider == NULL || level > provider->enable_level())
+    return false;
+
+  // And now log the event.
+  if (provider->enable_flags() & ENABLE_LOG_MESSAGE_ONLY) {
+    EtwMofEvent<1> event(kLogEventId, LOG_MESSAGE, level);
+    event.SetField(0, message.length() + 1 - message_start,
+        message.c_str() + message_start);
+
+    provider->Log(event.get());
+  } else {
+    const size_t kMaxBacktraceDepth = 32;
+    void* backtrace[kMaxBacktraceDepth];
+    DWORD depth = 0;
+
+    // Capture a stack trace if one is requested per our enable flags.
+    if (provider->enable_flags() & ENABLE_STACK_TRACE_CAPTURE)
+      depth = CaptureStackBackTrace(2, kMaxBacktraceDepth, backtrace, NULL);
+
+    EtwMofEvent<5> event(kLogEventId, LOG_MESSAGE_FULL, level);
+    if (file == NULL)
+      file = "";
+
+    // Add the stack trace.
+    event.SetField(0, sizeof(depth), &depth);
+    event.SetField(1, sizeof(backtrace[0]) * depth, &backtrace);
+    // The line.
+    event.SetField(2, sizeof(line), &line);
+    // The file.
+    event.SetField(3, strlen(file) + 1, file);
+    // And finally the message.
+    event.SetField(4, message.length() + 1 - message_start,
+        message.c_str() + message_start);
+
+    provider->Log(event.get());
+  }
+
+  // Don't increase verbosity in other log destinations.
+  if (severity < provider->old_log_level_)
+    return true;
+
+  return false;
+}
+
+void LogEventProvider::Initialize(const GUID& provider_name) {
+  LogEventProvider* provider = LogEventProvider::GetInstance();
+
+  provider->set_provider_name(provider_name);
+  provider->Register();
+
+  // Register our message handler with logging.
+  SetLogMessageHandler(LogMessage);
+}
+
+void LogEventProvider::Uninitialize() {
+  LogEventProvider::GetInstance()->Unregister();
+}
+
+void LogEventProvider::OnEventsEnabled() {
+  // Grab the old log level so we can restore it later.
+  old_log_level_ = GetMinLogLevel();
+
+  // Convert the new trace level to a logging severity
+  // and enable logging at that level.
+  EtwEventLevel level = enable_level();
+  if (level == TRACE_LEVEL_NONE || level == TRACE_LEVEL_FATAL) {
+    SetMinLogLevel(LOG_FATAL);
+  } else if (level == TRACE_LEVEL_ERROR) {
+    SetMinLogLevel(LOG_ERROR);
+  } else if (level == TRACE_LEVEL_WARNING) {
+    SetMinLogLevel(LOG_WARNING);
+  } else if (level == TRACE_LEVEL_INFORMATION) {
+    SetMinLogLevel(LOG_INFO);
+  } else if (level >= TRACE_LEVEL_VERBOSE) {
+    // Above INFO, we enable verbose levels with negative severities.
+    SetMinLogLevel(TRACE_LEVEL_INFORMATION - level);
+  }
+}
+
+void LogEventProvider::OnEventsDisabled() {
+  // Restore the old log level.
+  SetMinLogLevel(old_log_level_);
+}
+
+}  // namespace logging
diff --git a/base/logging_win.h b/base/logging_win.h
new file mode 100644
index 0000000..cdde7bb
--- /dev/null
+++ b/base/logging_win.h
@@ -0,0 +1,84 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_LOGGING_WIN_H_
+#define BASE_LOGGING_WIN_H_
+
+#include <stddef.h>
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/win/event_trace_provider.h"
+
+namespace base {
+template <typename Type>
+struct StaticMemorySingletonTraits;
+}  // namespace base
+
+namespace logging {
+
+// Event ID for the log messages we generate.
+EXTERN_C BASE_EXPORT const GUID kLogEventId;
+
+// Feature enable mask for LogEventProvider.
+enum LogEnableMask {
+  // If this bit is set in our provider enable mask, we will include
+  // a stack trace with every log message.
+  ENABLE_STACK_TRACE_CAPTURE = 0x0001,
+  // If this bit is set in our provider enable mask, the provider will log
+  // a LOG message with only the textual content of the message, and no
+  // stack trace.
+  ENABLE_LOG_MESSAGE_ONLY = 0x0002,
+};
+
+// The message types our log event provider generates.
+// ETW likes user message types to start at 10.
+enum LogMessageTypes {
+  // A textual only log message, contains a zero-terminated string.
+  LOG_MESSAGE = 10,
+  // A message with a stack trace, followed by the zero-terminated
+  // message text.
+  LOG_MESSAGE_WITH_STACKTRACE = 11,
+  // A message with:
+  //  a stack trace,
+  //  the line number as a four byte integer,
+  //  the file as a zero terminated UTF8 string,
+  //  the zero-terminated UTF8 message text.
+  LOG_MESSAGE_FULL = 12,
+};
+
+// Trace provider class to drive log control and transport
+// with Event Tracing for Windows.
+class BASE_EXPORT LogEventProvider : public base::win::EtwTraceProvider {
+ public:
+  static LogEventProvider* GetInstance();
+
+  static bool LogMessage(logging::LogSeverity severity, const char* file,
+      int line, size_t message_start, const std::string& str);
+
+  static void Initialize(const GUID& provider_name);
+  static void Uninitialize();
+
+ protected:
+  // Overridden to manipulate the log level on ETW control callbacks.
+  void OnEventsEnabled() override;
+  void OnEventsDisabled() override;
+
+ private:
+  LogEventProvider();
+
+  // The log severity prior to OnEventsEnabled,
+  // restored in OnEventsDisabled.
+  logging::LogSeverity old_log_level_;
+
+  friend struct base::StaticMemorySingletonTraits<LogEventProvider>;
+  DISALLOW_COPY_AND_ASSIGN(LogEventProvider);
+};
+
+}  // namespace logging
+
+#endif  // BASE_LOGGING_WIN_H_
diff --git a/base/mac/OWNERS b/base/mac/OWNERS
new file mode 100644
index 0000000..93e90e0
--- /dev/null
+++ b/base/mac/OWNERS
@@ -0,0 +1,8 @@
+mark@chromium.org
+thakis@chromium.org
+
+# sdk_forward_declarations.[h|mm] will likely need to be modified by Cocoa
+# developers in general.
+per-file sdk_forward_declarations.*=file://chrome/browser/ui/cocoa/OWNERS
+
+# COMPONENT: Internals
diff --git a/base/mac/authorization_util.h b/base/mac/authorization_util.h
new file mode 100644
index 0000000..4629039
--- /dev/null
+++ b/base/mac/authorization_util.h
@@ -0,0 +1,82 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_AUTHORIZATION_UTIL_H_
+#define BASE_MAC_AUTHORIZATION_UTIL_H_
+
+// AuthorizationExecuteWithPrivileges fork()s and exec()s the tool, but it
+// does not wait() for it.  It also doesn't provide the caller with access to
+// the forked pid.  If used irresponsibly, zombie processes will accumulate.
+//
+// Apple's really gotten us between a rock and a hard place, here.
+//
+// Fortunately, AuthorizationExecuteWithPrivileges does give access to the
+// tool's stdout (and stdin) via a FILE* pipe.  The tool can output its pid
+// to this pipe, and the main program can read it, and then have something
+// that it can wait() for.
+//
+// The contract is that any tool executed by the wrappers declared in this
+// file must print its pid to stdout on a line by itself before doing anything
+// else.
+//
+// http://developer.apple.com/library/mac/#samplecode/BetterAuthorizationSample/Listings/BetterAuthorizationSampleLib_c.html
+// (Look for "What's This About Zombies?")
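+//
+// A minimal sketch of a compliant tool (hypothetical; a real tool would do
+// its privileged work only after printing the pid line):
+//
+//   int main(int argc, char* argv[]) {
+//     printf("%d\n", getpid());  // Honor the contract described above.
+//     fflush(stdout);
+//     // ... privileged work ...
+//     return 0;
+//   }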
+
+#include <CoreFoundation/CoreFoundation.h>
+#include <Security/Authorization.h>
+#include <stdio.h>
+#include <sys/types.h>
+
+#include "base/base_export.h"
+
+namespace base {
+namespace mac {
+
+// Obtains an AuthorizationRef for the rights indicated by |rights|.  If
+// necessary, prompts the user for authentication. If the user is prompted,
+// |prompt| will be used as the prompt string and an icon appropriate for the
+// application will be displayed in a prompt dialog. Note that the system
+// appends its own text to the prompt string. |extraFlags| will be ORed
+// together with the default flags. Returns NULL on failure.
+BASE_EXPORT
+AuthorizationRef GetAuthorizationRightsWithPrompt(
+    AuthorizationRights* rights,
+    CFStringRef prompt,
+    AuthorizationFlags extraFlags);
+
+// Obtains an AuthorizationRef (using |GetAuthorizationRightsWithPrompt|) that
+// can be used to run commands as root.
+BASE_EXPORT
+AuthorizationRef AuthorizationCreateToRunAsRoot(CFStringRef prompt);
+
+// Calls straight through to AuthorizationExecuteWithPrivileges.  If that
+// call succeeds, |pid| will be set to the pid of the executed tool.  If the
+// pid can't be determined, |pid| will be set to -1.  |pid| must not be NULL.
+// |pipe| may be NULL, but the tool will always be executed with a pipe in
+// order to read the pid from its stdout.
+BASE_EXPORT
+OSStatus ExecuteWithPrivilegesAndGetPID(AuthorizationRef authorization,
+                                        const char* tool_path,
+                                        AuthorizationFlags options,
+                                        const char** arguments,
+                                        FILE** pipe,
+                                        pid_t* pid);
+
+// Calls ExecuteWithPrivilegesAndGetPID, and if that call succeeds, calls
+// waitpid() to wait for the process to exit.  If waitpid() succeeds, the
+// exit status is placed in |exit_status|, otherwise, -1 is stored.
+// |exit_status| may be NULL and this function will still wait for the process
+// to exit.
+BASE_EXPORT
+OSStatus ExecuteWithPrivilegesAndWait(AuthorizationRef authorization,
+                                      const char* tool_path,
+                                      AuthorizationFlags options,
+                                      const char** arguments,
+                                      FILE** pipe,
+                                      int* exit_status);
+
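+// A minimal sketch of calling the wait wrapper above (the tool path and
+// arguments are hypothetical; |authorization| is assumed to come from
+// AuthorizationCreateToRunAsRoot()):
+//
+//   const char* arguments[] = {"--example-flag", nullptr};
+//   int exit_status = -1;
+//   OSStatus status = base::mac::ExecuteWithPrivilegesAndWait(
+//       authorization, "/path/to/tool", kAuthorizationFlagDefaults,
+//       arguments, nullptr, &exit_status);
+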
+}  // namespace mac
+}  // namespace base
+
+#endif  // BASE_MAC_AUTHORIZATION_UTIL_H_
diff --git a/base/mac/authorization_util.mm b/base/mac/authorization_util.mm
new file mode 100644
index 0000000..a3bc4f9
--- /dev/null
+++ b/base/mac/authorization_util.mm
@@ -0,0 +1,201 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac/authorization_util.h"
+
+#import <Foundation/Foundation.h>
+#include <stddef.h>
+#include <sys/wait.h>
+
+#include <string>
+
+#include "base/logging.h"
+#include "base/mac/bundle_locations.h"
+#include "base/mac/foundation_util.h"
+#include "base/mac/mac_logging.h"
+#include "base/mac/scoped_authorizationref.h"
+#include "base/macros.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_util.h"
+
+namespace base {
+namespace mac {
+
+AuthorizationRef GetAuthorizationRightsWithPrompt(
+    AuthorizationRights* rights,
+    CFStringRef prompt,
+    AuthorizationFlags extraFlags) {
+  // Create an empty AuthorizationRef.
+  ScopedAuthorizationRef authorization;
+  OSStatus status = AuthorizationCreate(NULL, kAuthorizationEmptyEnvironment,
+                                        kAuthorizationFlagDefaults,
+                                        authorization.get_pointer());
+  if (status != errAuthorizationSuccess) {
+    OSSTATUS_LOG(ERROR, status) << "AuthorizationCreate";
+    return NULL;
+  }
+
+  AuthorizationFlags flags = kAuthorizationFlagDefaults |
+                             kAuthorizationFlagInteractionAllowed |
+                             kAuthorizationFlagExtendRights |
+                             kAuthorizationFlagPreAuthorize |
+                             extraFlags;
+
+  // product_logo_32.png is used instead of app.icns because Authorization
+  // Services can't deal with .icns files.
+  NSString* icon_path =
+      [base::mac::FrameworkBundle() pathForResource:@"product_logo_32"
+                                             ofType:@"png"];
+  const char* icon_path_c = [icon_path fileSystemRepresentation];
+  size_t icon_path_length = icon_path_c ? strlen(icon_path_c) : 0;
+
+  // The OS will display |prompt| along with a sentence asking the user to type
+  // the "password to allow this."
+  NSString* prompt_ns = base::mac::CFToNSCast(prompt);
+  const char* prompt_c = [prompt_ns UTF8String];
+  size_t prompt_length = prompt_c ? strlen(prompt_c) : 0;
+
+  AuthorizationItem environment_items[] = {
+    {kAuthorizationEnvironmentIcon, icon_path_length, (void*)icon_path_c, 0},
+    {kAuthorizationEnvironmentPrompt, prompt_length, (void*)prompt_c, 0}
+  };
+
+  AuthorizationEnvironment environment = {arraysize(environment_items),
+                                          environment_items};
+
+  status = AuthorizationCopyRights(authorization,
+                                   rights,
+                                   &environment,
+                                   flags,
+                                   NULL);
+
+  if (status != errAuthorizationSuccess) {
+    if (status != errAuthorizationCanceled) {
+      OSSTATUS_LOG(ERROR, status) << "AuthorizationCopyRights";
+    }
+    return NULL;
+  }
+
+  return authorization.release();
+}
+
+AuthorizationRef AuthorizationCreateToRunAsRoot(CFStringRef prompt) {
+  // Specify the "system.privilege.admin" right, which allows
+  // AuthorizationExecuteWithPrivileges to run commands as root.
+  AuthorizationItem right_items[] = {
+    {kAuthorizationRightExecute, 0, NULL, 0}
+  };
+  AuthorizationRights rights = {arraysize(right_items), right_items};
+
+  return GetAuthorizationRightsWithPrompt(&rights, prompt, 0);
+}
+
+OSStatus ExecuteWithPrivilegesAndGetPID(AuthorizationRef authorization,
+                                        const char* tool_path,
+                                        AuthorizationFlags options,
+                                        const char** arguments,
+                                        FILE** pipe,
+                                        pid_t* pid) {
+  // pipe may be NULL, but this function needs one.  In that case, use a local
+  // pipe.
+  FILE* local_pipe;
+  FILE** pipe_pointer;
+  if (pipe) {
+    pipe_pointer = pipe;
+  } else {
+    pipe_pointer = &local_pipe;
+  }
+
+// AuthorizationExecuteWithPrivileges is deprecated in macOS 10.7, but no good
+// replacement exists. https://crbug.com/593133.
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
+  // AuthorizationExecuteWithPrivileges wants |char* const*| for |arguments|,
+  // but it doesn't actually modify the arguments, and that type is kind of
+  // silly and callers probably aren't dealing with that.  Put the cast here
+  // to make things a little easier on callers.
+  OSStatus status = AuthorizationExecuteWithPrivileges(authorization,
+                                                       tool_path,
+                                                       options,
+                                                       (char* const*)arguments,
+                                                       pipe_pointer);
+#pragma clang diagnostic pop
+  if (status != errAuthorizationSuccess) {
+    return status;
+  }
+
+  int line_pid = -1;
+  size_t line_length = 0;
+  char* line_c = fgetln(*pipe_pointer, &line_length);
+  if (line_c) {
+    if (line_length > 0 && line_c[line_length - 1] == '\n') {
+      // line_c + line_length is the start of the next line if there is one.
+      // Back up one character.
+      --line_length;
+    }
+    std::string line(line_c, line_length);
+    if (!base::StringToInt(line, &line_pid)) {
+      // StringToInt may have set line_pid to something, but if the conversion
+      // was imperfect, use -1.
+      LOG(ERROR) << "ExecuteWithPrivilegesAndGetPid: funny line: " << line;
+      line_pid = -1;
+    }
+  } else {
+    LOG(ERROR) << "ExecuteWithPrivilegesAndGetPid: no line";
+  }
+
+  if (!pipe) {
+    fclose(*pipe_pointer);
+  }
+
+  if (pid) {
+    *pid = line_pid;
+  }
+
+  return status;
+}
+
+OSStatus ExecuteWithPrivilegesAndWait(AuthorizationRef authorization,
+                                      const char* tool_path,
+                                      AuthorizationFlags options,
+                                      const char** arguments,
+                                      FILE** pipe,
+                                      int* exit_status) {
+  pid_t pid;
+  OSStatus status = ExecuteWithPrivilegesAndGetPID(authorization,
+                                                   tool_path,
+                                                   options,
+                                                   arguments,
+                                                   pipe,
+                                                   &pid);
+  if (status != errAuthorizationSuccess) {
+    return status;
+  }
+
+  // exit_status may be NULL, but this function needs it.  In that case, use a
+  // local version.
+  int local_exit_status;
+  int* exit_status_pointer;
+  if (exit_status) {
+    exit_status_pointer = exit_status;
+  } else {
+    exit_status_pointer = &local_exit_status;
+  }
+
+  if (pid != -1) {
+    pid_t wait_result = HANDLE_EINTR(waitpid(pid, exit_status_pointer, 0));
+    if (wait_result != pid) {
+      PLOG(ERROR) << "waitpid";
+      *exit_status_pointer = -1;
+    }
+  } else {
+    *exit_status_pointer = -1;
+  }
+
+  return status;
+}
+
+}  // namespace mac
+}  // namespace base
diff --git a/base/mac/availability.h b/base/mac/availability.h
new file mode 100644
index 0000000..6d0bcc7
--- /dev/null
+++ b/base/mac/availability.h
@@ -0,0 +1,35 @@
+// Copyright (c) 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Provides the definition of API_AVAILABLE while we're on an SDK that doesn't
+// contain it yet.
+// TODO(thakis): Remove this file once we're on the 10.12 SDK.
+
+#ifndef BASE_MAC_AVAILABILITY_H_
+#define BASE_MAC_AVAILABILITY_H_
+
+#include <AvailabilityMacros.h>
+
+#if !defined(MAC_OS_X_VERSION_10_12) || \
+    MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_12
+#define __API_AVAILABLE_PLATFORM_macos(x) macos, introduced = x
+#define __API_AVAILABLE_PLATFORM_macosx(x) macosx, introduced = x
+#define __API_AVAILABLE_PLATFORM_ios(x) ios, introduced = x
+#define __API_AVAILABLE_PLATFORM_watchos(x) watchos, introduced = x
+#define __API_AVAILABLE_PLATFORM_tvos(x) tvos, introduced = x
+#define __API_A(x) __attribute__((availability(__API_AVAILABLE_PLATFORM_##x)))
+#define __API_AVAILABLE1(x) __API_A(x)
+#define __API_AVAILABLE2(x, y) __API_A(x) __API_A(y)
+#define __API_AVAILABLE3(x, y, z) __API_A(x) __API_A(y) __API_A(z)
+#define __API_AVAILABLE4(x, y, z, t) __API_A(x) __API_A(y) __API_A(z) __API_A(t)
+#define __API_AVAILABLE_GET_MACRO(_1, _2, _3, _4, NAME, ...) NAME
+#define API_AVAILABLE(...)                                                   \
+  __API_AVAILABLE_GET_MACRO(__VA_ARGS__, __API_AVAILABLE4, __API_AVAILABLE3, \
+                            __API_AVAILABLE2, __API_AVAILABLE1)              \
+  (__VA_ARGS__)
+#else
+#import <os/availability.h>
+#endif  // MAC_OS_X_VERSION_10_12
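+
+// A usage sketch: annotate a declaration that needs a 10.12-only API so the
+// compiler can warn about calls from older deployment targets (the function
+// name is illustrative only):
+//
+//   API_AVAILABLE(macos(10.12))
+//   void FunctionRequiringSierra();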
+
+#endif  // BASE_MAC_AVAILABILITY_H_
diff --git a/base/mac/bind_objc_block.h b/base/mac/bind_objc_block.h
new file mode 100644
index 0000000..9a481ed
--- /dev/null
+++ b/base/mac/bind_objc_block.h
@@ -0,0 +1,79 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_BIND_OBJC_BLOCK_H_
+#define BASE_MAC_BIND_OBJC_BLOCK_H_
+
+#include <Block.h>
+
+#include "base/bind.h"
+#include "base/callback_forward.h"
+#include "base/compiler_specific.h"
+#include "base/mac/scoped_block.h"
+
+// BindBlock builds a callback from an Objective-C block. Example usages:
+//
+// Closure closure = BindBlock(^{DoSomething();});
+//
+// Callback<int(void)> callback = BindBlock(^{return 42;});
+//
+// Callback<void(const std::string&, const std::string&)> callback =
+//     BindBlock(^(const std::string& arg0, const std::string& arg1) {
+//         ...
+//     });
+//
+// These variadic templates will accommodate any number of arguments; however,
+// the underlying templates in bind_internal.h and callback.h are limited to
+// seven total arguments, and the bound block itself is used as one of these
+// arguments, so functionally the templates are limited to binding blocks with
+// zero through six arguments.
+//
+// For code compiled with ARC (automatic reference counting), use BindBlockArc.
+// This is because the method has a different implementation (to avoid over-
+// retaining the block) and needs a different name so as not to break the ODR
+// (one definition rule). Another subtle difference is that the implementation
+// calls a different version of the ScopedBlock constructor, so the linker must
+// not merge the two functions.
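+//
+// For example, under ARC (a sketch mirroring the examples above):
+//
+// Closure closure = BindBlockArc(^{DoSomething();});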
+
+namespace base {
+
+namespace internal {
+
+// Helper function to run the block contained in the parameter.
+template<typename R, typename... Args>
+R RunBlock(base::mac::ScopedBlock<R(^)(Args...)> block, Args... args) {
+  R(^extracted_block)(Args...) = block.get();
+  return extracted_block(args...);
+}
+
+}  // namespace internal
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+
+// Construct a callback from an objective-C block with up to six arguments (see
+// note above).
+template<typename R, typename... Args>
+base::Callback<R(Args...)> BindBlock(R(^block)(Args...)) {
+  return base::Bind(
+      &base::internal::RunBlock<R, Args...>,
+      base::mac::ScopedBlock<R (^)(Args...)>(
+          base::mac::internal::ScopedBlockTraits<R (^)(Args...)>::Retain(
+              block)));
+}
+
+#else
+
+// Construct a callback from an objective-C block with up to six arguments (see
+// note above).
+template <typename R, typename... Args>
+base::Callback<R(Args...)> BindBlockArc(R (^block)(Args...)) {
+  return base::Bind(&base::internal::RunBlock<R, Args...>,
+                    base::mac::ScopedBlock<R (^)(Args...)>(block));
+}
+
+#endif
+
+}  // namespace base
+
+#endif  // BASE_MAC_BIND_OBJC_BLOCK_H_
diff --git a/base/mac/bind_objc_block_unittest.mm b/base/mac/bind_objc_block_unittest.mm
new file mode 100644
index 0000000..2b18672
--- /dev/null
+++ b/base/mac/bind_objc_block_unittest.mm
@@ -0,0 +1,124 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import "base/mac/bind_objc_block.h"
+
+#include <string>
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/callback_helpers.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/gtest_mac.h"
+
+#if defined(OS_IOS)
+#include "base/ios/weak_nsobject.h"
+#include "base/mac/scoped_nsautorelease_pool.h"
+#endif
+
+namespace {
+
+TEST(BindObjcBlockTest, TestScopedClosureRunnerExitScope) {
+  int run_count = 0;
+  int* ptr = &run_count;
+  {
+    base::ScopedClosureRunner runner(base::BindBlock(^{
+        (*ptr)++;
+    }));
+    EXPECT_EQ(0, run_count);
+  }
+  EXPECT_EQ(1, run_count);
+}
+
+TEST(BindObjcBlockTest, TestScopedClosureRunnerRelease) {
+  int run_count = 0;
+  int* ptr = &run_count;
+  base::OnceClosure c;
+  {
+    base::ScopedClosureRunner runner(base::BindBlock(^{
+        (*ptr)++;
+    }));
+    c = runner.Release();
+    EXPECT_EQ(0, run_count);
+  }
+  EXPECT_EQ(0, run_count);
+  std::move(c).Run();
+  EXPECT_EQ(1, run_count);
+}
+
+TEST(BindObjcBlockTest, TestReturnValue) {
+  const int kReturnValue = 42;
+  base::Callback<int(void)> c = base::BindBlock(^{return kReturnValue;});
+  EXPECT_EQ(kReturnValue, c.Run());
+}
+
+TEST(BindObjcBlockTest, TestArgument) {
+  const int kArgument = 42;
+  base::Callback<int(int)> c = base::BindBlock(^(int a){return a + 1;});
+  EXPECT_EQ(kArgument + 1, c.Run(kArgument));
+}
+
+TEST(BindObjcBlockTest, TestTwoArguments) {
+  std::string result;
+  std::string* ptr = &result;
+  base::Callback<void(const std::string&, const std::string&)> c =
+      base::BindBlock(^(const std::string& a, const std::string& b) {
+          *ptr = a + b;
+      });
+  c.Run("forty", "two");
+  EXPECT_EQ(result, "fortytwo");
+}
+
+TEST(BindObjcBlockTest, TestThreeArguments) {
+  std::string result;
+  std::string* ptr = &result;
+  base::Callback<void(const std::string&,
+                      const std::string&,
+                      const std::string&)> c =
+      base::BindBlock(^(const std::string& a,
+                        const std::string& b,
+                        const std::string& c) {
+          *ptr = a + b + c;
+      });
+  c.Run("six", "times", "nine");
+  EXPECT_EQ(result, "sixtimesnine");
+}
+
+TEST(BindObjcBlockTest, TestSixArguments) {
+  std::string result1;
+  std::string* ptr = &result1;
+  int result2;
+  int* ptr2 = &result2;
+  base::Callback<void(int, int, const std::string&, const std::string&,
+                      int, const std::string&)> c =
+      base::BindBlock(^(int a, int b, const std::string& c,
+                        const std::string& d, int e, const std::string& f) {
+          *ptr = c + d + f;
+          *ptr2 = a + b + e;
+      });
+  c.Run(1, 2, "infinite", "improbability", 3, "drive");
+  EXPECT_EQ(result1, "infiniteimprobabilitydrive");
+  EXPECT_EQ(result2, 6);
+}
+
+#if defined(OS_IOS)
+
+TEST(BindObjcBlockTest, TestBlockReleased) {
+  base::WeakNSObject<NSObject> weak_nsobject;
+  {
+    base::mac::ScopedNSAutoreleasePool autorelease_pool;
+    NSObject* nsobject = [[[NSObject alloc] init] autorelease];
+    weak_nsobject.reset(nsobject);
+
+    auto callback = base::BindBlock(^{
+      [nsobject description];
+    });
+  }
+  EXPECT_NSEQ(nil, weak_nsobject);
+}
+
+#endif
+
+}  // namespace
diff --git a/base/mac/bind_objc_block_unittest_arc.mm b/base/mac/bind_objc_block_unittest_arc.mm
new file mode 100644
index 0000000..24ec974
--- /dev/null
+++ b/base/mac/bind_objc_block_unittest_arc.mm
@@ -0,0 +1,125 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import "base/mac/bind_objc_block.h"
+
+#include <string>
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/callback_helpers.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/gtest_mac.h"
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+#error "This file requires ARC support."
+#endif
+
+namespace {
+
+TEST(BindObjcBlockTestARC, TestScopedClosureRunnerExitScope) {
+  int run_count = 0;
+  int* ptr = &run_count;
+  {
+    base::ScopedClosureRunner runner(base::BindBlockArc(^{
+      (*ptr)++;
+    }));
+    EXPECT_EQ(0, run_count);
+  }
+  EXPECT_EQ(1, run_count);
+}
+
+TEST(BindObjcBlockTestARC, TestScopedClosureRunnerRelease) {
+  int run_count = 0;
+  int* ptr = &run_count;
+  base::OnceClosure c;
+  {
+    base::ScopedClosureRunner runner(base::BindBlockArc(^{
+      (*ptr)++;
+    }));
+    c = runner.Release();
+    EXPECT_EQ(0, run_count);
+  }
+  EXPECT_EQ(0, run_count);
+  std::move(c).Run();
+  EXPECT_EQ(1, run_count);
+}
+
+TEST(BindObjcBlockTestARC, TestReturnValue) {
+  const int kReturnValue = 42;
+  base::Callback<int(void)> c = base::BindBlockArc(^{
+    return kReturnValue;
+  });
+  EXPECT_EQ(kReturnValue, c.Run());
+}
+
+TEST(BindObjcBlockTestARC, TestArgument) {
+  const int kArgument = 42;
+  base::Callback<int(int)> c = base::BindBlockArc(^(int a) {
+    return a + 1;
+  });
+  EXPECT_EQ(kArgument + 1, c.Run(kArgument));
+}
+
+TEST(BindObjcBlockTestARC, TestTwoArguments) {
+  std::string result;
+  std::string* ptr = &result;
+  base::Callback<void(const std::string&, const std::string&)> c =
+      base::BindBlockArc(^(const std::string& a, const std::string& b) {
+        *ptr = a + b;
+      });
+  c.Run("forty", "two");
+  EXPECT_EQ(result, "fortytwo");
+}
+
+TEST(BindObjcBlockTestARC, TestThreeArguments) {
+  std::string result;
+  std::string* ptr = &result;
+  base::Callback<void(const std::string&, const std::string&,
+                      const std::string&)>
+      c = base::BindBlockArc(
+          ^(const std::string& a, const std::string& b, const std::string& c) {
+            *ptr = a + b + c;
+          });
+  c.Run("six", "times", "nine");
+  EXPECT_EQ(result, "sixtimesnine");
+}
+
+TEST(BindObjcBlockTestARC, TestSixArguments) {
+  std::string result1;
+  std::string* ptr = &result1;
+  int result2;
+  int* ptr2 = &result2;
+  base::Callback<void(int, int, const std::string&, const std::string&, int,
+                      const std::string&)>
+      c = base::BindBlockArc(^(int a, int b, const std::string& c,
+                               const std::string& d, int e,
+                               const std::string& f) {
+        *ptr = c + d + f;
+        *ptr2 = a + b + e;
+      });
+  c.Run(1, 2, "infinite", "improbability", 3, "drive");
+  EXPECT_EQ(result1, "infiniteimprobabilitydrive");
+  EXPECT_EQ(result2, 6);
+}
+
+#if defined(OS_IOS)
+
+TEST(BindObjcBlockTestARC, TestBlockReleased) {
+  __weak NSObject* weak_nsobject;
+  @autoreleasepool {
+    NSObject* nsobject = [[NSObject alloc] init];
+    weak_nsobject = nsobject;
+
+    auto callback = base::BindBlockArc(^{
+      [nsobject description];
+    });
+  }
+  EXPECT_NSEQ(nil, weak_nsobject);
+}
+
+#endif
+
+}  // namespace
diff --git a/base/mac/bundle_locations.h b/base/mac/bundle_locations.h
new file mode 100644
index 0000000..5cc44ba
--- /dev/null
+++ b/base/mac/bundle_locations.h
@@ -0,0 +1,66 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_BUNDLE_LOCATIONS_H_
+#define BASE_MAC_BUNDLE_LOCATIONS_H_
+
+#include "base/base_export.h"
+#include "base/files/file_path.h"
+
+#if defined(__OBJC__)
+#import <Foundation/Foundation.h>
+#else  // __OBJC__
+class NSBundle;
+#endif  // __OBJC__
+
+namespace base {
+
+class FilePath;
+
+namespace mac {
+
+// This file provides several functions to explicitly request the various
+// component bundles of Chrome.  Please use these methods rather than calling
+// +[NSBundle mainBundle] or CFBundleGetMainBundle().
+//
+// Terminology
+//  - "Outer Bundle" - This is the main bundle for Chrome; it's what
+//  +[NSBundle mainBundle] returns when Chrome is launched normally.
+//
+//  - "Main Bundle" - This is the bundle from which Chrome was launched.
+//  This will be the same as the outer bundle except when Chrome is launched
+//  via an app shortcut, in which case this will return the app shortcut's
+//  bundle rather than the main Chrome bundle.
+//
+//  - "Framework Bundle" - This is the bundle corresponding to the Chrome
+//  framework.
+//
+// Guidelines for use:
+//  - To access a resource, the Framework bundle should be used.
+//  - If the choice is between the Outer or Main bundles then please choose
+//  carefully.  Most often the Outer bundle will be the right choice, but for
+//  cases such as adding an app to the "launch on startup" list, the Main
+//  bundle is probably the one to use.
+
+// Methods for retrieving the various bundles.
+BASE_EXPORT NSBundle* MainBundle();
+BASE_EXPORT FilePath MainBundlePath();
+BASE_EXPORT NSBundle* OuterBundle();
+BASE_EXPORT FilePath OuterBundlePath();
+BASE_EXPORT NSBundle* FrameworkBundle();
+BASE_EXPORT FilePath FrameworkBundlePath();
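+
+// For example, to access a resource per the guidelines above (the resource
+// name here is illustrative only):
+//
+//   NSString* path = [base::mac::FrameworkBundle() pathForResource:@"logo"
+//                                                           ofType:@"png"];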
+
+// Set the bundle that the preceding functions will return, overriding the
+// default values. Restore the default by passing in |nil|.
+BASE_EXPORT void SetOverrideOuterBundle(NSBundle* bundle);
+BASE_EXPORT void SetOverrideFrameworkBundle(NSBundle* bundle);
+
+// Same as above but accepting a FilePath argument.
+BASE_EXPORT void SetOverrideOuterBundlePath(const FilePath& file_path);
+BASE_EXPORT void SetOverrideFrameworkBundlePath(const FilePath& file_path);
+
+}  // namespace mac
+}  // namespace base
+
+#endif  // BASE_MAC_BUNDLE_LOCATIONS_H_
diff --git a/base/mac/bundle_locations.mm b/base/mac/bundle_locations.mm
new file mode 100644
index 0000000..54021b8
--- /dev/null
+++ b/base/mac/bundle_locations.mm
@@ -0,0 +1,83 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac/bundle_locations.h"
+
+#include "base/logging.h"
+#include "base/mac/foundation_util.h"
+#include "base/strings/sys_string_conversions.h"
+
+namespace base {
+namespace mac {
+
+// NSBundle isn't threadsafe; all functions in this file must be called on the
+// main thread.
+static NSBundle* g_override_framework_bundle = nil;
+static NSBundle* g_override_outer_bundle = nil;
+
+NSBundle* MainBundle() {
+  return [NSBundle mainBundle];
+}
+
+FilePath MainBundlePath() {
+  NSBundle* bundle = MainBundle();
+  return NSStringToFilePath([bundle bundlePath]);
+}
+
+NSBundle* OuterBundle() {
+  if (g_override_outer_bundle)
+    return g_override_outer_bundle;
+  return [NSBundle mainBundle];
+}
+
+FilePath OuterBundlePath() {
+  NSBundle* bundle = OuterBundle();
+  return NSStringToFilePath([bundle bundlePath]);
+}
+
+NSBundle* FrameworkBundle() {
+  if (g_override_framework_bundle)
+    return g_override_framework_bundle;
+  return [NSBundle mainBundle];
+}
+
+FilePath FrameworkBundlePath() {
+  NSBundle* bundle = FrameworkBundle();
+  return NSStringToFilePath([bundle bundlePath]);
+}
+
+static void AssignOverrideBundle(NSBundle* new_bundle,
+                                 NSBundle** override_bundle) {
+  if (new_bundle != *override_bundle) {
+    [*override_bundle release];
+    *override_bundle = [new_bundle retain];
+  }
+}
+
+static void AssignOverridePath(const FilePath& file_path,
+                               NSBundle** override_bundle) {
+  NSString* path = base::SysUTF8ToNSString(file_path.value());
+  NSBundle* new_bundle = [NSBundle bundleWithPath:path];
+  DCHECK(new_bundle) << "Failed to load the bundle at " << file_path.value();
+  AssignOverrideBundle(new_bundle, override_bundle);
+}
+
+void SetOverrideOuterBundle(NSBundle* bundle) {
+  AssignOverrideBundle(bundle, &g_override_outer_bundle);
+}
+
+void SetOverrideFrameworkBundle(NSBundle* bundle) {
+  AssignOverrideBundle(bundle, &g_override_framework_bundle);
+}
+
+void SetOverrideOuterBundlePath(const FilePath& file_path) {
+  AssignOverridePath(file_path, &g_override_outer_bundle);
+}
+
+void SetOverrideFrameworkBundlePath(const FilePath& file_path) {
+  AssignOverridePath(file_path, &g_override_framework_bundle);
+}
+
+}  // namespace mac
+}  // namespace base
diff --git a/base/mac/call_with_eh_frame.cc b/base/mac/call_with_eh_frame.cc
new file mode 100644
index 0000000..4578541
--- /dev/null
+++ b/base/mac/call_with_eh_frame.cc
@@ -0,0 +1,54 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac/call_with_eh_frame.h"
+
+#include <stdint.h>
+#include <unwind.h>
+
+#include "build/build_config.h"
+
+namespace base {
+namespace mac {
+
+#if defined(OS_IOS)
+// No iOS assembly implementation exists, so just call the block directly.
+void CallWithEHFrame(void (^block)(void)) {
+  block();
+}
+#else  // OS_MACOSX
+extern "C" _Unwind_Reason_Code __gxx_personality_v0(int,
+                                                    _Unwind_Action,
+                                                    uint64_t,
+                                                    struct _Unwind_Exception*,
+                                                    struct _Unwind_Context*);
+
+_Unwind_Reason_Code CxxPersonalityRoutine(
+    int version,
+    _Unwind_Action actions,
+    uint64_t exception_class,
+    struct _Unwind_Exception* exception_object,
+    struct _Unwind_Context* context) {
+  // Unwinding is a two-phase process: phase one searches for an exception
+  // handler, and phase two performs cleanup. For phase one, this custom
+  // personality will terminate the search. For phase two, this should delegate
+  // back to the standard personality routine.
+
+  if ((actions & _UA_SEARCH_PHASE) != 0) {
+    // Tell libunwind that this is the end of the stack. When it encounters the
+    // CallWithEHFrame, it will stop searching for an exception handler. The
+    // result is that no exception handler has been found higher on the stack,
+    // and any that are lower on the stack (e.g. in CFRunLoopRunSpecific), will
+    // now be skipped. Since this is reporting the end of the stack, and no
+    // exception handler will have been found, std::terminate() will be called.
+    return _URC_END_OF_STACK;
+  }
+
+  return __gxx_personality_v0(version, actions, exception_class,
+                              exception_object, context);
+}
+#endif  // defined(OS_IOS)
+
+}  // namespace mac
+}  // namespace base
diff --git a/base/mac/call_with_eh_frame.h b/base/mac/call_with_eh_frame.h
new file mode 100644
index 0000000..1f7d5e0
--- /dev/null
+++ b/base/mac/call_with_eh_frame.h
@@ -0,0 +1,26 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_CALL_WITH_EH_FRAME_H_
+#define BASE_MAC_CALL_WITH_EH_FRAME_H_
+
+#include "base/base_export.h"
+
+namespace base {
+namespace mac {
+
+// Invokes the specified block in a stack frame with a special exception
+// handler. This function creates an exception handling stack frame that
+// specifies a custom C++ exception personality routine, which terminates the
+// search for an exception handler at this frame.
+//
+// The purpose of this function is to prevent a try/catch statement in system
+// libraries, acting as a global exception handler, from handling exceptions
+// in such a way that disrupts the generation of useful stack traces.
+void BASE_EXPORT CallWithEHFrame(void (^block)(void));
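+
+// A usage sketch (the block body is illustrative only):
+//
+//   base::mac::CallWithEHFrame(^{
+//     // Work whose uncaught exceptions should terminate here rather than be
+//     // swallowed by a system-level @try/@catch higher on the stack.
+//   });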
+
+}  // namespace mac
+}  // namespace base
+
+#endif  // BASE_MAC_CALL_WITH_EH_FRAME_H_
diff --git a/base/mac/call_with_eh_frame_asm.S b/base/mac/call_with_eh_frame_asm.S
new file mode 100644
index 0000000..0e399cf
--- /dev/null
+++ b/base/mac/call_with_eh_frame_asm.S
@@ -0,0 +1,89 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// base::mac::CallWithEHFrame(void () block_pointer)
+#define CALL_WITH_EH_FRAME __ZN4base3mac15CallWithEHFrameEU13block_pointerFvvE
+
+  .section __TEXT,__text,regular,pure_instructions
+#if !defined(COMPONENT_BUILD)
+  .private_extern CALL_WITH_EH_FRAME
+#endif
+  .globl CALL_WITH_EH_FRAME
+  .align 4
+CALL_WITH_EH_FRAME:
+
+  .cfi_startproc
+
+  // Configure the C++ exception handler personality routine. Normally the
+  // compiler would emit ___gxx_personality_v0 here. The purpose of this
+  // function is to use a custom personality routine.
+  .cfi_personality 155, __ZN4base3mac21CxxPersonalityRoutineEi14_Unwind_ActionyP17_Unwind_ExceptionP15_Unwind_Context
+  .cfi_lsda 16, CallWithEHFrame_exception_table
+
+Lfunction_start:
+  pushq %rbp
+  .cfi_def_cfa_offset 16
+  .cfi_offset %rbp, -16
+  movq %rsp, %rbp
+  .cfi_def_cfa_register %rbp
+
+  // Load the function pointer from the block descriptor.
+  movq 16(%rdi), %rax
+
+  // Execute the block in the context of a C++ try{}.
+Ltry_start:
+  callq *%rax
+Ltry_end:
+  popq %rbp
+  ret
+
+  // Landing pad for the exception handler. This should never be called, since
+  // the personality routine will stop the search for an exception handler,
+  // which will cause the runtime to invoke the default terminate handler.
+Lcatch:
+  movq %rax, %rdi
+  callq ___cxa_begin_catch  // The ABI requires a call to the catch handler.
+  ud2  // In the event this is called, make it fatal.
+
+Lfunction_end:
+  .cfi_endproc
+
+// The C++ exception table that is used to identify this frame as an
+// exception handler. See http://llvm.org/docs/ExceptionHandling.html and
+// http://mentorembedded.github.io/cxx-abi/exceptions.pdf.
+  .section __TEXT,__gcc_except_tab
+  .align 2
+CallWithEHFrame_exception_table:
+  .byte 255  // DW_EH_PE_omit
+  .byte 155  // DW_EH_PE_indirect | DW_EH_PE_pcrel | DW_EH_PE_sdata4
+  .asciz "\242\200\200"  // ULEB128-encoded byte count of this table.
+  .byte 3  // DW_EH_PE_udata4
+  .byte 26  // Callsite table length.
+
+// First callsite.
+CS1_begin = Ltry_start - Lfunction_start
+  .long CS1_begin
+CS1_end = Ltry_end - Ltry_start
+  .long CS1_end
+
+// First landing pad.
+LP1 = Lcatch - Lfunction_start
+  .long LP1
+  .byte 1  // Action record.
+
+// Second callsite.
+CS2_begin = Ltry_end - Lfunction_start
+  .long CS2_begin
+CS2_end = Lfunction_end - Ltry_end
+  .long CS2_end
+
+// Second landing pad (none).
+  .long 0
+  .byte 0  // No action.
+
+// Action table.
+  .byte 1  // Action record 1.
+  .byte 0  // No further action to take.
+  .long 0  // No type filter for this catch(){} clause.
+  .align 2
diff --git a/base/mac/call_with_eh_frame_unittest.mm b/base/mac/call_with_eh_frame_unittest.mm
new file mode 100644
index 0000000..4dad822
--- /dev/null
+++ b/base/mac/call_with_eh_frame_unittest.mm
@@ -0,0 +1,55 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac/call_with_eh_frame.h"
+
+#import <Foundation/Foundation.h>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace mac {
+namespace {
+
+class CallWithEHFrameTest : public testing::Test {
+ protected:
+  void ThrowException() {
+    @throw [NSException exceptionWithName:@"TestException"
+                                   reason:@"Testing exceptions"
+                                 userInfo:nil];
+  }
+};
+
+// Catching from within the EHFrame is allowed.
+TEST_F(CallWithEHFrameTest, CatchExceptionHigher) {
+  bool __block saw_exception = false;
+  base::mac::CallWithEHFrame(^{
+    @try {
+      ThrowException();
+    } @catch (NSException* exception) {
+      saw_exception = true;
+    }
+  });
+  EXPECT_TRUE(saw_exception);
+}
+
+// Trying to catch an exception outside the EHFrame is blocked.
+TEST_F(CallWithEHFrameTest, CatchExceptionLower) {
+  auto catch_exception_lower = ^{
+    bool saw_exception = false;
+    @try {
+      base::mac::CallWithEHFrame(^{
+        ThrowException();
+      });
+    } @catch (NSException* exception) {
+      saw_exception = true;
+    }
+    ASSERT_FALSE(saw_exception);
+  };
+  EXPECT_DEATH(catch_exception_lower(), "");
+}
+
+}  // namespace
+}  // namespace mac
+}  // namespace base
diff --git a/base/mac/close_nocancel.cc b/base/mac/close_nocancel.cc
new file mode 100644
index 0000000..8971e73
--- /dev/null
+++ b/base/mac/close_nocancel.cc
@@ -0,0 +1,70 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// http://crbug.com/269623
+// http://openradar.appspot.com/14999594
+//
+// When the default version of close used on Mac OS X fails with EINTR, the
+// file descriptor is not in a deterministic state. It may have been closed,
+// or it may not have been. This makes it impossible to gracefully recover
+// from the error. If the close is retried after the FD has been closed, the
+// subsequent close can report EBADF, or worse, it can close an unrelated FD
+// opened by another thread. If the close is not retried after the FD has been
+// left open, the FD is leaked. Neither of these are good options.
+//
+// Mac OS X provides an alternate version of close, close$NOCANCEL. This
+// version will never fail with EINTR before the FD is actually closed. With
+// this version, it is thus safe to call close without checking for EINTR (as
+// the HANDLE_EINTR macro does) and not risk leaking the FD. In fact, mixing
+// this version of close with HANDLE_EINTR is hazardous.
+//
+// The $NOCANCEL variants of various system calls are activated by compiling
+// with __DARWIN_NON_CANCELABLE, which prevents them from being pthread
+// cancellation points. Rather than taking such a heavy-handed approach, this
+// file implements an alternative: to use the $NOCANCEL variant of close (thus
+// preventing it from being a pthread cancellation point) without affecting
+// any other system calls.
+//
+// This file operates by providing a close function with the non-$NOCANCEL
+// symbol name expected for the compilation environment as set by <unistd.h>
+// and <sys/cdefs.h> (the DARWIN_ALIAS_C macro). That name is set by an asm
+// label on the declaration of the close function, so the definition of that
+// function receives that name. The function calls the $NOCANCEL variant, which
+// is resolved from libsyscall. By linking with this version of close prior to
+// the libsyscall version, close's implementation is overridden.
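+//
+// The net effect, sketched (|fd| is any file descriptor):
+//
+//   int rv = close(fd);  // Resolves to the definition below.
+//   // Do NOT wrap this in HANDLE_EINTR: with this close, EINTR cannot leave
+//   // |fd| in an indeterminate state, and retrying is hazardous.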
+
+#include <sys/cdefs.h>
+#include <unistd.h>
+
+// If the non-cancelable variants of all system calls have already been
+// chosen, do nothing.
+#if !__DARWIN_NON_CANCELABLE
+
+extern "C" {
+
+#if !__DARWIN_ONLY_UNIX_CONFORMANCE
+
+// When there's a choice between the UNIX2003 and pre-UNIX2003 variants, there
+// is no plain close$NOCANCEL symbol, so use close$NOCANCEL$UNIX2003 as the
+// implementation. It does the same thing that close$NOCANCEL would do.
+#define close_implementation close$NOCANCEL$UNIX2003
+
+#else  // __DARWIN_ONLY_UNIX_CONFORMANCE
+
+// When only the UNIX2003-conformant variant exists, use close$NOCANCEL
+// directly:
+#define close_implementation close$NOCANCEL
+
+#endif
+
+int close_implementation(int fd);
+
+int close(int fd) {
+  return close_implementation(fd);
+}
+
+#undef close_implementation
+
+}  // extern "C"
+
+#endif  // !__DARWIN_NON_CANCELABLE
diff --git a/base/mac/dispatch_source_mach.cc b/base/mac/dispatch_source_mach.cc
new file mode 100644
index 0000000..0858f30
--- /dev/null
+++ b/base/mac/dispatch_source_mach.cc
@@ -0,0 +1,48 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac/dispatch_source_mach.h"
+
+namespace base {
+
+DispatchSourceMach::DispatchSourceMach(const char* name,
+                                       mach_port_t port,
+                                       void (^event_handler)())
+    // TODO(rsesek): Specify DISPATCH_QUEUE_SERIAL, available in the 10.7 SDK.
+    // NULL means the same thing but is not symbolically clear.
+    : DispatchSourceMach(dispatch_queue_create(name, NULL),
+                         port,
+                         event_handler) {
+  // Since the queue was created above in the delegated constructor and
+  // subsequently retained, release it here.
+  dispatch_release(queue_);
+}
+
+DispatchSourceMach::DispatchSourceMach(dispatch_queue_t queue,
+                                       mach_port_t port,
+                                       void (^event_handler)())
+    : queue_(queue, base::scoped_policy::RETAIN),
+      source_(dispatch_source_create(DISPATCH_SOURCE_TYPE_MACH_RECV,
+          port, 0, queue_)),
+      source_canceled_(dispatch_semaphore_create(0)) {
+  dispatch_source_set_event_handler(source_, event_handler);
+  dispatch_source_set_cancel_handler(source_, ^{
+      dispatch_semaphore_signal(source_canceled_);
+  });
+}
+
+DispatchSourceMach::~DispatchSourceMach() {
+  // Cancel the source and wait for the semaphore to be signaled. This will
+  // ensure the source managed by this class is not used after it is freed.
+  dispatch_source_cancel(source_);
+  source_.reset();
+
+  dispatch_semaphore_wait(source_canceled_, DISPATCH_TIME_FOREVER);
+}
+
+void DispatchSourceMach::Resume() {
+  dispatch_resume(source_);
+}
+
+}  // namespace base
diff --git a/base/mac/dispatch_source_mach.h b/base/mac/dispatch_source_mach.h
new file mode 100644
index 0000000..336125f
--- /dev/null
+++ b/base/mac/dispatch_source_mach.h
@@ -0,0 +1,58 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_DISPATCH_SOURCE_MACH_H_
+#define BASE_MAC_DISPATCH_SOURCE_MACH_H_
+
+#include <dispatch/dispatch.h>
+
+#include "base/base_export.h"
+#include "base/mac/scoped_dispatch_object.h"
+#include "base/macros.h"
+
+namespace base {
+
+// This class encapsulates a MACH_RECV dispatch source. When this object is
+// destroyed, the source is cancelled, and the destructor waits for the source
+// to stop executing work. The source can run on either a user-supplied queue
+// or a queue this class creates for it.
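+//
+// Example (a sketch; |port| is a Mach receive right owned by the caller):
+//
+//   DispatchSourceMach source("org.example.MyQueue", port, ^{
+//     // Receive and handle one message from |port| here.
+//   });
+//   source.Resume();  // No messages are delivered until Resume() is called.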
+class BASE_EXPORT DispatchSourceMach {
+ public:
+  // Creates a new dispatch source for the |port| and schedules it on a new
+  // queue that will be created with |name|. When a Mach message is received,
+  // the |event_handler| will be called.
+  DispatchSourceMach(const char* name,
+                     mach_port_t port,
+                     void (^event_handler)());
+
+  // Creates a new dispatch source with the same semantics as above, but rather
+  // than creating a new queue, it schedules the source on |queue|.
+  DispatchSourceMach(dispatch_queue_t queue,
+                     mach_port_t port,
+                     void (^event_handler)());
+
+  // Cancels the source and waits for it to become fully cancelled before
+  // releasing the source.
+  ~DispatchSourceMach();
+
+  // Resumes the source. This must be called before any Mach messages can
+  // be received.
+  void Resume();
+
+ private:
+  // The dispatch queue used to service the source_.
+  ScopedDispatchObject<dispatch_queue_t> queue_;
+
+  // A MACH_RECV dispatch source.
+  ScopedDispatchObject<dispatch_source_t> source_;
+
+  // Semaphore used to wait on the |source_|'s cancellation in the destructor.
+  ScopedDispatchObject<dispatch_semaphore_t> source_canceled_;
+
+  DISALLOW_COPY_AND_ASSIGN(DispatchSourceMach);
+};
+
+}  // namespace base
+
+#endif  // BASE_MAC_DISPATCH_SOURCE_MACH_H_
diff --git a/base/mac/dispatch_source_mach_unittest.cc b/base/mac/dispatch_source_mach_unittest.cc
new file mode 100644
index 0000000..738a137
--- /dev/null
+++ b/base/mac/dispatch_source_mach_unittest.cc
@@ -0,0 +1,125 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac/dispatch_source_mach.h"
+
+#include <mach/mach.h>
+
+#include <memory>
+
+#include "base/logging.h"
+#include "base/mac/scoped_mach_port.h"
+#include "base/test/test_timeouts.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+class DispatchSourceMachTest : public testing::Test {
+ public:
+  void SetUp() override {
+    mach_port_t port = MACH_PORT_NULL;
+    ASSERT_EQ(KERN_SUCCESS, mach_port_allocate(mach_task_self(),
+        MACH_PORT_RIGHT_RECEIVE, &port));
+    receive_right_.reset(port);
+
+    ASSERT_EQ(KERN_SUCCESS, mach_port_insert_right(mach_task_self(), port,
+        port, MACH_MSG_TYPE_MAKE_SEND));
+    send_right_.reset(port);
+  }
+
+  mach_port_t GetPort() { return receive_right_.get(); }
+
+  void WaitForSemaphore(dispatch_semaphore_t semaphore) {
+    dispatch_semaphore_wait(semaphore, dispatch_time(
+        DISPATCH_TIME_NOW,
+        TestTimeouts::action_timeout().InSeconds() * NSEC_PER_SEC));
+  }
+
+ private:
+  base::mac::ScopedMachReceiveRight receive_right_;
+  base::mac::ScopedMachSendRight send_right_;
+};
+
+TEST_F(DispatchSourceMachTest, ReceiveAfterResume) {
+  dispatch_semaphore_t signal = dispatch_semaphore_create(0);
+  mach_port_t port = GetPort();
+
+  bool __block did_receive = false;
+  DispatchSourceMach source("org.chromium.base.test.ReceiveAfterResume",
+      port, ^{
+          mach_msg_empty_rcv_t msg = {{0}};
+          msg.header.msgh_size = sizeof(msg);
+          msg.header.msgh_local_port = port;
+          mach_msg_receive(&msg.header);
+          did_receive = true;
+
+          dispatch_semaphore_signal(signal);
+      });
+
+  mach_msg_empty_send_t msg = {{0}};
+  msg.header.msgh_size = sizeof(msg);
+  msg.header.msgh_remote_port = port;
+  msg.header.msgh_bits = MACH_MSGH_BITS_REMOTE(MACH_MSG_TYPE_COPY_SEND);
+  ASSERT_EQ(KERN_SUCCESS, mach_msg_send(&msg.header));
+
+  EXPECT_FALSE(did_receive);
+
+  source.Resume();
+
+  WaitForSemaphore(signal);
+  dispatch_release(signal);
+
+  EXPECT_TRUE(did_receive);
+}
+
+TEST_F(DispatchSourceMachTest, NoMessagesAfterDestruction) {
+  mach_port_t port = GetPort();
+
+  std::unique_ptr<int> count(new int(0));
+  int* __block count_ptr = count.get();
+
+  std::unique_ptr<DispatchSourceMach> source(new DispatchSourceMach(
+      "org.chromium.base.test.NoMessagesAfterDestruction", port, ^{
+        mach_msg_empty_rcv_t msg = {{0}};
+        msg.header.msgh_size = sizeof(msg);
+        msg.header.msgh_local_port = port;
+        mach_msg_receive(&msg.header);
+        LOG(INFO) << "Receive " << *count_ptr;
+        ++(*count_ptr);
+      }));
+  source->Resume();
+
+  dispatch_queue_t queue =
+      dispatch_queue_create("org.chromium.base.test.MessageSend", NULL);
+  dispatch_semaphore_t signal = dispatch_semaphore_create(0);
+  for (int i = 0; i < 30; ++i) {
+    dispatch_async(queue, ^{
+        mach_msg_empty_send_t msg = {{0}};
+        msg.header.msgh_size = sizeof(msg);
+        msg.header.msgh_remote_port = port;
+        msg.header.msgh_bits =
+            MACH_MSGH_BITS_REMOTE(MACH_MSG_TYPE_COPY_SEND);
+        mach_msg_send(&msg.header);
+    });
+
+    // After the sixth message has been enqueued (i == 5), shut down the
+    // source and taint the pointer the handler dereferences. The test will
+    // crash if |count_ptr| is dereferenced after the source is destroyed.
+    if (i == 5) {
+      std::unique_ptr<DispatchSourceMach>* source_ptr = &source;
+      dispatch_async(queue, ^{
+          source_ptr->reset();
+          count_ptr = reinterpret_cast<int*>(0xdeaddead);
+          dispatch_semaphore_signal(signal);
+      });
+    }
+  }
+
+  WaitForSemaphore(signal);
+  dispatch_release(signal);
+
+  dispatch_release(queue);
+}
+
+}  // namespace base
diff --git a/base/mac/foundation_util.h b/base/mac/foundation_util.h
new file mode 100644
index 0000000..abdfdf3
--- /dev/null
+++ b/base/mac/foundation_util.h
@@ -0,0 +1,411 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_FOUNDATION_UTIL_H_
+#define BASE_MAC_FOUNDATION_UTIL_H_
+
+#include <CoreFoundation/CoreFoundation.h>
+
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/mac/scoped_cftyperef.h"
+#include "build/build_config.h"
+
+#if defined(__OBJC__)
+#import <Foundation/Foundation.h>
+@class NSFont;
+@class UIFont;
+#else  // __OBJC__
+#include <CoreFoundation/CoreFoundation.h>
+class NSBundle;
+class NSFont;
+class NSString;
+class UIFont;
+#endif  // __OBJC__
+
+#if defined(OS_IOS)
+#include <CoreText/CoreText.h>
+#else
+#include <ApplicationServices/ApplicationServices.h>
+#endif
+
+// Adapted from NSObjCRuntime.h NS_ENUM definition (used in Foundation starting
+// with the OS X 10.8 SDK and the iOS 6.0 SDK).
+#if __has_extension(cxx_strong_enums) && \
+    (defined(OS_IOS) || (defined(MAC_OS_X_VERSION_10_8) && \
+                         MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_8))
+#define CR_FORWARD_ENUM(_type, _name) enum _name : _type _name
+#else
+#define CR_FORWARD_ENUM(_type, _name) _type _name
+#endif
+
+// Adapted from NSPathUtilities.h and NSObjCRuntime.h.
+#if __LP64__ || NS_BUILD_32_LIKE_64
+typedef CR_FORWARD_ENUM(unsigned long, NSSearchPathDirectory);
+typedef unsigned long NSSearchPathDomainMask;
+#else
+typedef CR_FORWARD_ENUM(unsigned int, NSSearchPathDirectory);
+typedef unsigned int NSSearchPathDomainMask;
+#endif
+
+typedef struct OpaqueSecTrustRef* SecACLRef;
+typedef struct OpaqueSecTrustedApplicationRef* SecTrustedApplicationRef;
+
+#if defined(OS_IOS)
+typedef struct CF_BRIDGED_TYPE(id) __SecKey* SecKeyRef;
+typedef struct CF_BRIDGED_TYPE(id) __SecPolicy* SecPolicyRef;
+#else
+typedef struct OpaqueSecKeyRef* SecKeyRef;
+typedef struct OpaqueSecPolicyRef* SecPolicyRef;
+#endif
+
+namespace base {
+
+class FilePath;
+
+namespace mac {
+
+// Returns true if the application is running from a bundle.
+BASE_EXPORT bool AmIBundled();
+BASE_EXPORT void SetOverrideAmIBundled(bool value);
+
+#if defined(UNIT_TEST)
+// This is required because instantiating some tests requires checking the
+// directory structure, which sets the AmIBundled cache state. Individual tests
+// may or may not be bundled, and this would trip them up if the cache weren't
+// cleared. This should not be called from individual tests, just from test
+// instantiation code that gets a path from PathService.
+BASE_EXPORT void ClearAmIBundledCache();
+#endif
+
+// Returns true if this process is marked as a "Background only process".
+BASE_EXPORT bool IsBackgroundOnlyProcess();
+
+// Returns the path to a resource within the framework bundle.
+BASE_EXPORT FilePath PathForFrameworkBundleResource(CFStringRef resourceName);
+
+// Returns the creator code associated with the CFBundleRef at bundle.
+OSType CreatorCodeForCFBundleRef(CFBundleRef bundle);
+
+// Returns the creator code associated with this application, by calling
+// CreatorCodeForCFBundleRef for the application's main bundle.  If this
+// information cannot be determined, returns kUnknownType ('????').  This
+// does not respect the override app bundle because it's based on CFBundle
+// instead of NSBundle, and because callers probably don't want the override
+// app bundle's creator code anyway.
+BASE_EXPORT OSType CreatorCodeForApplication();
+
+// Searches for directories for the given key in only the given |domain_mask|.
+// If found, fills result (which must always be non-NULL) with the
+// first found directory and returns true.  Otherwise, returns false.
+BASE_EXPORT bool GetSearchPathDirectory(NSSearchPathDirectory directory,
+                                        NSSearchPathDomainMask domain_mask,
+                                        FilePath* result);
+
+// Searches for directories for the given key in only the local domain.
+// If found, fills result (which must always be non-NULL) with the
+// first found directory and returns true.  Otherwise, returns false.
+BASE_EXPORT bool GetLocalDirectory(NSSearchPathDirectory directory,
+                                   FilePath* result);
+
+// Searches for directories for the given key in only the user domain.
+// If found, fills result (which must always be non-NULL) with the
+// first found directory and returns true.  Otherwise, returns false.
+BASE_EXPORT bool GetUserDirectory(NSSearchPathDirectory directory,
+                                  FilePath* result);
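+//
+// Example (a sketch):
+//
+//   base::FilePath caches_dir;
+//   if (base::mac::GetUserDirectory(NSCachesDirectory, &caches_dir)) {
+//     // Use |caches_dir|, e.g. ~/Library/Caches.
+//   }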
+
+// Returns the ~/Library directory.
+BASE_EXPORT FilePath GetUserLibraryPath();
+
+// Takes a path to an (executable) binary and tries to provide the path to an
+// application bundle containing it. It takes the outermost bundle that it can
+// find (so for "/Foo/Bar.app/.../Baz.app/..." it produces "/Foo/Bar.app").
+//   |exec_name| - path to the binary
+//   returns - path to the application bundle, or empty on error
+BASE_EXPORT FilePath GetAppBundlePath(const FilePath& exec_name);
+
+#define TYPE_NAME_FOR_CF_TYPE_DECL(TypeCF) \
+BASE_EXPORT std::string TypeNameForCFType(TypeCF##Ref);
+
+TYPE_NAME_FOR_CF_TYPE_DECL(CFArray);
+TYPE_NAME_FOR_CF_TYPE_DECL(CFBag);
+TYPE_NAME_FOR_CF_TYPE_DECL(CFBoolean);
+TYPE_NAME_FOR_CF_TYPE_DECL(CFData);
+TYPE_NAME_FOR_CF_TYPE_DECL(CFDate);
+TYPE_NAME_FOR_CF_TYPE_DECL(CFDictionary);
+TYPE_NAME_FOR_CF_TYPE_DECL(CFNull);
+TYPE_NAME_FOR_CF_TYPE_DECL(CFNumber);
+TYPE_NAME_FOR_CF_TYPE_DECL(CFSet);
+TYPE_NAME_FOR_CF_TYPE_DECL(CFString);
+TYPE_NAME_FOR_CF_TYPE_DECL(CFURL);
+TYPE_NAME_FOR_CF_TYPE_DECL(CFUUID);
+
+TYPE_NAME_FOR_CF_TYPE_DECL(CGColor);
+
+TYPE_NAME_FOR_CF_TYPE_DECL(CTFont);
+TYPE_NAME_FOR_CF_TYPE_DECL(CTRun);
+
+TYPE_NAME_FOR_CF_TYPE_DECL(SecKey);
+TYPE_NAME_FOR_CF_TYPE_DECL(SecPolicy);
+
+#undef TYPE_NAME_FOR_CF_TYPE_DECL
+
+// Retain/release calls for memory management in C++.
+BASE_EXPORT void NSObjectRetain(void* obj);
+BASE_EXPORT void NSObjectRelease(void* obj);
+
+// CFTypeRefToNSObjectAutorelease transfers ownership of a Core Foundation
+// object (one derived from CFTypeRef) to the Foundation memory management
+// system.  In a traditional managed-memory environment, cf_object is
+// autoreleased and returned as an NSObject.  In a garbage-collected
+// environment, cf_object is marked as eligible for garbage collection.
+//
+// This function should only be used to convert a concrete CFTypeRef type to
+// its equivalent "toll-free bridged" NSObject subclass, for example,
+// converting a CFStringRef to NSString.
+//
+// By calling this function, callers relinquish any ownership claim to
+// cf_object.  In a managed-memory environment, the object's ownership will be
+// managed by the innermost NSAutoreleasePool, so after this function returns,
+// callers should not assume that cf_object is valid any longer than the
+// returned NSObject.
+//
+// Returns an id, typed here for C++'s sake as a void*.
+BASE_EXPORT void* CFTypeRefToNSObjectAutorelease(CFTypeRef cf_object);
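+//
+// Example (a sketch; CreateGreeting() is a hypothetical function returning an
+// owned CFStringRef):
+//
+//   CFStringRef cf_greeting = CreateGreeting();
+//   NSString* ns_greeting = static_cast<NSString*>(
+//       CFTypeRefToNSObjectAutorelease(cf_greeting));
+//   // |cf_greeting| is no longer owned here; |ns_greeting| is autoreleased.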
+
+// Returns the base bundle ID, which can be set by SetBaseBundleID but
+// defaults to a reasonable string. This never returns NULL. BaseBundleID
+// returns a pointer to static storage that must not be freed.
+BASE_EXPORT const char* BaseBundleID();
+
+// Sets the base bundle ID to override the default. The implementation will
+// make its own copy of new_base_bundle_id.
+BASE_EXPORT void SetBaseBundleID(const char* new_base_bundle_id);
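+//
+// Example (a sketch):
+//
+//   base::mac::SetBaseBundleID("org.example.MyApp");
+//   const char* bundle_id = base::mac::BaseBundleID();  // "org.example.MyApp"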
+
+}  // namespace mac
+}  // namespace base
+
+#if !defined(__OBJC__)
+#define OBJC_CPP_CLASS_DECL(x) class x;
+#else  // __OBJC__
+#define OBJC_CPP_CLASS_DECL(x)
+#endif  // __OBJC__
+
+// Convert toll-free bridged CFTypes to NSTypes and vice-versa. This does not
+// autorelease |cf_val|. This is useful for the case where there is a CFType in
+// a call that expects an NSType and the compiler is complaining about const
+// casting problems.
+// The calls are used like this:
+// NSString *foo = CFToNSCast(CFSTR("Hello"));
+// CFStringRef foo2 = NSToCFCast(@"Hello");
+// The macro magic below is to enforce safe casting. It could possibly have
+// been done using template function specialization, but template function
+// specialization doesn't always work intuitively
+// (http://www.gotw.ca/publications/mill17.htm), so the trusty combination
+// of macros and function overloading is used instead.
+
+#define CF_TO_NS_CAST_DECL(TypeCF, TypeNS) \
+OBJC_CPP_CLASS_DECL(TypeNS) \
+\
+namespace base { \
+namespace mac { \
+BASE_EXPORT TypeNS* CFToNSCast(TypeCF##Ref cf_val); \
+BASE_EXPORT TypeCF##Ref NSToCFCast(TypeNS* ns_val); \
+} \
+}
+
+#define CF_TO_NS_MUTABLE_CAST_DECL(name) \
+CF_TO_NS_CAST_DECL(CF##name, NS##name) \
+OBJC_CPP_CLASS_DECL(NSMutable##name) \
+\
+namespace base { \
+namespace mac { \
+BASE_EXPORT NSMutable##name* CFToNSCast(CFMutable##name##Ref cf_val); \
+BASE_EXPORT CFMutable##name##Ref NSToCFCast(NSMutable##name* ns_val); \
+} \
+}
+
+// List of toll-free bridged types taken from:
+// http://www.cocoadev.com/index.pl?TollFreeBridged
+
+CF_TO_NS_MUTABLE_CAST_DECL(Array);
+CF_TO_NS_MUTABLE_CAST_DECL(AttributedString);
+CF_TO_NS_CAST_DECL(CFCalendar, NSCalendar);
+CF_TO_NS_MUTABLE_CAST_DECL(CharacterSet);
+CF_TO_NS_MUTABLE_CAST_DECL(Data);
+CF_TO_NS_CAST_DECL(CFDate, NSDate);
+CF_TO_NS_MUTABLE_CAST_DECL(Dictionary);
+CF_TO_NS_CAST_DECL(CFError, NSError);
+CF_TO_NS_CAST_DECL(CFLocale, NSLocale);
+CF_TO_NS_CAST_DECL(CFNumber, NSNumber);
+CF_TO_NS_CAST_DECL(CFRunLoopTimer, NSTimer);
+CF_TO_NS_CAST_DECL(CFTimeZone, NSTimeZone);
+CF_TO_NS_MUTABLE_CAST_DECL(Set);
+CF_TO_NS_CAST_DECL(CFReadStream, NSInputStream);
+CF_TO_NS_CAST_DECL(CFWriteStream, NSOutputStream);
+CF_TO_NS_MUTABLE_CAST_DECL(String);
+CF_TO_NS_CAST_DECL(CFURL, NSURL);
+
+#if defined(OS_IOS)
+CF_TO_NS_CAST_DECL(CTFont, UIFont);
+#else
+CF_TO_NS_CAST_DECL(CTFont, NSFont);
+#endif
+
+#undef CF_TO_NS_CAST_DECL
+#undef CF_TO_NS_MUTABLE_CAST_DECL
+#undef OBJC_CPP_CLASS_DECL
+
+namespace base {
+namespace mac {
+
+// CFCast<>() and CFCastStrict<>() cast a basic CFTypeRef to a more
+// specific CoreFoundation type. The compatibility of the passed
+// object is found by comparing its opaque type against the
+// requested type identifier. If the supplied object is not
+// compatible with the requested return type, CFCast<>() returns
+// NULL and CFCastStrict<>() will DCHECK. Providing a NULL pointer
+// to either variant results in NULL being returned without
+// triggering any DCHECK.
+//
+// Example usage:
+// CFNumberRef some_number = base::mac::CFCast<CFNumberRef>(
+//     CFArrayGetValueAtIndex(array, index));
+//
+// CFTypeRef hello = CFSTR("hello world");
+// CFStringRef some_string = base::mac::CFCastStrict<CFStringRef>(hello);
+
+template<typename T>
+T CFCast(const CFTypeRef& cf_val);
+
+template<typename T>
+T CFCastStrict(const CFTypeRef& cf_val);
+
+#define CF_CAST_DECL(TypeCF) \
+template<> BASE_EXPORT TypeCF##Ref \
+CFCast<TypeCF##Ref>(const CFTypeRef& cf_val);\
+\
+template<> BASE_EXPORT TypeCF##Ref \
+CFCastStrict<TypeCF##Ref>(const CFTypeRef& cf_val);
+
+CF_CAST_DECL(CFArray);
+CF_CAST_DECL(CFBag);
+CF_CAST_DECL(CFBoolean);
+CF_CAST_DECL(CFData);
+CF_CAST_DECL(CFDate);
+CF_CAST_DECL(CFDictionary);
+CF_CAST_DECL(CFNull);
+CF_CAST_DECL(CFNumber);
+CF_CAST_DECL(CFSet);
+CF_CAST_DECL(CFString);
+CF_CAST_DECL(CFURL);
+CF_CAST_DECL(CFUUID);
+
+CF_CAST_DECL(CGColor);
+
+CF_CAST_DECL(CTFont);
+CF_CAST_DECL(CTFontDescriptor);
+CF_CAST_DECL(CTRun);
+
+CF_CAST_DECL(SecACL);
+CF_CAST_DECL(SecKey);
+CF_CAST_DECL(SecPolicy);
+CF_CAST_DECL(SecTrustedApplication);
+
+#undef CF_CAST_DECL
+
+#if defined(__OBJC__)
+
+// ObjCCast<>() and ObjCCastStrict<>() cast a basic id to a more
+// specific (NSObject-derived) type. The compatibility of the passed
+// object is found by checking if it's a kind of the requested type
+// identifier. If the supplied object is not compatible with the
+// requested return type, ObjCCast<>() returns nil and
+// ObjCCastStrict<>() will DCHECK. Providing a nil pointer to either
+// variant results in nil being returned without triggering any DCHECK.
+//
+// The strict variant is useful when retrieving a value from a
+// collection which only has values of a specific type, e.g. an
+// NSArray of NSStrings. The non-strict variant is useful when
+// retrieving values from data that you can't fully control. For
+// example, a plist read from disk may be beyond your exclusive
+// control, so you'd only want to check that the values you retrieve
+// from it are of the expected types, but not crash if they're not.
+//
+// Example usage:
+// NSString* version = base::mac::ObjCCast<NSString>(
+//     [bundle objectForInfoDictionaryKey:@"CFBundleShortVersionString"]);
+//
+// NSString* str = base::mac::ObjCCastStrict<NSString>(
+//     [ns_arr_of_ns_strs objectAtIndex:0]);
+template<typename T>
+T* ObjCCast(id objc_val) {
+  if ([objc_val isKindOfClass:[T class]]) {
+    return reinterpret_cast<T*>(objc_val);
+  }
+  return nil;
+}
+
+template<typename T>
+T* ObjCCastStrict(id objc_val) {
+  T* rv = ObjCCast<T>(objc_val);
+  DCHECK(objc_val == nil || rv);
+  return rv;
+}
+
+#endif  // defined(__OBJC__)
+
+// Helper function for GetValueFromDictionary to create the error message
+// that appears when a type mismatch is encountered.
+BASE_EXPORT std::string GetValueFromDictionaryErrorMessage(
+    CFStringRef key, const std::string& expected_type, CFTypeRef value);
+
+// Utility function to pull out a value from a dictionary, check its type, and
+// return it. Returns NULL if the key is not present or of the wrong type.
+template<typename T>
+T GetValueFromDictionary(CFDictionaryRef dict, CFStringRef key) {
+  CFTypeRef value = CFDictionaryGetValue(dict, key);
+  T value_specific = CFCast<T>(value);
+
+  if (value && !value_specific) {
+    std::string expected_type = TypeNameForCFType(value_specific);
+    DLOG(WARNING) << GetValueFromDictionaryErrorMessage(key,
+                                                        expected_type,
+                                                        value);
+  }
+
+  return value_specific;
+}
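+
+// Example (a sketch; |dict| holds CFNumber values):
+//
+//   CFNumberRef count = GetValueFromDictionary<CFNumberRef>(
+//       dict, CFSTR("count"));  // NULL if absent or not a CFNumber.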
+
+// Converts |path| to an autoreleased NSString. Returns nil if |path| is empty.
+BASE_EXPORT NSString* FilePathToNSString(const FilePath& path);
+
+// Converts |str| to a FilePath. Returns an empty path if |str| is nil.
+BASE_EXPORT FilePath NSStringToFilePath(NSString* str);
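+
+// Example (a sketch):
+//
+//   NSString* ns_path = base::mac::FilePathToNSString(FilePath("/tmp/log"));
+//   FilePath path = base::mac::NSStringToFilePath(@"/tmp/log");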
+
+#if defined(__OBJC__)
+// Converts |range| to an NSRange, returning the new range in |range_out|.
+// Returns true if conversion was successful, false if the values of |range|
+// could not be converted to NSUIntegers.
+BASE_EXPORT bool CFRangeToNSRange(CFRange range,
+                                  NSRange* range_out) WARN_UNUSED_RESULT;
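+//
+// Example (a sketch):
+//
+//   NSRange ns_range;
+//   if (base::mac::CFRangeToNSRange(CFRangeMake(0, 5), &ns_range)) {
+//     // |ns_range| is now {0, 5}.
+//   }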
+#endif  // defined(__OBJC__)
+
+}  // namespace mac
+}  // namespace base
+
+// Stream operations for CFTypes. They can be used with NSTypes as well
+// by using the NSToCFCast methods above.
+// e.g. LOG(INFO) << base::mac::NSToCFCast(@"foo");
+// Operator << cannot be overloaded for Objective-C types, as the compiler
+// cannot distinguish overloads for id from overloads for void*.
+BASE_EXPORT extern std::ostream& operator<<(std::ostream& o,
+                                            const CFErrorRef err);
+BASE_EXPORT extern std::ostream& operator<<(std::ostream& o,
+                                            const CFStringRef str);
+
+#endif  // BASE_MAC_FOUNDATION_UTIL_H_
diff --git a/base/mac/foundation_util.mm b/base/mac/foundation_util.mm
new file mode 100644
index 0000000..15fc15b
--- /dev/null
+++ b/base/mac/foundation_util.mm
@@ -0,0 +1,484 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac/foundation_util.h"
+
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "base/files/file_path.h"
+#include "base/logging.h"
+#include "base/mac/bundle_locations.h"
+#include "base/mac/mac_logging.h"
+#include "base/macros.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/strings/sys_string_conversions.h"
+#include "build/build_config.h"
+
+#if !defined(OS_IOS)
+#import <AppKit/AppKit.h>
+#endif
+
+extern "C" {
+CFTypeID SecKeyGetTypeID();
+#if !defined(OS_IOS)
+CFTypeID SecACLGetTypeID();
+CFTypeID SecTrustedApplicationGetTypeID();
+Boolean _CFIsObjC(CFTypeID typeID, CFTypeRef obj);
+#endif
+}  // extern "C"
+
+namespace base {
+namespace mac {
+
+namespace {
+
+bool g_cached_am_i_bundled_called = false;
+bool g_cached_am_i_bundled_value = false;
+bool g_override_am_i_bundled = false;
+bool g_override_am_i_bundled_value = false;
+
+bool UncachedAmIBundled() {
+#if defined(OS_IOS)
+  // All apps are bundled on iOS.
+  return true;
+#else
+  if (g_override_am_i_bundled)
+    return g_override_am_i_bundled_value;
+
+  // Yes, this is cheap.
+  return [[base::mac::OuterBundle() bundlePath] hasSuffix:@".app"];
+#endif
+}
+
+}  // namespace
+
+bool AmIBundled() {
+  // If the return value is not cached, this function will return different
+  // values depending on when it's called. This confuses some client code, see
+  // http://crbug.com/63183 .
+  if (!g_cached_am_i_bundled_called) {
+    g_cached_am_i_bundled_called = true;
+    g_cached_am_i_bundled_value = UncachedAmIBundled();
+  }
+  DCHECK_EQ(g_cached_am_i_bundled_value, UncachedAmIBundled())
+      << "The return value of AmIBundled() changed. This will confuse tests. "
+      << "Call SetAmIBundled() override manually if your test binary "
+      << "delay-loads the framework.";
+  return g_cached_am_i_bundled_value;
+}
+
+void SetOverrideAmIBundled(bool value) {
+#if defined(OS_IOS)
+  // It doesn't make sense not to be bundled on iOS.
+  if (!value)
+    NOTREACHED();
+#endif
+  g_override_am_i_bundled = true;
+  g_override_am_i_bundled_value = value;
+}
+
+BASE_EXPORT void ClearAmIBundledCache() {
+  g_cached_am_i_bundled_called = false;
+}
+
+bool IsBackgroundOnlyProcess() {
+  // This function really does want to examine NSBundle's idea of the main
+  // bundle dictionary.  It needs to look at the actual running .app's
+  // Info.plist to access its LSUIElement property.
+  NSDictionary* info_dictionary = [base::mac::MainBundle() infoDictionary];
+  return [info_dictionary[@"LSUIElement"] boolValue] != NO;
+}
+
+FilePath PathForFrameworkBundleResource(CFStringRef resourceName) {
+  NSBundle* bundle = base::mac::FrameworkBundle();
+  NSString* resourcePath = [bundle pathForResource:(NSString*)resourceName
+                                            ofType:nil];
+  return NSStringToFilePath(resourcePath);
+}
+
+OSType CreatorCodeForCFBundleRef(CFBundleRef bundle) {
+  OSType creator = kUnknownType;
+  CFBundleGetPackageInfo(bundle, NULL, &creator);
+  return creator;
+}
+
+OSType CreatorCodeForApplication() {
+  CFBundleRef bundle = CFBundleGetMainBundle();
+  if (!bundle)
+    return kUnknownType;
+
+  return CreatorCodeForCFBundleRef(bundle);
+}
+
+bool GetSearchPathDirectory(NSSearchPathDirectory directory,
+                            NSSearchPathDomainMask domain_mask,
+                            FilePath* result) {
+  DCHECK(result);
+  NSArray<NSString*>* dirs =
+      NSSearchPathForDirectoriesInDomains(directory, domain_mask, YES);
+  if ([dirs count] < 1) {
+    return false;
+  }
+  *result = NSStringToFilePath(dirs[0]);
+  return true;
+}
+
+bool GetLocalDirectory(NSSearchPathDirectory directory, FilePath* result) {
+  return GetSearchPathDirectory(directory, NSLocalDomainMask, result);
+}
+
+bool GetUserDirectory(NSSearchPathDirectory directory, FilePath* result) {
+  return GetSearchPathDirectory(directory, NSUserDomainMask, result);
+}
+
+FilePath GetUserLibraryPath() {
+  FilePath user_library_path;
+  if (!GetUserDirectory(NSLibraryDirectory, &user_library_path)) {
+    DLOG(WARNING) << "Could not get user library path";
+  }
+  return user_library_path;
+}
+
+// Takes a path to an (executable) binary and tries to provide the path to an
+// application bundle containing it. It takes the outermost bundle that it can
+// find (so for "/Foo/Bar.app/.../Baz.app/..." it produces "/Foo/Bar.app").
+//   |exec_name| - path to the binary
+//   returns - path to the application bundle, or empty on error
+FilePath GetAppBundlePath(const FilePath& exec_name) {
+  const char kExt[] = ".app";
+  const size_t kExtLength = arraysize(kExt) - 1;
+
+  // Split the path into components.
+  std::vector<std::string> components;
+  exec_name.GetComponents(&components);
+
+  // It's an error if we don't get any components.
+  if (components.empty())
+    return FilePath();
+
+  // Don't prepend '/' to the first component.
+  std::vector<std::string>::const_iterator it = components.begin();
+  std::string bundle_name = *it;
+  DCHECK_GT(it->length(), 0U);
+  // If the first component ends in ".app", we're already done.
+  if (it->length() > kExtLength &&
+      !it->compare(it->length() - kExtLength, kExtLength, kExt, kExtLength))
+    return FilePath(bundle_name);
+
+  // The first component may be "/" or "//", etc. Only append '/' if it doesn't
+  // already end in '/'.
+  if (bundle_name.back() != '/')
+    bundle_name += '/';
+
+  // Go through the remaining components.
+  for (++it; it != components.end(); ++it) {
+    DCHECK_GT(it->length(), 0U);
+
+    bundle_name += *it;
+
+    // If the current component ends in ".app", we're done.
+    if (it->length() > kExtLength &&
+        !it->compare(it->length() - kExtLength, kExtLength, kExt, kExtLength))
+      return FilePath(bundle_name);
+
+    // Separate this component from the next one.
+    bundle_name += '/';
+  }
+
+  return FilePath();
+}
+
+#define TYPE_NAME_FOR_CF_TYPE_DEFN(TypeCF) \
+std::string TypeNameForCFType(TypeCF##Ref) { \
+  return #TypeCF; \
+}
+
+TYPE_NAME_FOR_CF_TYPE_DEFN(CFArray);
+TYPE_NAME_FOR_CF_TYPE_DEFN(CFBag);
+TYPE_NAME_FOR_CF_TYPE_DEFN(CFBoolean);
+TYPE_NAME_FOR_CF_TYPE_DEFN(CFData);
+TYPE_NAME_FOR_CF_TYPE_DEFN(CFDate);
+TYPE_NAME_FOR_CF_TYPE_DEFN(CFDictionary);
+TYPE_NAME_FOR_CF_TYPE_DEFN(CFNull);
+TYPE_NAME_FOR_CF_TYPE_DEFN(CFNumber);
+TYPE_NAME_FOR_CF_TYPE_DEFN(CFSet);
+TYPE_NAME_FOR_CF_TYPE_DEFN(CFString);
+TYPE_NAME_FOR_CF_TYPE_DEFN(CFURL);
+TYPE_NAME_FOR_CF_TYPE_DEFN(CFUUID);
+
+TYPE_NAME_FOR_CF_TYPE_DEFN(CGColor);
+
+TYPE_NAME_FOR_CF_TYPE_DEFN(CTFont);
+TYPE_NAME_FOR_CF_TYPE_DEFN(CTRun);
+
+#if !defined(OS_IOS)
+TYPE_NAME_FOR_CF_TYPE_DEFN(SecKey);
+TYPE_NAME_FOR_CF_TYPE_DEFN(SecPolicy);
+#endif
+
+#undef TYPE_NAME_FOR_CF_TYPE_DEFN
+
+void NSObjectRetain(void* obj) {
+  id<NSObject> nsobj = static_cast<id<NSObject> >(obj);
+  [nsobj retain];
+}
+
+void NSObjectRelease(void* obj) {
+  id<NSObject> nsobj = static_cast<id<NSObject> >(obj);
+  [nsobj release];
+}
+
+void* CFTypeRefToNSObjectAutorelease(CFTypeRef cf_object) {
+  // When GC is on, NSMakeCollectable marks cf_object for GC and autorelease
+  // is a no-op.
+  //
+  // In the traditional GC-less environment, NSMakeCollectable is a no-op,
+  // and cf_object is autoreleased, balancing out the caller's ownership claim.
+  //
+  // NSMakeCollectable returns nil when used on a NULL object.
+  return [NSMakeCollectable(cf_object) autorelease];
+}
+
+static const char* base_bundle_id;
+
+const char* BaseBundleID() {
+  if (base_bundle_id) {
+    return base_bundle_id;
+  }
+
+#if defined(GOOGLE_CHROME_BUILD)
+  return "com.google.Chrome";
+#else
+  return "org.chromium.Chromium";
+#endif
+}
+
+void SetBaseBundleID(const char* new_base_bundle_id) {
+  if (new_base_bundle_id != base_bundle_id) {
+    free((void*)base_bundle_id);
+    base_bundle_id = new_base_bundle_id ? strdup(new_base_bundle_id) : NULL;
+  }
+}
+
+// Definitions for the corresponding CF_TO_NS_CAST_DECL macros in
+// foundation_util.h.
+#define CF_TO_NS_CAST_DEFN(TypeCF, TypeNS) \
+\
+TypeNS* CFToNSCast(TypeCF##Ref cf_val) { \
+  DCHECK(!cf_val || TypeCF##GetTypeID() == CFGetTypeID(cf_val)); \
+  TypeNS* ns_val = \
+      const_cast<TypeNS*>(reinterpret_cast<const TypeNS*>(cf_val)); \
+  return ns_val; \
+} \
+\
+TypeCF##Ref NSToCFCast(TypeNS* ns_val) { \
+  TypeCF##Ref cf_val = reinterpret_cast<TypeCF##Ref>(ns_val); \
+  DCHECK(!cf_val || TypeCF##GetTypeID() == CFGetTypeID(cf_val)); \
+  return cf_val; \
+}
+
+#define CF_TO_NS_MUTABLE_CAST_DEFN(name) \
+CF_TO_NS_CAST_DEFN(CF##name, NS##name) \
+\
+NSMutable##name* CFToNSCast(CFMutable##name##Ref cf_val) { \
+  DCHECK(!cf_val || CF##name##GetTypeID() == CFGetTypeID(cf_val)); \
+  NSMutable##name* ns_val = reinterpret_cast<NSMutable##name*>(cf_val); \
+  return ns_val; \
+} \
+\
+CFMutable##name##Ref NSToCFCast(NSMutable##name* ns_val) { \
+  CFMutable##name##Ref cf_val = \
+      reinterpret_cast<CFMutable##name##Ref>(ns_val); \
+  DCHECK(!cf_val || CF##name##GetTypeID() == CFGetTypeID(cf_val)); \
+  return cf_val; \
+}
+
+CF_TO_NS_MUTABLE_CAST_DEFN(Array);
+CF_TO_NS_MUTABLE_CAST_DEFN(AttributedString);
+CF_TO_NS_CAST_DEFN(CFCalendar, NSCalendar);
+CF_TO_NS_MUTABLE_CAST_DEFN(CharacterSet);
+CF_TO_NS_MUTABLE_CAST_DEFN(Data);
+CF_TO_NS_CAST_DEFN(CFDate, NSDate);
+CF_TO_NS_MUTABLE_CAST_DEFN(Dictionary);
+CF_TO_NS_CAST_DEFN(CFError, NSError);
+CF_TO_NS_CAST_DEFN(CFLocale, NSLocale);
+CF_TO_NS_CAST_DEFN(CFNumber, NSNumber);
+CF_TO_NS_CAST_DEFN(CFRunLoopTimer, NSTimer);
+CF_TO_NS_CAST_DEFN(CFTimeZone, NSTimeZone);
+CF_TO_NS_MUTABLE_CAST_DEFN(Set);
+CF_TO_NS_CAST_DEFN(CFReadStream, NSInputStream);
+CF_TO_NS_CAST_DEFN(CFWriteStream, NSOutputStream);
+CF_TO_NS_MUTABLE_CAST_DEFN(String);
+CF_TO_NS_CAST_DEFN(CFURL, NSURL);
+
+#if defined(OS_IOS)
+CF_TO_NS_CAST_DEFN(CTFont, UIFont);
+#else
+// The NSFont/CTFont toll-free bridging is broken when it comes to type
+// checking, so do some special-casing.
+// http://www.openradar.me/15341349 rdar://15341349
+NSFont* CFToNSCast(CTFontRef cf_val) {
+  NSFont* ns_val =
+      const_cast<NSFont*>(reinterpret_cast<const NSFont*>(cf_val));
+  DCHECK(!cf_val ||
+         CTFontGetTypeID() == CFGetTypeID(cf_val) ||
+         (_CFIsObjC(CTFontGetTypeID(), cf_val) &&
+          [ns_val isKindOfClass:[NSFont class]]));
+  return ns_val;
+}
+
+CTFontRef NSToCFCast(NSFont* ns_val) {
+  CTFontRef cf_val = reinterpret_cast<CTFontRef>(ns_val);
+  DCHECK(!cf_val ||
+         CTFontGetTypeID() == CFGetTypeID(cf_val) ||
+         [ns_val isKindOfClass:[NSFont class]]);
+  return cf_val;
+}
+#endif
+
+#undef CF_TO_NS_CAST_DEFN
+#undef CF_TO_NS_MUTABLE_CAST_DEFN
+
+#define CF_CAST_DEFN(TypeCF) \
+template<> TypeCF##Ref \
+CFCast<TypeCF##Ref>(const CFTypeRef& cf_val) { \
+  if (cf_val == NULL) { \
+    return NULL; \
+  } \
+  if (CFGetTypeID(cf_val) == TypeCF##GetTypeID()) { \
+    return (TypeCF##Ref)(cf_val); \
+  } \
+  return NULL; \
+} \
+\
+template<> TypeCF##Ref \
+CFCastStrict<TypeCF##Ref>(const CFTypeRef& cf_val) { \
+  TypeCF##Ref rv = CFCast<TypeCF##Ref>(cf_val); \
+  DCHECK(cf_val == NULL || rv); \
+  return rv; \
+}
+
+CF_CAST_DEFN(CFArray);
+CF_CAST_DEFN(CFBag);
+CF_CAST_DEFN(CFBoolean);
+CF_CAST_DEFN(CFData);
+CF_CAST_DEFN(CFDate);
+CF_CAST_DEFN(CFDictionary);
+CF_CAST_DEFN(CFNull);
+CF_CAST_DEFN(CFNumber);
+CF_CAST_DEFN(CFSet);
+CF_CAST_DEFN(CFString);
+CF_CAST_DEFN(CFURL);
+CF_CAST_DEFN(CFUUID);
+
+CF_CAST_DEFN(CGColor);
+
+CF_CAST_DEFN(CTFontDescriptor);
+CF_CAST_DEFN(CTRun);
+
+#if defined(OS_IOS)
+CF_CAST_DEFN(CTFont);
+#else
+// The NSFont/CTFont toll-free bridging is broken when it comes to type
+// checking, so do some special-casing.
+// http://www.openradar.me/15341349 rdar://15341349
+template<> CTFontRef
+CFCast<CTFontRef>(const CFTypeRef& cf_val) {
+  if (cf_val == NULL) {
+    return NULL;
+  }
+  if (CFGetTypeID(cf_val) == CTFontGetTypeID()) {
+    return (CTFontRef)(cf_val);
+  }
+
+  if (!_CFIsObjC(CTFontGetTypeID(), cf_val))
+    return NULL;
+
+  id<NSObject> ns_val = reinterpret_cast<id>(const_cast<void*>(cf_val));
+  if ([ns_val isKindOfClass:[NSFont class]]) {
+    return (CTFontRef)(cf_val);
+  }
+  return NULL;
+}
+
+template<> CTFontRef
+CFCastStrict<CTFontRef>(const CFTypeRef& cf_val) {
+  CTFontRef rv = CFCast<CTFontRef>(cf_val);
+  DCHECK(cf_val == NULL || rv);
+  return rv;
+}
+#endif
+
+#if !defined(OS_IOS)
+CF_CAST_DEFN(SecACL);
+CF_CAST_DEFN(SecKey);
+CF_CAST_DEFN(SecPolicy);
+CF_CAST_DEFN(SecTrustedApplication);
+#endif
+
+#undef CF_CAST_DEFN
+
+std::string GetValueFromDictionaryErrorMessage(
+    CFStringRef key, const std::string& expected_type, CFTypeRef value) {
+  ScopedCFTypeRef<CFStringRef> actual_type_ref(
+      CFCopyTypeIDDescription(CFGetTypeID(value)));
+  return "Expected value for key " +
+      base::SysCFStringRefToUTF8(key) +
+      " to be " +
+      expected_type +
+      " but it was " +
+      base::SysCFStringRefToUTF8(actual_type_ref) +
+      " instead";
+}
+
+NSString* FilePathToNSString(const FilePath& path) {
+  if (path.empty())
+    return nil;
+  return @(path.value().c_str());  // @() does UTF8 conversion.
+}
+
+FilePath NSStringToFilePath(NSString* str) {
+  if (![str length])
+    return FilePath();
+  return FilePath([str fileSystemRepresentation]);
+}
+
+bool CFRangeToNSRange(CFRange range, NSRange* range_out) {
+  if (base::IsValueInRangeForNumericType<decltype(range_out->location)>(
+          range.location) &&
+      base::IsValueInRangeForNumericType<decltype(range_out->length)>(
+          range.length) &&
+      base::IsValueInRangeForNumericType<decltype(range_out->location)>(
+          range.location + range.length)) {
+    *range_out = NSMakeRange(range.location, range.length);
+    return true;
+  }
+  return false;
+}
+
+}  // namespace mac
+}  // namespace base
+
+std::ostream& operator<<(std::ostream& o, const CFStringRef string) {
+  return o << base::SysCFStringRefToUTF8(string);
+}
+
+std::ostream& operator<<(std::ostream& o, const CFErrorRef err) {
+  base::ScopedCFTypeRef<CFStringRef> desc(CFErrorCopyDescription(err));
+  base::ScopedCFTypeRef<CFDictionaryRef> user_info(CFErrorCopyUserInfo(err));
+  CFStringRef errorDesc = NULL;
+  if (user_info.get()) {
+    errorDesc = reinterpret_cast<CFStringRef>(
+        CFDictionaryGetValue(user_info.get(), kCFErrorDescriptionKey));
+  }
+  o << "Code: " << CFErrorGetCode(err)
+    << " Domain: " << CFErrorGetDomain(err)
+    << " Desc: " << desc.get();
+  if (errorDesc) {
+    o << "(" << errorDesc << ")";
+  }
+  return o;
+}
diff --git a/base/mac/foundation_util_unittest.mm b/base/mac/foundation_util_unittest.mm
new file mode 100644
index 0000000..a584094
--- /dev/null
+++ b/base/mac/foundation_util_unittest.mm
@@ -0,0 +1,405 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac/foundation_util.h"
+
+#include <limits.h>
+#include <stddef.h>
+
+#include "base/compiler_specific.h"
+#include "base/files/file_path.h"
+#include "base/format_macros.h"
+#include "base/mac/scoped_cftyperef.h"
+#include "base/mac/scoped_nsautorelease_pool.h"
+#include "base/macros.h"
+#include "base/strings/stringprintf.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#import "testing/gtest_mac.h"
+
+namespace base {
+namespace mac {
+
+TEST(FoundationUtilTest, CFCast) {
+  // Build out the CF types to be tested as empty containers.
+  ScopedCFTypeRef<CFTypeRef> test_array(
+      CFArrayCreate(NULL, NULL, 0, &kCFTypeArrayCallBacks));
+  ScopedCFTypeRef<CFTypeRef> test_array_mutable(
+      CFArrayCreateMutable(NULL, 0, &kCFTypeArrayCallBacks));
+  ScopedCFTypeRef<CFTypeRef> test_bag(
+      CFBagCreate(NULL, NULL, 0, &kCFTypeBagCallBacks));
+  ScopedCFTypeRef<CFTypeRef> test_bag_mutable(
+      CFBagCreateMutable(NULL, 0, &kCFTypeBagCallBacks));
+  CFTypeRef test_bool = kCFBooleanTrue;
+  ScopedCFTypeRef<CFTypeRef> test_data(
+      CFDataCreate(NULL, NULL, 0));
+  ScopedCFTypeRef<CFTypeRef> test_data_mutable(
+      CFDataCreateMutable(NULL, 0));
+  ScopedCFTypeRef<CFTypeRef> test_date(
+      CFDateCreate(NULL, 0));
+  ScopedCFTypeRef<CFTypeRef> test_dict(
+      CFDictionaryCreate(NULL, NULL, NULL, 0,
+                         &kCFCopyStringDictionaryKeyCallBacks,
+                         &kCFTypeDictionaryValueCallBacks));
+  ScopedCFTypeRef<CFTypeRef> test_dict_mutable(
+      CFDictionaryCreateMutable(NULL, 0,
+                                &kCFCopyStringDictionaryKeyCallBacks,
+                                &kCFTypeDictionaryValueCallBacks));
+  int int_val = 256;
+  ScopedCFTypeRef<CFTypeRef> test_number(
+      CFNumberCreate(NULL, kCFNumberIntType, &int_val));
+  CFTypeRef test_null = kCFNull;
+  ScopedCFTypeRef<CFTypeRef> test_set(
+      CFSetCreate(NULL, NULL, 0, &kCFTypeSetCallBacks));
+  ScopedCFTypeRef<CFTypeRef> test_set_mutable(
+      CFSetCreateMutable(NULL, 0, &kCFTypeSetCallBacks));
+  ScopedCFTypeRef<CFTypeRef> test_str(
+      CFStringCreateWithBytes(NULL, NULL, 0, kCFStringEncodingASCII, false));
+  CFTypeRef test_str_const = CFSTR("hello");
+  ScopedCFTypeRef<CFTypeRef> test_str_mutable(CFStringCreateMutable(NULL, 0));
+
+  // Make sure the allocations of CF types are good.
+  EXPECT_TRUE(test_array);
+  EXPECT_TRUE(test_array_mutable);
+  EXPECT_TRUE(test_bag);
+  EXPECT_TRUE(test_bag_mutable);
+  EXPECT_TRUE(test_bool);
+  EXPECT_TRUE(test_data);
+  EXPECT_TRUE(test_data_mutable);
+  EXPECT_TRUE(test_date);
+  EXPECT_TRUE(test_dict);
+  EXPECT_TRUE(test_dict_mutable);
+  EXPECT_TRUE(test_number);
+  EXPECT_TRUE(test_null);
+  EXPECT_TRUE(test_set);
+  EXPECT_TRUE(test_set_mutable);
+  EXPECT_TRUE(test_str);
+  EXPECT_TRUE(test_str_const);
+  EXPECT_TRUE(test_str_mutable);
+
+  // Casting the CFTypeRef objects correctly provides the same pointer.
+  EXPECT_EQ(test_array, CFCast<CFArrayRef>(test_array));
+  EXPECT_EQ(test_array_mutable, CFCast<CFArrayRef>(test_array_mutable));
+  EXPECT_EQ(test_bag, CFCast<CFBagRef>(test_bag));
+  EXPECT_EQ(test_bag_mutable, CFCast<CFBagRef>(test_bag_mutable));
+  EXPECT_EQ(test_bool, CFCast<CFBooleanRef>(test_bool));
+  EXPECT_EQ(test_data, CFCast<CFDataRef>(test_data));
+  EXPECT_EQ(test_data_mutable, CFCast<CFDataRef>(test_data_mutable));
+  EXPECT_EQ(test_date, CFCast<CFDateRef>(test_date));
+  EXPECT_EQ(test_dict, CFCast<CFDictionaryRef>(test_dict));
+  EXPECT_EQ(test_dict_mutable, CFCast<CFDictionaryRef>(test_dict_mutable));
+  EXPECT_EQ(test_number, CFCast<CFNumberRef>(test_number));
+  EXPECT_EQ(test_null, CFCast<CFNullRef>(test_null));
+  EXPECT_EQ(test_set, CFCast<CFSetRef>(test_set));
+  EXPECT_EQ(test_set_mutable, CFCast<CFSetRef>(test_set_mutable));
+  EXPECT_EQ(test_str, CFCast<CFStringRef>(test_str));
+  EXPECT_EQ(test_str_const, CFCast<CFStringRef>(test_str_const));
+  EXPECT_EQ(test_str_mutable, CFCast<CFStringRef>(test_str_mutable));
+
+  // When given an incorrect CF cast, provide NULL.
+  EXPECT_FALSE(CFCast<CFStringRef>(test_array));
+  EXPECT_FALSE(CFCast<CFStringRef>(test_array_mutable));
+  EXPECT_FALSE(CFCast<CFStringRef>(test_bag));
+  EXPECT_FALSE(CFCast<CFSetRef>(test_bag_mutable));
+  EXPECT_FALSE(CFCast<CFSetRef>(test_bool));
+  EXPECT_FALSE(CFCast<CFNullRef>(test_data));
+  EXPECT_FALSE(CFCast<CFDictionaryRef>(test_data_mutable));
+  EXPECT_FALSE(CFCast<CFDictionaryRef>(test_date));
+  EXPECT_FALSE(CFCast<CFNumberRef>(test_dict));
+  EXPECT_FALSE(CFCast<CFDateRef>(test_dict_mutable));
+  EXPECT_FALSE(CFCast<CFDataRef>(test_number));
+  EXPECT_FALSE(CFCast<CFDataRef>(test_null));
+  EXPECT_FALSE(CFCast<CFBooleanRef>(test_set));
+  EXPECT_FALSE(CFCast<CFBagRef>(test_set_mutable));
+  EXPECT_FALSE(CFCast<CFBagRef>(test_str));
+  EXPECT_FALSE(CFCast<CFArrayRef>(test_str_const));
+  EXPECT_FALSE(CFCast<CFArrayRef>(test_str_mutable));
+
+  // Giving a NULL provides a NULL.
+  EXPECT_FALSE(CFCast<CFArrayRef>(NULL));
+  EXPECT_FALSE(CFCast<CFBagRef>(NULL));
+  EXPECT_FALSE(CFCast<CFBooleanRef>(NULL));
+  EXPECT_FALSE(CFCast<CFDataRef>(NULL));
+  EXPECT_FALSE(CFCast<CFDateRef>(NULL));
+  EXPECT_FALSE(CFCast<CFDictionaryRef>(NULL));
+  EXPECT_FALSE(CFCast<CFNullRef>(NULL));
+  EXPECT_FALSE(CFCast<CFNumberRef>(NULL));
+  EXPECT_FALSE(CFCast<CFSetRef>(NULL));
+  EXPECT_FALSE(CFCast<CFStringRef>(NULL));
+
+  // CFCastStrict: correct cast results in correct pointer being returned.
+  EXPECT_EQ(test_array, CFCastStrict<CFArrayRef>(test_array));
+  EXPECT_EQ(test_array_mutable, CFCastStrict<CFArrayRef>(test_array_mutable));
+  EXPECT_EQ(test_bag, CFCastStrict<CFBagRef>(test_bag));
+  EXPECT_EQ(test_bag_mutable, CFCastStrict<CFBagRef>(test_bag_mutable));
+  EXPECT_EQ(test_bool, CFCastStrict<CFBooleanRef>(test_bool));
+  EXPECT_EQ(test_data, CFCastStrict<CFDataRef>(test_data));
+  EXPECT_EQ(test_data_mutable, CFCastStrict<CFDataRef>(test_data_mutable));
+  EXPECT_EQ(test_date, CFCastStrict<CFDateRef>(test_date));
+  EXPECT_EQ(test_dict, CFCastStrict<CFDictionaryRef>(test_dict));
+  EXPECT_EQ(test_dict_mutable,
+            CFCastStrict<CFDictionaryRef>(test_dict_mutable));
+  EXPECT_EQ(test_number, CFCastStrict<CFNumberRef>(test_number));
+  EXPECT_EQ(test_null, CFCastStrict<CFNullRef>(test_null));
+  EXPECT_EQ(test_set, CFCastStrict<CFSetRef>(test_set));
+  EXPECT_EQ(test_set_mutable, CFCastStrict<CFSetRef>(test_set_mutable));
+  EXPECT_EQ(test_str, CFCastStrict<CFStringRef>(test_str));
+  EXPECT_EQ(test_str_const, CFCastStrict<CFStringRef>(test_str_const));
+  EXPECT_EQ(test_str_mutable, CFCastStrict<CFStringRef>(test_str_mutable));
+
+  // CFCastStrict: Giving a NULL provides a NULL.
+  EXPECT_FALSE(CFCastStrict<CFArrayRef>(NULL));
+  EXPECT_FALSE(CFCastStrict<CFBagRef>(NULL));
+  EXPECT_FALSE(CFCastStrict<CFBooleanRef>(NULL));
+  EXPECT_FALSE(CFCastStrict<CFDataRef>(NULL));
+  EXPECT_FALSE(CFCastStrict<CFDateRef>(NULL));
+  EXPECT_FALSE(CFCastStrict<CFDictionaryRef>(NULL));
+  EXPECT_FALSE(CFCastStrict<CFNullRef>(NULL));
+  EXPECT_FALSE(CFCastStrict<CFNumberRef>(NULL));
+  EXPECT_FALSE(CFCastStrict<CFSetRef>(NULL));
+  EXPECT_FALSE(CFCastStrict<CFStringRef>(NULL));
+}
+
+TEST(FoundationUtilTest, ObjCCast) {
+  ScopedNSAutoreleasePool pool;
+
+  id test_array = @[];
+  id test_array_mutable = [NSMutableArray array];
+  id test_data = [NSData data];
+  id test_data_mutable = [NSMutableData dataWithCapacity:10];
+  id test_date = [NSDate date];
+  id test_dict = @{ @"meaning" : @42 };
+  id test_dict_mutable = [NSMutableDictionary dictionaryWithCapacity:10];
+  id test_number = @42;
+  id test_null = [NSNull null];
+  id test_set = [NSSet setWithObject:@"string object"];
+  id test_set_mutable = [NSMutableSet setWithCapacity:10];
+  id test_str = [NSString string];
+  id test_str_const = @"bonjour";
+  id test_str_mutable = [NSMutableString stringWithCapacity:10];
+
+  // Make sure the allocations of NS types are good.
+  EXPECT_TRUE(test_array);
+  EXPECT_TRUE(test_array_mutable);
+  EXPECT_TRUE(test_data);
+  EXPECT_TRUE(test_data_mutable);
+  EXPECT_TRUE(test_date);
+  EXPECT_TRUE(test_dict);
+  EXPECT_TRUE(test_dict_mutable);
+  EXPECT_TRUE(test_number);
+  EXPECT_TRUE(test_null);
+  EXPECT_TRUE(test_set);
+  EXPECT_TRUE(test_set_mutable);
+  EXPECT_TRUE(test_str);
+  EXPECT_TRUE(test_str_const);
+  EXPECT_TRUE(test_str_mutable);
+
+  // Casting the id correctly provides the same pointer.
+  EXPECT_EQ(test_array, ObjCCast<NSArray>(test_array));
+  EXPECT_EQ(test_array_mutable, ObjCCast<NSArray>(test_array_mutable));
+  EXPECT_EQ(test_data, ObjCCast<NSData>(test_data));
+  EXPECT_EQ(test_data_mutable, ObjCCast<NSData>(test_data_mutable));
+  EXPECT_EQ(test_date, ObjCCast<NSDate>(test_date));
+  EXPECT_EQ(test_dict, ObjCCast<NSDictionary>(test_dict));
+  EXPECT_EQ(test_dict_mutable, ObjCCast<NSDictionary>(test_dict_mutable));
+  EXPECT_EQ(test_number, ObjCCast<NSNumber>(test_number));
+  EXPECT_EQ(test_null, ObjCCast<NSNull>(test_null));
+  EXPECT_EQ(test_set, ObjCCast<NSSet>(test_set));
+  EXPECT_EQ(test_set_mutable, ObjCCast<NSSet>(test_set_mutable));
+  EXPECT_EQ(test_str, ObjCCast<NSString>(test_str));
+  EXPECT_EQ(test_str_const, ObjCCast<NSString>(test_str_const));
+  EXPECT_EQ(test_str_mutable, ObjCCast<NSString>(test_str_mutable));
+
+  // When given an incorrect ObjC cast, provide nil.
+  EXPECT_FALSE(ObjCCast<NSString>(test_array));
+  EXPECT_FALSE(ObjCCast<NSString>(test_array_mutable));
+  EXPECT_FALSE(ObjCCast<NSString>(test_data));
+  EXPECT_FALSE(ObjCCast<NSString>(test_data_mutable));
+  EXPECT_FALSE(ObjCCast<NSSet>(test_date));
+  EXPECT_FALSE(ObjCCast<NSSet>(test_dict));
+  EXPECT_FALSE(ObjCCast<NSNumber>(test_dict_mutable));
+  EXPECT_FALSE(ObjCCast<NSNull>(test_number));
+  EXPECT_FALSE(ObjCCast<NSDictionary>(test_null));
+  EXPECT_FALSE(ObjCCast<NSDictionary>(test_set));
+  EXPECT_FALSE(ObjCCast<NSDate>(test_set_mutable));
+  EXPECT_FALSE(ObjCCast<NSData>(test_str));
+  EXPECT_FALSE(ObjCCast<NSData>(test_str_const));
+  EXPECT_FALSE(ObjCCast<NSArray>(test_str_mutable));
+
+  // Giving a nil provides a nil.
+  EXPECT_FALSE(ObjCCast<NSArray>(nil));
+  EXPECT_FALSE(ObjCCast<NSData>(nil));
+  EXPECT_FALSE(ObjCCast<NSDate>(nil));
+  EXPECT_FALSE(ObjCCast<NSDictionary>(nil));
+  EXPECT_FALSE(ObjCCast<NSNull>(nil));
+  EXPECT_FALSE(ObjCCast<NSNumber>(nil));
+  EXPECT_FALSE(ObjCCast<NSSet>(nil));
+  EXPECT_FALSE(ObjCCast<NSString>(nil));
+
+  // ObjCCastStrict: correct cast results in correct pointer being returned.
+  EXPECT_EQ(test_array, ObjCCastStrict<NSArray>(test_array));
+  EXPECT_EQ(test_array_mutable,
+            ObjCCastStrict<NSArray>(test_array_mutable));
+  EXPECT_EQ(test_data, ObjCCastStrict<NSData>(test_data));
+  EXPECT_EQ(test_data_mutable,
+            ObjCCastStrict<NSData>(test_data_mutable));
+  EXPECT_EQ(test_date, ObjCCastStrict<NSDate>(test_date));
+  EXPECT_EQ(test_dict, ObjCCastStrict<NSDictionary>(test_dict));
+  EXPECT_EQ(test_dict_mutable,
+            ObjCCastStrict<NSDictionary>(test_dict_mutable));
+  EXPECT_EQ(test_number, ObjCCastStrict<NSNumber>(test_number));
+  EXPECT_EQ(test_null, ObjCCastStrict<NSNull>(test_null));
+  EXPECT_EQ(test_set, ObjCCastStrict<NSSet>(test_set));
+  EXPECT_EQ(test_set_mutable,
+            ObjCCastStrict<NSSet>(test_set_mutable));
+  EXPECT_EQ(test_str, ObjCCastStrict<NSString>(test_str));
+  EXPECT_EQ(test_str_const,
+            ObjCCastStrict<NSString>(test_str_const));
+  EXPECT_EQ(test_str_mutable,
+            ObjCCastStrict<NSString>(test_str_mutable));
+
+  // ObjCCastStrict: Giving a nil provides a nil.
+  EXPECT_FALSE(ObjCCastStrict<NSArray>(nil));
+  EXPECT_FALSE(ObjCCastStrict<NSData>(nil));
+  EXPECT_FALSE(ObjCCastStrict<NSDate>(nil));
+  EXPECT_FALSE(ObjCCastStrict<NSDictionary>(nil));
+  EXPECT_FALSE(ObjCCastStrict<NSNull>(nil));
+  EXPECT_FALSE(ObjCCastStrict<NSNumber>(nil));
+  EXPECT_FALSE(ObjCCastStrict<NSSet>(nil));
+  EXPECT_FALSE(ObjCCastStrict<NSString>(nil));
+}
+
+TEST(FoundationUtilTest, GetValueFromDictionary) {
+  int one = 1, two = 2, three = 3;
+
+  ScopedCFTypeRef<CFNumberRef> cf_one(
+      CFNumberCreate(kCFAllocatorDefault, kCFNumberIntType, &one));
+  ScopedCFTypeRef<CFNumberRef> cf_two(
+      CFNumberCreate(kCFAllocatorDefault, kCFNumberIntType, &two));
+  ScopedCFTypeRef<CFNumberRef> cf_three(
+      CFNumberCreate(kCFAllocatorDefault, kCFNumberIntType, &three));
+
+  CFStringRef keys[] = { CFSTR("one"), CFSTR("two"), CFSTR("three") };
+  CFNumberRef values[] = { cf_one, cf_two, cf_three };
+
+  static_assert(arraysize(keys) == arraysize(values),
+                "keys and values arrays must have the same size");
+
+  ScopedCFTypeRef<CFDictionaryRef> test_dict(
+      CFDictionaryCreate(kCFAllocatorDefault,
+                         reinterpret_cast<const void**>(keys),
+                         reinterpret_cast<const void**>(values),
+                         arraysize(values),
+                         &kCFCopyStringDictionaryKeyCallBacks,
+                         &kCFTypeDictionaryValueCallBacks));
+
+  // GetValueFromDictionary<>(_, _) should produce the expected output.
+  EXPECT_EQ(values[0],
+            GetValueFromDictionary<CFNumberRef>(test_dict, CFSTR("one")));
+  EXPECT_EQ(values[1],
+            GetValueFromDictionary<CFNumberRef>(test_dict, CFSTR("two")));
+  EXPECT_EQ(values[2],
+            GetValueFromDictionary<CFNumberRef>(test_dict, CFSTR("three")));
+
+  // A missing key or a value of the wrong type should produce NULL.
+  EXPECT_FALSE(GetValueFromDictionary<CFNumberRef>(test_dict, CFSTR("four")));
+  EXPECT_FALSE(GetValueFromDictionary<CFStringRef>(test_dict, CFSTR("one")));
+}
+
+TEST(FoundationUtilTest, FilePathToNSString) {
+  EXPECT_NSEQ(nil, FilePathToNSString(FilePath()));
+  EXPECT_NSEQ(@"/a/b", FilePathToNSString(FilePath("/a/b")));
+}
+
+TEST(FoundationUtilTest, NSStringToFilePath) {
+  EXPECT_EQ(FilePath(), NSStringToFilePath(nil));
+  EXPECT_EQ(FilePath(), NSStringToFilePath(@""));
+  EXPECT_EQ(FilePath("/a/b"), NSStringToFilePath(@"/a/b"));
+}
+
+TEST(FoundationUtilTest, CFRangeToNSRange) {
+  NSRange range_out;
+  EXPECT_TRUE(CFRangeToNSRange(CFRangeMake(10, 5), &range_out));
+  EXPECT_EQ(10UL, range_out.location);
+  EXPECT_EQ(5UL, range_out.length);
+  EXPECT_FALSE(CFRangeToNSRange(CFRangeMake(-1, 5), &range_out));
+  EXPECT_FALSE(CFRangeToNSRange(CFRangeMake(5, -1), &range_out));
+  EXPECT_FALSE(CFRangeToNSRange(CFRangeMake(-1, -1), &range_out));
+  EXPECT_FALSE(CFRangeToNSRange(CFRangeMake(LONG_MAX, LONG_MAX), &range_out));
+  EXPECT_FALSE(CFRangeToNSRange(CFRangeMake(LONG_MIN, LONG_MAX), &range_out));
+}
+
+TEST(StringNumberConversionsTest, FormatNSInteger) {
+  // The PRI[dxu]NS macros assume that NSInteger is a typedef for "int" on
+  // 32-bit architectures and a typedef for "long" on 64-bit architectures
+  // (respectively "unsigned int" and "unsigned long" for NSUInteger). Use
+  // pointer incompatibility to validate this at compilation.
+#if defined(ARCH_CPU_64_BITS)
+  typedef long FormatNSIntegerAsType;
+  typedef unsigned long FormatNSUIntegerAsType;
+#else
+  typedef int FormatNSIntegerAsType;
+  typedef unsigned int FormatNSUIntegerAsType;
+#endif  // defined(ARCH_CPU_64_BITS)
+
+  NSInteger some_nsinteger;
+  FormatNSIntegerAsType* pointer_to_some_nsinteger = &some_nsinteger;
+  ALLOW_UNUSED_LOCAL(pointer_to_some_nsinteger);
+
+  NSUInteger some_nsuinteger;
+  FormatNSUIntegerAsType* pointer_to_some_nsuinteger = &some_nsuinteger;
+  ALLOW_UNUSED_LOCAL(pointer_to_some_nsuinteger);
+
+  // Check that format specifier works correctly for NSInteger.
+  const struct {
+    NSInteger value;
+    const char* expected;
+    const char* expected_hex;
+  } nsinteger_cases[] = {
+#if !defined(ARCH_CPU_64_BITS)
+    {12345678, "12345678", "bc614e"},
+    {-12345678, "-12345678", "ff439eb2"},
+#else
+    {12345678, "12345678", "bc614e"},
+    {-12345678, "-12345678", "ffffffffff439eb2"},
+    {137451299150l, "137451299150", "2000bc614e"},
+    {-137451299150l, "-137451299150", "ffffffdfff439eb2"},
+#endif  // !defined(ARCH_CPU_64_BITS)
+  };
+
+  for (size_t i = 0; i < arraysize(nsinteger_cases); ++i) {
+    EXPECT_EQ(nsinteger_cases[i].expected,
+              StringPrintf("%" PRIdNS, nsinteger_cases[i].value));
+    EXPECT_EQ(nsinteger_cases[i].expected_hex,
+              StringPrintf("%" PRIxNS, nsinteger_cases[i].value));
+  }
+
+  // Check that format specifier works correctly for NSUInteger.
+  const struct {
+    NSUInteger value;
+    const char* expected;
+    const char* expected_hex;
+  } nsuinteger_cases[] = {
+#if !defined(ARCH_CPU_64_BITS)
+    {12345678u, "12345678", "bc614e"},
+    {4282621618u, "4282621618", "ff439eb2"},
+#else
+    {12345678u, "12345678", "bc614e"},
+    {4282621618u, "4282621618", "ff439eb2"},
+    {137451299150ul, "137451299150", "2000bc614e"},
+    {18446743936258252466ul, "18446743936258252466", "ffffffdfff439eb2"},
+#endif  // !defined(ARCH_CPU_64_BITS)
+  };
+
+  for (size_t i = 0; i < arraysize(nsuinteger_cases); ++i) {
+    EXPECT_EQ(nsuinteger_cases[i].expected,
+              StringPrintf("%" PRIuNS, nsuinteger_cases[i].value));
+    EXPECT_EQ(nsuinteger_cases[i].expected_hex,
+              StringPrintf("%" PRIxNS, nsuinteger_cases[i].value));
+  }
+}
+
+}  // namespace mac
+}  // namespace base
diff --git a/base/mac/launch_services_util.h b/base/mac/launch_services_util.h
new file mode 100644
index 0000000..30d1eec
--- /dev/null
+++ b/base/mac/launch_services_util.h
@@ -0,0 +1,31 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_LAUNCH_SERVICES_UTIL_H_
+#define BASE_MAC_LAUNCH_SERVICES_UTIL_H_
+
+#import <AppKit/AppKit.h>
+
+#include "base/base_export.h"
+#include "base/command_line.h"
+#include "base/files/file_path.h"
+#include "base/process/process.h"
+
+namespace base {
+namespace mac {
+
+// Launches the application bundle at |bundle_path|, passing argv[1..] from
+// |command_line| as command line arguments if the app isn't already running.
+// |launch_options| are passed directly to
+// -[NSWorkspace launchApplicationAtURL:options:configuration:error:].
+// Returns a valid process if the app was successfully launched.
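+//
+// Illustrative usage (the bundle path and surrounding code are hypothetical):
+//   Process process = OpenApplicationWithPath(
+//       FilePath("/Applications/Example.app"), command_line,
+//       NSWorkspaceLaunchDefault);
+//   if (process.IsValid()) {
+//     // The app was launched.
+//   }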
+BASE_EXPORT Process
+OpenApplicationWithPath(const FilePath& bundle_path,
+                        const CommandLine& command_line,
+                        NSWorkspaceLaunchOptions launch_options);
+
+}  // namespace mac
+}  // namespace base
+
+#endif  // BASE_MAC_LAUNCH_SERVICES_UTIL_H_
diff --git a/base/mac/launch_services_util.mm b/base/mac/launch_services_util.mm
new file mode 100644
index 0000000..fa6e808
--- /dev/null
+++ b/base/mac/launch_services_util.mm
@@ -0,0 +1,52 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import "base/mac/launch_services_util.h"
+
+#include "base/logging.h"
+#include "base/strings/sys_string_conversions.h"
+
+namespace base {
+namespace mac {
+
+Process OpenApplicationWithPath(const base::FilePath& bundle_path,
+                                const CommandLine& command_line,
+                                NSWorkspaceLaunchOptions launch_options) {
+  NSString* bundle_url_spec = base::SysUTF8ToNSString(bundle_path.value());
+  NSURL* bundle_url = [NSURL fileURLWithPath:bundle_url_spec isDirectory:YES];
+  DCHECK(bundle_url);
+  if (!bundle_url) {
+    return Process();
+  }
+
+  // NSWorkspace automatically adds the binary path as the first argument, so
+  // it should not be included in the list.
+  std::vector<std::string> argv = command_line.argv();
+  int argc = argv.size();
+  NSMutableArray* launch_args = [NSMutableArray arrayWithCapacity:argc - 1];
+  for (int i = 1; i < argc; ++i) {
+    [launch_args addObject:base::SysUTF8ToNSString(argv[i])];
+  }
+
+  NSDictionary* configuration = @{
+    NSWorkspaceLaunchConfigurationArguments : launch_args,
+  };
+  NSError* launch_error = nil;
+  // TODO(jeremya): this opens a new browser window if Chrome is already
+  // running without any windows open.
+  NSRunningApplication* app =
+      [[NSWorkspace sharedWorkspace] launchApplicationAtURL:bundle_url
+                                                    options:launch_options
+                                              configuration:configuration
+                                                      error:&launch_error];
+  if (launch_error) {
+    LOG(ERROR) << base::SysNSStringToUTF8([launch_error localizedDescription]);
+    return Process();
+  }
+  DCHECK(app);
+  return Process([app processIdentifier]);
+}
+
+}  // namespace mac
+}  // namespace base
diff --git a/base/mac/launchd.cc b/base/mac/launchd.cc
new file mode 100644
index 0000000..0337d2e
--- /dev/null
+++ b/base/mac/launchd.cc
@@ -0,0 +1,75 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac/launchd.h"
+
+#include "base/logging.h"
+#include "base/mac/scoped_launch_data.h"
+
+namespace base {
+namespace mac {
+
+// MessageForJob sends a single message to launchd with a simple dictionary
+// mapping |operation| to |job_label|, and returns the result of calling
+// launch_msg to send that message. On failure, returns NULL. The caller
+// assumes ownership of the returned launch_data_t object.
+launch_data_t MessageForJob(const std::string& job_label,
+                            const char* operation) {
+  // launch_data_alloc returns something that needs to be freed.
+  ScopedLaunchData message(launch_data_alloc(LAUNCH_DATA_DICTIONARY));
+  if (!message.is_valid()) {
+    LOG(ERROR) << "launch_data_alloc";
+    return NULL;
+  }
+
+  // launch_data_new_string returns something that needs to be freed, but
+  // the dictionary will assume ownership when launch_data_dict_insert is
+  // called, so put it in a scoper and .release() it when given to the
+  // dictionary.
+  ScopedLaunchData job_label_launchd(launch_data_new_string(job_label.c_str()));
+  if (!job_label_launchd.is_valid()) {
+    LOG(ERROR) << "launch_data_new_string";
+    return NULL;
+  }
+
+  if (!launch_data_dict_insert(message.get(), job_label_launchd.release(),
+                               operation)) {
+    return NULL;
+  }
+
+  return launch_msg(message.get());
+}
+
+pid_t PIDForJob(const std::string& job_label) {
+  ScopedLaunchData response(MessageForJob(job_label, LAUNCH_KEY_GETJOB));
+  if (!response.is_valid()) {
+    return -1;
+  }
+
+  launch_data_type_t response_type = launch_data_get_type(response.get());
+  if (response_type != LAUNCH_DATA_DICTIONARY) {
+    if (response_type == LAUNCH_DATA_ERRNO) {
+      LOG(ERROR) << "PIDForJob: error "
+                 << launch_data_get_errno(response.get());
+    } else {
+      LOG(ERROR) << "PIDForJob: expected dictionary, got " << response_type;
+    }
+    return -1;
+  }
+
+  launch_data_t pid_data =
+      launch_data_dict_lookup(response.get(), LAUNCH_JOBKEY_PID);
+  if (!pid_data)
+    return 0;
+
+  if (launch_data_get_type(pid_data) != LAUNCH_DATA_INTEGER) {
+    LOG(ERROR) << "PIDForJob: expected integer";
+    return -1;
+  }
+
+  return launch_data_get_integer(pid_data);
+}
+
+}  // namespace mac
+}  // namespace base
diff --git a/base/mac/launchd.h b/base/mac/launchd.h
new file mode 100644
index 0000000..9e4514e
--- /dev/null
+++ b/base/mac/launchd.h
@@ -0,0 +1,34 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_LAUNCHD_H_
+#define BASE_MAC_LAUNCHD_H_
+
+#include <launch.h>
+#include <sys/types.h>
+
+#include <string>
+
+#include "base/base_export.h"
+
+namespace base {
+namespace mac {
+
+// MessageForJob sends a single message to launchd with a simple dictionary
+// mapping |operation| to |job_label|, and returns the result of calling
+// launch_msg to send that message. On failure, returns NULL. The caller
+// assumes ownership of the returned launch_data_t object.
+BASE_EXPORT
+launch_data_t MessageForJob(const std::string& job_label,
+                            const char* operation);
+
+// Returns the process ID for |job_label| if the job is running, 0 if the job
+// is loaded but not running, or -1 on error.
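+//
+// Illustrative usage (the job label is hypothetical):
+//   pid_t pid = PIDForJob("org.example.daemon");
+//   if (pid > 0) {
+//     // The job is running as process |pid|.
+//   }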
+BASE_EXPORT
+pid_t PIDForJob(const std::string& job_label);
+
+}  // namespace mac
+}  // namespace base
+
+#endif  // BASE_MAC_LAUNCHD_H_
diff --git a/base/mac/mac_logging.h b/base/mac/mac_logging.h
new file mode 100644
index 0000000..30e43ea
--- /dev/null
+++ b/base/mac/mac_logging.h
@@ -0,0 +1,98 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_MAC_LOGGING_H_
+#define BASE_MAC_MAC_LOGGING_H_
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "build/build_config.h"
+
+#if defined(OS_IOS)
+#include <MacTypes.h>
+#else
+#include <libkern/OSTypes.h>
+#endif
+
+// Use the OSSTATUS_LOG family to log messages related to errors in Mac OS X
+// system routines that report status via an OSStatus or OSErr value. It is
+// similar to the PLOG family which operates on errno, but because there is no
+// global (or thread-local) OSStatus or OSErr value, the specific error must
+// be supplied as an argument to the OSSTATUS_LOG macro. The message logged
+// will contain the symbolic constant name corresponding to the status value,
+// along with the value itself.
+//
+// OSErr is just an older 16-bit form of the newer 32-bit OSStatus. Despite
+// the name, OSSTATUS_LOG can be used equally well for OSStatus and OSErr.
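+//
+// Example (illustrative; any API that reports an OSStatus works the same way):
+//
+//   OSStatus err = SecKeychainCopyDefault(&keychain);
+//   if (err != noErr) {
+//     OSSTATUS_LOG(ERROR, err) << "SecKeychainCopyDefault";
+//   }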
+
+namespace logging {
+
+// Returns a UTF8 description from an OS X Status error.
+BASE_EXPORT std::string DescriptionFromOSStatus(OSStatus err);
+
+class BASE_EXPORT OSStatusLogMessage : public logging::LogMessage {
+ public:
+  OSStatusLogMessage(const char* file_path,
+                     int line,
+                     LogSeverity severity,
+                     OSStatus status);
+  ~OSStatusLogMessage();
+
+ private:
+  OSStatus status_;
+
+  DISALLOW_COPY_AND_ASSIGN(OSStatusLogMessage);
+};
+
+}  // namespace logging
+
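+// MAC_DVLOG_IS_ON parallels DVLOG_IS_ON: in NDEBUG (release) builds the
+// OSSTATUS_DVLOG macros below are never evaluated.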
+#if defined(NDEBUG)
+#define MAC_DVLOG_IS_ON(verbose_level) 0
+#else
+#define MAC_DVLOG_IS_ON(verbose_level) VLOG_IS_ON(verbose_level)
+#endif
+
+#define OSSTATUS_LOG_STREAM(severity, status) \
+    COMPACT_GOOGLE_LOG_EX_ ## severity(OSStatusLogMessage, status).stream()
+#define OSSTATUS_VLOG_STREAM(verbose_level, status) \
+    logging::OSStatusLogMessage(__FILE__, __LINE__, \
+                                -verbose_level, status).stream()
+
+#define OSSTATUS_LOG(severity, status) \
+    LAZY_STREAM(OSSTATUS_LOG_STREAM(severity, status), LOG_IS_ON(severity))
+#define OSSTATUS_LOG_IF(severity, condition, status) \
+    LAZY_STREAM(OSSTATUS_LOG_STREAM(severity, status), \
+                LOG_IS_ON(severity) && (condition))
+
+#define OSSTATUS_VLOG(verbose_level, status) \
+    LAZY_STREAM(OSSTATUS_VLOG_STREAM(verbose_level, status), \
+                VLOG_IS_ON(verbose_level))
+#define OSSTATUS_VLOG_IF(verbose_level, condition, status) \
+    LAZY_STREAM(OSSTATUS_VLOG_STREAM(verbose_level, status), \
+                VLOG_IS_ON(verbose_level) && (condition))
+
+#define OSSTATUS_CHECK(condition, status) \
+    LAZY_STREAM(OSSTATUS_LOG_STREAM(FATAL, status), !(condition)) \
+    << "Check failed: " # condition << ". "
+
+#define OSSTATUS_DLOG(severity, status) \
+    LAZY_STREAM(OSSTATUS_LOG_STREAM(severity, status), DLOG_IS_ON(severity))
+#define OSSTATUS_DLOG_IF(severity, condition, status) \
+    LAZY_STREAM(OSSTATUS_LOG_STREAM(severity, status), \
+                DLOG_IS_ON(severity) && (condition))
+
+#define OSSTATUS_DVLOG(verbose_level, status) \
+    LAZY_STREAM(OSSTATUS_VLOG_STREAM(verbose_level, status), \
+                MAC_DVLOG_IS_ON(verbose_level))
+#define OSSTATUS_DVLOG_IF(verbose_level, condition, status) \
+    LAZY_STREAM(OSSTATUS_VLOG_STREAM(verbose_level, status), \
+                MAC_DVLOG_IS_ON(verbose_level) && (condition))
+
+#define OSSTATUS_DCHECK(condition, status)        \
+  LAZY_STREAM(OSSTATUS_LOG_STREAM(FATAL, status), \
+              DCHECK_IS_ON() && !(condition))     \
+      << "Check failed: " #condition << ". "
+
+#endif  // BASE_MAC_MAC_LOGGING_H_
diff --git a/base/mac/mac_logging.mm b/base/mac/mac_logging.mm
new file mode 100644
index 0000000..f0d3c07
--- /dev/null
+++ b/base/mac/mac_logging.mm
@@ -0,0 +1,47 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac/mac_logging.h"
+
+#import <Foundation/Foundation.h>
+
+#include <iomanip>
+
+#include "build/build_config.h"
+
+#if !defined(OS_IOS)
+#include <CoreServices/CoreServices.h>
+#endif
+
+namespace logging {
+
+std::string DescriptionFromOSStatus(OSStatus err) {
+  NSError* error =
+      [NSError errorWithDomain:NSOSStatusErrorDomain code:err userInfo:nil];
+  return error.description.UTF8String;
+}
+
+OSStatusLogMessage::OSStatusLogMessage(const char* file_path,
+                                       int line,
+                                       LogSeverity severity,
+                                       OSStatus status)
+    : LogMessage(file_path, line, severity),
+      status_(status) {
+}
+
+OSStatusLogMessage::~OSStatusLogMessage() {
+#if defined(OS_IOS)
+  // TODO(crbug.com/546375): Consider using NSError with NSOSStatusErrorDomain
+  // to try to get a description of the failure.
+  stream() << ": " << status_;
+#else
+  stream() << ": "
+           << DescriptionFromOSStatus(status_)
+           << " ("
+           << status_
+           << ")";
+#endif
+}
+
+}  // namespace logging
diff --git a/base/mac/mac_util.h b/base/mac/mac_util.h
new file mode 100644
index 0000000..37e5b67
--- /dev/null
+++ b/base/mac/mac_util.h
@@ -0,0 +1,182 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_MAC_UTIL_H_
+#define BASE_MAC_MAC_UTIL_H_
+
+#include <stdint.h>
+#include <string>
+
+#import <CoreGraphics/CoreGraphics.h>
+
+#include "base/base_export.h"
+
+namespace base {
+
+class FilePath;
+
+namespace mac {
+
+// Full screen modes, in increasing order of priority.  More permissive modes
+// take precedence.
+enum FullScreenMode {
+  kFullScreenModeHideAll = 0,
+  kFullScreenModeHideDock = 1,
+  kFullScreenModeAutoHideAll = 2,
+  kNumFullScreenModes = 3,
+
+  // kFullScreenModeNormal is not a valid FullScreenMode, but it is useful to
+  // other classes, so we include it here.
+  kFullScreenModeNormal = 10,
+};
+
+// Returns an sRGB color space.  The return value is a static value; do not
+// release it!
+BASE_EXPORT CGColorSpaceRef GetSRGBColorSpace();
+
+// Returns the generic RGB color space. The return value is a static value; do
+// not release it!
+BASE_EXPORT CGColorSpaceRef GetGenericRGBColorSpace();
+
+// Returns the color space being used by the main display.  The return value
+// is a static value; do not release it!
+BASE_EXPORT CGColorSpaceRef GetSystemColorSpace();
+
+// Add a full screen request for the given |mode|.  Must be paired with a
+// ReleaseFullScreen() call for the same |mode|.  This does not by itself create
+// a fullscreen window; rather, it manages per-application state related to
+// hiding the dock and menubar.  Must be called on the main thread.
+BASE_EXPORT void RequestFullScreen(FullScreenMode mode);
+
+// Release a request for full screen mode.  Must be matched with a
+// RequestFullScreen() call for the same |mode|.  As with RequestFullScreen(),
+// this does not affect windows directly, but rather manages per-application
+// state.  For example, if there are no other outstanding
+// |kFullScreenModeAutoHideAll| requests, this will reshow the menu bar.  Must
+// be called on the main thread.
+BASE_EXPORT void ReleaseFullScreen(FullScreenMode mode);
+
+// Convenience method to switch the current fullscreen mode.  This has the same
+// net effect as a ReleaseFullScreen(from_mode) call followed immediately by a
+// RequestFullScreen(to_mode).  Must be called on the main thread.
+BASE_EXPORT void SwitchFullScreenModes(FullScreenMode from_mode,
+                                       FullScreenMode to_mode);
+
+// Excludes the file given by |file_path| from being backed up by Time Machine.
+BASE_EXPORT bool SetFileBackupExclusion(const FilePath& file_path);
+
+// Checks if the current application is set as a Login Item, so it will launch
+// on login. If |is_hidden| is non-NULL, the Login Item is also queried for the
+// 'hide on launch' flag.
+BASE_EXPORT bool CheckLoginItemStatus(bool* is_hidden);
+
+// Adds the current application to the set of Login Items with the specified
+// "hide" flag. This has the same effect as adding/removing the application in
+// SystemPreferences->Accounts->LoginItems or marking the application in the
+// Dock as "Options->Open on Login".
+// Does nothing if the application is already set up as a Login Item with the
+// specified hide flag.
+BASE_EXPORT void AddToLoginItems(bool hide_on_startup);
+
+// Removes the current application from the list of Login Items.
+BASE_EXPORT void RemoveFromLoginItems();
+
+// Returns true if the current process was automatically launched as a
+// 'Login Item' or via Lion's Resume. Used to suppress opening windows.
+BASE_EXPORT bool WasLaunchedAsLoginOrResumeItem();
+
+// Returns true if the current process was automatically launched as a
+// 'Login Item' or via Resume, and the 'Reopen windows when logging back in'
+// checkbox was selected by the user.  This indicates that the previous
+// session should be restored.
+BASE_EXPORT bool WasLaunchedAsLoginItemRestoreState();
+
+// Returns true if the current process was automatically launched as a
+// 'Login Item' with 'hide on startup' flag. Used to suppress opening windows.
+BASE_EXPORT bool WasLaunchedAsHiddenLoginItem();
+
+// Remove the quarantine xattr from the given file. Returns false if there was
+// an error, or true otherwise.
+BASE_EXPORT bool RemoveQuarantineAttribute(const FilePath& file_path);
+
+namespace internal {
+
+// Returns the system's Mac OS X minor version. This is the |y| value
+// in 10.y or 10.y.z.
+BASE_EXPORT int MacOSXMinorVersion();
+
+}  // namespace internal
+
+// Run-time OS version checks. Use these instead of
+// base::SysInfo::OperatingSystemVersionNumbers. Prefer the "AtLeast" and
+// "AtMost" variants to those that check for a specific version, unless you
+// know for sure that you need to check for a specific version.
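+//
+// Illustrative usage:
+//   if (base::mac::IsAtLeastOS10_12()) {
+//     // Take a codepath that depends on a macOS 10.12+ API.
+//   }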
+
+#define DEFINE_IS_OS_FUNCS(V, TEST_DEPLOYMENT_TARGET) \
+  inline bool IsOS10_##V() {                          \
+    TEST_DEPLOYMENT_TARGET(>, V, false)               \
+    return internal::MacOSXMinorVersion() == V;       \
+  }                                                   \
+  inline bool IsAtLeastOS10_##V() {                   \
+    TEST_DEPLOYMENT_TARGET(>=, V, true)               \
+    return internal::MacOSXMinorVersion() >= V;       \
+  }                                                   \
+  inline bool IsAtMostOS10_##V() {                    \
+    TEST_DEPLOYMENT_TARGET(>, V, false)               \
+    return internal::MacOSXMinorVersion() <= V;       \
+  }
+
+#define TEST_DEPLOYMENT_TARGET(OP, V, RET)                      \
+  if (MAC_OS_X_VERSION_MIN_REQUIRED OP MAC_OS_X_VERSION_10_##V) \
+    return RET;
+#define IGNORE_DEPLOYMENT_TARGET(OP, V, RET)
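+
+// TEST_DEPLOYMENT_TARGET makes the check constant when the deployment target
+// (MAC_OS_X_VERSION_MIN_REQUIRED) already decides the answer at compile time;
+// IGNORE_DEPLOYMENT_TARGET is used when the SDK is too old to define the
+// corresponding MAC_OS_X_VERSION_10_* constant.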
+
+DEFINE_IS_OS_FUNCS(9, TEST_DEPLOYMENT_TARGET)
+DEFINE_IS_OS_FUNCS(10, TEST_DEPLOYMENT_TARGET)
+
+#ifdef MAC_OS_X_VERSION_10_11
+DEFINE_IS_OS_FUNCS(11, TEST_DEPLOYMENT_TARGET)
+#else
+DEFINE_IS_OS_FUNCS(11, IGNORE_DEPLOYMENT_TARGET)
+#endif
+
+#ifdef MAC_OS_X_VERSION_10_12
+DEFINE_IS_OS_FUNCS(12, TEST_DEPLOYMENT_TARGET)
+#else
+DEFINE_IS_OS_FUNCS(12, IGNORE_DEPLOYMENT_TARGET)
+#endif
+
+#ifdef MAC_OS_X_VERSION_10_13
+DEFINE_IS_OS_FUNCS(13, TEST_DEPLOYMENT_TARGET)
+#else
+DEFINE_IS_OS_FUNCS(13, IGNORE_DEPLOYMENT_TARGET)
+#endif
+
+#undef IGNORE_DEPLOYMENT_TARGET
+#undef TEST_DEPLOYMENT_TARGET
+#undef DEFINE_IS_OS_FUNCS
+
+// This should be infrequently used. It only makes sense to use this to avoid
+// codepaths that are very likely to break on future (unreleased, untested,
+// unborn) OS releases, or to log when the OS is newer than any known version.
+inline bool IsOSLaterThan10_13_DontCallThis() {
+  return !IsAtMostOS10_13();
+}
+
+// Retrieve the system's model identifier string from the IOKit registry:
+// for example, "MacPro4,1", "MacBookPro6,1". Returns empty string upon
+// failure.
+BASE_EXPORT std::string GetModelIdentifier();
+
+// Parse a model identifier string; for example, into ("MacBookPro", 6, 1).
+// If any error occurs, none of the input pointers are touched.
+BASE_EXPORT bool ParseModelIdentifier(const std::string& ident,
+                                      std::string* type,
+                                      int32_t* major,
+                                      int32_t* minor);
+
+}  // namespace mac
+}  // namespace base
+
+#endif  // BASE_MAC_MAC_UTIL_H_
diff --git a/base/mac/mac_util.mm b/base/mac/mac_util.mm
new file mode 100644
index 0000000..a8308be
--- /dev/null
+++ b/base/mac/mac_util.mm
@@ -0,0 +1,485 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac/mac_util.h"
+
+#import <Cocoa/Cocoa.h>
+#import <IOKit/IOKitLib.h>
+#include <errno.h>
+#include <stddef.h>
+#include <string.h>
+#include <sys/utsname.h>
+#include <sys/xattr.h>
+
+#include "base/files/file_path.h"
+#include "base/logging.h"
+#include "base/mac/bundle_locations.h"
+#include "base/mac/foundation_util.h"
+#include "base/mac/mac_logging.h"
+#include "base/mac/scoped_cftyperef.h"
+#include "base/mac/scoped_ioobject.h"
+#include "base/mac/scoped_nsobject.h"
+#include "base/mac/sdk_forward_declarations.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/sys_string_conversions.h"
+
+namespace base {
+namespace mac {
+
+namespace {
+
+// The current count of outstanding requests for full screen mode from browser
+// windows, plugins, etc.
+int g_full_screen_requests[kNumFullScreenModes] = { 0 };
+
+// Sets the appropriate application presentation option based on the current
+// full screen requests.  Since only one presentation option can be active at a
+// given time, full screen requests are ordered by priority.  If there are no
+// outstanding full screen requests, reverts to normal mode.  If the correct
+// presentation option is already set, does nothing.
+void SetUIMode() {
+  NSApplicationPresentationOptions current_options =
+      [NSApp presentationOptions];
+
+  // Determine which mode should be active, based on which requests are
+  // currently outstanding.  More permissive requests take precedence.  For
+  // example, plugins request |kFullScreenModeAutoHideAll|, while browser
+  // windows request |kFullScreenModeHideDock| when the fullscreen overlay is
+  // down.  Precedence goes to plugins in this case, so AutoHideAll wins over
+  // HideDock.
+  NSApplicationPresentationOptions desired_options =
+      NSApplicationPresentationDefault;
+  if (g_full_screen_requests[kFullScreenModeAutoHideAll] > 0) {
+    desired_options = NSApplicationPresentationHideDock |
+                      NSApplicationPresentationAutoHideMenuBar;
+  } else if (g_full_screen_requests[kFullScreenModeHideDock] > 0) {
+    desired_options = NSApplicationPresentationHideDock;
+  } else if (g_full_screen_requests[kFullScreenModeHideAll] > 0) {
+    desired_options = NSApplicationPresentationHideDock |
+                      NSApplicationPresentationHideMenuBar;
+  }
+
+  // Mac OS X bug: if the window is fullscreened (Lion-style) and
+  // NSApplicationPresentationDefault is requested, the result is that the menu
+  // bar doesn't auto-hide. rdar://13576498 http://www.openradar.me/13576498
+  //
+  // As a workaround, in that case, explicitly set the presentation options to
+  // the ones that are set by the system as it fullscreens a window.
+  if (desired_options == NSApplicationPresentationDefault &&
+      current_options & NSApplicationPresentationFullScreen) {
+    desired_options |= NSApplicationPresentationFullScreen |
+                       NSApplicationPresentationAutoHideMenuBar |
+                       NSApplicationPresentationAutoHideDock;
+  }
+
+  if (current_options != desired_options)
+    [NSApp setPresentationOptions:desired_options];
+}
+
+// Looks into Shared File Lists corresponding to Login Items for the item
+// representing the current application.  If such an item is found, returns a
+// retained reference to it. Caller is responsible for releasing the reference.
+LSSharedFileListItemRef GetLoginItemForApp() {
+  ScopedCFTypeRef<LSSharedFileListRef> login_items(LSSharedFileListCreate(
+      NULL, kLSSharedFileListSessionLoginItems, NULL));
+
+  if (!login_items.get()) {
+    DLOG(ERROR) << "Couldn't get a Login Items list.";
+    return NULL;
+  }
+
+  base::scoped_nsobject<NSArray> login_items_array(
+      CFToNSCast(LSSharedFileListCopySnapshot(login_items, NULL)));
+
+  NSURL* url = [NSURL fileURLWithPath:[base::mac::MainBundle() bundlePath]];
+
+  for (NSUInteger i = 0; i < [login_items_array count]; ++i) {
+    LSSharedFileListItemRef item =
+        reinterpret_cast<LSSharedFileListItemRef>(login_items_array[i]);
+    CFURLRef item_url_ref = NULL;
+
+    // It seems that LSSharedFileListItemResolve() can return NULL in
+    // item_url_ref even if the function itself returns noErr. See
+    // https://crbug.com/760989
+    if (LSSharedFileListItemResolve(item, 0, &item_url_ref, NULL) == noErr &&
+        item_url_ref) {
+      ScopedCFTypeRef<CFURLRef> item_url(item_url_ref);
+      if (CFEqual(item_url, url)) {
+        CFRetain(item);
+        return item;
+      }
+    }
+  }
+
+  return NULL;
+}
+
+bool IsHiddenLoginItem(LSSharedFileListItemRef item) {
+  ScopedCFTypeRef<CFBooleanRef> hidden(reinterpret_cast<CFBooleanRef>(
+      LSSharedFileListItemCopyProperty(item,
+          reinterpret_cast<CFStringRef>(kLSSharedFileListLoginItemHidden))));
+
+  return hidden && hidden == kCFBooleanTrue;
+}
+
+}  // namespace
+
+CGColorSpaceRef GetGenericRGBColorSpace() {
+  // Leaked. That's OK, it's scoped to the lifetime of the application.
+  static CGColorSpaceRef g_color_space_generic_rgb(
+      CGColorSpaceCreateWithName(kCGColorSpaceGenericRGB));
+  DLOG_IF(ERROR, !g_color_space_generic_rgb) <<
+      "Couldn't get the generic RGB color space";
+  return g_color_space_generic_rgb;
+}
+
+CGColorSpaceRef GetSRGBColorSpace() {
+  // Leaked.  That's OK, it's scoped to the lifetime of the application.
+  static CGColorSpaceRef g_color_space_sRGB =
+      CGColorSpaceCreateWithName(kCGColorSpaceSRGB);
+  DLOG_IF(ERROR, !g_color_space_sRGB) << "Couldn't get the sRGB color space";
+  return g_color_space_sRGB;
+}
+
+CGColorSpaceRef GetSystemColorSpace() {
+  // Leaked.  That's OK, it's scoped to the lifetime of the application.
+  // Try to get the main display's color space.
+  static CGColorSpaceRef g_system_color_space =
+      CGDisplayCopyColorSpace(CGMainDisplayID());
+
+  if (!g_system_color_space) {
+    // Use a generic RGB color space.  This is better than nothing.
+    g_system_color_space = CGColorSpaceCreateDeviceRGB();
+
+    if (g_system_color_space) {
+      DLOG(WARNING) <<
+          "Couldn't get the main display's color space, using generic";
+    } else {
+      DLOG(ERROR) << "Couldn't get any color space";
+    }
+  }
+
+  return g_system_color_space;
+}
+
+// Add a request for full screen mode.  Must be called on the main thread.
+void RequestFullScreen(FullScreenMode mode) {
+  DCHECK_LT(mode, kNumFullScreenModes);
+  if (mode >= kNumFullScreenModes)
+    return;
+
+  DCHECK_GE(mode, 0);
+  if (mode < 0)
+    return;
+
+  DCHECK_GE(g_full_screen_requests[mode], 0);
+
+  g_full_screen_requests[mode] = std::max(g_full_screen_requests[mode] + 1, 1);
+  SetUIMode();
+}
+
+// Release a request for full screen mode.  Must be called on the main thread.
+void ReleaseFullScreen(FullScreenMode mode) {
+  DCHECK_LT(mode, kNumFullScreenModes);
+  if (mode >= kNumFullScreenModes)
+    return;
+
+  DCHECK_GE(mode, 0);
+  if (mode < 0)
+    return;
+
+  DCHECK_GE(g_full_screen_requests[mode], 0);
+
+  g_full_screen_requests[mode] = std::max(g_full_screen_requests[mode] - 1, 0);
+  SetUIMode();
+}
+
+// Switches full screen modes.  Releases a request for |from_mode| and adds a
+// new request for |to_mode|.  Must be called on the main thread.
+void SwitchFullScreenModes(FullScreenMode from_mode, FullScreenMode to_mode) {
+  DCHECK_LT(from_mode, kNumFullScreenModes);
+  DCHECK_LT(to_mode, kNumFullScreenModes);
+  if (from_mode >= kNumFullScreenModes || to_mode >= kNumFullScreenModes)
+    return;
+
+  DCHECK_GT(g_full_screen_requests[from_mode], 0);
+  DCHECK_GE(g_full_screen_requests[to_mode], 0);
+  g_full_screen_requests[from_mode] =
+      std::max(g_full_screen_requests[from_mode] - 1, 0);
+  g_full_screen_requests[to_mode] =
+      std::max(g_full_screen_requests[to_mode] + 1, 1);
+  SetUIMode();
+}
+
+bool SetFileBackupExclusion(const FilePath& file_path) {
+  NSString* file_path_ns =
+      [NSString stringWithUTF8String:file_path.value().c_str()];
+  NSURL* file_url = [NSURL fileURLWithPath:file_path_ns];
+
+  // When excludeByPath is true the application must be running with root
+  // privileges (admin for 10.6 and earlier) but the URL does not have to
+  // already exist. When excludeByPath is false the URL must already exist but
+  // can be used in non-root (or admin as above) mode. We use false so that
+  // non-root (or admin) users don't get their TimeMachine drive filled up with
+  // unnecessary backups.
+  OSStatus os_err =
+      CSBackupSetItemExcluded(base::mac::NSToCFCast(file_url), TRUE, FALSE);
+  if (os_err != noErr) {
+    OSSTATUS_DLOG(WARNING, os_err)
+        << "Failed to set backup exclusion for file '"
+        << file_path.value().c_str() << "'";
+  }
+  return os_err == noErr;
+}
+
+bool CheckLoginItemStatus(bool* is_hidden) {
+  ScopedCFTypeRef<LSSharedFileListItemRef> item(GetLoginItemForApp());
+  if (!item.get())
+    return false;
+
+  if (is_hidden)
+    *is_hidden = IsHiddenLoginItem(item);
+
+  return true;
+}
+
+void AddToLoginItems(bool hide_on_startup) {
+  ScopedCFTypeRef<LSSharedFileListItemRef> item(GetLoginItemForApp());
+  if (item.get() && (IsHiddenLoginItem(item) == hide_on_startup)) {
+    return;  // Already a Login Item with the required hide flag.
+  }
+
+  ScopedCFTypeRef<LSSharedFileListRef> login_items(LSSharedFileListCreate(
+      NULL, kLSSharedFileListSessionLoginItems, NULL));
+
+  if (!login_items.get()) {
+    DLOG(ERROR) << "Couldn't get a Login Items list.";
+    return;
+  }
+
+  // Remove the old item; it has the wrong hide flag. We'll create a new one.
+  if (item.get()) {
+    LSSharedFileListItemRemove(login_items, item);
+  }
+
+  NSURL* url = [NSURL fileURLWithPath:[base::mac::MainBundle() bundlePath]];
+
+  BOOL hide = hide_on_startup ? YES : NO;
+  NSDictionary* properties =
+      [NSDictionary
+        dictionaryWithObject:[NSNumber numberWithBool:hide]
+                      forKey:(NSString*)kLSSharedFileListLoginItemHidden];
+
+  ScopedCFTypeRef<LSSharedFileListItemRef> new_item;
+  new_item.reset(LSSharedFileListInsertItemURL(
+      login_items, kLSSharedFileListItemLast, NULL, NULL,
+      reinterpret_cast<CFURLRef>(url),
+      reinterpret_cast<CFDictionaryRef>(properties), NULL));
+
+  if (!new_item.get()) {
+    DLOG(ERROR) << "Couldn't insert current app into Login Items list.";
+  }
+}
+
+void RemoveFromLoginItems() {
+  ScopedCFTypeRef<LSSharedFileListItemRef> item(GetLoginItemForApp());
+  if (!item.get())
+    return;
+
+  ScopedCFTypeRef<LSSharedFileListRef> login_items(LSSharedFileListCreate(
+      NULL, kLSSharedFileListSessionLoginItems, NULL));
+
+  if (!login_items.get()) {
+    DLOG(ERROR) << "Couldn't get a Login Items list.";
+    return;
+  }
+
+  LSSharedFileListItemRemove(login_items, item);
+}
+
+bool WasLaunchedAsLoginOrResumeItem() {
+  ProcessSerialNumber psn = { 0, kCurrentProcess };
+  ProcessInfoRec info = {};
+  info.processInfoLength = sizeof(info);
+
+// GetProcessInformation has been deprecated since macOS 10.9, but there is no
+// replacement that provides the information we need. See
+// https://crbug.com/650854.
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
+  if (GetProcessInformation(&psn, &info) == noErr) {
+#pragma clang diagnostic pop
+    ProcessInfoRec parent_info = {};
+    parent_info.processInfoLength = sizeof(parent_info);
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
+    if (GetProcessInformation(&info.processLauncher, &parent_info) == noErr) {
+#pragma clang diagnostic pop
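+      // 'lgnw' is loginwindow's process signature: if loginwindow launched
+      // this process, it was started as a Login Item or via Resume.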
+      return parent_info.processSignature == 'lgnw';
+    }
+  }
+  return false;
+}
+
+bool WasLaunchedAsLoginItemRestoreState() {
+  // "Reopen windows..." option was added for Lion.  Prior OS versions should
+  // not have this behavior.
+  if (!WasLaunchedAsLoginOrResumeItem())
+    return false;
+
+  CFStringRef app = CFSTR("com.apple.loginwindow");
+  CFStringRef save_state = CFSTR("TALLogoutSavesState");
+  ScopedCFTypeRef<CFPropertyListRef> plist(
+      CFPreferencesCopyAppValue(save_state, app));
+  // According to documentation, com.apple.loginwindow.plist does not exist on a
+  // fresh installation until the user changes a login window setting.  The
+  // "reopen windows" option is checked by default, so the plist would exist had
+  // the user unchecked it.
+  // https://developer.apple.com/library/mac/documentation/macosx/conceptual/bpsystemstartup/chapters/CustomLogin.html
+  if (!plist)
+    return true;
+
+  if (CFBooleanRef restore_state = base::mac::CFCast<CFBooleanRef>(plist))
+    return CFBooleanGetValue(restore_state);
+
+  return false;
+}
+
+bool WasLaunchedAsHiddenLoginItem() {
+  if (!WasLaunchedAsLoginOrResumeItem())
+    return false;
+
+  ScopedCFTypeRef<LSSharedFileListItemRef> item(GetLoginItemForApp());
+  if (!item.get()) {
+    // OS X can launch items for the resume feature.
+    return false;
+  }
+  return IsHiddenLoginItem(item);
+}
+
+bool RemoveQuarantineAttribute(const FilePath& file_path) {
+  const char kQuarantineAttrName[] = "com.apple.quarantine";
+  int status = removexattr(file_path.value().c_str(), kQuarantineAttrName, 0);
+  return status == 0 || errno == ENOATTR;
+}
+
+namespace {
+
+// Returns the running system's Darwin major version. Don't call this, it's
+// an implementation detail and its result is meant to be cached by
+// MacOSXMinorVersion.
+int DarwinMajorVersionInternal() {
+  // base::OperatingSystemVersionNumbers calls Gestalt, which is a
+  // higher-level operation than is needed. It might perform unnecessary
+  // operations. On 10.6, it was observed to be able to spawn threads (see
+  // http://crbug.com/53200). It might also read files or perform other
+  // blocking operations. Actually, nobody really knows for sure just what
+  // Gestalt might do, or what it might be taught to do in the future.
+  //
+  // uname, on the other hand, is implemented as a simple series of sysctl
+  // system calls to obtain the relevant data from the kernel. The data is
+  // compiled right into the kernel, so no threads or blocking or other
+  // funny business is necessary.
+
+  struct utsname uname_info;
+  if (uname(&uname_info) != 0) {
+    DPLOG(ERROR) << "uname";
+    return 0;
+  }
+
+  if (strcmp(uname_info.sysname, "Darwin") != 0) {
+    DLOG(ERROR) << "unexpected uname sysname " << uname_info.sysname;
+    return 0;
+  }
+
+  int darwin_major_version = 0;
+  char* dot = strchr(uname_info.release, '.');
+  if (dot) {
+    if (!base::StringToInt(base::StringPiece(uname_info.release,
+                                             dot - uname_info.release),
+                           &darwin_major_version)) {
+      dot = NULL;
+    }
+  }
+
+  if (!dot) {
+    DLOG(ERROR) << "could not parse uname release " << uname_info.release;
+    return 0;
+  }
+
+  return darwin_major_version;
+}
+
+// Returns the running system's Mac OS X minor version. This is the |y| value
+// in 10.y or 10.y.z. Don't call this, it's an implementation detail and the
+// result is meant to be cached by MacOSXMinorVersion.
+int MacOSXMinorVersionInternal() {
+  int darwin_major_version = DarwinMajorVersionInternal();
+
+  // The Darwin major version is always 4 greater than the Mac OS X minor
+  // version for Darwin versions beginning with 6, corresponding to Mac OS X
+  // 10.2. Since this correspondence may change in the future, warn when
+  // encountering a version higher than anything seen before. Older Darwin
+  // versions, or versions that can't be determined, result in
+  // immediate death.
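+  //
+  // For example, Darwin 17 corresponds to macOS 10.13 (17 - 4 = 13).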
+  CHECK(darwin_major_version >= 6);
+  int mac_os_x_minor_version = darwin_major_version - 4;
+  DLOG_IF(WARNING, darwin_major_version > 17)
+      << "Assuming Darwin " << base::IntToString(darwin_major_version)
+      << " is macOS 10." << base::IntToString(mac_os_x_minor_version);
+
+  return mac_os_x_minor_version;
+}
+
+}  // namespace
+
+namespace internal {
+int MacOSXMinorVersion() {
+  static int mac_os_x_minor_version = MacOSXMinorVersionInternal();
+  return mac_os_x_minor_version;
+}
+}  // namespace internal
+
+std::string GetModelIdentifier() {
+  std::string return_string;
+  ScopedIOObject<io_service_t> platform_expert(
+      IOServiceGetMatchingService(kIOMasterPortDefault,
+                                  IOServiceMatching("IOPlatformExpertDevice")));
+  if (platform_expert) {
+    ScopedCFTypeRef<CFDataRef> model_data(
+        static_cast<CFDataRef>(IORegistryEntryCreateCFProperty(
+            platform_expert,
+            CFSTR("model"),
+            kCFAllocatorDefault,
+            0)));
+    if (model_data) {
+      return_string =
+          reinterpret_cast<const char*>(CFDataGetBytePtr(model_data));
+    }
+  }
+  return return_string;
+}
+
+bool ParseModelIdentifier(const std::string& ident,
+                          std::string* type,
+                          int32_t* major,
+                          int32_t* minor) {
+  size_t number_loc = ident.find_first_of("0123456789");
+  if (number_loc == std::string::npos)
+    return false;
+  size_t comma_loc = ident.find(',', number_loc);
+  if (comma_loc == std::string::npos)
+    return false;
+  int32_t major_tmp, minor_tmp;
+  std::string::const_iterator begin = ident.begin();
+  if (!StringToInt(
+          StringPiece(begin + number_loc, begin + comma_loc), &major_tmp) ||
+      !StringToInt(
+          StringPiece(begin + comma_loc + 1, ident.end()), &minor_tmp))
+    return false;
+  *type = ident.substr(0, number_loc);
+  *major = major_tmp;
+  *minor = minor_tmp;
+  return true;
+}
+
+}  // namespace mac
+}  // namespace base
diff --git a/base/mac/mac_util_unittest.mm b/base/mac/mac_util_unittest.mm
new file mode 100644
index 0000000..266d1c4
--- /dev/null
+++ b/base/mac/mac_util_unittest.mm
@@ -0,0 +1,299 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import <Cocoa/Cocoa.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/mac/mac_util.h"
+
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/files/scoped_temp_dir.h"
+#include "base/mac/foundation_util.h"
+#include "base/mac/scoped_cftyperef.h"
+#include "base/mac/scoped_nsobject.h"
+#include "base/macros.h"
+#include "base/sys_info.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/platform_test.h"
+
+#include <errno.h>
+#include <sys/xattr.h>
+
+namespace base {
+namespace mac {
+
+namespace {
+
+typedef PlatformTest MacUtilTest;
+
+TEST_F(MacUtilTest, GetUserDirectoryTest) {
+  // Try a few keys, make sure they come back with non-empty paths.
+  FilePath caches_dir;
+  EXPECT_TRUE(GetUserDirectory(NSCachesDirectory, &caches_dir));
+  EXPECT_FALSE(caches_dir.empty());
+
+  FilePath application_support_dir;
+  EXPECT_TRUE(GetUserDirectory(NSApplicationSupportDirectory,
+                               &application_support_dir));
+  EXPECT_FALSE(application_support_dir.empty());
+
+  FilePath library_dir;
+  EXPECT_TRUE(GetUserDirectory(NSLibraryDirectory, &library_dir));
+  EXPECT_FALSE(library_dir.empty());
+}
+
+TEST_F(MacUtilTest, TestLibraryPath) {
+  FilePath library_dir = GetUserLibraryPath();
+  // Make sure the string isn't empty.
+  EXPECT_FALSE(library_dir.value().empty());
+}
+
+TEST_F(MacUtilTest, TestGetAppBundlePath) {
+  FilePath out;
+
+  // Make sure it doesn't crash.
+  out = GetAppBundlePath(FilePath());
+  EXPECT_TRUE(out.empty());
+
+  // Some more invalid inputs.
+  const char* const invalid_inputs[] = {
+    "/", "/foo", "foo", "/foo/bar.", "foo/bar.", "/foo/bar./bazquux",
+    "foo/bar./bazquux", "foo/.app", "//foo",
+  };
+  for (size_t i = 0; i < arraysize(invalid_inputs); i++) {
+    out = GetAppBundlePath(FilePath(invalid_inputs[i]));
+    EXPECT_TRUE(out.empty()) << "loop: " << i;
+  }
+
+  // Some valid inputs paired with their expected outputs.
+  struct {
+    const char *in;
+    const char *expected_out;
+  } valid_inputs[] = {
+    { "FooBar.app/", "FooBar.app" },
+    { "/FooBar.app", "/FooBar.app" },
+    { "/FooBar.app/", "/FooBar.app" },
+    { "//FooBar.app", "//FooBar.app" },
+    { "/Foo/Bar.app", "/Foo/Bar.app" },
+    { "/Foo/Bar.app/", "/Foo/Bar.app" },
+    { "/F/B.app", "/F/B.app" },
+    { "/F/B.app/", "/F/B.app" },
+    { "/Foo/Bar.app/baz", "/Foo/Bar.app" },
+    { "/Foo/Bar.app/baz/", "/Foo/Bar.app" },
+    { "/Foo/Bar.app/baz/quux.app/quuux", "/Foo/Bar.app" },
+    { "/Applications/Google Foo.app/bar/Foo Helper.app/quux/Foo Helper",
+        "/Applications/Google Foo.app" },
+  };
+  for (size_t i = 0; i < arraysize(valid_inputs); i++) {
+    out = GetAppBundlePath(FilePath(valid_inputs[i].in));
+    EXPECT_FALSE(out.empty()) << "loop: " << i;
+    EXPECT_STREQ(valid_inputs[i].expected_out,
+        out.value().c_str()) << "loop: " << i;
+  }
+}
+
+// http://crbug.com/425745
+TEST_F(MacUtilTest, DISABLED_TestExcludeFileFromBackups) {
+  // The file must already exist in order to set its exclusion property.
+  ScopedTempDir temp_dir_;
+  ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
+  FilePath dummy_file_path = temp_dir_.GetPath().Append("DummyFile");
+  const char dummy_data[] = "All your base are belong to us!";
+  // Dump something real into the file.
+  ASSERT_EQ(static_cast<int>(arraysize(dummy_data)),
+            WriteFile(dummy_file_path, dummy_data, arraysize(dummy_data)));
+  NSString* fileURLString =
+      [NSString stringWithUTF8String:dummy_file_path.value().c_str()];
+  NSURL* fileURL = [NSURL URLWithString:fileURLString];
+  // Initial state should be non-excluded.
+  EXPECT_FALSE(CSBackupIsItemExcluded(base::mac::NSToCFCast(fileURL), NULL));
+  // Exclude the file.
+  EXPECT_TRUE(SetFileBackupExclusion(dummy_file_path));
+  // SetFileBackupExclusion never excludes by path.
+  Boolean excluded_by_path = FALSE;
+  Boolean excluded =
+      CSBackupIsItemExcluded(base::mac::NSToCFCast(fileURL), &excluded_by_path);
+  EXPECT_TRUE(excluded);
+  EXPECT_FALSE(excluded_by_path);
+}
+
+TEST_F(MacUtilTest, NSObjectRetainRelease) {
+  base::scoped_nsobject<NSArray> array(
+      [[NSArray alloc] initWithObjects:@"foo", nil]);
+  EXPECT_EQ(1U, [array retainCount]);
+
+  NSObjectRetain(array);
+  EXPECT_EQ(2U, [array retainCount]);
+
+  NSObjectRelease(array);
+  EXPECT_EQ(1U, [array retainCount]);
+}
+
+TEST_F(MacUtilTest, IsOSEllipsis) {
+  int32_t major, minor, bugfix;
+  base::SysInfo::OperatingSystemVersionNumbers(&major, &minor, &bugfix);
+
+  if (major == 10) {
+    if (minor == 9) {
+      EXPECT_TRUE(IsOS10_9());
+      EXPECT_TRUE(IsAtMostOS10_9());
+      EXPECT_TRUE(IsAtLeastOS10_9());
+      EXPECT_FALSE(IsOS10_10());
+      EXPECT_TRUE(IsAtMostOS10_10());
+      EXPECT_FALSE(IsAtLeastOS10_10());
+      EXPECT_FALSE(IsOS10_11());
+      EXPECT_TRUE(IsAtMostOS10_11());
+      EXPECT_FALSE(IsAtLeastOS10_11());
+      EXPECT_FALSE(IsOS10_12());
+      EXPECT_FALSE(IsAtLeastOS10_12());
+      EXPECT_TRUE(IsAtMostOS10_12());
+      EXPECT_FALSE(IsOS10_13());
+      EXPECT_FALSE(IsAtLeastOS10_13());
+      EXPECT_TRUE(IsAtMostOS10_13());
+      EXPECT_FALSE(IsOSLaterThan10_13_DontCallThis());
+    } else if (minor == 10) {
+      EXPECT_FALSE(IsOS10_9());
+      EXPECT_FALSE(IsAtMostOS10_9());
+      EXPECT_TRUE(IsAtLeastOS10_9());
+      EXPECT_TRUE(IsOS10_10());
+      EXPECT_TRUE(IsAtMostOS10_10());
+      EXPECT_TRUE(IsAtLeastOS10_10());
+      EXPECT_FALSE(IsOS10_11());
+      EXPECT_TRUE(IsAtMostOS10_11());
+      EXPECT_FALSE(IsAtLeastOS10_11());
+      EXPECT_FALSE(IsOS10_12());
+      EXPECT_FALSE(IsAtLeastOS10_12());
+      EXPECT_TRUE(IsAtMostOS10_12());
+      EXPECT_FALSE(IsOS10_13());
+      EXPECT_FALSE(IsAtLeastOS10_13());
+      EXPECT_TRUE(IsAtMostOS10_13());
+      EXPECT_FALSE(IsOSLaterThan10_13_DontCallThis());
+    } else if (minor == 11) {
+      EXPECT_FALSE(IsOS10_9());
+      EXPECT_FALSE(IsAtMostOS10_9());
+      EXPECT_TRUE(IsAtLeastOS10_9());
+      EXPECT_FALSE(IsOS10_10());
+      EXPECT_FALSE(IsAtMostOS10_10());
+      EXPECT_TRUE(IsAtLeastOS10_10());
+      EXPECT_TRUE(IsOS10_11());
+      EXPECT_TRUE(IsAtMostOS10_11());
+      EXPECT_TRUE(IsAtLeastOS10_11());
+      EXPECT_FALSE(IsOS10_12());
+      EXPECT_FALSE(IsAtLeastOS10_12());
+      EXPECT_TRUE(IsAtMostOS10_12());
+      EXPECT_FALSE(IsOS10_13());
+      EXPECT_FALSE(IsAtLeastOS10_13());
+      EXPECT_TRUE(IsAtMostOS10_13());
+      EXPECT_FALSE(IsOSLaterThan10_13_DontCallThis());
+    } else if (minor == 12) {
+      EXPECT_FALSE(IsOS10_9());
+      EXPECT_FALSE(IsAtMostOS10_9());
+      EXPECT_TRUE(IsAtLeastOS10_9());
+      EXPECT_FALSE(IsOS10_10());
+      EXPECT_FALSE(IsAtMostOS10_10());
+      EXPECT_TRUE(IsAtLeastOS10_10());
+      EXPECT_FALSE(IsOS10_11());
+      EXPECT_FALSE(IsAtMostOS10_11());
+      EXPECT_TRUE(IsAtLeastOS10_11());
+      EXPECT_TRUE(IsOS10_12());
+      EXPECT_TRUE(IsAtMostOS10_12());
+      EXPECT_TRUE(IsAtLeastOS10_12());
+      EXPECT_FALSE(IsOS10_13());
+      EXPECT_FALSE(IsAtLeastOS10_13());
+      EXPECT_TRUE(IsAtMostOS10_13());
+      EXPECT_FALSE(IsOSLaterThan10_13_DontCallThis());
+    } else if (minor == 13) {
+      EXPECT_FALSE(IsOS10_9());
+      EXPECT_FALSE(IsAtMostOS10_9());
+      EXPECT_TRUE(IsAtLeastOS10_9());
+      EXPECT_FALSE(IsOS10_10());
+      EXPECT_FALSE(IsAtMostOS10_10());
+      EXPECT_TRUE(IsAtLeastOS10_10());
+      EXPECT_FALSE(IsOS10_11());
+      EXPECT_FALSE(IsAtMostOS10_11());
+      EXPECT_TRUE(IsAtLeastOS10_11());
+      EXPECT_FALSE(IsOS10_12());
+      EXPECT_FALSE(IsAtMostOS10_12());
+      EXPECT_TRUE(IsAtLeastOS10_12());
+      EXPECT_TRUE(IsOS10_13());
+      EXPECT_TRUE(IsAtLeastOS10_13());
+      EXPECT_TRUE(IsAtMostOS10_13());
+      EXPECT_FALSE(IsOSLaterThan10_13_DontCallThis());
+    } else {
+      // Not nine, ten, eleven, twelve, or thirteen. Ah, ah, ah.
+      EXPECT_TRUE(false);
+    }
+  } else {
+    // Not ten. What you gonna do?
+    EXPECT_FALSE(true);
+  }
+}
+
+TEST_F(MacUtilTest, ParseModelIdentifier) {
+  std::string model;
+  int32_t major = 1, minor = 2;
+
+  EXPECT_FALSE(ParseModelIdentifier("", &model, &major, &minor));
+  EXPECT_EQ(0U, model.length());
+  EXPECT_EQ(1, major);
+  EXPECT_EQ(2, minor);
+  EXPECT_FALSE(ParseModelIdentifier("FooBar", &model, &major, &minor));
+
+  EXPECT_TRUE(ParseModelIdentifier("MacPro4,1", &model, &major, &minor));
+  EXPECT_EQ(model, "MacPro");
+  EXPECT_EQ(4, major);
+  EXPECT_EQ(1, minor);
+
+  EXPECT_TRUE(ParseModelIdentifier("MacBookPro6,2", &model, &major, &minor));
+  EXPECT_EQ(model, "MacBookPro");
+  EXPECT_EQ(6, major);
+  EXPECT_EQ(2, minor);
+}
+
+TEST_F(MacUtilTest, TestRemoveQuarantineAttribute) {
+  ScopedTempDir temp_dir_;
+  ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
+  FilePath dummy_folder_path = temp_dir_.GetPath().Append("DummyFolder");
+  ASSERT_TRUE(base::CreateDirectory(dummy_folder_path));
+  const char* quarantine_str = "0000;4b392bb2;Chromium;|org.chromium.Chromium";
+  const char* file_path_str = dummy_folder_path.value().c_str();
+  EXPECT_EQ(0, setxattr(file_path_str, "com.apple.quarantine",
+      quarantine_str, strlen(quarantine_str), 0, 0));
+  EXPECT_EQ(static_cast<long>(strlen(quarantine_str)),
+      getxattr(file_path_str, "com.apple.quarantine",
+          NULL, 0, 0, 0));
+  EXPECT_TRUE(RemoveQuarantineAttribute(dummy_folder_path));
+  EXPECT_EQ(-1, getxattr(file_path_str, "com.apple.quarantine", NULL, 0, 0, 0));
+  EXPECT_EQ(ENOATTR, errno);
+}
+
+TEST_F(MacUtilTest, TestRemoveQuarantineAttributeTwice) {
+  ScopedTempDir temp_dir_;
+  ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
+  FilePath dummy_folder_path = temp_dir_.GetPath().Append("DummyFolder");
+  const char* file_path_str = dummy_folder_path.value().c_str();
+  ASSERT_TRUE(base::CreateDirectory(dummy_folder_path));
+  EXPECT_EQ(-1, getxattr(file_path_str, "com.apple.quarantine", NULL, 0, 0, 0));
+  // No quarantine attribute to begin with, but RemoveQuarantineAttribute still
+  // succeeds because in the end the folder still doesn't have the quarantine
+  // attribute set.
+  EXPECT_TRUE(RemoveQuarantineAttribute(dummy_folder_path));
+  EXPECT_TRUE(RemoveQuarantineAttribute(dummy_folder_path));
+  EXPECT_EQ(ENOATTR, errno);
+}
+
+TEST_F(MacUtilTest, TestRemoveQuarantineAttributeNonExistentPath) {
+  ScopedTempDir temp_dir_;
+  ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
+  FilePath non_existent_path = temp_dir_.GetPath().Append("DummyPath");
+  ASSERT_FALSE(PathExists(non_existent_path));
+  EXPECT_FALSE(RemoveQuarantineAttribute(non_existent_path));
+}
+
+}  // namespace
+
+}  // namespace mac
+}  // namespace base
diff --git a/base/mac/mach_logging.cc b/base/mac/mach_logging.cc
new file mode 100644
index 0000000..7b939b3
--- /dev/null
+++ b/base/mac/mach_logging.cc
@@ -0,0 +1,88 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac/mach_logging.h"
+
+#include <iomanip>
+#include <string>
+
+#include "base/strings/stringprintf.h"
+#include "build/build_config.h"
+
+#if !defined(OS_IOS)
+#include <servers/bootstrap.h>
+#endif  // !OS_IOS
+
+namespace {
+
+std::string FormatMachErrorNumber(mach_error_t mach_err) {
+  // For the os/kern subsystem, give the error number in decimal as in
+  // <mach/kern_return.h>. Otherwise, give it in hexadecimal to make it easier
+  // to visualize the various bits. See <mach/error.h>.
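+  // For example, KERN_INVALID_ARGUMENT is formatted as " (4)", while
+  // MACH_SEND_INVALID_DEST is formatted as " (0x10000003)".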
+  if (mach_err >= 0 && mach_err < KERN_RETURN_MAX) {
+    return base::StringPrintf(" (%d)", mach_err);
+  }
+  return base::StringPrintf(" (0x%08x)", mach_err);
+}
+
+}  // namespace
+
+namespace logging {
+
+MachLogMessage::MachLogMessage(const char* file_path,
+                               int line,
+                               LogSeverity severity,
+                               mach_error_t mach_err)
+    : LogMessage(file_path, line, severity),
+      mach_err_(mach_err) {
+}
+
+MachLogMessage::~MachLogMessage() {
+  stream() << ": "
+           << mach_error_string(mach_err_)
+           << FormatMachErrorNumber(mach_err_);
+}
+
+#if !defined(OS_IOS)
+
+BootstrapLogMessage::BootstrapLogMessage(const char* file_path,
+                                         int line,
+                                         LogSeverity severity,
+                                         kern_return_t bootstrap_err)
+    : LogMessage(file_path, line, severity),
+      bootstrap_err_(bootstrap_err) {
+}
+
+BootstrapLogMessage::~BootstrapLogMessage() {
+  stream() << ": "
+           << bootstrap_strerror(bootstrap_err_);
+
+  switch (bootstrap_err_) {
+    case BOOTSTRAP_SUCCESS:
+    case BOOTSTRAP_NOT_PRIVILEGED:
+    case BOOTSTRAP_NAME_IN_USE:
+    case BOOTSTRAP_UNKNOWN_SERVICE:
+    case BOOTSTRAP_SERVICE_ACTIVE:
+    case BOOTSTRAP_BAD_COUNT:
+    case BOOTSTRAP_NO_MEMORY:
+    case BOOTSTRAP_NO_CHILDREN: {
+      // Show known bootstrap errors in decimal because that's how they're
+      // defined in <servers/bootstrap.h>.
+      stream() << " (" << bootstrap_err_ << ")";
+      break;
+    }
+
+    default: {
+      // bootstrap_strerror passes unknown errors to mach_error_string, so
+      // format them as they would be if they were handled by
+      // MachErrorMessage.
+      stream() << FormatMachErrorNumber(bootstrap_err_);
+      break;
+    }
+  }
+}
+
+#endif  // !OS_IOS
+
+}  // namespace logging
diff --git a/base/mac/mach_logging.h b/base/mac/mach_logging.h
new file mode 100644
index 0000000..59ab762
--- /dev/null
+++ b/base/mac/mach_logging.h
@@ -0,0 +1,167 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_MACH_LOGGING_H_
+#define BASE_MAC_MACH_LOGGING_H_
+
+#include <mach/mach.h>
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "build/build_config.h"
+
+// Use the MACH_LOG family of macros along with a mach_error_t (kern_return_t)
+// containing a Mach error. The error value will be decoded so that logged
+// messages explain the error.
+//
+// Use the BOOTSTRAP_LOG family of macros specifically for errors that occur
+// while interoperating with the bootstrap subsystem. These errors will first
+// be looked up as bootstrap error messages. If no match is found, they will
+// be treated as generic Mach errors, as in MACH_LOG.
+//
+// Examples:
+//
+//   kern_return_t kr = mach_timebase_info(&info);
+//   if (kr != KERN_SUCCESS) {
+//     MACH_LOG(ERROR, kr) << "mach_timebase_info";
+//   }
+//
+//   kr = vm_deallocate(task, address, size);
+//   MACH_DCHECK(kr == KERN_SUCCESS, kr) << "vm_deallocate";
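+//
+//   // A sketch of BOOTSTRAP_LOG use, mirroring the bootstrap_check_in()
+//   // handling in mach_port_broker.mm ("org.example.service" is an
+//   // illustrative name):
+//   mach_port_t port;
+//   kr = bootstrap_check_in(bootstrap_port, "org.example.service", &port);
+//   if (kr != KERN_SUCCESS) {
+//     BOOTSTRAP_LOG(ERROR, kr) << "bootstrap_check_in";
+//   }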
+
+namespace logging {
+
+class BASE_EXPORT MachLogMessage : public logging::LogMessage {
+ public:
+  MachLogMessage(const char* file_path,
+                 int line,
+                 LogSeverity severity,
+                 mach_error_t mach_err);
+  ~MachLogMessage();
+
+ private:
+  mach_error_t mach_err_;
+
+  DISALLOW_COPY_AND_ASSIGN(MachLogMessage);
+};
+
+}  // namespace logging
+
+#if defined(NDEBUG)
+#define MACH_DVLOG_IS_ON(verbose_level) 0
+#else
+#define MACH_DVLOG_IS_ON(verbose_level) VLOG_IS_ON(verbose_level)
+#endif
+
+#define MACH_LOG_STREAM(severity, mach_err) \
+    COMPACT_GOOGLE_LOG_EX_ ## severity(MachLogMessage, mach_err).stream()
+#define MACH_VLOG_STREAM(verbose_level, mach_err) \
+    logging::MachLogMessage(__FILE__, __LINE__, \
+                            -verbose_level, mach_err).stream()
+
+#define MACH_LOG(severity, mach_err) \
+    LAZY_STREAM(MACH_LOG_STREAM(severity, mach_err), LOG_IS_ON(severity))
+#define MACH_LOG_IF(severity, condition, mach_err) \
+    LAZY_STREAM(MACH_LOG_STREAM(severity, mach_err), \
+                LOG_IS_ON(severity) && (condition))
+
+#define MACH_VLOG(verbose_level, mach_err) \
+    LAZY_STREAM(MACH_VLOG_STREAM(verbose_level, mach_err), \
+                VLOG_IS_ON(verbose_level))
+#define MACH_VLOG_IF(verbose_level, condition, mach_err) \
+    LAZY_STREAM(MACH_VLOG_STREAM(verbose_level, mach_err), \
+                VLOG_IS_ON(verbose_level) && (condition))
+
+#define MACH_CHECK(condition, mach_err) \
+    LAZY_STREAM(MACH_LOG_STREAM(FATAL, mach_err), !(condition)) \
+    << "Check failed: " # condition << ". "
+
+#define MACH_DLOG(severity, mach_err) \
+    LAZY_STREAM(MACH_LOG_STREAM(severity, mach_err), DLOG_IS_ON(severity))
+#define MACH_DLOG_IF(severity, condition, mach_err) \
+    LAZY_STREAM(MACH_LOG_STREAM(severity, mach_err), \
+                DLOG_IS_ON(severity) && (condition))
+
+#define MACH_DVLOG(verbose_level, mach_err) \
+    LAZY_STREAM(MACH_VLOG_STREAM(verbose_level, mach_err), \
+                MACH_DVLOG_IS_ON(verbose_level))
+#define MACH_DVLOG_IF(verbose_level, condition, mach_err) \
+    LAZY_STREAM(MACH_VLOG_STREAM(verbose_level, mach_err), \
+                MACH_DVLOG_IS_ON(verbose_level) && (condition))
+
+#define MACH_DCHECK(condition, mach_err)        \
+  LAZY_STREAM(MACH_LOG_STREAM(FATAL, mach_err), \
+              DCHECK_IS_ON() && !(condition))   \
+      << "Check failed: " #condition << ". "
+
+#if !defined(OS_IOS)
+
+namespace logging {
+
+class BASE_EXPORT BootstrapLogMessage : public logging::LogMessage {
+ public:
+  BootstrapLogMessage(const char* file_path,
+                      int line,
+                      LogSeverity severity,
+                      kern_return_t bootstrap_err);
+  ~BootstrapLogMessage();
+
+ private:
+  kern_return_t bootstrap_err_;
+
+  DISALLOW_COPY_AND_ASSIGN(BootstrapLogMessage);
+};
+
+}  // namespace logging
+
+#define BOOTSTRAP_DVLOG_IS_ON MACH_DVLOG_IS_ON
+
+#define BOOTSTRAP_LOG_STREAM(severity, bootstrap_err) \
+    COMPACT_GOOGLE_LOG_EX_ ## severity(BootstrapLogMessage, \
+                                       bootstrap_err).stream()
+#define BOOTSTRAP_VLOG_STREAM(verbose_level, bootstrap_err) \
+    logging::BootstrapLogMessage(__FILE__, __LINE__, \
+                                 -verbose_level, bootstrap_err).stream()
+
+#define BOOTSTRAP_LOG(severity, bootstrap_err) \
+    LAZY_STREAM(BOOTSTRAP_LOG_STREAM(severity, \
+                                     bootstrap_err), LOG_IS_ON(severity))
+#define BOOTSTRAP_LOG_IF(severity, condition, bootstrap_err) \
+    LAZY_STREAM(BOOTSTRAP_LOG_STREAM(severity, bootstrap_err), \
+                LOG_IS_ON(severity) && (condition))
+
+#define BOOTSTRAP_VLOG(verbose_level, bootstrap_err) \
+    LAZY_STREAM(BOOTSTRAP_VLOG_STREAM(verbose_level, bootstrap_err), \
+                VLOG_IS_ON(verbose_level))
+#define BOOTSTRAP_VLOG_IF(verbose_level, condition, bootstrap_err) \
+    LAZY_STREAM(BOOTSTRAP_VLOG_STREAM(verbose_level, bootstrap_err), \
+                VLOG_IS_ON(verbose_level) && (condition))
+
+#define BOOTSTRAP_CHECK(condition, bootstrap_err) \
+    LAZY_STREAM(BOOTSTRAP_LOG_STREAM(FATAL, bootstrap_err), !(condition)) \
+    << "Check failed: " # condition << ". "
+
+#define BOOTSTRAP_DLOG(severity, bootstrap_err) \
+    LAZY_STREAM(BOOTSTRAP_LOG_STREAM(severity, bootstrap_err), \
+                DLOG_IS_ON(severity))
+#define BOOTSTRAP_DLOG_IF(severity, condition, bootstrap_err) \
+    LAZY_STREAM(BOOTSTRAP_LOG_STREAM(severity, bootstrap_err), \
+                DLOG_IS_ON(severity) && (condition))
+
+#define BOOTSTRAP_DVLOG(verbose_level, bootstrap_err) \
+    LAZY_STREAM(BOOTSTRAP_VLOG_STREAM(verbose_level, bootstrap_err), \
+                BOOTSTRAP_DVLOG_IS_ON(verbose_level))
+#define BOOTSTRAP_DVLOG_IF(verbose_level, condition, bootstrap_err) \
+    LAZY_STREAM(BOOTSTRAP_VLOG_STREAM(verbose_level, bootstrap_err), \
+                BOOTSTRAP_DVLOG_IS_ON(verbose_level) && (condition))
+
+#define BOOTSTRAP_DCHECK(condition, bootstrap_err)        \
+  LAZY_STREAM(BOOTSTRAP_LOG_STREAM(FATAL, bootstrap_err), \
+              DCHECK_IS_ON() && !(condition))             \
+      << "Check failed: " #condition << ". "
+
+#endif  // !OS_IOS
+
+#endif  // BASE_MAC_MACH_LOGGING_H_
diff --git a/base/mac/mach_port_broker.h b/base/mac/mach_port_broker.h
new file mode 100644
index 0000000..4554b6a
--- /dev/null
+++ b/base/mac/mach_port_broker.h
@@ -0,0 +1,108 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_MACH_PORT_BROKER_H_
+#define BASE_MAC_MACH_PORT_BROKER_H_
+
+#include <mach/mach.h>
+
+#include <map>
+#include <memory>
+#include <string>
+
+#include "base/base_export.h"
+#include "base/mac/dispatch_source_mach.h"
+#include "base/mac/scoped_mach_port.h"
+#include "base/macros.h"
+#include "base/process/port_provider_mac.h"
+#include "base/process/process_handle.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+
+// On OS X, the task port of a process is required to collect metrics about the
+// process, and to insert Mach ports into the process. Running |task_for_pid()|
+// is only allowed for privileged code. However, a process has port rights to
+// all its subprocesses, so let the child processes send their Mach port to the
+// parent over IPC.
+//
+// Mach ports can only be sent over Mach IPC, not over the |socketpair()| that
+// the regular IPC system uses. Hence, each child process opens a Mach
+// connection shortly after launching and sends its task port to the parent
+// process over Mach IPC. A single |MachPortBroker| with a given name is
+// expected to exist in the parent process.
+//
+// Since this data arrives over a separate channel, it is not available
+// immediately after a child process has been started.
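+//
+// A sketch of typical use, mirroring mach_port_broker_unittest.cc ("service"
+// is an illustrative name):
+//
+//   // Parent process:
+//   base::MachPortBroker broker("service");
+//   CHECK(broker.Init());
+//   {
+//     base::AutoLock lock(broker.GetLock());
+//     broker.AddPlaceholderForPid(child_process.Handle());
+//   }
+//
+//   // Child process:
+//   base::MachPortBroker::ChildSendTaskPortToParent("service");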
+class BASE_EXPORT MachPortBroker : public base::PortProvider {
+ public:
+  // For use in child processes. This will send the task port of the current
+  // process over Mach IPC to the port registered by name (via this class) in
+  // the parent process. Returns true if the message was sent successfully
+  // and false otherwise.
+  static bool ChildSendTaskPortToParent(const std::string& name);
+
+  // Returns the Mach port name to use when sending or receiving messages.
+  // Does the Right Thing in the browser and in child processes: children
+  // derive the name from the parent's pid, so both sides agree on the name.
+  static std::string GetMachPortName(const std::string& name, bool is_child);
+
+  MachPortBroker(const std::string& name);
+  ~MachPortBroker() override;
+
+  // Performs any initialization work.
+  bool Init();
+
+  // Adds a placeholder to the map for the given pid with MACH_PORT_NULL.
+  // Callers are expected to later update the port with FinalizePid(). Callers
+  // MUST acquire the lock given by GetLock() before calling this method (and
+  // release the lock afterwards).
+  void AddPlaceholderForPid(base::ProcessHandle pid);
+
+  // Removes |pid| from the task port map. Callers MUST acquire the lock given
+  // by GetLock() before calling this method (and release the lock afterwards).
+  void InvalidatePid(base::ProcessHandle pid);
+
+  // The lock that protects this MachPortBroker object. Callers MUST acquire
+  // and release this lock around calls to AddPlaceholderForPid(),
+  // InvalidatePid(), and FinalizePid().
+  base::Lock& GetLock() { return lock_; }
+
+  // Implement |base::PortProvider|.
+  mach_port_t TaskForPid(base::ProcessHandle process) const override;
+
+ private:
+  friend class MachPortBrokerTest;
+
+  // Message handler that is invoked on |dispatch_source_| when an
+  // incoming message needs to be received.
+  void HandleRequest();
+
+  // Updates the mapping for |pid| to the given |task_port|. Does nothing if
+  // AddPlaceholderForPid() has not already been called for the given
+  // |pid|. Callers MUST acquire the lock given by GetLock() before calling
+  // this method (and release the lock afterwards).
+  void FinalizePid(base::ProcessHandle pid, mach_port_t task_port);
+
+  // Name used to identify a particular port broker.
+  const std::string name_;
+
+  // The Mach port on which the server listens.
+  base::mac::ScopedMachReceiveRight server_port_;
+
+  // The dispatch source and queue on which Mach messages will be received.
+  std::unique_ptr<base::DispatchSourceMach> dispatch_source_;
+
+  // Stores mach info for every process in the broker.
+  typedef std::map<base::ProcessHandle, mach_port_t> MachMap;
+  MachMap mach_map_;
+
+  // Mutex that guards |mach_map_|.
+  mutable base::Lock lock_;
+
+  DISALLOW_COPY_AND_ASSIGN(MachPortBroker);
+};
+
+}  // namespace base
+
+#endif  // BASE_MAC_MACH_PORT_BROKER_H_
diff --git a/base/mac/mach_port_broker.mm b/base/mac/mach_port_broker.mm
new file mode 100644
index 0000000..6d9fec5
--- /dev/null
+++ b/base/mac/mach_port_broker.mm
@@ -0,0 +1,184 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac/mach_port_broker.h"
+
+#include <bsm/libbsm.h>
+#include <servers/bootstrap.h>
+
+#include "base/logging.h"
+#include "base/mac/foundation_util.h"
+#include "base/mac/mach_logging.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+
+namespace base {
+
+namespace {
+
+// Mach message structure used in the child as a sending message.
+struct MachPortBroker_ChildSendMsg {
+  mach_msg_header_t header;
+  mach_msg_body_t body;
+  mach_msg_port_descriptor_t child_task_port;
+};
+
+// Complement to the ChildSendMsg, this is used in the parent for receiving
+// a message. Contains a message trailer with audit information.
+struct MachPortBroker_ParentRecvMsg : public MachPortBroker_ChildSendMsg {
+  mach_msg_audit_trailer_t trailer;
+};
+
+}  // namespace
+
+// static
+bool MachPortBroker::ChildSendTaskPortToParent(const std::string& name) {
+  // Look up the named MachPortBroker port that's been registered with the
+  // bootstrap server.
+  mach_port_t parent_port;
+  kern_return_t kr = bootstrap_look_up(bootstrap_port,
+      const_cast<char*>(GetMachPortName(name, true).c_str()), &parent_port);
+  if (kr != KERN_SUCCESS) {
+    BOOTSTRAP_LOG(ERROR, kr) << "bootstrap_look_up";
+    return false;
+  }
+  base::mac::ScopedMachSendRight scoped_right(parent_port);
+
+  // Create the check-in message. This copies a send right to this process's
+  // (the child's) task port and sends it to the parent.
+  MachPortBroker_ChildSendMsg msg;
+  bzero(&msg, sizeof(msg));
+  msg.header.msgh_bits = MACH_MSGH_BITS_REMOTE(MACH_MSG_TYPE_COPY_SEND) |
+                         MACH_MSGH_BITS_COMPLEX;
+  msg.header.msgh_remote_port = parent_port;
+  msg.header.msgh_size = sizeof(msg);
+  msg.body.msgh_descriptor_count = 1;
+  msg.child_task_port.name = mach_task_self();
+  msg.child_task_port.disposition = MACH_MSG_TYPE_PORT_SEND;
+  msg.child_task_port.type = MACH_MSG_PORT_DESCRIPTOR;
+
+  kr = mach_msg(&msg.header, MACH_SEND_MSG | MACH_SEND_TIMEOUT, sizeof(msg),
+      0, MACH_PORT_NULL, 100 /*milliseconds*/, MACH_PORT_NULL);
+  if (kr != KERN_SUCCESS) {
+    MACH_LOG(ERROR, kr) << "mach_msg";
+    return false;
+  }
+
+  return true;
+}
+
+// static
+std::string MachPortBroker::GetMachPortName(const std::string& name,
+                                            bool is_child) {
+  // In child processes, use the parent's pid.
+  const pid_t pid = is_child ? getppid() : getpid();
+  return base::StringPrintf(
+      "%s.%s.%d", base::mac::BaseBundleID(), name.c_str(), pid);
+}
+
+mach_port_t MachPortBroker::TaskForPid(base::ProcessHandle pid) const {
+  base::AutoLock lock(lock_);
+  MachPortBroker::MachMap::const_iterator it = mach_map_.find(pid);
+  if (it == mach_map_.end())
+    return MACH_PORT_NULL;
+  return it->second;
+}
+
+MachPortBroker::MachPortBroker(const std::string& name) : name_(name) {}
+
+MachPortBroker::~MachPortBroker() {}
+
+bool MachPortBroker::Init() {
+  DCHECK(server_port_.get() == MACH_PORT_NULL);
+
+  // Check in with launchd and publish the service name.
+  mach_port_t port;
+  kern_return_t kr = bootstrap_check_in(
+      bootstrap_port, GetMachPortName(name_, false).c_str(), &port);
+  if (kr != KERN_SUCCESS) {
+    BOOTSTRAP_LOG(ERROR, kr) << "bootstrap_check_in";
+    return false;
+  }
+  server_port_.reset(port);
+
+  // Start the dispatch source.
+  std::string queue_name =
+      base::StringPrintf("%s.MachPortBroker", base::mac::BaseBundleID());
+  dispatch_source_.reset(new base::DispatchSourceMach(
+      queue_name.c_str(), server_port_.get(), ^{ HandleRequest(); }));
+  dispatch_source_->Resume();
+
+  return true;
+}
+
+void MachPortBroker::AddPlaceholderForPid(base::ProcessHandle pid) {
+  lock_.AssertAcquired();
+  DCHECK_EQ(0u, mach_map_.count(pid));
+  mach_map_[pid] = MACH_PORT_NULL;
+}
+
+void MachPortBroker::InvalidatePid(base::ProcessHandle pid) {
+  lock_.AssertAcquired();
+
+  MachMap::iterator mach_it = mach_map_.find(pid);
+  if (mach_it != mach_map_.end()) {
+    kern_return_t kr = mach_port_deallocate(mach_task_self(), mach_it->second);
+    MACH_LOG_IF(WARNING, kr != KERN_SUCCESS, kr) << "mach_port_deallocate";
+    mach_map_.erase(mach_it);
+  }
+}
+
+void MachPortBroker::HandleRequest() {
+  MachPortBroker_ParentRecvMsg msg;
+  bzero(&msg, sizeof(msg));
+  msg.header.msgh_size = sizeof(msg);
+  msg.header.msgh_local_port = server_port_.get();
+
+  const mach_msg_option_t options = MACH_RCV_MSG |
+      MACH_RCV_TRAILER_TYPE(MACH_RCV_TRAILER_AUDIT) |
+      MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_AUDIT);
+
+  kern_return_t kr = mach_msg(&msg.header,
+                              options,
+                              0,
+                              sizeof(msg),
+                              server_port_.get(),
+                              MACH_MSG_TIMEOUT_NONE,
+                              MACH_PORT_NULL);
+  if (kr != KERN_SUCCESS) {
+    MACH_LOG(ERROR, kr) << "mach_msg";
+    return;
+  }
+
+  // Use the kernel audit information to make sure this message is from
+  // a task that this process spawned. The kernel audit token contains the
+  // unspoofable pid of the task that sent the message.
+  pid_t child_pid = audit_token_to_pid(msg.trailer.msgh_audit);
+  mach_port_t child_task_port = msg.child_task_port.name;
+
+  // Take the lock and update the broker information.
+  {
+    base::AutoLock lock(lock_);
+    FinalizePid(child_pid, child_task_port);
+  }
+  NotifyObservers(child_pid);
+}
+
+void MachPortBroker::FinalizePid(base::ProcessHandle pid,
+                                 mach_port_t task_port) {
+  lock_.AssertAcquired();
+
+  MachMap::iterator it = mach_map_.find(pid);
+  if (it == mach_map_.end()) {
+    // Do nothing for unknown pids.
+    LOG(ERROR) << "Unknown process " << pid << " is sending Mach IPC messages!";
+    return;
+  }
+
+  DCHECK(it->second == MACH_PORT_NULL);
+  if (it->second == MACH_PORT_NULL)
+    it->second = task_port;
+}
+
+}  // namespace base
diff --git a/base/mac/mach_port_broker_unittest.cc b/base/mac/mach_port_broker_unittest.cc
new file mode 100644
index 0000000..bff8eb6
--- /dev/null
+++ b/base/mac/mach_port_broker_unittest.cc
@@ -0,0 +1,133 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac/mach_port_broker.h"
+
+#include "base/command_line.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/test/multiprocess_test.h"
+#include "base/test/test_timeouts.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/multiprocess_func_list.h"
+
+namespace base {
+
+namespace {
+const char kBootstrapPortName[] = "thisisatest";
+}
+
+class MachPortBrokerTest : public testing::Test,
+                           public base::PortProvider::Observer {
+ public:
+  MachPortBrokerTest()
+      : broker_(kBootstrapPortName),
+        event_(base::WaitableEvent::ResetPolicy::MANUAL,
+               base::WaitableEvent::InitialState::NOT_SIGNALED),
+        received_process_(kNullProcessHandle) {
+    broker_.AddObserver(this);
+  }
+  ~MachPortBrokerTest() override {
+    broker_.RemoveObserver(this);
+  }
+
+  // Helper function to acquire/release locks and call
+  // |AddPlaceholderForPid()|.
+  void AddPlaceholderForPid(base::ProcessHandle pid) {
+    base::AutoLock lock(broker_.GetLock());
+    broker_.AddPlaceholderForPid(pid);
+  }
+
+  // Helper function to acquire/release locks and call |FinalizePid()|.
+  void FinalizePid(base::ProcessHandle pid,
+                   mach_port_t task_port) {
+    base::AutoLock lock(broker_.GetLock());
+    broker_.FinalizePid(pid, task_port);
+  }
+
+  void WaitForTaskPort() {
+    event_.Wait();
+  }
+
+  // base::PortProvider::Observer:
+  void OnReceivedTaskPort(ProcessHandle process) override {
+    received_process_ = process;
+    event_.Signal();
+  }
+
+ protected:
+  MachPortBroker broker_;
+  WaitableEvent event_;
+  ProcessHandle received_process_;
+};
+
+TEST_F(MachPortBrokerTest, Locks) {
+  // Acquire and release the locks.  Nothing bad should happen.
+  base::AutoLock lock(broker_.GetLock());
+}
+
+TEST_F(MachPortBrokerTest, AddPlaceholderAndFinalize) {
+  // Add a placeholder for PID 1.
+  AddPlaceholderForPid(1);
+  EXPECT_EQ(0u, broker_.TaskForPid(1));
+
+  // Finalize PID 1.
+  FinalizePid(1, 100u);
+  EXPECT_EQ(100u, broker_.TaskForPid(1));
+
+  // Should be no entry for PID 2.
+  EXPECT_EQ(0u, broker_.TaskForPid(2));
+}
+
+TEST_F(MachPortBrokerTest, FinalizeUnknownPid) {
+  // Finalizing an entry for an unknown pid should not add it to the map.
+  FinalizePid(1u, 100u);
+  EXPECT_EQ(0u, broker_.TaskForPid(1u));
+}
+
+MULTIPROCESS_TEST_MAIN(MachPortBrokerTestChild) {
+  CHECK(base::MachPortBroker::ChildSendTaskPortToParent(kBootstrapPortName));
+  return 0;
+}
+
+TEST_F(MachPortBrokerTest, ReceivePortFromChild) {
+  ASSERT_TRUE(broker_.Init());
+  CommandLine command_line(
+      base::GetMultiProcessTestChildBaseCommandLine());
+  broker_.GetLock().Acquire();
+  base::Process test_child_process = base::SpawnMultiProcessTestChild(
+      "MachPortBrokerTestChild", command_line, LaunchOptions());
+  broker_.AddPlaceholderForPid(test_child_process.Handle());
+  broker_.GetLock().Release();
+
+  WaitForTaskPort();
+  EXPECT_EQ(test_child_process.Handle(), received_process_);
+
+  int rv = -1;
+  ASSERT_TRUE(test_child_process.WaitForExitWithTimeout(
+      TestTimeouts::action_timeout(), &rv));
+  EXPECT_EQ(0, rv);
+
+  EXPECT_NE(static_cast<mach_port_t>(MACH_PORT_NULL),
+            broker_.TaskForPid(test_child_process.Handle()));
+}
+
+TEST_F(MachPortBrokerTest, ReceivePortFromChildWithoutAdding) {
+  ASSERT_TRUE(broker_.Init());
+  CommandLine command_line(
+      base::GetMultiProcessTestChildBaseCommandLine());
+  broker_.GetLock().Acquire();
+  base::Process test_child_process = base::SpawnMultiProcessTestChild(
+      "MachPortBrokerTestChild", command_line, LaunchOptions());
+  broker_.GetLock().Release();
+
+  int rv = -1;
+  ASSERT_TRUE(test_child_process.WaitForExitWithTimeout(
+      TestTimeouts::action_timeout(), &rv));
+  EXPECT_EQ(0, rv);
+
+  EXPECT_EQ(static_cast<mach_port_t>(MACH_PORT_NULL),
+            broker_.TaskForPid(test_child_process.Handle()));
+}
+
+}  // namespace base
diff --git a/base/mac/mach_port_util.cc b/base/mac/mach_port_util.cc
new file mode 100644
index 0000000..0eee210
--- /dev/null
+++ b/base/mac/mach_port_util.cc
@@ -0,0 +1,136 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac/mach_port_util.h"
+
+#include "base/logging.h"
+
+namespace base {
+
+namespace {
+
+// Struct for sending a complex Mach message.
+struct MachSendComplexMessage {
+  mach_msg_header_t header;
+  mach_msg_body_t body;
+  mach_msg_port_descriptor_t data;
+};
+
+// Struct for receiving a complex message.
+struct MachReceiveComplexMessage {
+  mach_msg_header_t header;
+  mach_msg_body_t body;
+  mach_msg_port_descriptor_t data;
+  mach_msg_trailer_t trailer;
+};
+
+}  // namespace
+
+kern_return_t SendMachPort(mach_port_t endpoint,
+                           mach_port_t port_to_send,
+                           int disposition) {
+  MachSendComplexMessage send_msg;
+  send_msg.header.msgh_bits =
+      MACH_MSGH_BITS(MACH_MSG_TYPE_MOVE_SEND_ONCE, 0) | MACH_MSGH_BITS_COMPLEX;
+  send_msg.header.msgh_size = sizeof(send_msg);
+  send_msg.header.msgh_remote_port = endpoint;
+  send_msg.header.msgh_local_port = MACH_PORT_NULL;
+  send_msg.header.msgh_reserved = 0;
+  send_msg.header.msgh_id = 0;
+  send_msg.body.msgh_descriptor_count = 1;
+  send_msg.data.name = port_to_send;
+  send_msg.data.disposition = disposition;
+  send_msg.data.type = MACH_MSG_PORT_DESCRIPTOR;
+
+  kern_return_t kr =
+      mach_msg(&send_msg.header, MACH_SEND_MSG | MACH_SEND_TIMEOUT,
+               send_msg.header.msgh_size,
+               0,                // receive limit
+               MACH_PORT_NULL,   // receive name
+               0,                // timeout
+               MACH_PORT_NULL);  // notification port
+
+  if (kr != KERN_SUCCESS)
+    mach_port_deallocate(mach_task_self(), endpoint);
+
+  return kr;
+}
+
+base::mac::ScopedMachSendRight ReceiveMachPort(mach_port_t port_to_listen_on) {
+  MachReceiveComplexMessage recv_msg;
+  mach_msg_header_t* recv_hdr = &recv_msg.header;
+  recv_hdr->msgh_local_port = port_to_listen_on;
+  recv_hdr->msgh_size = sizeof(recv_msg);
+
+  kern_return_t kr =
+      mach_msg(recv_hdr, MACH_RCV_MSG | MACH_RCV_TIMEOUT, 0,
+               recv_hdr->msgh_size, port_to_listen_on, 0, MACH_PORT_NULL);
+  if (kr != KERN_SUCCESS)
+    return base::mac::ScopedMachSendRight(MACH_PORT_NULL);
+  if (recv_msg.header.msgh_id != 0)
+    return base::mac::ScopedMachSendRight(MACH_PORT_NULL);
+  return base::mac::ScopedMachSendRight(recv_msg.data.name);
+}
+
+mach_port_name_t CreateIntermediateMachPort(
+    mach_port_t task_port,
+    base::mac::ScopedMachSendRight port_to_insert,
+    MachCreateError* error_code) {
+  DCHECK_NE(mach_task_self(), task_port);
+  DCHECK_NE(static_cast<mach_port_name_t>(MACH_PORT_NULL), task_port);
+
+  // Make a port with receive rights in the destination task.
+  mach_port_name_t endpoint;
+  kern_return_t kr =
+      mach_port_allocate(task_port, MACH_PORT_RIGHT_RECEIVE, &endpoint);
+  if (kr != KERN_SUCCESS) {
+    if (error_code)
+      *error_code = MachCreateError::ERROR_MAKE_RECEIVE_PORT;
+    return MACH_PORT_NULL;
+  }
+
+  // Change its message queue limit so that it accepts one message.
+  mach_port_limits limits = {};
+  limits.mpl_qlimit = 1;
+  kr = mach_port_set_attributes(task_port, endpoint, MACH_PORT_LIMITS_INFO,
+                                reinterpret_cast<mach_port_info_t>(&limits),
+                                MACH_PORT_LIMITS_INFO_COUNT);
+  if (kr != KERN_SUCCESS) {
+    if (error_code)
+      *error_code = MachCreateError::ERROR_SET_ATTRIBUTES;
+    mach_port_deallocate(task_port, endpoint);
+    return MACH_PORT_NULL;
+  }
+
+  // Get a send right.
+  mach_port_t send_once_right;
+  mach_msg_type_name_t send_right_type;
+  kr =
+      mach_port_extract_right(task_port, endpoint, MACH_MSG_TYPE_MAKE_SEND_ONCE,
+                              &send_once_right, &send_right_type);
+  if (kr != KERN_SUCCESS) {
+    if (error_code)
+      *error_code = MachCreateError::ERROR_EXTRACT_DEST_RIGHT;
+    mach_port_deallocate(task_port, endpoint);
+    return MACH_PORT_NULL;
+  }
+  DCHECK_EQ(static_cast<mach_msg_type_name_t>(MACH_MSG_TYPE_PORT_SEND_ONCE),
+            send_right_type);
+
+  // This call takes ownership of |send_once_right|.
+  kr = base::SendMachPort(
+      send_once_right, port_to_insert.get(), MACH_MSG_TYPE_COPY_SEND);
+  if (kr != KERN_SUCCESS) {
+    if (error_code)
+      *error_code = MachCreateError::ERROR_SEND_MACH_PORT;
+    mach_port_deallocate(task_port, endpoint);
+    return MACH_PORT_NULL;
+  }
+
+  // Endpoint is intentionally leaked into the destination task. An IPC must be
+  // sent to the destination task so that it can clean up this port.
+  return endpoint;
+}
+
+}  // namespace base
diff --git a/base/mac/mach_port_util.h b/base/mac/mach_port_util.h
new file mode 100644
index 0000000..f7a7f32
--- /dev/null
+++ b/base/mac/mach_port_util.h
@@ -0,0 +1,48 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_MACH_PORT_UTIL_H_
+#define BASE_MAC_MACH_PORT_UTIL_H_
+
+#include <mach/mach.h>
+
+#include "base/base_export.h"
+#include "base/mac/scoped_mach_port.h"
+
+namespace base {
+
+enum class MachCreateError {
+    ERROR_MAKE_RECEIVE_PORT,
+    ERROR_SET_ATTRIBUTES,
+    ERROR_EXTRACT_DEST_RIGHT,
+    ERROR_SEND_MACH_PORT,
+};
+
+// Sends a Mach port to |dest_port|. Assumes that |dest_port| is a send once
+// right. Takes ownership of |dest_port|.
+BASE_EXPORT kern_return_t SendMachPort(mach_port_t dest_port,
+                                       mach_port_t port_to_send,
+                                       int disposition);
+
+// Receives a Mach port from |port_to_listen_on|, which should have exactly one
+// queued message. Returns |MACH_PORT_NULL| on any error.
+BASE_EXPORT base::mac::ScopedMachSendRight ReceiveMachPort(
+    mach_port_t port_to_listen_on);
+
+// Creates an intermediate Mach port in |task_port| and sends |port_to_insert|
+// as a mach_msg to the intermediate Mach port.
+// |task_port| is the task port of another process.
+// |port_to_insert| must be a send right in the current task's name space.
+// Returns the intermediate port on success, and MACH_PORT_NULL on failure.
+// On failure, |error_code| is set if not null.
+// This method takes ownership of |port_to_insert|. On success, ownership is
+// passed to the intermediate Mach port.
+BASE_EXPORT mach_port_name_t CreateIntermediateMachPort(
+    mach_port_t task_port,
+    base::mac::ScopedMachSendRight port_to_insert,
+    MachCreateError* error_code);
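+
+// A sketch of expected use (|child_task_port| and |send_right| are
+// illustrative):
+//
+//   base::MachCreateError error_code;
+//   mach_port_name_t endpoint = base::CreateIntermediateMachPort(
+//       child_task_port, std::move(send_right), &error_code);
+//   if (endpoint == MACH_PORT_NULL) {
+//     // |error_code| identifies the step that failed.
+//   }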
+
+}  // namespace base
+
+#endif  // BASE_MAC_MACH_PORT_UTIL_H_
diff --git a/base/mac/objc_release_properties.h b/base/mac/objc_release_properties.h
new file mode 100644
index 0000000..d064cf9
--- /dev/null
+++ b/base/mac/objc_release_properties.h
@@ -0,0 +1,65 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if defined(__has_feature) && __has_feature(objc_arc)
+#error "ARC manages properties, so base::mac::ReleaseProperties isn't needed."
+#endif
+
+#ifndef BASE_MAC_OBJC_RELEASE_PROPERTIES_H_
+#define BASE_MAC_OBJC_RELEASE_PROPERTIES_H_
+
+#import <Foundation/Foundation.h>
+
+#include "base/base_export.h"
+
+// base::mac::ReleaseProperties(self) can be used in a class's -dealloc method
+// to release all properties marked "retain" or "copy" and backed by instance
+// variables. It only affects properties defined by the calling class, not
+// sub/superclass properties.
+//
+// Example usage:
+//
+//     @interface AllaysIBF : NSObject
+//
+//     @property(retain, nonatomic) NSString* string;
+//     @property(copy, nonatomic) NSMutableDictionary* dictionary;
+//     @property(assign, nonatomic) IBFDelegate* delegate;
+//
+//     @end  // @interface AllaysIBF
+//
+//     @implementation AllaysIBF
+//
+//     - (void)dealloc {
+//       base::mac::ReleaseProperties(self);
+//       [super dealloc];
+//     }
+//
+//     @end  // @implementation AllaysIBF
+//
+// self.string and self.dictionary will each be released, but self.delegate
+// will not because it is marked "assign", not "retain" or "copy".
+//
+// Another approach would be to provide a base class to inherit from whose
+// -dealloc walks the property lists of all subclasses to release their
+// properties. Distant subclasses might not expect it and over-release their
+// properties, so don't do that.
+
+namespace base {
+namespace mac {
+
+namespace details {
+
+BASE_EXPORT void ReleaseProperties(id, Class);
+
+}  // namespace details
+
+template <typename Self>
+void ReleaseProperties(Self* self) {
+  details::ReleaseProperties(self, [Self class]);
+}
+
+}  // namespace mac
+}  // namespace base
+
+#endif  // BASE_MAC_OBJC_RELEASE_PROPERTIES_H_
diff --git a/base/mac/objc_release_properties.mm b/base/mac/objc_release_properties.mm
new file mode 100644
index 0000000..d0006cf
--- /dev/null
+++ b/base/mac/objc_release_properties.mm
@@ -0,0 +1,66 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac/objc_release_properties.h"
+
+#include <memory>
+
+#include <objc/runtime.h>
+
+#include "base/logging.h"
+#include "base/memory/free_deleter.h"
+
+namespace {
+
+bool IsRetained(objc_property_t property) {
+  // The format of the string returned by property_getAttributes is documented
+  // at
+  // http://developer.apple.com/library/mac/#documentation/Cocoa/Conceptual/ObjCRuntimeGuide/Articles/ocrtPropertyIntrospection.html#//apple_ref/doc/uid/TP40008048-CH101-SW6
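+  // For example, a (copy, nonatomic) NSString* property synthesized with the
+  // backing ivar |string_| yields an attribute string along the lines of
+  // "T@\"NSString\",C,N,Vstring_" (the exact string here is illustrative).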
+  const char* attribute = property_getAttributes(property);
+  while (attribute[0]) {
+    switch (attribute[0]) {
+      case 'C':  // copy
+      case '&':  // retain
+        return true;
+    }
+    do {
+      attribute++;
+    } while (attribute[0] && attribute[-1] != ',');
+  }
+  return false;
+}
+
+id ValueOf(id obj, objc_property_t property) {
+  std::unique_ptr<char, base::FreeDeleter> ivar_name(
+      property_copyAttributeValue(property, "V"));  // instance variable name
+  if (!ivar_name)
+    return nil;
+  id ivar_value = nil;
+  Ivar ivar = object_getInstanceVariable(obj, &*ivar_name,
+                                         reinterpret_cast<void**>(&ivar_value));
+  DCHECK(ivar);
+  return ivar_value;
+}
+
+}  // namespace
+
+namespace base {
+namespace mac {
+namespace details {
+
+void ReleaseProperties(id self, Class cls) {
+  unsigned int property_count;
+  std::unique_ptr<objc_property_t[], base::FreeDeleter> properties(
+      class_copyPropertyList(cls, &property_count));
+  for (size_t i = 0; i < property_count; ++i) {
+    objc_property_t property = properties[i];
+    if (!IsRetained(property))
+      continue;
+    [ValueOf(self, property) release];
+  }
+}
+
+}  // namespace details
+}  // namespace mac
+}  // namespace base
diff --git a/base/mac/objc_release_properties_unittest.mm b/base/mac/objc_release_properties_unittest.mm
new file mode 100644
index 0000000..2d90127
--- /dev/null
+++ b/base/mac/objc_release_properties_unittest.mm
@@ -0,0 +1,375 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac/objc_release_properties.h"
+#include "base/stl_util.h"
+
+#import "base/mac/scoped_nsautorelease_pool.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#import <objc/runtime.h>
+
+// "When I'm alone, I count myself."
+//   --Count von Count, http://www.youtube.com/watch?v=FKzszqa9WA4
+
+namespace {
+
+// The number of CountVonCounts outstanding.
+int ah_ah_ah;
+
+// NumberHolder exists to exercise the property attribute string parser by
+// providing a named struct and an anonymous union.
+struct NumberHolder {
+  union {
+    long long sixty_four;
+    int thirty_two;
+    short sixteen;
+    char eight;
+  } what;
+  enum { SIXTY_FOUR, THIRTY_TWO, SIXTEEN, EIGHT } how;
+};
+
+}  // namespace
+
+@interface CountVonCount : NSObject<NSCopying>
+
++ (CountVonCount*)countVonCount;
+
+@end  // @interface CountVonCount
+
+@implementation CountVonCount
+
++ (CountVonCount*)countVonCount {
+  return [[[CountVonCount alloc] init] autorelease];
+}
+
+- (id)init {
+  ++ah_ah_ah;
+  return [super init];
+}
+
+- (void)dealloc {
+  --ah_ah_ah;
+  [super dealloc];
+}
+
+- (id)copyWithZone:(NSZone*)zone {
+  return [[CountVonCount allocWithZone:zone] init];
+}
+
+@end  // @implementation CountVonCount
+
+@interface ObjCPropertyTestBase : NSObject {
+ @private
+  CountVonCount* baseCvcRetain_;
+  CountVonCount* baseCvcCopy_;
+  CountVonCount* baseCvcAssign_;
+  CountVonCount* baseCvcNotProperty_;
+  CountVonCount* baseCvcNil_;
+  CountVonCount* baseCvcCustom_;
+  int baseInt_;
+  double baseDouble_;
+  void* basePointer_;
+  NumberHolder baseStruct_;
+}
+
+@property(retain, nonatomic) CountVonCount* baseCvcRetain;
+@property(copy, nonatomic) CountVonCount* baseCvcCopy;
+@property(assign, nonatomic) CountVonCount* baseCvcAssign;
+@property(retain, nonatomic) CountVonCount* baseCvcNil;
+@property(retain, nonatomic, getter=baseCustom, setter=setBaseCustom:)
+    CountVonCount* baseCvcCustom;
+@property(readonly, retain, nonatomic) CountVonCount* baseCvcReadOnly;
+@property(retain, nonatomic) CountVonCount* baseCvcDynamic;
+@property(assign, nonatomic) int baseInt;
+@property(assign, nonatomic) double baseDouble;
+@property(assign, nonatomic) void* basePointer;
+@property(assign, nonatomic) NumberHolder baseStruct;
+
+- (void)setBaseCvcNotProperty:(CountVonCount*)cvc;
+
+@end  // @interface ObjCPropertyTestBase
+
+@implementation ObjCPropertyTestBase
+
+@synthesize baseCvcRetain = baseCvcRetain_;
+@synthesize baseCvcCopy = baseCvcCopy_;
+@synthesize baseCvcAssign = baseCvcAssign_;
+@synthesize baseCvcNil = baseCvcNil_;
+@synthesize baseCvcCustom = baseCvcCustom_;
+@synthesize baseCvcReadOnly = baseCvcReadOnly_;
+@dynamic baseCvcDynamic;
+@synthesize baseInt = baseInt_;
+@synthesize baseDouble = baseDouble_;
+@synthesize basePointer = basePointer_;
+@synthesize baseStruct = baseStruct_;
+
+- (void)dealloc {
+  [baseCvcNotProperty_ release];
+  base::mac::ReleaseProperties(self);
+  [super dealloc];
+}
+
+- (void)setBaseCvcNotProperty:(CountVonCount*)cvc {
+  if (cvc != baseCvcNotProperty_) {
+    [baseCvcNotProperty_ release];
+    baseCvcNotProperty_ = [cvc retain];
+  }
+}
+
+- (void)setBaseCvcReadOnlyProperty:(CountVonCount*)cvc {
+  if (cvc != baseCvcReadOnly_) {
+    [baseCvcReadOnly_ release];
+    baseCvcReadOnly_ = [cvc retain];
+  }
+}
+
+@end  // @implementation ObjCPropertyTestBase
+
+@protocol ObjCPropertyTestProtocol
+
+@property(retain, nonatomic) CountVonCount* protoCvcRetain;
+@property(copy, nonatomic) CountVonCount* protoCvcCopy;
+@property(assign, nonatomic) CountVonCount* protoCvcAssign;
+@property(retain, nonatomic) CountVonCount* protoCvcNil;
+@property(retain, nonatomic, getter=protoCustom, setter=setProtoCustom:)
+    CountVonCount* protoCvcCustom;
+@property(retain, nonatomic) CountVonCount* protoCvcDynamic;
+@property(assign, nonatomic) int protoInt;
+@property(assign, nonatomic) double protoDouble;
+@property(assign, nonatomic) void* protoPointer;
+@property(assign, nonatomic) NumberHolder protoStruct;
+
+@end  // @protocol ObjCPropertyTestProtocol
+
+// @protocol(NSObject) declares some (copy, readonly) properties (superclass,
+// description, debugDescription, and hash), but we're not expected to release
+// them. The current implementation only releases properties backed by instance
+// variables, and this makes sure that doesn't change in a breaking way.
+@interface ObjCPropertyTestDerived
+    : ObjCPropertyTestBase<ObjCPropertyTestProtocol, NSObject> {
+ @private
+  CountVonCount* derivedCvcRetain_;
+  CountVonCount* derivedCvcCopy_;
+  CountVonCount* derivedCvcAssign_;
+  CountVonCount* derivedCvcNotProperty_;
+  CountVonCount* derivedCvcNil_;
+  CountVonCount* derivedCvcCustom_;
+  int derivedInt_;
+  double derivedDouble_;
+  void* derivedPointer_;
+  NumberHolder derivedStruct_;
+
+  CountVonCount* protoCvcRetain_;
+  CountVonCount* protoCvcCopy_;
+  CountVonCount* protoCvcAssign_;
+  CountVonCount* protoCvcNil_;
+  CountVonCount* protoCvcCustom_;
+  int protoInt_;
+  double protoDouble_;
+  void* protoPointer_;
+  NumberHolder protoStruct_;
+}
+
+@property(retain, nonatomic) CountVonCount* derivedCvcRetain;
+@property(copy, nonatomic) CountVonCount* derivedCvcCopy;
+@property(assign, nonatomic) CountVonCount* derivedCvcAssign;
+@property(retain, nonatomic) CountVonCount* derivedCvcNil;
+@property(retain, nonatomic, getter=derivedCustom, setter=setDerivedCustom:)
+    CountVonCount* derivedCvcCustom;
+@property(retain, nonatomic) CountVonCount* derivedCvcDynamic;
+@property(assign, nonatomic) int derivedInt;
+@property(assign, nonatomic) double derivedDouble;
+@property(assign, nonatomic) void* derivedPointer;
+@property(assign, nonatomic) NumberHolder derivedStruct;
+
+- (void)setDerivedCvcNotProperty:(CountVonCount*)cvc;
+
+@end  // @interface ObjCPropertyTestDerived
+
+@implementation ObjCPropertyTestDerived
+
+@synthesize derivedCvcRetain = derivedCvcRetain_;
+@synthesize derivedCvcCopy = derivedCvcCopy_;
+@synthesize derivedCvcAssign = derivedCvcAssign_;
+@synthesize derivedCvcNil = derivedCvcNil_;
+@synthesize derivedCvcCustom = derivedCvcCustom_;
+@dynamic derivedCvcDynamic;
+@synthesize derivedInt = derivedInt_;
+@synthesize derivedDouble = derivedDouble_;
+@synthesize derivedPointer = derivedPointer_;
+@synthesize derivedStruct = derivedStruct_;
+
+@synthesize protoCvcRetain = protoCvcRetain_;
+@synthesize protoCvcCopy = protoCvcCopy_;
+@synthesize protoCvcAssign = protoCvcAssign_;
+@synthesize protoCvcNil = protoCvcNil_;
+@synthesize protoCvcCustom = protoCvcCustom_;
+@dynamic protoCvcDynamic;
+@synthesize protoInt = protoInt_;
+@synthesize protoDouble = protoDouble_;
+@synthesize protoPointer = protoPointer_;
+@synthesize protoStruct = protoStruct_;
+
++ (BOOL)resolveInstanceMethod:(SEL)sel {
+  static const std::vector<SEL> dynamicMethods {
+    @selector(baseCvcDynamic), @selector(derivedCvcDynamic),
+        @selector(protoCvcDynamic),
+  };
+  if (!base::ContainsValue(dynamicMethods, sel)) {
+    return NO;
+  }
+  id (*imp)() = []() -> id { return nil; };
+  class_addMethod([self class], sel, reinterpret_cast<IMP>(imp), "@@:");
+  return YES;
+}
+
+- (void)dealloc {
+  base::mac::ReleaseProperties(self);
+  [derivedCvcNotProperty_ release];
+  [super dealloc];
+}
+
+- (void)setDerivedCvcNotProperty:(CountVonCount*)cvc {
+  if (cvc != derivedCvcNotProperty_) {
+    [derivedCvcNotProperty_ release];
+    derivedCvcNotProperty_ = [cvc retain];
+  }
+}
+
+@end  // @implementation ObjCPropertyTestDerived
+
+@interface ObjcPropertyTestEmpty : NSObject
+@end
+
+@implementation ObjcPropertyTestEmpty
+
+- (void)dealloc {
+  base::mac::ReleaseProperties(self);
+  [super dealloc];
+}
+
+@end  // @implementation ObjcPropertyTestEmpty
+
+namespace {
+
+TEST(ObjCReleasePropertiesTest, SesameStreet) {
+  ObjCPropertyTestDerived* test_object = [[ObjCPropertyTestDerived alloc] init];
+
+  // Assure a clean slate.
+  EXPECT_EQ(0, ah_ah_ah);
+  EXPECT_EQ(1U, [test_object retainCount]);
+
+  CountVonCount* baseAssign = [[CountVonCount alloc] init];
+  CountVonCount* derivedAssign = [[CountVonCount alloc] init];
+  CountVonCount* protoAssign = [[CountVonCount alloc] init];
+
+  // Make sure that worked before things get more involved.
+  EXPECT_EQ(3, ah_ah_ah);
+
+  {
+    base::mac::ScopedNSAutoreleasePool pool;
+
+    test_object.baseCvcRetain = [CountVonCount countVonCount];
+    test_object.baseCvcCopy = [CountVonCount countVonCount];
+    test_object.baseCvcAssign = baseAssign;
+    test_object.baseCvcCustom = [CountVonCount countVonCount];
+    [test_object setBaseCvcReadOnlyProperty:[CountVonCount countVonCount]];
+    [test_object setBaseCvcNotProperty:[CountVonCount countVonCount]];
+
+    // That added 5 objects, plus 1 more that was copied.
+    EXPECT_EQ(9, ah_ah_ah);
+
+    test_object.derivedCvcRetain = [CountVonCount countVonCount];
+    test_object.derivedCvcCopy = [CountVonCount countVonCount];
+    test_object.derivedCvcAssign = derivedAssign;
+    test_object.derivedCvcCustom = [CountVonCount countVonCount];
+    [test_object setDerivedCvcNotProperty:[CountVonCount countVonCount]];
+
+    // That added 4 objects, plus 1 more that was copied.
+    EXPECT_EQ(14, ah_ah_ah);
+
+    test_object.protoCvcRetain = [CountVonCount countVonCount];
+    test_object.protoCvcCopy = [CountVonCount countVonCount];
+    test_object.protoCvcAssign = protoAssign;
+    test_object.protoCvcCustom = [CountVonCount countVonCount];
+
+    // That added 3 objects, plus 1 more that was copied.
+    EXPECT_EQ(18, ah_ah_ah);
+  }
+
+  // Now that the autorelease pool has been popped, the 3 objects that were
+  // copied when placed into the test object will have been deallocated.
+  EXPECT_EQ(15, ah_ah_ah);
+
+  // Make sure that the setters work and have the expected semantics.
+  test_object.baseCvcRetain = nil;
+  test_object.baseCvcCopy = nil;
+  test_object.baseCvcAssign = nil;
+  test_object.baseCvcCustom = nil;
+  test_object.derivedCvcRetain = nil;
+  test_object.derivedCvcCopy = nil;
+  test_object.derivedCvcAssign = nil;
+  test_object.derivedCvcCustom = nil;
+  test_object.protoCvcRetain = nil;
+  test_object.protoCvcCopy = nil;
+  test_object.protoCvcAssign = nil;
+  test_object.protoCvcCustom = nil;
+
+  // The CountVonCounts marked "retain" and "copy" should have been
+  // deallocated. Those marked assign should not have been. The only ones that
+  // should exist now are the ones marked "assign", the ones held in
+  // non-property instance variables, and the ones held in properties marked
+  // readonly.
+  EXPECT_EQ(6, ah_ah_ah);
+
+  {
+    base::mac::ScopedNSAutoreleasePool pool;
+
+    // Put things back to how they were.
+    test_object.baseCvcRetain = [CountVonCount countVonCount];
+    test_object.baseCvcCopy = [CountVonCount countVonCount];
+    test_object.baseCvcAssign = baseAssign;
+    test_object.baseCvcCustom = [CountVonCount countVonCount];
+    test_object.derivedCvcRetain = [CountVonCount countVonCount];
+    test_object.derivedCvcCopy = [CountVonCount countVonCount];
+    test_object.derivedCvcAssign = derivedAssign;
+    test_object.derivedCvcCustom = [CountVonCount countVonCount];
+    test_object.protoCvcRetain = [CountVonCount countVonCount];
+    test_object.protoCvcCopy = [CountVonCount countVonCount];
+    test_object.protoCvcAssign = protoAssign;
+    test_object.protoCvcCustom = [CountVonCount countVonCount];
+
+    // 9 more CountVonCounts, 3 of which were copied.
+    EXPECT_EQ(18, ah_ah_ah);
+  }
+
+  // Now that the autorelease pool has been popped, the 3 copies are gone.
+  EXPECT_EQ(15, ah_ah_ah);
+
+  // Releasing the test object should get rid of everything that it owns.
+  [test_object release];
+
+  // base::mac::ReleaseProperties(self) should have released all of the
+  // CountVonCounts associated with properties marked "retain" or "copy". The
+  // -dealloc methods in each should have released the single non-property
+  // objects in each. Only the CountVonCounts assigned to the properties marked
+  // "assign" should remain.
+  EXPECT_EQ(3, ah_ah_ah);
+
+  [baseAssign release];
+  [derivedAssign release];
+  [protoAssign release];
+
+  // Zero! Zero counts! Ah, ah, ah.
+  EXPECT_EQ(0, ah_ah_ah);
+}
+
+TEST(ObjCReleasePropertiesTest, EmptyObject) {
+  // Test that ReleaseProperties doesn't do anything unexpected to a class
+  // with no properties.
+  [[[ObjcPropertyTestEmpty alloc] init] release];
+}
+
+}  // namespace
diff --git a/base/mac/os_crash_dumps.cc b/base/mac/os_crash_dumps.cc
new file mode 100644
index 0000000..95af009
--- /dev/null
+++ b/base/mac/os_crash_dumps.cc
@@ -0,0 +1,61 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac/os_crash_dumps.h"
+
+#include <signal.h>
+#include <stddef.h>
+#include <unistd.h>
+
+#include "base/logging.h"
+#include "base/macros.h"
+
+namespace base {
+namespace mac {
+
+namespace {
+
+void ExitSignalHandler(int sig) {
+  // A call to exit() can call atexit() handlers.  If we SIGSEGV due
+  // to a corrupt heap, and if we have an atexit handler that
+  // allocates or frees memory, we are in trouble if we do not _exit.
+  _exit(128 + sig);
+}
+
+}  // namespace
+
+void DisableOSCrashDumps() {
+  // These are the POSIX signals corresponding to the Mach exceptions that
+  // Apple Crash Reporter handles.  See ux_exception() in xnu's
+  // bsd/uxkern/ux_exception.c and machine_exception() in xnu's
+  // bsd/dev/*/unix_signal.c.
+  const int signals_to_intercept[] = {
+    // Hardware faults
+    SIGILL,   // EXC_BAD_INSTRUCTION
+    SIGTRAP,  // EXC_BREAKPOINT
+    SIGFPE,   // EXC_ARITHMETIC
+    SIGBUS,   // EXC_BAD_ACCESS
+    SIGSEGV,  // EXC_BAD_ACCESS
+    // Not a hardware fault
+    SIGABRT
+  };
+
+  // For all these signals, just wire things up so we exit immediately.
+  for (size_t i = 0; i < arraysize(signals_to_intercept); ++i) {
+    struct sigaction act = {};
+    act.sa_handler = ExitSignalHandler;
+
+    // It is better to allow the signal handler to run on the stack
+    // registered with sigaltstack(), if one is present.
+    act.sa_flags = SA_ONSTACK;
+
+    if (sigemptyset(&act.sa_mask) != 0)
+      DPLOG(FATAL) << "sigemptyset() failed";
+    if (sigaction(signals_to_intercept[i], &act, NULL) != 0)
+      DPLOG(FATAL) << "sigaction() failed";
+  }
+}
+
+}  // namespace mac
+}  // namespace base
diff --git a/base/mac/os_crash_dumps.h b/base/mac/os_crash_dumps.h
new file mode 100644
index 0000000..31d90fb
--- /dev/null
+++ b/base/mac/os_crash_dumps.h
@@ -0,0 +1,22 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_OS_CRASH_DUMPS_H_
+#define BASE_MAC_OS_CRASH_DUMPS_H_
+
+#include "base/base_export.h"
+
+namespace base {
+namespace mac {
+
+// On Mac OS X, it can take a really long time for the OS crash handler to
+// process a Chrome crash when debugging symbols are available.  This
+// translates into a long wait until the process actually dies.  This call
+// disables Apple Crash Reporter entirely.
+BASE_EXPORT void DisableOSCrashDumps();
+
+}  // namespace mac
+}  // namespace base
+
+#endif  // BASE_MAC_OS_CRASH_DUMPS_H_
diff --git a/base/mac/scoped_aedesc.h b/base/mac/scoped_aedesc.h
new file mode 100644
index 0000000..7327092
--- /dev/null
+++ b/base/mac/scoped_aedesc.h
@@ -0,0 +1,52 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_SCOPED_AEDESC_H_
+#define BASE_MAC_SCOPED_AEDESC_H_
+
+#import <CoreServices/CoreServices.h>
+
+#include "base/macros.h"
+
+namespace base {
+namespace mac {
+
+// The ScopedAEDesc is used to scope AppleEvent descriptors.  On creation,
+// it will store a NULL descriptor.  On destruction, it will dispose of the
+// descriptor.
+//
+// This class is parameterized for additional type safety checks.  You can use
+// the generic AEDesc type by not providing a template parameter:
+//  ScopedAEDesc<> desc;
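+//
+// A sketch of use with an AppleEvent (|address| is an illustrative
+// AEAddressDesc naming the target; the constants are the generic Apple Event
+// Manager ones):
+//   ScopedAEDesc<AppleEvent> event;
+//   AECreateAppleEvent(kCoreEventClass, kAEOpenApplication, address,
+//                      kAutoGenerateReturnID, kAnyTransactionID,
+//                      event.OutPointer());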
+template <typename AEDescType = AEDesc>
+class ScopedAEDesc {
+ public:
+  ScopedAEDesc() {
+    AECreateDesc(typeNull, NULL, 0, &desc_);
+  }
+
+  ~ScopedAEDesc() {
+    AEDisposeDesc(&desc_);
+  }
+
+  // Used for in parameters.
+  operator const AEDescType*() {
+    return &desc_;
+  }
+
+  // Used for out parameters.
+  AEDescType* OutPointer() {
+    return &desc_;
+  }
+
+ private:
+  AEDescType desc_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedAEDesc);
+};
+
+}  // namespace mac
+}  // namespace base
+
+#endif  // BASE_MAC_SCOPED_AEDESC_H_
diff --git a/base/mac/scoped_authorizationref.h b/base/mac/scoped_authorizationref.h
new file mode 100644
index 0000000..b83f8df
--- /dev/null
+++ b/base/mac/scoped_authorizationref.h
@@ -0,0 +1,82 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_SCOPED_AUTHORIZATIONREF_H_
+#define BASE_MAC_SCOPED_AUTHORIZATIONREF_H_
+
+#include <Security/Authorization.h>
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+
+// ScopedAuthorizationRef maintains ownership of an AuthorizationRef.  It is
+// patterned after the unique_ptr interface.
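+//
+// A sketch of typical use with the Security framework:
+//
+//   base::mac::ScopedAuthorizationRef authorization;
+//   OSStatus status =
+//       AuthorizationCreate(NULL, kAuthorizationEmptyEnvironment,
+//                           kAuthorizationFlagDefaults,
+//                           authorization.get_pointer());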
+
+namespace base {
+namespace mac {
+
+class ScopedAuthorizationRef {
+ public:
+  explicit ScopedAuthorizationRef(AuthorizationRef authorization = NULL)
+      : authorization_(authorization) {
+  }
+
+  ~ScopedAuthorizationRef() {
+    if (authorization_) {
+      AuthorizationFree(authorization_, kAuthorizationFlagDestroyRights);
+    }
+  }
+
+  void reset(AuthorizationRef authorization = NULL) {
+    if (authorization_ != authorization) {
+      if (authorization_) {
+        AuthorizationFree(authorization_, kAuthorizationFlagDestroyRights);
+      }
+      authorization_ = authorization;
+    }
+  }
+
+  bool operator==(AuthorizationRef that) const {
+    return authorization_ == that;
+  }
+
+  bool operator!=(AuthorizationRef that) const {
+    return authorization_ != that;
+  }
+
+  operator AuthorizationRef() const {
+    return authorization_;
+  }
+
+  AuthorizationRef* get_pointer() { return &authorization_; }
+
+  AuthorizationRef get() const {
+    return authorization_;
+  }
+
+  void swap(ScopedAuthorizationRef& that) {
+    AuthorizationRef temp = that.authorization_;
+    that.authorization_ = authorization_;
+    authorization_ = temp;
+  }
+
+  // ScopedAuthorizationRef::release() is like std::unique_ptr<>::release. It is
+  // NOT a wrapper for AuthorizationFree(). To force a ScopedAuthorizationRef
+  // object to call AuthorizationFree(), use ScopedAuthorizationRef::reset().
+  AuthorizationRef release() WARN_UNUSED_RESULT {
+    AuthorizationRef temp = authorization_;
+    authorization_ = NULL;
+    return temp;
+  }
+
+ private:
+  AuthorizationRef authorization_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedAuthorizationRef);
+};
+
+}  // namespace mac
+}  // namespace base
+
+#endif  // BASE_MAC_SCOPED_AUTHORIZATIONREF_H_
diff --git a/base/mac/scoped_block.h b/base/mac/scoped_block.h
new file mode 100644
index 0000000..10ab4b4
--- /dev/null
+++ b/base/mac/scoped_block.h
@@ -0,0 +1,72 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_SCOPED_BLOCK_H_
+#define BASE_MAC_SCOPED_BLOCK_H_
+
+#include <Block.h>
+
+#include "base/mac/scoped_typeref.h"
+
+#if defined(__has_feature) && __has_feature(objc_arc)
+#define BASE_MAC_BRIDGE_CAST(TYPE, VALUE) (__bridge TYPE)(VALUE)
+#else
+#define BASE_MAC_BRIDGE_CAST(TYPE, VALUE) VALUE
+#endif
+
+namespace base {
+namespace mac {
+
+namespace internal {
+
+template <typename B>
+struct ScopedBlockTraits {
+  static B InvalidValue() { return nullptr; }
+  static B Retain(B block) {
+    return BASE_MAC_BRIDGE_CAST(
+        B, Block_copy(BASE_MAC_BRIDGE_CAST(const void*, block)));
+  }
+  static void Release(B block) {
+    Block_release(BASE_MAC_BRIDGE_CAST(const void*, block));
+  }
+};
+
+}  // namespace internal
+
+// ScopedBlock<> is patterned after ScopedCFTypeRef<>, but uses Block_copy() and
+// Block_release() instead of CFRetain() and CFRelease().
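+//
+// A sketch of use under manual reference counting (the block body is
+// illustrative); ASSUME takes over the ownership claim that Block_copy()
+// creates:
+//
+//   base::mac::ScopedBlock<void (^)(int)> scoped_block(
+//       Block_copy(^(int value) { /* use |value| */ }),
+//       base::scoped_policy::ASSUME);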
+template <typename B>
+class ScopedBlock : public ScopedTypeRef<B, internal::ScopedBlockTraits<B>> {
+ public:
+  using Traits = internal::ScopedBlockTraits<B>;
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+  explicit ScopedBlock(
+      B block = Traits::InvalidValue(),
+      base::scoped_policy::OwnershipPolicy policy = base::scoped_policy::ASSUME)
+      : ScopedTypeRef<B, Traits>(block, policy) {}
+#else
+  explicit ScopedBlock(B block = Traits::InvalidValue())
+      : ScopedTypeRef<B, Traits>(block, base::scoped_policy::RETAIN) {}
+#endif
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+  void reset(B block = Traits::InvalidValue(),
+             base::scoped_policy::OwnershipPolicy policy =
+                 base::scoped_policy::ASSUME) {
+    ScopedTypeRef<B, Traits>::reset(block, policy);
+  }
+#else
+  void reset(B block = Traits::InvalidValue()) {
+    ScopedTypeRef<B, Traits>::reset(block, base::scoped_policy::RETAIN);
+  }
+#endif
+};
+
+}  // namespace mac
+}  // namespace base
+
+#undef BASE_MAC_BRIDGE_CAST
+
+#endif  // BASE_MAC_SCOPED_BLOCK_H_
diff --git a/base/mac/scoped_cffiledescriptorref.h b/base/mac/scoped_cffiledescriptorref.h
new file mode 100644
index 0000000..923a159
--- /dev/null
+++ b/base/mac/scoped_cffiledescriptorref.h
@@ -0,0 +1,39 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_SCOPED_CFFILEDESCRIPTORREF_H_
+#define BASE_MAC_SCOPED_CFFILEDESCRIPTORREF_H_
+
+#include <CoreFoundation/CoreFoundation.h>
+
+#include "base/scoped_generic.h"
+
+namespace base {
+namespace mac {
+
+namespace internal {
+
+struct ScopedCFFileDescriptorRefTraits {
+  static CFFileDescriptorRef InvalidValue() { return nullptr; }
+  static void Free(CFFileDescriptorRef ref) {
+    CFFileDescriptorInvalidate(ref);
+    CFRelease(ref);
+  }
+};
+
+}  // namespace internal
+
+// ScopedCFFileDescriptorRef is designed after ScopedCFTypeRef<>. On
+// destruction, it will invalidate the file descriptor.
+// ScopedCFFileDescriptorRef (unlike ScopedCFTypeRef<>) does not support RETAIN
+// semantics, copying, or assignment, as doing so would increase the chances
+// that a file descriptor is invalidated while still in use.
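+//
+// Example (a sketch; |fd| is a hypothetical file descriptor and |callout| a
+// hypothetical CFFileDescriptorCallBack):
+//
+//   ScopedCFFileDescriptorRef fd_ref(CFFileDescriptorCreate(
+//       kCFAllocatorDefault, fd, /*closeOnInvalidate=*/true, callout, NULL));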
+using ScopedCFFileDescriptorRef =
+    ScopedGeneric<CFFileDescriptorRef,
+                  internal::ScopedCFFileDescriptorRefTraits>;
+
+}  // namespace mac
+}  // namespace base
+
+#endif  // BASE_MAC_SCOPED_CFFILEDESCRIPTORREF_H_
diff --git a/base/mac/scoped_cftyperef.h b/base/mac/scoped_cftyperef.h
new file mode 100644
index 0000000..ccbc5cf
--- /dev/null
+++ b/base/mac/scoped_cftyperef.h
@@ -0,0 +1,50 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_SCOPED_CFTYPEREF_H_
+#define BASE_MAC_SCOPED_CFTYPEREF_H_
+
+#include <CoreFoundation/CoreFoundation.h>
+
+#include "base/mac/scoped_typeref.h"
+
+namespace base {
+
+// ScopedCFTypeRef<> is patterned after std::unique_ptr<>, but maintains
+// ownership of a CoreFoundation object: any object that can be represented
+// as a CFTypeRef.  Style deviations here are solely for compatibility with
+// std::unique_ptr<>'s interface, with which everyone is already familiar.
+//
+// By default, ScopedCFTypeRef<> takes ownership of an object (in the
+// constructor or in reset()) by taking over the caller's existing ownership
+// claim.  The caller must own the object it gives to ScopedCFTypeRef<>, and
+// relinquishes an ownership claim to that object.  ScopedCFTypeRef<> does not
+// call CFRetain(). This behavior is parameterized by the |OwnershipPolicy|
+// enum. If the value |RETAIN| is passed (in the constructor or in reset()),
+// then ScopedCFTypeRef<> will call CFRetain() on the object, and the initial
+// ownership is not changed.
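+//
+// Example (a sketch; CFStringCreateWithCString() returns a +1 reference that
+// the default |ASSUME| policy takes over):
+//
+//   base::ScopedCFTypeRef<CFStringRef> str(CFStringCreateWithCString(
+//       kCFAllocatorDefault, "hello", kCFStringEncodingUTF8));
+//   CFIndex length = CFStringGetLength(str);  // Implicit conversion.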
+
+namespace internal {
+
+template<typename CFT>
+struct ScopedCFTypeRefTraits {
+  static CFT InvalidValue() { return nullptr; }
+  static CFT Retain(CFT object) {
+    CFRetain(object);
+    return object;
+  }
+  static void Release(CFT object) {
+    CFRelease(object);
+  }
+};
+
+}  // namespace internal
+
+template<typename CFT>
+using ScopedCFTypeRef =
+    ScopedTypeRef<CFT, internal::ScopedCFTypeRefTraits<CFT>>;
+
+}  // namespace base
+
+#endif  // BASE_MAC_SCOPED_CFTYPEREF_H_
diff --git a/base/mac/scoped_dispatch_object.h b/base/mac/scoped_dispatch_object.h
new file mode 100644
index 0000000..cd2daf2
--- /dev/null
+++ b/base/mac/scoped_dispatch_object.h
@@ -0,0 +1,36 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_SCOPED_DISPATCH_OBJECT_H_
+#define BASE_MAC_SCOPED_DISPATCH_OBJECT_H_
+
+#include <dispatch/dispatch.h>
+
+#include "base/mac/scoped_typeref.h"
+
+namespace base {
+
+namespace internal {
+
+template <typename T>
+struct ScopedDispatchObjectTraits {
+  static constexpr T InvalidValue() { return nullptr; }
+  static T Retain(T object) {
+    dispatch_retain(object);
+    return object;
+  }
+  static void Release(T object) {
+    dispatch_release(object);
+  }
+};
+
+}  // namespace internal
+
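+// Example (a sketch; dispatch_queue_create() returns a +1 reference that the
+// default |ASSUME| policy takes over):
+//
+//   ScopedDispatchObject<dispatch_queue_t> queue(
+//       dispatch_queue_create("org.chromium.example", DISPATCH_QUEUE_SERIAL));
+//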
+template <typename T>
+using ScopedDispatchObject =
+    ScopedTypeRef<T, internal::ScopedDispatchObjectTraits<T>>;
+
+}  // namespace base
+
+#endif  // BASE_MAC_SCOPED_DISPATCH_OBJECT_H_
diff --git a/base/mac/scoped_ionotificationportref.h b/base/mac/scoped_ionotificationportref.h
new file mode 100644
index 0000000..93ebc98
--- /dev/null
+++ b/base/mac/scoped_ionotificationportref.h
@@ -0,0 +1,33 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_SCOPED_IONOTIFICATIONPORTREF_H_
+#define BASE_MAC_SCOPED_IONOTIFICATIONPORTREF_H_
+
+#include <IOKit/IOKitLib.h>
+
+#include "base/scoped_generic.h"
+
+namespace base {
+namespace mac {
+
+namespace internal {
+
+struct ScopedIONotificationPortRefTraits {
+  static IONotificationPortRef InvalidValue() { return nullptr; }
+  static void Free(IONotificationPortRef object) {
+    IONotificationPortDestroy(object);
+  }
+};
+
+}  // namespace internal
+
+using ScopedIONotificationPortRef =
+    ScopedGeneric<IONotificationPortRef,
+                  internal::ScopedIONotificationPortRefTraits>;
+
+}  // namespace mac
+}  // namespace base
+
+#endif  // BASE_MAC_SCOPED_IONOTIFICATIONPORTREF_H_
diff --git a/base/mac/scoped_ioobject.h b/base/mac/scoped_ioobject.h
new file mode 100644
index 0000000..c948cb5
--- /dev/null
+++ b/base/mac/scoped_ioobject.h
@@ -0,0 +1,36 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_SCOPED_IOOBJECT_H_
+#define BASE_MAC_SCOPED_IOOBJECT_H_
+
+#include <IOKit/IOKitLib.h>
+
+#include "base/mac/scoped_typeref.h"
+
+namespace base {
+namespace mac {
+
+namespace internal {
+
+template <typename IOT>
+struct ScopedIOObjectTraits {
+  static IOT InvalidValue() { return IO_OBJECT_NULL; }
+  static IOT Retain(IOT iot) {
+    IOObjectRetain(iot);
+    return iot;
+  }
+  static void Release(IOT iot) { IOObjectRelease(iot); }
+};
+
+}  // namespace internal
+
+// Just like ScopedCFTypeRef but for io_object_t and subclasses.
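+//
+// Example (a sketch; IOServiceGetMatchingService() returns a reference that
+// the caller owns and the default |ASSUME| policy takes over):
+//
+//   ScopedIOObject<io_service_t> service(IOServiceGetMatchingService(
+//       kIOMasterPortDefault, IOServiceMatching("IOPlatformExpertDevice")));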
+template <typename IOT>
+using ScopedIOObject = ScopedTypeRef<IOT, internal::ScopedIOObjectTraits<IOT>>;
+
+}  // namespace mac
+}  // namespace base
+
+#endif  // BASE_MAC_SCOPED_IOOBJECT_H_
diff --git a/base/mac/scoped_ioplugininterface.h b/base/mac/scoped_ioplugininterface.h
new file mode 100644
index 0000000..872da8e
--- /dev/null
+++ b/base/mac/scoped_ioplugininterface.h
@@ -0,0 +1,38 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_SCOPED_IOPLUGININTERFACE_H_
+#define BASE_MAC_SCOPED_IOPLUGININTERFACE_H_
+
+#include <IOKit/IOKitLib.h>
+
+#include "base/mac/scoped_typeref.h"
+
+namespace base {
+namespace mac {
+
+namespace internal {
+
+template <typename T>
+struct ScopedIOPluginInterfaceTraits {
+  static T InvalidValue() { return nullptr; }
+  static T Retain(T t) {
+    (*t)->AddRef(t);
+    return t;
+  }
+  static void Release(T t) { (*t)->Release(t); }
+};
+
+}  // namespace internal
+
+// Just like ScopedCFTypeRef but for IOCFPlugInInterface and friends
+// (IOUSBInterfaceStruct and IOUSBDeviceStruct320 in particular).
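+//
+// Example (a sketch; |service| is a hypothetical io_service_t, and the UUID
+// constants come from the relevant IOKit plug-in headers):
+//
+//   ScopedIOPluginInterface<IOCFPlugInInterface> plugin;
+//   SInt32 score;
+//   IOCreatePlugInInterfaceForService(service, kIOUSBDeviceUserClientTypeID,
+//                                     kIOCFPlugInInterfaceID,
+//                                     plugin.InitializeInto(), &score);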
+template <typename T>
+using ScopedIOPluginInterface =
+    ScopedTypeRef<T**, internal::ScopedIOPluginInterfaceTraits<T**>>;
+
+}  // namespace mac
+}  // namespace base
+
+#endif  // BASE_MAC_SCOPED_IOPLUGININTERFACE_H_
diff --git a/base/mac/scoped_launch_data.h b/base/mac/scoped_launch_data.h
new file mode 100644
index 0000000..f4db330
--- /dev/null
+++ b/base/mac/scoped_launch_data.h
@@ -0,0 +1,31 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_SCOPED_LAUNCH_DATA_H_
+#define BASE_MAC_SCOPED_LAUNCH_DATA_H_
+
+#include <launch.h>
+
+#include "base/scoped_generic.h"
+
+namespace base {
+namespace mac {
+
+namespace internal {
+
+struct ScopedLaunchDataTraits {
+  static launch_data_t InvalidValue() { return nullptr; }
+  static void Free(launch_data_t ldt) { launch_data_free(ldt); }
+};
+
+}  // namespace internal
+
+// Just like std::unique_ptr<> but for launch_data_t.
+using ScopedLaunchData =
+    ScopedGeneric<launch_data_t, internal::ScopedLaunchDataTraits>;
+
+}  // namespace mac
+}  // namespace base
+
+#endif  // BASE_MAC_SCOPED_LAUNCH_DATA_H_
diff --git a/base/mac/scoped_mach_port.cc b/base/mac/scoped_mach_port.cc
new file mode 100644
index 0000000..13307f2
--- /dev/null
+++ b/base/mac/scoped_mach_port.cc
@@ -0,0 +1,38 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac/scoped_mach_port.h"
+
+#include "base/mac/mach_logging.h"
+
+namespace base {
+namespace mac {
+namespace internal {
+
+// static
+void SendRightTraits::Free(mach_port_t port) {
+  kern_return_t kr = mach_port_deallocate(mach_task_self(), port);
+  MACH_LOG_IF(ERROR, kr != KERN_SUCCESS, kr)
+      << "ScopedMachSendRight mach_port_deallocate";
+}
+
+// static
+void ReceiveRightTraits::Free(mach_port_t port) {
+  kern_return_t kr =
+      mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_RECEIVE, -1);
+  MACH_LOG_IF(ERROR, kr != KERN_SUCCESS, kr)
+      << "ScopedMachReceiveRight mach_port_mod_refs";
+}
+
+// static
+void PortSetTraits::Free(mach_port_t port) {
+  kern_return_t kr =
+      mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_PORT_SET, -1);
+  MACH_LOG_IF(ERROR, kr != KERN_SUCCESS, kr)
+      << "ScopedMachPortSet mach_port_mod_refs";
+}
+
+}  // namespace internal
+}  // namespace mac
+}  // namespace base
diff --git a/base/mac/scoped_mach_port.h b/base/mac/scoped_mach_port.h
new file mode 100644
index 0000000..67fed6b
--- /dev/null
+++ b/base/mac/scoped_mach_port.h
@@ -0,0 +1,67 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_SCOPED_MACH_PORT_H_
+#define BASE_MAC_SCOPED_MACH_PORT_H_
+
+#include <mach/mach.h>
+
+#include "base/base_export.h"
+#include "base/scoped_generic.h"
+
+namespace base {
+namespace mac {
+
+namespace internal {
+
+struct BASE_EXPORT SendRightTraits {
+  static mach_port_t InvalidValue() {
+    return MACH_PORT_NULL;
+  }
+
+  BASE_EXPORT static void Free(mach_port_t port);
+};
+
+struct BASE_EXPORT ReceiveRightTraits {
+  static mach_port_t InvalidValue() {
+    return MACH_PORT_NULL;
+  }
+
+  BASE_EXPORT static void Free(mach_port_t port);
+};
+
+struct PortSetTraits {
+  static mach_port_t InvalidValue() {
+    return MACH_PORT_NULL;
+  }
+
+  BASE_EXPORT static void Free(mach_port_t port);
+};
+
+}  // namespace internal
+
+// A scoper for handling a Mach port that names a send right. Send rights are
+// reference counted, and this takes ownership of the right on construction
+// and then removes a reference to the right on destruction. If the reference
+// is the last one on the right, the right is deallocated.
+using ScopedMachSendRight =
+    ScopedGeneric<mach_port_t, internal::SendRightTraits>;
+
+// A scoper for handling a Mach port's receive right. There is only one
+// receive right per port. This takes ownership of the receive right on
+// construction and then destroys the right on destruction, turning all
+// outstanding send rights into dead names.
+using ScopedMachReceiveRight =
+    ScopedGeneric<mach_port_t, internal::ReceiveRightTraits>;
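+
+// Example (a sketch; mach_port_allocate() creates a receive right that the
+// scoper then owns):
+//
+//   mach_port_t port;
+//   kern_return_t kr = mach_port_allocate(mach_task_self(),
+//                                         MACH_PORT_RIGHT_RECEIVE, &port);
+//   if (kr == KERN_SUCCESS) {
+//     ScopedMachReceiveRight receive_right(port);
+//     // The right is destroyed when |receive_right| goes out of scope.
+//   }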
+
+// A scoper for handling a Mach port set. A port set can have only one
+// reference. This takes ownership of that single reference on construction and
+// destroys the port set on destruction. Destroying a port set does not destroy
+// the receive rights that are members of the port set.
+using ScopedMachPortSet = ScopedGeneric<mach_port_t, internal::PortSetTraits>;
+
+}  // namespace mac
+}  // namespace base
+
+#endif  // BASE_MAC_SCOPED_MACH_PORT_H_
diff --git a/base/mac/scoped_mach_vm.cc b/base/mac/scoped_mach_vm.cc
new file mode 100644
index 0000000..d52c77f
--- /dev/null
+++ b/base/mac/scoped_mach_vm.cc
@@ -0,0 +1,33 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac/scoped_mach_vm.h"
+
+namespace base {
+namespace mac {
+
+void ScopedMachVM::reset(vm_address_t address, vm_size_t size) {
+  DCHECK_EQ(address % PAGE_SIZE, 0u);
+  DCHECK_EQ(size % PAGE_SIZE, 0u);
+
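+  // Deallocate any pages of the old region that fall outside the new one:
+  // first those below the new region's start, then those above its end. If
+  // the regions are disjoint, this releases the old region entirely.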
+  if (size_) {
+    if (address_ < address) {
+      vm_deallocate(mach_task_self(),
+                    address_,
+                    std::min(size_, address - address_));
+    }
+    if (address_ + size_ > address + size) {
+      vm_address_t deallocate_start = std::max(address_, address + size);
+      vm_deallocate(mach_task_self(),
+                    deallocate_start,
+                    address_ + size_ - deallocate_start);
+    }
+  }
+
+  address_ = address;
+  size_ = size;
+}
+
+}  // namespace mac
+}  // namespace base
diff --git a/base/mac/scoped_mach_vm.h b/base/mac/scoped_mach_vm.h
new file mode 100644
index 0000000..58a13f6
--- /dev/null
+++ b/base/mac/scoped_mach_vm.h
@@ -0,0 +1,93 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_SCOPED_MACH_VM_H_
+#define BASE_MAC_SCOPED_MACH_VM_H_
+
+#include <mach/mach.h>
+#include <stddef.h>
+
+#include <algorithm>
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/macros.h"
+
+// Use ScopedMachVM to supervise ownership of pages in the current process
+// through the Mach VM subsystem. Pages allocated with vm_allocate can be
+// released when exiting a scope with ScopedMachVM.
+//
+// The Mach VM subsystem operates on a page-by-page basis, and a single VM
+// allocation managed by a ScopedMachVM object may span multiple pages. As far
+// as Mach is concerned, allocated pages may be deallocated individually. This
+// is in contrast to higher-level allocators such as malloc, where the base
+// address of an allocation implies the size of an allocated block.
+// Consequently, it is not sufficient to just pass the base address of an
+// allocation to ScopedMachVM; it also needs to know the size of the
+// allocation. To avoid any confusion, both the base address and size must
+// be page-aligned.
+//
+// When dealing with Mach VM, base addresses will naturally be page-aligned,
+// but user-specified sizes may not be. If there's a concern that a size is
+// not page-aligned, use the mach_vm_round_page macro to correct it.
+//
+// Example:
+//
+//   vm_address_t address = 0;
+//   vm_size_t size = 12345;  // This requested size is not page-aligned.
+//   kern_return_t kr =
+//       vm_allocate(mach_task_self(), &address, size, VM_FLAGS_ANYWHERE);
+//   if (kr != KERN_SUCCESS) {
+//     return false;
+//   }
+//   ScopedMachVM vm_owner(address, mach_vm_round_page(size));
+
+namespace base {
+namespace mac {
+
+class BASE_EXPORT ScopedMachVM {
+ public:
+  explicit ScopedMachVM(vm_address_t address = 0, vm_size_t size = 0)
+      : address_(address), size_(size) {
+    DCHECK_EQ(address % PAGE_SIZE, 0u);
+    DCHECK_EQ(size % PAGE_SIZE, 0u);
+  }
+
+  ~ScopedMachVM() {
+    if (size_) {
+      vm_deallocate(mach_task_self(), address_, size_);
+    }
+  }
+
+  void reset(vm_address_t address = 0, vm_size_t size = 0);
+
+  vm_address_t address() const {
+    return address_;
+  }
+
+  vm_size_t size() const {
+    return size_;
+  }
+
+  void swap(ScopedMachVM& that) {
+    std::swap(address_, that.address_);
+    std::swap(size_, that.size_);
+  }
+
+  void release() {
+    address_ = 0;
+    size_ = 0;
+  }
+
+ private:
+  vm_address_t address_;
+  vm_size_t size_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedMachVM);
+};
+
+}  // namespace mac
+}  // namespace base
+
+#endif  // BASE_MAC_SCOPED_MACH_VM_H_
diff --git a/base/mac/scoped_nsautorelease_pool.h b/base/mac/scoped_nsautorelease_pool.h
new file mode 100644
index 0000000..4d15e6d
--- /dev/null
+++ b/base/mac/scoped_nsautorelease_pool.h
@@ -0,0 +1,45 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_SCOPED_NSAUTORELEASE_POOL_H_
+#define BASE_MAC_SCOPED_NSAUTORELEASE_POOL_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+
+#if defined(__OBJC__)
+@class NSAutoreleasePool;
+#else  // __OBJC__
+class NSAutoreleasePool;
+#endif  // __OBJC__
+
+namespace base {
+namespace mac {
+
+// ScopedNSAutoreleasePool allocates an NSAutoreleasePool when instantiated and
+// sends it a -drain message when destroyed.  This allows an autorelease pool to
+// be maintained in ordinary C++ code without bringing in any direct Objective-C
+// dependency.
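+//
+// Example (a sketch):
+//
+//   {
+//     base::mac::ScopedNSAutoreleasePool pool;
+//     // Objects autoreleased here are released when |pool| goes out of
+//     // scope.
+//   }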
+
+class BASE_EXPORT ScopedNSAutoreleasePool {
+ public:
+  ScopedNSAutoreleasePool();
+  ~ScopedNSAutoreleasePool();
+
+  // Clear out the pool in case its position on the stack causes it to be
+  // alive for long periods of time (such as the entire length of the app).
+  // Only use this when you're certain the items currently in the pool are
+  // no longer needed.
+  void Recycle();
+
+ private:
+  NSAutoreleasePool* autorelease_pool_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedNSAutoreleasePool);
+};
+
+}  // namespace mac
+}  // namespace base
+
+#endif  // BASE_MAC_SCOPED_NSAUTORELEASE_POOL_H_
diff --git a/base/mac/scoped_nsautorelease_pool.mm b/base/mac/scoped_nsautorelease_pool.mm
new file mode 100644
index 0000000..e542ca8
--- /dev/null
+++ b/base/mac/scoped_nsautorelease_pool.mm
@@ -0,0 +1,32 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac/scoped_nsautorelease_pool.h"
+
+#import <Foundation/Foundation.h>
+
+#include "base/logging.h"
+
+namespace base {
+namespace mac {
+
+ScopedNSAutoreleasePool::ScopedNSAutoreleasePool()
+    : autorelease_pool_([[NSAutoreleasePool alloc] init]) {
+  DCHECK(autorelease_pool_);
+}
+
+ScopedNSAutoreleasePool::~ScopedNSAutoreleasePool() {
+  [autorelease_pool_ drain];
+}
+
+// Cycle the internal pool, allowing everything there to get cleaned up and
+// start anew.
+void ScopedNSAutoreleasePool::Recycle() {
+  [autorelease_pool_ drain];
+  autorelease_pool_ = [[NSAutoreleasePool alloc] init];
+  DCHECK(autorelease_pool_);
+}
+
+}  // namespace mac
+}  // namespace base
diff --git a/base/mac/scoped_nsobject.h b/base/mac/scoped_nsobject.h
new file mode 100644
index 0000000..d970d03
--- /dev/null
+++ b/base/mac/scoped_nsobject.h
@@ -0,0 +1,240 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_SCOPED_NSOBJECT_H_
+#define BASE_MAC_SCOPED_NSOBJECT_H_
+
+#include <type_traits>
+
+// Include NSObject.h directly because Foundation.h pulls in many dependencies.
+// (Approx 100k lines of code versus 1.5k for NSObject.h). scoped_nsobject gets
+// singled out because it is most typically included from other header files.
+#import <Foundation/NSObject.h>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/mac/scoped_typeref.h"
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+@class NSAutoreleasePool;
+#endif
+
+namespace base {
+
+// scoped_nsobject<> is patterned after std::unique_ptr<>, but maintains
+// ownership of an NSObject subclass object.  Style deviations here are solely
+// for compatibility with std::unique_ptr<>'s interface, with which everyone is
+// already familiar.
+//
+// scoped_nsobject<> takes ownership of an object (in the constructor or in
+// reset()) by taking over the caller's existing ownership claim.  The caller
+// must own the object it gives to scoped_nsobject<>, and relinquishes an
+// ownership claim to that object.  scoped_nsobject<> does not call -retain;
+// callers have to call -retain manually if appropriate.
+//
+// scoped_nsprotocol<> has the same behavior as scoped_nsobject, but can be used
+// with protocols.
+//
+// scoped_nsobject<> is not to be used for NSAutoreleasePools. For
+// NSAutoreleasePools use ScopedNSAutoreleasePool from
+// scoped_nsautorelease_pool.h instead.
+// We check for bad uses of scoped_nsobject and NSAutoreleasePool at compile
+// time with a template specialization (see below).
+//
+// If Automatic Reference Counting (aka ARC) is enabled, then the ownership
+// policy is not controllable by the user, as ARC makes it very difficult to
+// transfer ownership (the reference passed to the scoped_nsobject constructor
+// is sunk by ARC, and __attribute((ns_consumed)) appears to not work correctly
+// with Objective-C++; see https://llvm.org/bugs/show_bug.cgi?id=27887). Due to
+// that, the policy is always |RETAIN| when using ARC.
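+//
+// Example (a sketch, non-ARC; the +1 reference from -init is assumed by the
+// scoper):
+//
+//   base::scoped_nsobject<NSString> string(
+//       [[NSString alloc] initWithUTF8String:"hello"]);
+//   NSUInteger length = [string length];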
+
+namespace internal {
+
+BASE_EXPORT id ScopedNSProtocolTraitsRetain(__unsafe_unretained id obj)
+    __attribute((ns_returns_not_retained));
+BASE_EXPORT id ScopedNSProtocolTraitsAutoRelease(__unsafe_unretained id obj)
+    __attribute((ns_returns_not_retained));
+BASE_EXPORT void ScopedNSProtocolTraitsRelease(__unsafe_unretained id obj);
+
+// Traits for ScopedTypeRef<>. As this class may be compiled from a file with
+// Automatic Reference Counting enabled or not, all methods are annotated to
+// enforce the same code generation in both cases (in particular, the Retain
+// method uses ns_returns_not_retained to prevent ARC from inserting a -release
+// call on the returned value, which would defeat the -retain).
+template <typename NST>
+struct ScopedNSProtocolTraits {
+  static NST InvalidValue() __attribute((ns_returns_not_retained)) {
+    return nil;
+  }
+  static NST Retain(__unsafe_unretained NST nst)
+      __attribute((ns_returns_not_retained)) {
+    return ScopedNSProtocolTraitsRetain(nst);
+  }
+  static void Release(__unsafe_unretained NST nst) {
+    ScopedNSProtocolTraitsRelease(nst);
+  }
+};
+
+}  // namespace internal
+
+template <typename NST>
+class scoped_nsprotocol
+    : public ScopedTypeRef<NST, internal::ScopedNSProtocolTraits<NST>> {
+ public:
+  using Traits = internal::ScopedNSProtocolTraits<NST>;
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+  explicit constexpr scoped_nsprotocol(
+      NST object = Traits::InvalidValue(),
+      base::scoped_policy::OwnershipPolicy policy = base::scoped_policy::ASSUME)
+      : ScopedTypeRef<NST, Traits>(object, policy) {}
+#else
+  explicit constexpr scoped_nsprotocol(NST object = Traits::InvalidValue())
+      : ScopedTypeRef<NST, Traits>(object, base::scoped_policy::RETAIN) {}
+#endif
+
+  scoped_nsprotocol(const scoped_nsprotocol<NST>& that)
+      : ScopedTypeRef<NST, Traits>(that) {}
+
+  template <typename NSR>
+  explicit scoped_nsprotocol(const scoped_nsprotocol<NSR>& that_as_subclass)
+      : ScopedTypeRef<NST, Traits>(that_as_subclass) {}
+
+  scoped_nsprotocol(scoped_nsprotocol<NST>&& that)
+      : ScopedTypeRef<NST, Traits>(std::move(that)) {}
+
+  scoped_nsprotocol& operator=(const scoped_nsprotocol<NST>& that) {
+    ScopedTypeRef<NST, Traits>::operator=(that);
+    return *this;
+  }
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+  void reset(NST object = Traits::InvalidValue(),
+             base::scoped_policy::OwnershipPolicy policy =
+                 base::scoped_policy::ASSUME) {
+    ScopedTypeRef<NST, Traits>::reset(object, policy);
+  }
+#else
+  void reset(NST object = Traits::InvalidValue()) {
+    ScopedTypeRef<NST, Traits>::reset(object, base::scoped_policy::RETAIN);
+  }
+#endif
+
+  // Shift reference to the autorelease pool to be released later.
+  NST autorelease() __attribute((ns_returns_not_retained)) {
+    return internal::ScopedNSProtocolTraitsAutoRelease(this->release());
+  }
+};
+
+// Free functions
+template <class C>
+void swap(scoped_nsprotocol<C>& p1, scoped_nsprotocol<C>& p2) {
+  p1.swap(p2);
+}
+
+template <class C>
+bool operator==(C p1, const scoped_nsprotocol<C>& p2) {
+  return p1 == p2.get();
+}
+
+template <class C>
+bool operator!=(C p1, const scoped_nsprotocol<C>& p2) {
+  return p1 != p2.get();
+}
+
+template <typename NST>
+class scoped_nsobject : public scoped_nsprotocol<NST*> {
+ public:
+  using Traits = typename scoped_nsprotocol<NST*>::Traits;
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+  explicit constexpr scoped_nsobject(
+      NST* object = Traits::InvalidValue(),
+      base::scoped_policy::OwnershipPolicy policy = base::scoped_policy::ASSUME)
+      : scoped_nsprotocol<NST*>(object, policy) {}
+#else
+  explicit constexpr scoped_nsobject(NST* object = Traits::InvalidValue())
+      : scoped_nsprotocol<NST*>(object) {}
+#endif
+
+  scoped_nsobject(const scoped_nsobject<NST>& that)
+      : scoped_nsprotocol<NST*>(that) {}
+
+  template <typename NSR>
+  explicit scoped_nsobject(const scoped_nsobject<NSR>& that_as_subclass)
+      : scoped_nsprotocol<NST*>(that_as_subclass) {}
+
+  scoped_nsobject(scoped_nsobject<NST>&& that)
+      : scoped_nsprotocol<NST*>(std::move(that)) {}
+
+  scoped_nsobject& operator=(const scoped_nsobject<NST>& that) {
+    scoped_nsprotocol<NST*>::operator=(that);
+    return *this;
+  }
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+  void reset(NST* object = Traits::InvalidValue(),
+             base::scoped_policy::OwnershipPolicy policy =
+                 base::scoped_policy::ASSUME) {
+    scoped_nsprotocol<NST*>::reset(object, policy);
+  }
+#else
+  void reset(NST* object = Traits::InvalidValue()) {
+    scoped_nsprotocol<NST*>::reset(object);
+  }
+#endif
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+  static_assert(std::is_same<NST, NSAutoreleasePool>::value == false,
+                "Use ScopedNSAutoreleasePool instead");
+#endif
+};
+
+// Specialization to make scoped_nsobject<id> work.
+template<>
+class scoped_nsobject<id> : public scoped_nsprotocol<id> {
+ public:
+  using Traits = typename scoped_nsprotocol<id>::Traits;
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+  explicit constexpr scoped_nsobject(
+      id object = Traits::InvalidValue(),
+      base::scoped_policy::OwnershipPolicy policy = base::scoped_policy::ASSUME)
+      : scoped_nsprotocol<id>(object, policy) {}
+#else
+  explicit constexpr scoped_nsobject(id object = Traits::InvalidValue())
+      : scoped_nsprotocol<id>(object) {}
+#endif
+
+  scoped_nsobject(const scoped_nsobject<id>& that)
+      : scoped_nsprotocol<id>(that) {}
+
+  template <typename NSR>
+  explicit scoped_nsobject(const scoped_nsobject<NSR>& that_as_subclass)
+      : scoped_nsprotocol<id>(that_as_subclass) {}
+
+  scoped_nsobject(scoped_nsobject<id>&& that)
+      : scoped_nsprotocol<id>(std::move(that)) {}
+
+  scoped_nsobject& operator=(const scoped_nsobject<id>& that) {
+    scoped_nsprotocol<id>::operator=(that);
+    return *this;
+  }
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+  void reset(id object = Traits::InvalidValue(),
+             base::scoped_policy::OwnershipPolicy policy =
+                 base::scoped_policy::ASSUME) {
+    scoped_nsprotocol<id>::reset(object, policy);
+  }
+#else
+  void reset(id object = Traits::InvalidValue()) {
+    scoped_nsprotocol<id>::reset(object);
+  }
+#endif
+};
+
+}  // namespace base
+
+#endif  // BASE_MAC_SCOPED_NSOBJECT_H_
diff --git a/base/mac/scoped_nsobject.mm b/base/mac/scoped_nsobject.mm
new file mode 100644
index 0000000..65b4031
--- /dev/null
+++ b/base/mac/scoped_nsobject.mm
@@ -0,0 +1,23 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import "base/mac/scoped_nsobject.h"
+
+namespace base {
+namespace internal {
+
+id ScopedNSProtocolTraitsRetain(id obj) {
+  return [obj retain];
+}
+
+id ScopedNSProtocolTraitsAutoRelease(id obj) {
+  return [obj autorelease];
+}
+
+void ScopedNSProtocolTraitsRelease(id obj) {
+  return [obj release];
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/mac/scoped_nsobject_unittest.mm b/base/mac/scoped_nsobject_unittest.mm
new file mode 100644
index 0000000..72d5242
--- /dev/null
+++ b/base/mac/scoped_nsobject_unittest.mm
@@ -0,0 +1,102 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <vector>
+
+#include "base/mac/scoped_nsautorelease_pool.h"
+#include "base/mac/scoped_nsobject.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+TEST(ScopedNSObjectTest, ScopedNSObject) {
+  base::scoped_nsobject<NSObject> p1([[NSObject alloc] init]);
+  ASSERT_TRUE(p1.get());
+  ASSERT_EQ(1u, [p1 retainCount]);
+  base::scoped_nsobject<NSObject> p2(p1);
+  ASSERT_EQ(p1.get(), p2.get());
+  ASSERT_EQ(2u, [p1 retainCount]);
+  p2.reset();
+  ASSERT_EQ(nil, p2.get());
+  ASSERT_EQ(1u, [p1 retainCount]);
+  {
+    base::scoped_nsobject<NSObject> p3 = p1;
+    ASSERT_EQ(p1.get(), p3.get());
+    ASSERT_EQ(2u, [p1 retainCount]);
+    {
+      base::mac::ScopedNSAutoreleasePool pool;
+      p3 = p1;
+    }
+    ASSERT_EQ(p1.get(), p3.get());
+    ASSERT_EQ(2u, [p1 retainCount]);
+  }
+  ASSERT_EQ(1u, [p1 retainCount]);
+  base::scoped_nsobject<NSObject> p4([p1.get() retain]);
+  ASSERT_EQ(2u, [p1 retainCount]);
+  ASSERT_TRUE(p1 == p1.get());
+  ASSERT_TRUE(p1 == p1);
+  ASSERT_FALSE(p1 != p1);
+  ASSERT_FALSE(p1 != p1.get());
+  base::scoped_nsobject<NSObject> p5([[NSObject alloc] init]);
+  ASSERT_TRUE(p1 != p5);
+  ASSERT_TRUE(p1 != p5.get());
+  ASSERT_FALSE(p1 == p5);
+  ASSERT_FALSE(p1 == p5.get());
+
+  base::scoped_nsobject<NSObject> p6 = p1;
+  ASSERT_EQ(3u, [p6 retainCount]);
+  {
+    base::mac::ScopedNSAutoreleasePool pool;
+    p6.autorelease();
+    ASSERT_EQ(nil, p6.get());
+    ASSERT_EQ(3u, [p1 retainCount]);
+  }
+  ASSERT_EQ(2u, [p1 retainCount]);
+
+  base::scoped_nsobject<NSObject> p7([NSObject new]);
+  base::scoped_nsobject<NSObject> p8(std::move(p7));
+  ASSERT_TRUE(p8);
+  ASSERT_EQ(1u, [p8 retainCount]);
+  ASSERT_FALSE(p7.get());
+}
+
+// Instantiating scoped_nsobject<> with T=NSAutoreleasePool should trip a
+// static_assert.
+#if 0
+TEST(ScopedNSObjectTest, FailToCreateScopedNSObjectAutoreleasePool) {
+  base::scoped_nsobject<NSAutoreleasePool> pool;
+}
+#endif
+
+TEST(ScopedNSObjectTest, ScopedNSObjectInContainer) {
+  base::scoped_nsobject<id> p([[NSObject alloc] init]);
+  ASSERT_TRUE(p.get());
+  ASSERT_EQ(1u, [p retainCount]);
+  {
+    std::vector<base::scoped_nsobject<id>> objects;
+    objects.push_back(p);
+    ASSERT_EQ(2u, [p retainCount]);
+    ASSERT_EQ(p.get(), objects[0].get());
+    objects.push_back(base::scoped_nsobject<id>([[NSObject alloc] init]));
+    ASSERT_TRUE(objects[1].get());
+    ASSERT_EQ(1u, [objects[1] retainCount]);
+  }
+  ASSERT_EQ(1u, [p retainCount]);
+}
+
+TEST(ScopedNSObjectTest, ScopedNSObjectFreeFunctions) {
+  base::scoped_nsobject<id> p1([[NSObject alloc] init]);
+  id o1 = p1.get();
+  ASSERT_TRUE(o1 == p1);
+  ASSERT_FALSE(o1 != p1);
+  base::scoped_nsobject<id> p2([[NSObject alloc] init]);
+  ASSERT_TRUE(o1 != p2);
+  ASSERT_FALSE(o1 == p2);
+  id o2 = p2.get();
+  swap(p1, p2);
+  ASSERT_EQ(o2, p1.get());
+  ASSERT_EQ(o1, p2.get());
+}
+
+}  // namespace
diff --git a/base/mac/scoped_nsobject_unittest_arc.mm b/base/mac/scoped_nsobject_unittest_arc.mm
new file mode 100644
index 0000000..5cbf3f8
--- /dev/null
+++ b/base/mac/scoped_nsobject_unittest_arc.mm
@@ -0,0 +1,131 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <vector>
+
+#import <CoreFoundation/CoreFoundation.h>
+
+#include "base/logging.h"
+#import "base/mac/scoped_nsobject.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+#error "This file requires ARC support."
+#endif
+
+namespace {
+
+template <typename NST>
+CFIndex GetRetainCount(const base::scoped_nsobject<NST>& nst) {
+  @autoreleasepool {
+    return CFGetRetainCount((__bridge CFTypeRef)nst.get()) - 1;
+  }
+}
+
+#if __has_feature(objc_arc_weak)
+TEST(ScopedNSObjectTestARC, DefaultPolicyIsRetain) {
+  __weak id o;
+  @autoreleasepool {
+    base::scoped_nsprotocol<id> p([[NSObject alloc] init]);
+    o = p.get();
+    DCHECK_EQ(o, p.get());
+  }
+  DCHECK_EQ(o, nil);
+}
+#endif
+
+TEST(ScopedNSObjectTestARC, ScopedNSObject) {
+  base::scoped_nsobject<NSObject> p1([[NSObject alloc] init]);
+  @autoreleasepool {
+    EXPECT_TRUE(p1.get());
+    EXPECT_TRUE(p1.get());
+  }
+  EXPECT_EQ(1, GetRetainCount(p1));
+  EXPECT_EQ(1, GetRetainCount(p1));
+  base::scoped_nsobject<NSObject> p2(p1);
+  @autoreleasepool {
+    EXPECT_EQ(p1.get(), p2.get());
+  }
+  EXPECT_EQ(2, GetRetainCount(p1));
+  p2.reset();
+  EXPECT_EQ(nil, p2.get());
+  EXPECT_EQ(1, GetRetainCount(p1));
+  {
+    base::scoped_nsobject<NSObject> p3 = p1;
+    @autoreleasepool {
+      EXPECT_EQ(p1.get(), p3.get());
+    }
+    EXPECT_EQ(2, GetRetainCount(p1));
+    @autoreleasepool {
+      p3 = p1;
+      EXPECT_EQ(p1.get(), p3.get());
+    }
+    EXPECT_EQ(2, GetRetainCount(p1));
+  }
+  EXPECT_EQ(1, GetRetainCount(p1));
+  base::scoped_nsobject<NSObject> p4;
+  @autoreleasepool {
+    p4 = base::scoped_nsobject<NSObject>(p1.get());
+  }
+  EXPECT_EQ(2, GetRetainCount(p1));
+  @autoreleasepool {
+    EXPECT_TRUE(p1 == p1.get());
+    EXPECT_TRUE(p1 == p1);
+    EXPECT_FALSE(p1 != p1);
+    EXPECT_FALSE(p1 != p1.get());
+  }
+  base::scoped_nsobject<NSObject> p5([[NSObject alloc] init]);
+  @autoreleasepool {
+    EXPECT_TRUE(p1 != p5);
+    EXPECT_TRUE(p1 != p5.get());
+    EXPECT_FALSE(p1 == p5);
+    EXPECT_FALSE(p1 == p5.get());
+  }
+
+  base::scoped_nsobject<NSObject> p6 = p1;
+  EXPECT_EQ(3, GetRetainCount(p6));
+  @autoreleasepool {
+    p6.autorelease();
+    EXPECT_EQ(nil, p6.get());
+  }
+  EXPECT_EQ(2, GetRetainCount(p1));
+}
+
+TEST(ScopedNSObjectTestARC, ScopedNSObjectInContainer) {
+  base::scoped_nsobject<id> p([[NSObject alloc] init]);
+  @autoreleasepool {
+    EXPECT_TRUE(p.get());
+  }
+  EXPECT_EQ(1, GetRetainCount(p));
+  @autoreleasepool {
+    std::vector<base::scoped_nsobject<id>> objects;
+    objects.push_back(p);
+    EXPECT_EQ(2, GetRetainCount(p));
+    @autoreleasepool {
+      EXPECT_EQ(p.get(), objects[0].get());
+    }
+    objects.push_back(base::scoped_nsobject<id>([[NSObject alloc] init]));
+    @autoreleasepool {
+      EXPECT_TRUE(objects[1].get());
+    }
+    EXPECT_EQ(1, GetRetainCount(objects[1]));
+  }
+  EXPECT_EQ(1, GetRetainCount(p));
+}
+
+TEST(ScopedNSObjectTestARC, ScopedNSObjectFreeFunctions) {
+  base::scoped_nsobject<id> p1([[NSObject alloc] init]);
+  id o1 = p1.get();
+  EXPECT_TRUE(o1 == p1);
+  EXPECT_FALSE(o1 != p1);
+  base::scoped_nsobject<id> p2([[NSObject alloc] init]);
+  EXPECT_TRUE(o1 != p2);
+  EXPECT_FALSE(o1 == p2);
+  id o2 = p2.get();
+  swap(p1, p2);
+  EXPECT_EQ(o2, p1.get());
+  EXPECT_EQ(o1, p2.get());
+}
+
+}  // namespace
diff --git a/base/mac/scoped_objc_class_swizzler.h b/base/mac/scoped_objc_class_swizzler.h
new file mode 100644
index 0000000..e18e4ab
--- /dev/null
+++ b/base/mac/scoped_objc_class_swizzler.h
@@ -0,0 +1,49 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_SCOPED_OBJC_CLASS_SWIZZLER_H_
+#define BASE_MAC_SCOPED_OBJC_CLASS_SWIZZLER_H_
+
+#import <objc/runtime.h>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+
+namespace base {
+namespace mac {
+
+// Within a given scope, swaps method implementations of a class interface, or
+// between two class interfaces. The argument and return types must match.
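+//
+// Example (a sketch; |Target| and |Donor| are hypothetical classes that both
+// implement -description):
+//
+//   {
+//     base::mac::ScopedObjCClassSwizzler swizzler(
+//         [Target class], [Donor class], @selector(description));
+//     // -[Target description] now runs Donor's implementation.
+//   }
+//   // The original implementations are restored here.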
+class BASE_EXPORT ScopedObjCClassSwizzler {
+ public:
+  // Given two classes that each respond to |selector|, swap the implementations
+  // of those methods.
+  ScopedObjCClassSwizzler(Class target, Class source, SEL selector);
+
+  // Given two selectors on the same class interface, |target| (e.g. via
+  // inheritance or categories), swap the implementations of methods |original|
+  // and |alternate|.
+  ScopedObjCClassSwizzler(Class target, SEL original, SEL alternate);
+
+  ~ScopedObjCClassSwizzler();
+
+  // Return a callable function pointer for the replaced method. To call this
+  // from the replacing function, the first two arguments should be |self| and
+  // |_cmd|. These are followed by the (variadic) method arguments.
+  IMP GetOriginalImplementation();
+
+ private:
+  // Delegated constructor.
+  void Init(Class target, Class source, SEL original, SEL alternate);
+
+  Method old_selector_impl_;
+  Method new_selector_impl_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedObjCClassSwizzler);
+};
+
+}  // namespace mac
+}  // namespace base
+
+#endif  // BASE_MAC_SCOPED_OBJC_CLASS_SWIZZLER_H_
diff --git a/base/mac/scoped_objc_class_swizzler.mm b/base/mac/scoped_objc_class_swizzler.mm
new file mode 100644
index 0000000..20e5c56
--- /dev/null
+++ b/base/mac/scoped_objc_class_swizzler.mm
@@ -0,0 +1,71 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import "base/mac/scoped_objc_class_swizzler.h"
+
+#include <string.h>
+
+#include "base/logging.h"
+
+namespace base {
+namespace mac {
+
+ScopedObjCClassSwizzler::ScopedObjCClassSwizzler(Class target,
+                                                 Class source,
+                                                 SEL selector)
+    : old_selector_impl_(NULL), new_selector_impl_(NULL) {
+  Init(target, source, selector, selector);
+}
+
+ScopedObjCClassSwizzler::ScopedObjCClassSwizzler(Class target,
+                                                 SEL original,
+                                                 SEL alternate)
+    : old_selector_impl_(NULL), new_selector_impl_(NULL) {
+  Init(target, target, original, alternate);
+}
+
+ScopedObjCClassSwizzler::~ScopedObjCClassSwizzler() {
+  if (old_selector_impl_ && new_selector_impl_)
+    method_exchangeImplementations(old_selector_impl_, new_selector_impl_);
+}
+
+IMP ScopedObjCClassSwizzler::GetOriginalImplementation() {
+  // Note that while the swizzle is in effect the "new" method is actually
+  // pointing to the original implementation, since they have been swapped.
+  return method_getImplementation(new_selector_impl_);
+}
+
+void ScopedObjCClassSwizzler::Init(Class target,
+                                   Class source,
+                                   SEL original,
+                                   SEL alternate) {
+  old_selector_impl_ = class_getInstanceMethod(target, original);
+  new_selector_impl_ = class_getInstanceMethod(source, alternate);
+  if (!old_selector_impl_ && !new_selector_impl_) {
+    // Try class methods.
+    old_selector_impl_ = class_getClassMethod(target, original);
+    new_selector_impl_ = class_getClassMethod(source, alternate);
+  }
+
+  DCHECK(old_selector_impl_);
+  DCHECK(new_selector_impl_);
+  if (!old_selector_impl_ || !new_selector_impl_)
+    return;
+
+  // The argument and return types must match exactly.
+  const char* old_types = method_getTypeEncoding(old_selector_impl_);
+  const char* new_types = method_getTypeEncoding(new_selector_impl_);
+  DCHECK(old_types);
+  DCHECK(new_types);
+  DCHECK_EQ(0, strcmp(old_types, new_types));
+  if (!old_types || !new_types || strcmp(old_types, new_types)) {
+    old_selector_impl_ = new_selector_impl_ = NULL;
+    return;
+  }
+
+  method_exchangeImplementations(old_selector_impl_, new_selector_impl_);
+}
+
+}  // namespace mac
+}  // namespace base
diff --git a/base/mac/scoped_objc_class_swizzler_unittest.mm b/base/mac/scoped_objc_class_swizzler_unittest.mm
new file mode 100644
index 0000000..79820a3
--- /dev/null
+++ b/base/mac/scoped_objc_class_swizzler_unittest.mm
@@ -0,0 +1,167 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import "base/mac/scoped_objc_class_swizzler.h"
+
+#import "base/mac/scoped_nsobject.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+@interface ObjCClassSwizzlerTestOne : NSObject
++ (NSInteger)function;
+- (NSInteger)method;
+- (NSInteger)modifier;
+@end
+
+@interface ObjCClassSwizzlerTestTwo : NSObject
++ (NSInteger)function;
+- (NSInteger)method;
+- (NSInteger)modifier;
+@end
+
+@implementation ObjCClassSwizzlerTestOne : NSObject
+
++ (NSInteger)function {
+  return 10;
+}
+
+- (NSInteger)method {
+  // Multiply by a modifier to ensure |self| in a swizzled implementation
+  // refers to the original object.
+  return 1 * [self modifier];
+}
+
+- (NSInteger)modifier {
+  return 3;
+}
+
+@end
+
+@implementation ObjCClassSwizzlerTestTwo : NSObject
+
++ (NSInteger)function {
+  return 20;
+}
+
+- (NSInteger)method {
+  return 2 * [self modifier];
+}
+
+- (NSInteger)modifier {
+  return 7;
+}
+
+@end
+
+@interface ObjCClassSwizzlerTestOne (AlternateCategory)
+- (NSInteger)alternate;
+@end
+
+@implementation ObjCClassSwizzlerTestOne (AlternateCategory)
+- (NSInteger)alternate {
+  return 3 * [self modifier];
+}
+@end
+
+@interface ObjCClassSwizzlerTestOneChild : ObjCClassSwizzlerTestOne
+- (NSInteger)childAlternate;
+@end
+
+@implementation ObjCClassSwizzlerTestOneChild
+- (NSInteger)childAlternate {
+  return 5 * [self modifier];
+}
+@end
+
+namespace base {
+namespace mac {
+
+TEST(ObjCClassSwizzlerTest, SwizzleInstanceMethods) {
+  base::scoped_nsobject<ObjCClassSwizzlerTestOne> object_one(
+      [[ObjCClassSwizzlerTestOne alloc] init]);
+  base::scoped_nsobject<ObjCClassSwizzlerTestTwo> object_two(
+      [[ObjCClassSwizzlerTestTwo alloc] init]);
+  EXPECT_EQ(3, [object_one method]);
+  EXPECT_EQ(14, [object_two method]);
+
+  {
+    base::mac::ScopedObjCClassSwizzler swizzler(
+        [ObjCClassSwizzlerTestOne class],
+        [ObjCClassSwizzlerTestTwo class],
+        @selector(method));
+    EXPECT_EQ(6, [object_one method]);
+    EXPECT_EQ(7, [object_two method]);
+
+    IMP original = swizzler.GetOriginalImplementation();
+    id expected_result = reinterpret_cast<id>(3);
+    EXPECT_EQ(expected_result, original(object_one, @selector(method)));
+  }
+
+  EXPECT_EQ(3, [object_one method]);
+  EXPECT_EQ(14, [object_two method]);
+}
+
+TEST(ObjCClassSwizzlerTest, SwizzleClassMethods) {
+  EXPECT_EQ(10, [ObjCClassSwizzlerTestOne function]);
+  EXPECT_EQ(20, [ObjCClassSwizzlerTestTwo function]);
+
+  {
+    base::mac::ScopedObjCClassSwizzler swizzler(
+        [ObjCClassSwizzlerTestOne class],
+        [ObjCClassSwizzlerTestTwo class],
+        @selector(function));
+    EXPECT_EQ(20, [ObjCClassSwizzlerTestOne function]);
+    EXPECT_EQ(10, [ObjCClassSwizzlerTestTwo function]);
+
+    IMP original = swizzler.GetOriginalImplementation();
+    id expected_result = reinterpret_cast<id>(10);
+    EXPECT_EQ(expected_result,
+              original([ObjCClassSwizzlerTestOne class], @selector(function)));
+  }
+
+  EXPECT_EQ(10, [ObjCClassSwizzlerTestOne function]);
+  EXPECT_EQ(20, [ObjCClassSwizzlerTestTwo function]);
+}
+
+TEST(ObjCClassSwizzlerTest, SwizzleViaCategory) {
+  base::scoped_nsobject<ObjCClassSwizzlerTestOne> object_one(
+      [[ObjCClassSwizzlerTestOne alloc] init]);
+  EXPECT_EQ(3, [object_one method]);
+
+  {
+    base::mac::ScopedObjCClassSwizzler swizzler(
+        [ObjCClassSwizzlerTestOne class],
+        @selector(method),
+        @selector(alternate));
+    EXPECT_EQ(9, [object_one method]);
+
+    IMP original = swizzler.GetOriginalImplementation();
+    id expected_result = reinterpret_cast<id>(3);
+    EXPECT_EQ(expected_result, original(object_one, @selector(method)));
+  }
+
+  EXPECT_EQ(3, [object_one method]);
+}
+
+TEST(ObjCClassSwizzlerTest, SwizzleViaInheritance) {
+  base::scoped_nsobject<ObjCClassSwizzlerTestOneChild> child(
+      [[ObjCClassSwizzlerTestOneChild alloc] init]);
+  EXPECT_EQ(3, [child method]);
+
+  {
+    base::mac::ScopedObjCClassSwizzler swizzler(
+        [ObjCClassSwizzlerTestOneChild class],
+        @selector(method),
+        @selector(childAlternate));
+    EXPECT_EQ(15, [child method]);
+
+    IMP original = swizzler.GetOriginalImplementation();
+    id expected_result = reinterpret_cast<id>(3);
+    EXPECT_EQ(expected_result, original(child, @selector(method)));
+  }
+
+  EXPECT_EQ(3, [child method]);
+}
+
+}  // namespace mac
+}  // namespace base
diff --git a/base/mac/scoped_sending_event.h b/base/mac/scoped_sending_event.h
new file mode 100644
index 0000000..c579cef
--- /dev/null
+++ b/base/mac/scoped_sending_event.h
@@ -0,0 +1,48 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_SCOPED_SENDING_EVENT_H_
+#define BASE_MAC_SCOPED_SENDING_EVENT_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/message_loop/message_pump_mac.h"
+
+// Nested event loops can pump IPC messages, including
+// script-initiated tab closes, which could release objects that the
+// nested event loop might message.  CrAppProtocol defines how to ask
+// the embedding NSApplication subclass if an event is currently being
+// handled, in which case such closes are deferred to the top-level
+// event loop.
+//
+// ScopedSendingEvent allows script-initiated event loops to work like
+// a nested event loop, as such events do not arrive via -sendEvent:.
+// CrAppControlProtocol lets ScopedSendingEvent tell the embedding
+// NSApplication what to return from -handlingSendEvent.
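+//
+// Example (a sketch):
+//
+//   {
+//     base::mac::ScopedSendingEvent sending_event;
+//     // -[NSApp handlingSendEvent] returns YES within this scope.
+//   }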
+
+@protocol CrAppControlProtocol<CrAppProtocol>
+- (void)setHandlingSendEvent:(BOOL)handlingSendEvent;
+@end
+
+namespace base {
+namespace mac {
+
+class BASE_EXPORT ScopedSendingEvent {
+ public:
+  ScopedSendingEvent();
+  ~ScopedSendingEvent();
+
+ private:
+  // The NSApp in control at the time the constructor was run, to be
+  // sure the |handling_| setting is restored appropriately.
+  NSObject<CrAppControlProtocol>* app_;
+  BOOL handling_;  // Value of -[app_ handlingSendEvent] at construction.
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedSendingEvent);
+};
+
+}  // namespace mac
+}  // namespace base
+
+#endif  // BASE_MAC_SCOPED_SENDING_EVENT_H_
diff --git a/base/mac/scoped_sending_event.mm b/base/mac/scoped_sending_event.mm
new file mode 100644
index 0000000..c3813d8
--- /dev/null
+++ b/base/mac/scoped_sending_event.mm
@@ -0,0 +1,24 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import "base/mac/scoped_sending_event.h"
+
+#include "base/logging.h"
+
+namespace base {
+namespace mac {
+
+ScopedSendingEvent::ScopedSendingEvent()
+    : app_(static_cast<NSObject<CrAppControlProtocol>*>(NSApp)) {
+  DCHECK([app_ conformsToProtocol:@protocol(CrAppControlProtocol)]);
+  handling_ = [app_ isHandlingSendEvent];
+  [app_ setHandlingSendEvent:YES];
+}
+
+ScopedSendingEvent::~ScopedSendingEvent() {
+  [app_ setHandlingSendEvent:handling_];
+}
+
+}  // namespace mac
+}  // namespace base
diff --git a/base/mac/scoped_sending_event_unittest.mm b/base/mac/scoped_sending_event_unittest.mm
new file mode 100644
index 0000000..52f18c6
--- /dev/null
+++ b/base/mac/scoped_sending_event_unittest.mm
@@ -0,0 +1,63 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import "base/mac/scoped_sending_event.h"
+
+#import <Foundation/Foundation.h>
+
+#include "base/mac/scoped_nsobject.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+@interface ScopedSendingEventTestCrApp : NSApplication <CrAppControlProtocol> {
+ @private
+  BOOL handlingSendEvent_;
+}
+@property(nonatomic, assign, getter=isHandlingSendEvent) BOOL handlingSendEvent;
+@end
+
+@implementation ScopedSendingEventTestCrApp
+@synthesize handlingSendEvent = handlingSendEvent_;
+@end
+
+namespace {
+
+class ScopedSendingEventTest : public testing::Test {
+ public:
+  ScopedSendingEventTest() : app_([[ScopedSendingEventTestCrApp alloc] init]) {
+    NSApp = app_.get();
+  }
+  ~ScopedSendingEventTest() override { NSApp = nil; }
+
+ private:
+  base::scoped_nsobject<ScopedSendingEventTestCrApp> app_;
+};
+
+// Sets the flag within scope, resets when leaving scope.
+TEST_F(ScopedSendingEventTest, SetHandlingSendEvent) {
+  id<CrAppProtocol> app = NSApp;
+  EXPECT_FALSE([app isHandlingSendEvent]);
+  {
+    base::mac::ScopedSendingEvent is_handling_send_event;
+    EXPECT_TRUE([app isHandlingSendEvent]);
+  }
+  EXPECT_FALSE([app isHandlingSendEvent]);
+}
+
+// Nested call restores previous value rather than resetting flag.
+TEST_F(ScopedSendingEventTest, NestedSetHandlingSendEvent) {
+  id<CrAppProtocol> app = NSApp;
+  EXPECT_FALSE([app isHandlingSendEvent]);
+  {
+    base::mac::ScopedSendingEvent is_handling_send_event;
+    EXPECT_TRUE([app isHandlingSendEvent]);
+    {
+      base::mac::ScopedSendingEvent nested_is_handling_send_event;
+      EXPECT_TRUE([app isHandlingSendEvent]);
+    }
+    EXPECT_TRUE([app isHandlingSendEvent]);
+  }
+  EXPECT_FALSE([app isHandlingSendEvent]);
+}
+
+}  // namespace
diff --git a/base/mac/scoped_typeref.h b/base/mac/scoped_typeref.h
new file mode 100644
index 0000000..dd9841d
--- /dev/null
+++ b/base/mac/scoped_typeref.h
@@ -0,0 +1,139 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_SCOPED_TYPEREF_H_
+#define BASE_MAC_SCOPED_TYPEREF_H_
+
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/memory/scoped_policy.h"
+
+namespace base {
+
+// ScopedTypeRef<> is patterned after std::unique_ptr<>, but maintains ownership
+// of a reference to any type that is maintained by Retain and Release methods.
+//
+// The Traits structure must provide the Retain and Release methods for type T.
+// A default ScopedTypeRefTraits is used but not defined, and should be defined
+// for each type to use this interface. For example, an appropriate definition
+// of ScopedTypeRefTraits for CGLContextObj would be:
+//
+//   template<>
+//   struct ScopedTypeRefTraits<CGLContextObj> {
+//     static CGLContextObj InvalidValue() { return nullptr; }
+//     static CGLContextObj Retain(CGLContextObj object) {
+//       CGLContextRetain(object);
+//       return object;
+//     }
+//     static void Release(CGLContextObj object) { CGLContextRelease(object); }
+//   };
+//
+// For the many types that have pass-by-pointer create functions, the function
+// InitializeInto() is provided to allow direct initialization and assumption
+// of ownership of the object. For example, continuing to use the above
+// CGLContextObj specialization:
+//
+//   base::ScopedTypeRef<CGLContextObj> context;
+//   CGLCreateContext(pixel_format, share_group, context.InitializeInto());
+//
+// For initialization with an existing object, the caller may specify whether
+// the ScopedTypeRef<> being initialized is assuming the caller's existing
+// ownership of the object (and should not call Retain in initialization) or if
+// it should not assume this ownership and must create its own (by calling
+// Retain in initialization). This behavior is based on the |policy| parameter,
+// with |ASSUME| for the former and |RETAIN| for the latter. The default policy
+// is to |ASSUME|.
+
+template<typename T>
+struct ScopedTypeRefTraits;
+
+template<typename T, typename Traits = ScopedTypeRefTraits<T>>
+class ScopedTypeRef {
+ public:
+  typedef T element_type;
+
+  explicit constexpr ScopedTypeRef(
+      __unsafe_unretained T object = Traits::InvalidValue(),
+      base::scoped_policy::OwnershipPolicy policy = base::scoped_policy::ASSUME)
+      : object_(object) {
+    if (object_ && policy == base::scoped_policy::RETAIN)
+      object_ = Traits::Retain(object_);
+  }
+
+  ScopedTypeRef(const ScopedTypeRef<T, Traits>& that)
+      : object_(that.object_) {
+    if (object_)
+      object_ = Traits::Retain(object_);
+  }
+
+  // This allows passing an object to a function that takes its superclass.
+  template <typename R, typename RTraits>
+  explicit ScopedTypeRef(const ScopedTypeRef<R, RTraits>& that_as_subclass)
+      : object_(that_as_subclass.get()) {
+    if (object_)
+      object_ = Traits::Retain(object_);
+  }
+
+  ScopedTypeRef(ScopedTypeRef<T, Traits>&& that) : object_(that.object_) {
+    that.object_ = Traits::InvalidValue();
+  }
+
+  ~ScopedTypeRef() {
+    if (object_)
+      Traits::Release(object_);
+  }
+
+  ScopedTypeRef& operator=(const ScopedTypeRef<T, Traits>& that) {
+    reset(that.get(), base::scoped_policy::RETAIN);
+    return *this;
+  }
+
+  // This is to be used only to take ownership of objects that are created
+  // by pass-by-pointer create functions. To enforce this, require that the
+  // object be reset to NULL before this may be used.
+  T* InitializeInto() WARN_UNUSED_RESULT {
+    DCHECK(!object_);
+    return &object_;
+  }
+
+  void reset(__unsafe_unretained T object = Traits::InvalidValue(),
+             base::scoped_policy::OwnershipPolicy policy =
+                 base::scoped_policy::ASSUME) {
+    if (object && policy == base::scoped_policy::RETAIN)
+      object = Traits::Retain(object);
+    if (object_)
+      Traits::Release(object_);
+    object_ = object;
+  }
+
+  bool operator==(__unsafe_unretained T that) const { return object_ == that; }
+
+  bool operator!=(__unsafe_unretained T that) const { return object_ != that; }
+
+  operator T() const __attribute((ns_returns_not_retained)) { return object_; }
+
+  T get() const __attribute((ns_returns_not_retained)) { return object_; }
+
+  void swap(ScopedTypeRef& that) {
+    __unsafe_unretained T temp = that.object_;
+    that.object_ = object_;
+    object_ = temp;
+  }
+
+  // ScopedTypeRef<>::release() is like std::unique_ptr<>::release.  It is NOT
+  // a wrapper for Release().  To force a ScopedTypeRef<> object to call
+  // Release(), use ScopedTypeRef<>::reset().
+  T release() __attribute((ns_returns_not_retained)) WARN_UNUSED_RESULT {
+    __unsafe_unretained T temp = object_;
+    object_ = Traits::InvalidValue();
+    return temp;
+  }
+
+ private:
+  __unsafe_unretained T object_;
+};
+
+}  // namespace base
+
+#endif  // BASE_MAC_SCOPED_TYPEREF_H_
diff --git a/base/mac/sdk_forward_declarations.h b/base/mac/sdk_forward_declarations.h
new file mode 100644
index 0000000..7993870
--- /dev/null
+++ b/base/mac/sdk_forward_declarations.h
@@ -0,0 +1,334 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains forward declarations for items in later SDKs than the
+// default one with which Chromium is built (currently 10.10).
+// If you call any function from this header, be sure to check at runtime with
+// respondsToSelector: before calling it (else your code will crash on older
+// OS X versions that Chrome still supports).
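+//
+// Example (a sketch, using a selector declared later in this file):
+//
+//   if ([window respondsToSelector:@selector(setTitlebarAppearsTransparent:)])
+//     [window setTitlebarAppearsTransparent:YES];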
+
+#ifndef BASE_MAC_SDK_FORWARD_DECLARATIONS_H_
+#define BASE_MAC_SDK_FORWARD_DECLARATIONS_H_
+
+#import <AppKit/AppKit.h>
+#import <CoreBluetooth/CoreBluetooth.h>
+#import <CoreWLAN/CoreWLAN.h>
+#import <IOBluetooth/IOBluetooth.h>
+#import <ImageCaptureCore/ImageCaptureCore.h>
+#import <QuartzCore/QuartzCore.h>
+#include <stdint.h>
+
+#include "base/base_export.h"
+#include "base/mac/availability.h"
+
+// ----------------------------------------------------------------------------
+// Define typedefs, enums, and protocols not available in the version of the
+// OSX SDK being compiled against.
+// ----------------------------------------------------------------------------
+
+#if !defined(MAC_OS_X_VERSION_10_11) || \
+    MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_11
+
+enum {
+  NSPressureBehaviorUnknown = -1,
+  NSPressureBehaviorPrimaryDefault = 0,
+  NSPressureBehaviorPrimaryClick = 1,
+  NSPressureBehaviorPrimaryGeneric = 2,
+  NSPressureBehaviorPrimaryAccelerator = 3,
+  NSPressureBehaviorPrimaryDeepClick = 5,
+  NSPressureBehaviorPrimaryDeepDrag = 6
+};
+typedef NSInteger NSPressureBehavior;
+
+@interface NSPressureConfiguration : NSObject
+- (instancetype)initWithPressureBehavior:(NSPressureBehavior)pressureBehavior;
+@end
+
+enum {
+  NSSpringLoadingHighlightNone = 0,
+  NSSpringLoadingHighlightStandard,
+  NSSpringLoadingHighlightEmphasized
+};
+typedef NSUInteger NSSpringLoadingHighlight;
+
+#endif  // MAC_OS_X_VERSION_10_11
+
+#if !defined(MAC_OS_X_VERSION_10_12) || \
+    MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_12
+
+// The protocol was formalized by the 10.12 SDK, but it was informally used
+// before.
+@protocol CAAnimationDelegate
+- (void)animationDidStart:(CAAnimation*)animation;
+- (void)animationDidStop:(CAAnimation*)animation finished:(BOOL)finished;
+@end
+
+@protocol CALayerDelegate
+@end
+
+#endif  // MAC_OS_X_VERSION_10_12
+
+// ----------------------------------------------------------------------------
+// Define NSStrings only available in newer versions of the OSX SDK to force
+// them to be statically linked.
+// ----------------------------------------------------------------------------
+
+extern "C" {
+#if !defined(MAC_OS_X_VERSION_10_10) || \
+    MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_10
+BASE_EXPORT extern NSString* const CIDetectorTypeQRCode;
+BASE_EXPORT extern NSString* const NSUserActivityTypeBrowsingWeb;
+BASE_EXPORT extern NSString* const NSAppearanceNameVibrantDark;
+BASE_EXPORT extern NSString* const NSAppearanceNameVibrantLight;
+#endif  // MAC_OS_X_VERSION_10_10
+}  // extern "C"
+
+// ----------------------------------------------------------------------------
+// If compiling against an older version of the OSX SDK, declare classes and
+// functions that are available in newer versions of the OSX SDK. If compiling
+// against a newer version of the OSX SDK, redeclare those same classes and
+// functions to suppress -Wpartial-availability warnings.
+// ----------------------------------------------------------------------------
+
+// Once Chrome no longer supports OSX 10.9, everything within this preprocessor
+// block can be removed.
+#if !defined(MAC_OS_X_VERSION_10_10) || \
+    MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_10
+
+@interface NSUserActivity (YosemiteSDK)
+@property(readonly, copy) NSString* activityType;
+@property(copy) NSDictionary* userInfo;
+@property(copy) NSURL* webpageURL;
+@property(copy) NSString* title;
+- (instancetype)initWithActivityType:(NSString*)activityType;
+- (void)becomeCurrent;
+- (void)invalidate;
+@end
+
+@interface CBUUID (YosemiteSDK)
+- (NSString*)UUIDString;
+@end
+
+@interface NSViewController (YosemiteSDK)
+- (void)viewDidLoad;
+@end
+
+@interface NSWindow (YosemiteSDK)
+- (void)setTitlebarAppearsTransparent:(BOOL)flag;
+@end
+
+@interface NSProcessInfo (YosemiteSDK)
+@property(readonly) NSOperatingSystemVersion operatingSystemVersion;
+@end
+
+@interface NSLayoutConstraint (YosemiteSDK)
+@property(getter=isActive) BOOL active;
++ (void)activateConstraints:(NSArray*)constraints;
+@end
+
+@interface NSVisualEffectView (YosemiteSDK)
+- (void)setState:(NSVisualEffectState)state;
+@end
+
+@class NSVisualEffectView;
+
+@interface CIQRCodeFeature (YosemiteSDK)
+@property(readonly) CGRect bounds;
+@property(readonly) CGPoint topLeft;
+@property(readonly) CGPoint topRight;
+@property(readonly) CGPoint bottomLeft;
+@property(readonly) CGPoint bottomRight;
+@property(readonly, copy) NSString* messageString;
+@end
+
+@class CIQRCodeFeature;
+
+@interface NSView (YosemiteSDK)
+- (BOOL)isAccessibilitySelectorAllowed:(SEL)selector;
+@property(copy) NSString* accessibilityLabel;
+@end
+
+#endif  // MAC_OS_X_VERSION_10_10
+
+// Once Chrome no longer supports OSX 10.10.2, everything within this
+// preprocessor block can be removed.
+#if !defined(MAC_OS_X_VERSION_10_10_3) || \
+    MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_10_3
+
+@interface NSEvent (Yosemite_3_SDK)
+@property(readonly) NSInteger stage;
+@end
+
+#endif  // MAC_OS_X_VERSION_10_10_3
+
+// ----------------------------------------------------------------------------
+// Define NSStrings only available in newer versions of the OSX SDK to force
+// them to be statically linked.
+// ----------------------------------------------------------------------------
+
+extern "C" {
+#if !defined(MAC_OS_X_VERSION_10_11) || \
+    MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_11
+BASE_EXPORT extern NSString* const CIDetectorTypeText;
+#endif  // MAC_OS_X_VERSION_10_11
+}  // extern "C"
+
+// Once Chrome no longer supports OSX 10.10, everything within this
+// preprocessor block can be removed.
+#if !defined(MAC_OS_X_VERSION_10_11) || \
+    MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_11
+
+@class NSLayoutDimension;
+@class NSLayoutXAxisAnchor;
+@class NSLayoutYAxisAnchor;
+
+@interface NSObject (ElCapitanSDK)
+- (NSLayoutConstraint*)constraintEqualToConstant:(CGFloat)c;
+- (NSLayoutConstraint*)constraintGreaterThanOrEqualToConstant:(CGFloat)c;
+@end
+
+@interface NSView (ElCapitanSDK)
+- (void)setPressureConfiguration:(NSPressureConfiguration*)aConfiguration
+    API_AVAILABLE(macos(10.11));
+@property(readonly, strong)
+    NSLayoutXAxisAnchor* leftAnchor API_AVAILABLE(macos(10.11));
+@property(readonly, strong)
+    NSLayoutXAxisAnchor* rightAnchor API_AVAILABLE(macos(10.11));
+@property(readonly, strong)
+    NSLayoutYAxisAnchor* bottomAnchor API_AVAILABLE(macos(10.11));
+@property(readonly, strong)
+    NSLayoutDimension* widthAnchor API_AVAILABLE(macos(10.11));
+@end
+
+@interface NSWindow (ElCapitanSDK)
+- (void)performWindowDragWithEvent:(NSEvent*)event;
+@end
+
+@interface CIRectangleFeature (ElCapitanSDK)
+@property(readonly) CGRect bounds;
+@end
+
+@class CIRectangleFeature;
+
+#endif  // MAC_OS_X_VERSION_10_11
+
+// Once Chrome no longer supports OSX 10.11, everything within this
+// preprocessor block can be removed.
+#if !defined(MAC_OS_X_VERSION_10_12) || \
+    MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_12
+
+@interface NSWindow (SierraSDK)
+@property(class) BOOL allowsAutomaticWindowTabbing;
+@end
+
+#endif  // MAC_OS_X_VERSION_10_12
+
+// Once Chrome no longer supports OSX 10.12.0, everything within this
+// preprocessor block can be removed.
+#if !defined(MAC_OS_X_VERSION_10_12_1) || \
+    MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_12_1
+
+@interface NSButton (SierraPointOneSDK)
+@property(copy) NSColor* bezelColor;
+@property BOOL imageHugsTitle;
++ (instancetype)buttonWithTitle:(NSString*)title
+                         target:(id)target
+                         action:(SEL)action;
++ (instancetype)buttonWithImage:(NSImage*)image
+                         target:(id)target
+                         action:(SEL)action;
++ (instancetype)buttonWithTitle:(NSString*)title
+                          image:(NSImage*)image
+                         target:(id)target
+                         action:(SEL)action;
+@end
+
+@interface NSSegmentedControl (SierraPointOneSDK)
++ (instancetype)segmentedControlWithImages:(NSArray*)images
+                              trackingMode:(NSSegmentSwitchTracking)trackingMode
+                                    target:(id)target
+                                    action:(SEL)action;
+@end
+
+@interface NSTextField (SierraPointOneSDK)
++ (instancetype)labelWithAttributedString:
+    (NSAttributedString*)attributedStringValue;
+@end
+
+#endif  // MAC_OS_X_VERSION_10_12_1
+
+// Once Chrome no longer supports OSX 10.12, everything within this
+// preprocessor block can be removed.
+#if !defined(MAC_OS_X_VERSION_10_13) || \
+    MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_13
+
+// VNRequest forward declarations.
+@class VNRequest;
+typedef void (^VNRequestCompletionHandler)(VNRequest* request, NSError* error);
+
+@interface VNRequest : NSObject<NSCopying>
+- (instancetype)initWithCompletionHandler:
+    (VNRequestCompletionHandler)completionHandler NS_DESIGNATED_INITIALIZER;
+@property(readonly, nonatomic, copy) NSArray* results;
+@end
+
+// VNDetectFaceLandmarksRequest forward declarations.
+@interface VNImageBasedRequest : VNRequest
+@end
+
+@protocol VNFaceObservationAccepting<NSObject>
+@end
+
+@interface VNDetectFaceLandmarksRequest
+    : VNImageBasedRequest<VNFaceObservationAccepting>
+@end
+
+// VNImageRequestHandler forward declarations.
+typedef NSString* VNImageOption NS_STRING_ENUM;
+
+@interface VNImageRequestHandler : NSObject
+- (instancetype)initWithCIImage:(CIImage*)image
+                        options:(NSDictionary<VNImageOption, id>*)options;
+- (BOOL)performRequests:(NSArray<VNRequest*>*)requests error:(NSError**)error;
+@end
+
+// VNFaceLandmarks2D forward declarations.
+@interface VNFaceLandmarkRegion : NSObject
+@property(readonly) NSUInteger pointCount;
+@end
+
+@interface VNFaceLandmarkRegion2D : VNFaceLandmarkRegion
+@property(readonly, assign)
+    const CGPoint* normalizedPoints NS_RETURNS_INNER_POINTER;
+@end
+
+@interface VNFaceLandmarks2D : NSObject
+@property(readonly) VNFaceLandmarkRegion2D* leftEye;
+@property(readonly) VNFaceLandmarkRegion2D* rightEye;
+@property(readonly) VNFaceLandmarkRegion2D* outerLips;
+@property(readonly) VNFaceLandmarkRegion2D* nose;
+@end
+
+// VNFaceObservation forward declarations.
+@interface VNObservation : NSObject<NSCopying, NSSecureCoding>
+@end
+
+@interface VNDetectedObjectObservation : VNObservation
+@property(readonly, nonatomic, assign) CGRect boundingBox;
+@end
+
+@interface VNFaceObservation : VNDetectedObjectObservation
+@property(readonly, nonatomic, strong) VNFaceLandmarks2D* landmarks;
+@end
+
+#endif  // MAC_OS_X_VERSION_10_13
+
+// ----------------------------------------------------------------------------
+// The symbol for kCWSSIDDidChangeNotification is available in the
+// CoreWLAN.framework for OSX versions 10.6 through 10.10. The symbol is not
+// declared in the OSX 10.9+ SDK, so when compiling against an OSX 10.9+ SDK,
+// declare the symbol.
+// ----------------------------------------------------------------------------
+BASE_EXPORT extern "C" NSString* const kCWSSIDDidChangeNotification;
+
+#endif  // BASE_MAC_SDK_FORWARD_DECLARATIONS_H_
diff --git a/base/mac/sdk_forward_declarations.mm b/base/mac/sdk_forward_declarations.mm
new file mode 100644
index 0000000..c624dae
--- /dev/null
+++ b/base/mac/sdk_forward_declarations.mm
@@ -0,0 +1,20 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac/sdk_forward_declarations.h"
+
+#if !defined(MAC_OS_X_VERSION_10_10) || \
+    MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_10
+NSString* const CIDetectorTypeQRCode = @"CIDetectorTypeQRCode";
+
+NSString* const NSUserActivityTypeBrowsingWeb =
+    @"NSUserActivityTypeBrowsingWeb";
+
+NSString* const NSAppearanceNameVibrantDark = @"NSAppearanceNameVibrantDark";
+#endif  // MAC_OS_X_VERSION_10_10
+
+#if !defined(MAC_OS_X_VERSION_10_11) || \
+    MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_11
+NSString* const CIDetectorTypeText = @"CIDetectorTypeText";
+#endif  // MAC_OS_X_VERSION_10_11
diff --git a/base/macros.h b/base/macros.h
new file mode 100644
index 0000000..3064a1b
--- /dev/null
+++ b/base/macros.h
@@ -0,0 +1,95 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains macros and macro-like constructs (e.g., templates) that
+// are commonly used throughout Chromium source. (It may also contain things
+// closely related to those commonly used constructs, which belong in this
+// file.)
+
+#ifndef BASE_MACROS_H_
+#define BASE_MACROS_H_
+
+#include <stddef.h>  // For size_t.
+
+// Distinguish mips32.
+#if defined(__mips__) && (_MIPS_SIM == _ABIO32) && !defined(__mips32__)
+#define __mips32__
+#endif
+
+// Distinguish mips64.
+#if defined(__mips__) && (_MIPS_SIM == _ABI64) && !defined(__mips64__)
+#define __mips64__
+#endif
+
+// Put this in the declarations for a class to be uncopyable.
+#define DISALLOW_COPY(TypeName) \
+  TypeName(const TypeName&) = delete
+
+// Put this in the declarations for a class to be unassignable.
+#define DISALLOW_ASSIGN(TypeName) TypeName& operator=(const TypeName&) = delete
+
+// Put this in the declarations for a class to be uncopyable and unassignable.
+#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
+  DISALLOW_COPY(TypeName);                 \
+  DISALLOW_ASSIGN(TypeName)
+
+// A macro to disallow all the implicit constructors, namely the
+// default constructor, copy constructor and operator= functions.
+// This is especially useful for classes containing only static methods.
+#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
+  TypeName() = delete;                           \
+  DISALLOW_COPY_AND_ASSIGN(TypeName)
+
+// The arraysize(arr) macro returns the # of elements in an array arr.  The
+// expression is a compile-time constant, and therefore can be used in defining
+// new arrays, for example.  If you use arraysize on a pointer by mistake, you
+// will get a compile-time error.  For the technical details, refer to
+// http://blogs.msdn.com/b/the1/archive/2004/05/07/128242.aspx.
+
+// This template function declaration is used in defining arraysize.
+// Note that the function doesn't need an implementation, as we only
+// use its type.
+//
+// DEPRECATED, please use base::size(array) instead.
+// TODO(https://crbug.com/837308): Replace existing arraysize usages.
+template <typename T, size_t N> char (&ArraySizeHelper(T (&array)[N]))[N];
+#define arraysize(array) (sizeof(ArraySizeHelper(array)))
+
+// Used to explicitly mark the return value of a function as unused. If you are
+// really sure you don't want to do anything with the return value of a function
+// that has been marked WARN_UNUSED_RESULT, wrap it with this. Example:
+//
+//   std::unique_ptr<MyType> my_var = ...;
+//   if (TakeOwnership(my_var.get()) == SUCCESS)
+//     ignore_result(my_var.release());
+//
+template<typename T>
+inline void ignore_result(const T&) {
+}
+
+namespace base {
+
+// Use these to declare and define a static local variable (static T;) so that
+// it is leaked and its destructor is not called at exit.  This is
+// thread-safe.
+//
+// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! DEPRECATED !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+// Please don't use this macro. Use a function-local static of type
+// base::NoDestructor<T> instead:
+//
+// Factory& Factory::GetInstance() {
+//   static base::NoDestructor<Factory> instance;
+//   return *instance;
+// }
+// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+#define CR_DEFINE_STATIC_LOCAL(type, name, arguments) \
+  static type& name = *new type arguments
+
+// Workaround for MSVC, which expands __VA_ARGS__ as one macro argument. To
+// work around this bug, wrap the entire expression in this macro...
+#define CR_EXPAND_ARG(arg) arg
+
+}  // namespace base
+
+#endif  // BASE_MACROS_H_
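
Taken together, the macros above compose as in this short sketch (illustrative
only; MyClass, kTable, and Use() are hypothetical):

    #include "base/macros.h"

    class MyClass {
     public:
      MyClass() = default;
      int Compute();

     private:
      DISALLOW_COPY_AND_ASSIGN(MyClass);  // deletes copy ctor and operator=
    };

    static const int kTable[] = {1, 2, 3};
    static_assert(arraysize(kTable) == 3, "compile-time element count");

    void Use(MyClass* c) {
      ignore_result(c->Compute());  // explicitly discard a return value
    }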
diff --git a/base/md5.cc b/base/md5.cc
new file mode 100644
index 0000000..72c774d
--- /dev/null
+++ b/base/md5.cc
@@ -0,0 +1,299 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// The original file was copied from sqlite, and was in the public domain.
+
+/*
+ * This code implements the MD5 message-digest algorithm.
+ * The algorithm is due to Ron Rivest.  This code was
+ * written by Colin Plumb in 1993, no copyright is claimed.
+ * This code is in the public domain; do with it what you wish.
+ *
+ * Equivalent code is available from RSA Data Security, Inc.
+ * This code has been tested against that, and is equivalent,
+ * except that you don't need to include two pages of legalese
+ * with every copy.
+ *
+ * To compute the message digest of a chunk of bytes, declare an
+ * MD5Context structure, pass it to MD5Init, call MD5Update as
+ * needed on buffers full of bytes, and then call MD5Final, which
+ * will fill a supplied 16-byte array with the digest.
+ */
+
+#include "base/md5.h"
+
+#include <stddef.h>
+
+namespace {
+
+struct Context {
+  uint32_t buf[4];
+  uint32_t bits[2];
+  uint8_t in[64];
+};
+
+/*
+ * Note: this code is harmless on little-endian machines.
+ */
+void byteReverse(uint8_t* buf, unsigned longs) {
+  do {
+    uint32_t temp = static_cast<uint32_t>(
+        static_cast<unsigned>(buf[3]) << 8 |
+        buf[2]) << 16 |
+        (static_cast<unsigned>(buf[1]) << 8 | buf[0]);
+    *reinterpret_cast<uint32_t*>(buf) = temp;
+    buf += 4;
+  } while (--longs);
+}
+
+/* The four core functions - F1 is optimized somewhat */
+
+/* #define F1(x, y, z) (x & y | ~x & z) */
+#define F1(x, y, z) (z ^ (x & (y ^ z)))
+#define F2(x, y, z) F1(z, x, y)
+#define F3(x, y, z) (x ^ y ^ z)
+#define F4(x, y, z) (y ^ (x | ~z))
+
+/* This is the central step in the MD5 algorithm. */
+#define MD5STEP(f, w, x, y, z, data, s) \
+  (w += f(x, y, z) + data, w = w << s | w >> (32 - s), w += x)
+
+/*
+ * The core of the MD5 algorithm, this alters an existing MD5 hash to
+ * reflect the addition of 16 longwords of new data.  MD5Update blocks
+ * the data and converts bytes into longwords for this routine.
+ */
+void MD5Transform(uint32_t buf[4], const uint32_t in[16]) {
+  uint32_t a, b, c, d;
+
+  a = buf[0];
+  b = buf[1];
+  c = buf[2];
+  d = buf[3];
+
+  MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
+  MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
+  MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
+  MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
+  MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
+  MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
+  MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
+  MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
+  MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
+  MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
+  MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
+  MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
+  MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
+  MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
+  MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
+  MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);
+
+  MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
+  MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
+  MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
+  MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
+  MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
+  MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
+  MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
+  MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
+  MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
+  MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
+  MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
+  MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
+  MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
+  MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
+  MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
+  MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);
+
+  MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
+  MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
+  MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
+  MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
+  MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
+  MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
+  MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
+  MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
+  MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
+  MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
+  MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
+  MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
+  MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
+  MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
+  MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
+  MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);
+
+  MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
+  MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
+  MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
+  MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
+  MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
+  MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
+  MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
+  MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
+  MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
+  MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
+  MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
+  MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
+  MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
+  MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
+  MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
+  MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);
+
+  buf[0] += a;
+  buf[1] += b;
+  buf[2] += c;
+  buf[3] += d;
+}
+
+}  // namespace
+
+namespace base {
+
+/*
+ * Start MD5 accumulation.  Set bit count to 0 and buffer to mysterious
+ * initialization constants.
+ */
+void MD5Init(MD5Context* context) {
+  struct Context* ctx = reinterpret_cast<struct Context*>(context);
+  ctx->buf[0] = 0x67452301;
+  ctx->buf[1] = 0xefcdab89;
+  ctx->buf[2] = 0x98badcfe;
+  ctx->buf[3] = 0x10325476;
+  ctx->bits[0] = 0;
+  ctx->bits[1] = 0;
+}
+
+/*
+ * Update context to reflect the concatenation of another buffer full
+ * of bytes.
+ */
+void MD5Update(MD5Context* context, const StringPiece& data) {
+  struct Context* ctx = reinterpret_cast<struct Context*>(context);
+  const uint8_t* buf = reinterpret_cast<const uint8_t*>(data.data());
+  size_t len = data.size();
+
+  /* Update bitcount */
+
+  uint32_t t = ctx->bits[0];
+  if ((ctx->bits[0] = t + (static_cast<uint32_t>(len) << 3)) < t)
+    ctx->bits[1]++; /* Carry from low to high */
+  ctx->bits[1] += static_cast<uint32_t>(len >> 29);
+
+  t = (t >> 3) & 0x3f; /* Bytes already in shsInfo->data */
+
+  /* Handle any leading odd-sized chunks */
+
+  if (t) {
+    uint8_t* p = static_cast<uint8_t*>(ctx->in + t);
+
+    t = 64 - t;
+    if (len < t) {
+      memcpy(p, buf, len);
+      return;
+    }
+    memcpy(p, buf, t);
+    byteReverse(ctx->in, 16);
+    MD5Transform(ctx->buf, reinterpret_cast<uint32_t*>(ctx->in));
+    buf += t;
+    len -= t;
+  }
+
+  /* Process data in 64-byte chunks */
+
+  while (len >= 64) {
+    memcpy(ctx->in, buf, 64);
+    byteReverse(ctx->in, 16);
+    MD5Transform(ctx->buf, reinterpret_cast<uint32_t*>(ctx->in));
+    buf += 64;
+    len -= 64;
+  }
+
+  /* Handle any remaining bytes of data. */
+
+  memcpy(ctx->in, buf, len);
+}
+
+/*
+ * Final wrapup - pad to 64-byte boundary with the bit pattern
+ * 1 0* (64-bit count of bits processed, MSB-first)
+ */
+void MD5Final(MD5Digest* digest, MD5Context* context) {
+  struct Context* ctx = reinterpret_cast<struct Context*>(context);
+  unsigned count;
+  uint8_t* p;
+
+  /* Compute number of bytes mod 64 */
+  count = (ctx->bits[0] >> 3) & 0x3F;
+
+  /* Set the first char of padding to 0x80.  This is safe since there is
+     always at least one byte free */
+  p = ctx->in + count;
+  *p++ = 0x80;
+
+  /* Bytes of padding needed to make 64 bytes */
+  count = 64 - 1 - count;
+
+  /* Pad out to 56 mod 64 */
+  if (count < 8) {
+    /* Two lots of padding:  Pad the first block to 64 bytes */
+    memset(p, 0, count);
+    byteReverse(ctx->in, 16);
+    MD5Transform(ctx->buf, reinterpret_cast<uint32_t*>(ctx->in));
+
+    /* Now fill the next block with 56 bytes */
+    memset(ctx->in, 0, 56);
+  } else {
+    /* Pad block to 56 bytes */
+    memset(p, 0, count - 8);
+  }
+  byteReverse(ctx->in, 14);
+
+  /* Append length in bits and transform */
+  memcpy(&ctx->in[14 * sizeof(ctx->bits[0])], &ctx->bits[0],
+         sizeof(ctx->bits[0]));
+  memcpy(&ctx->in[15 * sizeof(ctx->bits[1])], &ctx->bits[1],
+         sizeof(ctx->bits[1]));
+
+  MD5Transform(ctx->buf, reinterpret_cast<uint32_t*>(ctx->in));
+  byteReverse(reinterpret_cast<uint8_t*>(ctx->buf), 4);
+  memcpy(digest->a, ctx->buf, 16);
+  memset(ctx, 0, sizeof(*ctx)); /* In case it's sensitive */
+}
+
+void MD5IntermediateFinal(MD5Digest* digest, const MD5Context* context) {
+  /* MD5Final mutates the MD5Context*. Make a copy for generating the
+     intermediate value. */
+  MD5Context context_copy;
+  memcpy(&context_copy, context, sizeof(context_copy));
+  MD5Final(digest, &context_copy);
+}
+
+std::string MD5DigestToBase16(const MD5Digest& digest) {
+  static char const zEncode[] = "0123456789abcdef";
+
+  std::string ret;
+  ret.resize(32);
+
+  for (int i = 0, j = 0; i < 16; i++, j += 2) {
+    uint8_t a = digest.a[i];
+    ret[j] = zEncode[(a >> 4) & 0xf];
+    ret[j + 1] = zEncode[a & 0xf];
+  }
+  return ret;
+}
+
+void MD5Sum(const void* data, size_t length, MD5Digest* digest) {
+  MD5Context ctx;
+  MD5Init(&ctx);
+  MD5Update(&ctx, StringPiece(reinterpret_cast<const char*>(data), length));
+  MD5Final(digest, &ctx);
+}
+
+std::string MD5String(const StringPiece& str) {
+  MD5Digest digest;
+  MD5Sum(str.data(), str.length(), &digest);
+  return MD5DigestToBase16(digest);
+}
+
+}  // namespace base
diff --git a/base/md5.h b/base/md5.h
new file mode 100644
index 0000000..ef64178
--- /dev/null
+++ b/base/md5.h
@@ -0,0 +1,78 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MD5_H_
+#define BASE_MD5_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/base_export.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+// MD5 stands for Message Digest algorithm 5.
+// MD5 is a robust hash function, designed for cryptography, but often used
+// for file checksums.  The code is complex and slow, but has few
+// collisions.
+// See Also:
+//   http://en.wikipedia.org/wiki/MD5
+
+// These functions perform MD5 operations. The simplest call is MD5Sum() to
+// generate the MD5 sum of the given data.
+//
+// You can also compute the MD5 sum of data incrementally by making multiple
+// calls to MD5Update():
+//   MD5Context ctx; // intermediate MD5 data: do not use
+//   MD5Init(&ctx);
+//   MD5Update(&ctx, data1, length1);
+//   MD5Update(&ctx, data2, length2);
+//   ...
+//
+//   MD5Digest digest; // the result of the computation
+//   MD5Final(&digest, &ctx);
+//
+// You can call MD5DigestToBase16() to generate a string of the digest.
+
+// The output of an MD5 operation.
+struct MD5Digest {
+  uint8_t a[16];
+};
+
+// Used for storing intermediate data during an MD5 computation. Callers
+// should not access the data.
+typedef char MD5Context[88];
+
+// Initializes the given MD5 context structure for subsequent calls to
+// MD5Update().
+BASE_EXPORT void MD5Init(MD5Context* context);
+
+// For the given buffer of |data| as a StringPiece, updates the given MD5
+// context with the sum of the data. You can call this any number of times
+// during the computation, except that MD5Init() must have been called first.
+BASE_EXPORT void MD5Update(MD5Context* context, const StringPiece& data);
+
+// Finalizes the MD5 operation and fills the buffer with the digest.
+BASE_EXPORT void MD5Final(MD5Digest* digest, MD5Context* context);
+
+// MD5IntermediateFinal() generates a digest without finalizing the MD5
+// operation.  Can be used to generate digests for the input seen thus far,
+// without affecting the digest generated for the entire input.
+BASE_EXPORT void MD5IntermediateFinal(MD5Digest* digest,
+                                      const MD5Context* context);
+
+// Converts a digest into human-readable hexadecimal.
+BASE_EXPORT std::string MD5DigestToBase16(const MD5Digest& digest);
+
+// Computes the MD5 sum of the given data buffer with the given length.
+// The given 'digest' structure will be filled with the result data.
+BASE_EXPORT void MD5Sum(const void* data, size_t length, MD5Digest* digest);
+
+// Returns the MD5 (in hexadecimal) of a string.
+BASE_EXPORT std::string MD5String(const StringPiece& str);
+
+}  // namespace base
+
+#endif  // BASE_MD5_H_
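
The incremental interface described above composes like this; the resulting
digest for "abc" is the RFC 1321 test vector:

    #include "base/md5.h"

    std::string HashTwoParts(base::StringPiece part1, base::StringPiece part2) {
      base::MD5Context ctx;
      base::MD5Init(&ctx);
      base::MD5Update(&ctx, part1);
      base::MD5Update(&ctx, part2);

      base::MD5Digest digest;
      base::MD5Final(&digest, &ctx);
      return base::MD5DigestToBase16(digest);
    }

    // HashTwoParts("a", "bc") == base::MD5String("abc")
    //                         == "900150983cd24fb0d6963f7d28e17f72"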
diff --git a/base/md5_unittest.cc b/base/md5_unittest.cc
new file mode 100644
index 0000000..b27efe9
--- /dev/null
+++ b/base/md5_unittest.cc
@@ -0,0 +1,253 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/md5.h"
+
+#include <string.h>
+
+#include <memory>
+#include <string>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(MD5, DigestToBase16) {
+  MD5Digest digest;
+
+  int data[] = {
+    0xd4, 0x1d, 0x8c, 0xd9,
+    0x8f, 0x00, 0xb2, 0x04,
+    0xe9, 0x80, 0x09, 0x98,
+    0xec, 0xf8, 0x42, 0x7e
+  };
+
+  for (int i = 0; i < 16; ++i)
+    digest.a[i] = data[i] & 0xff;
+
+  std::string actual = MD5DigestToBase16(digest);
+  std::string expected = "d41d8cd98f00b204e9800998ecf8427e";
+
+  EXPECT_EQ(expected, actual);
+}
+
+TEST(MD5, MD5SumEmptyData) {
+  MD5Digest digest;
+  const char data[] = "";
+
+  MD5Sum(data, strlen(data), &digest);
+
+  int expected[] = {
+    0xd4, 0x1d, 0x8c, 0xd9,
+    0x8f, 0x00, 0xb2, 0x04,
+    0xe9, 0x80, 0x09, 0x98,
+    0xec, 0xf8, 0x42, 0x7e
+  };
+
+  for (int i = 0; i < 16; ++i)
+    EXPECT_EQ(expected[i], digest.a[i] & 0xFF);
+}
+
+TEST(MD5, MD5SumOneByteData) {
+  MD5Digest digest;
+  const char data[] = "a";
+
+  MD5Sum(data, strlen(data), &digest);
+
+  int expected[] = {
+    0x0c, 0xc1, 0x75, 0xb9,
+    0xc0, 0xf1, 0xb6, 0xa8,
+    0x31, 0xc3, 0x99, 0xe2,
+    0x69, 0x77, 0x26, 0x61
+  };
+
+  for (int i = 0; i < 16; ++i)
+    EXPECT_EQ(expected[i], digest.a[i] & 0xFF);
+}
+
+TEST(MD5, MD5SumLongData) {
+  const int length = 10 * 1024 * 1024 + 1;
+  std::unique_ptr<char[]> data(new char[length]);
+
+  for (int i = 0; i < length; ++i)
+    data[i] = i & 0xFF;
+
+  MD5Digest digest;
+  MD5Sum(data.get(), length, &digest);
+
+  int expected[] = {
+    0x90, 0xbd, 0x6a, 0xd9,
+    0x0a, 0xce, 0xf5, 0xad,
+    0xaa, 0x92, 0x20, 0x3e,
+    0x21, 0xc7, 0xa1, 0x3e
+  };
+
+  for (int i = 0; i < 16; ++i)
+    EXPECT_EQ(expected[i], digest.a[i] & 0xFF);
+}
+
+TEST(MD5, ContextWithEmptyData) {
+  MD5Context ctx;
+  MD5Init(&ctx);
+
+  MD5Digest digest;
+  MD5Final(&digest, &ctx);
+
+  int expected[] = {
+    0xd4, 0x1d, 0x8c, 0xd9,
+    0x8f, 0x00, 0xb2, 0x04,
+    0xe9, 0x80, 0x09, 0x98,
+    0xec, 0xf8, 0x42, 0x7e
+  };
+
+  for (int i = 0; i < 16; ++i)
+    EXPECT_EQ(expected[i], digest.a[i] & 0xFF);
+}
+
+TEST(MD5, ContextWithLongData) {
+  MD5Context ctx;
+  MD5Init(&ctx);
+
+  const int length = 10 * 1024 * 1024 + 1;
+  std::unique_ptr<char[]> data(new char[length]);
+
+  for (int i = 0; i < length; ++i)
+    data[i] = i & 0xFF;
+
+  int total = 0;
+  while (total < length) {
+    int len = 4097;  // intentionally not 2^k.
+    if (len > length - total)
+      len = length - total;
+
+    MD5Update(&ctx,
+              StringPiece(reinterpret_cast<char*>(data.get() + total), len));
+    total += len;
+  }
+
+  EXPECT_EQ(length, total);
+
+  MD5Digest digest;
+  MD5Final(&digest, &ctx);
+
+  int expected[] = {
+    0x90, 0xbd, 0x6a, 0xd9,
+    0x0a, 0xce, 0xf5, 0xad,
+    0xaa, 0x92, 0x20, 0x3e,
+    0x21, 0xc7, 0xa1, 0x3e
+  };
+
+  for (int i = 0; i < 16; ++i)
+    EXPECT_EQ(expected[i], digest.a[i] & 0xFF);
+}
+
+// Example data from http://www.ietf.org/rfc/rfc1321.txt A.5 Test Suite
+TEST(MD5, MD5StringTestSuite1) {
+  std::string actual = MD5String("");
+  std::string expected = "d41d8cd98f00b204e9800998ecf8427e";
+  EXPECT_EQ(expected, actual);
+}
+
+TEST(MD5, MD5StringTestSuite2) {
+  std::string actual = MD5String("a");
+  std::string expected = "0cc175b9c0f1b6a831c399e269772661";
+  EXPECT_EQ(expected, actual);
+}
+
+TEST(MD5, MD5StringTestSuite3) {
+  std::string actual = MD5String("abc");
+  std::string expected = "900150983cd24fb0d6963f7d28e17f72";
+  EXPECT_EQ(expected, actual);
+}
+
+TEST(MD5, MD5StringTestSuite4) {
+  std::string actual = MD5String("message digest");
+  std::string expected = "f96b697d7cb7938d525a2f31aaf161d0";
+  EXPECT_EQ(expected, actual);
+}
+
+TEST(MD5, MD5StringTestSuite5) {
+  std::string actual = MD5String("abcdefghijklmnopqrstuvwxyz");
+  std::string expected = "c3fcd3d76192e4007dfb496cca67e13b";
+  EXPECT_EQ(expected, actual);
+}
+
+TEST(MD5, MD5StringTestSuite6) {
+  std::string actual = MD5String("ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+                                 "abcdefghijklmnopqrstuvwxyz"
+                                 "0123456789");
+  std::string expected = "d174ab98d277d9f5a5611c2c9f419d9f";
+  EXPECT_EQ(expected, actual);
+}
+
+TEST(MD5, MD5StringTestSuite7) {
+  std::string actual = MD5String("12345678901234567890"
+                                 "12345678901234567890"
+                                 "12345678901234567890"
+                                 "12345678901234567890");
+  std::string expected = "57edf4a22be3c955ac49da2e2107b67a";
+  EXPECT_EQ(expected, actual);
+}
+
+TEST(MD5, ContextWithStringData) {
+  MD5Context ctx;
+  MD5Init(&ctx);
+
+  MD5Update(&ctx, "abc");
+
+  MD5Digest digest;
+  MD5Final(&digest, &ctx);
+
+  std::string actual = MD5DigestToBase16(digest);
+  std::string expected = "900150983cd24fb0d6963f7d28e17f72";
+
+  EXPECT_EQ(expected, actual);
+}
+
+// Test that a digest generated by MD5IntermediateFinal() gives the same results
+// as an independently-calculated digest, and also does not modify the context.
+TEST(MD5, IntermediateFinal) {
+  // Independent context over the header.
+  MD5Context check_header_context;
+  MD5Init(&check_header_context);
+
+  // Independent context over entire input.
+  MD5Context check_full_context;
+  MD5Init(&check_full_context);
+
+  // Context intermediate digest will be calculated from.
+  MD5Context context;
+  MD5Init(&context);
+
+  static const char kHeader[] = "header data";
+  static const char kBody[] = "payload data";
+
+  MD5Update(&context, kHeader);
+  MD5Update(&check_header_context, kHeader);
+  MD5Update(&check_full_context, kHeader);
+
+  MD5Digest check_header_digest;
+  MD5Final(&check_header_digest, &check_header_context);
+
+  MD5Digest header_digest;
+  MD5IntermediateFinal(&header_digest, &context);
+
+  MD5Update(&context, kBody);
+  MD5Update(&check_full_context, kBody);
+
+  MD5Digest check_full_digest;
+  MD5Final(&check_full_digest, &check_full_context);
+
+  MD5Digest digest;
+  MD5Final(&digest, &context);
+
+  // Each digest matches its independently computed check digest, and the
+  // header digest differs from the full digest.
+  EXPECT_TRUE(!memcmp(&header_digest, &check_header_digest,
+                      sizeof(header_digest)));
+  EXPECT_TRUE(!memcmp(&digest, &check_full_digest, sizeof(digest)));
+  EXPECT_TRUE(memcmp(&digest, &header_digest, sizeof(digest)));
+}
+
+}  // namespace base
diff --git a/base/memory/OWNERS b/base/memory/OWNERS
new file mode 100644
index 0000000..9b7cbb1
--- /dev/null
+++ b/base/memory/OWNERS
@@ -0,0 +1,4 @@
+per-file *chromeos*=skuhne@chromium.org
+per-file *chromeos*=oshima@chromium.org
+per-file *shared_memory*=set noparent
+per-file *shared_memory*=file://ipc/SECURITY_OWNERS
diff --git a/base/memory/aligned_memory.cc b/base/memory/aligned_memory.cc
new file mode 100644
index 0000000..93cbeb5
--- /dev/null
+++ b/base/memory/aligned_memory.cc
@@ -0,0 +1,47 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/aligned_memory.h"
+
+#include "base/logging.h"
+#include "build/build_config.h"
+
+#if defined(OS_ANDROID)
+#include <malloc.h>
+#endif
+
+namespace base {
+
+void* AlignedAlloc(size_t size, size_t alignment) {
+  DCHECK_GT(size, 0U);
+  DCHECK_EQ(alignment & (alignment - 1), 0U);
+  DCHECK_EQ(alignment % sizeof(void*), 0U);
+  void* ptr = nullptr;
+#if defined(COMPILER_MSVC)
+  ptr = _aligned_malloc(size, alignment);
+// Android technically supports posix_memalign(), but does not expose it in
+// the current version of the library headers used by Chrome.  Luckily,
+// memalign() on Android returns pointers which can safely be used with
+// free(), so we can use it instead.  Issue filed to document this:
+// http://code.google.com/p/android/issues/detail?id=35391
+#elif defined(OS_ANDROID)
+  ptr = memalign(alignment, size);
+#else
+  if (posix_memalign(&ptr, alignment, size))
+    ptr = nullptr;
+#endif
+  // Since aligned allocations may fail for non-memory-related reasons, force a
+  // crash if we encounter a failed allocation, maintaining consistent behavior
+  // with a normal allocation failure in Chrome.
+  if (!ptr) {
+    DLOG(ERROR) << "If you crashed here, your aligned allocation is incorrect: "
+                << "size=" << size << ", alignment=" << alignment;
+    CHECK(false);
+  }
+  // Sanity check alignment just to be safe.
+  DCHECK_EQ(reinterpret_cast<uintptr_t>(ptr) & (alignment - 1), 0U);
+  return ptr;
+}
+
+}  // namespace base
diff --git a/base/memory/aligned_memory.h b/base/memory/aligned_memory.h
new file mode 100644
index 0000000..89f9505
--- /dev/null
+++ b/base/memory/aligned_memory.h
@@ -0,0 +1,58 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_ALIGNED_MEMORY_H_
+#define BASE_MEMORY_ALIGNED_MEMORY_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <type_traits>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "build/build_config.h"
+
+#if defined(COMPILER_MSVC)
+#include <malloc.h>
+#else
+#include <stdlib.h>
+#endif
+
+// A runtime sized aligned allocation can be created:
+//
+//   float* my_array = static_cast<float*>(AlignedAlloc(size, alignment));
+//
+//   // ... later, to release the memory:
+//   AlignedFree(my_array);
+//
+// Or using unique_ptr:
+//
+//   std::unique_ptr<float, AlignedFreeDeleter> my_array(
+//       static_cast<float*>(AlignedAlloc(size, alignment)));
+
+namespace base {
+
+// This can be replaced with std::aligned_alloc when we have C++17.
+BASE_EXPORT void* AlignedAlloc(size_t size, size_t alignment);
+
+inline void AlignedFree(void* ptr) {
+#if defined(COMPILER_MSVC)
+  _aligned_free(ptr);
+#else
+  free(ptr);
+#endif
+}
+
+// Deleter for use with unique_ptr. E.g., use as
+//   std::unique_ptr<Foo, base::AlignedFreeDeleter> foo;
+struct AlignedFreeDeleter {
+  inline void operator()(void* ptr) const {
+    AlignedFree(ptr);
+  }
+};
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_ALIGNED_MEMORY_H_
diff --git a/base/memory/aligned_memory_unittest.cc b/base/memory/aligned_memory_unittest.cc
new file mode 100644
index 0000000..e354f38
--- /dev/null
+++ b/base/memory/aligned_memory_unittest.cc
@@ -0,0 +1,46 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/aligned_memory.h"
+
+#include <memory>
+
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#define EXPECT_ALIGNED(ptr, align) \
+    EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(ptr) & (align - 1))
+
+namespace base {
+
+TEST(AlignedMemoryTest, DynamicAllocation) {
+  void* p = AlignedAlloc(8, 8);
+  EXPECT_TRUE(p);
+  EXPECT_ALIGNED(p, 8);
+  AlignedFree(p);
+
+  p = AlignedAlloc(8, 16);
+  EXPECT_TRUE(p);
+  EXPECT_ALIGNED(p, 16);
+  AlignedFree(p);
+
+  p = AlignedAlloc(8, 256);
+  EXPECT_TRUE(p);
+  EXPECT_ALIGNED(p, 256);
+  AlignedFree(p);
+
+  p = AlignedAlloc(8, 4096);
+  EXPECT_TRUE(p);
+  EXPECT_ALIGNED(p, 4096);
+  AlignedFree(p);
+}
+
+TEST(AlignedMemoryTest, ScopedDynamicAllocation) {
+  std::unique_ptr<float, AlignedFreeDeleter> p(
+      static_cast<float*>(AlignedAlloc(8, 8)));
+  EXPECT_TRUE(p.get());
+  EXPECT_ALIGNED(p.get(), 8);
+}
+
+}  // namespace base
diff --git a/base/memory/discardable_memory.cc b/base/memory/discardable_memory.cc
new file mode 100644
index 0000000..f0730aa
--- /dev/null
+++ b/base/memory/discardable_memory.cc
@@ -0,0 +1,13 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/discardable_memory.h"
+
+namespace base {
+
+DiscardableMemory::DiscardableMemory() = default;
+
+DiscardableMemory::~DiscardableMemory() = default;
+
+}  // namespace base
diff --git a/base/memory/discardable_memory.h b/base/memory/discardable_memory.h
new file mode 100644
index 0000000..5c632d1
--- /dev/null
+++ b/base/memory/discardable_memory.h
@@ -0,0 +1,78 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_DISCARDABLE_MEMORY_H_
+#define BASE_MEMORY_DISCARDABLE_MEMORY_H_
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+
+namespace base {
+
+namespace trace_event {
+class MemoryAllocatorDump;
+class ProcessMemoryDump;
+}
+
+// Discardable memory is used to cache large objects without worrying about
+// blowing out memory, both on mobile devices where there is no swap, and
+// desktop devices where unused free memory should be used to help the user
+// experience. This is preferable to releasing memory in response to an OOM
+// signal because it is simpler and provides system-wide management of
+// purgeable memory, though it has less flexibility as to which objects get
+// discarded.
+//
+// Discardable memory has two states: locked and unlocked. While the memory is
+// locked, it will not be discarded. Unlocking the memory allows the
+// discardable memory system and the OS to reclaim it if needed. Locks do not
+// nest.
+//
+// Notes:
+//   - The paging behavior of memory while it is locked is not specified. While
+//     mobile platforms will not swap it out, it may qualify for swapping
+//     on desktop platforms. It is not expected that this will matter, as the
+//     preferred pattern of usage for DiscardableMemory is to lock down the
+//     memory, use it as quickly as possible, and then unlock it.
+//   - Because of memory alignment, the amount of memory allocated can be
+//     larger than the requested memory size. It is not very efficient for
+//     small allocations.
+//   - A discardable memory instance is not thread safe. It is the
+//     responsibility of users of discardable memory to ensure there are no
+//     races.
+//
+class BASE_EXPORT DiscardableMemory {
+ public:
+  DiscardableMemory();
+  virtual ~DiscardableMemory();
+
+  // Locks the memory so that it will not be purged by the system. Returns
+  // true on success. If the return value is false then this object should be
+  // discarded and a new one should be created.
+  virtual bool Lock() WARN_UNUSED_RESULT = 0;
+
+  // Unlocks the memory so that it can be purged by the system. Must be called
+  // after every successful lock call.
+  virtual void Unlock() = 0;
+
+  // Returns the memory address held by this object. The object must be locked
+  // before calling this.
+  virtual void* data() const = 0;
+
+  // Handy method to simplify calling data() with a reinterpret_cast.
+  template<typename T> T* data_as() const {
+    return reinterpret_cast<T*>(data());
+  }
+
+  // Used for dumping the statistics of discardable memory allocated in tracing.
+  // Returns a new MemoryAllocatorDump in the |pmd| with the size of the
+  // discardable memory. The MemoryAllocatorDump created is owned by |pmd|. See
+  // ProcessMemoryDump::CreateAllocatorDump.
+  virtual trace_event::MemoryAllocatorDump* CreateMemoryAllocatorDump(
+      const char* name,
+      trace_event::ProcessMemoryDump* pmd) const = 0;
+};
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_DISCARDABLE_MEMORY_H_
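
The interface implies a strict lock/use/unlock discipline, roughly as in this
sketch (DrawFromCache and UsePixels are hypothetical; allocation itself goes
through the allocator in the next file):

    // |memory| is an unlocked DiscardableMemory instance owned by some cache.
    bool DrawFromCache(base::DiscardableMemory* memory) {
      if (!memory->Lock())
        return false;  // Contents were purged; the caller must recreate them.
      uint8_t* pixels = memory->data_as<uint8_t>();
      UsePixels(pixels);  // work on the data while it is pinned
      memory->Unlock();   // make the pages eligible for purging again
      return true;
    }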
diff --git a/base/memory/discardable_memory_allocator.cc b/base/memory/discardable_memory_allocator.cc
new file mode 100644
index 0000000..3dbb276
--- /dev/null
+++ b/base/memory/discardable_memory_allocator.cc
@@ -0,0 +1,29 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/discardable_memory_allocator.h"
+
+#include "base/logging.h"
+
+namespace base {
+namespace {
+
+DiscardableMemoryAllocator* g_discardable_allocator = nullptr;
+
+}  // namespace
+
+// static
+void DiscardableMemoryAllocator::SetInstance(
+    DiscardableMemoryAllocator* allocator) {
+  DCHECK(!allocator || !g_discardable_allocator);
+  g_discardable_allocator = allocator;
+}
+
+// static
+DiscardableMemoryAllocator* DiscardableMemoryAllocator::GetInstance() {
+  DCHECK(g_discardable_allocator);
+  return g_discardable_allocator;
+}
+
+}  // namespace base
diff --git a/base/memory/discardable_memory_allocator.h b/base/memory/discardable_memory_allocator.h
new file mode 100644
index 0000000..8d74b16
--- /dev/null
+++ b/base/memory/discardable_memory_allocator.h
@@ -0,0 +1,38 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_DISCARDABLE_MEMORY_ALLOCATOR_H_
+#define BASE_MEMORY_DISCARDABLE_MEMORY_ALLOCATOR_H_
+
+#include <stddef.h>
+
+#include <memory>
+
+#include "base/base_export.h"
+
+namespace base {
+class DiscardableMemory;
+
+class BASE_EXPORT DiscardableMemoryAllocator {
+ public:
+  // Returns the allocator instance.
+  static DiscardableMemoryAllocator* GetInstance();
+
+  // Sets the allocator instance. Can only be called once, e.g. on startup.
+  // Ownership of |instance| remains with the caller.
+  static void SetInstance(DiscardableMemoryAllocator* allocator);
+
+  // Giant WARNING: Discardable[Shared]Memory is only implemented on Android. On
+  // non-Android platforms, it behaves exactly the same as SharedMemory.
+  // See LockPages() in discardable_shared_memory.cc.
+  virtual std::unique_ptr<DiscardableMemory> AllocateLockedDiscardableMemory(
+      size_t size) = 0;
+
+ protected:
+  virtual ~DiscardableMemoryAllocator() = default;
+};
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_DISCARDABLE_MEMORY_ALLOCATOR_H_
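
Putting the two headers together: startup code installs a single allocator and
call sites allocate through it, receiving memory that is already locked (a
sketch; MyDiscardableMemoryAllocator and |source| are hypothetical):

    // At startup. The allocator is intentionally leaked; ownership never
    // passes to SetInstance().
    static MyDiscardableMemoryAllocator* const allocator =
        new MyDiscardableMemoryAllocator;
    base::DiscardableMemoryAllocator::SetInstance(allocator);

    // At a call site. The returned memory is locked and ready to use.
    std::unique_ptr<base::DiscardableMemory> memory =
        base::DiscardableMemoryAllocator::GetInstance()
            ->AllocateLockedDiscardableMemory(64 * 1024);
    memcpy(memory->data(), source, 64 * 1024);
    memory->Unlock();  // now purgeable; Lock() again before touching it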
diff --git a/base/memory/discardable_shared_memory.cc b/base/memory/discardable_shared_memory.cc
new file mode 100644
index 0000000..3b6b4db
--- /dev/null
+++ b/base/memory/discardable_shared_memory.cc
@@ -0,0 +1,514 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/discardable_shared_memory.h"
+
+#include <stdint.h>
+
+#include <algorithm>
+
+#include "base/atomicops.h"
+#include "base/bits.h"
+#include "base/logging.h"
+#include "base/memory/shared_memory_tracker.h"
+#include "base/numerics/safe_math.h"
+#include "base/process/process_metrics.h"
+#include "base/trace_event/memory_allocator_dump.h"
+#include "base/trace_event/process_memory_dump.h"
+#include "build/build_config.h"
+
+#if defined(OS_POSIX) && !defined(OS_NACL)
+// For madvise() which is available on all POSIX compatible systems.
+#include <sys/mman.h>
+#endif
+
+#if defined(OS_ANDROID)
+#include "third_party/ashmem/ashmem.h"
+#endif
+
+#if defined(OS_WIN)
+#include <windows.h>
+#include "base/win/windows_version.h"
+#endif
+
+namespace base {
+namespace {
+
+// Use a machine-sized pointer as atomic type. It will use the Atomic32 or
+// Atomic64 routines, depending on the architecture.
+typedef intptr_t AtomicType;
+typedef uintptr_t UAtomicType;
+
+// Template specialization for timestamp serialization/deserialization. This
+// is used to serialize timestamps using Unix time on systems where AtomicType
+// does not have enough precision to contain a timestamp in the standard
+// serialized format.
+template <int>
+Time TimeFromWireFormat(int64_t value);
+template <int>
+int64_t TimeToWireFormat(Time time);
+
+// Serialize to Unix time when using 4-byte wire format.
+// Note: on 19 January 2038, this will cease to work.
+template <>
+Time ALLOW_UNUSED_TYPE TimeFromWireFormat<4>(int64_t value) {
+  return value ? Time::UnixEpoch() + TimeDelta::FromSeconds(value) : Time();
+}
+template <>
+int64_t ALLOW_UNUSED_TYPE TimeToWireFormat<4>(Time time) {
+  return time > Time::UnixEpoch() ? (time - Time::UnixEpoch()).InSeconds() : 0;
+}
+
+// Standard serialization format when using 8-byte wire format.
+template <>
+Time ALLOW_UNUSED_TYPE TimeFromWireFormat<8>(int64_t value) {
+  return Time::FromInternalValue(value);
+}
+template <>
+int64_t ALLOW_UNUSED_TYPE TimeToWireFormat<8>(Time time) {
+  return time.ToInternalValue();
+}
+
+struct SharedState {
+  enum LockState { UNLOCKED = 0, LOCKED = 1 };
+
+  explicit SharedState(AtomicType ivalue) { value.i = ivalue; }
+  SharedState(LockState lock_state, Time timestamp) {
+    int64_t wire_timestamp = TimeToWireFormat<sizeof(AtomicType)>(timestamp);
+    DCHECK_GE(wire_timestamp, 0);
+    DCHECK_EQ(lock_state & ~1, 0);
+    value.u = (static_cast<UAtomicType>(wire_timestamp) << 1) | lock_state;
+  }
+
+  LockState GetLockState() const { return static_cast<LockState>(value.u & 1); }
+
+  Time GetTimestamp() const {
+    return TimeFromWireFormat<sizeof(AtomicType)>(value.u >> 1);
+  }
+
+  // Bit 1: Lock state. Bit is set when locked.
+  // Bit 2..sizeof(AtomicType)*8: Usage timestamp. NULL time when locked or
+  // purged.
+  union {
+    AtomicType i;
+    UAtomicType u;
+  } value;
+};
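+
+// For example, with an 8-byte AtomicType, an UNLOCKED state stamped with
+// internal time value |t| serializes as (t << 1) | 0, while LOCKED with a
+// null time is simply 1: GetLockState() reads the low bit and GetTimestamp()
+// shifts it away.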
+
+// Shared state is stored at offset 0 in shared memory segments.
+SharedState* SharedStateFromSharedMemory(
+    const WritableSharedMemoryMapping& shared_memory) {
+  DCHECK(shared_memory.IsValid());
+  return static_cast<SharedState*>(shared_memory.memory());
+}
+
+// Round up |size| to a multiple of page size.
+size_t AlignToPageSize(size_t size) {
+  return bits::Align(size, base::GetPageSize());
+}
+
+}  // namespace
+
+DiscardableSharedMemory::DiscardableSharedMemory()
+    : mapped_size_(0), locked_page_count_(0) {
+}
+
+DiscardableSharedMemory::DiscardableSharedMemory(
+    UnsafeSharedMemoryRegion shared_memory_region)
+    : shared_memory_region_(std::move(shared_memory_region)),
+      mapped_size_(0),
+      locked_page_count_(0) {}
+
+DiscardableSharedMemory::~DiscardableSharedMemory() = default;
+
+bool DiscardableSharedMemory::CreateAndMap(size_t size) {
+  CheckedNumeric<size_t> checked_size = size;
+  checked_size += AlignToPageSize(sizeof(SharedState));
+  if (!checked_size.IsValid())
+    return false;
+
+  shared_memory_region_ =
+      UnsafeSharedMemoryRegion::Create(checked_size.ValueOrDie());
+
+  if (!shared_memory_region_.IsValid())
+    return false;
+
+  shared_memory_mapping_ = shared_memory_region_.Map();
+  if (!shared_memory_mapping_.IsValid())
+    return false;
+
+  mapped_size_ = shared_memory_mapping_.mapped_size() -
+                 AlignToPageSize(sizeof(SharedState));
+
+  locked_page_count_ = AlignToPageSize(mapped_size_) / base::GetPageSize();
+#if DCHECK_IS_ON()
+  for (size_t page = 0; page < locked_page_count_; ++page)
+    locked_pages_.insert(page);
+#endif
+
+  DCHECK(last_known_usage_.is_null());
+  SharedState new_state(SharedState::LOCKED, Time());
+  subtle::Release_Store(
+      &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i,
+      new_state.value.i);
+  return true;
+}
+
+bool DiscardableSharedMemory::Map(size_t size) {
+  DCHECK(!shared_memory_mapping_.IsValid());
+  if (shared_memory_mapping_.IsValid())
+    return false;
+
+  shared_memory_mapping_ = shared_memory_region_.MapAt(
+      0, AlignToPageSize(sizeof(SharedState)) + size);
+  if (!shared_memory_mapping_.IsValid())
+    return false;
+
+  mapped_size_ = shared_memory_mapping_.mapped_size() -
+                 AlignToPageSize(sizeof(SharedState));
+
+  locked_page_count_ = AlignToPageSize(mapped_size_) / base::GetPageSize();
+#if DCHECK_IS_ON()
+  for (size_t page = 0; page < locked_page_count_; ++page)
+    locked_pages_.insert(page);
+#endif
+
+  return true;
+}
+
+bool DiscardableSharedMemory::Unmap() {
+  if (!shared_memory_mapping_.IsValid())
+    return false;
+
+  shared_memory_mapping_ = WritableSharedMemoryMapping();
+  locked_page_count_ = 0;
+#if DCHECK_IS_ON()
+  locked_pages_.clear();
+#endif
+  mapped_size_ = 0;
+  return true;
+}
+
+DiscardableSharedMemory::LockResult DiscardableSharedMemory::Lock(
+    size_t offset, size_t length) {
+  DCHECK_EQ(AlignToPageSize(offset), offset);
+  DCHECK_EQ(AlignToPageSize(length), length);
+
+  // Calls to this function must be synchronized properly.
+  DFAKE_SCOPED_LOCK(thread_collision_warner_);
+
+  DCHECK(shared_memory_mapping_.IsValid());
+
+  // We need to successfully acquire the platform independent lock before
+  // individual pages can be locked.
+  if (!locked_page_count_) {
+    // Return false when instance has been purged or not initialized properly
+    // by checking if |last_known_usage_| is NULL.
+    if (last_known_usage_.is_null())
+      return FAILED;
+
+    SharedState old_state(SharedState::UNLOCKED, last_known_usage_);
+    SharedState new_state(SharedState::LOCKED, Time());
+    SharedState result(subtle::Acquire_CompareAndSwap(
+        &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i,
+        old_state.value.i, new_state.value.i));
+    if (result.value.u != old_state.value.u) {
+      // Update |last_known_usage_| in case the above CAS failed because of
+      // an incorrect timestamp.
+      last_known_usage_ = result.GetTimestamp();
+      return FAILED;
+    }
+  }
+
+  // Zero for length means "everything onward".
+  if (!length)
+    length = AlignToPageSize(mapped_size_) - offset;
+
+  size_t start = offset / base::GetPageSize();
+  size_t end = start + length / base::GetPageSize();
+  DCHECK_LE(start, end);
+  DCHECK_LE(end, AlignToPageSize(mapped_size_) / base::GetPageSize());
+
+  // Add pages to |locked_page_count_|.
+  // Note: Locking a page that is already locked is an error.
+  locked_page_count_ += end - start;
+#if DCHECK_IS_ON()
+  // Detect incorrect usage by keeping track of exactly what pages are locked.
+  for (auto page = start; page < end; ++page) {
+    auto result = locked_pages_.insert(page);
+    DCHECK(result.second);
+  }
+  DCHECK_EQ(locked_pages_.size(), locked_page_count_);
+#endif
+
+  // Always behave as if memory was purged when trying to lock a 0 byte
+  // segment.
+  if (!length)
+    return PURGED;
+
+#if defined(OS_ANDROID)
+  // Ensure that the platform won't discard the required pages.
+  return LockPages(shared_memory_region_,
+                   AlignToPageSize(sizeof(SharedState)) + offset, length);
+#elif defined(OS_MACOSX)
+  // On macOS, there is no mechanism to lock pages. However, we do need to call
+  // madvise(MADV_FREE_REUSE) in order to correctly update accounting for memory
+  // footprint via task_info().
+  //
+  // Note that calling madvise(MADV_FREE_REUSE) on regions that haven't had
+  // madvise(MADV_FREE_REUSABLE) called on them has no effect.
+  //
+  // Note that the corresponding call to MADV_FREE_REUSABLE is in Purge(), since
+  // that's where the memory is actually released, rather than Unlock(), which
+  // is a no-op on macOS.
+  //
+  // For more information, see
+  // https://bugs.chromium.org/p/chromium/issues/detail?id=823915.
+  if (madvise(reinterpret_cast<char*>(shared_memory_mapping_.memory()) +
+                  AlignToPageSize(sizeof(SharedState)),
+              AlignToPageSize(mapped_size_), MADV_FREE_REUSE))
+    DPLOG(ERROR) << "madvise() failed";
+  return DiscardableSharedMemory::SUCCESS;
+#else
+  return DiscardableSharedMemory::SUCCESS;
+#endif
+}
+
+void DiscardableSharedMemory::Unlock(size_t offset, size_t length) {
+  DCHECK_EQ(AlignToPageSize(offset), offset);
+  DCHECK_EQ(AlignToPageSize(length), length);
+
+  // Calls to this function must be synchronized properly.
+  DFAKE_SCOPED_LOCK(thread_collision_warner_);
+
+  // Passing zero for |length| means "everything onward". Note that |length| may
+  // still be zero after this calculation, e.g. if |mapped_size_| is zero.
+  if (!length)
+    length = AlignToPageSize(mapped_size_) - offset;
+
+  DCHECK(shared_memory_mapping_.IsValid());
+
+  // Allow the pages to be discarded by the platform, if supported.
+  UnlockPages(shared_memory_region_,
+              AlignToPageSize(sizeof(SharedState)) + offset, length);
+
+  size_t start = offset / base::GetPageSize();
+  size_t end = start + length / base::GetPageSize();
+  DCHECK_LE(start, end);
+  DCHECK_LE(end, AlignToPageSize(mapped_size_) / base::GetPageSize());
+
+  // Remove pages from |locked_page_count_|.
+  // Note: Unlocking a page that is not locked is an error.
+  DCHECK_GE(locked_page_count_, end - start);
+  locked_page_count_ -= end - start;
+#if DCHECK_IS_ON()
+  // Detect incorrect usage by keeping track of exactly what pages are locked.
+  for (auto page = start; page < end; ++page) {
+    auto erased_count = locked_pages_.erase(page);
+    DCHECK_EQ(1u, erased_count);
+  }
+  DCHECK_EQ(locked_pages_.size(), locked_page_count_);
+#endif
+
+  // Early out and avoid releasing the platform independent lock if some pages
+  // are still locked.
+  if (locked_page_count_)
+    return;
+
+  Time current_time = Now();
+  DCHECK(!current_time.is_null());
+
+  SharedState old_state(SharedState::LOCKED, Time());
+  SharedState new_state(SharedState::UNLOCKED, current_time);
+  // Note: timestamp cannot be NULL as that is a unique value used when
+  // locked or purged.
+  DCHECK(!new_state.GetTimestamp().is_null());
+  // Timestamp precision should at least be accurate to the second.
+  DCHECK_EQ((new_state.GetTimestamp() - Time::UnixEpoch()).InSeconds(),
+            (current_time - Time::UnixEpoch()).InSeconds());
+  SharedState result(subtle::Release_CompareAndSwap(
+      &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i,
+      old_state.value.i, new_state.value.i));
+
+  DCHECK_EQ(old_state.value.u, result.value.u);
+
+  last_known_usage_ = current_time;
+}
+
+void* DiscardableSharedMemory::memory() const {
+  return reinterpret_cast<uint8_t*>(shared_memory_mapping_.memory()) +
+         AlignToPageSize(sizeof(SharedState));
+}
+
+bool DiscardableSharedMemory::Purge(Time current_time) {
+  // Calls to this function must be synchronized properly.
+  DFAKE_SCOPED_LOCK(thread_collision_warner_);
+  DCHECK(shared_memory_mapping_.IsValid());
+
+  SharedState old_state(SharedState::UNLOCKED, last_known_usage_);
+  SharedState new_state(SharedState::UNLOCKED, Time());
+  SharedState result(subtle::Acquire_CompareAndSwap(
+      &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i,
+      old_state.value.i, new_state.value.i));
+
+  // Update |last_known_usage_| to |current_time| if the memory is locked. This
+  // allows the caller to determine if purging failed because the last known
+  // usage was incorrect or because the memory was locked. In the second case,
+  // the caller should most likely wait for some amount of time before
+  // attempting to purge the memory again.
+  if (result.value.u != old_state.value.u) {
+    last_known_usage_ = result.GetLockState() == SharedState::LOCKED
+                            ? current_time
+                            : result.GetTimestamp();
+    return false;
+  }
+
+// The next section releases as many resources as possible from the purging
+// process, until the client process notices the purge and releases its own
+// references.
+// Note: this memory will not be accessed again. The segment will be freed
+// asynchronously at a later time, so just do the best we can immediately.
+#if defined(OS_POSIX) && !defined(OS_NACL)
+// Linux and Android provide MADV_REMOVE, which is preferred as it has a
+// behavior that can be verified in tests. Other POSIX flavors (MacOSX, BSDs)
+// provide MADV_FREE, which has the same result but purges memory lazily.
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+#define MADV_PURGE_ARGUMENT MADV_REMOVE
+#elif defined(OS_MACOSX)
+// MADV_FREE_REUSABLE is similar to MADV_FREE, but also marks the pages with the
+// reusable bit, which allows both Activity Monitor and memory-infra to
+// correctly track the pages.
+#define MADV_PURGE_ARGUMENT MADV_FREE_REUSABLE
+#else
+#define MADV_PURGE_ARGUMENT MADV_FREE
+#endif
+  // Advise the kernel to remove resources associated with purged pages.
+  // Subsequent accesses of memory pages will succeed, but might result in
+  // zero-fill-on-demand pages.
+  if (madvise(reinterpret_cast<char*>(shared_memory_mapping_.memory()) +
+                  AlignToPageSize(sizeof(SharedState)),
+              AlignToPageSize(mapped_size_), MADV_PURGE_ARGUMENT)) {
+    DPLOG(ERROR) << "madvise() failed";
+  }
+#elif defined(OS_WIN)
+  if (base::win::GetVersion() >= base::win::VERSION_WIN8_1) {
+    // Discard the purged pages, which releases the physical storage (resident
+    // memory, compressed or swapped), but leaves them reserved & committed.
+    // This does not free commit for use by other applications, but allows the
+    // system to avoid compressing/swapping these pages to free physical memory.
+    static const auto discard_virtual_memory =
+        reinterpret_cast<decltype(&::DiscardVirtualMemory)>(GetProcAddress(
+            GetModuleHandle(L"kernel32.dll"), "DiscardVirtualMemory"));
+    if (discard_virtual_memory) {
+      DWORD discard_result = discard_virtual_memory(
+          reinterpret_cast<char*>(shared_memory_mapping_.memory()) +
+              AlignToPageSize(sizeof(SharedState)),
+          AlignToPageSize(mapped_size_));
+      if (discard_result != ERROR_SUCCESS) {
+        DLOG(DCHECK) << "DiscardVirtualMemory() failed in Purge(): "
+                     << logging::SystemErrorCodeToString(discard_result);
+      }
+    }
+  }
+#endif
+
+  last_known_usage_ = Time();
+  return true;
+}
+
+bool DiscardableSharedMemory::IsMemoryResident() const {
+  DCHECK(shared_memory_mapping_.IsValid());
+
+  SharedState result(subtle::NoBarrier_Load(
+      &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i));
+
+  return result.GetLockState() == SharedState::LOCKED ||
+         !result.GetTimestamp().is_null();
+}
+
+bool DiscardableSharedMemory::IsMemoryLocked() const {
+  DCHECK(shared_memory_mapping_.IsValid());
+
+  SharedState result(subtle::NoBarrier_Load(
+      &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i));
+
+  return result.GetLockState() == SharedState::LOCKED;
+}
+
+void DiscardableSharedMemory::Close() {
+  shared_memory_region_ = UnsafeSharedMemoryRegion();
+}
+
+void DiscardableSharedMemory::CreateSharedMemoryOwnershipEdge(
+    trace_event::MemoryAllocatorDump* local_segment_dump,
+    trace_event::ProcessMemoryDump* pmd,
+    bool is_owned) const {
+  auto* shared_memory_dump = SharedMemoryTracker::GetOrCreateSharedMemoryDump(
+      shared_memory_mapping_, pmd);
+  // TODO(ssid): Clean this up with a new API to inherit the size of the parent
+  // dump once we send the full PMD and calculate sizes inside chrome,
+  // crbug.com/704203.
+  size_t resident_size = shared_memory_dump->GetSizeInternal();
+  local_segment_dump->AddScalar(trace_event::MemoryAllocatorDump::kNameSize,
+                                trace_event::MemoryAllocatorDump::kUnitsBytes,
+                                resident_size);
+
+  // By creating an edge with a higher |importance| (w.r.t. non-owned dumps),
+  // the tracing UI will account the effective size of the segment to the
+  // client instead of the manager.
+  // TODO(ssid): Define better constants in MemoryAllocatorDump for importance
+  // values, crbug.com/754793.
+  const int kImportance = is_owned ? 2 : 0;
+  auto shared_memory_guid = shared_memory_mapping_.guid();
+  local_segment_dump->AddString("id", "hash", shared_memory_guid.ToString());
+
+  // Owned discardable segments, which are allocated by the client process,
+  // could have been cleared by the discardable manager. So the segment need
+  // not exist in memory, and weak dumps are created to indicate to the UI
+  // that the dump should exist only if the manager also created the global
+  // dump edge.
+  if (is_owned) {
+    pmd->CreateWeakSharedMemoryOwnershipEdge(local_segment_dump->guid(),
+                                             shared_memory_guid, kImportance);
+  } else {
+    pmd->CreateSharedMemoryOwnershipEdge(local_segment_dump->guid(),
+                                         shared_memory_guid, kImportance);
+  }
+}
+
+// static
+DiscardableSharedMemory::LockResult DiscardableSharedMemory::LockPages(
+    const UnsafeSharedMemoryRegion& region,
+    size_t offset,
+    size_t length) {
+#if defined(OS_ANDROID)
+  if (region.IsValid()) {
+    int pin_result =
+        ashmem_pin_region(region.GetPlatformHandle(), offset, length);
+    if (pin_result == ASHMEM_WAS_PURGED)
+      return PURGED;
+    if (pin_result < 0)
+      return FAILED;
+  }
+#endif
+  return SUCCESS;
+}
+
+// static
+void DiscardableSharedMemory::UnlockPages(
+    const UnsafeSharedMemoryRegion& region,
+    size_t offset,
+    size_t length) {
+#if defined(OS_ANDROID)
+  if (region.IsValid()) {
+    int unpin_result =
+        ashmem_unpin_region(region.GetPlatformHandle(), offset, length);
+    DCHECK_EQ(0, unpin_result);
+  }
+#endif
+}
+
+Time DiscardableSharedMemory::Now() const {
+  return Time::Now();
+}
+
+}  // namespace base
diff --git a/base/memory/discardable_shared_memory.h b/base/memory/discardable_shared_memory.h
new file mode 100644
index 0000000..52a78b1
--- /dev/null
+++ b/base/memory/discardable_shared_memory.h
@@ -0,0 +1,187 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_DISCARDABLE_SHARED_MEMORY_H_
+#define BASE_MEMORY_DISCARDABLE_SHARED_MEMORY_H_
+
+#include <stddef.h>
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/shared_memory_mapping.h"
+#include "base/memory/unsafe_shared_memory_region.h"
+#include "base/threading/thread_collision_warner.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+
+#if DCHECK_IS_ON()
+#include <set>
+#endif
+
+// Linux (including Android) supports the MADV_REMOVE argument with madvise(),
+// which has the behavior of reliably causing zero-fill-on-demand pages to
+// be returned after a call. Here we define
+// DISCARDABLE_SHARED_MEMORY_ZERO_FILL_ON_DEMAND_PAGES_AFTER_PURGE on Linux
+// and Android to indicate that this type of behavior can be expected on
+// those platforms. Note that madvise() will still be used on other POSIX
+// platforms but doesn't provide the zero-fill-on-demand pages guarantee.
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+#define DISCARDABLE_SHARED_MEMORY_ZERO_FILL_ON_DEMAND_PAGES_AFTER_PURGE
+#endif
+
+namespace base {
+
+namespace trace_event {
+class MemoryAllocatorDump;
+class ProcessMemoryDump;
+}  // namespace trace_event
+
+// Platform abstraction for discardable shared memory.
+//
+// This class is not thread-safe. Clients are responsible for synchronizing
+// access to an instance of this class.
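+//
+// A minimal usage sketch (hypothetical client code; the size and memset() use
+// are illustrative):
+//
+//   base::DiscardableSharedMemory memory;
+//   if (memory.CreateAndMap(base::GetPageSize())) {  // Created locked.
+//     memset(memory.memory(), 0, base::GetPageSize());
+//     memory.Unlock(0, 0);  // Zero length means "everything onward".
+//     if (memory.Lock(0, 0) == base::DiscardableSharedMemory::SUCCESS) {
+//       // Contents are still resident; the range is locked again.
+//       memory.Unlock(0, 0);
+//     }
+//   }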
+class BASE_EXPORT DiscardableSharedMemory {
+ public:
+  enum LockResult { SUCCESS, PURGED, FAILED };
+
+  DiscardableSharedMemory();
+
+  // Create a new DiscardableSharedMemory object from an existing, open shared
+  // memory file. Memory must be locked.
+  explicit DiscardableSharedMemory(UnsafeSharedMemoryRegion region);
+
+  // Closes any open files.
+  virtual ~DiscardableSharedMemory();
+
+  // Creates and maps a locked DiscardableSharedMemory object with |size|.
+  // Returns true on success and false on failure.
+  bool CreateAndMap(size_t size);
+
+  // Maps the locked discardable memory into the caller's address space.
+  // Returns true on success, false otherwise.
+  bool Map(size_t size);
+
+  // Unmaps the discardable shared memory from the caller's address space.
+  // Unmapping won't unlock a previously locked range.
+  // Returns true if successful; returns false on error or if the memory is
+  // not mapped.
+  bool Unmap();
+
+  // The actual size of the mapped memory (may be larger than requested).
+  size_t mapped_size() const { return mapped_size_; }
+
+  // Returns a duplicated shared memory region for this DiscardableSharedMemory
+  // object.
+  UnsafeSharedMemoryRegion DuplicateRegion() const {
+    return shared_memory_region_.Duplicate();
+  }
+
+  // Returns an ID for the shared memory region. This ID is consistent across
+  // all processes for the mapped region and is valid as long as the region is
+  // not unmapped.
+  const UnguessableToken& mapped_id() const {
+    return shared_memory_mapping_.guid();
+  }
+
+  // Locks a range of memory so that it will not be purged by the system.
+  // The range of memory must be unlocked. The result of trying to lock an
+  // already locked range is undefined. |offset| and |length| must both be
+  // a multiple of the page size as returned by GetPageSize().
+  // Passing 0 for |length| means "everything onward".
+  // Returns SUCCESS if the range was successfully locked and the memory is
+  // still resident, PURGED if the range was successfully locked but has been
+  // purged since the last time it was locked, and FAILED if the range could
+  // not be locked. Locking can fail for two reasons: the object might have
+  // been purged, or our last known usage timestamp might be out of date. The
+  // last known usage time is updated to the actual last usage timestamp if
+  // the memory is still resident, or to 0 if not.
+  LockResult Lock(size_t offset, size_t length);
+
+  // Unlocks a previously successfully locked range of memory. The range of
+  // memory must be locked. The result of trying to unlock a range that was
+  // not previously locked is undefined.
+  // |offset| and |length| must both be a multiple of the page size as returned
+  // by GetPageSize().
+  // Passing 0 for |length| means "everything onward".
+  void Unlock(size_t offset, size_t length);
+
+  // Gets a pointer to the opened discardable memory space. Discardable memory
+  // must have been mapped via Map().
+  void* memory() const;
+
+  // Returns the last known usage time for the DiscardableSharedMemory object.
+  // This may be earlier than the "true" usage time when the memory has been
+  // used by a different process. Returns a null Time if purged.
+  Time last_known_usage() const { return last_known_usage_; }
+
+  // This returns true and sets |last_known_usage_| to 0 if the
+  // DiscardableSharedMemory object was successfully purged. Purging can fail
+  // for two reasons: the object might be locked, or our last known usage
+  // timestamp might be out of date. The last known usage time is updated to
+  // |current_time| if locked, or to the actual last usage timestamp if
+  // unlocked. It is often necessary to call this function twice for the
+  // object to successfully be purged: the first call updates
+  // |last_known_usage_|, and the second call successfully purges the object
+  // using the updated |last_known_usage_|.
+  // Note: there is no guarantee that multiple calls to this function will
+  // successfully purge the object. The DiscardableSharedMemory object might
+  // be locked, or another thread/process might be able to lock and unlock it
+  // in between each call.
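+  //
+  // A hypothetical two-call sketch (assuming the segment is unlocked):
+  //
+  //   if (!memory.Purge(base::Time::Now()))  // May only refresh usage time.
+  //     memory.Purge(base::Time::Now());     // Retry with the updated value.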
+  bool Purge(Time current_time);
+
+  // Returns true if memory is still resident.
+  bool IsMemoryResident() const;
+
+  // Returns true if memory is locked.
+  bool IsMemoryLocked() const;
+
+  // Closes the open discardable memory segment.
+  // It is safe to call Close repeatedly.
+  void Close();
+
+  // For tracing: Creates ownership edge to the underlying shared memory dump
+  // which is cross process in the given |pmd|. |local_segment_dump| is the dump
+  // associated with the local discardable shared memory segment and |is_owned|
+  // is true when the current process owns the segment and the effective memory
+  // is assigned to the current process.
+  void CreateSharedMemoryOwnershipEdge(
+      trace_event::MemoryAllocatorDump* local_segment_dump,
+      trace_event::ProcessMemoryDump* pmd,
+      bool is_owned) const;
+
+ private:
+  // LockPages/UnlockPages are platform-native discardable page management
+  // helper functions. Both expect |offset| to be specified relative to the
+  // base address at which |memory| is mapped, and expect |offset| and
+  // |length| to be page-aligned by the caller.
+  // LockPages() returns SUCCESS on platforms which do not support
+  // discardable pages.
+  static LockResult LockPages(const UnsafeSharedMemoryRegion& region,
+                              size_t offset,
+                              size_t length);
+  // UnlockPages() is a no-op on platforms not supporting discardable pages.
+  static void UnlockPages(const UnsafeSharedMemoryRegion& region,
+                          size_t offset,
+                          size_t length);
+
+  // Virtual for tests.
+  virtual Time Now() const;
+
+  UnsafeSharedMemoryRegion shared_memory_region_;
+  WritableSharedMemoryMapping shared_memory_mapping_;
+  size_t mapped_size_;
+  size_t locked_page_count_;
+#if DCHECK_IS_ON()
+  std::set<size_t> locked_pages_;
+#endif
+  // Implementation is not thread-safe but still usable if clients are
+  // synchronized somehow. Use a collision warner to detect incorrect usage.
+  DFAKE_MUTEX(thread_collision_warner_);
+  Time last_known_usage_;
+
+  DISALLOW_COPY_AND_ASSIGN(DiscardableSharedMemory);
+};
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_DISCARDABLE_SHARED_MEMORY_H_
diff --git a/base/memory/discardable_shared_memory_unittest.cc b/base/memory/discardable_shared_memory_unittest.cc
new file mode 100644
index 0000000..a7310a7
--- /dev/null
+++ b/base/memory/discardable_shared_memory_unittest.cc
@@ -0,0 +1,456 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <fcntl.h>
+#include <stdint.h>
+
+#include "base/files/scoped_file.h"
+#include "base/memory/discardable_shared_memory.h"
+#include "base/memory/shared_memory_tracker.h"
+#include "base/process/process_metrics.h"
+#include "base/trace_event/memory_allocator_dump.h"
+#include "base/trace_event/process_memory_dump.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+class TestDiscardableSharedMemory : public DiscardableSharedMemory {
+ public:
+  TestDiscardableSharedMemory() = default;
+
+  explicit TestDiscardableSharedMemory(UnsafeSharedMemoryRegion region)
+      : DiscardableSharedMemory(std::move(region)) {}
+
+  void SetNow(Time now) { now_ = now; }
+
+ private:
+  // Overridden from DiscardableSharedMemory:
+  Time Now() const override { return now_; }
+
+  Time now_;
+};
+
+TEST(DiscardableSharedMemoryTest, CreateAndMap) {
+  const uint32_t kDataSize = 1024;
+
+  TestDiscardableSharedMemory memory;
+  bool rv = memory.CreateAndMap(kDataSize);
+  ASSERT_TRUE(rv);
+  EXPECT_GE(memory.mapped_size(), kDataSize);
+  EXPECT_TRUE(memory.IsMemoryLocked());
+}
+
+TEST(DiscardableSharedMemoryTest, CreateFromHandle) {
+  const uint32_t kDataSize = 1024;
+
+  TestDiscardableSharedMemory memory1;
+  bool rv = memory1.CreateAndMap(kDataSize);
+  ASSERT_TRUE(rv);
+
+  UnsafeSharedMemoryRegion shared_region = memory1.DuplicateRegion();
+  ASSERT_TRUE(shared_region.IsValid());
+
+  TestDiscardableSharedMemory memory2(std::move(shared_region));
+  rv = memory2.Map(kDataSize);
+  ASSERT_TRUE(rv);
+  EXPECT_TRUE(memory2.IsMemoryLocked());
+}
+
+TEST(DiscardableSharedMemoryTest, LockAndUnlock) {
+  const uint32_t kDataSize = 1024;
+
+  TestDiscardableSharedMemory memory1;
+  bool rv = memory1.CreateAndMap(kDataSize);
+  ASSERT_TRUE(rv);
+
+  // Memory is initially locked. Unlock it.
+  memory1.SetNow(Time::FromDoubleT(1));
+  memory1.Unlock(0, 0);
+  EXPECT_FALSE(memory1.IsMemoryLocked());
+
+  // Lock and unlock memory.
+  DiscardableSharedMemory::LockResult lock_rv = memory1.Lock(0, 0);
+  EXPECT_EQ(DiscardableSharedMemory::SUCCESS, lock_rv);
+  memory1.SetNow(Time::FromDoubleT(2));
+  memory1.Unlock(0, 0);
+
+  // Lock again before duplicating and passing ownership to new instance.
+  lock_rv = memory1.Lock(0, 0);
+  EXPECT_EQ(DiscardableSharedMemory::SUCCESS, lock_rv);
+  EXPECT_TRUE(memory1.IsMemoryLocked());
+
+  UnsafeSharedMemoryRegion shared_region = memory1.DuplicateRegion();
+  ASSERT_TRUE(shared_region.IsValid());
+
+  TestDiscardableSharedMemory memory2(std::move(shared_region));
+  rv = memory2.Map(kDataSize);
+  ASSERT_TRUE(rv);
+
+  // Unlock second instance.
+  memory2.SetNow(Time::FromDoubleT(3));
+  memory2.Unlock(0, 0);
+
+  // Both memory instances should be unlocked now.
+  EXPECT_FALSE(memory2.IsMemoryLocked());
+  EXPECT_FALSE(memory1.IsMemoryLocked());
+
+  // Lock second instance before passing ownership back to first instance.
+  lock_rv = memory2.Lock(0, 0);
+  EXPECT_EQ(DiscardableSharedMemory::SUCCESS, lock_rv);
+
+  // Memory should still be resident and locked.
+  rv = memory1.IsMemoryResident();
+  EXPECT_TRUE(rv);
+  EXPECT_TRUE(memory1.IsMemoryLocked());
+
+  // Unlock first instance.
+  memory1.SetNow(Time::FromDoubleT(4));
+  memory1.Unlock(0, 0);
+}
+
+TEST(DiscardableSharedMemoryTest, Purge) {
+  const uint32_t kDataSize = 1024;
+
+  TestDiscardableSharedMemory memory1;
+  bool rv = memory1.CreateAndMap(kDataSize);
+  ASSERT_TRUE(rv);
+
+  UnsafeSharedMemoryRegion shared_region = memory1.DuplicateRegion();
+  ASSERT_TRUE(shared_region.IsValid());
+
+  TestDiscardableSharedMemory memory2(std::move(shared_region));
+  rv = memory2.Map(kDataSize);
+  ASSERT_TRUE(rv);
+
+  // This should fail as memory is locked.
+  rv = memory1.Purge(Time::FromDoubleT(1));
+  EXPECT_FALSE(rv);
+
+  memory2.SetNow(Time::FromDoubleT(2));
+  memory2.Unlock(0, 0);
+
+  ASSERT_TRUE(memory2.IsMemoryResident());
+
+  // Memory is unlocked, but our usage timestamp is incorrect.
+  rv = memory1.Purge(Time::FromDoubleT(3));
+  EXPECT_FALSE(rv);
+
+  ASSERT_TRUE(memory2.IsMemoryResident());
+
+  // Memory is unlocked and our usage timestamp should be correct.
+  rv = memory1.Purge(Time::FromDoubleT(4));
+  EXPECT_TRUE(rv);
+
+  // Lock should fail as memory has been purged.
+  DiscardableSharedMemory::LockResult lock_rv = memory2.Lock(0, 0);
+  EXPECT_EQ(DiscardableSharedMemory::FAILED, lock_rv);
+
+  ASSERT_FALSE(memory2.IsMemoryResident());
+}
+
+TEST(DiscardableSharedMemoryTest, LastUsed) {
+  const uint32_t kDataSize = 1024;
+
+  TestDiscardableSharedMemory memory1;
+  bool rv = memory1.CreateAndMap(kDataSize);
+  ASSERT_TRUE(rv);
+
+  UnsafeSharedMemoryRegion shared_region = memory1.DuplicateRegion();
+  ASSERT_TRUE(shared_region.IsValid());
+
+  TestDiscardableSharedMemory memory2(std::move(shared_region));
+  rv = memory2.Map(kDataSize);
+  ASSERT_TRUE(rv);
+
+  memory2.SetNow(Time::FromDoubleT(1));
+  memory2.Unlock(0, 0);
+
+  EXPECT_EQ(memory2.last_known_usage(), Time::FromDoubleT(1));
+
+  DiscardableSharedMemory::LockResult lock_rv = memory2.Lock(0, 0);
+  EXPECT_EQ(DiscardableSharedMemory::SUCCESS, lock_rv);
+
+  // This should fail as memory is locked.
+  rv = memory1.Purge(Time::FromDoubleT(2));
+  ASSERT_FALSE(rv);
+
+  // Last usage should have been updated to the timestamp passed to Purge()
+  // above.
+  EXPECT_EQ(memory1.last_known_usage(), Time::FromDoubleT(2));
+
+  memory2.SetNow(Time::FromDoubleT(3));
+  memory2.Unlock(0, 0);
+
+  // Usage time should be correct for |memory2| instance.
+  EXPECT_EQ(memory2.last_known_usage(), Time::FromDoubleT(3));
+
+  // However, usage time has not changed as far as |memory1| instance knows.
+  EXPECT_EQ(memory1.last_known_usage(), Time::FromDoubleT(2));
+
+  // Memory is unlocked, but our usage timestamp is incorrect.
+  rv = memory1.Purge(Time::FromDoubleT(4));
+  EXPECT_FALSE(rv);
+
+  // The failed purge attempt should have updated usage time to the correct
+  // value.
+  EXPECT_EQ(memory1.last_known_usage(), Time::FromDoubleT(3));
+
+  // Purge memory through |memory2| instance. The last usage time should be
+  // set to 0 as a result of this.
+  rv = memory2.Purge(Time::FromDoubleT(5));
+  EXPECT_TRUE(rv);
+  EXPECT_TRUE(memory2.last_known_usage().is_null());
+
+  // This should fail as memory has already been purged and |memory1|'s usage
+  // time is incorrect as a result.
+  rv = memory1.Purge(Time::FromDoubleT(6));
+  EXPECT_FALSE(rv);
+
+  // The failed purge attempt should have updated usage time to the correct
+  // value.
+  EXPECT_TRUE(memory1.last_known_usage().is_null());
+
+  // Purge should succeed now that usage time is correct.
+  rv = memory1.Purge(Time::FromDoubleT(7));
+  EXPECT_TRUE(rv);
+}
+
+TEST(DiscardableSharedMemoryTest, LockShouldAlwaysFailAfterSuccessfulPurge) {
+  const uint32_t kDataSize = 1024;
+
+  TestDiscardableSharedMemory memory1;
+  bool rv = memory1.CreateAndMap(kDataSize);
+  ASSERT_TRUE(rv);
+
+  UnsafeSharedMemoryRegion shared_region = memory1.DuplicateRegion();
+  ASSERT_TRUE(shared_region.IsValid());
+
+  TestDiscardableSharedMemory memory2(std::move(shared_region));
+  rv = memory2.Map(kDataSize);
+  ASSERT_TRUE(rv);
+
+  memory2.SetNow(Time::FromDoubleT(1));
+  memory2.Unlock(0, 0);
+
+  rv = memory2.Purge(Time::FromDoubleT(2));
+  EXPECT_TRUE(rv);
+
+  // Lock should fail as memory has been purged.
+  DiscardableSharedMemory::LockResult lock_rv = memory2.Lock(0, 0);
+  EXPECT_EQ(DiscardableSharedMemory::FAILED, lock_rv);
+}
+
+#if defined(OS_ANDROID)
+TEST(DiscardableSharedMemoryTest, LockShouldFailIfPlatformLockPagesFails) {
+  const uint32_t kDataSize = 1024;
+
+  DiscardableSharedMemory memory1;
+  bool rv1 = memory1.CreateAndMap(kDataSize);
+  ASSERT_TRUE(rv1);
+
+  base::UnsafeSharedMemoryRegion region = memory1.DuplicateRegion();
+  int fd = region.GetPlatformHandle();
+  DiscardableSharedMemory memory2(std::move(region));
+  bool rv2 = memory2.Map(kDataSize);
+  ASSERT_TRUE(rv2);
+
+  // Unlock() the first page of memory, so we can test Lock()ing it.
+  memory2.Unlock(0, base::GetPageSize());
+  // To cause ashmem_pin_region() to fail, we arrange for it to be called with
+  // an invalid file descriptor, which requires a valid-looking fd (i.e. we
+  // can't just Close() |memory2|), but one on which the operation is invalid.
+  // We can overwrite the |memory2| fd with a handle to a different file using
+  // dup2(), which has the nice property that |memory2| still has a valid fd
+  // that it can close, etc. without errors, but on which ashmem_pin_region()
+  // will fail.
+  base::ScopedFD null(open("/dev/null", O_RDONLY));
+  ASSERT_EQ(fd, dup2(null.get(), fd));
+
+  // Now re-Lock()ing the first page should fail.
+  DiscardableSharedMemory::LockResult lock_rv =
+      memory2.Lock(0, base::GetPageSize());
+  EXPECT_EQ(DiscardableSharedMemory::FAILED, lock_rv);
+}
+#endif  // defined(OS_ANDROID)
+
+TEST(DiscardableSharedMemoryTest, LockAndUnlockRange) {
+  const uint32_t kDataSize = 32;
+
+  uint32_t data_size_in_bytes = kDataSize * base::GetPageSize();
+
+  TestDiscardableSharedMemory memory1;
+  bool rv = memory1.CreateAndMap(data_size_in_bytes);
+  ASSERT_TRUE(rv);
+
+  UnsafeSharedMemoryRegion shared_region = memory1.DuplicateRegion();
+  ASSERT_TRUE(shared_region.IsValid());
+
+  TestDiscardableSharedMemory memory2(std::move(shared_region));
+  rv = memory2.Map(data_size_in_bytes);
+  ASSERT_TRUE(rv);
+
+  // Unlock first page.
+  memory2.SetNow(Time::FromDoubleT(1));
+  memory2.Unlock(0, base::GetPageSize());
+
+  rv = memory1.Purge(Time::FromDoubleT(2));
+  EXPECT_FALSE(rv);
+
+  // Lock first page again.
+  memory2.SetNow(Time::FromDoubleT(3));
+  DiscardableSharedMemory::LockResult lock_rv =
+      memory2.Lock(0, base::GetPageSize());
+  EXPECT_NE(DiscardableSharedMemory::FAILED, lock_rv);
+
+  // Unlock first page.
+  memory2.SetNow(Time::FromDoubleT(4));
+  memory2.Unlock(0, base::GetPageSize());
+
+  rv = memory1.Purge(Time::FromDoubleT(5));
+  EXPECT_FALSE(rv);
+
+  // Unlock second page.
+  memory2.SetNow(Time::FromDoubleT(6));
+  memory2.Unlock(base::GetPageSize(), base::GetPageSize());
+
+  rv = memory1.Purge(Time::FromDoubleT(7));
+  EXPECT_FALSE(rv);
+
+  // Unlock anything onwards.
+  memory2.SetNow(Time::FromDoubleT(8));
+  memory2.Unlock(2 * base::GetPageSize(), 0);
+
+  // Memory is unlocked, but our usage timestamp is incorrect.
+  rv = memory1.Purge(Time::FromDoubleT(9));
+  EXPECT_FALSE(rv);
+
+  // The failed purge attempt should have updated usage time to the correct
+  // value.
+  EXPECT_EQ(Time::FromDoubleT(8), memory1.last_known_usage());
+
+  // Purge should now succeed.
+  rv = memory1.Purge(Time::FromDoubleT(10));
+  EXPECT_TRUE(rv);
+}
+
+TEST(DiscardableSharedMemoryTest, MappedSize) {
+  const uint32_t kDataSize = 1024;
+
+  TestDiscardableSharedMemory memory;
+  bool rv = memory.CreateAndMap(kDataSize);
+  ASSERT_TRUE(rv);
+
+  EXPECT_LE(kDataSize, memory.mapped_size());
+
+  // Mapped size should be 0 after memory segment has been unmapped.
+  rv = memory.Unmap();
+  EXPECT_TRUE(rv);
+  EXPECT_EQ(0u, memory.mapped_size());
+}
+
+TEST(DiscardableSharedMemoryTest, Close) {
+  const uint32_t kDataSize = 1024;
+
+  TestDiscardableSharedMemory memory;
+  bool rv = memory.CreateAndMap(kDataSize);
+  ASSERT_TRUE(rv);
+
+  // Mapped size should be unchanged after memory segment has been closed.
+  memory.Close();
+  EXPECT_LE(kDataSize, memory.mapped_size());
+
+  // Memory is initially locked. Unlock it.
+  memory.SetNow(Time::FromDoubleT(1));
+  memory.Unlock(0, 0);
+
+  // Lock and unlock memory.
+  DiscardableSharedMemory::LockResult lock_rv = memory.Lock(0, 0);
+  EXPECT_EQ(DiscardableSharedMemory::SUCCESS, lock_rv);
+  memory.SetNow(Time::FromDoubleT(2));
+  memory.Unlock(0, 0);
+}
+
+TEST(DiscardableSharedMemoryTest, ZeroSize) {
+  TestDiscardableSharedMemory memory;
+  bool rv = memory.CreateAndMap(0);
+  ASSERT_TRUE(rv);
+
+  EXPECT_LE(0u, memory.mapped_size());
+
+  // Memory is initially locked. Unlock it.
+  memory.SetNow(Time::FromDoubleT(1));
+  memory.Unlock(0, 0);
+
+  // Lock and unlock memory.
+  DiscardableSharedMemory::LockResult lock_rv = memory.Lock(0, 0);
+  EXPECT_NE(DiscardableSharedMemory::FAILED, lock_rv);
+  memory.SetNow(Time::FromDoubleT(2));
+  memory.Unlock(0, 0);
+}
+
+// This test checks that zero-filled pages are returned after purging a segment
+// when DISCARDABLE_SHARED_MEMORY_ZERO_FILL_ON_DEMAND_PAGES_AFTER_PURGE is
+// defined and MADV_REMOVE is supported.
+#if defined(DISCARDABLE_SHARED_MEMORY_ZERO_FILL_ON_DEMAND_PAGES_AFTER_PURGE)
+TEST(DiscardableSharedMemoryTest, ZeroFilledPagesAfterPurge) {
+  const uint32_t kDataSize = 1024;
+
+  TestDiscardableSharedMemory memory1;
+  bool rv = memory1.CreateAndMap(kDataSize);
+  ASSERT_TRUE(rv);
+
+  UnsafeSharedMemoryRegion shared_region = memory1.DuplicateRegion();
+  ASSERT_TRUE(shared_region.IsValid());
+
+  TestDiscardableSharedMemory memory2(std::move(shared_region));
+  rv = memory2.Map(kDataSize);
+  ASSERT_TRUE(rv);
+
+  // Initialize all memory to '0xaa'.
+  memset(memory2.memory(), 0xaa, kDataSize);
+
+  // Unlock memory.
+  memory2.SetNow(Time::FromDoubleT(1));
+  memory2.Unlock(0, 0);
+  EXPECT_FALSE(memory1.IsMemoryLocked());
+
+  // Memory is unlocked, but our usage timestamp is incorrect.
+  rv = memory1.Purge(Time::FromDoubleT(2));
+  EXPECT_FALSE(rv);
+  rv = memory1.Purge(Time::FromDoubleT(3));
+  EXPECT_TRUE(rv);
+
+  // Check that reading the memory after it has been purged returns
+  // zero-filled pages.
+  uint8_t expected_data[kDataSize] = {};
+  EXPECT_EQ(memcmp(memory2.memory(), expected_data, kDataSize), 0);
+}
+#endif
+
+TEST(DiscardableSharedMemoryTest, TracingOwnershipEdges) {
+  const uint32_t kDataSize = 1024;
+  TestDiscardableSharedMemory memory1;
+  bool rv = memory1.CreateAndMap(kDataSize);
+  ASSERT_TRUE(rv);
+
+  base::trace_event::MemoryDumpArgs args = {
+      base::trace_event::MemoryDumpLevelOfDetail::DETAILED};
+  trace_event::ProcessMemoryDump pmd(nullptr, args);
+  trace_event::MemoryAllocatorDump* client_dump =
+      pmd.CreateAllocatorDump("discardable_manager/map1");
+  const bool is_owned = false;
+  memory1.CreateSharedMemoryOwnershipEdge(client_dump, &pmd, is_owned);
+  const auto* shm_dump = pmd.GetAllocatorDump(
+      SharedMemoryTracker::GetDumpNameForTracing(memory1.mapped_id()));
+  EXPECT_TRUE(shm_dump);
+  EXPECT_EQ(shm_dump->GetSizeInternal(), client_dump->GetSizeInternal());
+  const auto edges = pmd.allocator_dumps_edges();
+  EXPECT_EQ(2u, edges.size());
+  EXPECT_NE(edges.end(), edges.find(shm_dump->guid()));
+  EXPECT_NE(edges.end(), edges.find(client_dump->guid()));
+  // TODO(ssid): test for weak global dump once the
+  // CreateWeakSharedMemoryOwnershipEdge() is fixed, crbug.com/661257.
+}
+
+}  // namespace base
diff --git a/base/memory/free_deleter.h b/base/memory/free_deleter.h
new file mode 100644
index 0000000..5604118
--- /dev/null
+++ b/base/memory/free_deleter.h
@@ -0,0 +1,25 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_FREE_DELETER_H_
+#define BASE_MEMORY_FREE_DELETER_H_
+
+#include <stdlib.h>
+
+namespace base {
+
+// Function object which invokes 'free' on its parameter, which must be
+// a pointer. Can be used to store malloc-allocated pointers in std::unique_ptr:
+//
+// std::unique_ptr<int, base::FreeDeleter> foo_ptr(
+//     static_cast<int*>(malloc(sizeof(int))));
+struct FreeDeleter {
+  inline void operator()(void* ptr) const {
+    free(ptr);
+  }
+};
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_FREE_DELETER_H_
diff --git a/base/memory/linked_ptr.h b/base/memory/linked_ptr.h
new file mode 100644
index 0000000..6851286
--- /dev/null
+++ b/base/memory/linked_ptr.h
@@ -0,0 +1,179 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// A "smart" pointer type with reference tracking.  Every pointer to a
+// particular object is kept on a circular linked list.  When the last pointer
+// to an object is destroyed or reassigned, the object is deleted.
+//
+// Used properly, this deletes the object when the last reference goes away.
+// There are several caveats:
+// - Like all reference counting schemes, cycles lead to leaks.
+// - Each smart pointer is actually two pointers (8 bytes instead of 4).
+// - Every time a pointer is released, the entire list of pointers to that
+//   object is traversed.  This class is therefore NOT SUITABLE when there
+//   will often be more than two or three pointers to a particular object.
+// - References are only tracked as long as linked_ptr<> objects are copied.
+//   If a linked_ptr<> is converted to a raw pointer and back, BAD THINGS
+//   will happen (double deletion).
+//
+// Note: If you use an incomplete type with linked_ptr<>, the class
+// *containing* linked_ptr<> must have a constructor and destructor (even
+// if they do nothing!).
+//
+// Thread Safety:
+//   A linked_ptr is NOT thread safe. Copying a linked_ptr object is
+//   effectively a read-write operation.
+//
+// An alternative to linked_ptr is shared_ptr, which
+//  - is also two pointers in size (8 bytes for 32 bit addresses)
+//  - is thread safe for copying and deletion
+//  - supports weak_ptrs
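+//
+// A minimal usage sketch (hypothetical; Foo is an illustrative type):
+//
+//   linked_ptr<Foo> a(new Foo);
+//   linked_ptr<Foo> b = a;  // a and b now share one reference circle.
+//   a.reset();              // Foo stays alive; b still points at it.
+//   b.reset();              // Last pointer departs; Foo is deleted.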
+
+#ifndef BASE_MEMORY_LINKED_PTR_H_
+#define BASE_MEMORY_LINKED_PTR_H_
+
+#include "base/logging.h"  // for CHECK macros
+
+// This is used internally by all instances of linked_ptr<>.  It needs to be
+// a non-template class because different types of linked_ptr<> can refer to
+// the same object (linked_ptr<Superclass>(obj) vs linked_ptr<Subclass>(obj)).
+// So it needs to be possible for different types of linked_ptr to participate
+// in the same circular linked list, which is why we need a single class type
+// here.
+//
+// DO NOT USE THIS CLASS DIRECTLY YOURSELF.  Use linked_ptr<T>.
+class linked_ptr_internal {
+ public:
+  // Create a new circle that includes only this instance.
+  void join_new() {
+    next_ = this;
+  }
+
+  // Join an existing circle.
+  void join(linked_ptr_internal const* ptr) {
+    next_ = ptr->next_;
+    ptr->next_ = this;
+  }
+
+  // Leave whatever circle we're part of.  Returns true iff we were the
+  // last member of the circle.  Once this is done, you can join() another.
+  bool depart() {
+    if (next_ == this) return true;
+    linked_ptr_internal const* p = next_;
+    while (p->next_ != this) p = p->next_;
+    p->next_ = next_;
+    return false;
+  }
+
+ private:
+  mutable linked_ptr_internal const* next_;
+};
+
+// TODO(http://crbug.com/556939): DEPRECATED: Use unique_ptr instead (now that
+// we have support for moveable types inside STL containers).
+template <typename T>
+class linked_ptr {
+ public:
+  typedef T element_type;
+
+  // Take over ownership of a raw pointer.  This should happen as soon as
+  // possible after the object is created.
+  explicit linked_ptr(T* ptr = NULL) { capture(ptr); }
+  ~linked_ptr() { depart(); }
+
+  // Copy an existing linked_ptr<>, adding ourselves to the list of references.
+  template <typename U> linked_ptr(linked_ptr<U> const& ptr) { copy(&ptr); }
+
+  linked_ptr(linked_ptr const& ptr) {
+    DCHECK_NE(&ptr, this);
+    copy(&ptr);
+  }
+
+  // Assignment releases the old value and acquires the new.
+  template <typename U> linked_ptr& operator=(linked_ptr<U> const& ptr) {
+    depart();
+    copy(&ptr);
+    return *this;
+  }
+
+  linked_ptr& operator=(linked_ptr const& ptr) {
+    if (&ptr != this) {
+      depart();
+      copy(&ptr);
+    }
+    return *this;
+  }
+
+  // Smart pointer members.
+  void reset(T* ptr = NULL) {
+    depart();
+    capture(ptr);
+  }
+  T* get() const { return value_; }
+  T* operator->() const { return value_; }
+  T& operator*() const { return *value_; }
+  // Releases ownership of the pointed-to object and returns it.
+  // Sole ownership by this linked_ptr object is required.
+  T* release() {
+    bool last = link_.depart();
+    CHECK(last);
+    T* v = value_;
+    value_ = NULL;
+    return v;
+  }
+
+  bool operator==(const T* p) const { return value_ == p; }
+  bool operator!=(const T* p) const { return value_ != p; }
+  template <typename U>
+  bool operator==(linked_ptr<U> const& ptr) const {
+    return value_ == ptr.get();
+  }
+  template <typename U>
+  bool operator!=(linked_ptr<U> const& ptr) const {
+    return value_ != ptr.get();
+  }
+
+ private:
+  template <typename U>
+  friend class linked_ptr;
+
+  T* value_;
+  linked_ptr_internal link_;
+
+  void depart() {
+    if (link_.depart()) delete value_;
+  }
+
+  void capture(T* ptr) {
+    value_ = ptr;
+    link_.join_new();
+  }
+
+  template <typename U> void copy(linked_ptr<U> const* ptr) {
+    value_ = ptr->get();
+    if (value_)
+      link_.join(&ptr->link_);
+    else
+      link_.join_new();
+  }
+};
+
+template<typename T> inline
+bool operator==(T* ptr, const linked_ptr<T>& x) {
+  return ptr == x.get();
+}
+
+template<typename T> inline
+bool operator!=(T* ptr, const linked_ptr<T>& x) {
+  return ptr != x.get();
+}
+
+// A function to convert T* into linked_ptr<T>
+// Doing e.g. make_linked_ptr(new FooBarBaz<type>(arg)) is a shorter notation
+// for linked_ptr<FooBarBaz<type> >(new FooBarBaz<type>(arg))
+template <typename T>
+linked_ptr<T> make_linked_ptr(T* ptr) {
+  return linked_ptr<T>(ptr);
+}
+
+#endif  // BASE_MEMORY_LINKED_PTR_H_
diff --git a/base/memory/linked_ptr_unittest.cc b/base/memory/linked_ptr_unittest.cc
new file mode 100644
index 0000000..344ffa4
--- /dev/null
+++ b/base/memory/linked_ptr_unittest.cc
@@ -0,0 +1,108 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string>
+
+#include "base/memory/linked_ptr.h"
+#include "base/strings/stringprintf.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+int num = 0;
+
+std::string history;
+
+// Class which tracks allocation/deallocation
+struct A {
+  A(): mynum(num++) { history += base::StringPrintf("A%d ctor\n", mynum); }
+  virtual ~A() { history += base::StringPrintf("A%d dtor\n", mynum); }
+  virtual void Use() { history += base::StringPrintf("A%d use\n", mynum); }
+  int mynum;
+};
+
+// Subclass
+struct B: public A {
+  B() { history += base::StringPrintf("B%d ctor\n", mynum); }
+  ~B() override { history += base::StringPrintf("B%d dtor\n", mynum); }
+  void Use() override { history += base::StringPrintf("B%d use\n", mynum); }
+};
+
+}  // namespace
+
+TEST(LinkedPtrTest, Test) {
+  {
+    linked_ptr<A> a0, a1, a2;
+    a0 = *&a0;  // The *& defeats Clang's -Wself-assign warning.
+    a1 = a2;
+    ASSERT_EQ(a0.get(), static_cast<A*>(nullptr));
+    ASSERT_EQ(a1.get(), static_cast<A*>(nullptr));
+    ASSERT_EQ(a2.get(), static_cast<A*>(nullptr));
+    ASSERT_TRUE(a0 == nullptr);
+    ASSERT_TRUE(a1 == nullptr);
+    ASSERT_TRUE(a2 == nullptr);
+
+    {
+      linked_ptr<A> a3(new A);
+      a0 = a3;
+      ASSERT_TRUE(a0 == a3);
+      ASSERT_TRUE(a0 != nullptr);
+      ASSERT_TRUE(a0.get() == a3);
+      ASSERT_TRUE(a0 == a3.get());
+      linked_ptr<A> a4(a0);
+      a1 = a4;
+      linked_ptr<A> a5(new A);
+      ASSERT_TRUE(a5.get() != a3);
+      ASSERT_TRUE(a5 != a3.get());
+      a2 = a5;
+      linked_ptr<B> b0(new B);
+      linked_ptr<A> a6(b0);
+      ASSERT_TRUE(b0 == a6);
+      ASSERT_TRUE(a6 == b0);
+      ASSERT_TRUE(b0 != nullptr);
+      a5 = b0;
+      a5 = b0;
+      a3->Use();
+      a4->Use();
+      a5->Use();
+      a6->Use();
+      b0->Use();
+      (*b0).Use();
+      b0.get()->Use();
+    }
+
+    a0->Use();
+    a1->Use();
+    a2->Use();
+
+    a1 = a2;
+    a2.reset(new A);
+    a0.reset();
+
+    linked_ptr<A> a7;
+  }
+
+  ASSERT_EQ(history,
+    "A0 ctor\n"
+    "A1 ctor\n"
+    "A2 ctor\n"
+    "B2 ctor\n"
+    "A0 use\n"
+    "A0 use\n"
+    "B2 use\n"
+    "B2 use\n"
+    "B2 use\n"
+    "B2 use\n"
+    "B2 use\n"
+    "B2 dtor\n"
+    "A2 dtor\n"
+    "A0 use\n"
+    "A0 use\n"
+    "A1 use\n"
+    "A3 ctor\n"
+    "A0 dtor\n"
+    "A3 dtor\n"
+    "A1 dtor\n"
+  );
+}
diff --git a/base/memory/memory_coordinator_client.cc b/base/memory/memory_coordinator_client.cc
new file mode 100644
index 0000000..7fa6232
--- /dev/null
+++ b/base/memory/memory_coordinator_client.cc
@@ -0,0 +1,27 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/memory_coordinator_client.h"
+
+#include "base/logging.h"
+
+namespace base {
+
+const char* MemoryStateToString(MemoryState state) {
+  switch (state) {
+    case MemoryState::UNKNOWN:
+      return "unknown";
+    case MemoryState::NORMAL:
+      return "normal";
+    case MemoryState::THROTTLED:
+      return "throttled";
+    case MemoryState::SUSPENDED:
+      return "suspended";
+    default:
+      NOTREACHED();
+  }
+  return "";
+}
+
+}  // namespace base
diff --git a/base/memory/memory_coordinator_client.h b/base/memory/memory_coordinator_client.h
new file mode 100644
index 0000000..804f0a6
--- /dev/null
+++ b/base/memory/memory_coordinator_client.h
@@ -0,0 +1,79 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_MEMORY_COORDINATOR_CLIENT_H_
+#define BASE_MEMORY_MEMORY_COORDINATOR_CLIENT_H_
+
+#include "base/base_export.h"
+
+namespace base {
+
+// OVERVIEW:
+//
+// MemoryCoordinatorClient is an interface which a component can implement to
+// adjust "future allocation" and "existing allocation". For "future allocation"
+// it provides a callback to observe memory state changes, and for "existing
+// allocation" it provides a callback to purge memory.
+//
+// Unlike MemoryPressureListener, memory state changes are stateful. State
+// transitions are throttled to avoid thrashing; the exact throttling period is
+// platform dependent, but will be at least 5-10 seconds. When a state change
+// notification is dispatched, clients are expected to update their allocation
+// policies (e.g. setting cache limit) that persist for the duration of the
+// memory state. Note that clients aren't expected to free up memory on memory
+// state changes. Clients should wait for a separate purge request to free up
+// memory. Purging requests will be throttled as well.
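+//
+// A minimal client sketch (hypothetical; MyCache, its limit flag, and
+// ClearEntries() are illustrative, not part of this interface):
+//
+//   class MyCache : public base::MemoryCoordinatorClient {
+//    public:
+//     void OnMemoryStateChange(base::MemoryState state) override {
+//       // Adjust the budget used for future allocations.
+//       use_small_limit_ = (state == base::MemoryState::THROTTLED);
+//     }
+//     // Free existing memory that can be reproduced later.
+//     void OnPurgeMemory() override { ClearEntries(); }
+//    private:
+//     bool use_small_limit_ = false;
+//   };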
+
+// MemoryState is an indicator that processes can use to guide their memory
+// allocation policies. For example, a process that receives the throttled
+// state can use that as a signal to decrease memory cache limits.
+// NOTE: This enum is used to back an UMA histogram, and therefore should be
+// treated as append-only.
+enum class MemoryState : int {
+  // The state is unknown.
+  UNKNOWN = -1,
+  // No memory constraints.
+  NORMAL = 0,
+  // Running and interactive but memory allocation should be throttled.
+  // Clients should set a lower budget for any memory that is used as an
+  // optimization but that is not necessary for the process to run
+  // (e.g. caches).
+  THROTTLED = 1,
+  // Still resident in memory but core processing logic has been suspended.
+  // In most cases, OnPurgeMemory() will be called before entering this state.
+  SUSPENDED = 2,
+};
+
+const int kMemoryStateMax = static_cast<int>(MemoryState::SUSPENDED) + 1;
+
+// Returns a string representation of MemoryState.
+BASE_EXPORT const char* MemoryStateToString(MemoryState state);
+
+// This is an interface for components which can respond to memory status
+// changes. The initial state is NORMAL. See MemoryCoordinatorClientRegistry for
+// threading guarantees and ownership management.
+class BASE_EXPORT MemoryCoordinatorClient {
+ public:
+  // Called when memory state has changed. Any transition can occur except for
+  // UNKNOWN. General guidelines are:
+  //  * NORMAL:    Restore the default settings for memory allocation/usage if
+  //               it has changed.
+  //  * THROTTLED: Use smaller limits for future memory allocations. You don't
+  //               need to take any action on existing allocations.
+  //  * SUSPENDED: Use much smaller limits for future memory allocations. You
+  //               don't need to take any action on existing allocations.
+  virtual void OnMemoryStateChange(MemoryState state) {}
+
+  // Called to purge memory.
+  // This callback should free up any memory that is used as an optimization, or
+  // any memory whose contents can be reproduced.
+  virtual void OnPurgeMemory() {}
+
+ protected:
+  virtual ~MemoryCoordinatorClient() = default;
+};
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_MEMORY_COORDINATOR_CLIENT_H_
diff --git a/base/memory/memory_coordinator_client_registry.cc b/base/memory/memory_coordinator_client_registry.cc
new file mode 100644
index 0000000..45b4a7f
--- /dev/null
+++ b/base/memory/memory_coordinator_client_registry.cc
@@ -0,0 +1,41 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/memory_coordinator_client_registry.h"
+
+namespace base {
+
+// static
+MemoryCoordinatorClientRegistry*
+MemoryCoordinatorClientRegistry::GetInstance() {
+  return Singleton<
+      MemoryCoordinatorClientRegistry,
+      LeakySingletonTraits<MemoryCoordinatorClientRegistry>>::get();
+}
+
+MemoryCoordinatorClientRegistry::MemoryCoordinatorClientRegistry()
+    : clients_(new ClientList) {}
+
+MemoryCoordinatorClientRegistry::~MemoryCoordinatorClientRegistry() = default;
+
+void MemoryCoordinatorClientRegistry::Register(
+    MemoryCoordinatorClient* client) {
+  clients_->AddObserver(client);
+}
+
+void MemoryCoordinatorClientRegistry::Unregister(
+    MemoryCoordinatorClient* client) {
+  clients_->RemoveObserver(client);
+}
+
+void MemoryCoordinatorClientRegistry::Notify(MemoryState state) {
+  clients_->Notify(FROM_HERE,
+                   &base::MemoryCoordinatorClient::OnMemoryStateChange, state);
+}
+
+void MemoryCoordinatorClientRegistry::PurgeMemory() {
+  clients_->Notify(FROM_HERE, &base::MemoryCoordinatorClient::OnPurgeMemory);
+}
+
+}  // namespace base
diff --git a/base/memory/memory_coordinator_client_registry.h b/base/memory/memory_coordinator_client_registry.h
new file mode 100644
index 0000000..e2c81b7
--- /dev/null
+++ b/base/memory/memory_coordinator_client_registry.h
@@ -0,0 +1,56 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_MEMORY_CLIENT_REGISTRY_H_
+#define BASE_MEMORY_MEMORY_CLIENT_REGISTRY_H_
+
+#include "base/base_export.h"
+#include "base/memory/memory_coordinator_client.h"
+#include "base/memory/singleton.h"
+#include "base/observer_list_threadsafe.h"
+
+namespace base {
+
+// MemoryCoordinatorClientRegistry is the registry of MemoryCoordinatorClients.
+// This class manages clients and provides a way to notify them of memory
+// state changes, but it isn't responsible for determining how/when to change
+// memory states.
+//
+// Threading guarantees:
+// This class uses ObserverListThreadsafe internally, which means that
+//  * Registering/unregistering callbacks are thread-safe.
+//  * Callbacks are invoked on the same thread on which they are registered.
+// See base/observer_list_threadsafe.h for reference.
+//
+// Ownership management:
+// This class doesn't take ownership of clients. Clients must be
+// unregistered before they are destroyed.
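+//
+// Typical usage (hypothetical sketch; |client| is a MemoryCoordinatorClient
+// owned by the caller):
+//
+//   auto* registry = base::MemoryCoordinatorClientRegistry::GetInstance();
+//   registry->Register(&client);  // On the thread that should be notified.
+//   // ... later, before |client| is destroyed:
+//   registry->Unregister(&client);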
+class BASE_EXPORT MemoryCoordinatorClientRegistry {
+ public:
+  static MemoryCoordinatorClientRegistry* GetInstance();
+
+  ~MemoryCoordinatorClientRegistry();
+
+  // Registers/unregisters a client. Does not take ownership of client.
+  void Register(MemoryCoordinatorClient* client);
+  void Unregister(MemoryCoordinatorClient* client);
+
+  // Notifies clients of a memory state change.
+  void Notify(MemoryState state);
+
+  // Requests purging memory.
+  void PurgeMemory();
+
+ private:
+  friend struct DefaultSingletonTraits<MemoryCoordinatorClientRegistry>;
+
+  MemoryCoordinatorClientRegistry();
+
+  using ClientList = ObserverListThreadSafe<MemoryCoordinatorClient>;
+  scoped_refptr<ClientList> clients_;
+};
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_MEMORY_CLIENT_REGISTRY_H_
diff --git a/base/memory/memory_coordinator_client_registry_unittest.cc b/base/memory/memory_coordinator_client_registry_unittest.cc
new file mode 100644
index 0000000..37ed767
--- /dev/null
+++ b/base/memory/memory_coordinator_client_registry_unittest.cc
@@ -0,0 +1,58 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/memory_coordinator_client_registry.h"
+
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+class TestMemoryCoordinatorClient : public MemoryCoordinatorClient {
+ public:
+  void OnMemoryStateChange(MemoryState state) override { state_ = state; }
+
+  void OnPurgeMemory() override { ++purge_count_; }
+
+  MemoryState state() const { return state_; }
+  size_t purge_count() const { return purge_count_; }
+
+ private:
+  MemoryState state_ = MemoryState::UNKNOWN;
+  size_t purge_count_ = 0;
+};
+
+void RunUntilIdle() {
+  base::RunLoop loop;
+  loop.RunUntilIdle();
+}
+
+TEST(MemoryCoordinatorClientRegistryTest, NotifyStateChange) {
+  MessageLoop loop;
+  auto* registry = MemoryCoordinatorClientRegistry::GetInstance();
+  TestMemoryCoordinatorClient client;
+  registry->Register(&client);
+  registry->Notify(MemoryState::THROTTLED);
+  RunUntilIdle();
+  ASSERT_EQ(MemoryState::THROTTLED, client.state());
+  registry->Unregister(&client);
+}
+
+TEST(MemoryCoordinatorClientRegistryTest, PurgeMemory) {
+  MessageLoop loop;
+  auto* registry = MemoryCoordinatorClientRegistry::GetInstance();
+  TestMemoryCoordinatorClient client;
+  registry->Register(&client);
+  registry->PurgeMemory();
+  RunUntilIdle();
+  ASSERT_EQ(1u, client.purge_count());
+  registry->Unregister(&client);
+}
+
+}  // namespace
+
+}  // namespace base
diff --git a/base/memory/memory_coordinator_proxy.cc b/base/memory/memory_coordinator_proxy.cc
new file mode 100644
index 0000000..4e22fe0
--- /dev/null
+++ b/base/memory/memory_coordinator_proxy.cc
@@ -0,0 +1,37 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/memory_coordinator_proxy.h"
+
+namespace base {
+
+namespace {
+
+MemoryCoordinator* g_memory_coordinator = nullptr;
+
+}  // namespace
+
+MemoryCoordinatorProxy::MemoryCoordinatorProxy() = default;
+
+MemoryCoordinatorProxy::~MemoryCoordinatorProxy() = default;
+
+// static
+MemoryCoordinatorProxy* MemoryCoordinatorProxy::GetInstance() {
+  return Singleton<base::MemoryCoordinatorProxy>::get();
+}
+
+// static
+void MemoryCoordinatorProxy::SetMemoryCoordinator(
+    MemoryCoordinator* coordinator) {
+  DCHECK(!g_memory_coordinator || !coordinator);
+  g_memory_coordinator = coordinator;
+}
+
+MemoryState MemoryCoordinatorProxy::GetCurrentMemoryState() const {
+  if (!g_memory_coordinator)
+    return MemoryState::NORMAL;
+  return g_memory_coordinator->GetCurrentMemoryState();
+}
+
+}  // namespace base
diff --git a/base/memory/memory_coordinator_proxy.h b/base/memory/memory_coordinator_proxy.h
new file mode 100644
index 0000000..b6e7b3f
--- /dev/null
+++ b/base/memory/memory_coordinator_proxy.h
@@ -0,0 +1,49 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_MEMORY_COORDINATOR_PROXY_H_
+#define BASE_MEMORY_MEMORY_COORDINATOR_PROXY_H_
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/memory/memory_coordinator_client.h"
+#include "base/memory/singleton.h"
+
+namespace base {
+
+// The MemoryCoordinator interface. See comments in MemoryCoordinatorProxy for
+// method descriptions.
+class BASE_EXPORT MemoryCoordinator {
+ public:
+  virtual ~MemoryCoordinator() = default;
+
+  virtual MemoryState GetCurrentMemoryState() const = 0;
+};
+
+// The proxy of MemoryCoordinator to be accessed from components that are not
+// in content/browser, e.g. net.
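+//
+// Usage sketch (hypothetical; ShrinkCaches() is an illustrative placeholder):
+//
+//   base::MemoryState state =
+//       base::MemoryCoordinatorProxy::GetInstance()->GetCurrentMemoryState();
+//   if (state == base::MemoryState::THROTTLED)
+//     ShrinkCaches();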
+class BASE_EXPORT MemoryCoordinatorProxy {
+ public:
+  static MemoryCoordinatorProxy* GetInstance();
+
+  // Sets an implementation of MemoryCoordinator. MemoryCoordinatorProxy
+  // doesn't take ownership of |coordinator|, which must outlive this proxy.
+  // This should be called before any component starts using this proxy.
+  static void SetMemoryCoordinator(MemoryCoordinator* coordinator);
+
+  // Returns the current memory state.
+  MemoryState GetCurrentMemoryState() const;
+
+ private:
+  friend struct base::DefaultSingletonTraits<MemoryCoordinatorProxy>;
+
+  MemoryCoordinatorProxy();
+  virtual ~MemoryCoordinatorProxy();
+
+  DISALLOW_COPY_AND_ASSIGN(MemoryCoordinatorProxy);
+};
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_MEMORY_COORDINATOR_PROXY_H_
diff --git a/base/memory/memory_pressure_listener.cc b/base/memory/memory_pressure_listener.cc
new file mode 100644
index 0000000..669fb17
--- /dev/null
+++ b/base/memory/memory_pressure_listener.cc
@@ -0,0 +1,129 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/memory_pressure_listener.h"
+
+#include "base/observer_list_threadsafe.h"
+#include "base/trace_event/trace_event.h"
+
+namespace base {
+
+namespace {
+
+// This class is thread safe and internally synchronized.
+class MemoryPressureObserver {
+ public:
+  // There is at most one MemoryPressureObserver and it is never deleted.
+  ~MemoryPressureObserver() = delete;
+
+  void AddObserver(MemoryPressureListener* listener, bool sync) {
+    async_observers_->AddObserver(listener);
+    if (sync) {
+      AutoLock lock(sync_observers_lock_);
+      sync_observers_.AddObserver(listener);
+    }
+  }
+
+  void RemoveObserver(MemoryPressureListener* listener) {
+    async_observers_->RemoveObserver(listener);
+    AutoLock lock(sync_observers_lock_);
+    sync_observers_.RemoveObserver(listener);
+  }
+
+  void Notify(
+      MemoryPressureListener::MemoryPressureLevel memory_pressure_level) {
+    async_observers_->Notify(FROM_HERE, &MemoryPressureListener::Notify,
+                             memory_pressure_level);
+    AutoLock lock(sync_observers_lock_);
+    for (auto& observer : sync_observers_)
+      observer.SyncNotify(memory_pressure_level);
+  }
+
+ private:
+  const scoped_refptr<ObserverListThreadSafe<MemoryPressureListener>>
+      async_observers_ = base::MakeRefCounted<
+          ObserverListThreadSafe<MemoryPressureListener>>();
+  ObserverList<MemoryPressureListener> sync_observers_;
+  Lock sync_observers_lock_;
+};
+
+// Gets the shared MemoryPressureObserver singleton instance.
+MemoryPressureObserver* GetMemoryPressureObserver() {
+  static auto* const observer = new MemoryPressureObserver();
+  return observer;
+}
+
+subtle::Atomic32 g_notifications_suppressed = 0;
+
+}  // namespace
+
+MemoryPressureListener::MemoryPressureListener(
+    const MemoryPressureListener::MemoryPressureCallback& callback)
+    : callback_(callback) {
+  GetMemoryPressureObserver()->AddObserver(this, false);
+}
+
+MemoryPressureListener::MemoryPressureListener(
+    const MemoryPressureListener::MemoryPressureCallback& callback,
+    const MemoryPressureListener::SyncMemoryPressureCallback&
+        sync_memory_pressure_callback)
+    : callback_(callback),
+      sync_memory_pressure_callback_(sync_memory_pressure_callback) {
+  GetMemoryPressureObserver()->AddObserver(this, true);
+}
+
+MemoryPressureListener::~MemoryPressureListener() {
+  GetMemoryPressureObserver()->RemoveObserver(this);
+}
+
+void MemoryPressureListener::Notify(MemoryPressureLevel memory_pressure_level) {
+  callback_.Run(memory_pressure_level);
+}
+
+void MemoryPressureListener::SyncNotify(
+    MemoryPressureLevel memory_pressure_level) {
+  if (!sync_memory_pressure_callback_.is_null()) {
+    sync_memory_pressure_callback_.Run(memory_pressure_level);
+  }
+}
+
+// static
+void MemoryPressureListener::NotifyMemoryPressure(
+    MemoryPressureLevel memory_pressure_level) {
+  DCHECK_NE(memory_pressure_level, MEMORY_PRESSURE_LEVEL_NONE);
+  TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("memory-infra"),
+                       "MemoryPressureListener::NotifyMemoryPressure",
+                       TRACE_EVENT_SCOPE_THREAD, "level",
+                       memory_pressure_level);
+  if (AreNotificationsSuppressed())
+    return;
+  DoNotifyMemoryPressure(memory_pressure_level);
+}
+
+// static
+bool MemoryPressureListener::AreNotificationsSuppressed() {
+  return subtle::Acquire_Load(&g_notifications_suppressed) == 1;
+}
+
+// static
+void MemoryPressureListener::SetNotificationsSuppressed(bool suppress) {
+  subtle::Release_Store(&g_notifications_suppressed, suppress ? 1 : 0);
+}
+
+// static
+void MemoryPressureListener::SimulatePressureNotification(
+    MemoryPressureLevel memory_pressure_level) {
+  // Notify all listeners even if regular pressure notifications are suppressed.
+  DoNotifyMemoryPressure(memory_pressure_level);
+}
+
+// static
+void MemoryPressureListener::DoNotifyMemoryPressure(
+    MemoryPressureLevel memory_pressure_level) {
+  DCHECK_NE(memory_pressure_level, MEMORY_PRESSURE_LEVEL_NONE);
+
+  GetMemoryPressureObserver()->Notify(memory_pressure_level);
+}
+
+}  // namespace base
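
A short sketch of how measurement code might use the suppression flag defined above; MeasureUnderStableConditions is a hypothetical name. Note that suppression gates NotifyMemoryPressure() but deliberately not SimulatePressureNotification().

    #include "base/memory/memory_pressure_listener.h"

    // Hypothetical measurement routine: suppress real pressure events while
    // sampling so the numbers stay stable, then restore normal delivery.
    void MeasureUnderStableConditions() {
      base::MemoryPressureListener::SetNotificationsSuppressed(true);
      // ... take memory measurements; NotifyMemoryPressure() is a no-op ...
      // Simulated notifications still reach all listeners:
      base::MemoryPressureListener::SimulatePressureNotification(
          base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE);
      base::MemoryPressureListener::SetNotificationsSuppressed(false);
    }
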
diff --git a/base/memory/memory_pressure_listener.h b/base/memory/memory_pressure_listener.h
new file mode 100644
index 0000000..7e97010
--- /dev/null
+++ b/base/memory/memory_pressure_listener.h
@@ -0,0 +1,102 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// MemoryPressure provides static APIs for handling memory pressure on
+// platforms that have such signals, such as Android and ChromeOS.
+// The app will try to discard buffers that aren't deemed essential (individual
+// modules will implement their own policy).
+
+#ifndef BASE_MEMORY_MEMORY_PRESSURE_LISTENER_H_
+#define BASE_MEMORY_MEMORY_PRESSURE_LISTENER_H_
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/macros.h"
+
+namespace base {
+
+// To start listening, create a new instance, passing a callback to a
+// function that takes a MemoryPressureLevel parameter. To stop listening,
+// simply delete the listener object. The implementation guarantees
+// that the callback will always be called on the thread that created
+// the listener.
+// Note that even on the same thread, the callback is not guaranteed to be
+// called synchronously within the system memory pressure broadcast.
+// Please see notes in MemoryPressureLevel enum below: some levels are
+// absolutely critical, and if not enough memory is returned to the system,
+// it'll potentially kill the app, and then later the app will have to be
+// cold-started.
+//
+// Example:
+//
+//    void OnMemoryPressure(MemoryPressureLevel memory_pressure_level) {
+//       ...
+//    }
+//
+//    // Start listening.
+//    MemoryPressureListener* my_listener =
+//        new MemoryPressureListener(base::Bind(&OnMemoryPressure));
+//
+//    ...
+//
+//    // Stop listening.
+//    delete my_listener;
+//
+class BASE_EXPORT MemoryPressureListener {
+ public:
+  // A Java counterpart will be generated for this enum.
+  // GENERATED_JAVA_ENUM_PACKAGE: org.chromium.base
+  enum MemoryPressureLevel {
+    // No problems, there is enough memory to use. This event is not sent via
+    // callback, but the enum is used in other places to find out the current
+    // state of the system.
+    MEMORY_PRESSURE_LEVEL_NONE,
+
+    // Modules are advised to free buffers that are cheap to re-allocate and not
+    // immediately needed.
+    MEMORY_PRESSURE_LEVEL_MODERATE,
+
+    // At this level, modules are advised to free all possible memory.  The
+    // alternative is to be killed by the system, which means all memory will
+    // have to be re-created, plus the cost of a cold start.
+    MEMORY_PRESSURE_LEVEL_CRITICAL,
+  };
+
+  typedef Callback<void(MemoryPressureLevel)> MemoryPressureCallback;
+  typedef Callback<void(MemoryPressureLevel)> SyncMemoryPressureCallback;
+
+  explicit MemoryPressureListener(
+      const MemoryPressureCallback& memory_pressure_callback);
+  MemoryPressureListener(
+      const MemoryPressureCallback& memory_pressure_callback,
+      const SyncMemoryPressureCallback& sync_memory_pressure_callback);
+
+  ~MemoryPressureListener();
+
+  // Intended for use by the platform specific implementation.
+  static void NotifyMemoryPressure(MemoryPressureLevel memory_pressure_level);
+
+  // These methods should not be used anywhere else but in memory measurement
+  // code, where they are intended to maintain stable conditions across
+  // measurements.
+  static bool AreNotificationsSuppressed();
+  static void SetNotificationsSuppressed(bool suppressed);
+  static void SimulatePressureNotification(
+      MemoryPressureLevel memory_pressure_level);
+
+  void Notify(MemoryPressureLevel memory_pressure_level);
+  void SyncNotify(MemoryPressureLevel memory_pressure_level);
+
+ private:
+  static void DoNotifyMemoryPressure(MemoryPressureLevel memory_pressure_level);
+
+  MemoryPressureCallback callback_;
+  SyncMemoryPressureCallback sync_memory_pressure_callback_;
+
+  DISALLOW_COPY_AND_ASSIGN(MemoryPressureListener);
+};
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_MEMORY_PRESSURE_LISTENER_H_
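
The class comment above only shows the async constructor; here is a sketch of the two-callback form, assuming the creating thread has a task runner so the async callback can be posted back to it (MakeListener and the callbacks are hypothetical names):

    #include <memory>

    #include "base/bind.h"
    #include "base/memory/memory_pressure_listener.h"

    void OnPressure(base::MemoryPressureListener::MemoryPressureLevel level) {
      // Posted asynchronously to the thread that created the listener.
    }

    void OnSyncPressure(
        base::MemoryPressureListener::MemoryPressureLevel level) {
      // Runs synchronously on whichever thread calls NotifyMemoryPressure().
    }

    std::unique_ptr<base::MemoryPressureListener> MakeListener() {
      return std::make_unique<base::MemoryPressureListener>(
          base::Bind(&OnPressure), base::Bind(&OnSyncPressure));
    }
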
diff --git a/base/memory/memory_pressure_listener_unittest.cc b/base/memory/memory_pressure_listener_unittest.cc
new file mode 100644
index 0000000..87d5f4c
--- /dev/null
+++ b/base/memory/memory_pressure_listener_unittest.cc
@@ -0,0 +1,79 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/memory_pressure_listener.h"
+
+#include "base/bind.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace base {
+
+using MemoryPressureLevel = MemoryPressureListener::MemoryPressureLevel;
+
+class MemoryPressureListenerTest : public testing::Test {
+ public:
+  void SetUp() override {
+    message_loop_.reset(new MessageLoopForUI());
+    listener_.reset(new MemoryPressureListener(
+        Bind(&MemoryPressureListenerTest::OnMemoryPressure, Unretained(this))));
+  }
+
+  void TearDown() override {
+    listener_.reset();
+    message_loop_.reset();
+  }
+
+ protected:
+  void ExpectNotification(
+      void (*notification_function)(MemoryPressureLevel),
+      MemoryPressureLevel level) {
+    EXPECT_CALL(*this, OnMemoryPressure(level)).Times(1);
+    notification_function(level);
+    RunLoop().RunUntilIdle();
+  }
+
+  void ExpectNoNotification(
+      void (*notification_function)(MemoryPressureLevel),
+      MemoryPressureLevel level) {
+    EXPECT_CALL(*this, OnMemoryPressure(testing::_)).Times(0);
+    notification_function(level);
+    RunLoop().RunUntilIdle();
+  }
+
+ private:
+  MOCK_METHOD1(OnMemoryPressure,
+               void(MemoryPressureListener::MemoryPressureLevel));
+
+  std::unique_ptr<MessageLoopForUI> message_loop_;
+  std::unique_ptr<MemoryPressureListener> listener_;
+};
+
+TEST_F(MemoryPressureListenerTest, NotifyMemoryPressure) {
+  // Memory pressure notifications are not suppressed by default.
+  EXPECT_FALSE(MemoryPressureListener::AreNotificationsSuppressed());
+  ExpectNotification(&MemoryPressureListener::NotifyMemoryPressure,
+                     MemoryPressureLevel::MEMORY_PRESSURE_LEVEL_MODERATE);
+  ExpectNotification(&MemoryPressureListener::SimulatePressureNotification,
+                     MemoryPressureLevel::MEMORY_PRESSURE_LEVEL_MODERATE);
+
+  // Enable suppressing memory pressure notifications.
+  MemoryPressureListener::SetNotificationsSuppressed(true);
+  EXPECT_TRUE(MemoryPressureListener::AreNotificationsSuppressed());
+  ExpectNoNotification(&MemoryPressureListener::NotifyMemoryPressure,
+                       MemoryPressureLevel::MEMORY_PRESSURE_LEVEL_MODERATE);
+  ExpectNotification(&MemoryPressureListener::SimulatePressureNotification,
+                     MemoryPressureLevel::MEMORY_PRESSURE_LEVEL_MODERATE);
+
+  // Disable suppressing memory pressure notifications.
+  MemoryPressureListener::SetNotificationsSuppressed(false);
+  EXPECT_FALSE(MemoryPressureListener::AreNotificationsSuppressed());
+  ExpectNotification(&MemoryPressureListener::NotifyMemoryPressure,
+                     MemoryPressureLevel::MEMORY_PRESSURE_LEVEL_CRITICAL);
+  ExpectNotification(&MemoryPressureListener::SimulatePressureNotification,
+                     MemoryPressureLevel::MEMORY_PRESSURE_LEVEL_CRITICAL);
+}
+
+}  // namespace base
diff --git a/base/memory/memory_pressure_monitor.cc b/base/memory/memory_pressure_monitor.cc
new file mode 100644
index 0000000..ed350b8
--- /dev/null
+++ b/base/memory/memory_pressure_monitor.cc
@@ -0,0 +1,71 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/memory_pressure_monitor.h"
+
+#include "base/logging.h"
+#include "base/metrics/histogram_macros.h"
+
+namespace base {
+namespace {
+
+MemoryPressureMonitor* g_monitor = nullptr;
+
+// Enumeration of UMA memory pressure levels. This needs to be kept in sync with
+// histograms.xml and the memory pressure levels defined in
+// MemoryPressureListener.
+enum MemoryPressureLevelUMA {
+  UMA_MEMORY_PRESSURE_LEVEL_NONE = 0,
+  UMA_MEMORY_PRESSURE_LEVEL_MODERATE = 1,
+  UMA_MEMORY_PRESSURE_LEVEL_CRITICAL = 2,
+  // This must be the last value in the enum.
+  UMA_MEMORY_PRESSURE_LEVEL_COUNT,
+};
+
+// Converts a memory pressure level to a UMA enumeration value.
+MemoryPressureLevelUMA MemoryPressureLevelToUmaEnumValue(
+    base::MemoryPressureListener::MemoryPressureLevel level) {
+  switch (level) {
+    case MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE:
+      return UMA_MEMORY_PRESSURE_LEVEL_NONE;
+    case MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE:
+      return UMA_MEMORY_PRESSURE_LEVEL_MODERATE;
+    case MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL:
+      return UMA_MEMORY_PRESSURE_LEVEL_CRITICAL;
+  }
+  NOTREACHED();
+  return UMA_MEMORY_PRESSURE_LEVEL_NONE;
+}
+
+}  // namespace
+
+MemoryPressureMonitor::MemoryPressureMonitor() {
+  DCHECK(!g_monitor);
+  g_monitor = this;
+}
+
+MemoryPressureMonitor::~MemoryPressureMonitor() {
+  DCHECK(g_monitor);
+  g_monitor = nullptr;
+}
+
+// static
+MemoryPressureMonitor* MemoryPressureMonitor::Get() {
+  return g_monitor;
+}
+
+void MemoryPressureMonitor::RecordMemoryPressure(
+    base::MemoryPressureListener::MemoryPressureLevel level,
+    int ticks) {
+  // Use the more primitive STATIC_HISTOGRAM_POINTER_BLOCK macro because the
+  // simple UMA_HISTOGRAM macros don't expose 'AddCount' functionality.
+  STATIC_HISTOGRAM_POINTER_BLOCK(
+      "Memory.PressureLevel",
+      AddCount(MemoryPressureLevelToUmaEnumValue(level), ticks),
+      base::LinearHistogram::FactoryGet(
+          "Memory.PressureLevel", 1, UMA_MEMORY_PRESSURE_LEVEL_COUNT,
+          UMA_MEMORY_PRESSURE_LEVEL_COUNT + 1,
+          base::HistogramBase::kUmaTargetedHistogramFlag));
+}
+
+}  // namespace base
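
Since RecordMemoryPressure() takes whole 5-second ticks, platform monitors have to convert elapsed time themselves. A sketch of that accounting with hypothetical names; the carry-over mirrors what the Mac monitor later in this patch does with subtick_seconds_.

    #include "base/memory/memory_pressure_monitor.h"

    // Hypothetical helper: report whole 5-second ticks and carry the
    // remainder forward so no time is lost between reports.
    void ReportElapsedTime(
        base::MemoryPressureListener::MemoryPressureLevel level,
        double elapsed_seconds,
        double* leftover_seconds) {
      const int kSecondsPerTick = 5;
      const double total = elapsed_seconds + *leftover_seconds;
      const int ticks = static_cast<int>(total / kSecondsPerTick);
      *leftover_seconds = total - ticks * kSecondsPerTick;
      if (ticks > 0)
        base::MemoryPressureMonitor::RecordMemoryPressure(level, ticks);
    }
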
diff --git a/base/memory/memory_pressure_monitor.h b/base/memory/memory_pressure_monitor.h
new file mode 100644
index 0000000..e48244b
--- /dev/null
+++ b/base/memory/memory_pressure_monitor.h
@@ -0,0 +1,53 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_MEMORY_PRESSURE_MONITOR_H_
+#define BASE_MEMORY_MEMORY_PRESSURE_MONITOR_H_
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/memory/memory_pressure_listener.h"
+
+namespace base {
+
+// TODO(chrisha): Make this a concrete class with per-OS implementations rather
+// than an abstract base class.
+
+// Declares the interface for a MemoryPressureMonitor. There are multiple
+// OS-specific implementations of this class. An instance of the memory
+// pressure monitor is created at the process level, tracks memory usage, and
+// pushes memory state change notifications to the static function
+// base::MemoryPressureListener::NotifyMemoryPressure. This in turn notifies
+// all MemoryPressureListener instances via a callback.
+class BASE_EXPORT MemoryPressureMonitor {
+ public:
+  using MemoryPressureLevel = base::MemoryPressureListener::MemoryPressureLevel;
+  using DispatchCallback = base::Callback<void(MemoryPressureLevel level)>;
+
+  virtual ~MemoryPressureMonitor();
+
+  // Return the singleton MemoryPressureMonitor.
+  static MemoryPressureMonitor* Get();
+
+  // Record memory pressure UMA statistic. A tick is 5 seconds.
+  static void RecordMemoryPressure(MemoryPressureLevel level, int ticks);
+
+  // Returns the currently observed memory pressure.
+  virtual MemoryPressureLevel GetCurrentPressureLevel() = 0;
+
+  // Sets a notification callback. The default callback invokes
+  // base::MemoryPressureListener::NotifyMemoryPressure.
+  virtual void SetDispatchCallback(const DispatchCallback& callback) = 0;
+
+ protected:
+  MemoryPressureMonitor();
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MemoryPressureMonitor);
+};
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_MEMORY_PRESSURE_MONITOR_H_
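
A minimal sketch of a concrete subclass, assuming a platform with no real signal source; NullMemoryPressureMonitor is hypothetical. The protected base constructor registers the instance as the process-wide singleton returned by Get(), so at most one monitor may exist at a time.

    #include "base/memory/memory_pressure_monitor.h"

    // Hypothetical no-op monitor for a platform without pressure signals.
    class NullMemoryPressureMonitor : public base::MemoryPressureMonitor {
     public:
      MemoryPressureLevel GetCurrentPressureLevel() override {
        return base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE;
      }
      void SetDispatchCallback(const DispatchCallback& callback) override {
        dispatch_callback_ = callback;  // Never invoked by this monitor.
      }

     private:
      DispatchCallback dispatch_callback_;
    };
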
diff --git a/base/memory/memory_pressure_monitor_chromeos.cc b/base/memory/memory_pressure_monitor_chromeos.cc
new file mode 100644
index 0000000..b4e4b94
--- /dev/null
+++ b/base/memory/memory_pressure_monitor_chromeos.cc
@@ -0,0 +1,288 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/memory_pressure_monitor_chromeos.h"
+
+#include <fcntl.h>
+#include <sys/select.h>
+
+#include "base/metrics/histogram_macros.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/process/process_metrics.h"
+#include "base/single_thread_task_runner.h"
+#include "base/sys_info.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/time/time.h"
+
+namespace base {
+namespace chromeos {
+
+namespace {
+
+// The time between memory pressure checks. While under critical pressure,
+// this is also the interval at which cleanup attempts are repeated.
+const int kMemoryPressureIntervalMs = 1000;
+
+// The time which should pass between two moderate memory pressure calls.
+const int kModerateMemoryPressureCooldownMs = 10000;
+
+// Number of event polls before the next moderate pressure event can be sent.
+const int kModerateMemoryPressureCooldown =
+    kModerateMemoryPressureCooldownMs / kMemoryPressureIntervalMs;
+
+// Threshold constants to emit pressure events.
+const int kNormalMemoryPressureModerateThresholdPercent = 60;
+const int kNormalMemoryPressureCriticalThresholdPercent = 95;
+const int kAggressiveMemoryPressureModerateThresholdPercent = 35;
+const int kAggressiveMemoryPressureCriticalThresholdPercent = 70;
+
+// The possible state for memory pressure level. The values should be in line
+// with values in MemoryPressureListener::MemoryPressureLevel and should be
+// updated if more memory pressure levels are introduced.
+enum MemoryPressureLevelUMA {
+  MEMORY_PRESSURE_LEVEL_NONE = 0,
+  MEMORY_PRESSURE_LEVEL_MODERATE,
+  MEMORY_PRESSURE_LEVEL_CRITICAL,
+  NUM_MEMORY_PRESSURE_LEVELS
+};
+
+// This is the file that will exist if low memory notification is available
+// on the device.  Whenever it becomes readable, it signals a low memory
+// condition.
+const char kLowMemFile[] = "/dev/chromeos-low-mem";
+
+// Converts a |MemoryPressureThreshold| value into a used memory percentage for
+// the moderate pressure event.
+int GetModerateMemoryThresholdInPercent(
+    MemoryPressureMonitor::MemoryPressureThresholds thresholds) {
+  return thresholds == MemoryPressureMonitor::
+                           THRESHOLD_AGGRESSIVE_CACHE_DISCARD ||
+         thresholds == MemoryPressureMonitor::THRESHOLD_AGGRESSIVE
+             ? kAggressiveMemoryPressureModerateThresholdPercent
+             : kNormalMemoryPressureModerateThresholdPercent;
+}
+
+// Converts a |MemoryPressureThreshold| value into a used memory percentage for
+// the critical pressure event.
+int GetCriticalMemoryThresholdInPercent(
+    MemoryPressureMonitor::MemoryPressureThresholds thresholds) {
+  return thresholds == MemoryPressureMonitor::
+                           THRESHOLD_AGGRESSIVE_TAB_DISCARD ||
+         thresholds == MemoryPressureMonitor::THRESHOLD_AGGRESSIVE
+             ? kAggressiveMemoryPressureCriticalThresholdPercent
+             : kNormalMemoryPressureCriticalThresholdPercent;
+}
+
+// Converts the percentage of used memory into a memory pressure level.
+MemoryPressureListener::MemoryPressureLevel GetMemoryPressureLevelFromFillLevel(
+    int actual_fill_level,
+    int moderate_threshold,
+    int critical_threshold) {
+  if (actual_fill_level < moderate_threshold)
+    return MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE;
+  return actual_fill_level < critical_threshold
+             ? MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE
+             : MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL;
+}
+
+// This function will be called at most once a second. It checks whether
+// the kernel has detected a low memory situation.
+bool IsLowMemoryCondition(int file_descriptor) {
+  fd_set fds;
+  struct timeval tv;
+
+  FD_ZERO(&fds);
+  FD_SET(file_descriptor, &fds);
+
+  tv.tv_sec = 0;
+  tv.tv_usec = 0;
+
+  return HANDLE_EINTR(select(file_descriptor + 1, &fds, NULL, NULL, &tv)) > 0;
+}
+
+}  // namespace
+
+MemoryPressureMonitor::MemoryPressureMonitor(
+    MemoryPressureThresholds thresholds)
+    : current_memory_pressure_level_(
+          MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE),
+      moderate_pressure_repeat_count_(0),
+      seconds_since_reporting_(0),
+      moderate_pressure_threshold_percent_(
+          GetModerateMemoryThresholdInPercent(thresholds)),
+      critical_pressure_threshold_percent_(
+          GetCriticalMemoryThresholdInPercent(thresholds)),
+      low_mem_file_(HANDLE_EINTR(::open(kLowMemFile, O_RDONLY))),
+      dispatch_callback_(
+          base::Bind(&MemoryPressureListener::NotifyMemoryPressure)),
+      weak_ptr_factory_(this) {
+  StartObserving();
+  LOG_IF(ERROR,
+         base::SysInfo::IsRunningOnChromeOS() && !low_mem_file_.is_valid())
+      << "Cannot open kernel listener";
+}
+
+MemoryPressureMonitor::~MemoryPressureMonitor() {
+  StopObserving();
+}
+
+void MemoryPressureMonitor::ScheduleEarlyCheck() {
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce(&MemoryPressureMonitor::CheckMemoryPressure,
+                          weak_ptr_factory_.GetWeakPtr()));
+}
+
+MemoryPressureListener::MemoryPressureLevel
+MemoryPressureMonitor::GetCurrentPressureLevel() {
+  return current_memory_pressure_level_;
+}
+
+// static
+MemoryPressureMonitor* MemoryPressureMonitor::Get() {
+  return static_cast<MemoryPressureMonitor*>(
+      base::MemoryPressureMonitor::Get());
+}
+
+void MemoryPressureMonitor::StartObserving() {
+  timer_.Start(FROM_HERE,
+               TimeDelta::FromMilliseconds(kMemoryPressureIntervalMs),
+               Bind(&MemoryPressureMonitor::
+                        CheckMemoryPressureAndRecordStatistics,
+                    weak_ptr_factory_.GetWeakPtr()));
+}
+
+void MemoryPressureMonitor::StopObserving() {
+  // If StartObserving failed, StopObserving will still get called.
+  timer_.Stop();
+}
+
+void MemoryPressureMonitor::CheckMemoryPressureAndRecordStatistics() {
+  CheckMemoryPressure();
+  if (seconds_since_reporting_++ == 5) {
+    seconds_since_reporting_ = 0;
+    RecordMemoryPressure(current_memory_pressure_level_, 1);
+  }
+  // Record UMA histogram statistics for the current memory pressure level.
+  // TODO(lgrey): Remove this once there's a usable history for the
+  // "Memory.PressureLevel" statistic
+  MemoryPressureLevelUMA memory_pressure_level_uma(MEMORY_PRESSURE_LEVEL_NONE);
+  switch (current_memory_pressure_level_) {
+    case MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE:
+      memory_pressure_level_uma = MEMORY_PRESSURE_LEVEL_NONE;
+      break;
+    case MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE:
+      memory_pressure_level_uma = MEMORY_PRESSURE_LEVEL_MODERATE;
+      break;
+    case MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL:
+      memory_pressure_level_uma = MEMORY_PRESSURE_LEVEL_CRITICAL;
+      break;
+  }
+
+  UMA_HISTOGRAM_ENUMERATION("ChromeOS.MemoryPressureLevel",
+                            memory_pressure_level_uma,
+                            NUM_MEMORY_PRESSURE_LEVELS);
+}
+
+void MemoryPressureMonitor::CheckMemoryPressure() {
+  MemoryPressureListener::MemoryPressureLevel old_pressure =
+      current_memory_pressure_level_;
+
+  // If we have the kernel low memory observer, we use its flag instead of our
+  // own computation (for now). Note that in "simulation mode" it can be null.
+  // TODO(skuhne): We need to add code which makes sure that the kernel and this
+  // computation come to similar results and then remove this override again.
+  // TODO(skuhne): Add some testing framework here to see how close the kernel
+  // and the internal functions are.
+  if (low_mem_file_.is_valid() && IsLowMemoryCondition(low_mem_file_.get())) {
+    current_memory_pressure_level_ =
+        MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL;
+  } else {
+    current_memory_pressure_level_ = GetMemoryPressureLevelFromFillLevel(
+        GetUsedMemoryInPercent(),
+        moderate_pressure_threshold_percent_,
+        critical_pressure_threshold_percent_);
+
+    // When listening to the kernel, we ignore a critical level reported by
+    // our own computation and downgrade it to moderate.
+    if (low_mem_file_.is_valid() &&
+        current_memory_pressure_level_ ==
+        MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL) {
+      current_memory_pressure_level_ =
+          MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE;
+    }
+  }
+
+  // In case there is no memory pressure we do not notify.
+  if (current_memory_pressure_level_ ==
+      MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE) {
+    return;
+  }
+  if (old_pressure == current_memory_pressure_level_) {
+    // If the memory pressure is still at the same level, we notify again for a
+    // critical level. In case of a moderate level repeat however, we only send
+    // a notification after a certain time has passed.
+    if (current_memory_pressure_level_ ==
+        MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE &&
+          ++moderate_pressure_repeat_count_ <
+              kModerateMemoryPressureCooldown) {
+      return;
+    }
+  } else if (current_memory_pressure_level_ ==
+               MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE &&
+             old_pressure ==
+               MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL) {
+    // When we reduce the pressure level from critical to moderate, we
+    // restart the timeout and do not send another notification.
+    moderate_pressure_repeat_count_ = 0;
+    return;
+  }
+  moderate_pressure_repeat_count_ = 0;
+  dispatch_callback_.Run(current_memory_pressure_level_);
+}
+
+// Gets the used ChromeOS memory in percent.
+int MemoryPressureMonitor::GetUsedMemoryInPercent() {
+  base::SystemMemoryInfoKB info;
+  if (!base::GetSystemMemoryInfo(&info)) {
+    VLOG(1) << "Cannot determine the free memory of the system.";
+    return 0;
+  }
+  // TODO(skuhne): Instead of adding the kernel memory pressure calculation
+  // logic here, we should have a kernel mechanism similar to the low memory
+  // notifier in ChromeOS which offers multiple pressure states.
+  // To track this, we have crbug.com/381196.
+
+  // The available memory consists of "real" and virtual (z)ram memory.
+  // Since swappable memory uses non-deterministic compression and the
+  // compression creates its own dynamics in the system, it gets
+  // de-emphasized by the |kSwapWeight| factor.
+  const int kSwapWeight = 4;
+
+  // The total memory we have is the "real memory" plus the virtual (z)ram.
+  int total_memory = info.total + info.swap_total / kSwapWeight;
+
+  // The kernel internally uses 50MB.
+  const int kMinFileMemory = 50 * 1024;
+
+  // Most file memory can be easily reclaimed...
+  int file_memory = info.active_file + info.inactive_file;
+  // ...unless it is dirty or is the minimal portion the kernel requires.
+  file_memory -= info.dirty + kMinFileMemory;
+
+  // Available memory is the sum of free, swap and easy reclaimable memory.
+  int available_memory =
+      info.free + info.swap_free / kSwapWeight + file_memory;
+
+  DCHECK(available_memory < total_memory);
+  int percentage = ((total_memory - available_memory) * 100) / total_memory;
+  return percentage;
+}
+
+void MemoryPressureMonitor::SetDispatchCallback(
+    const DispatchCallback& callback) {
+  dispatch_callback_ = callback;
+}
+
+}  // namespace chromeos
+}  // namespace base
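
To make the zram weighting in GetUsedMemoryInPercent() concrete, a worked example with assumed figures (all values in KB, as in SystemMemoryInfoKB):

    // Assume: total = 4,000,000, swap_total = 2,000,000, free = 500,000,
    // swap_free = 1,000,000, active_file + inactive_file = 900,000,
    // dirty = 50,000, kSwapWeight = 4, kMinFileMemory = 51,200 (50 * 1024).
    //
    //   total_memory     = 4,000,000 + 2,000,000 / 4         = 4,500,000
    //   file_memory      = 900,000 - 50,000 - 51,200         =   798,800
    //   available_memory = 500,000 + 1,000,000 / 4 + 798,800 = 1,548,800
    //   percentage       = (4,500,000 - 1,548,800) * 100 / 4,500,000 = 65
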
diff --git a/base/memory/memory_pressure_monitor_chromeos.h b/base/memory/memory_pressure_monitor_chromeos.h
new file mode 100644
index 0000000..563ba85
--- /dev/null
+++ b/base/memory/memory_pressure_monitor_chromeos.h
@@ -0,0 +1,128 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_MEMORY_PRESSURE_MONITOR_CHROMEOS_H_
+#define BASE_MEMORY_MEMORY_PRESSURE_MONITOR_CHROMEOS_H_
+
+#include "base/base_export.h"
+#include "base/files/scoped_file.h"
+#include "base/macros.h"
+#include "base/memory/memory_pressure_listener.h"
+#include "base/memory/memory_pressure_monitor.h"
+#include "base/memory/weak_ptr.h"
+#include "base/timer/timer.h"
+
+namespace base {
+namespace chromeos {
+
+class TestMemoryPressureMonitor;
+
+////////////////////////////////////////////////////////////////////////////////
+// MemoryPressureMonitor
+//
+// A class to handle the observation of our free memory. It notifies the
+// MemoryPressureListener of memory fill level changes, so that it can take
+// action to release memory resources accordingly.
+//
+class BASE_EXPORT MemoryPressureMonitor : public base::MemoryPressureMonitor {
+ public:
+  using GetUsedMemoryInPercentCallback = int (*)();
+
+  // There are two memory pressure events:
+  // MODERATE - which will mainly release caches.
+  // CRITICAL - which will discard tabs.
+  // The |MemoryPressureThresholds| enum selects the strategy of firing these
+  // events: A conservative strategy will keep as much content in memory as
+  // possible (causing the system to swap to zram) and an aggressive strategy
+  // will release memory earlier to avoid swapping.
+  enum MemoryPressureThresholds {
+    // Use the system default.
+    THRESHOLD_DEFAULT = 0,
+    // Try to keep as much content in memory as possible.
+    THRESHOLD_CONSERVATIVE = 1,
+    // Discard caches earlier, allowing more tabs to be kept in memory.
+    THRESHOLD_AGGRESSIVE_CACHE_DISCARD = 2,
+    // Discard tabs earlier, allowing the system to stay fast.
+    THRESHOLD_AGGRESSIVE_TAB_DISCARD = 3,
+    // Discard caches and tabs earlier to allow the system to be faster.
+    THRESHOLD_AGGRESSIVE = 4
+  };
+
+  explicit MemoryPressureMonitor(MemoryPressureThresholds thresholds);
+  ~MemoryPressureMonitor() override;
+
+  // Redo the memory pressure calculation soon and call again if critical
+  // memory pressure prevails. Note that this call will trigger an asynchronous
+  // action which gives the system time to release memory back into the pool.
+  void ScheduleEarlyCheck();
+
+  // Get the current memory pressure level.
+  MemoryPressureListener::MemoryPressureLevel GetCurrentPressureLevel()
+      override;
+  void SetDispatchCallback(const DispatchCallback& callback) override;
+
+  // Returns a type-casted version of the current memory pressure monitor. A
+  // simple wrapper to base::MemoryPressureMonitor::Get.
+  static MemoryPressureMonitor* Get();
+
+ private:
+  friend TestMemoryPressureMonitor;
+  // Starts observing the memory fill level.
+  // Calls to StartObserving should always be matched with calls to
+  // StopObserving.
+  void StartObserving();
+
+  // Stop observing the memory fill level.
+  // May be safely called if StartObserving has not been called.
+  void StopObserving();
+
+  // The function which gets periodically called to check any changes in the
+  // memory pressure. It will report pressure changes as well as continuous
+  // critical pressure levels.
+  void CheckMemoryPressure();
+
+  // The function periodically checks the memory pressure changes and records
+  // the UMA histogram statistics for the current memory pressure level.
+  void CheckMemoryPressureAndRecordStatistics();
+
+  // Get the memory pressure in percent (virtual for testing).
+  virtual int GetUsedMemoryInPercent();
+
+  // The current memory pressure.
+  base::MemoryPressureListener::MemoryPressureLevel
+      current_memory_pressure_level_;
+
+  // A periodic timer to check for resource pressure changes. This will get
+  // replaced by a kernel triggered event system (see crbug.com/381196).
+  base::RepeatingTimer timer_;
+
+  // To throttle the number of moderate pressure event calls, this counter
+  // tracks the number of checks since the last event occurred.
+  int moderate_pressure_repeat_count_;
+
+  // The "Memory.PressureLevel" statistic is recorded every
+  // 5 seconds, but the timer to report "ChromeOS.MemoryPressureLevel"
+  // fires every second. This counter is used to allow reporting
+  // "Memory.PressureLevel" correctly without adding another
+  // timer.
+  int seconds_since_reporting_;
+
+  // The thresholds for moderate and critical pressure.
+  const int moderate_pressure_threshold_percent_;
+  const int critical_pressure_threshold_percent_;
+
+  // File descriptor used to detect low memory condition.
+  ScopedFD low_mem_file_;
+
+  DispatchCallback dispatch_callback_;
+
+  base::WeakPtrFactory<MemoryPressureMonitor> weak_ptr_factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(MemoryPressureMonitor);
+};
+
+}  // namespace chromeos
+}  // namespace base
+
+#endif  // BASE_MEMORY_MEMORY_PRESSURE_MONITOR_CHROMEOS_H_
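
A sketch of constructing the ChromeOS monitor, assuming a message loop already runs on the current thread so the repeating timer can fire; CreateMonitor is a hypothetical name and the aggressive threshold is just one choice from the enum above.

    #include <memory>

    #include "base/memory/memory_pressure_monitor_chromeos.h"

    std::unique_ptr<base::chromeos::MemoryPressureMonitor> CreateMonitor() {
      auto monitor = std::make_unique<base::chromeos::MemoryPressureMonitor>(
          base::chromeos::MemoryPressureMonitor::THRESHOLD_AGGRESSIVE);
      // Optionally force a prompt re-check instead of waiting for the next
      // one-second timer slot.
      monitor->ScheduleEarlyCheck();
      return monitor;
    }
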
diff --git a/base/memory/memory_pressure_monitor_chromeos_unittest.cc b/base/memory/memory_pressure_monitor_chromeos_unittest.cc
new file mode 100644
index 0000000..ee00091
--- /dev/null
+++ b/base/memory/memory_pressure_monitor_chromeos_unittest.cc
@@ -0,0 +1,172 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/memory_pressure_monitor_chromeos.h"
+
+#include "base/macros.h"
+#include "base/memory/memory_pressure_listener.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/sys_info.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace chromeos {
+
+namespace {
+
+// True if the memory notifier got called.
+// Do not read/modify value directly.
+bool on_memory_pressure_called = false;
+
+// If the memory notifier got called, this is the memory pressure reported.
+MemoryPressureListener::MemoryPressureLevel on_memory_pressure_level =
+    MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE;
+
+// Processes OnMemoryPressure calls.
+void OnMemoryPressure(MemoryPressureListener::MemoryPressureLevel level) {
+  on_memory_pressure_called = true;
+  on_memory_pressure_level = level;
+}
+
+// Resets the indicator for memory pressure.
+void ResetOnMemoryPressureCalled() {
+  on_memory_pressure_called = false;
+}
+
+// Returns true when OnMemoryPressure was called (and resets it).
+bool WasOnMemoryPressureCalled() {
+  bool b = on_memory_pressure_called;
+  ResetOnMemoryPressureCalled();
+  return b;
+}
+
+}  // namespace
+
+class TestMemoryPressureMonitor : public MemoryPressureMonitor {
+ public:
+  TestMemoryPressureMonitor()
+      : MemoryPressureMonitor(THRESHOLD_DEFAULT),
+        memory_in_percent_override_(0) {
+    // Disable any timers which are going on and set a special memory reporting
+    // function.
+    StopObserving();
+  }
+  ~TestMemoryPressureMonitor() override = default;
+
+  void SetMemoryInPercentOverride(int percent) {
+    memory_in_percent_override_ = percent;
+  }
+
+  void CheckMemoryPressureForTest() {
+    CheckMemoryPressure();
+  }
+
+ private:
+  int GetUsedMemoryInPercent() override {
+    return memory_in_percent_override_;
+  }
+
+  int memory_in_percent_override_;
+  DISALLOW_COPY_AND_ASSIGN(TestMemoryPressureMonitor);
+};
+
+// This test exercises the various memory pressure state transitions, checking
+// for correct behavior on event reposting as well as state updates.
+TEST(ChromeOSMemoryPressureMonitorTest, CheckMemoryPressure) {
+  // crbug.com/844102:
+  if (base::SysInfo::IsRunningOnChromeOS())
+    return;
+
+  base::MessageLoopForUI message_loop;
+  std::unique_ptr<TestMemoryPressureMonitor> monitor(
+      new TestMemoryPressureMonitor);
+  std::unique_ptr<MemoryPressureListener> listener(
+      new MemoryPressureListener(base::Bind(&OnMemoryPressure)));
+  // Checking the memory pressure while 0% are used should not produce any
+  // events.
+  monitor->SetMemoryInPercentOverride(0);
+  ResetOnMemoryPressureCalled();
+
+  monitor->CheckMemoryPressureForTest();
+  RunLoop().RunUntilIdle();
+  EXPECT_FALSE(WasOnMemoryPressureCalled());
+  EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
+            monitor->GetCurrentPressureLevel());
+
+  // Setting the memory level to 80% should produce a moderate pressure level.
+  monitor->SetMemoryInPercentOverride(80);
+  monitor->CheckMemoryPressureForTest();
+  RunLoop().RunUntilIdle();
+  EXPECT_TRUE(WasOnMemoryPressureCalled());
+  EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
+            monitor->GetCurrentPressureLevel());
+  EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
+            on_memory_pressure_level);
+
+  // We need to check that the event gets reposted after a while.
+  int i = 0;
+  for (; i < 100; i++) {
+    monitor->CheckMemoryPressureForTest();
+    RunLoop().RunUntilIdle();
+    EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
+              monitor->GetCurrentPressureLevel());
+    if (WasOnMemoryPressureCalled()) {
+      EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
+                on_memory_pressure_level);
+      break;
+    }
+  }
+  // Should be more than 5 and less than 100.
+  EXPECT_LE(5, i);
+  EXPECT_GE(99, i);
+
+  // Setting the memory usage to 99% should produce critical levels.
+  monitor->SetMemoryInPercentOverride(99);
+  monitor->CheckMemoryPressureForTest();
+  RunLoop().RunUntilIdle();
+  EXPECT_TRUE(WasOnMemoryPressureCalled());
+  EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL,
+            on_memory_pressure_level);
+  EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL,
+            monitor->GetCurrentPressureLevel());
+
+  // Calling it again should immediately produce a second call.
+  monitor->CheckMemoryPressureForTest();
+  RunLoop().RunUntilIdle();
+  EXPECT_TRUE(WasOnMemoryPressureCalled());
+  EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL,
+            on_memory_pressure_level);
+  EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL,
+            monitor->GetCurrentPressureLevel());
+
+  // When lowering the pressure again we should not get an event, but the
+  // pressure should go back to moderate.
+  monitor->SetMemoryInPercentOverride(80);
+  monitor->CheckMemoryPressureForTest();
+  RunLoop().RunUntilIdle();
+  EXPECT_FALSE(WasOnMemoryPressureCalled());
+  EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
+            monitor->GetCurrentPressureLevel());
+
+  // We should need exactly the same amount of calls as before, before the next
+  // call comes in.
+  int j = 0;
+  for (; j < 100; j++) {
+    monitor->CheckMemoryPressureForTest();
+    RunLoop().RunUntilIdle();
+    EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
+              monitor->GetCurrentPressureLevel());
+    if (WasOnMemoryPressureCalled()) {
+      EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
+                on_memory_pressure_level);
+      break;
+    }
+  }
+  // We should have needed exactly the same amount of checks as before.
+  EXPECT_EQ(j, i);
+}
+
+}  // namespace chromeos
+}  // namespace base
diff --git a/base/memory/memory_pressure_monitor_mac.cc b/base/memory/memory_pressure_monitor_mac.cc
new file mode 100644
index 0000000..678c276
--- /dev/null
+++ b/base/memory/memory_pressure_monitor_mac.cc
@@ -0,0 +1,190 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/memory_pressure_monitor_mac.h"
+
+#include <CoreFoundation/CoreFoundation.h>
+
+#include <dlfcn.h>
+#include <stddef.h>
+#include <sys/sysctl.h>
+
+#include <cmath>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/mac/mac_util.h"
+
+// Redeclare for partial 10.9 availability.
+DISPATCH_EXPORT const struct dispatch_source_type_s
+    _dispatch_source_type_memorypressure;
+
+namespace {
+static const int kUMATickSize = 5;
+}  // namespace
+
+namespace base {
+namespace mac {
+
+MemoryPressureListener::MemoryPressureLevel
+MemoryPressureMonitor::MemoryPressureLevelForMacMemoryPressureLevel(
+    int mac_memory_pressure_level) {
+  switch (mac_memory_pressure_level) {
+    case DISPATCH_MEMORYPRESSURE_NORMAL:
+      return MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE;
+    case DISPATCH_MEMORYPRESSURE_WARN:
+      return MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE;
+    case DISPATCH_MEMORYPRESSURE_CRITICAL:
+      return MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL;
+  }
+  return MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE;
+}
+
+void MemoryPressureMonitor::OnRunLoopExit(CFRunLoopObserverRef observer,
+                                          CFRunLoopActivity activity,
+                                          void* info) {
+  MemoryPressureMonitor* self = static_cast<MemoryPressureMonitor*>(info);
+  self->UpdatePressureLevelOnRunLoopExit();
+}
+
+MemoryPressureMonitor::MemoryPressureMonitor()
+    : memory_level_event_source_(dispatch_source_create(
+          DISPATCH_SOURCE_TYPE_MEMORYPRESSURE,
+          0,
+          DISPATCH_MEMORYPRESSURE_WARN | DISPATCH_MEMORYPRESSURE_CRITICAL |
+              DISPATCH_MEMORYPRESSURE_NORMAL,
+          dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0))),
+      dispatch_callback_(
+          base::Bind(&MemoryPressureListener::NotifyMemoryPressure)),
+      last_statistic_report_time_(CFAbsoluteTimeGetCurrent()),
+      last_pressure_level_(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE),
+      subtick_seconds_(0) {
+  // Attach an event handler to the memory pressure event source.
+  if (memory_level_event_source_.get()) {
+    dispatch_source_set_event_handler(memory_level_event_source_, ^{
+      OnMemoryPressureChanged(memory_level_event_source_.get(),
+                              dispatch_callback_);
+    });
+
+    // Start monitoring the event source.
+    dispatch_resume(memory_level_event_source_);
+  }
+
+  // Create a CFRunLoopObserver to check the memory pressure at the end of
+  // every pass through the event loop (modulo kUMATickSize).
+  CFRunLoopObserverContext observer_context = {0, this, NULL, NULL, NULL};
+
+  exit_observer_.reset(
+      CFRunLoopObserverCreate(kCFAllocatorDefault, kCFRunLoopExit, true, 0,
+                              OnRunLoopExit, &observer_context));
+
+  CFRunLoopRef run_loop = CFRunLoopGetCurrent();
+  CFRunLoopAddObserver(run_loop, exit_observer_, kCFRunLoopCommonModes);
+  CFRunLoopAddObserver(run_loop, exit_observer_,
+                       kMessageLoopExclusiveRunLoopMode);
+}
+
+MemoryPressureMonitor::~MemoryPressureMonitor() {
+  // Detach from the run loop.
+  CFRunLoopRef run_loop = CFRunLoopGetCurrent();
+  CFRunLoopRemoveObserver(run_loop, exit_observer_, kCFRunLoopCommonModes);
+  CFRunLoopRemoveObserver(run_loop, exit_observer_,
+                          kMessageLoopExclusiveRunLoopMode);
+
+  // Remove the memory pressure event source.
+  if (memory_level_event_source_.get()) {
+    dispatch_source_cancel(memory_level_event_source_);
+  }
+}
+
+int MemoryPressureMonitor::GetMacMemoryPressureLevel() {
+  // Get the raw memory pressure level from macOS.
+  int mac_memory_pressure_level;
+  size_t length = sizeof(int);
+  sysctlbyname("kern.memorystatus_vm_pressure_level",
+               &mac_memory_pressure_level, &length, nullptr, 0);
+
+  return mac_memory_pressure_level;
+}
+
+void MemoryPressureMonitor::UpdatePressureLevel() {
+  // Get the current macOS pressure level and convert to the corresponding
+  // Chrome pressure level.
+  int mac_memory_pressure_level = GetMacMemoryPressureLevel();
+  MemoryPressureListener::MemoryPressureLevel new_pressure_level =
+      MemoryPressureLevelForMacMemoryPressureLevel(mac_memory_pressure_level);
+
+  // Compute the number of "ticks" spent at |last_pressure_level_| (since the
+  // last report sent to UMA).
+  CFTimeInterval now = CFAbsoluteTimeGetCurrent();
+  CFTimeInterval time_since_last_report = now - last_statistic_report_time_;
+  last_statistic_report_time_ = now;
+
+  double accumulated_time = time_since_last_report + subtick_seconds_;
+  int ticks_to_report = static_cast<int>(accumulated_time / kUMATickSize);
+  // Save for later the seconds that didn't make it into a full tick.
+  subtick_seconds_ = std::fmod(accumulated_time, kUMATickSize);
+
+  // Round the tick count up on a pressure level change to ensure we capture it.
+  bool pressure_level_changed = (new_pressure_level != last_pressure_level_);
+  if (pressure_level_changed && ticks_to_report < 1) {
+    ticks_to_report = 1;
+    subtick_seconds_ = 0;
+  }
+
+  // Send elapsed ticks to UMA.
+  if (ticks_to_report >= 1) {
+    RecordMemoryPressure(last_pressure_level_, ticks_to_report);
+  }
+
+  // Save the now-current memory pressure level.
+  last_pressure_level_ = new_pressure_level;
+}
+
+void MemoryPressureMonitor::UpdatePressureLevelOnRunLoopExit() {
+  // Wait until it's time to check the pressure level.
+  CFTimeInterval now = CFAbsoluteTimeGetCurrent();
+  if (now >= next_run_loop_update_time_) {
+    UpdatePressureLevel();
+
+    // Update again in kUMATickSize seconds. We can update at any frequency,
+    // but because we're only checking memory pressure levels for UMA there's
+    // no need to update more frequently than we're keeping statistics on.
+    next_run_loop_update_time_ = now + kUMATickSize - subtick_seconds_;
+  }
+}
+
+// Static.
+int MemoryPressureMonitor::GetSecondsPerUMATick() {
+  return kUMATickSize;
+}
+
+MemoryPressureListener::MemoryPressureLevel
+MemoryPressureMonitor::GetCurrentPressureLevel() {
+  return last_pressure_level_;
+}
+
+void MemoryPressureMonitor::OnMemoryPressureChanged(
+    dispatch_source_s* event_source,
+    const MemoryPressureMonitor::DispatchCallback& dispatch_callback) {
+  // The OS has sent a notification that the memory pressure level has changed.
+  // Go through the normal memory pressure level checking mechanism so that
+  // last_pressure_level_ and UMA get updated to the current value.
+  UpdatePressureLevel();
+
+  // Run the callback that's waiting on memory pressure change notifications.
+  // The convention is to not send notifications on memory pressure returning to
+  // normal.
+  if (last_pressure_level_ !=
+      MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE)
+    dispatch_callback.Run(last_pressure_level_);
+}
+
+void MemoryPressureMonitor::SetDispatchCallback(
+    const DispatchCallback& callback) {
+  dispatch_callback_ = callback;
+}
+
+}  // namespace mac
+}  // namespace base
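
The tick bookkeeping in UpdatePressureLevel() is easiest to follow with numbers; a worked example assuming kUMATickSize = 5:

    // Assume 13.5 s elapsed since the last report and 2.0 s carried over:
    //   accumulated_time = 13.5 + 2.0      = 15.5
    //   ticks_to_report  = floor(15.5 / 5) = 3
    //   subtick_seconds_ = fmod(15.5, 5)   = 0.5
    // Three ticks are attributed to |last_pressure_level_| and the 0.5 s
    // remainder is carried into the next calculation.
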
diff --git a/base/memory/memory_pressure_monitor_mac.h b/base/memory/memory_pressure_monitor_mac.h
new file mode 100644
index 0000000..b85b6c9
--- /dev/null
+++ b/base/memory/memory_pressure_monitor_mac.h
@@ -0,0 +1,91 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_MEMORY_PRESSURE_MONITOR_MAC_H_
+#define BASE_MEMORY_MEMORY_PRESSURE_MONITOR_MAC_H_
+
+#include <CoreFoundation/CFDate.h>
+#include <dispatch/dispatch.h>
+
+#include "base/base_export.h"
+#include "base/mac/scoped_cftyperef.h"
+#include "base/mac/scoped_dispatch_object.h"
+#include "base/macros.h"
+#include "base/memory/memory_pressure_listener.h"
+#include "base/memory/memory_pressure_monitor.h"
+#include "base/message_loop/message_pump_mac.h"
+
+namespace base {
+namespace mac {
+
+class TestMemoryPressureMonitor;
+
+// Declares the interface for the Mac MemoryPressureMonitor, which reports
+// memory pressure events and status.
+class BASE_EXPORT MemoryPressureMonitor : public base::MemoryPressureMonitor {
+ public:
+  MemoryPressureMonitor();
+  ~MemoryPressureMonitor() override;
+
+  // Returns the currently-observed memory pressure.
+  MemoryPressureLevel GetCurrentPressureLevel() override;
+
+  void SetDispatchCallback(const DispatchCallback& callback) override;
+
+ private:
+  friend TestMemoryPressureMonitor;
+
+  static MemoryPressureLevel MemoryPressureLevelForMacMemoryPressureLevel(
+      int mac_memory_pressure_level);
+  static void OnRunLoopExit(CFRunLoopObserverRef observer,
+                            CFRunLoopActivity activity,
+                            void* info);
+  // Returns the raw memory pressure level from macOS. Exposed for
+  // unit testing.
+  virtual int GetMacMemoryPressureLevel();
+
+  // Updates |last_pressure_level_| with the current memory pressure level.
+  void UpdatePressureLevel();
+
+  // Updates |last_pressure_level_| at the end of every run loop pass (modulo
+  // some number of seconds).
+  void UpdatePressureLevelOnRunLoopExit();
+
+  // Run |dispatch_callback| on memory pressure notifications from the OS.
+  void OnMemoryPressureChanged(dispatch_source_s* event_source,
+                               const DispatchCallback& dispatch_callback);
+
+  // Returns the number of seconds per UMA tick (for statistics recording).
+  // Exposed for testing.
+  static int GetSecondsPerUMATick();
+
+  // The dispatch source that generates memory pressure change notifications.
+  ScopedDispatchObject<dispatch_source_t> memory_level_event_source_;
+
+  // The callback to call upon receiving a memory pressure change notification.
+  DispatchCallback dispatch_callback_;
+
+  // Last UMA report time.
+  CFTimeInterval last_statistic_report_time_;
+
+  // Most-recent memory pressure level.
+  MemoryPressureLevel last_pressure_level_;
+
+  // Observer that tracks exits from the main run loop.
+  ScopedCFTypeRef<CFRunLoopObserverRef> exit_observer_;
+
+  // Next time to update the memory pressure level when exiting the run loop.
+  CFTimeInterval next_run_loop_update_time_;
+
+  // Seconds left over from the last UMA tick calculation (to be added to the
+  // next calculation).
+  CFTimeInterval subtick_seconds_;
+
+  DISALLOW_COPY_AND_ASSIGN(MemoryPressureMonitor);
+};
+
+}  // namespace mac
+}  // namespace base
+
+#endif  // BASE_MEMORY_MEMORY_PRESSURE_MONITOR_MAC_H_
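
For readers unfamiliar with the GCD source used above, a minimal standalone sketch of DISPATCH_SOURCE_TYPE_MEMORYPRESSURE (requires clang's blocks extension; WatchMemoryPressure and the handler body are hypothetical):

    #include <dispatch/dispatch.h>

    dispatch_source_t WatchMemoryPressure() {
      dispatch_source_t source = dispatch_source_create(
          DISPATCH_SOURCE_TYPE_MEMORYPRESSURE, 0,
          DISPATCH_MEMORYPRESSURE_NORMAL | DISPATCH_MEMORYPRESSURE_WARN |
              DISPATCH_MEMORYPRESSURE_CRITICAL,
          dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0));
      dispatch_source_set_event_handler(source, ^{
        // For memory pressure sources, the source data is the
        // DISPATCH_MEMORYPRESSURE_* flag that fired.
        unsigned long level = dispatch_source_get_data(source);
        (void)level;  // React to the level here.
      });
      dispatch_resume(source);  // Sources are created suspended.
      return source;
    }
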
diff --git a/base/memory/memory_pressure_monitor_mac_unittest.cc b/base/memory/memory_pressure_monitor_mac_unittest.cc
new file mode 100644
index 0000000..ff464fb
--- /dev/null
+++ b/base/memory/memory_pressure_monitor_mac_unittest.cc
@@ -0,0 +1,228 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/memory_pressure_monitor_mac.h"
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/mac/scoped_cftyperef.h"
+#include "base/macros.h"
+#include "base/test/histogram_tester.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace mac {
+
+class TestMemoryPressureMonitor : public MemoryPressureMonitor {
+ public:
+  using MemoryPressureMonitor::MemoryPressureLevelForMacMemoryPressureLevel;
+
+  // A HistogramTester for verifying correct UMA stat generation.
+  base::HistogramTester tester;
+
+  TestMemoryPressureMonitor() { }
+
+  // Clears the next run loop update time so that the next pass of the run
+  // loop checks the memory pressure level immediately. Normally there's a
+  // 5 second delay between pressure readings.
+  void ResetRunLoopUpdateTime() { next_run_loop_update_time_ = 0; }
+
+  // Sets the last UMA stat report time. Time spent in memory pressure is
+  // recorded in 5-second "ticks" from the last time statistics were recorded.
+  void SetLastStatisticReportTime(CFTimeInterval time) {
+    last_statistic_report_time_ = time;
+  }
+
+  // The raw macOS memory pressure level that the monitor will read via
+  // GetMacMemoryPressureLevel().
+  int macos_pressure_level_for_testing_;
+
+  // Exposes the UpdatePressureLevel() method for testing.
+  void UpdatePressureLevel() { MemoryPressureMonitor::UpdatePressureLevel(); }
+
+  // Returns the number of seconds left over from the last UMA tick
+  // calculation.
+  int SubTickSeconds() { return subtick_seconds_; }
+
+  // Returns the number of seconds per UMA tick.
+  static int GetSecondsPerUMATick() {
+    return MemoryPressureMonitor::GetSecondsPerUMATick();
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(TestMemoryPressureMonitor);
+
+  int GetMacMemoryPressureLevel() override {
+    return macos_pressure_level_for_testing_;
+  }
+};
+
+TEST(MacMemoryPressureMonitorTest, MemoryPressureFromMacMemoryPressure) {
+  EXPECT_EQ(
+      MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
+      TestMemoryPressureMonitor::MemoryPressureLevelForMacMemoryPressureLevel(
+          DISPATCH_MEMORYPRESSURE_NORMAL));
+  EXPECT_EQ(
+      MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
+      TestMemoryPressureMonitor::MemoryPressureLevelForMacMemoryPressureLevel(
+          DISPATCH_MEMORYPRESSURE_WARN));
+  EXPECT_EQ(
+      MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL,
+      TestMemoryPressureMonitor::MemoryPressureLevelForMacMemoryPressureLevel(
+          DISPATCH_MEMORYPRESSURE_CRITICAL));
+  EXPECT_EQ(
+      MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
+      TestMemoryPressureMonitor::MemoryPressureLevelForMacMemoryPressureLevel(
+          0));
+  EXPECT_EQ(
+      MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
+      TestMemoryPressureMonitor::MemoryPressureLevelForMacMemoryPressureLevel(
+          3));
+  EXPECT_EQ(
+      MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
+      TestMemoryPressureMonitor::MemoryPressureLevelForMacMemoryPressureLevel(
+          5));
+  EXPECT_EQ(
+      MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
+      TestMemoryPressureMonitor::MemoryPressureLevelForMacMemoryPressureLevel(
+          -1));
+}
+
+TEST(MacMemoryPressureMonitorTest, CurrentMemoryPressure) {
+  TestMemoryPressureMonitor monitor;
+
+  MemoryPressureListener::MemoryPressureLevel memory_pressure =
+      monitor.GetCurrentPressureLevel();
+  EXPECT_TRUE(memory_pressure ==
+                  MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE ||
+              memory_pressure ==
+                  MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE ||
+              memory_pressure ==
+                  MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL);
+}
+
+TEST(MacMemoryPressureMonitorTest, MemoryPressureConversion) {
+  TestMemoryPressureMonitor monitor;
+
+  monitor.macos_pressure_level_for_testing_ = DISPATCH_MEMORYPRESSURE_NORMAL;
+  monitor.UpdatePressureLevel();
+  MemoryPressureListener::MemoryPressureLevel memory_pressure =
+      monitor.GetCurrentPressureLevel();
+  EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
+            memory_pressure);
+
+  monitor.macos_pressure_level_for_testing_ = DISPATCH_MEMORYPRESSURE_WARN;
+  monitor.UpdatePressureLevel();
+  memory_pressure = monitor.GetCurrentPressureLevel();
+  EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
+            memory_pressure);
+
+  monitor.macos_pressure_level_for_testing_ = DISPATCH_MEMORYPRESSURE_CRITICAL;
+  monitor.UpdatePressureLevel();
+  memory_pressure = monitor.GetCurrentPressureLevel();
+  EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL,
+            memory_pressure);
+}
+
+TEST(MacMemoryPressureMonitorTest, MemoryPressureRunLoopChecking) {
+  TestMemoryPressureMonitor monitor;
+
+  // To test grabbing the memory pressure at the end of the run loop, we have to
+  // run the run loop, but to do that the run loop needs a run loop source. Add
+  // a timer as the source. We know that the exit observer is attached to
+  // the kMessageLoopExclusiveRunLoopMode mode, so use that mode.
+  ScopedCFTypeRef<CFRunLoopTimerRef> timer_ref(CFRunLoopTimerCreate(
+      NULL, CFAbsoluteTimeGetCurrent() + 10, 0, 0, 0, nullptr, nullptr));
+  CFRunLoopAddTimer(CFRunLoopGetCurrent(), timer_ref,
+                    kMessageLoopExclusiveRunLoopMode);
+
+  monitor.macos_pressure_level_for_testing_ = DISPATCH_MEMORYPRESSURE_WARN;
+  monitor.ResetRunLoopUpdateTime();
+  CFRunLoopRunInMode(kMessageLoopExclusiveRunLoopMode, 0, true);
+  EXPECT_EQ(monitor.GetCurrentPressureLevel(),
+            MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE);
+
+  monitor.macos_pressure_level_for_testing_ = DISPATCH_MEMORYPRESSURE_CRITICAL;
+  monitor.ResetRunLoopUpdateTime();
+  CFRunLoopRunInMode(kMessageLoopExclusiveRunLoopMode, 0, true);
+  EXPECT_EQ(monitor.GetCurrentPressureLevel(),
+            MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL);
+
+  monitor.macos_pressure_level_for_testing_ = DISPATCH_MEMORYPRESSURE_NORMAL;
+  monitor.ResetRunLoopUpdateTime();
+  CFRunLoopRunInMode(kMessageLoopExclusiveRunLoopMode, 0, true);
+  EXPECT_EQ(monitor.GetCurrentPressureLevel(),
+            MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE);
+
+  CFRunLoopRemoveTimer(CFRunLoopGetCurrent(), timer_ref,
+                       kMessageLoopExclusiveRunLoopMode);
+}
+
+TEST(MacMemoryPressureMonitorTest, RecordMemoryPressureStats) {
+  TestMemoryPressureMonitor monitor;
+  const char* kHistogram = "Memory.PressureLevel";
+  CFTimeInterval now = CFAbsoluteTimeGetCurrent();
+  const int seconds_per_tick =
+      TestMemoryPressureMonitor::GetSecondsPerUMATick();
+
+  // Set the initial pressure level.
+  monitor.macos_pressure_level_for_testing_ = DISPATCH_MEMORYPRESSURE_NORMAL;
+  // Incur one UMA tick of time (and include one extra second of elapsed time).
+  monitor.SetLastStatisticReportTime(now - (seconds_per_tick + 1));
+  monitor.UpdatePressureLevel();
+  monitor.tester.ExpectTotalCount(kHistogram, 1);
+  monitor.tester.ExpectBucketCount(kHistogram, 0, 1);
+  // The report time above included an extra second so there should be 1
+  // sub-tick second left over.
+  EXPECT_EQ(1, monitor.SubTickSeconds());
+
+  // Simulate sitting in normal pressure for 1 second less than 6 UMA tick
+  // seconds and then elevating to warning. With the left over sub-tick second
+  // from above, the total elapsed ticks should be an even 6 UMA ticks.
+  monitor.macos_pressure_level_for_testing_ = DISPATCH_MEMORYPRESSURE_WARN;
+  monitor.SetLastStatisticReportTime(now - (seconds_per_tick * 6 - 1));
+  monitor.UpdatePressureLevel();
+  monitor.tester.ExpectTotalCount(kHistogram, 7);
+  monitor.tester.ExpectBucketCount(kHistogram, 0, 7);
+  monitor.tester.ExpectBucketCount(kHistogram, 1, 0);
+  EXPECT_EQ(0, monitor.SubTickSeconds());
+
+  // Simulate sitting in warning pressure for 20 UMA ticks and 2 seconds, and
+  // then elevating to critical.
+  monitor.macos_pressure_level_for_testing_ = DISPATCH_MEMORYPRESSURE_CRITICAL;
+  monitor.SetLastStatisticReportTime(now - (20 * seconds_per_tick + 2));
+  monitor.UpdatePressureLevel();
+  monitor.tester.ExpectTotalCount(kHistogram, 27);
+  monitor.tester.ExpectBucketCount(kHistogram, 0, 7);
+  monitor.tester.ExpectBucketCount(kHistogram, 1, 20);
+  monitor.tester.ExpectBucketCount(kHistogram, 2, 0);
+  EXPECT_EQ(2, monitor.SubTickSeconds());
+
+  // A quick update while critical - the stats should not budge because less
+  // than 1 tick of time has elapsed.
+  monitor.macos_pressure_level_for_testing_ = DISPATCH_MEMORYPRESSURE_CRITICAL;
+  monitor.SetLastStatisticReportTime(now - 1);
+  monitor.UpdatePressureLevel();
+  monitor.tester.ExpectTotalCount(kHistogram, 27);
+  monitor.tester.ExpectBucketCount(kHistogram, 0, 7);
+  monitor.tester.ExpectBucketCount(kHistogram, 1, 20);
+  monitor.tester.ExpectBucketCount(kHistogram, 2, 0);
+  EXPECT_EQ(3, monitor.SubTickSeconds());
+
+  // A quick change back to normal. Less than 1 tick of time has elapsed, but
+  // in this case the pressure level changed, so the critical bucket should
+  // get another sample (otherwise we could miss quick level changes).
+  monitor.macos_pressure_level_for_testing_ = DISPATCH_MEMORYPRESSURE_NORMAL;
+  monitor.SetLastStatisticReportTime(now - 1);
+  monitor.UpdatePressureLevel();
+  monitor.tester.ExpectTotalCount(kHistogram, 28);
+  monitor.tester.ExpectBucketCount(kHistogram, 0, 7);
+  monitor.tester.ExpectBucketCount(kHistogram, 1, 20);
+  monitor.tester.ExpectBucketCount(kHistogram, 2, 1);
+  // When less than 1 tick of time has elapsed but the pressure level changed,
+  // the subtick remainder gets zeroed out.
+  EXPECT_EQ(0, monitor.SubTickSeconds());
+}
+}  // namespace mac
+}  // namespace base
diff --git a/base/memory/memory_pressure_monitor_unittest.cc b/base/memory/memory_pressure_monitor_unittest.cc
new file mode 100644
index 0000000..e974741
--- /dev/null
+++ b/base/memory/memory_pressure_monitor_unittest.cc
@@ -0,0 +1,33 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/memory_pressure_monitor.h"
+
+#include "base/macros.h"
+#include "base/memory/memory_pressure_listener.h"
+#include "base/test/histogram_tester.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(MemoryPressureMonitorTest, RecordMemoryPressure) {
+  base::HistogramTester tester;
+  const char* kHistogram = "Memory.PressureLevel";
+
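+  // The bucket indices below correspond to the MemoryPressureLevel enum values
+  // (0 == NONE, 1 == MODERATE, 2 == CRITICAL), which is what the expectations
+  // assume.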
+  MemoryPressureMonitor::RecordMemoryPressure(
+      MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE, 3);
+  tester.ExpectTotalCount(kHistogram, 3);
+  tester.ExpectBucketCount(kHistogram, 0, 3);
+
+  MemoryPressureMonitor::RecordMemoryPressure(
+      MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE, 2);
+  tester.ExpectTotalCount(kHistogram, 5);
+  tester.ExpectBucketCount(kHistogram, 1, 2);
+
+  MemoryPressureMonitor::RecordMemoryPressure(
+      MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL, 1);
+  tester.ExpectTotalCount(kHistogram, 6);
+  tester.ExpectBucketCount(kHistogram, 2, 1);
+}
+}  // namespace base
diff --git a/base/memory/memory_pressure_monitor_win.cc b/base/memory/memory_pressure_monitor_win.cc
new file mode 100644
index 0000000..3effe2c
--- /dev/null
+++ b/base/memory/memory_pressure_monitor_win.cc
@@ -0,0 +1,233 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/memory_pressure_monitor_win.h"
+
+#include <windows.h>
+
+#include "base/single_thread_task_runner.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/time/time.h"
+
+namespace base {
+namespace win {
+
+namespace {
+
+static const DWORDLONG kMBBytes = 1024 * 1024;
+
+}  // namespace
+
+// The following constants have been lifted from similar values in the ChromeOS
+// memory pressure monitor. The values were determined experimentally to ensure
+// sufficient responsiveness of the memory pressure subsystem, and minimal
+// overhead.
+const int MemoryPressureMonitor::kPollingIntervalMs = 5000;
+const int MemoryPressureMonitor::kModeratePressureCooldownMs = 10000;
+const int MemoryPressureMonitor::kModeratePressureCooldownCycles =
+    kModeratePressureCooldownMs / kPollingIntervalMs;
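+// Note: with the values above this works out to 10000 / 5000 = 2 polling
+// cycles, so a sustained moderate pressure signal is re-emitted roughly every
+// ten seconds.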
+
+// TODO(chrisha): Explore the following constants further with an experiment.
+
+// A system is considered 'high memory' if it has more than 1.5GB of system
+// memory available for use by the memory manager (not reserved for hardware
+// and drivers). This is a fuzzy version of the ~2GB discussed below.
+const int MemoryPressureMonitor::kLargeMemoryThresholdMb = 1536;
+
+// These are the default thresholds used for systems with < ~2GB of physical
+// memory. Such systems have been observed to always maintain ~100MB of
+// available memory, paging until that is the case. To try to avoid paging, a
+// threshold slightly above this is chosen. The moderate threshold is somewhat
+// less grounded in reality and is chosen as 2.5x the critical threshold.
+const int MemoryPressureMonitor::kSmallMemoryDefaultModerateThresholdMb = 500;
+const int MemoryPressureMonitor::kSmallMemoryDefaultCriticalThresholdMb = 200;
+
+// These are the default thresholds used for systems with >= ~2GB of physical
+// memory. Such systems have been observed to always maintain ~300MB of
+// available memory, paging until that is the case.
+const int MemoryPressureMonitor::kLargeMemoryDefaultModerateThresholdMb = 1000;
+const int MemoryPressureMonitor::kLargeMemoryDefaultCriticalThresholdMb = 400;
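+// For example, given the constants above: a machine with 4096MB of physical
+// memory is treated as large-memory (4096 >= 1536) and gets the 1000MB/400MB
+// thresholds, while a 1024MB machine gets the 500MB/200MB thresholds.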
+
+MemoryPressureMonitor::MemoryPressureMonitor()
+    : moderate_threshold_mb_(0),
+      critical_threshold_mb_(0),
+      current_memory_pressure_level_(
+          MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE),
+      moderate_pressure_repeat_count_(0),
+      dispatch_callback_(
+          base::Bind(&MemoryPressureListener::NotifyMemoryPressure)),
+      weak_ptr_factory_(this) {
+  InferThresholds();
+  StartObserving();
+}
+
+MemoryPressureMonitor::MemoryPressureMonitor(int moderate_threshold_mb,
+                                             int critical_threshold_mb)
+    : moderate_threshold_mb_(moderate_threshold_mb),
+      critical_threshold_mb_(critical_threshold_mb),
+      current_memory_pressure_level_(
+          MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE),
+      moderate_pressure_repeat_count_(0),
+      dispatch_callback_(
+          base::Bind(&MemoryPressureListener::NotifyMemoryPressure)),
+      weak_ptr_factory_(this) {
+  DCHECK_GE(moderate_threshold_mb_, critical_threshold_mb_);
+  DCHECK_LE(0, critical_threshold_mb_);
+  StartObserving();
+}
+
+MemoryPressureMonitor::~MemoryPressureMonitor() {
+  StopObserving();
+}
+
+void MemoryPressureMonitor::CheckMemoryPressureSoon() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, Bind(&MemoryPressureMonitor::CheckMemoryPressure,
+                      weak_ptr_factory_.GetWeakPtr()));
+}
+
+MemoryPressureListener::MemoryPressureLevel
+MemoryPressureMonitor::GetCurrentPressureLevel() {
+  return current_memory_pressure_level_;
+}
+
+void MemoryPressureMonitor::InferThresholds() {
+  // Default to a 'high' memory situation, which uses more conservative
+  // thresholds.
+  bool high_memory = true;
+  MEMORYSTATUSEX mem_status = {};
+  if (GetSystemMemoryStatus(&mem_status)) {
+    static const DWORDLONG kLargeMemoryThresholdBytes =
+        static_cast<DWORDLONG>(kLargeMemoryThresholdMb) * kMBBytes;
+    high_memory = mem_status.ullTotalPhys >= kLargeMemoryThresholdBytes;
+  }
+
+  if (high_memory) {
+    moderate_threshold_mb_ = kLargeMemoryDefaultModerateThresholdMb;
+    critical_threshold_mb_ = kLargeMemoryDefaultCriticalThresholdMb;
+  } else {
+    moderate_threshold_mb_ = kSmallMemoryDefaultModerateThresholdMb;
+    critical_threshold_mb_ = kSmallMemoryDefaultCriticalThresholdMb;
+  }
+}
+
+void MemoryPressureMonitor::StartObserving() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  timer_.Start(FROM_HERE,
+               TimeDelta::FromMilliseconds(kPollingIntervalMs),
+               Bind(&MemoryPressureMonitor::
+                        CheckMemoryPressureAndRecordStatistics,
+                    weak_ptr_factory_.GetWeakPtr()));
+}
+
+void MemoryPressureMonitor::StopObserving() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  // If StartObserving failed, StopObserving will still get called.
+  timer_.Stop();
+  weak_ptr_factory_.InvalidateWeakPtrs();
+}
+
+void MemoryPressureMonitor::CheckMemoryPressure() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  // Get the previous pressure level and update the current one.
+  MemoryPressureLevel old_pressure = current_memory_pressure_level_;
+  current_memory_pressure_level_ = CalculateCurrentPressureLevel();
+
+  // |notify| will be set to true if MemoryPressureListeners need to be
+  // notified of a memory pressure level state change.
+  bool notify = false;
+  switch (current_memory_pressure_level_) {
+    case MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE:
+      break;
+
+    case MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE:
+      if (old_pressure != current_memory_pressure_level_) {
+        // This is a new transition to moderate pressure so notify.
+        moderate_pressure_repeat_count_ = 0;
+        notify = true;
+      } else {
+        // Already in moderate pressure, only notify if sustained over the
+        // cooldown period.
+        if (++moderate_pressure_repeat_count_ ==
+                kModeratePressureCooldownCycles) {
+          moderate_pressure_repeat_count_ = 0;
+          notify = true;
+        }
+      }
+      break;
+
+    case MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL:
+      // Always notify of critical pressure levels.
+      notify = true;
+      break;
+  }
+
+  if (!notify)
+    return;
+
+  // Emit a notification of the current memory pressure level. This can only
+  // happen for moderate and critical pressure levels.
+  DCHECK_NE(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
+            current_memory_pressure_level_);
+  dispatch_callback_.Run(current_memory_pressure_level_);
+}
+
+void MemoryPressureMonitor::CheckMemoryPressureAndRecordStatistics() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  CheckMemoryPressure();
+
+  RecordMemoryPressure(current_memory_pressure_level_, 1);
+}
+
+MemoryPressureListener::MemoryPressureLevel
+MemoryPressureMonitor::CalculateCurrentPressureLevel() {
+  MEMORYSTATUSEX mem_status = {};
+  if (!GetSystemMemoryStatus(&mem_status))
+    return MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE;
+
+  // How much system memory is actively available for use right now, in MBs.
+  int phys_free = static_cast<int>(mem_status.ullAvailPhys / kMBBytes);
+
+  // TODO(chrisha): This should eventually care about address space pressure,
+  // but the browser process (where this is running) effectively never runs out
+  // of address space. Renderers occasionally do, but it does them no good to
+  // have the browser process monitor address space pressure. Long term,
+  // renderers should run their own address space pressure monitors and act
+  // accordingly, with the browser making cross-process decisions based on
+  // system memory pressure.
+
+  // Determine if the physical memory is under critical memory pressure.
+  if (phys_free <= critical_threshold_mb_)
+    return MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL;
+
+  // Determine if the physical memory is under moderate memory pressure.
+  if (phys_free <= moderate_threshold_mb_)
+    return MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE;
+
+  // No memory pressure was detected.
+  return MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE;
+}
+
+bool MemoryPressureMonitor::GetSystemMemoryStatus(
+    MEMORYSTATUSEX* mem_status) {
+  DCHECK(mem_status != nullptr);
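+  // ::GlobalMemoryStatusEx requires dwLength to be initialized to the size of
+  // the structure before the call.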
+  mem_status->dwLength = sizeof(*mem_status);
+  if (!::GlobalMemoryStatusEx(mem_status))
+    return false;
+  return true;
+}
+
+void MemoryPressureMonitor::SetDispatchCallback(
+    const DispatchCallback& callback) {
+  dispatch_callback_ = callback;
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/memory/memory_pressure_monitor_win.h b/base/memory/memory_pressure_monitor_win.h
new file mode 100644
index 0000000..a65c191
--- /dev/null
+++ b/base/memory/memory_pressure_monitor_win.h
@@ -0,0 +1,148 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_MEMORY_PRESSURE_MONITOR_WIN_H_
+#define BASE_MEMORY_MEMORY_PRESSURE_MONITOR_WIN_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/memory/memory_pressure_listener.h"
+#include "base/memory/memory_pressure_monitor.h"
+#include "base/memory/weak_ptr.h"
+#include "base/threading/thread_checker.h"
+#include "base/timer/timer.h"
+
+// Forward declaration so we do not pull in windows.h.
+typedef struct _MEMORYSTATUSEX MEMORYSTATUSEX;
+
+namespace base {
+namespace win {
+
+// Windows memory pressure monitor. Because there is no OS-provided signal,
+// this polls at a low frequency (every kPollingIntervalMs milliseconds, i.e.
+// every five seconds) and applies internal hysteresis.
+class BASE_EXPORT MemoryPressureMonitor : public base::MemoryPressureMonitor {
+ public:
+  // Constants governing the polling and hysteresis behaviour of the observer.
+
+  // The polling interval, in milliseconds. While under critical pressure, this
+  // also determines how often cleanup attempts are repeated.
+  static const int kPollingIntervalMs;
+  // The time that should pass between two successive moderate memory pressure
+  // signals, in milliseconds.
+  static const int kModeratePressureCooldownMs;
+  // The number of cycles that should pass between two successive moderate
+  // memory pressure signals.
+  static const int kModeratePressureCooldownCycles;
+
+  // Constants governing the memory pressure level detection.
+
+  // The amount of total system memory beyond which a system is considered to be
+  // a large-memory system.
+  static const int kLargeMemoryThresholdMb;
+  // Default minimum free memory thresholds for small-memory systems, in MB.
+  static const int kSmallMemoryDefaultModerateThresholdMb;
+  static const int kSmallMemoryDefaultCriticalThresholdMb;
+  // Default minimum free memory thresholds for large-memory systems, in MB.
+  static const int kLargeMemoryDefaultModerateThresholdMb;
+  static const int kLargeMemoryDefaultCriticalThresholdMb;
+
+  // Default constructor. Will choose thresholds automatically based on the
+  // actual amount of system memory.
+  MemoryPressureMonitor();
+
+  // Constructor with explicit memory thresholds. These represent the amount of
+  // free memory below which the applicable memory pressure state engages.
+  MemoryPressureMonitor(int moderate_threshold_mb, int critical_threshold_mb);
+
+  ~MemoryPressureMonitor() override;
+
+  // Schedules a memory pressure check to run soon. This must be called on the
+  // same thread where the monitor was instantiated.
+  void CheckMemoryPressureSoon();
+
+  // Get the current memory pressure level. This can be called from any thread.
+  MemoryPressureLevel GetCurrentPressureLevel() override;
+  void SetDispatchCallback(const DispatchCallback& callback) override;
+
+  // Returns the moderate pressure level free memory threshold, in MB.
+  int moderate_threshold_mb() const { return moderate_threshold_mb_; }
+
+  // Returns the critical pressure level free memory threshold, in MB.
+  int critical_threshold_mb() const { return critical_threshold_mb_; }
+
+ protected:
+  // Internals are exposed for unittests.
+
+  // Automatically infers threshold values based on system memory. This invokes
+  // GetMemoryStatus so it can be mocked in unittests.
+  void InferThresholds();
+
+  // Starts observing the memory fill level. Calls to StartObserving should
+  // always be matched with calls to StopObserving.
+  void StartObserving();
+
+  // Stop observing the memory fill level. May be safely called if
+  // StartObserving has not been called. Must be called from the same thread on
+  // which the monitor was instantiated.
+  void StopObserving();
+
+  // Checks memory pressure, storing the current level, applying any hysteresis
+  // and emitting memory pressure level change signals as necessary. This
+  // function is called periodically while the monitor is observing memory
+  // pressure. This is split out from CheckMemoryPressureAndRecordStatistics so
+  // that it may be called by CheckMemoryPressureSoon and not invoke UMA
+  // logging. Must be called from the same thread on which the monitor was
+  // instantiated.
+  void CheckMemoryPressure();
+
+  // Wrapper to CheckMemoryPressure that also records the observed memory
+  // pressure level via an UMA enumeration. This is the function that is called
+  // periodically by the timer. Must be called from the same thread on which the
+  // monitor was instantiated.
+  void CheckMemoryPressureAndRecordStatistics();
+
+  // Calculates the current instantaneous memory pressure level. This does not
+  // use any hysteresis and simply returns the result at the current moment. Can
+  // be called on any thread.
+  MemoryPressureLevel CalculateCurrentPressureLevel();
+
+  // Gets system memory status. This is virtual as a unittesting hook. Returns
+  // true if the system call succeeds, false otherwise. Can be called on any
+  // thread.
+  virtual bool GetSystemMemoryStatus(MEMORYSTATUSEX* mem_status);
+
+ private:
+  // Threshold amounts of available memory that trigger pressure levels. See
+  // memory_pressure_monitor.cc for a discussion of reasonable values for these.
+  int moderate_threshold_mb_;
+  int critical_threshold_mb_;
+
+  // A periodic timer to check for memory pressure changes.
+  base::RepeatingTimer timer_;
+
+  // The current memory pressure.
+  MemoryPressureLevel current_memory_pressure_level_;
+
+  // To reduce the rate of moderate pressure events, this counts the number of
+  // polling cycles since the last event occurred. It is used by
+  // |CheckMemoryPressure| to apply hysteresis to the raw results of
+  // |CalculateCurrentPressureLevel|.
+  int moderate_pressure_repeat_count_;
+
+  // Ensures that this object is used from a single thread.
+  base::ThreadChecker thread_checker_;
+
+  DispatchCallback dispatch_callback_;
+
+  // Weak pointer factory to this object, used for scheduling calls to
+  // CheckMemoryPressure/CheckMemoryPressureAndRecordStatistics via |timer_|.
+  base::WeakPtrFactory<MemoryPressureMonitor> weak_ptr_factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(MemoryPressureMonitor);
+};
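+
+// A minimal usage sketch (the callback name is illustrative; the pattern
+// mirrors the one used in the unittests):
+//
+//   base::win::MemoryPressureMonitor monitor;  // Begins polling immediately.
+//   base::MemoryPressureListener listener(
+//       base::Bind(&OnMemoryPressure));  // OnMemoryPressure is hypothetical.
+//   // |listener| now receives moderate/critical pressure notifications.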
+
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_MEMORY_MEMORY_PRESSURE_MONITOR_WIN_H_
diff --git a/base/memory/memory_pressure_monitor_win_unittest.cc b/base/memory/memory_pressure_monitor_win_unittest.cc
new file mode 100644
index 0000000..1002a01
--- /dev/null
+++ b/base/memory/memory_pressure_monitor_win_unittest.cc
@@ -0,0 +1,299 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/memory_pressure_monitor_win.h"
+
+#include "base/macros.h"
+#include "base/memory/memory_pressure_listener.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace win {
+
+namespace {
+
+struct PressureSettings {
+  int phys_left_mb;
+  MemoryPressureListener::MemoryPressureLevel level;
+};
+
+}  // namespace
+
+// This is outside of the anonymous namespace so that it can be seen as a friend
+// to the monitor class.
+class TestMemoryPressureMonitor : public MemoryPressureMonitor {
+ public:
+  using MemoryPressureMonitor::CalculateCurrentPressureLevel;
+  using MemoryPressureMonitor::CheckMemoryPressure;
+
+  static const DWORDLONG kMBBytes = 1024 * 1024;
+
+  explicit TestMemoryPressureMonitor(bool large_memory)
+      : mem_status_() {
+    // Generate a plausible amount of memory.
+    mem_status_.ullTotalPhys =
+        static_cast<DWORDLONG>(GenerateTotalMemoryMb(large_memory)) * kMBBytes;
+
+    // Rerun InferThresholds using the test fixture's GetSystemMemoryStatus.
+    InferThresholds();
+    // Stop the timer.
+    StopObserving();
+  }
+
+  TestMemoryPressureMonitor(int system_memory_mb,
+                            int moderate_threshold_mb,
+                            int critical_threshold_mb)
+      : MemoryPressureMonitor(moderate_threshold_mb, critical_threshold_mb),
+        mem_status_() {
+    // Set the amount of system memory.
+    mem_status_.ullTotalPhys = static_cast<DWORDLONG>(
+        system_memory_mb * kMBBytes);
+
+    // Stop the timer.
+    StopObserving();
+  }
+
+  virtual ~TestMemoryPressureMonitor() {}
+
+  MOCK_METHOD1(OnMemoryPressure,
+               void(MemoryPressureListener::MemoryPressureLevel level));
+
+  // Generates an amount of total memory that is consistent with the requested
+  // memory model.
+  int GenerateTotalMemoryMb(bool large_memory) {
+    int total_mb = 64;
+    while (total_mb < MemoryPressureMonitor::kLargeMemoryThresholdMb)
+      total_mb *= 2;
+    if (large_memory)
+      return total_mb * 2;
+    return total_mb / 2;
+  }
+
+  // Sets up the memory status to reflect the provided absolute memory left.
+  void SetMemoryFree(int phys_left_mb) {
+    // ullTotalPhys is set in the constructor and not modified.
+
+    // Set the amount of available memory.
+    mem_status_.ullAvailPhys =
+        static_cast<DWORDLONG>(phys_left_mb) * kMBBytes;
+    DCHECK_LT(mem_status_.ullAvailPhys, mem_status_.ullTotalPhys);
+
+    // These fields are unused.
+    mem_status_.dwMemoryLoad = 0;
+    mem_status_.ullTotalPageFile = 0;
+    mem_status_.ullAvailPageFile = 0;
+    mem_status_.ullTotalVirtual = 0;
+    mem_status_.ullAvailVirtual = 0;
+  }
+
+  void SetNone() {
+    SetMemoryFree(moderate_threshold_mb() + 1);
+  }
+
+  void SetModerate() {
+    SetMemoryFree(moderate_threshold_mb() - 1);
+  }
+
+  void SetCritical() {
+    SetMemoryFree(critical_threshold_mb() - 1);
+  }
+
+ private:
+  bool GetSystemMemoryStatus(MEMORYSTATUSEX* mem_status) override {
+    // Simply copy the memory status set by the test fixture.
+    *mem_status = mem_status_;
+    return true;
+  }
+
+  MEMORYSTATUSEX mem_status_;
+
+  DISALLOW_COPY_AND_ASSIGN(TestMemoryPressureMonitor);
+};
+
+class WinMemoryPressureMonitorTest : public testing::Test {
+ protected:
+  void CalculateCurrentMemoryPressureLevelTest(
+      TestMemoryPressureMonitor* monitor) {
+
+    int mod = monitor->moderate_threshold_mb();
+    monitor->SetMemoryFree(mod + 1);
+    EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
+              monitor->CalculateCurrentPressureLevel());
+
+    monitor->SetMemoryFree(mod);
+    EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
+              monitor->CalculateCurrentPressureLevel());
+
+    monitor->SetMemoryFree(mod - 1);
+    EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
+              monitor->CalculateCurrentPressureLevel());
+
+    int crit = monitor->critical_threshold_mb();
+    monitor->SetMemoryFree(crit + 1);
+    EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
+              monitor->CalculateCurrentPressureLevel());
+
+    monitor->SetMemoryFree(crit);
+    EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL,
+              monitor->CalculateCurrentPressureLevel());
+
+    monitor->SetMemoryFree(crit - 1);
+    EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL,
+              monitor->CalculateCurrentPressureLevel());
+  }
+
+  base::MessageLoopForUI message_loop_;
+};
+
+// Tests the fundamental direct calculation of memory pressure with automatic
+// small-memory thresholds.
+TEST_F(WinMemoryPressureMonitorTest, CalculateCurrentMemoryPressureLevelSmall) {
+  static const int kModerateMb =
+      MemoryPressureMonitor::kSmallMemoryDefaultModerateThresholdMb;
+  static const int kCriticalMb =
+      MemoryPressureMonitor::kSmallMemoryDefaultCriticalThresholdMb;
+
+  TestMemoryPressureMonitor monitor(false);  // Small-memory model.
+
+  EXPECT_EQ(kModerateMb, monitor.moderate_threshold_mb());
+  EXPECT_EQ(kCriticalMb, monitor.critical_threshold_mb());
+
+  ASSERT_NO_FATAL_FAILURE(CalculateCurrentMemoryPressureLevelTest(&monitor));
+}
+
+// Tests the fundamental direct calculation of memory pressure with automatic
+// large-memory thresholds.
+TEST_F(WinMemoryPressureMonitorTest, CalculateCurrentMemoryPressureLevelLarge) {
+  static const int kModerateMb =
+      MemoryPressureMonitor::kLargeMemoryDefaultModerateThresholdMb;
+  static const int kCriticalMb =
+      MemoryPressureMonitor::kLargeMemoryDefaultCriticalThresholdMb;
+
+  TestMemoryPressureMonitor monitor(true);  // Large-memory model.
+
+  EXPECT_EQ(kModerateMb, monitor.moderate_threshold_mb());
+  EXPECT_EQ(kCriticalMb, monitor.critical_threshold_mb());
+
+  ASSERT_NO_FATAL_FAILURE(CalculateCurrentMemoryPressureLevelTest(&monitor));
+}
+
+// Tests the fundamental direct calculation of memory pressure with manually
+// specified threshold levels.
+TEST_F(WinMemoryPressureMonitorTest,
+       CalculateCurrentMemoryPressureLevelCustom) {
+  static const int kSystemMb = 512;
+  static const int kModerateMb = 256;
+  static const int kCriticalMb = 128;
+
+  TestMemoryPressureMonitor monitor(kSystemMb, kModerateMb, kCriticalMb);
+
+  EXPECT_EQ(kModerateMb, monitor.moderate_threshold_mb());
+  EXPECT_EQ(kCriticalMb, monitor.critical_threshold_mb());
+
+  ASSERT_NO_FATAL_FAILURE(CalculateCurrentMemoryPressureLevelTest(&monitor));
+}
+
+// This test tests the various transition states from memory pressure, looking
+// for the correct behavior on event reposting as well as state updates.
+TEST_F(WinMemoryPressureMonitorTest, CheckMemoryPressure) {
+  // Large-memory.
+  testing::StrictMock<TestMemoryPressureMonitor> monitor(true);
+  MemoryPressureListener listener(
+      base::Bind(&TestMemoryPressureMonitor::OnMemoryPressure,
+                 base::Unretained(&monitor)));
+
+  // Checking the memory pressure with free memory above the moderate threshold
+  // should not produce any events.
+  monitor.SetNone();
+  monitor.CheckMemoryPressure();
+  RunLoop().RunUntilIdle();
+  EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
+            monitor.GetCurrentPressureLevel());
+
+  // Dropping free memory below the moderate threshold should produce a
+  // moderate pressure level.
+  EXPECT_CALL(monitor,
+              OnMemoryPressure(MemoryPressureListener::
+                                   MEMORY_PRESSURE_LEVEL_MODERATE));
+  monitor.SetModerate();
+  monitor.CheckMemoryPressure();
+  RunLoop().RunUntilIdle();
+  EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
+            monitor.GetCurrentPressureLevel());
+  testing::Mock::VerifyAndClearExpectations(&monitor);
+
+  // Check that the event gets reposted after a while.
+  for (int i = 0; i < monitor.kModeratePressureCooldownCycles; ++i) {
+    if (i + 1 == monitor.kModeratePressureCooldownCycles) {
+      EXPECT_CALL(monitor,
+                  OnMemoryPressure(MemoryPressureListener::
+                                       MEMORY_PRESSURE_LEVEL_MODERATE));
+    }
+    monitor.CheckMemoryPressure();
+    RunLoop().RunUntilIdle();
+    EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
+              monitor.GetCurrentPressureLevel());
+    testing::Mock::VerifyAndClearExpectations(&monitor);
+  }
+
+  // Dropping free memory below the critical threshold should produce a
+  // critical pressure level.
+  EXPECT_CALL(monitor,
+              OnMemoryPressure(MemoryPressureListener::
+                                   MEMORY_PRESSURE_LEVEL_CRITICAL));
+  monitor.SetCritical();
+  monitor.CheckMemoryPressure();
+  RunLoop().RunUntilIdle();
+  EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL,
+            monitor.GetCurrentPressureLevel());
+  testing::Mock::VerifyAndClearExpectations(&monitor);
+
+  // Calling it again should immediately produce a second call.
+  EXPECT_CALL(monitor,
+              OnMemoryPressure(MemoryPressureListener::
+                                   MEMORY_PRESSURE_LEVEL_CRITICAL));
+  monitor.CheckMemoryPressure();
+  RunLoop().RunUntilIdle();
+  EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL,
+            monitor.GetCurrentPressureLevel());
+  testing::Mock::VerifyAndClearExpectations(&monitor);
+
+  // When lowering the pressure again there should be a notification and the
+  // pressure should go back to moderate.
+  EXPECT_CALL(monitor,
+              OnMemoryPressure(MemoryPressureListener::
+                                   MEMORY_PRESSURE_LEVEL_MODERATE));
+  monitor.SetModerate();
+  monitor.CheckMemoryPressure();
+  RunLoop().RunUntilIdle();
+  EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
+            monitor.GetCurrentPressureLevel());
+  testing::Mock::VerifyAndClearExpectations(&monitor);
+
+  // Check that the event gets reposted after a while.
+  for (int i = 0; i < monitor.kModeratePressureCooldownCycles; ++i) {
+    if (i + 1 == monitor.kModeratePressureCooldownCycles) {
+      EXPECT_CALL(monitor,
+                  OnMemoryPressure(MemoryPressureListener::
+                                       MEMORY_PRESSURE_LEVEL_MODERATE));
+    }
+    monitor.CheckMemoryPressure();
+    RunLoop().RunUntilIdle();
+    EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
+              monitor.GetCurrentPressureLevel());
+    testing::Mock::VerifyAndClearExpectations(&monitor);
+  }
+
+  // Going down to no pressure should not produce a notification.
+  monitor.SetNone();
+  monitor.CheckMemoryPressure();
+  RunLoop().RunUntilIdle();
+  EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
+            monitor.GetCurrentPressureLevel());
+  testing::Mock::VerifyAndClearExpectations(&monitor);
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/memory/platform_shared_memory_region.cc b/base/memory/platform_shared_memory_region.cc
new file mode 100644
index 0000000..c145336
--- /dev/null
+++ b/base/memory/platform_shared_memory_region.cc
@@ -0,0 +1,37 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/platform_shared_memory_region.h"
+
+#include "base/memory/shared_memory_mapping.h"
+
+namespace base {
+namespace subtle {
+
+// static
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::CreateWritable(
+    size_t size) {
+  return Create(Mode::kWritable, size);
+}
+
+// static
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::CreateUnsafe(
+    size_t size) {
+  return Create(Mode::kUnsafe, size);
+}
+
+PlatformSharedMemoryRegion::PlatformSharedMemoryRegion() = default;
+PlatformSharedMemoryRegion::PlatformSharedMemoryRegion(
+    PlatformSharedMemoryRegion&& other) = default;
+PlatformSharedMemoryRegion& PlatformSharedMemoryRegion::operator=(
+    PlatformSharedMemoryRegion&& other) = default;
+PlatformSharedMemoryRegion::~PlatformSharedMemoryRegion() = default;
+
+PlatformSharedMemoryRegion::ScopedPlatformHandle
+PlatformSharedMemoryRegion::PassPlatformHandle() {
+  return std::move(handle_);
+}
+
+}  // namespace subtle
+}  // namespace base
diff --git a/base/memory/platform_shared_memory_region.h b/base/memory/platform_shared_memory_region.h
new file mode 100644
index 0000000..143a1d4
--- /dev/null
+++ b/base/memory/platform_shared_memory_region.h
@@ -0,0 +1,223 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_PLATFORM_SHARED_MEMORY_REGION_H_
+#define BASE_MEMORY_PLATFORM_SHARED_MEMORY_REGION_H_
+
+#include <utility>
+
+#include "base/compiler_specific.h"
+#include "base/gtest_prod_util.h"
+#include "base/macros.h"
+#include "base/unguessable_token.h"
+#include "build/build_config.h"
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+#include <mach/mach.h>
+#include "base/mac/scoped_mach_port.h"
+#elif defined(OS_FUCHSIA)
+#include "base/fuchsia/scoped_zx_handle.h"
+#elif defined(OS_WIN)
+#include "base/win/scoped_handle.h"
+#include "base/win/windows_types.h"
+#elif defined(OS_POSIX)
+#include <sys/types.h>
+#include "base/file_descriptor_posix.h"
+#include "base/files/scoped_file.h"
+#endif
+
+namespace base {
+namespace subtle {
+
+#if defined(OS_POSIX) && (!defined(OS_MACOSX) || defined(OS_IOS)) && \
+    !defined(OS_FUCHSIA) && !defined(OS_ANDROID)
+// Helper structs that keep two descriptors on POSIX. Both are needed to
+// support ConvertToReadOnly().
+struct BASE_EXPORT FDPair {
+  int fd;
+  int readonly_fd;
+};
+
+struct BASE_EXPORT ScopedFDPair {
+  ScopedFDPair();
+  ScopedFDPair(ScopedFD in_fd, ScopedFD in_readonly_fd);
+  ScopedFDPair(ScopedFDPair&&);
+  ScopedFDPair& operator=(ScopedFDPair&&);
+  ~ScopedFDPair();
+
+  FDPair get() const;
+
+  ScopedFD fd;
+  ScopedFD readonly_fd;
+};
+#endif
+
+// Implementation class for shared memory regions.
+//
+// This class does the following:
+//
+// - Wraps and owns a shared memory region platform handle.
+// - Provides a way to allocate a new region of platform shared memory of given
+//   size.
+// - Provides a way to create mapping of the region in the current process'
+//   address space, under special access-control constraints (see Mode).
+// - Provides methods to help transferring the handle across process boundaries.
+// - Holds a 128-bit unique identifier used to uniquely identify the same
+//   kernel region resource across processes (used for memory tracking).
+// - Has a method to retrieve the region's size in bytes.
+//
+// IMPORTANT NOTE: Users should never use this directly, but
+// ReadOnlySharedMemoryRegion, WritableSharedMemoryRegion or
+// UnsafeSharedMemoryRegion since this is an implementation class.
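+//
+// A minimal sketch of the intended call sequence (normally driven by the typed
+// wrapper classes above rather than written by hand):
+//
+//   PlatformSharedMemoryRegion region =
+//       PlatformSharedMemoryRegion::CreateWritable(4096);
+//   void* memory = nullptr;
+//   size_t mapped_size = 0;
+//   if (region.IsValid() && region.MapAt(0, 4096, &memory, &mapped_size)) {
+//     // |memory| points at |mapped_size| bytes of shared memory.
+//   }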
+class BASE_EXPORT PlatformSharedMemoryRegion {
+ public:
+  // Permission mode of the platform handle. Each mode corresponds to one of the
+  // typed shared memory classes:
+  //
+  // * ReadOnlySharedMemoryRegion: A region that can only create read-only
+  // mappings.
+  //
+  // * WritableSharedMemoryRegion: A region that can only create writable
+  // mappings. The region can be demoted to ReadOnlySharedMemoryRegion without
+  // the possibility of promoting back to writable.
+  //
+  // * UnsafeSharedMemoryRegion: A region that can only create writable
+  // mappings. The region cannot be demoted to ReadOnlySharedMemoryRegion.
+  enum class Mode {
+    kReadOnly,  // ReadOnlySharedMemoryRegion
+    kWritable,  // WritableSharedMemoryRegion
+    kUnsafe,    // UnsafeSharedMemoryRegion
+    kMaxValue = kUnsafe
+  };
+
+// Platform-specific shared memory type used by this class.
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+  using PlatformHandle = mach_port_t;
+  using ScopedPlatformHandle = mac::ScopedMachSendRight;
+#elif defined(OS_FUCHSIA)
+  using PlatformHandle = zx_handle_t;
+  using ScopedPlatformHandle = ScopedZxHandle;
+#elif defined(OS_WIN)
+  using PlatformHandle = HANDLE;
+  using ScopedPlatformHandle = win::ScopedHandle;
+#elif defined(OS_ANDROID)
+  using PlatformHandle = int;
+  using ScopedPlatformHandle = ScopedFD;
+#else
+  using PlatformHandle = FDPair;
+  using ScopedPlatformHandle = ScopedFDPair;
+#endif
+
+  // The minimum alignment in bytes that any mapped address produced by Map()
+  // and MapAt() is guaranteed to have.
+  enum { kMapMinimumAlignment = 32 };
+
+  // Creates a new PlatformSharedMemoryRegion with corresponding mode and size.
+  // Creating in kReadOnly mode isn't supported because then there will be no
+  // way to modify memory content.
+  static PlatformSharedMemoryRegion CreateWritable(size_t size);
+  static PlatformSharedMemoryRegion CreateUnsafe(size_t size);
+
+  // Returns a new PlatformSharedMemoryRegion that takes ownership of the
+  // |handle|. All parameters must be taken from another valid
+  // PlatformSharedMemoryRegion instance, e.g. |size| must be equal to the
+  // actual region size as allocated by the kernel.
+  // Closes the |handle| and returns an invalid instance if passed parameters
+  // are invalid.
+  static PlatformSharedMemoryRegion Take(ScopedPlatformHandle handle,
+                                         Mode mode,
+                                         size_t size,
+                                         const UnguessableToken& guid);
+
+  // Default constructor initializes an invalid instance, i.e. an instance that
+  // doesn't wrap any valid platform handle.
+  PlatformSharedMemoryRegion();
+
+  // Move operations are allowed.
+  PlatformSharedMemoryRegion(PlatformSharedMemoryRegion&&);
+  PlatformSharedMemoryRegion& operator=(PlatformSharedMemoryRegion&&);
+
+  // Destructor closes the platform handle. Does nothing if the handle is
+  // invalid.
+  ~PlatformSharedMemoryRegion();
+
+  // Passes ownership of the platform handle to the caller. The current instance
+  // becomes invalid. It's the responsibility of the caller to close the handle.
+  ScopedPlatformHandle PassPlatformHandle() WARN_UNUSED_RESULT;
+
+  // Returns the platform handle. The current instance keeps ownership of this
+  // handle.
+  PlatformHandle GetPlatformHandle() const;
+
+  // Whether the platform handle is valid.
+  bool IsValid() const;
+
+  // Duplicates the platform handle and creates a new PlatformSharedMemoryRegion
+  // with the same |mode_|, |size_| and |guid_| that owns this handle. Returns
+  // invalid region on failure, the current instance remains valid.
+  // Can be called only in kReadOnly and kUnsafe modes; CHECK-fails if it is
+  // called in kWritable mode.
+  PlatformSharedMemoryRegion Duplicate() const;
+
+  // Converts the region to read-only. Returns whether the operation succeeded.
+  // Makes the current instance invalid on failure. Can be called only in
+  // kWritable mode, all other modes will CHECK-fail. The object will have
+  // kReadOnly mode after this call on success.
+  bool ConvertToReadOnly();
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+  // Same as above, but |mapped_addr| is used as a hint to avoid additional
+  // mapping of the memory object.
+  // |mapped_addr| must be mapped location of |memory_object_|. If the location
+  // is unknown, |mapped_addr| should be |nullptr|.
+  bool ConvertToReadOnly(void* mapped_addr);
+#endif  // defined(OS_MACOSX) && !defined(OS_IOS)
+
+  // Maps |size| bytes of the shared memory region starting with the given
+  // |offset| into the caller's address space. |offset| must be aligned to the
+  // value of |SysInfo::VMAllocationGranularity()|. Fails if the requested bytes
+  // are out of the region limits.
+  // Returns true and sets |memory| and |mapped_size| on success, returns false
+  // and leaves output parameters in unspecified state otherwise. The mapped
+  // address is guaranteed to have an alignment of at least
+  // |kMapMinimumAlignment|.
+  bool MapAt(off_t offset,
+             size_t size,
+             void** memory,
+             size_t* mapped_size) const;
+
+  const UnguessableToken& GetGUID() const { return guid_; }
+
+  size_t GetSize() const { return size_; }
+
+  Mode GetMode() const { return mode_; }
+
+ private:
+  FRIEND_TEST_ALL_PREFIXES(PlatformSharedMemoryRegionTest,
+                           CreateReadOnlyRegionDeathTest);
+  FRIEND_TEST_ALL_PREFIXES(PlatformSharedMemoryRegionTest,
+                           CheckPlatformHandlePermissionsCorrespondToMode);
+  static PlatformSharedMemoryRegion Create(Mode mode, size_t size);
+
+  static bool CheckPlatformHandlePermissionsCorrespondToMode(
+      PlatformHandle handle,
+      Mode mode,
+      size_t size);
+
+  PlatformSharedMemoryRegion(ScopedPlatformHandle handle,
+                             Mode mode,
+                             size_t size,
+                             const UnguessableToken& guid);
+
+  ScopedPlatformHandle handle_;
+  Mode mode_ = Mode::kReadOnly;
+  size_t size_ = 0;
+  UnguessableToken guid_;
+
+  DISALLOW_COPY_AND_ASSIGN(PlatformSharedMemoryRegion);
+};
+
+}  // namespace subtle
+}  // namespace base
+
+#endif  // BASE_MEMORY_PLATFORM_SHARED_MEMORY_REGION_H_
diff --git a/base/memory/platform_shared_memory_region_android.cc b/base/memory/platform_shared_memory_region_android.cc
new file mode 100644
index 0000000..664d3d4
--- /dev/null
+++ b/base/memory/platform_shared_memory_region_android.cc
@@ -0,0 +1,190 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/platform_shared_memory_region.h"
+
+#include <sys/mman.h>
+
+#include "base/memory/shared_memory_tracker.h"
+#include "base/posix/eintr_wrapper.h"
+#include "third_party/ashmem/ashmem.h"
+
+namespace base {
+namespace subtle {
+
+// For Android, we use ashmem to implement SharedMemory. ashmem_create_region
+// will automatically pin the region. We never explicitly call pin/unpin. When
+// all the file descriptors from different processes associated with the region
+// are closed, the memory buffer will go away.
+
+namespace {
+
+static int GetAshmemRegionProtectionMask(int fd) {
+  int prot = ashmem_get_prot_region(fd);
+  if (prot < 0) {
+    DPLOG(ERROR) << "ashmem_get_prot_region failed";
+    return -1;
+  }
+  return prot;
+}
+
+}  // namespace
+
+// static
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Take(
+    ScopedFD fd,
+    Mode mode,
+    size_t size,
+    const UnguessableToken& guid) {
+  if (!fd.is_valid())
+    return {};
+
+  if (size == 0)
+    return {};
+
+  if (size > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return {};
+
+  CHECK(CheckPlatformHandlePermissionsCorrespondToMode(fd.get(), mode, size));
+
+  return PlatformSharedMemoryRegion(std::move(fd), mode, size, guid);
+}
+
+int PlatformSharedMemoryRegion::GetPlatformHandle() const {
+  return handle_.get();
+}
+
+bool PlatformSharedMemoryRegion::IsValid() const {
+  return handle_.is_valid();
+}
+
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Duplicate() const {
+  if (!IsValid())
+    return {};
+
+  CHECK_NE(mode_, Mode::kWritable)
+      << "Duplicating a writable shared memory region is prohibited";
+
+  ScopedFD duped_fd(HANDLE_EINTR(dup(handle_.get())));
+  if (!duped_fd.is_valid()) {
+    DPLOG(ERROR) << "dup(" << handle_.get() << ") failed";
+    return {};
+  }
+
+  return PlatformSharedMemoryRegion(std::move(duped_fd), mode_, size_, guid_);
+}
+
+bool PlatformSharedMemoryRegion::ConvertToReadOnly() {
+  if (!IsValid())
+    return false;
+
+  CHECK_EQ(mode_, Mode::kWritable)
+      << "Only writable shared memory region can be converted to read-only";
+
+  ScopedFD handle_copy(handle_.release());
+
+  int prot = GetAshmemRegionProtectionMask(handle_copy.get());
+  if (prot < 0)
+    return false;
+
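+  // Clear the write bit; ashmem applies the region protection mask to all
+  // future mappings of the region.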
+  prot &= ~PROT_WRITE;
+  int ret = ashmem_set_prot_region(handle_copy.get(), prot);
+  if (ret != 0) {
+    DPLOG(ERROR) << "ashmem_set_prot_region failed";
+    return false;
+  }
+
+  handle_ = std::move(handle_copy);
+  mode_ = Mode::kReadOnly;
+  return true;
+}
+
+bool PlatformSharedMemoryRegion::MapAt(off_t offset,
+                                       size_t size,
+                                       void** memory,
+                                       size_t* mapped_size) const {
+  if (!IsValid())
+    return false;
+
+  size_t end_byte;
+  if (!CheckAdd(offset, size).AssignIfValid(&end_byte) || end_byte > size_) {
+    return false;
+  }
+
+  bool write_allowed = mode_ != Mode::kReadOnly;
+  *memory = mmap(nullptr, size, PROT_READ | (write_allowed ? PROT_WRITE : 0),
+                 MAP_SHARED, handle_.get(), offset);
+
+  bool mmap_succeeded = *memory && *memory != reinterpret_cast<void*>(-1);
+  if (!mmap_succeeded) {
+    DPLOG(ERROR) << "mmap " << handle_.get() << " failed";
+    return false;
+  }
+
+  *mapped_size = size;
+  DCHECK_EQ(0U,
+            reinterpret_cast<uintptr_t>(*memory) & (kMapMinimumAlignment - 1));
+  return true;
+}
+
+// static
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Create(Mode mode,
+                                                              size_t size) {
+  if (size == 0)
+    return {};
+
+  if (size > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return {};
+
+  CHECK_NE(mode, Mode::kReadOnly) << "Creating a region in read-only mode will "
+                                     "lead to this region being non-modifiable";
+
+  UnguessableToken guid = UnguessableToken::Create();
+
+  ScopedFD fd(ashmem_create_region(
+      SharedMemoryTracker::GetDumpNameForTracing(guid).c_str(), size));
+  if (!fd.is_valid()) {
+    DPLOG(ERROR) << "ashmem_create_region failed";
+    return {};
+  }
+
+  int err = ashmem_set_prot_region(fd.get(), PROT_READ | PROT_WRITE);
+  if (err < 0) {
+    DPLOG(ERROR) << "ashmem_set_prot_region failed";
+    return {};
+  }
+
+  return PlatformSharedMemoryRegion(std::move(fd), mode, size, guid);
+}
+
+bool PlatformSharedMemoryRegion::CheckPlatformHandlePermissionsCorrespondToMode(
+    PlatformHandle handle,
+    Mode mode,
+    size_t size) {
+  int prot = GetAshmemRegionProtectionMask(handle);
+  if (prot < 0)
+    return false;
+
+  bool is_read_only = (prot & PROT_WRITE) == 0;
+  bool expected_read_only = mode == Mode::kReadOnly;
+
+  if (is_read_only != expected_read_only) {
+    DLOG(ERROR) << "Ashmem region has a wrong protection mask: it is"
+                << (is_read_only ? " " : " not ") << "read-only but it should"
+                << (expected_read_only ? " " : " not ") << "be";
+    return false;
+  }
+
+  return true;
+}
+
+PlatformSharedMemoryRegion::PlatformSharedMemoryRegion(
+    ScopedFD fd,
+    Mode mode,
+    size_t size,
+    const UnguessableToken& guid)
+    : handle_(std::move(fd)), mode_(mode), size_(size), guid_(guid) {}
+
+}  // namespace subtle
+}  // namespace base
diff --git a/base/memory/platform_shared_memory_region_fuchsia.cc b/base/memory/platform_shared_memory_region_fuchsia.cc
new file mode 100644
index 0000000..5a75845
--- /dev/null
+++ b/base/memory/platform_shared_memory_region_fuchsia.cc
@@ -0,0 +1,190 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/platform_shared_memory_region.h"
+
+#include <zircon/process.h>
+#include <zircon/rights.h>
+#include <zircon/syscalls.h>
+
+#include "base/bits.h"
+#include "base/fuchsia/fuchsia_logging.h"
+#include "base/numerics/checked_math.h"
+#include "base/process/process_metrics.h"
+
+namespace base {
+namespace subtle {
+
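+// Rights for a read-only VMO handle: the default VMO rights with the write,
+// execute, and property-modification rights stripped.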
+static constexpr int kNoWriteOrExec =
+    ZX_DEFAULT_VMO_RIGHTS &
+    ~(ZX_RIGHT_WRITE | ZX_RIGHT_EXECUTE | ZX_RIGHT_SET_PROPERTY);
+
+// static
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Take(
+    ScopedZxHandle handle,
+    Mode mode,
+    size_t size,
+    const UnguessableToken& guid) {
+  if (!handle.is_valid())
+    return {};
+
+  if (size == 0)
+    return {};
+
+  if (size > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return {};
+
+  CHECK(
+      CheckPlatformHandlePermissionsCorrespondToMode(handle.get(), mode, size));
+
+  return PlatformSharedMemoryRegion(std::move(handle), mode, size, guid);
+}
+
+zx_handle_t PlatformSharedMemoryRegion::GetPlatformHandle() const {
+  return handle_.get();
+}
+
+bool PlatformSharedMemoryRegion::IsValid() const {
+  return handle_.is_valid();
+}
+
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Duplicate() const {
+  if (!IsValid())
+    return {};
+
+  CHECK_NE(mode_, Mode::kWritable)
+      << "Duplicating a writable shared memory region is prohibited";
+
+  ScopedZxHandle duped_handle;
+  zx_status_t status = zx_handle_duplicate(handle_.get(), ZX_RIGHT_SAME_RIGHTS,
+                                           duped_handle.receive());
+  if (status != ZX_OK) {
+    ZX_DLOG(ERROR, status) << "zx_handle_duplicate";
+    return {};
+  }
+
+  return PlatformSharedMemoryRegion(std::move(duped_handle), mode_, size_,
+                                    guid_);
+}
+
+bool PlatformSharedMemoryRegion::ConvertToReadOnly() {
+  if (!IsValid())
+    return false;
+
+  CHECK_EQ(mode_, Mode::kWritable)
+      << "Only writable shared memory region can be converted to read-only";
+
+  ScopedZxHandle old_handle(handle_.release());
+  ScopedZxHandle new_handle;
+  zx_status_t status =
+      zx_handle_replace(old_handle.get(), kNoWriteOrExec, new_handle.receive());
+  if (status != ZX_OK) {
+    ZX_DLOG(ERROR, status) << "zx_handle_replace";
+    return false;
+  }
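+  // On success zx_handle_replace() has consumed |old_handle|, so release it
+  // here to keep the ScopedZxHandle from closing the now-invalid handle.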
+  ignore_result(old_handle.release());
+
+  handle_ = std::move(new_handle);
+  mode_ = Mode::kReadOnly;
+  return true;
+}
+
+bool PlatformSharedMemoryRegion::MapAt(off_t offset,
+                                       size_t size,
+                                       void** memory,
+                                       size_t* mapped_size) const {
+  if (!IsValid())
+    return false;
+
+  size_t end_byte;
+  if (!CheckAdd(offset, size).AssignIfValid(&end_byte) || end_byte > size_) {
+    return false;
+  }
+
+  bool write_allowed = mode_ != Mode::kReadOnly;
+  uintptr_t addr;
+  zx_status_t status = zx_vmar_map(
+      zx_vmar_root_self(), 0, handle_.get(), offset, size,
+      ZX_VM_FLAG_PERM_READ | (write_allowed ? ZX_VM_FLAG_PERM_WRITE : 0),
+      &addr);
+  if (status != ZX_OK) {
+    ZX_DLOG(ERROR, status) << "zx_vmar_map";
+    return false;
+  }
+
+  *memory = reinterpret_cast<void*>(addr);
+  *mapped_size = size;
+  DCHECK_EQ(0U,
+            reinterpret_cast<uintptr_t>(*memory) & (kMapMinimumAlignment - 1));
+  return true;
+}
+
+// static
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Create(Mode mode,
+                                                              size_t size) {
+  if (size == 0)
+    return {};
+
+  size_t rounded_size = bits::Align(size, GetPageSize());
+  if (rounded_size > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return {};
+
+  CHECK_NE(mode, Mode::kReadOnly) << "Creating a region in read-only mode will "
+                                     "lead to this region being non-modifiable";
+
+  ScopedZxHandle vmo;
+  zx_status_t status = zx_vmo_create(rounded_size, 0, vmo.receive());
+  if (status != ZX_OK) {
+    ZX_DLOG(ERROR, status) << "zx_vmo_create";
+    return {};
+  }
+
+  const int kNoExecFlags = ZX_DEFAULT_VMO_RIGHTS & ~ZX_RIGHT_EXECUTE;
+  ScopedZxHandle old_vmo(std::move(vmo));
+  status = zx_handle_replace(old_vmo.get(), kNoExecFlags, vmo.receive());
+  if (status != ZX_OK) {
+    ZX_DLOG(ERROR, status) << "zx_handle_replace";
+    return {};
+  }
+  ignore_result(old_vmo.release());
+
+  return PlatformSharedMemoryRegion(std::move(vmo), mode, size,
+                                    UnguessableToken::Create());
+}
+
+// static
+bool PlatformSharedMemoryRegion::CheckPlatformHandlePermissionsCorrespondToMode(
+    PlatformHandle handle,
+    Mode mode,
+    size_t size) {
+  zx_info_handle_basic_t basic = {};
+  zx_status_t status = zx_object_get_info(handle, ZX_INFO_HANDLE_BASIC, &basic,
+                                          sizeof(basic), nullptr, nullptr);
+  if (status != ZX_OK) {
+    ZX_DLOG(ERROR, status) << "zx_object_get_info";
+    return false;
+  }
+
+  bool is_read_only = (basic.rights & kNoWriteOrExec) == basic.rights;
+  bool expected_read_only = mode == Mode::kReadOnly;
+
+  if (is_read_only != expected_read_only) {
+    DLOG(ERROR) << "VMO object has wrong access rights: it is"
+                << (is_read_only ? " " : " not ") << "read-only but it should"
+                << (expected_read_only ? " " : " not ") << "be";
+    return false;
+  }
+
+  return true;
+}
+
+PlatformSharedMemoryRegion::PlatformSharedMemoryRegion(
+    ScopedZxHandle handle,
+    Mode mode,
+    size_t size,
+    const UnguessableToken& guid)
+    : handle_(std::move(handle)), mode_(mode), size_(size), guid_(guid) {}
+
+}  // namespace subtle
+}  // namespace base
diff --git a/base/memory/platform_shared_memory_region_mac.cc b/base/memory/platform_shared_memory_region_mac.cc
new file mode 100644
index 0000000..b4d12ba
--- /dev/null
+++ b/base/memory/platform_shared_memory_region_mac.cc
@@ -0,0 +1,222 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/platform_shared_memory_region.h"
+
+#include <mach/mach_vm.h>
+
+#include "base/mac/mach_logging.h"
+#include "base/mac/scoped_mach_vm.h"
+#include "base/numerics/checked_math.h"
+#include "build/build_config.h"
+
+#if defined(OS_IOS)
+#error "MacOS only - iOS uses platform_shared_memory_region_posix.cc"
+#endif
+
+namespace base {
+namespace subtle {
+
+// static
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Take(
+    mac::ScopedMachSendRight handle,
+    Mode mode,
+    size_t size,
+    const UnguessableToken& guid) {
+  if (!handle.is_valid())
+    return {};
+
+  if (size == 0)
+    return {};
+
+  if (size > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return {};
+
+  CHECK(
+      CheckPlatformHandlePermissionsCorrespondToMode(handle.get(), mode, size));
+
+  return PlatformSharedMemoryRegion(std::move(handle), mode, size, guid);
+}
+
+mach_port_t PlatformSharedMemoryRegion::GetPlatformHandle() const {
+  return handle_.get();
+}
+
+bool PlatformSharedMemoryRegion::IsValid() const {
+  return handle_.is_valid();
+}
+
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Duplicate() const {
+  if (!IsValid())
+    return {};
+
+  CHECK_NE(mode_, Mode::kWritable)
+      << "Duplicating a writable shared memory region is prohibited";
+
+  // Increment the ref count.
+  kern_return_t kr = mach_port_mod_refs(mach_task_self(), handle_.get(),
+                                        MACH_PORT_RIGHT_SEND, 1);
+  if (kr != KERN_SUCCESS) {
+    MACH_DLOG(ERROR, kr) << "mach_port_mod_refs";
+    return {};
+  }
+
+  return PlatformSharedMemoryRegion(mac::ScopedMachSendRight(handle_.get()),
+                                    mode_, size_, guid_);
+}
+
+bool PlatformSharedMemoryRegion::ConvertToReadOnly() {
+  return ConvertToReadOnly(nullptr);
+}
+
+bool PlatformSharedMemoryRegion::ConvertToReadOnly(void* mapped_addr) {
+  if (!IsValid())
+    return false;
+
+  CHECK_EQ(mode_, Mode::kWritable)
+      << "Only writable shared memory region can be converted to read-only";
+
+  mac::ScopedMachSendRight handle_copy(handle_.release());
+
+  void* temp_addr = mapped_addr;
+  mac::ScopedMachVM scoped_memory;
+  if (!temp_addr) {
+    // Intentionally lower current prot and max prot to |VM_PROT_READ|.
+    kern_return_t kr = mach_vm_map(
+        mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&temp_addr),
+        size_, 0, VM_FLAGS_ANYWHERE, handle_copy.get(), 0, FALSE, VM_PROT_READ,
+        VM_PROT_READ, VM_INHERIT_NONE);
+    if (kr != KERN_SUCCESS) {
+      MACH_DLOG(ERROR, kr) << "mach_vm_map";
+      return false;
+    }
+    scoped_memory.reset(reinterpret_cast<vm_address_t>(temp_addr),
+                        mach_vm_round_page(size_));
+  }
+
+  // Make new memory object.
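+  // Requesting only VM_PROT_READ limits the permissions of the new memory
+  // entry, so the resulting send right cannot back a writable mapping.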
+  memory_object_size_t allocation_size = size_;
+  mac::ScopedMachSendRight named_right;
+  kern_return_t kr = mach_make_memory_entry_64(
+      mach_task_self(), &allocation_size,
+      reinterpret_cast<memory_object_offset_t>(temp_addr), VM_PROT_READ,
+      named_right.receive(), MACH_PORT_NULL);
+  if (kr != KERN_SUCCESS) {
+    MACH_DLOG(ERROR, kr) << "mach_make_memory_entry_64";
+    return false;
+  }
+  DCHECK_GE(allocation_size, size_);
+
+  handle_ = std::move(named_right);
+  mode_ = Mode::kReadOnly;
+  return true;
+}
+
+bool PlatformSharedMemoryRegion::MapAt(off_t offset,
+                                       size_t size,
+                                       void** memory,
+                                       size_t* mapped_size) const {
+  if (!IsValid())
+    return false;
+
+  size_t end_byte;
+  if (!CheckAdd(offset, size).AssignIfValid(&end_byte) || end_byte > size_) {
+    return false;
+  }
+
+  bool write_allowed = mode_ != Mode::kReadOnly;
+  vm_prot_t vm_prot_write = write_allowed ? VM_PROT_WRITE : 0;
+  kern_return_t kr = mach_vm_map(
+      mach_task_self(),
+      reinterpret_cast<mach_vm_address_t*>(memory),  // Output parameter
+      size,
+      0,  // Alignment mask
+      VM_FLAGS_ANYWHERE, handle_.get(), offset,
+      FALSE,                         // Copy
+      VM_PROT_READ | vm_prot_write,  // Current protection
+      VM_PROT_READ | vm_prot_write,  // Maximum protection
+      VM_INHERIT_NONE);
+  if (kr != KERN_SUCCESS) {
+    MACH_DLOG(ERROR, kr) << "mach_vm_map";
+    return false;
+  }
+
+  *mapped_size = size;
+  DCHECK_EQ(0U,
+            reinterpret_cast<uintptr_t>(*memory) & (kMapMinimumAlignment - 1));
+  return true;
+}
+
+// static
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Create(Mode mode,
+                                                              size_t size) {
+  if (size == 0)
+    return {};
+
+  if (size > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return {};
+
+  CHECK_NE(mode, Mode::kReadOnly) << "Creating a region in read-only mode will "
+                                     "lead to this region being non-modifiable";
+
+  mach_vm_size_t vm_size = size;
+  mac::ScopedMachSendRight named_right;
+  kern_return_t kr = mach_make_memory_entry_64(
+      mach_task_self(), &vm_size,
+      0,  // Address.
+      MAP_MEM_NAMED_CREATE | VM_PROT_READ | VM_PROT_WRITE,
+      named_right.receive(),
+      MACH_PORT_NULL);  // Parent handle.
+  if (kr != KERN_SUCCESS) {
+    MACH_DLOG(ERROR, kr) << "mach_make_memory_entry_64";
+    return {};
+  }
+  DCHECK_GE(vm_size, size);
+
+  return PlatformSharedMemoryRegion(std::move(named_right), mode, size,
+                                    UnguessableToken::Create());
+}
+
+// static
+bool PlatformSharedMemoryRegion::CheckPlatformHandlePermissionsCorrespondToMode(
+    PlatformHandle handle,
+    Mode mode,
+    size_t size) {
+  mach_vm_address_t temp_addr = 0;
+  kern_return_t kr =
+      mach_vm_map(mach_task_self(), &temp_addr, size, 0, VM_FLAGS_ANYWHERE,
+                  handle, 0, FALSE, VM_PROT_READ | VM_PROT_WRITE,
+                  VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_NONE);
+  if (kr == KERN_SUCCESS) {
+    kern_return_t kr_deallocate =
+        mach_vm_deallocate(mach_task_self(), temp_addr, size);
+    MACH_DLOG_IF(ERROR, kr_deallocate != KERN_SUCCESS, kr_deallocate)
+        << "mach_vm_deallocate";
+  } else if (kr != KERN_INVALID_RIGHT) {
+    MACH_DLOG(ERROR, kr) << "mach_vm_map";
+    return false;
+  }
+
+  bool is_read_only = kr == KERN_INVALID_RIGHT;
+  bool expected_read_only = mode == Mode::kReadOnly;
+
+  if (is_read_only != expected_read_only) {
+    DLOG(ERROR) << "VM region has a wrong protection mask: it is"
+                << (is_read_only ? " " : " not ") << "read-only but it should"
+                << (expected_read_only ? " " : " not ") << "be";
+    return false;
+  }
+
+  return true;
+}
+
+PlatformSharedMemoryRegion::PlatformSharedMemoryRegion(
+    mac::ScopedMachSendRight handle,
+    Mode mode,
+    size_t size,
+    const UnguessableToken& guid)
+    : handle_(std::move(handle)), mode_(mode), size_(size), guid_(guid) {}
+
+}  // namespace subtle
+}  // namespace base
diff --git a/base/memory/platform_shared_memory_region_posix.cc b/base/memory/platform_shared_memory_region_posix.cc
new file mode 100644
index 0000000..8453c12
--- /dev/null
+++ b/base/memory/platform_shared_memory_region_posix.cc
@@ -0,0 +1,291 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/platform_shared_memory_region.h"
+
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+
+#include "base/files/file_util.h"
+#include "base/numerics/checked_math.h"
+#include "base/threading/thread_restrictions.h"
+#include "build/build_config.h"
+
+namespace base {
+namespace subtle {
+
+namespace {
+
+struct ScopedPathUnlinkerTraits {
+  static const FilePath* InvalidValue() { return nullptr; }
+
+  static void Free(const FilePath* path) {
+    if (unlink(path->value().c_str()))
+      PLOG(WARNING) << "unlink";
+  }
+};
+
+// Unlinks the FilePath when the object is destroyed.
+using ScopedPathUnlinker =
+    ScopedGeneric<const FilePath*, ScopedPathUnlinkerTraits>;
+
+bool CheckFDAccessMode(int fd, int expected_mode) {
+  int fd_status = fcntl(fd, F_GETFL);
+  if (fd_status == -1) {
+    DPLOG(ERROR) << "fcntl(" << fd << ", F_GETFL) failed";
+    return false;
+  }
+
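+  // O_ACCMODE extracts the access-mode bits (O_RDONLY, O_WRONLY or O_RDWR)
+  // from the F_GETFL status flags.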
+  int mode = fd_status & O_ACCMODE;
+  if (mode != expected_mode) {
+    DLOG(ERROR) << "Descriptor access mode (" << mode
+                << ") differs from expected (" << expected_mode << ")";
+    return false;
+  }
+
+  return true;
+}
+
+}  // namespace
+
+ScopedFDPair::ScopedFDPair() = default;
+
+ScopedFDPair::ScopedFDPair(ScopedFDPair&&) = default;
+
+ScopedFDPair& ScopedFDPair::operator=(ScopedFDPair&&) = default;
+
+ScopedFDPair::~ScopedFDPair() = default;
+
+ScopedFDPair::ScopedFDPair(ScopedFD in_fd, ScopedFD in_readonly_fd)
+    : fd(std::move(in_fd)), readonly_fd(std::move(in_readonly_fd)) {}
+
+FDPair ScopedFDPair::get() const {
+  return {fd.get(), readonly_fd.get()};
+}
+
+// static
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Take(
+    ScopedFDPair handle,
+    Mode mode,
+    size_t size,
+    const UnguessableToken& guid) {
+  if (!handle.fd.is_valid())
+    return {};
+
+  if (size == 0)
+    return {};
+
+  if (size > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return {};
+
+  CHECK(
+      CheckPlatformHandlePermissionsCorrespondToMode(handle.get(), mode, size));
+
+  switch (mode) {
+    case Mode::kReadOnly:
+    case Mode::kUnsafe:
+      if (handle.readonly_fd.is_valid()) {
+        handle.readonly_fd.reset();
+        DLOG(WARNING) << "Readonly handle shouldn't be valid for a "
+                         "non-writable memory region; closing";
+      }
+      break;
+    case Mode::kWritable:
+      if (!handle.readonly_fd.is_valid()) {
+        DLOG(ERROR)
+            << "Readonly handle must be valid for writable memory region";
+        return {};
+      }
+      break;
+    default:
+      DLOG(ERROR) << "Invalid permission mode: " << static_cast<int>(mode);
+      return {};
+  }
+
+  return PlatformSharedMemoryRegion(std::move(handle), mode, size, guid);
+}
+
+FDPair PlatformSharedMemoryRegion::GetPlatformHandle() const {
+  return handle_.get();
+}
+
+bool PlatformSharedMemoryRegion::IsValid() const {
+  return handle_.fd.is_valid() &&
+         (mode_ == Mode::kWritable ? handle_.readonly_fd.is_valid() : true);
+}
+
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Duplicate() const {
+  if (!IsValid())
+    return {};
+
+  CHECK_NE(mode_, Mode::kWritable)
+      << "Duplicating a writable shared memory region is prohibited";
+
+  ScopedFD duped_fd(HANDLE_EINTR(dup(handle_.fd.get())));
+  if (!duped_fd.is_valid()) {
+    DPLOG(ERROR) << "dup(" << handle_.fd.get() << ") failed";
+    return {};
+  }
+
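+  // Only the main descriptor is duplicated; non-writable regions never carry
+  // a read-only descriptor, so an invalid ScopedFD is passed for it.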
+  return PlatformSharedMemoryRegion({std::move(duped_fd), ScopedFD()}, mode_,
+                                    size_, guid_);
+}
+
+bool PlatformSharedMemoryRegion::ConvertToReadOnly() {
+  if (!IsValid())
+    return false;
+
+  CHECK_EQ(mode_, Mode::kWritable)
+      << "Only writable shared memory region can be converted to read-only";
+
+  handle_.fd.reset(handle_.readonly_fd.release());
+  mode_ = Mode::kReadOnly;
+  return true;
+}
+
+bool PlatformSharedMemoryRegion::MapAt(off_t offset,
+                                       size_t size,
+                                       void** memory,
+                                       size_t* mapped_size) const {
+  if (!IsValid())
+    return false;
+
+  size_t end_byte;
+  if (!CheckAdd(offset, size).AssignIfValid(&end_byte) || end_byte > size_) {
+    return false;
+  }
+
+  bool write_allowed = mode_ != Mode::kReadOnly;
+  *memory = mmap(nullptr, size, PROT_READ | (write_allowed ? PROT_WRITE : 0),
+                 MAP_SHARED, handle_.fd.get(), offset);
+
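+  // mmap() signals failure by returning MAP_FAILED ((void*)-1) rather than
+  // null, so check for both before trusting the address.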
+  bool mmap_succeeded = *memory && *memory != reinterpret_cast<void*>(-1);
+  if (!mmap_succeeded) {
+    DPLOG(ERROR) << "mmap " << handle_.fd.get() << " failed";
+    return false;
+  }
+
+  *mapped_size = size;
+  DCHECK_EQ(0U,
+            reinterpret_cast<uintptr_t>(*memory) & (kMapMinimumAlignment - 1));
+  return true;
+}
+
+// static
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Create(Mode mode,
+                                                              size_t size) {
+#if defined(OS_NACL)
+  // Untrusted code can't create descriptors or handles.
+  return {};
+#else
+  if (size == 0)
+    return {};
+
+  if (size > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return {};
+
+  CHECK_NE(mode, Mode::kReadOnly) << "Creating a region in read-only mode will "
+                                     "lead to this region being non-modifiable";
+
+  // This function theoretically can block on the disk, but realistically
+  // the temporary files we create will just go into the buffer cache
+  // and be deleted before they ever make it out to disk.
+  ThreadRestrictions::ScopedAllowIO allow_io;
+
+  // We don't use the shm_open() API so that the --disable-dev-shm-usage flag
+  // can be supported.
+  FilePath directory;
+  if (!GetShmemTempDir(false /* executable */, &directory))
+    return {};
+
+  ScopedFD fd;
+  FilePath path;
+  fd.reset(CreateAndOpenFdForTemporaryFileInDir(directory, &path));
+
+  if (!fd.is_valid()) {
+    PLOG(ERROR) << "Creating shared memory in " << path.value() << " failed";
+    FilePath dir = path.DirName();
+    if (access(dir.value().c_str(), W_OK | X_OK) < 0) {
+      PLOG(ERROR) << "Unable to access(W_OK|X_OK) " << dir.value();
+      if (dir.value() == "/dev/shm") {
+        LOG(FATAL) << "This is frequently caused by incorrect permissions on "
+                   << "/dev/shm.  Try 'sudo chmod 1777 /dev/shm' to fix.";
+      }
+    }
+    return {};
+  }
+
+  // Deleting the file prevents anyone else from mapping it in (making it
+  // private), and prevents the need for cleanup (once the last fd is
+  // closed, it is truly freed).
+  ScopedPathUnlinker path_unlinker(&path);
+
+  ScopedFD readonly_fd;
+  if (mode == Mode::kWritable) {
+    // Also open as readonly so that we can ConvertToReadOnly().
+    readonly_fd.reset(HANDLE_EINTR(open(path.value().c_str(), O_RDONLY)));
+    if (!readonly_fd.is_valid()) {
+      DPLOG(ERROR) << "open(\"" << path.value() << "\", O_RDONLY) failed";
+      return {};
+    }
+  }
+
+  // Get current size.
+  struct stat stat = {};
+  if (fstat(fd.get(), &stat) != 0)
+    return {};
+  const size_t current_size = stat.st_size;
+  if (current_size != size) {
+    if (HANDLE_EINTR(ftruncate(fd.get(), size)) != 0)
+      return {};
+  }
+
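+  // Verify that the read-only descriptor refers to the same file as the
+  // writable one; a mismatch means the file was swapped out from under us
+  // between the two open() calls.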
+  if (readonly_fd.is_valid()) {
+    struct stat readonly_stat = {};
+    if (fstat(readonly_fd.get(), &readonly_stat))
+      NOTREACHED();
+
+    if (stat.st_dev != readonly_stat.st_dev ||
+        stat.st_ino != readonly_stat.st_ino) {
+      LOG(ERROR) << "Writable and read-only inodes don't match; bailing";
+      return {};
+    }
+  }
+
+  return PlatformSharedMemoryRegion({std::move(fd), std::move(readonly_fd)},
+                                    mode, size, UnguessableToken::Create());
+#endif  // !defined(OS_NACL)
+}
+
+bool PlatformSharedMemoryRegion::CheckPlatformHandlePermissionsCorrespondToMode(
+    PlatformHandle handle,
+    Mode mode,
+    size_t size) {
+  if (!CheckFDAccessMode(handle.fd,
+                         mode == Mode::kReadOnly ? O_RDONLY : O_RDWR)) {
+    return false;
+  }
+
+  if (mode == Mode::kWritable)
+    return CheckFDAccessMode(handle.readonly_fd, O_RDONLY);
+
+  // The second descriptor must be invalid in kReadOnly and kUnsafe modes.
+  if (handle.readonly_fd != -1) {
+    DLOG(ERROR) << "The second descriptor must be invalid";
+    return false;
+  }
+
+  return true;
+}
+
+PlatformSharedMemoryRegion::PlatformSharedMemoryRegion(
+    ScopedFDPair handle,
+    Mode mode,
+    size_t size,
+    const UnguessableToken& guid)
+    : handle_(std::move(handle)), mode_(mode), size_(size), guid_(guid) {}
+
+}  // namespace subtle
+}  // namespace base
diff --git a/base/memory/platform_shared_memory_region_unittest.cc b/base/memory/platform_shared_memory_region_unittest.cc
new file mode 100644
index 0000000..df3e526
--- /dev/null
+++ b/base/memory/platform_shared_memory_region_unittest.cc
@@ -0,0 +1,288 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/platform_shared_memory_region.h"
+
+#include "base/memory/shared_memory_mapping.h"
+#include "base/process/process_metrics.h"
+#include "base/sys_info.h"
+#include "base/test/gtest_util.h"
+#include "base/test/test_shared_memory_util.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+#include <mach/mach_vm.h>
+#endif
+
+namespace base {
+namespace subtle {
+
+const size_t kRegionSize = 1024;
+
+class PlatformSharedMemoryRegionTest : public ::testing::Test {};
+
+// Tests that a default constructed region is invalid and produces invalid
+// mappings.
+TEST_F(PlatformSharedMemoryRegionTest, DefaultConstructedRegionIsInvalid) {
+  PlatformSharedMemoryRegion region;
+  EXPECT_FALSE(region.IsValid());
+  WritableSharedMemoryMapping mapping = MapForTesting(&region);
+  EXPECT_FALSE(mapping.IsValid());
+  PlatformSharedMemoryRegion duplicate = region.Duplicate();
+  EXPECT_FALSE(duplicate.IsValid());
+  EXPECT_FALSE(region.ConvertToReadOnly());
+}
+
+// Tests that creating a region of 0 size returns an invalid region.
+TEST_F(PlatformSharedMemoryRegionTest, CreateRegionOfZeroSizeIsInvalid) {
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(0);
+  EXPECT_FALSE(region.IsValid());
+
+  PlatformSharedMemoryRegion region2 =
+      PlatformSharedMemoryRegion::CreateUnsafe(0);
+  EXPECT_FALSE(region2.IsValid());
+}
+
+// Tests that creating a region of size bigger than the integer max value
+// returns an invalid region.
+TEST_F(PlatformSharedMemoryRegionTest, CreateTooLargeRegionIsInvalid) {
+  size_t too_large_region_size =
+      static_cast<size_t>(std::numeric_limits<int>::max()) + 1;
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(too_large_region_size);
+  EXPECT_FALSE(region.IsValid());
+
+  PlatformSharedMemoryRegion region2 =
+      PlatformSharedMemoryRegion::CreateUnsafe(too_large_region_size);
+  EXPECT_FALSE(region2.IsValid());
+}
+
+// Tests that regions consistently report their size as the size requested at
+// creation time even if their allocation size is larger due to platform
+// constraints.
+TEST_F(PlatformSharedMemoryRegionTest, ReportedSizeIsRequestedSize) {
+  constexpr size_t kTestSizes[] = {1, 2, 3, 64, 4096, 1024 * 1024};
+  for (size_t size : kTestSizes) {
+    PlatformSharedMemoryRegion region =
+        PlatformSharedMemoryRegion::CreateWritable(size);
+    EXPECT_EQ(region.GetSize(), size);
+
+    region.ConvertToReadOnly();
+    EXPECT_EQ(region.GetSize(), size);
+  }
+}
+
+// Tests that the platform-specific handle converted to read-only cannot be used
+// to perform a writable mapping with low-level system APIs like mmap().
+TEST_F(PlatformSharedMemoryRegionTest, ReadOnlyHandleIsNotWritable) {
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+  EXPECT_TRUE(region.ConvertToReadOnly());
+  EXPECT_EQ(region.GetMode(), PlatformSharedMemoryRegion::Mode::kReadOnly);
+  EXPECT_TRUE(
+      CheckReadOnlyPlatformSharedMemoryRegionForTesting(std::move(region)));
+}
+
+// Tests that the PassPlatformHandle() call invalidates the region.
+TEST_F(PlatformSharedMemoryRegionTest, InvalidAfterPass) {
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+  ignore_result(region.PassPlatformHandle());
+  EXPECT_FALSE(region.IsValid());
+}
+
+// Tests that the region is invalid after move.
+TEST_F(PlatformSharedMemoryRegionTest, InvalidAfterMove) {
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+  PlatformSharedMemoryRegion moved_region = std::move(region);
+  EXPECT_FALSE(region.IsValid());
+  EXPECT_TRUE(moved_region.IsValid());
+}
+
+// Tests that calling Take() with the size parameter equal to zero returns an
+// invalid region.
+TEST_F(PlatformSharedMemoryRegionTest, TakeRegionOfZeroSizeIsInvalid) {
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+  PlatformSharedMemoryRegion region2 = PlatformSharedMemoryRegion::Take(
+      region.PassPlatformHandle(), region.GetMode(), 0, region.GetGUID());
+  EXPECT_FALSE(region2.IsValid());
+}
+
+// Tests that calling Take() with the size parameter bigger than the integer max
+// value returns an invalid region.
+TEST_F(PlatformSharedMemoryRegionTest, TakeTooLargeRegionIsInvalid) {
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+  PlatformSharedMemoryRegion region2 = PlatformSharedMemoryRegion::Take(
+      region.PassPlatformHandle(), region.GetMode(),
+      static_cast<size_t>(std::numeric_limits<int>::max()) + 1,
+      region.GetGUID());
+  EXPECT_FALSE(region2.IsValid());
+}
+
+// Tests that mapping bytes out of the region limits fails.
+TEST_F(PlatformSharedMemoryRegionTest, MapAtOutOfTheRegionLimitsTest) {
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+  WritableSharedMemoryMapping mapping =
+      MapAtForTesting(&region, 0, region.GetSize() + 1);
+  EXPECT_FALSE(mapping.IsValid());
+}
+
+// Tests that mapping with a size and offset causing overflow fails.
+TEST_F(PlatformSharedMemoryRegionTest, MapAtWithOverflowTest) {
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(
+          SysInfo::VMAllocationGranularity() * 2);
+  ASSERT_TRUE(region.IsValid());
+  size_t size = std::numeric_limits<size_t>::max();
+  size_t offset = SysInfo::VMAllocationGranularity();
+  // |size| + |offset| overflows and wraps around to a value below the region
+  // size, but mapping a region with these parameters must still fail.
+  EXPECT_LT(size + offset, region.GetSize());
+  WritableSharedMemoryMapping mapping = MapAtForTesting(&region, offset, size);
+  EXPECT_FALSE(mapping.IsValid());
+}
+
+#if defined(OS_POSIX) && !defined(OS_ANDROID) && !defined(OS_FUCHSIA) && \
+    !defined(OS_MACOSX)
+// Tests that the second handle is closed after a conversion to read-only on
+// POSIX.
+TEST_F(PlatformSharedMemoryRegionTest,
+       ConvertToReadOnlyInvalidatesSecondHandle) {
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+  ASSERT_TRUE(region.ConvertToReadOnly());
+  FDPair fds = region.GetPlatformHandle();
+  EXPECT_LT(fds.readonly_fd, 0);
+}
+#endif
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+// Tests that protection bits are set correctly for a read-only region on
+// macOS.
+TEST_F(PlatformSharedMemoryRegionTest, MapCurrentAndMaxProtectionSetCorrectly) {
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+  ASSERT_TRUE(region.ConvertToReadOnly());
+  WritableSharedMemoryMapping ro_mapping = MapForTesting(&region);
+  ASSERT_TRUE(ro_mapping.IsValid());
+
+  vm_region_basic_info_64 basic_info;
+  mach_vm_size_t dummy_size = 0;
+  void* temp_addr = ro_mapping.memory();
+  MachVMRegionResult result = GetBasicInfo(
+      mach_task_self(), &dummy_size,
+      reinterpret_cast<mach_vm_address_t*>(&temp_addr), &basic_info);
+  EXPECT_EQ(result, MachVMRegionResult::Success);
+  EXPECT_EQ(basic_info.protection & VM_PROT_ALL, VM_PROT_READ);
+  EXPECT_EQ(basic_info.max_protection & VM_PROT_ALL, VM_PROT_READ);
+}
+#endif
+
+// Tests that platform handle permissions are checked correctly.
+TEST_F(PlatformSharedMemoryRegionTest,
+       CheckPlatformHandlePermissionsCorrespondToMode) {
+  using Mode = PlatformSharedMemoryRegion::Mode;
+  auto check = [](const PlatformSharedMemoryRegion& region,
+                  PlatformSharedMemoryRegion::Mode mode) {
+    return PlatformSharedMemoryRegion::
+        CheckPlatformHandlePermissionsCorrespondToMode(
+            region.GetPlatformHandle(), mode, region.GetSize());
+  };
+
+  // Check kWritable region.
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+  EXPECT_TRUE(check(region, Mode::kWritable));
+  EXPECT_FALSE(check(region, Mode::kReadOnly));
+
+  // Check kReadOnly region.
+  ASSERT_TRUE(region.ConvertToReadOnly());
+  EXPECT_TRUE(check(region, Mode::kReadOnly));
+  EXPECT_FALSE(check(region, Mode::kWritable));
+  EXPECT_FALSE(check(region, Mode::kUnsafe));
+
+  // Check kUnsafe region.
+  PlatformSharedMemoryRegion region2 =
+      PlatformSharedMemoryRegion::CreateUnsafe(kRegionSize);
+  ASSERT_TRUE(region2.IsValid());
+  EXPECT_TRUE(check(region2, Mode::kUnsafe));
+  EXPECT_FALSE(check(region2, Mode::kReadOnly));
+}
+
+// Tests that it's impossible to create a read-only platform shared memory
+// region.
+TEST_F(PlatformSharedMemoryRegionTest, CreateReadOnlyRegionDeathTest) {
+#ifdef OFFICIAL_BUILD
+  // The official build does not print the reason a CHECK failed.
+  const char kErrorRegex[] = "";
+#else
+  const char kErrorRegex[] =
+      "Creating a region in read-only mode will lead to this region being "
+      "non-modifiable";
+#endif
+  EXPECT_DEATH_IF_SUPPORTED(
+      PlatformSharedMemoryRegion::Create(
+          PlatformSharedMemoryRegion::Mode::kReadOnly, kRegionSize),
+      kErrorRegex);
+}
+
+// Tests that it's prohibited to duplicate a writable region.
+TEST_F(PlatformSharedMemoryRegionTest, DuplicateWritableRegionDeathTest) {
+#ifdef OFFICIAL_BUILD
+  const char kErrorRegex[] = "";
+#else
+  const char kErrorRegex[] =
+      "Duplicating a writable shared memory region is prohibited";
+#endif
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+  EXPECT_DEATH_IF_SUPPORTED(region.Duplicate(), kErrorRegex);
+}
+
+// Tests that it's prohibited to convert an unsafe region to read-only.
+TEST_F(PlatformSharedMemoryRegionTest, UnsafeRegionConvertToReadOnlyDeathTest) {
+#ifdef OFFICIAL_BUILD
+  const char kErrorRegex[] = "";
+#else
+  const char kErrorRegex[] =
+      "Only writable shared memory region can be converted to read-only";
+#endif
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateUnsafe(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+  EXPECT_DEATH_IF_SUPPORTED(region.ConvertToReadOnly(), kErrorRegex);
+}
+
+// Tests that it's prohibited to convert a read-only region to read-only.
+TEST_F(PlatformSharedMemoryRegionTest,
+       ReadOnlyRegionConvertToReadOnlyDeathTest) {
+#ifdef OFFICIAL_BUILD
+  const char kErrorRegex[] = "";
+#else
+  const char kErrorRegex[] =
+      "Only writable shared memory region can be converted to read-only";
+#endif
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+  EXPECT_TRUE(region.ConvertToReadOnly());
+  EXPECT_DEATH_IF_SUPPORTED(region.ConvertToReadOnly(), kErrorRegex);
+}
+
+}  // namespace subtle
+}  // namespace base
diff --git a/base/memory/platform_shared_memory_region_win.cc b/base/memory/platform_shared_memory_region_win.cc
new file mode 100644
index 0000000..b6608da
--- /dev/null
+++ b/base/memory/platform_shared_memory_region_win.cc
@@ -0,0 +1,345 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/platform_shared_memory_region.h"
+
+#include <aclapi.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/allocator/partition_allocator/page_allocator.h"
+#include "base/bits.h"
+#include "base/metrics/histogram_functions.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/numerics/checked_math.h"
+#include "base/process/process_handle.h"
+#include "base/rand_util.h"
+#include "base/strings/stringprintf.h"
+
+namespace base {
+namespace subtle {
+
+namespace {
+
+// Errors that can occur during Shared Memory construction.
+// These match tools/metrics/histograms/histograms.xml.
+// This enum is append-only.
+enum CreateError {
+  SUCCESS = 0,
+  SIZE_ZERO = 1,
+  SIZE_TOO_LARGE = 2,
+  INITIALIZE_ACL_FAILURE = 3,
+  INITIALIZE_SECURITY_DESC_FAILURE = 4,
+  SET_SECURITY_DESC_FAILURE = 5,
+  CREATE_FILE_MAPPING_FAILURE = 6,
+  REDUCE_PERMISSIONS_FAILURE = 7,
+  ALREADY_EXISTS = 8,
+  CREATE_ERROR_LAST = ALREADY_EXISTS
+};
+
+// Emits UMA metrics about encountered errors. Pass zero (0) for |winerror|
+// if there is no associated Windows error.
+void LogError(CreateError error, DWORD winerror) {
+  UMA_HISTOGRAM_ENUMERATION("SharedMemory.CreateError", error,
+                            CREATE_ERROR_LAST + 1);
+  static_assert(ERROR_SUCCESS == 0, "Windows error code changed!");
+  if (winerror != ERROR_SUCCESS)
+    UmaHistogramSparse("SharedMemory.CreateWinError", winerror);
+}
+
+typedef enum _SECTION_INFORMATION_CLASS {
+  SectionBasicInformation,
+} SECTION_INFORMATION_CLASS;
+
+typedef struct _SECTION_BASIC_INFORMATION {
+  PVOID BaseAddress;
+  ULONG Attributes;
+  LARGE_INTEGER Size;
+} SECTION_BASIC_INFORMATION, *PSECTION_BASIC_INFORMATION;
+
+typedef ULONG(__stdcall* NtQuerySectionType)(
+    HANDLE SectionHandle,
+    SECTION_INFORMATION_CLASS SectionInformationClass,
+    PVOID SectionInformation,
+    ULONG SectionInformationLength,
+    PULONG ResultLength);
+
+// Returns the length of the memory section starting at the supplied address.
+size_t GetMemorySectionSize(void* address) {
+  MEMORY_BASIC_INFORMATION memory_info;
+  if (!::VirtualQuery(address, &memory_info, sizeof(memory_info)))
+    return 0;
+  return memory_info.RegionSize -
+         (static_cast<char*>(address) -
+          static_cast<char*>(memory_info.AllocationBase));
+}
+
+// Checks if the section object is safe to map. At the moment this just means
+// it's not an image section.
+bool IsSectionSafeToMap(HANDLE handle) {
+  static NtQuerySectionType nt_query_section_func =
+      reinterpret_cast<NtQuerySectionType>(
+          ::GetProcAddress(::GetModuleHandle(L"ntdll.dll"), "NtQuerySection"));
+  DCHECK(nt_query_section_func);
+
+  // The handle must have SECTION_QUERY access for this to succeed.
+  SECTION_BASIC_INFORMATION basic_information = {};
+  ULONG status =
+      nt_query_section_func(handle, SectionBasicInformation, &basic_information,
+                            sizeof(basic_information), nullptr);
+  if (status)
+    return false;
+  return (basic_information.Attributes & SEC_IMAGE) != SEC_IMAGE;
+}
+
+// Returns a HANDLE on success and |nullptr| on failure.
+// This function is similar to CreateFileMapping, but removes the permissions
+// WRITE_DAC, WRITE_OWNER, READ_CONTROL, and DELETE.
+//
+// A newly created file mapping has two sets of permissions. It has access
+// control permissions (WRITE_DAC, WRITE_OWNER, READ_CONTROL, and DELETE) and
+// file permissions (FILE_MAP_READ, FILE_MAP_WRITE, etc.). The Chrome sandbox
+// prevents HANDLEs with the WRITE_DAC permission from being duplicated into
+// unprivileged processes.
+//
+// In order to remove the access control permissions, after being created the
+// handle is duplicated with only the file access permissions.
+HANDLE CreateFileMappingWithReducedPermissions(SECURITY_ATTRIBUTES* sa,
+                                               size_t rounded_size,
+                                               LPCWSTR name) {
+  HANDLE h = CreateFileMapping(INVALID_HANDLE_VALUE, sa, PAGE_READWRITE, 0,
+                               static_cast<DWORD>(rounded_size), name);
+  if (!h) {
+    LogError(CREATE_FILE_MAPPING_FAILURE, GetLastError());
+    return nullptr;
+  }
+
+  HANDLE h2;
+  ProcessHandle process = GetCurrentProcess();
+  BOOL success = ::DuplicateHandle(
+      process, h, process, &h2, FILE_MAP_READ | FILE_MAP_WRITE | SECTION_QUERY,
+      FALSE, 0);
+  BOOL rv = ::CloseHandle(h);
+  DCHECK(rv);
+
+  if (!success) {
+    LogError(REDUCE_PERMISSIONS_FAILURE, GetLastError());
+    return nullptr;
+  }
+
+  return h2;
+}
+
+}  // namespace
+
+// static
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Take(
+    win::ScopedHandle handle,
+    Mode mode,
+    size_t size,
+    const UnguessableToken& guid) {
+  if (!handle.IsValid())
+    return {};
+
+  if (size == 0)
+    return {};
+
+  if (size > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return {};
+
+  if (!IsSectionSafeToMap(handle.Get()))
+    return {};
+
+  CHECK(
+      CheckPlatformHandlePermissionsCorrespondToMode(handle.Get(), mode, size));
+
+  return PlatformSharedMemoryRegion(std::move(handle), mode, size, guid);
+}
+
+HANDLE PlatformSharedMemoryRegion::GetPlatformHandle() const {
+  return handle_.Get();
+}
+
+bool PlatformSharedMemoryRegion::IsValid() const {
+  return handle_.IsValid();
+}
+
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Duplicate() const {
+  if (!IsValid())
+    return {};
+
+  CHECK_NE(mode_, Mode::kWritable)
+      << "Duplicating a writable shared memory region is prohibited";
+
+  HANDLE duped_handle;
+  ProcessHandle process = GetCurrentProcess();
+  BOOL success =
+      ::DuplicateHandle(process, handle_.Get(), process, &duped_handle, 0,
+                        FALSE, DUPLICATE_SAME_ACCESS);
+  if (!success)
+    return {};
+
+  return PlatformSharedMemoryRegion(win::ScopedHandle(duped_handle), mode_,
+                                    size_, guid_);
+}
+
+bool PlatformSharedMemoryRegion::ConvertToReadOnly() {
+  if (!IsValid())
+    return false;
+
+  CHECK_EQ(mode_, Mode::kWritable)
+      << "Only writable shared memory region can be converted to read-only";
+
+  win::ScopedHandle handle_copy(handle_.Take());
+
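+  // Duplicate with only FILE_MAP_READ | SECTION_QUERY so that no writable
+  // view can ever be mapped from the resulting handle.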
+  HANDLE duped_handle;
+  ProcessHandle process = GetCurrentProcess();
+  BOOL success =
+      ::DuplicateHandle(process, handle_copy.Get(), process, &duped_handle,
+                        FILE_MAP_READ | SECTION_QUERY, FALSE, 0);
+  if (!success)
+    return false;
+
+  handle_.Set(duped_handle);
+  mode_ = Mode::kReadOnly;
+  return true;
+}
+
+bool PlatformSharedMemoryRegion::MapAt(off_t offset,
+                                       size_t size,
+                                       void** memory,
+                                       size_t* mapped_size) const {
+  if (!IsValid())
+    return false;
+
+  size_t end_byte;
+  if (!CheckAdd(offset, size).AssignIfValid(&end_byte) || end_byte > size_) {
+    return false;
+  }
+
+  bool write_allowed = mode_ != Mode::kReadOnly;
+  // Try to map the shared memory. On the first failure, release any reserved
+  // address space for a single entry.
+  for (int i = 0; i < 2; ++i) {
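+    // MapViewOfFile() takes the 64-bit offset split into high and low DWORDs.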
+    *memory = MapViewOfFile(
+        handle_.Get(), FILE_MAP_READ | (write_allowed ? FILE_MAP_WRITE : 0),
+        static_cast<uint64_t>(offset) >> 32, static_cast<DWORD>(offset), size);
+    if (*memory)
+      break;
+    ReleaseReservation();
+  }
+  if (!*memory) {
+    DPLOG(ERROR) << "Failed executing MapViewOfFile";
+    return false;
+  }
+
+  *mapped_size = GetMemorySectionSize(*memory);
+  DCHECK_EQ(0U,
+            reinterpret_cast<uintptr_t>(*memory) & (kMapMinimumAlignment - 1));
+  return true;
+}
+
+// static
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Create(Mode mode,
+                                                              size_t size) {
+  // TODO(crbug.com/210609): NaCl forces us to round up to 64k here, wasting
+  // 32k per mapping on average.
+  static const size_t kSectionSize = 65536;
+  if (size == 0) {
+    LogError(SIZE_ZERO, 0);
+    return {};
+  }
+
+  size_t rounded_size = bits::Align(size, kSectionSize);
+  if (rounded_size > static_cast<size_t>(std::numeric_limits<int>::max())) {
+    LogError(SIZE_TOO_LARGE, 0);
+    return {};
+  }
+
+  CHECK_NE(mode, Mode::kReadOnly) << "Creating a region in read-only mode will "
+                                     "lead to this region being non-modifiable";
+
+  // Add an empty DACL to enforce anonymous read-only sections.
+  ACL dacl;
+  SECURITY_DESCRIPTOR sd;
+  if (!InitializeAcl(&dacl, sizeof(dacl), ACL_REVISION)) {
+    LogError(INITIALIZE_ACL_FAILURE, GetLastError());
+    return {};
+  }
+  if (!InitializeSecurityDescriptor(&sd, SECURITY_DESCRIPTOR_REVISION)) {
+    LogError(INITIALIZE_SECURITY_DESC_FAILURE, GetLastError());
+    return {};
+  }
+  if (!SetSecurityDescriptorDacl(&sd, TRUE, &dacl, FALSE)) {
+    LogError(SET_SECURITY_DESC_FAILURE, GetLastError());
+    return {};
+  }
+
+  // Windows ignores DACLs on unnamed shared sections. Generate a random name
+  // in order to be able to enforce read-only.
+  uint64_t rand_values[4];
+  RandBytes(&rand_values, sizeof(rand_values));
+  string16 name =
+      StringPrintf(L"CrSharedMem_%016llx%016llx%016llx%016llx", rand_values[0],
+                   rand_values[1], rand_values[2], rand_values[3]);
+
+  SECURITY_ATTRIBUTES sa = {sizeof(sa), &sd, FALSE};
+  // Ask for the file mapping with reduced permissions to avoid passing the
+  // access control permissions granted by default into unprivileged processes.
+  HANDLE h =
+      CreateFileMappingWithReducedPermissions(&sa, rounded_size, name.c_str());
+  if (h == nullptr) {
+    // The error is logged within CreateFileMappingWithReducedPermissions().
+    return {};
+  }
+
+  win::ScopedHandle scoped_h(h);
+  // Check if the shared memory pre-exists.
+  if (GetLastError() == ERROR_ALREADY_EXISTS) {
+    LogError(ALREADY_EXISTS, ERROR_ALREADY_EXISTS);
+    return {};
+  }
+
+  return PlatformSharedMemoryRegion(std::move(scoped_h), mode, size,
+                                    UnguessableToken::Create());
+}
+
+// static
+bool PlatformSharedMemoryRegion::CheckPlatformHandlePermissionsCorrespondToMode(
+    PlatformHandle handle,
+    Mode mode,
+    size_t size) {
+  // Call ::DuplicateHandle() with FILE_MAP_WRITE as a desired access to check
+  // if the |handle| has a write access.
+  ProcessHandle process = GetCurrentProcess();
+  HANDLE duped_handle;
+  BOOL success = ::DuplicateHandle(process, handle, process, &duped_handle,
+                                   FILE_MAP_WRITE, FALSE, 0);
+  if (success) {
+    BOOL rv = ::CloseHandle(duped_handle);
+    DCHECK(rv);
+  }
+
+  bool is_read_only = !success;
+  bool expected_read_only = mode == Mode::kReadOnly;
+
+  if (is_read_only != expected_read_only) {
+    DLOG(ERROR) << "File mapping handle has wrong access rights: it is"
+                << (is_read_only ? " " : " not ") << "read-only but it should"
+                << (expected_read_only ? " " : " not ") << "be";
+    return false;
+  }
+
+  return true;
+}
+
+PlatformSharedMemoryRegion::PlatformSharedMemoryRegion(
+    win::ScopedHandle handle,
+    Mode mode,
+    size_t size,
+    const UnguessableToken& guid)
+    : handle_(std::move(handle)), mode_(mode), size_(size), guid_(guid) {}
+
+}  // namespace subtle
+}  // namespace base
diff --git a/base/memory/protected_memory.cc b/base/memory/protected_memory.cc
new file mode 100644
index 0000000..157a677
--- /dev/null
+++ b/base/memory/protected_memory.cc
@@ -0,0 +1,17 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/protected_memory.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+
+#if !defined(COMPONENT_BUILD)
+PROTECTED_MEMORY_SECTION int AutoWritableMemory::writers = 0;
+#endif  // !defined(COMPONENT_BUILD)
+
+base::LazyInstance<Lock>::Leaky AutoWritableMemory::writers_lock =
+    LAZY_INSTANCE_INITIALIZER;
+
+}  // namespace base
diff --git a/base/memory/protected_memory.h b/base/memory/protected_memory.h
new file mode 100644
index 0000000..3cb2ec3
--- /dev/null
+++ b/base/memory/protected_memory.h
@@ -0,0 +1,276 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Protected memory is memory holding security-sensitive data intended to be
+// left read-only for the majority of its lifetime to avoid being overwritten
+// by attackers. ProtectedMemory is a simple wrapper around platform-specific
+// APIs to set memory read-write and read-only when required. Protected memory
+// should be set read-write for the minimum amount of time required.
+
+// Normally mutable variables are held in read-write memory and constant data
+// is held in read-only memory to ensure it is not accidentally overwritten.
+// In some cases we want to hold mutable variables in read-only memory, except
+// when they are being written to, to ensure that they are not tampered with.
+//
+// ProtectedMemory is a container class intended to hold a single variable in
+// read-only memory, except when explicitly set read-write. The variable can be
+// set read-write by creating a scoped AutoWritableMemory object by calling
+// AutoWritableMemory::Create(), the memory stays writable until the returned
+// object goes out of scope and is destructed. The wrapped variable can be
+// accessed using operator* and operator->.
+//
+// Instances of ProtectedMemory must be declared in the PROTECTED_MEMORY_SECTION
+// and as global variables. Because protected memory variables are globals,
+// the same rules apply that disallow non-trivial constructors and destructors.
+// Global definitions are required to avoid the linker placing statics in
+// inlinable functions into a comdat section and setting the protected memory
+// section read-write when they are merged.
+//
+// EXAMPLE:
+//
+//  struct Items { void* item1; };
+//  static PROTECTED_MEMORY_SECTION base::ProtectedMemory<Items> items;
+//  void InitializeItems() {
+//    // Explicitly set items read-write before writing to it.
+//    auto writer = base::AutoWritableMemory::Create(items);
+//    items->item1 = /* ... */;
+//    assert(items->item1 != nullptr);
+//    // items is set back to read-only on the destruction of writer
+//  }
+//
+//  using FnPtr = void (*)(void);
+//  PROTECTED_MEMORY_SECTION base::ProtectedMemory<FnPtr> fnPtr;
+//  FnPtr ResolveFnPtr(void) {
+//    // The Initializer nested class is a helper class for creating a static
+//    // initializer for a ProtectedMemory variable. It implicitly sets the
+//    // variable read-write during initialization.
+//    static base::ProtectedMemory<FnPtr>::Initializer I(&fnPtr,
+//      reinterpret_cast<FnPtr>(dlsym(/* ... */)));
+//    return *fnPtr;
+//  }
+
+#ifndef BASE_MEMORY_PROTECTED_MEMORY_H_
+#define BASE_MEMORY_PROTECTED_MEMORY_H_
+
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/protected_memory_buildflags.h"
+#include "base/synchronization/lock.h"
+#include "build/build_config.h"
+
+#define PROTECTED_MEMORY_ENABLED 1
+
+// Linking with lld is required to workaround crbug.com/792777.
+// TODO(vtsyrklevich): Remove once support for gold on Android/CrOs is dropped
+#if defined(OS_LINUX) && BUILDFLAG(USE_LLD)
+// Define the section read-only
+__asm__(".section protected_memory, \"a\"\n\t");
+#define PROTECTED_MEMORY_SECTION __attribute__((section("protected_memory")))
+
+// Explicitly mark these variables hidden so the symbols are local to the
+// currently built component. Otherwise they are created with global (external)
+// linkage and component builds would break because a single pair of these
+// symbols would override the rest.
+__attribute__((visibility("hidden"))) extern char __start_protected_memory;
+__attribute__((visibility("hidden"))) extern char __stop_protected_memory;
+
+#elif defined(OS_MACOSX) && !defined(OS_IOS)
+// The segment the section is in is defined read-only with a linker flag in
+// build/config/mac/BUILD.gn
+#define PROTECTED_MEMORY_SECTION \
+  __attribute__((section("PROTECTED_MEMORY, protected_memory")))
+extern char __start_protected_memory __asm(
+    "section$start$PROTECTED_MEMORY$protected_memory");
+extern char __stop_protected_memory __asm(
+    "section$end$PROTECTED_MEMORY$protected_memory");
+
+#elif defined(OS_WIN)
+// Define a read-write prot section. The $a, $mem, and $z 'sub-sections' are
+// merged alphabetically so $a and $z are used to define the start and end of
+// the protected memory section, and $mem holds protected variables.
+// (Note: Sections in Portable Executables are equivalent to segments in other
+// executable formats, so this section is mapped into its own pages.)
+#pragma section("prot$a", read, write)
+#pragma section("prot$mem", read, write)
+#pragma section("prot$z", read, write)
+
+// We want the protected memory section to be read-only, not read-write, so we
+// instruct the linker to set the section read-only at link time. We do this
+// at link time instead of compile time, because defining the prot section
+// read-only would cause mis-compiles due to optimizations assuming that the
+// section contents are constant.
+#pragma comment(linker, "/SECTION:prot,R")
+
+__declspec(allocate("prot$a")) __declspec(selectany)
+char __start_protected_memory;
+__declspec(allocate("prot$z")) __declspec(selectany)
+char __stop_protected_memory;
+
+#define PROTECTED_MEMORY_SECTION __declspec(allocate("prot$mem"))
+
+#else
+#undef PROTECTED_MEMORY_ENABLED
+#define PROTECTED_MEMORY_ENABLED 0
+#define PROTECTED_MEMORY_SECTION
+#endif
+
+namespace base {
+
+template <typename T>
+class ProtectedMemory {
+ public:
+  ProtectedMemory() = default;
+
+  // Expose direct access to the encapsulated variable
+  T& operator*() { return data; }
+  const T& operator*() const { return data; }
+  T* operator->() { return &data; }
+  const T* operator->() const { return &data; }
+
+  // Helper class for creating simple ProtectedMemory static initializers.
+  class Initializer {
+   public:
+    // Defined out-of-line below to break the circular definition dependency
+    // between ProtectedMemory and AutoWritableMemory.
+    Initializer(ProtectedMemory<T>* PM, const T& Init);
+
+    DISALLOW_IMPLICIT_CONSTRUCTORS(Initializer);
+  };
+
+ private:
+  T data;
+
+  DISALLOW_COPY_AND_ASSIGN(ProtectedMemory);
+};
+
+// DCHECK that the byte at |ptr| is read-only.
+BASE_EXPORT void AssertMemoryIsReadOnly(const void* ptr);
+
+// Abstract out platform-specific methods to get the beginning and end of the
+// PROTECTED_MEMORY_SECTION. ProtectedMemoryEnd returns a pointer to the byte
+// past the end of the PROTECTED_MEMORY_SECTION.
+#if PROTECTED_MEMORY_ENABLED
+constexpr void* ProtectedMemoryStart = &__start_protected_memory;
+constexpr void* ProtectedMemoryEnd = &__stop_protected_memory;
+#endif
+
+#if defined(COMPONENT_BUILD)
+namespace internal {
+
+// For component builds we want to define a separate global writers variable
+// (explained below) in every DSO that includes this header. To do that we use
+// this template to define a global without duplicate symbol errors.
+template <typename T>
+struct DsoSpecific {
+  static T value;
+};
+template <typename T>
+T DsoSpecific<T>::value = 0;
+
+}  // namespace internal
+#endif  // defined(COMPONENT_BUILD)
+
+// A class that sets a given ProtectedMemory variable writable while the
+// AutoWritableMemory is in scope. This class implements the logic for setting
+// the protected memory region read-only/read-write in a thread-safe manner.
+class AutoWritableMemory {
+ private:
+  // 'writers' is a global holding the number of ProtectedMemory instances set
+  // writable, used to avoid races setting protected memory readable/writable.
+  // When this reaches zero the protected memory region is set read only.
+  // Access is controlled by writers_lock.
+#if defined(COMPONENT_BUILD)
+  // For component builds writers is a reference to an int defined separately in
+  // every DSO.
+  static constexpr int& writers = internal::DsoSpecific<int>::value;
+#else
+  // Otherwise, we declare writers in the protected memory section to avoid the
+  // scenario where an attacker could overwrite it with a large value and invoke
+  // code that constructs and destructs an AutoWritableMemory. After such a call
+  // protected memory would still be set writable because writers > 0.
+  static int writers;
+#endif  // defined(COMPONENT_BUILD)
+
+  // Synchronizes access to the writers variable and the simultaneous actions
+  // that need to happen alongside writers changes, e.g. setting the protected
+  // memory region read-only when writers is decremented to 0.
+  static BASE_EXPORT base::LazyInstance<Lock>::Leaky writers_lock;
+
+  // Abstract out platform-specific memory APIs. |end| points to the byte past
+  // the end of the region of memory having its memory protections changed.
+  BASE_EXPORT bool SetMemoryReadWrite(void* start, void* end);
+  BASE_EXPORT bool SetMemoryReadOnly(void* start, void* end);
+
+  // If this is the first writer (i.e. writers == 0), set the writers variable
+  // read-write. Next, increment writers and set the requested memory writable.
+  AutoWritableMemory(void* ptr, void* ptr_end) {
+#if PROTECTED_MEMORY_ENABLED
+    DCHECK(ptr >= ProtectedMemoryStart && ptr_end <= ProtectedMemoryEnd);
+
+    {
+      base::AutoLock auto_lock(writers_lock.Get());
+      if (writers == 0) {
+        AssertMemoryIsReadOnly(ptr);
+#if !defined(COMPONENT_BUILD)
+        AssertMemoryIsReadOnly(&writers);
+        CHECK(SetMemoryReadWrite(&writers, &writers + 1));
+#endif  // !defined(COMPONENT_BUILD)
+      }
+
+      writers++;
+    }
+
+    CHECK(SetMemoryReadWrite(ptr, ptr_end));
+#endif  // PROTECTED_MEMORY_ENABLED
+  }
+
+ public:
+  // Wrap the private constructor to create an easy-to-use interface to
+  // construct AutoWritableMemory objects.
+  template <typename T>
+  static AutoWritableMemory Create(ProtectedMemory<T>& PM) {
+    T* ptr = &*PM;
+    return AutoWritableMemory(ptr, ptr + 1);
+  }
+
+  // Move constructor just increments writers
+  AutoWritableMemory(AutoWritableMemory&& original) {
+#if PROTECTED_MEMORY_ENABLED
+    base::AutoLock auto_lock(writers_lock.Get());
+    CHECK_GT(writers, 0);
+    writers++;
+#endif  // PROTECTED_MEMORY_ENABLED
+  }
+
+  // On destruction decrement writers, and if no other writers exist, set the
+  // entire protected memory region read-only.
+  ~AutoWritableMemory() {
+#if PROTECTED_MEMORY_ENABLED
+    base::AutoLock auto_lock(writers_lock.Get());
+    CHECK_GT(writers, 0);
+    writers--;
+
+    if (writers == 0) {
+      CHECK(SetMemoryReadOnly(ProtectedMemoryStart, ProtectedMemoryEnd));
+#if !defined(COMPONENT_BUILD)
+      AssertMemoryIsReadOnly(&writers);
+#endif  // !defined(COMPONENT_BUILD)
+    }
+#endif  // PROTECTED_MEMORY_ENABLED
+  }
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(AutoWritableMemory);
+};
+
+template <typename T>
+ProtectedMemory<T>::Initializer::Initializer(ProtectedMemory<T>* PM,
+                                             const T& Init) {
+  AutoWritableMemory writer = AutoWritableMemory::Create(*PM);
+  **PM = Init;
+}
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_PROTECTED_MEMORY_H_
diff --git a/base/memory/protected_memory_cfi.h b/base/memory/protected_memory_cfi.h
new file mode 100644
index 0000000..a90023b
--- /dev/null
+++ b/base/memory/protected_memory_cfi.h
@@ -0,0 +1,86 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Helper routines to call function pointers stored in protected memory with
+// Control Flow Integrity indirect call checking disabled. Some indirect calls,
+// e.g. dynamically resolved symbols in another DSO, can not be accounted for by
+// CFI-icall. These routines allow those symbols to be called without CFI-icall
+// checking safely by ensuring that they are placed in protected memory.
+
+#ifndef BASE_MEMORY_PROTECTED_MEMORY_CFI_H_
+#define BASE_MEMORY_PROTECTED_MEMORY_CFI_H_
+
+#include <utility>
+
+#include "base/cfi_buildflags.h"
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/memory/protected_memory.h"
+#include "build/build_config.h"
+
+#if BUILDFLAG(CFI_ICALL_CHECK) && !PROTECTED_MEMORY_ENABLED
+#error "CFI-icall enabled for platform without protected memory support"
+#endif  // BUILDFLAG(CFI_ICALL_CHECK) && !PROTECTED_MEMORY_ENABLED
+
+namespace base {
+namespace internal {
+
+// This class is used to exempt calls to function pointers stored in
+// ProtectedMemory from cfi-icall checking. It's not secure to use directly, it
+// should only be used by the UnsanitizedCfiCall() functions below. Given an
+// UnsanitizedCfiCall object, you can use operator() to call the encapsulated
+// function pointer without cfi-icall checking.
+template <typename FunctionType>
+class UnsanitizedCfiCall {
+ public:
+  explicit UnsanitizedCfiCall(FunctionType function) : function_(function) {}
+  UnsanitizedCfiCall(UnsanitizedCfiCall&&) = default;
+
+  template <typename... Args>
+  NO_SANITIZE("cfi-icall")
+  auto operator()(Args&&... args) {
+    return function_(std::forward<Args>(args)...);
+  }
+
+ private:
+  FunctionType function_;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(UnsanitizedCfiCall);
+};
+
+}  // namespace internal
+
+// These functions can be used to call function pointers in ProtectedMemory
+// without cfi-icall checking. They are intended to be used to create an
+// UnsanitizedCfiCall object and immediately call it. UnsanitizedCfiCall objects
+// should not be initialized directly or stored because they hold a function
+// pointer that will be called without CFI-icall checking in mutable memory. The
+// functions can be used as shown below:
+
+// ProtectedMemory<void (*)(int)> p;
+// UnsanitizedCfiCall(p)(5); /* In place of (*p)(5); */
+
+template <typename T>
+auto UnsanitizedCfiCall(const ProtectedMemory<T>& PM) {
+#if PROTECTED_MEMORY_ENABLED
+  DCHECK(&PM >= ProtectedMemoryStart && &PM < ProtectedMemoryEnd);
+#endif  // PROTECTED_MEMORY_ENABLED
+  return internal::UnsanitizedCfiCall<T>(*PM);
+}
+
+// struct S { void (*fp)(int); } s;
+// ProtectedMemory<S> p;
+// UnsanitizedCfiCall(p, &S::fp)(5); /* In place of p->fp(5); */
+
+template <typename T, typename Member>
+auto UnsanitizedCfiCall(const ProtectedMemory<T>& PM, Member member) {
+#if PROTECTED_MEMORY_ENABLED
+  DCHECK(&PM >= ProtectedMemoryStart && &PM < ProtectedMemoryEnd);
+#endif  // PROTECTED_MEMORY_ENABLED
+  return internal::UnsanitizedCfiCall<decltype(*PM.*member)>(*PM.*member);
+}
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_PROTECTED_MEMORY_CFI_H_
diff --git a/base/memory/protected_memory_posix.cc b/base/memory/protected_memory_posix.cc
new file mode 100644
index 0000000..d003d79
--- /dev/null
+++ b/base/memory/protected_memory_posix.cc
@@ -0,0 +1,79 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/protected_memory.h"
+
+#include <stdint.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#if defined(OS_LINUX)
+#include <sys/resource.h>
+#endif  // defined(OS_LINUX)
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+#include <mach/mach.h>
+#include <mach/mach_vm.h>
+#endif  // defined(OS_MACOSX) && !defined(OS_IOS)
+
+#include "base/posix/eintr_wrapper.h"
+#include "base/process/process_metrics.h"
+#include "base/synchronization/lock.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace {
+
+bool SetMemory(void* start, void* end, int prot) {
+  DCHECK(end > start);
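+  // mprotect() operates on whole pages, so round |start| down to its page
+  // boundary and extend the length to cover everything up to |end|.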
+  const uintptr_t page_mask = ~(base::GetPageSize() - 1);
+  const uintptr_t page_start = reinterpret_cast<uintptr_t>(start) & page_mask;
+  return mprotect(reinterpret_cast<void*>(page_start),
+                  reinterpret_cast<uintptr_t>(end) - page_start, prot) == 0;
+}
+
+}  // namespace
+
+bool AutoWritableMemory::SetMemoryReadWrite(void* start, void* end) {
+  return SetMemory(start, end, PROT_READ | PROT_WRITE);
+}
+
+bool AutoWritableMemory::SetMemoryReadOnly(void* start, void* end) {
+  return SetMemory(start, end, PROT_READ);
+}
+
+#if defined(OS_LINUX)
+void AssertMemoryIsReadOnly(const void* ptr) {
+#if DCHECK_IS_ON()
+  const uintptr_t page_mask = ~(base::GetPageSize() - 1);
+  const uintptr_t page_start = reinterpret_cast<uintptr_t>(ptr) & page_mask;
+
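+  // getrlimit() writes its result to the supplied buffer, so pointing it at
+  // a read-only page makes the syscall fail with EFAULT; success would mean
+  // the page is writable.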
+  // Note: we cast away const here, which should not matter: if the memory is
+  // actually written to, the DCHECKs below abort immediately.
+  int result =
+      getrlimit(RLIMIT_NPROC, reinterpret_cast<struct rlimit*>(page_start));
+  DCHECK_EQ(result, -1);
+  DCHECK_EQ(errno, EFAULT);
+#endif  // DCHECK_IS_ON()
+}
+#elif defined(OS_MACOSX) && !defined(OS_IOS)
+void AssertMemoryIsReadOnly(const void* ptr) {
+#if DCHECK_IS_ON()
+  mach_port_t object_name;
+  vm_region_basic_info_64 region_info;
+  mach_vm_size_t size = 1;
+  mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
+
+  kern_return_t kr = mach_vm_region(
+      mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&ptr), &size,
+      VM_REGION_BASIC_INFO_64, reinterpret_cast<vm_region_info_t>(&region_info),
+      &count, &object_name);
+  DCHECK_EQ(kr, KERN_SUCCESS);
+  DCHECK_EQ(region_info.protection, VM_PROT_READ);
+#endif  // DCHECK_IS_ON()
+}
+#endif  // defined(OS_LINUX) || (defined(OS_MACOSX) && !defined(OS_IOS))
+
+}  // namespace base
diff --git a/base/memory/protected_memory_unittest.cc b/base/memory/protected_memory_unittest.cc
new file mode 100644
index 0000000..b7daed3
--- /dev/null
+++ b/base/memory/protected_memory_unittest.cc
@@ -0,0 +1,126 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/protected_memory.h"
+#include "base/cfi_buildflags.h"
+#include "base/memory/protected_memory_cfi.h"
+#include "base/synchronization/lock.h"
+#include "base/test/gtest_util.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+struct Data {
+  Data() = default;
+  Data(int foo_) : foo(foo_) {}
+  int foo;
+};
+
+}  // namespace
+
+class ProtectedMemoryTest : public ::testing::Test {
+ protected:
+  // Run tests one at a time. Some of the negative tests can not be made thread
+  // safe.
+  void SetUp() final { lock.Acquire(); }
+  void TearDown() final { lock.Release(); }
+
+  Lock lock;
+};
+
+PROTECTED_MEMORY_SECTION ProtectedMemory<int> init;
+
+TEST_F(ProtectedMemoryTest, Initializer) {
+  static ProtectedMemory<int>::Initializer I(&init, 4);
+  EXPECT_EQ(*init, 4);
+}
+
+PROTECTED_MEMORY_SECTION ProtectedMemory<Data> data;
+
+TEST_F(ProtectedMemoryTest, Basic) {
+  AutoWritableMemory writer = AutoWritableMemory::Create(data);
+  data->foo = 5;
+  EXPECT_EQ(data->foo, 5);
+}
+
+#if defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
+
+#if PROTECTED_MEMORY_ENABLED
+TEST_F(ProtectedMemoryTest, ReadOnlyOnStart) {
+  EXPECT_DEATH({ data->foo = 6; AutoWritableMemory::Create(data); }, "");
+}
+
+TEST_F(ProtectedMemoryTest, ReadOnlyAfterSetWritable) {
+  { AutoWritableMemory writer = AutoWritableMemory::Create(data); }
+  EXPECT_DEATH({ data->foo = 7; }, "");
+}
+
+TEST_F(ProtectedMemoryTest, AssertMemoryIsReadOnly) {
+  AssertMemoryIsReadOnly(&data->foo);
+  { AutoWritableMemory::Create(data); }
+  AssertMemoryIsReadOnly(&data->foo);
+
+  ProtectedMemory<Data> writable_data;
+  EXPECT_DCHECK_DEATH({ AssertMemoryIsReadOnly(&writable_data->foo); });
+}
+
+TEST_F(ProtectedMemoryTest, FailsIfDefinedOutsideOfProtectedMemoryRegion) {
+  ProtectedMemory<Data> data;
+  EXPECT_DCHECK_DEATH({ AutoWritableMemory::Create(data); });
+}
+
+TEST_F(ProtectedMemoryTest, UnsanitizedCfiCallOutsideOfProtectedMemoryRegion) {
+  ProtectedMemory<void (*)(void)> data;
+  EXPECT_DCHECK_DEATH({ UnsanitizedCfiCall(data)(); });
+}
+#endif  // PROTECTED_MEMORY_ENABLED
+
+namespace {
+
+struct BadIcall {
+  BadIcall() = default;
+  BadIcall(int (*fp_)(int)) : fp(fp_) {}
+  int (*fp)(int);
+};
+
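+// bad_icall's return type (unsigned int) deliberately differs from the
+// int (*)(int) type through which it is called below, so CFI's indirect-call
+// check can flag calls made through the mismatched pointer.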
+unsigned int bad_icall(int i) {
+  return 4 + i;
+}
+
+}  // namespace
+
+PROTECTED_MEMORY_SECTION ProtectedMemory<BadIcall> icall_pm1;
+
+TEST_F(ProtectedMemoryTest, BadMemberCall) {
+  static ProtectedMemory<BadIcall>::Initializer I(
+      &icall_pm1, BadIcall(reinterpret_cast<int (*)(int)>(&bad_icall)));
+
+  EXPECT_EQ(UnsanitizedCfiCall(icall_pm1, &BadIcall::fp)(1), 5);
+#if !BUILDFLAG(CFI_ICALL_CHECK)
+  EXPECT_EQ(icall_pm1->fp(1), 5);
+#elif BUILDFLAG(CFI_ENFORCEMENT_TRAP) || BUILDFLAG(CFI_ENFORCEMENT_DIAGNOSTIC)
+  EXPECT_DEATH({ icall_pm1->fp(1); }, "");
+#endif
+}
+
+PROTECTED_MEMORY_SECTION ProtectedMemory<int (*)(int)> icall_pm2;
+
+TEST_F(ProtectedMemoryTest, BadFnPtrCall) {
+  static ProtectedMemory<int (*)(int)>::Initializer I(
+      &icall_pm2, reinterpret_cast<int (*)(int)>(&bad_icall));
+
+  EXPECT_EQ(UnsanitizedCfiCall(icall_pm2)(1), 5);
+#if !BUILDFLAG(CFI_ICALL_CHECK)
+  EXPECT_EQ((*icall_pm2)(1), 5);
+#elif BUILDFLAG(CFI_ENFORCEMENT_TRAP) || BUILDFLAG(CFI_ENFORCEMENT_DIAGNOSTIC)
+  EXPECT_DEATH({ (*icall_pm2)(1); }, "");
+#endif
+}
+
+#endif  // defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
+
+}  // namespace base
diff --git a/base/memory/protected_memory_win.cc b/base/memory/protected_memory_win.cc
new file mode 100644
index 0000000..cf3da78
--- /dev/null
+++ b/base/memory/protected_memory_win.cc
@@ -0,0 +1,52 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/protected_memory.h"
+
+#include <windows.h>
+
+#include <stdint.h>
+
+#include "base/process/process_metrics.h"
+#include "base/synchronization/lock.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace {
+
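+// As in the POSIX implementation, rounds |start| down to a page boundary and
+// re-protects every page covering [page_start, end).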
+bool SetMemory(void* start, void* end, DWORD prot) {
+  DCHECK(end > start);
+  const uintptr_t page_mask = ~(base::GetPageSize() - 1);
+  const uintptr_t page_start = reinterpret_cast<uintptr_t>(start) & page_mask;
+  DWORD old_prot;
+  return VirtualProtect(reinterpret_cast<void*>(page_start),
+                        reinterpret_cast<uintptr_t>(end) - page_start, prot,
+                        &old_prot) != 0;
+}
+
+}  // namespace
+
+bool AutoWritableMemory::SetMemoryReadWrite(void* start, void* end) {
+  return SetMemory(start, end, PAGE_READWRITE);
+}
+
+bool AutoWritableMemory::SetMemoryReadOnly(void* start, void* end) {
+  return SetMemory(start, end, PAGE_READONLY);
+}
+
+void AssertMemoryIsReadOnly(const void* ptr) {
+#if DCHECK_IS_ON()
+  const uintptr_t page_mask = ~(base::GetPageSize() - 1);
+  const uintptr_t page_start = reinterpret_cast<uintptr_t>(ptr) & page_mask;
+
+  MEMORY_BASIC_INFORMATION info;
+  SIZE_T result =
+      VirtualQuery(reinterpret_cast<LPCVOID>(page_start), &info, sizeof(info));
+  DCHECK_GT(result, 0U);
+  DCHECK(info.Protect == PAGE_READONLY);
+#endif  // DCHECK_IS_ON()
+}
+
+}  // namespace base
diff --git a/base/memory/ptr_util.h b/base/memory/ptr_util.h
new file mode 100644
index 0000000..42f4f49
--- /dev/null
+++ b/base/memory/ptr_util.h
@@ -0,0 +1,23 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_PTR_UTIL_H_
+#define BASE_MEMORY_PTR_UTIL_H_
+
+#include <memory>
+#include <utility>
+
+namespace base {
+
+// Helper to transfer ownership of a raw pointer to a std::unique_ptr<T>.
+// Note that std::unique_ptr<T> has very different semantics from
+// std::unique_ptr<T[]>: do not use this helper for array allocations.
+template <typename T>
+std::unique_ptr<T> WrapUnique(T* ptr) {
+  return std::unique_ptr<T>(ptr);
+}
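+
+// Example usage (illustrative; |Foo| stands in for any non-array type):
+//   std::unique_ptr<Foo> foo = base::WrapUnique(new Foo(1, 2, 3));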
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_PTR_UTIL_H_
diff --git a/base/memory/ptr_util_unittest.cc b/base/memory/ptr_util_unittest.cc
new file mode 100644
index 0000000..3fa40d8
--- /dev/null
+++ b/base/memory/ptr_util_unittest.cc
@@ -0,0 +1,40 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/ptr_util.h"
+
+#include <stddef.h>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+class DeleteCounter {
+ public:
+  DeleteCounter() { ++count_; }
+  ~DeleteCounter() { --count_; }
+
+  static size_t count() { return count_; }
+
+ private:
+  static size_t count_;
+};
+
+size_t DeleteCounter::count_ = 0;
+
+}  // namespace
+
+TEST(PtrUtilTest, WrapUnique) {
+  EXPECT_EQ(0u, DeleteCounter::count());
+  DeleteCounter* counter = new DeleteCounter;
+  EXPECT_EQ(1u, DeleteCounter::count());
+  std::unique_ptr<DeleteCounter> owned_counter = WrapUnique(counter);
+  EXPECT_EQ(1u, DeleteCounter::count());
+  owned_counter.reset();
+  EXPECT_EQ(0u, DeleteCounter::count());
+}
+
+}  // namespace base
diff --git a/base/memory/raw_scoped_refptr_mismatch_checker.h b/base/memory/raw_scoped_refptr_mismatch_checker.h
new file mode 100644
index 0000000..ab8b2ab
--- /dev/null
+++ b/base/memory/raw_scoped_refptr_mismatch_checker.h
@@ -0,0 +1,52 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_RAW_SCOPED_REFPTR_MISMATCH_CHECKER_H_
+#define BASE_MEMORY_RAW_SCOPED_REFPTR_MISMATCH_CHECKER_H_
+
+#include <type_traits>
+
+#include "base/template_util.h"
+
+// It is dangerous to post a task with a T* argument where T is a subtype of
+// RefCounted(Base|ThreadSafeBase), since by the time the parameter is used, the
+// object may already have been deleted since it was not held with a
+// scoped_refptr. Example: http://crbug.com/27191
+// The following set of traits are designed to generate a compile error
+// whenever this antipattern is attempted.
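+//
+// For example (illustrative): given a hypothetical class Foo that exposes
+// AddRef()/Release(), NeedsScopedRefptrButGetsRawPtr<Foo*>::value is true,
+// while NeedsScopedRefptrButGetsRawPtr<scoped_refptr<Foo>>::value is false.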
+
+namespace base {
+
+// This is a base internal implementation file used by task.h and callback.h.
+// Not for public consumption, so we wrap it in namespace internal.
+namespace internal {
+
+template <typename T, typename = void>
+struct IsRefCountedType : std::false_type {};
+
+template <typename T>
+struct IsRefCountedType<T,
+                        void_t<decltype(std::declval<T*>()->AddRef()),
+                               decltype(std::declval<T*>()->Release())>>
+    : std::true_type {};
+
+template <typename T>
+struct NeedsScopedRefptrButGetsRawPtr {
+  static_assert(!std::is_reference<T>::value,
+                "NeedsScopedRefptrButGetsRawPtr requires non-reference type.");
+
+  enum {
+    // Human-readable translation: you need to use a scoped_refptr if T is a
+    // raw pointer type and the pointee is a ref-counted type (one exposing
+    // AddRef() and Release()).
+    value = std::is_pointer<T>::value &&
+            IsRefCountedType<std::remove_pointer_t<T>>::value
+  };
+};
+
+}  // namespace internal
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_RAW_SCOPED_REFPTR_MISMATCH_CHECKER_H_
diff --git a/base/memory/read_only_shared_memory_region.cc b/base/memory/read_only_shared_memory_region.cc
new file mode 100644
index 0000000..6b654c9
--- /dev/null
+++ b/base/memory/read_only_shared_memory_region.cc
@@ -0,0 +1,97 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/read_only_shared_memory_region.h"
+
+#include <utility>
+
+#include "base/memory/shared_memory.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// static
+MappedReadOnlyRegion ReadOnlySharedMemoryRegion::Create(size_t size) {
+  subtle::PlatformSharedMemoryRegion handle =
+      subtle::PlatformSharedMemoryRegion::CreateWritable(size);
+  if (!handle.IsValid())
+    return {};
+
+  void* memory_ptr = nullptr;
+  size_t mapped_size = 0;
+  if (!handle.MapAt(0, handle.GetSize(), &memory_ptr, &mapped_size))
+    return {};
+
+  WritableSharedMemoryMapping mapping(memory_ptr, size, mapped_size,
+                                      handle.GetGUID());
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+  handle.ConvertToReadOnly(memory_ptr);
+#else
+  handle.ConvertToReadOnly();
+#endif  // defined(OS_MACOSX) && !defined(OS_IOS)
+  ReadOnlySharedMemoryRegion region(std::move(handle));
+
+  if (!region.IsValid() || !mapping.IsValid())
+    return {};
+
+  return {std::move(region), std::move(mapping)};
+}
+
+// static
+ReadOnlySharedMemoryRegion ReadOnlySharedMemoryRegion::Deserialize(
+    subtle::PlatformSharedMemoryRegion handle) {
+  return ReadOnlySharedMemoryRegion(std::move(handle));
+}
+
+// static
+subtle::PlatformSharedMemoryRegion
+ReadOnlySharedMemoryRegion::TakeHandleForSerialization(
+    ReadOnlySharedMemoryRegion region) {
+  return std::move(region.handle_);
+}
+
+ReadOnlySharedMemoryRegion::ReadOnlySharedMemoryRegion() = default;
+ReadOnlySharedMemoryRegion::ReadOnlySharedMemoryRegion(
+    ReadOnlySharedMemoryRegion&& region) = default;
+ReadOnlySharedMemoryRegion& ReadOnlySharedMemoryRegion::operator=(
+    ReadOnlySharedMemoryRegion&& region) = default;
+ReadOnlySharedMemoryRegion::~ReadOnlySharedMemoryRegion() = default;
+
+ReadOnlySharedMemoryRegion ReadOnlySharedMemoryRegion::Duplicate() const {
+  return ReadOnlySharedMemoryRegion(handle_.Duplicate());
+}
+
+ReadOnlySharedMemoryMapping ReadOnlySharedMemoryRegion::Map() const {
+  return MapAt(0, handle_.GetSize());
+}
+
+ReadOnlySharedMemoryMapping ReadOnlySharedMemoryRegion::MapAt(
+    off_t offset,
+    size_t size) const {
+  if (!IsValid())
+    return {};
+
+  void* memory = nullptr;
+  size_t mapped_size = 0;
+  if (!handle_.MapAt(offset, size, &memory, &mapped_size))
+    return {};
+
+  return ReadOnlySharedMemoryMapping(memory, size, mapped_size,
+                                     handle_.GetGUID());
+}
+
+bool ReadOnlySharedMemoryRegion::IsValid() const {
+  return handle_.IsValid();
+}
+
+ReadOnlySharedMemoryRegion::ReadOnlySharedMemoryRegion(
+    subtle::PlatformSharedMemoryRegion handle)
+    : handle_(std::move(handle)) {
+  if (handle_.IsValid()) {
+    CHECK_EQ(handle_.GetMode(),
+             subtle::PlatformSharedMemoryRegion::Mode::kReadOnly);
+  }
+}
+
+}  // namespace base
diff --git a/base/memory/read_only_shared_memory_region.h b/base/memory/read_only_shared_memory_region.h
new file mode 100644
index 0000000..54e73ed
--- /dev/null
+++ b/base/memory/read_only_shared_memory_region.h
@@ -0,0 +1,116 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_READ_ONLY_SHARED_MEMORY_REGION_H_
+#define BASE_MEMORY_READ_ONLY_SHARED_MEMORY_REGION_H_
+
+#include <utility>
+
+#include "base/macros.h"
+#include "base/memory/platform_shared_memory_region.h"
+#include "base/memory/shared_memory_mapping.h"
+
+namespace base {
+
+struct MappedReadOnlyRegion;
+
+// Scoped move-only handle to a region of platform shared memory. The instance
+// owns the platform handle it wraps. Mappings created by this region are
+// read-only. These mappings remain valid even after the region handle is moved
+// or destroyed.
+class BASE_EXPORT ReadOnlySharedMemoryRegion {
+ public:
+  using MappingType = ReadOnlySharedMemoryMapping;
+  // Creates a new ReadOnlySharedMemoryRegion instance of a given size along
+  // with the WritableSharedMemoryMapping which provides the only way to modify
+  // the content of the newly created region. The returned region and mapping
+  // are guaranteed to either be both valid or both invalid. Use
+  // |MappedReadOnlyRegion::IsValid()| as a shortcut for checking creation
+  // success.
+  //
+  // This means that the caller's process is the only process that can modify
+  // the region content. If you need to pass write access to another process,
+  // consider using WritableSharedMemoryRegion or UnsafeSharedMemoryRegion.
+  static MappedReadOnlyRegion Create(size_t size);
+
+  // Returns a ReadOnlySharedMemoryRegion built from a platform-specific handle
+  // that was taken from another ReadOnlySharedMemoryRegion instance. Returns an
+  // invalid region iff the |handle| is invalid. CHECK-fails if the |handle|
+  // isn't read-only.
+  // This should be used only by the code passing handles across process
+  // boundaries.
+  static ReadOnlySharedMemoryRegion Deserialize(
+      subtle::PlatformSharedMemoryRegion handle);
+
+  // Extracts a platform handle from the region. Ownership is transferred to
+  // the returned PlatformSharedMemoryRegion object.
+  // This should be used only for sending the handle from the current process
+  // to another.
+  static subtle::PlatformSharedMemoryRegion TakeHandleForSerialization(
+      ReadOnlySharedMemoryRegion region);
+
+  // Default constructor initializes an invalid instance.
+  ReadOnlySharedMemoryRegion();
+
+  // Move operations are allowed.
+  ReadOnlySharedMemoryRegion(ReadOnlySharedMemoryRegion&&);
+  ReadOnlySharedMemoryRegion& operator=(ReadOnlySharedMemoryRegion&&);
+
+  // Destructor closes shared memory region if valid.
+  // All created mappings will remain valid.
+  ~ReadOnlySharedMemoryRegion();
+
+  // Duplicates the underlying platform handle and creates a new
+  // ReadOnlySharedMemoryRegion instance that owns this handle. Returns a valid
+  // ReadOnlySharedMemoryRegion on success, invalid otherwise. The current
+  // region instance remains valid in any case.
+  ReadOnlySharedMemoryRegion Duplicate() const;
+
+  // Maps the shared memory region into the caller's address space with
+  // read-only access. The mapped address is guaranteed to have an alignment of
+  // at least |subtle::PlatformSharedMemoryRegion::kMapMinimumAlignment|.
+  // Returns a valid ReadOnlySharedMemoryMapping instance on success, invalid
+  // otherwise.
+  ReadOnlySharedMemoryMapping Map() const;
+
+  // Same as above, but maps only |size| bytes of the shared memory region
+  // starting at the given |offset|. |offset| must be aligned to the value of
+  // |SysInfo::VMAllocationGranularity()|. Returns an invalid mapping if the
+  // requested bytes are out of the region limits.
+  ReadOnlySharedMemoryMapping MapAt(off_t offset, size_t size) const;
+
+  // Whether the underlying platform handle is valid.
+  bool IsValid() const;
+
+  // Returns the maximum mapping size that can be created from this region.
+  size_t GetSize() const {
+    DCHECK(IsValid());
+    return handle_.GetSize();
+  }
+
+ private:
+  explicit ReadOnlySharedMemoryRegion(
+      subtle::PlatformSharedMemoryRegion handle);
+
+  subtle::PlatformSharedMemoryRegion handle_;
+
+  DISALLOW_COPY_AND_ASSIGN(ReadOnlySharedMemoryRegion);
+};
+
+// Helper struct for return value of ReadOnlySharedMemoryRegion::Create().
+struct MappedReadOnlyRegion {
+  ReadOnlySharedMemoryRegion region;
+  WritableSharedMemoryMapping mapping;
+  // Helper function to check the return value of
+  // ReadOnlySharedMemoryRegion::Create(). |region| and |mapping| are either
+  // both valid or both invalid.
+  bool IsValid() {
+    DCHECK_EQ(region.IsValid(), mapping.IsValid());
+    return region.IsValid() && mapping.IsValid();
+  }
+};
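+
+// Typical usage, as a sketch (error handling elided; |size| and |data| are
+// assumed to be supplied by the caller):
+//   base::MappedReadOnlyRegion mapped =
+//       base::ReadOnlySharedMemoryRegion::Create(size);
+//   if (mapped.IsValid()) {
+//     memcpy(mapped.mapping.memory(), data, size);
+//     // Hand mapped.region (or a Duplicate()) to the reader, which calls
+//     // Map() to obtain a ReadOnlySharedMemoryMapping.
+//   }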
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_READ_ONLY_SHARED_MEMORY_REGION_H_
diff --git a/base/memory/ref_counted.cc b/base/memory/ref_counted.cc
new file mode 100644
index 0000000..b9fa15f
--- /dev/null
+++ b/base/memory/ref_counted.cc
@@ -0,0 +1,68 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/ref_counted.h"
+
+#include "base/threading/thread_collision_warner.h"
+
+namespace base {
+namespace {
+
+#if DCHECK_IS_ON()
+std::atomic_int g_cross_thread_ref_count_access_allow_count(0);
+#endif
+
+}  // namespace
+
+namespace subtle {
+
+bool RefCountedThreadSafeBase::HasOneRef() const {
+  return ref_count_.IsOne();
+}
+
+#if DCHECK_IS_ON()
+RefCountedThreadSafeBase::~RefCountedThreadSafeBase() {
+  DCHECK(in_dtor_) << "RefCountedThreadSafe object deleted without "
+                      "calling Release()";
+}
+#endif
+
+#if defined(ARCH_CPU_64_BIT)
+void RefCountedBase::AddRefImpl() const {
+  // Check for |ref_count_| overflow only on 64-bit archs, since the number of
+  // objects may exceed 2^32.
+  // To avoid binary size bloat, this is a non-inline function.
+  CHECK(++ref_count_ > 0);
+}
+#endif
+
+#if !defined(ARCH_CPU_X86_FAMILY)
+bool RefCountedThreadSafeBase::Release() const {
+  return ReleaseImpl();
+}
+void RefCountedThreadSafeBase::AddRef() const {
+  AddRefImpl();
+}
+#endif
+
+#if DCHECK_IS_ON()
+bool RefCountedBase::CalledOnValidSequence() const {
+  return sequence_checker_.CalledOnValidSequence() ||
+         g_cross_thread_ref_count_access_allow_count.load() != 0;
+}
+#endif
+
+}  // namespace subtle
+
+#if DCHECK_IS_ON()
+ScopedAllowCrossThreadRefCountAccess::ScopedAllowCrossThreadRefCountAccess() {
+  ++g_cross_thread_ref_count_access_allow_count;
+}
+
+ScopedAllowCrossThreadRefCountAccess::~ScopedAllowCrossThreadRefCountAccess() {
+  --g_cross_thread_ref_count_access_allow_count;
+}
+#endif
+
+}  // namespace base
diff --git a/base/memory/ref_counted.h b/base/memory/ref_counted.h
new file mode 100644
index 0000000..249f70e
--- /dev/null
+++ b/base/memory/ref_counted.h
@@ -0,0 +1,425 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_REF_COUNTED_H_
+#define BASE_MEMORY_REF_COUNTED_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <utility>
+
+#include "base/atomic_ref_count.h"
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/scoped_refptr.h"
+#include "base/sequence_checker.h"
+#include "base/threading/thread_collision_warner.h"
+#include "build/build_config.h"
+
+namespace base {
+namespace subtle {
+
+class BASE_EXPORT RefCountedBase {
+ public:
+  bool HasOneRef() const { return ref_count_ == 1; }
+
+ protected:
+  explicit RefCountedBase(StartRefCountFromZeroTag) {
+#if DCHECK_IS_ON()
+    sequence_checker_.DetachFromSequence();
+#endif
+  }
+
+  explicit RefCountedBase(StartRefCountFromOneTag) : ref_count_(1) {
+#if DCHECK_IS_ON()
+    needs_adopt_ref_ = true;
+    sequence_checker_.DetachFromSequence();
+#endif
+  }
+
+  ~RefCountedBase() {
+#if DCHECK_IS_ON()
+    DCHECK(in_dtor_) << "RefCounted object deleted without calling Release()";
+#endif
+  }
+
+  void AddRef() const {
+    // TODO(maruel): Add back once it doesn't assert 500 times/sec.
+    // The current thread books the critical section "AddRelease"
+    // without releasing it.
+    // DFAKE_SCOPED_LOCK_THREAD_LOCKED(add_release_);
+#if DCHECK_IS_ON()
+    DCHECK(!in_dtor_);
+    DCHECK(!needs_adopt_ref_)
+        << "This RefCounted object is created with non-zero reference count."
+        << " The first reference to such an object has to be made by AdoptRef"
+        << " or MakeRefCounted.";
+    if (ref_count_ >= 1) {
+      DCHECK(CalledOnValidSequence());
+    }
+#endif
+
+    AddRefImpl();
+  }
+
+  // Returns true if the object should self-delete.
+  bool Release() const {
+    --ref_count_;
+
+    // TODO(maruel): Add back once it doesn't assert 500 times/sec.
+    // The current thread books the critical section "AddRelease"
+    // without releasing it.
+    // DFAKE_SCOPED_LOCK_THREAD_LOCKED(add_release_);
+
+#if DCHECK_IS_ON()
+    DCHECK(!in_dtor_);
+    if (ref_count_ == 0)
+      in_dtor_ = true;
+
+    if (ref_count_ >= 1)
+      DCHECK(CalledOnValidSequence());
+    if (ref_count_ == 1)
+      sequence_checker_.DetachFromSequence();
+#endif
+
+    return ref_count_ == 0;
+  }
+
+  // Returns true if it is safe to read or write the object, from a thread
+  // safety standpoint. Should be DCHECK'd from the methods of RefCounted
+  // classes if there is a danger of objects being shared across threads.
+  //
+  // This produces fewer false positives than adding a separate SequenceChecker
+  // into the subclass, because it automatically detaches from the sequence when
+  // the reference count is 1 (and never fails if there is only one reference).
+  //
+  // This means unlike a separate SequenceChecker, it will permit a singly
+  // referenced object to be passed between threads (not holding a reference on
+  // the sending thread), but will trap if the sending thread holds onto a
+  // reference, or if the object is accessed from multiple threads
+  // simultaneously.
+  bool IsOnValidSequence() const {
+#if DCHECK_IS_ON()
+    return ref_count_ <= 1 || CalledOnValidSequence();
+#else
+    return true;
+#endif
+  }
+
+ private:
+  template <typename U>
+  friend scoped_refptr<U> base::AdoptRef(U*);
+
+  void Adopted() const {
+#if DCHECK_IS_ON()
+    DCHECK(needs_adopt_ref_);
+    needs_adopt_ref_ = false;
+#endif
+  }
+
+#if defined(ARCH_CPU_64_BIT)
+  void AddRefImpl() const;
+#else
+  void AddRefImpl() const { ++ref_count_; }
+#endif
+
+#if DCHECK_IS_ON()
+  bool CalledOnValidSequence() const;
+#endif
+
+  mutable uint32_t ref_count_ = 0;
+
+#if DCHECK_IS_ON()
+  mutable bool needs_adopt_ref_ = false;
+  mutable bool in_dtor_ = false;
+  mutable SequenceChecker sequence_checker_;
+#endif
+
+  DFAKE_MUTEX(add_release_);
+
+  DISALLOW_COPY_AND_ASSIGN(RefCountedBase);
+};
+
+class BASE_EXPORT RefCountedThreadSafeBase {
+ public:
+  bool HasOneRef() const;
+
+ protected:
+  explicit constexpr RefCountedThreadSafeBase(StartRefCountFromZeroTag) {}
+  explicit constexpr RefCountedThreadSafeBase(StartRefCountFromOneTag)
+      : ref_count_(1) {
+#if DCHECK_IS_ON()
+    needs_adopt_ref_ = true;
+#endif
+  }
+
+#if DCHECK_IS_ON()
+  ~RefCountedThreadSafeBase();
+#else
+  ~RefCountedThreadSafeBase() = default;
+#endif
+
+// Release and AddRef are suitable for inlining on X86 because they generate
+// very small code sequences. On other platforms (ARM), it causes a size
+// regression and is probably not worth it.
+#if defined(ARCH_CPU_X86_FAMILY)
+  // Returns true if the object should self-delete.
+  bool Release() const { return ReleaseImpl(); }
+  void AddRef() const { AddRefImpl(); }
+#else
+  // Returns true if the object should self-delete.
+  bool Release() const;
+  void AddRef() const;
+#endif
+
+ private:
+  template <typename U>
+  friend scoped_refptr<U> base::AdoptRef(U*);
+
+  void Adopted() const {
+#if DCHECK_IS_ON()
+    DCHECK(needs_adopt_ref_);
+    needs_adopt_ref_ = false;
+#endif
+  }
+
+  ALWAYS_INLINE void AddRefImpl() const {
+#if DCHECK_IS_ON()
+    DCHECK(!in_dtor_);
+    DCHECK(!needs_adopt_ref_)
+        << "This RefCounted object is created with non-zero reference count."
+        << " The first reference to such an object has to be made by AdoptRef"
+        << " or MakeRefCounted.";
+#endif
+    ref_count_.Increment();
+  }
+
+  ALWAYS_INLINE bool ReleaseImpl() const {
+#if DCHECK_IS_ON()
+    DCHECK(!in_dtor_);
+    DCHECK(!ref_count_.IsZero());
+#endif
+    if (!ref_count_.Decrement()) {
+#if DCHECK_IS_ON()
+      in_dtor_ = true;
+#endif
+      return true;
+    }
+    return false;
+  }
+
+  mutable AtomicRefCount ref_count_{0};
+#if DCHECK_IS_ON()
+  mutable bool needs_adopt_ref_ = false;
+  mutable bool in_dtor_ = false;
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(RefCountedThreadSafeBase);
+};
+
+}  // namespace subtle
+
+// ScopedAllowCrossThreadRefCountAccess disables the check documented on
+// RefCounted below for rare pre-existing use cases where thread-safety was
+// guaranteed through other means (e.g. explicit sequencing of calls across
+// execution sequences when bouncing between threads in order). New callers
+// should refrain from using this: callsites handling thread-safety through
+// locks should use RefCountedThreadSafe, since the overhead of its atomics is
+// negligible compared to locks anyway, and callsites doing explicit
+// sequencing should properly std::move() the ref to avoid hitting this check.
+// TODO(tzik): Cleanup existing use cases and remove
+// ScopedAllowCrossThreadRefCountAccess.
+class BASE_EXPORT ScopedAllowCrossThreadRefCountAccess final {
+ public:
+#if DCHECK_IS_ON()
+  ScopedAllowCrossThreadRefCountAccess();
+  ~ScopedAllowCrossThreadRefCountAccess();
+#else
+  ScopedAllowCrossThreadRefCountAccess() {}
+  ~ScopedAllowCrossThreadRefCountAccess() {}
+#endif
+};
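+
+// A minimal usage sketch (illustrative only):
+//   {
+//     base::ScopedAllowCrossThreadRefCountAccess allow;
+//     // Within this scope, cross-sequence ref count updates on RefCounted
+//     // objects are not DCHECK'd.
+//   }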
+
+//
+// A base class for reference counted classes. Otherwise known as a cheap
+// knock-off of WebKit's RefCounted<T> class.  To use this, just extend your
+// class from it like so:
+//
+//   class MyFoo : public base::RefCounted<MyFoo> {
+//    ...
+//    private:
+//     friend class base::RefCounted<MyFoo>;
+//     ~MyFoo();
+//   };
+//
+// You should always make your destructor non-public, to avoid any code
+// deleting the object accidentally while there are references to it.
+//
+//
+// The ref count manipulation to RefCounted is NOT thread safe and has DCHECKs
+// to trap unsafe cross thread usage. A subclass instance of RefCounted can be
+// passed to another execution sequence only when its ref count is 1. If the ref
+// count is more than 1, the RefCounted class verifies the ref updates are made
+// on the same execution sequence as the previous ones. The subclass can also
+// manually call IsOnValidSequence to trap other non-thread-safe accesses; see
+// the documentation for that method.
+//
+//
+// The reference count starts from zero by default, and we intend to migrate
+// to a start-from-one ref count. Add REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE()
+// to the ref-counted class to opt in.
+//
+// If an object has a start-from-one ref count, the first scoped_refptr needs
+// to be created by base::AdoptRef() or base::MakeRefCounted().
+// base::MakeRefCounted() can be used to create both kinds of ref-counted
+// object.
+//
+// The motivations to use start-from-one ref count are:
+//  - Start-from-one ref count doesn't need the ref count increment for the
+//    first reference.
+//  - It can detect an invalid acquisition of an object that is being deleted
+//    and has a zero ref count. That tends to happen with a custom deleter
+//    that delays the deletion.
+//    TODO(tzik): Implement invalid acquisition detection.
+//  - Behavior parity with Blink's WTF::RefCounted, whose count starts from
+//    one. A start-from-one ref count is also a step toward merging
+//    WTF::RefCounted into base::RefCounted.
+//
+#define REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE()             \
+  static constexpr ::base::subtle::StartRefCountFromOneTag \
+      kRefCountPreference = ::base::subtle::kStartRefCountFromOneTag
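+
+// A minimal opt-in sketch (|MyType| is hypothetical):
+//   class MyType : public base::RefCounted<MyType> {
+//    public:
+//     REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE();
+//     ...
+//   };
+//   // The first reference must then be taken via one of:
+//   auto a = base::MakeRefCounted<MyType>();
+//   scoped_refptr<MyType> b = base::AdoptRef(new MyType());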
+
+template <class T, typename Traits>
+class RefCounted;
+
+template <typename T>
+struct DefaultRefCountedTraits {
+  static void Destruct(const T* x) {
+    RefCounted<T, DefaultRefCountedTraits>::DeleteInternal(x);
+  }
+};
+
+template <class T, typename Traits = DefaultRefCountedTraits<T>>
+class RefCounted : public subtle::RefCountedBase {
+ public:
+  static constexpr subtle::StartRefCountFromZeroTag kRefCountPreference =
+      subtle::kStartRefCountFromZeroTag;
+
+  RefCounted() : subtle::RefCountedBase(T::kRefCountPreference) {}
+
+  void AddRef() const {
+    subtle::RefCountedBase::AddRef();
+  }
+
+  void Release() const {
+    if (subtle::RefCountedBase::Release()) {
+      // Prune the code paths which the static analyzer may take to simulate
+      // object destruction. Use-after-free errors aren't possible given the
+      // lifetime guarantees of the refcounting system.
+      ANALYZER_SKIP_THIS_PATH();
+
+      Traits::Destruct(static_cast<const T*>(this));
+    }
+  }
+
+ protected:
+  ~RefCounted() = default;
+
+ private:
+  friend struct DefaultRefCountedTraits<T>;
+  template <typename U>
+  static void DeleteInternal(const U* x) {
+    delete x;
+  }
+
+  DISALLOW_COPY_AND_ASSIGN(RefCounted);
+};
+
+// Forward declaration.
+template <class T, typename Traits> class RefCountedThreadSafe;
+
+// Default traits for RefCountedThreadSafe<T>.  Deletes the object when its ref
+// count reaches 0.  Overload to delete it on a different thread etc.
+template<typename T>
+struct DefaultRefCountedThreadSafeTraits {
+  static void Destruct(const T* x) {
+    // Delete through RefCountedThreadSafe so that child classes only need to
+    // befriend RefCountedThreadSafe instead of this struct, which is an
+    // implementation detail.
+    RefCountedThreadSafe<T,
+                         DefaultRefCountedThreadSafeTraits>::DeleteInternal(x);
+  }
+};
+
+//
+// A thread-safe variant of RefCounted<T>
+//
+//   class MyFoo : public base::RefCountedThreadSafe<MyFoo> {
+//    ...
+//   };
+//
+// If you're using the default trait, then you should add compile-time
+// asserts that no one else is deleting your object, e.g.:
+//    private:
+//     friend class base::RefCountedThreadSafe<MyFoo>;
+//     ~MyFoo();
+//
+// We can use REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE() with RefCountedThreadSafe
+// too. See the comment above the RefCounted definition for details.
+template <class T, typename Traits = DefaultRefCountedThreadSafeTraits<T> >
+class RefCountedThreadSafe : public subtle::RefCountedThreadSafeBase {
+ public:
+  static constexpr subtle::StartRefCountFromZeroTag kRefCountPreference =
+      subtle::kStartRefCountFromZeroTag;
+
+  explicit RefCountedThreadSafe()
+      : subtle::RefCountedThreadSafeBase(T::kRefCountPreference) {}
+
+  void AddRef() const {
+    subtle::RefCountedThreadSafeBase::AddRef();
+  }
+
+  void Release() const {
+    if (subtle::RefCountedThreadSafeBase::Release()) {
+      ANALYZER_SKIP_THIS_PATH();
+      Traits::Destruct(static_cast<const T*>(this));
+    }
+  }
+
+ protected:
+  ~RefCountedThreadSafe() = default;
+
+ private:
+  friend struct DefaultRefCountedThreadSafeTraits<T>;
+  template <typename U>
+  static void DeleteInternal(const U* x) {
+    delete x;
+  }
+
+  DISALLOW_COPY_AND_ASSIGN(RefCountedThreadSafe);
+};
+
+//
+// A thread-safe wrapper for some piece of data so we can place other
+// things in scoped_refptrs<>.
+//
+template<typename T>
+class RefCountedData
+    : public base::RefCountedThreadSafe< base::RefCountedData<T> > {
+ public:
+  RefCountedData() : data() {}
+  RefCountedData(const T& in_value) : data(in_value) {}
+  RefCountedData(T&& in_value) : data(std::move(in_value)) {}
+
+  T data;
+
+ private:
+  friend class base::RefCountedThreadSafe<base::RefCountedData<T> >;
+  ~RefCountedData() = default;
+};
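+
+// Example (illustrative): sharing a string via scoped_refptr.
+//   auto shared =
+//       base::MakeRefCounted<base::RefCountedData<std::string>>("payload");
+//   shared->data += " suffix";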
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_REF_COUNTED_H_
diff --git a/base/memory/ref_counted_delete_on_sequence.h b/base/memory/ref_counted_delete_on_sequence.h
new file mode 100644
index 0000000..dd30106
--- /dev/null
+++ b/base/memory/ref_counted_delete_on_sequence.h
@@ -0,0 +1,82 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_REF_COUNTED_DELETE_ON_SEQUENCE_H_
+#define BASE_MEMORY_REF_COUNTED_DELETE_ON_SEQUENCE_H_
+
+#include <utility>
+
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/sequenced_task_runner.h"
+
+namespace base {
+
+// RefCountedDeleteOnSequence is similar to RefCountedThreadSafe, and ensures
+// that the object will be deleted on a specified sequence.
+//
+// Sample usage:
+// class Foo : public RefCountedDeleteOnSequence<Foo> {
+//
+//   Foo(scoped_refptr<SequencedTaskRunner> task_runner)
+//       : RefCountedDeleteOnSequence<Foo>(std::move(task_runner)) {}
+//   ...
+//  private:
+//   friend class RefCountedDeleteOnSequence<Foo>;
+//   friend class DeleteHelper<Foo>;
+//
+//   ~Foo();
+// };
+template <class T>
+class RefCountedDeleteOnSequence : public subtle::RefCountedThreadSafeBase {
+ public:
+  static constexpr subtle::StartRefCountFromZeroTag kRefCountPreference =
+      subtle::kStartRefCountFromZeroTag;
+
+  // A SequencedTaskRunner for the current sequence can be acquired by calling
+  // SequencedTaskRunnerHandle::Get().
+  RefCountedDeleteOnSequence(
+      scoped_refptr<SequencedTaskRunner> owning_task_runner)
+      : subtle::RefCountedThreadSafeBase(T::kRefCountPreference),
+        owning_task_runner_(std::move(owning_task_runner)) {
+    DCHECK(owning_task_runner_);
+  }
+
+  void AddRef() const { subtle::RefCountedThreadSafeBase::AddRef(); }
+
+  void Release() const {
+    if (subtle::RefCountedThreadSafeBase::Release())
+      DestructOnSequence();
+  }
+
+ protected:
+  friend class DeleteHelper<RefCountedDeleteOnSequence>;
+  ~RefCountedDeleteOnSequence() = default;
+
+  SequencedTaskRunner* owning_task_runner() {
+    return owning_task_runner_.get();
+  }
+  const SequencedTaskRunner* owning_task_runner() const {
+    return owning_task_runner_.get();
+  }
+
+ private:
+  void DestructOnSequence() const {
+    const T* t = static_cast<const T*>(this);
+    if (owning_task_runner_->RunsTasksInCurrentSequence())
+      delete t;
+    else
+      owning_task_runner_->DeleteSoon(FROM_HERE, t);
+  }
+
+  const scoped_refptr<SequencedTaskRunner> owning_task_runner_;
+
+  DISALLOW_COPY_AND_ASSIGN(RefCountedDeleteOnSequence);
+};
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_REF_COUNTED_DELETE_ON_SEQUENCE_H_
diff --git a/base/memory/ref_counted_memory.cc b/base/memory/ref_counted_memory.cc
new file mode 100644
index 0000000..23a5ffc
--- /dev/null
+++ b/base/memory/ref_counted_memory.cc
@@ -0,0 +1,132 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/ref_counted_memory.h"
+
+#include <string.h>
+
+#include <utility>
+
+#include "base/logging.h"
+#include "base/memory/read_only_shared_memory_region.h"
+
+namespace base {
+
+bool RefCountedMemory::Equals(
+    const scoped_refptr<RefCountedMemory>& other) const {
+  return other.get() &&
+         size() == other->size() &&
+         (memcmp(front(), other->front(), size()) == 0);
+}
+
+RefCountedMemory::RefCountedMemory() = default;
+
+RefCountedMemory::~RefCountedMemory() = default;
+
+const unsigned char* RefCountedStaticMemory::front() const {
+  return data_;
+}
+
+size_t RefCountedStaticMemory::size() const {
+  return length_;
+}
+
+RefCountedStaticMemory::~RefCountedStaticMemory() = default;
+
+RefCountedBytes::RefCountedBytes() = default;
+
+RefCountedBytes::RefCountedBytes(const std::vector<unsigned char>& initializer)
+    : data_(initializer) {
+}
+
+RefCountedBytes::RefCountedBytes(const unsigned char* p, size_t size)
+    : data_(p, p + size) {}
+
+RefCountedBytes::RefCountedBytes(size_t size) : data_(size, 0) {}
+
+scoped_refptr<RefCountedBytes> RefCountedBytes::TakeVector(
+    std::vector<unsigned char>* to_destroy) {
+  auto bytes = MakeRefCounted<RefCountedBytes>();
+  bytes->data_.swap(*to_destroy);
+  return bytes;
+}
+
+const unsigned char* RefCountedBytes::front() const {
+  // STL will assert if we do front() on an empty vector, but calling code
+  // expects nullptr.
+  return size() ? &data_.front() : nullptr;
+}
+
+size_t RefCountedBytes::size() const {
+  return data_.size();
+}
+
+RefCountedBytes::~RefCountedBytes() = default;
+
+RefCountedString::RefCountedString() = default;
+
+RefCountedString::~RefCountedString() = default;
+
+// static
+scoped_refptr<RefCountedString> RefCountedString::TakeString(
+    std::string* to_destroy) {
+  auto self = MakeRefCounted<RefCountedString>();
+  to_destroy->swap(self->data_);
+  return self;
+}
+
+const unsigned char* RefCountedString::front() const {
+  return data_.empty() ? nullptr
+                       : reinterpret_cast<const unsigned char*>(data_.data());
+}
+
+size_t RefCountedString::size() const {
+  return data_.size();
+}
+
+RefCountedSharedMemory::RefCountedSharedMemory(
+    std::unique_ptr<SharedMemory> shm,
+    size_t size)
+    : shm_(std::move(shm)), size_(size) {
+  DCHECK(shm_);
+  DCHECK(shm_->memory());
+  DCHECK_GT(size_, 0U);
+  DCHECK_LE(size_, shm_->mapped_size());
+}
+
+RefCountedSharedMemory::~RefCountedSharedMemory() = default;
+
+const unsigned char* RefCountedSharedMemory::front() const {
+  return static_cast<const unsigned char*>(shm_->memory());
+}
+
+size_t RefCountedSharedMemory::size() const {
+  return size_;
+}
+
+RefCountedSharedMemoryMapping::RefCountedSharedMemoryMapping(
+    ReadOnlySharedMemoryMapping mapping)
+    : mapping_(std::move(mapping)), size_(mapping_.size()) {
+  DCHECK_GT(size_, 0U);
+}
+
+RefCountedSharedMemoryMapping::~RefCountedSharedMemoryMapping() = default;
+
+const unsigned char* RefCountedSharedMemoryMapping::front() const {
+  return static_cast<const unsigned char*>(mapping_.memory());
+}
+
+size_t RefCountedSharedMemoryMapping::size() const {
+  return size_;
+}
+
+// static
+scoped_refptr<RefCountedSharedMemoryMapping>
+RefCountedSharedMemoryMapping::CreateFromWholeRegion(
+    const ReadOnlySharedMemoryRegion& region) {
+  ReadOnlySharedMemoryMapping mapping = region.Map();
+  if (!mapping.IsValid())
+    return nullptr;
+  return MakeRefCounted<RefCountedSharedMemoryMapping>(std::move(mapping));
+}
+
+}  // namespace base
diff --git a/base/memory/ref_counted_memory.h b/base/memory/ref_counted_memory.h
new file mode 100644
index 0000000..92a7d7b
--- /dev/null
+++ b/base/memory/ref_counted_memory.h
@@ -0,0 +1,193 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_REF_COUNTED_MEMORY_H_
+#define BASE_MEMORY_REF_COUNTED_MEMORY_H_
+
+#include <stddef.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/shared_memory.h"
+#include "base/memory/shared_memory_mapping.h"
+
+namespace base {
+
+class ReadOnlySharedMemoryRegion;
+
+// A generic interface to memory. This object is reference counted because most
+// of its subclasses own the data they carry, and this interface needs to
+// support heterogeneous containers of these different types of memory.
+class BASE_EXPORT RefCountedMemory
+    : public RefCountedThreadSafe<RefCountedMemory> {
+ public:
+  // Retrieves a pointer to the beginning of the data we point to. If the data
+  // is empty, this will return nullptr.
+  virtual const unsigned char* front() const = 0;
+
+  // Size of the memory pointed to.
+  virtual size_t size() const = 0;
+
+  // Returns true if |other| is byte for byte equal.
+  bool Equals(const scoped_refptr<RefCountedMemory>& other) const;
+
+  // Handy method to simplify calling front() with a reinterpret_cast.
+  template<typename T> const T* front_as() const {
+    return reinterpret_cast<const T*>(front());
+  }
+
+ protected:
+  friend class RefCountedThreadSafe<RefCountedMemory>;
+  RefCountedMemory();
+  virtual ~RefCountedMemory();
+};
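+
+// Example (illustrative): viewing the bytes as characters.
+//   scoped_refptr<base::RefCountedMemory> mem = ...;
+//   std::string contents(mem->front_as<char>(), mem->size());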
+
+// An implementation of RefCountedMemory, where the ref counting does not
+// matter.
+class BASE_EXPORT RefCountedStaticMemory : public RefCountedMemory {
+ public:
+  RefCountedStaticMemory() : data_(nullptr), length_(0) {}
+  RefCountedStaticMemory(const void* data, size_t length)
+      : data_(static_cast<const unsigned char*>(length ? data : nullptr)),
+        length_(length) {}
+
+  // RefCountedMemory:
+  const unsigned char* front() const override;
+  size_t size() const override;
+
+ private:
+  ~RefCountedStaticMemory() override;
+
+  const unsigned char* data_;
+  size_t length_;
+
+  DISALLOW_COPY_AND_ASSIGN(RefCountedStaticMemory);
+};
+
+// An implementation of RefCountedMemory, where the data is stored in a STL
+// vector.
+class BASE_EXPORT RefCountedBytes : public RefCountedMemory {
+ public:
+  RefCountedBytes();
+
+  // Constructs a RefCountedBytes object by copying from |initializer|.
+  explicit RefCountedBytes(const std::vector<unsigned char>& initializer);
+
+  // Constructs a RefCountedBytes object by copying |size| bytes from |p|.
+  RefCountedBytes(const unsigned char* p, size_t size);
+
+  // Constructs a RefCountedBytes object by zero-initializing a new vector of
+  // |size| bytes.
+  explicit RefCountedBytes(size_t size);
+
+  // Constructs a RefCountedBytes object by performing a swap. (To
+  // non-destructively build a RefCountedBytes, use the constructor that
+  // takes a vector.)
+  static scoped_refptr<RefCountedBytes> TakeVector(
+      std::vector<unsigned char>* to_destroy);
+
+  // RefCountedMemory:
+  const unsigned char* front() const override;
+  size_t size() const override;
+
+  const std::vector<unsigned char>& data() const { return data_; }
+  std::vector<unsigned char>& data() { return data_; }
+
+  // Non-const versions of front() and front_as() that are simply shorthand for
+  // data().data().
+  unsigned char* front() { return data_.data(); }
+  template <typename T>
+  T* front_as() {
+    return reinterpret_cast<T*>(front());
+  }
+
+ private:
+  ~RefCountedBytes() override;
+
+  std::vector<unsigned char> data_;
+
+  DISALLOW_COPY_AND_ASSIGN(RefCountedBytes);
+};
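+
+// Example (illustrative): handing off a buffer without copying.
+//   std::vector<unsigned char> buffer = ...;
+//   auto bytes = base::RefCountedBytes::TakeVector(&buffer);
+//   // |buffer| is now empty; |bytes| owns the data.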
+
+// An implementation of RefCountedMemory, where the bytes are stored in a STL
+// string. Use this if your data naturally arrives in that format.
+class BASE_EXPORT RefCountedString : public RefCountedMemory {
+ public:
+  RefCountedString();
+
+  // Constructs a RefCountedString object by performing a swap. (To
+  // non-destructively build a RefCountedString, use the default constructor
+  // and copy into object->data().)
+  static scoped_refptr<RefCountedString> TakeString(std::string* to_destroy);
+
+  // RefCountedMemory:
+  const unsigned char* front() const override;
+  size_t size() const override;
+
+  const std::string& data() const { return data_; }
+  std::string& data() { return data_; }
+
+ private:
+  ~RefCountedString() override;
+
+  std::string data_;
+
+  DISALLOW_COPY_AND_ASSIGN(RefCountedString);
+};
+
+// An implementation of RefCountedMemory, where the bytes are stored in
+// SharedMemory.
+class BASE_EXPORT RefCountedSharedMemory : public RefCountedMemory {
+ public:
+  // Constructs a RefCountedMemory object by taking ownership of an already
+  // mapped SharedMemory object.
+  RefCountedSharedMemory(std::unique_ptr<SharedMemory> shm, size_t size);
+
+  // RefCountedMemory:
+  const unsigned char* front() const override;
+  size_t size() const override;
+
+ private:
+  ~RefCountedSharedMemory() override;
+
+  const std::unique_ptr<SharedMemory> shm_;
+  const size_t size_;
+
+  DISALLOW_COPY_AND_ASSIGN(RefCountedSharedMemory);
+};
+
+// An implementation of RefCountedMemory, where the bytes are stored in
+// ReadOnlySharedMemoryMapping.
+class BASE_EXPORT RefCountedSharedMemoryMapping : public RefCountedMemory {
+ public:
+  // Constructs a RefCountedMemory object by taking ownership of an already
+  // mapped ReadOnlySharedMemoryMapping object.
+  explicit RefCountedSharedMemoryMapping(ReadOnlySharedMemoryMapping mapping);
+
+  // Convenience method to map all of |region| and take ownership of the
+  // mapping. Returns an empty scoped_refptr if the map operation fails.
+  static scoped_refptr<RefCountedSharedMemoryMapping> CreateFromWholeRegion(
+      const ReadOnlySharedMemoryRegion& region);
+
+  // RefCountedMemory:
+  const unsigned char* front() const override;
+  size_t size() const override;
+
+ private:
+  ~RefCountedSharedMemoryMapping() override;
+
+  const ReadOnlySharedMemoryMapping mapping_;
+  const size_t size_;
+
+  DISALLOW_COPY_AND_ASSIGN(RefCountedSharedMemoryMapping);
+};
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_REF_COUNTED_MEMORY_H_
diff --git a/base/memory/ref_counted_memory_unittest.cc b/base/memory/ref_counted_memory_unittest.cc
new file mode 100644
index 0000000..b7498f9
--- /dev/null
+++ b/base/memory/ref_counted_memory_unittest.cc
@@ -0,0 +1,145 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/ref_counted_memory.h"
+
+#include <stdint.h>
+#include <string.h>
+
+#include <utility>
+
+#include "base/memory/read_only_shared_memory_region.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using testing::Each;
+using testing::ElementsAre;
+
+namespace base {
+
+TEST(RefCountedMemoryUnitTest, RefCountedStaticMemory) {
+  auto mem = MakeRefCounted<RefCountedStaticMemory>("static mem00", 10);
+
+  EXPECT_EQ(10U, mem->size());
+  EXPECT_EQ("static mem", std::string(mem->front_as<char>(), mem->size()));
+}
+
+TEST(RefCountedMemoryUnitTest, RefCountedBytes) {
+  std::vector<uint8_t> data;
+  data.push_back(45);
+  data.push_back(99);
+  scoped_refptr<RefCountedMemory> mem = RefCountedBytes::TakeVector(&data);
+
+  EXPECT_EQ(0U, data.size());
+
+  ASSERT_EQ(2U, mem->size());
+  EXPECT_EQ(45U, mem->front()[0]);
+  EXPECT_EQ(99U, mem->front()[1]);
+
+  scoped_refptr<RefCountedMemory> mem2;
+  {
+    const unsigned char kData[] = {12, 11, 99};
+    mem2 = MakeRefCounted<RefCountedBytes>(kData, arraysize(kData));
+  }
+  ASSERT_EQ(3U, mem2->size());
+  EXPECT_EQ(12U, mem2->front()[0]);
+  EXPECT_EQ(11U, mem2->front()[1]);
+  EXPECT_EQ(99U, mem2->front()[2]);
+}
+
+TEST(RefCountedMemoryUnitTest, RefCountedBytesMutable) {
+  auto mem = base::MakeRefCounted<RefCountedBytes>(10);
+
+  ASSERT_EQ(10U, mem->size());
+  EXPECT_THAT(mem->data(), Each(0U));
+
+  // Test non-const versions of data(), front() and front_as<>().
+  mem->data()[0] = 1;
+  mem->front()[1] = 2;
+  mem->front_as<char>()[2] = 3;
+
+  EXPECT_THAT(mem->data(), ElementsAre(1, 2, 3, 0, 0, 0, 0, 0, 0, 0));
+}
+
+TEST(RefCountedMemoryUnitTest, RefCountedString) {
+  std::string s("destroy me");
+  scoped_refptr<RefCountedMemory> mem = RefCountedString::TakeString(&s);
+
+  EXPECT_EQ(0U, s.size());
+
+  ASSERT_EQ(10U, mem->size());
+  EXPECT_EQ('d', mem->front()[0]);
+  EXPECT_EQ('e', mem->front()[1]);
+  EXPECT_EQ('e', mem->front()[9]);
+}
+
+TEST(RefCountedMemoryUnitTest, RefCountedSharedMemory) {
+  static const char kData[] = "shm_dummy_data";
+  auto shm = std::make_unique<SharedMemory>();
+  ASSERT_TRUE(shm->CreateAndMapAnonymous(sizeof(kData)));
+  memcpy(shm->memory(), kData, sizeof(kData));
+
+  auto mem =
+      MakeRefCounted<RefCountedSharedMemory>(std::move(shm), sizeof(kData));
+  ASSERT_EQ(sizeof(kData), mem->size());
+  EXPECT_EQ('s', mem->front()[0]);
+  EXPECT_EQ('h', mem->front()[1]);
+  EXPECT_EQ('_', mem->front()[9]);
+}
+
+TEST(RefCountedMemoryUnitTest, RefCountedSharedMemoryMapping) {
+  static const char kData[] = "mem_region_dummy_data";
+  scoped_refptr<RefCountedSharedMemoryMapping> mem;
+  {
+    MappedReadOnlyRegion region =
+        ReadOnlySharedMemoryRegion::Create(sizeof(kData));
+    ReadOnlySharedMemoryMapping ro_mapping = region.region.Map();
+    WritableSharedMemoryMapping rw_mapping = std::move(region.mapping);
+    ASSERT_TRUE(rw_mapping.IsValid());
+    memcpy(rw_mapping.memory(), kData, sizeof(kData));
+    mem = MakeRefCounted<RefCountedSharedMemoryMapping>(std::move(ro_mapping));
+  }
+
+  ASSERT_LE(sizeof(kData), mem->size());
+  EXPECT_EQ('e', mem->front()[1]);
+  EXPECT_EQ('m', mem->front()[2]);
+  EXPECT_EQ('o', mem->front()[8]);
+
+  {
+    MappedReadOnlyRegion region =
+        ReadOnlySharedMemoryRegion::Create(sizeof(kData));
+    WritableSharedMemoryMapping rw_mapping = std::move(region.mapping);
+    ASSERT_TRUE(rw_mapping.IsValid());
+    memcpy(rw_mapping.memory(), kData, sizeof(kData));
+    mem = RefCountedSharedMemoryMapping::CreateFromWholeRegion(region.region);
+  }
+
+  ASSERT_LE(sizeof(kData), mem->size());
+  EXPECT_EQ('_', mem->front()[3]);
+  EXPECT_EQ('r', mem->front()[4]);
+  EXPECT_EQ('i', mem->front()[7]);
+}
+
+TEST(RefCountedMemoryUnitTest, Equals) {
+  std::string s1("same");
+  scoped_refptr<RefCountedMemory> mem1 = RefCountedString::TakeString(&s1);
+
+  std::vector<unsigned char> d2 = {'s', 'a', 'm', 'e'};
+  scoped_refptr<RefCountedMemory> mem2 = RefCountedBytes::TakeVector(&d2);
+
+  EXPECT_TRUE(mem1->Equals(mem2));
+
+  std::string s3("diff");
+  scoped_refptr<RefCountedMemory> mem3 = RefCountedString::TakeString(&s3);
+
+  EXPECT_FALSE(mem1->Equals(mem3));
+  EXPECT_FALSE(mem2->Equals(mem3));
+}
+
+TEST(RefCountedMemoryUnitTest, EqualsNull) {
+  std::string s("str");
+  scoped_refptr<RefCountedMemory> mem = RefCountedString::TakeString(&s);
+  EXPECT_FALSE(mem->Equals(nullptr));
+}
+
+}  // namespace base
diff --git a/base/memory/ref_counted_unittest.cc b/base/memory/ref_counted_unittest.cc
new file mode 100644
index 0000000..d88fc54
--- /dev/null
+++ b/base/memory/ref_counted_unittest.cc
@@ -0,0 +1,606 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/ref_counted.h"
+
+#include <type_traits>
+#include <utility>
+
+#include "base/test/gtest_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+class SelfAssign : public base::RefCounted<SelfAssign> {
+ protected:
+  virtual ~SelfAssign() = default;
+
+ private:
+  friend class base::RefCounted<SelfAssign>;
+};
+
+class Derived : public SelfAssign {
+ protected:
+  ~Derived() override = default;
+
+ private:
+  friend class base::RefCounted<Derived>;
+};
+
+class CheckDerivedMemberAccess : public scoped_refptr<SelfAssign> {
+ public:
+  CheckDerivedMemberAccess() {
+    // This shouldn't compile if we don't have access to the member variable.
+    SelfAssign** pptr = &ptr_;
+    EXPECT_EQ(*pptr, ptr_);
+  }
+};
+
+class ScopedRefPtrToSelf : public base::RefCounted<ScopedRefPtrToSelf> {
+ public:
+  ScopedRefPtrToSelf() : self_ptr_(this) {}
+
+  static bool was_destroyed() { return was_destroyed_; }
+
+  static void reset_was_destroyed() { was_destroyed_ = false; }
+
+  scoped_refptr<ScopedRefPtrToSelf> self_ptr_;
+
+ private:
+  friend class base::RefCounted<ScopedRefPtrToSelf>;
+  ~ScopedRefPtrToSelf() { was_destroyed_ = true; }
+
+  static bool was_destroyed_;
+};
+
+bool ScopedRefPtrToSelf::was_destroyed_ = false;
+
+class ScopedRefPtrCountBase : public base::RefCounted<ScopedRefPtrCountBase> {
+ public:
+  ScopedRefPtrCountBase() { ++constructor_count_; }
+
+  static int constructor_count() { return constructor_count_; }
+
+  static int destructor_count() { return destructor_count_; }
+
+  static void reset_count() {
+    constructor_count_ = 0;
+    destructor_count_ = 0;
+  }
+
+ protected:
+  virtual ~ScopedRefPtrCountBase() { ++destructor_count_; }
+
+ private:
+  friend class base::RefCounted<ScopedRefPtrCountBase>;
+
+  static int constructor_count_;
+  static int destructor_count_;
+};
+
+int ScopedRefPtrCountBase::constructor_count_ = 0;
+int ScopedRefPtrCountBase::destructor_count_ = 0;
+
+class ScopedRefPtrCountDerived : public ScopedRefPtrCountBase {
+ public:
+  ScopedRefPtrCountDerived() { ++constructor_count_; }
+
+  static int constructor_count() { return constructor_count_; }
+
+  static int destructor_count() { return destructor_count_; }
+
+  static void reset_count() {
+    constructor_count_ = 0;
+    destructor_count_ = 0;
+  }
+
+ protected:
+  ~ScopedRefPtrCountDerived() override { ++destructor_count_; }
+
+ private:
+  friend class base::RefCounted<ScopedRefPtrCountDerived>;
+
+  static int constructor_count_;
+  static int destructor_count_;
+};
+
+int ScopedRefPtrCountDerived::constructor_count_ = 0;
+int ScopedRefPtrCountDerived::destructor_count_ = 0;
+
+class Other : public base::RefCounted<Other> {
+ private:
+  friend class base::RefCounted<Other>;
+
+  ~Other() = default;
+};
+
+class HasPrivateDestructorWithDeleter;
+
+struct Deleter {
+  static void Destruct(const HasPrivateDestructorWithDeleter* x);
+};
+
+class HasPrivateDestructorWithDeleter
+    : public base::RefCounted<HasPrivateDestructorWithDeleter, Deleter> {
+ public:
+  HasPrivateDestructorWithDeleter() = default;
+
+ private:
+  friend struct Deleter;
+  ~HasPrivateDestructorWithDeleter() = default;
+};
+
+void Deleter::Destruct(const HasPrivateDestructorWithDeleter* x) {
+  delete x;
+}
+
+scoped_refptr<Other> Overloaded(scoped_refptr<Other> other) {
+  return other;
+}
+
+scoped_refptr<SelfAssign> Overloaded(scoped_refptr<SelfAssign> self_assign) {
+  return self_assign;
+}
+
+class InitialRefCountIsOne : public base::RefCounted<InitialRefCountIsOne> {
+ public:
+  REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE();
+
+  InitialRefCountIsOne() = default;
+
+ private:
+  friend class base::RefCounted<InitialRefCountIsOne>;
+  ~InitialRefCountIsOne() = default;
+};
+
+}  // namespace
+
+TEST(RefCountedUnitTest, TestSelfAssignment) {
+  SelfAssign* p = new SelfAssign;
+  scoped_refptr<SelfAssign> var(p);
+  var = *&var;  // The *& defeats Clang's -Wself-assign warning.
+  EXPECT_EQ(var.get(), p);
+  var = std::move(var);
+  EXPECT_EQ(var.get(), p);
+  var.swap(var);
+  EXPECT_EQ(var.get(), p);
+  swap(var, var);
+  EXPECT_EQ(var.get(), p);
+}
+
+TEST(RefCountedUnitTest, ScopedRefPtrMemberAccess) {
+  CheckDerivedMemberAccess check;
+}
+
+TEST(RefCountedUnitTest, ScopedRefPtrToSelfPointerAssignment) {
+  ScopedRefPtrToSelf::reset_was_destroyed();
+
+  ScopedRefPtrToSelf* check = new ScopedRefPtrToSelf();
+  EXPECT_FALSE(ScopedRefPtrToSelf::was_destroyed());
+  check->self_ptr_ = nullptr;
+  EXPECT_TRUE(ScopedRefPtrToSelf::was_destroyed());
+}
+
+TEST(RefCountedUnitTest, ScopedRefPtrToSelfMoveAssignment) {
+  ScopedRefPtrToSelf::reset_was_destroyed();
+
+  ScopedRefPtrToSelf* check = new ScopedRefPtrToSelf();
+  EXPECT_FALSE(ScopedRefPtrToSelf::was_destroyed());
+  // Releasing |check->self_ptr_| will delete |check|.
+  // The move assignment operator must assign |check->self_ptr_| first then
+  // release |check->self_ptr_|.
+  check->self_ptr_ = scoped_refptr<ScopedRefPtrToSelf>();
+  EXPECT_TRUE(ScopedRefPtrToSelf::was_destroyed());
+}
+
+TEST(RefCountedUnitTest, BooleanTesting) {
+  scoped_refptr<SelfAssign> ptr_to_an_instance = new SelfAssign;
+  EXPECT_TRUE(ptr_to_an_instance);
+  EXPECT_FALSE(!ptr_to_an_instance);
+
+  if (ptr_to_an_instance) {
+  } else {
+    ADD_FAILURE() << "Pointer to an instance should result in true.";
+  }
+
+  if (!ptr_to_an_instance) {  // check for operator!().
+    ADD_FAILURE() << "Pointer to an instance should result in !x being false.";
+  }
+
+  scoped_refptr<SelfAssign> null_ptr;
+  EXPECT_FALSE(null_ptr);
+  EXPECT_TRUE(!null_ptr);
+
+  if (null_ptr) {
+    ADD_FAILURE() << "Null pointer should result in false.";
+  }
+
+  if (!null_ptr) {  // check for operator!().
+  } else {
+    ADD_FAILURE() << "Null pointer should result in !x being true.";
+  }
+}
+
+TEST(RefCountedUnitTest, Equality) {
+  scoped_refptr<SelfAssign> p1(new SelfAssign);
+  scoped_refptr<SelfAssign> p2(new SelfAssign);
+
+  EXPECT_EQ(p1, p1);
+  EXPECT_EQ(p2, p2);
+
+  EXPECT_NE(p1, p2);
+  EXPECT_NE(p2, p1);
+}
+
+TEST(RefCountedUnitTest, NullptrEquality) {
+  scoped_refptr<SelfAssign> ptr_to_an_instance(new SelfAssign);
+  scoped_refptr<SelfAssign> ptr_to_nullptr;
+
+  EXPECT_NE(nullptr, ptr_to_an_instance);
+  EXPECT_NE(ptr_to_an_instance, nullptr);
+  EXPECT_EQ(nullptr, ptr_to_nullptr);
+  EXPECT_EQ(ptr_to_nullptr, nullptr);
+}
+
+TEST(RefCountedUnitTest, ConvertibleEquality) {
+  scoped_refptr<Derived> p1(new Derived);
+  scoped_refptr<SelfAssign> p2;
+
+  EXPECT_NE(p1, p2);
+  EXPECT_NE(p2, p1);
+
+  p2 = p1;
+
+  EXPECT_EQ(p1, p2);
+  EXPECT_EQ(p2, p1);
+}
+
+TEST(RefCountedUnitTest, MoveAssignment1) {
+  ScopedRefPtrCountBase::reset_count();
+
+  {
+    ScopedRefPtrCountBase *raw = new ScopedRefPtrCountBase();
+    scoped_refptr<ScopedRefPtrCountBase> p1(raw);
+    EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+    EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+
+    {
+      scoped_refptr<ScopedRefPtrCountBase> p2;
+
+      p2 = std::move(p1);
+      EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+      EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+      EXPECT_EQ(nullptr, p1.get());
+      EXPECT_EQ(raw, p2.get());
+
+      // p2 goes out of scope.
+    }
+    EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+    EXPECT_EQ(1, ScopedRefPtrCountBase::destructor_count());
+
+    // p1 goes out of scope.
+  }
+  EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+  EXPECT_EQ(1, ScopedRefPtrCountBase::destructor_count());
+}
+
+TEST(RefCountedUnitTest, MoveAssignment2) {
+  ScopedRefPtrCountBase::reset_count();
+
+  {
+    ScopedRefPtrCountBase *raw = new ScopedRefPtrCountBase();
+    scoped_refptr<ScopedRefPtrCountBase> p1;
+    EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+    EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+
+    {
+      scoped_refptr<ScopedRefPtrCountBase> p2(raw);
+      EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+      EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+
+      p1 = std::move(p2);
+      EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+      EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+      EXPECT_EQ(raw, p1.get());
+      EXPECT_EQ(nullptr, p2.get());
+
+      // p2 goes out of scope.
+    }
+    EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+    EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+
+    // p1 goes out of scope.
+  }
+  EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+  EXPECT_EQ(1, ScopedRefPtrCountBase::destructor_count());
+}
+
+TEST(RefCountedUnitTest, MoveAssignmentSameInstance1) {
+  ScopedRefPtrCountBase::reset_count();
+
+  {
+    ScopedRefPtrCountBase *raw = new ScopedRefPtrCountBase();
+    scoped_refptr<ScopedRefPtrCountBase> p1(raw);
+    EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+    EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+
+    {
+      scoped_refptr<ScopedRefPtrCountBase> p2(p1);
+      EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+      EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+
+      p1 = std::move(p2);
+      EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+      EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+      EXPECT_EQ(raw, p1.get());
+      EXPECT_EQ(nullptr, p2.get());
+
+      // p2 goes out of scope.
+    }
+    EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+    EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+
+    // p1 goes out of scope.
+  }
+  EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+  EXPECT_EQ(1, ScopedRefPtrCountBase::destructor_count());
+}
+
+TEST(RefCountedUnitTest, MoveAssignmentSameInstance2) {
+  ScopedRefPtrCountBase::reset_count();
+
+  {
+    ScopedRefPtrCountBase *raw = new ScopedRefPtrCountBase();
+    scoped_refptr<ScopedRefPtrCountBase> p1(raw);
+    EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+    EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+
+    {
+      scoped_refptr<ScopedRefPtrCountBase> p2(p1);
+      EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+      EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+
+      p2 = std::move(p1);
+      EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+      EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+      EXPECT_EQ(nullptr, p1.get());
+      EXPECT_EQ(raw, p2.get());
+
+      // p2 goes out of scope.
+    }
+    EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+    EXPECT_EQ(1, ScopedRefPtrCountBase::destructor_count());
+
+    // p1 goes out of scope.
+  }
+  EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+  EXPECT_EQ(1, ScopedRefPtrCountBase::destructor_count());
+}
+
+TEST(RefCountedUnitTest, MoveAssignmentDifferentInstances) {
+  ScopedRefPtrCountBase::reset_count();
+
+  {
+    ScopedRefPtrCountBase *raw1 = new ScopedRefPtrCountBase();
+    scoped_refptr<ScopedRefPtrCountBase> p1(raw1);
+    EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+    EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+
+    {
+      ScopedRefPtrCountBase *raw2 = new ScopedRefPtrCountBase();
+      scoped_refptr<ScopedRefPtrCountBase> p2(raw2);
+      EXPECT_EQ(2, ScopedRefPtrCountBase::constructor_count());
+      EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+
+      p1 = std::move(p2);
+      EXPECT_EQ(2, ScopedRefPtrCountBase::constructor_count());
+      EXPECT_EQ(1, ScopedRefPtrCountBase::destructor_count());
+      EXPECT_EQ(raw2, p1.get());
+      EXPECT_EQ(nullptr, p2.get());
+
+      // p2 goes out of scope.
+    }
+    EXPECT_EQ(2, ScopedRefPtrCountBase::constructor_count());
+    EXPECT_EQ(1, ScopedRefPtrCountBase::destructor_count());
+
+    // p1 goes out of scope.
+  }
+  EXPECT_EQ(2, ScopedRefPtrCountBase::constructor_count());
+  EXPECT_EQ(2, ScopedRefPtrCountBase::destructor_count());
+}
+
+TEST(RefCountedUnitTest, MoveAssignmentSelfMove) {
+  ScopedRefPtrCountBase::reset_count();
+
+  {
+    ScopedRefPtrCountBase* raw = new ScopedRefPtrCountBase;
+    scoped_refptr<ScopedRefPtrCountBase> p1(raw);
+    scoped_refptr<ScopedRefPtrCountBase>& p1_ref = p1;
+
+    EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+    EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+
+    p1 = std::move(p1_ref);
+
+    // |p1| is "valid but unspecified", so don't bother inspecting its
+    // contents; just ensure that we don't crash.
+  }
+
+  EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+  EXPECT_EQ(1, ScopedRefPtrCountBase::destructor_count());
+}
+
+TEST(RefCountedUnitTest, MoveAssignmentDerived) {
+  ScopedRefPtrCountBase::reset_count();
+  ScopedRefPtrCountDerived::reset_count();
+
+  {
+    ScopedRefPtrCountBase *raw1 = new ScopedRefPtrCountBase();
+    scoped_refptr<ScopedRefPtrCountBase> p1(raw1);
+    EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+    EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+    EXPECT_EQ(0, ScopedRefPtrCountDerived::constructor_count());
+    EXPECT_EQ(0, ScopedRefPtrCountDerived::destructor_count());
+
+    {
+      ScopedRefPtrCountDerived *raw2 = new ScopedRefPtrCountDerived();
+      scoped_refptr<ScopedRefPtrCountDerived> p2(raw2);
+      EXPECT_EQ(2, ScopedRefPtrCountBase::constructor_count());
+      EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+      EXPECT_EQ(1, ScopedRefPtrCountDerived::constructor_count());
+      EXPECT_EQ(0, ScopedRefPtrCountDerived::destructor_count());
+
+      p1 = std::move(p2);
+      EXPECT_EQ(2, ScopedRefPtrCountBase::constructor_count());
+      EXPECT_EQ(1, ScopedRefPtrCountBase::destructor_count());
+      EXPECT_EQ(1, ScopedRefPtrCountDerived::constructor_count());
+      EXPECT_EQ(0, ScopedRefPtrCountDerived::destructor_count());
+      EXPECT_EQ(raw2, p1.get());
+      EXPECT_EQ(nullptr, p2.get());
+
+      // p2 goes out of scope.
+    }
+    EXPECT_EQ(2, ScopedRefPtrCountBase::constructor_count());
+    EXPECT_EQ(1, ScopedRefPtrCountBase::destructor_count());
+    EXPECT_EQ(1, ScopedRefPtrCountDerived::constructor_count());
+    EXPECT_EQ(0, ScopedRefPtrCountDerived::destructor_count());
+
+    // p1 goes out of scope.
+  }
+  EXPECT_EQ(2, ScopedRefPtrCountBase::constructor_count());
+  EXPECT_EQ(2, ScopedRefPtrCountBase::destructor_count());
+  EXPECT_EQ(1, ScopedRefPtrCountDerived::constructor_count());
+  EXPECT_EQ(1, ScopedRefPtrCountDerived::destructor_count());
+}
+
+TEST(RefCountedUnitTest, MoveConstructor) {
+  ScopedRefPtrCountBase::reset_count();
+
+  {
+    ScopedRefPtrCountBase *raw = new ScopedRefPtrCountBase();
+    scoped_refptr<ScopedRefPtrCountBase> p1(raw);
+    EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+    EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+
+    {
+      scoped_refptr<ScopedRefPtrCountBase> p2(std::move(p1));
+      EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+      EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+      EXPECT_EQ(nullptr, p1.get());
+      EXPECT_EQ(raw, p2.get());
+
+      // p2 goes out of scope.
+    }
+    EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+    EXPECT_EQ(1, ScopedRefPtrCountBase::destructor_count());
+
+    // p1 goes out of scope.
+  }
+  EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+  EXPECT_EQ(1, ScopedRefPtrCountBase::destructor_count());
+}
+
+TEST(RefCountedUnitTest, MoveConstructorDerived) {
+  ScopedRefPtrCountBase::reset_count();
+  ScopedRefPtrCountDerived::reset_count();
+
+  {
+    ScopedRefPtrCountDerived *raw1 = new ScopedRefPtrCountDerived();
+    scoped_refptr<ScopedRefPtrCountDerived> p1(raw1);
+    EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+    EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+    EXPECT_EQ(1, ScopedRefPtrCountDerived::constructor_count());
+    EXPECT_EQ(0, ScopedRefPtrCountDerived::destructor_count());
+
+    {
+      scoped_refptr<ScopedRefPtrCountBase> p2(std::move(p1));
+      EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+      EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+      EXPECT_EQ(1, ScopedRefPtrCountDerived::constructor_count());
+      EXPECT_EQ(0, ScopedRefPtrCountDerived::destructor_count());
+      EXPECT_EQ(nullptr, p1.get());
+      EXPECT_EQ(raw1, p2.get());
+
+      // p2 goes out of scope.
+    }
+    EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+    EXPECT_EQ(1, ScopedRefPtrCountBase::destructor_count());
+    EXPECT_EQ(1, ScopedRefPtrCountDerived::constructor_count());
+    EXPECT_EQ(1, ScopedRefPtrCountDerived::destructor_count());
+
+    // p1 goes out of scope.
+  }
+  EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+  EXPECT_EQ(1, ScopedRefPtrCountBase::destructor_count());
+  EXPECT_EQ(1, ScopedRefPtrCountDerived::constructor_count());
+  EXPECT_EQ(1, ScopedRefPtrCountDerived::destructor_count());
+}
+
+TEST(RefCountedUnitTest, TestOverloadResolutionCopy) {
+  const scoped_refptr<Derived> derived(new Derived);
+  const scoped_refptr<SelfAssign> expected(derived);
+  EXPECT_EQ(expected, Overloaded(derived));
+
+  const scoped_refptr<Other> other(new Other);
+  EXPECT_EQ(other, Overloaded(other));
+}
+
+TEST(RefCountedUnitTest, TestOverloadResolutionMove) {
+  scoped_refptr<Derived> derived(new Derived);
+  const scoped_refptr<SelfAssign> expected(derived);
+  EXPECT_EQ(expected, Overloaded(std::move(derived)));
+
+  scoped_refptr<Other> other(new Other);
+  const scoped_refptr<Other> other2(other);
+  EXPECT_EQ(other2, Overloaded(std::move(other)));
+}
+
+TEST(RefCountedUnitTest, TestMakeRefCounted) {
+  scoped_refptr<Derived> derived = new Derived;
+  EXPECT_TRUE(derived->HasOneRef());
+  derived = nullptr;
+
+  scoped_refptr<Derived> derived2 = base::MakeRefCounted<Derived>();
+  EXPECT_TRUE(derived2->HasOneRef());
+  derived2 = nullptr;
+}
+
+TEST(RefCountedUnitTest, TestInitialRefCountIsOne) {
+  scoped_refptr<InitialRefCountIsOne> obj =
+      base::MakeRefCounted<InitialRefCountIsOne>();
+  EXPECT_TRUE(obj->HasOneRef());
+  obj = nullptr;
+
+  scoped_refptr<InitialRefCountIsOne> obj2 =
+      base::AdoptRef(new InitialRefCountIsOne);
+  EXPECT_TRUE(obj2->HasOneRef());
+  obj2 = nullptr;
+
+  scoped_refptr<Other> obj3 = base::MakeRefCounted<Other>();
+  EXPECT_TRUE(obj3->HasOneRef());
+  obj3 = nullptr;
+}
+
+TEST(RefCountedDeathTest, TestAdoptRef) {
+  // Check that WrapRefCounted() DCHECKs if passed a type that defines
+  // REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE.
+  EXPECT_DCHECK_DEATH(base::WrapRefCounted(new InitialRefCountIsOne));
+
+  // Check that AdoptRef() DCHECKs if passed a nullptr.
+  InitialRefCountIsOne* ptr = nullptr;
+  EXPECT_DCHECK_DEATH(base::AdoptRef(ptr));
+
+  // Check that AdoptRef() DCHECKs if passed an object that doesn't need to be
+  // adopted.
+  scoped_refptr<InitialRefCountIsOne> obj =
+      base::MakeRefCounted<InitialRefCountIsOne>();
+  EXPECT_DCHECK_DEATH(base::AdoptRef(obj.get()));
+}
+
+TEST(RefCountedUnitTest, TestPrivateDestructorWithDeleter) {
+  // Ensure that RefCounted doesn't need access to the pointee's dtor when a
+  // custom deleter is given.
+  scoped_refptr<HasPrivateDestructorWithDeleter> obj =
+      base::MakeRefCounted<HasPrivateDestructorWithDeleter>();
+}
diff --git a/base/memory/ref_counted_unittest.nc b/base/memory/ref_counted_unittest.nc
new file mode 100644
index 0000000..b8c371f
--- /dev/null
+++ b/base/memory/ref_counted_unittest.nc
@@ -0,0 +1,28 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/ref_counted.h"
+
+namespace base {
+
+class InitialRefCountIsZero : public base::RefCounted<InitialRefCountIsZero> {
+ public:
+  InitialRefCountIsZero() {}
+ private:
+  friend class base::RefCounted<InitialRefCountIsZero>;
+  ~InitialRefCountIsZero() {}
+};
+
+// TODO(hans): Remove .* and update the static_assert expectations once we roll
+// past Clang r313315. https://crbug.com/765692.
+
+#if defined(NCTEST_ADOPT_REF_TO_ZERO_START)  // [r"fatal error: static_assert failed .*\"Use AdoptRef only if the reference count starts from one\.\""]
+
+void WontCompile() {
+  AdoptRef(new InitialRefCountIsZero());
+}
+
+#endif
+
+}  // namespace base
diff --git a/base/memory/scoped_policy.h b/base/memory/scoped_policy.h
new file mode 100644
index 0000000..5dbf204
--- /dev/null
+++ b/base/memory/scoped_policy.h
@@ -0,0 +1,25 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_SCOPED_POLICY_H_
+#define BASE_MEMORY_SCOPED_POLICY_H_
+
+namespace base {
+namespace scoped_policy {
+
+// Defines the ownership policy for a scoped object.
+enum OwnershipPolicy {
+  // The scoped object takes ownership of an object by taking over an existing
+  // ownership claim.
+  ASSUME,
+
+  // The scoped object will retain the object, and any initial ownership is
+  // not changed.
+  RETAIN
+};
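+
+// For example, a hypothetical scoped wrapper that accepts a policy might be
+// used as follows (ScopedFoo and |object| are illustrative only):
+//
+//   ScopedFoo a(object, base::scoped_policy::RETAIN);  // |object| retained.
+//   ScopedFoo b(object, base::scoped_policy::ASSUME);  // Claim taken over.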
+
+}  // namespace scoped_policy
+}  // namespace base
+
+#endif  // BASE_MEMORY_SCOPED_POLICY_H_
diff --git a/base/memory/scoped_refptr.h b/base/memory/scoped_refptr.h
new file mode 100644
index 0000000..a257617
--- /dev/null
+++ b/base/memory/scoped_refptr.h
@@ -0,0 +1,333 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_SCOPED_REFPTR_H_
+#define BASE_MEMORY_SCOPED_REFPTR_H_
+
+#include <stddef.h>
+
+#include <iosfwd>
+#include <type_traits>
+#include <utility>
+
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/macros.h"
+
+template <class T>
+class scoped_refptr;
+
+namespace base {
+
+template <class, typename>
+class RefCounted;
+template <class, typename>
+class RefCountedThreadSafe;
+
+template <typename T>
+scoped_refptr<T> AdoptRef(T* t);
+
+namespace subtle {
+
+enum AdoptRefTag { kAdoptRefTag };
+enum StartRefCountFromZeroTag { kStartRefCountFromZeroTag };
+enum StartRefCountFromOneTag { kStartRefCountFromOneTag };
+
+template <typename T, typename U, typename V>
+constexpr bool IsRefCountPreferenceOverridden(const T*,
+                                              const RefCounted<U, V>*) {
+  return !std::is_same<std::decay_t<decltype(T::kRefCountPreference)>,
+                       std::decay_t<decltype(U::kRefCountPreference)>>::value;
+}
+
+template <typename T, typename U, typename V>
+constexpr bool IsRefCountPreferenceOverridden(
+    const T*,
+    const RefCountedThreadSafe<U, V>*) {
+  return !std::is_same<std::decay_t<decltype(T::kRefCountPreference)>,
+                       std::decay_t<decltype(U::kRefCountPreference)>>::value;
+}
+
+constexpr bool IsRefCountPreferenceOverridden(...) {
+  return false;
+}
+
+}  // namespace subtle
+
+// Creates a scoped_refptr from a raw pointer without incrementing the reference
+// count. Use this only for a newly created object whose reference count starts
+// from 1 instead of 0.
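+//
+// A minimal usage sketch (Foo is a hypothetical type that declares
+// REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE(), so its count starts from one):
+//
+//   scoped_refptr<Foo> foo = base::AdoptRef(new Foo);
+//   // |foo| holds the initial reference; no extra AddRef() was performed.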
+template <typename T>
+scoped_refptr<T> AdoptRef(T* obj) {
+  using Tag = std::decay_t<decltype(T::kRefCountPreference)>;
+  static_assert(std::is_same<subtle::StartRefCountFromOneTag, Tag>::value,
+                "Use AdoptRef only for the reference count starts from one.");
+
+  DCHECK(obj);
+  DCHECK(obj->HasOneRef());
+  obj->Adopted();
+  return scoped_refptr<T>(obj, subtle::kAdoptRefTag);
+}
+
+namespace subtle {
+
+template <typename T>
+scoped_refptr<T> AdoptRefIfNeeded(T* obj, StartRefCountFromZeroTag) {
+  return scoped_refptr<T>(obj);
+}
+
+template <typename T>
+scoped_refptr<T> AdoptRefIfNeeded(T* obj, StartRefCountFromOneTag) {
+  return AdoptRef(obj);
+}
+
+}  // namespace subtle
+
+// Constructs an instance of T, which is a ref counted type, and wraps the
+// object into a scoped_refptr<T>.
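+//
+// For example (MyFoo is a hypothetical ref counted type):
+//
+//   scoped_refptr<MyFoo> foo = base::MakeRefCounted<MyFoo>(arg1, arg2);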
+template <typename T, typename... Args>
+scoped_refptr<T> MakeRefCounted(Args&&... args) {
+  T* obj = new T(std::forward<Args>(args)...);
+  return subtle::AdoptRefIfNeeded(obj, T::kRefCountPreference);
+}
+
+// Takes an instance of T, which is a ref counted type, and wraps the object
+// into a scoped_refptr<T>.
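+//
+// For example (|raw_foo| is a hypothetical MyFoo*; unlike AdoptRef(), this
+// increments the reference count):
+//
+//   scoped_refptr<MyFoo> foo = base::WrapRefCounted(raw_foo);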
+template <typename T>
+scoped_refptr<T> WrapRefCounted(T* t) {
+  return scoped_refptr<T>(t);
+}
+
+}  // namespace base
+
+//
+// A smart pointer class for reference counted objects.  Use this class instead
+// of calling AddRef and Release manually on a reference counted object to
+// avoid common memory leaks caused by forgetting to Release an object
+// reference.  Sample usage:
+//
+//   class MyFoo : public RefCounted<MyFoo> {
+//    ...
+//    private:
+//     friend class RefCounted<MyFoo>;  // Allow destruction by RefCounted<>.
+//     ~MyFoo();                        // Destructor must be private/protected.
+//   };
+//
+//   void some_function() {
+//     scoped_refptr<MyFoo> foo = MakeRefCounted<MyFoo>();
+//     foo->Method(param);
+//     // |foo| is released when this function returns
+//   }
+//
+//   void some_other_function() {
+//     scoped_refptr<MyFoo> foo = MakeRefCounted<MyFoo>();
+//     ...
+//     foo = nullptr;  // explicitly releases |foo|
+//     ...
+//     if (foo)
+//       foo->Method(param);
+//   }
+//
+// The above examples show how scoped_refptr<T> acts like a pointer to T.
+// Given two scoped_refptr<T> classes, it is also possible to exchange
+// references between the two objects, like so:
+//
+//   {
+//     scoped_refptr<MyFoo> a = MakeRefCounted<MyFoo>();
+//     scoped_refptr<MyFoo> b;
+//
+//     b.swap(a);
+//     // now, |b| references the MyFoo object, and |a| references nullptr.
+//   }
+//
+// To make both |a| and |b| in the above example reference the same MyFoo
+// object, simply use the assignment operator:
+//
+//   {
+//     scoped_refptr<MyFoo> a = MakeRefCounted<MyFoo>();
+//     scoped_refptr<MyFoo> b;
+//
+//     b = a;
+//     // now, |a| and |b| each own a reference to the same MyFoo object.
+//   }
+//
+// Also see Chromium's ownership and calling conventions:
+// https://chromium.googlesource.com/chromium/src/+/lkgr/styleguide/c++/c++.md#object-ownership-and-calling-conventions
+// Specifically:
+//   If the function (at least sometimes) takes a ref on a refcounted object,
+//   declare the param as scoped_refptr<T>. The caller can decide whether it
+//   wishes to transfer ownership (by calling std::move(t) when passing t) or
+//   retain its ref (by simply passing t directly).
+//   In other words, use scoped_refptr like you would a std::unique_ptr except
+//   in the odd case where it's required to hold on to a ref while handing one
+//   to another component (if a component merely needs to use t on the stack
+//   without keeping a ref: pass t as a raw T*).
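+//
+// For example (TakesRef and MyFoo are illustrative):
+//
+//   void TakesRef(scoped_refptr<MyFoo> foo);
+//
+//   TakesRef(foo);             // The caller keeps its own ref.
+//   TakesRef(std::move(foo));  // The caller transfers its ref.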
+template <class T>
+class scoped_refptr {
+ public:
+  typedef T element_type;
+
+  constexpr scoped_refptr() = default;
+
+  // Constructs from a raw pointer; constexpr only if |p| is null.
+  constexpr scoped_refptr(T* p) : ptr_(p) {
+    if (ptr_)
+      AddRef(ptr_);
+  }
+
+  // Copy constructor. This is required in addition to the copy conversion
+  // constructor below.
+  scoped_refptr(const scoped_refptr& r) : scoped_refptr(r.ptr_) {}
+
+  // Copy conversion constructor.
+  template <typename U,
+            typename = typename std::enable_if<
+                std::is_convertible<U*, T*>::value>::type>
+  scoped_refptr(const scoped_refptr<U>& r) : scoped_refptr(r.ptr_) {}
+
+  // Move constructor. This is required in addition to the move conversion
+  // constructor below.
+  scoped_refptr(scoped_refptr&& r) noexcept : ptr_(r.ptr_) { r.ptr_ = nullptr; }
+
+  // Move conversion constructor.
+  template <typename U,
+            typename = typename std::enable_if<
+                std::is_convertible<U*, T*>::value>::type>
+  scoped_refptr(scoped_refptr<U>&& r) noexcept : ptr_(r.ptr_) {
+    r.ptr_ = nullptr;
+  }
+
+  ~scoped_refptr() {
+    static_assert(!base::subtle::IsRefCountPreferenceOverridden(
+                      static_cast<T*>(nullptr), static_cast<T*>(nullptr)),
+                  "It's unsafe to override the ref count preference."
+                  " Please remove REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE"
+                  " from subclasses.");
+    if (ptr_)
+      Release(ptr_);
+  }
+
+  T* get() const { return ptr_; }
+
+  T& operator*() const {
+    DCHECK(ptr_);
+    return *ptr_;
+  }
+
+  T* operator->() const {
+    DCHECK(ptr_);
+    return ptr_;
+  }
+
+  scoped_refptr& operator=(T* p) { return *this = scoped_refptr(p); }
+
+  // Unified assignment operator.
+  scoped_refptr& operator=(scoped_refptr r) noexcept {
+    swap(r);
+    return *this;
+  }
+
+  void swap(scoped_refptr& r) noexcept { std::swap(ptr_, r.ptr_); }
+
+  explicit operator bool() const { return ptr_ != nullptr; }
+
+  template <typename U>
+  bool operator==(const scoped_refptr<U>& rhs) const {
+    return ptr_ == rhs.get();
+  }
+
+  template <typename U>
+  bool operator!=(const scoped_refptr<U>& rhs) const {
+    return !operator==(rhs);
+  }
+
+  template <typename U>
+  bool operator<(const scoped_refptr<U>& rhs) const {
+    return ptr_ < rhs.get();
+  }
+
+ protected:
+  T* ptr_ = nullptr;
+
+ private:
+  template <typename U>
+  friend scoped_refptr<U> base::AdoptRef(U*);
+
+  scoped_refptr(T* p, base::subtle::AdoptRefTag) : ptr_(p) {}
+
+  // Friend required for move constructors that set r.ptr_ to null.
+  template <typename U>
+  friend class scoped_refptr;
+
+  // Non-inline helpers to allow:
+  //     class Opaque;
+  //     extern template class scoped_refptr<Opaque>;
+  // Otherwise the compiler will complain that Opaque is an incomplete type.
+  static void AddRef(T* ptr);
+  static void Release(T* ptr);
+};
+
+// static
+template <typename T>
+void scoped_refptr<T>::AddRef(T* ptr) {
+  ptr->AddRef();
+}
+
+// static
+template <typename T>
+void scoped_refptr<T>::Release(T* ptr) {
+  ptr->Release();
+}
+
+template <typename T, typename U>
+bool operator==(const scoped_refptr<T>& lhs, const U* rhs) {
+  return lhs.get() == rhs;
+}
+
+template <typename T, typename U>
+bool operator==(const T* lhs, const scoped_refptr<U>& rhs) {
+  return lhs == rhs.get();
+}
+
+template <typename T>
+bool operator==(const scoped_refptr<T>& lhs, std::nullptr_t null) {
+  return !static_cast<bool>(lhs);
+}
+
+template <typename T>
+bool operator==(std::nullptr_t null, const scoped_refptr<T>& rhs) {
+  return !static_cast<bool>(rhs);
+}
+
+template <typename T, typename U>
+bool operator!=(const scoped_refptr<T>& lhs, const U* rhs) {
+  return !operator==(lhs, rhs);
+}
+
+template <typename T, typename U>
+bool operator!=(const T* lhs, const scoped_refptr<U>& rhs) {
+  return !operator==(lhs, rhs);
+}
+
+template <typename T>
+bool operator!=(const scoped_refptr<T>& lhs, std::nullptr_t null) {
+  return !operator==(lhs, null);
+}
+
+template <typename T>
+bool operator!=(std::nullptr_t null, const scoped_refptr<T>& rhs) {
+  return !operator==(null, rhs);
+}
+
+template <typename T>
+std::ostream& operator<<(std::ostream& out, const scoped_refptr<T>& p) {
+  return out << p.get();
+}
+
+template <typename T>
+void swap(scoped_refptr<T>& lhs, scoped_refptr<T>& rhs) noexcept {
+  lhs.swap(rhs);
+}
+
+#endif  // BASE_MEMORY_SCOPED_REFPTR_H_
diff --git a/base/memory/shared_memory.h b/base/memory/shared_memory.h
new file mode 100644
index 0000000..c573ef7
--- /dev/null
+++ b/base/memory/shared_memory.h
@@ -0,0 +1,254 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_SHARED_MEMORY_H_
+#define BASE_MEMORY_SHARED_MEMORY_H_
+
+#include <stddef.h>
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/hash.h"
+#include "base/macros.h"
+#include "base/memory/shared_memory_handle.h"
+#include "base/process/process_handle.h"
+#include "base/strings/string16.h"
+#include "build/build_config.h"
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include <stdio.h>
+#include <sys/types.h>
+#include <semaphore.h>
+#include "base/file_descriptor_posix.h"
+#include "base/files/file_util.h"
+#include "base/files/scoped_file.h"
+#endif
+
+#if defined(OS_WIN)
+#include "base/win/scoped_handle.h"
+#endif
+
+namespace base {
+
+class FilePath;
+
+// Options for creating a shared memory object.
+struct BASE_EXPORT SharedMemoryCreateOptions {
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+  // The type of OS primitive that should back the SharedMemory object.
+  SharedMemoryHandle::Type type = SharedMemoryHandle::MACH;
+#elif !defined(OS_FUCHSIA)
+  // DEPRECATED (crbug.com/345734):
+  // If NULL, the object is anonymous.  This pointer is owned by the caller
+  // and must live through the call to Create().
+  const std::string* name_deprecated = nullptr;
+
+  // DEPRECATED (crbug.com/345734):
+  // If true, and the shared memory already exists, Create() will open the
+  // existing shared memory and ignore the size parameter.  If false,
+  // shared memory must not exist.  This flag is meaningless unless
+  // name_deprecated is non-NULL.
+  bool open_existing_deprecated = false;
+#endif  // defined(OS_MACOSX) && !defined(OS_IOS)
+
+  // Size of the shared memory object to be created.
+  // When opening an existing object, this has no effect.
+  size_t size = 0;
+
+  // If true, mappings might need to be made executable later.
+  bool executable = false;
+
+  // If true, the file can be shared read-only to a process.
+  bool share_read_only = false;
+};
+
+// Platform abstraction for shared memory.
+// SharedMemory consumes a SharedMemoryHandle [potentially one that it created]
+// to map a shared memory OS resource into the virtual address space of the
+// current process.
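+//
+// A minimal usage sketch (error handling omitted):
+//
+//   base::SharedMemory shm;
+//   if (shm.CreateAndMapAnonymous(4096))
+//     memset(shm.memory(), 0, shm.requested_size());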
+class BASE_EXPORT SharedMemory {
+ public:
+  SharedMemory();
+
+#if defined(OS_WIN)
+  // Similar to the default constructor, except that this allows for
+  // calling LockDeprecated() to acquire the named mutex before either Create or
+  // Open is called on Windows.
+  explicit SharedMemory(const string16& name);
+#endif
+
+  // Create a new SharedMemory object from an existing, open
+  // shared memory file.
+  //
+  // WARNING: This does not reduce the OS-level permissions on the handle; it
+  // only affects how the SharedMemory will be mmapped. Use
+  // GetReadOnlyHandle to drop permissions. TODO(jln,jyasskin): DCHECK
+  // that |read_only| matches the permissions of the handle.
+  SharedMemory(const SharedMemoryHandle& handle, bool read_only);
+
+  // Closes any open files.
+  ~SharedMemory();
+
+  // Returns true iff the given handle is valid (i.e. not the distinguished
+  // invalid value; NULL for a HANDLE and -1 for a file descriptor).
+  static bool IsHandleValid(const SharedMemoryHandle& handle);
+
+  // Closes a shared memory handle.
+  static void CloseHandle(const SharedMemoryHandle& handle);
+
+  // Returns the maximum number of handles that can be open at once per process.
+  static size_t GetHandleLimit();
+
+  // Duplicates the underlying OS primitive. Returns an invalid handle on
+  // failure. The caller is responsible for destroying the duplicated OS
+  // primitive.
+  static SharedMemoryHandle DuplicateHandle(const SharedMemoryHandle& handle);
+
+#if defined(OS_POSIX)
+  // This method requires that the SharedMemoryHandle is backed by a POSIX fd.
+  static int GetFdFromSharedMemoryHandle(const SharedMemoryHandle& handle);
+#endif
+
+  // Creates a shared memory object as described by the options struct.
+  // Returns true on success and false on failure.
+  bool Create(const SharedMemoryCreateOptions& options);
+
+  // Creates and maps an anonymous shared memory segment of size |size|.
+  // Returns true on success and false on failure.
+  bool CreateAndMapAnonymous(size_t size);
+
+  // Creates an anonymous shared memory segment of size |size|.
+  // Returns true on success and false on failure.
+  bool CreateAnonymous(size_t size) {
+    SharedMemoryCreateOptions options;
+    options.size = size;
+    return Create(options);
+  }
+
+#if (!defined(OS_MACOSX) || defined(OS_IOS)) && !defined(OS_FUCHSIA)
+  // DEPRECATED (crbug.com/345734):
+  // Creates or opens a shared memory segment based on a name.
+  // If open_existing is true, and the shared memory already exists,
+  // opens the existing shared memory and ignores the size parameter.
+  // If open_existing is false, shared memory must not exist.
+  // size is the size of the block to be created.
+  // Returns true on success, false on failure.
+  bool CreateNamedDeprecated(
+      const std::string& name, bool open_existing, size_t size) {
+    SharedMemoryCreateOptions options;
+    options.name_deprecated = &name;
+    options.open_existing_deprecated = open_existing;
+    options.size = size;
+    return Create(options);
+  }
+
+  // Deletes resources associated with a shared memory segment based on name.
+  // Not all platforms require this call.
+  bool Delete(const std::string& name);
+
+  // Opens a shared memory segment based on a name.
+  // If read_only is true, opens for read-only access.
+  // Returns true on success, false on failure.
+  bool Open(const std::string& name, bool read_only);
+#endif  // (!defined(OS_MACOSX) || defined(OS_IOS)) && !defined(OS_FUCHSIA)
+
+  // Maps the shared memory into the caller's address space.
+  // Returns true on success, false otherwise.  The memory address
+  // is accessed via the memory() accessor.  The mapped address is guaranteed to
+  // have an alignment of at least MAP_MINIMUM_ALIGNMENT. This method will fail
+  // if this object is currently mapped.
+  bool Map(size_t bytes) {
+    return MapAt(0, bytes);
+  }
+
+  // Same as above, but takes |offset| to specify where, from the beginning of
+  // the shared memory block, the mapping should start.
+  // |offset| must be aligned to |SysInfo::VMAllocationGranularity()|.
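+  //
+  // For example (a sketch; both arguments are granularity-aligned):
+  //   size_t granule = SysInfo::VMAllocationGranularity();
+  //   shm.MapAt(2 * granule, granule);  // One granule, starting at granule 2.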
+  bool MapAt(off_t offset, size_t bytes);
+  enum { MAP_MINIMUM_ALIGNMENT = 32 };
+
+  // Unmaps the shared memory from the caller's address space.
+  // Returns true if successful; returns false on error or if the
+  // memory is not mapped.
+  bool Unmap();
+
+  // The size requested when the map is first created.
+  size_t requested_size() const { return requested_size_; }
+
+  // The actual size of the mapped memory (may be larger than requested).
+  size_t mapped_size() const { return mapped_size_; }
+
+  // Gets a pointer to the opened memory space if it has been mapped via
+  // Map().  Returns NULL if it is not mapped.
+  void* memory() const { return memory_; }
+
+  // Returns the underlying OS handle for this segment.
+  // Use of this handle for anything other than an opaque
+  // identifier is not portable.
+  SharedMemoryHandle handle() const;
+
+  // Returns the underlying OS handle for this segment. The caller takes
+  // ownership of the handle and the memory is unmapped. This is equivalent to
+  // duplicating the handle and then calling Unmap() and Close() on this object,
+  // without the overhead of duplicating the handle.
+  SharedMemoryHandle TakeHandle();
+
+  // Closes the open shared memory segment. The memory will remain mapped if
+  // it was previously mapped.
+  // It is safe to call Close repeatedly.
+  void Close();
+
+  // Returns a read-only handle to this shared memory region. The caller takes
+  // ownership of the handle. For POSIX handles, CHECK-fails if the region
+  // wasn't Created or Opened with share_read_only=true, which is required to
+  // make the handle read-only. When the handle is passed to the IPC subsystem,
+  // that takes ownership of the handle. As such, it's not valid to pass the
+  // same handle to the IPC subsystem twice. Returns an invalid handle on
+  // failure.
+  SharedMemoryHandle GetReadOnlyHandle() const;
+
+  // Returns an ID for the mapped region. This is the ID of the
+  // SharedMemoryHandle that was mapped. The ID is valid even after the
+  // SharedMemoryHandle is Closed, as long as the region is not unmapped.
+  const UnguessableToken& mapped_id() const { return mapped_id_; }
+
+ private:
+#if defined(OS_POSIX) && !defined(OS_NACL) && !defined(OS_ANDROID) && \
+    (!defined(OS_MACOSX) || defined(OS_IOS))
+  bool FilePathForMemoryName(const std::string& mem_name, FilePath* path);
+#endif
+
+#if defined(OS_WIN)
+  // If true, this handle came from an external source, so it needs extra
+  // checks before being mapped.
+  bool external_section_ = false;
+  string16 name_;
+#elif !defined(OS_ANDROID) && !defined(OS_FUCHSIA)
+  // If valid, points to the same memory region as |shm_|, but with read-only
+  // permissions.
+  SharedMemoryHandle readonly_shm_;
+#endif
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+  // The mechanism by which the memory is mapped. Only valid if |memory_| is not
+  // |nullptr|.
+  SharedMemoryHandle::Type mapped_memory_mechanism_ = SharedMemoryHandle::MACH;
+#endif
+
+  // The OS primitive that backs the shared memory region.
+  SharedMemoryHandle shm_;
+
+  size_t mapped_size_ = 0;
+  void* memory_ = nullptr;
+  bool read_only_ = false;
+  size_t requested_size_ = 0;
+  base::UnguessableToken mapped_id_;
+
+  DISALLOW_COPY_AND_ASSIGN(SharedMemory);
+};
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_SHARED_MEMORY_H_
diff --git a/base/memory/shared_memory_android.cc b/base/memory/shared_memory_android.cc
new file mode 100644
index 0000000..c126767
--- /dev/null
+++ b/base/memory/shared_memory_android.cc
@@ -0,0 +1,75 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory.h"
+
+#include <stddef.h>
+#include <sys/mman.h>
+
+#include "base/logging.h"
+#include "third_party/ashmem/ashmem.h"
+
+namespace base {
+
+// For Android, we use ashmem to implement SharedMemory. ashmem_create_region
+// will automatically pin the region. We never explicitly call pin/unpin. When
+// all the file descriptors from different processes associated with the region
+// are closed, the memory buffer will go away.
+
+bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
+  DCHECK(!shm_.IsValid());
+
+  if (options.size > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return false;
+
+  // "name" is just a label in ashmem. It is visible in /proc/pid/maps.
+  int fd = ashmem_create_region(
+      options.name_deprecated ? options.name_deprecated->c_str() : "",
+      options.size);
+  shm_ = SharedMemoryHandle::ImportHandle(fd, options.size);
+  if (!shm_.IsValid()) {
+    DLOG(ERROR) << "Shared memory creation failed";
+    return false;
+  }
+
+  int flags = PROT_READ | PROT_WRITE | (options.executable ? PROT_EXEC : 0);
+  int err = ashmem_set_prot_region(shm_.GetHandle(), flags);
+  if (err < 0) {
+    DLOG(ERROR) << "Error " << err << " when setting protection of ashmem";
+    return false;
+  }
+
+  requested_size_ = options.size;
+
+  return true;
+}
+
+bool SharedMemory::Delete(const std::string& name) {
+  // Like on Windows, this intentionally returns true, as ashmem automatically
+  // releases the resource when all FDs on it are closed.
+  return true;
+}
+
+bool SharedMemory::Open(const std::string& name, bool read_only) {
+  // ashmem doesn't support name mapping.
+  NOTIMPLEMENTED();
+  return false;
+}
+
+void SharedMemory::Close() {
+  if (shm_.IsValid()) {
+    shm_.Close();
+    shm_ = SharedMemoryHandle();
+  }
+}
+
+SharedMemoryHandle SharedMemory::GetReadOnlyHandle() const {
+  // There are no read-only Ashmem descriptors on Android.
+  // Instead, the protection mask is a property of the region itself.
+  SharedMemoryHandle handle = shm_.Duplicate();
+  handle.SetReadOnly();
+  return handle;
+}
+
+}  // namespace base
diff --git a/base/memory/shared_memory_fuchsia.cc b/base/memory/shared_memory_fuchsia.cc
new file mode 100644
index 0000000..4036bf6
--- /dev/null
+++ b/base/memory/shared_memory_fuchsia.cc
@@ -0,0 +1,166 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory.h"
+
+#include <limits>
+
+#include <zircon/process.h>
+#include <zircon/rights.h>
+#include <zircon/syscalls.h>
+
+#include "base/bits.h"
+#include "base/fuchsia/scoped_zx_handle.h"
+#include "base/logging.h"
+#include "base/memory/shared_memory_tracker.h"
+#include "base/process/process_metrics.h"
+
+namespace base {
+
+SharedMemory::SharedMemory() {}
+
+SharedMemory::SharedMemory(const SharedMemoryHandle& handle, bool read_only)
+    : shm_(handle), read_only_(read_only) {}
+
+SharedMemory::~SharedMemory() {
+  Unmap();
+  Close();
+}
+
+// static
+bool SharedMemory::IsHandleValid(const SharedMemoryHandle& handle) {
+  return handle.IsValid();
+}
+
+// static
+void SharedMemory::CloseHandle(const SharedMemoryHandle& handle) {
+  DCHECK(handle.IsValid());
+  handle.Close();
+}
+
+// static
+size_t SharedMemory::GetHandleLimit() {
+  // Duplicated from the internal Zircon kernel constant kMaxHandleCount
+  // (kernel/lib/zircon/zircon.cpp).
+  return 256 * 1024u;
+}
+
+bool SharedMemory::CreateAndMapAnonymous(size_t size) {
+  return CreateAnonymous(size) && Map(size);
+}
+
+bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
+  requested_size_ = options.size;
+  mapped_size_ = bits::Align(requested_size_, GetPageSize());
+  ScopedZxHandle vmo;
+  zx_status_t status = zx_vmo_create(mapped_size_, 0, vmo.receive());
+  if (status != ZX_OK) {
+    DLOG(ERROR) << "zx_vmo_create failed, status=" << status;
+    return false;
+  }
+
+  if (!options.executable) {
+    // If options.executable isn't set, drop that permission by replacement.
+    const int kNoExecFlags = ZX_DEFAULT_VMO_RIGHTS & ~ZX_RIGHT_EXECUTE;
+    ScopedZxHandle old_vmo(std::move(vmo));
+    status = zx_handle_replace(old_vmo.get(), kNoExecFlags, vmo.receive());
+    if (status != ZX_OK) {
+      DLOG(ERROR) << "zx_handle_replace() failed: "
+                  << zx_status_get_string(status);
+      return false;
+    }
+    ignore_result(old_vmo.release());
+  }
+
+  shm_ = SharedMemoryHandle(vmo.release(), mapped_size_,
+                            UnguessableToken::Create());
+  return true;
+}
+
+bool SharedMemory::MapAt(off_t offset, size_t bytes) {
+  if (!shm_.IsValid())
+    return false;
+
+  if (bytes > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return false;
+
+  if (memory_)
+    return false;
+
+  int flags = ZX_VM_FLAG_PERM_READ;
+  if (!read_only_)
+    flags |= ZX_VM_FLAG_PERM_WRITE;
+  uintptr_t addr;
+  zx_status_t status = zx_vmar_map(zx_vmar_root_self(), 0, shm_.GetHandle(),
+                                   offset, bytes, flags, &addr);
+  if (status != ZX_OK) {
+    DLOG(ERROR) << "zx_vmar_map failed, status=" << status;
+    return false;
+  }
+  memory_ = reinterpret_cast<void*>(addr);
+
+  mapped_size_ = bytes;
+  mapped_id_ = shm_.GetGUID();
+  SharedMemoryTracker::GetInstance()->IncrementMemoryUsage(*this);
+  return true;
+}
+
+bool SharedMemory::Unmap() {
+  if (!memory_)
+    return false;
+
+  SharedMemoryTracker::GetInstance()->DecrementMemoryUsage(*this);
+
+  uintptr_t addr = reinterpret_cast<uintptr_t>(memory_);
+  zx_status_t status = zx_vmar_unmap(zx_vmar_root_self(), addr, mapped_size_);
+  if (status != ZX_OK) {
+    DLOG(ERROR) << "zx_vmar_unmap failed, status=" << status;
+    return false;
+  }
+
+  memory_ = nullptr;
+  mapped_id_ = UnguessableToken();
+  return true;
+}
+
+void SharedMemory::Close() {
+  if (shm_.IsValid()) {
+    shm_.Close();
+    shm_ = SharedMemoryHandle();
+  }
+}
+
+SharedMemoryHandle SharedMemory::handle() const {
+  return shm_;
+}
+
+SharedMemoryHandle SharedMemory::TakeHandle() {
+  SharedMemoryHandle handle(shm_);
+  handle.SetOwnershipPassesToIPC(true);
+  Unmap();
+  shm_ = SharedMemoryHandle();
+  return handle;
+}
+
+SharedMemoryHandle SharedMemory::DuplicateHandle(
+    const SharedMemoryHandle& handle) {
+  return handle.Duplicate();
+}
+
+SharedMemoryHandle SharedMemory::GetReadOnlyHandle() const {
+  zx_handle_t duped_handle;
+  const int kNoWriteOrExec =
+      ZX_DEFAULT_VMO_RIGHTS &
+      ~(ZX_RIGHT_WRITE | ZX_RIGHT_EXECUTE | ZX_RIGHT_SET_PROPERTY);
+  zx_status_t status =
+      zx_handle_duplicate(shm_.GetHandle(), kNoWriteOrExec, &duped_handle);
+  if (status != ZX_OK)
+    return SharedMemoryHandle();
+
+  SharedMemoryHandle handle(duped_handle, shm_.GetSize(), shm_.GetGUID());
+  handle.SetOwnershipPassesToIPC(true);
+  return handle;
+}
+
+}  // namespace base
diff --git a/base/memory/shared_memory_handle.cc b/base/memory/shared_memory_handle.cc
new file mode 100644
index 0000000..085bde4
--- /dev/null
+++ b/base/memory/shared_memory_handle.cc
@@ -0,0 +1,23 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory_handle.h"
+
+namespace base {
+
+SharedMemoryHandle::SharedMemoryHandle(const SharedMemoryHandle& handle) =
+    default;
+
+SharedMemoryHandle& SharedMemoryHandle::operator=(
+    const SharedMemoryHandle& handle) = default;
+
+base::UnguessableToken SharedMemoryHandle::GetGUID() const {
+  return guid_;
+}
+
+size_t SharedMemoryHandle::GetSize() const {
+  return size_;
+}
+
+}  // namespace base
diff --git a/base/memory/shared_memory_handle.h b/base/memory/shared_memory_handle.h
new file mode 100644
index 0000000..ae143af
--- /dev/null
+++ b/base/memory/shared_memory_handle.h
@@ -0,0 +1,236 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_SHARED_MEMORY_HANDLE_H_
+#define BASE_MEMORY_SHARED_MEMORY_HANDLE_H_
+
+#include <stddef.h>
+
+#include "base/unguessable_token.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include "base/process/process_handle.h"
+#include "base/win/windows_types.h"
+#elif defined(OS_MACOSX) && !defined(OS_IOS)
+#include <mach/mach.h>
+#include "base/base_export.h"
+#include "base/file_descriptor_posix.h"
+#include "base/macros.h"
+#include "base/process/process_handle.h"
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include <sys/types.h>
+#include "base/file_descriptor_posix.h"
+#endif
+
+namespace base {
+
+// SharedMemoryHandle is the smallest possible IPC-transportable "reference" to
+// a shared memory OS resource. A "reference" can be consumed exactly once [by
+// base::SharedMemory] to map the shared memory OS resource into the virtual
+// address space of the current process.
+// TODO(erikchen): This class should have strong ownership semantics to prevent
+// leaks of the underlying OS resource. https://crbug.com/640840.
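+//
+// A sketch of the consume-once flow (|shm| is an already-created
+// base::SharedMemory):
+//
+//   base::SharedMemoryHandle handle = shm.TakeHandle();
+//   base::SharedMemory mapped(handle, false /* read_only */);
+//   mapped.Map(handle.GetSize());  // The reference is consumed by mapping.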
+class BASE_EXPORT SharedMemoryHandle {
+ public:
+  // The default constructor returns an invalid SharedMemoryHandle.
+  SharedMemoryHandle();
+
+  // Standard copy constructor. The new instance shares the underlying OS
+  // primitives.
+  SharedMemoryHandle(const SharedMemoryHandle& handle);
+
+  // Standard assignment operator. The updated instance shares the underlying
+  // OS primitives.
+  SharedMemoryHandle& operator=(const SharedMemoryHandle& handle);
+
+  // Closes the underlying OS resource.
+  // The fact that this method needs to be "const" is an artifact of the
+  // original interface for base::SharedMemory::CloseHandle.
+  // TODO(erikchen): This doesn't clear the underlying reference, which seems
+  // like a bug, but is how this class has always worked. Fix this:
+  // https://crbug.com/716072.
+  void Close() const;
+
+  // Whether ownership of the underlying OS resource is implicitly passed to
+  // the IPC subsystem during serialization.
+  void SetOwnershipPassesToIPC(bool ownership_passes);
+  bool OwnershipPassesToIPC() const;
+
+  // Whether the underlying OS resource is valid.
+  bool IsValid() const;
+
+  // Duplicates the underlying OS resource. Using the return value as a
+  // parameter to an IPC message will cause the IPC subsystem to consume the OS
+  // resource.
+  SharedMemoryHandle Duplicate() const;
+
+  // Uniquely identifies the shared memory region that the underlying OS resource
+  // points to. Multiple SharedMemoryHandles that point to the same shared
+  // memory region will have the same GUID. Preserved across IPC.
+  base::UnguessableToken GetGUID() const;
+
+  // Returns the size of the memory region that SharedMemoryHandle points to.
+  size_t GetSize() const;
+
+#if defined(OS_WIN)
+  // Takes implicit ownership of |h|.
+  // |guid| uniquely identifies the shared memory region pointed to by the
+  // underlying OS resource. If the HANDLE is associated with another
+  // SharedMemoryHandle, the caller must pass the |guid| of that
+  // SharedMemoryHandle. Otherwise, the caller should generate a new
+  // UnguessableToken.
+  // Passing the wrong |size| has no immediate consequence, but may cause errors
+  // when trying to map the SharedMemoryHandle at a later point in time.
+  SharedMemoryHandle(HANDLE h, size_t size, const base::UnguessableToken& guid);
+  HANDLE GetHandle() const;
+#elif defined(OS_FUCHSIA)
+  // Takes implicit ownership of |h|.
+  // |guid| uniquely identifies the shared memory region pointed to by the
+  // underlying OS resource. If the zx_handle_t is associated with another
+  // SharedMemoryHandle, the caller must pass the |guid| of that
+  // SharedMemoryHandle. Otherwise, the caller should generate a new
+  // UnguessableToken.
+  // Passing the wrong |size| has no immediate consequence, but may cause errors
+  // when trying to map the SharedMemoryHandle at a later point in time.
+  SharedMemoryHandle(zx_handle_t h,
+                     size_t size,
+                     const base::UnguessableToken& guid);
+  zx_handle_t GetHandle() const;
+#elif defined(OS_MACOSX) && !defined(OS_IOS)
+  enum Type {
+    // The SharedMemoryHandle is backed by a POSIX fd.
+    POSIX,
+    // The SharedMemoryHandle is backed by the Mach primitive "memory object".
+    MACH,
+  };
+
+  // Makes a Mach-based SharedMemoryHandle of the given size. On error,
+  // subsequent calls to IsValid() return false.
+  // Passing the wrong |size| has no immediate consequence, but may cause errors
+  // when trying to map the SharedMemoryHandle at a later point in time.
+  SharedMemoryHandle(mach_vm_size_t size, const base::UnguessableToken& guid);
+
+  // Makes a Mach-based SharedMemoryHandle from |memory_object|, a named entry
+  // in the current task. The memory region has size |size|.
+  // Passing the wrong |size| has no immediate consequence, but may cause errors
+  // when trying to map the SharedMemoryHandle at a later point in time.
+  SharedMemoryHandle(mach_port_t memory_object,
+                     mach_vm_size_t size,
+                     const base::UnguessableToken& guid);
+
+  // Exposed so that the SharedMemoryHandle can be transported between
+  // processes.
+  mach_port_t GetMemoryObject() const;
+
+  // The SharedMemoryHandle must be valid.
+  // Returns whether the SharedMemoryHandle was successfully mapped into memory.
+  // On success, |memory| is an output variable that contains the start of the
+  // mapped memory.
+  bool MapAt(off_t offset, size_t bytes, void** memory, bool read_only);
+#elif defined(OS_POSIX)
+  // Creates a SharedMemoryHandle from an |fd| supplied from an external
+  // service.
+  // Passing the wrong |size| has no immediate consequence, but may cause errors
+  // when trying to map the SharedMemoryHandle at a later point in time.
+  static SharedMemoryHandle ImportHandle(int fd, size_t size);
+
+  // Returns the underlying OS resource.
+  int GetHandle() const;
+
+  // Invalidates [but doesn't close] the underlying OS resource. This will leak
+  // unless the caller is careful.
+  int Release();
+#endif
+
+#if defined(OS_ANDROID)
+  // Marks the current file descriptor as read-only, for the purpose of
+  // mapping. This is independent of the region's read-only status.
+  void SetReadOnly() { read_only_ = true; }
+
+  // Returns true iff the descriptor is to be used for read-only
+  // mappings.
+  bool IsReadOnly() const { return read_only_; }
+
+  // Returns true iff the corresponding region is read-only.
+  bool IsRegionReadOnly() const;
+
+  // Tries to set the region read-only. On success, any future attempt at
+  // read-write mapping will fail.
+  bool SetRegionReadOnly() const;
+#endif
+
+#if defined(OS_POSIX)
+  // Constructs a SharedMemoryHandle backed by a FileDescriptor. The newly
+  // created instance has the same ownership semantics as base::FileDescriptor.
+  // This typically means that the SharedMemoryHandle takes ownership of the
+  // |fd| if |auto_close| is true. Unfortunately, it's common for existing code
+  // to make shallow copies of SharedMemoryHandle, and the one that is finally
+  // passed into a base::SharedMemory is the one that "consumes" the fd.
+  //
+  // |guid| uniquely identifies the shared memory region pointed to by the
+  // underlying OS resource. If |file_descriptor| is associated with another
+  // SharedMemoryHandle, the caller must pass the |guid| of that
+  // SharedMemoryHandle. Otherwise, the caller should generate a new
+  // UnguessableToken.
+  // Passing the wrong |size| has no immediate consequence, but may cause errors
+  // when trying to map the SharedMemoryHandle at a later point in time.
+  SharedMemoryHandle(const base::FileDescriptor& file_descriptor,
+                     size_t size,
+                     const base::UnguessableToken& guid);
+#endif
+
+ private:
+#if defined(OS_WIN)
+  HANDLE handle_ = nullptr;
+
+  // Whether passing this object as a parameter to an IPC message passes
+  // ownership of |handle_| to the IPC stack. This is meant to mimic the
+  // behavior of the |auto_close| parameter of FileDescriptor. This member only
+  // affects attachment-brokered SharedMemoryHandles.
+  // Defaults to |false|.
+  bool ownership_passes_to_ipc_ = false;
+#elif defined(OS_FUCHSIA)
+  zx_handle_t handle_ = ZX_HANDLE_INVALID;
+  bool ownership_passes_to_ipc_ = false;
+#elif defined(OS_MACOSX) && !defined(OS_IOS)
+  friend class SharedMemory;
+  friend bool CheckReadOnlySharedMemoryHandleForTesting(
+      SharedMemoryHandle handle);
+
+  Type type_ = MACH;
+
+  // Each instance of a SharedMemoryHandle is backed either by a POSIX fd or a
+  // Mach port. |type_| determines the backing member.
+  union {
+    FileDescriptor file_descriptor_;
+
+    struct {
+      mach_port_t memory_object_ = MACH_PORT_NULL;
+
+      // Whether passing this object as a parameter to an IPC message passes
+      // ownership of |memory_object_| to the IPC stack. This is meant to mimic
+      // the behavior of the |auto_close| parameter of FileDescriptor.
+      // Defaults to |false|.
+      bool ownership_passes_to_ipc_ = false;
+    };
+  };
+#elif defined(OS_ANDROID)
+  friend class SharedMemory;
+
+  FileDescriptor file_descriptor_;
+  bool read_only_ = false;
+#elif defined(OS_POSIX)
+  FileDescriptor file_descriptor_;
+#endif
+
+  base::UnguessableToken guid_;
+
+  // The size of the region referenced by the SharedMemoryHandle.
+  size_t size_ = 0;
+};
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_SHARED_MEMORY_HANDLE_H_
diff --git a/base/memory/shared_memory_handle_android.cc b/base/memory/shared_memory_handle_android.cc
new file mode 100644
index 0000000..1b61535
--- /dev/null
+++ b/base/memory/shared_memory_handle_android.cc
@@ -0,0 +1,115 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory_handle.h"
+
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include "base/logging.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/posix/unix_domain_socket.h"
+#include "base/unguessable_token.h"
+#include "third_party/ashmem/ashmem.h"
+
+namespace base {
+
+static int GetAshmemRegionProtectionMask(int fd) {
+  int prot = ashmem_get_prot_region(fd);
+  if (prot < 0) {
+    DPLOG(ERROR) << "ashmem_get_prot_region";
+    return -1;
+  }
+  return prot;
+}
+
+SharedMemoryHandle::SharedMemoryHandle() = default;
+
+SharedMemoryHandle::SharedMemoryHandle(
+    const base::FileDescriptor& file_descriptor,
+    size_t size,
+    const base::UnguessableToken& guid)
+    : guid_(guid), size_(size) {
+  DCHECK_GE(file_descriptor.fd, 0);
+  file_descriptor_ = file_descriptor;
+}
+
+// static
+SharedMemoryHandle SharedMemoryHandle::ImportHandle(int fd, size_t size) {
+  SharedMemoryHandle handle;
+  handle.file_descriptor_.fd = fd;
+  handle.file_descriptor_.auto_close = false;
+  handle.guid_ = UnguessableToken::Create();
+  handle.size_ = size;
+  return handle;
+}
+
+int SharedMemoryHandle::GetHandle() const {
+  DCHECK(IsValid());
+  return file_descriptor_.fd;
+}
+
+bool SharedMemoryHandle::IsValid() const {
+  return file_descriptor_.fd >= 0;
+}
+
+void SharedMemoryHandle::Close() const {
+  DCHECK(IsValid());
+  if (IGNORE_EINTR(close(file_descriptor_.fd)) < 0)
+    PLOG(ERROR) << "close";
+}
+
+int SharedMemoryHandle::Release() {
+  int old_fd = file_descriptor_.fd;
+  file_descriptor_.fd = -1;
+  return old_fd;
+}
+
+SharedMemoryHandle SharedMemoryHandle::Duplicate() const {
+  DCHECK(IsValid());
+  SharedMemoryHandle result;
+  int duped_handle = HANDLE_EINTR(dup(file_descriptor_.fd));
+  if (duped_handle >= 0) {
+    result = SharedMemoryHandle(FileDescriptor(duped_handle, true), GetSize(),
+                                GetGUID());
+    if (IsReadOnly())
+      result.SetReadOnly();
+  }
+  return result;
+}
+
+void SharedMemoryHandle::SetOwnershipPassesToIPC(bool ownership_passes) {
+  file_descriptor_.auto_close = ownership_passes;
+}
+
+bool SharedMemoryHandle::OwnershipPassesToIPC() const {
+  return file_descriptor_.auto_close;
+}
+
+bool SharedMemoryHandle::IsRegionReadOnly() const {
+  int prot = GetAshmemRegionProtectionMask(file_descriptor_.fd);
+  return (prot >= 0 && (prot & PROT_WRITE) == 0);
+}
+
+bool SharedMemoryHandle::SetRegionReadOnly() const {
+  int fd = file_descriptor_.fd;
+  int prot = GetAshmemRegionProtectionMask(fd);
+  if (prot < 0)
+    return false;
+
+  if ((prot & PROT_WRITE) == 0) {
+    // Region is already read-only.
+    return true;
+  }
+
+  prot &= ~PROT_WRITE;
+  int ret = ashmem_set_prot_region(fd, prot);
+  if (ret != 0) {
+    DPLOG(ERROR) << "ashmem_set_prot_region";
+    return false;
+  }
+  return true;
+}
+
+}  // namespace base
diff --git a/base/memory/shared_memory_handle_fuchsia.cc b/base/memory/shared_memory_handle_fuchsia.cc
new file mode 100644
index 0000000..eab681f
--- /dev/null
+++ b/base/memory/shared_memory_handle_fuchsia.cc
@@ -0,0 +1,54 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory_handle.h"
+
+#include <zircon/syscalls.h>
+
+#include "base/logging.h"
+#include "base/unguessable_token.h"
+
+namespace base {
+
+SharedMemoryHandle::SharedMemoryHandle() {}
+
+SharedMemoryHandle::SharedMemoryHandle(zx_handle_t h,
+                                       size_t size,
+                                       const base::UnguessableToken& guid)
+    : handle_(h), guid_(guid), size_(size) {}
+
+void SharedMemoryHandle::Close() const {
+  DCHECK(handle_ != ZX_HANDLE_INVALID);
+  zx_handle_close(handle_);
+}
+
+bool SharedMemoryHandle::IsValid() const {
+  return handle_ != ZX_HANDLE_INVALID;
+}
+
+SharedMemoryHandle SharedMemoryHandle::Duplicate() const {
+  zx_handle_t duped_handle;
+  zx_status_t status =
+      zx_handle_duplicate(handle_, ZX_RIGHT_SAME_RIGHTS, &duped_handle);
+  if (status != ZX_OK)
+    return SharedMemoryHandle();
+
+  SharedMemoryHandle handle(duped_handle, GetSize(), GetGUID());
+  handle.SetOwnershipPassesToIPC(true);
+  return handle;
+}
+
+zx_handle_t SharedMemoryHandle::GetHandle() const {
+  return handle_;
+}
+
+void SharedMemoryHandle::SetOwnershipPassesToIPC(bool ownership_passes) {
+  ownership_passes_to_ipc_ = ownership_passes;
+}
+
+bool SharedMemoryHandle::OwnershipPassesToIPC() const {
+  return ownership_passes_to_ipc_;
+}
+
+}  // namespace base
diff --git a/base/memory/shared_memory_handle_mac.cc b/base/memory/shared_memory_handle_mac.cc
new file mode 100644
index 0000000..0e863fa
--- /dev/null
+++ b/base/memory/shared_memory_handle_mac.cc
@@ -0,0 +1,154 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory_handle.h"
+
+#include <mach/mach_vm.h>
+#include <stddef.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include "base/mac/mac_util.h"
+#include "base/mac/mach_logging.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/unguessable_token.h"
+
+namespace base {
+
+SharedMemoryHandle::SharedMemoryHandle() {}
+
+SharedMemoryHandle::SharedMemoryHandle(
+    const base::FileDescriptor& file_descriptor,
+    size_t size,
+    const base::UnguessableToken& guid)
+    : type_(POSIX),
+      file_descriptor_(file_descriptor),
+      guid_(guid),
+      size_(size) {}
+
+SharedMemoryHandle::SharedMemoryHandle(mach_vm_size_t size,
+                                       const base::UnguessableToken& guid) {
+  type_ = MACH;
+  mach_port_t named_right;
+  kern_return_t kr = mach_make_memory_entry_64(
+      mach_task_self(),
+      &size,
+      0,  // Address.
+      MAP_MEM_NAMED_CREATE | VM_PROT_READ | VM_PROT_WRITE,
+      &named_right,
+      MACH_PORT_NULL);  // Parent handle.
+  if (kr != KERN_SUCCESS) {
+    memory_object_ = MACH_PORT_NULL;
+    return;
+  }
+
+  memory_object_ = named_right;
+  size_ = size;
+  ownership_passes_to_ipc_ = false;
+  guid_ = guid;
+}
+
+SharedMemoryHandle::SharedMemoryHandle(mach_port_t memory_object,
+                                       mach_vm_size_t size,
+                                       const base::UnguessableToken& guid)
+    : type_(MACH),
+      memory_object_(memory_object),
+      ownership_passes_to_ipc_(false),
+      guid_(guid),
+      size_(size) {}
+
+SharedMemoryHandle SharedMemoryHandle::Duplicate() const {
+  switch (type_) {
+    case POSIX: {
+      if (!IsValid())
+        return SharedMemoryHandle();
+      int duped_fd = HANDLE_EINTR(dup(file_descriptor_.fd));
+      if (duped_fd < 0)
+        return SharedMemoryHandle();
+      return SharedMemoryHandle(FileDescriptor(duped_fd, true), size_, guid_);
+    }
+    case MACH: {
+      if (!IsValid())
+        return SharedMemoryHandle();
+
+      // Increment the ref count.
+      kern_return_t kr = mach_port_mod_refs(mach_task_self(), memory_object_,
+                                            MACH_PORT_RIGHT_SEND, 1);
+      DCHECK_EQ(kr, KERN_SUCCESS);
+      SharedMemoryHandle handle(*this);
+      handle.SetOwnershipPassesToIPC(true);
+      return handle;
+    }
+  }
+}
+
+bool SharedMemoryHandle::IsValid() const {
+  switch (type_) {
+    case POSIX:
+      return file_descriptor_.fd >= 0;
+    case MACH:
+      return memory_object_ != MACH_PORT_NULL;
+  }
+}
+
+mach_port_t SharedMemoryHandle::GetMemoryObject() const {
+  DCHECK_EQ(type_, MACH);
+  return memory_object_;
+}
+
+bool SharedMemoryHandle::MapAt(off_t offset,
+                               size_t bytes,
+                               void** memory,
+                               bool read_only) {
+  DCHECK(IsValid());
+  switch (type_) {
+    case SharedMemoryHandle::POSIX:
+      *memory = mmap(nullptr, bytes, PROT_READ | (read_only ? 0 : PROT_WRITE),
+                     MAP_SHARED, file_descriptor_.fd, offset);
+      return *memory != MAP_FAILED;
+    case SharedMemoryHandle::MACH:
+      kern_return_t kr = mach_vm_map(
+          mach_task_self(),
+          reinterpret_cast<mach_vm_address_t*>(memory),    // Output parameter
+          bytes,
+          0,                                               // Alignment mask
+          VM_FLAGS_ANYWHERE,
+          memory_object_,
+          offset,
+          FALSE,                                           // Copy
+          VM_PROT_READ | (read_only ? 0 : VM_PROT_WRITE),  // Current protection
+          VM_PROT_WRITE | VM_PROT_READ | VM_PROT_IS_MASK,  // Maximum protection
+          VM_INHERIT_NONE);
+      return kr == KERN_SUCCESS;
+  }
+}
+
+void SharedMemoryHandle::Close() const {
+  if (!IsValid())
+    return;
+
+  switch (type_) {
+    case POSIX:
+      if (IGNORE_EINTR(close(file_descriptor_.fd)) < 0)
+        DPLOG(ERROR) << "Error closing fd";
+      break;
+    case MACH:
+      kern_return_t kr = mach_port_deallocate(mach_task_self(), memory_object_);
+      if (kr != KERN_SUCCESS)
+        MACH_DLOG(ERROR, kr) << "Error deallocating mach port";
+      break;
+  }
+}
+
+void SharedMemoryHandle::SetOwnershipPassesToIPC(bool ownership_passes) {
+  DCHECK_EQ(type_, MACH);
+  ownership_passes_to_ipc_ = ownership_passes;
+}
+
+bool SharedMemoryHandle::OwnershipPassesToIPC() const {
+  DCHECK_EQ(type_, MACH);
+  return ownership_passes_to_ipc_;
+}
+
+}  // namespace base
diff --git a/base/memory/shared_memory_handle_posix.cc b/base/memory/shared_memory_handle_posix.cc
new file mode 100644
index 0000000..09dfb9c
--- /dev/null
+++ b/base/memory/shared_memory_handle_posix.cc
@@ -0,0 +1,71 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory_handle.h"
+
+#include <unistd.h>
+
+#include "base/logging.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/unguessable_token.h"
+
+namespace base {
+
+SharedMemoryHandle::SharedMemoryHandle() = default;
+
+SharedMemoryHandle::SharedMemoryHandle(
+    const base::FileDescriptor& file_descriptor,
+    size_t size,
+    const base::UnguessableToken& guid)
+    : file_descriptor_(file_descriptor), guid_(guid), size_(size) {}
+
+// static
+SharedMemoryHandle SharedMemoryHandle::ImportHandle(int fd, size_t size) {
+  SharedMemoryHandle handle;
+  handle.file_descriptor_.fd = fd;
+  handle.file_descriptor_.auto_close = false;
+  handle.guid_ = UnguessableToken::Create();
+  handle.size_ = size;
+  return handle;
+}
+
+int SharedMemoryHandle::GetHandle() const {
+  return file_descriptor_.fd;
+}
+
+bool SharedMemoryHandle::IsValid() const {
+  return file_descriptor_.fd >= 0;
+}
+
+void SharedMemoryHandle::Close() const {
+  if (IGNORE_EINTR(close(file_descriptor_.fd)) < 0)
+    PLOG(ERROR) << "close";
+}
+
+int SharedMemoryHandle::Release() {
+  int old_fd = file_descriptor_.fd;
+  file_descriptor_.fd = -1;
+  return old_fd;
+}
+
+SharedMemoryHandle SharedMemoryHandle::Duplicate() const {
+  if (!IsValid())
+    return SharedMemoryHandle();
+
+  int duped_handle = HANDLE_EINTR(dup(file_descriptor_.fd));
+  if (duped_handle < 0)
+    return SharedMemoryHandle();
+  return SharedMemoryHandle(FileDescriptor(duped_handle, true), GetSize(),
+                            GetGUID());
+}
+
+void SharedMemoryHandle::SetOwnershipPassesToIPC(bool ownership_passes) {
+  file_descriptor_.auto_close = ownership_passes;
+}
+
+bool SharedMemoryHandle::OwnershipPassesToIPC() const {
+  return file_descriptor_.auto_close;
+}
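+
+// Example (illustrative sketch): a typical handle lifetime when handing the
+// descriptor across IPC. |fd| and |size| are placeholders.
+//
+//   SharedMemoryHandle handle = SharedMemoryHandle::ImportHandle(fd, size);
+//   SharedMemoryHandle dup = handle.Duplicate();  // |dup| owns a fresh fd.
+//   dup.SetOwnershipPassesToIPC(true);            // The IPC layer closes it.
+//   ...
+//   int raw_fd = handle.Release();  // Caller owns |fd| again; |handle| is
+//                                   // now invalid and must not be Close()d.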
+
+}  // namespace base
diff --git a/base/memory/shared_memory_handle_win.cc b/base/memory/shared_memory_handle_win.cc
new file mode 100644
index 0000000..8c11d39
--- /dev/null
+++ b/base/memory/shared_memory_handle_win.cc
@@ -0,0 +1,55 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory_handle.h"
+
+#include "base/logging.h"
+#include "base/unguessable_token.h"
+
+#include <windows.h>
+
+namespace base {
+
+SharedMemoryHandle::SharedMemoryHandle() {}
+
+SharedMemoryHandle::SharedMemoryHandle(HANDLE h,
+                                       size_t size,
+                                       const base::UnguessableToken& guid)
+    : handle_(h), guid_(guid), size_(size) {}
+
+void SharedMemoryHandle::Close() const {
+  DCHECK(handle_ != nullptr);
+  ::CloseHandle(handle_);
+}
+
+bool SharedMemoryHandle::IsValid() const {
+  return handle_ != nullptr;
+}
+
+SharedMemoryHandle SharedMemoryHandle::Duplicate() const {
+  HANDLE duped_handle;
+  ProcessHandle process = GetCurrentProcess();
+  BOOL success = ::DuplicateHandle(process, handle_, process, &duped_handle, 0,
+                                   FALSE, DUPLICATE_SAME_ACCESS);
+  if (!success)
+    return SharedMemoryHandle();
+
+  base::SharedMemoryHandle handle(duped_handle, GetSize(), GetGUID());
+  handle.SetOwnershipPassesToIPC(true);
+  return handle;
+}
+
+HANDLE SharedMemoryHandle::GetHandle() const {
+  return handle_;
+}
+
+void SharedMemoryHandle::SetOwnershipPassesToIPC(bool ownership_passes) {
+  ownership_passes_to_ipc_ = ownership_passes;
+}
+
+bool SharedMemoryHandle::OwnershipPassesToIPC() const {
+  return ownership_passes_to_ipc_;
+}
+
+}  // namespace base
diff --git a/base/memory/shared_memory_helper.cc b/base/memory/shared_memory_helper.cc
new file mode 100644
index 0000000..f98b734
--- /dev/null
+++ b/base/memory/shared_memory_helper.cc
@@ -0,0 +1,157 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory_helper.h"
+
+#if defined(OS_CHROMEOS)
+#include <sys/resource.h>
+#include <sys/time.h>
+
+#include "base/debug/alias.h"
+#endif  // defined(OS_CHROMEOS)
+
+#include "base/threading/thread_restrictions.h"
+
+namespace base {
+
+struct ScopedPathUnlinkerTraits {
+  static const FilePath* InvalidValue() { return nullptr; }
+
+  static void Free(const FilePath* path) {
+    if (unlink(path->value().c_str()))
+      PLOG(WARNING) << "unlink";
+  }
+};
+
+// Unlinks the FilePath when the object is destroyed.
+using ScopedPathUnlinker =
+    ScopedGeneric<const FilePath*, ScopedPathUnlinkerTraits>;
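+
+// A minimal sketch of the ScopedGeneric pattern used here: the traits struct
+// supplies the invalid value and the cleanup routine, and reset() arms the
+// scoper so cleanup runs at scope exit. |some_path| is a placeholder.
+//
+//   ScopedPathUnlinker unlinker;   // Holds nullptr; cleans up nothing.
+//   unlinker.reset(&some_path);    // unlink(some_path) when |unlinker| dies.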
+
+#if !defined(OS_ANDROID)
+bool CreateAnonymousSharedMemory(const SharedMemoryCreateOptions& options,
+                                 ScopedFD* fd,
+                                 ScopedFD* readonly_fd,
+                                 FilePath* path) {
+#if defined(OS_LINUX)
+  // It doesn't make sense to have an open-existing private piece of shmem.
+  DCHECK(!options.open_existing_deprecated);
+#endif  // defined(OS_LINUX)
+  // Q: Why not use the shm_open() etc. APIs?
+  // A: Because they're limited to 4MB on OS X, which is far too small.
+  FilePath directory;
+  ScopedPathUnlinker path_unlinker;
+  if (!GetShmemTempDir(options.executable, &directory))
+    return false;
+
+  fd->reset(base::CreateAndOpenFdForTemporaryFileInDir(directory, path));
+
+  if (!fd->is_valid())
+    return false;
+
+  // Deleting the file prevents anyone else from mapping it in (making it
+  // private), and prevents the need for cleanup (once the last fd is
+  // closed, it is truly freed).
+  path_unlinker.reset(path);
+
+  if (options.share_read_only) {
+    // Also open as readonly so that we can GetReadOnlyHandle.
+    readonly_fd->reset(HANDLE_EINTR(open(path->value().c_str(), O_RDONLY)));
+    if (!readonly_fd->is_valid()) {
+      DPLOG(ERROR) << "open(\"" << path->value() << "\", O_RDONLY) failed";
+      fd->reset();
+      return false;
+    }
+  }
+  return true;
+}
+
+bool PrepareMapFile(ScopedFD fd,
+                    ScopedFD readonly_fd,
+                    int* mapped_file,
+                    int* readonly_mapped_file) {
+  DCHECK_EQ(-1, *mapped_file);
+  DCHECK_EQ(-1, *readonly_mapped_file);
+  if (!fd.is_valid())
+    return false;
+
+  // This function theoretically can block on the disk, but realistically
+  // the temporary files we create will just go into the buffer cache
+  // and be deleted before they ever make it out to disk.
+  base::ThreadRestrictions::ScopedAllowIO allow_io;
+
+  if (readonly_fd.is_valid()) {
+    struct stat st = {};
+    if (fstat(fd.get(), &st))
+      NOTREACHED();
+
+    struct stat readonly_st = {};
+    if (fstat(readonly_fd.get(), &readonly_st))
+      NOTREACHED();
+    if (st.st_dev != readonly_st.st_dev || st.st_ino != readonly_st.st_ino) {
+      LOG(ERROR) << "writable and read-only inodes don't match; bailing";
+      return false;
+    }
+  }
+
+  *mapped_file = HANDLE_EINTR(dup(fd.get()));
+  if (*mapped_file == -1) {
+    NOTREACHED() << "Call to dup failed, errno=" << errno;
+
+#if defined(OS_CHROMEOS)
+    if (errno == EMFILE) {
+      // We're out of file descriptors and are probably about to crash somewhere
+      // else in Chrome anyway. Let's collect what FD information we can and
+      // crash.
+      // Added for debugging crbug.com/733718
+      int original_fd_limit = 16384;
+      struct rlimit rlim;
+      if (getrlimit(RLIMIT_NOFILE, &rlim) == 0) {
+        original_fd_limit = rlim.rlim_cur;
+        if (rlim.rlim_max > rlim.rlim_cur) {
+          // Increase fd limit so breakpad has a chance to write a minidump.
+          rlim.rlim_cur = rlim.rlim_max;
+          if (setrlimit(RLIMIT_NOFILE, &rlim) != 0) {
+            PLOG(ERROR) << "setrlimit() failed";
+          }
+        }
+      } else {
+        PLOG(ERROR) << "getrlimit() failed";
+      }
+
+      const char kFileDataMarker[] = "FDATA";
+      char buf[PATH_MAX];
+      char fd_path[PATH_MAX];
+      char crash_buffer[32 * 1024] = {0};
+      char* crash_ptr = crash_buffer;
+      base::debug::Alias(crash_buffer);
+
+      // Put a marker at the start of our data so we can confirm where it
+      // begins.
+      // strncpy() returns |dest|, not the end of the copied data, so advance
+      // |crash_ptr| manually.
+      strncpy(crash_ptr, kFileDataMarker, strlen(kFileDataMarker));
+      crash_ptr += strlen(kFileDataMarker);
+      for (int i = original_fd_limit; i >= 0; --i) {
+        memset(buf, 0, arraysize(buf));
+        memset(fd_path, 0, arraysize(fd_path));
+        snprintf(fd_path, arraysize(fd_path) - 1, "/proc/self/fd/%d", i);
+        ssize_t count = readlink(fd_path, buf, arraysize(buf) - 1);
+        if (count < 0) {
+          PLOG(ERROR) << "readlink failed for: " << fd_path;
+          continue;
+        }
+
+        if (crash_ptr + count + 1 < crash_buffer + arraysize(crash_buffer)) {
+          strncpy(crash_ptr, buf, count + 1);
+          crash_ptr += count + 1;  // Keep the trailing NUL as a separator.
+        }
+        LOG(ERROR) << i << ": " << buf;
+      }
+      LOG(FATAL) << "Logged for file descriptor exhaustion, crashing now";
+    }
+#endif  // defined(OS_CHROMEOS)
+  }
+  *readonly_mapped_file = readonly_fd.release();
+
+  return true;
+}
+#endif  // !defined(OS_ANDROID)
+
+}  // namespace base
diff --git a/base/memory/shared_memory_helper.h b/base/memory/shared_memory_helper.h
new file mode 100644
index 0000000..2c24f86
--- /dev/null
+++ b/base/memory/shared_memory_helper.h
@@ -0,0 +1,36 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_SHARED_MEMORY_HELPER_H_
+#define BASE_MEMORY_SHARED_MEMORY_HELPER_H_
+
+#include "base/memory/shared_memory.h"
+#include "build/build_config.h"
+
+#include <fcntl.h>
+
+namespace base {
+
+#if !defined(OS_ANDROID)
+// Makes a temporary file, opens it, and then unlinks it. |fd| is populated
+// with the opened fd. |readonly_fd| is populated with the opened fd if
+// options.share_read_only is true. |path| is populated with the location of
+// the file before it was unlinked.
+// Returns false if there's an unhandled failure.
+bool CreateAnonymousSharedMemory(const SharedMemoryCreateOptions& options,
+                                 ScopedFD* fd,
+                                 ScopedFD* readonly_fd,
+                                 FilePath* path);
+
+// Takes the outputs of CreateAnonymousSharedMemory and transfers them into
+// |mapped_file| (a dup() of |fd|) and |readonly_mapped_file| (the released
+// |readonly_fd|).
+bool PrepareMapFile(ScopedFD fd,
+                    ScopedFD readonly_fd,
+                    int* mapped_file,
+                    int* readonly_mapped_file);
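+
+// Example (illustrative sketch): the typical call sequence, mirroring the
+// platform SharedMemory::Create() implementations. Error handling elided.
+//
+//   ScopedFD fd;
+//   ScopedFD readonly_fd;
+//   FilePath path;
+//   if (CreateAnonymousSharedMemory(options, &fd, &readonly_fd, &path)) {
+//     HANDLE_EINTR(ftruncate(fd.get(), options.size));
+//     int mapped_file = -1;
+//     int readonly_mapped_file = -1;
+//     PrepareMapFile(std::move(fd), std::move(readonly_fd), &mapped_file,
+//                    &readonly_mapped_file);
+//     // The two ints now back writable/read-only SharedMemoryHandles.
+//   }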
+#endif  // !defined(OS_ANDROID)
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_SHARED_MEMORY_HELPER_H_
diff --git a/base/memory/shared_memory_mac.cc b/base/memory/shared_memory_mac.cc
new file mode 100644
index 0000000..0a233e5
--- /dev/null
+++ b/base/memory/shared_memory_mac.cc
@@ -0,0 +1,264 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory.h"
+
+#include <errno.h>
+#include <mach/mach_vm.h>
+#include <stddef.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include <limits>
+
+#include "base/files/file_util.h"
+#include "base/files/scoped_file.h"
+#include "base/logging.h"
+#include "base/mac/foundation_util.h"
+#include "base/mac/mac_util.h"
+#include "base/mac/scoped_mach_vm.h"
+#include "base/memory/shared_memory_helper.h"
+#include "base/memory/shared_memory_tracker.h"
+#include "base/metrics/field_trial.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/posix/safe_strerror.h"
+#include "base/process/process_metrics.h"
+#include "base/scoped_generic.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/unguessable_token.h"
+#include "build/build_config.h"
+
+#if defined(OS_IOS)
+#error "MacOS only - iOS uses shared_memory_posix.cc"
+#endif
+
+namespace base {
+
+namespace {
+
+// Returns whether the operation succeeded.
+// |new_handle| is an output variable, populated on success. The caller takes
+// ownership of the underlying memory object.
+// |handle| is the handle to copy.
+// If |handle| is already mapped, |mapped_addr| is its mapped location.
+// Otherwise, |mapped_addr| should be |nullptr|.
+bool MakeMachSharedMemoryHandleReadOnly(SharedMemoryHandle* new_handle,
+                                        SharedMemoryHandle handle,
+                                        void* mapped_addr) {
+  if (!handle.IsValid())
+    return false;
+
+  size_t size = handle.GetSize();
+
+  // Map if necessary.
+  void* temp_addr = mapped_addr;
+  base::mac::ScopedMachVM scoper;
+  if (!temp_addr) {
+    // Intentionally lower current prot and max prot to |VM_PROT_READ|.
+    kern_return_t kr = mach_vm_map(
+        mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&temp_addr),
+        size, 0, VM_FLAGS_ANYWHERE, handle.GetMemoryObject(), 0, FALSE,
+        VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE);
+    if (kr != KERN_SUCCESS)
+      return false;
+    scoper.reset(reinterpret_cast<vm_address_t>(temp_addr),
+                 mach_vm_round_page(size));
+  }
+
+  // Make new memory object.
+  mach_port_t named_right;
+  kern_return_t kr = mach_make_memory_entry_64(
+      mach_task_self(), reinterpret_cast<memory_object_size_t*>(&size),
+      reinterpret_cast<memory_object_offset_t>(temp_addr), VM_PROT_READ,
+      &named_right, MACH_PORT_NULL);
+  if (kr != KERN_SUCCESS)
+    return false;
+
+  *new_handle = SharedMemoryHandle(named_right, size, handle.GetGUID());
+  return true;
+}
+
+}  // namespace
+
+SharedMemory::SharedMemory() {}
+
+SharedMemory::SharedMemory(const SharedMemoryHandle& handle, bool read_only)
+    : mapped_memory_mechanism_(SharedMemoryHandle::POSIX),
+      shm_(handle),
+      read_only_(read_only) {}
+
+SharedMemory::~SharedMemory() {
+  Unmap();
+  Close();
+}
+
+// static
+bool SharedMemory::IsHandleValid(const SharedMemoryHandle& handle) {
+  return handle.IsValid();
+}
+
+// static
+void SharedMemory::CloseHandle(const SharedMemoryHandle& handle) {
+  handle.Close();
+}
+
+// static
+size_t SharedMemory::GetHandleLimit() {
+  return GetMaxFds();
+}
+
+// static
+SharedMemoryHandle SharedMemory::DuplicateHandle(
+    const SharedMemoryHandle& handle) {
+  return handle.Duplicate();
+}
+
+// static
+int SharedMemory::GetFdFromSharedMemoryHandle(
+    const SharedMemoryHandle& handle) {
+  return handle.file_descriptor_.fd;
+}
+
+bool SharedMemory::CreateAndMapAnonymous(size_t size) {
+  return CreateAnonymous(size) && Map(size);
+}
+
+// Chromium mostly only uses the unique/private shmem, specified by an empty
+// name (name == L""). The exception is in the StatsTable.
+bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
+  DCHECK(!shm_.IsValid());
+  if (options.size == 0)
+    return false;
+
+  if (options.size > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return false;
+
+  if (options.type == SharedMemoryHandle::MACH) {
+    shm_ = SharedMemoryHandle(options.size, UnguessableToken::Create());
+    requested_size_ = options.size;
+    return shm_.IsValid();
+  }
+
+  // This function theoretically can block on the disk. Both profiling of real
+  // users and local instrumentation shows that this is a real problem.
+  // https://code.google.com/p/chromium/issues/detail?id=466437
+  ThreadRestrictions::ScopedAllowIO allow_io;
+
+  ScopedFD fd;
+  ScopedFD readonly_fd;
+
+  FilePath path;
+  bool result = CreateAnonymousSharedMemory(options, &fd, &readonly_fd, &path);
+  if (!result)
+    return false;
+  // Should be guaranteed by CreateAnonymousSharedMemory().
+  DCHECK(fd.is_valid());
+
+  // Get current size.
+  struct stat stat;
+  if (fstat(fd.get(), &stat) != 0)
+    return false;
+  const size_t current_size = stat.st_size;
+  if (current_size != options.size) {
+    if (HANDLE_EINTR(ftruncate(fd.get(), options.size)) != 0)
+      return false;
+  }
+  requested_size_ = options.size;
+
+  int mapped_file = -1;
+  int readonly_mapped_file = -1;
+  result = PrepareMapFile(std::move(fd), std::move(readonly_fd), &mapped_file,
+                          &readonly_mapped_file);
+  shm_ = SharedMemoryHandle(FileDescriptor(mapped_file, false), options.size,
+                            UnguessableToken::Create());
+  readonly_shm_ =
+      SharedMemoryHandle(FileDescriptor(readonly_mapped_file, false),
+                         options.size, shm_.GetGUID());
+  return result;
+}
+
+bool SharedMemory::MapAt(off_t offset, size_t bytes) {
+  if (!shm_.IsValid())
+    return false;
+  if (bytes > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return false;
+  if (memory_)
+    return false;
+
+  bool success = shm_.MapAt(offset, bytes, &memory_, read_only_);
+  if (success) {
+    mapped_size_ = bytes;
+    DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(memory_) &
+                      (SharedMemory::MAP_MINIMUM_ALIGNMENT - 1));
+    mapped_memory_mechanism_ = shm_.type_;
+    mapped_id_ = shm_.GetGUID();
+    SharedMemoryTracker::GetInstance()->IncrementMemoryUsage(*this);
+  } else {
+    memory_ = nullptr;
+  }
+
+  return success;
+}
+
+bool SharedMemory::Unmap() {
+  if (!memory_)
+    return false;
+
+  SharedMemoryTracker::GetInstance()->DecrementMemoryUsage(*this);
+  switch (mapped_memory_mechanism_) {
+    case SharedMemoryHandle::POSIX:
+      munmap(memory_, mapped_size_);
+      break;
+    case SharedMemoryHandle::MACH:
+      mach_vm_deallocate(mach_task_self(),
+                         reinterpret_cast<mach_vm_address_t>(memory_),
+                         mapped_size_);
+      break;
+  }
+  memory_ = nullptr;
+  mapped_size_ = 0;
+  mapped_id_ = UnguessableToken();
+  return true;
+}
+
+SharedMemoryHandle SharedMemory::handle() const {
+  return shm_;
+}
+
+SharedMemoryHandle SharedMemory::TakeHandle() {
+  SharedMemoryHandle dup = DuplicateHandle(handle());
+  Unmap();
+  Close();
+  return dup;
+}
+
+void SharedMemory::Close() {
+  shm_.Close();
+  shm_ = SharedMemoryHandle();
+  // |readonly_shm_| is only ever populated on the POSIX path, so checking its
+  // validity directly is enough; |shm_.type_| can't be consulted here because
+  // |shm_| was just reset to a default-constructed handle.
+  if (readonly_shm_.IsValid()) {
+    readonly_shm_.Close();
+    readonly_shm_ = SharedMemoryHandle();
+  }
+}
+
+SharedMemoryHandle SharedMemory::GetReadOnlyHandle() const {
+  if (shm_.type_ == SharedMemoryHandle::POSIX) {
+    // We could imagine re-opening the file from /dev/fd, but that can't make it
+    // readonly on Mac: https://codereview.chromium.org/27265002/#msg10.
+    CHECK(readonly_shm_.IsValid());
+    return readonly_shm_.Duplicate();
+  }
+
+  DCHECK(shm_.IsValid());
+  SharedMemoryHandle new_handle;
+  bool success = MakeMachSharedMemoryHandleReadOnly(&new_handle, shm_, memory_);
+  if (success)
+    new_handle.SetOwnershipPassesToIPC(true);
+  return new_handle;
+}
+
+}  // namespace base
diff --git a/base/memory/shared_memory_mac_unittest.cc b/base/memory/shared_memory_mac_unittest.cc
new file mode 100644
index 0000000..b17dab7
--- /dev/null
+++ b/base/memory/shared_memory_mac_unittest.cc
@@ -0,0 +1,457 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <mach/mach.h>
+#include <mach/mach_vm.h>
+#include <servers/bootstrap.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/command_line.h"
+#include "base/mac/mac_util.h"
+#include "base/mac/mach_logging.h"
+#include "base/mac/scoped_mach_port.h"
+#include "base/macros.h"
+#include "base/memory/shared_memory.h"
+#include "base/process/process_handle.h"
+#include "base/rand_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/sys_info.h"
+#include "base/test/multiprocess_test.h"
+#include "base/test/test_timeouts.h"
+#include "base/unguessable_token.h"
+#include "testing/multiprocess_func_list.h"
+
+namespace base {
+
+namespace {
+
+// Gets the current and maximum protection levels of the memory region.
+// Returns whether the operation was successful.
+// |current| and |max| are output variables only populated on success.
+bool GetProtections(void* address, size_t size, int* current, int* max) {
+  vm_region_info_t region_info;
+  mach_vm_address_t mem_address = reinterpret_cast<mach_vm_address_t>(address);
+  mach_vm_size_t mem_size = size;
+  vm_region_basic_info_64 basic_info;
+
+  region_info = reinterpret_cast<vm_region_recurse_info_t>(&basic_info);
+  vm_region_flavor_t flavor = VM_REGION_BASIC_INFO_64;
+  memory_object_name_t memory_object;
+  mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
+
+  kern_return_t kr =
+      mach_vm_region(mach_task_self(), &mem_address, &mem_size, flavor,
+                     region_info, &count, &memory_object);
+  if (kr != KERN_SUCCESS) {
+    MACH_LOG(ERROR, kr) << "Failed to get region info.";
+    return false;
+  }
+
+  *current = basic_info.protection;
+  *max = basic_info.max_protection;
+  return true;
+}
+
+// Creates a new SharedMemory with the given |size|, filled with 'a'.
+std::unique_ptr<SharedMemory> CreateSharedMemory(int size) {
+  SharedMemoryHandle shm(size, UnguessableToken::Create());
+  if (!shm.IsValid()) {
+    LOG(ERROR) << "Failed to make SharedMemoryHandle";
+    return nullptr;
+  }
+  std::unique_ptr<SharedMemory> shared_memory(new SharedMemory(shm, false));
+  shared_memory->Map(size);
+  memset(shared_memory->memory(), 'a', size);
+  return shared_memory;
+}
+
+static const std::string g_service_switch_name = "service_name";
+
+// Structs used to pass a mach port from client to server.
+struct MachSendPortMessage {
+  mach_msg_header_t header;
+  mach_msg_body_t body;
+  mach_msg_port_descriptor_t data;
+};
+struct MachReceivePortMessage {
+  mach_msg_header_t header;
+  mach_msg_body_t body;
+  mach_msg_port_descriptor_t data;
+  mach_msg_trailer_t trailer;
+};
+
+// Makes the current process into a Mach Server with the given |service_name|.
+mach_port_t BecomeMachServer(const char* service_name) {
+  mach_port_t port;
+  kern_return_t kr = bootstrap_check_in(bootstrap_port, service_name, &port);
+  MACH_CHECK(kr == KERN_SUCCESS, kr) << "BecomeMachServer";
+  return port;
+}
+
+// Returns the mach port for the Mach Server with the given |service_name|.
+mach_port_t LookupServer(const char* service_name) {
+  mach_port_t server_port;
+  kern_return_t kr =
+      bootstrap_look_up(bootstrap_port, service_name, &server_port);
+  MACH_CHECK(kr == KERN_SUCCESS, kr) << "LookupServer";
+  return server_port;
+}
+
+mach_port_t MakeReceivingPort() {
+  mach_port_t client_port;
+  kern_return_t kr =
+      mach_port_allocate(mach_task_self(),         // our task is acquiring
+                         MACH_PORT_RIGHT_RECEIVE,  // a new receive right
+                         &client_port);            // with this name
+  MACH_CHECK(kr == KERN_SUCCESS, kr) << "MakeReceivingPort";
+  return client_port;
+}
+
+// Blocks until a mach message is sent to |port_to_listen_on|. This mach
+// message must contain a mach port. Returns that mach port.
+mach_port_t ReceiveMachPort(mach_port_t port_to_listen_on) {
+  MachReceivePortMessage recv_msg;
+  mach_msg_header_t* recv_hdr = &(recv_msg.header);
+  recv_hdr->msgh_local_port = port_to_listen_on;
+  recv_hdr->msgh_size = sizeof(recv_msg);
+  kern_return_t kr =
+      mach_msg(recv_hdr,               // message buffer
+               MACH_RCV_MSG,           // option indicating service
+               0,                      // send size
+               recv_hdr->msgh_size,    // size of header + body
+               port_to_listen_on,      // receive name
+               MACH_MSG_TIMEOUT_NONE,  // no timeout, wait forever
+               MACH_PORT_NULL);        // no notification port
+  MACH_CHECK(kr == KERN_SUCCESS, kr) << "ReceiveMachPort";
+  mach_port_t other_task_port = recv_msg.data.name;
+  return other_task_port;
+}
+
+// Passes a right to |port_to_send| to |receiving_port|. |disposition| selects
+// the right that is sent, e.g. MACH_MSG_TYPE_COPY_SEND or
+// MACH_MSG_TYPE_MAKE_SEND.
+void SendMachPort(mach_port_t receiving_port,
+                  mach_port_t port_to_send,
+                  int disposition) {
+  MachSendPortMessage send_msg;
+  mach_msg_header_t* send_hdr;
+  send_hdr = &(send_msg.header);
+  send_hdr->msgh_bits =
+      MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0) | MACH_MSGH_BITS_COMPLEX;
+  send_hdr->msgh_size = sizeof(send_msg);
+  send_hdr->msgh_remote_port = receiving_port;
+  send_hdr->msgh_local_port = MACH_PORT_NULL;
+  send_hdr->msgh_reserved = 0;
+  send_hdr->msgh_id = 0;
+  send_msg.body.msgh_descriptor_count = 1;
+  send_msg.data.name = port_to_send;
+  send_msg.data.disposition = disposition;
+  send_msg.data.type = MACH_MSG_PORT_DESCRIPTOR;
+  int kr = mach_msg(send_hdr,               // message buffer
+                    MACH_SEND_MSG,          // option indicating send
+                    send_hdr->msgh_size,    // size of header + body
+                    0,                      // receive limit
+                    MACH_PORT_NULL,         // receive name
+                    MACH_MSG_TIMEOUT_NONE,  // no timeout, wait forever
+                    MACH_PORT_NULL);        // no notification port
+  MACH_CHECK(kr == KERN_SUCCESS, kr) << "SendMachPort";
+}
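+
+// Illustrative sketch of the handshake the tests below rely on ("svc" is a
+// placeholder service name):
+//
+//   // Server:
+//   mach_port_t server_port = BecomeMachServer("svc");
+//   mach_port_t client_port = ReceiveMachPort(server_port);
+//   SendMachPort(client_port, memory_object, MACH_MSG_TYPE_COPY_SEND);
+//
+//   // Client:
+//   mach_port_t server_port = LookupServer("svc");
+//   mach_port_t my_port = MakeReceivingPort();
+//   SendMachPort(server_port, my_port, MACH_MSG_TYPE_MAKE_SEND);
+//   mach_port_t memory_object = ReceiveMachPort(my_port);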
+
+std::string CreateRandomServiceName() {
+  return StringPrintf("SharedMemoryMacMultiProcessTest.%llu", RandUint64());
+}
+
+// Sets up the mach communication ports with the server. Returns a port to which
+// the server will send mach objects.
+mach_port_t CommonChildProcessSetUp() {
+  CommandLine cmd_line = *CommandLine::ForCurrentProcess();
+  std::string service_name =
+      cmd_line.GetSwitchValueASCII(g_service_switch_name);
+  mac::ScopedMachSendRight server_port(LookupServer(service_name.c_str()));
+  mach_port_t client_port = MakeReceivingPort();
+
+  // Send the port that this process is listening on to the server.
+  SendMachPort(server_port.get(), client_port, MACH_MSG_TYPE_MAKE_SEND);
+  return client_port;
+}
+
+// The number of active names in the current task's port name space.
+mach_msg_type_number_t GetActiveNameCount() {
+  mach_port_name_array_t name_array;
+  mach_msg_type_number_t names_count;
+  mach_port_type_array_t type_array;
+  mach_msg_type_number_t types_count;
+  kern_return_t kr = mach_port_names(mach_task_self(), &name_array,
+                                     &names_count, &type_array, &types_count);
+  MACH_CHECK(kr == KERN_SUCCESS, kr) << "GetActiveNameCount";
+  return names_count;
+}
+
+}  // namespace
+
+class SharedMemoryMacMultiProcessTest : public MultiProcessTest {
+ public:
+  SharedMemoryMacMultiProcessTest() {}
+
+  CommandLine MakeCmdLine(const std::string& procname) override {
+    CommandLine command_line = MultiProcessTest::MakeCmdLine(procname);
+    // Pass the service name to the child process.
+    command_line.AppendSwitchASCII(g_service_switch_name, service_name_);
+    return command_line;
+  }
+
+  void SetUpChild(const std::string& name) {
+    // Make a random service name so that this test doesn't conflict with other
+    // similar tests.
+    service_name_ = CreateRandomServiceName();
+    server_port_.reset(BecomeMachServer(service_name_.c_str()));
+    child_process_ = SpawnChild(name);
+    client_port_.reset(ReceiveMachPort(server_port_.get()));
+  }
+
+  static const int s_memory_size = 99999;
+
+ protected:
+  std::string service_name_;
+
+  // A port on which the main process listens for mach messages from the child
+  // process.
+  mac::ScopedMachReceiveRight server_port_;
+
+  // A port on which the child process listens for mach messages from the main
+  // process.
+  mac::ScopedMachSendRight client_port_;
+
+  base::Process child_process_;
+  DISALLOW_COPY_AND_ASSIGN(SharedMemoryMacMultiProcessTest);
+};
+
+// Tests that content written to shared memory in the server process can be read
+// by the child process.
+TEST_F(SharedMemoryMacMultiProcessTest, MachBasedSharedMemory) {
+  SetUpChild("MachBasedSharedMemoryClient");
+
+  std::unique_ptr<SharedMemory> shared_memory(
+      CreateSharedMemory(s_memory_size));
+
+  // Send the underlying memory object to the client process.
+  SendMachPort(client_port_.get(), shared_memory->handle().GetMemoryObject(),
+               MACH_MSG_TYPE_COPY_SEND);
+  int rv = -1;
+  ASSERT_TRUE(child_process_.WaitForExitWithTimeout(
+      TestTimeouts::action_timeout(), &rv));
+  EXPECT_EQ(0, rv);
+}
+
+MULTIPROCESS_TEST_MAIN(MachBasedSharedMemoryClient) {
+  mac::ScopedMachReceiveRight client_port(CommonChildProcessSetUp());
+  // The next mach port should be for a memory object.
+  mach_port_t memory_object = ReceiveMachPort(client_port.get());
+  SharedMemoryHandle shm(memory_object,
+                         SharedMemoryMacMultiProcessTest::s_memory_size,
+                         UnguessableToken::Create());
+  SharedMemory shared_memory(shm, false);
+  shared_memory.Map(SharedMemoryMacMultiProcessTest::s_memory_size);
+  const char* start = static_cast<const char*>(shared_memory.memory());
+  for (int i = 0; i < SharedMemoryMacMultiProcessTest::s_memory_size; ++i) {
+    DCHECK_EQ(start[i], 'a');
+  }
+  return 0;
+}
+
+// Tests that mapping shared memory with an offset works correctly.
+TEST_F(SharedMemoryMacMultiProcessTest, MachBasedSharedMemoryWithOffset) {
+  SetUpChild("MachBasedSharedMemoryWithOffsetClient");
+
+  SharedMemoryHandle shm(s_memory_size, UnguessableToken::Create());
+  ASSERT_TRUE(shm.IsValid());
+  SharedMemory shared_memory(shm, false);
+  shared_memory.Map(s_memory_size);
+
+  size_t page_size = SysInfo::VMAllocationGranularity();
+  char* start = static_cast<char*>(shared_memory.memory());
+  memset(start, 'a', page_size);
+  memset(start + page_size, 'b', page_size);
+  memset(start + 2 * page_size, 'c', page_size);
+
+  // Send the underlying memory object to the client process.
+  SendMachPort(
+      client_port_.get(), shm.GetMemoryObject(), MACH_MSG_TYPE_COPY_SEND);
+  int rv = -1;
+  ASSERT_TRUE(child_process_.WaitForExitWithTimeout(
+      TestTimeouts::action_timeout(), &rv));
+  EXPECT_EQ(0, rv);
+}
+
+MULTIPROCESS_TEST_MAIN(MachBasedSharedMemoryWithOffsetClient) {
+  mac::ScopedMachReceiveRight client_port(CommonChildProcessSetUp());
+  // The next mach port should be for a memory object.
+  mach_port_t memory_object = ReceiveMachPort(client_port.get());
+  SharedMemoryHandle shm(memory_object,
+                         SharedMemoryMacMultiProcessTest::s_memory_size,
+                         UnguessableToken::Create());
+  SharedMemory shared_memory(shm, false);
+  size_t page_size = SysInfo::VMAllocationGranularity();
+  shared_memory.MapAt(page_size, 2 * page_size);
+  const char* start = static_cast<const char*>(shared_memory.memory());
+  for (size_t i = 0; i < page_size; ++i) {
+    DCHECK_EQ(start[i], 'b');
+  }
+  for (size_t i = page_size; i < 2 * page_size; ++i) {
+    DCHECK_EQ(start[i], 'c');
+  }
+  return 0;
+}
+
+// Tests that duplication and closing has the right effect on Mach reference
+// counts.
+TEST_F(SharedMemoryMacMultiProcessTest, MachDuplicateAndClose) {
+  mach_msg_type_number_t active_name_count = GetActiveNameCount();
+
+  // Making a new SharedMemoryHandle increments the name count.
+  SharedMemoryHandle shm(s_memory_size, UnguessableToken::Create());
+  ASSERT_TRUE(shm.IsValid());
+  EXPECT_EQ(active_name_count + 1, GetActiveNameCount());
+
+  // Duplicating the SharedMemoryHandle increments the ref count, but doesn't
+  // make a new name.
+  shm.Duplicate();
+  EXPECT_EQ(active_name_count + 1, GetActiveNameCount());
+
+  // Closing the SharedMemoryHandle decrements the ref count. The first time has
+  // no effect.
+  shm.Close();
+  EXPECT_EQ(active_name_count + 1, GetActiveNameCount());
+
+  // Closing the SharedMemoryHandle decrements the ref count. The second time
+  // destroys the port.
+  shm.Close();
+  EXPECT_EQ(active_name_count, GetActiveNameCount());
+}
+
+// Tests that Mach shared memory can be mapped and unmapped.
+TEST_F(SharedMemoryMacMultiProcessTest, MachUnmapMap) {
+  mach_msg_type_number_t active_name_count = GetActiveNameCount();
+
+  std::unique_ptr<SharedMemory> shared_memory =
+      CreateSharedMemory(s_memory_size);
+  ASSERT_TRUE(shared_memory->Unmap());
+  ASSERT_TRUE(shared_memory->Map(s_memory_size));
+  shared_memory.reset();
+  EXPECT_EQ(active_name_count, GetActiveNameCount());
+}
+
+// Tests that passing a SharedMemoryHandle to a SharedMemory object also passes
+// ownership, and that destroying the SharedMemory closes the SharedMemoryHandle
+// as well.
+TEST_F(SharedMemoryMacMultiProcessTest, MachSharedMemoryTakesOwnership) {
+  mach_msg_type_number_t active_name_count = GetActiveNameCount();
+
+  // Making a new SharedMemoryHandle increments the name count.
+  SharedMemoryHandle shm(s_memory_size, UnguessableToken::Create());
+  ASSERT_TRUE(shm.IsValid());
+  EXPECT_EQ(active_name_count + 1, GetActiveNameCount());
+
+  // Name count doesn't change when mapping the memory.
+  std::unique_ptr<SharedMemory> shared_memory(new SharedMemory(shm, false));
+  shared_memory->Map(s_memory_size);
+  EXPECT_EQ(active_name_count + 1, GetActiveNameCount());
+
+  // Destroying the SharedMemory object frees the resource.
+  shared_memory.reset();
+  EXPECT_EQ(active_name_count, GetActiveNameCount());
+}
+
+// Tests that the read-only flag works.
+TEST_F(SharedMemoryMacMultiProcessTest, MachReadOnly) {
+  std::unique_ptr<SharedMemory> shared_memory(
+      CreateSharedMemory(s_memory_size));
+
+  SharedMemoryHandle shm2 = shared_memory->handle().Duplicate();
+  ASSERT_TRUE(shm2.IsValid());
+  SharedMemory shared_memory2(shm2, true);
+  shared_memory2.Map(s_memory_size);
+  ASSERT_DEATH(memset(shared_memory2.memory(), 'b', s_memory_size), "");
+}
+
+// Tests that duplication of the underlying handle works.
+TEST_F(SharedMemoryMacMultiProcessTest, MachDuplicate) {
+  mach_msg_type_number_t active_name_count = GetActiveNameCount();
+
+  {
+    std::unique_ptr<SharedMemory> shared_memory(
+        CreateSharedMemory(s_memory_size));
+
+    SharedMemoryHandle shm2 = shared_memory->handle().Duplicate();
+    ASSERT_TRUE(shm2.IsValid());
+    SharedMemory shared_memory2(shm2, true);
+    shared_memory2.Map(s_memory_size);
+
+    ASSERT_EQ(0, memcmp(shared_memory->memory(), shared_memory2.memory(),
+                        s_memory_size));
+  }
+
+  EXPECT_EQ(active_name_count, GetActiveNameCount());
+}
+
+// Tests that the method GetReadOnlyHandle() creates a memory object that
+// is read only.
+TEST_F(SharedMemoryMacMultiProcessTest, MachReadonly) {
+  std::unique_ptr<SharedMemory> shared_memory(
+      CreateSharedMemory(s_memory_size));
+
+  // Check the protection levels.
+  int current_prot, max_prot;
+  ASSERT_TRUE(GetProtections(shared_memory->memory(),
+                             shared_memory->mapped_size(), &current_prot,
+                             &max_prot));
+  ASSERT_EQ(VM_PROT_READ | VM_PROT_WRITE, current_prot);
+  ASSERT_EQ(VM_PROT_READ | VM_PROT_WRITE, max_prot);
+
+  // Make a new memory object.
+  SharedMemoryHandle shm2 = shared_memory->GetReadOnlyHandle();
+  ASSERT_TRUE(shm2.IsValid());
+  EXPECT_EQ(shared_memory->handle().GetGUID(), shm2.GetGUID());
+
+  // Mapping with |readonly| set to |false| should fail.
+  SharedMemory shared_memory2(shm2, false);
+  shared_memory2.Map(s_memory_size);
+  ASSERT_EQ(nullptr, shared_memory2.memory());
+
+  // Now try mapping with |readonly| set to |true|.
+  SharedMemory shared_memory3(shm2.Duplicate(), true);
+  shared_memory3.Map(s_memory_size);
+  ASSERT_NE(nullptr, shared_memory3.memory());
+
+  // Check the protection levels.
+  ASSERT_TRUE(GetProtections(shared_memory3.memory(),
+                             shared_memory3.mapped_size(), &current_prot,
+                             &max_prot));
+  ASSERT_EQ(VM_PROT_READ, current_prot);
+  ASSERT_EQ(VM_PROT_READ, max_prot);
+
+  // The memory should still be readonly, since the underlying memory object
+  // is readonly.
+  ASSERT_DEATH(memset(shared_memory2.memory(), 'b', s_memory_size), "");
+}
+
+// Tests that the method GetReadOnlyHandle() doesn't leak.
+TEST_F(SharedMemoryMacMultiProcessTest, MachReadonlyLeak) {
+  mach_msg_type_number_t active_name_count = GetActiveNameCount();
+
+  {
+    std::unique_ptr<SharedMemory> shared_memory(
+        CreateSharedMemory(s_memory_size));
+
+    SharedMemoryHandle shm2 = shared_memory->GetReadOnlyHandle();
+    ASSERT_TRUE(shm2.IsValid());
+
+    // Intentionally map with |readonly| set to |false|.
+    SharedMemory shared_memory2(shm2, false);
+    shared_memory2.Map(s_memory_size);
+  }
+
+  EXPECT_EQ(active_name_count, GetActiveNameCount());
+}
+
+}  // namespace base
diff --git a/base/memory/shared_memory_mapping.cc b/base/memory/shared_memory_mapping.cc
new file mode 100644
index 0000000..005e3fc
--- /dev/null
+++ b/base/memory/shared_memory_mapping.cc
@@ -0,0 +1,115 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory_mapping.h"
+
+#include <utility>
+
+#include "base/logging.h"
+#include "base/memory/shared_memory_tracker.h"
+#include "base/unguessable_token.h"
+#include "build/build_config.h"
+
+#if defined(OS_POSIX)
+#include <sys/mman.h>
+#endif
+
+#if defined(OS_WIN)
+#include <aclapi.h>
+#endif
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+#include <mach/mach_vm.h>
+#include "base/mac/mach_logging.h"
+#endif
+
+#if defined(OS_FUCHSIA)
+#include <zircon/process.h>
+#include <zircon/status.h>
+#include <zircon/syscalls.h>
+#endif
+
+namespace base {
+
+SharedMemoryMapping::SharedMemoryMapping() = default;
+
+SharedMemoryMapping::SharedMemoryMapping(SharedMemoryMapping&& mapping)
+    : memory_(mapping.memory_),
+      size_(mapping.size_),
+      mapped_size_(mapping.mapped_size_),
+      guid_(mapping.guid_) {
+  mapping.memory_ = nullptr;
+}
+
+SharedMemoryMapping& SharedMemoryMapping::operator=(
+    SharedMemoryMapping&& mapping) {
+  Unmap();
+  memory_ = mapping.memory_;
+  size_ = mapping.size_;
+  mapped_size_ = mapping.mapped_size_;
+  guid_ = mapping.guid_;
+  mapping.memory_ = nullptr;
+  return *this;
+}
+
+SharedMemoryMapping::~SharedMemoryMapping() {
+  Unmap();
+}
+
+SharedMemoryMapping::SharedMemoryMapping(void* memory,
+                                         size_t size,
+                                         size_t mapped_size,
+                                         const UnguessableToken& guid)
+    : memory_(memory), size_(size), mapped_size_(mapped_size), guid_(guid) {
+  SharedMemoryTracker::GetInstance()->IncrementMemoryUsage(*this);
+}
+
+void SharedMemoryMapping::Unmap() {
+  if (!IsValid())
+    return;
+
+  SharedMemoryTracker::GetInstance()->DecrementMemoryUsage(*this);
+#if defined(OS_WIN)
+  if (!UnmapViewOfFile(memory_))
+    DPLOG(ERROR) << "UnmapViewOfFile";
+#elif defined(OS_FUCHSIA)
+  uintptr_t addr = reinterpret_cast<uintptr_t>(memory_);
+  zx_status_t status = zx_vmar_unmap(zx_vmar_root_self(), addr, size_);
+  DLOG_IF(ERROR, status != ZX_OK)
+      << "zx_vmar_unmap failed: " << zx_status_get_string(status);
+#elif defined(OS_MACOSX) && !defined(OS_IOS)
+  kern_return_t kr = mach_vm_deallocate(
+      mach_task_self(), reinterpret_cast<mach_vm_address_t>(memory_), size_);
+  MACH_DLOG_IF(ERROR, kr != KERN_SUCCESS, kr) << "mach_vm_deallocate";
+#else
+  if (munmap(memory_, size_) < 0)
+    DPLOG(ERROR) << "munmap";
+#endif
+}
+
+ReadOnlySharedMemoryMapping::ReadOnlySharedMemoryMapping() = default;
+ReadOnlySharedMemoryMapping::ReadOnlySharedMemoryMapping(
+    ReadOnlySharedMemoryMapping&&) = default;
+ReadOnlySharedMemoryMapping& ReadOnlySharedMemoryMapping::operator=(
+    ReadOnlySharedMemoryMapping&&) = default;
+ReadOnlySharedMemoryMapping::ReadOnlySharedMemoryMapping(
+    void* address,
+    size_t size,
+    size_t mapped_size,
+    const UnguessableToken& guid)
+    : SharedMemoryMapping(address, size, mapped_size, guid) {}
+
+WritableSharedMemoryMapping::WritableSharedMemoryMapping() = default;
+WritableSharedMemoryMapping::WritableSharedMemoryMapping(
+    WritableSharedMemoryMapping&&) = default;
+WritableSharedMemoryMapping& WritableSharedMemoryMapping::operator=(
+    WritableSharedMemoryMapping&&) = default;
+WritableSharedMemoryMapping::WritableSharedMemoryMapping(
+    void* address,
+    size_t size,
+    size_t mapped_size,
+    const UnguessableToken& guid)
+    : SharedMemoryMapping(address, size, mapped_size, guid) {}
+
+}  // namespace base
diff --git a/base/memory/shared_memory_mapping.h b/base/memory/shared_memory_mapping.h
new file mode 100644
index 0000000..ace4c15
--- /dev/null
+++ b/base/memory/shared_memory_mapping.h
@@ -0,0 +1,144 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_SHARED_MEMORY_MAPPING_H_
+#define BASE_MEMORY_SHARED_MEMORY_MAPPING_H_
+
+#include <cstddef>
+
+#include "base/macros.h"
+#include "base/unguessable_token.h"
+
+namespace base {
+
+namespace subtle {
+class PlatformSharedMemoryRegion;
+}  // namespace subtle
+
+// Base class for scoped handles to a shared memory mapping created from a
+// shared memory region. Created shared memory mappings remain valid even if the
+// creator region is transferred or destroyed.
+//
+// Each mapping has an UnguessableToken that identifies the shared memory region
+// it was created from. This is used for memory metrics, to avoid overcounting
+// shared memory.
+class BASE_EXPORT SharedMemoryMapping {
+ public:
+  // Default constructor initializes an invalid instance.
+  SharedMemoryMapping();
+
+  // Move operations are allowed.
+  SharedMemoryMapping(SharedMemoryMapping&& mapping);
+  SharedMemoryMapping& operator=(SharedMemoryMapping&& mapping);
+
+  // Unmaps the region if the mapping is valid.
+  virtual ~SharedMemoryMapping();
+
+  // Returns true iff the mapping is valid. False means there is no
+  // corresponding area of memory.
+  bool IsValid() const { return memory_ != nullptr; }
+
+  // Returns the logical size of the mapping in bytes. This is precisely the
+  // size requested by whoever created the mapping, and it is always less than
+  // or equal to |mapped_size()|. This is undefined for invalid instances.
+  size_t size() const {
+    DCHECK(IsValid());
+    return size_;
+  }
+
+  // Returns the actual size of the mapping in bytes. This is always at least
+  // as large as |size()| but may be larger due to platform mapping alignment
+  // constraints. This is undefined for invalid instances.
+  size_t mapped_size() const {
+    DCHECK(IsValid());
+    return mapped_size_;
+  }
+
+  // Returns 128-bit GUID of the region this mapping belongs to.
+  const UnguessableToken& guid() const {
+    DCHECK(IsValid());
+    return guid_;
+  }
+
+ protected:
+  SharedMemoryMapping(void* address,
+                      size_t size,
+                      size_t mapped_size,
+                      const UnguessableToken& guid);
+  void* raw_memory_ptr() const { return memory_; }
+
+ private:
+  friend class SharedMemoryTracker;
+
+  void Unmap();
+
+  void* memory_ = nullptr;
+  size_t size_ = 0;
+  size_t mapped_size_ = 0;
+  UnguessableToken guid_;
+
+  DISALLOW_COPY_AND_ASSIGN(SharedMemoryMapping);
+};
+
+// Class modeling a read-only mapping of a shared memory region into the
+// current process' address space. This is created by ReadOnlySharedMemoryRegion
+// instances.
+class BASE_EXPORT ReadOnlySharedMemoryMapping : public SharedMemoryMapping {
+ public:
+  // Default constructor initializes an invalid instance.
+  ReadOnlySharedMemoryMapping();
+
+  // Move operations are allowed.
+  ReadOnlySharedMemoryMapping(ReadOnlySharedMemoryMapping&&);
+  ReadOnlySharedMemoryMapping& operator=(ReadOnlySharedMemoryMapping&&);
+
+  // Returns the base address of the mapping. This is read-only memory. This is
+  // page-aligned. This is nullptr for invalid instances.
+  const void* memory() const { return raw_memory_ptr(); }
+
+ private:
+  friend class ReadOnlySharedMemoryRegion;
+  ReadOnlySharedMemoryMapping(void* address,
+                              size_t size,
+                              size_t mapped_size,
+                              const UnguessableToken& guid);
+
+  DISALLOW_COPY_AND_ASSIGN(ReadOnlySharedMemoryMapping);
+};
+
+// Class modeling a writable mapping of a shared memory region into the
+// current process' address space. This is created by *SharedMemoryRegion
+// instances.
+class BASE_EXPORT WritableSharedMemoryMapping : public SharedMemoryMapping {
+ public:
+  // Default constructor initializes an invalid instance.
+  WritableSharedMemoryMapping();
+
+  // Move operations are allowed.
+  WritableSharedMemoryMapping(WritableSharedMemoryMapping&&);
+  WritableSharedMemoryMapping& operator=(WritableSharedMemoryMapping&&);
+
+  // Returns the base address of the mapping. This is writable memory. This is
+  // page-aligned. This is nullptr for invalid instances.
+  void* memory() const { return raw_memory_ptr(); }
+
+ private:
+  friend WritableSharedMemoryMapping MapAtForTesting(
+      subtle::PlatformSharedMemoryRegion* region,
+      off_t offset,
+      size_t size);
+  friend class ReadOnlySharedMemoryRegion;
+  friend class WritableSharedMemoryRegion;
+  friend class UnsafeSharedMemoryRegion;
+  WritableSharedMemoryMapping(void* address,
+                              size_t size,
+                              size_t mapped_size,
+                              const UnguessableToken& guid);
+
+  DISALLOW_COPY_AND_ASSIGN(WritableSharedMemoryMapping);
+};
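+
+// Example (illustrative sketch): a mapping outlives the region it came from.
+// |region| is assumed to be a valid WritableSharedMemoryRegion, a class
+// defined elsewhere.
+//
+//   WritableSharedMemoryMapping mapping = region.Map();
+//   if (mapping.IsValid()) {
+//     memset(mapping.memory(), 0, mapping.size());
+//     // |mapping| stays usable even after |region| is closed or destroyed.
+//   }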
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_SHARED_MEMORY_MAPPING_H_
diff --git a/base/memory/shared_memory_nacl.cc b/base/memory/shared_memory_nacl.cc
new file mode 100644
index 0000000..4bcbb54
--- /dev/null
+++ b/base/memory/shared_memory_nacl.cc
@@ -0,0 +1,138 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stddef.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include <limits>
+
+#include "base/logging.h"
+#include "base/memory/shared_memory_tracker.h"
+
+namespace base {
+
+SharedMemory::SharedMemory()
+    : mapped_size_(0), memory_(NULL), read_only_(false), requested_size_(0) {}
+
+SharedMemory::SharedMemory(const SharedMemoryHandle& handle, bool read_only)
+    : shm_(handle),
+      mapped_size_(0),
+      memory_(NULL),
+      read_only_(read_only),
+      requested_size_(0) {}
+
+SharedMemory::~SharedMemory() {
+  Unmap();
+  Close();
+}
+
+// static
+bool SharedMemory::IsHandleValid(const SharedMemoryHandle& handle) {
+  return handle.IsValid();
+}
+
+// static
+void SharedMemory::CloseHandle(const SharedMemoryHandle& handle) {
+  DCHECK(handle.IsValid());
+  handle.Close();
+}
+
+// static
+SharedMemoryHandle SharedMemory::DuplicateHandle(
+    const SharedMemoryHandle& handle) {
+  return handle.Duplicate();
+}
+
+bool SharedMemory::CreateAndMapAnonymous(size_t size) {
+  // Untrusted code can't create descriptors or handles.
+  return false;
+}
+
+bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
+  // Untrusted code can't create descriptors or handles.
+  return false;
+}
+
+bool SharedMemory::Delete(const std::string& name) {
+  return false;
+}
+
+bool SharedMemory::Open(const std::string& name, bool read_only) {
+  return false;
+}
+
+bool SharedMemory::MapAt(off_t offset, size_t bytes) {
+  if (!shm_.IsValid())
+    return false;
+
+  if (bytes > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return false;
+
+  if (memory_)
+    return false;
+
+  memory_ = mmap(NULL, bytes, PROT_READ | (read_only_ ? 0 : PROT_WRITE),
+                 MAP_SHARED, shm_.GetHandle(), offset);
+
+  bool mmap_succeeded = memory_ != MAP_FAILED && memory_ != NULL;
+  if (mmap_succeeded) {
+    mapped_size_ = bytes;
+    DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(memory_) &
+        (SharedMemory::MAP_MINIMUM_ALIGNMENT - 1));
+    mapped_id_ = shm_.GetGUID();
+    SharedMemoryTracker::GetInstance()->IncrementMemoryUsage(*this);
+  } else {
+    memory_ = NULL;
+  }
+
+  return mmap_succeeded;
+}
+
+bool SharedMemory::Unmap() {
+  if (memory_ == NULL)
+    return false;
+
+  SharedMemoryTracker::GetInstance()->DecrementMemoryUsage(*this);
+  if (munmap(memory_, mapped_size_) < 0)
+    DPLOG(ERROR) << "munmap";
+  memory_ = NULL;
+  mapped_size_ = 0;
+  mapped_id_ = UnguessableToken();
+  return true;
+}
+
+SharedMemoryHandle SharedMemory::handle() const {
+  SharedMemoryHandle handle_copy = shm_;
+  handle_copy.SetOwnershipPassesToIPC(false);
+  return handle_copy;
+}
+
+SharedMemoryHandle SharedMemory::TakeHandle() {
+  SharedMemoryHandle handle_copy = shm_;
+  handle_copy.SetOwnershipPassesToIPC(true);
+  Unmap();
+  shm_ = SharedMemoryHandle();
+  return handle_copy;
+}
+
+void SharedMemory::Close() {
+  if (shm_.IsValid()) {
+    shm_.Close();
+    shm_ = SharedMemoryHandle();
+  }
+}
+
+SharedMemoryHandle SharedMemory::GetReadOnlyHandle() const {
+  // Untrusted code can't create descriptors or handles, which is needed to
+  // drop permissions.
+  return SharedMemoryHandle();
+}
+
+}  // namespace base
diff --git a/base/memory/shared_memory_posix.cc b/base/memory/shared_memory_posix.cc
new file mode 100644
index 0000000..d3163e5
--- /dev/null
+++ b/base/memory/shared_memory_posix.cc
@@ -0,0 +1,386 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stddef.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include <limits>
+
+#include "base/files/file_util.h"
+#include "base/files/scoped_file.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/shared_memory_helper.h"
+#include "base/memory/shared_memory_tracker.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/posix/safe_strerror.h"
+#include "base/process/process_metrics.h"
+#include "base/scoped_generic.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/trace_event/trace_event.h"
+#include "base/unguessable_token.h"
+#include "build/build_config.h"
+
+#if defined(OS_ANDROID)
+#include "base/os_compat_android.h"
+#include "third_party/ashmem/ashmem.h"
+#endif
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+#error "MacOS uses shared_memory_mac.cc"
+#endif
+
+namespace base {
+
+SharedMemory::SharedMemory() = default;
+
+SharedMemory::SharedMemory(const SharedMemoryHandle& handle, bool read_only)
+    : shm_(handle), read_only_(read_only) {}
+
+SharedMemory::~SharedMemory() {
+  Unmap();
+  Close();
+}
+
+// static
+bool SharedMemory::IsHandleValid(const SharedMemoryHandle& handle) {
+  return handle.IsValid();
+}
+
+// static
+void SharedMemory::CloseHandle(const SharedMemoryHandle& handle) {
+  DCHECK(handle.IsValid());
+  handle.Close();
+}
+
+// static
+size_t SharedMemory::GetHandleLimit() {
+  return GetMaxFds();
+}
+
+// static
+SharedMemoryHandle SharedMemory::DuplicateHandle(
+    const SharedMemoryHandle& handle) {
+  return handle.Duplicate();
+}
+
+// static
+int SharedMemory::GetFdFromSharedMemoryHandle(
+    const SharedMemoryHandle& handle) {
+  return handle.GetHandle();
+}
+
+bool SharedMemory::CreateAndMapAnonymous(size_t size) {
+  return CreateAnonymous(size) && Map(size);
+}
+
+#if !defined(OS_ANDROID)
+
+// Chromium mostly uses the unique/private shmem case, as specified by
+// name == L"". The exception is the StatsTable.
+// TODO(jrg): there is no way to "clean up" all unused named shmem if
+// we restart from a crash.  (That isn't a new problem, but it is a problem.)
+// In case we want to delete it later, it may be useful to save the value
+// of mem_filename after FilePathForMemoryName().
+bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
+  DCHECK(!shm_.IsValid());
+  if (options.size == 0)
+    return false;
+
+  if (options.size > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return false;
+
+  // This function theoretically can block on the disk, but realistically
+  // the temporary files we create will just go into the buffer cache
+  // and be deleted before they ever make it out to disk.
+  ThreadRestrictions::ScopedAllowIO allow_io;
+
+  bool fix_size = true;
+  ScopedFD fd;
+  ScopedFD readonly_fd;
+  FilePath path;
+  if (!options.name_deprecated || options.name_deprecated->empty()) {
+    bool result =
+        CreateAnonymousSharedMemory(options, &fd, &readonly_fd, &path);
+    if (!result)
+      return false;
+  } else {
+    if (!FilePathForMemoryName(*options.name_deprecated, &path))
+      return false;
+
+    // Make sure that the file is opened without any permission
+    // to other users on the system.
+    const mode_t kOwnerOnly = S_IRUSR | S_IWUSR;
+
+    // First, try to create the file.
+    fd.reset(HANDLE_EINTR(
+        open(path.value().c_str(), O_RDWR | O_CREAT | O_EXCL, kOwnerOnly)));
+    if (!fd.is_valid() && options.open_existing_deprecated) {
+      // If this doesn't work, try and open an existing file in append mode.
+      // Opening an existing file in a world writable directory has two main
+      // security implications:
+      // - Attackers could plant a file under their control, so ownership of
+      //   the file is checked below.
+      // - Attackers could plant a symbolic link so that an unexpected file
+      //   is opened, so O_NOFOLLOW is passed to open().
+#if !defined(OS_AIX)
+      fd.reset(HANDLE_EINTR(
+          open(path.value().c_str(), O_RDWR | O_APPEND | O_NOFOLLOW)));
+#else
+      // AIX has no 64-bit support for open flags such as O_CLOEXEC,
+      // O_NOFOLLOW and O_TTY_INIT.
+      fd.reset(HANDLE_EINTR(open(path.value().c_str(), O_RDWR | O_APPEND)));
+#endif
+      // Check that the current user owns the file.
+      // If uid != euid, then a more complex permission model is used and this
+      // API is not appropriate.
+      const uid_t real_uid = getuid();
+      const uid_t effective_uid = geteuid();
+      struct stat sb;
+      if (fd.is_valid() &&
+          (fstat(fd.get(), &sb) != 0 || sb.st_uid != real_uid ||
+           sb.st_uid != effective_uid)) {
+        LOG(ERROR) <<
+            "Invalid owner when opening existing shared memory file.";
+        // |fd| is a ScopedFD and closes itself; an explicit close() here
+        // would make the destructor close the same descriptor twice.
+        return false;
+      }
+
+      // An existing file was opened, so its size should not be fixed.
+      fix_size = false;
+    }
+
+    if (options.share_read_only) {
+      // Also open as readonly so that we can GetReadOnlyHandle.
+      readonly_fd.reset(HANDLE_EINTR(open(path.value().c_str(), O_RDONLY)));
+      if (!readonly_fd.is_valid()) {
+        DPLOG(ERROR) << "open(\"" << path.value() << "\", O_RDONLY) failed";
+        // |fd| and |readonly_fd| are ScopedFDs and close themselves here.
+        return false;
+      }
+    }
+    if (fd.is_valid()) {
+      // "a+" is always appropriate: if it's a new file, a+ is similar to w+.
+      if (!fdopen(fd.get(), "a+")) {
+        PLOG(ERROR) << "Creating file stream in " << path.value() << " failed";
+        return false;
+      }
+    }
+  }
+  if (fd.is_valid() && fix_size) {
+    // Get current size.
+    struct stat stat;
+    if (fstat(fd.get(), &stat) != 0)
+      return false;
+    const size_t current_size = stat.st_size;
+    if (current_size != options.size) {
+      if (HANDLE_EINTR(ftruncate(fd.get(), options.size)) != 0)
+        return false;
+    }
+    requested_size_ = options.size;
+  }
+  if (!fd.is_valid()) {
+    PLOG(ERROR) << "Creating shared memory in " << path.value() << " failed";
+    FilePath dir = path.DirName();
+    if (access(dir.value().c_str(), W_OK | X_OK) < 0) {
+      PLOG(ERROR) << "Unable to access(W_OK|X_OK) " << dir.value();
+      if (dir.value() == "/dev/shm") {
+        LOG(FATAL) << "This is frequently caused by incorrect permissions on "
+                   << "/dev/shm.  Try 'sudo chmod 1777 /dev/shm' to fix.";
+      }
+    }
+    return false;
+  }
+
+  int mapped_file = -1;
+  int readonly_mapped_file = -1;
+
+  bool result = PrepareMapFile(std::move(fd), std::move(readonly_fd),
+                               &mapped_file, &readonly_mapped_file);
+  shm_ = SharedMemoryHandle(FileDescriptor(mapped_file, false), options.size,
+                            UnguessableToken::Create());
+  readonly_shm_ =
+      SharedMemoryHandle(FileDescriptor(readonly_mapped_file, false),
+                         options.size, shm_.GetGUID());
+  return result;
+}
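+
+// A minimal usage sketch for the options consumed above (the values are
+// illustrative):
+//
+//   SharedMemoryCreateOptions options;
+//   options.size = 4096;
+//   options.share_read_only = true;  // Also keep a read-only descriptor.
+//   SharedMemory shm;
+//   if (shm.Create(options) && shm.Map(options.size)) {
+//     SharedMemoryHandle ro_handle = shm.GetReadOnlyHandle();
+//   }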
+
+// Our current implementation of shmem is based on mmap()ing files.
+// These files need to be deleted explicitly.
+// In practice this call is only needed for unit tests.
+bool SharedMemory::Delete(const std::string& name) {
+  FilePath path;
+  if (!FilePathForMemoryName(name, &path))
+    return false;
+
+  if (PathExists(path))
+    return DeleteFile(path, false);
+
+  // Doesn't exist, so success.
+  return true;
+}
+
+bool SharedMemory::Open(const std::string& name, bool read_only) {
+  FilePath path;
+  if (!FilePathForMemoryName(name, &path))
+    return false;
+
+  read_only_ = read_only;
+
+  int mode = read_only ? O_RDONLY : O_RDWR;
+  ScopedFD fd(HANDLE_EINTR(open(path.value().c_str(), mode)));
+  ScopedFD readonly_fd(HANDLE_EINTR(open(path.value().c_str(), O_RDONLY)));
+  if (!readonly_fd.is_valid()) {
+    DPLOG(ERROR) << "open(\"" << path.value() << "\", O_RDONLY) failed";
+    return false;
+  }
+  int mapped_file = -1;
+  int readonly_mapped_file = -1;
+  bool result = PrepareMapFile(std::move(fd), std::move(readonly_fd),
+                               &mapped_file, &readonly_mapped_file);
+  // This form of sharing shared memory is deprecated. https://crbug.com/345734.
+  // However, we can't get rid of it without a significant refactor because
+  // it's used to communicate between two versions of the same service process,
+  // very early in the life cycle.
+  // Technically, we should also pass the GUID from the original shared memory
+  // region. We don't do that - this means that we will overcount this memory,
+  // which thankfully isn't relevant since Chrome only communicates with a
+  // single version of the service process.
+  // We pass the size |0|, which is a dummy size and wrong, but otherwise
+  // harmless.
+  shm_ = SharedMemoryHandle(FileDescriptor(mapped_file, false), 0u,
+                            UnguessableToken::Create());
+  readonly_shm_ = SharedMemoryHandle(
+      FileDescriptor(readonly_mapped_file, false), 0, shm_.GetGUID());
+  return result;
+}
+#endif  // !defined(OS_ANDROID)
+
+bool SharedMemory::MapAt(off_t offset, size_t bytes) {
+  if (!shm_.IsValid())
+    return false;
+
+  if (bytes > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return false;
+
+  if (memory_)
+    return false;
+
+#if defined(OS_ANDROID)
+  // On Android, Map can be called with a size and offset of zero to use the
+  // ashmem-determined size.
+  if (bytes == 0) {
+    DCHECK_EQ(0, offset);
+    int ashmem_bytes = ashmem_get_size_region(shm_.GetHandle());
+    if (ashmem_bytes < 0)
+      return false;
+    bytes = ashmem_bytes;
+  }
+
+  // Sanity check. This catches invalid uses of the SharedMemory APIs
+  // but will not protect against direct mmap() attempts.
+  if (shm_.IsReadOnly()) {
+    // Use a DCHECK() so that writable mappings requested with a read-only
+    // descriptor fail immediately in debug builds. Return an error for
+    // release builds or during unit-testing (assuming a
+    // ScopedLogAssertHandler was installed).
+    DCHECK(read_only_)
+        << "Trying to map a region writable with a read-only descriptor.";
+    if (!read_only_) {
+      return false;
+    }
+    if (!shm_.SetRegionReadOnly()) {  // Ensure the region is read-only.
+      return false;
+    }
+  }
+#endif
+
+  memory_ = mmap(nullptr, bytes, PROT_READ | (read_only_ ? 0 : PROT_WRITE),
+                 MAP_SHARED, shm_.GetHandle(), offset);
+
+  bool mmap_succeeded = memory_ != MAP_FAILED && memory_ != nullptr;
+  if (mmap_succeeded) {
+    mapped_size_ = bytes;
+    mapped_id_ = shm_.GetGUID();
+    DCHECK_EQ(0U,
+              reinterpret_cast<uintptr_t>(memory_) &
+                  (SharedMemory::MAP_MINIMUM_ALIGNMENT - 1));
+    SharedMemoryTracker::GetInstance()->IncrementMemoryUsage(*this);
+  } else {
+    memory_ = nullptr;
+  }
+
+  return mmap_succeeded;
+}
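+
+// A minimal usage sketch: |offset| is passed straight through to mmap(), so
+// it must be a multiple of the allocation granularity. Mapping everything
+// past the first allocation-granularity "page" of a region might look like
+// this (|shm| and |region_size| are assumed to exist):
+//
+//   off_t offset = SysInfo::VMAllocationGranularity();
+//   if (shm.MapAt(offset, region_size - offset)) {
+//     // shm.memory() now points |offset| bytes into the region.
+//   }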
+
+bool SharedMemory::Unmap() {
+  if (!memory_)
+    return false;
+
+  SharedMemoryTracker::GetInstance()->DecrementMemoryUsage(*this);
+  munmap(memory_, mapped_size_);
+  memory_ = nullptr;
+  mapped_size_ = 0;
+  mapped_id_ = UnguessableToken();
+  return true;
+}
+
+SharedMemoryHandle SharedMemory::handle() const {
+  return shm_;
+}
+
+SharedMemoryHandle SharedMemory::TakeHandle() {
+  SharedMemoryHandle handle_copy = shm_;
+  handle_copy.SetOwnershipPassesToIPC(true);
+  Unmap();
+  shm_ = SharedMemoryHandle();
+  return handle_copy;
+}
+
+#if !defined(OS_ANDROID)
+void SharedMemory::Close() {
+  if (shm_.IsValid()) {
+    shm_.Close();
+    shm_ = SharedMemoryHandle();
+  }
+  if (readonly_shm_.IsValid()) {
+    readonly_shm_.Close();
+    readonly_shm_ = SharedMemoryHandle();
+  }
+}
+
+// For the given shmem named |mem_name|, return a filename to mmap()
+// (and possibly create).  Modifies |path|.  Returns false on
+// error, or true if we are happy.
+bool SharedMemory::FilePathForMemoryName(const std::string& mem_name,
+                                         FilePath* path) {
+  // mem_name will be used for a filename; make sure it doesn't
+  // contain anything which will confuse us.
+  DCHECK_EQ(std::string::npos, mem_name.find('/'));
+  DCHECK_EQ(std::string::npos, mem_name.find('\0'));
+
+  FilePath temp_dir;
+  if (!GetShmemTempDir(false, &temp_dir))
+    return false;
+
+#if defined(GOOGLE_CHROME_BUILD)
+  static const char kShmem[] = "com.google.Chrome.shmem.";
+#else
+  static const char kShmem[] = "org.chromium.Chromium.shmem.";
+#endif
+  CR_DEFINE_STATIC_LOCAL(const std::string, name_base, (kShmem));
+  *path = temp_dir.AppendASCII(name_base + mem_name);
+  return true;
+}
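+
+// Worked example: on Linux the shmem temp dir is typically /dev/shm, so for
+// mem_name "MyName" (an illustrative name) this yields
+//   /dev/shm/org.chromium.Chromium.shmem.MyName
+// (with the com.google.Chrome.shmem. prefix in GOOGLE_CHROME_BUILD builds).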
+
+SharedMemoryHandle SharedMemory::GetReadOnlyHandle() const {
+  CHECK(readonly_shm_.IsValid());
+  return readonly_shm_.Duplicate();
+}
+#endif  // !defined(OS_ANDROID)
+
+}  // namespace base
diff --git a/base/memory/shared_memory_region_unittest.cc b/base/memory/shared_memory_region_unittest.cc
new file mode 100644
index 0000000..fcecb1f
--- /dev/null
+++ b/base/memory/shared_memory_region_unittest.cc
@@ -0,0 +1,279 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <utility>
+
+#include "base/memory/platform_shared_memory_region.h"
+#include "base/memory/read_only_shared_memory_region.h"
+#include "base/memory/unsafe_shared_memory_region.h"
+#include "base/memory/writable_shared_memory_region.h"
+#include "base/sys_info.h"
+#include "base/test/test_shared_memory_util.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+const size_t kRegionSize = 1024;
+
+bool IsMemoryFilledWithByte(const void* memory, size_t size, char byte) {
+  const char* start_ptr = static_cast<const char*>(memory);
+  const char* end_ptr = start_ptr + size;
+  for (const char* ptr = start_ptr; ptr < end_ptr; ++ptr) {
+    if (*ptr != byte)
+      return false;
+  }
+
+  return true;
+}
+
+template <typename SharedMemoryRegionType>
+class SharedMemoryRegionTest : public ::testing::Test {
+ public:
+  void SetUp() override {
+    std::tie(region_, rw_mapping_) =
+        CreateMappedRegion<SharedMemoryRegionType>(kRegionSize);
+    ASSERT_TRUE(region_.IsValid());
+    ASSERT_TRUE(rw_mapping_.IsValid());
+    memset(rw_mapping_.memory(), 'G', kRegionSize);
+    EXPECT_TRUE(IsMemoryFilledWithByte(rw_mapping_.memory(), kRegionSize, 'G'));
+  }
+
+ protected:
+  SharedMemoryRegionType region_;
+  WritableSharedMemoryMapping rw_mapping_;
+};
+
+typedef ::testing::Types<WritableSharedMemoryRegion,
+                         UnsafeSharedMemoryRegion,
+                         ReadOnlySharedMemoryRegion>
+    AllRegionTypes;
+TYPED_TEST_CASE(SharedMemoryRegionTest, AllRegionTypes);
+
+TYPED_TEST(SharedMemoryRegionTest, NonValidRegion) {
+  TypeParam region;
+  EXPECT_FALSE(region.IsValid());
+  // We shouldn't crash on Map but should return an invalid mapping.
+  typename TypeParam::MappingType mapping = region.Map();
+  EXPECT_FALSE(mapping.IsValid());
+}
+
+TYPED_TEST(SharedMemoryRegionTest, MoveRegion) {
+  TypeParam moved_region = std::move(this->region_);
+  EXPECT_FALSE(this->region_.IsValid());
+  ASSERT_TRUE(moved_region.IsValid());
+
+  // Check that moved region maps correctly.
+  typename TypeParam::MappingType mapping = moved_region.Map();
+  ASSERT_TRUE(mapping.IsValid());
+  EXPECT_NE(this->rw_mapping_.memory(), mapping.memory());
+  EXPECT_EQ(memcmp(this->rw_mapping_.memory(), mapping.memory(), kRegionSize),
+            0);
+
+  // Verify that the second mapping reflects changes in the first.
+  memset(this->rw_mapping_.memory(), '#', kRegionSize);
+  EXPECT_EQ(memcmp(this->rw_mapping_.memory(), mapping.memory(), kRegionSize),
+            0);
+}
+
+TYPED_TEST(SharedMemoryRegionTest, MappingValidAfterClose) {
+  // Check the mapping is still valid after the region is closed.
+  this->region_ = TypeParam();
+  EXPECT_FALSE(this->region_.IsValid());
+  ASSERT_TRUE(this->rw_mapping_.IsValid());
+  EXPECT_TRUE(
+      IsMemoryFilledWithByte(this->rw_mapping_.memory(), kRegionSize, 'G'));
+}
+
+TYPED_TEST(SharedMemoryRegionTest, MapTwice) {
+  // The second mapping is either writable or read-only.
+  typename TypeParam::MappingType mapping = this->region_.Map();
+  ASSERT_TRUE(mapping.IsValid());
+  EXPECT_NE(this->rw_mapping_.memory(), mapping.memory());
+  EXPECT_EQ(memcmp(this->rw_mapping_.memory(), mapping.memory(), kRegionSize),
+            0);
+
+  // Verify that the second mapping reflects changes in the first.
+  memset(this->rw_mapping_.memory(), '#', kRegionSize);
+  EXPECT_EQ(memcmp(this->rw_mapping_.memory(), mapping.memory(), kRegionSize),
+            0);
+
+  // Close the region and unmap the first memory segment, verify the second
+  // still has the right data.
+  this->region_ = TypeParam();
+  this->rw_mapping_ = WritableSharedMemoryMapping();
+  EXPECT_TRUE(IsMemoryFilledWithByte(mapping.memory(), kRegionSize, '#'));
+}
+
+TYPED_TEST(SharedMemoryRegionTest, MapUnmapMap) {
+  this->rw_mapping_ = WritableSharedMemoryMapping();
+
+  typename TypeParam::MappingType mapping = this->region_.Map();
+  ASSERT_TRUE(mapping.IsValid());
+  EXPECT_TRUE(IsMemoryFilledWithByte(mapping.memory(), kRegionSize, 'G'));
+}
+
+TYPED_TEST(SharedMemoryRegionTest, SerializeAndDeserialize) {
+  subtle::PlatformSharedMemoryRegion platform_region =
+      TypeParam::TakeHandleForSerialization(std::move(this->region_));
+  EXPECT_EQ(platform_region.GetGUID(), this->rw_mapping_.guid());
+  TypeParam region = TypeParam::Deserialize(std::move(platform_region));
+  EXPECT_TRUE(region.IsValid());
+  EXPECT_FALSE(this->region_.IsValid());
+  typename TypeParam::MappingType mapping = region.Map();
+  ASSERT_TRUE(mapping.IsValid());
+  EXPECT_TRUE(IsMemoryFilledWithByte(mapping.memory(), kRegionSize, 'G'));
+
+  // Verify that the second mapping reflects changes in the first.
+  memset(this->rw_mapping_.memory(), '#', kRegionSize);
+  EXPECT_EQ(memcmp(this->rw_mapping_.memory(), mapping.memory(), kRegionSize),
+            0);
+}
+
+// Map() will return addresses which are aligned to the platform page size;
+// this varies from platform to platform, though.  Since we'd like to advertise
+// a minimum alignment that callers can count on, test for it here.
+TYPED_TEST(SharedMemoryRegionTest, MapMinimumAlignment) {
+  EXPECT_EQ(0U,
+            reinterpret_cast<uintptr_t>(this->rw_mapping_.memory()) &
+                (subtle::PlatformSharedMemoryRegion::kMapMinimumAlignment - 1));
+}
+
+TYPED_TEST(SharedMemoryRegionTest, MapSize) {
+  EXPECT_EQ(this->rw_mapping_.size(), kRegionSize);
+  EXPECT_GE(this->rw_mapping_.mapped_size(), kRegionSize);
+}
+
+TYPED_TEST(SharedMemoryRegionTest, MapGranularity) {
+  EXPECT_LT(this->rw_mapping_.mapped_size(),
+            kRegionSize + SysInfo::VMAllocationGranularity());
+}
+
+TYPED_TEST(SharedMemoryRegionTest, MapAt) {
+  const size_t kPageSize = SysInfo::VMAllocationGranularity();
+  ASSERT_TRUE(kPageSize >= sizeof(uint32_t));
+  ASSERT_EQ(kPageSize % sizeof(uint32_t), 0U);
+  const size_t kDataSize = kPageSize * 2;
+  const size_t kCount = kDataSize / sizeof(uint32_t);
+
+  TypeParam region;
+  WritableSharedMemoryMapping rw_mapping;
+  std::tie(region, rw_mapping) = CreateMappedRegion<TypeParam>(kDataSize);
+  ASSERT_TRUE(region.IsValid());
+  ASSERT_TRUE(rw_mapping.IsValid());
+  uint32_t* ptr = static_cast<uint32_t*>(rw_mapping.memory());
+
+  for (size_t i = 0; i < kCount; ++i)
+    ptr[i] = i;
+
+  rw_mapping = WritableSharedMemoryMapping();
+  off_t bytes_offset = kPageSize;
+  typename TypeParam::MappingType mapping =
+      region.MapAt(bytes_offset, kDataSize - bytes_offset);
+  ASSERT_TRUE(mapping.IsValid());
+
+  off_t int_offset = bytes_offset / sizeof(uint32_t);
+  const uint32_t* ptr2 = static_cast<const uint32_t*>(mapping.memory());
+  for (size_t i = int_offset; i < kCount; ++i) {
+    EXPECT_EQ(ptr2[i - int_offset], i);
+  }
+}
+
+TYPED_TEST(SharedMemoryRegionTest, MapAtNotAlignedOffsetFails) {
+  const size_t kDataSize = SysInfo::VMAllocationGranularity();
+
+  TypeParam region;
+  WritableSharedMemoryMapping rw_mapping;
+  std::tie(region, rw_mapping) = CreateMappedRegion<TypeParam>(kDataSize);
+  ASSERT_TRUE(region.IsValid());
+  ASSERT_TRUE(rw_mapping.IsValid());
+  off_t offset = kDataSize / 2;
+  typename TypeParam::MappingType mapping =
+      region.MapAt(offset, kDataSize - offset);
+  EXPECT_FALSE(mapping.IsValid());
+}
+
+TYPED_TEST(SharedMemoryRegionTest, MapMoreBytesThanRegionSizeFails) {
+  size_t region_real_size = this->region_.GetSize();
+  typename TypeParam::MappingType mapping =
+      this->region_.MapAt(0, region_real_size + 1);
+  EXPECT_FALSE(mapping.IsValid());
+}
+
+template <typename DuplicatableSharedMemoryRegion>
+class DuplicatableSharedMemoryRegionTest
+    : public SharedMemoryRegionTest<DuplicatableSharedMemoryRegion> {};
+
+typedef ::testing::Types<UnsafeSharedMemoryRegion, ReadOnlySharedMemoryRegion>
+    DuplicatableRegionTypes;
+TYPED_TEST_CASE(DuplicatableSharedMemoryRegionTest, DuplicatableRegionTypes);
+
+TYPED_TEST(DuplicatableSharedMemoryRegionTest, Duplicate) {
+  TypeParam dup_region = this->region_.Duplicate();
+  typename TypeParam::MappingType mapping = dup_region.Map();
+  ASSERT_TRUE(mapping.IsValid());
+  EXPECT_NE(this->rw_mapping_.memory(), mapping.memory());
+  EXPECT_EQ(this->rw_mapping_.guid(), mapping.guid());
+  EXPECT_TRUE(IsMemoryFilledWithByte(mapping.memory(), kRegionSize, 'G'));
+}
+
+class ReadOnlySharedMemoryRegionTest : public ::testing::Test {
+ public:
+  ReadOnlySharedMemoryRegion GetInitiallyReadOnlyRegion(size_t size) {
+    MappedReadOnlyRegion mapped_region =
+        ReadOnlySharedMemoryRegion::Create(size);
+    ReadOnlySharedMemoryRegion region = std::move(mapped_region.region);
+    return region;
+  }
+
+  ReadOnlySharedMemoryRegion GetConvertedToReadOnlyRegion(size_t size) {
+    WritableSharedMemoryRegion region =
+        WritableSharedMemoryRegion::Create(size);
+    ReadOnlySharedMemoryRegion ro_region =
+        WritableSharedMemoryRegion::ConvertToReadOnly(std::move(region));
+    return ro_region;
+  }
+};
+
+TEST_F(ReadOnlySharedMemoryRegionTest,
+       InitiallyReadOnlyRegionCannotBeMappedAsWritable) {
+  ReadOnlySharedMemoryRegion region = GetInitiallyReadOnlyRegion(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+
+  EXPECT_TRUE(CheckReadOnlyPlatformSharedMemoryRegionForTesting(
+      ReadOnlySharedMemoryRegion::TakeHandleForSerialization(
+          std::move(region))));
+}
+
+TEST_F(ReadOnlySharedMemoryRegionTest,
+       ConvertedToReadOnlyRegionCannotBeMappedAsWritable) {
+  ReadOnlySharedMemoryRegion region = GetConvertedToReadOnlyRegion(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+
+  EXPECT_TRUE(CheckReadOnlyPlatformSharedMemoryRegionForTesting(
+      ReadOnlySharedMemoryRegion::TakeHandleForSerialization(
+          std::move(region))));
+}
+
+TEST_F(ReadOnlySharedMemoryRegionTest,
+       InitiallyReadOnlyRegionProducedMappingWriteDeathTest) {
+  ReadOnlySharedMemoryRegion region = GetInitiallyReadOnlyRegion(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+  ReadOnlySharedMemoryMapping mapping = region.Map();
+  ASSERT_TRUE(mapping.IsValid());
+  void* memory_ptr = const_cast<void*>(mapping.memory());
+  EXPECT_DEATH_IF_SUPPORTED(memset(memory_ptr, 'G', kRegionSize), "");
+}
+
+TEST_F(ReadOnlySharedMemoryRegionTest,
+       ConvertedToReadOnlyRegionProducedMappingWriteDeathTest) {
+  ReadOnlySharedMemoryRegion region = GetConvertedToReadOnlyRegion(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+  ReadOnlySharedMemoryMapping mapping = region.Map();
+  ASSERT_TRUE(mapping.IsValid());
+  void* memory_ptr = const_cast<void*>(mapping.memory());
+  EXPECT_DEATH_IF_SUPPORTED(memset(memory_ptr, 'G', kRegionSize), "");
+}
+
+}  // namespace base
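+
+// A minimal sketch of the writable-to-read-only flow exercised above (the
+// size is illustrative):
+//
+//   WritableSharedMemoryRegion writable =
+//       WritableSharedMemoryRegion::Create(4096);
+//   WritableSharedMemoryMapping rw = writable.Map();
+//   memset(rw.memory(), 0, rw.size());
+//   ReadOnlySharedMemoryRegion readonly =
+//       WritableSharedMemoryRegion::ConvertToReadOnly(std::move(writable));
+//   ReadOnlySharedMemoryMapping ro = readonly.Map();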
diff --git a/base/memory/shared_memory_tracker.cc b/base/memory/shared_memory_tracker.cc
new file mode 100644
index 0000000..5ca7c84
--- /dev/null
+++ b/base/memory/shared_memory_tracker.cc
@@ -0,0 +1,147 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory_tracker.h"
+
+#include "base/memory/shared_memory.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/trace_event/memory_allocator_dump_guid.h"
+#include "base/trace_event/memory_dump_manager.h"
+#include "base/trace_event/process_memory_dump.h"
+
+namespace base {
+
+const char SharedMemoryTracker::kDumpRootName[] = "shared_memory";
+
+// static
+SharedMemoryTracker* SharedMemoryTracker::GetInstance() {
+  static SharedMemoryTracker* instance = new SharedMemoryTracker;
+  return instance;
+}
+
+// static
+std::string SharedMemoryTracker::GetDumpNameForTracing(
+    const UnguessableToken& id) {
+  DCHECK(!id.is_empty());
+  return std::string(kDumpRootName) + "/" + id.ToString();
+}
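+
+// Worked example: for a mapping whose UnguessableToken formats as
+// "0123456789ABCDEF0123456789ABCDEF", this returns
+// "shared_memory/0123456789ABCDEF0123456789ABCDEF".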
+
+// static
+trace_event::MemoryAllocatorDumpGuid
+SharedMemoryTracker::GetGlobalDumpIdForTracing(const UnguessableToken& id) {
+  std::string dump_name = GetDumpNameForTracing(id);
+  return trace_event::MemoryAllocatorDumpGuid(dump_name);
+}
+
+// static
+const trace_event::MemoryAllocatorDump*
+SharedMemoryTracker::GetOrCreateSharedMemoryDump(
+    const SharedMemory* shared_memory,
+    trace_event::ProcessMemoryDump* pmd) {
+  return GetOrCreateSharedMemoryDumpInternal(shared_memory->memory(),
+                                             shared_memory->mapped_size(),
+                                             shared_memory->mapped_id(), pmd);
+}
+
+const trace_event::MemoryAllocatorDump*
+SharedMemoryTracker::GetOrCreateSharedMemoryDump(
+    const SharedMemoryMapping& shared_memory,
+    trace_event::ProcessMemoryDump* pmd) {
+  return GetOrCreateSharedMemoryDumpInternal(shared_memory.raw_memory_ptr(),
+                                             shared_memory.mapped_size(),
+                                             shared_memory.guid(), pmd);
+}
+
+void SharedMemoryTracker::IncrementMemoryUsage(
+    const SharedMemory& shared_memory) {
+  AutoLock hold(usages_lock_);
+  DCHECK(usages_.find(shared_memory.memory()) == usages_.end());
+  usages_.emplace(shared_memory.memory(), UsageInfo(shared_memory.mapped_size(),
+                                                    shared_memory.mapped_id()));
+}
+
+void SharedMemoryTracker::IncrementMemoryUsage(
+    const SharedMemoryMapping& mapping) {
+  AutoLock hold(usages_lock_);
+  DCHECK(usages_.find(mapping.raw_memory_ptr()) == usages_.end());
+  usages_.emplace(mapping.raw_memory_ptr(),
+                  UsageInfo(mapping.mapped_size(), mapping.guid()));
+}
+
+void SharedMemoryTracker::DecrementMemoryUsage(
+    const SharedMemory& shared_memory) {
+  AutoLock hold(usages_lock_);
+  DCHECK(usages_.find(shared_memory.memory()) != usages_.end());
+  usages_.erase(shared_memory.memory());
+}
+
+void SharedMemoryTracker::DecrementMemoryUsage(
+    const SharedMemoryMapping& mapping) {
+  AutoLock hold(usages_lock_);
+  DCHECK(usages_.find(mapping.raw_memory_ptr()) != usages_.end());
+  usages_.erase(mapping.raw_memory_ptr());
+}
+
+SharedMemoryTracker::SharedMemoryTracker() {
+  trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
+      this, "SharedMemoryTracker", nullptr);
+}
+
+SharedMemoryTracker::~SharedMemoryTracker() = default;
+
+bool SharedMemoryTracker::OnMemoryDump(const trace_event::MemoryDumpArgs& args,
+                                       trace_event::ProcessMemoryDump* pmd) {
+  AutoLock hold(usages_lock_);
+  for (const auto& usage : usages_) {
+    const trace_event::MemoryAllocatorDump* dump =
+        GetOrCreateSharedMemoryDumpInternal(
+            usage.first, usage.second.mapped_size, usage.second.mapped_id, pmd);
+    DCHECK(dump);
+  }
+  return true;
+}
+
+// static
+const trace_event::MemoryAllocatorDump*
+SharedMemoryTracker::GetOrCreateSharedMemoryDumpInternal(
+    void* mapped_memory,
+    size_t mapped_size,
+    const UnguessableToken& mapped_id,
+    trace_event::ProcessMemoryDump* pmd) {
+  const std::string dump_name = GetDumpNameForTracing(mapped_id);
+  trace_event::MemoryAllocatorDump* local_dump =
+      pmd->GetAllocatorDump(dump_name);
+  if (local_dump)
+    return local_dump;
+
+  size_t virtual_size = mapped_size;
+  // If the resident size is not available, the virtual size is used as a
+  // fallback.
+  size_t size = virtual_size;
+#if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
+  base::Optional<size_t> resident_size =
+      trace_event::ProcessMemoryDump::CountResidentBytesInSharedMemory(
+          mapped_memory, mapped_size);
+  if (resident_size.has_value())
+    size = resident_size.value();
+#endif
+
+  local_dump = pmd->CreateAllocatorDump(dump_name);
+  local_dump->AddScalar(trace_event::MemoryAllocatorDump::kNameSize,
+                        trace_event::MemoryAllocatorDump::kUnitsBytes, size);
+  local_dump->AddScalar("virtual_size",
+                        trace_event::MemoryAllocatorDump::kUnitsBytes,
+                        virtual_size);
+  auto global_dump_guid = GetGlobalDumpIdForTracing(mapped_id);
+  trace_event::MemoryAllocatorDump* global_dump =
+      pmd->CreateSharedGlobalAllocatorDump(global_dump_guid);
+  global_dump->AddScalar(trace_event::MemoryAllocatorDump::kNameSize,
+                         trace_event::MemoryAllocatorDump::kUnitsBytes, size);
+
+  // The edges will be overridden by the clients with correct importance.
+  pmd->AddOverridableOwnershipEdge(local_dump->guid(), global_dump->guid(),
+                                   0 /* importance */);
+  return local_dump;
+}
+
+}  // namespace base
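+
+// A minimal usage sketch: the mapping code (see shared_memory_posix.cc above)
+// pairs these calls around map/unmap, roughly:
+//
+//   SharedMemoryTracker::GetInstance()->IncrementMemoryUsage(*this);
+//   ...  // memory_ is in use.
+//   SharedMemoryTracker::GetInstance()->DecrementMemoryUsage(*this);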
diff --git a/base/memory/shared_memory_tracker.h b/base/memory/shared_memory_tracker.h
new file mode 100644
index 0000000..499b172
--- /dev/null
+++ b/base/memory/shared_memory_tracker.h
@@ -0,0 +1,90 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_SHARED_MEMORY_TRACKER_H_
+#define BASE_MEMORY_SHARED_MEMORY_TRACKER_H_
+
+#include <map>
+#include <string>
+
+#include "base/memory/shared_memory.h"
+#include "base/memory/shared_memory_mapping.h"
+#include "base/synchronization/lock.h"
+#include "base/trace_event/memory_dump_provider.h"
+
+namespace base {
+
+namespace trace_event {
+class MemoryAllocatorDump;
+class MemoryAllocatorDumpGuid;
+class ProcessMemoryDump;
+}
+
+// SharedMemoryTracker tracks shared memory usage.
+class BASE_EXPORT SharedMemoryTracker : public trace_event::MemoryDumpProvider {
+ public:
+  // Returns a singleton instance.
+  static SharedMemoryTracker* GetInstance();
+
+  static std::string GetDumpNameForTracing(const UnguessableToken& id);
+
+  static trace_event::MemoryAllocatorDumpGuid GetGlobalDumpIdForTracing(
+      const UnguessableToken& id);
+
+  // Gets, or creates if non-existent, a memory dump for the |shared_memory|
+  // inside the given |pmd|. Also adds the necessary edges for the dump when
+  // creating the dump.
+  static const trace_event::MemoryAllocatorDump* GetOrCreateSharedMemoryDump(
+      const SharedMemory* shared_memory,
+      trace_event::ProcessMemoryDump* pmd);
+  // We're in the middle of a refactor (https://crbug.com/795291). Eventually,
+  // the first call will go away.
+  static const trace_event::MemoryAllocatorDump* GetOrCreateSharedMemoryDump(
+      const SharedMemoryMapping& shared_memory,
+      trace_event::ProcessMemoryDump* pmd);
+
+  // Records shared memory usage for a valid mapping.
+  void IncrementMemoryUsage(const SharedMemory& shared_memory);
+  void IncrementMemoryUsage(const SharedMemoryMapping& mapping);
+
+  // Records shared memory usage on unmapping.
+  void DecrementMemoryUsage(const SharedMemory& shared_memory);
+  void DecrementMemoryUsage(const SharedMemoryMapping& mapping);
+
+  // Root dump name for all shared memory dumps.
+  static const char kDumpRootName[];
+
+ private:
+  SharedMemoryTracker();
+  ~SharedMemoryTracker() override;
+
+  // trace_event::MemoryDumpProvider implementation.
+  bool OnMemoryDump(const trace_event::MemoryDumpArgs& args,
+                    trace_event::ProcessMemoryDump* pmd) override;
+
+  static const trace_event::MemoryAllocatorDump*
+  GetOrCreateSharedMemoryDumpInternal(void* mapped_memory,
+                                      size_t mapped_size,
+                                      const UnguessableToken& mapped_id,
+                                      trace_event::ProcessMemoryDump* pmd);
+
+  // Information associated with each mapped address.
+  struct UsageInfo {
+    UsageInfo(size_t size, const UnguessableToken& id)
+        : mapped_size(size), mapped_id(id) {}
+
+    size_t mapped_size;
+    UnguessableToken mapped_id;
+  };
+
+  // Used to lock when |usages_| is modified or read.
+  Lock usages_lock_;
+  std::map<void*, UsageInfo> usages_;
+
+  DISALLOW_COPY_AND_ASSIGN(SharedMemoryTracker);
+};
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_SHARED_MEMORY_TRACKER_H_
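+
+// A minimal usage sketch, assuming a dump provider that owns a mapped
+// |shared_memory| and has already created its own |my_dump| in |pmd|
+// (both names are illustrative):
+//
+//   const trace_event::MemoryAllocatorDump* shm_dump =
+//       SharedMemoryTracker::GetOrCreateSharedMemoryDump(shared_memory, pmd);
+//   pmd->AddOwnershipEdge(my_dump->guid(), shm_dump->guid());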
diff --git a/base/memory/shared_memory_unittest.cc b/base/memory/shared_memory_unittest.cc
new file mode 100644
index 0000000..b754540
--- /dev/null
+++ b/base/memory/shared_memory_unittest.cc
@@ -0,0 +1,965 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+
+#include "base/atomicops.h"
+#include "base/base_switches.h"
+#include "base/bind.h"
+#include "base/command_line.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/shared_memory_handle.h"
+#include "base/process/kill.h"
+#include "base/rand_util.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/string_util.h"
+#include "base/sys_info.h"
+#include "base/test/multiprocess_test.h"
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
+#include "base/unguessable_token.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/multiprocess_func_list.h"
+
+#if defined(OS_ANDROID)
+#include "base/callback.h"
+#endif
+
+#if defined(OS_POSIX)
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#endif
+
+#if defined(OS_LINUX)
+#include <sys/syscall.h>
+#endif
+
+#if defined(OS_WIN)
+#include "base/win/scoped_handle.h"
+#endif
+
+#if defined(OS_FUCHSIA)
+#include <zircon/process.h>
+#include <zircon/syscalls.h>
+#include "base/fuchsia/scoped_zx_handle.h"
+#endif
+
+namespace base {
+
+namespace {
+
+#if !defined(OS_MACOSX) && !defined(OS_FUCHSIA)
+// Each thread will open the shared memory.  Each thread will take a different
+// 4-byte int pointer, and keep changing it, with some small pauses in between.
+// Verify that each thread's value in the shared memory is always correct.
+class MultipleThreadMain : public PlatformThread::Delegate {
+ public:
+  explicit MultipleThreadMain(int16_t id) : id_(id) {}
+  ~MultipleThreadMain() override = default;
+
+  static void CleanUp() {
+    SharedMemory memory;
+    memory.Delete(s_test_name_);
+  }
+
+  // PlatformThread::Delegate interface.
+  void ThreadMain() override {
+    const uint32_t kDataSize = 1024;
+    SharedMemory memory;
+    bool rv = memory.CreateNamedDeprecated(s_test_name_, true, kDataSize);
+    EXPECT_TRUE(rv);
+    rv = memory.Map(kDataSize);
+    EXPECT_TRUE(rv);
+    int* ptr = static_cast<int*>(memory.memory()) + id_;
+    EXPECT_EQ(0, *ptr);
+
+    for (int idx = 0; idx < 100; idx++) {
+      *ptr = idx;
+      PlatformThread::Sleep(TimeDelta::FromMilliseconds(1));
+      EXPECT_EQ(*ptr, idx);
+    }
+    // Reset back to 0 for the next test that uses the same name.
+    *ptr = 0;
+
+    memory.Close();
+  }
+
+ private:
+  int16_t id_;
+
+  static const char s_test_name_[];
+
+  DISALLOW_COPY_AND_ASSIGN(MultipleThreadMain);
+};
+
+const char MultipleThreadMain::s_test_name_[] =
+    "SharedMemoryOpenThreadTest";
+#endif  // !defined(OS_MACOSX) && !defined(OS_FUCHSIA)
+
+enum class Mode {
+  Default,
+#if defined(OS_LINUX) && !defined(OS_CHROMEOS)
+  DisableDevShm = 1,
+#endif
+};
+
+class SharedMemoryTest : public ::testing::TestWithParam<Mode> {
+ public:
+  void SetUp() override {
+    switch (GetParam()) {
+      case Mode::Default:
+        break;
+#if defined(OS_LINUX) && !defined(OS_CHROMEOS)
+      case Mode::DisableDevShm:
+        CommandLine* cmdline = CommandLine::ForCurrentProcess();
+        cmdline->AppendSwitch(switches::kDisableDevShmUsage);
+        break;
+#endif  // defined(OS_LINUX) && !defined(OS_CHROMEOS)
+    }
+  }
+};
+
+}  // namespace
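+
+// The parameterized suite above is expected to be instantiated once per Mode,
+// e.g. (a sketch; the actual instantiations live elsewhere in this file):
+//
+//   INSTANTIATE_TEST_CASE_P(Default, SharedMemoryTest,
+//                           ::testing::Values(Mode::Default));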
+
+// Android/Mac/Fuchsia don't support SharedMemory::Open/Delete/
+// CreateNamedDeprecated(openExisting=true).
+#if !defined(OS_ANDROID) && !defined(OS_MACOSX) && !defined(OS_FUCHSIA)
+
+TEST_P(SharedMemoryTest, OpenClose) {
+  const uint32_t kDataSize = 1024;
+  std::string test_name = "SharedMemoryOpenCloseTest";
+
+  // Open two handles to a memory segment, confirm that they are mapped
+  // separately yet point to the same space.
+  SharedMemory memory1;
+  bool rv = memory1.Delete(test_name);
+  EXPECT_TRUE(rv);
+  rv = memory1.Delete(test_name);
+  EXPECT_TRUE(rv);
+  rv = memory1.Open(test_name, false);
+  EXPECT_FALSE(rv);
+  rv = memory1.CreateNamedDeprecated(test_name, false, kDataSize);
+  EXPECT_TRUE(rv);
+  rv = memory1.Map(kDataSize);
+  EXPECT_TRUE(rv);
+  SharedMemory memory2;
+  rv = memory2.Open(test_name, false);
+  EXPECT_TRUE(rv);
+  rv = memory2.Map(kDataSize);
+  EXPECT_TRUE(rv);
+  EXPECT_NE(memory1.memory(), memory2.memory());  // Compare the pointers.
+
+  // Make sure we don't segfault. (it actually happened!)
+  ASSERT_NE(memory1.memory(), static_cast<void*>(nullptr));
+  ASSERT_NE(memory2.memory(), static_cast<void*>(nullptr));
+
+  // Write data to the first memory segment, verify contents of second.
+  memset(memory1.memory(), '1', kDataSize);
+  EXPECT_EQ(memcmp(memory1.memory(), memory2.memory(), kDataSize), 0);
+
+  // Close the first memory segment, and verify the second has the right data.
+  memory1.Close();
+  char* start_ptr = static_cast<char*>(memory2.memory());
+  char* end_ptr = start_ptr + kDataSize;
+  for (char* ptr = start_ptr; ptr < end_ptr; ptr++)
+    EXPECT_EQ(*ptr, '1');
+
+  // Close the second memory segment.
+  memory2.Close();
+
+  rv = memory1.Delete(test_name);
+  EXPECT_TRUE(rv);
+  rv = memory2.Delete(test_name);
+  EXPECT_TRUE(rv);
+}
+
+TEST_P(SharedMemoryTest, OpenExclusive) {
+  const uint32_t kDataSize = 1024;
+  const uint32_t kDataSize2 = 2048;
+  std::ostringstream test_name_stream;
+  test_name_stream << "SharedMemoryOpenExclusiveTest."
+                   << Time::Now().ToDoubleT();
+  std::string test_name = test_name_stream.str();
+
+  // Open two handles to a memory segment and check that
+  // open_existing_deprecated works as expected.
+  SharedMemory memory1;
+  bool rv = memory1.CreateNamedDeprecated(test_name, false, kDataSize);
+  EXPECT_TRUE(rv);
+
+  // Memory1 knows its size because it created it.
+  EXPECT_EQ(memory1.requested_size(), kDataSize);
+
+  rv = memory1.Map(kDataSize);
+  EXPECT_TRUE(rv);
+
+  // The mapped memory1 must be at least the size we asked for.
+  EXPECT_GE(memory1.mapped_size(), kDataSize);
+
+  // The mapped memory1 shouldn't exceed rounding for allocation granularity.
+  EXPECT_LT(memory1.mapped_size(),
+            kDataSize + SysInfo::VMAllocationGranularity());
+
+  memset(memory1.memory(), 'G', kDataSize);
+
+  SharedMemory memory2;
+  // Should not be able to create if openExisting is false.
+  rv = memory2.CreateNamedDeprecated(test_name, false, kDataSize2);
+  EXPECT_FALSE(rv);
+
+  // Should be able to create with openExisting true.
+  rv = memory2.CreateNamedDeprecated(test_name, true, kDataSize2);
+  EXPECT_TRUE(rv);
+
+  // Memory2 shouldn't know the size because we didn't create it.
+  EXPECT_EQ(memory2.requested_size(), 0U);
+
+  // We should be able to map the original size.
+  rv = memory2.Map(kDataSize);
+  EXPECT_TRUE(rv);
+
+  // The mapped memory2 must be at least the size of the original.
+  EXPECT_GE(memory2.mapped_size(), kDataSize);
+
+  // The mapped memory2 shouldn't exceed rounding for allocation granularity.
+  EXPECT_LT(memory2.mapped_size(),
+            kDataSize2 + SysInfo::VMAllocationGranularity());
+
+  // Verify that opening memory2 didn't truncate or delete memory 1.
+  char* start_ptr = static_cast<char*>(memory2.memory());
+  char* end_ptr = start_ptr + kDataSize;
+  for (char* ptr = start_ptr; ptr < end_ptr; ptr++) {
+    EXPECT_EQ(*ptr, 'G');
+  }
+
+  memory1.Close();
+  memory2.Close();
+
+  rv = memory1.Delete(test_name);
+  EXPECT_TRUE(rv);
+}
+#endif  // !defined(OS_ANDROID) && !defined(OS_MACOSX) && !defined(OS_FUCHSIA)
+
+// Check that memory is still mapped after it's closed.
+TEST_P(SharedMemoryTest, CloseNoUnmap) {
+  const size_t kDataSize = 4096;
+
+  SharedMemory memory;
+  ASSERT_TRUE(memory.CreateAndMapAnonymous(kDataSize));
+  char* ptr = static_cast<char*>(memory.memory());
+  ASSERT_NE(ptr, static_cast<void*>(nullptr));
+  memset(ptr, 'G', kDataSize);
+
+  memory.Close();
+
+  EXPECT_EQ(ptr, memory.memory());
+  EXPECT_TRUE(!memory.handle().IsValid());
+
+  for (size_t i = 0; i < kDataSize; i++) {
+    EXPECT_EQ('G', ptr[i]);
+  }
+
+  memory.Unmap();
+  EXPECT_EQ(nullptr, memory.memory());
+}
+
+#if !defined(OS_MACOSX) && !defined(OS_FUCHSIA)
+// Create a set of N threads to each open a shared memory segment and write to
+// it. Verify that they are always reading/writing consistent data.
+TEST_P(SharedMemoryTest, MultipleThreads) {
+  const int kNumThreads = 5;
+
+  MultipleThreadMain::CleanUp();
+  // On POSIX we have a problem when 2 threads try to create the shmem
+  // (a file) at exactly the same time, since creation both creates the
+  // file and zerofills it.  We solve the problem for this unit test
+  // (make it not flaky) by starting with 1 thread and then intentionally
+  // not cleaning up its shmem before running with kNumThreads.
+
+  int threadcounts[] = { 1, kNumThreads };
+  for (size_t i = 0; i < arraysize(threadcounts); i++) {
+    int numthreads = threadcounts[i];
+    std::unique_ptr<PlatformThreadHandle[]> thread_handles;
+    std::unique_ptr<MultipleThreadMain* []> thread_delegates;
+
+    thread_handles.reset(new PlatformThreadHandle[numthreads]);
+    thread_delegates.reset(new MultipleThreadMain*[numthreads]);
+
+    // Spawn the threads.
+    for (int16_t index = 0; index < numthreads; index++) {
+      PlatformThreadHandle pth;
+      thread_delegates[index] = new MultipleThreadMain(index);
+      EXPECT_TRUE(PlatformThread::Create(0, thread_delegates[index], &pth));
+      thread_handles[index] = pth;
+    }
+
+    // Wait for the threads to finish.
+    for (int index = 0; index < numthreads; index++) {
+      PlatformThread::Join(thread_handles[index]);
+      delete thread_delegates[index];
+    }
+  }
+  MultipleThreadMain::CleanUp();
+}
+#endif
+
+// Allocate private (unique) shared memory with an empty string for a
+// name.  Make sure several of them don't point to the same thing, as
+// they might if the names were equal.
+TEST_P(SharedMemoryTest, AnonymousPrivate) {
+  int i, j;
+  int count = 4;
+  bool rv;
+  const uint32_t kDataSize = 8192;
+
+  std::unique_ptr<SharedMemory[]> memories(new SharedMemory[count]);
+  std::unique_ptr<int* []> pointers(new int*[count]);
+  ASSERT_TRUE(memories.get());
+  ASSERT_TRUE(pointers.get());
+
+  for (i = 0; i < count; i++) {
+    rv = memories[i].CreateAndMapAnonymous(kDataSize);
+    EXPECT_TRUE(rv);
+    int* ptr = static_cast<int*>(memories[i].memory());
+    EXPECT_TRUE(ptr);
+    pointers[i] = ptr;
+  }
+
+  for (i = 0; i < count; i++) {
+    // zero out the first int in each except for i; for that one, make it 100.
+    for (j = 0; j < count; j++) {
+      if (i == j)
+        pointers[j][0] = 100;
+      else
+        pointers[j][0] = 0;
+    }
+    // make sure there is no bleeding of the 100 into the other pointers
+    for (j = 0; j < count; j++) {
+      if (i == j)
+        EXPECT_EQ(100, pointers[j][0]);
+      else
+        EXPECT_EQ(0, pointers[j][0]);
+    }
+  }
+
+  for (int i = 0; i < count; i++) {
+    memories[i].Close();
+  }
+}
+
+TEST_P(SharedMemoryTest, GetReadOnlyHandle) {
+  StringPiece contents = "Hello World";
+
+  SharedMemory writable_shmem;
+  SharedMemoryCreateOptions options;
+  options.size = contents.size();
+  options.share_read_only = true;
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+  // The Mach functionality is tested in shared_memory_mac_unittest.cc.
+  options.type = SharedMemoryHandle::POSIX;
+#endif
+  ASSERT_TRUE(writable_shmem.Create(options));
+  ASSERT_TRUE(writable_shmem.Map(options.size));
+  memcpy(writable_shmem.memory(), contents.data(), contents.size());
+  EXPECT_TRUE(writable_shmem.Unmap());
+
+  SharedMemoryHandle readonly_handle = writable_shmem.GetReadOnlyHandle();
+  EXPECT_EQ(writable_shmem.handle().GetGUID(), readonly_handle.GetGUID());
+  EXPECT_EQ(writable_shmem.handle().GetSize(), readonly_handle.GetSize());
+  ASSERT_TRUE(readonly_handle.IsValid());
+  SharedMemory readonly_shmem(readonly_handle, /*readonly=*/true);
+
+  ASSERT_TRUE(readonly_shmem.Map(contents.size()));
+  EXPECT_EQ(contents,
+            StringPiece(static_cast<const char*>(readonly_shmem.memory()),
+                        contents.size()));
+  EXPECT_TRUE(readonly_shmem.Unmap());
+
+#if defined(OS_ANDROID)
+  // On Android, mapping a region through a read-only descriptor makes the
+  // region read-only. Any writable mapping attempt should fail.
+  ASSERT_FALSE(writable_shmem.Map(contents.size()));
+#else
+  // Make sure the writable instance is still writable.
+  ASSERT_TRUE(writable_shmem.Map(contents.size()));
+  StringPiece new_contents = "Goodbye";
+  memcpy(writable_shmem.memory(), new_contents.data(), new_contents.size());
+  EXPECT_EQ(new_contents,
+            StringPiece(static_cast<const char*>(writable_shmem.memory()),
+                        new_contents.size()));
+#endif
+
+  // We'd like to check that if we send the read-only segment to another
+  // process, then that other process can't reopen it read/write.  (Since that
+  // would be a security hole.)  Setting up multiple processes is hard in a
+  // unittest, so this test checks that the *current* process can't reopen the
+  // segment read/write.  I think the test here is stronger than we actually
+  // care about, but there's a remote possibility that sending a file over a
+  // pipe would transform it into read/write.
+  SharedMemoryHandle handle = readonly_shmem.handle();
+
+#if defined(OS_ANDROID)
+  // The "read-only" handle is still writable on Android:
+  // http://crbug.com/320865
+  (void)handle;
+#elif defined(OS_FUCHSIA)
+  uintptr_t addr;
+  EXPECT_NE(ZX_OK, zx_vmar_map(zx_vmar_root_self(), 0, handle.GetHandle(), 0,
+                               contents.size(), ZX_VM_FLAG_PERM_WRITE, &addr))
+      << "Shouldn't be able to map as writable.";
+
+  ScopedZxHandle duped_handle;
+  EXPECT_NE(ZX_OK, zx_handle_duplicate(handle.GetHandle(), ZX_RIGHT_WRITE,
+                                       duped_handle.receive()))
+      << "Shouldn't be able to duplicate the handle into a writable one.";
+
+  EXPECT_EQ(ZX_OK, zx_handle_duplicate(handle.GetHandle(), ZX_RIGHT_READ,
+                                       duped_handle.receive()))
+      << "Should be able to duplicate the handle into a readable one.";
+#elif defined(OS_POSIX)
+  int handle_fd = SharedMemory::GetFdFromSharedMemoryHandle(handle);
+  EXPECT_EQ(O_RDONLY, fcntl(handle_fd, F_GETFL) & O_ACCMODE)
+      << "The descriptor itself should be read-only.";
+
+  errno = 0;
+  void* writable = mmap(nullptr, contents.size(), PROT_READ | PROT_WRITE,
+                        MAP_SHARED, handle_fd, 0);
+  int mmap_errno = errno;
+  EXPECT_EQ(MAP_FAILED, writable)
+      << "It shouldn't be possible to re-mmap the descriptor writable.";
+  EXPECT_EQ(EACCES, mmap_errno) << strerror(mmap_errno);
+  if (writable != MAP_FAILED)
+    EXPECT_EQ(0, munmap(writable, readonly_shmem.mapped_size()));
+
+#elif defined(OS_WIN)
+  EXPECT_EQ(NULL, MapViewOfFile(handle.GetHandle(), FILE_MAP_WRITE, 0, 0, 0))
+      << "Shouldn't be able to map memory writable.";
+
+  HANDLE temp_handle;
+  BOOL rv = ::DuplicateHandle(GetCurrentProcess(), handle.GetHandle(),
+                              GetCurrentProcess(), &temp_handle,
+                              FILE_MAP_ALL_ACCESS, false, 0);
+  EXPECT_EQ(FALSE, rv)
+      << "Shouldn't be able to duplicate the handle into a writable one.";
+  if (rv)
+    win::ScopedHandle writable_handle(temp_handle);
+  rv = ::DuplicateHandle(GetCurrentProcess(), handle.GetHandle(),
+                         GetCurrentProcess(), &temp_handle, FILE_MAP_READ,
+                         false, 0);
+  EXPECT_EQ(TRUE, rv)
+      << "Should be able to duplicate the handle into a readable one.";
+  if (rv)
+    win::ScopedHandle writable_handle(temp_handle);
+#else
+#error Unexpected platform; write a test that tries to make 'handle' writable.
+#endif  // defined(OS_POSIX) || defined(OS_WIN)
+}
+
+TEST_P(SharedMemoryTest, ShareToSelf) {
+  StringPiece contents = "Hello World";
+
+  SharedMemory shmem;
+  ASSERT_TRUE(shmem.CreateAndMapAnonymous(contents.size()));
+  memcpy(shmem.memory(), contents.data(), contents.size());
+  EXPECT_TRUE(shmem.Unmap());
+
+  SharedMemoryHandle shared_handle = shmem.handle().Duplicate();
+  ASSERT_TRUE(shared_handle.IsValid());
+  EXPECT_TRUE(shared_handle.OwnershipPassesToIPC());
+  EXPECT_EQ(shared_handle.GetGUID(), shmem.handle().GetGUID());
+  EXPECT_EQ(shared_handle.GetSize(), shmem.handle().GetSize());
+  SharedMemory shared(shared_handle, /*readonly=*/false);
+
+  ASSERT_TRUE(shared.Map(contents.size()));
+  EXPECT_EQ(
+      contents,
+      StringPiece(static_cast<const char*>(shared.memory()), contents.size()));
+
+  shared_handle = shmem.handle().Duplicate();
+  ASSERT_TRUE(shared_handle.IsValid());
+  ASSERT_TRUE(shared_handle.OwnershipPassesToIPC());
+  SharedMemory readonly(shared_handle, /*readonly=*/true);
+
+  ASSERT_TRUE(readonly.Map(contents.size()));
+  EXPECT_EQ(contents,
+            StringPiece(static_cast<const char*>(readonly.memory()),
+                        contents.size()));
+}
+
+TEST_P(SharedMemoryTest, ShareWithMultipleInstances) {
+  static const StringPiece kContents = "Hello World";
+
+  SharedMemory shmem;
+  ASSERT_TRUE(shmem.CreateAndMapAnonymous(kContents.size()));
+  // We do not need to unmap |shmem| to let |shared| map.
+  const StringPiece shmem_contents(static_cast<const char*>(shmem.memory()),
+                                   shmem.requested_size());
+
+  SharedMemoryHandle shared_handle = shmem.handle().Duplicate();
+  ASSERT_TRUE(shared_handle.IsValid());
+  SharedMemory shared(shared_handle, /*readonly=*/false);
+  ASSERT_TRUE(shared.Map(kContents.size()));
+  // The underlying shared memory is created by |shmem|, so both
+  // |shared|.requested_size() and |readonly|.requested_size() are zero.
+  ASSERT_EQ(0U, shared.requested_size());
+  const StringPiece shared_contents(static_cast<const char*>(shared.memory()),
+                                    shmem.requested_size());
+
+  shared_handle = shmem.handle().Duplicate();
+  ASSERT_TRUE(shared_handle.IsValid());
+  ASSERT_TRUE(shared_handle.OwnershipPassesToIPC());
+  SharedMemory readonly(shared_handle, /*readonly=*/true);
+  ASSERT_TRUE(readonly.Map(kContents.size()));
+  ASSERT_EQ(0U, readonly.requested_size());
+  const StringPiece readonly_contents(
+      static_cast<const char*>(readonly.memory()),
+      shmem.requested_size());
+
+  // |shmem| should be able to update the content.
+  memcpy(shmem.memory(), kContents.data(), kContents.size());
+
+  ASSERT_EQ(kContents, shmem_contents);
+  ASSERT_EQ(kContents, shared_contents);
+  ASSERT_EQ(kContents, readonly_contents);
+
+  // |shared| should also be able to update the content.
+  memcpy(shared.memory(), ToLowerASCII(kContents).c_str(), kContents.size());
+
+  ASSERT_EQ(StringPiece(ToLowerASCII(kContents)), shmem_contents);
+  ASSERT_EQ(StringPiece(ToLowerASCII(kContents)), shared_contents);
+  ASSERT_EQ(StringPiece(ToLowerASCII(kContents)), readonly_contents);
+}
+
+TEST_P(SharedMemoryTest, MapAt) {
+  ASSERT_TRUE(SysInfo::VMAllocationGranularity() >= sizeof(uint32_t));
+  const size_t kCount = SysInfo::VMAllocationGranularity();
+  const size_t kDataSize = kCount * sizeof(uint32_t);
+
+  SharedMemory memory;
+  ASSERT_TRUE(memory.CreateAndMapAnonymous(kDataSize));
+  uint32_t* ptr = static_cast<uint32_t*>(memory.memory());
+  ASSERT_NE(ptr, static_cast<void*>(nullptr));
+
+  for (size_t i = 0; i < kCount; ++i) {
+    ptr[i] = i;
+  }
+
+  memory.Unmap();
+
+  off_t offset = SysInfo::VMAllocationGranularity();
+  ASSERT_TRUE(memory.MapAt(offset, kDataSize - offset));
+  offset /= sizeof(uint32_t);
+  ptr = static_cast<uint32_t*>(memory.memory());
+  ASSERT_NE(ptr, static_cast<void*>(nullptr));
+  for (size_t i = offset; i < kCount; ++i) {
+    EXPECT_EQ(ptr[i - offset], i);
+  }
+}
+
+TEST_P(SharedMemoryTest, MapTwice) {
+  const uint32_t kDataSize = 1024;
+  SharedMemory memory;
+  bool rv = memory.CreateAndMapAnonymous(kDataSize);
+  EXPECT_TRUE(rv);
+
+  void* old_address = memory.memory();
+
+  rv = memory.Map(kDataSize);
+  EXPECT_FALSE(rv);
+  EXPECT_EQ(old_address, memory.memory());
+}
+
+#if defined(OS_POSIX)
+// This test is not applicable for iOS (crbug.com/399384).
+#if !defined(OS_IOS)
+// Create a shared memory object, mmap it, and mprotect it to PROT_EXEC.
+TEST_P(SharedMemoryTest, AnonymousExecutable) {
+  const uint32_t kTestSize = 1 << 16;
+
+  SharedMemory shared_memory;
+  SharedMemoryCreateOptions options;
+  options.size = kTestSize;
+  options.executable = true;
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+  // The Mach functionality is tested in shared_memory_mac_unittest.cc.
+  options.type = SharedMemoryHandle::POSIX;
+#endif
+
+  EXPECT_TRUE(shared_memory.Create(options));
+  EXPECT_TRUE(shared_memory.Map(shared_memory.requested_size()));
+
+  EXPECT_EQ(0, mprotect(shared_memory.memory(), shared_memory.requested_size(),
+                        PROT_READ | PROT_EXEC));
+}
+#endif  // !defined(OS_IOS)
+
+#if defined(OS_ANDROID)
+// This test is restricted to Android since there is no way on other platforms
+// to guarantee that a region can never be mapped with PROT_EXEC. E.g. on
+// Linux, anonymous shared regions come from /dev/shm which can be mounted
+// without 'noexec'. In this case, anything can perform an mprotect() to
+// change the protection mask of a given page.
+TEST(SharedMemoryTest, AnonymousIsNotExecutableByDefault) {
+  const uint32_t kTestSize = 1 << 16;
+
+  SharedMemory shared_memory;
+  SharedMemoryCreateOptions options;
+  options.size = kTestSize;
+
+  EXPECT_TRUE(shared_memory.Create(options));
+  EXPECT_TRUE(shared_memory.Map(shared_memory.requested_size()));
+
+  errno = 0;
+  EXPECT_EQ(-1, mprotect(shared_memory.memory(), shared_memory.requested_size(),
+                         PROT_READ | PROT_EXEC));
+  EXPECT_EQ(EACCES, errno);
+}
+#endif  // OS_ANDROID
+
+// Android supports a different permission model than POSIX for its "ashmem"
+// shared memory implementation. So the tests about file permissions are not
+// included on Android. Fuchsia does not use a file-backed shared memory
+// implementation.
+
+#if !defined(OS_ANDROID) && !defined(OS_FUCHSIA)
+
+// Set a umask and restore the old mask on destruction.
+class ScopedUmaskSetter {
+ public:
+  explicit ScopedUmaskSetter(mode_t target_mask) {
+    old_umask_ = umask(target_mask);
+  }
+  ~ScopedUmaskSetter() { umask(old_umask_); }
+ private:
+  mode_t old_umask_;
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ScopedUmaskSetter);
+};
+
+// Create a shared memory object, check its permissions.
+TEST_P(SharedMemoryTest, FilePermissionsAnonymous) {
+  const uint32_t kTestSize = 1 << 8;
+
+  SharedMemory shared_memory;
+  SharedMemoryCreateOptions options;
+  options.size = kTestSize;
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+  // The Mach functionality is tested in shared_memory_mac_unittest.cc.
+  options.type = SharedMemoryHandle::POSIX;
+#endif
+  // Set a file mode creation mask that gives all permissions.
+  ScopedUmaskSetter permissive_mask(S_IWGRP | S_IWOTH);
+
+  EXPECT_TRUE(shared_memory.Create(options));
+
+  int shm_fd =
+      SharedMemory::GetFdFromSharedMemoryHandle(shared_memory.handle());
+  struct stat shm_stat;
+  EXPECT_EQ(0, fstat(shm_fd, &shm_stat));
+  // Neither the group, nor others should be able to read the shared memory
+  // file.
+  EXPECT_FALSE(shm_stat.st_mode & S_IRWXO);
+  EXPECT_FALSE(shm_stat.st_mode & S_IRWXG);
+}
+
+// Create a shared memory object, check its permissions.
+TEST_P(SharedMemoryTest, FilePermissionsNamed) {
+  const uint32_t kTestSize = 1 << 8;
+
+  SharedMemory shared_memory;
+  SharedMemoryCreateOptions options;
+  options.size = kTestSize;
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+  // The Mach functionality is tested in shared_memory_mac_unittest.cc.
+  options.type = SharedMemoryHandle::POSIX;
+#endif
+
+  // Set a file mode creation mask that gives all permissions.
+  ScopedUmaskSetter permissive_mask(S_IWGRP | S_IWOTH);
+
+  EXPECT_TRUE(shared_memory.Create(options));
+
+  int fd = SharedMemory::GetFdFromSharedMemoryHandle(shared_memory.handle());
+  struct stat shm_stat;
+  EXPECT_EQ(0, fstat(fd, &shm_stat));
+  // Neither the group nor others should have been able to open the shared
+  // memory file while its name existed.
+  EXPECT_FALSE(shm_stat.st_mode & S_IRWXO);
+  EXPECT_FALSE(shm_stat.st_mode & S_IRWXG);
+}
+#endif  // !defined(OS_ANDROID) && !defined(OS_FUCHSIA)
+
+#endif  // defined(OS_POSIX)
+
+// Map() will return addresses which are aligned to the platform page size;
+// this varies from platform to platform, though. Since we'd like to advertise
+// a minimum alignment that callers can count on, test for it here.
+TEST_P(SharedMemoryTest, MapMinimumAlignment) {
+  static const int kDataSize = 8192;
+
+  SharedMemory shared_memory;
+  ASSERT_TRUE(shared_memory.CreateAndMapAnonymous(kDataSize));
+  EXPECT_EQ(0U, reinterpret_cast<uintptr_t>(
+      shared_memory.memory()) & (SharedMemory::MAP_MINIMUM_ALIGNMENT - 1));
+  shared_memory.Close();
+}
+
+#if defined(OS_WIN)
+TEST_P(SharedMemoryTest, UnsafeImageSection) {
+  const char kTestSectionName[] = "UnsafeImageSection";
+  wchar_t path[MAX_PATH];
+  EXPECT_GT(::GetModuleFileName(nullptr, path, arraysize(path)), 0U);
+
+  // Map the current executable image to avoid creating a new PE file on disk.
+  base::win::ScopedHandle file_handle(::CreateFile(
+      path, GENERIC_READ, FILE_SHARE_READ, nullptr, OPEN_EXISTING, 0, nullptr));
+  EXPECT_TRUE(file_handle.IsValid());
+  base::win::ScopedHandle section_handle(
+      ::CreateFileMappingA(file_handle.Get(), nullptr,
+                           PAGE_READONLY | SEC_IMAGE, 0, 0, kTestSectionName));
+  EXPECT_TRUE(section_handle.IsValid());
+
+  // Check direct opening by name, from handle and duplicated from handle.
+  SharedMemory shared_memory_open;
+  EXPECT_TRUE(shared_memory_open.Open(kTestSectionName, true));
+  EXPECT_FALSE(shared_memory_open.Map(1));
+  EXPECT_EQ(nullptr, shared_memory_open.memory());
+
+  SharedMemory shared_memory_handle_local(
+      SharedMemoryHandle(section_handle.Take(), 1, UnguessableToken::Create()),
+      true);
+  EXPECT_FALSE(shared_memory_handle_local.Map(1));
+  EXPECT_EQ(nullptr, shared_memory_handle_local.memory());
+
+  // Check that a handle without SECTION_QUERY also can't be mapped as it can't
+  // be checked.
+  SharedMemory shared_memory_handle_dummy;
+  SharedMemoryCreateOptions options;
+  options.size = 0x1000;
+  EXPECT_TRUE(shared_memory_handle_dummy.Create(options));
+  HANDLE handle_no_query;
+  EXPECT_TRUE(::DuplicateHandle(
+      ::GetCurrentProcess(), shared_memory_handle_dummy.handle().GetHandle(),
+      ::GetCurrentProcess(), &handle_no_query, FILE_MAP_READ, FALSE, 0));
+  SharedMemory shared_memory_handle_no_query(
+      SharedMemoryHandle(handle_no_query, options.size,
+                         UnguessableToken::Create()),
+      true);
+  EXPECT_FALSE(shared_memory_handle_no_query.Map(1));
+  EXPECT_EQ(nullptr, shared_memory_handle_no_query.memory());
+}
+#endif  // defined(OS_WIN)
+
+// iOS does not allow multiple processes.
+// Android ashmem does not support named shared memory.
+// Fuchsia SharedMemory does not support named shared memory.
+// Mac SharedMemory does not support named shared memory. crbug.com/345734
+#if !defined(OS_IOS) && !defined(OS_ANDROID) && !defined(OS_MACOSX) && \
+    !defined(OS_FUCHSIA)
+// On POSIX it is especially important that we test shmem across processes,
+// not just across threads. But the test is enabled on all platforms.
+class SharedMemoryProcessTest : public MultiProcessTest {
+ public:
+  static void CleanUp() {
+    SharedMemory memory;
+    memory.Delete(s_test_name_);
+  }
+
+  static int TaskTestMain() {
+    int errors = 0;
+    SharedMemory memory;
+    bool rv = memory.CreateNamedDeprecated(s_test_name_, true, s_data_size_);
+    EXPECT_TRUE(rv);
+    if (!rv)
+      errors++;
+    rv = memory.Map(s_data_size_);
+    EXPECT_TRUE(rv);
+    if (!rv)
+      errors++;
+    int* ptr = static_cast<int*>(memory.memory());
+
+    // This runs concurrently in multiple processes. Writes need to be atomic.
+    subtle::Barrier_AtomicIncrement(ptr, 1);
+    memory.Close();
+    return errors;
+  }
+
+  static const char s_test_name_[];
+  static const uint32_t s_data_size_;
+};
+
+const char SharedMemoryProcessTest::s_test_name_[] = "MPMem";
+const uint32_t SharedMemoryProcessTest::s_data_size_ = 1024;
+
+TEST_F(SharedMemoryProcessTest, SharedMemoryAcrossProcesses) {
+  const int kNumTasks = 5;
+
+  SharedMemoryProcessTest::CleanUp();
+
+  // Create a shared memory region. Set the first word to 0.
+  SharedMemory memory;
+  bool rv = memory.CreateNamedDeprecated(s_test_name_, true, s_data_size_);
+  ASSERT_TRUE(rv);
+  rv = memory.Map(s_data_size_);
+  ASSERT_TRUE(rv);
+  int* ptr = static_cast<int*>(memory.memory());
+  *ptr = 0;
+
+  // Start |kNumTasks| processes, each of which atomically increments the first
+  // word by 1.
+  Process processes[kNumTasks];
+  for (int index = 0; index < kNumTasks; ++index) {
+    processes[index] = SpawnChild("SharedMemoryTestMain");
+    ASSERT_TRUE(processes[index].IsValid());
+  }
+
+  // Check that each process exited correctly.
+  int exit_code = 0;
+  for (int index = 0; index < kNumTasks; ++index) {
+    EXPECT_TRUE(processes[index].WaitForExit(&exit_code));
+    EXPECT_EQ(0, exit_code);
+  }
+
+  // Check that the shared memory region reflects |kNumTasks| increments.
+  ASSERT_EQ(kNumTasks, *ptr);
+
+  memory.Close();
+  SharedMemoryProcessTest::CleanUp();
+}
+
+MULTIPROCESS_TEST_MAIN(SharedMemoryTestMain) {
+  return SharedMemoryProcessTest::TaskTestMain();
+}
+#endif  // !defined(OS_IOS) && !defined(OS_ANDROID) && !defined(OS_MACOSX) &&
+        // !defined(OS_FUCHSIA)
+
+TEST_P(SharedMemoryTest, MappedId) {
+  const uint32_t kDataSize = 1024;
+  SharedMemory memory;
+  SharedMemoryCreateOptions options;
+  options.size = kDataSize;
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+  // The Mach functionality is tested in shared_memory_mac_unittest.cc.
+  options.type = SharedMemoryHandle::POSIX;
+#endif
+
+  EXPECT_TRUE(memory.Create(options));
+  base::UnguessableToken id = memory.handle().GetGUID();
+  EXPECT_FALSE(id.is_empty());
+  EXPECT_TRUE(memory.mapped_id().is_empty());
+
+  EXPECT_TRUE(memory.Map(kDataSize));
+  EXPECT_EQ(id, memory.mapped_id());
+
+  memory.Close();
+  EXPECT_EQ(id, memory.mapped_id());
+
+  memory.Unmap();
+  EXPECT_TRUE(memory.mapped_id().is_empty());
+}
+
+INSTANTIATE_TEST_CASE_P(Default,
+                        SharedMemoryTest,
+                        ::testing::Values(Mode::Default));
+#if defined(OS_LINUX) && !defined(OS_CHROMEOS)
+INSTANTIATE_TEST_CASE_P(SkipDevShm,
+                        SharedMemoryTest,
+                        ::testing::Values(Mode::DisableDevShm));
+#endif  // defined(OS_LINUX) && !defined(OS_CHROMEOS)
+
+#if defined(OS_ANDROID)
+TEST(SharedMemoryTest, ReadOnlyRegions) {
+  const uint32_t kDataSize = 1024;
+  SharedMemory memory;
+  SharedMemoryCreateOptions options;
+  options.size = kDataSize;
+  EXPECT_TRUE(memory.Create(options));
+
+  EXPECT_FALSE(memory.handle().IsRegionReadOnly());
+
+  // Check that it is possible to map the region directly from the fd.
+  int region_fd = memory.handle().GetHandle();
+  EXPECT_GE(region_fd, 0);
+  void* address = mmap(nullptr, kDataSize, PROT_READ | PROT_WRITE, MAP_SHARED,
+                       region_fd, 0);
+  bool success = address && address != MAP_FAILED;
+  ASSERT_TRUE(address);
+  ASSERT_NE(address, MAP_FAILED);
+  if (success) {
+    EXPECT_EQ(0, munmap(address, kDataSize));
+  }
+
+  ASSERT_TRUE(memory.handle().SetRegionReadOnly());
+  EXPECT_TRUE(memory.handle().IsRegionReadOnly());
+
+  // Check that it is no longer possible to map the region read/write.
+  errno = 0;
+  address = mmap(nullptr, kDataSize, PROT_READ | PROT_WRITE, MAP_SHARED,
+                 region_fd, 0);
+  success = address && address != MAP_FAILED;
+  ASSERT_FALSE(success);
+  ASSERT_EQ(EPERM, errno);
+  if (success) {
+    EXPECT_EQ(0, munmap(address, kDataSize));
+  }
+}
+
+TEST(SharedMemoryTest, ReadOnlyDescriptors) {
+  const uint32_t kDataSize = 1024;
+  SharedMemory memory;
+  SharedMemoryCreateOptions options;
+  options.size = kDataSize;
+  EXPECT_TRUE(memory.Create(options));
+
+  EXPECT_FALSE(memory.handle().IsRegionReadOnly());
+
+  // Getting a read-only descriptor should not make the region read-only itself.
+  SharedMemoryHandle ro_handle = memory.GetReadOnlyHandle();
+  EXPECT_FALSE(memory.handle().IsRegionReadOnly());
+
+  // Mapping a writable region from a read-only descriptor should not be
+  // possible; it will DCHECK() in debug builds (see the check below) and
+  // return false in release builds.
+  {
+    bool dcheck_fired = false;
+    logging::ScopedLogAssertHandler log_assert(
+        base::BindRepeating([](bool* flag, const char*, int, base::StringPiece,
+                               base::StringPiece) { *flag = true; },
+                            base::Unretained(&dcheck_fired)));
+
+    SharedMemory rw_region(ro_handle.Duplicate(), /* read_only */ false);
+    EXPECT_FALSE(rw_region.Map(kDataSize));
+    EXPECT_EQ(DCHECK_IS_ON() ? true : false, dcheck_fired);
+  }
+
+  // Nor shall it turn the region read-only itself.
+  EXPECT_FALSE(ro_handle.IsRegionReadOnly());
+
+  // Mapping a read-only region from a read-only descriptor should work.
+  SharedMemory ro_region(ro_handle.Duplicate(), /* read_only */ true);
+  EXPECT_TRUE(ro_region.Map(kDataSize));
+
+  // And it should turn the region read-only too.
+  EXPECT_TRUE(ro_handle.IsRegionReadOnly());
+  EXPECT_TRUE(memory.handle().IsRegionReadOnly());
+  EXPECT_FALSE(memory.Map(kDataSize));
+
+  ro_handle.Close();
+}
+
+#endif  // OS_ANDROID
+
+}  // namespace base
diff --git a/base/memory/shared_memory_win.cc b/base/memory/shared_memory_win.cc
new file mode 100644
index 0000000..cf06dd3
--- /dev/null
+++ b/base/memory/shared_memory_win.cc
@@ -0,0 +1,381 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory.h"
+
+#include <aclapi.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/allocator/partition_allocator/page_allocator.h"
+#include "base/logging.h"
+#include "base/memory/shared_memory_tracker.h"
+#include "base/metrics/histogram_functions.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/rand_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/unguessable_token.h"
+
+namespace base {
+namespace {
+
+// Errors that can occur during Shared Memory construction.
+// These match tools/metrics/histograms/histograms.xml.
+// This enum is append-only.
+enum CreateError {
+  SUCCESS = 0,
+  SIZE_ZERO = 1,
+  SIZE_TOO_LARGE = 2,
+  INITIALIZE_ACL_FAILURE = 3,
+  INITIALIZE_SECURITY_DESC_FAILURE = 4,
+  SET_SECURITY_DESC_FAILURE = 5,
+  CREATE_FILE_MAPPING_FAILURE = 6,
+  REDUCE_PERMISSIONS_FAILURE = 7,
+  ALREADY_EXISTS = 8,
+  CREATE_ERROR_LAST = ALREADY_EXISTS
+};
+
+// Emits UMA metrics about encountered errors. Pass zero (0) for |winerror|
+// if there is no associated Windows error.
+void LogError(CreateError error, DWORD winerror) {
+  UMA_HISTOGRAM_ENUMERATION("SharedMemory.CreateError", error,
+                            CREATE_ERROR_LAST + 1);
+  static_assert(ERROR_SUCCESS == 0, "Windows error code changed!");
+  if (winerror != ERROR_SUCCESS)
+    UmaHistogramSparse("SharedMemory.CreateWinError", winerror);
+}
+
+typedef enum _SECTION_INFORMATION_CLASS {
+  SectionBasicInformation,
+} SECTION_INFORMATION_CLASS;
+
+typedef struct _SECTION_BASIC_INFORMATION {
+  PVOID BaseAddress;
+  ULONG Attributes;
+  LARGE_INTEGER Size;
+} SECTION_BASIC_INFORMATION, *PSECTION_BASIC_INFORMATION;
+
+typedef ULONG(__stdcall* NtQuerySectionType)(
+    HANDLE SectionHandle,
+    SECTION_INFORMATION_CLASS SectionInformationClass,
+    PVOID SectionInformation,
+    ULONG SectionInformationLength,
+    PULONG ResultLength);
+
+// Returns the length of the memory section starting at the supplied address.
+size_t GetMemorySectionSize(void* address) {
+  MEMORY_BASIC_INFORMATION memory_info;
+  if (!::VirtualQuery(address, &memory_info, sizeof(memory_info)))
+    return 0;
+  return memory_info.RegionSize - (static_cast<char*>(address) -
+         static_cast<char*>(memory_info.AllocationBase));
+}
+
+// Checks if the section object is safe to map. At the moment this just means
+// it's not an image section.
+bool IsSectionSafeToMap(HANDLE handle) {
+  static NtQuerySectionType nt_query_section_func;
+  if (!nt_query_section_func) {
+    nt_query_section_func = reinterpret_cast<NtQuerySectionType>(
+        ::GetProcAddress(::GetModuleHandle(L"ntdll.dll"), "NtQuerySection"));
+    DCHECK(nt_query_section_func);
+  }
+
+  // The handle must have SECTION_QUERY access for this to succeed.
+  SECTION_BASIC_INFORMATION basic_information = {};
+  ULONG status =
+      nt_query_section_func(handle, SectionBasicInformation, &basic_information,
+                            sizeof(basic_information), nullptr);
+  if (status)
+    return false;
+  return (basic_information.Attributes & SEC_IMAGE) != SEC_IMAGE;
+}
+
+// Returns a HANDLE on success and |nullptr| on failure.
+// This function is similar to CreateFileMapping, but removes the permissions
+// WRITE_DAC, WRITE_OWNER, READ_CONTROL, and DELETE.
+//
+// A newly created file mapping has two sets of permissions. It has access
+// control permissions (WRITE_DAC, WRITE_OWNER, READ_CONTROL, and DELETE) and
+// file permissions (FILE_MAP_READ, FILE_MAP_WRITE, etc.). ::DuplicateHandle()
+// with the parameter DUPLICATE_SAME_ACCESS copies both sets of permissions.
+//
+// The Chrome sandbox prevents HANDLEs with the WRITE_DAC permission from being
+// duplicated into unprivileged processes. But the only way to copy file
+// permissions is with the parameter DUPLICATE_SAME_ACCESS. This means that
+// there is no way for a privileged process to duplicate a file mapping into an
+// unprivileged process while maintaining the previous file permissions.
+//
+// By removing all access control permissions of a file mapping immediately
+// after creation, ::DuplicateHandle() effectively only copies the file
+// permissions.
+HANDLE CreateFileMappingWithReducedPermissions(SECURITY_ATTRIBUTES* sa,
+                                               size_t rounded_size,
+                                               LPCWSTR name) {
+  HANDLE h = CreateFileMapping(INVALID_HANDLE_VALUE, sa, PAGE_READWRITE, 0,
+                               static_cast<DWORD>(rounded_size), name);
+  if (!h) {
+    LogError(CREATE_FILE_MAPPING_FAILURE, GetLastError());
+    return nullptr;
+  }
+
+  HANDLE h2;
+  BOOL success = ::DuplicateHandle(
+      GetCurrentProcess(), h, GetCurrentProcess(), &h2,
+      FILE_MAP_READ | FILE_MAP_WRITE | SECTION_QUERY, FALSE, 0);
+  BOOL rv = ::CloseHandle(h);
+  DCHECK(rv);
+
+  if (!success) {
+    LogError(REDUCE_PERMISSIONS_FAILURE, GetLastError());
+    return nullptr;
+  }
+
+  return h2;
+}
+
+}  // namespace
+
+SharedMemory::SharedMemory() {}
+
+SharedMemory::SharedMemory(const string16& name) : name_(name) {}
+
+SharedMemory::SharedMemory(const SharedMemoryHandle& handle, bool read_only)
+    : external_section_(true), shm_(handle), read_only_(read_only) {}
+
+SharedMemory::~SharedMemory() {
+  Unmap();
+  Close();
+}
+
+// static
+bool SharedMemory::IsHandleValid(const SharedMemoryHandle& handle) {
+  return handle.IsValid();
+}
+
+// static
+void SharedMemory::CloseHandle(const SharedMemoryHandle& handle) {
+  handle.Close();
+}
+
+// static
+size_t SharedMemory::GetHandleLimit() {
+  // Rounded down from value reported here:
+  // http://blogs.technet.com/b/markrussinovich/archive/2009/09/29/3283844.aspx
+  return static_cast<size_t>(1 << 23);
+}
+
+// static
+SharedMemoryHandle SharedMemory::DuplicateHandle(
+    const SharedMemoryHandle& handle) {
+  return handle.Duplicate();
+}
+
+bool SharedMemory::CreateAndMapAnonymous(size_t size) {
+  return CreateAnonymous(size) && Map(size);
+}
+
+bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
+  // TODO(crbug.com/210609): NaCl forces us to round up to 64 KiB here,
+  // wasting 32 KiB per mapping on average.
+  static const size_t kSectionMask = 65536 - 1;
+  DCHECK(!options.executable);
+  DCHECK(!shm_.IsValid());
+  if (options.size == 0) {
+    LogError(SIZE_ZERO, 0);
+    return false;
+  }
+
+  // Check maximum accounting for overflow.
+  if (options.size >
+      static_cast<size_t>(std::numeric_limits<int>::max()) - kSectionMask) {
+    LogError(SIZE_TOO_LARGE, 0);
+    return false;
+  }
+
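+  // Round up to the next 64 KiB section boundary; e.g. a 1-byte request
+  // reserves a full 64 KiB section.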
+  size_t rounded_size = (options.size + kSectionMask) & ~kSectionMask;
+  name_ = options.name_deprecated ?
+      ASCIIToUTF16(*options.name_deprecated) : L"";
+  SECURITY_ATTRIBUTES sa = {sizeof(sa), nullptr, FALSE};
+  SECURITY_DESCRIPTOR sd;
+  ACL dacl;
+
+  if (name_.empty()) {
+    // Add an empty DACL to enforce anonymous read-only sections.
+    sa.lpSecurityDescriptor = &sd;
+    if (!InitializeAcl(&dacl, sizeof(dacl), ACL_REVISION)) {
+      LogError(INITIALIZE_ACL_FAILURE, GetLastError());
+      return false;
+    }
+    if (!InitializeSecurityDescriptor(&sd, SECURITY_DESCRIPTOR_REVISION)) {
+      LogError(INITIALIZE_SECURITY_DESC_FAILURE, GetLastError());
+      return false;
+    }
+    if (!SetSecurityDescriptorDacl(&sd, TRUE, &dacl, FALSE)) {
+      LogError(SET_SECURITY_DESC_FAILURE, GetLastError());
+      return false;
+    }
+
+    // Windows ignores DACLs on certain unnamed objects (like shared sections).
+    // So, we generate a random name when we need to enforce read-only.
+    uint64_t rand_values[4];
+    RandBytes(&rand_values, sizeof(rand_values));
+    name_ = StringPrintf(L"CrSharedMem_%016llx%016llx%016llx%016llx",
+                         rand_values[0], rand_values[1],
+                         rand_values[2], rand_values[3]);
+  }
+  DCHECK(!name_.empty());
+  shm_ = SharedMemoryHandle(
+      CreateFileMappingWithReducedPermissions(&sa, rounded_size, name_.c_str()),
+      rounded_size, UnguessableToken::Create());
+  if (!shm_.IsValid()) {
+    // The error is logged within CreateFileMappingWithReducedPermissions().
+    return false;
+  }
+
+  requested_size_ = options.size;
+
+  // Check if the shared memory pre-exists.
+  if (GetLastError() == ERROR_ALREADY_EXISTS) {
+    // If the file already existed, set requested_size_ to 0 to show that
+    // we don't know the size.
+    requested_size_ = 0;
+    external_section_ = true;
+    if (!options.open_existing_deprecated) {
+      Close();
+      // From "if" above: GetLastError() == ERROR_ALREADY_EXISTS.
+      LogError(ALREADY_EXISTS, ERROR_ALREADY_EXISTS);
+      return false;
+    }
+  }
+
+  LogError(SUCCESS, ERROR_SUCCESS);
+  return true;
+}
+
+bool SharedMemory::Delete(const std::string& name) {
+  // intentionally empty -- there is nothing for us to do on Windows.
+  return true;
+}
+
+bool SharedMemory::Open(const std::string& name, bool read_only) {
+  DCHECK(!shm_.IsValid());
+  DWORD access = FILE_MAP_READ | SECTION_QUERY;
+  if (!read_only)
+    access |= FILE_MAP_WRITE;
+  name_ = ASCIIToUTF16(name);
+  read_only_ = read_only;
+
+  // This form of sharing shared memory is deprecated. https://crbug.com/345734.
+  // However, we can't get rid of it without a significant refactor because
+  // it's used to communicate between two versions of the same service
+  // process, very early in the life cycle.
+  // Technically, we should also pass the GUID from the original shared memory
+  // region. We don't do that - this means that we will overcount this memory,
+  // which thankfully isn't relevant since Chrome only communicates with a
+  // single version of the service process.
+  // We pass the size |0|, which is a dummy size and wrong, but otherwise
+  // harmless.
+  shm_ = SharedMemoryHandle(
+      OpenFileMapping(access, false, name_.empty() ? nullptr : name_.c_str()),
+      0u, UnguessableToken::Create());
+  if (!shm_.IsValid())
+    return false;
+  // If a name was specified, assume it's an external section.
+  if (!name_.empty())
+    external_section_ = true;
+  // Note: size_ is not set in this case.
+  return true;
+}
+
+bool SharedMemory::MapAt(off_t offset, size_t bytes) {
+  if (!shm_.IsValid()) {
+    DLOG(ERROR) << "Invalid SharedMemoryHandle.";
+    return false;
+  }
+
+  if (bytes > static_cast<size_t>(std::numeric_limits<int>::max())) {
+    DLOG(ERROR) << "Bytes required exceeds the 2G limitation.";
+    return false;
+  }
+
+  if (memory_) {
+    DLOG(ERROR) << "The SharedMemory has been mapped already.";
+    return false;
+  }
+
+  if (external_section_ && !IsSectionSafeToMap(shm_.GetHandle())) {
+    DLOG(ERROR) << "SharedMemoryHandle is not safe to be mapped.";
+    return false;
+  }
+
+  // Try to map the shared memory. On the first failure, release any reserved
+  // address space for a single retry.
+  for (int i = 0; i < 2; ++i) {
+    memory_ = MapViewOfFile(
+        shm_.GetHandle(),
+        read_only_ ? FILE_MAP_READ : FILE_MAP_READ | FILE_MAP_WRITE,
+        static_cast<uint64_t>(offset) >> 32, static_cast<DWORD>(offset), bytes);
+    if (memory_)
+      break;
+    ReleaseReservation();
+  }
+  if (!memory_) {
+    DPLOG(ERROR) << "Failed executing MapViewOfFile";
+    return false;
+  }
+
+  DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(memory_) &
+                    (SharedMemory::MAP_MINIMUM_ALIGNMENT - 1));
+  mapped_size_ = GetMemorySectionSize(memory_);
+  mapped_id_ = shm_.GetGUID();
+  SharedMemoryTracker::GetInstance()->IncrementMemoryUsage(*this);
+  return true;
+}
+
+bool SharedMemory::Unmap() {
+  if (!memory_)
+    return false;
+
+  SharedMemoryTracker::GetInstance()->DecrementMemoryUsage(*this);
+  UnmapViewOfFile(memory_);
+  memory_ = nullptr;
+  mapped_id_ = UnguessableToken();
+  return true;
+}
+
+SharedMemoryHandle SharedMemory::GetReadOnlyHandle() const {
+  HANDLE result;
+  ProcessHandle process = GetCurrentProcess();
+  if (!::DuplicateHandle(process, shm_.GetHandle(), process, &result,
+                         FILE_MAP_READ | SECTION_QUERY, FALSE, 0)) {
+    return SharedMemoryHandle();
+  }
+  SharedMemoryHandle handle =
+      SharedMemoryHandle(result, shm_.GetSize(), shm_.GetGUID());
+  handle.SetOwnershipPassesToIPC(true);
+  return handle;
+}
+
+void SharedMemory::Close() {
+  if (shm_.IsValid()) {
+    shm_.Close();
+    shm_ = SharedMemoryHandle();
+  }
+}
+
+SharedMemoryHandle SharedMemory::handle() const {
+  return shm_;
+}
+
+SharedMemoryHandle SharedMemory::TakeHandle() {
+  SharedMemoryHandle handle(shm_);
+  handle.SetOwnershipPassesToIPC(true);
+  Unmap();
+  shm_ = SharedMemoryHandle();
+  return handle;
+}
+
+}  // namespace base
diff --git a/base/memory/shared_memory_win_unittest.cc b/base/memory/shared_memory_win_unittest.cc
new file mode 100644
index 0000000..5fc132d
--- /dev/null
+++ b/base/memory/shared_memory_win_unittest.cc
@@ -0,0 +1,224 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <windows.h>
+#include <sddl.h>
+
+#include <memory>
+
+#include "base/command_line.h"
+#include "base/memory/free_deleter.h"
+#include "base/memory/shared_memory.h"
+#include "base/process/process.h"
+#include "base/rand_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/strings/sys_string_conversions.h"
+#include "base/test/multiprocess_test.h"
+#include "base/test/test_timeouts.h"
+#include "base/win/scoped_handle.h"
+#include "base/win/win_util.h"
+#include "testing/multiprocess_func_list.h"
+
+namespace base {
+namespace {
+const char* kHandleSwitchName = "shared_memory_win_test_switch";
+
+// Creates a process token with a low integrity SID.
+win::ScopedHandle CreateLowIntegritySID() {
+  HANDLE process_token_raw = nullptr;
+  BOOL success = ::OpenProcessToken(GetCurrentProcess(),
+                                    TOKEN_DUPLICATE | TOKEN_ADJUST_DEFAULT |
+                                        TOKEN_QUERY | TOKEN_ASSIGN_PRIMARY,
+                                    &process_token_raw);
+  if (!success)
+    return base::win::ScopedHandle();
+  win::ScopedHandle process_token(process_token_raw);
+
+  HANDLE lowered_process_token_raw = nullptr;
+  success =
+      ::DuplicateTokenEx(process_token.Get(), 0, NULL, SecurityImpersonation,
+                         TokenPrimary, &lowered_process_token_raw);
+  if (!success)
+    return base::win::ScopedHandle();
+  win::ScopedHandle lowered_process_token(lowered_process_token_raw);
+
+  // Low integrity SID
+  WCHAR integrity_sid_string[20] = L"S-1-16-4096";
+  PSID integrity_sid = nullptr;
+  success = ::ConvertStringSidToSid(integrity_sid_string, &integrity_sid);
+  if (!success)
+    return base::win::ScopedHandle();
+
+  TOKEN_MANDATORY_LABEL TIL = {};
+  TIL.Label.Attributes = SE_GROUP_INTEGRITY;
+  TIL.Label.Sid = integrity_sid;
+  success = ::SetTokenInformation(
+      lowered_process_token.Get(), TokenIntegrityLevel, &TIL,
+      sizeof(TOKEN_MANDATORY_LABEL) + GetLengthSid(integrity_sid));
+  if (!success)
+    return base::win::ScopedHandle();
+  return lowered_process_token;
+}
+
+// Reads a HANDLE from the pipe as a raw int, least significant digit first.
+win::ScopedHandle ReadHandleFromPipe(HANDLE pipe) {
+  // Read from parent pipe.
+  const size_t buf_size = 1000;
+  char buffer[buf_size];
+  memset(buffer, 0, buf_size);
+  DWORD bytes_read;
+  BOOL success = ReadFile(pipe, buffer, buf_size, &bytes_read, NULL);
+
+  if (!success || bytes_read == 0) {
+    LOG(ERROR) << "Failed to read handle from pipe.";
+    return win::ScopedHandle();
+  }
+
+  int handle_as_int = 0;
+  int power_of_ten = 1;
+  for (unsigned int i = 0; i < bytes_read; ++i) {
+    handle_as_int += buffer[i] * power_of_ten;
+    power_of_ten *= 10;
+  }
+
+  return win::ScopedHandle(reinterpret_cast<HANDLE>(handle_as_int));
+}
+
+// Writes a HANDLE to a pipe as a raw int, least significant digit first.
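+// For example, the handle value 1234 is transmitted as the byte sequence
+// {4, 3, 2, 1}.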
+void WriteHandleToPipe(HANDLE pipe, HANDLE handle) {
+  uint32_t handle_as_int = base::win::HandleToUint32(handle);
+
+  std::unique_ptr<char, base::FreeDeleter> buffer(
+      static_cast<char*>(malloc(1000)));
+  size_t index = 0;
+  while (handle_as_int > 0) {
+    buffer.get()[index] = handle_as_int % 10;
+    handle_as_int /= 10;
+    ++index;
+  }
+
+  ::ConnectNamedPipe(pipe, nullptr);
+  DWORD written;
+  ASSERT_TRUE(::WriteFile(pipe, buffer.get(), index, &written, NULL));
+}
+
+// Creates a communication pipe with the given name.
+win::ScopedHandle CreateCommunicationPipe(const std::wstring& name) {
+  return win::ScopedHandle(CreateNamedPipe(name.c_str(),  // pipe name
+                                           PIPE_ACCESS_DUPLEX, PIPE_WAIT, 255,
+                                           1000, 1000, 0, NULL));
+}
+
+// Generates a random name for a communication pipe.
+std::wstring CreateCommunicationPipeName() {
+  uint64_t rand_values[4];
+  RandBytes(&rand_values, sizeof(rand_values));
+  std::wstring child_pipe_name = StringPrintf(
+      L"\\\\.\\pipe\\SharedMemoryWinTest_%016llx%016llx%016llx%016llx",
+      rand_values[0], rand_values[1], rand_values[2], rand_values[3]);
+  return child_pipe_name;
+}
+
+class SharedMemoryWinTest : public base::MultiProcessTest {
+ protected:
+  CommandLine MakeCmdLine(const std::string& procname) override {
+    CommandLine line = base::MultiProcessTest::MakeCmdLine(procname);
+    line.AppendSwitchASCII(kHandleSwitchName, communication_pipe_name_);
+    return line;
+  }
+
+  std::string communication_pipe_name_;
+};
+
+MULTIPROCESS_TEST_MAIN(LowerPermissions) {
+  std::string handle_name =
+      CommandLine::ForCurrentProcess()->GetSwitchValueASCII(kHandleSwitchName);
+  std::wstring handle_name16 = SysUTF8ToWide(handle_name);
+  win::ScopedHandle parent_pipe(
+      ::CreateFile(handle_name16.c_str(),  // pipe name
+                   GENERIC_READ,
+                   0,              // no sharing
+                   NULL,           // default security attributes
+                   OPEN_EXISTING,  // opens existing pipe
+                   0,              // default attributes
+                   NULL));         // no template file
+  if (parent_pipe.Get() == INVALID_HANDLE_VALUE) {
+    LOG(ERROR) << "Failed to open communication pipe.";
+    return 1;
+  }
+
+  win::ScopedHandle received_handle = ReadHandleFromPipe(parent_pipe.Get());
+  if (!received_handle.Get()) {
+    LOG(ERROR) << "Failed to read handle from pipe.";
+    return 1;
+  }
+
+  // Attempting to add the WRITE_DAC permission should fail.
+  HANDLE duped_handle;
+  BOOL success = ::DuplicateHandle(GetCurrentProcess(), received_handle.Get(),
+                                   GetCurrentProcess(), &duped_handle,
+                                   FILE_MAP_READ | WRITE_DAC, FALSE, 0);
+  if (success) {
+    LOG(ERROR) << "Should not have been able to add WRITE_DAC permission.";
+    return 1;
+  }
+
+  // Attempting to add the FILE_MAP_WRITE permission should fail.
+  success = ::DuplicateHandle(GetCurrentProcess(), received_handle.Get(),
+                              GetCurrentProcess(), &duped_handle,
+                              FILE_MAP_READ | FILE_MAP_WRITE, FALSE, 0);
+  if (success) {
+    LOG(ERROR) << "Should not have been able to add FILE_MAP_WRITE permission.";
+    return 1;
+  }
+
+  // Attempting to duplicate the HANDLE with the same permissions should
+  // succeed.
+  success = ::DuplicateHandle(GetCurrentProcess(), received_handle.Get(),
+                              GetCurrentProcess(), &duped_handle, FILE_MAP_READ,
+                              FALSE, 0);
+  if (!success) {
+    LOG(ERROR) << "Failed to duplicate handle.";
+    return 4;
+  }
+  ::CloseHandle(duped_handle);
+  return 0;
+}
+
+TEST_F(SharedMemoryWinTest, LowerPermissions) {
+  std::wstring communication_pipe_name = CreateCommunicationPipeName();
+  communication_pipe_name_ = SysWideToUTF8(communication_pipe_name);
+
+  win::ScopedHandle communication_pipe =
+      CreateCommunicationPipe(communication_pipe_name);
+  ASSERT_TRUE(communication_pipe.Get());
+
+  win::ScopedHandle lowered_process_token = CreateLowIntegritySID();
+  ASSERT_TRUE(lowered_process_token.Get());
+
+  base::LaunchOptions options;
+  options.as_user = lowered_process_token.Get();
+  base::Process process = SpawnChildWithOptions("LowerPermissions", options);
+  ASSERT_TRUE(process.IsValid());
+
+  SharedMemory memory;
+  memory.CreateAndMapAnonymous(1001);
+
+  // Duplicate into child process, giving only FILE_MAP_READ permissions.
+  HANDLE raw_handle = nullptr;
+  ::DuplicateHandle(::GetCurrentProcess(), memory.handle().GetHandle(),
+                    process.Handle(), &raw_handle,
+                    FILE_MAP_READ | SECTION_QUERY, FALSE, 0);
+  ASSERT_TRUE(raw_handle);
+
+  WriteHandleToPipe(communication_pipe.Get(), raw_handle);
+
+  int exit_code;
+  EXPECT_TRUE(process.WaitForExitWithTimeout(TestTimeouts::action_max_timeout(),
+                                             &exit_code));
+  EXPECT_EQ(0, exit_code);
+}
+
+}  // namespace
+}  // namespace base
diff --git a/base/memory/singleton.h b/base/memory/singleton.h
new file mode 100644
index 0000000..880ef0a
--- /dev/null
+++ b/base/memory/singleton.h
@@ -0,0 +1,262 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+// PLEASE READ: Do you really need a singleton? If possible, use a
+// function-local static of type base::NoDestructor<T> instead:
+//
+// Factory& Factory::GetInstance() {
+//   static base::NoDestructor<Factory> instance;
+//   return *instance;
+// }
+// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+//
+// Singletons make it hard to determine the lifetime of an object, which can
+// lead to buggy code and spurious crashes.
+//
+// Instead of adding another singleton into the mix, try to identify either:
+//   a) An existing singleton that can manage your object's lifetime
+//   b) Locations where you can deterministically create the object and pass
+//      into other objects
+//
+// If you absolutely need a singleton, please keep them as trivial as possible
+// and ideally a leaf dependency. Singletons get problematic when they attempt
+// to do too much in their destructor or have circular dependencies.
+
+#ifndef BASE_MEMORY_SINGLETON_H_
+#define BASE_MEMORY_SINGLETON_H_
+
+#include "base/at_exit.h"
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/lazy_instance_helpers.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/threading/thread_restrictions.h"
+
+namespace base {
+
+// Default traits for Singleton<Type>. Calls operator new and operator delete on
+// the object. Registers automatic deletion at process exit.
+// Overload if you need arguments or another memory allocation function.
+template<typename Type>
+struct DefaultSingletonTraits {
+  // Allocates the object.
+  static Type* New() {
+    // The parentheses are very important here; they force POD type
+    // initialization.
+    return new Type();
+  }
+
+  // Destroys the object.
+  static void Delete(Type* x) {
+    delete x;
+  }
+
+  // Set to true to automatically register deletion of the object on process
+  // exit. See below for the required call that makes this happen.
+  static const bool kRegisterAtExit = true;
+
+#if DCHECK_IS_ON()
+  // Set to false to disallow access on a non-joinable thread.  This is
+  // different from kRegisterAtExit because StaticMemorySingletonTraits allows
+  // access on non-joinable threads, and gracefully handles this.
+  static const bool kAllowedToAccessOnNonjoinableThread = false;
+#endif
+};
+
+
+// Alternate traits for use with the Singleton<Type>.  Identical to
+// DefaultSingletonTraits except that the Singleton will not be cleaned up
+// at exit.
+template<typename Type>
+struct LeakySingletonTraits : public DefaultSingletonTraits<Type> {
+  static const bool kRegisterAtExit = false;
+#if DCHECK_IS_ON()
+  static const bool kAllowedToAccessOnNonjoinableThread = true;
+#endif
+};
+
+// Alternate traits for use with the Singleton<Type>.  Allocates memory
+// for the singleton instance from a static buffer.  The singleton will
+// be cleaned up at exit, but can't be revived after destruction unless
+// the ResurrectForTesting() method is called.
+//
+// This is useful for a certain category of things, notably logging and
+// tracing, where the singleton instance is of a type carefully constructed to
+// be safe to access post-destruction.
+// In logging and tracing you'll typically get stray calls at odd times, like
+// during static destruction or thread teardown, and there's a termination
+// race on the heap-based singleton: if one thread calls get() while another
+// thread initiates AtExit processing, the first thread may call into an
+// object residing in deallocated memory. If the instance is allocated from
+// the data segment, this is survivable.
+//
+// The destructor is to deallocate system resources, in this case to unregister
+// a callback the system will invoke when logging levels change. Note that
+// this is also used in e.g. Chrome Frame, where you have to allow for the
+// possibility of loading briefly into someone else's process space, and
+// so leaking is not an option, as that would sabotage the state of your host
+// process once you've unloaded.
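+//
+// A minimal usage sketch, assuming a hypothetical TraceLog class:
+//   TraceLog* TraceLog::GetInstance() {
+//     return base::Singleton<
+//         TraceLog, StaticMemorySingletonTraits<TraceLog>>::get();
+//   }
+// Callers must handle GetInstance() returning null once the instance has been
+// deleted during AtExit processing.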
+template <typename Type>
+struct StaticMemorySingletonTraits {
+  // WARNING: Users must handle a New() that returns null.
+  static Type* New() {
+    // Only constructs once and returns pointer; otherwise returns null.
+    if (subtle::NoBarrier_AtomicExchange(&dead_, 1))
+      return nullptr;
+
+    return new (buffer_) Type();
+  }
+
+  static void Delete(Type* p) {
+    if (p)
+      p->Type::~Type();
+  }
+
+  static const bool kRegisterAtExit = true;
+
+#if DCHECK_IS_ON()
+  static const bool kAllowedToAccessOnNonjoinableThread = true;
+#endif
+
+  static void ResurrectForTesting() { subtle::NoBarrier_Store(&dead_, 0); }
+
+ private:
+  alignas(Type) static char buffer_[sizeof(Type)];
+  // Signal the object was already deleted, so it is not revived.
+  static subtle::Atomic32 dead_;
+};
+
+template <typename Type>
+alignas(Type) char StaticMemorySingletonTraits<Type>::buffer_[sizeof(Type)];
+template <typename Type>
+subtle::Atomic32 StaticMemorySingletonTraits<Type>::dead_ = 0;
+
+// The Singleton<Type, Traits, DifferentiatingType> class manages a single
+// instance of Type, which will be created on first use and destroyed at
+// normal process exit. The Trait::Delete function will not be called on
+// abnormal process exit.
+//
+// DifferentiatingType is used as a key to differentiate two different
+// singletons having the same memory allocation functions but serving a
+// different purpose. This is mainly used for Locks serving different purposes.
+//
+// Example usage:
+//
+// In your header:
+//   namespace base {
+//   template <typename T>
+//   struct DefaultSingletonTraits;
+//   }
+//   class FooClass {
+//    public:
+//     static FooClass* GetInstance();  <-- See comment below on this.
+//     void Bar() { ... }
+//    private:
+//     FooClass() { ... }
+//     friend struct base::DefaultSingletonTraits<FooClass>;
+//
+//     DISALLOW_COPY_AND_ASSIGN(FooClass);
+//   };
+//
+// In your source file:
+//  #include "base/memory/singleton.h"
+//  FooClass* FooClass::GetInstance() {
+//    return base::Singleton<FooClass>::get();
+//  }
+//
+// Or for leaky singletons:
+//  #include "base/memory/singleton.h"
+//  FooClass* FooClass::GetInstance() {
+//    return base::Singleton<
+//        FooClass, base::LeakySingletonTraits<FooClass>>::get();
+//  }
+//
+// And to call methods on FooClass:
+//   FooClass::GetInstance()->Bar();
+//
+// NOTE: The method accessing Singleton<T>::get() has to be named GetInstance,
+// and it is important that FooClass::GetInstance() is not inlined in the
+// header. This makes sure that when source files from multiple targets include
+// this header they don't end up with different copies of the inlined code
+// creating multiple copies of the singleton.
+//
+// Singleton<> has no non-static members and doesn't need to actually be
+// instantiated.
+//
+// This class is itself thread-safe. The underlying Type must of course be
+// thread-safe if you want to use it concurrently. Two parameters may be tuned
+// depending on the user's requirements.
+//
+// Glossary:
+//   RAE = kRegisterAtExit
+//
+// On every platform, if Traits::RAE is true, the singleton will be destroyed at
+// process exit. More precisely it uses AtExitManager which requires an
+// object of this type to be instantiated. AtExitManager mimics the semantics
+// of atexit() such as LIFO order but under Windows is safer to call. For more
+// information see at_exit.h.
+//
+// If Traits::RAE is false, the singleton will not be freed at process exit,
+// thus the singleton will be leaked if it is ever accessed. Traits::RAE
+// shouldn't be false unless absolutely necessary. Remember that the heap where
+// the object is allocated may be destroyed by the CRT anyway.
+//
+// Caveats:
+// (a) Every call to get(), operator->() and operator*() incurs some overhead
+//     (16ns on my P4/2.8GHz) to check whether the object has already been
+//     initialized.  You may wish to cache the result of get(); it will not
+//     change.
+//
+// (b) Your factory function must never throw an exception. This class is not
+//     exception-safe.
+//
+
+template <typename Type,
+          typename Traits = DefaultSingletonTraits<Type>,
+          typename DifferentiatingType = Type>
+class Singleton {
+ private:
+  // Classes using the Singleton<T> pattern should declare a GetInstance()
+  // method and call Singleton::get() from within that.
+  friend Type* Type::GetInstance();
+
+  // This class is safe to construct and copy-construct since it has no
+  // members.
+
+  // Return a pointer to the one true instance of the class.
+  static Type* get() {
+#if DCHECK_IS_ON()
+    if (!Traits::kAllowedToAccessOnNonjoinableThread)
+      ThreadRestrictions::AssertSingletonAllowed();
+#endif
+
+    return subtle::GetOrCreateLazyPointer(
+        &instance_, &CreatorFunc, nullptr,
+        Traits::kRegisterAtExit ? OnExit : nullptr, nullptr);
+  }
+
+  // Internal method used as an adaptor for GetOrCreateLazyPointer(). Do not use
+  // outside of that use case.
+  static Type* CreatorFunc(void* /* creator_arg */) { return Traits::New(); }
+
+  // Adapter function for use with AtExit(). This is called single-threaded
+  // at process exit, so atomic operations are not needed. Calling OnExit()
+  // while the singleton is in use by other threads is a mistake.
+  static void OnExit(void* /*unused*/) {
+    // AtExit should only ever be registered after the singleton instance was
+    // created. We should only ever get here with a valid instance_ pointer.
+    Traits::Delete(reinterpret_cast<Type*>(subtle::NoBarrier_Load(&instance_)));
+    instance_ = 0;
+  }
+  static subtle::AtomicWord instance_;
+};
+
+template <typename Type, typename Traits, typename DifferentiatingType>
+subtle::AtomicWord Singleton<Type, Traits, DifferentiatingType>::instance_ = 0;
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_SINGLETON_H_
diff --git a/base/memory/singleton_unittest.cc b/base/memory/singleton_unittest.cc
new file mode 100644
index 0000000..06e53b2
--- /dev/null
+++ b/base/memory/singleton_unittest.cc
@@ -0,0 +1,299 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdint.h>
+
+#include "base/at_exit.h"
+#include "base/memory/singleton.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+static_assert(DefaultSingletonTraits<int>::kRegisterAtExit == true,
+              "object must be deleted on process exit");
+
+typedef void (*CallbackFunc)();
+
+template <size_t alignment>
+class AlignedData {
+ public:
+  AlignedData() = default;
+  ~AlignedData() = default;
+  alignas(alignment) char data_[alignment];
+};
+
+class IntSingleton {
+ public:
+  static IntSingleton* GetInstance() {
+    return Singleton<IntSingleton>::get();
+  }
+
+  int value_;
+};
+
+class Init5Singleton {
+ public:
+  struct Trait;
+
+  static Init5Singleton* GetInstance() {
+    return Singleton<Init5Singleton, Trait>::get();
+  }
+
+  int value_;
+};
+
+struct Init5Singleton::Trait : public DefaultSingletonTraits<Init5Singleton> {
+  static Init5Singleton* New() {
+    Init5Singleton* instance = new Init5Singleton();
+    instance->value_ = 5;
+    return instance;
+  }
+};
+
+int* SingletonInt() {
+  return &IntSingleton::GetInstance()->value_;
+}
+
+int* SingletonInt5() {
+  return &Init5Singleton::GetInstance()->value_;
+}
+
+template <typename Type>
+struct CallbackTrait : public DefaultSingletonTraits<Type> {
+  static void Delete(Type* instance) {
+    if (instance->callback_)
+      (instance->callback_)();
+    DefaultSingletonTraits<Type>::Delete(instance);
+  }
+};
+
+class CallbackSingleton {
+ public:
+  CallbackSingleton() : callback_(nullptr) {}
+  CallbackFunc callback_;
+};
+
+class CallbackSingletonWithNoLeakTrait : public CallbackSingleton {
+ public:
+  struct Trait : public CallbackTrait<CallbackSingletonWithNoLeakTrait> { };
+
+  CallbackSingletonWithNoLeakTrait() : CallbackSingleton() { }
+
+  static CallbackSingletonWithNoLeakTrait* GetInstance() {
+    return Singleton<CallbackSingletonWithNoLeakTrait, Trait>::get();
+  }
+};
+
+class CallbackSingletonWithLeakTrait : public CallbackSingleton {
+ public:
+  struct Trait : public CallbackTrait<CallbackSingletonWithLeakTrait> {
+    static const bool kRegisterAtExit = false;
+  };
+
+  CallbackSingletonWithLeakTrait() : CallbackSingleton() { }
+
+  static CallbackSingletonWithLeakTrait* GetInstance() {
+    return Singleton<CallbackSingletonWithLeakTrait, Trait>::get();
+  }
+};
+
+class CallbackSingletonWithStaticTrait : public CallbackSingleton {
+ public:
+  struct Trait;
+
+  CallbackSingletonWithStaticTrait() : CallbackSingleton() { }
+
+  static CallbackSingletonWithStaticTrait* GetInstance() {
+    return Singleton<CallbackSingletonWithStaticTrait, Trait>::get();
+  }
+};
+
+struct CallbackSingletonWithStaticTrait::Trait
+    : public StaticMemorySingletonTraits<CallbackSingletonWithStaticTrait> {
+  static void Delete(CallbackSingletonWithStaticTrait* instance) {
+    if (instance->callback_)
+      (instance->callback_)();
+    StaticMemorySingletonTraits<CallbackSingletonWithStaticTrait>::Delete(
+        instance);
+  }
+};
+
+template <class Type>
+class AlignedTestSingleton {
+ public:
+  AlignedTestSingleton() = default;
+  ~AlignedTestSingleton() = default;
+  static AlignedTestSingleton* GetInstance() {
+    return Singleton<AlignedTestSingleton,
+                     StaticMemorySingletonTraits<AlignedTestSingleton>>::get();
+  }
+
+  Type type_;
+};
+
+
+void SingletonNoLeak(CallbackFunc CallOnQuit) {
+  CallbackSingletonWithNoLeakTrait::GetInstance()->callback_ = CallOnQuit;
+}
+
+void SingletonLeak(CallbackFunc CallOnQuit) {
+  CallbackSingletonWithLeakTrait::GetInstance()->callback_ = CallOnQuit;
+}
+
+CallbackFunc* GetLeakySingleton() {
+  return &CallbackSingletonWithLeakTrait::GetInstance()->callback_;
+}
+
+void DeleteLeakySingleton() {
+  DefaultSingletonTraits<CallbackSingletonWithLeakTrait>::Delete(
+      CallbackSingletonWithLeakTrait::GetInstance());
+}
+
+void SingletonStatic(CallbackFunc CallOnQuit) {
+  CallbackSingletonWithStaticTrait::GetInstance()->callback_ = CallOnQuit;
+}
+
+CallbackFunc* GetStaticSingleton() {
+  return &CallbackSingletonWithStaticTrait::GetInstance()->callback_;
+}
+
+
+class SingletonTest : public testing::Test {
+ public:
+  SingletonTest() = default;
+
+  void SetUp() override {
+    non_leak_called_ = false;
+    leaky_called_ = false;
+    static_called_ = false;
+  }
+
+ protected:
+  void VerifiesCallbacks() {
+    EXPECT_TRUE(non_leak_called_);
+    EXPECT_FALSE(leaky_called_);
+    EXPECT_TRUE(static_called_);
+    non_leak_called_ = false;
+    leaky_called_ = false;
+    static_called_ = false;
+  }
+
+  void VerifiesCallbacksNotCalled() {
+    EXPECT_FALSE(non_leak_called_);
+    EXPECT_FALSE(leaky_called_);
+    EXPECT_FALSE(static_called_);
+    non_leak_called_ = false;
+    leaky_called_ = false;
+    static_called_ = false;
+  }
+
+  static void CallbackNoLeak() {
+    non_leak_called_ = true;
+  }
+
+  static void CallbackLeak() {
+    leaky_called_ = true;
+  }
+
+  static void CallbackStatic() {
+    static_called_ = true;
+  }
+
+ private:
+  static bool non_leak_called_;
+  static bool leaky_called_;
+  static bool static_called_;
+};
+
+bool SingletonTest::non_leak_called_ = false;
+bool SingletonTest::leaky_called_ = false;
+bool SingletonTest::static_called_ = false;
+
+TEST_F(SingletonTest, Basic) {
+  int* singleton_int;
+  int* singleton_int_5;
+  CallbackFunc* leaky_singleton;
+  CallbackFunc* static_singleton;
+
+  {
+    ShadowingAtExitManager sem;
+    {
+      singleton_int = SingletonInt();
+    }
+    // Ensure POD type initialization.
+    EXPECT_EQ(*singleton_int, 0);
+    *singleton_int = 1;
+
+    EXPECT_EQ(singleton_int, SingletonInt());
+    EXPECT_EQ(*singleton_int, 1);
+
+    {
+      singleton_int_5 = SingletonInt5();
+    }
+    // Is default initialized to 5.
+    EXPECT_EQ(*singleton_int_5, 5);
+
+    SingletonNoLeak(&CallbackNoLeak);
+    SingletonLeak(&CallbackLeak);
+    SingletonStatic(&CallbackStatic);
+    static_singleton = GetStaticSingleton();
+    leaky_singleton = GetLeakySingleton();
+    EXPECT_TRUE(leaky_singleton);
+  }
+
+  // Verify that only the expected callback has been called.
+  VerifiesCallbacks();
+  // Delete the leaky singleton.
+  DeleteLeakySingleton();
+
+  // The static singleton can't be acquired post-atexit.
+  EXPECT_EQ(nullptr, GetStaticSingleton());
+
+  {
+    ShadowingAtExitManager sem;
+    // Verify that the variables were reset.
+    {
+      singleton_int = SingletonInt();
+      EXPECT_EQ(*singleton_int, 0);
+    }
+    {
+      singleton_int_5 = SingletonInt5();
+      EXPECT_EQ(*singleton_int_5, 5);
+    }
+    {
+      // Resurrect the static singleton, and assert that it
+      // still points to the same (static) memory.
+      CallbackSingletonWithStaticTrait::Trait::ResurrectForTesting();
+      EXPECT_EQ(GetStaticSingleton(), static_singleton);
+    }
+  }
+  // The leaky singleton shouldn't leak since SingletonLeak has not been called.
+  VerifiesCallbacksNotCalled();
+}
+
+#define EXPECT_ALIGNED(ptr, align) \
+    EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(ptr) & (align - 1))
+
+TEST_F(SingletonTest, Alignment) {
+  // Create some static singletons with increasing sizes and alignment
+  // requirements. By ordering this way, the linker will need to do some work to
+  // ensure proper alignment of the static data.
+  AlignedTestSingleton<int32_t>* align4 =
+      AlignedTestSingleton<int32_t>::GetInstance();
+  AlignedTestSingleton<AlignedData<32>>* align32 =
+      AlignedTestSingleton<AlignedData<32>>::GetInstance();
+  AlignedTestSingleton<AlignedData<128>>* align128 =
+      AlignedTestSingleton<AlignedData<128>>::GetInstance();
+  AlignedTestSingleton<AlignedData<4096>>* align4096 =
+      AlignedTestSingleton<AlignedData<4096>>::GetInstance();
+
+  EXPECT_ALIGNED(align4, 4);
+  EXPECT_ALIGNED(align32, 32);
+  EXPECT_ALIGNED(align128, 128);
+  EXPECT_ALIGNED(align4096, 4096);
+}
+
+}  // namespace
+}  // namespace base
diff --git a/base/memory/unsafe_shared_memory_region.cc b/base/memory/unsafe_shared_memory_region.cc
new file mode 100644
index 0000000..422b5a9
--- /dev/null
+++ b/base/memory/unsafe_shared_memory_region.cc
@@ -0,0 +1,76 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/unsafe_shared_memory_region.h"
+
+#include <utility>
+
+#include "base/memory/shared_memory.h"
+
+namespace base {
+
+// static
+UnsafeSharedMemoryRegion UnsafeSharedMemoryRegion::Create(size_t size) {
+  subtle::PlatformSharedMemoryRegion handle =
+      subtle::PlatformSharedMemoryRegion::CreateUnsafe(size);
+
+  return UnsafeSharedMemoryRegion(std::move(handle));
+}
+
+// static
+UnsafeSharedMemoryRegion UnsafeSharedMemoryRegion::Deserialize(
+    subtle::PlatformSharedMemoryRegion handle) {
+  return UnsafeSharedMemoryRegion(std::move(handle));
+}
+
+// static
+subtle::PlatformSharedMemoryRegion
+UnsafeSharedMemoryRegion::TakeHandleForSerialization(
+    UnsafeSharedMemoryRegion region) {
+  return std::move(region.handle_);
+}
+
+UnsafeSharedMemoryRegion::UnsafeSharedMemoryRegion() = default;
+UnsafeSharedMemoryRegion::UnsafeSharedMemoryRegion(
+    UnsafeSharedMemoryRegion&& region) = default;
+UnsafeSharedMemoryRegion& UnsafeSharedMemoryRegion::operator=(
+    UnsafeSharedMemoryRegion&& region) = default;
+UnsafeSharedMemoryRegion::~UnsafeSharedMemoryRegion() = default;
+
+UnsafeSharedMemoryRegion UnsafeSharedMemoryRegion::Duplicate() const {
+  return UnsafeSharedMemoryRegion(handle_.Duplicate());
+}
+
+WritableSharedMemoryMapping UnsafeSharedMemoryRegion::Map() const {
+  return MapAt(0, handle_.GetSize());
+}
+
+WritableSharedMemoryMapping UnsafeSharedMemoryRegion::MapAt(off_t offset,
+                                                            size_t size) const {
+  if (!IsValid())
+    return {};
+
+  void* memory = nullptr;
+  size_t mapped_size = 0;
+  if (!handle_.MapAt(offset, size, &memory, &mapped_size))
+    return {};
+
+  return WritableSharedMemoryMapping(memory, size, mapped_size,
+                                     handle_.GetGUID());
+}
+
+bool UnsafeSharedMemoryRegion::IsValid() const {
+  return handle_.IsValid();
+}
+
+UnsafeSharedMemoryRegion::UnsafeSharedMemoryRegion(
+    subtle::PlatformSharedMemoryRegion handle)
+    : handle_(std::move(handle)) {
+  if (handle_.IsValid()) {
+    CHECK_EQ(handle_.GetMode(),
+             subtle::PlatformSharedMemoryRegion::Mode::kUnsafe);
+  }
+}
+
+}  // namespace base
diff --git a/base/memory/unsafe_shared_memory_region.h b/base/memory/unsafe_shared_memory_region.h
new file mode 100644
index 0000000..d77eaaa
--- /dev/null
+++ b/base/memory/unsafe_shared_memory_region.h
@@ -0,0 +1,112 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_UNSAFE_SHARED_MEMORY_REGION_H_
+#define BASE_MEMORY_UNSAFE_SHARED_MEMORY_REGION_H_
+
+#include "base/gtest_prod_util.h"
+#include "base/macros.h"
+#include "base/memory/platform_shared_memory_region.h"
+#include "base/memory/shared_memory_mapping.h"
+
+namespace base {
+
+// Scoped move-only handle to a region of platform shared memory. The instance
+// owns the platform handle it wraps. Mappings created by this region are
+// writable. These mappings remain valid even after the region handle is moved
+// or destroyed.
+//
+// NOTE: UnsafeSharedMemoryRegion cannot be converted to a read-only region. Use
+// with caution as the region will be writable to any process with a handle to
+// the region.
+//
+// Use this if and only if the following is true:
+// - You do not need to share the region as read-only, and,
+// - You need to have several instances of the region simultaneously, possibly
+//   in different processes, that can produce writable mappings.
+
+class BASE_EXPORT UnsafeSharedMemoryRegion {
+ public:
+  using MappingType = WritableSharedMemoryMapping;
+  // Creates a new UnsafeSharedMemoryRegion instance of a given size that can be
+  // used for mapping writable shared memory into the virtual address space.
+  static UnsafeSharedMemoryRegion Create(size_t size);
+
+  // Returns an UnsafeSharedMemoryRegion built from a platform-specific handle
+  // that was taken from another UnsafeSharedMemoryRegion instance. Returns an
+  // invalid region iff the |handle| is invalid. CHECK-fails if the |handle|
+  // isn't unsafe.
+  // This should be used only by the code passing a handle across
+  // process boundaries.
+  static UnsafeSharedMemoryRegion Deserialize(
+      subtle::PlatformSharedMemoryRegion handle);
+
+  // Extracts a platform handle from the region. Ownership is transferred to
+  // the returned PlatformSharedMemoryRegion object.
+  // This should be used only for sending the handle from the current
+  // process to another.
+  static subtle::PlatformSharedMemoryRegion TakeHandleForSerialization(
+      UnsafeSharedMemoryRegion region);
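+  //
+  // A round-trip sketch (IPC transport elided):
+  //   subtle::PlatformSharedMemoryRegion handle =
+  //       UnsafeSharedMemoryRegion::TakeHandleForSerialization(
+  //           std::move(region));
+  //   // ... send |handle| to another process ...
+  //   UnsafeSharedMemoryRegion region2 =
+  //       UnsafeSharedMemoryRegion::Deserialize(std::move(handle));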
+
+  // Default constructor initializes an invalid instance.
+  UnsafeSharedMemoryRegion();
+
+  // Move operations are allowed.
+  UnsafeSharedMemoryRegion(UnsafeSharedMemoryRegion&&);
+  UnsafeSharedMemoryRegion& operator=(UnsafeSharedMemoryRegion&&);
+
+  // Destructor closes shared memory region if valid.
+  // All created mappings will remain valid.
+  ~UnsafeSharedMemoryRegion();
+
+  // Duplicates the underlying platform handle and creates a new
+  // UnsafeSharedMemoryRegion instance that owns the newly created handle.
+  // Returns a valid UnsafeSharedMemoryRegion on success, invalid otherwise.
+  // The current region instance remains valid in any case.
+  UnsafeSharedMemoryRegion Duplicate() const;
+
+  // Maps the shared memory region into the caller's address space with write
+  // access. The mapped address is guaranteed to have an alignment of
+  // at least |subtle::PlatformSharedMemoryRegion::kMapMinimumAlignment|.
+  // Returns a valid WritableSharedMemoryMapping instance on success, invalid
+  // otherwise.
+  WritableSharedMemoryMapping Map() const;
+
+  // Same as above, but maps only |size| bytes of the shared memory region
+  // starting with the given |offset|. |offset| must be aligned to value of
+  // |SysInfo::VMAllocationGranularity()|. Returns an invalid mapping if
+  // requested bytes are out of the region limits.
+  WritableSharedMemoryMapping MapAt(off_t offset, size_t size) const;
+
+  // Whether the underlying platform handle is valid.
+  bool IsValid() const;
+
+  // Returns the maximum mapping size that can be created from this region.
+  size_t GetSize() const {
+    DCHECK(IsValid());
+    return handle_.GetSize();
+  }
+
+ private:
+  FRIEND_TEST_ALL_PREFIXES(DiscardableSharedMemoryTest,
+                           LockShouldFailIfPlatformLockPagesFails);
+  friend class DiscardableSharedMemory;
+
+  explicit UnsafeSharedMemoryRegion(subtle::PlatformSharedMemoryRegion handle);
+
+  // Returns a platform shared memory handle. |this| remains the owner of the
+  // handle.
+  subtle::PlatformSharedMemoryRegion::PlatformHandle GetPlatformHandle() const {
+    DCHECK(IsValid());
+    return handle_.GetPlatformHandle();
+  }
+
+  subtle::PlatformSharedMemoryRegion handle_;
+
+  DISALLOW_COPY_AND_ASSIGN(UnsafeSharedMemoryRegion);
+};
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_UNSAFE_SHARED_MEMORY_REGION_H_
diff --git a/base/memory/weak_ptr.cc b/base/memory/weak_ptr.cc
new file mode 100644
index 0000000..d2a7d89
--- /dev/null
+++ b/base/memory/weak_ptr.cc
@@ -0,0 +1,86 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/weak_ptr.h"
+
+namespace base {
+namespace internal {
+
+WeakReference::Flag::Flag() : is_valid_(true) {
+  // Flags only become bound when checked for validity, or invalidated,
+  // so that we can check that later validity/invalidation operations on
+  // the same Flag take place on the same sequenced thread.
+  sequence_checker_.DetachFromSequence();
+}
+
+void WeakReference::Flag::Invalidate() {
+  // The flag being invalidated with a single ref implies that there are no
+  // weak pointers in existence. Allow deletion on another thread in this case.
+  DCHECK(sequence_checker_.CalledOnValidSequence() || HasOneRef())
+      << "WeakPtrs must be invalidated on the same sequenced thread.";
+  is_valid_ = false;
+}
+
+bool WeakReference::Flag::IsValid() const {
+  DCHECK(sequence_checker_.CalledOnValidSequence())
+      << "WeakPtrs must be checked on the same sequenced thread.";
+  return is_valid_;
+}
+
+WeakReference::Flag::~Flag() = default;
+
+WeakReference::WeakReference() = default;
+
+WeakReference::WeakReference(const scoped_refptr<Flag>& flag) : flag_(flag) {}
+
+WeakReference::~WeakReference() = default;
+
+WeakReference::WeakReference(WeakReference&& other) = default;
+
+WeakReference::WeakReference(const WeakReference& other) = default;
+
+bool WeakReference::is_valid() const {
+  return flag_ && flag_->IsValid();
+}
+
+WeakReferenceOwner::WeakReferenceOwner() = default;
+
+WeakReferenceOwner::~WeakReferenceOwner() {
+  Invalidate();
+}
+
+WeakReference WeakReferenceOwner::GetRef() const {
+  // If we hold the last reference to the Flag then create a new one.
+  if (!HasRefs())
+    flag_ = new WeakReference::Flag();
+
+  return WeakReference(flag_);
+}
+
+void WeakReferenceOwner::Invalidate() {
+  if (flag_) {
+    flag_->Invalidate();
+    flag_ = nullptr;
+  }
+}
+
+WeakPtrBase::WeakPtrBase() : ptr_(0) {}
+
+WeakPtrBase::~WeakPtrBase() = default;
+
+WeakPtrBase::WeakPtrBase(const WeakReference& ref, uintptr_t ptr)
+    : ref_(ref), ptr_(ptr) {
+  DCHECK(ptr_);
+}
+
+WeakPtrFactoryBase::WeakPtrFactoryBase(uintptr_t ptr) : ptr_(ptr) {
+  DCHECK(ptr_);
+}
+
+WeakPtrFactoryBase::~WeakPtrFactoryBase() {
+  ptr_ = 0;
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/memory/weak_ptr.h b/base/memory/weak_ptr.h
new file mode 100644
index 0000000..34e7d2e
--- /dev/null
+++ b/base/memory/weak_ptr.h
@@ -0,0 +1,377 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Weak pointers are pointers to an object that do not affect its lifetime,
+// and which may be invalidated (i.e. reset to nullptr) by the object, or its
+// owner, at any time, most commonly when the object is about to be deleted.
+
+// Weak pointers are useful when an object needs to be accessed safely by one
+// or more objects other than its owner, and those callers can cope with the
+// object vanishing and e.g. tasks posted to it being silently dropped.
+// Reference-counting such an object would complicate the ownership graph and
+// make it harder to reason about the object's lifetime.
+
+// EXAMPLE:
+//
+//  class Controller {
+//   public:
+//    Controller() : weak_factory_(this) {}
+//    void SpawnWorker() { Worker::StartNew(weak_factory_.GetWeakPtr()); }
+//    void WorkComplete(const Result& result) { ... }
+//   private:
+//    // Member variables should appear before the WeakPtrFactory, to ensure
+//    // that any WeakPtrs to Controller are invalidated before its member
+//    // variables' destructors are executed, rendering them invalid.
+//    WeakPtrFactory<Controller> weak_factory_;
+//  };
+//
+//  class Worker {
+//   public:
+//    static void StartNew(const WeakPtr<Controller>& controller) {
+//      Worker* worker = new Worker(controller);
+//      // Kick off asynchronous processing...
+//    }
+//   private:
+//    Worker(const WeakPtr<Controller>& controller)
+//        : controller_(controller) {}
+//    void DidCompleteAsynchronousProcessing(const Result& result) {
+//      if (controller_)
+//        controller_->WorkComplete(result);
+//    }
+//    WeakPtr<Controller> controller_;
+//  };
+//
+// With this implementation a caller may use SpawnWorker() to dispatch multiple
+// Workers and subsequently delete the Controller, without waiting for all
+// Workers to have completed.
+
+// ------------------------- IMPORTANT: Thread-safety -------------------------
+
+// Weak pointers may be passed safely between threads, but must always be
+// dereferenced and invalidated on the same SequencedTaskRunner; otherwise,
+// checking the pointer would be racy.
+//
+// To ensure correct use, the first time a WeakPtr issued by a WeakPtrFactory
+// is dereferenced, the factory and its WeakPtrs become bound to the calling
+// thread or current SequencedWorkerPool token, and cannot be dereferenced or
+// invalidated on any other task runner. Bound WeakPtrs can still be handed
+// off to other task runners, e.g. to post tasks back to the object on the
+// bound sequence.
+//
+// If all WeakPtr objects are destroyed or invalidated then the factory is
+// unbound from the SequencedTaskRunner/Thread. The WeakPtrFactory may then be
+// destroyed, or new WeakPtr objects may be used, from a different sequence.
+//
+// Thus, at least one WeakPtr object must exist and have been dereferenced on
+// the correct thread before other WeakPtr objects can enforce that they are
+// used on the desired thread.
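+//
+// EXAMPLE (a minimal sketch, not part of the original comment; |task_runner|,
+// |weak_factory_|, |result| and Controller::WorkComplete are assumed from the
+// example above):
+//
+//   // The WeakPtr may be copied onto another sequence, but the task below
+//   // only dereferences it on the sequence |task_runner| runs tasks on.
+//   task_runner->PostTask(
+//       FROM_HERE, base::BindOnce(&Controller::WorkComplete,
+//                                 weak_factory_.GetWeakPtr(), result));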
+
+#ifndef BASE_MEMORY_WEAK_PTR_H_
+#define BASE_MEMORY_WEAK_PTR_H_
+
+#include <cstddef>
+#include <type_traits>
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/sequence_checker.h"
+
+namespace base {
+
+template <typename T> class SupportsWeakPtr;
+template <typename T> class WeakPtr;
+
+namespace internal {
+// These classes are part of the WeakPtr implementation.
+// DO NOT USE THESE CLASSES DIRECTLY YOURSELF.
+
+class BASE_EXPORT WeakReference {
+ public:
+  // Although Flag is bound to a specific SequencedTaskRunner, it may be
+  // deleted on another via base::WeakPtr::~WeakPtr().
+  class BASE_EXPORT Flag : public RefCountedThreadSafe<Flag> {
+   public:
+    Flag();
+
+    void Invalidate();
+    bool IsValid() const;
+
+   private:
+    friend class base::RefCountedThreadSafe<Flag>;
+
+    ~Flag();
+
+    SequenceChecker sequence_checker_;
+    bool is_valid_;
+  };
+
+  WeakReference();
+  explicit WeakReference(const scoped_refptr<Flag>& flag);
+  ~WeakReference();
+
+  WeakReference(WeakReference&& other);
+  WeakReference(const WeakReference& other);
+  WeakReference& operator=(WeakReference&& other) = default;
+  WeakReference& operator=(const WeakReference& other) = default;
+
+  bool is_valid() const;
+
+ private:
+  scoped_refptr<const Flag> flag_;
+};
+
+class BASE_EXPORT WeakReferenceOwner {
+ public:
+  WeakReferenceOwner();
+  ~WeakReferenceOwner();
+
+  WeakReference GetRef() const;
+
+  bool HasRefs() const { return flag_ && !flag_->HasOneRef(); }
+
+  void Invalidate();
+
+ private:
+  mutable scoped_refptr<WeakReference::Flag> flag_;
+};
+
+// This class simplifies the implementation of WeakPtr's type conversion
+// constructor by avoiding the need for a public accessor for ref_.  A
+// WeakPtr<T> cannot access the private members of WeakPtr<U>, so this
+// base class gives us a way to access ref_ in a protected fashion.
+class BASE_EXPORT WeakPtrBase {
+ public:
+  WeakPtrBase();
+  ~WeakPtrBase();
+
+  WeakPtrBase(const WeakPtrBase& other) = default;
+  WeakPtrBase(WeakPtrBase&& other) = default;
+  WeakPtrBase& operator=(const WeakPtrBase& other) = default;
+  WeakPtrBase& operator=(WeakPtrBase&& other) = default;
+
+  void reset() {
+    ref_ = internal::WeakReference();
+    ptr_ = 0;
+  }
+
+ protected:
+  WeakPtrBase(const WeakReference& ref, uintptr_t ptr);
+
+  WeakReference ref_;
+
+  // This pointer is only valid when ref_.is_valid() is true.  Otherwise, its
+  // value is undefined (as opposed to nullptr).
+  uintptr_t ptr_;
+};
+
+// This class provides a common implementation of functions that would
+// otherwise get instantiated separately for each distinct instantiation of
+// SupportsWeakPtr<>.
+class SupportsWeakPtrBase {
+ public:
+  // A safe static downcast of a WeakPtr<Base> to WeakPtr<Derived>. This
+  // conversion will only compile if there exists a Base which inherits
+  // from SupportsWeakPtr<Base>. See base::AsWeakPtr() below for a helper
+  // function that makes calling this easier.
+  //
+  // Precondition: t != nullptr
+  template<typename Derived>
+  static WeakPtr<Derived> StaticAsWeakPtr(Derived* t) {
+    static_assert(
+        std::is_base_of<internal::SupportsWeakPtrBase, Derived>::value,
+        "AsWeakPtr argument must inherit from SupportsWeakPtr");
+    return AsWeakPtrImpl<Derived>(t);
+  }
+
+ private:
+  // This template function uses type inference to find a Base of Derived
+  // which is an instance of SupportsWeakPtr<Base>. We can then safely
+  // static_cast the Base* to a Derived*.
+  template <typename Derived, typename Base>
+  static WeakPtr<Derived> AsWeakPtrImpl(SupportsWeakPtr<Base>* t) {
+    WeakPtr<Base> ptr = t->AsWeakPtr();
+    return WeakPtr<Derived>(
+        ptr.ref_, static_cast<Derived*>(reinterpret_cast<Base*>(ptr.ptr_)));
+  }
+};
+
+}  // namespace internal
+
+template <typename T> class WeakPtrFactory;
+
+// The WeakPtr class holds a weak reference to |T*|.
+//
+// This class is designed to be used like a normal pointer.  You should always
+// null-test an object of this class before using it or invoking a method that
+// may result in the underlying object being destroyed.
+//
+// EXAMPLE:
+//
+//   class Foo { ... };
+//   WeakPtr<Foo> foo;
+//   if (foo)
+//     foo->method();
+//
+template <typename T>
+class WeakPtr : public internal::WeakPtrBase {
+ public:
+  WeakPtr() = default;
+
+  WeakPtr(std::nullptr_t) {}
+
+  // Allow conversion from U to T provided U "is a" T. Note that this
+  // is separate from the (implicit) copy and move constructors.
+  template <typename U>
+  WeakPtr(const WeakPtr<U>& other) : WeakPtrBase(other) {
+    // Need to cast from U* to T* to do pointer adjustment in case of multiple
+    // inheritance. This also enforces the "U is a T" rule.
+    T* t = reinterpret_cast<U*>(other.ptr_);
+    ptr_ = reinterpret_cast<uintptr_t>(t);
+  }
+  template <typename U>
+  WeakPtr(WeakPtr<U>&& other) : WeakPtrBase(std::move(other)) {
+    // Need to cast from U* to T* to do pointer adjustment in case of multiple
+    // inheritance. This also enforces the "U is a T" rule.
+    T* t = reinterpret_cast<U*>(other.ptr_);
+    ptr_ = reinterpret_cast<uintptr_t>(t);
+  }
+
+  T* get() const {
+    return ref_.is_valid() ? reinterpret_cast<T*>(ptr_) : nullptr;
+  }
+
+  T& operator*() const {
+    DCHECK(get() != nullptr);
+    return *get();
+  }
+  T* operator->() const {
+    DCHECK(get() != nullptr);
+    return get();
+  }
+
+  // Allow conditionals to test validity, e.g. if (weak_ptr) {...};
+  explicit operator bool() const { return get() != nullptr; }
+
+ private:
+  friend class internal::SupportsWeakPtrBase;
+  template <typename U> friend class WeakPtr;
+  friend class SupportsWeakPtr<T>;
+  friend class WeakPtrFactory<T>;
+
+  WeakPtr(const internal::WeakReference& ref, T* ptr)
+      : WeakPtrBase(ref, reinterpret_cast<uintptr_t>(ptr)) {}
+};
+
+// Allow callers to compare WeakPtrs against nullptr to test validity.
+template <class T>
+bool operator!=(const WeakPtr<T>& weak_ptr, std::nullptr_t) {
+  return !(weak_ptr == nullptr);
+}
+template <class T>
+bool operator!=(std::nullptr_t, const WeakPtr<T>& weak_ptr) {
+  return weak_ptr != nullptr;
+}
+template <class T>
+bool operator==(const WeakPtr<T>& weak_ptr, std::nullptr_t) {
+  return weak_ptr.get() == nullptr;
+}
+template <class T>
+bool operator==(std::nullptr_t, const WeakPtr<T>& weak_ptr) {
+  return weak_ptr == nullptr;
+}
+
+namespace internal {
+class BASE_EXPORT WeakPtrFactoryBase {
+ protected:
+  WeakPtrFactoryBase(uintptr_t ptr);
+  ~WeakPtrFactoryBase();
+  internal::WeakReferenceOwner weak_reference_owner_;
+  uintptr_t ptr_;
+};
+}  // namespace internal
+
+// A class may be composed of a WeakPtrFactory and thereby
+// control how it exposes weak pointers to itself.  This is helpful if you only
+// need weak pointers within the implementation of a class.  This class is also
+// useful when working with primitive types.  For example, you could have a
+// WeakPtrFactory<bool> that is used to pass around a weak reference to a bool.
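+//
+// EXAMPLE (a minimal sketch of the primitive-type case mentioned above):
+//
+//   bool flag = false;
+//   WeakPtrFactory<bool> factory(&flag);
+//   WeakPtr<bool> weak_flag = factory.GetWeakPtr();
+//   if (weak_flag)
+//     *weak_flag = true;  // Valid while |factory| (and |flag|) are alive.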
+template <class T>
+class WeakPtrFactory : public internal::WeakPtrFactoryBase {
+ public:
+  explicit WeakPtrFactory(T* ptr)
+      : WeakPtrFactoryBase(reinterpret_cast<uintptr_t>(ptr)) {}
+
+  ~WeakPtrFactory() = default;
+
+  WeakPtr<T> GetWeakPtr() {
+    return WeakPtr<T>(weak_reference_owner_.GetRef(),
+                      reinterpret_cast<T*>(ptr_));
+  }
+
+  // Call this method to invalidate all existing weak pointers.
+  void InvalidateWeakPtrs() {
+    DCHECK(ptr_);
+    weak_reference_owner_.Invalidate();
+  }
+
+  // Call this method to determine if any weak pointers exist.
+  bool HasWeakPtrs() const {
+    DCHECK(ptr_);
+    return weak_reference_owner_.HasRefs();
+  }
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(WeakPtrFactory);
+};
+
+// A class may extend from SupportsWeakPtr to let others take weak pointers to
+// it. This avoids the class itself implementing boilerplate to dispense weak
+// pointers.  However, since SupportsWeakPtr's destructor won't invalidate
+// weak pointers to the class until after the derived class' members have been
+// destroyed, its use can lead to subtle use-after-destroy issues.
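+//
+// EXAMPLE of the hazard (an illustrative sketch; Listener and Resource are
+// hypothetical types):
+//
+//   class Listener : public SupportsWeakPtr<Listener> {
+//     std::unique_ptr<Resource> resource_;
+//   };
+//
+//   // Code reached synchronously from ~Listener() (e.g. a callback invoked
+//   // by ~Resource()) may still dereference a WeakPtr<Listener> and observe
+//   // the partially-destroyed object, because the SupportsWeakPtr base (and
+//   // its invalidation flag) is destroyed only after |resource_|.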
+template <class T>
+class SupportsWeakPtr : public internal::SupportsWeakPtrBase {
+ public:
+  SupportsWeakPtr() = default;
+
+  WeakPtr<T> AsWeakPtr() {
+    return WeakPtr<T>(weak_reference_owner_.GetRef(), static_cast<T*>(this));
+  }
+
+ protected:
+  ~SupportsWeakPtr() = default;
+
+ private:
+  internal::WeakReferenceOwner weak_reference_owner_;
+  DISALLOW_COPY_AND_ASSIGN(SupportsWeakPtr);
+};
+
+// Helper function that uses type deduction to safely return a WeakPtr<Derived>
+// when Derived doesn't directly extend SupportsWeakPtr<Derived>, but instead
+// extends a Base that extends SupportsWeakPtr<Base>.
+//
+// EXAMPLE:
+//   class Base : public base::SupportsWeakPtr<Base> {};
+//   class Derived : public Base {};
+//
+//   Derived derived;
+//   base::WeakPtr<Derived> ptr = base::AsWeakPtr(&derived);
+//
+// Note that the following doesn't work (invalid type conversion) since
+// Derived::AsWeakPtr() is WeakPtr<Base> SupportsWeakPtr<Base>::AsWeakPtr(),
+// and there's no way to safely cast WeakPtr<Base> to WeakPtr<Derived> at
+// the caller.
+//
+//   base::WeakPtr<Derived> ptr = derived.AsWeakPtr();  // Fails.
+
+template <typename Derived>
+WeakPtr<Derived> AsWeakPtr(Derived* t) {
+  return internal::SupportsWeakPtrBase::StaticAsWeakPtr<Derived>(t);
+}
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_WEAK_PTR_H_
diff --git a/base/memory/weak_ptr_unittest.cc b/base/memory/weak_ptr_unittest.cc
new file mode 100644
index 0000000..f8dfb7c
--- /dev/null
+++ b/base/memory/weak_ptr_unittest.cc
@@ -0,0 +1,716 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/weak_ptr.h"
+
+#include <memory>
+#include <string>
+
+#include "base/bind.h"
+#include "base/debug/leak_annotations.h"
+#include "base/location.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/test/gtest_util.h"
+#include "base/threading/thread.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+WeakPtr<int> PassThru(WeakPtr<int> ptr) {
+  return ptr;
+}
+
+template <class T>
+class OffThreadObjectCreator {
+ public:
+  static T* NewObject() {
+    T* result;
+    {
+      Thread creator_thread("creator_thread");
+      creator_thread.Start();
+      creator_thread.task_runner()->PostTask(
+          FROM_HERE,
+          base::BindOnce(OffThreadObjectCreator::CreateObject, &result));
+    }
+    DCHECK(result);  // We synchronized on thread destruction above.
+    return result;
+  }
+ private:
+  static void CreateObject(T** result) {
+    *result = new T;
+  }
+};
+
+struct Base {
+  std::string member;
+};
+struct Derived : public Base {};
+
+struct TargetBase {};
+struct Target : public TargetBase, public SupportsWeakPtr<Target> {
+  virtual ~Target() = default;
+};
+
+struct DerivedTarget : public Target {};
+
+// A class inheriting from Target and defining a nested type called 'Base',
+// to guard against strange compilation errors.
+struct DerivedTargetWithNestedBase : public Target {
+  using Base = void;
+};
+
+// A struct with a virtual destructor.
+struct VirtualDestructor {
+  virtual ~VirtualDestructor() = default;
+};
+
+// A class inheriting from Target where Target is not the first base, and where
+// the first base has a virtual method table. This creates a structure where the
+// Target base is not positioned at the beginning of
+// DerivedTargetMultipleInheritance.
+struct DerivedTargetMultipleInheritance : public VirtualDestructor,
+                                          public Target {};
+
+struct Arrow {
+  WeakPtr<Target> target;
+};
+struct TargetWithFactory : public Target {
+  TargetWithFactory() : factory(this) {}
+  WeakPtrFactory<Target> factory;
+};
+
+// Helper class to create and destroy weak pointer copies
+// and delete objects on a background thread.
+class BackgroundThread : public Thread {
+ public:
+  BackgroundThread() : Thread("owner_thread") {}
+
+  ~BackgroundThread() override { Stop(); }
+
+  void CreateArrowFromTarget(Arrow** arrow, Target* target) {
+    WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
+                             WaitableEvent::InitialState::NOT_SIGNALED);
+    task_runner()->PostTask(
+        FROM_HERE, base::BindOnce(&BackgroundThread::DoCreateArrowFromTarget,
+                                  arrow, target, &completion));
+    completion.Wait();
+  }
+
+  void CreateArrowFromArrow(Arrow** arrow, const Arrow* other) {
+    WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
+                             WaitableEvent::InitialState::NOT_SIGNALED);
+    task_runner()->PostTask(
+        FROM_HERE, base::BindOnce(&BackgroundThread::DoCreateArrowFromArrow,
+                                  arrow, other, &completion));
+    completion.Wait();
+  }
+
+  void DeleteTarget(Target* object) {
+    WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
+                             WaitableEvent::InitialState::NOT_SIGNALED);
+    task_runner()->PostTask(
+        FROM_HERE,
+        base::BindOnce(&BackgroundThread::DoDeleteTarget, object, &completion));
+    completion.Wait();
+  }
+
+  void CopyAndAssignArrow(Arrow* object) {
+    WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
+                             WaitableEvent::InitialState::NOT_SIGNALED);
+    task_runner()->PostTask(
+        FROM_HERE, base::BindOnce(&BackgroundThread::DoCopyAndAssignArrow,
+                                  object, &completion));
+    completion.Wait();
+  }
+
+  void CopyAndAssignArrowBase(Arrow* object) {
+    WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
+                             WaitableEvent::InitialState::NOT_SIGNALED);
+    task_runner()->PostTask(
+        FROM_HERE, base::BindOnce(&BackgroundThread::DoCopyAndAssignArrowBase,
+                                  object, &completion));
+    completion.Wait();
+  }
+
+  void DeleteArrow(Arrow* object) {
+    WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
+                             WaitableEvent::InitialState::NOT_SIGNALED);
+    task_runner()->PostTask(
+        FROM_HERE,
+        base::BindOnce(&BackgroundThread::DoDeleteArrow, object, &completion));
+    completion.Wait();
+  }
+
+  Target* DeRef(const Arrow* arrow) {
+    WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
+                             WaitableEvent::InitialState::NOT_SIGNALED);
+    Target* result = nullptr;
+    task_runner()->PostTask(
+        FROM_HERE, base::BindOnce(&BackgroundThread::DoDeRef, arrow, &result,
+                                  &completion));
+    completion.Wait();
+    return result;
+  }
+
+ protected:
+  static void DoCreateArrowFromArrow(Arrow** arrow,
+                                     const Arrow* other,
+                                     WaitableEvent* completion) {
+    *arrow = new Arrow;
+    **arrow = *other;
+    completion->Signal();
+  }
+
+  static void DoCreateArrowFromTarget(Arrow** arrow,
+                                      Target* target,
+                                      WaitableEvent* completion) {
+    *arrow = new Arrow;
+    (*arrow)->target = target->AsWeakPtr();
+    completion->Signal();
+  }
+
+  static void DoDeRef(const Arrow* arrow,
+                      Target** result,
+                      WaitableEvent* completion) {
+    *result = arrow->target.get();
+    completion->Signal();
+  }
+
+  static void DoDeleteTarget(Target* object, WaitableEvent* completion) {
+    delete object;
+    completion->Signal();
+  }
+
+  static void DoCopyAndAssignArrow(Arrow* object, WaitableEvent* completion) {
+    // Copy constructor.
+    Arrow a = *object;
+    // Assignment operator.
+    *object = a;
+    completion->Signal();
+  }
+
+  static void DoCopyAndAssignArrowBase(
+      Arrow* object,
+      WaitableEvent* completion) {
+    // Copy constructor.
+    WeakPtr<TargetBase> b = object->target;
+    // Assignment operator.
+    WeakPtr<TargetBase> c;
+    c = object->target;
+    completion->Signal();
+  }
+
+  static void DoDeleteArrow(Arrow* object, WaitableEvent* completion) {
+    delete object;
+    completion->Signal();
+  }
+};
+
+}  // namespace
+
+TEST(WeakPtrFactoryTest, Basic) {
+  int data;
+  WeakPtrFactory<int> factory(&data);
+  WeakPtr<int> ptr = factory.GetWeakPtr();
+  EXPECT_EQ(&data, ptr.get());
+}
+
+TEST(WeakPtrFactoryTest, Comparison) {
+  int data;
+  WeakPtrFactory<int> factory(&data);
+  WeakPtr<int> ptr = factory.GetWeakPtr();
+  WeakPtr<int> ptr2 = ptr;
+  EXPECT_EQ(ptr.get(), ptr2.get());
+}
+
+TEST(WeakPtrFactoryTest, Move) {
+  int data;
+  WeakPtrFactory<int> factory(&data);
+  WeakPtr<int> ptr = factory.GetWeakPtr();
+  WeakPtr<int> ptr2 = factory.GetWeakPtr();
+  WeakPtr<int> ptr3 = std::move(ptr2);
+  EXPECT_NE(ptr.get(), ptr2.get());
+  EXPECT_EQ(ptr.get(), ptr3.get());
+}
+
+TEST(WeakPtrFactoryTest, OutOfScope) {
+  WeakPtr<int> ptr;
+  EXPECT_EQ(nullptr, ptr.get());
+  {
+    int data;
+    WeakPtrFactory<int> factory(&data);
+    ptr = factory.GetWeakPtr();
+  }
+  EXPECT_EQ(nullptr, ptr.get());
+}
+
+TEST(WeakPtrFactoryTest, Multiple) {
+  WeakPtr<int> a, b;
+  {
+    int data;
+    WeakPtrFactory<int> factory(&data);
+    a = factory.GetWeakPtr();
+    b = factory.GetWeakPtr();
+    EXPECT_EQ(&data, a.get());
+    EXPECT_EQ(&data, b.get());
+  }
+  EXPECT_EQ(nullptr, a.get());
+  EXPECT_EQ(nullptr, b.get());
+}
+
+TEST(WeakPtrFactoryTest, MultipleStaged) {
+  WeakPtr<int> a;
+  {
+    int data;
+    WeakPtrFactory<int> factory(&data);
+    a = factory.GetWeakPtr();
+    {
+      WeakPtr<int> b = factory.GetWeakPtr();
+    }
+    EXPECT_NE(nullptr, a.get());
+  }
+  EXPECT_EQ(nullptr, a.get());
+}
+
+TEST(WeakPtrFactoryTest, Dereference) {
+  Base data;
+  data.member = "123456";
+  WeakPtrFactory<Base> factory(&data);
+  WeakPtr<Base> ptr = factory.GetWeakPtr();
+  EXPECT_EQ(&data, ptr.get());
+  EXPECT_EQ(data.member, (*ptr).member);
+  EXPECT_EQ(data.member, ptr->member);
+}
+
+TEST(WeakPtrFactoryTest, UpCast) {
+  Derived data;
+  WeakPtrFactory<Derived> factory(&data);
+  WeakPtr<Base> ptr = factory.GetWeakPtr();
+  ptr = factory.GetWeakPtr();
+  EXPECT_EQ(ptr.get(), &data);
+}
+
+TEST(WeakPtrTest, ConstructFromNullptr) {
+  WeakPtr<int> ptr = PassThru(nullptr);
+  EXPECT_EQ(nullptr, ptr.get());
+}
+
+TEST(WeakPtrTest, SupportsWeakPtr) {
+  Target target;
+  WeakPtr<Target> ptr = target.AsWeakPtr();
+  EXPECT_EQ(&target, ptr.get());
+}
+
+TEST(WeakPtrTest, DerivedTarget) {
+  DerivedTarget target;
+  WeakPtr<DerivedTarget> ptr = AsWeakPtr(&target);
+  EXPECT_EQ(&target, ptr.get());
+}
+
+TEST(WeakPtrTest, DerivedTargetWithNestedBase) {
+  DerivedTargetWithNestedBase target;
+  WeakPtr<DerivedTargetWithNestedBase> ptr = AsWeakPtr(&target);
+  EXPECT_EQ(&target, ptr.get());
+}
+
+TEST(WeakPtrTest, DerivedTargetMultipleInheritance) {
+  DerivedTargetMultipleInheritance d;
+  Target& b = d;
+  EXPECT_NE(static_cast<void*>(&d), static_cast<void*>(&b));
+  const WeakPtr<Target> pb = AsWeakPtr(&b);
+  EXPECT_EQ(pb.get(), &b);
+  const WeakPtr<DerivedTargetMultipleInheritance> pd = AsWeakPtr(&d);
+  EXPECT_EQ(pd.get(), &d);
+}
+
+TEST(WeakPtrFactoryTest, BooleanTesting) {
+  int data;
+  WeakPtrFactory<int> factory(&data);
+
+  WeakPtr<int> ptr_to_an_instance = factory.GetWeakPtr();
+  EXPECT_TRUE(ptr_to_an_instance);
+  EXPECT_FALSE(!ptr_to_an_instance);
+
+  if (ptr_to_an_instance) {
+  } else {
+    ADD_FAILURE() << "Pointer to an instance should result in true.";
+  }
+
+  if (!ptr_to_an_instance) {  // check for operator!().
+    ADD_FAILURE() << "Pointer to an instance should result in !x being false.";
+  }
+
+  WeakPtr<int> null_ptr;
+  EXPECT_FALSE(null_ptr);
+  EXPECT_TRUE(!null_ptr);
+
+  if (null_ptr) {
+    ADD_FAILURE() << "Null pointer should result in false.";
+  }
+
+  if (!null_ptr) {  // check for operator!().
+  } else {
+    ADD_FAILURE() << "Null pointer should result in !x being true.";
+  }
+}
+
+TEST(WeakPtrFactoryTest, ComparisonToNull) {
+  int data;
+  WeakPtrFactory<int> factory(&data);
+
+  WeakPtr<int> ptr_to_an_instance = factory.GetWeakPtr();
+  EXPECT_NE(nullptr, ptr_to_an_instance);
+  EXPECT_NE(ptr_to_an_instance, nullptr);
+
+  WeakPtr<int> null_ptr;
+  EXPECT_EQ(null_ptr, nullptr);
+  EXPECT_EQ(nullptr, null_ptr);
+}
+
+TEST(WeakPtrTest, InvalidateWeakPtrs) {
+  int data;
+  WeakPtrFactory<int> factory(&data);
+  WeakPtr<int> ptr = factory.GetWeakPtr();
+  EXPECT_EQ(&data, ptr.get());
+  EXPECT_TRUE(factory.HasWeakPtrs());
+  factory.InvalidateWeakPtrs();
+  EXPECT_EQ(nullptr, ptr.get());
+  EXPECT_FALSE(factory.HasWeakPtrs());
+
+  // Test that the factory can create new weak pointers after a
+  // InvalidateWeakPtrs call, and they remain valid until the next
+  // InvalidateWeakPtrs call.
+  WeakPtr<int> ptr2 = factory.GetWeakPtr();
+  EXPECT_EQ(&data, ptr2.get());
+  EXPECT_TRUE(factory.HasWeakPtrs());
+  factory.InvalidateWeakPtrs();
+  EXPECT_EQ(nullptr, ptr2.get());
+  EXPECT_FALSE(factory.HasWeakPtrs());
+}
+
+TEST(WeakPtrTest, HasWeakPtrs) {
+  int data;
+  WeakPtrFactory<int> factory(&data);
+  {
+    WeakPtr<int> ptr = factory.GetWeakPtr();
+    EXPECT_TRUE(factory.HasWeakPtrs());
+  }
+  EXPECT_FALSE(factory.HasWeakPtrs());
+}
+
+TEST(WeakPtrTest, ObjectAndWeakPtrOnDifferentThreads) {
+  // Test that it is OK to create an object that supports WeakPtr on one thread,
+  // but use it on another.  This tests that we do not trip runtime checks that
+  // ensure that a WeakPtr is not used by multiple threads.
+  std::unique_ptr<Target> target(OffThreadObjectCreator<Target>::NewObject());
+  WeakPtr<Target> weak_ptr = target->AsWeakPtr();
+  EXPECT_EQ(target.get(), weak_ptr.get());
+}
+
+TEST(WeakPtrTest, WeakPtrInitiateAndUseOnDifferentThreads) {
+  // Test that it is OK to create an object that has a WeakPtr member on one
+  // thread, but use it on another.  This tests that we do not trip runtime
+  // checks that ensure that a WeakPtr is not used by multiple threads.
+  std::unique_ptr<Arrow> arrow(OffThreadObjectCreator<Arrow>::NewObject());
+  Target target;
+  arrow->target = target.AsWeakPtr();
+  EXPECT_EQ(&target, arrow->target.get());
+}
+
+TEST(WeakPtrTest, MoveOwnershipImplicitly) {
+  // Move object ownership to another thread by releasing all weak pointers
+  // on the original thread first, and then establish WeakPtr on a different
+  // thread.
+  BackgroundThread background;
+  background.Start();
+
+  Target* target = new Target();
+  {
+    WeakPtr<Target> weak_ptr = target->AsWeakPtr();
+    // Main thread deletes the WeakPtr, after which thread ownership of the
+    // object can be implicitly moved.
+  }
+  Arrow* arrow;
+
+  // Background thread creates a WeakPtr (and implicitly owns the object).
+  background.CreateArrowFromTarget(&arrow, target);
+  EXPECT_EQ(background.DeRef(arrow), target);
+
+  {
+    // Main thread creates another WeakPtr, but this does not implicitly move
+    // thread ownership.
+    Arrow arrow;
+    arrow.target = target->AsWeakPtr();
+
+    // The new WeakPtr is owned by the background thread.
+    EXPECT_EQ(target, background.DeRef(&arrow));
+  }
+
+  // Target can only be deleted on background thread.
+  background.DeleteTarget(target);
+  background.DeleteArrow(arrow);
+}
+
+TEST(WeakPtrTest, MoveOwnershipOfUnreferencedObject) {
+  BackgroundThread background;
+  background.Start();
+
+  Arrow* arrow;
+  {
+    Target target;
+    // Background thread creates WeakPtr.
+    background.CreateArrowFromTarget(&arrow, &target);
+
+    // Bind to background thread.
+    EXPECT_EQ(&target, background.DeRef(arrow));
+
+    // Release the only WeakPtr.
+    arrow->target.reset();
+
+    // Now we should be able to create a new reference from this thread.
+    arrow->target = target.AsWeakPtr();
+
+    // Re-bind to main thread.
+    EXPECT_EQ(&target, arrow->target.get());
+
+    // And the main thread can now delete the target.
+  }
+
+  delete arrow;
+}
+
+TEST(WeakPtrTest, MoveOwnershipAfterInvalidate) {
+  BackgroundThread background;
+  background.Start();
+
+  Arrow arrow;
+  std::unique_ptr<TargetWithFactory> target(new TargetWithFactory);
+
+  // Bind to main thread.
+  arrow.target = target->factory.GetWeakPtr();
+  EXPECT_EQ(target.get(), arrow.target.get());
+
+  target->factory.InvalidateWeakPtrs();
+  EXPECT_EQ(nullptr, arrow.target.get());
+
+  arrow.target = target->factory.GetWeakPtr();
+  // Re-bind to background thread.
+  EXPECT_EQ(target.get(), background.DeRef(&arrow));
+
+  // And the background thread can now delete the target.
+  background.DeleteTarget(target.release());
+}
+
+TEST(WeakPtrTest, MainThreadRefOutlivesBackgroundThreadRef) {
+  // Originating thread has a WeakPtr that outlives others.
+  // - Main thread creates a WeakPtr
+  // - Background thread creates a WeakPtr copy from the one in main thread
+  // - Destruct the WeakPtr on background thread
+  // - Destruct the WeakPtr on main thread
+  BackgroundThread background;
+  background.Start();
+
+  Target target;
+  Arrow arrow;
+  arrow.target = target.AsWeakPtr();
+
+  Arrow* arrow_copy;
+  background.CreateArrowFromArrow(&arrow_copy, &arrow);
+  EXPECT_EQ(arrow_copy->target.get(), &target);
+  background.DeleteArrow(arrow_copy);
+}
+
+TEST(WeakPtrTest, BackgroundThreadRefOutlivesMainThreadRef) {
+  // Originating thread drops all references before another thread.
+  // - Main thread creates a WeakPtr and passes copy to background thread
+  // - Destruct the pointer on main thread
+  // - Destruct the pointer on background thread
+  BackgroundThread background;
+  background.Start();
+
+  Target target;
+  Arrow* arrow_copy;
+  {
+    Arrow arrow;
+    arrow.target = target.AsWeakPtr();
+    background.CreateArrowFromArrow(&arrow_copy, &arrow);
+  }
+  EXPECT_EQ(arrow_copy->target.get(), &target);
+  background.DeleteArrow(arrow_copy);
+}
+
+TEST(WeakPtrTest, OwnerThreadDeletesObject) {
+  // Originating thread invalidates WeakPtrs while they are held by another
+  // thread.
+  // - Main thread creates a WeakPtr and passes a copy to the background thread
+  // - Object gets destroyed on main thread
+  //   (invalidates WeakPtr on background thread)
+  // - WeakPtr gets destroyed on background thread
+  BackgroundThread background;
+  background.Start();
+  Arrow* arrow_copy;
+  {
+    Target target;
+    Arrow arrow;
+    arrow.target = target.AsWeakPtr();
+    background.CreateArrowFromArrow(&arrow_copy, &arrow);
+  }
+  EXPECT_EQ(nullptr, arrow_copy->target.get());
+  background.DeleteArrow(arrow_copy);
+}
+
+TEST(WeakPtrTest, NonOwnerThreadCanCopyAndAssignWeakPtr) {
+  // Main thread creates a Target object.
+  Target target;
+  // Main thread creates an arrow referencing the Target.
+  Arrow *arrow = new Arrow();
+  arrow->target = target.AsWeakPtr();
+
+  // Background can copy and assign arrow (as well as the WeakPtr inside).
+  BackgroundThread background;
+  background.Start();
+  background.CopyAndAssignArrow(arrow);
+  background.DeleteArrow(arrow);
+}
+
+TEST(WeakPtrTest, NonOwnerThreadCanCopyAndAssignWeakPtrBase) {
+  // Main thread creates a Target object.
+  Target target;
+  // Main thread creates an arrow referencing the Target.
+  Arrow *arrow = new Arrow();
+  arrow->target = target.AsWeakPtr();
+
+  // Background can copy and assign arrow's WeakPtr to a base class WeakPtr.
+  BackgroundThread background;
+  background.Start();
+  background.CopyAndAssignArrowBase(arrow);
+  background.DeleteArrow(arrow);
+}
+
+TEST(WeakPtrTest, NonOwnerThreadCanDeleteWeakPtr) {
+  // Main thread creates a Target object.
+  Target target;
+  // Main thread creates an arrow referencing the Target.
+  Arrow* arrow = new Arrow();
+  arrow->target = target.AsWeakPtr();
+
+  // Background can delete arrow (as well as the WeakPtr inside).
+  BackgroundThread background;
+  background.Start();
+  background.DeleteArrow(arrow);
+}
+
+TEST(WeakPtrDeathTest, WeakPtrCopyDoesNotChangeThreadBinding) {
+  // The default style "fast" does not support multi-threaded tests
+  // (introduces deadlock on Linux).
+  ::testing::FLAGS_gtest_death_test_style = "threadsafe";
+
+  BackgroundThread background;
+  background.Start();
+
+  // Main thread creates a Target object.
+  Target target;
+  // Main thread creates an arrow referencing the Target.
+  Arrow arrow;
+  arrow.target = target.AsWeakPtr();
+
+  // Background copies the WeakPtr.
+  Arrow* arrow_copy;
+  background.CreateArrowFromArrow(&arrow_copy, &arrow);
+
+  // The copy is still bound to the main thread, so the main thread can deref.
+  EXPECT_EQ(arrow.target.get(), arrow_copy->target.get());
+
+  // Although the background thread created the copy, it cannot deref the
+  // copied WeakPtr.
+  ASSERT_DCHECK_DEATH(background.DeRef(arrow_copy));
+
+  background.DeleteArrow(arrow_copy);
+}
+
+TEST(WeakPtrDeathTest, NonOwnerThreadDereferencesWeakPtrAfterReference) {
+  // The default style "fast" does not support multi-threaded tests
+  // (introduces deadlock on Linux).
+  ::testing::FLAGS_gtest_death_test_style = "threadsafe";
+
+  // Main thread creates a Target object.
+  Target target;
+
+  // Main thread creates an arrow referencing the Target (so target's
+  // thread ownership cannot be implicitly moved).
+  Arrow arrow;
+  arrow.target = target.AsWeakPtr();
+  arrow.target.get();
+
+  // Background thread tries to deref target, which violates thread ownership.
+  BackgroundThread background;
+  background.Start();
+  ASSERT_DCHECK_DEATH(background.DeRef(&arrow));
+}
+
+TEST(WeakPtrDeathTest, NonOwnerThreadDeletesWeakPtrAfterReference) {
+  // The default style "fast" does not support multi-threaded tests
+  // (introduces deadlock on Linux).
+  ::testing::FLAGS_gtest_death_test_style = "threadsafe";
+
+  std::unique_ptr<Target> target(new Target());
+
+  // Main thread creates an arrow referencing the Target.
+  Arrow arrow;
+  arrow.target = target->AsWeakPtr();
+
+  // Background thread tries to deref target, binding it to the thread.
+  BackgroundThread background;
+  background.Start();
+  background.DeRef(&arrow);
+
+  // Main thread deletes Target, violating thread binding.
+  ASSERT_DCHECK_DEATH(target.reset());
+
+  // |target.reset()| died, so |target| still holds the object; we must pass
+  // it to the background thread for teardown.
+  background.DeleteTarget(target.release());
+}
+
+TEST(WeakPtrDeathTest, NonOwnerThreadDeletesObjectAfterReference) {
+  // The default style "fast" does not support multi-threaded tests
+  // (introduces deadlock on Linux).
+  ::testing::FLAGS_gtest_death_test_style = "threadsafe";
+
+  std::unique_ptr<Target> target(new Target());
+
+  // Main thread creates an arrow referencing the Target, and references it, so
+  // that it becomes bound to the thread.
+  Arrow arrow;
+  arrow.target = target->AsWeakPtr();
+  arrow.target.get();
+
+  // Background thread tries to delete target, violating thread binding.
+  BackgroundThread background;
+  background.Start();
+  ASSERT_DCHECK_DEATH(background.DeleteTarget(target.release()));
+}
+
+TEST(WeakPtrDeathTest, NonOwnerThreadReferencesObjectAfterDeletion) {
+  // The default style "fast" does not support multi-threaded tests
+  // (introduces deadlock on Linux).
+  ::testing::FLAGS_gtest_death_test_style = "threadsafe";
+
+  std::unique_ptr<Target> target(new Target());
+
+  // Main thread creates an arrow referencing the Target.
+  Arrow arrow;
+  arrow.target = target->AsWeakPtr();
+
+  // Background thread tries to delete target, binding the object to the thread.
+  BackgroundThread background;
+  background.Start();
+  background.DeleteTarget(target.release());
+
+  // Main thread attempts to dereference the target, violating thread binding.
+  ASSERT_DCHECK_DEATH(arrow.target.get());
+}
+
+}  // namespace base
diff --git a/base/memory/weak_ptr_unittest.nc b/base/memory/weak_ptr_unittest.nc
new file mode 100644
index 0000000..b96b033
--- /dev/null
+++ b/base/memory/weak_ptr_unittest.nc
@@ -0,0 +1,144 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is a "No Compile Test" suite.
+// http://dev.chromium.org/developers/testing/no-compile-tests
+
+#include "base/memory/weak_ptr.h"
+
+namespace base {
+
+struct Producer : SupportsWeakPtr<Producer> {};
+struct DerivedProducer : Producer {};
+struct OtherDerivedProducer : Producer {};
+struct MultiplyDerivedProducer : Producer,
+                                 SupportsWeakPtr<MultiplyDerivedProducer> {};
+struct Unrelated {};
+struct DerivedUnrelated : Unrelated {};
+
+#if defined(NCTEST_AUTO_DOWNCAST)  // [r"cannot initialize a variable of type 'base::DerivedProducer \*' with an rvalue of type 'base::Producer \*'"]
+
+void WontCompile() {
+  Producer f;
+  WeakPtr<Producer> ptr = f.AsWeakPtr();
+  WeakPtr<DerivedProducer> derived_ptr = ptr;
+}
+
+#elif defined(NCTEST_STATIC_DOWNCAST)  // [r"cannot initialize a variable of type 'base::DerivedProducer \*' with an rvalue of type 'base::Producer \*'"]
+
+void WontCompile() {
+  Producer f;
+  WeakPtr<Producer> ptr = f.AsWeakPtr();
+  WeakPtr<DerivedProducer> derived_ptr =
+      static_cast<WeakPtr<DerivedProducer> >(ptr);
+}
+
+#elif defined(NCTEST_AUTO_REF_DOWNCAST)  // [r"fatal error: non-const lvalue reference to type 'WeakPtr<base::DerivedProducer>' cannot bind to a value of unrelated type 'WeakPtr<base::Producer>'"]
+
+void WontCompile() {
+  Producer f;
+  WeakPtr<Producer> ptr = f.AsWeakPtr();
+  WeakPtr<DerivedProducer>& derived_ptr = ptr;
+}
+
+#elif defined(NCTEST_STATIC_REF_DOWNCAST)  // [r"fatal error: non-const lvalue reference to type 'WeakPtr<base::DerivedProducer>' cannot bind to a value of unrelated type 'WeakPtr<base::Producer>'"]
+
+void WontCompile() {
+  Producer f;
+  WeakPtr<Producer> ptr = f.AsWeakPtr();
+  WeakPtr<DerivedProducer>& derived_ptr =
+      static_cast<WeakPtr<DerivedProducer>&>(ptr);
+}
+
+#elif defined(NCTEST_STATIC_ASWEAKPTR_DOWNCAST)  // [r"no matching function"]
+
+void WontCompile() {
+  Producer f;
+  WeakPtr<DerivedProducer> ptr =
+      SupportsWeakPtr<Producer>::StaticAsWeakPtr<DerivedProducer>(&f);
+}
+
+#elif defined(NCTEST_UNSAFE_HELPER_DOWNCAST)  // [r"cannot initialize a variable of type 'base::DerivedProducer \*' with an rvalue of type 'base::Producer \*'"]
+
+void WontCompile() {
+  Producer f;
+  WeakPtr<DerivedProducer> ptr = AsWeakPtr(&f);
+}
+
+#elif defined(NCTEST_UNSAFE_INSTANTIATED_HELPER_DOWNCAST)  // [r"no matching function"]
+
+void WontCompile() {
+  Producer f;
+  WeakPtr<DerivedProducer> ptr = AsWeakPtr<DerivedProducer>(&f);
+}
+
+#elif defined(NCTEST_UNSAFE_WRONG_INSANTIATED_HELPER_DOWNCAST)  // [r"cannot initialize a variable of type 'base::DerivedProducer \*' with an rvalue of type 'base::Producer \*'"]
+
+void WontCompile() {
+  Producer f;
+  WeakPtr<DerivedProducer> ptr = AsWeakPtr<Producer>(&f);
+}
+
+#elif defined(NCTEST_UNSAFE_HELPER_CAST)  // [r"cannot initialize a variable of type 'base::OtherDerivedProducer \*' with an rvalue of type 'base::DerivedProducer \*'"]
+
+void WontCompile() {
+  DerivedProducer f;
+  WeakPtr<OtherDerivedProducer> ptr = AsWeakPtr(&f);
+}
+
+#elif defined(NCTEST_UNSAFE_INSTANTIATED_HELPER_SIDECAST)  // [r"fatal error: no matching function for call to 'AsWeakPtr'"]
+
+void WontCompile() {
+  DerivedProducer f;
+  WeakPtr<OtherDerivedProducer> ptr = AsWeakPtr<OtherDerivedProducer>(&f);
+}
+
+#elif defined(NCTEST_UNSAFE_WRONG_INSTANTIATED_HELPER_SIDECAST)  // [r"cannot initialize a variable of type 'base::OtherDerivedProducer \*' with an rvalue of type 'base::DerivedProducer \*'"]
+
+void WontCompile() {
+  DerivedProducer f;
+  WeakPtr<OtherDerivedProducer> ptr = AsWeakPtr<DerivedProducer>(&f);
+}
+
+#elif defined(NCTEST_UNRELATED_HELPER)  // [r"cannot initialize a variable of type 'base::Unrelated \*' with an rvalue of type 'base::DerivedProducer \*'"]
+
+void WontCompile() {
+  DerivedProducer f;
+  WeakPtr<Unrelated> ptr = AsWeakPtr(&f);
+}
+
+#elif defined(NCTEST_UNRELATED_INSTANTIATED_HELPER)  // [r"no matching function"]
+
+void WontCompile() {
+  DerivedProducer f;
+  WeakPtr<Unrelated> ptr = AsWeakPtr<Unrelated>(&f);
+}
+
+// TODO(hans): Remove .* and update the static_assert expectations once we roll
+// past Clang r313315. https://crbug.com/765692.
+
+#elif defined(NCTEST_COMPLETELY_UNRELATED_HELPER)  // [r"fatal error: static_assert failed .*\"AsWeakPtr argument must inherit from SupportsWeakPtr\""]
+
+void WontCompile() {
+  Unrelated f;
+  WeakPtr<Unrelated> ptr = AsWeakPtr(&f);
+}
+
+#elif defined(NCTEST_DERIVED_COMPLETELY_UNRELATED_HELPER)  // [r"fatal error: static_assert failed .*\"AsWeakPtr argument must inherit from SupportsWeakPtr\""]
+
+void WontCompile() {
+  DerivedUnrelated f;
+  WeakPtr<Unrelated> ptr = AsWeakPtr(&f);
+}
+
+#elif defined(NCTEST_AMBIGUOUS_ANCESTORS)  // [r"fatal error: use of undeclared identifier 'AsWeakPtrImpl'"]
+
+void WontCompile() {
+  MultiplyDerivedProducer f;
+  WeakPtr<MultiplyDerivedProducer> ptr = AsWeakPtr(&f);
+}
+
+#endif
+
+}  // namespace base
diff --git a/base/memory/writable_shared_memory_region.cc b/base/memory/writable_shared_memory_region.cc
new file mode 100644
index 0000000..0806c37
--- /dev/null
+++ b/base/memory/writable_shared_memory_region.cc
@@ -0,0 +1,84 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/writable_shared_memory_region.h"
+
+#include <utility>
+
+#include "base/memory/shared_memory.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// static
+WritableSharedMemoryRegion WritableSharedMemoryRegion::Create(size_t size) {
+  subtle::PlatformSharedMemoryRegion handle =
+      subtle::PlatformSharedMemoryRegion::CreateWritable(size);
+
+  return WritableSharedMemoryRegion(std::move(handle));
+}
+
+// static
+WritableSharedMemoryRegion WritableSharedMemoryRegion::Deserialize(
+    subtle::PlatformSharedMemoryRegion handle) {
+  return WritableSharedMemoryRegion(std::move(handle));
+}
+
+// static
+subtle::PlatformSharedMemoryRegion
+WritableSharedMemoryRegion::TakeHandleForSerialization(
+    WritableSharedMemoryRegion region) {
+  return std::move(region.handle_);
+}
+
+// static
+ReadOnlySharedMemoryRegion WritableSharedMemoryRegion::ConvertToReadOnly(
+    WritableSharedMemoryRegion region) {
+  subtle::PlatformSharedMemoryRegion handle = std::move(region.handle_);
+  if (!handle.ConvertToReadOnly())
+    return {};
+
+  return ReadOnlySharedMemoryRegion::Deserialize(std::move(handle));
+}
+
+WritableSharedMemoryRegion::WritableSharedMemoryRegion() = default;
+WritableSharedMemoryRegion::WritableSharedMemoryRegion(
+    WritableSharedMemoryRegion&& region) = default;
+WritableSharedMemoryRegion& WritableSharedMemoryRegion::operator=(
+    WritableSharedMemoryRegion&& region) = default;
+WritableSharedMemoryRegion::~WritableSharedMemoryRegion() = default;
+
+WritableSharedMemoryMapping WritableSharedMemoryRegion::Map() const {
+  return MapAt(0, handle_.GetSize());
+}
+
+WritableSharedMemoryMapping WritableSharedMemoryRegion::MapAt(
+    off_t offset,
+    size_t size) const {
+  if (!IsValid())
+    return {};
+
+  void* memory = nullptr;
+  size_t mapped_size = 0;
+  if (!handle_.MapAt(offset, size, &memory, &mapped_size))
+    return {};
+
+  return WritableSharedMemoryMapping(memory, size, mapped_size,
+                                     handle_.GetGUID());
+}
+
+bool WritableSharedMemoryRegion::IsValid() const {
+  return handle_.IsValid();
+}
+
+WritableSharedMemoryRegion::WritableSharedMemoryRegion(
+    subtle::PlatformSharedMemoryRegion handle)
+    : handle_(std::move(handle)) {
+  if (handle_.IsValid()) {
+    CHECK_EQ(handle_.GetMode(),
+             subtle::PlatformSharedMemoryRegion::Mode::kWritable);
+  }
+}
+
+}  // namespace base
diff --git a/base/memory/writable_shared_memory_region.h b/base/memory/writable_shared_memory_region.h
new file mode 100644
index 0000000..b953a10
--- /dev/null
+++ b/base/memory/writable_shared_memory_region.h
@@ -0,0 +1,97 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_WRITABLE_SHARED_MEMORY_REGION_H_
+#define BASE_MEMORY_WRITABLE_SHARED_MEMORY_REGION_H_
+
+#include "base/macros.h"
+#include "base/memory/platform_shared_memory_region.h"
+#include "base/memory/read_only_shared_memory_region.h"
+#include "base/memory/shared_memory_mapping.h"
+
+namespace base {
+
+// Scoped move-only handle to a region of platform shared memory. The instance
+// owns the platform handle it wraps. Mappings created by this region are
+// writable. These mappings remain valid even after the region handle is moved
+// or destroyed.
+//
+// This region can be locked to read-only access by converting it to a
+// ReadOnlySharedMemoryRegion. However, unlike ReadOnlySharedMemoryRegion and
+// UnsafeSharedMemoryRegion, ownership of this region (while writable) is unique
+// and may only be transferred, not duplicated.
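+//
+// EXAMPLE (a minimal sketch of the read-only conversion described above; the
+// 4096-byte size is arbitrary):
+//
+//   base::WritableSharedMemoryRegion writable =
+//       base::WritableSharedMemoryRegion::Create(4096);
+//   base::WritableSharedMemoryMapping rw_mapping = writable.Map();
+//   base::ReadOnlySharedMemoryRegion read_only =
+//       base::WritableSharedMemoryRegion::ConvertToReadOnly(
+//           std::move(writable));
+//   // |rw_mapping| remains valid; no new writable mappings can be created.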
+class BASE_EXPORT WritableSharedMemoryRegion {
+ public:
+  using MappingType = WritableSharedMemoryMapping;
+
+  // Creates a new WritableSharedMemoryRegion instance of a given
+  // size that can be used for mapping writable shared memory into the virtual
+  // address space.
+  static WritableSharedMemoryRegion Create(size_t size);
+
+  // Returns a WritableSharedMemoryRegion built from a platform handle that was
+  // taken from another WritableSharedMemoryRegion instance. Returns an invalid
+  // region iff the |handle| is invalid. CHECK-fails if the |handle| isn't
+  // writable.
+  // This should be used only by the code passing handles across process
+  // boundaries.
+  static WritableSharedMemoryRegion Deserialize(
+      subtle::PlatformSharedMemoryRegion handle);
+
+  // Extracts a platform handle from the region. Ownership is transferred to the
+  // returned region object.
+  // This should be used only for sending the handle from the current
+  // process to another.
+  static subtle::PlatformSharedMemoryRegion TakeHandleForSerialization(
+      WritableSharedMemoryRegion region);
+
+  // Makes the region read-only. No new writable mappings of the region can be
+  // created after this call. Returns an invalid region on failure.
+  static ReadOnlySharedMemoryRegion ConvertToReadOnly(
+      WritableSharedMemoryRegion region);
+
+  // Default constructor initializes an invalid instance.
+  WritableSharedMemoryRegion();
+
+  // Move operations are allowed.
+  WritableSharedMemoryRegion(WritableSharedMemoryRegion&&);
+  WritableSharedMemoryRegion& operator=(WritableSharedMemoryRegion&&);
+
+  // Destructor closes shared memory region if valid.
+  // All created mappings will remain valid.
+  ~WritableSharedMemoryRegion();
+
+  // Maps the shared memory region into the caller's address space with write
+  // access. The mapped address is guaranteed to have an alignment of
+  // at least |subtle::PlatformSharedMemoryRegion::kMapMinimumAlignment|.
+  // Returns a valid WritableSharedMemoryMapping instance on success, invalid
+  // otherwise.
+  WritableSharedMemoryMapping Map() const;
+
+  // Same as above, but maps only |size| bytes of the shared memory region
+  // starting at the given |offset|. |offset| must be aligned to the value of
+  // |SysInfo::VMAllocationGranularity()|. Returns an invalid mapping if
+  // requested bytes are out of the region limits.
+  WritableSharedMemoryMapping MapAt(off_t offset, size_t size) const;
+
+  // Whether underlying platform handles are valid.
+  bool IsValid() const;
+
+  // Returns the maximum mapping size that can be created from this region.
+  size_t GetSize() const {
+    DCHECK(IsValid());
+    return handle_.GetSize();
+  }
+
+ private:
+  explicit WritableSharedMemoryRegion(
+      subtle::PlatformSharedMemoryRegion handle);
+
+  subtle::PlatformSharedMemoryRegion handle_;
+
+  DISALLOW_COPY_AND_ASSIGN(WritableSharedMemoryRegion);
+};
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_WRITABLE_SHARED_MEMORY_REGION_H_
diff --git a/base/message_loop/incoming_task_queue.cc b/base/message_loop/incoming_task_queue.cc
new file mode 100644
index 0000000..9f5f855
--- /dev/null
+++ b/base/message_loop/incoming_task_queue.cc
@@ -0,0 +1,373 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_loop/incoming_task_queue.h"
+
+#include <limits>
+#include <utility>
+
+#include "base/location.h"
+#include "base/message_loop/message_loop.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+
+namespace base {
+namespace internal {
+
+namespace {
+
+#if DCHECK_IS_ON()
+// Delays larger than this are often bogus, and a warning should be emitted in
+// debug builds to warn developers.  http://crbug.com/450045
+constexpr TimeDelta kTaskDelayWarningThreshold = TimeDelta::FromDays(14);
+#endif
+
+// Returns true if MessagePump::ScheduleWork() must be called one
+// time for every task that is added to the MessageLoop incoming queue.
+bool AlwaysNotifyPump(MessageLoop::Type type) {
+#if defined(OS_ANDROID)
+  // The Android UI message loop needs to get notified each time a task is
+  // added to the incoming queue.
+  return type == MessageLoop::TYPE_UI || type == MessageLoop::TYPE_JAVA;
+#else
+  return false;
+#endif
+}
+
+TimeTicks CalculateDelayedRuntime(TimeDelta delay) {
+  TimeTicks delayed_run_time;
+  if (delay > TimeDelta())
+    delayed_run_time = TimeTicks::Now() + delay;
+  else
+    DCHECK_EQ(delay.InMilliseconds(), 0) << "delay should not be negative";
+  return delayed_run_time;
+}
+
+}  // namespace
+
+IncomingTaskQueue::IncomingTaskQueue(MessageLoop* message_loop)
+    : always_schedule_work_(AlwaysNotifyPump(message_loop->type())),
+      triage_tasks_(this),
+      delayed_tasks_(this),
+      deferred_tasks_(this),
+      message_loop_(message_loop) {
+  // The constructing sequence is not necessarily the running sequence in the
+  // case of base::Thread.
+  DETACH_FROM_SEQUENCE(sequence_checker_);
+}
+
+bool IncomingTaskQueue::AddToIncomingQueue(const Location& from_here,
+                                           OnceClosure task,
+                                           TimeDelta delay,
+                                           Nestable nestable) {
+  // Use CHECK instead of DCHECK to crash earlier. See http://crbug.com/711167
+  // for details.
+  CHECK(task);
+  DLOG_IF(WARNING, delay > kTaskDelayWarningThreshold)
+      << "Requesting super-long task delay period of " << delay.InSeconds()
+      << " seconds from here: " << from_here.ToString();
+
+  PendingTask pending_task(from_here, std::move(task),
+                           CalculateDelayedRuntime(delay), nestable);
+#if defined(OS_WIN)
+  // We consider that the task needs a high resolution timer if the delay is
+  // more than 0 and less than 32ms. This caps the relative error to
+  // less than 50%: a 33ms wait can wake at 48ms since the default
+  // resolution on Windows is between 10 and 15ms.
+  if (delay > TimeDelta() &&
+      delay.InMilliseconds() < (2 * Time::kMinLowResolutionThresholdMs)) {
+    pending_task.is_high_res = true;
+  }
+#endif
+  return PostPendingTask(&pending_task);
+}
+
+void IncomingTaskQueue::WillDestroyCurrentMessageLoop() {
+  {
+    AutoLock auto_lock(incoming_queue_lock_);
+    accept_new_tasks_ = false;
+  }
+  {
+    AutoLock auto_lock(message_loop_lock_);
+    message_loop_ = nullptr;
+  }
+}
+
+void IncomingTaskQueue::StartScheduling() {
+  bool schedule_work;
+  {
+    AutoLock lock(incoming_queue_lock_);
+    DCHECK(!is_ready_for_scheduling_);
+    DCHECK(!message_loop_scheduled_);
+    is_ready_for_scheduling_ = true;
+    schedule_work = !incoming_queue_.empty();
+    if (schedule_work)
+      message_loop_scheduled_ = true;
+  }
+  if (schedule_work) {
+    DCHECK(message_loop_);
+    AutoLock auto_lock(message_loop_lock_);
+    message_loop_->ScheduleWork();
+  }
+}
+
+IncomingTaskQueue::~IncomingTaskQueue() {
+  // Verify that WillDestroyCurrentMessageLoop() has been called.
+  DCHECK(!message_loop_);
+}
+
+void IncomingTaskQueue::RunTask(PendingTask* pending_task) {
+  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+  task_annotator_.RunTask("MessageLoop::PostTask", pending_task);
+}
+
+IncomingTaskQueue::TriageQueue::TriageQueue(IncomingTaskQueue* outer)
+    : outer_(outer) {}
+
+IncomingTaskQueue::TriageQueue::~TriageQueue() = default;
+
+const PendingTask& IncomingTaskQueue::TriageQueue::Peek() {
+  DCHECK_CALLED_ON_VALID_SEQUENCE(outer_->sequence_checker_);
+  ReloadFromIncomingQueueIfEmpty();
+  DCHECK(!queue_.empty());
+  return queue_.front();
+}
+
+PendingTask IncomingTaskQueue::TriageQueue::Pop() {
+  DCHECK_CALLED_ON_VALID_SEQUENCE(outer_->sequence_checker_);
+  ReloadFromIncomingQueueIfEmpty();
+  DCHECK(!queue_.empty());
+  PendingTask pending_task = std::move(queue_.front());
+  queue_.pop();
+
+  if (pending_task.is_high_res)
+    --outer_->pending_high_res_tasks_;
+
+  return pending_task;
+}
+
+bool IncomingTaskQueue::TriageQueue::HasTasks() {
+  DCHECK_CALLED_ON_VALID_SEQUENCE(outer_->sequence_checker_);
+  ReloadFromIncomingQueueIfEmpty();
+  return !queue_.empty();
+}
+
+void IncomingTaskQueue::TriageQueue::Clear() {
+  DCHECK_CALLED_ON_VALID_SEQUENCE(outer_->sequence_checker_);
+  // Previously, MessageLoop would delete all tasks including delayed and
+  // deferred tasks in a single round before attempting to reload from the
+  // incoming queue to see if more tasks remained. This gave it a chance to
+  // assess whether or not clearing should continue. As a result, while
+  // reloading is automatic for getting and seeing if tasks exist, it is not
+  // automatic for Clear().
+  while (!queue_.empty()) {
+    PendingTask pending_task = std::move(queue_.front());
+    queue_.pop();
+
+    if (pending_task.is_high_res)
+      --outer_->pending_high_res_tasks_;
+
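+    // Tasks with a delayed run time are forwarded to the delayed queue rather
+    // than dropped here; MessageLoop::DeletePendingTasks() clears that queue
+    // separately.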
+    if (!pending_task.delayed_run_time.is_null()) {
+      outer_->delayed_tasks().Push(std::move(pending_task));
+    }
+  }
+}
+
+void IncomingTaskQueue::TriageQueue::ReloadFromIncomingQueueIfEmpty() {
+  DCHECK_CALLED_ON_VALID_SEQUENCE(outer_->sequence_checker_);
+  if (queue_.empty()) {
+    // TODO(robliao): Since these high resolution tasks aren't yet in the
+    // delayed queue, they technically shouldn't trigger high resolution timers
+    // until they are.
+    outer_->pending_high_res_tasks_ += outer_->ReloadWorkQueue(&queue_);
+  }
+}
+
+IncomingTaskQueue::DelayedQueue::DelayedQueue(IncomingTaskQueue* outer)
+    : outer_(outer) {}
+
+IncomingTaskQueue::DelayedQueue::~DelayedQueue() = default;
+
+void IncomingTaskQueue::DelayedQueue::Push(PendingTask pending_task) {
+  DCHECK_CALLED_ON_VALID_SEQUENCE(outer_->sequence_checker_);
+
+  if (pending_task.is_high_res)
+    ++outer_->pending_high_res_tasks_;
+
+  queue_.push(std::move(pending_task));
+}
+
+const PendingTask& IncomingTaskQueue::DelayedQueue::Peek() {
+  DCHECK_CALLED_ON_VALID_SEQUENCE(outer_->sequence_checker_);
+  DCHECK(!queue_.empty());
+  return queue_.top();
+}
+
+PendingTask IncomingTaskQueue::DelayedQueue::Pop() {
+  DCHECK_CALLED_ON_VALID_SEQUENCE(outer_->sequence_checker_);
+  DCHECK(!queue_.empty());
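+  // std::priority_queue::top() only exposes a const reference; the element is
+  // removed immediately below, so moving out of it here is safe.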
+  PendingTask delayed_task = std::move(const_cast<PendingTask&>(queue_.top()));
+  queue_.pop();
+
+  if (delayed_task.is_high_res)
+    --outer_->pending_high_res_tasks_;
+
+  return delayed_task;
+}
+
+bool IncomingTaskQueue::DelayedQueue::HasTasks() {
+  DCHECK_CALLED_ON_VALID_SEQUENCE(outer_->sequence_checker_);
+  // TODO(robliao): The other queues don't check for IsCancelled(). Should they?
+  while (!queue_.empty() && Peek().task.IsCancelled())
+    Pop();
+
+  return !queue_.empty();
+}
+
+void IncomingTaskQueue::DelayedQueue::Clear() {
+  DCHECK_CALLED_ON_VALID_SEQUENCE(outer_->sequence_checker_);
+  while (!queue_.empty())
+    Pop();
+}
+
+IncomingTaskQueue::DeferredQueue::DeferredQueue(IncomingTaskQueue* outer)
+    : outer_(outer) {}
+
+IncomingTaskQueue::DeferredQueue::~DeferredQueue() = default;
+
+void IncomingTaskQueue::DeferredQueue::Push(PendingTask pending_task) {
+  DCHECK_CALLED_ON_VALID_SEQUENCE(outer_->sequence_checker_);
+
+  // TODO(robliao): These tasks should not count towards the high res task count
+  // since they are no longer in the delayed queue.
+  if (pending_task.is_high_res)
+    ++outer_->pending_high_res_tasks_;
+
+  queue_.push(std::move(pending_task));
+}
+
+const PendingTask& IncomingTaskQueue::DeferredQueue::Peek() {
+  DCHECK_CALLED_ON_VALID_SEQUENCE(outer_->sequence_checker_);
+  DCHECK(!queue_.empty());
+  return queue_.front();
+}
+
+PendingTask IncomingTaskQueue::DeferredQueue::Pop() {
+  DCHECK_CALLED_ON_VALID_SEQUENCE(outer_->sequence_checker_);
+  DCHECK(!queue_.empty());
+  PendingTask deferred_task = std::move(queue_.front());
+  queue_.pop();
+
+  if (deferred_task.is_high_res)
+    --outer_->pending_high_res_tasks_;
+
+  return deferred_task;
+}
+
+bool IncomingTaskQueue::DeferredQueue::HasTasks() {
+  DCHECK_CALLED_ON_VALID_SEQUENCE(outer_->sequence_checker_);
+  return !queue_.empty();
+}
+
+void IncomingTaskQueue::DeferredQueue::Clear() {
+  DCHECK_CALLED_ON_VALID_SEQUENCE(outer_->sequence_checker_);
+  while (!queue_.empty())
+    Pop();
+}
+
+bool IncomingTaskQueue::PostPendingTask(PendingTask* pending_task) {
+  // Warning: Don't try to short-circuit and handle this thread's tasks more
+  // directly, as that could starve the handling of foreign threads.  Put every
+  // task into this queue.
+  bool accept_new_tasks;
+  bool schedule_work = false;
+  {
+    AutoLock auto_lock(incoming_queue_lock_);
+    accept_new_tasks = accept_new_tasks_;
+    if (accept_new_tasks)
+      schedule_work = PostPendingTaskLockRequired(pending_task);
+  }
+
+  if (!accept_new_tasks) {
+    // Clear the pending task outside of |incoming_queue_lock_| to prevent any
+    // chance of self-deadlock if destroying a task also posts a task to this
+    // queue.
+    DCHECK(!schedule_work);
+    pending_task->task.Reset();
+    return false;
+  }
+
+  // Wake up the message loop and schedule work. This is done outside
+  // |incoming_queue_lock_| to allow multiple tasks to be posted while
+  // ScheduleWork() is running. For platforms (e.g. Android) that require one
+  // call to ScheduleWork() for each task, all pending tasks may serialize
+  // within the ScheduleWork() call. As a result, holding a lock to maintain the
+  // lifetime of |message_loop_| is less of a concern.
+  if (schedule_work) {
+    // Ensures |message_loop_| isn't destroyed while running.
+    AutoLock auto_lock(message_loop_lock_);
+    if (message_loop_)
+      message_loop_->ScheduleWork();
+  }
+
+  return true;
+}
+
+bool IncomingTaskQueue::PostPendingTaskLockRequired(PendingTask* pending_task) {
+  incoming_queue_lock_.AssertAcquired();
+
+#if defined(OS_WIN)
+  if (pending_task->is_high_res)
+    ++high_res_task_count_;
+#endif
+
+  // Initialize the sequence number. The sequence number is used for delayed
+  // tasks (to facilitate FIFO sorting when two tasks have the same
+  // delayed_run_time value) and for identifying the task in about:tracing.
+  pending_task->sequence_num = next_sequence_num_++;
+
+  task_annotator_.DidQueueTask("MessageLoop::PostTask", *pending_task);
+
+  bool was_empty = incoming_queue_.empty();
+  incoming_queue_.push(std::move(*pending_task));
+
+  if (is_ready_for_scheduling_ &&
+      (always_schedule_work_ || (!message_loop_scheduled_ && was_empty))) {
+    // After we've scheduled the message loop, we do not need to do so again
+    // until we know it has processed all of the work in our queue and is
+    // waiting for more work again. The message loop will always attempt to
+    // reload from the incoming queue before waiting again so we clear this
+    // flag in ReloadWorkQueue().
+    message_loop_scheduled_ = true;
+    return true;
+  }
+  return false;
+}
+
+int IncomingTaskQueue::ReloadWorkQueue(TaskQueue* work_queue) {
+  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+  // Make sure no tasks are lost.
+  DCHECK(work_queue->empty());
+
+  // Acquire all we can from the inter-thread queue with one lock acquisition.
+  AutoLock lock(incoming_queue_lock_);
+  if (incoming_queue_.empty()) {
+    // If the loop attempts to reload but there are no tasks in the incoming
+    // queue, that means it will go to sleep waiting for more work. If the
+    // incoming queue becomes nonempty we need to schedule it again.
+    message_loop_scheduled_ = false;
+  } else {
+    incoming_queue_.swap(*work_queue);
+  }
+  // Reset the count of high resolution tasks since our queue is now empty.
+  int high_res_tasks = high_res_task_count_;
+  high_res_task_count_ = 0;
+  return high_res_tasks;
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/message_loop/incoming_task_queue.h b/base/message_loop/incoming_task_queue.h
new file mode 100644
index 0000000..f158d2a
--- /dev/null
+++ b/base/message_loop/incoming_task_queue.h
@@ -0,0 +1,265 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MESSAGE_LOOP_INCOMING_TASK_QUEUE_H_
+#define BASE_MESSAGE_LOOP_INCOMING_TASK_QUEUE_H_
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/debug/task_annotator.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/pending_task.h"
+#include "base/sequence_checker.h"
+#include "base/synchronization/lock.h"
+#include "base/time/time.h"
+
+namespace base {
+
+class MessageLoop;
+class PostTaskTest;
+
+namespace internal {
+
+// Implements a queue of tasks posted to the message loop running on the
+// current thread. This class takes care of synchronizing task posting from
+// different threads and, together with MessageLoop, ensures clean shutdown.
+class BASE_EXPORT IncomingTaskQueue
+    : public RefCountedThreadSafe<IncomingTaskQueue> {
+ public:
+  // Provides a read-and-remove-only view into a task queue.
+  class ReadAndRemoveOnlyQueue {
+   public:
+    ReadAndRemoveOnlyQueue() = default;
+    virtual ~ReadAndRemoveOnlyQueue() = default;
+
+    // Returns the next task. HasTasks() is assumed to be true.
+    virtual const PendingTask& Peek() = 0;
+
+    // Removes and returns the next task. HasTasks() is assumed to be true.
+    virtual PendingTask Pop() = 0;
+
+    // Whether this queue has tasks.
+    virtual bool HasTasks() = 0;
+
+    // Removes all tasks.
+    virtual void Clear() = 0;
+
+   private:
+    DISALLOW_COPY_AND_ASSIGN(ReadAndRemoveOnlyQueue);
+  };
+
+  // Provides a read-write task queue.
+  class Queue : public ReadAndRemoveOnlyQueue {
+   public:
+    Queue() = default;
+    ~Queue() override = default;
+
+    // Adds the task to the end of the queue.
+    virtual void Push(PendingTask pending_task) = 0;
+
+   private:
+    DISALLOW_COPY_AND_ASSIGN(Queue);
+  };
+
+  explicit IncomingTaskQueue(MessageLoop* message_loop);
+
+  // Appends a task to the incoming queue. Posting of all tasks is routed
+  // through AddToIncomingQueue() or TryAddToIncomingQueue() to make sure that
+  // task posting is properly synchronized between different threads.
+  //
+  // Returns true if the task was successfully added to the queue, otherwise
+  // returns false. In all cases, the ownership of |task| is transferred to the
+  // called method.
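+  //
+  // Illustrative call (the task and delay here are hypothetical, not part of
+  // this API's contract):
+  //   queue->AddToIncomingQueue(FROM_HERE, base::BindOnce(&DoSomething),
+  //                             base::TimeDelta::FromMilliseconds(10),
+  //                             base::Nestable::kNestable);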
+  bool AddToIncomingQueue(const Location& from_here,
+                          OnceClosure task,
+                          TimeDelta delay,
+                          Nestable nestable);
+
+  // Disconnects |this| from the parent message loop.
+  void WillDestroyCurrentMessageLoop();
+
+  // This should be called when the message loop becomes ready for
+  // scheduling work.
+  void StartScheduling();
+
+  // Runs |pending_task|.
+  void RunTask(PendingTask* pending_task);
+
+  ReadAndRemoveOnlyQueue& triage_tasks() { return triage_tasks_; }
+
+  Queue& delayed_tasks() { return delayed_tasks_; }
+
+  Queue& deferred_tasks() { return deferred_tasks_; }
+
+  bool HasPendingHighResolutionTasks() {
+    DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+    return pending_high_res_tasks_ > 0;
+  }
+
+ private:
+  friend class base::PostTaskTest;
+  friend class RefCountedThreadSafe<IncomingTaskQueue>;
+
+  // The queues below support the previous MessageLoop behavior of maintaining
+  // three queues to process tasks:
+  //
+  // TriageQueue
+  // The first queue to receive all tasks for the processing sequence (when
+  // reloading from the thread-safe |incoming_queue_|). Tasks are generally
+  // either dispatched immediately or sent to the queues below.
+  //
+  // DelayedQueue
+  // The queue for holding tasks that should be run later and sorted by expected
+  // run time.
+  //
+  // DeferredQueue
+  // The queue for holding tasks that couldn't be run while the MessageLoop was
+  // nested. These are generally processed during the idle stage.
+  //
+  // Many of these queues do not share implementations, even though they look
+  // like they could, because of small quirks (reloading semantics) or differing
+  // underlying data structures (TaskQueue vs. DelayedTaskQueue).
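+  //
+  // A rough sketch of the flow (illustrative only):
+  //
+  //   PostTask --> incoming_queue_ (cross-thread, guarded by
+  //                incoming_queue_lock_)
+  //                     |  ReloadWorkQueue()
+  //                     v
+  //                TriageQueue --(delayed_run_time set)--> DelayedQueue
+  //                     |   \--(deferred while nested)---> DeferredQueue
+  //                     v
+  //                  RunTask()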
+
+  // The starting point for all tasks on the sequence processing the tasks.
+  class TriageQueue : public ReadAndRemoveOnlyQueue {
+   public:
+    TriageQueue(IncomingTaskQueue* outer);
+    ~TriageQueue() override;
+
+    // ReadAndRemoveOnlyQueue:
+    // In general, the methods below attempt to reload from the incoming queue
+    // when this queue is empty; Clear() is the exception. See Clear() for why
+    // it doesn't reload.
+    const PendingTask& Peek() override;
+    PendingTask Pop() override;
+    // Whether this queue has tasks after reloading from the incoming queue.
+    bool HasTasks() override;
+    void Clear() override;
+
+   private:
+    void ReloadFromIncomingQueueIfEmpty();
+
+    IncomingTaskQueue* const outer_;
+    TaskQueue queue_;
+
+    DISALLOW_COPY_AND_ASSIGN(TriageQueue);
+  };
+
+  class DelayedQueue : public Queue {
+   public:
+    DelayedQueue(IncomingTaskQueue* outer);
+    ~DelayedQueue() override;
+
+    // Queue:
+    const PendingTask& Peek() override;
+    PendingTask Pop() override;
+    // Whether this queue has tasks after sweeping the cancelled ones in front.
+    bool HasTasks() override;
+    void Clear() override;
+    void Push(PendingTask pending_task) override;
+
+   private:
+    IncomingTaskQueue* const outer_;
+    DelayedTaskQueue queue_;
+
+    DISALLOW_COPY_AND_ASSIGN(DelayedQueue);
+  };
+
+  class DeferredQueue : public Queue {
+   public:
+    DeferredQueue(IncomingTaskQueue* outer);
+    ~DeferredQueue() override;
+
+    // Queue:
+    const PendingTask& Peek() override;
+    PendingTask Pop() override;
+    bool HasTasks() override;
+    void Clear() override;
+    void Push(PendingTask pending_task) override;
+
+   private:
+    IncomingTaskQueue* const outer_;
+    TaskQueue queue_;
+
+    DISALLOW_COPY_AND_ASSIGN(DeferredQueue);
+  };
+
+  virtual ~IncomingTaskQueue();
+
+  // Adds a task to |incoming_queue_|. The caller retains ownership of
+  // |pending_task|, but this function will reset the value of
+  // |pending_task->task|. This is needed to ensure that the posting call stack
+  // does not retain |pending_task->task| beyond this function call.
+  bool PostPendingTask(PendingTask* pending_task);
+
+  // Does the real work of posting a pending task. Returns true if the caller
+  // should call ScheduleWork() on the message loop.
+  bool PostPendingTaskLockRequired(PendingTask* pending_task);
+
+  // Loads tasks from the |incoming_queue_| into |*work_queue|. Must be called
+  // from the sequence processing the tasks. Returns the number of tasks that
+  // require high resolution timers in |work_queue|.
+  int ReloadWorkQueue(TaskQueue* work_queue);
+
+  // Checks calls made only on the MessageLoop thread.
+  SEQUENCE_CHECKER(sequence_checker_);
+
+  debug::TaskAnnotator task_annotator_;
+
+  // True if we always need to call ScheduleWork when receiving a new task, even
+  // if the incoming queue was not empty.
+  const bool always_schedule_work_;
+
+  // Queue for initial triaging of tasks on the |sequence_checker_| sequence.
+  TriageQueue triage_tasks_;
+
+  // Queue for delayed tasks on the |sequence_checker_| sequence.
+  DelayedQueue delayed_tasks_;
+
+  // Queue for non-nestable deferred tasks on the |sequence_checker_| sequence.
+  DeferredQueue deferred_tasks_;
+
+  // Number of high resolution tasks in the sequence affine queues above.
+  int pending_high_res_tasks_ = 0;
+
+  // Lock that serializes |message_loop_->ScheduleWork()| calls as well as
+  // prevents |message_loop_| from being made nullptr during such a call.
+  base::Lock message_loop_lock_;
+
+  // Points to the message loop that owns |this|.
+  MessageLoop* message_loop_;
+
+  // Synchronizes access to all members below this line.
+  base::Lock incoming_queue_lock_;
+
+  // Number of tasks that require high resolution timing. This value is kept
+  // so that ReloadWorkQueue() completes in constant time.
+  int high_res_task_count_ = 0;
+
+  // An incoming queue of tasks that are acquired under a mutex for processing
+  // on this instance's thread. These tasks have not yet been pushed to
+  // |triage_tasks_|.
+  TaskQueue incoming_queue_;
+
+  // True if new tasks should be accepted.
+  bool accept_new_tasks_ = true;
+
+  // The next sequence number to use for delayed tasks.
+  int next_sequence_num_ = 0;
+
+  // True if our message loop has already been scheduled and does not need to be
+  // scheduled again until an empty reload occurs.
+  bool message_loop_scheduled_ = false;
+
+  // False until StartScheduling() is called.
+  bool is_ready_for_scheduling_ = false;
+
+  DISALLOW_COPY_AND_ASSIGN(IncomingTaskQueue);
+};
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_MESSAGE_LOOP_INCOMING_TASK_QUEUE_H_
diff --git a/base/message_loop/message_loop.cc b/base/message_loop/message_loop.cc
new file mode 100644
index 0000000..a7c3f25
--- /dev/null
+++ b/base/message_loop/message_loop.cc
@@ -0,0 +1,488 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_loop/message_loop.h"
+
+#include <algorithm>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/message_loop/message_pump_default.h"
+#include "base/message_loop/message_pump_for_io.h"
+#include "base/message_loop/message_pump_for_ui.h"
+#include "base/run_loop.h"
+#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
+#include "base/threading/thread_id_name_manager.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/trace_event/trace_event.h"
+
+#if defined(OS_MACOSX)
+#include "base/message_loop/message_pump_mac.h"
+#endif
+
+namespace base {
+
+namespace {
+
+MessageLoop::MessagePumpFactory* message_pump_for_ui_factory_ = nullptr;
+
+std::unique_ptr<MessagePump> ReturnPump(std::unique_ptr<MessagePump> pump) {
+  return pump;
+}
+
+}  // namespace
+
+//------------------------------------------------------------------------------
+
+MessageLoop::MessageLoop(Type type)
+    : MessageLoop(type, MessagePumpFactoryCallback()) {
+  BindToCurrentThread();
+}
+
+MessageLoop::MessageLoop(std::unique_ptr<MessagePump> pump)
+    : MessageLoop(TYPE_CUSTOM, BindOnce(&ReturnPump, std::move(pump))) {
+  BindToCurrentThread();
+}
+
+MessageLoop::~MessageLoop() {
+  // If |pump_| is non-null, this message loop has been bound and should be the
+  // current one on this thread. Otherwise, this loop is being destructed before
+  // it was bound to a thread, so a different message loop (or no loop at all)
+  // may be current.
+  DCHECK((pump_ && MessageLoopCurrent::IsBoundToCurrentThreadInternal(this)) ||
+         (!pump_ && !MessageLoopCurrent::IsBoundToCurrentThreadInternal(this)));
+
+  // iOS just attaches to the loop, it doesn't Run it.
+  // TODO(stuartmorgan): Consider wiring up a Detach().
+#if !defined(OS_IOS)
+  // There should be no active RunLoops on this thread, unless this MessageLoop
+  // isn't bound to the current thread (see other condition at the top of this
+  // method).
+  DCHECK(
+      (!pump_ && !MessageLoopCurrent::IsBoundToCurrentThreadInternal(this)) ||
+      !RunLoop::IsRunningOnCurrentThread());
+#endif  // !defined(OS_IOS)
+
+#if defined(OS_WIN)
+  if (in_high_res_mode_)
+    Time::ActivateHighResolutionTimer(false);
+#endif
+  // Clean up any unprocessed tasks, but take care: deleting a task could
+  // result in the addition of more tasks (e.g., via DeleteSoon).  We set a
+  // limit on the number of times we will allow a deleted task to generate more
+  // tasks.  Normally, we should only pass through this loop once or twice.  If
+  // we end up hitting the loop limit, then it is probably due to one task that
+  // is being stubborn.  Inspect the queues to see who is left.
+  bool tasks_remain;
+  for (int i = 0; i < 100; ++i) {
+    DeletePendingTasks();
+    // If we end up with empty queues, then break out of the loop.
+    tasks_remain = incoming_task_queue_->triage_tasks().HasTasks();
+    if (!tasks_remain)
+      break;
+  }
+  DCHECK(!tasks_remain);
+
+  // Let interested parties have one last shot at accessing this.
+  for (auto& observer : destruction_observers_)
+    observer.WillDestroyCurrentMessageLoop();
+
+  thread_task_runner_handle_.reset();
+
+  // Tell the incoming queue that we are dying.
+  incoming_task_queue_->WillDestroyCurrentMessageLoop();
+  incoming_task_queue_ = nullptr;
+  unbound_task_runner_ = nullptr;
+  task_runner_ = nullptr;
+
+  // OK, now make it so that no one can find us.
+  if (MessageLoopCurrent::IsBoundToCurrentThreadInternal(this))
+    MessageLoopCurrent::UnbindFromCurrentThreadInternal(this);
+}
+
+// static
+MessageLoopCurrent MessageLoop::current() {
+  return MessageLoopCurrent::Get();
+}
+
+// static
+bool MessageLoop::InitMessagePumpForUIFactory(MessagePumpFactory* factory) {
+  if (message_pump_for_ui_factory_)
+    return false;
+
+  message_pump_for_ui_factory_ = factory;
+  return true;
+}
+
+// static
+std::unique_ptr<MessagePump> MessageLoop::CreateMessagePumpForType(Type type) {
+  if (type == MessageLoop::TYPE_UI) {
+    if (message_pump_for_ui_factory_)
+      return message_pump_for_ui_factory_();
+#if defined(OS_IOS) || defined(OS_MACOSX)
+    return MessagePumpMac::Create();
+#elif defined(OS_NACL) || defined(OS_AIX)
+    // Currently NaCl and AIX don't have a UI MessageLoop.
+    // TODO(abarth): Figure out if we need this.
+    NOTREACHED();
+    return nullptr;
+#else
+    return std::make_unique<MessagePumpForUI>();
+#endif
+  }
+
+  if (type == MessageLoop::TYPE_IO)
+    return std::unique_ptr<MessagePump>(new MessagePumpForIO());
+
+#if defined(OS_ANDROID)
+  if (type == MessageLoop::TYPE_JAVA)
+    return std::unique_ptr<MessagePump>(new MessagePumpForUI());
+#endif
+
+  DCHECK_EQ(MessageLoop::TYPE_DEFAULT, type);
+#if defined(OS_IOS)
+  // On iOS, a native runloop is always required to pump system work.
+  return std::make_unique<MessagePumpCFRunLoop>();
+#else
+  return std::make_unique<MessagePumpDefault>();
+#endif
+}
+
+bool MessageLoop::IsType(Type type) const {
+  return type_ == type;
+}
+
+// TODO(gab): Migrate TaskObservers to RunLoop as part of separating concerns
+// between MessageLoop and RunLoop and making MessageLoop a swappable
+// implementation detail. http://crbug.com/703346
+void MessageLoop::AddTaskObserver(TaskObserver* task_observer) {
+  DCHECK_CALLED_ON_VALID_THREAD(bound_thread_checker_);
+  task_observers_.AddObserver(task_observer);
+}
+
+void MessageLoop::RemoveTaskObserver(TaskObserver* task_observer) {
+  DCHECK_CALLED_ON_VALID_THREAD(bound_thread_checker_);
+  task_observers_.RemoveObserver(task_observer);
+}
+
+bool MessageLoop::IsIdleForTesting() {
+  // Have unprocessed tasks? (this reloads the work queue if necessary)
+  if (incoming_task_queue_->triage_tasks().HasTasks())
+    return false;
+
+  // Have unprocessed deferred tasks which can be processed at this run-level?
+  if (incoming_task_queue_->deferred_tasks().HasTasks() &&
+      !RunLoop::IsNestedOnCurrentThread()) {
+    return false;
+  }
+
+  return true;
+}
+
+//------------------------------------------------------------------------------
+
+// static
+std::unique_ptr<MessageLoop> MessageLoop::CreateUnbound(
+    Type type,
+    MessagePumpFactoryCallback pump_factory) {
+  return WrapUnique(new MessageLoop(type, std::move(pump_factory)));
+}
+
+MessageLoop::MessageLoop(Type type, MessagePumpFactoryCallback pump_factory)
+    : MessageLoopCurrent(this),
+      type_(type),
+      pump_factory_(std::move(pump_factory)),
+      incoming_task_queue_(new internal::IncomingTaskQueue(this)),
+      unbound_task_runner_(
+          new internal::MessageLoopTaskRunner(incoming_task_queue_)),
+      task_runner_(unbound_task_runner_) {
+  // If type is TYPE_CUSTOM, a non-null pump_factory must be given.
+  DCHECK(type_ != TYPE_CUSTOM || !pump_factory_.is_null());
+
+  // Bound in BindToCurrentThread().
+  DETACH_FROM_THREAD(bound_thread_checker_);
+}
+
+void MessageLoop::BindToCurrentThread() {
+  DCHECK_CALLED_ON_VALID_THREAD(bound_thread_checker_);
+
+  DCHECK(!pump_);
+  if (!pump_factory_.is_null())
+    pump_ = std::move(pump_factory_).Run();
+  else
+    pump_ = CreateMessagePumpForType(type_);
+
+  DCHECK(!MessageLoopCurrent::IsSet())
+      << "should only have one message loop per thread";
+  MessageLoopCurrent::BindToCurrentThreadInternal(this);
+
+  incoming_task_queue_->StartScheduling();
+  unbound_task_runner_->BindToCurrentThread();
+  unbound_task_runner_ = nullptr;
+  SetThreadTaskRunnerHandle();
+  thread_id_ = PlatformThread::CurrentId();
+
+  scoped_set_sequence_local_storage_map_for_current_thread_ = std::make_unique<
+      internal::ScopedSetSequenceLocalStorageMapForCurrentThread>(
+      &sequence_local_storage_map_);
+
+  RunLoop::RegisterDelegateForCurrentThread(this);
+}
+
+std::string MessageLoop::GetThreadName() const {
+  DCHECK_NE(kInvalidThreadId, thread_id_)
+      << "GetThreadName() must only be called after BindToCurrentThread()'s "
+      << "side-effects have been synchronized with this thread.";
+  return ThreadIdNameManager::GetInstance()->GetName(thread_id_);
+}
+
+void MessageLoop::SetTaskRunner(
+    scoped_refptr<SingleThreadTaskRunner> task_runner) {
+  DCHECK_CALLED_ON_VALID_THREAD(bound_thread_checker_);
+
+  DCHECK(task_runner);
+  DCHECK(task_runner->BelongsToCurrentThread());
+  DCHECK(!unbound_task_runner_);
+  task_runner_ = std::move(task_runner);
+  SetThreadTaskRunnerHandle();
+}
+
+void MessageLoop::ClearTaskRunnerForTesting() {
+  DCHECK_CALLED_ON_VALID_THREAD(bound_thread_checker_);
+
+  DCHECK(!unbound_task_runner_);
+  task_runner_ = nullptr;
+  thread_task_runner_handle_.reset();
+}
+
+void MessageLoop::Run(bool application_tasks_allowed) {
+  DCHECK_CALLED_ON_VALID_THREAD(bound_thread_checker_);
+  if (application_tasks_allowed && !task_execution_allowed_) {
+    // Allow nested task execution as explicitly requested.
+    DCHECK(RunLoop::IsNestedOnCurrentThread());
+    task_execution_allowed_ = true;
+    pump_->Run(this);
+    task_execution_allowed_ = false;
+  } else {
+    pump_->Run(this);
+  }
+}
+
+void MessageLoop::Quit() {
+  DCHECK_CALLED_ON_VALID_THREAD(bound_thread_checker_);
+  pump_->Quit();
+}
+
+void MessageLoop::EnsureWorkScheduled() {
+  DCHECK_CALLED_ON_VALID_THREAD(bound_thread_checker_);
+  if (incoming_task_queue_->triage_tasks().HasTasks())
+    pump_->ScheduleWork();
+}
+
+void MessageLoop::SetThreadTaskRunnerHandle() {
+  DCHECK_CALLED_ON_VALID_THREAD(bound_thread_checker_);
+  // Clear the previous thread task runner first, because only one can exist at
+  // a time.
+  thread_task_runner_handle_.reset();
+  thread_task_runner_handle_.reset(new ThreadTaskRunnerHandle(task_runner_));
+}
+
+bool MessageLoop::ProcessNextDelayedNonNestableTask() {
+  if (RunLoop::IsNestedOnCurrentThread())
+    return false;
+
+  while (incoming_task_queue_->deferred_tasks().HasTasks()) {
+    PendingTask pending_task = incoming_task_queue_->deferred_tasks().Pop();
+    if (!pending_task.task.IsCancelled()) {
+      RunTask(&pending_task);
+      return true;
+    }
+  }
+
+  return false;
+}
+
+void MessageLoop::RunTask(PendingTask* pending_task) {
+  DCHECK(task_execution_allowed_);
+
+  // Execute the task and assume the worst: It is probably not reentrant.
+  task_execution_allowed_ = false;
+
+  TRACE_TASK_EXECUTION("MessageLoop::RunTask", *pending_task);
+
+  for (auto& observer : task_observers_)
+    observer.WillProcessTask(*pending_task);
+  incoming_task_queue_->RunTask(pending_task);
+  for (auto& observer : task_observers_)
+    observer.DidProcessTask(*pending_task);
+
+  task_execution_allowed_ = true;
+}
+
+bool MessageLoop::DeferOrRunPendingTask(PendingTask pending_task) {
+  if (pending_task.nestable == Nestable::kNestable ||
+      !RunLoop::IsNestedOnCurrentThread()) {
+    RunTask(&pending_task);
+    // Show that we ran a task (Note: a new one might arrive as a
+    // consequence!).
+    return true;
+  }
+
+  // We couldn't run the task now because we're in a nested run loop
+  // and the task isn't nestable.
+  incoming_task_queue_->deferred_tasks().Push(std::move(pending_task));
+  return false;
+}
+
+void MessageLoop::DeletePendingTasks() {
+  incoming_task_queue_->triage_tasks().Clear();
+  incoming_task_queue_->deferred_tasks().Clear();
+  // TODO(robliao): Determine if we can move delayed task destruction before
+  // deferred tasks to maintain the MessagePump DoWork, DoDelayedWork, and
+  // DoIdleWork processing order.
+  incoming_task_queue_->delayed_tasks().Clear();
+}
+
+void MessageLoop::ScheduleWork() {
+  pump_->ScheduleWork();
+}
+
+bool MessageLoop::DoWork() {
+  if (!task_execution_allowed_)
+    return false;
+
+  // Execute oldest task.
+  while (incoming_task_queue_->triage_tasks().HasTasks()) {
+    PendingTask pending_task = incoming_task_queue_->triage_tasks().Pop();
+    if (pending_task.task.IsCancelled())
+      continue;
+
+    if (!pending_task.delayed_run_time.is_null()) {
+      int sequence_num = pending_task.sequence_num;
+      TimeTicks delayed_run_time = pending_task.delayed_run_time;
+      incoming_task_queue_->delayed_tasks().Push(std::move(pending_task));
+      // If we changed the topmost task, then it is time to reschedule.
+      if (incoming_task_queue_->delayed_tasks().Peek().sequence_num ==
+          sequence_num) {
+        pump_->ScheduleDelayedWork(delayed_run_time);
+      }
+    } else if (DeferOrRunPendingTask(std::move(pending_task))) {
+      return true;
+    }
+  }
+
+  // Nothing happened.
+  return false;
+}
+
+bool MessageLoop::DoDelayedWork(TimeTicks* next_delayed_work_time) {
+  if (!task_execution_allowed_ ||
+      !incoming_task_queue_->delayed_tasks().HasTasks()) {
+    recent_time_ = *next_delayed_work_time = TimeTicks();
+    return false;
+  }
+
+  // When we "fall behind", there will be a lot of tasks in the delayed work
+  // queue that are ready to run.  To increase efficiency when we fall behind,
+  // we will only call Time::Now() intermittently, and then process all tasks
+  // that are ready to run before calling it again.  As a result, the more we
+  // fall behind (and have a lot of ready-to-run delayed tasks), the more
+  // efficient we'll be at handling the tasks.
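+  // (For example, if 50 delayed tasks are already past due, |recent_time_|
+  // lets all 50 run against a single TimeTicks::Now() sample instead of
+  // sampling the clock 50 times.)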
+
+  TimeTicks next_run_time =
+      incoming_task_queue_->delayed_tasks().Peek().delayed_run_time;
+  if (next_run_time > recent_time_) {
+    recent_time_ = TimeTicks::Now();  // Get a better view of Now();
+    if (next_run_time > recent_time_) {
+      *next_delayed_work_time = next_run_time;
+      return false;
+    }
+  }
+
+  PendingTask pending_task = incoming_task_queue_->delayed_tasks().Pop();
+
+  if (incoming_task_queue_->delayed_tasks().HasTasks()) {
+    *next_delayed_work_time =
+        incoming_task_queue_->delayed_tasks().Peek().delayed_run_time;
+  }
+
+  return DeferOrRunPendingTask(std::move(pending_task));
+}
+
+bool MessageLoop::DoIdleWork() {
+  if (ProcessNextDelayedNonNestableTask())
+    return true;
+
+  if (ShouldQuitWhenIdle())
+    pump_->Quit();
+
+  // When we return we will do a kernel wait for more tasks.
+#if defined(OS_WIN)
+  // On Windows we activate the high resolution timer so that the wait, if
+  // triggered by the timer, happens with good resolution. If we don't do
+  // this, the default resolution is 15ms, which might not be acceptable for
+  // some tasks.
+  bool high_res = incoming_task_queue_->HasPendingHighResolutionTasks();
+  if (high_res != in_high_res_mode_) {
+    in_high_res_mode_ = high_res;
+    Time::ActivateHighResolutionTimer(in_high_res_mode_);
+  }
+#endif
+  return false;
+}
+
+#if !defined(OS_NACL)
+
+//------------------------------------------------------------------------------
+// MessageLoopForUI
+
+MessageLoopForUI::MessageLoopForUI(std::unique_ptr<MessagePump> pump)
+    : MessageLoop(TYPE_UI, BindOnce(&ReturnPump, std::move(pump))) {}
+
+// static
+MessageLoopCurrentForUI MessageLoopForUI::current() {
+  return MessageLoopCurrentForUI::Get();
+}
+
+// static
+bool MessageLoopForUI::IsCurrent() {
+  return MessageLoopCurrentForUI::IsSet();
+}
+
+#if defined(OS_IOS)
+void MessageLoopForUI::Attach() {
+  static_cast<MessagePumpUIApplication*>(pump_.get())->Attach(this);
+}
+#endif  // defined(OS_IOS)
+
+#if defined(OS_ANDROID)
+void MessageLoopForUI::Start() {
+  // No histogram support for the UI message loop, as it is managed by the
+  // Java side.
+  static_cast<MessagePumpForUI*>(pump_.get())->Start(this);
+}
+
+void MessageLoopForUI::Abort() {
+  static_cast<MessagePumpForUI*>(pump_.get())->Abort();
+}
+#endif  // defined(OS_ANDROID)
+
+#endif  // !defined(OS_NACL)
+
+//------------------------------------------------------------------------------
+// MessageLoopForIO
+
+// static
+MessageLoopCurrentForIO MessageLoopForIO::current() {
+  return MessageLoopCurrentForIO::Get();
+}
+
+// static
+bool MessageLoopForIO::IsCurrent() {
+  return MessageLoopCurrentForIO::IsSet();
+}
+
+}  // namespace base
diff --git a/base/message_loop/message_loop.h b/base/message_loop/message_loop.h
new file mode 100644
index 0000000..2d8047d
--- /dev/null
+++ b/base/message_loop/message_loop.h
@@ -0,0 +1,403 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MESSAGE_LOOP_MESSAGE_LOOP_H_
+#define BASE_MESSAGE_LOOP_MESSAGE_LOOP_H_
+
+#include <memory>
+#include <queue>
+#include <string>
+
+#include "base/base_export.h"
+#include "base/callback_forward.h"
+#include "base/gtest_prod_util.h"
+#include "base/macros.h"
+#include "base/memory/scoped_refptr.h"
+#include "base/message_loop/incoming_task_queue.h"
+#include "base/message_loop/message_loop_current.h"
+#include "base/message_loop/message_loop_task_runner.h"
+#include "base/message_loop/message_pump.h"
+#include "base/message_loop/timer_slack.h"
+#include "base/observer_list.h"
+#include "base/pending_task.h"
+#include "base/run_loop.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/sequence_local_storage_map.h"
+#include "base/threading/thread_checker.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+
+namespace base {
+
+class ThreadTaskRunnerHandle;
+
+// A MessageLoop is used to process events for a particular thread.  There is
+// at most one MessageLoop instance per thread.
+//
+// Events include at a minimum Task instances submitted to the MessageLoop's
+// TaskRunner. Depending on the type of message pump used by the MessageLoop,
+// other events such as UI messages may be processed.  On Windows, APC calls
+// (as time permits) and signals sent to a registered set of HANDLEs may also
+// be processed.
+//
+// The MessageLoop's API should only be used directly by its owner (and users
+// which the owner opts to share a MessageLoop* with). Other ways to access
+// subsets of the MessageLoop API:
+//   - base::RunLoop : Drive the MessageLoop from the thread it's bound to.
+//   - base::Thread/SequencedTaskRunnerHandle : Post back to the MessageLoop
+//     from a task running on it.
+//   - SequenceLocalStorageSlot : Bind external state to this MessageLoop.
+//   - base::MessageLoopCurrent : Access statically exposed APIs of this
+//     MessageLoop.
+//   - Embedders may provide their own static accessors to post tasks on
+//     specific loops (e.g. content::BrowserThreads).
+//
+// NOTE: Unless otherwise specified, a MessageLoop's methods may only be called
+// on the thread where the MessageLoop's Run method executes.
+//
+// NOTE: MessageLoop has task reentrancy protection.  This means that if a
+// task is being processed, a second task cannot start until the first task is
+// finished.  Reentrancy can happen when processing a task, and an inner
+// message pump is created.  That inner pump then processes native messages
+// which could implicitly start an inner task.  Inner message pumps are created
+// with dialogs (DialogBox), common dialogs (GetOpenFileName), OLE functions
+// (DoDragDrop), printer functions (StartDoc) and *many* others.
+//
+// Sample workaround when inner task processing is needed:
+//   HRESULT hr;
+//   {
+//     MessageLoopCurrent::ScopedNestableTaskAllower allow;
+//     hr = DoDragDrop(...); // Implicitly runs a modal message loop.
+//   }
+//   // Process |hr| (the result returned by DoDragDrop()).
+//
+// Please be SURE your task is reentrant (nestable) and all global variables
+// are stable and accessible before calling SetNestableTasksAllowed(true).
+//
+// TODO(gab): MessageLoop doesn't need to be a MessageLoopCurrent once callers
+// that store MessageLoop::current() in a MessageLoop* variable have been
+// updated to use a MessageLoopCurrent variable.
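+//
+// A minimal usage sketch (illustrative; Work() is a hypothetical function):
+//   base::MessageLoop loop;
+//   loop.task_runner()->PostTask(FROM_HERE, base::BindOnce(&Work));
+//   base::RunLoop().RunUntilIdle();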
+class BASE_EXPORT MessageLoop : public MessagePump::Delegate,
+                                public RunLoop::Delegate,
+                                public MessageLoopCurrent {
+ public:
+  // TODO(gab): Migrate usage of this class to MessageLoopCurrent and remove
+  // this forwarded declaration.
+  using DestructionObserver = MessageLoopCurrent::DestructionObserver;
+
+  // A MessageLoop has a particular type, which indicates the set of
+  // asynchronous events it may process in addition to tasks and timers.
+  //
+  // TYPE_DEFAULT
+  //   This type of ML only supports tasks and timers.
+  //
+  // TYPE_UI
+  //   This type of ML also supports native UI events (e.g., Windows messages).
+  //   See also MessageLoopForUI.
+  //
+  // TYPE_IO
+  //   This type of ML also supports asynchronous IO.  See also
+  //   MessageLoopForIO.
+  //
+  // TYPE_JAVA
+  //   This type of ML is backed by a Java message handler which is responsible
+  //   for running the tasks added to the ML. This is only for use on Android.
+  //   TYPE_JAVA behaves in essence like TYPE_UI, except during construction
+  //   where it does not use the main thread specific pump factory.
+  //
+  // TYPE_CUSTOM
+  //   MessagePump was supplied to constructor.
+  //
+  enum Type {
+    TYPE_DEFAULT,
+    TYPE_UI,
+    TYPE_CUSTOM,
+    TYPE_IO,
+#if defined(OS_ANDROID)
+    TYPE_JAVA,
+#endif  // defined(OS_ANDROID)
+  };
+
+  // Normally, it is not necessary to instantiate a MessageLoop.  Instead, it
+  // is typical to make use of the current thread's MessageLoop instance.
+  explicit MessageLoop(Type type = TYPE_DEFAULT);
+  // Creates a TYPE_CUSTOM MessageLoop with the supplied MessagePump, which must
+  // be non-NULL.
+  explicit MessageLoop(std::unique_ptr<MessagePump> pump);
+
+  ~MessageLoop() override;
+
+  // TODO(gab): Mass migrate callers to MessageLoopCurrent::Get().
+  static MessageLoopCurrent current();
+
+  using MessagePumpFactory = std::unique_ptr<MessagePump>();
+  // Uses the given MessagePumpFactory to override the default MessagePump
+  // implementation for 'TYPE_UI'. Returns true if the factory was
+  // successfully registered.
+  static bool InitMessagePumpForUIFactory(MessagePumpFactory* factory);
+
+  // Creates the default MessagePump based on |type|. Caller owns return
+  // value.
+  static std::unique_ptr<MessagePump> CreateMessagePumpForType(Type type);
+
+  // Set the timer slack for this message loop.
+  void SetTimerSlack(TimerSlack timer_slack) {
+    pump_->SetTimerSlack(timer_slack);
+  }
+
+  // Returns true if this loop is |type|. This allows subclasses (especially
+  // those in tests) to specialize how they are identified.
+  virtual bool IsType(Type type) const;
+
+  // Returns the type passed to the constructor.
+  Type type() const { return type_; }
+
+  // Returns the name of the thread this message loop is bound to. This function
+  // is only valid when this message loop is running: BindToCurrentThread() must
+  // already have been called and have a "happens-before" relationship with this
+  // call (that relationship is obtained implicitly by the MessageLoop's task
+  // posting system unless this is called very early).
+  std::string GetThreadName() const;
+
+  // Gets the TaskRunner associated with this message loop.
+  const scoped_refptr<SingleThreadTaskRunner>& task_runner() const {
+    return task_runner_;
+  }
+
+  // Sets a new TaskRunner for this message loop. The message loop must already
+  // have been bound to a thread prior to this call, and the task runner must
+  // belong to that thread. Note that changing the task runner will also affect
+  // the ThreadTaskRunnerHandle for the target thread. Must be called on the
+  // thread to which the message loop is bound.
+  void SetTaskRunner(scoped_refptr<SingleThreadTaskRunner> task_runner);
+
+  // Clears task_runner() and the ThreadTaskRunnerHandle for the target thread.
+  // Must be called on the thread to which the message loop is bound.
+  void ClearTaskRunnerForTesting();
+
+  // TODO(https://crbug.com/825327): Remove users of TaskObservers through
+  // MessageLoop::current() and migrate the type back here.
+  using TaskObserver = MessageLoopCurrent::TaskObserver;
+
+  // These functions can only be called on the same thread that |this| is
+  // running on.
+  void AddTaskObserver(TaskObserver* task_observer);
+  void RemoveTaskObserver(TaskObserver* task_observer);
+
+  // Returns true if the message loop is idle (ignoring delayed tasks). This is
+  // the same condition that causes DoWork() to return false: there are no more
+  // tasks that can be processed at the current run-level -- there might be
+  // deferred non-nestable tasks remaining if currently in a nested run level.
+  bool IsIdleForTesting();
+
+  // Runs the specified PendingTask.
+  void RunTask(PendingTask* pending_task);
+
+  //----------------------------------------------------------------------------
+ protected:
+  std::unique_ptr<MessagePump> pump_;
+
+  using MessagePumpFactoryCallback =
+      OnceCallback<std::unique_ptr<MessagePump>()>;
+
+  // Common protected constructor. Other constructors delegate the
+  // initialization to this constructor.
+  // A subclass can invoke this constructor to create a MessageLoop of a
+  // specific type with a custom message pump. The implementation does not call
+  // BindToCurrentThread. If this constructor is invoked directly by a subclass,
+  // then the subclass must subsequently bind the message loop.
+  MessageLoop(Type type, MessagePumpFactoryCallback pump_factory);
+
+  // Configure various members and bind this message loop to the current thread.
+  void BindToCurrentThread();
+
+ private:
+  friend class internal::IncomingTaskQueue;
+  friend class MessageLoopCurrent;
+  friend class MessageLoopCurrentForIO;
+  friend class MessageLoopCurrentForUI;
+  friend class ScheduleWorkTest;
+  friend class Thread;
+  FRIEND_TEST_ALL_PREFIXES(MessageLoopTest, DeleteUnboundLoop);
+
+  // Creates a MessageLoop without binding to a thread.
+  // If |type| is TYPE_CUSTOM, a non-null |pump_factory| must also be given
+  // to create a message pump for this message loop.  Otherwise a default
+  // message pump for the |type| is created.
+  //
+  // It is valid to call this to create a new message loop on one thread,
+  // and then pass it to the thread where the message loop actually runs.
+  // The message loop's BindToCurrentThread() method must be called on the
+  // thread the message loop runs on, before calling Run().
+  // Before BindToCurrentThread() is called, only Post*Task() functions can
+  // be called on the message loop.
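+  //
+  // Sketch of the intended pattern (hypothetical owner code):
+  //   std::unique_ptr<MessageLoop> loop = MessageLoop::CreateUnbound(
+  //       MessageLoop::TYPE_IO, MessagePumpFactoryCallback());
+  //   // ... hand |loop| to the target thread, which then runs:
+  //   loop->BindToCurrentThread();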
+  static std::unique_ptr<MessageLoop> CreateUnbound(
+      Type type,
+      MessagePumpFactoryCallback pump_factory);
+
+  // Sets the ThreadTaskRunnerHandle for the current thread to point to the
+  // task runner for this message loop.
+  void SetThreadTaskRunnerHandle();
+
+  // RunLoop::Delegate:
+  void Run(bool application_tasks_allowed) override;
+  void Quit() override;
+  void EnsureWorkScheduled() override;
+
+  // Called to process any delayed non-nestable tasks.
+  bool ProcessNextDelayedNonNestableTask();
+
+  // Calls RunTask or queues the pending_task on the deferred task list if it
+  // cannot be run right now.  Returns true if the task was run.
+  bool DeferOrRunPendingTask(PendingTask pending_task);
+
+  // Deletes tasks that haven't run yet, without running them.  Used in the
+  // destructor to make sure all the tasks' destructors get called.
+  void DeletePendingTasks();
+
+  // Wakes up the message pump. Can be called on any thread. The caller is
+  // responsible for synchronizing ScheduleWork() calls.
+  void ScheduleWork();
+
+  // MessagePump::Delegate methods:
+  bool DoWork() override;
+  bool DoDelayedWork(TimeTicks* next_delayed_work_time) override;
+  bool DoIdleWork() override;
+
+  const Type type_;
+
+#if defined(OS_WIN)
+  // Tracks if we have requested high resolution timers. Its only use is to
+  // turn off the high resolution timer upon loop destruction.
+  bool in_high_res_mode_ = false;
+#endif
+
+  // A recent snapshot of TimeTicks::Now(), used in DoDelayedWork() to check
+  // whether delayed tasks are ready to run.
+  TimeTicks recent_time_;
+
+  ObserverList<DestructionObserver> destruction_observers_;
+
+  // A boolean which prevents unintentional reentrant task execution (e.g. from
+  // induced nested message loops). As such, nested message loops will only
+  // process system messages (not application tasks) by default. A nested loop
+  // layer must have been explicitly granted permission to be able to execute
+  // application tasks. This is granted either by
+  // RunLoop::Type::kNestableTasksAllowed when the loop is driven by the
+  // application or by a ScopedNestableTaskAllower preceding a system call that
+  // is known to generate a system-driven nested loop.
+  bool task_execution_allowed_ = true;
+
+  // pump_factory_.Run() is called to create a message pump for this loop
+  // if type_ is TYPE_CUSTOM and pump_ is null.
+  MessagePumpFactoryCallback pump_factory_;
+
+  ObserverList<TaskObserver> task_observers_;
+
+  scoped_refptr<internal::IncomingTaskQueue> incoming_task_queue_;
+
+  // A task runner which we haven't bound to a thread yet.
+  scoped_refptr<internal::MessageLoopTaskRunner> unbound_task_runner_;
+
+  // The task runner associated with this message loop.
+  scoped_refptr<SingleThreadTaskRunner> task_runner_;
+  std::unique_ptr<ThreadTaskRunnerHandle> thread_task_runner_handle_;
+
+  // Id of the thread this message loop is bound to. Initialized once when the
+  // MessageLoop is bound to its thread and constant forever after.
+  PlatformThreadId thread_id_ = kInvalidThreadId;
+
+  // Holds data stored through the SequenceLocalStorageSlot API.
+  internal::SequenceLocalStorageMap sequence_local_storage_map_;
+
+  // Enables the SequenceLocalStorageSlot API within its scope.
+  // Instantiated in BindToCurrentThread().
+  std::unique_ptr<internal::ScopedSetSequenceLocalStorageMapForCurrentThread>
+      scoped_set_sequence_local_storage_map_for_current_thread_;
+
+  // Verifies that calls are made on the thread on which BindToCurrentThread()
+  // was invoked.
+  THREAD_CHECKER(bound_thread_checker_);
+
+  DISALLOW_COPY_AND_ASSIGN(MessageLoop);
+};
+
+#if !defined(OS_NACL)
+
+//-----------------------------------------------------------------------------
+// MessageLoopForUI extends MessageLoop with methods that are particular to a
+// MessageLoop instantiated with TYPE_UI.
+//
+// By instantiating a MessageLoopForUI on the current thread, the owner enables
+// native UI message pumping.
+//
+// MessageLoopCurrentForUI is exposed statically on its thread via
+// MessageLoopCurrentForUI::Get() to provide additional functionality.
+//
+class BASE_EXPORT MessageLoopForUI : public MessageLoop {
+ public:
+  MessageLoopForUI() : MessageLoop(TYPE_UI) {
+  }
+
+  explicit MessageLoopForUI(std::unique_ptr<MessagePump> pump);
+
+  // TODO(gab): Mass migrate callers to MessageLoopCurrentForUI::Get()/IsSet().
+  static MessageLoopCurrentForUI current();
+  static bool IsCurrent();
+
+#if defined(OS_IOS)
+  // On iOS, the main message loop cannot be Run().  Instead call Attach(),
+  // which connects this MessageLoop to the UI thread's CFRunLoop and allows
+  // PostTask() to work.
+  void Attach();
+#endif
+
+#if defined(OS_ANDROID)
+  // On Android, the UI message loop is handled by the Java side, so Run()
+  // should never be called. Instead use Start(), which forwards all the native
+  // UI events to the Java message loop.
+  void Start();
+
+  // On Android there are cases where we want to abort immediately without
+  // calling Quit(); in those cases we call Abort().
+  void Abort();
+#endif
+};
+
+// Do not add any member variables to MessageLoopForUI!  This is important b/c
+// MessageLoopForUI is often allocated via MessageLoop(TYPE_UI).  Any extra
+// data that you need should be stored on the MessageLoop's pump_ instance.
+static_assert(sizeof(MessageLoop) == sizeof(MessageLoopForUI),
+              "MessageLoopForUI should not have extra member variables");
+
+#endif  // !defined(OS_NACL)
+
+//-----------------------------------------------------------------------------
+// MessageLoopForIO extends MessageLoop with methods that are particular to a
+// MessageLoop instantiated with TYPE_IO.
+//
+// By instantiating a MessageLoopForIO on the current thread, the owner enables
+// native async IO message pumping.
+//
+// MessageLoopCurrentForIO is exposed statically on its thread via
+// MessageLoopCurrentForIO::Get() to provide additional functionality.
+//
+class BASE_EXPORT MessageLoopForIO : public MessageLoop {
+ public:
+  MessageLoopForIO() : MessageLoop(TYPE_IO) {}
+
+  // TODO(gab): Mass migrate callers to MessageLoopCurrentForIO::Get()/IsSet().
+  static MessageLoopCurrentForIO current();
+  static bool IsCurrent();
+};
+
+// Do not add any member variables to MessageLoopForIO!  This is important b/c
+// MessageLoopForIO is often allocated via MessageLoop(TYPE_IO).  Any extra
+// data that you need should be stored on the MessageLoop's pump_ instance.
+static_assert(sizeof(MessageLoop) == sizeof(MessageLoopForIO),
+              "MessageLoopForIO should not have extra member variables");
+
+}  // namespace base
+
+#endif  // BASE_MESSAGE_LOOP_MESSAGE_LOOP_H_
diff --git a/base/message_loop/message_loop_current.cc b/base/message_loop/message_loop_current.cc
new file mode 100644
index 0000000..0beef5a
--- /dev/null
+++ b/base/message_loop/message_loop_current.cc
@@ -0,0 +1,252 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_loop/message_loop_current.h"
+
+#include "base/bind.h"
+#include "base/message_loop/message_loop.h"
+#include "base/message_loop/message_pump_for_io.h"
+#include "base/message_loop/message_pump_for_ui.h"
+#include "base/no_destructor.h"
+#include "base/threading/thread_local.h"
+
+namespace base {
+
+namespace {
+
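+// Lazily constructed TLS slot pointing at this thread's MessageLoop.
+// NoDestructor keeps the slot from registering a static destructor at
+// process exit.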
+base::ThreadLocalPointer<MessageLoop>* GetTLSMessageLoop() {
+  static NoDestructor<ThreadLocalPointer<MessageLoop>> lazy_tls_ptr;
+  return lazy_tls_ptr.get();
+}
+
+}  // namespace
+
+//------------------------------------------------------------------------------
+// MessageLoopCurrent
+
+// static
+MessageLoopCurrent MessageLoopCurrent::Get() {
+  return MessageLoopCurrent(GetTLSMessageLoop()->Get());
+}
+
+// static
+bool MessageLoopCurrent::IsSet() {
+  return !!GetTLSMessageLoop()->Get();
+}
+
+void MessageLoopCurrent::AddDestructionObserver(
+    DestructionObserver* destruction_observer) {
+  DCHECK_CALLED_ON_VALID_THREAD(current_->bound_thread_checker_);
+  current_->destruction_observers_.AddObserver(destruction_observer);
+}
+
+void MessageLoopCurrent::RemoveDestructionObserver(
+    DestructionObserver* destruction_observer) {
+  DCHECK_CALLED_ON_VALID_THREAD(current_->bound_thread_checker_);
+  current_->destruction_observers_.RemoveObserver(destruction_observer);
+}
+
+const scoped_refptr<SingleThreadTaskRunner>& MessageLoopCurrent::task_runner()
+    const {
+  DCHECK_CALLED_ON_VALID_THREAD(current_->bound_thread_checker_);
+  return current_->task_runner();
+}
+
+void MessageLoopCurrent::SetTaskRunner(
+    scoped_refptr<SingleThreadTaskRunner> task_runner) {
+  DCHECK_CALLED_ON_VALID_THREAD(current_->bound_thread_checker_);
+  current_->SetTaskRunner(std::move(task_runner));
+}
+
+bool MessageLoopCurrent::IsIdleForTesting() {
+  DCHECK_CALLED_ON_VALID_THREAD(current_->bound_thread_checker_);
+  return current_->IsIdleForTesting();
+}
+
+void MessageLoopCurrent::AddTaskObserver(TaskObserver* task_observer) {
+  DCHECK_CALLED_ON_VALID_THREAD(current_->bound_thread_checker_);
+  current_->AddTaskObserver(task_observer);
+}
+
+void MessageLoopCurrent::RemoveTaskObserver(TaskObserver* task_observer) {
+  DCHECK_CALLED_ON_VALID_THREAD(current_->bound_thread_checker_);
+  current_->RemoveTaskObserver(task_observer);
+}
+
+void MessageLoopCurrent::SetNestableTasksAllowed(bool allowed) {
+  DCHECK_CALLED_ON_VALID_THREAD(current_->bound_thread_checker_);
+  if (allowed) {
+    // Kick the native pump just in case we enter an OS-driven nested message
+    // loop that does not go through RunLoop::Run().
+    current_->pump_->ScheduleWork();
+  }
+  current_->task_execution_allowed_ = allowed;
+}
+
+bool MessageLoopCurrent::NestableTasksAllowed() const {
+  DCHECK_CALLED_ON_VALID_THREAD(current_->bound_thread_checker_);
+  return current_->task_execution_allowed_;
+}
+
+MessageLoopCurrent::ScopedNestableTaskAllower::ScopedNestableTaskAllower()
+    : loop_(GetTLSMessageLoop()->Get()),
+      old_state_(loop_->NestableTasksAllowed()) {
+  loop_->SetNestableTasksAllowed(true);
+}
+
+MessageLoopCurrent::ScopedNestableTaskAllower::~ScopedNestableTaskAllower() {
+  loop_->SetNestableTasksAllowed(old_state_);
+}
+
+// static
+void MessageLoopCurrent::BindToCurrentThreadInternal(MessageLoop* current) {
+  DCHECK(!GetTLSMessageLoop()->Get())
+      << "Can't register a second MessageLoop on the same thread.";
+  GetTLSMessageLoop()->Set(current);
+}
+
+// static
+void MessageLoopCurrent::UnbindFromCurrentThreadInternal(MessageLoop* current) {
+  DCHECK_EQ(current, GetTLSMessageLoop()->Get());
+  GetTLSMessageLoop()->Set(nullptr);
+}
+
+bool MessageLoopCurrent::IsBoundToCurrentThreadInternal(
+    MessageLoop* message_loop) {
+  return GetTLSMessageLoop()->Get() == message_loop;
+}
+
+#if !defined(OS_NACL)
+
+//------------------------------------------------------------------------------
+// MessageLoopCurrentForUI
+
+// static
+MessageLoopCurrentForUI MessageLoopCurrentForUI::Get() {
+  MessageLoop* loop = GetTLSMessageLoop()->Get();
+  DCHECK(loop);
+#if defined(OS_ANDROID)
+  DCHECK(loop->IsType(MessageLoop::TYPE_UI) ||
+         loop->IsType(MessageLoop::TYPE_JAVA));
+#else   // defined(OS_ANDROID)
+  DCHECK(loop->IsType(MessageLoop::TYPE_UI));
+#endif  // defined(OS_ANDROID)
+  auto* loop_for_ui = static_cast<MessageLoopForUI*>(loop);
+  return MessageLoopCurrentForUI(
+      loop_for_ui, static_cast<MessagePumpForUI*>(loop_for_ui->pump_.get()));
+}
+
+// static
+bool MessageLoopCurrentForUI::IsSet() {
+  MessageLoop* loop = GetTLSMessageLoop()->Get();
+  return loop &&
+#if defined(OS_ANDROID)
+         (loop->IsType(MessageLoop::TYPE_UI) ||
+          loop->IsType(MessageLoop::TYPE_JAVA));
+#else   // defined(OS_ANDROID)
+         loop->IsType(MessageLoop::TYPE_UI);
+#endif  // defined(OS_ANDROID)
+}
+
+#if defined(USE_OZONE) && !defined(OS_FUCHSIA) && !defined(OS_WIN)
+bool MessageLoopCurrentForUI::WatchFileDescriptor(
+    int fd,
+    bool persistent,
+    MessagePumpForUI::Mode mode,
+    MessagePumpForUI::FdWatchController* controller,
+    MessagePumpForUI::FdWatcher* delegate) {
+  DCHECK_CALLED_ON_VALID_THREAD(current_->bound_thread_checker_);
+  return pump_->WatchFileDescriptor(fd, persistent, mode, controller, delegate);
+}
+#endif
+
+#if defined(OS_IOS)
+void MessageLoopCurrentForUI::Attach() {
+  static_cast<MessageLoopForUI*>(current_)->Attach();
+}
+#endif  // defined(OS_IOS)
+
+#if defined(OS_ANDROID)
+void MessageLoopCurrentForUI::Start() {
+  static_cast<MessageLoopForUI*>(current_)->Start();
+}
+
+void MessageLoopCurrentForUI::Abort() {
+  static_cast<MessageLoopForUI*>(current_)->Abort();
+}
+#endif  // defined(OS_ANDROID)
+
+#endif  // !defined(OS_NACL)
+
+//------------------------------------------------------------------------------
+// MessageLoopCurrentForIO
+
+// static
+MessageLoopCurrentForIO MessageLoopCurrentForIO::Get() {
+  MessageLoop* loop = GetTLSMessageLoop()->Get();
+  DCHECK(loop);
+  DCHECK_EQ(MessageLoop::TYPE_IO, loop->type());
+  auto* loop_for_io = static_cast<MessageLoopForIO*>(loop);
+  return MessageLoopCurrentForIO(
+      loop_for_io, static_cast<MessagePumpForIO*>(loop_for_io->pump_.get()));
+}
+
+// static
+bool MessageLoopCurrentForIO::IsSet() {
+  MessageLoop* loop = GetTLSMessageLoop()->Get();
+  return loop && loop->IsType(MessageLoop::TYPE_IO);
+}
+
+#if !defined(OS_NACL_SFI)
+
+#if defined(OS_WIN)
+void MessageLoopCurrentForIO::RegisterIOHandler(
+    HANDLE file,
+    MessagePumpForIO::IOHandler* handler) {
+  DCHECK_CALLED_ON_VALID_THREAD(current_->bound_thread_checker_);
+  pump_->RegisterIOHandler(file, handler);
+}
+
+bool MessageLoopCurrentForIO::RegisterJobObject(
+    HANDLE job,
+    MessagePumpForIO::IOHandler* handler) {
+  DCHECK_CALLED_ON_VALID_THREAD(current_->bound_thread_checker_);
+  return pump_->RegisterJobObject(job, handler);
+}
+
+bool MessageLoopCurrentForIO::WaitForIOCompletion(
+    DWORD timeout,
+    MessagePumpForIO::IOHandler* filter) {
+  DCHECK_CALLED_ON_VALID_THREAD(current_->bound_thread_checker_);
+  return pump_->WaitForIOCompletion(timeout, filter);
+}
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+bool MessageLoopCurrentForIO::WatchFileDescriptor(
+    int fd,
+    bool persistent,
+    MessagePumpForIO::Mode mode,
+    MessagePumpForIO::FdWatchController* controller,
+    MessagePumpForIO::FdWatcher* delegate) {
+  DCHECK_CALLED_ON_VALID_THREAD(current_->bound_thread_checker_);
+  return pump_->WatchFileDescriptor(fd, persistent, mode, controller, delegate);
+}
+#endif  // defined(OS_WIN)
+
+#endif  // !defined(OS_NACL_SFI)
+
+#if defined(OS_FUCHSIA)
+// Additional watch API for native platform resources.
+bool MessageLoopCurrentForIO::WatchZxHandle(
+    zx_handle_t handle,
+    bool persistent,
+    zx_signals_t signals,
+    MessagePumpForIO::ZxHandleWatchController* controller,
+    MessagePumpForIO::ZxHandleWatcher* delegate) {
+  DCHECK_CALLED_ON_VALID_THREAD(current_->bound_thread_checker_);
+  return pump_->WatchZxHandle(handle, persistent, signals, controller,
+                              delegate);
+}
+#endif
+
+}  // namespace base
diff --git a/base/message_loop/message_loop_current.h b/base/message_loop/message_loop_current.h
new file mode 100644
index 0000000..c5016dc
--- /dev/null
+++ b/base/message_loop/message_loop_current.h
@@ -0,0 +1,303 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MESSAGE_LOOP_MESSAGE_LOOP_CURRENT_H_
+#define BASE_MESSAGE_LOOP_MESSAGE_LOOP_CURRENT_H_
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/memory/scoped_refptr.h"
+#include "base/message_loop/message_pump_for_io.h"
+#include "base/message_loop/message_pump_for_ui.h"
+#include "base/pending_task.h"
+#include "base/single_thread_task_runner.h"
+#include "build/build_config.h"
+
+namespace base {
+
+class MessageLoop;
+
+// MessageLoopCurrent is a proxy to the public interface of the MessageLoop
+// bound to the thread it's obtained on.
+//
+// MessageLoopCurrent(ForUI|ForIO) is available statically through
+// MessageLoopCurrent(ForUI|ForIO)::Get() on threads that have a matching
+// MessageLoop instance. APIs intended for all consumers on the thread should be
+// on MessageLoopCurrent(ForUI|ForIO), while APIs intended for the owner of the
+// instance should be on MessageLoop(ForUI|ForIO).
+//
+// Why: Historically, MessageLoop::current() gave access to the full
+// MessageLoop API, which both prevented the addition of powerful owner-only
+// APIs and made it harder to remove callers of deprecated APIs (those APIs
+// need to stick around for a few owner-only use cases, and they re-accrue
+// callers after each cleanup because they remain publicly available).
+//
+// As such, many methods below are flagged as deprecated and should be removed
+// (or moved back to MessageLoop) once all static callers have been migrated.
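+//
+// For illustration, a hedged sketch of consumer-side usage (|observer| stands
+// for a hypothetical DestructionObserver implementation, see below):
+//
+//   if (MessageLoopCurrent::IsSet()) {
+//     MessageLoopCurrent current = MessageLoopCurrent::Get();
+//     current->AddDestructionObserver(&observer);
+//   }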
+class BASE_EXPORT MessageLoopCurrent {
+ public:
+  // MessageLoopCurrent is effectively just a disguised pointer and is fine to
+  // copy around.
+  MessageLoopCurrent(const MessageLoopCurrent& other) = default;
+  MessageLoopCurrent& operator=(const MessageLoopCurrent& other) = default;
+
+  // Returns a proxy object to interact with the MessageLoop running the
+  // current thread. It must only be used on the thread on which it was
+  // obtained.
+  static MessageLoopCurrent Get();
+
+  // Returns true if the current thread is running a MessageLoop. Prefer this to
+  // verifying the boolean value of Get() (so that Get() can ultimately DCHECK
+  // it's only invoked when IsSet()).
+  static bool IsSet();
+
+  // Allow MessageLoopCurrent to be used like a pointer to support the many
+  // callsites that used MessageLoop::current() that way when it was a
+  // MessageLoop*.
+  MessageLoopCurrent* operator->() { return this; }
+  explicit operator bool() const { return !!current_; }
+
+  // TODO(gab): Migrate the types of variables that store MessageLoop::current()
+  // and remove this implicit cast back to MessageLoop*.
+  operator MessageLoop*() const { return current_; }
+
+  // A DestructionObserver is notified when the current MessageLoop is being
+  // destroyed.  These observers are notified prior to MessageLoop::current()
+  // being changed to return NULL.  This gives interested parties the chance to
+  // do final cleanup that depends on the MessageLoop.
+  //
+  // NOTE: Any tasks posted to the MessageLoop during this notification will
+  // not be run.  Instead, they will be deleted.
+  //
+  // Deprecation note: Prefer SequenceLocalStorageSlot<std::unique_ptr<Foo>> to
+  // DestructionObserver to bind an object's lifetime to the current
+  // thread/sequence.
+  class BASE_EXPORT DestructionObserver {
+   public:
+    virtual void WillDestroyCurrentMessageLoop() = 0;
+
+   protected:
+    virtual ~DestructionObserver() = default;
+  };
+
+  // Add a DestructionObserver, which will start receiving notifications
+  // immediately.
+  void AddDestructionObserver(DestructionObserver* destruction_observer);
+
+  // Remove a DestructionObserver.  It is safe to call this method while a
+  // DestructionObserver is receiving a notification callback.
+  void RemoveDestructionObserver(DestructionObserver* destruction_observer);
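+
+  // For illustration, a minimal hedged sketch of observer registration (the
+  // Cleaner class is hypothetical):
+  //
+  //   class Cleaner : public MessageLoopCurrent::DestructionObserver {
+  //    public:
+  //     void WillDestroyCurrentMessageLoop() override { /* final cleanup */ }
+  //   };
+  //
+  //   Cleaner cleaner;  // Must outlive its registration.
+  //   MessageLoopCurrent::Get()->AddDestructionObserver(&cleaner);
+  //   ...
+  //   MessageLoopCurrent::Get()->RemoveDestructionObserver(&cleaner);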
+
+  // Forwards to MessageLoop::task_runner().
+  // DEPRECATED(https://crbug.com/616447): Use ThreadTaskRunnerHandle::Get()
+  // instead of MessageLoopCurrent::Get()->task_runner().
+  const scoped_refptr<SingleThreadTaskRunner>& task_runner() const;
+
+  // Forwards to MessageLoop::SetTaskRunner().
+  // DEPRECATED(https://crbug.com/825327): only owners of the MessageLoop
+  // instance should replace its TaskRunner.
+  void SetTaskRunner(scoped_refptr<SingleThreadTaskRunner> task_runner);
+
+  // A TaskObserver is an object that receives task notifications from the
+  // MessageLoop.
+  //
+  // NOTE: A TaskObserver implementation should be extremely fast!
+  class BASE_EXPORT TaskObserver {
+   public:
+    // This method is called before processing a task.
+    virtual void WillProcessTask(const PendingTask& pending_task) = 0;
+
+    // This method is called after processing a task.
+    virtual void DidProcessTask(const PendingTask& pending_task) = 0;
+
+   protected:
+    virtual ~TaskObserver() = default;
+  };
+
+  // Forwards to MessageLoop::(Add|Remove)TaskObserver.
+  // DEPRECATED(https://crbug.com/825327): only owners of the MessageLoop
+  // instance should add task observers on it.
+  void AddTaskObserver(TaskObserver* task_observer);
+  void RemoveTaskObserver(TaskObserver* task_observer);
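+
+  // For illustration, a hedged sketch of a minimal observer (TimingObserver is
+  // hypothetical; per the note above, implementations must be extremely fast):
+  //
+  //   class TimingObserver : public MessageLoopCurrent::TaskObserver {
+  //    public:
+  //     void WillProcessTask(const PendingTask& pending_task) override {
+  //       start_ = TimeTicks::Now();
+  //     }
+  //     void DidProcessTask(const PendingTask& pending_task) override {
+  //       DVLOG(1) << "task took " << TimeTicks::Now() - start_;
+  //     }
+  //    private:
+  //     TimeTicks start_;
+  //   };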
+
+  // Enables or disables recursive task processing. This matters for nested
+  // message loops, which can occur unexpectedly when using common controls or
+  // printer functions. By default, recursive task processing is disabled.
+  //
+  // Please use |ScopedNestableTaskAllower| instead of calling these methods
+  // directly.  In general, nestable message loops are to be avoided.  They are
+  // dangerous and difficult to get right, so please use with extreme caution.
+  //
+  // The specific case where tasks get queued is:
+  // - The thread is running a message loop.
+  // - It receives a task #1 and executes it.
+  // - The task #1 implicitly starts a message loop, like a MessageBox in the
+  //   unit test. This can also be StartDoc or GetSaveFileName.
+  // - The thread receives a task #2 before or while in this second message
+  //   loop.
+  // - With NestableTasksAllowed set to true, the task #2 will run right away.
+  //   Otherwise, it will get executed right after task #1 completes at "thread
+  //   message loop level".
+  //
+  // DEPRECATED(https://crbug.com/750779): Use RunLoop::Type on the relevant
+  // RunLoop instead of these methods.
+  // TODO(gab): Migrate usage and delete these methods.
+  void SetNestableTasksAllowed(bool allowed);
+  bool NestableTasksAllowed() const;
+
+  // Enables nestable tasks on the current MessageLoop while in scope.
+  // DEPRECATED(https://crbug.com/750779): This should not be used when the
+  // nested loop is driven by RunLoop (use RunLoop::Type::kNestableTasksAllowed
+  // instead). It can however still be useful in a few scenarios where re-
+  // entrancy is caused by a native message loop.
+  // TODO(gab): Remove usage of this class alongside RunLoop and rename it to
+  // ScopedApplicationTasksAllowedInNativeNestedLoop(?) for remaining use cases.
+  class BASE_EXPORT ScopedNestableTaskAllower {
+   public:
+    ScopedNestableTaskAllower();
+    ~ScopedNestableTaskAllower();
+
+   private:
+    MessageLoop* const loop_;
+    const bool old_state_;
+  };
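+
+  // For illustration, a hedged sketch of allowing application tasks while a
+  // native modal loop spins (MessageBox stands in for any such API):
+  //
+  //   {
+  //     MessageLoopCurrent::ScopedNestableTaskAllower allow;
+  //     ::MessageBox(nullptr, text, caption, MB_OK);  // Spins a native loop.
+  //   }  // The previous NestableTasksAllowed() state is restored here.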
+
+  // Returns true if the message loop is idle (ignoring delayed tasks). This is
+  // the same condition that causes DoWork() to return false: i.e., the loop is
+  // out of tasks that can be processed at the current run-level (there might
+  // still be deferred non-nestable tasks if currently in a nested run level).
+  bool IsIdleForTesting();
+
+  // Binds |current| to the current thread. It will from then on be the
+  // MessageLoop driven by MessageLoopCurrent on this thread. This is only meant
+  // to be invoked by the MessageLoop itself.
+  static void BindToCurrentThreadInternal(MessageLoop* current);
+
+  // Unbinds |current| from the current thread. Must be invoked on the same
+  // thread that invoked |BindToCurrentThreadInternal(current)|. This is only
+  // meant to be invoked by the MessageLoop itself.
+  static void UnbindFromCurrentThreadInternal(MessageLoop* current);
+
+  // Returns true if |message_loop| is bound to MessageLoopCurrent on the
+  // current thread. This is only meant to be invoked by the MessageLoop itself.
+  static bool IsBoundToCurrentThreadInternal(MessageLoop* message_loop);
+
+ protected:
+  explicit MessageLoopCurrent(MessageLoop* current) : current_(current) {}
+
+  MessageLoop* const current_;
+};
+
+#if !defined(OS_NACL)
+
+// ForUI extension of MessageLoopCurrent.
+class BASE_EXPORT MessageLoopCurrentForUI : public MessageLoopCurrent {
+ public:
+  // Returns an interface for the MessageLoopForUI of the current thread.
+  // Asserts that IsSet().
+  static MessageLoopCurrentForUI Get();
+
+  // Returns true if the current thread is running a MessageLoopForUI.
+  static bool IsSet();
+
+  MessageLoopCurrentForUI* operator->() { return this; }
+
+#if defined(USE_OZONE) && !defined(OS_FUCHSIA) && !defined(OS_WIN)
+  // Please see MessagePumpLibevent for definition.
+  static_assert(std::is_same<MessagePumpForUI, MessagePumpLibevent>::value,
+                "MessageLoopCurrentForUI::WatchFileDescriptor is not supported "
+                "when MessagePumpForUI is not a MessagePumpLibevent.");
+  bool WatchFileDescriptor(int fd,
+                           bool persistent,
+                           MessagePumpForUI::Mode mode,
+                           MessagePumpForUI::FdWatchController* controller,
+                           MessagePumpForUI::FdWatcher* delegate);
+#endif
+
+#if defined(OS_IOS)
+  // Forwards to MessageLoopForUI::Attach().
+  // TODO(https://crbug.com/825327): Plumb the actual MessageLoopForUI* to
+  // callers and remove ability to access this method from
+  // MessageLoopCurrentForUI.
+  void Attach();
+#endif
+
+#if defined(OS_ANDROID)
+  // Forwards to MessageLoopForUI::Start().
+  // TODO(https://crbug.com/825327): Plumb the actual MessageLoopForUI* to
+  // callers and remove ability to access this method from
+  // MessageLoopCurrentForUI.
+  void Start();
+
+  // Forwards to MessageLoopForUI::Abort().
+  // TODO(https://crbug.com/825327): Plumb the actual MessageLoopForUI* to
+  // callers and remove ability to access this method from
+  // MessageLoopCurrentForUI.
+  void Abort();
+#endif
+
+ private:
+  MessageLoopCurrentForUI(MessageLoop* current, MessagePumpForUI* pump)
+      : MessageLoopCurrent(current), pump_(pump) {
+    DCHECK(pump_);
+  }
+
+  MessagePumpForUI* const pump_;
+};
+
+#endif  // !defined(OS_NACL)
+
+// ForIO extension of MessageLoopCurrent.
+class BASE_EXPORT MessageLoopCurrentForIO : public MessageLoopCurrent {
+ public:
+  // Returns an interface for the MessageLoopForIO of the current thread.
+  // Asserts that IsSet().
+  static MessageLoopCurrentForIO Get();
+
+  // Returns true if the current thread is running a MessageLoopForIO.
+  static bool IsSet();
+
+  MessageLoopCurrentForIO* operator->() { return this; }
+
+#if !defined(OS_NACL_SFI)
+
+#if defined(OS_WIN)
+  // Please see MessagePumpWin for definitions of these methods.
+  void RegisterIOHandler(HANDLE file, MessagePumpForIO::IOHandler* handler);
+  bool RegisterJobObject(HANDLE job, MessagePumpForIO::IOHandler* handler);
+  bool WaitForIOCompletion(DWORD timeout, MessagePumpForIO::IOHandler* filter);
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  // Please see WatchableIOMessagePumpPosix for definition.
+  // Prefer base::FileDescriptorWatcher for non-critical IO.
+  bool WatchFileDescriptor(int fd,
+                           bool persistent,
+                           MessagePumpForIO::Mode mode,
+                           MessagePumpForIO::FdWatchController* controller,
+                           MessagePumpForIO::FdWatcher* delegate);
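+
+  // For illustration, a hedged sketch mirroring the unit tests (Reader is a
+  // hypothetical FdWatcher implementation):
+  //
+  //   class Reader : public MessagePumpForIO::FdWatcher {
+  //    public:
+  //     void OnFileCanReadWithoutBlocking(int fd) override { /* read |fd| */ }
+  //     void OnFileCanWriteWithoutBlocking(int fd) override {}
+  //   };
+  //
+  //   MessagePumpForIO::FdWatchController controller(FROM_HERE);
+  //   Reader reader;
+  //   MessageLoopCurrentForIO::Get()->WatchFileDescriptor(
+  //       fd, /*persistent=*/true, MessagePumpForIO::WATCH_READ, &controller,
+  //       &reader);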
+#endif  // defined(OS_WIN)
+
+#if defined(OS_FUCHSIA)
+  // Additional watch API for native platform resources.
+  bool WatchZxHandle(zx_handle_t handle,
+                     bool persistent,
+                     zx_signals_t signals,
+                     MessagePumpForIO::ZxHandleWatchController* controller,
+                     MessagePumpForIO::ZxHandleWatcher* delegate);
+#endif  // defined(OS_FUCHSIA)
+
+#endif  // !defined(OS_NACL_SFI)
+
+ private:
+  MessageLoopCurrentForIO(MessageLoop* current, MessagePumpForIO* pump)
+      : MessageLoopCurrent(current), pump_(pump) {
+    DCHECK(pump_);
+  }
+
+  MessagePumpForIO* const pump_;
+};
+
+}  // namespace base
+
+#endif  // BASE_MESSAGE_LOOP_MESSAGE_LOOP_CURRENT_H_
diff --git a/base/message_loop/message_loop_io_posix_unittest.cc b/base/message_loop/message_loop_io_posix_unittest.cc
new file mode 100644
index 0000000..4dd5f28
--- /dev/null
+++ b/base/message_loop/message_loop_io_posix_unittest.cc
@@ -0,0 +1,418 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_loop/message_loop.h"
+
+#include "base/bind.h"
+#include "base/compiler_specific.h"
+#include "base/files/file_util.h"
+#include "base/files/scoped_file.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/message_loop/message_loop_current.h"
+#include "base/message_loop/message_pump_for_io.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/run_loop.h"
+#include "base/test/gtest_util.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+#if !defined(OS_NACL)
+
+namespace {
+
+class MessageLoopForIoPosixTest : public testing::Test {
+ public:
+  MessageLoopForIoPosixTest() = default;
+
+  // testing::Test interface.
+  void SetUp() override {
+    // Create a file descriptor.  Doesn't need to be readable or writable,
+    // as we don't need to actually get any notifications.
+    // pipe() is just the easiest way to do it.
+    int pipefds[2];
+    int err = pipe(pipefds);
+    ASSERT_EQ(0, err);
+    read_fd_ = ScopedFD(pipefds[0]);
+    write_fd_ = ScopedFD(pipefds[1]);
+  }
+
+  void TriggerReadEvent() {
+    // Write from the other end of the pipe to trigger the event.
+    char c = '\0';
+    EXPECT_EQ(1, HANDLE_EINTR(write(write_fd_.get(), &c, 1)));
+  }
+
+ protected:
+  ScopedFD read_fd_;
+  ScopedFD write_fd_;
+
+  DISALLOW_COPY_AND_ASSIGN(MessageLoopForIoPosixTest);
+};
+
+class TestHandler : public MessagePumpForIO::FdWatcher {
+ public:
+  void OnFileCanReadWithoutBlocking(int fd) override {
+    watcher_to_delete_ = nullptr;
+    is_readable_ = true;
+    RunLoop::QuitCurrentWhenIdleDeprecated();
+  }
+  void OnFileCanWriteWithoutBlocking(int fd) override {
+    watcher_to_delete_ = nullptr;
+    is_writable_ = true;
+    RunLoop::QuitCurrentWhenIdleDeprecated();
+  }
+
+  bool is_readable_ = false;
+  bool is_writable_ = false;
+
+  // If set then the contained watcher will be deleted on notification.
+  std::unique_ptr<MessagePumpForIO::FdWatchController> watcher_to_delete_;
+};
+
+// Watcher that calls specified closures when read/write events occur. Verifies
+// that each non-null closure passed to this class is called once and only once.
+// Also resets the read event by reading from the FD.
+class CallClosureHandler : public MessagePumpForIO::FdWatcher {
+ public:
+  CallClosureHandler(OnceClosure read_closure, OnceClosure write_closure)
+      : read_closure_(std::move(read_closure)),
+        write_closure_(std::move(write_closure)) {}
+
+  ~CallClosureHandler() override {
+    EXPECT_TRUE(read_closure_.is_null());
+    EXPECT_TRUE(write_closure_.is_null());
+  }
+
+  void SetReadClosure(OnceClosure read_closure) {
+    EXPECT_TRUE(read_closure_.is_null());
+    read_closure_ = std::move(read_closure);
+  }
+
+  void SetWriteClosure(OnceClosure write_closure) {
+    EXPECT_TRUE(write_closure_.is_null());
+    write_closure_ = std::move(write_closure);
+  }
+
+  // MessagePumpForIO::FdWatcher interface.
+  void OnFileCanReadWithoutBlocking(int fd) override {
+    // Empty the pipe buffer to reset the event. Otherwise the libevent-based
+    // implementation of MessageLoop may call the event handler again even if
+    // |read_closure_| below quits the RunLoop.
+    char c;
+    int result = HANDLE_EINTR(read(fd, &c, 1));
+    if (result == -1) {
+      PLOG(ERROR) << "read";
+      FAIL();
+    }
+    EXPECT_EQ(result, 1);
+
+    ASSERT_FALSE(read_closure_.is_null());
+    std::move(read_closure_).Run();
+  }
+
+  void OnFileCanWriteWithoutBlocking(int fd) override {
+    ASSERT_FALSE(write_closure_.is_null());
+    std::move(write_closure_).Run();
+  }
+
+ private:
+  OnceClosure read_closure_;
+  OnceClosure write_closure_;
+};
+
+TEST_F(MessageLoopForIoPosixTest, FileDescriptorWatcherOutlivesMessageLoop) {
+  // Simulate a MessageLoop that dies before a FileDescriptorWatcher.
+  // This could happen when people use the Singleton pattern or atexit.
+
+  // Arrange for watcher to live longer than message loop.
+  MessagePumpForIO::FdWatchController watcher(FROM_HERE);
+  TestHandler handler;
+  {
+    MessageLoopForIO message_loop;
+
+    MessageLoopCurrentForIO::Get()->WatchFileDescriptor(
+        write_fd_.get(), true, MessagePumpForIO::WATCH_WRITE, &watcher,
+        &handler);
+    // Don't run the message loop, just destroy it.
+  }
+
+  ASSERT_FALSE(handler.is_readable_);
+  ASSERT_FALSE(handler.is_writable_);
+}
+
+TEST_F(MessageLoopForIoPosixTest, FileDescriptorWatcherDoubleStop) {
+  // Verify that it's OK to call StopWatchingFileDescriptor() more than once.
+
+  // Arrange for message loop to live longer than watcher.
+  MessageLoopForIO message_loop;
+  {
+    MessagePumpForIO::FdWatchController watcher(FROM_HERE);
+
+    TestHandler handler;
+    MessageLoopCurrentForIO::Get()->WatchFileDescriptor(
+        write_fd_.get(), true, MessagePumpForIO::WATCH_WRITE, &watcher,
+        &handler);
+    ASSERT_TRUE(watcher.StopWatchingFileDescriptor());
+    ASSERT_TRUE(watcher.StopWatchingFileDescriptor());
+  }
+}
+
+TEST_F(MessageLoopForIoPosixTest, FileDescriptorWatcherDeleteInCallback) {
+  // Verify that it is OK to delete the FileDescriptorWatcher from within a
+  // callback.
+  MessageLoopForIO message_loop;
+
+  TestHandler handler;
+  handler.watcher_to_delete_ =
+      std::make_unique<MessagePumpForIO::FdWatchController>(FROM_HERE);
+
+  MessageLoopCurrentForIO::Get()->WatchFileDescriptor(
+      write_fd_.get(), true, MessagePumpForIO::WATCH_WRITE,
+      handler.watcher_to_delete_.get(), &handler);
+  RunLoop().Run();
+}
+
+// Verify that basic readable notification works.
+TEST_F(MessageLoopForIoPosixTest, WatchReadable) {
+  MessageLoopForIO message_loop;
+  MessagePumpForIO::FdWatchController watcher(FROM_HERE);
+  TestHandler handler;
+
+  // Watch the pipe for readability.
+  ASSERT_TRUE(MessageLoopCurrentForIO::Get()->WatchFileDescriptor(
+      read_fd_.get(), /*persistent=*/false, MessagePumpForIO::WATCH_READ,
+      &watcher, &handler));
+
+  // The pipe should not be readable when first created.
+  RunLoop().RunUntilIdle();
+  ASSERT_FALSE(handler.is_readable_);
+  ASSERT_FALSE(handler.is_writable_);
+
+  TriggerReadEvent();
+
+  // We don't want to assume that the read fd becomes readable the
+  // instant a byte is written, so Run until quit by an event.
+  RunLoop().Run();
+
+  ASSERT_TRUE(handler.is_readable_);
+  ASSERT_FALSE(handler.is_writable_);
+}
+
+// Verify that watching a file descriptor for writability succeeds.
+TEST_F(MessageLoopForIoPosixTest, WatchWritable) {
+  MessageLoopForIO message_loop;
+  MessagePumpForIO::FdWatchController watcher(FROM_HERE);
+  TestHandler handler;
+
+  // Watch the pipe for writability.
+  ASSERT_TRUE(MessageLoopCurrentForIO::Get()->WatchFileDescriptor(
+      write_fd_.get(), /*persistent=*/false, MessagePumpForIO::WATCH_WRITE,
+      &watcher, &handler));
+
+  // We should not receive a writable notification until we process events.
+  ASSERT_FALSE(handler.is_readable_);
+  ASSERT_FALSE(handler.is_writable_);
+
+  // The pipe should be writable immediately, but wait for the quit closure
+  // anyway, to be sure.
+  RunLoop().Run();
+
+  ASSERT_FALSE(handler.is_readable_);
+  ASSERT_TRUE(handler.is_writable_);
+}
+
+// Verify that RunUntilIdle() receives IO notifications.
+TEST_F(MessageLoopForIoPosixTest, RunUntilIdle) {
+  MessageLoopForIO message_loop;
+  MessagePumpForIO::FdWatchController watcher(FROM_HERE);
+  TestHandler handler;
+
+  // Watch the pipe for readability.
+  ASSERT_TRUE(MessageLoopCurrentForIO::Get()->WatchFileDescriptor(
+      read_fd_.get(), /*persistent=*/false, MessagePumpForIO::WATCH_READ,
+      &watcher, &handler));
+
+  // The pipe should not be readable when first created.
+  RunLoop().RunUntilIdle();
+  ASSERT_FALSE(handler.is_readable_);
+
+  TriggerReadEvent();
+
+  while (!handler.is_readable_)
+    RunLoop().RunUntilIdle();
+}
+
+void StopWatching(MessagePumpForIO::FdWatchController* controller,
+                  RunLoop* run_loop) {
+  controller->StopWatchingFileDescriptor();
+  run_loop->Quit();
+}
+
+// Verify that StopWatchingFileDescriptor() works from an event handler.
+TEST_F(MessageLoopForIoPosixTest, StopFromHandler) {
+  MessageLoopForIO message_loop;
+  RunLoop run_loop;
+  MessagePumpForIO::FdWatchController watcher(FROM_HERE);
+  CallClosureHandler handler(BindOnce(&StopWatching, &watcher, &run_loop),
+                             OnceClosure());
+
+  // Create persistent watcher.
+  ASSERT_TRUE(MessageLoopCurrentForIO::Get()->WatchFileDescriptor(
+      read_fd_.get(), /*persistent=*/true, MessagePumpForIO::WATCH_READ,
+      &watcher, &handler));
+
+  TriggerReadEvent();
+  run_loop.Run();
+
+  // Trigger the event again. The event handler should not be called again.
+  TriggerReadEvent();
+  RunLoop().RunUntilIdle();
+}
+
+// Verify that non-persistent watcher is called only once.
+TEST_F(MessageLoopForIoPosixTest, NonPersistentWatcher) {
+  MessageLoopForIO message_loop;
+  MessagePumpForIO::FdWatchController watcher(FROM_HERE);
+
+  RunLoop run_loop;
+  CallClosureHandler handler(run_loop.QuitClosure(), OnceClosure());
+
+  // Create a non-persistent watcher.
+  ASSERT_TRUE(MessageLoopCurrentForIO::Get()->WatchFileDescriptor(
+      read_fd_.get(), /*persistent=*/false, MessagePumpForIO::WATCH_READ,
+      &watcher, &handler));
+
+  TriggerReadEvent();
+  run_loop.Run();
+
+  // Trigger the event again. |handler| should not be called again.
+  TriggerReadEvent();
+  RunLoop().RunUntilIdle();
+}
+
+// Verify that persistent watcher is called every time the event is triggered.
+TEST_F(MessageLoopForIoPosixTest, PersistentWatcher) {
+  MessageLoopForIO message_loop;
+  MessagePumpForIO::FdWatchController watcher(FROM_HERE);
+
+  RunLoop run_loop1;
+  CallClosureHandler handler(run_loop1.QuitClosure(), OnceClosure());
+
+  // Create persistent watcher.
+  ASSERT_TRUE(MessageLoopCurrentForIO::Get()->WatchFileDescriptor(
+      read_fd_.get(), /*persistent=*/true, MessagePumpForIO::WATCH_READ,
+      &watcher, &handler));
+
+  TriggerReadEvent();
+  run_loop1.Run();
+
+  RunLoop run_loop2;
+  handler.SetReadClosure(run_loop2.QuitClosure());
+
+  // Trigger the event again. |handler| should be called now, which will quit
+  // |run_loop2|.
+  TriggerReadEvent();
+  run_loop2.Run();
+}
+
+void StopWatchingAndWatchAgain(MessagePumpForIO::FdWatchController* controller,
+                               int fd,
+                               MessagePumpForIO::FdWatcher* new_handler,
+                               RunLoop* run_loop) {
+  controller->StopWatchingFileDescriptor();
+
+  ASSERT_TRUE(MessageLoopCurrentForIO::Get()->WatchFileDescriptor(
+      fd, /*persistent=*/true, MessagePumpForIO::WATCH_READ, controller,
+      new_handler));
+
+  run_loop->Quit();
+}
+
+// Verify that a watcher can be stopped and reused from an event handler.
+TEST_F(MessageLoopForIoPosixTest, StopAndRestartFromHandler) {
+  MessageLoopForIO message_loop;
+  MessagePumpForIO::FdWatchController watcher(FROM_HERE);
+
+  RunLoop run_loop1;
+  RunLoop run_loop2;
+  CallClosureHandler handler2(run_loop2.QuitClosure(), OnceClosure());
+  CallClosureHandler handler1(BindOnce(&StopWatchingAndWatchAgain, &watcher,
+                                       read_fd_.get(), &handler2, &run_loop1),
+                              OnceClosure());
+
+  // Create persistent watcher.
+  ASSERT_TRUE(MessageLoopCurrentForIO::Get()->WatchFileDescriptor(
+      read_fd_.get(), /*persistent=*/true, MessagePumpForIO::WATCH_READ,
+      &watcher, &handler1));
+
+  TriggerReadEvent();
+  run_loop1.Run();
+
+  // Trigger the event again. |handler2| should be called now, which will quit
+  // |run_loop2|.
+  TriggerReadEvent();
+  run_loop2.Run();
+}
+
+// Verify that the pump properly handles a delayed task after an IO event.
+TEST_F(MessageLoopForIoPosixTest, IoEventThenTimer) {
+  MessageLoopForIO message_loop;
+  MessagePumpForIO::FdWatchController watcher(FROM_HERE);
+
+  RunLoop timer_run_loop;
+  message_loop.task_runner()->PostDelayedTask(
+      FROM_HERE, timer_run_loop.QuitClosure(),
+      base::TimeDelta::FromMilliseconds(10));
+
+  RunLoop watcher_run_loop;
+  CallClosureHandler handler(watcher_run_loop.QuitClosure(), OnceClosure());
+
+  // Create a non-persistent watcher.
+  ASSERT_TRUE(MessageLoopCurrentForIO::Get()->WatchFileDescriptor(
+      read_fd_.get(), /*persistent=*/false, MessagePumpForIO::WATCH_READ,
+      &watcher, &handler));
+
+  TriggerReadEvent();
+
+  // Normally the IO event will be received before the delayed task is
+  // executed, so this run loop will first handle the IO event and then quit on
+  // the timer.
+  timer_run_loop.Run();
+
+  // Run watcher_run_loop in case the IO event wasn't received before the
+  // delayed task.
+  watcher_run_loop.Run();
+}
+
+// Verify that the pump properly handles an IO event after a delayed task.
+TEST_F(MessageLoopForIoPosixTest, TimerThenIoEvent) {
+  MessageLoopForIO message_loop;
+  MessagePumpForIO::FdWatchController watcher(FROM_HERE);
+
+  // Trigger read event from a delayed task.
+  message_loop.task_runner()->PostDelayedTask(
+      FROM_HERE,
+      BindOnce(&MessageLoopForIoPosixTest::TriggerReadEvent, Unretained(this)),
+      TimeDelta::FromMilliseconds(1));
+
+  RunLoop run_loop;
+  CallClosureHandler handler(run_loop.QuitClosure(), OnceClosure());
+
+  // Create a non-persistent watcher.
+  ASSERT_TRUE(MessageLoopCurrentForIO::Get()->WatchFileDescriptor(
+      read_fd_.get(), /*persistent=*/false, MessagePumpForIO::WATCH_READ,
+      &watcher, &handler));
+
+  run_loop.Run();
+}
+
+}  // namespace
+
+#endif  // !defined(OS_NACL)
+
+}  // namespace base
diff --git a/base/message_loop/message_loop_perftest.cc b/base/message_loop/message_loop_perftest.cc
new file mode 100644
index 0000000..867e8fe
--- /dev/null
+++ b/base/message_loop/message_loop_perftest.cc
@@ -0,0 +1,254 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <memory>
+#include <vector>
+
+#include "base/atomicops.h"
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/strings/stringprintf.h"
+#include "base/synchronization/atomic_flag.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/sequenced_task_runner_handle.h"
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/perf/perf_test.h"
+
+namespace base {
+
+namespace {
+
+// A thread that waits for the caller to signal an event before proceeding to
+// call Action::Run().
+class PostingThread {
+ public:
+  class Action {
+   public:
+    virtual ~Action() = default;
+
+    // Called after the thread is started and |start_event_| is signalled.
+    virtual void Run() = 0;
+
+   protected:
+    Action() = default;
+
+   private:
+    DISALLOW_COPY_AND_ASSIGN(Action);
+  };
+
+  // Creates a PostingThread where the thread waits on |start_event| before
+  // calling action->Run(). If a thread is returned, the thread is guaranteed to
+  // be allocated and running and the caller must call Join() before destroying
+  // the PostingThread.
+  static std::unique_ptr<PostingThread> Create(WaitableEvent* start_event,
+                                               std::unique_ptr<Action> action) {
+    auto posting_thread =
+        WrapUnique(new PostingThread(start_event, std::move(action)));
+
+    if (!posting_thread->Start())
+      return nullptr;
+
+    return posting_thread;
+  }
+
+  ~PostingThread() { DCHECK_EQ(!thread_handle_.is_null(), join_called_); }
+
+  void Join() {
+    PlatformThread::Join(thread_handle_);
+    join_called_ = true;
+  }
+
+ private:
+  class Delegate final : public PlatformThread::Delegate {
+   public:
+    Delegate(PostingThread* outer, std::unique_ptr<Action> action)
+        : outer_(outer), action_(std::move(action)) {
+      DCHECK(outer_);
+      DCHECK(action_);
+    }
+
+    ~Delegate() override = default;
+
+   private:
+    void ThreadMain() override {
+      outer_->thread_started_.Signal();
+      outer_->start_event_->Wait();
+      action_->Run();
+    }
+
+    PostingThread* const outer_;
+    const std::unique_ptr<Action> action_;
+
+    DISALLOW_COPY_AND_ASSIGN(Delegate);
+  };
+
+  PostingThread(WaitableEvent* start_event, std::unique_ptr<Action> delegate)
+      : start_event_(start_event),
+        thread_started_(WaitableEvent::ResetPolicy::MANUAL,
+                        WaitableEvent::InitialState::NOT_SIGNALED),
+        delegate_(this, std::move(delegate)) {
+    DCHECK(start_event_);
+  }
+
+  bool Start() {
+    bool thread_created =
+        PlatformThread::Create(0, &delegate_, &thread_handle_);
+    if (thread_created)
+      thread_started_.Wait();
+
+    return thread_created;
+  }
+
+  bool join_called_ = false;
+  WaitableEvent* const start_event_;
+  WaitableEvent thread_started_;
+  Delegate delegate_;
+
+  PlatformThreadHandle thread_handle_;
+
+  DISALLOW_COPY_AND_ASSIGN(PostingThread);
+};
+
+class MessageLoopPerfTest : public ::testing::TestWithParam<int> {
+ public:
+  MessageLoopPerfTest()
+      : message_loop_task_runner_(SequencedTaskRunnerHandle::Get()),
+        run_posting_threads_(WaitableEvent::ResetPolicy::MANUAL,
+                             WaitableEvent::InitialState::NOT_SIGNALED) {}
+
+  static std::string ParamInfoToString(
+      ::testing::TestParamInfo<int> param_info) {
+    return PostingThreadCountToString(param_info.param);
+  }
+
+  static std::string PostingThreadCountToString(int posting_threads) {
+    // Special-case 1 thread so the name uses the singular "Thread".
+    if (posting_threads == 1)
+      return "1_Posting_Thread";
+
+    return StringPrintf("%d_Posting_Threads", posting_threads);
+  }
+
+ protected:
+  class ContinuouslyPostTasks final : public PostingThread::Action {
+   public:
+    explicit ContinuouslyPostTasks(MessageLoopPerfTest* outer) : outer_(outer) {
+      DCHECK(outer_);
+    }
+    ~ContinuouslyPostTasks() override = default;
+
+   private:
+    void Run() override {
+      RepeatingClosure task_to_run =
+          BindRepeating([](size_t* num_tasks_run) { ++*num_tasks_run; },
+                        &outer_->num_tasks_run_);
+      while (!outer_->stop_posting_threads_.IsSet()) {
+        outer_->message_loop_task_runner_->PostTask(FROM_HERE, task_to_run);
+        subtle::NoBarrier_AtomicIncrement(&outer_->num_tasks_posted_, 1);
+      }
+    }
+
+    MessageLoopPerfTest* const outer_;
+
+    DISALLOW_COPY_AND_ASSIGN(ContinuouslyPostTasks);
+  };
+
+  void SetUp() override {
+    // This check is here because we can't ASSERT_TRUE in the constructor.
+    ASSERT_TRUE(message_loop_task_runner_);
+  }
+
+  // Runs ActionType::Run() on |num_posting_threads| threads and requests test
+  // termination after approximately |duration|.
+  template <typename ActionType>
+  void RunTest(const int num_posting_threads, TimeDelta duration) {
+    std::vector<std::unique_ptr<PostingThread>> threads;
+    for (int i = 0; i < num_posting_threads; ++i) {
+      threads.emplace_back(PostingThread::Create(
+          &run_posting_threads_, std::make_unique<ActionType>(this)));
+      // Don't assert here to simplify the code that requires a Join() call for
+      // every created PostingThread.
+      EXPECT_TRUE(threads[i]);
+    }
+
+    RunLoop run_loop;
+    message_loop_task_runner_->PostDelayedTask(
+        FROM_HERE,
+        BindOnce(
+            [](RunLoop* run_loop, AtomicFlag* stop_posting_threads) {
+              stop_posting_threads->Set();
+              run_loop->Quit();
+            },
+            &run_loop, &stop_posting_threads_),
+        duration);
+
+    TimeTicks post_task_start = TimeTicks::Now();
+    run_posting_threads_.Signal();
+
+    TimeTicks run_loop_start = TimeTicks::Now();
+    run_loop.Run();
+    tasks_run_duration_ = TimeTicks::Now() - run_loop_start;
+
+    for (auto& thread : threads)
+      thread->Join();
+
+    tasks_posted_duration_ = TimeTicks::Now() - post_task_start;
+  }
+
+  size_t num_tasks_posted() const {
+    return subtle::NoBarrier_Load(&num_tasks_posted_);
+  }
+
+  TimeDelta tasks_posted_duration() const { return tasks_posted_duration_; }
+
+  size_t num_tasks_run() const { return num_tasks_run_; }
+
+  TimeDelta tasks_run_duration() const { return tasks_run_duration_; }
+
+ private:
+  MessageLoop message_loop_;
+
+  // Accessed on multiple threads, thread-safe or constant:
+  const scoped_refptr<SequencedTaskRunner> message_loop_task_runner_;
+  WaitableEvent run_posting_threads_;
+  AtomicFlag stop_posting_threads_;
+  subtle::AtomicWord num_tasks_posted_ = 0;
+
+  // Accessed only on the test case thread:
+  TimeDelta tasks_posted_duration_;
+  TimeDelta tasks_run_duration_;
+  size_t num_tasks_run_ = 0;
+
+  DISALLOW_COPY_AND_ASSIGN(MessageLoopPerfTest);
+};
+
+}  // namespace
+
+TEST_P(MessageLoopPerfTest, PostTaskRate) {
+  // Measures the average rate of posting tasks from different threads and the
+  // average rate that the message loop is running those tasks.
+  RunTest<ContinuouslyPostTasks>(GetParam(), TimeDelta::FromSeconds(3));
+  perf_test::PrintResult("task_posting", "",
+                         PostingThreadCountToString(GetParam()),
+                         tasks_posted_duration().InMicroseconds() /
+                             static_cast<double>(num_tasks_posted()),
+                         "us/task", true);
+  perf_test::PrintResult("task_running", "",
+                         PostingThreadCountToString(GetParam()),
+                         tasks_run_duration().InMicroseconds() /
+                             static_cast<double>(num_tasks_run()),
+                         "us/task", true);
+}
+
+INSTANTIATE_TEST_CASE_P(,
+                        MessageLoopPerfTest,
+                        ::testing::Values(1, 5, 10),
+                        MessageLoopPerfTest::ParamInfoToString);
+
+}  // namespace base
diff --git a/base/message_loop/message_loop_task_runner.cc b/base/message_loop/message_loop_task_runner.cc
new file mode 100644
index 0000000..f251e3b
--- /dev/null
+++ b/base/message_loop/message_loop_task_runner.cc
@@ -0,0 +1,53 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_loop/message_loop_task_runner.h"
+
+#include <utility>
+
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/message_loop/incoming_task_queue.h"
+
+namespace base {
+namespace internal {
+
+MessageLoopTaskRunner::MessageLoopTaskRunner(
+    scoped_refptr<IncomingTaskQueue> incoming_queue)
+    : incoming_queue_(std::move(incoming_queue)),
+      valid_thread_id_(kInvalidThreadId) {}
+
+void MessageLoopTaskRunner::BindToCurrentThread() {
+  AutoLock lock(valid_thread_id_lock_);
+  DCHECK_EQ(kInvalidThreadId, valid_thread_id_);
+  valid_thread_id_ = PlatformThread::CurrentId();
+}
+
+bool MessageLoopTaskRunner::PostDelayedTask(const Location& from_here,
+                                            OnceClosure task,
+                                            base::TimeDelta delay) {
+  DCHECK(!task.is_null()) << from_here.ToString();
+  return incoming_queue_->AddToIncomingQueue(from_here, std::move(task), delay,
+                                             Nestable::kNestable);
+}
+
+bool MessageLoopTaskRunner::PostNonNestableDelayedTask(
+    const Location& from_here,
+    OnceClosure task,
+    base::TimeDelta delay) {
+  DCHECK(!task.is_null()) << from_here.ToString();
+  return incoming_queue_->AddToIncomingQueue(from_here, std::move(task), delay,
+                                             Nestable::kNonNestable);
+}
+
+bool MessageLoopTaskRunner::RunsTasksInCurrentSequence() const {
+  AutoLock lock(valid_thread_id_lock_);
+  return valid_thread_id_ == PlatformThread::CurrentId();
+}
+
+MessageLoopTaskRunner::~MessageLoopTaskRunner() = default;
+
+}  // namespace internal
+
+}  // namespace base
diff --git a/base/message_loop/message_loop_task_runner.h b/base/message_loop/message_loop_task_runner.h
new file mode 100644
index 0000000..c7d48c2
--- /dev/null
+++ b/base/message_loop/message_loop_task_runner.h
@@ -0,0 +1,60 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MESSAGE_LOOP_MESSAGE_LOOP_TASK_RUNNER_H_
+#define BASE_MESSAGE_LOOP_MESSAGE_LOOP_TASK_RUNNER_H_
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/pending_task.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/platform_thread.h"
+
+namespace base {
+namespace internal {
+
+class IncomingTaskQueue;
+
+// A stock implementation of SingleThreadTaskRunner that is created and managed
+// by a MessageLoop. For now a MessageLoopTaskRunner can only be created as
+// part of a MessageLoop.
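+//
+// For illustration, a hedged sketch of the intended flow (normally driven by
+// MessageLoop internals; |queue| and Work() are hypothetical):
+//
+//   scoped_refptr<MessageLoopTaskRunner> runner =
+//       new MessageLoopTaskRunner(queue);
+//   runner->BindToCurrentThread();  // Once, on the loop's thread.
+//   runner->PostDelayedTask(FROM_HERE, BindOnce(&Work), TimeDelta());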
+class BASE_EXPORT MessageLoopTaskRunner : public SingleThreadTaskRunner {
+ public:
+  explicit MessageLoopTaskRunner(
+      scoped_refptr<IncomingTaskQueue> incoming_queue);
+
+  // Initialize this message loop task runner on the current thread.
+  void BindToCurrentThread();
+
+  // SingleThreadTaskRunner implementation
+  bool PostDelayedTask(const Location& from_here,
+                       OnceClosure task,
+                       TimeDelta delay) override;
+  bool PostNonNestableDelayedTask(const Location& from_here,
+                                  OnceClosure task,
+                                  TimeDelta delay) override;
+  bool RunsTasksInCurrentSequence() const override;
+
+ private:
+  friend class RefCountedThreadSafe<MessageLoopTaskRunner>;
+  ~MessageLoopTaskRunner() override;
+
+  // The incoming queue receiving all posted tasks.
+  scoped_refptr<IncomingTaskQueue> incoming_queue_;
+
+  // ID of the thread |this| was created on.  Could be accessed on multiple
+  // threads, protected by |valid_thread_id_lock_|.
+  PlatformThreadId valid_thread_id_;
+  mutable Lock valid_thread_id_lock_;
+
+  DISALLOW_COPY_AND_ASSIGN(MessageLoopTaskRunner);
+};
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_MESSAGE_LOOP_MESSAGE_LOOP_TASK_RUNNER_H_
diff --git a/base/message_loop/message_loop_task_runner_unittest.cc b/base/message_loop/message_loop_task_runner_unittest.cc
new file mode 100644
index 0000000..c7e9aa0
--- /dev/null
+++ b/base/message_loop/message_loop_task_runner_unittest.cc
@@ -0,0 +1,365 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_loop/message_loop_task_runner.h"
+
+#include <memory>
+
+#include "base/atomic_sequence_num.h"
+#include "base/bind.h"
+#include "base/debug/leak_annotations.h"
+#include "base/message_loop/message_loop.h"
+#include "base/message_loop/message_loop_task_runner.h"
+#include "base/run_loop.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/platform_test.h"
+
+namespace base {
+
+class MessageLoopTaskRunnerTest : public testing::Test {
+ public:
+  MessageLoopTaskRunnerTest()
+      : current_loop_(new MessageLoop()),
+        task_thread_("task_thread"),
+        thread_sync_(WaitableEvent::ResetPolicy::MANUAL,
+                     WaitableEvent::InitialState::NOT_SIGNALED) {}
+
+  void DeleteCurrentMessageLoop() { current_loop_.reset(); }
+
+ protected:
+  void SetUp() override {
+    // Use SetUp() instead of the constructor to avoid posting a task to a
+    // partially constructed object.
+    task_thread_.Start();
+
+    // Allow us to pause the |task_thread_|'s MessageLoop.
+    task_thread_.task_runner()->PostTask(
+        FROM_HERE, BindOnce(&MessageLoopTaskRunnerTest::BlockTaskThreadHelper,
+                            Unretained(this)));
+  }
+
+  void TearDown() override {
+    // Make sure the |task_thread_| is not blocked, and stop the thread
+    // fully before destruction because its tasks may still depend on the
+    // |thread_sync_| event.
+    thread_sync_.Signal();
+    task_thread_.Stop();
+    DeleteCurrentMessageLoop();
+  }
+
+  // Make LoopRecorder threadsafe so that there is defined behavior even if a
+  // threading mistake sneaks into the PostTaskAndReplyRelay implementation.
+  class LoopRecorder : public RefCountedThreadSafe<LoopRecorder> {
+   public:
+    LoopRecorder(MessageLoop** run_on,
+                 MessageLoop** deleted_on,
+                 int* destruct_order)
+        : run_on_(run_on),
+          deleted_on_(deleted_on),
+          destruct_order_(destruct_order) {}
+
+    void RecordRun() { *run_on_ = MessageLoop::current(); }
+
+   private:
+    friend class RefCountedThreadSafe<LoopRecorder>;
+    ~LoopRecorder() {
+      *deleted_on_ = MessageLoop::current();
+      *destruct_order_ = g_order.GetNext();
+    }
+
+    MessageLoop** run_on_;
+    MessageLoop** deleted_on_;
+    int* destruct_order_;
+  };
+
+  static void RecordLoop(scoped_refptr<LoopRecorder> recorder) {
+    recorder->RecordRun();
+  }
+
+  static void RecordLoopAndQuit(scoped_refptr<LoopRecorder> recorder) {
+    recorder->RecordRun();
+    RunLoop::QuitCurrentWhenIdleDeprecated();
+  }
+
+  void UnblockTaskThread() { thread_sync_.Signal(); }
+
+  void BlockTaskThreadHelper() { thread_sync_.Wait(); }
+
+  static AtomicSequenceNumber g_order;
+
+  std::unique_ptr<MessageLoop> current_loop_;
+  Thread task_thread_;
+
+ private:
+  base::WaitableEvent thread_sync_;
+};
+
+AtomicSequenceNumber MessageLoopTaskRunnerTest::g_order;
+
+TEST_F(MessageLoopTaskRunnerTest, PostTaskAndReply_Basic) {
+  MessageLoop* task_run_on = nullptr;
+  MessageLoop* task_deleted_on = nullptr;
+  int task_delete_order = -1;
+  MessageLoop* reply_run_on = nullptr;
+  MessageLoop* reply_deleted_on = nullptr;
+  int reply_delete_order = -1;
+
+  scoped_refptr<LoopRecorder> task_recorder =
+      new LoopRecorder(&task_run_on, &task_deleted_on, &task_delete_order);
+  scoped_refptr<LoopRecorder> reply_recorder =
+      new LoopRecorder(&reply_run_on, &reply_deleted_on, &reply_delete_order);
+
+  ASSERT_TRUE(task_thread_.task_runner()->PostTaskAndReply(
+      FROM_HERE, BindOnce(&RecordLoop, task_recorder),
+      BindOnce(&RecordLoopAndQuit, reply_recorder)));
+
+  // Die if base::Bind doesn't retain a reference to the recorders.
+  task_recorder = nullptr;
+  reply_recorder = nullptr;
+  ASSERT_FALSE(task_deleted_on);
+  ASSERT_FALSE(reply_deleted_on);
+
+  UnblockTaskThread();
+  RunLoop().Run();
+
+  EXPECT_EQ(task_thread_.message_loop(), task_run_on);
+  EXPECT_EQ(task_thread_.message_loop(), task_deleted_on);
+  EXPECT_EQ(current_loop_.get(), reply_run_on);
+  EXPECT_EQ(current_loop_.get(), reply_deleted_on);
+  EXPECT_LT(task_delete_order, reply_delete_order);
+}
+
+TEST_F(MessageLoopTaskRunnerTest, PostTaskAndReplyOnDeletedThreadDoesNotLeak) {
+  MessageLoop* task_run_on = nullptr;
+  MessageLoop* task_deleted_on = nullptr;
+  int task_delete_order = -1;
+  MessageLoop* reply_run_on = nullptr;
+  MessageLoop* reply_deleted_on = nullptr;
+  int reply_delete_order = -1;
+
+  scoped_refptr<LoopRecorder> task_recorder =
+      new LoopRecorder(&task_run_on, &task_deleted_on, &task_delete_order);
+  scoped_refptr<LoopRecorder> reply_recorder =
+      new LoopRecorder(&reply_run_on, &reply_deleted_on, &reply_delete_order);
+
+  // Grab a task runner to a dead MessageLoop.
+  scoped_refptr<SingleThreadTaskRunner> task_runner =
+      task_thread_.task_runner();
+  UnblockTaskThread();
+  task_thread_.Stop();
+
+  ASSERT_FALSE(task_runner->PostTaskAndReply(
+      FROM_HERE, BindOnce(&RecordLoop, task_recorder),
+      BindOnce(&RecordLoopAndQuit, reply_recorder)));
+
+  // The relay should have properly deleted its resources, leaving us as the
+  // only reference.
+  EXPECT_EQ(task_delete_order, reply_delete_order);
+  ASSERT_TRUE(task_recorder->HasOneRef());
+  ASSERT_TRUE(reply_recorder->HasOneRef());
+
+  // Nothing should have run though.
+  EXPECT_FALSE(task_run_on);
+  EXPECT_FALSE(reply_run_on);
+}
+
+TEST_F(MessageLoopTaskRunnerTest, PostTaskAndReply_SameLoop) {
+  MessageLoop* task_run_on = nullptr;
+  MessageLoop* task_deleted_on = nullptr;
+  int task_delete_order = -1;
+  MessageLoop* reply_run_on = nullptr;
+  MessageLoop* reply_deleted_on = nullptr;
+  int reply_delete_order = -1;
+
+  scoped_refptr<LoopRecorder> task_recorder =
+      new LoopRecorder(&task_run_on, &task_deleted_on, &task_delete_order);
+  scoped_refptr<LoopRecorder> reply_recorder =
+      new LoopRecorder(&reply_run_on, &reply_deleted_on, &reply_delete_order);
+
+  // Enqueue the relay.
+  ASSERT_TRUE(current_loop_->task_runner()->PostTaskAndReply(
+      FROM_HERE, BindOnce(&RecordLoop, task_recorder),
+      BindOnce(&RecordLoopAndQuit, reply_recorder)));
+
+  // Die if base::Bind doesn't retain a reference to the recorders.
+  task_recorder = nullptr;
+  reply_recorder = nullptr;
+  ASSERT_FALSE(task_deleted_on);
+  ASSERT_FALSE(reply_deleted_on);
+
+  RunLoop().Run();
+
+  EXPECT_EQ(current_loop_.get(), task_run_on);
+  EXPECT_EQ(current_loop_.get(), task_deleted_on);
+  EXPECT_EQ(current_loop_.get(), reply_run_on);
+  EXPECT_EQ(current_loop_.get(), reply_deleted_on);
+  EXPECT_LT(task_delete_order, reply_delete_order);
+}
+
+TEST_F(MessageLoopTaskRunnerTest,
+       PostTaskAndReply_DeadReplyTaskRunnerBehavior) {
+  // Annotate the scope as having memory leaks to suppress heapchecker reports.
+  ANNOTATE_SCOPED_MEMORY_LEAK;
+  MessageLoop* task_run_on = nullptr;
+  MessageLoop* task_deleted_on = nullptr;
+  int task_delete_order = -1;
+  MessageLoop* reply_run_on = nullptr;
+  MessageLoop* reply_deleted_on = nullptr;
+  int reply_delete_order = -1;
+
+  scoped_refptr<LoopRecorder> task_recorder =
+      new LoopRecorder(&task_run_on, &task_deleted_on, &task_delete_order);
+  scoped_refptr<LoopRecorder> reply_recorder =
+      new LoopRecorder(&reply_run_on, &reply_deleted_on, &reply_delete_order);
+
+  // Enqueue the relay.
+  task_thread_.task_runner()->PostTaskAndReply(
+      FROM_HERE, BindOnce(&RecordLoop, task_recorder),
+      BindOnce(&RecordLoopAndQuit, reply_recorder));
+
+  // Die if base::Bind doesn't retain a reference to the recorders.
+  task_recorder = nullptr;
+  reply_recorder = nullptr;
+  ASSERT_FALSE(task_deleted_on);
+  ASSERT_FALSE(reply_deleted_on);
+
+  UnblockTaskThread();
+
+  // Mercilessly whack the current loop before |reply| gets to run.
+  current_loop_.reset();
+
+  // This should ensure the relay has been run.  We need to record the
+  // MessageLoop pointer before stopping the thread because Thread::Stop() will
+  // NULL out its own pointer.
+  MessageLoop* task_loop = task_thread_.message_loop();
+  task_thread_.Stop();
+
+  // Even if the reply task runner is already gone, the original task should
+  // already be deleted. However, the reply which hasn't executed yet should
+  // leak to avoid thread-safety issues.
+  EXPECT_EQ(task_loop, task_run_on);
+  EXPECT_EQ(task_loop, task_deleted_on);
+  EXPECT_FALSE(reply_run_on);
+  ASSERT_FALSE(reply_deleted_on);
+
+  // The PostTaskAndReplyRelay is leaked here.  Even if we had a reference to
+  // it, we cannot just delete it because PostTaskAndReplyRelay's destructor
+  // checks that MessageLoop::current() is the same as when the
+  // PostTaskAndReplyRelay object was constructed.  However, this loop must have
+  // already been deleted in order to perform this test.  See
+  // http://crbug.com/86301.
+}
+
+class MessageLoopTaskRunnerThreadingTest : public testing::Test {
+ public:
+  void Release() const {
+    AssertOnIOThread();
+    Quit();
+  }
+
+  void Quit() const {
+    loop_.task_runner()->PostTask(
+        FROM_HERE, RunLoop::QuitCurrentWhenIdleClosureDeprecated());
+  }
+
+  void AssertOnIOThread() const {
+    ASSERT_TRUE(io_thread_->task_runner()->BelongsToCurrentThread());
+    ASSERT_EQ(io_thread_->task_runner(), ThreadTaskRunnerHandle::Get());
+  }
+
+  void AssertOnFileThread() const {
+    ASSERT_TRUE(file_thread_->task_runner()->BelongsToCurrentThread());
+    ASSERT_EQ(file_thread_->task_runner(), ThreadTaskRunnerHandle::Get());
+  }
+
+ protected:
+  void SetUp() override {
+    io_thread_.reset(new Thread("MessageLoopTaskRunnerThreadingTest_IO"));
+    file_thread_.reset(new Thread("MessageLoopTaskRunnerThreadingTest_File"));
+    io_thread_->Start();
+    file_thread_->Start();
+  }
+
+  void TearDown() override {
+    io_thread_->Stop();
+    file_thread_->Stop();
+  }
+
+  static void BasicFunction(MessageLoopTaskRunnerThreadingTest* test) {
+    test->AssertOnFileThread();
+    test->Quit();
+  }
+
+  static void AssertNotRun() { FAIL() << "Callback should not get executed."; }
+
+  class DeletedOnFile {
+   public:
+    explicit DeletedOnFile(MessageLoopTaskRunnerThreadingTest* test)
+        : test_(test) {}
+
+    ~DeletedOnFile() {
+      test_->AssertOnFileThread();
+      test_->Quit();
+    }
+
+   private:
+    MessageLoopTaskRunnerThreadingTest* test_;
+  };
+
+  std::unique_ptr<Thread> io_thread_;
+  std::unique_ptr<Thread> file_thread_;
+
+ private:
+  mutable MessageLoop loop_;
+};
+
+TEST_F(MessageLoopTaskRunnerThreadingTest, Release) {
+  EXPECT_TRUE(io_thread_->task_runner()->ReleaseSoon(FROM_HERE, this));
+  RunLoop().Run();
+}
+
+TEST_F(MessageLoopTaskRunnerThreadingTest, Delete) {
+  DeletedOnFile* deleted_on_file = new DeletedOnFile(this);
+  EXPECT_TRUE(
+      file_thread_->task_runner()->DeleteSoon(FROM_HERE, deleted_on_file));
+  RunLoop().Run();
+}
+
+TEST_F(MessageLoopTaskRunnerThreadingTest, PostTask) {
+  EXPECT_TRUE(file_thread_->task_runner()->PostTask(
+      FROM_HERE, BindOnce(&MessageLoopTaskRunnerThreadingTest::BasicFunction,
+                          Unretained(this))));
+  RunLoop().Run();
+}
+
+TEST_F(MessageLoopTaskRunnerThreadingTest, PostTaskAfterThreadExits) {
+  std::unique_ptr<Thread> test_thread(
+      new Thread("MessageLoopTaskRunnerThreadingTest_Dummy"));
+  test_thread->Start();
+  scoped_refptr<SingleThreadTaskRunner> task_runner =
+      test_thread->task_runner();
+  test_thread->Stop();
+
+  bool ret = task_runner->PostTask(
+      FROM_HERE, BindOnce(&MessageLoopTaskRunnerThreadingTest::AssertNotRun));
+  EXPECT_FALSE(ret);
+}
+
+TEST_F(MessageLoopTaskRunnerThreadingTest, PostTaskAfterThreadIsDeleted) {
+  scoped_refptr<SingleThreadTaskRunner> task_runner;
+  {
+    std::unique_ptr<Thread> test_thread(
+        new Thread("MessageLoopTaskRunnerThreadingTest_Dummy"));
+    test_thread->Start();
+    task_runner = test_thread->task_runner();
+  }
+  bool ret = task_runner->PostTask(
+      FROM_HERE, BindOnce(&MessageLoopTaskRunnerThreadingTest::AssertNotRun));
+  EXPECT_FALSE(ret);
+}
+
+}  // namespace base
diff --git a/base/message_loop/message_loop_unittest.cc b/base/message_loop/message_loop_unittest.cc
new file mode 100644
index 0000000..8525366
--- /dev/null
+++ b/base/message_loop/message_loop_unittest.cc
@@ -0,0 +1,2184 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <vector>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/memory/ref_counted.h"
+#include "base/message_loop/message_loop.h"
+#include "base/message_loop/message_loop_current.h"
+#include "base/message_loop/message_pump_for_io.h"
+#include "base/pending_task.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/task_scheduler/task_scheduler.h"
+#include "base/test/test_simple_task_runner.h"
+#include "base/test/test_timeouts.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/sequence_local_storage_slot.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_ANDROID)
+#include "base/android/java_handler_thread.h"
+#include "base/android/jni_android.h"
+#include "base/test/android/java_handler_thread_helpers.h"
+#endif
+
+#if defined(OS_WIN)
+#include "base/message_loop/message_pump_win.h"
+#include "base/process/memory.h"
+#include "base/strings/string16.h"
+#include "base/win/current_module.h"
+#include "base/win/scoped_handle.h"
+#endif
+
+namespace base {
+
+// TODO(darin): Platform-specific MessageLoop tests should be grouped together
+// to avoid chopping this file up with so many #ifdefs.
+
+namespace {
+
+class Foo : public RefCounted<Foo> {
+ public:
+  Foo() : test_count_(0) {
+  }
+
+  void Test0() { ++test_count_; }
+
+  void Test1ConstRef(const std::string& a) {
+    ++test_count_;
+    result_.append(a);
+  }
+
+  void Test1Ptr(std::string* a) {
+    ++test_count_;
+    result_.append(*a);
+  }
+
+  void Test1Int(int a) { test_count_ += a; }
+
+  void Test2Ptr(std::string* a, std::string* b) {
+    ++test_count_;
+    result_.append(*a);
+    result_.append(*b);
+  }
+
+  void Test2Mixed(const std::string& a, std::string* b) {
+    ++test_count_;
+    result_.append(a);
+    result_.append(*b);
+  }
+
+  int test_count() const { return test_count_; }
+  const std::string& result() const { return result_; }
+
+ private:
+  friend class RefCounted<Foo>;
+
+  ~Foo() = default;
+
+  int test_count_;
+  std::string result_;
+
+  DISALLOW_COPY_AND_ASSIGN(Foo);
+};
+
+// This function runs slowly to simulate a large amount of work being done.
+static void SlowFunc(TimeDelta pause, int* quit_counter) {
+  PlatformThread::Sleep(pause);
+  if (--(*quit_counter) == 0)
+    RunLoop::QuitCurrentWhenIdleDeprecated();
+}
+
+// This function records the time at which it was run in a TimeTicks object,
+// which is useful for building a variety of MessageLoop tests.
+static void RecordRunTimeFunc(TimeTicks* run_time, int* quit_counter) {
+  *run_time = TimeTicks::Now();
+
+  // Cause this function to take some time to execute.  As a result we can
+  // count on subsequent RecordRunTimeFunc()s running at a later time, without
+  // worrying about the resolution of our system clock being an issue.
+  SlowFunc(TimeDelta::FromMilliseconds(10), quit_counter);
+}
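+
+// Worked example of why the 10 ms pause above matters: on some platforms the
+// clock backing TimeTicks::Now() can tick as coarsely as ~15 ms, so two
+// back-to-back calls may return identical values. The sleep inside the first
+// RecordRunTimeFunc() guarantees that a later one observes a strictly greater
+// time, which the delayed-task ordering tests below rely on.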
+
+enum TaskType {
+  MESSAGEBOX,
+  ENDDIALOG,
+  RECURSIVE,
+  TIMEDMESSAGELOOP,
+  QUITMESSAGELOOP,
+  ORDERED,
+  PUMPS,
+  SLEEP,
+  RUNS,
+};
+
+// Saves the order in which the tasks executed.
+struct TaskItem {
+  TaskItem(TaskType t, int c, bool s)
+      : type(t),
+        cookie(c),
+        start(s) {
+  }
+
+  TaskType type;
+  int cookie;
+  bool start;
+
+  bool operator == (const TaskItem& other) const {
+    return type == other.type && cookie == other.cookie && start == other.start;
+  }
+};
+
+std::ostream& operator <<(std::ostream& os, TaskType type) {
+  switch (type) {
+  case MESSAGEBOX:        os << "MESSAGEBOX"; break;
+  case ENDDIALOG:         os << "ENDDIALOG"; break;
+  case RECURSIVE:         os << "RECURSIVE"; break;
+  case TIMEDMESSAGELOOP:  os << "TIMEDMESSAGELOOP"; break;
+  case QUITMESSAGELOOP:   os << "QUITMESSAGELOOP"; break;
+  case ORDERED:           os << "ORDERED"; break;
+  case PUMPS:             os << "PUMPS"; break;
+  case SLEEP:             os << "SLEEP"; break;
+  case RUNS:              os << "RUNS"; break;
+  default:
+    NOTREACHED();
+    os << "Unknown TaskType";
+    break;
+  }
+  return os;
+}
+
+std::ostream& operator <<(std::ostream& os, const TaskItem& item) {
+  if (item.start)
+    return os << item.type << " " << item.cookie << " starts";
+  else
+    return os << item.type << " " << item.cookie << " ends";
+}
+
+class TaskList {
+ public:
+  void RecordStart(TaskType type, int cookie) {
+    TaskItem item(type, cookie, true);
+    DVLOG(1) << item;
+    task_list_.push_back(item);
+  }
+
+  void RecordEnd(TaskType type, int cookie) {
+    TaskItem item(type, cookie, false);
+    DVLOG(1) << item;
+    task_list_.push_back(item);
+  }
+
+  size_t Size() {
+    return task_list_.size();
+  }
+
+  TaskItem Get(int n) {
+    return task_list_[n];
+  }
+
+ private:
+  std::vector<TaskItem> task_list_;
+};
+
+class DummyTaskObserver : public MessageLoop::TaskObserver {
+ public:
+  explicit DummyTaskObserver(int num_tasks)
+      : num_tasks_started_(0), num_tasks_processed_(0), num_tasks_(num_tasks) {}
+
+  DummyTaskObserver(int num_tasks, int num_tasks_started)
+      : num_tasks_started_(num_tasks_started),
+        num_tasks_processed_(0),
+        num_tasks_(num_tasks) {}
+
+  ~DummyTaskObserver() override = default;
+
+  void WillProcessTask(const PendingTask& pending_task) override {
+    num_tasks_started_++;
+    EXPECT_LE(num_tasks_started_, num_tasks_);
+    EXPECT_EQ(num_tasks_started_, num_tasks_processed_ + 1);
+  }
+
+  void DidProcessTask(const PendingTask& pending_task) override {
+    num_tasks_processed_++;
+    EXPECT_LE(num_tasks_started_, num_tasks_);
+    EXPECT_EQ(num_tasks_started_, num_tasks_processed_);
+  }
+
+  int num_tasks_started() const { return num_tasks_started_; }
+  int num_tasks_processed() const { return num_tasks_processed_; }
+
+ private:
+  int num_tasks_started_;
+  int num_tasks_processed_;
+  const int num_tasks_;
+
+  DISALLOW_COPY_AND_ASSIGN(DummyTaskObserver);
+};
+
+void RecursiveFunc(TaskList* order, int cookie, int depth,
+                   bool is_reentrant) {
+  order->RecordStart(RECURSIVE, cookie);
+  if (depth > 0) {
+    if (is_reentrant)
+      MessageLoopCurrent::Get()->SetNestableTasksAllowed(true);
+    ThreadTaskRunnerHandle::Get()->PostTask(
+        FROM_HERE,
+        BindOnce(&RecursiveFunc, order, cookie, depth - 1, is_reentrant));
+  }
+  order->RecordEnd(RECURSIVE, cookie);
+}
+
+void QuitFunc(TaskList* order, int cookie) {
+  order->RecordStart(QUITMESSAGELOOP, cookie);
+  RunLoop::QuitCurrentWhenIdleDeprecated();
+  order->RecordEnd(QUITMESSAGELOOP, cookie);
+}
+
+void PostNTasks(int posts_remaining) {
+  if (posts_remaining > 1) {
+    ThreadTaskRunnerHandle::Get()->PostTask(
+        FROM_HERE, BindOnce(&PostNTasks, posts_remaining - 1));
+  }
+}
+
+enum class TaskSchedulerAvailability {
+  NO_TASK_SCHEDULER,
+  WITH_TASK_SCHEDULER,
+};
+
+std::string TaskSchedulerAvailabilityToString(
+    TaskSchedulerAvailability availability) {
+  switch (availability) {
+    case TaskSchedulerAvailability::NO_TASK_SCHEDULER:
+      return "NoTaskScheduler";
+    case TaskSchedulerAvailability::WITH_TASK_SCHEDULER:
+      return "WithTaskScheduler";
+  }
+  NOTREACHED();
+  return "Unknown";
+}
+
+class MessageLoopTest
+    : public ::testing::TestWithParam<TaskSchedulerAvailability> {
+ public:
+  MessageLoopTest() = default;
+  ~MessageLoopTest() override = default;
+
+  void SetUp() override {
+    if (GetParam() == TaskSchedulerAvailability::WITH_TASK_SCHEDULER)
+      TaskScheduler::CreateAndStartWithDefaultParams("MessageLoopTest");
+  }
+
+  void TearDown() override {
+    if (GetParam() == TaskSchedulerAvailability::WITH_TASK_SCHEDULER) {
+      // Failure to call FlushForTesting() could result in task leaks as tasks
+      // are skipped on shutdown.
+      base::TaskScheduler::GetInstance()->FlushForTesting();
+      base::TaskScheduler::GetInstance()->Shutdown();
+      base::TaskScheduler::GetInstance()->JoinForTesting();
+      base::TaskScheduler::SetInstance(nullptr);
+    }
+  }
+
+  static std::string ParamInfoToString(
+      ::testing::TestParamInfo<TaskSchedulerAvailability> param_info) {
+    return TaskSchedulerAvailabilityToString(param_info.param);
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MessageLoopTest);
+};
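+
+// For reference, a parameterized suite of this shape is typically wired up
+// with an instantiation like the sketch below; the instantiation name "All"
+// is illustrative, not taken from this file:
+//
+//   INSTANTIATE_TEST_CASE_P(
+//       All,
+//       MessageLoopTest,
+//       ::testing::Values(TaskSchedulerAvailability::NO_TASK_SCHEDULER,
+//                         TaskSchedulerAvailability::WITH_TASK_SCHEDULER),
+//       MessageLoopTest::ParamInfoToString);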
+
+#if defined(OS_ANDROID)
+void DoNotRun() {
+  ASSERT_TRUE(false);
+}
+
+void RunTest_AbortDontRunMoreTasks(bool delayed, bool init_java_first) {
+  WaitableEvent test_done_event(WaitableEvent::ResetPolicy::MANUAL,
+                                WaitableEvent::InitialState::NOT_SIGNALED);
+  std::unique_ptr<android::JavaHandlerThread> java_thread;
+  if (init_java_first) {
+    java_thread = android::JavaHandlerThreadHelpers::CreateJavaFirst();
+  } else {
+    java_thread = std::make_unique<android::JavaHandlerThread>(
+        "JavaHandlerThreadForTesting from AbortDontRunMoreTasks");
+  }
+  java_thread->Start();
+  java_thread->ListenForUncaughtExceptionsForTesting();
+
+  auto target =
+      BindOnce(&android::JavaHandlerThreadHelpers::ThrowExceptionAndAbort,
+               &test_done_event);
+  if (delayed) {
+    java_thread->message_loop()->task_runner()->PostDelayedTask(
+        FROM_HERE, std::move(target), TimeDelta::FromMilliseconds(10));
+  } else {
+    java_thread->message_loop()->task_runner()->PostTask(FROM_HERE,
+                                                         std::move(target));
+    java_thread->message_loop()->task_runner()->PostTask(FROM_HERE,
+                                                         BindOnce(&DoNotRun));
+  }
+  test_done_event.Wait();
+  java_thread->Stop();
+  android::ScopedJavaLocalRef<jthrowable> exception =
+      java_thread->GetUncaughtExceptionIfAny();
+  ASSERT_TRUE(
+      android::JavaHandlerThreadHelpers::IsExceptionTestException(exception));
+}
+
+TEST_P(MessageLoopTest, JavaExceptionAbort) {
+  constexpr bool delayed = false;
+  constexpr bool init_java_first = false;
+  RunTest_AbortDontRunMoreTasks(delayed, init_java_first);
+}
+TEST_P(MessageLoopTest, DelayedJavaExceptionAbort) {
+  constexpr bool delayed = true;
+  constexpr bool init_java_first = false;
+  RunTest_AbortDontRunMoreTasks(delayed, init_java_first);
+}
+TEST_P(MessageLoopTest, JavaExceptionAbortInitJavaFirst) {
+  constexpr bool delayed = false;
+  constexpr bool init_java_first = true;
+  RunTest_AbortDontRunMoreTasks(delayed, init_java_first);
+}
+
+TEST_P(MessageLoopTest, RunTasksWhileShuttingDownJavaThread) {
+  const int kNumPosts = 6;
+  DummyTaskObserver observer(kNumPosts, 1);
+
+  auto java_thread = std::make_unique<android::JavaHandlerThread>("test");
+  java_thread->Start();
+
+  java_thread->message_loop()->task_runner()->PostTask(
+      FROM_HERE,
+      BindOnce(
+          [](android::JavaHandlerThread* java_thread,
+             DummyTaskObserver* observer, int num_posts) {
+            java_thread->message_loop()->AddTaskObserver(observer);
+            ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+                FROM_HERE, BindOnce([]() { ADD_FAILURE(); }),
+                TimeDelta::FromDays(1));
+            java_thread->StopMessageLoopForTesting();
+            PostNTasks(num_posts);
+          },
+          Unretained(java_thread.get()), Unretained(&observer), kNumPosts));
+
+  java_thread->JoinForTesting();
+  java_thread.reset();
+
+  EXPECT_EQ(kNumPosts, observer.num_tasks_started());
+  EXPECT_EQ(kNumPosts, observer.num_tasks_processed());
+}
+#endif  // defined(OS_ANDROID)
+
+#if defined(OS_WIN)
+
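+// Pumps native Windows messages until a WM_QUIT is retrieved (GetMessage()
+// returns 0 for WM_QUIT), simulating the kind of implicit "sub pump" that a
+// modal dialog runs.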
+void SubPumpFunc() {
+  MessageLoopCurrent::Get()->SetNestableTasksAllowed(true);
+  MSG msg;
+  while (GetMessage(&msg, NULL, 0, 0)) {
+    TranslateMessage(&msg);
+    DispatchMessage(&msg);
+  }
+  RunLoop::QuitCurrentWhenIdleDeprecated();
+}
+
+void RunTest_PostDelayedTask_SharedTimer_SubPump() {
+  MessageLoop message_loop(MessageLoop::TYPE_UI);
+
+  // Test that the interval of the timer, used to run the next delayed task, is
+  // set to a value corresponding to when the next delayed task should run.
+
+  // By setting num_tasks to 1, we ensure that the first task to run causes the
+  // run loop to exit.
+  int num_tasks = 1;
+  TimeTicks run_time;
+
+  message_loop.task_runner()->PostTask(FROM_HERE, BindOnce(&SubPumpFunc));
+
+  // This very delayed task should never run.
+  message_loop.task_runner()->PostDelayedTask(
+      FROM_HERE, BindOnce(&RecordRunTimeFunc, &run_time, &num_tasks),
+      TimeDelta::FromSeconds(1000));
+
+  // This slightly delayed task should run from within SubPumpFunc.
+  message_loop.task_runner()->PostDelayedTask(FROM_HERE,
+                                              BindOnce(&PostQuitMessage, 0),
+                                              TimeDelta::FromMilliseconds(10));
+
+  TimeTicks start_time = TimeTicks::Now();
+
+  RunLoop().Run();
+  EXPECT_EQ(1, num_tasks);
+
+  // Ensure that we ran in far less time than the slower timer.
+  TimeDelta total_time = TimeTicks::Now() - start_time;
+  EXPECT_GT(5000, total_time.InMilliseconds());
+
+  // In case both timers somehow run at nearly the same time, sleep a little
+  // and then run all pending tasks to force them both to have run.  This just
+  // encourages any latent flakiness to show itself.
+  PlatformThread::Sleep(TimeDelta::FromMilliseconds(100));
+  RunLoop().RunUntilIdle();
+
+  EXPECT_TRUE(run_time.is_null());
+}
+
+const wchar_t kMessageBoxTitle[] = L"MessageLoop Unit Test";
+
+// A MessageBox implicitly starts a "modal message loop". Modal dialog boxes,
+// common controls (like OpenFile) and the StartDoc printing function can all
+// cause implicit message loops.
+void MessageBoxFunc(TaskList* order, int cookie, bool is_reentrant) {
+  order->RecordStart(MESSAGEBOX, cookie);
+  if (is_reentrant)
+    MessageLoopCurrent::Get()->SetNestableTasksAllowed(true);
+  MessageBox(NULL, L"Please wait...", kMessageBoxTitle, MB_OK);
+  order->RecordEnd(MESSAGEBOX, cookie);
+}
+
+// Will end the MessageBox.
+void EndDialogFunc(TaskList* order, int cookie) {
+  order->RecordStart(ENDDIALOG, cookie);
+  HWND window = GetActiveWindow();
+  if (window != NULL) {
+    EXPECT_NE(EndDialog(window, IDCONTINUE), 0);
+    // Cheap way to signal that the window wasn't found: in that case,
+    // RecordEnd() is never called.
+    order->RecordEnd(ENDDIALOG, cookie);
+  }
+}
+
+void RecursiveFuncWin(scoped_refptr<SingleThreadTaskRunner> task_runner,
+                      HANDLE event,
+                      bool expect_window,
+                      TaskList* order,
+                      bool is_reentrant) {
+  task_runner->PostTask(FROM_HERE,
+                        BindOnce(&RecursiveFunc, order, 1, 2, is_reentrant));
+  task_runner->PostTask(FROM_HERE,
+                        BindOnce(&MessageBoxFunc, order, 2, is_reentrant));
+  task_runner->PostTask(FROM_HERE,
+                        BindOnce(&RecursiveFunc, order, 3, 2, is_reentrant));
+  // The trick here is that for recursive task processing, this task will be
+  // run _inside_ the MessageBox message loop, dismissing the MessageBox
+  // without giving it a chance to stay up.
+  // For non-recursive task processing, this will be executed _after_ the
+  // MessageBox has been dismissed by the code below, where expect_window is
+  // true.
+  task_runner->PostTask(FROM_HERE, BindOnce(&EndDialogFunc, order, 4));
+  task_runner->PostTask(FROM_HERE, BindOnce(&QuitFunc, order, 5));
+
+  // Ensure that all tasks are posted before the main thread starts running
+  // its message loop.
+  ASSERT_TRUE(SetEvent(event));
+
+  // Poll for the MessageBox. Don't do this at home! At the speed we poll,
+  // you would never notice that a MessageBox was shown.
+  while (expect_window) {
+    HWND window = FindWindow(L"#32770", kMessageBoxTitle);
+    if (window) {
+      // Dismiss it.
+      for (;;) {
+        HWND button = FindWindowEx(window, NULL, L"Button", NULL);
+        if (button != NULL) {
+          EXPECT_EQ(0, SendMessage(button, WM_LBUTTONDOWN, 0, 0));
+          EXPECT_EQ(0, SendMessage(button, WM_LBUTTONUP, 0, 0));
+          break;
+        }
+      }
+      break;
+    }
+  }
+}
+
+// TODO(darin): These tests need to be ported since they test critical
+// message loop functionality.
+
+// A side effect of this test is the generation of a beep. Sorry.
+void RunTest_RecursiveDenial2(MessageLoop::Type message_loop_type) {
+  MessageLoop loop(message_loop_type);
+
+  Thread worker("RecursiveDenial2_worker");
+  Thread::Options options;
+  options.message_loop_type = message_loop_type;
+  ASSERT_TRUE(worker.StartWithOptions(options));
+  TaskList order;
+  win::ScopedHandle event(CreateEvent(NULL, FALSE, FALSE, NULL));
+  worker.task_runner()->PostTask(
+      FROM_HERE, BindOnce(&RecursiveFuncWin, ThreadTaskRunnerHandle::Get(),
+                          event.Get(), true, &order, false));
+  // Let the other thread execute.
+  WaitForSingleObject(event.Get(), INFINITE);
+  RunLoop().Run();
+
+  ASSERT_EQ(17u, order.Size());
+  EXPECT_EQ(order.Get(0), TaskItem(RECURSIVE, 1, true));
+  EXPECT_EQ(order.Get(1), TaskItem(RECURSIVE, 1, false));
+  EXPECT_EQ(order.Get(2), TaskItem(MESSAGEBOX, 2, true));
+  EXPECT_EQ(order.Get(3), TaskItem(MESSAGEBOX, 2, false));
+  EXPECT_EQ(order.Get(4), TaskItem(RECURSIVE, 3, true));
+  EXPECT_EQ(order.Get(5), TaskItem(RECURSIVE, 3, false));
+  // When EndDialogFunc is processed, the window is already dismissed, hence no
+  // "end" entry.
+  EXPECT_EQ(order.Get(6), TaskItem(ENDDIALOG, 4, true));
+  EXPECT_EQ(order.Get(7), TaskItem(QUITMESSAGELOOP, 5, true));
+  EXPECT_EQ(order.Get(8), TaskItem(QUITMESSAGELOOP, 5, false));
+  EXPECT_EQ(order.Get(9), TaskItem(RECURSIVE, 1, true));
+  EXPECT_EQ(order.Get(10), TaskItem(RECURSIVE, 1, false));
+  EXPECT_EQ(order.Get(11), TaskItem(RECURSIVE, 3, true));
+  EXPECT_EQ(order.Get(12), TaskItem(RECURSIVE, 3, false));
+  EXPECT_EQ(order.Get(13), TaskItem(RECURSIVE, 1, true));
+  EXPECT_EQ(order.Get(14), TaskItem(RECURSIVE, 1, false));
+  EXPECT_EQ(order.Get(15), TaskItem(RECURSIVE, 3, true));
+  EXPECT_EQ(order.Get(16), TaskItem(RECURSIVE, 3, false));
+}
+
+// A side effect of this test is the generation of a beep. Sorry.  This test
+// also needs to process Windows messages on the current thread.
+void RunTest_RecursiveSupport2(MessageLoop::Type message_loop_type) {
+  MessageLoop loop(message_loop_type);
+
+  Thread worker("RecursiveSupport2_worker");
+  Thread::Options options;
+  options.message_loop_type = message_loop_type;
+  ASSERT_TRUE(worker.StartWithOptions(options));
+  TaskList order;
+  win::ScopedHandle event(CreateEvent(NULL, FALSE, FALSE, NULL));
+  worker.task_runner()->PostTask(
+      FROM_HERE, BindOnce(&RecursiveFuncWin, ThreadTaskRunnerHandle::Get(),
+                          event.Get(), false, &order, true));
+  // Let the other thread execute.
+  WaitForSingleObject(event.Get(), INFINITE);
+  RunLoop().Run();
+
+  ASSERT_EQ(18u, order.Size());
+  EXPECT_EQ(order.Get(0), TaskItem(RECURSIVE, 1, true));
+  EXPECT_EQ(order.Get(1), TaskItem(RECURSIVE, 1, false));
+  EXPECT_EQ(order.Get(2), TaskItem(MESSAGEBOX, 2, true));
+  // Note that this executes in the MessageBox modal loop.
+  EXPECT_EQ(order.Get(3), TaskItem(RECURSIVE, 3, true));
+  EXPECT_EQ(order.Get(4), TaskItem(RECURSIVE, 3, false));
+  EXPECT_EQ(order.Get(5), TaskItem(ENDDIALOG, 4, true));
+  EXPECT_EQ(order.Get(6), TaskItem(ENDDIALOG, 4, false));
+  EXPECT_EQ(order.Get(7), TaskItem(MESSAGEBOX, 2, false));
+  /* The order can subtly change here. The reason is that when RecursiveFunc(1)
+     is called in the main thread, if it is faster than getting to the
+     PostTask(FROM_HERE, BindOnce(&QuitFunc) execution, the order of task
+     execution can change. We don't care anyway that the order isn't correct.
+  EXPECT_EQ(order.Get(8), TaskItem(QUITMESSAGELOOP, 5, true));
+  EXPECT_EQ(order.Get(9), TaskItem(QUITMESSAGELOOP, 5, false));
+  EXPECT_EQ(order.Get(10), TaskItem(RECURSIVE, 1, true));
+  EXPECT_EQ(order.Get(11), TaskItem(RECURSIVE, 1, false));
+  */
+  EXPECT_EQ(order.Get(12), TaskItem(RECURSIVE, 3, true));
+  EXPECT_EQ(order.Get(13), TaskItem(RECURSIVE, 3, false));
+  EXPECT_EQ(order.Get(14), TaskItem(RECURSIVE, 1, true));
+  EXPECT_EQ(order.Get(15), TaskItem(RECURSIVE, 1, false));
+  EXPECT_EQ(order.Get(16), TaskItem(RECURSIVE, 3, true));
+  EXPECT_EQ(order.Get(17), TaskItem(RECURSIVE, 3, false));
+}
+
+#endif  // defined(OS_WIN)
+
+void PostNTasksThenQuit(int posts_remaining) {
+  if (posts_remaining > 1) {
+    ThreadTaskRunnerHandle::Get()->PostTask(
+        FROM_HERE, BindOnce(&PostNTasksThenQuit, posts_remaining - 1));
+  } else {
+    RunLoop::QuitCurrentWhenIdleDeprecated();
+  }
+}
+
+#if defined(OS_WIN)
+
+class TestIOHandler : public MessagePumpForIO::IOHandler {
+ public:
+  TestIOHandler(const wchar_t* name, HANDLE signal, bool wait);
+
+  void OnIOCompleted(MessagePumpForIO::IOContext* context,
+                     DWORD bytes_transferred,
+                     DWORD error) override;
+
+  void Init();
+  void WaitForIO();
+  OVERLAPPED* context() { return &context_.overlapped; }
+  DWORD size() { return sizeof(buffer_); }
+
+ private:
+  char buffer_[48];
+  MessagePumpForIO::IOContext context_;
+  HANDLE signal_;
+  win::ScopedHandle file_;
+  bool wait_;
+};
+
+TestIOHandler::TestIOHandler(const wchar_t* name, HANDLE signal, bool wait)
+    : signal_(signal), wait_(wait) {
+  memset(buffer_, 0, sizeof(buffer_));
+
+  file_.Set(CreateFile(name, GENERIC_READ, 0, NULL, OPEN_EXISTING,
+                       FILE_FLAG_OVERLAPPED, NULL));
+  EXPECT_TRUE(file_.IsValid());
+}
+
+void TestIOHandler::Init() {
+  MessageLoopCurrentForIO::Get()->RegisterIOHandler(file_.Get(), this);
+
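+  // For a handle opened with FILE_FLAG_OVERLAPPED, a read that gets queued
+  // "fails" with ERROR_IO_PENDING; the completion is delivered later through
+  // OnIOCompleted().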
+  DWORD read;
+  EXPECT_FALSE(ReadFile(file_.Get(), buffer_, size(), &read, context()));
+  EXPECT_EQ(static_cast<DWORD>(ERROR_IO_PENDING), GetLastError());
+  if (wait_)
+    WaitForIO();
+}
+
+void TestIOHandler::OnIOCompleted(MessagePumpForIO::IOContext* context,
+                                  DWORD bytes_transferred,
+                                  DWORD error) {
+  ASSERT_TRUE(context == &context_);
+  ASSERT_TRUE(SetEvent(signal_));
+}
+
+void TestIOHandler::WaitForIO() {
+  EXPECT_TRUE(MessageLoopCurrentForIO::Get()->WaitForIOCompletion(300, this));
+  EXPECT_TRUE(MessageLoopCurrentForIO::Get()->WaitForIOCompletion(400, this));
+}
+
+void RunTest_IOHandler() {
+  win::ScopedHandle callback_called(CreateEvent(NULL, TRUE, FALSE, NULL));
+  ASSERT_TRUE(callback_called.IsValid());
+
+  const wchar_t* kPipeName = L"\\\\.\\pipe\\iohandler_pipe";
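+  // Pipe parameters: outbound-only, byte-type blocking mode (0), at most one
+  // instance, system-chosen buffer sizes (0), default time-out (0), no
+  // security attributes.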
+  win::ScopedHandle server(
+      CreateNamedPipe(kPipeName, PIPE_ACCESS_OUTBOUND, 0, 1, 0, 0, 0, NULL));
+  ASSERT_TRUE(server.IsValid());
+
+  Thread thread("IOHandler test");
+  Thread::Options options;
+  options.message_loop_type = MessageLoop::TYPE_IO;
+  ASSERT_TRUE(thread.StartWithOptions(options));
+
+  TestIOHandler handler(kPipeName, callback_called.Get(), false);
+  thread.task_runner()->PostTask(
+      FROM_HERE, BindOnce(&TestIOHandler::Init, Unretained(&handler)));
+  // Make sure the thread runs and sleeps for lack of work.
+  PlatformThread::Sleep(TimeDelta::FromMilliseconds(100));
+
+  const char buffer[] = "Hello there!";
+  DWORD written;
+  EXPECT_TRUE(WriteFile(server.Get(), buffer, sizeof(buffer), &written, NULL));
+
+  DWORD result = WaitForSingleObject(callback_called.Get(), 1000);
+  EXPECT_EQ(WAIT_OBJECT_0, result);
+
+  thread.Stop();
+}
+
+void RunTest_WaitForIO() {
+  win::ScopedHandle callback1_called(
+      CreateEvent(NULL, TRUE, FALSE, NULL));
+  win::ScopedHandle callback2_called(
+      CreateEvent(NULL, TRUE, FALSE, NULL));
+  ASSERT_TRUE(callback1_called.IsValid());
+  ASSERT_TRUE(callback2_called.IsValid());
+
+  const wchar_t* kPipeName1 = L"\\\\.\\pipe\\iohandler_pipe1";
+  const wchar_t* kPipeName2 = L"\\\\.\\pipe\\iohandler_pipe2";
+  win::ScopedHandle server1(
+      CreateNamedPipe(kPipeName1, PIPE_ACCESS_OUTBOUND, 0, 1, 0, 0, 0, NULL));
+  win::ScopedHandle server2(
+      CreateNamedPipe(kPipeName2, PIPE_ACCESS_OUTBOUND, 0, 1, 0, 0, 0, NULL));
+  ASSERT_TRUE(server1.IsValid());
+  ASSERT_TRUE(server2.IsValid());
+
+  Thread thread("IOHandler test");
+  Thread::Options options;
+  options.message_loop_type = MessageLoop::TYPE_IO;
+  ASSERT_TRUE(thread.StartWithOptions(options));
+
+  TestIOHandler handler1(kPipeName1, callback1_called.Get(), false);
+  TestIOHandler handler2(kPipeName2, callback2_called.Get(), true);
+  thread.task_runner()->PostTask(
+      FROM_HERE, BindOnce(&TestIOHandler::Init, Unretained(&handler1)));
+  // TODO(ajwong): Do we really need such long Sleeps in this function?
+  // Make sure the thread runs and sleeps for lack of work.
+  TimeDelta delay = TimeDelta::FromMilliseconds(100);
+  PlatformThread::Sleep(delay);
+  thread.task_runner()->PostTask(
+      FROM_HERE, BindOnce(&TestIOHandler::Init, Unretained(&handler2)));
+  PlatformThread::Sleep(delay);
+
+  // At this point handler1 is waiting to be called, and the thread is blocked
+  // inside handler2's Init() (in WaitForIO()), filtering for handler2's
+  // completions only.
+
+  const char buffer[] = "Hello there!";
+  DWORD written;
+  EXPECT_TRUE(WriteFile(server1.Get(), buffer, sizeof(buffer), &written, NULL));
+  PlatformThread::Sleep(2 * delay);
+  EXPECT_EQ(static_cast<DWORD>(WAIT_TIMEOUT),
+            WaitForSingleObject(callback1_called.Get(), 0))
+      << "handler1 has not been called";
+
+  EXPECT_TRUE(WriteFile(server2.Get(), buffer, sizeof(buffer), &written, NULL));
+
+  HANDLE objects[2] = { callback1_called.Get(), callback2_called.Get() };
+  DWORD result = WaitForMultipleObjects(2, objects, TRUE, 1000);
+  EXPECT_EQ(WAIT_OBJECT_0, result);
+
+  thread.Stop();
+}
+
+#endif  // defined(OS_WIN)
+
+}  // namespace
+
+//-----------------------------------------------------------------------------
+// Each test is run against each type of MessageLoop.  That way we are sure
+// that message loops work properly in all configurations.  Of course, in some
+// cases, a unit test may only be for a particular type of loop.
+
+namespace {
+
+struct MessageLoopTypedTestParams {
+  MessageLoopTypedTestParams(
+      MessageLoop::Type type_in,
+      TaskSchedulerAvailability task_scheduler_availability_in)
+      : type(type_in),
+        task_scheduler_availability(task_scheduler_availability_in) {}
+
+  MessageLoop::Type type;
+  TaskSchedulerAvailability task_scheduler_availability;
+};
+
+class MessageLoopTypedTest
+    : public ::testing::TestWithParam<MessageLoopTypedTestParams> {
+ public:
+  MessageLoopTypedTest() = default;
+  ~MessageLoopTypedTest() override = default;
+
+  void SetUp() override {
+    if (GetTaskSchedulerAvailability() ==
+        TaskSchedulerAvailability::WITH_TASK_SCHEDULER) {
+      TaskScheduler::CreateAndStartWithDefaultParams("MessageLoopTypedTest");
+    }
+  }
+
+  void TearDown() override {
+    if (GetTaskSchedulerAvailability() ==
+        TaskSchedulerAvailability::WITH_TASK_SCHEDULER) {
+      // Failure to call FlushForTesting() could result in task leaks as tasks
+      // are skipped on shutdown.
+      base::TaskScheduler::GetInstance()->FlushForTesting();
+      base::TaskScheduler::GetInstance()->Shutdown();
+      base::TaskScheduler::GetInstance()->JoinForTesting();
+      base::TaskScheduler::SetInstance(nullptr);
+    }
+  }
+
+  static std::string ParamInfoToString(
+      ::testing::TestParamInfo<MessageLoopTypedTestParams> param_info) {
+    return MessageLoopTypeToString(param_info.param.type) + "_" +
+           TaskSchedulerAvailabilityToString(
+               param_info.param.task_scheduler_availability);
+  }
+
+ protected:
+  MessageLoop::Type GetMessageLoopType() { return GetParam().type; }
+
+ private:
+  static std::string MessageLoopTypeToString(MessageLoop::Type type) {
+    switch (type) {
+      case MessageLoop::TYPE_DEFAULT:
+        return "Default";
+      case MessageLoop::TYPE_IO:
+        return "IO";
+      case MessageLoop::TYPE_UI:
+        return "UI";
+      case MessageLoop::TYPE_CUSTOM:
+#if defined(OS_ANDROID)
+      case MessageLoop::TYPE_JAVA:
+#endif  // defined(OS_ANDROID)
+        break;
+    }
+    NOTREACHED();
+    return "NotSupported";
+  }
+
+  TaskSchedulerAvailability GetTaskSchedulerAvailability() {
+    return GetParam().task_scheduler_availability;
+  }
+
+  DISALLOW_COPY_AND_ASSIGN(MessageLoopTypedTest);
+};
+
+}  // namespace
+
+TEST_P(MessageLoopTypedTest, PostTask) {
+  MessageLoop loop(GetMessageLoopType());
+  // Post the test tasks to the message loop.
+  scoped_refptr<Foo> foo(new Foo());
+  std::string a("a"), b("b"), c("c"), d("d");
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          BindOnce(&Foo::Test0, foo));
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce(&Foo::Test1ConstRef, foo, a));
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          BindOnce(&Foo::Test1Ptr, foo, &b));
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          BindOnce(&Foo::Test1Int, foo, 100));
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce(&Foo::Test2Ptr, foo, &a, &c));
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce(&Foo::Test2Mixed, foo, a, &d));
+  // After all tests, post a task that will quit the message loop.
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce(&RunLoop::QuitCurrentWhenIdleDeprecated));
+
+  // Now kick things off
+  RunLoop().Run();
+
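+  // Expected state: test_count = 1 (Test0) + 1 (Test1ConstRef) + 1 (Test1Ptr)
+  // + 100 (Test1Int) + 1 (Test2Ptr) + 1 (Test2Mixed) = 105, and result is
+  // "a" + "b" + "ac" + "ad" = "abacad".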
+  EXPECT_EQ(foo->test_count(), 105);
+  EXPECT_EQ(foo->result(), "abacad");
+}
+
+TEST_P(MessageLoopTypedTest, PostDelayedTask_Basic) {
+  MessageLoop loop(GetMessageLoopType());
+
+  // Test that PostDelayedTask results in a delayed task.
+
+  const TimeDelta kDelay = TimeDelta::FromMilliseconds(100);
+
+  int num_tasks = 1;
+  TimeTicks run_time;
+
+  TimeTicks time_before_run = TimeTicks::Now();
+  loop.task_runner()->PostDelayedTask(
+      FROM_HERE, BindOnce(&RecordRunTimeFunc, &run_time, &num_tasks), kDelay);
+  RunLoop().Run();
+  TimeTicks time_after_run = TimeTicks::Now();
+
+  EXPECT_EQ(0, num_tasks);
+  EXPECT_LT(kDelay, time_after_run - time_before_run);
+}
+
+TEST_P(MessageLoopTypedTest, PostDelayedTask_InDelayOrder) {
+  MessageLoop loop(GetMessageLoopType());
+
+  // Test that two tasks with different delays run in the right order.
+  int num_tasks = 2;
+  TimeTicks run_time1, run_time2;
+
+  loop.task_runner()->PostDelayedTask(
+      FROM_HERE, BindOnce(&RecordRunTimeFunc, &run_time1, &num_tasks),
+      TimeDelta::FromMilliseconds(200));
+  // If we get a large pause in execution (due to a context switch) here, this
+  // test could fail.
+  loop.task_runner()->PostDelayedTask(
+      FROM_HERE, BindOnce(&RecordRunTimeFunc, &run_time2, &num_tasks),
+      TimeDelta::FromMilliseconds(10));
+
+  RunLoop().Run();
+  EXPECT_EQ(0, num_tasks);
+
+  EXPECT_LT(run_time2, run_time1);
+}
+
+TEST_P(MessageLoopTypedTest, PostDelayedTask_InPostOrder) {
+  MessageLoop loop(GetMessageLoopType());
+
+  // Test that two tasks with the same delay run in the order in which they
+  // were posted.
+  //
+  // NOTE: This is actually an approximate test since the API only takes a
+  // "delay" parameter, so we are not exactly simulating two tasks that get
+  // posted at the exact same time.  It would be nice if the API allowed us to
+  // specify the desired run time.
+
+  const TimeDelta kDelay = TimeDelta::FromMilliseconds(100);
+
+  int num_tasks = 2;
+  TimeTicks run_time1, run_time2;
+
+  loop.task_runner()->PostDelayedTask(
+      FROM_HERE, BindOnce(&RecordRunTimeFunc, &run_time1, &num_tasks), kDelay);
+  loop.task_runner()->PostDelayedTask(
+      FROM_HERE, BindOnce(&RecordRunTimeFunc, &run_time2, &num_tasks), kDelay);
+
+  RunLoop().Run();
+  EXPECT_EQ(0, num_tasks);
+
+  EXPECT_LT(run_time1, run_time2);
+}
+
+TEST_P(MessageLoopTypedTest, PostDelayedTask_InPostOrder_2) {
+  MessageLoop loop(GetMessageLoopType());
+
+  // Test that a delayed task still runs after normal tasks even if the
+  // normal tasks take a long time to run.
+
+  const TimeDelta kPause = TimeDelta::FromMilliseconds(50);
+
+  int num_tasks = 2;
+  TimeTicks run_time;
+
+  loop.task_runner()->PostTask(FROM_HERE,
+                               BindOnce(&SlowFunc, kPause, &num_tasks));
+  loop.task_runner()->PostDelayedTask(
+      FROM_HERE, BindOnce(&RecordRunTimeFunc, &run_time, &num_tasks),
+      TimeDelta::FromMilliseconds(10));
+
+  TimeTicks time_before_run = TimeTicks::Now();
+  RunLoop().Run();
+  TimeTicks time_after_run = TimeTicks::Now();
+
+  EXPECT_EQ(0, num_tasks);
+
+  EXPECT_LT(kPause, time_after_run - time_before_run);
+}
+
+TEST_P(MessageLoopTypedTest, PostDelayedTask_InPostOrder_3) {
+  MessageLoop loop(GetMessageLoopType());
+
+  // Test that a delayed task still runs after a pile of normal tasks.  The key
+  // difference between this test and the previous one is that here we return
+  // to the MessageLoop a lot, giving it plenty of opportunities to maybe run
+  // the delayed task.  It should know not to do so until the delayed task's
+  // delay has passed.
+
+  int num_tasks = 11;
+  TimeTicks run_time1, run_time2;
+
+  // Clutter the message loop with tasks.
+  for (int i = 1; i < num_tasks; ++i) {
+    loop.task_runner()->PostTask(
+        FROM_HERE, BindOnce(&RecordRunTimeFunc, &run_time1, &num_tasks));
+  }
+
+  loop.task_runner()->PostDelayedTask(
+      FROM_HERE, BindOnce(&RecordRunTimeFunc, &run_time2, &num_tasks),
+      TimeDelta::FromMilliseconds(1));
+
+  RunLoop().Run();
+  EXPECT_EQ(0, num_tasks);
+
+  EXPECT_GT(run_time2, run_time1);
+}
+
+TEST_P(MessageLoopTypedTest, PostDelayedTask_SharedTimer) {
+  MessageLoop loop(GetMessageLoopType());
+
+  // Test that the interval of the timer, used to run the next delayed task, is
+  // set to a value corresponding to when the next delayed task should run.
+
+  // By setting num_tasks to 1, we ensure that the first task to run causes the
+  // run loop to exit.
+  int num_tasks = 1;
+  TimeTicks run_time1, run_time2;
+
+  loop.task_runner()->PostDelayedTask(
+      FROM_HERE, BindOnce(&RecordRunTimeFunc, &run_time1, &num_tasks),
+      TimeDelta::FromSeconds(1000));
+  loop.task_runner()->PostDelayedTask(
+      FROM_HERE, BindOnce(&RecordRunTimeFunc, &run_time2, &num_tasks),
+      TimeDelta::FromMilliseconds(10));
+
+  TimeTicks start_time = TimeTicks::Now();
+
+  RunLoop().Run();
+  EXPECT_EQ(0, num_tasks);
+
+  // Ensure that we ran in far less time than the slower timer.
+  TimeDelta total_time = TimeTicks::Now() - start_time;
+  EXPECT_GT(5000, total_time.InMilliseconds());
+
+  // In case both timers somehow run at nearly the same time, sleep a little
+  // and then run all pending tasks to force them both to have run.  This just
+  // encourages any latent flakiness to show itself.
+  PlatformThread::Sleep(TimeDelta::FromMilliseconds(100));
+  RunLoop().RunUntilIdle();
+
+  EXPECT_TRUE(run_time1.is_null());
+  EXPECT_FALSE(run_time2.is_null());
+}
+
+namespace {
+
+// This is used to inject a test point for recording the destructor calls for
+// Closure objects sent to MessageLoop::PostTask(). It is awkward usage since
+// we are trying to hook the actual destruction, which is not a common
+// operation.
+class RecordDeletionProbe : public RefCounted<RecordDeletionProbe> {
+ public:
+  RecordDeletionProbe(RecordDeletionProbe* post_on_delete, bool* was_deleted)
+      : post_on_delete_(post_on_delete), was_deleted_(was_deleted) {}
+  void Run() {}
+
+ private:
+  friend class RefCounted<RecordDeletionProbe>;
+
+  ~RecordDeletionProbe() {
+    *was_deleted_ = true;
+    if (post_on_delete_.get())
+      ThreadTaskRunnerHandle::Get()->PostTask(
+          FROM_HERE, BindOnce(&RecordDeletionProbe::Run, post_on_delete_));
+  }
+
+  scoped_refptr<RecordDeletionProbe> post_on_delete_;
+  bool* was_deleted_;
+};
+
+}  // namespace
+
+/* TODO(darin): MessageLoop does not support deleting all tasks in the */
+/* destructor. */
+/* Fails, http://crbug.com/50272. */
+TEST_P(MessageLoopTypedTest, DISABLED_EnsureDeletion) {
+  bool a_was_deleted = false;
+  bool b_was_deleted = false;
+  {
+    MessageLoop loop(GetMessageLoopType());
+    loop.task_runner()->PostTask(
+        FROM_HERE, BindOnce(&RecordDeletionProbe::Run,
+                            new RecordDeletionProbe(nullptr, &a_was_deleted)));
+    // TODO(ajwong): Do we really need 1000ms here?
+    loop.task_runner()->PostDelayedTask(
+        FROM_HERE,
+        BindOnce(&RecordDeletionProbe::Run,
+                 new RecordDeletionProbe(nullptr, &b_was_deleted)),
+        TimeDelta::FromMilliseconds(1000));
+  }
+  EXPECT_TRUE(a_was_deleted);
+  EXPECT_TRUE(b_was_deleted);
+}
+
+/* TODO(darin): MessageLoop does not support deleting all tasks in the */
+/* destructor. */
+/* Fails, http://crbug.com/50272. */
+TEST_P(MessageLoopTypedTest, DISABLED_EnsureDeletion_Chain) {
+  bool a_was_deleted = false;
+  bool b_was_deleted = false;
+  bool c_was_deleted = false;
+  {
+    MessageLoop loop(GetMessageLoopType());
+    // The scoped_refptr for each of the below is held either by the chained
+    // RecordDeletionProbe, or the bound RecordDeletionProbe::Run() callback.
+    RecordDeletionProbe* a = new RecordDeletionProbe(nullptr, &a_was_deleted);
+    RecordDeletionProbe* b = new RecordDeletionProbe(a, &b_was_deleted);
+    RecordDeletionProbe* c = new RecordDeletionProbe(b, &c_was_deleted);
+    loop.task_runner()->PostTask(FROM_HERE,
+                                 BindOnce(&RecordDeletionProbe::Run, c));
+  }
+  EXPECT_TRUE(a_was_deleted);
+  EXPECT_TRUE(b_was_deleted);
+  EXPECT_TRUE(c_was_deleted);
+}
+
+namespace {
+
+void NestingFunc(int* depth) {
+  if (*depth > 0) {
+    *depth -= 1;
+    ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                            BindOnce(&NestingFunc, depth));
+
+    MessageLoopCurrent::Get()->SetNestableTasksAllowed(true);
+    RunLoop().Run();
+  }
+  base::RunLoop::QuitCurrentWhenIdleDeprecated();
+}
+
+}  // namespace
+
+TEST_P(MessageLoopTypedTest, Nesting) {
+  MessageLoop loop(GetMessageLoopType());
+
+  int depth = 50;
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          BindOnce(&NestingFunc, &depth));
+  RunLoop().Run();
+  EXPECT_EQ(depth, 0);
+}
+
+TEST_P(MessageLoopTypedTest, RecursiveDenial1) {
+  MessageLoop loop(GetMessageLoopType());
+
+  EXPECT_TRUE(MessageLoopCurrent::Get()->NestableTasksAllowed());
+  TaskList order;
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce(&RecursiveFunc, &order, 1, 2, false));
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce(&RecursiveFunc, &order, 2, 2, false));
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          BindOnce(&QuitFunc, &order, 3));
+
+  RunLoop().Run();
+
+  // FIFO order.
+  ASSERT_EQ(14U, order.Size());
+  EXPECT_EQ(order.Get(0), TaskItem(RECURSIVE, 1, true));
+  EXPECT_EQ(order.Get(1), TaskItem(RECURSIVE, 1, false));
+  EXPECT_EQ(order.Get(2), TaskItem(RECURSIVE, 2, true));
+  EXPECT_EQ(order.Get(3), TaskItem(RECURSIVE, 2, false));
+  EXPECT_EQ(order.Get(4), TaskItem(QUITMESSAGELOOP, 3, true));
+  EXPECT_EQ(order.Get(5), TaskItem(QUITMESSAGELOOP, 3, false));
+  EXPECT_EQ(order.Get(6), TaskItem(RECURSIVE, 1, true));
+  EXPECT_EQ(order.Get(7), TaskItem(RECURSIVE, 1, false));
+  EXPECT_EQ(order.Get(8), TaskItem(RECURSIVE, 2, true));
+  EXPECT_EQ(order.Get(9), TaskItem(RECURSIVE, 2, false));
+  EXPECT_EQ(order.Get(10), TaskItem(RECURSIVE, 1, true));
+  EXPECT_EQ(order.Get(11), TaskItem(RECURSIVE, 1, false));
+  EXPECT_EQ(order.Get(12), TaskItem(RECURSIVE, 2, true));
+  EXPECT_EQ(order.Get(13), TaskItem(RECURSIVE, 2, false));
+}
+
+namespace {
+
+void RecursiveSlowFunc(TaskList* order,
+                       int cookie,
+                       int depth,
+                       bool is_reentrant) {
+  RecursiveFunc(order, cookie, depth, is_reentrant);
+  PlatformThread::Sleep(TimeDelta::FromMilliseconds(10));
+}
+
+void OrderedFunc(TaskList* order, int cookie) {
+  order->RecordStart(ORDERED, cookie);
+  order->RecordEnd(ORDERED, cookie);
+}
+
+}  // namespace
+
+TEST_P(MessageLoopTypedTest, RecursiveDenial3) {
+  MessageLoop loop(GetMessageLoopType());
+
+  EXPECT_TRUE(MessageLoopCurrent::Get()->NestableTasksAllowed());
+  TaskList order;
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce(&RecursiveSlowFunc, &order, 1, 2, false));
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce(&RecursiveSlowFunc, &order, 2, 2, false));
+  ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+      FROM_HERE, BindOnce(&OrderedFunc, &order, 3),
+      TimeDelta::FromMilliseconds(5));
+  ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+      FROM_HERE, BindOnce(&QuitFunc, &order, 4),
+      TimeDelta::FromMilliseconds(5));
+
+  RunLoop().Run();
+
+  // FIFO order.
+  ASSERT_EQ(16U, order.Size());
+  EXPECT_EQ(order.Get(0), TaskItem(RECURSIVE, 1, true));
+  EXPECT_EQ(order.Get(1), TaskItem(RECURSIVE, 1, false));
+  EXPECT_EQ(order.Get(2), TaskItem(RECURSIVE, 2, true));
+  EXPECT_EQ(order.Get(3), TaskItem(RECURSIVE, 2, false));
+  EXPECT_EQ(order.Get(4), TaskItem(RECURSIVE, 1, true));
+  EXPECT_EQ(order.Get(5), TaskItem(RECURSIVE, 1, false));
+  EXPECT_EQ(order.Get(6), TaskItem(ORDERED, 3, true));
+  EXPECT_EQ(order.Get(7), TaskItem(ORDERED, 3, false));
+  EXPECT_EQ(order.Get(8), TaskItem(RECURSIVE, 2, true));
+  EXPECT_EQ(order.Get(9), TaskItem(RECURSIVE, 2, false));
+  EXPECT_EQ(order.Get(10), TaskItem(QUITMESSAGELOOP, 4, true));
+  EXPECT_EQ(order.Get(11), TaskItem(QUITMESSAGELOOP, 4, false));
+  EXPECT_EQ(order.Get(12), TaskItem(RECURSIVE, 1, true));
+  EXPECT_EQ(order.Get(13), TaskItem(RECURSIVE, 1, false));
+  EXPECT_EQ(order.Get(14), TaskItem(RECURSIVE, 2, true));
+  EXPECT_EQ(order.Get(15), TaskItem(RECURSIVE, 2, false));
+}
+
+TEST_P(MessageLoopTypedTest, RecursiveSupport1) {
+  MessageLoop loop(GetMessageLoopType());
+
+  TaskList order;
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce(&RecursiveFunc, &order, 1, 2, true));
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce(&RecursiveFunc, &order, 2, 2, true));
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          BindOnce(&QuitFunc, &order, 3));
+
+  RunLoop().Run();
+
+  // FIFO order.
+  ASSERT_EQ(14U, order.Size());
+  EXPECT_EQ(order.Get(0), TaskItem(RECURSIVE, 1, true));
+  EXPECT_EQ(order.Get(1), TaskItem(RECURSIVE, 1, false));
+  EXPECT_EQ(order.Get(2), TaskItem(RECURSIVE, 2, true));
+  EXPECT_EQ(order.Get(3), TaskItem(RECURSIVE, 2, false));
+  EXPECT_EQ(order.Get(4), TaskItem(QUITMESSAGELOOP, 3, true));
+  EXPECT_EQ(order.Get(5), TaskItem(QUITMESSAGELOOP, 3, false));
+  EXPECT_EQ(order.Get(6), TaskItem(RECURSIVE, 1, true));
+  EXPECT_EQ(order.Get(7), TaskItem(RECURSIVE, 1, false));
+  EXPECT_EQ(order.Get(8), TaskItem(RECURSIVE, 2, true));
+  EXPECT_EQ(order.Get(9), TaskItem(RECURSIVE, 2, false));
+  EXPECT_EQ(order.Get(10), TaskItem(RECURSIVE, 1, true));
+  EXPECT_EQ(order.Get(11), TaskItem(RECURSIVE, 1, false));
+  EXPECT_EQ(order.Get(12), TaskItem(RECURSIVE, 2, true));
+  EXPECT_EQ(order.Get(13), TaskItem(RECURSIVE, 2, false));
+}
+
+// Tests that non-nestable tasks run in FIFO order if there are no nested
+// loops.
+TEST_P(MessageLoopTypedTest, NonNestableWithNoNesting) {
+  MessageLoop loop(GetMessageLoopType());
+
+  TaskList order;
+
+  ThreadTaskRunnerHandle::Get()->PostNonNestableTask(
+      FROM_HERE, BindOnce(&OrderedFunc, &order, 1));
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          BindOnce(&OrderedFunc, &order, 2));
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          BindOnce(&QuitFunc, &order, 3));
+  RunLoop().Run();
+
+  // FIFO order.
+  ASSERT_EQ(6U, order.Size());
+  EXPECT_EQ(order.Get(0), TaskItem(ORDERED, 1, true));
+  EXPECT_EQ(order.Get(1), TaskItem(ORDERED, 1, false));
+  EXPECT_EQ(order.Get(2), TaskItem(ORDERED, 2, true));
+  EXPECT_EQ(order.Get(3), TaskItem(ORDERED, 2, false));
+  EXPECT_EQ(order.Get(4), TaskItem(QUITMESSAGELOOP, 3, true));
+  EXPECT_EQ(order.Get(5), TaskItem(QUITMESSAGELOOP, 3, false));
+}
+
+namespace {
+
+void FuncThatPumps(TaskList* order, int cookie) {
+  order->RecordStart(PUMPS, cookie);
+  RunLoop(RunLoop::Type::kNestableTasksAllowed).RunUntilIdle();
+  order->RecordEnd(PUMPS, cookie);
+}
+
+void SleepFunc(TaskList* order, int cookie, TimeDelta delay) {
+  order->RecordStart(SLEEP, cookie);
+  PlatformThread::Sleep(delay);
+  order->RecordEnd(SLEEP, cookie);
+}
+
+}  // namespace
+
+// Tests that non-nestable tasks don't run while there is nested-loop code on
+// the call stack.
+TEST_P(MessageLoopTypedTest, NonNestableDelayedInNestedLoop) {
+  MessageLoop loop(GetMessageLoopType());
+
+  TaskList order;
+
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          BindOnce(&FuncThatPumps, &order, 1));
+  ThreadTaskRunnerHandle::Get()->PostNonNestableTask(
+      FROM_HERE, BindOnce(&OrderedFunc, &order, 2));
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          BindOnce(&OrderedFunc, &order, 3));
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE,
+      BindOnce(&SleepFunc, &order, 4, TimeDelta::FromMilliseconds(50)));
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          BindOnce(&OrderedFunc, &order, 5));
+  ThreadTaskRunnerHandle::Get()->PostNonNestableTask(
+      FROM_HERE, BindOnce(&QuitFunc, &order, 6));
+
+  RunLoop().Run();
+
+  // FIFO order.
+  ASSERT_EQ(12U, order.Size());
+  EXPECT_EQ(order.Get(0), TaskItem(PUMPS, 1, true));
+  EXPECT_EQ(order.Get(1), TaskItem(ORDERED, 3, true));
+  EXPECT_EQ(order.Get(2), TaskItem(ORDERED, 3, false));
+  EXPECT_EQ(order.Get(3), TaskItem(SLEEP, 4, true));
+  EXPECT_EQ(order.Get(4), TaskItem(SLEEP, 4, false));
+  EXPECT_EQ(order.Get(5), TaskItem(ORDERED, 5, true));
+  EXPECT_EQ(order.Get(6), TaskItem(ORDERED, 5, false));
+  EXPECT_EQ(order.Get(7), TaskItem(PUMPS, 1, false));
+  EXPECT_EQ(order.Get(8), TaskItem(ORDERED, 2, true));
+  EXPECT_EQ(order.Get(9), TaskItem(ORDERED, 2, false));
+  EXPECT_EQ(order.Get(10), TaskItem(QUITMESSAGELOOP, 6, true));
+  EXPECT_EQ(order.Get(11), TaskItem(QUITMESSAGELOOP, 6, false));
+}
+
+namespace {
+
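+// Runs a nested RunLoop; ScopedNestableTaskAllower temporarily re-allows
+// application tasks so that the nested loop can actually process them.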
+void FuncThatRuns(TaskList* order, int cookie, RunLoop* run_loop) {
+  order->RecordStart(RUNS, cookie);
+  {
+    MessageLoopCurrent::ScopedNestableTaskAllower allow;
+    run_loop->Run();
+  }
+  order->RecordEnd(RUNS, cookie);
+}
+
+void FuncThatQuitsNow() {
+  base::RunLoop::QuitCurrentDeprecated();
+}
+
+}  // namespace
+
+// Tests that a RunLoop quit only quits the corresponding RunLoop::Run().
+TEST_P(MessageLoopTypedTest, QuitNow) {
+  MessageLoop loop(GetMessageLoopType());
+
+  TaskList order;
+
+  RunLoop run_loop;
+
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce(&FuncThatRuns, &order, 1, Unretained(&run_loop)));
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          BindOnce(&OrderedFunc, &order, 2));
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          BindOnce(&FuncThatQuitsNow));
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          BindOnce(&OrderedFunc, &order, 3));
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          BindOnce(&FuncThatQuitsNow));
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce(&OrderedFunc, &order, 4));  // never runs
+
+  RunLoop().Run();
+
+  ASSERT_EQ(6U, order.Size());
+  int task_index = 0;
+  EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, true));
+  EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 2, true));
+  EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 2, false));
+  EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, false));
+  EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 3, true));
+  EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 3, false));
+  EXPECT_EQ(static_cast<size_t>(task_index), order.Size());
+}
+
+// Tests that a RunLoop quit only quits the corresponding RunLoop::Run().
+TEST_P(MessageLoopTypedTest, RunLoopQuitTop) {
+  MessageLoop loop(GetMessageLoopType());
+
+  TaskList order;
+
+  RunLoop outer_run_loop;
+  RunLoop nested_run_loop;
+
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE,
+      BindOnce(&FuncThatRuns, &order, 1, Unretained(&nested_run_loop)));
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          outer_run_loop.QuitClosure());
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          BindOnce(&OrderedFunc, &order, 2));
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          nested_run_loop.QuitClosure());
+
+  outer_run_loop.Run();
+
+  ASSERT_EQ(4U, order.Size());
+  int task_index = 0;
+  EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, true));
+  EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 2, true));
+  EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 2, false));
+  EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, false));
+  EXPECT_EQ(static_cast<size_t>(task_index), order.Size());
+}
+
+// Tests that a RunLoop quit only quits the corresponding RunLoop::Run().
+TEST_P(MessageLoopTypedTest, RunLoopQuitNested) {
+  MessageLoop loop(GetMessageLoopType());
+
+  TaskList order;
+
+  RunLoop outer_run_loop;
+  RunLoop nested_run_loop;
+
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE,
+      BindOnce(&FuncThatRuns, &order, 1, Unretained(&nested_run_loop)));
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          nested_run_loop.QuitClosure());
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          BindOnce(&OrderedFunc, &order, 2));
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          outer_run_loop.QuitClosure());
+
+  outer_run_loop.Run();
+
+  ASSERT_EQ(4U, order.Size());
+  int task_index = 0;
+  EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, true));
+  EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, false));
+  EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 2, true));
+  EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 2, false));
+  EXPECT_EQ(static_cast<size_t>(task_index), order.Size());
+}
+
+// Quits current loop and immediately runs a nested loop.
+void QuitAndRunNestedLoop(TaskList* order,
+                          int cookie,
+                          RunLoop* outer_run_loop,
+                          RunLoop* nested_run_loop) {
+  order->RecordStart(RUNS, cookie);
+  outer_run_loop->Quit();
+  nested_run_loop->Run();
+  order->RecordEnd(RUNS, cookie);
+}
+
+// Test that we can run nested loop after quitting the current one.
+TEST_P(MessageLoopTypedTest, RunLoopNestedAfterQuit) {
+  MessageLoop loop(GetMessageLoopType());
+
+  TaskList order;
+
+  RunLoop outer_run_loop;
+  RunLoop nested_run_loop;
+
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          nested_run_loop.QuitClosure());
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce(&QuitAndRunNestedLoop, &order, 1, &outer_run_loop,
+                          &nested_run_loop));
+
+  outer_run_loop.Run();
+
+  ASSERT_EQ(2U, order.Size());
+  int task_index = 0;
+  EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, true));
+  EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, false));
+  EXPECT_EQ(static_cast<size_t>(task_index), order.Size());
+}
+
+// Tests that a RunLoop quit only quits the corresponding RunLoop::Run().
+TEST_P(MessageLoopTypedTest, RunLoopQuitBogus) {
+  MessageLoop loop(GetMessageLoopType());
+
+  TaskList order;
+
+  RunLoop outer_run_loop;
+  RunLoop nested_run_loop;
+  RunLoop bogus_run_loop;
+
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE,
+      BindOnce(&FuncThatRuns, &order, 1, Unretained(&nested_run_loop)));
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          bogus_run_loop.QuitClosure());
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          BindOnce(&OrderedFunc, &order, 2));
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          outer_run_loop.QuitClosure());
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          nested_run_loop.QuitClosure());
+
+  outer_run_loop.Run();
+
+  ASSERT_EQ(4U, order.Size());
+  int task_index = 0;
+  EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, true));
+  EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 2, true));
+  EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 2, false));
+  EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, false));
+  EXPECT_EQ(static_cast<size_t>(task_index), order.Size());
+}
+
+// Tests that a RunLoop quit only quits the corresponding RunLoop::Run().
+TEST_P(MessageLoopTypedTest, RunLoopQuitDeep) {
+  MessageLoop loop(GetMessageLoopType());
+
+  TaskList order;
+
+  RunLoop outer_run_loop;
+  RunLoop nested_loop1;
+  RunLoop nested_loop2;
+  RunLoop nested_loop3;
+  RunLoop nested_loop4;
+
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce(&FuncThatRuns, &order, 1, Unretained(&nested_loop1)));
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce(&FuncThatRuns, &order, 2, Unretained(&nested_loop2)));
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce(&FuncThatRuns, &order, 3, Unretained(&nested_loop3)));
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce(&FuncThatRuns, &order, 4, Unretained(&nested_loop4)));
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          BindOnce(&OrderedFunc, &order, 5));
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          outer_run_loop.QuitClosure());
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          BindOnce(&OrderedFunc, &order, 6));
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          nested_loop1.QuitClosure());
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          BindOnce(&OrderedFunc, &order, 7));
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          nested_loop2.QuitClosure());
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          BindOnce(&OrderedFunc, &order, 8));
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          nested_loop3.QuitClosure());
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          BindOnce(&OrderedFunc, &order, 9));
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          nested_loop4.QuitClosure());
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          BindOnce(&OrderedFunc, &order, 10));
+
+  outer_run_loop.Run();
+
+  ASSERT_EQ(18U, order.Size());
+  int task_index = 0;
+  EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, true));
+  EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 2, true));
+  EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 3, true));
+  EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 4, true));
+  EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 5, true));
+  EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 5, false));
+  EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 6, true));
+  EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 6, false));
+  EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 7, true));
+  EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 7, false));
+  EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 8, true));
+  EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 8, false));
+  EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 9, true));
+  EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 9, false));
+  EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 4, false));
+  EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 3, false));
+  EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 2, false));
+  EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, false));
+  EXPECT_EQ(static_cast<size_t>(task_index), order.Size());
+}
+
+// Tests that RunLoop::Quit() works before Run().
+TEST_P(MessageLoopTypedTest, RunLoopQuitOrderBefore) {
+  MessageLoop loop(GetMessageLoopType());
+
+  TaskList order;
+
+  RunLoop run_loop;
+
+  run_loop.Quit();
+
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce(&OrderedFunc, &order, 1));  // never runs
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce(&FuncThatQuitsNow));  // never runs
+
+  run_loop.Run();
+
+  ASSERT_EQ(0U, order.Size());
+}
+
+// Tests that RunLoop::Quit() works during Run().
+TEST_P(MessageLoopTypedTest, RunLoopQuitOrderDuring) {
+  MessageLoop loop(GetMessageLoopType());
+
+  TaskList order;
+
+  RunLoop run_loop;
+
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          BindOnce(&OrderedFunc, &order, 1));
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, run_loop.QuitClosure());
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce(&OrderedFunc, &order, 2));  // never runs
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce(&FuncThatQuitsNow));  // never runs
+
+  run_loop.Run();
+
+  ASSERT_EQ(2U, order.Size());
+  int task_index = 0;
+  EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 1, true));
+  EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 1, false));
+  EXPECT_EQ(static_cast<size_t>(task_index), order.Size());
+}
+
+// Tests that RunLoop::Quit() works after Run().
+TEST_P(MessageLoopTypedTest, RunLoopQuitOrderAfter) {
+  MessageLoop loop(GetMessageLoopType());
+
+  TaskList order;
+
+  RunLoop run_loop;
+
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce(&FuncThatRuns, &order, 1, Unretained(&run_loop)));
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          BindOnce(&OrderedFunc, &order, 2));
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          BindOnce(&FuncThatQuitsNow));
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          BindOnce(&OrderedFunc, &order, 3));
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, run_loop.QuitClosure());  // has no affect
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          BindOnce(&OrderedFunc, &order, 4));
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          BindOnce(&FuncThatQuitsNow));
+
+  RunLoop outer_run_loop;
+  outer_run_loop.Run();
+
+  ASSERT_EQ(8U, order.Size());
+  int task_index = 0;
+  EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, true));
+  EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 2, true));
+  EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 2, false));
+  EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, false));
+  EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 3, true));
+  EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 3, false));
+  EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 4, true));
+  EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 4, false));
+  EXPECT_EQ(static_cast<size_t>(task_index), order.Size());
+}
+
+// There was a bug in the MessagePumpGLib where posting tasks recursively
+// caused the message loop to hang, due to the buffer of the internal pipe
+// becoming full. Test all MessageLoop types to ensure this issue does not
+// exist in other MessagePumps.
+//
+// On Linux, the pipe buffer size is 64KiB by default. The bug caused one
+// byte accumulated in the pipe per two posts, so we should repeat 128K
+// times to reproduce the bug.
+TEST_P(MessageLoopTypedTest, RecursivePosts) {
+  const int kNumTimes = 1 << 17;
+  MessageLoop loop(GetMessageLoopType());
+  loop.task_runner()->PostTask(FROM_HERE,
+                               BindOnce(&PostNTasksThenQuit, kNumTimes));
+  RunLoop().Run();
+}
+
+TEST_P(MessageLoopTypedTest, NestableTasksAllowedAtTopLevel) {
+  MessageLoop loop(GetMessageLoopType());
+  EXPECT_TRUE(MessageLoopCurrent::Get()->NestableTasksAllowed());
+}
+
+// Nestable tasks shouldn't be allowed to run reentrantly by default (regression
+// test for https://crbug.com/754112).
+TEST_P(MessageLoopTypedTest, NestableTasksDisallowedByDefault) {
+  MessageLoop loop(GetMessageLoopType());
+  RunLoop run_loop;
+  loop.task_runner()->PostTask(
+      FROM_HERE,
+      BindOnce(
+          [](RunLoop* run_loop) {
+            EXPECT_FALSE(MessageLoopCurrent::Get()->NestableTasksAllowed());
+            run_loop->Quit();
+          },
+          Unretained(&run_loop)));
+  run_loop.Run();
+}
+
+TEST_P(MessageLoopTypedTest, NestableTasksProcessedWhenRunLoopAllows) {
+  MessageLoop loop(GetMessageLoopType());
+  RunLoop run_loop;
+  loop.task_runner()->PostTask(
+      FROM_HERE,
+      BindOnce(
+          [](RunLoop* run_loop) {
+            // This test would hang if this RunLoop wasn't of type
+            // kNestableTasksAllowed (i.e. this is testing that this is
+            // processed and doesn't hang).
+            RunLoop nested_run_loop(RunLoop::Type::kNestableTasksAllowed);
+            ThreadTaskRunnerHandle::Get()->PostTask(
+                FROM_HERE,
+                BindOnce(
+                    [](RunLoop* nested_run_loop) {
+                      // Each additional layer of application task nesting
+                      // requires its own allowance. The kNestableTasksAllowed
+                      // RunLoop allowed this task to be processed but further
+                      // nestable tasks are by default disallowed from this
+                      // layer.
+                      EXPECT_FALSE(
+                          MessageLoopCurrent::Get()->NestableTasksAllowed());
+                      nested_run_loop->Quit();
+                    },
+                    Unretained(&nested_run_loop)));
+            nested_run_loop.Run();
+
+            run_loop->Quit();
+          },
+          Unretained(&run_loop)));
+  run_loop.Run();
+}
+
+TEST_P(MessageLoopTypedTest, NestableTasksAllowedExplicitlyInScope) {
+  MessageLoop loop(GetMessageLoopType());
+  RunLoop run_loop;
+  loop.task_runner()->PostTask(
+      FROM_HERE,
+      BindOnce(
+          [](RunLoop* run_loop) {
+            {
+              MessageLoopCurrent::ScopedNestableTaskAllower
+                  allow_nestable_tasks;
+              EXPECT_TRUE(MessageLoopCurrent::Get()->NestableTasksAllowed());
+            }
+            EXPECT_FALSE(MessageLoopCurrent::Get()->NestableTasksAllowed());
+            run_loop->Quit();
+          },
+          Unretained(&run_loop)));
+  run_loop.Run();
+}
+
+TEST_P(MessageLoopTypedTest, NestableTasksAllowedManually) {
+  MessageLoop loop(GetMessageLoopType());
+  RunLoop run_loop;
+  loop.task_runner()->PostTask(
+      FROM_HERE,
+      BindOnce(
+          [](RunLoop* run_loop) {
+            EXPECT_FALSE(MessageLoopCurrent::Get()->NestableTasksAllowed());
+            MessageLoopCurrent::Get()->SetNestableTasksAllowed(true);
+            EXPECT_TRUE(MessageLoopCurrent::Get()->NestableTasksAllowed());
+            MessageLoopCurrent::Get()->SetNestableTasksAllowed(false);
+            EXPECT_FALSE(MessageLoopCurrent::Get()->NestableTasksAllowed());
+            run_loop->Quit();
+          },
+          Unretained(&run_loop)));
+  run_loop.Run();
+}
+
+INSTANTIATE_TEST_CASE_P(
+    ,
+    MessageLoopTypedTest,
+    ::testing::Values(MessageLoopTypedTestParams(
+                          MessageLoop::TYPE_DEFAULT,
+                          TaskSchedulerAvailability::NO_TASK_SCHEDULER),
+                      MessageLoopTypedTestParams(
+                          MessageLoop::TYPE_IO,
+                          TaskSchedulerAvailability::NO_TASK_SCHEDULER),
+                      MessageLoopTypedTestParams(
+                          MessageLoop::TYPE_UI,
+                          TaskSchedulerAvailability::NO_TASK_SCHEDULER),
+                      MessageLoopTypedTestParams(
+                          MessageLoop::TYPE_DEFAULT,
+                          TaskSchedulerAvailability::WITH_TASK_SCHEDULER),
+                      MessageLoopTypedTestParams(
+                          MessageLoop::TYPE_IO,
+                          TaskSchedulerAvailability::WITH_TASK_SCHEDULER),
+                      MessageLoopTypedTestParams(
+                          MessageLoop::TYPE_UI,
+                          TaskSchedulerAvailability::WITH_TASK_SCHEDULER)),
+    MessageLoopTypedTest::ParamInfoToString);
+
+#if defined(OS_WIN)
+// Verifies that the MessageLoop ignores WM_QUIT, rather than quitting.
+// Users of MessageLoop typically expect to control when their RunLoops stop
+// Run()ning explicitly, via QuitClosure() etc (see https://crbug.com/720078)
+TEST_P(MessageLoopTest, WmQuitIsIgnored) {
+  MessageLoop loop(MessageLoop::TYPE_UI);
+  RunLoop run_loop;
+  // Post a WM_QUIT message to the current thread.
+  ::PostQuitMessage(0);
+
+  // Post a task to the current thread, with a small delay to make it less
+  // likely that we process the posted task before looking for WM_* messages.
+  bool task_was_run = false;
+  loop.task_runner()->PostDelayedTask(
+      FROM_HERE,
+      BindOnce(
+          [](bool* flag, OnceClosure closure) {
+            *flag = true;
+            std::move(closure).Run();
+          },
+          &task_was_run, run_loop.QuitClosure()),
+      TestTimeouts::tiny_timeout());
+
+  // Run the loop, and ensure that the posted task is processed before we quit.
+  run_loop.Run();
+  EXPECT_TRUE(task_was_run);
+}
+
+TEST_P(MessageLoopTest, PostDelayedTask_SharedTimer_SubPump) {
+  RunTest_PostDelayedTask_SharedTimer_SubPump();
+}
+
+// This test occasionally hangs. See http://crbug.com/44567.
+TEST_P(MessageLoopTest, DISABLED_RecursiveDenial2) {
+  RunTest_RecursiveDenial2(MessageLoop::TYPE_DEFAULT);
+  RunTest_RecursiveDenial2(MessageLoop::TYPE_UI);
+  RunTest_RecursiveDenial2(MessageLoop::TYPE_IO);
+}
+
+TEST_P(MessageLoopTest, RecursiveSupport2) {
+  // This test requires a UI loop.
+  RunTest_RecursiveSupport2(MessageLoop::TYPE_UI);
+}
+#endif  // defined(OS_WIN)
+
+TEST_P(MessageLoopTest, TaskObserver) {
+  const int kNumPosts = 6;
+  DummyTaskObserver observer(kNumPosts);
+
+  MessageLoop loop;
+  loop.AddTaskObserver(&observer);
+  loop.task_runner()->PostTask(FROM_HERE,
+                               BindOnce(&PostNTasksThenQuit, kNumPosts));
+  RunLoop().Run();
+  loop.RemoveTaskObserver(&observer);
+
+  EXPECT_EQ(kNumPosts, observer.num_tasks_started());
+  EXPECT_EQ(kNumPosts, observer.num_tasks_processed());
+}
+
+#if defined(OS_WIN)
+TEST_P(MessageLoopTest, IOHandler) {
+  RunTest_IOHandler();
+}
+
+TEST_P(MessageLoopTest, WaitForIO) {
+  RunTest_WaitForIO();
+}
+
+TEST_P(MessageLoopTest, HighResolutionTimer) {
+  MessageLoop message_loop;
+  Time::EnableHighResolutionTimer(true);
+
+  constexpr TimeDelta kFastTimer = TimeDelta::FromMilliseconds(5);
+  constexpr TimeDelta kSlowTimer = TimeDelta::FromMilliseconds(100);
+
+  {
+    // Post a fast task to enable the high resolution timers.
+    RunLoop run_loop;
+    message_loop.task_runner()->PostDelayedTask(
+        FROM_HERE,
+        BindOnce(
+            [](RunLoop* run_loop) {
+              EXPECT_TRUE(Time::IsHighResolutionTimerInUse());
+              run_loop->QuitWhenIdle();
+            },
+            &run_loop),
+        kFastTimer);
+    run_loop.Run();
+  }
+  EXPECT_FALSE(Time::IsHighResolutionTimerInUse());
+  {
+    // Check that a slow task does not trigger the high resolution logic.
+    RunLoop run_loop;
+    message_loop.task_runner()->PostDelayedTask(
+        FROM_HERE,
+        BindOnce(
+            [](RunLoop* run_loop) {
+              EXPECT_FALSE(Time::IsHighResolutionTimerInUse());
+              run_loop->QuitWhenIdle();
+            },
+            &run_loop),
+        kSlowTimer);
+    run_loop.Run();
+  }
+  Time::EnableHighResolutionTimer(false);
+  Time::ResetHighResolutionTimerUsage();
+}
+
+#endif  // defined(OS_WIN)
+
+namespace {
+// Inject a test point for recording the destructor calls for Closure objects
+// send to MessageLoop::PostTask(). It is awkward usage since we are trying to
+// hook the actual destruction, which is not a common operation.
+class DestructionObserverProbe :
+  public RefCounted<DestructionObserverProbe> {
+ public:
+  DestructionObserverProbe(bool* task_destroyed,
+                           bool* destruction_observer_called)
+      : task_destroyed_(task_destroyed),
+        destruction_observer_called_(destruction_observer_called) {
+  }
+  virtual void Run() {
+    // This task should never run.
+    ADD_FAILURE();
+  }
+ private:
+  friend class RefCounted<DestructionObserverProbe>;
+
+  virtual ~DestructionObserverProbe() {
+    EXPECT_FALSE(*destruction_observer_called_);
+    *task_destroyed_ = true;
+  }
+
+  bool* task_destroyed_;
+  bool* destruction_observer_called_;
+};
+
+class MLDestructionObserver : public MessageLoopCurrent::DestructionObserver {
+ public:
+  MLDestructionObserver(bool* task_destroyed, bool* destruction_observer_called)
+      : task_destroyed_(task_destroyed),
+        destruction_observer_called_(destruction_observer_called),
+        task_destroyed_before_message_loop_(false) {
+  }
+  void WillDestroyCurrentMessageLoop() override {
+    task_destroyed_before_message_loop_ = *task_destroyed_;
+    *destruction_observer_called_ = true;
+  }
+  bool task_destroyed_before_message_loop() const {
+    return task_destroyed_before_message_loop_;
+  }
+ private:
+  bool* task_destroyed_;
+  bool* destruction_observer_called_;
+  bool task_destroyed_before_message_loop_;
+};
+
+}  // namespace
+
+TEST_P(MessageLoopTest, DestructionObserverTest) {
+  // Verify that the destruction observer gets called at the very end (after
+  // all the pending tasks have been destroyed).
+  MessageLoop* loop = new MessageLoop;
+  const TimeDelta kDelay = TimeDelta::FromMilliseconds(100);
+
+  bool task_destroyed = false;
+  bool destruction_observer_called = false;
+
+  MLDestructionObserver observer(&task_destroyed, &destruction_observer_called);
+  loop->AddDestructionObserver(&observer);
+  loop->task_runner()->PostDelayedTask(
+      FROM_HERE,
+      BindOnce(&DestructionObserverProbe::Run,
+               new DestructionObserverProbe(&task_destroyed,
+                                            &destruction_observer_called)),
+      kDelay);
+  delete loop;
+  EXPECT_TRUE(observer.task_destroyed_before_message_loop());
+  // The task should have been destroyed when we deleted the loop.
+  EXPECT_TRUE(task_destroyed);
+  EXPECT_TRUE(destruction_observer_called);
+}
+
+
+// Verify that MessageLoop sets ThreadMainTaskRunner::current() and it
+// posts tasks on that message loop.
+TEST_P(MessageLoopTest, ThreadMainTaskRunner) {
+  MessageLoop loop;
+
+  scoped_refptr<Foo> foo(new Foo());
+  std::string a("a");
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce(&Foo::Test1ConstRef, foo, a));
+
+  // Post quit task;
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce(&RunLoop::QuitCurrentWhenIdleDeprecated));
+
+  // Now kick things off
+  RunLoop().Run();
+
+  EXPECT_EQ(foo->test_count(), 1);
+  EXPECT_EQ(foo->result(), "a");
+}
+
+TEST_P(MessageLoopTest, IsType) {
+  MessageLoop loop(MessageLoop::TYPE_UI);
+  EXPECT_TRUE(loop.IsType(MessageLoop::TYPE_UI));
+  EXPECT_FALSE(loop.IsType(MessageLoop::TYPE_IO));
+  EXPECT_FALSE(loop.IsType(MessageLoop::TYPE_DEFAULT));
+}
+
+#if defined(OS_WIN)
+void EmptyFunction() {}
+
+void PostMultipleTasks() {
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          base::BindOnce(&EmptyFunction));
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          base::BindOnce(&EmptyFunction));
+}
+
+static const int kSignalMsg = WM_USER + 2;
+
+void PostWindowsMessage(HWND message_hwnd) {
+  PostMessage(message_hwnd, kSignalMsg, 0, 2);
+}
+
+void EndTest(bool* did_run, HWND hwnd) {
+  *did_run = true;
+  PostMessage(hwnd, WM_CLOSE, 0, 0);
+}
+
+int kMyMessageFilterCode = 0x5002;
+
+LRESULT CALLBACK TestWndProcThunk(HWND hwnd, UINT message,
+                                  WPARAM wparam, LPARAM lparam) {
+  if (message == WM_CLOSE)
+    EXPECT_TRUE(DestroyWindow(hwnd));
+  if (message != kSignalMsg)
+    return DefWindowProc(hwnd, message, wparam, lparam);
+
+  switch (lparam) {
+  case 1:
+    // First, we post a task that will post multiple no-op tasks to make sure
+    // that the pump's incoming task queue does not become empty during the
+    // test.
+    ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                            base::BindOnce(&PostMultipleTasks));
+    // Next, we post a task that posts a windows message to trigger the second
+    // stage of the test.
+    ThreadTaskRunnerHandle::Get()->PostTask(
+        FROM_HERE, base::BindOnce(&PostWindowsMessage, hwnd));
+    break;
+  case 2:
+    // Since we're about to enter a modal loop, tell the message loop that we
+    // intend to nest tasks.
+    MessageLoopCurrent::Get()->SetNestableTasksAllowed(true);
+    bool did_run = false;
+    ThreadTaskRunnerHandle::Get()->PostTask(
+        FROM_HERE, base::BindOnce(&EndTest, &did_run, hwnd));
+    // Run a nested windows-style message loop and verify that our task runs. If
+    // it doesn't, then we'll loop here until the test times out.
+    MSG msg;
+    while (GetMessage(&msg, 0, 0, 0)) {
+      if (!CallMsgFilter(&msg, kMyMessageFilterCode))
+        DispatchMessage(&msg);
+      // If this message is a WM_CLOSE, explicitly exit the modal loop. Posting
+      // a WM_QUIT should handle this, but unfortunately MessagePumpWin eats
+      // WM_QUIT messages even when running inside a modal loop.
+      if (msg.message == WM_CLOSE)
+        break;
+    }
+    EXPECT_TRUE(did_run);
+    RunLoop::QuitCurrentWhenIdleDeprecated();
+    break;
+  }
+  return 0;
+}
+
+TEST_P(MessageLoopTest, AlwaysHaveUserMessageWhenNesting) {
+  MessageLoop loop(MessageLoop::TYPE_UI);
+  HINSTANCE instance = CURRENT_MODULE();
+  WNDCLASSEX wc = {0};
+  wc.cbSize = sizeof(wc);
+  wc.lpfnWndProc = TestWndProcThunk;
+  wc.hInstance = instance;
+  wc.lpszClassName = L"MessageLoopTest_HWND";
+  ATOM atom = RegisterClassEx(&wc);
+  ASSERT_TRUE(atom);
+
+  HWND message_hwnd = CreateWindow(MAKEINTATOM(atom), 0, 0, 0, 0, 0, 0,
+                                   HWND_MESSAGE, 0, instance, 0);
+  ASSERT_TRUE(message_hwnd) << GetLastError();
+
+  ASSERT_TRUE(PostMessage(message_hwnd, kSignalMsg, 0, 1));
+
+  RunLoop().Run();
+
+  ASSERT_TRUE(UnregisterClass(MAKEINTATOM(atom), instance));
+}
+#endif  // defined(OS_WIN)
+
+TEST_P(MessageLoopTest, SetTaskRunner) {
+  MessageLoop loop;
+  scoped_refptr<SingleThreadTaskRunner> new_runner(new TestSimpleTaskRunner());
+
+  loop.SetTaskRunner(new_runner);
+  EXPECT_EQ(new_runner, loop.task_runner());
+  EXPECT_EQ(new_runner, ThreadTaskRunnerHandle::Get());
+}
+
+TEST_P(MessageLoopTest, OriginalRunnerWorks) {
+  MessageLoop loop;
+  scoped_refptr<SingleThreadTaskRunner> new_runner(new TestSimpleTaskRunner());
+  scoped_refptr<SingleThreadTaskRunner> original_runner(loop.task_runner());
+  loop.SetTaskRunner(new_runner);
+
+  scoped_refptr<Foo> foo(new Foo());
+  original_runner->PostTask(FROM_HERE, BindOnce(&Foo::Test1ConstRef, foo, "a"));
+  RunLoop().RunUntilIdle();
+  EXPECT_EQ(1, foo->test_count());
+}
+
+TEST_P(MessageLoopTest, DeleteUnboundLoop) {
+  // It should be possible to delete an unbound message loop on a thread which
+  // already has another active loop. This happens when thread creation fails.
+  MessageLoop loop;
+  std::unique_ptr<MessageLoop> unbound_loop(MessageLoop::CreateUnbound(
+      MessageLoop::TYPE_DEFAULT, MessageLoop::MessagePumpFactoryCallback()));
+  unbound_loop.reset();
+  EXPECT_EQ(&loop, MessageLoop::current());
+  EXPECT_EQ(loop.task_runner(), ThreadTaskRunnerHandle::Get());
+}
+
+TEST_P(MessageLoopTest, ThreadName) {
+  {
+    std::string kThreadName("foo");
+    MessageLoop loop;
+    PlatformThread::SetName(kThreadName);
+    EXPECT_EQ(kThreadName, loop.GetThreadName());
+  }
+
+  {
+    std::string kThreadName("bar");
+    base::Thread thread(kThreadName);
+    ASSERT_TRUE(thread.StartAndWaitForTesting());
+    EXPECT_EQ(kThreadName, thread.message_loop()->GetThreadName());
+  }
+}
+
+// Verify that tasks posted to and code running in the scope of the same
+// MessageLoop access the same SequenceLocalStorage values.
+TEST_P(MessageLoopTest, SequenceLocalStorageSetGet) {
+  MessageLoop loop;
+
+  SequenceLocalStorageSlot<int> slot;
+
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE,
+      BindOnce(&SequenceLocalStorageSlot<int>::Set, Unretained(&slot), 11));
+
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce(
+                     [](SequenceLocalStorageSlot<int>* slot) {
+                       EXPECT_EQ(slot->Get(), 11);
+                     },
+                     &slot));
+
+  RunLoop().RunUntilIdle();
+  EXPECT_EQ(slot.Get(), 11);
+}
+
+// Verify that tasks posted to and code running in different MessageLoops access
+// different SequenceLocalStorage values.
+TEST_P(MessageLoopTest, SequenceLocalStorageDifferentMessageLoops) {
+  SequenceLocalStorageSlot<int> slot;
+
+  {
+    MessageLoop loop;
+    ThreadTaskRunnerHandle::Get()->PostTask(
+        FROM_HERE,
+        BindOnce(&SequenceLocalStorageSlot<int>::Set, Unretained(&slot), 11));
+
+    RunLoop().RunUntilIdle();
+    EXPECT_EQ(slot.Get(), 11);
+  }
+
+  MessageLoop loop;
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce(
+                     [](SequenceLocalStorageSlot<int>* slot) {
+                       EXPECT_NE(slot->Get(), 11);
+                     },
+                     &slot));
+
+  RunLoop().RunUntilIdle();
+  EXPECT_NE(slot.Get(), 11);
+}
+
+INSTANTIATE_TEST_CASE_P(
+    ,
+    MessageLoopTest,
+    ::testing::Values(TaskSchedulerAvailability::NO_TASK_SCHEDULER,
+                      TaskSchedulerAvailability::WITH_TASK_SCHEDULER),
+    MessageLoopTest::ParamInfoToString);
+
+}  // namespace base
diff --git a/base/message_loop/message_pump.cc b/base/message_loop/message_pump.cc
new file mode 100644
index 0000000..9076176
--- /dev/null
+++ b/base/message_loop/message_pump.cc
@@ -0,0 +1,16 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_loop/message_pump.h"
+
+namespace base {
+
+MessagePump::MessagePump() = default;
+
+MessagePump::~MessagePump() = default;
+
+void MessagePump::SetTimerSlack(TimerSlack) {
+}
+
+}  // namespace base
diff --git a/base/message_loop/message_pump.h b/base/message_loop/message_pump.h
new file mode 100644
index 0000000..dec0c94
--- /dev/null
+++ b/base/message_loop/message_pump.h
@@ -0,0 +1,131 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MESSAGE_LOOP_MESSAGE_PUMP_H_
+#define BASE_MESSAGE_LOOP_MESSAGE_PUMP_H_
+
+#include "base/base_export.h"
+#include "base/message_loop/timer_slack.h"
+#include "base/sequence_checker.h"
+
+namespace base {
+
+class TimeTicks;
+
+class BASE_EXPORT MessagePump {
+ public:
+  // Please see the comments above the Run method for an illustration of how
+  // these delegate methods are used.
+  class BASE_EXPORT Delegate {
+   public:
+    virtual ~Delegate() = default;
+
+    // Called from within Run in response to ScheduleWork or when the message
+    // pump would otherwise call DoDelayedWork.  Returns true to indicate that
+    // work was done.  DoDelayedWork will still be called if DoWork returns
+    // true, but DoIdleWork will not.
+    virtual bool DoWork() = 0;
+
+    // Called from within Run in response to ScheduleDelayedWork or when the
+    // message pump would otherwise sleep waiting for more work.  Returns true
+    // to indicate that delayed work was done.  DoIdleWork will not be called
+    // if DoDelayedWork returns true.  Upon return |next_delayed_work_time|
+    // indicates the time when DoDelayedWork should be called again.  If
+    // |next_delayed_work_time| is null (per Time::is_null), then the queue of
+    // future delayed work (timer events) is currently empty, and no additional
+    // calls to this function need to be scheduled.
+    virtual bool DoDelayedWork(TimeTicks* next_delayed_work_time) = 0;
+
+    // Called from within Run just before the message pump goes to sleep.
+    // Returns true to indicate that idle work was done. Returning false means
+    // the pump will now wait.
+    virtual bool DoIdleWork() = 0;
+  };
+
+  MessagePump();
+  virtual ~MessagePump();
+
+  // The Run method is called to enter the message pump's run loop.
+  //
+  // Within the method, the message pump is responsible for processing native
+  // messages as well as for giving cycles to the delegate periodically.  The
+  // message pump should take care to mix delegate callbacks with native
+  // message processing so neither type of event starves the other of cycles.
+  //
+  // The anatomy of a typical run loop:
+  //
+  //   for (;;) {
+  //     bool did_work = DoInternalWork();
+  //     if (should_quit_)
+  //       break;
+  //
+  //     did_work |= delegate_->DoWork();
+  //     if (should_quit_)
+  //       break;
+  //
+  //     TimeTicks next_time;
+  //     did_work |= delegate_->DoDelayedWork(&next_time);
+  //     if (should_quit_)
+  //       break;
+  //
+  //     if (did_work)
+  //       continue;
+  //
+  //     did_work = delegate_->DoIdleWork();
+  //     if (should_quit_)
+  //       break;
+  //
+  //     if (did_work)
+  //       continue;
+  //
+  //     WaitForWork();
+  //   }
+  //
+  // Here, DoInternalWork is some private method of the message pump that is
+  // responsible for dispatching the next UI message or notifying the next IO
+  // completion (for example).  WaitForWork is a private method that simply
+  // blocks until there is more work of any type to do.
+  //
+  // Notice that the run loop cycles between calling DoInternalWork, DoWork,
+  // and DoDelayedWork methods.  This helps ensure that none of these work
+  // queues starve the others.  This is important for message pumps that are
+  // used to drive animations, for example.
+  //
+  // Notice also that after each callout to foreign code, the run loop checks
+  // to see if it should quit.  The Quit method is responsible for setting this
+  // flag.  No further work is done once the quit flag is set.
+  //
+  // NOTE: Care must be taken to handle Run being called again from within any
+  // of the callouts to foreign code.  Native message pumps may also need to
+  // deal with other native message pumps being run outside their control
+  // (e.g., the MessageBox API on Windows pumps UI messages!).  To be specific,
+  // the callouts (DoWork and DoDelayedWork) MUST still be provided even in
+  // nested sub-loops that are "seemingly" outside the control of this message
+  // pump.  DoWork in particular must never be starved for time slices unless
+  // it returns false (meaning it has run out of things to do).
+  //
+  virtual void Run(Delegate* delegate) = 0;
+
+  // Quit immediately from the most recently entered run loop.  This method may
+  // only be used on the thread that called Run.
+  virtual void Quit() = 0;
+
+  // Schedule a DoWork callback to happen reasonably soon.  Does nothing if a
+  // DoWork callback is already scheduled.  This method may be called from any
+  // thread.  Once this call is made, DoWork should not be "starved" at least
+  // until it returns a value of false.
+  virtual void ScheduleWork() = 0;
+
+  // Schedule a DoDelayedWork callback to happen at the specified time,
+  // cancelling any pending DoDelayedWork callback.  This method may only be
+  // used on the thread that called Run.
+  virtual void ScheduleDelayedWork(const TimeTicks& delayed_work_time) = 0;
+
+  // Sets the timer slack to the specified value.
+  virtual void SetTimerSlack(TimerSlack timer_slack);
+};
+
+}  // namespace base
+
+#endif  // BASE_MESSAGE_LOOP_MESSAGE_PUMP_H_
diff --git a/base/message_loop/message_pump_android.cc b/base/message_loop/message_pump_android.cc
new file mode 100644
index 0000000..8c5bb57
--- /dev/null
+++ b/base/message_loop/message_pump_android.cc
@@ -0,0 +1,165 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_loop/message_pump_android.h"
+
+#include <jni.h>
+
+#include "base/android/jni_android.h"
+#include "base/android/scoped_java_ref.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/run_loop.h"
+#include "jni/SystemMessageHandler_jni.h"
+
+using base::android::JavaParamRef;
+using base::android::ScopedJavaLocalRef;
+
+namespace base {
+
+MessagePumpForUI::MessagePumpForUI() = default;
+MessagePumpForUI::~MessagePumpForUI() = default;
+
+// This is called by the java SystemMessageHandler whenever the message queue
+// detects an idle state (as in, control returns to the looper and there are no
+// tasks available to be run immediately).
+// See the comments in DoRunLoopOnce for how this differs from the
+// implementation on other platforms.
+void MessagePumpForUI::DoIdleWork(JNIEnv* env,
+                                  const JavaParamRef<jobject>& obj) {
+  delegate_->DoIdleWork();
+}
+
+void MessagePumpForUI::DoRunLoopOnce(JNIEnv* env,
+                                     const JavaParamRef<jobject>& obj,
+                                     jboolean delayed) {
+  if (delayed)
+    delayed_scheduled_time_ = base::TimeTicks();
+
+  // If the pump has been aborted, tasks may continue to be queued up, but
+  // shouldn't run.
+  if (ShouldAbort())
+    return;
+
+  // This is based on MessagePumpForUI::DoRunLoop() from desktop.
+  // Note however that our system queue is handled in the java side.
+  // In desktop we inspect and process a single system message and then
+  // we call DoWork() / DoDelayedWork(). This is then wrapped in a for loop and
+  // repeated until no work is left to do, at which point DoIdleWork is called.
+  // On Android, the java message queue may contain messages for other handlers
+  // that will be processed before calling here again.
+  // This means that unlike Desktop, we can't wrap a for loop around this
+  // function and keep processing tasks until we have no work left to do - we
+  // have to return control back to the Android Looper after each message. This
+  // also means we have to perform idle detection differently, which is why we
+  // add an IdleHandler to the message queue in SystemMessageHandler.java, which
+  // calls DoIdleWork whenever control returns back to the looper and there are
+  // no tasks queued up to run immediately.
+  delegate_->DoWork();
+  if (ShouldAbort()) {
+    // There is a pending JNI exception, return to Java so that the exception is
+    // thrown correctly.
+    return;
+  }
+
+  base::TimeTicks next_delayed_work_time;
+  delegate_->DoDelayedWork(&next_delayed_work_time);
+  if (ShouldAbort()) {
+    // There is a pending JNI exception, return to Java so that the exception is
+    // thrown correctly
+    return;
+  }
+
+  if (!next_delayed_work_time.is_null())
+    ScheduleDelayedWork(next_delayed_work_time);
+}
+
+void MessagePumpForUI::Run(Delegate* delegate) {
+  NOTREACHED() << "UnitTests should rely on MessagePumpForUIStub in"
+                  " test_stub_android.h";
+}
+
+void MessagePumpForUI::Start(Delegate* delegate) {
+  DCHECK(!quit_);
+  delegate_ = delegate;
+  run_loop_ = std::make_unique<RunLoop>();
+  // Since the RunLoop was just created above, BeforeRun should be guaranteed to
+  // return true (it only returns false if the RunLoop has been Quit already).
+  if (!run_loop_->BeforeRun())
+    NOTREACHED();
+
+  DCHECK(system_message_handler_obj_.is_null());
+
+  JNIEnv* env = base::android::AttachCurrentThread();
+  DCHECK(env);
+  system_message_handler_obj_.Reset(
+      Java_SystemMessageHandler_create(env, reinterpret_cast<jlong>(this)));
+}
+
+void MessagePumpForUI::Quit() {
+  quit_ = true;
+
+  if (!system_message_handler_obj_.is_null()) {
+    JNIEnv* env = base::android::AttachCurrentThread();
+    DCHECK(env);
+
+    Java_SystemMessageHandler_shutdown(env, system_message_handler_obj_);
+    system_message_handler_obj_.Reset();
+  }
+
+  if (run_loop_) {
+    run_loop_->AfterRun();
+    run_loop_ = nullptr;
+  }
+}
+
+void MessagePumpForUI::ScheduleWork() {
+  if (quit_)
+    return;
+  DCHECK(!system_message_handler_obj_.is_null());
+
+  JNIEnv* env = base::android::AttachCurrentThread();
+  DCHECK(env);
+
+  Java_SystemMessageHandler_scheduleWork(env, system_message_handler_obj_);
+}
+
+void MessagePumpForUI::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
+  if (quit_)
+    return;
+  // In the java side, |SystemMessageHandler| keeps a single "delayed" message.
+  // It's an expensive operation to |removeMessage| there, so this is optimized
+  // to avoid those calls.
+  //
+  // At this stage, |delayed_work_time| can be:
+  // 1) The same as previously scheduled: nothing to be done, move along. This
+  // is the typical case, since this method is called for every single message.
+  //
+  // 2) Not previously scheduled: just post a new message in java.
+  //
+  // 3) Shorter than previously scheduled: far less common. In this case,
+  // |removeMessage| and post a new one.
+  //
+  // 4) Longer than previously scheduled (or null): nothing to be done, move
+  // along.
+  if (!delayed_scheduled_time_.is_null() &&
+      delayed_work_time >= delayed_scheduled_time_) {
+    return;
+  }
+  DCHECK(!delayed_work_time.is_null());
+  DCHECK(!system_message_handler_obj_.is_null());
+
+  JNIEnv* env = base::android::AttachCurrentThread();
+  DCHECK(env);
+
+  jlong millis =
+      (delayed_work_time - TimeTicks::Now()).InMillisecondsRoundedUp();
+  delayed_scheduled_time_ = delayed_work_time;
+  // Note that we're truncating to milliseconds as required by the java side,
+  // even though delayed_work_time is microseconds resolution.
+  Java_SystemMessageHandler_scheduleDelayedWork(
+      env, system_message_handler_obj_, millis);
+}
+
+}  // namespace base
diff --git a/base/message_loop/message_pump_android.h b/base/message_loop/message_pump_android.h
new file mode 100644
index 0000000..d09fdde
--- /dev/null
+++ b/base/message_loop/message_pump_android.h
@@ -0,0 +1,61 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MESSAGE_LOOP_MESSAGE_PUMP_ANDROID_H_
+#define BASE_MESSAGE_LOOP_MESSAGE_PUMP_ANDROID_H_
+
+#include <jni.h>
+#include <memory>
+
+#include "base/android/scoped_java_ref.h"
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/message_loop/message_pump.h"
+#include "base/time/time.h"
+
+namespace base {
+
+class RunLoop;
+
+// This class implements a MessagePump needed for TYPE_UI MessageLoops on
+// OS_ANDROID platform.
+class BASE_EXPORT MessagePumpForUI : public MessagePump {
+ public:
+  MessagePumpForUI();
+  ~MessagePumpForUI() override;
+
+  void DoIdleWork(JNIEnv* env, const base::android::JavaParamRef<jobject>& obj);
+  void DoRunLoopOnce(JNIEnv* env,
+                     const base::android::JavaParamRef<jobject>& obj,
+                     jboolean delayed);
+
+  void Run(Delegate* delegate) override;
+  void Quit() override;
+  void ScheduleWork() override;
+  void ScheduleDelayedWork(const TimeTicks& delayed_work_time) override;
+
+  virtual void Start(Delegate* delegate);
+
+  // We call Abort when there is a pending JNI exception, meaning that the
+  // current thread will crash when we return to Java.
+  // We can't call any JNI-methods before returning to Java as we would then
+  // cause a native crash (instead of the original Java crash).
+  void Abort() { should_abort_ = true; }
+  bool ShouldAbort() const { return should_abort_; }
+
+ private:
+  std::unique_ptr<RunLoop> run_loop_;
+  base::android::ScopedJavaGlobalRef<jobject> system_message_handler_obj_;
+  bool should_abort_ = false;
+  bool quit_ = false;
+  Delegate* delegate_ = nullptr;
+  base::TimeTicks delayed_scheduled_time_;
+
+  DISALLOW_COPY_AND_ASSIGN(MessagePumpForUI);
+};
+
+}  // namespace base
+
+#endif  // BASE_MESSAGE_LOOP_MESSAGE_PUMP_ANDROID_H_
diff --git a/base/message_loop/message_pump_default.cc b/base/message_loop/message_pump_default.cc
new file mode 100644
index 0000000..4104e73
--- /dev/null
+++ b/base/message_loop/message_pump_default.cc
@@ -0,0 +1,103 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_loop/message_pump_default.h"
+
+#include "base/auto_reset.h"
+#include "base/logging.h"
+#include "base/threading/thread_restrictions.h"
+#include "build/build_config.h"
+
+#if defined(OS_MACOSX)
+#include <mach/thread_policy.h>
+
+#include "base/mac/mach_logging.h"
+#include "base/mac/scoped_mach_port.h"
+#include "base/mac/scoped_nsautorelease_pool.h"
+#endif
+
+namespace base {
+
+MessagePumpDefault::MessagePumpDefault()
+    : keep_running_(true),
+      event_(WaitableEvent::ResetPolicy::AUTOMATIC,
+             WaitableEvent::InitialState::NOT_SIGNALED) {}
+
+MessagePumpDefault::~MessagePumpDefault() = default;
+
+void MessagePumpDefault::Run(Delegate* delegate) {
+  AutoReset<bool> auto_reset_keep_running(&keep_running_, true);
+
+  for (;;) {
+#if defined(OS_MACOSX)
+    mac::ScopedNSAutoreleasePool autorelease_pool;
+#endif
+
+    bool did_work = delegate->DoWork();
+    if (!keep_running_)
+      break;
+
+    did_work |= delegate->DoDelayedWork(&delayed_work_time_);
+    if (!keep_running_)
+      break;
+
+    if (did_work)
+      continue;
+
+    did_work = delegate->DoIdleWork();
+    if (!keep_running_)
+      break;
+
+    if (did_work)
+      continue;
+
+    ThreadRestrictions::ScopedAllowWait allow_wait;
+    if (delayed_work_time_.is_null()) {
+      event_.Wait();
+    } else {
+      // No need to handle already expired |delayed_work_time_| in any special
+      // way. When |delayed_work_time_| is in the past TimeWaitUntil returns
+      // promptly and |delayed_work_time_| will re-initialized on a next
+      // DoDelayedWork call which has to be called in order to get here again.
+      event_.TimedWaitUntil(delayed_work_time_);
+    }
+    // Since event_ is auto-reset, we don't need to do anything special here
+    // other than service each delegate method.
+  }
+}
+
+void MessagePumpDefault::Quit() {
+  keep_running_ = false;
+}
+
+void MessagePumpDefault::ScheduleWork() {
+  // Since this can be called on any thread, we need to ensure that our Run
+  // loop wakes up.
+  event_.Signal();
+}
+
+void MessagePumpDefault::ScheduleDelayedWork(
+    const TimeTicks& delayed_work_time) {
+  // We know that we can't be blocked on Wait right now since this method can
+  // only be called on the same thread as Run, so we only need to update our
+  // record of how long to sleep when we do sleep.
+  delayed_work_time_ = delayed_work_time;
+}
+
+#if defined(OS_MACOSX)
+void MessagePumpDefault::SetTimerSlack(TimerSlack timer_slack) {
+  thread_latency_qos_policy_data_t policy{};
+  policy.thread_latency_qos_tier = timer_slack == TIMER_SLACK_MAXIMUM
+                                       ? LATENCY_QOS_TIER_3
+                                       : LATENCY_QOS_TIER_UNSPECIFIED;
+  mac::ScopedMachSendRight thread_port(mach_thread_self());
+  kern_return_t kr =
+      thread_policy_set(thread_port.get(), THREAD_LATENCY_QOS_POLICY,
+                        reinterpret_cast<thread_policy_t>(&policy),
+                        THREAD_LATENCY_QOS_POLICY_COUNT);
+  MACH_DVLOG_IF(1, kr != KERN_SUCCESS, kr) << "thread_policy_set";
+}
+#endif
+
+}  // namespace base
diff --git a/base/message_loop/message_pump_default.h b/base/message_loop/message_pump_default.h
new file mode 100644
index 0000000..dd11adc
--- /dev/null
+++ b/base/message_loop/message_pump_default.h
@@ -0,0 +1,46 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MESSAGE_LOOP_MESSAGE_PUMP_DEFAULT_H_
+#define BASE_MESSAGE_LOOP_MESSAGE_PUMP_DEFAULT_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/message_loop/message_pump.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+
+namespace base {
+
+class BASE_EXPORT MessagePumpDefault : public MessagePump {
+ public:
+  MessagePumpDefault();
+  ~MessagePumpDefault() override;
+
+  // MessagePump methods:
+  void Run(Delegate* delegate) override;
+  void Quit() override;
+  void ScheduleWork() override;
+  void ScheduleDelayedWork(const TimeTicks& delayed_work_time) override;
+#if defined(OS_MACOSX)
+  void SetTimerSlack(TimerSlack timer_slack) override;
+#endif
+
+ private:
+  // This flag is set to false when Run should return.
+  bool keep_running_;
+
+  // Used to sleep until there is more work to do.
+  WaitableEvent event_;
+
+  // The time at which we should call DoDelayedWork.
+  TimeTicks delayed_work_time_;
+
+  DISALLOW_COPY_AND_ASSIGN(MessagePumpDefault);
+};
+
+}  // namespace base
+
+#endif  // BASE_MESSAGE_LOOP_MESSAGE_PUMP_DEFAULT_H_
diff --git a/base/message_loop/message_pump_for_io.h b/base/message_loop/message_pump_for_io.h
new file mode 100644
index 0000000..6aac1e6
--- /dev/null
+++ b/base/message_loop/message_pump_for_io.h
@@ -0,0 +1,44 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MESSAGE_LOOP_MESSAGE_PUMP_FOR_IO_H_
+#define BASE_MESSAGE_LOOP_MESSAGE_PUMP_FOR_IO_H_
+
+// This header is a forwarding header to coalesce the various platform specific
+// types representing MessagePumpForIO.
+
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include "base/message_loop/message_pump_win.h"
+#elif defined(OS_IOS)
+#include "base/message_loop/message_pump_io_ios.h"
+#elif defined(OS_NACL_SFI)
+#include "base/message_loop/message_pump_default.h"
+#elif defined(OS_FUCHSIA)
+#include "base/message_loop/message_pump_fuchsia.h"
+#elif defined(OS_POSIX)
+#include "base/message_loop/message_pump_libevent.h"
+#endif
+
+namespace base {
+
+#if defined(OS_WIN)
+// Windows defines it as-is.
+using MessagePumpForIO = MessagePumpForIO;
+#elif defined(OS_IOS)
+using MessagePumpForIO = MessagePumpIOSForIO;
+#elif defined(OS_NACL_SFI)
+using MessagePumpForIO = MessagePumpDefault;
+#elif defined(OS_FUCHSIA)
+using MessagePumpForIO = MessagePumpFuchsia;
+#elif defined(OS_POSIX)
+using MessagePumpForIO = MessagePumpLibevent;
+#else
+#error Platform does not define MessagePumpForIO
+#endif
+
+}  // namespace base
+
+#endif  // BASE_MESSAGE_LOOP_MESSAGE_PUMP_FOR_IO_H_
diff --git a/base/message_loop/message_pump_for_ui.h b/base/message_loop/message_pump_for_ui.h
new file mode 100644
index 0000000..6ee02b0
--- /dev/null
+++ b/base/message_loop/message_pump_for_ui.h
@@ -0,0 +1,57 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MESSAGE_LOOP_MESSAGE_PUMP_FOR_UI_H_
+#define BASE_MESSAGE_LOOP_MESSAGE_PUMP_FOR_UI_H_
+
+// This header is a forwarding header to coalesce the various platform specific
+// implementations of MessagePumpForUI.
+
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include "base/message_loop/message_pump_win.h"
+#elif defined(OS_ANDROID)
+#include "base/message_loop/message_pump_android.h"
+#elif defined(OS_MACOSX)
+#include "base/message_loop/message_pump.h"
+#elif defined(OS_NACL) || defined(OS_AIX)
+// No MessagePumpForUI, see below.
+#elif defined(USE_GLIB)
+#include "base/message_loop/message_pump_glib.h"
+#elif defined(OS_LINUX) || defined(OS_BSD)
+#include "base/message_loop/message_pump_libevent.h"
+#elif defined(OS_FUCHSIA)
+#include "base/message_loop/message_pump_fuchsia.h"
+#endif
+
+namespace base {
+
+#if defined(OS_WIN)
+// Windows defines it as-is.
+using MessagePumpForUI = MessagePumpForUI;
+#elif defined(OS_ANDROID)
+// Android defines it as-is.
+using MessagePumpForUI = MessagePumpForUI;
+#elif defined(OS_MACOSX)
+// MessagePumpForUI isn't bound to a specific impl on Mac. While each impl can
+// be represented by a plain MessagePump: MessagePumpMac::Create() must be used
+// to instantiate the right impl.
+using MessagePumpForUI = MessagePump;
+#elif defined(OS_NACL) || defined(OS_AIX)
+// Currently NaCl and AIX don't have a MessagePumpForUI.
+// TODO(abarth): Figure out if we need this.
+#elif defined(USE_GLIB)
+using MessagePumpForUI = MessagePumpGlib;
+#elif defined(OS_LINUX) || defined(OS_BSD)
+using MessagePumpForUI = MessagePumpLibevent;
+#elif defined(OS_FUCHSIA)
+using MessagePumpForUI = MessagePumpFuchsia;
+#else
+#error Platform does not define MessagePumpForUI
+#endif
+
+}  // namespace base
+
+#endif  // BASE_MESSAGE_LOOP_MESSAGE_PUMP_FOR_UI_H_
diff --git a/base/message_loop/message_pump_fuchsia.cc b/base/message_loop/message_pump_fuchsia.cc
new file mode 100644
index 0000000..b9af643
--- /dev/null
+++ b/base/message_loop/message_pump_fuchsia.cc
@@ -0,0 +1,304 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_loop/message_pump_fuchsia.h"
+
+#include <fdio/io.h>
+#include <fdio/private.h>
+#include <zircon/status.h>
+#include <zircon/syscalls.h>
+
+#include "base/auto_reset.h"
+#include "base/fuchsia/fuchsia_logging.h"
+#include "base/logging.h"
+
+namespace base {
+
+MessagePumpFuchsia::ZxHandleWatchController::ZxHandleWatchController(
+    const Location& from_here)
+    : async_wait_t({}), created_from_location_(from_here) {}
+
+MessagePumpFuchsia::ZxHandleWatchController::~ZxHandleWatchController() {
+  if (!StopWatchingZxHandle())
+    NOTREACHED();
+}
+
+bool MessagePumpFuchsia::ZxHandleWatchController::WaitBegin() {
+  DCHECK(!handler);
+  async_wait_t::handler = &HandleSignal;
+
+  zx_status_t status = async_begin_wait(&weak_pump_->async_dispatcher_, this);
+  if (status != ZX_OK) {
+    ZX_DLOG(ERROR, status) << "async_begin_wait() failed";
+    async_wait_t::handler = nullptr;
+    return false;
+  }
+
+  return true;
+}
+
+bool MessagePumpFuchsia::ZxHandleWatchController::StopWatchingZxHandle() {
+  if (was_stopped_) {
+    DCHECK(!*was_stopped_);
+    *was_stopped_ = true;
+
+    // |was_stopped_| points at a value stored on the stack, which will go out
+    // of scope. MessagePumpFuchsia::Run() will reset it only if the value is
+    // false. So we need to reset this pointer here as well, to make sure it's
+    // not used again.
+    was_stopped_ = nullptr;
+  }
+
+  // If the pump is gone then there is nothing to cancel.
+  if (!weak_pump_)
+    return true;
+
+  // |handler| is set when waiting for a signal.
+  if (!handler)
+    return true;
+
+  async_wait_t::handler = nullptr;
+
+  zx_status_t result = async_cancel_wait(&weak_pump_->async_dispatcher_, this);
+  ZX_DLOG_IF(ERROR, result != ZX_OK, result) << "async_cancel_wait failed";
+  return result == ZX_OK;
+}
+
+// static
+void MessagePumpFuchsia::ZxHandleWatchController::HandleSignal(
+    async_t* async,
+    async_wait_t* wait,
+    zx_status_t status,
+    const zx_packet_signal_t* signal) {
+  if (status != ZX_OK) {
+    ZX_LOG(WARNING, status) << "async wait failed";
+    return;
+  }
+
+  ZxHandleWatchController* controller =
+      static_cast<ZxHandleWatchController*>(wait);
+  DCHECK_EQ(controller->handler, &HandleSignal);
+  controller->handler = nullptr;
+
+  // |signal| can include other spurious things, in particular, that an fd
+  // is writable, when we only asked to know when it was readable. In that
+  // case, we don't want to call both the CanWrite and CanRead callback,
+  // when the caller asked for only, for example, readable callbacks. So,
+  // mask with the events that we actually wanted to know about.
+  zx_signals_t signals = signal->trigger & signal->observed;
+  DCHECK_NE(0u, signals);
+
+  // In the case of a persistent Watch, the Watch may be stopped and
+  // potentially deleted by the caller within the callback, in which case
+  // |controller| should not be accessed again, and we mustn't continue the
+  // watch. We check for this with a bool on the stack, which the Watch
+  // receives a pointer to.
+  bool was_stopped = false;
+  controller->was_stopped_ = &was_stopped;
+
+  controller->watcher_->OnZxHandleSignalled(wait->object, signals);
+
+  if (was_stopped)
+    return;
+
+  controller->was_stopped_ = nullptr;
+
+  if (controller->persistent_)
+    controller->WaitBegin();
+}
+
+void MessagePumpFuchsia::FdWatchController::OnZxHandleSignalled(
+    zx_handle_t handle,
+    zx_signals_t signals) {
+  uint32_t events;
+  __fdio_wait_end(io_, signals, &events);
+
+  // Each |watcher_| callback we invoke may stop or delete |this|. The pump has
+  // set |was_stopped_| to point to a safe location on the calling stack, so we
+  // can use that to detect being stopped mid-callback and avoid doing further
+  // work that would touch |this|.
+  bool* was_stopped = was_stopped_;
+  if (events & FDIO_EVT_WRITABLE)
+    watcher_->OnFileCanWriteWithoutBlocking(fd_);
+  if (!*was_stopped && (events & FDIO_EVT_READABLE))
+    watcher_->OnFileCanReadWithoutBlocking(fd_);
+
+  // Don't add additional work here without checking |*was_stopped_| again.
+}
+
+MessagePumpFuchsia::FdWatchController::FdWatchController(
+    const Location& from_here)
+    : FdWatchControllerInterface(from_here),
+      ZxHandleWatchController(from_here) {}
+
+MessagePumpFuchsia::FdWatchController::~FdWatchController() {
+  if (!StopWatchingFileDescriptor())
+    NOTREACHED();
+}
+
+bool MessagePumpFuchsia::FdWatchController::WaitBegin() {
+  // Refresh the |handle_| and |desired_signals_| from the mxio for the fd.
+  // Some types of fdio map read/write events to different signals depending on
+  // their current state, so we must do this every time we begin to wait.
+  __fdio_wait_begin(io_, desired_events_, &object, &trigger);
+  if (async_wait_t::object == ZX_HANDLE_INVALID) {
+    DLOG(ERROR) << "fdio_wait_begin failed";
+    return false;
+  }
+
+  return MessagePumpFuchsia::ZxHandleWatchController::WaitBegin();
+}
+
+bool MessagePumpFuchsia::FdWatchController::StopWatchingFileDescriptor() {
+  bool success = StopWatchingZxHandle();
+  if (io_) {
+    __fdio_release(io_);
+    io_ = nullptr;
+  }
+  return success;
+}
+
+MessagePumpFuchsia::MessagePumpFuchsia() : weak_factory_(this) {}
+
+MessagePumpFuchsia::~MessagePumpFuchsia() = default;
+
+bool MessagePumpFuchsia::WatchFileDescriptor(int fd,
+                                             bool persistent,
+                                             int mode,
+                                             FdWatchController* controller,
+                                             FdWatcher* delegate) {
+  DCHECK_GE(fd, 0);
+  DCHECK(controller);
+  DCHECK(delegate);
+
+  if (!controller->StopWatchingFileDescriptor())
+    NOTREACHED();
+
+  controller->fd_ = fd;
+  controller->watcher_ = delegate;
+
+  DCHECK(!controller->io_);
+  controller->io_ = __fdio_fd_to_io(fd);
+  if (!controller->io_) {
+    DLOG(ERROR) << "Failed to get IO for FD";
+    return false;
+  }
+
+  switch (mode) {
+    case WATCH_READ:
+      controller->desired_events_ = FDIO_EVT_READABLE;
+      break;
+    case WATCH_WRITE:
+      controller->desired_events_ = FDIO_EVT_WRITABLE;
+      break;
+    case WATCH_READ_WRITE:
+      controller->desired_events_ = FDIO_EVT_READABLE | FDIO_EVT_WRITABLE;
+      break;
+    default:
+      NOTREACHED() << "unexpected mode: " << mode;
+      return false;
+  }
+
+  // Pass dummy |handle| and |signals| values to WatchZxHandle(). The real
+  // values will be populated by FdWatchController::WaitBegin(), before actually
+  // starting the wait operation.
+  return WatchZxHandle(ZX_HANDLE_INVALID, persistent, 1, controller,
+                       controller);
+}
+
+bool MessagePumpFuchsia::WatchZxHandle(zx_handle_t handle,
+                                       bool persistent,
+                                       zx_signals_t signals,
+                                       ZxHandleWatchController* controller,
+                                       ZxHandleWatcher* delegate) {
+  DCHECK_NE(0u, signals);
+  DCHECK(controller);
+  DCHECK(delegate);
+  DCHECK(handle == ZX_HANDLE_INVALID ||
+         controller->async_wait_t::object == ZX_HANDLE_INVALID ||
+         handle == controller->async_wait_t::object);
+
+  if (!controller->StopWatchingZxHandle())
+    NOTREACHED();
+
+  controller->async_wait_t::object = handle;
+  controller->persistent_ = persistent;
+  controller->async_wait_t::trigger = signals;
+  controller->watcher_ = delegate;
+
+  controller->weak_pump_ = weak_factory_.GetWeakPtr();
+
+  return controller->WaitBegin();
+}
+
+bool MessagePumpFuchsia::HandleEvents(zx_time_t deadline) {
+  zx_status_t status = async_dispatcher_.DispatchOrWaitUntil(deadline);
+  switch (status) {
+    // Return true if some tasks or events were dispatched or if the dispatcher
+    // was stopped by ScheduleWork().
+    case ZX_OK:
+    case ZX_ERR_CANCELED:
+      return true;
+
+    case ZX_ERR_TIMED_OUT:
+      return false;
+
+    default:
+      ZX_DLOG(DCHECK, status) << "unexpected wait status";
+      return false;
+  }
+}
+
+void MessagePumpFuchsia::Run(Delegate* delegate) {
+  AutoReset<bool> auto_reset_keep_running(&keep_running_, true);
+
+  for (;;) {
+    bool did_work = delegate->DoWork();
+    if (!keep_running_)
+      break;
+
+    did_work |= delegate->DoDelayedWork(&delayed_work_time_);
+    if (!keep_running_)
+      break;
+
+    did_work |= HandleEvents(/*deadline=*/0);
+    if (!keep_running_)
+      break;
+
+    if (did_work)
+      continue;
+
+    did_work = delegate->DoIdleWork();
+    if (!keep_running_)
+      break;
+
+    if (did_work)
+      continue;
+
+    zx_time_t deadline = delayed_work_time_.is_null()
+                             ? ZX_TIME_INFINITE
+                             : delayed_work_time_.ToZxTime();
+    HandleEvents(deadline);
+  }
+}
+
+void MessagePumpFuchsia::Quit() {
+  keep_running_ = false;
+}
+
+void MessagePumpFuchsia::ScheduleWork() {
+  // Stop AsyncDispatcher to let MessagePumpFuchsia::Run() handle message loop
+  // tasks.
+  async_dispatcher_.Stop();
+}
+
+void MessagePumpFuchsia::ScheduleDelayedWork(
+    const TimeTicks& delayed_work_time) {
+  // We know that we can't be blocked right now since this method can only be
+  // called on the same thread as Run, so we only need to update our record of
+  // how long to sleep when we do sleep.
+  delayed_work_time_ = delayed_work_time;
+}
+
+}  // namespace base
diff --git a/base/message_loop/message_pump_fuchsia.h b/base/message_loop/message_pump_fuchsia.h
new file mode 100644
index 0000000..514e23f
--- /dev/null
+++ b/base/message_loop/message_pump_fuchsia.h
@@ -0,0 +1,157 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MESSAGE_LOOP_MESSAGE_PUMP_FUCHSIA_H_
+#define BASE_MESSAGE_LOOP_MESSAGE_PUMP_FUCHSIA_H_
+
+#include <lib/async/wait.h>
+
+#include "base/base_export.h"
+#include "base/fuchsia/async_dispatcher.h"
+#include "base/location.h"
+#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
+#include "base/message_loop/message_pump.h"
+#include "base/message_loop/watchable_io_message_pump_posix.h"
+
+typedef struct fdio fdio_t;
+
+namespace base {
+
+class BASE_EXPORT MessagePumpFuchsia : public MessagePump,
+                                       public WatchableIOMessagePumpPosix {
+ public:
+  // Implemented by callers to receive notifications of handle & fd events.
+  class ZxHandleWatcher {
+   public:
+    virtual void OnZxHandleSignalled(zx_handle_t handle,
+                                     zx_signals_t signals) = 0;
+
+   protected:
+    virtual ~ZxHandleWatcher() {}
+  };
+
+  // Manages an active watch on an zx_handle_t.
+  class ZxHandleWatchController : public async_wait_t {
+   public:
+    explicit ZxHandleWatchController(const Location& from_here);
+    // Deleting the Controller implicitly calls StopWatchingZxHandle.
+    virtual ~ZxHandleWatchController();
+
+    // Stop watching the handle, always safe to call.  No-op if there's nothing
+    // to do.
+    bool StopWatchingZxHandle();
+
+    const Location& created_from_location() { return created_from_location_; }
+
+   protected:
+    friend class MessagePumpFuchsia;
+
+    virtual bool WaitBegin();
+
+    static void HandleSignal(async_t* async,
+                             async_wait_t* wait,
+                             zx_status_t status,
+                             const zx_packet_signal_t* signal);
+
+    const Location created_from_location_;
+
+    // This bool is used by the pump when invoking the ZxHandleWatcher callback,
+    // and by the FdHandleWatchController when invoking read & write callbacks,
+    // to cope with the possibility of the caller deleting the *Watcher within
+    // the callback. The pump sets |was_stopped_| to a location on the stack,
+    // and the Watcher writes to it, if set, when deleted, allowing the pump
+    // to check the value on the stack to short-cut any post-callback work.
+    bool* was_stopped_ = nullptr;
+
+    // Set directly from the inputs to WatchFileDescriptor.
+    ZxHandleWatcher* watcher_ = nullptr;
+
+    // Used to safely access resources owned by the associated message pump.
+    WeakPtr<MessagePumpFuchsia> weak_pump_;
+
+    // A watch may be marked as persistent, which means it remains active even
+    // after triggering.
+    bool persistent_ = false;
+
+    DISALLOW_COPY_AND_ASSIGN(ZxHandleWatchController);
+  };
+
+  class FdWatchController : public FdWatchControllerInterface,
+                            public ZxHandleWatchController,
+                            public ZxHandleWatcher {
+   public:
+    explicit FdWatchController(const Location& from_here);
+    ~FdWatchController() override;
+
+    // FdWatchControllerInterface:
+    bool StopWatchingFileDescriptor() override;
+
+   private:
+    friend class MessagePumpFuchsia;
+
+    // Determines the desired signals and begins waiting on the handle.
+    bool WaitBegin() override;
+
+    // ZxHandleWatcher interface.
+    void OnZxHandleSignalled(zx_handle_t handle, zx_signals_t signals) override;
+
+    // Set directly from the inputs to WatchFileDescriptor.
+    FdWatcher* watcher_ = nullptr;
+    int fd_ = -1;
+    uint32_t desired_events_ = 0;
+
+    // Set by WatchFileDescriptor to hold a reference to the descriptor's fdio.
+    fdio_t* io_ = nullptr;
+
+    DISALLOW_COPY_AND_ASSIGN(FdWatchController);
+  };
+
+  enum Mode {
+    WATCH_READ = 1 << 0,
+    WATCH_WRITE = 1 << 1,
+    WATCH_READ_WRITE = WATCH_READ | WATCH_WRITE
+  };
+
+  MessagePumpFuchsia();
+  ~MessagePumpFuchsia() override;
+
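+  // Example usage (a hedged sketch; |MyWatcher|, |pump|, |handle| and
+  // |my_watcher| are illustrative names, not part of this API):
+  //
+  //   class MyWatcher : public MessagePumpFuchsia::ZxHandleWatcher {
+  //     void OnZxHandleSignalled(zx_handle_t handle,
+  //                              zx_signals_t signals) override {
+  //       // React to the signalled handle here.
+  //     }
+  //   };
+  //
+  //   MessagePumpFuchsia::ZxHandleWatchController controller(FROM_HERE);
+  //   pump->WatchZxHandle(handle, /*persistent=*/true, ZX_CHANNEL_READABLE,
+  //                       &controller, &my_watcher);
+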
+  bool WatchZxHandle(zx_handle_t handle,
+                     bool persistent,
+                     zx_signals_t signals,
+                     ZxHandleWatchController* controller,
+                     ZxHandleWatcher* delegate);
+  bool WatchFileDescriptor(int fd,
+                           bool persistent,
+                           int mode,
+                           FdWatchController* controller,
+                           FdWatcher* delegate);
+
+  // MessagePump implementation:
+  void Run(Delegate* delegate) override;
+  void Quit() override;
+  void ScheduleWork() override;
+  void ScheduleDelayedWork(const TimeTicks& delayed_work_time) override;
+
+ private:
+  // Handles IO events by running |async_dispatcher_|. Returns true if any
+  // events were received or if ScheduleWork() was called.
+  bool HandleEvents(zx_time_t deadline);
+
+  // This flag is set to false when Run should return.
+  bool keep_running_ = true;
+
+  AsyncDispatcher async_dispatcher_;
+
+  // The time at which we should call DoDelayedWork.
+  TimeTicks delayed_work_time_;
+
+  base::WeakPtrFactory<MessagePumpFuchsia> weak_factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(MessagePumpFuchsia);
+};
+
+}  // namespace base
+
+#endif  // BASE_MESSAGE_LOOP_MESSAGE_PUMP_FUCHSIA_H_
diff --git a/base/message_loop/message_pump_glib.cc b/base/message_loop/message_pump_glib.cc
new file mode 100644
index 0000000..2f1909b
--- /dev/null
+++ b/base/message_loop/message_pump_glib.cc
@@ -0,0 +1,359 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_loop/message_pump_glib.h"
+
+#include <fcntl.h>
+#include <math.h>
+
+#include <glib.h>
+
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/platform_thread.h"
+
+namespace base {
+
+namespace {
+
+// Return a timeout suitable for the glib loop, -1 to block forever,
+// 0 to return right away, or a timeout in milliseconds from now.
+int GetTimeIntervalMilliseconds(const TimeTicks& from) {
+  if (from.is_null())
+    return -1;
+
+  // Be careful here.  TimeDelta has a precision of microseconds, but we want a
+  // value in milliseconds.  If there are 5.5ms left, should the delay be 5 or
+  // 6?  It should be 6 to avoid executing delayed work too early.
+  int delay = static_cast<int>(
+      ceil((from - TimeTicks::Now()).InMillisecondsF()));
+
+  // If this value is negative, then we need to run delayed work soon.
+  return delay < 0 ? 0 : delay;
+}
+
+// A brief refresher on GLib:
+//     GLib sources have four callbacks: Prepare, Check, Dispatch and Finalize.
+// On each iteration of the GLib pump, it calls each source's Prepare function.
+// This function should return TRUE if it wants GLib to call its Dispatch, and
+// FALSE otherwise.  It can also set a timeout in this case for the next time
+// Prepare should be called again (it may be called sooner).
+//     After the Prepare calls, GLib does a poll to check for events from the
+// system.  File descriptors can be attached to the sources.  The poll may block
+// if none of the Prepare calls returned TRUE.  It will block indefinitely, or
+// for the smallest timeout returned by a source in Prepare.
+//     After the poll, GLib calls Check for each source that returned FALSE
+// from Prepare.  The return value of Check has the same meaning as for Prepare,
+// making Check a second chance to tell GLib we are ready for Dispatch.
+//     Finally, GLib calls Dispatch for each source that is ready.  If Dispatch
+// returns FALSE, GLib will destroy the source.  Dispatch calls may be recursive
+// (i.e., you can call Run from them), but Prepare and Check cannot.
+//     Finalize is called when the source is destroyed.
+// NOTE: It is common for subsystems to want to process pending events while
+// doing intensive work, for example the flash plugin. They usually use the
+// following pattern (recommended by the GTK docs):
+// while (gtk_events_pending()) {
+//   gtk_main_iteration();
+// }
+//
+// gtk_events_pending just calls g_main_context_pending, which does the
+// following:
+// - Call prepare on all the sources.
+// - Do the poll with a timeout of 0 (not blocking).
+// - Call check on all the sources.
+// - *Does not* call dispatch on the sources.
+// - Return true if any of prepare() or check() returned true.
+//
+// gtk_main_iteration just calls g_main_context_iteration, which does the whole
+// thing, respecting the timeout for the poll (and may block, although it is
+// not expected to if gtk_events_pending returned true), and calls dispatch.
+//
+// Thus it is important to only return true from prepare or check if we
+// actually have events or work to do. We also need to make sure we keep
+// internal state consistent so that if prepare/check return true when called
+// from gtk_events_pending, they will still return true when called right
+// after, from gtk_main_iteration.
+//
+// For the GLib pump we try to follow the Windows UI pump model:
+// - Whenever we receive a wakeup event or the timer for delayed work expires,
+// we run DoWork and/or DoDelayedWork. That part will also run in the other
+// event pumps.
+// - We also run DoWork, DoDelayedWork, and possibly DoIdleWork in the main
+// loop, around event handling.
+
+struct WorkSource : public GSource {
+  MessagePumpGlib* pump;
+};
+
+gboolean WorkSourcePrepare(GSource* source,
+                           gint* timeout_ms) {
+  *timeout_ms = static_cast<WorkSource*>(source)->pump->HandlePrepare();
+  // We always return FALSE, so that our timeout is honored.  If we were
+  // to return TRUE, the timeout would be considered to be 0 and the poll
+  // would never block.  Once the poll is finished, Check will be called.
+  return FALSE;
+}
+
+gboolean WorkSourceCheck(GSource* source) {
+  // Only return TRUE if Dispatch should be called.
+  return static_cast<WorkSource*>(source)->pump->HandleCheck();
+}
+
+gboolean WorkSourceDispatch(GSource* source,
+                            GSourceFunc unused_func,
+                            gpointer unused_data) {
+  static_cast<WorkSource*>(source)->pump->HandleDispatch();
+  // Always return TRUE so our source stays registered.
+  return TRUE;
+}
+
+// I wish these could be const, but g_source_new wants non-const.
+GSourceFuncs WorkSourceFuncs = {WorkSourcePrepare, WorkSourceCheck,
+                                WorkSourceDispatch, nullptr};
+
+// The following is used to make sure we only run the MessagePumpGlib on one
+// thread. X only has one message pump so we can only have one UI loop per
+// process.
+#ifndef NDEBUG
+
+// Tracks the most recent pump that has been run.
+struct ThreadInfo {
+  // The pump.
+  MessagePumpGlib* pump;
+
+  // ID of the thread the pump was run on.
+  PlatformThreadId thread_id;
+};
+
+// Used for accessing |thread_info|.
+static LazyInstance<Lock>::Leaky thread_info_lock = LAZY_INSTANCE_INITIALIZER;
+
+// If non-NULL it means a MessagePumpGlib exists and has been Run. This is
+// destroyed when the MessagePump is destroyed.
+ThreadInfo* thread_info = NULL;
+
+void CheckThread(MessagePumpGlib* pump) {
+  AutoLock auto_lock(thread_info_lock.Get());
+  if (!thread_info) {
+    thread_info = new ThreadInfo;
+    thread_info->pump = pump;
+    thread_info->thread_id = PlatformThread::CurrentId();
+  }
+  DCHECK(thread_info->thread_id == PlatformThread::CurrentId()) <<
+      "Running MessagePumpGlib on two different threads; "
+      "this is unsupported by GLib!";
+}
+
+void PumpDestroyed(MessagePumpGlib* pump) {
+  AutoLock auto_lock(thread_info_lock.Get());
+  if (thread_info && thread_info->pump == pump) {
+    delete thread_info;
+    thread_info = NULL;
+  }
+}
+
+#endif
+
+}  // namespace
+
+struct MessagePumpGlib::RunState {
+  Delegate* delegate;
+
+  // Used to flag that the current Run() invocation should return ASAP.
+  bool should_quit;
+
+  // Used to count how many Run() invocations are on the stack.
+  int run_depth;
+
+  // This keeps the state of whether the pump got signaled that there was new
+  // work to be done. Since we eat the message on the wake up pipe as soon as
+  // we get it, we keep that state here to stay consistent.
+  bool has_work;
+};
+
+MessagePumpGlib::MessagePumpGlib()
+    : state_(nullptr),
+      context_(g_main_context_default()),
+      wakeup_gpollfd_(new GPollFD) {
+  // Create our wakeup pipe, which is used to flag when work was scheduled.
+  int fds[2];
+  int ret = pipe(fds);
+  DCHECK_EQ(ret, 0);
+  (void)ret;  // Prevent warning in release mode.
+
+  wakeup_pipe_read_  = fds[0];
+  wakeup_pipe_write_ = fds[1];
+  wakeup_gpollfd_->fd = wakeup_pipe_read_;
+  wakeup_gpollfd_->events = G_IO_IN;
+
+  work_source_ = g_source_new(&WorkSourceFuncs, sizeof(WorkSource));
+  static_cast<WorkSource*>(work_source_)->pump = this;
+  g_source_add_poll(work_source_, wakeup_gpollfd_.get());
+  // Use a low priority so that we let other events in the queue go first.
+  g_source_set_priority(work_source_, G_PRIORITY_DEFAULT_IDLE);
+  // This is needed to allow Run calls inside Dispatch.
+  g_source_set_can_recurse(work_source_, TRUE);
+  g_source_attach(work_source_, context_);
+}
+
+MessagePumpGlib::~MessagePumpGlib() {
+#ifndef NDEBUG
+  PumpDestroyed(this);
+#endif
+  g_source_destroy(work_source_);
+  g_source_unref(work_source_);
+  close(wakeup_pipe_read_);
+  close(wakeup_pipe_write_);
+}
+
+// Return the timeout we want passed to poll.
+int MessagePumpGlib::HandlePrepare() {
+  // We know we have work, but we haven't called HandleDispatch yet. Don't let
+  // the pump block so that we can do some processing.
+  if (state_ &&  // state_ may be null during tests.
+      state_->has_work)
+    return 0;
+
+  // We don't think we have work to do, but make sure not to block
+  // longer than the next time we need to run delayed work.
+  return GetTimeIntervalMilliseconds(delayed_work_time_);
+}
+
+bool MessagePumpGlib::HandleCheck() {
+  if (!state_)  // state_ may be null during tests.
+    return false;
+
+  // We usually have a single message on the wakeup pipe, since we are only
+  // signaled when the queue went from empty to non-empty, but there can be
+  // two messages if a task posted a task, hence we read at most two bytes.
+  // The glib poll will tell us whether there was data, so this read
+  // shouldn't block.
+  if (wakeup_gpollfd_->revents & G_IO_IN) {
+    char msg[2];
+    const int num_bytes = HANDLE_EINTR(read(wakeup_pipe_read_, msg, 2));
+    if (num_bytes < 1) {
+      NOTREACHED() << "Error reading from the wakeup pipe.";
+    }
+    DCHECK((num_bytes == 1 && msg[0] == '!') ||
+           (num_bytes == 2 && msg[0] == '!' && msg[1] == '!'));
+    // Since we ate the message, we need to record that we have more work,
+    // because HandleCheck() may be called without HandleDispatch being called
+    // afterwards.
+    state_->has_work = true;
+  }
+
+  if (state_->has_work)
+    return true;
+
+  if (GetTimeIntervalMilliseconds(delayed_work_time_) == 0) {
+    // The timer has expired. That condition will stay true until we process
+    // that delayed work, so we don't need to record this differently.
+    return true;
+  }
+
+  return false;
+}
+
+void MessagePumpGlib::HandleDispatch() {
+  state_->has_work = false;
+  if (state_->delegate->DoWork()) {
+    // NOTE: on Windows at this point we would call ScheduleWork (see
+    // MessagePumpGlib::HandleWorkMessage in message_pump_win.cc). But here,
+    // instead of posting a message on the wakeup pipe, we can avoid the
+    // syscalls and just signal that we have more work.
+    state_->has_work = true;
+  }
+
+  if (state_->should_quit)
+    return;
+
+  state_->delegate->DoDelayedWork(&delayed_work_time_);
+}
+
+void MessagePumpGlib::Run(Delegate* delegate) {
+#ifndef NDEBUG
+  CheckThread(this);
+#endif
+
+  RunState state;
+  state.delegate = delegate;
+  state.should_quit = false;
+  state.run_depth = state_ ? state_->run_depth + 1 : 1;
+  state.has_work = false;
+
+  RunState* previous_state = state_;
+  state_ = &state;
+
+  // We really only do a single task for each iteration of the loop.  If we
+  // have done something, assume there is likely something more to do.  This
+  // will mean that we don't block on the message pump until there was nothing
+  // more to do.  We also set this to true to make sure not to block on the
+  // first iteration of the loop, so RunUntilIdle() works correctly.
+  bool more_work_is_plausible = true;
+
+  // We run our own loop instead of using g_main_loop_quit in one of the
+  // callbacks.  This is so we only quit our own loops, and we don't quit
+  // nested loops run by others.  TODO(deanm): Is this what we want?
+  for (;;) {
+    // Don't block if we think we have more work to do.
+    bool block = !more_work_is_plausible;
+
+    more_work_is_plausible = g_main_context_iteration(context_, block);
+    if (state_->should_quit)
+      break;
+
+    more_work_is_plausible |= state_->delegate->DoWork();
+    if (state_->should_quit)
+      break;
+
+    more_work_is_plausible |=
+        state_->delegate->DoDelayedWork(&delayed_work_time_);
+    if (state_->should_quit)
+      break;
+
+    if (more_work_is_plausible)
+      continue;
+
+    more_work_is_plausible = state_->delegate->DoIdleWork();
+    if (state_->should_quit)
+      break;
+  }
+
+  state_ = previous_state;
+}
+
+void MessagePumpGlib::Quit() {
+  if (state_) {
+    state_->should_quit = true;
+  } else {
+    NOTREACHED() << "Quit called outside Run!";
+  }
+}
+
+void MessagePumpGlib::ScheduleWork() {
+  // This can be called on any thread, so we don't want to touch any state
+  // variables as we would then need locks all over.  This ensures that if
+  // we are sleeping in a poll that we will wake up.
+  char msg = '!';
+  if (HANDLE_EINTR(write(wakeup_pipe_write_, &msg, 1)) != 1) {
+    NOTREACHED() << "Could not write to the UI message loop wakeup pipe!";
+  }
+}
+
+void MessagePumpGlib::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
+  // We need to wake up the loop in case the poll timeout needs to be
+  // adjusted.  This will cause us to try to do work, but that's OK.
+  delayed_work_time_ = delayed_work_time;
+  ScheduleWork();
+}
+
+bool MessagePumpGlib::ShouldQuit() const {
+  CHECK(state_);
+  return state_->should_quit;
+}
+
+}  // namespace base
diff --git a/base/message_loop/message_pump_glib.h b/base/message_loop/message_pump_glib.h
new file mode 100644
index 0000000..d79dba5
--- /dev/null
+++ b/base/message_loop/message_pump_glib.h
@@ -0,0 +1,80 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MESSAGE_LOOP_MESSAGE_PUMP_GLIB_H_
+#define BASE_MESSAGE_LOOP_MESSAGE_PUMP_GLIB_H_
+
+#include <memory>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/message_loop/message_pump.h"
+#include "base/observer_list.h"
+#include "base/time/time.h"
+
+typedef struct _GMainContext GMainContext;
+typedef struct _GPollFD GPollFD;
+typedef struct _GSource GSource;
+
+namespace base {
+
+// This class implements a base MessagePump needed for TYPE_UI MessageLoops on
+// platforms using GLib.
+class BASE_EXPORT MessagePumpGlib : public MessagePump {
+ public:
+  MessagePumpGlib();
+  ~MessagePumpGlib() override;
+
+  // Internal methods used for processing the pump callbacks.  They are
+  // public for simplicity but should not be used directly.  HandlePrepare
+  // is called during the prepare step of glib, and returns a timeout that
+  // will be passed to the poll. HandleCheck is called after the poll
+  // has completed, and returns whether or not HandleDispatch should be called.
+  // HandleDispatch is called if HandleCheck returned true.
+  int HandlePrepare();
+  bool HandleCheck();
+  void HandleDispatch();
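+
+  // The call order driven by GLib is roughly the following (an illustrative
+  // sketch, not actual code):
+  //
+  //   timeout = pump->HandlePrepare();  // via WorkSourcePrepare()
+  //   poll(..., timeout);               // performed internally by GLib
+  //   if (pump->HandleCheck())          // via WorkSourceCheck()
+  //     pump->HandleDispatch();         // via WorkSourceDispatch()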
+
+  // Overridden from MessagePump:
+  void Run(Delegate* delegate) override;
+  void Quit() override;
+  void ScheduleWork() override;
+  void ScheduleDelayedWork(const TimeTicks& delayed_work_time) override;
+
+ private:
+  bool ShouldQuit() const;
+
+  // We may make recursive calls to Run, so we save state that needs to be
+  // separate between them in this structure type.
+  struct RunState;
+
+  RunState* state_;
+
+  // This is a GLib structure that we can add event sources to.  We use the
+  // default GLib context, which is the one to which all GTK events are
+  // dispatched.
+  GMainContext* context_;
+
+  // This is the time when we need to do delayed work.
+  TimeTicks delayed_work_time_;
+
+  // The work source.  It is shared by all calls to Run and destroyed when
+  // the message pump is destroyed.
+  GSource* work_source_;
+
+  // We use a wakeup pipe to make sure we'll get out of the glib polling phase
+  // when another thread has scheduled us to do some work.  There is a glib
+  // mechanism g_main_context_wakeup, but this won't guarantee that our event's
+  // Dispatch() will be called.
+  int wakeup_pipe_read_;
+  int wakeup_pipe_write_;
+  // Use a unique_ptr to avoid needing the definition of GPollFD in the header.
+  std::unique_ptr<GPollFD> wakeup_gpollfd_;
+
+  DISALLOW_COPY_AND_ASSIGN(MessagePumpGlib);
+};
+
+}  // namespace base
+
+#endif  // BASE_MESSAGE_LOOP_MESSAGE_PUMP_GLIB_H_
diff --git a/base/message_loop/message_pump_glib_unittest.cc b/base/message_loop/message_pump_glib_unittest.cc
new file mode 100644
index 0000000..70be2a4
--- /dev/null
+++ b/base/message_loop/message_pump_glib_unittest.cc
@@ -0,0 +1,523 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_loop/message_pump_glib.h"
+
+#include <glib.h>
+#include <math.h>
+
+#include <algorithm>
+#include <vector>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/message_loop/message_loop.h"
+#include "base/message_loop/message_loop_current.h"
+#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+// This class injects dummy "events" into the GLib loop. When "handled" these
+// events can run tasks. This is intended to mock gtk events (the corresponding
+// GLib source runs at the same priority).
+class EventInjector {
+ public:
+  EventInjector() : processed_events_(0) {
+    source_ = static_cast<Source*>(g_source_new(&SourceFuncs, sizeof(Source)));
+    source_->injector = this;
+    g_source_attach(source_, nullptr);
+    g_source_set_can_recurse(source_, TRUE);
+  }
+
+  ~EventInjector() {
+    g_source_destroy(source_);
+    g_source_unref(source_);
+  }
+
+  int HandlePrepare() {
+    // If the queue is empty, block.
+    if (events_.empty())
+      return -1;
+    TimeDelta delta = events_[0].time - Time::NowFromSystemTime();
+    return std::max(0, static_cast<int>(ceil(delta.InMillisecondsF())));
+  }
+
+  bool HandleCheck() {
+    if (events_.empty())
+      return false;
+    return events_[0].time <= Time::NowFromSystemTime();
+  }
+
+  void HandleDispatch() {
+    if (events_.empty())
+      return;
+    Event event = std::move(events_[0]);
+    events_.erase(events_.begin());
+    ++processed_events_;
+    if (!event.callback.is_null())
+      std::move(event.callback).Run();
+    else if (!event.task.is_null())
+      std::move(event.task).Run();
+  }
+
+  // Adds an event to the queue. When "handled", executes |callback|.
+  // delay_ms is relative to the last event if any, or to Now() otherwise.
+  void AddEvent(int delay_ms, OnceClosure callback) {
+    AddEventHelper(delay_ms, std::move(callback), OnceClosure());
+  }
+
+  void AddDummyEvent(int delay_ms) {
+    AddEventHelper(delay_ms, OnceClosure(), OnceClosure());
+  }
+
+  void AddEventAsTask(int delay_ms, OnceClosure task) {
+    AddEventHelper(delay_ms, OnceClosure(), std::move(task));
+  }
+
+  void Reset() {
+    processed_events_ = 0;
+    events_.clear();
+  }
+
+  int processed_events() const { return processed_events_; }
+
+ private:
+  struct Event {
+    Time time;
+    OnceClosure callback;
+    OnceClosure task;
+  };
+
+  struct Source : public GSource {
+    EventInjector* injector;
+  };
+
+  void AddEventHelper(int delay_ms, OnceClosure callback, OnceClosure task) {
+    Time last_time;
+    if (!events_.empty())
+      last_time = events_.back().time;
+    else
+      last_time = Time::NowFromSystemTime();
+
+    Time future = last_time + TimeDelta::FromMilliseconds(delay_ms);
+    EventInjector::Event event = {future, std::move(callback), std::move(task)};
+    events_.push_back(std::move(event));
+  }
+
+  static gboolean Prepare(GSource* source, gint* timeout_ms) {
+    *timeout_ms = static_cast<Source*>(source)->injector->HandlePrepare();
+    return FALSE;
+  }
+
+  static gboolean Check(GSource* source) {
+    return static_cast<Source*>(source)->injector->HandleCheck();
+  }
+
+  static gboolean Dispatch(GSource* source,
+                           GSourceFunc unused_func,
+                           gpointer unused_data) {
+    static_cast<Source*>(source)->injector->HandleDispatch();
+    return TRUE;
+  }
+
+  Source* source_;
+  std::vector<Event> events_;
+  int processed_events_;
+  static GSourceFuncs SourceFuncs;
+  DISALLOW_COPY_AND_ASSIGN(EventInjector);
+};
+
+GSourceFuncs EventInjector::SourceFuncs = {EventInjector::Prepare,
+                                           EventInjector::Check,
+                                           EventInjector::Dispatch, nullptr};
+
+void IncrementInt(int* value) {
+  ++*value;
+}
+
+// Checks how many events have been processed by the injector.
+void ExpectProcessedEvents(EventInjector* injector, int count) {
+  EXPECT_EQ(injector->processed_events(), count);
+}
+
+// Posts a task on the current message loop.
+void PostMessageLoopTask(const Location& from_here, OnceClosure task) {
+  ThreadTaskRunnerHandle::Get()->PostTask(from_here, std::move(task));
+}
+
+// Test fixture.
+class MessagePumpGLibTest : public testing::Test {
+ public:
+  MessagePumpGLibTest() : loop_(nullptr), injector_(nullptr) {}
+
+  // Overridden from testing::Test:
+  void SetUp() override {
+    loop_ = new MessageLoop(MessageLoop::TYPE_UI);
+    injector_ = new EventInjector();
+  }
+  void TearDown() override {
+    delete injector_;
+    injector_ = nullptr;
+    delete loop_;
+    loop_ = nullptr;
+  }
+
+  MessageLoop* loop() const { return loop_; }
+  EventInjector* injector() const { return injector_; }
+
+ private:
+  MessageLoop* loop_;
+  EventInjector* injector_;
+  DISALLOW_COPY_AND_ASSIGN(MessagePumpGLibTest);
+};
+
+}  // namespace
+
+TEST_F(MessagePumpGLibTest, TestQuit) {
+  // Checks that Quit works and that the basic infrastructure is working.
+
+  // Quit from a task
+  RunLoop().RunUntilIdle();
+  EXPECT_EQ(0, injector()->processed_events());
+
+  injector()->Reset();
+  // Quit from an event
+  injector()->AddEvent(0, RunLoop::QuitCurrentWhenIdleClosureDeprecated());
+  RunLoop().Run();
+  EXPECT_EQ(1, injector()->processed_events());
+}
+
+TEST_F(MessagePumpGLibTest, TestEventTaskInterleave) {
+  // Checks that tasks posted by events are executed before the next event if
+  // the posted task queue is empty.
+  // MessageLoop doesn't make strong guarantees that this is the case, but the
+  // current implementation ensures it, and the tests below rely on it.
+  // If changes cause this test to fail, it is reasonable to change it, but
+  // TestWorkWhileWaitingForEvents and TestEventsWhileWaitingForWork have to be
+  // changed accordingly, otherwise they can become flaky.
+  injector()->AddEventAsTask(0, DoNothing());
+  OnceClosure check_task =
+      BindOnce(&ExpectProcessedEvents, Unretained(injector()), 2);
+  OnceClosure posted_task =
+      BindOnce(&PostMessageLoopTask, FROM_HERE, std::move(check_task));
+  injector()->AddEventAsTask(0, std::move(posted_task));
+  injector()->AddEventAsTask(0, DoNothing());
+  injector()->AddEvent(0, RunLoop::QuitCurrentWhenIdleClosureDeprecated());
+  RunLoop().Run();
+  EXPECT_EQ(4, injector()->processed_events());
+
+  injector()->Reset();
+  injector()->AddEventAsTask(0, DoNothing());
+  check_task = BindOnce(&ExpectProcessedEvents, Unretained(injector()), 2);
+  posted_task =
+      BindOnce(&PostMessageLoopTask, FROM_HERE, std::move(check_task));
+  injector()->AddEventAsTask(0, std::move(posted_task));
+  injector()->AddEventAsTask(10, DoNothing());
+  injector()->AddEvent(0, RunLoop::QuitCurrentWhenIdleClosureDeprecated());
+  RunLoop().Run();
+  EXPECT_EQ(4, injector()->processed_events());
+}
+
+TEST_F(MessagePumpGLibTest, TestWorkWhileWaitingForEvents) {
+  int task_count = 0;
+  // Tests that we process tasks while waiting for new events.
+  // The event queue is empty at first.
+  for (int i = 0; i < 10; ++i) {
+    loop()->task_runner()->PostTask(FROM_HERE,
+                                    BindOnce(&IncrementInt, &task_count));
+  }
+  // After all the previous tasks have executed, enqueue an event that will
+  // quit.
+  loop()->task_runner()->PostTask(
+      FROM_HERE, BindOnce(&EventInjector::AddEvent, Unretained(injector()), 0,
+                          RunLoop::QuitCurrentWhenIdleClosureDeprecated()));
+  RunLoop().Run();
+  ASSERT_EQ(10, task_count);
+  EXPECT_EQ(1, injector()->processed_events());
+
+  // Tests that we process delayed tasks while waiting for new events.
+  injector()->Reset();
+  task_count = 0;
+  for (int i = 0; i < 10; ++i) {
+    loop()->task_runner()->PostDelayedTask(FROM_HERE,
+                                           BindOnce(&IncrementInt, &task_count),
+                                           TimeDelta::FromMilliseconds(10 * i));
+  }
+  // After all the previous tasks have executed, enqueue an event that will
+  // quit.
+  // This relies on the fact that delayed tasks are executed in delay order.
+  // That is verified in message_loop_unittest.cc.
+  loop()->task_runner()->PostDelayedTask(
+      FROM_HERE,
+      BindOnce(&EventInjector::AddEvent, Unretained(injector()), 10,
+               RunLoop::QuitCurrentWhenIdleClosureDeprecated()),
+      TimeDelta::FromMilliseconds(150));
+  RunLoop().Run();
+  ASSERT_EQ(10, task_count);
+  EXPECT_EQ(1, injector()->processed_events());
+}
+
+TEST_F(MessagePumpGLibTest, TestEventsWhileWaitingForWork) {
+  // Tests that we process events while waiting for work.
+  // The event queue is empty at first.
+  for (int i = 0; i < 10; ++i) {
+    injector()->AddDummyEvent(0);
+  }
+  // After all the events have been processed, post a task that will check that
+  // the events have been processed (note: the task executes after the event
+  // that posted it has been handled, so we expect 11 at that point).
+  OnceClosure check_task =
+      BindOnce(&ExpectProcessedEvents, Unretained(injector()), 11);
+  OnceClosure posted_task =
+      BindOnce(&PostMessageLoopTask, FROM_HERE, std::move(check_task));
+  injector()->AddEventAsTask(10, std::move(posted_task));
+
+  // And then quit (relies on the condition tested by TestEventTaskInterleave).
+  injector()->AddEvent(10, RunLoop::QuitCurrentWhenIdleClosureDeprecated());
+  RunLoop().Run();
+
+  EXPECT_EQ(12, injector()->processed_events());
+}
+
+namespace {
+
+// This class is a helper for the concurrent events / posted tasks test below.
+// It will quit the main loop once enough tasks and events have been processed,
+// while making sure there is always work to do and events in the queue.
+class ConcurrentHelper : public RefCounted<ConcurrentHelper> {
+ public:
+  explicit ConcurrentHelper(EventInjector* injector)
+      : injector_(injector),
+        event_count_(kStartingEventCount),
+        task_count_(kStartingTaskCount) {
+  }
+
+  void FromTask() {
+    if (task_count_ > 0) {
+      --task_count_;
+    }
+    if (task_count_ == 0 && event_count_ == 0) {
+      RunLoop::QuitCurrentWhenIdleDeprecated();
+    } else {
+      ThreadTaskRunnerHandle::Get()->PostTask(
+          FROM_HERE, BindOnce(&ConcurrentHelper::FromTask, this));
+    }
+  }
+
+  void FromEvent() {
+    if (event_count_ > 0) {
+      --event_count_;
+    }
+    if (task_count_ == 0 && event_count_ == 0) {
+      RunLoop::QuitCurrentWhenIdleDeprecated();
+    } else {
+      injector_->AddEventAsTask(0,
+                                BindOnce(&ConcurrentHelper::FromEvent, this));
+    }
+  }
+
+  int event_count() const { return event_count_; }
+  int task_count() const { return task_count_; }
+
+ private:
+  friend class RefCounted<ConcurrentHelper>;
+
+  ~ConcurrentHelper() {}
+
+  static const int kStartingEventCount = 20;
+  static const int kStartingTaskCount = 20;
+
+  EventInjector* injector_;
+  int event_count_;
+  int task_count_;
+};
+
+}  // namespace
+
+TEST_F(MessagePumpGLibTest, TestConcurrentEventPostedTask) {
+  // Tests that posted tasks don't starve events, nor the opposite.
+  // We use the helper class above. We keep both event and posted task queues
+  // full, the helper verifies that both tasks and events get processed.
+  // If that is not the case, either event_count_ or task_count_ will not get
+  // to 0, and MessageLoop::QuitWhenIdle() will never be called.
+  scoped_refptr<ConcurrentHelper> helper = new ConcurrentHelper(injector());
+
+  // Add 2 events to the queue to make sure it is always full (since we remove
+  // an event from the queue before processing it).
+  injector()->AddEventAsTask(0, BindOnce(&ConcurrentHelper::FromEvent, helper));
+  injector()->AddEventAsTask(0, BindOnce(&ConcurrentHelper::FromEvent, helper));
+
+  // Similarly post 2 tasks.
+  loop()->task_runner()->PostTask(
+      FROM_HERE, BindOnce(&ConcurrentHelper::FromTask, helper));
+  loop()->task_runner()->PostTask(
+      FROM_HERE, BindOnce(&ConcurrentHelper::FromTask, helper));
+
+  RunLoop().Run();
+  EXPECT_EQ(0, helper->event_count());
+  EXPECT_EQ(0, helper->task_count());
+}
+
+namespace {
+
+void AddEventsAndDrainGLib(EventInjector* injector) {
+  // Add a couple of dummy events
+  injector->AddDummyEvent(0);
+  injector->AddDummyEvent(0);
+  // Then add an event that will quit the main loop.
+  injector->AddEvent(0, RunLoop::QuitCurrentWhenIdleClosureDeprecated());
+
+  // Post a couple of dummy tasks
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, DoNothing());
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, DoNothing());
+
+  // Drain the events
+  while (g_main_context_pending(nullptr)) {
+    g_main_context_iteration(nullptr, FALSE);
+  }
+}
+
+}  // namespace
+
+TEST_F(MessagePumpGLibTest, TestDrainingGLib) {
+  // Tests that draining events using GLib works.
+  loop()->task_runner()->PostTask(
+      FROM_HERE, BindOnce(&AddEventsAndDrainGLib, Unretained(injector())));
+  RunLoop().Run();
+
+  EXPECT_EQ(3, injector()->processed_events());
+}
+
+namespace {
+
+// Helper class that lets us run the GLib message loop.
+class GLibLoopRunner : public RefCounted<GLibLoopRunner> {
+ public:
+  GLibLoopRunner() : quit_(false) { }
+
+  void RunGLib() {
+    while (!quit_) {
+      g_main_context_iteration(nullptr, TRUE);
+    }
+  }
+
+  void RunLoop() {
+    while (!quit_) {
+      g_main_context_iteration(nullptr, TRUE);
+    }
+  }
+
+  void Quit() {
+    quit_ = true;
+  }
+
+  void Reset() {
+    quit_ = false;
+  }
+
+ private:
+  friend class RefCounted<GLibLoopRunner>;
+
+  ~GLibLoopRunner() {}
+
+  bool quit_;
+};
+
+void TestGLibLoopInternal(EventInjector* injector) {
+  // Allow tasks to be processed from 'native' event loops.
+  MessageLoopCurrent::Get()->SetNestableTasksAllowed(true);
+  scoped_refptr<GLibLoopRunner> runner = new GLibLoopRunner();
+
+  int task_count = 0;
+  // Add a couple of dummy events
+  injector->AddDummyEvent(0);
+  injector->AddDummyEvent(0);
+  // Post a couple of dummy tasks
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          BindOnce(&IncrementInt, &task_count));
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          BindOnce(&IncrementInt, &task_count));
+  // Delayed events
+  injector->AddDummyEvent(10);
+  injector->AddDummyEvent(10);
+  // Delayed work
+  ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+      FROM_HERE, BindOnce(&IncrementInt, &task_count),
+      TimeDelta::FromMilliseconds(30));
+  ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+      FROM_HERE, BindOnce(&GLibLoopRunner::Quit, runner),
+      TimeDelta::FromMilliseconds(40));
+
+  // Run a nested, straight GLib message loop.
+  runner->RunGLib();
+
+  ASSERT_EQ(3, task_count);
+  EXPECT_EQ(4, injector->processed_events());
+  RunLoop::QuitCurrentWhenIdleDeprecated();
+}
+
+void TestGtkLoopInternal(EventInjector* injector) {
+  // Allow tasks to be processed from 'native' event loops.
+  MessageLoopCurrent::Get()->SetNestableTasksAllowed(true);
+  scoped_refptr<GLibLoopRunner> runner = new GLibLoopRunner();
+
+  int task_count = 0;
+  // Add a couple of dummy events
+  injector->AddDummyEvent(0);
+  injector->AddDummyEvent(0);
+  // Post a couple of dummy tasks
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          BindOnce(&IncrementInt, &task_count));
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          BindOnce(&IncrementInt, &task_count));
+  // Delayed events
+  injector->AddDummyEvent(10);
+  injector->AddDummyEvent(10);
+  // Delayed work
+  ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+      FROM_HERE, BindOnce(&IncrementInt, &task_count),
+      TimeDelta::FromMilliseconds(30));
+  ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+      FROM_HERE, BindOnce(&GLibLoopRunner::Quit, runner),
+      TimeDelta::FromMilliseconds(40));
+
+  // Run a nested, straight Gtk message loop.
+  runner->RunLoop();
+
+  ASSERT_EQ(3, task_count);
+  EXPECT_EQ(4, injector->processed_events());
+  RunLoop::QuitCurrentWhenIdleDeprecated();
+}
+
+}  // namespace
+
+TEST_F(MessagePumpGLibTest, TestGLibLoop) {
+  // Tests that events and posted tasks are correctly executed if the message
+  // loop is not run by MessageLoop::Run() but by a straight GLib loop.
+  // Note that in this case we don't make strong guarantees about niceness
+  // between events and posted tasks.
+  loop()->task_runner()->PostTask(
+      FROM_HERE, BindOnce(&TestGLibLoopInternal, Unretained(injector())));
+  RunLoop().Run();
+}
+
+TEST_F(MessagePumpGLibTest, TestGtkLoop) {
+  // Tests that events and posted tasks are correctly executed if the message
+  // loop is not run by MessageLoop::Run() but by a straight Gtk loop.
+  // Note that in this case we don't make strong guarantees about niceness
+  // between events and posted tasks.
+  loop()->task_runner()->PostTask(
+      FROM_HERE, BindOnce(&TestGtkLoopInternal, Unretained(injector())));
+  RunLoop().Run();
+}
+
+}  // namespace base
diff --git a/base/message_loop/message_pump_io_ios.cc b/base/message_loop/message_pump_io_ios.cc
new file mode 100644
index 0000000..9b43e8e
--- /dev/null
+++ b/base/message_loop/message_pump_io_ios.cc
@@ -0,0 +1,182 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_loop/message_pump_io_ios.h"
+
+namespace base {
+
+MessagePumpIOSForIO::FdWatchController::FdWatchController(
+    const Location& from_here)
+    : FdWatchControllerInterface(from_here) {}
+
+MessagePumpIOSForIO::FdWatchController::~FdWatchController() {
+  StopWatchingFileDescriptor();
+}
+
+bool MessagePumpIOSForIO::FdWatchController::StopWatchingFileDescriptor() {
+  if (fdref_ == NULL)
+    return true;
+
+  CFFileDescriptorDisableCallBacks(fdref_.get(), callback_types_);
+  if (pump_)
+    pump_->RemoveRunLoopSource(fd_source_);
+  fd_source_.reset();
+  fdref_.reset();
+  callback_types_ = 0;
+  pump_.reset();
+  watcher_ = NULL;
+  return true;
+}
+
+void MessagePumpIOSForIO::FdWatchController::Init(CFFileDescriptorRef fdref,
+                                                  CFOptionFlags callback_types,
+                                                  CFRunLoopSourceRef fd_source,
+                                                  bool is_persistent) {
+  DCHECK(fdref);
+  DCHECK(!fdref_.is_valid());
+
+  is_persistent_ = is_persistent;
+  fdref_.reset(fdref);
+  callback_types_ = callback_types;
+  fd_source_.reset(fd_source);
+}
+
+void MessagePumpIOSForIO::FdWatchController::OnFileCanReadWithoutBlocking(
+    int fd,
+    MessagePumpIOSForIO* pump) {
+  DCHECK(callback_types_ & kCFFileDescriptorReadCallBack);
+  watcher_->OnFileCanReadWithoutBlocking(fd);
+}
+
+void MessagePumpIOSForIO::FdWatchController::OnFileCanWriteWithoutBlocking(
+    int fd,
+    MessagePumpIOSForIO* pump) {
+  DCHECK(callback_types_ & kCFFileDescriptorWriteCallBack);
+  watcher_->OnFileCanWriteWithoutBlocking(fd);
+}
+
+MessagePumpIOSForIO::MessagePumpIOSForIO() : weak_factory_(this) {
+}
+
+MessagePumpIOSForIO::~MessagePumpIOSForIO() {
+}
+
+bool MessagePumpIOSForIO::WatchFileDescriptor(int fd,
+                                              bool persistent,
+                                              int mode,
+                                              FdWatchController* controller,
+                                              FdWatcher* delegate) {
+  DCHECK_GE(fd, 0);
+  DCHECK(controller);
+  DCHECK(delegate);
+  DCHECK(mode == WATCH_READ || mode == WATCH_WRITE || mode == WATCH_READ_WRITE);
+
+  // WatchFileDescriptor should be called on the pump thread. It is not
+  // threadsafe, and your watcher may never be registered.
+  DCHECK(watch_file_descriptor_caller_checker_.CalledOnValidThread());
+
+  CFFileDescriptorContext source_context = {0};
+  source_context.info = controller;
+
+  CFOptionFlags callback_types = 0;
+  if (mode & WATCH_READ) {
+    callback_types |= kCFFileDescriptorReadCallBack;
+  }
+  if (mode & WATCH_WRITE) {
+    callback_types |= kCFFileDescriptorWriteCallBack;
+  }
+
+  CFFileDescriptorRef fdref = controller->fdref_.get();
+  if (fdref == NULL) {
+    base::ScopedCFTypeRef<CFFileDescriptorRef> scoped_fdref(
+        CFFileDescriptorCreate(
+            kCFAllocatorDefault, fd, false, HandleFdIOEvent, &source_context));
+    if (scoped_fdref == NULL) {
+      NOTREACHED() << "CFFileDescriptorCreate failed";
+      return false;
+    }
+
+    CFFileDescriptorEnableCallBacks(scoped_fdref, callback_types);
+
+    // TODO(wtc): what should the 'order' argument be?
+    base::ScopedCFTypeRef<CFRunLoopSourceRef> scoped_fd_source(
+        CFFileDescriptorCreateRunLoopSource(
+            kCFAllocatorDefault, scoped_fdref, 0));
+    if (scoped_fd_source == NULL) {
+      NOTREACHED() << "CFFileDescriptorCreateRunLoopSource failed";
+      return false;
+    }
+    CFRunLoopAddSource(run_loop(), scoped_fd_source, kCFRunLoopCommonModes);
+
+    // Transfer ownership of scoped_fdref and fd_source to controller.
+    controller->Init(scoped_fdref.release(), callback_types,
+                     scoped_fd_source.release(), persistent);
+  } else {
+    // It's illegal to use this function to listen on 2 separate fds with the
+    // same |controller|.
+    if (CFFileDescriptorGetNativeDescriptor(fdref) != fd) {
+      NOTREACHED() << "FDs don't match: "
+                   << CFFileDescriptorGetNativeDescriptor(fdref)
+                   << " != " << fd;
+      return false;
+    }
+    if (persistent != controller->is_persistent_) {
+      NOTREACHED() << "persistent doesn't match";
+      return false;
+    }
+
+    // Combine old/new event masks.
+    CFFileDescriptorDisableCallBacks(fdref, controller->callback_types_);
+    controller->callback_types_ |= callback_types;
+    CFFileDescriptorEnableCallBacks(fdref, controller->callback_types_);
+  }
+
+  controller->set_watcher(delegate);
+  controller->set_pump(weak_factory_.GetWeakPtr());
+
+  return true;
+}
+
+void MessagePumpIOSForIO::RemoveRunLoopSource(CFRunLoopSourceRef source) {
+  CFRunLoopRemoveSource(run_loop(), source, kCFRunLoopCommonModes);
+}
+
+// static
+void MessagePumpIOSForIO::HandleFdIOEvent(CFFileDescriptorRef fdref,
+                                          CFOptionFlags callback_types,
+                                          void* context) {
+  FdWatchController* controller = static_cast<FdWatchController*>(context);
+  DCHECK_EQ(fdref, controller->fdref_.get());
+
+  // Ensure that |fdref| will remain live for the duration of this function
+  // call even if |controller| is deleted or |StopWatchingFileDescriptor()| is
+  // called, either of which will cause |fdref| to be released.
+  ScopedCFTypeRef<CFFileDescriptorRef> scoped_fdref(
+      fdref, base::scoped_policy::RETAIN);
+
+  int fd = CFFileDescriptorGetNativeDescriptor(fdref);
+  MessagePumpIOSForIO* pump = controller->pump().get();
+  DCHECK(pump);
+  if (callback_types & kCFFileDescriptorWriteCallBack)
+    controller->OnFileCanWriteWithoutBlocking(fd, pump);
+
+  // Perform the read callback only if the file descriptor has not been
+  // invalidated in the write callback. As |FdWatchController| invalidates
+  // its file descriptor on destruction, the file descriptor being valid also
+  // guarantees that |controller| has not been deleted.
+  if (callback_types & kCFFileDescriptorReadCallBack &&
+      CFFileDescriptorIsValid(fdref)) {
+    DCHECK_EQ(fdref, controller->fdref_.get());
+    controller->OnFileCanReadWithoutBlocking(fd, pump);
+  }
+
+  // Re-enable callbacks after the read/write if the file descriptor is still
+  // valid and the controller is persistent.
+  if (CFFileDescriptorIsValid(fdref) && controller->is_persistent_) {
+    DCHECK_EQ(fdref, controller->fdref_.get());
+    CFFileDescriptorEnableCallBacks(fdref, callback_types);
+  }
+}
+
+}  // namespace base
diff --git a/base/message_loop/message_pump_io_ios.h b/base/message_loop/message_pump_io_ios.h
new file mode 100644
index 0000000..b390544
--- /dev/null
+++ b/base/message_loop/message_pump_io_ios.h
@@ -0,0 +1,91 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MESSAGE_LOOP_MESSAGE_PUMP_IO_IOS_H_
+#define BASE_MESSAGE_LOOP_MESSAGE_PUMP_IO_IOS_H_
+
+#include "base/base_export.h"
+#include "base/mac/scoped_cffiledescriptorref.h"
+#include "base/mac/scoped_cftyperef.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/weak_ptr.h"
+#include "base/message_loop/message_pump_mac.h"
+#include "base/message_loop/watchable_io_message_pump_posix.h"
+#include "base/threading/thread_checker.h"
+
+namespace base {
+
+// This file introduces a class to monitor sockets and issue callbacks when
+// sockets are ready for I/O on iOS.
+class BASE_EXPORT MessagePumpIOSForIO : public MessagePumpNSRunLoop,
+                                        public WatchableIOMessagePumpPosix {
+ public:
+  class FdWatchController : public FdWatchControllerInterface {
+   public:
+    explicit FdWatchController(const Location& from_here);
+
+    // Implicitly calls StopWatchingFileDescriptor.
+    ~FdWatchController() override;
+
+    // FdWatchControllerInterface:
+    bool StopWatchingFileDescriptor() override;
+
+   private:
+    friend class MessagePumpIOSForIO;
+    friend class MessagePumpIOSForIOTest;
+
+    // Called by MessagePumpIOSForIO, ownership of |fdref| and |fd_source|
+    // is transferred to this object.
+    void Init(CFFileDescriptorRef fdref,
+              CFOptionFlags callback_types,
+              CFRunLoopSourceRef fd_source,
+              bool is_persistent);
+
+    void set_pump(base::WeakPtr<MessagePumpIOSForIO> pump) { pump_ = pump; }
+    const base::WeakPtr<MessagePumpIOSForIO>& pump() const { return pump_; }
+
+    void set_watcher(FdWatcher* watcher) { watcher_ = watcher; }
+
+    void OnFileCanReadWithoutBlocking(int fd, MessagePumpIOSForIO* pump);
+    void OnFileCanWriteWithoutBlocking(int fd, MessagePumpIOSForIO* pump);
+
+    bool is_persistent_ = false;  // false if this event is one-shot.
+    base::mac::ScopedCFFileDescriptorRef fdref_;
+    CFOptionFlags callback_types_ = 0;
+    base::ScopedCFTypeRef<CFRunLoopSourceRef> fd_source_;
+    base::WeakPtr<MessagePumpIOSForIO> pump_;
+    FdWatcher* watcher_ = nullptr;
+
+    DISALLOW_COPY_AND_ASSIGN(FdWatchController);
+  };
+
+  MessagePumpIOSForIO();
+  ~MessagePumpIOSForIO() override;
+
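+  // Example usage (a hedged sketch; |MyFdWatcher|, |pump|, |fd| and
+  // |my_fd_watcher| are illustrative names, not part of this API):
+  //
+  //   class MyFdWatcher : public MessagePumpIOSForIO::FdWatcher {
+  //     void OnFileCanReadWithoutBlocking(int fd) override { /* read */ }
+  //     void OnFileCanWriteWithoutBlocking(int fd) override { /* write */ }
+  //   };
+  //
+  //   MessagePumpIOSForIO::FdWatchController controller(FROM_HERE);
+  //   pump->WatchFileDescriptor(fd, /*persistent=*/true,
+  //                             MessagePumpIOSForIO::WATCH_READ,
+  //                             &controller, &my_fd_watcher);
+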
+  bool WatchFileDescriptor(int fd,
+                           bool persistent,
+                           int mode,
+                           FdWatchController* controller,
+                           FdWatcher* delegate);
+
+  void RemoveRunLoopSource(CFRunLoopSourceRef source);
+
+ private:
+  friend class MessagePumpIOSForIOTest;
+
+  static void HandleFdIOEvent(CFFileDescriptorRef fdref,
+                              CFOptionFlags callback_types,
+                              void* context);
+
+  ThreadChecker watch_file_descriptor_caller_checker_;
+
+  base::WeakPtrFactory<MessagePumpIOSForIO> weak_factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(MessagePumpIOSForIO);
+};
+
+}  // namespace base
+
+#endif  // BASE_MESSAGE_LOOP_MESSAGE_PUMP_IO_IOS_H_
diff --git a/base/message_loop/message_pump_io_ios_unittest.cc b/base/message_loop/message_pump_io_ios_unittest.cc
new file mode 100644
index 0000000..4d15d44
--- /dev/null
+++ b/base/message_loop/message_pump_io_ios_unittest.cc
@@ -0,0 +1,153 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_loop/message_pump_io_ios.h"
+
+#include <unistd.h>
+
+#include "base/macros.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/test/gtest_util.h"
+#include "base/threading/thread.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+class MessagePumpIOSForIOTest : public testing::Test {
+ protected:
+  MessagePumpIOSForIOTest() = default;
+  ~MessagePumpIOSForIOTest() override = default;
+
+  void SetUp() override {
+    int ret = pipe(pipefds_);
+    ASSERT_EQ(0, ret);
+    ret = pipe(alternate_pipefds_);
+    ASSERT_EQ(0, ret);
+  }
+
+  void TearDown() override {
+    if (IGNORE_EINTR(close(pipefds_[0])) < 0)
+      PLOG(ERROR) << "close";
+    if (IGNORE_EINTR(close(pipefds_[1])) < 0)
+      PLOG(ERROR) << "close";
+  }
+
+  void HandleFdIOEvent(MessagePumpForIO::FdWatchController* watcher) {
+    MessagePumpIOSForIO::HandleFdIOEvent(watcher->fdref_.get(),
+        kCFFileDescriptorReadCallBack | kCFFileDescriptorWriteCallBack,
+        watcher);
+  }
+
+  int pipefds_[2];
+  int alternate_pipefds_[2];
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MessagePumpIOSForIOTest);
+};
+
+namespace {
+
+// Concrete implementation of MessagePumpIOSForIO::FdWatcher that does
+// nothing useful.
+class StupidWatcher : public MessagePumpIOSForIO::FdWatcher {
+ public:
+  ~StupidWatcher() override {}
+
+  // base::MessagePumpIOSForIO::FdWatcher interface
+  void OnFileCanReadWithoutBlocking(int fd) override {}
+  void OnFileCanWriteWithoutBlocking(int fd) override {}
+};
+
+class BaseWatcher : public MessagePumpIOSForIO::FdWatcher {
+ public:
+  BaseWatcher(MessagePumpIOSForIO::FdWatchController* controller)
+      : controller_(controller) {
+    DCHECK(controller_);
+  }
+  ~BaseWatcher() override {}
+
+  // MessagePumpIOSForIO::FdWatcher interface
+  void OnFileCanReadWithoutBlocking(int /* fd */) override { NOTREACHED(); }
+
+  void OnFileCanWriteWithoutBlocking(int /* fd */) override { NOTREACHED(); }
+
+ protected:
+  MessagePumpIOSForIO::FdWatchController* controller_;
+};
+
+class DeleteWatcher : public BaseWatcher {
+ public:
+  explicit DeleteWatcher(MessagePumpIOSForIO::FdWatchController* controller)
+      : BaseWatcher(controller) {}
+
+  ~DeleteWatcher() override { DCHECK(!controller_); }
+
+  void OnFileCanWriteWithoutBlocking(int /* fd */) override {
+    DCHECK(controller_);
+    delete controller_;
+    controller_ = NULL;
+  }
+};
+
+TEST_F(MessagePumpIOSForIOTest, DeleteWatcher) {
+  std::unique_ptr<MessagePumpIOSForIO> pump(new MessagePumpIOSForIO);
+  MessagePumpIOSForIO::FdWatchController* watcher =
+      new MessagePumpIOSForIO::FdWatchController(FROM_HERE);
+  DeleteWatcher delegate(watcher);
+  pump->WatchFileDescriptor(pipefds_[1],
+      false, MessagePumpIOSForIO::WATCH_READ_WRITE, watcher, &delegate);
+
+  // Spoof a callback.
+  HandleFdIOEvent(watcher);
+}
+
+class StopWatcher : public BaseWatcher {
+ public:
+  StopWatcher(MessagePumpIOSForIO::FdWatchController* controller,
+              MessagePumpIOSForIO* pump,
+              int fd_to_start_watching = -1)
+      : BaseWatcher(controller),
+        pump_(pump),
+        fd_to_start_watching_(fd_to_start_watching) {}
+
+  ~StopWatcher() override {}
+
+  void OnFileCanWriteWithoutBlocking(int /* fd */) override {
+    controller_->StopWatchingFileDescriptor();
+    if (fd_to_start_watching_ >= 0) {
+      pump_->WatchFileDescriptor(fd_to_start_watching_,
+          false, MessagePumpIOSForIO::WATCH_READ_WRITE, controller_, this);
+    }
+  }
+
+ private:
+  MessagePumpIOSForIO* pump_;
+  int fd_to_start_watching_;
+};
+
+TEST_F(MessagePumpIOSForIOTest, StopWatcher) {
+  std::unique_ptr<MessagePumpIOSForIO> pump(new MessagePumpIOSForIO);
+  MessagePumpIOSForIO::FdWatchController watcher(FROM_HERE);
+  StopWatcher delegate(&watcher, pump.get());
+  pump->WatchFileDescriptor(pipefds_[1],
+      false, MessagePumpIOSForIO::WATCH_READ_WRITE, &watcher, &delegate);
+
+  // Spoof a callback.
+  HandleFdIOEvent(&watcher);
+}
+
+TEST_F(MessagePumpIOSForIOTest, StopWatcherAndWatchSomethingElse) {
+  std::unique_ptr<MessagePumpIOSForIO> pump(new MessagePumpIOSForIO);
+  MessagePumpIOSForIO::FdWatchController watcher(FROM_HERE);
+  StopWatcher delegate(&watcher, pump.get(), alternate_pipefds_[1]);
+  pump->WatchFileDescriptor(pipefds_[1],
+      false, MessagePumpIOSForIO::WATCH_READ_WRITE, &watcher, &delegate);
+
+  // Spoof a callback.
+  HandleFdIOEvent(&watcher);
+}
+
+}  // namespace
+
+}  // namespace base
diff --git a/base/message_loop/message_pump_libevent.cc b/base/message_loop/message_pump_libevent.cc
new file mode 100644
index 0000000..2a595e5
--- /dev/null
+++ b/base/message_loop/message_pump_libevent.cc
@@ -0,0 +1,350 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_loop/message_pump_libevent.h"
+
+#include <errno.h>
+#include <unistd.h>
+
+#include <utility>
+
+#include "base/auto_reset.h"
+#include "base/compiler_specific.h"
+#include "base/files/file_util.h"
+#include "base/logging.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/third_party/libevent/event.h"
+#include "base/time/time.h"
+#include "base/trace_event/trace_event.h"
+#include "build/build_config.h"
+
+#if defined(OS_MACOSX)
+#include "base/mac/scoped_nsautorelease_pool.h"
+#endif
+
+// Lifecycle of struct event
+// Libevent uses two main data structures:
+// struct event_base (of which there is one per message pump), and
+// struct event (of which there is roughly one per socket).
+// The socket's struct event is created in
+// MessagePumpLibevent::WatchFileDescriptor(),
+// is owned by the FdWatchController, and is destroyed in
+// StopWatchingFileDescriptor().
+// It is moved into and out of lists in struct event_base by
+// the libevent functions event_add() and event_del().
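+//
+// A sketch of that lifecycle (illustrative, mirroring the calls made in
+// WatchFileDescriptor() and StopWatchingFileDescriptor() below):
+//   event* e = new event;
+//   event_set(e, fd, EV_READ | EV_PERSIST, OnLibeventNotification, controller);
+//   event_base_set(event_base_, e);  // Associate with this pump's base.
+//   event_add(e, nullptr);           // Start monitoring the fd.
+//   ...
+//   event_del(e);                    // Stop monitoring; done when watching stops.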
+//
+// TODO(dkegel):
+// At the moment bad things happen if a FdWatchController
+// is active after its MessagePumpLibevent has been destroyed.
+// See MessageLoopTest.FdWatchControllerOutlivesMessageLoop
+// Not clear yet whether that situation occurs in practice,
+// but if it does, we need to fix it.
+
+namespace base {
+
+MessagePumpLibevent::FdWatchController::FdWatchController(
+    const Location& from_here)
+    : FdWatchControllerInterface(from_here) {}
+
+MessagePumpLibevent::FdWatchController::~FdWatchController() {
+  if (event_) {
+    StopWatchingFileDescriptor();
+  }
+  if (was_destroyed_) {
+    DCHECK(!*was_destroyed_);
+    *was_destroyed_ = true;
+  }
+}
+
+bool MessagePumpLibevent::FdWatchController::StopWatchingFileDescriptor() {
+  std::unique_ptr<event> e = ReleaseEvent();
+  if (!e)
+    return true;
+
+  // event_del() is a no-op if the event isn't active.
+  int rv = event_del(e.get());
+  pump_ = nullptr;
+  watcher_ = nullptr;
+  return (rv == 0);
+}
+
+void MessagePumpLibevent::FdWatchController::Init(std::unique_ptr<event> e) {
+  DCHECK(e);
+  DCHECK(!event_);
+
+  event_ = std::move(e);
+}
+
+std::unique_ptr<event> MessagePumpLibevent::FdWatchController::ReleaseEvent() {
+  return std::move(event_);
+}
+
+void MessagePumpLibevent::FdWatchController::OnFileCanReadWithoutBlocking(
+    int fd,
+    MessagePumpLibevent* pump) {
+  // OnFileCanWriteWithoutBlocking() is called first. If its callback stopped
+  // watching the file descriptor, |watcher_| has already been cleared.
+  if (!watcher_)
+    return;
+  watcher_->OnFileCanReadWithoutBlocking(fd);
+}
+
+void MessagePumpLibevent::FdWatchController::OnFileCanWriteWithoutBlocking(
+    int fd,
+    MessagePumpLibevent* pump) {
+  DCHECK(watcher_);
+  watcher_->OnFileCanWriteWithoutBlocking(fd);
+}
+
+MessagePumpLibevent::MessagePumpLibevent()
+    : keep_running_(true),
+      in_run_(false),
+      processed_io_events_(false),
+      event_base_(event_base_new()),
+      wakeup_pipe_in_(-1),
+      wakeup_pipe_out_(-1) {
+  if (!Init())
+    NOTREACHED();
+}
+
+MessagePumpLibevent::~MessagePumpLibevent() {
+  DCHECK(wakeup_event_);
+  DCHECK(event_base_);
+  event_del(wakeup_event_);
+  delete wakeup_event_;
+  if (wakeup_pipe_in_ >= 0) {
+    if (IGNORE_EINTR(close(wakeup_pipe_in_)) < 0)
+      DPLOG(ERROR) << "close";
+  }
+  if (wakeup_pipe_out_ >= 0) {
+    if (IGNORE_EINTR(close(wakeup_pipe_out_)) < 0)
+      DPLOG(ERROR) << "close";
+  }
+  event_base_free(event_base_);
+}
+
+bool MessagePumpLibevent::WatchFileDescriptor(int fd,
+                                              bool persistent,
+                                              int mode,
+                                              FdWatchController* controller,
+                                              FdWatcher* delegate) {
+  DCHECK_GE(fd, 0);
+  DCHECK(controller);
+  DCHECK(delegate);
+  DCHECK(mode == WATCH_READ || mode == WATCH_WRITE || mode == WATCH_READ_WRITE);
+  // WatchFileDescriptor should be called on the pump thread. It is not
+  // threadsafe, and your watcher may never be registered.
+  DCHECK(watch_file_descriptor_caller_checker_.CalledOnValidThread());
+
+  int event_mask = persistent ? EV_PERSIST : 0;
+  if (mode & WATCH_READ) {
+    event_mask |= EV_READ;
+  }
+  if (mode & WATCH_WRITE) {
+    event_mask |= EV_WRITE;
+  }
+
+  std::unique_ptr<event> evt(controller->ReleaseEvent());
+  if (!evt) {
+    // Ownership is transferred to the controller.
+    evt.reset(new event);
+  } else {
+    // Make sure we don't pick up any funky internal libevent masks.
+    int old_interest_mask = evt->ev_events & (EV_READ | EV_WRITE | EV_PERSIST);
+
+    // Combine old/new event masks.
+    event_mask |= old_interest_mask;
+
+    // Must disarm the event before we can reuse it.
+    event_del(evt.get());
+
+    // It's illegal to use this function to listen on 2 separate fds with the
+    // same |controller|.
+    if (EVENT_FD(evt.get()) != fd) {
+      NOTREACHED() << "FDs don't match: " << EVENT_FD(evt.get())
+                   << " != " << fd;
+      return false;
+    }
+  }
+
+  // Set current interest mask and message pump for this event.
+  event_set(evt.get(), fd, event_mask, OnLibeventNotification, controller);
+
+  // Tell libevent which message pump this socket will belong to when we add it.
+  if (event_base_set(event_base_, evt.get())) {
+    DPLOG(ERROR) << "event_base_set(fd=" << EVENT_FD(evt.get()) << ")";
+    return false;
+  }
+
+  // Add this socket to the list of monitored sockets.
+  if (event_add(evt.get(), nullptr)) {
+    DPLOG(ERROR) << "event_add failed(fd=" << EVENT_FD(evt.get()) << ")";
+    return false;
+  }
+
+  controller->Init(std::move(evt));
+  controller->set_watcher(delegate);
+  controller->set_pump(this);
+  return true;
+}
+
+// Tell libevent to break out of inner loop.
+static void timer_callback(int fd, short events, void* context) {
+  event_base_loopbreak((struct event_base*)context);
+}
+
+// Reentrant!
+void MessagePumpLibevent::Run(Delegate* delegate) {
+  AutoReset<bool> auto_reset_keep_running(&keep_running_, true);
+  AutoReset<bool> auto_reset_in_run(&in_run_, true);
+
+  // event_base_loopexit() + EVLOOP_ONCE is leaky, see http://crbug.com/25641.
+  // Instead, make our own timer and reuse it on each call to event_base_loop().
+  std::unique_ptr<event> timer_event(new event);
+
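+  // Each pass runs immediate work, polls libevent without blocking, then runs
+  // delayed work; only if none of those produced work is idle work attempted,
+  // and only then does the loop block in libevent (bounded by a timer when
+  // |delayed_work_time_| is set).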
+  for (;;) {
+#if defined(OS_MACOSX)
+    mac::ScopedNSAutoreleasePool autorelease_pool;
+#endif
+
+    bool did_work = delegate->DoWork();
+    if (!keep_running_)
+      break;
+
+    event_base_loop(event_base_, EVLOOP_NONBLOCK);
+    did_work |= processed_io_events_;
+    processed_io_events_ = false;
+    if (!keep_running_)
+      break;
+
+    did_work |= delegate->DoDelayedWork(&delayed_work_time_);
+    if (!keep_running_)
+      break;
+
+    if (did_work)
+      continue;
+
+    did_work = delegate->DoIdleWork();
+    if (!keep_running_)
+      break;
+
+    if (did_work)
+      continue;
+
+    // EVLOOP_ONCE tells libevent to only block once,
+    // but to service all pending events when it wakes up.
+    if (delayed_work_time_.is_null()) {
+      event_base_loop(event_base_, EVLOOP_ONCE);
+    } else {
+      TimeDelta delay = delayed_work_time_ - TimeTicks::Now();
+      if (delay > TimeDelta()) {
+        struct timeval poll_tv;
+        poll_tv.tv_sec = delay.InSeconds();
+        poll_tv.tv_usec = delay.InMicroseconds() % Time::kMicrosecondsPerSecond;
+        event_set(timer_event.get(), -1, 0, timer_callback, event_base_);
+        event_base_set(event_base_, timer_event.get());
+        event_add(timer_event.get(), &poll_tv);
+        event_base_loop(event_base_, EVLOOP_ONCE);
+        event_del(timer_event.get());
+      } else {
+        // It looks like delayed_work_time_ indicates a time in the past, so we
+        // need to call DoDelayedWork now.
+        delayed_work_time_ = TimeTicks();
+      }
+    }
+
+    if (!keep_running_)
+      break;
+  }
+}
+
+void MessagePumpLibevent::Quit() {
+  DCHECK(in_run_) << "Quit was called outside of Run!";
+  // Tell both libevent and Run that they should break out of their loops.
+  keep_running_ = false;
+  ScheduleWork();
+}
+
+void MessagePumpLibevent::ScheduleWork() {
+  // Tell libevent (in a threadsafe way) that it should break out of its loop.
+  char buf = 0;
+  int nwrite = HANDLE_EINTR(write(wakeup_pipe_in_, &buf, 1));
+  DCHECK(nwrite == 1 || errno == EAGAIN)
+      << "[nwrite:" << nwrite << "] [errno:" << errno << "]";
+}
+
+void MessagePumpLibevent::ScheduleDelayedWork(
+    const TimeTicks& delayed_work_time) {
+  // We know that we can't be blocked on Wait right now since this method can
+  // only be called on the same thread as Run, so we only need to update our
+  // record of how long to sleep when we do sleep.
+  delayed_work_time_ = delayed_work_time;
+}
+
+bool MessagePumpLibevent::Init() {
+  int fds[2];
+  if (!CreateLocalNonBlockingPipe(fds)) {
+    DPLOG(ERROR) << "pipe creation failed";
+    return false;
+  }
+  wakeup_pipe_out_ = fds[0];
+  wakeup_pipe_in_ = fds[1];
+
+  wakeup_event_ = new event;
+  event_set(wakeup_event_, wakeup_pipe_out_, EV_READ | EV_PERSIST,
+            OnWakeup, this);
+  event_base_set(event_base_, wakeup_event_);
+
+  if (event_add(wakeup_event_, nullptr))
+    return false;
+  return true;
+}
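+
+// How the wakeup pipe fits together: ScheduleWork() writes one byte to
+// |wakeup_pipe_in_|; libevent then sees |wakeup_pipe_out_| become readable and
+// invokes OnWakeup(), which drains the byte and breaks out of
+// event_base_loop().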
+
+// static
+void MessagePumpLibevent::OnLibeventNotification(int fd,
+                                                 short flags,
+                                                 void* context) {
+  FdWatchController* controller = static_cast<FdWatchController*>(context);
+  DCHECK(controller);
+  TRACE_EVENT2("toplevel", "MessagePumpLibevent::OnLibeventNotification",
+               "src_file", controller->created_from_location().file_name(),
+               "src_func", controller->created_from_location().function_name());
+  TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION heap_profiler_scope(
+      controller->created_from_location().file_name());
+
+  MessagePumpLibevent* pump = controller->pump();
+  pump->processed_io_events_ = true;
+
+  if ((flags & (EV_READ | EV_WRITE)) == (EV_READ | EV_WRITE)) {
+    // Both callbacks will be called sequentially. The write callback may
+    // delete |controller|, so track its destruction before dispatching the
+    // read callback.
+    bool controller_was_destroyed = false;
+    controller->was_destroyed_ = &controller_was_destroyed;
+    controller->OnFileCanWriteWithoutBlocking(fd, pump);
+    if (!controller_was_destroyed)
+      controller->OnFileCanReadWithoutBlocking(fd, pump);
+    if (!controller_was_destroyed)
+      controller->was_destroyed_ = nullptr;
+  } else if (flags & EV_WRITE) {
+    controller->OnFileCanWriteWithoutBlocking(fd, pump);
+  } else if (flags & EV_READ) {
+    controller->OnFileCanReadWithoutBlocking(fd, pump);
+  }
+}
+
+// Called if a byte is received on the wakeup pipe.
+// static
+void MessagePumpLibevent::OnWakeup(int socket, short flags, void* context) {
+  MessagePumpLibevent* that = static_cast<MessagePumpLibevent*>(context);
+  DCHECK(that->wakeup_pipe_out_ == socket);
+
+  // Remove and discard the wakeup byte.
+  char buf;
+  int nread = HANDLE_EINTR(read(socket, &buf, 1));
+  DCHECK_EQ(nread, 1);
+  that->processed_io_events_ = true;
+  // Tell libevent to break out of inner loop.
+  event_base_loopbreak(that->event_base_);
+}
+
+}  // namespace base
diff --git a/base/message_loop/message_pump_libevent.h b/base/message_loop/message_pump_libevent.h
new file mode 100644
index 0000000..002c36c
--- /dev/null
+++ b/base/message_loop/message_pump_libevent.h
@@ -0,0 +1,123 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MESSAGE_LOOP_MESSAGE_PUMP_LIBEVENT_H_
+#define BASE_MESSAGE_LOOP_MESSAGE_PUMP_LIBEVENT_H_
+
+#include <memory>
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/message_loop/message_pump.h"
+#include "base/message_loop/watchable_io_message_pump_posix.h"
+#include "base/threading/thread_checker.h"
+#include "base/time/time.h"
+
+// Declare structs we need from libevent.h rather than including it
+struct event_base;
+struct event;
+
+namespace base {
+
+// Class to monitor sockets and issue callbacks when sockets are ready for I/O
+// TODO(dkegel): add support for background file IO somehow
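+//
+// Usage sketch (hedged; |fd| and |my_watcher| are illustrative and not part of
+// this API):
+//   MessagePumpLibevent::FdWatchController controller(FROM_HERE);
+//   pump->WatchFileDescriptor(fd, /*persistent=*/true,
+//                             MessagePumpLibevent::WATCH_READ, &controller,
+//                             &my_watcher);  // |my_watcher|: an FdWatcher.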
+class BASE_EXPORT MessagePumpLibevent : public MessagePump,
+                                        public WatchableIOMessagePumpPosix {
+ public:
+  class FdWatchController : public FdWatchControllerInterface {
+   public:
+    explicit FdWatchController(const Location& from_here);
+
+    // Implicitly calls StopWatchingFileDescriptor.
+    ~FdWatchController() override;
+
+    // FdWatchControllerInterface:
+    bool StopWatchingFileDescriptor() override;
+
+   private:
+    friend class MessagePumpLibevent;
+    friend class MessagePumpLibeventTest;
+
+    // Called by MessagePumpLibevent.
+    void Init(std::unique_ptr<event> e);
+
+    // Used by MessagePumpLibevent to take ownership of |event_|.
+    std::unique_ptr<event> ReleaseEvent();
+
+    void set_pump(MessagePumpLibevent* pump) { pump_ = pump; }
+    MessagePumpLibevent* pump() const { return pump_; }
+
+    void set_watcher(FdWatcher* watcher) { watcher_ = watcher; }
+
+    void OnFileCanReadWithoutBlocking(int fd, MessagePumpLibevent* pump);
+    void OnFileCanWriteWithoutBlocking(int fd, MessagePumpLibevent* pump);
+
+    std::unique_ptr<event> event_;
+    MessagePumpLibevent* pump_ = nullptr;
+    FdWatcher* watcher_ = nullptr;
+    // If this pointer is non-NULL, the pointee is set to true in the
+    // destructor.
+    bool* was_destroyed_ = nullptr;
+
+    DISALLOW_COPY_AND_ASSIGN(FdWatchController);
+  };
+
+  MessagePumpLibevent();
+  ~MessagePumpLibevent() override;
+
+  bool WatchFileDescriptor(int fd,
+                           bool persistent,
+                           int mode,
+                           FdWatchController* controller,
+                           FdWatcher* delegate);
+
+  // MessagePump methods:
+  void Run(Delegate* delegate) override;
+  void Quit() override;
+  void ScheduleWork() override;
+  void ScheduleDelayedWork(const TimeTicks& delayed_work_time) override;
+
+ private:
+  friend class MessagePumpLibeventTest;
+
+  // Risky part of constructor.  Returns true on success.
+  bool Init();
+
+  // Called by libevent to tell us a registered FD can be read/written to.
+  static void OnLibeventNotification(int fd, short flags, void* context);
+
+  // Unix pipe used to implement ScheduleWork()
+  // ... callback; called by libevent inside Run() when pipe is ready to read
+  static void OnWakeup(int socket, short flags, void* context);
+
+  // This flag is set to false when Run should return.
+  bool keep_running_;
+
+  // This flag is set when inside Run.
+  bool in_run_;
+
+  // This flag is set if libevent has processed I/O events.
+  bool processed_io_events_;
+
+  // The time at which we should call DoDelayedWork.
+  TimeTicks delayed_work_time_;
+
+  // Libevent dispatcher.  Watches all sockets registered with it, and sends
+  // readiness callbacks when a socket is ready for I/O.
+  event_base* event_base_;
+
+  // ... write end; ScheduleWork() writes a single byte to it
+  int wakeup_pipe_in_;
+  // ... read end; OnWakeup reads it and then breaks Run() out of its sleep
+  int wakeup_pipe_out_;
+  // ... libevent wrapper for read end
+  event* wakeup_event_;
+
+  ThreadChecker watch_file_descriptor_caller_checker_;
+  DISALLOW_COPY_AND_ASSIGN(MessagePumpLibevent);
+};
+
+}  // namespace base
+
+#endif  // BASE_MESSAGE_LOOP_MESSAGE_PUMP_LIBEVENT_H_
diff --git a/base/message_loop/message_pump_libevent_unittest.cc b/base/message_loop/message_pump_libevent_unittest.cc
new file mode 100644
index 0000000..55eb0b4
--- /dev/null
+++ b/base/message_loop/message_pump_libevent_unittest.cc
@@ -0,0 +1,263 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_loop/message_pump_libevent.h"
+
+#include <unistd.h>
+
+#include <memory>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/files/file_util.h"
+#include "base/memory/ptr_util.h"
+#include "base/message_loop/message_loop.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/synchronization/waitable_event_watcher.h"
+#include "base/test/gtest_util.h"
+#include "base/third_party/libevent/event.h"
+#include "base/threading/sequenced_task_runner_handle.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+class MessagePumpLibeventTest : public testing::Test {
+ protected:
+  MessagePumpLibeventTest()
+      : ui_loop_(new MessageLoop(MessageLoop::TYPE_UI)),
+        io_thread_("MessagePumpLibeventTestIOThread") {}
+  ~MessagePumpLibeventTest() override = default;
+
+  void SetUp() override {
+    Thread::Options options(MessageLoop::TYPE_IO, 0);
+    ASSERT_TRUE(io_thread_.StartWithOptions(options));
+    ASSERT_EQ(MessageLoop::TYPE_IO, io_thread_.message_loop()->type());
+    int ret = pipe(pipefds_);
+    ASSERT_EQ(0, ret);
+  }
+
+  void TearDown() override {
+    if (IGNORE_EINTR(close(pipefds_[0])) < 0)
+      PLOG(ERROR) << "close";
+    if (IGNORE_EINTR(close(pipefds_[1])) < 0)
+      PLOG(ERROR) << "close";
+  }
+
+  void WaitUntilIoThreadStarted() {
+    ASSERT_TRUE(io_thread_.WaitUntilThreadStarted());
+  }
+
+  scoped_refptr<SingleThreadTaskRunner> io_runner() const {
+    return io_thread_.task_runner();
+  }
+
+  void OnLibeventNotification(
+      MessagePumpLibevent* pump,
+      MessagePumpLibevent::FdWatchController* controller) {
+    pump->OnLibeventNotification(0, EV_WRITE | EV_READ, controller);
+  }
+
+  int pipefds_[2];
+  std::unique_ptr<MessageLoop> ui_loop_;
+
+ private:
+  Thread io_thread_;
+};
+
+namespace {
+
+// Concrete implementation of MessagePumpLibevent::FdWatcher that does
+// nothing useful.
+class StupidWatcher : public MessagePumpLibevent::FdWatcher {
+ public:
+  ~StupidWatcher() override = default;
+
+  // base::MessagePumpLibevent::FdWatcher interface
+  void OnFileCanReadWithoutBlocking(int fd) override {}
+  void OnFileCanWriteWithoutBlocking(int fd) override {}
+};
+
+TEST_F(MessagePumpLibeventTest, QuitOutsideOfRun) {
+  std::unique_ptr<MessagePumpLibevent> pump(new MessagePumpLibevent);
+  ASSERT_DCHECK_DEATH(pump->Quit());
+}
+
+class BaseWatcher : public MessagePumpLibevent::FdWatcher {
+ public:
+  explicit BaseWatcher(MessagePumpLibevent::FdWatchController* controller)
+      : controller_(controller) {
+    DCHECK(controller_);
+  }
+  ~BaseWatcher() override = default;
+
+  // base::MessagePumpLibevent::FdWatcher interface
+  void OnFileCanReadWithoutBlocking(int /* fd */) override { NOTREACHED(); }
+
+  void OnFileCanWriteWithoutBlocking(int /* fd */) override { NOTREACHED(); }
+
+ protected:
+  MessagePumpLibevent::FdWatchController* controller_;
+};
+
+class DeleteWatcher : public BaseWatcher {
+ public:
+  explicit DeleteWatcher(MessagePumpLibevent::FdWatchController* controller)
+      : BaseWatcher(controller) {}
+
+  ~DeleteWatcher() override { DCHECK(!controller_); }
+
+  void OnFileCanWriteWithoutBlocking(int /* fd */) override {
+    DCHECK(controller_);
+    delete controller_;
+    controller_ = nullptr;
+  }
+};
+
+TEST_F(MessagePumpLibeventTest, DeleteWatcher) {
+  std::unique_ptr<MessagePumpLibevent> pump(new MessagePumpLibevent);
+  MessagePumpLibevent::FdWatchController* watcher =
+      new MessagePumpLibevent::FdWatchController(FROM_HERE);
+  DeleteWatcher delegate(watcher);
+  pump->WatchFileDescriptor(pipefds_[1],
+      false, MessagePumpLibevent::WATCH_READ_WRITE, watcher, &delegate);
+
+  // Spoof a libevent notification.
+  OnLibeventNotification(pump.get(), watcher);
+}
+
+class StopWatcher : public BaseWatcher {
+ public:
+  explicit StopWatcher(MessagePumpLibevent::FdWatchController* controller)
+      : BaseWatcher(controller) {}
+
+  ~StopWatcher() override = default;
+
+  void OnFileCanWriteWithoutBlocking(int /* fd */) override {
+    controller_->StopWatchingFileDescriptor();
+  }
+};
+
+TEST_F(MessagePumpLibeventTest, StopWatcher) {
+  std::unique_ptr<MessagePumpLibevent> pump(new MessagePumpLibevent);
+  MessagePumpLibevent::FdWatchController watcher(FROM_HERE);
+  StopWatcher delegate(&watcher);
+  pump->WatchFileDescriptor(pipefds_[1],
+      false, MessagePumpLibevent::WATCH_READ_WRITE, &watcher, &delegate);
+
+  // Spoof a libevent notification.
+  OnLibeventNotification(pump.get(), &watcher);
+}
+
+void QuitMessageLoopAndStart(const Closure& quit_closure) {
+  quit_closure.Run();
+
+  RunLoop runloop(RunLoop::Type::kNestableTasksAllowed);
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, runloop.QuitClosure());
+  runloop.Run();
+}
+
+class NestedPumpWatcher : public MessagePumpLibevent::FdWatcher {
+ public:
+  NestedPumpWatcher() = default;
+  ~NestedPumpWatcher() override = default;
+
+  void OnFileCanReadWithoutBlocking(int /* fd */) override {
+    RunLoop runloop;
+    ThreadTaskRunnerHandle::Get()->PostTask(
+        FROM_HERE, BindOnce(&QuitMessageLoopAndStart, runloop.QuitClosure()));
+    runloop.Run();
+  }
+
+  void OnFileCanWriteWithoutBlocking(int /* fd */) override {}
+};
+
+TEST_F(MessagePumpLibeventTest, NestedPumpWatcher) {
+  std::unique_ptr<MessagePumpLibevent> pump(new MessagePumpLibevent);
+  MessagePumpLibevent::FdWatchController watcher(FROM_HERE);
+  NestedPumpWatcher delegate;
+  pump->WatchFileDescriptor(pipefds_[1],
+      false, MessagePumpLibevent::WATCH_READ, &watcher, &delegate);
+
+  // Spoof a libevent notification.
+  OnLibeventNotification(pump.get(), &watcher);
+}
+
+void FatalClosure() {
+  FAIL() << "Reached fatal closure.";
+}
+
+class QuitWatcher : public BaseWatcher {
+ public:
+  QuitWatcher(MessagePumpLibevent::FdWatchController* controller,
+              base::Closure quit_closure)
+      : BaseWatcher(controller), quit_closure_(std::move(quit_closure)) {}
+
+  void OnFileCanReadWithoutBlocking(int /* fd */) override {
+    // Post a fatal closure to the MessageLoop before we quit it.
+    ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, BindOnce(&FatalClosure));
+
+    quit_closure_.Run();
+  }
+
+ private:
+  base::Closure quit_closure_;
+};
+
+void WriteFDWrapper(const int fd,
+                    const char* buf,
+                    int size,
+                    WaitableEvent* event) {
+  ASSERT_TRUE(WriteFileDescriptor(fd, buf, size));
+}
+
+// Tests that MessagePumpLibevent quits immediately when it is quit from
+// libevent's event_base_loop().
+TEST_F(MessagePumpLibeventTest, QuitWatcher) {
+  // Delete the old MessageLoop so that we can manage our own one here.
+  ui_loop_.reset();
+
+  MessagePumpLibevent* pump = new MessagePumpLibevent;  // owned by |loop|.
+  MessageLoop loop(WrapUnique(pump));
+  RunLoop run_loop;
+  MessagePumpLibevent::FdWatchController controller(FROM_HERE);
+  QuitWatcher delegate(&controller, run_loop.QuitClosure());
+  WaitableEvent event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                      WaitableEvent::InitialState::NOT_SIGNALED);
+  std::unique_ptr<WaitableEventWatcher> watcher(new WaitableEventWatcher);
+
+  // Tell the pump to watch the pipe.
+  pump->WatchFileDescriptor(pipefds_[0], false, MessagePumpLibevent::WATCH_READ,
+                            &controller, &delegate);
+
+  // Make the IO thread wait for |event| before writing to pipefds[1].
+  const char buf = 0;
+  WaitableEventWatcher::EventCallback write_fd_task =
+      BindOnce(&WriteFDWrapper, pipefds_[1], &buf, 1);
+  io_runner()->PostTask(
+      FROM_HERE, BindOnce(IgnoreResult(&WaitableEventWatcher::StartWatching),
+                          Unretained(watcher.get()), &event,
+                          std::move(write_fd_task), io_runner()));
+
+  // Queue |event| to signal on |loop|.
+  loop.task_runner()->PostTask(
+      FROM_HERE, BindOnce(&WaitableEvent::Signal, Unretained(&event)));
+
+  // Now run the MessageLoop.
+  run_loop.Run();
+
+  // StartWatching can move |watcher| to IO thread. Release on IO thread.
+  io_runner()->PostTask(FROM_HERE, BindOnce(&WaitableEventWatcher::StopWatching,
+                                            Owned(watcher.release())));
+}
+
+}  // namespace
+
+}  // namespace base
diff --git a/base/message_loop/message_pump_mac.h b/base/message_loop/message_pump_mac.h
new file mode 100644
index 0000000..fa88c3a
--- /dev/null
+++ b/base/message_loop/message_pump_mac.h
@@ -0,0 +1,404 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// The basis for all native run loops on the Mac is the CFRunLoop.  It can be
+// used directly, it can be used as the driving force behind the similar
+// Foundation NSRunLoop, and it can be used to implement higher-level event
+// loops such as the NSApplication event loop.
+//
+// This file introduces a basic CFRunLoop-based implementation of the
+// MessagePump interface called CFRunLoopBase.  CFRunLoopBase contains all
+// of the machinery necessary to dispatch events to a delegate, but does not
+// implement the specific run loop.  Concrete subclasses must provide their
+// own DoRun and Quit implementations.
+//
+// A concrete subclass that just runs a CFRunLoop loop is provided in
+// MessagePumpCFRunLoop.  For an NSRunLoop, the similar MessagePumpNSRunLoop
+// is provided.
+//
+// For the application's event loop, an implementation based on AppKit's
+// NSApplication event system is provided in MessagePumpNSApplication.
+//
+// Typically, MessagePumpNSApplication only makes sense on a Cocoa
+// application's main thread.  If a CFRunLoop-based message pump is needed on
+// any other thread, one of the other concrete subclasses is preferable.
+// MessagePumpMac::Create is defined, which returns a new NSApplication-based
+// or NSRunLoop-based MessagePump subclass depending on which thread it is
+// called on.
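+//
+// A minimal creation sketch (hedged; the concrete subclass returned depends on
+// the calling thread, per MessagePumpMac::Create below):
+//   std::unique_ptr<base::MessagePump> pump = base::MessagePumpMac::Create();
+//   pump->Run(delegate);  // |delegate|: illustrative MessagePump::Delegate*.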
+
+#ifndef BASE_MESSAGE_LOOP_MESSAGE_PUMP_MAC_H_
+#define BASE_MESSAGE_LOOP_MESSAGE_PUMP_MAC_H_
+
+#include "base/message_loop/message_pump.h"
+
+#include <CoreFoundation/CoreFoundation.h>
+
+#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
+#include "base/message_loop/timer_slack.h"
+#include "build/build_config.h"
+
+#if defined(__OBJC__)
+#if defined(OS_IOS)
+#import <Foundation/Foundation.h>
+#else
+#import <AppKit/AppKit.h>
+
+// Clients must subclass NSApplication and implement this protocol if they use
+// MessagePumpMac.
+@protocol CrAppProtocol
+// Must return true if -[NSApplication sendEvent:] is currently on the stack.
+// See the comment for |CreateAutoreleasePool()| in the .mm file for why this
+// is necessary.
+- (BOOL)isHandlingSendEvent;
+@end
+#endif  // !defined(OS_IOS)
+#endif  // defined(__OBJC__)
+
+namespace base {
+
+class RunLoop;
+class TimeTicks;
+
+// AutoreleasePoolType is a proxy type for autorelease pools. Its definition
+// depends on the translation unit (TU) in which this header appears. In pure
+// C++ TUs, it is defined as a forward C++ class declaration (that is never
+// defined), because autorelease pools are an Objective-C concept. In Automatic
+// Reference Counting (ARC) Objective-C TUs, it is similarly defined as a
+// forward C++ class declaration, because clang will not allow the type
+// "NSAutoreleasePool" in such TUs. Finally, in Manual Retain Release (MRR)
+// Objective-C TUs, it is a type alias for NSAutoreleasePool. In all cases, a
+// method that takes or returns an NSAutoreleasePool* can use
+// AutoreleasePoolType* instead.
+#if !defined(__OBJC__) || __has_feature(objc_arc)
+class AutoreleasePoolType;
+#else   // !defined(__OBJC__) || __has_feature(objc_arc)
+typedef NSAutoreleasePool AutoreleasePoolType;
+#endif  // !defined(__OBJC__) || __has_feature(objc_arc)
+
+class BASE_EXPORT MessagePumpCFRunLoopBase : public MessagePump {
+ public:
+  // MessagePump:
+  void Run(Delegate* delegate) override;
+  void ScheduleWork() override;
+  void ScheduleDelayedWork(const TimeTicks& delayed_work_time) override;
+  void SetTimerSlack(TimerSlack timer_slack) override;
+
+ protected:
+  // Needs access to CreateAutoreleasePool.
+  friend class MessagePumpScopedAutoreleasePool;
+  friend class TestMessagePumpCFRunLoopBase;
+
+  // Tasks will be pumped in the run loop modes described by
+  // |initial_mode_mask|, which maps bits to the index of an internal array of
+  // run loop mode identifiers.
+  explicit MessagePumpCFRunLoopBase(int initial_mode_mask);
+  ~MessagePumpCFRunLoopBase() override;
+
+  // Subclasses should implement the work they need to do in MessagePump::Run
+  // in the DoRun method.  MessagePumpCFRunLoopBase::Run calls DoRun directly.
+  // This arrangement is used because MessagePumpCFRunLoopBase needs to set
+  // up and tear down things before and after the "meat" of DoRun.
+  virtual void DoRun(Delegate* delegate) = 0;
+
+  // Accessors for private data members to be used by subclasses.
+  CFRunLoopRef run_loop() const { return run_loop_; }
+  int nesting_level() const { return nesting_level_; }
+  int run_nesting_level() const { return run_nesting_level_; }
+
+  // Sets this pump's delegate.  Signals the appropriate sources if
+  // |delegateless_work_| is true.  |delegate| can be NULL.
+  void SetDelegate(Delegate* delegate);
+
+  // Return an autorelease pool to wrap around any work being performed.
+  // In some cases, CreateAutoreleasePool may return nil intentionally to
+  // prevent an autorelease pool from being created, allowing any
+  // objects autoreleased by work to fall into the current autorelease pool.
+  virtual AutoreleasePoolType* CreateAutoreleasePool();
+
+  // Enable and disable entries in |enabled_modes_| to match |mode_mask|.
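+  // For example (illustrative), SetModeMask(0x1) enables only the first entry
+  // of the internal mode array (the common modes); see the mode list in the
+  // .mm file.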
+  void SetModeMask(int mode_mask);
+
+  // Get the current mode mask from |enabled_modes_|.
+  int GetModeMask() const;
+
+ private:
+  class ScopedModeEnabler;
+
+  // The maximum number of run loop modes that can be monitored.
+  static constexpr int kNumModes = 4;
+
+  // Marking timers as invalid at the right time helps significantly reduce
+  // power use (see the comment in RunDelayedWorkTimer()); however, there is no
+  // public API for doing so. CFRuntime.h states that CFRuntimeBase, upon which
+  // the timer invalidation functions below are based, can change from release
+  // to release and should not be accessed directly (this struct last changed
+  // at least in 2008, in CF-476).
+  //
+  // This function uses private API to modify a test timer's valid state and
+  // uses public API to confirm that the private API changed the right bit.
+  static bool CanInvalidateCFRunLoopTimers();
+
+  // Sets a Core Foundation object's "valid" bit to |valid|. Based on code
+  // from CFRunLoop.c.
+  static void ChromeCFRunLoopTimerSetValid(CFRunLoopTimerRef timer, bool valid);
+
+  // Timer callback scheduled by ScheduleDelayedWork.  This does not do any
+  // work, but it signals work_source_ so that delayed work can be performed
+  // within the appropriate priority constraints.
+  static void RunDelayedWorkTimer(CFRunLoopTimerRef timer, void* info);
+
+  // Perform highest-priority work.  This is associated with work_source_
+  // signalled by ScheduleWork or RunDelayedWorkTimer.  The static method calls
+  // the instance method; the instance method returns true if it resignalled
+  // work_source_ to be called again from the loop.
+  static void RunWorkSource(void* info);
+  bool RunWork();
+
+  // Perform idle-priority work.  This is normally called by PreWaitObserver,
+  // but is also associated with idle_work_source_.  When this function
+  // actually does perform idle work, it will resignal that source.  The
+  // static method calls the instance method; the instance method returns
+  // true if idle work was done.
+  static void RunIdleWorkSource(void* info);
+  bool RunIdleWork();
+
+  // Perform work that may have been deferred because it was not runnable
+  // within a nested run loop.  This is associated with
+  // nesting_deferred_work_source_ and is signalled by
+  // MaybeScheduleNestingDeferredWork when returning from a nested loop,
+  // so that an outer loop will be able to perform the necessary tasks if it
+  // permits nestable tasks.
+  static void RunNestingDeferredWorkSource(void* info);
+  bool RunNestingDeferredWork();
+
+  // Schedules possible nesting-deferred work to be processed before the run
+  // loop goes to sleep, exits, or begins processing sources at the top of its
+  // loop.  If this function detects that a nested loop had run since the
+  // previous attempt to schedule nesting-deferred work, it will schedule a
+  // call to RunNestingDeferredWorkSource.
+  void MaybeScheduleNestingDeferredWork();
+
+  // Observer callback responsible for performing idle-priority work, before
+  // the run loop goes to sleep.  Associated with idle_work_observer_.
+  static void PreWaitObserver(CFRunLoopObserverRef observer,
+                              CFRunLoopActivity activity, void* info);
+
+  // Observer callback called before the run loop processes any sources.
+  // Associated with pre_source_observer_.
+  static void PreSourceObserver(CFRunLoopObserverRef observer,
+                                CFRunLoopActivity activity, void* info);
+
+  // Observer callback called when the run loop starts and stops, at the
+  // beginning and end of calls to CFRunLoopRun.  This is used to maintain
+  // nesting_level_.  Associated with enter_exit_observer_.
+  static void EnterExitObserver(CFRunLoopObserverRef observer,
+                                CFRunLoopActivity activity, void* info);
+
+  // Called by EnterExitObserver after performing maintenance on nesting_level_.
+  // This allows subclasses an opportunity to perform additional processing on
+  // the basis of run loops starting and stopping.
+  virtual void EnterExitRunLoop(CFRunLoopActivity activity);
+
+  // The thread's run loop.
+  CFRunLoopRef run_loop_;
+
+  // The enabled modes. Posted tasks may run in any non-null entry.
+  std::unique_ptr<ScopedModeEnabler> enabled_modes_[kNumModes];
+
+  // The timer, sources, and observers are described above alongside their
+  // callbacks.
+  CFRunLoopTimerRef delayed_work_timer_;
+  CFRunLoopSourceRef work_source_;
+  CFRunLoopSourceRef idle_work_source_;
+  CFRunLoopSourceRef nesting_deferred_work_source_;
+  CFRunLoopObserverRef pre_wait_observer_;
+  CFRunLoopObserverRef pre_source_observer_;
+  CFRunLoopObserverRef enter_exit_observer_;
+
+  // (weak) Delegate passed as an argument to the innermost Run call.
+  Delegate* delegate_;
+
+  // The time that delayed_work_timer_ is scheduled to fire.  This is tracked
+  // independently of CFRunLoopTimerGetNextFireDate(delayed_work_timer_)
+  // to be able to reset the timer properly after waking from system sleep.
+  // See PowerStateNotification.
+  CFAbsoluteTime delayed_work_fire_time_;
+
+  base::TimerSlack timer_slack_;
+
+  // The recursion depth of the currently-executing CFRunLoopRun loop on the
+  // run loop's thread.  0 if no run loops are running inside of whatever scope
+  // the object was created in.
+  int nesting_level_;
+
+  // The recursion depth (calculated in the same way as nesting_level_) of the
+  // innermost executing CFRunLoopRun loop started by a call to Run.
+  int run_nesting_level_;
+
+  // The deepest (numerically highest) recursion depth encountered since the
+  // most recent attempt to run nesting-deferred work.
+  int deepest_nesting_level_;
+
+  // "Delegateless" work flags are set when work is ready to be performed but
+  // must wait until a delegate is available to process it.  This can happen
+  // when a MessagePumpCFRunLoopBase is instantiated and work arrives without
+  // any call to Run on the stack.  The Run method will check for delegateless
+  // work on entry and redispatch it as needed once a delegate is available.
+  bool delegateless_work_;
+  bool delegateless_idle_work_;
+
+  DISALLOW_COPY_AND_ASSIGN(MessagePumpCFRunLoopBase);
+};
+
+class BASE_EXPORT MessagePumpCFRunLoop : public MessagePumpCFRunLoopBase {
+ public:
+  MessagePumpCFRunLoop();
+  ~MessagePumpCFRunLoop() override;
+
+  void DoRun(Delegate* delegate) override;
+  void Quit() override;
+
+ private:
+  void EnterExitRunLoop(CFRunLoopActivity activity) override;
+
+  // True if Quit is called to stop the innermost MessagePump
+  // (innermost_quittable_) but some other CFRunLoopRun loop (nesting_level_)
+  // is running inside the MessagePump's innermost Run call.
+  bool quit_pending_;
+
+  DISALLOW_COPY_AND_ASSIGN(MessagePumpCFRunLoop);
+};
+
+class BASE_EXPORT MessagePumpNSRunLoop : public MessagePumpCFRunLoopBase {
+ public:
+  MessagePumpNSRunLoop();
+  ~MessagePumpNSRunLoop() override;
+
+  void DoRun(Delegate* delegate) override;
+  void Quit() override;
+
+ private:
+  // A source that doesn't do anything but provide something signalable
+  // attached to the run loop.  This source will be signalled when Quit
+  // is called, to cause the loop to wake up so that it can stop.
+  CFRunLoopSourceRef quit_source_;
+
+  // False after Quit is called.
+  bool keep_running_;
+
+  DISALLOW_COPY_AND_ASSIGN(MessagePumpNSRunLoop);
+};
+
+#if defined(OS_IOS)
+// This is a fake message pump.  It attaches sources to the main thread's
+// CFRunLoop, so PostTask() will work, but it is unable to drive the loop
+// directly, so calling Run() or Quit() is an error.
+class MessagePumpUIApplication : public MessagePumpCFRunLoopBase {
+ public:
+  MessagePumpUIApplication();
+  ~MessagePumpUIApplication() override;
+  void DoRun(Delegate* delegate) override;
+  void Quit() override;
+
+  // This message pump cannot spin the main message loop directly.  Instead,
+  // call |Attach()| to set up a delegate.  It is an error to call |Run()|.
+  virtual void Attach(Delegate* delegate);
+
+ private:
+  RunLoop* run_loop_;
+
+  DISALLOW_COPY_AND_ASSIGN(MessagePumpUIApplication);
+};
+
+#else
+
+// While in scope, permits posted tasks to be run in private AppKit run loop
+// modes that would otherwise make the UI unresponsive. E.g., menu fade out.
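+//
+// Usage sketch (illustrative):
+//   {
+//     ScopedPumpMessagesInPrivateModes allow_private_modes;
+//     // Tasks posted here may also run during, e.g., a menu fade-out.
+//   }  // Reverts to the default mode mask on destruction.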
+class BASE_EXPORT ScopedPumpMessagesInPrivateModes {
+ public:
+  ScopedPumpMessagesInPrivateModes();
+  ~ScopedPumpMessagesInPrivateModes();
+
+  int GetModeMaskForTest();
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ScopedPumpMessagesInPrivateModes);
+};
+
+class MessagePumpNSApplication : public MessagePumpCFRunLoopBase {
+ public:
+  MessagePumpNSApplication();
+  ~MessagePumpNSApplication() override;
+
+  void DoRun(Delegate* delegate) override;
+  void Quit() override;
+
+ private:
+  friend class ScopedPumpMessagesInPrivateModes;
+
+  // False after Quit is called.
+  bool keep_running_;
+
+  // True if DoRun is managing its own run loop as opposed to letting
+  // -[NSApplication run] handle it.  The outermost run loop in the application
+  // is managed by -[NSApplication run]; inner run loops are handled by a loop
+  // in DoRun.
+  bool running_own_loop_;
+
+  DISALLOW_COPY_AND_ASSIGN(MessagePumpNSApplication);
+};
+
+class MessagePumpCrApplication : public MessagePumpNSApplication {
+ public:
+  MessagePumpCrApplication();
+  ~MessagePumpCrApplication() override;
+
+ protected:
+  // Returns nil if NSApp is currently in the middle of calling
+  // -sendEvent.  Requires NSApp implementing CrAppProtocol.
+  AutoreleasePoolType* CreateAutoreleasePool() override;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MessagePumpCrApplication);
+};
+#endif  // !defined(OS_IOS)
+
+class BASE_EXPORT MessagePumpMac {
+ public:
+  // If not on the main thread, returns a new instance of
+  // MessagePumpNSRunLoop.
+  //
+  // On the main thread, if NSApp exists and conforms to
+  // CrAppProtocol, creates an instance of MessagePumpCrApplication.
+  //
+  // Otherwise creates an instance of MessagePumpNSApplication using a
+  // default NSApplication.
+  static std::unique_ptr<MessagePump> Create();
+
+#if !defined(OS_IOS)
+  // If a pump is created before the required CrAppProtocol is
+  // created, the wrong MessagePump subclass could be used.
+  // UsingCrApp() returns false if the message pump was created before
+  // NSApp was initialized, or if NSApp does not implement
+  // CrAppProtocol.  NSApp must be initialized before calling.
+  static bool UsingCrApp();
+
+  // Wrapper to query -[NSApp isHandlingSendEvent] from C++ code.
+  // Requires NSApp to implement CrAppProtocol.
+  static bool IsHandlingSendEvent();
+#endif  // !defined(OS_IOS)
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(MessagePumpMac);
+};
+
+// Tasks posted to the message loop are posted under this mode, as well
+// as kCFRunLoopCommonModes.
+extern const CFStringRef BASE_EXPORT kMessageLoopExclusiveRunLoopMode;
+
+}  // namespace base
+
+#endif  // BASE_MESSAGE_LOOP_MESSAGE_PUMP_MAC_H_
diff --git a/base/message_loop/message_pump_mac.mm b/base/message_loop/message_pump_mac.mm
new file mode 100644
index 0000000..fb25201
--- /dev/null
+++ b/base/message_loop/message_pump_mac.mm
@@ -0,0 +1,935 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import "base/message_loop/message_pump_mac.h"
+
+#import <Foundation/Foundation.h>
+
+#include <limits>
+#include <memory>
+
+#include "base/auto_reset.h"
+#include "base/logging.h"
+#include "base/mac/call_with_eh_frame.h"
+#include "base/mac/scoped_cftyperef.h"
+#include "base/macros.h"
+#include "base/message_loop/timer_slack.h"
+#include "base/run_loop.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+
+#if !defined(OS_IOS)
+#import <AppKit/AppKit.h>
+#endif  // !defined(OS_IOS)
+
+namespace base {
+
+const CFStringRef kMessageLoopExclusiveRunLoopMode =
+    CFSTR("kMessageLoopExclusiveRunLoopMode");
+
+namespace {
+
+// Mask that determines which modes to use.
+enum { kCommonModeMask = 0x1, kAllModesMask = 0xf };
+
+// Modes to use for MessagePumpNSApplication that are considered "safe".
+// Currently just common and exclusive modes. Ideally, messages would be pumped
+// in all modes, but that interacts badly with app modal dialogs (e.g. NSAlert).
+enum { kNSApplicationModalSafeModeMask = 0x3 };
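+
+// For example (illustrative): kCommonModeMask (0x1) selects only
+// kCFRunLoopCommonModes, while kNSApplicationModalSafeModeMask (0x3) also
+// selects kMessageLoopExclusiveRunLoopMode; see ScopedModeEnabler::mode() for
+// the full bit-to-mode mapping.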
+
+void NoOp(void* info) {
+}
+
+constexpr CFTimeInterval kCFTimeIntervalMax =
+    std::numeric_limits<CFTimeInterval>::max();
+
+#if !defined(OS_IOS)
+// Set to true if MessagePumpMac::Create() is called before NSApp is
+// initialized.  Only accessed from the main thread.
+bool g_not_using_cr_app = false;
+
+// The MessagePump controlling [NSApp run].
+MessagePumpNSApplication* g_app_pump;
+
+// Various CoreFoundation definitions.
+typedef struct __CFRuntimeBase {
+  uintptr_t _cfisa;
+  uint8_t _cfinfo[4];
+  uint32_t _rc;
+} CFRuntimeBase;
+
+#if defined(__BIG_ENDIAN__)
+#define __CF_BIG_ENDIAN__ 1
+#define __CF_LITTLE_ENDIAN__ 0
+#endif
+
+#if defined(__LITTLE_ENDIAN__)
+#define __CF_LITTLE_ENDIAN__ 1
+#define __CF_BIG_ENDIAN__ 0
+#endif
+
+#define CF_INFO_BITS (!!(__CF_BIG_ENDIAN__)*3)
+
+#define __CFBitfieldMask(N1, N2) \
+  ((((UInt32)~0UL) << (31UL - (N1) + (N2))) >> (31UL - N1))
+#define __CFBitfieldSetValue(V, N1, N2, X)   \
+  ((V) = ((V) & ~__CFBitfieldMask(N1, N2)) | \
+         (((X) << (N2)) & __CFBitfieldMask(N1, N2)))
+
+// Marking timers as invalid at the right time by flipping their valid bit helps
+// significantly reduce power use (see the explanation in
+// RunDelayedWorkTimer()), however there is no public API for doing so.
+// CFRuntime.h states that CFRuntimeBase can change from release to release
+// and should not be accessed directly. The last known change of this struct
+// occurred in 2008 in CF-476 / 10.5; unfortunately the source for 10.11 and
+// 10.12 is not available for inspection at this time.
+// CanInvalidateCFRunLoopTimers() will at least prevent us from invalidating
+// timers if this function starts flipping the wrong bit on a future OS release.
+void __ChromeCFRunLoopTimerSetValid(CFRunLoopTimerRef timer, bool valid) {
+  __CFBitfieldSetValue(((CFRuntimeBase*)timer)->_cfinfo[CF_INFO_BITS], 3, 3,
+                       valid);
+}
+#endif  // !defined(OS_IOS)
+
+}  // namespace
+
+// A scoper for autorelease pools created from message pump run loops.
+// Avoids dirtying up the ScopedNSAutoreleasePool interface for the rare
+// case where an autorelease pool needs to be passed in.
+class MessagePumpScopedAutoreleasePool {
+ public:
+  explicit MessagePumpScopedAutoreleasePool(MessagePumpCFRunLoopBase* pump)
+      : pool_(pump->CreateAutoreleasePool()) {}
+  ~MessagePumpScopedAutoreleasePool() { [pool_ drain]; }
+
+ private:
+  NSAutoreleasePool* pool_;
+  DISALLOW_COPY_AND_ASSIGN(MessagePumpScopedAutoreleasePool);
+};
+
+class MessagePumpCFRunLoopBase::ScopedModeEnabler {
+ public:
+  ScopedModeEnabler(MessagePumpCFRunLoopBase* owner, int mode_index)
+      : owner_(owner), mode_index_(mode_index) {
+    CFRunLoopRef loop = owner_->run_loop_;
+    CFRunLoopAddTimer(loop, owner_->delayed_work_timer_, mode());
+    CFRunLoopAddSource(loop, owner_->work_source_, mode());
+    CFRunLoopAddSource(loop, owner_->idle_work_source_, mode());
+    CFRunLoopAddSource(loop, owner_->nesting_deferred_work_source_, mode());
+    CFRunLoopAddObserver(loop, owner_->pre_wait_observer_, mode());
+    CFRunLoopAddObserver(loop, owner_->pre_source_observer_, mode());
+    CFRunLoopAddObserver(loop, owner_->enter_exit_observer_, mode());
+  }
+
+  ~ScopedModeEnabler() {
+    CFRunLoopRef loop = owner_->run_loop_;
+    CFRunLoopRemoveObserver(loop, owner_->enter_exit_observer_, mode());
+    CFRunLoopRemoveObserver(loop, owner_->pre_source_observer_, mode());
+    CFRunLoopRemoveObserver(loop, owner_->pre_wait_observer_, mode());
+    CFRunLoopRemoveSource(loop, owner_->nesting_deferred_work_source_, mode());
+    CFRunLoopRemoveSource(loop, owner_->idle_work_source_, mode());
+    CFRunLoopRemoveSource(loop, owner_->work_source_, mode());
+    CFRunLoopRemoveTimer(loop, owner_->delayed_work_timer_, mode());
+  }
+
+  // This function knows about the AppKit RunLoop modes observed to potentially
+  // run tasks posted to Chrome's main thread task runner. Some are internal to
+  // AppKit but must be observed to keep Chrome's UI responsive. Others that may
+  // be interesting, but are not watched:
+  //  - com.apple.hitoolbox.windows.transitionmode
+  //  - com.apple.hitoolbox.windows.flushmode
+  const CFStringRef& mode() const {
+    static const CFStringRef modes[] = {
+        // The standard Core Foundation "common modes" constant. Must always be
+        // first in this list to match the value of kCommonModeMask.
+        kCFRunLoopCommonModes,
+
+        // Mode that only sees Chrome work sources.
+        kMessageLoopExclusiveRunLoopMode,
+
+        // Process work when NSMenus are fading out.
+        CFSTR("com.apple.hitoolbox.windows.windowfadingmode"),
+
+        // Process work when AppKit is highlighting an item on the main menubar.
+        CFSTR("NSUnhighlightMenuRunLoopMode"),
+    };
+    static_assert(arraysize(modes) == kNumModes, "mode size mismatch");
+    static_assert((1 << kNumModes) - 1 == kAllModesMask,
+                  "kAllModesMask not large enough");
+
+    return modes[mode_index_];
+  }
+
+ private:
+  MessagePumpCFRunLoopBase* const owner_;  // Weak. Owns this.
+  const int mode_index_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedModeEnabler);
+};
+
+// Must be called on the run loop thread.
+void MessagePumpCFRunLoopBase::Run(Delegate* delegate) {
+  // nesting_level_ will be incremented in EnterExitRunLoop, so set
+  // run_nesting_level_ accordingly.
+  int last_run_nesting_level = run_nesting_level_;
+  run_nesting_level_ = nesting_level_ + 1;
+
+  Delegate* last_delegate = delegate_;
+  SetDelegate(delegate);
+
+  DoRun(delegate);
+
+  // Restore the previous state of the object.
+  SetDelegate(last_delegate);
+  run_nesting_level_ = last_run_nesting_level;
+}
+
+// May be called on any thread.
+void MessagePumpCFRunLoopBase::ScheduleWork() {
+  CFRunLoopSourceSignal(work_source_);
+  CFRunLoopWakeUp(run_loop_);
+}
+
+// Must be called on the run loop thread.
+void MessagePumpCFRunLoopBase::ScheduleDelayedWork(
+    const TimeTicks& delayed_work_time) {
+  TimeDelta delta = delayed_work_time - TimeTicks::Now();
+  delayed_work_fire_time_ = CFAbsoluteTimeGetCurrent() + delta.InSecondsF();
+
+  // Flip the timer's validation bit just before setting the new fire time. Do
+  // this now because CFRunLoopTimerSetNextFireDate() likely checks the validity
+  // of a timer before proceeding to set its fire date. Making the timer valid
+  // now won't have any side effects (such as a premature firing of the timer)
+  // because we're only flipping a bit.
+  //
+  // Please see the comment in RunDelayedWorkTimer() for more info on the whys
+  // of invalidation.
+  ChromeCFRunLoopTimerSetValid(delayed_work_timer_, true);
+
+  CFRunLoopTimerSetNextFireDate(delayed_work_timer_, delayed_work_fire_time_);
+  if (timer_slack_ == TIMER_SLACK_MAXIMUM) {
+    CFRunLoopTimerSetTolerance(delayed_work_timer_, delta.InSecondsF() * 0.5);
+  } else {
+    CFRunLoopTimerSetTolerance(delayed_work_timer_, 0);
+  }
+}
+
+void MessagePumpCFRunLoopBase::SetTimerSlack(TimerSlack timer_slack) {
+  timer_slack_ = timer_slack;
+}
+
+// Must be called on the run loop thread.
+MessagePumpCFRunLoopBase::MessagePumpCFRunLoopBase(int initial_mode_mask)
+    : delegate_(NULL),
+      delayed_work_fire_time_(kCFTimeIntervalMax),
+      timer_slack_(base::TIMER_SLACK_NONE),
+      nesting_level_(0),
+      run_nesting_level_(0),
+      deepest_nesting_level_(0),
+      delegateless_work_(false),
+      delegateless_idle_work_(false) {
+  run_loop_ = CFRunLoopGetCurrent();
+  CFRetain(run_loop_);
+
+  // Set a repeating timer with a preposterous firing time and interval.  The
+  // timer will effectively never fire as-is.  The firing time will be adjusted
+  // as needed when ScheduleDelayedWork is called.
+  CFRunLoopTimerContext timer_context = CFRunLoopTimerContext();
+  timer_context.info = this;
+  delayed_work_timer_ = CFRunLoopTimerCreate(NULL,                // allocator
+                                             kCFTimeIntervalMax,  // fire time
+                                             kCFTimeIntervalMax,  // interval
+                                             0,                   // flags
+                                             0,                   // priority
+                                             RunDelayedWorkTimer,
+                                             &timer_context);
+
+  CFRunLoopSourceContext source_context = CFRunLoopSourceContext();
+  source_context.info = this;
+  source_context.perform = RunWorkSource;
+  work_source_ = CFRunLoopSourceCreate(NULL,  // allocator
+                                       1,     // priority
+                                       &source_context);
+  source_context.perform = RunIdleWorkSource;
+  idle_work_source_ = CFRunLoopSourceCreate(NULL,  // allocator
+                                            2,     // priority
+                                            &source_context);
+  source_context.perform = RunNestingDeferredWorkSource;
+  nesting_deferred_work_source_ = CFRunLoopSourceCreate(NULL,  // allocator
+                                                        0,     // priority
+                                                        &source_context);
+
+  CFRunLoopObserverContext observer_context = CFRunLoopObserverContext();
+  observer_context.info = this;
+  pre_wait_observer_ = CFRunLoopObserverCreate(NULL,  // allocator
+                                               kCFRunLoopBeforeWaiting,
+                                               true,  // repeat
+                                               0,     // priority
+                                               PreWaitObserver,
+                                               &observer_context);
+  pre_source_observer_ = CFRunLoopObserverCreate(NULL,  // allocator
+                                                 kCFRunLoopBeforeSources,
+                                                 true,  // repeat
+                                                 0,     // priority
+                                                 PreSourceObserver,
+                                                 &observer_context);
+  enter_exit_observer_ = CFRunLoopObserverCreate(NULL,  // allocator
+                                                 kCFRunLoopEntry |
+                                                     kCFRunLoopExit,
+                                                 true,  // repeat
+                                                 0,     // priority
+                                                 EnterExitObserver,
+                                                 &observer_context);
+  SetModeMask(initial_mode_mask);
+}
+
+// Ideally called on the run loop thread.  If other run loops were running
+// lower on the run loop thread's stack when this object was created, the
+// same number of run loops must be running when this object is destroyed.
+MessagePumpCFRunLoopBase::~MessagePumpCFRunLoopBase() {
+  SetModeMask(0);
+  CFRelease(enter_exit_observer_);
+  CFRelease(pre_source_observer_);
+  CFRelease(pre_wait_observer_);
+  CFRelease(nesting_deferred_work_source_);
+  CFRelease(idle_work_source_);
+  CFRelease(work_source_);
+  CFRelease(delayed_work_timer_);
+  CFRelease(run_loop_);
+}
+
+void MessagePumpCFRunLoopBase::SetDelegate(Delegate* delegate) {
+  delegate_ = delegate;
+
+  if (delegate) {
+    // If any work showed up but could not be dispatched for want of a
+    // delegate, set it up for dispatch again now that a delegate is
+    // available.
+    if (delegateless_work_) {
+      CFRunLoopSourceSignal(work_source_);
+      delegateless_work_ = false;
+    }
+    if (delegateless_idle_work_) {
+      CFRunLoopSourceSignal(idle_work_source_);
+      delegateless_idle_work_ = false;
+    }
+  }
+}
+
+// Base version returns a standard NSAutoreleasePool.
+AutoreleasePoolType* MessagePumpCFRunLoopBase::CreateAutoreleasePool() {
+  return [[NSAutoreleasePool alloc] init];
+}
+
+void MessagePumpCFRunLoopBase::SetModeMask(int mode_mask) {
+  for (size_t i = 0; i < kNumModes; ++i) {
+    bool enable = mode_mask & (0x1 << i);
+    if (enable == !enabled_modes_[i]) {
+      enabled_modes_[i] =
+          enable ? std::make_unique<ScopedModeEnabler>(this, i) : nullptr;
+    }
+  }
+}
+
+int MessagePumpCFRunLoopBase::GetModeMask() const {
+  int mask = 0;
+  for (size_t i = 0; i < kNumModes; ++i)
+    mask |= enabled_modes_[i] ? (0x1 << i) : 0;
+  return mask;
+}
+
+#if !defined(OS_IOS)
+// This function uses private API to modify a test timer's valid state and
+// uses public API to confirm that the private API changed the correct bit.
+// static
+bool MessagePumpCFRunLoopBase::CanInvalidateCFRunLoopTimers() {
+  CFRunLoopTimerContext timer_context = CFRunLoopTimerContext();
+  timer_context.info = nullptr;
+  ScopedCFTypeRef<CFRunLoopTimerRef> test_timer(
+      CFRunLoopTimerCreate(NULL,                // allocator
+                           kCFTimeIntervalMax,  // fire time
+                           kCFTimeIntervalMax,  // interval
+                           0,                   // flags
+                           0,                   // priority
+                           nullptr, &timer_context));
+  // Should be valid from the start.
+  if (!CFRunLoopTimerIsValid(test_timer)) {
+    return false;
+  }
+  // Confirm that the private API can mark the timer invalid.
+  __ChromeCFRunLoopTimerSetValid(test_timer, false);
+  if (CFRunLoopTimerIsValid(test_timer)) {
+    return false;
+  }
+  // Confirm that the private API can mark the timer valid.
+  __ChromeCFRunLoopTimerSetValid(test_timer, true);
+  return CFRunLoopTimerIsValid(test_timer);
+}
+#endif  // !defined(OS_IOS)
+
+// static
+void MessagePumpCFRunLoopBase::ChromeCFRunLoopTimerSetValid(
+    CFRunLoopTimerRef timer,
+    bool valid) {
+#if !defined(OS_IOS)
+  static bool can_invalidate_timers = CanInvalidateCFRunLoopTimers();
+  if (can_invalidate_timers) {
+    __ChromeCFRunLoopTimerSetValid(timer, valid);
+  }
+#endif  // !defined(OS_IOS)
+}
+
+// Called from the run loop.
+// static
+void MessagePumpCFRunLoopBase::RunDelayedWorkTimer(CFRunLoopTimerRef timer,
+                                                   void* info) {
+  MessagePumpCFRunLoopBase* self = static_cast<MessagePumpCFRunLoopBase*>(info);
+
+  // The timer won't fire again until it's reset.
+  self->delayed_work_fire_time_ = kCFTimeIntervalMax;
+
+  // The message pump's timer needs to fire at changing and unpredictable
+  // intervals. Creating a new timer for each firing time is very expensive, so
+  // the message pump instead uses a repeating timer with a very large repeat
+  // rate. After each firing of the timer, the run loop sets the timer's next
+  // firing time to the distant future, essentially pausing the timer until the
+  // pump sets the next firing time. This is the solution recommended by Apple.
+  //
+  // It turns out, however, that scheduling timers is also quite expensive, and
+  // that every one of the message pump's timer firings incurs two
+  // reschedulings. The first rescheduling occurs in ScheduleDelayedWork(),
+  // which sets the desired next firing time. The second comes after exiting
+  // this method (the timer's callback method), when the run loop sets the
+  // timer's next firing time to far in the future.
+  //
+  // The code in __CFRunLoopDoTimer() inside CFRunLoop.c calls the timer's
+  // callback, confirms that the timer is valid, and then sets its future
+  // firing time based on its repeat frequency. Flipping the valid bit here
+  // causes the __CFRunLoopDoTimer() to skip setting the future firing time.
+  // Note that there's public API to invalidate a timer but it goes beyond
+  // flipping the valid bit, making the timer unusable in the future.
+  //
+  // ScheduleDelayedWork() flips the valid bit back just before setting the
+  // timer's new firing time.
+  ChromeCFRunLoopTimerSetValid(self->delayed_work_timer_, false);
+
+  // CFRunLoopTimers fire outside of the priority scheme for CFRunLoopSources.
+  // In order to establish the proper priority in which work and delayed work
+  // are processed one for one, the timer used to schedule delayed work must
+  // signal a CFRunLoopSource used to dispatch both work and delayed work.
+  CFRunLoopSourceSignal(self->work_source_);
+}
+
+// Called from the run loop.
+// static
+void MessagePumpCFRunLoopBase::RunWorkSource(void* info) {
+  MessagePumpCFRunLoopBase* self = static_cast<MessagePumpCFRunLoopBase*>(info);
+  base::mac::CallWithEHFrame(^{
+    self->RunWork();
+  });
+}
+
+// Called by MessagePumpCFRunLoopBase::RunWorkSource.
+bool MessagePumpCFRunLoopBase::RunWork() {
+  if (!delegate_) {
+    // This point can be reached with a NULL delegate_ if Run is not on the
+    // stack but foreign code is spinning the CFRunLoop.  Arrange to come back
+    // here when a delegate is available.
+    delegateless_work_ = true;
+    return false;
+  }
+
+  // The NSApplication-based run loop only drains the autorelease pool at each
+  // UI event (NSEvent).  The autorelease pool is not drained for each
+  // CFRunLoopSource target that's run.  Use a local pool for any autoreleased
+  // objects if the app is not currently handling a UI event to ensure they're
+  // released promptly even in the absence of UI events.
+  MessagePumpScopedAutoreleasePool autorelease_pool(this);
+
+  // Call DoWork and DoDelayedWork once, and if something was done, arrange to
+  // come back here again as long as the loop is still running.
+  bool did_work = delegate_->DoWork();
+  bool resignal_work_source = did_work;
+
+  TimeTicks next_time;
+  delegate_->DoDelayedWork(&next_time);
+  if (!did_work) {
+    // Determine whether there's more delayed work, and if so, if it needs to
+    // be done at some point in the future or if it's already time to do it.
+    // Only do these checks if did_work is false. If did_work is true, this
+    // function, and therefore any additional delayed work, will get another
+    // chance to run before the loop goes to sleep.
+    bool more_delayed_work = !next_time.is_null();
+    if (more_delayed_work) {
+      TimeDelta delay = next_time - TimeTicks::Now();
+      if (delay > TimeDelta()) {
+        // There's more delayed work to be done in the future.
+        ScheduleDelayedWork(next_time);
+      } else {
+        // There's more delayed work to be done, and its time is in the past.
+        // Arrange to come back here directly as long as the loop is still
+        // running.
+        resignal_work_source = true;
+      }
+    }
+  }
+
+  if (resignal_work_source) {
+    CFRunLoopSourceSignal(work_source_);
+  }
+
+  return resignal_work_source;
+}
+
+// Called from the run loop.
+// static
+void MessagePumpCFRunLoopBase::RunIdleWorkSource(void* info) {
+  MessagePumpCFRunLoopBase* self = static_cast<MessagePumpCFRunLoopBase*>(info);
+  base::mac::CallWithEHFrame(^{
+    self->RunIdleWork();
+  });
+}
+
+// Called by MessagePumpCFRunLoopBase::RunIdleWorkSource.
+bool MessagePumpCFRunLoopBase::RunIdleWork() {
+  if (!delegate_) {
+    // This point can be reached with a NULL delegate_ if Run is not on the
+    // stack but foreign code is spinning the CFRunLoop.  Arrange to come back
+    // here when a delegate is available.
+    delegateless_idle_work_ = true;
+    return false;
+  }
+
+  // The NSApplication-based run loop only drains the autorelease pool at each
+  // UI event (NSEvent).  The autorelease pool is not drained for each
+  // CFRunLoopSource target that's run.  Use a local pool for any autoreleased
+  // objects if the app is not currently handling a UI event to ensure they're
+  // released promptly even in the absence of UI events.
+  MessagePumpScopedAutoreleasePool autorelease_pool(this);
+
+  // Call DoIdleWork once, and if something was done, arrange to come back here
+  // again as long as the loop is still running.
+  bool did_work = delegate_->DoIdleWork();
+  if (did_work) {
+    CFRunLoopSourceSignal(idle_work_source_);
+  }
+
+  return did_work;
+}
+
+// Called from the run loop.
+// static
+void MessagePumpCFRunLoopBase::RunNestingDeferredWorkSource(void* info) {
+  MessagePumpCFRunLoopBase* self = static_cast<MessagePumpCFRunLoopBase*>(info);
+  base::mac::CallWithEHFrame(^{
+    self->RunNestingDeferredWork();
+  });
+}
+
+// Called by MessagePumpCFRunLoopBase::RunNestingDeferredWorkSource.
+bool MessagePumpCFRunLoopBase::RunNestingDeferredWork() {
+  if (!delegate_) {
+    // This point can be reached with a NULL delegate_ if Run is not on the
+    // stack but foreign code is spinning the CFRunLoop.  There's no sense in
+    // attempting to do any work or signalling the work sources because
+    // without a delegate, work is not possible.
+    return false;
+  }
+
+  // Immediately try work in priority order.
+  if (!RunWork()) {
+    if (!RunIdleWork()) {
+      return false;
+    }
+  } else {
+    // Work was done.  Arrange for the loop to try non-nestable idle work on
+    // a subsequent pass.
+    CFRunLoopSourceSignal(idle_work_source_);
+  }
+
+  return true;
+}
+
+// Called before the run loop goes to sleep or exits, or processes sources.
+void MessagePumpCFRunLoopBase::MaybeScheduleNestingDeferredWork() {
+  // deepest_nesting_level_ is set as run loops are entered.  If the deepest
+  // level encountered is deeper than the current level, a nested loop
+  // (relative to the current level) ran since the last time nesting-deferred
+  // work was scheduled.  When that situation is encountered, schedule
+  // nesting-deferred work in case any work was deferred because nested work
+  // was disallowed.
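+  //
+  // For example (illustrative): while running at level 1, a nested loop runs
+  // at level 2 and exits.  On the next pass through here,
+  // deepest_nesting_level_ (2) exceeds nesting_level_ (1), so the
+  // nesting-deferred source is signalled and the high-water mark resets to
+  // the current level.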
+  if (deepest_nesting_level_ > nesting_level_) {
+    deepest_nesting_level_ = nesting_level_;
+    CFRunLoopSourceSignal(nesting_deferred_work_source_);
+  }
+}
+
+// Called from the run loop.
+// static
+void MessagePumpCFRunLoopBase::PreWaitObserver(CFRunLoopObserverRef observer,
+                                               CFRunLoopActivity activity,
+                                               void* info) {
+  MessagePumpCFRunLoopBase* self = static_cast<MessagePumpCFRunLoopBase*>(info);
+  base::mac::CallWithEHFrame(^{
+    // Attempt to do some idle work before going to sleep.
+    self->RunIdleWork();
+
+    // The run loop is about to go to sleep.  If any of the work done since it
+    // started or woke up resulted in a nested run loop running,
+    // nesting-deferred work may have accumulated.  Schedule it for processing
+    // if appropriate.
+    self->MaybeScheduleNestingDeferredWork();
+  });
+}
+
+// Called from the run loop.
+// static
+void MessagePumpCFRunLoopBase::PreSourceObserver(CFRunLoopObserverRef observer,
+                                                 CFRunLoopActivity activity,
+                                                 void* info) {
+  MessagePumpCFRunLoopBase* self = static_cast<MessagePumpCFRunLoopBase*>(info);
+
+  // The run loop has reached the top of the loop and is about to begin
+  // processing sources.  If the last iteration of the loop at this nesting
+  // level did not sleep or exit, nesting-deferred work may have accumulated
+  // if a nested loop ran.  Schedule nesting-deferred work for processing if
+  // appropriate.
+  base::mac::CallWithEHFrame(^{
+    self->MaybeScheduleNestingDeferredWork();
+  });
+}
+
+// Called from the run loop.
+// static
+void MessagePumpCFRunLoopBase::EnterExitObserver(CFRunLoopObserverRef observer,
+                                                 CFRunLoopActivity activity,
+                                                 void* info) {
+  MessagePumpCFRunLoopBase* self = static_cast<MessagePumpCFRunLoopBase*>(info);
+
+  switch (activity) {
+    case kCFRunLoopEntry:
+      ++self->nesting_level_;
+      if (self->nesting_level_ > self->deepest_nesting_level_) {
+        self->deepest_nesting_level_ = self->nesting_level_;
+      }
+      break;
+
+    case kCFRunLoopExit:
+      // Not all run loops go to sleep.  If a run loop is stopped before it
+      // goes to sleep due to a CFRunLoopStop call, or if the timeout passed
+      // to CFRunLoopRunInMode expires, the run loop may proceed directly from
+      // handling sources to exiting without any sleep.  This most commonly
+      // occurs when CFRunLoopRunInMode is passed a timeout of 0, causing it
+      // to make a single pass through the loop and exit without sleep.  Some
+      // native loops use CFRunLoop in this way.  Because PreWaitObserver will
+      // not be called in these cases, MaybeScheduleNestingDeferredWork needs
+      // to be called here, as the run loop exits.
+      //
+      // MaybeScheduleNestingDeferredWork consults self->nesting_level_
+      // to determine whether to schedule nesting-deferred work.  It expects
+      // the nesting level to be set to the depth of the loop that is going
+      // to sleep or exiting.  It must be called before decrementing the
+      // value so that the value still corresponds to the level of the exiting
+      // loop.
+      base::mac::CallWithEHFrame(^{
+        self->MaybeScheduleNestingDeferredWork();
+      });
+      --self->nesting_level_;
+      break;
+
+    default:
+      break;
+  }
+
+  base::mac::CallWithEHFrame(^{
+    self->EnterExitRunLoop(activity);
+  });
+}
+
+// Called by MessagePumpCFRunLoopBase::EnterExitRunLoop.  The default
+// implementation is a no-op.
+void MessagePumpCFRunLoopBase::EnterExitRunLoop(CFRunLoopActivity activity) {
+}
+
+MessagePumpCFRunLoop::MessagePumpCFRunLoop()
+    : MessagePumpCFRunLoopBase(kCommonModeMask), quit_pending_(false) {}
+
+MessagePumpCFRunLoop::~MessagePumpCFRunLoop() {}
+
+// Called by MessagePumpCFRunLoopBase::DoRun.  If other CFRunLoopRun loops were
+// running lower on the run loop thread's stack when this object was created,
+// the same number of CFRunLoopRun loops must be running for the outermost call
+// to Run.  Run/DoRun are reentrant after that point.
+void MessagePumpCFRunLoop::DoRun(Delegate* delegate) {
+  // This is identical to calling CFRunLoopRun(), except that autorelease
+  // pool management is introduced.
+  int result;
+  do {
+    MessagePumpScopedAutoreleasePool autorelease_pool(this);
+    result = CFRunLoopRunInMode(kCFRunLoopDefaultMode,
+                                kCFTimeIntervalMax,
+                                false);
+  } while (result != kCFRunLoopRunStopped && result != kCFRunLoopRunFinished);
+}
+
+// Must be called on the run loop thread.
+void MessagePumpCFRunLoop::Quit() {
+  // Stop the innermost run loop managed by this MessagePumpCFRunLoop object.
+  if (nesting_level() == run_nesting_level()) {
+    // This object is running the innermost loop, just stop it.
+    CFRunLoopStop(run_loop());
+  } else {
+    // There's another loop running inside the loop managed by this object.
+    // In other words, someone else called CFRunLoopRunInMode on the same
+    // thread, deeper on the stack than the deepest Run call.  Don't preempt
+    // other run loops, just mark this object to quit the innermost Run as
+    // soon as the other inner loops not managed by Run are done.
+    quit_pending_ = true;
+  }
+}
+
+// Called by MessagePumpCFRunLoopBase::EnterExitObserver.
+void MessagePumpCFRunLoop::EnterExitRunLoop(CFRunLoopActivity activity) {
+  if (activity == kCFRunLoopExit &&
+      nesting_level() == run_nesting_level() &&
+      quit_pending_) {
+    // Quit was called while loops other than those managed by this object
+    // were running further inside a run loop managed by this object.  Now
+    // that all unmanaged inner run loops are gone, stop the loop running
+    // just inside Run.
+    CFRunLoopStop(run_loop());
+    quit_pending_ = false;
+  }
+}
+
+MessagePumpNSRunLoop::MessagePumpNSRunLoop()
+    : MessagePumpCFRunLoopBase(kCommonModeMask), keep_running_(true) {
+  CFRunLoopSourceContext source_context = CFRunLoopSourceContext();
+  source_context.perform = NoOp;
+  quit_source_ = CFRunLoopSourceCreate(NULL,  // allocator
+                                       0,     // priority
+                                       &source_context);
+  CFRunLoopAddSource(run_loop(), quit_source_, kCFRunLoopCommonModes);
+}
+
+MessagePumpNSRunLoop::~MessagePumpNSRunLoop() {
+  CFRunLoopRemoveSource(run_loop(), quit_source_, kCFRunLoopCommonModes);
+  CFRelease(quit_source_);
+}
+
+void MessagePumpNSRunLoop::DoRun(Delegate* delegate) {
+  AutoReset<bool> auto_reset_keep_running(&keep_running_, true);
+
+  while (keep_running_) {
+    // NSRunLoop manages autorelease pools itself.
+    [[NSRunLoop currentRunLoop] runMode:NSDefaultRunLoopMode
+                             beforeDate:[NSDate distantFuture]];
+  }
+}
+
+void MessagePumpNSRunLoop::Quit() {
+  keep_running_ = false;
+  CFRunLoopSourceSignal(quit_source_);
+  CFRunLoopWakeUp(run_loop());
+}
+
+#if defined(OS_IOS)
+MessagePumpUIApplication::MessagePumpUIApplication()
+    : MessagePumpCFRunLoopBase(kCommonModeMask), run_loop_(NULL) {}
+
+MessagePumpUIApplication::~MessagePumpUIApplication() {}
+
+void MessagePumpUIApplication::DoRun(Delegate* delegate) {
+  NOTREACHED();
+}
+
+void MessagePumpUIApplication::Quit() {
+  NOTREACHED();
+}
+
+void MessagePumpUIApplication::Attach(Delegate* delegate) {
+  DCHECK(!run_loop_);
+  run_loop_ = new RunLoop();
+  CHECK(run_loop_->BeforeRun());
+  SetDelegate(delegate);
+}
+
+#else
+
+ScopedPumpMessagesInPrivateModes::ScopedPumpMessagesInPrivateModes() {
+  DCHECK(g_app_pump);
+  DCHECK_EQ(kNSApplicationModalSafeModeMask, g_app_pump->GetModeMask());
+  // Pumping events in private runloop modes is known to interact badly with
+  // app modal windows like NSAlert.
+  if (![NSApp modalWindow])
+    g_app_pump->SetModeMask(kAllModesMask);
+}
+
+ScopedPumpMessagesInPrivateModes::~ScopedPumpMessagesInPrivateModes() {
+  DCHECK(g_app_pump);
+  g_app_pump->SetModeMask(kNSApplicationModalSafeModeMask);
+}
+
+int ScopedPumpMessagesInPrivateModes::GetModeMaskForTest() {
+  return g_app_pump ? g_app_pump->GetModeMask() : -1;
+}
+
+MessagePumpNSApplication::MessagePumpNSApplication()
+    : MessagePumpCFRunLoopBase(kNSApplicationModalSafeModeMask),
+      keep_running_(true),
+      running_own_loop_(false) {
+  DCHECK_EQ(nullptr, g_app_pump);
+  g_app_pump = this;
+}
+
+MessagePumpNSApplication::~MessagePumpNSApplication() {
+  DCHECK_EQ(this, g_app_pump);
+  g_app_pump = nullptr;
+}
+
+void MessagePumpNSApplication::DoRun(Delegate* delegate) {
+  AutoReset<bool> auto_reset_keep_running(&keep_running_, true);
+  bool last_running_own_loop = running_own_loop_;
+
+  // NSApp must be initialized by calling:
+  // [{some class which implements CrAppProtocol} sharedApplication]
+  // Most likely candidates are CrApplication or BrowserCrApplication.
+  // These can be initialized from C++ code by calling
+  // RegisterCrApp() or RegisterBrowserCrApp().
+  CHECK(NSApp);
+
+  if (![NSApp isRunning]) {
+    running_own_loop_ = false;
+    // NSApplication manages autorelease pools itself when run this way.
+    [NSApp run];
+  } else {
+    running_own_loop_ = true;
+    NSDate* distant_future = [NSDate distantFuture];
+    while (keep_running_) {
+      MessagePumpScopedAutoreleasePool autorelease_pool(this);
+      NSEvent* event = [NSApp nextEventMatchingMask:NSAnyEventMask
+                                          untilDate:distant_future
+                                             inMode:NSDefaultRunLoopMode
+                                            dequeue:YES];
+      if (event) {
+        [NSApp sendEvent:event];
+      }
+    }
+  }
+
+  running_own_loop_ = last_running_own_loop;
+}
+
+void MessagePumpNSApplication::Quit() {
+  if (!running_own_loop_) {
+    [[NSApplication sharedApplication] stop:nil];
+  } else {
+    keep_running_ = false;
+  }
+
+  // Send a fake event to wake the loop up.
+  [NSApp postEvent:[NSEvent otherEventWithType:NSApplicationDefined
+                                      location:NSZeroPoint
+                                 modifierFlags:0
+                                     timestamp:0
+                                  windowNumber:0
+                                       context:NULL
+                                       subtype:0
+                                         data1:0
+                                         data2:0]
+           atStart:NO];
+}
+
+MessagePumpCrApplication::MessagePumpCrApplication() {
+}
+
+MessagePumpCrApplication::~MessagePumpCrApplication() {
+}
+
+// Prevents an autorelease pool from being created if the app is in the midst of
+// handling a UI event because various parts of AppKit depend on objects that
+// are created while handling a UI event to be autoreleased in the event loop.
+// An example of this is NSWindowController. When a window with a window
+// controller is closed it goes through a stack like this:
+// (Several stack frames elided for clarity)
+//
+// #0 [NSWindowController autorelease]
+// #1 DoAClose
+// #2 MessagePumpCFRunLoopBase::DoWork()
+// #3 [NSRunLoop run]
+// #4 [NSButton performClick:]
+// #5 [NSWindow sendEvent:]
+// #6 [NSApp sendEvent:]
+// #7 [NSApp run]
+//
+// -performClick: spins a nested run loop. If the pool created in DoWork was a
+// standard NSAutoreleasePool, it would release the objects that were
+// autoreleased into it once DoWork released it. This would cause the window
+// controller, which autoreleased itself in frame #0, to release itself, and
+// possibly free itself. Unfortunately this window controller controls the
+// window in frame #5. When the stack is unwound to frame #5, the window would
+// no longer exist and crashes may occur. Apple gets around this by never
+// releasing the pool it creates in frame #4, and letting frame #7 clean it up
+// when it cleans up the pool that wraps frame #7. When an autorelease pool is
+// released it releases all other pools that were created after it on the
+// autorelease pool stack.
+//
+// CrApplication is responsible for setting handlingSendEvent to true just
+// before it sends the event through the event handling mechanism, and
+// returning it to its previous value once the event has been sent.
+AutoreleasePoolType* MessagePumpCrApplication::CreateAutoreleasePool() {
+  if (MessagePumpMac::IsHandlingSendEvent())
+    return nil;
+  return MessagePumpNSApplication::CreateAutoreleasePool();
+}
+
+// static
+bool MessagePumpMac::UsingCrApp() {
+  DCHECK([NSThread isMainThread]);
+
+  // If NSApp is still not initialized, then the subclass used cannot
+  // be determined.
+  DCHECK(NSApp);
+
+  // The pump was created using MessagePumpNSApplication.
+  if (g_not_using_cr_app)
+    return false;
+
+  return [NSApp conformsToProtocol:@protocol(CrAppProtocol)];
+}
+
+// static
+bool MessagePumpMac::IsHandlingSendEvent() {
+  DCHECK([NSApp conformsToProtocol:@protocol(CrAppProtocol)]);
+  NSObject<CrAppProtocol>* app = static_cast<NSObject<CrAppProtocol>*>(NSApp);
+  return [app isHandlingSendEvent];
+}
+#endif  // !defined(OS_IOS)
+
+// static
+std::unique_ptr<MessagePump> MessagePumpMac::Create() {
+  if ([NSThread isMainThread]) {
+#if defined(OS_IOS)
+    return std::make_unique<MessagePumpUIApplication>();
+#else
+    if ([NSApp conformsToProtocol:@protocol(CrAppProtocol)])
+      return std::make_unique<MessagePumpCrApplication>();
+
+    // The main-thread MessagePump implementations REQUIRE an NSApp.
+    // Executables which have specific requirements for their
+    // NSApplication subclass should initialize appropriately before
+    // creating an event loop.
+    [NSApplication sharedApplication];
+    g_not_using_cr_app = true;
+    return std::make_unique<MessagePumpNSApplication>();
+#endif
+  }
+
+  return std::make_unique<MessagePumpNSRunLoop>();
+}
+
+}  // namespace base
diff --git a/base/message_loop/message_pump_mac_unittest.mm b/base/message_loop/message_pump_mac_unittest.mm
new file mode 100644
index 0000000..6b63aa1
--- /dev/null
+++ b/base/message_loop/message_pump_mac_unittest.mm
@@ -0,0 +1,225 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_loop/message_pump_mac.h"
+
+#include "base/mac/scoped_cftyperef.h"
+#import "base/mac/scoped_nsobject.h"
+#include "base/macros.h"
+#include "base/message_loop/message_loop.h"
+#include "base/message_loop/message_loop_current.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+@interface TestModalAlertCloser : NSObject
+- (void)runTestThenCloseAlert:(NSAlert*)alert;
+@end
+
+namespace {
+
+// Internal constants from message_pump_mac.mm.
+constexpr int kAllModesMask = 0xf;
+constexpr int kNSApplicationModalSafeModeMask = 0x3;
+
+}  // namespace
+
+namespace base {
+
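+// Exercises MessagePumpCFRunLoopBase's private CFRunLoopTimer helpers; this
+// presumably relies on a friend declaration in message_pump_mac.h to reach
+// those private statics.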
+class TestMessagePumpCFRunLoopBase {
+ public:
+  bool TestCanInvalidateTimers() {
+    return MessagePumpCFRunLoopBase::CanInvalidateCFRunLoopTimers();
+  }
+  static void SetTimerValid(CFRunLoopTimerRef timer, bool valid) {
+    MessagePumpCFRunLoopBase::ChromeCFRunLoopTimerSetValid(timer, valid);
+  }
+
+  static void PerformTimerCallback(CFRunLoopTimerRef timer, void* info) {
+    TestMessagePumpCFRunLoopBase* self =
+        static_cast<TestMessagePumpCFRunLoopBase*>(info);
+    self->timer_callback_called_ = true;
+
+    if (self->invalidate_timer_in_callback_) {
+      SetTimerValid(timer, false);
+    }
+  }
+
+  bool invalidate_timer_in_callback_;
+
+  bool timer_callback_called_;
+};
+
+TEST(MessagePumpMacTest, TestCanInvalidateTimers) {
+  TestMessagePumpCFRunLoopBase message_pump_test;
+
+  // Catch whether the use of the private API ever starts failing.
+  EXPECT_TRUE(message_pump_test.TestCanInvalidateTimers());
+}
+
+TEST(MessagePumpMacTest, TestInvalidatedTimerReuse) {
+  TestMessagePumpCFRunLoopBase message_pump_test;
+
+  CFRunLoopTimerContext timer_context = CFRunLoopTimerContext();
+  timer_context.info = &message_pump_test;
+  const CFTimeInterval kCFTimeIntervalMax =
+      std::numeric_limits<CFTimeInterval>::max();
+  ScopedCFTypeRef<CFRunLoopTimerRef> test_timer(CFRunLoopTimerCreate(
+      NULL,                // allocator
+      kCFTimeIntervalMax,  // fire time
+      kCFTimeIntervalMax,  // interval
+      0,                   // flags
+      0,                   // priority
+      TestMessagePumpCFRunLoopBase::PerformTimerCallback, &timer_context));
+  CFRunLoopAddTimer(CFRunLoopGetCurrent(), test_timer,
+                    kMessageLoopExclusiveRunLoopMode);
+
+  // Sanity check.
+  EXPECT_TRUE(CFRunLoopTimerIsValid(test_timer));
+
+  // Confirm that the timer fires as expected, and that it's not a one-time-use
+  // timer (those timers are invalidated after they fire).
+  CFAbsoluteTime next_fire_time = CFAbsoluteTimeGetCurrent() + 0.01;
+  CFRunLoopTimerSetNextFireDate(test_timer, next_fire_time);
+  message_pump_test.timer_callback_called_ = false;
+  message_pump_test.invalidate_timer_in_callback_ = false;
+  CFRunLoopRunInMode(kMessageLoopExclusiveRunLoopMode, 0.02, true);
+  EXPECT_TRUE(message_pump_test.timer_callback_called_);
+  EXPECT_TRUE(CFRunLoopTimerIsValid(test_timer));
+
+  // As a repeating timer, the timer should have a new fire date set in the
+  // future.
+  EXPECT_GT(CFRunLoopTimerGetNextFireDate(test_timer), next_fire_time);
+
+  // Try firing the timer, and invalidating it within its callback.
+  next_fire_time = CFAbsoluteTimeGetCurrent() + 0.01;
+  CFRunLoopTimerSetNextFireDate(test_timer, next_fire_time);
+  message_pump_test.timer_callback_called_ = false;
+  message_pump_test.invalidate_timer_in_callback_ = true;
+  CFRunLoopRunInMode(kMessageLoopExclusiveRunLoopMode, 0.02, true);
+  EXPECT_TRUE(message_pump_test.timer_callback_called_);
+  EXPECT_FALSE(CFRunLoopTimerIsValid(test_timer));
+
+  // The CFRunLoop believes the timer is invalid, so it should not have a
+  // fire date.
+  EXPECT_EQ(0, CFRunLoopTimerGetNextFireDate(test_timer));
+
+  // Now mark the timer as valid and confirm that it still fires correctly.
+  TestMessagePumpCFRunLoopBase::SetTimerValid(test_timer, true);
+  EXPECT_TRUE(CFRunLoopTimerIsValid(test_timer));
+  next_fire_time = CFAbsoluteTimeGetCurrent() + 0.01;
+  CFRunLoopTimerSetNextFireDate(test_timer, next_fire_time);
+  message_pump_test.timer_callback_called_ = false;
+  message_pump_test.invalidate_timer_in_callback_ = false;
+  CFRunLoopRunInMode(kMessageLoopExclusiveRunLoopMode, 0.02, true);
+  EXPECT_TRUE(message_pump_test.timer_callback_called_);
+  EXPECT_TRUE(CFRunLoopTimerIsValid(test_timer));
+
+  // Confirm that the run loop again gave it a new fire date in the future.
+  EXPECT_GT(CFRunLoopTimerGetNextFireDate(test_timer), next_fire_time);
+
+  CFRunLoopRemoveTimer(CFRunLoopGetCurrent(), test_timer,
+                       kMessageLoopExclusiveRunLoopMode);
+}
+
+namespace {
+
+// Posted tasks are only executed while the message pump has a delegate, that
+// is, while a base::RunLoop is running.  So to test whether posted tasks are
+// run by CFRunLoopRunInMode and *not* by the regular RunLoop, we need to be
+// inside a task that itself calls CFRunLoopRunInMode.  This task runs the
+// given |mode| after posting a task to increment a counter, then checks
+// whether the counter incremented after emptying that run loop mode.
+void IncrementInModeAndExpect(CFRunLoopMode mode, int result) {
+  // Since this task is "ours" rather than a system task, allow nesting.
+  MessageLoopCurrent::ScopedNestableTaskAllower allow;
+  int counter = 0;
+  auto increment = BindRepeating([](int* i) { ++*i; }, &counter);
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, increment);
+  while (CFRunLoopRunInMode(mode, 0, true) == kCFRunLoopRunHandledSource)
+    ;
+  ASSERT_EQ(result, counter);
+}
+
+}  // namespace
+
+// Tests the correct behavior of ScopedPumpMessagesInPrivateModes.
+TEST(MessagePumpMacTest, ScopedPumpMessagesInPrivateModes) {
+  MessageLoopForUI message_loop;
+
+  CFRunLoopMode kRegular = kCFRunLoopDefaultMode;
+  CFRunLoopMode kPrivate = CFSTR("NSUnhighlightMenuRunLoopMode");
+
+  // Work is seen when running in the default mode.
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce(&IncrementInModeAndExpect, kRegular, 1));
+  EXPECT_NO_FATAL_FAILURE(RunLoop().RunUntilIdle());
+
+  // But not seen when running in a private mode.
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce(&IncrementInModeAndExpect, kPrivate, 0));
+  EXPECT_NO_FATAL_FAILURE(RunLoop().RunUntilIdle());
+
+  {
+    ScopedPumpMessagesInPrivateModes allow_private;
+    // Now the work should be seen.
+    ThreadTaskRunnerHandle::Get()->PostTask(
+        FROM_HERE, BindOnce(&IncrementInModeAndExpect, kPrivate, 1));
+    EXPECT_NO_FATAL_FAILURE(RunLoop().RunUntilIdle());
+
+    // The regular mode should also work the same.
+    ThreadTaskRunnerHandle::Get()->PostTask(
+        FROM_HERE, BindOnce(&IncrementInModeAndExpect, kRegular, 1));
+    EXPECT_NO_FATAL_FAILURE(RunLoop().RunUntilIdle());
+  }
+
+  // And now the scoper is out of scope, private modes should no longer see it.
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce(&IncrementInModeAndExpect, kPrivate, 0));
+  EXPECT_NO_FATAL_FAILURE(RunLoop().RunUntilIdle());
+
+  // Only regular modes see it.
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce(&IncrementInModeAndExpect, kRegular, 1));
+  EXPECT_NO_FATAL_FAILURE(RunLoop().RunUntilIdle());
+}
+
+// Tests that private message loop modes are not pumped while a modal dialog is
+// present.
+TEST(MessagePumpMacTest, ScopedPumpMessagesAttemptWithModalDialog) {
+  MessageLoopForUI message_loop;
+
+  {
+    base::ScopedPumpMessagesInPrivateModes allow_private;
+    // No modal window, so all modes should be pumped.
+    EXPECT_EQ(kAllModesMask, allow_private.GetModeMaskForTest());
+  }
+
+  base::scoped_nsobject<NSAlert> alert([[NSAlert alloc] init]);
+  [alert addButtonWithTitle:@"OK"];
+  base::scoped_nsobject<TestModalAlertCloser> closer(
+      [[TestModalAlertCloser alloc] init]);
+  [closer performSelector:@selector(runTestThenCloseAlert:)
+               withObject:alert
+               afterDelay:0
+                  inModes:@[ NSModalPanelRunLoopMode ]];
+  NSInteger result = [alert runModal];
+  EXPECT_EQ(NSAlertFirstButtonReturn, result);
+}
+
+}  // namespace base
+
+@implementation TestModalAlertCloser
+
+- (void)runTestThenCloseAlert:(NSAlert*)alert {
+  EXPECT_TRUE([NSApp modalWindow]);
+  {
+    base::ScopedPumpMessagesInPrivateModes allow_private;
+    // With a modal window, only safe modes should be pumped.
+    EXPECT_EQ(kNSApplicationModalSafeModeMask,
+              allow_private.GetModeMaskForTest());
+  }
+  [[alert buttons][0] performClick:nil];
+}
+
+@end
diff --git a/base/message_loop/message_pump_perftest.cc b/base/message_loop/message_pump_perftest.cc
new file mode 100644
index 0000000..76f18cb
--- /dev/null
+++ b/base/message_loop/message_pump_perftest.cc
@@ -0,0 +1,306 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <algorithm>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/format_macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/message_loop/message_loop.h"
+#include "base/single_thread_task_runner.h"
+#include "base/strings/stringprintf.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/thread.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/perf/perf_test.h"
+
+#if defined(OS_ANDROID)
+#include "base/android/java_handler_thread.h"
+#endif
+
+namespace base {
+
+class ScheduleWorkTest : public testing::Test {
+ public:
+  ScheduleWorkTest() : counter_(0) {}
+
+  void SetUp() override {
+    if (base::ThreadTicks::IsSupported())
+      base::ThreadTicks::WaitUntilInitialized();
+  }
+
+  void Increment(uint64_t amount) { counter_ += amount; }
+
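+  // Runs on a scheduling thread: calls ScheduleWork() on the target loop in
+  // batches of kBatchSize until kTargetTimeSec elapses, recording the total,
+  // per-thread, and min/max per-batch times into the |index|-th result slots.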
+  void Schedule(int index) {
+    base::TimeTicks start = base::TimeTicks::Now();
+    base::ThreadTicks thread_start;
+    if (ThreadTicks::IsSupported())
+      thread_start = base::ThreadTicks::Now();
+    base::TimeDelta minimum = base::TimeDelta::Max();
+    base::TimeDelta maximum = base::TimeDelta();
+    base::TimeTicks now, lastnow = start;
+    uint64_t schedule_calls = 0u;
+    do {
+      for (size_t i = 0; i < kBatchSize; ++i) {
+        target_message_loop()->ScheduleWork();
+        schedule_calls++;
+      }
+      now = base::TimeTicks::Now();
+      base::TimeDelta laptime = now - lastnow;
+      lastnow = now;
+      minimum = std::min(minimum, laptime);
+      maximum = std::max(maximum, laptime);
+    } while (now - start < base::TimeDelta::FromSeconds(kTargetTimeSec));
+
+    scheduling_times_[index] = now - start;
+    if (ThreadTicks::IsSupported())
+      scheduling_thread_times_[index] =
+          base::ThreadTicks::Now() - thread_start;
+    min_batch_times_[index] = minimum;
+    max_batch_times_[index] = maximum;
+    target_message_loop()->task_runner()->PostTask(
+        FROM_HERE, base::BindOnce(&ScheduleWorkTest::Increment,
+                                  base::Unretained(this), schedule_calls));
+  }
+
+  void ScheduleWork(MessageLoop::Type target_type, int num_scheduling_threads) {
+#if defined(OS_ANDROID)
+    if (target_type == MessageLoop::TYPE_JAVA) {
+      java_thread_.reset(new android::JavaHandlerThread("target"));
+      java_thread_->Start();
+    } else
+#endif
+    {
+      target_.reset(new Thread("target"));
+      target_->StartWithOptions(Thread::Options(target_type, 0u));
+
+      // Without this, it's possible for the scheduling threads to start and run
+      // before the target thread. In this case, the scheduling threads will
+      // call target_message_loop()->ScheduleWork(), which dereferences the
+      // loop's message pump, which is only created after the target thread has
+      // finished starting.
+      target_->WaitUntilThreadStarted();
+    }
+
+    std::vector<std::unique_ptr<Thread>> scheduling_threads;
+    scheduling_times_.reset(new base::TimeDelta[num_scheduling_threads]);
+    scheduling_thread_times_.reset(new base::TimeDelta[num_scheduling_threads]);
+    min_batch_times_.reset(new base::TimeDelta[num_scheduling_threads]);
+    max_batch_times_.reset(new base::TimeDelta[num_scheduling_threads]);
+
+    for (int i = 0; i < num_scheduling_threads; ++i) {
+      scheduling_threads.push_back(std::make_unique<Thread>("posting thread"));
+      scheduling_threads[i]->Start();
+    }
+
+    for (int i = 0; i < num_scheduling_threads; ++i) {
+      scheduling_threads[i]->task_runner()->PostTask(
+          FROM_HERE, base::BindOnce(&ScheduleWorkTest::Schedule,
+                                    base::Unretained(this), i));
+    }
+
+    for (int i = 0; i < num_scheduling_threads; ++i) {
+      scheduling_threads[i]->Stop();
+    }
+#if defined(OS_ANDROID)
+    if (target_type == MessageLoop::TYPE_JAVA) {
+      java_thread_->Stop();
+      java_thread_.reset();
+    } else
+#endif
+    {
+      target_->Stop();
+      target_.reset();
+    }
+    base::TimeDelta total_time;
+    base::TimeDelta total_thread_time;
+    base::TimeDelta min_batch_time = base::TimeDelta::Max();
+    base::TimeDelta max_batch_time = base::TimeDelta();
+    for (int i = 0; i < num_scheduling_threads; ++i) {
+      total_time += scheduling_times_[i];
+      total_thread_time += scheduling_thread_times_[i];
+      min_batch_time = std::min(min_batch_time, min_batch_times_[i]);
+      max_batch_time = std::max(max_batch_time, max_batch_times_[i]);
+    }
+    std::string trace = StringPrintf(
+        "%d_threads_scheduling_to_%s_pump",
+        num_scheduling_threads,
+        target_type == MessageLoop::TYPE_IO
+            ? "io"
+            : (target_type == MessageLoop::TYPE_UI ? "ui" : "default"));
+    perf_test::PrintResult(
+        "task",
+        "",
+        trace,
+        total_time.InMicroseconds() / static_cast<double>(counter_),
+        "us/task",
+        true);
+    perf_test::PrintResult(
+        "task",
+        "_min_batch_time",
+        trace,
+        min_batch_time.InMicroseconds() / static_cast<double>(kBatchSize),
+        "us/task",
+        false);
+    perf_test::PrintResult(
+        "task",
+        "_max_batch_time",
+        trace,
+        max_batch_time.InMicroseconds() / static_cast<double>(kBatchSize),
+        "us/task",
+        false);
+    if (ThreadTicks::IsSupported()) {
+      perf_test::PrintResult(
+          "task",
+          "_thread_time",
+          trace,
+          total_thread_time.InMicroseconds() / static_cast<double>(counter_),
+          "us/task",
+          true);
+    }
+  }
+
+  MessageLoop* target_message_loop() {
+#if defined(OS_ANDROID)
+    if (java_thread_)
+      return java_thread_->message_loop();
+#endif
+    return target_->message_loop();
+  }
+
+ private:
+  std::unique_ptr<Thread> target_;
+#if defined(OS_ANDROID)
+  std::unique_ptr<android::JavaHandlerThread> java_thread_;
+#endif
+  std::unique_ptr<base::TimeDelta[]> scheduling_times_;
+  std::unique_ptr<base::TimeDelta[]> scheduling_thread_times_;
+  std::unique_ptr<base::TimeDelta[]> min_batch_times_;
+  std::unique_ptr<base::TimeDelta[]> max_batch_times_;
+  uint64_t counter_;
+
+  static const size_t kTargetTimeSec = 5;
+  static const size_t kBatchSize = 1000;
+};
+
+TEST_F(ScheduleWorkTest, ThreadTimeToIOFromOneThread) {
+  ScheduleWork(MessageLoop::TYPE_IO, 1);
+}
+
+TEST_F(ScheduleWorkTest, ThreadTimeToIOFromTwoThreads) {
+  ScheduleWork(MessageLoop::TYPE_IO, 2);
+}
+
+TEST_F(ScheduleWorkTest, ThreadTimeToIOFromFourThreads) {
+  ScheduleWork(MessageLoop::TYPE_IO, 4);
+}
+
+TEST_F(ScheduleWorkTest, ThreadTimeToUIFromOneThread) {
+  ScheduleWork(MessageLoop::TYPE_UI, 1);
+}
+
+TEST_F(ScheduleWorkTest, ThreadTimeToUIFromTwoThreads) {
+  ScheduleWork(MessageLoop::TYPE_UI, 2);
+}
+
+TEST_F(ScheduleWorkTest, ThreadTimeToUIFromFourThreads) {
+  ScheduleWork(MessageLoop::TYPE_UI, 4);
+}
+
+TEST_F(ScheduleWorkTest, ThreadTimeToDefaultFromOneThread) {
+  ScheduleWork(MessageLoop::TYPE_DEFAULT, 1);
+}
+
+TEST_F(ScheduleWorkTest, ThreadTimeToDefaultFromTwoThreads) {
+  ScheduleWork(MessageLoop::TYPE_DEFAULT, 2);
+}
+
+TEST_F(ScheduleWorkTest, ThreadTimeToDefaultFromFourThreads) {
+  ScheduleWork(MessageLoop::TYPE_DEFAULT, 4);
+}
+
+#if defined(OS_ANDROID)
+TEST_F(ScheduleWorkTest, ThreadTimeToJavaFromOneThread) {
+  ScheduleWork(MessageLoop::TYPE_JAVA, 1);
+}
+
+TEST_F(ScheduleWorkTest, ThreadTimeToJavaFromTwoThreads) {
+  ScheduleWork(MessageLoop::TYPE_JAVA, 2);
+}
+
+TEST_F(ScheduleWorkTest, ThreadTimeToJavaFromFourThreads) {
+  ScheduleWork(MessageLoop::TYPE_JAVA, 4);
+}
+#endif
+
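+// A do-nothing pump: PostTaskTest below feeds the incoming task queue
+// directly, so the loop's pump never has to run.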
+class FakeMessagePump : public MessagePump {
+ public:
+  FakeMessagePump() = default;
+  ~FakeMessagePump() override = default;
+
+  void Run(Delegate* delegate) override {}
+
+  void Quit() override {}
+  void ScheduleWork() override {}
+  void ScheduleDelayedWork(const TimeTicks& delayed_work_time) override {}
+};
+
+class PostTaskTest : public testing::Test {
+ public:
+  void Run(int batch_size, int tasks_per_reload) {
+    base::TimeTicks start = base::TimeTicks::Now();
+    base::TimeTicks now;
+    MessageLoop loop(std::unique_ptr<MessagePump>(new FakeMessagePump));
+    scoped_refptr<internal::IncomingTaskQueue> queue(
+        new internal::IncomingTaskQueue(&loop));
+    uint32_t num_posted = 0;
+    do {
+      for (int i = 0; i < batch_size; ++i) {
+        for (int j = 0; j < tasks_per_reload; ++j) {
+          queue->AddToIncomingQueue(FROM_HERE, DoNothing(), base::TimeDelta(),
+                                    Nestable::kNonNestable);
+          num_posted++;
+        }
+        TaskQueue loop_local_queue;
+        queue->ReloadWorkQueue(&loop_local_queue);
+        while (!loop_local_queue.empty()) {
+          PendingTask t = std::move(loop_local_queue.front());
+          loop_local_queue.pop();
+          loop.RunTask(&t);
+        }
+      }
+
+      now = base::TimeTicks::Now();
+    } while (now - start < base::TimeDelta::FromSeconds(5));
+    std::string trace = StringPrintf("%d_tasks_per_reload", tasks_per_reload);
+    perf_test::PrintResult(
+        "task",
+        "",
+        trace,
+        (now - start).InMicroseconds() / static_cast<double>(num_posted),
+        "us/task",
+        true);
+    queue->WillDestroyCurrentMessageLoop();
+  }
+};
+
+TEST_F(PostTaskTest, OneTaskPerReload) {
+  Run(10000, 1);
+}
+
+TEST_F(PostTaskTest, TenTasksPerReload) {
+  Run(10000, 10);
+}
+
+TEST_F(PostTaskTest, OneHundredTasksPerReload) {
+  Run(1000, 100);
+}
+
+}  // namespace base
diff --git a/base/message_loop/message_pump_win.cc b/base/message_loop/message_pump_win.cc
new file mode 100644
index 0000000..8e6f1f4
--- /dev/null
+++ b/base/message_loop/message_pump_win.cc
@@ -0,0 +1,585 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_loop/message_pump_win.h"
+
+#include <math.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <limits>
+
+#include "base/debug/alias.h"
+#include "base/memory/ptr_util.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/strings/stringprintf.h"
+#include "base/trace_event/trace_event.h"
+#include "base/win/current_module.h"
+#include "base/win/wrapped_window_proc.h"
+
+namespace base {
+
+namespace {
+
+enum MessageLoopProblems {
+  MESSAGE_POST_ERROR,
+  COMPLETION_POST_ERROR,
+  SET_TIMER_ERROR,
+  RECEIVED_WM_QUIT_ERROR,
+  MESSAGE_LOOP_PROBLEM_MAX,
+};
+
+}  // namespace
+
+// Message sent to get an additional time slice for pumping (processing) another
+// task (a series of such messages creates a continuous task pump).
+static const int kMsgHaveWork = WM_USER + 1;
+
+//-----------------------------------------------------------------------------
+// MessagePumpWin public:
+
+MessagePumpWin::MessagePumpWin() = default;
+
+void MessagePumpWin::Run(Delegate* delegate) {
+  RunState s;
+  s.delegate = delegate;
+  s.should_quit = false;
+  s.run_depth = state_ ? state_->run_depth + 1 : 1;
+
+  // TODO(stanisc): crbug.com/596190: Remove this code once the bug is fixed.
+  s.schedule_work_error_count = 0;
+  s.last_schedule_work_error_time = Time();
+
+  RunState* previous_state = state_;
+  state_ = &s;
+
+  DoRunLoop();
+
+  state_ = previous_state;
+}
+
+void MessagePumpWin::Quit() {
+  DCHECK(state_);
+  state_->should_quit = true;
+}
+
+//-----------------------------------------------------------------------------
+// MessagePumpWin protected:
+
+int MessagePumpWin::GetCurrentDelay() const {
+  if (delayed_work_time_.is_null())
+    return -1;
+
+  // Be careful here.  TimeDelta has a precision of microseconds, but we want a
+  // value in milliseconds.  If there are 5.5ms left, should the delay be 5 or
+  // 6?  It should be 6 to avoid executing delayed work too early.
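+  // For example, 5.5 ms remaining gives ceil(5.5) == 6, so we wait 6 ms.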
+  double timeout =
+      ceil((delayed_work_time_ - TimeTicks::Now()).InMillisecondsF());
+
+  // Range check the |timeout| while converting to an integer.  If the |timeout|
+  // is negative, then we need to run delayed work soon.  If the |timeout| is
+  // "overflowingly" large, that means a delayed task was posted with a
+  // super-long delay.
+  return timeout < 0 ? 0 :
+      (timeout > std::numeric_limits<int>::max() ?
+       std::numeric_limits<int>::max() : static_cast<int>(timeout));
+}
+
+//-----------------------------------------------------------------------------
+// MessagePumpForUI public:
+
+MessagePumpForUI::MessagePumpForUI() {
+  bool succeeded = message_window_.Create(
+      BindRepeating(&MessagePumpForUI::MessageCallback, Unretained(this)));
+  DCHECK(succeeded);
+}
+
+MessagePumpForUI::~MessagePumpForUI() = default;
+
+void MessagePumpForUI::ScheduleWork() {
+  if (InterlockedExchange(&work_state_, HAVE_WORK) != READY)
+    return;  // Someone else continued the pumping.
+
+  // Make sure the MessagePump does some work for us.
+  BOOL ret = PostMessage(message_window_.hwnd(), kMsgHaveWork, 0, 0);
+  if (ret)
+    return;  // There was room in the Window Message queue.
+
+  // We have failed to insert a have-work message, so there is a chance that we
+  // will starve tasks/timers while sitting in a nested run loop.  Nested
+  // loops only look at Windows Message queues, and don't look at *our* task
+  // queues, etc., so we might not get a time slice in such a loop. :-(
+  // We could abort here, but the fear is that this failure mode is plausibly
+  // common (the queue fills up at about 2,000 messages), so we'll do a
+  // near-graceful recovery.  Nested loops are pretty transient (we think), so
+  // this will probably be recoverable.
+
+  // Clarify that we didn't really insert.
+  InterlockedExchange(&work_state_, READY);
+  UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem", MESSAGE_POST_ERROR,
+                            MESSAGE_LOOP_PROBLEM_MAX);
+  state_->schedule_work_error_count++;
+  state_->last_schedule_work_error_time = Time::Now();
+}
+
+void MessagePumpForUI::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
+  delayed_work_time_ = delayed_work_time;
+  RescheduleTimer();
+}
+
+//-----------------------------------------------------------------------------
+// MessagePumpForUI private:
+
+bool MessagePumpForUI::MessageCallback(
+    UINT message, WPARAM wparam, LPARAM lparam, LRESULT* result) {
+  switch (message) {
+    case kMsgHaveWork:
+      HandleWorkMessage();
+      break;
+    case WM_TIMER:
+      HandleTimerMessage();
+      break;
+  }
+  return false;
+}
+
+void MessagePumpForUI::DoRunLoop() {
+  // If this was just a simple PeekMessage() loop (servicing all possible work
+  // queues), then Windows would try to achieve the following order (according
+  // to MSDN documentation about PeekMessage with no filter):
+  //    * Sent messages
+  //    * Posted messages
+  //    * Sent messages (again)
+  //    * WM_PAINT messages
+  //    * WM_TIMER messages
+  //
+  // Summary: none of the above classes is starved, and sent messages have twice
+  // the chance of being processed (i.e., reduced service time).
+
+  for (;;) {
+    // If we do any work, we may create more messages etc., and more work may
+    // possibly be waiting in another task group.  When we (for example)
+    // ProcessNextWindowsMessage(), there is a good chance there are still more
+    // messages waiting.  On the other hand, when any of these methods return
+    // having done no work, then it is pretty unlikely that calling them again
+    // quickly will find any work to do.  Finally, if they all say they had no
+    // work, then it is a good time to consider sleeping (waiting) for more
+    // work.
+
+    bool more_work_is_plausible = ProcessNextWindowsMessage();
+    if (state_->should_quit)
+      break;
+
+    more_work_is_plausible |= state_->delegate->DoWork();
+    if (state_->should_quit)
+      break;
+
+    more_work_is_plausible |=
+        state_->delegate->DoDelayedWork(&delayed_work_time_);
+    // If we did not process any delayed work, then we can assume that our
+    // existing WM_TIMER, if any, will fire when delayed work should run.  We
+    // don't want to disturb that timer if it is already in flight.  However,
+    // if we did do all remaining delayed work, then let's kill the WM_TIMER.
+    if (more_work_is_plausible && delayed_work_time_.is_null())
+      KillTimer(message_window_.hwnd(), reinterpret_cast<UINT_PTR>(this));
+    if (state_->should_quit)
+      break;
+
+    if (more_work_is_plausible)
+      continue;
+
+    more_work_is_plausible = state_->delegate->DoIdleWork();
+    if (state_->should_quit)
+      break;
+
+    if (more_work_is_plausible)
+      continue;
+
+    WaitForWork();  // Wait (sleep) until we have work to do again.
+  }
+}
+
+void MessagePumpForUI::WaitForWork() {
+  // Wait until a message is available, up to the time needed by the timer
+  // manager to fire the next set of timers.
+  int delay;
+  DWORD wait_flags = MWMO_INPUTAVAILABLE;
+
+  while ((delay = GetCurrentDelay()) != 0) {
+    if (delay < 0)  // Negative value means no timers waiting.
+      delay = INFINITE;
+
+    // Tell the optimizer to retain these values to simplify analyzing hangs.
+    base::debug::Alias(&delay);
+    base::debug::Alias(&wait_flags);
+    DWORD result = MsgWaitForMultipleObjectsEx(0, nullptr, delay, QS_ALLINPUT,
+                                               wait_flags);
+
+    if (WAIT_OBJECT_0 == result) {
+      // A WM_* message is available.
+      // If a parent-child relationship exists between windows across threads,
+      // then their thread inputs are implicitly attached.
+      // This causes the MsgWaitForMultipleObjectsEx API to return indicating
+      // that messages are ready for processing (specifically, mouse messages
+      // intended for the child window may appear if the child window has
+      // capture).
+      // The subsequent PeekMessage call may fail to return any messages, thus
+      // causing us to enter a tight loop at times.
+      // The code below is a workaround to give the child window
+      // some time to process its input messages by looping back to
+      // MsgWaitForMultipleObjectsEx above when there are no messages for the
+      // current thread.
+      MSG msg = {0};
+      bool has_pending_sent_message =
+          (HIWORD(GetQueueStatus(QS_SENDMESSAGE)) & QS_SENDMESSAGE) != 0;
+      if (has_pending_sent_message ||
+          PeekMessage(&msg, nullptr, 0, 0, PM_NOREMOVE)) {
+        return;
+      }
+
+      // We know there are no more messages for this thread because PeekMessage
+      // has returned false. Reset |wait_flags| so that we wait for a *new*
+      // message.
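+      // Without MWMO_INPUTAVAILABLE, MsgWaitForMultipleObjectsEx ignores input
+      // that has already been seen and only returns when new input arrives.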
+      wait_flags = 0;
+    }
+
+    DCHECK_NE(WAIT_FAILED, result) << GetLastError();
+  }
+}
+
+void MessagePumpForUI::HandleWorkMessage() {
+  // If we are being called outside of the context of Run, then don't try to do
+  // any work.  This could correspond to a MessageBox call or something of that
+  // sort.
+  if (!state_) {
+    // Since we handled a kMsgHaveWork message, we must still update this flag.
+    InterlockedExchange(&work_state_, READY);
+    return;
+  }
+
+  // Let whatever would have run, had we not been posting messages to the
+  // queue, run now.  This is an attempt to keep our dummy message from
+  // starving other messages that may be in the Windows message queue.
+  ProcessPumpReplacementMessage();
+
+  // Now give the delegate a chance to do some work.  It'll let us know if it
+  // needs to do more work.
+  if (state_->delegate->DoWork())
+    ScheduleWork();
+  state_->delegate->DoDelayedWork(&delayed_work_time_);
+  RescheduleTimer();
+}
+
+void MessagePumpForUI::HandleTimerMessage() {
+  KillTimer(message_window_.hwnd(), reinterpret_cast<UINT_PTR>(this));
+
+  // If we are being called outside of the context of Run, then don't do
+  // anything.  This could correspond to a MessageBox call or something of
+  // that sort.
+  if (!state_)
+    return;
+
+  state_->delegate->DoDelayedWork(&delayed_work_time_);
+  RescheduleTimer();
+}
+
+void MessagePumpForUI::RescheduleTimer() {
+  if (delayed_work_time_.is_null())
+    return;
+  //
+  // We would *like* to provide high resolution timers.  Windows timers using
+  // SetTimer() have a 10ms granularity.  We have to use WM_TIMER as a wakeup
+  // mechanism because the application can enter modal windows loops where it
+  // is not running our MessageLoop; the only way to have our timers fire in
+  // these cases is to post messages there.
+  //
+  // To provide sub-10ms timers, we process timers directly from our run loop.
+  // For the common case, timers will be processed there as the run loop does
+  // its normal work.  However, we *also* set the system timer so that WM_TIMER
+  // events fire.  This mops up the case of timers not being able to work in
+  // modal message loops.  It is possible for the SetTimer to pop and have no
+  // pending timers, because they could have already been processed by the
+  // run loop itself.
+  //
+  // We use a single SetTimer corresponding to the timer that will expire
+  // soonest.  As new timers are created and destroyed, we update SetTimer.
+  // Getting a spurious SetTimer event firing is benign, as we'll just be
+  // processing an empty timer queue.
+  //
+  int delay_msec = GetCurrentDelay();
+  DCHECK_GE(delay_msec, 0);
+  if (delay_msec == 0) {
+    ScheduleWork();
+  } else {
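+    // USER_TIMER_MINIMUM is 10 ms, so e.g. a requested 3 ms delay becomes a
+    // 10 ms WM_TIMER; sub-10ms precision comes from the run loop path above.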
+    if (delay_msec < USER_TIMER_MINIMUM)
+      delay_msec = USER_TIMER_MINIMUM;
+
+    // Tell the optimizer to retain these values to simplify analyzing hangs.
+    base::debug::Alias(&delay_msec);
+    // Create a WM_TIMER event that will wake us up to check for any pending
+    // timers (in case we are running within a nested, external sub-pump).
+    UINT_PTR ret = SetTimer(message_window_.hwnd(), 0, delay_msec, nullptr);
+    if (ret)
+      return;
+    // If we can't set timers, we are in big trouble... but cross our fingers
+    // for now.
+    // TODO(jar): If we don't see this error, use a CHECK() here instead.
+    UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem", SET_TIMER_ERROR,
+                              MESSAGE_LOOP_PROBLEM_MAX);
+  }
+}
+
+bool MessagePumpForUI::ProcessNextWindowsMessage() {
+  // If there are sent messages in the queue then PeekMessage internally
+  // dispatches the message and returns false. We return true in this
+  // case to ensure that the message loop peeks again instead of calling
+  // MsgWaitForMultipleObjectsEx again.
+  bool sent_messages_in_queue = false;
+  DWORD queue_status = GetQueueStatus(QS_SENDMESSAGE);
+  if (HIWORD(queue_status) & QS_SENDMESSAGE)
+    sent_messages_in_queue = true;
+
+  MSG msg;
+  if (PeekMessage(&msg, nullptr, 0, 0, PM_REMOVE) != FALSE)
+    return ProcessMessageHelper(msg);
+
+  return sent_messages_in_queue;
+}
+
+bool MessagePumpForUI::ProcessMessageHelper(const MSG& msg) {
+  TRACE_EVENT1("base", "MessagePumpForUI::ProcessMessageHelper",
+               "message", msg.message);
+  if (WM_QUIT == msg.message) {
+    // WM_QUIT is the standard way to exit a GetMessage() loop. Our MessageLoop
+    // has its own quit mechanism, so WM_QUIT is unexpected and should be
+    // ignored.
+    UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem",
+                              RECEIVED_WM_QUIT_ERROR, MESSAGE_LOOP_PROBLEM_MAX);
+    return true;
+  }
+
+  // While running our main message pump, we discard kMsgHaveWork messages.
+  if (msg.message == kMsgHaveWork && msg.hwnd == message_window_.hwnd())
+    return ProcessPumpReplacementMessage();
+
+  TranslateMessage(&msg);
+  DispatchMessage(&msg);
+
+  return true;
+}
+
+bool MessagePumpForUI::ProcessPumpReplacementMessage() {
+  // When we encounter a kMsgHaveWork message, this method is called to peek and
+  // process a replacement message. The goal is to make the kMsgHaveWork as non-
+  // intrusive as possible, even though a continuous stream of such messages is
+  // posted. This method carefully peeks a message while there is no chance for
+  // a kMsgHaveWork to be pending, then resets |work_state_| to READY (allowing
+  // a replacement kMsgHaveWork to possibly be posted), and finally dispatches
+  // that peeked replacement. Note that the re-post of kMsgHaveWork may be
+  // asynchronous to this thread!!
+
+  MSG msg;
+  const bool have_message =
+      PeekMessage(&msg, nullptr, 0, 0, PM_REMOVE) != FALSE;
+
+  // Expect no message or a message different than kMsgHaveWork.
+  DCHECK(!have_message || kMsgHaveWork != msg.message ||
+         msg.hwnd != message_window_.hwnd());
+
+  // Since we discarded a kMsgHaveWork message, we must update the flag.
+  int old_work_state = InterlockedExchange(&work_state_, READY);
+  DCHECK_EQ(HAVE_WORK, old_work_state);
+
+  // We don't need a special time slice if there was no message to process.
+  if (!have_message)
+    return false;
+
+  // Guarantee we'll get another time slice in the case where we go into native
+  // Windows code.  This ScheduleWork() may hurt performance a tiny bit when
+  // tasks appear very infrequently, but when the event queue is busy, the
+  // kMsgHaveWork events get (percentage wise) rarer and rarer.
+  ScheduleWork();
+  return ProcessMessageHelper(msg);
+}
+
+//-----------------------------------------------------------------------------
+// MessagePumpForIO public:
+
+MessagePumpForIO::IOContext::IOContext() {
+  memset(&overlapped, 0, sizeof(overlapped));
+}
+
+MessagePumpForIO::MessagePumpForIO() {
+  port_.Set(CreateIoCompletionPort(INVALID_HANDLE_VALUE, nullptr,
+      reinterpret_cast<ULONG_PTR>(nullptr), 1));
+  DCHECK(port_.IsValid());
+}
+
+MessagePumpForIO::~MessagePumpForIO() = default;
+
+void MessagePumpForIO::ScheduleWork() {
+  if (InterlockedExchange(&work_state_, HAVE_WORK) != READY)
+    return;  // Someone else continued the pumping.
+
+  // Make sure the MessagePump does some work for us.
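+  // Using |this| as both the completion key and the OVERLAPPED* marks this
+  // packet as the pump's own wakeup; ProcessInternalIOItem() matches on it.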
+  BOOL ret = PostQueuedCompletionStatus(port_.Get(), 0,
+                                        reinterpret_cast<ULONG_PTR>(this),
+                                        reinterpret_cast<OVERLAPPED*>(this));
+  if (ret)
+    return;  // Post worked perfectly.
+
+  // See comment in MessagePumpForUI::ScheduleWork() for this error recovery.
+  InterlockedExchange(&work_state_, READY);  // Clarify that we didn't succeed.
+  UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem", COMPLETION_POST_ERROR,
+                            MESSAGE_LOOP_PROBLEM_MAX);
+  state_->schedule_work_error_count++;
+  state_->last_schedule_work_error_time = Time::Now();
+}
+
+void MessagePumpForIO::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
+  // We know that we can't be blocked right now since this method can only be
+  // called on the same thread as Run, so we only need to update our record of
+  // how long to sleep when we do sleep.
+  delayed_work_time_ = delayed_work_time;
+}
+
+void MessagePumpForIO::RegisterIOHandler(HANDLE file_handle,
+                                         IOHandler* handler) {
+  HANDLE port = CreateIoCompletionPort(file_handle, port_.Get(),
+                                       reinterpret_cast<ULONG_PTR>(handler), 1);
+  DPCHECK(port);
+}
+
+bool MessagePumpForIO::RegisterJobObject(HANDLE job_handle,
+                                         IOHandler* handler) {
+  JOBOBJECT_ASSOCIATE_COMPLETION_PORT info;
+  info.CompletionKey = handler;
+  info.CompletionPort = port_.Get();
+  return SetInformationJobObject(job_handle,
+                                 JobObjectAssociateCompletionPortInformation,
+                                 &info,
+                                 sizeof(info)) != FALSE;
+}
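+
+// For reference, a handler registered above is driven through
+// WaitForIOCompletion() below.  A minimal sketch (names here are illustrative
+// only, assuming the IOHandler interface declared in message_pump_win.h):
+//
+//   class Reader : public base::MessagePumpForIO::IOHandler {
+//    public:
+//     void OnIOCompleted(base::MessagePumpForIO::IOContext* context,
+//                        DWORD bytes_transfered, DWORD error) override {
+//       // |bytes_transfered| bytes are now in the buffer passed to ReadFile.
+//     }
+//   };
+//
+//   pump->RegisterIOHandler(file, &reader);
+//   ::ReadFile(file, buffer, size, nullptr, &context->overlapped);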
+
+//-----------------------------------------------------------------------------
+// MessagePumpForIO private:
+
+void MessagePumpForIO::DoRunLoop() {
+  for (;;) {
+    // If we do any work, we may create more messages etc., and more work may
+    // possibly be waiting in another task group.  When we (for example)
+    // WaitForIOCompletion(), there is a good chance there are still more
+    // messages waiting.  On the other hand, when any of these methods return
+    // having done no work, then it is pretty unlikely that calling them
+    // again quickly will find any work to do.  Finally, if they all say they
+    // had no work, then it is a good time to consider sleeping (waiting) for
+    // more work.
+
+    bool more_work_is_plausible = state_->delegate->DoWork();
+    if (state_->should_quit)
+      break;
+
+    more_work_is_plausible |= WaitForIOCompletion(0, nullptr);
+    if (state_->should_quit)
+      break;
+
+    more_work_is_plausible |=
+        state_->delegate->DoDelayedWork(&delayed_work_time_);
+    if (state_->should_quit)
+      break;
+
+    if (more_work_is_plausible)
+      continue;
+
+    more_work_is_plausible = state_->delegate->DoIdleWork();
+    if (state_->should_quit)
+      break;
+
+    if (more_work_is_plausible)
+      continue;
+
+    WaitForWork();  // Wait (sleep) until we have work to do again.
+  }
+}
+
+// Wait until IO completes, up to the time needed by the timer manager to fire
+// the next set of timers.
+void MessagePumpForIO::WaitForWork() {
+  // We do not support nested IO message loops. This is to avoid messy
+  // recursion problems.
+  DCHECK_EQ(1, state_->run_depth) << "Cannot nest an IO message loop!";
+
+  int timeout = GetCurrentDelay();
+  if (timeout < 0)  // Negative value means no timers waiting.
+    timeout = INFINITE;
+
+  // Tell the optimizer to retain these values to simplify analyzing hangs.
+  base::debug::Alias(&timeout);
+  WaitForIOCompletion(timeout, nullptr);
+}
+
+bool MessagePumpForIO::WaitForIOCompletion(DWORD timeout, IOHandler* filter) {
+  IOItem item;
+  if (completed_io_.empty() || !MatchCompletedIOItem(filter, &item)) {
+    // We have to ask the system for another IO completion.
+    if (!GetIOItem(timeout, &item))
+      return false;
+
+    if (ProcessInternalIOItem(item))
+      return true;
+  }
+
+  if (filter && item.handler != filter) {
+    // Save this item for later
+    completed_io_.push_back(item);
+  } else {
+    item.handler->OnIOCompleted(item.context, item.bytes_transfered,
+                                item.error);
+  }
+  return true;
+}
+
+// Asks the OS for another IO completion result.
+bool MessagePumpForIO::GetIOItem(DWORD timeout, IOItem* item) {
+  memset(item, 0, sizeof(*item));
+  ULONG_PTR key = reinterpret_cast<ULONG_PTR>(nullptr);
+  OVERLAPPED* overlapped = nullptr;
+  if (!GetQueuedCompletionStatus(port_.Get(), &item->bytes_transfered, &key,
+                                 &overlapped, timeout)) {
+    if (!overlapped)
+      return false;  // Nothing in the queue.
+    item->error = GetLastError();
+    item->bytes_transfered = 0;
+  }
+
+  item->handler = reinterpret_cast<IOHandler*>(key);
+  item->context = reinterpret_cast<IOContext*>(overlapped);
+  return true;
+}
+
+bool MessagePumpForIO::ProcessInternalIOItem(const IOItem& item) {
+  if (reinterpret_cast<void*>(this) == reinterpret_cast<void*>(item.context) &&
+      reinterpret_cast<void*>(this) == reinterpret_cast<void*>(item.handler)) {
+    // This is our internal completion.
+    DCHECK(!item.bytes_transfered);
+    InterlockedExchange(&work_state_, READY);
+    return true;
+  }
+  return false;
+}
+
+// Returns a completion item that was previously received.
+bool MessagePumpForIO::MatchCompletedIOItem(IOHandler* filter, IOItem* item) {
+  DCHECK(!completed_io_.empty());
+  for (std::list<IOItem>::iterator it = completed_io_.begin();
+       it != completed_io_.end(); ++it) {
+    if (!filter || it->handler == filter) {
+      *item = *it;
+      completed_io_.erase(it);
+      return true;
+    }
+  }
+  return false;
+}
+
+}  // namespace base
diff --git a/base/message_loop/message_pump_win.h b/base/message_loop/message_pump_win.h
new file mode 100644
index 0000000..f8a8557
--- /dev/null
+++ b/base/message_loop/message_pump_win.h
@@ -0,0 +1,254 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MESSAGE_LOOP_MESSAGE_PUMP_WIN_H_
+#define BASE_MESSAGE_LOOP_MESSAGE_PUMP_WIN_H_
+
+#include <windows.h>
+
+#include <list>
+#include <memory>
+
+#include "base/base_export.h"
+#include "base/message_loop/message_pump.h"
+#include "base/time/time.h"
+#include "base/win/message_window.h"
+#include "base/win/scoped_handle.h"
+
+namespace base {
+
+// MessagePumpWin serves as the base for specialized versions of the MessagePump
+// for Windows. It provides basic functionality like handling of observers and
+// controlling the lifetime of the message pump.
+class BASE_EXPORT MessagePumpWin : public MessagePump {
+ public:
+  MessagePumpWin();
+
+  // MessagePump methods:
+  void Run(Delegate* delegate) override;
+  void Quit() override;
+
+ protected:
+  struct RunState {
+    Delegate* delegate;
+
+    // Used to flag that the current Run() invocation should return ASAP.
+    bool should_quit;
+
+    // Used to count how many Run() invocations are on the stack.
+    int run_depth;
+
+    // Used to help diagnose hangs.
+    // TODO(stanisc): crbug.com/596190: Remove these once the bug is fixed.
+    int schedule_work_error_count;
+    Time last_schedule_work_error_time;
+  };
+
+  // State used with |work_state_| variable.
+  enum WorkState {
+    READY = 0,      // Ready to accept new work.
+    HAVE_WORK = 1,  // New work has been signalled.
+    WORKING = 2     // Handling the work.
+  };
+
+  virtual void DoRunLoop() = 0;
+  int GetCurrentDelay() const;
+
+  // The time at which delayed work should run.
+  TimeTicks delayed_work_time_;
+
+  // A value used to indicate if there is a kMsgHaveWork message pending
+  // in the Windows Message queue.  There is at most one such message, and it
+  // can drive execution of tasks when a native message pump is running.
+  LONG work_state_ = READY;
+
+  // State for the current invocation of Run.
+  RunState* state_ = nullptr;
+};
+
+//-----------------------------------------------------------------------------
+// MessagePumpForUI extends MessagePumpWin with methods that are particular to a
+// MessageLoop instantiated with TYPE_UI.
+//
+// MessagePumpForUI implements a "traditional" Windows message pump. It contains
+// a nearly infinite loop that peeks out messages, and then dispatches them.
+// Intermixed with those peeks are callouts to DoWork for pending tasks, and
+// DoDelayedWork for pending timers. When there are no events to be serviced,
+// this pump goes into a wait state. In most cases, this message pump handles
+// all processing.
+//
+// However, when a task, or a Windows event, invokes a native dialog box (or
+// similar) on the stack, that window typically provides a bare-bones native
+// message pump.  That bare-bones message pump generally supports little more
+// than a peek of the Windows message queue, followed by a dispatch of the
+// peeked message.  MessageLoop extends that bare-bones message pump to also
+// service Tasks, at the cost of some complexity.
+//
+// The basic structure of the extension (referred to as a sub-pump) is that a
+// special message, kMsgHaveWork, is repeatedly injected into the Windows
+// Message queue.  Each time the kMsgHaveWork message is peeked, checks are
+// made for an extended set of events, including the availability of Tasks to
+// run.
+//
+// After running a task, the special message kMsgHaveWork is again posted to
+// the Windows Message queue, ensuring a future time slice for processing a
+// future event.  To prevent flooding the Windows Message queue, care is taken
+// to be sure that at most one kMsgHaveWork message is EVER pending in the
+// Window's Message queue.
+//
+// There are a few additional complexities in this system where, when there are
+// no Tasks to run, this otherwise infinite stream of messages which drives the
+// sub-pump is halted.  The pump is automatically re-started when Tasks are
+// queued.
+//
+// A second complexity is that the presence of this stream of posted tasks may
+// prevent a bare-bones message pump from ever peeking a WM_PAINT or WM_TIMER.
+// Such paint and timer events always give priority to a posted message, such as
+// kMsgHaveWork messages.  As a result, care is taken to do some peeking in
+// between the posting of each kMsgHaveWork message (i.e., after kMsgHaveWork
+// is peeked, and before a replacement kMsgHaveWork is posted).
+//
+// NOTE: Although it may seem odd that messages are used to start and stop this
+// flow (as opposed to signaling objects, etc.), it should be understood that
+// the native message pump will *only* respond to messages.  As a result, it is
+// an excellent choice.  It is also helpful that the starter messages that are
+// placed in the queue when new tasks arrive also awaken DoRunLoop.
+//
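+// A sketch of the guard that keeps at most one kMsgHaveWork message pending
+// (the real logic lives in MessagePumpForUI::ScheduleWork(); shown here only
+// to illustrate the description above):
+//
+//   if (InterlockedExchange(&work_state_, HAVE_WORK) != READY)
+//     return;  // Somebody else posted kMsgHaveWork; nothing to do.
+//   PostMessage(hwnd, kMsgHaveWork, /*wparam=*/0, /*lparam=*/0);
+//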
+class BASE_EXPORT MessagePumpForUI : public MessagePumpWin {
+ public:
+  MessagePumpForUI();
+  ~MessagePumpForUI() override;
+
+  // MessagePump methods:
+  void ScheduleWork() override;
+  void ScheduleDelayedWork(const TimeTicks& delayed_work_time) override;
+
+ private:
+  bool MessageCallback(
+      UINT message, WPARAM wparam, LPARAM lparam, LRESULT* result);
+  void DoRunLoop() override;
+  void WaitForWork();
+  void HandleWorkMessage();
+  void HandleTimerMessage();
+  void RescheduleTimer();
+  bool ProcessNextWindowsMessage();
+  bool ProcessMessageHelper(const MSG& msg);
+  bool ProcessPumpReplacementMessage();
+
+  base::win::MessageWindow message_window_;
+};
+
+//-----------------------------------------------------------------------------
+// MessagePumpForIO extends MessagePumpWin with methods that are particular to a
+// MessageLoop instantiated with TYPE_IO. This version of MessagePump does not
+// deal with Windows messages, and instead has a Run loop based on Completion
+// Ports so it is better suited for IO operations.
+//
+class BASE_EXPORT MessagePumpForIO : public MessagePumpWin {
+ public:
+  struct BASE_EXPORT IOContext {
+    IOContext();
+    OVERLAPPED overlapped;
+  };
+
+  // Clients interested in receiving OS notifications when asynchronous IO
+  // operations complete should implement this interface and register themselves
+  // with the message pump.
+  //
+  // Typical use #1:
+  //   class MyFile : public IOHandler {
+  //     MyFile() {
+  //       ...
+  //       message_pump->RegisterIOHandler(file_, this);
+  //     }
+  //     // Plus some code to make sure that this destructor is not called
+  //     // while there are pending IO operations.
+  //     ~MyFile() {
+  //     }
+  //     virtual void OnIOCompleted(IOContext* context, DWORD bytes_transfered,
+  //                                DWORD error) {
+  //       ...
+  //       delete context;
+  //     }
+  //     void DoSomeIo() {
+  //       ...
+  //       IOContext* context = new IOContext;
+  //       ReadFile(file_, buffer, num_bytes, &read, &context);
+  //     }
+  //     HANDLE file_;
+  //   };
+  //
+  // Typical use #2:
+  // Same as the previous example, except that in order to deal with the
+  // requirement stated for the destructor, the class calls WaitForIOCompletion
+  // from the destructor to block until all IO finishes.
+  //     ~MyFile() {
+  //       while(pending_)
+  //         message_pump->WaitForIOCompletion(INFINITE, this);
+  //     }
+  //
+  class IOHandler {
+   public:
+    virtual ~IOHandler() {}
+    // This will be called once the pending IO operation associated with
+    // |context| completes. |error| is the Win32 error code of the IO operation
+    // (ERROR_SUCCESS if there was no error). |bytes_transfered| will be zero
+    // on error.
+    virtual void OnIOCompleted(IOContext* context, DWORD bytes_transfered,
+                               DWORD error) = 0;
+  };
+
+  MessagePumpForIO();
+  ~MessagePumpForIO() override;
+
+  // MessagePump methods:
+  void ScheduleWork() override;
+  void ScheduleDelayedWork(const TimeTicks& delayed_work_time) override;
+
+  // Register the handler to be used when asynchronous IO for the given file
+  // completes. The registration persists as long as |file_handle| is valid, so
+  // |handler| must be valid as long as there is pending IO for the given file.
+  void RegisterIOHandler(HANDLE file_handle, IOHandler* handler);
+
+  // Register the handler to be used to process job events. The registration
+  // persists as long as the job object is live, so |handler| must be valid
+  // until the job object is destroyed. Returns true if the registration
+  // succeeded, and false otherwise.
+  bool RegisterJobObject(HANDLE job_handle, IOHandler* handler);
+
+  // Waits for the next IO completion that should be processed by |filter|, for
+  // up to |timeout| milliseconds. Returns true if any IO operation completed,
+  // regardless of the involved handler, and false if the timeout expired. If
+  // the completion port received any message and the involved IO handler
+  // matches |filter|, the callback is called before returning from this code;
+  // if the handler is not the one that we are looking for, the callback will
+  // be postponed for another time, so reentrancy problems can be avoided.
+  // External use of this method should be reserved for the rare case when the
+  // caller is willing to allow pausing regular task dispatching on this thread.
+  bool WaitForIOCompletion(DWORD timeout, IOHandler* filter);
+
+ private:
+  struct IOItem {
+    IOHandler* handler;
+    IOContext* context;
+    DWORD bytes_transfered;
+    DWORD error;
+  };
+
+  void DoRunLoop() override;
+  void WaitForWork();
+  bool MatchCompletedIOItem(IOHandler* filter, IOItem* item);
+  bool GetIOItem(DWORD timeout, IOItem* item);
+  bool ProcessInternalIOItem(const IOItem& item);
+
+  // The completion port associated with this thread.
+  win::ScopedHandle port_;
+  // This list will be empty almost always. It stores IO completions that have
+  // not been delivered yet because somebody was doing cleanup.
+  std::list<IOItem> completed_io_;
+};
+
+}  // namespace base
+
+#endif  // BASE_MESSAGE_LOOP_MESSAGE_PUMP_WIN_H_
diff --git a/base/message_loop/timer_slack.h b/base/message_loop/timer_slack.h
new file mode 100644
index 0000000..1ad6ca9
--- /dev/null
+++ b/base/message_loop/timer_slack.h
@@ -0,0 +1,22 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MESSAGE_LOOP_TIMER_SLACK_H_
+#define BASE_MESSAGE_LOOP_TIMER_SLACK_H_
+
+namespace base {
+
+// Amount of timer slack to use for delayed timers.  Increasing timer slack
+// allows the OS to coalesce timers more effectively.
+enum TimerSlack {
+  // Lowest value for timer slack allowed by OS.
+  TIMER_SLACK_NONE,
+
+  // Maximal value for timer slack allowed by OS.
+  TIMER_SLACK_MAXIMUM
+};
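+
+// Usage sketch (assuming the MessageLoop::SetTimerSlack() setter present in
+// Chromium of this vintage): a loop that prefers battery life over timer
+// precision can opt into coarse coalescing:
+//
+//   message_loop->SetTimerSlack(base::TIMER_SLACK_MAXIMUM);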
+
+}  // namespace base
+
+#endif  // BASE_MESSAGE_LOOP_TIMER_SLACK_H_
diff --git a/base/message_loop/watchable_io_message_pump_posix.cc b/base/message_loop/watchable_io_message_pump_posix.cc
new file mode 100644
index 0000000..1850137
--- /dev/null
+++ b/base/message_loop/watchable_io_message_pump_posix.cc
@@ -0,0 +1,16 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_loop/watchable_io_message_pump_posix.h"
+
+namespace base {
+
+WatchableIOMessagePumpPosix::FdWatchControllerInterface::
+    FdWatchControllerInterface(const Location& from_here)
+    : created_from_location_(from_here) {}
+
+WatchableIOMessagePumpPosix::FdWatchControllerInterface::
+    ~FdWatchControllerInterface() = default;
+
+}  // namespace base
diff --git a/base/message_loop/watchable_io_message_pump_posix.h b/base/message_loop/watchable_io_message_pump_posix.h
new file mode 100644
index 0000000..74583d9
--- /dev/null
+++ b/base/message_loop/watchable_io_message_pump_posix.h
@@ -0,0 +1,88 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MESSAGE_LOOP_WATCHABLE_IO_MESSAGE_PUMP_POSIX_H_
+#define BASE_MESSAGE_LOOP_WATCHABLE_IO_MESSAGE_PUMP_POSIX_H_
+
+#include "base/location.h"
+#include "base/macros.h"
+
+namespace base {
+
+class WatchableIOMessagePumpPosix {
+ public:
+  // Used with WatchFileDescriptor to asynchronously monitor the I/O readiness
+  // of a file descriptor.
+  class FdWatcher {
+   public:
+    virtual void OnFileCanReadWithoutBlocking(int fd) = 0;
+    virtual void OnFileCanWriteWithoutBlocking(int fd) = 0;
+
+   protected:
+    virtual ~FdWatcher() = default;
+  };
+
+  class FdWatchControllerInterface {
+   public:
+    explicit FdWatchControllerInterface(const Location& from_here);
+    // Subclasses must call StopWatchingFileDescriptor() in their destructor
+    // (this parent class cannot generically do it for them as it must usually
+    // be invoked before they destroy their state, which happens before the
+    // parent destructor is invoked).
+    virtual ~FdWatchControllerInterface();
+
+    // NOTE: This method isn't called StopWatching() to avoid confusion with the
+    // win32 ObjectWatcher class. It doesn't really need to be virtual, as
+    // there's only one impl per platform and users don't use pointers to the
+    // base class, but having this interface forces implementers to share
+    // similar implementations (a problem in the past).
+
+    // Stops watching the FD; always safe to call.  No-op if there's nothing to
+    // do.
+    virtual bool StopWatchingFileDescriptor() = 0;
+
+    const Location& created_from_location() const {
+      return created_from_location_;
+    }
+
+   private:
+    const Location created_from_location_;
+
+    DISALLOW_COPY_AND_ASSIGN(FdWatchControllerInterface);
+  };
+
+  enum Mode {
+    WATCH_READ = 1 << 0,
+    WATCH_WRITE = 1 << 1,
+    WATCH_READ_WRITE = WATCH_READ | WATCH_WRITE
+  };
+
+  // Every subclass of WatchableIOMessagePumpPosix must provide a
+  // WatchFileDescriptor() with the following signature, where
+  // |FdWatchController| is the concrete type derived from
+  // FdWatchControllerInterface.
+
+  // Registers |delegate| with the current thread's message loop so that its
+  // methods are invoked when file descriptor |fd| becomes ready for reading or
+  // writing (or both) without blocking.  |mode| selects ready for reading, for
+  // writing, or both.  See "enum Mode" above.  |controller| manages the
+  // lifetime of registrations. ("Registrations" are also ambiguously called
+  // "events" in many places, for instance in libevent.)  It is an error to use
+  // the same |controller| for different file descriptors; however, the same
+  // controller can be reused to add registrations with a different |mode|.  If
+  // |controller| is already attached to one or more registrations, the new
+  // registration is added onto those.  If an error occurs while calling this
+  // method, any registration previously attached to |controller| is removed.
+  // Returns true on success.  Must be called on the same thread the MessagePump
+  // is running on.
+  // bool WatchFileDescriptor(int fd,
+  //                          bool persistent,
+  //                          int mode,
+  //                          FdWatchController* controller,
+  //                          FdWatcher* delegate) = 0;
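+  //
+  // A hypothetical watcher sketch (names invented for illustration):
+  //
+  //   class Reader : public FdWatcher {
+  //     void OnFileCanReadWithoutBlocking(int fd) override { /* read(fd) */ }
+  //     void OnFileCanWriteWithoutBlocking(int fd) override {}
+  //   };
+  //
+  //   Reader reader;
+  //   FdWatchController controller(FROM_HERE);
+  //   pump->WatchFileDescriptor(fd, /*persistent=*/true, WATCH_READ,
+  //                             &controller, &reader);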
+};
+
+}  // namespace base
+
+#endif  // BASE_MESSAGE_LOOP_WATCHABLE_IO_MESSAGE_PUMP_POSIX_H_
diff --git a/base/metrics/OWNERS b/base/metrics/OWNERS
new file mode 100644
index 0000000..4cc69ff
--- /dev/null
+++ b/base/metrics/OWNERS
@@ -0,0 +1,10 @@
+asvitkine@chromium.org
+bcwhite@chromium.org
+gayane@chromium.org
+holte@chromium.org
+isherman@chromium.org
+jwd@chromium.org
+mpearson@chromium.org
+rkaplow@chromium.org
+
+# COMPONENT: Internals>Metrics
diff --git a/base/metrics/bucket_ranges.cc b/base/metrics/bucket_ranges.cc
new file mode 100644
index 0000000..39b3793
--- /dev/null
+++ b/base/metrics/bucket_ranges.cc
@@ -0,0 +1,124 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/bucket_ranges.h"
+
+#include <cmath>
+
+#include "base/logging.h"
+
+namespace base {
+
+// Static table of checksums for all possible 8 bit bytes.
+const uint32_t kCrcTable[256] = {
+    0x0,         0x77073096L, 0xee0e612cL, 0x990951baL, 0x76dc419L,
+    0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0xedb8832L,  0x79dcb8a4L,
+    0xe0d5e91eL, 0x97d2d988L, 0x9b64c2bL,  0x7eb17cbdL, 0xe7b82d07L,
+    0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL,
+    0x1adad47dL, 0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L,
+    0x646ba8c0L, 0xfd62f97aL, 0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L,
+    0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L, 0x4c69105eL, 0xd56041e4L,
+    0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL, 0xa50ab56bL,
+    0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L,
+    0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL,
+    0xc8d75180L, 0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L,
+    0xb8bda50fL, 0x2802b89eL, 0x5f058808L, 0xc60cd9b2L, 0xb10be924L,
+    0x2f6f7c87L, 0x58684c11L, 0xc1611dabL, 0xb6662d3dL, 0x76dc4190L,
+    0x1db7106L,  0x98d220bcL, 0xefd5102aL, 0x71b18589L, 0x6b6b51fL,
+    0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0xf00f934L,  0x9609a88eL,
+    0xe10e9818L, 0x7f6a0dbbL, 0x86d3d2dL,  0x91646c97L, 0xe6635c01L,
+    0x6b6b51f4L, 0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL,
+    0x1b01a57bL, 0x8208f4c1L, 0xf50fc457L, 0x65b0d9c6L, 0x12b7e950L,
+    0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL, 0x15da2d49L, 0x8cd37cf3L,
+    0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L, 0xd4bb30e2L,
+    0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 0x4369e96aL,
+    0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L,
+    0xaa0a4c5fL, 0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L,
+    0xc90c2086L, 0x5768b525L, 0x206f85b3L, 0xb966d409L, 0xce61e49fL,
+    0x5edef90eL, 0x29d9c998L, 0xb0d09822L, 0xc7d7a8b4L, 0x59b33d17L,
+    0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L, 0x9abfb3b6L,
+    0x3b6e20cL,  0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x4db2615L,
+    0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0xd6d6a3eL,  0x7a6a5aa8L,
+    0xe40ecf0bL, 0x9309ff9dL, 0xa00ae27L,  0x7d079eb1L, 0xf00f9344L,
+    0x8708a3d2L, 0x1e01f268L, 0x6906c2feL, 0xf762575dL, 0x806567cbL,
+    0x196c3671L, 0x6e6b06e7L, 0xfed41b76L, 0x89d32be0L, 0x10da7a5aL,
+    0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L, 0x60b08ed5L,
+    0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L,
+    0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL,
+    0x36034af6L, 0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL,
+    0x4669be79L, 0xcb61b38cL, 0xbc66831aL, 0x256fd2a0L, 0x5268e236L,
+    0xcc0c7795L, 0xbb0b4703L, 0x220216b9L, 0x5505262fL, 0xc5ba3bbeL,
+    0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L, 0xb5d0cf31L,
+    0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL,
+    0x26d930aL,  0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x5005713L,
+    0x95bf4a82L, 0xe2b87a14L, 0x7bb12baeL, 0xcb61b38L,  0x92d28e9bL,
+    0xe5d5be0dL, 0x7cdcefb7L, 0xbdbdf21L,  0x86d3d2d4L, 0xf1d4e242L,
+    0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL, 0xf6b9265bL, 0x6fb077e1L,
+    0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL, 0x11010b5cL,
+    0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L,
+    0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L,
+    0x4969474dL, 0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L,
+    0x37d83bf0L, 0xa9bcae53L, 0xdebb9ec5L, 0x47b2cf7fL, 0x30b5ffe9L,
+    0xbdbdf21cL, 0xcabac28aL, 0x53b39330L, 0x24b4a3a6L, 0xbad03605L,
+    0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL, 0xc4614ab8L,
+    0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 0xc30c8ea1L, 0x5a05df1bL,
+    0x2d02ef8dL,
+};
+
+// We generate the CRC-32 using the low order bits to select whether to XOR in
+// the reversed polynomial 0xedb88320L.  This is nice and simple, and allows us
+// to keep the quotient in a uint32_t.  Since we're not concerned about the
+// nature of corruptions (i.e., we don't care about bit sequencing, since we are
+// handling memory changes, which are more grotesque), we don't bother to get
+// the CRC correct for big-endian vs little-endian calculations.  All we need is
+// a nice hash that tends to depend on all the bits of the sample, with very
+// little chance of changes in one place impacting changes in another place.
+static uint32_t Crc32(uint32_t sum, HistogramBase::Sample value) {
+  union {
+    HistogramBase::Sample range;
+    unsigned char bytes[sizeof(HistogramBase::Sample)];
+  } converter;
+  converter.range = value;
+  for (size_t i = 0; i < sizeof(converter); ++i) {
+    sum = kCrcTable[(sum & 0xff) ^ converter.bytes[i]] ^ (sum >> 8);
+  }
+  return sum;
+}
+
+BucketRanges::BucketRanges(size_t num_ranges)
+    : ranges_(num_ranges, 0),
+      checksum_(0) {}
+
+BucketRanges::~BucketRanges() = default;
+
+uint32_t BucketRanges::CalculateChecksum() const {
+  // Seed checksum.
+  uint32_t checksum = static_cast<uint32_t>(ranges_.size());
+
+  for (size_t index = 0; index < ranges_.size(); ++index)
+    checksum = Crc32(checksum, ranges_[index]);
+  return checksum;
+}
+
+bool BucketRanges::HasValidChecksum() const {
+  return CalculateChecksum() == checksum_;
+}
+
+void BucketRanges::ResetChecksum() {
+  checksum_ = CalculateChecksum();
+}
+
+bool BucketRanges::Equals(const BucketRanges* other) const {
+  if (checksum_ != other->checksum_)
+    return false;
+  if (ranges_.size() != other->ranges_.size())
+    return false;
+  for (size_t index = 0; index < ranges_.size(); ++index) {
+    if (ranges_[index] != other->ranges_[index])
+      return false;
+  }
+  return true;
+}
+
+}  // namespace base
diff --git a/base/metrics/bucket_ranges.h b/base/metrics/bucket_ranges.h
new file mode 100644
index 0000000..1b6d069
--- /dev/null
+++ b/base/metrics/bucket_ranges.h
@@ -0,0 +1,105 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// BucketRanges stores the vector of ranges that delimit what samples are
+// tallied in the corresponding buckets of a histogram. Histograms that have
+// same ranges for all their corresponding buckets should share the same
+// BucketRanges object.
+//
+// E.g., a 5-bucket LinearHistogram with a minimal value of 1 and a maximal
+// value of 4 needs a BucketRanges with 6 ranges:
+// 0, 1, 2, 3, 4, INT_MAX
+//
+// TODO(kaiwang): Currently we keep all negative values in the 0~1 bucket.
+// Consider changing 0 to INT_MIN.
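+//
+// A construction sketch for the example above (using only the setters declared
+// below):
+//
+//   BucketRanges ranges(6);
+//   const HistogramBase::Sample kValues[] = {0, 1, 2, 3, 4, INT_MAX};
+//   for (size_t i = 0; i < 6; ++i)
+//     ranges.set_range(i, kValues[i]);
+//   ranges.ResetChecksum();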
+
+#ifndef BASE_METRICS_BUCKET_RANGES_H_
+#define BASE_METRICS_BUCKET_RANGES_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <vector>
+
+#include <limits.h>
+
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/metrics/histogram_base.h"
+
+namespace base {
+
+class BASE_EXPORT BucketRanges {
+ public:
+  typedef std::vector<HistogramBase::Sample> Ranges;
+
+  explicit BucketRanges(size_t num_ranges);
+  ~BucketRanges();
+
+  size_t size() const { return ranges_.size(); }
+  HistogramBase::Sample range(size_t i) const { return ranges_[i]; }
+  void set_range(size_t i, HistogramBase::Sample value) {
+    DCHECK_LT(i, ranges_.size());
+    DCHECK_GE(value, 0);
+    ranges_[i] = value;
+  }
+  uint32_t checksum() const { return checksum_; }
+  void set_checksum(uint32_t checksum) { checksum_ = checksum; }
+
+  // A bucket is defined by a consecutive pair of entries in |ranges|, so there
+  // is one fewer bucket than there are ranges.  For example, if |ranges| is
+  // [0, 1, 3, 7, INT_MAX], then the buckets in this histogram are
+  // [0, 1), [1, 3), [3, 7), and [7, INT_MAX).
+  size_t bucket_count() const { return ranges_.size() - 1; }
+
+  // Checksum methods to verify whether the ranges are corrupted (e.g. bad
+  // memory access).
+  uint32_t CalculateChecksum() const;
+  bool HasValidChecksum() const;
+  void ResetChecksum();
+
+  // Returns true iff |other| has the same ranges_ as |this| object.
+  bool Equals(const BucketRanges* other) const;
+
+  // Set and get a reference into persistent memory where this bucket data
+  // can be found (and re-used). These calls are internally atomic, with no
+  // safety against overwriting an existing value: though it is wasteful to
+  // have multiple identical persistent records, it is still safe.
+  void set_persistent_reference(uint32_t ref) const {
+    subtle::Release_Store(&persistent_reference_, ref);
+  }
+  uint32_t persistent_reference() const {
+    return subtle::Acquire_Load(&persistent_reference_);
+  }
+
+ private:
+  // A monotonically increasing list of values which determine which bucket to
+  // put a sample into.  The entry at each index is the smallest sample that
+  // belongs in the corresponding bucket.
+  Ranges ranges_;
+
+  // Checksum for the contents of ranges_.  Used to detect random over-writes
+  // of our data, and to quickly see if some other BucketRanges instance is
+  // possibly Equal() to this instance.
+  // TODO(kaiwang): Consider changing this to uint64_t, because we see a lot of
+  // noise on the UMA dashboard.
+  uint32_t checksum_;
+
+  // A reference into a global PersistentMemoryAllocator where the ranges
+  // information is stored. This allows for the record to be created once and
+  // re-used simply by having all histograms with the same ranges use the
+  // same reference.
+  mutable subtle::Atomic32 persistent_reference_ = 0;
+
+  DISALLOW_COPY_AND_ASSIGN(BucketRanges);
+};
+
+//////////////////////////////////////////////////////////////////////////////
+// Exposed only for tests.
+BASE_EXPORT extern const uint32_t kCrcTable[256];
+
+}  // namespace base
+
+#endif  // BASE_METRICS_BUCKET_RANGES_H_
diff --git a/base/metrics/bucket_ranges_unittest.cc b/base/metrics/bucket_ranges_unittest.cc
new file mode 100644
index 0000000..481054c
--- /dev/null
+++ b/base/metrics/bucket_ranges_unittest.cc
@@ -0,0 +1,94 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/bucket_ranges.h"
+
+#include <stdint.h>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+TEST(BucketRangesTest, NormalSetup) {
+  BucketRanges ranges(5);
+  ASSERT_EQ(5u, ranges.size());
+  ASSERT_EQ(4u, ranges.bucket_count());
+
+  for (int i = 0; i < 5; ++i) {
+    EXPECT_EQ(0, ranges.range(i));
+  }
+  EXPECT_EQ(0u, ranges.checksum());
+
+  ranges.set_range(3, 100);
+  EXPECT_EQ(100, ranges.range(3));
+}
+
+TEST(BucketRangesTest, Equals) {
+  // Compare empty ranges.
+  BucketRanges ranges1(3);
+  BucketRanges ranges2(3);
+  BucketRanges ranges3(5);
+
+  EXPECT_TRUE(ranges1.Equals(&ranges2));
+  EXPECT_FALSE(ranges1.Equals(&ranges3));
+  EXPECT_FALSE(ranges2.Equals(&ranges3));
+
+  // Compare fully filled ranges.
+  ranges1.set_range(0, 0);
+  ranges1.set_range(1, 1);
+  ranges1.set_range(2, 2);
+  ranges1.set_checksum(100);
+  ranges2.set_range(0, 0);
+  ranges2.set_range(1, 1);
+  ranges2.set_range(2, 2);
+  ranges2.set_checksum(100);
+
+  EXPECT_TRUE(ranges1.Equals(&ranges2));
+
+  // Checksum does not match.
+  ranges1.set_checksum(99);
+  EXPECT_FALSE(ranges1.Equals(&ranges2));
+  ranges1.set_checksum(100);
+
+  // Range does not match.
+  ranges1.set_range(1, 3);
+  EXPECT_FALSE(ranges1.Equals(&ranges2));
+}
+
+TEST(BucketRangesTest, Checksum) {
+  BucketRanges ranges(3);
+  ranges.set_range(0, 0);
+  ranges.set_range(1, 1);
+  ranges.set_range(2, 2);
+
+  ranges.ResetChecksum();
+  EXPECT_EQ(289217253u, ranges.checksum());
+
+  ranges.set_range(2, 3);
+  EXPECT_FALSE(ranges.HasValidChecksum());
+
+  ranges.ResetChecksum();
+  EXPECT_EQ(2843835776u, ranges.checksum());
+  EXPECT_TRUE(ranges.HasValidChecksum());
+}
+
+// Table was generated similarly to sample code for CRC-32 given on:
+// http://www.w3.org/TR/PNG/#D-CRCAppendix.
+TEST(BucketRangesTest, Crc32TableTest) {
+  for (int i = 0; i < 256; ++i) {
+    uint32_t checksum = i;
+    for (int j = 0; j < 8; ++j) {
+      const uint32_t kReversedPolynomial = 0xedb88320L;
+      if (checksum & 1)
+        checksum = kReversedPolynomial ^ (checksum >> 1);
+      else
+        checksum >>= 1;
+    }
+    EXPECT_EQ(kCrcTable[i], checksum);
+  }
+}
+
+}  // namespace
+}  // namespace base
diff --git a/base/metrics/dummy_histogram.cc b/base/metrics/dummy_histogram.cc
new file mode 100644
index 0000000..2707733
--- /dev/null
+++ b/base/metrics/dummy_histogram.cc
@@ -0,0 +1,102 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/dummy_histogram.h"
+
+#include <memory>
+
+#include "base/logging.h"
+#include "base/metrics/histogram_samples.h"
+#include "base/metrics/metrics_hashes.h"
+
+namespace base {
+
+namespace {
+
+// Helper classes for DummyHistogram.
+class DummySampleCountIterator : public SampleCountIterator {
+ public:
+  DummySampleCountIterator() {}
+  ~DummySampleCountIterator() override {}
+
+  // SampleCountIterator:
+  bool Done() const override { return true; }
+  void Next() override { NOTREACHED(); }
+  void Get(HistogramBase::Sample* min,
+           int64_t* max,
+           HistogramBase::Count* count) const override {
+    NOTREACHED();
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(DummySampleCountIterator);
+};
+
+class DummyHistogramSamples : public HistogramSamples {
+ public:
+  explicit DummyHistogramSamples() : HistogramSamples(0, new LocalMetadata()) {}
+  ~DummyHistogramSamples() override {
+    delete static_cast<LocalMetadata*>(meta());
+  }
+
+  // HistogramSamples:
+  void Accumulate(HistogramBase::Sample value,
+                  HistogramBase::Count count) override {}
+  HistogramBase::Count GetCount(HistogramBase::Sample value) const override {
+    return HistogramBase::Count();
+  }
+  HistogramBase::Count TotalCount() const override {
+    return HistogramBase::Count();
+  }
+  std::unique_ptr<SampleCountIterator> Iterator() const override {
+    return std::make_unique<DummySampleCountIterator>();
+  }
+  bool AddSubtractImpl(SampleCountIterator* iter, Operator op) override {
+    return true;
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(DummyHistogramSamples);
+};
+
+}  // namespace
+
+// static
+DummyHistogram* DummyHistogram::GetInstance() {
+  static base::NoDestructor<DummyHistogram> dummy_histogram;
+  return dummy_histogram.get();
+}
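+
+// Calls on the returned instance are deliberate no-ops, e.g.:
+//
+//   HistogramBase* histogram = DummyHistogram::GetInstance();
+//   histogram->Add(42);  // Accepted, but recorded nowhere.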
+
+uint64_t DummyHistogram::name_hash() const {
+  return HashMetricName(histogram_name());
+}
+
+HistogramType DummyHistogram::GetHistogramType() const {
+  return DUMMY_HISTOGRAM;
+}
+
+bool DummyHistogram::HasConstructionArguments(
+    Sample expected_minimum,
+    Sample expected_maximum,
+    uint32_t expected_bucket_count) const {
+  return true;
+}
+
+bool DummyHistogram::AddSamplesFromPickle(PickleIterator* iter) {
+  return true;
+}
+
+std::unique_ptr<HistogramSamples> DummyHistogram::SnapshotSamples() const {
+  return std::make_unique<DummyHistogramSamples>();
+}
+
+std::unique_ptr<HistogramSamples> DummyHistogram::SnapshotDelta() {
+  return std::make_unique<DummyHistogramSamples>();
+}
+
+std::unique_ptr<HistogramSamples> DummyHistogram::SnapshotFinalDelta() const {
+  return std::make_unique<DummyHistogramSamples>();
+}
+
+}  // namespace base
diff --git a/base/metrics/dummy_histogram.h b/base/metrics/dummy_histogram.h
new file mode 100644
index 0000000..e2cb64e
--- /dev/null
+++ b/base/metrics/dummy_histogram.h
@@ -0,0 +1,61 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_DUMMY_HISTOGRAM_H_
+#define BASE_METRICS_DUMMY_HISTOGRAM_H_
+
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+
+#include "base/base_export.h"
+#include "base/metrics/histogram_base.h"
+#include "base/no_destructor.h"
+
+namespace base {
+
+// DummyHistogram is a no-op stand-in for histograms that shouldn't be
+// recorded. It doesn't do any actual processing.
+class BASE_EXPORT DummyHistogram : public HistogramBase {
+ public:
+  static DummyHistogram* GetInstance();
+
+  // HistogramBase:
+  void CheckName(const StringPiece& name) const override {}
+  uint64_t name_hash() const override;
+  HistogramType GetHistogramType() const override;
+  bool HasConstructionArguments(Sample expected_minimum,
+                                Sample expected_maximum,
+                                uint32_t expected_bucket_count) const override;
+  void Add(Sample value) override {}
+  void AddCount(Sample value, int count) override {}
+  void AddSamples(const HistogramSamples& samples) override {}
+  bool AddSamplesFromPickle(PickleIterator* iter) override;
+  std::unique_ptr<HistogramSamples> SnapshotSamples() const override;
+  std::unique_ptr<HistogramSamples> SnapshotDelta() override;
+  std::unique_ptr<HistogramSamples> SnapshotFinalDelta() const override;
+  void WriteHTMLGraph(std::string* output) const override {}
+  void WriteAscii(std::string* output) const override {}
+
+ protected:
+  // HistogramBase:
+  void SerializeInfoImpl(Pickle* pickle) const override {}
+  void GetParameters(DictionaryValue* params) const override {}
+  void GetCountAndBucketData(Count* count,
+                             int64_t* sum,
+                             ListValue* buckets) const override {}
+
+ private:
+  friend class NoDestructor<DummyHistogram>;
+
+  DummyHistogram() : HistogramBase("dummy_histogram") {}
+  ~DummyHistogram() override {}
+
+  DISALLOW_COPY_AND_ASSIGN(DummyHistogram);
+};
+
+}  // namespace base
+
+#endif  // BASE_METRICS_DUMMY_HISTOGRAM_H_
diff --git a/base/metrics/field_trial.cc b/base/metrics/field_trial.cc
new file mode 100644
index 0000000..ff37880
--- /dev/null
+++ b/base/metrics/field_trial.cc
@@ -0,0 +1,1522 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/field_trial.h"
+
+#include <algorithm>
+#include <utility>
+
+#include "base/base_switches.h"
+#include "base/build_time.h"
+#include "base/command_line.h"
+#include "base/debug/activity_tracker.h"
+#include "base/logging.h"
+#include "base/metrics/field_trial_param_associator.h"
+#include "base/process/memory.h"
+#include "base/process/process_handle.h"
+#include "base/process/process_info.h"
+#include "base/rand_util.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/unguessable_token.h"
+
+// On POSIX, the fd is shared using the mapping in GlobalDescriptors.
+#if defined(OS_POSIX) && !defined(OS_NACL)
+#include "base/posix/global_descriptors.h"
+#endif
+
+namespace base {
+
+namespace {
+
+// Define a separator character to use when creating a persistent form of an
+// instance.  This is intended for use as a command line argument, passed to a
+// second process to mimic our state (i.e., provide the same group name).
+const char kPersistentStringSeparator = '/';  // Currently a slash.
+
+// Define a marker character to be used as a prefix to a trial name on the
+// command line which forces its activation.
+const char kActivationMarker = '*';
+
+// Use shared memory to communicate field trial (experiment) state. Set to
+// false only where the implementation is not yet supported (see the Fuchsia
+// TODO below). See https://codereview.chromium.org/2365273004/ and
+// crbug.com/653874.
+// The browser is the only process that has write access to the shared memory.
+// This is safe from race conditions because MakeIterable is a release operation
+// and GetNextOfType is an acquire operation, so memory writes before
+// MakeIterable happen before memory reads after GetNextOfType.
+#if defined(OS_FUCHSIA)  // TODO(752368): Not yet supported on Fuchsia.
+const bool kUseSharedMemoryForFieldTrials = false;
+#else
+const bool kUseSharedMemoryForFieldTrials = true;
+#endif
+
+// Constants for the field trial allocator.
+const char kAllocatorName[] = "FieldTrialAllocator";
+
+// We allocate 128 KiB to hold all the field trial data. This should be enough,
+// as most people use 3 - 25 KiB for field trials (as of 11/25/2016).
+// This also doesn't allocate all 128 KiB at once -- the pages only get mapped
+// to physical memory when they are touched. If the size of the allocated field
+// trials does get larger than 128 KiB, then we will drop some field trials in
+// child processes, leading to an inconsistent view between browser and child
+// processes and possibly causing crashes (see crbug.com/661617).
+const size_t kFieldTrialAllocationSize = 128 << 10;  // 128 KiB
+
+// Writes out string1 and then string2 to pickle.
+void WriteStringPair(Pickle* pickle,
+                     const StringPiece& string1,
+                     const StringPiece& string2) {
+  pickle->WriteString(string1);
+  pickle->WriteString(string2);
+}
+
+// Writes out the field trial's contents (via trial_state) to the pickle. The
+// format of the pickle looks like:
+// TrialName, GroupName, ParamKey1, ParamValue1, ParamKey2, ParamValue2, ...
+// If there are no parameters, then it just ends at GroupName.
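+// For example, a trial "Foo" in group "Bar" with one param {"k": "v"} is
+// pickled as the string sequence: Foo, Bar, k, v.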
+void PickleFieldTrial(const FieldTrial::State& trial_state, Pickle* pickle) {
+  WriteStringPair(pickle, *trial_state.trial_name, *trial_state.group_name);
+
+  // Get field trial params.
+  std::map<std::string, std::string> params;
+  FieldTrialParamAssociator::GetInstance()->GetFieldTrialParamsWithoutFallback(
+      *trial_state.trial_name, *trial_state.group_name, &params);
+
+  // Write params to pickle.
+  for (const auto& param : params)
+    WriteStringPair(pickle, param.first, param.second);
+}
+
+// Creates a Time value based on the |year|, |month| and |day_of_month|
+// parameters.
+Time CreateTimeFromParams(int year, int month, int day_of_month) {
+  DCHECK_GT(year, 1970);
+  DCHECK_GT(month, 0);
+  DCHECK_LT(month, 13);
+  DCHECK_GT(day_of_month, 0);
+  DCHECK_LT(day_of_month, 32);
+
+  Time::Exploded exploded;
+  exploded.year = year;
+  exploded.month = month;
+  exploded.day_of_week = 0;  // Should be unused.
+  exploded.day_of_month = day_of_month;
+  exploded.hour = 0;
+  exploded.minute = 0;
+  exploded.second = 0;
+  exploded.millisecond = 0;
+  Time out_time;
+  if (!Time::FromLocalExploded(exploded, &out_time)) {
+    // TODO(maksims): implement failure handling.
+    // We might just return |out_time|, which is Time(0).
+    NOTIMPLEMENTED();
+  }
+
+  return out_time;
+}
+
+// Returns the boundary value for comparing against the FieldTrial's added
+// groups for a given |divisor| (total probability) and |entropy_value|.
+FieldTrial::Probability GetGroupBoundaryValue(
+    FieldTrial::Probability divisor,
+    double entropy_value) {
+  // Add a tiny epsilon value to get consistent results when converting floating
+  // point values to int. Without it, boundary values produce inconsistent
+  // results, e.g.:
+  //
+  //   static_cast<FieldTrial::Probability>(100 * 0.56) == 56
+  //   static_cast<FieldTrial::Probability>(100 * 0.57) == 56
+  //   static_cast<FieldTrial::Probability>(100 * 0.58) == 57
+  //   static_cast<FieldTrial::Probability>(100 * 0.59) == 59
+  const double kEpsilon = 1e-8;
+  const FieldTrial::Probability result =
+      static_cast<FieldTrial::Probability>(divisor * entropy_value + kEpsilon);
+  // Ensure that adding the epsilon still results in a value < |divisor|.
+  return std::min(result, divisor - 1);
+}
+
+// Separate type from FieldTrial::State so that it can use StringPieces.
+struct FieldTrialStringEntry {
+  StringPiece trial_name;
+  StringPiece group_name;
+  bool activated = false;
+};
+
+// Parses the --force-fieldtrials string |trials_string| into |entries|.
+// Returns true if the string was parsed correctly. On failure, the |entries|
+// array may end up being partially filled.
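+// For example (hypothetical names), "*Foo/Bar/Baz/Qux/" yields two entries:
+// {Foo, Bar} with |activated| set (due to the leading '*'), and {Baz, Qux}.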
+bool ParseFieldTrialsString(const std::string& trials_string,
+                            std::vector<FieldTrialStringEntry>* entries) {
+  const StringPiece trials_string_piece(trials_string);
+
+  size_t next_item = 0;
+  while (next_item < trials_string.length()) {
+    size_t name_end = trials_string.find(kPersistentStringSeparator, next_item);
+    if (name_end == trials_string.npos || next_item == name_end)
+      return false;
+    size_t group_name_end =
+        trials_string.find(kPersistentStringSeparator, name_end + 1);
+    if (name_end + 1 == group_name_end)
+      return false;
+    if (group_name_end == trials_string.npos)
+      group_name_end = trials_string.length();
+
+    FieldTrialStringEntry entry;
+    // Verify if the trial should be activated or not.
+    if (trials_string[next_item] == kActivationMarker) {
+      // Name cannot be only the indicator.
+      if (name_end - next_item == 1)
+        return false;
+      next_item++;
+      entry.activated = true;
+    }
+    entry.trial_name =
+        trials_string_piece.substr(next_item, name_end - next_item);
+    entry.group_name =
+        trials_string_piece.substr(name_end + 1, group_name_end - name_end - 1);
+    next_item = group_name_end + 1;
+
+    entries->push_back(std::move(entry));
+  }
+  return true;
+}
+
+void AddFeatureAndFieldTrialFlags(const char* enable_features_switch,
+                                  const char* disable_features_switch,
+                                  CommandLine* cmd_line) {
+  std::string enabled_features;
+  std::string disabled_features;
+  FeatureList::GetInstance()->GetFeatureOverrides(&enabled_features,
+                                                  &disabled_features);
+
+  if (!enabled_features.empty())
+    cmd_line->AppendSwitchASCII(enable_features_switch, enabled_features);
+  if (!disabled_features.empty())
+    cmd_line->AppendSwitchASCII(disable_features_switch, disabled_features);
+
+  std::string field_trial_states;
+  FieldTrialList::AllStatesToString(&field_trial_states, false);
+  if (!field_trial_states.empty()) {
+    cmd_line->AppendSwitchASCII(switches::kForceFieldTrials,
+                                field_trial_states);
+  }
+}
+
+void OnOutOfMemory(size_t size) {
+#if defined(OS_NACL)
+  NOTREACHED();
+#else
+  TerminateBecauseOutOfMemory(size);
+#endif
+}
+
+#if !defined(OS_NACL)
+// Returns whether the operation succeeded.
+bool DeserializeGUIDFromStringPieces(base::StringPiece first,
+                                     base::StringPiece second,
+                                     base::UnguessableToken* guid) {
+  uint64_t high = 0;
+  uint64_t low = 0;
+  if (!base::StringToUint64(first, &high) ||
+      !base::StringToUint64(second, &low)) {
+    return false;
+  }
+
+  *guid = base::UnguessableToken::Deserialize(high, low);
+  return true;
+}
+
+// Extracts a read-only SharedMemoryHandle from an existing |shared_memory|
+// object. Note that on Android, this also makes the whole region read-only.
+SharedMemoryHandle GetSharedMemoryReadOnlyHandle(SharedMemory* shared_memory) {
+  SharedMemoryHandle result = shared_memory->GetReadOnlyHandle();
+#if defined(OS_ANDROID)
+  // On Android, turn the region read-only. This prevents any future
+  // writable mapping attempts, but the original one in |shm| survives
+  // and is still usable in the current process.
+  result.SetRegionReadOnly();
+#endif  // OS_ANDROID
+  return result;
+}
+#endif  // !OS_NACL
+
+}  // namespace
+
+// statics
+const int FieldTrial::kNotFinalized = -1;
+const int FieldTrial::kDefaultGroupNumber = 0;
+bool FieldTrial::enable_benchmarking_ = false;
+
+int FieldTrialList::kNoExpirationYear = 0;
+
+//------------------------------------------------------------------------------
+// FieldTrial methods and members.
+
+FieldTrial::EntropyProvider::~EntropyProvider() = default;
+
+FieldTrial::State::State() = default;
+
+FieldTrial::State::State(const State& other) = default;
+
+FieldTrial::State::~State() = default;
+
+bool FieldTrial::FieldTrialEntry::GetTrialAndGroupName(
+    StringPiece* trial_name,
+    StringPiece* group_name) const {
+  PickleIterator iter = GetPickleIterator();
+  return ReadStringPair(&iter, trial_name, group_name);
+}
+
+bool FieldTrial::FieldTrialEntry::GetParams(
+    std::map<std::string, std::string>* params) const {
+  PickleIterator iter = GetPickleIterator();
+  StringPiece tmp;
+  // Skip reading trial and group name.
+  if (!ReadStringPair(&iter, &tmp, &tmp))
+    return false;
+
+  while (true) {
+    StringPiece key;
+    StringPiece value;
+    if (!ReadStringPair(&iter, &key, &value))
+      return key.empty();  // Non-empty is bad: got one of a pair.
+    (*params)[key.as_string()] = value.as_string();
+  }
+}
+
+PickleIterator FieldTrial::FieldTrialEntry::GetPickleIterator() const {
+  const char* src =
+      reinterpret_cast<const char*>(this) + sizeof(FieldTrialEntry);
+
+  Pickle pickle(src, pickle_size);
+  return PickleIterator(pickle);
+}
+
+bool FieldTrial::FieldTrialEntry::ReadStringPair(
+    PickleIterator* iter,
+    StringPiece* trial_name,
+    StringPiece* group_name) const {
+  if (!iter->ReadStringPiece(trial_name))
+    return false;
+  if (!iter->ReadStringPiece(group_name))
+    return false;
+  return true;
+}
+
+void FieldTrial::Disable() {
+  DCHECK(!group_reported_);
+  enable_field_trial_ = false;
+
+  // In case we are disabled after initialization, we need to switch
+  // the trial to the default group.
+  if (group_ != kNotFinalized) {
+    // Only reset when not already the default group, because in case we were
+    // forced to the default group, the group number may not be
+    // kDefaultGroupNumber, so we should keep it as is.
+    if (group_name_ != default_group_name_)
+      SetGroupChoice(default_group_name_, kDefaultGroupNumber);
+  }
+}
+
+int FieldTrial::AppendGroup(const std::string& name,
+                            Probability group_probability) {
+  // When the group choice was previously forced, we only need to return the
+  // id of the chosen group, and anything can be returned for the others.
+  if (forced_) {
+    DCHECK(!group_name_.empty());
+    if (name == group_name_) {
+      // Note that while |group_| may be equal to |kDefaultGroupNumber| on the
+      // forced trial, it will not have the same value as the default group
+      // number returned from the non-forced |FactoryGetFieldTrial()| call,
+      // which takes care to ensure that this does not happen.
+      return group_;
+    }
+    DCHECK_NE(next_group_number_, group_);
+    // We still return different numbers each time, in case some callers need
+    // them to be different.
+    return next_group_number_++;
+  }
+
+  DCHECK_LE(group_probability, divisor_);
+  DCHECK_GE(group_probability, 0);
+
+  if (enable_benchmarking_ || !enable_field_trial_)
+    group_probability = 0;
+
+  accumulated_group_probability_ += group_probability;
+
+  DCHECK_LE(accumulated_group_probability_, divisor_);
+  if (group_ == kNotFinalized && accumulated_group_probability_ > random_) {
+    // This is the group that crossed the random line, so we do the assignment.
+    SetGroupChoice(name, next_group_number_);
+  }
+  return next_group_number_++;
+}
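+
+// Usage sketch for AppendGroup() above (hypothetical trial and group names):
+//
+//   FieldTrial* trial = FieldTrialList::FactoryGetFieldTrial(
+//       "MyTrial", 100, "Default", 2099, 1, 1,
+//       FieldTrial::SESSION_RANDOMIZED, nullptr);
+//   trial->AppendGroup("GroupA", 30);  // 30/100 probability.
+//   trial->AppendGroup("GroupB", 30);  // 30/100; remainder -> "Default".
+//   const std::string& group = trial->group_name();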
+
+int FieldTrial::group() {
+  FinalizeGroupChoice();
+  if (trial_registered_)
+    FieldTrialList::NotifyFieldTrialGroupSelection(this);
+  return group_;
+}
+
+const std::string& FieldTrial::group_name() {
+  // Call |group()| to ensure group gets assigned and observers are notified.
+  group();
+  DCHECK(!group_name_.empty());
+  return group_name_;
+}
+
+const std::string& FieldTrial::GetGroupNameWithoutActivation() {
+  FinalizeGroupChoice();
+  return group_name_;
+}
+
+void FieldTrial::SetForced() {
+  // We might have been forced before (e.g., by CreateFieldTrial) and it's
+  // first come, first served; e.g., a command line switch has precedence.
+  if (forced_)
+    return;
+
+  // And we must finalize the group choice before we mark ourselves as forced.
+  FinalizeGroupChoice();
+  forced_ = true;
+}
+
+// static
+void FieldTrial::EnableBenchmarking() {
+  DCHECK_EQ(0u, FieldTrialList::GetFieldTrialCount());
+  enable_benchmarking_ = true;
+}
+
+// static
+FieldTrial* FieldTrial::CreateSimulatedFieldTrial(
+    const std::string& trial_name,
+    Probability total_probability,
+    const std::string& default_group_name,
+    double entropy_value) {
+  return new FieldTrial(trial_name, total_probability, default_group_name,
+                        entropy_value);
+}
+
+FieldTrial::FieldTrial(const std::string& trial_name,
+                       const Probability total_probability,
+                       const std::string& default_group_name,
+                       double entropy_value)
+    : trial_name_(trial_name),
+      divisor_(total_probability),
+      default_group_name_(default_group_name),
+      random_(GetGroupBoundaryValue(total_probability, entropy_value)),
+      accumulated_group_probability_(0),
+      next_group_number_(kDefaultGroupNumber + 1),
+      group_(kNotFinalized),
+      enable_field_trial_(true),
+      forced_(false),
+      group_reported_(false),
+      trial_registered_(false),
+      ref_(FieldTrialList::FieldTrialAllocator::kReferenceNull) {
+  DCHECK_GT(total_probability, 0);
+  DCHECK(!trial_name_.empty());
+  DCHECK(!default_group_name_.empty())
+      << "Trial " << trial_name << " is missing a default group name.";
+}
+
+FieldTrial::~FieldTrial() = default;
+
+void FieldTrial::SetTrialRegistered() {
+  DCHECK_EQ(kNotFinalized, group_);
+  DCHECK(!trial_registered_);
+  trial_registered_ = true;
+}
+
+void FieldTrial::SetGroupChoice(const std::string& group_name, int number) {
+  group_ = number;
+  if (group_name.empty())
+    StringAppendF(&group_name_, "%d", group_);
+  else
+    group_name_ = group_name;
+  DVLOG(1) << "Field trial: " << trial_name_ << " Group choice:" << group_name_;
+}
+
+void FieldTrial::FinalizeGroupChoice() {
+  FinalizeGroupChoiceImpl(false);
+}
+
+void FieldTrial::FinalizeGroupChoiceImpl(bool is_locked) {
+  if (group_ != kNotFinalized)
+    return;
+  accumulated_group_probability_ = divisor_;
+  // Here it's OK to use |kDefaultGroupNumber| since we can't be forced and not
+  // finalized.
+  DCHECK(!forced_);
+  SetGroupChoice(default_group_name_, kDefaultGroupNumber);
+
+  // Add the field trial to shared memory.
+  if (kUseSharedMemoryForFieldTrials && trial_registered_)
+    FieldTrialList::OnGroupFinalized(is_locked, this);
+}
+
+bool FieldTrial::GetActiveGroup(ActiveGroup* active_group) const {
+  if (!group_reported_ || !enable_field_trial_)
+    return false;
+  DCHECK_NE(group_, kNotFinalized);
+  active_group->trial_name = trial_name_;
+  active_group->group_name = group_name_;
+  return true;
+}
+
+bool FieldTrial::GetStateWhileLocked(State* field_trial_state,
+                                     bool include_expired) {
+  if (!include_expired && !enable_field_trial_)
+    return false;
+  FinalizeGroupChoiceImpl(true);
+  field_trial_state->trial_name = &trial_name_;
+  field_trial_state->group_name = &group_name_;
+  field_trial_state->activated = group_reported_;
+  return true;
+}
+
+//------------------------------------------------------------------------------
+// FieldTrialList methods and members.
+
+// static
+FieldTrialList* FieldTrialList::global_ = nullptr;
+
+// static
+bool FieldTrialList::used_without_global_ = false;
+
+FieldTrialList::Observer::~Observer() = default;
+
+FieldTrialList::FieldTrialList(
+    std::unique_ptr<const FieldTrial::EntropyProvider> entropy_provider)
+    : entropy_provider_(std::move(entropy_provider)),
+      observer_list_(new ObserverListThreadSafe<FieldTrialList::Observer>(
+          ObserverListPolicy::EXISTING_ONLY)) {
+  DCHECK(!global_);
+  DCHECK(!used_without_global_);
+  global_ = this;
+
+  Time two_years_from_build_time = GetBuildTime() + TimeDelta::FromDays(730);
+  Time::Exploded exploded;
+  two_years_from_build_time.LocalExplode(&exploded);
+  kNoExpirationYear = exploded.year;
+}
+
+FieldTrialList::~FieldTrialList() {
+  AutoLock auto_lock(lock_);
+  while (!registered_.empty()) {
+    RegistrationMap::iterator it = registered_.begin();
+    it->second->Release();
+    registered_.erase(it->first);
+  }
+  DCHECK_EQ(this, global_);
+  global_ = nullptr;
+}
+
+// static
+FieldTrial* FieldTrialList::FactoryGetFieldTrial(
+    const std::string& trial_name,
+    FieldTrial::Probability total_probability,
+    const std::string& default_group_name,
+    const int year,
+    const int month,
+    const int day_of_month,
+    FieldTrial::RandomizationType randomization_type,
+    int* default_group_number) {
+  return FactoryGetFieldTrialWithRandomizationSeed(
+      trial_name, total_probability, default_group_name, year, month,
+      day_of_month, randomization_type, 0, default_group_number, nullptr);
+}
+
+// static
+FieldTrial* FieldTrialList::FactoryGetFieldTrialWithRandomizationSeed(
+    const std::string& trial_name,
+    FieldTrial::Probability total_probability,
+    const std::string& default_group_name,
+    const int year,
+    const int month,
+    const int day_of_month,
+    FieldTrial::RandomizationType randomization_type,
+    uint32_t randomization_seed,
+    int* default_group_number,
+    const FieldTrial::EntropyProvider* override_entropy_provider) {
+  if (default_group_number)
+    *default_group_number = FieldTrial::kDefaultGroupNumber;
+  // Check if the field trial has already been created in some other way.
+  FieldTrial* existing_trial = Find(trial_name);
+  if (existing_trial) {
+    CHECK(existing_trial->forced_);
+    // If the default group name differs between the existing forced trial
+    // and this trial, then use a different value for the default group number.
+    if (default_group_number &&
+        default_group_name != existing_trial->default_group_name()) {
+      // If the new default group number corresponds to the group that was
+      // chosen for the forced trial (which has been finalized when it was
+      // forced), then set the default group number to that.
+      if (default_group_name == existing_trial->group_name_internal()) {
+        *default_group_number = existing_trial->group_;
+      } else {
+        // Otherwise, use |kNonConflictingGroupNumber| (-2) for the default
+        // group number, so that it does not conflict with the |AppendGroup()|
+        // result for the chosen group.
+        const int kNonConflictingGroupNumber = -2;
+        static_assert(
+            kNonConflictingGroupNumber != FieldTrial::kDefaultGroupNumber,
+            "The 'non-conflicting' group number conflicts");
+        static_assert(kNonConflictingGroupNumber != FieldTrial::kNotFinalized,
+                      "The 'non-conflicting' group number conflicts");
+        *default_group_number = kNonConflictingGroupNumber;
+      }
+    }
+    return existing_trial;
+  }
+
+  double entropy_value;
+  if (randomization_type == FieldTrial::ONE_TIME_RANDOMIZED) {
+    // If an override entropy provider is given, use it.
+    const FieldTrial::EntropyProvider* entropy_provider =
+        override_entropy_provider ? override_entropy_provider
+                                  : GetEntropyProviderForOneTimeRandomization();
+    CHECK(entropy_provider);
+    entropy_value = entropy_provider->GetEntropyForTrial(trial_name,
+                                                         randomization_seed);
+  } else {
+    DCHECK_EQ(FieldTrial::SESSION_RANDOMIZED, randomization_type);
+    DCHECK_EQ(0U, randomization_seed);
+    entropy_value = RandDouble();
+  }
+
+  FieldTrial* field_trial = new FieldTrial(trial_name, total_probability,
+                                           default_group_name, entropy_value);
+  if (GetBuildTime() > CreateTimeFromParams(year, month, day_of_month))
+    field_trial->Disable();
+  FieldTrialList::Register(field_trial);
+  return field_trial;
+}
+
+// static
+FieldTrial* FieldTrialList::Find(const std::string& trial_name) {
+  if (!global_)
+    return nullptr;
+  AutoLock auto_lock(global_->lock_);
+  return global_->PreLockedFind(trial_name);
+}
+
+// static
+int FieldTrialList::FindValue(const std::string& trial_name) {
+  FieldTrial* field_trial = Find(trial_name);
+  if (field_trial)
+    return field_trial->group();
+  return FieldTrial::kNotFinalized;
+}
+
+// static
+std::string FieldTrialList::FindFullName(const std::string& trial_name) {
+  FieldTrial* field_trial = Find(trial_name);
+  if (field_trial)
+    return field_trial->group_name();
+  return std::string();
+}
+
+// static
+bool FieldTrialList::TrialExists(const std::string& trial_name) {
+  return Find(trial_name) != nullptr;
+}
+
+// static
+bool FieldTrialList::IsTrialActive(const std::string& trial_name) {
+  FieldTrial* field_trial = Find(trial_name);
+  FieldTrial::ActiveGroup active_group;
+  return field_trial && field_trial->GetActiveGroup(&active_group);
+}
+
+// static
+void FieldTrialList::StatesToString(std::string* output) {
+  FieldTrial::ActiveGroups active_groups;
+  GetActiveFieldTrialGroups(&active_groups);
+  for (const auto& active_group : active_groups) {
+    DCHECK_EQ(std::string::npos,
+              active_group.trial_name.find(kPersistentStringSeparator));
+    DCHECK_EQ(std::string::npos,
+              active_group.group_name.find(kPersistentStringSeparator));
+    output->append(active_group.trial_name);
+    output->append(1, kPersistentStringSeparator);
+    output->append(active_group.group_name);
+    output->append(1, kPersistentStringSeparator);
+  }
+}
+
+// static
+void FieldTrialList::AllStatesToString(std::string* output,
+                                       bool include_expired) {
+  if (!global_)
+    return;
+  AutoLock auto_lock(global_->lock_);
+
+  for (const auto& registered : global_->registered_) {
+    FieldTrial::State trial;
+    if (!registered.second->GetStateWhileLocked(&trial, include_expired))
+      continue;
+    DCHECK_EQ(std::string::npos,
+              trial.trial_name->find(kPersistentStringSeparator));
+    DCHECK_EQ(std::string::npos,
+              trial.group_name->find(kPersistentStringSeparator));
+    if (trial.activated)
+      output->append(1, kActivationMarker);
+    output->append(*trial.trial_name);
+    output->append(1, kPersistentStringSeparator);
+    output->append(*trial.group_name);
+    output->append(1, kPersistentStringSeparator);
+  }
+}
+
+// static
+std::string FieldTrialList::AllParamsToString(bool include_expired,
+                                              EscapeDataFunc encode_data_func) {
+  FieldTrialParamAssociator* params_associator =
+      FieldTrialParamAssociator::GetInstance();
+  std::string output;
+  for (const auto& registered : GetRegisteredTrials()) {
+    FieldTrial::State trial;
+    if (!registered.second->GetStateWhileLocked(&trial, include_expired))
+      continue;
+    DCHECK_EQ(std::string::npos,
+              trial.trial_name->find(kPersistentStringSeparator));
+    DCHECK_EQ(std::string::npos,
+              trial.group_name->find(kPersistentStringSeparator));
+    std::map<std::string, std::string> params;
+    if (params_associator->GetFieldTrialParamsWithoutFallback(
+            *trial.trial_name, *trial.group_name, &params)) {
+      if (!params.empty()) {
+        // Add a comma to separate from the previous entry, if one exists.
+        if (!output.empty())
+          output.append(1, ',');
+
+        output.append(encode_data_func(*trial.trial_name));
+        output.append(1, '.');
+        output.append(encode_data_func(*trial.group_name));
+        output.append(1, ':');
+
+        std::string param_str;
+        for (const auto& param : params) {
+          // Add separator from previous param information if it exists.
+          if (!param_str.empty())
+            param_str.append(1, kPersistentStringSeparator);
+          param_str.append(encode_data_func(param.first));
+          param_str.append(1, kPersistentStringSeparator);
+          param_str.append(encode_data_func(param.second));
+        }
+
+        output.append(param_str);
+      }
+    }
+  }
+  return output;
+}
+
+// static
+void FieldTrialList::GetActiveFieldTrialGroups(
+    FieldTrial::ActiveGroups* active_groups) {
+  DCHECK(active_groups->empty());
+  if (!global_)
+    return;
+  AutoLock auto_lock(global_->lock_);
+
+  for (const auto& registered : global_->registered_) {
+    FieldTrial::ActiveGroup active_group;
+    if (registered.second->GetActiveGroup(&active_group))
+      active_groups->push_back(active_group);
+  }
+}
+
+// static
+void FieldTrialList::GetActiveFieldTrialGroupsFromString(
+    const std::string& trials_string,
+    FieldTrial::ActiveGroups* active_groups) {
+  std::vector<FieldTrialStringEntry> entries;
+  if (!ParseFieldTrialsString(trials_string, &entries))
+    return;
+
+  for (const auto& entry : entries) {
+    if (entry.activated) {
+      FieldTrial::ActiveGroup group;
+      group.trial_name = entry.trial_name.as_string();
+      group.group_name = entry.group_name.as_string();
+      active_groups->push_back(group);
+    }
+  }
+}
+
+// static
+void FieldTrialList::GetInitiallyActiveFieldTrials(
+    const base::CommandLine& command_line,
+    FieldTrial::ActiveGroups* active_groups) {
+  DCHECK(global_);
+  DCHECK(global_->create_trials_from_command_line_called_);
+
+  if (!global_->field_trial_allocator_) {
+    GetActiveFieldTrialGroupsFromString(
+        command_line.GetSwitchValueASCII(switches::kForceFieldTrials),
+        active_groups);
+    return;
+  }
+
+  FieldTrialAllocator* allocator = global_->field_trial_allocator_.get();
+  FieldTrialAllocator::Iterator mem_iter(allocator);
+  const FieldTrial::FieldTrialEntry* entry;
+  while ((entry = mem_iter.GetNextOfObject<FieldTrial::FieldTrialEntry>()) !=
+         nullptr) {
+    StringPiece trial_name;
+    StringPiece group_name;
+    if (subtle::NoBarrier_Load(&entry->activated) &&
+        entry->GetTrialAndGroupName(&trial_name, &group_name)) {
+      FieldTrial::ActiveGroup group;
+      group.trial_name = trial_name.as_string();
+      group.group_name = group_name.as_string();
+      active_groups->push_back(group);
+    }
+  }
+}
+
+// static
+bool FieldTrialList::CreateTrialsFromString(
+    const std::string& trials_string,
+    const std::set<std::string>& ignored_trial_names) {
+  DCHECK(global_);
+  if (trials_string.empty() || !global_)
+    return true;
+
+  std::vector<FieldTrialStringEntry> entries;
+  if (!ParseFieldTrialsString(trials_string, &entries))
+    return false;
+
+  for (const auto& entry : entries) {
+    const std::string trial_name = entry.trial_name.as_string();
+    const std::string group_name = entry.group_name.as_string();
+
+    if (ContainsKey(ignored_trial_names, trial_name)) {
+      // This warns that the field trial forced through command-line input
+      // cannot be forced.
+      // Use --enable-logging or --enable-logging=stderr to see this warning.
+      LOG(WARNING) << "Field trial: " << trial_name << " cannot be forced.";
+      continue;
+    }
+
+    FieldTrial* trial = CreateFieldTrial(trial_name, group_name);
+    if (!trial)
+      return false;
+    if (entry.activated) {
+      // Call |group()| to mark the trial as "used" and notify observers, if
+      // any. This is useful to ensure that field trials created in child
+      // processes are properly reported in crash reports.
+      trial->group();
+    }
+  }
+  return true;
+}
+
+// static
+void FieldTrialList::CreateTrialsFromCommandLine(
+    const CommandLine& cmd_line,
+    const char* field_trial_handle_switch,
+    int fd_key) {
+  global_->create_trials_from_command_line_called_ = true;
+
+#if defined(OS_WIN) || defined(OS_FUCHSIA)
+  if (cmd_line.HasSwitch(field_trial_handle_switch)) {
+    std::string switch_value =
+        cmd_line.GetSwitchValueASCII(field_trial_handle_switch);
+    bool result = CreateTrialsFromSwitchValue(switch_value);
+    DCHECK(result);
+  }
+#elif defined(OS_POSIX) && !defined(OS_NACL)
+  // On POSIX, we check if the handle is valid by seeing if the browser process
+  // sent over the switch (we don't care about the value). Invalid handles
+  // occur in some browser tests which don't initialize the allocator.
+  if (cmd_line.HasSwitch(field_trial_handle_switch)) {
+    std::string switch_value =
+        cmd_line.GetSwitchValueASCII(field_trial_handle_switch);
+    bool result = CreateTrialsFromDescriptor(fd_key, switch_value);
+    DCHECK(result);
+  }
+#endif
+
+  if (cmd_line.HasSwitch(switches::kForceFieldTrials)) {
+    bool result = FieldTrialList::CreateTrialsFromString(
+        cmd_line.GetSwitchValueASCII(switches::kForceFieldTrials),
+        std::set<std::string>());
+    DCHECK(result);
+  }
+}
+
+// static
+void FieldTrialList::CreateFeaturesFromCommandLine(
+    const base::CommandLine& command_line,
+    const char* enable_features_switch,
+    const char* disable_features_switch,
+    FeatureList* feature_list) {
+  // Fallback to command line if not using shared memory.
+  if (!kUseSharedMemoryForFieldTrials ||
+      !global_->field_trial_allocator_.get()) {
+    return feature_list->InitializeFromCommandLine(
+        command_line.GetSwitchValueASCII(enable_features_switch),
+        command_line.GetSwitchValueASCII(disable_features_switch));
+  }
+
+  feature_list->InitializeFromSharedMemory(
+      global_->field_trial_allocator_.get());
+}
+
+#if defined(OS_WIN)
+// static
+void FieldTrialList::AppendFieldTrialHandleIfNeeded(
+    HandlesToInheritVector* handles) {
+  if (!global_)
+    return;
+  if (kUseSharedMemoryForFieldTrials) {
+    InstantiateFieldTrialAllocatorIfNeeded();
+    if (global_->readonly_allocator_handle_.IsValid())
+      handles->push_back(global_->readonly_allocator_handle_.GetHandle());
+  }
+}
+#elif defined(OS_FUCHSIA)
+// TODO(fuchsia): Implement shared-memory configuration (crbug.com/752368).
+#elif defined(OS_POSIX) && !defined(OS_NACL)
+// static
+SharedMemoryHandle FieldTrialList::GetFieldTrialHandle() {
+  if (global_ && kUseSharedMemoryForFieldTrials) {
+    InstantiateFieldTrialAllocatorIfNeeded();
+    // We check for an invalid handle where this gets called.
+    return global_->readonly_allocator_handle_;
+  }
+  return SharedMemoryHandle();
+}
+#endif
+
+// static
+void FieldTrialList::CopyFieldTrialStateToFlags(
+    const char* field_trial_handle_switch,
+    const char* enable_features_switch,
+    const char* disable_features_switch,
+    CommandLine* cmd_line) {
+  // TODO(lawrencewu): Ideally, having the global would be guaranteed. However,
+  // content browser tests currently don't create a FieldTrialList because they
+  // don't run ChromeBrowserMainParts code where it's done for Chrome.
+  // Some tests depend on the enable and disable features flag switch, though,
+  // so we can still add those even though AllStatesToString() will be a no-op.
+  if (!global_) {
+    AddFeatureAndFieldTrialFlags(enable_features_switch,
+                                 disable_features_switch, cmd_line);
+    return;
+  }
+
+  // Use shared memory to pass the state if the feature is enabled, otherwise
+  // fallback to passing it via the command line as a string.
+  if (kUseSharedMemoryForFieldTrials) {
+    InstantiateFieldTrialAllocatorIfNeeded();
+    // If the readonly handle didn't get duplicated properly, then fallback to
+    // original behavior.
+    if (!global_->readonly_allocator_handle_.IsValid()) {
+      AddFeatureAndFieldTrialFlags(enable_features_switch,
+                                   disable_features_switch, cmd_line);
+      return;
+    }
+
+    global_->field_trial_allocator_->UpdateTrackingHistograms();
+    std::string switch_value = SerializeSharedMemoryHandleMetadata(
+        global_->readonly_allocator_handle_);
+    cmd_line->AppendSwitchASCII(field_trial_handle_switch, switch_value);
+
+    // Append --enable-features and --disable-features switches corresponding
+    // to the features enabled on the command-line, so that child and browser
+    // process command lines match and clearly show what has been specified
+    // explicitly by the user.
+    std::string enabled_features;
+    std::string disabled_features;
+    FeatureList::GetInstance()->GetCommandLineFeatureOverrides(
+        &enabled_features, &disabled_features);
+
+    if (!enabled_features.empty())
+      cmd_line->AppendSwitchASCII(enable_features_switch, enabled_features);
+    if (!disabled_features.empty())
+      cmd_line->AppendSwitchASCII(disable_features_switch, disabled_features);
+
+    return;
+  }
+
+  AddFeatureAndFieldTrialFlags(enable_features_switch, disable_features_switch,
+                               cmd_line);
+}
+
+// static
+FieldTrial* FieldTrialList::CreateFieldTrial(
+    const std::string& name,
+    const std::string& group_name) {
+  DCHECK(global_);
+  if (name.empty() || group_name.empty() || !global_)
+    return nullptr;
+
+  FieldTrial* field_trial = FieldTrialList::Find(name);
+  if (field_trial) {
+    // In single process mode, or when we force them from the command line,
+    // we may have already created the field trial.
+    if (field_trial->group_name_internal() != group_name)
+      return nullptr;
+    return field_trial;
+  }
+  const int kTotalProbability = 100;
+  field_trial = new FieldTrial(name, kTotalProbability, group_name, 0);
+  FieldTrialList::Register(field_trial);
+  // Force the trial, which will also finalize the group choice.
+  field_trial->SetForced();
+  return field_trial;
+}
+
+// static
+bool FieldTrialList::AddObserver(Observer* observer) {
+  if (!global_)
+    return false;
+  global_->observer_list_->AddObserver(observer);
+  return true;
+}
+
+// static
+void FieldTrialList::RemoveObserver(Observer* observer) {
+  if (!global_)
+    return;
+  global_->observer_list_->RemoveObserver(observer);
+}
+
+// static
+void FieldTrialList::SetSynchronousObserver(Observer* observer) {
+  DCHECK(!global_->synchronous_observer_);
+  global_->synchronous_observer_ = observer;
+}
+
+// static
+void FieldTrialList::RemoveSynchronousObserver(Observer* observer) {
+  DCHECK_EQ(global_->synchronous_observer_, observer);
+  global_->synchronous_observer_ = nullptr;
+}
+
+// static
+void FieldTrialList::OnGroupFinalized(bool is_locked, FieldTrial* field_trial) {
+  if (!global_)
+    return;
+  if (is_locked) {
+    AddToAllocatorWhileLocked(global_->field_trial_allocator_.get(),
+                              field_trial);
+  } else {
+    AutoLock auto_lock(global_->lock_);
+    AddToAllocatorWhileLocked(global_->field_trial_allocator_.get(),
+                              field_trial);
+  }
+}
+
+// static
+void FieldTrialList::NotifyFieldTrialGroupSelection(FieldTrial* field_trial) {
+  if (!global_)
+    return;
+
+  {
+    AutoLock auto_lock(global_->lock_);
+    if (field_trial->group_reported_)
+      return;
+    field_trial->group_reported_ = true;
+
+    if (!field_trial->enable_field_trial_)
+      return;
+
+    if (kUseSharedMemoryForFieldTrials)
+      ActivateFieldTrialEntryWhileLocked(field_trial);
+  }
+
+  // Recording for stability debugging has to be done inline, as a task posted
+  // to an observer may not get executed before a crash.
+  base::debug::GlobalActivityTracker* tracker =
+      base::debug::GlobalActivityTracker::Get();
+  if (tracker) {
+    tracker->RecordFieldTrial(field_trial->trial_name(),
+                              field_trial->group_name_internal());
+  }
+
+  if (global_->synchronous_observer_) {
+    global_->synchronous_observer_->OnFieldTrialGroupFinalized(
+        field_trial->trial_name(), field_trial->group_name_internal());
+  }
+
+  global_->observer_list_->Notify(
+      FROM_HERE, &FieldTrialList::Observer::OnFieldTrialGroupFinalized,
+      field_trial->trial_name(), field_trial->group_name_internal());
+}
+
+// static
+size_t FieldTrialList::GetFieldTrialCount() {
+  if (!global_)
+    return 0;
+  AutoLock auto_lock(global_->lock_);
+  return global_->registered_.size();
+}
+
+// static
+bool FieldTrialList::GetParamsFromSharedMemory(
+    FieldTrial* field_trial,
+    std::map<std::string, std::string>* params) {
+  DCHECK(global_);
+  // If the field trial allocator is not set up yet, then there are several
+  // cases:
+  //   - We are in the browser process and the allocator has not been set up
+  //   yet. If we got here, then we couldn't find the params in
+  //   FieldTrialParamAssociator, so it's definitely not here. Return false.
+  //   - Using shared memory for field trials is not enabled. If we got here,
+  //   then there's nothing in shared memory. Return false.
+  //   - We are in the child process and the allocator has not been set up yet.
+  //   If this is the case, then you are calling this too early. The field trial
+  //   allocator should get set up very early in the lifecycle. Try to see if
+  //   you can call it after it's been set up.
+  AutoLock auto_lock(global_->lock_);
+  if (!global_->field_trial_allocator_)
+    return false;
+
+  // If ref_ isn't set, then the field trial data can't be in shared memory.
+  if (!field_trial->ref_)
+    return false;
+
+  const FieldTrial::FieldTrialEntry* entry =
+      global_->field_trial_allocator_->GetAsObject<FieldTrial::FieldTrialEntry>(
+          field_trial->ref_);
+
+  size_t allocated_size =
+      global_->field_trial_allocator_->GetAllocSize(field_trial->ref_);
+  size_t actual_size = sizeof(FieldTrial::FieldTrialEntry) + entry->pickle_size;
+  if (allocated_size < actual_size)
+    return false;
+
+  return entry->GetParams(params);
+}
+
+// static
+void FieldTrialList::ClearParamsFromSharedMemoryForTesting() {
+  if (!global_)
+    return;
+
+  AutoLock auto_lock(global_->lock_);
+  if (!global_->field_trial_allocator_)
+    return;
+
+  // To clear the params, we iterate through every item in the allocator, copy
+  // just the trial and group name into a newly-allocated segment and then clear
+  // the existing item.
+  FieldTrialAllocator* allocator = global_->field_trial_allocator_.get();
+  FieldTrialAllocator::Iterator mem_iter(allocator);
+
+  // List of refs to eventually be made iterable. We can't make them iterable
+  // inside the loop, since the iteration would then never terminate.
+  std::vector<FieldTrial::FieldTrialRef> new_refs;
+
+  FieldTrial::FieldTrialRef prev_ref;
+  while ((prev_ref = mem_iter.GetNextOfType<FieldTrial::FieldTrialEntry>()) !=
+         FieldTrialAllocator::kReferenceNull) {
+    // Get the existing field trial entry in shared memory.
+    const FieldTrial::FieldTrialEntry* prev_entry =
+        allocator->GetAsObject<FieldTrial::FieldTrialEntry>(prev_ref);
+    StringPiece trial_name;
+    StringPiece group_name;
+    if (!prev_entry->GetTrialAndGroupName(&trial_name, &group_name))
+      continue;
+
+    // Write a new entry, minus the params.
+    Pickle pickle;
+    pickle.WriteString(trial_name);
+    pickle.WriteString(group_name);
+    size_t total_size = sizeof(FieldTrial::FieldTrialEntry) + pickle.size();
+    FieldTrial::FieldTrialEntry* new_entry =
+        allocator->New<FieldTrial::FieldTrialEntry>(total_size);
+    subtle::NoBarrier_Store(&new_entry->activated,
+                            subtle::NoBarrier_Load(&prev_entry->activated));
+    new_entry->pickle_size = pickle.size();
+
+    // TODO(lawrencewu): Modify base::Pickle to be able to write over a section
+    // in memory, so we can avoid this memcpy.
+    char* dst = reinterpret_cast<char*>(new_entry) +
+                sizeof(FieldTrial::FieldTrialEntry);
+    memcpy(dst, pickle.data(), pickle.size());
+
+    // Update the ref on the field trial and add it to the list to be made
+    // iterable.
+    FieldTrial::FieldTrialRef new_ref = allocator->GetAsReference(new_entry);
+    FieldTrial* trial = global_->PreLockedFind(trial_name.as_string());
+    trial->ref_ = new_ref;
+    new_refs.push_back(new_ref);
+
+    // Mark the existing entry as unused.
+    allocator->ChangeType(prev_ref, 0,
+                          FieldTrial::FieldTrialEntry::kPersistentTypeId,
+                          /*clear=*/false);
+  }
+
+  for (const auto& ref : new_refs) {
+    allocator->MakeIterable(ref);
+  }
+}
+
+// static
+void FieldTrialList::DumpAllFieldTrialsToPersistentAllocator(
+    PersistentMemoryAllocator* allocator) {
+  if (!global_)
+    return;
+  AutoLock auto_lock(global_->lock_);
+  for (const auto& registered : global_->registered_) {
+    AddToAllocatorWhileLocked(allocator, registered.second);
+  }
+}
+
+// static
+std::vector<const FieldTrial::FieldTrialEntry*>
+FieldTrialList::GetAllFieldTrialsFromPersistentAllocator(
+    PersistentMemoryAllocator const& allocator) {
+  std::vector<const FieldTrial::FieldTrialEntry*> entries;
+  FieldTrialAllocator::Iterator iter(&allocator);
+  const FieldTrial::FieldTrialEntry* entry;
+  while ((entry = iter.GetNextOfObject<FieldTrial::FieldTrialEntry>()) !=
+         nullptr) {
+    entries.push_back(entry);
+  }
+  return entries;
+}
+
+// static
+bool FieldTrialList::IsGlobalSetForTesting() {
+  return global_ != nullptr;
+}
+
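+// The serialized metadata is a comma-separated string. On Windows and Fuchsia
+// it starts with the raw handle value; on other POSIX platforms the handle
+// travels separately as a file descriptor, so only the GUID and size are
+// present. Illustrative values only:
+//   Windows/Fuchsia: "3172,12345678901234,98765432109876,131072"
+//                    (handle, GUID high, GUID low, size)
+//   POSIX:           "12345678901234,98765432109876,131072"
+//                    (GUID high, GUID low, size)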
+// static
+std::string FieldTrialList::SerializeSharedMemoryHandleMetadata(
+    const SharedMemoryHandle& shm) {
+  std::stringstream ss;
+#if defined(OS_WIN)
+  // Tell the child process the name of the inherited HANDLE.
+  uintptr_t uintptr_handle = reinterpret_cast<uintptr_t>(shm.GetHandle());
+  ss << uintptr_handle << ",";
+#elif defined(OS_FUCHSIA)
+  ss << shm.GetHandle() << ",";
+#elif !defined(OS_POSIX)
+#error Unsupported OS
+#endif
+
+  base::UnguessableToken guid = shm.GetGUID();
+  ss << guid.GetHighForSerialization() << "," << guid.GetLowForSerialization();
+  ss << "," << shm.GetSize();
+  return ss.str();
+}
+
+#if defined(OS_WIN) || defined(OS_FUCHSIA)
+
+// static
+SharedMemoryHandle FieldTrialList::DeserializeSharedMemoryHandleMetadata(
+    const std::string& switch_value) {
+  std::vector<base::StringPiece> tokens = base::SplitStringPiece(
+      switch_value, ",", base::KEEP_WHITESPACE, base::SPLIT_WANT_ALL);
+
+  if (tokens.size() != 4)
+    return SharedMemoryHandle();
+
+  int field_trial_handle = 0;
+  if (!base::StringToInt(tokens[0], &field_trial_handle))
+    return SharedMemoryHandle();
+#if defined(OS_FUCHSIA)
+  zx_handle_t handle = static_cast<zx_handle_t>(field_trial_handle);
+#elif defined(OS_WIN)
+  HANDLE handle = reinterpret_cast<HANDLE>(field_trial_handle);
+  if (base::IsCurrentProcessElevated()) {
+    // base::LaunchElevatedProcess doesn't have a way to duplicate the handle,
+    // but this process can since by definition it's not sandboxed.
+    base::ProcessId parent_pid = base::GetParentProcessId(GetCurrentProcess());
+    HANDLE parent_handle = OpenProcess(PROCESS_ALL_ACCESS, FALSE, parent_pid);
+    DuplicateHandle(parent_handle, handle, GetCurrentProcess(), &handle, 0,
+                    FALSE, DUPLICATE_SAME_ACCESS);
+    CloseHandle(parent_handle);
+  }
+#endif  // defined(OS_WIN)
+
+  base::UnguessableToken guid;
+  if (!DeserializeGUIDFromStringPieces(tokens[1], tokens[2], &guid))
+    return SharedMemoryHandle();
+
+  int size;
+  if (!base::StringToInt(tokens[3], &size))
+    return SharedMemoryHandle();
+
+  return SharedMemoryHandle(handle, static_cast<size_t>(size), guid);
+}
+
+#elif defined(OS_POSIX) && !defined(OS_NACL)
+
+// static
+SharedMemoryHandle FieldTrialList::DeserializeSharedMemoryHandleMetadata(
+    int fd,
+    const std::string& switch_value) {
+  std::vector<base::StringPiece> tokens = base::SplitStringPiece(
+      switch_value, ",", base::KEEP_WHITESPACE, base::SPLIT_WANT_ALL);
+
+  if (tokens.size() != 3)
+    return SharedMemoryHandle();
+
+  base::UnguessableToken guid;
+  if (!DeserializeGUIDFromStringPieces(tokens[0], tokens[1], &guid))
+    return SharedMemoryHandle();
+
+  int size;
+  if (!base::StringToInt(tokens[2], &size))
+    return SharedMemoryHandle();
+
+  return SharedMemoryHandle(FileDescriptor(fd, true), static_cast<size_t>(size),
+                            guid);
+}
+
+#endif
+
+#if defined(OS_WIN) || defined(OS_FUCHSIA)
+// static
+bool FieldTrialList::CreateTrialsFromSwitchValue(
+    const std::string& switch_value) {
+  SharedMemoryHandle shm = DeserializeSharedMemoryHandleMetadata(switch_value);
+  if (!shm.IsValid())
+    return false;
+  return FieldTrialList::CreateTrialsFromSharedMemoryHandle(shm);
+}
+#elif defined(OS_POSIX) && !defined(OS_NACL)
+// static
+bool FieldTrialList::CreateTrialsFromDescriptor(
+    int fd_key,
+    const std::string& switch_value) {
+  if (!kUseSharedMemoryForFieldTrials)
+    return false;
+
+  if (fd_key == -1)
+    return false;
+
+  int fd = GlobalDescriptors::GetInstance()->MaybeGet(fd_key);
+  if (fd == -1)
+    return false;
+
+  SharedMemoryHandle shm =
+      DeserializeSharedMemoryHandleMetadata(fd, switch_value);
+  if (!shm.IsValid())
+    return false;
+
+  bool result = FieldTrialList::CreateTrialsFromSharedMemoryHandle(shm);
+  DCHECK(result);
+  return true;
+}
+#endif  // defined(OS_POSIX) && !defined(OS_NACL)
+
+// static
+bool FieldTrialList::CreateTrialsFromSharedMemoryHandle(
+    SharedMemoryHandle shm_handle) {
+  // |shm| gets deleted when it goes out of scope, but that's OK because we
+  // need it only for the duration of this method.
+  std::unique_ptr<SharedMemory> shm(new SharedMemory(shm_handle, true));
+  if (!shm->Map(kFieldTrialAllocationSize))
+    OnOutOfMemory(kFieldTrialAllocationSize);
+
+  return FieldTrialList::CreateTrialsFromSharedMemory(std::move(shm));
+}
+
+// static
+bool FieldTrialList::CreateTrialsFromSharedMemory(
+    std::unique_ptr<SharedMemory> shm) {
+  global_->field_trial_allocator_.reset(
+      new FieldTrialAllocator(std::move(shm), 0, kAllocatorName, true));
+  FieldTrialAllocator* shalloc = global_->field_trial_allocator_.get();
+  FieldTrialAllocator::Iterator mem_iter(shalloc);
+
+  const FieldTrial::FieldTrialEntry* entry;
+  while ((entry = mem_iter.GetNextOfObject<FieldTrial::FieldTrialEntry>()) !=
+         nullptr) {
+    StringPiece trial_name;
+    StringPiece group_name;
+    if (!entry->GetTrialAndGroupName(&trial_name, &group_name))
+      return false;
+
+    // TODO(lawrencewu): Convert the API for CreateFieldTrial to take
+    // StringPieces.
+    FieldTrial* trial =
+        CreateFieldTrial(trial_name.as_string(), group_name.as_string());
+
+    trial->ref_ = mem_iter.GetAsReference(entry);
+    if (subtle::NoBarrier_Load(&entry->activated)) {
+      // Call |group()| to mark the trial as "used" and notify observers, if
+      // any. This is useful to ensure that field trials created in child
+      // processes are properly reported in crash reports.
+      trial->group();
+    }
+  }
+  return true;
+}
+
+// static
+void FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded() {
+  if (!global_)
+    return;
+  AutoLock auto_lock(global_->lock_);
+  // Create the allocator if not already created and add all existing trials.
+  if (global_->field_trial_allocator_ != nullptr)
+    return;
+
+  SharedMemoryCreateOptions options;
+  options.size = kFieldTrialAllocationSize;
+  options.share_read_only = true;
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+  options.type = SharedMemoryHandle::POSIX;
+#endif
+
+  std::unique_ptr<SharedMemory> shm(new SharedMemory());
+  if (!shm->Create(options))
+    OnOutOfMemory(kFieldTrialAllocationSize);
+
+  if (!shm->Map(kFieldTrialAllocationSize))
+    OnOutOfMemory(kFieldTrialAllocationSize);
+
+  global_->field_trial_allocator_.reset(
+      new FieldTrialAllocator(std::move(shm), 0, kAllocatorName, false));
+  global_->field_trial_allocator_->CreateTrackingHistograms(kAllocatorName);
+
+  // Add all existing field trials.
+  for (const auto& registered : global_->registered_) {
+    AddToAllocatorWhileLocked(global_->field_trial_allocator_.get(),
+                              registered.second);
+  }
+
+  // Add all existing features.
+  FeatureList::GetInstance()->AddFeaturesToAllocator(
+      global_->field_trial_allocator_.get());
+
+#if !defined(OS_NACL)
+  global_->readonly_allocator_handle_ = GetSharedMemoryReadOnlyHandle(
+      global_->field_trial_allocator_->shared_memory());
+#endif
+}
+
+// static
+void FieldTrialList::AddToAllocatorWhileLocked(
+    PersistentMemoryAllocator* allocator,
+    FieldTrial* field_trial) {
+  // Don't do anything if the allocator hasn't been instantiated yet.
+  if (allocator == nullptr)
+    return;
+
+  // Or if the allocator is read only, which means we are in a child process and
+  // shouldn't be writing to it.
+  if (allocator->IsReadonly())
+    return;
+
+  FieldTrial::State trial_state;
+  if (!field_trial->GetStateWhileLocked(&trial_state, false))
+    return;
+
+  // Or if we've already added it. We must check after GetState since it can
+  // also add to the allocator.
+  if (field_trial->ref_)
+    return;
+
+  Pickle pickle;
+  PickleFieldTrial(trial_state, &pickle);
+
+  size_t total_size = sizeof(FieldTrial::FieldTrialEntry) + pickle.size();
+  FieldTrial::FieldTrialRef ref = allocator->Allocate(
+      total_size, FieldTrial::FieldTrialEntry::kPersistentTypeId);
+  if (ref == FieldTrialAllocator::kReferenceNull) {
+    NOTREACHED();
+    return;
+  }
+
+  FieldTrial::FieldTrialEntry* entry =
+      allocator->GetAsObject<FieldTrial::FieldTrialEntry>(ref);
+  subtle::NoBarrier_Store(&entry->activated, trial_state.activated);
+  entry->pickle_size = pickle.size();
+
+  // TODO(lawrencewu): Modify base::Pickle to be able to write over a section in
+  // memory, so we can avoid this memcpy.
+  char* dst =
+      reinterpret_cast<char*>(entry) + sizeof(FieldTrial::FieldTrialEntry);
+  memcpy(dst, pickle.data(), pickle.size());
+
+  allocator->MakeIterable(ref);
+  field_trial->ref_ = ref;
+}
+
+// static
+void FieldTrialList::ActivateFieldTrialEntryWhileLocked(
+    FieldTrial* field_trial) {
+  FieldTrialAllocator* allocator = global_->field_trial_allocator_.get();
+
+  // Check if we're in the child process and return early if so.
+  if (!allocator || allocator->IsReadonly())
+    return;
+
+  FieldTrial::FieldTrialRef ref = field_trial->ref_;
+  if (ref == FieldTrialAllocator::kReferenceNull) {
+    // It's fine to do this even if the allocator hasn't been instantiated
+    // yet -- it'll just return early.
+    AddToAllocatorWhileLocked(allocator, field_trial);
+  } else {
+    // It's also okay to do this even though the callee doesn't have a lock --
+    // the only thing that happens on a stale read here is a slight performance
+    // hit from the child re-synchronizing activation state.
+    FieldTrial::FieldTrialEntry* entry =
+        allocator->GetAsObject<FieldTrial::FieldTrialEntry>(ref);
+    subtle::NoBarrier_Store(&entry->activated, 1);
+  }
+}
+
+// static
+const FieldTrial::EntropyProvider*
+    FieldTrialList::GetEntropyProviderForOneTimeRandomization() {
+  if (!global_) {
+    used_without_global_ = true;
+    return nullptr;
+  }
+
+  return global_->entropy_provider_.get();
+}
+
+FieldTrial* FieldTrialList::PreLockedFind(const std::string& name) {
+  RegistrationMap::iterator it = registered_.find(name);
+  if (registered_.end() == it)
+    return nullptr;
+  return it->second;
+}
+
+// static
+void FieldTrialList::Register(FieldTrial* trial) {
+  if (!global_) {
+    used_without_global_ = true;
+    return;
+  }
+  AutoLock auto_lock(global_->lock_);
+  CHECK(!global_->PreLockedFind(trial->trial_name())) << trial->trial_name();
+  trial->AddRef();
+  trial->SetTrialRegistered();
+  global_->registered_[trial->trial_name()] = trial;
+}
+
+// static
+FieldTrialList::RegistrationMap FieldTrialList::GetRegisteredTrials() {
+  RegistrationMap output;
+  if (global_) {
+    AutoLock auto_lock(global_->lock_);
+    output = global_->registered_;
+  }
+  return output;
+}
+
+}  // namespace base
diff --git a/base/metrics/field_trial.h b/base/metrics/field_trial.h
new file mode 100644
index 0000000..ac4ea1c
--- /dev/null
+++ b/base/metrics/field_trial.h
@@ -0,0 +1,802 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// FieldTrial is a class for handling details of statistical experiments
+// performed by actual users in the field (i.e., in a shipped or beta product).
+// All code is called exclusively on the UI thread currently.
+//
+// The simplest example is an experiment to see whether one of two options
+// produces "better" results across our user population.  In that scenario, UMA
+// data is uploaded to aggregate the test results, and this FieldTrial class
+// manages the state of each such experiment (state == which option was
+// pseudo-randomly selected).
+//
+// States are typically generated randomly, either based on a one time
+// randomization (which will yield the same results, in terms of selecting
+// the client for a field trial or not, for every run of the program on a
+// given machine), or by a session randomization (generated each time the
+// application starts up, but held constant during the duration of the
+// process).
+
+//------------------------------------------------------------------------------
+// Example:  Suppose we have an experiment involving memory, such as determining
+// the impact of some pruning algorithm.
+// We assume that we already have a histogram of memory usage, such as:
+
+//   UMA_HISTOGRAM_COUNTS("Memory.RendererTotal", count);
+
+// Somewhere in main thread initialization code, we'd probably define an
+// instance of a FieldTrial, with code such as:
+
+// // FieldTrials are reference counted, and persist automagically until
+// // process teardown, courtesy of their automatic registration in
+// // FieldTrialList.
+// // Note: This field trial will run in Chrome instances compiled through
+// //       8 July, 2015, and after that all instances will be in "StandardMem".
+// scoped_refptr<base::FieldTrial> trial(
+//     base::FieldTrialList::FactoryGetFieldTrial(
+//         "MemoryExperiment", 1000, "StandardMem", 2015, 7, 8,
+//         base::FieldTrial::ONE_TIME_RANDOMIZED, NULL));
+//
+// const int high_mem_group =
+//     trial->AppendGroup("HighMem", 20);  // 2% in HighMem group.
+// const int low_mem_group =
+//     trial->AppendGroup("LowMem", 20);   // 2% in LowMem group.
+// // Take action depending on which group we randomly land in.
+// if (trial->group() == high_mem_group)
+//   SetPruningAlgorithm(kType1);  // Sample setting of browser state.
+// else if (trial->group() == low_mem_group)
+//   SetPruningAlgorithm(kType2);  // Sample alternate setting.
+
+//------------------------------------------------------------------------------
+
+#ifndef BASE_METRICS_FIELD_TRIAL_H_
+#define BASE_METRICS_FIELD_TRIAL_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/command_line.h"
+#include "base/feature_list.h"
+#include "base/files/file.h"
+#include "base/gtest_prod_util.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/shared_memory.h"
+#include "base/memory/shared_memory_handle.h"
+#include "base/metrics/persistent_memory_allocator.h"
+#include "base/observer_list_threadsafe.h"
+#include "base/pickle.h"
+#include "base/process/launch.h"
+#include "base/strings/string_piece.h"
+#include "base/synchronization/lock.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+
+namespace base {
+
+class FieldTrialList;
+
+class BASE_EXPORT FieldTrial : public RefCounted<FieldTrial> {
+ public:
+  typedef int Probability;  // Probability type for being selected in a trial.
+
+  // TODO(665129): Make private again after crash has been resolved.
+  typedef SharedPersistentMemoryAllocator::Reference FieldTrialRef;
+
+  // Specifies the persistence of the field trial group choice.
+  enum RandomizationType {
+    // One time randomized trials will persist the group choice between
+    // restarts, which is recommended for most trials, especially those that
+    // change user visible behavior.
+    ONE_TIME_RANDOMIZED,
+    // Session randomized trials will roll the dice to select a group on every
+    // process restart.
+    SESSION_RANDOMIZED,
+  };
+
+  // EntropyProvider is an interface for providing entropy for one-time
+  // randomized (persistent) field trials.
+  class BASE_EXPORT EntropyProvider {
+   public:
+    virtual ~EntropyProvider();
+
+    // Returns a double in the range of [0, 1) to be used for the dice roll for
+    // the specified field trial. If |randomization_seed| is not 0, it will be
+    // used in preference to |trial_name| for generating the entropy by entropy
+    // providers that support it. A given instance should always return the same
+    // value given the same input |trial_name| and |randomization_seed| values.
+    virtual double GetEntropyForTrial(const std::string& trial_name,
+                                      uint32_t randomization_seed) const = 0;
+  };
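+
+  // A minimal sketch of a custom provider (hypothetical, for illustration
+  // only; it is not part of this API):
+  //
+  //   class FixedEntropyProvider : public FieldTrial::EntropyProvider {
+  //    public:
+  //     // |value| is assumed to be in the range [0, 1).
+  //     explicit FixedEntropyProvider(double value) : value_(value) {}
+  //     double GetEntropyForTrial(const std::string& trial_name,
+  //                               uint32_t randomization_seed) const override {
+  //       return value_;  // Same inputs always yield the same value.
+  //     }
+  //
+  //    private:
+  //     const double value_;
+  //   };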
+
+  // A pair representing a Field Trial and its selected group.
+  struct ActiveGroup {
+    std::string trial_name;
+    std::string group_name;
+  };
+
+  // A triplet representing a FieldTrial, its selected group and whether it's
+  // active. String members are pointers to the underlying strings owned by the
+  // FieldTrial object. Does not use StringPiece to avoid conversions back to
+  // std::string.
+  struct BASE_EXPORT State {
+    const std::string* trial_name = nullptr;
+    const std::string* group_name = nullptr;
+    bool activated = false;
+
+    State();
+    State(const State& other);
+    ~State();
+  };
+
+  // We create one FieldTrialEntry per field trial in shared memory, via
+  // AddToAllocatorWhileLocked. The FieldTrialEntry is followed by a
+  // base::Pickle object that we unpickle and read from.
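+  //
+  // Illustrative layout of one such entry in shared memory (the pickled data
+  // holds the trial name, the group name, and then any param key/value
+  // pairs):
+  //
+  //   | activated (4 bytes) | pickle_size (4 bytes) | pickled data ... |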
+  struct BASE_EXPORT FieldTrialEntry {
+    // SHA1(FieldTrialEntry): Increment this if structure changes!
+    static constexpr uint32_t kPersistentTypeId = 0xABA17E13 + 2;
+
+    // Expected size for 32/64-bit check.
+    static constexpr size_t kExpectedInstanceSize = 8;
+
+    // Whether or not this field trial is activated. This is really just a
+    // boolean but using a 32 bit value for portability reasons. It should be
+    // accessed via NoBarrier_Load()/NoBarrier_Store() to prevent the compiler
+    // from doing unexpected optimizations because it thinks that only one
+    // thread is accessing the memory location.
+    subtle::Atomic32 activated;
+
+    // Size of the pickled structure, NOT the total size of this entry.
+    uint32_t pickle_size;
+
+    // Calling this is only valid when the entry is initialized. That is, it
+    // resides in shared memory and has a pickle containing the trial name and
+    // group name following it.
+    bool GetTrialAndGroupName(StringPiece* trial_name,
+                              StringPiece* group_name) const;
+
+    // Calling this is only valid when the entry is initialized as well. Reads
+    // the parameters following the trial and group name and stores them as
+    // key-value mappings in |params|.
+    bool GetParams(std::map<std::string, std::string>* params) const;
+
+   private:
+    // Returns an iterator over the data containing names and params.
+    PickleIterator GetPickleIterator() const;
+
+    // Takes the iterator and writes out the first two items into |trial_name|
+    // and |group_name|.
+    bool ReadStringPair(PickleIterator* iter,
+                        StringPiece* trial_name,
+                        StringPiece* group_name) const;
+  };
+
+  typedef std::vector<ActiveGroup> ActiveGroups;
+
+  // A return value to indicate that a given instance has not yet had a group
+  // assignment (and hence is not yet participating in the trial).
+  static const int kNotFinalized;
+
+  // Disables this trial, meaning it always determines the default group
+  // has been selected. May be called immediately after construction, or
+  // at any time after initialization (should not be interleaved with
+  // AppendGroup calls). Once disabled, there is no way to re-enable a
+  // trial.
+  // TODO(mad): http://code.google.com/p/chromium/issues/detail?id=121446
+  // This doesn't properly reset to Default when a group was forced.
+  void Disable();
+
+  // Establish the name and probability of the next group in this trial.
+  // Sometimes, based on construction randomization, this call may cause the
+  // provided group to be *THE* group selected for use in this instance.
+  // The return value is the group number of the new group.
+  int AppendGroup(const std::string& name, Probability group_probability);
+
+  // Return the name of the FieldTrial (excluding the group name).
+  const std::string& trial_name() const { return trial_name_; }
+
+  // Return the randomly selected group number that was assigned, and notify
+  // any/all observers that this finalized group number has presumably been used
+  // (queried), and will never change. Note that this will force an instance to
+  // participate, and make it illegal to attempt to probabilistically add any
+  // other groups to the trial.
+  int group();
+
+  // If the group's name is empty, a string version containing the group number
+  // is used as the group name. This causes a winner to be chosen if none was.
+  const std::string& group_name();
+
+  // Finalizes the group choice and returns the chosen group, but does not mark
+  // the trial as active - so its state will not be reported until group_name()
+  // or similar is called.
+  const std::string& GetGroupNameWithoutActivation();
+
+  // Set the field trial as forced, meaning that it was set up earlier than
+  // the hard coded registration of the field trial to override it.
+  // This allows the code that was hard coded to register the field trial to
+  // still succeed even though the field trial has already been registered.
+  // This must be called after appending all the groups, since we will make
+  // the group choice here. Note that this is a NOOP for already forced trials.
+  // And, like the rest of the FieldTrial code, this is not thread safe and must
+  // be done from the UI thread.
+  void SetForced();
+
+  // Enabling benchmarking sets field trials to a common setting.
+  static void EnableBenchmarking();
+
+  // Creates a FieldTrial object with the specified parameters, to be used for
+  // simulation of group assignment without actually affecting global field
+  // trial state in the running process. Group assignment will be done based on
+  // |entropy_value|, which must have a range of [0, 1).
+  //
+  // Note: Using this function will not register the field trial globally in the
+  // running process - for that, use FieldTrialList::FactoryGetFieldTrial().
+  //
+  // The ownership of the returned FieldTrial is transferred to the caller,
+  // which is responsible for deref'ing it (e.g. by using
+  // scoped_refptr<FieldTrial>).
+  static FieldTrial* CreateSimulatedFieldTrial(
+      const std::string& trial_name,
+      Probability total_probability,
+      const std::string& default_group_name,
+      double entropy_value);
+
+ private:
+  // Allow tests to access our innards for testing purposes.
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, Registration);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, AbsoluteProbabilities);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, RemainingProbability);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, FiftyFiftyProbability);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, MiddleProbabilities);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, OneWinner);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, DisableProbability);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, ActiveGroups);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, AllGroups);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, ActiveGroupsNotFinalized);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, Save);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, SaveAll);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, DuplicateRestore);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, SetForcedTurnFeatureOff);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, SetForcedTurnFeatureOn);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, SetForcedChangeDefault_Default);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, SetForcedChangeDefault_NonDefault);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, FloatBoundariesGiveEqualGroupSizes);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, DoesNotSurpassTotalProbability);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest,
+                           DoNotAddSimulatedFieldTrialsToAllocator);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest, ClearParamsFromSharedMemory);
+
+  friend class base::FieldTrialList;
+
+  friend class RefCounted<FieldTrial>;
+
+  // This is the group number of the 'default' group when a choice wasn't forced
+  // by a call to FieldTrialList::CreateFieldTrial. It is kept private so that
+  // consumers don't use it by mistake in cases where the group was forced.
+  static const int kDefaultGroupNumber;
+
+  // Creates a field trial with the specified parameters. Group assignment will
+  // be done based on |entropy_value|, which must have a range of [0, 1).
+  FieldTrial(const std::string& trial_name,
+             Probability total_probability,
+             const std::string& default_group_name,
+             double entropy_value);
+  virtual ~FieldTrial();
+
+  // Return the default group name of the FieldTrial.
+  std::string default_group_name() const { return default_group_name_; }
+
+  // Marks this trial as having been registered with the FieldTrialList. Must be
+  // called no more than once and before any |group()| calls have occurred.
+  void SetTrialRegistered();
+
+  // Sets the chosen group name and number.
+  void SetGroupChoice(const std::string& group_name, int number);
+
+  // Ensures that a group is chosen, if it hasn't yet been. The field trial
+  // might yet be disabled, so this call will *not* notify observers of the
+  // status.
+  void FinalizeGroupChoice();
+
+  // Implements FinalizeGroupChoice() with the added flexibility of being
+  // deadlock-free if |is_locked| is true and the caller is holding a lock.
+  void FinalizeGroupChoiceImpl(bool is_locked);
+
+  // Returns the trial name and selected group name for this field trial via
+  // the output parameter |active_group|, but only if the group has already
+  // been chosen and has been externally observed via |group()| and the trial
+  // has not been disabled. In that case, true is returned and |active_group|
+  // is filled in; otherwise, the result is false and |active_group| is left
+  // untouched.
+  bool GetActiveGroup(ActiveGroup* active_group) const;
+
+  // Returns the trial name and selected group name for this field trial via
+  // the output parameter |field_trial_state| for all studies when
+  // |include_expired| is true. When |include_expired| is false, true is
+  // returned and |field_trial_state| is filled in only if the trial has not
+  // been disabled; otherwise, the result is false and |field_trial_state| is
+  // left untouched.
+  // This function is deadlock-free if the caller is holding a lock.
+  bool GetStateWhileLocked(State* field_trial_state, bool include_expired);
+
+  // Returns the group_name. A winner need not have been chosen.
+  std::string group_name_internal() const { return group_name_; }
+
+  // The name of the field trial, as can be found via the FieldTrialList.
+  const std::string trial_name_;
+
+  // The maximum sum of all probabilities supplied, which corresponds to 100%.
+  // This is the scaling factor used to adjust supplied probabilities.
+  const Probability divisor_;
+
+  // The name of the default group.
+  const std::string default_group_name_;
+
+  // The randomly selected probability that is used to select a group (or have
+  // the instance not participate).  It is the product of divisor_ and a random
+  // number between [0, 1).
+  Probability random_;
+
+  // Sum of the probabilities of all appended groups.
+  Probability accumulated_group_probability_;
+
+  // The number that will be returned by the next AppendGroup() call.
+  int next_group_number_;
+
+  // The pseudo-randomly assigned group number.
+  // This is kNotFinalized if no group has been assigned.
+  int group_;
+
+  // A textual name for the randomly selected group. Valid after |group()|
+  // has been called.
+  std::string group_name_;
+
+  // When enable_field_trial_ is false, the field trial reverts to the
+  // 'default' group.
+  bool enable_field_trial_;
+
+  // When forced_ is true, we return the chosen group from AppendGroup when
+  // appropriate.
+  bool forced_;
+
+  // Specifies whether the group choice has been reported to observers.
+  bool group_reported_;
+
+  // Whether this trial is registered with the global FieldTrialList and thus
+  // should notify it when its group is queried.
+  bool trial_registered_;
+
+  // Reference to related field trial struct and data in shared memory.
+  FieldTrialRef ref_;
+
+  // When benchmarking is enabled, field trials all revert to the 'default'
+  // group.
+  static bool enable_benchmarking_;
+
+  DISALLOW_COPY_AND_ASSIGN(FieldTrial);
+};
+
+//------------------------------------------------------------------------------
+// Class with a list of all active field trials.  A trial is active if it has
+// been registered, which includes evaluating its state based on its
+// probability. Only one instance of this class exists and, outside of testing,
+// it will live for the entire lifetime of the process.
+class BASE_EXPORT FieldTrialList {
+ public:
+  typedef SharedPersistentMemoryAllocator FieldTrialAllocator;
+
+  // Type for function pointer passed to |AllParamsToString| used to escape
+  // special characters from |input|.
+  typedef std::string (*EscapeDataFunc)(const std::string& input);
+
+  // Year that is guaranteed to not be expired when instantiating a field trial
+  // via |FactoryGetFieldTrial()|.  Set to two years from the build date.
+  static int kNoExpirationYear;
+
+  // Observer is notified when a FieldTrial's group is selected.
+  class BASE_EXPORT Observer {
+   public:
+    // Notify observers when a FieldTrial's group is selected.
+    virtual void OnFieldTrialGroupFinalized(const std::string& trial_name,
+                                            const std::string& group_name) = 0;
+
+   protected:
+    virtual ~Observer();
+  };
+
+  // This singleton holds the global list of registered FieldTrials.
+  //
+  // To support one-time randomized field trials, specify a non-null
+  // |entropy_provider| which should be a source of uniformly distributed
+  // entropy values. If one time randomization is not desired, pass in null for
+  // |entropy_provider|.
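+  //
+  // For example (illustrative), a process that does not need one-time
+  // randomization can simply do:
+  //   base::FieldTrialList field_trial_list(nullptr);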
+  explicit FieldTrialList(
+      std::unique_ptr<const FieldTrial::EntropyProvider> entropy_provider);
+
+  // Destructor Release()'s references to all registered FieldTrial instances.
+  ~FieldTrialList();
+
+  // Get a FieldTrial instance from the factory.
+  //
+  // |name| is used to register the instance with the FieldTrialList class,
+  // and can be used to find the trial (only one trial can be present for each
+  // name). |default_group_name| is the name of the default group which will
+  // be chosen if none of the subsequent appended groups get to be chosen.
+  // |default_group_number| can receive the group number of the default group,
+  // since AppendGroup returns the numbers of subsequent groups. |trial_name| and
+  // |default_group_name| may not be empty but |default_group_number| can be
+  // NULL if the value is not needed.
+  //
+  // Group probabilities that are later supplied must sum to less than or equal
+  // to the |total_probability|. Arguments |year|, |month| and |day_of_month|
+  // specify the expiration time. If the build time is after the expiration time
+  // then the field trial reverts to the 'default' group.
+  //
+  // Use this static method to get a startup-randomized FieldTrial or a
+  // previously created forced FieldTrial.
+  static FieldTrial* FactoryGetFieldTrial(
+      const std::string& trial_name,
+      FieldTrial::Probability total_probability,
+      const std::string& default_group_name,
+      const int year,
+      const int month,
+      const int day_of_month,
+      FieldTrial::RandomizationType randomization_type,
+      int* default_group_number);
+
+  // Same as FactoryGetFieldTrial(), but allows specifying a custom seed to be
+  // used on one-time randomized field trials (instead of a hash of the trial
+  // name, which is used otherwise or if |randomization_seed| has value 0). The
+  // |randomization_seed| value (other than 0) should never be the same for two
+  // trials, else this would result in correlated group assignments.  Note:
+  // Using a custom randomization seed is only supported by the
+  // PermutedEntropyProvider (which is used when UMA is not enabled). If
+  // |override_entropy_provider| is not null, then it will be used for
+  // randomization instead of the provider given when the FieldTrialList was
+  // instantiated.
+  static FieldTrial* FactoryGetFieldTrialWithRandomizationSeed(
+      const std::string& trial_name,
+      FieldTrial::Probability total_probability,
+      const std::string& default_group_name,
+      const int year,
+      const int month,
+      const int day_of_month,
+      FieldTrial::RandomizationType randomization_type,
+      uint32_t randomization_seed,
+      int* default_group_number,
+      const FieldTrial::EntropyProvider* override_entropy_provider);
+
+  // The Find() method can be used to test to see if a named trial was already
+  // registered, or to retrieve a pointer to it from the global map.
+  static FieldTrial* Find(const std::string& trial_name);
+
+  // Returns the group number chosen for the named trial, or
+  // FieldTrial::kNotFinalized if the trial does not exist.
+  static int FindValue(const std::string& trial_name);
+
+  // Returns the group name chosen for the named trial, or the empty string if
+  // the trial does not exist. The first call of this function on a given field
+  // trial will mark it as active, so that its state will be reported with usage
+  // metrics, crashes, etc.
+  static std::string FindFullName(const std::string& trial_name);
+
+  // Returns true if the named trial has been registered.
+  static bool TrialExists(const std::string& trial_name);
+
+  // Returns true if the named trial exists and has been activated.
+  static bool IsTrialActive(const std::string& trial_name);
+
+  // Creates a persistent representation of active FieldTrial instances for
+  // resurrection in another process. This allows randomization to be done in
+  // one process, and secondary processes can be synchronized on the result.
+  // The resulting string contains the name and group name pairs of all
+  // registered FieldTrials for which the group has been chosen and externally
+  // observed (via |group()|) and which have not been disabled, with "/" used
+  // to separate all names and to terminate the string. This string is parsed
+  // by |CreateTrialsFromString()|.
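+  //
+  // For example, two observed trials "Trial1"/"Group1" and "Trial2"/"Group2"
+  // (hypothetical names) would serialize to "Trial1/Group1/Trial2/Group2/".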
+  static void StatesToString(std::string* output);
+
+  // Creates a persistent representation of all FieldTrial instances for
+  // resurrection in another process. This allows randomization to be done in
+  // one process, and secondary processes can be synchronized on the result.
+  // The resulting string contains the name and group name pairs of all
+  // registered FieldTrials (including disabled ones, depending on
+  // |include_expired|), with "/" used to separate all names and to terminate
+  // the string. All activated trials have their name prefixed with "*". This
+  // string is parsed by |CreateTrialsFromString()|.
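+  //
+  // For example, with hypothetical trials "A" (activated, group "G1") and "B"
+  // (registered but not activated, group "G2"), the output is "*A/G1/B/G2/".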
+  static void AllStatesToString(std::string* output, bool include_expired);
+
+  // Creates a persistent representation of all FieldTrial params for
+  // resurrection in another process. The returned string contains the trial
+  // name and group name pairs of all registered FieldTrials (including
+  // disabled ones, depending on |include_expired|), with the trial and group
+  // names separated by '.'. Each pair is followed by a ':' separator and a
+  // list of param names and values separated by '/'. It also takes an
+  // |encode_data_func| function pointer for encoding special characters.
+  // This string is parsed by |AssociateParamsFromString()|.
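+  //
+  // For example, a hypothetical trial "T" with group "G" and params
+  // {"p": "v", "q": "w"} would serialize to "T.G:p/v/q/w", assuming
+  // |encode_data_func| leaves these characters unchanged.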
+  static std::string AllParamsToString(bool include_expired,
+                                       EscapeDataFunc encode_data_func);
+
+  // Fills in the supplied vector |active_groups| (which must be empty when
+  // called) with a snapshot of all registered FieldTrials for which the group
+  // has been chosen and externally observed (via |group()|) and which have
+  // not been disabled.
+  static void GetActiveFieldTrialGroups(
+      FieldTrial::ActiveGroups* active_groups);
+
+  // Returns the field trials that are marked active in |trials_string|.
+  static void GetActiveFieldTrialGroupsFromString(
+      const std::string& trials_string,
+      FieldTrial::ActiveGroups* active_groups);
+
+  // Returns the field trials that were active when the process was
+  // created. Either parses the field trial string or the shared memory
+  // holding field trial information.
+  // Must be called only after a call to CreateTrialsFromCommandLine().
+  static void GetInitiallyActiveFieldTrials(
+      const base::CommandLine& command_line,
+      FieldTrial::ActiveGroups* active_groups);
+
+  // Use a state string (re: StatesToString()) to augment the current list of
+  // field trials to include the supplied trials, and using a 100% probability
+  // for each trial, force them to have the same group string. This is commonly
+  // used in a non-browser process, to carry randomly selected state in a
+  // browser process into this non-browser process, but could also be invoked
+  // through a command line argument to the browser process. Created field
+  // trials will be marked "used" for the purposes of active trial reporting
+  // if they are prefixed with |kActivationMarker|. Trial names in
+  // |ignored_trial_names| are ignored when parsing |trials_string|.
+  static bool CreateTrialsFromString(
+      const std::string& trials_string,
+      const std::set<std::string>& ignored_trial_names);
+
+  // Achieves the same thing as CreateTrialsFromString(), except it obtains the
+  // trials from the command line, either via a shared memory handle or a
+  // command line argument. The name is a bit of a misnomer, since on POSIX we
+  // simply get the trials by opening |fd_key| if using shared memory. On
+  // Windows, we expect the |cmd_line| switch for |field_trial_handle_switch|
+  // to contain the shared memory handle that contains the field trial
+  // allocator. We need the |field_trial_handle_switch| and |fd_key| arguments
+  // to be passed in since base/ can't depend on content/.
+  static void CreateTrialsFromCommandLine(const base::CommandLine& cmd_line,
+                                          const char* field_trial_handle_switch,
+                                          int fd_key);
+
+  // Creates base::Feature overrides from the command line by first trying to
+  // use shared memory and then falling back to the command line if it fails.
+  static void CreateFeaturesFromCommandLine(
+      const base::CommandLine& command_line,
+      const char* enable_features_switch,
+      const char* disable_features_switch,
+      FeatureList* feature_list);
+
+#if defined(OS_WIN)
+  // On Windows, we need to explicitly pass down any handles to be inherited.
+  // This function adds the shared memory handle to field trial state to the
+  // list of handles to be inherited.
+  static void AppendFieldTrialHandleIfNeeded(
+      base::HandlesToInheritVector* handles);
+#elif defined(OS_FUCHSIA)
+  // TODO(fuchsia): Implement shared-memory configuration (crbug.com/752368).
+#elif defined(OS_POSIX) && !defined(OS_NACL)
+  // On POSIX, we also need to explicitly pass down this file descriptor that
+  // should be shared with the child process. Returns an invalid handle if it
+  // was not initialized properly.
+  static base::SharedMemoryHandle GetFieldTrialHandle();
+#endif
+
+  // Adds a switch to the command line containing the field trial state as a
+  // string (if not using shared memory to share field trial state), or the
+  // shared memory handle + length.
+  // Needs the |field_trial_handle_switch| argument to be passed in since base/
+  // can't depend on content/.
+  static void CopyFieldTrialStateToFlags(const char* field_trial_handle_switch,
+                                         const char* enable_features_switch,
+                                         const char* disable_features_switch,
+                                         base::CommandLine* cmd_line);
+
+  // Creates a FieldTrial with the given |name| and, using 100% probability,
+  // forces the FieldTrial to have the same group string as |group_name|. This
+  // is commonly used in a non-browser process, to carry randomly selected
+  // state in a browser process into this non-browser process. It returns NULL
+  // if a FieldTrial is already registered with the same |name| but has a
+  // different finalized group string (|group_name|).
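+  //
+  // For example, a child process might mirror a group choice already made in
+  // the browser process (hypothetical names):
+  //
+  //   FieldTrial* trial =
+  //       FieldTrialList::CreateFieldTrial("MyTrial", "Enabled");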
+  static FieldTrial* CreateFieldTrial(const std::string& name,
+                                      const std::string& group_name);
+
+  // Add an observer to be notified when a field trial is irrevocably committed
+  // to being part of some specific field_group (and hence the group_name is
+  // also finalized for that field_trial). Returns false and does nothing if
+  // there is no FieldTrialList singleton.
+  static bool AddObserver(Observer* observer);
+
+  // Remove an observer.
+  static void RemoveObserver(Observer* observer);
+
+  // Similar to AddObserver(), but the passed observer will be notified
+  // synchronously when a field trial is activated and its group selected. It
+  // will be notified synchronously on the same thread where the activation and
+  // group selection happened. It is the responsibility of the observer to make
+  // sure that this is a safe operation and the operation must be fast, as this
+  // work is done synchronously as part of group() or related APIs. Only a
+  // single such observer is supported, exposed specifically for crash
+  // reporting. Must be called on the main thread before any other threads
+  // have been started.
+  static void SetSynchronousObserver(Observer* observer);
+
+  // Removes the single synchronous observer.
+  static void RemoveSynchronousObserver(Observer* observer);
+
+  // Grabs the lock if necessary and adds the field trial to the allocator. This
+  // should only be called from FinalizeGroupChoice().
+  static void OnGroupFinalized(bool is_locked, FieldTrial* field_trial);
+
+  // Notify all observers that a group has been finalized for |field_trial|.
+  static void NotifyFieldTrialGroupSelection(FieldTrial* field_trial);
+
+  // Return the number of active field trials.
+  static size_t GetFieldTrialCount();
+
+  // Gets the parameters for |field_trial| from shared memory and stores them in
+  // |params|. This is only exposed for use by FieldTrialParamAssociator and
+  // shouldn't be used by anything else.
+  static bool GetParamsFromSharedMemory(
+      FieldTrial* field_trial,
+      std::map<std::string, std::string>* params);
+
+  // Clears all the params in the allocator.
+  static void ClearParamsFromSharedMemoryForTesting();
+
+  // Dumps field trial state to an allocator so that it can be analyzed after a
+  // crash.
+  static void DumpAllFieldTrialsToPersistentAllocator(
+      PersistentMemoryAllocator* allocator);
+
+  // Retrieves field trial state from an allocator so that it can be analyzed
+  // after a crash. The pointers in the returned vector are into the persistent
+  // memory segment and so are only valid as long as the allocator is valid.
+  static std::vector<const FieldTrial::FieldTrialEntry*>
+  GetAllFieldTrialsFromPersistentAllocator(
+      PersistentMemoryAllocator const& allocator);
+
+  // Returns true if a global field trial list is set. Only used for testing.
+  static bool IsGlobalSetForTesting();
+
+ private:
+  // Allow tests to access our innards for testing purposes.
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest, InstantiateAllocator);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest, AddTrialsToAllocator);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest,
+                           DoNotAddSimulatedFieldTrialsToAllocator);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest, AssociateFieldTrialParams);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest, ClearParamsFromSharedMemory);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest,
+                           SerializeSharedMemoryHandleMetadata);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest, CheckReadOnlySharedMemoryHandle);
+
+  // Serialization is used to pass information about the handle to child
+  // processes. It passes a reference to the relevant OS resource, and it
+  // passes a GUID. Serialization and deserialization don't actually transport
+  // the underlying OS resource; that must be done by the process launcher.
+  static std::string SerializeSharedMemoryHandleMetadata(
+      const SharedMemoryHandle& shm);
+#if defined(OS_WIN) || defined(OS_FUCHSIA)
+  static SharedMemoryHandle DeserializeSharedMemoryHandleMetadata(
+      const std::string& switch_value);
+#elif defined(OS_POSIX) && !defined(OS_NACL)
+  static SharedMemoryHandle DeserializeSharedMemoryHandleMetadata(
+      int fd,
+      const std::string& switch_value);
+#endif
+
+#if defined(OS_WIN) || defined(OS_FUCHSIA)
+  // Takes in |handle_switch| from the command line which represents the shared
+  // memory handle for field trials, parses it, and creates the field trials.
+  // Returns true on success, false on failure.
+  // |switch_value| also contains the serialized GUID.
+  static bool CreateTrialsFromSwitchValue(const std::string& switch_value);
+#elif defined(OS_POSIX) && !defined(OS_NACL)
+  // On POSIX systems that use the zygote, we look up the correct fd that backs
+  // the shared memory segment containing the field trials by looking it up via
+  // an fd key in GlobalDescriptors. Returns true on success, false on failure.
+  // |switch_value| also contains the serialized GUID.
+  static bool CreateTrialsFromDescriptor(int fd_key,
+                                         const std::string& switch_value);
+#endif
+
+  // Takes an unmapped SharedMemoryHandle, creates a SharedMemory object from it
+  // and maps it with the correct size.
+  static bool CreateTrialsFromSharedMemoryHandle(SharedMemoryHandle shm_handle);
+
+  // Expects a mapped piece of shared memory |shm| that was created from the
+  // browser process's field_trial_allocator and shared via the command line.
+  // This function recreates the allocator, iterates through all the field
+  // trials in it, and creates them via CreateFieldTrial(). Returns true if
+  // successful and false otherwise.
+  static bool CreateTrialsFromSharedMemory(
+      std::unique_ptr<base::SharedMemory> shm);
+
+  // Instantiates the field trial allocator, adds all existing field trials to
+  // it, and duplicates its handle to a read-only handle, which gets stored in
+  // |readonly_allocator_handle_|.
+  static void InstantiateFieldTrialAllocatorIfNeeded();
+
+  // Adds the field trial to the allocator. Caller must hold a lock before
+  // calling this.
+  static void AddToAllocatorWhileLocked(PersistentMemoryAllocator* allocator,
+                                        FieldTrial* field_trial);
+
+  // Activate the corresponding field trial entry struct in shared memory.
+  static void ActivateFieldTrialEntryWhileLocked(FieldTrial* field_trial);
+
+  // A map from FieldTrial names to the actual instances.
+  typedef std::map<std::string, FieldTrial*> RegistrationMap;
+
+  // If one-time randomization is enabled, returns a weak pointer to the
+  // corresponding EntropyProvider. Otherwise, returns NULL.
+  static const FieldTrial::EntropyProvider*
+      GetEntropyProviderForOneTimeRandomization();
+
+  // Helper function that should be called only while holding lock_.
+  FieldTrial* PreLockedFind(const std::string& name);
+
+  // Register() stores a pointer to the given trial in a global map.
+  // This method also AddRef's the indicated trial.
+  // This should always be called after creating a new FieldTrial instance.
+  static void Register(FieldTrial* trial);
+
+  // Returns all the registered trials.
+  static RegistrationMap GetRegisteredTrials();
+
+  static FieldTrialList* global_;  // The singleton of this class.
+
+  // This will tell us if there is an attempt to register a field
+  // trial or check if one-time randomization is enabled without
+  // creating the FieldTrialList. This is not an error, unless a
+  // FieldTrialList is created after that.
+  static bool used_without_global_;
+
+  // Lock for access to registered_ and field_trial_allocator_.
+  Lock lock_;
+  RegistrationMap registered_;
+
+  std::map<std::string, std::string> seen_states_;
+
+  // Entropy provider to be used for one-time randomized field trials. If NULL,
+  // one-time randomization is not supported.
+  std::unique_ptr<const FieldTrial::EntropyProvider> entropy_provider_;
+
+  // List of observers to be notified when a group is selected for a FieldTrial.
+  scoped_refptr<ObserverListThreadSafe<Observer> > observer_list_;
+
+  // Single synchronous observer to be notified when a trial group is chosen.
+  Observer* synchronous_observer_ = nullptr;
+
+  // Allocator in shared memory containing field trial data. Used in both
+  // browser and child processes, but readonly in the child.
+  // In the future, we may want to move this to a more generic place if we want
+  // to start passing more data other than field trials.
+  std::unique_ptr<FieldTrialAllocator> field_trial_allocator_ = nullptr;
+
+  // Readonly copy of the handle to the allocator. Needs to be a member variable
+  // because it's needed from both CopyFieldTrialStateToFlags() and
+  // AppendFieldTrialHandleIfNeeded().
+  base::SharedMemoryHandle readonly_allocator_handle_;
+
+  // Tracks whether CreateTrialsFromCommandLine() has been called.
+  bool create_trials_from_command_line_called_ = false;
+
+  DISALLOW_COPY_AND_ASSIGN(FieldTrialList);
+};
+
+}  // namespace base
+
+#endif  // BASE_METRICS_FIELD_TRIAL_H_
diff --git a/base/metrics/field_trial_param_associator.cc b/base/metrics/field_trial_param_associator.cc
new file mode 100644
index 0000000..af76eaf
--- /dev/null
+++ b/base/metrics/field_trial_param_associator.cc
@@ -0,0 +1,88 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/field_trial_param_associator.h"
+
+#include "base/metrics/field_trial.h"
+
+namespace base {
+
+FieldTrialParamAssociator::FieldTrialParamAssociator() = default;
+FieldTrialParamAssociator::~FieldTrialParamAssociator() = default;
+
+// static
+FieldTrialParamAssociator* FieldTrialParamAssociator::GetInstance() {
+  return Singleton<FieldTrialParamAssociator,
+                   LeakySingletonTraits<FieldTrialParamAssociator>>::get();
+}
+
+bool FieldTrialParamAssociator::AssociateFieldTrialParams(
+    const std::string& trial_name,
+    const std::string& group_name,
+    const FieldTrialParams& params) {
+  if (FieldTrialList::IsTrialActive(trial_name))
+    return false;
+
+  AutoLock scoped_lock(lock_);
+  const FieldTrialKey key(trial_name, group_name);
+  if (ContainsKey(field_trial_params_, key))
+    return false;
+
+  field_trial_params_[key] = params;
+  return true;
+}
+
+bool FieldTrialParamAssociator::GetFieldTrialParams(
+    const std::string& trial_name,
+    FieldTrialParams* params) {
+  FieldTrial* field_trial = FieldTrialList::Find(trial_name);
+  if (!field_trial)
+    return false;
+
+  // First try the local map, falling back to getting it from shared memory.
+  if (GetFieldTrialParamsWithoutFallback(trial_name, field_trial->group_name(),
+                                         params)) {
+    return true;
+  }
+
+  // TODO(lawrencewu): add the params to field_trial_params_ for next time.
+  return FieldTrialList::GetParamsFromSharedMemory(field_trial, params);
+}
+
+bool FieldTrialParamAssociator::GetFieldTrialParamsWithoutFallback(
+    const std::string& trial_name,
+    const std::string& group_name,
+    FieldTrialParams* params) {
+  AutoLock scoped_lock(lock_);
+
+  const FieldTrialKey key(trial_name, group_name);
+  if (!ContainsKey(field_trial_params_, key))
+    return false;
+
+  *params = field_trial_params_[key];
+  return true;
+}
+
+void FieldTrialParamAssociator::ClearAllParamsForTesting() {
+  {
+    AutoLock scoped_lock(lock_);
+    field_trial_params_.clear();
+  }
+  FieldTrialList::ClearParamsFromSharedMemoryForTesting();
+}
+
+void FieldTrialParamAssociator::ClearParamsForTesting(
+    const std::string& trial_name,
+    const std::string& group_name) {
+  AutoLock scoped_lock(lock_);
+  const FieldTrialKey key(trial_name, group_name);
+  field_trial_params_.erase(key);
+}
+
+void FieldTrialParamAssociator::ClearAllCachedParamsForTesting() {
+  AutoLock scoped_lock(lock_);
+  field_trial_params_.clear();
+}
+
+}  // namespace base
diff --git a/base/metrics/field_trial_param_associator.h b/base/metrics/field_trial_param_associator.h
new file mode 100644
index 0000000..b35e2cc
--- /dev/null
+++ b/base/metrics/field_trial_param_associator.h
@@ -0,0 +1,76 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_FIELD_TRIAL_PARAM_ASSOCIATOR_H_
+#define BASE_METRICS_FIELD_TRIAL_PARAM_ASSOCIATOR_H_
+
+#include <map>
+#include <string>
+#include <utility>
+
+#include "base/base_export.h"
+#include "base/memory/singleton.h"
+#include "base/metrics/field_trial.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+
+// Keeps track of the parameters of all field trials and ensures access to them
+// is thread-safe.
+class BASE_EXPORT FieldTrialParamAssociator {
+ public:
+  FieldTrialParamAssociator();
+  ~FieldTrialParamAssociator();
+
+  // Key-value mapping type for field trial parameters.
+  typedef std::map<std::string, std::string> FieldTrialParams;
+
+  // Retrieve the singleton.
+  static FieldTrialParamAssociator* GetInstance();
+
+  // Sets parameters for the given field trial name and group.
+  bool AssociateFieldTrialParams(const std::string& trial_name,
+                                 const std::string& group_name,
+                                 const FieldTrialParams& params);
+
+  // Gets the parameters for a field trial and its chosen group. If not found
+  // in field_trial_params_, then tries to look them up in shared memory.
+  bool GetFieldTrialParams(const std::string& trial_name,
+                           FieldTrialParams* params);
+
+  // Gets the parameters for a field trial and its chosen group. Does not fall
+  // back to looking them up in shared memory. This should only be used if you
+  // know for sure the params are in the mapping, like if you're in the browser
+  // process, and even then you should probably just use GetFieldTrialParams().
+  bool GetFieldTrialParamsWithoutFallback(const std::string& trial_name,
+                                          const std::string& group_name,
+                                          FieldTrialParams* params);
+
+  // Clears the internal field_trial_params_ mapping, plus removes all params in
+  // shared memory.
+  void ClearAllParamsForTesting();
+
+  // Clears a single field trial param.
+  // Note: this does NOT remove the param in shared memory.
+  void ClearParamsForTesting(const std::string& trial_name,
+                             const std::string& group_name);
+
+  // Clears the internal field_trial_params_ mapping.
+  void ClearAllCachedParamsForTesting();
+
+ private:
+  friend struct DefaultSingletonTraits<FieldTrialParamAssociator>;
+
+  // (field_trial_name, field_trial_group)
+  typedef std::pair<std::string, std::string> FieldTrialKey;
+
+  Lock lock_;
+  std::map<FieldTrialKey, FieldTrialParams> field_trial_params_;
+
+  DISALLOW_COPY_AND_ASSIGN(FieldTrialParamAssociator);
+};
+
+}  // namespace base
+
+#endif  // BASE_METRICS_FIELD_TRIAL_PARAM_ASSOCIATOR_H_
diff --git a/base/metrics/field_trial_params.cc b/base/metrics/field_trial_params.cc
new file mode 100644
index 0000000..7195f4a
--- /dev/null
+++ b/base/metrics/field_trial_params.cc
@@ -0,0 +1,149 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/field_trial_params.h"
+
+#include "base/feature_list.h"
+#include "base/metrics/field_trial.h"
+#include "base/metrics/field_trial_param_associator.h"
+#include "base/strings/string_number_conversions.h"
+
+namespace base {
+
+bool AssociateFieldTrialParams(
+    const std::string& trial_name,
+    const std::string& group_name,
+    const std::map<std::string, std::string>& params) {
+  return base::FieldTrialParamAssociator::GetInstance()
+      ->AssociateFieldTrialParams(trial_name, group_name, params);
+}
+
+bool GetFieldTrialParams(const std::string& trial_name,
+                         std::map<std::string, std::string>* params) {
+  return base::FieldTrialParamAssociator::GetInstance()->GetFieldTrialParams(
+      trial_name, params);
+}
+
+bool GetFieldTrialParamsByFeature(const base::Feature& feature,
+                                  std::map<std::string, std::string>* params) {
+  if (!base::FeatureList::IsEnabled(feature))
+    return false;
+
+  base::FieldTrial* trial = base::FeatureList::GetFieldTrial(feature);
+  if (!trial)
+    return false;
+
+  return GetFieldTrialParams(trial->trial_name(), params);
+}
+
+std::string GetFieldTrialParamValue(const std::string& trial_name,
+                                    const std::string& param_name) {
+  std::map<std::string, std::string> params;
+  if (GetFieldTrialParams(trial_name, &params)) {
+    std::map<std::string, std::string>::iterator it = params.find(param_name);
+    if (it != params.end())
+      return it->second;
+  }
+  return std::string();
+}
+
+std::string GetFieldTrialParamValueByFeature(const base::Feature& feature,
+                                             const std::string& param_name) {
+  if (!base::FeatureList::IsEnabled(feature))
+    return std::string();
+
+  base::FieldTrial* trial = base::FeatureList::GetFieldTrial(feature);
+  if (!trial)
+    return std::string();
+
+  return GetFieldTrialParamValue(trial->trial_name(), param_name);
+}
+
+int GetFieldTrialParamByFeatureAsInt(const base::Feature& feature,
+                                     const std::string& param_name,
+                                     int default_value) {
+  std::string value_as_string =
+      GetFieldTrialParamValueByFeature(feature, param_name);
+  int value_as_int = 0;
+  if (!base::StringToInt(value_as_string, &value_as_int)) {
+    if (!value_as_string.empty()) {
+      DLOG(WARNING) << "Failed to parse field trial param " << param_name
+                    << " with string value " << value_as_string
+                    << " under feature " << feature.name
+                    << " into an int. Falling back to default value of "
+                    << default_value;
+    }
+    value_as_int = default_value;
+  }
+  return value_as_int;
+}
+
+double GetFieldTrialParamByFeatureAsDouble(const base::Feature& feature,
+                                           const std::string& param_name,
+                                           double default_value) {
+  std::string value_as_string =
+      GetFieldTrialParamValueByFeature(feature, param_name);
+  double value_as_double = 0;
+  if (!base::StringToDouble(value_as_string, &value_as_double)) {
+    if (!value_as_string.empty()) {
+      DLOG(WARNING) << "Failed to parse field trial param " << param_name
+                    << " with string value " << value_as_string
+                    << " under feature " << feature.name
+                    << " into a double. Falling back to default value of "
+                    << default_value;
+    }
+    value_as_double = default_value;
+  }
+  return value_as_double;
+}
+
+bool GetFieldTrialParamByFeatureAsBool(const base::Feature& feature,
+                                       const std::string& param_name,
+                                       bool default_value) {
+  std::string value_as_string =
+      GetFieldTrialParamValueByFeature(feature, param_name);
+  if (value_as_string == "true")
+    return true;
+  if (value_as_string == "false")
+    return false;
+
+  if (!value_as_string.empty()) {
+    DLOG(WARNING) << "Failed to parse field trial param " << param_name
+                  << " with string value " << value_as_string
+                  << " under feature " << feature.name
+                  << " into a bool. Falling back to default value of "
+                  << default_value;
+  }
+  return default_value;
+}
+
+std::string FeatureParam<std::string>::Get() const {
+  const std::string value = GetFieldTrialParamValueByFeature(*feature, name);
+  return value.empty() ? default_value : value;
+}
+
+double FeatureParam<double>::Get() const {
+  return GetFieldTrialParamByFeatureAsDouble(*feature, name, default_value);
+}
+
+int FeatureParam<int>::Get() const {
+  return GetFieldTrialParamByFeatureAsInt(*feature, name, default_value);
+}
+
+bool FeatureParam<bool>::Get() const {
+  return GetFieldTrialParamByFeatureAsBool(*feature, name, default_value);
+}
+
+void LogInvalidEnumValue(const base::Feature& feature,
+                         const std::string& param_name,
+                         const std::string& value_as_string,
+                         int default_value_as_int) {
+  DLOG(WARNING) << "Failed to parse field trial param " << param_name
+                << " with string value " << value_as_string << " under feature "
+                << feature.name
+                << " into an enum. Falling back to default value of "
+                << default_value_as_int;
+}
+
+}  // namespace base
diff --git a/base/metrics/field_trial_params.h b/base/metrics/field_trial_params.h
new file mode 100644
index 0000000..8682226
--- /dev/null
+++ b/base/metrics/field_trial_params.h
@@ -0,0 +1,258 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_FIELD_TRIAL_PARAMS_H_
+#define BASE_METRICS_FIELD_TRIAL_PARAMS_H_
+
+#include <map>
+#include <string>
+
+#include "base/base_export.h"
+
+namespace base {
+
+struct Feature;
+
+// Associates the specified set of key-value |params| with the field trial
+// specified by |trial_name| and |group_name|. Fails and returns false if the
+// specified field trial already has params associated with it or the trial
+// is already active (group() has been called on it). Thread safe.
+BASE_EXPORT bool AssociateFieldTrialParams(
+    const std::string& trial_name,
+    const std::string& group_name,
+    const std::map<std::string, std::string>& params);
+
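+// A minimal usage sketch (hypothetical trial, group, and param names):
+//
+//   std::map<std::string, std::string> params = {{"delay_ms", "200"}};
+//   bool ok = base::AssociateFieldTrialParams("MyTrial", "Enabled", params);
+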
+// Retrieves the set of key-value |params| for the specified field trial, based
+// on its selected group. If the field trial does not exist or its selected
+// group does not have any parameters associated with it, returns false and
+// does not modify |params|. Calling this function will result in the field
+// trial being marked as active if found (i.e. group() will be called on it),
+// if it wasn't already. Thread safe.
+BASE_EXPORT bool GetFieldTrialParams(
+    const std::string& trial_name,
+    std::map<std::string, std::string>* params);
+
+// Retrieves the set of key-value |params| for the field trial associated with
+// the specified |feature|. A feature is associated with at most one field
+// trial and selected group. See base/feature_list.h for more information on
+// features. If the feature is not enabled, or if there's no associated params,
+// returns false and does not modify |params|. Calling this function will
+// result in the associated field trial being marked as active if found (i.e.
+// group() will be called on it), if it wasn't already. Thread safe.
+BASE_EXPORT bool GetFieldTrialParamsByFeature(
+    const base::Feature& feature,
+    std::map<std::string, std::string>* params);
+
+// Retrieves a specific parameter value corresponding to |param_name| for the
+// specified field trial, based on its selected group. If the field trial does
+// not exist or the specified parameter does not exist, returns an empty
+// string. Calling this function will result in the field trial being marked as
+// active if found (i.e. group() will be called on it), if it wasn't already.
+// Thread safe.
+BASE_EXPORT std::string GetFieldTrialParamValue(const std::string& trial_name,
+                                                const std::string& param_name);
+
+// Retrieves a specific parameter value corresponding to |param_name| for the
+// field trial associated with the specified |feature|. A feature is associated
+// with at most one field trial and selected group. See base/feature_list.h for
+// more information on features. If the feature is not enabled, or the
+// specified parameter does not exist, returns an empty string. Calling this
+// function will result in the associated field trial being marked as active if
+// found (i.e. group() will be called on it), if it wasn't already. Thread safe.
+BASE_EXPORT std::string GetFieldTrialParamValueByFeature(
+    const base::Feature& feature,
+    const std::string& param_name);
+
+// Same as GetFieldTrialParamValueByFeature(). On top of that, it converts the
+// string value into an int using base::StringToInt() and returns it, if
+// successful. Otherwise, it returns |default_value|. If the string value is not
+// empty and the conversion does not succeed, it logs a warning.
+BASE_EXPORT int GetFieldTrialParamByFeatureAsInt(const base::Feature& feature,
+                                                 const std::string& param_name,
+                                                 int default_value);
+
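+// For example (hypothetical feature and param names; falls back to 16 if the
+// param is missing or doesn't parse as an int):
+//
+//   int batch_size =
+//       base::GetFieldTrialParamByFeatureAsInt(kMyFeature, "batch_size", 16);
+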
+// Same as GetFieldTrialParamValueByFeature(). On top of that, it converts the
+// string value into a double using base::StringToDouble() and returns it, if
+// successful. Otherwise, it returns |default_value|. If the string value is not
+// empty and the conversion does not succeed, it logs a warning.
+BASE_EXPORT double GetFieldTrialParamByFeatureAsDouble(
+    const base::Feature& feature,
+    const std::string& param_name,
+    double default_value);
+
+// Same as GetFieldTrialParamValueByFeature(). On top of that, it converts the
+// string value into a boolean and returns it, if successful. Otherwise, it
+// returns |default_value|. The only string representations accepted here are
+// "true" and "false". If the string value is not empty and the conversion does
+// not succeed, it produces a warning to LOG.
+BASE_EXPORT bool GetFieldTrialParamByFeatureAsBool(
+    const base::Feature& feature,
+    const std::string& param_name,
+    bool default_value);
+
+// Shared declaration for various FeatureParam<T> types.
+//
+// This template is defined for the following types T:
+//   bool
+//   int
+//   double
+//   std::string
+//   enum types
+//
+// See the individual definitions below for the appropriate interfaces.
+// Attempting to use it with any other type is a compile error.
+template <typename T, bool IsEnum = std::is_enum<T>::value>
+struct FeatureParam {
+  // Prevent use of FeatureParam<> with unsupported types (e.g. void*). Uses T
+  // in its definition so that evaluation is deferred until the template is
+  // instantiated.
+  static_assert(!std::is_same<T, T>::value, "unsupported FeatureParam<> type");
+};
+
+// Declares a string-valued parameter. Example:
+//
+//     constexpr FeatureParam<std::string> kAssistantName{
+//         &kAssistantFeature, "assistant_name", "HAL"};
+//
+// If the parameter is not set, or is set to the empty string, then Get() will
+// return the default value.
+template <>
+struct FeatureParam<std::string> {
+  constexpr FeatureParam(const Feature* feature,
+                         const char* name,
+                         const char* default_value)
+      : feature(feature), name(name), default_value(default_value) {}
+
+  BASE_EXPORT std::string Get() const;
+
+  const Feature* const feature;
+  const char* const name;
+  const char* const default_value;
+};
+
+// Declares a double-valued parameter. Example:
+//
+//     constexpr FeatureParam<double> kAssistantTriggerThreshold{
+//         &kAssistantFeature, "trigger_threshold", 0.10};
+//
+// If the parameter is not set, or is set to an invalid double value, then
+// Get() will return the default value.
+template <>
+struct FeatureParam<double> {
+  constexpr FeatureParam(const Feature* feature,
+                         const char* name,
+                         double default_value)
+      : feature(feature), name(name), default_value(default_value) {}
+
+  BASE_EXPORT double Get() const;
+
+  const Feature* const feature;
+  const char* const name;
+  const double default_value;
+};
+
+// Declares an int-valued parameter. Example:
+//
+//     constexpr FeatureParam<int> kAssistantParallelism{
+//         &kAssistantFeature, "parallelism", 4};
+//
+// If the parameter is not set, or is set to an invalid int value, then Get()
+// will return the default value.
+template <>
+struct FeatureParam<int> {
+  constexpr FeatureParam(const Feature* feature,
+                         const char* name,
+                         int default_value)
+      : feature(feature), name(name), default_value(default_value) {}
+
+  BASE_EXPORT int Get() const;
+
+  const Feature* const feature;
+  const char* const name;
+  const int default_value;
+};
+
+// Declares a bool-valued parameter. Example:
+//
+//     constexpr FeatureParam<bool> kAssistantIsHelpful{
+//         &kAssistantFeature, "is_helpful", true};
+//
+// If the parameter is not set, or is set to a value other than "true" or
+// "false", then Get() will return the default value.
+template <>
+struct FeatureParam<bool> {
+  constexpr FeatureParam(const Feature* feature,
+                         const char* name,
+                         bool default_value)
+      : feature(feature), name(name), default_value(default_value) {}
+
+  BASE_EXPORT bool Get() const;
+
+  const Feature* const feature;
+  const char* const name;
+  const bool default_value;
+};
+
+BASE_EXPORT void LogInvalidEnumValue(const Feature& feature,
+                                     const std::string& param_name,
+                                     const std::string& value_as_string,
+                                     int default_value_as_int);
+
+// Feature param declaration for an enum, with associated options. Example:
+//
+//     constexpr FeatureParam<ShapeEnum>::Option kShapeParamOptions[] = {
+//         {SHAPE_CIRCLE, "circle"},
+//         {SHAPE_CYLINDER, "cylinder"},
+//         {SHAPE_PAPERCLIP, "paperclip"}};
+//     constexpr FeatureParam<ShapeEnum> kAssistantShapeParam{
+//         &kAssistantFeature, "shape", SHAPE_CIRCLE, &kShapeParamOptions};
+//
+// With this declaration, the parameter may be set to "circle", "cylinder", or
+// "paperclip", and that will be translated to one of the three enum values. By
+// default, or if the param is set to an unknown value, the parameter will be
+// assumed to be SHAPE_CIRCLE.
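+//
+// Reading the value at runtime is then just (names as above):
+//
+//     ShapeEnum shape = kAssistantShapeParam.Get();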
+template <typename Enum>
+struct FeatureParam<Enum, true> {
+  struct Option {
+    constexpr Option(Enum value, const char* name) : value(value), name(name) {}
+
+    const Enum value;
+    const char* const name;
+  };
+
+  template <size_t option_count>
+  constexpr FeatureParam(const Feature* feature,
+                         const char* name,
+                         const Enum default_value,
+                         const Option (*options)[option_count])
+      : feature(feature),
+        name(name),
+        default_value(default_value),
+        options(*options),
+        option_count(option_count) {
+    static_assert(option_count >= 1, "FeatureParam<enum> has no options");
+  }
+
+  Enum Get() const {
+    std::string value = GetFieldTrialParamValueByFeature(*feature, name);
+    if (value.empty())
+      return default_value;
+    for (size_t i = 0; i < option_count; ++i) {
+      if (value == options[i].name)
+        return options[i].value;
+    }
+    LogInvalidEnumValue(*feature, name, value, static_cast<int>(default_value));
+    return default_value;
+  }
+
+  const base::Feature* const feature;
+  const char* const name;
+  const Enum default_value;
+  const Option* const options;
+  const size_t option_count;
+};
+
+}  // namespace base
+
+#endif  // BASE_METRICS_FIELD_TRIAL_PARAMS_H_
diff --git a/base/metrics/field_trial_params_unittest.cc b/base/metrics/field_trial_params_unittest.cc
new file mode 100644
index 0000000..d310c0d
--- /dev/null
+++ b/base/metrics/field_trial_params_unittest.cc
@@ -0,0 +1,458 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/field_trial_params.h"
+
+#include "base/feature_list.h"
+#include "base/macros.h"
+#include "base/metrics/field_trial.h"
+#include "base/metrics/field_trial_param_associator.h"
+#include "base/test/scoped_feature_list.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+// Call FieldTrialList::FactoryGetFieldTrial() with a future expiry date.
+scoped_refptr<FieldTrial> CreateFieldTrial(
+    const std::string& trial_name,
+    int total_probability,
+    const std::string& default_group_name,
+    int* default_group_number) {
+  return FieldTrialList::FactoryGetFieldTrial(
+      trial_name, total_probability, default_group_name,
+      FieldTrialList::kNoExpirationYear, 1, 1, FieldTrial::SESSION_RANDOMIZED,
+      default_group_number);
+}
+
+}  // namespace
+
+class FieldTrialParamsTest : public ::testing::Test {
+ public:
+  FieldTrialParamsTest() : field_trial_list_(nullptr) {}
+
+  ~FieldTrialParamsTest() override {
+    // Ensure that the maps are cleared between tests, since they are stored as
+    // process singletons.
+    FieldTrialParamAssociator::GetInstance()->ClearAllParamsForTesting();
+  }
+
+  void CreateFeatureWithTrial(const Feature& feature,
+                              FeatureList::OverrideState override_state,
+                              FieldTrial* trial) {
+    std::unique_ptr<FeatureList> feature_list(new FeatureList);
+    feature_list->RegisterFieldTrialOverride(feature.name, override_state,
+                                             trial);
+    scoped_feature_list_.InitWithFeatureList(std::move(feature_list));
+  }
+
+ private:
+  FieldTrialList field_trial_list_;
+  test::ScopedFeatureList scoped_feature_list_;
+
+  DISALLOW_COPY_AND_ASSIGN(FieldTrialParamsTest);
+};
+
+TEST_F(FieldTrialParamsTest, AssociateFieldTrialParams) {
+  const std::string kTrialName = "AssociateFieldTrialParams";
+
+  {
+    std::map<std::string, std::string> params;
+    params["a"] = "10";
+    params["b"] = "test";
+    ASSERT_TRUE(AssociateFieldTrialParams(kTrialName, "A", params));
+  }
+  {
+    std::map<std::string, std::string> params;
+    params["a"] = "5";
+    ASSERT_TRUE(AssociateFieldTrialParams(kTrialName, "B", params));
+  }
+
+  FieldTrialList::CreateFieldTrial(kTrialName, "B");
+  EXPECT_EQ("5", GetFieldTrialParamValue(kTrialName, "a"));
+  EXPECT_EQ(std::string(), GetFieldTrialParamValue(kTrialName, "b"));
+  EXPECT_EQ(std::string(), GetFieldTrialParamValue(kTrialName, "x"));
+
+  std::map<std::string, std::string> params;
+  EXPECT_TRUE(GetFieldTrialParams(kTrialName, &params));
+  EXPECT_EQ(1U, params.size());
+  EXPECT_EQ("5", params["a"]);
+}
+
+TEST_F(FieldTrialParamsTest, AssociateFieldTrialParams_Fail) {
+  const std::string kTrialName = "AssociateFieldTrialParams_Fail";
+  const std::string kGroupName = "A";
+
+  std::map<std::string, std::string> params;
+  params["a"] = "10";
+  ASSERT_TRUE(AssociateFieldTrialParams(kTrialName, kGroupName, params));
+  params["a"] = "1";
+  params["b"] = "2";
+  ASSERT_FALSE(AssociateFieldTrialParams(kTrialName, kGroupName, params));
+
+  FieldTrialList::CreateFieldTrial(kTrialName, kGroupName);
+  EXPECT_EQ("10", GetFieldTrialParamValue(kTrialName, "a"));
+  EXPECT_EQ(std::string(), GetFieldTrialParamValue(kTrialName, "b"));
+}
+
+TEST_F(FieldTrialParamsTest, AssociateFieldTrialParams_TrialActiveFail) {
+  const std::string kTrialName = "AssociateFieldTrialParams_TrialActiveFail";
+  FieldTrialList::CreateFieldTrial(kTrialName, "A");
+  ASSERT_EQ("A", FieldTrialList::FindFullName(kTrialName));
+
+  std::map<std::string, std::string> params;
+  params["a"] = "10";
+  EXPECT_FALSE(AssociateFieldTrialParams(kTrialName, "B", params));
+  EXPECT_FALSE(AssociateFieldTrialParams(kTrialName, "A", params));
+}
+
+TEST_F(FieldTrialParamsTest, AssociateFieldTrialParams_DoesntActivateTrial) {
+  const std::string kTrialName =
+      "AssociateFieldTrialParams_DoesntActivateTrial";
+
+  ASSERT_FALSE(FieldTrialList::IsTrialActive(kTrialName));
+  scoped_refptr<FieldTrial> trial(
+      CreateFieldTrial(kTrialName, 100, "A", nullptr));
+  ASSERT_FALSE(FieldTrialList::IsTrialActive(kTrialName));
+
+  std::map<std::string, std::string> params;
+  params["a"] = "10";
+  EXPECT_TRUE(AssociateFieldTrialParams(kTrialName, "A", params));
+  ASSERT_FALSE(FieldTrialList::IsTrialActive(kTrialName));
+}
+
+TEST_F(FieldTrialParamsTest, GetFieldTrialParams_NoTrial) {
+  const std::string kTrialName = "GetFieldTrialParams_NoParams";
+
+  std::map<std::string, std::string> params;
+  EXPECT_FALSE(GetFieldTrialParams(kTrialName, &params));
+  EXPECT_EQ(std::string(), GetFieldTrialParamValue(kTrialName, "x"));
+  EXPECT_EQ(std::string(), GetFieldTrialParamValue(kTrialName, "y"));
+}
+
+TEST_F(FieldTrialParamsTest, GetFieldTrialParams_NoParams) {
+  const std::string kTrialName = "GetFieldTrialParams_NoParams";
+
+  FieldTrialList::CreateFieldTrial(kTrialName, "A");
+
+  std::map<std::string, std::string> params;
+  EXPECT_FALSE(GetFieldTrialParams(kTrialName, &params));
+  EXPECT_EQ(std::string(), GetFieldTrialParamValue(kTrialName, "x"));
+  EXPECT_EQ(std::string(), GetFieldTrialParamValue(kTrialName, "y"));
+}
+
+TEST_F(FieldTrialParamsTest, GetFieldTrialParams_ActivatesTrial) {
+  const std::string kTrialName = "GetFieldTrialParams_ActivatesTrial";
+
+  ASSERT_FALSE(FieldTrialList::IsTrialActive(kTrialName));
+  scoped_refptr<FieldTrial> trial(
+      CreateFieldTrial(kTrialName, 100, "A", nullptr));
+  ASSERT_FALSE(FieldTrialList::IsTrialActive(kTrialName));
+
+  std::map<std::string, std::string> params;
+  EXPECT_FALSE(GetFieldTrialParams(kTrialName, &params));
+  ASSERT_TRUE(FieldTrialList::IsTrialActive(kTrialName));
+}
+
+TEST_F(FieldTrialParamsTest, GetFieldTrialParamValue_ActivatesTrial) {
+  const std::string kTrialName = "GetFieldTrialParamValue_ActivatesTrial";
+
+  ASSERT_FALSE(FieldTrialList::IsTrialActive(kTrialName));
+  scoped_refptr<FieldTrial> trial(
+      CreateFieldTrial(kTrialName, 100, "A", nullptr));
+  ASSERT_FALSE(FieldTrialList::IsTrialActive(kTrialName));
+
+  std::map<std::string, std::string> params;
+  EXPECT_EQ(std::string(), GetFieldTrialParamValue(kTrialName, "x"));
+  ASSERT_TRUE(FieldTrialList::IsTrialActive(kTrialName));
+}
+
+TEST_F(FieldTrialParamsTest, GetFieldTrialParamsByFeature) {
+  const std::string kTrialName = "GetFieldTrialParamsByFeature";
+  const Feature kFeature{"TestFeature", FEATURE_DISABLED_BY_DEFAULT};
+
+  std::map<std::string, std::string> params;
+  params["x"] = "1";
+  AssociateFieldTrialParams(kTrialName, "A", params);
+  scoped_refptr<FieldTrial> trial(
+      CreateFieldTrial(kTrialName, 100, "A", nullptr));
+
+  CreateFeatureWithTrial(kFeature, FeatureList::OVERRIDE_ENABLE_FEATURE,
+                         trial.get());
+
+  std::map<std::string, std::string> actualParams;
+  EXPECT_TRUE(GetFieldTrialParamsByFeature(kFeature, &actualParams));
+  EXPECT_EQ(params, actualParams);
+}
+
+TEST_F(FieldTrialParamsTest, GetFieldTrialParamValueByFeature) {
+  const std::string kTrialName = "GetFieldTrialParamsByFeature";
+  const Feature kFeature{"TestFeature", FEATURE_DISABLED_BY_DEFAULT};
+
+  std::map<std::string, std::string> params;
+  params["x"] = "1";
+  AssociateFieldTrialParams(kTrialName, "A", params);
+  scoped_refptr<FieldTrial> trial(
+      CreateFieldTrial(kTrialName, 100, "A", nullptr));
+
+  CreateFeatureWithTrial(kFeature, FeatureList::OVERRIDE_ENABLE_FEATURE,
+                         trial.get());
+
+  EXPECT_EQ(params["x"], GetFieldTrialParamValueByFeature(kFeature, "x"));
+}
+
+TEST_F(FieldTrialParamsTest, GetFieldTrialParamsByFeature_Disable) {
+  const std::string kTrialName = "GetFieldTrialParamsByFeature";
+  const Feature kFeature{"TestFeature", FEATURE_DISABLED_BY_DEFAULT};
+
+  std::map<std::string, std::string> params;
+  params["x"] = "1";
+  AssociateFieldTrialParams(kTrialName, "A", params);
+  scoped_refptr<FieldTrial> trial(
+      CreateFieldTrial(kTrialName, 100, "A", nullptr));
+
+  CreateFeatureWithTrial(kFeature, FeatureList::OVERRIDE_DISABLE_FEATURE,
+                         trial.get());
+
+  std::map<std::string, std::string> actualParams;
+  EXPECT_FALSE(GetFieldTrialParamsByFeature(kFeature, &actualParams));
+}
+
+TEST_F(FieldTrialParamsTest, GetFieldTrialParamValueByFeature_Disable) {
+  const std::string kTrialName = "GetFieldTrialParamsByFeature";
+  const Feature kFeature{"TestFeature", FEATURE_DISABLED_BY_DEFAULT};
+
+  std::map<std::string, std::string> params;
+  params["x"] = "1";
+  AssociateFieldTrialParams(kTrialName, "A", params);
+  scoped_refptr<FieldTrial> trial(
+      CreateFieldTrial(kTrialName, 100, "A", nullptr));
+
+  CreateFeatureWithTrial(kFeature, FeatureList::OVERRIDE_DISABLE_FEATURE,
+                         trial.get());
+
+  std::map<std::string, std::string> actualParams;
+  EXPECT_EQ(std::string(), GetFieldTrialParamValueByFeature(kFeature, "x"));
+}
+
+TEST_F(FieldTrialParamsTest, FeatureParamString) {
+  const std::string kTrialName = "GetFieldTrialParamsByFeature";
+
+  static const Feature kFeature{"TestFeature", FEATURE_DISABLED_BY_DEFAULT};
+  static const FeatureParam<std::string> a{&kFeature, "a", "default"};
+  static const FeatureParam<std::string> b{&kFeature, "b", ""};
+  static const FeatureParam<std::string> c{&kFeature, "c", "default"};
+  static const FeatureParam<std::string> d{&kFeature, "d", ""};
+  static const FeatureParam<std::string> e{&kFeature, "e", "default"};
+  static const FeatureParam<std::string> f{&kFeature, "f", ""};
+
+  std::map<std::string, std::string> params;
+  params["a"] = "";
+  params["b"] = "non-default";
+  params["c"] = "non-default";
+  params["d"] = "";
+  // "e" is not registered
+  // "f" is not registered
+  AssociateFieldTrialParams(kTrialName, "A", params);
+  scoped_refptr<FieldTrial> trial(
+      CreateFieldTrial(kTrialName, 100, "A", nullptr));
+
+  CreateFeatureWithTrial(kFeature, FeatureList::OVERRIDE_ENABLE_FEATURE,
+                         trial.get());
+
+  EXPECT_EQ("default", a.Get());  // empty
+  EXPECT_EQ("non-default", b.Get());
+  EXPECT_EQ("non-default", c.Get());
+  EXPECT_EQ("", d.Get());         // empty
+  EXPECT_EQ("default", e.Get());  // not registered
+  EXPECT_EQ("", f.Get());         // not registered
+}
+
+TEST_F(FieldTrialParamsTest, FeatureParamInt) {
+  const std::string kTrialName = "GetFieldTrialParamsByFeature";
+
+  static const Feature kFeature{"TestFeature", FEATURE_DISABLED_BY_DEFAULT};
+  static const FeatureParam<int> a{&kFeature, "a", 0};
+  static const FeatureParam<int> b{&kFeature, "b", 0};
+  static const FeatureParam<int> c{&kFeature, "c", 0};
+  static const FeatureParam<int> d{&kFeature, "d", 0};
+  static const FeatureParam<int> e{&kFeature, "e", 0};
+
+  std::map<std::string, std::string> params;
+  params["a"] = "1";
+  params["b"] = "1.5";
+  params["c"] = "foo";
+  params["d"] = "";
+  // "e" is not registered
+  AssociateFieldTrialParams(kTrialName, "A", params);
+  scoped_refptr<FieldTrial> trial(
+      CreateFieldTrial(kTrialName, 100, "A", nullptr));
+
+  CreateFeatureWithTrial(kFeature, FeatureList::OVERRIDE_ENABLE_FEATURE,
+                         trial.get());
+
+  EXPECT_EQ(1, GetFieldTrialParamByFeatureAsInt(kFeature, "a", 0));
+  EXPECT_EQ(0, GetFieldTrialParamByFeatureAsInt(kFeature, "b", 0));  // invalid
+  EXPECT_EQ(0, GetFieldTrialParamByFeatureAsInt(kFeature, "c", 0));  // invalid
+  EXPECT_EQ(0, GetFieldTrialParamByFeatureAsInt(kFeature, "d", 0));  // empty
+  EXPECT_EQ(0, GetFieldTrialParamByFeatureAsInt(kFeature, "e", 0));  // empty
+
+  EXPECT_EQ(1, a.Get());
+  EXPECT_EQ(0, b.Get());  // invalid
+  EXPECT_EQ(0, c.Get());  // invalid
+  EXPECT_EQ(0, d.Get());  // empty
+  EXPECT_EQ(0, e.Get());  // empty
+}
+
+TEST_F(FieldTrialParamsTest, FeatureParamDouble) {
+  const std::string kTrialName = "GetFieldTrialParamsByFeature";
+
+  static const Feature kFeature{"TestFeature", FEATURE_DISABLED_BY_DEFAULT};
+  static const FeatureParam<double> a{&kFeature, "a", 0.0};
+  static const FeatureParam<double> b{&kFeature, "b", 0.0};
+  static const FeatureParam<double> c{&kFeature, "c", 0.0};
+  static const FeatureParam<double> d{&kFeature, "d", 0.0};
+  static const FeatureParam<double> e{&kFeature, "e", 0.0};
+  static const FeatureParam<double> f{&kFeature, "f", 0.0};
+
+  std::map<std::string, std::string> params;
+  params["a"] = "1";
+  params["b"] = "1.5";
+  params["c"] = "1.0e-10";
+  params["d"] = "foo";
+  params["e"] = "";
+  // "f" is not registered
+  AssociateFieldTrialParams(kTrialName, "A", params);
+  scoped_refptr<FieldTrial> trial(
+      CreateFieldTrial(kTrialName, 100, "A", nullptr));
+
+  CreateFeatureWithTrial(kFeature, FeatureList::OVERRIDE_ENABLE_FEATURE,
+                         trial.get());
+
+  EXPECT_EQ(1, GetFieldTrialParamByFeatureAsDouble(kFeature, "a", 0));
+  EXPECT_EQ(1.5, GetFieldTrialParamByFeatureAsDouble(kFeature, "b", 0));
+  EXPECT_EQ(1.0e-10, GetFieldTrialParamByFeatureAsDouble(kFeature, "c", 0));
+  EXPECT_EQ(0,
+            GetFieldTrialParamByFeatureAsDouble(kFeature, "d", 0));  // invalid
+  EXPECT_EQ(0, GetFieldTrialParamByFeatureAsDouble(kFeature, "e", 0));  // empty
+  EXPECT_EQ(0, GetFieldTrialParamByFeatureAsDouble(kFeature, "f", 0));  // empty
+
+  EXPECT_EQ(1, a.Get());
+  EXPECT_EQ(1.5, b.Get());
+  EXPECT_EQ(1.0e-10, c.Get());
+  EXPECT_EQ(0, d.Get());  // invalid
+  EXPECT_EQ(0, e.Get());  // empty
+  EXPECT_EQ(0, f.Get());  // empty
+}
+
+TEST_F(FieldTrialParamsTest, FeatureParamBool) {
+  const std::string kTrialName = "GetFieldTrialParamsByFeature";
+
+  static const Feature kFeature{"TestFeature", FEATURE_DISABLED_BY_DEFAULT};
+  static const FeatureParam<bool> a{&kFeature, "a", false};
+  static const FeatureParam<bool> b{&kFeature, "b", true};
+  static const FeatureParam<bool> c{&kFeature, "c", false};
+  static const FeatureParam<bool> d{&kFeature, "d", true};
+  static const FeatureParam<bool> e{&kFeature, "e", true};
+  static const FeatureParam<bool> f{&kFeature, "f", true};
+
+  std::map<std::string, std::string> params;
+  params["a"] = "true";
+  params["b"] = "false";
+  params["c"] = "1";
+  params["d"] = "False";
+  params["e"] = "";
+  // "f" is not registered
+  AssociateFieldTrialParams(kTrialName, "A", params);
+  scoped_refptr<FieldTrial> trial(
+      CreateFieldTrial(kTrialName, 100, "A", nullptr));
+
+  CreateFeatureWithTrial(kFeature, FeatureList::OVERRIDE_ENABLE_FEATURE,
+                         trial.get());
+
+  EXPECT_TRUE(a.Get());
+  EXPECT_FALSE(b.Get());
+  EXPECT_FALSE(c.Get());  // invalid
+  EXPECT_TRUE(d.Get());   // invalid
+  EXPECT_TRUE(e.Get());   // empty
+  EXPECT_TRUE(f.Get());   // empty
+}
+
+enum Hand { ROCK, PAPER, SCISSORS };
+
+TEST_F(FieldTrialParamsTest, FeatureParamEnum) {
+  const std::string kTrialName = "GetFieldTrialParamsByFeature";
+
+  static const FeatureParam<Hand>::Option hands[] = {
+      {ROCK, "rock"}, {PAPER, "paper"}, {SCISSORS, "scissors"}};
+  static const Feature kFeature{"TestFeature", FEATURE_DISABLED_BY_DEFAULT};
+  static const FeatureParam<Hand> a{&kFeature, "a", ROCK, &hands};
+  static const FeatureParam<Hand> b{&kFeature, "b", ROCK, &hands};
+  static const FeatureParam<Hand> c{&kFeature, "c", ROCK, &hands};
+  static const FeatureParam<Hand> d{&kFeature, "d", ROCK, &hands};
+  static const FeatureParam<Hand> e{&kFeature, "e", PAPER, &hands};
+  static const FeatureParam<Hand> f{&kFeature, "f", SCISSORS, &hands};
+
+  std::map<std::string, std::string> params;
+  params["a"] = "rock";
+  params["b"] = "paper";
+  params["c"] = "scissors";
+  params["d"] = "lizard";
+  params["e"] = "";
+  // "f" is not registered
+  AssociateFieldTrialParams(kTrialName, "A", params);
+  scoped_refptr<FieldTrial> trial(
+      CreateFieldTrial(kTrialName, 100, "A", nullptr));
+
+  CreateFeatureWithTrial(kFeature, FeatureList::OVERRIDE_ENABLE_FEATURE,
+                         trial.get());
+
+  EXPECT_EQ(ROCK, a.Get());
+  EXPECT_EQ(PAPER, b.Get());
+  EXPECT_EQ(SCISSORS, c.Get());
+  EXPECT_EQ(ROCK, d.Get());      // invalid
+  EXPECT_EQ(PAPER, e.Get());     // invalid/empty
+  EXPECT_EQ(SCISSORS, f.Get());  // not registered
+}
+
+enum class UI { ONE_D, TWO_D, THREE_D };
+
+TEST_F(FieldTrialParamsTest, FeatureParamEnumClass) {
+  const std::string kTrialName = "GetFieldTrialParamsByFeature";
+
+  static const FeatureParam<UI>::Option uis[] = {
+      {UI::ONE_D, "1d"}, {UI::TWO_D, "2d"}, {UI::THREE_D, "3d"}};
+  static const Feature kFeature{"TestFeature", FEATURE_DISABLED_BY_DEFAULT};
+  static const FeatureParam<UI> a{&kFeature, "a", UI::ONE_D, &uis};
+  static const FeatureParam<UI> b{&kFeature, "b", UI::ONE_D, &uis};
+  static const FeatureParam<UI> c{&kFeature, "c", UI::ONE_D, &uis};
+  static const FeatureParam<UI> d{&kFeature, "d", UI::ONE_D, &uis};
+  static const FeatureParam<UI> e{&kFeature, "e", UI::TWO_D, &uis};
+  static const FeatureParam<UI> f{&kFeature, "f", UI::THREE_D, &uis};
+
+  std::map<std::string, std::string> params;
+  params["a"] = "1d";
+  params["b"] = "2d";
+  params["c"] = "3d";
+  params["d"] = "4d";
+  params["e"] = "";
+  // "f" is not registered
+  AssociateFieldTrialParams(kTrialName, "A", params);
+  scoped_refptr<FieldTrial> trial(
+      CreateFieldTrial(kTrialName, 100, "A", nullptr));
+
+  CreateFeatureWithTrial(kFeature, FeatureList::OVERRIDE_ENABLE_FEATURE,
+                         trial.get());
+
+  EXPECT_EQ(UI::ONE_D, a.Get());
+  EXPECT_EQ(UI::TWO_D, b.Get());
+  EXPECT_EQ(UI::THREE_D, c.Get());
+  EXPECT_EQ(UI::ONE_D, d.Get());    // invalid
+  EXPECT_EQ(UI::TWO_D, e.Get());    // invalid/empty
+  EXPECT_EQ(UI::THREE_D, f.Get());  // not registered
+}
+
+}  // namespace base
diff --git a/base/metrics/field_trial_params_unittest.nc b/base/metrics/field_trial_params_unittest.nc
new file mode 100644
index 0000000..4c6005e
--- /dev/null
+++ b/base/metrics/field_trial_params_unittest.nc
@@ -0,0 +1,47 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is a "No Compile Test" suite.
+// http://dev.chromium.org/developers/testing/no-compile-tests
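+//
+// Each NCTEST_* block below is compiled in a separate pass with that macro
+// defined, and compilation is expected to fail with a diagnostic matching
+// the bracketed regex next to the #if. (This summarizes the conventions
+// described at the link above.)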
+
+#include "base/feature_list.h"
+#include "base/metrics/field_trial_params.h"
+
+constexpr base::Feature kFeature{"NoCompileFeature"};
+
+enum Param { FOO, BAR };
+
+#if defined(NCTEST_NO_PARAM_TYPE)  // [r"too few template arguments"]
+
+constexpr base::FeatureParam<> kParam{
+  &kFeature, "Param"};
+
+#elif defined(NCTEST_VOID_PARAM_TYPE)  // [r"unsupported FeatureParam<> type"]
+
+constexpr base::FeatureParam<void> kParam{
+  &kFeature, "Param"};
+
+#elif defined(NCTEST_INVALID_PARAM_TYPE)  // [r"unsupported FeatureParam<> type"]
+
+constexpr base::FeatureParam<size_t> kParam{
+  &kFeature, "Param", 1u};
+
+#elif defined(NCTEST_ENUM_NULL_OPTIONS)  // [r"candidate template ignored: could not match"]
+
+constexpr base::FeatureParam<Param> kParam{
+  &kFeature, "Param", FOO, nullptr};
+
+#elif defined(NCTEST_ENUM_EMPTY_OPTIONS)  // [r"zero-length arrays are not permitted"]
+
+constexpr base::FeatureParam<Param>::Option kParamOptions[] = {};
+constexpr base::FeatureParam<Param> kParam{
+  &kFeature, "Param", FOO, &kParamOptions};
+
+#else
+
+void suppress_unused_variable_warning() {
+    (void)kFeature;
+}
+
+#endif
diff --git a/base/metrics/field_trial_unittest.cc b/base/metrics/field_trial_unittest.cc
new file mode 100644
index 0000000..3f7cc30
--- /dev/null
+++ b/base/metrics/field_trial_unittest.cc
@@ -0,0 +1,1491 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/field_trial.h"
+
+#include <stddef.h>
+
+#include "base/base_switches.h"
+#include "base/build_time.h"
+#include "base/feature_list.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/message_loop/message_loop.h"
+#include "base/metrics/field_trial_param_associator.h"
+#include "base/rand_util.h"
+#include "base/run_loop.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/stringprintf.h"
+#include "base/test/gtest_util.h"
+#include "base/test/mock_entropy_provider.h"
+#include "base/test/scoped_feature_list.h"
+#include "base/test/test_shared_memory_util.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+// Default group name used by several tests.
+const char kDefaultGroupName[] = "DefaultGroup";
+
+// Calls FieldTrialList::FactoryGetFieldTrial() with a future expiry date, so
+// trials created through this helper are never expired.
+scoped_refptr<FieldTrial> CreateFieldTrial(
+    const std::string& trial_name,
+    int total_probability,
+    const std::string& default_group_name,
+    int* default_group_number) {
+  return FieldTrialList::FactoryGetFieldTrial(
+      trial_name, total_probability, default_group_name,
+      FieldTrialList::kNoExpirationYear, 1, 1, FieldTrial::SESSION_RANDOMIZED,
+      default_group_number);
+}
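+
+// Typical use of the helper above in the tests below (illustrative):
+//
+//   int default_group = -1;
+//   scoped_refptr<FieldTrial> trial =
+//       CreateFieldTrial("MyTrial", 100, "Default", &default_group);
+//   trial->AppendGroup("Enabled", 50);  // Claims 50 of the 100 units.
+//   trial->group();                     // Finalizes the group choice.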
+
+int OneYearBeforeBuildTime() {
+  Time one_year_before_build_time = GetBuildTime() - TimeDelta::FromDays(365);
+  Time::Exploded exploded;
+  one_year_before_build_time.LocalExplode(&exploded);
+  return exploded.year;
+}
+
+// FieldTrialList::Observer implementation for testing.
+class TestFieldTrialObserver : public FieldTrialList::Observer {
+ public:
+  enum Type {
+    ASYNCHRONOUS,
+    SYNCHRONOUS,
+  };
+
+  explicit TestFieldTrialObserver(Type type) : type_(type) {
+    if (type == SYNCHRONOUS)
+      FieldTrialList::SetSynchronousObserver(this);
+    else
+      FieldTrialList::AddObserver(this);
+  }
+
+  ~TestFieldTrialObserver() override {
+    if (type_ == SYNCHRONOUS)
+      FieldTrialList::RemoveSynchronousObserver(this);
+    else
+      FieldTrialList::RemoveObserver(this);
+  }
+
+  void OnFieldTrialGroupFinalized(const std::string& trial,
+                                  const std::string& group) override {
+    trial_name_ = trial;
+    group_name_ = group;
+  }
+
+  const std::string& trial_name() const { return trial_name_; }
+  const std::string& group_name() const { return group_name_; }
+
+ private:
+  const Type type_;
+  std::string trial_name_;
+  std::string group_name_;
+
+  DISALLOW_COPY_AND_ASSIGN(TestFieldTrialObserver);
+};
+
+std::string MockEscapeQueryParamValue(const std::string& input) {
+  return input;
+}
+
+}  // namespace
+
+class FieldTrialTest : public ::testing::Test {
+ public:
+  FieldTrialTest() : trial_list_(nullptr) {}
+
+ private:
+  MessageLoop message_loop_;
+  FieldTrialList trial_list_;
+
+  DISALLOW_COPY_AND_ASSIGN(FieldTrialTest);
+};
+
+// Test registration, and also check that destructors are called for trials.
+TEST_F(FieldTrialTest, Registration) {
+  const char name1[] = "name 1 test";
+  const char name2[] = "name 2 test";
+  EXPECT_FALSE(FieldTrialList::Find(name1));
+  EXPECT_FALSE(FieldTrialList::Find(name2));
+
+  scoped_refptr<FieldTrial> trial1 =
+      CreateFieldTrial(name1, 10, "default name 1 test", nullptr);
+  EXPECT_EQ(FieldTrial::kNotFinalized, trial1->group_);
+  EXPECT_EQ(name1, trial1->trial_name());
+  EXPECT_EQ("", trial1->group_name_internal());
+
+  trial1->AppendGroup(std::string(), 7);
+
+  EXPECT_EQ(trial1.get(), FieldTrialList::Find(name1));
+  EXPECT_FALSE(FieldTrialList::Find(name2));
+
+  scoped_refptr<FieldTrial> trial2 =
+      CreateFieldTrial(name2, 10, "default name 2 test", nullptr);
+  EXPECT_EQ(FieldTrial::kNotFinalized, trial2->group_);
+  EXPECT_EQ(name2, trial2->trial_name());
+  EXPECT_EQ("", trial2->group_name_internal());
+
+  trial2->AppendGroup("a first group", 7);
+
+  EXPECT_EQ(trial1.get(), FieldTrialList::Find(name1));
+  EXPECT_EQ(trial2.get(), FieldTrialList::Find(name2));
+  // Note: FieldTrialList should delete the objects at shutdown.
+}
+
+TEST_F(FieldTrialTest, AbsoluteProbabilities) {
+  char always_true[] = " always true";
+  char default_always_true[] = " default always true";
+  char always_false[] = " always false";
+  char default_always_false[] = " default always false";
+  for (int i = 1; i < 250; ++i) {
+    // Try lots of names, by changing the first character of the name.
+    char c = static_cast<char>(i);
+    always_true[0] = c;
+    default_always_true[0] = c;
+    always_false[0] = c;
+    default_always_false[0] = c;
+
+    scoped_refptr<FieldTrial> trial_true =
+        CreateFieldTrial(always_true, 10, default_always_true, nullptr);
+    const std::string winner = "TheWinner";
+    int winner_group = trial_true->AppendGroup(winner, 10);
+
+    EXPECT_EQ(winner_group, trial_true->group());
+    EXPECT_EQ(winner, trial_true->group_name());
+
+    scoped_refptr<FieldTrial> trial_false =
+        CreateFieldTrial(always_false, 10, default_always_false, nullptr);
+    int loser_group = trial_false->AppendGroup("ALoser", 0);
+
+    EXPECT_NE(loser_group, trial_false->group());
+  }
+}
+
+TEST_F(FieldTrialTest, RemainingProbability) {
+  // First create a test that hasn't had a winner yet.
+  const std::string winner = "Winner";
+  const std::string loser = "Loser";
+  scoped_refptr<FieldTrial> trial;
+  int counter = 0;
+  int default_group_number = -1;
+  do {
+    std::string name = StringPrintf("trial%d", ++counter);
+    trial = CreateFieldTrial(name, 10, winner, &default_group_number);
+    trial->AppendGroup(loser, 5);  // 50% chance of not being chosen.
+    // If a group is not assigned, group_ will be kNotFinalized.
+  } while (trial->group_ != FieldTrial::kNotFinalized);
+
+  // And that 'default' group (winner) should always win.
+  EXPECT_EQ(default_group_number, trial->group());
+
+  // And that winner should ALWAYS win.
+  EXPECT_EQ(winner, trial->group_name());
+}
+
+TEST_F(FieldTrialTest, FiftyFiftyProbability) {
+  // Check that even with small divisors, we have the proper probabilities, and
+  // all outcomes are possible.  Since this is a 50-50 test, it should get both
+  // outcomes in a few tries, but we'll try no more than 100 times (and be flaky
+  // with probability around 1 in 2^99).
+  bool first_winner = false;
+  bool second_winner = false;
+  int counter = 0;
+  do {
+    std::string name = StringPrintf("FiftyFifty%d", ++counter);
+    std::string default_group_name =
+        StringPrintf("Default FiftyFifty%d", ++counter);
+    scoped_refptr<FieldTrial> trial =
+        CreateFieldTrial(name, 2, default_group_name, nullptr);
+    trial->AppendGroup("first", 1);  // 50% chance of being chosen.
+    // If group_ is kNotFinalized, then a group assignment hasn't been done.
+    if (trial->group_ != FieldTrial::kNotFinalized) {
+      first_winner = true;
+      continue;
+    }
+    trial->AppendGroup("second", 1);  // Always chosen at this point.
+    EXPECT_NE(FieldTrial::kNotFinalized, trial->group());
+    second_winner = true;
+  } while ((!second_winner || !first_winner) && counter < 100);
+  EXPECT_TRUE(second_winner);
+  EXPECT_TRUE(first_winner);
+}
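+
+// Flakiness bound for the test above: it fails only if all 100 iterations
+// land in the same group, which happens with probability
+// 2 * (1/2)^100 = (1/2)^99, i.e. roughly 1 in 6.3 * 10^29.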
+
+TEST_F(FieldTrialTest, MiddleProbabilities) {
+  char name[] = " same name";
+  char default_group_name[] = " default same name";
+  bool false_event_seen = false;
+  bool true_event_seen = false;
+  for (int i = 1; i < 250; ++i) {
+    char c = static_cast<char>(i);
+    name[0] = c;
+    default_group_name[0] = c;
+    scoped_refptr<FieldTrial> trial =
+        CreateFieldTrial(name, 10, default_group_name, nullptr);
+    int might_win = trial->AppendGroup("MightWin", 5);
+
+    if (trial->group() == might_win) {
+      true_event_seen = true;
+    } else {
+      false_event_seen = true;
+    }
+    if (false_event_seen && true_event_seen)
+      return;  // Successful test!!!
+  }
+  // Very surprising to get here. The chance of 249 consecutive identical
+  // 50% outcomes is 2 * (1/2)^249, i.e. about 1 in 2^248.
+  // One of the following will fail.
+  EXPECT_TRUE(false_event_seen);
+  EXPECT_TRUE(true_event_seen);
+}
+
+TEST_F(FieldTrialTest, OneWinner) {
+  char name[] = "Some name";
+  char default_group_name[] = "Default some name";
+  int group_count(10);
+
+  int default_group_number = -1;
+  scoped_refptr<FieldTrial> trial =
+      CreateFieldTrial(name, group_count, default_group_name, nullptr);
+  int winner_index(-2);
+  std::string winner_name;
+
+  for (int i = 1; i <= group_count; ++i) {
+    int might_win = trial->AppendGroup(std::string(), 1);
+
+    // Because we keep appending groups, we want to see if the last group that
+    // was added has been assigned or not.
+    if (trial->group_ == might_win) {
+      EXPECT_EQ(-2, winner_index);
+      winner_index = might_win;
+      StringAppendF(&winner_name, "%d", might_win);
+      EXPECT_EQ(winner_name, trial->group_name());
+    }
+  }
+  EXPECT_GE(winner_index, 0);
+  // Since all groups cover the total probability, we should not have
+  // chosen the default group.
+  EXPECT_NE(trial->group(), default_group_number);
+  EXPECT_EQ(trial->group(), winner_index);
+  EXPECT_EQ(trial->group_name(), winner_name);
+}
+
+TEST_F(FieldTrialTest, DisableProbability) {
+  const std::string default_group_name = "Default group";
+  const std::string loser = "Loser";
+  const std::string name = "Trial";
+
+  // Create a field trial that has expired.
+  int default_group_number = -1;
+  FieldTrial* trial = FieldTrialList::FactoryGetFieldTrial(
+      name, 1000000000, default_group_name, OneYearBeforeBuildTime(), 1, 1,
+      FieldTrial::SESSION_RANDOMIZED,
+      &default_group_number);
+  trial->AppendGroup(loser, 999999999);  // 99.9999999% chance of being chosen.
+
+  // Because trial has expired, we should always be in the default group.
+  EXPECT_EQ(default_group_number, trial->group());
+
+  // And that default_group_name should ALWAYS win.
+  EXPECT_EQ(default_group_name, trial->group_name());
+}
+
+TEST_F(FieldTrialTest, ActiveGroups) {
+  std::string no_group("No Group");
+  scoped_refptr<FieldTrial> trial =
+      CreateFieldTrial(no_group, 10, "Default", nullptr);
+
+  // There is no winner yet, so no active group should be returned.
+  FieldTrial::ActiveGroup active_group;
+  EXPECT_FALSE(trial->GetActiveGroup(&active_group));
+
+  // Create a single winning group.
+  std::string one_winner("One Winner");
+  trial = CreateFieldTrial(one_winner, 10, "Default", nullptr);
+  std::string winner("Winner");
+  trial->AppendGroup(winner, 10);
+  EXPECT_FALSE(trial->GetActiveGroup(&active_group));
+  // Finalize the group selection by accessing the selected group.
+  trial->group();
+  EXPECT_TRUE(trial->GetActiveGroup(&active_group));
+  EXPECT_EQ(one_winner, active_group.trial_name);
+  EXPECT_EQ(winner, active_group.group_name);
+
+  std::string multi_group("MultiGroup");
+  scoped_refptr<FieldTrial> multi_group_trial =
+      CreateFieldTrial(multi_group, 9, "Default", nullptr);
+
+  multi_group_trial->AppendGroup("Me", 3);
+  multi_group_trial->AppendGroup("You", 3);
+  multi_group_trial->AppendGroup("Them", 3);
+  EXPECT_FALSE(multi_group_trial->GetActiveGroup(&active_group));
+  // Finalize the group selection by accessing the selected group.
+  multi_group_trial->group();
+  EXPECT_TRUE(multi_group_trial->GetActiveGroup(&active_group));
+  EXPECT_EQ(multi_group, active_group.trial_name);
+  EXPECT_EQ(multi_group_trial->group_name(), active_group.group_name);
+
+  // Now check if the list is built properly...
+  FieldTrial::ActiveGroups active_groups;
+  FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
+  EXPECT_EQ(2U, active_groups.size());
+  for (size_t i = 0; i < active_groups.size(); ++i) {
+    // Order is not guaranteed, so check all values.
+    EXPECT_NE(no_group, active_groups[i].trial_name);
+    EXPECT_TRUE(one_winner != active_groups[i].trial_name ||
+                winner == active_groups[i].group_name);
+    EXPECT_TRUE(multi_group != active_groups[i].trial_name ||
+                multi_group_trial->group_name() == active_groups[i].group_name);
+  }
+}
+
+TEST_F(FieldTrialTest, GetActiveFieldTrialGroupsFromString) {
+  FieldTrial::ActiveGroups active_groups;
+  FieldTrialList::GetActiveFieldTrialGroupsFromString("*A/X/B/Y/*C/Z",
+                                                      &active_groups);
+  ASSERT_EQ(2U, active_groups.size());
+  EXPECT_EQ("A", active_groups[0].trial_name);
+  EXPECT_EQ("X", active_groups[0].group_name);
+  EXPECT_EQ("C", active_groups[1].trial_name);
+  EXPECT_EQ("Z", active_groups[1].group_name);
+}
+
+TEST_F(FieldTrialTest, ActiveGroupsNotFinalized) {
+  const char kTrialName[] = "TestTrial";
+  const char kSecondaryGroupName[] = "SecondaryGroup";
+
+  int default_group = -1;
+  scoped_refptr<FieldTrial> trial =
+      CreateFieldTrial(kTrialName, 100, kDefaultGroupName, &default_group);
+  const int secondary_group = trial->AppendGroup(kSecondaryGroupName, 50);
+
+  // Before |group()| is called, |GetActiveGroup()| should return false.
+  FieldTrial::ActiveGroup active_group;
+  EXPECT_FALSE(trial->GetActiveGroup(&active_group));
+
+  // |GetActiveFieldTrialGroups()| should also not include the trial.
+  FieldTrial::ActiveGroups active_groups;
+  FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
+  EXPECT_TRUE(active_groups.empty());
+
+  // After |group()| has been called, both APIs should succeed.
+  const int chosen_group = trial->group();
+  EXPECT_TRUE(chosen_group == default_group || chosen_group == secondary_group);
+
+  EXPECT_TRUE(trial->GetActiveGroup(&active_group));
+  EXPECT_EQ(kTrialName, active_group.trial_name);
+  if (chosen_group == default_group)
+    EXPECT_EQ(kDefaultGroupName, active_group.group_name);
+  else
+    EXPECT_EQ(kSecondaryGroupName, active_group.group_name);
+
+  FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
+  ASSERT_EQ(1U, active_groups.size());
+  EXPECT_EQ(kTrialName, active_groups[0].trial_name);
+  EXPECT_EQ(active_group.group_name, active_groups[0].group_name);
+}
+
+TEST_F(FieldTrialTest, GetGroupNameWithoutActivation) {
+  const char kTrialName[] = "TestTrial";
+  const char kSecondaryGroupName[] = "SecondaryGroup";
+
+  int default_group = -1;
+  scoped_refptr<FieldTrial> trial =
+      CreateFieldTrial(kTrialName, 100, kDefaultGroupName, &default_group);
+  trial->AppendGroup(kSecondaryGroupName, 50);
+
+  // The trial should start inactive.
+  EXPECT_FALSE(FieldTrialList::IsTrialActive(kTrialName));
+
+  // Calling |GetGroupNameWithoutActivation()| should not activate the trial.
+  std::string group_name = trial->GetGroupNameWithoutActivation();
+  EXPECT_FALSE(group_name.empty());
+  EXPECT_FALSE(FieldTrialList::IsTrialActive(kTrialName));
+
+  // Calling |group_name()| should activate it and return the same group name.
+  EXPECT_EQ(group_name, trial->group_name());
+  EXPECT_TRUE(FieldTrialList::IsTrialActive(kTrialName));
+}
+
+TEST_F(FieldTrialTest, Save) {
+  std::string save_string;
+
+  scoped_refptr<FieldTrial> trial =
+      CreateFieldTrial("Some name", 10, "Default some name", nullptr);
+  // There is no winner yet, so no textual group name is associated with trial.
+  // In this case, the trial should not be included.
+  EXPECT_EQ("", trial->group_name_internal());
+  FieldTrialList::StatesToString(&save_string);
+  EXPECT_EQ("", save_string);
+  save_string.clear();
+
+  // Create a winning group.
+  trial->AppendGroup("Winner", 10);
+  // Finalize the group selection by accessing the selected group.
+  trial->group();
+  FieldTrialList::StatesToString(&save_string);
+  EXPECT_EQ("Some name/Winner/", save_string);
+  save_string.clear();
+
+  // Create a second trial and winning group.
+  scoped_refptr<FieldTrial> trial2 =
+      CreateFieldTrial("xxx", 10, "Default xxx", nullptr);
+  trial2->AppendGroup("yyyy", 10);
+  // Finalize the group selection by accessing the selected group.
+  trial2->group();
+
+  FieldTrialList::StatesToString(&save_string);
+  // We assume names are alphabetized... though this is not critical.
+  EXPECT_EQ("Some name/Winner/xxx/yyyy/", save_string);
+  save_string.clear();
+
+  // Create a third trial with only the default group.
+  scoped_refptr<FieldTrial> trial3 =
+      CreateFieldTrial("zzz", 10, "default", nullptr);
+  // Finalize the group selection by accessing the selected group.
+  trial3->group();
+
+  FieldTrialList::StatesToString(&save_string);
+  EXPECT_EQ("Some name/Winner/xxx/yyyy/zzz/default/", save_string);
+}
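+
+// The serialized state exercised above is a flat sequence of
+// "<trial name>/<group name>/" pairs; AllStatesToString() (see SaveAll below)
+// additionally prefixes trials whose group was activated with '*'.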
+
+TEST_F(FieldTrialTest, SaveAll) {
+  std::string save_string;
+
+  scoped_refptr<FieldTrial> trial =
+      CreateFieldTrial("Some name", 10, "Default some name", nullptr);
+  EXPECT_EQ("", trial->group_name_internal());
+  FieldTrialList::AllStatesToString(&save_string, false);
+  EXPECT_EQ("Some name/Default some name/", save_string);
+  // Getting all states should have finalized the trial.
+  EXPECT_EQ("Default some name", trial->group_name_internal());
+  save_string.clear();
+
+  // Create a winning group.
+  trial = CreateFieldTrial("trial2", 10, "Default some name", nullptr);
+  trial->AppendGroup("Winner", 10);
+  // Finalize the group selection by accessing the selected group.
+  trial->group();
+  FieldTrialList::AllStatesToString(&save_string, false);
+  EXPECT_EQ("Some name/Default some name/*trial2/Winner/", save_string);
+  save_string.clear();
+
+  // Create a second trial and winning group.
+  scoped_refptr<FieldTrial> trial2 =
+      CreateFieldTrial("xxx", 10, "Default xxx", nullptr);
+  trial2->AppendGroup("yyyy", 10);
+  // Finalize the group selection by accessing the selected group.
+  trial2->group();
+
+  FieldTrialList::AllStatesToString(&save_string, false);
+  // We assume names are alphabetized... though this is not critical.
+  EXPECT_EQ("Some name/Default some name/*trial2/Winner/*xxx/yyyy/",
+            save_string);
+  save_string.clear();
+
+  // Create a third trial with only the default group.
+  scoped_refptr<FieldTrial> trial3 =
+      CreateFieldTrial("zzz", 10, "default", nullptr);
+
+  FieldTrialList::AllStatesToString(&save_string, false);
+  EXPECT_EQ("Some name/Default some name/*trial2/Winner/*xxx/yyyy/zzz/default/",
+            save_string);
+
+  // Create an expired study.
+  int default_group_number = -1;
+  scoped_refptr<FieldTrial> expired_trial =
+      FieldTrialList::FactoryGetFieldTrial(
+          "Expired trial name", 1000000000, "Default group",
+          OneYearBeforeBuildTime(), 1, 1, FieldTrial::SESSION_RANDOMIZED,
+          &default_group_number);
+  expired_trial->AppendGroup("Expired trial group name", 999999999);
+
+  save_string.clear();
+  FieldTrialList::AllStatesToString(&save_string, false);
+  EXPECT_EQ("Some name/Default some name/*trial2/Winner/*xxx/yyyy/zzz/default/",
+            save_string);
+  save_string.clear();
+  FieldTrialList::AllStatesToString(&save_string, true);
+  EXPECT_EQ(
+      "Expired trial name/Default group/"
+      "Some name/Default some name/*trial2/Winner/*xxx/yyyy/zzz/default/",
+      save_string);
+}
+
+TEST_F(FieldTrialTest, Restore) {
+  ASSERT_FALSE(FieldTrialList::TrialExists("Some_name"));
+  ASSERT_FALSE(FieldTrialList::TrialExists("xxx"));
+
+  FieldTrialList::CreateTrialsFromString("Some_name/Winner/xxx/yyyy/",
+                                         std::set<std::string>());
+
+  FieldTrial* trial = FieldTrialList::Find("Some_name");
+  ASSERT_NE(static_cast<FieldTrial*>(nullptr), trial);
+  EXPECT_EQ("Winner", trial->group_name());
+  EXPECT_EQ("Some_name", trial->trial_name());
+
+  trial = FieldTrialList::Find("xxx");
+  ASSERT_NE(static_cast<FieldTrial*>(nullptr), trial);
+  EXPECT_EQ("yyyy", trial->group_name());
+  EXPECT_EQ("xxx", trial->trial_name());
+}
+
+TEST_F(FieldTrialTest, RestoreNotEndingWithSlash) {
+  EXPECT_TRUE(FieldTrialList::CreateTrialsFromString("tname/gname",
+                                                     std::set<std::string>()));
+
+  FieldTrial* trial = FieldTrialList::Find("tname");
+  ASSERT_NE(static_cast<FieldTrial*>(nullptr), trial);
+  EXPECT_EQ("gname", trial->group_name());
+  EXPECT_EQ("tname", trial->trial_name());
+}
+
+TEST_F(FieldTrialTest, BogusRestore) {
+  EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("MissingSlash",
+                                                      std::set<std::string>()));
+  EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("MissingGroupName/",
+                                                      std::set<std::string>()));
+  EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("noname, only group/",
+                                                      std::set<std::string>()));
+  EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("/emptyname",
+                                                      std::set<std::string>()));
+  EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("*/emptyname",
+                                                      std::set<std::string>()));
+}
+
+TEST_F(FieldTrialTest, DuplicateRestore) {
+  scoped_refptr<FieldTrial> trial =
+      CreateFieldTrial("Some name", 10, "Default", nullptr);
+  trial->AppendGroup("Winner", 10);
+  // Finalize the group selection by accessing the selected group.
+  trial->group();
+  std::string save_string;
+  FieldTrialList::StatesToString(&save_string);
+  EXPECT_EQ("Some name/Winner/", save_string);
+
+  // It is OK if we redundantly specify a winner.
+  EXPECT_TRUE(FieldTrialList::CreateTrialsFromString(save_string,
+                                                     std::set<std::string>()));
+
+  // But it is an error to try to change to a different winner.
+  EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("Some name/Loser/",
+                                                      std::set<std::string>()));
+}
+
+TEST_F(FieldTrialTest, CreateTrialsFromStringNotActive) {
+  ASSERT_FALSE(FieldTrialList::TrialExists("Abc"));
+  ASSERT_FALSE(FieldTrialList::TrialExists("Xyz"));
+  ASSERT_TRUE(FieldTrialList::CreateTrialsFromString("Abc/def/Xyz/zyx/",
+                                                     std::set<std::string>()));
+
+  FieldTrial::ActiveGroups active_groups;
+  FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
+  ASSERT_TRUE(active_groups.empty());
+
+  // Check that the values still get returned and querying them activates them.
+  EXPECT_EQ("def", FieldTrialList::FindFullName("Abc"));
+  EXPECT_EQ("zyx", FieldTrialList::FindFullName("Xyz"));
+
+  FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
+  ASSERT_EQ(2U, active_groups.size());
+  EXPECT_EQ("Abc", active_groups[0].trial_name);
+  EXPECT_EQ("def", active_groups[0].group_name);
+  EXPECT_EQ("Xyz", active_groups[1].trial_name);
+  EXPECT_EQ("zyx", active_groups[1].group_name);
+}
+
+TEST_F(FieldTrialTest, CreateTrialsFromStringForceActivation) {
+  ASSERT_FALSE(FieldTrialList::TrialExists("Abc"));
+  ASSERT_FALSE(FieldTrialList::TrialExists("def"));
+  ASSERT_FALSE(FieldTrialList::TrialExists("Xyz"));
+  ASSERT_TRUE(FieldTrialList::CreateTrialsFromString(
+      "*Abc/cba/def/fed/*Xyz/zyx/", std::set<std::string>()));
+
+  FieldTrial::ActiveGroups active_groups;
+  FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
+  ASSERT_EQ(2U, active_groups.size());
+  EXPECT_EQ("Abc", active_groups[0].trial_name);
+  EXPECT_EQ("cba", active_groups[0].group_name);
+  EXPECT_EQ("Xyz", active_groups[1].trial_name);
+  EXPECT_EQ("zyx", active_groups[1].group_name);
+}
+
+TEST_F(FieldTrialTest, CreateTrialsFromStringNotActiveObserver) {
+  ASSERT_FALSE(FieldTrialList::TrialExists("Abc"));
+
+  TestFieldTrialObserver observer(TestFieldTrialObserver::ASYNCHRONOUS);
+  ASSERT_TRUE(FieldTrialList::CreateTrialsFromString("Abc/def/",
+                                                     std::set<std::string>()));
+  RunLoop().RunUntilIdle();
+  // Observer shouldn't be notified.
+  EXPECT_TRUE(observer.trial_name().empty());
+
+  // Check that the values still get returned and querying them activates them.
+  EXPECT_EQ("def", FieldTrialList::FindFullName("Abc"));
+
+  RunLoop().RunUntilIdle();
+  EXPECT_EQ("Abc", observer.trial_name());
+  EXPECT_EQ("def", observer.group_name());
+}
+
+TEST_F(FieldTrialTest, CreateTrialsFromStringWithIgnoredFieldTrials) {
+  ASSERT_FALSE(FieldTrialList::TrialExists("Unaccepted1"));
+  ASSERT_FALSE(FieldTrialList::TrialExists("Foo"));
+  ASSERT_FALSE(FieldTrialList::TrialExists("Unaccepted2"));
+  ASSERT_FALSE(FieldTrialList::TrialExists("Bar"));
+  ASSERT_FALSE(FieldTrialList::TrialExists("Unaccepted3"));
+
+  std::set<std::string> ignored_trial_names;
+  ignored_trial_names.insert("Unaccepted1");
+  ignored_trial_names.insert("Unaccepted2");
+  ignored_trial_names.insert("Unaccepted3");
+
+  FieldTrialList::CreateTrialsFromString(
+      "Unaccepted1/Unaccepted1_name/"
+      "Foo/Foo_name/"
+      "Unaccepted2/Unaccepted2_name/"
+      "Bar/Bar_name/"
+      "Unaccepted3/Unaccepted3_name/",
+      ignored_trial_names);
+
+  EXPECT_FALSE(FieldTrialList::TrialExists("Unaccepted1"));
+  EXPECT_TRUE(FieldTrialList::TrialExists("Foo"));
+  EXPECT_FALSE(FieldTrialList::TrialExists("Unaccepted2"));
+  EXPECT_TRUE(FieldTrialList::TrialExists("Bar"));
+  EXPECT_FALSE(FieldTrialList::TrialExists("Unaccepted3"));
+
+  FieldTrial::ActiveGroups active_groups;
+  FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
+  EXPECT_TRUE(active_groups.empty());
+
+  FieldTrial* trial = FieldTrialList::Find("Foo");
+  ASSERT_NE(static_cast<FieldTrial*>(nullptr), trial);
+  EXPECT_EQ("Foo", trial->trial_name());
+  EXPECT_EQ("Foo_name", trial->group_name());
+
+  trial = FieldTrialList::Find("Bar");
+  ASSERT_NE(static_cast<FieldTrial*>(nullptr), trial);
+  EXPECT_EQ("Bar", trial->trial_name());
+  EXPECT_EQ("Bar_name", trial->group_name());
+}
+
+TEST_F(FieldTrialTest, CreateFieldTrial) {
+  ASSERT_FALSE(FieldTrialList::TrialExists("Some_name"));
+
+  FieldTrialList::CreateFieldTrial("Some_name", "Winner");
+
+  FieldTrial* trial = FieldTrialList::Find("Some_name");
+  ASSERT_NE(static_cast<FieldTrial*>(nullptr), trial);
+  EXPECT_EQ("Winner", trial->group_name());
+  EXPECT_EQ("Some_name", trial->trial_name());
+}
+
+TEST_F(FieldTrialTest, CreateFieldTrialIsNotActive) {
+  const char kTrialName[] = "CreateFieldTrialIsActiveTrial";
+  const char kWinnerGroup[] = "Winner";
+  ASSERT_FALSE(FieldTrialList::TrialExists(kTrialName));
+  FieldTrialList::CreateFieldTrial(kTrialName, kWinnerGroup);
+
+  FieldTrial::ActiveGroups active_groups;
+  FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
+  EXPECT_TRUE(active_groups.empty());
+}
+
+TEST_F(FieldTrialTest, DuplicateFieldTrial) {
+  scoped_refptr<FieldTrial> trial =
+      CreateFieldTrial("Some_name", 10, "Default", nullptr);
+  trial->AppendGroup("Winner", 10);
+
+  // It is OK if we redundantly specify a winner.
+  FieldTrial* trial1 = FieldTrialList::CreateFieldTrial("Some_name", "Winner");
+  EXPECT_TRUE(trial1 != nullptr);
+
+  // But it is an error to try to change to a different winner.
+  FieldTrial* trial2 = FieldTrialList::CreateFieldTrial("Some_name", "Loser");
+  EXPECT_TRUE(trial2 == nullptr);
+}
+
+TEST_F(FieldTrialTest, DisableImmediately) {
+  int default_group_number = -1;
+  scoped_refptr<FieldTrial> trial =
+      CreateFieldTrial("trial", 100, "default", &default_group_number);
+  trial->Disable();
+  ASSERT_EQ("default", trial->group_name());
+  ASSERT_EQ(default_group_number, trial->group());
+}
+
+TEST_F(FieldTrialTest, DisableAfterInitialization) {
+  scoped_refptr<FieldTrial> trial =
+      CreateFieldTrial("trial", 100, "default", nullptr);
+  trial->AppendGroup("non_default", 100);
+  trial->Disable();
+  ASSERT_EQ("default", trial->group_name());
+}
+
+TEST_F(FieldTrialTest, ForcedFieldTrials) {
+  // Validate we keep the forced choice.
+  FieldTrial* forced_trial = FieldTrialList::CreateFieldTrial("Use the",
+                                                              "Force");
+  EXPECT_STREQ("Force", forced_trial->group_name().c_str());
+
+  int default_group_number = -1;
+  scoped_refptr<FieldTrial> factory_trial =
+      CreateFieldTrial("Use the", 1000, "default", &default_group_number);
+  EXPECT_EQ(factory_trial.get(), forced_trial);
+
+  int chosen_group = factory_trial->AppendGroup("Force", 100);
+  EXPECT_EQ(chosen_group, factory_trial->group());
+  int not_chosen_group = factory_trial->AppendGroup("Dark Side", 100);
+  EXPECT_NE(chosen_group, not_chosen_group);
+
+  // Since the forced group is not the default group, the chosen group should
+  // not be reported as the default group.
+  EXPECT_NE(default_group_number, chosen_group);
+  int new_group = factory_trial->AppendGroup("Duck Tape", 800);
+  EXPECT_NE(chosen_group, new_group);
+  // The new group should not be the default group either.
+  EXPECT_NE(default_group_number, new_group);
+}
+
+TEST_F(FieldTrialTest, ForcedFieldTrialsDefaultGroup) {
+  // Forcing the default should use the proper group ID.
+  FieldTrial* forced_trial = FieldTrialList::CreateFieldTrial("Trial Name",
+                                                              "Default");
+  int default_group_number = -1;
+  scoped_refptr<FieldTrial> factory_trial =
+      CreateFieldTrial("Trial Name", 1000, "Default", &default_group_number);
+  EXPECT_EQ(forced_trial, factory_trial.get());
+
+  int other_group = factory_trial->AppendGroup("Not Default", 100);
+  EXPECT_STREQ("Default", factory_trial->group_name().c_str());
+  EXPECT_EQ(default_group_number, factory_trial->group());
+  EXPECT_NE(other_group, factory_trial->group());
+
+  int new_other_group = factory_trial->AppendGroup("Not Default Either", 800);
+  EXPECT_NE(new_other_group, factory_trial->group());
+}
+
+TEST_F(FieldTrialTest, SetForced) {
+  // Start by setting a trial for which we ensure a winner...
+  int default_group_number = -1;
+  scoped_refptr<FieldTrial> forced_trial =
+      CreateFieldTrial("Use the", 1, "default", &default_group_number);
+  EXPECT_TRUE(forced_trial.get() != nullptr);
+
+  int forced_group = forced_trial->AppendGroup("Force", 1);
+  EXPECT_EQ(forced_group, forced_trial->group());
+
+  // Now force it.
+  forced_trial->SetForced();
+
+  // Now try to set it up differently as a hard coded registration would.
+  scoped_refptr<FieldTrial> hard_coded_trial =
+      CreateFieldTrial("Use the", 1, "default", &default_group_number);
+  EXPECT_EQ(hard_coded_trial, forced_trial);
+
+  int would_lose_group = hard_coded_trial->AppendGroup("Force", 0);
+  EXPECT_EQ(forced_group, hard_coded_trial->group());
+  EXPECT_EQ(forced_group, would_lose_group);
+
+  // Same thing if we would have done it to win again.
+  scoped_refptr<FieldTrial> other_hard_coded_trial =
+      CreateFieldTrial("Use the", 1, "default", &default_group_number);
+  EXPECT_EQ(other_hard_coded_trial, forced_trial);
+
+  int would_win_group = other_hard_coded_trial->AppendGroup("Force", 1);
+  EXPECT_EQ(forced_group, other_hard_coded_trial->group());
+  EXPECT_EQ(forced_group, would_win_group);
+}
+
+TEST_F(FieldTrialTest, SetForcedDefaultOnly) {
+  const char kTrialName[] = "SetForcedDefaultOnly";
+  ASSERT_FALSE(FieldTrialList::TrialExists(kTrialName));
+
+  int default_group = -1;
+  scoped_refptr<FieldTrial> trial =
+      CreateFieldTrial(kTrialName, 100, kDefaultGroupName, &default_group);
+  trial->SetForced();
+
+  trial = CreateFieldTrial(kTrialName, 100, kDefaultGroupName, nullptr);
+  EXPECT_EQ(default_group, trial->group());
+  EXPECT_EQ(kDefaultGroupName, trial->group_name());
+}
+
+TEST_F(FieldTrialTest, SetForcedDefaultWithExtraGroup) {
+  const char kTrialName[] = "SetForcedDefaultWithExtraGroup";
+  ASSERT_FALSE(FieldTrialList::TrialExists(kTrialName));
+
+  int default_group = -1;
+  scoped_refptr<FieldTrial> trial =
+      CreateFieldTrial(kTrialName, 100, kDefaultGroupName, &default_group);
+  trial->SetForced();
+
+  trial = CreateFieldTrial(kTrialName, 100, kDefaultGroupName, nullptr);
+  const int extra_group = trial->AppendGroup("Extra", 100);
+  EXPECT_EQ(default_group, trial->group());
+  EXPECT_NE(extra_group, trial->group());
+  EXPECT_EQ(kDefaultGroupName, trial->group_name());
+}
+
+TEST_F(FieldTrialTest, SetForcedTurnFeatureOn) {
+  const char kTrialName[] = "SetForcedTurnFeatureOn";
+  const char kExtraGroupName[] = "Extra";
+  ASSERT_FALSE(FieldTrialList::TrialExists(kTrialName));
+
+  // Simulate a server-side (forced) config that turns the feature on when the
+  // original hard-coded config had it disabled.
+  scoped_refptr<FieldTrial> forced_trial =
+      CreateFieldTrial(kTrialName, 100, kDefaultGroupName, nullptr);
+  forced_trial->AppendGroup(kExtraGroupName, 100);
+  forced_trial->SetForced();
+
+  int default_group = -1;
+  scoped_refptr<FieldTrial> client_trial =
+      CreateFieldTrial(kTrialName, 100, kDefaultGroupName, &default_group);
+  const int extra_group = client_trial->AppendGroup(kExtraGroupName, 0);
+  EXPECT_NE(default_group, extra_group);
+
+  EXPECT_FALSE(client_trial->group_reported_);
+  EXPECT_EQ(extra_group, client_trial->group());
+  EXPECT_TRUE(client_trial->group_reported_);
+  EXPECT_EQ(kExtraGroupName, client_trial->group_name());
+}
+
+TEST_F(FieldTrialTest, SetForcedTurnFeatureOff) {
+  const char kTrialName[] = "SetForcedTurnFeatureOff";
+  const char kExtraGroupName[] = "Extra";
+  ASSERT_FALSE(FieldTrialList::TrialExists(kTrialName));
+
+  // Simulate a server-side (forced) config that turns the feature off when the
+  // original hard-coded config had it enabled.
+  scoped_refptr<FieldTrial> forced_trial =
+      CreateFieldTrial(kTrialName, 100, kDefaultGroupName, nullptr);
+  forced_trial->AppendGroup(kExtraGroupName, 0);
+  forced_trial->SetForced();
+
+  int default_group = -1;
+  scoped_refptr<FieldTrial> client_trial =
+      CreateFieldTrial(kTrialName, 100, kDefaultGroupName, &default_group);
+  const int extra_group = client_trial->AppendGroup(kExtraGroupName, 100);
+  EXPECT_NE(default_group, extra_group);
+
+  EXPECT_FALSE(client_trial->group_reported_);
+  EXPECT_EQ(default_group, client_trial->group());
+  EXPECT_TRUE(client_trial->group_reported_);
+  EXPECT_EQ(kDefaultGroupName, client_trial->group_name());
+}
+
+TEST_F(FieldTrialTest, SetForcedChangeDefault_Default) {
+  const char kTrialName[] = "SetForcedDefaultGroupChange";
+  const char kGroupAName[] = "A";
+  const char kGroupBName[] = "B";
+  ASSERT_FALSE(FieldTrialList::TrialExists(kTrialName));
+
+  // Simulate a server-side (forced) config that switches which group is default
+  // and ensures that the non-forced code receives the correct group numbers.
+  scoped_refptr<FieldTrial> forced_trial =
+      CreateFieldTrial(kTrialName, 100, kGroupAName, nullptr);
+  forced_trial->AppendGroup(kGroupBName, 100);
+  forced_trial->SetForced();
+
+  int default_group = -1;
+  scoped_refptr<FieldTrial> client_trial =
+      CreateFieldTrial(kTrialName, 100, kGroupBName, &default_group);
+  const int extra_group = client_trial->AppendGroup(kGroupAName, 50);
+  EXPECT_NE(default_group, extra_group);
+
+  EXPECT_FALSE(client_trial->group_reported_);
+  EXPECT_EQ(default_group, client_trial->group());
+  EXPECT_TRUE(client_trial->group_reported_);
+  EXPECT_EQ(kGroupBName, client_trial->group_name());
+}
+
+TEST_F(FieldTrialTest, SetForcedChangeDefault_NonDefault) {
+  const char kTrialName[] = "SetForcedDefaultGroupChange";
+  const char kGroupAName[] = "A";
+  const char kGroupBName[] = "B";
+  ASSERT_FALSE(FieldTrialList::TrialExists(kTrialName));
+
+  // Simulate a server-side (forced) config that switches which group is default
+  // and ensures that the non-forced code receives the correct group numbers.
+  scoped_refptr<FieldTrial> forced_trial =
+      CreateFieldTrial(kTrialName, 100, kGroupAName, nullptr);
+  forced_trial->AppendGroup(kGroupBName, 0);
+  forced_trial->SetForced();
+
+  int default_group = -1;
+  scoped_refptr<FieldTrial> client_trial =
+      CreateFieldTrial(kTrialName, 100, kGroupBName, &default_group);
+  const int extra_group = client_trial->AppendGroup(kGroupAName, 50);
+  EXPECT_NE(default_group, extra_group);
+
+  EXPECT_FALSE(client_trial->group_reported_);
+  EXPECT_EQ(extra_group, client_trial->group());
+  EXPECT_TRUE(client_trial->group_reported_);
+  EXPECT_EQ(kGroupAName, client_trial->group_name());
+}
+
+TEST_F(FieldTrialTest, Observe) {
+  const char kTrialName[] = "TrialToObserve1";
+  const char kSecondaryGroupName[] = "SecondaryGroup";
+
+  TestFieldTrialObserver observer(TestFieldTrialObserver::ASYNCHRONOUS);
+  int default_group = -1;
+  scoped_refptr<FieldTrial> trial =
+      CreateFieldTrial(kTrialName, 100, kDefaultGroupName, &default_group);
+  const int secondary_group = trial->AppendGroup(kSecondaryGroupName, 50);
+  const int chosen_group = trial->group();
+  EXPECT_TRUE(chosen_group == default_group || chosen_group == secondary_group);
+
+  // Observers are called asynchronously.
+  EXPECT_TRUE(observer.trial_name().empty());
+  EXPECT_TRUE(observer.group_name().empty());
+  RunLoop().RunUntilIdle();
+
+  EXPECT_EQ(kTrialName, observer.trial_name());
+  if (chosen_group == default_group)
+    EXPECT_EQ(kDefaultGroupName, observer.group_name());
+  else
+    EXPECT_EQ(kSecondaryGroupName, observer.group_name());
+}
+
+TEST_F(FieldTrialTest, SynchronousObserver) {
+  const char kTrialName[] = "TrialToObserve1";
+  const char kSecondaryGroupName[] = "SecondaryGroup";
+
+  TestFieldTrialObserver observer(TestFieldTrialObserver::SYNCHRONOUS);
+  int default_group = -1;
+  scoped_refptr<FieldTrial> trial =
+      CreateFieldTrial(kTrialName, 100, kDefaultGroupName, &default_group);
+  const int secondary_group = trial->AppendGroup(kSecondaryGroupName, 50);
+  const int chosen_group = trial->group();
+  EXPECT_TRUE(chosen_group == default_group || chosen_group == secondary_group);
+
+  // The observer should be notified synchronously by the group() call.
+  EXPECT_EQ(kTrialName, observer.trial_name());
+  if (chosen_group == default_group)
+    EXPECT_EQ(kDefaultGroupName, observer.group_name());
+  else
+    EXPECT_EQ(kSecondaryGroupName, observer.group_name());
+}
+
+TEST_F(FieldTrialTest, ObserveDisabled) {
+  const char kTrialName[] = "TrialToObserve2";
+
+  TestFieldTrialObserver observer(TestFieldTrialObserver::ASYNCHRONOUS);
+  int default_group = -1;
+  scoped_refptr<FieldTrial> trial =
+      CreateFieldTrial(kTrialName, 100, kDefaultGroupName, &default_group);
+  trial->AppendGroup("A", 25);
+  trial->AppendGroup("B", 25);
+  trial->AppendGroup("C", 25);
+  trial->Disable();
+
+  // Observer shouldn't be notified of a disabled trial.
+  RunLoop().RunUntilIdle();
+  EXPECT_TRUE(observer.trial_name().empty());
+  EXPECT_TRUE(observer.group_name().empty());
+
+  // Observer shouldn't be notified even after a |group()| call.
+  EXPECT_EQ(default_group, trial->group());
+  RunLoop().RunUntilIdle();
+  EXPECT_TRUE(observer.trial_name().empty());
+  EXPECT_TRUE(observer.group_name().empty());
+}
+
+TEST_F(FieldTrialTest, ObserveForcedDisabled) {
+  const char kTrialName[] = "TrialToObserve3";
+
+  TestFieldTrialObserver observer(TestFieldTrialObserver::ASYNCHRONOUS);
+  int default_group = -1;
+  scoped_refptr<FieldTrial> trial =
+      CreateFieldTrial(kTrialName, 100, kDefaultGroupName, &default_group);
+  trial->AppendGroup("A", 25);
+  trial->AppendGroup("B", 25);
+  trial->AppendGroup("C", 25);
+  trial->SetForced();
+  trial->Disable();
+
+  // Observer shouldn't be notified of a disabled trial, even when forced.
+  RunLoop().RunUntilIdle();
+  EXPECT_TRUE(observer.trial_name().empty());
+  EXPECT_TRUE(observer.group_name().empty());
+
+  // Observer shouldn't be notified even after a |group()| call.
+  EXPECT_EQ(default_group, trial->group());
+  RunLoop().RunUntilIdle();
+  EXPECT_TRUE(observer.trial_name().empty());
+  EXPECT_TRUE(observer.group_name().empty());
+}
+
+TEST_F(FieldTrialTest, DisabledTrialNotActive) {
+  const char kTrialName[] = "DisabledTrial";
+  ASSERT_FALSE(FieldTrialList::TrialExists(kTrialName));
+
+  scoped_refptr<FieldTrial> trial =
+      CreateFieldTrial(kTrialName, 100, kDefaultGroupName, nullptr);
+  trial->AppendGroup("X", 50);
+  trial->Disable();
+
+  // Ensure the trial is not listed as active.
+  FieldTrial::ActiveGroups active_groups;
+  FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
+  EXPECT_TRUE(active_groups.empty());
+
+  // Ensure the trial is not listed in the |StatesToString()| result.
+  std::string states;
+  FieldTrialList::StatesToString(&states);
+  EXPECT_TRUE(states.empty());
+}
+
+TEST_F(FieldTrialTest, ExpirationYearNotExpired) {
+  const char kTrialName[] = "NotExpired";
+  const char kGroupName[] = "Group2";
+  const int kProbability = 100;
+  ASSERT_FALSE(FieldTrialList::TrialExists(kTrialName));
+
+  scoped_refptr<FieldTrial> trial =
+      CreateFieldTrial(kTrialName, kProbability, kDefaultGroupName, nullptr);
+  trial->AppendGroup(kGroupName, kProbability);
+  EXPECT_EQ(kGroupName, trial->group_name());
+}
+
+TEST_F(FieldTrialTest, FloatBoundariesGiveEqualGroupSizes) {
+  const int kBucketCount = 100;
+
+  // Try each boundary value |i / 100.0| as the entropy value.
+  for (int i = 0; i < kBucketCount; ++i) {
+    const double entropy = i / static_cast<double>(kBucketCount);
+
+    scoped_refptr<FieldTrial> trial(
+        new FieldTrial("test", kBucketCount, "default", entropy));
+    for (int j = 0; j < kBucketCount; ++j)
+      trial->AppendGroup(IntToString(j), 1);
+
+    EXPECT_EQ(IntToString(i), trial->group_name());
+  }
+}
+
+TEST_F(FieldTrialTest, DoesNotSurpassTotalProbability) {
+  const double kEntropyValue = 1.0 - 1e-9;
+  ASSERT_LT(kEntropyValue, 1.0);
+
+  scoped_refptr<FieldTrial> trial(
+      new FieldTrial("test", 2, "default", kEntropyValue));
+  trial->AppendGroup("1", 1);
+  trial->AppendGroup("2", 1);
+
+  EXPECT_EQ("2", trial->group_name());
+}
+
+TEST_F(FieldTrialTest, CreateSimulatedFieldTrial) {
+  const char kTrialName[] = "CreateSimulatedFieldTrial";
+  ASSERT_FALSE(FieldTrialList::TrialExists(kTrialName));
+
+  // Different cases to test, e.g. default vs. non-default group being chosen.
+  struct {
+    double entropy_value;
+    const char* expected_group;
+  } test_cases[] = {
+    { 0.4, "A" },
+    { 0.85, "B" },
+    { 0.95, kDefaultGroupName },
+  };
+
+  for (size_t i = 0; i < arraysize(test_cases); ++i) {
+    TestFieldTrialObserver observer(TestFieldTrialObserver::ASYNCHRONOUS);
+    scoped_refptr<FieldTrial> trial(
+       FieldTrial::CreateSimulatedFieldTrial(kTrialName, 100, kDefaultGroupName,
+                                             test_cases[i].entropy_value));
+    trial->AppendGroup("A", 80);
+    trial->AppendGroup("B", 10);
+    EXPECT_EQ(test_cases[i].expected_group, trial->group_name());
+
+    // Field trial shouldn't have been registered with the list.
+    EXPECT_FALSE(FieldTrialList::TrialExists(kTrialName));
+    EXPECT_EQ(0u, FieldTrialList::GetFieldTrialCount());
+
+    // Observer shouldn't have been notified.
+    RunLoop().RunUntilIdle();
+    EXPECT_TRUE(observer.trial_name().empty());
+
+    // The trial shouldn't be in the active set of trials.
+    FieldTrial::ActiveGroups active_groups;
+    FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
+    EXPECT_TRUE(active_groups.empty());
+
+    // The trial shouldn't be listed in the |StatesToString()| result.
+    std::string states;
+    FieldTrialList::StatesToString(&states);
+    EXPECT_TRUE(states.empty());
+  }
+}
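+
+// The expected groups above follow from the cumulative probability ranges
+// implied by the AppendGroup calls: entropy in [0.0, 0.8) selects "A",
+// [0.8, 0.9) selects "B", and [0.9, 1.0) falls through to the default group.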
+
+TEST(FieldTrialTestWithoutList, StatesStringFormat) {
+  std::string save_string;
+
+  // Scoping the first FieldTrialList, as we need another one to test the
+  // importing function.
+  {
+    FieldTrialList field_trial_list(nullptr);
+    scoped_refptr<FieldTrial> trial =
+        CreateFieldTrial("Abc", 10, "Default some name", nullptr);
+    trial->AppendGroup("cba", 10);
+    trial->group();
+    scoped_refptr<FieldTrial> trial2 =
+        CreateFieldTrial("Xyz", 10, "Default xxx", nullptr);
+    trial2->AppendGroup("zyx", 10);
+    trial2->group();
+    scoped_refptr<FieldTrial> trial3 =
+        CreateFieldTrial("zzz", 10, "default", nullptr);
+
+    FieldTrialList::AllStatesToString(&save_string, false);
+  }
+
+  // Starting with a new blank FieldTrialList.
+  FieldTrialList field_trial_list(nullptr);
+  ASSERT_TRUE(field_trial_list.CreateTrialsFromString(save_string,
+                                                      std::set<std::string>()));
+
+  FieldTrial::ActiveGroups active_groups;
+  field_trial_list.GetActiveFieldTrialGroups(&active_groups);
+  ASSERT_EQ(2U, active_groups.size());
+  EXPECT_EQ("Abc", active_groups[0].trial_name);
+  EXPECT_EQ("cba", active_groups[0].group_name);
+  EXPECT_EQ("Xyz", active_groups[1].trial_name);
+  EXPECT_EQ("zyx", active_groups[1].group_name);
+  EXPECT_TRUE(field_trial_list.TrialExists("zzz"));
+}
+
+TEST(FieldTrialDeathTest, OneTimeRandomizedTrialWithoutFieldTrialList) {
+  // Trying to instantiate a one-time randomized field trial before the
+  // FieldTrialList is created should crash.
+  EXPECT_DEATH_IF_SUPPORTED(
+      FieldTrialList::FactoryGetFieldTrial(
+          "OneTimeRandomizedTrialWithoutFieldTrialList", 100, kDefaultGroupName,
+          FieldTrialList::kNoExpirationYear, 1, 1,
+          FieldTrial::ONE_TIME_RANDOMIZED, nullptr),
+      "");
+}
+
+TEST(FieldTrialListTest, TestCopyFieldTrialStateToFlags) {
+  constexpr char kFieldTrialHandleSwitch[] = "test-field-trial-handle";
+  constexpr char kEnableFeaturesSwitch[] = "test-enable-features";
+  constexpr char kDisableFeaturesSwitch[] = "test-disable-features";
+
+  FieldTrialList field_trial_list(std::make_unique<MockEntropyProvider>());
+
+  std::unique_ptr<FeatureList> feature_list(new FeatureList);
+  feature_list->InitializeFromCommandLine("A,B", "C");
+
+  FieldTrial* trial = FieldTrialList::CreateFieldTrial("Trial1", "Group1");
+  feature_list->RegisterFieldTrialOverride(
+      "MyFeature", FeatureList::OVERRIDE_ENABLE_FEATURE, trial);
+
+  test::ScopedFeatureList scoped_feature_list;
+  scoped_feature_list.InitWithFeatureList(std::move(feature_list));
+
+  FilePath test_file_path = FilePath(FILE_PATH_LITERAL("Program"));
+  CommandLine command_line = CommandLine(test_file_path);
+
+  FieldTrialList::CopyFieldTrialStateToFlags(
+      kFieldTrialHandleSwitch, kEnableFeaturesSwitch, kDisableFeaturesSwitch,
+      &command_line);
+  EXPECT_TRUE(command_line.HasSwitch(kFieldTrialHandleSwitch));
+
+  // Explicitly specified enabled/disabled features should be passed through.
+  EXPECT_EQ("A,B", command_line.GetSwitchValueASCII(kEnableFeaturesSwitch));
+  EXPECT_EQ("C", command_line.GetSwitchValueASCII(kDisableFeaturesSwitch));
+}
+
+TEST(FieldTrialListTest, InstantiateAllocator) {
+  test::ScopedFeatureList scoped_feature_list;
+  scoped_feature_list.Init();
+
+  FieldTrialList field_trial_list(nullptr);
+  FieldTrialList::CreateFieldTrial("Trial1", "Group1");
+
+  FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded();
+  void* memory = field_trial_list.field_trial_allocator_->shared_memory();
+  size_t used = field_trial_list.field_trial_allocator_->used();
+
+  // Ensure that the function is idempotent.
+  FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded();
+  void* new_memory = field_trial_list.field_trial_allocator_->shared_memory();
+  size_t new_used = field_trial_list.field_trial_allocator_->used();
+  EXPECT_EQ(memory, new_memory);
+  EXPECT_EQ(used, new_used);
+}
+
+TEST(FieldTrialListTest, AddTrialsToAllocator) {
+  std::string save_string;
+  SharedMemoryHandle handle;
+
+  // Scoping the first FieldTrialList, as we need another one to test that it
+  // matches.
+  {
+    test::ScopedFeatureList scoped_feature_list;
+    scoped_feature_list.Init();
+
+    FieldTrialList field_trial_list(nullptr);
+    FieldTrialList::CreateFieldTrial("Trial1", "Group1");
+    FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded();
+    FieldTrialList::AllStatesToString(&save_string, false);
+    handle = SharedMemory::DuplicateHandle(
+        field_trial_list.field_trial_allocator_->shared_memory()->handle());
+  }
+
+  FieldTrialList field_trial_list2(nullptr);
+  std::unique_ptr<SharedMemory> shm(new SharedMemory(handle, true));
+  // 4 KiB is enough to hold only the trials created for this test.
+  shm.get()->Map(4 << 10);
+  FieldTrialList::CreateTrialsFromSharedMemory(std::move(shm));
+  std::string check_string;
+  FieldTrialList::AllStatesToString(&check_string, false);
+  EXPECT_EQ(save_string, check_string);
+}
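+
+// The pattern above (duplicate the allocator's shared-memory handle before
+// the first FieldTrialList goes out of scope, map a small region in a fresh
+// list, then rebuild the trials from it) is reused by the shared-memory
+// tests that follow.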
+
+TEST(FieldTrialListTest, DoNotAddSimulatedFieldTrialsToAllocator) {
+  constexpr char kTrialName[] = "trial";
+  SharedMemoryHandle handle;
+  {
+    test::ScopedFeatureList scoped_feature_list;
+    scoped_feature_list.Init();
+
+    // Create a simulated trial and a real trial and call group() on them, which
+    // should only add the real trial to the field trial allocator.
+    FieldTrialList field_trial_list(nullptr);
+    FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded();
+
+    // This shouldn't add to the allocator.
+    scoped_refptr<FieldTrial> simulated_trial =
+        FieldTrial::CreateSimulatedFieldTrial(kTrialName, 100, "Simulated",
+                                              0.95);
+    simulated_trial->group();
+
+    // This should add to the allocator.
+    FieldTrial* real_trial =
+        FieldTrialList::CreateFieldTrial(kTrialName, "Real");
+    real_trial->group();
+
+    handle = SharedMemory::DuplicateHandle(
+        field_trial_list.field_trial_allocator_->shared_memory()->handle());
+  }
+
+  // Check that there's only one entry in the allocator.
+  FieldTrialList field_trial_list2(nullptr);
+  std::unique_ptr<SharedMemory> shm(new SharedMemory(handle, true));
+  // 4 KiB is enough to hold only the trials created for this test.
+  shm.get()->Map(4 << 10);
+  FieldTrialList::CreateTrialsFromSharedMemory(std::move(shm));
+  std::string check_string;
+  FieldTrialList::AllStatesToString(&check_string, false);
+  ASSERT_EQ(check_string.find("Simulated"), std::string::npos);
+}
+
+TEST(FieldTrialListTest, AssociateFieldTrialParams) {
+  test::ScopedFeatureList scoped_feature_list;
+  scoped_feature_list.Init();
+
+  std::string trial_name("Trial1");
+  std::string group_name("Group1");
+
+  // Create a field trial with some params.
+  FieldTrialList field_trial_list(nullptr);
+  FieldTrialList::CreateFieldTrial(trial_name, group_name);
+  std::map<std::string, std::string> params;
+  params["key1"] = "value1";
+  params["key2"] = "value2";
+  FieldTrialParamAssociator::GetInstance()->AssociateFieldTrialParams(
+      trial_name, group_name, params);
+  FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded();
+
+  // Clear all cached params from the associator.
+  FieldTrialParamAssociator::GetInstance()->ClearAllCachedParamsForTesting();
+  // Check that the params have been cleared from the cache.
+  std::map<std::string, std::string> cached_params;
+  FieldTrialParamAssociator::GetInstance()->GetFieldTrialParamsWithoutFallback(
+      trial_name, group_name, &cached_params);
+  EXPECT_EQ(0U, cached_params.size());
+
+  // Check that we fetch the param from shared memory properly.
+  std::map<std::string, std::string> new_params;
+  FieldTrialParamAssociator::GetInstance()->GetFieldTrialParams(trial_name,
+                                                                &new_params);
+  EXPECT_EQ("value1", new_params["key1"]);
+  EXPECT_EQ("value2", new_params["key2"]);
+  EXPECT_EQ(2U, new_params.size());
+}
+
+TEST(FieldTrialListTest, ClearParamsFromSharedMemory) {
+  std::string trial_name("Trial1");
+  std::string group_name("Group1");
+
+  SharedMemoryHandle handle;
+  {
+    test::ScopedFeatureList scoped_feature_list;
+    scoped_feature_list.Init();
+
+    // Create a field trial with some params.
+    FieldTrialList field_trial_list(nullptr);
+    FieldTrial* trial =
+        FieldTrialList::CreateFieldTrial(trial_name, group_name);
+    std::map<std::string, std::string> params;
+    params["key1"] = "value1";
+    params["key2"] = "value2";
+    FieldTrialParamAssociator::GetInstance()->AssociateFieldTrialParams(
+        trial_name, group_name, params);
+    FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded();
+
+    // Clear all params from the associator AND shared memory. The allocated
+    // segments should be different.
+    FieldTrial::FieldTrialRef old_ref = trial->ref_;
+    FieldTrialParamAssociator::GetInstance()->ClearAllParamsForTesting();
+    FieldTrial::FieldTrialRef new_ref = trial->ref_;
+    EXPECT_NE(old_ref, new_ref);
+
+    // Check that there are no params associated with the field trial anymore.
+    std::map<std::string, std::string> new_params;
+    FieldTrialParamAssociator::GetInstance()->GetFieldTrialParams(trial_name,
+                                                                  &new_params);
+    EXPECT_EQ(0U, new_params.size());
+
+    // Now duplicate the handle so we can easily check that the trial is still
+    // in shared memory via AllStatesToString.
+    handle = SharedMemory::DuplicateHandle(
+        field_trial_list.field_trial_allocator_->shared_memory()->handle());
+  }
+
+  // Check that we have the trial.
+  FieldTrialList field_trial_list2(nullptr);
+  std::unique_ptr<SharedMemory> shm(new SharedMemory(handle, true));
+  // 4 KiB is enough to hold only the trials created for this test.
+  shm.get()->Map(4 << 10);
+  FieldTrialList::CreateTrialsFromSharedMemory(std::move(shm));
+  std::string check_string;
+  FieldTrialList::AllStatesToString(&check_string, false);
+  EXPECT_EQ("*Trial1/Group1/", check_string);
+}
+
+TEST(FieldTrialListTest, DumpAndFetchFromSharedMemory) {
+  std::string trial_name("Trial1");
+  std::string group_name("Group1");
+
+  // Create a field trial with some params.
+  FieldTrialList field_trial_list(nullptr);
+  FieldTrialList::CreateFieldTrial(trial_name, group_name);
+  std::map<std::string, std::string> params;
+  params["key1"] = "value1";
+  params["key2"] = "value2";
+  FieldTrialParamAssociator::GetInstance()->AssociateFieldTrialParams(
+      trial_name, group_name, params);
+
+  std::unique_ptr<SharedMemory> shm(new SharedMemory());
+  // 4 KiB is enough to hold only the trials created for this test.
+  shm.get()->CreateAndMapAnonymous(4 << 10);
+  // We _could_ use PersistentMemoryAllocator; this just takes fewer params.
+  SharedPersistentMemoryAllocator allocator(std::move(shm), 1, "", false);
+
+  // Dump and subsequently retrieve the field trial to |allocator|.
+  FieldTrialList::DumpAllFieldTrialsToPersistentAllocator(&allocator);
+  std::vector<const FieldTrial::FieldTrialEntry*> entries =
+      FieldTrialList::GetAllFieldTrialsFromPersistentAllocator(allocator);
+
+  // Check that we have the entry we put in.
+  EXPECT_EQ(1u, entries.size());
+  const FieldTrial::FieldTrialEntry* entry = entries[0];
+
+  // Check that the trial and group names match.
+  StringPiece shm_trial_name;
+  StringPiece shm_group_name;
+  entry->GetTrialAndGroupName(&shm_trial_name, &shm_group_name);
+  EXPECT_EQ(trial_name, shm_trial_name);
+  EXPECT_EQ(group_name, shm_group_name);
+
+  // Check that the params match.
+  std::map<std::string, std::string> shm_params;
+  entry->GetParams(&shm_params);
+  EXPECT_EQ(2u, shm_params.size());
+  EXPECT_EQ("value1", shm_params["key1"]);
+  EXPECT_EQ("value2", shm_params["key2"]);
+}
+
+#if !defined(OS_NACL)
+TEST(FieldTrialListTest, SerializeSharedMemoryHandleMetadata) {
+  std::unique_ptr<SharedMemory> shm(new SharedMemory());
+  shm->CreateAndMapAnonymous(4 << 10);
+
+  std::string serialized =
+      FieldTrialList::SerializeSharedMemoryHandleMetadata(shm->handle());
+#if defined(OS_WIN) || defined(OS_FUCHSIA)
+  SharedMemoryHandle deserialized =
+      FieldTrialList::DeserializeSharedMemoryHandleMetadata(serialized);
+#else
+  // Use a valid-looking arbitrary number for the file descriptor. It's not
+  // being used in this unittest, but needs to pass sanity checks in the
+  // handle's constructor.
+  SharedMemoryHandle deserialized =
+      FieldTrialList::DeserializeSharedMemoryHandleMetadata(42, serialized);
+#endif
+  EXPECT_EQ(deserialized.GetGUID(), shm->handle().GetGUID());
+  EXPECT_FALSE(deserialized.GetGUID().is_empty());
+}
+#endif  // !defined(OS_NACL)
+
+// Verify that the field trial shared memory handle is really read-only, and
+// does not allow writable mappings. The test is disabled on NaCl, Windows and
+// Fuchsia, which don't support/implement GetFieldTrialHandle(). For Fuchsia,
+// see crbug.com/752368.
+#if !defined(OS_NACL) && !defined(OS_WIN) && !defined(OS_FUCHSIA)
+TEST(FieldTrialListTest, CheckReadOnlySharedMemoryHandle) {
+  FieldTrialList field_trial_list(nullptr);
+  FieldTrialList::CreateFieldTrial("Trial1", "Group1");
+
+  test::ScopedFeatureList scoped_feature_list;
+  scoped_feature_list.Init();
+
+  FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded();
+
+  SharedMemoryHandle handle = FieldTrialList::GetFieldTrialHandle();
+  ASSERT_TRUE(handle.IsValid());
+
+  ASSERT_TRUE(CheckReadOnlySharedMemoryHandleForTesting(handle));
+}
+#endif  // !OS_NACL && !OS_WIN && !OS_FUCHSIA
+
+TEST_F(FieldTrialTest, TestAllParamsToString) {
+  std::string expected_output = "t1.g1:p1/v1/p2/v2";
+
+  // Create study with one group and two params.
+  std::map<std::string, std::string> params;
+  params["p1"] = "v1";
+  params["p2"] = "v2";
+  FieldTrialParamAssociator::GetInstance()->AssociateFieldTrialParams(
+      "t1", "g1", params);
+  EXPECT_EQ(
+      "", FieldTrialList::AllParamsToString(false, &MockEscapeQueryParamValue));
+
+  scoped_refptr<FieldTrial> trial1 =
+      CreateFieldTrial("t1", 100, "Default", nullptr);
+  trial1->AppendGroup("g1", 100);
+  trial1->group();
+  EXPECT_EQ(expected_output, FieldTrialList::AllParamsToString(
+                                  false, &MockEscapeQueryParamValue));
+
+  // Create a second study with two groups, associating params with a group
+  // other than the one that will be assigned. Those params should not appear
+  // in the output.
+  FieldTrialParamAssociator::GetInstance()->AssociateFieldTrialParams(
+      "t2", "g2", params);
+  scoped_refptr<FieldTrial> trial2 =
+      CreateFieldTrial("t2", 100, "Default", nullptr);
+  trial2->AppendGroup("g1", 100);
+  trial2->AppendGroup("g2", 0);
+  trial2->group();
+  EXPECT_EQ(expected_output, FieldTrialList::AllParamsToString(
+                                  false, &MockEscapeQueryParamValue));
+}
+
+}  // namespace base
diff --git a/base/metrics/histogram.cc b/base/metrics/histogram.cc
new file mode 100644
index 0000000..f765181
--- /dev/null
+++ b/base/metrics/histogram.cc
@@ -0,0 +1,1315 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Histogram is an object that aggregates statistics and can summarize them in
+// various forms, including ASCII graphs, HTML, and numeric form (as a
+// vector of numbers corresponding to each of the aggregating buckets).
+// See header file for details and examples.
+
+#include "base/metrics/histogram.h"
+
+#include <inttypes.h>
+#include <limits.h>
+#include <math.h>
+
+#include <algorithm>
+#include <string>
+#include <utility>
+
+#include "base/compiler_specific.h"
+#include "base/debug/alias.h"
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/metrics/dummy_histogram.h"
+#include "base/metrics/histogram_functions.h"
+#include "base/metrics/metrics_hashes.h"
+#include "base/metrics/persistent_histogram_allocator.h"
+#include "base/metrics/persistent_memory_allocator.h"
+#include "base/metrics/sample_vector.h"
+#include "base/metrics/statistics_recorder.h"
+#include "base/pickle.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/synchronization/lock.h"
+#include "base/sys_info.h"
+#include "base/values.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace {
+
+bool ReadHistogramArguments(PickleIterator* iter,
+                            std::string* histogram_name,
+                            int* flags,
+                            int* declared_min,
+                            int* declared_max,
+                            uint32_t* bucket_count,
+                            uint32_t* range_checksum) {
+  if (!iter->ReadString(histogram_name) ||
+      !iter->ReadInt(flags) ||
+      !iter->ReadInt(declared_min) ||
+      !iter->ReadInt(declared_max) ||
+      !iter->ReadUInt32(bucket_count) ||
+      !iter->ReadUInt32(range_checksum)) {
+    DLOG(ERROR) << "Pickle error decoding Histogram: " << *histogram_name;
+    return false;
+  }
+
+  // Since these fields may have come from an untrusted renderer, do additional
+  // checks above and beyond those in Histogram::Initialize().
+  if (*declared_max <= 0 ||
+      *declared_min <= 0 ||
+      *declared_max < *declared_min ||
+      INT_MAX / sizeof(HistogramBase::Count) <= *bucket_count ||
+      *bucket_count < 2) {
+    DLOG(ERROR) << "Values error decoding Histogram: " << histogram_name;
+    return false;
+  }
+
+  // We use the arguments to find or create the local version of the histogram
+  // in this process, so we need to clear any IPC flag.
+  *flags &= ~HistogramBase::kIPCSerializationSourceFlag;
+
+  return true;
+}
+
+bool ValidateRangeChecksum(const HistogramBase& histogram,
+                           uint32_t range_checksum) {
+  // Normally, |histogram| should have type HISTOGRAM or be inherited from it.
+  // However, if it's expired, it will actually be a DUMMY_HISTOGRAM.
+  // Skip the checks in that case.
+  if (histogram.GetHistogramType() == DUMMY_HISTOGRAM)
+    return true;
+  const Histogram& casted_histogram =
+      static_cast<const Histogram&>(histogram);
+
+  return casted_histogram.bucket_ranges()->checksum() == range_checksum;
+}
+
+}  // namespace
+
+typedef HistogramBase::Count Count;
+typedef HistogramBase::Sample Sample;
+
+// static
+const uint32_t Histogram::kBucketCount_MAX = 16384u;
+
+class Histogram::Factory {
+ public:
+  Factory(const std::string& name,
+          HistogramBase::Sample minimum,
+          HistogramBase::Sample maximum,
+          uint32_t bucket_count,
+          int32_t flags)
+    : Factory(name, HISTOGRAM, minimum, maximum, bucket_count, flags) {}
+
+  // Create histogram based on construction parameters. Caller takes
+  // ownership of the returned object.
+  HistogramBase* Build();
+
+ protected:
+  Factory(const std::string& name,
+          HistogramType histogram_type,
+          HistogramBase::Sample minimum,
+          HistogramBase::Sample maximum,
+          uint32_t bucket_count,
+          int32_t flags)
+    : name_(name),
+      histogram_type_(histogram_type),
+      minimum_(minimum),
+      maximum_(maximum),
+      bucket_count_(bucket_count),
+      flags_(flags) {}
+
+  // Create a BucketRanges structure appropriate for this histogram.
+  virtual BucketRanges* CreateRanges() {
+    BucketRanges* ranges = new BucketRanges(bucket_count_ + 1);
+    Histogram::InitializeBucketRanges(minimum_, maximum_, ranges);
+    return ranges;
+  }
+
+  // Allocate the correct Histogram object off the heap (in case persistent
+  // memory is not available).
+  virtual std::unique_ptr<HistogramBase> HeapAlloc(const BucketRanges* ranges) {
+    return WrapUnique(
+        new Histogram(GetPermanentName(name_), minimum_, maximum_, ranges));
+  }
+
+  // Perform any required datafill on the just-created histogram.  If
+  // overridden, be sure to call the "super" version -- this method may not
+  // always remain empty.
+  virtual void FillHistogram(HistogramBase* histogram) {}
+
+  // These values are protected (instead of private) because they need to
+  // be accessible to methods of sub-classes in order to avoid passing
+  // unnecessary parameters everywhere.
+  const std::string& name_;
+  const HistogramType histogram_type_;
+  HistogramBase::Sample minimum_;
+  HistogramBase::Sample maximum_;
+  uint32_t bucket_count_;
+  int32_t flags_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(Factory);
+};
+
+HistogramBase* Histogram::Factory::Build() {
+  HistogramBase* histogram = StatisticsRecorder::FindHistogram(name_);
+  if (!histogram) {
+    // TODO(gayane): |HashMetricName()| is called again in Histogram
+    // constructor. Refactor code to avoid the additional call.
+    bool should_record =
+        StatisticsRecorder::ShouldRecordHistogram(HashMetricName(name_));
+    if (!should_record)
+      return DummyHistogram::GetInstance();
+    // To avoid racy destruction at shutdown, the following will be leaked.
+    const BucketRanges* created_ranges = CreateRanges();
+    const BucketRanges* registered_ranges =
+        StatisticsRecorder::RegisterOrDeleteDuplicateRanges(created_ranges);
+
+    // In most cases, the bucket-count, minimum, and maximum values are known
+    // when the code is written and so are passed in explicitly. In other
+    // cases (such as with a CustomHistogram), they are calculated dynamically
+    // at run-time. In the latter case, those ctor parameters are zero and
+    // the actual values are extracted from the result of CreateRanges().
+    if (bucket_count_ == 0) {
+      bucket_count_ = static_cast<uint32_t>(registered_ranges->bucket_count());
+      minimum_ = registered_ranges->range(1);
+      maximum_ = registered_ranges->range(bucket_count_ - 1);
+    }
+    DCHECK_EQ(minimum_, registered_ranges->range(1));
+    DCHECK_EQ(maximum_, registered_ranges->range(bucket_count_ - 1));
+
+    // Try to create the histogram using a "persistent" allocator. As of
+    // 2016-02-25, the availability of such is controlled by a base::Feature
+    // that is off by default. If the allocator doesn't exist or if
+    // allocating from it fails, code below will allocate the histogram from
+    // the process heap.
+    PersistentHistogramAllocator::Reference histogram_ref = 0;
+    std::unique_ptr<HistogramBase> tentative_histogram;
+    PersistentHistogramAllocator* allocator = GlobalHistogramAllocator::Get();
+    if (allocator) {
+      tentative_histogram = allocator->AllocateHistogram(
+          histogram_type_,
+          name_,
+          minimum_,
+          maximum_,
+          registered_ranges,
+          flags_,
+          &histogram_ref);
+    }
+
+    // Handle the case where no persistent allocator is present or the
+    // persistent allocation fails (perhaps because it is full).
+    if (!tentative_histogram) {
+      DCHECK(!histogram_ref);  // Should never have been set.
+      DCHECK(!allocator);  // Shouldn't have failed.
+      flags_ &= ~HistogramBase::kIsPersistent;
+      tentative_histogram = HeapAlloc(registered_ranges);
+      tentative_histogram->SetFlags(flags_);
+    }
+
+    FillHistogram(tentative_histogram.get());
+
+    // Register this histogram with the StatisticsRecorder. Keep a copy of
+    // the pointer value to tell later whether the locally created histogram
+    // was registered or deleted. The type is "void" because it could point
+    // to released memory after the following line.
+    const void* tentative_histogram_ptr = tentative_histogram.get();
+    histogram = StatisticsRecorder::RegisterOrDeleteDuplicate(
+        tentative_histogram.release());
+
+    // Persistent histograms need some follow-up processing.
+    if (histogram_ref) {
+      allocator->FinalizeHistogram(histogram_ref,
+                                   histogram == tentative_histogram_ptr);
+    }
+  }
+
+  if (histogram_type_ != histogram->GetHistogramType() ||
+      (bucket_count_ != 0 && !histogram->HasConstructionArguments(
+                                 minimum_, maximum_, bucket_count_))) {
+    // The construction arguments do not match the existing histogram.  This can
+    // come about if an extension updates in the middle of a chrome run and has
+    // changed one of them, or simply by bad code within Chrome itself.  A NULL
+    // return would cause Chrome to crash; better to just record it for later
+    // analysis.
+    UmaHistogramSparse("Histogram.MismatchedConstructionArguments",
+                       static_cast<Sample>(HashMetricName(name_)));
+    DLOG(ERROR) << "Histogram " << name_
+                << " has mismatched construction arguments";
+    return DummyHistogram::GetInstance();
+  }
+  return histogram;
+}
+
+HistogramBase* Histogram::FactoryGet(const std::string& name,
+                                     Sample minimum,
+                                     Sample maximum,
+                                     uint32_t bucket_count,
+                                     int32_t flags) {
+  bool valid_arguments =
+      InspectConstructionArguments(name, &minimum, &maximum, &bucket_count);
+  DCHECK(valid_arguments);
+
+  return Factory(name, minimum, maximum, bucket_count, flags).Build();
+}
+
+HistogramBase* Histogram::FactoryTimeGet(const std::string& name,
+                                         TimeDelta minimum,
+                                         TimeDelta maximum,
+                                         uint32_t bucket_count,
+                                         int32_t flags) {
+  return FactoryGet(name, static_cast<Sample>(minimum.InMilliseconds()),
+                    static_cast<Sample>(maximum.InMilliseconds()), bucket_count,
+                    flags);
+}
+
+HistogramBase* Histogram::FactoryMicrosecondsTimeGet(const std::string& name,
+                                                     TimeDelta minimum,
+                                                     TimeDelta maximum,
+                                                     uint32_t bucket_count,
+                                                     int32_t flags) {
+  return FactoryGet(name, static_cast<Sample>(minimum.InMicroseconds()),
+                    static_cast<Sample>(maximum.InMicroseconds()), bucket_count,
+                    flags);
+}
+
+HistogramBase* Histogram::FactoryGet(const char* name,
+                                     Sample minimum,
+                                     Sample maximum,
+                                     uint32_t bucket_count,
+                                     int32_t flags) {
+  return FactoryGet(std::string(name), minimum, maximum, bucket_count, flags);
+}
+
+HistogramBase* Histogram::FactoryTimeGet(const char* name,
+                                         TimeDelta minimum,
+                                         TimeDelta maximum,
+                                         uint32_t bucket_count,
+                                         int32_t flags) {
+  return FactoryTimeGet(std::string(name), minimum, maximum, bucket_count,
+                        flags);
+}
+
+HistogramBase* Histogram::FactoryMicrosecondsTimeGet(const char* name,
+                                                     TimeDelta minimum,
+                                                     TimeDelta maximum,
+                                                     uint32_t bucket_count,
+                                                     int32_t flags) {
+  return FactoryMicrosecondsTimeGet(std::string(name), minimum, maximum,
+                                    bucket_count, flags);
+}
+
+std::unique_ptr<HistogramBase> Histogram::PersistentCreate(
+    const char* name,
+    Sample minimum,
+    Sample maximum,
+    const BucketRanges* ranges,
+    const DelayedPersistentAllocation& counts,
+    const DelayedPersistentAllocation& logged_counts,
+    HistogramSamples::Metadata* meta,
+    HistogramSamples::Metadata* logged_meta) {
+  return WrapUnique(new Histogram(name, minimum, maximum, ranges, counts,
+                                  logged_counts, meta, logged_meta));
+}
+
+// Calculate what range of values are held in each bucket.
+// We have to be careful that we don't pick a ratio between starting points in
+// consecutive buckets that is so small that the integer bounds end up equal
+// (effectively making one bucket get no values).  We need to avoid:
+//   ranges(i) == ranges(i + 1)
+// To avoid that, we use fine-grained (narrow) bucket widths for as long as
+// necessary, until the ratio moves us along at least 2 units at a time.  From
+// that bucket onward we use the exponential growth of buckets.
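+//
+// Illustrative example (not part of the original comment): for minimum = 1,
+// maximum = 64 and an 8-bucket histogram, this computes the boundaries
+// 0, 1, 2, 4, 8, 16, 32, 64, kSampleType_MAX -- the doubling layout shown in
+// the header comment of histogram.h.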
+//
+// static
+void Histogram::InitializeBucketRanges(Sample minimum,
+                                       Sample maximum,
+                                       BucketRanges* ranges) {
+  double log_max = log(static_cast<double>(maximum));
+  double log_ratio;
+  double log_next;
+  size_t bucket_index = 1;
+  Sample current = minimum;
+  ranges->set_range(bucket_index, current);
+  size_t bucket_count = ranges->bucket_count();
+  while (bucket_count > ++bucket_index) {
+    double log_current;
+    log_current = log(static_cast<double>(current));
+    // Calculate the count'th root of the range.
+    log_ratio = (log_max - log_current) / (bucket_count - bucket_index);
+    // See where the next bucket would start.
+    log_next = log_current + log_ratio;
+    Sample next;
+    next = static_cast<int>(std::round(exp(log_next)));
+    if (next > current)
+      current = next;
+    else
+      ++current;  // Just do a narrow bucket, and keep trying.
+    ranges->set_range(bucket_index, current);
+  }
+  ranges->set_range(ranges->bucket_count(), HistogramBase::kSampleType_MAX);
+  ranges->ResetChecksum();
+}
+
+// static
+const int Histogram::kCommonRaceBasedCountMismatch = 5;
+
+uint32_t Histogram::FindCorruption(const HistogramSamples& samples) const {
+  int inconsistencies = NO_INCONSISTENCIES;
+  Sample previous_range = -1;  // Bottom range is always 0.
+  for (uint32_t index = 0; index < bucket_count(); ++index) {
+    int new_range = ranges(index);
+    if (previous_range >= new_range)
+      inconsistencies |= BUCKET_ORDER_ERROR;
+    previous_range = new_range;
+  }
+
+  if (!bucket_ranges()->HasValidChecksum())
+    inconsistencies |= RANGE_CHECKSUM_ERROR;
+
+  int64_t delta64 = samples.redundant_count() - samples.TotalCount();
+  if (delta64 != 0) {
+    int delta = static_cast<int>(delta64);
+    if (delta != delta64)
+      delta = INT_MAX;  // Flag all giant errors as INT_MAX.
+    if (delta > 0) {
+      if (delta > kCommonRaceBasedCountMismatch)
+        inconsistencies |= COUNT_HIGH_ERROR;
+    } else {
+      DCHECK_GT(0, delta);
+      if (-delta > kCommonRaceBasedCountMismatch)
+        inconsistencies |= COUNT_LOW_ERROR;
+    }
+  }
+  return inconsistencies;
+}
+
+const BucketRanges* Histogram::bucket_ranges() const {
+  return unlogged_samples_->bucket_ranges();
+}
+
+Sample Histogram::declared_min() const {
+  const BucketRanges* ranges = bucket_ranges();
+  if (ranges->bucket_count() < 2)
+    return -1;
+  return ranges->range(1);
+}
+
+Sample Histogram::declared_max() const {
+  const BucketRanges* ranges = bucket_ranges();
+  if (ranges->bucket_count() < 2)
+    return -1;
+  return ranges->range(ranges->bucket_count() - 1);
+}
+
+Sample Histogram::ranges(uint32_t i) const {
+  return bucket_ranges()->range(i);
+}
+
+uint32_t Histogram::bucket_count() const {
+  return static_cast<uint32_t>(bucket_ranges()->bucket_count());
+}
+
+// static
+bool Histogram::InspectConstructionArguments(StringPiece name,
+                                             Sample* minimum,
+                                             Sample* maximum,
+                                             uint32_t* bucket_count) {
+  // Defensive code for backward compatibility.
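+  // Illustrative example (not from the original source): a caller passing
+  // (minimum = 0, maximum = 100, bucket_count = 200) gets the minimum raised
+  // to 1 and bucket_count clamped to maximum - minimum + 2 = 101; the bad
+  // arguments are then reported via Histogram.BadConstructionArguments below.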
+  if (*minimum < 1) {
+    DVLOG(1) << "Histogram: " << name << " has bad minimum: " << *minimum;
+    *minimum = 1;
+  }
+  if (*maximum >= kSampleType_MAX) {
+    DVLOG(1) << "Histogram: " << name << " has bad maximum: " << *maximum;
+    *maximum = kSampleType_MAX - 1;
+  }
+  if (*bucket_count >= kBucketCount_MAX) {
+    DVLOG(1) << "Histogram: " << name << " has bad bucket_count: "
+             << *bucket_count;
+    *bucket_count = kBucketCount_MAX - 1;
+  }
+
+  bool check_okay = true;
+
+  if (*minimum > *maximum) {
+    check_okay = false;
+    std::swap(*minimum, *maximum);
+  }
+  if (*maximum == *minimum) {
+    check_okay = false;
+    *maximum = *minimum + 1;
+  }
+  if (*bucket_count < 3) {
+    check_okay = false;
+    *bucket_count = 3;
+  }
+  // Very high bucket counts are wasteful. Use a sparse histogram instead.
+  // A value of 10002 equals a user-supplied 10k plus the 2 underflow/overflow
+  // buckets.
+  constexpr uint32_t kMaxBucketCount = 10002;
+  if (*bucket_count > kMaxBucketCount) {
+    check_okay = false;
+    *bucket_count = kMaxBucketCount;
+  }
+  if (*bucket_count > static_cast<uint32_t>(*maximum - *minimum + 2)) {
+    check_okay = false;
+    *bucket_count = static_cast<uint32_t>(*maximum - *minimum + 2);
+  }
+
+  if (!check_okay) {
+    UmaHistogramSparse("Histogram.BadConstructionArguments",
+                       static_cast<Sample>(HashMetricName(name)));
+  }
+
+  return check_okay;
+}
+
+uint64_t Histogram::name_hash() const {
+  return unlogged_samples_->id();
+}
+
+HistogramType Histogram::GetHistogramType() const {
+  return HISTOGRAM;
+}
+
+bool Histogram::HasConstructionArguments(Sample expected_minimum,
+                                         Sample expected_maximum,
+                                         uint32_t expected_bucket_count) const {
+  return (expected_bucket_count == bucket_count() &&
+          expected_minimum == declared_min() &&
+          expected_maximum == declared_max());
+}
+
+void Histogram::Add(int value) {
+  AddCount(value, 1);
+}
+
+void Histogram::AddCount(int value, int count) {
+  DCHECK_EQ(0, ranges(0));
+  DCHECK_EQ(kSampleType_MAX, ranges(bucket_count()));
+
+  if (value > kSampleType_MAX - 1)
+    value = kSampleType_MAX - 1;
+  if (value < 0)
+    value = 0;
+  if (count <= 0) {
+    NOTREACHED();
+    return;
+  }
+  unlogged_samples_->Accumulate(value, count);
+
+  FindAndRunCallback(value);
+}
+
+std::unique_ptr<HistogramSamples> Histogram::SnapshotSamples() const {
+  return SnapshotAllSamples();
+}
+
+std::unique_ptr<HistogramSamples> Histogram::SnapshotDelta() {
+#if DCHECK_IS_ON()
+  DCHECK(!final_delta_created_);
+#endif
+
+  // The code below has subtle thread-safety guarantees! All changes to
+  // the underlying SampleVectors use atomic integer operations, which guarantee
+  // eventual consistency, but do not guarantee full synchronization between
+  // different entries in the SampleVector. In particular, this means that
+  // concurrent updates to the histogram might result in the reported sum not
+  // matching the individual bucket counts; or there being some buckets that are
+  // logically updated "together", but end up being only partially updated when
+  // a snapshot is captured. Note that this is why it's important to subtract
+  // exactly the snapshotted unlogged samples, rather than simply resetting the
+  // vector: this way, the next snapshot will include any concurrent updates
+  // missed by the current snapshot.
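+  //
+  // Illustrative example (not from the original comment): if 7 samples were
+  // recorded since the previous call, the returned snapshot contains exactly
+  // those 7; they are subtracted from |unlogged_samples_| and added to
+  // |logged_samples_|, so the next delta starts from zero.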
+
+  std::unique_ptr<HistogramSamples> snapshot = SnapshotUnloggedSamples();
+  unlogged_samples_->Subtract(*snapshot);
+  logged_samples_->Add(*snapshot);
+
+  return snapshot;
+}
+
+std::unique_ptr<HistogramSamples> Histogram::SnapshotFinalDelta() const {
+#if DCHECK_IS_ON()
+  DCHECK(!final_delta_created_);
+  final_delta_created_ = true;
+#endif
+
+  return SnapshotUnloggedSamples();
+}
+
+void Histogram::AddSamples(const HistogramSamples& samples) {
+  unlogged_samples_->Add(samples);
+}
+
+bool Histogram::AddSamplesFromPickle(PickleIterator* iter) {
+  return unlogged_samples_->AddFromPickle(iter);
+}
+
+// The following methods provide a graphical histogram display.
+void Histogram::WriteHTMLGraph(std::string* output) const {
+  // TBD(jar) Write a nice HTML bar chart, with divs and mouse-overs, etc.
+  output->append("<PRE>");
+  WriteAsciiImpl(true, "<br>", output);
+  output->append("</PRE>");
+}
+
+void Histogram::WriteAscii(std::string* output) const {
+  WriteAsciiImpl(true, "\n", output);
+}
+
+void Histogram::ValidateHistogramContents() const {
+  CHECK(unlogged_samples_);
+  CHECK(unlogged_samples_->bucket_ranges());
+  CHECK(logged_samples_);
+  CHECK(logged_samples_->bucket_ranges());
+#if !defined(OS_NACL)
+  if (0U == logged_samples_->id() && (flags() & kIsPersistent)) {
+    // ID should never be zero. If it is, then it's probably because the
+    // entire memory page was cleared. Check that this is true.
+    // TODO(bcwhite): Remove this.
+    // https://bugs.chromium.org/p/chromium/issues/detail?id=836875
+    size_t page_size = SysInfo::VMAllocationGranularity();
+    if (page_size == 0)
+      page_size = 1024;
+    const int* address = reinterpret_cast<const int*>(
+        reinterpret_cast<uintptr_t>(logged_samples_->meta()) &
+        ~(page_size - 1));
+    // Check a couple places so there is evidence in a crash report as to
+    // where it was non-zero.
+    CHECK_EQ(0, address[0]);
+    CHECK_EQ(0, address[1]);
+    CHECK_EQ(0, address[2]);
+    CHECK_EQ(0, address[4]);
+    CHECK_EQ(0, address[8]);
+    CHECK_EQ(0, address[16]);
+    CHECK_EQ(0, address[32]);
+    CHECK_EQ(0, address[64]);
+    CHECK_EQ(0, address[128]);
+    CHECK_EQ(0, address[256]);
+    CHECK_EQ(0, address[512]);
+    // Now check every address.
+    for (size_t i = 0; i < page_size / sizeof(int); ++i)
+      CHECK_EQ(0, address[i]);
+  }
+#endif
+  CHECK_NE(0U, logged_samples_->id());
+}
+
+void Histogram::SerializeInfoImpl(Pickle* pickle) const {
+  DCHECK(bucket_ranges()->HasValidChecksum());
+  pickle->WriteString(histogram_name());
+  pickle->WriteInt(flags());
+  pickle->WriteInt(declared_min());
+  pickle->WriteInt(declared_max());
+  pickle->WriteUInt32(bucket_count());
+  pickle->WriteUInt32(bucket_ranges()->checksum());
+}
+
+// TODO(bcwhite): Remove minimum/maximum parameters from here and call chain.
+Histogram::Histogram(const char* name,
+                     Sample minimum,
+                     Sample maximum,
+                     const BucketRanges* ranges)
+    : HistogramBase(name) {
+  DCHECK(ranges) << name << ": " << minimum << "-" << maximum;
+  unlogged_samples_.reset(new SampleVector(HashMetricName(name), ranges));
+  logged_samples_.reset(new SampleVector(unlogged_samples_->id(), ranges));
+}
+
+Histogram::Histogram(const char* name,
+                     Sample minimum,
+                     Sample maximum,
+                     const BucketRanges* ranges,
+                     const DelayedPersistentAllocation& counts,
+                     const DelayedPersistentAllocation& logged_counts,
+                     HistogramSamples::Metadata* meta,
+                     HistogramSamples::Metadata* logged_meta)
+    : HistogramBase(name) {
+  DCHECK(ranges) << name << ": " << minimum << "-" << maximum;
+  unlogged_samples_.reset(
+      new PersistentSampleVector(HashMetricName(name), ranges, meta, counts));
+  logged_samples_.reset(new PersistentSampleVector(
+      unlogged_samples_->id(), ranges, logged_meta, logged_counts));
+}
+
+Histogram::~Histogram() = default;
+
+bool Histogram::PrintEmptyBucket(uint32_t index) const {
+  return true;
+}
+
+// Use the actual bucket widths (like a linear histogram) until the widths get
+// over some transition value, and then use that transition width.  Exponentials
+// get so big so fast (and we don't expect to see a lot of entries in the large
+// buckets), so we need this to make it possible to see what is going on and
+// not have 0-graphical-height buckets.
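+//
+// Illustrative example (not from the original comment): with kTransitionWidth
+// at 5, the bucket [8,16) has width 8, which is capped to 5, so 30 samples in
+// that bucket graph as 30 / 5 = 6 units.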
+double Histogram::GetBucketSize(Count current, uint32_t i) const {
+  DCHECK_GT(ranges(i + 1), ranges(i));
+  static const double kTransitionWidth = 5;
+  double denominator = ranges(i + 1) - ranges(i);
+  if (denominator > kTransitionWidth)
+    denominator = kTransitionWidth;  // Stop trying to normalize.
+  return current / denominator;
+}
+
+const std::string Histogram::GetAsciiBucketRange(uint32_t i) const {
+  return GetSimpleAsciiBucketRange(ranges(i));
+}
+
+//------------------------------------------------------------------------------
+// Private methods
+
+// static
+HistogramBase* Histogram::DeserializeInfoImpl(PickleIterator* iter) {
+  std::string histogram_name;
+  int flags;
+  int declared_min;
+  int declared_max;
+  uint32_t bucket_count;
+  uint32_t range_checksum;
+
+  if (!ReadHistogramArguments(iter, &histogram_name, &flags, &declared_min,
+                              &declared_max, &bucket_count, &range_checksum)) {
+    return nullptr;
+  }
+
+  // Find or create the local version of the histogram in this process.
+  HistogramBase* histogram = Histogram::FactoryGet(
+      histogram_name, declared_min, declared_max, bucket_count, flags);
+  if (!histogram)
+    return nullptr;
+
+  // The serialized histogram might be corrupted.
+  if (!ValidateRangeChecksum(*histogram, range_checksum))
+    return nullptr;
+
+  return histogram;
+}
+
+std::unique_ptr<SampleVector> Histogram::SnapshotAllSamples() const {
+  std::unique_ptr<SampleVector> samples = SnapshotUnloggedSamples();
+  samples->Add(*logged_samples_);
+  return samples;
+}
+
+std::unique_ptr<SampleVector> Histogram::SnapshotUnloggedSamples() const {
+  std::unique_ptr<SampleVector> samples(
+      new SampleVector(unlogged_samples_->id(), bucket_ranges()));
+  samples->Add(*unlogged_samples_);
+  return samples;
+}
+
+void Histogram::WriteAsciiImpl(bool graph_it,
+                               const std::string& newline,
+                               std::string* output) const {
+  // Get local (stack) copies of all effectively volatile class data so that we
+  // are consistent across our output activities.
+  std::unique_ptr<SampleVector> snapshot = SnapshotAllSamples();
+  Count sample_count = snapshot->TotalCount();
+
+  WriteAsciiHeader(*snapshot, sample_count, output);
+  output->append(newline);
+
+  // Prepare to normalize graphical rendering of bucket contents.
+  double max_size = 0;
+  if (graph_it)
+    max_size = GetPeakBucketSize(*snapshot);
+
+  // Calculate space needed to print bucket range numbers.  Leave room to print
+  // nearly the largest bucket range without sliding over the histogram.
+  uint32_t largest_non_empty_bucket = bucket_count() - 1;
+  while (0 == snapshot->GetCountAtIndex(largest_non_empty_bucket)) {
+    if (0 == largest_non_empty_bucket)
+      break;  // All buckets are empty.
+    --largest_non_empty_bucket;
+  }
+
+  // Calculate largest print width needed for any of our bucket range displays.
+  size_t print_width = 1;
+  for (uint32_t i = 0; i < bucket_count(); ++i) {
+    if (snapshot->GetCountAtIndex(i)) {
+      size_t width = GetAsciiBucketRange(i).size() + 1;
+      if (width > print_width)
+        print_width = width;
+    }
+  }
+
+  int64_t remaining = sample_count;
+  int64_t past = 0;
+  // Output the actual histogram graph.
+  for (uint32_t i = 0; i < bucket_count(); ++i) {
+    Count current = snapshot->GetCountAtIndex(i);
+    if (!current && !PrintEmptyBucket(i))
+      continue;
+    remaining -= current;
+    std::string range = GetAsciiBucketRange(i);
+    output->append(range);
+    for (size_t j = 0; range.size() + j < print_width + 1; ++j)
+      output->push_back(' ');
+    if (0 == current && i < bucket_count() - 1 &&
+        0 == snapshot->GetCountAtIndex(i + 1)) {
+      while (i < bucket_count() - 1 &&
+             0 == snapshot->GetCountAtIndex(i + 1)) {
+        ++i;
+      }
+      output->append("... ");
+      output->append(newline);
+      continue;  // No reason to plot emptiness.
+    }
+    double current_size = GetBucketSize(current, i);
+    if (graph_it)
+      WriteAsciiBucketGraph(current_size, max_size, output);
+    WriteAsciiBucketContext(past, current, remaining, i, output);
+    output->append(newline);
+    past += current;
+  }
+  DCHECK_EQ(sample_count, past);
+}
+
+double Histogram::GetPeakBucketSize(const SampleVectorBase& samples) const {
+  double max = 0;
+  for (uint32_t i = 0; i < bucket_count(); ++i) {
+    double current_size = GetBucketSize(samples.GetCountAtIndex(i), i);
+    if (current_size > max)
+      max = current_size;
+  }
+  return max;
+}
+
+void Histogram::WriteAsciiHeader(const SampleVectorBase& samples,
+                                 Count sample_count,
+                                 std::string* output) const {
+  StringAppendF(output, "Histogram: %s recorded %d samples", histogram_name(),
+                sample_count);
+  if (sample_count == 0) {
+    DCHECK_EQ(samples.sum(), 0);
+  } else {
+    double mean = static_cast<double>(samples.sum()) / sample_count;
+    StringAppendF(output, ", mean = %.1f", mean);
+  }
+  if (flags())
+    StringAppendF(output, " (flags = 0x%x)", flags());
+}
+
+void Histogram::WriteAsciiBucketContext(const int64_t past,
+                                        const Count current,
+                                        const int64_t remaining,
+                                        const uint32_t i,
+                                        std::string* output) const {
+  double scaled_sum = (past + current + remaining) / 100.0;
+  WriteAsciiBucketValue(current, scaled_sum, output);
+  if (0 < i) {
+    double percentage = past / scaled_sum;
+    StringAppendF(output, " {%3.1f%%}", percentage);
+  }
+}
+
+void Histogram::GetParameters(DictionaryValue* params) const {
+  params->SetString("type", HistogramTypeToString(GetHistogramType()));
+  params->SetInteger("min", declared_min());
+  params->SetInteger("max", declared_max());
+  params->SetInteger("bucket_count", static_cast<int>(bucket_count()));
+}
+
+void Histogram::GetCountAndBucketData(Count* count,
+                                      int64_t* sum,
+                                      ListValue* buckets) const {
+  std::unique_ptr<SampleVector> snapshot = SnapshotAllSamples();
+  *count = snapshot->TotalCount();
+  *sum = snapshot->sum();
+  uint32_t index = 0;
+  for (uint32_t i = 0; i < bucket_count(); ++i) {
+    Sample count_at_index = snapshot->GetCountAtIndex(i);
+    if (count_at_index > 0) {
+      std::unique_ptr<DictionaryValue> bucket_value(new DictionaryValue());
+      bucket_value->SetInteger("low", ranges(i));
+      if (i != bucket_count() - 1)
+        bucket_value->SetInteger("high", ranges(i + 1));
+      bucket_value->SetInteger("count", count_at_index);
+      buckets->Set(index, std::move(bucket_value));
+      ++index;
+    }
+  }
+}
+
+//------------------------------------------------------------------------------
+// LinearHistogram: This histogram uses a traditional set of evenly spaced
+// buckets.
+//------------------------------------------------------------------------------
+
+class LinearHistogram::Factory : public Histogram::Factory {
+ public:
+  Factory(const std::string& name,
+          HistogramBase::Sample minimum,
+          HistogramBase::Sample maximum,
+          uint32_t bucket_count,
+          int32_t flags,
+          const DescriptionPair* descriptions)
+    : Histogram::Factory(name, LINEAR_HISTOGRAM, minimum, maximum,
+                         bucket_count, flags) {
+    descriptions_ = descriptions;
+  }
+
+ protected:
+  BucketRanges* CreateRanges() override {
+    BucketRanges* ranges = new BucketRanges(bucket_count_ + 1);
+    LinearHistogram::InitializeBucketRanges(minimum_, maximum_, ranges);
+    return ranges;
+  }
+
+  std::unique_ptr<HistogramBase> HeapAlloc(
+      const BucketRanges* ranges) override {
+    return WrapUnique(new LinearHistogram(GetPermanentName(name_), minimum_,
+                                          maximum_, ranges));
+  }
+
+  void FillHistogram(HistogramBase* base_histogram) override {
+    Histogram::Factory::FillHistogram(base_histogram);
+    // Normally, |base_histogram| should have type LINEAR_HISTOGRAM or be
+    // inherited from it. However, if it's expired, it will actually be a
+    // DUMMY_HISTOGRAM. Skip filling in that case.
+    if (base_histogram->GetHistogramType() == DUMMY_HISTOGRAM)
+      return;
+    LinearHistogram* histogram = static_cast<LinearHistogram*>(base_histogram);
+    // Set range descriptions.
+    if (descriptions_) {
+      for (int i = 0; descriptions_[i].description; ++i) {
+        histogram->bucket_description_[descriptions_[i].sample] =
+            descriptions_[i].description;
+      }
+    }
+  }
+
+ private:
+  const DescriptionPair* descriptions_;
+
+  DISALLOW_COPY_AND_ASSIGN(Factory);
+};
+
+LinearHistogram::~LinearHistogram() = default;
+
+HistogramBase* LinearHistogram::FactoryGet(const std::string& name,
+                                           Sample minimum,
+                                           Sample maximum,
+                                           uint32_t bucket_count,
+                                           int32_t flags) {
+  return FactoryGetWithRangeDescription(name, minimum, maximum, bucket_count,
+                                        flags, NULL);
+}
+
+HistogramBase* LinearHistogram::FactoryTimeGet(const std::string& name,
+                                               TimeDelta minimum,
+                                               TimeDelta maximum,
+                                               uint32_t bucket_count,
+                                               int32_t flags) {
+  return FactoryGet(name, static_cast<Sample>(minimum.InMilliseconds()),
+                    static_cast<Sample>(maximum.InMilliseconds()), bucket_count,
+                    flags);
+}
+
+HistogramBase* LinearHistogram::FactoryGet(const char* name,
+                                           Sample minimum,
+                                           Sample maximum,
+                                           uint32_t bucket_count,
+                                           int32_t flags) {
+  return FactoryGet(std::string(name), minimum, maximum, bucket_count, flags);
+}
+
+HistogramBase* LinearHistogram::FactoryTimeGet(const char* name,
+                                               TimeDelta minimum,
+                                               TimeDelta maximum,
+                                               uint32_t bucket_count,
+                                               int32_t flags) {
+  return FactoryTimeGet(std::string(name), minimum, maximum, bucket_count,
+                        flags);
+}
+
+std::unique_ptr<HistogramBase> LinearHistogram::PersistentCreate(
+    const char* name,
+    Sample minimum,
+    Sample maximum,
+    const BucketRanges* ranges,
+    const DelayedPersistentAllocation& counts,
+    const DelayedPersistentAllocation& logged_counts,
+    HistogramSamples::Metadata* meta,
+    HistogramSamples::Metadata* logged_meta) {
+  return WrapUnique(new LinearHistogram(name, minimum, maximum, ranges, counts,
+                                        logged_counts, meta, logged_meta));
+}
+
+HistogramBase* LinearHistogram::FactoryGetWithRangeDescription(
+    const std::string& name,
+    Sample minimum,
+    Sample maximum,
+    uint32_t bucket_count,
+    int32_t flags,
+    const DescriptionPair descriptions[]) {
+  bool valid_arguments = Histogram::InspectConstructionArguments(
+      name, &minimum, &maximum, &bucket_count);
+  DCHECK(valid_arguments);
+
+  return Factory(name, minimum, maximum, bucket_count, flags, descriptions)
+      .Build();
+}
+
+HistogramType LinearHistogram::GetHistogramType() const {
+  return LINEAR_HISTOGRAM;
+}
+
+LinearHistogram::LinearHistogram(const char* name,
+                                 Sample minimum,
+                                 Sample maximum,
+                                 const BucketRanges* ranges)
+    : Histogram(name, minimum, maximum, ranges) {}
+
+LinearHistogram::LinearHistogram(
+    const char* name,
+    Sample minimum,
+    Sample maximum,
+    const BucketRanges* ranges,
+    const DelayedPersistentAllocation& counts,
+    const DelayedPersistentAllocation& logged_counts,
+    HistogramSamples::Metadata* meta,
+    HistogramSamples::Metadata* logged_meta)
+    : Histogram(name,
+                minimum,
+                maximum,
+                ranges,
+                counts,
+                logged_counts,
+                meta,
+                logged_meta) {}
+
+double LinearHistogram::GetBucketSize(Count current, uint32_t i) const {
+  DCHECK_GT(ranges(i + 1), ranges(i));
+  // Adjacent buckets with different widths would have "surprisingly" many (few)
+  // samples in a histogram if we didn't normalize this way.
+  double denominator = ranges(i + 1) - ranges(i);
+  return current / denominator;
+}
+
+const std::string LinearHistogram::GetAsciiBucketRange(uint32_t i) const {
+  int range = ranges(i);
+  BucketDescriptionMap::const_iterator it = bucket_description_.find(range);
+  if (it == bucket_description_.end())
+    return Histogram::GetAsciiBucketRange(i);
+  return it->second;
+}
+
+bool LinearHistogram::PrintEmptyBucket(uint32_t index) const {
+  return bucket_description_.find(ranges(index)) == bucket_description_.end();
+}
+
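+// Illustrative result (not part of the original source): for minimum = 1,
+// maximum = 5 and bucket_count = 6, the boundaries come out as
+// 0, 1, 2, 3, 4, 5, kSampleType_MAX -- unit-width buckets plus the implicit
+// underflow and overflow buckets.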
+// static
+void LinearHistogram::InitializeBucketRanges(Sample minimum,
+                                             Sample maximum,
+                                             BucketRanges* ranges) {
+  double min = minimum;
+  double max = maximum;
+  size_t bucket_count = ranges->bucket_count();
+  for (size_t i = 1; i < bucket_count; ++i) {
+    double linear_range =
+        (min * (bucket_count - 1 - i) + max * (i - 1)) / (bucket_count - 2);
+    ranges->set_range(i, static_cast<Sample>(linear_range + 0.5));
+  }
+  ranges->set_range(ranges->bucket_count(), HistogramBase::kSampleType_MAX);
+  ranges->ResetChecksum();
+}
+
+// static
+HistogramBase* LinearHistogram::DeserializeInfoImpl(PickleIterator* iter) {
+  std::string histogram_name;
+  int flags;
+  int declared_min;
+  int declared_max;
+  uint32_t bucket_count;
+  uint32_t range_checksum;
+
+  if (!ReadHistogramArguments(iter, &histogram_name, &flags, &declared_min,
+                              &declared_max, &bucket_count, &range_checksum)) {
+    return nullptr;
+  }
+
+  HistogramBase* histogram = LinearHistogram::FactoryGet(
+      histogram_name, declared_min, declared_max, bucket_count, flags);
+  if (!histogram)
+    return nullptr;
+
+  if (!ValidateRangeChecksum(*histogram, range_checksum)) {
+    // The serialized histogram might be corrupted.
+    return nullptr;
+  }
+  return histogram;
+}
+
+//------------------------------------------------------------------------------
+// This section provides implementation for BooleanHistogram.
+//------------------------------------------------------------------------------
+
+class BooleanHistogram::Factory : public Histogram::Factory {
+ public:
+  Factory(const std::string& name, int32_t flags)
+    : Histogram::Factory(name, BOOLEAN_HISTOGRAM, 1, 2, 3, flags) {}
+
+ protected:
+  BucketRanges* CreateRanges() override {
+    BucketRanges* ranges = new BucketRanges(3 + 1);
+    LinearHistogram::InitializeBucketRanges(1, 2, ranges);
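+    // Illustrative note (not from the original source): this yields the
+    // boundaries {0, 1, 2, kSampleType_MAX}, i.e. the buckets [0,1), [1,2)
+    // and [2,MAX), so a recorded "false" (0) lands in the first bucket and
+    // "true" (1) in the second.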
+    return ranges;
+  }
+
+  std::unique_ptr<HistogramBase> HeapAlloc(
+      const BucketRanges* ranges) override {
+    return WrapUnique(new BooleanHistogram(GetPermanentName(name_), ranges));
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(Factory);
+};
+
+HistogramBase* BooleanHistogram::FactoryGet(const std::string& name,
+                                            int32_t flags) {
+  return Factory(name, flags).Build();
+}
+
+HistogramBase* BooleanHistogram::FactoryGet(const char* name, int32_t flags) {
+  return FactoryGet(std::string(name), flags);
+}
+
+std::unique_ptr<HistogramBase> BooleanHistogram::PersistentCreate(
+    const char* name,
+    const BucketRanges* ranges,
+    const DelayedPersistentAllocation& counts,
+    const DelayedPersistentAllocation& logged_counts,
+    HistogramSamples::Metadata* meta,
+    HistogramSamples::Metadata* logged_meta) {
+  return WrapUnique(new BooleanHistogram(name, ranges, counts, logged_counts,
+                                         meta, logged_meta));
+}
+
+HistogramType BooleanHistogram::GetHistogramType() const {
+  return BOOLEAN_HISTOGRAM;
+}
+
+BooleanHistogram::BooleanHistogram(const char* name, const BucketRanges* ranges)
+    : LinearHistogram(name, 1, 2, ranges) {}
+
+BooleanHistogram::BooleanHistogram(
+    const char* name,
+    const BucketRanges* ranges,
+    const DelayedPersistentAllocation& counts,
+    const DelayedPersistentAllocation& logged_counts,
+    HistogramSamples::Metadata* meta,
+    HistogramSamples::Metadata* logged_meta)
+    : LinearHistogram(name,
+                      1,
+                      2,
+                      ranges,
+                      counts,
+                      logged_counts,
+                      meta,
+                      logged_meta) {}
+
+HistogramBase* BooleanHistogram::DeserializeInfoImpl(PickleIterator* iter) {
+  std::string histogram_name;
+  int flags;
+  int declared_min;
+  int declared_max;
+  uint32_t bucket_count;
+  uint32_t range_checksum;
+
+  if (!ReadHistogramArguments(iter, &histogram_name, &flags, &declared_min,
+                              &declared_max, &bucket_count, &range_checksum)) {
+    return nullptr;
+  }
+
+  HistogramBase* histogram = BooleanHistogram::FactoryGet(
+      histogram_name, flags);
+  if (!histogram)
+    return nullptr;
+
+  if (!ValidateRangeChecksum(*histogram, range_checksum)) {
+    // The serialized histogram might be corrupted.
+    return nullptr;
+  }
+  return histogram;
+}
+
+//------------------------------------------------------------------------------
+// CustomHistogram:
+//------------------------------------------------------------------------------
+
+class CustomHistogram::Factory : public Histogram::Factory {
+ public:
+  Factory(const std::string& name,
+          const std::vector<Sample>* custom_ranges,
+          int32_t flags)
+    : Histogram::Factory(name, CUSTOM_HISTOGRAM, 0, 0, 0, flags) {
+    custom_ranges_ = custom_ranges;
+  }
+
+ protected:
+  BucketRanges* CreateRanges() override {
+    // Remove the duplicates in the custom ranges array.
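+    // Illustrative example (not from the original comment): an input of
+    // {5, 1, 2, 2} becomes {0, 1, 2, 5, kSampleType_MAX} once the guard
+    // values are appended and duplicates removed, yielding the buckets
+    // [0,1), [1,2), [2,5) and [5,MAX).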
+    std::vector<Sample> ranges = *custom_ranges_;
+    ranges.push_back(0);  // Ensure we have a zero value.
+    ranges.push_back(HistogramBase::kSampleType_MAX);
+    std::sort(ranges.begin(), ranges.end());
+    ranges.erase(std::unique(ranges.begin(), ranges.end()), ranges.end());
+
+    BucketRanges* bucket_ranges = new BucketRanges(ranges.size());
+    for (uint32_t i = 0; i < ranges.size(); i++) {
+      bucket_ranges->set_range(i, ranges[i]);
+    }
+    bucket_ranges->ResetChecksum();
+    return bucket_ranges;
+  }
+
+  std::unique_ptr<HistogramBase> HeapAlloc(
+      const BucketRanges* ranges) override {
+    return WrapUnique(new CustomHistogram(GetPermanentName(name_), ranges));
+  }
+
+ private:
+  const std::vector<Sample>* custom_ranges_;
+
+  DISALLOW_COPY_AND_ASSIGN(Factory);
+};
+
+HistogramBase* CustomHistogram::FactoryGet(
+    const std::string& name,
+    const std::vector<Sample>& custom_ranges,
+    int32_t flags) {
+  CHECK(ValidateCustomRanges(custom_ranges));
+
+  return Factory(name, &custom_ranges, flags).Build();
+}
+
+HistogramBase* CustomHistogram::FactoryGet(
+    const char* name,
+    const std::vector<Sample>& custom_ranges,
+    int32_t flags) {
+  return FactoryGet(std::string(name), custom_ranges, flags);
+}
+
+std::unique_ptr<HistogramBase> CustomHistogram::PersistentCreate(
+    const char* name,
+    const BucketRanges* ranges,
+    const DelayedPersistentAllocation& counts,
+    const DelayedPersistentAllocation& logged_counts,
+    HistogramSamples::Metadata* meta,
+    HistogramSamples::Metadata* logged_meta) {
+  return WrapUnique(new CustomHistogram(name, ranges, counts, logged_counts,
+                                        meta, logged_meta));
+}
+
+HistogramType CustomHistogram::GetHistogramType() const {
+  return CUSTOM_HISTOGRAM;
+}
+
+// static
+std::vector<Sample> CustomHistogram::ArrayToCustomEnumRanges(
+    base::span<const Sample> values) {
+  std::vector<Sample> all_values;
+  for (Sample value : values) {
+    all_values.push_back(value);
+
+    // Ensure that a guard bucket is added. If we end up with duplicate
+    // values, FactoryGet will take care of removing them.
+    all_values.push_back(value + 1);
+  }
+  return all_values;
+}
+
+CustomHistogram::CustomHistogram(const char* name, const BucketRanges* ranges)
+    : Histogram(name,
+                ranges->range(1),
+                ranges->range(ranges->bucket_count() - 1),
+                ranges) {}
+
+CustomHistogram::CustomHistogram(
+    const char* name,
+    const BucketRanges* ranges,
+    const DelayedPersistentAllocation& counts,
+    const DelayedPersistentAllocation& logged_counts,
+    HistogramSamples::Metadata* meta,
+    HistogramSamples::Metadata* logged_meta)
+    : Histogram(name,
+                ranges->range(1),
+                ranges->range(ranges->bucket_count() - 1),
+                ranges,
+                counts,
+                logged_counts,
+                meta,
+                logged_meta) {}
+
+void CustomHistogram::SerializeInfoImpl(Pickle* pickle) const {
+  Histogram::SerializeInfoImpl(pickle);
+
+  // Serialize ranges. First and last ranges are always 0 and INT_MAX, so don't
+  // write them.
+  for (uint32_t i = 1; i < bucket_ranges()->bucket_count(); ++i)
+    pickle->WriteInt(bucket_ranges()->range(i));
+}
+
+double CustomHistogram::GetBucketSize(Count current, uint32_t i) const {
+  // If this is a histogram of enum values, normalizing the bucket count
+  // by the bucket range is not helpful, so just return the bucket count.
+  return current;
+}
+
+// static
+HistogramBase* CustomHistogram::DeserializeInfoImpl(PickleIterator* iter) {
+  std::string histogram_name;
+  int flags;
+  int declared_min;
+  int declared_max;
+  uint32_t bucket_count;
+  uint32_t range_checksum;
+
+  if (!ReadHistogramArguments(iter, &histogram_name, &flags, &declared_min,
+                              &declared_max, &bucket_count, &range_checksum)) {
+    return nullptr;
+  }
+
+  // First and last ranges are not serialized.
+  std::vector<Sample> sample_ranges(bucket_count - 1);
+
+  for (uint32_t i = 0; i < sample_ranges.size(); ++i) {
+    if (!iter->ReadInt(&sample_ranges[i]))
+      return nullptr;
+  }
+
+  HistogramBase* histogram = CustomHistogram::FactoryGet(
+      histogram_name, sample_ranges, flags);
+  if (!histogram)
+    return nullptr;
+
+  if (!ValidateRangeChecksum(*histogram, range_checksum)) {
+    // The serialized histogram might be corrupted.
+    return nullptr;
+  }
+  return histogram;
+}
+
+// static
+bool CustomHistogram::ValidateCustomRanges(
+    const std::vector<Sample>& custom_ranges) {
+  bool has_valid_range = false;
+  for (uint32_t i = 0; i < custom_ranges.size(); i++) {
+    Sample sample = custom_ranges[i];
+    if (sample < 0 || sample > HistogramBase::kSampleType_MAX - 1)
+      return false;
+    if (sample != 0)
+      has_valid_range = true;
+  }
+  return has_valid_range;
+}
+
+}  // namespace base
diff --git a/base/metrics/histogram.h b/base/metrics/histogram.h
new file mode 100644
index 0000000..35d8370
--- /dev/null
+++ b/base/metrics/histogram.h
@@ -0,0 +1,559 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Histogram is an object that aggregates statistics and can summarize them in
+// various forms, including ASCII graphs, HTML, and numeric form (as a
+// vector of numbers corresponding to each of the aggregating buckets).
+
+// It supports calls to accumulate either time intervals (which are processed
+// as an integral number of milliseconds), or arbitrary integral units.
+
+// For Histogram (exponential histogram), LinearHistogram and CustomHistogram,
+// the minimum for a declared range is 1 (instead of 0), while the maximum is
+// (HistogramBase::kSampleType_MAX - 1). However, there will always be underflow
+// and overflow buckets added automatically, so a 0 bucket will always exist
+// even when a minimum value of 1 is specified.
+
+// Each use of a histogram with the same name will reference the same underlying
+// data, so it is safe to record to the same histogram from multiple locations
+// in the code. It is a runtime error if all uses of the same histogram do not
+// agree exactly in type, bucket size and range.
+
+// For Histogram and LinearHistogram, the maximum of a declared range must
+// always be strictly larger than the minimum. Zero and
+// HistogramBase::kSampleType_MAX are implicitly added as the first and last
+// ranges, so the smallest legal bucket_count is 3. However, CustomHistogram
+// can have a bucket count of 2 (when you give a custom ranges vector
+// containing only 1 range).
+// For these 3 kinds of histograms, the max bucket count is always
+// (Histogram::kBucketCount_MAX - 1).
+
+// The bucket layout of class Histogram is exponential. For example, buckets
+// might contain (sequentially) the count of values in the following intervals:
+// [0,1), [1,2), [2,4), [4,8), [8,16), [16,32), [32,64), [64,infinity)
+// That bucket allocation would actually result from construction of a histogram
+// for values between 1 and 64, with 8 buckets, such as:
+// Histogram count("some name", 1, 64, 8);
+// Note that the underflow bucket [0,1) and the overflow bucket [64,infinity)
+// are also counted by the constructor in the user supplied "bucket_count"
+// argument.
+// The above example has an exponential ratio of 2 (doubling the bucket width
+// in each consecutive bucket).  The Histogram class automatically calculates
+// the smallest ratio that it can use to construct the number of buckets
+// selected in the constructor.  As another example, if you had 50 buckets,
+// and millisecond time values from 1 to 10000, then the ratio between
+// consecutive bucket widths will be approximately the 50th
+// root of 10000.  This approach provides very fine-grained (narrow) buckets
+// at the low end of the histogram scale, but allows the histogram to cover a
+// gigantic range with the addition of very few buckets.
+
+// Usually we define and use a histogram via the macros defined in
+// base/metrics/histogram_macros.h. Note: Callers should include that header
+// directly if they only access the histogram APIs through macros.
+//
+// Macros use a pattern involving a function-static variable that is a pointer
+// to a histogram.  This static is explicitly initialized on any thread
+// that detects an uninitialized (NULL) pointer.  The potentially racy
+// initialization is not a problem as it is always set to point to the same
+// value (i.e., the FactoryGet always returns the same value).  FactoryGet
+// is also completely thread safe, which results in a completely thread safe,
+// and relatively fast, set of counters.  To avoid races at shutdown, the static
+// pointer is NOT deleted, and we leak the histograms at process termination.
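+//
+// A typical call site therefore looks like the following (the macro lives in
+// base/metrics/histogram_macros.h; this snippet is only an illustration):
+//
+//   UMA_HISTOGRAM_TIMES("App.SomeDuration", TimeTicks::Now() - start_time);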
+
+#ifndef BASE_METRICS_HISTOGRAM_H_
+#define BASE_METRICS_HISTOGRAM_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/containers/span.h"
+#include "base/gtest_prod_util.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/metrics/bucket_ranges.h"
+#include "base/metrics/histogram_base.h"
+#include "base/metrics/histogram_samples.h"
+#include "base/strings/string_piece.h"
+#include "base/time/time.h"
+
+namespace base {
+
+class BooleanHistogram;
+class CustomHistogram;
+class DelayedPersistentAllocation;
+class Histogram;
+class LinearHistogram;
+class Pickle;
+class PickleIterator;
+class SampleVector;
+class SampleVectorBase;
+
+class BASE_EXPORT Histogram : public HistogramBase {
+ public:
+  // The maximum number of buckets in a histogram, initialized to 16,384.
+  static const uint32_t kBucketCount_MAX;
+
+  typedef std::vector<Count> Counts;
+
+  ~Histogram() override;
+
+  //----------------------------------------------------------------------------
+  // For a valid histogram, input should follow these restrictions:
+  // minimum > 0 (if a minimum below 1 is specified, it will implicitly be
+  //              normalized up to 1)
+  // maximum > minimum
+  // buckets > 2 [minimum buckets needed: underflow, overflow and the range]
+  // Additionally,
+  // buckets <= (maximum - minimum + 2) - this is to ensure that we don't have
+  // more buckets than the range of numbers; having more than one bucket per
+  // value in the range would be nonsensical.
+  static HistogramBase* FactoryGet(const std::string& name,
+                                   Sample minimum,
+                                   Sample maximum,
+                                   uint32_t bucket_count,
+                                   int32_t flags);
+  static HistogramBase* FactoryTimeGet(const std::string& name,
+                                       base::TimeDelta minimum,
+                                       base::TimeDelta maximum,
+                                       uint32_t bucket_count,
+                                       int32_t flags);
+  static HistogramBase* FactoryMicrosecondsTimeGet(const std::string& name,
+                                                   base::TimeDelta minimum,
+                                                   base::TimeDelta maximum,
+                                                   uint32_t bucket_count,
+                                                   int32_t flags);
+
+  // Overloads of the above functions that take a const char* |name| param, to
+  // avoid code bloat from the std::string constructor being inlined into call
+  // sites.
+  static HistogramBase* FactoryGet(const char* name,
+                                   Sample minimum,
+                                   Sample maximum,
+                                   uint32_t bucket_count,
+                                   int32_t flags);
+  static HistogramBase* FactoryTimeGet(const char* name,
+                                       base::TimeDelta minimum,
+                                       base::TimeDelta maximum,
+                                       uint32_t bucket_count,
+                                       int32_t flags);
+  static HistogramBase* FactoryMicrosecondsTimeGet(const char* name,
+                                                   base::TimeDelta minimum,
+                                                   base::TimeDelta maximum,
+                                                   uint32_t bucket_count,
+                                                   int32_t flags);
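+
+  // Illustrative use of the factories above (hypothetical metric name; note
+  // that |bucket_count| includes the implicit underflow and overflow
+  // buckets):
+  //   base::HistogramBase* histogram = base::Histogram::FactoryGet(
+  //       "My.Latency", 1, 10000, 50,
+  //       base::HistogramBase::kUmaTargetedHistogramFlag);
+  //   histogram->Add(latency_ms);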
+
+  // Create a histogram using data in persistent storage.
+  static std::unique_ptr<HistogramBase> PersistentCreate(
+      const char* name,
+      Sample minimum,
+      Sample maximum,
+      const BucketRanges* ranges,
+      const DelayedPersistentAllocation& counts,
+      const DelayedPersistentAllocation& logged_counts,
+      HistogramSamples::Metadata* meta,
+      HistogramSamples::Metadata* logged_meta);
+
+  static void InitializeBucketRanges(Sample minimum,
+                                     Sample maximum,
+                                     BucketRanges* ranges);
+
+  // This constant is for FindCorruption. Since snapshots of histograms are
+  // taken asynchronously relative to sampling, and our counting code currently
+  // does not prevent race conditions, it is pretty likely that we'll catch a
+  // redundant count that doesn't match the sample count.  We allow for a
+  // certain amount of slop before flagging this as an inconsistency. Even with
+  // an inconsistency, we'll snapshot it again (for UMA in about a half hour),
+  // so we'll eventually get the data, if it was not the result of a corruption.
+  static const int kCommonRaceBasedCountMismatch;
+
+  // Check to see if bucket ranges, counts and tallies in the snapshot are
+  // consistent with the bucket ranges and checksums in our histogram.  This
+  // can produce a false alarm if a race occurred while reading the data
+  // during a snapshot, but should otherwise report no inconsistencies
+  // (unless we have memory over-writes, or DRAM failures). Flag definitions
+  // are located under "enum Inconsistency" in base/metrics/histogram_base.h.
+  uint32_t FindCorruption(const HistogramSamples& samples) const override;
+
+  //----------------------------------------------------------------------------
+  // Accessors for factory construction, serialization and testing.
+  //----------------------------------------------------------------------------
+  const BucketRanges* bucket_ranges() const;
+  Sample declared_min() const;
+  Sample declared_max() const;
+  virtual Sample ranges(uint32_t i) const;
+  virtual uint32_t bucket_count() const;
+
+  // This function validates histogram construction arguments. It returns
+  // false if any of the arguments are bad, but also corrects them so they
+  // work without crashing on non-dcheck builds.
+  // Note: Currently it allows some bad input, e.g. 0 as minimum, and silently
+  // converts it to good input: 1.
+  // TODO(bcwhite): Use false returns to create "sink" histograms so that bad
+  // data doesn't create confusion on the servers.
+  static bool InspectConstructionArguments(StringPiece name,
+                                           Sample* minimum,
+                                           Sample* maximum,
+                                           uint32_t* bucket_count);
+
+  // HistogramBase implementation:
+  uint64_t name_hash() const override;
+  HistogramType GetHistogramType() const override;
+  bool HasConstructionArguments(Sample expected_minimum,
+                                Sample expected_maximum,
+                                uint32_t expected_bucket_count) const override;
+  void Add(Sample value) override;
+  void AddCount(Sample value, int count) override;
+  std::unique_ptr<HistogramSamples> SnapshotSamples() const override;
+  std::unique_ptr<HistogramSamples> SnapshotDelta() override;
+  std::unique_ptr<HistogramSamples> SnapshotFinalDelta() const override;
+  void AddSamples(const HistogramSamples& samples) override;
+  bool AddSamplesFromPickle(base::PickleIterator* iter) override;
+  void WriteHTMLGraph(std::string* output) const override;
+  void WriteAscii(std::string* output) const override;
+
+  // Validates the histogram contents and CHECKs on errors.
+  // TODO(bcwhite): Remove this after https://crbug/836875.
+  void ValidateHistogramContents() const override;
+
+ protected:
+  // This class, defined entirely within the .cc file, contains all the
+  // common logic for building a Histogram and can be overridden by more
+  // specific types to alter details of how the creation is done. It is
+  // defined as an embedded class (rather than an anonymous one) so it
+  // can access the protected constructors.
+  class Factory;
+
+  // |ranges| should contain the underflow and overflow buckets. See top
+  // comments for example.
+  Histogram(const char* name,
+            Sample minimum,
+            Sample maximum,
+            const BucketRanges* ranges);
+
+  // Traditionally, histograms allocate their own memory for the bucket
+  // vector but "shared" histograms use memory regions allocated from a
+  // special memory segment that is passed in here.  It is assumed that
+  // the life of this memory is managed externally and exceeds the lifetime
+  // of this object. Practically, this memory is never released until the
+  // process exits and the OS cleans it up.
+  Histogram(const char* name,
+            Sample minimum,
+            Sample maximum,
+            const BucketRanges* ranges,
+            const DelayedPersistentAllocation& counts,
+            const DelayedPersistentAllocation& logged_counts,
+            HistogramSamples::Metadata* meta,
+            HistogramSamples::Metadata* logged_meta);
+
+  // HistogramBase implementation:
+  void SerializeInfoImpl(base::Pickle* pickle) const override;
+
+  // Method to override to skip the display of the i'th bucket if it's empty.
+  virtual bool PrintEmptyBucket(uint32_t index) const;
+
+  // Get normalized size, relative to the ranges(i).
+  virtual double GetBucketSize(Count current, uint32_t i) const;
+
+  // Return a string description of what goes in a given bucket.
+  // Most commonly this is the numeric value, but in derived classes it may
+  // be a name (or string description) given to the bucket.
+  virtual const std::string GetAsciiBucketRange(uint32_t it) const;
+
+ private:
+  // Allow tests to corrupt our innards for testing purposes.
+  FRIEND_TEST_ALL_PREFIXES(HistogramTest, BoundsTest);
+  FRIEND_TEST_ALL_PREFIXES(HistogramTest, BucketPlacementTest);
+  FRIEND_TEST_ALL_PREFIXES(HistogramTest, CorruptSampleCounts);
+
+  friend class StatisticsRecorder;  // To allow it to delete duplicates.
+  friend class StatisticsRecorderTest;
+
+  friend BASE_EXPORT HistogramBase* DeserializeHistogramInfo(
+      base::PickleIterator* iter);
+  static HistogramBase* DeserializeInfoImpl(base::PickleIterator* iter);
+
+  // Create a snapshot containing all samples (both logged and unlogged).
+  // Implementation of SnapshotSamples method with a more specific type for
+  // internal use.
+  std::unique_ptr<SampleVector> SnapshotAllSamples() const;
+
+  // Create a copy of unlogged samples.
+  std::unique_ptr<SampleVector> SnapshotUnloggedSamples() const;
+
+  //----------------------------------------------------------------------------
+  // Helpers for emitting ASCII graphics.  Each method appends data to output.
+
+  void WriteAsciiImpl(bool graph_it,
+                      const std::string& newline,
+                      std::string* output) const;
+
+  // Find out how large (graphically) the largest bucket will appear to be.
+  double GetPeakBucketSize(const SampleVectorBase& samples) const;
+
+  // Write a common header message describing this histogram.
+  void WriteAsciiHeader(const SampleVectorBase& samples,
+                        Count sample_count,
+                        std::string* output) const;
+
+  // Write information about the previous, current, and next buckets,
+  // such as the cumulative percentage.
+  void WriteAsciiBucketContext(const int64_t past,
+                               const Count current,
+                               const int64_t remaining,
+                               const uint32_t i,
+                               std::string* output) const;
+
+  // WriteJSON calls these.
+  void GetParameters(DictionaryValue* params) const override;
+
+  void GetCountAndBucketData(Count* count,
+                             int64_t* sum,
+                             ListValue* buckets) const override;
+
+  // Samples that have not yet been logged with SnapshotDelta().
+  std::unique_ptr<SampleVectorBase> unlogged_samples_;
+
+  // Accumulation of all samples that have been logged with SnapshotDelta().
+  std::unique_ptr<SampleVectorBase> logged_samples_;
+
+#if DCHECK_IS_ON()  // Don't waste memory if it won't be used.
+  // Flag to indicate if PrepareFinalDelta has been previously called. It is
+  // used to DCHECK that a final delta is not created multiple times.
+  mutable bool final_delta_created_ = false;
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(Histogram);
+};
+
+//------------------------------------------------------------------------------
+
+// LinearHistogram is a more traditional histogram, with evenly spaced
+// buckets.
+class BASE_EXPORT LinearHistogram : public Histogram {
+ public:
+  ~LinearHistogram() override;
+
+  // The minimum should start from 1; 0 as a minimum is invalid because 0 is
+  // the implicit default underflow bucket.
+  static HistogramBase* FactoryGet(const std::string& name,
+                                   Sample minimum,
+                                   Sample maximum,
+                                   uint32_t bucket_count,
+                                   int32_t flags);
+  static HistogramBase* FactoryTimeGet(const std::string& name,
+                                       TimeDelta minimum,
+                                       TimeDelta maximum,
+                                       uint32_t bucket_count,
+                                       int32_t flags);
+
+  // Overloads of the above two functions that take a const char* |name| param,
+  // to avoid code bloat from the std::string constructor being inlined into
+  // call sites.
+  static HistogramBase* FactoryGet(const char* name,
+                                   Sample minimum,
+                                   Sample maximum,
+                                   uint32_t bucket_count,
+                                   int32_t flags);
+  static HistogramBase* FactoryTimeGet(const char* name,
+                                       TimeDelta minimum,
+                                       TimeDelta maximum,
+                                       uint32_t bucket_count,
+                                       int32_t flags);
+
+  // Create a histogram using data in persistent storage.
+  static std::unique_ptr<HistogramBase> PersistentCreate(
+      const char* name,
+      Sample minimum,
+      Sample maximum,
+      const BucketRanges* ranges,
+      const DelayedPersistentAllocation& counts,
+      const DelayedPersistentAllocation& logged_counts,
+      HistogramSamples::Metadata* meta,
+      HistogramSamples::Metadata* logged_meta);
+
+  struct DescriptionPair {
+    Sample sample;
+    const char* description;  // Null means end of a list of pairs.
+  };
+
+  // Create a LinearHistogram and store a list of number/text values for use in
+  // writing the histogram graph.
+  // |descriptions| can be NULL, which means there are no special descriptions
+  // to set. If it's not NULL, the last element in the array must have a NULL
+  // in its "description" field.
+  static HistogramBase* FactoryGetWithRangeDescription(
+      const std::string& name,
+      Sample minimum,
+      Sample maximum,
+      uint32_t bucket_count,
+      int32_t flags,
+      const DescriptionPair descriptions[]);
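+
+  // Illustrative descriptions array (hypothetical values; note the NULL
+  // terminator required in the last element):
+  //   static const DescriptionPair kPairs[] = {
+  //       {1, "low"}, {50, "medium"}, {100, "high"}, {0, nullptr}};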
+
+  static void InitializeBucketRanges(Sample minimum,
+                                     Sample maximum,
+                                     BucketRanges* ranges);
+
+  // Overridden from Histogram:
+  HistogramType GetHistogramType() const override;
+
+ protected:
+  class Factory;
+
+  LinearHistogram(const char* name,
+                  Sample minimum,
+                  Sample maximum,
+                  const BucketRanges* ranges);
+
+  LinearHistogram(const char* name,
+                  Sample minimum,
+                  Sample maximum,
+                  const BucketRanges* ranges,
+                  const DelayedPersistentAllocation& counts,
+                  const DelayedPersistentAllocation& logged_counts,
+                  HistogramSamples::Metadata* meta,
+                  HistogramSamples::Metadata* logged_meta);
+
+  double GetBucketSize(Count current, uint32_t i) const override;
+
+  // If we have a description for a bucket, then return that.  Otherwise
+  // let the parent class provide a (numeric) description.
+  const std::string GetAsciiBucketRange(uint32_t i) const override;
+
+  // Skips printing an empty bucket when it has a description, so the numeric
+  // range is not shown for it.
+  bool PrintEmptyBucket(uint32_t index) const override;
+
+ private:
+  friend BASE_EXPORT HistogramBase* DeserializeHistogramInfo(
+      base::PickleIterator* iter);
+  static HistogramBase* DeserializeInfoImpl(base::PickleIterator* iter);
+
+  // For some ranges, we store a printable description of a bucket range.
+  // If there is no description, then GetAsciiBucketRange() uses parent class
+  // to provide a description.
+  typedef std::map<Sample, std::string> BucketDescriptionMap;
+  BucketDescriptionMap bucket_description_;
+
+  DISALLOW_COPY_AND_ASSIGN(LinearHistogram);
+};
+
+//------------------------------------------------------------------------------
+
+// BooleanHistogram is a histogram for booleans.
+class BASE_EXPORT BooleanHistogram : public LinearHistogram {
+ public:
+  static HistogramBase* FactoryGet(const std::string& name, int32_t flags);
+
+  // Overload of the above function that takes a const char* |name| param,
+  // to avoid code bloat from the std::string constructor being inlined into
+  // call sites.
+  static HistogramBase* FactoryGet(const char* name, int32_t flags);
+
+  // Create a histogram using data in persistent storage.
+  static std::unique_ptr<HistogramBase> PersistentCreate(
+      const char* name,
+      const BucketRanges* ranges,
+      const DelayedPersistentAllocation& counts,
+      const DelayedPersistentAllocation& logged_counts,
+      HistogramSamples::Metadata* meta,
+      HistogramSamples::Metadata* logged_meta);
+
+  HistogramType GetHistogramType() const override;
+
+ protected:
+  class Factory;
+
+ private:
+  BooleanHistogram(const char* name, const BucketRanges* ranges);
+  BooleanHistogram(const char* name,
+                   const BucketRanges* ranges,
+                   const DelayedPersistentAllocation& counts,
+                   const DelayedPersistentAllocation& logged_counts,
+                   HistogramSamples::Metadata* meta,
+                   HistogramSamples::Metadata* logged_meta);
+
+  friend BASE_EXPORT HistogramBase* DeserializeHistogramInfo(
+      base::PickleIterator* iter);
+  static HistogramBase* DeserializeInfoImpl(base::PickleIterator* iter);
+
+  DISALLOW_COPY_AND_ASSIGN(BooleanHistogram);
+};
+
+//------------------------------------------------------------------------------
+
+// CustomHistogram is a histogram for a set of custom integers.
+class BASE_EXPORT CustomHistogram : public Histogram {
+ public:
+  // |custom_ranges| contains a vector of limits on ranges. Each limit should
+  // be > 0 and < kSampleType_MAX. (Currently 0 is still accepted for backward
+  // compatibility). The limits can be unordered or contain duplicates, but
+  // clients should not depend on this.
+  static HistogramBase* FactoryGet(const std::string& name,
+                                   const std::vector<Sample>& custom_ranges,
+                                   int32_t flags);
+
+  // Overload of the above function that takes a const char* |name| param,
+  // to avoid code bloat from the std::string constructor being inlined into
+  // call sites.
+  static HistogramBase* FactoryGet(const char* name,
+                                   const std::vector<Sample>& custom_ranges,
+                                   int32_t flags);
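+
+  // Illustrative use (hypothetical metric name and ranges):
+  //   std::vector<base::HistogramBase::Sample> ranges = {16, 32, 64};
+  //   base::HistogramBase* histogram = base::CustomHistogram::FactoryGet(
+  //       "My.BufferSize", ranges,
+  //       base::HistogramBase::kUmaTargetedHistogramFlag);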
+
+  // Create a histogram using data in persistent storage.
+  static std::unique_ptr<HistogramBase> PersistentCreate(
+      const char* name,
+      const BucketRanges* ranges,
+      const DelayedPersistentAllocation& counts,
+      const DelayedPersistentAllocation& logged_counts,
+      HistogramSamples::Metadata* meta,
+      HistogramSamples::Metadata* logged_meta);
+
+  // Overridden from Histogram:
+  HistogramType GetHistogramType() const override;
+
+  // Helper method for transforming an array of valid enumeration values
+  // to the std::vector<int> expected by UMA_HISTOGRAM_CUSTOM_ENUMERATION.
+  // This function ensures that a guard bucket exists right after any
+  // valid sample value (unless the next higher sample is also a valid value),
+  // so that invalid samples never fall into the same bucket as valid samples.
+  static std::vector<Sample> ArrayToCustomEnumRanges(
+      base::span<const Sample> values);
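+
+  // For example (illustrative, assuming the guard-bucket behavior described
+  // above): the values {1, 5} would yield the ranges {1, 2, 5, 6}, so an
+  // invalid sample such as 3 can never share a bucket with the valid value 1.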
+
+ protected:
+  class Factory;
+
+  CustomHistogram(const char* name, const BucketRanges* ranges);
+
+  CustomHistogram(const char* name,
+                  const BucketRanges* ranges,
+                  const DelayedPersistentAllocation& counts,
+                  const DelayedPersistentAllocation& logged_counts,
+                  HistogramSamples::Metadata* meta,
+                  HistogramSamples::Metadata* logged_meta);
+
+  // HistogramBase implementation:
+  void SerializeInfoImpl(base::Pickle* pickle) const override;
+
+  double GetBucketSize(Count current, uint32_t i) const override;
+
+ private:
+  friend BASE_EXPORT HistogramBase* DeserializeHistogramInfo(
+      base::PickleIterator* iter);
+  static HistogramBase* DeserializeInfoImpl(base::PickleIterator* iter);
+
+  static bool ValidateCustomRanges(const std::vector<Sample>& custom_ranges);
+
+  DISALLOW_COPY_AND_ASSIGN(CustomHistogram);
+};
+
+}  // namespace base
+
+#endif  // BASE_METRICS_HISTOGRAM_H_
diff --git a/base/metrics/histogram_base.cc b/base/metrics/histogram_base.cc
new file mode 100644
index 0000000..da3ae93
--- /dev/null
+++ b/base/metrics/histogram_base.cc
@@ -0,0 +1,214 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/histogram_base.h"
+
+#include <limits.h>
+
+#include <memory>
+#include <set>
+#include <utility>
+
+#include "base/json/json_string_value_serializer.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/metrics/histogram.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/metrics/histogram_samples.h"
+#include "base/metrics/sparse_histogram.h"
+#include "base/metrics/statistics_recorder.h"
+#include "base/pickle.h"
+#include "base/process/process_handle.h"
+#include "base/rand_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/synchronization/lock.h"
+#include "base/values.h"
+
+namespace base {
+
+std::string HistogramTypeToString(HistogramType type) {
+  switch (type) {
+    case HISTOGRAM:
+      return "HISTOGRAM";
+    case LINEAR_HISTOGRAM:
+      return "LINEAR_HISTOGRAM";
+    case BOOLEAN_HISTOGRAM:
+      return "BOOLEAN_HISTOGRAM";
+    case CUSTOM_HISTOGRAM:
+      return "CUSTOM_HISTOGRAM";
+    case SPARSE_HISTOGRAM:
+      return "SPARSE_HISTOGRAM";
+    case DUMMY_HISTOGRAM:
+      return "DUMMY_HISTOGRAM";
+  }
+  NOTREACHED();
+  return "UNKNOWN";
+}
+
+HistogramBase* DeserializeHistogramInfo(PickleIterator* iter) {
+  int type;
+  if (!iter->ReadInt(&type))
+    return nullptr;
+
+  switch (type) {
+    case HISTOGRAM:
+      return Histogram::DeserializeInfoImpl(iter);
+    case LINEAR_HISTOGRAM:
+      return LinearHistogram::DeserializeInfoImpl(iter);
+    case BOOLEAN_HISTOGRAM:
+      return BooleanHistogram::DeserializeInfoImpl(iter);
+    case CUSTOM_HISTOGRAM:
+      return CustomHistogram::DeserializeInfoImpl(iter);
+    case SPARSE_HISTOGRAM:
+      return SparseHistogram::DeserializeInfoImpl(iter);
+    default:
+      return nullptr;
+  }
+}
+
+const HistogramBase::Sample HistogramBase::kSampleType_MAX = INT_MAX;
+
+HistogramBase::HistogramBase(const char* name)
+    : histogram_name_(name), flags_(kNoFlags) {}
+
+HistogramBase::~HistogramBase() = default;
+
+void HistogramBase::CheckName(const StringPiece& name) const {
+  DCHECK_EQ(StringPiece(histogram_name()), name);
+}
+
+void HistogramBase::SetFlags(int32_t flags) {
+  HistogramBase::Count old_flags = subtle::NoBarrier_Load(&flags_);
+  subtle::NoBarrier_Store(&flags_, old_flags | flags);
+}
+
+void HistogramBase::ClearFlags(int32_t flags) {
+  HistogramBase::Count old_flags = subtle::NoBarrier_Load(&flags_);
+  subtle::NoBarrier_Store(&flags_, old_flags & ~flags);
+}
+
+void HistogramBase::AddScaled(Sample value, int count, int scale) {
+  DCHECK_LT(0, scale);
+
+  // Divide the raw count by the scale and probabilistically round up if the
+  // remainder is more than a random number in [0, scale). This gives a more
+  // accurate total when a large number of records are added. RandInt is
+  // "inclusive", hence the -1 for the max value.
+  int64_t count_scaled = count / scale;
+  if (count - (count_scaled * scale) > base::RandInt(0, scale - 1))
+    count_scaled += 1;
+  if (count_scaled == 0)
+    return;
+
+  AddCount(value, count_scaled);
+}
+
+void HistogramBase::AddKilo(Sample value, int count) {
+  AddScaled(value, count, 1000);
+}
+
+void HistogramBase::AddKiB(Sample value, int count) {
+  AddScaled(value, count, 1024);
+}
+
+void HistogramBase::AddTimeMillisecondsGranularity(const TimeDelta& time) {
+  Add(static_cast<Sample>(time.InMilliseconds()));
+}
+
+void HistogramBase::AddTimeMicrosecondsGranularity(const TimeDelta& time) {
+  // Intentionally drop high-resolution reports on clients with low-resolution
+  // clocks. High-resolution metrics cannot make use of low-resolution data and
+  // reporting it merely adds noise to the metric. https://crbug.com/807615#c16
+  if (TimeTicks::IsHighResolution())
+    Add(static_cast<Sample>(time.InMicroseconds()));
+}
+
+void HistogramBase::AddBoolean(bool value) {
+  Add(value ? 1 : 0);
+}
+
+void HistogramBase::SerializeInfo(Pickle* pickle) const {
+  pickle->WriteInt(GetHistogramType());
+  SerializeInfoImpl(pickle);
+}
+
+uint32_t HistogramBase::FindCorruption(const HistogramSamples& samples) const {
+  // Not supported by default.
+  return NO_INCONSISTENCIES;
+}
+
+void HistogramBase::ValidateHistogramContents() const {}
+
+void HistogramBase::WriteJSON(std::string* output,
+                              JSONVerbosityLevel verbosity_level) const {
+  Count count;
+  int64_t sum;
+  std::unique_ptr<ListValue> buckets(new ListValue());
+  GetCountAndBucketData(&count, &sum, buckets.get());
+  std::unique_ptr<DictionaryValue> parameters(new DictionaryValue());
+  GetParameters(parameters.get());
+
+  JSONStringValueSerializer serializer(output);
+  DictionaryValue root;
+  root.SetString("name", histogram_name());
+  root.SetInteger("count", count);
+  root.SetDouble("sum", static_cast<double>(sum));
+  root.SetInteger("flags", flags());
+  root.Set("params", std::move(parameters));
+  if (verbosity_level != JSON_VERBOSITY_LEVEL_OMIT_BUCKETS)
+    root.Set("buckets", std::move(buckets));
+  root.SetInteger("pid", GetUniqueIdForProcess());
+  serializer.Serialize(root);
+}
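+
+// An illustrative sketch of the resulting JSON shape (hypothetical values;
+// key order and exact formatting depend on the serializer):
+//   {"buckets":[...],"count":4,"flags":1,"name":"My.Metric",
+//    "params":{...},"pid":1234,"sum":1111.0}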
+
+void HistogramBase::FindAndRunCallback(HistogramBase::Sample sample) const {
+  if ((flags() & kCallbackExists) == 0)
+    return;
+
+  StatisticsRecorder::OnSampleCallback cb =
+      StatisticsRecorder::FindCallback(histogram_name());
+  if (!cb.is_null())
+    cb.Run(sample);
+}
+
+void HistogramBase::WriteAsciiBucketGraph(double current_size,
+                                          double max_size,
+                                          std::string* output) const {
+  const int k_line_length = 72;  // Maximal horizontal width of graph.
+  int x_count = static_cast<int>(k_line_length * (current_size / max_size)
+                                 + 0.5);
+  int x_remainder = k_line_length - x_count;
+
+  while (0 < x_count--)
+    output->append("-");
+  output->append("O");
+  while (0 < x_remainder--)
+    output->append(" ");
+}
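+
+// Illustrative output of the graph code above (hypothetical sizes): with
+// current_size / max_size == 0.25, it appends 18 '-' characters, one 'O',
+// and 54 trailing spaces (73 characters in total).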
+
+const std::string HistogramBase::GetSimpleAsciiBucketRange(
+    Sample sample) const {
+  return StringPrintf("%d", sample);
+}
+
+void HistogramBase::WriteAsciiBucketValue(Count current,
+                                          double scaled_sum,
+                                          std::string* output) const {
+  StringAppendF(output, " (%d = %3.1f%%)", current, current/scaled_sum);
+}
+
+// static
+char const* HistogramBase::GetPermanentName(const std::string& name) {
+  // A set of histogram names that provides the "permanent" lifetime required
+  // by histogram objects for those strings that are not already code constants
+  // or held in persistent memory.
+  static LazyInstance<std::set<std::string>>::Leaky permanent_names;
+  static LazyInstance<Lock>::Leaky permanent_names_lock;
+
+  AutoLock lock(permanent_names_lock.Get());
+  auto result = permanent_names.Get().insert(name);
+  return result.first->c_str();
+}
+
+}  // namespace base
diff --git a/base/metrics/histogram_base.h b/base/metrics/histogram_base.h
new file mode 100644
index 0000000..010dc55
--- /dev/null
+++ b/base/metrics/histogram_base.h
@@ -0,0 +1,305 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_HISTOGRAM_BASE_H_
+#define BASE_METRICS_HISTOGRAM_BASE_H_
+
+#include <limits.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/strings/string_piece.h"
+#include "base/time/time.h"
+
+namespace base {
+
+class DictionaryValue;
+class HistogramBase;
+class HistogramSamples;
+class ListValue;
+class Pickle;
+class PickleIterator;
+
+////////////////////////////////////////////////////////////////////////////////
+// This enum is used to facilitate deserialization of histograms from other
+// processes into the browser. If you create another class that inherits from
+// HistogramBase, add new histogram types and names below.
+
+enum HistogramType {
+  HISTOGRAM,
+  LINEAR_HISTOGRAM,
+  BOOLEAN_HISTOGRAM,
+  CUSTOM_HISTOGRAM,
+  SPARSE_HISTOGRAM,
+  DUMMY_HISTOGRAM,
+};
+
+// Controls the verbosity of the information when the histogram is serialized
+// to JSON.
+// GENERATED_JAVA_ENUM_PACKAGE: org.chromium.base.metrics
+enum JSONVerbosityLevel {
+  // The histogram is completely serialized.
+  JSON_VERBOSITY_LEVEL_FULL,
+  // The bucket information is not serialized.
+  JSON_VERBOSITY_LEVEL_OMIT_BUCKETS,
+};
+
+std::string HistogramTypeToString(HistogramType type);
+
+// This enum is used for reporting how many histograms and of what types and
+// variations are being created. It has to be in the main .h file so it is
+// visible to files that define the various histogram types.
+enum HistogramReport {
+  // Count the number of reports created. The other counts divided by this
+  // number will give the average per run of the program.
+  HISTOGRAM_REPORT_CREATED = 0,
+
+  // Count the total number of histograms created. It is the limit against
+  // which all others are compared.
+  HISTOGRAM_REPORT_HISTOGRAM_CREATED = 1,
+
+  // Count the total number of histograms looked up. It's better to cache
+  // the result of a single lookup rather than do it repeatedly.
+  HISTOGRAM_REPORT_HISTOGRAM_LOOKUP = 2,
+
+  // These count the individual histogram types. This must follow the order
+  // of HistogramType above.
+  HISTOGRAM_REPORT_TYPE_LOGARITHMIC = 3,
+  HISTOGRAM_REPORT_TYPE_LINEAR = 4,
+  HISTOGRAM_REPORT_TYPE_BOOLEAN = 5,
+  HISTOGRAM_REPORT_TYPE_CUSTOM = 6,
+  HISTOGRAM_REPORT_TYPE_SPARSE = 7,
+
+  // These indicate the individual flags that were set.
+  HISTOGRAM_REPORT_FLAG_UMA_TARGETED = 8,
+  HISTOGRAM_REPORT_FLAG_UMA_STABILITY = 9,
+  HISTOGRAM_REPORT_FLAG_PERSISTENT = 10,
+
+  // This must be last.
+  HISTOGRAM_REPORT_MAX = 11
+};
+
+// Create or find an existing histogram that matches the pickled info.
+// Returns NULL if the pickled data has problems.
+BASE_EXPORT HistogramBase* DeserializeHistogramInfo(base::PickleIterator* iter);
+
+////////////////////////////////////////////////////////////////////////////////
+
+class BASE_EXPORT HistogramBase {
+ public:
+  typedef int32_t Sample;                // Used for samples.
+  typedef subtle::Atomic32 AtomicCount;  // Used to count samples.
+  typedef int32_t Count;  // Used to manipulate counts in temporaries.
+
+  static const Sample kSampleType_MAX;  // INT_MAX
+
+  enum Flags {
+    kNoFlags = 0x0,
+
+    // Histogram should be UMA uploaded.
+    kUmaTargetedHistogramFlag = 0x1,
+
+    // Indicates that this is a stability histogram. This flag exists to specify
+    // which histograms should be included in the initial stability log. Please
+    // refer to |MetricsService::PrepareInitialStabilityLog|.
+    kUmaStabilityHistogramFlag = kUmaTargetedHistogramFlag | 0x2,
+
+    // Indicates that the histogram was pickled to be sent across an IPC
+    // channel. If we observe this flag on a histogram that is being
+    // aggregated into after IPC, then we are running in single-process mode,
+    // and the aggregation should not take place (as we would be aggregating
+    // back into the source histogram!).
+    kIPCSerializationSourceFlag = 0x10,
+
+    // Indicates that a callback exists for when a new sample is recorded on
+    // this histogram. We store this as a flag with the histogram since
+    // histograms can be in performance critical code, and this allows us
+    // to shortcut looking up the callback if it doesn't exist.
+    kCallbackExists = 0x20,
+
+    // Indicates that the histogram is held in "persistent" memory and may
+    // be accessible between processes. This is only possible if such a
+    // memory segment has been created/attached and used to create a
+    // PersistentMemoryAllocator, which was in turn loaded into the Histogram
+    // module before this histogram was created.
+    kIsPersistent = 0x40,
+  };
+
+  // Histogram data inconsistency types.
+  enum Inconsistency : uint32_t {
+    NO_INCONSISTENCIES = 0x0,
+    RANGE_CHECKSUM_ERROR = 0x1,
+    BUCKET_ORDER_ERROR = 0x2,
+    COUNT_HIGH_ERROR = 0x4,
+    COUNT_LOW_ERROR = 0x8,
+
+    NEVER_EXCEEDED_VALUE = 0x10,
+  };
+
+  // Construct the base histogram. The name is not copied; it's up to the
+  // caller to ensure that it lives at least as long as this object.
+  explicit HistogramBase(const char* name);
+  virtual ~HistogramBase();
+
+  const char* histogram_name() const { return histogram_name_; }
+
+  // Compares |name| to the histogram name and triggers a DCHECK if they do not
+  // match. This is a helper function used by histogram macros, which results
+  // in more compact machine code being generated by the macros.
+  virtual void CheckName(const StringPiece& name) const;
+
+  // Get a unique ID for this histogram's samples.
+  virtual uint64_t name_hash() const = 0;
+
+  // Operations with Flags enum.
+  int32_t flags() const { return subtle::NoBarrier_Load(&flags_); }
+  void SetFlags(int32_t flags);
+  void ClearFlags(int32_t flags);
+
+  virtual HistogramType GetHistogramType() const = 0;
+
+  // Whether the histogram was constructed with the specified minimum, maximum
+  // and bucket count. For histograms that don't have the concept of minimum,
+  // maximum or bucket_count, this function always returns false.
+  virtual bool HasConstructionArguments(
+      Sample expected_minimum,
+      Sample expected_maximum,
+      uint32_t expected_bucket_count) const = 0;
+
+  virtual void Add(Sample value) = 0;
+
+  // In the Add function the |value| bucket is increased by one, but in some
+  // use cases we need to increase this value by an arbitrary integer. The
+  // AddCount function increases the |value| bucket by |count|. |count| should
+  // be greater than or equal to 1.
+  virtual void AddCount(Sample value, int count) = 0;
+
+  // Similar to above but divides |count| by the |scale| amount. Probabilistic
+  // rounding is used to yield a reasonably accurate total when many samples
+  // are added. Methods for common cases of scales 1000 and 1024 are included.
+  void AddScaled(Sample value, int count, int scale);
+  void AddKilo(Sample value, int count);  // scale=1000
+  void AddKiB(Sample value, int count);   // scale=1024
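+  //
+  // Illustrative arithmetic for AddScaled (hypothetical values): a call of
+  // AddScaled(v, 2500, 1000) always adds 2 to the |v| bucket, and adds one
+  // more with probability 0.5 (remainder 500 out of scale 1000), so the
+  // expected total matches 2500 / 1000 = 2.5.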
+
+  // Convenient functions that call Add(Sample).
+  void AddTime(const TimeDelta& time) { AddTimeMillisecondsGranularity(time); }
+  void AddTimeMillisecondsGranularity(const TimeDelta& time);
+  // Note: AddTimeMicrosecondsGranularity() drops the report if this client
+  // doesn't have a high-resolution clock.
+  void AddTimeMicrosecondsGranularity(const TimeDelta& time);
+  void AddBoolean(bool value);
+
+  virtual void AddSamples(const HistogramSamples& samples) = 0;
+  virtual bool AddSamplesFromPickle(base::PickleIterator* iter) = 0;
+
+  // Serialize the histogram info into |pickle|.
+  // Note: This only serializes the construction arguments of the histogram;
+  // it does not serialize the samples.
+  void SerializeInfo(base::Pickle* pickle) const;
+
+  // Try to detect data corruption in the histogram and the samples.
+  // The returned value is a combination of Inconsistency enum values.
+  virtual uint32_t FindCorruption(const HistogramSamples& samples) const;
+
+  // Snapshot the current complete set of sample data.
+  // Override with atomic/locked snapshot if needed.
+  // NOTE: this data can overflow for long-running sessions. It should be
+  // handled with care, and this method is recommended for use only in
+  // about:histograms and test code.
+  virtual std::unique_ptr<HistogramSamples> SnapshotSamples() const = 0;
+
+  // Calculate the change (delta) in histogram counts since the previous call
+  // to this method. Each successive call will return only those counts
+  // changed since the last call.
+  virtual std::unique_ptr<HistogramSamples> SnapshotDelta() = 0;
+
+  // Calculate the change (delta) in histogram counts since the previous call
+  // to SnapshotDelta(), but do so without modifying any internal data about
+  // what was previously logged. After such a call, no further calls to this
+  // method or to SnapshotDelta() should be done as the result would include
+  // data previously returned. Because no internal data is changed, this call
+  // can be made on "const" histograms such as those with data held in
+  // read-only memory.
+  virtual std::unique_ptr<HistogramSamples> SnapshotFinalDelta() const = 0;
+
+  // The following methods provide graphical histogram displays.
+  virtual void WriteHTMLGraph(std::string* output) const = 0;
+  virtual void WriteAscii(std::string* output) const = 0;
+
+  // TODO(bcwhite): Remove this after https://crbug/836875.
+  virtual void ValidateHistogramContents() const;
+
+  // Produce a JSON representation of the histogram with |verbosity_level| as
+  // the serialization verbosity. This is implemented with the help of
+  // GetParameters and GetCountAndBucketData; override them to customize the
+  // output.
+  void WriteJSON(std::string* output, JSONVerbosityLevel verbosity_level) const;
+
+ protected:
+  enum ReportActivity { HISTOGRAM_CREATED, HISTOGRAM_LOOKUP };
+
+  // Subclasses should implement this function to make SerializeInfo work.
+  virtual void SerializeInfoImpl(base::Pickle* pickle) const = 0;
+
+  // Writes information about the construction parameters in |params|.
+  virtual void GetParameters(DictionaryValue* params) const = 0;
+
+  // Writes information about the current (non-empty) buckets and their sample
+  // counts to |buckets|, the total sample count to |count| and the total sum
+  // to |sum|.
+  virtual void GetCountAndBucketData(Count* count,
+                                     int64_t* sum,
+                                     ListValue* buckets) const = 0;
+
+  // Produce the actual graph (a set of blank vs. non-blank characters) for
+  // a bucket.
+  void WriteAsciiBucketGraph(double current_size,
+                             double max_size,
+                             std::string* output) const;
+
+  // Return a string description of what goes in a given bucket.
+  const std::string GetSimpleAsciiBucketRange(Sample sample) const;
+
+  // Write textual description of the bucket contents (relative to histogram).
+  // Output is the count in the buckets, as well as the percentage.
+  void WriteAsciiBucketValue(Count current,
+                             double scaled_sum,
+                             std::string* output) const;
+
+  // Retrieves the callback for this histogram, if one exists, and runs it
+  // passing |sample| as the parameter.
+  void FindAndRunCallback(Sample sample) const;
+
+  // Gets a permanent string that can be used for histogram objects when the
+  // original is not a code constant or held in persistent memory.
+  static const char* GetPermanentName(const std::string& name);
+
+ private:
+  friend class HistogramBaseTest;
+
+  // A pointer to permanent storage where the histogram name is held. This can
+  // be code space or the output of GetPermanentName() or any other storage
+  // that is known to never change. This is not StringPiece because (a) char*
+  // is 1/2 the size and (b) StringPiece transparently casts from std::string
+  // which can easily lead to a pointer to non-permanent space.
+  // For persistent histograms, this will simply point into the persistent
+  // memory segment, thus avoiding duplication. For heap histograms, the
+  // GetPermanentName method will create the necessary copy.
+  const char* const histogram_name_;
+
+  // Additional information about the histogram.
+  AtomicCount flags_;
+
+  DISALLOW_COPY_AND_ASSIGN(HistogramBase);
+};
+
+}  // namespace base
+
+#endif  // BASE_METRICS_HISTOGRAM_BASE_H_
diff --git a/base/metrics/histogram_base_unittest.cc b/base/metrics/histogram_base_unittest.cc
new file mode 100644
index 0000000..e539e5c
--- /dev/null
+++ b/base/metrics/histogram_base_unittest.cc
@@ -0,0 +1,189 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <vector>
+
+#include "base/metrics/histogram.h"
+#include "base/metrics/histogram_base.h"
+#include "base/metrics/sample_vector.h"
+#include "base/metrics/sparse_histogram.h"
+#include "base/metrics/statistics_recorder.h"
+#include "base/pickle.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+class HistogramBaseTest : public testing::Test {
+ protected:
+  HistogramBaseTest() {
+    // Each test will have a clean state (no Histogram / BucketRanges
+    // registered).
+    ResetStatisticsRecorder();
+  }
+
+  ~HistogramBaseTest() override = default;
+
+  void ResetStatisticsRecorder() {
+    // It is necessary to fully destruct any existing StatisticsRecorder
+    // before creating a new one.
+    statistics_recorder_.reset();
+    statistics_recorder_ = StatisticsRecorder::CreateTemporaryForTesting();
+  }
+
+ private:
+  std::unique_ptr<StatisticsRecorder> statistics_recorder_;
+
+  DISALLOW_COPY_AND_ASSIGN(HistogramBaseTest);
+};
+
+TEST_F(HistogramBaseTest, DeserializeHistogram) {
+  HistogramBase* histogram = Histogram::FactoryGet(
+      "TestHistogram", 1, 1000, 10,
+      (HistogramBase::kUmaTargetedHistogramFlag |
+      HistogramBase::kIPCSerializationSourceFlag));
+
+  Pickle pickle;
+  histogram->SerializeInfo(&pickle);
+
+  PickleIterator iter(pickle);
+  HistogramBase* deserialized = DeserializeHistogramInfo(&iter);
+  EXPECT_EQ(histogram, deserialized);
+
+  ResetStatisticsRecorder();
+
+  PickleIterator iter2(pickle);
+  deserialized = DeserializeHistogramInfo(&iter2);
+  EXPECT_TRUE(deserialized);
+  EXPECT_NE(histogram, deserialized);
+  EXPECT_EQ("TestHistogram", StringPiece(deserialized->histogram_name()));
+  EXPECT_TRUE(deserialized->HasConstructionArguments(1, 1000, 10));
+
+  // kIPCSerializationSourceFlag will be cleared.
+  EXPECT_EQ(HistogramBase::kUmaTargetedHistogramFlag, deserialized->flags());
+}
+
+TEST_F(HistogramBaseTest, DeserializeLinearHistogram) {
+  HistogramBase* histogram = LinearHistogram::FactoryGet(
+      "TestHistogram", 1, 1000, 10,
+      HistogramBase::kIPCSerializationSourceFlag);
+
+  Pickle pickle;
+  histogram->SerializeInfo(&pickle);
+
+  PickleIterator iter(pickle);
+  HistogramBase* deserialized = DeserializeHistogramInfo(&iter);
+  EXPECT_EQ(histogram, deserialized);
+
+  ResetStatisticsRecorder();
+
+  PickleIterator iter2(pickle);
+  deserialized = DeserializeHistogramInfo(&iter2);
+  EXPECT_TRUE(deserialized);
+  EXPECT_NE(histogram, deserialized);
+  EXPECT_EQ("TestHistogram", StringPiece(deserialized->histogram_name()));
+  EXPECT_TRUE(deserialized->HasConstructionArguments(1, 1000, 10));
+  EXPECT_EQ(0, deserialized->flags());
+}
+
+TEST_F(HistogramBaseTest, DeserializeBooleanHistogram) {
+  HistogramBase* histogram = BooleanHistogram::FactoryGet(
+      "TestHistogram", HistogramBase::kIPCSerializationSourceFlag);
+
+  Pickle pickle;
+  histogram->SerializeInfo(&pickle);
+
+  PickleIterator iter(pickle);
+  HistogramBase* deserialized = DeserializeHistogramInfo(&iter);
+  EXPECT_EQ(histogram, deserialized);
+
+  ResetStatisticsRecorder();
+
+  PickleIterator iter2(pickle);
+  deserialized = DeserializeHistogramInfo(&iter2);
+  EXPECT_TRUE(deserialized);
+  EXPECT_NE(histogram, deserialized);
+  EXPECT_EQ("TestHistogram", StringPiece(deserialized->histogram_name()));
+  EXPECT_TRUE(deserialized->HasConstructionArguments(1, 2, 3));
+  EXPECT_EQ(0, deserialized->flags());
+}
+
+TEST_F(HistogramBaseTest, DeserializeCustomHistogram) {
+  std::vector<HistogramBase::Sample> ranges;
+  ranges.push_back(13);
+  ranges.push_back(5);
+  ranges.push_back(9);
+
+  HistogramBase* histogram = CustomHistogram::FactoryGet(
+      "TestHistogram", ranges, HistogramBase::kIPCSerializationSourceFlag);
+
+  Pickle pickle;
+  histogram->SerializeInfo(&pickle);
+
+  PickleIterator iter(pickle);
+  HistogramBase* deserialized = DeserializeHistogramInfo(&iter);
+  EXPECT_EQ(histogram, deserialized);
+
+  ResetStatisticsRecorder();
+
+  PickleIterator iter2(pickle);
+  deserialized = DeserializeHistogramInfo(&iter2);
+  EXPECT_TRUE(deserialized);
+  EXPECT_NE(histogram, deserialized);
+  EXPECT_EQ("TestHistogram", StringPiece(deserialized->histogram_name()));
+  EXPECT_TRUE(deserialized->HasConstructionArguments(5, 13, 4));
+  EXPECT_EQ(0, deserialized->flags());
+}
+
+TEST_F(HistogramBaseTest, DeserializeSparseHistogram) {
+  HistogramBase* histogram = SparseHistogram::FactoryGet(
+      "TestHistogram", HistogramBase::kIPCSerializationSourceFlag);
+
+  Pickle pickle;
+  histogram->SerializeInfo(&pickle);
+
+  PickleIterator iter(pickle);
+  HistogramBase* deserialized = DeserializeHistogramInfo(&iter);
+  EXPECT_EQ(histogram, deserialized);
+
+  ResetStatisticsRecorder();
+
+  PickleIterator iter2(pickle);
+  deserialized = DeserializeHistogramInfo(&iter2);
+  EXPECT_TRUE(deserialized);
+  EXPECT_NE(histogram, deserialized);
+  EXPECT_EQ("TestHistogram", StringPiece(deserialized->histogram_name()));
+  EXPECT_EQ(0, deserialized->flags());
+}
+
+TEST_F(HistogramBaseTest, AddKilo) {
+  HistogramBase* histogram =
+      LinearHistogram::FactoryGet("TestAddKiloHistogram", 1, 1000, 100, 0);
+
+  histogram->AddKilo(100, 1000);
+  histogram->AddKilo(200, 2000);
+  histogram->AddKilo(300, 1500);
+
+  std::unique_ptr<HistogramSamples> samples = histogram->SnapshotSamples();
+  EXPECT_EQ(1, samples->GetCount(100));
+  EXPECT_EQ(2, samples->GetCount(200));
+  EXPECT_LE(1, samples->GetCount(300));
+  EXPECT_GE(2, samples->GetCount(300));
+}
+
+TEST_F(HistogramBaseTest, AddKiB) {
+  HistogramBase* histogram =
+      LinearHistogram::FactoryGet("TestAddKiBHistogram", 1, 1000, 100, 0);
+
+  histogram->AddKiB(100, 1024);
+  histogram->AddKiB(200, 2048);
+  histogram->AddKiB(300, 1536);
+
+  std::unique_ptr<HistogramSamples> samples = histogram->SnapshotSamples();
+  EXPECT_EQ(1, samples->GetCount(100));
+  EXPECT_EQ(2, samples->GetCount(200));
+  EXPECT_LE(1, samples->GetCount(300));
+  EXPECT_GE(2, samples->GetCount(300));
+}
+
+}  // namespace base
diff --git a/base/metrics/histogram_delta_serialization.cc b/base/metrics/histogram_delta_serialization.cc
new file mode 100644
index 0000000..a74b87f
--- /dev/null
+++ b/base/metrics/histogram_delta_serialization.cc
@@ -0,0 +1,81 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/histogram_delta_serialization.h"
+
+#include "base/logging.h"
+#include "base/metrics/histogram_base.h"
+#include "base/metrics/histogram_snapshot_manager.h"
+#include "base/metrics/statistics_recorder.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/pickle.h"
+#include "base/values.h"
+
+namespace base {
+
+namespace {
+
+// Create or find an existing histogram and add the samples from the pickle.
+// Silently returns when seeing any data problem in the pickle.
+void DeserializeHistogramAndAddSamples(PickleIterator* iter) {
+  HistogramBase* histogram = DeserializeHistogramInfo(iter);
+  if (!histogram)
+    return;
+
+  if (histogram->flags() & HistogramBase::kIPCSerializationSourceFlag) {
+    DVLOG(1) << "Single process mode, histogram observed and not copied: "
+             << histogram->histogram_name();
+    return;
+  }
+  histogram->AddSamplesFromPickle(iter);
+}
+
+}  // namespace
+
+HistogramDeltaSerialization::HistogramDeltaSerialization(
+    const std::string& caller_name)
+    : histogram_snapshot_manager_(this), serialized_deltas_(nullptr) {}
+
+HistogramDeltaSerialization::~HistogramDeltaSerialization() = default;
+
+void HistogramDeltaSerialization::PrepareAndSerializeDeltas(
+    std::vector<std::string>* serialized_deltas,
+    bool include_persistent) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  serialized_deltas_ = serialized_deltas;
+  // Note: Before serializing, we set the kIPCSerializationSourceFlag for all
+  // the histograms, so that the receiving process can distinguish them from the
+  // local histograms.
+  StatisticsRecorder::PrepareDeltas(
+      include_persistent, Histogram::kIPCSerializationSourceFlag,
+      Histogram::kNoFlags, &histogram_snapshot_manager_);
+  serialized_deltas_ = nullptr;
+}
+
+// static
+void HistogramDeltaSerialization::DeserializeAndAddSamples(
+    const std::vector<std::string>& serialized_deltas) {
+  for (std::vector<std::string>::const_iterator it = serialized_deltas.begin();
+       it != serialized_deltas.end(); ++it) {
+    Pickle pickle(it->data(), checked_cast<int>(it->size()));
+    PickleIterator iter(pickle);
+    DeserializeHistogramAndAddSamples(&iter);
+  }
+}
+
+void HistogramDeltaSerialization::RecordDelta(
+    const HistogramBase& histogram,
+    const HistogramSamples& snapshot) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  DCHECK_NE(0, snapshot.TotalCount());
+
+  Pickle pickle;
+  histogram.SerializeInfo(&pickle);
+  snapshot.Serialize(&pickle);
+  serialized_deltas_->push_back(
+      std::string(static_cast<const char*>(pickle.data()), pickle.size()));
+}
+
+}  // namespace base
diff --git a/base/metrics/histogram_delta_serialization.h b/base/metrics/histogram_delta_serialization.h
new file mode 100644
index 0000000..57ebd2c
--- /dev/null
+++ b/base/metrics/histogram_delta_serialization.h
@@ -0,0 +1,61 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_HISTOGRAM_DELTA_SERIALIZATION_H_
+#define BASE_METRICS_HISTOGRAM_DELTA_SERIALIZATION_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/metrics/histogram_flattener.h"
+#include "base/metrics/histogram_snapshot_manager.h"
+#include "base/threading/thread_checker.h"
+
+namespace base {
+
+class HistogramBase;
+
+// Serializes and restores histogram deltas.
+class BASE_EXPORT HistogramDeltaSerialization : public HistogramFlattener {
+ public:
+  // |caller_name| is a string used in histograms for counting inconsistencies.
+  explicit HistogramDeltaSerialization(const std::string& caller_name);
+  ~HistogramDeltaSerialization() override;
+
+  // Computes deltas in histogram bucket counts relative to the previous call to
+  // this method. Stores the deltas in serialized form into |serialized_deltas|.
+  // If |serialized_deltas| is null, no data is serialized, though the next call
+  // will compute the deltas relative to this one. Setting |include_persistent|
+  // will include histograms held in persistent memory (and thus may be reported
+  // elsewhere); otherwise only histograms local to this process are serialized.
+  void PrepareAndSerializeDeltas(std::vector<std::string>* serialized_deltas,
+                                 bool include_persistent);
+
+  // Deserialize deltas and add samples to corresponding histograms, creating
+  // them if necessary. Silently ignores errors in |serialized_deltas|.
+  static void DeserializeAndAddSamples(
+      const std::vector<std::string>& serialized_deltas);
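+
+  // Illustrative round trip (hypothetical caller code, mirroring the unit
+  // test for this class):
+  //   HistogramDeltaSerialization serializer("MyCaller");
+  //   std::vector<std::string> deltas;
+  //   serializer.PrepareAndSerializeDeltas(&deltas,
+  //                                        /*include_persistent=*/true);
+  //   // ... send |deltas| to another process ...
+  //   HistogramDeltaSerialization::DeserializeAndAddSamples(deltas);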
+
+ private:
+  // HistogramFlattener implementation.
+  void RecordDelta(const HistogramBase& histogram,
+                   const HistogramSamples& snapshot) override;
+
+  ThreadChecker thread_checker_;
+
+  // Calculates deltas in histogram counters.
+  HistogramSnapshotManager histogram_snapshot_manager_;
+
+  // Output buffer for serialized deltas.
+  std::vector<std::string>* serialized_deltas_;
+
+  DISALLOW_COPY_AND_ASSIGN(HistogramDeltaSerialization);
+};
+
+}  // namespace base
+
+#endif  // BASE_METRICS_HISTOGRAM_DELTA_SERIALIZATION_H_
diff --git a/base/metrics/histogram_delta_serialization_unittest.cc b/base/metrics/histogram_delta_serialization_unittest.cc
new file mode 100644
index 0000000..719bc70
--- /dev/null
+++ b/base/metrics/histogram_delta_serialization_unittest.cc
@@ -0,0 +1,55 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/histogram_delta_serialization.h"
+
+#include <vector>
+
+#include "base/metrics/histogram.h"
+#include "base/metrics/histogram_base.h"
+#include "base/metrics/statistics_recorder.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(HistogramDeltaSerializationTest, DeserializeHistogramAndAddSamples) {
+  std::unique_ptr<StatisticsRecorder> statistic_recorder(
+      StatisticsRecorder::CreateTemporaryForTesting());
+  HistogramDeltaSerialization serializer("HistogramDeltaSerializationTest");
+  std::vector<std::string> deltas;
+  // Nothing was changed yet.
+  serializer.PrepareAndSerializeDeltas(&deltas, true);
+  EXPECT_TRUE(deltas.empty());
+
+  HistogramBase* histogram = Histogram::FactoryGet(
+      "TestHistogram", 1, 1000, 10, HistogramBase::kIPCSerializationSourceFlag);
+  histogram->Add(1);
+  histogram->Add(10);
+  histogram->Add(100);
+  histogram->Add(1000);
+
+  serializer.PrepareAndSerializeDeltas(&deltas, true);
+  EXPECT_FALSE(deltas.empty());
+
+  HistogramDeltaSerialization::DeserializeAndAddSamples(deltas);
+
+  // The histogram has kIPCSerializationSourceFlag set, so the samples will
+  // be ignored.
+  std::unique_ptr<HistogramSamples> snapshot(histogram->SnapshotSamples());
+  EXPECT_EQ(1, snapshot->GetCount(1));
+  EXPECT_EQ(1, snapshot->GetCount(10));
+  EXPECT_EQ(1, snapshot->GetCount(100));
+  EXPECT_EQ(1, snapshot->GetCount(1000));
+
+  // Clear kIPCSerializationSourceFlag to emulate multi-process usage.
+  histogram->ClearFlags(HistogramBase::kIPCSerializationSourceFlag);
+  HistogramDeltaSerialization::DeserializeAndAddSamples(deltas);
+
+  std::unique_ptr<HistogramSamples> snapshot2(histogram->SnapshotSamples());
+  EXPECT_EQ(2, snapshot2->GetCount(1));
+  EXPECT_EQ(2, snapshot2->GetCount(10));
+  EXPECT_EQ(2, snapshot2->GetCount(100));
+  EXPECT_EQ(2, snapshot2->GetCount(1000));
+}
+
+}  // namespace base
diff --git a/base/metrics/histogram_flattener.h b/base/metrics/histogram_flattener.h
new file mode 100644
index 0000000..6a5e3f4
--- /dev/null
+++ b/base/metrics/histogram_flattener.h
@@ -0,0 +1,35 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_HISTOGRAM_FLATTENER_H_
+#define BASE_METRICS_HISTOGRAM_FLATTENER_H_
+
+#include <map>
+#include <string>
+
+#include "base/macros.h"
+#include "base/metrics/histogram.h"
+
+namespace base {
+
+class HistogramSamples;
+
+// HistogramFlattener is an interface used by HistogramSnapshotManager, which
+// handles the logistics of gathering up available histograms for recording.
+class BASE_EXPORT HistogramFlattener {
+ public:
+  virtual void RecordDelta(const HistogramBase& histogram,
+                           const HistogramSamples& snapshot) = 0;
+
+ protected:
+  HistogramFlattener() = default;
+  virtual ~HistogramFlattener() = default;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(HistogramFlattener);
+};
+
+}  // namespace base
+
+#endif  // BASE_METRICS_HISTOGRAM_FLATTENER_H_
diff --git a/base/metrics/histogram_functions.cc b/base/metrics/histogram_functions.cc
new file mode 100644
index 0000000..31bf219
--- /dev/null
+++ b/base/metrics/histogram_functions.cc
@@ -0,0 +1,110 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/histogram_functions.h"
+
+#include "base/metrics/histogram.h"
+#include "base/metrics/histogram_base.h"
+#include "base/metrics/sparse_histogram.h"
+#include "base/time/time.h"
+
+namespace base {
+
+void UmaHistogramBoolean(const std::string& name, bool sample) {
+  HistogramBase* histogram = BooleanHistogram::FactoryGet(
+      name, HistogramBase::kUmaTargetedHistogramFlag);
+  histogram->Add(sample);
+}
+
+void UmaHistogramExactLinear(const std::string& name,
+                             int sample,
+                             int value_max) {
+  HistogramBase* histogram =
+      LinearHistogram::FactoryGet(name, 1, value_max, value_max + 1,
+                                  HistogramBase::kUmaTargetedHistogramFlag);
+  histogram->Add(sample);
+}
+
+void UmaHistogramPercentage(const std::string& name, int percent) {
+  UmaHistogramExactLinear(name, percent, 100);
+}
+
+void UmaHistogramCustomCounts(const std::string& name,
+                              int sample,
+                              int min,
+                              int max,
+                              int buckets) {
+  HistogramBase* histogram = Histogram::FactoryGet(
+      name, min, max, buckets, HistogramBase::kUmaTargetedHistogramFlag);
+  histogram->Add(sample);
+}
+
+void UmaHistogramCounts100(const std::string& name, int sample) {
+  UmaHistogramCustomCounts(name, sample, 1, 100, 50);
+}
+
+void UmaHistogramCounts1000(const std::string& name, int sample) {
+  UmaHistogramCustomCounts(name, sample, 1, 1000, 50);
+}
+
+void UmaHistogramCounts10000(const std::string& name, int sample) {
+  UmaHistogramCustomCounts(name, sample, 1, 10000, 50);
+}
+
+void UmaHistogramCounts100000(const std::string& name, int sample) {
+  UmaHistogramCustomCounts(name, sample, 1, 100000, 50);
+}
+
+void UmaHistogramCounts1M(const std::string& name, int sample) {
+  UmaHistogramCustomCounts(name, sample, 1, 1000000, 50);
+}
+
+void UmaHistogramCounts10M(const std::string& name, int sample) {
+  UmaHistogramCustomCounts(name, sample, 1, 10000000, 50);
+}
+
+void UmaHistogramCustomTimes(const std::string& name,
+                             TimeDelta sample,
+                             TimeDelta min,
+                             TimeDelta max,
+                             int buckets) {
+  HistogramBase* histogram = Histogram::FactoryTimeGet(
+      name, min, max, buckets, HistogramBase::kUmaTargetedHistogramFlag);
+  histogram->AddTimeMillisecondsGranularity(sample);
+}
+
+void UmaHistogramTimes(const std::string& name, TimeDelta sample) {
+  UmaHistogramCustomTimes(name, sample, TimeDelta::FromMilliseconds(1),
+                          TimeDelta::FromSeconds(10), 50);
+}
+
+void UmaHistogramMediumTimes(const std::string& name, TimeDelta sample) {
+  UmaHistogramCustomTimes(name, sample, TimeDelta::FromMilliseconds(1),
+                          TimeDelta::FromMinutes(3), 50);
+}
+
+void UmaHistogramLongTimes(const std::string& name, TimeDelta sample) {
+  UmaHistogramCustomTimes(name, sample, TimeDelta::FromMilliseconds(1),
+                          TimeDelta::FromHours(1), 50);
+}
+
+void UmaHistogramMemoryKB(const std::string& name, int sample) {
+  UmaHistogramCustomCounts(name, sample, 1000, 500000, 50);
+}
+
+void UmaHistogramMemoryMB(const std::string& name, int sample) {
+  UmaHistogramCustomCounts(name, sample, 1, 1000, 50);
+}
+
+void UmaHistogramMemoryLargeMB(const std::string& name, int sample) {
+  UmaHistogramCustomCounts(name, sample, 1, 64000, 100);
+}
+
+void UmaHistogramSparse(const std::string& name, int sample) {
+  HistogramBase* histogram = SparseHistogram::FactoryGet(
+      name, HistogramBase::kUmaTargetedHistogramFlag);
+  histogram->Add(sample);
+}
+
+}  // namespace base
diff --git a/base/metrics/histogram_functions.h b/base/metrics/histogram_functions.h
new file mode 100644
index 0000000..60c0057
--- /dev/null
+++ b/base/metrics/histogram_functions.h
@@ -0,0 +1,158 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_HISTOGRAM_FUNCTIONS_H_
+#define BASE_METRICS_HISTOGRAM_FUNCTIONS_H_
+
+#include "base/metrics/histogram.h"
+#include "base/metrics/histogram_base.h"
+#include "base/time/time.h"
+
+// Functions for recording metrics.
+//
+// For best practices on deciding when to emit to a histogram and what form
+// the histogram should take, see
+// https://chromium.googlesource.com/chromium/src.git/+/HEAD/tools/metrics/histograms/README.md
+
+// Functions for recording UMA histograms. Use these when the histogram name
+// is generated at runtime. They are functionally equivalent to the macros
+// defined in histogram_macros.h but allow non-constant histogram names. These
+// functions are slower than their macro equivalents because the histogram
+// objects are not cached between calls, so they shouldn't be used in
+// performance-critical code.
+namespace base {
+
+// For histograms with linear buckets.
+// Used for capturing integer data with a linear bucketing scheme. This can be
+// used when you want the exact value of some small numeric count, with a
+// maximum of 100 or less. If you need to capture a range greater than 100, we
+// recommend using the COUNT histograms below.
+// Sample usage:
+//   base::UmaHistogramExactLinear("Histogram.Linear", some_value, 10);
+BASE_EXPORT void UmaHistogramExactLinear(const std::string& name,
+                                         int sample,
+                                         int value_max);
+
+// For adding a sample to an enumerated histogram.
+// Sample usage:
+//   // These values are persisted to logs. Entries should not be renumbered and
+//   // numeric values should never be reused.
+//   enum class MyEnum {
+//     FIRST_VALUE = 0,
+//     SECOND_VALUE = 1,
+//     ...
+//     FINAL_VALUE = N,
+//     COUNT
+//   };
+//   base::UmaHistogramEnumeration("My.Enumeration",
+//                                 MyEnum::SOME_VALUE, MyEnum::COUNT);
+//
+// Note: The value in |sample| must be strictly less than |enum_size|.
+template <typename T>
+void UmaHistogramEnumeration(const std::string& name, T sample, T enum_size) {
+  static_assert(std::is_enum<T>::value,
+                "Non enum passed to UmaHistogramEnumeration");
+  DCHECK_LE(static_cast<uintmax_t>(enum_size), static_cast<uintmax_t>(INT_MAX));
+  DCHECK_LT(static_cast<uintmax_t>(sample), static_cast<uintmax_t>(enum_size));
+  return UmaHistogramExactLinear(name, static_cast<int>(sample),
+                                 static_cast<int>(enum_size));
+}
+
+// Same as above, but uses T::kMaxValue as the inclusive maximum value of the
+// enum.
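+// Sample usage (MyFeatureState is a hypothetical enum, shown purely for
+// illustration):
+//   enum class MyFeatureState {
+//     kDisabled = 0,
+//     kEnabled = 1,
+//     kMaxValue = kEnabled,
+//   };
+//   base::UmaHistogramEnumeration("My.Feature.State",
+//                                 MyFeatureState::kEnabled);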
+template <typename T>
+void UmaHistogramEnumeration(const std::string& name, T sample) {
+  static_assert(std::is_enum<T>::value,
+                "Non enum passed to UmaHistogramEnumeration");
+  DCHECK_LE(static_cast<uintmax_t>(T::kMaxValue),
+            static_cast<uintmax_t>(INT_MAX) - 1);
+  DCHECK_LE(static_cast<uintmax_t>(sample),
+            static_cast<uintmax_t>(T::kMaxValue));
+  return UmaHistogramExactLinear(name, static_cast<int>(sample),
+                                 static_cast<int>(T::kMaxValue) + 1);
+}
+
+// For adding a boolean sample to a histogram.
+// Sample usage:
+//   base::UmaHistogramBoolean("My.Boolean", true);
+BASE_EXPORT void UmaHistogramBoolean(const std::string& name, bool sample);
+
+// For adding a percentage sample to a histogram.
+// Percents are integers between 1 and 100.
+// Sample usage:
+//   base::UmaHistogramPercentage("My.Percent", 69);
+BASE_EXPORT void UmaHistogramPercentage(const std::string& name, int percent);
+
+// For adding a sample to a counts histogram.
+// Sample usage:
+//   base::UmaHistogramCustomCounts("My.Counts", some_value, 1, 600, 30);
+BASE_EXPORT void UmaHistogramCustomCounts(const std::string& name,
+                                          int sample,
+                                          int min,
+                                          int max,
+                                          int buckets);
+
+// Counts specializations for maximum counts of 100, 1000, 10k, 100k, 1M and
+// 10M.
+BASE_EXPORT void UmaHistogramCounts100(const std::string& name, int sample);
+BASE_EXPORT void UmaHistogramCounts1000(const std::string& name, int sample);
+BASE_EXPORT void UmaHistogramCounts10000(const std::string& name, int sample);
+BASE_EXPORT void UmaHistogramCounts100000(const std::string& name, int sample);
+BASE_EXPORT void UmaHistogramCounts1M(const std::string& name, int sample);
+BASE_EXPORT void UmaHistogramCounts10M(const std::string& name, int sample);
+
+// For histograms storing times.
+BASE_EXPORT void UmaHistogramCustomTimes(const std::string& name,
+                                         TimeDelta sample,
+                                         TimeDelta min,
+                                         TimeDelta max,
+                                         int buckets);
+// For short timings from 1 ms up to 10 seconds (50 buckets).
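+// Sample usage (|elapsed| is an illustrative TimeDelta, not part of this API):
+//   base::UmaHistogramTimes("My.Short.Timing", elapsed);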
+BASE_EXPORT void UmaHistogramTimes(const std::string& name, TimeDelta sample);
+// For medium timings up to 3 minutes (50 buckets).
+BASE_EXPORT void UmaHistogramMediumTimes(const std::string& name,
+                                         TimeDelta sample);
+// For time intervals up to 1 hour (50 buckets).
+BASE_EXPORT void UmaHistogramLongTimes(const std::string& name,
+                                       TimeDelta sample);
+
+// For recording memory-related histograms.
+// Used to measure common KB-granularity memory stats. Range is up to 500M.
+BASE_EXPORT void UmaHistogramMemoryKB(const std::string& name, int sample);
+// Used to measure common MB-granularity memory stats. Range is up to ~1G.
+BASE_EXPORT void UmaHistogramMemoryMB(const std::string& name, int sample);
+// Used to measure common MB-granularity memory stats. Range is up to ~64G.
+BASE_EXPORT void UmaHistogramMemoryLargeMB(const std::string& name, int sample);
+
+// For recording sparse histograms.
+// The |sample| can be any integer, negative or non-negative.
+//
+// Sparse histograms are well suited for recording counts of exact sample values
+// that are sparsely distributed over a relatively large range, in cases where
+// ultra-fast performance is not critical. For instance, Sqlite.Version.* are
+// sparse because for any given database, there's going to be exactly one
+// version logged.
+//
+// Performance:
+// ------------
+// Sparse histograms are typically more memory-efficient but less
+// time-efficient than other histograms. Essentially, sparse histograms use a
+// map rather than a vector for their backing storage; they also require lock
+// acquisition to increment a sample, whereas other histograms do not. Hence,
+// each increment operation is a bit slower than for other histograms. But, if
+// the data is sparse, then they use less memory client-side, because they
+// allocate buckets on demand rather than preallocating.
+//
+// Data size:
+// ----------
+// Note that server-side, we still need to load all buckets, across all users,
+// at once. Thus, please avoid exploding such histograms, i.e. uploading many
+// distinct values to the server (across all users). Concretely, keep the
+// number of distinct values <= 100 ideally, definitely <= 1000. If you have no
+// guarantees on the range of your data, use clamping, e.g.:
+//   UmaHistogramSparse("MyHistogram", ClampToRange(value, 0, 200));
+BASE_EXPORT void UmaHistogramSparse(const std::string& name, int sample);
+
+}  // namespace base
+
+#endif  // BASE_METRICS_HISTOGRAM_FUNCTIONS_H_
diff --git a/base/metrics/histogram_functions_unittest.cc b/base/metrics/histogram_functions_unittest.cc
new file mode 100644
index 0000000..3720674
--- /dev/null
+++ b/base/metrics/histogram_functions_unittest.cc
@@ -0,0 +1,127 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/histogram_functions.h"
+
+#include "base/metrics/histogram_macros.h"
+#include "base/test/histogram_tester.h"
+#include "base/time/time.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+enum UmaHistogramTestingEnum {
+  UMA_HISTOGRAM_TESTING_ENUM_FIRST,
+  UMA_HISTOGRAM_TESTING_ENUM_SECOND,
+  UMA_HISTOGRAM_TESTING_ENUM_THIRD
+};
+
+TEST(HistogramFunctionsTest, ExactLinear) {
+  std::string histogram("Testing.UMA.HistogramExactLinear");
+  HistogramTester tester;
+  UmaHistogramExactLinear(histogram, 10, 100);
+  tester.ExpectUniqueSample(histogram, 10, 1);
+  UmaHistogramExactLinear(histogram, 20, 100);
+  UmaHistogramExactLinear(histogram, 10, 100);
+  tester.ExpectBucketCount(histogram, 10, 2);
+  tester.ExpectBucketCount(histogram, 20, 1);
+  tester.ExpectTotalCount(histogram, 3);
+  // Test linear buckets overflow.
+  UmaHistogramExactLinear(histogram, 200, 100);
+  tester.ExpectBucketCount(histogram, 101, 1);
+  tester.ExpectTotalCount(histogram, 4);
+  // Test linear buckets underflow.
+  UmaHistogramExactLinear(histogram, 0, 100);
+  tester.ExpectBucketCount(histogram, 0, 1);
+  tester.ExpectTotalCount(histogram, 5);
+}
+
+TEST(HistogramFunctionsTest, Enumeration) {
+  std::string histogram("Testing.UMA.HistogramEnumeration");
+  HistogramTester tester;
+  UmaHistogramEnumeration(histogram, UMA_HISTOGRAM_TESTING_ENUM_FIRST,
+                          UMA_HISTOGRAM_TESTING_ENUM_THIRD);
+  tester.ExpectUniqueSample(histogram, UMA_HISTOGRAM_TESTING_ENUM_FIRST, 1);
+
+  // Verify the overflow & underflow bucket exists.
+  UMA_HISTOGRAM_ENUMERATION(
+      histogram, static_cast<int>(UMA_HISTOGRAM_TESTING_ENUM_THIRD) + 10,
+      static_cast<int>(UMA_HISTOGRAM_TESTING_ENUM_THIRD));
+  tester.ExpectBucketCount(
+      histogram, static_cast<int>(UMA_HISTOGRAM_TESTING_ENUM_THIRD) + 1, 1);
+  tester.ExpectTotalCount(histogram, 2);
+}
+
+TEST(HistogramFunctionsTest, Boolean) {
+  std::string histogram("Testing.UMA.HistogramBoolean");
+  HistogramTester tester;
+  UmaHistogramBoolean(histogram, true);
+  tester.ExpectUniqueSample(histogram, 1, 1);
+  UmaHistogramBoolean(histogram, false);
+  tester.ExpectBucketCount(histogram, 0, 1);
+  tester.ExpectTotalCount(histogram, 2);
+}
+
+TEST(HistogramFunctionsTest, Percentage) {
+  std::string histogram("Testing.UMA.HistogramPercentage");
+  HistogramTester tester;
+  UmaHistogramPercentage(histogram, 50);
+  tester.ExpectUniqueSample(histogram, 50, 1);
+  // Test overflows.
+  UmaHistogramPercentage(histogram, 110);
+  tester.ExpectBucketCount(histogram, 101, 1);
+  tester.ExpectTotalCount(histogram, 2);
+}
+
+TEST(HistogramFunctionsTest, Counts) {
+  std::string histogram("Testing.UMA.HistogramCount.Custom");
+  HistogramTester tester;
+  UmaHistogramCustomCounts(histogram, 10, 1, 100, 10);
+  tester.ExpectUniqueSample(histogram, 10, 1);
+  UmaHistogramCustomCounts(histogram, 20, 1, 100, 10);
+  UmaHistogramCustomCounts(histogram, 20, 1, 100, 10);
+  UmaHistogramCustomCounts(histogram, 20, 1, 100, 10);
+  tester.ExpectBucketCount(histogram, 20, 3);
+  tester.ExpectTotalCount(histogram, 4);
+  UmaHistogramCustomCounts(histogram, 110, 1, 100, 10);
+  tester.ExpectBucketCount(histogram, 101, 1);
+  tester.ExpectTotalCount(histogram, 5);
+}
+
+TEST(HistogramFunctionsTest, Times) {
+  std::string histogram("Testing.UMA.HistogramTimes");
+  HistogramTester tester;
+  UmaHistogramTimes(histogram, TimeDelta::FromSeconds(1));
+  tester.ExpectTimeBucketCount(histogram, TimeDelta::FromSeconds(1), 1);
+  tester.ExpectTotalCount(histogram, 1);
+  UmaHistogramTimes(histogram, TimeDelta::FromSeconds(9));
+  tester.ExpectTimeBucketCount(histogram, TimeDelta::FromSeconds(9), 1);
+  tester.ExpectTotalCount(histogram, 2);
+  UmaHistogramTimes(histogram, TimeDelta::FromSeconds(10));  // Overflows.
+  tester.ExpectTimeBucketCount(histogram, TimeDelta::FromSeconds(10), 1);
+  UmaHistogramTimes(histogram, TimeDelta::FromSeconds(20));  // Overflows.
+  // Check the value by picking any overflow time.
+  tester.ExpectTimeBucketCount(histogram, TimeDelta::FromSeconds(11), 2);
+  tester.ExpectTotalCount(histogram, 4);
+}
+
+TEST(HistogramFunctionsTest, Sparse_SupportsLargeRange) {
+  std::string histogram("Testing.UMA.HistogramSparse");
+  HistogramTester tester;
+  UmaHistogramSparse(histogram, 0);
+  UmaHistogramSparse(histogram, 123456789);
+  UmaHistogramSparse(histogram, 123456789);
+  EXPECT_THAT(tester.GetAllSamples(histogram),
+              testing::ElementsAre(Bucket(0, 1), Bucket(123456789, 2)));
+}
+
+TEST(HistogramFunctionsTest, Sparse_SupportsNegativeValues) {
+  std::string histogram("Testing.UMA.HistogramSparse");
+  HistogramTester tester;
+  UmaHistogramSparse(histogram, -1);
+  tester.ExpectUniqueSample(histogram, -1, 1);
+}
+
+}  // namespace base
diff --git a/base/metrics/histogram_macros.h b/base/metrics/histogram_macros.h
new file mode 100644
index 0000000..0960b19
--- /dev/null
+++ b/base/metrics/histogram_macros.h
@@ -0,0 +1,359 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_HISTOGRAM_MACROS_H_
+#define BASE_METRICS_HISTOGRAM_MACROS_H_
+
+#include "base/macros.h"
+#include "base/metrics/histogram.h"
+#include "base/metrics/histogram_macros_internal.h"
+#include "base/metrics/histogram_macros_local.h"
+#include "base/time/time.h"
+
+
+// Macros for efficient use of histograms.
+//
+// For best practices on deciding when to emit to a histogram and what form
+// the histogram should take, see
+// https://chromium.googlesource.com/chromium/src.git/+/HEAD/tools/metrics/histograms/README.md
+
+// TODO(rkaplow): Link to proper documentation on metric creation once we have
+// it in a good state.
+
+// All of these macros must be called with |name| as a runtime constant - it
+// doesn't have to literally be a constant, but it must be the same string on
+// all calls from a particular call site. If this rule is violated, it is
+// possible the data will be written to the wrong histogram.
+
+//------------------------------------------------------------------------------
+// Enumeration histograms.
+
+// These macros create histograms for enumerated data. Ideally, the data should
+// be of the form "event occurs, log the result". We recommend not putting
+// related but not directly connected data as enums within the same histogram.
+// You should define an associated enum, and the input sample should be an
+// element of that enum.
+// All of these macros must be called with |name| as a runtime constant.
+
+// The first variant of UMA_HISTOGRAM_ENUMERATION accepts two arguments: the
+// histogram name and the enum sample. It deduces the correct boundary value to
+// use by looking for an enumerator with the name kMaxValue. kMaxValue should
+// share the value of the highest enumerator: this avoids switch statements
+// having to handle a sentinel no-op value.
+//
+// Sample usage:
+//   // These values are persisted to logs. Entries should not be renumbered and
+//   // numeric values should never be reused.
+//   enum class MyEnum {
+//     kFirstValue = 0,
+//     kSecondValue = 1,
+//     ...
+//     kFinalValue = N,
+//     kMaxValue = kFinalValue,
+//   };
+//   UMA_HISTOGRAM_ENUMERATION("My.Enumeration", MyEnum::kSomeValue);
+//
+// The second variant requires three arguments: the first two are the same as
+// before, and the third argument is the enum boundary: this must be strictly
+// greater than any other enumerator that will be sampled.
+//
+// Sample usage:
+//   // These values are persisted to logs. Entries should not be renumbered and
+//   // numeric values should never be reused.
+//   enum class MyEnum {
+//     FIRST_VALUE = 0,
+//     SECOND_VALUE = 1,
+//     ...
+//     FINAL_VALUE = N,
+//     COUNT
+//   };
+//   UMA_HISTOGRAM_ENUMERATION("My.Enumeration",
+//                             MyEnum::SOME_VALUE, MyEnum::COUNT);
+//
+// Note: If the enum is used in a switch, it is often desirable to avoid writing
+// a case statement to handle an unused sentinel value (i.e. COUNT in the above
+// example). For scoped enums, this is awkward since it requires casting the
+// enum to an arithmetic type and adding one. Instead, prefer the two argument
+// version of the macro which automatically deduces the boundary from kMaxValue.
+#define UMA_HISTOGRAM_ENUMERATION(name, ...)                            \
+  CR_EXPAND_ARG(INTERNAL_UMA_HISTOGRAM_ENUMERATION_GET_MACRO(           \
+      __VA_ARGS__, INTERNAL_UMA_HISTOGRAM_ENUMERATION_SPECIFY_BOUNDARY, \
+      INTERNAL_UMA_HISTOGRAM_ENUMERATION_DEDUCE_BOUNDARY)(              \
+      name, __VA_ARGS__, base::HistogramBase::kUmaTargetedHistogramFlag))
+
+// Histogram for boolean values.
+
+// Sample usage:
+//   UMA_HISTOGRAM_BOOLEAN("Histogram.Boolean", bool_value);
+#define UMA_HISTOGRAM_BOOLEAN(name, sample)                                    \
+    STATIC_HISTOGRAM_POINTER_BLOCK(name, AddBoolean(sample),                   \
+        base::BooleanHistogram::FactoryGet(name,                               \
+            base::HistogramBase::kUmaTargetedHistogramFlag))
+
+//------------------------------------------------------------------------------
+// Linear histograms.
+
+// All of these macros must be called with |name| as a runtime constant.
+
+// Used for capturing integer data with a linear bucketing scheme. This can be
+// used when you want the exact value of some small numeric count, with a
+// maximum of 100 or less. If you need to capture a range greater than 100, we
+// recommend using the COUNT histograms below.
+
+// Sample usage:
+//   UMA_HISTOGRAM_EXACT_LINEAR("Histogram.Linear", count, 10);
+#define UMA_HISTOGRAM_EXACT_LINEAR(name, sample, value_max) \
+  INTERNAL_HISTOGRAM_EXACT_LINEAR_WITH_FLAG(                \
+      name, sample, value_max, base::HistogramBase::kUmaTargetedHistogramFlag)
+
+// Used for capturing basic percentages. Each value from 0 to 100 gets its own
+// size-1 bucket (boundary 101).
+
+// Sample usage:
+//   UMA_HISTOGRAM_PERCENTAGE("Histogram.Percent", percent_as_int);
+#define UMA_HISTOGRAM_PERCENTAGE(name, percent_as_int) \
+  UMA_HISTOGRAM_EXACT_LINEAR(name, percent_as_int, 101)
+
+//------------------------------------------------------------------------------
+// Count histograms. These are used for collecting numeric data. Note that we
+// have macros for more specialized use cases below (memory, time, percentages).
+
+// The number suffixes here refer to the max size of the sample, i.e. COUNT_1000
+// will be able to collect samples of counts up to 1000. The default number of
+// buckets in all default macros is 50. We recommend erring on the side of too
+// large a range versus too short a range.
+// These macros default to exponential histograms - i.e. the lengths of the
+// bucket ranges exponentially increase as the sample range increases.
+// These should *not* be used if you are interested in exact counts, i.e. a
+// bucket range of 1. In those cases, you should use the ENUMERATION macros
+// defined above. These should also not be used to capture the number of some
+// event, e.g. "button X was clicked N times". In such cases, an enum should be
+// used, ideally with an appropriate baseline enum entry included.
+// All of these macros must be called with |name| as a runtime constant.
+
+// Sample usage:
+//   UMA_HISTOGRAM_COUNTS_1M("My.Histogram", sample);
+
+#define UMA_HISTOGRAM_COUNTS_100(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS(    \
+    name, sample, 1, 100, 50)
+
+#define UMA_HISTOGRAM_COUNTS_1000(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS(   \
+    name, sample, 1, 1000, 50)
+
+#define UMA_HISTOGRAM_COUNTS_10000(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS(  \
+    name, sample, 1, 10000, 50)
+
+#define UMA_HISTOGRAM_COUNTS_100000(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
+    name, sample, 1, 100000, 50)
+
+#define UMA_HISTOGRAM_COUNTS_1M(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS(     \
+    name, sample, 1, 1000000, 50)
+
+#define UMA_HISTOGRAM_COUNTS_10M(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS(    \
+    name, sample, 1, 10000000, 50)
+
+// This can be used when the default ranges are not sufficient. This macro lets
+// the metric developer customize the min and max of the sampled range, as well
+// as the number of buckets recorded.
+// Any data outside the range here will be put in underflow and overflow
+// buckets. Min values should be >=1 as emitted 0s will still go into the
+// underflow bucket.
+
+// Sample usage:
+//   UMA_HISTOGRAM_CUSTOM_COUNTS("My.Histogram", sample, 1, 100000000, 100);
+#define UMA_HISTOGRAM_CUSTOM_COUNTS(name, sample, min, max, bucket_count)      \
+    INTERNAL_HISTOGRAM_CUSTOM_COUNTS_WITH_FLAG(                                \
+        name, sample, min, max, bucket_count,                                  \
+        base::HistogramBase::kUmaTargetedHistogramFlag)
+
+//------------------------------------------------------------------------------
+// Timing histograms. These are used for collecting timing data (generally
+// latencies).
+
+// These macros create exponentially sized histograms (lengths of the bucket
+// ranges exponentially increase as the sample range increases). The input
+// sample is a base::TimeDelta. The output data is measured in ms granularity.
+// All of these macros must be called with |name| as a runtime constant.
+
+// Sample usage:
+//   UMA_HISTOGRAM_TIMES("My.Timing.Histogram", time_delta);
+
+// Short timings - up to 10 seconds. For high-resolution (microseconds) timings,
+// see UMA_HISTOGRAM_CUSTOM_MICROSECONDS_TIMES.
+#define UMA_HISTOGRAM_TIMES(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES(          \
+    name, sample, base::TimeDelta::FromMilliseconds(1),                        \
+    base::TimeDelta::FromSeconds(10), 50)
+
+// Medium timings - up to 3 minutes. Note this starts at 10ms (no good reason,
+// but not worth changing).
+#define UMA_HISTOGRAM_MEDIUM_TIMES(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES(   \
+    name, sample, base::TimeDelta::FromMilliseconds(10),                       \
+    base::TimeDelta::FromMinutes(3), 50)
+
+// Long timings - up to an hour.
+#define UMA_HISTOGRAM_LONG_TIMES(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES(     \
+    name, sample, base::TimeDelta::FromMilliseconds(1),                        \
+    base::TimeDelta::FromHours(1), 50)
+
+// Long timings with higher granularity - up to an hour with 100 buckets.
+#define UMA_HISTOGRAM_LONG_TIMES_100(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES( \
+    name, sample, base::TimeDelta::FromMilliseconds(1),                        \
+    base::TimeDelta::FromHours(1), 100)
+
+// This can be used when the default ranges are not sufficient. This macro lets
+// the metric developer customize the min and max of the sampled range, as well
+// as the number of buckets recorded.
+
+// Sample usage:
+//   UMA_HISTOGRAM_CUSTOM_TIMES("Very.Long.Timing.Histogram", time_delta,
+//       base::TimeDelta::FromSeconds(1), base::TimeDelta::FromDays(1), 100);
+#define UMA_HISTOGRAM_CUSTOM_TIMES(name, sample, min, max, bucket_count) \
+  STATIC_HISTOGRAM_POINTER_BLOCK(                                        \
+      name, AddTimeMillisecondsGranularity(sample),                      \
+      base::Histogram::FactoryTimeGet(                                   \
+          name, min, max, bucket_count,                                  \
+          base::HistogramBase::kUmaTargetedHistogramFlag))
+
+// Same as UMA_HISTOGRAM_CUSTOM_TIMES but reports |sample| in microseconds,
+// dropping the report if this client doesn't have a high-resolution clock.
+//
+// Note: dropping reports on clients with low-resolution clocks means these
+// reports will be biased to a portion of the population on Windows. See
+// Windows.HasHighResolutionTimeTicks for the affected sample.
+//
+// Sample usage:
+//  UMA_HISTOGRAM_CUSTOM_MICROSECONDS_TIMES(
+//      "High.Resolution.TimingMicroseconds.Histogram", time_delta,
+//      base::TimeDelta::FromMicroseconds(1),
+//      base::TimeDelta::FromMilliseconds(10), 100);
+#define UMA_HISTOGRAM_CUSTOM_MICROSECONDS_TIMES(name, sample, min, max, \
+                                                bucket_count)           \
+  STATIC_HISTOGRAM_POINTER_BLOCK(                                       \
+      name, AddTimeMicrosecondsGranularity(sample),                     \
+      base::Histogram::FactoryMicrosecondsTimeGet(                      \
+          name, min, max, bucket_count,                                 \
+          base::HistogramBase::kUmaTargetedHistogramFlag))
+
+// Scoped class which logs its time on this earth as a UMA statistic. This is
+// recommended for when you want a histogram which measures the time it takes
+// for a method to execute. This measures up to 10 seconds. It uses
+// UMA_HISTOGRAM_TIMES under the hood.
+
+// Sample usage:
+//   void Function() {
+//     SCOPED_UMA_HISTOGRAM_TIMER("Component.FunctionTime");
+//     ...
+//   }
+#define SCOPED_UMA_HISTOGRAM_TIMER(name)                                       \
+  INTERNAL_SCOPED_UMA_HISTOGRAM_TIMER_EXPANDER(name, false, __COUNTER__)
+
+// Similar scoped histogram timer, but this uses UMA_HISTOGRAM_LONG_TIMES_100,
+// which measures up to an hour, and uses 100 buckets. This is more expensive
+// to store, so only use it if the timed event regularly takes >10 seconds.
+#define SCOPED_UMA_HISTOGRAM_LONG_TIMER(name)                                  \
+  INTERNAL_SCOPED_UMA_HISTOGRAM_TIMER_EXPANDER(name, true, __COUNTER__)
+
+
+//------------------------------------------------------------------------------
+// Memory histograms.
+
+// These macros create exponentially sized histograms (lengths of the bucket
+// ranges exponentially increase as the sample range increases). The input
+// sample must be a number measured in kilobytes.
+// All of these macros must be called with |name| as a runtime constant.
+
+// Sample usage:
+//   UMA_HISTOGRAM_MEMORY_KB("My.Memory.Histogram", memory_in_kb);
+
+// Used to measure common KB-granularity memory stats. Range is up to 500000KB -
+// approximately 500M.
+#define UMA_HISTOGRAM_MEMORY_KB(name, sample)                                  \
+    UMA_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1000, 500000, 50)
+
+// Used to measure common MB-granularity memory stats. Range is up to ~64G.
+#define UMA_HISTOGRAM_MEMORY_LARGE_MB(name, sample)                            \
+    UMA_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 64000, 100)
+
+
+//------------------------------------------------------------------------------
+// Stability-specific histograms.
+
+// Histograms logged as stability histograms will be included in the initial
+// stability log. See comments by declaration of
+// MetricsService::PrepareInitialStabilityLog().
+// All of these macros must be called with |name| as a runtime constant.
+
+// For details on usage, see the documentation on the non-stability equivalents.
+
+#define UMA_STABILITY_HISTOGRAM_COUNTS_100(name, sample)                       \
+    UMA_STABILITY_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 100, 50)
+
+#define UMA_STABILITY_HISTOGRAM_CUSTOM_COUNTS(name, sample, min, max,          \
+                                              bucket_count)                    \
+    INTERNAL_HISTOGRAM_CUSTOM_COUNTS_WITH_FLAG(                                \
+        name, sample, min, max, bucket_count,                                  \
+        base::HistogramBase::kUmaStabilityHistogramFlag)
+
+#define UMA_STABILITY_HISTOGRAM_ENUMERATION(name, sample, enum_max)            \
+    INTERNAL_HISTOGRAM_ENUMERATION_WITH_FLAG(                                  \
+        name, sample, enum_max,                                                \
+        base::HistogramBase::kUmaStabilityHistogramFlag)
+
+//------------------------------------------------------------------------------
+// Histogram instantiation helpers.
+
+// Support a collection of histograms, perhaps one for each entry in an
+// enumeration. This macro manages a block of pointers, adding to a specific
+// one by its index.
+//
+// A typical instantiation looks something like this:
+//  STATIC_HISTOGRAM_POINTER_GROUP(
+//      GetHistogramNameForIndex(histogram_index),
+//      histogram_index, MAXIMUM_HISTOGRAM_INDEX, Add(some_delta),
+//      base::Histogram::FactoryGet(
+//          GetHistogramNameForIndex(histogram_index),
+//          MINIMUM_SAMPLE, MAXIMUM_SAMPLE, BUCKET_COUNT,
+//          base::HistogramBase::kUmaTargetedHistogramFlag));
+//
+// Though it seems inefficient to generate the name twice, the first
+// instance will be used only in DCHECK builds and the second will
+// execute only during the first access to the given index, after which
+// the pointer is cached and the name is never needed again.
+#define STATIC_HISTOGRAM_POINTER_GROUP(constant_histogram_name, index,        \
+                                       constant_maximum,                      \
+                                       histogram_add_method_invocation,       \
+                                       histogram_factory_get_invocation)      \
+  do {                                                                        \
+    static base::subtle::AtomicWord atomic_histograms[constant_maximum];      \
+    DCHECK_LE(0, index);                                                      \
+    DCHECK_LT(index, constant_maximum);                                       \
+    HISTOGRAM_POINTER_USE(&atomic_histograms[index], constant_histogram_name, \
+                          histogram_add_method_invocation,                    \
+                          histogram_factory_get_invocation);                  \
+  } while (0)
+
+//------------------------------------------------------------------------------
+// Deprecated histogram macros. Not recommended for current use.
+
+// Legacy name for UMA_HISTOGRAM_COUNTS_1M. Suggest using explicit naming
+// and not using this macro going forward.
+#define UMA_HISTOGRAM_COUNTS(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS(        \
+    name, sample, 1, 1000000, 50)
+
+// MB-granularity memory metric. This has a short max (1G).
+#define UMA_HISTOGRAM_MEMORY_MB(name, sample)                                  \
+    UMA_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 1000, 50)
+
+// For an enum with customized range. In general, sparse histograms should be
+// used instead.
+// Samples should be one of the values in the std::vector<int> provided via
+// |custom_ranges|. See the comments above CustomRanges::FactoryGet about the
+// requirement of |custom_ranges|. You can use the helper function
+// CustomHistogram::ArrayToCustomEnumRanges to transform a C-style array of
+// valid sample values to a std::vector<int>.
+#define UMA_HISTOGRAM_CUSTOM_ENUMERATION(name, sample, custom_ranges)          \
+    STATIC_HISTOGRAM_POINTER_BLOCK(name, Add(sample),                          \
+        base::CustomHistogram::FactoryGet(name, custom_ranges,                 \
+            base::HistogramBase::kUmaTargetedHistogramFlag))
+
+#endif  // BASE_METRICS_HISTOGRAM_MACROS_H_
diff --git a/base/metrics/histogram_macros_internal.h b/base/metrics/histogram_macros_internal.h
new file mode 100644
index 0000000..ff3702b
--- /dev/null
+++ b/base/metrics/histogram_macros_internal.h
@@ -0,0 +1,226 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_HISTOGRAM_MACROS_INTERNAL_H_
+#define BASE_METRICS_HISTOGRAM_MACROS_INTERNAL_H_
+
+#include <stdint.h>
+
+#include <limits>
+#include <type_traits>
+
+#include "base/atomicops.h"
+#include "base/logging.h"
+#include "base/metrics/histogram.h"
+#include "base/metrics/sparse_histogram.h"
+#include "base/time/time.h"
+
+// This is for macros and helpers internal to base/metrics. They should not be
+// used outside of this directory. For writing to UMA histograms, see
+// histogram_macros.h.
+
+namespace base {
+namespace internal {
+
+// Helper traits for deducing the boundary value for enums.
+template <typename Enum, typename SFINAE = void>
+struct EnumSizeTraits {
+  static constexpr Enum Count() {
+    static_assert(
+        sizeof(Enum) == 0,
+        "enum must define a kMaxValue enumerator to use this macro!");
+    return Enum();
+  }
+};
+
+// Since the UMA histogram macros expect a value one larger than the max defined
+// enumerator value, add one.
+template <typename Enum>
+struct EnumSizeTraits<
+    Enum,
+    std::enable_if_t<std::is_enum<decltype(Enum::kMaxValue)>::value>> {
+  static constexpr Enum Count() {
+    return static_cast<Enum>(
+        static_cast<std::underlying_type_t<Enum>>(Enum::kMaxValue) + 1);
+  }
+};
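+// For illustration (Suit is a hypothetical enum, not part of this file): given
+//   enum class Suit { kClubs, kDiamonds, kHearts, kSpades,
+//                     kMaxValue = kSpades };
+// EnumSizeTraits<Suit>::Count() evaluates to static_cast<Suit>(4), i.e. one
+// past kMaxValue, which is the boundary value the UMA macros expect.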
+
+}  // namespace internal
+}  // namespace base
+
+// TODO(rkaplow): Improve commenting of these methods.
+//------------------------------------------------------------------------------
+// Histograms are often placed in hot code paths where they are called many
+// times, and performance is critical. As a result, they are designed to have
+// a very low recurring cost of executing (adding additional samples). Toward
+// that end, the macros declare a static pointer to the histogram in question,
+// and only take a "slow path" to construct (or find) the histogram on the
+// first run through the macro. We deliberately leak the histograms at shutdown
+// so that the cached pointers remain valid for the entire life of the process.
+
+// In some cases (integration into 3rd party code), it's useful to separate the
+// definition of |atomic_histogram_pointer| from its use. To achieve this we
+// define HISTOGRAM_POINTER_USE, which uses an |atomic_histogram_pointer|, and
+// STATIC_HISTOGRAM_POINTER_BLOCK, which defines an |atomic_histogram_pointer|
+// and forwards to HISTOGRAM_POINTER_USE.
+#define HISTOGRAM_POINTER_USE(atomic_histogram_pointer,                        \
+                              constant_histogram_name,                         \
+                              histogram_add_method_invocation,                 \
+                              histogram_factory_get_invocation)                \
+  do {                                                                         \
+    /*                                                                         \
+     * Acquire_Load() ensures that we acquire visibility to the                \
+     * pointed-to data in the histogram.                                       \
+     */                                                                        \
+    base::HistogramBase* histogram_pointer(                                    \
+        reinterpret_cast<base::HistogramBase*>(                                \
+            base::subtle::Acquire_Load(atomic_histogram_pointer)));            \
+    if (!histogram_pointer) {                                                  \
+      /*                                                                       \
+       * This is the slow path, which will construct OR find the               \
+       * matching histogram.  histogram_factory_get_invocation includes        \
+       * locks on a global histogram name map and is completely thread         \
+       * safe.                                                                 \
+       */                                                                      \
+      histogram_pointer = histogram_factory_get_invocation;                    \
+                                                                               \
+      /*                                                                       \
+       * Use Release_Store to ensure that the histogram data is made           \
+       * available globally before we make the pointer visible. Several        \
+       * threads may perform this store, but the same value will be            \
+       * stored in all cases (for a given named/spec'ed histogram).            \
+       * We could do this without any barrier, since FactoryGet entered        \
+       * and exited a lock after construction, but this barrier makes          \
+       * things clear.                                                         \
+       */                                                                      \
+      base::subtle::Release_Store(                                             \
+          atomic_histogram_pointer,                                            \
+          reinterpret_cast<base::subtle::AtomicWord>(histogram_pointer));      \
+    }                                                                          \
+    if (DCHECK_IS_ON())                                                        \
+      histogram_pointer->CheckName(constant_histogram_name);                   \
+    histogram_pointer->histogram_add_method_invocation;                        \
+  } while (0)
+
+// This is a helper macro used by other macros and shouldn't be used directly.
+// Defines the static |atomic_histogram_pointer| and forwards to
+// HISTOGRAM_POINTER_USE.
+#define STATIC_HISTOGRAM_POINTER_BLOCK(constant_histogram_name,                \
+                                       histogram_add_method_invocation,        \
+                                       histogram_factory_get_invocation)       \
+  do {                                                                         \
+    /*                                                                         \
+     * The pointer's presence indicates that the initialization is complete.   \
+     * Initialization is idempotent, so it can safely be atomically repeated.  \
+     */                                                                        \
+    static base::subtle::AtomicWord atomic_histogram_pointer = 0;              \
+    HISTOGRAM_POINTER_USE(&atomic_histogram_pointer, constant_histogram_name,  \
+                          histogram_add_method_invocation,                     \
+                          histogram_factory_get_invocation);                   \
+  } while (0)
+
+// This is a helper macro used by other macros and shouldn't be used directly.
+#define INTERNAL_HISTOGRAM_CUSTOM_COUNTS_WITH_FLAG(name, sample, min, max,     \
+                                                   bucket_count, flag)         \
+    STATIC_HISTOGRAM_POINTER_BLOCK(                                            \
+        name, Add(sample),                                                     \
+        base::Histogram::FactoryGet(name, min, max, bucket_count, flag))
+
+// This is a helper macro used by other macros and shouldn't be used directly.
+// The bucketing scheme is linear with a bucket size of 1. For N items,
+// recording values in the range [0, N - 1] creates a linear histogram with N +
+// 1 buckets:
+//   [0, 1), [1, 2), ..., [N - 1, N)
+// and an overflow bucket [N, infinity).
+//
+// Code should never emit to the overflow bucket; only to the other N buckets.
+// This allows future versions of Chrome to safely increase the boundary size.
+// Otherwise, the histogram would have [N - 1, infinity) as its overflow bucket,
+// and so the maximal value (N - 1) would be emitted to this overflow bucket.
+// But, if an additional value were later added, the bucket label for
+// the value (N - 1) would change to [N - 1, N), which would result in different
+// versions of Chrome using different bucket labels for identical data.
+#define INTERNAL_HISTOGRAM_EXACT_LINEAR_WITH_FLAG(name, sample, boundary,  \
+                                                  flag)                    \
+  do {                                                                     \
+    static_assert(!std::is_enum<decltype(sample)>::value,                  \
+                  "|sample| should not be an enum type!");                 \
+    static_assert(!std::is_enum<decltype(boundary)>::value,                \
+                  "|boundary| should not be an enum type!");               \
+    STATIC_HISTOGRAM_POINTER_BLOCK(                                        \
+        name, Add(sample),                                                 \
+        base::LinearHistogram::FactoryGet(name, 1, boundary, boundary + 1, \
+                                          flag));                          \
+  } while (0)
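+// Worked example (illustrative): with boundary N = 3, the FactoryGet call
+// above creates a linear histogram whose buckets behave as
+//   [0, 1), [1, 2), [2, 3), and the overflow bucket [3, infinity).
+// Emitting only the values 0, 1 and 2 keeps the overflow bucket empty, so a
+// future version can safely raise N without relabeling existing buckets.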
+
+// Helper for 'overloading' UMA_HISTOGRAM_ENUMERATION with a variable number of
+// arguments.
+#define INTERNAL_UMA_HISTOGRAM_ENUMERATION_GET_MACRO(_1, _2, NAME, ...) NAME
+
+#define INTERNAL_UMA_HISTOGRAM_ENUMERATION_DEDUCE_BOUNDARY(name, sample,       \
+                                                           flags)              \
+  INTERNAL_HISTOGRAM_ENUMERATION_WITH_FLAG(                                    \
+      name, sample, base::internal::EnumSizeTraits<decltype(sample)>::Count(), \
+      flags)
+
+// Note: The value in |sample| must be strictly less than |enum_size|.
+#define INTERNAL_UMA_HISTOGRAM_ENUMERATION_SPECIFY_BOUNDARY(name, sample,     \
+                                                            enum_size, flags) \
+  INTERNAL_HISTOGRAM_ENUMERATION_WITH_FLAG(name, sample, enum_size, flags)
+
+// Similar to INTERNAL_HISTOGRAM_EXACT_LINEAR_WITH_FLAG but intended for
+// enumerations. This delegates the work to that macro, and additionally
+// supports scoped enumerations by forcing an explicit cast to the
+// HistogramBase::Sample integral type.
+//
+// Note the range checks verify two separate issues:
+// - that the declared enum size isn't out of range of HistogramBase::Sample
+// - that the declared enum size is > 0
+//
+// TODO(dcheng): This should assert that the passed in types are actually enum
+// types.
+#define INTERNAL_HISTOGRAM_ENUMERATION_WITH_FLAG(name, sample, boundary, flag) \
+  do {                                                                         \
+    using decayed_sample = std::decay<decltype(sample)>::type;                 \
+    using decayed_boundary = std::decay<decltype(boundary)>::type;             \
+    static_assert(!std::is_enum<decayed_boundary>::value ||                    \
+                      std::is_enum<decayed_sample>::value,                     \
+                  "Unexpected: |boundary| is enum, but |sample| is not.");     \
+    static_assert(!std::is_enum<decayed_sample>::value ||                      \
+                      !std::is_enum<decayed_boundary>::value ||                \
+                      std::is_same<decayed_sample, decayed_boundary>::value,   \
+                  "|sample| and |boundary| shouldn't be of different enums");  \
+    static_assert(                                                             \
+        static_cast<uintmax_t>(boundary) <                                     \
+            static_cast<uintmax_t>(                                            \
+                std::numeric_limits<base::HistogramBase::Sample>::max()),      \
+        "|boundary| is out of range of HistogramBase::Sample");                \
+    INTERNAL_HISTOGRAM_EXACT_LINEAR_WITH_FLAG(                                 \
+        name, static_cast<base::HistogramBase::Sample>(sample),                \
+        static_cast<base::HistogramBase::Sample>(boundary), flag);             \
+  } while (0)
+
+// This is a helper macro used by other macros and shouldn't be used directly.
+// This is necessary to expand __COUNTER__ to an actual value.
+#define INTERNAL_SCOPED_UMA_HISTOGRAM_TIMER_EXPANDER(name, is_long, key)       \
+  INTERNAL_SCOPED_UMA_HISTOGRAM_TIMER_UNIQUE(name, is_long, key)
+
+// This is a helper macro used by other macros and shouldn't be used directly.
+#define INTERNAL_SCOPED_UMA_HISTOGRAM_TIMER_UNIQUE(name, is_long, key)         \
+  class ScopedHistogramTimer##key {                                            \
+   public:                                                                     \
+    ScopedHistogramTimer##key() : constructed_(base::TimeTicks::Now()) {}      \
+    ~ScopedHistogramTimer##key() {                                             \
+      base::TimeDelta elapsed = base::TimeTicks::Now() - constructed_;         \
+      if (is_long) {                                                           \
+        UMA_HISTOGRAM_LONG_TIMES_100(name, elapsed);                           \
+      } else {                                                                 \
+        UMA_HISTOGRAM_TIMES(name, elapsed);                                    \
+      }                                                                        \
+    }                                                                          \
+   private:                                                                    \
+    base::TimeTicks constructed_;                                              \
+  } scoped_histogram_timer_##key
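+
+// For illustration: assuming __COUNTER__ expands to 7 at the use site,
+//   SCOPED_UMA_HISTOGRAM_TIMER("My.Function.Time");
+// roughly becomes
+//   class ScopedHistogramTimer7 { ... } scoped_histogram_timer_7;
+// which is why two timers in one scope get distinct class and variable names.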
+
+#endif  // BASE_METRICS_HISTOGRAM_MACROS_INTERNAL_H_
diff --git a/base/metrics/histogram_macros_local.h b/base/metrics/histogram_macros_local.h
new file mode 100644
index 0000000..c4d333b
--- /dev/null
+++ b/base/metrics/histogram_macros_local.h
@@ -0,0 +1,90 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_HISTOGRAM_MACROS_LOCAL_H_
+#define BASE_METRICS_HISTOGRAM_MACROS_LOCAL_H_
+
+#include "base/logging.h"
+#include "base/metrics/histogram.h"
+#include "base/metrics/histogram_macros_internal.h"
+#include "base/time/time.h"
+
+// TODO(rkaplow): Migrate all LOCAL_* usage within Chromium to include this
+// file instead of the histogram_macros.h file.
+
+//------------------------------------------------------------------------------
+// Enumeration histograms.
+//
+// For usage details, see the equivalents in histogram_macros.h.
+
+#define LOCAL_HISTOGRAM_ENUMERATION(name, ...)                          \
+  CR_EXPAND_ARG(INTERNAL_UMA_HISTOGRAM_ENUMERATION_GET_MACRO(           \
+      __VA_ARGS__, INTERNAL_UMA_HISTOGRAM_ENUMERATION_SPECIFY_BOUNDARY, \
+      INTERNAL_UMA_HISTOGRAM_ENUMERATION_DEDUCE_BOUNDARY)(              \
+      name, __VA_ARGS__, base::HistogramBase::kNoFlags))
+
+#define LOCAL_HISTOGRAM_BOOLEAN(name, sample)                                  \
+    STATIC_HISTOGRAM_POINTER_BLOCK(name, AddBoolean(sample),                   \
+        base::BooleanHistogram::FactoryGet(name, base::Histogram::kNoFlags))
+
+//------------------------------------------------------------------------------
+// Percentage histograms.
+//
+// For usage details, see the equivalents in histogram_macros.h
+
+#define LOCAL_HISTOGRAM_PERCENTAGE(name, under_one_hundred)                    \
+    LOCAL_HISTOGRAM_ENUMERATION(name, under_one_hundred, 101)
+
+//------------------------------------------------------------------------------
+// Count histograms. These are used for collecting numeric data. Note that we
+// have macros for more specialized use cases below (memory, time, percentages).
+// For usage details, see the equivalents in histogram_macros.h.
+
+#define LOCAL_HISTOGRAM_COUNTS_100(name, sample)                               \
+    LOCAL_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 100, 50)
+
+#define LOCAL_HISTOGRAM_COUNTS_10000(name, sample)                             \
+    LOCAL_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 10000, 50)
+
+#define LOCAL_HISTOGRAM_COUNTS_1000000(name, sample)                           \
+    LOCAL_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 1000000, 50)
+
+#define LOCAL_HISTOGRAM_CUSTOM_COUNTS(name, sample, min, max, bucket_count)    \
+    INTERNAL_HISTOGRAM_CUSTOM_COUNTS_WITH_FLAG(                                \
+        name, sample, min, max, bucket_count, base::HistogramBase::kNoFlags)
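+
+// Sample usage (illustrative; |item_count| is an assumed variable). LOCAL_*
+// histograms are recorded with kNoFlags and are therefore never uploaded:
+//   LOCAL_HISTOGRAM_COUNTS_100("My.Local.Count", item_count);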
+
+//------------------------------------------------------------------------------
+// Timing histograms. These are used for collecting timing data (generally
+// latencies).
+//
+// For usage details, see the equivalents in histogram_macros.h.
+
+#define LOCAL_HISTOGRAM_TIMES(name, sample) LOCAL_HISTOGRAM_CUSTOM_TIMES(      \
+    name, sample, base::TimeDelta::FromMilliseconds(1),                        \
+    base::TimeDelta::FromSeconds(10), 50)
+
+#define LOCAL_HISTOGRAM_CUSTOM_TIMES(name, sample, min, max, bucket_count) \
+  STATIC_HISTOGRAM_POINTER_BLOCK(                                          \
+      name, AddTimeMillisecondsGranularity(sample),                        \
+      base::Histogram::FactoryTimeGet(name, min, max, bucket_count,        \
+                                      base::HistogramBase::kNoFlags))
+
+//------------------------------------------------------------------------------
+// Memory histograms.
+//
+// For usage details, see the equivalents in histogram_macros.h.
+
+#define LOCAL_HISTOGRAM_MEMORY_KB(name, sample) LOCAL_HISTOGRAM_CUSTOM_COUNTS( \
+    name, sample, 1000, 500000, 50)
+
+//------------------------------------------------------------------------------
+// Deprecated histograms. Not recommended for current use.
+
+// TODO(rkaplow): See if we can clean up this macro and usage.
+// Legacy non-explicit version. We suggest using LOCAL_HISTOGRAM_COUNTS_1000000
+// instead.
+#define LOCAL_HISTOGRAM_COUNTS(name, sample)                                   \
+    LOCAL_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 1000000, 50)
+
+#endif  // BASE_METRICS_HISTOGRAM_MACROS_LOCAL_H_
diff --git a/base/metrics/histogram_macros_unittest.cc b/base/metrics/histogram_macros_unittest.cc
new file mode 100644
index 0000000..3c592b0
--- /dev/null
+++ b/base/metrics/histogram_macros_unittest.cc
@@ -0,0 +1,57 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/histogram_macros.h"
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(ScopedHistogramTimer, TwoTimersOneScope) {
+  SCOPED_UMA_HISTOGRAM_TIMER("TestTimer0");
+  SCOPED_UMA_HISTOGRAM_TIMER("TestTimer1");
+  SCOPED_UMA_HISTOGRAM_LONG_TIMER("TestLongTimer0");
+  SCOPED_UMA_HISTOGRAM_LONG_TIMER("TestLongTimer1");
+}
+
+// Compile tests for UMA_HISTOGRAM_ENUMERATION with the three different types it
+// accepts:
+// - integral types
+// - unscoped enums
+// - scoped enums
+TEST(HistogramMacro, IntegralPseudoEnumeration) {
+  UMA_HISTOGRAM_ENUMERATION("Test.FauxEnumeration", 1, 10000);
+}
+
+TEST(HistogramMacro, UnscopedEnumeration) {
+  enum TestEnum : char {
+    FIRST_VALUE,
+    SECOND_VALUE,
+    THIRD_VALUE,
+    MAX_ENTRIES,
+  };
+  UMA_HISTOGRAM_ENUMERATION("Test.UnscopedEnumeration", SECOND_VALUE,
+                            MAX_ENTRIES);
+}
+
+TEST(HistogramMacro, ScopedEnumeration) {
+  enum class TestEnum {
+    FIRST_VALUE,
+    SECOND_VALUE,
+    THIRD_VALUE,
+    kMaxValue = THIRD_VALUE,
+  };
+  UMA_HISTOGRAM_ENUMERATION("Test.ScopedEnumeration", TestEnum::FIRST_VALUE);
+
+  enum class TestEnum2 {
+    FIRST_VALUE,
+    SECOND_VALUE,
+    THIRD_VALUE,
+    MAX_ENTRIES,
+  };
+  UMA_HISTOGRAM_ENUMERATION("Test.ScopedEnumeration2", TestEnum2::SECOND_VALUE,
+                            TestEnum2::MAX_ENTRIES);
+}
+
+}  // namespace base
diff --git a/base/metrics/histogram_samples.cc b/base/metrics/histogram_samples.cc
new file mode 100644
index 0000000..6830637
--- /dev/null
+++ b/base/metrics/histogram_samples.cc
@@ -0,0 +1,315 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/histogram_samples.h"
+
+#include <limits>
+
+#include "base/compiler_specific.h"
+#include "base/metrics/histogram_functions.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/numerics/safe_math.h"
+#include "base/pickle.h"
+
+namespace base {
+
+namespace {
+
+// A shorthand constant for the max value of size_t.
+constexpr size_t kSizeMax = std::numeric_limits<size_t>::max();
+
+// A constant stored in an AtomicSingleSample (as_atomic) to indicate that the
+// sample is "disabled" and no further accumulation should be done with it. The
+// value is chosen such that it will be UINT16_MAX for both |bucket| and
+// |count|, and thus less likely to conflict with real use. Conflicts are
+// explicitly handled in the code but it's worth making them as unlikely as
+// possible.
+constexpr int32_t kDisabledSingleSample = -1;
+
+class SampleCountPickleIterator : public SampleCountIterator {
+ public:
+  explicit SampleCountPickleIterator(PickleIterator* iter);
+
+  bool Done() const override;
+  void Next() override;
+  void Get(HistogramBase::Sample* min,
+           int64_t* max,
+           HistogramBase::Count* count) const override;
+
+ private:
+  PickleIterator* const iter_;
+
+  HistogramBase::Sample min_;
+  int64_t max_;
+  HistogramBase::Count count_;
+  bool is_done_;
+};
+
+SampleCountPickleIterator::SampleCountPickleIterator(PickleIterator* iter)
+    : iter_(iter),
+      is_done_(false) {
+  Next();
+}
+
+bool SampleCountPickleIterator::Done() const {
+  return is_done_;
+}
+
+void SampleCountPickleIterator::Next() {
+  DCHECK(!Done());
+  if (!iter_->ReadInt(&min_) || !iter_->ReadInt64(&max_) ||
+      !iter_->ReadInt(&count_)) {
+    is_done_ = true;
+  }
+}
+
+void SampleCountPickleIterator::Get(HistogramBase::Sample* min,
+                                    int64_t* max,
+                                    HistogramBase::Count* count) const {
+  DCHECK(!Done());
+  *min = min_;
+  *max = max_;
+  *count = count_;
+}
+
+}  // namespace
+
+static_assert(sizeof(HistogramSamples::AtomicSingleSample) ==
+                  sizeof(subtle::Atomic32),
+              "AtomicSingleSample isn't 32 bits");
+
+HistogramSamples::SingleSample HistogramSamples::AtomicSingleSample::Load()
+    const {
+  AtomicSingleSample single_sample = subtle::Acquire_Load(&as_atomic);
+
+  // If the sample was extracted/disabled, it's still zero to the outside.
+  if (single_sample.as_atomic == kDisabledSingleSample)
+    single_sample.as_atomic = 0;
+
+  return single_sample.as_parts;
+}
+
+HistogramSamples::SingleSample HistogramSamples::AtomicSingleSample::Extract(
+    bool disable) {
+  AtomicSingleSample single_sample = subtle::NoBarrier_AtomicExchange(
+      &as_atomic, disable ? kDisabledSingleSample : 0);
+  if (single_sample.as_atomic == kDisabledSingleSample)
+    single_sample.as_atomic = 0;
+  return single_sample.as_parts;
+}
+
+bool HistogramSamples::AtomicSingleSample::Accumulate(
+    size_t bucket,
+    HistogramBase::Count count) {
+  if (count == 0)
+    return true;
+
+  // Convert the parameters to 16-bit variables because it's all 16-bit below.
+  // To support decrements/subtractions, divide the |count| into sign/value and
+  // do the proper operation below. The alternative is to change the single-
+  // sample's count to be a signed integer (int16_t) and just add an int16_t
+  // |count16| but that is somewhat wasteful given that the single-sample is
+  // never expected to have a count less than zero.
+  if (count < -std::numeric_limits<uint16_t>::max() ||
+      count > std::numeric_limits<uint16_t>::max() ||
+      bucket > std::numeric_limits<uint16_t>::max()) {
+    return false;
+  }
+  bool count_is_negative = count < 0;
+  uint16_t count16 = static_cast<uint16_t>(count_is_negative ? -count : count);
+  uint16_t bucket16 = static_cast<uint16_t>(bucket);
+
+  // A local, unshared copy of the single-sample is necessary so the parts
+  // can be manipulated without worrying about atomicity.
+  AtomicSingleSample single_sample;
+
+  bool sample_updated;
+  do {
+    subtle::Atomic32 original = subtle::Acquire_Load(&as_atomic);
+    if (original == kDisabledSingleSample)
+      return false;
+    single_sample.as_atomic = original;
+    if (single_sample.as_atomic != 0) {
+      // Only the same bucket (parameter and stored) can be counted multiple
+      // times.
+      if (single_sample.as_parts.bucket != bucket16)
+        return false;
+    } else {
+      // The |single_sample| was zero, so it takes on the |bucket| parameter,
+      // the contents of which were checked above to fit in 16 bits.
+      single_sample.as_parts.bucket = bucket16;
+    }
+
+    // Update count, making sure that it doesn't overflow.
+    CheckedNumeric<uint16_t> new_count(single_sample.as_parts.count);
+    if (count_is_negative)
+      new_count -= count16;
+    else
+      new_count += count16;
+    if (!new_count.AssignIfValid(&single_sample.as_parts.count))
+      return false;
+
+    // Don't let this become equivalent to the "disabled" value.
+    if (single_sample.as_atomic == kDisabledSingleSample)
+      return false;
+
+    // Store the updated single-sample back into memory. |existing| is what
+    // was in that memory location at the time of the call; if it doesn't
+    // match |original| then the swap didn't happen so loop again.
+    subtle::Atomic32 existing = subtle::Release_CompareAndSwap(
+        &as_atomic, original, single_sample.as_atomic);
+    sample_updated = (existing == original);
+  } while (!sample_updated);
+
+  return true;
+}
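+
+// The loop above is a standard compare-and-swap retry pattern: read the
+// current value, compute the updated value locally, then publish it only if
+// memory still holds what was originally read. A minimal sketch of the same
+// pattern using std::atomic (illustrative only; this file uses
+// base/atomicops.h, and Update() is a hypothetical helper):
+//
+//   std::atomic<int32_t> slot{0};
+//   int32_t original = slot.load(std::memory_order_acquire);
+//   int32_t updated;
+//   do {
+//     updated = Update(original);  // Compute the new value from |original|.
+//   } while (!slot.compare_exchange_weak(original, updated,
+//                                        std::memory_order_release,
+//                                        std::memory_order_acquire));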
+
+bool HistogramSamples::AtomicSingleSample::IsDisabled() const {
+  return subtle::Acquire_Load(&as_atomic) == kDisabledSingleSample;
+}
+
+HistogramSamples::LocalMetadata::LocalMetadata() {
+  // This is the same way it's done for persistent metadata since no ctor
+  // is called for the data members in that case.
+  memset(this, 0, sizeof(*this));
+}
+
+HistogramSamples::HistogramSamples(uint64_t id, Metadata* meta)
+    : meta_(meta) {
+  DCHECK(meta_->id == 0 || meta_->id == id);
+
+  // It's possible that |meta| is contained in initialized, read-only memory
+  // so it's essential that no write be done in that case.
+  if (!meta_->id)
+    meta_->id = id;
+}
+
+// This mustn't do anything with |meta_|. It was passed to the ctor and may
+// be invalid by the time this dtor gets called.
+HistogramSamples::~HistogramSamples() = default;
+
+void HistogramSamples::Add(const HistogramSamples& other) {
+  IncreaseSumAndCount(other.sum(), other.redundant_count());
+  std::unique_ptr<SampleCountIterator> it = other.Iterator();
+  bool success = AddSubtractImpl(it.get(), ADD);
+  DCHECK(success);
+}
+
+bool HistogramSamples::AddFromPickle(PickleIterator* iter) {
+  int64_t sum;
+  HistogramBase::Count redundant_count;
+
+  if (!iter->ReadInt64(&sum) || !iter->ReadInt(&redundant_count))
+    return false;
+
+  IncreaseSumAndCount(sum, redundant_count);
+
+  SampleCountPickleIterator pickle_iter(iter);
+  return AddSubtractImpl(&pickle_iter, ADD);
+}
+
+void HistogramSamples::Subtract(const HistogramSamples& other) {
+  IncreaseSumAndCount(-other.sum(), -other.redundant_count());
+  std::unique_ptr<SampleCountIterator> it = other.Iterator();
+  bool success = AddSubtractImpl(it.get(), SUBTRACT);
+  DCHECK(success);
+}
+
+void HistogramSamples::Serialize(Pickle* pickle) const {
+  pickle->WriteInt64(sum());
+  pickle->WriteInt(redundant_count());
+
+  HistogramBase::Sample min;
+  int64_t max;
+  HistogramBase::Count count;
+  for (std::unique_ptr<SampleCountIterator> it = Iterator(); !it->Done();
+       it->Next()) {
+    it->Get(&min, &max, &count);
+    pickle->WriteInt(min);
+    pickle->WriteInt64(max);
+    pickle->WriteInt(count);
+  }
+}
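+
+// For reference, the serialized layout produced above (and consumed by
+// AddFromPickle) is:
+//
+//   int64  sum
+//   int32  redundant_count
+//   repeated until the pickle is exhausted:
+//     int32  bucket minimum (inclusive)
+//     int64  bucket maximum (exclusive)
+//     int32  count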
+
+bool HistogramSamples::AccumulateSingleSample(HistogramBase::Sample value,
+                                              HistogramBase::Count count,
+                                              size_t bucket) {
+  if (single_sample().Accumulate(bucket, count)) {
+    // Success. Update the (separate) sum and redundant-count.
+    IncreaseSumAndCount(strict_cast<int64_t>(value) * count, count);
+    return true;
+  }
+  return false;
+}
+
+void HistogramSamples::IncreaseSumAndCount(int64_t sum,
+                                           HistogramBase::Count count) {
+#ifdef ARCH_CPU_64_BITS
+  subtle::NoBarrier_AtomicIncrement(&meta_->sum, sum);
+#else
+  meta_->sum += sum;
+#endif
+  subtle::NoBarrier_AtomicIncrement(&meta_->redundant_count, count);
+}
+
+void HistogramSamples::RecordNegativeSample(NegativeSampleReason reason,
+                                            HistogramBase::Count increment) {
+  UMA_HISTOGRAM_ENUMERATION("UMA.NegativeSamples.Reason", reason,
+                            MAX_NEGATIVE_SAMPLE_REASONS);
+  UMA_HISTOGRAM_CUSTOM_COUNTS("UMA.NegativeSamples.Increment", increment, 1,
+                              1 << 30, 100);
+  UmaHistogramSparse("UMA.NegativeSamples.Histogram",
+                     static_cast<int32_t>(id()));
+}
+
+SampleCountIterator::~SampleCountIterator() = default;
+
+bool SampleCountIterator::GetBucketIndex(size_t* index) const {
+  DCHECK(!Done());
+  return false;
+}
+
+SingleSampleIterator::SingleSampleIterator(HistogramBase::Sample min,
+                                           int64_t max,
+                                           HistogramBase::Count count)
+    : SingleSampleIterator(min, max, count, kSizeMax) {}
+
+SingleSampleIterator::SingleSampleIterator(HistogramBase::Sample min,
+                                           int64_t max,
+                                           HistogramBase::Count count,
+                                           size_t bucket_index)
+    : min_(min), max_(max), bucket_index_(bucket_index), count_(count) {}
+
+SingleSampleIterator::~SingleSampleIterator() = default;
+
+bool SingleSampleIterator::Done() const {
+  return count_ == 0;
+}
+
+void SingleSampleIterator::Next() {
+  DCHECK(!Done());
+  count_ = 0;
+}
+
+void SingleSampleIterator::Get(HistogramBase::Sample* min,
+                               int64_t* max,
+                               HistogramBase::Count* count) const {
+  DCHECK(!Done());
+  if (min != nullptr)
+    *min = min_;
+  if (max != nullptr)
+    *max = max_;
+  if (count != nullptr)
+    *count = count_;
+}
+
+bool SingleSampleIterator::GetBucketIndex(size_t* index) const {
+  DCHECK(!Done());
+  if (bucket_index_ == kSizeMax)
+    return false;
+  *index = bucket_index_;
+  return true;
+}
+
+}  // namespace base
diff --git a/base/metrics/histogram_samples.h b/base/metrics/histogram_samples.h
new file mode 100644
index 0000000..059fd3c
--- /dev/null
+++ b/base/metrics/histogram_samples.h
@@ -0,0 +1,270 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_HISTOGRAM_SAMPLES_H_
+#define BASE_METRICS_HISTOGRAM_SAMPLES_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <limits>
+#include <memory>
+
+#include "base/atomicops.h"
+#include "base/macros.h"
+#include "base/metrics/histogram_base.h"
+
+namespace base {
+
+class Pickle;
+class PickleIterator;
+class SampleCountIterator;
+
+// HistogramSamples is a container storing all samples of a histogram. All
+// elements must be of a fixed width to ensure 32/64-bit interoperability.
+// If this structure changes, bump the version number for kTypeIdHistogram
+// in persistent_histogram_allocator.cc.
+//
+// Note that though these samples are individually consistent (through the use
+// of atomic operations on the counts), there is only "eventual consistency"
+// overall when multiple threads are accessing this data. That means that the
+// sum, redundant-count, etc. could be momentarily out-of-sync with the stored
+// counts but will settle to a consistent "steady state" once all threads have
+// exited this code.
+class BASE_EXPORT HistogramSamples {
+ public:
+  // A single bucket and count. To fit within a single atomic on 32-bit build
+  // architectures, both |bucket| and |count| are limited in size to 16 bits.
+  // This limits the functionality somewhat but if an entry can't fit then
+  // the full array of samples can be allocated and used.
+  struct SingleSample {
+    uint16_t bucket;
+    uint16_t count;
+  };
+
+  // A structure for managing an atomic single sample. Because this is generally
+  // used in association with other atomic values, the defined methods use
+  // acquire/release operations to guarantee ordering with outside values.
+  union BASE_EXPORT AtomicSingleSample {
+    AtomicSingleSample() : as_atomic(0) {}
+    AtomicSingleSample(subtle::Atomic32 rhs) : as_atomic(rhs) {}
+
+    // Returns the single sample in an atomic manner. This is an "acquire"
+    // load. The returned sample isn't shared and thus its fields can be safely
+    // accessed.
+    SingleSample Load() const;
+
+    // Extracts the single sample in an atomic manner. If |disable| is true
+    // then this object will be set so it will never accumulate another value.
+    // This is "no barrier" so doesn't enforce ordering with other atomic ops.
+    SingleSample Extract(bool disable);
+
+    // Adds a given count to the held bucket. If not possible, it returns false
+    // and leaves the parts unchanged. Once extracted/disabled, this always
+    // returns false. This is an "acquire/release" operation.
+    bool Accumulate(size_t bucket, HistogramBase::Count count);
+
+    // Returns if the sample has been "disabled" (via Extract) and thus not
+    // allowed to accept further accumulation.
+    bool IsDisabled() const;
+
+   private:
+    // union field: The actual sample bucket and count.
+    SingleSample as_parts;
+
+    // union field: The sample as an atomic value. Atomic64 would provide
+    // more flexibility but isn't available on all builds. This can hold a
+    // special, internal "disabled" value indicating that it must not accept
+    // further accumulation.
+    subtle::Atomic32 as_atomic;
+  };
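+
+  // Layout note: because the two 16-bit parts share storage with the single
+  // 32-bit atomic, the "disabled" value of -1 (all bits set) reads as
+  // MAX_UINT16 for both |bucket| and |count|, which is why it is unlikely to
+  // collide with a real sample (see kDisabledSingleSample in the .cc file).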
+
+  // A structure of information about the data, common to all sample containers.
+  // Because of how this is used in persistent memory, it must be a POD object
+  // that makes sense when initialized to all zeros.
+  struct Metadata {
+    // Expected size for 32/64-bit check.
+    static constexpr size_t kExpectedInstanceSize = 24;
+
+    // Initialized when the sample-set is first created with a value provided
+    // by the caller. It is generally used to identify the sample-set across
+    // threads and processes, though not necessarily uniquely as it is possible
+    // to have multiple sample-sets representing subsets of the data.
+    uint64_t id;
+
+    // The sum of all the entries, effectively the sum(sample * count) for
+    // all samples. Despite being atomic, no guarantees are made on the
+    // accuracy of this value; there may be races during histogram
+    // accumulation and snapshotting that we choose to accept. It should
+    // be treated as approximate.
+#ifdef ARCH_CPU_64_BITS
+    subtle::Atomic64 sum;
+#else
+    // 32-bit systems don't have atomic 64-bit operations. Use a basic type
+    // and don't worry about "shearing".
+    int64_t sum;
+#endif
+
+    // A "redundant" count helps identify memory corruption. It redundantly
+    // stores the total number of samples accumulated in the histogram. We
+    // can compare this count to the sum of the counts (TotalCount() function),
+    // and detect problems. Note, depending on the implementation of different
+    // histogram types, there might be races during histogram accumulation
+    // and snapshotting that we choose to accept. In this case, the tallies
+    // might mismatch even when no memory corruption has happened.
+    HistogramBase::AtomicCount redundant_count;
+
+    // A single histogram value and associated count. This allows histograms
+    // that typically report only a single value to not require full storage
+    // to be allocated.
+    AtomicSingleSample single_sample;  // 32 bits
+  };
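+
+  // The 24 bytes asserted by kExpectedInstanceSize break down as: 8 (id) +
+  // 8 (sum, whether atomic or plain int64_t) + 4 (redundant_count) +
+  // 4 (single_sample), assuming the compiler inserts no padding between
+  // the fields.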
+
+  // Because structures held in persistent memory must be POD, there can be no
+  // default constructor to clear the fields. This derived class exists just
+  // to clear them when being allocated on the heap.
+  struct BASE_EXPORT LocalMetadata : Metadata {
+    LocalMetadata();
+  };
+
+  HistogramSamples(uint64_t id, Metadata* meta);
+  virtual ~HistogramSamples();
+
+  virtual void Accumulate(HistogramBase::Sample value,
+                          HistogramBase::Count count) = 0;
+  virtual HistogramBase::Count GetCount(HistogramBase::Sample value) const = 0;
+  virtual HistogramBase::Count TotalCount() const = 0;
+
+  virtual void Add(const HistogramSamples& other);
+
+  // Add from serialized samples.
+  virtual bool AddFromPickle(PickleIterator* iter);
+
+  virtual void Subtract(const HistogramSamples& other);
+
+  virtual std::unique_ptr<SampleCountIterator> Iterator() const = 0;
+  virtual void Serialize(Pickle* pickle) const;
+
+  // Accessor functions.
+  uint64_t id() const { return meta_->id; }
+  int64_t sum() const {
+#ifdef ARCH_CPU_64_BITS
+    return subtle::NoBarrier_Load(&meta_->sum);
+#else
+    return meta_->sum;
+#endif
+  }
+  HistogramBase::Count redundant_count() const {
+    return subtle::NoBarrier_Load(&meta_->redundant_count);
+  }
+
+  // Temporarily visible for crash debugging. Should be protected.
+  // TODO(bcwhite): Move this back where it belongs.
+  // https://bugs.chromium.org/p/chromium/issues/detail?id=836875
+  Metadata* meta() { return meta_; }
+
+ protected:
+  enum NegativeSampleReason {
+    SAMPLES_HAVE_LOGGED_BUT_NOT_SAMPLE,
+    SAMPLES_SAMPLE_LESS_THAN_LOGGED,
+    SAMPLES_ADDED_NEGATIVE_COUNT,
+    SAMPLES_ADD_WENT_NEGATIVE,
+    SAMPLES_ADD_OVERFLOW,
+    SAMPLES_ACCUMULATE_NEGATIVE_COUNT,
+    SAMPLES_ACCUMULATE_WENT_NEGATIVE,
+    DEPRECATED_SAMPLES_ACCUMULATE_OVERFLOW,
+    SAMPLES_ACCUMULATE_OVERFLOW,
+    MAX_NEGATIVE_SAMPLE_REASONS
+  };
+
+  // Based on |op| type, add or subtract sample counts data from the iterator.
+  enum Operator { ADD, SUBTRACT };
+  virtual bool AddSubtractImpl(SampleCountIterator* iter, Operator op) = 0;
+
+  // Accumulates to the embedded single-sample field if possible. Returns true
+  // on success, false otherwise. Sum and redundant-count are also updated in
+  // the success case.
+  bool AccumulateSingleSample(HistogramBase::Sample value,
+                              HistogramBase::Count count,
+                              size_t bucket);
+
+  // Atomically adjust the sum and redundant-count.
+  void IncreaseSumAndCount(int64_t sum, HistogramBase::Count count);
+
+  // Record a negative-sample observation and the reason why.
+  void RecordNegativeSample(NegativeSampleReason reason,
+                            HistogramBase::Count increment);
+
+  AtomicSingleSample& single_sample() { return meta_->single_sample; }
+  const AtomicSingleSample& single_sample() const {
+    return meta_->single_sample;
+  }
+
+ private:
+  // Depending on the derived class, meta values can come from local storage
+  // or external storage, in which case the HistogramSamples class cannot
+  // take ownership of the Metadata*.
+  Metadata* meta_;
+
+  DISALLOW_COPY_AND_ASSIGN(HistogramSamples);
+};
+
+class BASE_EXPORT SampleCountIterator {
+ public:
+  virtual ~SampleCountIterator();
+
+  virtual bool Done() const = 0;
+  virtual void Next() = 0;
+
+  // Get the sample and count at the current position.
+  // |min|, |max| and |count| can be NULL if the value is not of interest.
+  // Note: |max| is int64_t because histograms support logged values in the
+  // full int32_t range and bucket max is exclusive, so it needs to support
+  // values up to MAXINT32+1.
+  // Requires: !Done();
+  virtual void Get(HistogramBase::Sample* min,
+                   int64_t* max,
+                   HistogramBase::Count* count) const = 0;
+  static_assert(std::numeric_limits<HistogramBase::Sample>::max() <
+                    std::numeric_limits<int64_t>::max(),
+                "Get() |max| must be able to hold Histogram::Sample max + 1");
+
+  // Get the index of the current histogram bucket.
+  // For histograms that don't use predefined buckets, this returns false.
+  // Requires: !Done();
+  virtual bool GetBucketIndex(size_t* index) const;
+};
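+
+// A typical read loop over a sample set, mirroring the one in
+// HistogramSamples::Serialize() (a sketch; |samples| is any
+// HistogramSamples):
+//
+//   for (std::unique_ptr<SampleCountIterator> it = samples.Iterator();
+//        !it->Done(); it->Next()) {
+//     HistogramBase::Sample min;
+//     int64_t max;
+//     HistogramBase::Count count;
+//     it->Get(&min, &max, &count);
+//     // Each entry covers [min, max) and holds |count| samples.
+//   }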
+
+class BASE_EXPORT SingleSampleIterator : public SampleCountIterator {
+ public:
+  SingleSampleIterator(HistogramBase::Sample min,
+                       int64_t max,
+                       HistogramBase::Count count);
+  SingleSampleIterator(HistogramBase::Sample min,
+                       int64_t max,
+                       HistogramBase::Count count,
+                       size_t bucket_index);
+  ~SingleSampleIterator() override;
+
+  // SampleCountIterator:
+  bool Done() const override;
+  void Next() override;
+  void Get(HistogramBase::Sample* min,
+           int64_t* max,
+           HistogramBase::Count* count) const override;
+
+  // SampleVector uses predefined buckets so iterator can return bucket index.
+  bool GetBucketIndex(size_t* index) const override;
+
+ private:
+  // Information about the single value to return.
+  const HistogramBase::Sample min_;
+  const int64_t max_;
+  const size_t bucket_index_;
+  HistogramBase::Count count_;
+};
+
+}  // namespace base
+
+#endif  // BASE_METRICS_HISTOGRAM_SAMPLES_H_
diff --git a/base/metrics/histogram_samples_unittest.cc b/base/metrics/histogram_samples_unittest.cc
new file mode 100644
index 0000000..74c743b
--- /dev/null
+++ b/base/metrics/histogram_samples_unittest.cc
@@ -0,0 +1,84 @@
+// Copyright (c) 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/histogram_samples.h"
+
+#include <limits>
+
+#include "base/test/gtest_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+using SingleSample = HistogramSamples::SingleSample;
+using AtomicSingleSample = HistogramSamples::AtomicSingleSample;
+
+TEST(SingleSampleTest, Load) {
+  AtomicSingleSample sample;
+  ASSERT_TRUE(sample.Accumulate(9, 1));
+
+  SingleSample s = sample.Load();
+  EXPECT_EQ(9U, s.bucket);
+  EXPECT_EQ(1U, s.count);
+
+  s = sample.Load();
+  EXPECT_EQ(9U, s.bucket);
+  EXPECT_EQ(1U, s.count);
+}
+
+TEST(SingleSampleTest, Extract) {
+  AtomicSingleSample sample;
+  ASSERT_TRUE(sample.Accumulate(9, 1));
+
+  SingleSample s = sample.Extract(/*disable=*/false);
+  EXPECT_EQ(9U, s.bucket);
+  EXPECT_EQ(1U, s.count);
+
+  s = sample.Extract(/*disable=*/false);
+  EXPECT_EQ(0U, s.bucket);
+  EXPECT_EQ(0U, s.count);
+}
+
+TEST(SingleSampleTest, Disable) {
+  AtomicSingleSample sample;
+  EXPECT_EQ(0U, sample.Extract(/*disable=*/false).count);
+  EXPECT_FALSE(sample.IsDisabled());
+
+  ASSERT_TRUE(sample.Accumulate(9, 1));
+  EXPECT_EQ(1U, sample.Extract(/*disable=*/true).count);
+  EXPECT_TRUE(sample.IsDisabled());
+
+  ASSERT_FALSE(sample.Accumulate(9, 1));
+  EXPECT_EQ(0U, sample.Extract(/*disable=*/false).count);
+  EXPECT_FALSE(sample.IsDisabled());
+}
+
+TEST(SingleSampleTest, Accumulate) {
+  AtomicSingleSample sample;
+
+  ASSERT_TRUE(sample.Accumulate(9, 1));
+  ASSERT_TRUE(sample.Accumulate(9, 2));
+  ASSERT_TRUE(sample.Accumulate(9, 4));
+  EXPECT_EQ(7U, sample.Extract(/*disable=*/false).count);
+
+  ASSERT_TRUE(sample.Accumulate(9, 4));
+  ASSERT_TRUE(sample.Accumulate(9, -2));
+  ASSERT_TRUE(sample.Accumulate(9, 1));
+  EXPECT_EQ(3U, sample.Extract(/*disable=*/false).count);
+}
+
+TEST(SingleSampleTest, Overflow) {
+  AtomicSingleSample sample;
+
+  ASSERT_TRUE(sample.Accumulate(9, 1));
+  ASSERT_FALSE(sample.Accumulate(9, -2));
+  EXPECT_EQ(1U, sample.Extract(/*disable=*/false).count);
+
+  ASSERT_TRUE(sample.Accumulate(9, std::numeric_limits<uint16_t>::max()));
+  ASSERT_FALSE(sample.Accumulate(9, 1));
+  EXPECT_EQ(std::numeric_limits<uint16_t>::max(),
+            sample.Extract(/*disable=*/false).count);
+}
+
+}  // namespace base
diff --git a/base/metrics/histogram_snapshot_manager.cc b/base/metrics/histogram_snapshot_manager.cc
new file mode 100644
index 0000000..c1b804e
--- /dev/null
+++ b/base/metrics/histogram_snapshot_manager.cc
@@ -0,0 +1,123 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/histogram_snapshot_manager.h"
+
+#include <memory>
+
+#include "base/debug/alias.h"
+#include "base/metrics/histogram_flattener.h"
+#include "base/metrics/histogram_samples.h"
+#include "base/metrics/statistics_recorder.h"
+#include "base/stl_util.h"
+
+namespace base {
+
+namespace {
+
+// A simple object to set an "active" flag and clear it upon destruction. It is
+// an error if the flag is already set.
+class MakeActive {
+ public:
+  MakeActive(std::atomic<bool>* is_active) : is_active_(is_active) {
+    bool was_active = is_active_->exchange(true, std::memory_order_relaxed);
+    CHECK(!was_active);
+  }
+  ~MakeActive() { is_active_->store(false, std::memory_order_relaxed); }
+
+ private:
+  std::atomic<bool>* is_active_;
+
+  DISALLOW_COPY_AND_ASSIGN(MakeActive);
+};
+
+}  // namespace
+
+HistogramSnapshotManager::HistogramSnapshotManager(
+    HistogramFlattener* histogram_flattener)
+    : histogram_flattener_(histogram_flattener) {
+  DCHECK(histogram_flattener_);
+  is_active_.store(false, std::memory_order_relaxed);
+}
+
+HistogramSnapshotManager::~HistogramSnapshotManager() = default;
+
+void HistogramSnapshotManager::PrepareDeltas(
+    const std::vector<HistogramBase*>& histograms,
+    HistogramBase::Flags flags_to_set,
+    HistogramBase::Flags required_flags) {
+  for (HistogramBase* const histogram : histograms) {
+    histogram->SetFlags(flags_to_set);
+    if ((histogram->flags() & required_flags) == required_flags)
+      PrepareDelta(histogram);
+  }
+}
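+
+// A typical use of this class (a sketch; see the unit tests for a complete
+// example): implement a HistogramFlattener to receive the deltas, hand it to
+// a snapshot manager, then ask the StatisticsRecorder to walk all registered
+// histograms. MyFlattener is a hypothetical HistogramFlattener subclass.
+//
+//   MyFlattener flattener;
+//   HistogramSnapshotManager manager(&flattener);
+//   StatisticsRecorder::PrepareDeltas(false, HistogramBase::kNoFlags,
+//                                     HistogramBase::kNoFlags, &manager);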
+
+void HistogramSnapshotManager::PrepareDelta(HistogramBase* histogram) {
+  histogram->ValidateHistogramContents();
+  PrepareSamples(histogram, histogram->SnapshotDelta());
+}
+
+void HistogramSnapshotManager::PrepareFinalDelta(
+    const HistogramBase* histogram) {
+  histogram->ValidateHistogramContents();
+  PrepareSamples(histogram, histogram->SnapshotFinalDelta());
+}
+
+void HistogramSnapshotManager::PrepareSamples(
+    const HistogramBase* histogram,
+    std::unique_ptr<HistogramSamples> samples) {
+  DCHECK(histogram_flattener_);
+
+  // Ensure that there is no concurrent access going on while accessing the
+  // set of known histograms. The flag will be reset when this object goes
+  // out of scope.
+  MakeActive make_active(&is_active_);
+
+  // Get information known about this histogram. If it did not previously
+  // exist, one will be created and initialized.
+  SampleInfo* sample_info = &known_histograms_[histogram->name_hash()];
+
+  // Crash if we detect that our histograms have been overwritten. This may
+  // be a fair distance from the memory smasher, but we hope to correlate
+  // these crashes with other events, such as plugins or usage patterns.
+  uint32_t corruption = histogram->FindCorruption(*samples);
+  if (HistogramBase::BUCKET_ORDER_ERROR & corruption) {
+    // Extract fields useful during debug.
+    const BucketRanges* ranges =
+        static_cast<const Histogram*>(histogram)->bucket_ranges();
+    uint32_t ranges_checksum = ranges->checksum();
+    uint32_t ranges_calc_checksum = ranges->CalculateChecksum();
+    int32_t flags = histogram->flags();
+    // The checksum should have caught this, so crash separately if it didn't.
+    CHECK_NE(0U, HistogramBase::RANGE_CHECKSUM_ERROR & corruption);
+    CHECK(false);  // Crash for the bucket order corruption.
+    // Ensure that the compiler keeps the values extracted above alive so
+    // they appear in any minidumps.
+    base::debug::Alias(&ranges_checksum);
+    base::debug::Alias(&ranges_calc_checksum);
+    base::debug::Alias(&flags);
+  }
+  // Checksum corruption might not have caused order corruption.
+  CHECK_EQ(0U, HistogramBase::RANGE_CHECKSUM_ERROR & corruption);
+
+  // Note, at this point corruption can only be COUNT_HIGH_ERROR or
+  // COUNT_LOW_ERROR and they never arise together, so we don't need to extract
+  // bits from corruption.
+  if (corruption) {
+    DLOG(ERROR) << "Histogram: \"" << histogram->histogram_name()
+                << "\" has data corruption: " << corruption;
+    // Don't record corrupt data to metrics services.
+    const uint32_t old_corruption = sample_info->inconsistencies;
+    if (old_corruption == (corruption | old_corruption))
+      return;  // We've already seen this corruption for this histogram.
+    sample_info->inconsistencies |= corruption;
+    return;
+  }
+
+  if (samples->TotalCount() > 0)
+    histogram_flattener_->RecordDelta(*histogram, *samples);
+}
+
+}  // namespace base
diff --git a/base/metrics/histogram_snapshot_manager.h b/base/metrics/histogram_snapshot_manager.h
new file mode 100644
index 0000000..cf7c149
--- /dev/null
+++ b/base/metrics/histogram_snapshot_manager.h
@@ -0,0 +1,90 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_HISTOGRAM_SNAPSHOT_MANAGER_H_
+#define BASE_METRICS_HISTOGRAM_SNAPSHOT_MANAGER_H_
+
+#include <stdint.h>
+
+#include <atomic>
+#include <map>
+#include <string>
+#include <vector>
+
+#include "base/gtest_prod_util.h"
+#include "base/macros.h"
+#include "base/metrics/histogram_base.h"
+
+namespace base {
+
+class HistogramSamples;
+class HistogramFlattener;
+
+// HistogramSnapshotManager handles the logistics of gathering up available
+// histograms for recording either to disk or for transmission (such as from
+// renderer to browser, or from browser to UMA upload). Since histograms can sit
+// in memory for an extended period of time, and are vulnerable to memory
+// corruption, this class also validates as much redundancy as it can before
+// calling for the marginal change (a.k.a., delta) in a histogram to be
+// recorded.
+class BASE_EXPORT HistogramSnapshotManager final {
+ public:
+  explicit HistogramSnapshotManager(HistogramFlattener* histogram_flattener);
+  ~HistogramSnapshotManager();
+
+  // Snapshot all histograms, and ask |histogram_flattener_| to record the
+  // delta. |flags_to_set| is used to set flags for each histogram.
+  // |required_flags| is used to select histograms to be recorded.
+  // Only histograms that have all the flags specified by the argument will be
+  // chosen. If all histograms should be recorded, set it to
+  // |Histogram::kNoFlags|.
+  void PrepareDeltas(const std::vector<HistogramBase*>& histograms,
+                     HistogramBase::Flags flags_to_set,
+                     HistogramBase::Flags required_flags);
+
+  // When the collection is too complex to be done with a single iterator,
+  // the steps can be performed separately. Call PrepareDelta() as many times
+  // as necessary. PrepareFinalDelta() works like PrepareDelta() except that
+  // it does not update the previously logged values and can thus be used
+  // with read-only files.
+  void PrepareDelta(HistogramBase* histogram);
+  void PrepareFinalDelta(const HistogramBase* histogram);
+
+ private:
+  FRIEND_TEST_ALL_PREFIXES(HistogramSnapshotManagerTest, CheckMerge);
+
+  // During a snapshot, samples are acquired and aggregated. This structure
+  // contains all the information for a given histogram that persists between
+  // collections.
+  struct SampleInfo {
+    // The set of inconsistencies (flags) already seen for the histogram.
+    // See HistogramBase::Inconsistency for values.
+    uint32_t inconsistencies = 0;
+  };
+
+  // Capture and hold samples from a histogram. This does all the heavy
+  // lifting for PrepareDelta() and PrepareFinalDelta().
+  void PrepareSamples(const HistogramBase* histogram,
+                      std::unique_ptr<HistogramSamples> samples);
+
+  // |histogram_flattener_| handles the logistics of recording the histogram
+  // deltas.
+  HistogramFlattener* const histogram_flattener_;  // Weak.
+
+  // For histograms, track what has been previously seen, indexed
+  // by the hash of the histogram name.
+  std::map<uint64_t, SampleInfo> known_histograms_;
+
+  // A flag indicating if a thread is currently doing an operation. This is
+  // used to check against concurrent access, which is not supported. A
+  // ThreadChecker is not sufficient because it may be guarded by an outside
+  // lock (as is the case with cronet).
+  std::atomic<bool> is_active_;
+
+  DISALLOW_COPY_AND_ASSIGN(HistogramSnapshotManager);
+};
+
+}  // namespace base
+
+#endif  // BASE_METRICS_HISTOGRAM_SNAPSHOT_MANAGER_H_
diff --git a/base/metrics/histogram_snapshot_manager_unittest.cc b/base/metrics/histogram_snapshot_manager_unittest.cc
new file mode 100644
index 0000000..1e2c599
--- /dev/null
+++ b/base/metrics/histogram_snapshot_manager_unittest.cc
@@ -0,0 +1,116 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/histogram_snapshot_manager.h"
+
+#include <string>
+#include <vector>
+
+#include "base/macros.h"
+#include "base/metrics/histogram_delta_serialization.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/metrics/sample_vector.h"
+#include "base/metrics/statistics_recorder.h"
+#include "base/stl_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+class HistogramFlattenerDeltaRecorder : public HistogramFlattener {
+ public:
+  HistogramFlattenerDeltaRecorder() = default;
+
+  void RecordDelta(const HistogramBase& histogram,
+                   const HistogramSamples& snapshot) override {
+    recorded_delta_histogram_names_.push_back(histogram.histogram_name());
+    // Use CHECK instead of ASSERT to get full stack-trace and thus origin.
+    CHECK(!ContainsKey(recorded_delta_histogram_sum_,
+                       histogram.histogram_name()));
+    // Keep the sum of the snapshot for testing. This really isn't ideal but
+    // the snapshot-manager keeps the snapshot alive until it's "forgotten".
+    recorded_delta_histogram_sum_[histogram.histogram_name()] = snapshot.sum();
+  }
+
+  void Reset() {
+    recorded_delta_histogram_names_.clear();
+    recorded_delta_histogram_sum_.clear();
+  }
+
+  std::vector<std::string> GetRecordedDeltaHistogramNames() {
+    return recorded_delta_histogram_names_;
+  }
+
+  int64_t GetRecordedDeltaHistogramSum(const std::string& name) {
+    EXPECT_TRUE(ContainsKey(recorded_delta_histogram_sum_, name));
+    return recorded_delta_histogram_sum_[name];
+  }
+
+ private:
+  std::vector<std::string> recorded_delta_histogram_names_;
+  std::map<std::string, int64_t> recorded_delta_histogram_sum_;
+
+  DISALLOW_COPY_AND_ASSIGN(HistogramFlattenerDeltaRecorder);
+};
+
+class HistogramSnapshotManagerTest : public testing::Test {
+ protected:
+  HistogramSnapshotManagerTest()
+      : statistics_recorder_(StatisticsRecorder::CreateTemporaryForTesting()),
+        histogram_snapshot_manager_(&histogram_flattener_delta_recorder_) {}
+
+  ~HistogramSnapshotManagerTest() override = default;
+
+  std::unique_ptr<StatisticsRecorder> statistics_recorder_;
+  HistogramFlattenerDeltaRecorder histogram_flattener_delta_recorder_;
+  HistogramSnapshotManager histogram_snapshot_manager_;
+};
+
+TEST_F(HistogramSnapshotManagerTest, PrepareDeltasNoFlagsFilter) {
+  // kNoFlags filter should record all histograms.
+  UMA_HISTOGRAM_ENUMERATION("UmaHistogram", 1, 4);
+  UMA_STABILITY_HISTOGRAM_ENUMERATION("UmaStabilityHistogram", 1, 2);
+
+  StatisticsRecorder::PrepareDeltas(false, HistogramBase::kNoFlags,
+                                    HistogramBase::kNoFlags,
+                                    &histogram_snapshot_manager_);
+
+  const std::vector<std::string>& histograms =
+      histogram_flattener_delta_recorder_.GetRecordedDeltaHistogramNames();
+  EXPECT_EQ(2U, histograms.size());
+  EXPECT_EQ("UmaHistogram", histograms[0]);
+  EXPECT_EQ("UmaStabilityHistogram", histograms[1]);
+}
+
+TEST_F(HistogramSnapshotManagerTest, PrepareDeltasUmaHistogramFlagFilter) {
+  // Note that kUmaStabilityHistogramFlag includes kUmaTargetedHistogramFlag.
+  UMA_HISTOGRAM_ENUMERATION("UmaHistogram", 1, 4);
+  UMA_STABILITY_HISTOGRAM_ENUMERATION("UmaStabilityHistogram", 1, 2);
+
+  StatisticsRecorder::PrepareDeltas(false, HistogramBase::kNoFlags,
+                                    HistogramBase::kUmaTargetedHistogramFlag,
+                                    &histogram_snapshot_manager_);
+
+  const std::vector<std::string>& histograms =
+      histogram_flattener_delta_recorder_.GetRecordedDeltaHistogramNames();
+  EXPECT_EQ(2U, histograms.size());
+  EXPECT_EQ("UmaHistogram", histograms[0]);
+  EXPECT_EQ("UmaStabilityHistogram", histograms[1]);
+}
+
+TEST_F(HistogramSnapshotManagerTest,
+       PrepareDeltasUmaStabilityHistogramFlagFilter) {
+  UMA_HISTOGRAM_ENUMERATION("UmaHistogram", 1, 4);
+  UMA_STABILITY_HISTOGRAM_ENUMERATION("UmaStabilityHistogram", 1, 2);
+
+  StatisticsRecorder::PrepareDeltas(false, HistogramBase::kNoFlags,
+                                    HistogramBase::kUmaStabilityHistogramFlag,
+                                    &histogram_snapshot_manager_);
+
+  const std::vector<std::string>& histograms =
+      histogram_flattener_delta_recorder_.GetRecordedDeltaHistogramNames();
+  EXPECT_EQ(1U, histograms.size());
+  EXPECT_EQ("UmaStabilityHistogram", histograms[0]);
+}
+
+}  // namespace base
diff --git a/base/metrics/histogram_unittest.cc b/base/metrics/histogram_unittest.cc
new file mode 100644
index 0000000..b819393
--- /dev/null
+++ b/base/metrics/histogram_unittest.cc
@@ -0,0 +1,841 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/histogram.h"
+
+#include <limits.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <climits>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/logging.h"
+#include "base/metrics/bucket_ranges.h"
+#include "base/metrics/dummy_histogram.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/metrics/metrics_hashes.h"
+#include "base/metrics/persistent_histogram_allocator.h"
+#include "base/metrics/persistent_memory_allocator.h"
+#include "base/metrics/record_histogram_checker.h"
+#include "base/metrics/sample_vector.h"
+#include "base/metrics/statistics_recorder.h"
+#include "base/pickle.h"
+#include "base/strings/stringprintf.h"
+#include "base/test/gtest_util.h"
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+const char kExpiredHistogramName[] = "ExpiredHistogram";
+
+// Test implementation of RecordHistogramChecker interface.
+class TestRecordHistogramChecker : public RecordHistogramChecker {
+ public:
+  ~TestRecordHistogramChecker() override = default;
+
+  // RecordHistogramChecker:
+  bool ShouldRecord(uint64_t histogram_hash) const override {
+    return histogram_hash != HashMetricName(kExpiredHistogramName);
+  }
+};
+
+}  // namespace
+
+// Test parameter indicates if a persistent memory allocator should be used
+// for histogram allocation. False will allocate histograms from the process
+// heap.
+class HistogramTest : public testing::TestWithParam<bool> {
+ protected:
+  const int32_t kAllocatorMemorySize = 8 << 20;  // 8 MiB
+
+  HistogramTest() : use_persistent_histogram_allocator_(GetParam()) {}
+
+  void SetUp() override {
+    if (use_persistent_histogram_allocator_)
+      CreatePersistentHistogramAllocator();
+
+    // Each test will have a clean state (no Histogram / BucketRanges
+    // registered).
+    InitializeStatisticsRecorder();
+  }
+
+  void TearDown() override {
+    if (allocator_) {
+      ASSERT_FALSE(allocator_->IsFull());
+      ASSERT_FALSE(allocator_->IsCorrupt());
+    }
+    UninitializeStatisticsRecorder();
+    DestroyPersistentHistogramAllocator();
+  }
+
+  void InitializeStatisticsRecorder() {
+    DCHECK(!statistics_recorder_);
+    statistics_recorder_ = StatisticsRecorder::CreateTemporaryForTesting();
+    auto record_checker = std::make_unique<TestRecordHistogramChecker>();
+    StatisticsRecorder::SetRecordChecker(std::move(record_checker));
+  }
+
+  void UninitializeStatisticsRecorder() {
+    statistics_recorder_.reset();
+  }
+
+  void CreatePersistentHistogramAllocator() {
+    GlobalHistogramAllocator::CreateWithLocalMemory(
+        kAllocatorMemorySize, 0, "HistogramAllocatorTest");
+    allocator_ = GlobalHistogramAllocator::Get()->memory_allocator();
+  }
+
+  void DestroyPersistentHistogramAllocator() {
+    allocator_ = nullptr;
+    GlobalHistogramAllocator::ReleaseForTesting();
+  }
+
+  const bool use_persistent_histogram_allocator_;
+
+  std::unique_ptr<StatisticsRecorder> statistics_recorder_;
+  std::unique_ptr<char[]> allocator_memory_;
+  PersistentMemoryAllocator* allocator_ = nullptr;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(HistogramTest);
+};
+
+// Run all HistogramTest cases with both heap and persistent memory.
+INSTANTIATE_TEST_CASE_P(HeapAndPersistent, HistogramTest, testing::Bool());
+
+
+// Check for basic syntax and use.
+TEST_P(HistogramTest, BasicTest) {
+  // Try basic construction
+  HistogramBase* histogram = Histogram::FactoryGet(
+      "TestHistogram", 1, 1000, 10, HistogramBase::kNoFlags);
+  EXPECT_TRUE(histogram);
+
+  HistogramBase* linear_histogram = LinearHistogram::FactoryGet(
+      "TestLinearHistogram", 1, 1000, 10, HistogramBase::kNoFlags);
+  EXPECT_TRUE(linear_histogram);
+
+  std::vector<int> custom_ranges;
+  custom_ranges.push_back(1);
+  custom_ranges.push_back(5);
+  HistogramBase* custom_histogram = CustomHistogram::FactoryGet(
+      "TestCustomHistogram", custom_ranges, HistogramBase::kNoFlags);
+  EXPECT_TRUE(custom_histogram);
+
+  // Macros that create histograms have an internal static variable which will
+  // continue to point to those from the very first run of this method even
+  // during subsequent runs.
+  static bool already_run = false;
+  if (already_run)
+    return;
+  already_run = true;
+
+  // Use standard macros (but with fixed samples)
+  LOCAL_HISTOGRAM_TIMES("Test2Histogram", TimeDelta::FromDays(1));
+  LOCAL_HISTOGRAM_COUNTS("Test3Histogram", 30);
+
+  LOCAL_HISTOGRAM_ENUMERATION("Test6Histogram", 129, 130);
+}
+
+// Check that the macro correctly matches histograms by name and records their
+// data together.
+TEST_P(HistogramTest, NameMatchTest) {
+  // Macros that create histograms have an internal static variable which will
+  // continue to point to those from the very first run of this method even
+  // during subsequent runs.
+  static bool already_run = false;
+  if (already_run)
+    return;
+  already_run = true;
+
+  LOCAL_HISTOGRAM_PERCENTAGE("DuplicatedHistogram", 10);
+  LOCAL_HISTOGRAM_PERCENTAGE("DuplicatedHistogram", 10);
+  HistogramBase* histogram = LinearHistogram::FactoryGet(
+      "DuplicatedHistogram", 1, 101, 102, HistogramBase::kNoFlags);
+
+  std::unique_ptr<HistogramSamples> samples = histogram->SnapshotSamples();
+  EXPECT_EQ(2, samples->TotalCount());
+  EXPECT_EQ(2, samples->GetCount(10));
+}
+
+// Check that delta calculations work correctly.
+TEST_P(HistogramTest, DeltaTest) {
+  HistogramBase* histogram =
+      Histogram::FactoryGet("DeltaHistogram", 1, 64, 8,
+                            HistogramBase::kNoFlags);
+  histogram->Add(1);
+  histogram->Add(10);
+  histogram->Add(50);
+
+  std::unique_ptr<HistogramSamples> samples = histogram->SnapshotDelta();
+  EXPECT_EQ(3, samples->TotalCount());
+  EXPECT_EQ(1, samples->GetCount(1));
+  EXPECT_EQ(1, samples->GetCount(10));
+  EXPECT_EQ(1, samples->GetCount(50));
+  EXPECT_EQ(samples->TotalCount(), samples->redundant_count());
+
+  samples = histogram->SnapshotDelta();
+  EXPECT_EQ(0, samples->TotalCount());
+
+  histogram->Add(10);
+  histogram->Add(10);
+  samples = histogram->SnapshotDelta();
+  EXPECT_EQ(2, samples->TotalCount());
+  EXPECT_EQ(2, samples->GetCount(10));
+
+  samples = histogram->SnapshotDelta();
+  EXPECT_EQ(0, samples->TotalCount());
+}
+
+// Check that final-delta calculations work correctly.
+TEST_P(HistogramTest, FinalDeltaTest) {
+  HistogramBase* histogram =
+      Histogram::FactoryGet("FinalDeltaHistogram", 1, 64, 8,
+                            HistogramBase::kNoFlags);
+  histogram->Add(1);
+  histogram->Add(10);
+  histogram->Add(50);
+
+  std::unique_ptr<HistogramSamples> samples = histogram->SnapshotDelta();
+  EXPECT_EQ(3, samples->TotalCount());
+  EXPECT_EQ(1, samples->GetCount(1));
+  EXPECT_EQ(1, samples->GetCount(10));
+  EXPECT_EQ(1, samples->GetCount(50));
+  EXPECT_EQ(samples->TotalCount(), samples->redundant_count());
+
+  histogram->Add(2);
+  histogram->Add(50);
+
+  samples = histogram->SnapshotFinalDelta();
+  EXPECT_EQ(2, samples->TotalCount());
+  EXPECT_EQ(1, samples->GetCount(2));
+  EXPECT_EQ(1, samples->GetCount(50));
+  EXPECT_EQ(samples->TotalCount(), samples->redundant_count());
+}
+
+TEST_P(HistogramTest, ExponentialRangesTest) {
+  // Check that we got a nice exponential when there was enough room.
+  BucketRanges ranges(9);
+  Histogram::InitializeBucketRanges(1, 64, &ranges);
+  EXPECT_EQ(0, ranges.range(0));
+  int power_of_2 = 1;
+  for (int i = 1; i < 8; i++) {
+    EXPECT_EQ(power_of_2, ranges.range(i));
+    power_of_2 *= 2;
+  }
+  EXPECT_EQ(HistogramBase::kSampleType_MAX, ranges.range(8));
+
+  // Check the corresponding Histogram will use the correct ranges.
+  Histogram* histogram = static_cast<Histogram*>(
+      Histogram::FactoryGet("Histogram", 1, 64, 8, HistogramBase::kNoFlags));
+  EXPECT_TRUE(ranges.Equals(histogram->bucket_ranges()));
+
+  // When bucket count is limited, exponential ranges will partially look like
+  // linear.
+  BucketRanges ranges2(16);
+  Histogram::InitializeBucketRanges(1, 32, &ranges2);
+
+  EXPECT_EQ(0, ranges2.range(0));
+  EXPECT_EQ(1, ranges2.range(1));
+  EXPECT_EQ(2, ranges2.range(2));
+  EXPECT_EQ(3, ranges2.range(3));
+  EXPECT_EQ(4, ranges2.range(4));
+  EXPECT_EQ(5, ranges2.range(5));
+  EXPECT_EQ(6, ranges2.range(6));
+  EXPECT_EQ(7, ranges2.range(7));
+  EXPECT_EQ(9, ranges2.range(8));
+  EXPECT_EQ(11, ranges2.range(9));
+  EXPECT_EQ(14, ranges2.range(10));
+  EXPECT_EQ(17, ranges2.range(11));
+  EXPECT_EQ(21, ranges2.range(12));
+  EXPECT_EQ(26, ranges2.range(13));
+  EXPECT_EQ(32, ranges2.range(14));
+  EXPECT_EQ(HistogramBase::kSampleType_MAX, ranges2.range(15));
+
+  // Check the corresponding Histogram will use the correct ranges.
+  Histogram* histogram2 = static_cast<Histogram*>(
+      Histogram::FactoryGet("Histogram2", 1, 32, 15, HistogramBase::kNoFlags));
+  EXPECT_TRUE(ranges2.Equals(histogram2->bucket_ranges()));
+}
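+
+// The shape checked above (linear at first, then exponential) falls out of
+// how the boundaries are generated. A sketch of the idea, not the exact
+// library code: each boundary moves toward |max| by the geometric mean of
+// the remaining distance, but always by at least 1, so cramped low buckets
+// degenerate into a linear run.
+//
+//   // For ranges2 above: 16 range entries = 15 buckets, min 1, max 32.
+//   // ranges(0) = 0, ranges(1) = min, ranges(bucket_count) = INT_MAX.
+//   const size_t bucket_count = 15;
+//   int current = 1;
+//   for (size_t i = 2; i < bucket_count; ++i) {
+//     double log_ratio = (log(32.0) - log(current)) / (bucket_count - i);
+//     int next = static_cast<int>(round(exp(log(current) + log_ratio)));
+//     current = (next > current) ? next : current + 1;  // This is ranges(i).
+//   }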
+
+TEST_P(HistogramTest, LinearRangesTest) {
+  BucketRanges ranges(9);
+  LinearHistogram::InitializeBucketRanges(1, 7, &ranges);
+  // Gets a nice linear set of bucket ranges.
+  for (int i = 0; i < 8; i++)
+    EXPECT_EQ(i, ranges.range(i));
+  EXPECT_EQ(HistogramBase::kSampleType_MAX, ranges.range(8));
+
+  // The corresponding LinearHistogram should use the correct ranges.
+  Histogram* histogram = static_cast<Histogram*>(
+      LinearHistogram::FactoryGet("Linear", 1, 7, 8, HistogramBase::kNoFlags));
+  EXPECT_TRUE(ranges.Equals(histogram->bucket_ranges()));
+
+  // Linear ranges are not divisible.
+  BucketRanges ranges2(6);
+  LinearHistogram::InitializeBucketRanges(1, 6, &ranges2);
+  EXPECT_EQ(0, ranges2.range(0));
+  EXPECT_EQ(1, ranges2.range(1));
+  EXPECT_EQ(3, ranges2.range(2));
+  EXPECT_EQ(4, ranges2.range(3));
+  EXPECT_EQ(6, ranges2.range(4));
+  EXPECT_EQ(HistogramBase::kSampleType_MAX, ranges2.range(5));
+  // The corresponding LinearHistogram should use the correct ranges.
+  Histogram* histogram2 = static_cast<Histogram*>(
+      LinearHistogram::FactoryGet("Linear2", 1, 6, 5, HistogramBase::kNoFlags));
+  EXPECT_TRUE(ranges2.Equals(histogram2->bucket_ranges()));
+}
+
+TEST_P(HistogramTest, ArrayToCustomEnumRangesTest) {
+  const HistogramBase::Sample ranges[3] = {5, 10, 20};
+  std::vector<HistogramBase::Sample> ranges_vec =
+      CustomHistogram::ArrayToCustomEnumRanges(ranges);
+  ASSERT_EQ(6u, ranges_vec.size());
+  EXPECT_EQ(5, ranges_vec[0]);
+  EXPECT_EQ(6, ranges_vec[1]);
+  EXPECT_EQ(10, ranges_vec[2]);
+  EXPECT_EQ(11, ranges_vec[3]);
+  EXPECT_EQ(20, ranges_vec[4]);
+  EXPECT_EQ(21, ranges_vec[5]);
+}
+
+TEST_P(HistogramTest, CustomHistogramTest) {
+  // A well-prepared set of custom ranges.
+  std::vector<HistogramBase::Sample> custom_ranges;
+  custom_ranges.push_back(1);
+  custom_ranges.push_back(2);
+
+  Histogram* histogram = static_cast<Histogram*>(
+      CustomHistogram::FactoryGet("TestCustomHistogram1", custom_ranges,
+                                  HistogramBase::kNoFlags));
+  const BucketRanges* ranges = histogram->bucket_ranges();
+  ASSERT_EQ(4u, ranges->size());
+  EXPECT_EQ(0, ranges->range(0));  // Auto added.
+  EXPECT_EQ(1, ranges->range(1));
+  EXPECT_EQ(2, ranges->range(2));
+  EXPECT_EQ(HistogramBase::kSampleType_MAX, ranges->range(3));  // Auto added.
+
+  // An unordered set of custom ranges.
+  custom_ranges.clear();
+  custom_ranges.push_back(2);
+  custom_ranges.push_back(1);
+  histogram = static_cast<Histogram*>(
+      CustomHistogram::FactoryGet("TestCustomHistogram2", custom_ranges,
+                                  HistogramBase::kNoFlags));
+  ranges = histogram->bucket_ranges();
+  ASSERT_EQ(4u, ranges->size());
+  EXPECT_EQ(0, ranges->range(0));
+  EXPECT_EQ(1, ranges->range(1));
+  EXPECT_EQ(2, ranges->range(2));
+  EXPECT_EQ(HistogramBase::kSampleType_MAX, ranges->range(3));
+
+  // Custom ranges with duplicated values.
+  custom_ranges.clear();
+  custom_ranges.push_back(4);
+  custom_ranges.push_back(1);
+  custom_ranges.push_back(4);
+  histogram = static_cast<Histogram*>(
+      CustomHistogram::FactoryGet("TestCustomHistogram3", custom_ranges,
+                                  HistogramBase::kNoFlags));
+  ranges = histogram->bucket_ranges();
+  ASSERT_EQ(4u, ranges->size());
+  EXPECT_EQ(0, ranges->range(0));
+  EXPECT_EQ(1, ranges->range(1));
+  EXPECT_EQ(4, ranges->range(2));
+  EXPECT_EQ(HistogramBase::kSampleType_MAX, ranges->range(3));
+}
+
+TEST_P(HistogramTest, CustomHistogramWithOnly2Buckets) {
+  // This test exploits the fact that the CustomHistogram can have 2 buckets,
+  // while the base class Histogram is *supposed* to have at least 3 buckets.
+  // We should probably change the restriction on the base class (or not inherit
+  // the base class!).
+
+  std::vector<HistogramBase::Sample> custom_ranges;
+  custom_ranges.push_back(4);
+
+  Histogram* histogram = static_cast<Histogram*>(
+      CustomHistogram::FactoryGet("2BucketsCustomHistogram", custom_ranges,
+                                  HistogramBase::kNoFlags));
+  const BucketRanges* ranges = histogram->bucket_ranges();
+  ASSERT_EQ(3u, ranges->size());
+  EXPECT_EQ(0, ranges->range(0));
+  EXPECT_EQ(4, ranges->range(1));
+  EXPECT_EQ(HistogramBase::kSampleType_MAX, ranges->range(2));
+}
+
+TEST_P(HistogramTest, AddCountTest) {
+  const size_t kBucketCount = 50;
+  Histogram* histogram = static_cast<Histogram*>(
+      Histogram::FactoryGet("AddCountHistogram", 10, 100, kBucketCount,
+                            HistogramBase::kNoFlags));
+
+  histogram->AddCount(20, 15);
+  histogram->AddCount(30, 14);
+
+  std::unique_ptr<HistogramSamples> samples = histogram->SnapshotSamples();
+  EXPECT_EQ(29, samples->TotalCount());
+  EXPECT_EQ(15, samples->GetCount(20));
+  EXPECT_EQ(14, samples->GetCount(30));
+
+  histogram->AddCount(20, 25);
+  histogram->AddCount(30, 24);
+
+  std::unique_ptr<HistogramSamples> samples2 = histogram->SnapshotSamples();
+  EXPECT_EQ(78, samples2->TotalCount());
+  EXPECT_EQ(40, samples2->GetCount(20));
+  EXPECT_EQ(38, samples2->GetCount(30));
+}
+
+TEST_P(HistogramTest, AddCount_LargeValuesDontOverflow) {
+  const size_t kBucketCount = 50;
+  Histogram* histogram = static_cast<Histogram*>(
+      Histogram::FactoryGet("AddCountHistogram", 10, 1000000000, kBucketCount,
+                            HistogramBase::kNoFlags));
+
+  histogram->AddCount(200000000, 15);
+  histogram->AddCount(300000000, 14);
+
+  std::unique_ptr<HistogramSamples> samples = histogram->SnapshotSamples();
+  EXPECT_EQ(29, samples->TotalCount());
+  EXPECT_EQ(15, samples->GetCount(200000000));
+  EXPECT_EQ(14, samples->GetCount(300000000));
+
+  histogram->AddCount(200000000, 25);
+  histogram->AddCount(300000000, 24);
+
+  std::unique_ptr<HistogramSamples> samples2 = histogram->SnapshotSamples();
+  EXPECT_EQ(78, samples2->TotalCount());
+  EXPECT_EQ(40, samples2->GetCount(200000000));
+  EXPECT_EQ(38, samples2->GetCount(300000000));
+  EXPECT_EQ(19400000000LL, samples2->sum());
+}
+
+// Some metrics are designed so that they are guaranteed not to overflow between
+// snapshots, but could overflow over a long-running session.
+// Make sure that counts returned by Histogram::SnapshotDelta do not overflow
+// even when a total count (returned by Histogram::SnapshotSamples) does.
+TEST_P(HistogramTest, AddCount_LargeCountsDontOverflow) {
+  const size_t kBucketCount = 10;
+  Histogram* histogram = static_cast<Histogram*>(Histogram::FactoryGet(
+      "AddCountHistogram", 10, 50, kBucketCount, HistogramBase::kNoFlags));
+
+  const int count = (1 << 30) - 1;
+
+  // Repeat N times to make sure that there is no internal value overflow.
+  for (int i = 0; i < 10; ++i) {
+    histogram->AddCount(42, count);
+    std::unique_ptr<HistogramSamples> samples = histogram->SnapshotDelta();
+    EXPECT_EQ(count, samples->TotalCount());
+    EXPECT_EQ(count, samples->GetCount(42));
+  }
+}
+
+// Make sure histogram handles out-of-bounds data gracefully.
+TEST_P(HistogramTest, BoundsTest) {
+  const size_t kBucketCount = 50;
+  Histogram* histogram = static_cast<Histogram*>(
+      Histogram::FactoryGet("Bounded", 10, 100, kBucketCount,
+                            HistogramBase::kNoFlags));
+
+  // Put two samples "out of bounds" above and below.
+  histogram->Add(5);
+  histogram->Add(-50);
+
+  histogram->Add(100);
+  histogram->Add(10000);
+
+  // Verify they landed in the underflow and overflow buckets.
+  std::unique_ptr<SampleVector> samples = histogram->SnapshotAllSamples();
+  EXPECT_EQ(2, samples->GetCountAtIndex(0));
+  EXPECT_EQ(0, samples->GetCountAtIndex(1));
+  size_t array_size = histogram->bucket_count();
+  EXPECT_EQ(kBucketCount, array_size);
+  EXPECT_EQ(0, samples->GetCountAtIndex(array_size - 2));
+  EXPECT_EQ(2, samples->GetCountAtIndex(array_size - 1));
+
+  std::vector<int> custom_ranges;
+  custom_ranges.push_back(10);
+  custom_ranges.push_back(50);
+  custom_ranges.push_back(100);
+  Histogram* test_custom_histogram = static_cast<Histogram*>(
+      CustomHistogram::FactoryGet("TestCustomRangeBoundedHistogram",
+                                  custom_ranges, HistogramBase::kNoFlags));
+
+  // Put two samples "out of bounds" above and below.
+  test_custom_histogram->Add(5);
+  test_custom_histogram->Add(-50);
+  test_custom_histogram->Add(100);
+  test_custom_histogram->Add(1000);
+  test_custom_histogram->Add(INT_MAX);
+
+  // Verify they landed in the underflow and overflow buckets.
+  std::unique_ptr<SampleVector> custom_samples =
+      test_custom_histogram->SnapshotAllSamples();
+  EXPECT_EQ(2, custom_samples->GetCountAtIndex(0));
+  EXPECT_EQ(0, custom_samples->GetCountAtIndex(1));
+  size_t bucket_count = test_custom_histogram->bucket_count();
+  EXPECT_EQ(0, custom_samples->GetCountAtIndex(bucket_count - 2));
+  EXPECT_EQ(3, custom_samples->GetCountAtIndex(bucket_count - 1));
+}
+
+// Check to be sure samples land in the expected ("correct") buckets.
+TEST_P(HistogramTest, BucketPlacementTest) {
+  Histogram* histogram = static_cast<Histogram*>(
+      Histogram::FactoryGet("Histogram", 1, 64, 8, HistogramBase::kNoFlags));
+
+  // Add i+1 samples to the i'th bucket.
+  histogram->Add(0);
+  int power_of_2 = 1;
+  for (int i = 1; i < 8; i++) {
+    for (int j = 0; j <= i; j++)
+      histogram->Add(power_of_2);
+    power_of_2 *= 2;
+  }
+
+  // Check to see that the bucket counts reflect our additions.
+  std::unique_ptr<SampleVector> samples = histogram->SnapshotAllSamples();
+  for (int i = 0; i < 8; i++)
+    EXPECT_EQ(i + 1, samples->GetCountAtIndex(i));
+}
+
+TEST_P(HistogramTest, CorruptSampleCounts) {
+  // The internal code creates histograms via macros and thus keeps static
+  // pointers to them. If those pointers are to persistent memory which will
+  // be freed, then any following calls to that code will crash with a
+  // segmentation violation.
+  if (use_persistent_histogram_allocator_)
+    return;
+
+  Histogram* histogram = static_cast<Histogram*>(
+      Histogram::FactoryGet("Histogram", 1, 64, 8, HistogramBase::kNoFlags));
+
+  // Add some samples.
+  histogram->Add(20);
+  histogram->Add(40);
+
+  std::unique_ptr<SampleVector> snapshot = histogram->SnapshotAllSamples();
+  EXPECT_EQ(HistogramBase::NO_INCONSISTENCIES,
+            histogram->FindCorruption(*snapshot));
+  EXPECT_EQ(2, snapshot->redundant_count());
+  EXPECT_EQ(2, snapshot->TotalCount());
+
+  snapshot->counts()[3] += 100;  // Sample count won't match redundant count.
+  EXPECT_EQ(HistogramBase::COUNT_LOW_ERROR,
+            histogram->FindCorruption(*snapshot));
+  snapshot->counts()[2] -= 200;
+  EXPECT_EQ(HistogramBase::COUNT_HIGH_ERROR,
+            histogram->FindCorruption(*snapshot));
+
+  // But we can't spot a corruption if it is compensated for.
+  snapshot->counts()[1] += 100;
+  EXPECT_EQ(HistogramBase::NO_INCONSISTENCIES,
+            histogram->FindCorruption(*snapshot));
+}
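+
+// (The checks above work because |redundant_count| is tracked separately from
+// the per-bucket counts: inflating a bucket leaves the redundant count too
+// low, deflating leaves it too high, and only equal-and-opposite edits -- as
+// at the end above -- cancel out and go unnoticed.)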
+
+TEST_P(HistogramTest, CorruptBucketBounds) {
+  Histogram* histogram = static_cast<Histogram*>(
+      Histogram::FactoryGet("Histogram", 1, 64, 8, HistogramBase::kNoFlags));
+
+  std::unique_ptr<HistogramSamples> snapshot = histogram->SnapshotSamples();
+  EXPECT_EQ(HistogramBase::NO_INCONSISTENCIES,
+            histogram->FindCorruption(*snapshot));
+
+  BucketRanges* bucket_ranges =
+      const_cast<BucketRanges*>(histogram->bucket_ranges());
+  HistogramBase::Sample tmp = bucket_ranges->range(1);
+  bucket_ranges->set_range(1, bucket_ranges->range(2));
+  bucket_ranges->set_range(2, tmp);
+  EXPECT_EQ(
+      HistogramBase::BUCKET_ORDER_ERROR | HistogramBase::RANGE_CHECKSUM_ERROR,
+      histogram->FindCorruption(*snapshot));
+
+  bucket_ranges->set_range(2, bucket_ranges->range(1));
+  bucket_ranges->set_range(1, tmp);
+  EXPECT_EQ(0U, histogram->FindCorruption(*snapshot));
+
+  // Show that two simple changes don't offset each other.
+  bucket_ranges->set_range(3, bucket_ranges->range(3) + 1);
+  EXPECT_EQ(HistogramBase::RANGE_CHECKSUM_ERROR,
+            histogram->FindCorruption(*snapshot));
+
+  bucket_ranges->set_range(4, bucket_ranges->range(4) - 1);
+  EXPECT_EQ(HistogramBase::RANGE_CHECKSUM_ERROR,
+            histogram->FindCorruption(*snapshot));
+
+  // Repair histogram so that destructor won't DCHECK().
+  bucket_ranges->set_range(3, bucket_ranges->range(3) - 1);
+  bucket_ranges->set_range(4, bucket_ranges->range(4) + 1);
+}
+
+TEST_P(HistogramTest, HistogramSerializeInfo) {
+  Histogram* histogram = static_cast<Histogram*>(
+      Histogram::FactoryGet("Histogram", 1, 64, 8,
+                            HistogramBase::kIPCSerializationSourceFlag));
+  Pickle pickle;
+  histogram->SerializeInfo(&pickle);
+
+  PickleIterator iter(pickle);
+
+  int type;
+  EXPECT_TRUE(iter.ReadInt(&type));
+  EXPECT_EQ(HISTOGRAM, type);
+
+  std::string name;
+  EXPECT_TRUE(iter.ReadString(&name));
+  EXPECT_EQ("Histogram", name);
+
+  int flag;
+  EXPECT_TRUE(iter.ReadInt(&flag));
+  EXPECT_EQ(HistogramBase::kIPCSerializationSourceFlag,
+            flag & ~HistogramBase::kIsPersistent);
+
+  int min;
+  EXPECT_TRUE(iter.ReadInt(&min));
+  EXPECT_EQ(1, min);
+
+  int max;
+  EXPECT_TRUE(iter.ReadInt(&max));
+  EXPECT_EQ(64, max);
+
+  uint32_t bucket_count;
+  EXPECT_TRUE(iter.ReadUInt32(&bucket_count));
+  EXPECT_EQ(8u, bucket_count);
+
+  uint32_t checksum;
+  EXPECT_TRUE(iter.ReadUInt32(&checksum));
+  EXPECT_EQ(histogram->bucket_ranges()->checksum(), checksum);
+
+  // No more data in the pickle.
+  EXPECT_FALSE(iter.SkipBytes(1));
+}
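+
+// Taken together, the reads above pin down the layout SerializeInfo() writes
+// for a numeric histogram:
+//   int32 type | string name | int32 flags | int32 min | int32 max |
+//   uint32 bucket_count | uint32 ranges checksum
+// (CustomHistogram, exercised next, appends its explicit range values.)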
+
+TEST_P(HistogramTest, CustomHistogramSerializeInfo) {
+  std::vector<int> custom_ranges;
+  custom_ranges.push_back(10);
+  custom_ranges.push_back(100);
+
+  HistogramBase* custom_histogram = CustomHistogram::FactoryGet(
+      "TestCustomRangeBoundedHistogram",
+      custom_ranges,
+      HistogramBase::kNoFlags);
+  Pickle pickle;
+  custom_histogram->SerializeInfo(&pickle);
+
+  // Validate the pickle.
+  PickleIterator iter(pickle);
+
+  int i;
+  std::string s;
+  uint32_t bucket_count;
+  uint32_t ui32;
+  EXPECT_TRUE(iter.ReadInt(&i) && iter.ReadString(&s) && iter.ReadInt(&i) &&
+              iter.ReadInt(&i) && iter.ReadInt(&i) &&
+              iter.ReadUInt32(&bucket_count) && iter.ReadUInt32(&ui32));
+  EXPECT_EQ(3u, bucket_count);
+
+  int range;
+  EXPECT_TRUE(iter.ReadInt(&range));
+  EXPECT_EQ(10, range);
+  EXPECT_TRUE(iter.ReadInt(&range));
+  EXPECT_EQ(100, range);
+
+  // No more data in the pickle.
+  EXPECT_FALSE(iter.SkipBytes(1));
+}
+
+TEST_P(HistogramTest, BadConstruction) {
+  HistogramBase* histogram = Histogram::FactoryGet(
+      "BadConstruction", 0, 100, 8, HistogramBase::kNoFlags);
+  EXPECT_TRUE(histogram->HasConstructionArguments(1, 100, 8));
+
+  // Try to get the same histogram name with different arguments.
+  HistogramBase* bad_histogram = Histogram::FactoryGet(
+      "BadConstruction", 0, 100, 7, HistogramBase::kNoFlags);
+  EXPECT_EQ(DummyHistogram::GetInstance(), bad_histogram);
+  bad_histogram = Histogram::FactoryGet(
+      "BadConstruction", 0, 99, 8, HistogramBase::kNoFlags);
+  EXPECT_EQ(DummyHistogram::GetInstance(), bad_histogram);
+
+  HistogramBase* linear_histogram = LinearHistogram::FactoryGet(
+      "BadConstructionLinear", 0, 100, 8, HistogramBase::kNoFlags);
+  EXPECT_TRUE(linear_histogram->HasConstructionArguments(1, 100, 8));
+
+  // Try to get the same histogram name with different arguments.
+  bad_histogram = LinearHistogram::FactoryGet(
+      "BadConstructionLinear", 0, 100, 7, HistogramBase::kNoFlags);
+  EXPECT_EQ(DummyHistogram::GetInstance(), bad_histogram);
+  bad_histogram = LinearHistogram::FactoryGet(
+      "BadConstructionLinear", 10, 100, 8, HistogramBase::kNoFlags);
+  EXPECT_EQ(DummyHistogram::GetInstance(), bad_histogram);
+}
+
+TEST_P(HistogramTest, FactoryTime) {
+  const int kTestCreateCount = 1 << 14;  // Must be power-of-2.
+  const int kTestLookupCount = 100000;
+  const int kTestAddCount = 1000000;
+
+  // Create all histogram names in advance for accurate timing below.
+  std::vector<std::string> histogram_names;
+  for (int i = 0; i < kTestCreateCount; ++i) {
+    histogram_names.push_back(
+        StringPrintf("TestHistogram.%d", i % kTestCreateCount));
+  }
+
+  // Calculate cost of creating histograms.
+  TimeTicks create_start = TimeTicks::Now();
+  for (int i = 0; i < kTestCreateCount; ++i) {
+    Histogram::FactoryGet(histogram_names[i], 1, 100, 10,
+                          HistogramBase::kNoFlags);
+  }
+  TimeDelta create_ticks = TimeTicks::Now() - create_start;
+  int64_t create_ms = create_ticks.InMilliseconds();
+
+  VLOG(1) << kTestCreateCount << " histogram creations took " << create_ms
+          << "ms or about "
+          << (create_ms * 1000000) / kTestCreateCount
+          << "ns each.";
+
+  // Calculate cost of looking up existing histograms.
+  TimeTicks lookup_start = TimeTicks::Now();
+  for (int i = 0; i < kTestLookupCount; ++i) {
+    // 6007 is co-prime with kTestCreateCount and so will do lookups in an
+    // order less likely to be cacheable (but still hit them all) should the
+    // underlying storage use the exact histogram name as the key.
+    const int i_mult = 6007;
+    static_assert(i_mult < INT_MAX / kTestCreateCount, "Multiplier too big");
+    int index = (i * i_mult) & (kTestCreateCount - 1);
+    Histogram::FactoryGet(histogram_names[index], 1, 100, 10,
+                          HistogramBase::kNoFlags);
+  }
+  TimeDelta lookup_ticks = TimeTicks::Now() - lookup_start;
+  int64_t lookup_ms = lookup_ticks.InMilliseconds();
+
+  VLOG(1) << kTestLookupCount << " histogram lookups took " << lookup_ms
+          << "ms or about "
+          << (lookup_ms * 1000000) / kTestLookupCount
+          << "ns each.";
+
+  // Calculate cost of accessing histograms.
+  HistogramBase* histogram = Histogram::FactoryGet(
+      histogram_names[0], 1, 100, 10, HistogramBase::kNoFlags);
+  ASSERT_TRUE(histogram);
+  TimeTicks add_start = TimeTicks::Now();
+  for (int i = 0; i < kTestAddCount; ++i)
+    histogram->Add(i & 127);
+  TimeDelta add_ticks = TimeTicks::Now() - add_start;
+  int64_t add_ms = add_ticks.InMilliseconds();
+
+  VLOG(1) << kTestAddCount << " histogram adds took " << add_ms
+          << "ms or about "
+          << (add_ms * 1000000) / kTestAddCount
+          << "ns each.";
+}
+
+// For Histogram, LinearHistogram and CustomHistogram, the minimum for a
+// declared range is 1, while the maximum is (HistogramBase::kSampleType_MAX -
+// 1). Ranges exceeding those limits are nonetheless accepted and silently
+// clamped to them, for backwards compatibility.
+TEST(HistogramDeathTest, BadRangesTest) {
+  HistogramBase* histogram = Histogram::FactoryGet(
+      "BadRanges", 0, HistogramBase::kSampleType_MAX, 8,
+      HistogramBase::kNoFlags);
+  EXPECT_TRUE(
+      histogram->HasConstructionArguments(
+          1, HistogramBase::kSampleType_MAX - 1, 8));
+
+  HistogramBase* linear_histogram = LinearHistogram::FactoryGet(
+      "BadRangesLinear", 0, HistogramBase::kSampleType_MAX, 8,
+      HistogramBase::kNoFlags);
+  EXPECT_TRUE(
+      linear_histogram->HasConstructionArguments(
+          1, HistogramBase::kSampleType_MAX - 1, 8));
+
+  std::vector<int> custom_ranges;
+  custom_ranges.push_back(0);
+  custom_ranges.push_back(5);
+  Histogram* custom_histogram = static_cast<Histogram*>(
+      CustomHistogram::FactoryGet(
+          "BadRangesCustom", custom_ranges, HistogramBase::kNoFlags));
+  const BucketRanges* ranges = custom_histogram->bucket_ranges();
+  ASSERT_EQ(3u, ranges->size());
+  EXPECT_EQ(0, ranges->range(0));
+  EXPECT_EQ(5, ranges->range(1));
+  EXPECT_EQ(HistogramBase::kSampleType_MAX, ranges->range(2));
+
+  // CustomHistogram does not accept kSampleType_MAX as a range.
+  custom_ranges.push_back(HistogramBase::kSampleType_MAX);
+  EXPECT_DEATH_IF_SUPPORTED(
+      CustomHistogram::FactoryGet("BadRangesCustom2", custom_ranges,
+                                  HistogramBase::kNoFlags),
+               "");
+
+  // CustomHistogram needs at least 1 valid range.
+  custom_ranges.clear();
+  custom_ranges.push_back(0);
+  EXPECT_DEATH_IF_SUPPORTED(
+      CustomHistogram::FactoryGet("BadRangesCustom3", custom_ranges,
+                                  HistogramBase::kNoFlags),
+               "");
+}
+
+TEST_P(HistogramTest, ExpiredHistogramTest) {
+  HistogramBase* expired = Histogram::FactoryGet(kExpiredHistogramName, 1, 1000,
+                                                 10, HistogramBase::kNoFlags);
+  ASSERT_TRUE(expired);
+  expired->Add(5);
+  expired->Add(500);
+  auto samples = expired->SnapshotDelta();
+  EXPECT_EQ(0, samples->TotalCount());
+
+  HistogramBase* linear_expired = LinearHistogram::FactoryGet(
+      kExpiredHistogramName, 1, 1000, 10, HistogramBase::kNoFlags);
+  ASSERT_TRUE(linear_expired);
+  linear_expired->Add(5);
+  linear_expired->Add(500);
+  samples = linear_expired->SnapshotDelta();
+  EXPECT_EQ(0, samples->TotalCount());
+
+  std::vector<int> custom_ranges;
+  custom_ranges.push_back(1);
+  custom_ranges.push_back(5);
+  HistogramBase* custom_expired = CustomHistogram::FactoryGet(
+      kExpiredHistogramName, custom_ranges, HistogramBase::kNoFlags);
+  ASSERT_TRUE(custom_expired);
+  custom_expired->Add(2);
+  custom_expired->Add(4);
+  samples = custom_expired->SnapshotDelta();
+  EXPECT_EQ(0, samples->TotalCount());
+
+  HistogramBase* valid = Histogram::FactoryGet("ValidHistogram", 1, 1000, 10,
+                                               HistogramBase::kNoFlags);
+  ASSERT_TRUE(valid);
+  valid->Add(5);
+  valid->Add(500);
+  samples = valid->SnapshotDelta();
+  EXPECT_EQ(2, samples->TotalCount());
+
+  HistogramBase* linear_valid = LinearHistogram::FactoryGet(
+      "LinearHistogram", 1, 1000, 10, HistogramBase::kNoFlags);
+  ASSERT_TRUE(linear_valid);
+  linear_valid->Add(5);
+  linear_valid->Add(500);
+  samples = linear_valid->SnapshotDelta();
+  EXPECT_EQ(2, samples->TotalCount());
+
+  HistogramBase* custom_valid = CustomHistogram::FactoryGet(
+      "CustomHistogram", custom_ranges, HistogramBase::kNoFlags);
+  ASSERT_TRUE(custom_valid);
+  custom_valid->Add(2);
+  custom_valid->Add(4);
+  samples = custom_valid->SnapshotDelta();
+  EXPECT_EQ(2, samples->TotalCount());
+}
+
+}  // namespace base
diff --git a/base/metrics/histogram_unittest.nc b/base/metrics/histogram_unittest.nc
new file mode 100644
index 0000000..c9c2657
--- /dev/null
+++ b/base/metrics/histogram_unittest.nc
@@ -0,0 +1,90 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is a "No Compile Test" suite.
+// http://dev.chromium.org/developers/testing/no-compile-tests
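+//
+// Each NCTEST_* block below is compiled separately with that macro defined;
+// the bracketed r"..." pattern on its #if line is -- as these no-compile
+// suites typically work -- a regex that the expected compiler error must
+// match.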
+
+#include "base/metrics/histogram_functions.h"
+#include "base/metrics/histogram_macros.h"
+
+namespace base {
+
+#if defined(NCTEST_DIFFERENT_ENUM)  // [r"\|sample\| and \|boundary\| shouldn't be of different enums"]
+
+void WontCompile() {
+  enum TypeA { A };
+  enum TypeB { B };
+  UMA_HISTOGRAM_ENUMERATION("", A, B);
+}
+
+#elif defined(NCTEST_DIFFERENT_ENUM_CLASS)  // [r"\|sample\| and \|boundary\| shouldn't be of different enums"]
+
+void WontCompile() {
+  enum class TypeA { A };
+  enum class TypeB { B };
+  UMA_HISTOGRAM_ENUMERATION("", TypeA::A, TypeB::B);
+}
+
+#elif defined(NCTEST_DIFFERENT_ENUM_MIXED)  // [r"\|sample\| and \|boundary\| shouldn't be of different enums"]
+
+void WontCompile() {
+  enum class TypeA { A };
+  enum TypeB { B };
+  UMA_HISTOGRAM_ENUMERATION("", TypeA::A, B);
+}
+
+#elif defined(NCTEST_NEGATIVE_ENUM_MAX)  // [r'static_assert failed "\|boundary\| is out of range of HistogramBase::Sample"']
+
+void WontCompile() {
+  // Buckets for enumeration start from 0, so a boundary < 0 is illegal.
+  enum class TypeA { A = -1 };
+  UMA_HISTOGRAM_ENUMERATION("", TypeA::A, TypeA::A);
+}
+
+#elif defined(NCTEST_ENUM_MAX_OUT_OF_RANGE)  // [r'static_assert failed "\|boundary\| is out of range of HistogramBase::Sample"']
+
+void WontCompile() {
+  // HistogramBase::Sample is an int and can't hold larger values.
+  enum class TypeA : uint32_t { A = 0xffffffff };
+  UMA_HISTOGRAM_ENUMERATION("", TypeA::A, TypeA::A);
+}
+
+#elif defined(NCTEST_SAMPLE_NOT_ENUM)  // [r'static_assert failed "Unexpected: \|boundary\| is enum, but \|sample\| is not."']
+
+void WontCompile() {
+  enum TypeA { A };
+  UMA_HISTOGRAM_ENUMERATION("", 0, TypeA::A);
+}
+
+#elif defined(NCTEST_FUNCTION_INT)  // [r"Non enum passed to UmaHistogramEnumeration"]
+
+void WontCompile() {
+  UmaHistogramEnumeration("", 1, 2);
+}
+
+#elif defined(NCTEST_FUNCTION_DIFFERENT_ENUM)  // [r"no matching function for call to 'UmaHistogramEnumeration'"]
+
+void WontCompile() {
+  enum TypeA { A };
+  enum TypeB { B };
+  UmaHistogramEnumeration("", A, B);
+}
+
+#elif defined(NCTEST_FUNCTION_FIRST_NOT_ENUM)  // [r"no matching function for call to 'UmaHistogramEnumeration'"]
+
+void WontCompile() {
+  enum TypeB { B };
+  UmaHistogramEnumeration("", 1, B);
+}
+
+#elif defined(NCTEST_FUNCTION_SECOND_NOT_ENUM)  // [r"no matching function for call to 'UmaHistogramEnumeration'"]
+
+void WontCompile() {
+  enum TypeA { A };
+  UmaHistogramEnumeration("", A, 2);
+}
+
+#endif
+
+}  // namespace base
diff --git a/base/metrics/metrics_hashes.cc b/base/metrics/metrics_hashes.cc
new file mode 100644
index 0000000..5672b06
--- /dev/null
+++ b/base/metrics/metrics_hashes.cc
@@ -0,0 +1,31 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/metrics_hashes.h"
+
+#include "base/logging.h"
+#include "base/md5.h"
+#include "base/sys_byteorder.h"
+
+namespace base {
+
+namespace {
+
+// Converts the 8-byte prefix of an MD5 hash into a uint64_t value.
+inline uint64_t DigestToUInt64(const base::MD5Digest& digest) {
+  uint64_t value;
+  DCHECK_GE(sizeof(digest.a), sizeof(value));
+  memcpy(&value, digest.a, sizeof(value));
+  return base::NetToHost64(value);
+}
+
+}  // namespace
+
+uint64_t HashMetricName(base::StringPiece name) {
+  base::MD5Digest digest;
+  base::MD5Sum(name.data(), name.size(), &digest);
+  return DigestToUInt64(digest);
+}
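+
+// Usage sketch -- the expected value is pinned by metrics_hashes_unittest.cc
+// in this patch:
+//   uint64_t id = base::HashMetricName("Back");  // 0x0557fa923dcee4d0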
+
+}  // namespace base
diff --git a/base/metrics/metrics_hashes.h b/base/metrics/metrics_hashes.h
new file mode 100644
index 0000000..d05c4ba
--- /dev/null
+++ b/base/metrics/metrics_hashes.h
@@ -0,0 +1,21 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_METRICS_HASHES_H_
+#define BASE_METRICS_METRICS_HASHES_H_
+
+#include <stdint.h>
+
+#include "base/base_export.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+// Computes a uint64_t hash of a given string based on its MD5 hash. Suitable
+// for metric names.
+BASE_EXPORT uint64_t HashMetricName(base::StringPiece name);
+
+}  // namespace base
+
+#endif  // BASE_METRICS_METRICS_HASHES_H_
diff --git a/base/metrics/metrics_hashes_unittest.cc b/base/metrics/metrics_hashes_unittest.cc
new file mode 100644
index 0000000..aea254e
--- /dev/null
+++ b/base/metrics/metrics_hashes_unittest.cc
@@ -0,0 +1,35 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/metrics_hashes.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/format_macros.h"
+#include "base/macros.h"
+#include "base/strings/stringprintf.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+// Make sure our ID hashes are the same as what we see on the server side.
+TEST(MetricsUtilTest, HashMetricName) {
+  static const struct {
+    std::string input;
+    std::string output;
+  } cases[] = {
+    {"Back", "0x0557fa923dcee4d0"},
+    {"Forward", "0x67d2f6740a8eaebf"},
+    {"NewTab", "0x290eb683f96572f1"},
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    uint64_t hash = HashMetricName(cases[i].input);
+    std::string hash_hex = base::StringPrintf("0x%016" PRIx64, hash);
+    EXPECT_EQ(cases[i].output, hash_hex);
+  }
+}
+
+}  // namespace base
diff --git a/base/metrics/persistent_histogram_allocator.cc b/base/metrics/persistent_histogram_allocator.cc
new file mode 100644
index 0000000..bfbb44b
--- /dev/null
+++ b/base/metrics/persistent_histogram_allocator.cc
@@ -0,0 +1,1024 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/persistent_histogram_allocator.h"
+
+#include <memory>
+
+#include "base/atomicops.h"
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/files/important_file_writer.h"
+#include "base/files/memory_mapped_file.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/metrics/histogram.h"
+#include "base/metrics/histogram_base.h"
+#include "base/metrics/histogram_samples.h"
+#include "base/metrics/metrics_hashes.h"
+#include "base/metrics/persistent_sample_map.h"
+#include "base/metrics/sparse_histogram.h"
+#include "base/metrics/statistics_recorder.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/pickle.h"
+#include "base/process/process_handle.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_split.h"
+#include "base/strings/stringprintf.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+
+namespace {
+
+// Type identifiers used when storing in persistent memory so they can be
+// identified during extraction; the first 4 bytes of the SHA1 of the name
+// are used as a unique integer. A "version number" is added to the base
+// so that, if the structure of that object changes, stored older versions
+// will be safely ignored.
+enum : uint32_t {
+  kTypeIdRangesArray = 0xBCEA225A + 1,  // SHA1(RangesArray) v1
+  kTypeIdCountsArray = 0x53215530 + 1,  // SHA1(CountsArray) v1
+};
+
+// The current globally-active persistent allocator for all new histograms.
+// The object held here will obviously not be destructed at process exit
+// but that's best since PersistentMemoryAllocator objects (that underlie
+// GlobalHistogramAllocator objects) are explicitly forbidden from doing
+// anything essential at exit anyway due to the fact that they depend on data
+// managed elsewhere and which could be destructed first. An AtomicWord is
+// used instead of std::atomic because the latter can create global ctors
+// and dtors.
+subtle::AtomicWord g_histogram_allocator = 0;
+
+// Take an array of range boundaries and create a proper BucketRanges object
+// which is returned to the caller. A return of nullptr indicates that the
+// passed boundaries are invalid.
+std::unique_ptr<BucketRanges> CreateRangesFromData(
+    HistogramBase::Sample* ranges_data,
+    uint32_t ranges_checksum,
+    size_t count) {
+  // To avoid racy destruction at shutdown, the following may be leaked.
+  std::unique_ptr<BucketRanges> ranges(new BucketRanges(count));
+  DCHECK_EQ(count, ranges->size());
+  for (size_t i = 0; i < count; ++i) {
+    if (i > 0 && ranges_data[i] <= ranges_data[i - 1])
+      return nullptr;
+    ranges->set_range(i, ranges_data[i]);
+  }
+
+  ranges->ResetChecksum();
+  if (ranges->checksum() != ranges_checksum)
+    return nullptr;
+
+  return ranges;
+}
+
+// Calculate the number of bytes required to store all of a histogram's
+// "counts". This will return zero (0) if |bucket_count| is not valid.
+size_t CalculateRequiredCountsBytes(size_t bucket_count) {
+  // 2 because each "sample count" also requires a backup "logged count"
+  // used for calculating the delta during snapshot operations.
+  const size_t kBytesPerBucket = 2 * sizeof(HistogramBase::AtomicCount);
+
+  // If the |bucket_count| is such that it would overflow the return type,
+  // perhaps as the result of a malicious actor, then return zero to
+  // indicate the problem to the caller.
+  if (bucket_count > std::numeric_limits<size_t>::max() / kBytesPerBucket)
+    return 0;
+
+  return bucket_count * kBytesPerBucket;
+}
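+
+// Worked example, assuming the typical 4-byte HistogramBase::AtomicCount: a
+// histogram with 50 buckets needs 50 * 2 * 4 = 400 bytes -- one array of 50
+// sample counts plus a parallel array of 50 logged counts.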
+
+}  // namespace
+
+const Feature kPersistentHistogramsFeature{
+  "PersistentHistograms", FEATURE_DISABLED_BY_DEFAULT
+};
+
+
+PersistentSparseHistogramDataManager::PersistentSparseHistogramDataManager(
+    PersistentMemoryAllocator* allocator)
+    : allocator_(allocator), record_iterator_(allocator) {}
+
+PersistentSparseHistogramDataManager::~PersistentSparseHistogramDataManager() =
+    default;
+
+PersistentSampleMapRecords*
+PersistentSparseHistogramDataManager::UseSampleMapRecords(uint64_t id,
+                                                          const void* user) {
+  base::AutoLock auto_lock(lock_);
+  return GetSampleMapRecordsWhileLocked(id)->Acquire(user);
+}
+
+PersistentSampleMapRecords*
+PersistentSparseHistogramDataManager::GetSampleMapRecordsWhileLocked(
+    uint64_t id) {
+  lock_.AssertAcquired();
+
+  auto found = sample_records_.find(id);
+  if (found != sample_records_.end())
+    return found->second.get();
+
+  std::unique_ptr<PersistentSampleMapRecords>& samples = sample_records_[id];
+  samples = std::make_unique<PersistentSampleMapRecords>(this, id);
+  return samples.get();
+}
+
+bool PersistentSparseHistogramDataManager::LoadRecords(
+    PersistentSampleMapRecords* sample_map_records) {
+  // DataManager must be locked in order to access the found_ field of any
+  // PersistentSampleMapRecords object.
+  base::AutoLock auto_lock(lock_);
+  bool found = false;
+
+  // If there are already "found" entries for the passed object, move them.
+  if (!sample_map_records->found_.empty()) {
+    sample_map_records->records_.reserve(sample_map_records->records_.size() +
+                                         sample_map_records->found_.size());
+    sample_map_records->records_.insert(sample_map_records->records_.end(),
+                                        sample_map_records->found_.begin(),
+                                        sample_map_records->found_.end());
+    sample_map_records->found_.clear();
+    found = true;
+  }
+
+  // Acquiring a lock is a semi-expensive operation so load some records with
+  // each call. More than this number may be loaded if it takes longer to
+  // find at least one matching record for the passed object.
+  const int kMinimumNumberToLoad = 10;
+  const uint64_t match_id = sample_map_records->sample_map_id_;
+
+  // Loop while no entry is found OR we haven't yet loaded the minimum number.
+  // This will continue reading even after a match is found.
+  for (int count = 0; !found || count < kMinimumNumberToLoad; ++count) {
+    // Get the next sample-record. The iterator will always resume from where
+    // it left off even if it previously had nothing further to return.
+    uint64_t found_id;
+    PersistentMemoryAllocator::Reference ref =
+        PersistentSampleMap::GetNextPersistentRecord(record_iterator_,
+                                                     &found_id);
+
+    // Stop immediately if there are none.
+    if (!ref)
+      break;
+
+    // The sample-record could be for any sparse histogram. Add the reference
+    // to the appropriate collection for later use.
+    if (found_id == match_id) {
+      sample_map_records->records_.push_back(ref);
+      found = true;
+    } else {
+      PersistentSampleMapRecords* samples =
+          GetSampleMapRecordsWhileLocked(found_id);
+      DCHECK(samples);
+      samples->found_.push_back(ref);
+    }
+  }
+
+  return found;
+}
+
+
+PersistentSampleMapRecords::PersistentSampleMapRecords(
+    PersistentSparseHistogramDataManager* data_manager,
+    uint64_t sample_map_id)
+    : data_manager_(data_manager), sample_map_id_(sample_map_id) {}
+
+PersistentSampleMapRecords::~PersistentSampleMapRecords() = default;
+
+PersistentSampleMapRecords* PersistentSampleMapRecords::Acquire(
+    const void* user) {
+  DCHECK(!user_);
+  user_ = user;
+  seen_ = 0;
+  return this;
+}
+
+void PersistentSampleMapRecords::Release(const void* user) {
+  DCHECK_EQ(user_, user);
+  user_ = nullptr;
+}
+
+PersistentMemoryAllocator::Reference PersistentSampleMapRecords::GetNext() {
+  DCHECK(user_);
+
+  // If there are no unseen records, lock and swap in all the found ones.
+  if (records_.size() == seen_) {
+    if (!data_manager_->LoadRecords(this))
+      return PersistentMemoryAllocator::kReferenceNull;
+  }
+
+  // Return the next record. Records *must* be returned in the same order
+  // they are found in the persistent memory in order to ensure that all
+  // objects using this data always have the same state. Race conditions
+  // can cause duplicate records so using the "first found" is the only
+  // guarantee that all objects always access the same one.
+  DCHECK_LT(seen_, records_.size());
+  return records_[seen_++];
+}
+
+PersistentMemoryAllocator::Reference PersistentSampleMapRecords::CreateNew(
+    HistogramBase::Sample value) {
+  return PersistentSampleMap::CreatePersistentRecord(data_manager_->allocator_,
+                                                     sample_map_id_, value);
+}
+
+
+// This data will be held in persistent memory in order for processes to
+// locate and use histograms created elsewhere.
+struct PersistentHistogramAllocator::PersistentHistogramData {
+  // SHA1(Histogram): Increment this if structure changes!
+  static constexpr uint32_t kPersistentTypeId = 0xF1645910 + 3;
+
+  // Expected size for 32/64-bit check.
+  static constexpr size_t kExpectedInstanceSize =
+      40 + 2 * HistogramSamples::Metadata::kExpectedInstanceSize;
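+  // (Sanity sketch, assuming 4-byte ints and references: the eight 32-bit
+  // fields below account for 32 bytes and the 8-byte name[] stub for the
+  // rest of the 40.)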
+
+  int32_t histogram_type;
+  int32_t flags;
+  int32_t minimum;
+  int32_t maximum;
+  uint32_t bucket_count;
+  PersistentMemoryAllocator::Reference ranges_ref;
+  uint32_t ranges_checksum;
+  subtle::Atomic32 counts_ref;  // PersistentMemoryAllocator::Reference
+  HistogramSamples::Metadata samples_metadata;
+  HistogramSamples::Metadata logged_metadata;
+
+  // Space for the histogram name will be added during the actual allocation
+  // request. This must be the last field of the structure. A zero-size array
+  // or a "flexible" array would be preferred but is not (yet) valid C++.
+  char name[sizeof(uint64_t)];  // Force 64-bit alignment on 32-bit builds.
+};
+
+PersistentHistogramAllocator::Iterator::Iterator(
+    PersistentHistogramAllocator* allocator)
+    : allocator_(allocator), memory_iter_(allocator->memory_allocator()) {}
+
+std::unique_ptr<HistogramBase>
+PersistentHistogramAllocator::Iterator::GetNextWithIgnore(Reference ignore) {
+  PersistentMemoryAllocator::Reference ref;
+  while ((ref = memory_iter_.GetNextOfType<PersistentHistogramData>()) != 0) {
+    if (ref != ignore)
+      return allocator_->GetHistogram(ref);
+  }
+  return nullptr;
+}
+
+
+PersistentHistogramAllocator::PersistentHistogramAllocator(
+    std::unique_ptr<PersistentMemoryAllocator> memory)
+    : memory_allocator_(std::move(memory)),
+      sparse_histogram_data_manager_(memory_allocator_.get()) {}
+
+PersistentHistogramAllocator::~PersistentHistogramAllocator() = default;
+
+std::unique_ptr<HistogramBase> PersistentHistogramAllocator::GetHistogram(
+    Reference ref) {
+  // Unfortunately, the histogram "pickle" methods cannot be used as part of
+  // the persistence because the deserialization methods always create local
+  // count data (while these must reference the persistent counts) and always
+  // add it to the local list of known histograms (while these may be simple
+  // references to histograms in other processes).
+  PersistentHistogramData* data =
+      memory_allocator_->GetAsObject<PersistentHistogramData>(ref);
+  const size_t length = memory_allocator_->GetAllocSize(ref);
+
+  // Check that metadata is reasonable: name is null-terminated and non-empty,
+  // ID fields have been loaded with a hash of the name (0 is considered
+  // unset/invalid).
+  if (!data || data->name[0] == '\0' ||
+      reinterpret_cast<char*>(data)[length - 1] != '\0' ||
+      data->samples_metadata.id == 0 || data->logged_metadata.id == 0 ||
+      // Note: Sparse histograms use |id + 1| in |logged_metadata|.
+      (data->logged_metadata.id != data->samples_metadata.id &&
+       data->logged_metadata.id != data->samples_metadata.id + 1) ||
+      // Most non-matching values happen due to truncated names. Ideally, we
+      // could just verify the name length based on the overall alloc length,
+      // but that doesn't work because the allocated block may have been
+      // aligned to the next boundary value.
+      HashMetricName(data->name) != data->samples_metadata.id) {
+    NOTREACHED();
+    return nullptr;
+  }
+  return CreateHistogram(data);
+}
+
+std::unique_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
+    HistogramType histogram_type,
+    const std::string& name,
+    int minimum,
+    int maximum,
+    const BucketRanges* bucket_ranges,
+    int32_t flags,
+    Reference* ref_ptr) {
+  // If the allocator is corrupt, don't waste time trying anything else.
+  // This also allows differentiating on the dashboard between allocations
+  // that failed due to a corrupt allocator and the number of process
+  // instances with one, the latter being indicated by "newly corrupt", below.
+  if (memory_allocator_->IsCorrupt())
+    return nullptr;
+
+  // Create the metadata necessary for a persistent sparse histogram. This
+  // is done first because it is a small subset of what is required for
+  // other histograms. The type is "under construction" so that a crash
+  // during the datafill doesn't leave a bad record around that could cause
+  // confusion by another process trying to read it. It will be corrected
+  // once histogram construction is complete.
+  PersistentHistogramData* histogram_data =
+      memory_allocator_->New<PersistentHistogramData>(
+          offsetof(PersistentHistogramData, name) + name.length() + 1);
+  if (histogram_data) {
+    memcpy(histogram_data->name, name.c_str(), name.size() + 1);
+    histogram_data->histogram_type = histogram_type;
+    histogram_data->flags = flags | HistogramBase::kIsPersistent;
+  }
+
+  // Create the remaining metadata necessary for regular histograms.
+  if (histogram_type != SPARSE_HISTOGRAM) {
+    size_t bucket_count = bucket_ranges->bucket_count();
+    size_t counts_bytes = CalculateRequiredCountsBytes(bucket_count);
+    if (counts_bytes == 0) {
+      // |bucket_count| was out-of-range.
+      NOTREACHED();
+      return nullptr;
+    }
+
+    // Since the StatisticsRecorder keeps a global collection of BucketRanges
+    // objects for re-use, it would be dangerous for one to hold a reference
+    // from a persistent allocator that is not the global one (which is
+    // permanent once set). If this stops being the case, this check can
+    // become an "if" condition beside "!ranges_ref" below and before
+    // set_persistent_reference() farther down.
+    DCHECK_EQ(this, GlobalHistogramAllocator::Get());
+
+    // Re-use an existing BucketRanges persistent allocation if one is known;
+    // otherwise, create one.
+    PersistentMemoryAllocator::Reference ranges_ref =
+        bucket_ranges->persistent_reference();
+    if (!ranges_ref) {
+      size_t ranges_count = bucket_count + 1;
+      size_t ranges_bytes = ranges_count * sizeof(HistogramBase::Sample);
+      ranges_ref =
+          memory_allocator_->Allocate(ranges_bytes, kTypeIdRangesArray);
+      if (ranges_ref) {
+        HistogramBase::Sample* ranges_data =
+            memory_allocator_->GetAsArray<HistogramBase::Sample>(
+                ranges_ref, kTypeIdRangesArray, ranges_count);
+        if (ranges_data) {
+          for (size_t i = 0; i < bucket_ranges->size(); ++i)
+            ranges_data[i] = bucket_ranges->range(i);
+          bucket_ranges->set_persistent_reference(ranges_ref);
+        } else {
+          // This should never happen but be tolerant if it does.
+          NOTREACHED();
+          ranges_ref = PersistentMemoryAllocator::kReferenceNull;
+        }
+      }
+    } else {
+      DCHECK_EQ(kTypeIdRangesArray, memory_allocator_->GetType(ranges_ref));
+    }
+
+
+    // Only continue here if all allocations were successful. If they weren't,
+    // there is no way to free the space but that's not really a problem since
+    // the allocations only fail because the space is full or corrupt and so
+    // any future attempts will also fail.
+    if (ranges_ref && histogram_data) {
+      histogram_data->minimum = minimum;
+      histogram_data->maximum = maximum;
+      // |bucket_count| must fit within 32-bits or the allocation of the counts
+      // array would have failed for being too large; the allocator supports
+      // less than 4GB total size.
+      histogram_data->bucket_count = static_cast<uint32_t>(bucket_count);
+      histogram_data->ranges_ref = ranges_ref;
+      histogram_data->ranges_checksum = bucket_ranges->checksum();
+    } else {
+      histogram_data = nullptr;  // Clear this for proper handling below.
+    }
+  }
+
+  if (histogram_data) {
+    // Create the histogram using resources in persistent memory. This ends up
+    // resolving the "ref" values stored in histogram_data instead of just
+    // using what is already known above but avoids duplicating the switch
+    // statement here and serves as a double-check that everything is
+    // correct before committing the new histogram to persistent space.
+    std::unique_ptr<HistogramBase> histogram = CreateHistogram(histogram_data);
+    DCHECK(histogram);
+    DCHECK_NE(0U, histogram_data->samples_metadata.id);
+    DCHECK_NE(0U, histogram_data->logged_metadata.id);
+
+    PersistentMemoryAllocator::Reference histogram_ref =
+        memory_allocator_->GetAsReference(histogram_data);
+    if (ref_ptr != nullptr)
+      *ref_ptr = histogram_ref;
+
+    // By storing the reference within the allocator to this histogram, the
+    // next import (which will happen before the next histogram creation)
+    // will know to skip it.
+    // See also the comment in ImportHistogramsToStatisticsRecorder().
+    subtle::NoBarrier_Store(&last_created_, histogram_ref);
+    return histogram;
+  }
+
+  if (memory_allocator_->IsCorrupt())
+    NOTREACHED() << memory_allocator_->Name() << " is corrupt!";
+
+  return nullptr;
+}
+
+void PersistentHistogramAllocator::FinalizeHistogram(Reference ref,
+                                                     bool registered) {
+  if (registered) {
+    // If the created persistent histogram was registered then it needs to
+    // be marked as "iterable" in order to be found by other processes. This
+    // happens only after the histogram is fully formed so it's impossible for
+    // code iterating through the allocator to read a partially created record.
+    memory_allocator_->MakeIterable(ref);
+  } else {
+    // If it wasn't registered then a race condition must have caused two to
+    // be created. The allocator does not support releasing the acquired memory
+    // so just change the type to be empty.
+    memory_allocator_->ChangeType(ref, 0,
+                                  PersistentHistogramData::kPersistentTypeId,
+                                  /*clear=*/false);
+  }
+}
+
+void PersistentHistogramAllocator::MergeHistogramDeltaToStatisticsRecorder(
+    HistogramBase* histogram) {
+  DCHECK(histogram);
+
+  HistogramBase* existing = GetOrCreateStatisticsRecorderHistogram(histogram);
+  if (!existing) {
+    // The above should never fail but if it does, no real harm is done.
+    // The data won't be merged but it also won't be recorded as merged
+    // so a future try, if successful, will get what was missed. If it
+    // continues to fail, some metric data will be lost but that is better
+    // than crashing.
+    NOTREACHED();
+    return;
+  }
+
+  // Merge the delta from the passed object to the one in the SR.
+  existing->AddSamples(*histogram->SnapshotDelta());
+}
+
+void PersistentHistogramAllocator::MergeHistogramFinalDeltaToStatisticsRecorder(
+    const HistogramBase* histogram) {
+  DCHECK(histogram);
+
+  HistogramBase* existing = GetOrCreateStatisticsRecorderHistogram(histogram);
+  if (!existing) {
+    // The above should never fail but if it does, no real harm is done.
+    // Some metric data will be lost but that is better than crashing.
+    NOTREACHED();
+    return;
+  }
+
+  // Merge the delta from the passed object to the one in the SR.
+  existing->AddSamples(*histogram->SnapshotFinalDelta());
+}
+
+PersistentSampleMapRecords* PersistentHistogramAllocator::UseSampleMapRecords(
+    uint64_t id,
+    const void* user) {
+  return sparse_histogram_data_manager_.UseSampleMapRecords(id, user);
+}
+
+void PersistentHistogramAllocator::CreateTrackingHistograms(StringPiece name) {
+  memory_allocator_->CreateTrackingHistograms(name);
+}
+
+void PersistentHistogramAllocator::UpdateTrackingHistograms() {
+  memory_allocator_->UpdateTrackingHistograms();
+}
+
+void PersistentHistogramAllocator::ClearLastCreatedReferenceForTesting() {
+  subtle::NoBarrier_Store(&last_created_, 0);
+}
+
+std::unique_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram(
+    PersistentHistogramData* histogram_data_ptr) {
+  if (!histogram_data_ptr) {
+    NOTREACHED();
+    return nullptr;
+  }
+
+  // Sparse histograms are quite different so handle them as a special case.
+  if (histogram_data_ptr->histogram_type == SPARSE_HISTOGRAM) {
+    std::unique_ptr<HistogramBase> histogram =
+        SparseHistogram::PersistentCreate(this, histogram_data_ptr->name,
+                                          &histogram_data_ptr->samples_metadata,
+                                          &histogram_data_ptr->logged_metadata);
+    DCHECK(histogram);
+    histogram->SetFlags(histogram_data_ptr->flags);
+    return histogram;
+  }
+
+  // Copy the configuration fields from histogram_data_ptr to local storage
+  // because anything in persistent memory cannot be trusted as it could be
+  // changed at any moment by a malicious actor that shares access. The local
+  // values are validated below and then used to create the histogram, knowing
+  // they haven't changed between validation and use.
+  int32_t histogram_type = histogram_data_ptr->histogram_type;
+  int32_t histogram_flags = histogram_data_ptr->flags;
+  int32_t histogram_minimum = histogram_data_ptr->minimum;
+  int32_t histogram_maximum = histogram_data_ptr->maximum;
+  uint32_t histogram_bucket_count = histogram_data_ptr->bucket_count;
+  uint32_t histogram_ranges_ref = histogram_data_ptr->ranges_ref;
+  uint32_t histogram_ranges_checksum = histogram_data_ptr->ranges_checksum;
+
+  HistogramBase::Sample* ranges_data =
+      memory_allocator_->GetAsArray<HistogramBase::Sample>(
+          histogram_ranges_ref, kTypeIdRangesArray,
+          PersistentMemoryAllocator::kSizeAny);
+
+  const uint32_t max_buckets =
+      std::numeric_limits<uint32_t>::max() / sizeof(HistogramBase::Sample);
+  size_t required_bytes =
+      (histogram_bucket_count + 1) * sizeof(HistogramBase::Sample);
+  size_t allocated_bytes =
+      memory_allocator_->GetAllocSize(histogram_ranges_ref);
+  if (!ranges_data || histogram_bucket_count < 2 ||
+      histogram_bucket_count >= max_buckets ||
+      allocated_bytes < required_bytes) {
+    NOTREACHED();
+    return nullptr;
+  }
+
+  std::unique_ptr<const BucketRanges> created_ranges = CreateRangesFromData(
+      ranges_data, histogram_ranges_checksum, histogram_bucket_count + 1);
+  if (!created_ranges) {
+    NOTREACHED();
+    return nullptr;
+  }
+  const BucketRanges* ranges =
+      StatisticsRecorder::RegisterOrDeleteDuplicateRanges(
+          created_ranges.release());
+
+  size_t counts_bytes = CalculateRequiredCountsBytes(histogram_bucket_count);
+  PersistentMemoryAllocator::Reference counts_ref =
+      subtle::Acquire_Load(&histogram_data_ptr->counts_ref);
+  if (counts_bytes == 0 ||
+      (counts_ref != 0 &&
+       memory_allocator_->GetAllocSize(counts_ref) < counts_bytes)) {
+    NOTREACHED();
+    return nullptr;
+  }
+
+  // The "counts" data (including both samples and logged samples) is a delayed
+  // persistent allocation meaning that though its size and storage for a
+  // reference is defined, no space is reserved until actually needed. When
+  // it is needed, memory will be allocated from the persistent segment and
+  // a reference to it stored at the passed address. Other threads can then
+  // notice the valid reference and access the same data.
+  DelayedPersistentAllocation counts_data(memory_allocator_.get(),
+                                          &histogram_data_ptr->counts_ref,
+                                          kTypeIdCountsArray, counts_bytes, 0);
+
+  // A second delayed allocation is defined using the same reference storage
+  // location as the first so the allocation of one will automatically be found
+  // by the other. Within the block, the first half of the space is for "counts"
+  // and the second half is for "logged counts".
+  DelayedPersistentAllocation logged_data(
+      memory_allocator_.get(), &histogram_data_ptr->counts_ref,
+      kTypeIdCountsArray, counts_bytes, counts_bytes / 2,
+      /*make_iterable=*/false);
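+
+  // (Layout sketch of that single block, |counts_bytes| long overall:
+  //    [ sample counts : bucket_count ][ logged counts : bucket_count ]
+  //  with the logged half beginning at byte offset counts_bytes / 2.)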
+
+  // Create the right type of histogram.
+  const char* name = histogram_data_ptr->name;
+  std::unique_ptr<HistogramBase> histogram;
+  switch (histogram_type) {
+    case HISTOGRAM:
+      histogram = Histogram::PersistentCreate(
+          name, histogram_minimum, histogram_maximum, ranges, counts_data,
+          logged_data, &histogram_data_ptr->samples_metadata,
+          &histogram_data_ptr->logged_metadata);
+      DCHECK(histogram);
+      break;
+    case LINEAR_HISTOGRAM:
+      histogram = LinearHistogram::PersistentCreate(
+          name, histogram_minimum, histogram_maximum, ranges, counts_data,
+          logged_data, &histogram_data_ptr->samples_metadata,
+          &histogram_data_ptr->logged_metadata);
+      DCHECK(histogram);
+      break;
+    case BOOLEAN_HISTOGRAM:
+      histogram = BooleanHistogram::PersistentCreate(
+          name, ranges, counts_data, logged_data,
+          &histogram_data_ptr->samples_metadata,
+          &histogram_data_ptr->logged_metadata);
+      DCHECK(histogram);
+      break;
+    case CUSTOM_HISTOGRAM:
+      histogram = CustomHistogram::PersistentCreate(
+          name, ranges, counts_data, logged_data,
+          &histogram_data_ptr->samples_metadata,
+          &histogram_data_ptr->logged_metadata);
+      DCHECK(histogram);
+      break;
+    default:
+      NOTREACHED();
+  }
+
+  if (histogram) {
+    DCHECK_EQ(histogram_type, histogram->GetHistogramType());
+    histogram->SetFlags(histogram_flags);
+  }
+
+  return histogram;
+}
+
+HistogramBase*
+PersistentHistogramAllocator::GetOrCreateStatisticsRecorderHistogram(
+    const HistogramBase* histogram) {
+  // This should never be called on the global histogram allocator as objects
+  // created there are already within the global statistics recorder.
+  DCHECK_NE(GlobalHistogramAllocator::Get(), this);
+  DCHECK(histogram);
+
+  HistogramBase* existing =
+      StatisticsRecorder::FindHistogram(histogram->histogram_name());
+  if (existing)
+    return existing;
+
+  // Adding the passed histogram to the SR would cause a problem if the
+  // allocator that holds it eventually goes away. Instead, create a new
+  // one from a serialized version. Deserialization calls the appropriate
+  // FactoryGet() which will create the histogram in the global persistent-
+  // histogram allocator if such is set.
+  base::Pickle pickle;
+  histogram->SerializeInfo(&pickle);
+  PickleIterator iter(pickle);
+  existing = DeserializeHistogramInfo(&iter);
+  if (!existing)
+    return nullptr;
+
+  // Make sure there is no "serialization" flag set.
+  DCHECK_EQ(0, existing->flags() & HistogramBase::kIPCSerializationSourceFlag);
+  // Record the newly created histogram in the SR.
+  return StatisticsRecorder::RegisterOrDeleteDuplicate(existing);
+}
+
+GlobalHistogramAllocator::~GlobalHistogramAllocator() = default;
+
+// static
+void GlobalHistogramAllocator::CreateWithPersistentMemory(
+    void* base,
+    size_t size,
+    size_t page_size,
+    uint64_t id,
+    StringPiece name) {
+  Set(WrapUnique(
+      new GlobalHistogramAllocator(std::make_unique<PersistentMemoryAllocator>(
+          base, size, page_size, id, name, false))));
+}
+
+// static
+void GlobalHistogramAllocator::CreateWithLocalMemory(
+    size_t size,
+    uint64_t id,
+    StringPiece name) {
+  Set(WrapUnique(new GlobalHistogramAllocator(
+      std::make_unique<LocalPersistentMemoryAllocator>(size, id, name))));
+}
+
+#if !defined(OS_NACL)
+// static
+bool GlobalHistogramAllocator::CreateWithFile(
+    const FilePath& file_path,
+    size_t size,
+    uint64_t id,
+    StringPiece name) {
+  bool exists = PathExists(file_path);
+  File file(
+      file_path, File::FLAG_OPEN_ALWAYS | File::FLAG_SHARE_DELETE |
+                 File::FLAG_READ | File::FLAG_WRITE);
+
+  std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
+  if (exists) {
+    size = saturated_cast<size_t>(file.GetLength());
+    mmfile->Initialize(std::move(file), MemoryMappedFile::READ_WRITE);
+  } else {
+    mmfile->Initialize(std::move(file), {0, size},
+                       MemoryMappedFile::READ_WRITE_EXTEND);
+  }
+  if (!mmfile->IsValid() ||
+      !FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, true)) {
+    NOTREACHED() << file_path;
+    return false;
+  }
+
+  Set(WrapUnique(new GlobalHistogramAllocator(
+      std::make_unique<FilePersistentMemoryAllocator>(std::move(mmfile), size,
+                                                      id, name, false))));
+  Get()->SetPersistentLocation(file_path);
+  return true;
+}
+
+// static
+bool GlobalHistogramAllocator::CreateWithActiveFile(const FilePath& base_path,
+                                                    const FilePath& active_path,
+                                                    const FilePath& spare_path,
+                                                    size_t size,
+                                                    uint64_t id,
+                                                    StringPiece name) {
+  // Old "active" becomes "base".
+  if (!base::ReplaceFile(active_path, base_path, nullptr))
+    base::DeleteFile(base_path, /*recursive=*/false);
+  DCHECK(!base::PathExists(active_path));
+
+  // Move any "spare" into "active". Okay to continue if file doesn't exist.
+  if (!spare_path.empty()) {
+    base::ReplaceFile(spare_path, active_path, nullptr);
+    DCHECK(!base::PathExists(spare_path));
+  }
+
+  return base::GlobalHistogramAllocator::CreateWithFile(active_path, size, id,
+                                                        name);
+}
+
+// static
+bool GlobalHistogramAllocator::CreateWithActiveFileInDir(const FilePath& dir,
+                                                         size_t size,
+                                                         uint64_t id,
+                                                         StringPiece name) {
+  FilePath base_path, active_path, spare_path;
+  ConstructFilePaths(dir, name, &base_path, &active_path, &spare_path);
+  return CreateWithActiveFile(base_path, active_path, spare_path, size, id,
+                              name);
+}
+
+// static
+FilePath GlobalHistogramAllocator::ConstructFilePath(const FilePath& dir,
+                                                     StringPiece name) {
+  return dir.AppendASCII(name).AddExtension(
+      PersistentMemoryAllocator::kFileExtension);
+}
+
+// static
+FilePath GlobalHistogramAllocator::ConstructFilePathForUploadDir(
+    const FilePath& dir,
+    StringPiece name,
+    base::Time stamp,
+    ProcessId pid) {
+  return ConstructFilePath(
+      dir,
+      StringPrintf("%.*s-%lX-%lX", static_cast<int>(name.length()), name.data(),
+                   static_cast<long>(stamp.ToTimeT()), static_cast<long>(pid)));
+}
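+
+// For example (hypothetical values), a histogram file named "BrowserMetrics"
+// for pid 0x1F40 stamped at time_t 0x5A0B1C2D would come out as
+// "BrowserMetrics-5A0B1C2D-1F40" plus the allocator's file extension.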
+
+// static
+bool GlobalHistogramAllocator::ParseFilePath(const FilePath& path,
+                                             std::string* out_name,
+                                             Time* out_stamp,
+                                             ProcessId* out_pid) {
+  std::string filename = path.BaseName().AsUTF8Unsafe();
+  std::vector<base::StringPiece> parts = base::SplitStringPiece(
+      filename, "-.", base::KEEP_WHITESPACE, base::SPLIT_WANT_ALL);
+  if (parts.size() != 4)
+    return false;
+
+  if (out_name)
+    *out_name = parts[0].as_string();
+
+  if (out_stamp) {
+    int64_t stamp;
+    if (!HexStringToInt64(parts[1], &stamp))
+      return false;
+    *out_stamp = Time::FromTimeT(static_cast<time_t>(stamp));
+  }
+
+  if (out_pid) {
+    int64_t pid;
+    if (!HexStringToInt64(parts[2], &pid))
+      return false;
+    *out_pid = static_cast<ProcessId>(pid);
+  }
+
+  return true;
+}
+
+// static
+void GlobalHistogramAllocator::ConstructFilePaths(const FilePath& dir,
+                                                  StringPiece name,
+                                                  FilePath* out_base_path,
+                                                  FilePath* out_active_path,
+                                                  FilePath* out_spare_path) {
+  if (out_base_path)
+    *out_base_path = ConstructFilePath(dir, name);
+
+  if (out_active_path) {
+    *out_active_path =
+        ConstructFilePath(dir, name.as_string().append("-active"));
+  }
+
+  if (out_spare_path) {
+    *out_spare_path = ConstructFilePath(dir, name.as_string().append("-spare"));
+  }
+}
+
+// static
+void GlobalHistogramAllocator::ConstructFilePathsForUploadDir(
+    const FilePath& active_dir,
+    const FilePath& upload_dir,
+    const std::string& name,
+    FilePath* out_upload_path,
+    FilePath* out_active_path,
+    FilePath* out_spare_path) {
+  if (out_upload_path) {
+    *out_upload_path = ConstructFilePathForUploadDir(
+        upload_dir, name, Time::Now(), GetCurrentProcId());
+  }
+
+  if (out_active_path) {
+    *out_active_path =
+        ConstructFilePath(active_dir, name + std::string("-active"));
+  }
+
+  if (out_spare_path) {
+    *out_spare_path =
+        ConstructFilePath(active_dir, name + std::string("-spare"));
+  }
+}
+
+// static
+bool GlobalHistogramAllocator::CreateSpareFile(const FilePath& spare_path,
+                                               size_t size) {
+  FilePath temp_spare_path = spare_path.AddExtension(FILE_PATH_LITERAL(".tmp"));
+  bool success = true;
+  {
+    File spare_file(temp_spare_path, File::FLAG_CREATE_ALWAYS |
+                                         File::FLAG_READ | File::FLAG_WRITE);
+    if (!spare_file.IsValid())
+      return false;
+
+    MemoryMappedFile mmfile;
+    mmfile.Initialize(std::move(spare_file), {0, size},
+                      MemoryMappedFile::READ_WRITE_EXTEND);
+    success = mmfile.IsValid();
+  }
+
+  if (success)
+    success = ReplaceFile(temp_spare_path, spare_path, nullptr);
+
+  if (!success)
+    DeleteFile(temp_spare_path, /*recursive=*/false);
+
+  return success;
+}
+
+// static
+bool GlobalHistogramAllocator::CreateSpareFileInDir(const FilePath& dir,
+                                                    size_t size,
+                                                    StringPiece name) {
+  FilePath spare_path;
+  ConstructFilePaths(dir, name, nullptr, nullptr, &spare_path);
+  return CreateSpareFile(spare_path, size);
+}
+#endif  // !defined(OS_NACL)
+
+// static
+void GlobalHistogramAllocator::CreateWithSharedMemoryHandle(
+    const SharedMemoryHandle& handle,
+    size_t size) {
+  std::unique_ptr<SharedMemory> shm(
+      new SharedMemory(handle, /*readonly=*/false));
+  if (!shm->Map(size) ||
+      !SharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(*shm)) {
+    NOTREACHED();
+    return;
+  }
+
+  Set(WrapUnique(new GlobalHistogramAllocator(
+      std::make_unique<SharedPersistentMemoryAllocator>(
+          std::move(shm), 0, StringPiece(), /*readonly=*/false))));
+}
+
+// static
+void GlobalHistogramAllocator::Set(
+    std::unique_ptr<GlobalHistogramAllocator> allocator) {
+  // Releasing or changing an allocator is extremely dangerous because it
+  // likely has histograms stored within it. If the backing memory is
+  // also released, future accesses to those histograms will seg-fault.
+  CHECK(!subtle::NoBarrier_Load(&g_histogram_allocator));
+  subtle::Release_Store(&g_histogram_allocator,
+                        reinterpret_cast<uintptr_t>(allocator.release()));
+  size_t existing = StatisticsRecorder::GetHistogramCount();
+
+  DVLOG_IF(1, existing)
+      << existing << " histograms were created before persistence was enabled.";
+}
+
+// static
+GlobalHistogramAllocator* GlobalHistogramAllocator::Get() {
+  return reinterpret_cast<GlobalHistogramAllocator*>(
+      subtle::Acquire_Load(&g_histogram_allocator));
+}
+
+// static
+std::unique_ptr<GlobalHistogramAllocator>
+GlobalHistogramAllocator::ReleaseForTesting() {
+  GlobalHistogramAllocator* histogram_allocator = Get();
+  if (!histogram_allocator)
+    return nullptr;
+  PersistentMemoryAllocator* memory_allocator =
+      histogram_allocator->memory_allocator();
+
+  // Before releasing the memory, it's necessary to have the Statistics-
+  // Recorder forget about the histograms contained therein; otherwise,
+  // some operations will try to access them and the released memory.
+  PersistentMemoryAllocator::Iterator iter(memory_allocator);
+  const PersistentHistogramData* data;
+  while ((data = iter.GetNextOfObject<PersistentHistogramData>()) != nullptr) {
+    StatisticsRecorder::ForgetHistogramForTesting(data->name);
+  }
+
+  subtle::Release_Store(&g_histogram_allocator, 0);
+  return WrapUnique(histogram_allocator);
+}
+
+void GlobalHistogramAllocator::SetPersistentLocation(const FilePath& location) {
+  persistent_location_ = location;
+}
+
+const FilePath& GlobalHistogramAllocator::GetPersistentLocation() const {
+  return persistent_location_;
+}
+
+bool GlobalHistogramAllocator::WriteToPersistentLocation() {
+#if defined(OS_NACL)
+  // NACL doesn't support file operations, including ImportantFileWriter.
+  NOTREACHED();
+  return false;
+#else
+  // Stop if no destination is set.
+  if (persistent_location_.empty()) {
+    NOTREACHED() << "Could not write \"" << Name() << "\" persistent histograms"
+                 << " to file because no location was set.";
+    return false;
+  }
+
+  StringPiece contents(static_cast<const char*>(data()), used());
+  if (!ImportantFileWriter::WriteFileAtomically(persistent_location_,
+                                                contents)) {
+    LOG(ERROR) << "Could not write \"" << Name() << "\" persistent histograms"
+               << " to file: " << persistent_location_.value();
+    return false;
+  }
+
+  return true;
+#endif
+}
+
+void GlobalHistogramAllocator::DeletePersistentLocation() {
+  memory_allocator()->SetMemoryState(PersistentMemoryAllocator::MEMORY_DELETED);
+
+#if defined(OS_NACL)
+  NOTREACHED();
+#else
+  if (persistent_location_.empty())
+    return;
+
+  // Open (with delete) and then immediately close the file by going out of
+  // scope. This is the only cross-platform safe way to delete a file that may
+  // be open elsewhere. Open handles will continue to operate normally but
+  // new opens will not be possible.
+  File file(persistent_location_,
+            File::FLAG_OPEN | File::FLAG_READ | File::FLAG_DELETE_ON_CLOSE);
+#endif
+}
+
+GlobalHistogramAllocator::GlobalHistogramAllocator(
+    std::unique_ptr<PersistentMemoryAllocator> memory)
+    : PersistentHistogramAllocator(std::move(memory)),
+      import_iterator_(this) {
+}
+
+void GlobalHistogramAllocator::ImportHistogramsToStatisticsRecorder() {
+  // Skip the import if it's the histogram that was last created. Should a
+  // race condition cause the "last created" to be overwritten before it
+  // is recognized here then the histogram will be created and be ignored
+  // when it is detected as a duplicate by the statistics-recorder. This
+  // simple check reduces the time of creating persistent histograms by
+  // about 40%.
+  Reference record_to_ignore = last_created();
+
+  // There is no lock on this because the iterator is lock-free while still
+  // guaranteed to return each entry only once. The StatisticsRecorder
+  // has its own lock so the Register operation is safe.
+  while (true) {
+    std::unique_ptr<HistogramBase> histogram =
+        import_iterator_.GetNextWithIgnore(record_to_ignore);
+    if (!histogram)
+      break;
+    StatisticsRecorder::RegisterOrDeleteDuplicate(histogram.release());
+  }
+}
+
+}  // namespace base
diff --git a/base/metrics/persistent_histogram_allocator.h b/base/metrics/persistent_histogram_allocator.h
new file mode 100644
index 0000000..395511f
--- /dev/null
+++ b/base/metrics/persistent_histogram_allocator.h
@@ -0,0 +1,505 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_PERSISTENT_HISTOGRAM_ALLOCATOR_H_
+#define BASE_METRICS_PERSISTENT_HISTOGRAM_ALLOCATOR_H_
+
+#include <map>
+#include <memory>
+
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/feature_list.h"
+#include "base/memory/shared_memory.h"
+#include "base/metrics/histogram_base.h"
+#include "base/metrics/persistent_memory_allocator.h"
+#include "base/strings/string_piece.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+
+class BucketRanges;
+class FilePath;
+class PersistentSampleMapRecords;
+class PersistentSparseHistogramDataManager;
+
+// Feature definition for enabling histogram persistence.
+BASE_EXPORT extern const Feature kPersistentHistogramsFeature;
+
+
+// A data manager for sparse histograms so that each instance doesn't have
+// to separately iterate over the entire memory segment. Though this class
+// will generally be accessed through the PersistentHistogramAllocator defined
+// below, it can be used independently on any PersistentMemoryAllocator (making
+// it usable for testing). This object supports only one instance of a sparse
+// histogram for a given id. Tests that create multiple identical histograms,
+// perhaps to simulate multiple processes, should create a separate manager
+// for each.
+class BASE_EXPORT PersistentSparseHistogramDataManager {
+ public:
+  // Constructs the data manager. The allocator must live longer than any
+  // managers that reference it.
+  explicit PersistentSparseHistogramDataManager(
+      PersistentMemoryAllocator* allocator);
+
+  ~PersistentSparseHistogramDataManager();
+
+  // Returns the object that manages the persistent-sample-map records for a
+  // given |id|. Only one |user| of this data is allowed at a time. This does
+  // an automatic Acquire() on the records. The user must call Release() on
+  // the returned object when it is finished with it. Ownership of the records
+  // object stays with this manager.
+  PersistentSampleMapRecords* UseSampleMapRecords(uint64_t id,
+                                                  const void* user);
+
+  // Convenience method that gets the object for a given reference so callers
+  // don't have to also keep their own pointer to the appropriate allocator.
+  template <typename T>
+  T* GetAsObject(PersistentMemoryAllocator::Reference ref) {
+    return allocator_->GetAsObject<T>(ref);
+  }
+
+ private:
+  friend class PersistentSampleMapRecords;
+
+  // Gets the object holding records for a given sample-map id when |lock_|
+  // has already been acquired.
+  PersistentSampleMapRecords* GetSampleMapRecordsWhileLocked(uint64_t id);
+
+  // Loads sample-map records looking for those belonging to the specified
+  // |load_id|. Records found for other sample-maps are held for later use
+  // without having to iterate again. This should be called only from a
+  // PersistentSampleMapRecords object because those objects have a contract
+  // that there are no other threads accessing the internal records_ field
+  // of the object that is passed in.
+  bool LoadRecords(PersistentSampleMapRecords* sample_map_records);
+
+  // Weak-pointer to the allocator used by the sparse histograms.
+  PersistentMemoryAllocator* allocator_;
+
+  // Iterator within the allocator for finding sample records.
+  PersistentMemoryAllocator::Iterator record_iterator_;
+
+  // Mapping of sample-map IDs to their sample records.
+  std::map<uint64_t, std::unique_ptr<PersistentSampleMapRecords>>
+      sample_records_;
+
+  // A lock used for synchronizing changes to sample_records_.
+  base::Lock lock_;
+
+  DISALLOW_COPY_AND_ASSIGN(PersistentSparseHistogramDataManager);
+};
+
+
+// This class manages sample-records used by a PersistentSampleMap container
+// that underlies a persistent SparseHistogram object. It is broken out into a
+// top-level class so that it can be forward-declared in other header files
+// rather than include this entire file as would be necessary if it were
+// declared within the PersistentSparseHistogramDataManager class above.
+class BASE_EXPORT PersistentSampleMapRecords {
+ public:
+  // Constructs an instance of this class. The manager object must live longer
+  // than all instances of this class that reference it, which is not usually
+  // a problem since these objects are generally managed from within that
+  // manager instance.
+  PersistentSampleMapRecords(PersistentSparseHistogramDataManager* data_manager,
+                             uint64_t sample_map_id);
+
+  ~PersistentSampleMapRecords();
+
+  // Resets the internal state for a new object using this data. The return
+  // value is "this" as a convenience.
+  PersistentSampleMapRecords* Acquire(const void* user);
+
+  // Indicates that the using object is done with this data.
+  void Release(const void* user);
+
+  // Gets the next reference to a persistent sample-map record. The type and
+  // layout of the data being referenced is defined entirely within the
+  // PersistentSampleMap class.
+  PersistentMemoryAllocator::Reference GetNext();
+
+  // Creates a new persistent sample-map record for sample |value| and returns
+  // a reference to it.
+  PersistentMemoryAllocator::Reference CreateNew(HistogramBase::Sample value);
+
+  // Convenience method that gets the object for a given reference so callers
+  // don't have to also keep their own pointer to the appropriate allocator.
+  // This is expected to be used with the SampleRecord structure defined inside
+  // the persistent_sample_map.cc file but since that isn't exported (for
+  // cleanliness of the interface), a template is defined that will be
+  // resolved when used inside that file.
+  template <typename T>
+  T* GetAsObject(PersistentMemoryAllocator::Reference ref) {
+    return data_manager_->GetAsObject<T>(ref);
+  }
+
+ private:
+  friend PersistentSparseHistogramDataManager;
+
+  // Weak-pointer to the parent data-manager object.
+  PersistentSparseHistogramDataManager* data_manager_;
+
+  // ID of PersistentSampleMap to which these records apply.
+  const uint64_t sample_map_id_;
+
+  // The current user of this set of records. It is used to ensure that no
+  // more than one object is using these records at a given time.
+  const void* user_ = nullptr;
+
+  // This is the count of how many "records" have already been read by the
+  // owning sample-map.
+  size_t seen_ = 0;
+
+  // This is the set of records previously found for a sample map. Because
+  // there is only ever one object with a given ID (typically a hash of a
+  // histogram name) and because the parent SparseHistogram has acquired
+  // its own lock before accessing the PersistentSampleMap it controls, this
+  // list can be accessed without acquiring any additional lock.
+  std::vector<PersistentMemoryAllocator::Reference> records_;
+
+  // This is the set of records found during iteration through memory. It
+  // is appended in bulk to "records". Access to this vector can be done
+  // only while holding the parent manager's lock.
+  std::vector<PersistentMemoryAllocator::Reference> found_;
+
+  DISALLOW_COPY_AND_ASSIGN(PersistentSampleMapRecords);
+};
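
For illustration, a sketch of the acquire/iterate/release contract described above. |manager| is assumed to be an existing PersistentSparseHistogramDataManager, and the id and user token are placeholders; normally PersistentSampleMap drives this internally:

```cpp
const uint64_t kSampleMapId = 42;  // Placeholder; typically a name hash.
static const int kUserToken = 0;   // Identifies the current user.

// UseSampleMapRecords() performs an implicit Acquire() for |kUserToken|.
base::PersistentSampleMapRecords* records =
    manager.UseSampleMapRecords(kSampleMapId, &kUserToken);

// Walk the records currently known for this sample map; a zero reference
// indicates that no further records were found.
while (base::PersistentMemoryAllocator::Reference ref = records->GetNext()) {
  // ... resolve |ref| via records->GetAsObject<T>(ref) ...
}
records->Release(&kUserToken);
```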
+
+
+// This class manages histograms created within a PersistentMemoryAllocator.
+class BASE_EXPORT PersistentHistogramAllocator {
+ public:
+  // A reference to a histogram. While this is implemented as PMA::Reference,
+  // it is not conceptually the same thing. Outside callers should always use
+  // a Reference matching the class it is for and not mix the two.
+  using Reference = PersistentMemoryAllocator::Reference;
+
+  // Iterator used for fetching persistent histograms from an allocator.
+  // It is lock-free and thread-safe.
+  // See PersistentMemoryAllocator::Iterator for more information.
+  class BASE_EXPORT Iterator {
+   public:
+    // Constructs an iterator on a given |allocator|, starting at the beginning.
+    // The allocator must live beyond the lifetime of the iterator.
+    explicit Iterator(PersistentHistogramAllocator* allocator);
+
+    // Gets the next histogram from persistent memory; returns null if there
+    // are no more histograms to be found. This may still be called again
+    // later to retrieve any new histograms added in the meantime.
+    std::unique_ptr<HistogramBase> GetNext() { return GetNextWithIgnore(0); }
+
+    // Gets the next histogram from persistent memory, ignoring one particular
+    // reference in the process. Pass |ignore| of zero (0) to ignore nothing.
+    std::unique_ptr<HistogramBase> GetNextWithIgnore(Reference ignore);
+
+   private:
+    // Weak-pointer to histogram allocator being iterated over.
+    PersistentHistogramAllocator* allocator_;
+
+    // The iterator used for stepping through objects in persistent memory.
+    // It is lock-free and thread-safe, which is why this class is as well.
+    PersistentMemoryAllocator::Iterator memory_iter_;
+
+    DISALLOW_COPY_AND_ASSIGN(Iterator);
+  };
+
+  // A PersistentHistogramAllocator is constructed from a PersistentMemory-
+  // Allocator object of which it takes ownership.
+  explicit PersistentHistogramAllocator(
+      std::unique_ptr<PersistentMemoryAllocator> memory);
+  virtual ~PersistentHistogramAllocator();
+
+  // Direct access to underlying memory allocator. If the segment is shared
+  // across threads or processes, reading data through these values does
+  // not guarantee consistency. Use with care. Do not write.
+  PersistentMemoryAllocator* memory_allocator() {
+    return memory_allocator_.get();
+  }
+
+  // Implement the "metadata" API of a PersistentMemoryAllocator, forwarding
+  // those requests to the real one.
+  uint64_t Id() const { return memory_allocator_->Id(); }
+  const char* Name() const { return memory_allocator_->Name(); }
+  const void* data() const { return memory_allocator_->data(); }
+  size_t length() const { return memory_allocator_->length(); }
+  size_t size() const { return memory_allocator_->size(); }
+  size_t used() const { return memory_allocator_->used(); }
+
+  // Recreate a Histogram from data held in persistent memory. Though this
+  // object will be local to the current process, the sample data will be
+  // shared with all other threads referencing it. This method takes a |ref|
+  // to where the top-level histogram data may be found in this allocator.
+  // This method will return null if any problem is detected with the data.
+  std::unique_ptr<HistogramBase> GetHistogram(Reference ref);
+
+  // Allocate a new persistent histogram. The returned histogram will not
+  // be locatable by other allocators until it is "finalized".
+  std::unique_ptr<HistogramBase> AllocateHistogram(
+      HistogramType histogram_type,
+      const std::string& name,
+      int minimum,
+      int maximum,
+      const BucketRanges* bucket_ranges,
+      int32_t flags,
+      Reference* ref_ptr);
+
+  // Finalize the creation of the histogram, making it available to other
+  // processes if |registered| is true (i.e. it was added to the
+  // StatisticsRecorder), forgetting it otherwise.
+  void FinalizeHistogram(Reference ref, bool registered);
+
+  // Merges the data in a persistent histogram with one held globally by the
+  // StatisticsRecorder, updating the "logged" samples within the passed
+  // object so that repeated merges are allowed. Don't call this on a "global"
+  // allocator because histograms created there will already be in the SR.
+  void MergeHistogramDeltaToStatisticsRecorder(HistogramBase* histogram);
+
+  // As above but merge the "final" delta. No update of "logged" samples is
+  // done which means it can operate on read-only objects. It's essential,
+  // however, not to call this more than once or those final samples will
+  // get recorded again.
+  void MergeHistogramFinalDeltaToStatisticsRecorder(
+      const HistogramBase* histogram);
+
+  // Returns the object that manages the persistent-sample-map records for a
+  // given |id|. Only one |user| of this data is allowed at a time. This does
+  // an automatic Acquire() on the records. The user must call Release() on
+  // the returned object when it is finished with it. Ownership stays with
+  // this allocator.
+  PersistentSampleMapRecords* UseSampleMapRecords(uint64_t id,
+                                                  const void* user);
+
+  // Create internal histograms for tracking memory use and allocation sizes
+  // for the allocator of |name| (which can simply be the result of Name()).
+  // This is done separately from construction for situations such as when the
+  // histograms will be backed by memory provided by this very allocator.
+  //
+  // IMPORTANT: Callers must update tools/metrics/histograms/histograms.xml
+  // with the following histograms:
+  //    UMA.PersistentAllocator.name.Allocs
+  //    UMA.PersistentAllocator.name.UsedPct
+  void CreateTrackingHistograms(StringPiece name);
+  void UpdateTrackingHistograms();
+
+  // Clears the internal |last_created_| reference so testing can validate
+  // operation without that optimization.
+  void ClearLastCreatedReferenceForTesting();
+
+ protected:
+  // The structure used to hold histogram data in persistent memory. It is
+  // defined and used entirely within the .cc file.
+  struct PersistentHistogramData;
+
+  // Gets the reference of the last histogram created, used to avoid
+  // trying to import what was just created.
+  PersistentHistogramAllocator::Reference last_created() {
+    return subtle::NoBarrier_Load(&last_created_);
+  }
+
+  // Gets the next histogram in persistent data based on iterator while
+  // ignoring a particular reference if it is found.
+  std::unique_ptr<HistogramBase> GetNextHistogramWithIgnore(Iterator* iter,
+                                                            Reference ignore);
+
+ private:
+  // Create a histogram based on saved (persistent) information about it.
+  std::unique_ptr<HistogramBase> CreateHistogram(
+      PersistentHistogramData* histogram_data_ptr);
+
+  // Gets or creates an object in the global StatisticsRecorder matching
+  // the |histogram| passed. Null is returned if one was not found and
+  // one could not be created.
+  HistogramBase* GetOrCreateStatisticsRecorderHistogram(
+      const HistogramBase* histogram);
+
+  // The memory allocator that provides the actual histogram storage.
+  std::unique_ptr<PersistentMemoryAllocator> memory_allocator_;
+
+  // The data-manager used to improve performance of sparse histograms.
+  PersistentSparseHistogramDataManager sparse_histogram_data_manager_;
+
+  // A reference to the last-created histogram in the allocator, used to avoid
+  // trying to import what was just created.
+  // TODO(bcwhite): Change this to std::atomic<PMA::Reference> when available.
+  subtle::Atomic32 last_created_ = 0;
+
+  DISALLOW_COPY_AND_ASSIGN(PersistentHistogramAllocator);
+};
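
The merge API is easiest to see end-to-end. A hedged sketch, modeled on the unit tests later in this change, that wraps an existing segment at |base|/|size| (assumed to have been filled by another process) and folds its deltas into the StatisticsRecorder:

```cpp
#include <memory>

#include "base/metrics/persistent_histogram_allocator.h"

void MergeForeignHistograms(void* base, size_t size) {
  base::PersistentHistogramAllocator recovery(
      std::make_unique<base::PersistentMemoryAllocator>(
          base, size, /*page_size=*/0, /*id=*/0, /*name=*/"",
          /*readonly=*/false));
  base::PersistentHistogramAllocator::Iterator it(&recovery);
  while (std::unique_ptr<base::HistogramBase> histogram = it.GetNext()) {
    // Updates the "logged" samples so that repeated merges are safe.
    recovery.MergeHistogramDeltaToStatisticsRecorder(histogram.get());
  }
}
```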
+
+
+// A special case of the PersistentHistogramAllocator that operates on a
+// global scale, collecting histograms created through standard macros and
+// the FactoryGet() method.
+class BASE_EXPORT GlobalHistogramAllocator
+    : public PersistentHistogramAllocator {
+ public:
+  ~GlobalHistogramAllocator() override;
+
+  // Create a global allocator using the passed-in memory |base|, |size|, and
+  // other parameters. Ownership of the memory segment remains with the caller.
+  static void CreateWithPersistentMemory(void* base,
+                                         size_t size,
+                                         size_t page_size,
+                                         uint64_t id,
+                                         StringPiece name);
+
+  // Create a global allocator using an internal block of memory of the
+  // specified |size| taken from the heap.
+  static void CreateWithLocalMemory(size_t size, uint64_t id, StringPiece name);
+
+#if !defined(OS_NACL)
+  // Create a global allocator by memory-mapping a |file|. If the file does
+  // not exist, it will be created with the specified |size|. If the file does
+  // exist, the allocator will use and add to its contents, ignoring the passed
+  // size in favor of the existing size. Returns whether the global allocator
+  // was set.
+  static bool CreateWithFile(const FilePath& file_path,
+                             size_t size,
+                             uint64_t id,
+                             StringPiece name);
+
+  // Creates a new file at |active_path|. If it already exists, it will first be
+  // moved to |base_path|. In all cases, any old file at |base_path| will be
+  // removed. If |spare_path| is non-empty and exists, that will be renamed and
+  // used as the active file. Otherwise, the file will be created using the
+  // given size, id, and name. Returns whether the global allocator was set.
+  static bool CreateWithActiveFile(const FilePath& base_path,
+                                   const FilePath& active_path,
+                                   const FilePath& spare_path,
+                                   size_t size,
+                                   uint64_t id,
+                                   StringPiece name);
+
+  // Uses ConstructBaseActivePairFilePaths() to build a pair of file names which
+  // are then used for CreateWithActiveFile(). |name| is used for both the
+  // internal name for the allocator and also for the name of the file inside
+  // |dir|.
+  static bool CreateWithActiveFileInDir(const FilePath& dir,
+                                        size_t size,
+                                        uint64_t id,
+                                        StringPiece name);
+
+  // Constructs the file path |dir|/|name| with the standard ".pma" extension.
+  static FilePath ConstructFilePath(const FilePath& dir, StringPiece name);
+
+  // Like above but with timestamp and pid for use in upload directories.
+  static FilePath ConstructFilePathForUploadDir(const FilePath& dir,
+                                                StringPiece name,
+                                                base::Time stamp,
+                                                ProcessId pid);
+
+  // Parses a filename to extract name, timestamp, and pid.
+  static bool ParseFilePath(const FilePath& path,
+                            std::string* out_name,
+                            Time* out_stamp,
+                            ProcessId* out_pid);
+
+  // Constructs a set of names in |dir| based on |name| that can be used for a
+  // base + active persistent memory-mapped location for CreateWithActiveFile().
+  // The spare path is a file that can be pre-created and moved to be active
+  // without any startup penalty that comes from constructing the file. |name|
+  // will be used as the basename of the file inside |dir|. |out_base_path|,
+  // |out_active_path|, or |out_spare_path| may be null if not needed.
+  static void ConstructFilePaths(const FilePath& dir,
+                                 StringPiece name,
+                                 FilePath* out_base_path,
+                                 FilePath* out_active_path,
+                                 FilePath* out_spare_path);
+
+  // As above but puts the base files in a different "upload" directory. This
+  // is useful when moving all completed files into a single directory for easy
+  // upload management.
+  static void ConstructFilePathsForUploadDir(const FilePath& active_dir,
+                                             const FilePath& upload_dir,
+                                             const std::string& name,
+                                             FilePath* out_upload_path,
+                                             FilePath* out_active_path,
+                                             FilePath* out_spare_path);
+
+  // Create a "spare" file that can later be made the "active" file. This
+  // should be done on a background thread if possible.
+  static bool CreateSpareFile(const FilePath& spare_path, size_t size);
+
+  // Same as above but uses standard names. |name| is the name of the allocator
+  // and is also used to create the correct filename.
+  static bool CreateSpareFileInDir(const FilePath& dir_path,
+                                   size_t size,
+                                   StringPiece name);
+#endif
+
+  // Create a global allocator using a block of shared memory accessed
+  // through the given |handle| and |size|. The allocator takes ownership
+  // of the handle and closes it upon destruction, though the memory will
+  // continue to live if other processes have access to it.
+  static void CreateWithSharedMemoryHandle(const SharedMemoryHandle& handle,
+                                           size_t size);
+
+  // Sets a GlobalHistogramAllocator for globally storing histograms in
+  // a space that can be persisted or shared between processes. There is only
+  // ever one allocator for all such histograms created by a single process.
+  // This takes ownership of the object and should be called as soon as
+  // possible during startup to capture as many histograms as possible and
+  // while operating single-threaded so there are no race-conditions.
+  static void Set(std::unique_ptr<GlobalHistogramAllocator> allocator);
+
+  // Gets a pointer to the global histogram allocator. Returns null if none
+  // exists.
+  static GlobalHistogramAllocator* Get();
+
+  // This access to the persistent allocator is only for testing; it extracts
+  // the current allocator completely. This allows easy creation of histograms
+  // within persistent memory segments which can then be extracted and used in
+  // other ways.
+  static std::unique_ptr<GlobalHistogramAllocator> ReleaseForTesting();
+
+  // Stores a pathname to which the contents of this allocator should be saved
+  // in order to persist the data for a later use.
+  void SetPersistentLocation(const FilePath& location);
+
+  // Retrieves a previously set pathname to which the contents of this allocator
+  // are to be saved.
+  const FilePath& GetPersistentLocation() const;
+
+  // Writes the internal data to a previously set location. This is generally
+  // called when a process is exiting from a section of code that may not know
+  // the filesystem. The data is written in an atomic manner. The return value
+  // indicates success.
+  bool WriteToPersistentLocation();
+
+  // If there is a global metrics file being updated on disk, mark it to be
+  // deleted when the process exits.
+  void DeletePersistentLocation();
+
+ private:
+  friend class StatisticsRecorder;
+
+  // Creates a new global histogram allocator.
+  explicit GlobalHistogramAllocator(
+      std::unique_ptr<PersistentMemoryAllocator> memory);
+
+  // Import new histograms from the global histogram allocator. It's possible
+  // for other processes to create histograms in the active memory segment;
+  // this adds those to the internal list of known histograms to avoid creating
+  // duplicates that would have to be merged during reporting. Every call to
+  // this method resumes from the last entry it saw; it costs nothing if
+  // nothing new has been added.
+  void ImportHistogramsToStatisticsRecorder();
+
+  // Builds a FilePath for a metrics file.
+  static FilePath MakeMetricsFilePath(const FilePath& dir, StringPiece name);
+
+  // Import always continues from where it left off, making use of a single
+  // iterator to continue the work.
+  Iterator import_iterator_;
+
+  // The location to which the data should be persisted.
+  FilePath persistent_location_;
+
+  DISALLOW_COPY_AND_ASSIGN(GlobalHistogramAllocator);
+};
+
+}  // namespace base
+
+#endif  // BASE_METRICS_PERSISTENT_HISTOGRAM_ALLOCATOR_H_
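
Taken together, the file-based factories support a simple persisted setup. A sketch under assumed names (the directory handling and "BrowserMetrics" basename are illustrative only):

```cpp
bool EnableFileBackedHistograms(const base::FilePath& dir) {
  const base::FilePath path =
      base::GlobalHistogramAllocator::ConstructFilePath(dir, "BrowserMetrics");
  // Maps (or creates) the file. Histogram data written afterwards lives in
  // the mapping itself, so no explicit WriteToPersistentLocation() is needed.
  return base::GlobalHistogramAllocator::CreateWithFile(
      path, /*size=*/1 << 20, /*id=*/0, "BrowserMetrics");
}
```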
diff --git a/base/metrics/persistent_histogram_allocator_unittest.cc b/base/metrics/persistent_histogram_allocator_unittest.cc
new file mode 100644
index 0000000..7e07386
--- /dev/null
+++ b/base/metrics/persistent_histogram_allocator_unittest.cc
@@ -0,0 +1,375 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/persistent_histogram_allocator.h"
+
+#include "base/files/file.h"
+#include "base/files/file_util.h"
+#include "base/files/scoped_temp_dir.h"
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/metrics/bucket_ranges.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/metrics/persistent_memory_allocator.h"
+#include "base/metrics/statistics_recorder.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+class PersistentHistogramAllocatorTest : public testing::Test {
+ protected:
+  const int32_t kAllocatorMemorySize = 64 << 10;  // 64 KiB
+
+  PersistentHistogramAllocatorTest()
+      : statistics_recorder_(StatisticsRecorder::CreateTemporaryForTesting()) {
+    CreatePersistentHistogramAllocator();
+  }
+  ~PersistentHistogramAllocatorTest() override {
+    DestroyPersistentHistogramAllocator();
+  }
+
+  void CreatePersistentHistogramAllocator() {
+    allocator_memory_.reset(new char[kAllocatorMemorySize]);
+
+    GlobalHistogramAllocator::ReleaseForTesting();
+    memset(allocator_memory_.get(), 0, kAllocatorMemorySize);
+    GlobalHistogramAllocator::CreateWithPersistentMemory(
+        allocator_memory_.get(), kAllocatorMemorySize, 0, 0,
+        "PersistentHistogramAllocatorTest");
+    allocator_ = GlobalHistogramAllocator::Get()->memory_allocator();
+  }
+
+  void DestroyPersistentHistogramAllocator() {
+    allocator_ = nullptr;
+    GlobalHistogramAllocator::ReleaseForTesting();
+  }
+
+  std::unique_ptr<StatisticsRecorder> statistics_recorder_;
+  std::unique_ptr<char[]> allocator_memory_;
+  PersistentMemoryAllocator* allocator_ = nullptr;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(PersistentHistogramAllocatorTest);
+};
+
+TEST_F(PersistentHistogramAllocatorTest, CreateAndIterate) {
+  PersistentMemoryAllocator::MemoryInfo meminfo0;
+  allocator_->GetMemoryInfo(&meminfo0);
+
+  // Try basic construction
+  HistogramBase* histogram = Histogram::FactoryGet(
+      "TestHistogram", 1, 1000, 10, HistogramBase::kIsPersistent);
+  EXPECT_TRUE(histogram);
+  histogram->CheckName("TestHistogram");
+  PersistentMemoryAllocator::MemoryInfo meminfo1;
+  allocator_->GetMemoryInfo(&meminfo1);
+  EXPECT_GT(meminfo0.free, meminfo1.free);
+
+  HistogramBase* linear_histogram = LinearHistogram::FactoryGet(
+      "TestLinearHistogram", 1, 1000, 10, HistogramBase::kIsPersistent);
+  EXPECT_TRUE(linear_histogram);
+  linear_histogram->CheckName("TestLinearHistogram");
+  PersistentMemoryAllocator::MemoryInfo meminfo2;
+  allocator_->GetMemoryInfo(&meminfo2);
+  EXPECT_GT(meminfo1.free, meminfo2.free);
+
+  HistogramBase* boolean_histogram = BooleanHistogram::FactoryGet(
+      "TestBooleanHistogram", HistogramBase::kIsPersistent);
+  EXPECT_TRUE(boolean_histogram);
+  boolean_histogram->CheckName("TestBooleanHistogram");
+  PersistentMemoryAllocator::MemoryInfo meminfo3;
+  allocator_->GetMemoryInfo(&meminfo3);
+  EXPECT_GT(meminfo2.free, meminfo3.free);
+
+  std::vector<int> custom_ranges;
+  custom_ranges.push_back(1);
+  custom_ranges.push_back(5);
+  HistogramBase* custom_histogram = CustomHistogram::FactoryGet(
+      "TestCustomHistogram", custom_ranges, HistogramBase::kIsPersistent);
+  EXPECT_TRUE(custom_histogram);
+  custom_histogram->CheckName("TestCustomHistogram");
+  PersistentMemoryAllocator::MemoryInfo meminfo4;
+  allocator_->GetMemoryInfo(&meminfo4);
+  EXPECT_GT(meminfo3.free, meminfo4.free);
+
+  PersistentMemoryAllocator::Iterator iter(allocator_);
+  uint32_t type;
+  EXPECT_NE(0U, iter.GetNext(&type));  // Histogram
+  EXPECT_NE(0U, iter.GetNext(&type));  // LinearHistogram
+  EXPECT_NE(0U, iter.GetNext(&type));  // BooleanHistogram
+  EXPECT_NE(0U, iter.GetNext(&type));  // CustomHistogram
+  EXPECT_EQ(0U, iter.GetNext(&type));
+
+  // Create a second allocator and have it access the memory of the first.
+  std::unique_ptr<HistogramBase> recovered;
+  PersistentHistogramAllocator recovery(
+      std::make_unique<PersistentMemoryAllocator>(
+          allocator_memory_.get(), kAllocatorMemorySize, 0, 0, "", false));
+  PersistentHistogramAllocator::Iterator histogram_iter(&recovery);
+
+  recovered = histogram_iter.GetNext();
+  ASSERT_TRUE(recovered);
+  recovered->CheckName("TestHistogram");
+
+  recovered = histogram_iter.GetNext();
+  ASSERT_TRUE(recovered);
+  recovered->CheckName("TestLinearHistogram");
+
+  recovered = histogram_iter.GetNext();
+  ASSERT_TRUE(recovered);
+  recovered->CheckName("TestBooleanHistogram");
+
+  recovered = histogram_iter.GetNext();
+  ASSERT_TRUE(recovered);
+  recovered->CheckName("TestCustomHistogram");
+
+  recovered = histogram_iter.GetNext();
+  EXPECT_FALSE(recovered);
+}
+
+TEST_F(PersistentHistogramAllocatorTest, ConstructPaths) {
+  const FilePath dir_path(FILE_PATH_LITERAL("foo/"));
+  const std::string dir_string =
+      dir_path.NormalizePathSeparators().AsUTF8Unsafe();
+
+  FilePath path = GlobalHistogramAllocator::ConstructFilePath(dir_path, "bar");
+  EXPECT_EQ(dir_string + "bar.pma", path.AsUTF8Unsafe());
+
+  std::string name;
+  Time stamp;
+  ProcessId pid;
+  EXPECT_FALSE(
+      GlobalHistogramAllocator::ParseFilePath(path, &name, nullptr, nullptr));
+  EXPECT_FALSE(
+      GlobalHistogramAllocator::ParseFilePath(path, nullptr, &stamp, nullptr));
+  EXPECT_FALSE(
+      GlobalHistogramAllocator::ParseFilePath(path, nullptr, nullptr, &pid));
+
+  path = GlobalHistogramAllocator::ConstructFilePathForUploadDir(
+      dir_path, "bar", Time::FromTimeT(12345), 6789);
+  EXPECT_EQ(dir_string + "bar-3039-1A85.pma", path.AsUTF8Unsafe());
+  ASSERT_TRUE(
+      GlobalHistogramAllocator::ParseFilePath(path, &name, &stamp, &pid));
+  EXPECT_EQ(name, "bar");
+  EXPECT_EQ(Time::FromTimeT(12345), stamp);
+  EXPECT_EQ(static_cast<ProcessId>(6789), pid);
+}
+
+TEST_F(PersistentHistogramAllocatorTest, CreateWithFile) {
+  const char temp_name[] = "CreateWithFileTest";
+  ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath temp_file = temp_dir.GetPath().AppendASCII(temp_name);
+  const size_t temp_size = 64 << 10;  // 64 KiB
+
+  // Test creation of a new file.
+  GlobalHistogramAllocator::ReleaseForTesting();
+  GlobalHistogramAllocator::CreateWithFile(temp_file, temp_size, 0, temp_name);
+  EXPECT_EQ(std::string(temp_name),
+            GlobalHistogramAllocator::Get()->memory_allocator()->Name());
+
+  // Test re-open of a possibly-existing file.
+  GlobalHistogramAllocator::ReleaseForTesting();
+  GlobalHistogramAllocator::CreateWithFile(temp_file, temp_size, 0, "");
+  EXPECT_EQ(std::string(temp_name),
+            GlobalHistogramAllocator::Get()->memory_allocator()->Name());
+
+  // Test re-open of a known-existing file.
+  GlobalHistogramAllocator::ReleaseForTesting();
+  GlobalHistogramAllocator::CreateWithFile(temp_file, 0, 0, "");
+  EXPECT_EQ(std::string(temp_name),
+            GlobalHistogramAllocator::Get()->memory_allocator()->Name());
+
+  // Final release so file and temp-dir can be removed.
+  GlobalHistogramAllocator::ReleaseForTesting();
+}
+
+TEST_F(PersistentHistogramAllocatorTest, CreateSpareFile) {
+  const char temp_name[] = "CreateSpareFileTest.pma";
+  ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath temp_file = temp_dir.GetPath().AppendASCII(temp_name);
+  const size_t temp_size = 64 << 10;  // 64 KiB
+
+  ASSERT_TRUE(GlobalHistogramAllocator::CreateSpareFile(temp_file, temp_size));
+
+  File file(temp_file, File::FLAG_OPEN | File::FLAG_READ);
+  ASSERT_TRUE(file.IsValid());
+  EXPECT_EQ(static_cast<int64_t>(temp_size), file.GetLength());
+
+  char buffer[256];
+  for (size_t pos = 0; pos < temp_size; pos += sizeof(buffer)) {
+    ASSERT_EQ(static_cast<int>(sizeof(buffer)),
+              file.ReadAtCurrentPos(buffer, sizeof(buffer)));
+    for (size_t i = 0; i < sizeof(buffer); ++i)
+      EXPECT_EQ(0, buffer[i]);
+  }
+}
+
+TEST_F(PersistentHistogramAllocatorTest, StatisticsRecorderMerge) {
+  const char LinearHistogramName[] = "SRTLinearHistogram";
+  const char SparseHistogramName[] = "SRTSparseHistogram";
+  const size_t starting_sr_count = StatisticsRecorder::GetHistogramCount();
+
+  // Create a local StatisticsRecorder in which the newly created histograms
+  // will be recorded. The global allocator must be replaced afterwards
+  // because the act of releasing it causes the active SR to forget about all
+  // histograms in the released memory.
+  std::unique_ptr<StatisticsRecorder> local_sr =
+      StatisticsRecorder::CreateTemporaryForTesting();
+  EXPECT_EQ(0U, StatisticsRecorder::GetHistogramCount());
+  std::unique_ptr<GlobalHistogramAllocator> old_allocator =
+      GlobalHistogramAllocator::ReleaseForTesting();
+  GlobalHistogramAllocator::CreateWithLocalMemory(kAllocatorMemorySize, 0, "");
+  ASSERT_TRUE(GlobalHistogramAllocator::Get());
+
+  // Create a linear histogram for merge testing.
+  HistogramBase* histogram1 =
+      LinearHistogram::FactoryGet(LinearHistogramName, 1, 10, 10, 0);
+  ASSERT_TRUE(histogram1);
+  EXPECT_EQ(1U, StatisticsRecorder::GetHistogramCount());
+  histogram1->Add(3);
+  histogram1->Add(1);
+  histogram1->Add(4);
+  histogram1->AddCount(1, 4);
+  histogram1->Add(6);
+
+  // Create a sparse histogram for merge testing.
+  HistogramBase* histogram2 =
+      SparseHistogram::FactoryGet(SparseHistogramName, 0);
+  ASSERT_TRUE(histogram2);
+  EXPECT_EQ(2U, StatisticsRecorder::GetHistogramCount());
+  histogram2->Add(3);
+  histogram2->Add(1);
+  histogram2->Add(4);
+  histogram2->AddCount(1, 4);
+  histogram2->Add(6);
+
+  // Destroy the local SR and ensure that we're back to the initial state and
+  // restore the global allocator. Histograms created in the local SR will
+  // become unmanaged.
+  std::unique_ptr<GlobalHistogramAllocator> new_allocator =
+      GlobalHistogramAllocator::ReleaseForTesting();
+  local_sr.reset();
+  EXPECT_EQ(starting_sr_count, StatisticsRecorder::GetHistogramCount());
+  GlobalHistogramAllocator::Set(std::move(old_allocator));
+
+  // Create a "recovery" allocator using the same memory as the local one.
+  PersistentHistogramAllocator recovery1(
+      std::make_unique<PersistentMemoryAllocator>(
+          const_cast<void*>(new_allocator->memory_allocator()->data()),
+          new_allocator->memory_allocator()->size(), 0, 0, "", false));
+  PersistentHistogramAllocator::Iterator histogram_iter1(&recovery1);
+
+  // Get the histograms that were created locally (and forgotten) and merge
+  // them into the global SR. New objects will be created.
+  std::unique_ptr<HistogramBase> recovered;
+  while (true) {
+    recovered = histogram_iter1.GetNext();
+    if (!recovered)
+      break;
+
+    recovery1.MergeHistogramDeltaToStatisticsRecorder(recovered.get());
+    HistogramBase* found =
+        StatisticsRecorder::FindHistogram(recovered->histogram_name());
+    EXPECT_NE(recovered.get(), found);
+  }
+  EXPECT_EQ(starting_sr_count + 2, StatisticsRecorder::GetHistogramCount());
+
+  // Check the merged histograms for accuracy.
+  HistogramBase* found = StatisticsRecorder::FindHistogram(LinearHistogramName);
+  ASSERT_TRUE(found);
+  std::unique_ptr<HistogramSamples> snapshot = found->SnapshotSamples();
+  EXPECT_EQ(found->SnapshotSamples()->TotalCount(), snapshot->TotalCount());
+  EXPECT_EQ(1, snapshot->GetCount(3));
+  EXPECT_EQ(5, snapshot->GetCount(1));
+  EXPECT_EQ(1, snapshot->GetCount(4));
+  EXPECT_EQ(1, snapshot->GetCount(6));
+
+  found = StatisticsRecorder::FindHistogram(SparseHistogramName);
+  ASSERT_TRUE(found);
+  snapshot = found->SnapshotSamples();
+  EXPECT_EQ(found->SnapshotSamples()->TotalCount(), snapshot->TotalCount());
+  EXPECT_EQ(1, snapshot->GetCount(3));
+  EXPECT_EQ(5, snapshot->GetCount(1));
+  EXPECT_EQ(1, snapshot->GetCount(4));
+  EXPECT_EQ(1, snapshot->GetCount(6));
+
+  // Perform additional histogram increments.
+  histogram1->AddCount(1, 3);
+  histogram1->Add(6);
+  histogram2->AddCount(1, 3);
+  histogram2->Add(7);
+
+  // Do another merge.
+  PersistentHistogramAllocator recovery2(
+      std::make_unique<PersistentMemoryAllocator>(
+          const_cast<void*>(new_allocator->memory_allocator()->data()),
+          new_allocator->memory_allocator()->size(), 0, 0, "", false));
+  PersistentHistogramAllocator::Iterator histogram_iter2(&recovery2);
+  while (true) {
+    recovered = histogram_iter2.GetNext();
+    if (!recovered)
+      break;
+    recovery2.MergeHistogramDeltaToStatisticsRecorder(recovered.get());
+  }
+  EXPECT_EQ(starting_sr_count + 2, StatisticsRecorder::GetHistogramCount());
+
+  // And verify.
+  found = StatisticsRecorder::FindHistogram(LinearHistogramName);
+  snapshot = found->SnapshotSamples();
+  EXPECT_EQ(found->SnapshotSamples()->TotalCount(), snapshot->TotalCount());
+  EXPECT_EQ(1, snapshot->GetCount(3));
+  EXPECT_EQ(8, snapshot->GetCount(1));
+  EXPECT_EQ(1, snapshot->GetCount(4));
+  EXPECT_EQ(2, snapshot->GetCount(6));
+
+  found = StatisticsRecorder::FindHistogram(SparseHistogramName);
+  snapshot = found->SnapshotSamples();
+  EXPECT_EQ(found->SnapshotSamples()->TotalCount(), snapshot->TotalCount());
+  EXPECT_EQ(1, snapshot->GetCount(3));
+  EXPECT_EQ(8, snapshot->GetCount(1));
+  EXPECT_EQ(1, snapshot->GetCount(4));
+  EXPECT_EQ(1, snapshot->GetCount(6));
+  EXPECT_EQ(1, snapshot->GetCount(7));
+}
+
+TEST_F(PersistentHistogramAllocatorTest, RangesDeDuplication) {
+  // This corresponds to the "ranges_ref" field of the PersistentHistogramData
+  // structure defined (privately) inside persistent_histogram_allocator.cc.
+  const int kRangesRefIndex = 5;
+
+  // Create two histograms with the same ranges.
+  HistogramBase* histogram1 =
+      Histogram::FactoryGet("TestHistogram1", 1, 1000, 10, 0);
+  HistogramBase* histogram2 =
+      Histogram::FactoryGet("TestHistogram2", 1, 1000, 10, 0);
+  const uint32_t ranges_ref = static_cast<Histogram*>(histogram1)
+                                  ->bucket_ranges()
+                                  ->persistent_reference();
+  ASSERT_NE(0U, ranges_ref);
+  EXPECT_EQ(ranges_ref, static_cast<Histogram*>(histogram2)
+                            ->bucket_ranges()
+                            ->persistent_reference());
+
+  // Make sure that the persistent data record is also correct. Two histograms
+  // will be fetched; other allocations are not "iterable".
+  PersistentMemoryAllocator::Iterator iter(allocator_);
+  uint32_t type;
+  uint32_t ref1 = iter.GetNext(&type);
+  uint32_t ref2 = iter.GetNext(&type);
+  EXPECT_EQ(0U, iter.GetNext(&type));
+  EXPECT_NE(0U, ref1);
+  EXPECT_NE(0U, ref2);
+  EXPECT_NE(ref1, ref2);
+
+  uint32_t* data1 =
+      allocator_->GetAsArray<uint32_t>(ref1, 0, kRangesRefIndex + 1);
+  uint32_t* data2 =
+      allocator_->GetAsArray<uint32_t>(ref2, 0, kRangesRefIndex + 1);
+  EXPECT_EQ(ranges_ref, data1[kRangesRefIndex]);
+  EXPECT_EQ(ranges_ref, data2[kRangesRefIndex]);
+}
+
+}  // namespace base
diff --git a/base/metrics/persistent_histogram_storage.cc b/base/metrics/persistent_histogram_storage.cc
new file mode 100644
index 0000000..e2a56d7
--- /dev/null
+++ b/base/metrics/persistent_histogram_storage.cc
@@ -0,0 +1,103 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/persistent_histogram_storage.h"
+
+#include "base/files/file_util.h"
+#include "base/files/important_file_writer.h"
+#include "base/logging.h"
+#include "base/metrics/persistent_histogram_allocator.h"
+#include "base/metrics/persistent_memory_allocator.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+
+namespace {
+
+constexpr size_t kAllocSize = 1 << 20;  // 1 MiB
+
+}  // namespace
+
+namespace base {
+
+PersistentHistogramStorage::PersistentHistogramStorage(
+    StringPiece allocator_name,
+    StorageDirManagement storage_dir_management)
+    : storage_dir_management_(storage_dir_management) {
+  DCHECK(!allocator_name.empty());
+  DCHECK(IsStringASCII(allocator_name));
+
+  GlobalHistogramAllocator::CreateWithLocalMemory(kAllocSize,
+                                                  0,  // No identifier.
+                                                  allocator_name);
+  GlobalHistogramAllocator::Get()->CreateTrackingHistograms(allocator_name);
+}
+
+PersistentHistogramStorage::~PersistentHistogramStorage() {
+  PersistentHistogramAllocator* allocator = GlobalHistogramAllocator::Get();
+  allocator->UpdateTrackingHistograms();
+
+  // TODO(chengx): Investigate making the early return depend on whether there
+  // are metrics to report at this point or not.
+  if (disabled_)
+    return;
+
+  // Stop if the storage base directory has not been properly set.
+  if (storage_base_dir_.empty()) {
+    LOG(ERROR)
+        << "Could not write \"" << allocator->Name()
+        << "\" persistent histograms to file as the storage base directory "
+           "is not properly set.";
+    return;
+  }
+
+  FilePath storage_dir = storage_base_dir_.AppendASCII(allocator->Name());
+
+  switch (storage_dir_management_) {
+    case StorageDirManagement::kCreate:
+      if (!CreateDirectory(storage_dir)) {
+        LOG(ERROR)
+            << "Could not write \"" << allocator->Name()
+            << "\" persistent histograms to file as the storage directory "
+               "cannot be created.";
+        return;
+      }
+      break;
+    case StorageDirManagement::kUseExisting:
+      if (!DirectoryExists(storage_dir)) {
+        // When the consumer of this class decides to use an existing storage
+        // directory, it should ensure the directory's existence if it's
+        // essential.
+        LOG(ERROR)
+            << "Could not write \"" << allocator->Name()
+            << "\" persistent histograms to file as the storage directory "
+               "does not exist.";
+        return;
+      }
+      break;
+  }
+
+  // Save data using the current time as the filename. The actual filename
+  // doesn't matter (so long as it ends with the correct extension) but this
+  // works as well as anything.
+  Time::Exploded exploded;
+  Time::Now().LocalExplode(&exploded);
+  const FilePath file_path =
+      storage_dir
+          .AppendASCII(StringPrintf("%04d%02d%02d%02d%02d%02d", exploded.year,
+                                    exploded.month, exploded.day_of_month,
+                                    exploded.hour, exploded.minute,
+                                    exploded.second))
+          .AddExtension(PersistentMemoryAllocator::kFileExtension);
+
+  StringPiece contents(static_cast<const char*>(allocator->data()),
+                       allocator->used());
+  if (!ImportantFileWriter::WriteFileAtomically(file_path, contents)) {
+    LOG(ERROR) << "Persistent histograms fail to write to file: "
+               << file_path.value();
+  }
+}
+
+}  // namespace base
diff --git a/base/metrics/persistent_histogram_storage.h b/base/metrics/persistent_histogram_storage.h
new file mode 100644
index 0000000..397236d
--- /dev/null
+++ b/base/metrics/persistent_histogram_storage.h
@@ -0,0 +1,68 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_PERSISTENT_HISTOGRAM_STORAGE_H_
+#define BASE_METRICS_PERSISTENT_HISTOGRAM_STORAGE_H_
+
+#include "base/base_export.h"
+#include "base/files/file_path.h"
+#include "base/macros.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+// This class creates a fixed-size block of persistent memory in which
+// histograms can be stored. When a PersistentHistogramStorage is destroyed,
+// histograms recorded during its lifetime are persisted in the directory
+// |storage_base_dir_|/|allocator_name| (see the ctor for allocator_name).
+// Histograms are not persisted if the storage directory does not exist on
+// destruction. PersistentHistogramStorage should be instantiated as early as
+// possible in the process lifetime and should never be instantiated again.
+// Persisted histograms will eventually be reported by Chrome.
+class BASE_EXPORT PersistentHistogramStorage {
+ public:
+  enum class StorageDirManagement { kCreate, kUseExisting };
+
+  // Creates a process-wide storage location for histograms that will be
+  // written to a file within a directory provided by set_storage_base_dir()
+  // on destruction.
+  // The |allocator_name| is used both as the internal name for the allocator
+  // and as the leaf directory name for the file to which the histograms are
+  // persisted. The string must be ASCII.
+  // |storage_dir_management| specifies if this instance reuses an existing
+  // storage directory, or is responsible for creating one.
+  PersistentHistogramStorage(StringPiece allocator_name,
+                             StorageDirManagement storage_dir_management);
+
+  ~PersistentHistogramStorage();
+
+  // The storage directory isn't always known during initial construction so
+  // it's set separately. The last one wins if there are multiple calls to this
+  // method.
+  void set_storage_base_dir(const FilePath& storage_base_dir) {
+    storage_base_dir_ = storage_base_dir;
+  }
+
+  // Disables histogram storage.
+  void Disable() { disabled_ = true; }
+
+ private:
+  // Metrics files are written into directory
+  // |storage_base_dir_|/|allocator_name| (see the ctor for allocator_name).
+  FilePath storage_base_dir_;
+
+  // The setting of the storage directory management.
+  const StorageDirManagement storage_dir_management_;
+
+  // A flag indicating whether histogram storage is disabled. It starts as
+  // false but can be set to true by a caller that decides to throw away its
+  // histogram data.
+  bool disabled_ = false;
+
+  DISALLOW_COPY_AND_ASSIGN(PersistentHistogramStorage);
+};
+
+}  // namespace base
+
+#endif  // BASE_METRICS_PERSISTENT_HISTOGRAM_STORAGE_H_
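
A usage sketch consistent with the comments above; the "ToolMetrics" name and base directory are placeholders chosen for illustration:

```cpp
#include "base/files/file_path.h"
#include "base/metrics/persistent_histogram_storage.h"

int main(int argc, char** argv) {
  // Instantiate as early as possible so histograms land in the allocator.
  base::PersistentHistogramStorage storage(
      "ToolMetrics",
      base::PersistentHistogramStorage::StorageDirManagement::kCreate);

  // The base directory is often not known until after initialization; the
  // last call before destruction wins.
  storage.set_storage_base_dir(
      base::FilePath(FILE_PATH_LITERAL("/tmp/metrics")));

  // ... run the tool; recorded histograms are persisted on return ...
  return 0;  // |storage|'s destructor writes the timestamped .pma file.
}
```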
diff --git a/base/metrics/persistent_histogram_storage_unittest.cc b/base/metrics/persistent_histogram_storage_unittest.cc
new file mode 100644
index 0000000..0b9b1ce
--- /dev/null
+++ b/base/metrics/persistent_histogram_storage_unittest.cc
@@ -0,0 +1,78 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/persistent_histogram_storage.h"
+
+#include <memory>
+
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/files/scoped_temp_dir.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+// Name of the allocator for storing histograms.
+constexpr char kTestHistogramAllocatorName[] = "TestMetrics";
+
+}  // namespace
+
+class PersistentHistogramStorageTest : public testing::Test {
+ protected:
+  PersistentHistogramStorageTest() = default;
+  ~PersistentHistogramStorageTest() override = default;
+
+  // Creates a unique temporary directory, and sets the test storage directory.
+  void SetUp() override {
+    ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
+    test_storage_dir_ =
+        temp_dir_path().AppendASCII(kTestHistogramAllocatorName);
+  }
+
+  // Gets the path to the temporary directory.
+  const FilePath& temp_dir_path() { return temp_dir_.GetPath(); }
+
+  const FilePath& test_storage_dir() { return test_storage_dir_; }
+
+ private:
+  // A temporary directory where all file IO operations take place.
+  ScopedTempDir temp_dir_;
+
+  // The directory into which metrics files are written.
+  FilePath test_storage_dir_;
+
+  DISALLOW_COPY_AND_ASSIGN(PersistentHistogramStorageTest);
+};
+
+// TODO(chengx): Re-enable the test on OS_IOS after issue 836789 is fixed.
+// PersistentHistogramStorage is only used on OS_WIN now, so disabling this
+// test on OS_IOS is fine.
+#if !defined(OS_NACL) && !defined(OS_IOS)
+TEST_F(PersistentHistogramStorageTest, HistogramWriteTest) {
+  auto persistent_histogram_storage =
+      std::make_unique<PersistentHistogramStorage>(
+          kTestHistogramAllocatorName,
+          PersistentHistogramStorage::StorageDirManagement::kCreate);
+
+  persistent_histogram_storage->set_storage_base_dir(temp_dir_path());
+
+  // Log some random data.
+  UMA_HISTOGRAM_BOOLEAN("Some.Test.Metric", true);
+
+  // Deleting the object causes the data to be written to the disk.
+  persistent_histogram_storage.reset();
+
+  // The storage directory and the histogram file are created during the
+  // destruction of the PersistentHistogramStorage instance.
+  EXPECT_TRUE(DirectoryExists(test_storage_dir()));
+  EXPECT_FALSE(IsDirectoryEmpty(test_storage_dir()));
+}
+#endif  // !defined(OS_NACL) && !defined(OS_IOS)
+
+}  // namespace base
diff --git a/base/metrics/persistent_memory_allocator.cc b/base/metrics/persistent_memory_allocator.cc
new file mode 100644
index 0000000..9b18a00
--- /dev/null
+++ b/base/metrics/persistent_memory_allocator.cc
@@ -0,0 +1,1204 @@
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/persistent_memory_allocator.h"
+
+#include <assert.h>
+#include <algorithm>
+
+#if defined(OS_WIN)
+#include <windows.h>
+#include "winbase.h"
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include <sys/mman.h>
+#endif
+
+#include "base/files/memory_mapped_file.h"
+#include "base/logging.h"
+#include "base/memory/shared_memory.h"
+#include "base/metrics/histogram_functions.h"
+#include "base/metrics/sparse_histogram.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/sys_info.h"
+#include "base/threading/thread_restrictions.h"
+#include "build/build_config.h"
+
+namespace {
+
+// Limit of memory segment size. It has to fit in an unsigned 32-bit number
+// and should be a power of 2 in order to accommodate almost any page size.
+const uint32_t kSegmentMaxSize = 1 << 30;  // 1 GiB
+
+// A constant (random) value placed in the shared metadata to identify
+// an already initialized memory segment.
+const uint32_t kGlobalCookie = 0x408305DC;
+
+// The current version of the metadata. If updates are made that change
+// the metadata, the version number can be queried to operate in a backward-
+// compatible manner until the memory segment is completely re-initialized.
+const uint32_t kGlobalVersion = 2;
+
+// Constant values placed in the block headers to indicate its state.
+const uint32_t kBlockCookieFree = 0;
+const uint32_t kBlockCookieQueue = 1;
+const uint32_t kBlockCookieWasted = (uint32_t)-1;
+const uint32_t kBlockCookieAllocated = 0xC8799269;
+
+// TODO(bcwhite): When acceptable, consider moving flags to std::atomic<char>
+// types rather than combined bitfield.
+
+// Flags stored in the flags_ field of the SharedMetadata structure below.
+enum : int {
+  kFlagCorrupt = 1 << 0,
+  kFlagFull    = 1 << 1
+};
+
+// Errors that are logged in "errors" histogram.
+enum AllocatorError : int {
+  kMemoryIsCorrupt = 1,
+};
+
+bool CheckFlag(const volatile std::atomic<uint32_t>* flags, int flag) {
+  uint32_t loaded_flags = flags->load(std::memory_order_relaxed);
+  return (loaded_flags & flag) != 0;
+}
+
+void SetFlag(volatile std::atomic<uint32_t>* flags, int flag) {
+  uint32_t loaded_flags = flags->load(std::memory_order_relaxed);
+  for (;;) {
+    uint32_t new_flags = (loaded_flags & ~flag) | flag;
+    // In the failure case, the actual "flags" value is stored back into
+    // loaded_flags. These accesses are "relaxed" because they are completely
+    // independent of all other values.
+    if (flags->compare_exchange_weak(loaded_flags, new_flags,
+                                     std::memory_order_relaxed,
+                                     std::memory_order_relaxed)) {
+      break;
+    }
+  }
+}
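
As an aside, because the update above only ever sets bits, the compare-and-swap loop is functionally equivalent to a single atomic OR. A possible alternative, shown only for comparison and not what the code does:

```cpp
void SetFlagWithFetchOr(volatile std::atomic<uint32_t>* flags, int flag) {
  // Behaviorally equivalent to SetFlag(): sets |flag| without clearing
  // any other bits, using the same relaxed ordering.
  flags->fetch_or(static_cast<uint32_t>(flag), std::memory_order_relaxed);
}
```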
+
+}  // namespace
+
+namespace base {
+
+// All allocations and data-structures must be aligned to this byte boundary.
+// Alignment as large as the physical bus between CPU and RAM is _required_
+// for some architectures, is simply more efficient on other CPUs, and
+// generally a Good Idea(tm) for all platforms as it reduces/eliminates the
+// chance that a type will span cache lines. Alignment mustn't be less
+// than 8 to ensure proper alignment for all types. The rest is a balance
+// between reducing spans across multiple cache lines and wasted space spent
+// padding out allocations. An alignment of 16 would ensure that the block
+// header structure always sits in a single cache line. An average of about
+// 1/2 this value will be wasted with every allocation.
+const uint32_t PersistentMemoryAllocator::kAllocAlignment = 8;
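
To make the alignment arithmetic concrete, an illustrative helper (not part of the change) that rounds a requested size up to kAllocAlignment with the usual power-of-two trick:

```cpp
// With kAllocAlignment == 8: RoundUpToAlignment(13) == 16 and
// RoundUpToAlignment(16) == 16.
uint32_t RoundUpToAlignment(uint32_t size) {
  const uint32_t alignment = PersistentMemoryAllocator::kAllocAlignment;
  return (size + alignment - 1) & ~(alignment - 1);
}
```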
+
+// The block-header is placed at the top of every allocation within the
+// segment to describe the data that follows it.
+struct PersistentMemoryAllocator::BlockHeader {
+  uint32_t size;       // Number of bytes in this block, including header.
+  uint32_t cookie;     // Constant value indicating completed allocation.
+  std::atomic<uint32_t> type_id;  // Arbitrary number indicating data type.
+  std::atomic<uint32_t> next;     // Pointer to the next block when iterating.
+};
+
+// The shared metadata exists once at the top of the memory segment to
+// describe the state of the allocator to all processes. The size of this
+// structure must be a multiple of 64-bits to ensure compatibility between
+// architectures.
+struct PersistentMemoryAllocator::SharedMetadata {
+  uint32_t cookie;     // Some value that indicates complete initialization.
+  uint32_t size;       // Total size of memory segment.
+  uint32_t page_size;  // Paging size within memory segment.
+  uint32_t version;    // Version code so upgrades don't break.
+  uint64_t id;         // Arbitrary ID number given by creator.
+  uint32_t name;       // Reference to stored name string.
+  uint32_t padding1;   // Pad-out read-only data to 64-bit alignment.
+
+  // Above is read-only after first construction. Below may be changed and
+  // so must be marked "volatile" to provide correct inter-process behavior.
+
+  // State of the memory, plus some padding to keep alignment.
+  volatile std::atomic<uint8_t> memory_state;  // MemoryState enum values.
+  uint8_t padding2[3];
+
+  // Bitfield of information flags. Access to this should be done through
+  // the CheckFlag() and SetFlag() methods defined above.
+  volatile std::atomic<uint32_t> flags;
+
+  // Offset/reference to first free space in segment.
+  volatile std::atomic<uint32_t> freeptr;
+
+  // The "iterable" queue is an M&S Queue as described here, append-only:
+  // https://www.research.ibm.com/people/m/michael/podc-1996.pdf
+  // |queue| needs to be 64-bit aligned and is itself a multiple of 64 bits.
+  volatile std::atomic<uint32_t> tailptr;  // Last block of iteration queue.
+  volatile BlockHeader queue;   // Empty block for linked-list head/tail.
+};
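
The comment above requires that the structure's size be a multiple of 64 bits. An illustrative compile-time check, not added by this change (it would need to live where the nested struct is accessible, e.g. inside the class itself):

```cpp
static_assert(sizeof(SharedMetadata) % 8 == 0,
              "SharedMetadata size must be a multiple of 64 bits");
```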
+
+// The "queue" block header is used to detect "last node" so that zero/null
+// can be used to indicate that it hasn't been added at all. It is part of
+// the SharedMetadata structure which itself is always located at offset zero.
+const PersistentMemoryAllocator::Reference
+    PersistentMemoryAllocator::kReferenceQueue =
+        offsetof(SharedMetadata, queue);
+
+const base::FilePath::CharType PersistentMemoryAllocator::kFileExtension[] =
+    FILE_PATH_LITERAL(".pma");
+
+
+PersistentMemoryAllocator::Iterator::Iterator(
+    const PersistentMemoryAllocator* allocator)
+    : allocator_(allocator), last_record_(kReferenceQueue), record_count_(0) {}
+
+PersistentMemoryAllocator::Iterator::Iterator(
+    const PersistentMemoryAllocator* allocator,
+    Reference starting_after)
+    : allocator_(allocator), last_record_(0), record_count_(0) {
+  Reset(starting_after);
+}
+
+void PersistentMemoryAllocator::Iterator::Reset() {
+  last_record_.store(kReferenceQueue, std::memory_order_relaxed);
+  record_count_.store(0, std::memory_order_relaxed);
+}
+
+void PersistentMemoryAllocator::Iterator::Reset(Reference starting_after) {
+  if (starting_after == 0) {
+    Reset();
+    return;
+  }
+
+  last_record_.store(starting_after, std::memory_order_relaxed);
+  record_count_.store(0, std::memory_order_relaxed);
+
+  // Ensure that the starting point is a valid, iterable block (meaning it can
+  // be read and has a non-zero "next" pointer).
+  const volatile BlockHeader* block =
+      allocator_->GetBlock(starting_after, 0, 0, false, false);
+  if (!block || block->next.load(std::memory_order_relaxed) == 0) {
+    NOTREACHED();
+    last_record_.store(kReferenceQueue, std::memory_order_release);
+  }
+}
+
+PersistentMemoryAllocator::Reference
+PersistentMemoryAllocator::Iterator::GetLast() {
+  Reference last = last_record_.load(std::memory_order_relaxed);
+  if (last == kReferenceQueue)
+    return kReferenceNull;
+  return last;
+}
+
+PersistentMemoryAllocator::Reference
+PersistentMemoryAllocator::Iterator::GetNext(uint32_t* type_return) {
+  // Make a copy of the existing count of found-records, acquiring all changes
+  // made to the allocator, notably "freeptr" (see comment in loop for why
+  // the load of that value cannot be moved above here) that occurred during
+  // any previous runs of this method, including those by parallel threads
+  // that interrupted it. It pairs with the Release at the end of this method.
+  //
+  // Otherwise, if the compiler were to arrange the two loads such that
+  // "count" was fetched _after_ "freeptr" then it would be possible for
+  // this thread to be interrupted between them while other threads perform
+  // multiple allocations, make-iterables, and iterations (with the included
+  // increment of |record_count_|) culminating in the check at the bottom
+  // mistakenly determining that a loop exists. Isn't this stuff fun?
+  uint32_t count = record_count_.load(std::memory_order_acquire);
+
+  Reference last = last_record_.load(std::memory_order_acquire);
+  Reference next;
+  while (true) {
+    const volatile BlockHeader* block =
+        allocator_->GetBlock(last, 0, 0, true, false);
+    if (!block)  // Invalid iterator state.
+      return kReferenceNull;
+
+    // The compiler and CPU can freely reorder all memory accesses on which
+    // there are no dependencies. It could, for example, move the load of
+    // "freeptr" to above this point because there are no explicit dependencies
+    // between it and "next". If it did, however, then another block could
+    // be queued after that but before the following load meaning there is
+  // one more queued block than the future "detect loop by having more
+  // blocks than could fit before freeptr" check will allow.
+    //
+    // By "acquiring" the "next" value here, it's synchronized to the enqueue
+    // of the node which in turn is synchronized to the allocation (which sets
+    // freeptr). Thus, the scenario above cannot happen.
+    next = block->next.load(std::memory_order_acquire);
+    if (next == kReferenceQueue)  // No next allocation in queue.
+      return kReferenceNull;
+    block = allocator_->GetBlock(next, 0, 0, false, false);
+    if (!block) {  // Memory is corrupt.
+      allocator_->SetCorrupt();
+      return kReferenceNull;
+    }
+
+    // Update the "last_record" pointer to be the reference being returned.
+    // If it fails then another thread has already iterated past it so loop
+    // again. Failing will also load the existing value into "last" so there
+    // is no need to do another such load when the while-loop restarts. A
+    // "strong" compare-exchange is used because failing unnecessarily would
+    // mean repeating some fairly costly validations above.
+    if (last_record_.compare_exchange_strong(
+            last, next, std::memory_order_acq_rel, std::memory_order_acquire)) {
+      *type_return = block->type_id.load(std::memory_order_relaxed);
+      break;
+    }
+  }
+
+  // Memory corruption could cause a loop in the list. Such must be detected
+  // so as to not cause an infinite loop in the caller. This is done by simply
+  // making sure it doesn't iterate more times than the absolute maximum
+  // number of allocations that could have been made. Callers are likely
+  // to loop multiple times before it is detected but at least it stops.
+  const uint32_t freeptr = std::min(
+      allocator_->shared_meta()->freeptr.load(std::memory_order_relaxed),
+      allocator_->mem_size_);
+  const uint32_t max_records =
+      freeptr / (sizeof(BlockHeader) + kAllocAlignment);
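+  // Worked example: with the 16-byte BlockHeader and kAllocAlignment of 8
+  // asserted elsewhere in this file, every allocation occupies at least
+  // 24 bytes, so freeptr / 24 bounds the number of possible records.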
+  if (count > max_records) {
+    allocator_->SetCorrupt();
+    return kReferenceNull;
+  }
+
+  // Increment the count and release the changes made above. It pairs with
+  // the Acquire at the top of this method. Note that this operation is not
+  // strictly synchronized with fetching of the object to return, which would
+  // have to be done inside the loop and is somewhat complicated to achieve.
+  // It does not matter if it falls behind temporarily so long as it never
+  // gets ahead.
+  record_count_.fetch_add(1, std::memory_order_release);
+  return next;
+}
+
+PersistentMemoryAllocator::Reference
+PersistentMemoryAllocator::Iterator::GetNextOfType(uint32_t type_match) {
+  Reference ref;
+  uint32_t type_found;
+  while ((ref = GetNext(&type_found)) != 0) {
+    if (type_found == type_match)
+      return ref;
+  }
+  return kReferenceNull;
+}
+
+
+// static
+bool PersistentMemoryAllocator::IsMemoryAcceptable(const void* base,
+                                                   size_t size,
+                                                   size_t page_size,
+                                                   bool readonly) {
+  return ((base && reinterpret_cast<uintptr_t>(base) % kAllocAlignment == 0) &&
+          (size >= sizeof(SharedMetadata) && size <= kSegmentMaxSize) &&
+          (size % kAllocAlignment == 0 || readonly) &&
+          (page_size == 0 || size % page_size == 0 || readonly));
+}
+
+PersistentMemoryAllocator::PersistentMemoryAllocator(void* base,
+                                                     size_t size,
+                                                     size_t page_size,
+                                                     uint64_t id,
+                                                     base::StringPiece name,
+                                                     bool readonly)
+    : PersistentMemoryAllocator(Memory(base, MEM_EXTERNAL),
+                                size,
+                                page_size,
+                                id,
+                                name,
+                                readonly) {}
+
+PersistentMemoryAllocator::PersistentMemoryAllocator(Memory memory,
+                                                     size_t size,
+                                                     size_t page_size,
+                                                     uint64_t id,
+                                                     base::StringPiece name,
+                                                     bool readonly)
+    : mem_base_(static_cast<char*>(memory.base)),
+      mem_type_(memory.type),
+      mem_size_(static_cast<uint32_t>(size)),
+      mem_page_(static_cast<uint32_t>((page_size ? page_size : size))),
+#if defined(OS_NACL)
+      vm_page_size_(4096U),  // SysInfo is not built for NACL.
+#else
+      vm_page_size_(SysInfo::VMAllocationGranularity()),
+#endif
+      readonly_(readonly),
+      corrupt_(0),
+      allocs_histogram_(nullptr),
+      used_histogram_(nullptr),
+      errors_histogram_(nullptr) {
+  // These asserts ensure that the structures are 32/64-bit agnostic and meet
+  // all the requirements of use within the allocator. They access private
+  // definitions and so cannot be moved to the global scope.
+  static_assert(sizeof(PersistentMemoryAllocator::BlockHeader) == 16,
+                "struct is not portable across different natural word widths");
+  static_assert(sizeof(PersistentMemoryAllocator::SharedMetadata) == 64,
+                "struct is not portable across different natural word widths");
+
+  static_assert(sizeof(BlockHeader) % kAllocAlignment == 0,
+                "BlockHeader is not a multiple of kAllocAlignment");
+  static_assert(sizeof(SharedMetadata) % kAllocAlignment == 0,
+                "SharedMetadata is not a multiple of kAllocAlignment");
+  static_assert(kReferenceQueue % kAllocAlignment == 0,
+                "\"queue\" is not aligned properly; must be at end of struct");
+
+  // Ensure that memory segment is of acceptable size.
+  CHECK(IsMemoryAcceptable(memory.base, size, page_size, readonly));
+
+  // These atomics operate inter-process and so must be lock-free. The local
+  // casts are to make sure it can be evaluated at compile time to a constant.
+  CHECK(((SharedMetadata*)nullptr)->freeptr.is_lock_free());
+  CHECK(((SharedMetadata*)nullptr)->flags.is_lock_free());
+  CHECK(((BlockHeader*)nullptr)->next.is_lock_free());
+  CHECK(corrupt_.is_lock_free());
+
+  if (shared_meta()->cookie != kGlobalCookie) {
+    if (readonly) {
+      SetCorrupt();
+      return;
+    }
+
+    // This block is only executed when a completely new memory segment is
+    // being initialized. It's unshared and single-threaded...
+    volatile BlockHeader* const first_block =
+        reinterpret_cast<volatile BlockHeader*>(mem_base_ +
+                                                sizeof(SharedMetadata));
+    if (shared_meta()->cookie != 0 ||
+        shared_meta()->size != 0 ||
+        shared_meta()->version != 0 ||
+        shared_meta()->freeptr.load(std::memory_order_relaxed) != 0 ||
+        shared_meta()->flags.load(std::memory_order_relaxed) != 0 ||
+        shared_meta()->id != 0 ||
+        shared_meta()->name != 0 ||
+        shared_meta()->tailptr != 0 ||
+        shared_meta()->queue.cookie != 0 ||
+        shared_meta()->queue.next.load(std::memory_order_relaxed) != 0 ||
+        first_block->size != 0 ||
+        first_block->cookie != 0 ||
+        first_block->type_id.load(std::memory_order_relaxed) != 0 ||
+        first_block->next != 0) {
+      // ...or something malicious has been playing with the metadata.
+      SetCorrupt();
+    }
+
+    // This is still safe to do even if corruption has been detected.
+    shared_meta()->cookie = kGlobalCookie;
+    shared_meta()->size = mem_size_;
+    shared_meta()->page_size = mem_page_;
+    shared_meta()->version = kGlobalVersion;
+    shared_meta()->id = id;
+    shared_meta()->freeptr.store(sizeof(SharedMetadata),
+                                 std::memory_order_release);
+
+    // Set up the queue of iterable allocations.
+    shared_meta()->queue.size = sizeof(BlockHeader);
+    shared_meta()->queue.cookie = kBlockCookieQueue;
+    shared_meta()->queue.next.store(kReferenceQueue, std::memory_order_release);
+    shared_meta()->tailptr.store(kReferenceQueue, std::memory_order_release);
+
+    // Allocate space for the name so other processes can learn it.
+    if (!name.empty()) {
+      const size_t name_length = name.length() + 1;
+      shared_meta()->name = Allocate(name_length, 0);
+      char* name_cstr = GetAsArray<char>(shared_meta()->name, 0, name_length);
+      if (name_cstr)
+        memcpy(name_cstr, name.data(), name.length());
+    }
+
+    shared_meta()->memory_state.store(MEMORY_INITIALIZED,
+                                      std::memory_order_release);
+  } else {
+    if (shared_meta()->size == 0 || shared_meta()->version != kGlobalVersion ||
+        shared_meta()->freeptr.load(std::memory_order_relaxed) == 0 ||
+        shared_meta()->tailptr == 0 || shared_meta()->queue.cookie == 0 ||
+        shared_meta()->queue.next.load(std::memory_order_relaxed) == 0) {
+      SetCorrupt();
+    }
+    if (!readonly) {
+      // The allocator is attaching to a previously initialized segment of
+      // memory. If the initialization parameters differ, make the best of it
+      // by reducing the local construction parameters to match those of
+      // the actual memory area. This ensures that the local object never
+      // tries to write outside of the original bounds.
+      // Because the fields are const to ensure that no code other than the
+      // constructor makes changes to them as well as to give optimization
+      // hints to the compiler, it's necessary to const-cast them for changes
+      // here.
+      if (shared_meta()->size < mem_size_)
+        *const_cast<uint32_t*>(&mem_size_) = shared_meta()->size;
+      if (shared_meta()->page_size < mem_page_)
+        *const_cast<uint32_t*>(&mem_page_) = shared_meta()->page_size;
+
+      // Ensure that settings are still valid after the above adjustments.
+      if (!IsMemoryAcceptable(memory.base, mem_size_, mem_page_, readonly))
+        SetCorrupt();
+    }
+  }
+}
+
+PersistentMemoryAllocator::~PersistentMemoryAllocator() {
+  // It's strictly forbidden to do any memory access here in case there is
+  // some issue with the underlying memory segment. The "Local" allocator
+  // makes use of this to allow deletion of the segment on the heap from
+  // within its destructor.
+}
+
+uint64_t PersistentMemoryAllocator::Id() const {
+  return shared_meta()->id;
+}
+
+const char* PersistentMemoryAllocator::Name() const {
+  Reference name_ref = shared_meta()->name;
+  const char* name_cstr =
+      GetAsArray<char>(name_ref, 0, PersistentMemoryAllocator::kSizeAny);
+  if (!name_cstr)
+    return "";
+
+  size_t name_length = GetAllocSize(name_ref);
+  if (name_cstr[name_length - 1] != '\0') {
+    NOTREACHED();
+    SetCorrupt();
+    return "";
+  }
+
+  return name_cstr;
+}
+
+void PersistentMemoryAllocator::CreateTrackingHistograms(
+    base::StringPiece name) {
+  if (name.empty() || readonly_)
+    return;
+  std::string name_string = name.as_string();
+
+#if 0
+  // This histogram wasn't being used so has been disabled. It is left here
+  // in case development of a new use of the allocator could benefit from
+  // recording (temporarily and locally) the allocation sizes.
+  DCHECK(!allocs_histogram_);
+  allocs_histogram_ = Histogram::FactoryGet(
+      "UMA.PersistentAllocator." + name_string + ".Allocs", 1, 10000, 50,
+      HistogramBase::kUmaTargetedHistogramFlag);
+#endif
+
+  DCHECK(!used_histogram_);
+  used_histogram_ = LinearHistogram::FactoryGet(
+      "UMA.PersistentAllocator." + name_string + ".UsedPct", 1, 101, 21,
+      HistogramBase::kUmaTargetedHistogramFlag);
+
+  DCHECK(!errors_histogram_);
+  errors_histogram_ = SparseHistogram::FactoryGet(
+      "UMA.PersistentAllocator." + name_string + ".Errors",
+      HistogramBase::kUmaTargetedHistogramFlag);
+}
+
+void PersistentMemoryAllocator::Flush(bool sync) {
+  FlushPartial(used(), sync);
+}
+
+void PersistentMemoryAllocator::SetMemoryState(uint8_t memory_state) {
+  shared_meta()->memory_state.store(memory_state, std::memory_order_relaxed);
+  FlushPartial(sizeof(SharedMetadata), false);
+}
+
+uint8_t PersistentMemoryAllocator::GetMemoryState() const {
+  return shared_meta()->memory_state.load(std::memory_order_relaxed);
+}
+
+size_t PersistentMemoryAllocator::used() const {
+  return std::min(shared_meta()->freeptr.load(std::memory_order_relaxed),
+                  mem_size_);
+}
+
+PersistentMemoryAllocator::Reference PersistentMemoryAllocator::GetAsReference(
+    const void* memory,
+    uint32_t type_id) const {
+  uintptr_t address = reinterpret_cast<uintptr_t>(memory);
+  if (address < reinterpret_cast<uintptr_t>(mem_base_))
+    return kReferenceNull;
+
+  uintptr_t offset = address - reinterpret_cast<uintptr_t>(mem_base_);
+  if (offset >= mem_size_ || offset < sizeof(BlockHeader))
+    return kReferenceNull;
+
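+  // |memory| points at the user data, which lies sizeof(BlockHeader) bytes
+  // past the start of its block; subtracting the header yields the block's
+  // reference.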
+  Reference ref = static_cast<Reference>(offset) - sizeof(BlockHeader);
+  if (!GetBlockData(ref, type_id, kSizeAny))
+    return kReferenceNull;
+
+  return ref;
+}
+
+size_t PersistentMemoryAllocator::GetAllocSize(Reference ref) const {
+  const volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
+  if (!block)
+    return 0;
+  uint32_t size = block->size;
+  // Header was verified by GetBlock() but a malicious actor could change
+  // the value between there and here. Check it again.
+  if (size <= sizeof(BlockHeader) || ref + size > mem_size_) {
+    SetCorrupt();
+    return 0;
+  }
+  return size - sizeof(BlockHeader);
+}
+
+uint32_t PersistentMemoryAllocator::GetType(Reference ref) const {
+  const volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
+  if (!block)
+    return 0;
+  return block->type_id.load(std::memory_order_relaxed);
+}
+
+bool PersistentMemoryAllocator::ChangeType(Reference ref,
+                                           uint32_t to_type_id,
+                                           uint32_t from_type_id,
+                                           bool clear) {
+  DCHECK(!readonly_);
+  volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
+  if (!block)
+    return false;
+
+  // "Strong" exchanges are used below because there is no loop that can retry
+  // in the wake of spurious failures possible with "weak" exchanges. It is,
+  // in aggregate, an "acquire-release" operation so no memory accesses can be
+  // reordered either before or after this method (since changes based on type
+  // could happen on either side).
+
+  if (clear) {
+    // If clearing the memory, first change it to the "transitioning" type so
+    // there can be no confusion by other threads. After the memory is cleared,
+    // it can be changed to its final type.
+    if (!block->type_id.compare_exchange_strong(
+            from_type_id, kTypeIdTransitioning, std::memory_order_acquire,
+            std::memory_order_acquire)) {
+      // Existing type wasn't what was expected: fail (with no changes)
+      return false;
+    }
+
+    // Clear the memory in an atomic manner. Using "release" stores forces
+    // every write to be done after the ones before it. This is better than
+    // using memset because (a) it supports "volatile" and (b) it creates a
+    // reliable pattern upon which other threads may rely.
+    volatile std::atomic<int>* data =
+        reinterpret_cast<volatile std::atomic<int>*>(
+            reinterpret_cast<volatile char*>(block) + sizeof(BlockHeader));
+    const uint32_t words = (block->size - sizeof(BlockHeader)) / sizeof(int);
+    DCHECK_EQ(0U, (block->size - sizeof(BlockHeader)) % sizeof(int));
+    for (uint32_t i = 0; i < words; ++i) {
+      data->store(0, std::memory_order_release);
+      ++data;
+    }
+
+    // If the destination type is "transitioning" then skip the final exchange.
+    if (to_type_id == kTypeIdTransitioning)
+      return true;
+
+    // Finish the change to the desired type.
+    from_type_id = kTypeIdTransitioning;  // Exchange needs modifiable original.
+    bool success = block->type_id.compare_exchange_strong(
+        from_type_id, to_type_id, std::memory_order_release,
+        std::memory_order_relaxed);
+    DCHECK(success);  // Should never fail.
+    return success;
+  }
+
+  // One step change to the new type. Will return false if the existing value
+  // doesn't match what is expected.
+  return block->type_id.compare_exchange_strong(from_type_id, to_type_id,
+                                                std::memory_order_acq_rel,
+                                                std::memory_order_acquire);
+}
+
+PersistentMemoryAllocator::Reference PersistentMemoryAllocator::Allocate(
+    size_t req_size,
+    uint32_t type_id) {
+  Reference ref = AllocateImpl(req_size, type_id);
+  if (ref) {
+    // Success: Record this allocation in usage stats (if active).
+    if (allocs_histogram_)
+      allocs_histogram_->Add(static_cast<HistogramBase::Sample>(req_size));
+  } else {
+    // Failure: Record an allocation of zero for tracking.
+    if (allocs_histogram_)
+      allocs_histogram_->Add(0);
+  }
+  return ref;
+}
+
+PersistentMemoryAllocator::Reference PersistentMemoryAllocator::AllocateImpl(
+    size_t req_size,
+    uint32_t type_id) {
+  DCHECK(!readonly_);
+
+  // Validate req_size to ensure it won't overflow when used as a 32-bit
+  // value.
+  if (req_size > kSegmentMaxSize - sizeof(BlockHeader)) {
+    NOTREACHED();
+    return kReferenceNull;
+  }
+
+  // Round up the requested size, plus header, to the next allocation alignment.
+  uint32_t size = static_cast<uint32_t>(req_size + sizeof(BlockHeader));
+  size = (size + (kAllocAlignment - 1)) & ~(kAllocAlignment - 1);
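+  // Worked example: a 25-byte request plus the 16-byte header is 41 bytes,
+  // which the mask rounds up to 48, the next multiple of kAllocAlignment (8).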
+  if (size <= sizeof(BlockHeader) || size > mem_page_) {
+    NOTREACHED();
+    return kReferenceNull;
+  }
+
+  // Get the current start of unallocated memory. Other threads may
+  // update this at any time and cause us to retry these operations.
+  // This value should be treated as "const" to avoid confusion through
+  // the code below but recognize that any failed compare-exchange operation
+  // involving it will cause it to be loaded with a more recent value. The
+  // code should either exit or restart the loop in that case.
+  /* const */ uint32_t freeptr =
+      shared_meta()->freeptr.load(std::memory_order_acquire);
+
+  // Allocation is lockless so we do all our calculation and then, if saving
+  // indicates a change has occurred since we started, scrap everything and
+  // start over.
+  for (;;) {
+    if (IsCorrupt())
+      return kReferenceNull;
+
+    if (freeptr + size > mem_size_) {
+      SetFlag(&shared_meta()->flags, kFlagFull);
+      return kReferenceNull;
+    }
+
+    // Get pointer to the "free" block. If something has been allocated since
+    // the load of freeptr above, it is still safe as nothing will be written
+    // to that location until after the compare-exchange below.
+    volatile BlockHeader* const block = GetBlock(freeptr, 0, 0, false, true);
+    if (!block) {
+      SetCorrupt();
+      return kReferenceNull;
+    }
+
+    // An allocation cannot cross page boundaries. If it would, create a
+    // "wasted" block and begin again at the top of the next page. This
+    // area could just be left empty but we fill in the block header just
+    // for completeness' sake.
+    const uint32_t page_free = mem_page_ - freeptr % mem_page_;
+    if (size > page_free) {
+      if (page_free <= sizeof(BlockHeader)) {
+        SetCorrupt();
+        return kReferenceNull;
+      }
+      const uint32_t new_freeptr = freeptr + page_free;
+      if (shared_meta()->freeptr.compare_exchange_strong(
+              freeptr, new_freeptr, std::memory_order_acq_rel,
+              std::memory_order_acquire)) {
+        block->size = page_free;
+        block->cookie = kBlockCookieWasted;
+      }
+      continue;
+    }
+
+    // Don't leave a slice at the end of a page too small for anything. This
+    // can result in an allocation up to two alignment-sizes greater than the
+    // minimum required by requested-size + header + alignment.
+    if (page_free - size < sizeof(BlockHeader) + kAllocAlignment)
+      size = page_free;
+
+    const uint32_t new_freeptr = freeptr + size;
+    if (new_freeptr > mem_size_) {
+      SetCorrupt();
+      return kReferenceNull;
+    }
+
+    // Save our work. Try again if another thread has completed an allocation
+    // while we were processing. A "weak" exchange would be permissible here
+    // because the code will just loop and try again but the above processing
+    // is significant so make the extra effort of a "strong" exchange.
+    if (!shared_meta()->freeptr.compare_exchange_strong(
+            freeptr, new_freeptr, std::memory_order_acq_rel,
+            std::memory_order_acquire)) {
+      continue;
+    }
+
+    // Given that all memory was zeroed before ever being given to an instance
+    // of this class and given that we only allocate in a monotonic fashion
+    // going forward, it must be that the newly allocated block is completely
+    // full of zeros. If we find anything in the block header that is NOT a
+    // zero then something must have previously run amuck through memory,
+    // writing beyond the allocated space and into unallocated space.
+    if (block->size != 0 ||
+        block->cookie != kBlockCookieFree ||
+        block->type_id.load(std::memory_order_relaxed) != 0 ||
+        block->next.load(std::memory_order_relaxed) != 0) {
+      SetCorrupt();
+      return kReferenceNull;
+    }
+
+    // Make sure the memory exists by writing to the first byte of every memory
+    // page it touches beyond the one containing the block header itself.
+    // As the underlying storage is often memory mapped from disk or shared
+    // space, sometimes things go wrong and those addresses don't actually
+    // exist, leading to a SIGBUS (or Windows equivalent) at some arbitrary
+    // location in the code. This should concentrate all those failures into
+    // this location for easy tracking and, eventually, proper handling.
+    volatile char* mem_end = reinterpret_cast<volatile char*>(block) + size;
+    volatile char* mem_begin = reinterpret_cast<volatile char*>(
+        (reinterpret_cast<uintptr_t>(block) + sizeof(BlockHeader) +
+         (vm_page_size_ - 1)) &
+        ~static_cast<uintptr_t>(vm_page_size_ - 1));
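+    // (mem_begin is the start of the first whole VM page past the block
+    // header, computed by rounding up to the next vm_page_size_ boundary.)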
+    for (volatile char* memory = mem_begin; memory < mem_end;
+         memory += vm_page_size_) {
+      // It's required that a memory segment start as all zeros and thus the
+      // newly allocated block is all zeros at this point. Thus, writing a
+      // zero to it allows testing that the memory exists without actually
+      // changing its contents. The compiler doesn't know about the requirement
+      // and so cannot optimize-away these writes.
+      *memory = 0;
+    }
+
+    // Load information into the block header. There is no "release" of the
+    // data here because this memory can, currently, be seen only by the thread
+    // performing the allocation. When it comes time to share this, the thread
+    // will call MakeIterable() which does the release operation.
+    block->size = size;
+    block->cookie = kBlockCookieAllocated;
+    block->type_id.store(type_id, std::memory_order_relaxed);
+    return freeptr;
+  }
+}
+
+void PersistentMemoryAllocator::GetMemoryInfo(MemoryInfo* meminfo) const {
+  uint32_t remaining = std::max(
+      mem_size_ - shared_meta()->freeptr.load(std::memory_order_relaxed),
+      (uint32_t)sizeof(BlockHeader));
+  meminfo->total = mem_size_;
+  meminfo->free = remaining - sizeof(BlockHeader);
+}
+
+void PersistentMemoryAllocator::MakeIterable(Reference ref) {
+  DCHECK(!readonly_);
+  if (IsCorrupt())
+    return;
+  volatile BlockHeader* block = GetBlock(ref, 0, 0, false, false);
+  if (!block)  // invalid reference
+    return;
+  if (block->next.load(std::memory_order_acquire) != 0)  // Already iterable.
+    return;
+  block->next.store(kReferenceQueue, std::memory_order_release);  // New tail.
+
+  // Try to add this block to the tail of the queue. May take multiple tries.
+  // If so, tail will be automatically updated with a more recent value during
+  // compare-exchange operations.
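+  //
+  // In outline, this is the two-step Michael & Scott enqueue: (1) CAS the
+  // tail block's "next" from kReferenceQueue to |ref| and then swing
+  // "tailptr" forward to |ref|; (2) if step 1 fails, help whichever thread
+  // got there first by swinging "tailptr" to the observed "next", then retry.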
+  uint32_t tail = shared_meta()->tailptr.load(std::memory_order_acquire);
+  for (;;) {
+    // Acquire the current tail-pointer released by previous call to this
+    // method and validate it.
+    block = GetBlock(tail, 0, 0, true, false);
+    if (!block) {
+      SetCorrupt();
+      return;
+    }
+
+    // Try to insert the block at the tail of the queue. The tail node always
+    // has an existing value of kReferenceQueue; if that is somehow not the
+    // existing value then another thread has acted in the meantime. A "strong"
+    // exchange is necessary so the "else" block does not get executed when
+    // that is not actually the case (which can happen with a "weak" exchange).
+    uint32_t next = kReferenceQueue;  // Will get replaced with existing value.
+    if (block->next.compare_exchange_strong(next, ref,
+                                            std::memory_order_acq_rel,
+                                            std::memory_order_acquire)) {
+      // Update the tail pointer to the new offset. If the "else" clause did
+      // not exist, then this could be a simple Release_Store to set the new
+      // value but because it does, it's possible that other threads could add
+      // one or more nodes at the tail before reaching this point. We don't
+      // have to check the return value because it either operates correctly
+      // or the exact same operation has already been done (by the "else"
+      // clause) on some other thread.
+      shared_meta()->tailptr.compare_exchange_strong(tail, ref,
+                                                     std::memory_order_release,
+                                                     std::memory_order_relaxed);
+      return;
+    } else {
+      // In the unlikely case that a thread crashed or was killed between the
+      // update of "next" and the update of "tailptr", it is necessary to
+      // perform the operation that would have been done. There's no explicit
+      // check for crash/kill which means that this operation may also happen
+      // even when the other thread is in perfect working order which is what
+      // necessitates the CompareAndSwap above.
+      shared_meta()->tailptr.compare_exchange_strong(tail, next,
+                                                     std::memory_order_acq_rel,
+                                                     std::memory_order_acquire);
+    }
+  }
+}
+
+// The "corrupted" state is held both locally and globally (shared). The
+// shared flag can't be trusted since a malicious actor could overwrite it.
+// Because corruption can be detected during read-only operations such as
+// iteration, this method may be called by other "const" methods. In this
+// case, it's safe to discard the constness and modify the local flag and
+// maybe even the shared flag if the underlying data isn't actually read-only.
+void PersistentMemoryAllocator::SetCorrupt() const {
+  if (!corrupt_.load(std::memory_order_relaxed) &&
+      !CheckFlag(
+          const_cast<volatile std::atomic<uint32_t>*>(&shared_meta()->flags),
+          kFlagCorrupt)) {
+    LOG(ERROR) << "Corruption detected in shared-memory segment.";
+    RecordError(kMemoryIsCorrupt);
+  }
+
+  corrupt_.store(true, std::memory_order_relaxed);
+  if (!readonly_) {
+    SetFlag(const_cast<volatile std::atomic<uint32_t>*>(&shared_meta()->flags),
+            kFlagCorrupt);
+  }
+}
+
+bool PersistentMemoryAllocator::IsCorrupt() const {
+  if (corrupt_.load(std::memory_order_relaxed) ||
+      CheckFlag(&shared_meta()->flags, kFlagCorrupt)) {
+    SetCorrupt();  // Make sure all indicators are set.
+    return true;
+  }
+  return false;
+}
+
+bool PersistentMemoryAllocator::IsFull() const {
+  return CheckFlag(&shared_meta()->flags, kFlagFull);
+}
+
+// Dereference a block |ref| and ensure that it's valid for the desired
+// |type_id| and |size|. |special| indicates that we may try to access block
+// headers not available to callers but still accessed by this module. By
+// having internal dereferences go through this same function, the allocator
+// is hardened against corruption.
+const volatile PersistentMemoryAllocator::BlockHeader*
+PersistentMemoryAllocator::GetBlock(Reference ref, uint32_t type_id,
+                                    uint32_t size, bool queue_ok,
+                                    bool free_ok) const {
+  // Handle special cases.
+  if (ref == kReferenceQueue && queue_ok)
+    return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref);
+
+  // Validation of parameters.
+  if (ref < sizeof(SharedMetadata))
+    return nullptr;
+  if (ref % kAllocAlignment != 0)
+    return nullptr;
+  size += sizeof(BlockHeader);
+  if (ref + size > mem_size_)
+    return nullptr;
+
+  // Validation of referenced block-header.
+  if (!free_ok) {
+    const volatile BlockHeader* const block =
+        reinterpret_cast<volatile BlockHeader*>(mem_base_ + ref);
+    if (block->cookie != kBlockCookieAllocated)
+      return nullptr;
+    if (block->size < size)
+      return nullptr;
+    if (ref + block->size > mem_size_)
+      return nullptr;
+    if (type_id != 0 &&
+        block->type_id.load(std::memory_order_relaxed) != type_id) {
+      return nullptr;
+    }
+  }
+
+  // Return pointer to block data.
+  return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref);
+}
+
+void PersistentMemoryAllocator::FlushPartial(size_t length, bool sync) {
+  // Generally there is nothing to do as every write is done through volatile
+  // memory with atomic instructions to guarantee consistency. This (virtual)
+  // method exists so that derived classes can do special things, such as
+  // tell the OS to write changes to disk now rather than when convenient.
+}
+
+void PersistentMemoryAllocator::RecordError(int error) const {
+  if (errors_histogram_)
+    errors_histogram_->Add(error);
+}
+
+const volatile void* PersistentMemoryAllocator::GetBlockData(
+    Reference ref,
+    uint32_t type_id,
+    uint32_t size) const {
+  DCHECK(size > 0);
+  const volatile BlockHeader* block =
+      GetBlock(ref, type_id, size, false, false);
+  if (!block)
+    return nullptr;
+  return reinterpret_cast<const volatile char*>(block) + sizeof(BlockHeader);
+}
+
+void PersistentMemoryAllocator::UpdateTrackingHistograms() {
+  DCHECK(!readonly_);
+  if (used_histogram_) {
+    MemoryInfo meminfo;
+    GetMemoryInfo(&meminfo);
+    HistogramBase::Sample used_percent = static_cast<HistogramBase::Sample>(
+        ((meminfo.total - meminfo.free) * 100ULL / meminfo.total));
+    used_histogram_->Add(used_percent);
+  }
+}
+
+
+//----- LocalPersistentMemoryAllocator -----------------------------------------
+
+LocalPersistentMemoryAllocator::LocalPersistentMemoryAllocator(
+    size_t size,
+    uint64_t id,
+    base::StringPiece name)
+    : PersistentMemoryAllocator(AllocateLocalMemory(size),
+                                size, 0, id, name, false) {}
+
+LocalPersistentMemoryAllocator::~LocalPersistentMemoryAllocator() {
+  DeallocateLocalMemory(const_cast<char*>(mem_base_), mem_size_, mem_type_);
+}
+
+// static
+PersistentMemoryAllocator::Memory
+LocalPersistentMemoryAllocator::AllocateLocalMemory(size_t size) {
+  void* address;
+
+#if defined(OS_WIN)
+  address =
+      ::VirtualAlloc(nullptr, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+  if (address)
+    return Memory(address, MEM_VIRTUAL);
+  UmaHistogramSparse("UMA.LocalPersistentMemoryAllocator.Failures.Win",
+                     ::GetLastError());
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  // MAP_ANON is deprecated on Linux but MAP_ANONYMOUS is not universal on Mac.
+  // MAP_SHARED is not available on Linux <2.4 but required on Mac.
+  address = ::mmap(nullptr, size, PROT_READ | PROT_WRITE,
+                   MAP_ANON | MAP_SHARED, -1, 0);
+  if (address != MAP_FAILED)
+    return Memory(address, MEM_VIRTUAL);
+  UmaHistogramSparse("UMA.LocalPersistentMemoryAllocator.Failures.Posix",
+                     errno);
+#else
+#error This architecture is not (yet) supported.
+#endif
+
+  // As a last resort, just allocate the memory from the heap. This will
+  // achieve the same basic result but the acquired memory has to be
+  // explicitly zeroed and thus realized immediately (i.e. all pages are
+  // added to the process now instead of only when first accessed).
+  address = malloc(size);
+  DPCHECK(address);
+  memset(address, 0, size);
+  return Memory(address, MEM_MALLOC);
+}
+
+// static
+void LocalPersistentMemoryAllocator::DeallocateLocalMemory(void* memory,
+                                                           size_t size,
+                                                           MemoryType type) {
+  if (type == MEM_MALLOC) {
+    free(memory);
+    return;
+  }
+
+  DCHECK_EQ(MEM_VIRTUAL, type);
+#if defined(OS_WIN)
+  BOOL success = ::VirtualFree(memory, 0, MEM_DECOMMIT);
+  DCHECK(success);
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  int result = ::munmap(memory, size);
+  DCHECK_EQ(0, result);
+#else
+#error This architecture is not (yet) supported.
+#endif
+}
+
+
+//----- SharedPersistentMemoryAllocator ----------------------------------------
+
+SharedPersistentMemoryAllocator::SharedPersistentMemoryAllocator(
+    std::unique_ptr<SharedMemory> memory,
+    uint64_t id,
+    base::StringPiece name,
+    bool read_only)
+    : PersistentMemoryAllocator(
+          Memory(static_cast<uint8_t*>(memory->memory()), MEM_SHARED),
+          memory->mapped_size(),
+          0,
+          id,
+          name,
+          read_only),
+      shared_memory_(std::move(memory)) {}
+
+SharedPersistentMemoryAllocator::~SharedPersistentMemoryAllocator() = default;
+
+// static
+bool SharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(
+    const SharedMemory& memory) {
+  return IsMemoryAcceptable(memory.memory(), memory.mapped_size(), 0, false);
+}
+
+
+#if !defined(OS_NACL)
+//----- FilePersistentMemoryAllocator ------------------------------------------
+
+FilePersistentMemoryAllocator::FilePersistentMemoryAllocator(
+    std::unique_ptr<MemoryMappedFile> file,
+    size_t max_size,
+    uint64_t id,
+    base::StringPiece name,
+    bool read_only)
+    : PersistentMemoryAllocator(
+          Memory(const_cast<uint8_t*>(file->data()), MEM_FILE),
+          max_size != 0 ? max_size : file->length(),
+          0,
+          id,
+          name,
+          read_only),
+      mapped_file_(std::move(file)) {}
+
+FilePersistentMemoryAllocator::~FilePersistentMemoryAllocator() = default;
+
+// static
+bool FilePersistentMemoryAllocator::IsFileAcceptable(
+    const MemoryMappedFile& file,
+    bool read_only) {
+  return IsMemoryAcceptable(file.data(), file.length(), 0, read_only);
+}
+
+void FilePersistentMemoryAllocator::FlushPartial(size_t length, bool sync) {
+  if (sync)
+    AssertBlockingAllowed();
+  if (IsReadonly())
+    return;
+
+#if defined(OS_WIN)
+  // Windows doesn't support asynchronous flush.
+  AssertBlockingAllowed();
+  BOOL success = ::FlushViewOfFile(data(), length);
+  DPCHECK(success);
+#elif defined(OS_MACOSX)
+  // On OSX, "invalidate" removes all cached pages, forcing a re-read from
+  // disk. That's not applicable to "flush" so omit it.
+  int result =
+      ::msync(const_cast<void*>(data()), length, sync ? MS_SYNC : MS_ASYNC);
+  DCHECK_NE(EINVAL, result);
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  // On POSIX, "invalidate" forces _other_ processes to recognize what has
+  // been written to disk and so is applicable to "flush".
+  int result = ::msync(const_cast<void*>(data()), length,
+                       MS_INVALIDATE | (sync ? MS_SYNC : MS_ASYNC));
+  DCHECK_NE(EINVAL, result);
+#else
+#error Unsupported OS.
+#endif
+}
+#endif  // !defined(OS_NACL)
+
+//----- DelayedPersistentAllocation --------------------------------------------
+
+// Forwarding constructors.
+DelayedPersistentAllocation::DelayedPersistentAllocation(
+    PersistentMemoryAllocator* allocator,
+    subtle::Atomic32* ref,
+    uint32_t type,
+    size_t size,
+    bool make_iterable)
+    : DelayedPersistentAllocation(
+          allocator,
+          reinterpret_cast<std::atomic<Reference>*>(ref),
+          type,
+          size,
+          0,
+          make_iterable) {}
+
+DelayedPersistentAllocation::DelayedPersistentAllocation(
+    PersistentMemoryAllocator* allocator,
+    subtle::Atomic32* ref,
+    uint32_t type,
+    size_t size,
+    size_t offset,
+    bool make_iterable)
+    : DelayedPersistentAllocation(
+          allocator,
+          reinterpret_cast<std::atomic<Reference>*>(ref),
+          type,
+          size,
+          offset,
+          make_iterable) {}
+
+DelayedPersistentAllocation::DelayedPersistentAllocation(
+    PersistentMemoryAllocator* allocator,
+    std::atomic<Reference>* ref,
+    uint32_t type,
+    size_t size,
+    bool make_iterable)
+    : DelayedPersistentAllocation(allocator,
+                                  ref,
+                                  type,
+                                  size,
+                                  0,
+                                  make_iterable) {}
+
+// Real constructor.
+DelayedPersistentAllocation::DelayedPersistentAllocation(
+    PersistentMemoryAllocator* allocator,
+    std::atomic<Reference>* ref,
+    uint32_t type,
+    size_t size,
+    size_t offset,
+    bool make_iterable)
+    : allocator_(allocator),
+      type_(type),
+      size_(checked_cast<uint32_t>(size)),
+      offset_(checked_cast<uint32_t>(offset)),
+      make_iterable_(make_iterable),
+      reference_(ref) {
+  DCHECK(allocator_);
+  DCHECK_NE(0U, type_);
+  DCHECK_LT(0U, size_);
+  DCHECK(reference_);
+}
+
+DelayedPersistentAllocation::~DelayedPersistentAllocation() = default;
+
+void* DelayedPersistentAllocation::Get() const {
+  // The reference value itself does not protect the contents of the
+  // allocation in any way; the "acquire" load pairs with the "release"
+  // store below so a non-null reference is seen only after the allocation
+  // has been fully published.
+  Reference ref = reference_->load(std::memory_order_acquire);
+  if (!ref) {
+    ref = allocator_->Allocate(size_, type_);
+    if (!ref)
+      return nullptr;
+
+    // Store the new reference in its proper location using compare-and-swap.
+    // Use a "strong" exchange to ensure no false-negatives since the operation
+    // cannot be retried.
+    Reference existing = 0;  // Must be mutable; receives actual value.
+    if (reference_->compare_exchange_strong(existing, ref,
+                                            std::memory_order_release,
+                                            std::memory_order_relaxed)) {
+      if (make_iterable_)
+        allocator_->MakeIterable(ref);
+    } else {
+      // Failure indicates that something else has raced ahead, performed the
+      // allocation, and stored its reference. Purge the allocation that was
+      // just done and use the other one instead.
+      DCHECK_EQ(type_, allocator_->GetType(existing));
+      DCHECK_LE(size_, allocator_->GetAllocSize(existing));
+      allocator_->ChangeType(ref, 0, type_, /*clear=*/false);
+      ref = existing;
+    }
+  }
+
+  char* mem = allocator_->GetAsArray<char>(ref, type_, size_);
+  if (!mem) {
+    // This should never happen but be tolerant if it does as corruption from
+    // the outside is something to guard against.
+    NOTREACHED();
+    return nullptr;
+  }
+  return mem + offset_;
+}
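+
+// A minimal usage sketch (the names here are hypothetical, not part of this
+// file): the persistent allocation is deferred until the first call to
+// Get(), so declaring the object itself costs nothing up front.
+//
+//   static std::atomic<PersistentMemoryAllocator::Reference> g_ref(0);
+//   DelayedPersistentAllocation counter(allocator, &g_ref,
+//                                       /*type=*/0x12345678, /*size=*/64,
+//                                       /*make_iterable=*/false);
+//   void* mem = counter.Get();  // Allocates on first call; may return null.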
+
+}  // namespace base
diff --git a/base/metrics/persistent_memory_allocator.h b/base/metrics/persistent_memory_allocator.h
new file mode 100644
index 0000000..978a362
--- /dev/null
+++ b/base/metrics/persistent_memory_allocator.h
@@ -0,0 +1,872 @@
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_
+#define BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_
+
+#include <stdint.h>
+
+#include <atomic>
+#include <memory>
+#include <type_traits>
+
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/files/file_path.h"
+#include "base/gtest_prod_util.h"
+#include "base/macros.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+class HistogramBase;
+class MemoryMappedFile;
+class SharedMemory;
+
+// Simple allocator for pieces of a memory block that may be persistent
+// to some storage or shared across multiple processes. This class resides
+// under base/metrics because it was written for that purpose. It is,
+// however, fully general-purpose and can be freely moved to base/memory
+// if other uses are found.
+//
+// This class provides for thread-secure (i.e. safe against other threads
+// or processes that may be compromised and thus have malicious intent)
+// allocation of memory within a designated block and also a mechanism by
+// which other threads can learn of these allocations.
+//
+// There is (currently) no way to release an allocated block of data because
+// doing so would risk invalidating pointers held by other processes and
+// greatly complicate the allocation algorithm.
+//
+// Construction of this object can accept new, clean (i.e. zeroed) memory
+// or previously initialized memory. In the first case, construction must
+// be allowed to complete before letting other allocators attach to the same
+// segment. In other words, don't share the segment until at least one
+// allocator has been attached to it.
+//
+// Note that memory not in active use is not accessed so it is possible to
+// use virtual memory, including memory-mapped files, as backing storage with
+// the OS "pinning" new (zeroed) physical RAM pages only as they are needed.
+//
+// OBJECTS: Although the allocator can be used in a "malloc" sense, fetching
+// character arrays and manipulating that memory manually, the better way is
+// generally to use the "object" methods to create and manage allocations. In
+// this way the sizing, type-checking, and construction are all automatic. For
+// this to work, however, every type of stored object must define two public
+// "constexpr" values, kPersistentTypeId and kExpectedInstanceSize, as such:
+//
+// struct MyPersistentObjectType {
+//     // SHA1(MyPersistentObjectType): Increment this if structure changes!
+//     static constexpr uint32_t kPersistentTypeId = 0x3E15F6DE + 1;
+//
+//     // Expected size for 32/64-bit check. Update this if structure changes!
+//     static constexpr size_t kExpectedInstanceSize = 20;
+//
+//     ...
+// };
+//
+// kPersistentTypeId: This value is an arbitrary identifier that allows the
+//   identification of these objects in the allocator, including the ability
+//   to find them via iteration. The number is arbitrary but using the first
+//   four bytes of the SHA1 hash of the type name means that there shouldn't
+//   be any conflicts with other types that may also be stored in the memory.
+//   The fully qualified name (e.g. base::debug::MyPersistentObjectType) could
+//   be used to generate the hash if the type name seems common. Use a command
+//   like this to get the hash: echo -n "MyPersistentObjectType" | sha1sum
+//   If the structure layout changes, ALWAYS increment this number so that
+//   newer versions of the code don't try to interpret persistent data written
+//   by older versions with a different layout.
+//
+// kExpectedInstanceSize: This value is the hard-coded number that matches
+//   what sizeof(T) would return. By providing it explicitly, the allocator can
+//   verify that the structure is compatible between both 32-bit and 64-bit
+//   versions of the code.
+//
+// Using New manages the memory and then calls the default constructor for the
+// object. Given that objects are persistent, no destructor is ever called
+// automatically though a caller can explicitly call Delete to destruct it and
+// change the type to something indicating it is no longer in use.
+//
+// Though persistent memory segments are transferable between programs built
+// for different natural word widths, they CANNOT be exchanged between CPUs
+// of different endianness. Attempts to do so will simply see the existing data
+// as corrupt and refuse to access any of it.
+class BASE_EXPORT PersistentMemoryAllocator {
+ public:
+  typedef uint32_t Reference;
+
+  // These states are used to indicate the overall condition of the memory
+  // segment irrespective of what is stored within it. Because the data is
+  // often persistent and thus needs to be readable by different versions of
+  // a program, these values are fixed and can never change.
+  enum MemoryState : uint8_t {
+    // Persistent memory starts all zeros and so shows "uninitialized".
+    MEMORY_UNINITIALIZED = 0,
+
+    // The header has been written and the memory is ready for use.
+    MEMORY_INITIALIZED = 1,
+
+    // The data should be considered deleted. This would be set when the
+    // allocator is being cleaned up. If file-backed, the file is likely
+    // to be deleted but since deletion can fail for a variety of reasons,
+    // having this extra status means a future reader can realize what
+    // should have happened.
+    MEMORY_DELETED = 2,
+
+    // Outside code can create states starting with this number; these too
+    // must also never change between code versions.
+    MEMORY_USER_DEFINED = 100,
+  };
+
+  // Iterator for going through all iterable memory records in an allocator.
+  // Like the allocator itself, iterators are lock-free and thread-secure.
+  // That means that multiple threads can share an iterator and the same
+  // reference will not be returned twice.
+  //
+  // The order of the items returned by an iterator matches the order in which
+  // MakeIterable() was called on them. Once an allocation is made iterable,
+  // it is always such so the only possible difference between successive
+  // iterations is for more to be added to the end.
+  //
+  // Iteration, in general, is tolerant of corrupted memory. It will return
+  // what it can and stop only when corruption forces it to. Bad corruption
+  // could cause the same object to be returned many times but it will
+  // eventually quit.
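+  //
+  // A minimal usage sketch (|allocator| is assumed to be a valid allocator
+  // and DoSomethingWith a caller-provided function):
+  //
+  //   PersistentMemoryAllocator::Iterator iter(allocator);
+  //   uint32_t type;
+  //   for (Reference ref = iter.GetNext(&type); ref != kReferenceNull;
+  //        ref = iter.GetNext(&type)) {
+  //     DoSomethingWith(ref, type);
+  //   }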
+  class BASE_EXPORT Iterator {
+   public:
+    // Constructs an iterator on a given |allocator|, starting at the beginning.
+    // The allocator must live beyond the lifetime of the iterator. This class
+    // has read-only access to the allocator (hence "const") but the returned
+    // references can be used on a read/write version, too.
+    explicit Iterator(const PersistentMemoryAllocator* allocator);
+
+    // As above but resuming from the |starting_after| reference. The first call
+    // to GetNext() will return the next object found after that reference. The
+    // reference must be to an "iterable" object; references to non-iterable
+    // objects (those that never had MakeIterable() called for them) will cause
+    // a run-time error.
+    Iterator(const PersistentMemoryAllocator* allocator,
+             Reference starting_after);
+
+    // Resets the iterator back to the beginning.
+    void Reset();
+
+    // Resets the iterator, resuming from the |starting_after| reference.
+    void Reset(Reference starting_after);
+
+    // Returns the previously retrieved reference, or kReferenceNull if none.
+    // If the iterator was constructed or reset with a |starting_after|
+    // reference, this will return that value.
+    Reference GetLast();
+
+    // Gets the next iterable, storing that type in |type_return|. The actual
+    // return value is a reference to the allocation inside the allocator or
+    // zero if there are no more. GetNext() may still be called again at a
+    // later time to retrieve any new allocations that have been added.
+    Reference GetNext(uint32_t* type_return);
+
+    // Similar to above but gets the next iterable of a specific |type_match|.
+    // This should not be mixed with calls to GetNext() because any allocations
+    // skipped here due to a type mis-match will never be returned by later
+    // calls to GetNext() meaning it's possible to completely miss entries.
+    Reference GetNextOfType(uint32_t type_match);
+
+    // As above but works using object type.
+    template <typename T>
+    Reference GetNextOfType() {
+      return GetNextOfType(T::kPersistentTypeId);
+    }
+
+    // As above but works using objects and returns null if not found.
+    template <typename T>
+    const T* GetNextOfObject() {
+      return GetAsObject<T>(GetNextOfType<T>());
+    }
+
+    // Converts references to objects. This is a convenience method so that
+    // users of the iterator don't need to also have their own pointer to the
+    // allocator over which the iterator runs in order to retrieve objects.
+    // Because the iterator is not read/write, only "const" objects can be
+    // fetched. Non-const objects can be fetched using the reference on a
+    // non-const (external) pointer to the same allocator (or use const_cast
+    // to remove the qualifier).
+    template <typename T>
+    const T* GetAsObject(Reference ref) const {
+      return allocator_->GetAsObject<T>(ref);
+    }
+
+    // Similar to GetAsObject() but converts references to arrays of things.
+    template <typename T>
+    const T* GetAsArray(Reference ref, uint32_t type_id, size_t count) const {
+      return allocator_->GetAsArray<T>(ref, type_id, count);
+    }
+
+    // Convert a generic pointer back into a reference. A null reference will
+    // be returned if |memory| is not inside the persistent segment or does not
+    // point to an object of the specified |type_id|.
+    Reference GetAsReference(const void* memory, uint32_t type_id) const {
+      return allocator_->GetAsReference(memory, type_id);
+    }
+
+    // As above but convert an object back into a reference.
+    template <typename T>
+    Reference GetAsReference(const T* obj) const {
+      return allocator_->GetAsReference(obj);
+    }
+
+   private:
+    // Weak-pointer to memory allocator being iterated over.
+    const PersistentMemoryAllocator* allocator_;
+
+    // The last record that was returned.
+    std::atomic<Reference> last_record_;
+
+    // The number of records found; used for detecting loops.
+    std::atomic<uint32_t> record_count_;
+
+    DISALLOW_COPY_AND_ASSIGN(Iterator);
+  };
+
+  // Returned information about the internal state of the heap.
+  struct MemoryInfo {
+    size_t total;
+    size_t free;
+  };
+
+  enum : Reference {
+    // A common "null" reference value.
+    kReferenceNull = 0,
+  };
+
+  enum : uint32_t {
+    // A value that will match any type when doing lookups.
+    kTypeIdAny = 0x00000000,
+
+    // A value indicating that the type is in transition. Work is being done
+    // on the contents to prepare it for a new type to come.
+    kTypeIdTransitioning = 0xFFFFFFFF,
+  };
+
+  enum : size_t {
+    kSizeAny = 1  // Constant indicating that any array size is acceptable.
+  };
+
+  // This is the standard file extension (suitable for being passed to the
+  // AddExtension() method of base::FilePath) for dumps of persistent memory.
+  static const base::FilePath::CharType kFileExtension[];
+
+  // The allocator operates on any arbitrary block of memory. Creation and
+  // persisting or sharing of that block with another process is the
+  // responsibility of the caller. The allocator needs to know only the
+  // block's |base| address, the total |size| of the block, and any internal
+  // |page| size (zero if not paged) across which allocations should not span.
+  // The |id| is an arbitrary value the caller can use to identify a
+  // particular memory segment. It will only be loaded during the initial
+  // creation of the segment and can be checked by the caller for consistency.
+  // The |name|, if provided, is used to distinguish histograms for this
+  // allocator. Only the primary owner of the segment should define this value;
+  // other processes can learn it from the shared state. If the underlying
+  // memory is |readonly| then no changes will be made to it. The resulting
+  // object should be stored as a "const" pointer.
+  //
+  // PersistentMemoryAllocator does NOT take ownership of the memory block.
+  // The caller must manage it and ensure it stays available throughout the
+  // lifetime of this object.
+  //
+  // Memory segments for sharing must have had an allocator attached to them
+  // before actually being shared. If the memory segment was just created, it
+  // should be zeroed before being passed here. If it was an existing segment,
+  // the values here will be compared to copies stored in the shared segment
+  // as a guard against corruption.
+  //
+  // Make sure that the memory segment is acceptable (see IsMemoryAcceptable()
+  // method below) before construction if the definition of the segment can
+  // vary in any way at run-time. Invalid memory segments will cause a crash.
+  PersistentMemoryAllocator(void* base, size_t size, size_t page_size,
+                            uint64_t id, base::StringPiece name,
+                            bool readonly);
+  virtual ~PersistentMemoryAllocator();
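+
+  // A minimal construction sketch (the buffer, |id|, and |name| here are
+  // hypothetical; a real segment would typically be shared or file-backed):
+  //
+  //   alignas(8) static char buffer[1 << 20] = {};  // new segments are zeroed
+  //   PersistentMemoryAllocator allocator(buffer, sizeof(buffer),
+  //                                       /*page_size=*/0, /*id=*/0x1234,
+  //                                       "ExampleAllocator",
+  //                                       /*readonly=*/false);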
+
+  // Check if memory segment is acceptable for creation of an Allocator. This
+  // doesn't do any analysis of the data and so doesn't guarantee that the
+  // contents are valid, just that the parameters won't cause the program to
+  // abort. The IsCorrupt() method will report detection of data problems
+  // found during construction and general operation.
+  static bool IsMemoryAcceptable(const void* data, size_t size,
+                                 size_t page_size, bool readonly);
+
+  // Get the internal identifier for this persistent memory segment.
+  uint64_t Id() const;
+
+  // Get the internal name of this allocator (possibly an empty string).
+  const char* Name() const;
+
+  // Is this segment open only for read?
+  bool IsReadonly() const { return readonly_; }
+
+  // Manage the saved state of the memory.
+  void SetMemoryState(uint8_t memory_state);
+  uint8_t GetMemoryState() const;
+
+  // Create internal histograms for tracking memory use and allocation sizes
+  // for allocator of |name| (which can simply be the result of Name()). This
+  // is done separately from construction for situations such as when the
+  // histograms will be backed by memory provided by this very allocator.
+  //
+  // IMPORTANT: Callers must update tools/metrics/histograms/histograms.xml
+  // with the following histograms:
+  //    UMA.PersistentAllocator.name.Errors
+  //    UMA.PersistentAllocator.name.UsedPct
+  void CreateTrackingHistograms(base::StringPiece name);
+
+  // Flushes the persistent memory to any backing store. This typically does
+  // nothing but is used by the FilePersistentMemoryAllocator to inform the
+  // OS that all the data should be sent to the disk immediately. This is
+  // useful in the rare case where something has just been stored that needs
+  // to survive a hard shutdown of the machine like from a power failure.
+  // The |sync| parameter indicates if this call should block until the flush
+  // is complete but is only advisory and may or may not have an effect
+  // depending on the capabilities of the OS. Synchronous flushes are allowed
+  // only from threads that are allowed to do I/O but since |sync| is only
+  // advisory, all flushes should be done on IO-capable threads.
+  void Flush(bool sync);
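+
+  // For example, after storing something that must survive a sudden power
+  // loss (a sketch):
+  //
+  //   allocator->Flush(/*sync=*/false);  // advisory write-back hint to the OS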
+
+  // Direct access to underlying memory segment. If the segment is shared
+  // across threads or processes, reading data through these values does
+  // not guarantee consistency. Use with care. Do not write.
+  const void* data() const { return const_cast<const char*>(mem_base_); }
+  size_t length() const { return mem_size_; }
+  size_t size() const { return mem_size_; }
+  size_t used() const;
+
+  // Get an object referenced by a |ref|. For safety reasons, the |type_id|
+  // code and size-of(|T|) are compared to ensure the reference is valid
+  // and cannot return an object outside of the memory segment. A |type_id| of
+  // kTypeIdAny (zero) will match any type, though the size is still checked.
+  // NULL is returned if any problem is detected, such as corrupted storage or
+  // incorrect parameters. Callers MUST check that the returned value is
+  // non-null EVERY TIME before accessing it or risk crashing! Once
+  // dereferenced, the pointer is safe to reuse forever.
+  //
+  // It is essential that the object be of a fixed size. All fields must be of
+  // a defined type that does not change based on the compiler or the CPU
+  // natural word size. Acceptable are char, float, double, and (u)intXX_t.
+  // Unacceptable are int, bool, and wchar_t, which are implementation-defined
+  // with regard to their size.
+  //
+  // Alignment must also be consistent. A uint64_t after a uint32_t will pad
+  // differently between 32 and 64 bit architectures. Either put the bigger
+  // elements first, group smaller elements into blocks the size of larger
+  // elements, or manually insert padding fields as appropriate for the
+  // largest architecture, including at the end.
+  //
+  // To protect against mistakes, all objects must have the attribute
+  // |kExpectedInstanceSize| (static constexpr size_t) that is a hard-coded
+  // numerical value -- NNN, not sizeof(T) -- that can be tested. If the
+  // instance size is not fixed, at least one build will fail.
+  //
+  // If the size of a structure changes, the type-ID used to recognize it
+  // should also change so later versions of the code don't try to read
+  // incompatible structures from earlier versions.
+  //
+  // NOTE: Though this method will guarantee that an object of the specified
+  // type can be accessed without going outside the bounds of the memory
+  // segment, it makes no guarantees of the validity of the data within the
+  // object itself. If it is expected that the contents of the segment could
+  // be compromised with malicious intent, the object must be hardened as well.
+  //
+  // Though the persistent data may be "volatile" if it is shared with
+  // other processes, such is not necessarily the case. The internal
+  // "volatile" designation is discarded so as to not propagate the viral
+  // nature of that keyword to the caller, which can add it back, if necessary,
+  // based on knowledge of how the allocator is being used.
+  template <typename T>
+  T* GetAsObject(Reference ref) {
+    static_assert(std::is_standard_layout<T>::value, "only standard objects");
+    static_assert(!std::is_array<T>::value, "use GetAsArray<>()");
+    static_assert(T::kExpectedInstanceSize == sizeof(T), "inconsistent size");
+    return const_cast<T*>(reinterpret_cast<volatile T*>(
+        GetBlockData(ref, T::kPersistentTypeId, sizeof(T))));
+  }
+  template <typename T>
+  const T* GetAsObject(Reference ref) const {
+    static_assert(std::is_standard_layout<T>::value, "only standard objects");
+    static_assert(!std::is_array<T>::value, "use GetAsArray<>()");
+    static_assert(T::kExpectedInstanceSize == sizeof(T), "inconsistent size");
+    return const_cast<const T*>(reinterpret_cast<const volatile T*>(
+        GetBlockData(ref, T::kPersistentTypeId, sizeof(T))));
+  }
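+
+  // An object type meeting the above requirements might look like this
+  // (a hypothetical example, not part of this interface):
+  //
+  //   struct MyPersistentData {
+  //     static constexpr uint32_t kPersistentTypeId = 0x3C5F8A12;
+  //     static constexpr size_t kExpectedInstanceSize = 16;  // NNN, not sizeof
+  //     uint64_t big_first;  // Largest field first avoids padding skew.
+  //     uint32_t value;
+  //     uint32_t padding;    // Explicit padding keeps 32/64-bit layouts equal.
+  //   };
+  //
+  //   MyPersistentData* data = allocator->GetAsObject<MyPersistentData>(ref);
+  //   if (data) { ...safe to use... }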
+
+  // Like GetAsObject but get an array of simple, fixed-size types.
+  //
+  // Use a |count| of the required number of array elements, or kSizeAny.
+  // GetAllocSize() can be used to calculate the upper bound but isn't reliable
+  // because padding can make space for extra elements that were not written.
+  //
+  // Remember that an array of char is a string but may not be NUL terminated.
+  //
+  // There are no compile-time or run-time checks to ensure 32/64-bit size
+  // compatibility when using these accessors. Only use fixed-size types such
+  // as char, float, double, or (u)intXX_t.
+  template <typename T>
+  T* GetAsArray(Reference ref, uint32_t type_id, size_t count) {
+    static_assert(std::is_fundamental<T>::value, "use GetAsObject<>()");
+    return const_cast<T*>(reinterpret_cast<volatile T*>(
+        GetBlockData(ref, type_id, count * sizeof(T))));
+  }
+  template <typename T>
+  const T* GetAsArray(Reference ref, uint32_t type_id, size_t count) const {
+    static_assert(std::is_fundamental<T>::value, "use GetAsObject<>()");
+    return const_cast<const T*>(reinterpret_cast<const volatile T*>(
+        GetBlockData(ref, type_id, count * sizeof(T))));
+  }
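+
+  // Reading a stored character array might look like this (a sketch;
+  // kStringTypeId is a hypothetical type id):
+  //
+  //   const char* str =
+  //       allocator->GetAsArray<char>(ref, kStringTypeId, kSizeAny);
+  //   // |str| may not be NUL terminated; bound any read by GetAllocSize(ref).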
+
+  // Get the corresponding reference for an object held in persistent memory.
+  // If the |memory| is not valid or the type does not match, a kReferenceNull
+  // result will be returned.
+  Reference GetAsReference(const void* memory, uint32_t type_id) const;
+
+  // Get the number of bytes allocated to a block. This is useful when storing
+  // arrays in order to validate the ending boundary. The returned value will
+  // include any padding added to achieve the required alignment and so could
+  // be larger than given in the original Allocate() request.
+  size_t GetAllocSize(Reference ref) const;
+
+  // Access the internal "type" of an object. This generally isn't necessary
+  // but can be used to "clear" the type and so effectively mark it as deleted
+  // even though the memory stays valid and allocated. Changing the type is
+  // an atomic compare/exchange and so requires knowing the existing value.
+  // It will return false if the existing type is not what is expected.
+  //
+  // Changing the type doesn't mean the data is compatible with the new type.
+  // Passing true for |clear| will zero the memory after the type has been
+  // changed away from |from_type_id| but before it becomes |to_type_id| meaning
+  // that it is done in a manner that is thread-safe. Memory is guaranteed to
+  // be zeroed atomically by machine-word in a monotonically increasing order.
+  //
+  // It will likely be necessary to reconstruct the type before it can be used.
+  // Changing the type WILL NOT invalidate existing pointers to the data, either
+  // in this process or others, so changing the data structure could have
+  // unpredictable results. USE WITH CARE!
+  uint32_t GetType(Reference ref) const;
+  bool ChangeType(Reference ref,
+                  uint32_t to_type_id,
+                  uint32_t from_type_id,
+                  bool clear);
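+
+  // For instance, atomically "freeing" an object by clearing its type might
+  // look like this (a sketch; kMyTypeId is a hypothetical type id):
+  //
+  //   if (allocator->ChangeType(ref, /*to_type_id=*/0,
+  //                             /*from_type_id=*/kMyTypeId, /*clear=*/true)) {
+  //     // Contents are zeroed and the type is now 0; existing pointers stay
+  //     // valid but the old data is gone.
+  //   }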
+
+  // Allocated objects can be added to an internal list that can then be
+  // iterated over by other processes. If an allocated object can be found
+  // another way, such as by having its reference within a different object
+  // that will be made iterable, then this call is not necessary. This always
+  // succeeds unless corruption is detected; check IsCorrupt() to find out.
+  // Once an object is made iterable, its position in iteration can never
+  // change; new iterable objects will always be added after it in the series.
+  // Changing the type does not alter its "iterable" status.
+  void MakeIterable(Reference ref);
+
+  // Get the information about the amount of free space in the allocator. The
+  // amount of free space should be treated as approximate due to extras from
+  // alignment and metadata. Concurrent allocations from other threads will
+  // also make the true amount less than what is reported.
+  void GetMemoryInfo(MemoryInfo* meminfo) const;
+
+  // If there is some indication that the memory has become corrupted,
+  // calling this will attempt to prevent further damage by indicating to
+  // all processes that something is not as expected.
+  void SetCorrupt() const;
+
+  // This can be called to determine if corruption has been detected in the
+  // segment, possibly by a malicious actor. Once detected, future allocations
+  // will fail and iteration may not locate all objects.
+  bool IsCorrupt() const;
+
+  // Flag set if an allocation has failed because the memory segment was full.
+  bool IsFull() const;
+
+  // Update those "tracking" histograms which do not get updates during regular
+  // operation, such as how much memory is currently used. This should be
+  // called before such information is to be displayed or uploaded.
+  void UpdateTrackingHistograms();
+
+  // While the above works much like malloc & free, these next methods provide
+  // an "object" interface similar to new and delete.
+
+  // Reserve space in the memory segment of the desired |size| and |type_id|.
+  // A return value of zero indicates the allocation failed, otherwise the
+  // returned reference can be used by any process to get a real pointer via
+  // the GetAsObject() or GetAsArray() calls. The actual allocated size may be
+  // larger and will always be a multiple of 8 bytes (64 bits).
+  Reference Allocate(size_t size, uint32_t type_id);
+
+  // Allocate and construct an object in persistent memory. The type must have
+  // both (size_t) kExpectedInstanceSize and (uint32_t) kPersistentTypeId
+  // static constexpr fields that are used to ensure compatibility between
+  // software versions. An optional size parameter can be specified to force
+  // the allocation to be bigger than the size of the object; this is useful
+  // when the last field is actually variable length.
+  template <typename T>
+  T* New(size_t size) {
+    if (size < sizeof(T))
+      size = sizeof(T);
+    Reference ref = Allocate(size, T::kPersistentTypeId);
+    void* mem =
+        const_cast<void*>(GetBlockData(ref, T::kPersistentTypeId, size));
+    if (!mem)
+      return nullptr;
+    DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(mem) & (alignof(T) - 1));
+    return new (mem) T();
+  }
+  template <typename T>
+  T* New() {
+    return New<T>(sizeof(T));
+  }
+
+  // Similar to New, above, but construct the object out of an existing memory
+  // block and of an expected type. If |clear| is true, memory will be zeroed
+  // before construction. Though this is not standard object behavior, it
+  // is present to match with new allocations that always come from zeroed
+  // memory. Anything previously present simply ceases to exist; no destructor
+  // is called for it so explicitly Delete() the old object first if need be.
+  // Calling this will not invalidate existing pointers to the object, either
+  // in this process or others, so changing the object could have unpredictable
+  // results. USE WITH CARE!
+  template <typename T>
+  T* New(Reference ref, uint32_t from_type_id, bool clear) {
+    DCHECK_LE(sizeof(T), GetAllocSize(ref)) << "alloc not big enough for obj";
+    // Make sure the memory is appropriate. This won't be used until after
+    // the type is changed but checking first avoids the possibility of having
+    // to change the type back.
+    void* mem = const_cast<void*>(GetBlockData(ref, 0, sizeof(T)));
+    if (!mem)
+      return nullptr;
+    // Ensure the allocator's internal alignment is sufficient for this object.
+    // This protects against coding errors in the allocator.
+    DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(mem) & (alignof(T) - 1));
+    // Change the type, clearing the memory if so desired. The new type is
+    // "transitioning" so that there is no race condition with the construction
+    // of the object should another thread be simultaneously iterating over
+    // data. This will "acquire" the memory so no changes get reordered before
+    // it.
+    if (!ChangeType(ref, kTypeIdTransitioning, from_type_id, clear))
+      return nullptr;
+    // Construct an object of the desired type on this memory, just as if
+    // New() had been called to create it.
+    T* obj = new (mem) T();
+    // Finally change the type to the desired one. This will "release" all of
+    // the changes above and so provide a consistent view to other threads.
+    bool success =
+        ChangeType(ref, T::kPersistentTypeId, kTypeIdTransitioning, false);
+    DCHECK(success);
+    return obj;
+  }
+
+  // Deletes an object by destructing it and then changing the type to a
+  // different value (default 0).
+  template <typename T>
+  void Delete(T* obj, uint32_t new_type) {
+    // Get the reference for the object.
+    Reference ref = GetAsReference<T>(obj);
+    // First change the type to "transitioning" so there is no race condition
+    // where another thread could find the object through iteration while it
+    // is being destructed. This will "acquire" the memory so no changes get
+    // reordered before it. It will fail if |ref| is invalid.
+    if (!ChangeType(ref, kTypeIdTransitioning, T::kPersistentTypeId, false))
+      return;
+    // Destruct the object.
+    obj->~T();
+    // Finally change the type to the desired value. This will "release" all
+    // the changes above.
+    bool success = ChangeType(ref, new_type, kTypeIdTransitioning, false);
+    DCHECK(success);
+  }
+  template <typename T>
+  void Delete(T* obj) {
+    Delete<T>(obj, 0);
+  }
+
+  // As above but works with objects allocated from persistent memory.
+  template <typename T>
+  Reference GetAsReference(const T* obj) const {
+    return GetAsReference(obj, T::kPersistentTypeId);
+  }
+
+  // As above but works with an object allocated from persistent memory.
+  template <typename T>
+  void MakeIterable(const T* obj) {
+    MakeIterable(GetAsReference<T>(obj));
+  }
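+
+  // Putting the object interface together (a sketch; MyPersistentData is the
+  // hypothetical type from the GetAsObject<>() example above):
+  //
+  //   MyPersistentData* data = allocator->New<MyPersistentData>();
+  //   if (data) {
+  //     data->value = 42;
+  //     allocator->MakeIterable(data);  // expose to iterators, if desired
+  //     ...
+  //     allocator->Delete(data);        // destruct and retype to 0
+  //   }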
+
+ protected:
+  enum MemoryType {
+    MEM_EXTERNAL,
+    MEM_MALLOC,
+    MEM_VIRTUAL,
+    MEM_SHARED,
+    MEM_FILE,
+  };
+
+  struct Memory {
+    Memory(void* b, MemoryType t) : base(b), type(t) {}
+
+    void* base;
+    MemoryType type;
+  };
+
+  // Constructs the allocator. Everything is the same as the public allocator
+  // except |memory| which is a structure with additional information besides
+  // the base address.
+  PersistentMemoryAllocator(Memory memory, size_t size, size_t page_size,
+                            uint64_t id, base::StringPiece name,
+                            bool readonly);
+
+  // Implementation of Flush that accepts how much to flush.
+  virtual void FlushPartial(size_t length, bool sync);
+
+  volatile char* const mem_base_;  // Memory base. (char so sizeof guaranteed 1)
+  const MemoryType mem_type_;      // Type of memory allocation.
+  const uint32_t mem_size_;        // Size of entire memory segment.
+  const uint32_t mem_page_;        // Page size allocations shouldn't cross.
+
+ private:
+  struct SharedMetadata;
+  struct BlockHeader;
+  static const uint32_t kAllocAlignment;
+  static const Reference kReferenceQueue;
+
+  // The shared metadata is always located at the top of the memory segment.
+  // These convenience functions eliminate constant casting of the base
+  // pointer within the code.
+  const SharedMetadata* shared_meta() const {
+    return reinterpret_cast<const SharedMetadata*>(
+        const_cast<const char*>(mem_base_));
+  }
+  SharedMetadata* shared_meta() {
+    return reinterpret_cast<SharedMetadata*>(const_cast<char*>(mem_base_));
+  }
+
+  // Actual method for doing the allocation.
+  Reference AllocateImpl(size_t size, uint32_t type_id);
+
+  // Get the block header associated with a specific reference.
+  const volatile BlockHeader* GetBlock(Reference ref, uint32_t type_id,
+                                       uint32_t size, bool queue_ok,
+                                       bool free_ok) const;
+  volatile BlockHeader* GetBlock(Reference ref, uint32_t type_id, uint32_t size,
+                                 bool queue_ok, bool free_ok) {
+      return const_cast<volatile BlockHeader*>(
+          const_cast<const PersistentMemoryAllocator*>(this)->GetBlock(
+              ref, type_id, size, queue_ok, free_ok));
+  }
+
+  // Get the actual data within a block associated with a specific reference.
+  const volatile void* GetBlockData(Reference ref, uint32_t type_id,
+                                    uint32_t size) const;
+  volatile void* GetBlockData(Reference ref, uint32_t type_id,
+                              uint32_t size) {
+      return const_cast<volatile void*>(
+          const_cast<const PersistentMemoryAllocator*>(this)->GetBlockData(
+              ref, type_id, size));
+  }
+
+  // Record an error in the internal histogram.
+  void RecordError(int error) const;
+
+  const size_t vm_page_size_;          // The page size used by the OS.
+  const bool readonly_;                // Indicates access to read-only memory.
+  mutable std::atomic<bool> corrupt_;  // Local version of "corrupted" flag.
+
+  HistogramBase* allocs_histogram_;  // Histogram recording allocs.
+  HistogramBase* used_histogram_;    // Histogram recording used space.
+  HistogramBase* errors_histogram_;  // Histogram recording errors.
+
+  friend class PersistentMemoryAllocatorTest;
+  FRIEND_TEST_ALL_PREFIXES(PersistentMemoryAllocatorTest, AllocateAndIterate);
+  DISALLOW_COPY_AND_ASSIGN(PersistentMemoryAllocator);
+};
+
+
+// This allocator uses a local memory block it allocates from the general
+// heap. It is generally used when some kind of "death rattle" handler will
+// save the contents to persistent storage during process shutdown. It is
+// also useful for testing.
+class BASE_EXPORT LocalPersistentMemoryAllocator
+    : public PersistentMemoryAllocator {
+ public:
+  LocalPersistentMemoryAllocator(size_t size, uint64_t id,
+                                 base::StringPiece name);
+  ~LocalPersistentMemoryAllocator() override;
+
+ private:
+  // Allocates a block of local memory of the specified |size|, ensuring that
+  // the memory will not be physically allocated until accessed and will read
+  // as zero when that happens.
+  static Memory AllocateLocalMemory(size_t size);
+
+  // Deallocates a block of local |memory| of the specified |size|.
+  static void DeallocateLocalMemory(void* memory, size_t size, MemoryType type);
+
+  DISALLOW_COPY_AND_ASSIGN(LocalPersistentMemoryAllocator);
+};
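+
+// A "death rattle" sketch (SaveRawSegment() stands in for whatever
+// hypothetical hook persists the raw bytes at shutdown):
+//
+//   LocalPersistentMemoryAllocator allocator(1 << 20, /*id=*/0x5678, "Rattle");
+//   ...record state through |allocator| during normal operation...
+//   SaveRawSegment(allocator.data(), allocator.used());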
+
+
+// This allocator takes a shared-memory object and performs allocation from
+// it. The memory must be previously mapped via Map() or MapAt(). The allocator
+// takes ownership of the memory object.
+class BASE_EXPORT SharedPersistentMemoryAllocator
+    : public PersistentMemoryAllocator {
+ public:
+  SharedPersistentMemoryAllocator(std::unique_ptr<SharedMemory> memory,
+                                  uint64_t id,
+                                  base::StringPiece name,
+                                  bool read_only);
+  ~SharedPersistentMemoryAllocator() override;
+
+  SharedMemory* shared_memory() { return shared_memory_.get(); }
+
+  // Ensure that the memory isn't so invalid that it would crash when passing it
+  // to the allocator. This doesn't guarantee the data is valid, just that it
+  // won't cause the program to abort. The existing IsCorrupt() call will handle
+  // the rest.
+  static bool IsSharedMemoryAcceptable(const SharedMemory& memory);
+
+ private:
+  std::unique_ptr<SharedMemory> shared_memory_;
+
+  DISALLOW_COPY_AND_ASSIGN(SharedPersistentMemoryAllocator);
+};
+
+
+#if !defined(OS_NACL)  // NACL doesn't support any kind of file access in build.
+// This allocator takes a memory-mapped file object and performs allocation
+// from it. The allocator takes ownership of the file object.
+class BASE_EXPORT FilePersistentMemoryAllocator
+    : public PersistentMemoryAllocator {
+ public:
+  // A |max_size| of zero will use the length of the file as the maximum
+  // size. The |file| object must have been already created with sufficient
+  // permissions (read, read/write, or read/write/extend).
+  FilePersistentMemoryAllocator(std::unique_ptr<MemoryMappedFile> file,
+                                size_t max_size,
+                                uint64_t id,
+                                base::StringPiece name,
+                                bool read_only);
+  ~FilePersistentMemoryAllocator() override;
+
+  // Ensure that the file isn't so invalid that it would crash when passing it
+  // to the allocator. This doesn't guarantee the file is valid, just that it
+  // won't cause the program to abort. The existing IsCorrupt() call will handle
+  // the rest.
+  static bool IsFileAcceptable(const MemoryMappedFile& file, bool read_only);
+
+ protected:
+  // PersistentMemoryAllocator:
+  void FlushPartial(size_t length, bool sync) override;
+
+ private:
+  std::unique_ptr<MemoryMappedFile> mapped_file_;
+
+  DISALLOW_COPY_AND_ASSIGN(FilePersistentMemoryAllocator);
+};
+#endif  // !defined(OS_NACL)
+
+// An allocation that is defined but not executed until required at a later
+// time. This allows for potential users of an allocation to be decoupled
+// from the logic that defines it. In addition, there can be multiple users
+// of the same allocation or any region thereof that are guaranteed to always
+// use the same space. It's okay to copy/move these objects.
+//
+// This is a top-level class instead of an inner class of the PMA so that it
+// can be forward-declared in other header files without the need to include
+// the full contents of this file.
+class BASE_EXPORT DelayedPersistentAllocation {
+ public:
+  using Reference = PersistentMemoryAllocator::Reference;
+
+  // Creates a delayed allocation using the specified |allocator|. When
+  // needed, the memory will be allocated using the specified |type| and
+  // |size|. If |offset| is given, the returned pointer will be at that
+  // offset into the segment; this allows combining allocations into a
+  // single persistent segment to reduce overhead and means an "all or
+  // nothing" request. Note that |size| is always the total memory size
+  // and |offset| just indicates the start of a block within it. If
+  // |make_iterable| was true, the allocation will be made iterable when it
+  // is created; already existing allocations are not changed.
+  //
+  // Once allocated, a reference to the segment will be stored at |ref|.
+  // This shared location must be initialized to zero (0); it is checked
+  // with every Get() request to see if the allocation has already been
+  // done. If reading |ref| outside of this object, be sure to do an
+  // "acquire" load. Don't write to it -- leave that to this object.
+  //
+  // For convenience, methods taking both Atomic32 and std::atomic<Reference>
+  // are defined.
+  DelayedPersistentAllocation(PersistentMemoryAllocator* allocator,
+                              subtle::Atomic32* ref,
+                              uint32_t type,
+                              size_t size,
+                              bool make_iterable);
+  DelayedPersistentAllocation(PersistentMemoryAllocator* allocator,
+                              subtle::Atomic32* ref,
+                              uint32_t type,
+                              size_t size,
+                              size_t offset,
+                              bool make_iterable);
+  DelayedPersistentAllocation(PersistentMemoryAllocator* allocator,
+                              std::atomic<Reference>* ref,
+                              uint32_t type,
+                              size_t size,
+                              bool make_iterable);
+  DelayedPersistentAllocation(PersistentMemoryAllocator* allocator,
+                              std::atomic<Reference>* ref,
+                              uint32_t type,
+                              size_t size,
+                              size_t offset,
+                              bool make_iterable);
+  ~DelayedPersistentAllocation();
+
+  // Gets a pointer to the defined allocation. This will realize the request
+  // and update the reference provided during construction. The memory will
+  // be zeroed the first time it is returned; after that it is shared with
+  // all other Get() requests and so shows any changes made to it elsewhere.
+  //
+  // If the allocation fails for any reason, null will be returned. This works
+  // even on "const" objects because the allocation is already defined, just
+  // delayed.
+  void* Get() const;
+
+  // Gets the internal reference value. If this returns a non-zero value then
+  // a subsequent call to Get() will do nothing but convert that reference into
+  // a memory location -- useful for accessing an existing allocation without
+  // creating one unnecessarily.
+  Reference reference() const {
+    return reference_->load(std::memory_order_relaxed);
+  }
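+
+  // A typical use (a sketch; the shared reference would normally itself live
+  // in persistent or global storage, initialized to zero):
+  //
+  //   std::atomic<PersistentMemoryAllocator::Reference> g_ref(0);
+  //   DelayedPersistentAllocation delayed(allocator, &g_ref, /*type=*/1001,
+  //                                       /*size=*/100, /*make_iterable=*/true);
+  //   ...
+  //   void* mem = delayed.Get();  // allocates on first call; reuses thereafter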
+
+ private:
+  // The underlying object that does the actual allocation of memory. Its
+  // lifetime must exceed that of all DelayedPersistentAllocation objects
+  // that use it.
+  PersistentMemoryAllocator* const allocator_;
+
+  // The desired type and size of the allocated segment plus the offset
+  // within it for the defined request.
+  const uint32_t type_;
+  const uint32_t size_;
+  const uint32_t offset_;
+
+  // Flag indicating if allocation should be made iterable when done.
+  const bool make_iterable_;
+
+  // The location at which a reference to the allocated segment is to be
+  // stored once the allocation is complete. If multiple delayed allocations
+  // share the same pointer then an allocation on one will amount to an
+  // allocation for all.
+  volatile std::atomic<Reference>* const reference_;
+
+  // No DISALLOW_COPY_AND_ASSIGN as it's okay to copy/move these objects.
+};
+
+}  // namespace base
+
+#endif  // BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_
diff --git a/base/metrics/persistent_memory_allocator_unittest.cc b/base/metrics/persistent_memory_allocator_unittest.cc
new file mode 100644
index 0000000..75e4faa
--- /dev/null
+++ b/base/metrics/persistent_memory_allocator_unittest.cc
@@ -0,0 +1,1001 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/persistent_memory_allocator.h"
+
+#include <memory>
+
+#include "base/files/file.h"
+#include "base/files/file_util.h"
+#include "base/files/memory_mapped_file.h"
+#include "base/files/scoped_temp_dir.h"
+#include "base/memory/shared_memory.h"
+#include "base/metrics/histogram.h"
+#include "base/rand_util.h"
+#include "base/strings/safe_sprintf.h"
+#include "base/strings/stringprintf.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/simple_thread.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace base {
+
+namespace {
+
+const uint32_t TEST_MEMORY_SIZE = 1 << 20;   // 1 MiB
+const uint32_t TEST_MEMORY_PAGE = 64 << 10;  // 64 KiB
+const uint32_t TEST_ID = 12345;
+const char TEST_NAME[] = "TestAllocator";
+
+void SetFileLength(const base::FilePath& path, size_t length) {
+  {
+    File file(path, File::FLAG_OPEN | File::FLAG_READ | File::FLAG_WRITE);
+    DCHECK(file.IsValid());
+    ASSERT_TRUE(file.SetLength(static_cast<int64_t>(length)));
+  }
+
+  int64_t actual_length;
+  DCHECK(GetFileSize(path, &actual_length));
+  DCHECK_EQ(length, static_cast<size_t>(actual_length));
+}
+
+}  // namespace
+
+typedef PersistentMemoryAllocator::Reference Reference;
+
+class PersistentMemoryAllocatorTest : public testing::Test {
+ public:
+  // This can't be statically initialized because its value isn't defined
+  // in the PersistentMemoryAllocator header file. Instead, it's simply set
+  // in the constructor.
+  uint32_t kAllocAlignment;
+
+  struct TestObject1 {
+    static constexpr uint32_t kPersistentTypeId = 1;
+    static constexpr size_t kExpectedInstanceSize = 4 + 1 + 3;
+    int32_t onething;
+    char oranother;
+  };
+
+  struct TestObject2 {
+    static constexpr uint32_t kPersistentTypeId = 2;
+    static constexpr size_t kExpectedInstanceSize = 8 + 4 + 4 + 8 + 8;
+    int64_t thiis;
+    int32_t that;
+    float andthe;
+    double other;
+    char thing[8];
+  };
+
+  PersistentMemoryAllocatorTest() {
+    kAllocAlignment = GetAllocAlignment();
+    mem_segment_.reset(new char[TEST_MEMORY_SIZE]);
+  }
+
+  void SetUp() override {
+    allocator_.reset();
+    ::memset(mem_segment_.get(), 0, TEST_MEMORY_SIZE);
+    allocator_.reset(new PersistentMemoryAllocator(
+        mem_segment_.get(), TEST_MEMORY_SIZE, TEST_MEMORY_PAGE,
+        TEST_ID, TEST_NAME, false));
+  }
+
+  void TearDown() override {
+    allocator_.reset();
+  }
+
+  unsigned CountIterables() {
+    PersistentMemoryAllocator::Iterator iter(allocator_.get());
+    uint32_t type;
+    unsigned count = 0;
+    while (iter.GetNext(&type) != 0) {
+      ++count;
+    }
+    return count;
+  }
+
+  static uint32_t GetAllocAlignment() {
+    return PersistentMemoryAllocator::kAllocAlignment;
+  }
+
+ protected:
+  std::unique_ptr<char[]> mem_segment_;
+  std::unique_ptr<PersistentMemoryAllocator> allocator_;
+};
+
+TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
+  allocator_->CreateTrackingHistograms(allocator_->Name());
+
+  std::string base_name(TEST_NAME);
+  EXPECT_EQ(TEST_ID, allocator_->Id());
+  EXPECT_TRUE(allocator_->used_histogram_);
+  EXPECT_EQ("UMA.PersistentAllocator." + base_name + ".UsedPct",
+            allocator_->used_histogram_->histogram_name());
+  EXPECT_EQ(PersistentMemoryAllocator::MEMORY_INITIALIZED,
+            allocator_->GetMemoryState());
+
+  // Get base memory info for later comparison.
+  PersistentMemoryAllocator::MemoryInfo meminfo0;
+  allocator_->GetMemoryInfo(&meminfo0);
+  EXPECT_EQ(TEST_MEMORY_SIZE, meminfo0.total);
+  EXPECT_GT(meminfo0.total, meminfo0.free);
+
+  // Validate allocation of test object and make sure it can be referenced
+  // and all metadata looks correct.
+  TestObject1* obj1 = allocator_->New<TestObject1>();
+  ASSERT_TRUE(obj1);
+  Reference block1 = allocator_->GetAsReference(obj1);
+  ASSERT_NE(0U, block1);
+  EXPECT_NE(nullptr, allocator_->GetAsObject<TestObject1>(block1));
+  EXPECT_EQ(nullptr, allocator_->GetAsObject<TestObject2>(block1));
+  EXPECT_LE(sizeof(TestObject1), allocator_->GetAllocSize(block1));
+  EXPECT_GT(sizeof(TestObject1) + kAllocAlignment,
+            allocator_->GetAllocSize(block1));
+  PersistentMemoryAllocator::MemoryInfo meminfo1;
+  allocator_->GetMemoryInfo(&meminfo1);
+  EXPECT_EQ(meminfo0.total, meminfo1.total);
+  EXPECT_GT(meminfo0.free, meminfo1.free);
+
+  // Verify that pointers can be turned back into references and that invalid
+  // addresses return null.
+  char* memory1 = allocator_->GetAsArray<char>(block1, 1, 1);
+  ASSERT_TRUE(memory1);
+  EXPECT_EQ(block1, allocator_->GetAsReference(memory1, 0));
+  EXPECT_EQ(block1, allocator_->GetAsReference(memory1, 1));
+  EXPECT_EQ(0U, allocator_->GetAsReference(memory1, 2));
+  EXPECT_EQ(0U, allocator_->GetAsReference(memory1 + 1, 0));
+  EXPECT_EQ(0U, allocator_->GetAsReference(memory1 + 16, 0));
+  EXPECT_EQ(0U, allocator_->GetAsReference(nullptr, 0));
+  EXPECT_EQ(0U, allocator_->GetAsReference(&base_name, 0));
+
+  // Ensure that the test-object can be made iterable.
+  PersistentMemoryAllocator::Iterator iter1a(allocator_.get());
+  EXPECT_EQ(0U, iter1a.GetLast());
+  uint32_t type;
+  EXPECT_EQ(0U, iter1a.GetNext(&type));
+  allocator_->MakeIterable(block1);
+  EXPECT_EQ(block1, iter1a.GetNext(&type));
+  EXPECT_EQ(1U, type);
+  EXPECT_EQ(block1, iter1a.GetLast());
+  EXPECT_EQ(0U, iter1a.GetNext(&type));
+  EXPECT_EQ(block1, iter1a.GetLast());
+
+  // Create second test-object and ensure everything is good and it cannot
+  // be confused with test-object of another type.
+  TestObject2* obj2 = allocator_->New<TestObject2>();
+  ASSERT_TRUE(obj2);
+  Reference block2 = allocator_->GetAsReference(obj2);
+  ASSERT_NE(0U, block2);
+  EXPECT_NE(nullptr, allocator_->GetAsObject<TestObject2>(block2));
+  EXPECT_EQ(nullptr, allocator_->GetAsObject<TestObject1>(block2));
+  EXPECT_LE(sizeof(TestObject2), allocator_->GetAllocSize(block2));
+  EXPECT_GT(sizeof(TestObject2) + kAllocAlignment,
+            allocator_->GetAllocSize(block2));
+  PersistentMemoryAllocator::MemoryInfo meminfo2;
+  allocator_->GetMemoryInfo(&meminfo2);
+  EXPECT_EQ(meminfo1.total, meminfo2.total);
+  EXPECT_GT(meminfo1.free, meminfo2.free);
+
+  // Ensure that second test-object can also be made iterable.
+  allocator_->MakeIterable(obj2);
+  EXPECT_EQ(block2, iter1a.GetNext(&type));
+  EXPECT_EQ(2U, type);
+  EXPECT_EQ(block2, iter1a.GetLast());
+  EXPECT_EQ(0U, iter1a.GetNext(&type));
+  EXPECT_EQ(block2, iter1a.GetLast());
+
+  // Check that the iterator can be reset to the beginning.
+  iter1a.Reset();
+  EXPECT_EQ(0U, iter1a.GetLast());
+  EXPECT_EQ(block1, iter1a.GetNext(&type));
+  EXPECT_EQ(block1, iter1a.GetLast());
+  EXPECT_EQ(block2, iter1a.GetNext(&type));
+  EXPECT_EQ(block2, iter1a.GetLast());
+  EXPECT_EQ(0U, iter1a.GetNext(&type));
+
+  // Check that the iterator can be reset to an arbitrary location.
+  iter1a.Reset(block1);
+  EXPECT_EQ(block1, iter1a.GetLast());
+  EXPECT_EQ(block2, iter1a.GetNext(&type));
+  EXPECT_EQ(block2, iter1a.GetLast());
+  EXPECT_EQ(0U, iter1a.GetNext(&type));
+
+  // Check that iteration can begin after an arbitrary location.
+  PersistentMemoryAllocator::Iterator iter1b(allocator_.get(), block1);
+  EXPECT_EQ(block2, iter1b.GetNext(&type));
+  EXPECT_EQ(0U, iter1b.GetNext(&type));
+
+  // Ensure nothing has gone noticeably wrong.
+  EXPECT_FALSE(allocator_->IsFull());
+  EXPECT_FALSE(allocator_->IsCorrupt());
+
+  // Check the internal histogram record of used memory.
+  allocator_->UpdateTrackingHistograms();
+  std::unique_ptr<HistogramSamples> used_samples(
+      allocator_->used_histogram_->SnapshotSamples());
+  EXPECT_TRUE(used_samples);
+  EXPECT_EQ(1, used_samples->TotalCount());
+
+  // Check that an object's type can be changed.
+  EXPECT_EQ(2U, allocator_->GetType(block2));
+  allocator_->ChangeType(block2, 3, 2, false);
+  EXPECT_EQ(3U, allocator_->GetType(block2));
+  allocator_->New<TestObject2>(block2, 3, false);
+  EXPECT_EQ(2U, allocator_->GetType(block2));
+
+  // Create second allocator (read/write) using the same memory segment.
+  std::unique_ptr<PersistentMemoryAllocator> allocator2(
+      new PersistentMemoryAllocator(mem_segment_.get(), TEST_MEMORY_SIZE,
+                                    TEST_MEMORY_PAGE, 0, "", false));
+  EXPECT_EQ(TEST_ID, allocator2->Id());
+  EXPECT_FALSE(allocator2->used_histogram_);
+
+  // Ensure that iteration and access through second allocator works.
+  PersistentMemoryAllocator::Iterator iter2(allocator2.get());
+  EXPECT_EQ(block1, iter2.GetNext(&type));
+  EXPECT_EQ(block2, iter2.GetNext(&type));
+  EXPECT_EQ(0U, iter2.GetNext(&type));
+  EXPECT_NE(nullptr, allocator2->GetAsObject<TestObject1>(block1));
+  EXPECT_NE(nullptr, allocator2->GetAsObject<TestObject2>(block2));
+
+  // Create a third allocator (read-only) using the same memory segment.
+  std::unique_ptr<const PersistentMemoryAllocator> allocator3(
+      new PersistentMemoryAllocator(mem_segment_.get(), TEST_MEMORY_SIZE,
+                                    TEST_MEMORY_PAGE, 0, "", true));
+  EXPECT_EQ(TEST_ID, allocator3->Id());
+  EXPECT_FALSE(allocator3->used_histogram_);
+
+  // Ensure that iteration and access through third allocator works.
+  PersistentMemoryAllocator::Iterator iter3(allocator3.get());
+  EXPECT_EQ(block1, iter3.GetNext(&type));
+  EXPECT_EQ(block2, iter3.GetNext(&type));
+  EXPECT_EQ(0U, iter3.GetNext(&type));
+  EXPECT_NE(nullptr, allocator3->GetAsObject<TestObject1>(block1));
+  EXPECT_NE(nullptr, allocator3->GetAsObject<TestObject2>(block2));
+
+  // Ensure that GetNextOfType works.
+  PersistentMemoryAllocator::Iterator iter1c(allocator_.get());
+  EXPECT_EQ(block2, iter1c.GetNextOfType<TestObject2>());
+  EXPECT_EQ(0U, iter1c.GetNextOfType(2));
+
+  // Ensure that GetNextOfObject works.
+  PersistentMemoryAllocator::Iterator iter1d(allocator_.get());
+  EXPECT_EQ(obj2, iter1d.GetNextOfObject<TestObject2>());
+  EXPECT_EQ(nullptr, iter1d.GetNextOfObject<TestObject2>());
+
+  // Ensure that deleting an object works.
+  allocator_->Delete(obj2);
+  PersistentMemoryAllocator::Iterator iter1z(allocator_.get());
+  EXPECT_EQ(nullptr, iter1z.GetNextOfObject<TestObject2>());
+
+  // Ensure that the memory state can be set.
+  allocator_->SetMemoryState(PersistentMemoryAllocator::MEMORY_DELETED);
+  EXPECT_EQ(PersistentMemoryAllocator::MEMORY_DELETED,
+            allocator_->GetMemoryState());
+}
+
+TEST_F(PersistentMemoryAllocatorTest, PageTest) {
+  // This allocation will go into the first memory page.
+  Reference block1 = allocator_->Allocate(TEST_MEMORY_PAGE / 2, 1);
+  EXPECT_LT(0U, block1);
+  EXPECT_GT(TEST_MEMORY_PAGE, block1);
+
+  // This allocation won't fit in same page as previous block.
+  Reference block2 =
+      allocator_->Allocate(TEST_MEMORY_PAGE - 2 * kAllocAlignment, 2);
+  EXPECT_EQ(TEST_MEMORY_PAGE, block2);
+
+  // This allocation will also require a new page.
+  Reference block3 = allocator_->Allocate(2 * kAllocAlignment + 99, 3);
+  EXPECT_EQ(2U * TEST_MEMORY_PAGE, block3);
+}
+
+// A simple thread that takes an allocator and repeatedly allocates random-
+// sized chunks from it until no more can be done.
+class AllocatorThread : public SimpleThread {
+ public:
+  AllocatorThread(const std::string& name,
+                  void* base,
+                  uint32_t size,
+                  uint32_t page_size)
+      : SimpleThread(name, Options()),
+        count_(0),
+        iterable_(0),
+        allocator_(base, size, page_size, 0, std::string(), false) {}
+
+  void Run() override {
+    for (;;) {
+      uint32_t size = RandInt(1, 99);
+      uint32_t type = RandInt(100, 999);
+      Reference block = allocator_.Allocate(size, type);
+      if (!block)
+        break;
+
+      count_++;
+      if (RandInt(0, 1)) {
+        allocator_.MakeIterable(block);
+        iterable_++;
+      }
+    }
+  }
+
+  unsigned iterable() { return iterable_; }
+  unsigned count() { return count_; }
+
+ private:
+  unsigned count_;
+  unsigned iterable_;
+  PersistentMemoryAllocator allocator_;
+};
+
+// Test parallel allocation/iteration and ensure consistency across all
+// instances.
+TEST_F(PersistentMemoryAllocatorTest, ParallelismTest) {
+  void* memory = mem_segment_.get();
+  AllocatorThread t1("t1", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
+  AllocatorThread t2("t2", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
+  AllocatorThread t3("t3", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
+  AllocatorThread t4("t4", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
+  AllocatorThread t5("t5", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
+
+  t1.Start();
+  t2.Start();
+  t3.Start();
+  t4.Start();
+  t5.Start();
+
+  unsigned last_count = 0;
+  do {
+    unsigned count = CountIterables();
+    EXPECT_LE(last_count, count);
+    last_count = count;
+  } while (!allocator_->IsCorrupt() && !allocator_->IsFull());
+
+  t1.Join();
+  t2.Join();
+  t3.Join();
+  t4.Join();
+  t5.Join();
+
+  EXPECT_FALSE(allocator_->IsCorrupt());
+  EXPECT_TRUE(allocator_->IsFull());
+  EXPECT_EQ(CountIterables(),
+            t1.iterable() + t2.iterable() + t3.iterable() + t4.iterable() +
+            t5.iterable());
+}
+
+// A simple thread that counts objects by iterating through an allocator.
+class CounterThread : public SimpleThread {
+ public:
+  CounterThread(const std::string& name,
+                PersistentMemoryAllocator::Iterator* iterator,
+                Lock* lock,
+                ConditionVariable* condition,
+                bool* wake_up)
+      : SimpleThread(name, Options()),
+        iterator_(iterator),
+        lock_(lock),
+        condition_(condition),
+        count_(0),
+        wake_up_(wake_up) {}
+
+  void Run() override {
+    // Wait so all threads can start at approximately the same time.
+    // Best performance comes from releasing a single worker which then
+    // releases the next, etc., etc.
+    {
+      AutoLock autolock(*lock_);
+
+      // Before calling Wait(), make sure that the wake up condition
+      // has not already passed.  Also, since spurious signal events
+      // are possible, check the condition in a while loop to make
+      // sure that the wake up condition is met when this thread
+      // returns from the Wait().
+      // See usage comments in src/base/synchronization/condition_variable.h.
+      while (!*wake_up_) {
+        condition_->Wait();
+        condition_->Signal();
+      }
+    }
+
+    uint32_t type;
+    while (iterator_->GetNext(&type) != 0) {
+      ++count_;
+    }
+  }
+
+  unsigned count() { return count_; }
+
+ private:
+  PersistentMemoryAllocator::Iterator* iterator_;
+  Lock* lock_;
+  ConditionVariable* condition_;
+  unsigned count_;
+  bool* wake_up_;
+
+  DISALLOW_COPY_AND_ASSIGN(CounterThread);
+};
+
+// Ensure that parallel iteration returns the same number of objects as
+// single-threaded iteration.
+TEST_F(PersistentMemoryAllocatorTest, IteratorParallelismTest) {
+  // Fill the memory segment with random allocations.
+  unsigned iterable_count = 0;
+  for (;;) {
+    uint32_t size = RandInt(1, 99);
+    uint32_t type = RandInt(100, 999);
+    Reference block = allocator_->Allocate(size, type);
+    if (!block)
+      break;
+    allocator_->MakeIterable(block);
+    ++iterable_count;
+  }
+  EXPECT_FALSE(allocator_->IsCorrupt());
+  EXPECT_TRUE(allocator_->IsFull());
+  EXPECT_EQ(iterable_count, CountIterables());
+
+  PersistentMemoryAllocator::Iterator iter(allocator_.get());
+  Lock lock;
+  ConditionVariable condition(&lock);
+  bool wake_up = false;
+
+  CounterThread t1("t1", &iter, &lock, &condition, &wake_up);
+  CounterThread t2("t2", &iter, &lock, &condition, &wake_up);
+  CounterThread t3("t3", &iter, &lock, &condition, &wake_up);
+  CounterThread t4("t4", &iter, &lock, &condition, &wake_up);
+  CounterThread t5("t5", &iter, &lock, &condition, &wake_up);
+
+  t1.Start();
+  t2.Start();
+  t3.Start();
+  t4.Start();
+  t5.Start();
+
+  // Take the lock and set the wake up condition to true.  This helps to
+  // avoid a race condition where the Signal() event is called before
+  // all the threads have reached the Wait() and thus never get woken up.
+  {
+    AutoLock autolock(lock);
+    wake_up = true;
+  }
+
+  // This will release all the waiting threads.
+  condition.Signal();
+
+  t1.Join();
+  t2.Join();
+  t3.Join();
+  t4.Join();
+  t5.Join();
+
+  EXPECT_EQ(iterable_count,
+            t1.count() + t2.count() + t3.count() + t4.count() + t5.count());
+
+#if 0
+  // These ensure that the threads don't run sequentially. It shouldn't be
+  // enabled in general because it could lead to a flaky test if it happens
+  // simply by chance but it is useful during development to ensure that the
+  // test is working correctly.
+  EXPECT_NE(iterable_count, t1.count());
+  EXPECT_NE(iterable_count, t2.count());
+  EXPECT_NE(iterable_count, t3.count());
+  EXPECT_NE(iterable_count, t4.count());
+  EXPECT_NE(iterable_count, t5.count());
+#endif
+}
+
+TEST_F(PersistentMemoryAllocatorTest, DelayedAllocationTest) {
+  std::atomic<Reference> ref1, ref2;
+  ref1.store(0, std::memory_order_relaxed);
+  ref2.store(0, std::memory_order_relaxed);
+  DelayedPersistentAllocation da1(allocator_.get(), &ref1, 1001, 100, true);
+  DelayedPersistentAllocation da2a(allocator_.get(), &ref2, 2002, 200, 0, true);
+  DelayedPersistentAllocation da2b(allocator_.get(), &ref2, 2002, 200, 5, true);
+
+  // Nothing should yet have been allocated.
+  uint32_t type;
+  PersistentMemoryAllocator::Iterator iter(allocator_.get());
+  EXPECT_EQ(0U, iter.GetNext(&type));
+
+  // Do first delayed allocation and check that a new persistent object exists.
+  EXPECT_EQ(0U, da1.reference());
+  void* mem1 = da1.Get();
+  ASSERT_TRUE(mem1);
+  EXPECT_NE(0U, da1.reference());
+  EXPECT_EQ(allocator_->GetAsReference(mem1, 1001),
+            ref1.load(std::memory_order_relaxed));
+  EXPECT_NE(0U, iter.GetNext(&type));
+  EXPECT_EQ(1001U, type);
+  EXPECT_EQ(0U, iter.GetNext(&type));
+
+  // Do second delayed allocation and check.
+  void* mem2a = da2a.Get();
+  ASSERT_TRUE(mem2a);
+  EXPECT_EQ(allocator_->GetAsReference(mem2a, 2002),
+            ref2.load(std::memory_order_relaxed));
+  EXPECT_NE(0U, iter.GetNext(&type));
+  EXPECT_EQ(2002U, type);
+  EXPECT_EQ(0U, iter.GetNext(&type));
+
+  // Third allocation should just return offset into second allocation.
+  void* mem2b = da2b.Get();
+  ASSERT_TRUE(mem2b);
+  EXPECT_EQ(0U, iter.GetNext(&type));
+  EXPECT_EQ(reinterpret_cast<uintptr_t>(mem2a) + 5,
+            reinterpret_cast<uintptr_t>(mem2b));
+}
+
+// This test doesn't verify anything other than that it doesn't crash. Its
+// goal is to find coding errors that aren't otherwise tested for, much like
+// a "fuzzer" would.
+// This test is supposed to fail on the TSAN bot (crbug.com/579867).
+#if defined(THREAD_SANITIZER)
+#define MAYBE_CorruptionTest DISABLED_CorruptionTest
+#else
+#define MAYBE_CorruptionTest CorruptionTest
+#endif
+TEST_F(PersistentMemoryAllocatorTest, MAYBE_CorruptionTest) {
+  char* memory = mem_segment_.get();
+  AllocatorThread t1("t1", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
+  AllocatorThread t2("t2", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
+  AllocatorThread t3("t3", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
+  AllocatorThread t4("t4", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
+  AllocatorThread t5("t5", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
+
+  t1.Start();
+  t2.Start();
+  t3.Start();
+  t4.Start();
+  t5.Start();
+
+  do {
+    size_t offset = RandInt(0, TEST_MEMORY_SIZE - 1);
+    char value = RandInt(0, 255);
+    memory[offset] = value;
+  } while (!allocator_->IsCorrupt() && !allocator_->IsFull());
+
+  t1.Join();
+  t2.Join();
+  t3.Join();
+  t4.Join();
+  t5.Join();
+
+  CountIterables();
+}
+
+// Attempt to cause crashes or loops by expressly creating dangerous conditions.
+TEST_F(PersistentMemoryAllocatorTest, MaliciousTest) {
+  Reference block1 = allocator_->Allocate(sizeof(TestObject1), 1);
+  Reference block2 = allocator_->Allocate(sizeof(TestObject1), 2);
+  Reference block3 = allocator_->Allocate(sizeof(TestObject1), 3);
+  Reference block4 = allocator_->Allocate(sizeof(TestObject1), 3);
+  Reference block5 = allocator_->Allocate(sizeof(TestObject1), 3);
+  allocator_->MakeIterable(block1);
+  allocator_->MakeIterable(block2);
+  allocator_->MakeIterable(block3);
+  allocator_->MakeIterable(block4);
+  allocator_->MakeIterable(block5);
+  EXPECT_EQ(5U, CountIterables());
+  EXPECT_FALSE(allocator_->IsCorrupt());
+
+  // Create loop in iterable list and ensure it doesn't hang. The return value
+  // from CountIterables() in these cases is unpredictable. If there is a
+  // failure, the call will hang and the test will be killed for taking too
+  // long.
+  uint32_t* header4 = (uint32_t*)(mem_segment_.get() + block4);
+  EXPECT_EQ(block5, header4[3]);
+  header4[3] = block4;
+  CountIterables();  // loop: 1-2-3-4-4
+  EXPECT_TRUE(allocator_->IsCorrupt());
+
+  // Test where loop goes back to previous block.
+  header4[3] = block3;
+  CountIterables();  // loop: 1-2-3-4-3
+
+  // Test where loop goes back to the beginning.
+  header4[3] = block1;
+  CountIterables();  // loop: 1-2-3-4-1
+}
+
+
+//----- LocalPersistentMemoryAllocator -----------------------------------------
+
+TEST(LocalPersistentMemoryAllocatorTest, CreationTest) {
+  LocalPersistentMemoryAllocator allocator(TEST_MEMORY_SIZE, 42, "");
+  EXPECT_EQ(42U, allocator.Id());
+  EXPECT_NE(0U, allocator.Allocate(24, 1));
+  EXPECT_FALSE(allocator.IsFull());
+  EXPECT_FALSE(allocator.IsCorrupt());
+}
+
+
+//----- SharedPersistentMemoryAllocator ----------------------------------------
+
+TEST(SharedPersistentMemoryAllocatorTest, CreationTest) {
+  SharedMemoryHandle shared_handle_1;
+  SharedMemoryHandle shared_handle_2;
+
+  PersistentMemoryAllocator::MemoryInfo meminfo1;
+  Reference r123, r456, r789;
+  {
+    std::unique_ptr<SharedMemory> shmem1(new SharedMemory());
+    ASSERT_TRUE(shmem1->CreateAndMapAnonymous(TEST_MEMORY_SIZE));
+    SharedPersistentMemoryAllocator local(std::move(shmem1), TEST_ID, "",
+                                          false);
+    EXPECT_FALSE(local.IsReadonly());
+    r123 = local.Allocate(123, 123);
+    r456 = local.Allocate(456, 456);
+    r789 = local.Allocate(789, 789);
+    local.MakeIterable(r123);
+    local.ChangeType(r456, 654, 456, false);
+    local.MakeIterable(r789);
+    local.GetMemoryInfo(&meminfo1);
+    EXPECT_FALSE(local.IsFull());
+    EXPECT_FALSE(local.IsCorrupt());
+
+    shared_handle_1 = local.shared_memory()->handle().Duplicate();
+    ASSERT_TRUE(shared_handle_1.IsValid());
+    shared_handle_2 = local.shared_memory()->handle().Duplicate();
+    ASSERT_TRUE(shared_handle_2.IsValid());
+  }
+
+  // Read-only test.
+  std::unique_ptr<SharedMemory> shmem2(new SharedMemory(shared_handle_1,
+                                                        /*readonly=*/true));
+  ASSERT_TRUE(shmem2->Map(TEST_MEMORY_SIZE));
+
+  SharedPersistentMemoryAllocator shalloc2(std::move(shmem2), 0, "", true);
+  EXPECT_TRUE(shalloc2.IsReadonly());
+  EXPECT_EQ(TEST_ID, shalloc2.Id());
+  EXPECT_FALSE(shalloc2.IsFull());
+  EXPECT_FALSE(shalloc2.IsCorrupt());
+
+  PersistentMemoryAllocator::Iterator iter2(&shalloc2);
+  uint32_t type;
+  EXPECT_EQ(r123, iter2.GetNext(&type));
+  EXPECT_EQ(r789, iter2.GetNext(&type));
+  EXPECT_EQ(0U, iter2.GetNext(&type));
+
+  EXPECT_EQ(123U, shalloc2.GetType(r123));
+  EXPECT_EQ(654U, shalloc2.GetType(r456));
+  EXPECT_EQ(789U, shalloc2.GetType(r789));
+
+  PersistentMemoryAllocator::MemoryInfo meminfo2;
+  shalloc2.GetMemoryInfo(&meminfo2);
+  EXPECT_EQ(meminfo1.total, meminfo2.total);
+  EXPECT_EQ(meminfo1.free, meminfo2.free);
+
+  // Read/write test.
+  std::unique_ptr<SharedMemory> shmem3(new SharedMemory(shared_handle_2,
+                                                        /*readonly=*/false));
+  ASSERT_TRUE(shmem3->Map(TEST_MEMORY_SIZE));
+
+  SharedPersistentMemoryAllocator shalloc3(std::move(shmem3), 0, "", false);
+  EXPECT_FALSE(shalloc3.IsReadonly());
+  EXPECT_EQ(TEST_ID, shalloc3.Id());
+  EXPECT_FALSE(shalloc3.IsFull());
+  EXPECT_FALSE(shalloc3.IsCorrupt());
+
+  PersistentMemoryAllocator::Iterator iter3(&shalloc3);
+  EXPECT_EQ(r123, iter3.GetNext(&type));
+  EXPECT_EQ(r789, iter3.GetNext(&type));
+  EXPECT_EQ(0U, iter3.GetNext(&type));
+
+  EXPECT_EQ(123U, shalloc3.GetType(r123));
+  EXPECT_EQ(654U, shalloc3.GetType(r456));
+  EXPECT_EQ(789U, shalloc3.GetType(r789));
+
+  PersistentMemoryAllocator::MemoryInfo meminfo3;
+  shalloc3.GetMemoryInfo(&meminfo3);
+  EXPECT_EQ(meminfo1.total, meminfo3.total);
+  EXPECT_EQ(meminfo1.free, meminfo3.free);
+
+  // Interconnectivity test.
+  Reference obj = shalloc3.Allocate(42, 42);
+  ASSERT_TRUE(obj);
+  shalloc3.MakeIterable(obj);
+  EXPECT_EQ(obj, iter2.GetNext(&type));
+  EXPECT_EQ(42U, type);
+
+  // Clear-on-change test.
+  Reference data_ref = shalloc3.Allocate(sizeof(int) * 4, 911);
+  int* data = shalloc3.GetAsArray<int>(data_ref, 911, 4);
+  ASSERT_TRUE(data);
+  data[0] = 0;
+  data[1] = 1;
+  data[2] = 2;
+  data[3] = 3;
+  ASSERT_TRUE(shalloc3.ChangeType(data_ref, 119, 911, false));
+  EXPECT_EQ(0, data[0]);
+  EXPECT_EQ(1, data[1]);
+  EXPECT_EQ(2, data[2]);
+  EXPECT_EQ(3, data[3]);
+  ASSERT_TRUE(shalloc3.ChangeType(data_ref, 191, 119, true));
+  EXPECT_EQ(0, data[0]);
+  EXPECT_EQ(0, data[1]);
+  EXPECT_EQ(0, data[2]);
+  EXPECT_EQ(0, data[3]);
+}
+
+
+#if !defined(OS_NACL)
+//----- FilePersistentMemoryAllocator ------------------------------------------
+
+TEST(FilePersistentMemoryAllocatorTest, CreationTest) {
+  ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath file_path = temp_dir.GetPath().AppendASCII("persistent_memory");
+
+  PersistentMemoryAllocator::MemoryInfo meminfo1;
+  Reference r123, r456, r789;
+  {
+    LocalPersistentMemoryAllocator local(TEST_MEMORY_SIZE, TEST_ID, "");
+    EXPECT_FALSE(local.IsReadonly());
+    r123 = local.Allocate(123, 123);
+    r456 = local.Allocate(456, 456);
+    r789 = local.Allocate(789, 789);
+    local.MakeIterable(r123);
+    local.ChangeType(r456, 654, 456, false);
+    local.MakeIterable(r789);
+    local.GetMemoryInfo(&meminfo1);
+    EXPECT_FALSE(local.IsFull());
+    EXPECT_FALSE(local.IsCorrupt());
+
+    File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
+    ASSERT_TRUE(writer.IsValid());
+    writer.Write(0, (const char*)local.data(), local.used());
+  }
+
+  std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
+  mmfile->Initialize(file_path);
+  EXPECT_TRUE(mmfile->IsValid());
+  const size_t mmlength = mmfile->length();
+  EXPECT_GE(meminfo1.total, mmlength);
+
+  FilePersistentMemoryAllocator file(std::move(mmfile), 0, 0, "", false);
+  EXPECT_FALSE(file.IsReadonly());
+  EXPECT_EQ(TEST_ID, file.Id());
+  EXPECT_FALSE(file.IsFull());
+  EXPECT_FALSE(file.IsCorrupt());
+
+  PersistentMemoryAllocator::Iterator iter(&file);
+  uint32_t type;
+  EXPECT_EQ(r123, iter.GetNext(&type));
+  EXPECT_EQ(r789, iter.GetNext(&type));
+  EXPECT_EQ(0U, iter.GetNext(&type));
+
+  EXPECT_EQ(123U, file.GetType(r123));
+  EXPECT_EQ(654U, file.GetType(r456));
+  EXPECT_EQ(789U, file.GetType(r789));
+
+  PersistentMemoryAllocator::MemoryInfo meminfo2;
+  file.GetMemoryInfo(&meminfo2);
+  EXPECT_GE(meminfo1.total, meminfo2.total);
+  EXPECT_GE(meminfo1.free, meminfo2.free);
+  EXPECT_EQ(mmlength, meminfo2.total);
+  EXPECT_EQ(0U, meminfo2.free);
+
+  // There's no way of knowing if Flush actually does anything but at least
+  // verify that it runs without CHECK violations.
+  file.Flush(false);
+  file.Flush(true);
+}
+
+TEST(FilePersistentMemoryAllocatorTest, ExtendTest) {
+  ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath file_path = temp_dir.GetPath().AppendASCII("extend_test");
+  MemoryMappedFile::Region region = {0, 16 << 10};  // 16KiB maximum size.
+
+  // Start with a small but valid file of persistent data.
+  ASSERT_FALSE(PathExists(file_path));
+  {
+    LocalPersistentMemoryAllocator local(TEST_MEMORY_SIZE, TEST_ID, "");
+    local.Allocate(1, 1);
+    local.Allocate(11, 11);
+
+    File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
+    ASSERT_TRUE(writer.IsValid());
+    writer.Write(0, (const char*)local.data(), local.used());
+  }
+  ASSERT_TRUE(PathExists(file_path));
+  int64_t before_size;
+  ASSERT_TRUE(GetFileSize(file_path, &before_size));
+
+  // Map it as an extendable read/write file and append to it.
+  {
+    std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
+    mmfile->Initialize(
+        File(file_path, File::FLAG_OPEN | File::FLAG_READ | File::FLAG_WRITE),
+        region, MemoryMappedFile::READ_WRITE_EXTEND);
+    FilePersistentMemoryAllocator allocator(std::move(mmfile), region.size, 0,
+                                            "", false);
+    EXPECT_EQ(static_cast<size_t>(before_size), allocator.used());
+
+    allocator.Allocate(111, 111);
+    EXPECT_LT(static_cast<size_t>(before_size), allocator.used());
+  }
+
+  // Validate that append worked.
+  int64_t after_size;
+  ASSERT_TRUE(GetFileSize(file_path, &after_size));
+  EXPECT_LT(before_size, after_size);
+
+  // Verify that it's still an acceptable file.
+  {
+    std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
+    mmfile->Initialize(
+        File(file_path, File::FLAG_OPEN | File::FLAG_READ | File::FLAG_WRITE),
+        region, MemoryMappedFile::READ_WRITE_EXTEND);
+    EXPECT_TRUE(FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, true));
+    EXPECT_TRUE(
+        FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, false));
+  }
+}
+
+TEST(FilePersistentMemoryAllocatorTest, AcceptableTest) {
+  const uint32_t kAllocAlignment =
+      PersistentMemoryAllocatorTest::GetAllocAlignment();
+  ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+
+  LocalPersistentMemoryAllocator local(TEST_MEMORY_SIZE, TEST_ID, "");
+  local.MakeIterable(local.Allocate(1, 1));
+  local.MakeIterable(local.Allocate(11, 11));
+  const size_t minsize = local.used();
+  std::unique_ptr<char[]> garbage(new char[minsize]);
+  RandBytes(garbage.get(), minsize);
+
+  std::unique_ptr<MemoryMappedFile> mmfile;
+  char filename[100];
+  for (size_t filesize = minsize; filesize > 0; --filesize) {
+    strings::SafeSPrintf(filename, "memory_%d_A", filesize);
+    FilePath file_path = temp_dir.GetPath().AppendASCII(filename);
+    ASSERT_FALSE(PathExists(file_path));
+    {
+      File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
+      ASSERT_TRUE(writer.IsValid());
+      writer.Write(0, (const char*)local.data(), filesize);
+    }
+    ASSERT_TRUE(PathExists(file_path));
+
+    // Request read/write access for some sizes that are a multiple of the
+    // allocator's alignment size. The allocator is strict about file size
+    // being a multiple of its internal alignment when doing read/write access.
+    const bool read_only = (filesize % (2 * kAllocAlignment)) != 0;
+    const uint32_t file_flags =
+        File::FLAG_OPEN | File::FLAG_READ | (read_only ? 0 : File::FLAG_WRITE);
+    const MemoryMappedFile::Access map_access =
+        read_only ? MemoryMappedFile::READ_ONLY : MemoryMappedFile::READ_WRITE;
+
+    mmfile.reset(new MemoryMappedFile());
+    mmfile->Initialize(File(file_path, file_flags), map_access);
+    EXPECT_EQ(filesize, mmfile->length());
+    if (FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, read_only)) {
+      // Make sure construction doesn't crash. It will, however, cause
+      // error messages warning about a corrupted memory segment.
+      FilePersistentMemoryAllocator allocator(std::move(mmfile), 0, 0, "",
+                                              read_only);
+      // Also make sure that iteration doesn't crash.
+      PersistentMemoryAllocator::Iterator iter(&allocator);
+      uint32_t type_id;
+      Reference ref;
+      while ((ref = iter.GetNext(&type_id)) != 0) {
+        const char* data = allocator.GetAsArray<char>(
+            ref, 0, PersistentMemoryAllocator::kSizeAny);
+        uint32_t type = allocator.GetType(ref);
+        size_t size = allocator.GetAllocSize(ref);
+        // Ensure compiler can't optimize-out above variables.
+        (void)data;
+        (void)type;
+        (void)size;
+      }
+
+      // Ensure that short files are detected as corrupt and full files are not.
+      EXPECT_EQ(filesize != minsize, allocator.IsCorrupt());
+    } else {
+      // For filesize >= minsize, the file must be acceptable. This
+      // else clause (file-not-acceptable) should be reached only if
+      // filesize < minsize.
+      EXPECT_LT(filesize, minsize);
+    }
+
+    strings::SafeSPrintf(filename, "memory_%d_B", filesize);
+    file_path = temp_dir.GetPath().AppendASCII(filename);
+    ASSERT_FALSE(PathExists(file_path));
+    {
+      File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
+      ASSERT_TRUE(writer.IsValid());
+      writer.Write(0, (const char*)garbage.get(), filesize);
+    }
+    ASSERT_TRUE(PathExists(file_path));
+
+    mmfile.reset(new MemoryMappedFile());
+    mmfile->Initialize(File(file_path, file_flags), map_access);
+    EXPECT_EQ(filesize, mmfile->length());
+    if (FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, read_only)) {
+      // Make sure construction doesn't crash. It will, however, cause
+      // error messages warning about a corrupted memory segment.
+      FilePersistentMemoryAllocator allocator(std::move(mmfile), 0, 0, "",
+                                              read_only);
+      EXPECT_TRUE(allocator.IsCorrupt());  // Garbage data so it should be.
+    } else {
+      // For filesize >= minsize, the file must be acceptable. This
+      // else clause (file-not-acceptable) should be reached only if
+      // filesize < minsize.
+      EXPECT_GT(minsize, filesize);
+    }
+  }
+}
+
+TEST_F(PersistentMemoryAllocatorTest, TruncateTest) {
+  ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath file_path = temp_dir.GetPath().AppendASCII("truncate_test");
+
+  // Start with a small but valid file of persistent data. Keep the "used"
+  // amount for both allocations.
+  Reference a1_ref;
+  Reference a2_ref;
+  size_t a1_used;
+  size_t a2_used;
+  ASSERT_FALSE(PathExists(file_path));
+  {
+    LocalPersistentMemoryAllocator allocator(TEST_MEMORY_SIZE, TEST_ID, "");
+    a1_ref = allocator.Allocate(100 << 10, 1);
+    allocator.MakeIterable(a1_ref);
+    a1_used = allocator.used();
+    a2_ref = allocator.Allocate(200 << 10, 11);
+    allocator.MakeIterable(a2_ref);
+    a2_used = allocator.used();
+
+    File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
+    ASSERT_TRUE(writer.IsValid());
+    writer.Write(0, static_cast<const char*>(allocator.data()),
+                 allocator.size());
+  }
+  ASSERT_TRUE(PathExists(file_path));
+  EXPECT_LE(a1_used, a2_ref);
+
+  // Truncate the file to successively shorter lengths and make sure it can
+  // still be read, both with read-write and read-only access.
+  for (size_t file_length : {a2_used, a1_used, a1_used / 2}) {
+    SCOPED_TRACE(StringPrintf("file_length=%zu", file_length));
+    SetFileLength(file_path, file_length);
+
+    for (bool read_only : {false, true}) {
+      SCOPED_TRACE(StringPrintf("read_only=%s", read_only ? "true" : "false"));
+
+      std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
+      mmfile->Initialize(
+          File(file_path, File::FLAG_OPEN |
+                              (read_only ? File::FLAG_READ
+                                         : File::FLAG_READ | File::FLAG_WRITE)),
+          read_only ? MemoryMappedFile::READ_ONLY
+                    : MemoryMappedFile::READ_WRITE);
+      ASSERT_TRUE(
+          FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, read_only));
+
+      FilePersistentMemoryAllocator allocator(std::move(mmfile), 0, 0, "",
+                                              read_only);
+
+      PersistentMemoryAllocator::Iterator iter(&allocator);
+      uint32_t type_id;
+      EXPECT_EQ(file_length >= a1_used ? a1_ref : 0U, iter.GetNext(&type_id));
+      EXPECT_EQ(file_length >= a2_used ? a2_ref : 0U, iter.GetNext(&type_id));
+      EXPECT_EQ(0U, iter.GetNext(&type_id));
+
+      // Ensure that short files are detected as corrupt and full files are not.
+      EXPECT_EQ(file_length < a2_used, allocator.IsCorrupt());
+    }
+
+    // Ensure that file length was not adjusted.
+    int64_t actual_length;
+    ASSERT_TRUE(GetFileSize(file_path, &actual_length));
+    EXPECT_EQ(file_length, static_cast<size_t>(actual_length));
+  }
+}
+
+#endif  // !defined(OS_NACL)
+
+}  // namespace base
diff --git a/base/metrics/persistent_sample_map.cc b/base/metrics/persistent_sample_map.cc
new file mode 100644
index 0000000..f38b9d1
--- /dev/null
+++ b/base/metrics/persistent_sample_map.cc
@@ -0,0 +1,305 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/persistent_sample_map.h"
+
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/metrics/persistent_histogram_allocator.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/stl_util.h"
+
+namespace base {
+
+typedef HistogramBase::Count Count;
+typedef HistogramBase::Sample Sample;
+
+namespace {
+
+// An iterator for going through a PersistentSampleMap. The logic here is
+// identical to that of SampleMapIterator but with different data structures.
+// Changes here likely need to be duplicated there.
+class PersistentSampleMapIterator : public SampleCountIterator {
+ public:
+  typedef std::map<HistogramBase::Sample, HistogramBase::Count*>
+      SampleToCountMap;
+
+  explicit PersistentSampleMapIterator(const SampleToCountMap& sample_counts);
+  ~PersistentSampleMapIterator() override;
+
+  // SampleCountIterator:
+  bool Done() const override;
+  void Next() override;
+  void Get(HistogramBase::Sample* min,
+           int64_t* max,
+           HistogramBase::Count* count) const override;
+
+ private:
+  void SkipEmptyBuckets();
+
+  SampleToCountMap::const_iterator iter_;
+  const SampleToCountMap::const_iterator end_;
+};
+
+PersistentSampleMapIterator::PersistentSampleMapIterator(
+    const SampleToCountMap& sample_counts)
+    : iter_(sample_counts.begin()),
+      end_(sample_counts.end()) {
+  SkipEmptyBuckets();
+}
+
+PersistentSampleMapIterator::~PersistentSampleMapIterator() = default;
+
+bool PersistentSampleMapIterator::Done() const {
+  return iter_ == end_;
+}
+
+void PersistentSampleMapIterator::Next() {
+  DCHECK(!Done());
+  ++iter_;
+  SkipEmptyBuckets();
+}
+
+void PersistentSampleMapIterator::Get(Sample* min,
+                                      int64_t* max,
+                                      Count* count) const {
+  DCHECK(!Done());
+  if (min)
+    *min = iter_->first;
+  if (max)
+    *max = strict_cast<int64_t>(iter_->first) + 1;
+  if (count)
+    *count = *iter_->second;
+}
+
+void PersistentSampleMapIterator::SkipEmptyBuckets() {
+  while (!Done() && *iter_->second == 0) {
+    ++iter_;
+  }
+}
+
+// This structure holds an entry for a PersistentSampleMap within a persistent
+// memory allocator. The "id" must be unique across all maps held by an
+// allocator or records will get attached to the wrong sample map.
+struct SampleRecord {
+  // SHA1(SampleRecord): Increment this if structure changes!
+  static constexpr uint32_t kPersistentTypeId = 0x8FE6A69F + 1;
+
+  // Expected size for 32/64-bit check.
+  static constexpr size_t kExpectedInstanceSize = 16;
+
+  uint64_t id;   // Unique identifier of owner.
+  Sample value;  // The value for which this record holds a count.
+  Count count;   // The count associated with the above value.
+};
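+
+// Note: SampleRecord's kExpectedInstanceSize (16) follows from its layout:
+// an 8-byte id plus a 4-byte Sample and a 4-byte Count, the same on both
+// 32-bit and 64-bit builds.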
+
+}  // namespace
+
+PersistentSampleMap::PersistentSampleMap(
+    uint64_t id,
+    PersistentHistogramAllocator* allocator,
+    Metadata* meta)
+    : HistogramSamples(id, meta), allocator_(allocator) {}
+
+PersistentSampleMap::~PersistentSampleMap() {
+  if (records_)
+    records_->Release(this);
+}
+
+void PersistentSampleMap::Accumulate(Sample value, Count count) {
+#if 0  // TODO(bcwhite) Re-enable efficient version after crbug.com/682680.
+  *GetOrCreateSampleCountStorage(value) += count;
+#else
+  Count* local_count_ptr = GetOrCreateSampleCountStorage(value);
+  if (count < 0) {
+    if (*local_count_ptr < -count)
+      RecordNegativeSample(SAMPLES_ACCUMULATE_WENT_NEGATIVE, -count);
+    else
+      RecordNegativeSample(SAMPLES_ACCUMULATE_NEGATIVE_COUNT, -count);
+    *local_count_ptr += count;
+  } else {
+    Sample old_value = *local_count_ptr;
+    Sample new_value = old_value + count;
+    *local_count_ptr = new_value;
+    if ((new_value >= 0) != (old_value >= 0))
+      RecordNegativeSample(SAMPLES_ACCUMULATE_OVERFLOW, count);
+  }
+#endif
+  IncreaseSumAndCount(strict_cast<int64_t>(count) * value, count);
+}
+
+Count PersistentSampleMap::GetCount(Sample value) const {
+  // Have to override "const" to make sure all samples have been loaded before
+  // being able to know what value to return.
+  Count* count_pointer =
+      const_cast<PersistentSampleMap*>(this)->GetSampleCountStorage(value);
+  return count_pointer ? *count_pointer : 0;
+}
+
+Count PersistentSampleMap::TotalCount() const {
+  // Have to override "const" in order to make sure all samples have been
+  // loaded before trying to iterate over the map.
+  const_cast<PersistentSampleMap*>(this)->ImportSamples(-1, true);
+
+  Count count = 0;
+  for (const auto& entry : sample_counts_) {
+    count += *entry.second;
+  }
+  return count;
+}
+
+std::unique_ptr<SampleCountIterator> PersistentSampleMap::Iterator() const {
+  // Have to override "const" in order to make sure all samples have been
+  // loaded before trying to iterate over the map.
+  const_cast<PersistentSampleMap*>(this)->ImportSamples(-1, true);
+  return WrapUnique(new PersistentSampleMapIterator(sample_counts_));
+}
+
+// static
+PersistentMemoryAllocator::Reference
+PersistentSampleMap::GetNextPersistentRecord(
+    PersistentMemoryAllocator::Iterator& iterator,
+    uint64_t* sample_map_id) {
+  const SampleRecord* record = iterator.GetNextOfObject<SampleRecord>();
+  if (!record)
+    return 0;
+
+  *sample_map_id = record->id;
+  return iterator.GetAsReference(record);
+}
+
+// static
+PersistentMemoryAllocator::Reference
+PersistentSampleMap::CreatePersistentRecord(
+    PersistentMemoryAllocator* allocator,
+    uint64_t sample_map_id,
+    Sample value) {
+  SampleRecord* record = allocator->New<SampleRecord>();
+  if (!record) {
+    NOTREACHED() << "full=" << allocator->IsFull()
+                 << ", corrupt=" << allocator->IsCorrupt();
+    return 0;
+  }
+
+  record->id = sample_map_id;
+  record->value = value;
+  record->count = 0;
+
+  PersistentMemoryAllocator::Reference ref = allocator->GetAsReference(record);
+  allocator->MakeIterable(ref);
+  return ref;
+}
+
+bool PersistentSampleMap::AddSubtractImpl(SampleCountIterator* iter,
+                                          Operator op) {
+  Sample min;
+  int64_t max;
+  Count count;
+  for (; !iter->Done(); iter->Next()) {
+    iter->Get(&min, &max, &count);
+    if (count == 0)
+      continue;
+    if (strict_cast<int64_t>(min) + 1 != max)
+      return false;  // SparseHistogram only supports buckets of size 1.
+    *GetOrCreateSampleCountStorage(min) +=
+        (op == HistogramSamples::ADD) ? count : -count;
+  }
+  return true;
+}
+
+Count* PersistentSampleMap::GetSampleCountStorage(Sample value) {
+  // If |value| is already in the map, just return that.
+  auto it = sample_counts_.find(value);
+  if (it != sample_counts_.end())
+    return it->second;
+
+  // Import any new samples from persistent memory looking for the value.
+  return ImportSamples(value, false);
+}
+
+Count* PersistentSampleMap::GetOrCreateSampleCountStorage(Sample value) {
+  // Get any existing count storage.
+  Count* count_pointer = GetSampleCountStorage(value);
+  if (count_pointer)
+    return count_pointer;
+
+  // Create a new record in persistent memory for the value. |records_| will
+  // have been initialized by the GetSampleCountStorage() call above.
+  DCHECK(records_);
+  PersistentMemoryAllocator::Reference ref = records_->CreateNew(value);
+  if (!ref) {
+    // If a new record could not be created then the underlying allocator is
+    // full or corrupt. Instead, allocate the counter from the heap. This
+    // sample will not be persistent, will not be shared, and will leak...
+    // but it's better than crashing.
+    count_pointer = new Count(0);
+    sample_counts_[value] = count_pointer;
+    return count_pointer;
+  }
+
+  // A race condition between two independent processes (i.e. two independent
+  // histogram objects sharing the same sample data) could cause two of the
+  // above records to be created. The allocator, however, forces a strict
+  // ordering on iterable objects so use the import method to actually add the
+  // just-created record. This ensures that all PersistentSampleMap objects
+  // will always use the same record, whichever was first made iterable.
+  // Thread-safety within a process where multiple threads use the same
+  // histogram object is delegated to the controlling histogram object which,
+  // for sparse histograms, is a lock object.
+  count_pointer = ImportSamples(value, false);
+  DCHECK(count_pointer);
+  return count_pointer;
+}
+
+PersistentSampleMapRecords* PersistentSampleMap::GetRecords() {
+  // The |records_| pointer is lazily fetched from the |allocator_| only on
+  // first use. Sometimes duplicate histograms are created by race conditions
+  // and if both were to grab the records object, there would be a conflict.
+  // Use of a histogram, and thus a call to this method, won't occur until
+  // after the histogram has been de-dup'd.
+  if (!records_)
+    records_ = allocator_->UseSampleMapRecords(id(), this);
+  return records_;
+}
+
+Count* PersistentSampleMap::ImportSamples(Sample until_value,
+                                          bool import_everything) {
+  Count* found_count = nullptr;
+  PersistentMemoryAllocator::Reference ref;
+  PersistentSampleMapRecords* records = GetRecords();
+  while ((ref = records->GetNext()) != 0) {
+    SampleRecord* record = records->GetAsObject<SampleRecord>(ref);
+    if (!record)
+      continue;
+
+    DCHECK_EQ(id(), record->id);
+
+    // Check if the record's value is already known.
+    if (!ContainsKey(sample_counts_, record->value)) {
+      // No: Add it to map of known values.
+      sample_counts_[record->value] = &record->count;
+    } else {
+      // Yes: Ignore it; it's a duplicate caused by a race condition -- see
+      // code & comment in GetOrCreateSampleCountStorage() for details.
+      // Check that nothing ever operated on the duplicate record.
+      DCHECK_EQ(0, record->count);
+    }
+
+    // Check if it's the value being searched for and, if so, keep a pointer
+    // to return later. Stop here unless everything is being imported.
+    // Because race conditions can cause multiple records for a single value,
+    // be sure to return the first one found.
+    if (record->value == until_value) {
+      if (!found_count)
+        found_count = &record->count;
+      if (!import_everything)
+        break;
+    }
+  }
+
+  return found_count;
+}
+
+}  // namespace base
diff --git a/base/metrics/persistent_sample_map.h b/base/metrics/persistent_sample_map.h
new file mode 100644
index 0000000..853f862
--- /dev/null
+++ b/base/metrics/persistent_sample_map.h
@@ -0,0 +1,109 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// PersistentSampleMap implements the HistogramSamples interface. It is used
+// by the SparseHistogram class to store samples in persistent memory, which
+// allows them to be shared between processes and to live across restarts.
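+//
+// A minimal usage sketch (hypothetical values; assumes a live
+// PersistentHistogramAllocator* named |allocator|, as in the unit tests):
+//
+//   HistogramSamples::LocalMetadata meta;
+//   PersistentSampleMap samples(/*id=*/1, allocator, &meta);
+//   samples.Accumulate(/*value=*/42, /*count=*/1);
+//   HistogramBase::Count count = samples.GetCount(42);  // count == 1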
+
+#ifndef BASE_METRICS_PERSISTENT_SAMPLE_MAP_H_
+#define BASE_METRICS_PERSISTENT_SAMPLE_MAP_H_
+
+#include <stdint.h>
+
+#include <map>
+#include <memory>
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/metrics/histogram_base.h"
+#include "base/metrics/histogram_samples.h"
+#include "base/metrics/persistent_memory_allocator.h"
+
+namespace base {
+
+class PersistentHistogramAllocator;
+class PersistentSampleMapRecords;
+
+// The logic here is similar to that of SampleMap but with different data
+// structures. Changes here likely need to be duplicated there.
+class BASE_EXPORT PersistentSampleMap : public HistogramSamples {
+ public:
+  // Constructs a persistent sample map using a PersistentHistogramAllocator
+  // as the data source for persistent records.
+  PersistentSampleMap(uint64_t id,
+                      PersistentHistogramAllocator* allocator,
+                      Metadata* meta);
+
+  ~PersistentSampleMap() override;
+
+  // HistogramSamples:
+  void Accumulate(HistogramBase::Sample value,
+                  HistogramBase::Count count) override;
+  HistogramBase::Count GetCount(HistogramBase::Sample value) const override;
+  HistogramBase::Count TotalCount() const override;
+  std::unique_ptr<SampleCountIterator> Iterator() const override;
+
+  // Uses a persistent-memory |iterator| to locate and return information about
+  // the next record holding information for a PersistentSampleMap. The record
+  // could be for any map, so the |sample_map_id| is returned as well.
+  static PersistentMemoryAllocator::Reference GetNextPersistentRecord(
+      PersistentMemoryAllocator::Iterator& iterator,
+      uint64_t* sample_map_id);
+
+  // Creates a new record in an |allocator| storing count information for a
+  // specific sample |value| of a histogram with the given |sample_map_id|.
+  static PersistentMemoryAllocator::Reference CreatePersistentRecord(
+      PersistentMemoryAllocator* allocator,
+      uint64_t sample_map_id,
+      HistogramBase::Sample value);
+
+ protected:
+  // Performs arithmetic. |op| is ADD or SUBTRACT.
+  bool AddSubtractImpl(SampleCountIterator* iter, Operator op) override;
+
+  // Gets a pointer to a "count" corresponding to a given |value|. Returns NULL
+  // if the sample does not exist.
+  HistogramBase::Count* GetSampleCountStorage(HistogramBase::Sample value);
+
+  // Gets a pointer to a "count" corresponding to a given |value|, creating
+  // the sample (initialized to zero) if it does not already exist.
+  HistogramBase::Count* GetOrCreateSampleCountStorage(
+      HistogramBase::Sample value);
+
+ private:
+  // Gets the object that manages persistent records. This returns the
+  // |records_| member after first initializing it if necessary.
+  PersistentSampleMapRecords* GetRecords();
+
+  // Imports samples from persistent memory by iterating over all sample
+  // records found therein, adding them to the sample_counts_ map. If a
+  // count for the sample |until_value| is found, stop the import and return
+  // a pointer to that counter. If that value is not found, null will be
+  // returned after all currently available samples have been loaded. Pass
+  // true for |import_everything| to force the importing of all available
+  // samples even if a match is found.
+  HistogramBase::Count* ImportSamples(HistogramBase::Sample until_value,
+                                      bool import_everything);
+
+  // All created/loaded sample values and their associated counts. The storage
+  // for the actual Count numbers is owned by the |records_| object and its
+  // underlying allocator.
+  std::map<HistogramBase::Sample, HistogramBase::Count*> sample_counts_;
+
+  // The allocator that manages histograms inside persistent memory. This is
+  // owned externally and is expected to live beyond the life of this object.
+  PersistentHistogramAllocator* allocator_;
+
+  // The object that manages sample records inside persistent memory. This is
+  // owned by the |allocator_| object (above) and so, like it, is expected to
+  // live beyond the life of this object. This value is lazily-initialized on
+  // first use via the GetRecords() accessor method.
+  PersistentSampleMapRecords* records_ = nullptr;
+
+  DISALLOW_COPY_AND_ASSIGN(PersistentSampleMap);
+};
+
+}  // namespace base
+
+#endif  // BASE_METRICS_PERSISTENT_SAMPLE_MAP_H_
diff --git a/base/metrics/persistent_sample_map_unittest.cc b/base/metrics/persistent_sample_map_unittest.cc
new file mode 100644
index 0000000..b25f582
--- /dev/null
+++ b/base/metrics/persistent_sample_map_unittest.cc
@@ -0,0 +1,260 @@
+// Copyright (c) 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/persistent_sample_map.h"
+
+#include <memory>
+
+#include "base/memory/ptr_util.h"
+#include "base/metrics/persistent_histogram_allocator.h"
+#include "base/test/gtest_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+std::unique_ptr<PersistentHistogramAllocator> CreateHistogramAllocator(
+    size_t bytes) {
+  return std::make_unique<PersistentHistogramAllocator>(
+      std::make_unique<LocalPersistentMemoryAllocator>(bytes, 0, ""));
+}
+
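+// Wraps a second PersistentMemoryAllocator around the same underlying memory,
+// simulating another process (or a duplicate histogram) attached to the same
+// persistent segment.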
+std::unique_ptr<PersistentHistogramAllocator> DuplicateHistogramAllocator(
+    PersistentHistogramAllocator* original) {
+  return std::make_unique<PersistentHistogramAllocator>(
+      std::make_unique<PersistentMemoryAllocator>(
+          const_cast<void*>(original->data()), original->length(), 0,
+          original->Id(), original->Name(), false));
+}
+
+TEST(PersistentSampleMapTest, AccumulateTest) {
+  std::unique_ptr<PersistentHistogramAllocator> allocator =
+      CreateHistogramAllocator(64 << 10);  // 64 KiB
+  HistogramSamples::LocalMetadata meta;
+  PersistentSampleMap samples(1, allocator.get(), &meta);
+
+  samples.Accumulate(1, 100);
+  samples.Accumulate(2, 200);
+  samples.Accumulate(1, -200);
+  EXPECT_EQ(-100, samples.GetCount(1));
+  EXPECT_EQ(200, samples.GetCount(2));
+
+  EXPECT_EQ(300, samples.sum());
+  EXPECT_EQ(100, samples.TotalCount());
+  EXPECT_EQ(samples.redundant_count(), samples.TotalCount());
+}
+
+TEST(PersistentSampleMapTest, Accumulate_LargeValuesDontOverflow) {
+  std::unique_ptr<PersistentHistogramAllocator> allocator =
+      CreateHistogramAllocator(64 << 10);  // 64 KiB
+  HistogramSamples::LocalMetadata meta;
+  PersistentSampleMap samples(1, allocator.get(), &meta);
+
+  samples.Accumulate(250000000, 100);
+  samples.Accumulate(500000000, 200);
+  samples.Accumulate(250000000, -200);
+  EXPECT_EQ(-100, samples.GetCount(250000000));
+  EXPECT_EQ(200, samples.GetCount(500000000));
+
+  EXPECT_EQ(75000000000LL, samples.sum());
+  EXPECT_EQ(100, samples.TotalCount());
+  EXPECT_EQ(samples.redundant_count(), samples.TotalCount());
+}
+
+TEST(PersistentSampleMapTest, AddSubtractTest) {
+  std::unique_ptr<PersistentHistogramAllocator> allocator1 =
+      CreateHistogramAllocator(64 << 10);  // 64 KiB
+  HistogramSamples::LocalMetadata meta1;
+  PersistentSampleMap samples1(1, allocator1.get(), &meta1);
+  samples1.Accumulate(1, 100);
+  samples1.Accumulate(2, 100);
+  samples1.Accumulate(3, 100);
+
+  std::unique_ptr<PersistentHistogramAllocator> allocator2 =
+      DuplicateHistogramAllocator(allocator1.get());
+  HistogramSamples::LocalMetadata meta2;
+  PersistentSampleMap samples2(2, allocator2.get(), &meta2);
+  samples2.Accumulate(1, 200);
+  samples2.Accumulate(2, 200);
+  samples2.Accumulate(4, 200);
+
+  samples1.Add(samples2);
+  EXPECT_EQ(300, samples1.GetCount(1));
+  EXPECT_EQ(300, samples1.GetCount(2));
+  EXPECT_EQ(100, samples1.GetCount(3));
+  EXPECT_EQ(200, samples1.GetCount(4));
+  EXPECT_EQ(2000, samples1.sum());
+  EXPECT_EQ(900, samples1.TotalCount());
+  EXPECT_EQ(samples1.redundant_count(), samples1.TotalCount());
+
+  samples1.Subtract(samples2);
+  EXPECT_EQ(100, samples1.GetCount(1));
+  EXPECT_EQ(100, samples1.GetCount(2));
+  EXPECT_EQ(100, samples1.GetCount(3));
+  EXPECT_EQ(0, samples1.GetCount(4));
+  EXPECT_EQ(600, samples1.sum());
+  EXPECT_EQ(300, samples1.TotalCount());
+  EXPECT_EQ(samples1.redundant_count(), samples1.TotalCount());
+}
+
+TEST(PersistentSampleMapTest, PersistenceTest) {
+  std::unique_ptr<PersistentHistogramAllocator> allocator1 =
+      CreateHistogramAllocator(64 << 10);  // 64 KiB
+  HistogramSamples::LocalMetadata meta12;
+  PersistentSampleMap samples1(12, allocator1.get(), &meta12);
+  samples1.Accumulate(1, 100);
+  samples1.Accumulate(2, 200);
+  samples1.Accumulate(1, -200);
+  samples1.Accumulate(-1, 1);
+  EXPECT_EQ(-100, samples1.GetCount(1));
+  EXPECT_EQ(200, samples1.GetCount(2));
+  EXPECT_EQ(1, samples1.GetCount(-1));
+  EXPECT_EQ(299, samples1.sum());
+  EXPECT_EQ(101, samples1.TotalCount());
+  EXPECT_EQ(samples1.redundant_count(), samples1.TotalCount());
+
+  std::unique_ptr<PersistentHistogramAllocator> allocator2 =
+      DuplicateHistogramAllocator(allocator1.get());
+  PersistentSampleMap samples2(12, allocator2.get(), &meta12);
+  EXPECT_EQ(samples1.id(), samples2.id());
+  EXPECT_EQ(samples1.sum(), samples2.sum());
+  EXPECT_EQ(samples1.redundant_count(), samples2.redundant_count());
+  EXPECT_EQ(samples1.TotalCount(), samples2.TotalCount());
+  EXPECT_EQ(-100, samples2.GetCount(1));
+  EXPECT_EQ(200, samples2.GetCount(2));
+  EXPECT_EQ(1, samples2.GetCount(-1));
+  EXPECT_EQ(299, samples2.sum());
+  EXPECT_EQ(101, samples2.TotalCount());
+  EXPECT_EQ(samples2.redundant_count(), samples2.TotalCount());
+
+  samples1.Accumulate(-1, -1);
+  EXPECT_EQ(0, samples2.GetCount(3));
+  EXPECT_EQ(0, samples1.GetCount(3));
+  samples2.Accumulate(3, 300);
+  EXPECT_EQ(300, samples2.GetCount(3));
+  EXPECT_EQ(300, samples1.GetCount(3));
+  EXPECT_EQ(samples1.sum(), samples2.sum());
+  EXPECT_EQ(samples1.redundant_count(), samples2.redundant_count());
+  EXPECT_EQ(samples1.TotalCount(), samples2.TotalCount());
+
+  EXPECT_EQ(0, samples2.GetCount(4));
+  EXPECT_EQ(0, samples1.GetCount(4));
+  samples1.Accumulate(4, 400);
+  EXPECT_EQ(400, samples2.GetCount(4));
+  EXPECT_EQ(400, samples1.GetCount(4));
+  samples2.Accumulate(4, 4000);
+  EXPECT_EQ(4400, samples2.GetCount(4));
+  EXPECT_EQ(4400, samples1.GetCount(4));
+  EXPECT_EQ(samples1.sum(), samples2.sum());
+  EXPECT_EQ(samples1.redundant_count(), samples2.redundant_count());
+  EXPECT_EQ(samples1.TotalCount(), samples2.TotalCount());
+}
+
+TEST(PersistentSampleMapIteratorTest, IterateTest) {
+  std::unique_ptr<PersistentHistogramAllocator> allocator =
+      CreateHistogramAllocator(64 << 10);  // 64 KiB
+  HistogramSamples::LocalMetadata meta;
+  PersistentSampleMap samples(1, allocator.get(), &meta);
+  samples.Accumulate(1, 100);
+  samples.Accumulate(2, 200);
+  samples.Accumulate(4, -300);
+  samples.Accumulate(5, 0);
+
+  std::unique_ptr<SampleCountIterator> it = samples.Iterator();
+
+  HistogramBase::Sample min;
+  int64_t max;
+  HistogramBase::Count count;
+
+  it->Get(&min, &max, &count);
+  EXPECT_EQ(1, min);
+  EXPECT_EQ(2, max);
+  EXPECT_EQ(100, count);
+  EXPECT_FALSE(it->GetBucketIndex(nullptr));
+
+  it->Next();
+  it->Get(&min, &max, &count);
+  EXPECT_EQ(2, min);
+  EXPECT_EQ(3, max);
+  EXPECT_EQ(200, count);
+
+  it->Next();
+  it->Get(&min, &max, &count);
+  EXPECT_EQ(4, min);
+  EXPECT_EQ(5, max);
+  EXPECT_EQ(-300, count);
+
+  it->Next();
+  EXPECT_TRUE(it->Done());
+}
+
+TEST(PersistentSampleMapIteratorTest, SkipEmptyRanges) {
+  std::unique_ptr<PersistentHistogramAllocator> allocator1 =
+      CreateHistogramAllocator(64 << 10);  // 64 KiB
+  HistogramSamples::LocalMetadata meta1;
+  PersistentSampleMap samples1(1, allocator1.get(), &meta1);
+  samples1.Accumulate(5, 1);
+  samples1.Accumulate(10, 2);
+  samples1.Accumulate(15, 3);
+  samples1.Accumulate(20, 4);
+  samples1.Accumulate(25, 5);
+
+  std::unique_ptr<PersistentHistogramAllocator> allocator2 =
+      DuplicateHistogramAllocator(allocator1.get());
+  HistogramSamples::LocalMetadata meta2;
+  PersistentSampleMap samples2(2, allocator2.get(), &meta2);
+  samples2.Accumulate(5, 1);
+  samples2.Accumulate(20, 4);
+  samples2.Accumulate(25, 5);
+
+  samples1.Subtract(samples2);
+
+  std::unique_ptr<SampleCountIterator> it = samples1.Iterator();
+  EXPECT_FALSE(it->Done());
+
+  HistogramBase::Sample min;
+  int64_t max;
+  HistogramBase::Count count;
+
+  it->Get(&min, &max, &count);
+  EXPECT_EQ(10, min);
+  EXPECT_EQ(11, max);
+  EXPECT_EQ(2, count);
+
+  it->Next();
+  EXPECT_FALSE(it->Done());
+
+  it->Get(&min, &max, &count);
+  EXPECT_EQ(15, min);
+  EXPECT_EQ(16, max);
+  EXPECT_EQ(3, count);
+
+  it->Next();
+  EXPECT_TRUE(it->Done());
+}
+
+TEST(PersistentSampleMapIteratorDeathTest, IterateDoneTest) {
+  std::unique_ptr<PersistentHistogramAllocator> allocator =
+      CreateHistogramAllocator(64 << 10);  // 64 KiB
+  HistogramSamples::LocalMetadata meta;
+  PersistentSampleMap samples(1, allocator.get(), &meta);
+
+  std::unique_ptr<SampleCountIterator> it = samples.Iterator();
+
+  EXPECT_TRUE(it->Done());
+
+  HistogramBase::Sample min;
+  int64_t max;
+  HistogramBase::Count count;
+  EXPECT_DCHECK_DEATH(it->Get(&min, &max, &count));
+
+  EXPECT_DCHECK_DEATH(it->Next());
+
+  samples.Accumulate(1, 100);
+  it = samples.Iterator();
+  EXPECT_FALSE(it->Done());
+}
+
+}  // namespace
+}  // namespace base
diff --git a/base/metrics/record_histogram_checker.h b/base/metrics/record_histogram_checker.h
new file mode 100644
index 0000000..75bc336
--- /dev/null
+++ b/base/metrics/record_histogram_checker.h
@@ -0,0 +1,27 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_RECORD_HISTOGRAM_CHECKER_H_
+#define BASE_METRICS_RECORD_HISTOGRAM_CHECKER_H_
+
+#include <stdint.h>
+
+#include "base/base_export.h"
+
+namespace base {
+
+// RecordHistogramChecker provides an interface for checking whether
+// the given histogram should be recorded.
+class BASE_EXPORT RecordHistogramChecker {
+ public:
+  virtual ~RecordHistogramChecker() = default;
+
+  // Returns true iff the given histogram should be recorded.
+  // This method may be called on any thread, so it should not mutate any state.
+  virtual bool ShouldRecord(uint64_t histogram_hash) const = 0;
+};
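+
+// A minimal sketch of a conforming implementation (the class name and the
+// recording policy here are hypothetical, purely for illustration):
+//
+//   class EvenHashChecker : public RecordHistogramChecker {
+//    public:
+//     bool ShouldRecord(uint64_t histogram_hash) const override {
+//       return histogram_hash % 2 == 0;  // Record only even hashes.
+//     }
+//   };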
+
+}  // namespace base
+
+#endif  // BASE_METRICS_RECORD_HISTOGRAM_CHECKER_H_
diff --git a/base/metrics/sample_map.cc b/base/metrics/sample_map.cc
new file mode 100644
index 0000000..c6dce29
--- /dev/null
+++ b/base/metrics/sample_map.cc
@@ -0,0 +1,126 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/sample_map.h"
+
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/stl_util.h"
+
+namespace base {
+
+typedef HistogramBase::Count Count;
+typedef HistogramBase::Sample Sample;
+
+namespace {
+
+// An iterator for going through a SampleMap. The logic here is identical
+// to that of PersistentSampleMapIterator but with different data structures.
+// Changes here likely need to be duplicated there.
+class SampleMapIterator : public SampleCountIterator {
+ public:
+  typedef std::map<HistogramBase::Sample, HistogramBase::Count>
+      SampleToCountMap;
+
+  explicit SampleMapIterator(const SampleToCountMap& sample_counts);
+  ~SampleMapIterator() override;
+
+  // SampleCountIterator:
+  bool Done() const override;
+  void Next() override;
+  void Get(HistogramBase::Sample* min,
+           int64_t* max,
+           HistogramBase::Count* count) const override;
+
+ private:
+  void SkipEmptyBuckets();
+
+  SampleToCountMap::const_iterator iter_;
+  const SampleToCountMap::const_iterator end_;
+};
+
+SampleMapIterator::SampleMapIterator(const SampleToCountMap& sample_counts)
+    : iter_(sample_counts.begin()),
+      end_(sample_counts.end()) {
+  SkipEmptyBuckets();
+}
+
+SampleMapIterator::~SampleMapIterator() = default;
+
+bool SampleMapIterator::Done() const {
+  return iter_ == end_;
+}
+
+void SampleMapIterator::Next() {
+  DCHECK(!Done());
+  ++iter_;
+  SkipEmptyBuckets();
+}
+
+void SampleMapIterator::Get(Sample* min, int64_t* max, Count* count) const {
+  DCHECK(!Done());
+  if (min)
+    *min = iter_->first;
+  if (max)
+    *max = strict_cast<int64_t>(iter_->first) + 1;
+  if (count)
+    *count = iter_->second;
+}
+
+void SampleMapIterator::SkipEmptyBuckets() {
+  while (!Done() && iter_->second == 0) {
+    ++iter_;
+  }
+}
+
+}  // namespace
+
+SampleMap::SampleMap() : SampleMap(0) {}
+
+SampleMap::SampleMap(uint64_t id) : HistogramSamples(id, new LocalMetadata()) {}
+
+SampleMap::~SampleMap() {
+  delete static_cast<LocalMetadata*>(meta());
+}
+
+void SampleMap::Accumulate(Sample value, Count count) {
+  sample_counts_[value] += count;
+  IncreaseSumAndCount(strict_cast<int64_t>(count) * value, count);
+}
+
+Count SampleMap::GetCount(Sample value) const {
+  std::map<Sample, Count>::const_iterator it = sample_counts_.find(value);
+  if (it == sample_counts_.end())
+    return 0;
+  return it->second;
+}
+
+Count SampleMap::TotalCount() const {
+  Count count = 0;
+  for (const auto& entry : sample_counts_) {
+    count += entry.second;
+  }
+  return count;
+}
+
+std::unique_ptr<SampleCountIterator> SampleMap::Iterator() const {
+  return WrapUnique(new SampleMapIterator(sample_counts_));
+}
+
+bool SampleMap::AddSubtractImpl(SampleCountIterator* iter, Operator op) {
+  Sample min;
+  int64_t max;
+  Count count;
+  for (; !iter->Done(); iter->Next()) {
+    iter->Get(&min, &max, &count);
+    if (strict_cast<int64_t>(min) + 1 != max)
+      return false;  // SparseHistogram only supports buckets of size 1.
+
+    sample_counts_[min] += (op == HistogramSamples::ADD) ? count : -count;
+  }
+  return true;
+}
+
+}  // namespace base
diff --git a/base/metrics/sample_map.h b/base/metrics/sample_map.h
new file mode 100644
index 0000000..7458e05
--- /dev/null
+++ b/base/metrics/sample_map.h
@@ -0,0 +1,50 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// SampleMap implements the HistogramSamples interface. It is used by the
+// SparseHistogram class to store samples.
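+//
+// A minimal usage sketch (hypothetical values, mirroring the unit tests):
+//
+//   SampleMap samples(/*id=*/1);
+//   samples.Accumulate(/*value=*/42, /*count=*/1);
+//   HistogramBase::Count count = samples.GetCount(42);  // count == 1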
+
+#ifndef BASE_METRICS_SAMPLE_MAP_H_
+#define BASE_METRICS_SAMPLE_MAP_H_
+
+#include <stdint.h>
+
+#include <map>
+#include <memory>
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/metrics/histogram_base.h"
+#include "base/metrics/histogram_samples.h"
+
+namespace base {
+
+// The logic here is similar to that of PersistentSampleMap but with different
+// data structures. Changes here likely need to be duplicated there.
+class BASE_EXPORT SampleMap : public HistogramSamples {
+ public:
+  SampleMap();
+  explicit SampleMap(uint64_t id);
+  ~SampleMap() override;
+
+  // HistogramSamples:
+  void Accumulate(HistogramBase::Sample value,
+                  HistogramBase::Count count) override;
+  HistogramBase::Count GetCount(HistogramBase::Sample value) const override;
+  HistogramBase::Count TotalCount() const override;
+  std::unique_ptr<SampleCountIterator> Iterator() const override;
+
+ protected:
+  // Performs arithmetic. |op| is ADD or SUBTRACT.
+  bool AddSubtractImpl(SampleCountIterator* iter, Operator op) override;
+
+ private:
+  std::map<HistogramBase::Sample, HistogramBase::Count> sample_counts_;
+
+  DISALLOW_COPY_AND_ASSIGN(SampleMap);
+};
+
+}  // namespace base
+
+#endif  // BASE_METRICS_SAMPLE_MAP_H_
diff --git a/base/metrics/sample_map_unittest.cc b/base/metrics/sample_map_unittest.cc
new file mode 100644
index 0000000..83db56f
--- /dev/null
+++ b/base/metrics/sample_map_unittest.cc
@@ -0,0 +1,168 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/sample_map.h"
+
+#include <memory>
+
+#include "base/test/gtest_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+TEST(SampleMapTest, AccumulateTest) {
+  SampleMap samples(1);
+
+  samples.Accumulate(1, 100);
+  samples.Accumulate(2, 200);
+  samples.Accumulate(1, -200);
+  EXPECT_EQ(-100, samples.GetCount(1));
+  EXPECT_EQ(200, samples.GetCount(2));
+
+  EXPECT_EQ(300, samples.sum());
+  EXPECT_EQ(100, samples.TotalCount());
+  EXPECT_EQ(samples.redundant_count(), samples.TotalCount());
+}
+
+TEST(SampleMapTest, Accumulate_LargeValuesDontOverflow) {
+  SampleMap samples(1);
+
+  samples.Accumulate(250000000, 100);
+  samples.Accumulate(500000000, 200);
+  samples.Accumulate(250000000, -200);
+  EXPECT_EQ(-100, samples.GetCount(250000000));
+  EXPECT_EQ(200, samples.GetCount(500000000));
+
+  EXPECT_EQ(75000000000LL, samples.sum());
+  EXPECT_EQ(100, samples.TotalCount());
+  EXPECT_EQ(samples.redundant_count(), samples.TotalCount());
+}
+
+TEST(SampleMapTest, AddSubtractTest) {
+  SampleMap samples1(1);
+  SampleMap samples2(2);
+
+  samples1.Accumulate(1, 100);
+  samples1.Accumulate(2, 100);
+  samples1.Accumulate(3, 100);
+
+  samples2.Accumulate(1, 200);
+  samples2.Accumulate(2, 200);
+  samples2.Accumulate(4, 200);
+
+  samples1.Add(samples2);
+  EXPECT_EQ(300, samples1.GetCount(1));
+  EXPECT_EQ(300, samples1.GetCount(2));
+  EXPECT_EQ(100, samples1.GetCount(3));
+  EXPECT_EQ(200, samples1.GetCount(4));
+  EXPECT_EQ(2000, samples1.sum());
+  EXPECT_EQ(900, samples1.TotalCount());
+  EXPECT_EQ(samples1.redundant_count(), samples1.TotalCount());
+
+  samples1.Subtract(samples2);
+  EXPECT_EQ(100, samples1.GetCount(1));
+  EXPECT_EQ(100, samples1.GetCount(2));
+  EXPECT_EQ(100, samples1.GetCount(3));
+  EXPECT_EQ(0, samples1.GetCount(4));
+  EXPECT_EQ(600, samples1.sum());
+  EXPECT_EQ(300, samples1.TotalCount());
+  EXPECT_EQ(samples1.redundant_count(), samples1.TotalCount());
+}
+
+TEST(SampleMapIteratorTest, IterateTest) {
+  SampleMap samples(1);
+  samples.Accumulate(1, 100);
+  samples.Accumulate(2, 200);
+  samples.Accumulate(4, -300);
+  samples.Accumulate(5, 0);
+
+  std::unique_ptr<SampleCountIterator> it = samples.Iterator();
+
+  HistogramBase::Sample min;
+  int64_t max;
+  HistogramBase::Count count;
+
+  it->Get(&min, &max, &count);
+  EXPECT_EQ(1, min);
+  EXPECT_EQ(2, max);
+  EXPECT_EQ(100, count);
+  EXPECT_FALSE(it->GetBucketIndex(nullptr));
+
+  it->Next();
+  it->Get(&min, &max, &count);
+  EXPECT_EQ(2, min);
+  EXPECT_EQ(3, max);
+  EXPECT_EQ(200, count);
+
+  it->Next();
+  it->Get(&min, &max, &count);
+  EXPECT_EQ(4, min);
+  EXPECT_EQ(5, max);
+  EXPECT_EQ(-300, count);
+
+  it->Next();
+  EXPECT_TRUE(it->Done());
+}
+
+TEST(SampleMapIteratorTest, SkipEmptyRanges) {
+  SampleMap samples(1);
+  samples.Accumulate(5, 1);
+  samples.Accumulate(10, 2);
+  samples.Accumulate(15, 3);
+  samples.Accumulate(20, 4);
+  samples.Accumulate(25, 5);
+
+  SampleMap samples2(2);
+  samples2.Accumulate(5, 1);
+  samples2.Accumulate(20, 4);
+  samples2.Accumulate(25, 5);
+
+  samples.Subtract(samples2);
+
+  std::unique_ptr<SampleCountIterator> it = samples.Iterator();
+  EXPECT_FALSE(it->Done());
+
+  HistogramBase::Sample min;
+  int64_t max;
+  HistogramBase::Count count;
+
+  it->Get(&min, &max, &count);
+  EXPECT_EQ(10, min);
+  EXPECT_EQ(11, max);
+  EXPECT_EQ(2, count);
+
+  it->Next();
+  EXPECT_FALSE(it->Done());
+
+  it->Get(&min, &max, &count);
+  EXPECT_EQ(15, min);
+  EXPECT_EQ(16, max);
+  EXPECT_EQ(3, count);
+
+  it->Next();
+  EXPECT_TRUE(it->Done());
+}
+
+TEST(SampleMapIteratorDeathTest, IterateDoneTest) {
+  SampleMap samples(1);
+
+  std::unique_ptr<SampleCountIterator> it = samples.Iterator();
+
+  EXPECT_TRUE(it->Done());
+
+  HistogramBase::Sample min;
+  int64_t max;
+  HistogramBase::Count count;
+  EXPECT_DCHECK_DEATH(it->Get(&min, &max, &count));
+
+  EXPECT_DCHECK_DEATH(it->Next());
+
+  samples.Accumulate(1, 100);
+  it = samples.Iterator();
+  EXPECT_FALSE(it->Done());
+}
+
+}  // namespace
+}  // namespace base
diff --git a/base/metrics/sample_vector.cc b/base/metrics/sample_vector.cc
new file mode 100644
index 0000000..cf8634e
--- /dev/null
+++ b/base/metrics/sample_vector.cc
@@ -0,0 +1,429 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/sample_vector.h"
+
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/metrics/persistent_memory_allocator.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/platform_thread.h"
+
+// This SampleVector makes use of the single-sample embedded in the base
+// HistogramSamples class. If the count is non-zero then there is guaranteed
+// (within the bounds of "eventual consistency") to be no allocated external
+// storage. Once the full counts storage is allocated, the single-sample must
+// be extracted and disabled.
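+//
+// Behavioral sketch (a reading of the code below, not an API guarantee): the
+// first Accumulate() call is held in the embedded single-sample; a later call
+// that lands in a different bucket mounts the counts array, moves the pending
+// single-sample into its bucket, and leaves the single-sample disabled from
+// then on.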
+
+namespace base {
+
+typedef HistogramBase::Count Count;
+typedef HistogramBase::Sample Sample;
+
+SampleVectorBase::SampleVectorBase(uint64_t id,
+                                   Metadata* meta,
+                                   const BucketRanges* bucket_ranges)
+    : HistogramSamples(id, meta), bucket_ranges_(bucket_ranges) {
+  CHECK_GE(bucket_ranges_->bucket_count(), 1u);
+}
+
+SampleVectorBase::~SampleVectorBase() = default;
+
+void SampleVectorBase::Accumulate(Sample value, Count count) {
+  const size_t bucket_index = GetBucketIndex(value);
+
+  // Handle the single-sample case.
+  if (!counts()) {
+    // Try to accumulate the parameters into the single-count entry.
+    if (AccumulateSingleSample(value, count, bucket_index)) {
+      // A race condition could lead to a new single-sample being accumulated
+      // above just after another thread executed the MountCountsStorage below.
+      // Since it is mounted, it could be mounted elsewhere and have values
+      // written to it. It's not allowed to have both a single-sample and
+      // entries in the counts array, so move the single-sample.
+      if (counts())
+        MoveSingleSampleToCounts();
+      return;
+    }
+
+    // Need real storage to hold both what was in the single-sample and the
+    // parameter information.
+    MountCountsStorageAndMoveSingleSample();
+  }
+
+  // Handle the multi-sample case.
+  Count new_value =
+      subtle::NoBarrier_AtomicIncrement(&counts()[bucket_index], count);
+  IncreaseSumAndCount(strict_cast<int64_t>(count) * value, count);
+
+  // TODO(bcwhite) Remove after crbug.com/682680.
+  Count old_value = new_value - count;
+  if ((new_value >= 0) != (old_value >= 0) && count > 0)
+    RecordNegativeSample(SAMPLES_ACCUMULATE_OVERFLOW, count);
+}
+
+Count SampleVectorBase::GetCount(Sample value) const {
+  return GetCountAtIndex(GetBucketIndex(value));
+}
+
+Count SampleVectorBase::TotalCount() const {
+  // Handle the single-sample case.
+  SingleSample sample = single_sample().Load();
+  if (sample.count != 0)
+    return sample.count;
+
+  // Handle the multi-sample case.
+  if (counts() || MountExistingCountsStorage()) {
+    Count count = 0;
+    size_t size = counts_size();
+    const HistogramBase::AtomicCount* counts_array = counts();
+    for (size_t i = 0; i < size; ++i) {
+      count += subtle::NoBarrier_Load(&counts_array[i]);
+    }
+    return count;
+  }
+
+  // And the no-value case.
+  return 0;
+}
+
+Count SampleVectorBase::GetCountAtIndex(size_t bucket_index) const {
+  DCHECK(bucket_index < counts_size());
+
+  // Handle the single-sample case.
+  SingleSample sample = single_sample().Load();
+  if (sample.count != 0)
+    return sample.bucket == bucket_index ? sample.count : 0;
+
+  // Handle the multi-sample case.
+  if (counts() || MountExistingCountsStorage())
+    return subtle::NoBarrier_Load(&counts()[bucket_index]);
+
+  // And the no-value case.
+  return 0;
+}
+
+std::unique_ptr<SampleCountIterator> SampleVectorBase::Iterator() const {
+  // Handle the single-sample case.
+  SingleSample sample = single_sample().Load();
+  if (sample.count != 0) {
+    return std::make_unique<SingleSampleIterator>(
+        bucket_ranges_->range(sample.bucket),
+        bucket_ranges_->range(sample.bucket + 1), sample.count, sample.bucket);
+  }
+
+  // Handle the multi-sample case.
+  if (counts() || MountExistingCountsStorage()) {
+    return std::make_unique<SampleVectorIterator>(counts(), counts_size(),
+                                                  bucket_ranges_);
+  }
+
+  // And the no-value case.
+  return std::make_unique<SampleVectorIterator>(nullptr, 0, bucket_ranges_);
+}
+
+bool SampleVectorBase::AddSubtractImpl(SampleCountIterator* iter,
+                                       HistogramSamples::Operator op) {
+  // Stop now if there's nothing to do.
+  if (iter->Done())
+    return true;
+
+  // Get the first value and its index.
+  HistogramBase::Sample min;
+  int64_t max;
+  HistogramBase::Count count;
+  iter->Get(&min, &max, &count);
+  size_t dest_index = GetBucketIndex(min);
+
+  // The destination must be a superset of the source, meaning that while the
+  // incoming ranges will find an exact match, the incoming bucket-index, if
+  // it exists, may be offset from the destination bucket-index. Calculate
+  // that offset from the passed iterator; there are no overflow checks
+  // because 2's complement math will work it out in the end.
+  //
+  // Because GetBucketIndex() always returns the same true or false result for
+  // a given iterator object, |index_offset| is either set here and used below,
+  // or never set and never used. The compiler doesn't know this, though, which
+  // is why it's necessary to initialize it to something.
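+  //
+  // For example, if the source iterator reports bucket index 3 for a sample
+  // that the destination places in bucket 5, |index_offset| is 2 and every
+  // subsequent source index is shifted by 2.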
+  size_t index_offset = 0;
+  size_t iter_index;
+  if (iter->GetBucketIndex(&iter_index))
+    index_offset = dest_index - iter_index;
+  if (dest_index >= counts_size())
+    return false;
+
+  // Post-increment. Information about the current sample is not available
+  // after this point.
+  iter->Next();
+
+  // Single-value storage is possible if there is no counts storage and the
+  // retrieved entry is the only one in the iterator.
+  if (!counts()) {
+    if (iter->Done()) {
+      // Don't call AccumulateSingleSample because that updates sum and count
+      // which was already done by the caller of this method.
+      if (single_sample().Accumulate(
+              dest_index, op == HistogramSamples::ADD ? count : -count)) {
+        // Handle race-condition that mounted counts storage between above and
+        // here.
+        if (counts())
+          MoveSingleSampleToCounts();
+        return true;
+      }
+    }
+
+    // The counts storage will be needed to hold the multiple incoming values.
+    MountCountsStorageAndMoveSingleSample();
+  }
+
+  // Go through the iterator and add the counts into correct bucket.
+  while (true) {
+    // Ensure that the sample's min/max match the ranges min/max.
+    if (min != bucket_ranges_->range(dest_index) ||
+        max != bucket_ranges_->range(dest_index + 1)) {
+      NOTREACHED() << "sample=" << min << "," << max
+                   << "; range=" << bucket_ranges_->range(dest_index) << ","
+                   << bucket_ranges_->range(dest_index + 1);
+      return false;
+    }
+
+    // Sample's bucket matches exactly. Adjust count.
+    subtle::NoBarrier_AtomicIncrement(
+        &counts()[dest_index], op == HistogramSamples::ADD ? count : -count);
+
+    // Advance to the next iterable sample. See comments above for how
+    // everything works.
+    if (iter->Done())
+      return true;
+    iter->Get(&min, &max, &count);
+    if (iter->GetBucketIndex(&iter_index)) {
+      // Destination bucket is a known offset from the source bucket.
+      dest_index = iter_index + index_offset;
+    } else {
+      // Destination bucket has to be determined anew each time.
+      dest_index = GetBucketIndex(min);
+    }
+    if (dest_index >= counts_size())
+      return false;
+    iter->Next();
+  }
+}
+
+// Use simple binary search.  This is very general, but there are better
+// approaches if we knew that the buckets were linearly distributed.
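+//
+// For example, with ranges {0, 10, 20, 30} (three buckets), a value of 25
+// bisects to bucket index 2 because range(2) == 20 <= 25 < 30 == range(3).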
+size_t SampleVectorBase::GetBucketIndex(Sample value) const {
+  size_t bucket_count = bucket_ranges_->bucket_count();
+  CHECK_GE(bucket_count, 1u);
+  CHECK_GE(value, bucket_ranges_->range(0));
+  CHECK_LT(value, bucket_ranges_->range(bucket_count));
+
+  size_t under = 0;
+  size_t over = bucket_count;
+  size_t mid;
+  do {
+    DCHECK_GE(over, under);
+    mid = under + (over - under)/2;
+    if (mid == under)
+      break;
+    if (bucket_ranges_->range(mid) <= value)
+      under = mid;
+    else
+      over = mid;
+  } while (true);
+
+  DCHECK_LE(bucket_ranges_->range(mid), value);
+  CHECK_GT(bucket_ranges_->range(mid + 1), value);
+  return mid;
+}
+
+void SampleVectorBase::MoveSingleSampleToCounts() {
+  DCHECK(counts());
+
+  // Disable the single-sample since there is now counts storage for the data.
+  SingleSample sample = single_sample().Extract(/*disable=*/true);
+
+  // Stop here if there is no "count" as trying to find the bucket index of
+  // an invalid (including zero) "value" will crash.
+  if (sample.count == 0)
+    return;
+
+  // Move the value into storage. Sum and redundant-count already account
+  // for this entry so no need to call IncreaseSumAndCount().
+  subtle::NoBarrier_AtomicIncrement(&counts()[sample.bucket], sample.count);
+}
+
+void SampleVectorBase::MountCountsStorageAndMoveSingleSample() {
+  // There are many SampleVector objects and the lock is needed very
+  // infrequently (just when advancing from single-sample to multi-sample) so
+  // define a single, global lock that all can use. This lock only prevents
+  // concurrent entry into the code below; access and updates to |counts_|
+  // still requires atomic operations.
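+  //
+  // This is the classic double-checked locking pattern: the unlocked load
+  // skips the lock on the common path, while the second load, re-checked
+  // under the lock, ensures only one thread creates the storage.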
+  static LazyInstance<Lock>::Leaky counts_lock = LAZY_INSTANCE_INITIALIZER;
+  if (subtle::NoBarrier_Load(&counts_) == 0) {
+    AutoLock lock(counts_lock.Get());
+    if (subtle::NoBarrier_Load(&counts_) == 0) {
+      // Create the actual counts storage while the above lock is acquired.
+      HistogramBase::Count* counts = CreateCountsStorageWhileLocked();
+      DCHECK(counts);
+
+      // Point |counts_| to the newly created storage. This is done while
+      // locked to prevent possible concurrent calls to CreateCountsStorage
+      // but, between that call and here, other threads could notice the
+      // existence of the storage and race with this to set_counts(). That's
+      // okay because (a) it's atomic and (b) it always writes the same value.
+      set_counts(counts);
+    }
+  }
+
+  // Move any single-sample into the newly mounted storage.
+  MoveSingleSampleToCounts();
+}
+
+SampleVector::SampleVector(const BucketRanges* bucket_ranges)
+    : SampleVector(0, bucket_ranges) {}
+
+SampleVector::SampleVector(uint64_t id, const BucketRanges* bucket_ranges)
+    : SampleVectorBase(id, new LocalMetadata(), bucket_ranges) {}
+
+SampleVector::~SampleVector() {
+  delete static_cast<LocalMetadata*>(meta());
+}
+
+bool SampleVector::MountExistingCountsStorage() const {
+  // There is never any existing storage other than what is already in use.
+  return counts() != nullptr;
+}
+
+HistogramBase::AtomicCount* SampleVector::CreateCountsStorageWhileLocked() {
+  local_counts_.resize(counts_size());
+  return &local_counts_[0];
+}
+
+PersistentSampleVector::PersistentSampleVector(
+    uint64_t id,
+    const BucketRanges* bucket_ranges,
+    Metadata* meta,
+    const DelayedPersistentAllocation& counts)
+    : SampleVectorBase(id, meta, bucket_ranges), persistent_counts_(counts) {
+  // Only mount the full storage if the single-sample has been disabled.
+  // Otherwise, it is possible for this object instance to start using (empty)
+  // storage that was created incidentally while another instance continues to
+  // update to the single sample. This "incidental creation" can happen because
+  // the memory is a DelayedPersistentAllocation which allows multiple memory
+  // blocks within it and applies an all-or-nothing approach to the allocation.
+  // Thus, a request elsewhere for one of the _other_ blocks would make _this_
+  // block available even though nothing has explicitly requested it.
+  //
+  // Note that it's not possible for the ctor to mount existing storage and
+  // move any single-sample to it because sometimes the persistent memory is
+  // read-only. Only non-const methods (which assume that memory is read/write)
+  // can do that.
+  if (single_sample().IsDisabled()) {
+    bool success = MountExistingCountsStorage();
+    DCHECK(success);
+  }
+}
+
+PersistentSampleVector::~PersistentSampleVector() = default;
+
+bool PersistentSampleVector::MountExistingCountsStorage() const {
+  // There is no early exit if counts is not yet mounted because, given that
+  // this is a virtual function, it's more efficient to do that at the call-
+  // site. There is no danger, however, should this get called anyway (perhaps
+  // because of a race condition) because at worst the |counts_| value would
+  // be over-written (in an atomic manner) with the exact same address.
+
+  if (!persistent_counts_.reference())
+    return false;  // Nothing to mount.
+
+  // Mount the counts array in position.
+  set_counts(
+      static_cast<HistogramBase::AtomicCount*>(persistent_counts_.Get()));
+
+  // The above shouldn't fail but can if the data is corrupt or incomplete.
+  return counts() != nullptr;
+}
+
+HistogramBase::AtomicCount*
+PersistentSampleVector::CreateCountsStorageWhileLocked() {
+  void* mem = persistent_counts_.Get();
+  if (!mem) {
+    // The above shouldn't fail but can if Bad Things(tm) are occurring in the
+    // persistent allocator. Crashing isn't a good option so instead just
+    // allocate something from the heap and return that. There will be no
+    // sharing or persistence but worse things are already happening.
+    return new HistogramBase::AtomicCount[counts_size()];
+  }
+
+  return static_cast<HistogramBase::AtomicCount*>(mem);
+}
+
+SampleVectorIterator::SampleVectorIterator(
+    const std::vector<HistogramBase::AtomicCount>* counts,
+    const BucketRanges* bucket_ranges)
+    : counts_(&(*counts)[0]),
+      counts_size_(counts->size()),
+      bucket_ranges_(bucket_ranges),
+      index_(0) {
+  DCHECK_GE(bucket_ranges_->bucket_count(), counts_size_);
+  SkipEmptyBuckets();
+}
+
+SampleVectorIterator::SampleVectorIterator(
+    const HistogramBase::AtomicCount* counts,
+    size_t counts_size,
+    const BucketRanges* bucket_ranges)
+    : counts_(counts),
+      counts_size_(counts_size),
+      bucket_ranges_(bucket_ranges),
+      index_(0) {
+  DCHECK_GE(bucket_ranges_->bucket_count(), counts_size_);
+  SkipEmptyBuckets();
+}
+
+SampleVectorIterator::~SampleVectorIterator() = default;
+
+bool SampleVectorIterator::Done() const {
+  return index_ >= counts_size_;
+}
+
+void SampleVectorIterator::Next() {
+  DCHECK(!Done());
+  index_++;
+  SkipEmptyBuckets();
+}
+
+void SampleVectorIterator::Get(HistogramBase::Sample* min,
+                               int64_t* max,
+                               HistogramBase::Count* count) const {
+  DCHECK(!Done());
+  if (min != nullptr)
+    *min = bucket_ranges_->range(index_);
+  if (max != nullptr)
+    *max = strict_cast<int64_t>(bucket_ranges_->range(index_ + 1));
+  if (count != nullptr)
+    *count = subtle::NoBarrier_Load(&counts_[index_]);
+}
+
+bool SampleVectorIterator::GetBucketIndex(size_t* index) const {
+  DCHECK(!Done());
+  if (index != nullptr)
+    *index = index_;
+  return true;
+}
+
+void SampleVectorIterator::SkipEmptyBuckets() {
+  if (Done())
+    return;
+
+  while (index_ < counts_size_) {
+    if (subtle::NoBarrier_Load(&counts_[index_]) != 0)
+      return;
+    index_++;
+  }
+}
+
+}  // namespace base
diff --git a/base/metrics/sample_vector.h b/base/metrics/sample_vector.h
new file mode 100644
index 0000000..278272d
--- /dev/null
+++ b/base/metrics/sample_vector.h
@@ -0,0 +1,185 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// SampleVector implements the HistogramSamples interface. It is used by all
+// Histogram based classes to store samples.
+
+#ifndef BASE_METRICS_SAMPLE_VECTOR_H_
+#define BASE_METRICS_SAMPLE_VECTOR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <vector>
+
+#include "base/atomicops.h"
+#include "base/compiler_specific.h"
+#include "base/gtest_prod_util.h"
+#include "base/macros.h"
+#include "base/metrics/bucket_ranges.h"
+#include "base/metrics/histogram_base.h"
+#include "base/metrics/histogram_samples.h"
+#include "base/metrics/persistent_memory_allocator.h"
+
+namespace base {
+
+class BucketRanges;
+
+class BASE_EXPORT SampleVectorBase : public HistogramSamples {
+ public:
+  SampleVectorBase(uint64_t id,
+                   Metadata* meta,
+                   const BucketRanges* bucket_ranges);
+  ~SampleVectorBase() override;
+
+  // HistogramSamples:
+  void Accumulate(HistogramBase::Sample value,
+                  HistogramBase::Count count) override;
+  HistogramBase::Count GetCount(HistogramBase::Sample value) const override;
+  HistogramBase::Count TotalCount() const override;
+  std::unique_ptr<SampleCountIterator> Iterator() const override;
+
+  // Get count of a specific bucket.
+  HistogramBase::Count GetCountAtIndex(size_t bucket_index) const;
+
+  // Access the bucket ranges held externally.
+  const BucketRanges* bucket_ranges() const { return bucket_ranges_; }
+
+ protected:
+  bool AddSubtractImpl(
+      SampleCountIterator* iter,
+      HistogramSamples::Operator op) override;  // |op| is ADD or SUBTRACT.
+
+  virtual size_t GetBucketIndex(HistogramBase::Sample value) const;
+
+  // Moves the single-sample value to a mounted "counts" array.
+  void MoveSingleSampleToCounts();
+
+  // Mounts (creating if necessary) an array of "counts" for multi-value
+  // storage.
+  void MountCountsStorageAndMoveSingleSample();
+
+  // Mounts "counts" storage that already exists. This does not attempt to move
+  // any single-sample information to that storage as that would violate the
+  // "const" restriction that is often used to indicate read-only memory.
+  virtual bool MountExistingCountsStorage() const = 0;
+
+  // Creates "counts" storage and returns a pointer to it. Ownership of the
+  // array remains with the called method but will never change. This must be
+  // called while some sort of lock is held to prevent reentry.
+  virtual HistogramBase::Count* CreateCountsStorageWhileLocked() = 0;
+
+  HistogramBase::AtomicCount* counts() {
+    return reinterpret_cast<HistogramBase::AtomicCount*>(
+        subtle::Acquire_Load(&counts_));
+  }
+
+  const HistogramBase::AtomicCount* counts() const {
+    return reinterpret_cast<HistogramBase::AtomicCount*>(
+        subtle::Acquire_Load(&counts_));
+  }
+
+  void set_counts(const HistogramBase::AtomicCount* counts) const {
+    subtle::Release_Store(&counts_, reinterpret_cast<uintptr_t>(counts));
+  }
+
+  size_t counts_size() const { return bucket_ranges_->bucket_count(); }
+
+ private:
+  friend class SampleVectorTest;
+  FRIEND_TEST_ALL_PREFIXES(HistogramTest, CorruptSampleCounts);
+  FRIEND_TEST_ALL_PREFIXES(SharedHistogramTest, CorruptSampleCounts);
+
+  // |counts_| is actually a pointer to a HistogramBase::AtomicCount array but
+  // is held as an AtomicWord for concurrency reasons. When combined with the
+  // single_sample held in the metadata, there are four possible states:
+  //   1) single_sample == zero, counts_ == null
+  //   2) single_sample != zero, counts_ == null
+  //   3) single_sample != zero, counts_ != null BUT IS EMPTY
+  //   4) single_sample == zero, counts_ != null and may have data
+  // Once |counts_| is set, it can never revert and any existing single-sample
+  // must be moved to this storage. It is mutable because changing it doesn't
+  // change the (const) data but must adapt if a non-const object causes the
+  // storage to be allocated and updated.
+  mutable subtle::AtomicWord counts_ = 0;
+
+  // Shares the same BucketRanges with Histogram object.
+  const BucketRanges* const bucket_ranges_;
+
+  DISALLOW_COPY_AND_ASSIGN(SampleVectorBase);
+};
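+
+// A sketch of how the states documented for |counts_| evolve in practice
+// (illustrative use of a concrete subclass; the values are arbitrary):
+//
+//   SampleVector v(&ranges);  // State 1: single_sample == 0, counts_ == null.
+//   v.Accumulate(3, 10);      // State 2: single_sample holds {bucket, 10}.
+//   v.Accumulate(7, 5);       // A second distinct value mounts counts_ and
+//                             // moves the single-sample over: state 4.
+//
+// State 3 is transient: it exists only between counts_ being mounted and the
+// single-sample being moved into it by MoveSingleSampleToCounts().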
+
+// A sample vector that uses local memory for the counts array.
+class BASE_EXPORT SampleVector : public SampleVectorBase {
+ public:
+  explicit SampleVector(const BucketRanges* bucket_ranges);
+  SampleVector(uint64_t id, const BucketRanges* bucket_ranges);
+  ~SampleVector() override;
+
+ private:
+  // SampleVectorBase:
+  bool MountExistingCountsStorage() const override;
+  HistogramBase::Count* CreateCountsStorageWhileLocked() override;
+
+  // Simple local storage for counts.
+  mutable std::vector<HistogramBase::AtomicCount> local_counts_;
+
+  DISALLOW_COPY_AND_ASSIGN(SampleVector);
+};
+
+// A sample vector that uses persistent memory for the counts array.
+class BASE_EXPORT PersistentSampleVector : public SampleVectorBase {
+ public:
+  PersistentSampleVector(uint64_t id,
+                         const BucketRanges* bucket_ranges,
+                         Metadata* meta,
+                         const DelayedPersistentAllocation& counts);
+  ~PersistentSampleVector() override;
+
+ private:
+  // SampleVectorBase:
+  bool MountExistingCountsStorage() const override;
+  HistogramBase::Count* CreateCountsStorageWhileLocked() override;
+
+  // Persistent storage for counts.
+  DelayedPersistentAllocation persistent_counts_;
+
+  DISALLOW_COPY_AND_ASSIGN(PersistentSampleVector);
+};
+
+// An iterator for sample vectors. This could be defined privately in the .cc
+// file but is here for easy testing.
+class BASE_EXPORT SampleVectorIterator : public SampleCountIterator {
+ public:
+  SampleVectorIterator(const std::vector<HistogramBase::AtomicCount>* counts,
+                       const BucketRanges* bucket_ranges);
+  SampleVectorIterator(const HistogramBase::AtomicCount* counts,
+                       size_t counts_size,
+                       const BucketRanges* bucket_ranges);
+  ~SampleVectorIterator() override;
+
+  // SampleCountIterator implementation:
+  bool Done() const override;
+  void Next() override;
+  void Get(HistogramBase::Sample* min,
+           int64_t* max,
+           HistogramBase::Count* count) const override;
+
+  // SampleVector uses predefined buckets, so the iterator can return the
+  // bucket index.
+  bool GetBucketIndex(size_t* index) const override;
+
+ private:
+  void SkipEmptyBuckets();
+
+  const HistogramBase::AtomicCount* counts_;
+  size_t counts_size_;
+  const BucketRanges* bucket_ranges_;
+
+  size_t index_;
+};
+
+}  // namespace base
+
+#endif  // BASE_METRICS_SAMPLE_VECTOR_H_
diff --git a/base/metrics/sample_vector_unittest.cc b/base/metrics/sample_vector_unittest.cc
new file mode 100644
index 0000000..4921802
--- /dev/null
+++ b/base/metrics/sample_vector_unittest.cc
@@ -0,0 +1,545 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/sample_vector.h"
+
+#include <limits.h>
+#include <stddef.h>
+
+#include <atomic>
+#include <memory>
+#include <vector>
+
+#include "base/metrics/bucket_ranges.h"
+#include "base/metrics/histogram.h"
+#include "base/metrics/persistent_memory_allocator.h"
+#include "base/test/gtest_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+// This framework class has "friend" access to SampleVectorBase for accessing
+// non-public methods and fields.
+class SampleVectorTest : public testing::Test {
+ public:
+  const HistogramBase::AtomicCount* GetSamplesCounts(
+      const SampleVectorBase& samples) {
+    return samples.counts();
+  }
+};
+
+TEST_F(SampleVectorTest, Accumulate) {
+  // Custom buckets: [1, 5) [5, 10)
+  BucketRanges ranges(3);
+  ranges.set_range(0, 1);
+  ranges.set_range(1, 5);
+  ranges.set_range(2, 10);
+  SampleVector samples(1, &ranges);
+
+  samples.Accumulate(1, 200);
+  samples.Accumulate(2, -300);
+  EXPECT_EQ(-100, samples.GetCountAtIndex(0));
+
+  samples.Accumulate(5, 200);
+  EXPECT_EQ(200, samples.GetCountAtIndex(1));
+
+  EXPECT_EQ(600, samples.sum());
+  EXPECT_EQ(100, samples.redundant_count());
+  EXPECT_EQ(samples.TotalCount(), samples.redundant_count());
+
+  samples.Accumulate(5, -100);
+  EXPECT_EQ(100, samples.GetCountAtIndex(1));
+
+  EXPECT_EQ(100, samples.sum());
+  EXPECT_EQ(0, samples.redundant_count());
+  EXPECT_EQ(samples.TotalCount(), samples.redundant_count());
+}
+
+TEST_F(SampleVectorTest, Accumulate_LargeValuesDontOverflow) {
+  // Custom buckets: [1, 250000000) [250000000, 500000000)
+  BucketRanges ranges(3);
+  ranges.set_range(0, 1);
+  ranges.set_range(1, 250000000);
+  ranges.set_range(2, 500000000);
+  SampleVector samples(1, &ranges);
+
+  samples.Accumulate(240000000, 200);
+  samples.Accumulate(249999999, -300);
+  EXPECT_EQ(-100, samples.GetCountAtIndex(0));
+
+  samples.Accumulate(250000000, 200);
+  EXPECT_EQ(200, samples.GetCountAtIndex(1));
+
+  EXPECT_EQ(23000000300LL, samples.sum());
+  EXPECT_EQ(100, samples.redundant_count());
+  EXPECT_EQ(samples.TotalCount(), samples.redundant_count());
+
+  samples.Accumulate(250000000, -100);
+  EXPECT_EQ(100, samples.GetCountAtIndex(1));
+
+  EXPECT_EQ(-1999999700LL, samples.sum());
+  EXPECT_EQ(0, samples.redundant_count());
+  EXPECT_EQ(samples.TotalCount(), samples.redundant_count());
+}
+
+TEST_F(SampleVectorTest, AddSubtract) {
+  // Custom buckets: [0, 1) [1, 2) [2, 3) [3, INT_MAX)
+  BucketRanges ranges(5);
+  ranges.set_range(0, 0);
+  ranges.set_range(1, 1);
+  ranges.set_range(2, 2);
+  ranges.set_range(3, 3);
+  ranges.set_range(4, INT_MAX);
+
+  SampleVector samples1(1, &ranges);
+  samples1.Accumulate(0, 100);
+  samples1.Accumulate(2, 100);
+  samples1.Accumulate(4, 100);
+  EXPECT_EQ(600, samples1.sum());
+  EXPECT_EQ(300, samples1.TotalCount());
+  EXPECT_EQ(samples1.redundant_count(), samples1.TotalCount());
+
+  SampleVector samples2(2, &ranges);
+  samples2.Accumulate(1, 200);
+  samples2.Accumulate(2, 200);
+  samples2.Accumulate(4, 200);
+  EXPECT_EQ(1400, samples2.sum());
+  EXPECT_EQ(600, samples2.TotalCount());
+  EXPECT_EQ(samples2.redundant_count(), samples2.TotalCount());
+
+  samples1.Add(samples2);
+  EXPECT_EQ(100, samples1.GetCountAtIndex(0));
+  EXPECT_EQ(200, samples1.GetCountAtIndex(1));
+  EXPECT_EQ(300, samples1.GetCountAtIndex(2));
+  EXPECT_EQ(300, samples1.GetCountAtIndex(3));
+  EXPECT_EQ(2000, samples1.sum());
+  EXPECT_EQ(900, samples1.TotalCount());
+  EXPECT_EQ(samples1.redundant_count(), samples1.TotalCount());
+
+  samples1.Subtract(samples2);
+  EXPECT_EQ(100, samples1.GetCountAtIndex(0));
+  EXPECT_EQ(0, samples1.GetCountAtIndex(1));
+  EXPECT_EQ(100, samples1.GetCountAtIndex(2));
+  EXPECT_EQ(100, samples1.GetCountAtIndex(3));
+  EXPECT_EQ(600, samples1.sum());
+  EXPECT_EQ(300, samples1.TotalCount());
+  EXPECT_EQ(samples1.redundant_count(), samples1.TotalCount());
+}
+
+TEST_F(SampleVectorTest, BucketIndexDeath) {
+  // 8 buckets with exponential layout:
+  // [0, 1) [1, 2) [2, 4) [4, 8) [8, 16) [16, 32) [32, 64) [64, INT_MAX)
+  BucketRanges ranges(9);
+  Histogram::InitializeBucketRanges(1, 64, &ranges);
+  SampleVector samples(1, &ranges);
+
+  // Normal case
+  samples.Accumulate(0, 1);
+  samples.Accumulate(3, 2);
+  samples.Accumulate(64, 3);
+  EXPECT_EQ(1, samples.GetCount(0));
+  EXPECT_EQ(2, samples.GetCount(2));
+  EXPECT_EQ(3, samples.GetCount(65));
+
+  // Extreme case.
+  EXPECT_DEATH_IF_SUPPORTED(samples.Accumulate(INT_MIN, 100), "");
+  EXPECT_DEATH_IF_SUPPORTED(samples.Accumulate(-1, 100), "");
+  EXPECT_DEATH_IF_SUPPORTED(samples.Accumulate(INT_MAX, 100), "");
+
+  // Custom buckets: [1, 5) [5, 10)
+  // Note, this is not a valid BucketRanges for Histogram because it does not
+  // have overflow buckets.
+  BucketRanges ranges2(3);
+  ranges2.set_range(0, 1);
+  ranges2.set_range(1, 5);
+  ranges2.set_range(2, 10);
+  SampleVector samples2(2, &ranges2);
+
+  // Normal case.
+  samples2.Accumulate(1, 1);
+  samples2.Accumulate(4, 1);
+  samples2.Accumulate(5, 2);
+  samples2.Accumulate(9, 2);
+  EXPECT_EQ(2, samples2.GetCount(1));
+  EXPECT_EQ(4, samples2.GetCount(5));
+
+  // Extreme case.
+  EXPECT_DEATH_IF_SUPPORTED(samples2.Accumulate(0, 100), "");
+  EXPECT_DEATH_IF_SUPPORTED(samples2.Accumulate(10, 100), "");
+}
+
+TEST_F(SampleVectorTest, AddSubtractBucketNotMatchDeath) {
+  // Custom buckets 1: [1, 3) [3, 5)
+  BucketRanges ranges1(3);
+  ranges1.set_range(0, 1);
+  ranges1.set_range(1, 3);
+  ranges1.set_range(2, 5);
+  SampleVector samples1(1, &ranges1);
+
+  // Custom buckets 2: [0, 1) [1, 3) [3, 6) [6, 7)
+  BucketRanges ranges2(5);
+  ranges2.set_range(0, 0);
+  ranges2.set_range(1, 1);
+  ranges2.set_range(2, 3);
+  ranges2.set_range(3, 6);
+  ranges2.set_range(4, 7);
+  SampleVector samples2(2, &ranges2);
+
+  samples2.Accumulate(1, 100);
+  samples1.Add(samples2);
+  EXPECT_EQ(100, samples1.GetCountAtIndex(0));
+
+  // Extra bucket in the beginning. These should CHECK in GetBucketIndex.
+  samples2.Accumulate(0, 100);
+  EXPECT_DEATH_IF_SUPPORTED(samples1.Add(samples2), "");
+  EXPECT_DEATH_IF_SUPPORTED(samples1.Subtract(samples2), "");
+
+  // Extra bucket in the end. These should cause AddSubtractImpl to fail, and
+  // Add to DCHECK as a result.
+  samples2.Accumulate(0, -100);
+  samples2.Accumulate(6, 100);
+  EXPECT_DCHECK_DEATH(samples1.Add(samples2));
+  EXPECT_DCHECK_DEATH(samples1.Subtract(samples2));
+
+  // Bucket not match: [3, 5) VS [3, 6). These should cause AddSubtractImpl to
+  // DCHECK.
+  samples2.Accumulate(6, -100);
+  samples2.Accumulate(3, 100);
+  EXPECT_DCHECK_DEATH(samples1.Add(samples2));
+  EXPECT_DCHECK_DEATH(samples1.Subtract(samples2));
+}
+
+TEST_F(SampleVectorTest, Iterate) {
+  BucketRanges ranges(5);
+  ranges.set_range(0, 0);
+  ranges.set_range(1, 1);
+  ranges.set_range(2, 2);
+  ranges.set_range(3, 3);
+  ranges.set_range(4, 4);
+
+  std::vector<HistogramBase::Count> counts(3);
+  counts[0] = 1;
+  counts[1] = 0;  // Iterator will bypass this empty bucket.
+  counts[2] = 2;
+
+  // BucketRanges can have larger size than counts.
+  SampleVectorIterator it(&counts, &ranges);
+  size_t index;
+
+  HistogramBase::Sample min;
+  int64_t max;
+  HistogramBase::Count count;
+  it.Get(&min, &max, &count);
+  EXPECT_EQ(0, min);
+  EXPECT_EQ(1, max);
+  EXPECT_EQ(1, count);
+  EXPECT_TRUE(it.GetBucketIndex(&index));
+  EXPECT_EQ(0u, index);
+
+  it.Next();
+  it.Get(&min, &max, &count);
+  EXPECT_EQ(2, min);
+  EXPECT_EQ(3, max);
+  EXPECT_EQ(2, count);
+  EXPECT_TRUE(it.GetBucketIndex(&index));
+  EXPECT_EQ(2u, index);
+
+  it.Next();
+  EXPECT_TRUE(it.Done());
+
+  // Create iterator from SampleVector.
+  SampleVector samples(1, &ranges);
+  samples.Accumulate(0, 0);
+  samples.Accumulate(1, 1);
+  samples.Accumulate(2, 2);
+  samples.Accumulate(3, 3);
+  std::unique_ptr<SampleCountIterator> it2 = samples.Iterator();
+
+  int i;
+  for (i = 1; !it2->Done(); i++, it2->Next()) {
+    it2->Get(&min, &max, &count);
+    EXPECT_EQ(i, min);
+    EXPECT_EQ(i + 1, max);
+    EXPECT_EQ(i, count);
+
+    size_t index;
+    EXPECT_TRUE(it2->GetBucketIndex(&index));
+    EXPECT_EQ(static_cast<size_t>(i), index);
+  }
+  EXPECT_EQ(4, i);
+}
+
+TEST_F(SampleVectorTest, IterateDoneDeath) {
+  BucketRanges ranges(5);
+  ranges.set_range(0, 0);
+  ranges.set_range(1, 1);
+  ranges.set_range(2, 2);
+  ranges.set_range(3, 3);
+  ranges.set_range(4, INT_MAX);
+  SampleVector samples(1, &ranges);
+
+  std::unique_ptr<SampleCountIterator> it = samples.Iterator();
+
+  EXPECT_TRUE(it->Done());
+
+  HistogramBase::Sample min;
+  int64_t max;
+  HistogramBase::Count count;
+  EXPECT_DCHECK_DEATH(it->Get(&min, &max, &count));
+
+  EXPECT_DCHECK_DEATH(it->Next());
+
+  samples.Accumulate(2, 100);
+  it = samples.Iterator();
+  EXPECT_FALSE(it->Done());
+}
+
+TEST_F(SampleVectorTest, SingleSample) {
+  // Custom buckets: [1, 5) [5, 10)
+  BucketRanges ranges(3);
+  ranges.set_range(0, 1);
+  ranges.set_range(1, 5);
+  ranges.set_range(2, 10);
+  SampleVector samples(&ranges);
+
+  // Ensure that a single value accumulates correctly.
+  EXPECT_FALSE(GetSamplesCounts(samples));
+  samples.Accumulate(3, 200);
+  EXPECT_EQ(200, samples.GetCount(3));
+  EXPECT_FALSE(GetSamplesCounts(samples));
+  samples.Accumulate(3, 400);
+  EXPECT_EQ(600, samples.GetCount(3));
+  EXPECT_FALSE(GetSamplesCounts(samples));
+  EXPECT_EQ(3 * 600, samples.sum());
+  EXPECT_EQ(600, samples.TotalCount());
+  EXPECT_EQ(600, samples.redundant_count());
+
+  // Ensure that the iterator returns only one value.
+  HistogramBase::Sample min;
+  int64_t max;
+  HistogramBase::Count count;
+  std::unique_ptr<SampleCountIterator> it = samples.Iterator();
+  ASSERT_FALSE(it->Done());
+  it->Get(&min, &max, &count);
+  EXPECT_EQ(1, min);
+  EXPECT_EQ(5, max);
+  EXPECT_EQ(600, count);
+  it->Next();
+  EXPECT_TRUE(it->Done());
+
+  // Ensure that it can be merged to another single-sample vector.
+  SampleVector samples_copy(&ranges);
+  samples_copy.Add(samples);
+  EXPECT_FALSE(GetSamplesCounts(samples_copy));
+  EXPECT_EQ(3 * 600, samples_copy.sum());
+  EXPECT_EQ(600, samples_copy.TotalCount());
+  EXPECT_EQ(600, samples_copy.redundant_count());
+
+  // A different value should cause creation of the counts array.
+  samples.Accumulate(8, 100);
+  EXPECT_TRUE(GetSamplesCounts(samples));
+  EXPECT_EQ(600, samples.GetCount(3));
+  EXPECT_EQ(100, samples.GetCount(8));
+  EXPECT_EQ(3 * 600 + 8 * 100, samples.sum());
+  EXPECT_EQ(600 + 100, samples.TotalCount());
+  EXPECT_EQ(600 + 100, samples.redundant_count());
+
+  // The iterator should now return both values.
+  it = samples.Iterator();
+  ASSERT_FALSE(it->Done());
+  it->Get(&min, &max, &count);
+  EXPECT_EQ(1, min);
+  EXPECT_EQ(5, max);
+  EXPECT_EQ(600, count);
+  it->Next();
+  ASSERT_FALSE(it->Done());
+  it->Get(&min, &max, &count);
+  EXPECT_EQ(5, min);
+  EXPECT_EQ(10, max);
+  EXPECT_EQ(100, count);
+  it->Next();
+  EXPECT_TRUE(it->Done());
+
+  // Ensure that it can be merged into a single-sample vector.
+  samples_copy.Add(samples);
+  EXPECT_TRUE(GetSamplesCounts(samples_copy));
+  EXPECT_EQ(3 * 1200 + 8 * 100, samples_copy.sum());
+  EXPECT_EQ(1200 + 100, samples_copy.TotalCount());
+  EXPECT_EQ(1200 + 100, samples_copy.redundant_count());
+}
+
+TEST_F(SampleVectorTest, PersistentSampleVector) {
+  LocalPersistentMemoryAllocator allocator(64 << 10, 0, "");
+  std::atomic<PersistentMemoryAllocator::Reference> samples_ref;
+  samples_ref.store(0, std::memory_order_relaxed);
+  HistogramSamples::Metadata samples_meta;
+  memset(&samples_meta, 0, sizeof(samples_meta));
+
+  // Custom buckets: [1, 5) [5, 10)
+  BucketRanges ranges(3);
+  ranges.set_range(0, 1);
+  ranges.set_range(1, 5);
+  ranges.set_range(2, 10);
+
+  // Persistent allocation.
+  const size_t counts_bytes =
+      sizeof(HistogramBase::AtomicCount) * ranges.bucket_count();
+  const DelayedPersistentAllocation allocation(&allocator, &samples_ref, 1,
+                                               counts_bytes, false);
+
+  PersistentSampleVector samples1(0, &ranges, &samples_meta, allocation);
+  EXPECT_FALSE(GetSamplesCounts(samples1));
+  samples1.Accumulate(3, 200);
+  EXPECT_EQ(200, samples1.GetCount(3));
+  EXPECT_FALSE(GetSamplesCounts(samples1));
+  EXPECT_EQ(0, samples1.GetCount(8));
+  EXPECT_FALSE(GetSamplesCounts(samples1));
+
+  PersistentSampleVector samples2(0, &ranges, &samples_meta, allocation);
+  EXPECT_EQ(200, samples2.GetCount(3));
+  EXPECT_FALSE(GetSamplesCounts(samples2));
+
+  HistogramBase::Sample min;
+  int64_t max;
+  HistogramBase::Count count;
+  std::unique_ptr<SampleCountIterator> it = samples2.Iterator();
+  ASSERT_FALSE(it->Done());
+  it->Get(&min, &max, &count);
+  EXPECT_EQ(1, min);
+  EXPECT_EQ(5, max);
+  EXPECT_EQ(200, count);
+  it->Next();
+  EXPECT_TRUE(it->Done());
+
+  samples1.Accumulate(8, 100);
+  EXPECT_TRUE(GetSamplesCounts(samples1));
+
+  EXPECT_FALSE(GetSamplesCounts(samples2));
+  EXPECT_EQ(200, samples2.GetCount(3));
+  EXPECT_EQ(100, samples2.GetCount(8));
+  EXPECT_TRUE(GetSamplesCounts(samples2));
+  EXPECT_EQ(3 * 200 + 8 * 100, samples2.sum());
+  EXPECT_EQ(300, samples2.TotalCount());
+  EXPECT_EQ(300, samples2.redundant_count());
+
+  it = samples2.Iterator();
+  ASSERT_FALSE(it->Done());
+  it->Get(&min, &max, &count);
+  EXPECT_EQ(1, min);
+  EXPECT_EQ(5, max);
+  EXPECT_EQ(200, count);
+  it->Next();
+  ASSERT_FALSE(it->Done());
+  it->Get(&min, &max, &count);
+  EXPECT_EQ(5, min);
+  EXPECT_EQ(10, max);
+  EXPECT_EQ(100, count);
+  it->Next();
+  EXPECT_TRUE(it->Done());
+
+  PersistentSampleVector samples3(0, &ranges, &samples_meta, allocation);
+  EXPECT_TRUE(GetSamplesCounts(samples2));
+  EXPECT_EQ(200, samples3.GetCount(3));
+  EXPECT_EQ(100, samples3.GetCount(8));
+  EXPECT_EQ(3 * 200 + 8 * 100, samples3.sum());
+  EXPECT_EQ(300, samples3.TotalCount());
+  EXPECT_EQ(300, samples3.redundant_count());
+
+  it = samples3.Iterator();
+  ASSERT_FALSE(it->Done());
+  it->Get(&min, &max, &count);
+  EXPECT_EQ(1, min);
+  EXPECT_EQ(5, max);
+  EXPECT_EQ(200, count);
+  it->Next();
+  ASSERT_FALSE(it->Done());
+  it->Get(&min, &max, &count);
+  EXPECT_EQ(5, min);
+  EXPECT_EQ(10, max);
+  EXPECT_EQ(100, count);
+  it->Next();
+  EXPECT_TRUE(it->Done());
+}
+
+TEST_F(SampleVectorTest, PersistentSampleVectorTestWithOutsideAlloc) {
+  LocalPersistentMemoryAllocator allocator(64 << 10, 0, "");
+  std::atomic<PersistentMemoryAllocator::Reference> samples_ref;
+  samples_ref.store(0, std::memory_order_relaxed);
+  HistogramSamples::Metadata samples_meta;
+  memset(&samples_meta, 0, sizeof(samples_meta));
+
+  // Custom buckets: [1, 5) [5, 10)
+  BucketRanges ranges(3);
+  ranges.set_range(0, 1);
+  ranges.set_range(1, 5);
+  ranges.set_range(2, 10);
+
+  // Persistent allocation.
+  const size_t counts_bytes =
+      sizeof(HistogramBase::AtomicCount) * ranges.bucket_count();
+  const DelayedPersistentAllocation allocation(&allocator, &samples_ref, 1,
+                                               counts_bytes, false);
+
+  PersistentSampleVector samples1(0, &ranges, &samples_meta, allocation);
+  EXPECT_FALSE(GetSamplesCounts(samples1));
+  samples1.Accumulate(3, 200);
+  EXPECT_EQ(200, samples1.GetCount(3));
+  EXPECT_FALSE(GetSamplesCounts(samples1));
+
+  // Because the delayed allocation can be shared with other objects (the
+  // |offset| parameter allows concatenating multiple data blocks into the
+  // same allocation), it's possible that the allocation gets realized from
+  // the outside even though the data block being accessed is all zero.
+  allocation.Get();
+  EXPECT_EQ(200, samples1.GetCount(3));
+  EXPECT_FALSE(GetSamplesCounts(samples1));
+
+  HistogramBase::Sample min;
+  int64_t max;
+  HistogramBase::Count count;
+  std::unique_ptr<SampleCountIterator> it = samples1.Iterator();
+  ASSERT_FALSE(it->Done());
+  it->Get(&min, &max, &count);
+  EXPECT_EQ(1, min);
+  EXPECT_EQ(5, max);
+  EXPECT_EQ(200, count);
+  it->Next();
+  EXPECT_TRUE(it->Done());
+
+  // A duplicate samples object should still see the single-sample entry even
+  // when storage is available.
+  PersistentSampleVector samples2(0, &ranges, &samples_meta, allocation);
+  EXPECT_EQ(200, samples2.GetCount(3));
+
+  // New accumulations, in both directions, of the existing value should work.
+  samples1.Accumulate(3, 50);
+  EXPECT_EQ(250, samples1.GetCount(3));
+  EXPECT_EQ(250, samples2.GetCount(3));
+  samples2.Accumulate(3, 50);
+  EXPECT_EQ(300, samples1.GetCount(3));
+  EXPECT_EQ(300, samples2.GetCount(3));
+
+  it = samples1.Iterator();
+  ASSERT_FALSE(it->Done());
+  it->Get(&min, &max, &count);
+  EXPECT_EQ(1, min);
+  EXPECT_EQ(5, max);
+  EXPECT_EQ(300, count);
+  it->Next();
+  EXPECT_TRUE(it->Done());
+
+  samples1.Accumulate(8, 100);
+  EXPECT_TRUE(GetSamplesCounts(samples1));
+  EXPECT_EQ(300, samples1.GetCount(3));
+  EXPECT_EQ(300, samples2.GetCount(3));
+  EXPECT_EQ(100, samples1.GetCount(8));
+  EXPECT_EQ(100, samples2.GetCount(8));
+  samples2.Accumulate(8, 100);
+  EXPECT_EQ(300, samples1.GetCount(3));
+  EXPECT_EQ(300, samples2.GetCount(3));
+  EXPECT_EQ(200, samples1.GetCount(8));
+  EXPECT_EQ(200, samples2.GetCount(8));
+}
+
+}  // namespace base
diff --git a/base/metrics/single_sample_metrics.cc b/base/metrics/single_sample_metrics.cc
new file mode 100644
index 0000000..57c1c8f
--- /dev/null
+++ b/base/metrics/single_sample_metrics.cc
@@ -0,0 +1,77 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/single_sample_metrics.h"
+
+#include "base/memory/ptr_util.h"
+#include "base/metrics/histogram.h"
+
+namespace base {
+
+static SingleSampleMetricsFactory* g_factory = nullptr;
+
+// static
+SingleSampleMetricsFactory* SingleSampleMetricsFactory::Get() {
+  if (!g_factory)
+    g_factory = new DefaultSingleSampleMetricsFactory();
+
+  return g_factory;
+}
+
+// static
+void SingleSampleMetricsFactory::SetFactory(
+    std::unique_ptr<SingleSampleMetricsFactory> factory) {
+  DCHECK(!g_factory);
+  g_factory = factory.release();
+}
+
+// static
+void SingleSampleMetricsFactory::DeleteFactoryForTesting() {
+  DCHECK(g_factory);
+  delete g_factory;
+  g_factory = nullptr;
+}
+
+std::unique_ptr<SingleSampleMetric>
+DefaultSingleSampleMetricsFactory::CreateCustomCountsMetric(
+    const std::string& histogram_name,
+    HistogramBase::Sample min,
+    HistogramBase::Sample max,
+    uint32_t bucket_count) {
+  return std::make_unique<DefaultSingleSampleMetric>(
+      histogram_name, min, max, bucket_count,
+      HistogramBase::kUmaTargetedHistogramFlag);
+}
+
+DefaultSingleSampleMetric::DefaultSingleSampleMetric(
+    const std::string& histogram_name,
+    HistogramBase::Sample min,
+    HistogramBase::Sample max,
+    uint32_t bucket_count,
+    int32_t flags)
+    : histogram_(Histogram::FactoryGet(histogram_name,
+                                       min,
+                                       max,
+                                       bucket_count,
+                                       flags)) {
+  // Bad construction parameters may lead to |histogram_| being null; DCHECK to
+  // find accidental errors in production. We must still handle the nullptr in
+  // destruction though since this construction may come from another untrusted
+  // process.
+  DCHECK(histogram_);
+}
+
+DefaultSingleSampleMetric::~DefaultSingleSampleMetric() {
+  // |histogram_| may be nullptr if bad construction parameters are given.
+  if (sample_ < 0 || !histogram_)
+    return;
+  histogram_->Add(sample_);
+}
+
+void DefaultSingleSampleMetric::SetSample(HistogramBase::Sample sample) {
+  DCHECK_GE(sample, 0);
+  sample_ = sample;
+}
+
+}  // namespace base
diff --git a/base/metrics/single_sample_metrics.h b/base/metrics/single_sample_metrics.h
new file mode 100644
index 0000000..b966cb1
--- /dev/null
+++ b/base/metrics/single_sample_metrics.h
@@ -0,0 +1,104 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_SINGLE_SAMPLE_METRICS_H_
+#define BASE_METRICS_SINGLE_SAMPLE_METRICS_H_
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/metrics/histogram_base.h"
+
+namespace base {
+
+// See base/metrics/histogram.h for parameter definitions. Must only be used
+// and destroyed from the same thread as construction.
+class BASE_EXPORT SingleSampleMetric {
+ public:
+  virtual ~SingleSampleMetric() = default;
+
+  virtual void SetSample(HistogramBase::Sample sample) = 0;
+};
+
+// Factory for creating single sample metrics. A single sample metric only
+// reports its sample once at destruction time. The sample may be changed prior
+// to destruction using the SetSample() method as many times as desired.
+//
+// The metric creation methods are safe to call from any thread, however the
+// returned class must only be used and destroyed from the same thread as
+// construction.
+//
+// See base/metrics/histogram_macros.h for usage recommendations and
+// base/metrics/histogram.h for full parameter definitions.
+class BASE_EXPORT SingleSampleMetricsFactory {
+ public:
+  virtual ~SingleSampleMetricsFactory() = default;
+
+  // Returns the factory provided by SetFactory(); if no factory has been set,
+  // a default factory is created and returned. Once the default factory has
+  // been vended, future calls to SetFactory() will fail.
+  static SingleSampleMetricsFactory* Get();
+  static void SetFactory(std::unique_ptr<SingleSampleMetricsFactory> factory);
+
+  // The factory normally persists until process shutdown, but in testing we
+  // should avoid leaking it since it sets a global.
+  static void DeleteFactoryForTesting();
+
+  // The methods below return a single sample metric for counts histograms; see
+  // method comments for the corresponding histogram macro.
+
+  // UMA_HISTOGRAM_CUSTOM_COUNTS()
+  virtual std::unique_ptr<SingleSampleMetric> CreateCustomCountsMetric(
+      const std::string& histogram_name,
+      HistogramBase::Sample min,
+      HistogramBase::Sample max,
+      uint32_t bucket_count) = 0;
+};
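+
+// Typical usage, as a minimal sketch ("Foo.Bar" and the bounds below are
+// illustrative values, not defined anywhere in this file):
+//
+//   std::unique_ptr<SingleSampleMetric> metric =
+//       SingleSampleMetricsFactory::Get()->CreateCustomCountsMetric(
+//           "Foo.Bar", /*min=*/1, /*max=*/1000, /*bucket_count=*/50);
+//   metric->SetSample(3);  // May be called any number of times...
+//   metric->SetSample(7);  // ...but only the last value survives.
+//   metric.reset();        // The sample is reported here, at destruction.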
+
+// Default implementation for when no factory has been provided to the process.
+// Samples are only recorded within the current process in this case, so they
+// will be lost in the event of sudden process termination.
+class BASE_EXPORT DefaultSingleSampleMetricsFactory
+    : public SingleSampleMetricsFactory {
+ public:
+  DefaultSingleSampleMetricsFactory() = default;
+  ~DefaultSingleSampleMetricsFactory() override = default;
+
+  // SingleSampleMetricsFactory:
+  std::unique_ptr<SingleSampleMetric> CreateCustomCountsMetric(
+      const std::string& histogram_name,
+      HistogramBase::Sample min,
+      HistogramBase::Sample max,
+      uint32_t bucket_count) override;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(DefaultSingleSampleMetricsFactory);
+};
+
+class BASE_EXPORT DefaultSingleSampleMetric : public SingleSampleMetric {
+ public:
+  DefaultSingleSampleMetric(const std::string& histogram_name,
+                            HistogramBase::Sample min,
+                            HistogramBase::Sample max,
+                            uint32_t bucket_count,
+                            int32_t flags);
+  ~DefaultSingleSampleMetric() override;
+
+  // SingleSampleMetric:
+  void SetSample(HistogramBase::Sample sample) override;
+
+ private:
+  HistogramBase* const histogram_;
+
+  // The last sample provided to SetSample(). We use -1 as a sentinel value to
+  // indicate no sample has been set.
+  HistogramBase::Sample sample_ = -1;
+
+  DISALLOW_COPY_AND_ASSIGN(DefaultSingleSampleMetric);
+};
+
+}  // namespace base
+
+#endif  // BASE_METRICS_SINGLE_SAMPLE_METRICS_H_
diff --git a/base/metrics/single_sample_metrics_unittest.cc b/base/metrics/single_sample_metrics_unittest.cc
new file mode 100644
index 0000000..5a6d159
--- /dev/null
+++ b/base/metrics/single_sample_metrics_unittest.cc
@@ -0,0 +1,124 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/single_sample_metrics.h"
+
+#include "base/memory/ptr_util.h"
+#include "base/metrics/dummy_histogram.h"
+#include "base/test/gtest_util.h"
+#include "base/test/histogram_tester.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+const HistogramBase::Sample kMin = 1;
+const HistogramBase::Sample kMax = 10;
+const uint32_t kBucketCount = 10;
+const char kMetricName[] = "Single.Sample.Metric";
+
+class SingleSampleMetricsTest : public testing::Test {
+ public:
+  SingleSampleMetricsTest() = default;
+
+  ~SingleSampleMetricsTest() override {
+    // Ensure we clean up after ourselves.
+    SingleSampleMetricsFactory::DeleteFactoryForTesting();
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(SingleSampleMetricsTest);
+};
+
+}  // namespace
+
+TEST_F(SingleSampleMetricsTest, DefaultFactoryGetSet) {
+  SingleSampleMetricsFactory* factory = SingleSampleMetricsFactory::Get();
+  ASSERT_TRUE(factory);
+
+  // The same factory should be returned on every subsequent call.
+  EXPECT_EQ(factory, SingleSampleMetricsFactory::Get());
+
+  // Setting a factory after the default has been instantiated should fail.
+  EXPECT_DCHECK_DEATH(SingleSampleMetricsFactory::SetFactory(
+      WrapUnique<SingleSampleMetricsFactory>(nullptr)));
+}
+
+TEST_F(SingleSampleMetricsTest, CustomFactoryGetSet) {
+  SingleSampleMetricsFactory* factory = new DefaultSingleSampleMetricsFactory();
+  SingleSampleMetricsFactory::SetFactory(WrapUnique(factory));
+  EXPECT_EQ(factory, SingleSampleMetricsFactory::Get());
+}
+
+TEST_F(SingleSampleMetricsTest, DefaultSingleSampleMetricNoValue) {
+  SingleSampleMetricsFactory* factory = SingleSampleMetricsFactory::Get();
+
+  HistogramTester tester;
+  std::unique_ptr<SingleSampleMetric> metric =
+      factory->CreateCustomCountsMetric(kMetricName, kMin, kMax, kBucketCount);
+  metric.reset();
+
+  // Verify that no sample is recorded if SetSample() is never called.
+  tester.ExpectTotalCount(kMetricName, 0);
+}
+
+TEST_F(SingleSampleMetricsTest, DefaultSingleSampleMetricWithValue) {
+  SingleSampleMetricsFactory* factory = SingleSampleMetricsFactory::Get();
+
+  HistogramTester tester;
+  std::unique_ptr<SingleSampleMetric> metric =
+      factory->CreateCustomCountsMetric(kMetricName, kMin, kMax, kBucketCount);
+
+  const HistogramBase::Sample kLastSample = 9;
+  metric->SetSample(1);
+  metric->SetSample(3);
+  metric->SetSample(5);
+  metric->SetSample(kLastSample);
+  metric.reset();
+
+  // Verify only the last sample sent to SetSample() is recorded.
+  tester.ExpectUniqueSample(kMetricName, kLastSample, 1);
+
+  // Verify construction implicitly by requesting a histogram with the same
+  // parameters; this test relies on the fact that histogram objects are unique
+  // per name. Different parameters will result in DummyHistogram being
+  // returned.
+  EXPECT_EQ(
+      DummyHistogram::GetInstance(),
+      Histogram::FactoryGet(kMetricName, 1, 3, 3, HistogramBase::kNoFlags));
+  EXPECT_NE(DummyHistogram::GetInstance(),
+            Histogram::FactoryGet(kMetricName, kMin, kMax, kBucketCount,
+                                  HistogramBase::kUmaTargetedHistogramFlag));
+}
+
+TEST_F(SingleSampleMetricsTest, MultipleMetricsAreDistinct) {
+  SingleSampleMetricsFactory* factory = SingleSampleMetricsFactory::Get();
+
+  HistogramTester tester;
+  std::unique_ptr<SingleSampleMetric> metric =
+      factory->CreateCustomCountsMetric(kMetricName, kMin, kMax, kBucketCount);
+  std::unique_ptr<SingleSampleMetric> metric2 =
+      factory->CreateCustomCountsMetric(kMetricName, kMin, kMax, kBucketCount);
+  const char kMetricName2[] = "Single.Sample.Metric.2";
+  std::unique_ptr<SingleSampleMetric> metric3 =
+      factory->CreateCustomCountsMetric(kMetricName2, kMin, kMax, kBucketCount);
+
+  const HistogramBase::Sample kSample1 = 5;
+  metric->SetSample(kSample1);
+  metric2->SetSample(kSample1);
+
+  const HistogramBase::Sample kSample2 = 7;
+  metric3->SetSample(kSample2);
+
+  metric.reset();
+  tester.ExpectUniqueSample(kMetricName, kSample1, 1);
+
+  metric2.reset();
+  tester.ExpectUniqueSample(kMetricName, kSample1, 2);
+
+  metric3.reset();
+  tester.ExpectUniqueSample(kMetricName2, kSample2, 1);
+}
+
+}  // namespace base
diff --git a/base/metrics/sparse_histogram.cc b/base/metrics/sparse_histogram.cc
new file mode 100644
index 0000000..30175a0
--- /dev/null
+++ b/base/metrics/sparse_histogram.cc
@@ -0,0 +1,290 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/sparse_histogram.h"
+
+#include <utility>
+
+#include "base/memory/ptr_util.h"
+#include "base/metrics/dummy_histogram.h"
+#include "base/metrics/metrics_hashes.h"
+#include "base/metrics/persistent_histogram_allocator.h"
+#include "base/metrics/persistent_sample_map.h"
+#include "base/metrics/sample_map.h"
+#include "base/metrics/statistics_recorder.h"
+#include "base/pickle.h"
+#include "base/strings/stringprintf.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+
+typedef HistogramBase::Count Count;
+typedef HistogramBase::Sample Sample;
+
+// static
+HistogramBase* SparseHistogram::FactoryGet(const std::string& name,
+                                           int32_t flags) {
+  HistogramBase* histogram = StatisticsRecorder::FindHistogram(name);
+  if (!histogram) {
+    // TODO(gayane): |HashMetricName| is called again in Histogram constructor.
+    // Refactor code to avoid the additional call.
+    bool should_record =
+        StatisticsRecorder::ShouldRecordHistogram(HashMetricName(name));
+    if (!should_record)
+      return DummyHistogram::GetInstance();
+    // Try to create the histogram using a "persistent" allocator. As of
+    // 2016-02-25, the availability of such is controlled by a base::Feature
+    // that is off by default. If the allocator doesn't exist or if
+    // allocating from it fails, code below will allocate the histogram from
+    // the process heap.
+    PersistentMemoryAllocator::Reference histogram_ref = 0;
+    std::unique_ptr<HistogramBase> tentative_histogram;
+    PersistentHistogramAllocator* allocator = GlobalHistogramAllocator::Get();
+    if (allocator) {
+      tentative_histogram = allocator->AllocateHistogram(
+          SPARSE_HISTOGRAM, name, 0, 0, nullptr, flags, &histogram_ref);
+    }
+
+    // Handle the case where no persistent allocator is present or the
+    // persistent allocation fails (perhaps because it is full).
+    if (!tentative_histogram) {
+      DCHECK(!histogram_ref);  // Should never have been set.
+      DCHECK(!allocator);      // Shouldn't have failed.
+      flags &= ~HistogramBase::kIsPersistent;
+      tentative_histogram.reset(new SparseHistogram(GetPermanentName(name)));
+      tentative_histogram->SetFlags(flags);
+    }
+
+    // Register this histogram with the StatisticsRecorder. Keep a copy of
+    // the pointer value to tell later whether the locally created histogram
+    // was registered or deleted. The type is "void" because it could point
+    // to released memory after the following line.
+    const void* tentative_histogram_ptr = tentative_histogram.get();
+    histogram = StatisticsRecorder::RegisterOrDeleteDuplicate(
+        tentative_histogram.release());
+
+    // Persistent histograms need some follow-up processing.
+    if (histogram_ref) {
+      allocator->FinalizeHistogram(histogram_ref,
+                                   histogram == tentative_histogram_ptr);
+    }
+  }
+
+  CHECK_EQ(SPARSE_HISTOGRAM, histogram->GetHistogramType());
+  return histogram;
+}
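+
+// Note on the registration dance in FactoryGet(): two threads can race through
+// it for the same name. RegisterOrDeleteDuplicate() resolves the race by
+// returning whichever histogram won registration and deleting the loser, which
+// is why the returned pointer is compared against the raw
+// |tentative_histogram_ptr| before finalizing any persistent allocation.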
+
+// static
+std::unique_ptr<HistogramBase> SparseHistogram::PersistentCreate(
+    PersistentHistogramAllocator* allocator,
+    const char* name,
+    HistogramSamples::Metadata* meta,
+    HistogramSamples::Metadata* logged_meta) {
+  return WrapUnique(
+      new SparseHistogram(allocator, name, meta, logged_meta));
+}
+
+SparseHistogram::~SparseHistogram() = default;
+
+uint64_t SparseHistogram::name_hash() const {
+  return unlogged_samples_->id();
+}
+
+HistogramType SparseHistogram::GetHistogramType() const {
+  return SPARSE_HISTOGRAM;
+}
+
+bool SparseHistogram::HasConstructionArguments(
+    Sample expected_minimum,
+    Sample expected_maximum,
+    uint32_t expected_bucket_count) const {
+  // SparseHistogram never has min/max/bucket_count limits.
+  return false;
+}
+
+void SparseHistogram::Add(Sample value) {
+  AddCount(value, 1);
+}
+
+void SparseHistogram::AddCount(Sample value, int count) {
+  if (count <= 0) {
+    NOTREACHED();
+    return;
+  }
+  {
+    base::AutoLock auto_lock(lock_);
+    unlogged_samples_->Accumulate(value, count);
+  }
+
+  FindAndRunCallback(value);
+}
+
+std::unique_ptr<HistogramSamples> SparseHistogram::SnapshotSamples() const {
+  std::unique_ptr<SampleMap> snapshot(new SampleMap(name_hash()));
+
+  base::AutoLock auto_lock(lock_);
+  snapshot->Add(*unlogged_samples_);
+  snapshot->Add(*logged_samples_);
+  return std::move(snapshot);
+}
+
+std::unique_ptr<HistogramSamples> SparseHistogram::SnapshotDelta() {
+  DCHECK(!final_delta_created_);
+
+  std::unique_ptr<SampleMap> snapshot(new SampleMap(name_hash()));
+  base::AutoLock auto_lock(lock_);
+  snapshot->Add(*unlogged_samples_);
+
+  unlogged_samples_->Subtract(*snapshot);
+  logged_samples_->Add(*snapshot);
+  return std::move(snapshot);
+}
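+
+// Worked example for SnapshotDelta(), with illustrative counts: if
+// |unlogged_samples_| holds {v: 5} and |logged_samples_| holds {v: 2}, the
+// returned delta is {v: 5}; afterwards unlogged is {v: 0} and logged is
+// {v: 7}. The sum of logged and unlogged therefore always equals everything
+// accumulated so far, while each delta is reported exactly once.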
+
+std::unique_ptr<HistogramSamples> SparseHistogram::SnapshotFinalDelta() const {
+  DCHECK(!final_delta_created_);
+  final_delta_created_ = true;
+
+  std::unique_ptr<SampleMap> snapshot(new SampleMap(name_hash()));
+  base::AutoLock auto_lock(lock_);
+  snapshot->Add(*unlogged_samples_);
+
+  return std::move(snapshot);
+}
+
+void SparseHistogram::AddSamples(const HistogramSamples& samples) {
+  base::AutoLock auto_lock(lock_);
+  unlogged_samples_->Add(samples);
+}
+
+bool SparseHistogram::AddSamplesFromPickle(PickleIterator* iter) {
+  base::AutoLock auto_lock(lock_);
+  return unlogged_samples_->AddFromPickle(iter);
+}
+
+void SparseHistogram::WriteHTMLGraph(std::string* output) const {
+  output->append("<PRE>");
+  WriteAsciiImpl(true, "<br>", output);
+  output->append("</PRE>");
+}
+
+void SparseHistogram::WriteAscii(std::string* output) const {
+  WriteAsciiImpl(true, "\n", output);
+}
+
+void SparseHistogram::SerializeInfoImpl(Pickle* pickle) const {
+  pickle->WriteString(histogram_name());
+  pickle->WriteInt(flags());
+}
+
+SparseHistogram::SparseHistogram(const char* name)
+    : HistogramBase(name),
+      unlogged_samples_(new SampleMap(HashMetricName(name))),
+      logged_samples_(new SampleMap(unlogged_samples_->id())) {}
+
+SparseHistogram::SparseHistogram(PersistentHistogramAllocator* allocator,
+                                 const char* name,
+                                 HistogramSamples::Metadata* meta,
+                                 HistogramSamples::Metadata* logged_meta)
+    : HistogramBase(name),
+      // While other histogram types maintain a static vector of values with
+      // sufficient space for both "active" and "logged" samples, with each
+      // SampleVector being given the appropriate half, sparse histograms
+      // have no such initial allocation. Each sample has its own record
+      // attached to a single PersistentSampleMap by a common 64-bit identifier.
+      // Since a sparse histogram has two sample maps (active and logged),
+      // there must be two sets of sample records with different IDs. The
+      // "active" samples use, for convenience purposes, an ID matching
+      // that of the histogram while the "logged" samples use that number
+      // plus 1.
+      unlogged_samples_(
+          new PersistentSampleMap(HashMetricName(name), allocator, meta)),
+      logged_samples_(new PersistentSampleMap(unlogged_samples_->id() + 1,
+                                              allocator,
+                                              logged_meta)) {}
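+
+// For example (illustrative values): a sparse histogram whose name hashes to
+// 0x1234 keeps its "active" sample records under ID 0x1234 and its "logged"
+// records under ID 0x1235, so both PersistentSampleMaps can share the same
+// allocator without their records colliding.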
+
+HistogramBase* SparseHistogram::DeserializeInfoImpl(PickleIterator* iter) {
+  std::string histogram_name;
+  int flags;
+  if (!iter->ReadString(&histogram_name) || !iter->ReadInt(&flags)) {
+    DLOG(ERROR) << "Pickle error decoding Histogram: " << histogram_name;
+    return nullptr;
+  }
+
+  flags &= ~HistogramBase::kIPCSerializationSourceFlag;
+
+  return SparseHistogram::FactoryGet(histogram_name, flags);
+}
+
+void SparseHistogram::GetParameters(DictionaryValue* params) const {
+  // TODO(kaiwang): Implement. (See HistogramBase::WriteJSON.)
+}
+
+void SparseHistogram::GetCountAndBucketData(Count* count,
+                                            int64_t* sum,
+                                            ListValue* buckets) const {
+  // TODO(kaiwang): Implement. (See HistogramBase::WriteJSON.)
+}
+
+void SparseHistogram::WriteAsciiImpl(bool graph_it,
+                                     const std::string& newline,
+                                     std::string* output) const {
+  // Get a local copy of the data so we are consistent.
+  std::unique_ptr<HistogramSamples> snapshot = SnapshotSamples();
+  Count total_count = snapshot->TotalCount();
+  double scaled_total_count = total_count / 100.0;
+
+  WriteAsciiHeader(total_count, output);
+  output->append(newline);
+
+  // Determine how wide the largest bucket range is (how many digits to print),
+  // so that we'll be able to right-align starts for the graphical bars.
+  // Determine which bucket has the largest sample count so that we can
+  // normalize the graphical bar-width relative to that sample count.
+  Count largest_count = 0;
+  Sample largest_sample = 0;
+  std::unique_ptr<SampleCountIterator> it = snapshot->Iterator();
+  while (!it->Done()) {
+    Sample min;
+    int64_t max;
+    Count count;
+    it->Get(&min, &max, &count);
+    if (min > largest_sample)
+      largest_sample = min;
+    if (count > largest_count)
+      largest_count = count;
+    it->Next();
+  }
+  size_t print_width = GetSimpleAsciiBucketRange(largest_sample).size() + 1;
+
+  // Iterate over each bucket and display it.
+  it = snapshot->Iterator();
+  while (!it->Done()) {
+    Sample min;
+    int64_t max;
+    Count count;
+    it->Get(&min, &max, &count);
+
+    // The recorded value is the bucket minimum, so display that.
+    std::string range = GetSimpleAsciiBucketRange(min);
+    output->append(range);
+    for (size_t j = 0; range.size() + j < print_width + 1; ++j)
+      output->push_back(' ');
+
+    if (graph_it)
+      WriteAsciiBucketGraph(count, largest_count, output);
+    WriteAsciiBucketValue(count, scaled_total_count, output);
+    output->append(newline);
+    it->Next();
+  }
+}
+
+void SparseHistogram::WriteAsciiHeader(const Count total_count,
+                                       std::string* output) const {
+  StringAppendF(output, "Histogram: %s recorded %d samples", histogram_name(),
+                total_count);
+  if (flags())
+    StringAppendF(output, " (flags = 0x%x)", flags());
+}
+
+}  // namespace base
diff --git a/base/metrics/sparse_histogram.h b/base/metrics/sparse_histogram.h
new file mode 100644
index 0000000..913762c
--- /dev/null
+++ b/base/metrics/sparse_histogram.h
@@ -0,0 +1,108 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_SPARSE_HISTOGRAM_H_
+#define BASE_METRICS_SPARSE_HISTOGRAM_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <map>
+#include <memory>
+#include <string>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/metrics/histogram_base.h"
+#include "base/metrics/histogram_samples.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+
+class HistogramSamples;
+class PersistentHistogramAllocator;
+class Pickle;
+class PickleIterator;
+
+class BASE_EXPORT SparseHistogram : public HistogramBase {
+ public:
+  // If there's one with same name, return the existing one. If not, create a
+  // new one.
+  static HistogramBase* FactoryGet(const std::string& name, int32_t flags);
+
+  // Create a histogram using data in persistent storage. The allocator must
+  // live longer than the created sparse histogram.
+  static std::unique_ptr<HistogramBase> PersistentCreate(
+      PersistentHistogramAllocator* allocator,
+      const char* name,
+      HistogramSamples::Metadata* meta,
+      HistogramSamples::Metadata* logged_meta);
+
+  ~SparseHistogram() override;
+
+  // HistogramBase implementation:
+  uint64_t name_hash() const override;
+  HistogramType GetHistogramType() const override;
+  bool HasConstructionArguments(Sample expected_minimum,
+                                Sample expected_maximum,
+                                uint32_t expected_bucket_count) const override;
+  void Add(Sample value) override;
+  void AddCount(Sample value, int count) override;
+  void AddSamples(const HistogramSamples& samples) override;
+  bool AddSamplesFromPickle(base::PickleIterator* iter) override;
+  std::unique_ptr<HistogramSamples> SnapshotSamples() const override;
+  std::unique_ptr<HistogramSamples> SnapshotDelta() override;
+  std::unique_ptr<HistogramSamples> SnapshotFinalDelta() const override;
+  void WriteHTMLGraph(std::string* output) const override;
+  void WriteAscii(std::string* output) const override;
+
+ protected:
+  // HistogramBase implementation:
+  void SerializeInfoImpl(base::Pickle* pickle) const override;
+
+ private:
+  // Clients should always use FactoryGet to create SparseHistogram.
+  explicit SparseHistogram(const char* name);
+
+  SparseHistogram(PersistentHistogramAllocator* allocator,
+                  const char* name,
+                  HistogramSamples::Metadata* meta,
+                  HistogramSamples::Metadata* logged_meta);
+
+  friend BASE_EXPORT HistogramBase* DeserializeHistogramInfo(
+      base::PickleIterator* iter);
+  static HistogramBase* DeserializeInfoImpl(base::PickleIterator* iter);
+
+  void GetParameters(DictionaryValue* params) const override;
+  void GetCountAndBucketData(Count* count,
+                             int64_t* sum,
+                             ListValue* buckets) const override;
+
+  // Helpers for emitting the ASCII graph. Each method appends data to
+  // |output|.
+  void WriteAsciiImpl(bool graph_it,
+                      const std::string& newline,
+                      std::string* output) const;
+
+  // Write a common header message describing this histogram.
+  void WriteAsciiHeader(const Count total_count,
+                        std::string* output) const;
+
+  // For calling the protected constructor.
+  friend class SparseHistogramTest;
+
+  // Protects access to |unlogged_samples_| and |logged_samples_|.
+  mutable base::Lock lock_;
+
+  // Flag to indicate if SnapshotFinalDelta has been previously called.
+  mutable bool final_delta_created_ = false;
+
+  std::unique_ptr<HistogramSamples> unlogged_samples_;
+  std::unique_ptr<HistogramSamples> logged_samples_;
+
+  DISALLOW_COPY_AND_ASSIGN(SparseHistogram);
+};
+
+}  // namespace base
+
+#endif  // BASE_METRICS_SPARSE_HISTOGRAM_H_
diff --git a/base/metrics/sparse_histogram_unittest.cc b/base/metrics/sparse_histogram_unittest.cc
new file mode 100644
index 0000000..72dd905
--- /dev/null
+++ b/base/metrics/sparse_histogram_unittest.cc
@@ -0,0 +1,388 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/sparse_histogram.h"
+
+#include <memory>
+#include <string>
+
+#include "base/metrics/histogram_base.h"
+#include "base/metrics/histogram_functions.h"
+#include "base/metrics/histogram_samples.h"
+#include "base/metrics/metrics_hashes.h"
+#include "base/metrics/persistent_histogram_allocator.h"
+#include "base/metrics/persistent_memory_allocator.h"
+#include "base/metrics/sample_map.h"
+#include "base/metrics/statistics_recorder.h"
+#include "base/pickle.h"
+#include "base/strings/stringprintf.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+// Test parameter indicates if a persistent memory allocator should be used
+// for histogram allocation. False will allocate histograms from the process
+// heap.
+class SparseHistogramTest : public testing::TestWithParam<bool> {
+ protected:
+  const int32_t kAllocatorMemorySize = 8 << 20;  // 8 MiB
+
+  SparseHistogramTest() : use_persistent_histogram_allocator_(GetParam()) {}
+
+  void SetUp() override {
+    if (use_persistent_histogram_allocator_)
+      CreatePersistentMemoryAllocator();
+
+    // Each test will have a clean state (no Histogram / BucketRanges
+    // registered).
+    InitializeStatisticsRecorder();
+  }
+
+  void TearDown() override {
+    if (allocator_) {
+      ASSERT_FALSE(allocator_->IsFull());
+      ASSERT_FALSE(allocator_->IsCorrupt());
+    }
+    UninitializeStatisticsRecorder();
+    DestroyPersistentMemoryAllocator();
+  }
+
+  void InitializeStatisticsRecorder() {
+    DCHECK(!statistics_recorder_);
+    statistics_recorder_ = StatisticsRecorder::CreateTemporaryForTesting();
+  }
+
+  void UninitializeStatisticsRecorder() {
+    statistics_recorder_.reset();
+  }
+
+  void CreatePersistentMemoryAllocator() {
+    GlobalHistogramAllocator::CreateWithLocalMemory(
+        kAllocatorMemorySize, 0, "SparseHistogramAllocatorTest");
+    allocator_ = GlobalHistogramAllocator::Get()->memory_allocator();
+  }
+
+  void DestroyPersistentMemoryAllocator() {
+    allocator_ = nullptr;
+    GlobalHistogramAllocator::ReleaseForTesting();
+  }
+
+  std::unique_ptr<SparseHistogram> NewSparseHistogram(const char* name) {
+    // std::make_unique can't access the protected constructor, so call it
+    // directly. This test class is a friend and therefore has access.
+    return std::unique_ptr<SparseHistogram>(new SparseHistogram(name));
+  }
+
+  const bool use_persistent_histogram_allocator_;
+
+  std::unique_ptr<StatisticsRecorder> statistics_recorder_;
+  PersistentMemoryAllocator* allocator_ = nullptr;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(SparseHistogramTest);
+};
+
+// Run all SparseHistogramTest cases with both heap and persistent memory.
+INSTANTIATE_TEST_CASE_P(HeapAndPersistent,
+                        SparseHistogramTest,
+                        testing::Bool());
+
+TEST_P(SparseHistogramTest, BasicTest) {
+  std::unique_ptr<SparseHistogram> histogram(NewSparseHistogram("Sparse"));
+  std::unique_ptr<HistogramSamples> snapshot(histogram->SnapshotSamples());
+  EXPECT_EQ(0, snapshot->TotalCount());
+  EXPECT_EQ(0, snapshot->sum());
+
+  histogram->Add(100);
+  std::unique_ptr<HistogramSamples> snapshot1(histogram->SnapshotSamples());
+  EXPECT_EQ(1, snapshot1->TotalCount());
+  EXPECT_EQ(1, snapshot1->GetCount(100));
+
+  histogram->Add(100);
+  histogram->Add(101);
+  std::unique_ptr<HistogramSamples> snapshot2(histogram->SnapshotSamples());
+  EXPECT_EQ(3, snapshot2->TotalCount());
+  EXPECT_EQ(2, snapshot2->GetCount(100));
+  EXPECT_EQ(1, snapshot2->GetCount(101));
+}
+
+TEST_P(SparseHistogramTest, BasicTestAddCount) {
+  std::unique_ptr<SparseHistogram> histogram(NewSparseHistogram("Sparse"));
+  std::unique_ptr<HistogramSamples> snapshot(histogram->SnapshotSamples());
+  EXPECT_EQ(0, snapshot->TotalCount());
+  EXPECT_EQ(0, snapshot->sum());
+
+  histogram->AddCount(100, 15);
+  std::unique_ptr<HistogramSamples> snapshot1(histogram->SnapshotSamples());
+  EXPECT_EQ(15, snapshot1->TotalCount());
+  EXPECT_EQ(15, snapshot1->GetCount(100));
+
+  histogram->AddCount(100, 15);
+  histogram->AddCount(101, 25);
+  std::unique_ptr<HistogramSamples> snapshot2(histogram->SnapshotSamples());
+  EXPECT_EQ(55, snapshot2->TotalCount());
+  EXPECT_EQ(30, snapshot2->GetCount(100));
+  EXPECT_EQ(25, snapshot2->GetCount(101));
+}
+
+TEST_P(SparseHistogramTest, AddCount_LargeValuesDontOverflow) {
+  std::unique_ptr<SparseHistogram> histogram(NewSparseHistogram("Sparse"));
+  std::unique_ptr<HistogramSamples> snapshot(histogram->SnapshotSamples());
+  EXPECT_EQ(0, snapshot->TotalCount());
+  EXPECT_EQ(0, snapshot->sum());
+
+  histogram->AddCount(1000000000, 15);
+  std::unique_ptr<HistogramSamples> snapshot1(histogram->SnapshotSamples());
+  EXPECT_EQ(15, snapshot1->TotalCount());
+  EXPECT_EQ(15, snapshot1->GetCount(1000000000));
+
+  histogram->AddCount(1000000000, 15);
+  histogram->AddCount(1010000000, 25);
+  std::unique_ptr<HistogramSamples> snapshot2(histogram->SnapshotSamples());
+  EXPECT_EQ(55, snapshot2->TotalCount());
+  EXPECT_EQ(30, snapshot2->GetCount(1000000000));
+  EXPECT_EQ(25, snapshot2->GetCount(1010000000));
+  EXPECT_EQ(55250000000LL, snapshot2->sum());
+}
+
+// Make sure that counts returned by Histogram::SnapshotDelta do not overflow
+// even when a total count (returned by Histogram::SnapshotSamples) does.
+TEST_P(SparseHistogramTest, AddCount_LargeCountsDontOverflow) {
+  std::unique_ptr<SparseHistogram> histogram(NewSparseHistogram("Sparse"));
+  std::unique_ptr<HistogramSamples> snapshot(histogram->SnapshotSamples());
+  EXPECT_EQ(0, snapshot->TotalCount());
+  EXPECT_EQ(0, snapshot->sum());
+
+  const int count = (1 << 30) - 1;
+
+  // Repeat N times to make sure that there is no internal value overflow.
+  for (int i = 0; i < 10; ++i) {
+    histogram->AddCount(42, count);
+    std::unique_ptr<HistogramSamples> samples = histogram->SnapshotDelta();
+    EXPECT_EQ(count, samples->TotalCount());
+    EXPECT_EQ(count, samples->GetCount(42));
+  }
+}
+
+TEST_P(SparseHistogramTest, MacroBasicTest) {
+  UmaHistogramSparse("Sparse", 100);
+  UmaHistogramSparse("Sparse", 200);
+  UmaHistogramSparse("Sparse", 100);
+
+  const StatisticsRecorder::Histograms histograms =
+      StatisticsRecorder::GetHistograms();
+
+  ASSERT_THAT(histograms, testing::SizeIs(1));
+  const HistogramBase* const sparse_histogram = histograms[0];
+
+  EXPECT_EQ(SPARSE_HISTOGRAM, sparse_histogram->GetHistogramType());
+  EXPECT_EQ("Sparse", StringPiece(sparse_histogram->histogram_name()));
+  EXPECT_EQ(
+      HistogramBase::kUmaTargetedHistogramFlag |
+          (use_persistent_histogram_allocator_ ? HistogramBase::kIsPersistent
+                                               : 0),
+      sparse_histogram->flags());
+
+  std::unique_ptr<HistogramSamples> samples =
+      sparse_histogram->SnapshotSamples();
+  EXPECT_EQ(3, samples->TotalCount());
+  EXPECT_EQ(2, samples->GetCount(100));
+  EXPECT_EQ(1, samples->GetCount(200));
+}
+
+TEST_P(SparseHistogramTest, MacroInLoopTest) {
+  // Unlike the macros in histogram.h, SparseHistogram macros can have a
+  // variable as histogram name.
+  for (int i = 0; i < 2; i++) {
+    UmaHistogramSparse(StringPrintf("Sparse%d", i), 100);
+  }
+
+  const StatisticsRecorder::Histograms histograms =
+      StatisticsRecorder::Sort(StatisticsRecorder::GetHistograms());
+  ASSERT_THAT(histograms, testing::SizeIs(2));
+  EXPECT_STREQ(histograms[0]->histogram_name(), "Sparse0");
+  EXPECT_STREQ(histograms[1]->histogram_name(), "Sparse1");
+}
+
+TEST_P(SparseHistogramTest, Serialize) {
+  std::unique_ptr<SparseHistogram> histogram(NewSparseHistogram("Sparse"));
+  histogram->SetFlags(HistogramBase::kIPCSerializationSourceFlag);
+
+  Pickle pickle;
+  histogram->SerializeInfo(&pickle);
+
+  PickleIterator iter(pickle);
+
+  int type;
+  EXPECT_TRUE(iter.ReadInt(&type));
+  EXPECT_EQ(SPARSE_HISTOGRAM, type);
+
+  std::string name;
+  EXPECT_TRUE(iter.ReadString(&name));
+  EXPECT_EQ("Sparse", name);
+
+  int flag;
+  EXPECT_TRUE(iter.ReadInt(&flag));
+  EXPECT_EQ(HistogramBase::kIPCSerializationSourceFlag, flag);
+
+  // No more data in the pickle.
+  EXPECT_FALSE(iter.SkipBytes(1));
+}
+
+// Ensure that race conditions that cause multiple, identical sparse histograms
+// to be created will safely resolve to a single one.
+TEST_P(SparseHistogramTest, DuplicationSafety) {
+  const char histogram_name[] = "Duplicated";
+  size_t histogram_count = StatisticsRecorder::GetHistogramCount();
+
+  // Create a histogram that we will later duplicate.
+  HistogramBase* original =
+      SparseHistogram::FactoryGet(histogram_name, HistogramBase::kNoFlags);
+  ++histogram_count;
+  DCHECK_EQ(histogram_count, StatisticsRecorder::GetHistogramCount());
+  original->Add(1);
+
+  // Create a duplicate. This has to happen differently depending on where the
+  // memory is taken from.
+  if (use_persistent_histogram_allocator_) {
+    // To allocate from persistent memory, clear the last_created reference in
+    // the GlobalHistogramAllocator. This will cause an Import to recreate
+    // the just-created histogram which will then be released as a duplicate.
+    GlobalHistogramAllocator::Get()->ClearLastCreatedReferenceForTesting();
+    // Creating a different histogram will first do an Import to ensure it
+    // hasn't been created elsewhere, triggering the duplication and release.
+    SparseHistogram::FactoryGet("something.new", HistogramBase::kNoFlags);
+    ++histogram_count;
+  } else {
+    // To allocate from the heap, just call the (private) constructor directly.
+    // Delete it immediately, as would have happened within FactoryGet().
+    std::unique_ptr<SparseHistogram> something =
+        NewSparseHistogram(histogram_name);
+    DCHECK_NE(original, something.get());
+  }
+  DCHECK_EQ(histogram_count, StatisticsRecorder::GetHistogramCount());
+
+  // Re-creating the histogram via FactoryGet() will return the same one.
+  HistogramBase* duplicate =
+      SparseHistogram::FactoryGet(histogram_name, HistogramBase::kNoFlags);
+  DCHECK_EQ(original, duplicate);
+  DCHECK_EQ(histogram_count, StatisticsRecorder::GetHistogramCount());
+  duplicate->Add(2);
+
+  // Ensure that original histograms are still cross-functional.
+  original->Add(2);
+  duplicate->Add(1);
+  std::unique_ptr<HistogramSamples> snapshot_orig = original->SnapshotSamples();
+  std::unique_ptr<HistogramSamples> snapshot_dup = duplicate->SnapshotSamples();
+  DCHECK_EQ(2, snapshot_orig->GetCount(2));
+  DCHECK_EQ(2, snapshot_dup->GetCount(1));
+}
+
+TEST_P(SparseHistogramTest, FactoryTime) {
+  const int kTestCreateCount = 1 << 10;  // Must be power-of-2.
+  const int kTestLookupCount = 100000;
+  const int kTestAddCount = 100000;
+
+  // Create all histogram names in advance for accurate timing below.
+  std::vector<std::string> histogram_names;
+  for (int i = 0; i < kTestCreateCount; ++i) {
+    histogram_names.push_back(
+        StringPrintf("TestHistogram.%d", i % kTestCreateCount));
+  }
+
+  // Calculate cost of creating histograms.
+  TimeTicks create_start = TimeTicks::Now();
+  for (int i = 0; i < kTestCreateCount; ++i)
+    SparseHistogram::FactoryGet(histogram_names[i], HistogramBase::kNoFlags);
+  TimeDelta create_ticks = TimeTicks::Now() - create_start;
+  int64_t create_ms = create_ticks.InMilliseconds();
+
+  VLOG(1) << kTestCreateCount << " histogram creations took " << create_ms
+          << "ms or about "
+          << (create_ms * 1000000) / kTestCreateCount
+          << "ns each.";
+
+  // Calculate cost of looking up existing histograms.
+  TimeTicks lookup_start = TimeTicks::Now();
+  for (int i = 0; i < kTestLookupCount; ++i) {
+    // 6007 is co-prime with kTestCreateCount and so will do lookups in an
+    // order less likely to be cacheable (but still hit them all) should the
+    // underlying storage use the exact histogram name as the key.
+    const int i_mult = 6007;
+    static_assert(i_mult < INT_MAX / kTestCreateCount, "Multiplier too big");
+    int index = (i * i_mult) & (kTestCreateCount - 1);
+    SparseHistogram::FactoryGet(histogram_names[index],
+                                HistogramBase::kNoFlags);
+  }
+  TimeDelta lookup_ticks = TimeTicks::Now() - lookup_start;
+  int64_t lookup_ms = lookup_ticks.InMilliseconds();
+
+  VLOG(1) << kTestLookupCount << " histogram lookups took " << lookup_ms
+          << "ms or about "
+          << (lookup_ms * 1000000) / kTestLookupCount
+          << "ns each.";
+
+  // Calculate cost of accessing histograms.
+  HistogramBase* histogram =
+      SparseHistogram::FactoryGet(histogram_names[0], HistogramBase::kNoFlags);
+  ASSERT_TRUE(histogram);
+  TimeTicks add_start = TimeTicks::Now();
+  for (int i = 0; i < kTestAddCount; ++i)
+    histogram->Add(i & 127);
+  TimeDelta add_ticks = TimeTicks::Now() - add_start;
+  int64_t add_ms = add_ticks.InMilliseconds();
+
+  VLOG(1) << kTestAddCount << " histogram adds took " << add_ms
+          << "ms or about "
+          << (add_ms * 1000000) / kTestAddCount
+          << "ns each.";
+}
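+
+// A sketch that is not part of the upstream test, illustrating why the odd
+// multiplier above visits every index exactly once: multiplication by an odd
+// constant is a bijection modulo a power of two. A plain array keeps the
+// sketch free of extra includes.
+TEST(SparseHistogramIllustration, OddMultiplierVisitsAllIndices) {
+  const int kCount = 1 << 10;  // Matches kTestCreateCount above.
+  const int kMult = 6007;      // Odd, hence co-prime with any power of two.
+  bool seen[1 << 10] = {};
+  for (int i = 0; i < kCount; ++i)
+    seen[(i * kMult) & (kCount - 1)] = true;
+  for (int i = 0; i < kCount; ++i)
+    EXPECT_TRUE(seen[i]) << "index " << i << " was never visited";
+}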
+
+TEST_P(SparseHistogramTest, ExtremeValues) {
+  static const struct {
+    Histogram::Sample sample;
+    int64_t expected_max;
+  } cases[] = {
+      // Note: We use -2147483647 - 1 rather than -2147483648 because the
+      // latter is parsed as unary minus applied to the literal 2147483648,
+      // which can't be represented as an int32 and causes a warning.
+      {-2147483647 - 1, -2147483647LL},
+      {0, 1},
+      {2147483647, 2147483648LL},
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    HistogramBase* histogram =
+        SparseHistogram::FactoryGet(StringPrintf("ExtremeValues_%zu", i),
+                                    HistogramBase::kUmaTargetedHistogramFlag);
+    histogram->Add(cases[i].sample);
+
+    std::unique_ptr<HistogramSamples> snapshot = histogram->SnapshotSamples();
+    std::unique_ptr<SampleCountIterator> it = snapshot->Iterator();
+    ASSERT_FALSE(it->Done());
+
+    base::Histogram::Sample min;
+    int64_t max;
+    base::Histogram::Count count;
+    it->Get(&min, &max, &count);
+
+    EXPECT_EQ(1, count);
+    EXPECT_EQ(cases[i].sample, min);
+    EXPECT_EQ(cases[i].expected_max, max);
+
+    it->Next();
+    EXPECT_TRUE(it->Done());
+  }
+}
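+
+// Compile-time sketch, not upstream: -2147483648 would parse as unary minus
+// applied to the out-of-range literal 2147483648, whereas -2147483647 - 1
+// stays within int. Assumes 32-bit int and the transitive <climits> include
+// that the INT_MAX use in FactoryTime above already relies on.
+static_assert(-2147483647 - 1 == INT_MIN,
+              "spelled-out minimum matches INT_MIN");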
+
+TEST_P(SparseHistogramTest, HistogramNameHash) {
+  const char kName[] = "TestName";
+  HistogramBase* histogram = SparseHistogram::FactoryGet(
+      kName, HistogramBase::kUmaTargetedHistogramFlag);
+  EXPECT_EQ(histogram->name_hash(), HashMetricName(kName));
+}
+
+}  // namespace base
diff --git a/base/metrics/statistics_recorder.cc b/base/metrics/statistics_recorder.cc
new file mode 100644
index 0000000..28773a1
--- /dev/null
+++ b/base/metrics/statistics_recorder.cc
@@ -0,0 +1,416 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/statistics_recorder.h"
+
+#include <memory>
+
+#include "base/at_exit.h"
+#include "base/debug/leak_annotations.h"
+#include "base/json/string_escape.h"
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/metrics/histogram.h"
+#include "base/metrics/histogram_snapshot_manager.h"
+#include "base/metrics/metrics_hashes.h"
+#include "base/metrics/persistent_histogram_allocator.h"
+#include "base/metrics/record_histogram_checker.h"
+#include "base/stl_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/values.h"
+
+namespace base {
+namespace {
+
+bool HistogramNameLesser(const base::HistogramBase* a,
+                         const base::HistogramBase* b) {
+  return strcmp(a->histogram_name(), b->histogram_name()) < 0;
+}
+
+}  // namespace
+
+// static
+LazyInstance<Lock>::Leaky StatisticsRecorder::lock_;
+
+// static
+StatisticsRecorder* StatisticsRecorder::top_ = nullptr;
+
+// static
+bool StatisticsRecorder::is_vlog_initialized_ = false;
+
+size_t StatisticsRecorder::BucketRangesHash::operator()(
+    const BucketRanges* const a) const {
+  return a->checksum();
+}
+
+bool StatisticsRecorder::BucketRangesEqual::operator()(
+    const BucketRanges* const a,
+    const BucketRanges* const b) const {
+  return a->Equals(b);
+}
+
+StatisticsRecorder::~StatisticsRecorder() {
+  const AutoLock auto_lock(lock_.Get());
+  DCHECK_EQ(this, top_);
+  top_ = previous_;
+}
+
+// static
+void StatisticsRecorder::EnsureGlobalRecorderWhileLocked() {
+  lock_.Get().AssertAcquired();
+  if (top_)
+    return;
+
+  const StatisticsRecorder* const p = new StatisticsRecorder;
+  // The global recorder is never deleted.
+  ANNOTATE_LEAKING_OBJECT_PTR(p);
+  DCHECK_EQ(p, top_);
+}
+
+// static
+void StatisticsRecorder::RegisterHistogramProvider(
+    const WeakPtr<HistogramProvider>& provider) {
+  const AutoLock auto_lock(lock_.Get());
+  EnsureGlobalRecorderWhileLocked();
+  top_->providers_.push_back(provider);
+}
+
+// static
+HistogramBase* StatisticsRecorder::RegisterOrDeleteDuplicate(
+    HistogramBase* histogram) {
+  // Declared before |auto_lock| to ensure correct destruction order.
+  std::unique_ptr<HistogramBase> histogram_deleter;
+  const AutoLock auto_lock(lock_.Get());
+  EnsureGlobalRecorderWhileLocked();
+
+  const char* const name = histogram->histogram_name();
+  HistogramBase*& registered = top_->histograms_[name];
+
+  if (!registered) {
+    // |name| is guaranteed to never change or be deallocated so long
+    // as the histogram is alive (which is forever).
+    registered = histogram;
+    ANNOTATE_LEAKING_OBJECT_PTR(histogram);  // see crbug.com/79322
+    // If there are callbacks for this histogram, we set the kCallbackExists
+    // flag.
+    const auto callback_iterator = top_->callbacks_.find(name);
+    if (callback_iterator != top_->callbacks_.end()) {
+      if (!callback_iterator->second.is_null())
+        histogram->SetFlags(HistogramBase::kCallbackExists);
+      else
+        histogram->ClearFlags(HistogramBase::kCallbackExists);
+    }
+    return histogram;
+  }
+
+  if (histogram == registered) {
+    // The histogram was registered before.
+    return histogram;
+  }
+
+  // We already have one histogram with this name.
+  histogram_deleter.reset(histogram);
+  return registered;
+}
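+
+// Illustration, not upstream code: the deleter-before-lock pattern in
+// RegisterOrDeleteDuplicate() above works because locals are destroyed in
+// reverse order of declaration, so the deleter runs after the lock guard
+// has released the lock, keeping the actual delete outside the critical
+// section:
+//
+//   std::unique_ptr<HistogramBase> histogram_deleter;  // destroyed second
+//   const AutoLock auto_lock(lock_.Get());             // destroyed first
+//   ...
+//   histogram_deleter.reset(histogram);  // deletion deferred past unlock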
+
+// static
+const BucketRanges* StatisticsRecorder::RegisterOrDeleteDuplicateRanges(
+    const BucketRanges* ranges) {
+  DCHECK(ranges->HasValidChecksum());
+
+  // Declared before |auto_lock| to ensure correct destruction order.
+  std::unique_ptr<const BucketRanges> ranges_deleter;
+  const AutoLock auto_lock(lock_.Get());
+  EnsureGlobalRecorderWhileLocked();
+
+  const BucketRanges* const registered = *top_->ranges_.insert(ranges).first;
+  if (registered == ranges) {
+    ANNOTATE_LEAKING_OBJECT_PTR(ranges);
+  } else {
+    ranges_deleter.reset(ranges);
+  }
+
+  return registered;
+}
+
+// static
+void StatisticsRecorder::WriteHTMLGraph(const std::string& query,
+                                        std::string* output) {
+  for (const HistogramBase* const histogram :
+       Sort(WithName(GetHistograms(), query))) {
+    histogram->WriteHTMLGraph(output);
+    *output += "<br><hr><br>";
+  }
+}
+
+// static
+void StatisticsRecorder::WriteGraph(const std::string& query,
+                                    std::string* output) {
+  if (query.length())
+    StringAppendF(output, "Collections of histograms for %s\n", query.c_str());
+  else
+    output->append("Collections of all histograms\n");
+
+  for (const HistogramBase* const histogram :
+       Sort(WithName(GetHistograms(), query))) {
+    histogram->WriteAscii(output);
+    output->append("\n");
+  }
+}
+
+// static
+std::string StatisticsRecorder::ToJSON(JSONVerbosityLevel verbosity_level) {
+  std::string output = "{\"histograms\":[";
+  const char* sep = "";
+  for (const HistogramBase* const histogram : Sort(GetHistograms())) {
+    output += sep;
+    sep = ",";
+    std::string json;
+    histogram->WriteJSON(&json, verbosity_level);
+    output += json;
+  }
+  output += "]}";
+  return output;
+}
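+
+// Example ToJSON() output shape (illustrative; the per-histogram fields come
+// from HistogramBase::WriteJSON and are abbreviated here):
+//
+//   {"histograms":[{"name":"Foo","count":2,"buckets":[...]}, ...]}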
+
+// static
+std::vector<const BucketRanges*> StatisticsRecorder::GetBucketRanges() {
+  std::vector<const BucketRanges*> out;
+  const AutoLock auto_lock(lock_.Get());
+  EnsureGlobalRecorderWhileLocked();
+  out.reserve(top_->ranges_.size());
+  out.assign(top_->ranges_.begin(), top_->ranges_.end());
+  return out;
+}
+
+// static
+HistogramBase* StatisticsRecorder::FindHistogram(base::StringPiece name) {
+  // This must be called *before* the lock is acquired below because it calls
+  // back into this class to register histograms, and those methods acquire
+  // the lock themselves.
+  ImportGlobalPersistentHistograms();
+
+  const AutoLock auto_lock(lock_.Get());
+  EnsureGlobalRecorderWhileLocked();
+
+  const HistogramMap::const_iterator it = top_->histograms_.find(name);
+  return it != top_->histograms_.end() ? it->second : nullptr;
+}
+
+// static
+StatisticsRecorder::HistogramProviders
+StatisticsRecorder::GetHistogramProviders() {
+  const AutoLock auto_lock(lock_.Get());
+  EnsureGlobalRecorderWhileLocked();
+  return top_->providers_;
+}
+
+// static
+void StatisticsRecorder::ImportProvidedHistograms() {
+  // Merge histogram data from each provider in turn.
+  for (const WeakPtr<HistogramProvider>& provider : GetHistogramProviders()) {
+    // The weak pointer may be invalid if the provider was destroyed, though
+    // providers generally never are.
+    if (provider)
+      provider->MergeHistogramDeltas();
+  }
+}
+
+// static
+void StatisticsRecorder::PrepareDeltas(
+    bool include_persistent,
+    HistogramBase::Flags flags_to_set,
+    HistogramBase::Flags required_flags,
+    HistogramSnapshotManager* snapshot_manager) {
+  Histograms histograms = GetHistograms();
+  if (!include_persistent)
+    histograms = NonPersistent(std::move(histograms));
+  snapshot_manager->PrepareDeltas(Sort(std::move(histograms)), flags_to_set,
+                                  required_flags);
+}
+
+// static
+void StatisticsRecorder::InitLogOnShutdown() {
+  const AutoLock auto_lock(lock_.Get());
+  InitLogOnShutdownWhileLocked();
+}
+
+// static
+bool StatisticsRecorder::SetCallback(
+    const std::string& name,
+    const StatisticsRecorder::OnSampleCallback& cb) {
+  DCHECK(!cb.is_null());
+  const AutoLock auto_lock(lock_.Get());
+  EnsureGlobalRecorderWhileLocked();
+
+  if (!top_->callbacks_.insert({name, cb}).second)
+    return false;
+
+  const HistogramMap::const_iterator it = top_->histograms_.find(name);
+  if (it != top_->histograms_.end())
+    it->second->SetFlags(HistogramBase::kCallbackExists);
+
+  return true;
+}
+
+// static
+void StatisticsRecorder::ClearCallback(const std::string& name) {
+  const AutoLock auto_lock(lock_.Get());
+  EnsureGlobalRecorderWhileLocked();
+
+  top_->callbacks_.erase(name);
+
+  // We also clear the flag from the histogram (if it exists).
+  const HistogramMap::const_iterator it = top_->histograms_.find(name);
+  if (it != top_->histograms_.end())
+    it->second->ClearFlags(HistogramBase::kCallbackExists);
+}
+
+// static
+StatisticsRecorder::OnSampleCallback StatisticsRecorder::FindCallback(
+    const std::string& name) {
+  const AutoLock auto_lock(lock_.Get());
+  EnsureGlobalRecorderWhileLocked();
+  const auto it = top_->callbacks_.find(name);
+  return it != top_->callbacks_.end() ? it->second : OnSampleCallback();
+}
+
+// static
+size_t StatisticsRecorder::GetHistogramCount() {
+  const AutoLock auto_lock(lock_.Get());
+  EnsureGlobalRecorderWhileLocked();
+  return top_->histograms_.size();
+}
+
+// static
+void StatisticsRecorder::ForgetHistogramForTesting(base::StringPiece name) {
+  const AutoLock auto_lock(lock_.Get());
+  EnsureGlobalRecorderWhileLocked();
+
+  const HistogramMap::iterator found = top_->histograms_.find(name);
+  if (found == top_->histograms_.end())
+    return;
+
+  HistogramBase* const base = found->second;
+  if (base->GetHistogramType() != SPARSE_HISTOGRAM) {
+    // When forgetting a histogram, it's likely that other information is
+    // also becoming invalid. Clear the persistent reference that may no
+    // longer be valid. There's no danger in this as, at worst, duplicates
+    // will be created in persistent memory.
+    static_cast<Histogram*>(base)->bucket_ranges()->set_persistent_reference(0);
+  }
+
+  top_->histograms_.erase(found);
+}
+
+// static
+std::unique_ptr<StatisticsRecorder>
+StatisticsRecorder::CreateTemporaryForTesting() {
+  const AutoLock auto_lock(lock_.Get());
+  return WrapUnique(new StatisticsRecorder());
+}
+
+// static
+void StatisticsRecorder::SetRecordChecker(
+    std::unique_ptr<RecordHistogramChecker> record_checker) {
+  const AutoLock auto_lock(lock_.Get());
+  EnsureGlobalRecorderWhileLocked();
+  top_->record_checker_ = std::move(record_checker);
+}
+
+// static
+bool StatisticsRecorder::ShouldRecordHistogram(uint64_t histogram_hash) {
+  const AutoLock auto_lock(lock_.Get());
+  EnsureGlobalRecorderWhileLocked();
+  return !top_->record_checker_ ||
+         top_->record_checker_->ShouldRecord(histogram_hash);
+}
+
+// static
+StatisticsRecorder::Histograms StatisticsRecorder::GetHistograms() {
+  // This must be called *before* the lock is acquired below because it calls
+  // back into this class to register histograms, and those methods acquire
+  // the lock themselves.
+  ImportGlobalPersistentHistograms();
+
+  Histograms out;
+
+  const AutoLock auto_lock(lock_.Get());
+  EnsureGlobalRecorderWhileLocked();
+
+  out.reserve(top_->histograms_.size());
+  for (const auto& entry : top_->histograms_)
+    out.push_back(entry.second);
+
+  return out;
+}
+
+// static
+StatisticsRecorder::Histograms StatisticsRecorder::Sort(Histograms histograms) {
+  std::sort(histograms.begin(), histograms.end(), &HistogramNameLesser);
+  return histograms;
+}
+
+// static
+StatisticsRecorder::Histograms StatisticsRecorder::WithName(
+    Histograms histograms,
+    const std::string& query) {
+  // Need a C-string query for comparisons against C-string histogram name.
+  const char* const query_string = query.c_str();
+  histograms.erase(std::remove_if(histograms.begin(), histograms.end(),
+                                  [query_string](const HistogramBase* const h) {
+                                    return !strstr(h->histogram_name(),
+                                                   query_string);
+                                  }),
+                   histograms.end());
+  return histograms;
+}
+
+// static
+StatisticsRecorder::Histograms StatisticsRecorder::NonPersistent(
+    Histograms histograms) {
+  histograms.erase(
+      std::remove_if(histograms.begin(), histograms.end(),
+                     [](const HistogramBase* const h) {
+                       return (h->flags() & HistogramBase::kIsPersistent) != 0;
+                     }),
+      histograms.end());
+  return histograms;
+}
+
+// static
+void StatisticsRecorder::ImportGlobalPersistentHistograms() {
+  // Import histograms from known persistent storage. Histograms could have been
+  // added by other processes and they must be fetched and recognized locally.
+  // If the persistent memory segment is not shared between processes, this call
+  // does nothing.
+  if (GlobalHistogramAllocator* allocator = GlobalHistogramAllocator::Get())
+    allocator->ImportHistogramsToStatisticsRecorder();
+}
+
+// This singleton instance should be created during the single-threaded
+// portion of main(); hence, it is not thread safe. It initializes globals to
+// provide support for all future calls.
+StatisticsRecorder::StatisticsRecorder() {
+  lock_.Get().AssertAcquired();
+  previous_ = top_;
+  top_ = this;
+  InitLogOnShutdownWhileLocked();
+}
+
+// static
+void StatisticsRecorder::InitLogOnShutdownWhileLocked() {
+  lock_.Get().AssertAcquired();
+  if (!is_vlog_initialized_ && VLOG_IS_ON(1)) {
+    is_vlog_initialized_ = true;
+    const auto dump_to_vlog = [](void*) {
+      std::string output;
+      WriteGraph("", &output);
+      VLOG(1) << output;
+    };
+    AtExitManager::RegisterCallback(dump_to_vlog, nullptr);
+  }
+}
+
+}  // namespace base
diff --git a/base/metrics/statistics_recorder.h b/base/metrics/statistics_recorder.h
new file mode 100644
index 0000000..87a9311
--- /dev/null
+++ b/base/metrics/statistics_recorder.h
@@ -0,0 +1,302 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// StatisticsRecorder holds all Histograms and BucketRanges that are used by
+// Histograms in the system. It provides a general place for
+// Histograms/BucketRanges to register, and supports a global API for
+// accessing (e.g., dumping or graphing) the data.
+
+#ifndef BASE_METRICS_STATISTICS_RECORDER_H_
+#define BASE_METRICS_STATISTICS_RECORDER_H_
+
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/gtest_prod_util.h"
+#include "base/lazy_instance.h"
+#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
+#include "base/metrics/histogram_base.h"
+#include "base/metrics/record_histogram_checker.h"
+#include "base/strings/string_piece.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+
+class BucketRanges;
+class HistogramSnapshotManager;
+
+// In-memory recorder of usage statistics (aka metrics, aka histograms).
+//
+// All the public methods are static and act on a global recorder. This global
+// recorder is internally synchronized and all the static methods are thread
+// safe.
+//
+// StatisticsRecorder doesn't have any public constructor. For testing
+// purposes, you can create a temporary recorder using the factory method
+// CreateTemporaryForTesting(). This temporary recorder becomes the global one
+// until deleted. When this temporary recorder is deleted, it restores the
+// previous global one.
+class BASE_EXPORT StatisticsRecorder {
+ public:
+  // An interface class that allows the StatisticsRecorder to forcibly merge
+  // histograms from providers when necessary.
+  class HistogramProvider {
+   public:
+    // Merges all histogram information into the global versions.
+    virtual void MergeHistogramDeltas() = 0;
+  };
+
+  typedef std::vector<HistogramBase*> Histograms;
+
+  // Restores the previous global recorder.
+  //
+  // When several temporary recorders are created using
+  // CreateTemporaryForTesting(), these recorders must be deleted in reverse
+  // order of creation.
+  //
+  // This method is thread safe.
+  //
+  // Precondition: The recorder being deleted is the current global recorder.
+  ~StatisticsRecorder();
+
+  // Registers a provider of histograms that can be called to merge those into
+  // the global recorder. Calls to ImportProvidedHistograms() will fetch from
+  // registered providers.
+  //
+  // This method is thread safe.
+  static void RegisterHistogramProvider(
+      const WeakPtr<HistogramProvider>& provider);
+
+  // Registers or adds a new histogram to the collection of statistics. If an
+  // identically named histogram is already registered, then the argument
+  // |histogram| will be deleted. The returned value is always the registered
+  // histogram (either the argument, or the pre-existing registered histogram).
+  //
+  // This method is thread safe.
+  static HistogramBase* RegisterOrDeleteDuplicate(HistogramBase* histogram);
+
+  // Registers or adds a new BucketRanges. If an equivalent BucketRanges is
+  // already registered, then the argument |ranges| will be deleted. The
+  // returned value is always the registered BucketRanges (either the argument,
+  // or the pre-existing one).
+  //
+  // This method is thread safe.
+  static const BucketRanges* RegisterOrDeleteDuplicateRanges(
+      const BucketRanges* ranges);
+
+  // Methods for appending histogram data to a string.  Only histograms which
+  // have |query| as a substring are written to |output| (an empty string will
+  // process all registered histograms).
+  //
+  // These methods are thread safe.
+  static void WriteHTMLGraph(const std::string& query, std::string* output);
+  static void WriteGraph(const std::string& query, std::string* output);
+
+  // Returns all registered histograms serialized as a JSON string, with
+  // |verbosity_level| controlling the verbosity of the serialization.
+  //
+  // This method is thread safe.
+  static std::string ToJSON(JSONVerbosityLevel verbosity_level);
+
+  // Gets existing histograms.
+  //
+  // The order of returned histograms is not guaranteed.
+  //
+  // Ownership of the individual histograms remains with the StatisticsRecorder.
+  //
+  // This method is thread safe.
+  static Histograms GetHistograms();
+
+  // Gets BucketRanges used by all histograms registered. The order of returned
+  // BucketRanges is not guaranteed.
+  //
+  // This method is thread safe.
+  static std::vector<const BucketRanges*> GetBucketRanges();
+
+  // Finds a histogram by name. Matches the exact name. Returns a null pointer
+  // if a matching histogram is not found.
+  //
+  // This method is thread safe.
+  static HistogramBase* FindHistogram(base::StringPiece name);
+
+  // Imports histograms from providers.
+  //
+  // This method must be called on the UI thread.
+  static void ImportProvidedHistograms();
+
+  // Snapshots all histograms via |snapshot_manager|. |flags_to_set| is used to
+  // set flags for each histogram. |required_flags| is used to select
+  // histograms to be recorded. Only histograms that have all the flags
+  // specified by the argument will be chosen. If all histograms should be
+  // recorded, set it to |Histogram::kNoFlags|.
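+  //
+  // Example call (illustrative; assumes a HistogramSnapshotManager |manager|
+  // built around a caller-provided flattener):
+  //
+  //   StatisticsRecorder::PrepareDeltas(
+  //       /* include_persistent= */ true, HistogramBase::kNoFlags,
+  //       HistogramBase::kUmaTargetedHistogramFlag, &manager);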
+  static void PrepareDeltas(bool include_persistent,
+                            HistogramBase::Flags flags_to_set,
+                            HistogramBase::Flags required_flags,
+                            HistogramSnapshotManager* snapshot_manager);
+
+  typedef base::Callback<void(HistogramBase::Sample)> OnSampleCallback;
+
+  // Sets the callback to notify when a new sample is recorded on the histogram
+  // referred to by |histogram_name|. Can be called before or after the
+  // histogram is created. Returns whether the callback was successfully set.
+  //
+  // This method is thread safe.
+  static bool SetCallback(const std::string& histogram_name,
+                          const OnSampleCallback& callback);
+
+  // Clears any callback set on the histogram referred to by |histogram_name|.
+  //
+  // This method is thread safe.
+  static void ClearCallback(const std::string& histogram_name);
+
+  // Retrieves the callback for the histogram referred to by |histogram_name|,
+  // or a null callback if no callback exists for this histogram.
+  //
+  // This method is thread safe.
+  static OnSampleCallback FindCallback(const std::string& histogram_name);
+
+  // Returns the number of known histograms.
+  //
+  // This method is thread safe.
+  static size_t GetHistogramCount();
+
+  // Initializes logging histograms with --v=1. Safe to call multiple times.
+  // This is called from the constructor, but in the browser it is more
+  // useful to start logging after the StatisticsRecorder is created, so
+  // log-on-shutdown can also be initialized later.
+  //
+  // This method is thread safe.
+  static void InitLogOnShutdown();
+
+  // Removes a histogram from the internal set of known ones. This can be
+  // necessary during testing persistent histograms where the underlying
+  // memory is being released.
+  //
+  // This method is thread safe.
+  static void ForgetHistogramForTesting(base::StringPiece name);
+
+  // Creates a temporary StatisticsRecorder object for testing purposes. All new
+  // histograms will be registered in it until it is destructed or pushed aside
+  // for the lifetime of yet another StatisticsRecorder object. The destruction
+  // of the returned object will re-activate the previous one.
+  // StatisticsRecorder objects must be deleted in the opposite order to which
+  // they're created.
+  //
+  // This method is thread safe.
+  static std::unique_ptr<StatisticsRecorder> CreateTemporaryForTesting()
+      WARN_UNUSED_RESULT;
+
+  // Sets the record checker for determining if a histogram should be recorded.
+  // Record checker doesn't affect any already recorded histograms, so this
+  // method must be called very early, before any threads have started.
+  // Record checker methods can be called on any thread, so they shouldn't
+  // mutate any state.
+  static void SetRecordChecker(
+      std::unique_ptr<RecordHistogramChecker> record_checker);
+
+  // Checks if the given histogram should be recorded based on the
+  // ShouldRecord() method of the record checker. If the record checker is not
+  // set, returns true.
+  //
+  // This method is thread safe.
+  static bool ShouldRecordHistogram(uint64_t histogram_hash);
+
+  // Sorts histograms by name.
+  static Histograms Sort(Histograms histograms);
+
+  // Filters histograms by name. Only histograms which have |query| as a
+  // substring in their name are kept. An empty query keeps all histograms.
+  static Histograms WithName(Histograms histograms, const std::string& query);
+
+  // Filters histograms by persistence. Only non-persistent histograms are kept.
+  static Histograms NonPersistent(Histograms histograms);
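+
+  // Example composition of the filters above (illustrative; mirrors how
+  // WriteGraph() uses them, with a hypothetical query string):
+  //
+  //   std::string output;
+  //   for (const HistogramBase* h : Sort(WithName(GetHistograms(), "UMA.")))
+  //     h->WriteAscii(&output);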
+
+ private:
+  typedef std::vector<WeakPtr<HistogramProvider>> HistogramProviders;
+
+  typedef std::unordered_map<StringPiece, HistogramBase*, StringPieceHash>
+      HistogramMap;
+
+  // We keep a map from histogram names to callbacks so that, as histograms
+  // are created, the kCallbackExists flag can be set properly.
+  typedef std::unordered_map<std::string, OnSampleCallback> CallbackMap;
+
+  struct BucketRangesHash {
+    size_t operator()(const BucketRanges* a) const;
+  };
+
+  struct BucketRangesEqual {
+    bool operator()(const BucketRanges* a, const BucketRanges* b) const;
+  };
+
+  typedef std::
+      unordered_set<const BucketRanges*, BucketRangesHash, BucketRangesEqual>
+          RangesMap;
+
+  friend class StatisticsRecorderTest;
+  FRIEND_TEST_ALL_PREFIXES(StatisticsRecorderTest, IterationTest);
+
+  // Initializes the global recorder if it doesn't already exist. Safe to call
+  // multiple times.
+  //
+  // Precondition: The global lock is already acquired.
+  static void EnsureGlobalRecorderWhileLocked();
+
+  // Gets histogram providers.
+  //
+  // This method is thread safe.
+  static HistogramProviders GetHistogramProviders();
+
+  // Imports histograms from global persistent memory.
+  //
+  // Precondition: The global lock must not be held during this call.
+  static void ImportGlobalPersistentHistograms();
+
+  // Constructs a new StatisticsRecorder and sets it as the current global
+  // recorder.
+  //
+  // Precondition: The global lock is already acquired.
+  StatisticsRecorder();
+
+  // Initializes log-on-shutdown without acquiring the lock. Callers must
+  // guard the StatisticsRecorder themselves if needed (unit tests do not).
+  //
+  // Precondition: The global lock is already acquired.
+  static void InitLogOnShutdownWhileLocked();
+
+  HistogramMap histograms_;
+  CallbackMap callbacks_;
+  RangesMap ranges_;
+  HistogramProviders providers_;
+  std::unique_ptr<RecordHistogramChecker> record_checker_;
+
+  // Previous global recorder that existed when this one was created.
+  StatisticsRecorder* previous_ = nullptr;
+
+  // Global lock for internal synchronization.
+  static LazyInstance<Lock>::Leaky lock_;
+
+  // Current global recorder. This recorder is used by static methods. When a
+  // new global recorder is created by CreateTemporaryForTesting(), then the
+  // previous global recorder is referenced by top_->previous_.
+  static StatisticsRecorder* top_;
+
+  // Tracks whether InitLogOnShutdownWhileLocked() has registered a logging
+  // function that will be called when the program finishes.
+  static bool is_vlog_initialized_;
+
+  DISALLOW_COPY_AND_ASSIGN(StatisticsRecorder);
+};
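+
+// Usage sketch (illustrative, not part of the upstream header): a test can
+// shadow the global recorder and restore the previous one on destruction:
+//
+//   std::unique_ptr<base::StatisticsRecorder> recorder =
+//       base::StatisticsRecorder::CreateTemporaryForTesting();
+//   // Histograms created here register with |recorder|.
+//   recorder.reset();  // Restores the previous global recorder.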
+
+}  // namespace base
+
+#endif  // BASE_METRICS_STATISTICS_RECORDER_H_
diff --git a/base/metrics/statistics_recorder_unittest.cc b/base/metrics/statistics_recorder_unittest.cc
new file mode 100644
index 0000000..63ba136
--- /dev/null
+++ b/base/metrics/statistics_recorder_unittest.cc
@@ -0,0 +1,718 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/statistics_recorder.h"
+
+#include <stddef.h>
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "base/bind.h"
+#include "base/json/json_reader.h"
+#include "base/logging.h"
+#include "base/memory/weak_ptr.h"
+#include "base/metrics/histogram_base.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/metrics/persistent_histogram_allocator.h"
+#include "base/metrics/record_histogram_checker.h"
+#include "base/metrics/sparse_histogram.h"
+#include "base/values.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+// Class to make sure any manipulations we do to the min log level are
+// contained (i.e., do not affect other unit tests).
+class LogStateSaver {
+ public:
+  LogStateSaver() : old_min_log_level_(logging::GetMinLogLevel()) {}
+
+  ~LogStateSaver() { logging::SetMinLogLevel(old_min_log_level_); }
+
+ private:
+  int old_min_log_level_;
+
+  DISALLOW_COPY_AND_ASSIGN(LogStateSaver);
+};
+
+// Test implementation of RecordHistogramChecker interface.
+class OddRecordHistogramChecker : public base::RecordHistogramChecker {
+ public:
+  ~OddRecordHistogramChecker() override = default;
+
+  // base::RecordHistogramChecker:
+  bool ShouldRecord(uint64_t histogram_hash) const override {
+    return histogram_hash % 2;
+  }
+};
+
+}  // namespace
+
+namespace base {
+
+using testing::IsEmpty;
+using testing::SizeIs;
+using testing::UnorderedElementsAre;
+
+class StatisticsRecorderTest : public testing::TestWithParam<bool> {
+ protected:
+  const int32_t kAllocatorMemorySize = 64 << 10;  // 64 KiB
+
+  StatisticsRecorderTest() : use_persistent_histogram_allocator_(GetParam()) {
+    // Each test will have a clean state (no Histogram / BucketRanges
+    // registered).
+    InitializeStatisticsRecorder();
+
+    // Use persistent memory for histograms if so indicated by test parameter.
+    if (use_persistent_histogram_allocator_) {
+      GlobalHistogramAllocator::CreateWithLocalMemory(kAllocatorMemorySize, 0,
+                                                      "StatisticsRecorderTest");
+    }
+  }
+
+  ~StatisticsRecorderTest() override {
+    GlobalHistogramAllocator::ReleaseForTesting();
+    UninitializeStatisticsRecorder();
+  }
+
+  void InitializeStatisticsRecorder() {
+    DCHECK(!statistics_recorder_);
+    statistics_recorder_ = StatisticsRecorder::CreateTemporaryForTesting();
+  }
+
+  // Deletes the global recorder if there is any. This is used by test
+  // NotInitialized to ensure a clean global state.
+  void UninitializeStatisticsRecorder() {
+    statistics_recorder_.reset();
+    delete StatisticsRecorder::top_;
+    DCHECK(!StatisticsRecorder::top_);
+  }
+
+  bool HasGlobalRecorder() { return StatisticsRecorder::top_ != nullptr; }
+
+  Histogram* CreateHistogram(const char* name,
+                             HistogramBase::Sample min,
+                             HistogramBase::Sample max,
+                             size_t bucket_count) {
+    BucketRanges* ranges = new BucketRanges(bucket_count + 1);
+    Histogram::InitializeBucketRanges(min, max, ranges);
+    const BucketRanges* registered_ranges =
+        StatisticsRecorder::RegisterOrDeleteDuplicateRanges(ranges);
+    return new Histogram(name, min, max, registered_ranges);
+  }
+
+  void InitLogOnShutdown() { StatisticsRecorder::InitLogOnShutdown(); }
+
+  bool IsVLogInitialized() { return StatisticsRecorder::is_vlog_initialized_; }
+
+  void ResetVLogInitialized() {
+    UninitializeStatisticsRecorder();
+    StatisticsRecorder::is_vlog_initialized_ = false;
+  }
+
+  const bool use_persistent_histogram_allocator_;
+
+  std::unique_ptr<StatisticsRecorder> statistics_recorder_;
+  std::unique_ptr<GlobalHistogramAllocator> old_global_allocator_;
+
+ private:
+  LogStateSaver log_state_saver_;
+
+  DISALLOW_COPY_AND_ASSIGN(StatisticsRecorderTest);
+};
+
+// Run all StatisticsRecorderTest cases with both heap and persistent memory.
+INSTANTIATE_TEST_CASE_P(Allocator, StatisticsRecorderTest, testing::Bool());
+
+TEST_P(StatisticsRecorderTest, NotInitialized) {
+  UninitializeStatisticsRecorder();
+  EXPECT_FALSE(HasGlobalRecorder());
+
+  HistogramBase* const histogram =
+      CreateHistogram("TestHistogram", 1, 1000, 10);
+  EXPECT_EQ(StatisticsRecorder::RegisterOrDeleteDuplicate(histogram),
+            histogram);
+  EXPECT_TRUE(HasGlobalRecorder());
+  EXPECT_THAT(StatisticsRecorder::GetHistograms(),
+              UnorderedElementsAre(histogram));
+
+  UninitializeStatisticsRecorder();
+  EXPECT_FALSE(HasGlobalRecorder());
+
+  BucketRanges* const ranges = new BucketRanges(3);
+  ranges->ResetChecksum();
+  EXPECT_EQ(StatisticsRecorder::RegisterOrDeleteDuplicateRanges(ranges),
+            ranges);
+  EXPECT_TRUE(HasGlobalRecorder());
+  EXPECT_THAT(StatisticsRecorder::GetBucketRanges(),
+              UnorderedElementsAre(ranges));
+}
+
+TEST_P(StatisticsRecorderTest, RegisterBucketRanges) {
+  std::vector<const BucketRanges*> registered_ranges;
+
+  BucketRanges* ranges1 = new BucketRanges(3);
+  ranges1->ResetChecksum();
+  BucketRanges* ranges2 = new BucketRanges(4);
+  ranges2->ResetChecksum();
+
+  // Register new ranges.
+  EXPECT_EQ(ranges1,
+            StatisticsRecorder::RegisterOrDeleteDuplicateRanges(ranges1));
+  EXPECT_EQ(ranges2,
+            StatisticsRecorder::RegisterOrDeleteDuplicateRanges(ranges2));
+  EXPECT_THAT(StatisticsRecorder::GetBucketRanges(),
+              UnorderedElementsAre(ranges1, ranges2));
+
+  // Register some ranges again.
+  EXPECT_EQ(ranges1,
+            StatisticsRecorder::RegisterOrDeleteDuplicateRanges(ranges1));
+  EXPECT_THAT(StatisticsRecorder::GetBucketRanges(),
+              UnorderedElementsAre(ranges1, ranges2));
+
+  // Make sure ranges1 still contains the values we registered.
+  ASSERT_EQ(3u, ranges1->size());
+  EXPECT_EQ(0, ranges1->range(0));
+  EXPECT_EQ(0, ranges1->range(1));
+  EXPECT_EQ(0, ranges1->range(2));
+
+  // Register ranges with same values.
+  BucketRanges* ranges3 = new BucketRanges(3);
+  ranges3->ResetChecksum();
+  EXPECT_EQ(ranges1,  // returning ranges1
+            StatisticsRecorder::RegisterOrDeleteDuplicateRanges(ranges3));
+  EXPECT_THAT(StatisticsRecorder::GetBucketRanges(),
+              UnorderedElementsAre(ranges1, ranges2));
+}
+
+TEST_P(StatisticsRecorderTest, RegisterHistogram) {
+  // Create a Histogram that was not registered.
+  Histogram* const histogram1 = CreateHistogram("TestHistogram1", 1, 1000, 10);
+
+  EXPECT_THAT(StatisticsRecorder::GetHistograms(), IsEmpty());
+
+  // Register the Histogram.
+  EXPECT_EQ(histogram1,
+            StatisticsRecorder::RegisterOrDeleteDuplicate(histogram1));
+  EXPECT_THAT(StatisticsRecorder::GetHistograms(),
+              UnorderedElementsAre(histogram1));
+
+  // Register the same Histogram again.
+  EXPECT_EQ(histogram1,
+            StatisticsRecorder::RegisterOrDeleteDuplicate(histogram1));
+  EXPECT_THAT(StatisticsRecorder::GetHistograms(),
+              UnorderedElementsAre(histogram1));
+
+  // Register another Histogram with the same name.
+  Histogram* const histogram2 = CreateHistogram("TestHistogram1", 1, 1000, 10);
+  EXPECT_NE(histogram1, histogram2);
+  EXPECT_EQ(histogram1,
+            StatisticsRecorder::RegisterOrDeleteDuplicate(histogram2));
+  EXPECT_THAT(StatisticsRecorder::GetHistograms(),
+              UnorderedElementsAre(histogram1));
+
+  // Register another Histogram with a different name.
+  Histogram* const histogram3 = CreateHistogram("TestHistogram0", 1, 1000, 10);
+  EXPECT_NE(histogram1, histogram3);
+  EXPECT_EQ(histogram3,
+            StatisticsRecorder::RegisterOrDeleteDuplicate(histogram3));
+  EXPECT_THAT(StatisticsRecorder::GetHistograms(),
+              UnorderedElementsAre(histogram1, histogram3));
+}
+
+TEST_P(StatisticsRecorderTest, FindHistogram) {
+  HistogramBase* histogram1 = Histogram::FactoryGet(
+      "TestHistogram1", 1, 1000, 10, HistogramBase::kNoFlags);
+  HistogramBase* histogram2 = Histogram::FactoryGet(
+      "TestHistogram2", 1, 1000, 10, HistogramBase::kNoFlags);
+
+  EXPECT_EQ(histogram1, StatisticsRecorder::FindHistogram("TestHistogram1"));
+  EXPECT_EQ(histogram2, StatisticsRecorder::FindHistogram("TestHistogram2"));
+  EXPECT_FALSE(StatisticsRecorder::FindHistogram("TestHistogram"));
+
+  // Create a new global allocator using the same memory as the old one. The
+  // old one is kept around so the memory doesn't get released.
+  old_global_allocator_ = GlobalHistogramAllocator::ReleaseForTesting();
+  if (use_persistent_histogram_allocator_) {
+    GlobalHistogramAllocator::CreateWithPersistentMemory(
+        const_cast<void*>(old_global_allocator_->data()),
+        old_global_allocator_->length(), 0, old_global_allocator_->Id(),
+        old_global_allocator_->Name());
+  }
+
+  // Reset statistics-recorder to validate operation from a clean start.
+  UninitializeStatisticsRecorder();
+  InitializeStatisticsRecorder();
+
+  if (use_persistent_histogram_allocator_) {
+    EXPECT_TRUE(StatisticsRecorder::FindHistogram("TestHistogram1"));
+    EXPECT_TRUE(StatisticsRecorder::FindHistogram("TestHistogram2"));
+  } else {
+    EXPECT_FALSE(StatisticsRecorder::FindHistogram("TestHistogram1"));
+    EXPECT_FALSE(StatisticsRecorder::FindHistogram("TestHistogram2"));
+  }
+  EXPECT_FALSE(StatisticsRecorder::FindHistogram("TestHistogram"));
+}
+
+TEST_P(StatisticsRecorderTest, WithName) {
+  Histogram::FactoryGet("TestHistogram1", 1, 1000, 10, Histogram::kNoFlags);
+  Histogram::FactoryGet("TestHistogram2", 1, 1000, 10, Histogram::kNoFlags);
+  Histogram::FactoryGet("TestHistogram3", 1, 1000, 10, Histogram::kNoFlags);
+
+  const auto histograms = StatisticsRecorder::GetHistograms();
+  EXPECT_THAT(histograms, SizeIs(3));
+  EXPECT_THAT(StatisticsRecorder::WithName(histograms, ""), SizeIs(3));
+  EXPECT_THAT(StatisticsRecorder::WithName(histograms, "Test"), SizeIs(3));
+  EXPECT_THAT(StatisticsRecorder::WithName(histograms, "1"), SizeIs(1));
+  EXPECT_THAT(StatisticsRecorder::WithName(histograms, "hello"), IsEmpty());
+}
+
+TEST_P(StatisticsRecorderTest, RegisterHistogramWithFactoryGet) {
+  EXPECT_THAT(StatisticsRecorder::GetHistograms(), IsEmpty());
+
+  // Create a histogram.
+  HistogramBase* const histogram1 = Histogram::FactoryGet(
+      "TestHistogram", 1, 1000, 10, HistogramBase::kNoFlags);
+  EXPECT_THAT(StatisticsRecorder::GetHistograms(),
+              UnorderedElementsAre(histogram1));
+
+  // Get an existing histogram.
+  HistogramBase* const histogram2 = Histogram::FactoryGet(
+      "TestHistogram", 1, 1000, 10, HistogramBase::kNoFlags);
+  EXPECT_EQ(histogram1, histogram2);
+  EXPECT_THAT(StatisticsRecorder::GetHistograms(),
+              UnorderedElementsAre(histogram1));
+
+  // Create a LinearHistogram.
+  HistogramBase* const histogram3 = LinearHistogram::FactoryGet(
+      "TestLinearHistogram", 1, 1000, 10, HistogramBase::kNoFlags);
+  EXPECT_THAT(StatisticsRecorder::GetHistograms(),
+              UnorderedElementsAre(histogram1, histogram3));
+
+  // Create a BooleanHistogram.
+  HistogramBase* const histogram4 = BooleanHistogram::FactoryGet(
+      "TestBooleanHistogram", HistogramBase::kNoFlags);
+  EXPECT_THAT(StatisticsRecorder::GetHistograms(),
+              UnorderedElementsAre(histogram1, histogram3, histogram4));
+
+  // Create a CustomHistogram.
+  std::vector<int> custom_ranges;
+  custom_ranges.push_back(1);
+  custom_ranges.push_back(5);
+  HistogramBase* const histogram5 = CustomHistogram::FactoryGet(
+      "TestCustomHistogram", custom_ranges, HistogramBase::kNoFlags);
+  EXPECT_THAT(
+      StatisticsRecorder::GetHistograms(),
+      UnorderedElementsAre(histogram1, histogram3, histogram4, histogram5));
+}
+
+TEST_P(StatisticsRecorderTest, RegisterHistogramWithMacros) {
+  // The macros cache histogram pointers, so tests that use them can only
+  // run once per process.
+  // Stop immediately if this test has run previously.
+  static bool already_run = false;
+  if (already_run)
+    return;
+  already_run = true;
+
+  StatisticsRecorder::Histograms registered_histograms;
+
+  HistogramBase* histogram = Histogram::FactoryGet(
+      "TestHistogramCounts", 1, 1000000, 50, HistogramBase::kNoFlags);
+
+  // The histogram we got from macro is the same as from FactoryGet.
+  LOCAL_HISTOGRAM_COUNTS("TestHistogramCounts", 30);
+  registered_histograms = StatisticsRecorder::GetHistograms();
+  ASSERT_EQ(1u, registered_histograms.size());
+  EXPECT_EQ(histogram, registered_histograms[0]);
+
+  LOCAL_HISTOGRAM_TIMES("TestHistogramTimes", TimeDelta::FromDays(1));
+  LOCAL_HISTOGRAM_ENUMERATION("TestHistogramEnumeration", 20, 200);
+
+  EXPECT_THAT(StatisticsRecorder::GetHistograms(), SizeIs(3));
+}
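+
+// Conceptual sketch (hedged; not the actual macro definition) of the caching
+// referred to above: each macro expands around a function-local static
+// histogram pointer that is looked up once and reused on every later
+// execution, which is why these tests can only run once per process:
+//
+//   static base::HistogramBase* cached = nullptr;  // hypothetical expansion
+//   if (!cached)
+//     cached = base::Histogram::FactoryGet(name, min, max, buckets, flags);
+//   cached->Add(sample);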
+
+TEST_P(StatisticsRecorderTest, BucketRangesSharing) {
+  EXPECT_THAT(StatisticsRecorder::GetBucketRanges(), IsEmpty());
+
+  Histogram::FactoryGet("Histogram", 1, 64, 8, HistogramBase::kNoFlags);
+  Histogram::FactoryGet("Histogram2", 1, 64, 8, HistogramBase::kNoFlags);
+  EXPECT_THAT(StatisticsRecorder::GetBucketRanges(), SizeIs(1));
+
+  Histogram::FactoryGet("Histogram3", 1, 64, 16, HistogramBase::kNoFlags);
+  EXPECT_THAT(StatisticsRecorder::GetBucketRanges(), SizeIs(2));
+}
+
+TEST_P(StatisticsRecorderTest, ToJSON) {
+  Histogram::FactoryGet("TestHistogram1", 1, 1000, 50, HistogramBase::kNoFlags)
+      ->Add(30);
+  Histogram::FactoryGet("TestHistogram1", 1, 1000, 50, HistogramBase::kNoFlags)
+      ->Add(40);
+  Histogram::FactoryGet("TestHistogram2", 1, 1000, 50, HistogramBase::kNoFlags)
+      ->Add(30);
+  Histogram::FactoryGet("TestHistogram2", 1, 1000, 50, HistogramBase::kNoFlags)
+      ->Add(40);
+
+  std::string json(StatisticsRecorder::ToJSON(JSON_VERBOSITY_LEVEL_FULL));
+
+  // Check for valid JSON.
+  std::unique_ptr<Value> root = JSONReader::Read(json);
+  ASSERT_TRUE(root.get());
+
+  DictionaryValue* root_dict = nullptr;
+  ASSERT_TRUE(root->GetAsDictionary(&root_dict));
+
+  // No query should be set.
+  ASSERT_FALSE(root_dict->HasKey("query"));
+
+  ListValue* histogram_list = nullptr;
+  ASSERT_TRUE(root_dict->GetList("histograms", &histogram_list));
+  ASSERT_EQ(2u, histogram_list->GetSize());
+
+  // Examine the first histogram.
+  DictionaryValue* histogram_dict = nullptr;
+  ASSERT_TRUE(histogram_list->GetDictionary(0, &histogram_dict));
+
+  int sample_count;
+  ASSERT_TRUE(histogram_dict->GetInteger("count", &sample_count));
+  EXPECT_EQ(2, sample_count);
+
+  ListValue* buckets_list = nullptr;
+  ASSERT_TRUE(histogram_dict->GetList("buckets", &buckets_list));
+  EXPECT_EQ(2u, buckets_list->GetList().size());
+
+  // Check the serialized JSON with a different verbosity level.
+  json = StatisticsRecorder::ToJSON(JSON_VERBOSITY_LEVEL_OMIT_BUCKETS);
+  root = JSONReader::Read(json);
+  ASSERT_TRUE(root.get());
+  root_dict = nullptr;
+  ASSERT_TRUE(root->GetAsDictionary(&root_dict));
+  histogram_list = nullptr;
+  ASSERT_TRUE(root_dict->GetList("histograms", &histogram_list));
+  ASSERT_EQ(2u, histogram_list->GetSize());
+  histogram_dict = nullptr;
+  ASSERT_TRUE(histogram_list->GetDictionary(0, &histogram_dict));
+  sample_count = 0;
+  ASSERT_TRUE(histogram_dict->GetInteger("count", &sample_count));
+  EXPECT_EQ(2, sample_count);
+  buckets_list = nullptr;
+  // Bucket information should be omitted.
+  ASSERT_FALSE(histogram_dict->GetList("buckets", &buckets_list));
+}
+
+TEST_P(StatisticsRecorderTest, IterationTest) {
+  Histogram::FactoryGet("IterationTest1", 1, 64, 16, HistogramBase::kNoFlags);
+  Histogram::FactoryGet("IterationTest2", 1, 64, 16, HistogramBase::kNoFlags);
+
+  auto histograms = StatisticsRecorder::GetHistograms();
+  EXPECT_THAT(histograms, SizeIs(2));
+  histograms = StatisticsRecorder::NonPersistent(std::move(histograms));
+  EXPECT_THAT(histograms, SizeIs(use_persistent_histogram_allocator_ ? 0 : 2));
+
+  // Create a new global allocator using the same memory as the old one. The
+  // old one is kept around so the memory doesn't get released.
+  old_global_allocator_ = GlobalHistogramAllocator::ReleaseForTesting();
+  if (use_persistent_histogram_allocator_) {
+    GlobalHistogramAllocator::CreateWithPersistentMemory(
+        const_cast<void*>(old_global_allocator_->data()),
+        old_global_allocator_->length(), 0, old_global_allocator_->Id(),
+        old_global_allocator_->Name());
+  }
+
+  // Reset statistics-recorder to validate operation from a clean start.
+  UninitializeStatisticsRecorder();
+  InitializeStatisticsRecorder();
+
+  histograms = StatisticsRecorder::GetHistograms();
+  EXPECT_THAT(histograms, SizeIs(use_persistent_histogram_allocator_ ? 2 : 0));
+  histograms = StatisticsRecorder::NonPersistent(std::move(histograms));
+  EXPECT_THAT(histograms, IsEmpty());
+}
+
+namespace {
+
+// CallbackCheckWrapper is simply a convenient way to check and store that
+// a callback was actually run.
+struct CallbackCheckWrapper {
+  CallbackCheckWrapper() : called(false), last_histogram_value(0) {}
+
+  void OnHistogramChanged(base::HistogramBase::Sample histogram_value) {
+    called = true;
+    last_histogram_value = histogram_value;
+  }
+
+  bool called;
+  base::HistogramBase::Sample last_histogram_value;
+};
+
+}  // namespace
+
+// Check that you can't overwrite the callback with another.
+TEST_P(StatisticsRecorderTest, SetCallbackFailsWithoutHistogramTest) {
+  CallbackCheckWrapper callback_wrapper;
+
+  bool result = base::StatisticsRecorder::SetCallback(
+      "TestHistogram", base::Bind(&CallbackCheckWrapper::OnHistogramChanged,
+                                  base::Unretained(&callback_wrapper)));
+  EXPECT_TRUE(result);
+
+  result = base::StatisticsRecorder::SetCallback(
+      "TestHistogram", base::Bind(&CallbackCheckWrapper::OnHistogramChanged,
+                                  base::Unretained(&callback_wrapper)));
+  EXPECT_FALSE(result);
+}
+
+// Check that you can't overwrite the callback with another.
+TEST_P(StatisticsRecorderTest, SetCallbackFailsWithHistogramTest) {
+  HistogramBase* histogram = Histogram::FactoryGet("TestHistogram", 1, 1000, 10,
+                                                   HistogramBase::kNoFlags);
+  EXPECT_TRUE(histogram);
+
+  CallbackCheckWrapper callback_wrapper;
+
+  bool result = base::StatisticsRecorder::SetCallback(
+      "TestHistogram", base::Bind(&CallbackCheckWrapper::OnHistogramChanged,
+                                  base::Unretained(&callback_wrapper)));
+  EXPECT_TRUE(result);
+  EXPECT_EQ(histogram->flags() & base::HistogramBase::kCallbackExists,
+            base::HistogramBase::kCallbackExists);
+
+  result = base::StatisticsRecorder::SetCallback(
+      "TestHistogram", base::Bind(&CallbackCheckWrapper::OnHistogramChanged,
+                                  base::Unretained(&callback_wrapper)));
+  EXPECT_FALSE(result);
+  EXPECT_EQ(histogram->flags() & base::HistogramBase::kCallbackExists,
+            base::HistogramBase::kCallbackExists);
+
+  histogram->Add(1);
+
+  EXPECT_TRUE(callback_wrapper.called);
+}
+
+// Check that clearing the callback works and the histogram no longer
+// notifies it.
+TEST_P(StatisticsRecorderTest, ClearCallbackSucceedsWithHistogramTest) {
+  HistogramBase* histogram = Histogram::FactoryGet("TestHistogram", 1, 1000, 10,
+                                                   HistogramBase::kNoFlags);
+  EXPECT_TRUE(histogram);
+
+  CallbackCheckWrapper callback_wrapper;
+
+  bool result = base::StatisticsRecorder::SetCallback(
+      "TestHistogram", base::Bind(&CallbackCheckWrapper::OnHistogramChanged,
+                                  base::Unretained(&callback_wrapper)));
+  EXPECT_TRUE(result);
+  EXPECT_EQ(histogram->flags() & base::HistogramBase::kCallbackExists,
+            base::HistogramBase::kCallbackExists);
+
+  base::StatisticsRecorder::ClearCallback("TestHistogram");
+  EXPECT_EQ(histogram->flags() & base::HistogramBase::kCallbackExists, 0);
+
+  histogram->Add(1);
+
+  EXPECT_FALSE(callback_wrapper.called);
+}
+
+// Check that callback is used.
+TEST_P(StatisticsRecorderTest, CallbackUsedTest) {
+  {
+    HistogramBase* histogram = Histogram::FactoryGet(
+        "TestHistogram", 1, 1000, 10, HistogramBase::kNoFlags);
+    EXPECT_TRUE(histogram);
+
+    CallbackCheckWrapper callback_wrapper;
+
+    base::StatisticsRecorder::SetCallback(
+        "TestHistogram", base::Bind(&CallbackCheckWrapper::OnHistogramChanged,
+                                    base::Unretained(&callback_wrapper)));
+
+    histogram->Add(1);
+
+    EXPECT_TRUE(callback_wrapper.called);
+    EXPECT_EQ(callback_wrapper.last_histogram_value, 1);
+  }
+
+  {
+    HistogramBase* linear_histogram = LinearHistogram::FactoryGet(
+        "TestLinearHistogram", 1, 1000, 10, HistogramBase::kNoFlags);
+
+    CallbackCheckWrapper callback_wrapper;
+
+    base::StatisticsRecorder::SetCallback(
+        "TestLinearHistogram",
+        base::Bind(&CallbackCheckWrapper::OnHistogramChanged,
+                   base::Unretained(&callback_wrapper)));
+
+    linear_histogram->Add(1);
+
+    EXPECT_TRUE(callback_wrapper.called);
+    EXPECT_EQ(callback_wrapper.last_histogram_value, 1);
+  }
+
+  {
+    std::vector<int> custom_ranges;
+    custom_ranges.push_back(1);
+    custom_ranges.push_back(5);
+    HistogramBase* custom_histogram = CustomHistogram::FactoryGet(
+        "TestCustomHistogram", custom_ranges, HistogramBase::kNoFlags);
+
+    CallbackCheckWrapper callback_wrapper;
+
+    base::StatisticsRecorder::SetCallback(
+        "TestCustomHistogram",
+        base::Bind(&CallbackCheckWrapper::OnHistogramChanged,
+                   base::Unretained(&callback_wrapper)));
+
+    custom_histogram->Add(1);
+
+    EXPECT_TRUE(callback_wrapper.called);
+    EXPECT_EQ(callback_wrapper.last_histogram_value, 1);
+  }
+
+  {
+    HistogramBase* custom_histogram = SparseHistogram::FactoryGet(
+        "TestSparseHistogram", HistogramBase::kNoFlags);
+
+    CallbackCheckWrapper callback_wrapper;
+
+    base::StatisticsRecorder::SetCallback(
+        "TestSparseHistogram",
+        base::Bind(&CallbackCheckWrapper::OnHistogramChanged,
+                   base::Unretained(&callback_wrapper)));
+
+    custom_histogram->Add(1);
+
+    EXPECT_TRUE(callback_wrapper.called);
+    EXPECT_EQ(callback_wrapper.last_histogram_value, 1);
+  }
+}
+
+// Check that setting a callback before the histogram exists works.
+TEST_P(StatisticsRecorderTest, CallbackUsedBeforeHistogramCreatedTest) {
+  CallbackCheckWrapper callback_wrapper;
+
+  base::StatisticsRecorder::SetCallback(
+      "TestHistogram", base::Bind(&CallbackCheckWrapper::OnHistogramChanged,
+                                  base::Unretained(&callback_wrapper)));
+
+  HistogramBase* histogram = Histogram::FactoryGet("TestHistogram", 1, 1000, 10,
+                                                   HistogramBase::kNoFlags);
+  EXPECT_TRUE(histogram);
+  histogram->Add(1);
+
+  EXPECT_TRUE(callback_wrapper.called);
+  EXPECT_EQ(callback_wrapper.last_histogram_value, 1);
+}
+
+TEST_P(StatisticsRecorderTest, LogOnShutdownNotInitialized) {
+  ResetVLogInitialized();
+  logging::SetMinLogLevel(logging::LOG_WARNING);
+  InitializeStatisticsRecorder();
+  EXPECT_FALSE(VLOG_IS_ON(1));
+  EXPECT_FALSE(IsVLogInitialized());
+  InitLogOnShutdown();
+  EXPECT_FALSE(IsVLogInitialized());
+}
+
+TEST_P(StatisticsRecorderTest, LogOnShutdownInitializedExplicitly) {
+  ResetVLogInitialized();
+  logging::SetMinLogLevel(logging::LOG_WARNING);
+  InitializeStatisticsRecorder();
+  EXPECT_FALSE(VLOG_IS_ON(1));
+  EXPECT_FALSE(IsVLogInitialized());
+  logging::SetMinLogLevel(logging::LOG_VERBOSE);
+  EXPECT_TRUE(VLOG_IS_ON(1));
+  InitLogOnShutdown();
+  EXPECT_TRUE(IsVLogInitialized());
+}
+
+TEST_P(StatisticsRecorderTest, LogOnShutdownInitialized) {
+  ResetVLogInitialized();
+  logging::SetMinLogLevel(logging::LOG_VERBOSE);
+  InitializeStatisticsRecorder();
+  EXPECT_TRUE(VLOG_IS_ON(1));
+  EXPECT_TRUE(IsVLogInitialized());
+}
+
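+// A HistogramProvider that owns a PersistentHistogramAllocator and, on
+// request, merges the deltas of every histogram inside it into the global
+// StatisticsRecorder.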
+class TestHistogramProvider : public StatisticsRecorder::HistogramProvider {
+ public:
+  TestHistogramProvider(std::unique_ptr<PersistentHistogramAllocator> allocator)
+      : allocator_(std::move(allocator)), weak_factory_(this) {
+    StatisticsRecorder::RegisterHistogramProvider(weak_factory_.GetWeakPtr());
+  }
+
+  void MergeHistogramDeltas() override {
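+    // Walk every histogram in the allocator and merge its unlogged delta
+    // into the matching histogram known to the StatisticsRecorder.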
+    PersistentHistogramAllocator::Iterator hist_iter(allocator_.get());
+    while (true) {
+      std::unique_ptr<base::HistogramBase> histogram = hist_iter.GetNext();
+      if (!histogram)
+        break;
+      allocator_->MergeHistogramDeltaToStatisticsRecorder(histogram.get());
+    }
+  }
+
+ private:
+  std::unique_ptr<PersistentHistogramAllocator> allocator_;
+  WeakPtrFactory<TestHistogramProvider> weak_factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(TestHistogramProvider);
+};
+
+TEST_P(StatisticsRecorderTest, ImportHistogramsTest) {
+  // Create a second SR to create some histograms for later import.
+  std::unique_ptr<StatisticsRecorder> temp_sr =
+      StatisticsRecorder::CreateTemporaryForTesting();
+
+  // Extract any existing global allocator so a new one can be created.
+  std::unique_ptr<GlobalHistogramAllocator> old_allocator =
+      GlobalHistogramAllocator::ReleaseForTesting();
+
+  // Create a histogram inside a new allocator for testing.
+  GlobalHistogramAllocator::CreateWithLocalMemory(kAllocatorMemorySize, 0, "");
+  HistogramBase* histogram = LinearHistogram::FactoryGet("Foo", 1, 10, 11, 0);
+  histogram->Add(3);
+
+  // Undo back to the starting point.
+  std::unique_ptr<GlobalHistogramAllocator> new_allocator =
+      GlobalHistogramAllocator::ReleaseForTesting();
+  GlobalHistogramAllocator::Set(std::move(old_allocator));
+  temp_sr.reset();
+
+  // Create a provider that can supply histograms to the current SR.
+  TestHistogramProvider provider(std::move(new_allocator));
+
+  // Verify that the created histogram is no longer known.
+  ASSERT_FALSE(StatisticsRecorder::FindHistogram(histogram->histogram_name()));
+
+  // Now test that it merges.
+  StatisticsRecorder::ImportProvidedHistograms();
+  HistogramBase* found =
+      StatisticsRecorder::FindHistogram(histogram->histogram_name());
+  ASSERT_TRUE(found);
+  EXPECT_NE(histogram, found);
+  std::unique_ptr<HistogramSamples> snapshot = found->SnapshotSamples();
+  EXPECT_EQ(1, snapshot->TotalCount());
+  EXPECT_EQ(1, snapshot->GetCount(3));
+
+  // Finally, verify that updates can also be merged.
+  histogram->Add(3);
+  histogram->Add(5);
+  StatisticsRecorder::ImportProvidedHistograms();
+  snapshot = found->SnapshotSamples();
+  EXPECT_EQ(3, snapshot->TotalCount());
+  EXPECT_EQ(2, snapshot->GetCount(3));
+  EXPECT_EQ(1, snapshot->GetCount(5));
+}
+
+TEST_P(StatisticsRecorderTest, RecordHistogramChecker) {
+  // With no record checker set, all histograms should be recorded.
+  EXPECT_TRUE(base::StatisticsRecorder::ShouldRecordHistogram(0));
+  auto record_checker = std::make_unique<OddRecordHistogramChecker>();
+  base::StatisticsRecorder::SetRecordChecker(std::move(record_checker));
+  EXPECT_TRUE(base::StatisticsRecorder::ShouldRecordHistogram(1));
+  EXPECT_FALSE(base::StatisticsRecorder::ShouldRecordHistogram(2));
+}
+
+}  // namespace base
diff --git a/base/metrics/user_metrics.cc b/base/metrics/user_metrics.cc
new file mode 100644
index 0000000..9fcc9e8
--- /dev/null
+++ b/base/metrics/user_metrics.cc
@@ -0,0 +1,74 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/user_metrics.h"
+
+#include <stddef.h>
+
+#include <vector>
+
+#include "base/bind.h"
+#include "base/lazy_instance.h"
+#include "base/location.h"
+#include "base/macros.h"
+#include "base/threading/thread_checker.h"
+
+namespace base {
+namespace {
+
+LazyInstance<std::vector<ActionCallback>>::DestructorAtExit g_callbacks =
+    LAZY_INSTANCE_INITIALIZER;
+LazyInstance<scoped_refptr<SingleThreadTaskRunner>>::DestructorAtExit
+    g_task_runner = LAZY_INSTANCE_INITIALIZER;
+
+}  // namespace
+
+void RecordAction(const UserMetricsAction& action) {
+  RecordComputedAction(action.str_);
+}
+
+void RecordComputedAction(const std::string& action) {
+  if (!g_task_runner.Get()) {
+    DCHECK(g_callbacks.Get().empty());
+    return;
+  }
+
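+  // Hop to the registered task runner if needed so that all callbacks run on
+  // a single, known thread.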
+  if (!g_task_runner.Get()->BelongsToCurrentThread()) {
+    g_task_runner.Get()->PostTask(FROM_HERE,
+                                  BindOnce(&RecordComputedAction, action));
+    return;
+  }
+
+  for (const ActionCallback& callback : g_callbacks.Get()) {
+    callback.Run(action);
+  }
+}
+
+void AddActionCallback(const ActionCallback& callback) {
+  // Only allow adding a callback if the task runner is set.
+  DCHECK(g_task_runner.Get());
+  DCHECK(g_task_runner.Get()->BelongsToCurrentThread());
+  g_callbacks.Get().push_back(callback);
+}
+
+void RemoveActionCallback(const ActionCallback& callback) {
+  DCHECK(g_task_runner.Get());
+  DCHECK(g_task_runner.Get()->BelongsToCurrentThread());
+  std::vector<ActionCallback>* callbacks = g_callbacks.Pointer();
+  for (size_t i = 0; i < callbacks->size(); ++i) {
+    if ((*callbacks)[i].Equals(callback)) {
+      callbacks->erase(callbacks->begin() + i);
+      return;
+    }
+  }
+}
+
+void SetRecordActionTaskRunner(
+    scoped_refptr<SingleThreadTaskRunner> task_runner) {
+  DCHECK(task_runner->BelongsToCurrentThread());
+  DCHECK(!g_task_runner.Get() || g_task_runner.Get()->BelongsToCurrentThread());
+  g_task_runner.Get() = task_runner;
+}
+
+}  // namespace base
diff --git a/base/metrics/user_metrics.h b/base/metrics/user_metrics.h
new file mode 100644
index 0000000..87fbd9c
--- /dev/null
+++ b/base/metrics/user_metrics.h
@@ -0,0 +1,73 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_USER_METRICS_H_
+#define BASE_METRICS_USER_METRICS_H_
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/metrics/user_metrics_action.h"
+#include "base/single_thread_task_runner.h"
+
+namespace base {
+
+// This module provides some helper functions for logging actions tracked by
+// the user metrics system.
+
+// For best practices on deciding when to emit a user action, see
+// https://chromium.googlesource.com/chromium/src.git/+/HEAD/tools/metrics/actions/README.md
+
+// Record that the user performed an action.
+// This function must be called after the task runner has been set with
+// SetRecordActionTaskRunner().
+//
+// "Action" here means a user-generated event:
+//   good: "Reload", "CloseTab", and "IMEInvoked"
+//   not good: "SSLDialogShown", "PageLoaded", "DiskFull"
+// We use this to gather anonymized information about how users are
+// interacting with the browser.
+// WARNING: In calls to this function, UserMetricsAction must be constructed
+// from a string literal, not a variable, e.g.
+//   RecordAction(UserMetricsAction("my action name"));
+// This ensures that our processing scripts can associate this action's hash
+// with its metric name. Therefore, it will be possible to retrieve the metric
+// name from the hash later on.
+//
+// Once a new recorded action is added, run
+//   tools/metrics/actions/extract_actions.py
+// to add the metric to actions.xml, then update the <owner>s and <description>
+// sections. Make sure to include the actions.xml file when you upload your code
+// for review!
+//
+// For more complicated situations (like when there are many different
+// possible actions), see RecordComputedAction().
+BASE_EXPORT void RecordAction(const UserMetricsAction& action);
+
+// This function has identical input and behavior to RecordAction(), but is
+// not automatically found by the action-processing scripts.  It can be used
+// when it's a pain to enumerate all possible actions, but if you use this
+// you need to also update the rules for extracting known actions in
+// tools/metrics/actions/extract_actions.py.
+// This function must be called after the task runner has been set with
+// SetRecordActionTaskRunner().
+BASE_EXPORT void RecordComputedAction(const std::string& action);
+
+// Called with the action string.
+typedef Callback<void(const std::string&)> ActionCallback;
+
+// Add/remove action callbacks (see above).
+// These functions must be called after the task runner has been set with
+// SetRecordActionTaskRunner().
+BASE_EXPORT void AddActionCallback(const ActionCallback& callback);
+BASE_EXPORT void RemoveActionCallback(const ActionCallback& callback);
+
+// Set the task runner on which to record actions.
+BASE_EXPORT void SetRecordActionTaskRunner(
+    scoped_refptr<SingleThreadTaskRunner> task_runner);
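+
+// A minimal usage sketch (hypothetical embedder code; the callback body and
+// action name are illustrative, not part of this API):
+//
+//   base::SetRecordActionTaskRunner(base::ThreadTaskRunnerHandle::Get());
+//   base::AddActionCallback(
+//       base::Bind([](const std::string& action) { VLOG(1) << action; }));
+//   base::RecordAction(base::UserMetricsAction("CloseTab"));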
+
+}  // namespace base
+
+#endif  // BASE_METRICS_USER_METRICS_H_
diff --git a/base/metrics/user_metrics_action.h b/base/metrics/user_metrics_action.h
new file mode 100644
index 0000000..454ed83
--- /dev/null
+++ b/base/metrics/user_metrics_action.h
@@ -0,0 +1,27 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_USER_METRICS_ACTION_H_
+#define BASE_METRICS_USER_METRICS_ACTION_H_
+
+namespace base {
+
+// UserMetricsAction exists purely to standardize on the parameters passed to
+// UserMetrics. That way, our toolset can reliably scan the source code for
+// constructors and extract the associated string constants.
+// WARNING: When using UserMetricsAction, you should pass a string literal
+// parameter, e.g.
+//   RecordAction(UserMetricsAction("my action name"));
+// This ensures that our processing scripts can associate this action's hash
+// with its metric name. Therefore, it will be possible to retrieve the metric
+// name from the hash later on.
+// Please see tools/metrics/actions/extract_actions.py for details.
+struct UserMetricsAction {
+  const char* str_;
+  explicit constexpr UserMetricsAction(const char* str) noexcept : str_(str) {}
+};
+
+}  // namespace base
+
+#endif  // BASE_METRICS_USER_METRICS_ACTION_H_
diff --git a/base/native_library.cc b/base/native_library.cc
new file mode 100644
index 0000000..72012a3
--- /dev/null
+++ b/base/native_library.cc
@@ -0,0 +1,15 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/native_library.h"
+
+namespace base {
+
+NativeLibrary LoadNativeLibrary(const FilePath& library_path,
+                                NativeLibraryLoadError* error) {
+  return LoadNativeLibraryWithOptions(
+      library_path, NativeLibraryOptions(), error);
+}
+
+}  // namespace base
diff --git a/base/native_library.h b/base/native_library.h
new file mode 100644
index 0000000..04356d9
--- /dev/null
+++ b/base/native_library.h
@@ -0,0 +1,118 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NATIVE_LIBRARY_H_
+#define BASE_NATIVE_LIBRARY_H_
+
+// This file defines a cross-platform "NativeLibrary" type which represents
+// a loadable module.
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/strings/string_piece.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#elif defined(OS_MACOSX)
+#import <CoreFoundation/CoreFoundation.h>
+#endif  // OS_*
+
+namespace base {
+
+class FilePath;
+
+#if defined(OS_WIN)
+using NativeLibrary = HMODULE;
+#elif defined(OS_MACOSX)
+enum NativeLibraryType {
+  BUNDLE,
+  DYNAMIC_LIB
+};
+enum NativeLibraryObjCStatus {
+  OBJC_UNKNOWN,
+  OBJC_PRESENT,
+  OBJC_NOT_PRESENT,
+};
+struct NativeLibraryStruct {
+  NativeLibraryType type;
+  CFBundleRefNum bundle_resource_ref;
+  NativeLibraryObjCStatus objc_status;
+  union {
+    CFBundleRef bundle;
+    void* dylib;
+  };
+};
+using NativeLibrary = NativeLibraryStruct*;
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+using NativeLibrary = void*;
+#endif  // OS_*
+
+struct BASE_EXPORT NativeLibraryLoadError {
+#if defined(OS_WIN)
+  NativeLibraryLoadError() : code(0) {}
+#endif  // OS_WIN
+
+  // Returns a string representation of the load error.
+  std::string ToString() const;
+
+#if defined(OS_WIN)
+  DWORD code;
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  std::string message;
+#endif  // OS_WIN
+};
+
+struct BASE_EXPORT NativeLibraryOptions {
+  NativeLibraryOptions() = default;
+  NativeLibraryOptions(const NativeLibraryOptions& options) = default;
+
+  // If |true|, a loaded library is required to prefer local symbol resolution
+  // before considering global symbols. Note that this is already the default
+  // behavior on most systems. Setting this to |false| does not guarantee the
+  // inverse, i.e., it does not force a preference for global symbols over local
+  // ones.
+  bool prefer_own_symbols = false;
+};
+
+// Loads a native library from disk.  Release it with UnloadNativeLibrary when
+// you're done.  Returns NULL on failure.
+// If |error| is not NULL, it may be filled in on load error.
+BASE_EXPORT NativeLibrary LoadNativeLibrary(const FilePath& library_path,
+                                            NativeLibraryLoadError* error);
+
+// Loads a native library from disk.  Release it with UnloadNativeLibrary when
+// you're done.  Returns NULL on failure.
+// If |error| is not NULL, it may be filled in on load error.
+BASE_EXPORT NativeLibrary LoadNativeLibraryWithOptions(
+    const FilePath& library_path,
+    const NativeLibraryOptions& options,
+    NativeLibraryLoadError* error);
+
+// Unloads a native library.
+BASE_EXPORT void UnloadNativeLibrary(NativeLibrary library);
+
+// Gets a function pointer from a native library.
+BASE_EXPORT void* GetFunctionPointerFromNativeLibrary(NativeLibrary library,
+                                                      StringPiece name);
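+
+// A minimal lifecycle sketch (hypothetical caller; the library and symbol
+// names are illustrative, and the FilePath construction assumes a POSIX
+// narrow-string build):
+//
+//   base::NativeLibraryLoadError error;
+//   base::NativeLibrary lib = base::LoadNativeLibrary(
+//       base::FilePath(base::GetNativeLibraryName("mylib")), &error);
+//   if (!lib) {
+//     LOG(ERROR) << "Load failed: " << error.ToString();
+//   } else {
+//     using AddFn = int (*)(int, int);
+//     auto add = reinterpret_cast<AddFn>(
+//         base::GetFunctionPointerFromNativeLibrary(lib, "Add"));
+//     if (add)
+//       add(1, 2);
+//     base::UnloadNativeLibrary(lib);
+//   }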
+
+// Returns the full platform-specific name for a native library. |name| must be
+// ASCII. This is also the default name for the output of a gn |shared_library|
+// target. See tools/gn/docs/reference.md#shared_library.
+// For example for "mylib", it returns:
+// - "mylib.dll" on Windows
+// - "libmylib.so" on Linux
+// - "libmylib.dylib" on Mac
+BASE_EXPORT std::string GetNativeLibraryName(StringPiece name);
+
+// Returns the full platform-specific name for a gn |loadable_module| target.
+// See tools/gn/docs/reference.md#loadable_module
+// The returned name is the same as GetNativeLibraryName() on all platforms
+// except for Mac where for "mylib" it returns "mylib.so".
+BASE_EXPORT std::string GetLoadableModuleName(StringPiece name);
+
+}  // namespace base
+
+#endif  // BASE_NATIVE_LIBRARY_H_
diff --git a/base/native_library_fuchsia.cc b/base/native_library_fuchsia.cc
new file mode 100644
index 0000000..1d74273
--- /dev/null
+++ b/base/native_library_fuchsia.cc
@@ -0,0 +1,87 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/native_library.h"
+
+#include <fcntl.h>
+#include <fdio/io.h>
+#include <stdio.h>
+#include <zircon/dlfcn.h>
+#include <zircon/status.h>
+#include <zircon/syscalls.h>
+
+#include "base/base_paths_fuchsia.h"
+#include "base/files/file.h"
+#include "base/files/file_path.h"
+#include "base/fuchsia/fuchsia_logging.h"
+#include "base/fuchsia/scoped_zx_handle.h"
+#include "base/logging.h"
+#include "base/path_service.h"
+#include "base/posix/safe_strerror.h"
+#include "base/strings/stringprintf.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/threading/thread_restrictions.h"
+
+namespace base {
+
+std::string NativeLibraryLoadError::ToString() const {
+  return message;
+}
+
+NativeLibrary LoadNativeLibraryWithOptions(const FilePath& library_path,
+                                           const NativeLibraryOptions& options,
+                                           NativeLibraryLoadError* error) {
+  std::vector<base::FilePath::StringType> components;
+  library_path.GetComponents(&components);
+  if (components.size() != 1u) {
+    NOTREACHED() << "library_path is a path, should be a filename: "
+                 << library_path.MaybeAsASCII();
+    return nullptr;
+  }
+
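+  // Fuchsia packages install shared libraries under <package root>/lib/, so
+  // resolve the bare filename against that directory before opening it.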
+  FilePath computed_path = base::GetPackageRoot();
+  computed_path = computed_path.AppendASCII("lib").Append(components[0]);
+  base::File library(computed_path,
+                     base::File::FLAG_OPEN | base::File::FLAG_READ);
+  if (!library.IsValid()) {
+    if (error) {
+      error->message = base::StringPrintf(
+          "open library: %s",
+          base::File::ErrorToString(library.error_details()).c_str());
+    }
+    return nullptr;
+  }
+
+  base::ScopedZxHandle vmo;
+  zx_status_t status =
+      fdio_get_vmo_clone(library.GetPlatformFile(), vmo.receive());
+  if (status != ZX_OK) {
+    if (error) {
+      error->message = base::StringPrintf("fdio_get_vmo_clone: %s",
+                                          zx_status_get_string(status));
+    }
+    return nullptr;
+  }
+  NativeLibrary result = dlopen_vmo(vmo.get(), RTLD_LAZY | RTLD_LOCAL);
+  return result;
+}
+
+void UnloadNativeLibrary(NativeLibrary library) {
+  // dlclose() is a no-op on Fuchsia, so do nothing here.
+}
+
+void* GetFunctionPointerFromNativeLibrary(NativeLibrary library,
+                                          StringPiece name) {
+  return dlsym(library, name.data());
+}
+
+std::string GetNativeLibraryName(StringPiece name) {
+  return base::StringPrintf("lib%s.so", name.as_string().c_str());
+}
+
+std::string GetLoadableModuleName(StringPiece name) {
+  return GetNativeLibraryName(name);
+}
+
+}  // namespace base
diff --git a/base/native_library_ios.mm b/base/native_library_ios.mm
new file mode 100644
index 0000000..dbcafb4
--- /dev/null
+++ b/base/native_library_ios.mm
@@ -0,0 +1,46 @@
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/native_library.h"
+
+#include "base/logging.h"
+
+#include "base/strings/string_util.h"
+
+namespace base {
+
+std::string NativeLibraryLoadError::ToString() const {
+  return message;
+}
+
+NativeLibrary LoadNativeLibraryWithOptions(const base::FilePath& library_path,
+                                           const NativeLibraryOptions& options,
+                                           NativeLibraryLoadError* error) {
+  NOTIMPLEMENTED();
+  if (error)
+    error->message = "Not implemented.";
+  return nullptr;
+}
+
+void UnloadNativeLibrary(NativeLibrary library) {
+  NOTIMPLEMENTED();
+  DCHECK(!library);
+}
+
+void* GetFunctionPointerFromNativeLibrary(NativeLibrary library,
+                                          StringPiece name) {
+  NOTIMPLEMENTED();
+  return nullptr;
+}
+
+std::string GetNativeLibraryName(StringPiece name) {
+  DCHECK(IsStringASCII(name));
+  return name.as_string();
+}
+
+std::string GetLoadableModuleName(StringPiece name) {
+  return GetNativeLibraryName(name);
+}
+
+}  // namespace base
diff --git a/base/native_library_mac.mm b/base/native_library_mac.mm
new file mode 100644
index 0000000..0d31b80
--- /dev/null
+++ b/base/native_library_mac.mm
@@ -0,0 +1,128 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/native_library.h"
+
+#include <dlfcn.h>
+#include <mach-o/getsect.h>
+
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/logging.h"
+#include "base/mac/scoped_cftyperef.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/threading/thread_restrictions.h"
+
+namespace base {
+
+static NativeLibraryObjCStatus GetObjCStatusForImage(
+    const void* function_pointer) {
+  Dl_info info;
+  if (!dladdr(function_pointer, &info))
+    return OBJC_UNKNOWN;
+
+  // See if the image contains an "ObjC image info" segment. This method
+  // of testing is used in _CFBundleGrokObjcImageInfoFromFile in
+  // CF-744/CFBundle.c, around lines 2447-2474.
+  //
+  // In 64-bit images, ObjC can be recognized in __DATA,__objc_imageinfo.
+  const section_64* section = getsectbynamefromheader_64(
+      reinterpret_cast<const struct mach_header_64*>(info.dli_fbase),
+      SEG_DATA, "__objc_imageinfo");
+  return section ? OBJC_PRESENT : OBJC_NOT_PRESENT;
+}
+
+std::string NativeLibraryLoadError::ToString() const {
+  return message;
+}
+
+NativeLibrary LoadNativeLibraryWithOptions(const FilePath& library_path,
+                                           const NativeLibraryOptions& options,
+                                           NativeLibraryLoadError* error) {
+  // dlopen() etc. open the file off disk.
+  if (library_path.Extension() == ".dylib" || !DirectoryExists(library_path)) {
+    void* dylib = dlopen(library_path.value().c_str(), RTLD_LAZY);
+    if (!dylib) {
+      if (error)
+        error->message = dlerror();
+      return nullptr;
+    }
+    NativeLibrary native_lib = new NativeLibraryStruct();
+    native_lib->type = DYNAMIC_LIB;
+    native_lib->dylib = dylib;
+    native_lib->objc_status = OBJC_UNKNOWN;
+    return native_lib;
+  }
+  ScopedCFTypeRef<CFURLRef> url(CFURLCreateFromFileSystemRepresentation(
+      kCFAllocatorDefault,
+      (const UInt8*)library_path.value().c_str(),
+      library_path.value().length(),
+      true));
+  if (!url)
+    return nullptr;
+  CFBundleRef bundle = CFBundleCreate(kCFAllocatorDefault, url.get());
+  if (!bundle)
+    return nullptr;
+
+  NativeLibrary native_lib = new NativeLibraryStruct();
+  native_lib->type = BUNDLE;
+  native_lib->bundle = bundle;
+  native_lib->bundle_resource_ref = CFBundleOpenBundleResourceMap(bundle);
+  native_lib->objc_status = OBJC_UNKNOWN;
+  return native_lib;
+}
+
+void UnloadNativeLibrary(NativeLibrary library) {
+  if (library->objc_status == OBJC_NOT_PRESENT) {
+    if (library->type == BUNDLE) {
+      CFBundleCloseBundleResourceMap(library->bundle,
+                                     library->bundle_resource_ref);
+      CFRelease(library->bundle);
+    } else {
+      dlclose(library->dylib);
+    }
+  } else {
+    VLOG(2) << "Not unloading NativeLibrary because it may contain an ObjC "
+               "segment. library->objc_status = " << library->objc_status;
+    // Deliberately do not CFRelease the bundle or dlclose the dylib because
+    // doing so can corrupt the ObjC runtime method caches. See
+    // http://crbug.com/172319 for details.
+  }
+  delete library;
+}
+
+void* GetFunctionPointerFromNativeLibrary(NativeLibrary library,
+                                          StringPiece name) {
+  void* function_pointer = nullptr;
+
+  // Get the function pointer using the right API for the type.
+  if (library->type == BUNDLE) {
+    ScopedCFTypeRef<CFStringRef> symbol_name(CFStringCreateWithCString(
+        kCFAllocatorDefault, name.data(), kCFStringEncodingUTF8));
+    function_pointer = CFBundleGetFunctionPointerForName(library->bundle,
+                                                         symbol_name);
+  } else {
+    function_pointer = dlsym(library->dylib, name.data());
+  }
+
+  // If this library hasn't been tested for having ObjC, use the function
+  // pointer to look up the section information for the library.
+  if (function_pointer && library->objc_status == OBJC_UNKNOWN)
+    library->objc_status = GetObjCStatusForImage(function_pointer);
+
+  return function_pointer;
+}
+
+std::string GetNativeLibraryName(StringPiece name) {
+  DCHECK(IsStringASCII(name));
+  return "lib" + name.as_string() + ".dylib";
+}
+
+std::string GetLoadableModuleName(StringPiece name) {
+  DCHECK(IsStringASCII(name));
+  return name.as_string() + ".so";
+}
+
+}  // namespace base
diff --git a/base/native_library_posix.cc b/base/native_library_posix.cc
new file mode 100644
index 0000000..19ff7a4
--- /dev/null
+++ b/base/native_library_posix.cc
@@ -0,0 +1,70 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/native_library.h"
+
+#include <dlfcn.h>
+
+#include "base/files/file_path.h"
+#include "base/logging.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/threading/thread_restrictions.h"
+
+namespace base {
+
+std::string NativeLibraryLoadError::ToString() const {
+  return message;
+}
+
+NativeLibrary LoadNativeLibraryWithOptions(const FilePath& library_path,
+                                           const NativeLibraryOptions& options,
+                                           NativeLibraryLoadError* error) {
+  // dlopen() opens the file off disk.
+  AssertBlockingAllowed();
+
+  // We deliberately do not use RTLD_DEEPBIND by default.  For the history why,
+  // please refer to the bug tracker.  Some useful bug reports to read include:
+  // http://crbug.com/17943, http://crbug.com/17557, http://crbug.com/36892,
+  // and http://crbug.com/40794.
+  int flags = RTLD_LAZY;
+#if defined(OS_ANDROID) || !defined(RTLD_DEEPBIND)
+  // Certain platforms don't define RTLD_DEEPBIND. Android dlopen() requires
+  // further investigation, as it might vary across versions. Crash here to
+  // warn developers that they're trying to rely on uncertain behavior.
+  CHECK(!options.prefer_own_symbols);
+#else
+  if (options.prefer_own_symbols)
+    flags |= RTLD_DEEPBIND;
+#endif
+  void* dl = dlopen(library_path.value().c_str(), flags);
+  if (!dl && error)
+    error->message = dlerror();
+
+  return dl;
+}
+
+void UnloadNativeLibrary(NativeLibrary library) {
+  int ret = dlclose(library);
+  if (ret < 0) {
+    DLOG(ERROR) << "dlclose failed: " << dlerror();
+    NOTREACHED();
+  }
+}
+
+void* GetFunctionPointerFromNativeLibrary(NativeLibrary library,
+                                          StringPiece name) {
+  return dlsym(library, name.data());
+}
+
+std::string GetNativeLibraryName(StringPiece name) {
+  DCHECK(IsStringASCII(name));
+  return "lib" + name.as_string() + ".so";
+}
+
+std::string GetLoadableModuleName(StringPiece name) {
+  return GetNativeLibraryName(name);
+}
+
+}  // namespace base
diff --git a/base/native_library_unittest.cc b/base/native_library_unittest.cc
new file mode 100644
index 0000000..2bfb9ec
--- /dev/null
+++ b/base/native_library_unittest.cc
@@ -0,0 +1,166 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file_path.h"
+#include "base/macros.h"
+#include "base/native_library.h"
+#include "base/path_service.h"
+#include "base/test/native_library_test_utils.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+const FilePath::CharType kDummyLibraryPath[] =
+    FILE_PATH_LITERAL("dummy_library");
+
+TEST(NativeLibraryTest, LoadFailure) {
+  NativeLibraryLoadError error;
+  EXPECT_FALSE(LoadNativeLibrary(FilePath(kDummyLibraryPath), &error));
+  EXPECT_FALSE(error.ToString().empty());
+}
+
+// |error| is optional and can be null.
+TEST(NativeLibraryTest, LoadFailureWithNullError) {
+  EXPECT_FALSE(LoadNativeLibrary(FilePath(kDummyLibraryPath), nullptr));
+}
+
+TEST(NativeLibraryTest, GetNativeLibraryName) {
+  const char kExpectedName[] =
+#if defined(OS_WIN)
+      "mylib.dll";
+#elif defined(OS_IOS)
+      "mylib";
+#elif defined(OS_MACOSX)
+      "libmylib.dylib";
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+      "libmylib.so";
+#endif
+  EXPECT_EQ(kExpectedName, GetNativeLibraryName("mylib"));
+}
+
+TEST(NativeLibraryTest, GetLoadableModuleName) {
+  const char kExpectedName[] =
+#if defined(OS_WIN)
+      "mylib.dll";
+#elif defined(OS_IOS)
+      "mylib";
+#elif defined(OS_MACOSX)
+      "mylib.so";
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+      "libmylib.so";
+#endif
+  EXPECT_EQ(kExpectedName, GetLoadableModuleName("mylib"));
+}
+
+// We don't support dynamic loading on iOS, and ASAN will complain about our
+// intentional ODR violation because of |g_native_library_exported_value| being
+// defined globally both here and in the shared library.
+#if !defined(OS_IOS) && !defined(ADDRESS_SANITIZER)
+
+const char kTestLibraryName[] =
+#if defined(OS_WIN)
+    "test_shared_library.dll";
+#elif defined(OS_MACOSX)
+    "libtest_shared_library.dylib";
+#elif defined(OS_ANDROID) && defined(COMPONENT_BUILD)
+    "libtest_shared_library.cr.so";
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+    "libtest_shared_library.so";
+#endif
+
+class TestLibrary {
+ public:
+  TestLibrary() : TestLibrary(NativeLibraryOptions()) {}
+
+  explicit TestLibrary(const NativeLibraryOptions& options)
+    : library_(nullptr) {
+    base::FilePath exe_path;
+
+#if !defined(OS_FUCHSIA)
+    // Libraries do not sit alongside the executable in Fuchsia. NativeLibrary
+    // is aware of this and is able to resolve library paths correctly.
+    CHECK(base::PathService::Get(base::DIR_EXE, &exe_path));
+#endif
+
+    library_ = LoadNativeLibraryWithOptions(
+        exe_path.AppendASCII(kTestLibraryName), options, nullptr);
+    CHECK(library_);
+  }
+
+  ~TestLibrary() {
+    UnloadNativeLibrary(library_);
+  }
+
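+  // Looks up |function_name| in the loaded library, casts the symbol to a
+  // function pointer of type ReturnType(Args...), and invokes it with |args|.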
+  template <typename ReturnType, typename... Args>
+  ReturnType Call(const char* function_name, Args... args) {
+    return reinterpret_cast<ReturnType(*)(Args...)>(
+        GetFunctionPointerFromNativeLibrary(library_, function_name))(args...);
+  }
+
+ private:
+  NativeLibrary library_;
+
+  DISALLOW_COPY_AND_ASSIGN(TestLibrary);
+};
+
+// NativeLibraryTest.LoadLibrary is failing on M tablets only.
+// https://crbug.com/641309
+#if !defined(OS_ANDROID)
+
+// Verifies that we can load a native library and resolve its exported symbols.
+TEST(NativeLibraryTest, LoadLibrary) {
+  TestLibrary library;
+  EXPECT_EQ(5, library.Call<int>("GetSimpleTestValue"));
+}
+
+#endif  // !defined(OS_ANDROID)
+
+// Android dlopen() requires further investigation, as it might vary across
+// versions with respect to symbol resolution scope.
+// TSan and MSan error out on RTLD_DEEPBIND, https://crbug.com/705255
+#if !defined(OS_ANDROID) && !defined(THREAD_SANITIZER) && \
+    !defined(MEMORY_SANITIZER)
+
+// Verifies that the |prefer_own_symbols| option satisfies its guarantee that
+// a loaded library will always prefer local symbol resolution before
+// considering global symbols.
+TEST(NativeLibraryTest, LoadLibraryPreferOwnSymbols) {
+  NativeLibraryOptions options;
+  options.prefer_own_symbols = true;
+  TestLibrary library(options);
+
+  // Verify that this binary and the DSO use different storage for
+  // |g_native_library_exported_value|.
+  g_native_library_exported_value = 1;
+  library.Call<void>("SetExportedValue", 2);
+  EXPECT_EQ(1, g_native_library_exported_value);
+  g_native_library_exported_value = 3;
+  EXPECT_EQ(2, library.Call<int>("GetExportedValue"));
+
+  // Both this binary and the library link against the
+  // native_library_test_utils source library, which in turn exports the
+  // NativeLibraryTestIncrement() function whose return value depends on some
+  // static internal state.
+  //
+  // The DSO's GetIncrementValue() forwards to that function inside the DSO.
+  //
+  // Here we verify that direct calls to NativeLibraryTestIncrement() in this
+  // binary return a sequence of values independent from the sequence returned
+  // by GetIncrementValue(), ensuring that the DSO is calling its own local
+  // definition of NativeLibraryTestIncrement().
+  EXPECT_EQ(1, library.Call<int>("GetIncrementValue"));
+  EXPECT_EQ(1, NativeLibraryTestIncrement());
+  EXPECT_EQ(2, library.Call<int>("GetIncrementValue"));
+  EXPECT_EQ(3, library.Call<int>("GetIncrementValue"));
+  EXPECT_EQ(4, library.Call<int>("NativeLibraryTestIncrement"));
+  EXPECT_EQ(2, NativeLibraryTestIncrement());
+  EXPECT_EQ(3, NativeLibraryTestIncrement());
+}
+
+#endif  // !defined(OS_ANDROID) && !defined(THREAD_SANITIZER) &&
+        // !defined(MEMORY_SANITIZER)
+
+#endif  // !defined(OS_IOS) && !defined(ADDRESS_SANITIZER)
+
+}  // namespace base
diff --git a/base/native_library_win.cc b/base/native_library_win.cc
new file mode 100644
index 0000000..ca94468
--- /dev/null
+++ b/base/native_library_win.cc
@@ -0,0 +1,177 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/native_library.h"
+
+#include <windows.h>
+
+#include "base/files/file_util.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/threading/thread_restrictions.h"
+
+namespace base {
+
+using AddDllDirectory = HMODULE (*)(PCWSTR new_directory);
+
+namespace {
+// This enum is used to back a UMA histogram, and should therefore be treated
+// as append-only.
+enum LoadLibraryResult {
+  // LoadLibraryExW API/flags are available and the call succeeds.
+  SUCCEED = 0,
+  // LoadLibraryExW API/flags are available to use but the call fails, then
+  // LoadLibraryW is used and succeeds.
+  FAIL_AND_SUCCEED,
+  // LoadLibraryExW API/flags are available to use but the call fails, then
+  // LoadLibraryW is used but fails as well.
+  FAIL_AND_FAIL,
+  // LoadLibraryExW API/flags are unavailable to use, then LoadLibraryW is used
+  // and succeeds.
+  UNAVAILABLE_AND_SUCCEED,
+  // LoadLibraryExW API/flags are unavailable to use, then LoadLibraryW is used
+  // but fails.
+  UNAVAILABLE_AND_FAIL,
+  // Add new items before this one, always keep this one at the end.
+  END
+};
+
+// A helper method to log the library loading result to UMA.
+void LogLibraryLoadResultToUMA(LoadLibraryResult result) {
+  UMA_HISTOGRAM_ENUMERATION("LibraryLoader.LoadNativeLibraryWindows", result,
+                            LoadLibraryResult::END);
+}
+
+// A helper method to check if AddDllDirectory method is available, thus
+// LOAD_LIBRARY_SEARCH_* flags are available on systems.
+bool AreSearchFlagsAvailable() {
+  // The LOAD_LIBRARY_SEARCH_* flags are available on systems that have
+  // KB2533623 installed. To determine whether the flags are available, use
+  // GetProcAddress to get the address of the AddDllDirectory,
+  // RemoveDllDirectory, or SetDefaultDllDirectories function. If GetProcAddress
+  // succeeds, the LOAD_LIBRARY_SEARCH_* flags can be used with LoadLibraryEx.
+  // https://msdn.microsoft.com/en-us/library/windows/desktop/ms684179(v=vs.85).aspx
+  // The LOAD_LIBRARY_SEARCH_* flags are used in the LoadNativeLibraryHelper
+  // method.
+  auto add_dll_dir_func = reinterpret_cast<AddDllDirectory>(
+      GetProcAddress(GetModuleHandle(L"kernel32.dll"), "AddDllDirectory"));
+  return !!add_dll_dir_func;
+}
+
+// A helper method to encode the library loading result as a
+// LoadLibraryResult. Note that this is only reached after LoadLibraryExW has
+// failed or was unavailable, which is why "search flags available" maps to
+// the FAIL_AND_* values.
+LoadLibraryResult GetLoadLibraryResult(bool are_search_flags_available,
+                                       bool has_load_library_succeeded) {
+  LoadLibraryResult result;
+  if (are_search_flags_available) {
+    if (has_load_library_succeeded)
+      result = LoadLibraryResult::FAIL_AND_SUCCEED;
+    else
+      result = LoadLibraryResult::FAIL_AND_FAIL;
+  } else if (has_load_library_succeeded) {
+    result = LoadLibraryResult::UNAVAILABLE_AND_SUCCEED;
+  } else {
+    result = LoadLibraryResult::UNAVAILABLE_AND_FAIL;
+  }
+  return result;
+}
+
+NativeLibrary LoadNativeLibraryHelper(const FilePath& library_path,
+                                      NativeLibraryLoadError* error) {
+  // LoadLibrary() opens the file off disk.
+  AssertBlockingAllowed();
+
+  HMODULE module = nullptr;
+
+  // This variable records the library loading result.
+  LoadLibraryResult load_library_result = LoadLibraryResult::SUCCEED;
+
+  bool are_search_flags_available = AreSearchFlagsAvailable();
+  if (are_search_flags_available) {
+    // LOAD_LIBRARY_SEARCH_DLL_LOAD_DIR flag is needed to search the library
+    // directory as the library may have dependencies on DLLs in this
+    // directory.
+    module = ::LoadLibraryExW(
+        library_path.value().c_str(), nullptr,
+        LOAD_LIBRARY_SEARCH_DLL_LOAD_DIR | LOAD_LIBRARY_SEARCH_DEFAULT_DIRS);
+    // If LoadLibraryExW succeeds, log this metric and return.
+    if (module) {
+      LogLibraryLoadResultToUMA(load_library_result);
+      return module;
+    }
+    // GetLastError() needs to be called immediately after
+    // LoadLibraryExW call.
+    if (error)
+      error->code = GetLastError();
+  }
+
+  // If LoadLibraryExW API/flags are unavailable or API call fails, try
+  // LoadLibraryW API.
+  // TODO(chengx): Currently, if the LoadLibraryExW API call fails,
+  // LoadLibraryW is still tried. We should strictly prefer LoadLibraryExW
+  // over LoadLibraryW if LoadLibraryW is statistically showing no extra
+  // benefit. If the UMA metric shows that FAIL_AND_FAIL is the primary
+  // failure mode and/or FAIL_AND_SUCCEED is close to zero, we should remove
+  // this fallback.
+  // (http://crbug.com/701944)
+
+  // Switch the current directory to the library directory as the library
+  // may have dependencies on DLLs in this directory.
+  bool restore_directory = false;
+  FilePath current_directory;
+  if (GetCurrentDirectory(&current_directory)) {
+    FilePath plugin_path = library_path.DirName();
+    if (!plugin_path.empty()) {
+      SetCurrentDirectory(plugin_path);
+      restore_directory = true;
+    }
+  }
+
+  module = ::LoadLibraryW(library_path.value().c_str());
+
+  // GetLastError() needs to be called immediately after LoadLibraryW call.
+  if (!module && error)
+    error->code = GetLastError();
+
+  if (restore_directory)
+    SetCurrentDirectory(current_directory);
+
+  // Get the library loading result and log it to UMA.
+  LogLibraryLoadResultToUMA(
+      GetLoadLibraryResult(are_search_flags_available, !!module));
+
+  return module;
+}
+}  // namespace
+
+std::string NativeLibraryLoadError::ToString() const {
+  return StringPrintf("%lu", code);
+}
+
+NativeLibrary LoadNativeLibraryWithOptions(const FilePath& library_path,
+                                           const NativeLibraryOptions& options,
+                                           NativeLibraryLoadError* error) {
+  return LoadNativeLibraryHelper(library_path, error);
+}
+
+void UnloadNativeLibrary(NativeLibrary library) {
+  FreeLibrary(library);
+}
+
+void* GetFunctionPointerFromNativeLibrary(NativeLibrary library,
+                                          StringPiece name) {
+  return reinterpret_cast<void*>(GetProcAddress(library, name.data()));
+}
+
+std::string GetNativeLibraryName(StringPiece name) {
+  DCHECK(IsStringASCII(name));
+  return name.as_string() + ".dll";
+}
+
+std::string GetLoadableModuleName(StringPiece name) {
+  return GetNativeLibraryName(name);
+}
+
+}  // namespace base
diff --git a/base/nix/OWNERS b/base/nix/OWNERS
new file mode 100644
index 0000000..280ba47
--- /dev/null
+++ b/base/nix/OWNERS
@@ -0,0 +1 @@
+thomasanderson@chromium.org
diff --git a/base/nix/mime_util_xdg.cc b/base/nix/mime_util_xdg.cc
new file mode 100644
index 0000000..6b5b11d
--- /dev/null
+++ b/base/nix/mime_util_xdg.cc
@@ -0,0 +1,33 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/nix/mime_util_xdg.h"
+
+#include "base/files/file_path.h"
+#include "base/lazy_instance.h"
+#include "base/synchronization/lock.h"
+#include "base/third_party/xdg_mime/xdgmime.h"
+#include "base/threading/thread_restrictions.h"
+
+namespace base {
+namespace nix {
+
+namespace {
+
+// None of the XDG stuff is thread-safe, so serialize all access under
+// this lock.
+LazyInstance<Lock>::Leaky g_mime_util_xdg_lock = LAZY_INSTANCE_INITIALIZER;
+
+}  // namespace
+
+std::string GetFileMimeType(const FilePath& filepath) {
+  if (filepath.empty())
+    return std::string();
+  AssertBlockingAllowed();
+  AutoLock scoped_lock(g_mime_util_xdg_lock.Get());
+  return xdg_mime_get_mime_type_from_file_name(filepath.value().c_str());
+}
+
+}  // namespace nix
+}  // namespace base
diff --git a/base/nix/mime_util_xdg.h b/base/nix/mime_util_xdg.h
new file mode 100644
index 0000000..e0f264a
--- /dev/null
+++ b/base/nix/mime_util_xdg.h
@@ -0,0 +1,36 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NIX_MIME_UTIL_XDG_H_
+#define BASE_NIX_MIME_UTIL_XDG_H_
+
+#include <string>
+
+#include "base/base_export.h"
+#include "build/build_config.h"
+
+namespace base {
+
+class FilePath;
+
+namespace nix {
+
+// Gets the mime type for a file at |filepath|.
+//
+// The mime type is calculated based only on the file name of |filepath|.  In
+// particular |filepath| will not be touched on disk and |filepath| doesn't even
+// have to exist.  This means that the function does not work for directories
+// (i.e. |filepath| is assumed to be a path to a file).
+//
+// Note that this function might need to read from disk the mime-types data
+// provided by the OS.  Therefore this function should not be called from
+// threads that disallow IO via base::ThreadRestrictions::SetIOAllowed(false).
+//
+// If the mime type is unknown, this will return application/octet-stream.
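+//
+// Example (illustrative; the exact result depends on the system's
+// shared-mime-info database): GetFileMimeType(FilePath("song.mp3")) typically
+// returns "audio/mpeg".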
+BASE_EXPORT std::string GetFileMimeType(const FilePath& filepath);
+
+}  // namespace nix
+}  // namespace base
+
+#endif  // BASE_NIX_MIME_UTIL_XDG_H_
diff --git a/base/nix/xdg_util.cc b/base/nix/xdg_util.cc
new file mode 100644
index 0000000..109624a
--- /dev/null
+++ b/base/nix/xdg_util.cc
@@ -0,0 +1,150 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/nix/xdg_util.h"
+
+#include <string>
+
+#include "base/base_paths.h"
+#include "base/environment.h"
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/path_service.h"
+#include "base/strings/string_util.h"
+#include "base/third_party/xdg_user_dirs/xdg_user_dir_lookup.h"
+
+namespace {
+
+// The KDE session version environment variable introduced in KDE 4.
+const char kKDESessionEnvVar[] = "KDE_SESSION_VERSION";
+
+}  // namespace
+
+namespace base {
+namespace nix {
+
+const char kDotConfigDir[] = ".config";
+const char kXdgConfigHomeEnvVar[] = "XDG_CONFIG_HOME";
+
+FilePath GetXDGDirectory(Environment* env, const char* env_name,
+                         const char* fallback_dir) {
+  FilePath path;
+  std::string env_value;
+  if (env->GetVar(env_name, &env_value) && !env_value.empty()) {
+    path = FilePath(env_value);
+  } else {
+    PathService::Get(DIR_HOME, &path);
+    path = path.Append(fallback_dir);
+  }
+  return path.StripTrailingSeparators();
+}
+
+FilePath GetXDGUserDirectory(const char* dir_name, const char* fallback_dir) {
+  FilePath path;
+  char* xdg_dir = xdg_user_dir_lookup(dir_name);
+  if (xdg_dir) {
+    path = FilePath(xdg_dir);
+    free(xdg_dir);
+  } else {
+    PathService::Get(DIR_HOME, &path);
+    path = path.Append(fallback_dir);
+  }
+  return path.StripTrailingSeparators();
+}
+
+DesktopEnvironment GetDesktopEnvironment(Environment* env) {
+  // XDG_CURRENT_DESKTOP is the newest standard circa 2012.
+  std::string xdg_current_desktop;
+  if (env->GetVar("XDG_CURRENT_DESKTOP", &xdg_current_desktop)) {
+    // Not all desktop environments set this env var as of this writing.
+    if (base::StartsWith(xdg_current_desktop, "Unity",
+                         base::CompareCase::SENSITIVE)) {
+      // gnome-fallback sessions set XDG_CURRENT_DESKTOP to Unity
+      // DESKTOP_SESSION can be gnome-fallback or gnome-fallback-compiz
+      std::string desktop_session;
+      if (env->GetVar("DESKTOP_SESSION", &desktop_session) &&
+          desktop_session.find("gnome-fallback") != std::string::npos) {
+        return DESKTOP_ENVIRONMENT_GNOME;
+      }
+      return DESKTOP_ENVIRONMENT_UNITY;
+    }
+    if (xdg_current_desktop == "GNOME")
+      return DESKTOP_ENVIRONMENT_GNOME;
+    if (xdg_current_desktop == "X-Cinnamon")
+      return DESKTOP_ENVIRONMENT_CINNAMON;
+    if (xdg_current_desktop == "KDE") {
+      std::string kde_session;
+      if (env->GetVar(kKDESessionEnvVar, &kde_session)) {
+        if (kde_session == "5") {
+          return DESKTOP_ENVIRONMENT_KDE5;
+        }
+      }
+      return DESKTOP_ENVIRONMENT_KDE4;
+    }
+    if (xdg_current_desktop == "Pantheon")
+      return DESKTOP_ENVIRONMENT_PANTHEON;
+  }
+
+  // DESKTOP_SESSION was what everyone used in 2010.
+  std::string desktop_session;
+  if (env->GetVar("DESKTOP_SESSION", &desktop_session)) {
+    if (desktop_session == "gnome" || desktop_session == "mate")
+      return DESKTOP_ENVIRONMENT_GNOME;
+    if (desktop_session == "kde4" || desktop_session == "kde-plasma")
+      return DESKTOP_ENVIRONMENT_KDE4;
+    if (desktop_session == "kde") {
+      // This may mean KDE4 on newer systems, so we have to check.
+      if (env->HasVar(kKDESessionEnvVar))
+        return DESKTOP_ENVIRONMENT_KDE4;
+      return DESKTOP_ENVIRONMENT_KDE3;
+    }
+    if (desktop_session.find("xfce") != std::string::npos ||
+        desktop_session == "xubuntu") {
+      return DESKTOP_ENVIRONMENT_XFCE;
+    }
+  }
+
+  // Fall back on some older environment variables.
+  // Useful particularly in the DESKTOP_SESSION=default case.
+  if (env->HasVar("GNOME_DESKTOP_SESSION_ID"))
+    return DESKTOP_ENVIRONMENT_GNOME;
+  if (env->HasVar("KDE_FULL_SESSION")) {
+    if (env->HasVar(kKDESessionEnvVar))
+      return DESKTOP_ENVIRONMENT_KDE4;
+    return DESKTOP_ENVIRONMENT_KDE3;
+  }
+
+  return DESKTOP_ENVIRONMENT_OTHER;
+}
+
+const char* GetDesktopEnvironmentName(DesktopEnvironment env) {
+  switch (env) {
+    case DESKTOP_ENVIRONMENT_OTHER:
+      return nullptr;
+    case DESKTOP_ENVIRONMENT_CINNAMON:
+      return "CINNAMON";
+    case DESKTOP_ENVIRONMENT_GNOME:
+      return "GNOME";
+    case DESKTOP_ENVIRONMENT_KDE3:
+      return "KDE3";
+    case DESKTOP_ENVIRONMENT_KDE4:
+      return "KDE4";
+    case DESKTOP_ENVIRONMENT_KDE5:
+      return "KDE5";
+    case DESKTOP_ENVIRONMENT_PANTHEON:
+      return "PANTHEON";
+    case DESKTOP_ENVIRONMENT_UNITY:
+      return "UNITY";
+    case DESKTOP_ENVIRONMENT_XFCE:
+      return "XFCE";
+  }
+  return nullptr;
+}
+
+const char* GetDesktopEnvironmentName(Environment* env) {
+  return GetDesktopEnvironmentName(GetDesktopEnvironment(env));
+}
+
+}  // namespace nix
+}  // namespace base
diff --git a/base/nix/xdg_util.h b/base/nix/xdg_util.h
new file mode 100644
index 0000000..65f7d15
--- /dev/null
+++ b/base/nix/xdg_util.h
@@ -0,0 +1,77 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NIX_XDG_UTIL_H_
+#define BASE_NIX_XDG_UTIL_H_
+
+// XDG refers to http://en.wikipedia.org/wiki/Freedesktop.org .
+// This file contains utilities found across free desktop environments.
+//
+// TODO(brettw) this file should be in app/x11, but is currently used by
+// net. We should have a net API to allow the embedder to specify the behavior
+// that it uses XDG for, and then move this file.
+
+#include "base/base_export.h"
+
+#ifdef nix
+#error The nix macro conflicts with the base::nix namespace below; undefine it.
+#endif
+
+namespace base {
+
+class Environment;
+class FilePath;
+
+namespace nix {
+
+// The default XDG config directory name.
+BASE_EXPORT extern const char kDotConfigDir[];
+
+// The XDG config directory environment variable.
+BASE_EXPORT extern const char kXdgConfigHomeEnvVar[];
+
+// Utility function for getting XDG directories.
+// |env_name| is the name of an environment variable that we want to use to get
+// a directory path. |fallback_dir| is the directory relative to $HOME that we
+// use if |env_name| cannot be found or is empty. |fallback_dir| may be NULL.
+// Examples of |env_name| are XDG_CONFIG_HOME and XDG_DATA_HOME.
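+//
+// A usage sketch (hypothetical values; assumes XDG_CONFIG_HOME is unset and
+// $HOME is /home/user):
+//
+//   std::unique_ptr<base::Environment> env(base::Environment::Create());
+//   base::FilePath config = base::nix::GetXDGDirectory(
+//       env.get(), kXdgConfigHomeEnvVar, kDotConfigDir);
+//   // config is now "/home/user/.config".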
+BASE_EXPORT FilePath GetXDGDirectory(Environment* env, const char* env_name,
+                                     const char* fallback_dir);
+
+// Wrapper around xdg_user_dir_lookup() from src/base/third_party/xdg-user-dirs
+// This looks up "well known" user directories like the desktop and music
+// folder. Examples of |dir_name| are DESKTOP and MUSIC.
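+//
+// Example (hypothetical result): GetXDGUserDirectory("DESKTOP", "Desktop")
+// typically returns "$HOME/Desktop" unless overridden in user-dirs.dirs.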
+BASE_EXPORT FilePath GetXDGUserDirectory(const char* dir_name,
+                                         const char* fallback_dir);
+
+enum DesktopEnvironment {
+  DESKTOP_ENVIRONMENT_OTHER,
+  DESKTOP_ENVIRONMENT_CINNAMON,
+  DESKTOP_ENVIRONMENT_GNOME,
+  // KDE3, KDE4 and KDE5 are sufficiently different that we count
+  // them as different desktop environments here.
+  DESKTOP_ENVIRONMENT_KDE3,
+  DESKTOP_ENVIRONMENT_KDE4,
+  DESKTOP_ENVIRONMENT_KDE5,
+  DESKTOP_ENVIRONMENT_PANTHEON,
+  DESKTOP_ENVIRONMENT_UNITY,
+  DESKTOP_ENVIRONMENT_XFCE,
+};
+
+// Return an entry from the DesktopEnvironment enum with a best guess
+// of which desktop environment we're using.  We use this to know when
+// to attempt to use preferences from the desktop environment --
+// proxy settings, password manager, etc.
+BASE_EXPORT DesktopEnvironment GetDesktopEnvironment(Environment* env);
+
+// Return a string representation of the given desktop environment.
+// May return NULL in the case of DESKTOP_ENVIRONMENT_OTHER.
+BASE_EXPORT const char* GetDesktopEnvironmentName(DesktopEnvironment env);
+// Convenience wrapper that calls GetDesktopEnvironment() first.
+BASE_EXPORT const char* GetDesktopEnvironmentName(Environment* env);
+
+}  // namespace nix
+}  // namespace base
+
+#endif  // BASE_NIX_XDG_UTIL_H_
diff --git a/base/nix/xdg_util_unittest.cc b/base/nix/xdg_util_unittest.cc
new file mode 100644
index 0000000..e195303
--- /dev/null
+++ b/base/nix/xdg_util_unittest.cc
@@ -0,0 +1,181 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/nix/xdg_util.h"
+
+#include "base/environment.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::_;
+using ::testing::Eq;
+using ::testing::Return;
+using ::testing::SetArgPointee;
+
+namespace base {
+namespace nix {
+
+namespace {
+
+class MockEnvironment : public Environment {
+ public:
+  MOCK_METHOD2(GetVar, bool(StringPiece, std::string* result));
+  MOCK_METHOD2(SetVar, bool(StringPiece, const std::string& new_value));
+  MOCK_METHOD1(UnSetVar, bool(StringPiece));
+};
+
+// Needs to be const char* to make gmock happy.
+const char* const kDesktopGnome = "gnome";
+const char* const kDesktopGnomeFallback = "gnome-fallback";
+const char* const kDesktopMATE = "mate";
+const char* const kDesktopKDE4 = "kde4";
+const char* const kDesktopKDE = "kde";
+const char* const kDesktopXFCE = "xfce";
+const char* const kXdgDesktopCinnamon = "X-Cinnamon";
+const char* const kXdgDesktopGNOME = "GNOME";
+const char* const kXdgDesktopKDE = "KDE";
+const char* const kXdgDesktopPantheon = "Pantheon";
+const char* const kXdgDesktopUnity = "Unity";
+const char* const kXdgDesktopUnity7 = "Unity:Unity7";
+const char* const kXdgDesktopUnity8 = "Unity:Unity8";
+const char* const kKDESessionKDE5 = "5";
+
+const char kDesktopSession[] = "DESKTOP_SESSION";
+const char kKDESession[] = "KDE_SESSION_VERSION";
+const char kXdgDesktop[] = "XDG_CURRENT_DESKTOP";
+
+}  // namespace
+
+TEST(XDGUtilTest, GetDesktopEnvironmentGnome) {
+  MockEnvironment getter;
+  EXPECT_CALL(getter, GetVar(_, _)).WillRepeatedly(Return(false));
+  EXPECT_CALL(getter, GetVar(Eq(kDesktopSession), _))
+      .WillOnce(DoAll(SetArgPointee<1>(kDesktopGnome), Return(true)));
+
+  EXPECT_EQ(DESKTOP_ENVIRONMENT_GNOME, GetDesktopEnvironment(&getter));
+}
+
+TEST(XDGUtilTest, GetDesktopEnvironmentMATE) {
+  MockEnvironment getter;
+  EXPECT_CALL(getter, GetVar(_, _)).WillRepeatedly(Return(false));
+  EXPECT_CALL(getter, GetVar(Eq(kDesktopSession), _))
+      .WillOnce(DoAll(SetArgPointee<1>(kDesktopMATE), Return(true)));
+
+  EXPECT_EQ(DESKTOP_ENVIRONMENT_GNOME, GetDesktopEnvironment(&getter));
+}
+
+TEST(XDGUtilTest, GetDesktopEnvironmentKDE4) {
+  MockEnvironment getter;
+  EXPECT_CALL(getter, GetVar(_, _)).WillRepeatedly(Return(false));
+  EXPECT_CALL(getter, GetVar(Eq(kDesktopSession), _))
+      .WillOnce(DoAll(SetArgPointee<1>(kDesktopKDE4), Return(true)));
+
+  EXPECT_EQ(DESKTOP_ENVIRONMENT_KDE4, GetDesktopEnvironment(&getter));
+}
+
+TEST(XDGUtilTest, GetDesktopEnvironmentKDE3) {
+  MockEnvironment getter;
+  EXPECT_CALL(getter, GetVar(_, _)).WillRepeatedly(Return(false));
+  EXPECT_CALL(getter, GetVar(Eq(kDesktopSession), _))
+      .WillOnce(DoAll(SetArgPointee<1>(kDesktopKDE), Return(true)));
+
+  EXPECT_EQ(DESKTOP_ENVIRONMENT_KDE3, GetDesktopEnvironment(&getter));
+}
+
+TEST(XDGUtilTest, GetDesktopEnvironmentXFCE) {
+  MockEnvironment getter;
+  EXPECT_CALL(getter, GetVar(_, _)).WillRepeatedly(Return(false));
+  EXPECT_CALL(getter, GetVar(Eq(kDesktopSession), _))
+      .WillOnce(DoAll(SetArgPointee<1>(kDesktopXFCE), Return(true)));
+
+  EXPECT_EQ(DESKTOP_ENVIRONMENT_XFCE, GetDesktopEnvironment(&getter));
+}
+
+TEST(XDGUtilTest, GetXdgDesktopCinnamon) {
+  MockEnvironment getter;
+  EXPECT_CALL(getter, GetVar(_, _)).WillRepeatedly(Return(false));
+  EXPECT_CALL(getter, GetVar(Eq(kXdgDesktop), _))
+      .WillOnce(DoAll(SetArgPointee<1>(kXdgDesktopCinnamon), Return(true)));
+
+  EXPECT_EQ(DESKTOP_ENVIRONMENT_CINNAMON, GetDesktopEnvironment(&getter));
+}
+
+TEST(XDGUtilTest, GetXdgDesktopGnome) {
+  MockEnvironment getter;
+  EXPECT_CALL(getter, GetVar(_, _)).WillRepeatedly(Return(false));
+  EXPECT_CALL(getter, GetVar(Eq(kXdgDesktop), _))
+      .WillOnce(DoAll(SetArgPointee<1>(kXdgDesktopGNOME), Return(true)));
+
+  EXPECT_EQ(DESKTOP_ENVIRONMENT_GNOME, GetDesktopEnvironment(&getter));
+}
+
+TEST(XDGUtilTest, GetXdgDesktopGnomeFallback) {
+  MockEnvironment getter;
+  EXPECT_CALL(getter, GetVar(_, _)).WillRepeatedly(Return(false));
+  EXPECT_CALL(getter, GetVar(Eq(kXdgDesktop), _))
+      .WillOnce(DoAll(SetArgPointee<1>(kXdgDesktopUnity), Return(true)));
+  EXPECT_CALL(getter, GetVar(Eq(kDesktopSession), _))
+      .WillOnce(DoAll(SetArgPointee<1>(kDesktopGnomeFallback), Return(true)));
+
+  EXPECT_EQ(DESKTOP_ENVIRONMENT_GNOME, GetDesktopEnvironment(&getter));
+}
+
+TEST(XDGUtilTest, GetXdgDesktopKDE5) {
+  MockEnvironment getter;
+  EXPECT_CALL(getter, GetVar(_, _)).WillRepeatedly(Return(false));
+  EXPECT_CALL(getter, GetVar(Eq(kXdgDesktop), _))
+      .WillOnce(DoAll(SetArgPointee<1>(kXdgDesktopKDE), Return(true)));
+  EXPECT_CALL(getter, GetVar(Eq(kKDESession), _))
+      .WillOnce(DoAll(SetArgPointee<1>(kKDESessionKDE5), Return(true)));
+
+  EXPECT_EQ(DESKTOP_ENVIRONMENT_KDE5, GetDesktopEnvironment(&getter));
+}
+
+TEST(XDGUtilTest, GetXdgDesktopKDE4) {
+  MockEnvironment getter;
+  EXPECT_CALL(getter, GetVar(_, _)).WillRepeatedly(Return(false));
+  EXPECT_CALL(getter, GetVar(Eq(kXdgDesktop), _))
+      .WillOnce(DoAll(SetArgPointee<1>(kXdgDesktopKDE), Return(true)));
+
+  EXPECT_EQ(DESKTOP_ENVIRONMENT_KDE4, GetDesktopEnvironment(&getter));
+}
+
+TEST(XDGUtilTest, GetXdgDesktopPantheon) {
+  MockEnvironment getter;
+  EXPECT_CALL(getter, GetVar(_, _)).WillRepeatedly(Return(false));
+  EXPECT_CALL(getter, GetVar(Eq(kXdgDesktop), _))
+      .WillOnce(DoAll(SetArgPointee<1>(kXdgDesktopPantheon), Return(true)));
+
+  EXPECT_EQ(DESKTOP_ENVIRONMENT_PANTHEON, GetDesktopEnvironment(&getter));
+}
+
+TEST(XDGUtilTest, GetXdgDesktopUnity) {
+  MockEnvironment getter;
+  EXPECT_CALL(getter, GetVar(_, _)).WillRepeatedly(Return(false));
+  EXPECT_CALL(getter, GetVar(Eq(kXdgDesktop), _))
+      .WillOnce(DoAll(SetArgPointee<1>(kXdgDesktopUnity), Return(true)));
+
+  EXPECT_EQ(DESKTOP_ENVIRONMENT_UNITY, GetDesktopEnvironment(&getter));
+}
+
+TEST(XDGUtilTest, GetXdgDesktopUnity7) {
+  MockEnvironment getter;
+  EXPECT_CALL(getter, GetVar(_, _)).WillRepeatedly(Return(false));
+  EXPECT_CALL(getter, GetVar(Eq(kXdgDesktop), _))
+      .WillOnce(DoAll(SetArgPointee<1>(kXdgDesktopUnity7), Return(true)));
+
+  EXPECT_EQ(DESKTOP_ENVIRONMENT_UNITY, GetDesktopEnvironment(&getter));
+}
+
+TEST(XDGUtilTest, GetXdgDesktopUnity8) {
+  MockEnvironment getter;
+  EXPECT_CALL(getter, GetVar(_, _)).WillRepeatedly(Return(false));
+  EXPECT_CALL(getter, GetVar(Eq(kXdgDesktop), _))
+      .WillOnce(DoAll(SetArgPointee<1>(kXdgDesktopUnity8), Return(true)));
+
+  EXPECT_EQ(DESKTOP_ENVIRONMENT_UNITY, GetDesktopEnvironment(&getter));
+}
+
+}  // namespace nix
+}  // namespace base
diff --git a/base/no_destructor.h b/base/no_destructor.h
new file mode 100644
index 0000000..aabc6e6
--- /dev/null
+++ b/base/no_destructor.h
@@ -0,0 +1,99 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NO_DESTRUCTOR_H_
+#define BASE_NO_DESTRUCTOR_H_
+
+#include <new>
+#include <utility>
+
+namespace base {
+
+// A wrapper that makes it easy to create an object of type T with static
+// storage duration that:
+// - is only constructed on first access
+// - never invokes the destructor
+// in order to satisfy the styleguide ban on global constructors and
+// destructors.
+//
+// Runtime constant example:
+// const std::string& GetLineSeparator() {
+//   // Forwards to std::string(size_t, char, const Allocator&) constructor.
+//   static const base::NoDestructor<std::string> s(5, '-');
+//   return *s;
+// }
+//
+// More complex initialization with a lambda:
+// const std::string& GetSessionNonce() {
+//   static const base::NoDestructor<std::string> nonce([] {
+//     std::string s(16, '\0');
+//     crypto::RandString(s.data(), s.size());
+//     return s;
+//   }());
+//   return *nonce;
+// }
+//
+// NoDestructor<T> stores the object inline, so it also avoids a pointer
+// indirection and a malloc. Also note that since C++11 static local variable
+// initialization is thread-safe and so is this pattern. Code should prefer to
+// use NoDestructor<T> over:
+// - The CR_DEFINE_STATIC_LOCAL() helper macro.
+// - A function scoped static T* or T& that is dynamically initialized.
+// - A global base::LazyInstance<T>.
+//
+// Note that since the destructor is never run, this *will* leak memory if used
+// as a stack or member variable. Furthermore, a NoDestructor<T> should never
+// have global scope as that may require a static initializer.
+template <typename T>
+class NoDestructor {
+ public:
+  // Not constexpr; just write static constexpr T x = ...; if the value should
+  // be a constexpr.
+  template <typename... Args>
+  explicit NoDestructor(Args&&... args) {
+    new (storage_) T(std::forward<Args>(args)...);
+  }
+
+  // Allows copy and move construction of the contained type, to allow
+  // construction from an initializer list, e.g. for std::vector.
+  explicit NoDestructor(const T& x) { new (storage_) T(x); }
+  explicit NoDestructor(T&& x) { new (storage_) T(std::move(x)); }
+
+  NoDestructor(const NoDestructor&) = delete;
+  NoDestructor& operator=(const NoDestructor&) = delete;
+
+  ~NoDestructor() = default;
+
+  const T& operator*() const { return *get(); }
+  T& operator*() { return *get(); }
+
+  const T* operator->() const { return get(); }
+  T* operator->() { return get(); }
+
+  const T* get() const { return reinterpret_cast<const T*>(storage_); }
+  T* get() { return reinterpret_cast<T*>(storage_); }
+
+ private:
+  alignas(T) char storage_[sizeof(T)];
+
+#if defined(LEAK_SANITIZER)
+  // TODO(https://crbug.com/812277): This is a hack to work around the fact
+  // that LSan doesn't seem to treat NoDestructor as a root for reachability
+  // analysis. This means that code like this:
+  //   static base::NoDestructor<std::vector<int>> v({1, 2, 3});
+  // is considered a leak. Using the standard leak sanitizer annotations to
+  // suppress leaks doesn't work: std::vector is implicitly constructed before
+  // calling the base::NoDestructor constructor.
+  //
+  // Unfortunately, I haven't been able to demonstrate this issue in simpler
+  // reproductions: until that's resolved, hold an explicit pointer to the
+  // placement-new'd object in leak sanitizer mode to help LSan realize that
+  // objects allocated by the contained type are still reachable.
+  T* storage_ptr_ = reinterpret_cast<T*>(storage_);
+#endif  // defined(LEAK_SANITIZER)
+};
+
+}  // namespace base
+
+#endif  // BASE_NO_DESTRUCTOR_H_
diff --git a/base/no_destructor_unittest.cc b/base/no_destructor_unittest.cc
new file mode 100644
index 0000000..8f9d4a4
--- /dev/null
+++ b/base/no_destructor_unittest.cc
@@ -0,0 +1,76 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/no_destructor.h"
+
+#include <string>
+#include <utility>
+
+#include "base/logging.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+struct CheckOnDestroy {
+  ~CheckOnDestroy() { CHECK(false); }
+};
+
+TEST(NoDestructorTest, SkipsDestructors) {
+  NoDestructor<CheckOnDestroy> destructor_should_not_run;
+}
+
+struct CopyOnly {
+  CopyOnly() = default;
+
+  CopyOnly(const CopyOnly&) = default;
+  CopyOnly& operator=(const CopyOnly&) = default;
+
+  CopyOnly(CopyOnly&&) = delete;
+  CopyOnly& operator=(CopyOnly&&) = delete;
+};
+
+struct MoveOnly {
+  MoveOnly() = default;
+
+  MoveOnly(const MoveOnly&) = delete;
+  MoveOnly& operator=(const MoveOnly&) = delete;
+
+  MoveOnly(MoveOnly&&) = default;
+  MoveOnly& operator=(MoveOnly&&) = default;
+};
+
+struct ForwardingTestStruct {
+  ForwardingTestStruct(const CopyOnly&, MoveOnly&&) {}
+};
+
+TEST(NoDestructorTest, ForwardsArguments) {
+  CopyOnly copy_only;
+  MoveOnly move_only;
+
+  static NoDestructor<ForwardingTestStruct> test_forwarding(
+      copy_only, std::move(move_only));
+}
+
+TEST(NoDestructorTest, Accessors) {
+  static NoDestructor<std::string> awesome("awesome");
+
+  EXPECT_EQ("awesome", *awesome);
+  EXPECT_EQ(0, awesome->compare("awesome"));
+  EXPECT_EQ(0, awesome.get()->compare("awesome"));
+}
+
+// Passing initializer list to a NoDestructor like in this test
+// is ambiguous in GCC.
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=84849
+#if !defined(COMPILER_GCC) && !defined(__clang__)
+TEST(NoDestructorTest, InitializerList) {
+  static NoDestructor<std::vector<std::string>> vector({"a", "b", "c"});
+}
+#endif
+
+}  // namespace
+
+}  // namespace base
diff --git a/base/numerics/BUILD.gn b/base/numerics/BUILD.gn
new file mode 100644
index 0000000..0bb8dd1
--- /dev/null
+++ b/base/numerics/BUILD.gn
@@ -0,0 +1,28 @@
+# Copyright (c) 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This is a dependency-free, header-only library, and it needs to stay that
+# way to facilitate pulling it into various third-party projects. So, this
+# file is here to protect against accidentally introducing external
+# dependencies or depending on internal implementation details.
+source_set("base_numerics") {
+  visibility = [ "//base/*" ]
+  sources = [
+    "checked_math_impl.h",
+    "clamped_math_impl.h",
+    "safe_conversions_arm_impl.h",
+    "safe_conversions_impl.h",
+    "safe_math_arm_impl.h",
+    "safe_math_clang_gcc_impl.h",
+    "safe_math_shared_impl.h",
+  ]
+  public = [
+    "checked_math.h",
+    "clamped_math.h",
+    "math_constants.h",
+    "ranges.h",
+    "safe_conversions.h",
+    "safe_math.h",
+  ]
+}
diff --git a/base/numerics/DEPS b/base/numerics/DEPS
new file mode 100644
index 0000000..d95bf13
--- /dev/null
+++ b/base/numerics/DEPS
@@ -0,0 +1,7 @@
+# This is a dependency-free, header-only library, and it needs to stay that
+# way to facilitate pulling it into various third-party projects. So, this
+# file is here to protect against accidentally introducing dependencies.
+include_rules = [
+  "-base",
+  "+base/numerics",
+]
diff --git a/base/numerics/OWNERS b/base/numerics/OWNERS
new file mode 100644
index 0000000..5493fba
--- /dev/null
+++ b/base/numerics/OWNERS
@@ -0,0 +1,5 @@
+jschuh@chromium.org
+tsepez@chromium.org
+
+
+# COMPONENT: Internals
diff --git a/base/numerics/README.md b/base/numerics/README.md
new file mode 100644
index 0000000..896b124
--- /dev/null
+++ b/base/numerics/README.md
@@ -0,0 +1,409 @@
+# `base/numerics`
+
+This directory contains a dependency-free, header-only library of templates
+providing well-defined semantics for safely and performantly handling a variety
+of numeric operations, including most common arithmetic operations and
+conversions.
+
+The public API is broken out into the following header files:
+
+*   `checked_math.h` contains the `CheckedNumeric` template class and helper
+    functions for performing arithmetic and conversion operations that detect
+    errors and boundary conditions (e.g. overflow, truncation, etc.).
+*   `clamped_math.h` contains the `ClampedNumeric` template class and
+    helper functions for performing fast, clamped (i.e. non-sticky saturating)
+    arithmetic operations and conversions.
+*   `safe_conversions.h` contains the `StrictNumeric` template class and
+    a collection of custom casting templates and helper functions for safely
+    converting between a range of numeric types.
+*   `safe_math.h` includes all of the previously mentioned headers.
+
+*** aside
+**Note:** The `Numeric` template types implicitly convert from C numeric types
+and `Numeric` templates that are convertible to an underlying C numeric type.
+The conversion priority for `Numeric` type coercions is:
+
+*   `StrictNumeric` coerces to `ClampedNumeric` and `CheckedNumeric`
+*   `ClampedNumeric` coerces to `CheckedNumeric`
+***
+
+[TOC]
+
+## Common patterns and use-cases
+
+The following covers the preferred style for the most common uses of this
+library. Please don't cargo-cult from anywhere else. 😉
+
+### Performing checked arithmetic conversions
+
+The `checked_cast` template converts between arbitrary arithmetic types, and is
+used for cases where a conversion failure should result in program termination:
+
+```cpp
+// Crash if signed_value is out of range for buff_size.
+size_t buff_size = checked_cast<size_t>(signed_value);
+```
+
+### Performing saturated (clamped) arithmetic conversions
+
+The `saturated_cast` template converts between arbitrary arithmetic types, and
+is used in cases where an out-of-bounds source value should be saturated to the
+corresponding maximum or minimum of the destination type:
+
+```cpp
+// Convert from float with saturation to INT_MAX, INT_MIN, or 0 for NaN.
+int int_value = saturated_cast<int>(floating_point_value);
+```
+
+### Enforcing arithmetic conversions at compile-time
+
+The `strict_cast` enforces type restrictions at compile-time and results in
+emitted code that is identical to a normal `static_cast`. However, a
+`strict_cast` assignment will fail to compile if the destination type cannot
+represent the full range of the source type:
+
+```cpp
+// Throw a compiler error if byte_value is changed to an out-of-range-type.
+int int_value = strict_cast<int>(byte_value);
+```
+
+You can also enforce these compile-time restrictions on function parameters by
+using the `StrictNumeric` template:
+
+```cpp
+// Throw a compiler error if the size argument cannot be represented by a
+// size_t (e.g. passing an int will fail to compile).
+bool AllocateBuffer(void** buffer, StrictNumeric<size_t> size);
+```
+
+### Comparing values between arbitrary arithmetic types
+
+Both the `StrictNumeric` and `ClampedNumeric` types provide well defined
+comparisons between arbitrary arithmetic types. This allows you to perform
+comparisons that are not legal or would trigger compiler warnings or errors
+under the normal arithmetic promotion rules:
+
+```cpp
+bool foo(unsigned value, int upper_bound) {
+  // Converting to StrictNumeric allows this comparison to work correctly.
+  if (MakeStrictNum(value) >= upper_bound)
+    return false;
+  return true;
+}
+```
+
+*** note
+**Warning:** Do not perform manual conversions using the comparison operators.
+Instead, use the cast templates described in the previous sections, or the
+constexpr template functions `IsValueInRangeForNumericType` and
+`IsTypeInRangeForNumericType`, as these templates properly handle the full range
+of corner cases and employ various optimizations.
+***
+
+### Calculating a buffer size (checked arithmetic)
+
+When making exact calculations—such as for buffer lengths—it's often necessary
+to know when those calculations trigger an overflow, undefined behavior, or
+other boundary conditions. The `CheckedNumeric` template does this by storing
+a bit determining whether or not some arithmetic operation has occurred that
+would put the variable in an "invalid" state. Attempting to extract the value
+from a variable in an invalid state will trigger a check/trap condition that
+by default will result in process termination.
+
+Here's an example of a buffer calculation using a `CheckedNumeric` type (note:
+the AssignIfValid method will trigger a compile error if the result is ignored).
+
+```cpp
+// Calculate the buffer size and detect if an overflow occurs.
+size_t size;
+if (!CheckAdd(kHeaderSize, CheckMul(count, kItemSize)).AssignIfValid(&size)) {
+  // Handle an overflow error...
+}
+```
+
+### Calculating clamped coordinates (non-sticky saturating arithmetic)
+
+Certain classes of calculations—such as coordinate calculations—require
+well-defined semantics that always produce a valid result on boundary
+conditions. The `ClampedNumeric` template addresses this by providing
+performant, non-sticky saturating arithmetic operations.
+
+Here's an example of using a `ClampedNumeric` to calculate an operation
+insetting a rectangle.
+
+```cpp
+// Use clamped arithmetic since inset calculations might overflow.
+void Rect::Inset(int left, int top, int right, int bottom) {
+  origin_ += Vector2d(left, top);
+  set_width(ClampSub(width(), ClampAdd(left, right)));
+  set_height(ClampSub(height(), ClampAdd(top, bottom)));
+}
+```
+
+*** note
+The `ClampedNumeric` type is not "sticky", which means the saturation is not
+retained across individual operations. As such, one arithmetic operation may
+result in a saturated value, while the next operation may then "desaturate"
+the value. Here's an example:
+
+```cpp
+ClampedNumeric<int> value = INT_MAX;
+++value;  // value is still INT_MAX, due to saturation.
+--value;  // value is now (INT_MAX - 1), because saturation is not sticky.
+```
+
+***
+
+## Conversion functions and StrictNumeric<> in safe_conversions.h
+
+This header includes a collection of helper `constexpr` templates for safely
+performing a range of conversions, assignments, and tests.
+
+### Safe casting templates
+
+*   `as_signed()` - Returns the supplied integral value as a signed type of
+    the same width.
+*   `as_unsigned()` - Returns the supplied integral value as an unsigned type
+    of the same width.
+*   `checked_cast<>()` - Analogous to `static_cast<>` for numeric types, except
+    that by default it will trigger a crash on an out-of-bounds conversion (e.g.
+    overflow, underflow, NaN to integral) or a compile error if the conversion
+    error can be detected at compile time. The crash handler can be overridden
+    to perform a behavior other than crashing.
+*   `saturated_cast<>()` - Analogous to `static_cast` for numeric types, except
+    that it returns a saturated result when the specified numeric conversion
+    would otherwise overflow or underflow. An NaN source returns 0 by
+    default, but can be overridden to return a different result.
+*   `strict_cast<>()` - Analogous to `static_cast` for numeric types, except
+    this causes a compile failure if the destination type is not large
+    enough to contain any value in the source type. It performs no runtime
+    checking and thus introduces no runtime overhead.
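+
+For illustration, a minimal sketch of the same-width helpers (the exact values
+assume two's complement integers):
+
+```cpp
+int8_t negative = -1;
+auto u = as_unsigned(negative);    // uint8_t holding 0xFF (255).
+auto s = as_signed(uint8_t{200});  // int8_t holding -56.
+```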
+
+### Other helper and conversion functions
+
+*   `IsValueInRangeForNumericType<>()` - A convenience function that returns
+    true if the type supplied as the template parameter can represent the value
+    passed as an argument to the function.
+*   `IsTypeInRangeForNumericType<>()` - A convenience function that evaluates
+    entirely at compile-time and returns true if the destination type (first
+    template parameter) can represent the full range of the source type
+    (second template parameter).
+*   `IsValueNegative()` - A convenience function that will accept any
+    arithmetic type as an argument and will return whether the value is less
+    than zero. Unsigned types always return false.
+*   `SafeUnsignedAbs()` - Returns the absolute value of the supplied integer
+    parameter as an unsigned result (thus avoiding an overflow if the value
+    is the signed, two's complement minimum).
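+
+A quick sketch of these helpers (illustrative values only):
+
+```cpp
+IsValueInRangeForNumericType<int8_t>(128);  // false: int8_t only reaches 127.
+IsValueNegative(0u);                        // false: unsigned is never negative.
+SafeUnsignedAbs(INT_MIN);                   // The unsigned value INT_MAX + 1.
+```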
+
+### StrictNumeric<>
+
+`StrictNumeric<>` is a wrapper type that performs assignments and copies via
+the `strict_cast` template, and can perform valid arithmetic comparisons
+across any range of arithmetic types. `StrictNumeric` is the return type for
+values extracted from a `CheckedNumeric` class instance. The raw numeric value
+is extracted via `static_cast` to the underlying type or any type with
+sufficient range to represent the underlying type.
+
+*   `MakeStrictNum()` - Creates a new `StrictNumeric` from the underlying type
+    of the supplied arithmetic or StrictNumeric type.
+*   `SizeT` - Alias for `StrictNumeric<size_t>`.
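+
+For example (a sketch assuming size_t is no wider than 64 bits; the
+commented-out line shows the compile-time failure):
+
+```cpp
+size_t byte_count = 512;
+auto strict_size = MakeStrictNum(byte_count);        // StrictNumeric<size_t>.
+uint64_t wide = static_cast<uint64_t>(strict_size);  // OK: full range covered.
+// int narrow = static_cast<int>(strict_size);       // Fails to compile.
+```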
+
+## CheckedNumeric<> in checked_math.h
+
+`CheckedNumeric<>` implements all the logic and operators for detecting integer
+boundary conditions such as overflow, underflow, and invalid conversions.
+The `CheckedNumeric` type implicitly converts from floating point and integer
+data types, and contains overloads for basic arithmetic operations (i.e.: `+`,
+`-`, `*`, `/` for all types and `%`, `<<`, `>>`, `&`, `|`, `^` for integers).
+However, *the [variadic template functions
+](#CheckedNumeric_in-checked_math_h-Non_member-helper-functions)
+are the preferred API,* as they remove type ambiguities and help prevent a number
+of common errors. The variadic functions can also be more performant, as they
+eliminate redundant expressions that are unavoidable with the operator
+overloads. (Ideally the compiler should optimize those away, but better to avoid
+them in the first place.)
+
+Type promotions are a slightly modified version of the [standard C/C++ numeric
+promotions
+](http://en.cppreference.com/w/cpp/language/implicit_conversion#Numeric_promotions)
+with the two differences being that *there is no default promotion to int*
+and *bitwise logical operations always return an unsigned of the wider type.*
+
+### Members
+
+The unary negation, increment, and decrement operators are supported, along
+with the following unary arithmetic methods, which return a new
+`CheckedNumeric` as a result of the operation:
+
+*   `Abs()` - Absolute value.
+*   `UnsignedAbs()` - Absolute value as an equal-width unsigned underlying type
+    (valid for only integral types).
+*   `Max()` - Returns whichever is greater of the current instance or argument.
+    The underlying return type is whichever has the greatest magnitude.
+*   `Min()` - Returns whichever is lowest of the current instance or argument.
+    The underlying return type is whichever can represent the lowest
+    number in the smallest width (e.g. int8_t over unsigned, int over
+    int8_t, and float over int).
+
+The following are for converting `CheckedNumeric` instances:
+
+*   `type` - The underlying numeric type.
+*   `AssignIfValid()` - Assigns the underlying value to the supplied
+    destination pointer if the value is currently valid and within the
+    range supported by the destination type. Returns true on success.
+*   `Cast<>()` - Instance method returning a `CheckedNumeric` derived from
+    casting the current instance to a `CheckedNumeric` of the supplied
+    destination type.
+
+*** aside
+The following member functions return a `StrictNumeric`, which is valid for
+comparison and assignment operations, but will trigger a compile failure on
+attempts to assign to a type of insufficient range. The underlying value can
+be extracted by an explicit `static_cast` to the underlying type or any type
+with sufficient range to represent the underlying type.
+***
+
+*   `IsValid()` - Returns true if the underlying numeric value is valid (i.e.
+    has not wrapped or saturated and is not the result of an invalid
+    conversion).
+*   `ValueOrDie()` - Returns the underlying value. If the state is not valid
+    this call will trigger a crash by default (but may be overridden by
+    supplying an alternate handler to the template).
+*   `ValueOrDefault()` - Returns the current value, or the supplied default if
+    the state is not valid (but will not crash).
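+
+Putting the accessors together (a minimal sketch):
+
+```cpp
+CheckedNumeric<int> value = INT_MAX;
+value += 1;                          // Overflows, so the state becomes invalid.
+value.IsValid();                     // false.
+int safe = value.ValueOrDefault(0);  // Returns 0 instead of crashing.
+// value.ValueOrDie();               // Would CHECK and crash here.
+```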
+
+**Comparison operators are explicitly not provided** for `CheckedNumeric`
+types because they could result in a crash if the type is not in a valid state.
+Patterns like the following should be used instead:
+
+```cpp
+// Either input or padding (or both) may be arbitrary sizes.
+size_t buff_size;
+if (!CheckAdd(input, padding, kHeaderLength).AssignIfValid(&buff_size) ||
+     buff_size >= kMaxBuffer) {
+  // Handle an error...
+} else {
+  // Do stuff on success...
+}
+```
+
+### Non-member helper functions
+
+The following variadic convenience functions, which accept standard arithmetic
+or `CheckedNumeric` types, perform arithmetic operations, and return a
+`CheckedNumeric` result. The supported functions are:
+
+*   `CheckAdd()` - Addition.
+*   `CheckSub()` - Subtraction.
+*   `CheckMul()` - Multiplication.
+*   `CheckDiv()` - Division.
+*   `CheckMod()` - Modulus (integer only).
+*   `CheckLsh()` - Left integer shift (integer only).
+*   `CheckRsh()` - Right integer shift (integer only).
+*   `CheckAnd()` - Bitwise AND (integer only with unsigned result).
+*   `CheckOr()`  - Bitwise OR (integer only with unsigned result).
+*   `CheckXor()` - Bitwise XOR (integer only with unsigned result).
+*   `CheckMax()` - Maximum of supplied arguments.
+*   `CheckMin()` - Minimum of supplied arguments.
+
+The following wrapper functions can be used to avoid the template
+disambiguator syntax when converting a destination type.
+
+*   `IsValidForType<>()` in place of: `a.template IsValid<>()`
+*   `ValueOrDieForType<>()` in place of: `a.template ValueOrDie<>()`
+*   `ValueOrDefaultForType<>()` in place of: `a.template ValueOrDefault<>()`
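+
+For instance (sketch), checking whether a checked result fits a narrower type:
+
+```cpp
+int64_t width = 100000, height = 100000;
+auto area = CheckMul(width, height);        // CheckedNumeric<int64_t>: 1e10.
+bool fits = IsValidForType<int32_t>(area);  // false: 1e10 exceeds INT32_MAX.
+```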
+
+The following general utility method is useful for converting from
+arithmetic types to `CheckedNumeric` types:
+
+*   `MakeCheckedNum()` - Creates a new `CheckedNumeric` from the underlying type
+    of the supplied arithmetic or directly convertible type.
+
+## ClampedNumeric<> in clamped_math.h
+
+`ClampedNumeric<>` implements all the logic and operators for clamped
+(non-sticky saturating) arithmetic operations and conversions. The
+`ClampedNumeric` type implicitly converts back and forth between floating point
+and integer data types, saturating on assignment as appropriate. It contains
+overloads for basic arithmetic operations (i.e.: `+`, `-`, `*`, `/` for
+all types and `%`, `<<`, `>>`, `&`, `|`, `^` for integers) along with comparison
+operators for arithmetic types of any size. However, *the [variadic template
+functions
+](#ClampedNumeric_in-clamped_math_h-Non_member-helper-functions)
+are the preferred API,* as they remove type ambiguities and help prevent
+a number of common errors. The variadic functions can also be more performant,
+as they eliminate redundant expressions that are unavoidable with the operator
+overloads. (Ideally the compiler should optimize those away, but better to avoid
+them in the first place.)
+
+Type promotions are a slightly modified version of the [standard C/C++ numeric
+promotions
+](http://en.cppreference.com/w/cpp/language/implicit_conversion#Numeric_promotions)
+with the two differences being that *there is no default promotion to int*
+and *bitwise logical operations always return an unsigned of the wider type.*
+
+*** aside
+Most arithmetic operations saturate normally, to the numeric limit in the
+direction of the sign. The potentially unusual cases are:
+
+*   **Division:** Division by zero returns the saturated limit in the direction
+    of the sign of the dividend (first argument). The one exception is 0/0,
+    which returns zero (although logically it is NaN).
+*   **Modulus:** Division by zero returns the dividend (first argument).
+*   **Left shift:** Non-zero values saturate in the direction of the signed
+    limit (max/min), even for shifts larger than the bit width. 0 shifted any
+    amount results in 0.
+*   **Right shift:** Negative values saturate to -1. Positive or 0 saturates
+    to 0. (Effectively just an unbounded arithmetic-right-shift.)
+*   **Bitwise operations:** No saturation; bit pattern is identical to
+    non-saturated bitwise operations.
+***
+
+### Members
+
+The unary negation, increment, and decrement operators are supported, along
+with the following unary arithmetic methods, which return a new
+`ClampedNumeric` as a result of the operation:
+
+*   `Abs()` - Absolute value.
+*   `UnsignedAbs()` - Absolute value as an equal-width unsigned underlying type
+    (valid for only integral types).
+*   `Max()` - Returns whichever is greater of the current instance or argument.
+    The underlying return type is whichever has the greatest magnitude.
+*   `Min()` - Returns whichever is lowest of the current instance or argument.
+    The underlying return type is whichever can represent the lowest
+    number in the smallest width (e.g. int8_t over unsigned, int over
+    int8_t, and float over int).
+
+The following are for converting `ClampedNumeric` instances:
+
+*   `type` - The underlying numeric type.
+*   `RawValue()` - Returns the raw value as the underlying arithmetic type. This
+    is useful when e.g. assigning to an auto type or passing as a deduced
+    template parameter.
+*   `Cast<>()` - Instance method returning a `ClampedNumeric` derived from
+    casting the current instance to a `ClampedNumeric` of the supplied
+    destination type.
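+
+A minimal sketch of the conversion behavior:
+
+```cpp
+ClampedNumeric<int64_t> big = std::numeric_limits<int64_t>::max();
+int32_t narrowed = big;        // Saturates to INT32_MAX on assignment.
+int64_t raw = big.RawValue();  // The unmodified underlying value.
+```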
+
+### Non-member helper functions
+
+The following variadic convenience functions, which accept standard arithmetic
+or `ClampedNumeric` types, perform arithmetic operations, and return a
+`ClampedNumeric` result. The supported functions are:
+
+*   `ClampAdd()` - Addition.
+*   `ClampSub()` - Subtraction.
+*   `ClampMul()` - Multiplication.
+*   `ClampDiv()` - Division.
+*   `ClampMod()` - Modulus (integer only).
+*   `ClampLsh()` - Left integer shift (integer only).
+*   `ClampRsh()` - Right integer shift (integer only).
+*   `ClampAnd()` - Bitwise AND (integer only with unsigned result).
+*   `ClampOr()`  - Bitwise OR (integer only with unsigned result).
+*   `ClampXor()` - Bitwise XOR (integer only with unsigned result).
+*   `ClampMax()` - Maximum of supplied arguments.
+*   `ClampMin()` - Minimum of supplied arguments.
+
+The following is a general utility method that is useful for converting
+to a `ClampedNumeric` type:
+
+*   `MakeClampedNum()` - Creates a new `ClampedNumeric` from the underlying type
+    of the supplied arithmetic or directly convertible type.
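+
+For example (sketch):
+
+```cpp
+auto clamped = MakeClampedNum(int64_t{1} << 40);  // ClampedNumeric<int64_t>.
+int32_t narrowed = ClampAdd(clamped, 1);          // Saturates to INT32_MAX.
+```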
diff --git a/base/numerics/checked_math.h b/base/numerics/checked_math.h
new file mode 100644
index 0000000..ede3344
--- /dev/null
+++ b/base/numerics/checked_math.h
@@ -0,0 +1,393 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_CHECKED_MATH_H_
+#define BASE_NUMERICS_CHECKED_MATH_H_
+
+#include <stddef.h>
+
+#include <limits>
+#include <type_traits>
+
+#include "base/numerics/checked_math_impl.h"
+
+namespace base {
+namespace internal {
+
+template <typename T>
+class CheckedNumeric {
+  static_assert(std::is_arithmetic<T>::value,
+                "CheckedNumeric<T>: T must be a numeric type.");
+
+ public:
+  using type = T;
+
+  constexpr CheckedNumeric() = default;
+
+  // Converting constructor from a CheckedNumeric of any underlying type.
+  template <typename Src>
+  constexpr CheckedNumeric(const CheckedNumeric<Src>& rhs)
+      : state_(rhs.state_.value(), rhs.IsValid()) {}
+
+  template <typename Src>
+  friend class CheckedNumeric;
+
+  // This is not an explicit constructor because we implicitly upgrade regular
+  // numerics to CheckedNumerics to make them easier to use.
+  template <typename Src>
+  constexpr CheckedNumeric(Src value)  // NOLINT(runtime/explicit)
+      : state_(value) {
+    static_assert(std::is_arithmetic<Src>::value, "Argument must be numeric.");
+  }
+
+  // This is not an explicit constructor because we want a seamless conversion
+  // from StrictNumeric types.
+  template <typename Src>
+  constexpr CheckedNumeric(
+      StrictNumeric<Src> value)  // NOLINT(runtime/explicit)
+      : state_(static_cast<Src>(value)) {}
+
+  // IsValid() - The public API to test if a CheckedNumeric is currently valid.
+  // A range checked destination type can be supplied using the Dst template
+  // parameter.
+  template <typename Dst = T>
+  constexpr bool IsValid() const {
+    return state_.is_valid() &&
+           IsValueInRangeForNumericType<Dst>(state_.value());
+  }
+
+  // AssignIfValid(Dst) - Assigns the underlying value if it is currently valid
+  // and is within the range supported by the destination type. Returns true if
+  // successful and false otherwise.
+  template <typename Dst>
+#if defined(__clang__) || defined(__GNUC__)
+  __attribute__((warn_unused_result))
+#elif defined(_MSC_VER)
+  _Check_return_
+#endif
+  constexpr bool
+  AssignIfValid(Dst* result) const {
+    return BASE_NUMERICS_LIKELY(IsValid<Dst>())
+               ? ((*result = static_cast<Dst>(state_.value())), true)
+               : false;
+  }
+
+  // ValueOrDie() - The primary accessor for the underlying value. If the
+  // current state is not valid it will CHECK and crash.
+  // A range checked destination type can be supplied using the Dst template
+  // parameter, which will trigger a CHECK if the value is not in bounds for
+  // the destination.
+  // The CHECK behavior can be overridden by supplying a handler as a
+  // template parameter, for test code, etc. However, the handler cannot access
+  // the underlying value, and it is not available through other means.
+  template <typename Dst = T, class CheckHandler = CheckOnFailure>
+  constexpr StrictNumeric<Dst> ValueOrDie() const {
+    return BASE_NUMERICS_LIKELY(IsValid<Dst>())
+               ? static_cast<Dst>(state_.value())
+               : CheckHandler::template HandleFailure<Dst>();
+  }
+
+  // ValueOrDefault(T default_value) - A convenience method that returns the
+  // current value if the state is valid, and the supplied default_value for
+  // any other state.
+  // A range checked destination type can be supplied using the Dst template
+  // parameter. WARNING: This function may fail to compile or CHECK at runtime
+  // if the supplied default_value is not within range of the destination type.
+  template <typename Dst = T, typename Src>
+  constexpr StrictNumeric<Dst> ValueOrDefault(const Src default_value) const {
+    return BASE_NUMERICS_LIKELY(IsValid<Dst>())
+               ? static_cast<Dst>(state_.value())
+               : checked_cast<Dst>(default_value);
+  }
+
+  // Returns a checked numeric of the specified type, cast from the current
+  // CheckedNumeric. If the current state is invalid or the destination cannot
+  // represent the result then the returned CheckedNumeric will be invalid.
+  template <typename Dst>
+  constexpr CheckedNumeric<typename UnderlyingType<Dst>::type> Cast() const {
+    return *this;
+  }
+
+  // This friend method is available solely for providing more detailed logging
+  // in the tests. Do not implement it in production code, because the
+  // underlying values may change at any time.
+  template <typename U>
+  friend U GetNumericValueForTest(const CheckedNumeric<U>& src);
+
+  // Prototypes for the supported arithmetic operator overloads.
+  template <typename Src>
+  constexpr CheckedNumeric& operator+=(const Src rhs);
+  template <typename Src>
+  constexpr CheckedNumeric& operator-=(const Src rhs);
+  template <typename Src>
+  constexpr CheckedNumeric& operator*=(const Src rhs);
+  template <typename Src>
+  constexpr CheckedNumeric& operator/=(const Src rhs);
+  template <typename Src>
+  constexpr CheckedNumeric& operator%=(const Src rhs);
+  template <typename Src>
+  constexpr CheckedNumeric& operator<<=(const Src rhs);
+  template <typename Src>
+  constexpr CheckedNumeric& operator>>=(const Src rhs);
+  template <typename Src>
+  constexpr CheckedNumeric& operator&=(const Src rhs);
+  template <typename Src>
+  constexpr CheckedNumeric& operator|=(const Src rhs);
+  template <typename Src>
+  constexpr CheckedNumeric& operator^=(const Src rhs);
+
+  constexpr CheckedNumeric operator-() const {
+    // The negation of two's complement int min is int min, so we simply
+    // check for that in the constexpr case.
+    // We use an optimized code path for a known run-time variable.
+    return MustTreatAsConstexpr(state_.value()) || !std::is_signed<T>::value ||
+                   std::is_floating_point<T>::value
+               ? CheckedNumeric<T>(
+                     NegateWrapper(state_.value()),
+                     IsValid() && (!std::is_signed<T>::value ||
+                                   std::is_floating_point<T>::value ||
+                                   NegateWrapper(state_.value()) !=
+                                       std::numeric_limits<T>::lowest()))
+               : FastRuntimeNegate();
+  }
+
+  constexpr CheckedNumeric operator~() const {
+    return CheckedNumeric<decltype(InvertWrapper(T()))>(
+        InvertWrapper(state_.value()), IsValid());
+  }
+
+  constexpr CheckedNumeric Abs() const {
+    return !IsValueNegative(state_.value()) ? *this : -*this;
+  }
+
+  template <typename U>
+  constexpr CheckedNumeric<typename MathWrapper<CheckedMaxOp, T, U>::type> Max(
+      const U rhs) const {
+    using R = typename UnderlyingType<U>::type;
+    using result_type = typename MathWrapper<CheckedMaxOp, T, U>::type;
+    // TODO(jschuh): This can be converted to the MathOp version and remain
+    // constexpr once we have C++14 support.
+    return CheckedNumeric<result_type>(
+        static_cast<result_type>(
+            IsGreater<T, R>::Test(state_.value(), Wrapper<U>::value(rhs))
+                ? state_.value()
+                : Wrapper<U>::value(rhs)),
+        state_.is_valid() && Wrapper<U>::is_valid(rhs));
+  }
+
+  template <typename U>
+  constexpr CheckedNumeric<typename MathWrapper<CheckedMinOp, T, U>::type> Min(
+      const U rhs) const {
+    using R = typename UnderlyingType<U>::type;
+    using result_type = typename MathWrapper<CheckedMinOp, T, U>::type;
+    // TODO(jschuh): This can be converted to the MathOp version and remain
+    // constexpr once we have C++14 support.
+    return CheckedNumeric<result_type>(
+        static_cast<result_type>(
+            IsLess<T, R>::Test(state_.value(), Wrapper<U>::value(rhs))
+                ? state_.value()
+                : Wrapper<U>::value(rhs)),
+        state_.is_valid() && Wrapper<U>::is_valid(rhs));
+  }
+
+  // This function is available only for integral types. It returns an unsigned
+  // integer of the same width as the source type, containing the absolute value
+  // of the source, and properly handling signed min.
+  constexpr CheckedNumeric<typename UnsignedOrFloatForSize<T>::type>
+  UnsignedAbs() const {
+    return CheckedNumeric<typename UnsignedOrFloatForSize<T>::type>(
+        SafeUnsignedAbs(state_.value()), state_.is_valid());
+  }
+
+  constexpr CheckedNumeric& operator++() {
+    *this += 1;
+    return *this;
+  }
+
+  constexpr CheckedNumeric operator++(int) {
+    CheckedNumeric value = *this;
+    *this += 1;
+    return value;
+  }
+
+  constexpr CheckedNumeric& operator--() {
+    *this -= 1;
+    return *this;
+  }
+
+  constexpr CheckedNumeric operator--(int) {
+    CheckedNumeric value = *this;
+    *this -= 1;
+    return value;
+  }
+
+  // These perform the actual math operations on the CheckedNumerics.
+  // Binary arithmetic operations.
+  template <template <typename, typename, typename> class M,
+            typename L,
+            typename R>
+  static constexpr CheckedNumeric MathOp(const L lhs, const R rhs) {
+    using Math = typename MathWrapper<M, L, R>::math;
+    T result = 0;
+    bool is_valid =
+        Wrapper<L>::is_valid(lhs) && Wrapper<R>::is_valid(rhs) &&
+        Math::Do(Wrapper<L>::value(lhs), Wrapper<R>::value(rhs), &result);
+    return CheckedNumeric<T>(result, is_valid);
+  }
+
+  // Assignment arithmetic operations.
+  template <template <typename, typename, typename> class M, typename R>
+  constexpr CheckedNumeric& MathOp(const R rhs) {
+    using Math = typename MathWrapper<M, T, R>::math;
+    T result = 0;  // Using T as the destination saves a range check.
+    bool is_valid = state_.is_valid() && Wrapper<R>::is_valid(rhs) &&
+                    Math::Do(state_.value(), Wrapper<R>::value(rhs), &result);
+    *this = CheckedNumeric<T>(result, is_valid);
+    return *this;
+  }
+
+ private:
+  CheckedNumericState<T> state_;
+
+  CheckedNumeric FastRuntimeNegate() const {
+    T result;
+    bool success = CheckedSubOp<T, T>::Do(T(0), state_.value(), &result);
+    return CheckedNumeric<T>(result, IsValid() && success);
+  }
+
+  template <typename Src>
+  constexpr CheckedNumeric(Src value, bool is_valid)
+      : state_(value, is_valid) {}
+
+  // These wrappers allow us to handle state the same way for both
+  // CheckedNumeric and POD arithmetic types.
+  template <typename Src>
+  struct Wrapper {
+    static constexpr bool is_valid(Src) { return true; }
+    static constexpr Src value(Src value) { return value; }
+  };
+
+  template <typename Src>
+  struct Wrapper<CheckedNumeric<Src>> {
+    static constexpr bool is_valid(const CheckedNumeric<Src> v) {
+      return v.IsValid();
+    }
+    static constexpr Src value(const CheckedNumeric<Src> v) {
+      return v.state_.value();
+    }
+  };
+
+  template <typename Src>
+  struct Wrapper<StrictNumeric<Src>> {
+    static constexpr bool is_valid(const StrictNumeric<Src>) { return true; }
+    static constexpr Src value(const StrictNumeric<Src> v) {
+      return static_cast<Src>(v);
+    }
+  };
+};
+
+// Convenience functions to avoid the ugly template disambiguator syntax.
+template <typename Dst, typename Src>
+constexpr bool IsValidForType(const CheckedNumeric<Src> value) {
+  return value.template IsValid<Dst>();
+}
+
+template <typename Dst, typename Src>
+constexpr StrictNumeric<Dst> ValueOrDieForType(
+    const CheckedNumeric<Src> value) {
+  return value.template ValueOrDie<Dst>();
+}
+
+template <typename Dst, typename Src, typename Default>
+constexpr StrictNumeric<Dst> ValueOrDefaultForType(
+    const CheckedNumeric<Src> value,
+    const Default default_value) {
+  return value.template ValueOrDefault<Dst>(default_value);
+}
+
+// Convenience wrapper to return a new CheckedNumeric from the provided
+// arithmetic or CheckedNumeric type.
+template <typename T>
+constexpr CheckedNumeric<typename UnderlyingType<T>::type> MakeCheckedNum(
+    const T value) {
+  return value;
+}
+
+// These implement the variadic wrapper for the math operations.
+template <template <typename, typename, typename> class M,
+          typename L,
+          typename R>
+constexpr CheckedNumeric<typename MathWrapper<M, L, R>::type> CheckMathOp(
+    const L lhs,
+    const R rhs) {
+  using Math = typename MathWrapper<M, L, R>::math;
+  return CheckedNumeric<typename Math::result_type>::template MathOp<M>(lhs,
+                                                                        rhs);
+}
+
+// General purpose wrapper template for arithmetic operations.
+template <template <typename, typename, typename> class M,
+          typename L,
+          typename R,
+          typename... Args>
+constexpr CheckedNumeric<typename ResultType<M, L, R, Args...>::type>
+CheckMathOp(const L lhs, const R rhs, const Args... args) {
+  return CheckMathOp<M>(CheckMathOp<M>(lhs, rhs), args...);
+}
+
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Add, +, +=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Sub, -, -=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Mul, *, *=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Div, /, /=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Mod, %, %=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Lsh, <<, <<=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Rsh, >>, >>=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, And, &, &=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Or, |, |=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Xor, ^, ^=)
+BASE_NUMERIC_ARITHMETIC_VARIADIC(Checked, Check, Max)
+BASE_NUMERIC_ARITHMETIC_VARIADIC(Checked, Check, Min)
+
+// These are some extra StrictNumeric operators to support simple pointer
+// arithmetic with our result types. Since wrapping on a pointer is always
+// bad, we trigger the CHECK condition here.
+template <typename L, typename R>
+L* operator+(L* lhs, const StrictNumeric<R> rhs) {
+  uintptr_t result = CheckAdd(reinterpret_cast<uintptr_t>(lhs),
+                              CheckMul(sizeof(L), static_cast<R>(rhs)))
+                         .template ValueOrDie<uintptr_t>();
+  return reinterpret_cast<L*>(result);
+}
+
+template <typename L, typename R>
+L* operator-(L* lhs, const StrictNumeric<R> rhs) {
+  uintptr_t result = CheckSub(reinterpret_cast<uintptr_t>(lhs),
+                              CheckMul(sizeof(L), static_cast<R>(rhs)))
+                         .template ValueOrDie<uintptr_t>();
+  return reinterpret_cast<L*>(result);
+}
+
+}  // namespace internal
+
+using internal::CheckedNumeric;
+using internal::IsValidForType;
+using internal::ValueOrDieForType;
+using internal::ValueOrDefaultForType;
+using internal::MakeCheckedNum;
+using internal::CheckMax;
+using internal::CheckMin;
+using internal::CheckAdd;
+using internal::CheckSub;
+using internal::CheckMul;
+using internal::CheckDiv;
+using internal::CheckMod;
+using internal::CheckLsh;
+using internal::CheckRsh;
+using internal::CheckAnd;
+using internal::CheckOr;
+using internal::CheckXor;
+
+}  // namespace base
+
+#endif  // BASE_NUMERICS_CHECKED_MATH_H_
diff --git a/base/numerics/checked_math_impl.h b/base/numerics/checked_math_impl.h
new file mode 100644
index 0000000..e083389
--- /dev/null
+++ b/base/numerics/checked_math_impl.h
@@ -0,0 +1,567 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_CHECKED_MATH_IMPL_H_
+#define BASE_NUMERICS_CHECKED_MATH_IMPL_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <climits>
+#include <cmath>
+#include <cstdlib>
+#include <limits>
+#include <type_traits>
+
+#include "base/numerics/safe_conversions.h"
+#include "base/numerics/safe_math_shared_impl.h"
+
+namespace base {
+namespace internal {
+
+template <typename T>
+constexpr bool CheckedAddImpl(T x, T y, T* result) {
+  static_assert(std::is_integral<T>::value, "Type must be integral");
+  // Since the value of x + y is undefined on overflow if we have a signed
+  // type, we compute it using the unsigned type of the same size.
+  using UnsignedDst = typename std::make_unsigned<T>::type;
+  using SignedDst = typename std::make_signed<T>::type;
+  UnsignedDst ux = static_cast<UnsignedDst>(x);
+  UnsignedDst uy = static_cast<UnsignedDst>(y);
+  UnsignedDst uresult = static_cast<UnsignedDst>(ux + uy);
+  *result = static_cast<T>(uresult);
+  // Addition is valid if the sign of (x + y) is equal to either that of x or
+  // that of y.
+  return (std::is_signed<T>::value)
+             ? static_cast<SignedDst>((uresult ^ ux) & (uresult ^ uy)) >= 0
+             : uresult >= uy;  // Unsigned is either valid or underflow.
+}
+
+template <typename T, typename U, class Enable = void>
+struct CheckedAddOp {};
+
+template <typename T, typename U>
+struct CheckedAddOp<T,
+                    U,
+                    typename std::enable_if<std::is_integral<T>::value &&
+                                            std::is_integral<U>::value>::type> {
+  using result_type = typename MaxExponentPromotion<T, U>::type;
+  template <typename V>
+  static constexpr bool Do(T x, U y, V* result) {
+    // TODO(jschuh) Make this "constexpr if" once we're C++17.
+    if (CheckedAddFastOp<T, U>::is_supported)
+      return CheckedAddFastOp<T, U>::Do(x, y, result);
+
+    // Double the underlying type up to a full machine word.
+    using FastPromotion = typename FastIntegerArithmeticPromotion<T, U>::type;
+    using Promotion =
+        typename std::conditional<(IntegerBitsPlusSign<FastPromotion>::value >
+                                   IntegerBitsPlusSign<intptr_t>::value),
+                                  typename BigEnoughPromotion<T, U>::type,
+                                  FastPromotion>::type;
+    // Fail if either operand is out of range for the promoted type.
+    // TODO(jschuh): This could be made to work for a broader range of values.
+    if (BASE_NUMERICS_UNLIKELY(!IsValueInRangeForNumericType<Promotion>(x) ||
+                               !IsValueInRangeForNumericType<Promotion>(y))) {
+      return false;
+    }
+
+    Promotion presult = {};
+    bool is_valid = true;
+    if (IsIntegerArithmeticSafe<Promotion, T, U>::value) {
+      presult = static_cast<Promotion>(x) + static_cast<Promotion>(y);
+    } else {
+      is_valid = CheckedAddImpl(static_cast<Promotion>(x),
+                                static_cast<Promotion>(y), &presult);
+    }
+    *result = static_cast<V>(presult);
+    return is_valid && IsValueInRangeForNumericType<V>(presult);
+  }
+};
+
+template <typename T>
+constexpr bool CheckedSubImpl(T x, T y, T* result) {
+  static_assert(std::is_integral<T>::value, "Type must be integral");
+  // Since the value of x - y is undefined on overflow if we have a signed
+  // type, we compute it using the unsigned type of the same size.
+  using UnsignedDst = typename std::make_unsigned<T>::type;
+  using SignedDst = typename std::make_signed<T>::type;
+  UnsignedDst ux = static_cast<UnsignedDst>(x);
+  UnsignedDst uy = static_cast<UnsignedDst>(y);
+  UnsignedDst uresult = static_cast<UnsignedDst>(ux - uy);
+  *result = static_cast<T>(uresult);
+  // Subtraction is valid if either x and y have the same sign, or (x - y) and
+  // x have the same sign.
+  return (std::is_signed<T>::value)
+             ? static_cast<SignedDst>((uresult ^ ux) & (ux ^ uy)) >= 0
+             : x >= y;
+}
+
+template <typename T, typename U, class Enable = void>
+struct CheckedSubOp {};
+
+template <typename T, typename U>
+struct CheckedSubOp<T,
+                    U,
+                    typename std::enable_if<std::is_integral<T>::value &&
+                                            std::is_integral<U>::value>::type> {
+  using result_type = typename MaxExponentPromotion<T, U>::type;
+  template <typename V>
+  static constexpr bool Do(T x, U y, V* result) {
+    // TODO(jschuh) Make this "constexpr if" once we're C++17.
+    if (CheckedSubFastOp<T, U>::is_supported)
+      return CheckedSubFastOp<T, U>::Do(x, y, result);
+
+    // Double the underlying type up to a full machine word.
+    using FastPromotion = typename FastIntegerArithmeticPromotion<T, U>::type;
+    using Promotion =
+        typename std::conditional<(IntegerBitsPlusSign<FastPromotion>::value >
+                                   IntegerBitsPlusSign<intptr_t>::value),
+                                  typename BigEnoughPromotion<T, U>::type,
+                                  FastPromotion>::type;
+    // Fail if either operand is out of range for the promoted type.
+    // TODO(jschuh): This could be made to work for a broader range of values.
+    if (BASE_NUMERICS_UNLIKELY(!IsValueInRangeForNumericType<Promotion>(x) ||
+                               !IsValueInRangeForNumericType<Promotion>(y))) {
+      return false;
+    }
+
+    Promotion presult = {};
+    bool is_valid = true;
+    if (IsIntegerArithmeticSafe<Promotion, T, U>::value) {
+      presult = static_cast<Promotion>(x) - static_cast<Promotion>(y);
+    } else {
+      is_valid = CheckedSubImpl(static_cast<Promotion>(x),
+                                static_cast<Promotion>(y), &presult);
+    }
+    *result = static_cast<V>(presult);
+    return is_valid && IsValueInRangeForNumericType<V>(presult);
+  }
+};
+
+template <typename T>
+constexpr bool CheckedMulImpl(T x, T y, T* result) {
+  static_assert(std::is_integral<T>::value, "Type must be integral");
+  // Since the value of x*y is potentially undefined if we have a signed type,
+  // we compute it using the unsigned type of the same size.
+  using UnsignedDst = typename std::make_unsigned<T>::type;
+  using SignedDst = typename std::make_signed<T>::type;
+  const UnsignedDst ux = SafeUnsignedAbs(x);
+  const UnsignedDst uy = SafeUnsignedAbs(y);
+  UnsignedDst uresult = static_cast<UnsignedDst>(ux * uy);
+  const bool is_negative =
+      std::is_signed<T>::value && static_cast<SignedDst>(x ^ y) < 0;
+  *result = is_negative ? 0 - uresult : uresult;
+  // We have a fast out for unsigned identity or zero on the second operand.
+  // After that it's an unsigned overflow check on the absolute value, with
+  // a +1 bound for a negative result.
+  return uy <= UnsignedDst(!std::is_signed<T>::value || is_negative) ||
+         ux <= (std::numeric_limits<T>::max() + UnsignedDst(is_negative)) / uy;
+}
+
+template <typename T, typename U, class Enable = void>
+struct CheckedMulOp {};
+
+template <typename T, typename U>
+struct CheckedMulOp<T,
+                    U,
+                    typename std::enable_if<std::is_integral<T>::value &&
+                                            std::is_integral<U>::value>::type> {
+  using result_type = typename MaxExponentPromotion<T, U>::type;
+  template <typename V>
+  static constexpr bool Do(T x, U y, V* result) {
+    // TODO(jschuh) Make this "constexpr if" once we're C++17.
+    if (CheckedMulFastOp<T, U>::is_supported)
+      return CheckedMulFastOp<T, U>::Do(x, y, result);
+
+    using Promotion = typename FastIntegerArithmeticPromotion<T, U>::type;
+    // Verify the destination type can hold the result (always true for 0).
+    if (BASE_NUMERICS_UNLIKELY((!IsValueInRangeForNumericType<Promotion>(x) ||
+                                !IsValueInRangeForNumericType<Promotion>(y)) &&
+                               x && y)) {
+      return false;
+    }
+
+    Promotion presult = {};
+    bool is_valid = true;
+    if (CheckedMulFastOp<Promotion, Promotion>::is_supported) {
+      // The fast op may be available with the promoted type.
+      is_valid = CheckedMulFastOp<Promotion, Promotion>::Do(x, y, &presult);
+    } else if (IsIntegerArithmeticSafe<Promotion, T, U>::value) {
+      presult = static_cast<Promotion>(x) * static_cast<Promotion>(y);
+    } else {
+      is_valid = CheckedMulImpl(static_cast<Promotion>(x),
+                                static_cast<Promotion>(y), &presult);
+    }
+    *result = static_cast<V>(presult);
+    return is_valid && IsValueInRangeForNumericType<V>(presult);
+  }
+};
+
+// Division just requires a check for a zero denominator or an invalid negation
+// on signed min/-1.
+template <typename T, typename U, class Enable = void>
+struct CheckedDivOp {};
+
+template <typename T, typename U>
+struct CheckedDivOp<T,
+                    U,
+                    typename std::enable_if<std::is_integral<T>::value &&
+                                            std::is_integral<U>::value>::type> {
+  using result_type = typename MaxExponentPromotion<T, U>::type;
+  template <typename V>
+  static constexpr bool Do(T x, U y, V* result) {
+    if (BASE_NUMERICS_UNLIKELY(!y))
+      return false;
+
+    // The overflow check can be compiled away if we don't have the exact
+    // combination of types needed to trigger this case.
+    using Promotion = typename BigEnoughPromotion<T, U>::type;
+    if (BASE_NUMERICS_UNLIKELY(
+            (std::is_signed<T>::value && std::is_signed<U>::value &&
+             IsTypeInRangeForNumericType<T, Promotion>::value &&
+             static_cast<Promotion>(x) ==
+                 std::numeric_limits<Promotion>::lowest() &&
+             y == static_cast<U>(-1)))) {
+      return false;
+    }
+
+    // This branch always compiles away if the above branch wasn't removed.
+    if (BASE_NUMERICS_UNLIKELY((!IsValueInRangeForNumericType<Promotion>(x) ||
+                                !IsValueInRangeForNumericType<Promotion>(y)) &&
+                               x)) {
+      return false;
+    }
+
+    Promotion presult = Promotion(x) / Promotion(y);
+    *result = static_cast<V>(presult);
+    return IsValueInRangeForNumericType<V>(presult);
+  }
+};
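+
+// Illustrative examples (comment only, T = U = int32_t): Do() rejects
+// y == 0 and the lone overflowing quotient INT32_MIN / -1; every other
+// quotient is exact and in range for the promoted type.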
+
+template <typename T, typename U, class Enable = void>
+struct CheckedModOp {};
+
+template <typename T, typename U>
+struct CheckedModOp<T,
+                    U,
+                    typename std::enable_if<std::is_integral<T>::value &&
+                                            std::is_integral<U>::value>::type> {
+  using result_type = typename MaxExponentPromotion<T, U>::type;
+  template <typename V>
+  static constexpr bool Do(T x, U y, V* result) {
+    using Promotion = typename BigEnoughPromotion<T, U>::type;
+    if (BASE_NUMERICS_LIKELY(y)) {
+      Promotion presult = static_cast<Promotion>(x) % static_cast<Promotion>(y);
+      *result = static_cast<V>(presult);
+      return IsValueInRangeForNumericType<V>(presult);
+    }
+    return false;
+  }
+};
+
+template <typename T, typename U, class Enable = void>
+struct CheckedLshOp {};
+
+// Left shift. Shifts less than 0 or greater than or equal to the number
+// of bits in the promoted type are undefined. Shifts of negative values
+// are undefined. Otherwise it is defined when the result fits.
+template <typename T, typename U>
+struct CheckedLshOp<T,
+                    U,
+                    typename std::enable_if<std::is_integral<T>::value &&
+                                            std::is_integral<U>::value>::type> {
+  using result_type = T;
+  template <typename V>
+  static constexpr bool Do(T x, U shift, V* result) {
+    // Disallow negative numbers and verify the shift is in bounds.
+    if (BASE_NUMERICS_LIKELY(!IsValueNegative(x) &&
+                             as_unsigned(shift) <
+                                 as_unsigned(std::numeric_limits<T>::digits))) {
+      // Shift as unsigned to avoid undefined behavior.
+      *result = static_cast<V>(as_unsigned(x) << shift);
+      // If the shift can be reversed, we know it was valid.
+      return *result >> shift == x;
+    }
+
+    // Handle the legal corner-case of a full-width signed shift of zero.
+    return std::is_signed<T>::value && !x &&
+           as_unsigned(shift) == as_unsigned(std::numeric_limits<T>::digits);
+  }
+};
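+
+// An illustrative sketch of the cases above (comment only, T = int32_t, so
+// std::numeric_limits<T>::digits == 31): shifting 1 by 30 succeeds and
+// stores 1 << 30; shifting 1 by 31 fails because the shift is out of bounds
+// for a nonzero value; and the corner case of shifting 0 by 31 succeeds and
+// stores 0.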
+
+template <typename T, typename U, class Enable = void>
+struct CheckedRshOp {};
+
+// Right shift. Shifts less than 0 or greater than or equal to the number
+// of bits in the promoted type are undefined. Otherwise, it is always defined,
+// but a right shift of a negative value is implementation-dependent.
+template <typename T, typename U>
+struct CheckedRshOp<T,
+                    U,
+                    typename std::enable_if<std::is_integral<T>::value &&
+                                            std::is_integral<U>::value>::type> {
+  using result_type = T;
+  template <typename V>
+  static bool Do(T x, U shift, V* result) {
+    // Use the type conversion to push negative values out of range.
+    if (BASE_NUMERICS_LIKELY(as_unsigned(shift) <
+                             IntegerBitsPlusSign<T>::value)) {
+      T tmp = x >> shift;
+      *result = static_cast<V>(tmp);
+      return IsValueInRangeForNumericType<V>(tmp);
+    }
+    return false;
+  }
+};
+
+template <typename T, typename U, class Enable = void>
+struct CheckedAndOp {};
+
+// For simplicity we support only unsigned integer results.
+template <typename T, typename U>
+struct CheckedAndOp<T,
+                    U,
+                    typename std::enable_if<std::is_integral<T>::value &&
+                                            std::is_integral<U>::value>::type> {
+  using result_type = typename std::make_unsigned<
+      typename MaxExponentPromotion<T, U>::type>::type;
+  template <typename V>
+  static constexpr bool Do(T x, U y, V* result) {
+    result_type tmp = static_cast<result_type>(x) & static_cast<result_type>(y);
+    *result = static_cast<V>(tmp);
+    return IsValueInRangeForNumericType<V>(tmp);
+  }
+};
+
+template <typename T, typename U, class Enable = void>
+struct CheckedOrOp {};
+
+// For simplicity we support only unsigned integers.
+template <typename T, typename U>
+struct CheckedOrOp<T,
+                   U,
+                   typename std::enable_if<std::is_integral<T>::value &&
+                                           std::is_integral<U>::value>::type> {
+  using result_type = typename std::make_unsigned<
+      typename MaxExponentPromotion<T, U>::type>::type;
+  template <typename V>
+  static constexpr bool Do(T x, U y, V* result) {
+    result_type tmp = static_cast<result_type>(x) | static_cast<result_type>(y);
+    *result = static_cast<V>(tmp);
+    return IsValueInRangeForNumericType<V>(tmp);
+  }
+};
+
+template <typename T, typename U, class Enable = void>
+struct CheckedXorOp {};
+
+// For simplicity we support only unsigned integers.
+template <typename T, typename U>
+struct CheckedXorOp<T,
+                    U,
+                    typename std::enable_if<std::is_integral<T>::value &&
+                                            std::is_integral<U>::value>::type> {
+  using result_type = typename std::make_unsigned<
+      typename MaxExponentPromotion<T, U>::type>::type;
+  template <typename V>
+  static constexpr bool Do(T x, U y, V* result) {
+    result_type tmp = static_cast<result_type>(x) ^ static_cast<result_type>(y);
+    *result = static_cast<V>(tmp);
+    return IsValueInRangeForNumericType<V>(tmp);
+  }
+};
+
+// Max doesn't really need to be implemented this way because it can't fail,
+// but it makes the code much cleaner to use the MathOp wrappers.
+template <typename T, typename U, class Enable = void>
+struct CheckedMaxOp {};
+
+template <typename T, typename U>
+struct CheckedMaxOp<
+    T,
+    U,
+    typename std::enable_if<std::is_arithmetic<T>::value &&
+                            std::is_arithmetic<U>::value>::type> {
+  using result_type = typename MaxExponentPromotion<T, U>::type;
+  template <typename V>
+  static constexpr bool Do(T x, U y, V* result) {
+    result_type tmp = IsGreater<T, U>::Test(x, y) ? static_cast<result_type>(x)
+                                                  : static_cast<result_type>(y);
+    *result = static_cast<V>(tmp);
+    return IsValueInRangeForNumericType<V>(tmp);
+  }
+};
+
+// Min doesn't really need to be implemented this way because it can't fail,
+// but it makes the code much cleaner to use the MathOp wrappers.
+template <typename T, typename U, class Enable = void>
+struct CheckedMinOp {};
+
+template <typename T, typename U>
+struct CheckedMinOp<
+    T,
+    U,
+    typename std::enable_if<std::is_arithmetic<T>::value &&
+                            std::is_arithmetic<U>::value>::type> {
+  using result_type = typename LowestValuePromotion<T, U>::type;
+  template <typename V>
+  static constexpr bool Do(T x, U y, V* result) {
+    result_type tmp = IsLess<T, U>::Test(x, y) ? static_cast<result_type>(x)
+                                               : static_cast<result_type>(y);
+    *result = static_cast<V>(tmp);
+    return IsValueInRangeForNumericType<V>(tmp);
+  }
+};
+
+// This is just boilerplate that wraps the standard floating point arithmetic.
+// A macro isn't the nicest solution, but it beats rewriting these repeatedly.
+#define BASE_FLOAT_ARITHMETIC_OPS(NAME, OP)                              \
+  template <typename T, typename U>                                      \
+  struct Checked##NAME##Op<                                              \
+      T, U,                                                              \
+      typename std::enable_if<std::is_floating_point<T>::value ||        \
+                              std::is_floating_point<U>::value>::type> { \
+    using result_type = typename MaxExponentPromotion<T, U>::type;       \
+    template <typename V>                                                \
+    static constexpr bool Do(T x, U y, V* result) {                      \
+      using Promotion = typename MaxExponentPromotion<T, U>::type;       \
+      Promotion presult = x OP y;                                        \
+      *result = static_cast<V>(presult);                                 \
+      return IsValueInRangeForNumericType<V>(presult);                   \
+    }                                                                    \
+  };
+
+BASE_FLOAT_ARITHMETIC_OPS(Add, +)
+BASE_FLOAT_ARITHMETIC_OPS(Sub, -)
+BASE_FLOAT_ARITHMETIC_OPS(Mul, *)
+BASE_FLOAT_ARITHMETIC_OPS(Div, /)
+
+#undef BASE_FLOAT_ARITHMETIC_OPS
+
+// Floats carry around their validity state with them, but integers do not. So,
+// we wrap the underlying value in a specialization in order to hide that detail
+// and expose an interface via accessors.
+enum NumericRepresentation {
+  NUMERIC_INTEGER,
+  NUMERIC_FLOATING,
+  NUMERIC_UNKNOWN
+};
+
+template <typename NumericType>
+struct GetNumericRepresentation {
+  static const NumericRepresentation value =
+      std::is_integral<NumericType>::value
+          ? NUMERIC_INTEGER
+          : (std::is_floating_point<NumericType>::value ? NUMERIC_FLOATING
+                                                        : NUMERIC_UNKNOWN);
+};
+
+template <typename T,
+          NumericRepresentation type = GetNumericRepresentation<T>::value>
+class CheckedNumericState {};
+
+// Integrals require quite a bit of additional housekeeping to manage state.
+template <typename T>
+class CheckedNumericState<T, NUMERIC_INTEGER> {
+ private:
+  // is_valid_ precedes value_ because member initializers in the constructors
+  // are evaluated in field order, and is_valid_ must be read when initializing
+  // value_.
+  bool is_valid_;
+  T value_;
+
+  // Ensures that a type conversion does not trigger undefined behavior.
+  template <typename Src>
+  static constexpr T WellDefinedConversionOrZero(const Src value,
+                                                 const bool is_valid) {
+    using SrcType = typename internal::UnderlyingType<Src>::type;
+    return (std::is_integral<SrcType>::value || is_valid)
+               ? static_cast<T>(value)
+               : static_cast<T>(0);
+  }
+
+ public:
+  template <typename Src, NumericRepresentation type>
+  friend class CheckedNumericState;
+
+  constexpr CheckedNumericState() : is_valid_(true), value_(0) {}
+
+  template <typename Src>
+  constexpr CheckedNumericState(Src value, bool is_valid)
+      : is_valid_(is_valid && IsValueInRangeForNumericType<T>(value)),
+        value_(WellDefinedConversionOrZero(value, is_valid_)) {
+    static_assert(std::is_arithmetic<Src>::value, "Argument must be numeric.");
+  }
+
+  // Copy constructor.
+  template <typename Src>
+  constexpr CheckedNumericState(const CheckedNumericState<Src>& rhs)
+      : is_valid_(rhs.IsValid()),
+        value_(WellDefinedConversionOrZero(rhs.value(), is_valid_)) {}
+
+  template <typename Src>
+  constexpr explicit CheckedNumericState(Src value)
+      : is_valid_(IsValueInRangeForNumericType<T>(value)),
+        value_(WellDefinedConversionOrZero(value, is_valid_)) {}
+
+  constexpr bool is_valid() const { return is_valid_; }
+  constexpr T value() const { return value_; }
+};
+
+// Floating points maintain their own validity, but need translation wrappers.
+template <typename T>
+class CheckedNumericState<T, NUMERIC_FLOATING> {
+ private:
+  T value_;
+
+  // Ensures that a type conversion does not trigger undefined behavior.
+  template <typename Src>
+  static constexpr T WellDefinedConversionOrNaN(const Src value,
+                                                const bool is_valid) {
+    using SrcType = typename internal::UnderlyingType<Src>::type;
+    return (StaticDstRangeRelationToSrcRange<T, SrcType>::value ==
+                NUMERIC_RANGE_CONTAINED ||
+            is_valid)
+               ? static_cast<T>(value)
+               : std::numeric_limits<T>::quiet_NaN();
+  }
+
+ public:
+  template <typename Src, NumericRepresentation type>
+  friend class CheckedNumericState;
+
+  constexpr CheckedNumericState() : value_(0.0) {}
+
+  template <typename Src>
+  constexpr CheckedNumericState(Src value, bool is_valid)
+      : value_(WellDefinedConversionOrNaN(value, is_valid)) {}
+
+  template <typename Src>
+  constexpr explicit CheckedNumericState(Src value)
+      : value_(WellDefinedConversionOrNaN(
+            value,
+            IsValueInRangeForNumericType<T>(value))) {}
+
+  // Copy constructor.
+  template <typename Src>
+  constexpr CheckedNumericState(const CheckedNumericState<Src>& rhs)
+      : value_(WellDefinedConversionOrNaN(
+            rhs.value(),
+            rhs.is_valid() && IsValueInRangeForNumericType<T>(rhs.value()))) {}
+
+  constexpr bool is_valid() const {
+    // Written this way because std::isfinite is not reliably constexpr.
+    return MustTreatAsConstexpr(value_)
+               ? value_ <= std::numeric_limits<T>::max() &&
+                     value_ >= std::numeric_limits<T>::lowest()
+               : std::isfinite(value_);
+  }
+  constexpr T value() const { return value_; }
+};
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_NUMERICS_CHECKED_MATH_IMPL_H_
diff --git a/base/numerics/clamped_math.h b/base/numerics/clamped_math.h
new file mode 100644
index 0000000..b184363
--- /dev/null
+++ b/base/numerics/clamped_math.h
@@ -0,0 +1,262 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_CLAMPED_MATH_H_
+#define BASE_NUMERICS_CLAMPED_MATH_H_
+
+#include <stddef.h>
+
+#include <limits>
+#include <type_traits>
+
+#include "base/numerics/clamped_math_impl.h"
+
+namespace base {
+namespace internal {
+
+template <typename T>
+class ClampedNumeric {
+  static_assert(std::is_arithmetic<T>::value,
+                "ClampedNumeric<T>: T must be a numeric type.");
+
+ public:
+  using type = T;
+
+  constexpr ClampedNumeric() : value_(0) {}
+
+  // Copy constructor.
+  template <typename Src>
+  constexpr ClampedNumeric(const ClampedNumeric<Src>& rhs)
+      : value_(saturated_cast<T>(rhs.value_)) {}
+
+  template <typename Src>
+  friend class ClampedNumeric;
+
+  // This is not an explicit constructor because we implicitly upgrade regular
+  // numerics to ClampedNumerics to make them easier to use.
+  template <typename Src>
+  constexpr ClampedNumeric(Src value)  // NOLINT(runtime/explicit)
+      : value_(saturated_cast<T>(value)) {
+    static_assert(std::is_arithmetic<Src>::value, "Argument must be numeric.");
+  }
+
+  // This is not an explicit constructor because we want a seamless conversion
+  // from StrictNumeric types.
+  template <typename Src>
+  constexpr ClampedNumeric(
+      StrictNumeric<Src> value)  // NOLINT(runtime/explicit)
+      : value_(saturated_cast<T>(static_cast<Src>(value))) {}
+
+  // Returns a ClampedNumeric of the specified type, cast from the current
+  // ClampedNumeric, and saturated to the destination type.
+  template <typename Dst>
+  constexpr ClampedNumeric<typename UnderlyingType<Dst>::type> Cast() const {
+    return *this;
+  }
+
+  // Prototypes for the supported arithmetic operator overloads.
+  template <typename Src>
+  constexpr ClampedNumeric& operator+=(const Src rhs);
+  template <typename Src>
+  constexpr ClampedNumeric& operator-=(const Src rhs);
+  template <typename Src>
+  constexpr ClampedNumeric& operator*=(const Src rhs);
+  template <typename Src>
+  constexpr ClampedNumeric& operator/=(const Src rhs);
+  template <typename Src>
+  constexpr ClampedNumeric& operator%=(const Src rhs);
+  template <typename Src>
+  constexpr ClampedNumeric& operator<<=(const Src rhs);
+  template <typename Src>
+  constexpr ClampedNumeric& operator>>=(const Src rhs);
+  template <typename Src>
+  constexpr ClampedNumeric& operator&=(const Src rhs);
+  template <typename Src>
+  constexpr ClampedNumeric& operator|=(const Src rhs);
+  template <typename Src>
+  constexpr ClampedNumeric& operator^=(const Src rhs);
+
+  constexpr ClampedNumeric operator-() const {
+    // The negation of two's complement int min is int min, so that's the
+    // only overflow case where we will saturate.
+    return ClampedNumeric<T>(SaturatedNegWrapper(value_));
+  }
+
+  constexpr ClampedNumeric operator~() const {
+    return ClampedNumeric<decltype(InvertWrapper(T()))>(InvertWrapper(value_));
+  }
+
+  constexpr ClampedNumeric Abs() const {
+    // The negation of two's complement int min is int min, so that's the
+    // only overflow case where we will saturate.
+    return ClampedNumeric<T>(SaturatedAbsWrapper(value_));
+  }
+
+  template <typename U>
+  constexpr ClampedNumeric<typename MathWrapper<ClampedMaxOp, T, U>::type> Max(
+      const U rhs) const {
+    using result_type = typename MathWrapper<ClampedMaxOp, T, U>::type;
+    return ClampedNumeric<result_type>(
+        ClampedMaxOp<T, U>::Do(value_, Wrapper<U>::value(rhs)));
+  }
+
+  template <typename U>
+  constexpr ClampedNumeric<typename MathWrapper<ClampedMinOp, T, U>::type> Min(
+      const U rhs) const {
+    using result_type = typename MathWrapper<ClampedMinOp, T, U>::type;
+    return ClampedNumeric<result_type>(
+        ClampedMinOp<T, U>::Do(value_, Wrapper<U>::value(rhs)));
+  }
+
+  // This function is available only for integral types. It returns an unsigned
+  // integer of the same width as the source type, containing the absolute value
+  // of the source, and properly handling signed min.
+  constexpr ClampedNumeric<typename UnsignedOrFloatForSize<T>::type>
+  UnsignedAbs() const {
+    return ClampedNumeric<typename UnsignedOrFloatForSize<T>::type>(
+        SafeUnsignedAbs(value_));
+  }
+
+  constexpr ClampedNumeric& operator++() {
+    *this += 1;
+    return *this;
+  }
+
+  constexpr ClampedNumeric operator++(int) {
+    ClampedNumeric value = *this;
+    *this += 1;
+    return value;
+  }
+
+  constexpr ClampedNumeric& operator--() {
+    *this -= 1;
+    return *this;
+  }
+
+  constexpr ClampedNumeric operator--(int) {
+    ClampedNumeric value = *this;
+    *this -= 1;
+    return value;
+  }
+
+  // These perform the actual math operations on the ClampedNumerics.
+  // Binary arithmetic operations.
+  template <template <typename, typename, typename> class M,
+            typename L,
+            typename R>
+  static constexpr ClampedNumeric MathOp(const L lhs, const R rhs) {
+    using Math = typename MathWrapper<M, L, R>::math;
+    return ClampedNumeric<T>(
+        Math::template Do<T>(Wrapper<L>::value(lhs), Wrapper<R>::value(rhs)));
+  }
+
+  // Assignment arithmetic operations.
+  template <template <typename, typename, typename> class M, typename R>
+  constexpr ClampedNumeric& MathOp(const R rhs) {
+    using Math = typename MathWrapper<M, T, R>::math;
+    *this =
+        ClampedNumeric<T>(Math::template Do<T>(value_, Wrapper<R>::value(rhs)));
+    return *this;
+  }
+
+  template <typename Dst>
+  constexpr operator Dst() const {
+    return saturated_cast<typename ArithmeticOrUnderlyingEnum<Dst>::type>(
+        value_);
+  }
+
+  // This method extracts the raw integer value without saturating it to the
+  // destination type as the conversion operator does. This is useful when
+  // e.g. assigning to an auto type or passing as a deduced template parameter.
+  constexpr T RawValue() const { return value_; }
+
+ private:
+  T value_;
+
+  // These wrappers allow us to handle state the same way for both
+  // ClampedNumeric and POD arithmetic types.
+  template <typename Src>
+  struct Wrapper {
+    static constexpr Src value(Src value) {
+      return static_cast<typename UnderlyingType<Src>::type>(value);
+    }
+  };
+};
+
+// Convenience wrapper to return a new ClampedNumeric from the provided
+// arithmetic or ClampedNumeric type.
+template <typename T>
+constexpr ClampedNumeric<typename UnderlyingType<T>::type> MakeClampedNum(
+    const T value) {
+  return value;
+}
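+
+// A minimal usage sketch (illustrative comment only; the operators are
+// supplied by the BASE_NUMERIC_ARITHMETIC_OPERATORS expansions below):
+//   ClampedNumeric<int> i = std::numeric_limits<int>::max();
+//   i += 1;  // Saturates at INT_MAX instead of overflowing.
+//   int8_t b = ClampAdd(100, 100);  // The conversion clamps 200 to 127.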
+
+// Overload the ostream output operator to make logging work nicely.
+template <typename T>
+std::ostream& operator<<(std::ostream& os, const ClampedNumeric<T>& value) {
+  os << static_cast<T>(value);
+  return os;
+}
+
+// These implement the variadic wrapper for the math operations.
+template <template <typename, typename, typename> class M,
+          typename L,
+          typename R>
+constexpr ClampedNumeric<typename MathWrapper<M, L, R>::type> ClampMathOp(
+    const L lhs,
+    const R rhs) {
+  using Math = typename MathWrapper<M, L, R>::math;
+  return ClampedNumeric<typename Math::result_type>::template MathOp<M>(lhs,
+                                                                        rhs);
+}
+
+// General purpose wrapper template for arithmetic operations.
+template <template <typename, typename, typename> class M,
+          typename L,
+          typename R,
+          typename... Args>
+constexpr ClampedNumeric<typename ResultType<M, L, R, Args...>::type>
+ClampMathOp(const L lhs, const R rhs, const Args... args) {
+  return ClampMathOp<M>(ClampMathOp<M>(lhs, rhs), args...);
+}
+
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Add, +, +=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Sub, -, -=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Mul, *, *=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Div, /, /=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Mod, %, %=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Lsh, <<, <<=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Rsh, >>, >>=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, And, &, &=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Or, |, |=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Xor, ^, ^=)
+BASE_NUMERIC_ARITHMETIC_VARIADIC(Clamped, Clamp, Max)
+BASE_NUMERIC_ARITHMETIC_VARIADIC(Clamped, Clamp, Min)
+BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsLess, <);
+BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsLessOrEqual, <=);
+BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsGreater, >);
+BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsGreaterOrEqual, >=);
+BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsEqual, ==);
+BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsNotEqual, !=);
+
+}  // namespace internal
+
+using internal::ClampedNumeric;
+using internal::MakeClampedNum;
+using internal::ClampMax;
+using internal::ClampMin;
+using internal::ClampAdd;
+using internal::ClampSub;
+using internal::ClampMul;
+using internal::ClampDiv;
+using internal::ClampMod;
+using internal::ClampLsh;
+using internal::ClampRsh;
+using internal::ClampAnd;
+using internal::ClampOr;
+using internal::ClampXor;
+
+}  // namespace base
+
+#endif  // BASE_NUMERICS_CLAMPED_MATH_H_
diff --git a/base/numerics/clamped_math_impl.h b/base/numerics/clamped_math_impl.h
new file mode 100644
index 0000000..303a7e9
--- /dev/null
+++ b/base/numerics/clamped_math_impl.h
@@ -0,0 +1,341 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_CLAMPED_MATH_IMPL_H_
+#define BASE_NUMERICS_CLAMPED_MATH_IMPL_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <climits>
+#include <cmath>
+#include <cstdlib>
+#include <limits>
+#include <type_traits>
+
+#include "base/numerics/checked_math.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/numerics/safe_math_shared_impl.h"
+
+namespace base {
+namespace internal {
+
+template <typename T,
+          typename std::enable_if<std::is_integral<T>::value &&
+                                  std::is_signed<T>::value>::type* = nullptr>
+constexpr T SaturatedNegWrapper(T value) {
+  return MustTreatAsConstexpr(value) || !ClampedNegFastOp<T>::is_supported
+             ? (NegateWrapper(value) != std::numeric_limits<T>::lowest()
+                    ? NegateWrapper(value)
+                    : std::numeric_limits<T>::max())
+             : ClampedNegFastOp<T>::Do(value);
+}
+
+template <typename T,
+          typename std::enable_if<std::is_integral<T>::value &&
+                                  !std::is_signed<T>::value>::type* = nullptr>
+constexpr T SaturatedNegWrapper(T value) {
+  return T(0);
+}
+
+template <
+    typename T,
+    typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
+constexpr T SaturatedNegWrapper(T value) {
+  return -value;
+}
+
+template <typename T,
+          typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
+constexpr T SaturatedAbsWrapper(T value) {
+  // The calculation below is a static identity for unsigned types, but for
+  // signed integer types it provides a non-branching, saturated absolute value.
+  // This works because SafeUnsignedAbs() returns an unsigned type, which can
+  // represent the absolute value of all negative numbers of an equal-width
+  // integer type. The call to IsValueNegative() then detects overflow in the
+  // special case of numeric_limits<T>::min(), by evaluating the bit pattern as
+  // a signed integer value. If it is the overflow case, we end up subtracting
+  // one from the unsigned result, thus saturating to numeric_limits<T>::max().
+  return static_cast<T>(SafeUnsignedAbs(value) -
+                        IsValueNegative<T>(SafeUnsignedAbs(value)));
+}
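+
+// An illustrative check of the identity above (comment only, T = int8_t):
+// for value = -128 the unsigned absolute value is 128, whose bit pattern
+// reads as a negative int8_t, so the subtraction saturates the result to
+// 127. For value = -127 the unsigned absolute value 127 reads as positive,
+// and the exact result 127 is returned.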
+
+template <
+    typename T,
+    typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
+constexpr T SaturatedAbsWrapper(T value) {
+  return value < 0 ? -value : value;
+}
+
+template <typename T, typename U, class Enable = void>
+struct ClampedAddOp {};
+
+template <typename T, typename U>
+struct ClampedAddOp<T,
+                    U,
+                    typename std::enable_if<std::is_integral<T>::value &&
+                                            std::is_integral<U>::value>::type> {
+  using result_type = typename MaxExponentPromotion<T, U>::type;
+  template <typename V = result_type>
+  static constexpr V Do(T x, U y) {
+    if (ClampedAddFastOp<T, U>::is_supported)
+      return ClampedAddFastOp<T, U>::template Do<V>(x, y);
+
+    static_assert(std::is_same<V, result_type>::value ||
+                      IsTypeInRangeForNumericType<U, V>::value,
+                  "The saturation result cannot be determined from the "
+                  "provided types.");
+    const V saturated = CommonMaxOrMin<V>(IsValueNegative(y));
+    V result = {};
+    return BASE_NUMERICS_LIKELY((CheckedAddOp<T, U>::Do(x, y, &result)))
+               ? result
+               : saturated;
+  }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedSubOp {};
+
+template <typename T, typename U>
+struct ClampedSubOp<T,
+                    U,
+                    typename std::enable_if<std::is_integral<T>::value &&
+                                            std::is_integral<U>::value>::type> {
+  using result_type = typename MaxExponentPromotion<T, U>::type;
+  template <typename V = result_type>
+  static constexpr V Do(T x, U y) {
+    // TODO(jschuh) Make this "constexpr if" once we're C++17.
+    if (ClampedSubFastOp<T, U>::is_supported)
+      return ClampedSubFastOp<T, U>::template Do<V>(x, y);
+
+    static_assert(std::is_same<V, result_type>::value ||
+                      IsTypeInRangeForNumericType<U, V>::value,
+                  "The saturation result cannot be determined from the "
+                  "provided types.");
+    const V saturated = CommonMaxOrMin<V>(!IsValueNegative(y));
+    V result = {};
+    return BASE_NUMERICS_LIKELY((CheckedSubOp<T, U>::Do(x, y, &result)))
+               ? result
+               : saturated;
+  }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedMulOp {};
+
+template <typename T, typename U>
+struct ClampedMulOp<T,
+                    U,
+                    typename std::enable_if<std::is_integral<T>::value &&
+                                            std::is_integral<U>::value>::type> {
+  using result_type = typename MaxExponentPromotion<T, U>::type;
+  template <typename V = result_type>
+  static constexpr V Do(T x, U y) {
+    // TODO(jschuh) Make this "constexpr if" once we're C++17.
+    if (ClampedMulFastOp<T, U>::is_supported)
+      return ClampedMulFastOp<T, U>::template Do<V>(x, y);
+
+    V result = {};
+    const V saturated =
+        CommonMaxOrMin<V>(IsValueNegative(x) ^ IsValueNegative(y));
+    return BASE_NUMERICS_LIKELY((CheckedMulOp<T, U>::Do(x, y, &result)))
+               ? result
+               : saturated;
+  }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedDivOp {};
+
+template <typename T, typename U>
+struct ClampedDivOp<T,
+                    U,
+                    typename std::enable_if<std::is_integral<T>::value &&
+                                            std::is_integral<U>::value>::type> {
+  using result_type = typename MaxExponentPromotion<T, U>::type;
+  template <typename V = result_type>
+  static constexpr V Do(T x, U y) {
+    V result = {};
+    if (BASE_NUMERICS_LIKELY((CheckedDivOp<T, U>::Do(x, y, &result))))
+      return result;
+    // Saturation goes to max, min, or NaN (if x is zero).
+    return x ? CommonMaxOrMin<V>(IsValueNegative(x) ^ IsValueNegative(y))
+             : SaturationDefaultLimits<V>::NaN();
+  }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedModOp {};
+
+template <typename T, typename U>
+struct ClampedModOp<T,
+                    U,
+                    typename std::enable_if<std::is_integral<T>::value &&
+                                            std::is_integral<U>::value>::type> {
+  using result_type = typename MaxExponentPromotion<T, U>::type;
+  template <typename V = result_type>
+  static constexpr V Do(T x, U y) {
+    V result = {};
+    return BASE_NUMERICS_LIKELY((CheckedModOp<T, U>::Do(x, y, &result)))
+               ? result
+               : x;
+  }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedLshOp {};
+
+// Left shift. Non-zero values saturate in the direction of the sign. A zero
+// shifted by any value always results in zero.
+template <typename T, typename U>
+struct ClampedLshOp<T,
+                    U,
+                    typename std::enable_if<std::is_integral<T>::value &&
+                                            std::is_integral<U>::value>::type> {
+  using result_type = T;
+  template <typename V = result_type>
+  static constexpr V Do(T x, U shift) {
+    static_assert(!std::is_signed<U>::value, "Shift value must be unsigned.");
+    if (BASE_NUMERICS_LIKELY(shift < std::numeric_limits<T>::digits)) {
+      // Shift as unsigned to avoid undefined behavior.
+      V result = static_cast<V>(as_unsigned(x) << shift);
+      // If the shift can be reversed, we know it was valid.
+      if (BASE_NUMERICS_LIKELY(result >> shift == x))
+        return result;
+    }
+    return x ? CommonMaxOrMin<V>(IsValueNegative(x)) : 0;
+  }
+};
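+
+// Illustrative examples (comment only, T = int8_t): Do(1, 6u) is a
+// reversible 64; Do(1, 7u) saturates to 127; and Do(0, 100u) is simply 0.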
+
+template <typename T, typename U, class Enable = void>
+struct ClampedRshOp {};
+
+// Right shift. Negative values saturate to -1. Positive or 0 saturates to 0.
+template <typename T, typename U>
+struct ClampedRshOp<T,
+                    U,
+                    typename std::enable_if<std::is_integral<T>::value &&
+                                            std::is_integral<U>::value>::type> {
+  using result_type = T;
+  template <typename V = result_type>
+  static constexpr V Do(T x, U shift) {
+    static_assert(!std::is_signed<U>::value, "Shift value must be unsigned.");
+    // Signed right shift is odd, because it saturates to -1 or 0.
+    const V saturated = as_unsigned(V(0)) - IsValueNegative(x);
+    return BASE_NUMERICS_LIKELY(shift < IntegerBitsPlusSign<T>::value)
+               ? saturated_cast<V>(x >> shift)
+               : saturated;
+  }
+};
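+
+// Illustrative examples (comment only, T = int): Do(-8, 1u) is -4, while
+// the out-of-range shifts Do(-8, 100u) and Do(8, 100u) saturate to -1 and 0
+// respectively.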
+
+template <typename T, typename U, class Enable = void>
+struct ClampedAndOp {};
+
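+// As with the other bitwise ops, we promote to unsigned integer results.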
+template <typename T, typename U>
+struct ClampedAndOp<T,
+                    U,
+                    typename std::enable_if<std::is_integral<T>::value &&
+                                            std::is_integral<U>::value>::type> {
+  using result_type = typename std::make_unsigned<
+      typename MaxExponentPromotion<T, U>::type>::type;
+  template <typename V>
+  static constexpr V Do(T x, U y) {
+    return static_cast<result_type>(x) & static_cast<result_type>(y);
+  }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedOrOp {};
+
+// For simplicity we promote to unsigned integers.
+template <typename T, typename U>
+struct ClampedOrOp<T,
+                   U,
+                   typename std::enable_if<std::is_integral<T>::value &&
+                                           std::is_integral<U>::value>::type> {
+  using result_type = typename std::make_unsigned<
+      typename MaxExponentPromotion<T, U>::type>::type;
+  template <typename V>
+  static constexpr V Do(T x, U y) {
+    return static_cast<result_type>(x) | static_cast<result_type>(y);
+  }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedXorOp {};
+
+// For simplicity we support only unsigned integers.
+template <typename T, typename U>
+struct ClampedXorOp<T,
+                    U,
+                    typename std::enable_if<std::is_integral<T>::value &&
+                                            std::is_integral<U>::value>::type> {
+  using result_type = typename std::make_unsigned<
+      typename MaxExponentPromotion<T, U>::type>::type;
+  template <typename V>
+  static constexpr V Do(T x, U y) {
+    return static_cast<result_type>(x) ^ static_cast<result_type>(y);
+  }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedMaxOp {};
+
+template <typename T, typename U>
+struct ClampedMaxOp<
+    T,
+    U,
+    typename std::enable_if<std::is_arithmetic<T>::value &&
+                            std::is_arithmetic<U>::value>::type> {
+  using result_type = typename MaxExponentPromotion<T, U>::type;
+  template <typename V = result_type>
+  static constexpr V Do(T x, U y) {
+    return IsGreater<T, U>::Test(x, y) ? saturated_cast<V>(x)
+                                       : saturated_cast<V>(y);
+  }
+};
+
+template <typename T, typename U, class Enable = void>
+struct ClampedMinOp {};
+
+template <typename T, typename U>
+struct ClampedMinOp<
+    T,
+    U,
+    typename std::enable_if<std::is_arithmetic<T>::value &&
+                            std::is_arithmetic<U>::value>::type> {
+  using result_type = typename LowestValuePromotion<T, U>::type;
+  template <typename V = result_type>
+  static constexpr V Do(T x, U y) {
+    return IsLess<T, U>::Test(x, y) ? saturated_cast<V>(x)
+                                    : saturated_cast<V>(y);
+  }
+};
+
+// This is just boilerplate that wraps the standard floating point arithmetic.
+// A macro isn't the nicest solution, but it beats rewriting these repeatedly.
+#define BASE_FLOAT_ARITHMETIC_OPS(NAME, OP)                              \
+  template <typename T, typename U>                                      \
+  struct Clamped##NAME##Op<                                              \
+      T, U,                                                              \
+      typename std::enable_if<std::is_floating_point<T>::value ||        \
+                              std::is_floating_point<U>::value>::type> { \
+    using result_type = typename MaxExponentPromotion<T, U>::type;       \
+    template <typename V = result_type>                                  \
+    static constexpr V Do(T x, U y) {                                    \
+      return saturated_cast<V>(x OP y);                                  \
+    }                                                                    \
+  };
+
+BASE_FLOAT_ARITHMETIC_OPS(Add, +)
+BASE_FLOAT_ARITHMETIC_OPS(Sub, -)
+BASE_FLOAT_ARITHMETIC_OPS(Mul, *)
+BASE_FLOAT_ARITHMETIC_OPS(Div, /)
+
+#undef BASE_FLOAT_ARITHMETIC_OPS
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_NUMERICS_CLAMPED_MATH_IMPL_H_
diff --git a/base/numerics/math_constants.h b/base/numerics/math_constants.h
new file mode 100644
index 0000000..9a5b8ef
--- /dev/null
+++ b/base/numerics/math_constants.h
@@ -0,0 +1,15 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_MATH_CONSTANTS_H_
+#define BASE_NUMERICS_MATH_CONSTANTS_H_
+
+namespace base {
+
+constexpr double kPiDouble = 3.14159265358979323846;
+constexpr float kPiFloat = 3.14159265358979323846f;
+
+}  // namespace base
+
+#endif  // BASE_NUMERICS_MATH_CONSTANTS_H_
diff --git a/base/numerics/ranges.h b/base/numerics/ranges.h
new file mode 100644
index 0000000..f19320c
--- /dev/null
+++ b/base/numerics/ranges.h
@@ -0,0 +1,27 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_RANGES_H_
+#define BASE_NUMERICS_RANGES_H_
+
+#include <algorithm>
+#include <cmath>
+#include <type_traits>
+
+namespace base {
+
+// To be replaced with std::clamp() from C++17, someday.
+template <class T>
+constexpr const T& ClampToRange(const T& value, const T& min, const T& max) {
+  return std::min(std::max(value, min), max);
+}
+
+template <typename T>
+constexpr bool IsApproximatelyEqual(T lhs, T rhs, T tolerance) {
+  static_assert(std::is_arithmetic<T>::value, "Argument must be arithmetic");
+  return std::abs(rhs - lhs) <= tolerance;
+}
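+
+// Illustrative usage (comment only):
+//   ClampToRange(5, 0, 3);                 // == 3
+//   IsApproximatelyEqual(1.0, 1.05, 0.1);  // == true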
+
+}  // namespace base
+
+#endif  // BASE_NUMERICS_RANGES_H_
diff --git a/base/numerics/safe_conversions.h b/base/numerics/safe_conversions.h
new file mode 100644
index 0000000..9284f8f
--- /dev/null
+++ b/base/numerics/safe_conversions.h
@@ -0,0 +1,344 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_SAFE_CONVERSIONS_H_
+#define BASE_NUMERICS_SAFE_CONVERSIONS_H_
+
+#include <stddef.h>
+
+#include <limits>
+#include <ostream>
+#include <type_traits>
+
+#include "base/numerics/safe_conversions_impl.h"
+
+#if !defined(__native_client__) && (defined(__ARMEL__) || defined(__arch64__))
+#include "base/numerics/safe_conversions_arm_impl.h"
+#define BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS (1)
+#else
+#define BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS (0)
+#endif
+
+namespace base {
+namespace internal {
+
+#if !BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS
+template <typename Dst, typename Src>
+struct SaturateFastAsmOp {
+  static const bool is_supported = false;
+  static constexpr Dst Do(Src) {
+    // Force a compile failure if instantiated.
+    return CheckOnFailure::template HandleFailure<Dst>();
+  }
+};
+#endif  // BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS
+#undef BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS
+
+// The following templates special-case a few specific integer conversions
+// where we can eke out better performance than range checking.
+template <typename Dst, typename Src, typename Enable = void>
+struct IsValueInRangeFastOp {
+  static const bool is_supported = false;
+  static constexpr bool Do(Src value) {
+    // Force a compile failure if instantiated.
+    return CheckOnFailure::template HandleFailure<bool>();
+  }
+};
+
+// Signed to signed range comparison.
+template <typename Dst, typename Src>
+struct IsValueInRangeFastOp<
+    Dst,
+    Src,
+    typename std::enable_if<
+        std::is_integral<Dst>::value && std::is_integral<Src>::value &&
+        std::is_signed<Dst>::value && std::is_signed<Src>::value &&
+        !IsTypeInRangeForNumericType<Dst, Src>::value>::type> {
+  static const bool is_supported = true;
+
+  static constexpr bool Do(Src value) {
+    // Just downcast to the smaller type, sign extend it back to the original
+    // type, and then see if it matches the original value.
+    return value == static_cast<Dst>(value);
+  }
+};
+
+// Signed to unsigned range comparison.
+template <typename Dst, typename Src>
+struct IsValueInRangeFastOp<
+    Dst,
+    Src,
+    typename std::enable_if<
+        std::is_integral<Dst>::value && std::is_integral<Src>::value &&
+        !std::is_signed<Dst>::value && std::is_signed<Src>::value &&
+        !IsTypeInRangeForNumericType<Dst, Src>::value>::type> {
+  static const bool is_supported = true;
+
+  static constexpr bool Do(Src value) {
+    // We cast a signed as unsigned to overflow negative values to the top,
+    // then compare against whichever maximum is smaller, as our upper bound.
+    return as_unsigned(value) <= as_unsigned(CommonMax<Src, Dst>());
+  }
+};
+
+// Convenience function that returns true if the supplied value is in range
+// for the destination type.
+template <typename Dst, typename Src>
+constexpr bool IsValueInRangeForNumericType(Src value) {
+  using SrcType = typename internal::UnderlyingType<Src>::type;
+  return internal::IsValueInRangeFastOp<Dst, SrcType>::is_supported
+             ? internal::IsValueInRangeFastOp<Dst, SrcType>::Do(
+                   static_cast<SrcType>(value))
+             : internal::DstRangeRelationToSrcRange<Dst>(
+                   static_cast<SrcType>(value))
+                   .IsValid();
+}
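+
+// Illustrative examples (comment only):
+//   IsValueInRangeForNumericType<int8_t>(127);   // true
+//   IsValueInRangeForNumericType<int8_t>(128);   // false
+//   IsValueInRangeForNumericType<uint8_t>(-1);   // false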
+
+// checked_cast<> is analogous to static_cast<> for numeric types,
+// except that it CHECKs that the specified numeric conversion will not
+// overflow or underflow. NaN source will always trigger a CHECK.
+template <typename Dst,
+          class CheckHandler = internal::CheckOnFailure,
+          typename Src>
+constexpr Dst checked_cast(Src value) {
+  // This throws a compile-time error on evaluating the constexpr if it can be
+  // determined at compile-time as failing, otherwise it will CHECK at runtime.
+  using SrcType = typename internal::UnderlyingType<Src>::type;
+  return BASE_NUMERICS_LIKELY((IsValueInRangeForNumericType<Dst>(value)))
+             ? static_cast<Dst>(static_cast<SrcType>(value))
+             : CheckHandler::template HandleFailure<Dst>();
+}
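+
+// An illustrative sketch (comment only):
+//   checked_cast<uint8_t>(255);  // Returns 255.
+//   checked_cast<uint8_t>(-1);   // Out of range: hands off to CheckHandler.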
+
+// Default boundaries for integral/float: max/infinity, lowest/-infinity, 0/NaN.
+// You may provide your own limits (e.g. to saturated_cast) so long as you
+// implement all of the static constexpr member functions in the class below.
+template <typename T>
+struct SaturationDefaultLimits : public std::numeric_limits<T> {
+  static constexpr T NaN() {
+    return std::numeric_limits<T>::has_quiet_NaN
+               ? std::numeric_limits<T>::quiet_NaN()
+               : T();
+  }
+  using std::numeric_limits<T>::max;
+  static constexpr T Overflow() {
+    return std::numeric_limits<T>::has_infinity
+               ? std::numeric_limits<T>::infinity()
+               : std::numeric_limits<T>::max();
+  }
+  using std::numeric_limits<T>::lowest;
+  static constexpr T Underflow() {
+    return std::numeric_limits<T>::has_infinity
+               ? std::numeric_limits<T>::infinity() * -1
+               : std::numeric_limits<T>::lowest();
+  }
+};
+
+template <typename Dst, template <typename> class S, typename Src>
+constexpr Dst saturated_cast_impl(Src value, RangeCheck constraint) {
+  // For some reason clang generates much better code when the branch is
+  // structured exactly this way, rather than a sequence of checks.
+  return !constraint.IsOverflowFlagSet()
+             ? (!constraint.IsUnderflowFlagSet() ? static_cast<Dst>(value)
+                                                 : S<Dst>::Underflow())
+             // Skip this check for integral Src, which cannot be NaN.
+             : (std::is_integral<Src>::value || !constraint.IsUnderflowFlagSet()
+                    ? S<Dst>::Overflow()
+                    : S<Dst>::NaN());
+}
+
+// We can reduce the number of conditions and get slightly better performance
+// for normal signed and unsigned integer ranges. And in the specific case of
+// Arm, we can use the optimized saturation instructions.
+template <typename Dst, typename Src, typename Enable = void>
+struct SaturateFastOp {
+  static const bool is_supported = false;
+  static constexpr Dst Do(Src value) {
+    // Force a compile failure if instantiated.
+    return CheckOnFailure::template HandleFailure<Dst>();
+  }
+};
+
+template <typename Dst, typename Src>
+struct SaturateFastOp<
+    Dst,
+    Src,
+    typename std::enable_if<std::is_integral<Src>::value &&
+                            std::is_integral<Dst>::value>::type> {
+  static const bool is_supported = true;
+  static Dst Do(Src value) {
+    if (SaturateFastAsmOp<Dst, Src>::is_supported)
+      return SaturateFastAsmOp<Dst, Src>::Do(value);
+
+    // The exact order of the following is structured to hit the correct
+    // optimization heuristics across compilers. Do not change without
+    // checking the emitted code.
+    Dst saturated = CommonMaxOrMin<Dst, Src>(
+        IsMaxInRangeForNumericType<Dst, Src>() ||
+        (!IsMinInRangeForNumericType<Dst, Src>() && IsValueNegative(value)));
+    return BASE_NUMERICS_LIKELY(IsValueInRangeForNumericType<Dst>(value))
+               ? static_cast<Dst>(value)
+               : saturated;
+  }
+};
+
+// saturated_cast<> is analogous to static_cast<> for numeric types, except
+// that the specified numeric conversion will saturate by default rather than
+// overflow or underflow, and NaN assignment to an integral will return 0.
+// All boundary condition behaviors can be overridden with a custom handler.
+template <typename Dst,
+          template <typename> class SaturationHandler = SaturationDefaultLimits,
+          typename Src>
+constexpr Dst saturated_cast(Src value) {
+  using SrcType = typename UnderlyingType<Src>::type;
+  return !IsCompileTimeConstant(value) &&
+                 SaturateFastOp<Dst, SrcType>::is_supported &&
+                 std::is_same<SaturationHandler<Dst>,
+                              SaturationDefaultLimits<Dst>>::value
+             ? SaturateFastOp<Dst, SrcType>::Do(static_cast<SrcType>(value))
+             : saturated_cast_impl<Dst, SaturationHandler, SrcType>(
+                   static_cast<SrcType>(value),
+                   DstRangeRelationToSrcRange<Dst, SaturationHandler, SrcType>(
+                       static_cast<SrcType>(value)));
+}
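+
+// Illustrative examples (comment only):
+//   saturated_cast<uint8_t>(300);  // == 255
+//   saturated_cast<uint8_t>(-1);   // == 0
+//   saturated_cast<int>(NAN);      // == 0; NaN maps to 0 for integral types.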
+
+// strict_cast<> is analogous to static_cast<> for numeric types, except that
+// it will cause a compile failure if the destination type is not large enough
+// to contain any value in the source type. It performs no runtime checking.
+template <typename Dst, typename Src>
+constexpr Dst strict_cast(Src value) {
+  using SrcType = typename UnderlyingType<Src>::type;
+  static_assert(UnderlyingType<Src>::is_numeric, "Argument must be numeric.");
+  static_assert(std::is_arithmetic<Dst>::value, "Result must be numeric.");
+
+  // If you got here from a compiler error, it's because you tried to assign
+  // from a source type to a destination type that has insufficient range.
+  // The solution may be to change the destination type you're assigning to,
+  // and use one large enough to represent the source.
+  // Alternatively, you may be better served with the checked_cast<> or
+  // saturated_cast<> template functions for your particular use case.
+  static_assert(StaticDstRangeRelationToSrcRange<Dst, SrcType>::value ==
+                    NUMERIC_RANGE_CONTAINED,
+                "The source type is out of range for the destination type. "
+                "Please see strict_cast<> comments for more information.");
+
+  return static_cast<Dst>(static_cast<SrcType>(value));
+}
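+
+// An illustrative sketch (comment only):
+//   strict_cast<int64_t>(int32_t{1});  // Compiles; the range is contained.
+//   strict_cast<int32_t>(int64_t{1});  // Fails the static_assert above.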
+
+// Some wrappers to statically check that a type is in range.
+template <typename Dst, typename Src, class Enable = void>
+struct IsNumericRangeContained {
+  static const bool value = false;
+};
+
+template <typename Dst, typename Src>
+struct IsNumericRangeContained<
+    Dst,
+    Src,
+    typename std::enable_if<ArithmeticOrUnderlyingEnum<Dst>::value &&
+                            ArithmeticOrUnderlyingEnum<Src>::value>::type> {
+  static const bool value = StaticDstRangeRelationToSrcRange<Dst, Src>::value ==
+                            NUMERIC_RANGE_CONTAINED;
+};
+
+// StrictNumeric implements compile time range checking between numeric types by
+// wrapping assignment operations in a strict_cast. This class is intended to be
+// used for function arguments and return types, to ensure the destination type
+// can always contain the source type. This is essentially the same as enforcing
+// -Wconversion in gcc and C4302 warnings on MSVC, but it can be applied
+// incrementally at API boundaries, making it easier to convert code so that it
+// compiles cleanly with truncation warnings enabled.
+// This template should introduce no runtime overhead, but it also provides no
+// runtime checking of any of the associated mathematical operations. Use
+// CheckedNumeric for runtime range checks of the actual value being assigned.
+template <typename T>
+class StrictNumeric {
+ public:
+  using type = T;
+
+  constexpr StrictNumeric() : value_(0) {}
+
+  // Copy constructor.
+  template <typename Src>
+  constexpr StrictNumeric(const StrictNumeric<Src>& rhs)
+      : value_(strict_cast<T>(rhs.value_)) {}
+
+  // This is not an explicit constructor because we implicitly upgrade regular
+  // numerics to StrictNumerics to make them easier to use.
+  template <typename Src>
+  constexpr StrictNumeric(Src value)  // NOLINT(runtime/explicit)
+      : value_(strict_cast<T>(value)) {}
+
+  // If you got here from a compiler error, it's because you tried to assign
+  // from a source type to a destination type that has insufficient range.
+  // The solution may be to change the destination type you're assigning to,
+  // and use one large enough to represent the source.
+  // If you're assigning from a CheckedNumeric<> class, you may be able to use
+  // the AssignIfValid() member function, specify a narrower destination type to
+  // the member value functions (e.g. val.template ValueOrDie<Dst>()), or use
+  // one of the value helper functions (e.g. ValueOrDieForType<Dst>(val)).
+  // If you've encountered an _ambiguous overload_ you can use a static_cast<>
+  // to explicitly cast the result to the destination type.
+  // If none of that works, you may be better served with the checked_cast<> or
+  // saturated_cast<> template functions for your particular use case.
+  template <typename Dst,
+            typename std::enable_if<
+                IsNumericRangeContained<Dst, T>::value>::type* = nullptr>
+  constexpr operator Dst() const {
+    return static_cast<typename ArithmeticOrUnderlyingEnum<Dst>::type>(value_);
+  }
+
+ private:
+  const T value_;
+};
+
+// Convenience wrapper that returns a StrictNumeric from the provided
+// arithmetic type.
+template <typename T>
+constexpr StrictNumeric<typename UnderlyingType<T>::type> MakeStrictNum(
+    const T value) {
+  return value;
+}
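+
+// An illustrative usage sketch (comment only; TakesSize is a hypothetical
+// function): a parameter typed as StrictNumeric<size_t> accepts arguments
+// whose whole range fits in size_t and rejects the rest at compile time:
+//   void TakesSize(SizeT size);  // SizeT is the alias defined below.
+//   TakesSize(uint32_t{1});      // OK on 64-bit targets.
+//   TakesSize(int32_t{1});       // Won't compile: int32_t can be negative.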
+
+// Overload the ostream output operator to make logging work nicely.
+template <typename T>
+std::ostream& operator<<(std::ostream& os, const StrictNumeric<T>& value) {
+  os << static_cast<T>(value);
+  return os;
+}
+
+#define BASE_NUMERIC_COMPARISON_OPERATORS(CLASS, NAME, OP)              \
+  template <typename L, typename R,                                     \
+            typename std::enable_if<                                    \
+                internal::Is##CLASS##Op<L, R>::value>::type* = nullptr> \
+  constexpr bool operator OP(const L lhs, const R rhs) {                \
+    return SafeCompare<NAME, typename UnderlyingType<L>::type,          \
+                       typename UnderlyingType<R>::type>(lhs, rhs);     \
+  }
+
+BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsLess, <);
+BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsLessOrEqual, <=);
+BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsGreater, >);
+BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsGreaterOrEqual, >=);
+BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsEqual, ==);
+BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsNotEqual, !=);
+
+}  // namespace internal
+
+using internal::as_signed;
+using internal::as_unsigned;
+using internal::checked_cast;
+using internal::strict_cast;
+using internal::saturated_cast;
+using internal::SafeUnsignedAbs;
+using internal::StrictNumeric;
+using internal::MakeStrictNum;
+using internal::IsValueInRangeForNumericType;
+using internal::IsTypeInRangeForNumericType;
+using internal::IsValueNegative;
+
+// Explicitly make a shorter size_t alias for convenience.
+using SizeT = StrictNumeric<size_t>;
+
+}  // namespace base
+
+#endif  // BASE_NUMERICS_SAFE_CONVERSIONS_H_
diff --git a/base/numerics/safe_conversions_arm_impl.h b/base/numerics/safe_conversions_arm_impl.h
new file mode 100644
index 0000000..da5813f
--- /dev/null
+++ b/base/numerics/safe_conversions_arm_impl.h
@@ -0,0 +1,51 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_SAFE_CONVERSIONS_ARM_IMPL_H_
+#define BASE_NUMERICS_SAFE_CONVERSIONS_ARM_IMPL_H_
+
+#include <cassert>
+#include <limits>
+#include <type_traits>
+
+#include "base/numerics/safe_conversions_impl.h"
+
+namespace base {
+namespace internal {
+
+// Fast saturation to a destination type.
+template <typename Dst, typename Src>
+struct SaturateFastAsmOp {
+  static const bool is_supported =
+      std::is_signed<Src>::value && std::is_integral<Dst>::value &&
+      std::is_integral<Src>::value &&
+      IntegerBitsPlusSign<Src>::value <= IntegerBitsPlusSign<int32_t>::value &&
+      IntegerBitsPlusSign<Dst>::value <= IntegerBitsPlusSign<int32_t>::value &&
+      !IsTypeInRangeForNumericType<Dst, Src>::value;
+
+  __attribute__((always_inline)) static Dst Do(Src value) {
+    int32_t src = value;
+    typename std::conditional<std::is_signed<Dst>::value, int32_t,
+                              uint32_t>::type result;
+    if (std::is_signed<Dst>::value) {
+      asm("ssat %[dst], %[shift], %[src]"
+          : [dst] "=r"(result)
+          : [src] "r"(src), [shift] "n"(IntegerBitsPlusSign<Dst>::value <= 32
+                                            ? IntegerBitsPlusSign<Dst>::value
+                                            : 32));
+    } else {
+      asm("usat %[dst], %[shift], %[src]"
+          : [dst] "=r"(result)
+          : [src] "r"(src), [shift] "n"(IntegerBitsPlusSign<Dst>::value < 32
+                                            ? IntegerBitsPlusSign<Dst>::value
+                                            : 31));
+    }
+    return static_cast<Dst>(result);
+  }
+};
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_NUMERICS_SAFE_CONVERSIONS_ARM_IMPL_H_
diff --git a/base/numerics/safe_conversions_impl.h b/base/numerics/safe_conversions_impl.h
new file mode 100644
index 0000000..2516204
--- /dev/null
+++ b/base/numerics/safe_conversions_impl.h
@@ -0,0 +1,850 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_SAFE_CONVERSIONS_IMPL_H_
+#define BASE_NUMERICS_SAFE_CONVERSIONS_IMPL_H_
+
+#include <stdint.h>
+
+#include <limits>
+#include <type_traits>
+
+#if defined(__GNUC__) || defined(__clang__)
+#define BASE_NUMERICS_LIKELY(x) __builtin_expect(!!(x), 1)
+#define BASE_NUMERICS_UNLIKELY(x) __builtin_expect(!!(x), 0)
+#else
+#define BASE_NUMERICS_LIKELY(x) (x)
+#define BASE_NUMERICS_UNLIKELY(x) (x)
+#endif
+
+namespace base {
+namespace internal {
+
+// The std library doesn't provide a binary max_exponent for integers; however,
+// we can compute an analog using std::numeric_limits<>::digits.
+template <typename NumericType>
+struct MaxExponent {
+  static const int value = std::is_floating_point<NumericType>::value
+                               ? std::numeric_limits<NumericType>::max_exponent
+                               : std::numeric_limits<NumericType>::digits + 1;
+};
+
+// The number of bits (including the sign) in an integer. Eliminates sizeof
+// hacks.
+template <typename NumericType>
+struct IntegerBitsPlusSign {
+  static const int value = std::numeric_limits<NumericType>::digits +
+                           std::is_signed<NumericType>::value;
+};
+
+// Helper templates for integer manipulations.
+
+template <typename Integer>
+struct PositionOfSignBit {
+  static const size_t value = IntegerBitsPlusSign<Integer>::value - 1;
+};
+
+// Determines if a numeric value is negative without triggering compiler
+// warnings for: unsigned(value) < 0.
+template <typename T,
+          typename std::enable_if<std::is_signed<T>::value>::type* = nullptr>
+constexpr bool IsValueNegative(T value) {
+  static_assert(std::is_arithmetic<T>::value, "Argument must be numeric.");
+  return value < 0;
+}
+
+template <typename T,
+          typename std::enable_if<!std::is_signed<T>::value>::type* = nullptr>
+constexpr bool IsValueNegative(T) {
+  static_assert(std::is_arithmetic<T>::value, "Argument must be numeric.");
+  return false;
+}
+
+// This performs a fast negation, returning a signed value. It works on unsigned
+// arguments, but probably doesn't do what you want for any unsigned value
+// larger than max / 2 + 1 (i.e. signed min cast to unsigned).
+template <typename T>
+constexpr typename std::make_signed<T>::type ConditionalNegate(
+    T x,
+    bool is_negative) {
+  static_assert(std::is_integral<T>::value, "Type must be integral");
+  using SignedT = typename std::make_signed<T>::type;
+  using UnsignedT = typename std::make_unsigned<T>::type;
+  return static_cast<SignedT>(
+      (static_cast<UnsignedT>(x) ^ -SignedT(is_negative)) + is_negative);
+}
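+
+// For instance, ConditionalNegate(2, true) yields -2 and
+// ConditionalNegate(2, false) yields 2, without ever negating a signed value
+// directly (which would overflow for the minimum signed value).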
+
+// This performs a safe, absolute value via unsigned overflow.
+template <typename T>
+constexpr typename std::make_unsigned<T>::type SafeUnsignedAbs(T value) {
+  static_assert(std::is_integral<T>::value, "Type must be integral");
+  using UnsignedT = typename std::make_unsigned<T>::type;
+  return IsValueNegative(value) ? 0 - static_cast<UnsignedT>(value)
+                                : static_cast<UnsignedT>(value);
+}
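+
+// For instance, SafeUnsignedAbs(INT_MIN) is 2147483648u for a 32-bit int,
+// whereas std::abs(INT_MIN) is undefined behavior.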
+
+// This allows us to switch paths on known compile-time constants.
+#if defined(__clang__) || defined(__GNUC__)
+constexpr bool CanDetectCompileTimeConstant() {
+  return true;
+}
+template <typename T>
+constexpr bool IsCompileTimeConstant(const T v) {
+  return __builtin_constant_p(v);
+}
+#else
+constexpr bool CanDetectCompileTimeConstant() {
+  return false;
+}
+template <typename T>
+constexpr bool IsCompileTimeConstant(const T) {
+  return false;
+}
+#endif
+template <typename T>
+constexpr bool MustTreatAsConstexpr(const T v) {
+  // Either we can't detect a compile-time constant, and must always use the
+  // constexpr path, or we know we have a compile-time constant.
+  return !CanDetectCompileTimeConstant() || IsCompileTimeConstant(v);
+}
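+
+// For instance, with Clang or GCC MustTreatAsConstexpr(42) is true because
+// __builtin_constant_p detects the literal, while a runtime variable yields
+// false; on other compilers it is conservatively true for every argument.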
+
+// Forces a crash, like a CHECK(false). Used for numeric boundary errors.
+// Also used in a constexpr template to trigger a compilation failure on
+// an error condition.
+struct CheckOnFailure {
+  template <typename T>
+  static T HandleFailure() {
+#if defined(_MSC_VER)
+    __debugbreak();
+#elif defined(__GNUC__) || defined(__clang__)
+    __builtin_trap();
+#else
+    ((void)(*(volatile char*)0 = 0));
+#endif
+    return T();
+  }
+};
+
+enum IntegerRepresentation {
+  INTEGER_REPRESENTATION_UNSIGNED,
+  INTEGER_REPRESENTATION_SIGNED
+};
+
+// A range for a given numeric Src type is contained for a given numeric Dst
+// type if both numeric_limits<Src>::max() <= numeric_limits<Dst>::max() and
+// numeric_limits<Src>::lowest() >= numeric_limits<Dst>::lowest() are true.
+// We implement this as template specializations rather than simple static
+// comparisons to ensure type correctness in our comparisons.
+enum NumericRangeRepresentation {
+  NUMERIC_RANGE_NOT_CONTAINED,
+  NUMERIC_RANGE_CONTAINED
+};
+
+// Helper templates to statically determine if our destination type can contain
+// maximum and minimum values represented by the source type.
+
+template <typename Dst,
+          typename Src,
+          IntegerRepresentation DstSign = std::is_signed<Dst>::value
+                                              ? INTEGER_REPRESENTATION_SIGNED
+                                              : INTEGER_REPRESENTATION_UNSIGNED,
+          IntegerRepresentation SrcSign = std::is_signed<Src>::value
+                                              ? INTEGER_REPRESENTATION_SIGNED
+                                              : INTEGER_REPRESENTATION_UNSIGNED>
+struct StaticDstRangeRelationToSrcRange;
+
+// Same sign: Dst is guaranteed to contain Src only if its range is equal or
+// larger.
+template <typename Dst, typename Src, IntegerRepresentation Sign>
+struct StaticDstRangeRelationToSrcRange<Dst, Src, Sign, Sign> {
+  static const NumericRangeRepresentation value =
+      MaxExponent<Dst>::value >= MaxExponent<Src>::value
+          ? NUMERIC_RANGE_CONTAINED
+          : NUMERIC_RANGE_NOT_CONTAINED;
+};
+
+// Unsigned to signed: Dst is guaranteed to contain source only if its range is
+// larger.
+template <typename Dst, typename Src>
+struct StaticDstRangeRelationToSrcRange<Dst,
+                                        Src,
+                                        INTEGER_REPRESENTATION_SIGNED,
+                                        INTEGER_REPRESENTATION_UNSIGNED> {
+  static const NumericRangeRepresentation value =
+      MaxExponent<Dst>::value > MaxExponent<Src>::value
+          ? NUMERIC_RANGE_CONTAINED
+          : NUMERIC_RANGE_NOT_CONTAINED;
+};
+
+// Signed to unsigned: Dst cannot be statically determined to contain Src.
+template <typename Dst, typename Src>
+struct StaticDstRangeRelationToSrcRange<Dst,
+                                        Src,
+                                        INTEGER_REPRESENTATION_UNSIGNED,
+                                        INTEGER_REPRESENTATION_SIGNED> {
+  static const NumericRangeRepresentation value = NUMERIC_RANGE_NOT_CONTAINED;
+};
+
+// This class wraps the range constraints as separate booleans so the compiler
+// can identify constants and eliminate unused code paths.
+class RangeCheck {
+ public:
+  constexpr RangeCheck(bool is_in_lower_bound, bool is_in_upper_bound)
+      : is_underflow_(!is_in_lower_bound), is_overflow_(!is_in_upper_bound) {}
+  constexpr RangeCheck() : is_underflow_(0), is_overflow_(0) {}
+  constexpr bool IsValid() const { return !is_overflow_ && !is_underflow_; }
+  constexpr bool IsInvalid() const { return is_overflow_ && is_underflow_; }
+  constexpr bool IsOverflow() const { return is_overflow_ && !is_underflow_; }
+  constexpr bool IsUnderflow() const { return !is_overflow_ && is_underflow_; }
+  constexpr bool IsOverflowFlagSet() const { return is_overflow_; }
+  constexpr bool IsUnderflowFlagSet() const { return is_underflow_; }
+  constexpr bool operator==(const RangeCheck rhs) const {
+    return is_underflow_ == rhs.is_underflow_ &&
+           is_overflow_ == rhs.is_overflow_;
+  }
+  constexpr bool operator!=(const RangeCheck rhs) const {
+    return !(*this == rhs);
+  }
+
+ private:
+  // Do not change the order of these member variables. The integral conversion
+  // optimization depends on this exact order.
+  const bool is_underflow_;
+  const bool is_overflow_;
+};
+
+// The following helper template addresses a corner case in range checks for
+// conversion from a floating-point type to an integral type of smaller range
+// but larger precision (e.g. float -> unsigned). The problem is as follows:
+//   1. Integral maximum is always one less than a power of two, so it must be
+//      truncated to fit the mantissa of the floating point. The direction of
+//      rounding is implementation defined, but in practice it's always IEEE
+//      floats, which round to nearest, yielding a value of larger magnitude
+//      than the integral maximum.
+//      Example: float f = UINT_MAX; // f is 4294967296f but UINT_MAX
+//                                   // is 4294967295u.
+//   2. If the floating point value is equal to the promoted integral maximum
+//      value, a range check will erroneously pass.
+//      Example: (4294967296f <= 4294967295u) // This is true due to a precision
+//                                            // loss in rounding up to float.
+//   3. When the floating point value is then converted to an integral, the
+//      resulting value is out of range for the target integral type and
+//      thus is implementation defined.
+//      Example: unsigned u = (float)UINT_MAX; // u will typically overflow to 0.
+// To fix this bug we manually truncate the maximum value when the destination
+// type is an integral of larger precision than the source floating-point type,
+// such that the resulting maximum is represented exactly as a floating point.
+template <typename Dst, typename Src, template <typename> class Bounds>
+struct NarrowingRange {
+  using SrcLimits = std::numeric_limits<Src>;
+  using DstLimits = typename std::numeric_limits<Dst>;
+
+  // Computes the mask required to make an accurate comparison between types.
+  static const int kShift =
+      (MaxExponent<Src>::value > MaxExponent<Dst>::value &&
+       SrcLimits::digits < DstLimits::digits)
+          ? (DstLimits::digits - SrcLimits::digits)
+          : 0;
+  // Masks out the integer bits that are beyond the precision of the
+  // intermediate type used for comparison.
+  template <
+      typename T,
+      typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
+  static constexpr T Adjust(T value) {
+    static_assert(std::is_same<T, Dst>::value, "");
+    static_assert(kShift < DstLimits::digits, "");
+    return static_cast<T>(
+        ConditionalNegate(SafeUnsignedAbs(value) & ~((T(1) << kShift) - T(1)),
+                          IsValueNegative(value)));
+  }
+
+  template <typename T,
+            typename std::enable_if<std::is_floating_point<T>::value>::type* =
+                nullptr>
+  static constexpr T Adjust(T value) {
+    static_assert(std::is_same<T, Dst>::value, "");
+    static_assert(kShift == 0, "");
+    return value;
+  }
+
+  static constexpr Dst max() { return Adjust(Bounds<Dst>::max()); }
+  static constexpr Dst lowest() { return Adjust(Bounds<Dst>::lowest()); }
+};
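+
+// As a rough illustration: NarrowingRange<uint32_t, float,
+// std::numeric_limits>::max() is 4294967040, i.e. UINT_MAX with the low
+// kShift (8) bits masked off. That is the largest uint32_t value exactly
+// representable in a float's 24-bit mantissa, so the comparisons above can't
+// be fooled by rounding.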
+
+template <typename Dst,
+          typename Src,
+          template <typename> class Bounds,
+          IntegerRepresentation DstSign = std::is_signed<Dst>::value
+                                              ? INTEGER_REPRESENTATION_SIGNED
+                                              : INTEGER_REPRESENTATION_UNSIGNED,
+          IntegerRepresentation SrcSign = std::is_signed<Src>::value
+                                              ? INTEGER_REPRESENTATION_SIGNED
+                                              : INTEGER_REPRESENTATION_UNSIGNED,
+          NumericRangeRepresentation DstRange =
+              StaticDstRangeRelationToSrcRange<Dst, Src>::value>
+struct DstRangeRelationToSrcRangeImpl;
+
+// The following templates are for ranges that must be verified at runtime. We
+// split them into checks based on signedness to avoid confusing casts and
+// compiler warnings on signed and unsigned comparisons.
+
+// Same sign narrowing: The range is contained for normal limits.
+template <typename Dst,
+          typename Src,
+          template <typename> class Bounds,
+          IntegerRepresentation DstSign,
+          IntegerRepresentation SrcSign>
+struct DstRangeRelationToSrcRangeImpl<Dst,
+                                      Src,
+                                      Bounds,
+                                      DstSign,
+                                      SrcSign,
+                                      NUMERIC_RANGE_CONTAINED> {
+  static constexpr RangeCheck Check(Src value) {
+    using SrcLimits = std::numeric_limits<Src>;
+    using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+    return RangeCheck(
+        static_cast<Dst>(SrcLimits::lowest()) >= DstLimits::lowest() ||
+            static_cast<Dst>(value) >= DstLimits::lowest(),
+        static_cast<Dst>(SrcLimits::max()) <= DstLimits::max() ||
+            static_cast<Dst>(value) <= DstLimits::max());
+  }
+};
+
+// Signed to signed narrowing: Both the upper and lower boundaries may be
+// exceeded for standard limits.
+template <typename Dst, typename Src, template <typename> class Bounds>
+struct DstRangeRelationToSrcRangeImpl<Dst,
+                                      Src,
+                                      Bounds,
+                                      INTEGER_REPRESENTATION_SIGNED,
+                                      INTEGER_REPRESENTATION_SIGNED,
+                                      NUMERIC_RANGE_NOT_CONTAINED> {
+  static constexpr RangeCheck Check(Src value) {
+    using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+    return RangeCheck(value >= DstLimits::lowest(), value <= DstLimits::max());
+  }
+};
+
+// Unsigned to unsigned narrowing: Only the upper bound can be exceeded for
+// standard limits.
+template <typename Dst, typename Src, template <typename> class Bounds>
+struct DstRangeRelationToSrcRangeImpl<Dst,
+                                      Src,
+                                      Bounds,
+                                      INTEGER_REPRESENTATION_UNSIGNED,
+                                      INTEGER_REPRESENTATION_UNSIGNED,
+                                      NUMERIC_RANGE_NOT_CONTAINED> {
+  static constexpr RangeCheck Check(Src value) {
+    using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+    return RangeCheck(
+        DstLimits::lowest() == Dst(0) || value >= DstLimits::lowest(),
+        value <= DstLimits::max());
+  }
+};
+
+// Unsigned to signed: Only the upper bound can be exceeded for standard limits.
+template <typename Dst, typename Src, template <typename> class Bounds>
+struct DstRangeRelationToSrcRangeImpl<Dst,
+                                      Src,
+                                      Bounds,
+                                      INTEGER_REPRESENTATION_SIGNED,
+                                      INTEGER_REPRESENTATION_UNSIGNED,
+                                      NUMERIC_RANGE_NOT_CONTAINED> {
+  static constexpr RangeCheck Check(Src value) {
+    using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+    using Promotion = decltype(Src() + Dst());
+    return RangeCheck(DstLimits::lowest() <= Dst(0) ||
+                          static_cast<Promotion>(value) >=
+                              static_cast<Promotion>(DstLimits::lowest()),
+                      static_cast<Promotion>(value) <=
+                          static_cast<Promotion>(DstLimits::max()));
+  }
+};
+
+// Signed to unsigned: The upper boundary may be exceeded for a narrower Dst,
+// and any negative value exceeds the lower boundary for standard limits.
+template <typename Dst, typename Src, template <typename> class Bounds>
+struct DstRangeRelationToSrcRangeImpl<Dst,
+                                      Src,
+                                      Bounds,
+                                      INTEGER_REPRESENTATION_UNSIGNED,
+                                      INTEGER_REPRESENTATION_SIGNED,
+                                      NUMERIC_RANGE_NOT_CONTAINED> {
+  static constexpr RangeCheck Check(Src value) {
+    using SrcLimits = std::numeric_limits<Src>;
+    using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+    using Promotion = decltype(Src() + Dst());
+    return RangeCheck(
+        value >= Src(0) && (DstLimits::lowest() == 0 ||
+                            static_cast<Dst>(value) >= DstLimits::lowest()),
+        static_cast<Promotion>(SrcLimits::max()) <=
+                static_cast<Promotion>(DstLimits::max()) ||
+            static_cast<Promotion>(value) <=
+                static_cast<Promotion>(DstLimits::max()));
+  }
+};
+
+// Simple wrapper for statically checking if a type's range is contained.
+template <typename Dst, typename Src>
+struct IsTypeInRangeForNumericType {
+  static const bool value = StaticDstRangeRelationToSrcRange<Dst, Src>::value ==
+                            NUMERIC_RANGE_CONTAINED;
+};
+
+template <typename Dst,
+          template <typename> class Bounds = std::numeric_limits,
+          typename Src>
+constexpr RangeCheck DstRangeRelationToSrcRange(Src value) {
+  static_assert(std::is_arithmetic<Src>::value, "Argument must be numeric.");
+  static_assert(std::is_arithmetic<Dst>::value, "Result must be numeric.");
+  static_assert(Bounds<Dst>::lowest() < Bounds<Dst>::max(), "");
+  return DstRangeRelationToSrcRangeImpl<Dst, Src, Bounds>::Check(value);
+}
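+
+// For instance, with int as the source type,
+// DstRangeRelationToSrcRange<uint8_t>(-1) reports an underflow,
+// DstRangeRelationToSrcRange<uint8_t>(300) reports an overflow, and
+// DstRangeRelationToSrcRange<uint8_t>(200) is valid.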
+
+// Integer promotion templates used by the portable checked integer arithmetic.
+template <size_t Size, bool IsSigned>
+struct IntegerForDigitsAndSign;
+
+#define INTEGER_FOR_DIGITS_AND_SIGN(I)                          \
+  template <>                                                   \
+  struct IntegerForDigitsAndSign<IntegerBitsPlusSign<I>::value, \
+                                 std::is_signed<I>::value> {    \
+    using type = I;                                             \
+  }
+
+INTEGER_FOR_DIGITS_AND_SIGN(int8_t);
+INTEGER_FOR_DIGITS_AND_SIGN(uint8_t);
+INTEGER_FOR_DIGITS_AND_SIGN(int16_t);
+INTEGER_FOR_DIGITS_AND_SIGN(uint16_t);
+INTEGER_FOR_DIGITS_AND_SIGN(int32_t);
+INTEGER_FOR_DIGITS_AND_SIGN(uint32_t);
+INTEGER_FOR_DIGITS_AND_SIGN(int64_t);
+INTEGER_FOR_DIGITS_AND_SIGN(uint64_t);
+#undef INTEGER_FOR_DIGITS_AND_SIGN
+
+// WARNING: We have no IntegerForDigitsAndSign<128, *>. If we ever add one to
+// support 128-bit math, then the promotion templates below will need to be
+// updated (or more likely replaced with a decltype expression).
+static_assert(IntegerBitsPlusSign<intmax_t>::value == 64,
+              "Max integer size not supported for this toolchain.");
+
+template <typename Integer, bool IsSigned = std::is_signed<Integer>::value>
+struct TwiceWiderInteger {
+  using type =
+      typename IntegerForDigitsAndSign<IntegerBitsPlusSign<Integer>::value * 2,
+                                       IsSigned>::type;
+};
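+
+// For instance, TwiceWiderInteger<int16_t>::type is int32_t, and
+// TwiceWiderInteger<uint32_t>::type is uint64_t.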
+
+enum ArithmeticPromotionCategory {
+  LEFT_PROMOTION,  // Use the type of the left-hand argument.
+  RIGHT_PROMOTION  // Use the type of the right-hand argument.
+};
+
+// Determines the type that can represent the largest positive value.
+template <typename Lhs,
+          typename Rhs,
+          ArithmeticPromotionCategory Promotion =
+              (MaxExponent<Lhs>::value > MaxExponent<Rhs>::value)
+                  ? LEFT_PROMOTION
+                  : RIGHT_PROMOTION>
+struct MaxExponentPromotion;
+
+template <typename Lhs, typename Rhs>
+struct MaxExponentPromotion<Lhs, Rhs, LEFT_PROMOTION> {
+  using type = Lhs;
+};
+
+template <typename Lhs, typename Rhs>
+struct MaxExponentPromotion<Lhs, Rhs, RIGHT_PROMOTION> {
+  using type = Rhs;
+};
+
+// Determines the type that can represent the lowest arithmetic value.
+template <typename Lhs,
+          typename Rhs,
+          ArithmeticPromotionCategory Promotion =
+              std::is_signed<Lhs>::value
+                  ? (std::is_signed<Rhs>::value
+                         ? (MaxExponent<Lhs>::value > MaxExponent<Rhs>::value
+                                ? LEFT_PROMOTION
+                                : RIGHT_PROMOTION)
+                         : LEFT_PROMOTION)
+                  : (std::is_signed<Rhs>::value
+                         ? RIGHT_PROMOTION
+                         : (MaxExponent<Lhs>::value < MaxExponent<Rhs>::value
+                                ? LEFT_PROMOTION
+                                : RIGHT_PROMOTION))>
+struct LowestValuePromotion;
+
+template <typename Lhs, typename Rhs>
+struct LowestValuePromotion<Lhs, Rhs, LEFT_PROMOTION> {
+  using type = Lhs;
+};
+
+template <typename Lhs, typename Rhs>
+struct LowestValuePromotion<Lhs, Rhs, RIGHT_PROMOTION> {
+  using type = Rhs;
+};
+
+// Determines the type that is best able to represent an arithmetic result.
+template <
+    typename Lhs,
+    typename Rhs = Lhs,
+    bool is_intmax_type =
+        std::is_integral<typename MaxExponentPromotion<Lhs, Rhs>::type>::value &&
+        IntegerBitsPlusSign<typename MaxExponentPromotion<Lhs, Rhs>::type>::
+                value == IntegerBitsPlusSign<intmax_t>::value,
+    bool is_max_exponent =
+        StaticDstRangeRelationToSrcRange<
+            typename MaxExponentPromotion<Lhs, Rhs>::type,
+            Lhs>::value == NUMERIC_RANGE_CONTAINED &&
+        StaticDstRangeRelationToSrcRange<
+            typename MaxExponentPromotion<Lhs, Rhs>::type,
+            Rhs>::value == NUMERIC_RANGE_CONTAINED>
+struct BigEnoughPromotion;
+
+// The side with the max exponent is big enough.
+template <typename Lhs, typename Rhs, bool is_intmax_type>
+struct BigEnoughPromotion<Lhs, Rhs, is_intmax_type, true> {
+  using type = typename MaxExponentPromotion<Lhs, Rhs>::type;
+  static const bool is_contained = true;
+};
+
+// We can use a twice wider type to fit.
+template <typename Lhs, typename Rhs>
+struct BigEnoughPromotion<Lhs, Rhs, false, false> {
+  using type =
+      typename TwiceWiderInteger<typename MaxExponentPromotion<Lhs, Rhs>::type,
+                                 std::is_signed<Lhs>::value ||
+                                     std::is_signed<Rhs>::value>::type;
+  static const bool is_contained = true;
+};
+
+// No type is large enough.
+template <typename Lhs, typename Rhs>
+struct BigEnoughPromotion<Lhs, Rhs, true, false> {
+  using type = typename MaxExponentPromotion<Lhs, Rhs>::type;
+  static const bool is_contained = false;
+};
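+
+// For instance, BigEnoughPromotion<int32_t, uint32_t>::type is int64_t (a
+// twice wider signed type holds both ranges), whereas for <int64_t, uint64_t>
+// no standard type is big enough, so the promotion falls back to uint64_t
+// with is_contained set to false.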
+
+// We can statically check if operations on the provided types can wrap, so we
+// can skip the checked operations if they're not needed. For an integer, that
+// means the destination type preserves the sign and is at least twice the
+// width of the source.
+template <typename T, typename Lhs, typename Rhs = Lhs>
+struct IsIntegerArithmeticSafe {
+  static const bool value =
+      !std::is_floating_point<T>::value &&
+      !std::is_floating_point<Lhs>::value &&
+      !std::is_floating_point<Rhs>::value &&
+      std::is_signed<T>::value >= std::is_signed<Lhs>::value &&
+      IntegerBitsPlusSign<T>::value >= (2 * IntegerBitsPlusSign<Lhs>::value) &&
+      std::is_signed<T>::value >= std::is_signed<Rhs>::value &&
+      IntegerBitsPlusSign<T>::value >= (2 * IntegerBitsPlusSign<Rhs>::value);
+};
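+
+// For instance, IsIntegerArithmeticSafe<int64_t, int32_t, int32_t>::value is
+// true: any sum, difference, or product of two int32_t values fits in an
+// int64_t, so no runtime overflow check is needed at the wider width.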
+
+// Promotes to a type that can represent any possible result of a binary
+// arithmetic operation with the source types.
+template <typename Lhs,
+          typename Rhs,
+          bool is_promotion_possible = IsIntegerArithmeticSafe<
+              typename std::conditional<std::is_signed<Lhs>::value ||
+                                            std::is_signed<Rhs>::value,
+                                        intmax_t,
+                                        uintmax_t>::type,
+              typename MaxExponentPromotion<Lhs, Rhs>::type>::value>
+struct FastIntegerArithmeticPromotion;
+
+template <typename Lhs, typename Rhs>
+struct FastIntegerArithmeticPromotion<Lhs, Rhs, true> {
+  using type =
+      typename TwiceWiderInteger<typename MaxExponentPromotion<Lhs, Rhs>::type,
+                                 std::is_signed<Lhs>::value ||
+                                     std::is_signed<Rhs>::value>::type;
+  static_assert(IsIntegerArithmeticSafe<type, Lhs, Rhs>::value, "");
+  static const bool is_contained = true;
+};
+
+template <typename Lhs, typename Rhs>
+struct FastIntegerArithmeticPromotion<Lhs, Rhs, false> {
+  using type = typename BigEnoughPromotion<Lhs, Rhs>::type;
+  static const bool is_contained = false;
+};
+
+// Extracts the underlying type from an enum.
+template <typename T, bool is_enum = std::is_enum<T>::value>
+struct ArithmeticOrUnderlyingEnum;
+
+template <typename T>
+struct ArithmeticOrUnderlyingEnum<T, true> {
+  using type = typename std::underlying_type<T>::type;
+  static const bool value = std::is_arithmetic<type>::value;
+};
+
+template <typename T>
+struct ArithmeticOrUnderlyingEnum<T, false> {
+  using type = T;
+  static const bool value = std::is_arithmetic<type>::value;
+};
+
+// The following are helper templates used in the CheckedNumeric class.
+template <typename T>
+class CheckedNumeric;
+
+template <typename T>
+class ClampedNumeric;
+
+template <typename T>
+class StrictNumeric;
+
+// Used to treat CheckedNumeric and arithmetic underlying types the same.
+template <typename T>
+struct UnderlyingType {
+  using type = typename ArithmeticOrUnderlyingEnum<T>::type;
+  static const bool is_numeric = std::is_arithmetic<type>::value;
+  static const bool is_checked = false;
+  static const bool is_clamped = false;
+  static const bool is_strict = false;
+};
+
+template <typename T>
+struct UnderlyingType<CheckedNumeric<T>> {
+  using type = T;
+  static const bool is_numeric = true;
+  static const bool is_checked = true;
+  static const bool is_clamped = false;
+  static const bool is_strict = false;
+};
+
+template <typename T>
+struct UnderlyingType<ClampedNumeric<T>> {
+  using type = T;
+  static const bool is_numeric = true;
+  static const bool is_checked = false;
+  static const bool is_clamped = true;
+  static const bool is_strict = false;
+};
+
+template <typename T>
+struct UnderlyingType<StrictNumeric<T>> {
+  using type = T;
+  static const bool is_numeric = true;
+  static const bool is_checked = false;
+  static const bool is_clamped = false;
+  static const bool is_strict = true;
+};
+
+template <typename L, typename R>
+struct IsCheckedOp {
+  static const bool value =
+      UnderlyingType<L>::is_numeric && UnderlyingType<R>::is_numeric &&
+      (UnderlyingType<L>::is_checked || UnderlyingType<R>::is_checked);
+};
+
+template <typename L, typename R>
+struct IsClampedOp {
+  static const bool value =
+      UnderlyingType<L>::is_numeric && UnderlyingType<R>::is_numeric &&
+      (UnderlyingType<L>::is_clamped || UnderlyingType<R>::is_clamped) &&
+      !(UnderlyingType<L>::is_checked || UnderlyingType<R>::is_checked);
+};
+
+template <typename L, typename R>
+struct IsStrictOp {
+  static const bool value =
+      UnderlyingType<L>::is_numeric && UnderlyingType<R>::is_numeric &&
+      (UnderlyingType<L>::is_strict || UnderlyingType<R>::is_strict) &&
+      !(UnderlyingType<L>::is_checked || UnderlyingType<R>::is_checked) &&
+      !(UnderlyingType<L>::is_clamped || UnderlyingType<R>::is_clamped);
+};
+
+// as_signed<> returns the supplied integral value (or integral castable
+// Numeric template) cast as a signed integral of equivalent precision.
+// I.e. it's mostly an alias for: static_cast<std::make_signed<T>::type>(t)
+template <typename Src>
+constexpr typename std::make_signed<
+    typename base::internal::UnderlyingType<Src>::type>::type
+as_signed(const Src value) {
+  static_assert(std::is_integral<decltype(as_signed(value))>::value,
+                "Argument must be a signed or unsigned integer type.");
+  return static_cast<decltype(as_signed(value))>(value);
+}
+
+// as_unsigned<> returns the supplied integral value (or integral castable
+// Numeric template) cast as an unsigned integral of equivalent precision.
+// I.e. it's mostly an alias for: static_cast<std::make_unsigned<T>::type>(t)
+template <typename Src>
+constexpr typename std::make_unsigned<
+    typename base::internal::UnderlyingType<Src>::type>::type
+as_unsigned(const Src value) {
+  static_assert(std::is_integral<decltype(as_unsigned(value))>::value,
+                "Argument must be a signed or unsigned integer type.");
+  return static_cast<decltype(as_unsigned(value))>(value);
+}
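+
+// For instance, as_signed(3u) is the int 3, and as_unsigned(-1) is the
+// unsigned int 4294967295 on a 32-bit-int platform.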
+
+template <typename L, typename R>
+constexpr bool IsLessImpl(const L lhs,
+                          const R rhs,
+                          const RangeCheck l_range,
+                          const RangeCheck r_range) {
+  return l_range.IsUnderflow() || r_range.IsOverflow() ||
+         (l_range == r_range &&
+          static_cast<decltype(lhs + rhs)>(lhs) <
+              static_cast<decltype(lhs + rhs)>(rhs));
+}
+
+template <typename L, typename R>
+struct IsLess {
+  static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+                "Types must be numeric.");
+  static constexpr bool Test(const L lhs, const R rhs) {
+    return IsLessImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
+                      DstRangeRelationToSrcRange<L>(rhs));
+  }
+};
+
+template <typename L, typename R>
+constexpr bool IsLessOrEqualImpl(const L lhs,
+                                 const R rhs,
+                                 const RangeCheck l_range,
+                                 const RangeCheck r_range) {
+  return l_range.IsUnderflow() || r_range.IsOverflow() ||
+         (l_range == r_range &&
+          static_cast<decltype(lhs + rhs)>(lhs) <=
+              static_cast<decltype(lhs + rhs)>(rhs));
+}
+
+template <typename L, typename R>
+struct IsLessOrEqual {
+  static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+                "Types must be numeric.");
+  static constexpr bool Test(const L lhs, const R rhs) {
+    return IsLessOrEqualImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
+                             DstRangeRelationToSrcRange<L>(rhs));
+  }
+};
+
+template <typename L, typename R>
+constexpr bool IsGreaterImpl(const L lhs,
+                             const R rhs,
+                             const RangeCheck l_range,
+                             const RangeCheck r_range) {
+  return l_range.IsOverflow() || r_range.IsUnderflow() ||
+         (l_range == r_range &&
+          static_cast<decltype(lhs + rhs)>(lhs) >
+              static_cast<decltype(lhs + rhs)>(rhs));
+}
+
+template <typename L, typename R>
+struct IsGreater {
+  static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+                "Types must be numeric.");
+  static constexpr bool Test(const L lhs, const R rhs) {
+    return IsGreaterImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
+                         DstRangeRelationToSrcRange<L>(rhs));
+  }
+};
+
+template <typename L, typename R>
+constexpr bool IsGreaterOrEqualImpl(const L lhs,
+                                    const R rhs,
+                                    const RangeCheck l_range,
+                                    const RangeCheck r_range) {
+  return l_range.IsOverflow() || r_range.IsUnderflow() ||
+         (l_range == r_range &&
+          static_cast<decltype(lhs + rhs)>(lhs) >=
+              static_cast<decltype(lhs + rhs)>(rhs));
+}
+
+template <typename L, typename R>
+struct IsGreaterOrEqual {
+  static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+                "Types must be numeric.");
+  static constexpr bool Test(const L lhs, const R rhs) {
+    return IsGreaterOrEqualImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
+                                DstRangeRelationToSrcRange<L>(rhs));
+  }
+};
+
+template <typename L, typename R>
+struct IsEqual {
+  static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+                "Types must be numeric.");
+  static constexpr bool Test(const L lhs, const R rhs) {
+    return DstRangeRelationToSrcRange<R>(lhs) ==
+               DstRangeRelationToSrcRange<L>(rhs) &&
+           static_cast<decltype(lhs + rhs)>(lhs) ==
+               static_cast<decltype(lhs + rhs)>(rhs);
+  }
+};
+
+template <typename L, typename R>
+struct IsNotEqual {
+  static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+                "Types must be numeric.");
+  static constexpr bool Test(const L lhs, const R rhs) {
+    return DstRangeRelationToSrcRange<R>(lhs) !=
+               DstRangeRelationToSrcRange<L>(rhs) ||
+           static_cast<decltype(lhs + rhs)>(lhs) !=
+               static_cast<decltype(lhs + rhs)>(rhs);
+  }
+};
+
+// These implement the safe comparison functions, promoting the operands to a
+// common type big enough to represent both before comparing.
+template <template <typename, typename> class C, typename L, typename R>
+constexpr bool SafeCompare(const L lhs, const R rhs) {
+  static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+                "Types must be numeric.");
+  using Promotion = BigEnoughPromotion<L, R>;
+  using BigType = typename Promotion::type;
+  return Promotion::is_contained
+             // Force to a larger type for speed if both are contained.
+             ? C<BigType, BigType>::Test(
+                   static_cast<BigType>(static_cast<L>(lhs)),
+                   static_cast<BigType>(static_cast<R>(rhs)))
+             // Let the template functions figure it out for mixed types.
+             : C<L, R>::Test(lhs, rhs);
+}
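+
+// For instance, SafeCompare<IsLess>(-1, 1u) is true: both operands are
+// promoted to int64_t before comparing. The bare expression -1 < 1u is false,
+// because the usual arithmetic conversions turn -1 into a huge unsigned
+// value.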
+
+template <typename Dst, typename Src>
+constexpr bool IsMaxInRangeForNumericType() {
+  return IsGreaterOrEqual<Dst, Src>::Test(std::numeric_limits<Dst>::max(),
+                                          std::numeric_limits<Src>::max());
+}
+
+template <typename Dst, typename Src>
+constexpr bool IsMinInRangeForNumericType() {
+  return IsLessOrEqual<Dst, Src>::Test(std::numeric_limits<Dst>::lowest(),
+                                       std::numeric_limits<Src>::lowest());
+}
+
+template <typename Dst, typename Src>
+constexpr Dst CommonMax() {
+  return !IsMaxInRangeForNumericType<Dst, Src>()
+             ? Dst(std::numeric_limits<Dst>::max())
+             : Dst(std::numeric_limits<Src>::max());
+}
+
+template <typename Dst, typename Src>
+constexpr Dst CommonMin() {
+  return !IsMinInRangeForNumericType<Dst, Src>()
+             ? Dst(std::numeric_limits<Dst>::lowest())
+             : Dst(std::numeric_limits<Src>::lowest());
+}
+
+// This is a wrapper that returns the max or min for a supplied type. If the
+// argument is false, the returned value is the maximum; if true, it is the
+// minimum.
+template <typename Dst, typename Src = Dst>
+constexpr Dst CommonMaxOrMin(bool is_min) {
+  return is_min ? CommonMin<Dst, Src>() : CommonMax<Dst, Src>();
+}
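+
+// For instance, CommonMaxOrMin<uint8_t, int>(false) is 255 and
+// CommonMaxOrMin<uint8_t, int>(true) is 0: the uint8_t limits are the
+// narrower of the two types' bounds in both directions.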
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_NUMERICS_SAFE_CONVERSIONS_IMPL_H_
diff --git a/base/numerics/safe_math.h b/base/numerics/safe_math.h
new file mode 100644
index 0000000..e30be90
--- /dev/null
+++ b/base/numerics/safe_math.h
@@ -0,0 +1,12 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_SAFE_MATH_H_
+#define BASE_NUMERICS_SAFE_MATH_H_
+
+#include "base/numerics/checked_math.h"
+#include "base/numerics/clamped_math.h"
+#include "base/numerics/safe_conversions.h"
+
+#endif  // BASE_NUMERICS_SAFE_MATH_H_
diff --git a/base/numerics/safe_math_arm_impl.h b/base/numerics/safe_math_arm_impl.h
new file mode 100644
index 0000000..a7cda1b
--- /dev/null
+++ b/base/numerics/safe_math_arm_impl.h
@@ -0,0 +1,122 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_SAFE_MATH_ARM_IMPL_H_
+#define BASE_NUMERICS_SAFE_MATH_ARM_IMPL_H_
+
+#include <cassert>
+#include <limits>
+#include <type_traits>
+
+#include "base/numerics/safe_conversions.h"
+
+namespace base {
+namespace internal {
+
+template <typename T, typename U>
+struct CheckedMulFastAsmOp {
+  static const bool is_supported =
+      FastIntegerArithmeticPromotion<T, U>::is_contained;
+
+  // The following is much more efficient than the Clang and GCC builtins for
+  // performing overflow-checked multiplication when a twice wider type is
+  // available. The below compiles down to 2-3 instructions, depending on the
+  // width of the types in use.
+  // As an example, an int32_t multiply compiles to:
+  //    smull   r0, r1, r0, r1
+  //    cmp     r1, r1, asr #31
+  // And an int16_t multiply compiles to:
+  //    smulbb  r1, r1, r0
+  //    asr     r2, r1, #16
+  //    cmp     r2, r1, asr #15
+  template <typename V>
+  __attribute__((always_inline)) static bool Do(T x, U y, V* result) {
+    using Promotion = typename FastIntegerArithmeticPromotion<T, U>::type;
+    Promotion presult;
+
+    presult = static_cast<Promotion>(x) * static_cast<Promotion>(y);
+    *result = static_cast<V>(presult);
+    return IsValueInRangeForNumericType<V>(presult);
+  }
+};
+
+template <typename T, typename U>
+struct ClampedAddFastAsmOp {
+  static const bool is_supported =
+      BigEnoughPromotion<T, U>::is_contained &&
+      IsTypeInRangeForNumericType<
+          int32_t,
+          typename BigEnoughPromotion<T, U>::type>::value;
+
+  template <typename V>
+  __attribute__((always_inline)) static V Do(T x, U y) {
+    // This will get promoted to an int, so let the compiler do whatever is
+    // clever and rely on the saturated cast to bounds check.
+    if (IsIntegerArithmeticSafe<int, T, U>::value)
+      return saturated_cast<V>(x + y);
+
+    int32_t result;
+    int32_t x_i32 = x;
+    int32_t y_i32 = y;
+
+    asm("qadd %[result], %[first], %[second]"
+        : [result] "=r"(result)
+        : [first] "r"(x_i32), [second] "r"(y_i32));
+    return saturated_cast<V>(result);
+  }
+};
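+
+// As a sketch of the behavior above: qadd is ARM's saturating signed add, so
+// with int32_t operands INT32_MAX + 1 stays at INT32_MAX instead of wrapping,
+// and the trailing saturated_cast then clamps to the destination type V.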
+
+template <typename T, typename U>
+struct ClampedSubFastAsmOp {
+  static const bool is_supported =
+      BigEnoughPromotion<T, U>::is_contained &&
+      IsTypeInRangeForNumericType<
+          int32_t,
+          typename BigEnoughPromotion<T, U>::type>::value;
+
+  template <typename V>
+  __attribute__((always_inline)) static V Do(T x, U y) {
+    // This will get promoted to an int, so let the compiler do whatever is
+    // clever and rely on the saturated cast to bounds check.
+    if (IsIntegerArithmeticSafe<int, T, U>::value)
+      return saturated_cast<V>(x - y);
+
+    int32_t result;
+    int32_t x_i32 = x;
+    int32_t y_i32 = y;
+
+    asm("qsub %[result], %[first], %[second]"
+        : [result] "=r"(result)
+        : [first] "r"(x_i32), [second] "r"(y_i32));
+    return saturated_cast<V>(result);
+  }
+};
+
+template <typename T, typename U>
+struct ClampedMulFastAsmOp {
+  static const bool is_supported = CheckedMulFastAsmOp<T, U>::is_supported;
+
+  template <typename V>
+  __attribute__((always_inline)) static V Do(T x, U y) {
+    // Use the CheckedMulFastAsmOp for full-width 32-bit values, because
+    // it's fewer instructions than promoting and then saturating.
+    if (!IsIntegerArithmeticSafe<int32_t, T, U>::value &&
+        !IsIntegerArithmeticSafe<uint32_t, T, U>::value) {
+      V result;
+      if (CheckedMulFastAsmOp<T, U>::Do(x, y, &result))
+        return result;
+      return CommonMaxOrMin<V>(IsValueNegative(x) ^ IsValueNegative(y));
+    }
+
+    assert((FastIntegerArithmeticPromotion<T, U>::is_contained));
+    using Promotion = typename FastIntegerArithmeticPromotion<T, U>::type;
+    return saturated_cast<V>(static_cast<Promotion>(x) *
+                             static_cast<Promotion>(y));
+  }
+};
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_NUMERICS_SAFE_MATH_ARM_IMPL_H_
diff --git a/base/numerics/safe_math_clang_gcc_impl.h b/base/numerics/safe_math_clang_gcc_impl.h
new file mode 100644
index 0000000..1760338
--- /dev/null
+++ b/base/numerics/safe_math_clang_gcc_impl.h
@@ -0,0 +1,157 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_SAFE_MATH_CLANG_GCC_IMPL_H_
+#define BASE_NUMERICS_SAFE_MATH_CLANG_GCC_IMPL_H_
+
+#include <cassert>
+#include <limits>
+#include <type_traits>
+
+#include "base/numerics/safe_conversions.h"
+
+#if !defined(__native_client__) && (defined(__ARMEL__) || defined(__arch64__))
+#include "base/numerics/safe_math_arm_impl.h"
+#define BASE_HAS_ASSEMBLER_SAFE_MATH (1)
+#else
+#define BASE_HAS_ASSEMBLER_SAFE_MATH (0)
+#endif
+
+namespace base {
+namespace internal {
+
+// These are the non-functioning boilerplate implementations of the optimized
+// safe math routines.
+#if !BASE_HAS_ASSEMBLER_SAFE_MATH
+template <typename T, typename U>
+struct CheckedMulFastAsmOp {
+  static const bool is_supported = false;
+  template <typename V>
+  static constexpr bool Do(T, U, V*) {
+    // Force a compile failure if instantiated.
+    return CheckOnFailure::template HandleFailure<bool>();
+  }
+};
+
+template <typename T, typename U>
+struct ClampedAddFastAsmOp {
+  static const bool is_supported = false;
+  template <typename V>
+  static constexpr V Do(T, U) {
+    // Force a compile failure if instantiated.
+    return CheckOnFailure::template HandleFailure<V>();
+  }
+};
+
+template <typename T, typename U>
+struct ClampedSubFastAsmOp {
+  static const bool is_supported = false;
+  template <typename V>
+  static constexpr V Do(T, U) {
+    // Force a compile failure if instantiated.
+    return CheckOnFailure::template HandleFailure<V>();
+  }
+};
+
+template <typename T, typename U>
+struct ClampedMulFastAsmOp {
+  static const bool is_supported = false;
+  template <typename V>
+  static constexpr V Do(T, U) {
+    // Force a compile failure if instantiated.
+    return CheckOnFailure::template HandleFailure<V>();
+  }
+};
+#endif  // BASE_HAS_ASSEMBLER_SAFE_MATH
+#undef BASE_HAS_ASSEMBLER_SAFE_MATH
+
+template <typename T, typename U>
+struct CheckedAddFastOp {
+  static const bool is_supported = true;
+  template <typename V>
+  __attribute__((always_inline)) static constexpr bool Do(T x, U y, V* result) {
+    return !__builtin_add_overflow(x, y, result);
+  }
+};
+
+template <typename T, typename U>
+struct CheckedSubFastOp {
+  static const bool is_supported = true;
+  template <typename V>
+  __attribute__((always_inline)) static constexpr bool Do(T x, U y, V* result) {
+    return !__builtin_sub_overflow(x, y, result);
+  }
+};
+
+template <typename T, typename U>
+struct CheckedMulFastOp {
+#if defined(__clang__)
+  // TODO(jschuh): Get the Clang runtime library issues sorted out so we can
+  // support full-width, mixed-sign multiply builtins.
+  // https://crbug.com/613003
+  // We can support intptr_t, uintptr_t, or a smaller common type.
+  static const bool is_supported =
+      (IsTypeInRangeForNumericType<intptr_t, T>::value &&
+       IsTypeInRangeForNumericType<intptr_t, U>::value) ||
+      (IsTypeInRangeForNumericType<uintptr_t, T>::value &&
+       IsTypeInRangeForNumericType<uintptr_t, U>::value);
+#else
+  static const bool is_supported = true;
+#endif
+  template <typename V>
+  __attribute__((always_inline)) static constexpr bool Do(T x, U y, V* result) {
+    return CheckedMulFastAsmOp<T, U>::is_supported
+               ? CheckedMulFastAsmOp<T, U>::Do(x, y, result)
+               : !__builtin_mul_overflow(x, y, result);
+  }
+};
+
+template <typename T, typename U>
+struct ClampedAddFastOp {
+  static const bool is_supported = ClampedAddFastAsmOp<T, U>::is_supported;
+  template <typename V>
+  __attribute__((always_inline)) static V Do(T x, U y) {
+    return ClampedAddFastAsmOp<T, U>::template Do<V>(x, y);
+  }
+};
+
+template <typename T, typename U>
+struct ClampedSubFastOp {
+  static const bool is_supported = ClampedSubFastAsmOp<T, U>::is_supported;
+  template <typename V>
+  __attribute__((always_inline)) static V Do(T x, U y) {
+    return ClampedSubFastAsmOp<T, U>::template Do<V>(x, y);
+  }
+};
+
+template <typename T, typename U>
+struct ClampedMulFastOp {
+  static const bool is_supported = ClampedMulFastAsmOp<T, U>::is_supported;
+  template <typename V>
+  __attribute__((always_inline)) static V Do(T x, U y) {
+    return ClampedMulFastAsmOp<T, U>::template Do<V>(x, y);
+  }
+};
+
+template <typename T>
+struct ClampedNegFastOp {
+  static const bool is_supported = std::is_signed<T>::value;
+  __attribute__((always_inline)) static T Do(T value) {
+    // Use this when there is no assembler path available.
+    if (!ClampedSubFastAsmOp<T, T>::is_supported) {
+      T result;
+      return !__builtin_sub_overflow(T(0), value, &result)
+                 ? result
+                 : std::numeric_limits<T>::max();
+    }
+
+    // Fall back to the normal subtraction path.
+    return ClampedSubFastOp<T, T>::template Do<T>(T(0), value);
+  }
+};
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_NUMERICS_SAFE_MATH_CLANG_GCC_IMPL_H_
diff --git a/base/numerics/safe_math_shared_impl.h b/base/numerics/safe_math_shared_impl.h
new file mode 100644
index 0000000..583c487
--- /dev/null
+++ b/base/numerics/safe_math_shared_impl.h
@@ -0,0 +1,237 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_SAFE_MATH_SHARED_IMPL_H_
+#define BASE_NUMERICS_SAFE_MATH_SHARED_IMPL_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <cassert>
+#include <climits>
+#include <cmath>
+#include <cstdlib>
+#include <limits>
+#include <type_traits>
+
+#include "base/numerics/safe_conversions.h"
+
+// Where available, use builtin math overflow support on Clang and GCC.
+#if !defined(__native_client__) &&                         \
+    ((defined(__clang__) &&                                \
+      ((__clang_major__ > 3) ||                            \
+       (__clang_major__ == 3 && __clang_minor__ >= 4))) || \
+     (defined(__GNUC__) && __GNUC__ >= 5))
+#include "base/numerics/safe_math_clang_gcc_impl.h"
+#define BASE_HAS_OPTIMIZED_SAFE_MATH (1)
+#else
+#define BASE_HAS_OPTIMIZED_SAFE_MATH (0)
+#endif
+
+namespace base {
+namespace internal {
+
+// These are the non-functioning boilerplate implementations of the optimized
+// safe math routines.
+#if !BASE_HAS_OPTIMIZED_SAFE_MATH
+template <typename T, typename U>
+struct CheckedAddFastOp {
+  static const bool is_supported = false;
+  template <typename V>
+  static constexpr bool Do(T, U, V*) {
+    // Force a compile failure if instantiated.
+    return CheckOnFailure::template HandleFailure<bool>();
+  }
+};
+
+template <typename T, typename U>
+struct CheckedSubFastOp {
+  static const bool is_supported = false;
+  template <typename V>
+  static constexpr bool Do(T, U, V*) {
+    // Force a compile failure if instantiated.
+    return CheckOnFailure::template HandleFailure<bool>();
+  }
+};
+
+template <typename T, typename U>
+struct CheckedMulFastOp {
+  static const bool is_supported = false;
+  template <typename V>
+  static constexpr bool Do(T, U, V*) {
+    // Force a compile failure if instantiated.
+    return CheckOnFailure::template HandleFailure<bool>();
+  }
+};
+
+template <typename T, typename U>
+struct ClampedAddFastOp {
+  static const bool is_supported = false;
+  template <typename V>
+  static constexpr V Do(T, U) {
+    // Force a compile failure if instantiated.
+    return CheckOnFailure::template HandleFailure<V>();
+  }
+};
+
+template <typename T, typename U>
+struct ClampedSubFastOp {
+  static const bool is_supported = false;
+  template <typename V>
+  static constexpr V Do(T, U) {
+    // Force a compile failure if instantiated.
+    return CheckOnFailure::template HandleFailure<V>();
+  }
+};
+
+template <typename T, typename U>
+struct ClampedMulFastOp {
+  static const bool is_supported = false;
+  template <typename V>
+  static constexpr V Do(T, U) {
+    // Force a compile failure if instantiated.
+    return CheckOnFailure::template HandleFailure<V>();
+  }
+};
+
+template <typename T>
+struct ClampedNegFastOp {
+  static const bool is_supported = false;
+  static constexpr T Do(T) {
+    // Force a compile failure if instantiated.
+    return CheckOnFailure::template HandleFailure<T>();
+  }
+};
+#endif  // BASE_HAS_OPTIMIZED_SAFE_MATH
+#undef BASE_HAS_OPTIMIZED_SAFE_MATH
+
+// This is used for UnsignedAbs, where we need to support floating-point
+// template instantiations even though we don't actually support the operations.
+// However, there is no corresponding implementation of e.g. SafeUnsignedAbs,
+// so the float versions will not compile.
+template <typename Numeric,
+          bool IsInteger = std::is_integral<Numeric>::value,
+          bool IsFloat = std::is_floating_point<Numeric>::value>
+struct UnsignedOrFloatForSize;
+
+template <typename Numeric>
+struct UnsignedOrFloatForSize<Numeric, true, false> {
+  using type = typename std::make_unsigned<Numeric>::type;
+};
+
+template <typename Numeric>
+struct UnsignedOrFloatForSize<Numeric, false, true> {
+  using type = Numeric;
+};
+
+// Wrap the unary operations to allow SFINAE when instantiating integrals
+// versus floating points. These don't perform any overflow checking. Rather,
+// they exhibit well-defined overflow semantics and rely on the caller to
+// detect if an overflow occurred.
+
+template <typename T,
+          typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
+constexpr T NegateWrapper(T value) {
+  using UnsignedT = typename std::make_unsigned<T>::type;
+  // This will compile to a NEG on Intel, and is normal negation on ARM.
+  return static_cast<T>(UnsignedT(0) - static_cast<UnsignedT>(value));
+}
+
+template <
+    typename T,
+    typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
+constexpr T NegateWrapper(T value) {
+  return -value;
+}
+
+template <typename T,
+          typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
+constexpr typename std::make_unsigned<T>::type InvertWrapper(T value) {
+  return ~value;
+}
+
+template <typename T,
+          typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
+constexpr T AbsWrapper(T value) {
+  return static_cast<T>(SafeUnsignedAbs(value));
+}
+
+template <
+    typename T,
+    typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
+constexpr T AbsWrapper(T value) {
+  return value < 0 ? -value : value;
+}
+
+template <template <typename, typename, typename> class M,
+          typename L,
+          typename R>
+struct MathWrapper {
+  using math = M<typename UnderlyingType<L>::type,
+                 typename UnderlyingType<R>::type,
+                 void>;
+  using type = typename math::result_type;
+};
+
+// These variadic templates work out the return types.
+// TODO(jschuh): Rip all this out once we have C++14 non-trailing auto support.
+template <template <typename, typename, typename> class M,
+          typename L,
+          typename R,
+          typename... Args>
+struct ResultType;
+
+template <template <typename, typename, typename> class M,
+          typename L,
+          typename R>
+struct ResultType<M, L, R> {
+  using type = typename MathWrapper<M, L, R>::type;
+};
+
+template <template <typename, typename, typename> class M,
+          typename L,
+          typename R,
+          typename... Args>
+struct ResultType {
+  using type =
+      typename ResultType<M, typename ResultType<M, L, R>::type, Args...>::type;
+};
+
+// The following macros are just boilerplate for the standard arithmetic
+// operator overloads and variadic function templates. A macro isn't the nicest
+// solution, but it beats rewriting these over and over again.
+#define BASE_NUMERIC_ARITHMETIC_VARIADIC(CLASS, CL_ABBR, OP_NAME)       \
+  template <typename L, typename R, typename... Args>                   \
+  constexpr CLASS##Numeric<                                             \
+      typename ResultType<CLASS##OP_NAME##Op, L, R, Args...>::type>     \
+      CL_ABBR##OP_NAME(const L lhs, const R rhs, const Args... args) {  \
+    return CL_ABBR##MathOp<CLASS##OP_NAME##Op, L, R, Args...>(lhs, rhs, \
+                                                              args...); \
+  }
+
+#define BASE_NUMERIC_ARITHMETIC_OPERATORS(CLASS, CL_ABBR, OP_NAME, OP, CMP_OP) \
+  /* Binary arithmetic operator for all CLASS##Numeric operations. */          \
+  template <typename L, typename R,                                            \
+            typename std::enable_if<Is##CLASS##Op<L, R>::value>::type* =       \
+                nullptr>                                                       \
+  constexpr CLASS##Numeric<                                                    \
+      typename MathWrapper<CLASS##OP_NAME##Op, L, R>::type>                    \
+  operator OP(const L lhs, const R rhs) {                                      \
+    return decltype(lhs OP rhs)::template MathOp<CLASS##OP_NAME##Op>(lhs,      \
+                                                                     rhs);     \
+  }                                                                            \
+  /* Assignment arithmetic operator implementation from CLASS##Numeric. */     \
+  template <typename L>                                                        \
+  template <typename R>                                                        \
+  constexpr CLASS##Numeric<L>& CLASS##Numeric<L>::operator CMP_OP(             \
+      const R rhs) {                                                           \
+    return MathOp<CLASS##OP_NAME##Op>(rhs);                                    \
+  }                                                                            \
+  /* Variadic arithmetic functions that return CLASS##Numeric. */              \
+  BASE_NUMERIC_ARITHMETIC_VARIADIC(CLASS, CL_ABBR, OP_NAME)
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_NUMERICS_SAFE_MATH_SHARED_IMPL_H_
diff --git a/base/observer_list.h b/base/observer_list.h
new file mode 100644
index 0000000..e900e43
--- /dev/null
+++ b/base/observer_list.h
@@ -0,0 +1,307 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_OBSERVER_LIST_H_
+#define BASE_OBSERVER_LIST_H_
+
+#include <stddef.h>
+
+#include <algorithm>
+#include <iterator>
+#include <limits>
+#include <utility>
+#include <vector>
+
+#include "base/gtest_prod_util.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
+#include "base/stl_util.h"
+
+///////////////////////////////////////////////////////////////////////////////
+//
+// OVERVIEW:
+//
+//   A list of observers. Unlike a standard vector or list, this container can
+//   be modified during iteration without invalidating the iterator. So, it
+//   safely handles the case of an observer removing itself or other observers
+//   from the list while observers are being notified.
+//
+//
+// WARNING:
+//
+//   ObserverList is not thread-compatible. Iterating on the same ObserverList
+//   simultaneously in different threads is not safe, even when the ObserverList
+//   itself is not modified.
+//
+//   For a thread-safe observer list, see ObserverListThreadSafe.
+//
+//
+// TYPICAL USAGE:
+//
+//   class MyWidget {
+//    public:
+//     ...
+//
+//     class Observer {
+//      public:
+//       virtual void OnFoo(MyWidget* w) = 0;
+//       virtual void OnBar(MyWidget* w, int x, int y) = 0;
+//     };
+//
+//     void AddObserver(Observer* obs) {
+//       observers_.AddObserver(obs);
+//     }
+//
+//     void RemoveObserver(const Observer* obs) {
+//       observers_.RemoveObserver(obs);
+//     }
+//
+//     void NotifyFoo() {
+//       for (Observer& obs : observers_)
+//         obs.OnFoo(this);
+//     }
+//
+//     void NotifyBar(int x, int y) {
+//       for (Observer& obs : observers_)
+//         obs.OnBar(this, x, y);
+//     }
+//
+//    private:
+//     base::ObserverList<Observer> observers_;
+//   };
+//
+//
+///////////////////////////////////////////////////////////////////////////////
+
+namespace base {
+
+// Enumeration of which observers are notified by ObserverList.
+enum class ObserverListPolicy {
+  // Specifies that any observers added during notification are notified.
+  // This is the default policy if no policy is provided to the constructor.
+  ALL,
+
+  // Specifies that observers added while sending out notification are not
+  // notified.
+  EXISTING_ONLY,
+};
+
+// When check_empty is true, assert that the list is empty on destruction.
+// When allow_reentrancy is false, iterating through the list while already in
+// the iteration loop will result in a DCHECK failure.
+// TODO(oshima): Change the default to non-reentrant. https://crbug.com/812109
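+// For example (illustrative), ObserverList<Foo, /*check_empty=*/true,
+// /*allow_reentrancy=*/false> DCHECKs if any observer is still in the list at
+// destruction, and DCHECKs on reentrant iteration.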
+template <class ObserverType,
+          bool check_empty = false,
+          bool allow_reentrancy = true>
+class ObserverList
+    : public SupportsWeakPtr<
+          ObserverList<ObserverType, check_empty, allow_reentrancy>> {
+ public:
+  // An iterator class that can be used to access the list of observers.
+  class Iter {
+   public:
+    using iterator_category = std::forward_iterator_tag;
+    using value_type = ObserverType;
+    using difference_type = ptrdiff_t;
+    using pointer = ObserverType*;
+    using reference = ObserverType&;
+
+    Iter() : index_(0), max_index_(0) {}
+
+    explicit Iter(const ObserverList* list)
+        : list_(const_cast<ObserverList*>(list)->AsWeakPtr()),
+          index_(0),
+          max_index_(list->policy_ == ObserverListPolicy::ALL
+                         ? std::numeric_limits<size_t>::max()
+                         : list->observers_.size()) {
+      DCHECK(list_);
+      DCHECK(allow_reentrancy || !list_->live_iterator_count_);
+      EnsureValidIndex();
+      ++list_->live_iterator_count_;
+    }
+
+    ~Iter() {
+      if (!list_)
+        return;
+
+      DCHECK_GT(list_->live_iterator_count_, 0);
+      if (--list_->live_iterator_count_ == 0)
+        list_->Compact();
+    }
+
+    Iter(const Iter& other)
+        : list_(other.list_),
+          index_(other.index_),
+          max_index_(other.max_index_) {
+      if (list_)
+        ++list_->live_iterator_count_;
+    }
+
+    Iter& operator=(Iter other) {
+      using std::swap;
+      swap(list_, other.list_);
+      swap(index_, other.index_);
+      swap(max_index_, other.max_index_);
+      return *this;
+    }
+
+    bool operator==(const Iter& other) const {
+      return (is_end() && other.is_end()) ||
+             (list_.get() == other.list_.get() && index_ == other.index_);
+    }
+
+    bool operator!=(const Iter& other) const { return !(*this == other); }
+
+    Iter& operator++() {
+      if (list_) {
+        ++index_;
+        EnsureValidIndex();
+      }
+      return *this;
+    }
+
+    Iter operator++(int) {
+      Iter it(*this);
+      ++(*this);
+      return it;
+    }
+
+    ObserverType* operator->() const {
+      ObserverType* const current = GetCurrent();
+      DCHECK(current);
+      return current;
+    }
+
+    ObserverType& operator*() const {
+      ObserverType* const current = GetCurrent();
+      DCHECK(current);
+      return *current;
+    }
+
+   private:
+    FRIEND_TEST_ALL_PREFIXES(ObserverListTest, BasicStdIterator);
+    FRIEND_TEST_ALL_PREFIXES(ObserverListTest, StdIteratorRemoveFront);
+
+    ObserverType* GetCurrent() const {
+      DCHECK(list_);
+      DCHECK_LT(index_, clamped_max_index());
+      return list_->observers_[index_];
+    }
+
+    void EnsureValidIndex() {
+      DCHECK(list_);
+      const size_t max_index = clamped_max_index();
+      while (index_ < max_index && !list_->observers_[index_])
+        ++index_;
+    }
+
+    size_t clamped_max_index() const {
+      return std::min(max_index_, list_->observers_.size());
+    }
+
+    bool is_end() const { return !list_ || index_ == clamped_max_index(); }
+
+    WeakPtr<ObserverList> list_;
+
+    // When initially constructed and each time the iterator is incremented,
+    // |index_| is guaranteed to point to a non-null index if the iterator
+    // has not reached the end of the ObserverList.
+    size_t index_;
+    size_t max_index_;
+  };
+
+  using iterator = Iter;
+  using const_iterator = Iter;
+
+  const_iterator begin() const {
+    // An optimization: do not involve weak pointers for an empty list.
+    return observers_.empty() ? const_iterator() : const_iterator(this);
+  }
+
+  const_iterator end() const { return const_iterator(); }
+
+  ObserverList() = default;
+  explicit ObserverList(ObserverListPolicy policy) : policy_(policy) {}
+
+  ~ObserverList() {
+    if (check_empty) {
+      Compact();
+      DCHECK(observers_.empty());
+    }
+  }
+
+  // Add an observer to this list. An observer should not be added to the same
+  // list more than once.
+  //
+  // Precondition: obs != nullptr
+  // Precondition: !HasObserver(obs)
+  void AddObserver(ObserverType* obs) {
+    DCHECK(obs);
+    if (HasObserver(obs)) {
+      NOTREACHED() << "Observers can only be added once!";
+      return;
+    }
+    observers_.push_back(obs);
+  }
+
+  // Removes the given observer from this list. Does nothing if this observer is
+  // not in this list.
+  void RemoveObserver(const ObserverType* obs) {
+    DCHECK(obs);
+    const auto it = std::find(observers_.begin(), observers_.end(), obs);
+    if (it == observers_.end())
+      return;
+
+    DCHECK_GE(live_iterator_count_, 0);
+    if (live_iterator_count_) {
+      *it = nullptr;
+    } else {
+      observers_.erase(it);
+    }
+  }
+
+  // Determine whether a particular observer is in the list.
+  bool HasObserver(const ObserverType* obs) const {
+    return ContainsValue(observers_, obs);
+  }
+
+  // Removes all the observers from this list.
+  void Clear() {
+    DCHECK_GE(live_iterator_count_, 0);
+    if (live_iterator_count_) {
+      std::fill(observers_.begin(), observers_.end(), nullptr);
+    } else {
+      observers_.clear();
+    }
+  }
+
+  bool might_have_observers() const { return !observers_.empty(); }
+
+ private:
+  // Compacts list of observers by removing null pointers.
+  void Compact() {
+    observers_.erase(std::remove(observers_.begin(), observers_.end(), nullptr),
+                     observers_.end());
+  }
+
+  std::vector<ObserverType*> observers_;
+
+  // Number of active iterators referencing this ObserverList.
+  //
+  // This counter is not synchronized although it is modified by const
+  // iterators.
+  int live_iterator_count_ = 0;
+
+  const ObserverListPolicy policy_ = ObserverListPolicy::ALL;
+
+  DISALLOW_COPY_AND_ASSIGN(ObserverList);
+};
+
+template <class ObserverType, bool check_empty = false>
+using ReentrantObserverList = ObserverList<ObserverType, check_empty, true>;
+
+}  // namespace base
+
+#endif  // BASE_OBSERVER_LIST_H_
diff --git a/base/observer_list_threadsafe.cc b/base/observer_list_threadsafe.cc
new file mode 100644
index 0000000..95c852f
--- /dev/null
+++ b/base/observer_list_threadsafe.cc
@@ -0,0 +1,16 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/observer_list_threadsafe.h"
+
+namespace base {
+namespace internal {
+
+LazyInstance<ThreadLocalPointer<
+    const ObserverListThreadSafeBase::NotificationDataBase>>::Leaky
+    ObserverListThreadSafeBase::tls_current_notification_ =
+        LAZY_INSTANCE_INITIALIZER;
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/observer_list_threadsafe.h b/base/observer_list_threadsafe.h
new file mode 100644
index 0000000..bd349f3
--- /dev/null
+++ b/base/observer_list_threadsafe.h
@@ -0,0 +1,237 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_OBSERVER_LIST_THREADSAFE_H_
+#define BASE_OBSERVER_LIST_THREADSAFE_H_
+
+#include <unordered_map>
+
+#include "base/base_export.h"
+#include "base/bind.h"
+#include "base/lazy_instance.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/observer_list.h"
+#include "base/sequenced_task_runner.h"
+#include "base/stl_util.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/sequenced_task_runner_handle.h"
+#include "base/threading/thread_local.h"
+#include "build/build_config.h"
+
+// TODO(fdoray): Removing these includes causes IWYU failures in other headers;
+// remove them in a follow-up CL.
+#include "base/memory/ptr_util.h"
+#include "base/single_thread_task_runner.h"
+#include "base/threading/thread_task_runner_handle.h"
+
+///////////////////////////////////////////////////////////////////////////////
+//
+// OVERVIEW:
+//
+//   A thread-safe container for a list of observers. This is similar to the
+//   observer_list (see observer_list.h), but it is more robust for multi-
+//   threaded situations.
+//
+//   The following use cases are supported:
+//    * Observers can register for notifications from any sequence. They are
+//      always notified on the sequence from which they were registered.
+//    * Any sequence may trigger a notification via Notify().
+//    * Observers can remove themselves from the observer list inside of a
+//      callback.
+//    * If one sequence is notifying observers concurrently with an observer
+//      removing itself from the observer list, the notifications will be
+//      silently dropped.
+//
+//   The drawback of the threadsafe observer list is that notifications are not
+//   as real-time as the non-threadsafe version of this class. Notifications
+//   will always be done via PostTask() to another sequence, whereas with the
+//   non-thread-safe observer_list, notifications happen synchronously.
+//
+///////////////////////////////////////////////////////////////////////////////
+
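+// TYPICAL USAGE (a minimal sketch; |MyObserver| and OnFoo() are hypothetical
+// names, not part of this header):
+//
+//   auto list =
+//       base::MakeRefCounted<base::ObserverListThreadSafe<MyObserver>>();
+//   list->AddObserver(this);  // Registers |this| on the current sequence.
+//   list->Notify(FROM_HERE, &MyObserver::OnFoo, 42);  // From any sequence.
+//   list->RemoveObserver(this);
+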
+namespace base {
+namespace internal {
+
+class BASE_EXPORT ObserverListThreadSafeBase
+    : public RefCountedThreadSafe<ObserverListThreadSafeBase> {
+ public:
+  ObserverListThreadSafeBase() = default;
+
+ protected:
+  template <typename ObserverType, typename Method>
+  struct Dispatcher;
+
+  template <typename ObserverType, typename ReceiverType, typename... Params>
+  struct Dispatcher<ObserverType, void (ReceiverType::*)(Params...)> {
+    static void Run(void (ReceiverType::*m)(Params...),
+                    Params... params,
+                    ObserverType* obj) {
+      (obj->*m)(std::forward<Params>(params)...);
+    }
+  };
+
+  struct NotificationDataBase {
+    NotificationDataBase(void* observer_list_in, const Location& from_here_in)
+        : observer_list(observer_list_in), from_here(from_here_in) {}
+
+    void* observer_list;
+    Location from_here;
+  };
+
+  virtual ~ObserverListThreadSafeBase() = default;
+
+  static LazyInstance<ThreadLocalPointer<const NotificationDataBase>>::Leaky
+      tls_current_notification_;
+
+ private:
+  friend class RefCountedThreadSafe<ObserverListThreadSafeBase>;
+
+  DISALLOW_COPY_AND_ASSIGN(ObserverListThreadSafeBase);
+};
+
+}  // namespace internal
+
+template <class ObserverType>
+class ObserverListThreadSafe : public internal::ObserverListThreadSafeBase {
+ public:
+  ObserverListThreadSafe() = default;
+  explicit ObserverListThreadSafe(ObserverListPolicy policy)
+      : policy_(policy) {}
+
+  // Adds |observer| to the list. |observer| must not already be in the list.
+  void AddObserver(ObserverType* observer) {
+    // TODO(fdoray): Change this to a DCHECK once all call sites have a
+    // SequencedTaskRunnerHandle.
+    if (!SequencedTaskRunnerHandle::IsSet())
+      return;
+
+    AutoLock auto_lock(lock_);
+
+    // Add |observer| to the list of observers.
+    DCHECK(!ContainsKey(observers_, observer));
+    const scoped_refptr<SequencedTaskRunner> task_runner =
+        SequencedTaskRunnerHandle::Get();
+    observers_[observer] = task_runner;
+
+    // If this is called while a notification is being dispatched on this thread
+    // and |policy_| is ALL, |observer| must be notified (if a notification is
+    // being dispatched on another thread in parallel, the notification may or
+    // may not make it to |observer| depending on the outcome of the race to
+    // |lock_|).
+    if (policy_ == ObserverListPolicy::ALL) {
+      const NotificationDataBase* current_notification =
+          tls_current_notification_.Get().Get();
+      if (current_notification && current_notification->observer_list == this) {
+        task_runner->PostTask(
+            current_notification->from_here,
+            BindOnce(
+                &ObserverListThreadSafe<ObserverType>::NotifyWrapper, this,
+                observer,
+                *static_cast<const NotificationData*>(current_notification)));
+      }
+    }
+  }
+
+  // Remove an observer from the list if it is in the list.
+  //
+  // If a notification was sent to the observer but hasn't started to run yet,
+  // it will be aborted. If a notification has started to run, removing the
+  // observer won't stop it.
+  void RemoveObserver(ObserverType* observer) {
+    AutoLock auto_lock(lock_);
+    observers_.erase(observer);
+  }
+
+  // Verifies that the list is currently empty (i.e. there are no observers).
+  void AssertEmpty() const {
+#if DCHECK_IS_ON()
+    AutoLock auto_lock(lock_);
+    DCHECK(observers_.empty());
+#endif
+  }
+
+  // Asynchronously invokes a callback on all observers, on their registration
+  // sequence. You cannot assume that all observers have been notified once
+  // Notify() returns; the notification may still be pending delivery.
+  template <typename Method, typename... Params>
+  void Notify(const Location& from_here, Method m, Params&&... params) {
+    Callback<void(ObserverType*)> method =
+        Bind(&Dispatcher<ObserverType, Method>::Run, m,
+             std::forward<Params>(params)...);
+
+    AutoLock lock(lock_);
+    for (const auto& observer : observers_) {
+      observer.second->PostTask(
+          from_here,
+          BindOnce(&ObserverListThreadSafe<ObserverType>::NotifyWrapper, this,
+                   observer.first, NotificationData(this, from_here, method)));
+    }
+  }
+
+ private:
+  friend class RefCountedThreadSafe<ObserverListThreadSafeBase>;
+
+  struct NotificationData : public NotificationDataBase {
+    NotificationData(ObserverListThreadSafe* observer_list_in,
+                     const Location& from_here_in,
+                     const Callback<void(ObserverType*)>& method_in)
+        : NotificationDataBase(observer_list_in, from_here_in),
+          method(method_in) {}
+
+    Callback<void(ObserverType*)> method;
+  };
+
+  ~ObserverListThreadSafe() override = default;
+
+  void NotifyWrapper(ObserverType* observer,
+                     const NotificationData& notification) {
+    {
+      AutoLock auto_lock(lock_);
+
+      // Check whether the observer still needs a notification.
+      auto it = observers_.find(observer);
+      if (it == observers_.end())
+        return;
+      DCHECK(it->second->RunsTasksInCurrentSequence());
+    }
+
+    // Keep track of the notification being dispatched on the current thread.
+    // This will be used if the callback below calls AddObserver().
+    //
+    // Note: |tls_current_notification_| may not be nullptr if this runs in a
+    // nested loop started by a notification callback. In that case, it is
+    // important to save the previous value to restore it later.
+    auto& tls_current_notification = tls_current_notification_.Get();
+    const NotificationDataBase* const previous_notification =
+        tls_current_notification.Get();
+    tls_current_notification.Set(&notification);
+
+    // Invoke the callback.
+    notification.method.Run(observer);
+
+    // Reset the notification being dispatched on the current thread to its
+    // previous value.
+    tls_current_notification.Set(previous_notification);
+  }
+
+  const ObserverListPolicy policy_ = ObserverListPolicy::ALL;
+
+  // Synchronizes access to |observers_|.
+  mutable Lock lock_;
+
+  // Keys are observers. Values are the SequencedTaskRunners on which they must
+  // be notified.
+  std::unordered_map<ObserverType*, scoped_refptr<SequencedTaskRunner>>
+      observers_;
+
+  DISALLOW_COPY_AND_ASSIGN(ObserverListThreadSafe);
+};
+
+}  // namespace base
+
+#endif  // BASE_OBSERVER_LIST_THREADSAFE_H_
diff --git a/base/observer_list_unittest.cc b/base/observer_list_unittest.cc
new file mode 100644
index 0000000..37629ef
--- /dev/null
+++ b/base/observer_list_unittest.cc
@@ -0,0 +1,1276 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/observer_list.h"
+#include "base/observer_list_threadsafe.h"
+
+#include <memory>
+#include <tuple>  // For std::ignore.
+#include <utility>
+#include <vector>
+
+#include "base/bind.h"
+#include "base/compiler_specific.h"
+#include "base/location.h"
+#include "base/memory/weak_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/sequenced_task_runner.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/task_scheduler/post_task.h"
+#include "base/task_scheduler/task_scheduler.h"
+#include "base/test/gtest_util.h"
+#include "base/test/scoped_task_environment.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread_restrictions.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+class Foo {
+ public:
+  virtual void Observe(int x) = 0;
+  virtual ~Foo() = default;
+  virtual int GetValue() const { return 0; }
+};
+
+class Adder : public Foo {
+ public:
+  explicit Adder(int scaler) : total(0), scaler_(scaler) {}
+  ~Adder() override = default;
+
+  void Observe(int x) override { total += x * scaler_; }
+  int GetValue() const override { return total; }
+
+  int total;
+
+ private:
+  int scaler_;
+};
+
+class Disrupter : public Foo {
+ public:
+  Disrupter(ObserverList<Foo>* list, Foo* doomed, bool remove_self)
+      : list_(list), doomed_(doomed), remove_self_(remove_self) {}
+  Disrupter(ObserverList<Foo>* list, Foo* doomed)
+      : Disrupter(list, doomed, false) {}
+  Disrupter(ObserverList<Foo>* list, bool remove_self)
+      : Disrupter(list, nullptr, remove_self) {}
+
+  ~Disrupter() override = default;
+
+  void Observe(int x) override {
+    if (remove_self_)
+      list_->RemoveObserver(this);
+    if (doomed_)
+      list_->RemoveObserver(doomed_);
+  }
+
+  void SetDoomed(Foo* doomed) { doomed_ = doomed; }
+
+ private:
+  ObserverList<Foo>* list_;
+  Foo* doomed_;
+  bool remove_self_;
+};
+
+template <typename ObserverListType>
+class AddInObserve : public Foo {
+ public:
+  explicit AddInObserve(ObserverListType* observer_list)
+      : observer_list(observer_list), to_add_() {}
+
+  void SetToAdd(Foo* to_add) { to_add_ = to_add; }
+
+  void Observe(int x) override {
+    if (to_add_) {
+      observer_list->AddObserver(to_add_);
+      to_add_ = nullptr;
+    }
+  }
+
+  ObserverListType* observer_list;
+  Foo* to_add_;
+};
+
+static const int kThreadRunTime = 2000;  // ms to run the multi-threaded test.
+
+// A thread for use in the ThreadSafeObserver test
+// which will add and remove itself from the notification
+// list repeatedly.
+class AddRemoveThread : public PlatformThread::Delegate,
+                        public Foo {
+ public:
+  AddRemoveThread(ObserverListThreadSafe<Foo>* list,
+                  bool notify,
+                  WaitableEvent* ready)
+      : list_(list),
+        loop_(nullptr),
+        in_list_(false),
+        start_(Time::Now()),
+        count_observes_(0),
+        count_addtask_(0),
+        do_notifies_(notify),
+        ready_(ready),
+        weak_factory_(this) {}
+
+  ~AddRemoveThread() override = default;
+
+  void ThreadMain() override {
+    loop_ = new MessageLoop();  // Fire up a message loop.
+    loop_->task_runner()->PostTask(
+        FROM_HERE,
+        base::BindOnce(&AddRemoveThread::AddTask, weak_factory_.GetWeakPtr()));
+    ready_->Signal();
+    // After ready_ is signaled, loop_ is only accessed by the main test thread
+    // (i.e. not this thread), in particular by Quit(), which causes Run() to
+    // return, after which we "control" loop_ again.
+    RunLoop().Run();
+    delete loop_;
+    loop_ = reinterpret_cast<MessageLoop*>(0xdeadbeef);
+    delete this;
+  }
+
+  // This task just keeps posting to itself in an attempt
+  // to race with the notifier.
+  void AddTask() {
+    count_addtask_++;
+
+    if ((Time::Now() - start_).InMilliseconds() > kThreadRunTime) {
+      VLOG(1) << "DONE!";
+      return;
+    }
+
+    if (!in_list_) {
+      list_->AddObserver(this);
+      in_list_ = true;
+    }
+
+    if (do_notifies_) {
+      list_->Notify(FROM_HERE, &Foo::Observe, 10);
+    }
+
+    loop_->task_runner()->PostTask(
+        FROM_HERE,
+        base::BindOnce(&AddRemoveThread::AddTask, weak_factory_.GetWeakPtr()));
+  }
+
+  // This function is only callable from the main thread.
+  void Quit() {
+    loop_->task_runner()->PostTask(
+        FROM_HERE, RunLoop::QuitCurrentWhenIdleClosureDeprecated());
+  }
+
+  void Observe(int x) override {
+    count_observes_++;
+
+    // If we're getting called after we removed ourselves from
+    // the list, that is very bad!
+    DCHECK(in_list_);
+
+    // This callback should fire on the appropriate thread.
+    EXPECT_EQ(loop_, MessageLoop::current());
+
+    list_->RemoveObserver(this);
+    in_list_ = false;
+  }
+
+ private:
+  ObserverListThreadSafe<Foo>* list_;
+  MessageLoop* loop_;
+  bool in_list_;        // Are we currently registered for notifications?
+                        // in_list_ is only used on |this| thread.
+  Time start_;          // The time we started the test.
+
+  int count_observes_;  // Number of times we observed.
+  int count_addtask_;   // Number of times AddTask() was called.
+  bool do_notifies_;    // Whether these threads should do notifications.
+  WaitableEvent* ready_;
+
+  base::WeakPtrFactory<AddRemoveThread> weak_factory_;
+};
+
+}  // namespace
+
+TEST(ObserverListTest, BasicTest) {
+  ObserverList<Foo> observer_list;
+  const ObserverList<Foo>& const_observer_list = observer_list;
+
+  {
+    const ObserverList<Foo>::const_iterator it1 = const_observer_list.begin();
+    EXPECT_EQ(it1, const_observer_list.end());
+    // Iterator copy.
+    const ObserverList<Foo>::const_iterator it2 = it1;
+    EXPECT_EQ(it2, it1);
+    // Iterator assignment.
+    ObserverList<Foo>::const_iterator it3;
+    it3 = it2;
+    EXPECT_EQ(it3, it1);
+    EXPECT_EQ(it3, it2);
+    // Self assignment.
+    it3 = *&it3;  // The *& defeats Clang's -Wself-assign warning.
+    EXPECT_EQ(it3, it1);
+    EXPECT_EQ(it3, it2);
+  }
+
+  {
+    const ObserverList<Foo>::iterator it1 = observer_list.begin();
+    EXPECT_EQ(it1, observer_list.end());
+    // Iterator copy.
+    const ObserverList<Foo>::iterator it2 = it1;
+    EXPECT_EQ(it2, it1);
+    // Iterator assignment.
+    ObserverList<Foo>::iterator it3;
+    it3 = it2;
+    EXPECT_EQ(it3, it1);
+    EXPECT_EQ(it3, it2);
+    // Self assignment.
+    it3 = *&it3;  // The *& defeats Clang's -Wself-assign warning.
+    EXPECT_EQ(it3, it1);
+    EXPECT_EQ(it3, it2);
+  }
+
+  Adder a(1), b(-1), c(1), d(-1), e(-1);
+  Disrupter evil(&observer_list, &c);
+
+  observer_list.AddObserver(&a);
+  observer_list.AddObserver(&b);
+
+  EXPECT_TRUE(const_observer_list.HasObserver(&a));
+  EXPECT_FALSE(const_observer_list.HasObserver(&c));
+
+  {
+    const ObserverList<Foo>::const_iterator it1 = const_observer_list.begin();
+    EXPECT_NE(it1, const_observer_list.end());
+    // Iterator copy.
+    const ObserverList<Foo>::const_iterator it2 = it1;
+    EXPECT_EQ(it2, it1);
+    EXPECT_NE(it2, const_observer_list.end());
+    // Iterator assignment.
+    ObserverList<Foo>::const_iterator it3;
+    it3 = it2;
+    EXPECT_EQ(it3, it1);
+    EXPECT_EQ(it3, it2);
+    // Self assignment.
+    it3 = *&it3;  // The *& defeats Clang's -Wself-assign warning.
+    EXPECT_EQ(it3, it1);
+    EXPECT_EQ(it3, it2);
+    // Iterator post increment.
+    ObserverList<Foo>::const_iterator it4 = it3++;
+    EXPECT_EQ(it4, it1);
+    EXPECT_EQ(it4, it2);
+    EXPECT_NE(it4, it3);
+  }
+
+  {
+    const ObserverList<Foo>::iterator it1 = observer_list.begin();
+    EXPECT_NE(it1, observer_list.end());
+    // Iterator copy.
+    const ObserverList<Foo>::iterator it2 = it1;
+    EXPECT_EQ(it2, it1);
+    EXPECT_NE(it2, observer_list.end());
+    // Iterator assignment.
+    ObserverList<Foo>::iterator it3;
+    it3 = it2;
+    EXPECT_EQ(it3, it1);
+    EXPECT_EQ(it3, it2);
+    // Self assignment.
+    it3 = *&it3;  // The *& defeats Clang's -Wself-assign warning.
+    EXPECT_EQ(it3, it1);
+    EXPECT_EQ(it3, it2);
+    // Iterator post increment.
+    ObserverList<Foo>::iterator it4 = it3++;
+    EXPECT_EQ(it4, it1);
+    EXPECT_EQ(it4, it2);
+    EXPECT_NE(it4, it3);
+  }
+
+  for (auto& observer : observer_list)
+    observer.Observe(10);
+
+  observer_list.AddObserver(&evil);
+  observer_list.AddObserver(&c);
+  observer_list.AddObserver(&d);
+
+  // Removing an observer not in the list should do nothing.
+  observer_list.RemoveObserver(&e);
+
+  for (auto& observer : observer_list)
+    observer.Observe(10);
+
+  EXPECT_EQ(20, a.total);
+  EXPECT_EQ(-20, b.total);
+  EXPECT_EQ(0, c.total);
+  EXPECT_EQ(-10, d.total);
+  EXPECT_EQ(0, e.total);
+}
+
+TEST(ObserverListTest, CompactsWhenNoActiveIterator) {
+  ObserverList<const Foo> ol;
+  const ObserverList<const Foo>& col = ol;
+
+  const Adder a(1);
+  const Adder b(2);
+  const Adder c(3);
+
+  ol.AddObserver(&a);
+  ol.AddObserver(&b);
+
+  EXPECT_TRUE(col.HasObserver(&a));
+  EXPECT_FALSE(col.HasObserver(&c));
+
+  EXPECT_TRUE(col.might_have_observers());
+
+  using It = ObserverList<const Foo>::const_iterator;
+
+  {
+    It it = col.begin();
+    EXPECT_NE(it, col.end());
+    It ita = it;
+    EXPECT_EQ(ita, it);
+    EXPECT_NE(++it, col.end());
+    EXPECT_NE(ita, it);
+    It itb = it;
+    EXPECT_EQ(itb, it);
+    EXPECT_EQ(++it, col.end());
+
+    EXPECT_TRUE(col.might_have_observers());
+    EXPECT_EQ(&*ita, &a);
+    EXPECT_EQ(&*itb, &b);
+
+    ol.RemoveObserver(&a);
+    EXPECT_TRUE(col.might_have_observers());
+    EXPECT_FALSE(col.HasObserver(&a));
+    EXPECT_EQ(&*itb, &b);
+
+    ol.RemoveObserver(&b);
+    EXPECT_TRUE(col.might_have_observers());
+    EXPECT_FALSE(col.HasObserver(&a));
+    EXPECT_FALSE(col.HasObserver(&b));
+
+    it = It();
+    ita = It();
+    EXPECT_TRUE(col.might_have_observers());
+    ita = itb;
+    itb = It();
+    EXPECT_TRUE(col.might_have_observers());
+    ita = It();
+    EXPECT_FALSE(col.might_have_observers());
+  }
+
+  ol.AddObserver(&a);
+  ol.AddObserver(&b);
+  EXPECT_TRUE(col.might_have_observers());
+  ol.Clear();
+  EXPECT_FALSE(col.might_have_observers());
+
+  ol.AddObserver(&a);
+  ol.AddObserver(&b);
+  EXPECT_TRUE(col.might_have_observers());
+  {
+    const It it = col.begin();
+    ol.Clear();
+    EXPECT_TRUE(col.might_have_observers());
+  }
+  EXPECT_FALSE(col.might_have_observers());
+}
+
+TEST(ObserverListTest, DisruptSelf) {
+  ObserverList<Foo> observer_list;
+  Adder a(1), b(-1), c(1), d(-1);
+  Disrupter evil(&observer_list, true);
+
+  observer_list.AddObserver(&a);
+  observer_list.AddObserver(&b);
+
+  for (auto& observer : observer_list)
+    observer.Observe(10);
+
+  observer_list.AddObserver(&evil);
+  observer_list.AddObserver(&c);
+  observer_list.AddObserver(&d);
+
+  for (auto& observer : observer_list)
+    observer.Observe(10);
+
+  EXPECT_EQ(20, a.total);
+  EXPECT_EQ(-20, b.total);
+  EXPECT_EQ(10, c.total);
+  EXPECT_EQ(-10, d.total);
+}
+
+TEST(ObserverListTest, DisruptBefore) {
+  ObserverList<Foo> observer_list;
+  Adder a(1), b(-1), c(1), d(-1);
+  Disrupter evil(&observer_list, &b);
+
+  observer_list.AddObserver(&a);
+  observer_list.AddObserver(&b);
+  observer_list.AddObserver(&evil);
+  observer_list.AddObserver(&c);
+  observer_list.AddObserver(&d);
+
+  for (auto& observer : observer_list)
+    observer.Observe(10);
+  for (auto& observer : observer_list)
+    observer.Observe(10);
+
+  EXPECT_EQ(20, a.total);
+  EXPECT_EQ(-10, b.total);
+  EXPECT_EQ(20, c.total);
+  EXPECT_EQ(-20, d.total);
+}
+
+TEST(ObserverListThreadSafeTest, BasicTest) {
+  MessageLoop loop;
+
+  scoped_refptr<ObserverListThreadSafe<Foo> > observer_list(
+      new ObserverListThreadSafe<Foo>);
+  Adder a(1);
+  Adder b(-1);
+  Adder c(1);
+  Adder d(-1);
+
+  observer_list->AddObserver(&a);
+  observer_list->AddObserver(&b);
+
+  observer_list->Notify(FROM_HERE, &Foo::Observe, 10);
+  RunLoop().RunUntilIdle();
+
+  observer_list->AddObserver(&c);
+  observer_list->AddObserver(&d);
+
+  observer_list->Notify(FROM_HERE, &Foo::Observe, 10);
+  observer_list->RemoveObserver(&c);
+  RunLoop().RunUntilIdle();
+
+  EXPECT_EQ(20, a.total);
+  EXPECT_EQ(-20, b.total);
+  EXPECT_EQ(0, c.total);
+  EXPECT_EQ(-10, d.total);
+}
+
+TEST(ObserverListThreadSafeTest, RemoveObserver) {
+  MessageLoop loop;
+
+  scoped_refptr<ObserverListThreadSafe<Foo> > observer_list(
+      new ObserverListThreadSafe<Foo>);
+  Adder a(1), b(1);
+
+  // A workaround for the compiler bug. See http://crbug.com/121960.
+  EXPECT_NE(&a, &b);
+
+  // Should do nothing.
+  observer_list->RemoveObserver(&a);
+  observer_list->RemoveObserver(&b);
+
+  observer_list->Notify(FROM_HERE, &Foo::Observe, 10);
+  RunLoop().RunUntilIdle();
+
+  EXPECT_EQ(0, a.total);
+  EXPECT_EQ(0, b.total);
+
+  observer_list->AddObserver(&a);
+
+  // Should also do nothing.
+  observer_list->RemoveObserver(&b);
+
+  observer_list->Notify(FROM_HERE, &Foo::Observe, 10);
+  RunLoop().RunUntilIdle();
+
+  EXPECT_EQ(10, a.total);
+  EXPECT_EQ(0, b.total);
+}
+
+TEST(ObserverListThreadSafeTest, WithoutSequence) {
+  scoped_refptr<ObserverListThreadSafe<Foo> > observer_list(
+      new ObserverListThreadSafe<Foo>);
+
+  Adder a(1), b(1), c(1);
+
+  // No sequence, so these should not be added.
+  observer_list->AddObserver(&a);
+  observer_list->AddObserver(&b);
+
+  {
+    // Add c when there's a sequence.
+    MessageLoop loop;
+    observer_list->AddObserver(&c);
+
+    observer_list->Notify(FROM_HERE, &Foo::Observe, 10);
+    RunLoop().RunUntilIdle();
+
+    EXPECT_EQ(0, a.total);
+    EXPECT_EQ(0, b.total);
+    EXPECT_EQ(10, c.total);
+
+    // Now add a when there's a sequence.
+    observer_list->AddObserver(&a);
+
+    // Remove c when there's a sequence.
+    observer_list->RemoveObserver(&c);
+
+    // Notify again.
+    observer_list->Notify(FROM_HERE, &Foo::Observe, 20);
+    RunLoop().RunUntilIdle();
+
+    EXPECT_EQ(20, a.total);
+    EXPECT_EQ(0, b.total);
+    EXPECT_EQ(10, c.total);
+  }
+
+  // Removing should always succeed with or without a sequence.
+  observer_list->RemoveObserver(&a);
+
+  // Notifying should not fail but should also be a no-op.
+  MessageLoop loop;
+  observer_list->AddObserver(&b);
+  observer_list->Notify(FROM_HERE, &Foo::Observe, 30);
+  RunLoop().RunUntilIdle();
+
+  EXPECT_EQ(20, a.total);
+  EXPECT_EQ(30, b.total);
+  EXPECT_EQ(10, c.total);
+}
+
+class FooRemover : public Foo {
+ public:
+  explicit FooRemover(ObserverListThreadSafe<Foo>* list) : list_(list) {}
+  ~FooRemover() override = default;
+
+  void AddFooToRemove(Foo* foo) {
+    foos_.push_back(foo);
+  }
+
+  void Observe(int x) override {
+    std::vector<Foo*> tmp;
+    tmp.swap(foos_);
+    for (std::vector<Foo*>::iterator it = tmp.begin();
+         it != tmp.end(); ++it) {
+      list_->RemoveObserver(*it);
+    }
+  }
+
+ private:
+  const scoped_refptr<ObserverListThreadSafe<Foo> > list_;
+  std::vector<Foo*> foos_;
+};
+
+TEST(ObserverListThreadSafeTest, RemoveMultipleObservers) {
+  MessageLoop loop;
+  scoped_refptr<ObserverListThreadSafe<Foo> > observer_list(
+      new ObserverListThreadSafe<Foo>);
+
+  FooRemover a(observer_list.get());
+  Adder b(1);
+
+  observer_list->AddObserver(&a);
+  observer_list->AddObserver(&b);
+
+  a.AddFooToRemove(&a);
+  a.AddFooToRemove(&b);
+
+  observer_list->Notify(FROM_HERE, &Foo::Observe, 1);
+  RunLoop().RunUntilIdle();
+}
+
+// A test driver for a multi-threaded notification loop.  Runs a number
+// of observer threads, each of which constantly adds/removes itself
+// from the observer list.  Optionally, if cross_thread_notifies is set
+// to true, the observer threads will also trigger notifications to
+// all observers.
+static void ThreadSafeObserverHarness(int num_threads,
+                                      bool cross_thread_notifies) {
+  MessageLoop loop;
+
+  scoped_refptr<ObserverListThreadSafe<Foo> > observer_list(
+      new ObserverListThreadSafe<Foo>);
+  Adder a(1);
+  Adder b(-1);
+
+  observer_list->AddObserver(&a);
+  observer_list->AddObserver(&b);
+
+  std::vector<AddRemoveThread*> threaded_observer;
+  std::vector<base::PlatformThreadHandle> threads(num_threads);
+  std::vector<std::unique_ptr<base::WaitableEvent>> ready;
+  threaded_observer.reserve(num_threads);
+  ready.reserve(num_threads);
+  for (int index = 0; index < num_threads; index++) {
+    ready.push_back(std::make_unique<WaitableEvent>(
+        WaitableEvent::ResetPolicy::MANUAL,
+        WaitableEvent::InitialState::NOT_SIGNALED));
+    threaded_observer.push_back(new AddRemoveThread(
+        observer_list.get(), cross_thread_notifies, ready.back().get()));
+    EXPECT_TRUE(
+        PlatformThread::Create(0, threaded_observer.back(), &threads[index]));
+  }
+  ASSERT_EQ(static_cast<size_t>(num_threads), threaded_observer.size());
+  ASSERT_EQ(static_cast<size_t>(num_threads), ready.size());
+
+  // This makes sure that threaded_observer has gotten to set loop_, so that we
+  // can call Quit() below safe-ish-ly.
+  for (int i = 0; i < num_threads; ++i)
+    ready[i]->Wait();
+
+  Time start = Time::Now();
+  while (true) {
+    if ((Time::Now() - start).InMilliseconds() > kThreadRunTime)
+      break;
+
+    observer_list->Notify(FROM_HERE, &Foo::Observe, 10);
+
+    RunLoop().RunUntilIdle();
+  }
+
+  for (int index = 0; index < num_threads; index++) {
+    threaded_observer[index]->Quit();
+    PlatformThread::Join(threads[index]);
+  }
+}
+
+TEST(ObserverListThreadSafeTest, CrossThreadObserver) {
+  // Use 7 observer threads.  Notifications only come from
+  // the main thread.
+  ThreadSafeObserverHarness(7, false);
+}
+
+TEST(ObserverListThreadSafeTest, CrossThreadNotifications) {
+  // Use 3 observer threads.  Notifications will fire from
+  // the main thread and all 3 observer threads.
+  ThreadSafeObserverHarness(3, true);
+}
+
+TEST(ObserverListThreadSafeTest, OutlivesMessageLoop) {
+  MessageLoop* loop = new MessageLoop;
+  scoped_refptr<ObserverListThreadSafe<Foo> > observer_list(
+      new ObserverListThreadSafe<Foo>);
+
+  Adder a(1);
+  observer_list->AddObserver(&a);
+  delete loop;
+  // Test passes if we don't crash here.
+  observer_list->Notify(FROM_HERE, &Foo::Observe, 1);
+}
+
+namespace {
+
+class SequenceVerificationObserver : public Foo {
+ public:
+  explicit SequenceVerificationObserver(
+      scoped_refptr<SequencedTaskRunner> task_runner)
+      : task_runner_(std::move(task_runner)) {}
+  ~SequenceVerificationObserver() override = default;
+
+  void Observe(int x) override {
+    called_on_valid_sequence_ = task_runner_->RunsTasksInCurrentSequence();
+  }
+
+  bool called_on_valid_sequence() const { return called_on_valid_sequence_; }
+
+ private:
+  const scoped_refptr<SequencedTaskRunner> task_runner_;
+  bool called_on_valid_sequence_ = false;
+
+  DISALLOW_COPY_AND_ASSIGN(SequenceVerificationObserver);
+};
+
+}  // namespace
+
+// Verify that observers are notified on the correct sequence.
+TEST(ObserverListThreadSafeTest, NotificationOnValidSequence) {
+  test::ScopedTaskEnvironment scoped_task_environment;
+
+  auto task_runner_1 = CreateSequencedTaskRunnerWithTraits(TaskTraits());
+  auto task_runner_2 = CreateSequencedTaskRunnerWithTraits(TaskTraits());
+
+  auto observer_list = MakeRefCounted<ObserverListThreadSafe<Foo>>();
+
+  SequenceVerificationObserver observer_1(task_runner_1);
+  SequenceVerificationObserver observer_2(task_runner_2);
+
+  task_runner_1->PostTask(FROM_HERE,
+                          BindOnce(&ObserverListThreadSafe<Foo>::AddObserver,
+                                   observer_list, Unretained(&observer_1)));
+  task_runner_2->PostTask(FROM_HERE,
+                          BindOnce(&ObserverListThreadSafe<Foo>::AddObserver,
+                                   observer_list, Unretained(&observer_2)));
+
+  TaskScheduler::GetInstance()->FlushForTesting();
+
+  observer_list->Notify(FROM_HERE, &Foo::Observe, 1);
+
+  TaskScheduler::GetInstance()->FlushForTesting();
+
+  EXPECT_TRUE(observer_1.called_on_valid_sequence());
+  EXPECT_TRUE(observer_2.called_on_valid_sequence());
+}
+
+// Verify that when an observer is added to a NOTIFY_ALL ObserverListThreadSafe
+// from a notification, it is itself notified.
+TEST(ObserverListThreadSafeTest, AddObserverFromNotificationNotifyAll) {
+  test::ScopedTaskEnvironment scoped_task_environment;
+  auto observer_list = MakeRefCounted<ObserverListThreadSafe<Foo>>();
+
+  Adder observer_added_from_notification(1);
+
+  AddInObserve<ObserverListThreadSafe<Foo>> initial_observer(
+      observer_list.get());
+  initial_observer.SetToAdd(&observer_added_from_notification);
+  observer_list->AddObserver(&initial_observer);
+
+  observer_list->Notify(FROM_HERE, &Foo::Observe, 1);
+
+  base::RunLoop().RunUntilIdle();
+
+  EXPECT_EQ(1, observer_added_from_notification.GetValue());
+}
+
+namespace {
+
+class RemoveWhileNotificationIsRunningObserver : public Foo {
+ public:
+  RemoveWhileNotificationIsRunningObserver()
+      : notification_running_(WaitableEvent::ResetPolicy::AUTOMATIC,
+                              WaitableEvent::InitialState::NOT_SIGNALED),
+        barrier_(WaitableEvent::ResetPolicy::AUTOMATIC,
+                 WaitableEvent::InitialState::NOT_SIGNALED) {}
+  ~RemoveWhileNotificationIsRunningObserver() override = default;
+
+  void Observe(int x) override {
+    notification_running_.Signal();
+    ScopedAllowBaseSyncPrimitivesForTesting allow_base_sync_primitives;
+    barrier_.Wait();
+  }
+
+  void WaitForNotificationRunning() { notification_running_.Wait(); }
+  void Unblock() { barrier_.Signal(); }
+
+ private:
+  WaitableEvent notification_running_;
+  WaitableEvent barrier_;
+
+  DISALLOW_COPY_AND_ASSIGN(RemoveWhileNotificationIsRunningObserver);
+};
+
+}  // namespace
+
+// Verify that there is no crash when an observer is removed while it is being
+// notified.
+TEST(ObserverListThreadSafeTest, RemoveWhileNotificationIsRunning) {
+  auto observer_list = MakeRefCounted<ObserverListThreadSafe<Foo>>();
+  RemoveWhileNotificationIsRunningObserver observer;
+
+  WaitableEvent task_running(WaitableEvent::ResetPolicy::AUTOMATIC,
+                             WaitableEvent::InitialState::NOT_SIGNALED);
+  WaitableEvent barrier(WaitableEvent::ResetPolicy::AUTOMATIC,
+                        WaitableEvent::InitialState::NOT_SIGNALED);
+
+  // This must be after the declaration of |barrier| so that tasks posted to
+  // TaskScheduler can safely use |barrier|.
+  test::ScopedTaskEnvironment scoped_task_environment;
+
+  CreateSequencedTaskRunnerWithTraits({})->PostTask(
+      FROM_HERE, base::BindOnce(&ObserverListThreadSafe<Foo>::AddObserver,
+                                observer_list, Unretained(&observer)));
+  TaskScheduler::GetInstance()->FlushForTesting();
+
+  observer_list->Notify(FROM_HERE, &Foo::Observe, 1);
+  observer.WaitForNotificationRunning();
+  observer_list->RemoveObserver(&observer);
+
+  observer.Unblock();
+}
+
+TEST(ObserverListTest, Existing) {
+  ObserverList<Foo> observer_list(ObserverListPolicy::EXISTING_ONLY);
+  Adder a(1);
+  AddInObserve<ObserverList<Foo> > b(&observer_list);
+  Adder c(1);
+  b.SetToAdd(&c);
+
+  observer_list.AddObserver(&a);
+  observer_list.AddObserver(&b);
+
+  for (auto& observer : observer_list)
+    observer.Observe(1);
+
+  EXPECT_FALSE(b.to_add_);
+  // B's adder should not have been notified because it was added during
+  // notification.
+  EXPECT_EQ(0, c.total);
+
+  // Notify again to make sure b's adder is notified.
+  for (auto& observer : observer_list)
+    observer.Observe(1);
+  EXPECT_EQ(1, c.total);
+}
+
+// Same as above, but for ObserverListThreadSafe
+TEST(ObserverListThreadSafeTest, Existing) {
+  MessageLoop loop;
+  scoped_refptr<ObserverListThreadSafe<Foo>> observer_list(
+      new ObserverListThreadSafe<Foo>(ObserverListPolicy::EXISTING_ONLY));
+  Adder a(1);
+  AddInObserve<ObserverListThreadSafe<Foo> > b(observer_list.get());
+  Adder c(1);
+  b.SetToAdd(&c);
+
+  observer_list->AddObserver(&a);
+  observer_list->AddObserver(&b);
+
+  observer_list->Notify(FROM_HERE, &Foo::Observe, 1);
+  RunLoop().RunUntilIdle();
+
+  EXPECT_FALSE(b.to_add_);
+  // B's adder should not have been notified because it was added during
+  // notification.
+  EXPECT_EQ(0, c.total);
+
+  // Notify again to make sure b's adder is notified.
+  observer_list->Notify(FROM_HERE, &Foo::Observe, 1);
+  RunLoop().RunUntilIdle();
+  EXPECT_EQ(1, c.total);
+}
+
+class AddInClearObserve : public Foo {
+ public:
+  explicit AddInClearObserve(ObserverList<Foo>* list)
+      : list_(list), added_(false), adder_(1) {}
+
+  void Observe(int /* x */) override {
+    list_->Clear();
+    list_->AddObserver(&adder_);
+    added_ = true;
+  }
+
+  bool added() const { return added_; }
+  const Adder& adder() const { return adder_; }
+
+ private:
+  ObserverList<Foo>* const list_;
+
+  bool added_;
+  Adder adder_;
+};
+
+TEST(ObserverListTest, ClearNotifyAll) {
+  ObserverList<Foo> observer_list;
+  AddInClearObserve a(&observer_list);
+
+  observer_list.AddObserver(&a);
+
+  for (auto& observer : observer_list)
+    observer.Observe(1);
+  EXPECT_TRUE(a.added());
+  EXPECT_EQ(1, a.adder().total)
+      << "Adder should observe once and have sum of 1.";
+}
+
+TEST(ObserverListTest, ClearNotifyExistingOnly) {
+  ObserverList<Foo> observer_list(ObserverListPolicy::EXISTING_ONLY);
+  AddInClearObserve a(&observer_list);
+
+  observer_list.AddObserver(&a);
+
+  for (auto& observer : observer_list)
+    observer.Observe(1);
+  EXPECT_TRUE(a.added());
+  EXPECT_EQ(0, a.adder().total)
+      << "Adder should not observe, so sum should still be 0.";
+}
+
+class ListDestructor : public Foo {
+ public:
+  explicit ListDestructor(ObserverList<Foo>* list) : list_(list) {}
+  ~ListDestructor() override = default;
+
+  void Observe(int x) override { delete list_; }
+
+ private:
+  ObserverList<Foo>* list_;
+};
+
+TEST(ObserverListTest, IteratorOutlivesList) {
+  ObserverList<Foo>* observer_list = new ObserverList<Foo>;
+  ListDestructor a(observer_list);
+  observer_list->AddObserver(&a);
+
+  for (auto& observer : *observer_list)
+    observer.Observe(0);
+
+  // There are no EXPECT* statements for this test: if we catch
+  // use-after-free errors for observer_list (e.g. with ASan), then
+  // this test has failed.  See http://crbug.com/85296.
+}
+
+TEST(ObserverListTest, BasicStdIterator) {
+  using FooList = ObserverList<Foo>;
+  FooList observer_list;
+
+  // An optimization: begin() and end() do not involve weak pointers on
+  // an empty list.
+  EXPECT_FALSE(observer_list.begin().list_);
+  EXPECT_FALSE(observer_list.end().list_);
+
+  // Iterate over empty list: no effect, no crash.
+  for (auto& i : observer_list)
+    i.Observe(10);
+
+  Adder a(1), b(-1), c(1), d(-1);
+
+  observer_list.AddObserver(&a);
+  observer_list.AddObserver(&b);
+  observer_list.AddObserver(&c);
+  observer_list.AddObserver(&d);
+
+  for (FooList::iterator i = observer_list.begin(), e = observer_list.end();
+       i != e; ++i)
+    i->Observe(1);
+
+  EXPECT_EQ(1, a.total);
+  EXPECT_EQ(-1, b.total);
+  EXPECT_EQ(1, c.total);
+  EXPECT_EQ(-1, d.total);
+
+  // Check an iteration over a 'const view' for a given container.
+  const FooList& const_list = observer_list;
+  for (FooList::const_iterator i = const_list.begin(), e = const_list.end();
+       i != e; ++i) {
+    EXPECT_EQ(1, std::abs(i->GetValue()));
+  }
+
+  for (const auto& o : const_list)
+    EXPECT_EQ(1, std::abs(o.GetValue()));
+}
+
+TEST(ObserverListTest, StdIteratorRemoveItself) {
+  ObserverList<Foo> observer_list;
+  Adder a(1), b(-1), c(1), d(-1);
+  Disrupter disrupter(&observer_list, true);
+
+  observer_list.AddObserver(&a);
+  observer_list.AddObserver(&b);
+  observer_list.AddObserver(&disrupter);
+  observer_list.AddObserver(&c);
+  observer_list.AddObserver(&d);
+
+  for (auto& o : observer_list)
+    o.Observe(1);
+
+  for (auto& o : observer_list)
+    o.Observe(10);
+
+  EXPECT_EQ(11, a.total);
+  EXPECT_EQ(-11, b.total);
+  EXPECT_EQ(11, c.total);
+  EXPECT_EQ(-11, d.total);
+}
+
+TEST(ObserverListTest, StdIteratorRemoveBefore) {
+  ObserverList<Foo> observer_list;
+  Adder a(1), b(-1), c(1), d(-1);
+  Disrupter disrupter(&observer_list, &b);
+
+  observer_list.AddObserver(&a);
+  observer_list.AddObserver(&b);
+  observer_list.AddObserver(&disrupter);
+  observer_list.AddObserver(&c);
+  observer_list.AddObserver(&d);
+
+  for (auto& o : observer_list)
+    o.Observe(1);
+
+  for (auto& o : observer_list)
+    o.Observe(10);
+
+  EXPECT_EQ(11, a.total);
+  EXPECT_EQ(-1, b.total);
+  EXPECT_EQ(11, c.total);
+  EXPECT_EQ(-11, d.total);
+}
+
+TEST(ObserverListTest, StdIteratorRemoveAfter) {
+  ObserverList<Foo> observer_list;
+  Adder a(1), b(-1), c(1), d(-1);
+  Disrupter disrupter(&observer_list, &c);
+
+  observer_list.AddObserver(&a);
+  observer_list.AddObserver(&b);
+  observer_list.AddObserver(&disrupter);
+  observer_list.AddObserver(&c);
+  observer_list.AddObserver(&d);
+
+  for (auto& o : observer_list)
+    o.Observe(1);
+
+  for (auto& o : observer_list)
+    o.Observe(10);
+
+  EXPECT_EQ(11, a.total);
+  EXPECT_EQ(-11, b.total);
+  EXPECT_EQ(0, c.total);
+  EXPECT_EQ(-11, d.total);
+}
+
+TEST(ObserverListTest, StdIteratorRemoveAfterFront) {
+  ObserverList<Foo> observer_list;
+  Adder a(1), b(-1), c(1), d(-1);
+  Disrupter disrupter(&observer_list, &a);
+
+  observer_list.AddObserver(&a);
+  observer_list.AddObserver(&disrupter);
+  observer_list.AddObserver(&b);
+  observer_list.AddObserver(&c);
+  observer_list.AddObserver(&d);
+
+  for (auto& o : observer_list)
+    o.Observe(1);
+
+  for (auto& o : observer_list)
+    o.Observe(10);
+
+  EXPECT_EQ(1, a.total);
+  EXPECT_EQ(-11, b.total);
+  EXPECT_EQ(11, c.total);
+  EXPECT_EQ(-11, d.total);
+}
+
+TEST(ObserverListTest, StdIteratorRemoveBeforeBack) {
+  ObserverList<Foo> observer_list;
+  Adder a(1), b(-1), c(1), d(-1);
+  Disrupter disrupter(&observer_list, &d);
+
+  observer_list.AddObserver(&a);
+  observer_list.AddObserver(&b);
+  observer_list.AddObserver(&c);
+  observer_list.AddObserver(&disrupter);
+  observer_list.AddObserver(&d);
+
+  for (auto& o : observer_list)
+    o.Observe(1);
+
+  for (auto& o : observer_list)
+    o.Observe(10);
+
+  EXPECT_EQ(11, a.total);
+  EXPECT_EQ(-11, b.total);
+  EXPECT_EQ(11, c.total);
+  EXPECT_EQ(0, d.total);
+}
+
+TEST(ObserverListTest, StdIteratorRemoveFront) {
+  using FooList = ObserverList<Foo>;
+  FooList observer_list;
+  Adder a(1), b(-1), c(1), d(-1);
+  Disrupter disrupter(&observer_list, true);
+
+  observer_list.AddObserver(&disrupter);
+  observer_list.AddObserver(&a);
+  observer_list.AddObserver(&b);
+  observer_list.AddObserver(&c);
+  observer_list.AddObserver(&d);
+
+  bool test_disruptor = true;
+  for (FooList::iterator i = observer_list.begin(), e = observer_list.end();
+       i != e; ++i) {
+    i->Observe(1);
+    // Check that a second call to i->Observe() would crash here.
+    if (test_disruptor) {
+      EXPECT_FALSE(i.GetCurrent());
+      test_disruptor = false;
+    }
+  }
+
+  for (auto& o : observer_list)
+    o.Observe(10);
+
+  EXPECT_EQ(11, a.total);
+  EXPECT_EQ(-11, b.total);
+  EXPECT_EQ(11, c.total);
+  EXPECT_EQ(-11, d.total);
+}
+
+TEST(ObserverListTest, StdIteratorRemoveBack) {
+  ObserverList<Foo> observer_list;
+  Adder a(1), b(-1), c(1), d(-1);
+  Disrupter disrupter(&observer_list, true);
+
+  observer_list.AddObserver(&a);
+  observer_list.AddObserver(&b);
+  observer_list.AddObserver(&c);
+  observer_list.AddObserver(&d);
+  observer_list.AddObserver(&disrupter);
+
+  for (auto& o : observer_list)
+    o.Observe(1);
+
+  for (auto& o : observer_list)
+    o.Observe(10);
+
+  EXPECT_EQ(11, a.total);
+  EXPECT_EQ(-11, b.total);
+  EXPECT_EQ(11, c.total);
+  EXPECT_EQ(-11, d.total);
+}
+
+TEST(ObserverListTest, NestedLoop) {
+  ObserverList<Foo> observer_list;
+  Adder a(1), b(-1), c(1), d(-1);
+  Disrupter disrupter(&observer_list, true);
+
+  observer_list.AddObserver(&disrupter);
+  observer_list.AddObserver(&a);
+  observer_list.AddObserver(&b);
+  observer_list.AddObserver(&c);
+  observer_list.AddObserver(&d);
+
+  for (auto& o : observer_list) {
+    o.Observe(10);
+
+    for (auto& o : observer_list)
+      o.Observe(1);
+  }
+
+  EXPECT_EQ(15, a.total);
+  EXPECT_EQ(-15, b.total);
+  EXPECT_EQ(15, c.total);
+  EXPECT_EQ(-15, d.total);
+}
+
+TEST(ObserverListTest, NonCompactList) {
+  ObserverList<Foo> observer_list;
+  Adder a(1), b(-1);
+
+  Disrupter disrupter1(&observer_list, true);
+  Disrupter disrupter2(&observer_list, true);
+
+  // Disrupt itself and another one.
+  disrupter1.SetDoomed(&disrupter2);
+
+  observer_list.AddObserver(&disrupter1);
+  observer_list.AddObserver(&disrupter2);
+  observer_list.AddObserver(&a);
+  observer_list.AddObserver(&b);
+
+  for (auto& o : observer_list) {
+    // Get the { nullptr, nullptr, &a, &b } non-compact list
+    // on the first inner pass.
+    o.Observe(10);
+
+    for (auto& o : observer_list)
+      o.Observe(1);
+  }
+
+  EXPECT_EQ(13, a.total);
+  EXPECT_EQ(-13, b.total);
+}
+
+TEST(ObserverListTest, BecomesEmptyThanNonEmpty) {
+  ObserverList<Foo> observer_list;
+  Adder a(1), b(-1);
+
+  Disrupter disrupter1(&observer_list, true);
+  Disrupter disrupter2(&observer_list, true);
+
+  // Disrupt itself and another one.
+  disrupter1.SetDoomed(&disrupter2);
+
+  observer_list.AddObserver(&disrupter1);
+  observer_list.AddObserver(&disrupter2);
+
+  bool add_observers = true;
+  for (auto& o : observer_list) {
+    // Get the { nullptr, nullptr } empty list on the first inner pass.
+    o.Observe(10);
+
+    for (auto& o : observer_list)
+      o.Observe(1);
+
+    if (add_observers) {
+      observer_list.AddObserver(&a);
+      observer_list.AddObserver(&b);
+      add_observers = false;
+    }
+  }
+
+  EXPECT_EQ(12, a.total);
+  EXPECT_EQ(-12, b.total);
+}
+
+TEST(ObserverListTest, AddObserverInTheLastObserve) {
+  using FooList = ObserverList<Foo>;
+  FooList observer_list;
+
+  AddInObserve<FooList> a(&observer_list);
+  Adder b(-1);
+
+  a.SetToAdd(&b);
+  observer_list.AddObserver(&a);
+
+  auto it = observer_list.begin();
+  while (it != observer_list.end()) {
+    auto& observer = *it;
+    // Intentionally increment the iterator before calling Observe(). The
+    // ObserverList starts with only one observer, and it == observer_list.end()
+    // should be true after the next line.
+    ++it;
+    // However, the first Observe() call will add a second observer: at this
+    // point, it != observer_list.end() should be true, and Observe() should be
+    // called on the newly added observer on the next iteration of the loop.
+    observer.Observe(10);
+  }
+
+  EXPECT_EQ(-10, b.total);
+}
+
+class MockLogAssertHandler {
+ public:
+  MOCK_METHOD4(
+      HandleLogAssert,
+      void(const char*, int, const base::StringPiece, const base::StringPiece));
+};
+
+#if DCHECK_IS_ON()
+TEST(ObserverListTest, NonReentrantObserverList) {
+  using ::testing::_;
+
+  ObserverList<Foo, /*check_empty=*/false, /*allow_reentrancy=*/false>
+      non_reentrant_observer_list;
+  Adder a(1);
+  non_reentrant_observer_list.AddObserver(&a);
+
+  EXPECT_DCHECK_DEATH({
+    for (const Foo& a : non_reentrant_observer_list) {
+      for (const Foo& b : non_reentrant_observer_list) {
+        std::ignore = a;
+        std::ignore = b;
+      }
+    }
+  });
+}
+
+TEST(ObserverListTest, ReentrantObserverList) {
+  using ::testing::_;
+
+  ReentrantObserverList<Foo> reentrant_observer_list;
+  Adder a(1);
+  reentrant_observer_list.AddObserver(&a);
+  bool passed = false;
+  for (const Foo& a : reentrant_observer_list) {
+    for (const Foo& b : reentrant_observer_list) {
+      std::ignore = a;
+      std::ignore = b;
+      passed = true;
+    }
+  }
+  EXPECT_TRUE(passed);
+}
+#endif
+
+}  // namespace base
diff --git a/base/optional.h b/base/optional.h
new file mode 100644
index 0000000..c1d11ca
--- /dev/null
+++ b/base/optional.h
@@ -0,0 +1,922 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_OPTIONAL_H_
+#define BASE_OPTIONAL_H_
+
+#include <type_traits>
+#include <utility>
+
+#include "base/logging.h"
+#include "base/template_util.h"
+
+namespace base {
+
+// Specification:
+// http://en.cppreference.com/w/cpp/utility/optional/in_place_t
+struct in_place_t {};
+
+// Specification:
+// http://en.cppreference.com/w/cpp/utility/optional/nullopt_t
+struct nullopt_t {
+  constexpr explicit nullopt_t(int) {}
+};
+
+// Specification:
+// http://en.cppreference.com/w/cpp/utility/optional/in_place
+constexpr in_place_t in_place = {};
+
+// Specification:
+// http://en.cppreference.com/w/cpp/utility/optional/nullopt
+constexpr nullopt_t nullopt(0);
+
+// Forward declaration, which is referred to by the following helpers.
+template <typename T>
+class Optional;
+
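+// A minimal usage sketch (illustrative; base::Optional mirrors the
+// std::optional interface):
+//
+//   base::Optional<int> maybe;   // Initially empty.
+//   maybe = 42;                  // Now holds a value.
+//   if (maybe.has_value())
+//     Use(maybe.value());        // Use() is a hypothetical consumer.
+//   maybe = base::nullopt;       // Empty again.
+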
+namespace internal {
+
+template <typename T, bool = std::is_trivially_destructible<T>::value>
+struct OptionalStorageBase {
+  // We initialize |empty_| here instead of using default member
+  // initialization to avoid errors in g++ 4.8.
+  constexpr OptionalStorageBase() : empty_('\0') {}
+
+  template <class... Args>
+  constexpr explicit OptionalStorageBase(in_place_t, Args&&... args)
+      : is_populated_(true), value_(std::forward<Args>(args)...) {}
+
+  // When T is not trivially destructible, we must call its
+  // destructor before deallocating its memory.
+  // Note that this hides the (implicitly declared) move constructor, which
+  // would be used for the constexpr move constructor in OptionalStorage<T>.
+  // It is needed iff T is trivially move constructible. However, the current
+  // is_trivially_{copy,move}_constructible implementation requires
+  // is_trivially_destructible (which looks like a bug, cf:
+  // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=51452 and
+  // http://cplusplus.github.io/LWG/lwg-active.html#2116), so it is not
+  // necessary for this case at the moment. Please see also the destructor
+  // comment in "is_trivially_destructible = true" specialization below.
+  ~OptionalStorageBase() {
+    if (is_populated_)
+      value_.~T();
+  }
+
+  template <class... Args>
+  void Init(Args&&... args) {
+    DCHECK(!is_populated_);
+    ::new (&value_) T(std::forward<Args>(args)...);
+    is_populated_ = true;
+  }
+
+  bool is_populated_ = false;
+  union {
+    // |empty_| exists so that the union will always be initialized, even when
+    // it doesn't contain a value. Union members must be initialized for the
+    // constructor to be 'constexpr'.
+    char empty_;
+    T value_;
+  };
+};
+
+template <typename T>
+struct OptionalStorageBase<T, true /* trivially destructible */> {
+  // We initialize |empty_| here instead of using default member
+  // initialization to avoid errors in g++ 4.8.
+  constexpr OptionalStorageBase() : empty_('\0') {}
+
+  template <class... Args>
+  constexpr explicit OptionalStorageBase(in_place_t, Args&&... args)
+      : is_populated_(true), value_(std::forward<Args>(args)...) {}
+
+  // When T is trivially destructible (i.e. its destructor does nothing) there
+  // is no need to call it. The implicitly defined destructor is trivial,
+  // because both members (a bool and a union containing only variants that
+  // are trivially destructible) are trivially destructible.
+  // An explicitly defaulted destructor would also be trivial, but we do not
+  // use one here, because it would hide the implicit move constructor, which
+  // is needed to implement the constexpr move constructor in OptionalStorage
+  // iff T is trivially move constructible. Note that, if T is trivially move
+  // constructible, the move constructor of OptionalStorageBase<T> is also
+  // implicitly defined and is a trivial move constructor. If T is not
+  // trivially move constructible, not declaring a move constructor (while
+  // also not declaring a destructor) means the implicit move constructor is
+  // deleted, which works because any move constructor of OptionalStorage will
+  // not refer to it in that case.
+
+  template <class... Args>
+  void Init(Args&&... args) {
+    DCHECK(!is_populated_);
+    ::new (&value_) T(std::forward<Args>(args)...);
+    is_populated_ = true;
+  }
+
+  bool is_populated_ = false;
+  union {
+    // |empty_| exists so that the union will always be initialized, even when
+    // it doesn't contain a value. Union members must be initialized for the
+    // constructor to be 'constexpr'.
+    char empty_;
+    T value_;
+  };
+};
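+
+// As an illustrative consequence of the specialization above (mirrored by a
+// static_assert in optional_unittest.cc): when T is trivially destructible,
+// Optional<T> is too, e.g.
+//   static_assert(std::is_trivially_destructible<Optional<int>>::value, "");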
+
+// Implement conditional constexpr copy and move constructors. These are
+// constexpr if is_trivially_{copy,move}_constructible<T>::value is true,
+// respectively. If each is true, the corresponding constructor is defined as
+// "= default;", which generates a constexpr constructor (in this case, the
+// condition of constexpr-ness is satisfied because the base class also has
+// compiler-generated constexpr {copy,move} constructors). Note that
+// placement-new is prohibited in constexpr contexts.
+template <typename T,
+          bool = is_trivially_copy_constructible<T>::value,
+          bool = std::is_trivially_move_constructible<T>::value>
+struct OptionalStorage : OptionalStorageBase<T> {
+  // This is the non-trivially {copy,move} constructible case. Other cases are
+  // defined below as specializations.
+
+  // Accessing the members of a template base class requires explicit
+  // declarations.
+  using OptionalStorageBase<T>::is_populated_;
+  using OptionalStorageBase<T>::value_;
+  using OptionalStorageBase<T>::Init;
+
+  // Inherit constructors (specifically, the in_place constructor).
+  using OptionalStorageBase<T>::OptionalStorageBase;
+
+  // A user-defined constructor suppresses the implicit default constructor,
+  // so define it explicitly.
+  OptionalStorage() = default;
+
+  OptionalStorage(const OptionalStorage& other) {
+    if (other.is_populated_)
+      Init(other.value_);
+  }
+
+  OptionalStorage(OptionalStorage&& other) noexcept(
+      std::is_nothrow_move_constructible<T>::value) {
+    if (other.is_populated_)
+      Init(std::move(other.value_));
+  }
+};
+
+template <typename T>
+struct OptionalStorage<T,
+                       true /* trivially copy constructible */,
+                       false /* trivially move constructible */>
+    : OptionalStorageBase<T> {
+  using OptionalStorageBase<T>::is_populated_;
+  using OptionalStorageBase<T>::value_;
+  using OptionalStorageBase<T>::Init;
+  using OptionalStorageBase<T>::OptionalStorageBase;
+
+  OptionalStorage() = default;
+  OptionalStorage(const OptionalStorage& other) = default;
+
+  OptionalStorage(OptionalStorage&& other) noexcept(
+      std::is_nothrow_move_constructible<T>::value) {
+    if (other.is_populated_)
+      Init(std::move(other.value_));
+  }
+};
+
+template <typename T>
+struct OptionalStorage<T,
+                       false /* trivially copy constructible */,
+                       true /* trivially move constructible */>
+    : OptionalStorageBase<T> {
+  using OptionalStorageBase<T>::is_populated_;
+  using OptionalStorageBase<T>::value_;
+  using OptionalStorageBase<T>::Init;
+  using OptionalStorageBase<T>::OptionalStorageBase;
+
+  OptionalStorage() = default;
+  OptionalStorage(OptionalStorage&& other) = default;
+
+  OptionalStorage(const OptionalStorage& other) {
+    if (other.is_populated_)
+      Init(other.value_);
+  }
+};
+
+template <typename T>
+struct OptionalStorage<T,
+                       true /* trivially copy constructible */,
+                       true /* trivially move constructible */>
+    : OptionalStorageBase<T> {
+  // If T is both trivially copy and move constructible, no user-defined
+  // constructors are necessary, so simply inheriting the constructors from
+  // the base class works.
+  using OptionalStorageBase<T>::OptionalStorageBase;
+};
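+
+// An illustrative sketch of the dispatch above: for a trivially copyable type
+// such as int, the fully-defaulted specialization is selected, so copies can
+// happen in constant expressions, e.g.
+//   constexpr Optional<int> a(1);
+//   constexpr Optional<int> b(a);  // OK: the copy constructor is constexpr.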
+
+// Base class to support conditionally usable copy/move constructors and
+// assign operators.
+template <typename T>
+class OptionalBase {
+  // This class provides implementation rather than public API, so everything
+  // should be hidden. Often we use composition, but we cannot in this case
+  // because of a C++ language restriction.
+ protected:
+  constexpr OptionalBase() = default;
+  constexpr OptionalBase(const OptionalBase& other) = default;
+  constexpr OptionalBase(OptionalBase&& other) = default;
+
+  template <class... Args>
+  constexpr explicit OptionalBase(in_place_t, Args&&... args)
+      : storage_(in_place, std::forward<Args>(args)...) {}
+
+  // Implementation of converting constructors.
+  template <typename U>
+  explicit OptionalBase(const OptionalBase<U>& other) {
+    if (other.storage_.is_populated_)
+      storage_.Init(other.storage_.value_);
+  }
+
+  template <typename U>
+  explicit OptionalBase(OptionalBase<U>&& other) {
+    if (other.storage_.is_populated_)
+      storage_.Init(std::move(other.storage_.value_));
+  }
+
+  ~OptionalBase() = default;
+
+  OptionalBase& operator=(const OptionalBase& other) {
+    CopyAssign(other);
+    return *this;
+  }
+
+  OptionalBase& operator=(OptionalBase&& other) noexcept(
+      std::is_nothrow_move_assignable<T>::value&&
+          std::is_nothrow_move_constructible<T>::value) {
+    MoveAssign(std::move(other));
+    return *this;
+  }
+
+  template <typename U>
+  void CopyAssign(const OptionalBase<U>& other) {
+    if (other.storage_.is_populated_)
+      InitOrAssign(other.storage_.value_);
+    else
+      FreeIfNeeded();
+  }
+
+  template <typename U>
+  void MoveAssign(OptionalBase<U>&& other) {
+    if (other.storage_.is_populated_)
+      InitOrAssign(std::move(other.storage_.value_));
+    else
+      FreeIfNeeded();
+  }
+
+  template <typename U>
+  void InitOrAssign(U&& value) {
+    if (storage_.is_populated_)
+      storage_.value_ = std::forward<U>(value);
+    else
+      storage_.Init(std::forward<U>(value));
+  }
+
+  void FreeIfNeeded() {
+    if (!storage_.is_populated_)
+      return;
+    storage_.value_.~T();
+    storage_.is_populated_ = false;
+  }
+
+  // For implementing conversion, allow access to other typed OptionalBase
+  // class.
+  template <typename U>
+  friend class OptionalBase;
+
+  OptionalStorage<T> storage_;
+};
+
+// The following {Copy,Move}{Constructible,Assignable} structs are helpers to
+// implement constructor/assign-operator overloading. Specifically, if T is
+// not movable but copyable, Optional<T>'s move constructor should not
+// participate in overload resolution. This inheritance trick implements that.
+template <bool is_copy_constructible>
+struct CopyConstructible {};
+
+template <>
+struct CopyConstructible<false> {
+  constexpr CopyConstructible() = default;
+  constexpr CopyConstructible(const CopyConstructible&) = delete;
+  constexpr CopyConstructible(CopyConstructible&&) = default;
+  CopyConstructible& operator=(const CopyConstructible&) = default;
+  CopyConstructible& operator=(CopyConstructible&&) = default;
+};
+
+template <bool is_move_constructible>
+struct MoveConstructible {};
+
+template <>
+struct MoveConstructible<false> {
+  constexpr MoveConstructible() = default;
+  constexpr MoveConstructible(const MoveConstructible&) = default;
+  constexpr MoveConstructible(MoveConstructible&&) = delete;
+  MoveConstructible& operator=(const MoveConstructible&) = default;
+  MoveConstructible& operator=(MoveConstructible&&) = default;
+};
+
+template <bool is_copy_assignable>
+struct CopyAssignable {};
+
+template <>
+struct CopyAssignable<false> {
+  constexpr CopyAssignable() = default;
+  constexpr CopyAssignable(const CopyAssignable&) = default;
+  constexpr CopyAssignable(CopyAssignable&&) = default;
+  CopyAssignable& operator=(const CopyAssignable&) = delete;
+  CopyAssignable& operator=(CopyAssignable&&) = default;
+};
+
+template <bool is_move_assignable>
+struct MoveAssignable {};
+
+template <>
+struct MoveAssignable<false> {
+  constexpr MoveAssignable() = default;
+  constexpr MoveAssignable(const MoveAssignable&) = default;
+  constexpr MoveAssignable(MoveAssignable&&) = default;
+  MoveAssignable& operator=(const MoveAssignable&) = default;
+  MoveAssignable& operator=(MoveAssignable&&) = delete;
+};
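+
+// Illustrative effect of these policy structs (a sketch, using a move-only
+// type such as std::unique_ptr):
+//   static_assert(
+//       !std::is_copy_constructible<Optional<std::unique_ptr<int>>>::value,
+//       "a move-only T removes Optional<T>'s copy constructor");
+//   static_assert(
+//       std::is_move_constructible<Optional<std::unique_ptr<int>>>::value,
+//       "a move-only T keeps Optional<T>'s move constructor");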
+
+// Helper to conditionally enable converting constructors and assign operators.
+template <typename T, typename U>
+struct IsConvertibleFromOptional
+    : std::integral_constant<
+          bool,
+          std::is_constructible<T, Optional<U>&>::value ||
+              std::is_constructible<T, const Optional<U>&>::value ||
+              std::is_constructible<T, Optional<U>&&>::value ||
+              std::is_constructible<T, const Optional<U>&&>::value ||
+              std::is_convertible<Optional<U>&, T>::value ||
+              std::is_convertible<const Optional<U>&, T>::value ||
+              std::is_convertible<Optional<U>&&, T>::value ||
+              std::is_convertible<const Optional<U>&&, T>::value> {};
+
+template <typename T, typename U>
+struct IsAssignableFromOptional
+    : std::integral_constant<
+          bool,
+          IsConvertibleFromOptional<T, U>::value ||
+              std::is_assignable<T&, Optional<U>&>::value ||
+              std::is_assignable<T&, const Optional<U>&>::value ||
+              std::is_assignable<T&, Optional<U>&&>::value ||
+              std::is_assignable<T&, const Optional<U>&&>::value> {};
+
+// Forward compatibility for C++17.
+// Use an extra nested namespace to avoid leaking the using-declaration for
+// std::swap.
+namespace swappable_impl {
+using std::swap;
+
+struct IsSwappableImpl {
+  // Tests whether swap can be called. Check<T&>(0) returns true_type iff swap
+  // is available for T. Otherwise, Check's overload resolution falls back to
+  // Check(...) declared below thanks to SFINAE, and returns false_type.
+  template <typename T>
+  static auto Check(int)
+      -> decltype(swap(std::declval<T>(), std::declval<T>()), std::true_type());
+
+  template <typename T>
+  static std::false_type Check(...);
+};
+}  // namespace swappable_impl
+
+template <typename T>
+struct IsSwappable : decltype(swappable_impl::IsSwappableImpl::Check<T&>(0)) {};
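+
+// For example (illustrative): IsSwappable<int>::value is true because
+// std::swap applies, e.g.
+//   static_assert(IsSwappable<int>::value, "int is swappable via std::swap");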
+
+// Forward compatibility for C++20.
+template <typename T>
+using RemoveCvRefT = std::remove_cv_t<std::remove_reference_t<T>>;
+
+}  // namespace internal
+
+// On Windows, by default, the empty base class optimization does not apply
+// under multiple inheritance, which means that even if a base class is an
+// empty struct, it still consumes one byte for its body.
+// __declspec(empty_bases) enables the optimization.
+// cf.
+// https://blogs.msdn.microsoft.com/vcblog/2016/03/30/optimizing-the-layout-of-empty-base-classes-in-vs2015-update-2-3/
+#ifdef OS_WIN
+#define OPTIONAL_DECLSPEC_EMPTY_BASES __declspec(empty_bases)
+#else
+#define OPTIONAL_DECLSPEC_EMPTY_BASES
+#endif
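+
+// Illustrative check (asserted in optional_unittest.cc): with the empty bases
+// optimized away, the policy structs below contribute no storage, i.e.
+//   static_assert(sizeof(Optional<int>) ==
+//                     sizeof(internal::OptionalBase<int>),
+//                 "policy base classes should be zero-sized");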
+
+// base::Optional is a Chromium version of the C++17 optional class:
+// std::optional documentation:
+// http://en.cppreference.com/w/cpp/utility/optional
+// Chromium documentation:
+// https://chromium.googlesource.com/chromium/src/+/master/docs/optional.md
+//
+// These are the differences between the specification and the implementation:
+// - Constructors do not use 'constexpr' as it is a C++14 extension.
+// - 'constexpr' might be missing in some places for reasons specified locally.
+// - No exceptions are thrown, because they are banned from Chromium.
+//   Only the move constructor and move assign operators are marked noexcept.
+// - All the non-members are in the 'base' namespace instead of 'std'.
+//
+// Note that T cannot have a constructor taking Optional<T>, etc. Optional<T>
+// inspects T's constructors (specifically via IsConvertibleFromOptional), and
+// that check asks whether T is constructible from Optional<T>, which is
+// recursive and therefore does not work. As of Feb 2018, the std::optional
+// C++17 implementations in both clang and gcc have the same limitation.
+// MSVC's SFINAE appears to behave differently, but it reports an error, too.
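+//
+// A minimal usage sketch (illustrative only):
+//   base::Optional<std::string> name;  // disengaged; operator bool() is false
+//   name = "alice";                    // engaged via forward value assignment
+//   if (name.has_value())
+//     name->clear();                   // operator-> CHECKs engagement
+//   name.reset();                      // disengaged again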
+template <typename T>
+class OPTIONAL_DECLSPEC_EMPTY_BASES Optional
+    : public internal::OptionalBase<T>,
+      public internal::CopyConstructible<std::is_copy_constructible<T>::value>,
+      public internal::MoveConstructible<std::is_move_constructible<T>::value>,
+      public internal::CopyAssignable<std::is_copy_constructible<T>::value &&
+                                      std::is_copy_assignable<T>::value>,
+      public internal::MoveAssignable<std::is_move_constructible<T>::value &&
+                                      std::is_move_assignable<T>::value> {
+ public:
+#undef OPTIONAL_DECLSPEC_EMPTY_BASES
+  using value_type = T;
+
+  // Defer default/copy/move constructor implementation to OptionalBase.
+  constexpr Optional() = default;
+  constexpr Optional(const Optional& other) = default;
+  constexpr Optional(Optional&& other) noexcept(
+      std::is_nothrow_move_constructible<T>::value) = default;
+
+  constexpr Optional(nullopt_t) {}  // NOLINT(runtime/explicit)
+
+  // Converting copy constructor. "explicit" only if
+  // std::is_convertible<const U&, T>::value is false. It is implemented by
+  // declaring two almost identical constructors whose enable_if_t conditions
+  // differ, so that exactly one is chosen, thanks to SFINAE.
+  template <
+      typename U,
+      std::enable_if_t<std::is_constructible<T, const U&>::value &&
+                           !internal::IsConvertibleFromOptional<T, U>::value &&
+                           std::is_convertible<const U&, T>::value,
+                       bool> = false>
+  Optional(const Optional<U>& other) : internal::OptionalBase<T>(other) {}
+
+  template <
+      typename U,
+      std::enable_if_t<std::is_constructible<T, const U&>::value &&
+                           !internal::IsConvertibleFromOptional<T, U>::value &&
+                           !std::is_convertible<const U&, T>::value,
+                       bool> = false>
+  explicit Optional(const Optional<U>& other)
+      : internal::OptionalBase<T>(other) {}
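+
+  // For example (an illustrative sketch): since int is convertible to double,
+  //   Optional<double> d = Optional<int>(1);  // uses the non-explicit overload
+  // whereas a T that is constructible but not convertible from U would need
+  //   Optional<T> t(Optional<U>(...));        // uses the explicit overload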
+
+  // Converting move constructor. As with the converting copy constructor, two
+  // (explicit and non-explicit) constructors are declared.
+  template <
+      typename U,
+      std::enable_if_t<std::is_constructible<T, U&&>::value &&
+                           !internal::IsConvertibleFromOptional<T, U>::value &&
+                           std::is_convertible<U&&, T>::value,
+                       bool> = false>
+  Optional(Optional<U>&& other) : internal::OptionalBase<T>(std::move(other)) {}
+
+  template <
+      typename U,
+      std::enable_if_t<std::is_constructible<T, U&&>::value &&
+                           !internal::IsConvertibleFromOptional<T, U>::value &&
+                           !std::is_convertible<U&&, T>::value,
+                       bool> = false>
+  explicit Optional(Optional<U>&& other)
+      : internal::OptionalBase<T>(std::move(other)) {}
+
+  template <class... Args>
+  constexpr explicit Optional(in_place_t, Args&&... args)
+      : internal::OptionalBase<T>(in_place, std::forward<Args>(args)...) {}
+
+  template <
+      class U,
+      class... Args,
+      class = std::enable_if_t<std::is_constructible<value_type,
+                                                     std::initializer_list<U>&,
+                                                     Args...>::value>>
+  constexpr explicit Optional(in_place_t,
+                              std::initializer_list<U> il,
+                              Args&&... args)
+      : internal::OptionalBase<T>(in_place, il, std::forward<Args>(args)...) {}
+
+  // Forward value constructor. Similar to converting constructors,
+  // conditionally explicit.
+  template <
+      typename U = value_type,
+      std::enable_if_t<
+          std::is_constructible<T, U&&>::value &&
+              !std::is_same<internal::RemoveCvRefT<U>, in_place_t>::value &&
+              !std::is_same<internal::RemoveCvRefT<U>, Optional<T>>::value &&
+              std::is_convertible<U&&, T>::value,
+          bool> = false>
+  constexpr Optional(U&& value)
+      : internal::OptionalBase<T>(in_place, std::forward<U>(value)) {}
+
+  template <
+      typename U = value_type,
+      std::enable_if_t<
+          std::is_constructible<T, U&&>::value &&
+              !std::is_same<internal::RemoveCvRefT<U>, in_place_t>::value &&
+              !std::is_same<internal::RemoveCvRefT<U>, Optional<T>>::value &&
+              !std::is_convertible<U&&, T>::value,
+          bool> = false>
+  constexpr explicit Optional(U&& value)
+      : internal::OptionalBase<T>(in_place, std::forward<U>(value)) {}
+
+  ~Optional() = default;
+
+  // Defer copy-/move- assign operator implementation to OptionalBase.
+  Optional& operator=(const Optional& other) = default;
+  Optional& operator=(Optional&& other) noexcept(
+      std::is_nothrow_move_assignable<T>::value&&
+          std::is_nothrow_move_constructible<T>::value) = default;
+
+  Optional& operator=(nullopt_t) {
+    FreeIfNeeded();
+    return *this;
+  }
+
+  // Perfect-forwarded assignment.
+  template <typename U>
+  std::enable_if_t<
+      !std::is_same<internal::RemoveCvRefT<U>, Optional<T>>::value &&
+          std::is_constructible<T, U>::value &&
+          std::is_assignable<T&, U>::value &&
+          (!std::is_scalar<T>::value ||
+           !std::is_same<std::decay_t<U>, T>::value),
+      Optional&>
+  operator=(U&& value) {
+    InitOrAssign(std::forward<U>(value));
+    return *this;
+  }
+
+  // Copy assign the state of other.
+  template <typename U>
+  std::enable_if_t<!internal::IsAssignableFromOptional<T, U>::value &&
+                       std::is_constructible<T, const U&>::value &&
+                       std::is_assignable<T&, const U&>::value,
+                   Optional&>
+  operator=(const Optional<U>& other) {
+    CopyAssign(other);
+    return *this;
+  }
+
+  // Move assign the state of other.
+  template <typename U>
+  std::enable_if_t<!internal::IsAssignableFromOptional<T, U>::value &&
+                       std::is_constructible<T, U>::value &&
+                       std::is_assignable<T&, U>::value,
+                   Optional&>
+  operator=(Optional<U>&& other) {
+    MoveAssign(std::move(other));
+    return *this;
+  }
+
+  constexpr const T* operator->() const {
+    CHECK(storage_.is_populated_);
+    return &storage_.value_;
+  }
+
+  constexpr T* operator->() {
+    CHECK(storage_.is_populated_);
+    return &storage_.value_;
+  }
+
+  constexpr const T& operator*() const & {
+    CHECK(storage_.is_populated_);
+    return storage_.value_;
+  }
+
+  constexpr T& operator*() & {
+    CHECK(storage_.is_populated_);
+    return storage_.value_;
+  }
+
+  constexpr const T&& operator*() const && {
+    CHECK(storage_.is_populated_);
+    return std::move(storage_.value_);
+  }
+
+  constexpr T&& operator*() && {
+    CHECK(storage_.is_populated_);
+    return std::move(storage_.value_);
+  }
+
+  constexpr explicit operator bool() const { return storage_.is_populated_; }
+
+  constexpr bool has_value() const { return storage_.is_populated_; }
+
+  constexpr T& value() & {
+    CHECK(storage_.is_populated_);
+    return storage_.value_;
+  }
+
+  constexpr const T& value() const & {
+    CHECK(storage_.is_populated_);
+    return storage_.value_;
+  }
+
+  constexpr T&& value() && {
+    CHECK(storage_.is_populated_);
+    return std::move(storage_.value_);
+  }
+
+  constexpr const T&& value() const && {
+    CHECK(storage_.is_populated_);
+    return std::move(storage_.value_);
+  }
+
+  template <class U>
+  constexpr T value_or(U&& default_value) const& {
+    // TODO(mlamouri): add the following assert when possible:
+    // static_assert(std::is_copy_constructible<T>::value,
+    //               "T must be copy constructible");
+    static_assert(std::is_convertible<U, T>::value,
+                  "U must be convertible to T");
+    return storage_.is_populated_
+               ? value()
+               : static_cast<T>(std::forward<U>(default_value));
+  }
+
+  template <class U>
+  constexpr T value_or(U&& default_value) && {
+    // TODO(mlamouri): add the following assert when possible:
+    // static_assert(std::is_move_constructible<T>::value,
+    //               "T must be move constructible");
+    static_assert(std::is_convertible<U, T>::value,
+                  "U must be convertible to T");
+    return storage_.is_populated_
+               ? std::move(value())
+               : static_cast<T>(std::forward<U>(default_value));
+  }
+
+  void swap(Optional& other) {
+    if (!storage_.is_populated_ && !other.storage_.is_populated_)
+      return;
+
+    if (storage_.is_populated_ != other.storage_.is_populated_) {
+      if (storage_.is_populated_) {
+        other.storage_.Init(std::move(storage_.value_));
+        FreeIfNeeded();
+      } else {
+        storage_.Init(std::move(other.storage_.value_));
+        other.FreeIfNeeded();
+      }
+      return;
+    }
+
+    DCHECK(storage_.is_populated_ && other.storage_.is_populated_);
+    using std::swap;
+    swap(**this, *other);
+  }
+
+  void reset() { FreeIfNeeded(); }
+
+  template <class... Args>
+  T& emplace(Args&&... args) {
+    FreeIfNeeded();
+    storage_.Init(std::forward<Args>(args)...);
+    return storage_.value_;
+  }
+
+  template <class U, class... Args>
+  std::enable_if_t<
+      std::is_constructible<T, std::initializer_list<U>&, Args&&...>::value,
+      T&>
+  emplace(std::initializer_list<U> il, Args&&... args) {
+    FreeIfNeeded();
+    storage_.Init(il, std::forward<Args>(args)...);
+    return storage_.value_;
+  }
+
+ private:
+  // Accessing a template base class's protected members requires explicit
+  // using-declarations.
+  using internal::OptionalBase<T>::CopyAssign;
+  using internal::OptionalBase<T>::FreeIfNeeded;
+  using internal::OptionalBase<T>::InitOrAssign;
+  using internal::OptionalBase<T>::MoveAssign;
+  using internal::OptionalBase<T>::storage_;
+};
+
+// Comparison operators are defined below. The definitions follow
+// http://en.cppreference.com/w/cpp/utility/optional/operator_cmp
+// except that bool() casting is replaced by has_value() to meet the Chromium
+// style guide.
+template <class T, class U>
+constexpr bool operator==(const Optional<T>& lhs, const Optional<U>& rhs) {
+  if (lhs.has_value() != rhs.has_value())
+    return false;
+  if (!lhs.has_value())
+    return true;
+  return *lhs == *rhs;
+}
+
+template <class T, class U>
+constexpr bool operator!=(const Optional<T>& lhs, const Optional<U>& rhs) {
+  if (lhs.has_value() != rhs.has_value())
+    return true;
+  if (!lhs.has_value())
+    return false;
+  return *lhs != *rhs;
+}
+
+template <class T, class U>
+constexpr bool operator<(const Optional<T>& lhs, const Optional<U>& rhs) {
+  if (!rhs.has_value())
+    return false;
+  if (!lhs.has_value())
+    return true;
+  return *lhs < *rhs;
+}
+
+template <class T, class U>
+constexpr bool operator<=(const Optional<T>& lhs, const Optional<U>& rhs) {
+  if (!lhs.has_value())
+    return true;
+  if (!rhs.has_value())
+    return false;
+  return *lhs <= *rhs;
+}
+
+template <class T, class U>
+constexpr bool operator>(const Optional<T>& lhs, const Optional<U>& rhs) {
+  if (!lhs.has_value())
+    return false;
+  if (!rhs.has_value())
+    return true;
+  return *lhs > *rhs;
+}
+
+template <class T, class U>
+constexpr bool operator>=(const Optional<T>& lhs, const Optional<U>& rhs) {
+  if (!rhs.has_value())
+    return true;
+  if (!lhs.has_value())
+    return false;
+  return *lhs >= *rhs;
+}
+
+template <class T>
+constexpr bool operator==(const Optional<T>& opt, nullopt_t) {
+  return !opt;
+}
+
+template <class T>
+constexpr bool operator==(nullopt_t, const Optional<T>& opt) {
+  return !opt;
+}
+
+template <class T>
+constexpr bool operator!=(const Optional<T>& opt, nullopt_t) {
+  return opt.has_value();
+}
+
+template <class T>
+constexpr bool operator!=(nullopt_t, const Optional<T>& opt) {
+  return opt.has_value();
+}
+
+template <class T>
+constexpr bool operator<(const Optional<T>& opt, nullopt_t) {
+  return false;
+}
+
+template <class T>
+constexpr bool operator<(nullopt_t, const Optional<T>& opt) {
+  return opt.has_value();
+}
+
+template <class T>
+constexpr bool operator<=(const Optional<T>& opt, nullopt_t) {
+  return !opt;
+}
+
+template <class T>
+constexpr bool operator<=(nullopt_t, const Optional<T>& opt) {
+  return true;
+}
+
+template <class T>
+constexpr bool operator>(const Optional<T>& opt, nullopt_t) {
+  return opt.has_value();
+}
+
+template <class T>
+constexpr bool operator>(nullopt_t, const Optional<T>& opt) {
+  return false;
+}
+
+template <class T>
+constexpr bool operator>=(const Optional<T>& opt, nullopt_t) {
+  return true;
+}
+
+template <class T>
+constexpr bool operator>=(nullopt_t, const Optional<T>& opt) {
+  return !opt;
+}
+
+template <class T, class U>
+constexpr bool operator==(const Optional<T>& opt, const U& value) {
+  return opt.has_value() ? *opt == value : false;
+}
+
+template <class T, class U>
+constexpr bool operator==(const U& value, const Optional<T>& opt) {
+  return opt.has_value() ? value == *opt : false;
+}
+
+template <class T, class U>
+constexpr bool operator!=(const Optional<T>& opt, const U& value) {
+  return opt.has_value() ? *opt != value : true;
+}
+
+template <class T, class U>
+constexpr bool operator!=(const U& value, const Optional<T>& opt) {
+  return opt.has_value() ? value != *opt : true;
+}
+
+template <class T, class U>
+constexpr bool operator<(const Optional<T>& opt, const U& value) {
+  return opt.has_value() ? *opt < value : true;
+}
+
+template <class T, class U>
+constexpr bool operator<(const U& value, const Optional<T>& opt) {
+  return opt.has_value() ? value < *opt : false;
+}
+
+template <class T, class U>
+constexpr bool operator<=(const Optional<T>& opt, const U& value) {
+  return opt.has_value() ? *opt <= value : true;
+}
+
+template <class T, class U>
+constexpr bool operator<=(const U& value, const Optional<T>& opt) {
+  return opt.has_value() ? value <= *opt : false;
+}
+
+template <class T, class U>
+constexpr bool operator>(const Optional<T>& opt, const U& value) {
+  return opt.has_value() ? *opt > value : false;
+}
+
+template <class T, class U>
+constexpr bool operator>(const U& value, const Optional<T>& opt) {
+  return opt.has_value() ? value > *opt : true;
+}
+
+template <class T, class U>
+constexpr bool operator>=(const Optional<T>& opt, const U& value) {
+  return opt.has_value() ? *opt >= value : false;
+}
+
+template <class T, class U>
+constexpr bool operator>=(const U& value, const Optional<T>& opt) {
+  return opt.has_value() ? value >= *opt : true;
+}
+
+template <class T>
+constexpr Optional<std::decay_t<T>> make_optional(T&& value) {
+  return Optional<std::decay_t<T>>(std::forward<T>(value));
+}
+
+template <class T, class... Args>
+constexpr Optional<T> make_optional(Args&&... args) {
+  return Optional<T>(in_place, std::forward<Args>(args)...);
+}
+
+template <class T, class U, class... Args>
+constexpr Optional<T> make_optional(std::initializer_list<U> il,
+                                    Args&&... args) {
+  return Optional<T>(in_place, il, std::forward<Args>(args)...);
+}
+
+// Partial specialization of a function template is not allowed, and adding an
+// overloaded function to the std namespace is not allowed either (although
+// specializing a template inside std is). Thus, this swap() overload is
+// defined in the base namespace instead.
+template <class T>
+std::enable_if_t<std::is_move_constructible<T>::value &&
+                 internal::IsSwappable<T>::value>
+swap(Optional<T>& lhs, Optional<T>& rhs) {
+  lhs.swap(rhs);
+}
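+
+// Usage sketch (illustrative): found by ADL for arguments in base, e.g.
+//   Optional<int> a(1), b(2);
+//   swap(a, b);  // a == 2, b == 1 afterwards; disabled for unswappable T.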
+
+}  // namespace base
+
+namespace std {
+
+template <class T>
+struct hash<base::Optional<T>> {
+  size_t operator()(const base::Optional<T>& opt) const {
+    return opt == base::nullopt ? 0 : std::hash<T>()(*opt);
+  }
+};
+
+}  // namespace std
+
+#endif  // BASE_OPTIONAL_H_
diff --git a/base/optional_unittest.cc b/base/optional_unittest.cc
new file mode 100644
index 0000000..7bdb46b
--- /dev/null
+++ b/base/optional_unittest.cc
@@ -0,0 +1,2185 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/optional.h"
+
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::ElementsAre;
+
+namespace base {
+
+namespace {
+
+// Object used to test Optional<T> with a complex object, including its move
+// semantics.
+class TestObject {
+ public:
+  enum class State {
+    DEFAULT_CONSTRUCTED,
+    VALUE_CONSTRUCTED,
+    COPY_CONSTRUCTED,
+    MOVE_CONSTRUCTED,
+    MOVED_FROM,
+    COPY_ASSIGNED,
+    MOVE_ASSIGNED,
+    SWAPPED,
+  };
+
+  TestObject() : foo_(0), bar_(0.0), state_(State::DEFAULT_CONSTRUCTED) {}
+
+  TestObject(int foo, double bar)
+      : foo_(foo), bar_(bar), state_(State::VALUE_CONSTRUCTED) {}
+
+  TestObject(const TestObject& other)
+      : foo_(other.foo_),
+        bar_(other.bar_),
+        state_(State::COPY_CONSTRUCTED),
+        move_ctors_count_(other.move_ctors_count_) {}
+
+  TestObject(TestObject&& other)
+      : foo_(std::move(other.foo_)),
+        bar_(std::move(other.bar_)),
+        state_(State::MOVE_CONSTRUCTED),
+        move_ctors_count_(other.move_ctors_count_ + 1) {
+    other.state_ = State::MOVED_FROM;
+  }
+
+  TestObject& operator=(const TestObject& other) {
+    foo_ = other.foo_;
+    bar_ = other.bar_;
+    state_ = State::COPY_ASSIGNED;
+    move_ctors_count_ = other.move_ctors_count_;
+    return *this;
+  }
+
+  TestObject& operator=(TestObject&& other) {
+    foo_ = other.foo_;
+    bar_ = other.bar_;
+    state_ = State::MOVE_ASSIGNED;
+    move_ctors_count_ = other.move_ctors_count_;
+    other.state_ = State::MOVED_FROM;
+    return *this;
+  }
+
+  void Swap(TestObject* other) {
+    using std::swap;
+    swap(foo_, other->foo_);
+    swap(bar_, other->bar_);
+    swap(move_ctors_count_, other->move_ctors_count_);
+    state_ = State::SWAPPED;
+    other->state_ = State::SWAPPED;
+  }
+
+  bool operator==(const TestObject& other) const {
+    return std::tie(foo_, bar_) == std::tie(other.foo_, other.bar_);
+  }
+
+  bool operator!=(const TestObject& other) const { return !(*this == other); }
+
+  int foo() const { return foo_; }
+  State state() const { return state_; }
+  int move_ctors_count() const { return move_ctors_count_; }
+
+ private:
+  int foo_;
+  double bar_;
+  State state_;
+  int move_ctors_count_ = 0;
+};
+
+// Implements the Swappable concept.
+void swap(TestObject& lhs, TestObject& rhs) {
+  lhs.Swap(&rhs);
+}
+
+class NonTriviallyDestructible {
+  ~NonTriviallyDestructible() {}
+};
+
+class DeletedDefaultConstructor {
+ public:
+  DeletedDefaultConstructor() = delete;
+  DeletedDefaultConstructor(int foo) : foo_(foo) {}
+
+  int foo() const { return foo_; }
+
+ private:
+  int foo_;
+};
+
+class DeletedCopy {
+ public:
+  explicit DeletedCopy(int foo) : foo_(foo) {}
+  DeletedCopy(const DeletedCopy&) = delete;
+  DeletedCopy(DeletedCopy&&) = default;
+
+  DeletedCopy& operator=(const DeletedCopy&) = delete;
+  DeletedCopy& operator=(DeletedCopy&&) = default;
+
+  int foo() const { return foo_; }
+
+ private:
+  int foo_;
+};
+
+class DeletedMove {
+ public:
+  explicit DeletedMove(int foo) : foo_(foo) {}
+  DeletedMove(const DeletedMove&) = default;
+  DeletedMove(DeletedMove&&) = delete;
+
+  DeletedMove& operator=(const DeletedMove&) = default;
+  DeletedMove& operator=(DeletedMove&&) = delete;
+
+  int foo() const { return foo_; }
+
+ private:
+  int foo_;
+};
+
+class NonTriviallyDestructibleDeletedCopyConstructor {
+ public:
+  explicit NonTriviallyDestructibleDeletedCopyConstructor(int foo)
+      : foo_(foo) {}
+  NonTriviallyDestructibleDeletedCopyConstructor(
+      const NonTriviallyDestructibleDeletedCopyConstructor&) = delete;
+  NonTriviallyDestructibleDeletedCopyConstructor(
+      NonTriviallyDestructibleDeletedCopyConstructor&&) = default;
+
+  ~NonTriviallyDestructibleDeletedCopyConstructor() {}
+
+  int foo() const { return foo_; }
+
+ private:
+  int foo_;
+};
+
+class DeleteNewOperators {
+ public:
+  void* operator new(size_t) = delete;
+  void* operator new(size_t, void*) = delete;
+  void* operator new[](size_t) = delete;
+  void* operator new[](size_t, void*) = delete;
+};
+
+}  // anonymous namespace
+
+static_assert(std::is_trivially_destructible<Optional<int>>::value,
+              "OptionalIsTriviallyDestructible");
+
+static_assert(
+    !std::is_trivially_destructible<Optional<NonTriviallyDestructible>>::value,
+    "OptionalIsNotTriviallyDestructible");
+
+static_assert(sizeof(Optional<int>) == sizeof(internal::OptionalBase<int>),
+              "internal::{Copy,Move}{Constructible,Assignable} structs "
+              "should be 0-sized");
+
+TEST(OptionalTest, DefaultConstructor) {
+  {
+    constexpr Optional<float> o;
+    EXPECT_FALSE(o);
+  }
+
+  {
+    Optional<std::string> o;
+    EXPECT_FALSE(o);
+  }
+
+  {
+    Optional<TestObject> o;
+    EXPECT_FALSE(o);
+  }
+}
+
+TEST(OptionalTest, CopyConstructor) {
+  {
+    constexpr Optional<float> first(0.1f);
+    constexpr Optional<float> other(first);
+
+    EXPECT_TRUE(other);
+    EXPECT_EQ(other.value(), 0.1f);
+    EXPECT_EQ(first, other);
+  }
+
+  {
+    Optional<std::string> first("foo");
+    Optional<std::string> other(first);
+
+    EXPECT_TRUE(other);
+    EXPECT_EQ(other.value(), "foo");
+    EXPECT_EQ(first, other);
+  }
+
+  {
+    const Optional<std::string> first("foo");
+    Optional<std::string> other(first);
+
+    EXPECT_TRUE(other);
+    EXPECT_EQ(other.value(), "foo");
+    EXPECT_EQ(first, other);
+  }
+
+  {
+    Optional<TestObject> first(TestObject(3, 0.1));
+    Optional<TestObject> other(first);
+
+    EXPECT_TRUE(!!other);
+    EXPECT_TRUE(other.value() == TestObject(3, 0.1));
+    EXPECT_TRUE(first == other);
+  }
+}
+
+TEST(OptionalTest, ValueConstructor) {
+  {
+    constexpr float value = 0.1f;
+    constexpr Optional<float> o(value);
+
+    EXPECT_TRUE(o);
+    EXPECT_EQ(value, o.value());
+  }
+
+  {
+    std::string value("foo");
+    Optional<std::string> o(value);
+
+    EXPECT_TRUE(o);
+    EXPECT_EQ(value, o.value());
+  }
+
+  {
+    TestObject value(3, 0.1);
+    Optional<TestObject> o(value);
+
+    EXPECT_TRUE(o);
+    EXPECT_EQ(TestObject::State::COPY_CONSTRUCTED, o->state());
+    EXPECT_EQ(value, o.value());
+  }
+}
+
+TEST(OptionalTest, MoveConstructor) {
+  {
+    constexpr Optional<float> first(0.1f);
+    constexpr Optional<float> second(std::move(first));
+
+    EXPECT_TRUE(second.has_value());
+    EXPECT_EQ(second.value(), 0.1f);
+
+    EXPECT_TRUE(first.has_value());
+  }
+
+  {
+    Optional<std::string> first("foo");
+    Optional<std::string> second(std::move(first));
+
+    EXPECT_TRUE(second.has_value());
+    EXPECT_EQ("foo", second.value());
+
+    EXPECT_TRUE(first.has_value());
+  }
+
+  {
+    Optional<TestObject> first(TestObject(3, 0.1));
+    Optional<TestObject> second(std::move(first));
+
+    EXPECT_TRUE(second.has_value());
+    EXPECT_EQ(TestObject::State::MOVE_CONSTRUCTED, second->state());
+    EXPECT_TRUE(TestObject(3, 0.1) == second.value());
+
+    EXPECT_TRUE(first.has_value());
+    EXPECT_EQ(TestObject::State::MOVED_FROM, first->state());
+  }
+
+  // Even if the copy constructor is deleted, the move constructor needs to
+  // work. Note that it cannot be constexpr.
+  {
+    Optional<DeletedCopy> first(in_place, 42);
+    Optional<DeletedCopy> second(std::move(first));
+
+    EXPECT_TRUE(second.has_value());
+    EXPECT_EQ(42, second->foo());
+
+    EXPECT_TRUE(first.has_value());
+  }
+
+  {
+    Optional<DeletedMove> first(in_place, 42);
+    Optional<DeletedMove> second(std::move(first));
+
+    EXPECT_TRUE(second.has_value());
+    EXPECT_EQ(42, second->foo());
+
+    EXPECT_TRUE(first.has_value());
+  }
+
+  {
+    Optional<NonTriviallyDestructibleDeletedCopyConstructor> first(in_place,
+                                                                   42);
+    Optional<NonTriviallyDestructibleDeletedCopyConstructor> second(
+        std::move(first));
+
+    EXPECT_TRUE(second.has_value());
+    EXPECT_EQ(42, second->foo());
+
+    EXPECT_TRUE(first.has_value());
+  }
+}
+
+TEST(OptionalTest, MoveValueConstructor) {
+  {
+    constexpr float value = 0.1f;
+    constexpr Optional<float> o(std::move(value));
+
+    EXPECT_TRUE(o);
+    EXPECT_EQ(0.1f, o.value());
+  }
+
+  {
+    float value = 0.1f;
+    Optional<float> o(std::move(value));
+
+    EXPECT_TRUE(o);
+    EXPECT_EQ(0.1f, o.value());
+  }
+
+  {
+    std::string value("foo");
+    Optional<std::string> o(std::move(value));
+
+    EXPECT_TRUE(o);
+    EXPECT_EQ("foo", o.value());
+  }
+
+  {
+    TestObject value(3, 0.1);
+    Optional<TestObject> o(std::move(value));
+
+    EXPECT_TRUE(o);
+    EXPECT_EQ(TestObject::State::MOVE_CONSTRUCTED, o->state());
+    EXPECT_EQ(TestObject(3, 0.1), o.value());
+  }
+}
+
+TEST(OptionalTest, ConvertingCopyConstructor) {
+  {
+    Optional<int> first(1);
+    Optional<double> second(first);
+    EXPECT_TRUE(second.has_value());
+    EXPECT_EQ(1.0, second.value());
+  }
+
+  // Make sure the constructor is not marked explicit in the convertible case.
+  {
+    Optional<int> o(1);
+    ignore_result<Optional<double>>(o);
+  }
+}
+
+TEST(OptionalTest, ConvertingMoveConstructor) {
+  {
+    Optional<int> first(1);
+    Optional<double> second(std::move(first));
+    EXPECT_TRUE(second.has_value());
+    EXPECT_EQ(1.0, second.value());
+  }
+
+  // Make sure the constructor is not marked explicit in the convertible case.
+  {
+    Optional<int> o(1);
+    ignore_result<Optional<double>>(std::move(o));
+  }
+
+  {
+    class Test1 {
+     public:
+      explicit Test1(int foo) : foo_(foo) {}
+
+      int foo() const { return foo_; }
+
+     private:
+      int foo_;
+    };
+
+    // Not copyable but convertible from Test1.
+    class Test2 {
+     public:
+      Test2(const Test2&) = delete;
+      explicit Test2(Test1&& other) : bar_(other.foo()) {}
+
+      double bar() const { return bar_; }
+
+     private:
+      double bar_;
+    };
+
+    Optional<Test1> first(in_place, 42);
+    Optional<Test2> second(std::move(first));
+    EXPECT_TRUE(second.has_value());
+    EXPECT_EQ(42.0, second->bar());
+  }
+}
+
+TEST(OptionalTest, ConstructorForwardArguments) {
+  {
+    constexpr Optional<float> a(base::in_place, 0.1f);
+    EXPECT_TRUE(a);
+    EXPECT_EQ(0.1f, a.value());
+  }
+
+  {
+    Optional<float> a(base::in_place, 0.1f);
+    EXPECT_TRUE(a);
+    EXPECT_EQ(0.1f, a.value());
+  }
+
+  {
+    Optional<std::string> a(base::in_place, "foo");
+    EXPECT_TRUE(a);
+    EXPECT_EQ("foo", a.value());
+  }
+
+  {
+    Optional<TestObject> a(base::in_place, 0, 0.1);
+    EXPECT_TRUE(!!a);
+    EXPECT_TRUE(TestObject(0, 0.1) == a.value());
+  }
+}
+
+TEST(OptionalTest, ConstructorForwardInitListAndArguments) {
+  {
+    Optional<std::vector<int>> opt(in_place, {3, 1});
+    EXPECT_TRUE(opt);
+    EXPECT_THAT(*opt, ElementsAre(3, 1));
+    EXPECT_EQ(2u, opt->size());
+  }
+
+  {
+    Optional<std::vector<int>> opt(in_place, {3, 1}, std::allocator<int>());
+    EXPECT_TRUE(opt);
+    EXPECT_THAT(*opt, ElementsAre(3, 1));
+    EXPECT_EQ(2u, opt->size());
+  }
+}
+
+TEST(OptionalTest, ForwardConstructor) {
+  {
+    Optional<double> a(1);
+    EXPECT_TRUE(a.has_value());
+    EXPECT_EQ(1.0, a.value());
+  }
+
+  // Test that the default type of 'U' is value_type.
+  {
+    struct TestData {
+      int a;
+      double b;
+      bool c;
+    };
+
+    Optional<TestData> a({1, 2.0, true});
+    EXPECT_TRUE(a.has_value());
+    EXPECT_EQ(1, a->a);
+    EXPECT_EQ(2.0, a->b);
+    EXPECT_TRUE(a->c);
+  }
+
+  // If T has a constructor taking Optional<U> and another constructor taking
+  // U, then T(Optional<U>) should be used by the Optional<T>(Optional<U>)
+  // constructor.
+  {
+    enum class ParamType {
+      DEFAULT_CONSTRUCTED,
+      COPY_CONSTRUCTED,
+      MOVE_CONSTRUCTED,
+      INT,
+      IN_PLACE,
+      OPTIONAL_INT,
+    };
+    struct Test {
+      Test() : param_type(ParamType::DEFAULT_CONSTRUCTED) {}
+      Test(const Test& param) : param_type(ParamType::COPY_CONSTRUCTED) {}
+      Test(Test&& param) : param_type(ParamType::MOVE_CONSTRUCTED) {}
+      explicit Test(int param) : param_type(ParamType::INT) {}
+      explicit Test(in_place_t param) : param_type(ParamType::IN_PLACE) {}
+      explicit Test(Optional<int> param)
+          : param_type(ParamType::OPTIONAL_INT) {}
+
+      ParamType param_type;
+    };
+
+    // Overload resolution with copy-conversion constructor.
+    {
+      const Optional<int> arg(in_place, 1);
+      Optional<Test> testee(arg);
+      EXPECT_EQ(ParamType::OPTIONAL_INT, testee->param_type);
+    }
+
+    // Overload resolution with move conversion constructor.
+    {
+      Optional<Test> testee(Optional<int>(in_place, 1));
+      EXPECT_EQ(ParamType::OPTIONAL_INT, testee->param_type);
+    }
+
+    // Default constructor should be used.
+    {
+      Optional<Test> testee(in_place);
+      EXPECT_EQ(ParamType::DEFAULT_CONSTRUCTED, testee->param_type);
+    }
+  }
+
+  {
+    struct Test {
+      Test(int a) {}  // NOLINT(runtime/explicit)
+    };
+    // If T is convertible from U, it is not marked as explicit.
+    static_assert(std::is_convertible<int, Test>::value,
+                  "Int should be convertible to Test.");
+    ([](Optional<Test> param) {})(1);
+  }
+}
+
+TEST(OptionalTest, NulloptConstructor) {
+  constexpr Optional<int> a(base::nullopt);
+  EXPECT_FALSE(a);
+}
+
+TEST(OptionalTest, AssignValue) {
+  {
+    Optional<float> a;
+    EXPECT_FALSE(a);
+    a = 0.1f;
+    EXPECT_TRUE(a);
+
+    Optional<float> b(0.1f);
+    EXPECT_TRUE(a == b);
+  }
+
+  {
+    Optional<std::string> a;
+    EXPECT_FALSE(a);
+    a = std::string("foo");
+    EXPECT_TRUE(a);
+
+    Optional<std::string> b(std::string("foo"));
+    EXPECT_EQ(a, b);
+  }
+
+  {
+    Optional<TestObject> a;
+    EXPECT_FALSE(!!a);
+    a = TestObject(3, 0.1);
+    EXPECT_TRUE(!!a);
+
+    Optional<TestObject> b(TestObject(3, 0.1));
+    EXPECT_TRUE(a == b);
+  }
+
+  {
+    Optional<TestObject> a = TestObject(4, 1.0);
+    EXPECT_TRUE(!!a);
+    a = TestObject(3, 0.1);
+    EXPECT_TRUE(!!a);
+
+    Optional<TestObject> b(TestObject(3, 0.1));
+    EXPECT_TRUE(a == b);
+  }
+}
+
+TEST(OptionalTest, AssignObject) {
+  {
+    Optional<float> a;
+    Optional<float> b(0.1f);
+    a = b;
+
+    EXPECT_TRUE(a);
+    EXPECT_EQ(a.value(), 0.1f);
+    EXPECT_EQ(a, b);
+  }
+
+  {
+    Optional<std::string> a;
+    Optional<std::string> b("foo");
+    a = b;
+
+    EXPECT_TRUE(a);
+    EXPECT_EQ(a.value(), "foo");
+    EXPECT_EQ(a, b);
+  }
+
+  {
+    Optional<TestObject> a;
+    Optional<TestObject> b(TestObject(3, 0.1));
+    a = b;
+
+    EXPECT_TRUE(!!a);
+    EXPECT_TRUE(a.value() == TestObject(3, 0.1));
+    EXPECT_TRUE(a == b);
+  }
+
+  {
+    Optional<TestObject> a(TestObject(4, 1.0));
+    Optional<TestObject> b(TestObject(3, 0.1));
+    a = b;
+
+    EXPECT_TRUE(!!a);
+    EXPECT_TRUE(a.value() == TestObject(3, 0.1));
+    EXPECT_TRUE(a == b);
+  }
+
+  {
+    Optional<DeletedMove> a(in_place, 42);
+    Optional<DeletedMove> b;
+    b = a;
+
+    EXPECT_TRUE(!!a);
+    EXPECT_TRUE(!!b);
+    EXPECT_EQ(a->foo(), b->foo());
+  }
+
+  {
+    Optional<DeletedMove> a(in_place, 42);
+    Optional<DeletedMove> b(in_place, 1);
+    b = a;
+
+    EXPECT_TRUE(!!a);
+    EXPECT_TRUE(!!b);
+    EXPECT_EQ(a->foo(), b->foo());
+  }
+
+  // Converting assignment.
+  {
+    Optional<int> a(in_place, 1);
+    Optional<double> b;
+    b = a;
+
+    EXPECT_TRUE(!!a);
+    EXPECT_TRUE(!!b);
+    EXPECT_EQ(1, a.value());
+    EXPECT_EQ(1.0, b.value());
+  }
+
+  {
+    Optional<int> a(in_place, 42);
+    Optional<double> b(in_place, 1);
+    b = a;
+
+    EXPECT_TRUE(!!a);
+    EXPECT_TRUE(!!b);
+    EXPECT_EQ(42, a.value());
+    EXPECT_EQ(42.0, b.value());
+  }
+
+  {
+    Optional<int> a;
+    Optional<double> b(in_place, 1);
+    b = a;
+    EXPECT_FALSE(!!a);
+    EXPECT_FALSE(!!b);
+  }
+}
+
+TEST(OptionalTest, AssignObject_rvalue) {
+  {
+    Optional<float> a;
+    Optional<float> b(0.1f);
+    a = std::move(b);
+
+    EXPECT_TRUE(a);
+    EXPECT_TRUE(b);
+    EXPECT_EQ(0.1f, a.value());
+  }
+
+  {
+    Optional<std::string> a;
+    Optional<std::string> b("foo");
+    a = std::move(b);
+
+    EXPECT_TRUE(a);
+    EXPECT_TRUE(b);
+    EXPECT_EQ("foo", a.value());
+  }
+
+  {
+    Optional<TestObject> a;
+    Optional<TestObject> b(TestObject(3, 0.1));
+    a = std::move(b);
+
+    EXPECT_TRUE(!!a);
+    EXPECT_TRUE(!!b);
+    EXPECT_TRUE(TestObject(3, 0.1) == a.value());
+
+    EXPECT_EQ(TestObject::State::MOVE_CONSTRUCTED, a->state());
+    EXPECT_EQ(TestObject::State::MOVED_FROM, b->state());
+  }
+
+  {
+    Optional<TestObject> a(TestObject(4, 1.0));
+    Optional<TestObject> b(TestObject(3, 0.1));
+    a = std::move(b);
+
+    EXPECT_TRUE(!!a);
+    EXPECT_TRUE(!!b);
+    EXPECT_TRUE(TestObject(3, 0.1) == a.value());
+
+    EXPECT_EQ(TestObject::State::MOVE_ASSIGNED, a->state());
+    EXPECT_EQ(TestObject::State::MOVED_FROM, b->state());
+  }
+
+  {
+    Optional<DeletedMove> a(in_place, 42);
+    Optional<DeletedMove> b;
+    b = std::move(a);
+
+    EXPECT_TRUE(!!a);
+    EXPECT_TRUE(!!b);
+    EXPECT_EQ(42, b->foo());
+  }
+
+  {
+    Optional<DeletedMove> a(in_place, 42);
+    Optional<DeletedMove> b(in_place, 1);
+    b = std::move(a);
+
+    EXPECT_TRUE(!!a);
+    EXPECT_TRUE(!!b);
+    EXPECT_EQ(42, b->foo());
+  }
+
+  // Converting assignment.
+  {
+    Optional<int> a(in_place, 1);
+    Optional<double> b;
+    b = std::move(a);
+
+    EXPECT_TRUE(!!a);
+    EXPECT_TRUE(!!b);
+    EXPECT_EQ(1.0, b.value());
+  }
+
+  {
+    Optional<int> a(in_place, 42);
+    Optional<double> b(in_place, 1);
+    b = std::move(a);
+
+    EXPECT_TRUE(!!a);
+    EXPECT_TRUE(!!b);
+    EXPECT_EQ(42.0, b.value());
+  }
+
+  {
+    Optional<int> a;
+    Optional<double> b(in_place, 1);
+    b = std::move(a);
+
+    EXPECT_FALSE(!!a);
+    EXPECT_FALSE(!!b);
+  }
+}
+
+TEST(OptionalTest, AssignNull) {
+  {
+    Optional<float> a(0.1f);
+    Optional<float> b(0.2f);
+    a = base::nullopt;
+    b = base::nullopt;
+    EXPECT_EQ(a, b);
+  }
+
+  {
+    Optional<std::string> a("foo");
+    Optional<std::string> b("bar");
+    a = base::nullopt;
+    b = base::nullopt;
+    EXPECT_EQ(a, b);
+  }
+
+  {
+    Optional<TestObject> a(TestObject(3, 0.1));
+    Optional<TestObject> b(TestObject(4, 1.0));
+    a = base::nullopt;
+    b = base::nullopt;
+    EXPECT_TRUE(a == b);
+  }
+}
+
+TEST(OptionalTest, AssignOverload) {
+  struct Test1 {
+    enum class State {
+      CONSTRUCTED,
+      MOVED,
+    };
+    State state = State::CONSTRUCTED;
+  };
+
+  // Here, Optional<Test2> can be assigned from Optional<Test1>.
+  // On move, the Test1 instance is marked MOVED.
+  struct Test2 {
+    enum class State {
+      DEFAULT_CONSTRUCTED,
+      COPY_CONSTRUCTED_FROM_TEST1,
+      MOVE_CONSTRUCTED_FROM_TEST1,
+      COPY_ASSIGNED_FROM_TEST1,
+      MOVE_ASSIGNED_FROM_TEST1,
+    };
+
+    Test2() = default;
+    explicit Test2(const Test1& test1)
+        : state(State::COPY_CONSTRUCTED_FROM_TEST1) {}
+    explicit Test2(Test1&& test1) : state(State::MOVE_CONSTRUCTED_FROM_TEST1) {
+      test1.state = Test1::State::MOVED;
+    }
+    Test2& operator=(const Test1& test1) {
+      state = State::COPY_ASSIGNED_FROM_TEST1;
+      return *this;
+    }
+    Test2& operator=(Test1&& test1) {
+      state = State::MOVE_ASSIGNED_FROM_TEST1;
+      test1.state = Test1::State::MOVED;
+      return *this;
+    }
+
+    State state = State::DEFAULT_CONSTRUCTED;
+  };
+
+  {
+    Optional<Test1> a(in_place);
+    Optional<Test2> b;
+
+    b = a;
+    EXPECT_TRUE(!!a);
+    EXPECT_TRUE(!!b);
+    EXPECT_EQ(Test1::State::CONSTRUCTED, a->state);
+    EXPECT_EQ(Test2::State::COPY_CONSTRUCTED_FROM_TEST1, b->state);
+  }
+
+  {
+    Optional<Test1> a(in_place);
+    Optional<Test2> b(in_place);
+
+    b = a;
+    EXPECT_TRUE(!!a);
+    EXPECT_TRUE(!!b);
+    EXPECT_EQ(Test1::State::CONSTRUCTED, a->state);
+    EXPECT_EQ(Test2::State::COPY_ASSIGNED_FROM_TEST1, b->state);
+  }
+
+  {
+    Optional<Test1> a(in_place);
+    Optional<Test2> b;
+
+    b = std::move(a);
+    EXPECT_TRUE(!!a);
+    EXPECT_TRUE(!!b);
+    EXPECT_EQ(Test1::State::MOVED, a->state);
+    EXPECT_EQ(Test2::State::MOVE_CONSTRUCTED_FROM_TEST1, b->state);
+  }
+
+  {
+    Optional<Test1> a(in_place);
+    Optional<Test2> b(in_place);
+
+    b = std::move(a);
+    EXPECT_TRUE(!!a);
+    EXPECT_TRUE(!!b);
+    EXPECT_EQ(Test1::State::MOVED, a->state);
+    EXPECT_EQ(Test2::State::MOVE_ASSIGNED_FROM_TEST1, b->state);
+  }
+
+  // Similar to Test2, but Test3 also has copy/move constructors and assign
+  // operators taking Optional<Test1>. In this case, for a = b where a is
+  // Optional<Test3> and b is Optional<Test1>,
+  // Optional<T>::operator=(U&&) with U = Optional<Test1> should be used
+  // rather than Optional<T>::operator=(Optional<U>&&) with U = Test1.
+  struct Test3 {
+    enum class State {
+      DEFAULT_CONSTRUCTED,
+      COPY_CONSTRUCTED_FROM_TEST1,
+      MOVE_CONSTRUCTED_FROM_TEST1,
+      COPY_CONSTRUCTED_FROM_OPTIONAL_TEST1,
+      MOVE_CONSTRUCTED_FROM_OPTIONAL_TEST1,
+      COPY_ASSIGNED_FROM_TEST1,
+      MOVE_ASSIGNED_FROM_TEST1,
+      COPY_ASSIGNED_FROM_OPTIONAL_TEST1,
+      MOVE_ASSIGNED_FROM_OPTIONAL_TEST1,
+    };
+
+    Test3() = default;
+    explicit Test3(const Test1& test1)
+        : state(State::COPY_CONSTRUCTED_FROM_TEST1) {}
+    explicit Test3(Test1&& test1) : state(State::MOVE_CONSTRUCTED_FROM_TEST1) {
+      test1.state = Test1::State::MOVED;
+    }
+    explicit Test3(const Optional<Test1>& test1)
+        : state(State::COPY_CONSTRUCTED_FROM_OPTIONAL_TEST1) {}
+    explicit Test3(Optional<Test1>&& test1)
+        : state(State::MOVE_CONSTRUCTED_FROM_OPTIONAL_TEST1) {
+      // In the following scenarios, the given |test1| should always have a
+      // value.
+      DCHECK(test1.has_value());
+      test1->state = Test1::State::MOVED;
+    }
+    Test3& operator=(const Test1& test1) {
+      state = State::COPY_ASSIGNED_FROM_TEST1;
+      return *this;
+    }
+    Test3& operator=(Test1&& test1) {
+      state = State::MOVE_ASSIGNED_FROM_TEST1;
+      test1.state = Test1::State::MOVED;
+      return *this;
+    }
+    Test3& operator=(const Optional<Test1>& test1) {
+      state = State::COPY_ASSIGNED_FROM_OPTIONAL_TEST1;
+      return *this;
+    }
+    Test3& operator=(Optional<Test1>&& test1) {
+      state = State::MOVE_ASSIGNED_FROM_OPTIONAL_TEST1;
+      // In the following scenarios, the given |test1| should always have a
+      // value.
+      DCHECK(test1.has_value());
+      test1->state = Test1::State::MOVED;
+      return *this;
+    }
+
+    State state = State::DEFAULT_CONSTRUCTED;
+  };
+
+  {
+    Optional<Test1> a(in_place);
+    Optional<Test3> b;
+
+    b = a;
+    EXPECT_TRUE(!!a);
+    EXPECT_TRUE(!!b);
+    EXPECT_EQ(Test1::State::CONSTRUCTED, a->state);
+    EXPECT_EQ(Test3::State::COPY_CONSTRUCTED_FROM_OPTIONAL_TEST1, b->state);
+  }
+
+  {
+    Optional<Test1> a(in_place);
+    Optional<Test3> b(in_place);
+
+    b = a;
+    EXPECT_TRUE(!!a);
+    EXPECT_TRUE(!!b);
+    EXPECT_EQ(Test1::State::CONSTRUCTED, a->state);
+    EXPECT_EQ(Test3::State::COPY_ASSIGNED_FROM_OPTIONAL_TEST1, b->state);
+  }
+
+  {
+    Optional<Test1> a(in_place);
+    Optional<Test3> b;
+
+    b = std::move(a);
+    EXPECT_TRUE(!!a);
+    EXPECT_TRUE(!!b);
+    EXPECT_EQ(Test1::State::MOVED, a->state);
+    EXPECT_EQ(Test3::State::MOVE_CONSTRUCTED_FROM_OPTIONAL_TEST1, b->state);
+  }
+
+  {
+    Optional<Test1> a(in_place);
+    Optional<Test3> b(in_place);
+
+    b = std::move(a);
+    EXPECT_TRUE(!!a);
+    EXPECT_TRUE(!!b);
+    EXPECT_EQ(Test1::State::MOVED, a->state);
+    EXPECT_EQ(Test3::State::MOVE_ASSIGNED_FROM_OPTIONAL_TEST1, b->state);
+  }
+}
+
+TEST(OptionalTest, OperatorStar) {
+  {
+    Optional<float> a(0.1f);
+    EXPECT_EQ(a.value(), *a);
+  }
+
+  {
+    Optional<std::string> a("foo");
+    EXPECT_EQ(a.value(), *a);
+  }
+
+  {
+    Optional<TestObject> a(TestObject(3, 0.1));
+    EXPECT_EQ(a.value(), *a);
+  }
+}
+
+TEST(OptionalTest, OperatorStar_rvalue) {
+  EXPECT_EQ(0.1f, *Optional<float>(0.1f));
+  EXPECT_EQ(std::string("foo"), *Optional<std::string>("foo"));
+  EXPECT_TRUE(TestObject(3, 0.1) == *Optional<TestObject>(TestObject(3, 0.1)));
+}
+
+TEST(OptionalTest, OperatorArrow) {
+  Optional<TestObject> a(TestObject(3, 0.1));
+  EXPECT_EQ(a->foo(), 3);
+}
+
+TEST(OptionalTest, Value_rvalue) {
+  EXPECT_EQ(0.1f, Optional<float>(0.1f).value());
+  EXPECT_EQ(std::string("foo"), Optional<std::string>("foo").value());
+  EXPECT_TRUE(TestObject(3, 0.1) ==
+              Optional<TestObject>(TestObject(3, 0.1)).value());
+}
+
+TEST(OptionalTest, ValueOr) {
+  {
+    Optional<float> a;
+    EXPECT_EQ(0.0f, a.value_or(0.0f));
+
+    a = 0.1f;
+    EXPECT_EQ(0.1f, a.value_or(0.0f));
+
+    a = base::nullopt;
+    EXPECT_EQ(0.0f, a.value_or(0.0f));
+  }
+
+  // value_or() can be constexpr.
+  {
+    constexpr Optional<int> a(in_place, 1);
+    constexpr int value = a.value_or(10);
+    EXPECT_EQ(1, value);
+  }
+  {
+    constexpr Optional<int> a;
+    constexpr int value = a.value_or(10);
+    EXPECT_EQ(10, value);
+  }
+
+  {
+    Optional<std::string> a;
+    EXPECT_EQ("bar", a.value_or("bar"));
+
+    a = std::string("foo");
+    EXPECT_EQ(std::string("foo"), a.value_or("bar"));
+
+    a = base::nullopt;
+    EXPECT_EQ(std::string("bar"), a.value_or("bar"));
+  }
+
+  {
+    Optional<TestObject> a;
+    EXPECT_TRUE(a.value_or(TestObject(1, 0.3)) == TestObject(1, 0.3));
+
+    a = TestObject(3, 0.1);
+    EXPECT_TRUE(a.value_or(TestObject(1, 0.3)) == TestObject(3, 0.1));
+
+    a = base::nullopt;
+    EXPECT_TRUE(a.value_or(TestObject(1, 0.3)) == TestObject(1, 0.3));
+  }
+}
+
+TEST(OptionalTest, Swap_bothNoValue) {
+  Optional<TestObject> a, b;
+  a.swap(b);
+
+  EXPECT_FALSE(a);
+  EXPECT_FALSE(b);
+  EXPECT_TRUE(TestObject(42, 0.42) == a.value_or(TestObject(42, 0.42)));
+  EXPECT_TRUE(TestObject(42, 0.42) == b.value_or(TestObject(42, 0.42)));
+}
+
+TEST(OptionalTest, Swap_inHasValue) {
+  Optional<TestObject> a(TestObject(1, 0.3));
+  Optional<TestObject> b;
+  a.swap(b);
+
+  EXPECT_FALSE(a);
+
+  EXPECT_TRUE(!!b);
+  EXPECT_TRUE(TestObject(42, 0.42) == a.value_or(TestObject(42, 0.42)));
+  EXPECT_TRUE(TestObject(1, 0.3) == b.value_or(TestObject(42, 0.42)));
+}
+
+TEST(OptionalTest, Swap_outHasValue) {
+  Optional<TestObject> a;
+  Optional<TestObject> b(TestObject(1, 0.3));
+  a.swap(b);
+
+  EXPECT_TRUE(!!a);
+  EXPECT_FALSE(!!b);
+  EXPECT_TRUE(TestObject(1, 0.3) == a.value_or(TestObject(42, 0.42)));
+  EXPECT_TRUE(TestObject(42, 0.42) == b.value_or(TestObject(42, 0.42)));
+}
+
+TEST(OptionalTest, Swap_bothValue) {
+  Optional<TestObject> a(TestObject(0, 0.1));
+  Optional<TestObject> b(TestObject(1, 0.3));
+  a.swap(b);
+
+  EXPECT_TRUE(!!a);
+  EXPECT_TRUE(!!b);
+  EXPECT_TRUE(TestObject(1, 0.3) == a.value_or(TestObject(42, 0.42)));
+  EXPECT_TRUE(TestObject(0, 0.1) == b.value_or(TestObject(42, 0.42)));
+  EXPECT_EQ(TestObject::State::SWAPPED, a->state());
+  EXPECT_EQ(TestObject::State::SWAPPED, b->state());
+}
+
+TEST(OptionalTest, Emplace) {
+  {
+    Optional<float> a(0.1f);
+    EXPECT_EQ(0.3f, a.emplace(0.3f));
+
+    EXPECT_TRUE(a);
+    EXPECT_EQ(0.3f, a.value());
+  }
+
+  {
+    Optional<std::string> a("foo");
+    EXPECT_EQ("bar", a.emplace("bar"));
+
+    EXPECT_TRUE(a);
+    EXPECT_EQ("bar", a.value());
+  }
+
+  {
+    Optional<TestObject> a(TestObject(0, 0.1));
+    EXPECT_EQ(TestObject(1, 0.2), a.emplace(TestObject(1, 0.2)));
+
+    EXPECT_TRUE(!!a);
+    EXPECT_TRUE(TestObject(1, 0.2) == a.value());
+  }
+
+  {
+    Optional<std::vector<int>> a;
+    auto& ref = a.emplace({2, 3});
+    static_assert(std::is_same<std::vector<int>&, decltype(ref)>::value, "");
+    EXPECT_TRUE(a);
+    EXPECT_THAT(*a, ElementsAre(2, 3));
+    EXPECT_EQ(&ref, &*a);
+  }
+
+  {
+    Optional<std::vector<int>> a;
+    auto& ref = a.emplace({4, 5}, std::allocator<int>());
+    static_assert(std::is_same<std::vector<int>&, decltype(ref)>::value, "");
+    EXPECT_TRUE(a);
+    EXPECT_THAT(*a, ElementsAre(4, 5));
+    EXPECT_EQ(&ref, &*a);
+  }
+}
+
+TEST(OptionalTest, Equals_TwoEmpty) {
+  Optional<int> a;
+  Optional<int> b;
+
+  EXPECT_TRUE(a == b);
+}
+
+TEST(OptionalTest, Equals_TwoEquals) {
+  Optional<int> a(1);
+  Optional<int> b(1);
+
+  EXPECT_TRUE(a == b);
+}
+
+TEST(OptionalTest, Equals_OneEmpty) {
+  Optional<int> a;
+  Optional<int> b(1);
+
+  EXPECT_FALSE(a == b);
+}
+
+TEST(OptionalTest, Equals_TwoDifferent) {
+  Optional<int> a(0);
+  Optional<int> b(1);
+
+  EXPECT_FALSE(a == b);
+}
+
+TEST(OptionalTest, Equals_DifferentType) {
+  Optional<int> a(0);
+  Optional<double> b(0);
+
+  EXPECT_TRUE(a == b);
+}
+
+TEST(OptionalTest, NotEquals_TwoEmpty) {
+  Optional<int> a;
+  Optional<int> b;
+
+  EXPECT_FALSE(a != b);
+}
+
+TEST(OptionalTest, NotEquals_TwoEquals) {
+  Optional<int> a(1);
+  Optional<int> b(1);
+
+  EXPECT_FALSE(a != b);
+}
+
+TEST(OptionalTest, NotEquals_OneEmpty) {
+  Optional<int> a;
+  Optional<int> b(1);
+
+  EXPECT_TRUE(a != b);
+}
+
+TEST(OptionalTest, NotEquals_TwoDifferent) {
+  Optional<int> a(0);
+  Optional<int> b(1);
+
+  EXPECT_TRUE(a != b);
+}
+
+TEST(OptionalTest, NotEquals_DifferentType) {
+  Optional<int> a(0);
+  Optional<double> b(0.0);
+
+  EXPECT_FALSE(a != b);
+}
+
+TEST(OptionalTest, Less_LeftEmpty) {
+  Optional<int> l;
+  Optional<int> r(1);
+
+  EXPECT_TRUE(l < r);
+}
+
+TEST(OptionalTest, Less_RightEmpty) {
+  Optional<int> l(1);
+  Optional<int> r;
+
+  EXPECT_FALSE(l < r);
+}
+
+TEST(OptionalTest, Less_BothEmpty) {
+  Optional<int> l;
+  Optional<int> r;
+
+  EXPECT_FALSE(l < r);
+}
+
+TEST(OptionalTest, Less_BothValues) {
+  {
+    Optional<int> l(1);
+    Optional<int> r(2);
+
+    EXPECT_TRUE(l < r);
+  }
+  {
+    Optional<int> l(2);
+    Optional<int> r(1);
+
+    EXPECT_FALSE(l < r);
+  }
+  {
+    Optional<int> l(1);
+    Optional<int> r(1);
+
+    EXPECT_FALSE(l < r);
+  }
+}
+
+TEST(OptionalTest, Less_DifferentType) {
+  Optional<int> l(1);
+  Optional<double> r(2.0);
+
+  EXPECT_TRUE(l < r);
+}
+
+TEST(OptionalTest, LessEq_LeftEmpty) {
+  Optional<int> l;
+  Optional<int> r(1);
+
+  EXPECT_TRUE(l <= r);
+}
+
+TEST(OptionalTest, LessEq_RightEmpty) {
+  Optional<int> l(1);
+  Optional<int> r;
+
+  EXPECT_FALSE(l <= r);
+}
+
+TEST(OptionalTest, LessEq_BothEmpty) {
+  Optional<int> l;
+  Optional<int> r;
+
+  EXPECT_TRUE(l <= r);
+}
+
+TEST(OptionalTest, LessEq_BothValues) {
+  {
+    Optional<int> l(1);
+    Optional<int> r(2);
+
+    EXPECT_TRUE(l <= r);
+  }
+  {
+    Optional<int> l(2);
+    Optional<int> r(1);
+
+    EXPECT_FALSE(l <= r);
+  }
+  {
+    Optional<int> l(1);
+    Optional<int> r(1);
+
+    EXPECT_TRUE(l <= r);
+  }
+}
+
+TEST(OptionalTest, LessEq_DifferentType) {
+  Optional<int> l(1);
+  Optional<double> r(2.0);
+
+  EXPECT_TRUE(l <= r);
+}
+
+TEST(OptionalTest, Greater_BothEmpty) {
+  Optional<int> l;
+  Optional<int> r;
+
+  EXPECT_FALSE(l > r);
+}
+
+TEST(OptionalTest, Greater_LeftEmpty) {
+  Optional<int> l;
+  Optional<int> r(1);
+
+  EXPECT_FALSE(l > r);
+}
+
+TEST(OptionalTest, Greater_RightEmpty) {
+  Optional<int> l(1);
+  Optional<int> r;
+
+  EXPECT_TRUE(l > r);
+}
+
+TEST(OptionalTest, Greater_BothValue) {
+  {
+    Optional<int> l(1);
+    Optional<int> r(2);
+
+    EXPECT_FALSE(l > r);
+  }
+  {
+    Optional<int> l(2);
+    Optional<int> r(1);
+
+    EXPECT_TRUE(l > r);
+  }
+  {
+    Optional<int> l(1);
+    Optional<int> r(1);
+
+    EXPECT_FALSE(l > r);
+  }
+}
+
+TEST(OptionalTest, Greater_DifferentType) {
+  Optional<int> l(1);
+  Optional<double> r(2.0);
+
+  EXPECT_FALSE(l > r);
+}
+
+TEST(OptionalTest, GreaterEq_BothEmpty) {
+  Optional<int> l;
+  Optional<int> r;
+
+  EXPECT_TRUE(l >= r);
+}
+
+TEST(OptionalTest, GreaterEq_LeftEmpty) {
+  Optional<int> l;
+  Optional<int> r(1);
+
+  EXPECT_FALSE(l >= r);
+}
+
+TEST(OptionalTest, GreaterEq_RightEmpty) {
+  Optional<int> l(1);
+  Optional<int> r;
+
+  EXPECT_TRUE(l >= r);
+}
+
+TEST(OptionalTest, GreaterEq_BothValue) {
+  {
+    Optional<int> l(1);
+    Optional<int> r(2);
+
+    EXPECT_FALSE(l >= r);
+  }
+  {
+    Optional<int> l(2);
+    Optional<int> r(1);
+
+    EXPECT_TRUE(l >= r);
+  }
+  {
+    Optional<int> l(1);
+    Optional<int> r(1);
+
+    EXPECT_TRUE(l >= r);
+  }
+}
+
+TEST(OptionalTest, GreaterEq_DifferentType) {
+  Optional<int> l(1);
+  Optional<double> r(2.0);
+
+  EXPECT_FALSE(l >= r);
+}
+
+TEST(OptionalTest, OptNullEq) {
+  {
+    Optional<int> opt;
+    EXPECT_TRUE(opt == base::nullopt);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_FALSE(opt == base::nullopt);
+  }
+}
+
+TEST(OptionalTest, NullOptEq) {
+  {
+    Optional<int> opt;
+    EXPECT_TRUE(base::nullopt == opt);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_FALSE(base::nullopt == opt);
+  }
+}
+
+TEST(OptionalTest, OptNullNotEq) {
+  {
+    Optional<int> opt;
+    EXPECT_FALSE(opt != base::nullopt);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_TRUE(opt != base::nullopt);
+  }
+}
+
+TEST(OptionalTest, NullOptNotEq) {
+  {
+    Optional<int> opt;
+    EXPECT_FALSE(base::nullopt != opt);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_TRUE(base::nullopt != opt);
+  }
+}
+
+TEST(OptionalTest, OptNullLower) {
+  {
+    Optional<int> opt;
+    EXPECT_FALSE(opt < base::nullopt);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_FALSE(opt < base::nullopt);
+  }
+}
+
+TEST(OptionalTest, NullOptLower) {
+  {
+    Optional<int> opt;
+    EXPECT_FALSE(base::nullopt < opt);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_TRUE(base::nullopt < opt);
+  }
+}
+
+TEST(OptionalTest, OptNullLowerEq) {
+  {
+    Optional<int> opt;
+    EXPECT_TRUE(opt <= base::nullopt);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_FALSE(opt <= base::nullopt);
+  }
+}
+
+TEST(OptionalTest, NullOptLowerEq) {
+  {
+    Optional<int> opt;
+    EXPECT_TRUE(base::nullopt <= opt);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_TRUE(base::nullopt <= opt);
+  }
+}
+
+TEST(OptionalTest, OptNullGreater) {
+  {
+    Optional<int> opt;
+    EXPECT_FALSE(opt > base::nullopt);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_TRUE(opt > base::nullopt);
+  }
+}
+
+TEST(OptionalTest, NullOptGreater) {
+  {
+    Optional<int> opt;
+    EXPECT_FALSE(base::nullopt > opt);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_FALSE(base::nullopt > opt);
+  }
+}
+
+TEST(OptionalTest, OptNullGreaterEq) {
+  {
+    Optional<int> opt;
+    EXPECT_TRUE(opt >= base::nullopt);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_TRUE(opt >= base::nullopt);
+  }
+}
+
+TEST(OptionalTest, NullOptGreaterEq) {
+  {
+    Optional<int> opt;
+    EXPECT_TRUE(base::nullopt >= opt);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_FALSE(base::nullopt >= opt);
+  }
+}
+
+TEST(OptionalTest, ValueEq_Empty) {
+  Optional<int> opt;
+  EXPECT_FALSE(opt == 1);
+}
+
+TEST(OptionalTest, ValueEq_NotEmpty) {
+  {
+    Optional<int> opt(0);
+    EXPECT_FALSE(opt == 1);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_TRUE(opt == 1);
+  }
+}
+
+TEST(OptionalTest, ValueEq_DifferentType) {
+  Optional<int> opt(0);
+  EXPECT_TRUE(opt == 0.0);
+}
+
+TEST(OptionalTest, EqValue_Empty) {
+  Optional<int> opt;
+  EXPECT_FALSE(1 == opt);
+}
+
+TEST(OptionalTest, EqValue_NotEmpty) {
+  {
+    Optional<int> opt(0);
+    EXPECT_FALSE(1 == opt);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_TRUE(1 == opt);
+  }
+}
+
+TEST(OptionalTest, EqValue_DifferentType) {
+  Optional<int> opt(0);
+  EXPECT_TRUE(0.0 == opt);
+}
+
+TEST(OptionalTest, ValueNotEq_Empty) {
+  Optional<int> opt;
+  EXPECT_TRUE(opt != 1);
+}
+
+TEST(OptionalTest, ValueNotEq_NotEmpty) {
+  {
+    Optional<int> opt(0);
+    EXPECT_TRUE(opt != 1);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_FALSE(opt != 1);
+  }
+}
+
+TEST(OptionalTest, ValueNotEq_DifferentType) {
+  Optional<int> opt(0);
+  EXPECT_FALSE(opt != 0.0);
+}
+
+TEST(OptionalTest, NotEqValue_Empty) {
+  Optional<int> opt;
+  EXPECT_TRUE(1 != opt);
+}
+
+TEST(OptionalTest, NotEqValue_NotEmpty) {
+  {
+    Optional<int> opt(0);
+    EXPECT_TRUE(1 != opt);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_FALSE(1 != opt);
+  }
+}
+
+TEST(OptionalTest, NotEqValue_DifferentType) {
+  Optional<int> opt(0);
+  EXPECT_FALSE(0.0 != opt);
+}
+
+TEST(OptionalTest, ValueLess_Empty) {
+  Optional<int> opt;
+  EXPECT_TRUE(opt < 1);
+}
+
+TEST(OptionalTest, ValueLess_NotEmpty) {
+  {
+    Optional<int> opt(0);
+    EXPECT_TRUE(opt < 1);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_FALSE(opt < 1);
+  }
+  {
+    Optional<int> opt(2);
+    EXPECT_FALSE(opt < 1);
+  }
+}
+
+TEST(OptionalTest, ValueLess_DifferentType) {
+  Optional<int> opt(0);
+  EXPECT_TRUE(opt < 1.0);
+}
+
+TEST(OptionalTest, LessValue_Empty) {
+  Optional<int> opt;
+  EXPECT_FALSE(1 < opt);
+}
+
+TEST(OptionalTest, LessValue_NotEmpty) {
+  {
+    Optional<int> opt(0);
+    EXPECT_FALSE(1 < opt);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_FALSE(1 < opt);
+  }
+  {
+    Optional<int> opt(2);
+    EXPECT_TRUE(1 < opt);
+  }
+}
+
+TEST(OptionalTest, LessValue_DifferentType) {
+  Optional<int> opt(0);
+  EXPECT_FALSE(0.0 < opt);
+}
+
+TEST(OptionalTest, ValueLessEq_Empty) {
+  Optional<int> opt;
+  EXPECT_TRUE(opt <= 1);
+}
+
+TEST(OptionalTest, ValueLessEq_NotEmpty) {
+  {
+    Optional<int> opt(0);
+    EXPECT_TRUE(opt <= 1);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_TRUE(opt <= 1);
+  }
+  {
+    Optional<int> opt(2);
+    EXPECT_FALSE(opt <= 1);
+  }
+}
+
+TEST(OptionalTest, ValueLessEq_DifferentType) {
+  Optional<int> opt(0);
+  EXPECT_TRUE(opt <= 0.0);
+}
+
+TEST(OptionalTest, LessEqValue_Empty) {
+  Optional<int> opt;
+  EXPECT_FALSE(1 <= opt);
+}
+
+TEST(OptionalTest, LessEqValue_NotEmpty) {
+  {
+    Optional<int> opt(0);
+    EXPECT_FALSE(1 <= opt);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_TRUE(1 <= opt);
+  }
+  {
+    Optional<int> opt(2);
+    EXPECT_TRUE(1 <= opt);
+  }
+}
+
+TEST(OptionalTest, LessEqValue_DifferentType) {
+  Optional<int> opt(0);
+  EXPECT_TRUE(0.0 <= opt);
+}
+
+TEST(OptionalTest, ValueGreater_Empty) {
+  Optional<int> opt;
+  EXPECT_FALSE(opt > 1);
+}
+
+TEST(OptionalTest, ValueGreater_NotEmpty) {
+  {
+    Optional<int> opt(0);
+    EXPECT_FALSE(opt > 1);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_FALSE(opt > 1);
+  }
+  {
+    Optional<int> opt(2);
+    EXPECT_TRUE(opt > 1);
+  }
+}
+
+TEST(OptionalTest, ValueGreater_DifferentType) {
+  Optional<int> opt(0);
+  EXPECT_FALSE(opt > 0.0);
+}
+
+TEST(OptionalTest, GreaterValue_Empty) {
+  Optional<int> opt;
+  EXPECT_TRUE(1 > opt);
+}
+
+TEST(OptionalTest, GreaterValue_NotEmpty) {
+  {
+    Optional<int> opt(0);
+    EXPECT_TRUE(1 > opt);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_FALSE(1 > opt);
+  }
+  {
+    Optional<int> opt(2);
+    EXPECT_FALSE(1 > opt);
+  }
+}
+
+TEST(OptionalTest, GreaterValue_DifferentType) {
+  Optional<int> opt(0);
+  EXPECT_FALSE(0.0 > opt);
+}
+
+TEST(OptionalTest, ValueGreaterEq_Empty) {
+  Optional<int> opt;
+  EXPECT_FALSE(opt >= 1);
+}
+
+TEST(OptionalTest, ValueGreaterEq_NotEmpty) {
+  {
+    Optional<int> opt(0);
+    EXPECT_FALSE(opt >= 1);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_TRUE(opt >= 1);
+  }
+  {
+    Optional<int> opt(2);
+    EXPECT_TRUE(opt >= 1);
+  }
+}
+
+TEST(OptionalTest, ValueGreaterEq_DifferentType) {
+  Optional<int> opt(0);
+  EXPECT_TRUE(opt >= 0.0);
+}
+
+TEST(OptionalTest, GreaterEqValue_Empty) {
+  Optional<int> opt;
+  EXPECT_TRUE(1 >= opt);
+}
+
+TEST(OptionalTest, GreaterEqValue_NotEmpty) {
+  {
+    Optional<int> opt(0);
+    EXPECT_TRUE(1 >= opt);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_TRUE(1 >= opt);
+  }
+  {
+    Optional<int> opt(2);
+    EXPECT_FALSE(1 >= opt);
+  }
+}
+
+TEST(OptionalTest, GreaterEqValue_DifferentType) {
+  Optional<int> opt(0);
+  EXPECT_TRUE(0.0 >= opt);
+}
+
+TEST(OptionalTest, NotEquals) {
+  {
+    Optional<float> a(0.1f);
+    Optional<float> b(0.2f);
+    EXPECT_NE(a, b);
+  }
+
+  {
+    Optional<std::string> a("foo");
+    Optional<std::string> b("bar");
+    EXPECT_NE(a, b);
+  }
+
+  {
+    Optional<int> a(1);
+    Optional<double> b(2);
+    EXPECT_NE(a, b);
+  }
+
+  {
+    Optional<TestObject> a(TestObject(3, 0.1));
+    Optional<TestObject> b(TestObject(4, 1.0));
+    EXPECT_TRUE(a != b);
+  }
+}
+
+TEST(OptionalTest, NotEqualsNull) {
+  {
+    Optional<float> a(0.1f);
+    Optional<float> b(0.1f);
+    b = base::nullopt;
+    EXPECT_NE(a, b);
+  }
+
+  {
+    Optional<std::string> a("foo");
+    Optional<std::string> b("foo");
+    b = base::nullopt;
+    EXPECT_NE(a, b);
+  }
+
+  {
+    Optional<TestObject> a(TestObject(3, 0.1));
+    Optional<TestObject> b(TestObject(3, 0.1));
+    b = base::nullopt;
+    EXPECT_TRUE(a != b);
+  }
+}
+
+TEST(OptionalTest, MakeOptional) {
+  {
+    Optional<float> o = make_optional(32.f);
+    EXPECT_TRUE(o);
+    EXPECT_EQ(32.f, *o);
+
+    float value = 3.f;
+    o = make_optional(std::move(value));
+    EXPECT_TRUE(o);
+    EXPECT_EQ(3.f, *o);
+  }
+
+  {
+    Optional<std::string> o = make_optional(std::string("foo"));
+    EXPECT_TRUE(o);
+    EXPECT_EQ("foo", *o);
+
+    std::string value = "bar";
+    o = make_optional(std::move(value));
+    EXPECT_TRUE(o);
+    EXPECT_EQ(std::string("bar"), *o);
+  }
+
+  {
+    Optional<TestObject> o = make_optional(TestObject(3, 0.1));
+    EXPECT_TRUE(!!o);
+    EXPECT_TRUE(TestObject(3, 0.1) == *o);
+
+    TestObject value = TestObject(0, 0.42);
+    o = make_optional(std::move(value));
+    EXPECT_TRUE(!!o);
+    EXPECT_TRUE(TestObject(0, 0.42) == *o);
+    EXPECT_EQ(TestObject::State::MOVED_FROM, value.state());
+    EXPECT_EQ(TestObject::State::MOVE_ASSIGNED, o->state());
+
+    EXPECT_EQ(TestObject::State::MOVE_CONSTRUCTED,
+              base::make_optional(std::move(value))->state());
+  }
+
+  {
+    struct Test {
+      Test(int a, double b, bool c) : a(a), b(b), c(c) {}
+
+      int a;
+      double b;
+      bool c;
+    };
+
+    Optional<Test> o = make_optional<Test>(1, 2.0, true);
+    EXPECT_TRUE(!!o);
+    EXPECT_EQ(1, o->a);
+    EXPECT_EQ(2.0, o->b);
+    EXPECT_TRUE(o->c);
+  }
+
+  {
+    auto str1 = make_optional<std::string>({'1', '2', '3'});
+    EXPECT_EQ("123", *str1);
+
+    auto str2 =
+        make_optional<std::string>({'a', 'b', 'c'}, std::allocator<char>());
+    EXPECT_EQ("abc", *str2);
+  }
+}
+
+TEST(OptionalTest, NonMemberSwap_bothNoValue) {
+  Optional<TestObject> a, b;
+  base::swap(a, b);
+
+  EXPECT_FALSE(!!a);
+  EXPECT_FALSE(!!b);
+  EXPECT_TRUE(TestObject(42, 0.42) == a.value_or(TestObject(42, 0.42)));
+  EXPECT_TRUE(TestObject(42, 0.42) == b.value_or(TestObject(42, 0.42)));
+}
+
+TEST(OptionalTest, NonMemberSwap_inHasValue) {
+  Optional<TestObject> a(TestObject(1, 0.3));
+  Optional<TestObject> b;
+  base::swap(a, b);
+
+  EXPECT_FALSE(!!a);
+  EXPECT_TRUE(!!b);
+  EXPECT_TRUE(TestObject(42, 0.42) == a.value_or(TestObject(42, 0.42)));
+  EXPECT_TRUE(TestObject(1, 0.3) == b.value_or(TestObject(42, 0.42)));
+}
+
+TEST(OptionalTest, NonMemberSwap_outHasValue) {
+  Optional<TestObject> a;
+  Optional<TestObject> b(TestObject(1, 0.3));
+  base::swap(a, b);
+
+  EXPECT_TRUE(!!a);
+  EXPECT_FALSE(!!b);
+  EXPECT_TRUE(TestObject(1, 0.3) == a.value_or(TestObject(42, 0.42)));
+  EXPECT_TRUE(TestObject(42, 0.42) == b.value_or(TestObject(42, 0.42)));
+}
+
+TEST(OptionalTest, NonMemberSwap_bothValue) {
+  Optional<TestObject> a(TestObject(0, 0.1));
+  Optional<TestObject> b(TestObject(1, 0.3));
+  base::swap(a, b);
+
+  EXPECT_TRUE(!!a);
+  EXPECT_TRUE(!!b);
+  EXPECT_TRUE(TestObject(1, 0.3) == a.value_or(TestObject(42, 0.42)));
+  EXPECT_TRUE(TestObject(0, 0.1) == b.value_or(TestObject(42, 0.42)));
+  EXPECT_EQ(TestObject::State::SWAPPED, a->state());
+  EXPECT_EQ(TestObject::State::SWAPPED, b->state());
+}
+
+TEST(OptionalTest, Hash_OptionalReflectsInternal) {
+  {
+    std::hash<int> int_hash;
+    std::hash<Optional<int>> opt_int_hash;
+
+    EXPECT_EQ(int_hash(1), opt_int_hash(Optional<int>(1)));
+  }
+
+  {
+    std::hash<std::string> str_hash;
+    std::hash<Optional<std::string>> opt_str_hash;
+
+    EXPECT_EQ(str_hash(std::string("foobar")),
+              opt_str_hash(Optional<std::string>(std::string("foobar"))));
+  }
+}
+
+TEST(OptionalTest, Hash_NullOptEqualsNullOpt) {
+  std::hash<Optional<int>> opt_int_hash;
+  std::hash<Optional<std::string>> opt_str_hash;
+
+  EXPECT_EQ(opt_str_hash(Optional<std::string>()),
+            opt_int_hash(Optional<int>()));
+}
+
+TEST(OptionalTest, Hash_UseInSet) {
+  std::set<Optional<int>> setOptInt;
+
+  EXPECT_EQ(setOptInt.end(), setOptInt.find(42));
+
+  setOptInt.insert(Optional<int>(3));
+  EXPECT_EQ(setOptInt.end(), setOptInt.find(42));
+  EXPECT_NE(setOptInt.end(), setOptInt.find(3));
+}
+
+TEST(OptionalTest, HasValue) {
+  Optional<int> a;
+  EXPECT_FALSE(a.has_value());
+
+  a = 42;
+  EXPECT_TRUE(a.has_value());
+
+  a = nullopt;
+  EXPECT_FALSE(a.has_value());
+
+  a = 0;
+  EXPECT_TRUE(a.has_value());
+
+  a = Optional<int>();
+  EXPECT_FALSE(a.has_value());
+}
+
+TEST(OptionalTest, Reset_int) {
+  Optional<int> a(0);
+  EXPECT_TRUE(a.has_value());
+  EXPECT_EQ(0, a.value());
+
+  a.reset();
+  EXPECT_FALSE(a.has_value());
+  EXPECT_EQ(-1, a.value_or(-1));
+}
+
+TEST(OptionalTest, Reset_Object) {
+  Optional<TestObject> a(TestObject(0, 0.1));
+  EXPECT_TRUE(a.has_value());
+  EXPECT_EQ(TestObject(0, 0.1), a.value());
+
+  a.reset();
+  EXPECT_FALSE(a.has_value());
+  EXPECT_EQ(TestObject(42, 0.0), a.value_or(TestObject(42, 0.0)));
+}
+
+TEST(OptionalTest, Reset_NoOp) {
+  Optional<int> a;
+  EXPECT_FALSE(a.has_value());
+
+  a.reset();
+  EXPECT_FALSE(a.has_value());
+}
+
+TEST(OptionalTest, AssignFromRValue) {
+  Optional<TestObject> a;
+  EXPECT_FALSE(a.has_value());
+
+  TestObject obj;
+  a = std::move(obj);
+  EXPECT_TRUE(a.has_value());
+  EXPECT_EQ(1, a->move_ctors_count());
+}
+
+TEST(OptionalTest, DontCallDefaultCtor) {
+  Optional<DeletedDefaultConstructor> a;
+  EXPECT_FALSE(a.has_value());
+
+  a = base::make_optional<DeletedDefaultConstructor>(42);
+  EXPECT_TRUE(a.has_value());
+  EXPECT_EQ(42, a->foo());
+}
+
+TEST(OptionalTest, DontCallNewMemberFunction) {
+  Optional<DeleteNewOperators> a;
+  EXPECT_FALSE(a.has_value());
+
+  a = DeleteNewOperators();
+  EXPECT_TRUE(a.has_value());
+}
+
+TEST(OptionalTest, Noexcept) {
+  // Trivial copy ctor, non-trivial move ctor, nothrow move assign.
+  struct Test1 {
+    Test1(const Test1&) = default;
+    Test1(Test1&&) {}
+    Test1& operator=(Test1&&) = default;
+  };
+  // Non-trivial copy ctor, trivial move ctor, throwing move assign.
+  struct Test2 {
+    Test2(const Test2&) {}
+    Test2(Test2&&) = default;
+    Test2& operator=(Test2&&) { return *this; }
+  };
+  // Trivial copy ctor, non-trivial nothrow move ctor.
+  struct Test3 {
+    Test3(const Test3&) = default;
+    Test3(Test3&&) noexcept {}
+  };
+  // Non-trivial copy ctor, non-trivial nothrow move ctor.
+  struct Test4 {
+    Test4(const Test4&) {}
+    Test4(Test4&&) noexcept {}
+  };
+  // Non-trivial copy ctor, non-trivial move ctor.
+  struct Test5 {
+    Test5(const Test5&) {}
+    Test5(Test5&&) {}
+  };
+
+  static_assert(
+      noexcept(Optional<int>(std::declval<Optional<int>>())),
+      "move constructor for noexcept move-constructible T must be noexcept "
+      "(trivial copy, trivial move)");
+  static_assert(
+      !noexcept(Optional<Test1>(std::declval<Optional<Test1>>())),
+      "move constructor for non-noexcept move-constructible T must not be "
+      "noexcept (trivial copy)");
+  static_assert(
+      noexcept(Optional<Test2>(std::declval<Optional<Test2>>())),
+      "move constructor for noexcept move-constructible T must be noexcept "
+      "(non-trivial copy, trivial move)");
+  static_assert(
+      noexcept(Optional<Test3>(std::declval<Optional<Test3>>())),
+      "move constructor for noexcept move-constructible T must be noexcept "
+      "(trivial copy, non-trivial move)");
+  static_assert(
+      noexcept(Optional<Test4>(std::declval<Optional<Test4>>())),
+      "move constructor for noexcept move-constructible T must be noexcept "
+      "(non-trivial copy, non-trivial move)");
+  static_assert(
+      !noexcept(Optional<Test5>(std::declval<Optional<Test5>>())),
+      "move constructor for non-noexcept move-constructible T must not be "
+      "noexcept (non-trivial copy)");
+
+  static_assert(
+      noexcept(std::declval<Optional<int>>() = std::declval<Optional<int>>()),
+      "move assign for noexcept move-constructible/move-assignable T "
+      "must be noexcept");
+  static_assert(
+      !noexcept(std::declval<Optional<Test1>>() =
+                    std::declval<Optional<Test1>>()),
+      "move assign for non-noexcept move-constructible T must not be noexcept");
+  static_assert(
+      !noexcept(std::declval<Optional<Test2>>() =
+                    std::declval<Optional<Test2>>()),
+      "move assign for non-noexcept move-assignable T must not be noexcept");
+}
+
+}  // namespace base
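
The practical payoff of the noexcept guarantees checked in the Noexcept test is
container reallocation: std::vector moves elements during growth only when the
element's move constructor is noexcept, and copies them otherwise. A minimal
standalone sketch (standard C++, not part of this patch):

    #include <type_traits>

    struct NothrowMove {
      NothrowMove(const NothrowMove&) {}
      NothrowMove(NothrowMove&&) noexcept {}
    };

    // Because Optional propagates the noexcept-ness of T's move constructor
    // (verified by the static_asserts above), a std::vector of
    // Optional<NothrowMove> may move rather than copy on reallocation.
    static_assert(std::is_nothrow_move_constructible<NothrowMove>::value,
                  "move must be nothrow for move-on-reallocation");
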
diff --git a/base/optional_unittest.nc b/base/optional_unittest.nc
new file mode 100644
index 0000000..62c0196
--- /dev/null
+++ b/base/optional_unittest.nc
@@ -0,0 +1,65 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is a "No Compile Test" suite.
+// http://dev.chromium.org/developers/testing/no-compile-tests
+
+#include <type_traits>
+
+#include "base/optional.h"
+
+namespace base {
+
+#if defined(NCTEST_EXPLICIT_CONVERTING_COPY_CONSTRUCTOR)  // [r"fatal error: no matching function for call to object of type"]
+
+// Optional<T>(const Optional<U>& arg) constructor is marked explicit if
+// T is not convertible from "const U&".
+void WontCompile() {
+  struct Test {
+    // Declared explicit so that Test is still constructible from int,
+    // but not convertible.
+    explicit Test(int a) {}
+  };
+
+  static_assert(!std::is_convertible<const int&, Test>::value,
+                "const int& to Test is convertible");
+  const Optional<int> arg(in_place, 1);
+  ([](Optional<Test> param) {})(arg);
+}
+
+#elif defined(NCTEST_EXPLICIT_CONVERTING_MOVE_CONSTRUCTOR)  // [r"fatal error: no matching function for call to object of type"]
+
+// Optional<T>(Optional<U>&& arg) constructor is marked explicit if
+// T is not convertible from "U&&".
+void WontCompile() {
+  struct Test {
+    // Declared explicit so that Test is still constructible from int,
+    // but not convertible.
+    explicit Test(int a) {}
+  };
+
+  static_assert(!std::is_convertible<int&&, Test>::value,
+                "int&& to Test is convertible");
+  ([](Optional<Test> param) {})(Optional<int>(in_place, 1));
+}
+
+#elif defined(NCTEST_EXPLICIT_VALUE_FORWARD_CONSTRUCTOR)  // [r"fatal error: no matching function for call to object of type"]
+
+// Optional<T>(U&&) constructor is marked explicit if T is not convertible
+// from U&&.
+void WontCompile() {
+  struct Test {
+    // Declared explicit so that Test is still constructible from int,
+    // but not convertible.
+    explicit Test(int a) {}
+  };
+
+  static_assert(!std::is_convertible<int&&, Test>::value,
+                "int&& to Test is convertible");
+  ([](Optional<Test> param) {})(1);
+}
+
+#endif
+
+}  // namespace base
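
For contrast with the failing cases above, the explicit spellings do compile.
A hedged sketch (the names are illustrative, not part of the suite):

    struct Wrapped {
      explicit Wrapped(int) {}
    };

    void Compiles() {
      base::Optional<int> source(base::in_place, 1);
      // The converting constructor is marked explicit here, so implicit
      // conversion fails (the no-compile cases above), but
      // direct-initialization is fine:
      base::Optional<Wrapped> dest(source);
      (void)dest;
    }
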
diff --git a/base/os_compat_android.cc b/base/os_compat_android.cc
new file mode 100644
index 0000000..c1a2ac8
--- /dev/null
+++ b/base/os_compat_android.cc
@@ -0,0 +1,178 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/os_compat_android.h"
+
+#include <asm/unistd.h>
+#include <errno.h>
+#include <limits.h>
+#include <math.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+#if !defined(__LP64__)
+#include <time64.h>
+#endif
+
+#include "base/rand_util.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/stringprintf.h"
+
+extern "C" {
+// There is no futimes() available in Bionic, so we provide our own
+// implementation until it is there.
+int futimes(int fd, const struct timeval tv[2]) {
+  if (tv == NULL)
+    return syscall(__NR_utimensat, fd, NULL, NULL, 0);
+
+  if (tv[0].tv_usec < 0 || tv[0].tv_usec >= 1000000 ||
+      tv[1].tv_usec < 0 || tv[1].tv_usec >= 1000000) {
+    errno = EINVAL;
+    return -1;
+  }
+
+  // Convert timeval to timespec.
+  struct timespec ts[2];
+  ts[0].tv_sec = tv[0].tv_sec;
+  ts[0].tv_nsec = tv[0].tv_usec * 1000;
+  ts[1].tv_sec = tv[1].tv_sec;
+  ts[1].tv_nsec = tv[1].tv_usec * 1000;
+  return syscall(__NR_utimensat, fd, NULL, ts, 0);
+}
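+
+// A minimal usage sketch for the futimes() shim above (illustrative only;
+// assumes an already-opened |fd| and <sys/time.h>):
+//
+//   struct timeval times[2];
+//   gettimeofday(&times[0], NULL);  // New access time.
+//   times[1] = times[0];            // New modification time.
+//   if (futimes(fd, times) != 0) {
+//     // errno is set, e.g. EINVAL for an out-of-range tv_usec.
+//   }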
+
+#if !defined(__LP64__)
+// 32-bit Android has only timegm64() and not timegm().
+// We replicate the behaviour of timegm() when the result overflows time_t.
+time_t timegm(struct tm* const t) {
+  // time_t is signed on Android.
+  static const time_t kTimeMax = ~(1L << (sizeof(time_t) * CHAR_BIT - 1));
+  static const time_t kTimeMin = (1L << (sizeof(time_t) * CHAR_BIT - 1));
+  time64_t result = timegm64(t);
+  if (result < kTimeMin || result > kTimeMax)
+    return -1;
+  return result;
+}
+#endif
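+
+// Illustrative only: on 32-bit builds a date past 2038 overflows time_t, so
+// the replacement above returns -1 instead of a silently truncated value:
+//
+//   struct tm y3k = {};
+//   y3k.tm_year = 3000 - 1900;
+//   y3k.tm_mday = 1;
+//   time_t t = timegm(&y3k);  // -1 on 32-bit Android; a valid value on LP64.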
+
+// The following is only needed when building with GCC 4.6 or higher
+// (i.e. not with Android GCC 4.4.3, nor with Clang).
+//
+// GCC is now capable of optimizing successive calls to sin() and cos() into
+// a single call to sincos(). This means that source code that looks like:
+//
+//     double c, s;
+//     c = cos(angle);
+//     s = sin(angle);
+//
+// Will generate machine code that looks like:
+//
+//     double c, s;
+//     sincos(angle, &s, &c);
+//
+// Unfortunately, sincos() and friends are not part of the Android libm.so
+// library provided by the NDK for API level 9. When the optimization kicks
+// in, it makes the final build fail with a puzzling message (puzzling
+// because 'sincos' doesn't appear anywhere in the sources!).
+//
+// To solve this, we provide our own implementation of the sincos() function
+// and related friends. Note that we must also explicitly tell GCC to disable
+// optimizations when generating these. Otherwise, the generated machine code
+// for each function would simply end up calling itself, resulting in a
+// runtime crash due to stack overflow.
+//
+#if defined(__GNUC__) && !defined(__clang__) && \
+    !defined(ANDROID_SINCOS_PROVIDED)
+
+// For the record, Clang does not support the 'optimize' attribute.
+// In the unlikely event that it begins performing this optimization too,
+// we'll have to find a different way to achieve this. NOTE: tested with -O1,
+// which still performs the optimization.
+//
+#define GCC_NO_OPTIMIZE  __attribute__((optimize("O0")))
+
+GCC_NO_OPTIMIZE
+void sincos(double angle, double* s, double* c) {
+  *c = cos(angle);
+  *s = sin(angle);
+}
+
+GCC_NO_OPTIMIZE
+void sincosf(float angle, float* s, float* c) {
+  *c = cosf(angle);
+  *s = sinf(angle);
+}
+
+#endif // __GNUC__ && !__clang__
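+
+// Illustrative only: the kind of source GCC 4.6+ may fuse into a single
+// sincos() call, which is why the non-optimized fallbacks above must exist:
+//
+//   double s, c;
+//   c = cos(angle);
+//   s = sin(angle);  // May be emitted as sincos(angle, &s, &c).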
+
+// An implementation of mkdtemp, since it is not exposed by the NDK
+// for native API level 9 that we target.
+//
+// For any changes in the mkdtemp function, you should manually run the
+// unittest OsCompatAndroidTest.DISABLED_TestMkdTemp on your local machine to
+// check that it passes. Please don't enable it, since it creates a directory
+// and may be a source of flakiness.
+char* mkdtemp(char* path) {
+  if (path == NULL) {
+    errno = EINVAL;
+    return NULL;
+  }
+
+  const int path_len = strlen(path);
+
+  // The last six characters of 'path' must be XXXXXX.
+  const base::StringPiece kSuffix("XXXXXX");
+  const int kSuffixLen = kSuffix.length();
+  if (!base::StringPiece(path, path_len).ends_with(kSuffix)) {
+    errno = EINVAL;
+    return NULL;
+  }
+
+  // If the path contains a directory, as in /tmp/foo/XXXXXX, make sure
+  // that /tmp/foo exists; otherwise we're going to loop a really long
+  // time for nothing below.
+  char* dirsep = strrchr(path, '/');
+  if (dirsep != NULL) {
+    struct stat st;
+    int ret;
+
+    *dirsep = '\0';  // Terminating directory path temporarily
+
+    ret = stat(path, &st);
+
+    *dirsep = '/';  // Restoring directory separator
+    if (ret < 0)  // Directory probably does not exist
+      return NULL;
+    if (!S_ISDIR(st.st_mode)) {  // Not a directory
+      errno = ENOTDIR;
+      return NULL;
+    }
+  }
+
+  // Max number of tries using different random suffixes.
+  const int kMaxTries = 100;
+
+  // Now loop until we CAN create a directory by that name or we reach the max
+  // number of tries.
+  for (int i = 0; i < kMaxTries; ++i) {
+    // Fill the suffix XXXXXX with a random string composed of a-z chars.
+    for (int pos = 0; pos < kSuffixLen; ++pos) {
+      char rand_char = static_cast<char>(base::RandInt('a', 'z'));
+      path[path_len - kSuffixLen + pos] = rand_char;
+    }
+    if (mkdir(path, 0700) == 0) {
+      // We just created the directory successfully.
+      return path;
+    }
+    if (errno != EEXIST) {
+      // mkdir() failed with an error other than "already exists"; give up.
+      return NULL;
+    }
+  }
+
+  // We reached the max number of tries.
+  return NULL;
+}
+
+}  // extern "C"
diff --git a/base/os_compat_android.h b/base/os_compat_android.h
new file mode 100644
index 0000000..e33b1f7
--- /dev/null
+++ b/base/os_compat_android.h
@@ -0,0 +1,21 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_OS_COMPAT_ANDROID_H_
+#define BASE_OS_COMPAT_ANDROID_H_
+
+#include <fcntl.h>
+#include <sys/types.h>
+#include <utime.h>
+
+// Not implemented in Bionic.
+extern "C" int futimes(int fd, const struct timeval tv[2]);
+
+// Not exposed or implemented in Bionic.
+extern "C" char* mkdtemp(char* path);
+
+// Android has no timegm().
+extern "C" time_t timegm(struct tm* const t);
+
+#endif  // BASE_OS_COMPAT_ANDROID_H_
diff --git a/base/os_compat_android_unittest.cc b/base/os_compat_android_unittest.cc
new file mode 100644
index 0000000..7fbdc6d
--- /dev/null
+++ b/base/os_compat_android_unittest.cc
@@ -0,0 +1,41 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/os_compat_android.h"
+
+#include "base/files/file_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+typedef testing::Test OsCompatAndroidTest;
+
+// Keep this unittest DISABLED_, because it actually creates a directory on
+// the device and may be a source of flakiness. For any changes in the mkdtemp
+// function, you should run this unittest on your local machine to check that
+// it passes.
+TEST_F(OsCompatAndroidTest, DISABLED_TestMkdTemp) {
+  FilePath tmp_dir;
+  EXPECT_TRUE(base::GetTempDir(&tmp_dir));
+
+  // The suffix of the path is not six X's.
+  FilePath sub_dir = tmp_dir.Append("XX");
+  std::string sub_dir_string = sub_dir.value();
+  // This should be OK since mkdtemp just replaces characters in place.
+  char* buffer = const_cast<char*>(sub_dir_string.c_str());
+  EXPECT_EQ(NULL, mkdtemp(buffer));
+
+  // Directory does not exist
+  char invalid_path2[] = "doesntoexist/foobarXXXXXX";
+  EXPECT_EQ(NULL, mkdtemp(invalid_path2));
+
+  // Successfully create a tmp dir.
+  FilePath sub_dir2 = tmp_dir.Append("XXXXXX");
+  std::string sub_dir2_string = sub_dir2.value();
+  // This should be OK since mkdtemp just replaces characters in place.
+  char* buffer2 = const_cast<char*>(sub_dir2_string.c_str());
+  EXPECT_TRUE(mkdtemp(buffer2) != NULL);
+}
+
+}  // namespace base
diff --git a/base/os_compat_nacl.cc b/base/os_compat_nacl.cc
new file mode 100644
index 0000000..58fe93e
--- /dev/null
+++ b/base/os_compat_nacl.cc
@@ -0,0 +1,30 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/os_compat_nacl.h"
+
+#include <stdlib.h>
+#include <time.h>
+
+#if !defined (__GLIBC__)
+
+extern "C" {
+// Native Client has no timegm().
+time_t timegm(struct tm* tm) {
+  time_t ret;
+  char* tz;
+  tz = getenv("TZ");
+  setenv("TZ", "", 1);
+  tzset();
+  ret = mktime(tm);
+  if (tz)
+    setenv("TZ", tz, 1);
+  else
+    unsetenv("TZ");
+  tzset();
+  return ret;
+}
+}  // extern "C"
+
+#endif  // !defined (__GLIBC__)
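
A usage sketch for the fallback above: unlike mktime(), timegm() interprets
the struct tm as UTC, so the result is independent of the local time zone.

    #include <time.h>

    time_t UnixEpochPlusOneDay() {
      struct tm utc = {};
      utc.tm_year = 70;  // Years since 1900, i.e. 1970.
      utc.tm_mday = 2;   // January 2nd, 00:00:00 UTC.
      return timegm(&utc);  // 86400.
    }
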
diff --git a/base/os_compat_nacl.h b/base/os_compat_nacl.h
new file mode 100644
index 0000000..13e0e3f
--- /dev/null
+++ b/base/os_compat_nacl.h
@@ -0,0 +1,16 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_OS_COMPAT_NACL_H_
+#define BASE_OS_COMPAT_NACL_H_
+
+#include <sys/types.h>
+
+#if !defined (__GLIBC__)
+// NaCl has no timegm().
+extern "C" time_t timegm(struct tm* const t);
+#endif  // !defined (__GLIBC__)
+
+#endif  // BASE_OS_COMPAT_NACL_H_
+
diff --git a/base/path_service.cc b/base/path_service.cc
new file mode 100644
index 0000000..6ac501e
--- /dev/null
+++ b/base/path_service.cc
@@ -0,0 +1,339 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/path_service.h"
+
+#include <unordered_map>
+
+#if defined(OS_WIN)
+#include <windows.h>
+#include <shellapi.h>
+#include <shlobj.h>
+#endif
+
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/logging.h"
+#include "base/synchronization/lock.h"
+#include "build/build_config.h"
+
+namespace base {
+
+bool PathProvider(int key, FilePath* result);
+
+#if defined(OS_WIN)
+bool PathProviderWin(int key, FilePath* result);
+#elif defined(OS_MACOSX)
+bool PathProviderMac(int key, FilePath* result);
+#elif defined(OS_ANDROID)
+bool PathProviderAndroid(int key, FilePath* result);
+#elif defined(OS_FUCHSIA)
+bool PathProviderFuchsia(int key, FilePath* result);
+#elif defined(OS_POSIX)
+// PathProviderPosix is the default path provider on POSIX OSes other than
+// Mac and Android.
+bool PathProviderPosix(int key, FilePath* result);
+#endif
+
+namespace {
+
+typedef std::unordered_map<int, FilePath> PathMap;
+
+// We keep a linked list of providers.  In a debug build we ensure that no two
+// providers claim overlapping keys.
+struct Provider {
+  PathService::ProviderFunc func;
+  struct Provider* next;
+#ifndef NDEBUG
+  int key_start;
+  int key_end;
+#endif
+  bool is_static;
+};
+
+Provider base_provider = {PathProvider, nullptr,
+#ifndef NDEBUG
+                          PATH_START, PATH_END,
+#endif
+                          true};
+
+#if defined(OS_WIN)
+Provider base_provider_win = {
+  PathProviderWin,
+  &base_provider,
+#ifndef NDEBUG
+  PATH_WIN_START,
+  PATH_WIN_END,
+#endif
+  true
+};
+#endif
+
+#if defined(OS_MACOSX)
+Provider base_provider_mac = {
+  PathProviderMac,
+  &base_provider,
+#ifndef NDEBUG
+  PATH_MAC_START,
+  PATH_MAC_END,
+#endif
+  true
+};
+#endif
+
+#if defined(OS_ANDROID)
+Provider base_provider_android = {
+  PathProviderAndroid,
+  &base_provider,
+#ifndef NDEBUG
+  PATH_ANDROID_START,
+  PATH_ANDROID_END,
+#endif
+  true
+};
+#endif
+
+#if defined(OS_FUCHSIA)
+Provider base_provider_fuchsia = {PathProviderFuchsia, &base_provider,
+#ifndef NDEBUG
+                                  0, 0,
+#endif
+                                  true};
+#endif
+
+#if defined(OS_POSIX) && !defined(OS_MACOSX) && !defined(OS_ANDROID) && \
+    !defined(OS_FUCHSIA)
+Provider base_provider_posix = {
+  PathProviderPosix,
+  &base_provider,
+#ifndef NDEBUG
+  PATH_POSIX_START,
+  PATH_POSIX_END,
+#endif
+  true
+};
+#endif
+
+struct PathData {
+  Lock lock;
+  PathMap cache;        // Cache mappings from path key to path value.
+  PathMap overrides;    // Track path overrides.
+  Provider* providers;  // Linked list of path service providers.
+  bool cache_disabled;  // Don't use the cache if true.
+
+  PathData() : cache_disabled(false) {
+#if defined(OS_WIN)
+    providers = &base_provider_win;
+#elif defined(OS_MACOSX)
+    providers = &base_provider_mac;
+#elif defined(OS_ANDROID)
+    providers = &base_provider_android;
+#elif defined(OS_FUCHSIA)
+    providers = &base_provider_fuchsia;
+#elif defined(OS_POSIX)
+    providers = &base_provider_posix;
+#endif
+  }
+};
+
+static PathData* GetPathData() {
+  static auto* path_data = new PathData();
+  return path_data;
+}
+
+// Tries to find |key| in the cache. |path_data| should be locked by the caller!
+bool LockedGetFromCache(int key, const PathData* path_data, FilePath* result) {
+  if (path_data->cache_disabled)
+    return false;
+  // check for a cached version
+  PathMap::const_iterator it = path_data->cache.find(key);
+  if (it != path_data->cache.end()) {
+    *result = it->second;
+    return true;
+  }
+  return false;
+}
+
+// Tries to find |key| in the overrides map. |path_data| should be locked by the
+// caller!
+bool LockedGetFromOverrides(int key, PathData* path_data, FilePath* result) {
+  // check for an overridden version.
+  PathMap::const_iterator it = path_data->overrides.find(key);
+  if (it != path_data->overrides.end()) {
+    if (!path_data->cache_disabled)
+      path_data->cache[key] = it->second;
+    *result = it->second;
+    return true;
+  }
+  return false;
+}
+
+}  // namespace
+
+// TODO(brettw): this function does not handle long paths (filename > MAX_PATH
+// characters). This isn't supported very well by Windows right now, so it is
+// moot, but we should keep this in mind for the future.
+// static
+bool PathService::Get(int key, FilePath* result) {
+  PathData* path_data = GetPathData();
+  DCHECK(path_data);
+  DCHECK(result);
+  DCHECK_GE(key, DIR_CURRENT);
+
+  // special case the current directory because it can never be cached
+  if (key == DIR_CURRENT)
+    return GetCurrentDirectory(result);
+
+  Provider* provider = nullptr;
+  {
+    AutoLock scoped_lock(path_data->lock);
+    if (LockedGetFromCache(key, path_data, result))
+      return true;
+
+    if (LockedGetFromOverrides(key, path_data, result))
+      return true;
+
+    // Get the beginning of the list while it is still locked.
+    provider = path_data->providers;
+  }
+
+  FilePath path;
+
+  // Iterating does not need the lock because only the list head might be
+  // modified on another thread.
+  while (provider) {
+    if (provider->func(key, &path))
+      break;
+    DCHECK(path.empty()) << "provider should not have modified path";
+    provider = provider->next;
+  }
+
+  if (path.empty())
+    return false;
+
+  if (path.ReferencesParent()) {
+    // Make sure path service never returns a path with ".." in it.
+    path = MakeAbsoluteFilePath(path);
+    if (path.empty())
+      return false;
+  }
+  *result = path;
+
+  AutoLock scoped_lock(path_data->lock);
+  if (!path_data->cache_disabled)
+    path_data->cache[key] = path;
+
+  return true;
+}
+
+// static
+bool PathService::Override(int key, const FilePath& path) {
+  // Just call the full function with true for the value of |create|, and
+  // assume that |path| may not be absolute yet.
+  return OverrideAndCreateIfNeeded(key, path, false, true);
+}
+
+// static
+bool PathService::OverrideAndCreateIfNeeded(int key,
+                                            const FilePath& path,
+                                            bool is_absolute,
+                                            bool create) {
+  PathData* path_data = GetPathData();
+  DCHECK(path_data);
+  DCHECK_GT(key, DIR_CURRENT) << "invalid path key";
+
+  FilePath file_path = path;
+
+  // For some locations this will fail if called from inside the sandbox;
+  // therefore we protect this call with a flag.
+  if (create) {
+    // Make sure the directory exists. We need to do this before we translate
+    // this to the absolute path because on POSIX, MakeAbsoluteFilePath fails
+    // if called on a non-existent path.
+    if (!PathExists(file_path) && !CreateDirectory(file_path))
+      return false;
+  }
+
+  // We need to have an absolute path.
+  if (!is_absolute) {
+    file_path = MakeAbsoluteFilePath(file_path);
+    if (file_path.empty())
+      return false;
+  }
+  DCHECK(file_path.IsAbsolute());
+
+  AutoLock scoped_lock(path_data->lock);
+
+  // Clear the cache now. Some of its entries could have depended
+  // on the value we are overriding, and are now out of sync with reality.
+  path_data->cache.clear();
+
+  path_data->overrides[key] = file_path;
+
+  return true;
+}
+
+// static
+bool PathService::RemoveOverride(int key) {
+  PathData* path_data = GetPathData();
+  DCHECK(path_data);
+
+  AutoLock scoped_lock(path_data->lock);
+
+  if (path_data->overrides.find(key) == path_data->overrides.end())
+    return false;
+
+  // Clear the cache now. Some of its entries could have depended on the value
+  // we are going to remove, and are now out of sync.
+  path_data->cache.clear();
+
+  path_data->overrides.erase(key);
+
+  return true;
+}
+
+// static
+void PathService::RegisterProvider(ProviderFunc func, int key_start,
+                                   int key_end) {
+  PathData* path_data = GetPathData();
+  DCHECK(path_data);
+  DCHECK_GT(key_end, key_start);
+
+  Provider* p;
+
+  p = new Provider;
+  p->is_static = false;
+  p->func = func;
+#ifndef NDEBUG
+  p->key_start = key_start;
+  p->key_end = key_end;
+#endif
+
+  AutoLock scoped_lock(path_data->lock);
+
+#ifndef NDEBUG
+  Provider *iter = path_data->providers;
+  while (iter) {
+    DCHECK(key_start >= iter->key_end || key_end <= iter->key_start) <<
+      "path provider collision";
+    iter = iter->next;
+  }
+#endif
+
+  p->next = path_data->providers;
+  path_data->providers = p;
+}
+
+// static
+void PathService::DisableCache() {
+  PathData* path_data = GetPathData();
+  DCHECK(path_data);
+
+  AutoLock scoped_lock(path_data->lock);
+  path_data->cache.clear();
+  path_data->cache_disabled = true;
+}
+
+}  // namespace base
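
A hedged sketch of the call pattern this file serves; DIR_TEMP is a real key
from base_paths.h, while the subdirectory name is hypothetical:

    base::FilePath temp;
    if (base::PathService::Get(base::DIR_TEMP, &temp)) {
      // Successive Get(DIR_TEMP) calls now hit the cache. Overriding clears
      // the cache, since cached entries may depend on the overridden value.
      base::PathService::Override(base::DIR_TEMP,
                                  temp.AppendASCII("my_sandbox"));
    }
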
diff --git a/base/path_service.h b/base/path_service.h
new file mode 100644
index 0000000..9b4715f
--- /dev/null
+++ b/base/path_service.h
@@ -0,0 +1,94 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_PATH_SERVICE_H_
+#define BASE_PATH_SERVICE_H_
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/base_paths.h"
+#include "base/gtest_prod_util.h"
+#include "build/build_config.h"
+
+namespace base {
+
+class FilePath;
+class ScopedPathOverride;
+
+// The path service is a global table mapping keys to file system paths.  It is
+// OK to use this service from multiple threads.
+//
+class BASE_EXPORT PathService {
+ public:
+  // Retrieves a path to a special directory or file and places it into the
+  // string pointed to by 'path'. If you ask for a directory it is guaranteed
+  // to NOT have a path separator at the end. For example, "c:\windows\temp".
+  // Directories are also guaranteed to exist when this function succeeds.
+  //
+  // Returns true if the directory or file was successfully retrieved. On
+  // failure, 'path' will not be changed.
+  static bool Get(int key, FilePath* path);
+
+  // Overrides the path to a special directory or file.  This cannot be used to
+  // change the value of DIR_CURRENT, but that should be obvious.  Also, if the
+  // path specifies a directory that does not exist, the directory will be
+  // created by this method.  This method returns true if successful.
+  //
+  // If the given path is relative, then it will be resolved against
+  // DIR_CURRENT.
+  //
+  // WARNING: Consumers of PathService::Get may expect paths to be constant
+  // over the lifetime of the app, so this method should be used with caution.
+  //
+  // Unit tests generally should use ScopedPathOverride instead. Overrides from
+  // one test should not carry over to another.
+  static bool Override(int key, const FilePath& path);
+
+  // This function does the same as PathService::Override but it takes extra
+  // parameters:
+  // - |is_absolute| indicates that |path| has already been expanded into an
+  // absolute path, otherwise MakeAbsoluteFilePath() will be used. This is
+  // useful to override paths that may not exist yet, since MakeAbsoluteFilePath
+  // fails for those. Note that MakeAbsoluteFilePath also expands symbolic
+  // links, even if path.IsAbsolute() is already true.
+  // - |create| indicates whether the directory to be overridden must
+  // be created in case it doesn't exist already.
+  static bool OverrideAndCreateIfNeeded(int key,
+                                        const FilePath& path,
+                                        bool is_absolute,
+                                        bool create);
+
+  // To extend the set of supported keys, you can register a path provider,
+  // which is just a function mirroring PathService::Get.  The ProviderFunc
+  // returns false if it cannot provide a non-empty path for the given key.
+  // Otherwise, true is returned.
+  //
+  // WARNING: This function could be called on any thread from which the
+  // PathService is used, so the ProviderFunc MUST BE THREADSAFE.
+  //
+  typedef bool (*ProviderFunc)(int, FilePath*);
+
+  // Call to register a path provider.  You must specify the range "[key_start,
+  // key_end)" of supported path keys.
+  static void RegisterProvider(ProviderFunc provider,
+                               int key_start,
+                               int key_end);
+
+  // Disable internal cache.
+  static void DisableCache();
+
+ private:
+  friend class ScopedPathOverride;
+  FRIEND_TEST_ALL_PREFIXES(PathServiceTest, RemoveOverride);
+
+  // Removes an override for a special directory or file. Returns true if there
+  // was an override to remove or false if none was present.
+  // NOTE: This function is intended to be used by tests only!
+  static bool RemoveOverride(int key);
+};
+
+}  // namespace base
+
+#endif  // BASE_PATH_SERVICE_H_
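
A sketch of a custom provider per the contract above; the key values are
hypothetical, and the provider must be threadsafe:

    namespace {

    // Keys must occupy a half-open range [key_start, key_end) that no other
    // registered provider claims.
    enum { MY_PATH_START = 10000, MY_PATH_DATA, MY_PATH_END };

    bool MyProvider(int key, base::FilePath* result) {
      if (key != MY_PATH_DATA)
        return false;  // Decline; the next provider in the chain is tried.
      *result = base::FilePath(FILE_PATH_LITERAL("/opt/myapp/data"));
      return true;
    }

    }  // namespace

    // Registered once, e.g. during startup:
    //   base::PathService::RegisterProvider(&MyProvider, MY_PATH_START,
    //                                       MY_PATH_END);
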
diff --git a/base/path_service_unittest.cc b/base/path_service_unittest.cc
new file mode 100644
index 0000000..8fcd673
--- /dev/null
+++ b/base/path_service_unittest.cc
@@ -0,0 +1,278 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/path_service.h"
+
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/files/scoped_temp_dir.h"
+#include "base/strings/string_util.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest-spi.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/platform_test.h"
+
+#if defined(OS_WIN)
+#include "base/win/windows_version.h"
+#endif
+
+namespace base {
+
+namespace {
+
+// Returns true if PathService::Get returns true and sets the path parameter
+// to non-empty for the given path key.
+bool ReturnsValidPath(int dir_type) {
+  FilePath path;
+  bool result = PathService::Get(dir_type, &path);
+
+  // Some paths might not exist on some platforms in which case confirming
+  // |result| is true and !path.empty() is the best we can do.
+  bool check_path_exists = true;
+#if defined(OS_POSIX) && !defined(OS_FUCHSIA)
+  // If chromium has never been started on this account, the cache path may not
+  // exist.
+  if (dir_type == DIR_CACHE)
+    check_path_exists = false;
+#endif
+#if defined(OS_LINUX)
+  // On the Linux try-bots: a path is returned (e.g. /home/chrome-bot/Desktop),
+  // but it doesn't exist.
+  if (dir_type == DIR_USER_DESKTOP)
+    check_path_exists = false;
+#endif
+#if defined(OS_IOS)
+  // Bundled unittests on iOS may not have Resources directory in the bundle.
+  if (dir_type == DIR_ASSETS)
+    check_path_exists = false;
+#endif
+#if defined(OS_MACOSX)
+  if (dir_type != DIR_EXE && dir_type != DIR_MODULE && dir_type != FILE_EXE &&
+      dir_type != FILE_MODULE) {
+    if (path.ReferencesParent())
+      return false;
+  }
+#else
+  if (path.ReferencesParent())
+    return false;
+#endif
+  return result && !path.empty() && (!check_path_exists || PathExists(path));
+}
+
+#if defined(OS_WIN)
+// Function to test any directory keys that are not supported on some versions
+// of Windows. Checks that the function fails and that the returned path is
+// empty.
+bool ReturnsInvalidPath(int dir_type) {
+  FilePath path;
+  bool result = PathService::Get(dir_type, &path);
+  return !result && path.empty();
+}
+#endif
+
+}  // namespace
+
+// On the Mac this winds up using some autoreleased objects, so we need to
+// be a PlatformTest.
+typedef PlatformTest PathServiceTest;
+
+// Test that all PathService::Get calls return a value and a true result
+// in the development environment.  (This test was created because a few
+// later changes to Get broke the semantics of the function and yielded the
+// correct value while returning false.)
+TEST_F(PathServiceTest, Get) {
+  for (int key = PATH_START + 1; key < PATH_END; ++key) {
+#if defined(OS_ANDROID)
+    if (key == FILE_MODULE || key == DIR_USER_DESKTOP ||
+        key == DIR_HOME)
+      continue;  // Android doesn't implement these.
+#elif defined(OS_IOS)
+    if (key == DIR_USER_DESKTOP)
+      continue;  // iOS doesn't implement DIR_USER_DESKTOP.
+#elif defined(OS_FUCHSIA)
+    if (key == DIR_USER_DESKTOP || key == FILE_MODULE || key == DIR_MODULE)
+      continue;  // Fuchsia doesn't implement DIR_USER_DESKTOP, FILE_MODULE and
+                 // DIR_MODULE.
+#endif
+    EXPECT_PRED1(ReturnsValidPath, key);
+  }
+#if defined(OS_WIN)
+  for (int key = PATH_WIN_START + 1; key < PATH_WIN_END; ++key) {
+    bool valid = true;
+    if (key == DIR_APP_SHORTCUTS)
+      valid = base::win::GetVersion() >= base::win::VERSION_WIN8;
+
+    if (valid)
+      EXPECT_TRUE(ReturnsValidPath(key)) << key;
+    else
+      EXPECT_TRUE(ReturnsInvalidPath(key)) << key;
+  }
+#elif defined(OS_MACOSX)
+  for (int key = PATH_MAC_START + 1; key < PATH_MAC_END; ++key) {
+    EXPECT_PRED1(ReturnsValidPath, key);
+  }
+#elif defined(OS_ANDROID)
+  for (int key = PATH_ANDROID_START + 1; key < PATH_ANDROID_END;
+       ++key) {
+    EXPECT_PRED1(ReturnsValidPath, key);
+  }
+#elif defined(OS_POSIX) && !defined(OS_FUCHSIA)
+  for (int key = PATH_POSIX_START + 1; key < PATH_POSIX_END;
+       ++key) {
+    EXPECT_PRED1(ReturnsValidPath, key);
+  }
+#endif
+}
+
+// Test that all versions of the Override function of PathService do what they
+// are supposed to do.
+TEST_F(PathServiceTest, Override) {
+  int my_special_key = 666;
+  ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath fake_cache_dir(temp_dir.GetPath().AppendASCII("cache"));
+  // PathService::Override should always create the path provided if it doesn't
+  // exist.
+  EXPECT_TRUE(PathService::Override(my_special_key, fake_cache_dir));
+  EXPECT_TRUE(PathExists(fake_cache_dir));
+
+  FilePath fake_cache_dir2(temp_dir.GetPath().AppendASCII("cache2"));
+  // PathService::OverrideAndCreateIfNeeded should obey the |create| parameter.
+  PathService::OverrideAndCreateIfNeeded(my_special_key,
+                                         fake_cache_dir2,
+                                         false,
+                                         false);
+  EXPECT_FALSE(PathExists(fake_cache_dir2));
+  EXPECT_TRUE(PathService::OverrideAndCreateIfNeeded(my_special_key,
+                                                     fake_cache_dir2,
+                                                     false,
+                                                     true));
+  EXPECT_TRUE(PathExists(fake_cache_dir2));
+
+#if defined(OS_POSIX)
+  FilePath non_existent(
+      MakeAbsoluteFilePath(temp_dir.GetPath()).AppendASCII("non_existent"));
+  EXPECT_TRUE(non_existent.IsAbsolute());
+  EXPECT_FALSE(PathExists(non_existent));
+#if !defined(OS_ANDROID)
+  // This fails because MakeAbsoluteFilePath fails for non-existent files.
+  // Earlier versions of Bionic libc don't fail for non-existent files, so
+  // skip this check on Android.
+  EXPECT_FALSE(PathService::OverrideAndCreateIfNeeded(my_special_key,
+                                                      non_existent,
+                                                      false,
+                                                      false));
+#endif
+  // This works because indicating that |non_existent| is absolute skips the
+  // internal MakeAbsoluteFilePath call.
+  EXPECT_TRUE(PathService::OverrideAndCreateIfNeeded(my_special_key,
+                                                     non_existent,
+                                                     true,
+                                                     false));
+  // Check that the path has been overridden and no directory was created.
+  EXPECT_FALSE(PathExists(non_existent));
+  FilePath path;
+  EXPECT_TRUE(PathService::Get(my_special_key, &path));
+  EXPECT_EQ(non_existent, path);
+#endif
+}
+
+// Check if multiple overrides can co-exist.
+TEST_F(PathServiceTest, OverrideMultiple) {
+  int my_special_key = 666;
+  ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath fake_cache_dir1(temp_dir.GetPath().AppendASCII("1"));
+  EXPECT_TRUE(PathService::Override(my_special_key, fake_cache_dir1));
+  EXPECT_TRUE(PathExists(fake_cache_dir1));
+  ASSERT_EQ(1, WriteFile(fake_cache_dir1.AppendASCII("t1"), ".", 1));
+
+  FilePath fake_cache_dir2(temp_dir.GetPath().AppendASCII("2"));
+  EXPECT_TRUE(PathService::Override(my_special_key + 1, fake_cache_dir2));
+  EXPECT_TRUE(PathExists(fake_cache_dir2));
+  ASSERT_EQ(1, WriteFile(fake_cache_dir2.AppendASCII("t2"), ".", 1));
+
+  FilePath result;
+  EXPECT_TRUE(PathService::Get(my_special_key, &result));
+  // Override might have changed the path representation but our test file
+  // should still be there.
+  EXPECT_TRUE(PathExists(result.AppendASCII("t1")));
+  EXPECT_TRUE(PathService::Get(my_special_key + 1, &result));
+  EXPECT_TRUE(PathExists(result.AppendASCII("t2")));
+}
+
+TEST_F(PathServiceTest, RemoveOverride) {
+  // Before we start the test we have to call RemoveOverride at least once to
+  // clear any overrides that might have been left from other tests.
+  PathService::RemoveOverride(DIR_TEMP);
+
+  FilePath original_user_data_dir;
+  EXPECT_TRUE(PathService::Get(DIR_TEMP, &original_user_data_dir));
+  EXPECT_FALSE(PathService::RemoveOverride(DIR_TEMP));
+
+  ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  EXPECT_TRUE(PathService::Override(DIR_TEMP, temp_dir.GetPath()));
+  FilePath new_user_data_dir;
+  EXPECT_TRUE(PathService::Get(DIR_TEMP, &new_user_data_dir));
+  EXPECT_NE(original_user_data_dir, new_user_data_dir);
+
+  EXPECT_TRUE(PathService::RemoveOverride(DIR_TEMP));
+  EXPECT_TRUE(PathService::Get(DIR_TEMP, &new_user_data_dir));
+  EXPECT_EQ(original_user_data_dir, new_user_data_dir);
+}
+
+#if defined(OS_WIN)
+TEST_F(PathServiceTest, GetProgramFiles) {
+  FilePath programfiles_dir;
+#if defined(_WIN64)
+  // 64-bit on 64-bit.
+  EXPECT_TRUE(PathService::Get(DIR_PROGRAM_FILES,
+      &programfiles_dir));
+  EXPECT_EQ(programfiles_dir.value(),
+      FILE_PATH_LITERAL("C:\\Program Files"));
+  EXPECT_TRUE(PathService::Get(DIR_PROGRAM_FILESX86,
+      &programfiles_dir));
+  EXPECT_EQ(programfiles_dir.value(),
+      FILE_PATH_LITERAL("C:\\Program Files (x86)"));
+  EXPECT_TRUE(PathService::Get(DIR_PROGRAM_FILES6432,
+      &programfiles_dir));
+  EXPECT_EQ(programfiles_dir.value(),
+      FILE_PATH_LITERAL("C:\\Program Files"));
+#else
+  if (base::win::OSInfo::GetInstance()->wow64_status() ==
+      base::win::OSInfo::WOW64_ENABLED) {
+    // 32-bit on 64-bit.
+    EXPECT_TRUE(PathService::Get(DIR_PROGRAM_FILES,
+        &programfiles_dir));
+    EXPECT_EQ(programfiles_dir.value(),
+        FILE_PATH_LITERAL("C:\\Program Files (x86)"));
+    EXPECT_TRUE(PathService::Get(DIR_PROGRAM_FILESX86,
+        &programfiles_dir));
+    EXPECT_EQ(programfiles_dir.value(),
+        FILE_PATH_LITERAL("C:\\Program Files (x86)"));
+    EXPECT_TRUE(PathService::Get(DIR_PROGRAM_FILES6432,
+        &programfiles_dir));
+    EXPECT_EQ(programfiles_dir.value(),
+        FILE_PATH_LITERAL("C:\\Program Files"));
+  } else {
+    // 32-bit on 32-bit.
+    EXPECT_TRUE(PathService::Get(DIR_PROGRAM_FILES,
+        &programfiles_dir));
+    EXPECT_EQ(programfiles_dir.value(),
+        FILE_PATH_LITERAL("C:\\Program Files"));
+    EXPECT_TRUE(PathService::Get(DIR_PROGRAM_FILESX86,
+        &programfiles_dir));
+    EXPECT_EQ(programfiles_dir.value(),
+        FILE_PATH_LITERAL("C:\\Program Files"));
+    EXPECT_TRUE(PathService::Get(DIR_PROGRAM_FILES6432,
+        &programfiles_dir));
+    EXPECT_EQ(programfiles_dir.value(),
+        FILE_PATH_LITERAL("C:\\Program Files"));
+  }
+#endif
+}
+#endif
+
+}  // namespace base
diff --git a/base/pending_task.cc b/base/pending_task.cc
new file mode 100644
index 0000000..50924fd
--- /dev/null
+++ b/base/pending_task.cc
@@ -0,0 +1,41 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/pending_task.h"
+
+namespace base {
+
+PendingTask::PendingTask(const Location& posted_from,
+                         OnceClosure task,
+                         TimeTicks delayed_run_time,
+                         Nestable nestable)
+    : task(std::move(task)),
+      posted_from(posted_from),
+      delayed_run_time(delayed_run_time),
+      nestable(nestable) {}
+
+PendingTask::PendingTask(PendingTask&& other) = default;
+
+PendingTask::~PendingTask() = default;
+
+PendingTask& PendingTask::operator=(PendingTask&& other) = default;
+
+bool PendingTask::operator<(const PendingTask& other) const {
+  // Since the top of a priority queue is defined as the "greatest" element, we
+  // need to invert the comparison here.  We want the smaller time to be at the
+  // top of the heap.
+
+  if (delayed_run_time < other.delayed_run_time)
+    return false;
+
+  if (delayed_run_time > other.delayed_run_time)
+    return true;
+
+  // If the times happen to match, then we use the sequence number to decide.
+  // Compare the difference to support integer roll-over.
+  return (sequence_num - other.sequence_num) > 0;
+}
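+
+// Illustrative sketch (not part of the original file): with the inverted
+// comparison, a DelayedTaskQueue pops the earliest |delayed_run_time| first.
+//
+//   DelayedTaskQueue queue;
+//   queue.push(PendingTask(FROM_HERE, OnceClosure(),
+//                          TimeTicks() + TimeDelta::FromSeconds(2)));
+//   queue.push(PendingTask(FROM_HERE, OnceClosure(),
+//                          TimeTicks() + TimeDelta::FromSeconds(1)));
+//   // queue.top() is now the task delayed by one second.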
+
+}  // namespace base
diff --git a/base/pending_task.h b/base/pending_task.h
new file mode 100644
index 0000000..495015b
--- /dev/null
+++ b/base/pending_task.h
@@ -0,0 +1,68 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_PENDING_TASK_H_
+#define BASE_PENDING_TASK_H_
+
+#include <array>
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/containers/queue.h"
+#include "base/location.h"
+#include "base/time/time.h"
+
+namespace base {
+
+enum class Nestable {
+  kNonNestable,
+  kNestable,
+};
+
+// Contains data about a pending task. Stored in TaskQueue and DelayedTaskQueue
+// for use by classes that queue and execute tasks.
+struct BASE_EXPORT PendingTask {
+  PendingTask(const Location& posted_from,
+              OnceClosure task,
+              TimeTicks delayed_run_time = TimeTicks(),
+              Nestable nestable = Nestable::kNestable);
+  PendingTask(PendingTask&& other);
+  ~PendingTask();
+
+  PendingTask& operator=(PendingTask&& other);
+
+  // Used to support sorting.
+  bool operator<(const PendingTask& other) const;
+
+  // The task to run.
+  OnceClosure task;
+
+  // The site this PendingTask was posted from.
+  Location posted_from;
+
+  // The time when the task should be run.
+  base::TimeTicks delayed_run_time;
+
+  // Task backtrace. mutable so it can be set while annotating const PendingTask
+  // objects from TaskAnnotator::DidQueueTask().
+  mutable std::array<const void*, 4> task_backtrace = {};
+
+  // Secondary sort key for run time.
+  int sequence_num = 0;
+
+  // OK to dispatch from a nested loop.
+  Nestable nestable;
+
+  // Needs high resolution timers.
+  bool is_high_res = false;
+};
+
+using TaskQueue = base::queue<PendingTask>;
+
+// PendingTasks are sorted by their |delayed_run_time| property.
+using DelayedTaskQueue = std::priority_queue<base::PendingTask>;
+
+}  // namespace base
+
+#endif  // BASE_PENDING_TASK_H_
diff --git a/base/pickle.cc b/base/pickle.cc
new file mode 100644
index 0000000..c2189c8
--- /dev/null
+++ b/base/pickle.cc
@@ -0,0 +1,435 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/pickle.h"
+
+#include <stdlib.h>
+
+#include <algorithm>  // for max()
+#include <limits>
+
+#include "base/bits.h"
+#include "base/macros.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/numerics/safe_math.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// static
+const int Pickle::kPayloadUnit = 64;
+
+static const size_t kCapacityReadOnly = static_cast<size_t>(-1);
+
+PickleIterator::PickleIterator(const Pickle& pickle)
+    : payload_(pickle.payload()),
+      read_index_(0),
+      end_index_(pickle.payload_size()) {
+}
+
+template <typename Type>
+inline bool PickleIterator::ReadBuiltinType(Type* result) {
+  const char* read_from = GetReadPointerAndAdvance<Type>();
+  if (!read_from)
+    return false;
+  if (sizeof(Type) > sizeof(uint32_t))
+    memcpy(result, read_from, sizeof(*result));
+  else
+    *result = *reinterpret_cast<const Type*>(read_from);
+  return true;
+}
+
+inline void PickleIterator::Advance(size_t size) {
+  size_t aligned_size = bits::Align(size, sizeof(uint32_t));
+  if (end_index_ - read_index_ < aligned_size) {
+    read_index_ = end_index_;
+  } else {
+    read_index_ += aligned_size;
+  }
+}
+
+template<typename Type>
+inline const char* PickleIterator::GetReadPointerAndAdvance() {
+  if (sizeof(Type) > end_index_ - read_index_) {
+    read_index_ = end_index_;
+    return nullptr;
+  }
+  const char* current_read_ptr = payload_ + read_index_;
+  Advance(sizeof(Type));
+  return current_read_ptr;
+}
+
+const char* PickleIterator::GetReadPointerAndAdvance(int num_bytes) {
+  if (num_bytes < 0 ||
+      end_index_ - read_index_ < static_cast<size_t>(num_bytes)) {
+    read_index_ = end_index_;
+    return nullptr;
+  }
+  const char* current_read_ptr = payload_ + read_index_;
+  Advance(num_bytes);
+  return current_read_ptr;
+}
+
+inline const char* PickleIterator::GetReadPointerAndAdvance(
+    int num_elements,
+    size_t size_element) {
+  // Check for int32_t overflow.
+  int num_bytes;
+  if (!CheckMul(num_elements, size_element).AssignIfValid(&num_bytes))
+    return nullptr;
+  return GetReadPointerAndAdvance(num_bytes);
+}
+
+bool PickleIterator::ReadBool(bool* result) {
+  return ReadBuiltinType(result);
+}
+
+bool PickleIterator::ReadInt(int* result) {
+  return ReadBuiltinType(result);
+}
+
+bool PickleIterator::ReadLong(long* result) {
+  // Always read long as a 64-bit value to ensure compatibility between 32-bit
+  // and 64-bit processes.
+  int64_t result_int64 = 0;
+  if (!ReadBuiltinType(&result_int64))
+    return false;
+  // CHECK if the cast truncates the value so that we know to change this IPC
+  // parameter to use int64_t.
+  *result = base::checked_cast<long>(result_int64);
+  return true;
+}
+
+bool PickleIterator::ReadUInt16(uint16_t* result) {
+  return ReadBuiltinType(result);
+}
+
+bool PickleIterator::ReadUInt32(uint32_t* result) {
+  return ReadBuiltinType(result);
+}
+
+bool PickleIterator::ReadInt64(int64_t* result) {
+  return ReadBuiltinType(result);
+}
+
+bool PickleIterator::ReadUInt64(uint64_t* result) {
+  return ReadBuiltinType(result);
+}
+
+bool PickleIterator::ReadFloat(float* result) {
+  // crbug.com/315213
+  // The source data may not be properly aligned, and unaligned float reads
+  // cause SIGBUS on some ARM platforms, so force using memcpy to copy the data
+  // into the result.
+  const char* read_from = GetReadPointerAndAdvance<float>();
+  if (!read_from)
+    return false;
+  memcpy(result, read_from, sizeof(*result));
+  return true;
+}
+
+bool PickleIterator::ReadDouble(double* result) {
+  // crbug.com/315213
+  // The source data may not be properly aligned, and unaligned double reads
+  // cause SIGBUS on some ARM platforms, so force using memcpy to copy the data
+  // into the result.
+  const char* read_from = GetReadPointerAndAdvance<double>();
+  if (!read_from)
+    return false;
+  memcpy(result, read_from, sizeof(*result));
+  return true;
+}
+
+bool PickleIterator::ReadString(std::string* result) {
+  int len;
+  if (!ReadInt(&len))
+    return false;
+  const char* read_from = GetReadPointerAndAdvance(len);
+  if (!read_from)
+    return false;
+
+  result->assign(read_from, len);
+  return true;
+}
+
+bool PickleIterator::ReadStringPiece(StringPiece* result) {
+  int len;
+  if (!ReadInt(&len))
+    return false;
+  const char* read_from = GetReadPointerAndAdvance(len);
+  if (!read_from)
+    return false;
+
+  *result = StringPiece(read_from, len);
+  return true;
+}
+
+bool PickleIterator::ReadString16(string16* result) {
+  int len;
+  if (!ReadInt(&len))
+    return false;
+  const char* read_from = GetReadPointerAndAdvance(len, sizeof(char16));
+  if (!read_from)
+    return false;
+
+  result->assign(reinterpret_cast<const char16*>(read_from), len);
+  return true;
+}
+
+bool PickleIterator::ReadStringPiece16(StringPiece16* result) {
+  int len;
+  if (!ReadInt(&len))
+    return false;
+  const char* read_from = GetReadPointerAndAdvance(len, sizeof(char16));
+  if (!read_from)
+    return false;
+
+  *result = StringPiece16(reinterpret_cast<const char16*>(read_from), len);
+  return true;
+}
+
+bool PickleIterator::ReadData(const char** data, int* length) {
+  *length = 0;
+  *data = nullptr;
+
+  if (!ReadInt(length))
+    return false;
+
+  return ReadBytes(data, *length);
+}
+
+bool PickleIterator::ReadBytes(const char** data, int length) {
+  const char* read_from = GetReadPointerAndAdvance(length);
+  if (!read_from)
+    return false;
+  *data = read_from;
+  return true;
+}
+
+Pickle::Attachment::Attachment() = default;
+
+Pickle::Attachment::~Attachment() = default;
+
+// Payload is uint32_t aligned.
+
+Pickle::Pickle()
+    : header_(nullptr),
+      header_size_(sizeof(Header)),
+      capacity_after_header_(0),
+      write_offset_(0) {
+  static_assert((Pickle::kPayloadUnit & (Pickle::kPayloadUnit - 1)) == 0,
+                "Pickle::kPayloadUnit must be a power of two");
+  Resize(kPayloadUnit);
+  header_->payload_size = 0;
+}
+
+Pickle::Pickle(int header_size)
+    : header_(nullptr),
+      header_size_(bits::Align(header_size, sizeof(uint32_t))),
+      capacity_after_header_(0),
+      write_offset_(0) {
+  DCHECK_GE(static_cast<size_t>(header_size), sizeof(Header));
+  DCHECK_LE(header_size, kPayloadUnit);
+  Resize(kPayloadUnit);
+  header_->payload_size = 0;
+}
+
+Pickle::Pickle(const char* data, int data_len)
+    : header_(reinterpret_cast<Header*>(const_cast<char*>(data))),
+      header_size_(0),
+      capacity_after_header_(kCapacityReadOnly),
+      write_offset_(0) {
+  if (data_len >= static_cast<int>(sizeof(Header)))
+    header_size_ = data_len - header_->payload_size;
+
+  if (header_size_ > static_cast<unsigned int>(data_len))
+    header_size_ = 0;
+
+  if (header_size_ != bits::Align(header_size_, sizeof(uint32_t)))
+    header_size_ = 0;
+
+  // If there is anything wrong with the data, we're not going to use it.
+  if (!header_size_)
+    header_ = nullptr;
+}
+
+Pickle::Pickle(const Pickle& other)
+    : header_(nullptr),
+      header_size_(other.header_size_),
+      capacity_after_header_(0),
+      write_offset_(other.write_offset_) {
+  Resize(other.header_->payload_size);
+  memcpy(header_, other.header_, header_size_ + other.header_->payload_size);
+}
+
+Pickle::~Pickle() {
+  if (capacity_after_header_ != kCapacityReadOnly)
+    free(header_);
+}
+
+Pickle& Pickle::operator=(const Pickle& other) {
+  if (this == &other) {
+    return *this;
+  }
+  if (capacity_after_header_ == kCapacityReadOnly) {
+    header_ = nullptr;
+    capacity_after_header_ = 0;
+  }
+  if (header_size_ != other.header_size_) {
+    free(header_);
+    header_ = nullptr;
+    header_size_ = other.header_size_;
+  }
+  Resize(other.header_->payload_size);
+  memcpy(header_, other.header_,
+         other.header_size_ + other.header_->payload_size);
+  write_offset_ = other.write_offset_;
+  return *this;
+}
+
+void Pickle::WriteString(const StringPiece& value) {
+  WriteInt(static_cast<int>(value.size()));
+  WriteBytes(value.data(), static_cast<int>(value.size()));
+}
+
+void Pickle::WriteString16(const StringPiece16& value) {
+  WriteInt(static_cast<int>(value.size()));
+  WriteBytes(value.data(), static_cast<int>(value.size()) * sizeof(char16));
+}
+
+void Pickle::WriteData(const char* data, int length) {
+  DCHECK_GE(length, 0);
+  WriteInt(length);
+  WriteBytes(data, length);
+}
+
+void Pickle::WriteBytes(const void* data, int length) {
+  WriteBytesCommon(data, length);
+}
+
+void Pickle::Reserve(size_t length) {
+  size_t data_len = bits::Align(length, sizeof(uint32_t));
+  DCHECK_GE(data_len, length);
+#ifdef ARCH_CPU_64_BITS
+  DCHECK_LE(data_len, std::numeric_limits<uint32_t>::max());
+#endif
+  DCHECK_LE(write_offset_, std::numeric_limits<uint32_t>::max() - data_len);
+  size_t new_size = write_offset_ + data_len;
+  if (new_size > capacity_after_header_)
+    Resize(capacity_after_header_ * 2 + new_size);
+}
+
+bool Pickle::WriteAttachment(scoped_refptr<Attachment> attachment) {
+  return false;
+}
+
+bool Pickle::ReadAttachment(base::PickleIterator* iter,
+                            scoped_refptr<Attachment>* attachment) const {
+  return false;
+}
+
+bool Pickle::HasAttachments() const {
+  return false;
+}
+
+void Pickle::Resize(size_t new_capacity) {
+  CHECK_NE(capacity_after_header_, kCapacityReadOnly);
+  capacity_after_header_ = bits::Align(new_capacity, kPayloadUnit);
+  void* p = realloc(header_, GetTotalAllocatedSize());
+  CHECK(p);
+  header_ = reinterpret_cast<Header*>(p);
+}
+
+void* Pickle::ClaimBytes(size_t num_bytes) {
+  void* p = ClaimUninitializedBytesInternal(num_bytes);
+  CHECK(p);
+  memset(p, 0, num_bytes);
+  return p;
+}
+
+size_t Pickle::GetTotalAllocatedSize() const {
+  if (capacity_after_header_ == kCapacityReadOnly)
+    return 0;
+  return header_size_ + capacity_after_header_;
+}
+
+// static
+const char* Pickle::FindNext(size_t header_size,
+                             const char* start,
+                             const char* end) {
+  size_t pickle_size = 0;
+  if (!PeekNext(header_size, start, end, &pickle_size))
+    return nullptr;
+
+  if (pickle_size > static_cast<size_t>(end - start))
+    return nullptr;
+
+  return start + pickle_size;
+}
+
+// static
+bool Pickle::PeekNext(size_t header_size,
+                      const char* start,
+                      const char* end,
+                      size_t* pickle_size) {
+  DCHECK_EQ(header_size, bits::Align(header_size, sizeof(uint32_t)));
+  DCHECK_GE(header_size, sizeof(Header));
+  DCHECK_LE(header_size, static_cast<size_t>(kPayloadUnit));
+
+  size_t length = static_cast<size_t>(end - start);
+  if (length < sizeof(Header))
+    return false;
+
+  const Header* hdr = reinterpret_cast<const Header*>(start);
+  if (length < header_size)
+    return false;
+
+  // If payload_size causes an overflow, we return the maximum possible
+  // pickle size to indicate that.
+  *pickle_size = ClampAdd(header_size, hdr->payload_size);
+  return true;
+}
+
+template <size_t length> void Pickle::WriteBytesStatic(const void* data) {
+  WriteBytesCommon(data, length);
+}
+
+template void Pickle::WriteBytesStatic<2>(const void* data);
+template void Pickle::WriteBytesStatic<4>(const void* data);
+template void Pickle::WriteBytesStatic<8>(const void* data);
+
+inline void* Pickle::ClaimUninitializedBytesInternal(size_t length) {
+  DCHECK_NE(kCapacityReadOnly, capacity_after_header_)
+      << "oops: pickle is readonly";
+  size_t data_len = bits::Align(length, sizeof(uint32_t));
+  DCHECK_GE(data_len, length);
+#ifdef ARCH_CPU_64_BITS
+  DCHECK_LE(data_len, std::numeric_limits<uint32_t>::max());
+#endif
+  DCHECK_LE(write_offset_, std::numeric_limits<uint32_t>::max() - data_len);
+  size_t new_size = write_offset_ + data_len;
+  if (new_size > capacity_after_header_) {
+    size_t new_capacity = capacity_after_header_ * 2;
+    const size_t kPickleHeapAlign = 4096;
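+    // Past one page, grow to a page boundary minus kPayloadUnit so the total
+    // request (header plus capacity) lands close to whole pages.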
+    if (new_capacity > kPickleHeapAlign)
+      new_capacity = bits::Align(new_capacity, kPickleHeapAlign) - kPayloadUnit;
+    Resize(std::max(new_capacity, new_size));
+  }
+
+  char* write = mutable_payload() + write_offset_;
+  memset(write + length, 0, data_len - length);  // Always initialize padding
+  header_->payload_size = static_cast<uint32_t>(new_size);
+  write_offset_ = new_size;
+  return write;
+}
+
+inline void Pickle::WriteBytesCommon(const void* data, size_t length) {
+  DCHECK_NE(kCapacityReadOnly, capacity_after_header_)
+      << "oops: pickle is readonly";
+  MSAN_CHECK_MEM_IS_INITIALIZED(data, length);
+  void* write = ClaimUninitializedBytesInternal(length);
+  memcpy(write, data, length);
+}
+
+}  // namespace base
diff --git a/base/pickle.h b/base/pickle.h
new file mode 100644
index 0000000..eff2092
--- /dev/null
+++ b/base/pickle.h
@@ -0,0 +1,345 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_PICKLE_H_
+#define BASE_PICKLE_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/gtest_prod_util.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
+
+#if defined(OS_POSIX)
+#include "base/files/file.h"
+#endif
+
+namespace base {
+
+class Pickle;
+
+// PickleIterator reads data from a Pickle. The Pickle object must remain valid
+// while the PickleIterator object is in use.
+class BASE_EXPORT PickleIterator {
+ public:
+  PickleIterator() : payload_(NULL), read_index_(0), end_index_(0) {}
+  explicit PickleIterator(const Pickle& pickle);
+
+  // Methods for reading the payload of the Pickle. To read from the start of
+  // the Pickle, create a PickleIterator from a Pickle. If successful, these
+  // methods return true. Otherwise, false is returned to indicate that the
+  // result could not be extracted. It is not possible to read from the iterator
+  // after that.
+  bool ReadBool(bool* result) WARN_UNUSED_RESULT;
+  bool ReadInt(int* result) WARN_UNUSED_RESULT;
+  bool ReadLong(long* result) WARN_UNUSED_RESULT;
+  bool ReadUInt16(uint16_t* result) WARN_UNUSED_RESULT;
+  bool ReadUInt32(uint32_t* result) WARN_UNUSED_RESULT;
+  bool ReadInt64(int64_t* result) WARN_UNUSED_RESULT;
+  bool ReadUInt64(uint64_t* result) WARN_UNUSED_RESULT;
+  bool ReadFloat(float* result) WARN_UNUSED_RESULT;
+  bool ReadDouble(double* result) WARN_UNUSED_RESULT;
+  bool ReadString(std::string* result) WARN_UNUSED_RESULT;
+  // The StringPiece data will only be valid for the lifetime of the message.
+  bool ReadStringPiece(StringPiece* result) WARN_UNUSED_RESULT;
+  bool ReadString16(string16* result) WARN_UNUSED_RESULT;
+  // The StringPiece16 data will only be valid for the lifetime of the message.
+  bool ReadStringPiece16(StringPiece16* result) WARN_UNUSED_RESULT;
+
+  // A pointer to the data will be placed in |*data|, and the length will be
+  // placed in |*length|. The pointer placed into |*data| points into the
+  // message's buffer so it will be scoped to the lifetime of the message (or
+  // until the message data is mutated). Do not keep the pointer around!
+  bool ReadData(const char** data, int* length) WARN_UNUSED_RESULT;
+
+  // A pointer to the data will be placed in |*data|. The caller specifies the
+  // number of bytes to read, and ReadBytes will validate this length. The
+  // pointer placed into |*data| points into the message's buffer so it will be
+  // scoped to the lifetime of the message (or until the message data is
+  // mutated). Do not keep the pointer around!
+  bool ReadBytes(const char** data, int length) WARN_UNUSED_RESULT;
+
+  // A safer version of ReadInt() that checks that the result is non-negative.
+  // Use it for reading object sizes.
+  bool ReadLength(int* result) WARN_UNUSED_RESULT {
+    return ReadInt(result) && *result >= 0;
+  }
+
+  // Skips bytes in the read buffer and returns true if there are at least
+  // num_bytes available. Otherwise, does nothing and returns false.
+  bool SkipBytes(int num_bytes) WARN_UNUSED_RESULT {
+    return !!GetReadPointerAndAdvance(num_bytes);
+  }
+
+ private:
+  // Read Type from Pickle.
+  template <typename Type>
+  bool ReadBuiltinType(Type* result);
+
+  // Advance read_index_ but do not allow it to exceed end_index_.
+  // Keeps read_index_ aligned.
+  void Advance(size_t size);
+
+  // Get read pointer for Type and advance read pointer.
+  template<typename Type>
+  const char* GetReadPointerAndAdvance();
+
+  // Get read pointer for |num_bytes| and advance read pointer. This method
+  // checks num_bytes for negativity and wrapping.
+  const char* GetReadPointerAndAdvance(int num_bytes);
+
+  // Get read pointer for (num_elements * size_element) bytes and advance read
+  // pointer. This method checks for int overflow, negativity and wrapping.
+  const char* GetReadPointerAndAdvance(int num_elements,
+                                       size_t size_element);
+
+  const char* payload_;  // Start of our pickle's payload.
+  size_t read_index_;  // Offset of the next readable byte in payload.
+  size_t end_index_;  // Payload size.
+
+  FRIEND_TEST_ALL_PREFIXES(PickleTest, GetReadPointerAndAdvance);
+};
+
+// This class provides facilities for basic binary value packing and unpacking.
+//
+// The Pickle class supports appending primitive values (ints, strings, etc.)
+// to a pickle instance.  The Pickle instance grows its internal memory buffer
+// dynamically to hold the sequence of primitive values.   The internal memory
+// buffer is exposed as the "data" of the Pickle.  This "data" can be passed
+// to a Pickle object to initialize it for reading.
+//
+// When reading from a Pickle object, it is important for the consumer to know
+// what value types to read and in what order to read them as the Pickle does
+// not keep track of the type of data written to it.
+//
+// The Pickle's data has a header which contains the size of the Pickle's
+// payload.  It can optionally support additional space in the header.  That
+// space is controlled by the header_size parameter passed to the Pickle
+// constructor.
+//
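+// A minimal write/read round trip (illustrative; values must be read back in
+// the order they were written):
+//
+//   base::Pickle pickle;
+//   pickle.WriteInt(42);
+//   pickle.WriteString("hello");
+//
+//   base::PickleIterator iter(pickle);
+//   int value;
+//   std::string text;
+//   bool ok = iter.ReadInt(&value) && iter.ReadString(&text);
+//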
+class BASE_EXPORT Pickle {
+ public:
+  // Auxiliary data attached to a Pickle. Pickle must be subclassed along with
+  // this interface in order to provide a concrete implementation of support
+  // for attachments. The base Pickle implementation does not accept
+  // attachments.
+  class BASE_EXPORT Attachment : public RefCountedThreadSafe<Attachment> {
+   public:
+    Attachment();
+
+   protected:
+    friend class RefCountedThreadSafe<Attachment>;
+    virtual ~Attachment();
+
+    DISALLOW_COPY_AND_ASSIGN(Attachment);
+  };
+
+  // Initialize a Pickle object using the default header size.
+  Pickle();
+
+  // Initialize a Pickle object with the specified header size in bytes, which
+  // must be greater-than-or-equal-to sizeof(Pickle::Header).  The header size
+  // will be rounded up to ensure that the header size is 32bit-aligned.
+  explicit Pickle(int header_size);
+
+  // Initializes a Pickle from a const block of data.  The data is not copied;
+  // instead the data is merely referenced by this Pickle.  Only const methods
+  // should be used on the Pickle when initialized this way.  The header
+  // padding size is deduced from the data length.
+  Pickle(const char* data, int data_len);
+
+  // Initializes a Pickle as a deep copy of another Pickle.
+  Pickle(const Pickle& other);
+
+  // Note: There are no virtual methods in this class.  This destructor is
+  // virtual as an element of defensive coding.  Other classes have derived from
+  // this class, and there is a *chance* that they will cast into this base
+  // class before destruction.  At least one such class does have a virtual
+  // destructor, suggesting at least some need to call more derived destructors.
+  virtual ~Pickle();
+
+  // Performs a deep copy.
+  Pickle& operator=(const Pickle& other);
+
+  // Returns the number of bytes written in the Pickle, including the header.
+  size_t size() const { return header_size_ + header_->payload_size; }
+
+  // Returns the data for this Pickle.
+  const void* data() const { return header_; }
+
+  // Returns the effective memory capacity of this Pickle, that is, the total
+  // number of bytes currently dynamically allocated or 0 in the case of a
+  // read-only Pickle. This should be used only for diagnostic / profiling
+  // purposes.
+  size_t GetTotalAllocatedSize() const;
+
+  // Methods for adding to the payload of the Pickle.  These values are
+  // appended to the end of the Pickle's payload.  When reading values from a
+  // Pickle, it is important to read them in the order in which they were added
+  // to the Pickle.
+
+  void WriteBool(bool value) { WriteInt(value ? 1 : 0); }
+  void WriteInt(int value) { WritePOD(value); }
+  void WriteLong(long value) {
+    // Always write long as a 64-bit value to ensure compatibility between
+    // 32-bit and 64-bit processes.
+    WritePOD(static_cast<int64_t>(value));
+  }
+  void WriteUInt16(uint16_t value) { WritePOD(value); }
+  void WriteUInt32(uint32_t value) { WritePOD(value); }
+  void WriteInt64(int64_t value) { WritePOD(value); }
+  void WriteUInt64(uint64_t value) { WritePOD(value); }
+  void WriteFloat(float value) { WritePOD(value); }
+  void WriteDouble(double value) { WritePOD(value); }
+  void WriteString(const StringPiece& value);
+  void WriteString16(const StringPiece16& value);
+  // "Data" is a blob with a length. When you read it out you will be given the
+  // length. See also WriteBytes.
+  void WriteData(const char* data, int length);
+  // "Bytes" is a blob with no length. The caller must specify the length both
+  // when reading and writing. It is normally used to serialize PoD types of a
+  // known size. See also WriteData.
+  void WriteBytes(const void* data, int length);
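+  // For example (illustrative): a fixed-size POD can round-trip through
+  //   pickle.WriteBytes(&pod, sizeof(pod));
+  //   iter.ReadBytes(&ptr, sizeof(pod));
+  // whereas a variable-length blob should use WriteData()/ReadData() so the
+  // length travels with the payload.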
+
+  // WriteAttachment appends |attachment| to the pickle. It returns
+  // false iff the set is full or if the Pickle implementation does not support
+  // attachments.
+  virtual bool WriteAttachment(scoped_refptr<Attachment> attachment);
+
+  // ReadAttachment parses an attachment given the parsing state |iter| and
+  // writes it to |*attachment|. It returns true on success.
+  virtual bool ReadAttachment(base::PickleIterator* iter,
+                              scoped_refptr<Attachment>* attachment) const;
+
+  // Indicates whether the pickle has any attachments.
+  virtual bool HasAttachments() const;
+
+  // Reserves space for upcoming writes when multiple writes will be made and
+  // their sizes are computed in advance. It can be significantly faster to call
+  // Reserve() before calling WriteFoo() multiple times.
+  void Reserve(size_t additional_capacity);
+
+  // Payload follows after allocation of Header (header size is customizable).
+  struct Header {
+    uint32_t payload_size;  // Specifies the size of the payload.
+  };
+
+  // Returns the header, cast to a user-specified type T.  The type T must be a
+  // subclass of Header and its size must correspond to the header_size passed
+  // to the Pickle constructor.
+  template <class T>
+  T* headerT() {
+    DCHECK_EQ(header_size_, sizeof(T));
+    return static_cast<T*>(header_);
+  }
+  template <class T>
+  const T* headerT() const {
+    DCHECK_EQ(header_size_, sizeof(T));
+    return static_cast<const T*>(header_);
+  }
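+  // For instance (illustrative), given
+  //   struct MyHeader : base::Pickle::Header { uint32_t message_type; };
+  // a Pickle built with Pickle(sizeof(MyHeader)) can stamp the extra field
+  // with headerT<MyHeader>()->message_type.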
+
+  // The payload is the pickle data immediately following the header.
+  size_t payload_size() const {
+    return header_ ? header_->payload_size : 0;
+  }
+
+  const char* payload() const {
+    return reinterpret_cast<const char*>(header_) + header_size_;
+  }
+
+  // Returns the address of the byte immediately following the currently valid
+  // header + payload.
+  const char* end_of_payload() const {
+    // This object may be invalid.
+    return header_ ? payload() + payload_size() : NULL;
+  }
+
+ protected:
+  // Returns the size of the header, which may be the default, a value set by
+  // the user, or a value deduced from the raw data passed in.
+  size_t header_size() const { return header_size_; }
+
+  char* mutable_payload() {
+    return reinterpret_cast<char*>(header_) + header_size_;
+  }
+
+  size_t capacity_after_header() const {
+    return capacity_after_header_;
+  }
+
+  // Resizes the capacity; note that the input value should not include the
+  // size of the header.
+  void Resize(size_t new_capacity);
+
+  // Claims |num_bytes| bytes of payload. This is similar to Reserve() in that
+  // it may grow the capacity, but it also advances the write offset of the
+  // pickle by |num_bytes|. Claimed memory, including padding, is zeroed.
+  //
+  // Returns the address of the first byte claimed.
+  void* ClaimBytes(size_t num_bytes);
+
+  // Find the end of the pickled data that starts at range_start.  Returns NULL
+  // if the entire Pickle is not found in the given data range.
+  static const char* FindNext(size_t header_size,
+                              const char* range_start,
+                              const char* range_end);
+
+  // Parses the pickle header and returns the total size of the pickle. The
+  // data range doesn't need to contain the entire pickle.
+  // Returns true if the pickle header was found and parsed. Callers must
+  // check the returned |pickle_size| for sanity (against a maximum message
+  // size, etc.).
+  // NOTE: when function successfully parses a header, but encounters an
+  // overflow during pickle size calculation, it sets |pickle_size| to the
+  // maximum size_t value and returns true.
+  static bool PeekNext(size_t header_size,
+                       const char* range_start,
+                       const char* range_end,
+                       size_t* pickle_size);
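+  //
+  // A sketch of how a subclass (FindNext() is protected) might walk a buffer
+  // of concatenated pickles, with names chosen here for illustration:
+  //
+  //   const char* p = buffer;
+  //   while (const char* next = FindNext(sizeof(Header), p, buffer_end)) {
+  //     Pickle message(p, static_cast<int>(next - p));
+  //     // ... consume |message| ...
+  //     p = next;
+  //   }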
+
+  // The allocation granularity of the payload.
+  static const int kPayloadUnit;
+
+ private:
+  friend class PickleIterator;
+
+  Header* header_;
+  size_t header_size_;  // Supports extra data between header and payload.
+  // Allocation size of payload (or -1 if allocation is const). Note: this
+  // doesn't count the header.
+  size_t capacity_after_header_;
+  // The offset at which we will write the next field. Note: this doesn't count
+  // the header.
+  size_t write_offset_;
+
+  // Just like WriteBytes, but with a compile-time size, for performance.
+  template<size_t length> void BASE_EXPORT WriteBytesStatic(const void* data);
+
+  // Writes a POD by copying its bytes.
+  template <typename T> bool WritePOD(const T& data) {
+    WriteBytesStatic<sizeof(data)>(&data);
+    return true;
+  }
+
+  inline void* ClaimUninitializedBytesInternal(size_t num_bytes);
+  inline void WriteBytesCommon(const void* data, size_t length);
+
+  FRIEND_TEST_ALL_PREFIXES(PickleTest, DeepCopyResize);
+  FRIEND_TEST_ALL_PREFIXES(PickleTest, Resize);
+  FRIEND_TEST_ALL_PREFIXES(PickleTest, PeekNext);
+  FRIEND_TEST_ALL_PREFIXES(PickleTest, PeekNextOverflow);
+  FRIEND_TEST_ALL_PREFIXES(PickleTest, FindNext);
+  FRIEND_TEST_ALL_PREFIXES(PickleTest, FindNextWithIncompleteHeader);
+  FRIEND_TEST_ALL_PREFIXES(PickleTest, FindNextOverflow);
+};
+
+}  // namespace base
+
+#endif  // BASE_PICKLE_H_
diff --git a/base/pickle_unittest.cc b/base/pickle_unittest.cc
new file mode 100644
index 0000000..4563047
--- /dev/null
+++ b/base/pickle_unittest.cc
@@ -0,0 +1,573 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/pickle.h"
+
+#include <limits.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+
+#include "base/macros.h"
+#include "base/strings/string16.h"
+#include "base/strings/utf_string_conversions.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+const bool testbool1 = false;
+const bool testbool2 = true;
+const int testint = 2'093'847'192;
+const long testlong = 1'093'847'192;
+const uint16_t testuint16 = 32123;
+const uint32_t testuint32 = 1593847192;
+const int64_t testint64 = -0x7E8CA925'3104BDFCLL;
+const uint64_t testuint64 = 0xCE8CA925'3104BDF7ULL;
+const float testfloat = 3.1415926935f;
+const double testdouble = 2.71828182845904523;
+const std::string teststring("Hello world");  // note non-aligned string length
+const std::wstring testwstring(L"Hello, world");
+const string16 teststring16(ASCIIToUTF16("Hello, world"));
+const char testrawstring[] = "Hello new world"; // Test raw string writing
+// Test raw char16 writing, assumes UTF16 encoding is ANSI for alpha chars.
+const char16 testrawstring16[] = {'A', 'l', 'o', 'h', 'a', 0};
+const char testdata[] = "AAA\0BBB\0";
+const int testdatalen = arraysize(testdata) - 1;
+
+// checks that the results can be read correctly from the Pickle
+void VerifyResult(const Pickle& pickle) {
+  PickleIterator iter(pickle);
+
+  bool outbool;
+  EXPECT_TRUE(iter.ReadBool(&outbool));
+  EXPECT_FALSE(outbool);
+  EXPECT_TRUE(iter.ReadBool(&outbool));
+  EXPECT_TRUE(outbool);
+
+  int outint;
+  EXPECT_TRUE(iter.ReadInt(&outint));
+  EXPECT_EQ(testint, outint);
+
+  long outlong;
+  EXPECT_TRUE(iter.ReadLong(&outlong));
+  EXPECT_EQ(testlong, outlong);
+
+  uint16_t outuint16;
+  EXPECT_TRUE(iter.ReadUInt16(&outuint16));
+  EXPECT_EQ(testuint16, outuint16);
+
+  uint32_t outuint32;
+  EXPECT_TRUE(iter.ReadUInt32(&outuint32));
+  EXPECT_EQ(testuint32, outuint32);
+
+  int64_t outint64;
+  EXPECT_TRUE(iter.ReadInt64(&outint64));
+  EXPECT_EQ(testint64, outint64);
+
+  uint64_t outuint64;
+  EXPECT_TRUE(iter.ReadUInt64(&outuint64));
+  EXPECT_EQ(testuint64, outuint64);
+
+  float outfloat;
+  EXPECT_TRUE(iter.ReadFloat(&outfloat));
+  EXPECT_EQ(testfloat, outfloat);
+
+  double outdouble;
+  EXPECT_TRUE(iter.ReadDouble(&outdouble));
+  EXPECT_EQ(testdouble, outdouble);
+
+  std::string outstring;
+  EXPECT_TRUE(iter.ReadString(&outstring));
+  EXPECT_EQ(teststring, outstring);
+
+  string16 outstring16;
+  EXPECT_TRUE(iter.ReadString16(&outstring16));
+  EXPECT_EQ(teststring16, outstring16);
+
+  StringPiece outstringpiece;
+  EXPECT_TRUE(iter.ReadStringPiece(&outstringpiece));
+  EXPECT_EQ(testrawstring, outstringpiece);
+
+  StringPiece16 outstringpiece16;
+  EXPECT_TRUE(iter.ReadStringPiece16(&outstringpiece16));
+  EXPECT_EQ(testrawstring16, outstringpiece16);
+
+  const char* outdata;
+  int outdatalen;
+  EXPECT_TRUE(iter.ReadData(&outdata, &outdatalen));
+  EXPECT_EQ(testdatalen, outdatalen);
+  EXPECT_EQ(memcmp(testdata, outdata, outdatalen), 0);
+
+  // reads past the end should fail
+  EXPECT_FALSE(iter.ReadInt(&outint));
+}
+
+}  // namespace
+
+TEST(PickleTest, EncodeDecode) {
+  Pickle pickle;
+
+  pickle.WriteBool(testbool1);
+  pickle.WriteBool(testbool2);
+  pickle.WriteInt(testint);
+  pickle.WriteLong(testlong);
+  pickle.WriteUInt16(testuint16);
+  pickle.WriteUInt32(testuint32);
+  pickle.WriteInt64(testint64);
+  pickle.WriteUInt64(testuint64);
+  pickle.WriteFloat(testfloat);
+  pickle.WriteDouble(testdouble);
+  pickle.WriteString(teststring);
+  pickle.WriteString16(teststring16);
+  pickle.WriteString(testrawstring);
+  pickle.WriteString16(testrawstring16);
+  pickle.WriteData(testdata, testdatalen);
+  VerifyResult(pickle);
+
+  // test copy constructor
+  Pickle pickle2(pickle);
+  VerifyResult(pickle2);
+
+  // test operator=
+  Pickle pickle3;
+  pickle3 = pickle;
+  VerifyResult(pickle3);
+}
+
+// Tests that reading/writing a long works correctly when the source process
+// is 64-bit.  We rely on having both 32- and 64-bit trybots to validate both
+// arms of the conditional in this test.
+TEST(PickleTest, LongFrom64Bit) {
+  Pickle pickle;
+  // Under the hood long is always written as a 64-bit value, so simulate a
+  // 64-bit long even on 32-bit architectures by explicitly writing an int64_t.
+  pickle.WriteInt64(testint64);
+
+  PickleIterator iter(pickle);
+  long outlong;
+  if (sizeof(long) < sizeof(int64_t)) {
+    // ReadLong() should return false when the original written value can't be
+    // represented as a long.
+#if GTEST_HAS_DEATH_TEST
+    EXPECT_DEATH(ignore_result(iter.ReadLong(&outlong)), "");
+#endif
+  } else {
+    EXPECT_TRUE(iter.ReadLong(&outlong));
+    EXPECT_EQ(testint64, outlong);
+  }
+}
+
+// Tests that we can handle really small buffers.
+TEST(PickleTest, SmallBuffer) {
+  std::unique_ptr<char[]> buffer(new char[1]);
+
+  // We should not touch the buffer.
+  Pickle pickle(buffer.get(), 1);
+
+  PickleIterator iter(pickle);
+  int data;
+  EXPECT_FALSE(iter.ReadInt(&data));
+}
+
+// Tests that we can handle improper headers.
+TEST(PickleTest, BigSize) {
+  int buffer[] = { 0x56035200, 25, 40, 50 };
+
+  Pickle pickle(reinterpret_cast<char*>(buffer), sizeof(buffer));
+
+  PickleIterator iter(pickle);
+  int data;
+  EXPECT_FALSE(iter.ReadInt(&data));
+}
+
+TEST(PickleTest, UnalignedSize) {
+  int buffer[] = { 10, 25, 40, 50 };
+
+  Pickle pickle(reinterpret_cast<char*>(buffer), sizeof(buffer));
+
+  PickleIterator iter(pickle);
+  int data;
+  EXPECT_FALSE(iter.ReadInt(&data));
+}
+
+TEST(PickleTest, ZeroLenStr) {
+  Pickle pickle;
+  pickle.WriteString(std::string());
+
+  PickleIterator iter(pickle);
+  std::string outstr;
+  EXPECT_TRUE(iter.ReadString(&outstr));
+  EXPECT_EQ("", outstr);
+}
+
+TEST(PickleTest, ZeroLenStr16) {
+  Pickle pickle;
+  pickle.WriteString16(string16());
+
+  PickleIterator iter(pickle);
+  std::string outstr;
+  EXPECT_TRUE(iter.ReadString(&outstr));
+  EXPECT_EQ("", outstr);
+}
+
+TEST(PickleTest, BadLenStr) {
+  Pickle pickle;
+  pickle.WriteInt(-2);
+
+  PickleIterator iter(pickle);
+  std::string outstr;
+  EXPECT_FALSE(iter.ReadString(&outstr));
+}
+
+TEST(PickleTest, BadLenStr16) {
+  Pickle pickle;
+  pickle.WriteInt(-1);
+
+  PickleIterator iter(pickle);
+  string16 outstr;
+  EXPECT_FALSE(iter.ReadString16(&outstr));
+}
+
+TEST(PickleTest, PeekNext) {
+  struct CustomHeader : base::Pickle::Header {
+    int cookies[10];
+  };
+
+  Pickle pickle(sizeof(CustomHeader));
+
+  pickle.WriteString("Goooooooooooogle");
+
+  const char* pickle_data = static_cast<const char*>(pickle.data());
+
+  size_t pickle_size;
+
+  // Data range doesn't contain header
+  EXPECT_FALSE(Pickle::PeekNext(
+      sizeof(CustomHeader),
+      pickle_data,
+      pickle_data + sizeof(CustomHeader) - 1,
+      &pickle_size));
+
+  // Data range contains header
+  EXPECT_TRUE(Pickle::PeekNext(
+      sizeof(CustomHeader),
+      pickle_data,
+      pickle_data + sizeof(CustomHeader),
+      &pickle_size));
+  EXPECT_EQ(pickle_size, pickle.size());
+
+  // Data range contains header and some other data
+  EXPECT_TRUE(Pickle::PeekNext(
+      sizeof(CustomHeader),
+      pickle_data,
+      pickle_data + sizeof(CustomHeader) + 1,
+      &pickle_size));
+  EXPECT_EQ(pickle_size, pickle.size());
+
+  // Data range contains full pickle
+  EXPECT_TRUE(Pickle::PeekNext(
+      sizeof(CustomHeader),
+      pickle_data,
+      pickle_data + pickle.size(),
+      &pickle_size));
+  EXPECT_EQ(pickle_size, pickle.size());
+}
+
+TEST(PickleTest, PeekNextOverflow) {
+  struct CustomHeader : base::Pickle::Header {
+    int cookies[10];
+  };
+
+  CustomHeader header;
+
+  // Check if we can wrap around at all
+  if (sizeof(size_t) > sizeof(header.payload_size))
+    return;
+
+  const char* pickle_data = reinterpret_cast<const char*>(&header);
+
+  size_t pickle_size;
+
+  // Wrapping around is detected and reported as maximum size_t value
+  header.payload_size = static_cast<uint32_t>(
+      1 - static_cast<int32_t>(sizeof(CustomHeader)));
+  EXPECT_TRUE(Pickle::PeekNext(
+      sizeof(CustomHeader),
+      pickle_data,
+      pickle_data + sizeof(CustomHeader),
+      &pickle_size));
+  EXPECT_EQ(pickle_size, std::numeric_limits<size_t>::max());
+
+  // Ridiculous pickle sizes are fine (callers are supposed to
+  // verify them)
+  header.payload_size =
+      std::numeric_limits<uint32_t>::max() / 2 - sizeof(CustomHeader);
+  EXPECT_TRUE(Pickle::PeekNext(
+      sizeof(CustomHeader),
+      pickle_data,
+      pickle_data + sizeof(CustomHeader),
+      &pickle_size));
+  EXPECT_EQ(pickle_size, std::numeric_limits<uint32_t>::max() / 2);
+}
+
+TEST(PickleTest, FindNext) {
+  Pickle pickle;
+  pickle.WriteInt(1);
+  pickle.WriteString("Domo");
+
+  const char* start = reinterpret_cast<const char*>(pickle.data());
+  const char* end = start + pickle.size();
+
+  EXPECT_EQ(end, Pickle::FindNext(pickle.header_size_, start, end));
+  EXPECT_EQ(nullptr, Pickle::FindNext(pickle.header_size_, start, end - 1));
+  EXPECT_EQ(end, Pickle::FindNext(pickle.header_size_, start, end + 1));
+}
+
+TEST(PickleTest, FindNextWithIncompleteHeader) {
+  size_t header_size = sizeof(Pickle::Header);
+  std::unique_ptr<char[]> buffer(new char[header_size - 1]);
+  memset(buffer.get(), 0x1, header_size - 1);
+
+  const char* start = buffer.get();
+  const char* end = start + header_size - 1;
+
+  EXPECT_EQ(nullptr, Pickle::FindNext(header_size, start, end));
+}
+
+#if defined(COMPILER_MSVC)
+#pragma warning(push)
+#pragma warning(disable: 4146)
+#endif
+TEST(PickleTest, FindNextOverflow) {
+  size_t header_size = sizeof(Pickle::Header);
+  size_t header_size2 = 2 * header_size;
+  size_t payload_received = 100;
+  std::unique_ptr<char[]> buffer(new char[header_size2 + payload_received]);
+  const char* start = buffer.get();
+  Pickle::Header* header = reinterpret_cast<Pickle::Header*>(buffer.get());
+  const char* end = start + header_size2 + payload_received;
+  // It is impossible to construct an overflow test otherwise.
+  if (sizeof(size_t) > sizeof(header->payload_size) ||
+      sizeof(uintptr_t) > sizeof(header->payload_size))
+    return;
+
+  header->payload_size = -(reinterpret_cast<uintptr_t>(start) + header_size2);
+  EXPECT_EQ(nullptr, Pickle::FindNext(header_size2, start, end));
+
+  header->payload_size = -header_size2;
+  EXPECT_EQ(nullptr, Pickle::FindNext(header_size2, start, end));
+
+  header->payload_size = 0;
+  end = start + header_size;
+  EXPECT_EQ(nullptr, Pickle::FindNext(header_size2, start, end));
+}
+#if defined(COMPILER_MSVC)
+#pragma warning(pop)
+#endif
+
+TEST(PickleTest, GetReadPointerAndAdvance) {
+  Pickle pickle;
+
+  PickleIterator iter(pickle);
+  EXPECT_FALSE(iter.GetReadPointerAndAdvance(1));
+
+  pickle.WriteInt(1);
+  pickle.WriteInt(2);
+  int bytes = sizeof(int) * 2;
+
+  EXPECT_TRUE(PickleIterator(pickle).GetReadPointerAndAdvance(0));
+  EXPECT_TRUE(PickleIterator(pickle).GetReadPointerAndAdvance(1));
+  EXPECT_FALSE(PickleIterator(pickle).GetReadPointerAndAdvance(-1));
+  EXPECT_TRUE(PickleIterator(pickle).GetReadPointerAndAdvance(bytes));
+  EXPECT_FALSE(PickleIterator(pickle).GetReadPointerAndAdvance(bytes + 1));
+  EXPECT_FALSE(PickleIterator(pickle).GetReadPointerAndAdvance(INT_MAX));
+  EXPECT_FALSE(PickleIterator(pickle).GetReadPointerAndAdvance(INT_MIN));
+}
+
+TEST(PickleTest, Resize) {
+  size_t unit = Pickle::kPayloadUnit;
+  std::unique_ptr<char[]> data(new char[unit]);
+  char* data_ptr = data.get();
+  for (size_t i = 0; i < unit; i++)
+    data_ptr[i] = 'G';
+
+  // construct a message that will be exactly the size of one payload unit,
+  // note that any data will have a 4-byte header indicating the size
+  const size_t payload_size_after_header = unit - sizeof(uint32_t);
+  Pickle pickle;
+  pickle.WriteData(
+      data_ptr, static_cast<int>(payload_size_after_header - sizeof(uint32_t)));
+  size_t cur_payload = payload_size_after_header;
+
+  // note: we assume 'unit' is a power of 2
+  EXPECT_EQ(unit, pickle.capacity_after_header());
+  EXPECT_EQ(pickle.payload_size(), payload_size_after_header);
+
+  // fill out a full page (noting data header)
+  pickle.WriteData(data_ptr, static_cast<int>(unit - sizeof(uint32_t)));
+  cur_payload += unit;
+  EXPECT_EQ(unit * 2, pickle.capacity_after_header());
+  EXPECT_EQ(cur_payload, pickle.payload_size());
+
+  // one more byte should double the capacity
+  pickle.WriteData(data_ptr, 1);
+  cur_payload += 8;
+  EXPECT_EQ(unit * 4, pickle.capacity_after_header());
+  EXPECT_EQ(cur_payload, pickle.payload_size());
+}
+
+namespace {
+
+struct CustomHeader : Pickle::Header {
+  int blah;
+};
+
+}  // namespace
+
+TEST(PickleTest, HeaderPadding) {
+  const uint32_t kMagic = 0x12345678;
+
+  Pickle pickle(sizeof(CustomHeader));
+  pickle.WriteInt(kMagic);
+
+  // this should not overwrite the 'int' payload
+  pickle.headerT<CustomHeader>()->blah = 10;
+
+  PickleIterator iter(pickle);
+  int result;
+  ASSERT_TRUE(iter.ReadInt(&result));
+
+  EXPECT_EQ(static_cast<uint32_t>(result), kMagic);
+}
+
+TEST(PickleTest, EqualsOperator) {
+  Pickle source;
+  source.WriteInt(1);
+
+  Pickle copy_refs_source_buffer(static_cast<const char*>(source.data()),
+                                 source.size());
+  Pickle copy;
+  copy = copy_refs_source_buffer;
+  ASSERT_EQ(source.size(), copy.size());
+}
+
+TEST(PickleTest, EvilLengths) {
+  Pickle source;
+  std::string str(100000, 'A');
+  source.WriteData(str.c_str(), 100000);
+  // ReadString16 used to have its read buffer length calculation wrong leading
+  // to out-of-bounds reading.
+  PickleIterator iter(source);
+  string16 str16;
+  EXPECT_FALSE(iter.ReadString16(&str16));
+
+  // And check we didn't break ReadString16.
+  str16 = (wchar_t) 'A';
+  Pickle str16_pickle;
+  str16_pickle.WriteString16(str16);
+  iter = PickleIterator(str16_pickle);
+  EXPECT_TRUE(iter.ReadString16(&str16));
+  EXPECT_EQ(1U, str16.length());
+
+  // Check we don't fail in a length check with invalid String16 size.
+  // (1<<31) * sizeof(char16) == 0, so this is particularly evil.
+  Pickle bad_len;
+  bad_len.WriteInt(1 << 31);
+  iter = PickleIterator(bad_len);
+  EXPECT_FALSE(iter.ReadString16(&str16));
+}
+
+// Check we can write zero bytes of data and 'data' can be NULL.
+TEST(PickleTest, ZeroLength) {
+  Pickle pickle;
+  pickle.WriteData(nullptr, 0);
+
+  PickleIterator iter(pickle);
+  const char* outdata;
+  int outdatalen;
+  EXPECT_TRUE(iter.ReadData(&outdata, &outdatalen));
+  EXPECT_EQ(0, outdatalen);
+  // We can't assert that outdata is NULL.
+}
+
+// Check that ReadBytes works properly with an iterator initialized to NULL.
+TEST(PickleTest, ReadBytes) {
+  Pickle pickle;
+  int data = 0x7abcd;
+  pickle.WriteBytes(&data, sizeof(data));
+
+  PickleIterator iter(pickle);
+  const char* outdata_char = nullptr;
+  EXPECT_TRUE(iter.ReadBytes(&outdata_char, sizeof(data)));
+
+  int outdata;
+  memcpy(&outdata, outdata_char, sizeof(outdata));
+  EXPECT_EQ(data, outdata);
+}
+
+// Checks that when a pickle is deep-copied, the result is not larger than
+// needed.
+TEST(PickleTest, DeepCopyResize) {
+  Pickle pickle;
+  while (pickle.capacity_after_header() != pickle.payload_size())
+    pickle.WriteBool(true);
+
+  // Make a deep copy.
+  Pickle pickle2(pickle);
+
+  // Check that there isn't any extraneous capacity.
+  EXPECT_EQ(pickle.capacity_after_header(), pickle2.capacity_after_header());
+}
+
+namespace {
+
+// Publicly exposes the ClaimBytes interface for testing.
+class TestingPickle : public Pickle {
+ public:
+  TestingPickle() = default;
+
+  void* ClaimBytes(size_t num_bytes) { return Pickle::ClaimBytes(num_bytes); }
+};
+
+}  // namespace
+
+// Checks that claimed bytes are zero-initialized.
+TEST(PickleTest, ClaimBytesInitialization) {
+  static const int kChunkSize = 64;
+  TestingPickle pickle;
+  const char* bytes = static_cast<const char*>(pickle.ClaimBytes(kChunkSize));
+  for (size_t i = 0; i < kChunkSize; ++i) {
+    EXPECT_EQ(0, bytes[i]);
+  }
+}
+
+// Checks that ClaimBytes properly advances the write offset.
+TEST(PickleTest, ClaimBytes) {
+  std::string data("Hello, world!");
+
+  TestingPickle pickle;
+  pickle.WriteUInt32(data.size());
+  void* bytes = pickle.ClaimBytes(data.size());
+  pickle.WriteInt(42);
+  memcpy(bytes, data.data(), data.size());
+
+  PickleIterator iter(pickle);
+  uint32_t out_data_length;
+  EXPECT_TRUE(iter.ReadUInt32(&out_data_length));
+  EXPECT_EQ(data.size(), out_data_length);
+
+  const char* out_data = nullptr;
+  EXPECT_TRUE(iter.ReadBytes(&out_data, out_data_length));
+  EXPECT_EQ(data, std::string(out_data, out_data_length));
+
+  int out_value;
+  EXPECT_TRUE(iter.ReadInt(&out_value));
+  EXPECT_EQ(42, out_value);
+}
+
+}  // namespace base
diff --git a/base/posix/eintr_wrapper.h b/base/posix/eintr_wrapper.h
new file mode 100644
index 0000000..c0ffced
--- /dev/null
+++ b/base/posix/eintr_wrapper.h
@@ -0,0 +1,68 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This provides a wrapper around system calls which may be interrupted by a
+// signal and return EINTR. See man 7 signal.
+// To prevent long-lasting loops (which would likely be a bug, such as a signal
+// that should be masked) from going unnoticed, there is a limit after which the
+// caller will nonetheless see an EINTR in Debug builds.
+//
+// On Windows and Fuchsia, this wrapper macro does nothing because there are no
+// signals.
+//
+// Don't wrap close calls in HANDLE_EINTR. Use IGNORE_EINTR if the return
+// value of close is significant. See http://crbug.com/269623.
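+//
+// Typical usage (illustrative):
+//
+//   ssize_t bytes = HANDLE_EINTR(read(fd, buffer, sizeof(buffer)));
+//   if (IGNORE_EINTR(close(fd)) < 0)
+//     PLOG(ERROR) << "close";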
+
+#ifndef BASE_POSIX_EINTR_WRAPPER_H_
+#define BASE_POSIX_EINTR_WRAPPER_H_
+
+#include "build/build_config.h"
+
+#if defined(OS_POSIX) && !defined(OS_FUCHSIA)
+
+#include <errno.h>
+
+#if defined(NDEBUG)
+
+#define HANDLE_EINTR(x) ({ \
+  decltype(x) eintr_wrapper_result; \
+  do { \
+    eintr_wrapper_result = (x); \
+  } while (eintr_wrapper_result == -1 && errno == EINTR); \
+  eintr_wrapper_result; \
+})
+
+#else
+
+#define HANDLE_EINTR(x) ({ \
+  int eintr_wrapper_counter = 0; \
+  decltype(x) eintr_wrapper_result; \
+  do { \
+    eintr_wrapper_result = (x); \
+  } while (eintr_wrapper_result == -1 && errno == EINTR && \
+           eintr_wrapper_counter++ < 100); \
+  eintr_wrapper_result; \
+})
+
+#endif  // NDEBUG
+
+#define IGNORE_EINTR(x) ({ \
+  decltype(x) eintr_wrapper_result; \
+  do { \
+    eintr_wrapper_result = (x); \
+    if (eintr_wrapper_result == -1 && errno == EINTR) { \
+      eintr_wrapper_result = 0; \
+    } \
+  } while (0); \
+  eintr_wrapper_result; \
+})
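+
+// Note: the ({ ... }) blocks above are GCC/Clang statement expressions, an
+// extension MSVC does not support; that is fine because Windows builds take
+// the no-op definitions below.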
+
+#else  // !OS_POSIX || OS_FUCHSIA
+
+#define HANDLE_EINTR(x) (x)
+#define IGNORE_EINTR(x) (x)
+
+#endif  // !OS_POSIX || OS_FUCHSIA
+
+#endif  // BASE_POSIX_EINTR_WRAPPER_H_
diff --git a/base/posix/file_descriptor_shuffle.cc b/base/posix/file_descriptor_shuffle.cc
new file mode 100644
index 0000000..d2fd39a
--- /dev/null
+++ b/base/posix/file_descriptor_shuffle.cc
@@ -0,0 +1,100 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/posix/file_descriptor_shuffle.h"
+
+#include <unistd.h>
+#include <stddef.h>
+#include <ostream>
+
+#include "base/posix/eintr_wrapper.h"
+#include "base/logging.h"
+
+namespace base {
+
+bool PerformInjectiveMultimapDestructive(
+    InjectiveMultimap* m, InjectionDelegate* delegate) {
+  static const size_t kMaxExtraFDs = 16;
+  int extra_fds[kMaxExtraFDs];
+  unsigned next_extra_fd = 0;
+
+  // DANGER: this function must not allocate or lock.
+  // Cannot use STL iterators here, since debug iterators use locks.
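+  // (In practice this runs between fork() and exec(), where allocating or
+  // locking in the child of a multithreaded parent can deadlock.)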
+
+  for (size_t i_index = 0; i_index < m->size(); ++i_index) {
+    InjectiveMultimap::value_type* i = &(*m)[i_index];
+    int temp_fd = -1;
+
+    // We DCHECK the injectiveness of the mapping.
+    for (size_t j_index = i_index + 1; j_index < m->size(); ++j_index) {
+      InjectiveMultimap::value_type* j = &(*m)[j_index];
+      DCHECK(i->dest != j->dest) << "Both fd " << i->source
+          << " and " << j->source << " map to " << i->dest;
+    }
+
+    const bool is_identity = i->source == i->dest;
+
+    for (size_t j_index = i_index + 1; j_index < m->size(); ++j_index) {
+      InjectiveMultimap::value_type* j = &(*m)[j_index];
+      if (!is_identity && i->dest == j->source) {
+        if (temp_fd == -1) {
+          if (!delegate->Duplicate(&temp_fd, i->dest))
+            return false;
+          if (next_extra_fd < kMaxExtraFDs) {
+            extra_fds[next_extra_fd++] = temp_fd;
+          } else {
+            RAW_LOG(ERROR, "PerformInjectiveMultimapDestructive overflowed "
+                           "extra_fds. Leaking file descriptors!");
+          }
+        }
+
+        j->source = temp_fd;
+        j->close = false;
+      }
+
+      if (i->close && i->source == j->dest)
+        i->close = false;
+
+      if (i->close && i->source == j->source) {
+        i->close = false;
+        j->close = true;
+      }
+    }
+
+    if (!is_identity) {
+      if (!delegate->Move(i->source, i->dest))
+        return false;
+    }
+
+    if (!is_identity && i->close)
+      delegate->Close(i->source);
+  }
+
+  for (unsigned i = 0; i < next_extra_fd; i++)
+    delegate->Close(extra_fds[i]);
+
+  return true;
+}
+
+bool PerformInjectiveMultimap(const InjectiveMultimap& m_in,
+                              InjectionDelegate* delegate) {
+  InjectiveMultimap m(m_in);
+  return PerformInjectiveMultimapDestructive(&m, delegate);
+}
+
+bool FileDescriptorTableInjection::Duplicate(int* result, int fd) {
+  *result = HANDLE_EINTR(dup(fd));
+  return *result >= 0;
+}
+
+bool FileDescriptorTableInjection::Move(int src, int dest) {
+  return HANDLE_EINTR(dup2(src, dest)) != -1;
+}
+
+void FileDescriptorTableInjection::Close(int fd) {
+  int ret = IGNORE_EINTR(close(fd));
+  DPCHECK(ret == 0);
+}
+
+}  // namespace base
diff --git a/base/posix/file_descriptor_shuffle.h b/base/posix/file_descriptor_shuffle.h
new file mode 100644
index 0000000..2afdc28
--- /dev/null
+++ b/base/posix/file_descriptor_shuffle.h
@@ -0,0 +1,87 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_POSIX_FILE_DESCRIPTOR_SHUFFLE_H_
+#define BASE_POSIX_FILE_DESCRIPTOR_SHUFFLE_H_
+
+// This code exists to shuffle file descriptors, which is commonly needed when
+// forking subprocesses. The naive approach (just call dup2 to set up the
+// desired descriptors) is very simple, but wrong: it won't handle edge cases
+// (like mapping 0 -> 1, 1 -> 0) correctly.
+//
+// In order to unittest this code, it's broken into the abstract action (an
+// injective multimap) and the concrete code for dealing with file descriptors.
+// Users should use the code like this:
+//   base::InjectiveMultimap file_descriptor_map;
+//   file_descriptor_map.push_back(base::InjectionArc(devnull, 0, true));
+//   file_descriptor_map.push_back(base::InjectionArc(devnull, 2, true));
+//   file_descriptor_map.push_back(base::InjectionArc(pipe[1], 1, true));
+//   base::ShuffleFileDescriptors(&file_descriptor_map);
+//
+// and trust that the Right Thing will get done.
+
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+
+namespace base {
+
+// A Delegate which performs the actions required to perform an injective
+// multimapping in place.
+class InjectionDelegate {
+ public:
+  // Duplicate |fd|, an element of the domain, and write a fresh element of the
+  // domain into |result|. Returns true iff successful.
+  virtual bool Duplicate(int* result, int fd) = 0;
+  // Destructively move |src| to |dest|, overwriting |dest|. Returns true iff
+  // successful.
+  virtual bool Move(int src, int dest) = 0;
+  // Delete an element of the domain.
+  virtual void Close(int fd) = 0;
+
+ protected:
+  virtual ~InjectionDelegate() = default;
+};
+
+// An implementation of the InjectionDelegate interface using the file
+// descriptor table of the current process as the domain.
+class BASE_EXPORT FileDescriptorTableInjection : public InjectionDelegate {
+  bool Duplicate(int* result, int fd) override;
+  bool Move(int src, int dest) override;
+  void Close(int fd) override;
+};
+
+// A single arc of the directed graph which describes an injective multimapping.
+struct InjectionArc {
+  InjectionArc(int in_source, int in_dest, bool in_close)
+      : source(in_source),
+        dest(in_dest),
+        close(in_close) {
+  }
+
+  int source;
+  int dest;
+  bool close;  // if true, delete the source element after performing the
+               // mapping.
+};
+
+typedef std::vector<InjectionArc> InjectiveMultimap;
+
+BASE_EXPORT bool PerformInjectiveMultimap(const InjectiveMultimap& map,
+                                          InjectionDelegate* delegate);
+
+BASE_EXPORT bool PerformInjectiveMultimapDestructive(
+    InjectiveMultimap* map,
+    InjectionDelegate* delegate);
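+
+// Note: PerformInjectiveMultimap() copies |map|, which allocates, so code
+// running between fork() and exec() should prefer the destructive variant,
+// as ShuffleFileDescriptors() below does.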
+
+// This function will not call malloc, but it will mutate |map|.
+static inline bool ShuffleFileDescriptors(InjectiveMultimap* map) {
+  FileDescriptorTableInjection delegate;
+  return PerformInjectiveMultimapDestructive(map, &delegate);
+}
+
+}  // namespace base
+
+#endif  // BASE_POSIX_FILE_DESCRIPTOR_SHUFFLE_H_
diff --git a/base/posix/file_descriptor_shuffle_unittest.cc b/base/posix/file_descriptor_shuffle_unittest.cc
new file mode 100644
index 0000000..3dfbf7e
--- /dev/null
+++ b/base/posix/file_descriptor_shuffle_unittest.cc
@@ -0,0 +1,281 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/posix/file_descriptor_shuffle.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+// 'Duplicated' file descriptors start at this number
+const int kDuplicateBase = 1000;
+
+}  // namespace
+
+namespace base {
+
+struct Action {
+  enum Type {
+    CLOSE,
+    MOVE,
+    DUPLICATE,
+  };
+
+  Action(Type in_type, int in_fd1, int in_fd2 = -1)
+      : type(in_type),
+        fd1(in_fd1),
+        fd2(in_fd2) {
+  }
+
+  bool operator==(const Action& other) const {
+    return other.type == type &&
+           other.fd1 == fd1 &&
+           other.fd2 == fd2;
+  }
+
+  Type type;
+  int fd1;
+  int fd2;
+};
+
+class InjectionTracer : public InjectionDelegate {
+ public:
+  InjectionTracer()
+      : next_duplicate_(kDuplicateBase) {
+  }
+
+  bool Duplicate(int* result, int fd) override {
+    *result = next_duplicate_++;
+    actions_.push_back(Action(Action::DUPLICATE, *result, fd));
+    return true;
+  }
+
+  bool Move(int src, int dest) override {
+    actions_.push_back(Action(Action::MOVE, src, dest));
+    return true;
+  }
+
+  void Close(int fd) override { actions_.push_back(Action(Action::CLOSE, fd)); }
+
+  const std::vector<Action>& actions() const { return actions_; }
+
+ private:
+  int next_duplicate_;
+  std::vector<Action> actions_;
+};
+
+TEST(FileDescriptorShuffleTest, Empty) {
+  InjectiveMultimap map;
+  InjectionTracer tracer;
+
+  EXPECT_TRUE(PerformInjectiveMultimap(map, &tracer));
+  EXPECT_EQ(0u, tracer.actions().size());
+}
+
+TEST(FileDescriptorShuffleTest, Noop) {
+  InjectiveMultimap map;
+  InjectionTracer tracer;
+  map.push_back(InjectionArc(0, 0, false));
+
+  EXPECT_TRUE(PerformInjectiveMultimap(map, &tracer));
+  EXPECT_EQ(0u, tracer.actions().size());
+}
+
+TEST(FileDescriptorShuffleTest, NoopAndClose) {
+  InjectiveMultimap map;
+  InjectionTracer tracer;
+  map.push_back(InjectionArc(0, 0, true));
+
+  EXPECT_TRUE(PerformInjectiveMultimap(map, &tracer));
+  EXPECT_EQ(0u, tracer.actions().size());
+}
+
+TEST(FileDescriptorShuffleTest, Simple1) {
+  InjectiveMultimap map;
+  InjectionTracer tracer;
+  map.push_back(InjectionArc(0, 1, false));
+
+  EXPECT_TRUE(PerformInjectiveMultimap(map, &tracer));
+  ASSERT_EQ(1u, tracer.actions().size());
+  EXPECT_TRUE(tracer.actions()[0] == Action(Action::MOVE, 0, 1));
+}
+
+TEST(FileDescriptorShuffleTest, Simple2) {
+  InjectiveMultimap map;
+  InjectionTracer tracer;
+  map.push_back(InjectionArc(0, 1, false));
+  map.push_back(InjectionArc(2, 3, false));
+
+  EXPECT_TRUE(PerformInjectiveMultimap(map, &tracer));
+  ASSERT_EQ(2u, tracer.actions().size());
+  EXPECT_TRUE(tracer.actions()[0] == Action(Action::MOVE, 0, 1));
+  EXPECT_TRUE(tracer.actions()[1] == Action(Action::MOVE, 2, 3));
+}
+
+TEST(FileDescriptorShuffleTest, Simple3) {
+  InjectiveMultimap map;
+  InjectionTracer tracer;
+  map.push_back(InjectionArc(0, 1, true));
+
+  EXPECT_TRUE(PerformInjectiveMultimap(map, &tracer));
+  ASSERT_EQ(2u, tracer.actions().size());
+  EXPECT_TRUE(tracer.actions()[0] == Action(Action::MOVE, 0, 1));
+  EXPECT_TRUE(tracer.actions()[1] == Action(Action::CLOSE, 0));
+}
+
+TEST(FileDescriptorShuffleTest, Simple4) {
+  InjectiveMultimap map;
+  InjectionTracer tracer;
+  map.push_back(InjectionArc(10, 0, true));
+  map.push_back(InjectionArc(1, 1, true));
+
+  EXPECT_TRUE(PerformInjectiveMultimap(map, &tracer));
+  ASSERT_EQ(2u, tracer.actions().size());
+  EXPECT_TRUE(tracer.actions()[0] == Action(Action::MOVE, 10, 0));
+  EXPECT_TRUE(tracer.actions()[1] == Action(Action::CLOSE, 10));
+}
+
+TEST(FileDescriptorShuffleTest, Cycle) {
+  InjectiveMultimap map;
+  InjectionTracer tracer;
+  map.push_back(InjectionArc(0, 1, false));
+  map.push_back(InjectionArc(1, 0, false));
+
+  EXPECT_TRUE(PerformInjectiveMultimap(map, &tracer));
+  ASSERT_EQ(4u, tracer.actions().size());
+  EXPECT_TRUE(tracer.actions()[0] ==
+              Action(Action::DUPLICATE, kDuplicateBase, 1));
+  EXPECT_TRUE(tracer.actions()[1] == Action(Action::MOVE, 0, 1));
+  EXPECT_TRUE(tracer.actions()[2] == Action(Action::MOVE, kDuplicateBase, 0));
+  EXPECT_TRUE(tracer.actions()[3] == Action(Action::CLOSE, kDuplicateBase));
+}
+
+TEST(FileDescriptorShuffleTest, CycleAndClose1) {
+  InjectiveMultimap map;
+  InjectionTracer tracer;
+  map.push_back(InjectionArc(0, 1, true));
+  map.push_back(InjectionArc(1, 0, false));
+
+  EXPECT_TRUE(PerformInjectiveMultimap(map, &tracer));
+  ASSERT_EQ(4u, tracer.actions().size());
+  EXPECT_TRUE(tracer.actions()[0] ==
+              Action(Action::DUPLICATE, kDuplicateBase, 1));
+  EXPECT_TRUE(tracer.actions()[1] == Action(Action::MOVE, 0, 1));
+  EXPECT_TRUE(tracer.actions()[2] == Action(Action::MOVE, kDuplicateBase, 0));
+  EXPECT_TRUE(tracer.actions()[3] == Action(Action::CLOSE, kDuplicateBase));
+}
+
+TEST(FileDescriptorShuffleTest, CycleAndClose2) {
+  InjectiveMultimap map;
+  InjectionTracer tracer;
+  map.push_back(InjectionArc(0, 1, false));
+  map.push_back(InjectionArc(1, 0, true));
+
+  EXPECT_TRUE(PerformInjectiveMultimap(map, &tracer));
+  ASSERT_EQ(4u, tracer.actions().size());
+  EXPECT_TRUE(tracer.actions()[0] ==
+              Action(Action::DUPLICATE, kDuplicateBase, 1));
+  EXPECT_TRUE(tracer.actions()[1] == Action(Action::MOVE, 0, 1));
+  EXPECT_TRUE(tracer.actions()[2] == Action(Action::MOVE, kDuplicateBase, 0));
+  EXPECT_TRUE(tracer.actions()[3] == Action(Action::CLOSE, kDuplicateBase));
+}
+
+TEST(FileDescriptorShuffleTest, CycleAndClose3) {
+  InjectiveMultimap map;
+  InjectionTracer tracer;
+  map.push_back(InjectionArc(0, 1, true));
+  map.push_back(InjectionArc(1, 0, true));
+
+  EXPECT_TRUE(PerformInjectiveMultimap(map, &tracer));
+  ASSERT_EQ(4u, tracer.actions().size());
+  EXPECT_TRUE(tracer.actions()[0] ==
+              Action(Action::DUPLICATE, kDuplicateBase, 1));
+  EXPECT_TRUE(tracer.actions()[1] == Action(Action::MOVE, 0, 1));
+  EXPECT_TRUE(tracer.actions()[2] == Action(Action::MOVE, kDuplicateBase, 0));
+  EXPECT_TRUE(tracer.actions()[3] == Action(Action::CLOSE, kDuplicateBase));
+}
+
+TEST(FileDescriptorShuffleTest, Fanout) {
+  InjectiveMultimap map;
+  InjectionTracer tracer;
+  map.push_back(InjectionArc(0, 1, false));
+  map.push_back(InjectionArc(0, 2, false));
+
+  EXPECT_TRUE(PerformInjectiveMultimap(map, &tracer));
+  ASSERT_EQ(2u, tracer.actions().size());
+  EXPECT_TRUE(tracer.actions()[0] == Action(Action::MOVE, 0, 1));
+  EXPECT_TRUE(tracer.actions()[1] == Action(Action::MOVE, 0, 2));
+}
+
+TEST(FileDescriptorShuffleTest, FanoutAndClose1) {
+  InjectiveMultimap map;
+  InjectionTracer tracer;
+  map.push_back(InjectionArc(0, 1, true));
+  map.push_back(InjectionArc(0, 2, false));
+
+  EXPECT_TRUE(PerformInjectiveMultimap(map, &tracer));
+  ASSERT_EQ(3u, tracer.actions().size());
+  EXPECT_TRUE(tracer.actions()[0] == Action(Action::MOVE, 0, 1));
+  EXPECT_TRUE(tracer.actions()[1] == Action(Action::MOVE, 0, 2));
+  EXPECT_TRUE(tracer.actions()[2] == Action(Action::CLOSE, 0));
+}
+
+TEST(FileDescriptorShuffleTest, FanoutAndClose2) {
+  InjectiveMultimap map;
+  InjectionTracer tracer;
+  map.push_back(InjectionArc(0, 1, false));
+  map.push_back(InjectionArc(0, 2, true));
+
+  EXPECT_TRUE(PerformInjectiveMultimap(map, &tracer));
+  ASSERT_EQ(3u, tracer.actions().size());
+  EXPECT_TRUE(tracer.actions()[0] == Action(Action::MOVE, 0, 1));
+  EXPECT_TRUE(tracer.actions()[1] == Action(Action::MOVE, 0, 2));
+  EXPECT_TRUE(tracer.actions()[2] == Action(Action::CLOSE, 0));
+}
+
+TEST(FileDescriptorShuffleTest, FanoutAndClose3) {
+  InjectiveMultimap map;
+  InjectionTracer tracer;
+  map.push_back(InjectionArc(0, 1, true));
+  map.push_back(InjectionArc(0, 2, true));
+
+  EXPECT_TRUE(PerformInjectiveMultimap(map, &tracer));
+  ASSERT_EQ(3u, tracer.actions().size());
+  EXPECT_TRUE(tracer.actions()[0] == Action(Action::MOVE, 0, 1));
+  EXPECT_TRUE(tracer.actions()[1] == Action(Action::MOVE, 0, 2));
+  EXPECT_TRUE(tracer.actions()[2] == Action(Action::CLOSE, 0));
+}
+
+class FailingDelegate : public InjectionDelegate {
+ public:
+  bool Duplicate(int* result, int fd) override { return false; }
+
+  bool Move(int src, int dest) override { return false; }
+
+  void Close(int fd) override {}
+};
+
+TEST(FileDescriptorShuffleTest, EmptyWithFailure) {
+  InjectiveMultimap map;
+  FailingDelegate failing;
+
+  EXPECT_TRUE(PerformInjectiveMultimap(map, &failing));
+}
+
+TEST(FileDescriptorShuffleTest, NoopWithFailure) {
+  InjectiveMultimap map;
+  FailingDelegate failing;
+  map.push_back(InjectionArc(0, 0, false));
+
+  EXPECT_TRUE(PerformInjectiveMultimap(map, &failing));
+}
+
+TEST(FileDescriptorShuffleTest, Simple1WithFailure) {
+  InjectiveMultimap map;
+  FailingDelegate failing;
+  map.push_back(InjectionArc(0, 1, false));
+
+  EXPECT_FALSE(PerformInjectiveMultimap(map, &failing));
+}
+
+}  // namespace base
diff --git a/base/posix/global_descriptors.cc b/base/posix/global_descriptors.cc
new file mode 100644
index 0000000..738d14e
--- /dev/null
+++ b/base/posix/global_descriptors.cc
@@ -0,0 +1,101 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/posix/global_descriptors.h"
+
+#include <vector>
+#include <utility>
+
+#include "base/logging.h"
+
+namespace base {
+
+GlobalDescriptors::Descriptor::Descriptor(Key key, int fd)
+    : key(key), fd(fd), region(base::MemoryMappedFile::Region::kWholeFile) {
+}
+
+GlobalDescriptors::Descriptor::Descriptor(Key key,
+                                          int fd,
+                                          base::MemoryMappedFile::Region region)
+    : key(key), fd(fd), region(region) {
+}
+
+// static
+GlobalDescriptors* GlobalDescriptors::GetInstance() {
+  typedef Singleton<base::GlobalDescriptors,
+                    LeakySingletonTraits<base::GlobalDescriptors>>
+      GlobalDescriptorsSingleton;
+  return GlobalDescriptorsSingleton::get();
+}
+
+int GlobalDescriptors::Get(Key key) const {
+  const int ret = MaybeGet(key);
+
+  if (ret == -1)
+    DLOG(DCHECK) << "Unknown global descriptor: " << key;
+  return ret;
+}
+
+int GlobalDescriptors::MaybeGet(Key key) const {
+  for (const auto& i : descriptors_) {
+    if (i.key == key)
+      return i.fd;
+  }
+
+  return -1;
+}
+
+base::ScopedFD GlobalDescriptors::TakeFD(
+    Key key,
+    base::MemoryMappedFile::Region* region) {
+  base::ScopedFD fd;
+  for (Mapping::iterator i = descriptors_.begin(); i != descriptors_.end();
+       ++i) {
+    if (i->key == key) {
+      *region = i->region;
+      fd.reset(i->fd);
+      descriptors_.erase(i);
+      break;
+    }
+  }
+  return fd;
+}
+
+void GlobalDescriptors::Set(Key key, int fd) {
+  Set(key, fd, base::MemoryMappedFile::Region::kWholeFile);
+}
+
+void GlobalDescriptors::Set(Key key,
+                            int fd,
+                            base::MemoryMappedFile::Region region) {
+  for (auto& i : descriptors_) {
+    if (i.key == key) {
+      i.fd = fd;
+      i.region = region;
+      return;
+    }
+  }
+
+  descriptors_.push_back(Descriptor(key, fd, region));
+}
+
+base::MemoryMappedFile::Region GlobalDescriptors::GetRegion(Key key) const {
+  for (const auto& i : descriptors_) {
+    if (i.key == key)
+      return i.region;
+  }
+  DLOG(DCHECK) << "Unknown global descriptor: " << key;
+  return base::MemoryMappedFile::Region::kWholeFile;
+}
+
+void GlobalDescriptors::Reset(const Mapping& mapping) {
+  descriptors_ = mapping;
+}
+
+GlobalDescriptors::GlobalDescriptors() = default;
+
+GlobalDescriptors::~GlobalDescriptors() = default;
+
+}  // namespace base
diff --git a/base/posix/global_descriptors.h b/base/posix/global_descriptors.h
new file mode 100644
index 0000000..9d68761
--- /dev/null
+++ b/base/posix/global_descriptors.h
@@ -0,0 +1,98 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_POSIX_GLOBAL_DESCRIPTORS_H_
+#define BASE_POSIX_GLOBAL_DESCRIPTORS_H_
+
+#include "build/build_config.h"
+
+#include <vector>
+#include <utility>
+
+#include <stdint.h>
+
+#include "base/files/memory_mapped_file.h"
+#include "base/files/scoped_file.h"
+#include "base/memory/singleton.h"
+
+namespace base {
+
+// It's common practice to install file descriptors into well known slot
+// numbers before execing a child; stdin, stdout and stderr are ubiquitous
+// examples.
+//
+// However, when using a zygote model, this becomes troublesome. Since the
+// descriptors which need to be in these slots generally aren't known, any code
+// could open a resource and take one of the reserved descriptors. Simply
+// overwriting the slot isn't a viable solution.
+//
+// We could try to fill the reserved slots as soon as possible, but this is a
+// fragile solution since global constructors etc are able to open files.
+//
+// Instead, we retreat from the idea of installing descriptors in specific
+// slots and add a layer of indirection in the form of this singleton object.
+// It maps from an abstract key to a descriptor. If independent modules each
+// need to define keys, then values should be chosen randomly so as not to
+// collide.
+//
+// Note that this class is deprecated: ideally, file descriptors should be
+// passed through the command line using FileDescriptorStore.
+// See https://crbug.com/692619
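+//
+// A minimal usage sketch (kMyKey is a hypothetical key; keys are plain
+// uint32_t values agreed upon by the launcher and the child):
+//   // During child startup, register the inherited descriptor:
+//   base::GlobalDescriptors::GetInstance()->Set(kMyKey, inherited_fd);
+//   // Later, anywhere in the child:
+//   int fd = base::GlobalDescriptors::GetInstance()->Get(kMyKey);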
+class BASE_EXPORT GlobalDescriptors {
+ public:
+  typedef uint32_t Key;
+  struct Descriptor {
+    Descriptor(Key key, int fd);
+    Descriptor(Key key, int fd, base::MemoryMappedFile::Region region);
+
+    // Globally unique key.
+    Key key;
+    // Actual FD.
+    int fd;
+    // Optional region, defaults to kWholeFile.
+    base::MemoryMappedFile::Region region;
+  };
+  typedef std::vector<Descriptor> Mapping;
+
+  // Often we want a canonical descriptor for a given Key. In this case, we add
+  // the following constant to the key value:
+  static const int kBaseDescriptor = 3;  // 0, 1, 2 are already taken.
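+  // For example, a hypothetical key with value 1 would canonically be passed
+  // in descriptor 1 + kBaseDescriptor = 4.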
+
+  // Return the singleton instance of GlobalDescriptors.
+  static GlobalDescriptors* GetInstance();
+
+  // Get a descriptor given a key. It is a fatal error if the key is not known.
+  int Get(Key key) const;
+
+  // Get a descriptor given a key. Returns -1 on error.
+  int MaybeGet(Key key) const;
+
+  // Returns a descriptor given a key and removes it from this class's
+  // mappings, also populating |region|.
+  // It is a fatal error if the key is not known.
+  base::ScopedFD TakeFD(Key key, base::MemoryMappedFile::Region* region);
+
+  // Get a region given a key. It is a fatal error if the key is not known.
+  base::MemoryMappedFile::Region GetRegion(Key key) const;
+
+  // Set the descriptor for the given |key|. This sets the region associated
+  // with |key| to kWholeFile.
+  void Set(Key key, int fd);
+
+  // Set the descriptor and |region| for the given |key|.
+  void Set(Key key, int fd, base::MemoryMappedFile::Region region);
+
+  void Reset(const Mapping& mapping);
+
+ private:
+  friend struct DefaultSingletonTraits<GlobalDescriptors>;
+  GlobalDescriptors();
+  ~GlobalDescriptors();
+
+  Mapping descriptors_;
+};
+
+}  // namespace base
+
+#endif  // BASE_POSIX_GLOBAL_DESCRIPTORS_H_
diff --git a/base/posix/safe_strerror.cc b/base/posix/safe_strerror.cc
new file mode 100644
index 0000000..aef5742
--- /dev/null
+++ b/base/posix/safe_strerror.cc
@@ -0,0 +1,128 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if defined(__ANDROID__)
+// Post-L versions of bionic define the GNU-specific strerror_r if _GNU_SOURCE
+// is defined, but the symbol is renamed to __gnu_strerror_r which only exists
+// on those later versions. To preserve ABI compatibility with older versions,
+// undefine _GNU_SOURCE and use the POSIX version.
+#undef _GNU_SOURCE
+#endif
+
+#include "base/posix/safe_strerror.h"
+
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "build/build_config.h"
+
+namespace base {
+
+#if defined(__GLIBC__) || defined(OS_NACL)
+#define USE_HISTORICAL_STRERROR_R 1
+#else
+#define USE_HISTORICAL_STRERROR_R 0
+#endif
+
+#if USE_HISTORICAL_STRERROR_R && defined(__GNUC__)
+// GCC will complain about the unused second wrap function unless we tell it
+// that we meant for them to be potentially unused, which is exactly what this
+// attribute is for.
+#define POSSIBLY_UNUSED __attribute__((unused))
+#else
+#define POSSIBLY_UNUSED
+#endif
+
+#if USE_HISTORICAL_STRERROR_R
+// glibc has two strerror_r functions: a historical GNU-specific one that
+// returns type char *, and a POSIX.1-2001 compliant one available since 2.3.4
+// that returns int. This wraps the GNU-specific one.
+static void POSSIBLY_UNUSED wrap_posix_strerror_r(
+    char *(*strerror_r_ptr)(int, char *, size_t),
+    int err,
+    char *buf,
+    size_t len) {
+  // GNU version.
+  char *rc = (*strerror_r_ptr)(err, buf, len);
+  if (rc != buf) {
+    // glibc did not use buf and returned a static string instead. Copy it
+    // into buf.
+    buf[0] = '\0';
+    strncat(buf, rc, len - 1);
+  }
+  // The GNU version never fails. Unknown errors get an "unknown error" message.
+  // The result is always null terminated.
+}
+#endif  // USE_HISTORICAL_STRERROR_R
+
+// Wrapper for strerror_r functions that implement the POSIX interface. POSIX
+// does not define the behaviour for some of the edge cases, so we wrap it to
+// guarantee that they are handled. This is compiled on all POSIX platforms, but
+// it will only be used on Linux if the POSIX strerror_r implementation is
+// being used (see below).
+static void POSSIBLY_UNUSED wrap_posix_strerror_r(
+    int (*strerror_r_ptr)(int, char *, size_t),
+    int err,
+    char *buf,
+    size_t len) {
+  int old_errno = errno;
+  // Have to cast since otherwise we get an error if this is the GNU version
+  // (but in such a scenario this function is never called). Sadly we can't use
+  // C++-style casts because the appropriate one is reinterpret_cast but it's
+  // considered illegal to reinterpret_cast a type to itself, so we get an
+  // error in the opposite case.
+  int result = (*strerror_r_ptr)(err, buf, len);
+  if (result == 0) {
+    // POSIX is vague about whether the string will be terminated, although
+    // it indirectly implies that typically ERANGE will be returned, instead
+    // of truncating the string. We play it safe by always terminating the
+    // string explicitly.
+    buf[len - 1] = '\0';
+  } else {
+    // Error. POSIX is vague about whether the return value is itself a system
+    // error code or something else. On Linux currently it is -1 and errno is
+    // set. On BSD-derived systems it is a system error and errno is unchanged.
+    // We try and detect which case it is so as to put as much useful info as
+    // we can into our message.
+    int strerror_error;  // The error encountered in strerror
+    int new_errno = errno;
+    if (new_errno != old_errno) {
+      // errno was changed, so probably the return value is just -1 or something
+      // else that doesn't provide any info, and errno is the error.
+      strerror_error = new_errno;
+    } else {
+      // Either the error from strerror_r was the same as the previous value, or
+      // errno wasn't used. Assume the latter.
+      strerror_error = result;
+    }
+    // snprintf truncates and always null-terminates.
+    snprintf(buf,
+             len,
+             "Error %d while retrieving error %d",
+             strerror_error,
+             err);
+  }
+  errno = old_errno;
+}
+
+void safe_strerror_r(int err, char *buf, size_t len) {
+  if (buf == nullptr || len <= 0) {
+    return;
+  }
+  // If using glibc (i.e., Linux), the compiler will automatically select the
+  // appropriate overloaded function based on the function type of strerror_r.
+  // The other one will be elided from the translation unit since both are
+  // static.
+  wrap_posix_strerror_r(&strerror_r, err, buf, len);
+}
+
+std::string safe_strerror(int err) {
+  const int buffer_size = 256;
+  char buf[buffer_size];
+  safe_strerror_r(err, buf, sizeof(buf));
+  return std::string(buf);
+}
+
+}  // namespace base
diff --git a/base/posix/safe_strerror.h b/base/posix/safe_strerror.h
new file mode 100644
index 0000000..2945312
--- /dev/null
+++ b/base/posix/safe_strerror.h
@@ -0,0 +1,44 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_POSIX_SAFE_STRERROR_H_
+#define BASE_POSIX_SAFE_STRERROR_H_
+
+#include <stddef.h>
+
+#include <string>
+
+#include "base/base_export.h"
+
+namespace base {
+
+// BEFORE using anything from this file, first look at PLOG and friends in
+// logging.h and use them instead if applicable.
+//
+// This file declares safe, portable alternatives to the POSIX strerror()
+// function. strerror() is inherently unsafe in multi-threaded apps and should
+// never be used. Doing so can cause crashes. Additionally, the thread-safe
+// alternative strerror_r varies in semantics across platforms. Use these
+// functions instead.
+
+// Thread-safe strerror function with dependable semantics that never fails.
+// It will write the string form of error "err" to buffer buf of length len.
+// If there is an error calling the OS's strerror_r() function then a message to
+// that effect will be printed into buf, truncating if necessary. The final
+// result is always null-terminated. The value of errno is never changed.
+//
+// Use this instead of strerror_r().
+BASE_EXPORT void safe_strerror_r(int err, char *buf, size_t len);
+
+// Calls safe_strerror_r with a buffer of suitable size and returns the result
+// in a C++ string.
+//
+// Use this instead of strerror(). Note though that safe_strerror_r will be
+// more robust in the case of heap corruption errors, since it doesn't need to
+// allocate a string.
+BASE_EXPORT std::string safe_strerror(int err);
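+
+// A typical call site (a sketch; |path| is illustrative):
+//   if (unlink(path) != 0)
+//     LOG(ERROR) << "unlink failed: " << base::safe_strerror(errno);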
+
+}  // namespace base
+
+#endif  // BASE_POSIX_SAFE_STRERROR_H_
diff --git a/base/posix/unix_domain_socket.cc b/base/posix/unix_domain_socket.cc
new file mode 100644
index 0000000..7c087a5
--- /dev/null
+++ b/base/posix/unix_domain_socket.cc
@@ -0,0 +1,288 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/posix/unix_domain_socket.h"
+
+#include <errno.h>
+#include <sys/socket.h>
+#if !defined(OS_NACL_NONSFI)
+#include <sys/un.h>
+#endif
+#include <unistd.h>
+
+#include <vector>
+
+#include "base/files/scoped_file.h"
+#include "base/logging.h"
+#include "base/pickle.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/stl_util.h"
+#include "build/build_config.h"
+
+#if !defined(OS_NACL_NONSFI)
+#include <sys/uio.h>
+#endif
+
+namespace base {
+
+const size_t UnixDomainSocket::kMaxFileDescriptors = 16;
+
+#if !defined(OS_NACL_NONSFI)
+bool CreateSocketPair(ScopedFD* one, ScopedFD* two) {
+  int raw_socks[2];
+#if defined(OS_MACOSX)
+  // macOS does not support SEQPACKET.
+  const int flags = SOCK_STREAM;
+#else
+  const int flags = SOCK_SEQPACKET;
+#endif
+  if (socketpair(AF_UNIX, flags, 0, raw_socks) == -1)
+    return false;
+#if defined(OS_MACOSX)
+  // On macOS, preventing SIGPIPE is done with a socket option.
+  const int no_sigpipe = 1;
+  if (setsockopt(raw_socks[0], SOL_SOCKET, SO_NOSIGPIPE, &no_sigpipe,
+                 sizeof(no_sigpipe)) != 0)
+    return false;
+  if (setsockopt(raw_socks[1], SOL_SOCKET, SO_NOSIGPIPE, &no_sigpipe,
+                 sizeof(no_sigpipe)) != 0)
+    return false;
+#endif
+  one->reset(raw_socks[0]);
+  two->reset(raw_socks[1]);
+  return true;
+}
+
+// static
+bool UnixDomainSocket::EnableReceiveProcessId(int fd) {
+#if !defined(OS_MACOSX)
+  const int enable = 1;
+  return setsockopt(fd, SOL_SOCKET, SO_PASSCRED, &enable, sizeof(enable)) == 0;
+#else
+  // SO_PASSCRED is not supported on macOS.
+  return true;
+#endif  // OS_MACOSX
+}
+#endif  // !defined(OS_NACL_NONSFI)
+
+// static
+bool UnixDomainSocket::SendMsg(int fd,
+                               const void* buf,
+                               size_t length,
+                               const std::vector<int>& fds) {
+  struct msghdr msg = {};
+  struct iovec iov = {const_cast<void*>(buf), length};
+  msg.msg_iov = &iov;
+  msg.msg_iovlen = 1;
+
+  char* control_buffer = nullptr;
+  if (!fds.empty()) {
+    const unsigned control_len = CMSG_SPACE(sizeof(int) * fds.size());
+    control_buffer = new char[control_len];
+
+    struct cmsghdr* cmsg;
+    msg.msg_control = control_buffer;
+    msg.msg_controllen = control_len;
+    cmsg = CMSG_FIRSTHDR(&msg);
+    cmsg->cmsg_level = SOL_SOCKET;
+    cmsg->cmsg_type = SCM_RIGHTS;
+    cmsg->cmsg_len = CMSG_LEN(sizeof(int) * fds.size());
+    memcpy(CMSG_DATA(cmsg), &fds[0], sizeof(int) * fds.size());
+    msg.msg_controllen = cmsg->cmsg_len;
+  }
+
+// Avoid a SIGPIPE if the other end breaks the connection.
+// Due to a bug in the Linux kernel (net/unix/af_unix.c), MSG_NOSIGNAL isn't
+// honored for SOCK_SEQPACKET in the AF_UNIX domain, even though POSIX mandates
+// it. On Mac, MSG_NOSIGNAL is not supported, so we need to ensure that
+// SO_NOSIGPIPE is set during socket creation.
+#if defined(OS_MACOSX)
+  const int flags = 0;
+  int no_sigpipe = 0;
+  socklen_t no_sigpipe_len = sizeof(no_sigpipe);
+  DPCHECK(getsockopt(fd, SOL_SOCKET, SO_NOSIGPIPE, &no_sigpipe,
+                     &no_sigpipe_len) == 0)
+      << "Failed ot get socket option.";
+  DCHECK(no_sigpipe) << "SO_NOSIGPIPE not set on the socket.";
+#else
+  const int flags = MSG_NOSIGNAL;
+#endif  // OS_MACOSX
+  const ssize_t r = HANDLE_EINTR(sendmsg(fd, &msg, flags));
+  const bool ret = static_cast<ssize_t>(length) == r;
+  delete[] control_buffer;
+  return ret;
+}
+
+// static
+ssize_t UnixDomainSocket::RecvMsg(int fd,
+                                  void* buf,
+                                  size_t length,
+                                  std::vector<ScopedFD>* fds) {
+  return UnixDomainSocket::RecvMsgWithPid(fd, buf, length, fds, nullptr);
+}
+
+// static
+ssize_t UnixDomainSocket::RecvMsgWithPid(int fd,
+                                         void* buf,
+                                         size_t length,
+                                         std::vector<ScopedFD>* fds,
+                                         ProcessId* pid) {
+  return UnixDomainSocket::RecvMsgWithFlags(fd, buf, length, 0, fds, pid);
+}
+
+// static
+ssize_t UnixDomainSocket::RecvMsgWithFlags(int fd,
+                                           void* buf,
+                                           size_t length,
+                                           int flags,
+                                           std::vector<ScopedFD>* fds,
+                                           ProcessId* out_pid) {
+  fds->clear();
+
+  struct msghdr msg = {};
+  struct iovec iov = {buf, length};
+  msg.msg_iov = &iov;
+  msg.msg_iovlen = 1;
+
+  const size_t kControlBufferSize =
+      CMSG_SPACE(sizeof(int) * kMaxFileDescriptors)
+#if !defined(OS_NACL_NONSFI) && !defined(OS_MACOSX)
+      // The PNaCl toolchain for Non-SFI binary build and macOS do not support
+      // ucred. macOS supports xucred, but this structure is insufficient.
+      + CMSG_SPACE(sizeof(struct ucred))
+#endif  // OS_NACL_NONSFI or OS_MACOSX
+      ;
+  char control_buffer[kControlBufferSize];
+  msg.msg_control = control_buffer;
+  msg.msg_controllen = sizeof(control_buffer);
+
+  const ssize_t r = HANDLE_EINTR(recvmsg(fd, &msg, flags));
+  if (r == -1)
+    return -1;
+
+  int* wire_fds = nullptr;
+  unsigned wire_fds_len = 0;
+  ProcessId pid = -1;
+
+  if (msg.msg_controllen > 0) {
+    struct cmsghdr* cmsg;
+    for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
+      const unsigned payload_len = cmsg->cmsg_len - CMSG_LEN(0);
+      if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
+        DCHECK_EQ(payload_len % sizeof(int), 0u);
+        DCHECK_EQ(wire_fds, static_cast<void*>(nullptr));
+        wire_fds = reinterpret_cast<int*>(CMSG_DATA(cmsg));
+        wire_fds_len = payload_len / sizeof(int);
+      }
+#if !defined(OS_NACL_NONSFI) && !defined(OS_MACOSX)
+      // The PNaCl toolchain for Non-SFI binary build and macOS do not support
+      // SCM_CREDENTIALS.
+      if (cmsg->cmsg_level == SOL_SOCKET &&
+          cmsg->cmsg_type == SCM_CREDENTIALS) {
+        DCHECK_EQ(payload_len, sizeof(struct ucred));
+        DCHECK_EQ(pid, -1);
+        pid = reinterpret_cast<struct ucred*>(CMSG_DATA(cmsg))->pid;
+      }
+#endif  // !defined(OS_NACL_NONSFI) && !defined(OS_MACOSX)
+    }
+  }
+
+  if (msg.msg_flags & MSG_TRUNC || msg.msg_flags & MSG_CTRUNC) {
+    if (msg.msg_flags & MSG_CTRUNC) {
+      // Extraordinary case, not caller fixable. Log something.
+      LOG(ERROR) << "recvmsg returned MSG_CTRUNC flag, buffer len is "
+                 << msg.msg_controllen;
+    }
+    for (unsigned i = 0; i < wire_fds_len; ++i)
+      close(wire_fds[i]);
+    errno = EMSGSIZE;
+    return -1;
+  }
+
+  if (wire_fds) {
+    for (unsigned i = 0; i < wire_fds_len; ++i)
+      fds->push_back(ScopedFD(wire_fds[i]));  // TODO(mdempsky): emplace_back
+  }
+
+  if (out_pid) {
+#if defined(OS_MACOSX)
+    socklen_t pid_size = sizeof(pid);
+    if (getsockopt(fd, SOL_LOCAL, LOCAL_PEERPID, &pid, &pid_size) != 0)
+      pid = -1;
+#else
+    // |pid| will legitimately be -1 if we read EOF, so only DCHECK if we
+    // actually received a message.  Unfortunately, Linux allows sending zero
+    // length messages, which are indistinguishable from EOF, so this check
+    // has false negatives.
+    if (r > 0 || msg.msg_controllen > 0)
+      DCHECK_GE(pid, 0);
+#endif
+
+    *out_pid = pid;
+  }
+
+  return r;
+}
+
+#if !defined(OS_NACL_NONSFI)
+// static
+ssize_t UnixDomainSocket::SendRecvMsg(int fd,
+                                      uint8_t* reply,
+                                      unsigned max_reply_len,
+                                      int* result_fd,
+                                      const Pickle& request) {
+  return UnixDomainSocket::SendRecvMsgWithFlags(fd, reply, max_reply_len,
+                                                0, /* recvmsg_flags */
+                                                result_fd, request);
+}
+
+// static
+ssize_t UnixDomainSocket::SendRecvMsgWithFlags(int fd,
+                                               uint8_t* reply,
+                                               unsigned max_reply_len,
+                                               int recvmsg_flags,
+                                               int* result_fd,
+                                               const Pickle& request) {
+  // This socketpair is only used for the IPC and is cleaned up before
+  // returning.
+  ScopedFD recv_sock, send_sock;
+  if (!CreateSocketPair(&recv_sock, &send_sock))
+    return -1;
+
+  {
+    std::vector<int> send_fds;
+    send_fds.push_back(send_sock.get());
+    if (!SendMsg(fd, request.data(), request.size(), send_fds))
+      return -1;
+  }
+
+  // Close the sending end of the socket right away so that if our peer closes
+  // it before sending a response (e.g., from exiting), RecvMsgWithFlags() will
+  // return EOF instead of hanging.
+  send_sock.reset();
+
+  std::vector<ScopedFD> recv_fds;
+  // When porting to OSX keep in mind it doesn't support MSG_NOSIGNAL, so the
+  // sender might get a SIGPIPE.
+  const ssize_t reply_len = RecvMsgWithFlags(
+      recv_sock.get(), reply, max_reply_len, recvmsg_flags, &recv_fds, nullptr);
+  recv_sock.reset();
+  if (reply_len == -1)
+    return -1;
+
+  // If we received more file descriptors than the caller expected, then we
+  // treat that as an error.
+  if (recv_fds.size() > (result_fd != nullptr ? 1 : 0)) {
+    NOTREACHED();
+    return -1;
+  }
+
+  if (result_fd)
+    *result_fd = recv_fds.empty() ? -1 : recv_fds[0].release();
+
+  return reply_len;
+}
+#endif  // !defined(OS_NACL_NONSFI)
+
+}  // namespace base
diff --git a/base/posix/unix_domain_socket.h b/base/posix/unix_domain_socket.h
new file mode 100644
index 0000000..5c74f07
--- /dev/null
+++ b/base/posix/unix_domain_socket.h
@@ -0,0 +1,111 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_POSIX_UNIX_DOMAIN_SOCKET_H_
+#define BASE_POSIX_UNIX_DOMAIN_SOCKET_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/types.h>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/files/scoped_file.h"
+#include "base/process/process_handle.h"
+#include "build/build_config.h"
+
+namespace base {
+
+class Pickle;
+
+#if !defined(OS_NACL_NONSFI)
+// Creates a connected pair of UNIX-domain SOCK_SEQPACKET sockets, and passes
+// ownership of the newly allocated file descriptors to |one| and |two|.
+// Returns true on success.
+bool BASE_EXPORT CreateSocketPair(ScopedFD* one, ScopedFD* two);
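+//
+// Example (a sketch):
+//   base::ScopedFD one, two;
+//   if (!base::CreateSocketPair(&one, &two))
+//     PLOG(ERROR) << "socketpair";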
+#endif
+
+class BASE_EXPORT UnixDomainSocket {
+ public:
+  // Maximum number of file descriptors that can be read by RecvMsg().
+  static const size_t kMaxFileDescriptors;
+
+#if !defined(OS_NACL_NONSFI)
+  // Use to enable receiving process IDs in RecvMsgWithPid.  Should be called on
+  // the receiving socket (i.e., the socket passed to RecvMsgWithPid). Returns
+  // true if successful.
+  static bool EnableReceiveProcessId(int fd);
+#endif  // !defined(OS_NACL_NONSFI)
+
+  // Use sendmsg to write the given msg and include a vector of file
+  // descriptors. Returns true if successful.
+  static bool SendMsg(int fd,
+                      const void* msg,
+                      size_t length,
+                      const std::vector<int>& fds);
+
+  // Use recvmsg to read a message and an array of file descriptors. Returns
+  // -1 on failure. Note: will read, at most, |kMaxFileDescriptors| descriptors.
+  static ssize_t RecvMsg(int fd,
+                         void* msg,
+                         size_t length,
+                         std::vector<ScopedFD>* fds);
+
+  // Same as RecvMsg above, but also returns the sender's process ID (as seen
+  // from the caller's namespace).  However, before using this function to
+  // receive process IDs, EnableReceiveProcessId() should be called on the
+  // receiving socket.
+  static ssize_t RecvMsgWithPid(int fd,
+                                void* msg,
+                                size_t length,
+                                std::vector<ScopedFD>* fds,
+                                ProcessId* pid);
+
+#if !defined(OS_NACL_NONSFI)
+  // Perform a sendmsg/recvmsg pair.
+  //   1. This process creates a UNIX SEQPACKET socketpair. Using
+  //      connection-oriented sockets (SEQPACKET or STREAM) is critical here,
+  //      because if one of the ends closes the other one must be notified.
+  //   2. This process writes a request to |fd| with an SCM_RIGHTS control
+  //      message containing one end of the fresh socket pair.
+  //   3. This process blocks reading from the other end of the fresh
+  //      socketpair.
+  //   4. The target process receives the request, processes it and writes the
+  //      reply to the end of the socketpair contained in the request.
+  //   5. This process wakes up and continues.
+  //
+  //   fd: descriptor to send the request on
+  //   reply: buffer for the reply
+  //   reply_len: size of |reply|
+  //   result_fd: (may be NULL) the file descriptor returned in the reply
+  //              (if any)
+  //   request: the bytes to send in the request
+  static ssize_t SendRecvMsg(int fd,
+                             uint8_t* reply,
+                             unsigned reply_len,
+                             int* result_fd,
+                             const Pickle& request);
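+
+  // A usage sketch (|ipc_fd| and kSomeRequestType are illustrative; assumes
+  // the peer replies with at most one descriptor):
+  //   Pickle request;
+  //   request.WriteInt(kSomeRequestType);
+  //   uint8_t reply[1024];
+  //   int reply_fd = -1;
+  //   ssize_t len = UnixDomainSocket::SendRecvMsg(
+  //       ipc_fd, reply, sizeof(reply), &reply_fd, request);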
+
+  // Similar to SendRecvMsg(), but |recvmsg_flags| lets the caller control the
+  // flags passed to the recvmsg(2) call.
+  static ssize_t SendRecvMsgWithFlags(int fd,
+                                      uint8_t* reply,
+                                      unsigned reply_len,
+                                      int recvmsg_flags,
+                                      int* result_fd,
+                                      const Pickle& request);
+#endif  // !defined(OS_NACL_NONSFI)
+ private:
+  // Similar to RecvMsg, but allows specifying |flags| for recvmsg(2).
+  static ssize_t RecvMsgWithFlags(int fd,
+                                  void* msg,
+                                  size_t length,
+                                  int flags,
+                                  std::vector<ScopedFD>* fds,
+                                  ProcessId* pid);
+};
+
+}  // namespace base
+
+#endif  // BASE_POSIX_UNIX_DOMAIN_SOCKET_H_
diff --git a/base/posix/unix_domain_socket_unittest.cc b/base/posix/unix_domain_socket_unittest.cc
new file mode 100644
index 0000000..453064f
--- /dev/null
+++ b/base/posix/unix_domain_socket_unittest.cc
@@ -0,0 +1,183 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "build/build_config.h"
+
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/files/file_util.h"
+#include "base/files/scoped_file.h"
+#include "base/location.h"
+#include "base/pickle.h"
+#include "base/posix/unix_domain_socket.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/thread.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+// Callers should use ASSERT_NO_FATAL_FAILURE with this function, to ensure
+// that execution is aborted if an assertion fails inside it.
+void CreateSocketPair(int fds[2]) {
+#if defined(OS_MACOSX)
+  // Mac OS does not support SOCK_SEQPACKET.
+  int flags = SOCK_STREAM;
+#else
+  int flags = SOCK_SEQPACKET;
+#endif
+  ASSERT_EQ(0, socketpair(AF_UNIX, flags, 0, fds));
+#if defined(OS_MACOSX)
+  // On OSX an attempt to read or write to a closed socket may generate a
+  // SIGPIPE rather than returning -1; setting the SO_NOSIGPIPE option
+  // prevents this.
+  int nosigpipe = 1;
+  ASSERT_EQ(0, setsockopt(fds[0], SOL_SOCKET, SO_NOSIGPIPE, &nosigpipe,
+                          sizeof(nosigpipe)));
+  ASSERT_EQ(0, setsockopt(fds[1], SOL_SOCKET, SO_NOSIGPIPE, &nosigpipe,
+                          sizeof(nosigpipe)));
+#endif
+}
+
+TEST(UnixDomainSocketTest, SendRecvMsgAbortOnReplyFDClose) {
+  Thread message_thread("UnixDomainSocketTest");
+  ASSERT_TRUE(message_thread.Start());
+  int fds[2];
+  ASSERT_NO_FATAL_FAILURE(CreateSocketPair(fds));
+  ScopedFD scoped_fd0(fds[0]);
+  ScopedFD scoped_fd1(fds[1]);
+
+  // Have the thread send a synchronous message via the socket.
+  Pickle request;
+  message_thread.task_runner()->PostTask(
+      FROM_HERE, BindOnce(IgnoreResult(&UnixDomainSocket::SendRecvMsg), fds[1],
+                          nullptr, 0U, nullptr, request));
+
+  // Receive the message.
+  std::vector<ScopedFD> message_fds;
+  uint8_t buffer[16];
+  ASSERT_EQ(
+      static_cast<int>(request.size()),
+      UnixDomainSocket::RecvMsg(fds[0], buffer, sizeof(buffer), &message_fds));
+  ASSERT_EQ(1U, message_fds.size());
+
+  // Close the reply FD.
+  message_fds.clear();
+
+  // Check that the thread didn't get blocked.
+  WaitableEvent event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                      WaitableEvent::InitialState::NOT_SIGNALED);
+  message_thread.task_runner()->PostTask(
+      FROM_HERE, BindOnce(&WaitableEvent::Signal, Unretained(&event)));
+  ASSERT_TRUE(event.TimedWait(TimeDelta::FromMilliseconds(5000)));
+}
+
+TEST(UnixDomainSocketTest, SendRecvMsgAvoidsSIGPIPE) {
+  // Make sure SIGPIPE isn't being ignored.
+  struct sigaction act = {}, oldact;
+  act.sa_handler = SIG_DFL;
+  ASSERT_EQ(0, sigaction(SIGPIPE, &act, &oldact));
+  int fds[2];
+  ASSERT_NO_FATAL_FAILURE(CreateSocketPair(fds));
+  ScopedFD scoped_fd1(fds[1]);
+  ASSERT_EQ(0, IGNORE_EINTR(close(fds[0])));
+
+  // Send a synchronous message via the socket. Unless the message is sent
+  // with MSG_NOSIGNAL, this will result in SIGPIPE.
+  Pickle request;
+  ASSERT_EQ(
+      -1, UnixDomainSocket::SendRecvMsg(fds[1], nullptr, 0U, nullptr, request));
+  ASSERT_EQ(EPIPE, errno);
+  // Restore the SIGPIPE handler.
+  ASSERT_EQ(0, sigaction(SIGPIPE, &oldact, nullptr));
+}
+
+// Simple sanity check within a single process that receiving PIDs works.
+TEST(UnixDomainSocketTest, RecvPid) {
+  int fds[2];
+  ASSERT_NO_FATAL_FAILURE(CreateSocketPair(fds));
+  ScopedFD recv_sock(fds[0]);
+  ScopedFD send_sock(fds[1]);
+
+  ASSERT_TRUE(UnixDomainSocket::EnableReceiveProcessId(recv_sock.get()));
+
+  static const char kHello[] = "hello";
+  ASSERT_TRUE(UnixDomainSocket::SendMsg(send_sock.get(), kHello, sizeof(kHello),
+                                        std::vector<int>()));
+
+  // Extra receiving buffer space to make sure we really received only
+  // sizeof(kHello) bytes and it wasn't just truncated to fit the buffer.
+  char buf[sizeof(kHello) + 1];
+  ProcessId sender_pid;
+  std::vector<ScopedFD> fd_vec;
+  const ssize_t nread = UnixDomainSocket::RecvMsgWithPid(
+      recv_sock.get(), buf, sizeof(buf), &fd_vec, &sender_pid);
+  ASSERT_EQ(sizeof(kHello), static_cast<size_t>(nread));
+  ASSERT_EQ(0, memcmp(buf, kHello, sizeof(kHello)));
+  ASSERT_EQ(0U, fd_vec.size());
+
+  ASSERT_EQ(getpid(), sender_pid);
+}
+
+// Same as above, but send the max number of file descriptors too.
+TEST(UnixDomainSocketTest, RecvPidWithMaxDescriptors) {
+  int fds[2];
+  ASSERT_NO_FATAL_FAILURE(CreateSocketPair(fds));
+  ScopedFD recv_sock(fds[0]);
+  ScopedFD send_sock(fds[1]);
+
+  ASSERT_TRUE(UnixDomainSocket::EnableReceiveProcessId(recv_sock.get()));
+
+  static const char kHello[] = "hello";
+  std::vector<int> send_fds(UnixDomainSocket::kMaxFileDescriptors,
+                            send_sock.get());
+  ASSERT_TRUE(UnixDomainSocket::SendMsg(send_sock.get(), kHello, sizeof(kHello),
+                                        send_fds));
+
+  // Extra receiving buffer space to make sure we really received only
+  // sizeof(kHello) bytes and it wasn't just truncated to fit the buffer.
+  char buf[sizeof(kHello) + 1];
+  ProcessId sender_pid;
+  std::vector<ScopedFD> recv_fds;
+  const ssize_t nread = UnixDomainSocket::RecvMsgWithPid(
+      recv_sock.get(), buf, sizeof(buf), &recv_fds, &sender_pid);
+  ASSERT_EQ(sizeof(kHello), static_cast<size_t>(nread));
+  ASSERT_EQ(0, memcmp(buf, kHello, sizeof(kHello)));
+  ASSERT_EQ(UnixDomainSocket::kMaxFileDescriptors, recv_fds.size());
+
+  ASSERT_EQ(getpid(), sender_pid);
+}
+
+// Check that RecvMsgWithPid doesn't DCHECK fail when reading EOF from a
+// disconnected socket.
+TEST(UnixDomainSocketTest, RecvPidDisconnectedSocket) {
+  int fds[2];
+  ASSERT_NO_FATAL_FAILURE(CreateSocketPair(fds));
+  ScopedFD recv_sock(fds[0]);
+  ScopedFD send_sock(fds[1]);
+
+  ASSERT_TRUE(UnixDomainSocket::EnableReceiveProcessId(recv_sock.get()));
+
+  send_sock.reset();
+
+  char ch;
+  ProcessId sender_pid;
+  std::vector<ScopedFD> recv_fds;
+  const ssize_t nread = UnixDomainSocket::RecvMsgWithPid(
+      recv_sock.get(), &ch, sizeof(ch), &recv_fds, &sender_pid);
+  ASSERT_EQ(0, nread);
+  ASSERT_EQ(-1, sender_pid);
+  ASSERT_EQ(0U, recv_fds.size());
+}
+
+}  // namespace
+
+}  // namespace base
diff --git a/base/post_task_and_reply_with_result_internal.h b/base/post_task_and_reply_with_result_internal.h
new file mode 100644
index 0000000..6f50de8
--- /dev/null
+++ b/base/post_task_and_reply_with_result_internal.h
@@ -0,0 +1,34 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_POST_TASK_AND_REPLY_WITH_RESULT_INTERNAL_H_
+#define BASE_POST_TASK_AND_REPLY_WITH_RESULT_INTERNAL_H_
+
+#include <utility>
+
+#include "base/callback.h"
+
+namespace base {
+
+namespace internal {
+
+// Adapts a function that produces a result via a return value to
+// one that returns via an output parameter.
+template <typename ReturnType>
+void ReturnAsParamAdapter(OnceCallback<ReturnType()> func, ReturnType* result) {
+  *result = std::move(func).Run();
+}
+
+// Adapts a T* result to a callback that expects a T.
+template <typename TaskReturnType, typename ReplyArgType>
+void ReplyAdapter(OnceCallback<void(ReplyArgType)> callback,
+                  TaskReturnType* result) {
+  std::move(callback).Run(std::move(*result));
+}
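+
+// Note: these two adapters are composed by PostTaskAndReplyWithResult() (see
+// base/task_runner_util.h): the posted task runs ReturnAsParamAdapter to store
+// the result on the heap, and the reply runs ReplyAdapter to consume it.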
+
+}  // namespace internal
+
+}  // namespace base
+
+#endif  // BASE_POST_TASK_AND_REPLY_WITH_RESULT_INTERNAL_H_
diff --git a/base/power_monitor/power_monitor.cc b/base/power_monitor/power_monitor.cc
new file mode 100644
index 0000000..30e06a2
--- /dev/null
+++ b/base/power_monitor/power_monitor.cc
@@ -0,0 +1,65 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/power_monitor/power_monitor.h"
+
+#include <utility>
+
+#include "base/power_monitor/power_monitor_source.h"
+
+namespace base {
+
+static PowerMonitor* g_power_monitor = nullptr;
+
+PowerMonitor::PowerMonitor(std::unique_ptr<PowerMonitorSource> source)
+    : observers_(new ObserverListThreadSafe<PowerObserver>()),
+      source_(std::move(source)) {
+  DCHECK(!g_power_monitor);
+  g_power_monitor = this;
+}
+
+PowerMonitor::~PowerMonitor() {
+  DCHECK_EQ(this, g_power_monitor);
+  g_power_monitor = nullptr;
+}
+
+// static
+PowerMonitor* PowerMonitor::Get() {
+  return g_power_monitor;
+}
+
+void PowerMonitor::AddObserver(PowerObserver* obs) {
+  observers_->AddObserver(obs);
+}
+
+void PowerMonitor::RemoveObserver(PowerObserver* obs) {
+  observers_->RemoveObserver(obs);
+}
+
+PowerMonitorSource* PowerMonitor::Source() {
+  return source_.get();
+}
+
+bool PowerMonitor::IsOnBatteryPower() {
+  return source_->IsOnBatteryPower();
+}
+
+void PowerMonitor::NotifyPowerStateChange(bool battery_in_use) {
+  DVLOG(1) << "PowerStateChange: " << (battery_in_use ? "On" : "Off")
+           << " battery";
+  observers_->Notify(FROM_HERE, &PowerObserver::OnPowerStateChange,
+                     battery_in_use);
+}
+
+void PowerMonitor::NotifySuspend() {
+  DVLOG(1) << "Power Suspending";
+  observers_->Notify(FROM_HERE, &PowerObserver::OnSuspend);
+}
+
+void PowerMonitor::NotifyResume() {
+  DVLOG(1) << "Power Resuming";
+  observers_->Notify(FROM_HERE, &PowerObserver::OnResume);
+}
+
+}  // namespace base
diff --git a/base/power_monitor/power_monitor.h b/base/power_monitor/power_monitor.h
new file mode 100644
index 0000000..e025b32
--- /dev/null
+++ b/base/power_monitor/power_monitor.h
@@ -0,0 +1,55 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_POWER_MONITOR_POWER_MONITOR_H_
+#define BASE_POWER_MONITOR_POWER_MONITOR_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/observer_list_threadsafe.h"
+#include "base/power_monitor/power_observer.h"
+
+namespace base {
+
+class PowerMonitorSource;
+
+// A class used to monitor power state changes and notify observers about
+// change events.
+class BASE_EXPORT PowerMonitor {
+ public:
+  // Takes ownership of |source|.
+  explicit PowerMonitor(std::unique_ptr<PowerMonitorSource> source);
+  ~PowerMonitor();
+
+  // Get the process-wide PowerMonitor (if not present, returns NULL).
+  static PowerMonitor* Get();
+
+  // Add and remove an observer.
+  // Can be called from any thread.
+  // Must not be called from within a notification callback.
+  void AddObserver(PowerObserver* observer);
+  void RemoveObserver(PowerObserver* observer);
+
+  // Is the computer currently on battery power.
+  bool IsOnBatteryPower();
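+
+  // Typical observer wiring (a sketch; MyObserver is a hypothetical
+  // base::PowerObserver implementation):
+  //   MyObserver observer;
+  //   if (base::PowerMonitor* monitor = base::PowerMonitor::Get())
+  //     monitor->AddObserver(&observer);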
+
+ private:
+  friend class PowerMonitorSource;
+
+  PowerMonitorSource* Source();
+
+  void NotifyPowerStateChange(bool battery_in_use);
+  void NotifySuspend();
+  void NotifyResume();
+
+  scoped_refptr<ObserverListThreadSafe<PowerObserver>> observers_;
+  std::unique_ptr<PowerMonitorSource> source_;
+
+  DISALLOW_COPY_AND_ASSIGN(PowerMonitor);
+};
+
+}  // namespace base
+
+#endif  // BASE_POWER_MONITOR_POWER_MONITOR_H_
diff --git a/base/power_monitor/power_monitor_device_source.cc b/base/power_monitor/power_monitor_device_source.cc
new file mode 100644
index 0000000..5df5800
--- /dev/null
+++ b/base/power_monitor/power_monitor_device_source.cc
@@ -0,0 +1,28 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/power_monitor/power_monitor_device_source.h"
+
+namespace base {
+
+PowerMonitorDeviceSource::PowerMonitorDeviceSource() {
+#if defined(OS_MACOSX)
+  PlatformInit();
+#endif
+
+#if defined(OS_WIN) || defined(OS_MACOSX)
+  // Provide the correct battery status if possible. Other platforms, such as
+  // Android and ChromeOS, will update their status once their backends are
+  // actually initialized.
+  SetInitialOnBatteryPowerState(IsOnBatteryPowerImpl());
+#endif
+}
+
+PowerMonitorDeviceSource::~PowerMonitorDeviceSource() {
+#if defined(OS_MACOSX)
+  PlatformDestroy();
+#endif
+}
+
+}  // namespace base
diff --git a/base/power_monitor/power_monitor_device_source.h b/base/power_monitor/power_monitor_device_source.h
new file mode 100644
index 0000000..1e2c885
--- /dev/null
+++ b/base/power_monitor/power_monitor_device_source.h
@@ -0,0 +1,98 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_POWER_MONITOR_POWER_MONITOR_DEVICE_SOURCE_H_
+#define BASE_POWER_MONITOR_POWER_MONITOR_DEVICE_SOURCE_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/power_monitor/power_monitor_source.h"
+#include "base/power_monitor/power_observer.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#endif  // OS_WIN
+
+#if defined(OS_IOS)
+#include <objc/runtime.h>
+#endif  // OS_IOS
+
+namespace base {
+
+// A PowerMonitorSource that monitors platform power state changes (battery
+// status, suspend, and resume) and reports them to the PowerMonitor.
+class BASE_EXPORT PowerMonitorDeviceSource : public PowerMonitorSource {
+ public:
+  PowerMonitorDeviceSource();
+  ~PowerMonitorDeviceSource() override;
+
+#if defined(OS_MACOSX)
+  // Allocate system resources needed by the PowerMonitor class.
+  //
+  // This function must be called before instantiating an instance of the class
+  // and before the Sandbox is initialized.
+#if !defined(OS_IOS)
+  static void AllocateSystemIOPorts();
+#else
+  static void AllocateSystemIOPorts() {}
+#endif  // OS_IOS
+#endif  // OS_MACOSX
+
+#if defined(OS_CHROMEOS)
+  // On Chrome OS, Chrome receives power-related events from powerd, the system
+  // power daemon, via D-Bus signals received on the UI thread. base can't
+  // directly depend on that code, so this class instead exposes static methods
+  // so that events can be passed in.
+  static void SetPowerSource(bool on_battery);
+  static void HandleSystemSuspending();
+  static void HandleSystemResumed();
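+
+  // For instance (a sketch), the powerd D-Bus signal handler outside of base
+  // would forward events like:
+  //   base::PowerMonitorDeviceSource::SetPowerSource(on_battery);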
+#endif
+
+ private:
+#if defined(OS_WIN)
+  // Represents a message-only window for power message handling on Windows.
+  // Only allow PowerMonitor to create it.
+  class PowerMessageWindow {
+   public:
+    PowerMessageWindow();
+    ~PowerMessageWindow();
+
+   private:
+    static LRESULT CALLBACK WndProcThunk(HWND hwnd,
+                                         UINT message,
+                                         WPARAM wparam,
+                                         LPARAM lparam);
+    // Instance of the module containing the window procedure.
+    HMODULE instance_;
+    // A hidden message-only window.
+    HWND message_hwnd_;
+  };
+#endif  // OS_WIN
+
+#if defined(OS_MACOSX)
+  void PlatformInit();
+  void PlatformDestroy();
+#endif
+
+  // Platform-specific method to check whether the system is currently
+  // running on battery power.  Returns true if running on batteries,
+  // false otherwise.
+  bool IsOnBatteryPowerImpl() override;
+
+#if defined(OS_IOS)
+  // Holds pointers to system event notification observers.
+  std::vector<id> notification_observers_;
+#endif
+
+#if defined(OS_WIN)
+  PowerMessageWindow power_message_window_;
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(PowerMonitorDeviceSource);
+};
+
+}  // namespace base
+
+#endif  // BASE_POWER_MONITOR_POWER_MONITOR_DEVICE_SOURCE_H_
diff --git a/base/power_monitor/power_monitor_device_source_android.cc b/base/power_monitor/power_monitor_device_source_android.cc
new file mode 100644
index 0000000..7688513
--- /dev/null
+++ b/base/power_monitor/power_monitor_device_source_android.cc
@@ -0,0 +1,39 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/power_monitor/power_monitor.h"
+#include "base/power_monitor/power_monitor_device_source.h"
+#include "base/power_monitor/power_monitor_source.h"
+#include "jni/PowerMonitor_jni.h"
+
+namespace base {
+
+// A helper function which is a friend of PowerMonitorSource.
+void ProcessPowerEventHelper(PowerMonitorSource::PowerEvent event) {
+  PowerMonitorSource::ProcessPowerEvent(event);
+}
+
+namespace android {
+
+// Native implementation of PowerMonitor.java. Note: This will be invoked by
+// PowerMonitor.java shortly after startup to set the correct initial value for
+// "is on battery power."
+void JNI_PowerMonitor_OnBatteryChargingChanged(
+    JNIEnv* env,
+    const JavaParamRef<jclass>& clazz) {
+  ProcessPowerEventHelper(PowerMonitorSource::POWER_STATE_EVENT);
+}
+
+// Note: Android does not have the concept of suspend / resume as it's known by
+// other platforms. Thus we do not send Suspend/Resume notifications. See
+// http://crbug.com/644515
+
+}  // namespace android
+
+bool PowerMonitorDeviceSource::IsOnBatteryPowerImpl() {
+  JNIEnv* env = base::android::AttachCurrentThread();
+  return base::android::Java_PowerMonitor_isBatteryPower(env);
+}
+
+}  // namespace base
diff --git a/base/power_monitor/power_monitor_device_source_chromeos.cc b/base/power_monitor/power_monitor_device_source_chromeos.cc
new file mode 100644
index 0000000..c3466ee
--- /dev/null
+++ b/base/power_monitor/power_monitor_device_source_chromeos.cc
@@ -0,0 +1,40 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/power_monitor/power_monitor.h"
+#include "base/power_monitor/power_monitor_device_source.h"
+#include "base/power_monitor/power_monitor_source.h"
+
+namespace base {
+
+namespace {
+
+// The most-recently-seen power source.
+bool g_on_battery = false;
+
+}  // namespace
+
+// static
+void PowerMonitorDeviceSource::SetPowerSource(bool on_battery) {
+  if (on_battery != g_on_battery) {
+    g_on_battery = on_battery;
+    ProcessPowerEvent(POWER_STATE_EVENT);
+  }
+}
+
+// static
+void PowerMonitorDeviceSource::HandleSystemSuspending() {
+  ProcessPowerEvent(SUSPEND_EVENT);
+}
+
+// static
+void PowerMonitorDeviceSource::HandleSystemResumed() {
+  ProcessPowerEvent(RESUME_EVENT);
+}
+
+bool PowerMonitorDeviceSource::IsOnBatteryPowerImpl() {
+  return g_on_battery;
+}
+
+}  // namespace base
diff --git a/base/power_monitor/power_monitor_device_source_ios.mm b/base/power_monitor/power_monitor_device_source_ios.mm
new file mode 100644
index 0000000..3e86b2e
--- /dev/null
+++ b/base/power_monitor/power_monitor_device_source_ios.mm
@@ -0,0 +1,45 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/power_monitor/power_monitor_device_source.h"
+
+#import <UIKit/UIKit.h>
+
+namespace base {
+
+bool PowerMonitorDeviceSource::IsOnBatteryPowerImpl() {
+  NOTIMPLEMENTED();
+  return false;
+}
+
+void PowerMonitorDeviceSource::PlatformInit() {
+  NSNotificationCenter* nc = [NSNotificationCenter defaultCenter];
+  id foreground =
+      [nc addObserverForName:UIApplicationWillEnterForegroundNotification
+                      object:nil
+                       queue:nil
+                  usingBlock:^(NSNotification* notification) {
+                      ProcessPowerEvent(RESUME_EVENT);
+                  }];
+  id background =
+      [nc addObserverForName:UIApplicationDidEnterBackgroundNotification
+                      object:nil
+                       queue:nil
+                  usingBlock:^(NSNotification* notification) {
+                      ProcessPowerEvent(SUSPEND_EVENT);
+                  }];
+  notification_observers_.push_back(foreground);
+  notification_observers_.push_back(background);
+}
+
+void PowerMonitorDeviceSource::PlatformDestroy() {
+  NSNotificationCenter* nc = [NSNotificationCenter defaultCenter];
+  for (id observer : notification_observers_) {
+    [nc removeObserver:observer];
+  }
+  notification_observers_.clear();
+}
+
+}  // namespace base
diff --git a/base/power_monitor/power_monitor_device_source_mac.mm b/base/power_monitor/power_monitor_device_source_mac.mm
new file mode 100644
index 0000000..be2b8b9
--- /dev/null
+++ b/base/power_monitor/power_monitor_device_source_mac.mm
@@ -0,0 +1,155 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Implementation based on sample code from
+// http://developer.apple.com/library/mac/#qa/qa1340/_index.html.
+
+#include "base/power_monitor/power_monitor_device_source.h"
+
+#include "base/mac/foundation_util.h"
+#include "base/mac/scoped_cftyperef.h"
+#include "base/power_monitor/power_monitor.h"
+#include "base/power_monitor/power_monitor_source.h"
+
+#include <IOKit/IOMessage.h>
+#include <IOKit/ps/IOPSKeys.h>
+#include <IOKit/ps/IOPowerSources.h>
+#include <IOKit/pwr_mgt/IOPMLib.h>
+
+namespace base {
+
+void ProcessPowerEventHelper(PowerMonitorSource::PowerEvent event) {
+  PowerMonitorSource::ProcessPowerEvent(event);
+}
+
+bool PowerMonitorDeviceSource::IsOnBatteryPowerImpl() {
+  base::ScopedCFTypeRef<CFTypeRef> info(IOPSCopyPowerSourcesInfo());
+  base::ScopedCFTypeRef<CFArrayRef> power_sources_list(
+      IOPSCopyPowerSourcesList(info));
+
+  const CFIndex count = CFArrayGetCount(power_sources_list);
+  for (CFIndex i = 0; i < count; ++i) {
+    const CFDictionaryRef description = IOPSGetPowerSourceDescription(
+        info, CFArrayGetValueAtIndex(power_sources_list, i));
+    if (!description)
+      continue;
+
+    CFStringRef current_state = base::mac::GetValueFromDictionary<CFStringRef>(
+        description, CFSTR(kIOPSPowerSourceStateKey));
+
+    if (!current_state)
+      continue;
+
+    // We only report "on battery power" if no source is on AC power.
+    if (CFStringCompare(current_state, CFSTR(kIOPSBatteryPowerValue), 0) !=
+        kCFCompareEqualTo) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+namespace {
+
+io_connect_t g_system_power_io_port = 0;
+IONotificationPortRef g_notification_port_ref = 0;
+io_object_t g_notifier_object = 0;
+CFRunLoopSourceRef g_battery_status_ref = 0;
+
+void BatteryEventCallback(void*) {
+  ProcessPowerEventHelper(PowerMonitorSource::POWER_STATE_EVENT);
+}
+
+void SystemPowerEventCallback(void*,
+                              io_service_t service,
+                              natural_t message_type,
+                              void* message_argument) {
+  switch (message_type) {
+    // If this message is not handled, the system may delay sleep for 30
+    // seconds.
+    case kIOMessageCanSystemSleep:
+      IOAllowPowerChange(g_system_power_io_port,
+          reinterpret_cast<intptr_t>(message_argument));
+      break;
+    case kIOMessageSystemWillSleep:
+      ProcessPowerEventHelper(base::PowerMonitorSource::SUSPEND_EVENT);
+      IOAllowPowerChange(g_system_power_io_port,
+          reinterpret_cast<intptr_t>(message_argument));
+      break;
+
+    case kIOMessageSystemWillPowerOn:
+      ProcessPowerEventHelper(PowerMonitorSource::RESUME_EVENT);
+      break;
+  }
+}
+
+}  // namespace
+
+// This code cannot live in the constructor because PlatformInit() requires an
+// active runloop, while the IO port needs to be allocated at sandbox
+// initialization time, before there is a runloop. See crbug.com/83783.
+
+// static
+void PowerMonitorDeviceSource::AllocateSystemIOPorts() {
+  DCHECK_EQ(g_system_power_io_port, 0u);
+
+  // Notification port allocated by IORegisterForSystemPower.
+  g_system_power_io_port = IORegisterForSystemPower(
+      NULL, &g_notification_port_ref, SystemPowerEventCallback,
+      &g_notifier_object);
+
+  DCHECK_NE(g_system_power_io_port, 0u);
+}
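+
+// Example (hypothetical initialization order implied by the comment above;
+// the call sites are illustrative, not part of this CL):
+//   // Early, e.g. during sandbox setup, before any runloop exists:
+//   base::PowerMonitorDeviceSource::AllocateSystemIOPorts();
+//   ...
+//   // Later, once a runloop is available:
+//   base::PowerMonitor monitor(
+//       std::make_unique<base::PowerMonitorDeviceSource>());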
+
+void PowerMonitorDeviceSource::PlatformInit() {
+  // Need to call AllocateSystemIOPorts() before creating a PowerMonitor
+  // object.
+  DCHECK_NE(g_system_power_io_port, 0u);
+  if (g_system_power_io_port == 0)
+    return;
+
+  // Add the notification port and battery monitor to the application runloop
+  CFRunLoopAddSource(CFRunLoopGetCurrent(), IONotificationPortGetRunLoopSource(
+                                                g_notification_port_ref),
+                     kCFRunLoopCommonModes);
+
+  base::ScopedCFTypeRef<CFRunLoopSourceRef> battery_status_ref(
+      IOPSNotificationCreateRunLoopSource(BatteryEventCallback, nullptr));
+  CFRunLoopAddSource(CFRunLoopGetCurrent(), battery_status_ref,
+                     kCFRunLoopDefaultMode);
+  g_battery_status_ref = battery_status_ref.release();
+}
+
+void PowerMonitorDeviceSource::PlatformDestroy() {
+  DCHECK_NE(g_system_power_io_port, 0u);
+  if (g_system_power_io_port == 0)
+    return;
+
+  // Remove the sleep notification port from the application runloop
+  CFRunLoopRemoveSource(
+      CFRunLoopGetCurrent(),
+      IONotificationPortGetRunLoopSource(g_notification_port_ref),
+      kCFRunLoopCommonModes);
+
+  base::ScopedCFTypeRef<CFRunLoopSourceRef> battery_status_ref(
+      g_battery_status_ref);
+  CFRunLoopRemoveSource(CFRunLoopGetCurrent(), g_battery_status_ref,
+                        kCFRunLoopDefaultMode);
+  g_battery_status_ref = 0;
+
+  // Deregister for system sleep notifications
+  IODeregisterForSystemPower(&g_notifier_object);
+
+  // IORegisterForSystemPower implicitly opens the Root Power Domain IOService,
+  // so we close it here.
+  IOServiceClose(g_system_power_io_port);
+
+  g_system_power_io_port = 0;
+
+  // Destroy the notification port allocated by IORegisterForSystemPower.
+  IONotificationPortDestroy(g_notification_port_ref);
+}
+
+}  // namespace base
diff --git a/base/power_monitor/power_monitor_device_source_stub.cc b/base/power_monitor/power_monitor_device_source_stub.cc
new file mode 100644
index 0000000..f24e5b2
--- /dev/null
+++ b/base/power_monitor/power_monitor_device_source_stub.cc
@@ -0,0 +1,14 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/power_monitor/power_monitor_device_source.h"
+
+namespace base {
+
+bool PowerMonitorDeviceSource::IsOnBatteryPowerImpl() {
+  NOTIMPLEMENTED();
+  return false;
+}
+
+}  // namespace base
diff --git a/base/power_monitor/power_monitor_device_source_win.cc b/base/power_monitor/power_monitor_device_source_win.cc
new file mode 100644
index 0000000..e74be50
--- /dev/null
+++ b/base/power_monitor/power_monitor_device_source_win.cc
@@ -0,0 +1,112 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/power_monitor/power_monitor_device_source.h"
+
+#include "base/message_loop/message_loop.h"
+#include "base/power_monitor/power_monitor.h"
+#include "base/power_monitor/power_monitor_source.h"
+#include "base/win/wrapped_window_proc.h"
+
+namespace base {
+
+void ProcessPowerEventHelper(PowerMonitorSource::PowerEvent event) {
+  PowerMonitorSource::ProcessPowerEvent(event);
+}
+
+namespace {
+
+const wchar_t kWindowClassName[] = L"Base_PowerMessageWindow";
+
+void ProcessWmPowerBroadcastMessage(WPARAM event_id) {
+  PowerMonitorSource::PowerEvent power_event;
+  switch (event_id) {
+    case PBT_APMPOWERSTATUSCHANGE:  // The power status changed.
+      power_event = PowerMonitorSource::POWER_STATE_EVENT;
+      break;
+    case PBT_APMRESUMEAUTOMATIC:  // Resume from suspend.
+      // We deliberately do not notify for PBT_APMRESUMESUSPEND (user-initiated
+      // resume from suspend): when it occurs, it is always sent as a second
+      // event after PBT_APMRESUMEAUTOMATIC, so notifying for it would produce
+      // a duplicate resume notification.
+      power_event = PowerMonitorSource::RESUME_EVENT;
+      break;
+    case PBT_APMSUSPEND:  // System has been suspended.
+      power_event = PowerMonitorSource::SUSPEND_EVENT;
+      break;
+    default:
+      return;
+
+      // Other Power Events:
+      // PBT_APMBATTERYLOW - removed in Vista.
+      // PBT_APMOEMEVENT - removed in Vista.
+      // PBT_APMQUERYSUSPEND - removed in Vista.
+      // PBT_APMQUERYSUSPENDFAILED - removed in Vista.
+      // PBT_APMRESUMECRITICAL - removed in Vista.
+      // PBT_POWERSETTINGCHANGE - user changed the power settings.
+  }
+
+  ProcessPowerEventHelper(power_event);
+}
+
+}  // namespace
+
+// Function to query the system to see if it is currently running on
+// battery power.  Returns true if running on battery.
+bool PowerMonitorDeviceSource::IsOnBatteryPowerImpl() {
+  SYSTEM_POWER_STATUS status;
+  if (!GetSystemPowerStatus(&status)) {
+    DPLOG(ERROR) << "GetSystemPowerStatus failed";
+    return false;
+  }
+  return (status.ACLineStatus == 0);
+}
+
+PowerMonitorDeviceSource::PowerMessageWindow::PowerMessageWindow()
+    : instance_(NULL), message_hwnd_(NULL) {
+  if (!MessageLoopForUI::IsCurrent()) {
+    // Creating this window in (e.g.) a renderer inhibits shutdown on Windows.
+    // See http://crbug.com/230122. TODO(vandebo): http://crbug.com/236031
+    DLOG(ERROR)
+        << "Cannot create windows on non-UI thread, power monitor disabled!";
+    return;
+  }
+  WNDCLASSEX window_class;
+  base::win::InitializeWindowClass(
+      kWindowClassName,
+      &base::win::WrappedWindowProc<
+          PowerMonitorDeviceSource::PowerMessageWindow::WndProcThunk>,
+      0, 0, 0, NULL, NULL, NULL, NULL, NULL,
+      &window_class);
+  instance_ = window_class.hInstance;
+  ATOM clazz = RegisterClassEx(&window_class);
+  DCHECK(clazz);
+
+  message_hwnd_ = CreateWindowEx(WS_EX_NOACTIVATE, kWindowClassName,
+      NULL, WS_POPUP, 0, 0, 0, 0, NULL, NULL, instance_, NULL);
+}
+
+PowerMonitorDeviceSource::PowerMessageWindow::~PowerMessageWindow() {
+  if (message_hwnd_) {
+    DestroyWindow(message_hwnd_);
+    UnregisterClass(kWindowClassName, instance_);
+  }
+}
+
+// static
+LRESULT CALLBACK PowerMonitorDeviceSource::PowerMessageWindow::WndProcThunk(
+    HWND hwnd,
+    UINT message,
+    WPARAM wparam,
+    LPARAM lparam) {
+  switch (message) {
+    case WM_POWERBROADCAST:
+      ProcessWmPowerBroadcastMessage(wparam);
+      return TRUE;
+    default:
+      return ::DefWindowProc(hwnd, message, wparam, lparam);
+  }
+}
+
+}  // namespace base
diff --git a/base/power_monitor/power_monitor_source.cc b/base/power_monitor/power_monitor_source.cc
new file mode 100644
index 0000000..d4757b0
--- /dev/null
+++ b/base/power_monitor/power_monitor_source.cc
@@ -0,0 +1,69 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/power_monitor/power_monitor_source.h"
+
+#include "base/power_monitor/power_monitor.h"
+#include "build/build_config.h"
+
+namespace base {
+
+PowerMonitorSource::PowerMonitorSource() = default;
+PowerMonitorSource::~PowerMonitorSource() = default;
+
+bool PowerMonitorSource::IsOnBatteryPower() {
+  AutoLock auto_lock(battery_lock_);
+  return on_battery_power_;
+}
+
+void PowerMonitorSource::ProcessPowerEvent(PowerEvent event_id) {
+  PowerMonitor* monitor = PowerMonitor::Get();
+  if (!monitor)
+    return;
+
+  PowerMonitorSource* source = monitor->Source();
+
+  // Suppress duplicate notifications.  Some platforms may
+  // send multiple notifications of the same event.
+  switch (event_id) {
+    case POWER_STATE_EVENT:
+      {
+        bool new_on_battery_power = source->IsOnBatteryPowerImpl();
+        bool changed = false;
+
+        {
+          AutoLock auto_lock(source->battery_lock_);
+          if (source->on_battery_power_ != new_on_battery_power) {
+            changed = true;
+            source->on_battery_power_ = new_on_battery_power;
+          }
+        }
+
+        if (changed)
+          monitor->NotifyPowerStateChange(new_on_battery_power);
+      }
+      break;
+    case RESUME_EVENT:
+      if (source->suspended_) {
+        source->suspended_ = false;
+        monitor->NotifyResume();
+      }
+      break;
+    case SUSPEND_EVENT:
+      if (!source->suspended_) {
+        source->suspended_ = true;
+        monitor->NotifySuspend();
+      }
+      break;
+  }
+}
+
+void PowerMonitorSource::SetInitialOnBatteryPowerState(bool on_battery_power) {
+  // Must only be called before a monitor exists, otherwise the caller should
+  // have just used a normal ProcessPowerEvent(POWER_STATE_EVENT) call.
+  DCHECK(!PowerMonitor::Get());
+  on_battery_power_ = on_battery_power;
+}
+
+}  // namespace base
diff --git a/base/power_monitor/power_monitor_source.h b/base/power_monitor/power_monitor_source.h
new file mode 100644
index 0000000..b69cbf8
--- /dev/null
+++ b/base/power_monitor/power_monitor_source.h
@@ -0,0 +1,70 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_POWER_MONITOR_POWER_MONITOR_SOURCE_H_
+#define BASE_POWER_MONITOR_POWER_MONITOR_SOURCE_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/observer_list_threadsafe.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+
+class PowerMonitor;
+
+// Communicates power state changes to the power monitor.
+class BASE_EXPORT PowerMonitorSource {
+ public:
+  PowerMonitorSource();
+  virtual ~PowerMonitorSource();
+
+  // Normalized list of power events.
+  enum PowerEvent {
+    POWER_STATE_EVENT,  // The Power status of the system has changed.
+    SUSPEND_EVENT,      // The system is being suspended.
+    RESUME_EVENT        // The system is being resumed.
+  };
+
+  // Returns true if the computer is currently running on battery power. Can
+  // be called on any thread.
+  bool IsOnBatteryPower();
+
+ protected:
+  friend class PowerMonitorTest;
+
+  // Friend function that is allowed to access the protected ProcessPowerEvent.
+  friend void ProcessPowerEventHelper(PowerEvent);
+
+  // Get the process-wide PowerMonitorSource (if not present, returns NULL).
+  static PowerMonitorSource* Get();
+
+  // ProcessPowerEvent should only be called from a single thread, most likely
+  // the UI thread or, in child processes, the IO thread.
+  static void ProcessPowerEvent(PowerEvent event_id);
+
+  // Platform-specific method to check whether the system is currently
+  // running on battery power.  Returns true if running on batteries,
+  // false otherwise.
+  virtual bool IsOnBatteryPowerImpl() = 0;
+
+  // Sets the initial state for |on_battery_power_|, which defaults to false
+  // since not all implementations can provide the value at construction. May
+  // only be called before a base::PowerMonitor has been created.
+  void SetInitialOnBatteryPowerState(bool on_battery_power);
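+  //
+  // Example (hypothetical sketch; MyPlatformSource and QueryBatteryStateFromOS
+  // are illustrative names, not part of base/):
+  //   MyPlatformSource::MyPlatformSource() {
+  //     SetInitialOnBatteryPowerState(QueryBatteryStateFromOS());
+  //   }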
+
+ private:
+  bool on_battery_power_ = false;
+  bool suspended_ = false;
+
+  // This lock guards access to on_battery_power_, to ensure that
+  // IsOnBatteryPower can be called from any thread.
+  Lock battery_lock_;
+
+  DISALLOW_COPY_AND_ASSIGN(PowerMonitorSource);
+};
+
+}  // namespace base
+
+#endif  // BASE_POWER_MONITOR_POWER_MONITOR_SOURCE_H_
diff --git a/base/power_monitor/power_monitor_unittest.cc b/base/power_monitor/power_monitor_unittest.cc
new file mode 100644
index 0000000..7f2a84b
--- /dev/null
+++ b/base/power_monitor/power_monitor_unittest.cc
@@ -0,0 +1,83 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/macros.h"
+#include "base/message_loop/message_loop.h"
+#include "base/power_monitor/power_monitor.h"
+#include "base/test/power_monitor_test_base.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+class PowerMonitorTest : public testing::Test {
+ protected:
+  PowerMonitorTest() {
+    power_monitor_source_ = new PowerMonitorTestSource();
+    power_monitor_.reset(new PowerMonitor(
+        std::unique_ptr<PowerMonitorSource>(power_monitor_source_)));
+  }
+  ~PowerMonitorTest() override = default;
+
+  PowerMonitorTestSource* source() { return power_monitor_source_; }
+  PowerMonitor* monitor() { return power_monitor_.get(); }
+
+ private:
+  base::MessageLoop message_loop_;
+  PowerMonitorTestSource* power_monitor_source_;
+  std::unique_ptr<PowerMonitor> power_monitor_;
+
+  DISALLOW_COPY_AND_ASSIGN(PowerMonitorTest);
+};
+
+// PowerMonitorSource is tightly coupled with the PowerMonitor, so this test
+// covers both classes.
+TEST_F(PowerMonitorTest, PowerNotifications) {
+  const int kObservers = 5;
+
+  PowerMonitorTestObserver observers[kObservers];
+  for (int index = 0; index < kObservers; ++index)
+    monitor()->AddObserver(&observers[index]);
+
+  // Sending resume when not suspended should have no effect.
+  source()->GenerateResumeEvent();
+  EXPECT_EQ(observers[0].resumes(), 0);
+
+  // Pretend we suspended.
+  source()->GenerateSuspendEvent();
+  // Ensure all observers were notified of the event
+  for (int index = 0; index < kObservers; ++index)
+    EXPECT_EQ(observers[index].suspends(), 1);
+
+  // Send a second suspend notification.  This should be suppressed.
+  source()->GenerateSuspendEvent();
+  EXPECT_EQ(observers[0].suspends(), 1);
+
+  // Pretend we were awakened.
+  source()->GenerateResumeEvent();
+  EXPECT_EQ(observers[0].resumes(), 1);
+
+  // Send a duplicate resume notification.  This should be suppressed.
+  source()->GenerateResumeEvent();
+  EXPECT_EQ(observers[0].resumes(), 1);
+
+  // Pretend the device has gone on battery power
+  source()->GeneratePowerStateEvent(true);
+  EXPECT_EQ(observers[0].power_state_changes(), 1);
+  EXPECT_EQ(observers[0].last_power_state(), true);
+
+  // Repeated indications the device is on battery power should be suppressed.
+  source()->GeneratePowerStateEvent(true);
+  EXPECT_EQ(observers[0].power_state_changes(), 1);
+
+  // Pretend the device has gone off battery power
+  source()->GeneratePowerStateEvent(false);
+  EXPECT_EQ(observers[0].power_state_changes(), 2);
+  EXPECT_EQ(observers[0].last_power_state(), false);
+
+  // Repeated indications the device is off battery power should be suppressed.
+  source()->GeneratePowerStateEvent(false);
+  EXPECT_EQ(observers[0].power_state_changes(), 2);
+}
+
+}  // namespace base
diff --git a/base/power_monitor/power_observer.h b/base/power_monitor/power_observer.h
new file mode 100644
index 0000000..0142b2a
--- /dev/null
+++ b/base/power_monitor/power_observer.h
@@ -0,0 +1,31 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_POWER_MONITOR_POWER_OBSERVER_H_
+#define BASE_POWER_MONITOR_POWER_OBSERVER_H_
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+
+namespace base {
+
+class BASE_EXPORT PowerObserver {
+ public:
+  // Notification of a change in power status of the computer, such
+  // as from switching between battery and A/C power.
+  virtual void OnPowerStateChange(bool on_battery_power) {}
+
+  // Notification that the system is suspending.
+  virtual void OnSuspend() {}
+
+  // Notification that the system is resuming.
+  virtual void OnResume() {}
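+
+  // Example (hypothetical usage sketch; BatteryLogger is an illustrative
+  // name, not part of base/):
+  //   class BatteryLogger : public base::PowerObserver {
+  //    public:
+  //     void OnPowerStateChange(bool on_battery_power) override {
+  //       DLOG(INFO) << "on battery: " << on_battery_power;
+  //     }
+  //   };
+  // An instance would typically be registered with
+  //   base::PowerMonitor::Get()->AddObserver(&logger);
+  // and removed with RemoveObserver() before it is destroyed.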
+
+ protected:
+  virtual ~PowerObserver() = default;
+};
+
+}  // namespace base
+
+#endif  // BASE_POWER_MONITOR_POWER_OBSERVER_H_
diff --git a/base/process/internal_aix.cc b/base/process/internal_aix.cc
new file mode 100644
index 0000000..7f03aee
--- /dev/null
+++ b/base/process/internal_aix.cc
@@ -0,0 +1,155 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/internal_aix.h"
+
+#include <sys/procfs.h>
+
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <unistd.h>
+
+#include <map>
+#include <string>
+#include <vector>
+
+#include "base/files/file_util.h"
+#include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_util.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/time/time.h"
+
+// Not defined on AIX by default.
+#ifndef NAME_MAX
+#define NAME_MAX 255
+#endif
+
+namespace base {
+namespace internalAIX {
+
+const char kProcDir[] = "/proc";
+
+const char kStatFile[] = "psinfo";  // AIX specific
+
+FilePath GetProcPidDir(pid_t pid) {
+  return FilePath(kProcDir).Append(IntToString(pid));
+}
+
+pid_t ProcDirSlotToPid(const char* d_name) {
+  int i;
+  for (i = 0; i < NAME_MAX && d_name[i]; ++i) {
+    if (!IsAsciiDigit(d_name[i])) {
+      return 0;
+    }
+  }
+  if (i == NAME_MAX)
+    return 0;
+
+  // Read the process's command line.
+  pid_t pid;
+  std::string pid_string(d_name);
+  if (!StringToInt(pid_string, &pid)) {
+    NOTREACHED();
+    return 0;
+  }
+  return pid;
+}
+
+bool ReadProcFile(const FilePath& file, struct psinfo* info) {
+  // Synchronously reading files in /proc is safe.
+  ThreadRestrictions::ScopedAllowIO allow_io;
+  int fileId = open(file.value().c_str(), O_RDONLY);
+  if (fileId < 0) {
+    DLOG(WARNING) << "Failed to open " << file.MaybeAsASCII()
+                  << " errno = " << errno;
+    return false;
+  }
+
+  if (read(fileId, info, sizeof(*info)) < 0) {
+    DLOG(WARNING) << "Failed to read " << file.MaybeAsASCII()
+                  << " errno = " << errno;
+    close(fileId);
+    return false;
+  }
+
+  // Close the descriptor on the success path as well, so it is not leaked.
+  close(fileId);
+  return true;
+}
+
+bool ReadProcStats(pid_t pid, struct psinfo* info) {
+  FilePath stat_file = internalAIX::GetProcPidDir(pid).Append(kStatFile);
+  return ReadProcFile(stat_file, info);
+}
+
+bool ParseProcStats(struct psinfo& stats_data,
+                    std::vector<std::string>* proc_stats) {
+  // The stat file is formatted as:
+  // struct psinfo
+  // see -
+  // https://www.ibm.com/support/knowledgecenter/ssw_aix_71/com.ibm.aix.files/proc.htm
+  proc_stats->clear();
+  // PID.
+  proc_stats->push_back(IntToString(stats_data.pr_pid));
+  // Process name without parentheses. // 1
+  proc_stats->push_back(stats_data.pr_fname);
+  // Process State (Not available)  // 2
+  proc_stats->push_back("0");
+  // Process id of parent  // 3
+  proc_stats->push_back(IntToString(stats_data.pr_ppid));
+
+  // Process group id // 4
+  proc_stats->push_back(IntToString(stats_data.pr_pgid));
+
+  return true;
+}
+
+typedef std::map<std::string, std::string> ProcStatMap;
+void ParseProcStat(const std::string& contents, ProcStatMap* output) {
+  StringPairs key_value_pairs;
+  SplitStringIntoKeyValuePairs(contents, ' ', '\n', &key_value_pairs);
+  for (size_t i = 0; i < key_value_pairs.size(); ++i) {
+    output->insert(key_value_pairs[i]);
+  }
+}
+
+int64_t GetProcStatsFieldAsInt64(const std::vector<std::string>& proc_stats,
+                                 ProcStatsFields field_num) {
+  DCHECK_GE(field_num, VM_PPID);
+  CHECK_LT(static_cast<size_t>(field_num), proc_stats.size());
+
+  int64_t value;
+  return StringToInt64(proc_stats[field_num], &value) ? value : 0;
+}
+
+size_t GetProcStatsFieldAsSizeT(const std::vector<std::string>& proc_stats,
+                                ProcStatsFields field_num) {
+  DCHECK_GE(field_num, VM_PPID);
+  CHECK_LT(static_cast<size_t>(field_num), proc_stats.size());
+
+  size_t value;
+  return StringToSizeT(proc_stats[field_num], &value) ? value : 0;
+}
+
+int64_t ReadProcStatsAndGetFieldAsInt64(pid_t pid, ProcStatsFields field_num) {
+  struct psinfo stats_data;
+  if (!ReadProcStats(pid, &stats_data))
+    return 0;
+  std::vector<std::string> proc_stats;
+  if (!ParseProcStats(stats_data, &proc_stats))
+    return 0;
+
+  return GetProcStatsFieldAsInt64(proc_stats, field_num);
+}
+
+size_t ReadProcStatsAndGetFieldAsSizeT(pid_t pid, ProcStatsFields field_num) {
+  struct psinfo stats_data;
+  if (!ReadProcStats(pid, &stats_data))
+    return 0;
+  std::vector<std::string> proc_stats;
+  if (!ParseProcStats(stats_data, &proc_stats))
+    return 0;
+  return GetProcStatsFieldAsSizeT(proc_stats, field_num);
+}
+
+}  // namespace internalAIX
+}  // namespace base
diff --git a/base/process/internal_aix.h b/base/process/internal_aix.h
new file mode 100644
index 0000000..d9694ff
--- /dev/null
+++ b/base/process/internal_aix.h
@@ -0,0 +1,84 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains internal routines that are called by other files in
+// base/process/.
+
+#ifndef BASE_PROCESS_INTERNAL_AIX_H_
+#define BASE_PROCESS_INTERNAL_AIX_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/procfs.h>
+#include <unistd.h>
+
+#include <string>
+#include <vector>
+
+#include "base/files/file_path.h"
+
+namespace base {
+
+namespace internalAIX {
+
+// "/proc"
+extern const char kProcDir[];
+
+// "psinfo"
+extern const char kStatFile[];
+
+// Returns a FilePath to "/proc/pid".
+base::FilePath GetProcPidDir(pid_t pid);
+
+// Take a /proc directory entry named |d_name|, and if it is the directory for
+// a process, convert it to a pid_t.
+// Returns 0 on failure.
+// e.g. /proc/self/ will return 0, whereas /proc/1234 will return 1234.
+pid_t ProcDirSlotToPid(const char* d_name);
+
+// Reads /proc/<pid>/psinfo into |info|. Returns true if the file can be read.
+bool ReadProcStats(pid_t pid, struct psinfo* info);
+
+// Takes |stats_data| and populates |proc_stats| with the fields extracted
+// from the psinfo struct. Returns true if successful.
+bool ParseProcStats(struct psinfo& stats_data,
+                    std::vector<std::string>* proc_stats);
+
+// Fields from /proc/<pid>/psinfo.
+// If the ordering ever changes, carefully review functions that use these
+// values.
+// For AIX this is the bare minimum that we need. Most of the commented out
+// fields can still be extracted but currently none of these are required.
+enum ProcStatsFields {
+  VM_COMM = 1,  // Filename of executable, without parentheses.
+  //  VM_STATE          = 2,   // Letter indicating the state of the process.
+  VM_PPID = 3,  // PID of the parent.
+  VM_PGRP = 4,  // Process group id.
+  //  VM_UTIME          = 13,  // Time scheduled in user mode in clock ticks.
+  //  VM_STIME          = 14,  // Time scheduled in kernel mode in clock ticks.
+  //  VM_NUMTHREADS     = 19,  // Number of threads.
+  //  VM_STARTTIME      = 21,  // The time the process started in clock ticks.
+  //  VM_VSIZE          = 22,  // Virtual memory size in bytes.
+  //  VM_RSS            = 23,  // Resident Set Size in pages.
+};
+
+// Reads the |field_num|th field from |proc_stats|. Returns 0 on failure.
+// This version does not handle the first 3 values, since the first value is
+// simply |pid|, and the next two values are strings.
+int64_t GetProcStatsFieldAsInt64(const std::vector<std::string>& proc_stats,
+                                 ProcStatsFields field_num);
+
+// Same as GetProcStatsFieldAsInt64(), but for size_t values.
+size_t GetProcStatsFieldAsSizeT(const std::vector<std::string>& proc_stats,
+                                ProcStatsFields field_num);
+
+// Convenience wrapper around GetProcStatsFieldAsInt64(), ParseProcStats() and
+// ReadProcStats(). See GetProcStatsFieldAsInt64() for details.
+int64_t ReadProcStatsAndGetFieldAsInt64(pid_t pid, ProcStatsFields field_num);
+
+// Same as ReadProcStatsAndGetFieldAsInt64() but for size_t values.
+size_t ReadProcStatsAndGetFieldAsSizeT(pid_t pid, ProcStatsFields field_num);
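+
+// Example (hypothetical usage sketch):
+//   pid_t parent = static_cast<pid_t>(
+//       internalAIX::ReadProcStatsAndGetFieldAsInt64(pid, internalAIX::VM_PPID));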
+
+}  // namespace internalAIX
+}  // namespace base
+
+#endif  // BASE_PROCESS_INTERNAL_AIX_H_
diff --git a/base/process/internal_linux.cc b/base/process/internal_linux.cc
new file mode 100644
index 0000000..7f38fff
--- /dev/null
+++ b/base/process/internal_linux.cc
@@ -0,0 +1,231 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/internal_linux.h"
+
+#include <limits.h>
+#include <unistd.h>
+
+#include <map>
+#include <string>
+#include <vector>
+
+#include "base/files/file_util.h"
+#include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_util.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/time/time.h"
+
+// Not defined on AIX by default.
+#if defined(OS_AIX)
+#define NAME_MAX 255
+#endif
+
+namespace base {
+namespace internal {
+
+const char kProcDir[] = "/proc";
+
+const char kStatFile[] = "stat";
+
+FilePath GetProcPidDir(pid_t pid) {
+  return FilePath(kProcDir).Append(IntToString(pid));
+}
+
+pid_t ProcDirSlotToPid(const char* d_name) {
+  int i;
+  for (i = 0; i < NAME_MAX && d_name[i]; ++i) {
+    if (!IsAsciiDigit(d_name[i])) {
+      return 0;
+    }
+  }
+  if (i == NAME_MAX)
+    return 0;
+
+  // Read the process's command line.
+  pid_t pid;
+  std::string pid_string(d_name);
+  if (!StringToInt(pid_string, &pid)) {
+    NOTREACHED();
+    return 0;
+  }
+  return pid;
+}
+
+bool ReadProcFile(const FilePath& file, std::string* buffer) {
+  buffer->clear();
+  // Synchronously reading files in /proc is safe.
+  ThreadRestrictions::ScopedAllowIO allow_io;
+
+  if (!ReadFileToString(file, buffer)) {
+    DLOG(WARNING) << "Failed to read " << file.MaybeAsASCII();
+    return false;
+  }
+  return !buffer->empty();
+}
+
+bool ReadProcStats(pid_t pid, std::string* buffer) {
+  FilePath stat_file = internal::GetProcPidDir(pid).Append(kStatFile);
+  return ReadProcFile(stat_file, buffer);
+}
+
+bool ParseProcStats(const std::string& stats_data,
+                    std::vector<std::string>* proc_stats) {
+  // |stats_data| may be empty if the process disappeared somehow.
+  // e.g. http://crbug.com/145811
+  if (stats_data.empty())
+    return false;
+
+  // The stat file is formatted as:
+  // pid (process name) data1 data2 .... dataN
+  // Look for the closing paren by scanning backwards, to avoid being fooled by
+  // processes with ')' in the name.
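+  // For example (illustrative), a line such as
+  //   42 (a (weird) name) R 1 42 ...
+  // yields proc_stats = {"42", "a (weird) name", "R", "1", "42", ...}.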
+  size_t open_parens_idx = stats_data.find(" (");
+  size_t close_parens_idx = stats_data.rfind(") ");
+  if (open_parens_idx == std::string::npos ||
+      close_parens_idx == std::string::npos ||
+      open_parens_idx > close_parens_idx) {
+    DLOG(WARNING) << "Failed to find matched parens in '" << stats_data << "'";
+    NOTREACHED();
+    return false;
+  }
+  open_parens_idx++;
+
+  proc_stats->clear();
+  // PID.
+  proc_stats->push_back(stats_data.substr(0, open_parens_idx));
+  // Process name without parentheses.
+  proc_stats->push_back(
+      stats_data.substr(open_parens_idx + 1,
+                        close_parens_idx - (open_parens_idx + 1)));
+
+  // Split the rest.
+  std::vector<std::string> other_stats = SplitString(
+      stats_data.substr(close_parens_idx + 2), " ",
+      base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
+  for (size_t i = 0; i < other_stats.size(); ++i)
+    proc_stats->push_back(other_stats[i]);
+  return true;
+}
+
+typedef std::map<std::string, std::string> ProcStatMap;
+void ParseProcStat(const std::string& contents, ProcStatMap* output) {
+  StringPairs key_value_pairs;
+  SplitStringIntoKeyValuePairs(contents, ' ', '\n', &key_value_pairs);
+  for (size_t i = 0; i < key_value_pairs.size(); ++i) {
+    output->insert(key_value_pairs[i]);
+  }
+}
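+
+// For example (illustrative), /proc/stat contents like
+//   cpu  100 20 30 400
+//   btime 1234567890
+// parse into {"cpu": "100 20 30 400", "btime": "1234567890"}; each line is
+// split at its first space into a key and a value.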
+
+int64_t GetProcStatsFieldAsInt64(const std::vector<std::string>& proc_stats,
+                                 ProcStatsFields field_num) {
+  DCHECK_GE(field_num, VM_PPID);
+  CHECK_LT(static_cast<size_t>(field_num), proc_stats.size());
+
+  int64_t value;
+  return StringToInt64(proc_stats[field_num], &value) ? value : 0;
+}
+
+size_t GetProcStatsFieldAsSizeT(const std::vector<std::string>& proc_stats,
+                                ProcStatsFields field_num) {
+  DCHECK_GE(field_num, VM_PPID);
+  CHECK_LT(static_cast<size_t>(field_num), proc_stats.size());
+
+  size_t value;
+  return StringToSizeT(proc_stats[field_num], &value) ? value : 0;
+}
+
+int64_t ReadStatFileAndGetFieldAsInt64(const FilePath& stat_file,
+                                       ProcStatsFields field_num) {
+  std::string stats_data;
+  if (!ReadProcFile(stat_file, &stats_data))
+    return 0;
+  std::vector<std::string> proc_stats;
+  if (!ParseProcStats(stats_data, &proc_stats))
+    return 0;
+  return GetProcStatsFieldAsInt64(proc_stats, field_num);
+}
+
+int64_t ReadProcStatsAndGetFieldAsInt64(pid_t pid, ProcStatsFields field_num) {
+  FilePath stat_file = internal::GetProcPidDir(pid).Append(kStatFile);
+  return ReadStatFileAndGetFieldAsInt64(stat_file, field_num);
+}
+
+int64_t ReadProcSelfStatsAndGetFieldAsInt64(ProcStatsFields field_num) {
+  FilePath stat_file = FilePath(kProcDir).Append("self").Append(kStatFile);
+  return ReadStatFileAndGetFieldAsInt64(stat_file, field_num);
+}
+
+size_t ReadProcStatsAndGetFieldAsSizeT(pid_t pid,
+                                       ProcStatsFields field_num) {
+  std::string stats_data;
+  if (!ReadProcStats(pid, &stats_data))
+    return 0;
+  std::vector<std::string> proc_stats;
+  if (!ParseProcStats(stats_data, &proc_stats))
+    return 0;
+  return GetProcStatsFieldAsSizeT(proc_stats, field_num);
+}
+
+Time GetBootTime() {
+  FilePath path("/proc/stat");
+  std::string contents;
+  if (!ReadProcFile(path, &contents))
+    return Time();
+  ProcStatMap proc_stat;
+  ParseProcStat(contents, &proc_stat);
+  ProcStatMap::const_iterator btime_it = proc_stat.find("btime");
+  if (btime_it == proc_stat.end())
+    return Time();
+  int btime;
+  if (!StringToInt(btime_it->second, &btime))
+    return Time();
+  return Time::FromTimeT(btime);
+}
+
+TimeDelta GetUserCpuTimeSinceBoot() {
+  FilePath path("/proc/stat");
+  std::string contents;
+  if (!ReadProcFile(path, &contents))
+    return TimeDelta();
+
+  ProcStatMap proc_stat;
+  ParseProcStat(contents, &proc_stat);
+  ProcStatMap::const_iterator cpu_it = proc_stat.find("cpu");
+  if (cpu_it == proc_stat.end())
+    return TimeDelta();
+
+  std::vector<std::string> cpu = SplitString(
+      cpu_it->second, kWhitespaceASCII, TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
+
+  // ParseProcStat() strips the leading "cpu" key, so the first two fields of
+  // the value are the user and nice times.
+  if (cpu.size() < 2)
+    return TimeDelta();
+
+  uint64_t user;
+  uint64_t nice;
+  if (!StringToUint64(cpu[0], &user) || !StringToUint64(cpu[1], &nice))
+    return TimeDelta();
+
+  return ClockTicksToTimeDelta(user + nice);
+}
+
+TimeDelta ClockTicksToTimeDelta(int clock_ticks) {
+  // This queries the /proc-specific scaling factor which is
+  // conceptually the system hertz.  To dump this value on another
+  // system, try
+  //   od -t dL /proc/self/auxv
+  // and look for the number after 17 in the output; mine is
+  //   0000040          17         100           3   134512692
+  // which means the answer is 100.
+  // It may be the case that this value is always 100.
+  static const int kHertz = sysconf(_SC_CLK_TCK);
+
+  return TimeDelta::FromMicroseconds(
+      Time::kMicrosecondsPerSecond * clock_ticks / kHertz);
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/process/internal_linux.h b/base/process/internal_linux.h
new file mode 100644
index 0000000..d8904fd
--- /dev/null
+++ b/base/process/internal_linux.h
@@ -0,0 +1,100 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains internal routines that are called by other files in
+// base/process/.
+
+#ifndef BASE_PROCESS_INTERNAL_LINUX_H_
+#define BASE_PROCESS_INTERNAL_LINUX_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include <unistd.h>
+
+#include <string>
+#include <vector>
+
+#include "base/files/file_path.h"
+
+namespace base {
+
+class Time;
+class TimeDelta;
+
+namespace internal {
+
+// "/proc"
+extern const char kProcDir[];
+
+// "stat"
+extern const char kStatFile[];
+
+// Returns a FilePath to "/proc/pid".
+base::FilePath GetProcPidDir(pid_t pid);
+
+// Take a /proc directory entry named |d_name|, and if it is the directory for
+// a process, convert it to a pid_t.
+// Returns 0 on failure.
+// e.g. /proc/self/ will return 0, whereas /proc/1234 will return 1234.
+pid_t ProcDirSlotToPid(const char* d_name);
+
+// Reads /proc/<pid>/stat into |buffer|. Returns true if the file can be read
+// and is non-empty.
+bool ReadProcStats(pid_t pid, std::string* buffer);
+
+// Takes |stats_data| and populates |proc_stats| with the values split by
+// spaces. Taking into account the 2nd field may, in itself, contain spaces.
+// Returns true if successful.
+bool ParseProcStats(const std::string& stats_data,
+                    std::vector<std::string>* proc_stats);
+
+// Fields from /proc/<pid>/stat, 0-based. See man 5 proc.
+// If the ordering ever changes, carefully review functions that use these
+// values.
+enum ProcStatsFields {
+  VM_COMM = 1,         // Filename of executable, without parentheses.
+  VM_STATE = 2,        // Letter indicating the state of the process.
+  VM_PPID = 3,         // PID of the parent.
+  VM_PGRP = 4,         // Process group id.
+  VM_MINFLT = 9,       // Minor page fault count excluding children.
+  VM_MAJFLT = 11,      // Major page fault count excluding children.
+  VM_UTIME = 13,       // Time scheduled in user mode in clock ticks.
+  VM_STIME = 14,       // Time scheduled in kernel mode in clock ticks.
+  VM_NUMTHREADS = 19,  // Number of threads.
+  VM_STARTTIME = 21,   // The time the process started in clock ticks.
+  VM_VSIZE = 22,       // Virtual memory size in bytes.
+  VM_RSS = 23,         // Resident Set Size in pages.
+};
+
+// Reads the |field_num|th field from |proc_stats|. Returns 0 on failure.
+// This version does not handle the first 3 values, since the first value is
+// simply |pid|, and the next two values are strings.
+int64_t GetProcStatsFieldAsInt64(const std::vector<std::string>& proc_stats,
+                                 ProcStatsFields field_num);
+
+// Same as GetProcStatsFieldAsInt64(), but for size_t values.
+size_t GetProcStatsFieldAsSizeT(const std::vector<std::string>& proc_stats,
+                                ProcStatsFields field_num);
+
+// Convenience wrappers around GetProcStatsFieldAsInt64(), ParseProcStats() and
+// ReadProcStats(). See GetProcStatsFieldAsInt64() for details.
+int64_t ReadStatFileAndGetFieldAsInt64(const FilePath& stat_file,
+                                       ProcStatsFields field_num);
+int64_t ReadProcStatsAndGetFieldAsInt64(pid_t pid, ProcStatsFields field_num);
+int64_t ReadProcSelfStatsAndGetFieldAsInt64(ProcStatsFields field_num);
+
+// Same as ReadProcStatsAndGetFieldAsInt64() but for size_t values.
+size_t ReadProcStatsAndGetFieldAsSizeT(pid_t pid,
+                                       ProcStatsFields field_num);
+
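+// Example (hypothetical usage sketch):
+//   int64_t num_threads =
+//       internal::ReadProcStatsAndGetFieldAsInt64(pid, internal::VM_NUMTHREADS);
+//   size_t rss_pages =
+//       internal::ReadProcStatsAndGetFieldAsSizeT(pid, internal::VM_RSS);
+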
+// Returns the time that the OS started. Clock ticks are relative to this.
+Time GetBootTime();
+
+// Returns the amount of time spent in user space since boot across all CPUs.
+TimeDelta GetUserCpuTimeSinceBoot();
+
+// Converts Linux clock ticks to a wall time delta.
+TimeDelta ClockTicksToTimeDelta(int clock_ticks);
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_PROCESS_INTERNAL_LINUX_H_
diff --git a/base/process/kill.cc b/base/process/kill.cc
new file mode 100644
index 0000000..0332ac0
--- /dev/null
+++ b/base/process/kill.cc
@@ -0,0 +1,61 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/kill.h"
+
+#include "base/bind.h"
+#include "base/process/process_iterator.h"
+#include "base/task_scheduler/post_task.h"
+#include "base/time/time.h"
+
+namespace base {
+
+bool KillProcesses(const FilePath::StringType& executable_name,
+                   int exit_code,
+                   const ProcessFilter* filter) {
+  bool result = true;
+  NamedProcessIterator iter(executable_name, filter);
+  while (const ProcessEntry* entry = iter.NextProcessEntry()) {
+    Process process = Process::Open(entry->pid());
+    // Sometimes opening the process fails, which would trigger a DCHECK in
+    // process.Terminate(). The process may have exited on its own between the
+    // time the process list was enumerated and the time we tried to open it.
+    if (!process.IsValid()) {
+      result = false;
+      continue;
+    }
+    result &= process.Terminate(exit_code, true);
+  }
+  return result;
+}
+
+#if defined(OS_WIN) || defined(OS_FUCHSIA)
+// Common implementation for platforms under which |process| is a handle to
+// the process, rather than an identifier that must be "reaped".
+void EnsureProcessTerminated(Process process) {
+  DCHECK(!process.is_current());
+
+  if (process.WaitForExitWithTimeout(TimeDelta(), nullptr))
+    return;
+
+  PostDelayedTaskWithTraits(
+      FROM_HERE,
+      {TaskPriority::BACKGROUND, TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN},
+      BindOnce(
+          [](Process process) {
+            if (process.WaitForExitWithTimeout(TimeDelta(), nullptr))
+              return;
+#if defined(OS_WIN)
+            process.Terminate(win::kProcessKilledExitCode, false);
+#else
+            process.Terminate(-1, false);
+#endif
+          },
+          std::move(process)),
+      TimeDelta::FromSeconds(2));
+}
+#endif  // defined(OS_WIN) || defined(OS_FUCHSIA)
+
+}  // namespace base
diff --git a/base/process/kill.h b/base/process/kill.h
new file mode 100644
index 0000000..005b72e
--- /dev/null
+++ b/base/process/kill.h
@@ -0,0 +1,155 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains routines to kill processes and get the exit code and
+// termination status.
+
+#ifndef BASE_PROCESS_KILL_H_
+#define BASE_PROCESS_KILL_H_
+
+#include "base/files/file_path.h"
+#include "base/process/process.h"
+#include "base/process/process_handle.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+
+namespace base {
+
+class ProcessFilter;
+
+#if defined(OS_WIN)
+namespace win {
+
+// See definition in sandbox/win/src/sandbox_types.h
+const DWORD kSandboxFatalMemoryExceeded = 7012;
+
+// Exit codes with special meanings on Windows.
+const DWORD kNormalTerminationExitCode = 0;
+const DWORD kDebuggerInactiveExitCode = 0xC0000354;
+const DWORD kKeyboardInterruptExitCode = 0xC000013A;
+const DWORD kDebuggerTerminatedExitCode = 0x40010004;
+
+// This exit code is used by the Windows task manager when it kills a
+// process. Its value is obviously not unique, and it is surprising that the
+// task manager uses it, but it is common practice on Windows to treat the
+// process going away with this exit code as an indication that the task
+// manager killed it.
+const DWORD kProcessKilledExitCode = 1;
+
+}  // namespace win
+
+#endif  // OS_WIN
+
+// Return status values from GetTerminationStatus. Don't use these as exit
+// code arguments to KillProcess*(); use platform/application-specific values
+// instead.
+enum TerminationStatus {
+  TERMINATION_STATUS_NORMAL_TERMINATION,   // zero exit status
+  TERMINATION_STATUS_ABNORMAL_TERMINATION, // non-zero exit status
+  TERMINATION_STATUS_PROCESS_WAS_KILLED,   // e.g. SIGKILL or task manager kill
+  TERMINATION_STATUS_PROCESS_CRASHED,      // e.g. Segmentation fault
+  TERMINATION_STATUS_STILL_RUNNING,        // child hasn't exited yet
+#if defined(OS_CHROMEOS)
+  // Used for the case when oom-killer kills a process on ChromeOS.
+  TERMINATION_STATUS_PROCESS_WAS_KILLED_BY_OOM,
+#endif
+#if defined(OS_ANDROID)
+  // On Android, processes are spawned from the system Zygote and we do not
+  // get the termination status. We can't know for sure whether the
+  // termination was a crash or an OOM kill, but we can use the status of the
+  // strong process bindings as a hint.
+#endif
+  TERMINATION_STATUS_LAUNCH_FAILED,        // child process never launched
+  TERMINATION_STATUS_OOM,                  // Process died due to oom
+  TERMINATION_STATUS_MAX_ENUM
+};
+
+// Attempts to kill all the processes on the current machine that were launched
+// from the given executable name, ending them with the given exit code.  If
+// filter is non-null, then only processes selected by the filter are killed.
+// Returns true if all processes were able to be killed off, false if at least
+// one couldn't be killed.
+BASE_EXPORT bool KillProcesses(const FilePath::StringType& executable_name,
+                               int exit_code,
+                               const ProcessFilter* filter);
+
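+// Example (hypothetical): end every remaining "my_helper" process with exit
+// code 1 and no filter:
+//   base::KillProcesses(FILE_PATH_LITERAL("my_helper"), 1, nullptr);
+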
+#if defined(OS_POSIX)
+// Attempts to kill the process group identified by |process_group_id|. Returns
+// true on success.
+BASE_EXPORT bool KillProcessGroup(ProcessHandle process_group_id);
+#endif  // defined(OS_POSIX)
+
+// Get the termination status of the process by interpreting the
+// circumstances of the child process' death. |exit_code| is set to
+// the status returned by waitpid() on POSIX, and from GetExitCodeProcess() on
+// Windows, and may not be null.  Note that on Linux, this function
+// will only return a useful result the first time it is called after
+// the child exits (because it will reap the child and the information
+// will no longer be available).
+BASE_EXPORT TerminationStatus GetTerminationStatus(ProcessHandle handle,
+                                                   int* exit_code);
+
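+// Example (hypothetical usage sketch):
+//   int exit_code = 0;
+//   base::TerminationStatus status =
+//       base::GetTerminationStatus(process.Handle(), &exit_code);
+//   if (status == base::TERMINATION_STATUS_STILL_RUNNING) {
+//     // The child has not exited yet; |exit_code| is not meaningful.
+//   }
+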
+#if defined(OS_POSIX) && !defined(OS_FUCHSIA)
+// Send a kill signal to the process and then wait for the process to exit
+// and get the termination status.
+//
+// This is used in situations where it is believed that the process is dead
+// or dying (because communication with the child process has been cut).
+// In order to avoid erroneously returning that the process is still running
+// because the kernel is still cleaning it up, this will wait for the process
+// to terminate. In order to avoid the risk of hanging while waiting for the
+// process to terminate, send a SIGKILL to the process before waiting for the
+// termination status.
+//
+// Note that it is not an option to call WaitForExitCode and then
+// GetTerminationStatus as the child will be reaped when WaitForExitCode
+// returns, and this information will be lost.
+//
+BASE_EXPORT TerminationStatus GetKnownDeadTerminationStatus(
+    ProcessHandle handle, int* exit_code);
+
+#if defined(OS_LINUX)
+// Spawns a thread to wait asynchronously for the child |process| to exit
+// and then reaps it.
+BASE_EXPORT void EnsureProcessGetsReaped(Process process);
+#endif  // defined(OS_LINUX)
+#endif  // defined(OS_POSIX) && !defined(OS_FUCHSIA)
+
+// Registers |process| to be asynchronously monitored for termination, forcibly
+// terminated if necessary, and reaped on exit. The caller should have signalled
+// |process| to exit before calling this API. The API will allow a couple of
+// seconds grace period before forcibly terminating |process|.
+// TODO(https://crbug.com/806451): The Mac implementation currently blocks the
+// calling thread for up to two seconds.
+BASE_EXPORT void EnsureProcessTerminated(Process process);
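+//
+// Example (hypothetical): after signalling the child to exit through some
+// application-level channel, hand ownership off:
+//   base::EnsureProcessTerminated(std::move(child_process));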
+
+// These are only sparingly used, and not needed on Fuchsia. They could be
+// implemented if necessary.
+#if !defined(OS_FUCHSIA)
+// Wait for all the processes based on the named executable to exit.  If filter
+// is non-null, then only processes selected by the filter are waited on.
+// Returns after all processes have exited or wait_milliseconds have expired.
+// Returns true if all the processes exited, false otherwise.
+BASE_EXPORT bool WaitForProcessesToExit(
+    const FilePath::StringType& executable_name,
+    base::TimeDelta wait,
+    const ProcessFilter* filter);
+
+// Waits a certain amount of time (can be 0) for all the processes with a given
+// executable name to exit, then kills off any of them that are still around.
+// If filter is non-null, then only processes selected by the filter are waited
+// on.  Killed processes are ended with the given exit code.  Returns false if
+// any processes needed to be killed, true if they all exited cleanly within
+// the wait_milliseconds delay.
+BASE_EXPORT bool CleanupProcesses(const FilePath::StringType& executable_name,
+                                  base::TimeDelta wait,
+                                  int exit_code,
+                                  const ProcessFilter* filter);
+#endif  // !defined(OS_FUCHSIA)
+
+}  // namespace base
+
+#endif  // BASE_PROCESS_KILL_H_
diff --git a/base/process/kill_fuchsia.cc b/base/process/kill_fuchsia.cc
new file mode 100644
index 0000000..a862fc3
--- /dev/null
+++ b/base/process/kill_fuchsia.cc
@@ -0,0 +1,53 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/kill.h"
+
+#include <zircon/syscalls.h>
+
+#include "base/process/process_iterator.h"
+#include "base/task_scheduler/post_task.h"
+#include "base/threading/platform_thread.h"
+
+namespace base {
+
+bool KillProcessGroup(ProcessHandle process_group_id) {
+  // |process_group_id| is really a job on Fuchsia.
+  zx_status_t status = zx_task_kill(process_group_id);
+  DLOG_IF(ERROR, status != ZX_OK)
+      << "unable to terminate job " << process_group_id;
+  return status == ZX_OK;
+}
+
+TerminationStatus GetTerminationStatus(ProcessHandle handle, int* exit_code) {
+  DCHECK(exit_code);
+
+  zx_info_process_t process_info;
+  zx_status_t status =
+      zx_object_get_info(handle, ZX_INFO_PROCESS, &process_info,
+                         sizeof(process_info), nullptr, nullptr);
+  if (status != ZX_OK) {
+    DLOG(ERROR) << "unable to get termination status for " << handle;
+    *exit_code = 0;
+    return TERMINATION_STATUS_NORMAL_TERMINATION;
+  }
+  if (!process_info.started) {
+    *exit_code = 0;
+    return TERMINATION_STATUS_LAUNCH_FAILED;
+  }
+  if (!process_info.exited) {
+    *exit_code = 0;
+    return TERMINATION_STATUS_STILL_RUNNING;
+  }
+
+  // TODO(fuchsia): Is there more information about types of crashes, OOM, etc.
+  // available? https://crbug.com/706592.
+
+  *exit_code = process_info.return_code;
+  return process_info.return_code == 0
+             ? TERMINATION_STATUS_NORMAL_TERMINATION
+             : TERMINATION_STATUS_ABNORMAL_TERMINATION;
+}
+
+}  // namespace base
diff --git a/base/process/kill_mac.cc b/base/process/kill_mac.cc
new file mode 100644
index 0000000..0110c90
--- /dev/null
+++ b/base/process/kill_mac.cc
@@ -0,0 +1,173 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/kill.h"
+
+#include <errno.h>
+#include <signal.h>
+#include <sys/event.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+
+#include "base/files/file_util.h"
+#include "base/files/scoped_file.h"
+#include "base/logging.h"
+#include "base/posix/eintr_wrapper.h"
+
+namespace base {
+
+namespace {
+
+const int kWaitBeforeKillSeconds = 2;
+
+// Reap |child| process. This call blocks until completion.
+void BlockingReap(pid_t child) {
+  const pid_t result = HANDLE_EINTR(waitpid(child, NULL, 0));
+  if (result == -1) {
+    DPLOG(ERROR) << "waitpid(" << child << ", NULL, 0)";
+  }
+}
+
+// Waits for |timeout| seconds for the given |child| to exit and reap it. If
+// the child doesn't exit within the time specified, kills it.
+//
+// This function takes two approaches: first, it tries to use kqueue to
+// observe when the process exits. kevent can monitor a kqueue with a
+// timeout, so this method is preferred to wait for a specified period of
+// time. Once the kqueue indicates the process has exited, waitpid will reap
+// the exited child. If the kqueue doesn't provide an exit event notification,
+// before the timeout expires, or if the kqueue fails or misbehaves, the
+// process will be mercilessly killed and reaped.
+//
+// A child process passed to this function may be in one of several states:
+// running, terminated and not yet reaped, and (apparently, and unfortunately)
+// terminated and already reaped. Normally, a process will at least have been
+// asked to exit before this function is called, but this is not required.
+// If a process is terminating and unreaped, there may be a window between the
+// time that kqueue will no longer recognize it and when it becomes an actual
+// zombie that a non-blocking (WNOHANG) waitpid can reap. This condition is
+// detected when kqueue indicates that the process is not running and a
+// non-blocking waitpid fails to reap the process but indicates that it is
+// still running. In this event, a blocking attempt to reap the process
+// collects the known-dying child, preventing zombies from congregating.
+//
+// In the event that the kqueue misbehaves entirely, as it might under a
+// EMFILE condition ("too many open files", or out of file descriptors), this
+// function will forcibly kill and reap the child without delay. This
+// eliminates another potential zombie vector. (If you're out of file
+// descriptors, you're probably deep into something else, but that doesn't
+// mean that zombies should be allowed to kick you while you're down.)
+//
+// The fact that this function seemingly can be called to wait on a child
+// that's not only already terminated but already reaped is a bit of a
+// problem: a reaped child's pid can be reclaimed and may refer to a distinct
+// process in that case. The fact that this function can seemingly be called
+// to wait on a process that's not even a child is also a problem: kqueue will
+// work in that case, but waitpid won't, and killing a non-child might not be
+// the best approach.
+void WaitForChildToDie(pid_t child, int timeout) {
+  DCHECK_GT(child, 0);
+  DCHECK_GT(timeout, 0);
+
+  // DON'T ADD ANY EARLY RETURNS TO THIS FUNCTION without ensuring that
+  // |child| has been reaped. Specifically, even if a kqueue, kevent, or other
+  // call fails, this function should fall back to the last resort of trying
+  // to kill and reap the process. Not observing this rule will resurrect
+  // zombies.
+
+  int result;
+
+  ScopedFD kq(HANDLE_EINTR(kqueue()));
+  if (!kq.is_valid()) {
+    DPLOG(ERROR) << "kqueue()";
+  } else {
+    struct kevent change = {0};
+    EV_SET(&change, child, EVFILT_PROC, EV_ADD, NOTE_EXIT, 0, NULL);
+    result = HANDLE_EINTR(kevent(kq.get(), &change, 1, NULL, 0, NULL));
+
+    if (result == -1) {
+      if (errno != ESRCH) {
+        DPLOG(ERROR) << "kevent (setup " << child << ")";
+      } else {
+        // At this point, one of the following has occurred:
+        // 1. The process has died but has not yet been reaped.
+        // 2. The process has died and has already been reaped.
+        // 3. The process is in the process of dying. It's no longer
+        //    kqueueable, but it may not be waitable yet either. Mark calls
+        //    this case the "zombie death race".
+
+        result = HANDLE_EINTR(waitpid(child, NULL, WNOHANG));
+
+        if (result != 0) {
+          // A positive result indicates case 1. waitpid succeeded and reaped
+          // the child. A result of -1 indicates case 2. The child has already
+          // been reaped. In both of these cases, no further action is
+          // necessary.
+          return;
+        }
+
+        // |result| is 0, indicating case 3. The process will be waitable in
+        // short order. Fall back out of the kqueue code to kill it (for good
+        // measure) and reap it.
+      }
+    } else {
+      // Keep track of the elapsed time to be able to restart kevent if it's
+      // interrupted.
+      TimeDelta remaining_delta = TimeDelta::FromSeconds(timeout);
+      TimeTicks deadline = TimeTicks::Now() + remaining_delta;
+      result = -1;
+      struct kevent event = {0};
+      while (remaining_delta.InMilliseconds() > 0) {
+        const struct timespec remaining_timespec = remaining_delta.ToTimeSpec();
+        result = kevent(kq.get(), NULL, 0, &event, 1, &remaining_timespec);
+        if (result == -1 && errno == EINTR) {
+          remaining_delta = deadline - TimeTicks::Now();
+          result = 0;
+        } else {
+          break;
+        }
+      }
+
+      if (result == -1) {
+        DPLOG(ERROR) << "kevent (wait " << child << ")";
+      } else if (result > 1) {
+        DLOG(ERROR) << "kevent (wait " << child << "): unexpected result "
+                    << result;
+      } else if (result == 1) {
+        if ((event.fflags & NOTE_EXIT) &&
+            (event.ident == static_cast<uintptr_t>(child))) {
+          // The process is dead or dying. This won't block for long, if at
+          // all.
+          BlockingReap(child);
+          return;
+        } else {
+          DLOG(ERROR) << "kevent (wait " << child
+                      << "): unexpected event: fflags=" << event.fflags
+                      << ", ident=" << event.ident;
+        }
+      }
+    }
+  }
+
+  // The child is still alive, or is very freshly dead. Be sure by sending it
+  // a signal. This is safe even if it's freshly dead, because it will be a
+  // zombie (or on the way to zombiedom) and kill will return 0 even if the
+  // signal is not delivered to a live process.
+  result = kill(child, SIGKILL);
+  if (result == -1) {
+    DPLOG(ERROR) << "kill(" << child << ", SIGKILL)";
+  } else {
+    // The child is definitely on the way out now. BlockingReap won't need to
+    // wait for long, if at all.
+    BlockingReap(child);
+  }
+}
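+
+// For reference, the kqueue-then-waitpid pattern above in a minimal
+// standalone form (a sketch only: no base/ helpers, error handling elided):
+//
+//   #include <sys/event.h>
+//
+//   bool WaitViaKqueue(pid_t child, const struct timespec& timeout) {
+//     int kq = kqueue();
+//     if (kq < 0)
+//       return false;
+//     struct kevent change;
+//     EV_SET(&change, child, EVFILT_PROC, EV_ADD, NOTE_EXIT, 0, NULL);
+//     if (kevent(kq, &change, 1, NULL, 0, NULL) == -1) {
+//       close(kq);  // ESRCH here is the "zombie death race" described above.
+//       return false;
+//     }
+//     struct kevent event;
+//     int rv = kevent(kq, NULL, 0, &event, 1, &timeout);
+//     close(kq);
+//     return rv == 1 && (event.fflags & NOTE_EXIT);
+//   }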
+
+}  // namespace
+
+void EnsureProcessTerminated(Process process) {
+  WaitForChildToDie(process.Pid(), kWaitBeforeKillSeconds);
+}
+
+}  // namespace base
diff --git a/base/process/kill_posix.cc b/base/process/kill_posix.cc
new file mode 100644
index 0000000..4b52d8b
--- /dev/null
+++ b/base/process/kill_posix.cc
@@ -0,0 +1,186 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/kill.h"
+
+#include <errno.h>
+#include <signal.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "base/debug/activity_tracker.h"
+#include "base/files/file_util.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/process/process_iterator.h"
+#include "base/task_scheduler/post_task.h"
+#include "base/threading/platform_thread.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace {
+
+TerminationStatus GetTerminationStatusImpl(ProcessHandle handle,
+                                           bool can_block,
+                                           int* exit_code) {
+  DCHECK(exit_code);
+
+  int status = 0;
+  const pid_t result = HANDLE_EINTR(waitpid(handle, &status,
+                                            can_block ? 0 : WNOHANG));
+  if (result == -1) {
+    DPLOG(ERROR) << "waitpid(" << handle << ")";
+    *exit_code = 0;
+    return TERMINATION_STATUS_NORMAL_TERMINATION;
+  } else if (result == 0) {
+    // The child hasn't exited yet.
+    *exit_code = 0;
+    return TERMINATION_STATUS_STILL_RUNNING;
+  }
+
+  *exit_code = status;
+
+  if (WIFSIGNALED(status)) {
+    switch (WTERMSIG(status)) {
+      case SIGABRT:
+      case SIGBUS:
+      case SIGFPE:
+      case SIGILL:
+      case SIGSEGV:
+      case SIGTRAP:
+      case SIGSYS:
+        return TERMINATION_STATUS_PROCESS_CRASHED;
+      case SIGKILL:
+#if defined(OS_CHROMEOS)
+        // On ChromeOS, the only way a process gets killed by SIGKILL
+        // is by the oom-killer.
+        return TERMINATION_STATUS_PROCESS_WAS_KILLED_BY_OOM;
+#endif
+      case SIGINT:
+      case SIGTERM:
+        return TERMINATION_STATUS_PROCESS_WAS_KILLED;
+      default:
+        break;
+    }
+  }
+
+  if (WIFEXITED(status) && WEXITSTATUS(status) != 0)
+    return TERMINATION_STATUS_ABNORMAL_TERMINATION;
+
+  return TERMINATION_STATUS_NORMAL_TERMINATION;
+}
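+
+// For reference, the waitpid() status decoding above in standalone form (a
+// sketch, POSIX only, no base/ types):
+//
+//   #include <signal.h>
+//   #include <sys/wait.h>
+//
+//   const char* Classify(int status) {
+//     if (WIFSIGNALED(status))
+//       return WTERMSIG(status) == SIGKILL ? "killed" : "crashed or killed";
+//     if (WIFEXITED(status) && WEXITSTATUS(status) != 0)
+//       return "abnormal exit";
+//     return "normal exit";
+//   }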
+
+}  // namespace
+
+#if !defined(OS_NACL_NONSFI)
+bool KillProcessGroup(ProcessHandle process_group_id) {
+  bool result = kill(-1 * process_group_id, SIGKILL) == 0;
+  if (!result)
+    DPLOG(ERROR) << "Unable to terminate process group " << process_group_id;
+  return result;
+}
+#endif  // !defined(OS_NACL_NONSFI)
+
+TerminationStatus GetTerminationStatus(ProcessHandle handle, int* exit_code) {
+  return GetTerminationStatusImpl(handle, false /* can_block */, exit_code);
+}
+
+TerminationStatus GetKnownDeadTerminationStatus(ProcessHandle handle,
+                                                int* exit_code) {
+  bool result = kill(handle, SIGKILL) == 0;
+
+  if (!result)
+    DPLOG(ERROR) << "Unable to terminate process " << handle;
+
+  return GetTerminationStatusImpl(handle, true /* can_block */, exit_code);
+}
+
+#if !defined(OS_NACL_NONSFI)
+bool WaitForProcessesToExit(const FilePath::StringType& executable_name,
+                            TimeDelta wait,
+                            const ProcessFilter* filter) {
+  bool result = false;
+
+  // TODO(port): This is inefficient, but works if there are multiple procs.
+  // TODO(port): use waitpid to avoid leaving zombies around
+
+  TimeTicks end_time = TimeTicks::Now() + wait;
+  do {
+    NamedProcessIterator iter(executable_name, filter);
+    if (!iter.NextProcessEntry()) {
+      result = true;
+      break;
+    }
+    PlatformThread::Sleep(TimeDelta::FromMilliseconds(100));
+  } while ((end_time - TimeTicks::Now()) > TimeDelta());
+
+  return result;
+}
+
+bool CleanupProcesses(const FilePath::StringType& executable_name,
+                      TimeDelta wait,
+                      int exit_code,
+                      const ProcessFilter* filter) {
+  bool exited_cleanly = WaitForProcessesToExit(executable_name, wait, filter);
+  if (!exited_cleanly)
+    KillProcesses(executable_name, exit_code, filter);
+  return exited_cleanly;
+}
+
+#if !defined(OS_MACOSX)
+
+namespace {
+
+class BackgroundReaper : public PlatformThread::Delegate {
+ public:
+  BackgroundReaper(base::Process child_process, const TimeDelta& wait_time)
+      : child_process_(std::move(child_process)), wait_time_(wait_time) {}
+
+  void ThreadMain() override {
+    if (!wait_time_.is_zero()) {
+      child_process_.WaitForExitWithTimeout(wait_time_, nullptr);
+      kill(child_process_.Handle(), SIGKILL);
+    }
+    child_process_.WaitForExit(nullptr);
+    delete this;
+  }
+
+ private:
+  Process child_process_;
+  const TimeDelta wait_time_;
+  DISALLOW_COPY_AND_ASSIGN(BackgroundReaper);
+};
+
+}  // namespace
+
+void EnsureProcessTerminated(Process process) {
+  DCHECK(!process.is_current());
+
+  if (process.WaitForExitWithTimeout(TimeDelta(), nullptr))
+    return;
+
+  PlatformThread::CreateNonJoinable(
+      0, new BackgroundReaper(std::move(process), TimeDelta::FromSeconds(2)));
+}
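+
+// Typical use (illustrative): hand off a child that has already been asked to
+// exit; the reaper thread SIGKILLs it if it is still alive after two seconds:
+//   base::EnsureProcessTerminated(std::move(child));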
+
+#if defined(OS_LINUX)
+void EnsureProcessGetsReaped(Process process) {
+  DCHECK(!process.is_current());
+
+  // If the child is already dead, then there's nothing to do.
+  if (process.WaitForExitWithTimeout(TimeDelta(), nullptr))
+    return;
+
+  PlatformThread::CreateNonJoinable(
+      0, new BackgroundReaper(std::move(process), TimeDelta()));
+}
+#endif  // defined(OS_LINUX)
+
+#endif  // !defined(OS_MACOSX)
+#endif  // !defined(OS_NACL_NONSFI)
+
+}  // namespace base
diff --git a/base/process/kill_win.cc b/base/process/kill_win.cc
new file mode 100644
index 0000000..7a66442
--- /dev/null
+++ b/base/process/kill_win.cc
@@ -0,0 +1,116 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/kill.h"
+
+#include <algorithm>
+
+#include <windows.h>
+#include <io.h>
+#include <stdint.h>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/process/memory.h"
+#include "base/process/process_iterator.h"
+
+namespace base {
+
+TerminationStatus GetTerminationStatus(ProcessHandle handle, int* exit_code) {
+  DCHECK(exit_code);
+
+  DWORD tmp_exit_code = 0;
+
+  if (!::GetExitCodeProcess(handle, &tmp_exit_code)) {
+    DPLOG(FATAL) << "GetExitCodeProcess() failed";
+
+    // This really is a random number.  We haven't received any
+    // information about the exit code, presumably because this
+    // process doesn't have permission to get the exit code, or
+    // because of some other cause for GetExitCodeProcess to fail
+    // (MSDN docs don't give the possible failure error codes for
+    // this function, so it could be anything).  But we don't want
+    // to leave exit_code uninitialized, since that could cause
+    // random interpretations of the exit code.  So we assume it
+    // terminated "normally" in this case.
+    *exit_code = win::kNormalTerminationExitCode;
+
+    // Assume the child has exited normally if we can't get the exit
+    // code.
+    return TERMINATION_STATUS_NORMAL_TERMINATION;
+  }
+  if (tmp_exit_code == STILL_ACTIVE) {
+    DWORD wait_result = WaitForSingleObject(handle, 0);
+    if (wait_result == WAIT_TIMEOUT) {
+      *exit_code = wait_result;
+      return TERMINATION_STATUS_STILL_RUNNING;
+    }
+
+    if (wait_result == WAIT_FAILED) {
+      DPLOG(ERROR) << "WaitForSingleObject() failed";
+    } else {
+      DCHECK_EQ(WAIT_OBJECT_0, wait_result);
+
+      // Strange, the process used 0x103 (STILL_ACTIVE) as exit code.
+      NOTREACHED();
+    }
+
+    return TERMINATION_STATUS_ABNORMAL_TERMINATION;
+  }
+
+  *exit_code = tmp_exit_code;
+
+  switch (tmp_exit_code) {
+    case win::kNormalTerminationExitCode:
+      return TERMINATION_STATUS_NORMAL_TERMINATION;
+    case win::kDebuggerInactiveExitCode:    // STATUS_DEBUGGER_INACTIVE.
+    case win::kKeyboardInterruptExitCode:   // Control-C/end session.
+    case win::kDebuggerTerminatedExitCode:  // Debugger terminated process.
+    case win::kProcessKilledExitCode:       // Task manager kill.
+      return TERMINATION_STATUS_PROCESS_WAS_KILLED;
+    case win::kSandboxFatalMemoryExceeded:  // Terminated process due to
+                                            // exceeding the sandbox job
+                                            // object memory limits.
+    case win::kOomExceptionCode:            // Ran out of memory.
+      return TERMINATION_STATUS_OOM;
+    default:
+      // All other exit codes indicate crashes.
+      return TERMINATION_STATUS_PROCESS_CRASHED;
+  }
+}
+
+bool WaitForProcessesToExit(const FilePath::StringType& executable_name,
+                            TimeDelta wait,
+                            const ProcessFilter* filter) {
+  bool result = true;
+  DWORD start_time = GetTickCount();
+
+  NamedProcessIterator iter(executable_name, filter);
+  for (const ProcessEntry* entry = iter.NextProcessEntry(); entry;
+       entry = iter.NextProcessEntry()) {
+    DWORD remaining_wait = static_cast<DWORD>(
+        std::max(static_cast<int64_t>(0),
+                 wait.InMilliseconds() - (GetTickCount() - start_time)));
+    HANDLE process = OpenProcess(SYNCHRONIZE,
+                                 FALSE,
+                                 entry->th32ProcessID);
+    DWORD wait_result = WaitForSingleObject(process, remaining_wait);
+    CloseHandle(process);
+    result &= (wait_result == WAIT_OBJECT_0);
+  }
+
+  return result;
+}
+
+bool CleanupProcesses(const FilePath::StringType& executable_name,
+                      TimeDelta wait,
+                      int exit_code,
+                      const ProcessFilter* filter) {
+  if (WaitForProcessesToExit(executable_name, wait, filter))
+    return true;
+  KillProcesses(executable_name, exit_code, filter);
+  return false;
+}
+
+}  // namespace base
diff --git a/base/process/launch.cc b/base/process/launch.cc
new file mode 100644
index 0000000..c03e1a7
--- /dev/null
+++ b/base/process/launch.cc
@@ -0,0 +1,27 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/launch.h"
+#include "build/build_config.h"
+
+namespace base {
+
+LaunchOptions::LaunchOptions() = default;
+
+LaunchOptions::LaunchOptions(const LaunchOptions& other) = default;
+
+LaunchOptions::~LaunchOptions() = default;
+
+LaunchOptions LaunchOptionsForTest() {
+  LaunchOptions options;
+#if defined(OS_LINUX)
+  // To prevent accidental privilege sharing to an untrusted child, processes
+  // are started with PR_SET_NO_NEW_PRIVS. Do not set that here, since this
+  // new child will be used for testing only.
+  options.allow_new_privs = true;
+#endif
+  return options;
+}
+
+}  // namespace base
diff --git a/base/process/launch.h b/base/process/launch.h
new file mode 100644
index 0000000..b4530b7
--- /dev/null
+++ b/base/process/launch.h
@@ -0,0 +1,391 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains functions for launching subprocesses.
+
+#ifndef BASE_PROCESS_LAUNCH_H_
+#define BASE_PROCESS_LAUNCH_H_
+
+#include <stddef.h>
+
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/environment.h"
+#include "base/macros.h"
+#include "base/process/process.h"
+#include "base/process/process_handle.h"
+#include "base/strings/string_piece.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#elif defined(OS_FUCHSIA)
+#include <launchpad/launchpad.h>
+#include <zircon/types.h>
+#endif
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include "base/posix/file_descriptor_shuffle.h"
+#endif
+
+namespace base {
+
+class CommandLine;
+
+#if defined(OS_WIN)
+typedef std::vector<HANDLE> HandlesToInheritVector;
+#elif defined(OS_FUCHSIA)
+struct HandleToTransfer {
+  uint32_t id;
+  zx_handle_t handle;
+};
+typedef std::vector<HandleToTransfer> HandlesToTransferVector;
+typedef std::vector<std::pair<int, int>> FileHandleMappingVector;
+#elif defined(OS_POSIX)
+typedef std::vector<std::pair<int, int>> FileHandleMappingVector;
+#endif  // defined(OS_WIN)
+
+// Options for launching a subprocess that are passed to LaunchProcess().
+// The default constructor constructs the object with default options.
+struct BASE_EXPORT LaunchOptions {
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+  // Delegate to be run in between fork and exec in the subprocess (see
+  // pre_exec_delegate below).
+  class BASE_EXPORT PreExecDelegate {
+   public:
+    PreExecDelegate() = default;
+    virtual ~PreExecDelegate() = default;
+
+    // Since this is to be run between fork and exec, and fork may have happened
+    // while multiple threads were running, this function needs to be
+    // async-signal safe.
+    virtual void RunAsyncSafe() = 0;
+
+   private:
+    DISALLOW_COPY_AND_ASSIGN(PreExecDelegate);
+  };
+#endif  // defined(OS_POSIX) || defined(OS_FUCHSIA)
+
+  LaunchOptions();
+  LaunchOptions(const LaunchOptions&);
+  ~LaunchOptions();
+
+  // If true, wait for the process to complete.
+  bool wait = false;
+
+  // If not empty, change to this directory before executing the new process.
+  base::FilePath current_directory;
+
+#if defined(OS_WIN)
+  bool start_hidden = false;
+
+  // Windows can inherit handles when it launches child processes.
+  // See https://blogs.msdn.microsoft.com/oldnewthing/20111216-00/?p=8873
+  // for a good overview of Windows handle inheritance.
+  //
+  // Implementation note: it might be nice to implement in terms of
+  // base::Optional<>, but then the natural default state (vector not present)
+  // would be "all inheritable handles" while we want "no inheritance."
+  enum class Inherit {
+    // Only those handles in |handles_to_inherit| vector are inherited. If the
+    // vector is empty, no handles are inherited. The handles in the vector must
+    // all be inheritable.
+    kSpecific,
+
+    // All handles in the current process which are inheritable are inherited.
+    // In production code this flag should be used only when running
+    // short-lived, trusted binaries, because open handles from other libraries
+    // and subsystems will leak to the child process, causing errors such as
+    // open socket hangs. There are also race conditions that can cause handle
+    // over-sharing.
+    //
+    // |handles_to_inherit| must be empty.
+    //
+    // DEPRECATED. THIS SHOULD NOT BE USED. Explicitly map all handles that
+    // need to be shared in new code.
+    // TODO(brettw) bug 748258: remove this.
+    kAll
+  };
+  Inherit inherit_mode = Inherit::kSpecific;
+  HandlesToInheritVector handles_to_inherit;
+
+  // If non-null, runs as if the user represented by the token had launched it.
+  // Whether the application is visible on the interactive desktop depends on
+  // the token belonging to an interactive logon session.
+  //
+  // To avoid hard to diagnose problems, when specified this loads the
+  // environment variables associated with the user and if this operation fails
+  // the entire call fails as well.
+  UserTokenHandle as_user = nullptr;
+
+  // If true, use an empty string for the desktop name.
+  bool empty_desktop_name = false;
+
+  // If non-null, launches the application in that job object. The process will
+  // be terminated immediately and LaunchProcess() will fail if assignment to
+  // the job object fails.
+  HANDLE job_handle = nullptr;
+
+  // Handles for the redirection of stdin, stdout and stderr. The caller should
+  // either set all three of them or none (i.e. there is no way to redirect
+  // stderr without redirecting stdin).
+  //
+  // The handles must be inheritable. Pseudo handles are used when stdout and
+  // stderr redirect to the console. In that case, GetFileType() will return
+  // FILE_TYPE_CHAR and they're automatically inherited by child processes. See
+  // https://msdn.microsoft.com/en-us/library/windows/desktop/ms682075.aspx
+  // Otherwise, the caller must ensure that |inherit_mode| and/or
+  // |handles_to_inherit| are set so that the handles are inherited.
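+  //
+  // An illustrative sketch, wiring all three streams to the parent's console
+  // (console pseudo handles are inheritable, as noted above):
+  //   options.stdin_handle = ::GetStdHandle(STD_INPUT_HANDLE);
+  //   options.stdout_handle = ::GetStdHandle(STD_OUTPUT_HANDLE);
+  //   options.stderr_handle = ::GetStdHandle(STD_ERROR_HANDLE);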
+  HANDLE stdin_handle = nullptr;
+  HANDLE stdout_handle = nullptr;
+  HANDLE stderr_handle = nullptr;
+
+  // If set to true, ensures that the child process is launched with the
+  // CREATE_BREAKAWAY_FROM_JOB flag, which allows it to break away from the
+  // parent job, if any.
+  bool force_breakaway_from_job_ = false;
+
+  // If set to true, permission to bring windows to the foreground is passed to
+  // the launched process if the current process has such permission.
+  bool grant_foreground_privilege = false;
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  // Set/unset environment variables. These are applied on top of the parent
+  // process environment.  Empty (the default) means to inherit the same
+  // environment. See AlterEnvironment().
+  EnvironmentMap environ;
+
+  // Clear the environment for the new process before processing changes from
+  // |environ|.
+  bool clear_environ = false;
+
+  // Remap file descriptors according to the mapping of src_fd->dest_fd to
+  // propagate FDs into the child process.
+  FileHandleMappingVector fds_to_remap;
+#endif  // defined(OS_WIN)
+
+#if defined(OS_LINUX)
+  // If non-zero, start the process using clone(), using flags as provided.
+  // Unlike in clone, clone_flags may not contain a custom termination signal
+  // that is sent to the parent when the child dies. The termination signal will
+  // always be set to SIGCHLD.
+  int clone_flags = 0;
+
+  // By default, child processes will have the PR_SET_NO_NEW_PRIVS bit set. If
+  // true, then this bit will not be set in the new child process.
+  bool allow_new_privs = false;
+
+  // Sets parent process death signal to SIGKILL.
+  bool kill_on_parent_death = false;
+#endif  // defined(OS_LINUX)
+
+#if defined(OS_FUCHSIA)
+  // If valid, launches the application in that job object.
+  zx_handle_t job_handle = ZX_HANDLE_INVALID;
+
+  // Specifies additional handles to transfer (not duplicate) to the child
+  // process. The handles remain valid in this process if launch fails.
+  // Each entry is an <id,handle> pair, with an |id| created using the PA_HND()
+  // macro. The child retrieves the handle with |zx_get_startup_handle(id)|.
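+  //
+  // Illustrative only (the PA_USER0 slot choice is a hypothetical example):
+  //   zx_handle_t h = ...;  // e.g. one end of a channel pair.
+  //   options.handles_to_transfer.push_back({PA_HND(PA_USER0, 0), h});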
+  HandlesToTransferVector handles_to_transfer;
+
+  // If set, specifies which capabilities should be granted (cloned) to the
+  // child process.
+  // A zero value indicates that the child process will receive
+  // no capabilities.
+  // By default the child will inherit the same capabilities, job, and CWD
+  // from the parent process.
+  uint32_t clone_flags =
+      LP_CLONE_FDIO_NAMESPACE | LP_CLONE_DEFAULT_JOB | LP_CLONE_FDIO_STDIO;
+
+  // Specifies the namespace paths which are to be cloned in the child process'
+  // namespace. If left unset, the child process will be launched with an empty
+  // namespace.
+  // This flag allows the parent to pass only the bare minimum OS capabilities
+  // to the child process, so that the potential attack surface is reduced in
+  // case the child process is compromised.
+  // Cannot be combined with the clone flag LP_CLONE_FDIO_NAMESPACE, which is
+  // equivalent to cloning every path.
+  std::vector<FilePath> paths_to_map;
+#endif  // defined(OS_FUCHSIA)
+
+#if defined(OS_POSIX)
+  // If not empty, launch the specified executable instead of
+  // cmdline.GetProgram(). This is useful when it is necessary to pass a custom
+  // argv[0].
+  base::FilePath real_path;
+
+  // If non-null, a delegate to be run immediately prior to executing the new
+  // program in the child process.
+  //
+  // WARNING: If LaunchProcess is called in the presence of multiple threads,
+  // code running in this delegate essentially needs to be async-signal safe
+  // (see man 7 signal for a list of allowed functions).
+  PreExecDelegate* pre_exec_delegate = nullptr;
+
+  // Each element is an RLIMIT_* constant that should be raised to its
+  // rlim_max.  This pointer is owned by the caller and must live through
+  // the call to LaunchProcess().
+  const std::vector<int>* maximize_rlimits = nullptr;
+
+  // If true, start the process in a new process group, instead of
+  // inheriting the parent's process group.  The pgid of the child process
+  // will be the same as its pid.
+  bool new_process_group = false;
+#endif  // defined(OS_POSIX)
+
+#if defined(OS_CHROMEOS)
+  // If non-negative, the specified file descriptor will be set as the launched
+  // process' controlling terminal.
+  int ctrl_terminal_fd = -1;
+#endif  // defined(OS_CHROMEOS)
+};
+
+// Launch a process via the command line |cmdline|.
+// See the documentation of LaunchOptions for details on |options|.
+//
+// Returns a valid Process upon success.
+//
+// Unix-specific notes:
+// - All file descriptors open in the parent process will be closed in the
+//   child process except for any preserved by options.fds_to_remap, and
+//   stdin, stdout, and stderr. If not remapped by options.fds_to_remap,
+//   stdin is reopened as /dev/null, and the child is allowed to inherit its
+//   parent's stdout and stderr.
+// - If the first argument on the command line does not contain a slash,
+//   PATH will be searched.  (See man execvp.)
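+//
+// Example (illustrative; the program path is a hypothetical placeholder):
+//
+//   base::LaunchOptions options;
+//   options.wait = true;
+//   base::Process process = base::LaunchProcess(
+//       base::CommandLine(base::FilePath(FILE_PATH_LITERAL("/bin/true"))),
+//       options);
+//   if (!process.IsValid())
+//     DLOG(ERROR) << "Launch failed.";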
+BASE_EXPORT Process LaunchProcess(const CommandLine& cmdline,
+                                  const LaunchOptions& options);
+
+#if defined(OS_WIN)
+// Windows-specific LaunchProcess that takes the command line as a
+// string.  Useful for situations where you need to control the
+// command line arguments directly, but prefer the CommandLine version
+// if launching Chrome itself.
+//
+// The first command line argument should be the path to the process,
+// and don't forget to quote it.
+//
+// Example (including literal quotes)
+//  cmdline = "c:\windows\explorer.exe" -foo "c:\bar\"
+BASE_EXPORT Process LaunchProcess(const string16& cmdline,
+                                  const LaunchOptions& options);
+
+// Launches a process with elevated privileges.  This does not behave exactly
+// like LaunchProcess as it uses ShellExecuteEx instead of CreateProcess to
+// create the process.  This means the process will have elevated privileges
+// and thus some common operations like OpenProcess will fail. Currently the
+// only supported LaunchOptions are |start_hidden| and |wait|.
+BASE_EXPORT Process LaunchElevatedProcess(const CommandLine& cmdline,
+                                          const LaunchOptions& options);
+
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+// A POSIX-specific version of LaunchProcess that takes an argv array
+// instead of a CommandLine.  Useful for situations where you need to
+// control the command line arguments directly, but prefer the
+// CommandLine version if launching Chrome itself.
+BASE_EXPORT Process LaunchProcess(const std::vector<std::string>& argv,
+                                  const LaunchOptions& options);
+
+// Close all file descriptors, except those which are a destination in the
+// given multimap. Only call this function in a child process where you know
+// that there aren't any other threads.
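+//
+// A minimal sketch (assuming the InjectionArc(source, dest, close) constructor
+// from base/posix/file_descriptor_shuffle.h): preserve one pipe fd, close the
+// rest:
+//   InjectiveMultimap saved_map;
+//   saved_map.push_back(InjectionArc(pipe_fd, pipe_fd, false));
+//   CloseSuperfluousFds(saved_map);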
+BASE_EXPORT void CloseSuperfluousFds(const InjectiveMultimap& saved_map);
+#endif  // defined(OS_WIN)
+
+#if defined(OS_WIN)
+// Set |job_object|'s JOBOBJECT_EXTENDED_LIMIT_INFORMATION
+// BasicLimitInformation.LimitFlags to |limit_flags|.
+BASE_EXPORT bool SetJobObjectLimitFlags(HANDLE job_object, DWORD limit_flags);
+
+// Output multi-process printf, cout, cerr, etc to the cmd.exe console that ran
+// chrome. This is not thread-safe: only call from main thread.
+BASE_EXPORT void RouteStdioToConsole(bool create_console_if_not_found);
+#endif  // defined(OS_WIN)
+
+// Executes the application specified by |cl| and waits for it to exit. Stores
+// the output (stdout) in |output|. Redirects stderr to /dev/null. Returns true
+// on success (application launched and exited cleanly, with exit code
+// indicating success).
+BASE_EXPORT bool GetAppOutput(const CommandLine& cl, std::string* output);
+
+// Like GetAppOutput, but also includes stderr.
+BASE_EXPORT bool GetAppOutputAndError(const CommandLine& cl,
+                                      std::string* output);
+
+// A version of |GetAppOutput()| which also returns the exit code of the
+// executed command. Returns true if the application runs and exits cleanly. If
+// this is the case the exit code of the application is available in
+// |*exit_code|.
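+//
+// Example (illustrative):
+//   std::string output;
+//   int exit_code = 0;
+//   if (base::GetAppOutputWithExitCode(cl, &output, &exit_code))
+//     DVLOG(1) << "exited with code " << exit_code;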
+BASE_EXPORT bool GetAppOutputWithExitCode(const CommandLine& cl,
+                                          std::string* output, int* exit_code);
+
+#if defined(OS_WIN)
+// A Windows-specific version of GetAppOutput that takes a command line string
+// instead of a CommandLine object. Useful for situations where you need to
+// control the command line arguments directly.
+BASE_EXPORT bool GetAppOutput(const StringPiece16& cl, std::string* output);
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+// A POSIX-specific version of GetAppOutput that takes an argv array
+// instead of a CommandLine.  Useful for situations where you need to
+// control the command line arguments directly.
+BASE_EXPORT bool GetAppOutput(const std::vector<std::string>& argv,
+                              std::string* output);
+
+// Like the above POSIX-specific version of GetAppOutput, but also includes
+// stderr.
+BASE_EXPORT bool GetAppOutputAndError(const std::vector<std::string>& argv,
+                                      std::string* output);
+#endif  // defined(OS_WIN)
+
+// If supported on the platform, and the user has sufficient rights, increase
+// the current process's scheduling priority to a high priority.
+BASE_EXPORT void RaiseProcessToHighPriority();
+
+#if defined(OS_MACOSX)
+// An implementation of LaunchProcess() that uses posix_spawn() instead of
+// fork()+exec(). This does not support the |pre_exec_delegate| and
+// |current_directory| options.
+Process LaunchProcessPosixSpawn(const std::vector<std::string>& argv,
+                                const LaunchOptions& options);
+
+// Restore the default exception handler, setting it to Apple Crash Reporter
+// (ReportCrash).  When forking and execing a new process, the child will
+// inherit the parent's exception ports, which may be set to the Breakpad
+// instance running inside the parent.  The parent's Breakpad instance should
+// not handle the child's exceptions.  Calling RestoreDefaultExceptionHandler
+// in the child after forking will restore the standard exception handler.
+// See http://crbug.com/20371/ for more details.
+void RestoreDefaultExceptionHandler();
+#endif  // defined(OS_MACOSX)
+
+// Creates a LaunchOptions object suitable for launching processes in a test
+// binary. This should not be called in production/released code.
+BASE_EXPORT LaunchOptions LaunchOptionsForTest();
+
+#if defined(OS_LINUX) || defined(OS_NACL_NONSFI)
+// A wrapper for clone with fork-like behavior, meaning that it returns the
+// child's pid in the parent and 0 in the child. |flags|, |ptid|, and |ctid| are
+// as in the clone system call (the CLONE_VM flag is not supported).
+//
+// This function uses the libc clone wrapper (which updates libc's pid cache)
+// internally, so callers may expect things like getpid() to work correctly
+// afterwards in both the child and parent.
+//
+// As with fork(), callers should be extremely careful when calling this while
+// multiple threads are running, since at the time the fork happened, the
+// threads could have been in any state (potentially holding locks, etc.).
+// Callers should most likely call execve() in the child soon after calling
+// this.
+//
+// It is unsafe to use any pthread APIs after ForkWithFlags().
+// However, performing an exec() will lift this restriction.
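+//
+// Illustrative only (mirroring the internal use in LaunchProcess(), where the
+// child execs shortly after the fork; |path| and |argv| are hypothetical):
+//   pid_t pid = ForkWithFlags(SIGCHLD, nullptr, nullptr);
+//   if (pid == 0) {
+//     // Child: stick to async-signal-safe calls, then exec or _exit.
+//     execv(path, argv);
+//     _exit(127);
+//   }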
+BASE_EXPORT pid_t ForkWithFlags(unsigned long flags, pid_t* ptid, pid_t* ctid);
+#endif
+
+}  // namespace base
+
+#endif  // BASE_PROCESS_LAUNCH_H_
diff --git a/base/process/launch_fuchsia.cc b/base/process/launch_fuchsia.cc
new file mode 100644
index 0000000..3bc7580
--- /dev/null
+++ b/base/process/launch_fuchsia.cc
@@ -0,0 +1,307 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/launch.h"
+
+#include <fdio/limits.h>
+#include <fdio/namespace.h>
+#include <fdio/util.h>
+#include <launchpad/launchpad.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <zircon/process.h>
+#include <zircon/processargs.h>
+
+#include "base/command_line.h"
+#include "base/files/file_util.h"
+#include "base/fuchsia/default_job.h"
+#include "base/fuchsia/fuchsia_logging.h"
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/scoped_generic.h"
+
+namespace base {
+
+namespace {
+
+bool GetAppOutputInternal(const CommandLine& cmd_line,
+                          bool include_stderr,
+                          std::string* output,
+                          int* exit_code) {
+  DCHECK(exit_code);
+
+  LaunchOptions options;
+
+  // LaunchProcess will automatically clone any stdio fd we do not explicitly
+  // map.
+  int pipe_fd[2];
+  if (pipe(pipe_fd) < 0)
+    return false;
+  options.fds_to_remap.emplace_back(pipe_fd[1], STDOUT_FILENO);
+  if (include_stderr)
+    options.fds_to_remap.emplace_back(pipe_fd[1], STDERR_FILENO);
+
+  Process process = LaunchProcess(cmd_line, options);
+  close(pipe_fd[1]);
+  if (!process.IsValid()) {
+    close(pipe_fd[0]);
+    return false;
+  }
+
+  output->clear();
+  for (;;) {
+    char buffer[256];
+    ssize_t bytes_read = read(pipe_fd[0], buffer, sizeof(buffer));
+    if (bytes_read <= 0)
+      break;
+    output->append(buffer, bytes_read);
+  }
+  close(pipe_fd[0]);
+
+  return process.WaitForExit(exit_code);
+}
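+
+// The capture pattern above, reduced to its essentials (illustrative; error
+// handling elided):
+//
+//   int fds[2];
+//   pipe(fds);
+//   LaunchOptions options;
+//   options.fds_to_remap.emplace_back(fds[1], STDOUT_FILENO);
+//   Process child = LaunchProcess(cmd_line, options);
+//   close(fds[1]);  // Parent keeps only the read end; read() hits EOF when
+//                   // the child closes its stdout.
+//   // ... read(fds[0], ...) until it returns 0, then child.WaitForExit(...).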
+
+bool MapPathsToLaunchpad(const std::vector<FilePath>& paths_to_map,
+                         launchpad_t* lp) {
+  zx_status_t status;
+
+  // Build an array of null-terminated strings, which will be used as an
+  // argument for launchpad_set_nametable().
+  std::vector<const char*> paths_c_str;
+  paths_c_str.reserve(paths_to_map.size());
+
+  for (size_t paths_idx = 0; paths_idx < paths_to_map.size(); ++paths_idx) {
+    const FilePath& next_path = paths_to_map[paths_idx];
+    if (!PathExists(next_path)) {
+      DLOG(ERROR) << "Path does not exist: " << next_path;
+      return false;
+    }
+
+    File dir(next_path, File::FLAG_OPEN | File::FLAG_READ);
+    ScopedPlatformFile scoped_fd(dir.TakePlatformFile());
+    zx_handle_t handles[FDIO_MAX_HANDLES] = {};
+    uint32_t types[FDIO_MAX_HANDLES] = {};
+    zx_status_t num_handles =
+        fdio_transfer_fd(scoped_fd.get(), 0, handles, types);
+    // fdio_transfer_fd() returns number of transferred handles, or negative
+    // error.
+    if (num_handles <= 0) {
+      DCHECK_LT(num_handles, 0);
+      ZX_LOG(ERROR, num_handles) << "fdio_transfer_fd";
+      return false;
+    }
+    ScopedZxHandle scoped_handle(handles[0]);
+    ignore_result(scoped_fd.release());
+
+    // Close the handles that we won't use.
+    for (int i = 1; i < num_handles; ++i) {
+      zx_handle_close(handles[i]);
+    }
+
+    if (types[0] != PA_FDIO_REMOTE) {
+      LOG(ERROR) << "Handle type for " << next_path.AsUTF8Unsafe()
+                 << " is not PA_FDIO_REMOTE: " << types[0];
+      return false;
+    }
+
+    // Add the handle to the child's nametable.
+    // We use the macro PA_HND(..., <index>) to relate the handle to its
+    // position in the nametable, which is stored as an array of path strings
+    // |paths_c_str|.
+    status = launchpad_add_handle(lp, scoped_handle.release(),
+                                  PA_HND(PA_NS_DIR, paths_idx));
+    if (status != ZX_OK) {
+      ZX_LOG(ERROR, status) << "launchpad_add_handle";
+      return false;
+    }
+    paths_c_str.push_back(next_path.value().c_str());
+  }
+
+  if (!paths_c_str.empty()) {
+    status =
+        launchpad_set_nametable(lp, paths_c_str.size(), paths_c_str.data());
+    if (status != ZX_OK) {
+      ZX_LOG(ERROR, status) << "launchpad_set_nametable";
+      return false;
+    }
+  }
+
+  return true;
+}
+
+struct LaunchpadScopedTraits {
+  static launchpad_t* InvalidValue() { return nullptr; }
+
+  static void Free(launchpad_t* lp) { launchpad_destroy(lp); }
+};
+
+using ScopedLaunchpad = ScopedGeneric<launchpad_t*, LaunchpadScopedTraits>;
+
+}  // namespace
+
+Process LaunchProcess(const CommandLine& cmdline,
+                      const LaunchOptions& options) {
+  return LaunchProcess(cmdline.argv(), options);
+}
+
+// TODO(768416): Investigate whether we can make LaunchProcess() create
+// unprivileged processes by default (no implicit capabilities are granted).
+Process LaunchProcess(const std::vector<std::string>& argv,
+                      const LaunchOptions& options) {
+  std::vector<const char*> argv_cstr;
+  argv_cstr.reserve(argv.size() + 1);
+  for (const auto& arg : argv)
+    argv_cstr.push_back(arg.c_str());
+  argv_cstr.push_back(nullptr);
+
+  // Note that per launchpad.h, the intention is that launchpad_ functions are
+  // used in a "builder" style. From launchpad_create() to launchpad_go() the
+  // status is tracked in the launchpad_t object; launchpad_go() reports the
+  // final status and cleans up |lp| (assuming it was even created).
+  zx_handle_t job = options.job_handle != ZX_HANDLE_INVALID ? options.job_handle
+                                                            : GetDefaultJob();
+  DCHECK_NE(ZX_HANDLE_INVALID, job);
+  ScopedLaunchpad lp;
+  zx_status_t status;
+  if ((status = launchpad_create(job, argv_cstr[0], lp.receive())) != ZX_OK) {
+    ZX_LOG(ERROR, status) << "launchpad_create(job)";
+    return Process();
+  }
+
+  if ((status = launchpad_load_from_file(lp.get(), argv_cstr[0])) != ZX_OK) {
+    ZX_LOG(ERROR, status) << "launchpad_load_from_file(" << argv_cstr[0] << ")";
+    return Process();
+  }
+
+  if ((status = launchpad_set_args(lp.get(), argv.size(), argv_cstr.data())) !=
+      ZX_OK) {
+    ZX_LOG(ERROR, status) << "launchpad_set_args";
+    return Process();
+  }
+
+  uint32_t to_clone = options.clone_flags;
+
+  std::unique_ptr<char* []> new_environ;
+  char* const empty_environ = nullptr;
+  char* const* old_environ = environ;
+  if (options.clear_environ)
+    old_environ = &empty_environ;
+
+  EnvironmentMap environ_modifications = options.environ;
+  if (!options.current_directory.empty()) {
+    environ_modifications["PWD"] = options.current_directory.value();
+  } else {
+    FilePath cwd;
+    GetCurrentDirectory(&cwd);
+    environ_modifications["PWD"] = cwd.value();
+  }
+
+  if (to_clone & LP_CLONE_DEFAULT_JOB) {
+    // Override Fuchsia's built in default job cloning behavior with our own
+    // logic which uses |job| instead of zx_job_default().
+    // This logic is based on the launchpad implementation.
+    zx_handle_t job_duplicate = ZX_HANDLE_INVALID;
+    if ((status = zx_handle_duplicate(job, ZX_RIGHT_SAME_RIGHTS,
+                                      &job_duplicate)) != ZX_OK) {
+      ZX_LOG(ERROR, status) << "zx_handle_duplicate";
+      return Process();
+    }
+    launchpad_add_handle(lp.get(), job_duplicate, PA_HND(PA_JOB_DEFAULT, 0));
+    to_clone &= ~LP_CLONE_DEFAULT_JOB;
+  }
+
+  if (!environ_modifications.empty())
+    new_environ = AlterEnvironment(old_environ, environ_modifications);
+
+  if (!environ_modifications.empty() || options.clear_environ)
+    launchpad_set_environ(lp.get(), new_environ.get());
+  else
+    to_clone |= LP_CLONE_ENVIRON;
+
+  if (!options.paths_to_map.empty()) {
+    DCHECK(!(to_clone & LP_CLONE_FDIO_NAMESPACE));
+    if (!MapPathsToLaunchpad(options.paths_to_map, lp.get())) {
+      return Process();
+    }
+  }
+
+  launchpad_clone(lp.get(), to_clone);
+
+  // Clone the mapped file-descriptors, plus any of the stdio descriptors
+  // which were not explicitly specified.
+  bool stdio_already_mapped[3] = {false};
+  for (const auto& src_target : options.fds_to_remap) {
+    if (static_cast<size_t>(src_target.second) <
+        arraysize(stdio_already_mapped)) {
+      stdio_already_mapped[src_target.second] = true;
+    }
+    launchpad_clone_fd(lp.get(), src_target.first, src_target.second);
+  }
+  if (to_clone & LP_CLONE_FDIO_STDIO) {
+    for (size_t stdio_fd = 0; stdio_fd < arraysize(stdio_already_mapped);
+         ++stdio_fd) {
+      if (!stdio_already_mapped[stdio_fd])
+        launchpad_clone_fd(lp.get(), stdio_fd, stdio_fd);
+    }
+    to_clone &= ~LP_CLONE_FDIO_STDIO;
+  }
+
+  for (const auto& id_and_handle : options.handles_to_transfer) {
+    launchpad_add_handle(lp.get(), id_and_handle.handle, id_and_handle.id);
+  }
+
+  zx_handle_t process_handle;
+  const char* errmsg;
+  if ((status = launchpad_go(lp.get(), &process_handle, &errmsg)) != ZX_OK) {
+    ZX_LOG(ERROR, status) << "launchpad_go failed: " << errmsg;
+    return Process();
+  }
+  ignore_result(lp.release());  // launchpad_go() took ownership.
+
+  Process process(process_handle);
+  if (options.wait) {
+    status = zx_object_wait_one(process.Handle(), ZX_TASK_TERMINATED,
+                                ZX_TIME_INFINITE, nullptr);
+    DCHECK(status == ZX_OK)
+        << "zx_object_wait_one: " << zx_status_get_string(status);
+  }
+
+  return process;
+}
+
+bool GetAppOutput(const CommandLine& cl, std::string* output) {
+  int exit_code;
+  bool result = GetAppOutputInternal(cl, false, output, &exit_code);
+  return result && exit_code == EXIT_SUCCESS;
+}
+
+bool GetAppOutput(const std::vector<std::string>& argv, std::string* output) {
+  return GetAppOutput(CommandLine(argv), output);
+}
+
+bool GetAppOutputAndError(const CommandLine& cl, std::string* output) {
+  int exit_code;
+  bool result = GetAppOutputInternal(cl, true, output, &exit_code);
+  return result && exit_code == EXIT_SUCCESS;
+}
+
+bool GetAppOutputAndError(const std::vector<std::string>& argv,
+                          std::string* output) {
+  return GetAppOutputAndError(CommandLine(argv), output);
+}
+
+bool GetAppOutputWithExitCode(const CommandLine& cl,
+                              std::string* output,
+                              int* exit_code) {
+  // Contrary to GetAppOutput(), |true| return here means that the process was
+  // launched and the exit code was waited upon successfully, but not
+  // necessarily that the exit code was EXIT_SUCCESS.
+  return GetAppOutputInternal(cl, false, output, exit_code);
+}
+
+void RaiseProcessToHighPriority() {
+  // Fuchsia doesn't provide an API to change process priority.
+}
+
+}  // namespace base
diff --git a/base/process/launch_ios.cc b/base/process/launch_ios.cc
new file mode 100644
index 0000000..3c700f8
--- /dev/null
+++ b/base/process/launch_ios.cc
@@ -0,0 +1,13 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/launch.h"
+
+namespace base {
+
+void RaiseProcessToHighPriority() {
+  // Impossible on iOS. Do nothing.
+}
+
+}  // namespace base
diff --git a/base/process/launch_mac.cc b/base/process/launch_mac.cc
new file mode 100644
index 0000000..06dbb99
--- /dev/null
+++ b/base/process/launch_mac.cc
@@ -0,0 +1,180 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/launch.h"
+
+#include <crt_externs.h>
+#include <mach/mach.h>
+#include <spawn.h>
+#include <string.h>
+#include <sys/wait.h>
+
+#include "base/logging.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/threading/thread_restrictions.h"
+
+namespace base {
+
+namespace {
+
+// DPSXCHECK is a Debug Posix Spawn Check macro. The posix_spawn* family of
+// functions return an errno value, as opposed to setting errno directly. This
+// macro emulates a DPCHECK().
+#define DPSXCHECK(expr)                                              \
+  do {                                                               \
+    int rv = (expr);                                                 \
+    DCHECK_EQ(rv, 0) << #expr << ": -" << rv << " " << strerror(rv); \
+  } while (0)
+
+class PosixSpawnAttr {
+ public:
+  PosixSpawnAttr() { DPSXCHECK(posix_spawnattr_init(&attr_)); }
+
+  ~PosixSpawnAttr() { DPSXCHECK(posix_spawnattr_destroy(&attr_)); }
+
+  posix_spawnattr_t* get() { return &attr_; }
+
+ private:
+  posix_spawnattr_t attr_;
+};
+
+class PosixSpawnFileActions {
+ public:
+  PosixSpawnFileActions() {
+    DPSXCHECK(posix_spawn_file_actions_init(&file_actions_));
+  }
+
+  ~PosixSpawnFileActions() {
+    DPSXCHECK(posix_spawn_file_actions_destroy(&file_actions_));
+  }
+
+  void Open(int filedes, const char* path, int mode) {
+    DPSXCHECK(posix_spawn_file_actions_addopen(&file_actions_, filedes, path,
+                                               mode, 0));
+  }
+
+  void Dup2(int filedes, int newfiledes) {
+    DPSXCHECK(
+        posix_spawn_file_actions_adddup2(&file_actions_, filedes, newfiledes));
+  }
+
+  void Inherit(int filedes) {
+    DPSXCHECK(posix_spawn_file_actions_addinherit_np(&file_actions_, filedes));
+  }
+
+  const posix_spawn_file_actions_t* get() const { return &file_actions_; }
+
+ private:
+  posix_spawn_file_actions_t file_actions_;
+
+  DISALLOW_COPY_AND_ASSIGN(PosixSpawnFileActions);
+};
+
+}  // namespace
+
+void RestoreDefaultExceptionHandler() {
+  // This function is tailored to remove the Breakpad exception handler.
+  // exception_mask matches s_exception_mask in
+  // third_party/breakpad/breakpad/src/client/mac/handler/exception_handler.cc
+  const exception_mask_t exception_mask = EXC_MASK_BAD_ACCESS |
+                                          EXC_MASK_BAD_INSTRUCTION |
+                                          EXC_MASK_ARITHMETIC |
+                                          EXC_MASK_BREAKPOINT;
+
+  // Setting the exception port to MACH_PORT_NULL may not be entirely
+  // kosher to restore the default exception handler, but in practice,
+  // it results in the exception port being set to Apple Crash Reporter,
+  // the desired behavior.
+  task_set_exception_ports(mach_task_self(), exception_mask, MACH_PORT_NULL,
+                           EXCEPTION_DEFAULT, THREAD_STATE_NONE);
+}
+
+Process LaunchProcessPosixSpawn(const std::vector<std::string>& argv,
+                                const LaunchOptions& options) {
+  DCHECK(!options.pre_exec_delegate)
+      << "LaunchProcessPosixSpawn does not support PreExecDelegate";
+  DCHECK(options.current_directory.empty())
+      << "LaunchProcessPosixSpawn does not support current_directory";
+
+  PosixSpawnAttr attr;
+
+  short flags = POSIX_SPAWN_CLOEXEC_DEFAULT;
+  if (options.new_process_group) {
+    flags |= POSIX_SPAWN_SETPGROUP;
+    DPSXCHECK(posix_spawnattr_setpgroup(attr.get(), 0));
+  }
+  DPSXCHECK(posix_spawnattr_setflags(attr.get(), flags));
+
+  PosixSpawnFileActions file_actions;
+
+  // Process file descriptors for the child. By default, LaunchProcess will
+  // open stdin to /dev/null and inherit stdout and stderr.
+  bool inherit_stdout = true, inherit_stderr = true;
+  bool null_stdin = true;
+  for (const auto& dup2_pair : options.fds_to_remap) {
+    if (dup2_pair.second == STDIN_FILENO) {
+      null_stdin = false;
+    } else if (dup2_pair.second == STDOUT_FILENO) {
+      inherit_stdout = false;
+    } else if (dup2_pair.second == STDERR_FILENO) {
+      inherit_stderr = false;
+    }
+
+    if (dup2_pair.first == dup2_pair.second) {
+      file_actions.Inherit(dup2_pair.second);
+    } else {
+      file_actions.Dup2(dup2_pair.first, dup2_pair.second);
+    }
+  }
+
+  if (null_stdin) {
+    file_actions.Open(STDIN_FILENO, "/dev/null", O_RDONLY);
+  }
+  if (inherit_stdout) {
+    file_actions.Inherit(STDOUT_FILENO);
+  }
+  if (inherit_stderr) {
+    file_actions.Inherit(STDERR_FILENO);
+  }
+
+  std::vector<char*> argv_cstr;
+  argv_cstr.reserve(argv.size() + 1);
+  for (const auto& arg : argv)
+    argv_cstr.push_back(const_cast<char*>(arg.c_str()));
+  argv_cstr.push_back(nullptr);
+
+  std::unique_ptr<char* []> owned_environ;
+  char** new_environ = options.clear_environ ? nullptr : *_NSGetEnviron();
+  if (!options.environ.empty()) {
+    owned_environ = AlterEnvironment(new_environ, options.environ);
+    new_environ = owned_environ.get();
+  }
+
+  const char* executable_path = !options.real_path.empty()
+                                    ? options.real_path.value().c_str()
+                                    : argv_cstr[0];
+
+  // Use posix_spawnp as some callers expect to have PATH consulted.
+  pid_t pid;
+  int rv = posix_spawnp(&pid, executable_path, file_actions.get(), attr.get(),
+                        &argv_cstr[0], new_environ);
+
+  if (rv != 0) {
+    DLOG(ERROR) << "posix_spawnp(" << executable_path << "): -" << rv << " "
+                << strerror(rv);
+    return Process();
+  }
+
+  if (options.wait) {
+    // While this isn't strictly disk IO, waiting for another process to
+    // finish is the sort of thing ThreadRestrictions is trying to prevent.
+    base::AssertBlockingAllowed();
+    pid_t ret = HANDLE_EINTR(waitpid(pid, nullptr, 0));
+    DPCHECK(ret > 0);
+  }
+
+  return Process(pid);
+}
+
+}  // namespace base
diff --git a/base/process/launch_posix.cc b/base/process/launch_posix.cc
new file mode 100644
index 0000000..ec58488
--- /dev/null
+++ b/base/process/launch_posix.cc
@@ -0,0 +1,764 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/launch.h"
+
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <sched.h>
+#include <setjmp.h>
+#include <signal.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <sys/resource.h>
+#include <sys/syscall.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include <iterator>
+#include <limits>
+#include <memory>
+#include <set>
+
+#include "base/command_line.h"
+#include "base/compiler_specific.h"
+#include "base/debug/debugger.h"
+#include "base/debug/stack_trace.h"
+#include "base/files/dir_reader_posix.h"
+#include "base/files/file_util.h"
+#include "base/files/scoped_file.h"
+#include "base/logging.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/process/process.h"
+#include "base/process/process_metrics.h"
+#include "base/strings/stringprintf.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/time/time.h"
+#include "base/trace_event/trace_event.h"
+#include "build/build_config.h"
+
+#if defined(OS_LINUX) || defined(OS_AIX)
+#include <sys/prctl.h>
+#endif
+
+#if defined(OS_CHROMEOS)
+#include <sys/ioctl.h>
+#endif
+
+#if defined(OS_FREEBSD)
+#include <sys/event.h>
+#include <sys/ucontext.h>
+#endif
+
+#if defined(OS_MACOSX)
+#include <crt_externs.h>
+#include <sys/event.h>
+
+#include "base/feature_list.h"
+#else
+extern char** environ;
+#endif
+
+namespace base {
+
+// Friend and derived class of ScopedAllowBaseSyncPrimitives which allows
+// GetAppOutputInternal() to join a process. GetAppOutputInternal() can't itself
+// be a friend of ScopedAllowBaseSyncPrimitives because it is in the anonymous
+// namespace.
+class GetAppOutputScopedAllowBaseSyncPrimitives
+    : public base::ScopedAllowBaseSyncPrimitives {};
+
+#if !defined(OS_NACL_NONSFI)
+
+namespace {
+
+#if defined(OS_MACOSX)
+const Feature kMacLaunchProcessPosixSpawn{"MacLaunchProcessPosixSpawn",
+                                          FEATURE_ENABLED_BY_DEFAULT};
+#endif
+
+// Get the process's "environment" (i.e. the thing that setenv/getenv
+// work with).
+char** GetEnvironment() {
+#if defined(OS_MACOSX)
+  return *_NSGetEnviron();
+#else
+  return environ;
+#endif
+}
+
+// Set the process's "environment" (i.e. the thing that setenv/getenv
+// work with).
+void SetEnvironment(char** env) {
+#if defined(OS_MACOSX)
+  *_NSGetEnviron() = env;
+#else
+  environ = env;
+#endif
+}
+
+// Set the calling thread's signal mask to new_sigmask and return
+// the previous signal mask.
+sigset_t SetSignalMask(const sigset_t& new_sigmask) {
+  sigset_t old_sigmask;
+#if defined(OS_ANDROID)
+  // POSIX says pthread_sigmask() must be used in multi-threaded processes,
+  // but Android's pthread_sigmask() was broken until 4.1:
+  // https://code.google.com/p/android/issues/detail?id=15337
+  // http://stackoverflow.com/questions/13777109/pthread-sigmask-on-android-not-working
+  RAW_CHECK(sigprocmask(SIG_SETMASK, &new_sigmask, &old_sigmask) == 0);
+#else
+  RAW_CHECK(pthread_sigmask(SIG_SETMASK, &new_sigmask, &old_sigmask) == 0);
+#endif
+  return old_sigmask;
+}
+
+#if (!defined(OS_LINUX) && !defined(OS_AIX)) || \
+    (!defined(__i386__) && !defined(__x86_64__) && !defined(__arm__))
+void ResetChildSignalHandlersToDefaults() {
+  // The previous signal handlers are likely to be meaningless in the child's
+  // context so we reset them to the defaults for now. http://crbug.com/44953
+  // These signal handlers are set up at least in browser_main_posix.cc:
+  // BrowserMainPartsPosix::PreEarlyInitialization and stack_trace_posix.cc:
+  // EnableInProcessStackDumping.
+  signal(SIGHUP, SIG_DFL);
+  signal(SIGINT, SIG_DFL);
+  signal(SIGILL, SIG_DFL);
+  signal(SIGABRT, SIG_DFL);
+  signal(SIGFPE, SIG_DFL);
+  signal(SIGBUS, SIG_DFL);
+  signal(SIGSEGV, SIG_DFL);
+  signal(SIGSYS, SIG_DFL);
+  signal(SIGTERM, SIG_DFL);
+}
+
+#else
+
+// TODO(jln): remove the Linux special case once kernels are fixed.
+
+// Internally the kernel makes sigset_t an array of long large enough to have
+// one bit per signal.
+typedef uint64_t kernel_sigset_t;
+
+// This is what struct sigaction looks like to the kernel at least on X86 and
+// ARM. MIPS, for instance, is very different.
+struct kernel_sigaction {
+  void* k_sa_handler;  // For this usage it only needs to be a generic pointer.
+  unsigned long k_sa_flags;
+  void* k_sa_restorer;  // For this usage it only needs to be a generic pointer.
+  kernel_sigset_t k_sa_mask;
+};
+
+// glibc's sigaction() will prevent access to sa_restorer, so we need to roll
+// our own.
+int sys_rt_sigaction(int sig, const struct kernel_sigaction* act,
+                     struct kernel_sigaction* oact) {
+  return syscall(SYS_rt_sigaction, sig, act, oact, sizeof(kernel_sigset_t));
+}
+
+// This function is intended to be used in between fork() and execve() and will
+// reset all signal handlers to the default.
+// The motivation for going through all of them is that sa_restorer can leak
+// from parents and help defeat ASLR on buggy kernels.  We reset it to null.
+// See crbug.com/177956.
+void ResetChildSignalHandlersToDefaults(void) {
+  for (int signum = 1; ; ++signum) {
+    struct kernel_sigaction act = {nullptr};
+    int sigaction_get_ret = sys_rt_sigaction(signum, nullptr, &act);
+    if (sigaction_get_ret && errno == EINVAL) {
+#if !defined(NDEBUG)
+      // Linux supports 32 real-time signals from 33 to 64.
+      // If the number of signals in the Linux kernel changes, someone should
+      // look at this code.
+      const int kNumberOfSignals = 64;
+      RAW_CHECK(signum == kNumberOfSignals + 1);
+#endif  // !defined(NDEBUG)
+      break;
+    }
+    // All other failures are fatal.
+    if (sigaction_get_ret) {
+      RAW_LOG(FATAL, "sigaction (get) failed.");
+    }
+
+    // The kernel won't allow to re-set SIGKILL or SIGSTOP.
+    if (signum != SIGSTOP && signum != SIGKILL) {
+      act.k_sa_handler = reinterpret_cast<void*>(SIG_DFL);
+      act.k_sa_restorer = nullptr;
+      if (sys_rt_sigaction(signum, &act, nullptr)) {
+        RAW_LOG(FATAL, "sigaction (set) failed.");
+      }
+    }
+#if !defined(NDEBUG)
+    // Now ask the kernel again and check that no restorer will leak.
+    if (sys_rt_sigaction(signum, nullptr, &act) || act.k_sa_restorer) {
+      RAW_LOG(FATAL, "Cound not fix sa_restorer.");
+    }
+#endif  // !defined(NDEBUG)
+  }
+}
+#endif  // (!defined(OS_LINUX) && !defined(OS_AIX)) ||
+        // (!defined(__i386__) && !defined(__x86_64__) && !defined(__arm__))
+}  // anonymous namespace
+
+// Functor for |ScopedDIR| (below).
+struct ScopedDIRClose {
+  inline void operator()(DIR* x) const {
+    if (x)
+      closedir(x);
+  }
+};
+
+// Automatically closes |DIR*|s.
+typedef std::unique_ptr<DIR, ScopedDIRClose> ScopedDIR;
+
+#if defined(OS_LINUX) || defined(OS_AIX) || defined(OS_ANDROID)
+static const char kFDDir[] = "/proc/self/fd";
+#elif defined(OS_MACOSX) || defined(OS_SOLARIS) || defined(OS_FREEBSD) || \
+    defined(OS_OPENBSD)
+static const char kFDDir[] = "/dev/fd";
+#endif
+
+void CloseSuperfluousFds(const base::InjectiveMultimap& saved_mapping) {
+  // DANGER: no calls to malloc or locks are allowed from now on:
+  // http://crbug.com/36678
+
+  // Get the maximum number of FDs possible.
+  size_t max_fds = GetMaxFds();
+
+  DirReaderPosix fd_dir(kFDDir);
+  if (!fd_dir.IsValid()) {
+    // Fallback case: Try every possible fd.
+    for (size_t i = 0; i < max_fds; ++i) {
+      const int fd = static_cast<int>(i);
+      if (fd == STDIN_FILENO || fd == STDOUT_FILENO || fd == STDERR_FILENO)
+        continue;
+      // Cannot use STL iterators here, since debug iterators use locks.
+      size_t j;
+      for (j = 0; j < saved_mapping.size(); j++) {
+        if (fd == saved_mapping[j].dest)
+          break;
+      }
+      if (j < saved_mapping.size())
+        continue;
+
+      // Since we're just trying to close anything we can find,
+      // ignore any error return values of close().
+      close(fd);
+    }
+    return;
+  }
+
+  const int dir_fd = fd_dir.fd();
+
+  for ( ; fd_dir.Next(); ) {
+    // Skip . and .. entries.
+    if (fd_dir.name()[0] == '.')
+      continue;
+
+    char *endptr;
+    errno = 0;
+    const long int fd = strtol(fd_dir.name(), &endptr, 10);
+    if (fd_dir.name()[0] == 0 || *endptr || fd < 0 || errno)
+      continue;
+    if (fd == STDIN_FILENO || fd == STDOUT_FILENO || fd == STDERR_FILENO)
+      continue;
+    // Cannot use STL iterators here, since debug iterators use locks.
+    size_t i;
+    for (i = 0; i < saved_mapping.size(); i++) {
+      if (fd == saved_mapping[i].dest)
+        break;
+    }
+    if (i < saved_mapping.size())
+      continue;
+    if (fd == dir_fd)
+      continue;
+
+    int ret = IGNORE_EINTR(close(fd));
+    DPCHECK(ret == 0);
+  }
+}
+
+Process LaunchProcess(const CommandLine& cmdline,
+                      const LaunchOptions& options) {
+  return LaunchProcess(cmdline.argv(), options);
+}
+
+Process LaunchProcess(const std::vector<std::string>& argv,
+                      const LaunchOptions& options) {
+  TRACE_EVENT0("base", "LaunchProcess");
+#if defined(OS_MACOSX)
+  if (FeatureList::IsEnabled(kMacLaunchProcessPosixSpawn)) {
+    // TODO(rsesek): Do this unconditionally. There is one user for each of
+    // these two options. https://crbug.com/179923.
+    if (!options.pre_exec_delegate && options.current_directory.empty())
+      return LaunchProcessPosixSpawn(argv, options);
+  }
+#endif
+
+  InjectiveMultimap fd_shuffle1;
+  InjectiveMultimap fd_shuffle2;
+  fd_shuffle1.reserve(options.fds_to_remap.size());
+  fd_shuffle2.reserve(options.fds_to_remap.size());
+
+  std::vector<char*> argv_cstr;
+  argv_cstr.reserve(argv.size() + 1);
+  for (const auto& arg : argv)
+    argv_cstr.push_back(const_cast<char*>(arg.c_str()));
+  argv_cstr.push_back(nullptr);
+
+  std::unique_ptr<char* []> new_environ;
+  char* const empty_environ = nullptr;
+  char* const* old_environ = GetEnvironment();
+  if (options.clear_environ)
+    old_environ = &empty_environ;
+  if (!options.environ.empty())
+    new_environ = AlterEnvironment(old_environ, options.environ);
+
+  sigset_t full_sigset;
+  sigfillset(&full_sigset);
+  const sigset_t orig_sigmask = SetSignalMask(full_sigset);
+
+  const char* current_directory = nullptr;
+  if (!options.current_directory.empty()) {
+    current_directory = options.current_directory.value().c_str();
+  }
+
+  pid_t pid;
+  base::TimeTicks before_fork = TimeTicks::Now();
+#if defined(OS_LINUX) || defined(OS_AIX)
+  if (options.clone_flags) {
+    // Signal handling in this function assumes the creation of a new
+    // process, so we check that a thread is not being created by mistake
+    // and that signal handling follows the process-creation rules.
+    RAW_CHECK(
+        !(options.clone_flags & (CLONE_SIGHAND | CLONE_THREAD | CLONE_VM)));
+
+    // We specify a null ptid and ctid.
+    RAW_CHECK(
+        !(options.clone_flags &
+          (CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT_SETTID)));
+
+    // Since we use waitpid, we do not support custom termination signals in the
+    // clone flags.
+    RAW_CHECK((options.clone_flags & 0xff) == 0);
+
+    pid = ForkWithFlags(options.clone_flags | SIGCHLD, nullptr, nullptr);
+  } else
+#endif
+  {
+    pid = fork();
+  }
+
+  // Always restore the original signal mask in the parent.
+  if (pid != 0) {
+    base::TimeTicks after_fork = TimeTicks::Now();
+    SetSignalMask(orig_sigmask);
+
+    base::TimeDelta fork_time = after_fork - before_fork;
+    UMA_HISTOGRAM_TIMES("MPArch.ForkTime", fork_time);
+  }
+
+  if (pid < 0) {
+    DPLOG(ERROR) << "fork";
+    return Process();
+  } else if (pid == 0) {
+    // Child process
+
+    // DANGER: no calls to malloc or locks are allowed from now on:
+    // http://crbug.com/36678
+
+    // DANGER: fork() rule: in the child, if you don't end up doing exec*(),
+    // you call _exit() instead of exit(). This is because _exit() does not
+    // call any previously-registered (in the parent) exit handlers, which
+    // might do things like block waiting for threads that don't even exist
+    // in the child.
+
+    // If a child process uses the readline library, the process can block
+    // forever. In BSD-like OSes, including OS X, it is safe to assign
+    // /dev/null as stdin. See http://crbug.com/56596.
+    // See http://crbug.com/56596.
+    base::ScopedFD null_fd(HANDLE_EINTR(open("/dev/null", O_RDONLY)));
+    if (!null_fd.is_valid()) {
+      RAW_LOG(ERROR, "Failed to open /dev/null");
+      _exit(127);
+    }
+
+    int new_fd = HANDLE_EINTR(dup2(null_fd.get(), STDIN_FILENO));
+    if (new_fd != STDIN_FILENO) {
+      RAW_LOG(ERROR, "Failed to dup /dev/null for stdin");
+      _exit(127);
+    }
+
+    if (options.new_process_group) {
+      // Instead of inheriting the process group ID of the parent, the child
+      // starts a new process group with a pgid equal to its process ID.
+      if (setpgid(0, 0) < 0) {
+        RAW_LOG(ERROR, "setpgid failed");
+        _exit(127);
+      }
+    }
+
+    if (options.maximize_rlimits) {
+      // Some resource limits need to be maximal in this child.
+      for (size_t i = 0; i < options.maximize_rlimits->size(); ++i) {
+        const int resource = (*options.maximize_rlimits)[i];
+        struct rlimit limit;
+        if (getrlimit(resource, &limit) < 0) {
+          RAW_LOG(WARNING, "getrlimit failed");
+        } else if (limit.rlim_cur < limit.rlim_max) {
+          limit.rlim_cur = limit.rlim_max;
+          if (setrlimit(resource, &limit) < 0) {
+            RAW_LOG(WARNING, "setrlimit failed");
+          }
+        }
+      }
+    }
+
+#if defined(OS_MACOSX)
+    RestoreDefaultExceptionHandler();
+#endif  // defined(OS_MACOSX)
+
+    ResetChildSignalHandlersToDefaults();
+    SetSignalMask(orig_sigmask);
+
+#if 0
+    // When debugging it can be helpful to check that we really aren't making
+    // any hidden calls to malloc.
+    void *malloc_thunk =
+        reinterpret_cast<void*>(reinterpret_cast<intptr_t>(malloc) & ~4095);
+    mprotect(malloc_thunk, 4096, PROT_READ | PROT_WRITE | PROT_EXEC);
+    memset(reinterpret_cast<void*>(malloc), 0xff, 8);
+#endif  // 0
+
+#if defined(OS_CHROMEOS)
+    if (options.ctrl_terminal_fd >= 0) {
+      // Set process' controlling terminal.
+      if (HANDLE_EINTR(setsid()) != -1) {
+        if (HANDLE_EINTR(
+                ioctl(options.ctrl_terminal_fd, TIOCSCTTY, nullptr)) == -1) {
+          RAW_LOG(WARNING, "ioctl(TIOCSCTTY), ctrl terminal not set");
+        }
+      } else {
+        RAW_LOG(WARNING, "setsid failed, ctrl terminal not set");
+      }
+    }
+#endif  // defined(OS_CHROMEOS)
+
+    // Cannot use STL iterators here, since debug iterators use locks.
+    for (size_t i = 0; i < options.fds_to_remap.size(); ++i) {
+      const FileHandleMappingVector::value_type& value =
+          options.fds_to_remap[i];
+      fd_shuffle1.push_back(InjectionArc(value.first, value.second, false));
+      fd_shuffle2.push_back(InjectionArc(value.first, value.second, false));
+    }
+
+    if (!options.environ.empty() || options.clear_environ)
+      SetEnvironment(new_environ.get());
+
+    // fd_shuffle1 is mutated by this call because it cannot malloc.
+    if (!ShuffleFileDescriptors(&fd_shuffle1))
+      _exit(127);
+
+    CloseSuperfluousFds(fd_shuffle2);
+
+    // Set NO_NEW_PRIVS by default. Since NO_NEW_PRIVS only exists in kernel
+    // 3.5+, do not check the return value of prctl here.
+#if defined(OS_LINUX) || defined(OS_AIX)
+#ifndef PR_SET_NO_NEW_PRIVS
+#define PR_SET_NO_NEW_PRIVS 38
+#endif
+    if (!options.allow_new_privs) {
+      if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) && errno != EINVAL) {
+        // Only log if the error is not EINVAL (i.e. not supported).
+        RAW_LOG(FATAL, "prctl(PR_SET_NO_NEW_PRIVS) failed");
+      }
+    }
+
+    if (options.kill_on_parent_death) {
+      if (prctl(PR_SET_PDEATHSIG, SIGKILL) != 0) {
+        RAW_LOG(ERROR, "prctl(PR_SET_PDEATHSIG) failed");
+        _exit(127);
+      }
+    }
+#endif
+
+    if (current_directory != nullptr) {
+      RAW_CHECK(chdir(current_directory) == 0);
+    }
+
+    if (options.pre_exec_delegate != nullptr) {
+      options.pre_exec_delegate->RunAsyncSafe();
+    }
+
+    const char* executable_path = !options.real_path.empty() ?
+        options.real_path.value().c_str() : argv_cstr[0];
+
+    execvp(executable_path, argv_cstr.data());
+
+    RAW_LOG(ERROR, "LaunchProcess: failed to execvp:");
+    RAW_LOG(ERROR, argv_cstr[0]);
+    _exit(127);
+  } else {
+    // Parent process
+    if (options.wait) {
+      // While this isn't strictly disk IO, waiting for another process to
+      // finish is the sort of thing ThreadRestrictions is trying to prevent.
+      base::AssertBlockingAllowed();
+      pid_t ret = HANDLE_EINTR(waitpid(pid, nullptr, 0));
+      DPCHECK(ret > 0);
+    }
+  }
+
+  return Process(pid);
+}
+
+void RaiseProcessToHighPriority() {
+  // On POSIX, we don't actually do anything here.  We could try to nice() or
+  // setpriority() or sched_setscheduler(), but these all require extra
+  // rights.
+}
+
+// Executes the application specified by |argv| and waits for it to exit.
+// Stores the output (stdout) in |output|. If |do_search_path| is set, it
+// searches the path for the application; in that case, |envp| must be null,
+// and it will use the current environment. If |do_search_path| is false,
+// |argv[0]| should fully specify the path of the application, and |envp| will
+// be used as the environment. If |include_stderr| is true, stderr is also
+// captured in |output|; otherwise it is redirected to /dev/null.
+// The return value of the function indicates success or failure. In the case of
+// success, the application exit code will be returned in |*exit_code|, which
+// should be checked to determine if the application ran successfully.
+static bool GetAppOutputInternal(
+    const std::vector<std::string>& argv,
+    char* const envp[],
+    bool include_stderr,
+    std::string* output,
+    bool do_search_path,
+    int* exit_code) {
+  base::AssertBlockingAllowed();
+  // |exit_code| must be supplied so the calling function can determine
+  // success.
+  DCHECK(exit_code);
+  *exit_code = EXIT_FAILURE;
+
+  // Declare and call reserve() here before calling fork() because the child
+  // process cannot allocate memory.
+  std::vector<char*> argv_cstr;
+  argv_cstr.reserve(argv.size() + 1);
+  InjectiveMultimap fd_shuffle1;
+  InjectiveMultimap fd_shuffle2;
+  fd_shuffle1.reserve(3);
+  fd_shuffle2.reserve(3);
+
+  // Either |do_search_path| should be false or |envp| should be null, but not
+  // both.
+  DCHECK(!do_search_path ^ !envp);
+
+  int pipe_fd[2];
+  if (pipe(pipe_fd) < 0)
+    return false;
+
+  pid_t pid = fork();
+  switch (pid) {
+    case -1: {
+      // error
+      close(pipe_fd[0]);
+      close(pipe_fd[1]);
+      return false;
+    }
+    case 0: {
+      // child
+      //
+      // DANGER: no calls to malloc or locks are allowed from now on:
+      // http://crbug.com/36678
+
+#if defined(OS_MACOSX)
+      RestoreDefaultExceptionHandler();
+#endif
+
+      // Obscure fork() rule: in the child, if you don't end up doing exec*(),
+      // you call _exit() instead of exit(). This is because _exit() does not
+      // call any previously-registered (in the parent) exit handlers, which
+      // might do things like block waiting for threads that don't even exist
+      // in the child.
+      int dev_null = open("/dev/null", O_WRONLY);
+      if (dev_null < 0)
+        _exit(127);
+
+      fd_shuffle1.push_back(InjectionArc(pipe_fd[1], STDOUT_FILENO, true));
+      fd_shuffle1.push_back(InjectionArc(include_stderr ? pipe_fd[1] : dev_null,
+                                         STDERR_FILENO, true));
+      fd_shuffle1.push_back(InjectionArc(dev_null, STDIN_FILENO, true));
+      // Adding another element here? Remember to increase the argument to
+      // reserve(), above.
+
+      for (size_t i = 0; i < fd_shuffle1.size(); ++i)
+        fd_shuffle2.push_back(fd_shuffle1[i]);
+
+      if (!ShuffleFileDescriptors(&fd_shuffle1))
+        _exit(127);
+
+      CloseSuperfluousFds(fd_shuffle2);
+
+      for (const auto& arg : argv)
+        argv_cstr.push_back(const_cast<char*>(arg.c_str()));
+      argv_cstr.push_back(nullptr);
+
+      if (do_search_path)
+        execvp(argv_cstr[0], argv_cstr.data());
+      else
+        execve(argv_cstr[0], argv_cstr.data(), envp);
+      _exit(127);
+    }
+    default: {
+      // parent
+      //
+      // Close our writing end of the pipe now. Otherwise later reads would
+      // not be able to detect the end of the child's output (in theory we
+      // could still write to the pipe).
+      close(pipe_fd[1]);
+
+      output->clear();
+
+      while (true) {
+        char buffer[256];
+        ssize_t bytes_read =
+            HANDLE_EINTR(read(pipe_fd[0], buffer, sizeof(buffer)));
+        if (bytes_read <= 0)
+          break;
+        output->append(buffer, bytes_read);
+      }
+      close(pipe_fd[0]);
+
+      // Always wait for the child so we can collect its exit code.
+      Process process(pid);
+      // A process launched with GetAppOutput*() usually doesn't wait on the
+      // process that launched it and thus chances of deadlock are low.
+      GetAppOutputScopedAllowBaseSyncPrimitives allow_base_sync_primitives;
+      return process.WaitForExit(exit_code);
+    }
+  }
+}
+
+bool GetAppOutput(const CommandLine& cl, std::string* output) {
+  return GetAppOutput(cl.argv(), output);
+}
+
+bool GetAppOutput(const std::vector<std::string>& argv, std::string* output) {
+  // Run |execve()| with the current environment.
+  int exit_code;
+  bool result =
+      GetAppOutputInternal(argv, nullptr, false, output, true, &exit_code);
+  return result && exit_code == EXIT_SUCCESS;
+}
+
+bool GetAppOutputAndError(const CommandLine& cl, std::string* output) {
+  // Run |execve()| with the current environment.
+  int exit_code;
+  bool result =
+      GetAppOutputInternal(cl.argv(), nullptr, true, output, true, &exit_code);
+  return result && exit_code == EXIT_SUCCESS;
+}
+
+bool GetAppOutputAndError(const std::vector<std::string>& argv,
+                          std::string* output) {
+  int exit_code;
+  bool result =
+      GetAppOutputInternal(argv, nullptr, true, output, true, &exit_code);
+  return result && exit_code == EXIT_SUCCESS;
+}
+
+bool GetAppOutputWithExitCode(const CommandLine& cl,
+                              std::string* output,
+                              int* exit_code) {
+  // Run |execve()| with the current environment.
+  return GetAppOutputInternal(cl.argv(), nullptr, false, output, true,
+                              exit_code);
+}
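+
+// A minimal usage sketch (hypothetical caller) for the GetAppOutput family:
+//
+//   std::string out;
+//   std::vector<std::string> argv = {"/bin/echo", "hello"};
+//   if (base::GetAppOutput(argv, &out)) {
+//     // |out| now holds "hello\n"; the call returned true because the child
+//     // exited with code 0.
+//   }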
+
+#endif  // !defined(OS_NACL_NONSFI)
+
+#if defined(OS_LINUX) || defined(OS_NACL_NONSFI) || defined(OS_AIX)
+namespace {
+
+// This function runs on the stack specified on the clone call. It uses longjmp
+// to switch back to the original stack so the child can return from sys_clone.
+int CloneHelper(void* arg) {
+  jmp_buf* env_ptr = reinterpret_cast<jmp_buf*>(arg);
+  longjmp(*env_ptr, 1);
+
+  // Should not be reached.
+  RAW_CHECK(false);
+  return 1;
+}
+
+// This function is noinline to ensure that stack_buf is below the stack pointer
+// that is saved when setjmp is called below. This is needed because when
+// compiled with FORTIFY_SOURCE, glibc's longjmp checks that the stack is moved
+// upwards. See crbug.com/442912 for more details.
+#if defined(ADDRESS_SANITIZER)
+// Disable AddressSanitizer instrumentation for this function to make sure
+// |stack_buf| is allocated on thread stack instead of ASan's fake stack.
+// Under ASan longjmp() will attempt to clean up the area between the old and
+// new stack pointers and print a warning that may confuse the user.
+__attribute__((no_sanitize_address))
+#endif
+NOINLINE pid_t CloneAndLongjmpInChild(unsigned long flags,
+                                      pid_t* ptid,
+                                      pid_t* ctid,
+                                      jmp_buf* env) {
+  // We use the libc clone wrapper instead of making the syscall
+  // directly because making the syscall may fail to update the libc's
+  // internal pid cache. The libc interface unfortunately requires
+  // specifying a new stack, so we use setjmp/longjmp to emulate
+  // fork-like behavior.
+  alignas(16) char stack_buf[PTHREAD_STACK_MIN];
+#if defined(ARCH_CPU_X86_FAMILY) || defined(ARCH_CPU_ARM_FAMILY) ||   \
+    defined(ARCH_CPU_MIPS_FAMILY) || defined(ARCH_CPU_S390_FAMILY) || \
+    defined(ARCH_CPU_PPC64_FAMILY)
+  // The stack grows downward.
+  void* stack = stack_buf + sizeof(stack_buf);
+#else
+#error "Unsupported architecture"
+#endif
+  return clone(&CloneHelper, stack, flags, env, ptid, nullptr, ctid);
+}
+
+}  // anonymous namespace
+
+pid_t ForkWithFlags(unsigned long flags, pid_t* ptid, pid_t* ctid) {
+  const bool clone_tls_used = flags & CLONE_SETTLS;
+  const bool invalid_ctid =
+      (flags & (CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)) && !ctid;
+  const bool invalid_ptid = (flags & CLONE_PARENT_SETTID) && !ptid;
+
+  // We do not support CLONE_VM.
+  const bool clone_vm_used = flags & CLONE_VM;
+
+  if (clone_tls_used || invalid_ctid || invalid_ptid || clone_vm_used) {
+    RAW_LOG(FATAL, "Invalid usage of ForkWithFlags");
+  }
+
+  jmp_buf env;
+  if (setjmp(env) == 0) {
+    return CloneAndLongjmpInChild(flags, ptid, ctid, &env);
+  }
+
+  return 0;
+}
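+
+// A minimal usage sketch (hypothetical caller): ForkWithFlags behaves like
+// fork() but accepts extra clone flags (CLONE_FS below is only illustrative).
+//
+//   pid_t pid = ForkWithFlags(CLONE_FS | SIGCHLD, nullptr, nullptr);
+//   if (pid == 0) {
+//     // Child: control arrives here via the longjmp in CloneHelper, exactly
+//     // as if fork() had returned 0.
+//     _exit(0);
+//   }
+//   // Parent: reap the child with waitpid(pid, ...).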
+#endif  // defined(OS_LINUX) || defined(OS_NACL_NONSFI) || defined(OS_AIX)
+
+}  // namespace base
diff --git a/base/process/launch_unittest_win.cc b/base/process/launch_unittest_win.cc
new file mode 100644
index 0000000..1b060c5
--- /dev/null
+++ b/base/process/launch_unittest_win.cc
@@ -0,0 +1,24 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/launch.h"
+
+#include "base/command_line.h"
+#include "base/files/file_path.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(LaunchWinTest, GetAppOutputWithExitCodeShouldReturnExitCode) {
+  CommandLine cl(FilePath(FILE_PATH_LITERAL("cmd")));
+  cl.AppendArg("/c");
+  cl.AppendArg("this-is-not-an-application");
+  std::string output;
+  int exit_code;
+  ASSERT_TRUE(GetAppOutputWithExitCode(cl, &output, &exit_code));
+  ASSERT_TRUE(output.empty());
+  ASSERT_EQ(1, exit_code);
+}
+
+}  // namespace base
diff --git a/base/process/launch_win.cc b/base/process/launch_win.cc
new file mode 100644
index 0000000..0ae6820
--- /dev/null
+++ b/base/process/launch_win.cc
@@ -0,0 +1,403 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/launch.h"
+
+#include <windows.h>
+
+#include <fcntl.h>
+#include <io.h>
+#include <psapi.h>
+#include <shellapi.h>
+#include <userenv.h>
+
+#include <ios>
+#include <limits>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/command_line.h"
+#include "base/debug/activity_tracker.h"
+#include "base/debug/stack_trace.h"
+#include "base/logging.h"
+#include "base/metrics/histogram.h"
+#include "base/process/kill.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/sys_info.h"
+#include "base/win/scoped_handle.h"
+#include "base/win/scoped_process_information.h"
+#include "base/win/startup_information.h"
+#include "base/win/windows_version.h"
+
+namespace base {
+
+namespace {
+
+bool GetAppOutputInternal(const StringPiece16& cl,
+                          bool include_stderr,
+                          std::string* output,
+                          int* exit_code) {
+  HANDLE out_read = nullptr;
+  HANDLE out_write = nullptr;
+
+  SECURITY_ATTRIBUTES sa_attr;
+  // Set the bInheritHandle flag so pipe handles are inherited.
+  sa_attr.nLength = sizeof(SECURITY_ATTRIBUTES);
+  sa_attr.bInheritHandle = TRUE;
+  sa_attr.lpSecurityDescriptor = nullptr;
+
+  // Create the pipe for the child process's STDOUT.
+  if (!CreatePipe(&out_read, &out_write, &sa_attr, 0)) {
+    NOTREACHED() << "Failed to create pipe";
+    return false;
+  }
+
+  // Ensure we don't leak the handles.
+  win::ScopedHandle scoped_out_read(out_read);
+  win::ScopedHandle scoped_out_write(out_write);
+
+  // Ensure the read handles to the pipes are not inherited.
+  if (!SetHandleInformation(out_read, HANDLE_FLAG_INHERIT, 0)) {
+    NOTREACHED() << "Failed to disabled pipe inheritance";
+    return false;
+  }
+
+  FilePath::StringType writable_command_line_string;
+  writable_command_line_string.assign(cl.data(), cl.size());
+
+  STARTUPINFO start_info = {};
+
+  start_info.cb = sizeof(STARTUPINFO);
+  start_info.hStdOutput = out_write;
+  // Keep the normal stdin.
+  start_info.hStdInput = GetStdHandle(STD_INPUT_HANDLE);
+  if (include_stderr) {
+    start_info.hStdError = out_write;
+  } else {
+    start_info.hStdError = GetStdHandle(STD_ERROR_HANDLE);
+  }
+  start_info.dwFlags |= STARTF_USESTDHANDLES;
+
+  // Create the child process.
+  PROCESS_INFORMATION temp_process_info = {};
+  if (!CreateProcess(nullptr, &writable_command_line_string[0], nullptr,
+                     nullptr,
+                     TRUE,  // Handles are inherited.
+                     0, nullptr, nullptr, &start_info, &temp_process_info)) {
+    NOTREACHED() << "Failed to start process";
+    return false;
+  }
+
+  base::win::ScopedProcessInformation proc_info(temp_process_info);
+  base::debug::GlobalActivityTracker* tracker =
+      base::debug::GlobalActivityTracker::Get();
+  if (tracker)
+    tracker->RecordProcessLaunch(proc_info.process_id(), cl.as_string());
+
+  // Close our writing end of the pipe now. Otherwise later reads would not be
+  // able to detect the end of the child's output.
+  scoped_out_write.Close();
+
+  // Read output from the child process's pipe for STDOUT
+  const int kBufferSize = 1024;
+  char buffer[kBufferSize];
+
+  for (;;) {
+    DWORD bytes_read = 0;
+    BOOL success =
+        ::ReadFile(out_read, buffer, kBufferSize, &bytes_read, nullptr);
+    if (!success || bytes_read == 0)
+      break;
+    output->append(buffer, bytes_read);
+  }
+
+  // Let's wait for the process to finish.
+  WaitForSingleObject(proc_info.process_handle(), INFINITE);
+
+  base::TerminationStatus status = GetTerminationStatus(
+      proc_info.process_handle(), exit_code);
+  base::debug::GlobalActivityTracker::RecordProcessExitIfEnabled(
+      proc_info.process_id(), *exit_code);
+  return status != base::TERMINATION_STATUS_PROCESS_CRASHED &&
+         status != base::TERMINATION_STATUS_ABNORMAL_TERMINATION;
+}
+
+}  // namespace
+
+void RouteStdioToConsole(bool create_console_if_not_found) {
+  // Don't change anything if stdout or stderr already point to a
+  // valid stream.
+  //
+  // If we are running under Buildbot or under Cygwin's default
+  // terminal (mintty), stdout and stderr will be pipe handles.  In
+  // that case, we don't want to open CONOUT$, because its output
+  // likely does not go anywhere.
+  //
+  // We don't use GetStdHandle() to check stdout/stderr here because
+  // it can return dangling IDs of handles that were never inherited
+  // by this process.  These IDs could have been reused by the time
+  // this function is called.  The CRT checks the validity of
+  // stdout/stderr on startup (before the handle IDs can be reused).
+  // _fileno(stdout) will return -2 (_NO_CONSOLE_FILENO) if stdout was
+  // invalid.
+  if (_fileno(stdout) >= 0 || _fileno(stderr) >= 0) {
+    // _fileno was broken for SUBSYSTEM:WINDOWS from VS2010 to VS2012/2013.
+    // http://crbug.com/358267. Confirm that the underlying HANDLE is valid
+    // before aborting.
+
+    intptr_t stdout_handle = _get_osfhandle(_fileno(stdout));
+    intptr_t stderr_handle = _get_osfhandle(_fileno(stderr));
+    if (stdout_handle >= 0 || stderr_handle >= 0)
+      return;
+  }
+
+  if (!AttachConsole(ATTACH_PARENT_PROCESS)) {
+    unsigned int result = GetLastError();
+    // Was probably already attached.
+    if (result == ERROR_ACCESS_DENIED)
+      return;
+    // Don't bother creating a new console for each child process if the
+    // parent process is invalid (eg: crashed).
+    if (result == ERROR_GEN_FAILURE)
+      return;
+    if (create_console_if_not_found) {
+      // Make a new console if attaching to parent fails with any other error.
+      // It should be ERROR_INVALID_HANDLE at this point, which means the
+      // browser was likely not started from a console.
+      AllocConsole();
+    } else {
+      return;
+    }
+  }
+
+  // Arbitrary byte count to use when buffering output lines. A larger value
+  // means potential waste; a smaller one means more risk of interleaved
+  // log lines in the output.
+  enum { kOutputBufferSize = 64 * 1024 };
+
+  if (freopen("CONOUT$", "w", stdout)) {
+    setvbuf(stdout, nullptr, _IOLBF, kOutputBufferSize);
+    // Overwrite FD 1 for the benefit of any code that uses this FD
+    // directly.  This is safe because the CRT allocates FDs 0, 1 and
+    // 2 at startup even if they don't have valid underlying Windows
+    // handles.  This means we won't be overwriting an FD created by
+    // _open() after startup.
+    _dup2(_fileno(stdout), 1);
+  }
+  if (freopen("CONOUT$", "w", stderr)) {
+    setvbuf(stderr, nullptr, _IOLBF, kOutputBufferSize);
+    _dup2(_fileno(stderr), 2);
+  }
+
+  // Fix all cout, wcout, cin, wcin, cerr, wcerr, clog and wclog.
+  std::ios::sync_with_stdio();
+}
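+
+// A minimal usage sketch (hypothetical caller, e.g. early in the startup of
+// a /SUBSYSTEM:WINDOWS binary that may have been launched from a console):
+//
+//   base::RouteStdioToConsole(/*create_console_if_not_found=*/false);
+//   printf("This is now visible in the parent console.\n");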
+
+Process LaunchProcess(const CommandLine& cmdline,
+                      const LaunchOptions& options) {
+  return LaunchProcess(cmdline.GetCommandLineString(), options);
+}
+
+Process LaunchProcess(const string16& cmdline,
+                      const LaunchOptions& options) {
+  win::StartupInformation startup_info_wrapper;
+  STARTUPINFO* startup_info = startup_info_wrapper.startup_info();
+
+  bool inherit_handles = options.inherit_mode == LaunchOptions::Inherit::kAll;
+  DWORD flags = 0;
+  if (!options.handles_to_inherit.empty()) {
+    DCHECK_EQ(options.inherit_mode, LaunchOptions::Inherit::kSpecific);
+
+    if (options.handles_to_inherit.size() >
+        std::numeric_limits<DWORD>::max() / sizeof(HANDLE)) {
+      DLOG(ERROR) << "Too many handles to inherit.";
+      return Process();
+    }
+
+    // Ensure the handles can be inherited.
+    for (HANDLE handle : options.handles_to_inherit) {
+      BOOL result = SetHandleInformation(handle, HANDLE_FLAG_INHERIT,
+                                         HANDLE_FLAG_INHERIT);
+      PCHECK(result);
+    }
+
+    if (!startup_info_wrapper.InitializeProcThreadAttributeList(1)) {
+      DPLOG(ERROR);
+      return Process();
+    }
+
+    if (!startup_info_wrapper.UpdateProcThreadAttribute(
+            PROC_THREAD_ATTRIBUTE_HANDLE_LIST,
+            const_cast<HANDLE*>(&options.handles_to_inherit[0]),
+            static_cast<DWORD>(options.handles_to_inherit.size() *
+                               sizeof(HANDLE)))) {
+      DPLOG(ERROR);
+      return Process();
+    }
+
+    inherit_handles = true;
+    flags |= EXTENDED_STARTUPINFO_PRESENT;
+  }
+
+  if (options.empty_desktop_name)
+    startup_info->lpDesktop = const_cast<wchar_t*>(L"");
+  startup_info->dwFlags = STARTF_USESHOWWINDOW;
+  startup_info->wShowWindow = options.start_hidden ? SW_HIDE : SW_SHOWNORMAL;
+
+  if (options.stdin_handle || options.stdout_handle || options.stderr_handle) {
+    DCHECK(inherit_handles);
+    DCHECK(options.stdin_handle);
+    DCHECK(options.stdout_handle);
+    DCHECK(options.stderr_handle);
+    startup_info->dwFlags |= STARTF_USESTDHANDLES;
+    startup_info->hStdInput = options.stdin_handle;
+    startup_info->hStdOutput = options.stdout_handle;
+    startup_info->hStdError = options.stderr_handle;
+  }
+
+  const bool launch_suspended =
+      options.job_handle || options.grant_foreground_privilege;
+
+  if (launch_suspended)
+    flags |= CREATE_SUSPENDED;
+
+  if (options.job_handle) {
+    // If this code is run under a debugger, the launched process is
+    // automatically associated with a job object created by the debugger.
+    // The CREATE_BREAKAWAY_FROM_JOB flag is used to prevent this on Windows
+    // releases that do not support nested jobs.
+    if (win::GetVersion() < win::VERSION_WIN8)
+      flags |= CREATE_BREAKAWAY_FROM_JOB;
+  }
+
+  if (options.force_breakaway_from_job_)
+    flags |= CREATE_BREAKAWAY_FROM_JOB;
+
+  PROCESS_INFORMATION temp_process_info = {};
+
+  LPCTSTR current_directory = options.current_directory.empty()
+                                  ? nullptr
+                                  : options.current_directory.value().c_str();
+
+  string16 writable_cmdline(cmdline);
+  if (options.as_user) {
+    flags |= CREATE_UNICODE_ENVIRONMENT;
+    void* environment_block = nullptr;
+
+    if (!CreateEnvironmentBlock(&environment_block, options.as_user, FALSE)) {
+      DPLOG(ERROR);
+      return Process();
+    }
+
+    BOOL launched = CreateProcessAsUser(
+        options.as_user, nullptr, &writable_cmdline[0], nullptr, nullptr,
+        inherit_handles, flags, environment_block, current_directory,
+        startup_info, &temp_process_info);
+    DestroyEnvironmentBlock(environment_block);
+    if (!launched) {
+      DPLOG(ERROR) << "Command line:" << std::endl << UTF16ToUTF8(cmdline)
+                   << std::endl;
+      return Process();
+    }
+  } else {
+    if (!CreateProcess(nullptr, &writable_cmdline[0], nullptr, nullptr,
+                       inherit_handles, flags, nullptr, current_directory,
+                       startup_info, &temp_process_info)) {
+      DPLOG(ERROR) << "Command line:" << std::endl << UTF16ToUTF8(cmdline)
+                   << std::endl;
+      return Process();
+    }
+  }
+  base::win::ScopedProcessInformation process_info(temp_process_info);
+
+  if (options.job_handle &&
+      !AssignProcessToJobObject(options.job_handle,
+                                process_info.process_handle())) {
+    DPLOG(ERROR) << "Could not AssignProcessToObject";
+    Process scoped_process(process_info.TakeProcessHandle());
+    scoped_process.Terminate(win::kProcessKilledExitCode, true);
+    return Process();
+  }
+
+  if (options.grant_foreground_privilege &&
+      !AllowSetForegroundWindow(GetProcId(process_info.process_handle()))) {
+    DPLOG(ERROR) << "Failed to grant foreground privilege to launched process";
+  }
+
+  if (launch_suspended)
+    ResumeThread(process_info.thread_handle());
+
+  if (options.wait)
+    WaitForSingleObject(process_info.process_handle(), INFINITE);
+
+  base::debug::GlobalActivityTracker::RecordProcessLaunchIfEnabled(
+      process_info.process_id(), cmdline);
+  return Process(process_info.TakeProcessHandle());
+}
+
+Process LaunchElevatedProcess(const CommandLine& cmdline,
+                              const LaunchOptions& options) {
+  const string16 file = cmdline.GetProgram().value();
+  const string16 arguments = cmdline.GetArgumentsString();
+
+  SHELLEXECUTEINFO shex_info = {};
+  shex_info.cbSize = sizeof(shex_info);
+  shex_info.fMask = SEE_MASK_NOCLOSEPROCESS;
+  shex_info.hwnd = GetActiveWindow();
+  shex_info.lpVerb = L"runas";
+  shex_info.lpFile = file.c_str();
+  shex_info.lpParameters = arguments.c_str();
+  shex_info.lpDirectory = nullptr;
+  shex_info.nShow = options.start_hidden ? SW_HIDE : SW_SHOWNORMAL;
+  shex_info.hInstApp = nullptr;
+
+  if (!ShellExecuteEx(&shex_info)) {
+    DPLOG(ERROR);
+    return Process();
+  }
+
+  if (options.wait)
+    WaitForSingleObject(shex_info.hProcess, INFINITE);
+
+  base::debug::GlobalActivityTracker::RecordProcessLaunchIfEnabled(
+      GetProcessId(shex_info.hProcess), file, arguments);
+  return Process(shex_info.hProcess);
+}
+
+bool SetJobObjectLimitFlags(HANDLE job_object, DWORD limit_flags) {
+  JOBOBJECT_EXTENDED_LIMIT_INFORMATION limit_info = {};
+  limit_info.BasicLimitInformation.LimitFlags = limit_flags;
+  return 0 != SetInformationJobObject(
+      job_object,
+      JobObjectExtendedLimitInformation,
+      &limit_info,
+      sizeof(limit_info));
+}
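+
+// A minimal usage sketch (hypothetical caller): kill every process in the
+// job when the last handle to the job is closed.
+//
+//   base::win::ScopedHandle job(::CreateJobObject(nullptr, nullptr));
+//   if (job.IsValid())
+//     SetJobObjectLimitFlags(job.Get(), JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE);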
+
+bool GetAppOutput(const CommandLine& cl, std::string* output) {
+  return GetAppOutput(cl.GetCommandLineString(), output);
+}
+
+bool GetAppOutputAndError(const CommandLine& cl, std::string* output) {
+  int exit_code;
+  return GetAppOutputInternal(
+      cl.GetCommandLineString(), true, output, &exit_code);
+}
+
+bool GetAppOutputWithExitCode(const CommandLine& cl,
+                              std::string* output,
+                              int* exit_code) {
+  return GetAppOutputInternal(
+      cl.GetCommandLineString(), false, output, exit_code);
+}
+
+bool GetAppOutput(const StringPiece16& cl, std::string* output) {
+  int exit_code;
+  return GetAppOutputInternal(cl, false, output, &exit_code);
+}
+
+void RaiseProcessToHighPriority() {
+  SetPriorityClass(GetCurrentProcess(), HIGH_PRIORITY_CLASS);
+}
+
+}  // namespace base
diff --git a/base/process/memory.cc b/base/process/memory.cc
new file mode 100644
index 0000000..5b98733
--- /dev/null
+++ b/base/process/memory.cc
@@ -0,0 +1,54 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/alias.h"
+#include "base/logging.h"
+#include "base/process/memory.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// Defined in memory_win.cc for Windows.
+#if !defined(OS_WIN)
+
+namespace {
+
+// The Breakpad server classifies base::`anonymous namespace'::OnNoMemory as
+// an out-of-memory crash.
+NOINLINE void OnNoMemory(size_t size) {
+  size_t tmp_size = size;
+  base::debug::Alias(&tmp_size);
+  LOG(FATAL) << "Out of memory. size=" << tmp_size;
+}
+
+}  // namespace
+
+void TerminateBecauseOutOfMemory(size_t size) {
+  OnNoMemory(size);
+}
+
+#endif
+
+// Defined in memory_mac.mm for Mac.
+#if !defined(OS_MACOSX)
+
+bool UncheckedCalloc(size_t num_items, size_t size, void** result) {
+  const size_t alloc_size = num_items * size;
+
+  // Overflow check
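+  // (e.g. with a 32-bit size_t, 0x10000 items of size 0x10001 would wrap to
+  // 0x10000, and 0x10000 / 0x10001 != 0x10000, so the check below catches it).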
+  if (size && ((alloc_size / size) != num_items)) {
+    *result = nullptr;
+    return false;
+  }
+
+  if (!UncheckedMalloc(alloc_size, result))
+    return false;
+
+  memset(*result, 0, alloc_size);
+  return true;
+}
+
+#endif
+
+}  // namespace base
diff --git a/base/process/memory.h b/base/process/memory.h
new file mode 100644
index 0000000..7f16e12
--- /dev/null
+++ b/base/process/memory.h
@@ -0,0 +1,83 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_PROCESS_MEMORY_H_
+#define BASE_PROCESS_MEMORY_H_
+
+#include <stddef.h>
+
+#include "base/base_export.h"
+#include "base/process/process_handle.h"
+#include "build/build_config.h"
+
+#ifdef PVALLOC_AVAILABLE
+// Build config explicitly tells us whether or not pvalloc is available.
+#elif defined(LIBC_GLIBC) && !defined(USE_TCMALLOC)
+#define PVALLOC_AVAILABLE 1
+#else
+#define PVALLOC_AVAILABLE 0
+#endif
+
+namespace base {
+
+// Enables 'terminate on heap corruption' flag. Helps protect against heap
+// overflow. Has no effect if the OS doesn't provide the necessary facility.
+BASE_EXPORT void EnableTerminationOnHeapCorruption();
+
+// Turns on process termination if memory runs out.
+BASE_EXPORT void EnableTerminationOnOutOfMemory();
+
+// Terminates process. Should be called only for out of memory errors.
+// Crash reporting classifies such crashes as OOM.
+BASE_EXPORT void TerminateBecauseOutOfMemory(size_t size);
+
+#if defined(OS_LINUX) || defined(OS_ANDROID) || defined(OS_AIX)
+BASE_EXPORT extern size_t g_oom_size;
+
+// The maximum allowed value for the OOM score.
+const int kMaxOomScore = 1000;
+
+// This adjusts /proc/<pid>/oom_score_adj so the Linux OOM killer will
+// prefer to kill certain process types over others. The range for the
+// adjustment is [-1000, 1000], with [0, 1000] being user accessible.
+// If the Linux system doesn't support the newer oom_score_adj range
+// of [0, 1000], then we revert to using the older oom_adj, and
+// translate the given value into [0, 15].  Some aliasing of values
+// may occur in that case, of course.
+BASE_EXPORT bool AdjustOOMScore(ProcessId process, int score);
+#endif
+
+#if defined(OS_WIN)
+namespace win {
+
+// Custom Windows exception code chosen to indicate an out of memory error.
+// See https://msdn.microsoft.com/en-us/library/het71c37.aspx.
+// "To make sure that you do not define a code that conflicts with an existing
+// exception code" ... "The resulting error code should therefore have the
+// highest four bits set to hexadecimal E."
+// 0xe0000008 was chosen arbitrarily, as 0x00000008 is ERROR_NOT_ENOUGH_MEMORY.
+const DWORD kOomExceptionCode = 0xe0000008;
+
+}  // namespace win
+#endif
+
+// Special allocator functions for callers that want to check for OOM.
+// These will not abort if the allocation fails even if
+// EnableTerminationOnOutOfMemory has been called.
+// This can be useful for huge and/or unpredictable size memory allocations.
+// Please only use this if you really handle the case when the allocation
+// fails. Doing otherwise would risk security.
+// These functions may still crash on OOM when running under memory tools,
+// specifically ASan and other sanitizers.
+// The return value tells whether the allocation succeeded. On failure,
+// |result| is set to NULL; otherwise it holds the address of the allocated
+// memory.
+BASE_EXPORT WARN_UNUSED_RESULT bool UncheckedMalloc(size_t size,
+                                                    void** result);
+BASE_EXPORT WARN_UNUSED_RESULT bool UncheckedCalloc(size_t num_items,
+                                                    size_t size,
+                                                    void** result);
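+
+// A minimal usage sketch (hypothetical caller; |huge_size| is illustrative),
+// handling a huge allocation without risking an OOM crash:
+//
+//   void* buf = nullptr;
+//   if (!base::UncheckedMalloc(huge_size, &buf))
+//     return false;  // Degrade gracefully instead of terminating.
+//   ...
+//   free(buf);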
+
+}  // namespace base
+
+#endif  // BASE_PROCESS_MEMORY_H_
diff --git a/base/process/memory_fuchsia.cc b/base/process/memory_fuchsia.cc
new file mode 100644
index 0000000..6f559a4
--- /dev/null
+++ b/base/process/memory_fuchsia.cc
@@ -0,0 +1,24 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/memory.h"
+
+#include <stdlib.h>
+
+namespace base {
+
+void EnableTerminationOnOutOfMemory() {
+  // Nothing to be done here.
+}
+
+void EnableTerminationOnHeapCorruption() {
+  // Nothing to be done here.
+}
+
+bool UncheckedMalloc(size_t size, void** result) {
+  *result = malloc(size);
+  return *result != nullptr;
+}
+
+}  // namespace base
diff --git a/base/process/memory_linux.cc b/base/process/memory_linux.cc
new file mode 100644
index 0000000..21b2069
--- /dev/null
+++ b/base/process/memory_linux.cc
@@ -0,0 +1,114 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/memory.h"
+
+#include <stddef.h>
+
+#include <new>
+
+#include "base/allocator/allocator_shim.h"
+#include "base/allocator/buildflags.h"
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/logging.h"
+#include "base/process/internal_linux.h"
+#include "base/strings/string_number_conversions.h"
+#include "build/build_config.h"
+
+#if defined(USE_TCMALLOC)
+#include "third_party/tcmalloc/chromium/src/config.h"
+#include "third_party/tcmalloc/chromium/src/gperftools/tcmalloc.h"
+#endif
+
+namespace base {
+
+size_t g_oom_size = 0U;
+
+namespace {
+
+void OnNoMemorySize(size_t size) {
+  g_oom_size = size;
+
+  if (size != 0)
+    LOG(FATAL) << "Out of memory, size = " << size;
+  LOG(FATAL) << "Out of memory.";
+}
+
+void OnNoMemory() {
+  OnNoMemorySize(0);
+}
+
+}  // namespace
+
+void EnableTerminationOnHeapCorruption() {
+  // On Linux, there is nothing to do AFAIK.
+}
+
+void EnableTerminationOnOutOfMemory() {
+  // Set the out-of-memory handler for operator new.
+  std::set_new_handler(&OnNoMemory);
+  // If we're using glibc's allocator, the above functions will override
+  // malloc and friends and make them die on out of memory.
+
+#if BUILDFLAG(USE_ALLOCATOR_SHIM)
+  allocator::SetCallNewHandlerOnMallocFailure(true);
+#elif defined(USE_TCMALLOC)
+  // For tcmalloc, we need to tell it to behave like new.
+  tc_set_new_mode(1);
+#endif
+}
+
+// NOTE: This is not the only version of this function in the source:
+// the setuid sandbox (in process_util_linux.c, in the sandbox source)
+// also has its own C version.
+bool AdjustOOMScore(ProcessId process, int score) {
+  if (score < 0 || score > kMaxOomScore)
+    return false;
+
+  FilePath oom_path(internal::GetProcPidDir(process));
+
+  // Attempt to write the newer oom_score_adj file first.
+  FilePath oom_file = oom_path.AppendASCII("oom_score_adj");
+  if (PathExists(oom_file)) {
+    std::string score_str = IntToString(score);
+    DVLOG(1) << "Adjusting oom_score_adj of " << process << " to "
+             << score_str;
+    int score_len = static_cast<int>(score_str.length());
+    return (score_len == WriteFile(oom_file, score_str.c_str(), score_len));
+  }
+
+  // If the oom_score_adj file doesn't exist, then we write the old
+  // style file and translate the oom_adj score to the range 0-15.
+  oom_file = oom_path.AppendASCII("oom_adj");
+  if (PathExists(oom_file)) {
+    // Max score for the old oom_adj range.  Used for conversion of new
+    // values to old values.
+    const int kMaxOldOomScore = 15;
+
+    int converted_score = score * kMaxOldOomScore / kMaxOomScore;
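+    // For example, a new-style score of 300 converts to 300 * 15 / 1000 == 4
+    // after integer truncation.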
+    std::string score_str = IntToString(converted_score);
+    DVLOG(1) << "Adjusting oom_adj of " << process << " to " << score_str;
+    int score_len = static_cast<int>(score_str.length());
+    return (score_len == WriteFile(oom_file, score_str.c_str(), score_len));
+  }
+
+  return false;
+}
+
+bool UncheckedMalloc(size_t size, void** result) {
+#if BUILDFLAG(USE_ALLOCATOR_SHIM)
+  *result = allocator::UncheckedAlloc(size);
+#elif defined(MEMORY_TOOL_REPLACES_ALLOCATOR) || \
+    (!defined(LIBC_GLIBC) && !defined(USE_TCMALLOC))
+  *result = malloc(size);
+#elif defined(LIBC_GLIBC) && !defined(USE_TCMALLOC)
+  *result = __libc_malloc(size);
+#elif defined(USE_TCMALLOC)
+  *result = tc_malloc_skip_new_handler(size);
+#endif
+  return *result != nullptr;
+}
+
+}  // namespace base
diff --git a/base/process/memory_mac.mm b/base/process/memory_mac.mm
new file mode 100644
index 0000000..5b8cd13
--- /dev/null
+++ b/base/process/memory_mac.mm
@@ -0,0 +1,49 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/memory.h"
+
+#include "base/allocator/allocator_interception_mac.h"
+#include "base/allocator/allocator_shim.h"
+#include "base/allocator/buildflags.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace {
+void oom_killer_new() {
+  TerminateBecauseOutOfMemory(0);
+}
+}  // namespace
+
+void EnableTerminationOnHeapCorruption() {
+#if !ARCH_CPU_64_BITS
+  DLOG(WARNING) << "EnableTerminationOnHeapCorruption only works on 64-bit";
+#endif
+}
+
+bool UncheckedMalloc(size_t size, void** result) {
+  return allocator::UncheckedMallocMac(size, result);
+}
+
+bool UncheckedCalloc(size_t num_items, size_t size, void** result) {
+  return allocator::UncheckedCallocMac(num_items, size, result);
+}
+
+void EnableTerminationOnOutOfMemory() {
+  // Step 1: Enable OOM killer on C++ failures.
+  std::set_new_handler(oom_killer_new);
+
+// Step 2: Enable OOM killer on C-malloc failures for the default zone (if we
+// have a shim).
+#if BUILDFLAG(USE_ALLOCATOR_SHIM)
+  allocator::SetCallNewHandlerOnMallocFailure(true);
+#endif
+
+  // Step 3: Enable OOM killer on all other malloc zones (or just "all" without
+  // "other" if shim is disabled).
+  allocator::InterceptAllocationsMac();
+}
+
+}  // namespace base
diff --git a/base/process/memory_stubs.cc b/base/process/memory_stubs.cc
new file mode 100644
index 0000000..787d9ae
--- /dev/null
+++ b/base/process/memory_stubs.cc
@@ -0,0 +1,44 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/memory.h"
+
+#include <stddef.h>
+#include <stdlib.h>
+
+namespace base {
+
+void EnableTerminationOnOutOfMemory() {
+}
+
+void EnableTerminationOnHeapCorruption() {
+}
+
+bool AdjustOOMScore(ProcessId process, int score) {
+  return false;
+}
+
+void TerminateBecauseOutOfMemory(size_t size) {
+  abort();
+}
+
+// UncheckedMalloc and Calloc exist so that platforms making use of
+// EnableTerminationOnOutOfMemory have a way to allocate memory without
+// crashing. This _stubs.cc file is for platforms that do not support
+// EnableTerminationOnOutOfMemory (note the empty implementation above). As
+// such, these two Unchecked*alloc functions need only trivially pass through
+// to their respective stdlib functions, since those functions return null on
+// a failure to allocate.
+
+bool UncheckedMalloc(size_t size, void** result) {
+  *result = malloc(size);
+  return *result != nullptr;
+}
+
+bool UncheckedCalloc(size_t num_items, size_t size, void** result) {
+  *result = calloc(num_items, size);
+  return *result != nullptr;
+}
+
+}  // namespace base
diff --git a/base/process/memory_unittest.cc b/base/process/memory_unittest.cc
new file mode 100644
index 0000000..835cf7e
--- /dev/null
+++ b/base/process/memory_unittest.cc
@@ -0,0 +1,533 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#define _CRT_SECURE_NO_WARNINGS
+
+#include "base/process/memory.h"
+
+#include <stddef.h>
+
+#include <limits>
+
+#include "base/allocator/allocator_check.h"
+#include "base/allocator/buildflags.h"
+#include "base/compiler_specific.h"
+#include "base/debug/alias.h"
+#include "base/memory/aligned_memory.h"
+#include "base/strings/stringprintf.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#endif
+#if defined(OS_POSIX)
+#include <errno.h>
+#endif
+#if defined(OS_MACOSX)
+#include <malloc/malloc.h>
+#include "base/allocator/allocator_interception_mac.h"
+#include "base/allocator/allocator_shim.h"
+#include "base/process/memory_unittest_mac.h"
+#endif
+#if defined(OS_LINUX)
+#include <malloc.h>
+#include "base/test/malloc_wrapper.h"
+#endif
+
+#if defined(OS_WIN)
+
+#if defined(COMPILER_MSVC)
+// ssize_t needed for OutOfMemoryTest.
+#if defined(_WIN64)
+typedef __int64 ssize_t;
+#else
+typedef long ssize_t;
+#endif
+#endif
+
+// HeapQueryInformation function pointer.
+typedef BOOL(WINAPI* HeapQueryFn)(HANDLE, HEAP_INFORMATION_CLASS, PVOID,
+                                  SIZE_T, PSIZE_T);
+
+#endif  // defined(OS_WIN)
+
+#if defined(OS_MACOSX)
+
+// For the following Mac tests:
+// Note that base::EnableTerminationOnHeapCorruption() is called as part of
+// test suite setup and does not need to be done again, else mach_override
+// will fail.
+
+TEST(ProcessMemoryTest, MacTerminateOnHeapCorruption) {
+#if BUILDFLAG(USE_ALLOCATOR_SHIM)
+  base::allocator::InitializeAllocatorShim();
+#endif
+  // Assert that freeing an unallocated pointer will crash the process.
+  char buf[9];
+  asm("" : "=r" (buf));  // Prevent clang from being too smart.
+#if ARCH_CPU_64_BITS
+  // On 64 bit Macs, the malloc system automatically abort()s on heap corruption
+  // but does not output anything.
+  ASSERT_DEATH(free(buf), "");
+#elif defined(ADDRESS_SANITIZER)
+  // AddressSanitizer replaces malloc() and prints a different error message on
+  // heap corruption.
+  ASSERT_DEATH(free(buf), "attempting free on address which "
+      "was not malloc\\(\\)-ed");
+#else
+  ADD_FAILURE() << "This test is not supported in this build configuration.";
+#endif
+
+#if BUILDFLAG(USE_ALLOCATOR_SHIM)
+  base::allocator::UninterceptMallocZonesForTesting();
+#endif
+}
+
+#endif  // defined(OS_MACOSX)
+
+TEST(MemoryTest, AllocatorShimWorking) {
+#if defined(OS_MACOSX)
+#if BUILDFLAG(USE_ALLOCATOR_SHIM)
+  base::allocator::InitializeAllocatorShim();
+#endif
+  base::allocator::InterceptAllocationsMac();
+#endif
+  ASSERT_TRUE(base::allocator::IsAllocatorInitialized());
+
+#if defined(OS_MACOSX)
+  base::allocator::UninterceptMallocZonesForTesting();
+#endif
+}
+
+// OpenBSD does not support these tests. Don't test these on ASan/TSan/MSan
+// configurations: only test the real allocator.
+// Windows only supports these tests with the allocator shim in place.
+#if !defined(OS_OPENBSD) && BUILDFLAG(USE_ALLOCATOR_SHIM) && \
+    !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+
+namespace {
+#if defined(OS_WIN)
+// Windows raises an exception rather than using LOG(FATAL) in order to make the
+// exit code unique to OOM.
+const char* kOomRegex = "";
+const int kExitCode = base::win::kOomExceptionCode;
+#else
+const char* kOomRegex = "Out of memory";
+const int kExitCode = 1;
+#endif
+}  // namespace
+
+class OutOfMemoryTest : public testing::Test {
+ public:
+  OutOfMemoryTest()
+      : value_(nullptr),
+        // Make test size as large as possible minus a few pages so
+        // that alignment or other rounding doesn't make it wrap.
+        test_size_(std::numeric_limits<std::size_t>::max() - 12 * 1024),
+        // A test size that is > 2Gb and will cause the allocators to reject
+        // the allocation due to security restrictions. See crbug.com/169327.
+        insecure_test_size_(std::numeric_limits<int>::max()),
+        signed_test_size_(std::numeric_limits<ssize_t>::max()) {}
+
+ protected:
+  void* value_;
+  size_t test_size_;
+  size_t insecure_test_size_;
+  ssize_t signed_test_size_;
+};
+
+class OutOfMemoryDeathTest : public OutOfMemoryTest {
+ public:
+  void SetUpInDeathAssert() {
+#if defined(OS_MACOSX) && BUILDFLAG(USE_ALLOCATOR_SHIM)
+    base::allocator::InitializeAllocatorShim();
+#endif
+
+    // Must call EnableTerminationOnOutOfMemory() because that is called from
+    // chrome's main function and therefore hasn't been called yet.
+    // Since this call may result in another thread being created and death
+    // tests shouldn't be started in a multithread environment, this call
+    // should be done inside of the ASSERT_DEATH.
+    base::EnableTerminationOnOutOfMemory();
+  }
+
+#if defined(OS_MACOSX)
+  void TearDown() override {
+    base::allocator::UninterceptMallocZonesForTesting();
+  }
+#endif
+};
+
+TEST_F(OutOfMemoryDeathTest, New) {
+  ASSERT_EXIT({
+      SetUpInDeathAssert();
+      value_ = operator new(test_size_);
+    }, testing::ExitedWithCode(kExitCode), kOomRegex);
+}
+
+TEST_F(OutOfMemoryDeathTest, NewArray) {
+  ASSERT_EXIT({
+      SetUpInDeathAssert();
+      value_ = new char[test_size_];
+    }, testing::ExitedWithCode(kExitCode), kOomRegex);
+}
+
+TEST_F(OutOfMemoryDeathTest, Malloc) {
+  ASSERT_EXIT({
+      SetUpInDeathAssert();
+      value_ = malloc(test_size_);
+    }, testing::ExitedWithCode(kExitCode), kOomRegex);
+}
+
+TEST_F(OutOfMemoryDeathTest, Realloc) {
+  ASSERT_EXIT({
+      SetUpInDeathAssert();
+      value_ = realloc(nullptr, test_size_);
+    }, testing::ExitedWithCode(kExitCode), kOomRegex);
+}
+
+TEST_F(OutOfMemoryDeathTest, Calloc) {
+  ASSERT_EXIT({
+      SetUpInDeathAssert();
+      value_ = calloc(1024, test_size_ / 1024L);
+    }, testing::ExitedWithCode(kExitCode), kOomRegex);
+}
+
+TEST_F(OutOfMemoryDeathTest, AlignedAlloc) {
+  ASSERT_EXIT({
+      SetUpInDeathAssert();
+      value_ = base::AlignedAlloc(test_size_, 8);
+    }, testing::ExitedWithCode(kExitCode), kOomRegex);
+}
+
+// POSIX does not define an aligned realloc function.
+#if defined(OS_WIN)
+TEST_F(OutOfMemoryDeathTest, AlignedRealloc) {
+  ASSERT_EXIT({
+      SetUpInDeathAssert();
+      value_ = _aligned_realloc(NULL, test_size_, 8);
+    }, testing::ExitedWithCode(kExitCode), kOomRegex);
+}
+
+namespace {
+
+constexpr uint32_t kUnhandledExceptionExitCode = 0xBADA55;
+
+// This unhandled exception filter exits the process with an exit code distinct
+// from the exception code. This is to verify that the out of memory new handler
+// causes an unhandled exception.
+LONG WINAPI ExitingUnhandledExceptionFilter(EXCEPTION_POINTERS* ExceptionInfo) {
+  _exit(kUnhandledExceptionExitCode);
+}
+
+}  // namespace
+
+TEST_F(OutOfMemoryDeathTest, NewHandlerGeneratesUnhandledException) {
+  ASSERT_EXIT(
+      {
+        SetUpInDeathAssert();
+        SetUnhandledExceptionFilter(&ExitingUnhandledExceptionFilter);
+        value_ = new char[test_size_];
+      },
+      testing::ExitedWithCode(kUnhandledExceptionExitCode), kOomRegex);
+}
+#endif  // defined(OS_WIN)
+
+// OS X and Android have no 2Gb allocation limit.
+// See https://crbug.com/169327.
+#if !defined(OS_MACOSX) && !defined(OS_ANDROID)
+TEST_F(OutOfMemoryDeathTest, SecurityNew) {
+  ASSERT_EXIT({
+      SetUpInDeathAssert();
+      value_ = operator new(insecure_test_size_);
+    }, testing::ExitedWithCode(kExitCode), kOomRegex);
+}
+
+TEST_F(OutOfMemoryDeathTest, SecurityNewArray) {
+  ASSERT_EXIT({
+      SetUpInDeathAssert();
+      value_ = new char[insecure_test_size_];
+    }, testing::ExitedWithCode(kExitCode), kOomRegex);
+}
+
+TEST_F(OutOfMemoryDeathTest, SecurityMalloc) {
+  ASSERT_EXIT({
+      SetUpInDeathAssert();
+      value_ = malloc(insecure_test_size_);
+    }, testing::ExitedWithCode(kExitCode), kOomRegex);
+}
+
+TEST_F(OutOfMemoryDeathTest, SecurityRealloc) {
+  ASSERT_EXIT({
+      SetUpInDeathAssert();
+      value_ = realloc(nullptr, insecure_test_size_);
+    }, testing::ExitedWithCode(kExitCode), kOomRegex);
+}
+
+TEST_F(OutOfMemoryDeathTest, SecurityCalloc) {
+  ASSERT_EXIT({
+      SetUpInDeathAssert();
+      value_ = calloc(1024, insecure_test_size_ / 1024L);
+    }, testing::ExitedWithCode(kExitCode), kOomRegex);
+}
+
+TEST_F(OutOfMemoryDeathTest, SecurityAlignedAlloc) {
+  ASSERT_EXIT({
+      SetUpInDeathAssert();
+      value_ = base::AlignedAlloc(insecure_test_size_, 8);
+    }, testing::ExitedWithCode(kExitCode), kOomRegex);
+}
+
+// POSIX does not define an aligned realloc function.
+#if defined(OS_WIN)
+TEST_F(OutOfMemoryDeathTest, SecurityAlignedRealloc) {
+  ASSERT_EXIT({
+      SetUpInDeathAssert();
+      value_ = _aligned_realloc(NULL, insecure_test_size_, 8);
+    }, testing::ExitedWithCode(kExitCode), kOomRegex);
+}
+#endif  // defined(OS_WIN)
+#endif  // !defined(OS_MACOSX) && !defined(OS_ANDROID)
+
+#if defined(OS_LINUX)
+
+TEST_F(OutOfMemoryDeathTest, Valloc) {
+  ASSERT_DEATH({
+      SetUpInDeathAssert();
+      value_ = valloc(test_size_);
+    }, kOomRegex);
+}
+
+TEST_F(OutOfMemoryDeathTest, SecurityValloc) {
+  ASSERT_DEATH({
+      SetUpInDeathAssert();
+      value_ = valloc(insecure_test_size_);
+    }, kOomRegex);
+}
+
+#if PVALLOC_AVAILABLE == 1
+TEST_F(OutOfMemoryDeathTest, Pvalloc) {
+  ASSERT_DEATH({
+      SetUpInDeathAssert();
+      value_ = pvalloc(test_size_);
+    }, kOomRegex);
+}
+
+TEST_F(OutOfMemoryDeathTest, SecurityPvalloc) {
+  ASSERT_DEATH({
+      SetUpInDeathAssert();
+      value_ = pvalloc(insecure_test_size_);
+    }, kOomRegex);
+}
+#endif  // PVALLOC_AVAILABLE == 1
+
+TEST_F(OutOfMemoryDeathTest, Memalign) {
+  ASSERT_DEATH({
+      SetUpInDeathAssert();
+      value_ = memalign(4, test_size_);
+    }, kOomRegex);
+}
+
+TEST_F(OutOfMemoryDeathTest, ViaSharedLibraries) {
+  // This tests that the run-time symbol resolution is overriding malloc for
+  // shared libraries as well as for our code.
+  ASSERT_DEATH({
+    SetUpInDeathAssert();
+    value_ = MallocWrapper(test_size_);
+  }, kOomRegex);
+}
+#endif  // OS_LINUX
+
+// Android doesn't implement posix_memalign().
+#if defined(OS_POSIX) && !defined(OS_ANDROID)
+TEST_F(OutOfMemoryDeathTest, Posix_memalign) {
+  // Grab the return value of posix_memalign to silence a compiler warning
+  // about unused return values. We don't actually care about the return
+  // value, since we're asserting death.
+  ASSERT_DEATH({
+      SetUpInDeathAssert();
+      EXPECT_EQ(ENOMEM, posix_memalign(&value_, 8, test_size_));
+    }, kOomRegex);
+}
+#endif  // defined(OS_POSIX) && !defined(OS_ANDROID)
+
+#if defined(OS_MACOSX)
+
+// Purgeable zone tests
+
+TEST_F(OutOfMemoryDeathTest, MallocPurgeable) {
+  malloc_zone_t* zone = malloc_default_purgeable_zone();
+  ASSERT_DEATH({
+      SetUpInDeathAssert();
+      value_ = malloc_zone_malloc(zone, test_size_);
+    }, kOomRegex);
+}
+
+TEST_F(OutOfMemoryDeathTest, ReallocPurgeable) {
+  malloc_zone_t* zone = malloc_default_purgeable_zone();
+  ASSERT_DEATH({
+      SetUpInDeathAssert();
+      value_ = malloc_zone_realloc(zone, NULL, test_size_);
+    }, kOomRegex);
+}
+
+TEST_F(OutOfMemoryDeathTest, CallocPurgeable) {
+  malloc_zone_t* zone = malloc_default_purgeable_zone();
+  ASSERT_DEATH({
+      SetUpInDeathAssert();
+      value_ = malloc_zone_calloc(zone, 1024, test_size_ / 1024L);
+    }, kOomRegex);
+}
+
+TEST_F(OutOfMemoryDeathTest, VallocPurgeable) {
+  malloc_zone_t* zone = malloc_default_purgeable_zone();
+  ASSERT_DEATH({
+      SetUpInDeathAssert();
+      value_ = malloc_zone_valloc(zone, test_size_);
+    }, kOomRegex);
+}
+
+TEST_F(OutOfMemoryDeathTest, PosixMemalignPurgeable) {
+  malloc_zone_t* zone = malloc_default_purgeable_zone();
+  ASSERT_DEATH({
+      SetUpInDeathAssert();
+      value_ = malloc_zone_memalign(zone, 8, test_size_);
+    }, kOomRegex);
+}
+
+// Since these allocation functions take a signed size, it's possible that
+// calling them just once won't be enough to exhaust memory. In the 32-bit
+// environment, it's likely that these allocation attempts will fail because
+// not enough contiguous address space is available. In the 64-bit environment,
+// it's likely that they'll fail because they would require a preposterous
+// amount of (virtual) memory.
+
+TEST_F(OutOfMemoryDeathTest, CFAllocatorSystemDefault) {
+  ASSERT_DEATH({
+      SetUpInDeathAssert();
+      while ((value_ =
+              base::AllocateViaCFAllocatorSystemDefault(signed_test_size_))) {}
+    }, kOomRegex);
+}
+
+TEST_F(OutOfMemoryDeathTest, CFAllocatorMalloc) {
+  ASSERT_DEATH({
+      SetUpInDeathAssert();
+      while ((value_ =
+              base::AllocateViaCFAllocatorMalloc(signed_test_size_))) {}
+    }, kOomRegex);
+}
+
+TEST_F(OutOfMemoryDeathTest, CFAllocatorMallocZone) {
+  ASSERT_DEATH({
+      SetUpInDeathAssert();
+      while ((value_ =
+              base::AllocateViaCFAllocatorMallocZone(signed_test_size_))) {}
+    }, kOomRegex);
+}
+
+#if !defined(ARCH_CPU_64_BITS)
+
+// See process_util_unittest_mac.mm for an explanation of why this test isn't
+// run in the 64-bit environment.
+
+TEST_F(OutOfMemoryDeathTest, PsychoticallyBigObjCObject) {
+  ASSERT_DEATH({
+      SetUpInDeathAssert();
+      while ((value_ = base::AllocatePsychoticallyBigObjCObject())) {}
+    }, kOomRegex);
+}
+
+#endif  // !ARCH_CPU_64_BITS
+#endif  // OS_MACOSX
+
+class OutOfMemoryHandledTest : public OutOfMemoryTest {
+ public:
+  static const size_t kSafeMallocSize = 512;
+  static const size_t kSafeCallocSize = 128;
+  static const size_t kSafeCallocItems = 4;
+
+  void SetUp() override {
+    OutOfMemoryTest::SetUp();
+
+    // We enable termination on OOM - just as Chrome does at early
+    // initialization - and test that UncheckedMalloc and UncheckedCalloc
+    // properly by-pass this in order to allow the caller to handle OOM.
+    base::EnableTerminationOnOutOfMemory();
+  }
+
+  void TearDown() override {
+#if defined(OS_MACOSX)
+    base::allocator::UninterceptMallocZonesForTesting();
+#endif
+  }
+};
+
+#if defined(OS_WIN)
+
+namespace {
+
+DWORD HandleOutOfMemoryException(EXCEPTION_POINTERS* exception_ptrs,
+                                 size_t expected_size) {
+  EXPECT_EQ(base::win::kOomExceptionCode,
+            exception_ptrs->ExceptionRecord->ExceptionCode);
+  EXPECT_LE(1U, exception_ptrs->ExceptionRecord->NumberParameters);
+  EXPECT_EQ(expected_size,
+            exception_ptrs->ExceptionRecord->ExceptionInformation[0]);
+  return EXCEPTION_EXECUTE_HANDLER;
+}
+
+}  // namespace
+
+TEST_F(OutOfMemoryTest, TerminateBecauseOutOfMemoryReportsAllocSize) {
+// On Windows, TerminateBecauseOutOfMemory reports the attempted allocation
+// size in the exception raised.
+#if defined(ARCH_CPU_64_BITS)
+  // Test with a size larger than 32 bits on 64 bit machines.
+  const size_t kAttemptedAllocationSize = 0xBADA55F00DULL;
+#else
+  const size_t kAttemptedAllocationSize = 0xBADA55;
+#endif
+
+  __try {
+    base::TerminateBecauseOutOfMemory(kAttemptedAllocationSize);
+  } __except (HandleOutOfMemoryException(GetExceptionInformation(),
+                                         kAttemptedAllocationSize)) {
+  }
+}
+#endif  // OS_WIN
+
+// TODO(b.kelemen): make UncheckedMalloc and UncheckedCalloc work
+// on Windows as well.
+TEST_F(OutOfMemoryHandledTest, UncheckedMalloc) {
+  EXPECT_TRUE(base::UncheckedMalloc(kSafeMallocSize, &value_));
+  EXPECT_TRUE(value_ != nullptr);
+  free(value_);
+
+  EXPECT_FALSE(base::UncheckedMalloc(test_size_, &value_));
+  EXPECT_TRUE(value_ == nullptr);
+}
+
+TEST_F(OutOfMemoryHandledTest, UncheckedCalloc) {
+  EXPECT_TRUE(base::UncheckedCalloc(1, kSafeMallocSize, &value_));
+  EXPECT_TRUE(value_ != nullptr);
+  const char* bytes = static_cast<const char*>(value_);
+  for (size_t i = 0; i < kSafeMallocSize; ++i)
+    EXPECT_EQ(0, bytes[i]);
+  free(value_);
+
+  EXPECT_TRUE(
+      base::UncheckedCalloc(kSafeCallocItems, kSafeCallocSize, &value_));
+  EXPECT_TRUE(value_ != nullptr);
+  bytes = static_cast<const char*>(value_);
+  for (size_t i = 0; i < (kSafeCallocItems * kSafeCallocSize); ++i)
+    EXPECT_EQ(0, bytes[i]);
+  free(value_);
+
+  EXPECT_FALSE(base::UncheckedCalloc(1, test_size_, &value_));
+  EXPECT_TRUE(value_ == nullptr);
+}
+#endif  // !defined(OS_OPENBSD) && BUILDFLAG(ENABLE_WIN_ALLOCATOR_SHIM_TESTS) &&
+        // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
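
The UncheckedMalloc/UncheckedCalloc tests above pin down the contract callers depend on: with EnableTerminationOnOutOfMemory() active, a failed malloc kills the process, while the Unchecked variants report failure and let the caller recover. A minimal sketch of that calling pattern, with a hypothetical helper name and an illustrative fallback policy:

#include <stdlib.h>

#include "base/process/memory.h"

// Hypothetical helper: tries to grab a large scratch buffer, degrading
// gracefully instead of crashing when the allocation cannot be satisfied.
bool TryAllocateScratch(size_t bytes, void** out) {
  void* buffer = nullptr;
  // UncheckedMalloc by-passes the handler installed by
  // base::EnableTerminationOnOutOfMemory() and reports failure instead of
  // terminating the process.
  if (!base::UncheckedMalloc(bytes, &buffer))
    return false;  // Caller can retry with a smaller size.
  *out = buffer;   // Ownership passes to the caller; release with free().
  return true;
}
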
diff --git a/base/process/memory_unittest_mac.h b/base/process/memory_unittest_mac.h
new file mode 100644
index 0000000..713589b
--- /dev/null
+++ b/base/process/memory_unittest_mac.h
@@ -0,0 +1,35 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains helpers for the process_util_unittest to allow it to
+// fully test the Mac code.
+
+#ifndef BASE_PROCESS_MEMORY_UNITTEST_MAC_H_
+#define BASE_PROCESS_MEMORY_UNITTEST_MAC_H_
+
+#include <stddef.h>
+#include <sys/types.h>
+
+#include "build/build_config.h"
+
+namespace base {
+
+// Allocates memory via system allocators. Alas, they take a _signed_ size for
+// allocation.
+void* AllocateViaCFAllocatorSystemDefault(ssize_t size);
+void* AllocateViaCFAllocatorMalloc(ssize_t size);
+void* AllocateViaCFAllocatorMallocZone(ssize_t size);
+
+#if !defined(ARCH_CPU_64_BITS)
+// See process_util_unittest_mac.mm for an explanation of why this function
+// isn't implemented for the 64-bit environment.
+
+// Allocates a huge Objective C object.
+void* AllocatePsychoticallyBigObjCObject();
+
+#endif  // !ARCH_CPU_64_BITS
+
+}  // namespace base
+
+#endif  // BASE_PROCESS_MEMORY_UNITTEST_MAC_H_
diff --git a/base/process/memory_unittest_mac.mm b/base/process/memory_unittest_mac.mm
new file mode 100644
index 0000000..26fe1af
--- /dev/null
+++ b/base/process/memory_unittest_mac.mm
@@ -0,0 +1,60 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/memory_unittest_mac.h"
+#include "build/build_config.h"
+
+#import <Foundation/Foundation.h>
+#include <CoreFoundation/CoreFoundation.h>
+
+#if !defined(ARCH_CPU_64_BITS)
+
+// In the 64-bit environment, the Objective-C 2.0 Runtime Reference states
+// that sizeof(anInstance) is constrained to 32 bits. That's not necessarily
+// "psychotically big" and in fact a 64-bit program is expected to be able to
+// successfully allocate an object that large, likely reserving a good deal of
+// swap space. The only way to test the behavior of memory exhaustion for
+// Objective-C allocation in this environment would be to loop over allocation
+// of these large objects, but that would slowly consume all available memory
+// and cause swap file proliferation. That's bad, so this behavior isn't
+// tested in the 64-bit environment.
+
+@interface PsychoticallyBigObjCObject : NSObject
+{
+  // In the 32-bit environment, the compiler limits Objective-C objects to
+  // < 2GB in size.
+  int justUnder2Gigs_[(2U * 1024 * 1024 * 1024 - 1) / sizeof(int)];
+}
+
+@end
+
+@implementation PsychoticallyBigObjCObject
+
+@end
+
+namespace base {
+
+void* AllocatePsychoticallyBigObjCObject() {
+  return [[PsychoticallyBigObjCObject alloc] init];
+}
+
+}  // namespace base
+
+#endif  // ARCH_CPU_64_BITS
+
+namespace base {
+
+void* AllocateViaCFAllocatorSystemDefault(ssize_t size) {
+  return CFAllocatorAllocate(kCFAllocatorSystemDefault, size, 0);
+}
+
+void* AllocateViaCFAllocatorMalloc(ssize_t size) {
+  return CFAllocatorAllocate(kCFAllocatorMalloc, size, 0);
+}
+
+void* AllocateViaCFAllocatorMallocZone(ssize_t size) {
+  return CFAllocatorAllocate(kCFAllocatorMallocZone, size, 0);
+}
+
+}  // namespace base
diff --git a/base/process/memory_win.cc b/base/process/memory_win.cc
new file mode 100644
index 0000000..c3fe758
--- /dev/null
+++ b/base/process/memory_win.cc
@@ -0,0 +1,83 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/memory.h"
+
+#include <windows.h>  // Must be in front of other Windows header files.
+
+#include <new.h>
+#include <psapi.h>
+#include <stddef.h>
+
+#if defined(__clang__)
+// This global constructor is trivial and non-racy (per being const).
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wglobal-constructors"
+#endif
+
+// malloc_unchecked is required to implement UncheckedMalloc properly.
+// It's provided by allocator_shim_win.cc but since that's not always present,
+// we provide a default that falls back to regular malloc.
+typedef void* (*MallocFn)(size_t);
+extern "C" void* (*const malloc_unchecked)(size_t);
+extern "C" void* (*const malloc_default)(size_t) = &malloc;
+
+#if defined(__clang__)
+#pragma clang diagnostic pop  // -Wglobal-constructors
+#endif
+
+#if defined(_M_IX86)
+#pragma comment(linker, "/alternatename:_malloc_unchecked=_malloc_default")
+#elif defined(_M_X64) || defined(_M_ARM)
+#pragma comment(linker, "/alternatename:malloc_unchecked=malloc_default")
+#else
+#error Unsupported platform
+#endif
+
+namespace base {
+
+namespace {
+
+#pragma warning(push)
+#pragma warning(disable: 4702)  // Unreachable code after the _exit.
+
+NOINLINE int OnNoMemory(size_t size) {
+  // Kill the process. This is important for security since most code
+  // does not check the result of memory allocation.
+  // https://msdn.microsoft.com/en-us/library/het71c37.aspx
+  // Pass the size of the failed request in an exception argument.
+  ULONG_PTR exception_args[] = {size};
+  ::RaiseException(win::kOomExceptionCode, EXCEPTION_NONCONTINUABLE,
+                   arraysize(exception_args), exception_args);
+
+  // Safety check, make sure process exits here.
+  _exit(win::kOomExceptionCode);
+  return 0;
+}
+
+#pragma warning(pop)
+
+}  // namespace
+
+void TerminateBecauseOutOfMemory(size_t size) {
+  OnNoMemory(size);
+}
+
+void EnableTerminationOnHeapCorruption() {
+  // Ignore the result code. Supported on XP SP3 and Vista.
+  HeapSetInformation(NULL, HeapEnableTerminationOnCorruption, NULL, 0);
+}
+
+void EnableTerminationOnOutOfMemory() {
+  _set_new_handler(&OnNoMemory);
+  _set_new_mode(1);
+}
+
+// Implemented using a weak symbol.
+bool UncheckedMalloc(size_t size, void** result) {
+  *result = malloc_unchecked(size);
+  return *result != NULL;
+}
+
+}  // namespace base
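
memory_win.cc relies on MSVC's /alternatename linker directive to give malloc_unchecked a default definition when allocator_shim_win.cc is not linked in. A standalone sketch of the same idiom, with illustrative symbol names that are not part of base/:

// weak_default.cc - standalone illustration; symbol names are made up.
#include <cstdio>

extern "C" int ComputeAnswer();  // May or may not be defined elsewhere.

extern "C" int ComputeAnswerDefault() {
  return 42;  // Fallback used when no real definition is linked in.
}

// If no object file defines ComputeAnswer, the linker resolves references
// to it against ComputeAnswerDefault instead of reporting an unresolved
// external. (On x86, C symbols carry a leading underscore.)
#if defined(_M_IX86)
#pragma comment(linker, "/alternatename:_ComputeAnswer=_ComputeAnswerDefault")
#else
#pragma comment(linker, "/alternatename:ComputeAnswer=ComputeAnswerDefault")
#endif

int main() {
  std::printf("%d\n", ComputeAnswer());  // Prints 42 unless overridden.
  return 0;
}
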
diff --git a/base/process/port_provider_mac.cc b/base/process/port_provider_mac.cc
new file mode 100644
index 0000000..23d214c
--- /dev/null
+++ b/base/process/port_provider_mac.cc
@@ -0,0 +1,28 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/port_provider_mac.h"
+
+namespace base {
+
+PortProvider::PortProvider() : lock_(), observer_list_() {}
+PortProvider::~PortProvider() {}
+
+void PortProvider::AddObserver(Observer* observer) {
+  base::AutoLock l(lock_);
+  observer_list_.AddObserver(observer);
+}
+
+void PortProvider::RemoveObserver(Observer* observer) {
+  base::AutoLock l(lock_);
+  observer_list_.RemoveObserver(observer);
+}
+
+void PortProvider::NotifyObservers(ProcessHandle process) {
+  base::AutoLock l(lock_);
+  for (auto& observer : observer_list_)
+    observer.OnReceivedTaskPort(process);
+}
+
+}  // namespace base
diff --git a/base/process/port_provider_mac.h b/base/process/port_provider_mac.h
new file mode 100644
index 0000000..2f40297
--- /dev/null
+++ b/base/process/port_provider_mac.h
@@ -0,0 +1,61 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_PROCESS_PORT_PROVIDER_MAC_H_
+#define BASE_PROCESS_PORT_PROVIDER_MAC_H_
+
+#include <mach/mach.h>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/observer_list.h"
+#include "base/process/process_handle.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+
+// Abstract base class that provides a mapping from ProcessHandle (pid_t) to the
+// Mach task port. This replicates task_for_pid(), which requires root
+// privileges.
+class BASE_EXPORT PortProvider {
+ public:
+  PortProvider();
+  virtual ~PortProvider();
+
+  class Observer {
+   public:
+    virtual ~Observer() {}
+    // Called by the PortProvider to notify observers that the task port was
+    // received for a given process.
+    // No guarantees are made about the thread on which this notification will
+    // be sent.
+    // Observers must not call AddObserver() or RemoveObserver() in this
+    // callback, as doing so will deadlock.
+    virtual void OnReceivedTaskPort(ProcessHandle process) = 0;
+  };
+
+  // Returns the mach task port for |process| if possible, or else
+  // |MACH_PORT_NULL|.
+  virtual mach_port_t TaskForPid(ProcessHandle process) const = 0;
+
+  // Observer interface.
+  void AddObserver(Observer* observer);
+  void RemoveObserver(Observer* observer);
+
+ protected:
+  // Called by subclasses to send a notification to observers.
+  void NotifyObservers(ProcessHandle process);
+
+ private:
+  // ObserverList is not thread-safe, so |lock_| ensures consistency of
+  // |observer_list_|.
+  base::Lock lock_;
+  base::ObserverList<Observer> observer_list_;
+
+  DISALLOW_COPY_AND_ASSIGN(PortProvider);
+};
+
+}  // namespace base
+
+#endif  // BASE_PROCESS_PORT_PROVIDER_MAC_H_
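
PortProvider leaves TaskForPid() to subclasses and supplies only the observer plumbing. A minimal sketch of a concrete provider, assuming the embedder learns task ports through some IPC mechanism of its own (the class and method names here are illustrative):

#include <map>

#include "base/process/port_provider_mac.h"
#include "base/synchronization/lock.h"

class MapPortProvider : public base::PortProvider {
 public:
  // Called by embedder code when a child's task port arrives over IPC.
  void OnTaskPortReceived(base::ProcessHandle process, mach_port_t port) {
    {
      base::AutoLock lock(map_lock_);
      ports_[process] = port;
    }
    // Observers must not call AddObserver()/RemoveObserver() from the
    // callback, so notify after releasing our own lock.
    NotifyObservers(process);
  }

  // PortProvider:
  mach_port_t TaskForPid(base::ProcessHandle process) const override {
    base::AutoLock lock(map_lock_);
    auto it = ports_.find(process);
    return it != ports_.end() ? it->second : MACH_PORT_NULL;
  }

 private:
  mutable base::Lock map_lock_;
  std::map<base::ProcessHandle, mach_port_t> ports_;
};
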
diff --git a/base/process/process.h b/base/process/process.h
new file mode 100644
index 0000000..c06998e
--- /dev/null
+++ b/base/process/process.h
@@ -0,0 +1,213 @@
+// Copyright 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_PROCESS_PROCESS_H_
+#define BASE_PROCESS_PROCESS_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/process/process_handle.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include "base/win/scoped_handle.h"
+#endif
+
+#if defined(OS_FUCHSIA)
+#include "base/fuchsia/scoped_zx_handle.h"
+#endif
+
+#if defined(OS_MACOSX)
+#include "base/feature_list.h"
+#include "base/process/port_provider_mac.h"
+#endif
+
+namespace base {
+
+#if defined(OS_MACOSX)
+extern const Feature kMacAllowBackgroundingProcesses;
+#endif
+
+// Provides a move-only encapsulation of a process.
+//
+// This object is not tied to the lifetime of the underlying process: the
+// process may be killed and this object may still be around, and it will
+// still claim to be valid. The actual behavior in that case is OS dependent
+// like so:
+//
+// Windows: The underlying ProcessHandle will be valid after the process dies
+// and can be used to gather some information about that process, but most
+// methods will obviously fail.
+//
+// POSIX: The underlying ProcessHandle is not guaranteed to remain valid after
+// the process dies, and it may be reused by the system, which means that it may
+// end up pointing to the wrong process.
+class BASE_EXPORT Process {
+ public:
+  // On Windows, this takes ownership of |handle|. On POSIX, this does not take
+  // ownership of |handle|.
+  explicit Process(ProcessHandle handle = kNullProcessHandle);
+
+  Process(Process&& other);
+
+  // The destructor does not terminate the process.
+  ~Process();
+
+  Process& operator=(Process&& other);
+
+  // Returns an object for the current process.
+  static Process Current();
+
+  // Returns a Process for the given |pid|.
+  static Process Open(ProcessId pid);
+
+  // Returns a Process for the given |pid|. On Windows the handle is opened
+  // with more access rights and must only be used by trusted code (can read the
+  // address space and duplicate handles).
+  static Process OpenWithExtraPrivileges(ProcessId pid);
+
+#if defined(OS_WIN)
+  // Returns a Process for the given |pid|, using some |desired_access|.
+  // See ::OpenProcess documentation for valid |desired_access|.
+  static Process OpenWithAccess(ProcessId pid, DWORD desired_access);
+#endif
+
+  // Creates an object from a |handle| owned by someone else.
+  // Don't use this for new code. It is only intended to ease the migration to
+  // a strict ownership model.
+  // TODO(rvargas) crbug.com/417532: Remove this code.
+  static Process DeprecatedGetProcessFromHandle(ProcessHandle handle);
+
+  // Returns true if processes can be backgrounded.
+  static bool CanBackgroundProcesses();
+
+  // Terminates the current process immediately with |exit_code|.
+  [[noreturn]] static void TerminateCurrentProcessImmediately(int exit_code);
+
+  // Returns true if this object represents a valid process.
+  bool IsValid() const;
+
+  // Returns a handle for this process. There is no guarantee about when that
+  // handle becomes invalid because this object retains ownership.
+  ProcessHandle Handle() const;
+
+  // Returns a second object that represents this process.
+  Process Duplicate() const;
+
+  // Get the PID for this process.
+  ProcessId Pid() const;
+
+  // Returns true if this process is the current process.
+  bool is_current() const;
+
+  // Close the process handle. This will not terminate the process.
+  void Close();
+
+  // Returns true if this process is still running. This is only safe on
+  // Windows (and maybe Fuchsia?), because the ProcessHandle will keep the
+  // zombie process information available until the handle itself is
+  // released. On POSIX, the OS may reuse the ProcessId.
+#if defined(OS_WIN)
+  bool IsRunning() const {
+    return !WaitForExitWithTimeout(base::TimeDelta(), nullptr);
+  }
+#endif
+
+  // Terminates the process with extreme prejudice. The given |exit_code| will
+  // be the exit code of the process. If |wait| is true, this method will wait
+  // for up to one minute for the process to actually terminate.
+  // Returns true if the process terminates within the allowed time.
+  // NOTE: On POSIX |exit_code| is ignored.
+  bool Terminate(int exit_code, bool wait) const;
+
+  // Waits for the process to exit. Returns true on success.
+  // On POSIX, if the process has been signaled then |exit_code| is set to -1.
+  // On Linux this must be a child process, however on Mac and Windows it can be
+  // any process.
+  // NOTE: |exit_code| is optional, nullptr can be passed if the exit code is
+  // not required.
+  bool WaitForExit(int* exit_code) const;
+
+  // Same as WaitForExit() but only waits for up to |timeout|.
+  // NOTE: |exit_code| is optional, nullptr can be passed if the exit code
+  // is not required.
+  bool WaitForExitWithTimeout(TimeDelta timeout, int* exit_code) const;
+
+  // Indicates that the process has exited with the specified |exit_code|.
+  // This should be called if process exit is observed outside of this class.
+  // (i.e. Not because Terminate or WaitForExit, above, was called.)
+  // Note that nothing prevents this being called multiple times for a dead
+  // process though that should be avoided.
+  void Exited(int exit_code) const;
+
+#if defined(OS_MACOSX)
+  // The Mac needs a Mach port in order to manipulate a process's priority,
+  // and there's no good way to get that from base given the pid. These Mac
+  // variants of the IsProcessBackgrounded and SetProcessBackgrounded API take
+  // a port provider for this reason. See crbug.com/460102
+  //
+  // A process is backgrounded when its task priority is
+  // |TASK_BACKGROUND_APPLICATION|.
+  //
+  // Returns true if the port_provider can locate a task port for the process
+  // and it is backgrounded. If port_provider is null, returns false.
+  bool IsProcessBackgrounded(PortProvider* port_provider) const;
+
+  // Set the process as backgrounded. If value is
+  // true, the priority of the associated task will be set to
+  // TASK_BACKGROUND_APPLICATION. If value is false, the
+  // priority of the process will be set to TASK_FOREGROUND_APPLICATION.
+  //
+  // Returns true if the priority was changed, false otherwise. If
+  // |port_provider| is null, this is a no-op and it returns false.
+  bool SetProcessBackgrounded(PortProvider* port_provider, bool value);
+#else
+  // A process is backgrounded when its priority is lower than normal.
+  // Returns true if this process is backgrounded, false otherwise.
+  bool IsProcessBackgrounded() const;
+
+  // Set a process as backgrounded. If value is true, the priority of the
+  // process will be lowered. If value is false, the priority of the process
+  // will be made "normal" - equivalent to default process priority.
+  // Returns true if the priority was changed, false otherwise.
+  bool SetProcessBackgrounded(bool value);
+#endif  // defined(OS_MACOSX)
+
+  // Returns an integer representing the priority of a process. The meaning
+  // of this value is OS dependent.
+  int GetPriority() const;
+
+#if defined(OS_CHROMEOS)
+  // Get the PID in its PID namespace.
+  // If the process is not in a PID namespace or /proc/<pid>/status does not
+  // report NSpid, kNullProcessId is returned.
+  ProcessId GetPidInNamespace() const;
+#endif
+
+ private:
+#if defined(OS_WIN)
+  win::ScopedHandle process_;
+#elif defined(OS_FUCHSIA)
+  ScopedZxHandle process_;
+#else
+  ProcessHandle process_;
+#endif
+
+#if defined(OS_WIN) || defined(OS_FUCHSIA)
+  bool is_current_process_;
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(Process);
+};
+
+#if defined(OS_CHROMEOS)
+// Exposed for testing.
+// Given the contents of the /proc/<pid>/cgroup file, determine whether the
+// process is backgrounded or not.
+BASE_EXPORT bool IsProcessBackgroundedCGroup(
+    const StringPiece& cgroup_contents);
+#endif  // defined(OS_CHROMEOS)
+
+}  // namespace base
+
+#endif  // BASE_PROCESS_PROCESS_H_
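
Putting the Process API together, a sketch of a typical wait-then-kill sequence; the pid, timeout, and exit-code policy are illustrative, not from the patch:

#include "base/process/process.h"
#include "base/time/time.h"

// Waits up to five seconds for |pid| to exit cleanly, then falls back to
// forceful termination. Returns true if the process ended with exit code 0.
bool ReapWithGracePeriod(base::ProcessId pid) {
  base::Process process = base::Process::Open(pid);
  if (!process.IsValid())
    return false;

  int exit_code = -1;
  if (process.WaitForExitWithTimeout(base::TimeDelta::FromSeconds(5),
                                     &exit_code)) {
    return exit_code == 0;
  }

  // Still running: kill it and wait (up to a minute) for it to go away.
  // On POSIX the |exit_code| argument to Terminate() is ignored.
  process.Terminate(/*exit_code=*/1, /*wait=*/true);
  return false;
}
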
diff --git a/base/process/process_fuchsia.cc b/base/process/process_fuchsia.cc
new file mode 100644
index 0000000..94bce34
--- /dev/null
+++ b/base/process/process_fuchsia.cc
@@ -0,0 +1,226 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process.h"
+
+#include <zircon/process.h>
+#include <zircon/syscalls.h>
+
+#include "base/debug/activity_tracker.h"
+#include "base/fuchsia/default_job.h"
+#include "base/strings/stringprintf.h"
+
+namespace base {
+
+Process::Process(ProcessHandle handle)
+    : process_(handle), is_current_process_(false) {
+  CHECK_NE(handle, zx_process_self());
+}
+
+Process::~Process() {
+  Close();
+}
+
+Process::Process(Process&& other)
+    : process_(std::move(other.process_)),
+      is_current_process_(other.is_current_process_) {
+  other.is_current_process_ = false;
+}
+
+Process& Process::operator=(Process&& other) {
+  process_ = std::move(other.process_);
+  is_current_process_ = other.is_current_process_;
+  other.is_current_process_ = false;
+  return *this;
+}
+
+// static
+Process Process::Current() {
+  Process process;
+  process.is_current_process_ = true;
+  return process;
+}
+
+// static
+Process Process::Open(ProcessId pid) {
+  if (pid == GetCurrentProcId())
+    return Current();
+
+  // While a process with object id |pid| might exist, the job returned by
+  // GetDefaultJob() might not contain it, so this call can fail.
+  ScopedZxHandle handle;
+  zx_status_t status = zx_object_get_child(
+      GetDefaultJob(), pid, ZX_RIGHT_SAME_RIGHTS, handle.receive());
+  if (status != ZX_OK) {
+    DLOG(ERROR) << "zx_object_get_child failed: " << status;
+    return Process();
+  }
+  return Process(handle.release());
+}
+
+// static
+Process Process::OpenWithExtraPrivileges(ProcessId pid) {
+  // No privileges to set.
+  return Open(pid);
+}
+
+// static
+Process Process::DeprecatedGetProcessFromHandle(ProcessHandle handle) {
+  DCHECK_NE(handle, GetCurrentProcessHandle());
+  ScopedZxHandle out;
+  if (zx_handle_duplicate(handle, ZX_RIGHT_SAME_RIGHTS, out.receive()) !=
+      ZX_OK) {
+    DLOG(ERROR) << "zx_handle_duplicate failed: " << handle;
+    return Process();
+  }
+
+  return Process(out.release());
+}
+
+// static
+bool Process::CanBackgroundProcesses() {
+  return false;
+}
+
+// static
+void Process::TerminateCurrentProcessImmediately(int exit_code) {
+  _exit(exit_code);
+}
+
+bool Process::IsValid() const {
+  return process_.is_valid() || is_current();
+}
+
+ProcessHandle Process::Handle() const {
+  return is_current_process_ ? zx_process_self() : process_.get();
+}
+
+Process Process::Duplicate() const {
+  if (is_current())
+    return Current();
+
+  if (!IsValid())
+    return Process();
+
+  ScopedZxHandle out;
+  if (zx_handle_duplicate(process_.get(), ZX_RIGHT_SAME_RIGHTS,
+                          out.receive()) != ZX_OK) {
+    DLOG(ERROR) << "zx_handle_duplicate failed: " << process_.get();
+    return Process();
+  }
+
+  return Process(out.release());
+}
+
+ProcessId Process::Pid() const {
+  DCHECK(IsValid());
+  return GetProcId(Handle());
+}
+
+bool Process::is_current() const {
+  return is_current_process_;
+}
+
+void Process::Close() {
+  is_current_process_ = false;
+  process_.reset();
+}
+
+bool Process::Terminate(int exit_code, bool wait) const {
+  // exit_code isn't supportable. https://crbug.com/753490.
+  zx_status_t status = zx_task_kill(Handle());
+  // TODO(scottmg): Put these LOG/CHECK back to DLOG/DCHECK after
+  // https://crbug.com/750756 is diagnosed.
+  if (status == ZX_OK && wait) {
+    zx_signals_t signals;
+    status = zx_object_wait_one(Handle(), ZX_TASK_TERMINATED,
+                                zx_deadline_after(ZX_SEC(60)), &signals);
+    if (status != ZX_OK) {
+      LOG(ERROR) << "Error waiting for process exit: " << status;
+    } else {
+      CHECK(signals & ZX_TASK_TERMINATED);
+    }
+  } else if (status != ZX_OK) {
+    LOG(ERROR) << "Unable to terminate process: " << status;
+  }
+
+  return status >= 0;
+}
+
+bool Process::WaitForExit(int* exit_code) const {
+  return WaitForExitWithTimeout(TimeDelta::Max(), exit_code);
+}
+
+bool Process::WaitForExitWithTimeout(TimeDelta timeout, int* exit_code) const {
+  if (is_current_process_)
+    return false;
+
+  // Record the event that this thread is blocking upon (for hang diagnosis).
+  base::debug::ScopedProcessWaitActivity process_activity(this);
+
+  zx_time_t deadline = timeout == TimeDelta::Max()
+                           ? ZX_TIME_INFINITE
+                           : (TimeTicks::Now() + timeout).ToZxTime();
+  // TODO(scottmg): https://crbug.com/755282
+  const bool kOnBot = getenv("CHROME_HEADLESS") != nullptr;
+  if (kOnBot) {
+    LOG(ERROR) << base::StringPrintf(
+        "going to wait for process %x (deadline=%zu, now=%zu)", process_.get(),
+        deadline, TimeTicks::Now().ToZxTime());
+  }
+  zx_signals_t signals_observed = 0;
+  zx_status_t status = zx_object_wait_one(process_.get(), ZX_TASK_TERMINATED,
+                                          deadline, &signals_observed);
+
+  // TODO(scottmg): Make these LOGs into DLOGs after https://crbug.com/750756 is
+  // fixed.
+  if (status != ZX_OK && status != ZX_ERR_TIMED_OUT) {
+    LOG(ERROR) << "zx_object_wait_one failed, status=" << status;
+    return false;
+  }
+  if (status == ZX_ERR_TIMED_OUT) {
+    zx_time_t now = TimeTicks::Now().ToZxTime();
+    LOG(ERROR) << "zx_object_wait_one timed out, signals=" << signals_observed
+               << ", deadline=" << deadline << ", now=" << now
+               << ", delta=" << (now - deadline);
+    return false;
+  }
+
+  zx_info_process_t proc_info;
+  status = zx_object_get_info(process_.get(), ZX_INFO_PROCESS, &proc_info,
+                              sizeof(proc_info), nullptr, nullptr);
+  if (status != ZX_OK) {
+    LOG(ERROR) << "zx_object_get_info failed, status=" << status;
+    if (exit_code)
+      *exit_code = -1;
+    return false;
+  }
+
+  if (exit_code)
+    *exit_code = proc_info.return_code;
+
+  return true;
+}
+
+void Process::Exited(int exit_code) const {}
+
+bool Process::IsProcessBackgrounded() const {
+  // See SetProcessBackgrounded().
+  DCHECK(IsValid());
+  return false;
+}
+
+bool Process::SetProcessBackgrounded(bool value) {
+  // No process priorities on Fuchsia. TODO(fuchsia): See MG-783, and update
+  // this later if priorities are implemented.
+  return false;
+}
+
+int Process::GetPriority() const {
+  DCHECK(IsValid());
+  // No process priorities on Fuchsia.
+  return 0;
+}
+
+}  // namespace base
diff --git a/base/process/process_handle.cc b/base/process/process_handle.cc
new file mode 100644
index 0000000..58ceb08
--- /dev/null
+++ b/base/process/process_handle.cc
@@ -0,0 +1,52 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdint.h>
+
+#include "base/logging.h"
+#include "base/process/process_handle.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace {
+bool g_have_unique_id = false;
+uint32_t g_unique_id;
+
+// The process which set |g_unique_id|.
+ProcessId g_procid;
+
+// Mangle IDs so that they are not accidentally used as PIDs, e.g. as an
+// argument to kill or waitpid.
+uint32_t MangleProcessId(ProcessId process_id) {
+  // Add a large power of 10 so that the pid is still readable inside the
+  // mangled id.
+  return static_cast<uint32_t>(process_id) + 1000000000U;
+}
+
+}  // namespace
+
+uint32_t GetUniqueIdForProcess() {
+  if (!g_have_unique_id) {
+    return MangleProcessId(GetCurrentProcId());
+  }
+
+  // Make sure we are the same process that set |g_procid|. This check may have
+  // false negatives (if a process ID was reused) but should have no false
+  // positives.
+  DCHECK_EQ(GetCurrentProcId(), g_procid);
+  return g_unique_id;
+}
+
+#if defined(OS_LINUX) || defined(OS_AIX)
+
+void InitUniqueIdForProcessInPidNamespace(ProcessId pid_outside_of_namespace) {
+  g_unique_id = MangleProcessId(pid_outside_of_namespace);
+  g_procid = GetCurrentProcId();
+  g_have_unique_id = true;
+}
+
+#endif
+
+}  // namespace base
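
The mangling is simple enough to verify by hand; a worked example under assumed values:

#include <cassert>
#include <cstdint>

int main() {
  // A pid of 4242 in the browser's namespace mangles to 1000004242: the
  // trailing digits keep the original pid readable, while the 1e9 offset
  // pushes the value out of the normal pid range so it cannot be handed to
  // kill() or waitpid() by mistake.
  uint32_t mangled = static_cast<uint32_t>(4242) + 1000000000U;
  assert(mangled == 1000004242U);
  return 0;
}
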
diff --git a/base/process/process_handle.h b/base/process/process_handle.h
new file mode 100644
index 0000000..f3f6343
--- /dev/null
+++ b/base/process/process_handle.h
@@ -0,0 +1,103 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_PROCESS_PROCESS_HANDLE_H_
+#define BASE_PROCESS_PROCESS_HANDLE_H_
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include "base/base_export.h"
+#include "base/files/file_path.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include "base/win/windows_types.h"
+#endif
+
+#if defined(OS_FUCHSIA)
+#include <zircon/types.h>
+#endif
+
+namespace base {
+
+// ProcessHandle is a platform specific type which represents the underlying OS
+// handle to a process.
+// ProcessId is a number which identifies the process in the OS.
+#if defined(OS_WIN)
+typedef HANDLE ProcessHandle;
+typedef DWORD ProcessId;
+typedef HANDLE UserTokenHandle;
+const ProcessHandle kNullProcessHandle = NULL;
+const ProcessId kNullProcessId = 0;
+#elif defined(OS_FUCHSIA)
+typedef zx_handle_t ProcessHandle;
+typedef zx_koid_t ProcessId;
+const ProcessHandle kNullProcessHandle = ZX_HANDLE_INVALID;
+const ProcessId kNullProcessId = ZX_KOID_INVALID;
+#elif defined(OS_POSIX)
+// On POSIX, our ProcessHandle will just be the PID.
+typedef pid_t ProcessHandle;
+typedef pid_t ProcessId;
+const ProcessHandle kNullProcessHandle = 0;
+const ProcessId kNullProcessId = 0;
+#endif  // defined(OS_WIN)
+
+// To print ProcessIds portably use CrPRIdPid (based on PRIuS and friends from
+// C99 and format_macros.h) like this:
+// base::StringPrintf("PID is %" CrPRIdPid ".\n", pid);
+#if defined(OS_WIN) || defined(OS_FUCHSIA)
+#define CrPRIdPid "ld"
+#else
+#define CrPRIdPid "d"
+#endif
+
+// Returns the id of the current process.
+// Note that on some platforms, this is not guaranteed to be unique across
+// processes (use GetUniqueIdForProcess if uniqueness is required).
+BASE_EXPORT ProcessId GetCurrentProcId();
+
+// Returns a unique ID for the current process. The ID will be unique across all
+// currently running processes within the chrome session, but IDs of terminated
+// processes may be reused. This returns an opaque value that is different from
+// a process's PID.
+BASE_EXPORT uint32_t GetUniqueIdForProcess();
+
+#if defined(OS_LINUX)
+// When a process is started in a different PID namespace from the browser
+// process, this function must be called with the process's PID in the browser's
+// PID namespace in order to initialize its unique ID. Not thread safe.
+// WARNING: To avoid inconsistent results from GetUniqueIdForProcess, this
+// should only be called very early after process startup - ideally as soon
+// after process creation as possible.
+BASE_EXPORT void InitUniqueIdForProcessInPidNamespace(
+    ProcessId pid_outside_of_namespace);
+#endif
+
+// Returns the ProcessHandle of the current process.
+BASE_EXPORT ProcessHandle GetCurrentProcessHandle();
+
+// Returns the process ID for the specified process. This is functionally the
+// same as Windows' GetProcessId(), but works on versions of Windows before Win
+// XP SP1 as well.
+// DEPRECATED. New code should be using Process::Pid() instead.
+// Note that on some platforms, this is not guaranteed to be unique across
+// processes.
+BASE_EXPORT ProcessId GetProcId(ProcessHandle process);
+
+#if !defined(OS_FUCHSIA)
+// Returns the ID for the parent of the given process. Not available on
+// Fuchsia. A negative return value indicates an error, such as when the
+// |process| does not exist. Returns 0 when |process| has no parent process.
+BASE_EXPORT ProcessId GetParentProcessId(ProcessHandle process);
+#endif  // !defined(OS_FUCHSIA)
+
+#if defined(OS_POSIX)
+// Returns the path to the executable of the given process.
+BASE_EXPORT FilePath GetProcessExecutablePath(ProcessHandle process);
+#endif
+
+}  // namespace base
+
+#endif  // BASE_PROCESS_PROCESS_HANDLE_H_
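
A sketch tying the handle helpers together (non-Fuchsia, since GetParentProcessId() is unavailable there); the logging policy is illustrative:

#include "base/logging.h"
#include "base/process/process_handle.h"
#include "base/strings/stringprintf.h"

// Logs the current pid and its parent's pid using the portable CrPRIdPid
// format macro from the header above.
void LogLineage() {
  base::ProcessId self = base::GetCurrentProcId();
  base::ProcessId parent =
      base::GetParentProcessId(base::GetCurrentProcessHandle());
  LOG(INFO) << base::StringPrintf("pid %" CrPRIdPid ", parent %" CrPRIdPid,
                                  self, parent);
}
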
diff --git a/base/process/process_handle_freebsd.cc b/base/process/process_handle_freebsd.cc
new file mode 100644
index 0000000..192d72b
--- /dev/null
+++ b/base/process/process_handle_freebsd.cc
@@ -0,0 +1,43 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/macros.h"
+#include "base/process/process_handle.h"
+
+#include <limits.h>
+#include <stddef.h>
+#include <sys/sysctl.h>
+#include <sys/types.h>
+#include <sys/user.h>
+#include <unistd.h>
+
+namespace base {
+
+ProcessId GetParentProcessId(ProcessHandle process) {
+  struct kinfo_proc info;
+  size_t length;
+  int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, process };
+
+  if (sysctl(mib, arraysize(mib), &info, &length, NULL, 0) < 0)
+    return -1;
+
+  return info.ki_ppid;
+}
+
+FilePath GetProcessExecutablePath(ProcessHandle process) {
+  char pathname[PATH_MAX];
+  size_t length;
+  int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, process };
+
+  length = sizeof(pathname);
+
+  if (sysctl(mib, arraysize(mib), pathname, &length, NULL, 0) < 0 ||
+      length == 0) {
+    return FilePath();
+  }
+
+  return FilePath(std::string(pathname));
+}
+
+}  // namespace base
diff --git a/base/process/process_handle_fuchsia.cc b/base/process/process_handle_fuchsia.cc
new file mode 100644
index 0000000..c9d2a8e
--- /dev/null
+++ b/base/process/process_handle_fuchsia.cc
@@ -0,0 +1,37 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_handle.h"
+
+#include <zircon/process.h>
+#include <zircon/status.h>
+#include <zircon/syscalls.h>
+
+#include "base/logging.h"
+
+namespace base {
+
+ProcessId GetCurrentProcId() {
+  return GetProcId(GetCurrentProcessHandle());
+}
+
+ProcessHandle GetCurrentProcessHandle() {
+  // Note that zx_process_self() returns a real handle, and ownership is not
+  // transferred to the caller (i.e. this should never be closed).
+  return zx_process_self();
+}
+
+ProcessId GetProcId(ProcessHandle process) {
+  zx_info_handle_basic_t basic;
+  zx_status_t status = zx_object_get_info(process, ZX_INFO_HANDLE_BASIC, &basic,
+                                          sizeof(basic), nullptr, nullptr);
+  if (status != ZX_OK) {
+    DLOG(ERROR) << "zx_object_get_info failed: "
+                << zx_status_get_string(status);
+    return ZX_KOID_INVALID;
+  }
+  return basic.koid;
+}
+
+}  // namespace base
diff --git a/base/process/process_handle_linux.cc b/base/process/process_handle_linux.cc
new file mode 100644
index 0000000..f921b42
--- /dev/null
+++ b/base/process/process_handle_linux.cc
@@ -0,0 +1,39 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_handle.h"
+
+#include "base/files/file_util.h"
+#include "base/process/internal_linux.h"
+#if defined(OS_AIX)
+#include "base/process/internal_aix.h"
+#endif
+
+namespace base {
+
+ProcessId GetParentProcessId(ProcessHandle process) {
+  ProcessId pid =
+#if defined(OS_AIX)
+      internalAIX::ReadProcStatsAndGetFieldAsInt64(process,
+                                                   internalAIX::VM_PPID);
+#else
+      internal::ReadProcStatsAndGetFieldAsInt64(process, internal::VM_PPID);
+#endif
+  // TODO(zijiehe): Return 0 if |process| does not have a parent process.
+  if (pid)
+    return pid;
+  return -1;
+}
+
+FilePath GetProcessExecutablePath(ProcessHandle process) {
+  FilePath stat_file = internal::GetProcPidDir(process).Append("exe");
+  FilePath exe_name;
+  if (!ReadSymbolicLink(stat_file, &exe_name)) {
+    // No such process.  Happens frequently in e.g. TerminateAllChromeProcesses
+    return FilePath();
+  }
+  return exe_name;
+}
+
+}  // namespace base
diff --git a/base/process/process_handle_mac.cc b/base/process/process_handle_mac.cc
new file mode 100644
index 0000000..d9d22f7
--- /dev/null
+++ b/base/process/process_handle_mac.cc
@@ -0,0 +1,37 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_handle.h"
+
+#include <libproc.h>
+#include <stddef.h>
+#include <sys/sysctl.h>
+#include <sys/types.h>
+
+#include "base/logging.h"
+
+namespace base {
+
+ProcessId GetParentProcessId(ProcessHandle process) {
+  struct kinfo_proc info;
+  size_t length = sizeof(struct kinfo_proc);
+  int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, process };
+  if (sysctl(mib, 4, &info, &length, NULL, 0) < 0) {
+    DPLOG(ERROR) << "sysctl";
+    return -1;
+  }
+  if (length == 0)
+    return -1;
+  return info.kp_eproc.e_ppid;
+}
+
+FilePath GetProcessExecutablePath(ProcessHandle process) {
+  char pathbuf[PROC_PIDPATHINFO_MAXSIZE];
+  if (!proc_pidpath(process, pathbuf, sizeof(pathbuf)))
+    return FilePath();
+
+  return FilePath(pathbuf);
+}
+
+}  // namespace base
diff --git a/base/process/process_handle_openbsd.cc b/base/process/process_handle_openbsd.cc
new file mode 100644
index 0000000..045e720
--- /dev/null
+++ b/base/process/process_handle_openbsd.cc
@@ -0,0 +1,51 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/macros.h"
+#include "base/process/process_handle.h"
+
+#include <stddef.h>
+#include <sys/sysctl.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+namespace base {
+
+ProcessId GetParentProcessId(ProcessHandle process) {
+  struct kinfo_proc info;
+  size_t length;
+  int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, process,
+                sizeof(struct kinfo_proc), 0 };
+
+  if (sysctl(mib, arraysize(mib), NULL, &length, NULL, 0) < 0)
+    return -1;
+
+  mib[5] = (length / sizeof(struct kinfo_proc));
+
+  if (sysctl(mib, arraysize(mib), &info, &length, NULL, 0) < 0)
+    return -1;
+
+  return info.p_ppid;
+}
+
+FilePath GetProcessExecutablePath(ProcessHandle process) {
+  struct kinfo_proc kp;
+  size_t len;
+  int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, process,
+                sizeof(struct kinfo_proc), 0 };
+
+  if (sysctl(mib, arraysize(mib), NULL, &len, NULL, 0) == -1)
+    return FilePath();
+  mib[5] = (len / sizeof(struct kinfo_proc));
+  if (sysctl(mib, arraysize(mib), &kp, &len, NULL, 0) < 0)
+    return FilePath();
+  if ((kp.p_flag & P_SYSTEM) != 0)
+    return FilePath();
+  if (strcmp(kp.p_comm, "chrome") == 0)
+    return FilePath(kp.p_comm);
+
+  return FilePath();
+}
+
+}  // namespace base
diff --git a/base/process/process_handle_posix.cc b/base/process/process_handle_posix.cc
new file mode 100644
index 0000000..4e332df
--- /dev/null
+++ b/base/process/process_handle_posix.cc
@@ -0,0 +1,23 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_handle.h"
+
+#include <unistd.h>
+
+namespace base {
+
+ProcessId GetCurrentProcId() {
+  return getpid();
+}
+
+ProcessHandle GetCurrentProcessHandle() {
+  return GetCurrentProcId();
+}
+
+ProcessId GetProcId(ProcessHandle process) {
+  return process;
+}
+
+}  // namespace base
diff --git a/base/process/process_handle_win.cc b/base/process/process_handle_win.cc
new file mode 100644
index 0000000..67986cd
--- /dev/null
+++ b/base/process/process_handle_win.cc
@@ -0,0 +1,46 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_handle.h"
+
+#include <windows.h>
+#include <tlhelp32.h>
+
+#include "base/win/scoped_handle.h"
+#include "base/win/windows_version.h"
+
+namespace base {
+
+ProcessId GetCurrentProcId() {
+  return ::GetCurrentProcessId();
+}
+
+ProcessHandle GetCurrentProcessHandle() {
+  return ::GetCurrentProcess();
+}
+
+ProcessId GetProcId(ProcessHandle process) {
+  // This returns 0 if we have insufficient rights to query the process handle.
+  return GetProcessId(process);
+}
+
+ProcessId GetParentProcessId(ProcessHandle process) {
+  ProcessId child_pid = GetProcId(process);
+  PROCESSENTRY32 process_entry;
+  process_entry.dwSize = sizeof(PROCESSENTRY32);
+
+  win::ScopedHandle snapshot(CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0));
+  if (snapshot.IsValid() && Process32First(snapshot.Get(), &process_entry)) {
+    do {
+      if (process_entry.th32ProcessID == child_pid)
+        return process_entry.th32ParentProcessID;
+    } while (Process32Next(snapshot.Get(), &process_entry));
+  }
+
+  // TODO(zijiehe): To match other platforms, -1 (UINT32_MAX) should be
+  // returned if |child_pid| cannot be found in the |snapshot|.
+  return 0u;
+}
+
+}  // namespace base
diff --git a/base/process/process_info.h b/base/process/process_info.h
new file mode 100644
index 0000000..5138e24
--- /dev/null
+++ b/base/process/process_info.h
@@ -0,0 +1,42 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_PROCESS_PROCESS_INFO_H_
+#define BASE_PROCESS_PROCESS_INFO_H_
+
+#include "base/base_export.h"
+#include "build/build_config.h"
+
+namespace base {
+
+class Time;
+
+// Vends information about the current process.
+class BASE_EXPORT CurrentProcessInfo {
+ public:
+  // Returns the time at which the process was launched. May be empty if an
+  // error occurred retrieving the information.
+  static const Time CreationTime();
+};
+
+#if defined(OS_WIN)
+enum IntegrityLevel {
+  INTEGRITY_UNKNOWN,
+  LOW_INTEGRITY,
+  MEDIUM_INTEGRITY,
+  HIGH_INTEGRITY,
+};
+
+// Returns the integrity level of the process. Returns INTEGRITY_UNKNOWN in the
+// case of an underlying system failure.
+BASE_EXPORT IntegrityLevel GetCurrentProcessIntegrityLevel();
+
+// Determines whether the current process is elevated.
+BASE_EXPORT bool IsCurrentProcessElevated();
+
+#endif  // defined(OS_WIN)
+
+}  // namespace base
+
+#endif  // BASE_PROCESS_PROCESS_INFO_H_
diff --git a/base/process/process_info_linux.cc b/base/process/process_info_linux.cc
new file mode 100644
index 0000000..2f22748
--- /dev/null
+++ b/base/process/process_info_linux.cc
@@ -0,0 +1,29 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_info.h"
+
+#include <stdint.h>
+
+#include "base/logging.h"
+#include "base/process/internal_linux.h"
+#include "base/process/process_handle.h"
+#include "base/time/time.h"
+
+namespace base {
+
+// static
+const Time CurrentProcessInfo::CreationTime() {
+  int64_t start_ticks =
+      internal::ReadProcSelfStatsAndGetFieldAsInt64(internal::VM_STARTTIME);
+  if (!start_ticks)
+    return Time();
+  TimeDelta start_offset = internal::ClockTicksToTimeDelta(start_ticks);
+  Time boot_time = internal::GetBootTime();
+  if (boot_time.is_null())
+    return Time();
+  return Time(boot_time + start_offset);
+}
+
+}  // namespace base
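
The computation is boot time plus the process's start offset, where the offset is the starttime field of /proc/<pid>/stat divided by the clock tick rate. A worked example under assumed values:

#include <cstdio>

int main() {
  const long kStartTicks = 250;   // starttime from /proc/<pid>/stat (assumed).
  const long kTicksPerSec = 100;  // sysconf(_SC_CLK_TCK) on typical Linux.
  // 250 ticks at 100 Hz: the process started 2.5 seconds after boot, so its
  // creation time is boot time + 2.5 s.
  std::printf("started %.1f s after boot\n",
              static_cast<double>(kStartTicks) / kTicksPerSec);
  return 0;
}
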
diff --git a/base/process/process_info_mac.cc b/base/process/process_info_mac.cc
new file mode 100644
index 0000000..27b9623
--- /dev/null
+++ b/base/process/process_info_mac.cc
@@ -0,0 +1,34 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_info.h"
+
+#include <stddef.h>
+#include <sys/sysctl.h>
+#include <sys/time.h>
+#include <unistd.h>
+
+#include <memory>
+
+#include "base/macros.h"
+#include "base/memory/free_deleter.h"
+#include "base/time/time.h"
+
+namespace base {
+
+// static
+const Time CurrentProcessInfo::CreationTime() {
+  int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, getpid() };
+  size_t len = 0;
+  if (sysctl(mib, arraysize(mib), NULL, &len, NULL, 0) < 0)
+    return Time();
+
+  std::unique_ptr<struct kinfo_proc, base::FreeDeleter> proc(
+      static_cast<struct kinfo_proc*>(malloc(len)));
+  if (sysctl(mib, arraysize(mib), proc.get(), &len, NULL, 0) < 0)
+    return Time();
+  return Time::FromTimeVal(proc->kp_proc.p_un.__p_starttime);
+}
+
+}  // namespace base
diff --git a/base/process/process_info_unittest.cc b/base/process/process_info_unittest.cc
new file mode 100644
index 0000000..f54d957
--- /dev/null
+++ b/base/process/process_info_unittest.cc
@@ -0,0 +1,22 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_info.h"
+
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+// See https://crbug.com/726484 for Fuchsia.
+// Cannot read boot time on Android O, crbug.com/788870.
+#if !defined(OS_IOS) && !defined(OS_FUCHSIA) && !defined(OS_ANDROID)
+TEST(ProcessInfoTest, CreationTime) {
+  Time creation_time = CurrentProcessInfo::CreationTime();
+  ASSERT_FALSE(creation_time.is_null());
+}
+#endif  // !defined(OS_IOS) && !defined(OS_FUCHSIA) && !defined(OS_ANDROID)
+
+}  // namespace base
diff --git a/base/process/process_info_win.cc b/base/process/process_info_win.cc
new file mode 100644
index 0000000..23e93e3
--- /dev/null
+++ b/base/process/process_info_win.cc
@@ -0,0 +1,94 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_info.h"
+
+#include <windows.h>
+#include <memory>
+
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/time/time.h"
+#include "base/win/scoped_handle.h"
+
+namespace base {
+
+namespace {
+
+HANDLE GetCurrentProcessToken() {
+  HANDLE process_token;
+  OpenProcessToken(GetCurrentProcess(), TOKEN_QUERY, &process_token);
+  DCHECK(process_token != NULL && process_token != INVALID_HANDLE_VALUE);
+  return process_token;
+}
+
+}  // namespace
+
+// static
+const Time CurrentProcessInfo::CreationTime() {
+  FILETIME creation_time = {};
+  FILETIME ignore1 = {};
+  FILETIME ignore2 = {};
+  FILETIME ignore3 = {};
+  if (!::GetProcessTimes(::GetCurrentProcess(), &creation_time, &ignore1,
+                         &ignore2, &ignore3)) {
+    return Time();
+  }
+  return Time::FromFileTime(creation_time);
+}
+
+IntegrityLevel GetCurrentProcessIntegrityLevel() {
+  HANDLE process_token(GetCurrentProcessToken());
+
+  DWORD token_info_length = 0;
+  if (::GetTokenInformation(process_token, TokenIntegrityLevel, nullptr, 0,
+                            &token_info_length) ||
+      ::GetLastError() != ERROR_INSUFFICIENT_BUFFER) {
+    return INTEGRITY_UNKNOWN;
+  }
+
+  auto token_label_bytes = std::make_unique<char[]>(token_info_length);
+  TOKEN_MANDATORY_LABEL* token_label =
+      reinterpret_cast<TOKEN_MANDATORY_LABEL*>(token_label_bytes.get());
+  if (!::GetTokenInformation(process_token, TokenIntegrityLevel, token_label,
+                             token_info_length, &token_info_length)) {
+    return INTEGRITY_UNKNOWN;
+  }
+
+  DWORD integrity_level = *::GetSidSubAuthority(
+      token_label->Label.Sid,
+      static_cast<DWORD>(*::GetSidSubAuthorityCount(token_label->Label.Sid) -
+                         1));
+
+  if (integrity_level < SECURITY_MANDATORY_MEDIUM_RID)
+    return LOW_INTEGRITY;
+
+  if (integrity_level >= SECURITY_MANDATORY_MEDIUM_RID &&
+      integrity_level < SECURITY_MANDATORY_HIGH_RID) {
+    return MEDIUM_INTEGRITY;
+  }
+
+  if (integrity_level >= SECURITY_MANDATORY_HIGH_RID)
+    return HIGH_INTEGRITY;
+
+  NOTREACHED();
+  return INTEGRITY_UNKNOWN;
+}
+
+bool IsCurrentProcessElevated() {
+  HANDLE process_token(GetCurrentProcessToken());
+
+  // Unlike TOKEN_ELEVATION_TYPE which returns TokenElevationTypeDefault when
+  // UAC is turned off, TOKEN_ELEVATION returns whether the process is elevated.
+  DWORD size;
+  TOKEN_ELEVATION elevation;
+  if (!GetTokenInformation(process_token, TokenElevation, &elevation,
+                           sizeof(elevation), &size)) {
+    PLOG(ERROR) << "GetTokenInformation() failed";
+    return false;
+  }
+  return !!elevation.TokenIsElevated;
+}
+
+}  // namespace base
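
A sketch of how Windows-only callers might act on these results; the policy shown is hypothetical, not from the patch:

#include "base/process/process_info.h"

// Hypothetical policy: refuse a risky operation when running elevated or at
// high integrity. Treats INTEGRITY_UNKNOWN as permitted for brevity.
bool MayPerformRiskyOperation() {
  if (base::IsCurrentProcessElevated())
    return false;
  return base::GetCurrentProcessIntegrityLevel() != base::HIGH_INTEGRITY;
}
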
diff --git a/base/process/process_iterator.cc b/base/process/process_iterator.cc
new file mode 100644
index 0000000..8b530a0
--- /dev/null
+++ b/base/process/process_iterator.cc
@@ -0,0 +1,66 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_iterator.h"
+#include "build/build_config.h"
+
+namespace base {
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+ProcessEntry::ProcessEntry() : pid_(0), ppid_(0), gid_(0) {}
+ProcessEntry::ProcessEntry(const ProcessEntry& other) = default;
+ProcessEntry::~ProcessEntry() = default;
+#endif
+
+const ProcessEntry* ProcessIterator::NextProcessEntry() {
+  bool result = false;
+  do {
+    result = CheckForNextProcess();
+  } while (result && !IncludeEntry());
+  if (result)
+    return &entry_;
+  return nullptr;
+}
+
+ProcessIterator::ProcessEntries ProcessIterator::Snapshot() {
+  ProcessEntries found;
+  while (const ProcessEntry* process_entry = NextProcessEntry()) {
+    found.push_back(*process_entry);
+  }
+  return found;
+}
+
+bool ProcessIterator::IncludeEntry() {
+  return !filter_ || filter_->Includes(entry_);
+}
+
+NamedProcessIterator::NamedProcessIterator(
+    const FilePath::StringType& executable_name,
+    const ProcessFilter* filter) : ProcessIterator(filter),
+                                   executable_name_(executable_name) {
+#if defined(OS_ANDROID)
+  // On Android, the process name as recorded in /proc/<pid>/stat (the string
+  // between the open and close parentheses) contains only the last 15
+  // characters. See ProcessIterator::CheckForNextProcess for details. So if
+  // the given executable name is longer than 15 characters, keep only its
+  // last 15 characters.
+  if (executable_name_.size() > 15) {
+    executable_name_ = FilePath::StringType(executable_name_,
+                                            executable_name_.size() - 15, 15);
+  }
+#endif
+}
+
+NamedProcessIterator::~NamedProcessIterator() = default;
+
+int GetProcessCount(const FilePath::StringType& executable_name,
+                    const ProcessFilter* filter) {
+  int count = 0;
+  NamedProcessIterator iter(executable_name, filter);
+  while (iter.NextProcessEntry())
+    ++count;
+  return count;
+}
+
+}  // namespace base
diff --git a/base/process/process_iterator.h b/base/process/process_iterator.h
new file mode 100644
index 0000000..b30ad41
--- /dev/null
+++ b/base/process/process_iterator.h
@@ -0,0 +1,151 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains methods to iterate over processes on the system.
+
+#ifndef BASE_PROCESS_PROCESS_ITERATOR_H_
+#define BASE_PROCESS_PROCESS_ITERATOR_H_
+
+#include <stddef.h>
+
+#include <list>
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/files/file_path.h"
+#include "base/macros.h"
+#include "base/process/process.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#include <tlhelp32.h>
+#elif defined(OS_MACOSX) || defined(OS_OPENBSD)
+#include <sys/sysctl.h>
+#elif defined(OS_FREEBSD)
+#include <sys/user.h>
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include <dirent.h>
+#endif
+
+namespace base {
+
+#if defined(OS_WIN)
+struct ProcessEntry : public PROCESSENTRY32 {
+  ProcessId pid() const { return th32ProcessID; }
+  ProcessId parent_pid() const { return th32ParentProcessID; }
+  const wchar_t* exe_file() const { return szExeFile; }
+};
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+struct BASE_EXPORT ProcessEntry {
+  ProcessEntry();
+  ProcessEntry(const ProcessEntry& other);
+  ~ProcessEntry();
+
+  ProcessId pid() const { return pid_; }
+  ProcessId parent_pid() const { return ppid_; }
+  ProcessId gid() const { return gid_; }
+  const char* exe_file() const { return exe_file_.c_str(); }
+  const std::vector<std::string>& cmd_line_args() const {
+    return cmd_line_args_;
+  }
+
+  ProcessId pid_;
+  ProcessId ppid_;
+  ProcessId gid_;
+  std::string exe_file_;
+  std::vector<std::string> cmd_line_args_;
+};
+#endif  // defined(OS_WIN)
+
+// Used to filter processes by process ID.
+class ProcessFilter {
+ public:
+  // Returns true to indicate set-inclusion and false otherwise.  This method
+  // should not have side-effects and should be idempotent.
+  virtual bool Includes(const ProcessEntry& entry) const = 0;
+
+ protected:
+  virtual ~ProcessFilter() = default;
+};
+
+// This class provides a way to iterate through a list of processes on the
+// current machine with a specified filter.
+// To use, create an instance and then call NextProcessEntry() until it returns
+// false.
+class BASE_EXPORT ProcessIterator {
+ public:
+  typedef std::list<ProcessEntry> ProcessEntries;
+
+  explicit ProcessIterator(const ProcessFilter* filter);
+  virtual ~ProcessIterator();
+
+  // If there's another process that matches the filter, returns a const
+  // pointer to the corresponding ProcessEntry. If there are no more matching
+  // processes, returns NULL. The returned pointer remains valid until
+  // NextProcessEntry() is called again or this ProcessIterator goes out of
+  // scope.
+  const ProcessEntry* NextProcessEntry();
+
+  // Takes a snapshot of all the ProcessEntry objects found.
+  ProcessEntries Snapshot();
+
+ protected:
+  virtual bool IncludeEntry();
+  const ProcessEntry& entry() { return entry_; }
+
+ private:
+  // Determines whether there's another process (regardless of executable)
+  // left in the list of all processes.  Returns true and sets entry_ to
+  // that process's info if there is one, false otherwise.
+  bool CheckForNextProcess();
+
+  // Initializes a PROCESSENTRY32 data structure so that it's ready for
+  // use with Process32First/Process32Next.
+  void InitProcessEntry(ProcessEntry* entry);
+
+#if defined(OS_WIN)
+  HANDLE snapshot_;
+  bool started_iteration_;
+#elif defined(OS_MACOSX) || defined(OS_BSD)
+  std::vector<kinfo_proc> kinfo_procs_;
+  size_t index_of_kinfo_proc_;
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  DIR* procfs_dir_;
+#endif
+  ProcessEntry entry_;
+  const ProcessFilter* filter_;
+
+  DISALLOW_COPY_AND_ASSIGN(ProcessIterator);
+};
+
+// This class provides a way to iterate through the list of processes
+// on the current machine that were started from the given executable
+// name.  To use, create an instance and then call NextProcessEntry()
+// until it returns false.
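+//
+// A minimal sketch (the executable name is hypothetical):
+//   base::NamedProcessIterator it(FILE_PATH_LITERAL("my_app"), nullptr);
+//   while (const base::ProcessEntry* entry = it.NextProcessEntry())
+//     CountOrLog(entry->pid());  // CountOrLog is an illustrative helper.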
+class BASE_EXPORT NamedProcessIterator : public ProcessIterator {
+ public:
+  NamedProcessIterator(const FilePath::StringType& executable_name,
+                       const ProcessFilter* filter);
+  ~NamedProcessIterator() override;
+
+ protected:
+  bool IncludeEntry() override;
+
+ private:
+  FilePath::StringType executable_name_;
+
+  DISALLOW_COPY_AND_ASSIGN(NamedProcessIterator);
+};
+
+// Returns the number of processes on the machine that are running from the
+// given executable name.  If filter is non-null, then only processes selected
+// by the filter will be counted.
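+// For example (illustrative):
+//   int n = base::GetProcessCount(FILE_PATH_LITERAL("my_app"), nullptr);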
+BASE_EXPORT int GetProcessCount(const FilePath::StringType& executable_name,
+                                const ProcessFilter* filter);
+
+}  // namespace base
+
+#endif  // BASE_PROCESS_PROCESS_ITERATOR_H_
diff --git a/base/process/process_iterator_freebsd.cc b/base/process/process_iterator_freebsd.cc
new file mode 100644
index 0000000..4df0d90
--- /dev/null
+++ b/base/process/process_iterator_freebsd.cc
@@ -0,0 +1,131 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_iterator.h"
+
+#include <errno.h>
+#include <sys/types.h>
+#include <stddef.h>
+#include <sys/sysctl.h>
+#include <unistd.h>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_util.h"
+
+namespace base {
+
+ProcessIterator::ProcessIterator(const ProcessFilter* filter)
+    : index_of_kinfo_proc_(),
+      filter_(filter) {
+
+  int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_UID, getuid() };
+
+  bool done = false;
+  int try_num = 1;
+  const int max_tries = 10;
+
+  do {
+    size_t len = 0;
+    if (sysctl(mib, arraysize(mib), NULL, &len, NULL, 0) < 0) {
+      LOG(ERROR) << "failed to get the size needed for the process list";
+      kinfo_procs_.resize(0);
+      done = true;
+    } else {
+      size_t num_of_kinfo_proc = len / sizeof(struct kinfo_proc);
+      // Leave some spare room for process table growth (more could show up
+      // between when we check and now)
+      num_of_kinfo_proc += 16;
+      kinfo_procs_.resize(num_of_kinfo_proc);
+      len = num_of_kinfo_proc * sizeof(struct kinfo_proc);
+      if (sysctl(mib, arraysize(mib), &kinfo_procs_[0], &len, NULL, 0) < 0) {
+        // If we get a mem error, it just means we need a bigger buffer, so
+        // loop around again.  Anything else is a real error, so give up.
+        if (errno != ENOMEM) {
+          LOG(ERROR) << "failed to get the process list";
+          kinfo_procs_.resize(0);
+          done = true;
+        }
+      } else {
+        // Got the list, just make sure we're sized exactly right
+        size_t num_of_kinfo_proc = len / sizeof(struct kinfo_proc);
+        kinfo_procs_.resize(num_of_kinfo_proc);
+        done = true;
+      }
+    }
+  } while (!done && (try_num++ < max_tries));
+
+  if (!done) {
+    LOG(ERROR) << "failed to collect the process list in a few tries";
+    kinfo_procs_.resize(0);
+  }
+}
+
+ProcessIterator::~ProcessIterator() {
+}
+
+bool ProcessIterator::CheckForNextProcess() {
+  std::string data;
+
+  for (; index_of_kinfo_proc_ < kinfo_procs_.size(); ++index_of_kinfo_proc_) {
+    size_t length;
+    struct kinfo_proc kinfo = kinfo_procs_[index_of_kinfo_proc_];
+    int mib[] = { CTL_KERN, KERN_PROC_ARGS, kinfo.ki_pid };
+
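+    // Skip processes just awaiting collection.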
+    if ((kinfo.ki_pid > 0) && (kinfo.ki_stat == SZOMB))
+      continue;
+
+    length = 0;
+    if (sysctl(mib, arraysize(mib), NULL, &length, NULL, 0) < 0) {
+      LOG(ERROR) << "failed to figure out the buffer size for a command line";
+      continue;
+    }
+
+    data.resize(length);
+
+    if (sysctl(mib, arraysize(mib), &data[0], &length, NULL, 0) < 0) {
+      LOG(ERROR) << "failed to fetch a commandline";
+      continue;
+    }
+
+    std::string delimiters;
+    delimiters.push_back('\0');
+    entry_.cmd_line_args_ = SplitString(data, delimiters,
+                                        KEEP_WHITESPACE, SPLIT_WANT_NONEMPTY);
+
+    size_t exec_name_end = data.find('\0');
+    if (exec_name_end == std::string::npos) {
+      LOG(ERROR) << "command line data didn't match expected format";
+      continue;
+    }
+
+    entry_.pid_ = kinfo.ki_pid;
+    entry_.ppid_ = kinfo.ki_ppid;
+    entry_.gid_ = kinfo.ki_pgid;
+
+    size_t last_slash = data.rfind('/', exec_name_end);
+    if (last_slash == std::string::npos) {
+      entry_.exe_file_.assign(data, 0, exec_name_end);
+    } else {
+      entry_.exe_file_.assign(data, last_slash + 1,
+                              exec_name_end - last_slash - 1);
+    }
+
+    // Start w/ the next entry next time through
+    ++index_of_kinfo_proc_;
+
+    return true;
+  }
+  return false;
+}
+
+bool NamedProcessIterator::IncludeEntry() {
+  if (executable_name_ != entry().exe_file())
+    return false;
+
+  return ProcessIterator::IncludeEntry();
+}
+
+}  // namespace base
diff --git a/base/process/process_iterator_fuchsia.cc b/base/process/process_iterator_fuchsia.cc
new file mode 100644
index 0000000..6d411ba
--- /dev/null
+++ b/base/process/process_iterator_fuchsia.cc
@@ -0,0 +1,26 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_iterator.h"
+
+namespace base {
+
+ProcessIterator::ProcessIterator(const ProcessFilter* filter) {
+  // TODO(fuchsia): There's no Fuchsia API to iterate processes currently.
+  NOTREACHED();
+}
+
+ProcessIterator::~ProcessIterator() {}
+
+bool ProcessIterator::CheckForNextProcess() {
+  // TODO(fuchsia): There's no Fuchsia API to iterate processes currently.
+  return false;
+}
+
+bool NamedProcessIterator::IncludeEntry() {
+  // TODO(fuchsia): There's no Fuchsia API to iterate processes currently.
+  return false;
+}
+
+}  // namespace base
diff --git a/base/process/process_iterator_linux.cc b/base/process/process_iterator_linux.cc
new file mode 100644
index 0000000..9fea70e
--- /dev/null
+++ b/base/process/process_iterator_linux.cc
@@ -0,0 +1,151 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_iterator.h"
+
+#include <stddef.h>
+
+#include "base/files/file_util.h"
+#include "base/logging.h"
+#include "base/process/internal_linux.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_util.h"
+#include "base/threading/thread_restrictions.h"
+
+namespace base {
+
+namespace {
+
+// Reads the |field_num|th field from |proc_stats|.
+// Returns an empty string on failure.
+// This version only handles VM_COMM and VM_STATE, which are the only fields
+// that are strings.
+std::string GetProcStatsFieldAsString(
+    const std::vector<std::string>& proc_stats,
+    internal::ProcStatsFields field_num) {
+  if (field_num < internal::VM_COMM || field_num > internal::VM_STATE) {
+    NOTREACHED();
+    return std::string();
+  }
+
+  if (proc_stats.size() > static_cast<size_t>(field_num))
+    return proc_stats[field_num];
+
+  NOTREACHED();
+  return std::string();
+}
+
+// Reads /proc/<pid>/cmdline and populates |proc_cmd_line_args| with the command
+// line arguments. Returns true if successful.
+// Note: /proc/<pid>/cmdline contains command line arguments separated by single
+// null characters. We tokenize it into a vector of strings using '\0' as a
+// delimiter.
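+// For example (illustrative), "/usr/bin/foo\0--bar\0" yields
+// {"/usr/bin/foo", "--bar"}.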
+bool GetProcCmdline(pid_t pid, std::vector<std::string>* proc_cmd_line_args) {
+  // Synchronously reading files in /proc is safe.
+  ThreadRestrictions::ScopedAllowIO allow_io;
+
+  FilePath cmd_line_file = internal::GetProcPidDir(pid).Append("cmdline");
+  std::string cmd_line;
+  if (!ReadFileToString(cmd_line_file, &cmd_line))
+    return false;
+  std::string delimiters;
+  delimiters.push_back('\0');
+  *proc_cmd_line_args = SplitString(cmd_line, delimiters, KEEP_WHITESPACE,
+                                    SPLIT_WANT_NONEMPTY);
+  return true;
+}
+
+}  // namespace
+
+ProcessIterator::ProcessIterator(const ProcessFilter* filter)
+    : filter_(filter) {
+  procfs_dir_ = opendir(internal::kProcDir);
+  if (!procfs_dir_) {
+    // On Android, SELinux may prevent reading /proc. See
+    // https://crbug.com/581517 for details.
+    PLOG(ERROR) << "opendir " << internal::kProcDir;
+  }
+}
+
+ProcessIterator::~ProcessIterator() {
+  if (procfs_dir_) {
+    closedir(procfs_dir_);
+    procfs_dir_ = nullptr;
+  }
+}
+
+bool ProcessIterator::CheckForNextProcess() {
+  // TODO(port): skip processes owned by different UID
+
+  if (!procfs_dir_) {
+    DLOG(ERROR) << "Skipping CheckForNextProcess(), no procfs_dir_";
+    return false;
+  }
+
+  pid_t pid = kNullProcessId;
+  std::vector<std::string> cmd_line_args;
+  std::string stats_data;
+  std::vector<std::string> proc_stats;
+
+  // Arbitrarily guess that there will never be more than 200 non-process
+  // files in /proc.  Hardy has 53 and Lucid has 61.
+  int skipped = 0;
+  const int kSkipLimit = 200;
+  while (skipped < kSkipLimit) {
+    dirent* slot = readdir(procfs_dir_);
+    // all done looking through /proc?
+    if (!slot)
+      return false;
+
+    // If not a process, keep looking for one.
+    pid = internal::ProcDirSlotToPid(slot->d_name);
+    if (!pid) {
+      skipped++;
+      continue;
+    }
+
+    if (!GetProcCmdline(pid, &cmd_line_args))
+      continue;
+
+    if (!internal::ReadProcStats(pid, &stats_data))
+      continue;
+    if (!internal::ParseProcStats(stats_data, &proc_stats))
+      continue;
+
+    std::string runstate =
+        GetProcStatsFieldAsString(proc_stats, internal::VM_STATE);
+    if (runstate.size() != 1) {
+      NOTREACHED();
+      continue;
+    }
+
+    // Is the process in 'Zombie' state, i.e. dead but waiting to be reaped?
+    // Allowed values: D R S T Z
+    if (runstate[0] != 'Z')
+      break;
+
+    // Nope, it's a zombie; somebody isn't cleaning up after their children.
+    // (e.g. WaitForProcessesToExit doesn't clean up after dead children yet.)
+    // There could be a lot of zombies; they don't count toward |kSkipLimit|.
+  }
+  if (skipped >= kSkipLimit) {
+    NOTREACHED();
+    return false;
+  }
+
+  entry_.pid_ = pid;
+  entry_.ppid_ = GetProcStatsFieldAsInt64(proc_stats, internal::VM_PPID);
+  entry_.gid_ = GetProcStatsFieldAsInt64(proc_stats, internal::VM_PGRP);
+  entry_.cmd_line_args_.assign(cmd_line_args.begin(), cmd_line_args.end());
+  entry_.exe_file_ = GetProcessExecutablePath(pid).BaseName().value();
+  return true;
+}
+
+bool NamedProcessIterator::IncludeEntry() {
+  if (executable_name_ != entry().exe_file())
+    return false;
+  return ProcessIterator::IncludeEntry();
+}
+
+}  // namespace base
diff --git a/base/process/process_iterator_mac.cc b/base/process/process_iterator_mac.cc
new file mode 100644
index 0000000..f33121a
--- /dev/null
+++ b/base/process/process_iterator_mac.cc
@@ -0,0 +1,139 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_iterator.h"
+
+#include <errno.h>
+#include <stddef.h>
+#include <sys/sysctl.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_util.h"
+
+namespace base {
+
+ProcessIterator::ProcessIterator(const ProcessFilter* filter)
+    : index_of_kinfo_proc_(0),
+      filter_(filter) {
+  // Get a snapshot of all of my processes (yes, as we loop it can go stale,
+  // but trying to find where we were in a constantly changing list is
+  // basically impossible).
+
+  int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_UID,
+                static_cast<int>(geteuid()) };
+
+  // Since more processes could start between when we get the size and when
+  // we get the list, we do a loop to keep trying until we get it.
+  bool done = false;
+  int try_num = 1;
+  const int max_tries = 10;
+  do {
+    // Get the size of the buffer
+    size_t len = 0;
+    if (sysctl(mib, arraysize(mib), NULL, &len, NULL, 0) < 0) {
+      DLOG(ERROR) << "failed to get the size needed for the process list";
+      kinfo_procs_.resize(0);
+      done = true;
+    } else {
+      size_t num_of_kinfo_proc = len / sizeof(struct kinfo_proc);
+      // Leave some spare room for process table growth (more could show up
+      // between when we check and now)
+      num_of_kinfo_proc += 16;
+      kinfo_procs_.resize(num_of_kinfo_proc);
+      len = num_of_kinfo_proc * sizeof(struct kinfo_proc);
+      // Load the list of processes
+      if (sysctl(mib, arraysize(mib), &kinfo_procs_[0], &len, NULL, 0) < 0) {
+        // If we get a mem error, it just means we need a bigger buffer, so
+        // loop around again.  Anything else is a real error, so give up.
+        if (errno != ENOMEM) {
+          DLOG(ERROR) << "failed to get the process list";
+          kinfo_procs_.resize(0);
+          done = true;
+        }
+      } else {
+        // Got the list, just make sure we're sized exactly right
+        kinfo_procs_.resize(len / sizeof(struct kinfo_proc));
+        done = true;
+      }
+    }
+  } while (!done && (try_num++ < max_tries));
+
+  if (!done) {
+    DLOG(ERROR) << "failed to collect the process list in a few tries";
+    kinfo_procs_.resize(0);
+  }
+}
+
+ProcessIterator::~ProcessIterator() {
+}
+
+bool ProcessIterator::CheckForNextProcess() {
+  std::string data;
+  for (; index_of_kinfo_proc_ < kinfo_procs_.size(); ++index_of_kinfo_proc_) {
+    kinfo_proc& kinfo = kinfo_procs_[index_of_kinfo_proc_];
+
+    // Skip processes just awaiting collection
+    if ((kinfo.kp_proc.p_pid > 0) && (kinfo.kp_proc.p_stat == SZOMB))
+      continue;
+
+    int mib[] = { CTL_KERN, KERN_PROCARGS, kinfo.kp_proc.p_pid };
+
+    // Find out what size buffer we need.
+    size_t data_len = 0;
+    if (sysctl(mib, arraysize(mib), NULL, &data_len, NULL, 0) < 0) {
+      DVPLOG(1) << "failed to figure out the buffer size for a commandline";
+      continue;
+    }
+
+    data.resize(data_len);
+    if (sysctl(mib, arraysize(mib), &data[0], &data_len, NULL, 0) < 0) {
+      DVPLOG(1) << "failed to fetch a commandline";
+      continue;
+    }
+
+    // |data| contains all the command line parameters of the process, separated
+    // by blocks of one or more null characters. We tokenize |data| into a
+    // vector of strings using '\0' as a delimiter and populate
+    // |entry_.cmd_line_args_|.
+    std::string delimiters;
+    delimiters.push_back('\0');
+    entry_.cmd_line_args_ = SplitString(data, delimiters,
+                                        KEEP_WHITESPACE, SPLIT_WANT_NONEMPTY);
+
+    // |data| starts with the full executable path followed by a null character.
+    // We search for the first instance of '\0' and extract everything before it
+    // to populate |entry_.exe_file_|.
+    size_t exec_name_end = data.find('\0');
+    if (exec_name_end == std::string::npos) {
+      DLOG(ERROR) << "command line data didn't match expected format";
+      continue;
+    }
+
+    entry_.pid_ = kinfo.kp_proc.p_pid;
+    entry_.ppid_ = kinfo.kp_eproc.e_ppid;
+    entry_.gid_ = kinfo.kp_eproc.e_pgid;
+    size_t last_slash = data.rfind('/', exec_name_end);
+    if (last_slash == std::string::npos)
+      entry_.exe_file_.assign(data, 0, exec_name_end);
+    else
+      entry_.exe_file_.assign(data, last_slash + 1,
+                              exec_name_end - last_slash - 1);
+    // Start w/ the next entry next time through
+    ++index_of_kinfo_proc_;
+    // Done
+    return true;
+  }
+  return false;
+}
+
+bool NamedProcessIterator::IncludeEntry() {
+  return (executable_name_ == entry().exe_file() &&
+          ProcessIterator::IncludeEntry());
+}
+
+}  // namespace base
diff --git a/base/process/process_iterator_openbsd.cc b/base/process/process_iterator_openbsd.cc
new file mode 100644
index 0000000..74306c0
--- /dev/null
+++ b/base/process/process_iterator_openbsd.cc
@@ -0,0 +1,132 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_iterator.h"
+
+#include <errno.h>
+#include <stddef.h>
+#include <sys/sysctl.h>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_util.h"
+
+namespace base {
+
+ProcessIterator::ProcessIterator(const ProcessFilter* filter)
+    : index_of_kinfo_proc_(),
+      filter_(filter) {
+
+  int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_UID, getuid(),
+                sizeof(struct kinfo_proc), 0 };
+
+  bool done = false;
+  int try_num = 1;
+  const int max_tries = 10;
+
+  do {
+    size_t len = 0;
+    if (sysctl(mib, arraysize(mib), NULL, &len, NULL, 0) < 0) {
+      DLOG(ERROR) << "failed to get the size needed for the process list";
+      kinfo_procs_.resize(0);
+      done = true;
+    } else {
+      size_t num_of_kinfo_proc = len / sizeof(struct kinfo_proc);
+      // Leave some spare room for process table growth (more could show up
+      // between when we check and now)
+      num_of_kinfo_proc += 16;
+      kinfo_procs_.resize(num_of_kinfo_proc);
+      len = num_of_kinfo_proc * sizeof(struct kinfo_proc);
+      if (sysctl(mib, arraysize(mib), &kinfo_procs_[0], &len, NULL, 0) < 0) {
+        // If we get a mem error, it just means we need a bigger buffer, so
+        // loop around again.  Anything else is a real error, so give up.
+        if (errno != ENOMEM) {
+          DLOG(ERROR) << "failed to get the process list";
+          kinfo_procs_.resize(0);
+          done = true;
+        }
+      } else {
+        // Got the list, just make sure we're sized exactly right
+        size_t num_of_kinfo_proc = len / sizeof(struct kinfo_proc);
+        kinfo_procs_.resize(num_of_kinfo_proc);
+        done = true;
+      }
+    }
+  } while (!done && (try_num++ < max_tries));
+
+  if (!done) {
+    DLOG(ERROR) << "failed to collect the process list in a few tries";
+    kinfo_procs_.resize(0);
+  }
+}
+
+ProcessIterator::~ProcessIterator() {
+}
+
+bool ProcessIterator::CheckForNextProcess() {
+  std::string data;
+  for (; index_of_kinfo_proc_ < kinfo_procs_.size(); ++index_of_kinfo_proc_) {
+    kinfo_proc& kinfo = kinfo_procs_[index_of_kinfo_proc_];
+
+    // Skip processes just awaiting collection
+    if ((kinfo.p_pid > 0) && (kinfo.p_stat == SZOMB))
+      continue;
+
+    int mib[] = { CTL_KERN, KERN_PROC_ARGS, kinfo.p_pid };
+
+    // Find out what size buffer we need.
+    size_t data_len = 0;
+    if (sysctl(mib, arraysize(mib), NULL, &data_len, NULL, 0) < 0) {
+      DVPLOG(1) << "failed to figure out the buffer size for a commandline";
+      continue;
+    }
+
+    data.resize(data_len);
+    if (sysctl(mib, arraysize(mib), &data[0], &data_len, NULL, 0) < 0) {
+      DVPLOG(1) << "failed to fetch a commandline";
+      continue;
+    }
+
+    // |data| contains all the command line parameters of the process, separated
+    // by blocks of one or more null characters. We tokenize |data| into a
+    // vector of strings using '\0' as a delimiter and populate
+    // |entry_.cmd_line_args_|.
+    std::string delimiters;
+    delimiters.push_back('\0');
+    entry_.cmd_line_args_ = SplitString(data, delimiters, KEEP_WHITESPACE,
+                                        SPLIT_WANT_NONEMPTY);
+
+    // |data| starts with the full executable path followed by a null character.
+    // We search for the first instance of '\0' and extract everything before it
+    // to populate |entry_.exe_file_|.
+    size_t exec_name_end = data.find('\0');
+    if (exec_name_end == std::string::npos) {
+      DLOG(ERROR) << "command line data didn't match expected format";
+      continue;
+    }
+
+    entry_.pid_ = kinfo.p_pid;
+    entry_.ppid_ = kinfo.p_ppid;
+    entry_.gid_ = kinfo.p__pgid;
+    size_t last_slash = data.rfind('/', exec_name_end);
+    if (last_slash == std::string::npos)
+      entry_.exe_file_.assign(data, 0, exec_name_end);
+    else
+      entry_.exe_file_.assign(data, last_slash + 1,
+                              exec_name_end - last_slash - 1);
+    // Start w/ the next entry next time through
+    ++index_of_kinfo_proc_;
+    // Done
+    return true;
+  }
+  return false;
+}
+
+bool NamedProcessIterator::IncludeEntry() {
+  return (executable_name_ == entry().exe_file() &&
+          ProcessIterator::IncludeEntry());
+}
+
+}  // namespace base
diff --git a/base/process/process_iterator_win.cc b/base/process/process_iterator_win.cc
new file mode 100644
index 0000000..9d5a970
--- /dev/null
+++ b/base/process/process_iterator_win.cc
@@ -0,0 +1,41 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_iterator.h"
+
+namespace base {
+
+ProcessIterator::ProcessIterator(const ProcessFilter* filter)
+    : started_iteration_(false),
+      filter_(filter) {
+  snapshot_ = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0);
+}
+
+ProcessIterator::~ProcessIterator() {
+  CloseHandle(snapshot_);
+}
+
+bool ProcessIterator::CheckForNextProcess() {
+  InitProcessEntry(&entry_);
+
+  if (!started_iteration_) {
+    started_iteration_ = true;
+    return !!Process32First(snapshot_, &entry_);
+  }
+
+  return !!Process32Next(snapshot_, &entry_);
+}
+
+void ProcessIterator::InitProcessEntry(ProcessEntry* entry) {
+  memset(entry, 0, sizeof(*entry));
+  entry->dwSize = sizeof(*entry);
+}
+
+bool NamedProcessIterator::IncludeEntry() {
+  // Case insensitive.
+  return _wcsicmp(executable_name_.c_str(), entry().exe_file()) == 0 &&
+         ProcessIterator::IncludeEntry();
+}
+
+}  // namespace base
diff --git a/base/process/process_linux.cc b/base/process/process_linux.cc
new file mode 100644
index 0000000..faf39af
--- /dev/null
+++ b/base/process/process_linux.cc
@@ -0,0 +1,201 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process.h"
+
+#include <errno.h>
+#include <sys/resource.h>
+
+#include "base/files/file_util.h"
+#include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_split.h"
+#include "base/strings/stringprintf.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/thread_restrictions.h"
+#include "build/build_config.h"
+
+// Not defined on AIX by default.
+#if defined(OS_AIX)
+#define RLIMIT_NICE 20
+#endif
+
+namespace base {
+
+namespace {
+
+const int kForegroundPriority = 0;
+
+#if defined(OS_CHROMEOS)
+// We are more aggressive in our lowering of background process priority
+// for chromeos as we have much more control over other processes running
+// on the machine.
+//
+// TODO(davemoore) Refactor this by adding support for higher levels to set
+// the foregrounding / backgrounding process so we don't have to keep
+// chrome / chromeos specific logic here.
+const int kBackgroundPriority = 19;
+const char kControlPath[] = "/sys/fs/cgroup/cpu%s/cgroup.procs";
+const char kForeground[] = "/chrome_renderers/foreground";
+const char kBackground[] = "/chrome_renderers/background";
+const char kProcPath[] = "/proc/%d/cgroup";
+
+struct CGroups {
+  // Check for cgroups files. ChromeOS supports these by default. It creates
+  // a cgroup mount in /sys/fs/cgroup and then configures two cpu task groups,
+  // one contains at most a single foreground renderer and the other contains
+  // all background renderers. This allows us to limit the impact of background
+  // renderers on foreground ones to a greater level than simple renicing.
+  bool enabled;
+  base::FilePath foreground_file;
+  base::FilePath background_file;
+
+  CGroups() {
+    foreground_file =
+        base::FilePath(base::StringPrintf(kControlPath, kForeground));
+    background_file =
+        base::FilePath(base::StringPrintf(kControlPath, kBackground));
+    base::FileSystemType foreground_type;
+    base::FileSystemType background_type;
+    enabled =
+        base::GetFileSystemType(foreground_file, &foreground_type) &&
+        base::GetFileSystemType(background_file, &background_type) &&
+        foreground_type == FILE_SYSTEM_CGROUP &&
+        background_type == FILE_SYSTEM_CGROUP;
+  }
+
+  static CGroups& Get() {
+    static auto& groups = *new CGroups;
+    return groups;
+  }
+};
+#else
+const int kBackgroundPriority = 5;
+#endif  // defined(OS_CHROMEOS)
+
+bool CanReraisePriority() {
+  // We won't be able to raise the priority if we don't have the right rlimit.
+  // The limit may be adjusted in /etc/security/limits.conf for PAM systems.
+  struct rlimit rlim;
+  return (getrlimit(RLIMIT_NICE, &rlim) == 0) &&
+         (20 - kForegroundPriority) <= static_cast<int>(rlim.rlim_cur);
+}
+
+}  // namespace
+
+// static
+bool Process::CanBackgroundProcesses() {
+#if defined(OS_CHROMEOS)
+  if (CGroups::Get().enabled)
+    return true;
+#endif  // defined(OS_CHROMEOS)
+
+  static const bool can_reraise_priority = CanReraisePriority();
+  return can_reraise_priority;
+}
+
+bool Process::IsProcessBackgrounded() const {
+  DCHECK(IsValid());
+
+#if defined(OS_CHROMEOS)
+  if (CGroups::Get().enabled) {
+    // Used to allow reading the process priority from proc on thread launch.
+    base::ThreadRestrictions::ScopedAllowIO allow_io;
+    std::string proc;
+    if (base::ReadFileToString(
+            base::FilePath(StringPrintf(kProcPath, process_)), &proc)) {
+      return IsProcessBackgroundedCGroup(proc);
+    }
+    return false;
+  }
+#endif  // defined(OS_CHROMEOS)
+
+  return GetPriority() == kBackgroundPriority;
+}
+
+bool Process::SetProcessBackgrounded(bool background) {
+  DCHECK(IsValid());
+
+#if defined(OS_CHROMEOS)
+  if (CGroups::Get().enabled) {
+    std::string pid = IntToString(process_);
+    const base::FilePath file = background ? CGroups::Get().background_file
+                                           : CGroups::Get().foreground_file;
+    return base::WriteFile(file, pid.c_str(), pid.size()) > 0;
+  }
+#endif  // defined(OS_CHROMEOS)
+
+  if (!CanBackgroundProcesses())
+    return false;
+
+  int priority = background ? kBackgroundPriority : kForegroundPriority;
+  int result = setpriority(PRIO_PROCESS, process_, priority);
+  DPCHECK(result == 0);
+  return result == 0;
+}
+
+#if defined(OS_CHROMEOS)
+bool IsProcessBackgroundedCGroup(const StringPiece& cgroup_contents) {
+  // The process can be part of multiple control groups, and for each cgroup
+  // hierarchy there's an entry in the file. We look for a control group
+  // named "/chrome_renderers/background" to determine if the process is
+  // backgrounded. crbug.com/548818.
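+  // Each line has the form "<id>:<subsystems>:<group>"; an illustrative
+  // backgrounded entry: "4:cpu:/chrome_renderers/background".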
+  std::vector<StringPiece> lines = SplitStringPiece(
+      cgroup_contents, "\n", TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
+  for (const auto& line : lines) {
+    std::vector<StringPiece> fields =
+        SplitStringPiece(line, ":", TRIM_WHITESPACE, SPLIT_WANT_ALL);
+    if (fields.size() != 3U) {
+      NOTREACHED();
+      continue;
+    }
+    if (fields[2] == kBackground)
+      return true;
+  }
+
+  return false;
+}
+#endif  // defined(OS_CHROMEOS)
+
+#if defined(OS_CHROMEOS)
+// Reads /proc/<pid>/status and returns the PID in its PID namespace.
+// If the process is not in a PID namespace or /proc/<pid>/status does not
+// report NSpid, kNullProcessId is returned.
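+// An NSpid line looks like "NSpid:\t12345\t12" (illustrative); the last value
+// is the PID in the innermost namespace.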
+ProcessId Process::GetPidInNamespace() const {
+  std::string status;
+  {
+    // Synchronously reading files in /proc does not hit the disk.
+    ThreadRestrictions::ScopedAllowIO allow_io;
+    FilePath status_file =
+        FilePath("/proc").Append(IntToString(process_)).Append("status");
+    if (!ReadFileToString(status_file, &status)) {
+      return kNullProcessId;
+    }
+  }
+
+  StringPairs pairs;
+  SplitStringIntoKeyValuePairs(status, ':', '\n', &pairs);
+  for (const auto& pair : pairs) {
+    const std::string& key = pair.first;
+    const std::string& value_str = pair.second;
+    if (key == "NSpid") {
+      std::vector<StringPiece> split_value_str = SplitStringPiece(
+          value_str, "\t", TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
+      if (split_value_str.size() <= 1) {
+        return kNullProcessId;
+      }
+      int value;
+      // The last value in the list is the PID in the namespace.
+      if (!StringToInt(split_value_str.back(), &value)) {
+        NOTREACHED();
+        return kNullProcessId;
+      }
+      return value;
+    }
+  }
+  return kNullProcessId;
+}
+#endif  // defined(OS_CHROMEOS)
+
+}  // namespace base
diff --git a/base/process/process_mac.cc b/base/process/process_mac.cc
new file mode 100644
index 0000000..70bc4c2
--- /dev/null
+++ b/base/process/process_mac.cc
@@ -0,0 +1,77 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process.h"
+
+#include <mach/mach.h>
+
+#include "base/feature_list.h"
+#include "base/mac/mach_logging.h"
+
+namespace base {
+
+// Enables backgrounding hidden renderers on Mac.
+const Feature kMacAllowBackgroundingProcesses{"MacAllowBackgroundingProcesses",
+                                              FEATURE_DISABLED_BY_DEFAULT};
+
+bool Process::CanBackgroundProcesses() {
+  return FeatureList::IsEnabled(kMacAllowBackgroundingProcesses);
+}
+
+bool Process::IsProcessBackgrounded(PortProvider* port_provider) const {
+  DCHECK(IsValid());
+  if (port_provider == nullptr || !CanBackgroundProcesses())
+    return false;
+
+  mach_port_t task_port = port_provider->TaskForPid(Pid());
+  if (task_port == TASK_NULL)
+    return false;
+
+  task_category_policy_data_t category_policy;
+  mach_msg_type_number_t task_info_count = TASK_CATEGORY_POLICY_COUNT;
+  boolean_t get_default = FALSE;
+
+  kern_return_t result =
+      task_policy_get(task_port, TASK_CATEGORY_POLICY,
+                      reinterpret_cast<task_policy_t>(&category_policy),
+                      &task_info_count, &get_default);
+  MACH_LOG_IF(ERROR, result != KERN_SUCCESS, result)
+      << "task_policy_get TASK_CATEGORY_POLICY";
+
+  if (result == KERN_SUCCESS && get_default == FALSE) {
+    return category_policy.role == TASK_BACKGROUND_APPLICATION;
+  }
+  return false;
+}
+
+bool Process::SetProcessBackgrounded(PortProvider* port_provider,
+                                     bool background) {
+  DCHECK(IsValid());
+  if (port_provider == nullptr || !CanBackgroundProcesses())
+    return false;
+
+  mach_port_t task_port = port_provider->TaskForPid(Pid());
+  if (task_port == TASK_NULL)
+    return false;
+
+  if (IsProcessBackgrounded(port_provider) == background)
+    return true;
+
+  task_category_policy category_policy;
+  category_policy.role =
+      background ? TASK_BACKGROUND_APPLICATION : TASK_FOREGROUND_APPLICATION;
+  kern_return_t result =
+      task_policy_set(task_port, TASK_CATEGORY_POLICY,
+                      reinterpret_cast<task_policy_t>(&category_policy),
+                      TASK_CATEGORY_POLICY_COUNT);
+
+  if (result != KERN_SUCCESS) {
+    MACH_LOG(ERROR, result) << "task_policy_set TASK_CATEGORY_POLICY";
+    return false;
+  }
+
+  return true;
+}
+
+}  // namespace base
diff --git a/base/process/process_metrics.cc b/base/process/process_metrics.cc
new file mode 100644
index 0000000..c3a7063
--- /dev/null
+++ b/base/process/process_metrics.cc
@@ -0,0 +1,149 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_metrics.h"
+
+#include <utility>
+
+#include "base/logging.h"
+#include "base/values.h"
+#include "build/build_config.h"
+
+#if defined(OS_MACOSX) || defined(OS_LINUX) || defined(OS_AIX)
+namespace {
+int CalculateEventsPerSecond(uint64_t event_count,
+                             uint64_t* last_event_count,
+                             base::TimeTicks* last_calculated) {
+  base::TimeTicks time = base::TimeTicks::Now();
+
+  if (*last_event_count == 0) {
+    // First call, just set the last values.
+    *last_calculated = time;
+    *last_event_count = event_count;
+    return 0;
+  }
+
+  int64_t events_delta = event_count - *last_event_count;
+  int64_t time_delta = (time - *last_calculated).InMicroseconds();
+  if (time_delta == 0) {
+    NOTREACHED();
+    return 0;
+  }
+
+  *last_calculated = time;
+  *last_event_count = event_count;
+
+  int64_t events_delta_for_ms =
+      events_delta * base::Time::kMicrosecondsPerSecond;
+  // Round the result to the nearest integer by adding 1/2 (the second term
+  // resolves to 1/2 without dropping down into floating point).
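+  // E.g. (illustrative) 5 events over 2,000,000 microseconds:
+  //   (5,000,000 + 1,000,000) / 2,000,000 = 3, i.e. 2.5 rounded to 3.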
+  return (events_delta_for_ms + time_delta / 2) / time_delta;
+}
+
+}  // namespace
+#endif  // defined(OS_MACOSX) || defined(OS_LINUX) || defined(OS_AIX)
+
+namespace base {
+
+SystemMemoryInfoKB::SystemMemoryInfoKB() = default;
+
+SystemMemoryInfoKB::SystemMemoryInfoKB(const SystemMemoryInfoKB& other) =
+    default;
+
+SystemMetrics::SystemMetrics() {
+  committed_memory_ = 0;
+}
+
+SystemMetrics SystemMetrics::Sample() {
+  SystemMetrics system_metrics;
+
+  system_metrics.committed_memory_ = GetSystemCommitCharge();
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+  GetSystemMemoryInfo(&system_metrics.memory_info_);
+  GetVmStatInfo(&system_metrics.vmstat_info_);
+  GetSystemDiskInfo(&system_metrics.disk_info_);
+#endif
+#if defined(OS_CHROMEOS)
+  GetSwapInfo(&system_metrics.swap_info_);
+#endif
+
+  return system_metrics;
+}
+
+std::unique_ptr<Value> SystemMetrics::ToValue() const {
+  std::unique_ptr<DictionaryValue> res(new DictionaryValue());
+
+  res->SetInteger("committed_memory", static_cast<int>(committed_memory_));
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+  std::unique_ptr<DictionaryValue> meminfo = memory_info_.ToValue();
+  std::unique_ptr<DictionaryValue> vmstat = vmstat_info_.ToValue();
+  meminfo->MergeDictionary(vmstat.get());
+  res->Set("meminfo", std::move(meminfo));
+  res->Set("diskinfo", disk_info_.ToValue());
+#endif
+#if defined(OS_CHROMEOS)
+  res->Set("swapinfo", swap_info_.ToValue());
+#endif
+
+  return std::move(res);
+}
+
+std::unique_ptr<ProcessMetrics> ProcessMetrics::CreateCurrentProcessMetrics() {
+#if !defined(OS_MACOSX) || defined(OS_IOS)
+  return CreateProcessMetrics(base::GetCurrentProcessHandle());
+#else
+  return CreateProcessMetrics(base::GetCurrentProcessHandle(), nullptr);
+#endif  // !defined(OS_MACOSX) || defined(OS_IOS)
+}
+
+#if !defined(OS_FREEBSD) || !defined(OS_POSIX)
+double ProcessMetrics::GetPlatformIndependentCPUUsage() {
+  TimeDelta cumulative_cpu = GetCumulativeCPUUsage();
+  TimeTicks time = TimeTicks::Now();
+
+  if (last_cumulative_cpu_.is_zero()) {
+    // First call, just set the last values.
+    last_cumulative_cpu_ = cumulative_cpu;
+    last_cpu_time_ = time;
+    return 0;
+  }
+
+  TimeDelta system_time_delta = cumulative_cpu - last_cumulative_cpu_;
+  TimeDelta time_delta = time - last_cpu_time_;
+  DCHECK(!time_delta.is_zero());
+  if (time_delta.is_zero())
+    return 0;
+
+  last_cumulative_cpu_ = cumulative_cpu;
+  last_cpu_time_ = time;
+
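+  // E.g. (illustrative) two threads each fully busy over the interval yield
+  // a result of ~200.0.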
+  return 100.0 * system_time_delta.InMicrosecondsF() /
+         time_delta.InMicrosecondsF();
+}
+#endif
+
+#if defined(OS_MACOSX) || defined(OS_LINUX) || defined(OS_AIX)
+int ProcessMetrics::CalculateIdleWakeupsPerSecond(
+    uint64_t absolute_idle_wakeups) {
+  return CalculateEventsPerSecond(absolute_idle_wakeups,
+                                  &last_absolute_idle_wakeups_,
+                                  &last_idle_wakeups_time_);
+}
+#else
+int ProcessMetrics::GetIdleWakeupsPerSecond() {
+  NOTIMPLEMENTED();  // http://crbug.com/120488
+  return 0;
+}
+#endif  // defined(OS_MACOSX) || defined(OS_LINUX) || defined(OS_AIX)
+
+#if defined(OS_MACOSX)
+int ProcessMetrics::CalculatePackageIdleWakeupsPerSecond(
+    uint64_t absolute_package_idle_wakeups) {
+  return CalculateEventsPerSecond(absolute_package_idle_wakeups,
+                                  &last_absolute_package_idle_wakeups_,
+                                  &last_package_idle_wakeups_time_);
+}
+
+#endif  // defined(OS_MACOSX)
+}  // namespace base
diff --git a/base/process/process_metrics.h b/base/process/process_metrics.h
new file mode 100644
index 0000000..0170a0c
--- /dev/null
+++ b/base/process/process_metrics.h
@@ -0,0 +1,542 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains routines for gathering resource statistics for processes
+// running on the system.
+
+#ifndef BASE_PROCESS_PROCESS_METRICS_H_
+#define BASE_PROCESS_PROCESS_METRICS_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+
+#include "base/base_export.h"
+#include "base/gtest_prod_util.h"
+#include "base/macros.h"
+#include "base/process/process_handle.h"
+#include "base/time/time.h"
+#include "base/values.h"
+#include "build/build_config.h"
+
+#if defined(OS_MACOSX)
+#include <mach/mach.h>
+#include "base/process/port_provider_mac.h"
+
+#if !defined(OS_IOS)
+#include <mach/mach_vm.h>
+#endif
+#endif
+
+#if defined(OS_WIN)
+#include "base/win/scoped_handle.h"
+#include "base/win/windows_types.h"
+#endif
+
+namespace base {
+
+// Full declaration is in process_metrics_iocounters.h.
+struct IoCounters;
+
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+// Minor and major page fault counts since the process creation.
+// Both counts are process-wide, and exclude child processes.
+//
+// minor: Number of page faults that didn't require disk IO.
+// major: Number of page faults that required disk IO.
+struct PageFaultCounts {
+  int64_t minor;
+  int64_t major;
+};
+#endif  // defined(OS_LINUX) || defined(OS_ANDROID)
+
+// Convert a POSIX timeval to microseconds.
+BASE_EXPORT int64_t TimeValToMicroseconds(const struct timeval& tv);
+
+// Provides performance metrics for a specified process (CPU usage and IO
+// counters). Use CreateCurrentProcessMetrics() to get an instance for the
+// current process, or CreateProcessMetrics() to get an instance for an
+// arbitrary process. Then, access the information with the different get
+// methods.
+//
+// This class exposes a few platform-specific APIs for parsing memory usage, but
+// these are not intended to generalize to other platforms, since the memory
+// models differ substantially.
+//
+// To obtain consistent memory metrics, use the memory_instrumentation service.
+//
+// For further documentation on memory, see
+// https://chromium.googlesource.com/chromium/src/+/HEAD/docs/README.md
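+//
+// A minimal usage sketch (error handling omitted; illustrative only):
+//   std::unique_ptr<base::ProcessMetrics> metrics =
+//       base::ProcessMetrics::CreateCurrentProcessMetrics();
+//   metrics->GetPlatformIndependentCPUUsage();  // First call returns 0.
+//   // ... some time later ...
+//   double cpu = metrics->GetPlatformIndependentCPUUsage();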
+class BASE_EXPORT ProcessMetrics {
+ public:
+  ~ProcessMetrics();
+
+  // Creates a ProcessMetrics for the specified process.
+#if !defined(OS_MACOSX) || defined(OS_IOS)
+  static std::unique_ptr<ProcessMetrics> CreateProcessMetrics(
+      ProcessHandle process);
+#else
+
+  // The port provider needs to outlive the ProcessMetrics object returned by
+  // this function. If NULL is passed as provider, the returned object
+  // only returns valid metrics if |process| is the current process.
+  static std::unique_ptr<ProcessMetrics> CreateProcessMetrics(
+      ProcessHandle process,
+      PortProvider* port_provider);
+#endif  // !defined(OS_MACOSX) || defined(OS_IOS)
+
+  // Creates a ProcessMetrics for the current process. This a cross-platform
+  // convenience wrapper for CreateProcessMetrics().
+  static std::unique_ptr<ProcessMetrics> CreateCurrentProcessMetrics();
+
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+  // Resident Set Size is a Linux/Android specific memory concept. Do not
+  // attempt to extend this to other platforms.
+  BASE_EXPORT size_t GetResidentSetSize() const;
+#endif
+
+#if defined(OS_CHROMEOS)
+  // /proc/<pid>/totmaps is a file that reports memory summary statistics for
+  // the process.
+  // totmaps is a Linux-specific concept, currently only used on ChromeOS.
+  // Do not attempt to extend this to other platforms.
+  //
+  struct TotalsSummary {
+    size_t private_clean_kb;
+    size_t private_dirty_kb;
+    size_t swap_kb;
+  };
+  BASE_EXPORT TotalsSummary GetTotalsSummary() const;
+#endif
+
+#if defined(OS_MACOSX)
+  struct TaskVMInfo {
+    // Only available on macOS 10.12+.
+    // Anonymous, non-discardable memory, including non-volatile IOKit.
+    // Measured in bytes.
+    uint64_t phys_footprint = 0;
+
+    // Anonymous, non-discardable, non-compressed memory, excluding IOKit.
+    // Measured in bytes.
+    uint64_t internal = 0;
+
+    // Compressed memory measured in bytes.
+    uint64_t compressed = 0;
+  };
+  TaskVMInfo GetTaskVMInfo() const;
+#endif
+
+  // Returns the percentage of time spent executing, across all threads of the
+  // process, in the interval since the last time the method was called. Since
+  // this considers the total execution time across all threads in a process,
+  // the result can easily exceed 100% in multi-thread processes running on
+  // multi-core systems. In general the result is therefore a value in the
+  // range 0% to SysInfo::NumberOfProcessors() * 100%.
+  //
+  // To obtain the percentage of total available CPU resources consumed by this
+  // process over the interval, the caller must divide by NumberOfProcessors().
+  //
+  // Since this API measures usage over an interval, it will return zero on the
+  // first call, and an actual value only on the second and subsequent calls.
+  double GetPlatformIndependentCPUUsage();
+
+  // Returns the cumulative CPU usage across all threads of the process since
+  // process start. In case of multi-core processors, a process can consume CPU
+  // at a rate higher than wall-clock time, e.g. two cores at full utilization
+  // will result in a time delta of 2 seconds/per 1 wall-clock second.
+  TimeDelta GetCumulativeCPUUsage();
+
+  // Returns the number of average idle cpu wakeups per second since the last
+  // call.
+  int GetIdleWakeupsPerSecond();
+
+#if defined(OS_MACOSX)
+  // Returns the number of average "package idle exits" per second, which have
+  // a higher energy impact than a regular wakeup, since the last call.
+  //
+  // From the powermetrics man page:
+  // "With the exception of some Mac Pro systems, Mac and
+  // iOS systems are typically single package systems, wherein all CPUs are
+  // part of a single processor complex (typically a single IC die) with shared
+  // logic that can include (depending on system specifics) shared last level
+  // caches, an integrated memory controller etc. When all CPUs in the package
+  // are idle, the hardware can power-gate significant portions of the shared
+  // logic in addition to each individual processor's logic, as well as take
+  // measures such as placing DRAM in to self-refresh (also referred to as
+  // auto-refresh), place interconnects into lower-power states etc"
+  int GetPackageIdleWakeupsPerSecond();
+#endif
+
+  // Retrieves accounting information for all I/O operations performed by the
+  // process.
+  // If IO information is retrieved successfully, the function returns true
+  // and fills in the IO_COUNTERS passed in. The function returns false
+  // otherwise.
+  bool GetIOCounters(IoCounters* io_counters) const;
+
+#if defined(OS_LINUX) || defined(OS_AIX) || defined(OS_ANDROID)
+  // Returns the number of file descriptors currently open by the process, or
+  // -1 on error.
+  int GetOpenFdCount() const;
+
+  // Returns the soft limit of file descriptors that can be opened by the
+  // process, or -1 on error.
+  int GetOpenFdSoftLimit() const;
+#endif  // defined(OS_LINUX) || defined(OS_AIX) || defined(OS_ANDROID)
+
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+  // Bytes of swap as reported by /proc/[pid]/status.
+  uint64_t GetVmSwapBytes() const;
+
+  // Minor and major page fault count as reported by /proc/[pid]/stat.
+  // Returns true for success.
+  bool GetPageFaultCounts(PageFaultCounts* counts) const;
+#endif  // defined(OS_LINUX) || defined(OS_ANDROID)
+
+  // Returns total memory usage of malloc.
+  size_t GetMallocUsage();
+
+ private:
+#if !defined(OS_MACOSX) || defined(OS_IOS)
+  explicit ProcessMetrics(ProcessHandle process);
+#else
+  ProcessMetrics(ProcessHandle process, PortProvider* port_provider);
+#endif  // !defined(OS_MACOSX) || defined(OS_IOS)
+
+#if defined(OS_MACOSX) || defined(OS_LINUX) || defined(OS_AIX)
+  int CalculateIdleWakeupsPerSecond(uint64_t absolute_idle_wakeups);
+#endif
+#if defined(OS_MACOSX)
+  // The subset of wakeups that cause a "package exit" can be tracked on macOS.
+  // See the |GetPackageIdleWakeupsPerSecond| comment for more info.
+  int CalculatePackageIdleWakeupsPerSecond(
+      uint64_t absolute_package_idle_wakeups);
+#endif
+
+#if defined(OS_WIN)
+  win::ScopedHandle process_;
+#else
+  ProcessHandle process_;
+#endif
+
+  // Used to store the previous times and CPU usage counts so we can
+  // compute the CPU usage between calls.
+  TimeTicks last_cpu_time_;
+#if !defined(OS_FREEBSD) || !defined(OS_POSIX)
+  TimeDelta last_cumulative_cpu_;
+#endif
+
+#if defined(OS_MACOSX) || defined(OS_LINUX) || defined(OS_AIX)
+  // Same thing for idle wakeups.
+  TimeTicks last_idle_wakeups_time_;
+  uint64_t last_absolute_idle_wakeups_;
+#endif
+
+#if defined(OS_MACOSX)
+  // And same thing for package idle exit wakeups.
+  TimeTicks last_package_idle_wakeups_time_;
+  uint64_t last_absolute_package_idle_wakeups_;
+#endif
+
+#if !defined(OS_IOS)
+#if defined(OS_MACOSX)
+  // Queries the port provider if it's set.
+  mach_port_t TaskForPid(ProcessHandle process) const;
+
+  PortProvider* port_provider_;
+#endif  // defined(OS_MACOSX)
+#endif  // !defined(OS_IOS)
+
+  DISALLOW_COPY_AND_ASSIGN(ProcessMetrics);
+};
+
+// Returns the memory committed by the system in KBytes.
+// Returns 0 if it can't compute the commit charge.
+BASE_EXPORT size_t GetSystemCommitCharge();
+
+// Returns the number of bytes in a memory page. Do not use this to compute
+// the number of pages in a block of memory for calling mincore(). On some
+// platforms, e.g. iOS, mincore() uses a different page size from what is
+// returned by GetPageSize().
+BASE_EXPORT size_t GetPageSize();
+
+// Returns the maximum number of file descriptors that can be open by a process
+// at once. If the number is unavailable, a conservative best guess is returned.
+BASE_EXPORT size_t GetMaxFds();
+
+#if defined(OS_POSIX) && !defined(OS_FUCHSIA)
+// Increases the file descriptor soft limit to |max_descriptors| or the OS hard
+// limit, whichever is lower. If the limit is already higher than
+// |max_descriptors|, then nothing happens.
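+// For example (illustrative), IncreaseFdLimitTo(4096) raises the soft limit
+// to min(4096, hard limit), and does nothing if it is already at least 4096.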
+BASE_EXPORT void IncreaseFdLimitTo(unsigned int max_descriptors);
+#endif  // defined(OS_POSIX) && !defined(OS_FUCHSIA)
+
+#if defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX) || \
+    defined(OS_ANDROID) || defined(OS_AIX) || defined(OS_FUCHSIA)
+// Data about system-wide memory consumption. Values are in KB. Available on
+// Windows, Mac, Linux, Android and Chrome OS.
+//
+// Total memory is available on all platforms that implement
+// GetSystemMemoryInfo(). Total/free swap memory are available on all platforms
+// except on Mac. Buffers/cached/active_anon/inactive_anon/active_file/
+// inactive_file/dirty/reclaimable/pswpin/pswpout/pgmajfault are available on
+// Linux/Android/Chrome OS. Shmem/slab/gem_objects/gem_size are Chrome OS only.
+// Speculative/file_backed/purgeable are Mac and iOS only.
+// Free is absent on Windows (see "avail_phys" below).
+struct BASE_EXPORT SystemMemoryInfoKB {
+  SystemMemoryInfoKB();
+  SystemMemoryInfoKB(const SystemMemoryInfoKB& other);
+
+  // Serializes the platform specific fields to value.
+  std::unique_ptr<DictionaryValue> ToValue() const;
+
+  int total = 0;
+
+#if !defined(OS_WIN)
+  int free = 0;
+#endif
+
+#if defined(OS_WIN)
+  // "This is the amount of physical memory that can be immediately reused
+  // without having to write its contents to disk first. It is the sum of the
+  // size of the standby, free, and zero lists." (MSDN).
+  // Standby: not modified pages of physical ram (file-backed memory) that are
+  // not actively being used.
+  int avail_phys = 0;
+#endif
+
+#if defined(OS_LINUX) || defined(OS_ANDROID) || defined(OS_AIX)
+  // This provides an estimate of available memory as described here:
+  // https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=34e431b0ae398fc54ea69ff85ec700722c9da773
+  // NOTE: this is ONLY valid in kernels 3.14 and up.  Its value will always
+  // be 0 in earlier kernel versions.
+  // Note: it includes _all_ file-backed memory (active + inactive).
+  int available = 0;
+#endif
+
+#if !defined(OS_MACOSX)
+  int swap_total = 0;
+  int swap_free = 0;
+#endif
+
+#if defined(OS_ANDROID) || defined(OS_LINUX) || defined(OS_AIX) || \
+    defined(OS_FUCHSIA)
+  int buffers = 0;
+  int cached = 0;
+  int active_anon = 0;
+  int inactive_anon = 0;
+  int active_file = 0;
+  int inactive_file = 0;
+  int dirty = 0;
+  int reclaimable = 0;
+#endif  // defined(OS_ANDROID) || defined(OS_LINUX) || defined(OS_AIX) ||
+        // defined(OS_FUCHSIA)
+
+#if defined(OS_CHROMEOS)
+  int shmem = 0;
+  int slab = 0;
+  // Gem data will be -1 if not supported.
+  int gem_objects = -1;
+  long long gem_size = -1;
+#endif  // defined(OS_CHROMEOS)
+
+#if defined(OS_MACOSX)
+  int speculative = 0;
+  int file_backed = 0;
+  int purgeable = 0;
+#endif  // defined(OS_MACOSX)
+};
+
+// On Linux/Android/Chrome OS, system-wide memory consumption data is parsed
+// from /proc/meminfo and /proc/vmstat. On Windows/Mac, it is obtained using
+// system API calls.
+//
+// Fills in the provided |meminfo| structure. Returns true on success.
+// Exposed for memory debugging widget.
+BASE_EXPORT bool GetSystemMemoryInfo(SystemMemoryInfoKB* meminfo);
+
+#endif  // defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX) ||
+        // defined(OS_ANDROID) || defined(OS_AIX) || defined(OS_FUCHSIA)
+
+#if defined(OS_LINUX) || defined(OS_ANDROID) || defined(OS_AIX)
+// Parse the data found in /proc/<pid>/stat and return the sum of the
+// CPU-related ticks.  Returns -1 on parse error.
+// Exposed for testing.
+BASE_EXPORT int ParseProcStatCPU(StringPiece input);
+
+// Get the number of threads of |process| as available in /proc/<pid>/stat.
+// This should be used with care as no synchronization with running threads is
+// done. This is mostly useful to guarantee being single-threaded.
+// Returns 0 on failure.
+BASE_EXPORT int GetNumberOfThreads(ProcessHandle process);
+
+// /proc/self/exe refers to the current executable.
+BASE_EXPORT extern const char kProcSelfExe[];
+
+// Parses a string containing the contents of /proc/meminfo.
+// Returns true on success or false for a parsing error.
+// Exposed for testing.
+BASE_EXPORT bool ParseProcMeminfo(StringPiece input,
+                                  SystemMemoryInfoKB* meminfo);
+
+// Data from /proc/vmstat.
+struct BASE_EXPORT VmStatInfo {
+  // Serializes the platform specific fields to value.
+  std::unique_ptr<DictionaryValue> ToValue() const;
+
+  unsigned long pswpin = 0;
+  unsigned long pswpout = 0;
+  unsigned long pgmajfault = 0;
+};
+
+// Retrieves data from /proc/vmstat about system-wide vm operations.
+// Fills in the provided |vmstat| structure. Returns true on success.
+BASE_EXPORT bool GetVmStatInfo(VmStatInfo* vmstat);
+
+// Parses a string containing the contents of /proc/vmstat.
+// Returns true on success or false for a parsing error.
+// Exposed for testing.
+BASE_EXPORT bool ParseProcVmstat(StringPiece input, VmStatInfo* vmstat);
+
+// Data from /proc/diskstats about system-wide disk I/O.
+struct BASE_EXPORT SystemDiskInfo {
+  SystemDiskInfo();
+  SystemDiskInfo(const SystemDiskInfo& other);
+
+  // Serializes the platform specific fields to value.
+  std::unique_ptr<Value> ToValue() const;
+
+  uint64_t reads = 0;
+  uint64_t reads_merged = 0;
+  uint64_t sectors_read = 0;
+  uint64_t read_time = 0;
+  uint64_t writes = 0;
+  uint64_t writes_merged = 0;
+  uint64_t sectors_written = 0;
+  uint64_t write_time = 0;
+  uint64_t io = 0;
+  uint64_t io_time = 0;
+  uint64_t weighted_io_time = 0;
+};
+
+// Checks whether the candidate string is a valid disk name, [hsv]d[a-z]+
+// for a generic disk or mmcblk[0-9]+ for the MMC case.
+// Names of disk partitions (e.g. sda1) are not valid.
+BASE_EXPORT bool IsValidDiskName(StringPiece candidate);
+
+// Retrieves data from /proc/diskstats about system-wide disk I/O.
+// Fills in the provided |diskinfo| structure. Returns true on success.
+BASE_EXPORT bool GetSystemDiskInfo(SystemDiskInfo* diskinfo);
+
+// Returns the amount of time spent in user space since boot across all CPUs.
+BASE_EXPORT TimeDelta GetUserCpuTimeSinceBoot();
+
+#endif  // defined(OS_LINUX) || defined(OS_ANDROID) || defined(OS_AIX)
+
+#if defined(OS_CHROMEOS)
+// Data from files in directory /sys/block/zram0 about ZRAM usage.
+struct BASE_EXPORT SwapInfo {
+  SwapInfo()
+      : num_reads(0),
+        num_writes(0),
+        compr_data_size(0),
+        orig_data_size(0),
+        mem_used_total(0) {
+  }
+
+  // Serializes the platform specific fields to value.
+  std::unique_ptr<Value> ToValue() const;
+
+  uint64_t num_reads = 0;
+  uint64_t num_writes = 0;
+  uint64_t compr_data_size = 0;
+  uint64_t orig_data_size = 0;
+  uint64_t mem_used_total = 0;
+};
+
+// Parses a string containing the contents of /sys/block/zram0/mm_stat.
+// This should be used for the new ZRAM sysfs interfaces.
+// Returns true on success or false for a parsing error.
+// Exposed for testing.
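+// mm_stat holds space-separated counters; the first three are assumed to be
+// orig_data_size, compr_data_size and mem_used_total, e.g. (illustrative):
+//   "8192 2048 12288 0 12288 0 0"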
+BASE_EXPORT bool ParseZramMmStat(StringPiece mm_stat_data, SwapInfo* swap_info);
+
+// Parses a string containing the contents of /sys/block/zram0/stat.
+// This should be used for the new ZRAM sysfs interfaces.
+// Returns true on success or false for a parsing error.
+// Exposed for testing.
+BASE_EXPORT bool ParseZramStat(StringPiece stat_data, SwapInfo* swap_info);
+
+// In ChromeOS, reads files from /sys/block/zram0 that contain ZRAM usage data.
+// Fills in the provided |swap_data| structure.
+// Returns true on success or false for a parsing error.
+BASE_EXPORT bool GetSwapInfo(SwapInfo* swap_info);
+#endif  // defined(OS_CHROMEOS)
+
+// Collects and holds performance metrics for system memory and disk.
+// Provides functionality to retrieve the data on various platforms and
+// to serialize the stored data.
+class SystemMetrics {
+ public:
+  SystemMetrics();
+
+  static SystemMetrics Sample();
+
+  // Serializes the system metrics to value.
+  std::unique_ptr<Value> ToValue() const;
+
+ private:
+  FRIEND_TEST_ALL_PREFIXES(SystemMetricsTest, SystemMetrics);
+
+  size_t committed_memory_;
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+  SystemMemoryInfoKB memory_info_;
+  VmStatInfo vmstat_info_;
+  SystemDiskInfo disk_info_;
+#endif
+#if defined(OS_CHROMEOS)
+  SwapInfo swap_info_;
+#endif
+};
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+enum class MachVMRegionResult {
+  // There were no more memory regions between |address| and the end of the
+  // virtual address space.
+  Finished,
+
+  // All output parameters are invalid.
+  Error,
+
+  // All output parameters are filled in.
+  Success
+};
+
+// Returns info on the first memory region at or after |address|, including
+// resident memory and share mode. On Success, |size| reflects the size of the
+// memory region.
+// |size| and |info| are output parameters, only valid on Success.
+// |address| is an in-out parameter that represents both the address to start
+// looking and the start address of the memory region.
+BASE_EXPORT MachVMRegionResult GetTopInfo(mach_port_t task,
+                                          mach_vm_size_t* size,
+                                          mach_vm_address_t* address,
+                                          vm_region_top_info_data_t* info);
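+//
+// Illustrative usage (a sketch, not part of the API): the in-out |address|
+// lets a caller walk a task's regions:
+//   mach_vm_address_t address = 0;
+//   mach_vm_size_t size = 0;
+//   vm_region_top_info_data_t info;
+//   while (GetTopInfo(task, &size, &address, &info) ==
+//          MachVMRegionResult::Success) {
+//     // [address, address + size) is the region just returned.
+//     address += size;
+//   }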
+
+// Returns info on the first memory region at or after |address|, including
+// protection values. On Success, |size| reflects the size of the
+// memory region.
+// |size| and |info| are output parameters, only valid on Success.
+// |address| is an in-out parameter, as with GetTopInfo() above.
+BASE_EXPORT MachVMRegionResult GetBasicInfo(mach_port_t task,
+                                            mach_vm_size_t* size,
+                                            mach_vm_address_t* address,
+                                            vm_region_basic_info_64* info);
+#endif  // defined(OS_MACOSX) && !defined(OS_IOS)
+
+}  // namespace base
+
+#endif  // BASE_PROCESS_PROCESS_METRICS_H_
diff --git a/base/process/process_metrics_freebsd.cc b/base/process/process_metrics_freebsd.cc
new file mode 100644
index 0000000..a552c03
--- /dev/null
+++ b/base/process/process_metrics_freebsd.cc
@@ -0,0 +1,72 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_metrics.h"
+
+#include <stddef.h>
+#include <sys/sysctl.h>
+#include <sys/user.h>
+#include <unistd.h>
+
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/process/process_metrics_iocounters.h"
+#include "base/stl_util.h"
+
+namespace base {
+
+ProcessMetrics::ProcessMetrics(ProcessHandle process)
+    : process_(process),
+      last_cpu_(0) {}
+
+// static
+std::unique_ptr<ProcessMetrics> ProcessMetrics::CreateProcessMetrics(
+    ProcessHandle process) {
+  return WrapUnique(new ProcessMetrics(process));
+}
+
+double ProcessMetrics::GetPlatformIndependentCPUUsage() {
+  struct kinfo_proc info;
+  int mib[] = {CTL_KERN, KERN_PROC, KERN_PROC_PID, process_};
+  size_t length = sizeof(info);
+
+  if (sysctl(mib, base::size(mib), &info, &length, NULL, 0) < 0)
+    return 0;
+
+  // ki_pctcpu is a fixed-point fraction of FSCALE; divide in floating point
+  // so fractions below 100% aren't truncated to zero.
+  return (static_cast<double>(info.ki_pctcpu) / FSCALE) * 100.0;
+}
+
+TimeDelta ProcessMetrics::GetCumulativeCPUUsage() {
+  NOTREACHED();
+  return TimeDelta();
+}
+
+bool ProcessMetrics::GetIOCounters(IoCounters* io_counters) const {
+  return false;
+}
+
+size_t GetSystemCommitCharge() {
+  int pagesize;
+  unsigned long mem_total, mem_free, mem_inactive;
+  size_t length = sizeof(mem_total);
+
+  // Total physical memory, in bytes.
+  int mib[] = {CTL_HW, HW_PHYSMEM};
+  if (sysctl(mib, base::size(mib), &mem_total, &length, NULL, 0) < 0)
+    return 0;
+
+  length = sizeof(mem_free);
+  if (sysctlbyname("vm.stats.vm.v_free_count", &mem_free, &length, NULL, 0) < 0)
+    return 0;
+
+  length = sizeof(mem_inactive);
+  if (sysctlbyname("vm.stats.vm.v_inactive_count", &mem_inactive, &length,
+      NULL, 0) < 0) {
+    return 0;
+  }
+
+  pagesize = getpagesize();
+
+  return mem_total - (mem_free * pagesize) - (mem_inactive * pagesize);
+}
+
+}  // namespace base
diff --git a/base/process/process_metrics_fuchsia.cc b/base/process/process_metrics_fuchsia.cc
new file mode 100644
index 0000000..a34dff7
--- /dev/null
+++ b/base/process/process_metrics_fuchsia.cc
@@ -0,0 +1,38 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_metrics.h"
+
+#include <fdio/limits.h>
+
+namespace base {
+
+size_t GetMaxFds() {
+  return FDIO_MAX_FD;
+}
+
+size_t GetSystemCommitCharge() {
+  // Not available, and it seems unlikely that it ever will be (for the whole
+  // system).
+  NOTIMPLEMENTED();
+  return 0;
+}
+
+// static
+std::unique_ptr<ProcessMetrics> ProcessMetrics::CreateProcessMetrics(
+    ProcessHandle process) {
+  NOTIMPLEMENTED();  // TODO(fuchsia): https://crbug.com/706592.
+  return nullptr;
+}
+
+TimeDelta ProcessMetrics::GetCumulativeCPUUsage() {
+  NOTIMPLEMENTED();  // TODO(fuchsia): https://crbug.com/706592.
+  return TimeDelta();
+}
+
+bool GetSystemMemoryInfo(SystemMemoryInfoKB* meminfo) {
+  NOTIMPLEMENTED();  // TODO(fuchsia): https://crbug.com/706592.
+  return false;
+}
+
+}  // namespace base
diff --git a/base/process/process_metrics_iocounters.h b/base/process/process_metrics_iocounters.h
new file mode 100644
index 0000000..e12d090
--- /dev/null
+++ b/base/process/process_metrics_iocounters.h
@@ -0,0 +1,37 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is a separate file so that users of process metrics don't need to
+// include windows.h unless they need IoCounters.
+
+#ifndef BASE_PROCESS_PROCESS_METRICS_IOCOUNTERS_H_
+#define BASE_PROCESS_PROCESS_METRICS_IOCOUNTERS_H_
+
+#include <stdint.h>
+
+#include "base/process/process_metrics.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#endif
+
+namespace base {
+
+#if defined(OS_WIN)
+struct IoCounters : public IO_COUNTERS {};
+#elif defined(OS_POSIX)
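+// A POSIX stand-in mirroring the field names of the Windows IO_COUNTERS
+// struct, so cross-platform code can use the same member names.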
+struct IoCounters {
+  uint64_t ReadOperationCount;
+  uint64_t WriteOperationCount;
+  uint64_t OtherOperationCount;
+  uint64_t ReadTransferCount;
+  uint64_t WriteTransferCount;
+  uint64_t OtherTransferCount;
+};
+#endif
+
+}  // namespace base
+
+#endif  // BASE_PROCESS_PROCESS_METRICS_IOCOUNTERS_H_
diff --git a/base/process/process_metrics_ios.cc b/base/process/process_metrics_ios.cc
new file mode 100644
index 0000000..83fc3d6
--- /dev/null
+++ b/base/process/process_metrics_ios.cc
@@ -0,0 +1,100 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_metrics.h"
+
+#include <limits.h>
+#include <mach/task.h>
+#include <stddef.h>
+
+#include "base/logging.h"
+#include "base/mac/scoped_mach_port.h"
+#include "base/memory/ptr_util.h"
+#include "base/numerics/safe_conversions.h"
+
+namespace base {
+
+ProcessMetrics::ProcessMetrics(ProcessHandle process) {}
+
+ProcessMetrics::~ProcessMetrics() {}
+
+// static
+std::unique_ptr<ProcessMetrics> ProcessMetrics::CreateProcessMetrics(
+    ProcessHandle process) {
+  return WrapUnique(new ProcessMetrics(process));
+}
+
+TimeDelta ProcessMetrics::GetCumulativeCPUUsage() {
+  NOTIMPLEMENTED();
+  return TimeDelta();
+}
+
+size_t GetMaxFds() {
+  static const rlim_t kSystemDefaultMaxFds = 256;
+  rlim_t max_fds;
+  struct rlimit nofile;
+  if (getrlimit(RLIMIT_NOFILE, &nofile)) {
+    // Error case: Take a best guess.
+    max_fds = kSystemDefaultMaxFds;
+  } else {
+    max_fds = nofile.rlim_cur;
+  }
+
+  if (max_fds > INT_MAX)
+    max_fds = INT_MAX;
+
+  return static_cast<size_t>(max_fds);
+}
+
+void IncreaseFdLimitTo(unsigned int max_descriptors) {
+  // Unimplemented.
+}
+
+size_t GetPageSize() {
+  return getpagesize();
+}
+
+// Bytes committed by the system.
+size_t GetSystemCommitCharge() {
+  NOTIMPLEMENTED();
+  return 0;
+}
+
+bool GetSystemMemoryInfo(SystemMemoryInfoKB* meminfo) {
+  struct host_basic_info hostinfo;
+  mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
+  base::mac::ScopedMachSendRight host(mach_host_self());
+  int result = host_info(host.get(), HOST_BASIC_INFO,
+                         reinterpret_cast<host_info_t>(&hostinfo), &count);
+  if (result != KERN_SUCCESS)
+    return false;
+
+  DCHECK_EQ(HOST_BASIC_INFO_COUNT, count);
+  meminfo->total = static_cast<int>(hostinfo.max_mem / 1024);
+
+  vm_statistics64_data_t vm_info;
+  count = HOST_VM_INFO64_COUNT;
+
+  if (host_statistics64(host.get(), HOST_VM_INFO64,
+                        reinterpret_cast<host_info64_t>(&vm_info),
+                        &count) != KERN_SUCCESS) {
+    return false;
+  }
+  DCHECK_EQ(HOST_VM_INFO64_COUNT, count);
+
+  // Check that PAGE_SIZE is divisible by 1024 (2^10).
+  CHECK_EQ(PAGE_SIZE, (PAGE_SIZE >> 10) << 10);
+  meminfo->free = saturated_cast<int>(
+      PAGE_SIZE / 1024 * (vm_info.free_count - vm_info.speculative_count));
+  meminfo->speculative =
+      saturated_cast<int>(PAGE_SIZE / 1024 * vm_info.speculative_count);
+  meminfo->file_backed =
+      saturated_cast<int>(PAGE_SIZE / 1024 * vm_info.external_page_count);
+  meminfo->purgeable =
+      saturated_cast<int>(PAGE_SIZE / 1024 * vm_info.purgeable_count);
+
+  return true;
+}
+
+}  // namespace base
diff --git a/base/process/process_metrics_linux.cc b/base/process/process_metrics_linux.cc
new file mode 100644
index 0000000..16cde35
--- /dev/null
+++ b/base/process/process_metrics_linux.cc
@@ -0,0 +1,975 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_metrics.h"
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <utility>
+
+#include "base/files/dir_reader_posix.h"
+#include "base/files/file_util.h"
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/optional.h"
+#include "base/process/internal_linux.h"
+#include "base/process/process_metrics_iocounters.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_tokenizer.h"
+#include "base/strings/string_util.h"
+#include "base/threading/thread_restrictions.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace {
+
+void TrimKeyValuePairs(StringPairs* pairs) {
+  for (auto& pair : *pairs) {
+    TrimWhitespaceASCII(pair.first, TRIM_ALL, &pair.first);
+    TrimWhitespaceASCII(pair.second, TRIM_ALL, &pair.second);
+  }
+}
+
+#if defined(OS_CHROMEOS)
+// Read a file with a single number string and return the number as a uint64_t.
+uint64_t ReadFileToUint64(const FilePath& file) {
+  std::string file_contents;
+  if (!ReadFileToString(file, &file_contents))
+    return 0;
+  TrimWhitespaceASCII(file_contents, TRIM_ALL, &file_contents);
+  uint64_t file_contents_uint64 = 0;
+  if (!StringToUint64(file_contents, &file_contents_uint64))
+    return 0;
+  return file_contents_uint64;
+}
+#endif
+
+// Read |filename| in /proc/<pid>/, split the entries into key/value pairs, and
+// trim the key and value. On success, return true and write the trimmed
+// key/value pairs into |key_value_pairs|.
+bool ReadProcFileToTrimmedStringPairs(pid_t pid,
+                                      StringPiece filename,
+                                      StringPairs* key_value_pairs) {
+  std::string status_data;
+  {
+    // Synchronously reading files in /proc does not hit the disk.
+    ThreadRestrictions::ScopedAllowIO allow_io;
+    FilePath status_file = internal::GetProcPidDir(pid).Append(filename);
+    if (!ReadFileToString(status_file, &status_data))
+      return false;
+  }
+  SplitStringIntoKeyValuePairs(status_data, ':', '\n', key_value_pairs);
+  TrimKeyValuePairs(key_value_pairs);
+  return true;
+}
+
+// Read /proc/<pid>/status and return the value for |field|, or 0 on failure.
+// Only works for fields in the form of "Field: value kB".
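+// Example matching lines from /proc/<pid>/status (illustrative values):
+//   VmSwap:      128 kB
+//   VmHWM:     10240 kB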
+size_t ReadProcStatusAndGetFieldAsSizeT(pid_t pid, StringPiece field) {
+  StringPairs pairs;
+  if (!ReadProcFileToTrimmedStringPairs(pid, "status", &pairs))
+    return 0;
+
+  for (const auto& pair : pairs) {
+    const std::string& key = pair.first;
+    const std::string& value_str = pair.second;
+    if (key != field)
+      continue;
+
+    std::vector<StringPiece> split_value_str =
+        SplitStringPiece(value_str, " ", TRIM_WHITESPACE, SPLIT_WANT_ALL);
+    if (split_value_str.size() != 2 || split_value_str[1] != "kB") {
+      NOTREACHED();
+      return 0;
+    }
+    size_t value;
+    if (!StringToSizeT(split_value_str[0], &value)) {
+      NOTREACHED();
+      return 0;
+    }
+    return value;
+  }
+  // This can be reached if the process dies while its /proc files are read --
+  // in that case, the kernel can return missing fields.
+  return 0;
+}
+
+#if defined(OS_LINUX) || defined(OS_AIX)
+// Read /proc/<pid>/status and look for |field|. On success, return true and
+// write the value for |field| into |result|.
+// Only works for fields in the form of "field    :     uint_value"
+bool ReadProcStatusAndGetFieldAsUint64(pid_t pid,
+                                       StringPiece field,
+                                       uint64_t* result) {
+  StringPairs pairs;
+  if (!ReadProcFileToTrimmedStringPairs(pid, "status", &pairs))
+    return false;
+
+  for (const auto& pair : pairs) {
+    const std::string& key = pair.first;
+    const std::string& value_str = pair.second;
+    if (key != field)
+      continue;
+
+    uint64_t value;
+    if (!StringToUint64(value_str, &value))
+      return false;
+    *result = value;
+    return true;
+  }
+  return false;
+}
+#endif  // defined(OS_LINUX) || defined(OS_AIX)
+
+// Get the total CPU time of a single process.  The return value is the number
+// of jiffies on success or -1 on error.
+int64_t GetProcessCPU(pid_t pid) {
+  std::string buffer;
+  std::vector<std::string> proc_stats;
+  if (!internal::ReadProcStats(pid, &buffer) ||
+      !internal::ParseProcStats(buffer, &proc_stats)) {
+    return -1;
+  }
+
+  int64_t total_cpu =
+      internal::GetProcStatsFieldAsInt64(proc_stats, internal::VM_UTIME) +
+      internal::GetProcStatsFieldAsInt64(proc_stats, internal::VM_STIME);
+
+  return total_cpu;
+}
+
+#if defined(OS_CHROMEOS)
+// Report on Chrome OS GEM object graphics memory. /run/debugfs_gpu is a
+// bind mount into /sys/kernel/debug and synchronously reading the in-memory
+// files in /sys is fast.
+void ReadChromeOSGraphicsMemory(SystemMemoryInfoKB* meminfo) {
+#if defined(ARCH_CPU_ARM_FAMILY)
+  FilePath geminfo_file("/run/debugfs_gpu/exynos_gem_objects");
+#else
+  FilePath geminfo_file("/run/debugfs_gpu/i915_gem_objects");
+#endif
+  std::string geminfo_data;
+  meminfo->gem_objects = -1;
+  meminfo->gem_size = -1;
+  if (ReadFileToString(geminfo_file, &geminfo_data)) {
+    int gem_objects = -1;
+    long long gem_size = -1;
+    int num_res = sscanf(geminfo_data.c_str(), "%d objects, %lld bytes",
+                         &gem_objects, &gem_size);
+    if (num_res == 2) {
+      meminfo->gem_objects = gem_objects;
+      meminfo->gem_size = gem_size;
+    }
+  }
+
+#if defined(ARCH_CPU_ARM_FAMILY)
+  // Incorporate Mali graphics memory if present.
+  FilePath mali_memory_file("/sys/class/misc/mali0/device/memory");
+  std::string mali_memory_data;
+  if (ReadFileToString(mali_memory_file, &mali_memory_data)) {
+    long long mali_size = -1;
+    int num_res = sscanf(mali_memory_data.c_str(), "%lld bytes", &mali_size);
+    if (num_res == 1)
+      meminfo->gem_size += mali_size;
+  }
+#endif  // defined(ARCH_CPU_ARM_FAMILY)
+}
+#endif  // defined(OS_CHROMEOS)
+
+}  // namespace
+
+// static
+std::unique_ptr<ProcessMetrics> ProcessMetrics::CreateProcessMetrics(
+    ProcessHandle process) {
+  return WrapUnique(new ProcessMetrics(process));
+}
+
+size_t ProcessMetrics::GetResidentSetSize() const {
+  return internal::ReadProcStatsAndGetFieldAsSizeT(process_, internal::VM_RSS) *
+      getpagesize();
+}
+
+TimeDelta ProcessMetrics::GetCumulativeCPUUsage() {
+  return internal::ClockTicksToTimeDelta(GetProcessCPU(process_));
+}
+
+// For the /proc/self/io file to exist, the Linux kernel must have
+// CONFIG_TASK_IO_ACCOUNTING enabled.
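+// The file consists of "key: value" lines, e.g. (illustrative values):
+//   rchar: 2999542
+//   wchar: 2834788
+//   syscr: 1684
+//   syscw: 649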
+bool ProcessMetrics::GetIOCounters(IoCounters* io_counters) const {
+  StringPairs pairs;
+  if (!ReadProcFileToTrimmedStringPairs(process_, "io", &pairs))
+    return false;
+
+  io_counters->OtherOperationCount = 0;
+  io_counters->OtherTransferCount = 0;
+
+  for (const auto& pair : pairs) {
+    const std::string& key = pair.first;
+    const std::string& value_str = pair.second;
+    uint64_t* target_counter = nullptr;
+    if (key == "syscr")
+      target_counter = &io_counters->ReadOperationCount;
+    else if (key == "syscw")
+      target_counter = &io_counters->WriteOperationCount;
+    else if (key == "rchar")
+      target_counter = &io_counters->ReadTransferCount;
+    else if (key == "wchar")
+      target_counter = &io_counters->WriteTransferCount;
+    if (!target_counter)
+      continue;
+    bool converted = StringToUint64(value_str, target_counter);
+    DCHECK(converted);
+  }
+  return true;
+}
+
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+uint64_t ProcessMetrics::GetVmSwapBytes() const {
+  return ReadProcStatusAndGetFieldAsSizeT(process_, "VmSwap") * 1024;
+}
+#endif  // defined(OS_LINUX) || defined(OS_ANDROID)
+
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+bool ProcessMetrics::GetPageFaultCounts(PageFaultCounts* counts) const {
+  // We are not using internal::ReadStatsFileAndGetFieldAsInt64(), since it
+  // would read the file twice and could return inconsistent numbers.
+  std::string stats_data;
+  if (!internal::ReadProcStats(process_, &stats_data))
+    return false;
+  std::vector<std::string> proc_stats;
+  if (!internal::ParseProcStats(stats_data, &proc_stats))
+    return false;
+
+  counts->minor =
+      internal::GetProcStatsFieldAsInt64(proc_stats, internal::VM_MINFLT);
+  counts->major =
+      internal::GetProcStatsFieldAsInt64(proc_stats, internal::VM_MAJFLT);
+  return true;
+}
+#endif  // defined(OS_LINUX) || defined(OS_ANDROID)
+
+int ProcessMetrics::GetOpenFdCount() const {
+  // Use /proc/<pid>/fd to count the number of entries there.
+  FilePath fd_path = internal::GetProcPidDir(process_).Append("fd");
+
+  DirReaderPosix dir_reader(fd_path.value().c_str());
+  if (!dir_reader.IsValid())
+    return -1;
+
+  int total_count = 0;
+  while (dir_reader.Next()) {
+    const char* name = dir_reader.name();
+    if (strcmp(name, ".") != 0 && strcmp(name, "..") != 0)
+      ++total_count;
+  }
+
+  return total_count;
+}
+
+int ProcessMetrics::GetOpenFdSoftLimit() const {
+  // Use /proc/<pid>/limits to read the open fd limit.
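+  // A matching line looks like (illustrative values):
+  //   Max open files            1024                 4096                 files
+  // After splitting on spaces, "Max", "open" and "files" are tokens 0-2, so
+  // the soft limit is token 3 and the hard limit token 4.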
+  FilePath fd_path = internal::GetProcPidDir(process_).Append("limits");
+
+  std::string limits_contents;
+  if (!ReadFileToString(fd_path, &limits_contents))
+    return -1;
+
+  for (const auto& line : SplitStringPiece(
+           limits_contents, "\n", KEEP_WHITESPACE, SPLIT_WANT_NONEMPTY)) {
+    if (!line.starts_with("Max open files"))
+      continue;
+
+    auto tokens =
+        SplitStringPiece(line, " ", TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
+    if (tokens.size() > 3) {
+      int limit = -1;
+      if (!StringToInt(tokens[3], &limit))
+        return -1;
+      return limit;
+    }
+  }
+  return -1;
+}
+
+#if defined(OS_LINUX) || defined(OS_AIX)
+ProcessMetrics::ProcessMetrics(ProcessHandle process)
+    : process_(process), last_absolute_idle_wakeups_(0) {}
+#else
+ProcessMetrics::ProcessMetrics(ProcessHandle process) : process_(process) {}
+#endif
+
+#if defined(OS_CHROMEOS)
+// Private, Shared and Proportional working set sizes are obtained from
+// /proc/<pid>/totmaps.
+ProcessMetrics::TotalsSummary ProcessMetrics::GetTotalsSummary() const {
+  // The format of /proc/<pid>/totmaps is:
+  //
+  // Rss:                6120 kB
+  // Pss:                3335 kB
+  // Shared_Clean:       1008 kB
+  // Shared_Dirty:       4012 kB
+  // Private_Clean:         4 kB
+  // Private_Dirty:      1096 kB
+  // Referenced:          XXX kB
+  // Anonymous:           XXX kB
+  // AnonHugePages:       XXX kB
+  // Swap:                XXX kB
+  // Locked:              XXX kB
+  ProcessMetrics::TotalsSummary summary = {};
+
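+  // Each line splits into three tokens ("Name:", value, "kB"), so the value on
+  // the Nth line (zero-based) lands at token index N * 3 + 1.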
+  const size_t kPrivate_CleanIndex = (4 * 3) + 1;
+  const size_t kPrivate_DirtyIndex = (5 * 3) + 1;
+  const size_t kSwapIndex = (9 * 3) + 1;
+
+  std::string totmaps_data;
+  {
+    FilePath totmaps_file = internal::GetProcPidDir(process_).Append("totmaps");
+    ThreadRestrictions::ScopedAllowIO allow_io;
+    bool ret = ReadFileToString(totmaps_file, &totmaps_data);
+    if (!ret || totmaps_data.length() == 0)
+      return summary;
+  }
+
+  std::vector<std::string> totmaps_fields = SplitString(
+      totmaps_data, kWhitespaceASCII, KEEP_WHITESPACE, SPLIT_WANT_NONEMPTY);
+
+  DCHECK_EQ("Private_Clean:", totmaps_fields[kPrivate_CleanIndex - 1]);
+  DCHECK_EQ("Private_Dirty:", totmaps_fields[kPrivate_DirtyIndex - 1]);
+  DCHECK_EQ("Swap:", totmaps_fields[kSwapIndex-1]);
+
+  int private_clean_kb = 0;
+  int private_dirty_kb = 0;
+  int swap_kb = 0;
+  bool success = true;
+  success &=
+      StringToInt(totmaps_fields[kPrivate_CleanIndex], &private_clean_kb);
+  success &=
+      StringToInt(totmaps_fields[kPrivate_DirtyIndex], &private_dirty_kb);
+  success &= StringToInt(totmaps_fields[kSwapIndex], &swap_kb);
+
+  if (!success)
+    return summary;
+
+  summary.private_clean_kb = private_clean_kb;
+  summary.private_dirty_kb = private_dirty_kb;
+  summary.swap_kb = swap_kb;
+
+  return summary;
+}
+#endif
+
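+// Approximates the system commit charge (in kB) as used memory, computed from
+// /proc/meminfo as total - free - buffers - cached.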
+size_t GetSystemCommitCharge() {
+  SystemMemoryInfoKB meminfo;
+  if (!GetSystemMemoryInfo(&meminfo))
+    return 0;
+  return meminfo.total - meminfo.free - meminfo.buffers - meminfo.cached;
+}
+
+int ParseProcStatCPU(StringPiece input) {
+  // |input| may be empty if the process disappeared somehow.
+  // e.g. http://crbug.com/145811.
+  if (input.empty())
+    return -1;
+
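+  // The second field of /proc/<pid>/stat is the command name in parentheses,
+  // which may itself contain spaces and parentheses (e.g. "(Web Content)"),
+  // so scan from the last ')' instead of splitting the whole line on spaces.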
+  size_t start = input.find_last_of(')');
+  if (start == input.npos)
+    return -1;
+
+  // Number of spaces remaining until reaching utime's index starting after the
+  // last ')'.
+  int num_spaces_remaining = internal::VM_UTIME - 1;
+
+  size_t i = start;
+  while ((i = input.find(' ', i + 1)) != input.npos) {
+    // Validate the assumption that there aren't any contiguous spaces
+    // in |input| before utime.
+    DCHECK_NE(input[i - 1], ' ');
+    if (--num_spaces_remaining == 0) {
+      int utime = 0;
+      int stime = 0;
+      if (sscanf(&input.data()[i], "%d %d", &utime, &stime) != 2)
+        return -1;
+
+      return utime + stime;
+    }
+  }
+
+  return -1;
+}
+
+int GetNumberOfThreads(ProcessHandle process) {
+  return internal::ReadProcStatsAndGetFieldAsInt64(process,
+                                                   internal::VM_NUMTHREADS);
+}
+
+const char kProcSelfExe[] = "/proc/self/exe";
+
+namespace {
+
+// The format of /proc/diskstats is:
+//  Device major number
+//  Device minor number
+//  Device name
+//  Field  1 -- # of reads completed
+//      This is the total number of reads completed successfully.
+//  Field  2 -- # of reads merged, field 6 -- # of writes merged
+//      Reads and writes which are adjacent to each other may be merged for
+//      efficiency.  Thus two 4K reads may become one 8K read before it is
+//      ultimately handed to the disk, and so it will be counted (and queued)
+//      as only one I/O.  This field lets you know how often this was done.
+//  Field  3 -- # of sectors read
+//      This is the total number of sectors read successfully.
+//  Field  4 -- # of milliseconds spent reading
+//      This is the total number of milliseconds spent by all reads (as
+//      measured from __make_request() to end_that_request_last()).
+//  Field  5 -- # of writes completed
+//      This is the total number of writes completed successfully.
+//  Field  6 -- # of writes merged
+//      See the description of field 2.
+//  Field  7 -- # of sectors written
+//      This is the total number of sectors written successfully.
+//  Field  8 -- # of milliseconds spent writing
+//      This is the total number of milliseconds spent by all writes (as
+//      measured from __make_request() to end_that_request_last()).
+//  Field  9 -- # of I/Os currently in progress
+//      The only field that should go to zero. Incremented as requests are
+//      given to appropriate struct request_queue and decremented as they
+//      finish.
+//  Field 10 -- # of milliseconds spent doing I/Os
+//      This field increases so long as field 9 is nonzero.
+//  Field 11 -- weighted # of milliseconds spent doing I/Os
+//      This field is incremented at each I/O start, I/O completion, I/O
+//      merge, or read of these stats by the number of I/Os in progress
+//      (field 9) times the number of milliseconds spent doing I/O since the
+//      last update of this field.  This can provide an easy measure of both
+//      I/O completion time and the backlog that may be accumulating.
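+//
+// An example line, with illustrative values:
+//   8  0 sda 14395 1021 463300 6512 7645 3221 92093 5432 0 7890 11950
+// After splitting on whitespace, the device name is token 2 (kDiskDriveName
+// below) and fields 1-11 follow at token indices 3-13.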
+
+const size_t kDiskDriveName = 2;
+const size_t kDiskReads = 3;
+const size_t kDiskReadsMerged = 4;
+const size_t kDiskSectorsRead = 5;
+const size_t kDiskReadTime = 6;
+const size_t kDiskWrites = 7;
+const size_t kDiskWritesMerged = 8;
+const size_t kDiskSectorsWritten = 9;
+const size_t kDiskWriteTime = 10;
+const size_t kDiskIO = 11;
+const size_t kDiskIOTime = 12;
+const size_t kDiskWeightedIOTime = 13;
+
+}  // namespace
+
+std::unique_ptr<DictionaryValue> SystemMemoryInfoKB::ToValue() const {
+  auto res = std::make_unique<DictionaryValue>();
+  res->SetInteger("total", total);
+  res->SetInteger("free", free);
+  res->SetInteger("available", available);
+  res->SetInteger("buffers", buffers);
+  res->SetInteger("cached", cached);
+  res->SetInteger("active_anon", active_anon);
+  res->SetInteger("inactive_anon", inactive_anon);
+  res->SetInteger("active_file", active_file);
+  res->SetInteger("inactive_file", inactive_file);
+  res->SetInteger("swap_total", swap_total);
+  res->SetInteger("swap_free", swap_free);
+  res->SetInteger("swap_used", swap_total - swap_free);
+  res->SetInteger("dirty", dirty);
+  res->SetInteger("reclaimable", reclaimable);
+#if defined(OS_CHROMEOS)
+  res->SetInteger("shmem", shmem);
+  res->SetInteger("slab", slab);
+  res->SetInteger("gem_objects", gem_objects);
+  res->SetInteger("gem_size", gem_size);
+#endif
+
+  return res;
+}
+
+bool ParseProcMeminfo(StringPiece meminfo_data, SystemMemoryInfoKB* meminfo) {
+  // The format of /proc/meminfo is:
+  //
+  // MemTotal:      8235324 kB
+  // MemFree:       1628304 kB
+  // Buffers:        429596 kB
+  // Cached:        4728232 kB
+  // ...
+  // There is no guarantee on the ordering or position of the fields,
+  // though they don't appear to change very often.
+
+  // As a basic sanity check at the end, make sure the parsed MemTotal value is
+  // non-zero. So start off with a zero total.
+  meminfo->total = 0;
+
+  for (const StringPiece& line : SplitStringPiece(
+           meminfo_data, "\n", KEEP_WHITESPACE, SPLIT_WANT_NONEMPTY)) {
+    std::vector<StringPiece> tokens = SplitStringPiece(
+        line, kWhitespaceASCII, TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
+    // HugePages_* only has a number and no suffix so there may not be exactly 3
+    // tokens.
+    if (tokens.size() <= 1) {
+      DLOG(WARNING) << "meminfo: tokens: " << tokens.size()
+                    << " malformed line: " << line.as_string();
+      continue;
+    }
+
+    int* target = nullptr;
+    if (tokens[0] == "MemTotal:")
+      target = &meminfo->total;
+    else if (tokens[0] == "MemFree:")
+      target = &meminfo->free;
+    else if (tokens[0] == "MemAvailable:")
+      target = &meminfo->available;
+    else if (tokens[0] == "Buffers:")
+      target = &meminfo->buffers;
+    else if (tokens[0] == "Cached:")
+      target = &meminfo->cached;
+    else if (tokens[0] == "Active(anon):")
+      target = &meminfo->active_anon;
+    else if (tokens[0] == "Inactive(anon):")
+      target = &meminfo->inactive_anon;
+    else if (tokens[0] == "Active(file):")
+      target = &meminfo->active_file;
+    else if (tokens[0] == "Inactive(file):")
+      target = &meminfo->inactive_file;
+    else if (tokens[0] == "SwapTotal:")
+      target = &meminfo->swap_total;
+    else if (tokens[0] == "SwapFree:")
+      target = &meminfo->swap_free;
+    else if (tokens[0] == "Dirty:")
+      target = &meminfo->dirty;
+    else if (tokens[0] == "SReclaimable:")
+      target = &meminfo->reclaimable;
+#if defined(OS_CHROMEOS)
+    // Chrome OS has a tweaked kernel that allows querying Shmem, which is
+    // usually video memory otherwise invisible to the OS.
+    else if (tokens[0] == "Shmem:")
+      target = &meminfo->shmem;
+    else if (tokens[0] == "Slab:")
+      target = &meminfo->slab;
+#endif
+    if (target)
+      StringToInt(tokens[1], target);
+  }
+
+  // Make sure the MemTotal is valid.
+  return meminfo->total > 0;
+}
+
+bool ParseProcVmstat(StringPiece vmstat_data, VmStatInfo* vmstat) {
+  // The format of /proc/vmstat is:
+  //
+  // nr_free_pages 299878
+  // nr_inactive_anon 239863
+  // nr_active_anon 1318966
+  // nr_inactive_file 2015629
+  // ...
+  //
+  // Iterate through the whole file because the position of the
+  // fields are dependent on the kernel version and configuration.
+  bool has_pswpin = false;
+  bool has_pswpout = false;
+  bool has_pgmajfault = false;
+  for (const StringPiece& line : SplitStringPiece(
+           vmstat_data, "\n", KEEP_WHITESPACE, SPLIT_WANT_NONEMPTY)) {
+    std::vector<StringPiece> tokens = SplitStringPiece(
+        line, " ", KEEP_WHITESPACE, SPLIT_WANT_NONEMPTY);
+    if (tokens.size() != 2)
+      continue;
+
+    uint64_t val;
+    if (!StringToUint64(tokens[1], &val))
+      continue;
+
+    if (tokens[0] == "pswpin") {
+      vmstat->pswpin = val;
+      DCHECK(!has_pswpin);
+      has_pswpin = true;
+    } else if (tokens[0] == "pswpout") {
+      vmstat->pswpout = val;
+      DCHECK(!has_pswpout);
+      has_pswpout = true;
+    } else if (tokens[0] == "pgmajfault") {
+      vmstat->pgmajfault = val;
+      DCHECK(!has_pgmajfault);
+      has_pgmajfault = true;
+    }
+    if (has_pswpin && has_pswpout && has_pgmajfault)
+      return true;
+  }
+
+  return false;
+}
+
+bool GetSystemMemoryInfo(SystemMemoryInfoKB* meminfo) {
+  // Synchronously reading files in /proc and /sys is safe.
+  ThreadRestrictions::ScopedAllowIO allow_io;
+
+  // Used memory is: total - free - buffers - caches
+  FilePath meminfo_file("/proc/meminfo");
+  std::string meminfo_data;
+  if (!ReadFileToString(meminfo_file, &meminfo_data)) {
+    DLOG(WARNING) << "Failed to open " << meminfo_file.value();
+    return false;
+  }
+
+  if (!ParseProcMeminfo(meminfo_data, meminfo)) {
+    DLOG(WARNING) << "Failed to parse " << meminfo_file.value();
+    return false;
+  }
+
+#if defined(OS_CHROMEOS)
+  ReadChromeOSGraphicsMemory(meminfo);
+#endif
+
+  return true;
+}
+
+std::unique_ptr<DictionaryValue> VmStatInfo::ToValue() const {
+  auto res = std::make_unique<DictionaryValue>();
+  res->SetInteger("pswpin", pswpin);
+  res->SetInteger("pswpout", pswpout);
+  res->SetInteger("pgmajfault", pgmajfault);
+  return res;
+}
+
+bool GetVmStatInfo(VmStatInfo* vmstat) {
+  // Synchronously reading files in /proc and /sys is safe.
+  ThreadRestrictions::ScopedAllowIO allow_io;
+
+  FilePath vmstat_file("/proc/vmstat");
+  std::string vmstat_data;
+  if (!ReadFileToString(vmstat_file, &vmstat_data)) {
+    DLOG(WARNING) << "Failed to open " << vmstat_file.value();
+    return false;
+  }
+  if (!ParseProcVmstat(vmstat_data, vmstat)) {
+    DLOG(WARNING) << "Failed to parse " << vmstat_file.value();
+    return false;
+  }
+  return true;
+}
+
+// All fields have default member initializers in the header.
+SystemDiskInfo::SystemDiskInfo() = default;
+
+SystemDiskInfo::SystemDiskInfo(const SystemDiskInfo& other) = default;
+
+std::unique_ptr<Value> SystemDiskInfo::ToValue() const {
+  auto res = std::make_unique<DictionaryValue>();
+
+  // Write out uint64_t variables as doubles.
+  // Note: this may discard some precision, but for JS there's no other option.
+  res->SetDouble("reads", static_cast<double>(reads));
+  res->SetDouble("reads_merged", static_cast<double>(reads_merged));
+  res->SetDouble("sectors_read", static_cast<double>(sectors_read));
+  res->SetDouble("read_time", static_cast<double>(read_time));
+  res->SetDouble("writes", static_cast<double>(writes));
+  res->SetDouble("writes_merged", static_cast<double>(writes_merged));
+  res->SetDouble("sectors_written", static_cast<double>(sectors_written));
+  res->SetDouble("write_time", static_cast<double>(write_time));
+  res->SetDouble("io", static_cast<double>(io));
+  res->SetDouble("io_time", static_cast<double>(io_time));
+  res->SetDouble("weighted_io_time", static_cast<double>(weighted_io_time));
+
+  return std::move(res);
+}
+
+bool IsValidDiskName(StringPiece candidate) {
+  if (candidate.length() < 3)
+    return false;
+
+  if (candidate[1] == 'd' &&
+      (candidate[0] == 'h' || candidate[0] == 's' || candidate[0] == 'v')) {
+    // [hsv]d[a-z]+ case
+    for (size_t i = 2; i < candidate.length(); ++i) {
+      if (!islower(candidate[i]))
+        return false;
+    }
+    return true;
+  }
+
+  const char kMMCName[] = "mmcblk";
+  if (!candidate.starts_with(kMMCName))
+    return false;
+
+  // mmcblk[0-9]+ case
+  for (size_t i = strlen(kMMCName); i < candidate.length(); ++i) {
+    if (!isdigit(candidate[i]))
+      return false;
+  }
+  return true;
+}
+
+bool GetSystemDiskInfo(SystemDiskInfo* diskinfo) {
+  // Synchronously reading files in /proc does not hit the disk.
+  ThreadRestrictions::ScopedAllowIO allow_io;
+
+  FilePath diskinfo_file("/proc/diskstats");
+  std::string diskinfo_data;
+  if (!ReadFileToString(diskinfo_file, &diskinfo_data)) {
+    DLOG(WARNING) << "Failed to open " << diskinfo_file.value();
+    return false;
+  }
+
+  std::vector<StringPiece> diskinfo_lines = SplitStringPiece(
+      diskinfo_data, "\n", KEEP_WHITESPACE, SPLIT_WANT_NONEMPTY);
+  if (diskinfo_lines.empty()) {
+    DLOG(WARNING) << "No lines found";
+    return false;
+  }
+
+  diskinfo->reads = 0;
+  diskinfo->reads_merged = 0;
+  diskinfo->sectors_read = 0;
+  diskinfo->read_time = 0;
+  diskinfo->writes = 0;
+  diskinfo->writes_merged = 0;
+  diskinfo->sectors_written = 0;
+  diskinfo->write_time = 0;
+  diskinfo->io = 0;
+  diskinfo->io_time = 0;
+  diskinfo->weighted_io_time = 0;
+
+  uint64_t reads = 0;
+  uint64_t reads_merged = 0;
+  uint64_t sectors_read = 0;
+  uint64_t read_time = 0;
+  uint64_t writes = 0;
+  uint64_t writes_merged = 0;
+  uint64_t sectors_written = 0;
+  uint64_t write_time = 0;
+  uint64_t io = 0;
+  uint64_t io_time = 0;
+  uint64_t weighted_io_time = 0;
+
+  for (const StringPiece& line : diskinfo_lines) {
+    std::vector<StringPiece> disk_fields = SplitStringPiece(
+        line, kWhitespaceASCII, TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
+
+    // Skip lines that don't contain all the expected fields.
+    if (disk_fields.size() < kDiskWeightedIOTime + 1)
+      continue;
+
+    // Fields may have overflowed and reset to zero.
+    if (!IsValidDiskName(disk_fields[kDiskDriveName]))
+      continue;
+
+    StringToUint64(disk_fields[kDiskReads], &reads);
+    StringToUint64(disk_fields[kDiskReadsMerged], &reads_merged);
+    StringToUint64(disk_fields[kDiskSectorsRead], &sectors_read);
+    StringToUint64(disk_fields[kDiskReadTime], &read_time);
+    StringToUint64(disk_fields[kDiskWrites], &writes);
+    StringToUint64(disk_fields[kDiskWritesMerged], &writes_merged);
+    StringToUint64(disk_fields[kDiskSectorsWritten], &sectors_written);
+    StringToUint64(disk_fields[kDiskWriteTime], &write_time);
+    StringToUint64(disk_fields[kDiskIO], &io);
+    StringToUint64(disk_fields[kDiskIOTime], &io_time);
+    StringToUint64(disk_fields[kDiskWeightedIOTime], &weighted_io_time);
+
+    diskinfo->reads += reads;
+    diskinfo->reads_merged += reads_merged;
+    diskinfo->sectors_read += sectors_read;
+    diskinfo->read_time += read_time;
+    diskinfo->writes += writes;
+    diskinfo->writes_merged += writes_merged;
+    diskinfo->sectors_written += sectors_written;
+    diskinfo->write_time += write_time;
+    diskinfo->io += io;
+    diskinfo->io_time += io_time;
+    diskinfo->weighted_io_time += weighted_io_time;
+  }
+
+  return true;
+}
+
+TimeDelta GetUserCpuTimeSinceBoot() {
+  return internal::GetUserCpuTimeSinceBoot();
+}
+
+#if defined(OS_CHROMEOS)
+std::unique_ptr<Value> SwapInfo::ToValue() const {
+  auto res = std::make_unique<DictionaryValue>();
+
+  // Write out uint64_t variables as doubles.
+  // Note: this may discard some precision, but for JS there's no other option.
+  res->SetDouble("num_reads", static_cast<double>(num_reads));
+  res->SetDouble("num_writes", static_cast<double>(num_writes));
+  res->SetDouble("orig_data_size", static_cast<double>(orig_data_size));
+  res->SetDouble("compr_data_size", static_cast<double>(compr_data_size));
+  res->SetDouble("mem_used_total", static_cast<double>(mem_used_total));
+  double ratio = compr_data_size ? static_cast<double>(orig_data_size) /
+                                       static_cast<double>(compr_data_size)
+                                 : 0;
+  res->SetDouble("compression_ratio", ratio);
+
+  return std::move(res);
+}
+
+bool ParseZramMmStat(StringPiece mm_stat_data, SwapInfo* swap_info) {
+  // There are 7 columns in /sys/block/zram0/mm_stat,
+  // split by several spaces. The first three columns
+  // are orig_data_size, compr_data_size and mem_used_total.
+  // Example:
+  // 17715200 5008166 566062  0 1225715712  127 183842
+  //
+  // For more details:
+  // https://www.kernel.org/doc/Documentation/blockdev/zram.txt
+
+  std::vector<StringPiece> tokens = SplitStringPiece(
+      mm_stat_data, kWhitespaceASCII, TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
+  if (tokens.size() < 7) {
+    DLOG(WARNING) << "zram mm_stat: tokens: " << tokens.size()
+                  << " malformed line: " << mm_stat_data.as_string();
+    return false;
+  }
+
+  if (!StringToUint64(tokens[0], &swap_info->orig_data_size))
+    return false;
+  if (!StringToUint64(tokens[1], &swap_info->compr_data_size))
+    return false;
+  if (!StringToUint64(tokens[2], &swap_info->mem_used_total))
+    return false;
+
+  return true;
+}
+
+bool ParseZramStat(StringPiece stat_data, SwapInfo* swap_info) {
+  // There are 11 columns in /sys/block/zram0/stat,
+  // split by several spaces. The first column is read I/Os
+  // and fifth column is write I/Os.
+  // Example:
+  // 299    0    2392    0    1    0    8    0    0    0    0
+  //
+  // For more details:
+  // https://www.kernel.org/doc/Documentation/blockdev/zram.txt
+
+  std::vector<StringPiece> tokens = SplitStringPiece(
+      stat_data, kWhitespaceASCII, TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
+  if (tokens.size() < 11) {
+    DLOG(WARNING) << "zram stat: tokens: " << tokens.size()
+                  << " malformed line: " << stat_data.as_string();
+    return false;
+  }
+
+  if (!StringToUint64(tokens[0], &swap_info->num_reads))
+    return false;
+  if (!StringToUint64(tokens[4], &swap_info->num_writes))
+    return false;
+
+  return true;
+}
+
+namespace {
+
+bool IgnoreZramFirstPage(uint64_t orig_data_size, SwapInfo* swap_info) {
+  if (orig_data_size <= 4096) {
+    // A single page is compressed at startup, and has a high compression
+    // ratio. Ignore this as it doesn't indicate any real swapping.
+    swap_info->orig_data_size = 0;
+    swap_info->num_reads = 0;
+    swap_info->num_writes = 0;
+    swap_info->compr_data_size = 0;
+    swap_info->mem_used_total = 0;
+    return true;
+  }
+  return false;
+}
+
+void ParseZramPath(SwapInfo* swap_info) {
+  FilePath zram_path("/sys/block/zram0");
+  uint64_t orig_data_size =
+      ReadFileToUint64(zram_path.Append("orig_data_size"));
+  if (IgnoreZramFirstPage(orig_data_size, swap_info))
+    return;
+
+  swap_info->orig_data_size = orig_data_size;
+  swap_info->num_reads = ReadFileToUint64(zram_path.Append("num_reads"));
+  swap_info->num_writes = ReadFileToUint64(zram_path.Append("num_writes"));
+  swap_info->compr_data_size =
+      ReadFileToUint64(zram_path.Append("compr_data_size"));
+  swap_info->mem_used_total =
+      ReadFileToUint64(zram_path.Append("mem_used_total"));
+}
+
+bool GetSwapInfoImpl(SwapInfo* swap_info) {
+  // Synchronously reading files in /sys/block/zram0 does not hit the disk.
+  ThreadRestrictions::ScopedAllowIO allow_io;
+
+  // Newer kernels expose the ZRAM usage data in different places. If
+  // "/sys/block/zram0/mm_stat" exists, use the new interface; otherwise use
+  // the old one.
+  static Optional<bool> use_new_zram_interface;
+  FilePath zram_mm_stat_file("/sys/block/zram0/mm_stat");
+  if (!use_new_zram_interface.has_value()) {
+    use_new_zram_interface = PathExists(zram_mm_stat_file);
+  }
+
+  if (!use_new_zram_interface.value()) {
+    ParseZramPath(swap_info);
+    return true;
+  }
+
+  std::string mm_stat_data;
+  if (!ReadFileToString(zram_mm_stat_file, &mm_stat_data)) {
+    DLOG(WARNING) << "Failed to open " << zram_mm_stat_file.value();
+    return false;
+  }
+  if (!ParseZramMmStat(mm_stat_data, swap_info)) {
+    DLOG(WARNING) << "Failed to parse " << zram_mm_stat_file.value();
+    return false;
+  }
+  if (IgnoreZramFirstPage(swap_info->orig_data_size, swap_info))
+    return true;
+
+  FilePath zram_stat_file("/sys/block/zram0/stat");
+  std::string stat_data;
+  if (!ReadFileToString(zram_stat_file, &stat_data)) {
+    DLOG(WARNING) << "Failed to open " << zram_stat_file.value();
+    return false;
+  }
+  if (!ParseZramStat(stat_data, swap_info)) {
+    DLOG(WARNING) << "Failed to parse " << zram_stat_file.value();
+    return false;
+  }
+
+  return true;
+}
+
+}  // namespace
+
+bool GetSwapInfo(SwapInfo* swap_info) {
+  if (!GetSwapInfoImpl(swap_info)) {
+    *swap_info = SwapInfo();
+    return false;
+  }
+  return true;
+}
+#endif  // defined(OS_CHROMEOS)
+
+#if defined(OS_LINUX) || defined(OS_AIX)
+int ProcessMetrics::GetIdleWakeupsPerSecond() {
+  uint64_t num_switches;
+  static const char kSwitchStat[] = "voluntary_ctxt_switches";
+  return ReadProcStatusAndGetFieldAsUint64(process_, kSwitchStat, &num_switches)
+             ? CalculateIdleWakeupsPerSecond(num_switches)
+             : 0;
+}
+#endif  // defined(OS_LINUX) || defined(OS_AIX)
+
+}  // namespace base
diff --git a/base/process/process_metrics_mac.cc b/base/process/process_metrics_mac.cc
new file mode 100644
index 0000000..4ecf8cf
--- /dev/null
+++ b/base/process/process_metrics_mac.cc
@@ -0,0 +1,302 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_metrics.h"
+
+#include <mach/mach.h>
+#include <mach/mach_vm.h>
+#include <mach/shared_region.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/sysctl.h>
+
+#include "base/containers/hash_tables.h"
+#include "base/logging.h"
+#include "base/mac/mac_util.h"
+#include "base/mac/mach_logging.h"
+#include "base/mac/scoped_mach_port.h"
+#include "base/memory/ptr_util.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/numerics/safe_math.h"
+#include "base/process/process_metrics_iocounters.h"
+
+namespace base {
+
+namespace {
+
+#if !defined(MAC_OS_X_VERSION_10_11) || \
+    MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_11
+// The |phys_footprint| field was introduced in 10.11.
+struct ChromeTaskVMInfo {
+  mach_vm_size_t virtual_size;
+  integer_t region_count;
+  integer_t page_size;
+  mach_vm_size_t resident_size;
+  mach_vm_size_t resident_size_peak;
+  mach_vm_size_t device;
+  mach_vm_size_t device_peak;
+  mach_vm_size_t internal;
+  mach_vm_size_t internal_peak;
+  mach_vm_size_t external;
+  mach_vm_size_t external_peak;
+  mach_vm_size_t reusable;
+  mach_vm_size_t reusable_peak;
+  mach_vm_size_t purgeable_volatile_pmap;
+  mach_vm_size_t purgeable_volatile_resident;
+  mach_vm_size_t purgeable_volatile_virtual;
+  mach_vm_size_t compressed;
+  mach_vm_size_t compressed_peak;
+  mach_vm_size_t compressed_lifetime;
+  mach_vm_size_t phys_footprint;
+};
+#else
+using ChromeTaskVMInfo = task_vm_info;
+#endif  // MAC_OS_X_VERSION_10_11
+mach_msg_type_number_t ChromeTaskVMInfoCount =
+    sizeof(ChromeTaskVMInfo) / sizeof(natural_t);
+
+bool GetTaskInfo(mach_port_t task, task_basic_info_64* task_info_data) {
+  if (task == MACH_PORT_NULL)
+    return false;
+  mach_msg_type_number_t count = TASK_BASIC_INFO_64_COUNT;
+  kern_return_t kr = task_info(task,
+                               TASK_BASIC_INFO_64,
+                               reinterpret_cast<task_info_t>(task_info_data),
+                               &count);
+  // Most likely cause for failure: |task| is a zombie.
+  return kr == KERN_SUCCESS;
+}
+
+MachVMRegionResult ParseOutputFromMachVMRegion(kern_return_t kr) {
+  if (kr == KERN_INVALID_ADDRESS) {
+    // We're at the end of the address space.
+    return MachVMRegionResult::Finished;
+  } else if (kr != KERN_SUCCESS) {
+    return MachVMRegionResult::Error;
+  }
+  return MachVMRegionResult::Success;
+}
+
+bool GetPowerInfo(mach_port_t task, task_power_info* power_info_data) {
+  if (task == MACH_PORT_NULL)
+    return false;
+
+  mach_msg_type_number_t power_info_count = TASK_POWER_INFO_COUNT;
+  kern_return_t kr = task_info(task, TASK_POWER_INFO,
+                               reinterpret_cast<task_info_t>(power_info_data),
+                               &power_info_count);
+  // Most likely cause for failure: |task| is a zombie.
+  return kr == KERN_SUCCESS;
+}
+
+}  // namespace
+
+// Getting a Mach task from a pid for another process requires permissions in
+// general, so there doesn't really seem to be a way to do these queries (and
+// spinning up ps to fetch the stats seems too dangerous to put in a base API
+// for anyone to call). Child processes IPC their Mach port to the parent, so
+// return that if available, otherwise MACH_PORT_NULL.
+
+// static
+std::unique_ptr<ProcessMetrics> ProcessMetrics::CreateProcessMetrics(
+    ProcessHandle process,
+    PortProvider* port_provider) {
+  return WrapUnique(new ProcessMetrics(process, port_provider));
+}
+
+ProcessMetrics::TaskVMInfo ProcessMetrics::GetTaskVMInfo() const {
+  TaskVMInfo info;
+  ChromeTaskVMInfo task_vm_info;
+  mach_msg_type_number_t count = ChromeTaskVMInfoCount;
+  kern_return_t result =
+      task_info(TaskForPid(process_), TASK_VM_INFO,
+                reinterpret_cast<task_info_t>(&task_vm_info), &count);
+  if (result != KERN_SUCCESS)
+    return info;
+
+  info.internal = task_vm_info.internal;
+  info.compressed = task_vm_info.compressed;
+  if (count == ChromeTaskVMInfoCount)
+    info.phys_footprint = task_vm_info.phys_footprint;
+  return info;
+}
+
+#define TIME_VALUE_TO_TIMEVAL(a, r) do {  \
+  (r)->tv_sec = (a)->seconds;             \
+  (r)->tv_usec = (a)->microseconds;       \
+} while (0)
+
+TimeDelta ProcessMetrics::GetCumulativeCPUUsage() {
+  mach_port_t task = TaskForPid(process_);
+  if (task == MACH_PORT_NULL)
+    return TimeDelta();
+
+  // Libtop explicitly loops over the threads (libtop_pinfo_update_cpu_usage()
+  // in libtop.c), but this is more concise and gives the same results:
+  task_thread_times_info thread_info_data;
+  mach_msg_type_number_t thread_info_count = TASK_THREAD_TIMES_INFO_COUNT;
+  kern_return_t kr = task_info(task,
+                               TASK_THREAD_TIMES_INFO,
+                               reinterpret_cast<task_info_t>(&thread_info_data),
+                               &thread_info_count);
+  if (kr != KERN_SUCCESS) {
+    // Most likely cause: |task| is a zombie.
+    return TimeDelta();
+  }
+
+  task_basic_info_64 task_info_data;
+  if (!GetTaskInfo(task, &task_info_data))
+    return TimeDelta();
+
+  // Set total_time: thread info contains live time...
+  struct timeval user_timeval, system_timeval, task_timeval;
+  TIME_VALUE_TO_TIMEVAL(&thread_info_data.user_time, &user_timeval);
+  TIME_VALUE_TO_TIMEVAL(&thread_info_data.system_time, &system_timeval);
+  timeradd(&user_timeval, &system_timeval, &task_timeval);
+
+  // ... task info contains terminated time.
+  TIME_VALUE_TO_TIMEVAL(&task_info_data.user_time, &user_timeval);
+  TIME_VALUE_TO_TIMEVAL(&task_info_data.system_time, &system_timeval);
+  timeradd(&user_timeval, &task_timeval, &task_timeval);
+  timeradd(&system_timeval, &task_timeval, &task_timeval);
+
+  return TimeDelta::FromMicroseconds(TimeValToMicroseconds(task_timeval));
+}
+
+int ProcessMetrics::GetPackageIdleWakeupsPerSecond() {
+  mach_port_t task = TaskForPid(process_);
+  task_power_info power_info_data;
+
+  GetPowerInfo(task, &power_info_data);
+
+  // The task_power_info struct contains two wakeup counters:
+  // task_interrupt_wakeups and task_platform_idle_wakeups.
+  // task_interrupt_wakeups is the total number of wakeups generated by the
+  // process, and is the number that Activity Monitor reports.
+  // task_platform_idle_wakeups is a subset of task_interrupt_wakeups that
+  // tallies the number of times the processor was taken out of its low-power
+  // idle state to handle a wakeup. task_platform_idle_wakeups therefore result
+  // in a greater power increase than the other interrupts which occur while the
+  // CPU is already working, and reducing them has a greater overall impact on
+  // power usage. See the powermetrics man page for more info.
+  return CalculatePackageIdleWakeupsPerSecond(
+      power_info_data.task_platform_idle_wakeups);
+}
+
+int ProcessMetrics::GetIdleWakeupsPerSecond() {
+  mach_port_t task = TaskForPid(process_);
+  task_power_info power_info_data;
+
+  GetPowerInfo(task, &power_info_data);
+
+  return CalculateIdleWakeupsPerSecond(power_info_data.task_interrupt_wakeups);
+}
+
+bool ProcessMetrics::GetIOCounters(IoCounters* io_counters) const {
+  return false;
+}
+
+ProcessMetrics::ProcessMetrics(ProcessHandle process,
+                               PortProvider* port_provider)
+    : process_(process),
+      last_absolute_idle_wakeups_(0),
+      last_absolute_package_idle_wakeups_(0),
+      port_provider_(port_provider) {}
+
+mach_port_t ProcessMetrics::TaskForPid(ProcessHandle process) const {
+  mach_port_t task = MACH_PORT_NULL;
+  if (port_provider_)
+    task = port_provider_->TaskForPid(process_);
+  if (task == MACH_PORT_NULL && process_ == getpid())
+    task = mach_task_self();
+  return task;
+}
+
+// System-wide commit charge, in KB.
+size_t GetSystemCommitCharge() {
+  base::mac::ScopedMachSendRight host(mach_host_self());
+  mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
+  vm_statistics_data_t data;
+  kern_return_t kr = host_statistics(host.get(), HOST_VM_INFO,
+                                     reinterpret_cast<host_info_t>(&data),
+                                     &count);
+  if (kr != KERN_SUCCESS) {
+    MACH_DLOG(WARNING, kr) << "host_statistics";
+    return 0;
+  }
+
+  return (data.active_count * PAGE_SIZE) / 1024;
+}
+
+bool GetSystemMemoryInfo(SystemMemoryInfoKB* meminfo) {
+  struct host_basic_info hostinfo;
+  mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
+  base::mac::ScopedMachSendRight host(mach_host_self());
+  int result = host_info(host.get(), HOST_BASIC_INFO,
+                         reinterpret_cast<host_info_t>(&hostinfo), &count);
+  if (result != KERN_SUCCESS)
+    return false;
+
+  DCHECK_EQ(HOST_BASIC_INFO_COUNT, count);
+  meminfo->total = static_cast<int>(hostinfo.max_mem / 1024);
+
+  vm_statistics64_data_t vm_info;
+  count = HOST_VM_INFO64_COUNT;
+
+  if (host_statistics64(host.get(), HOST_VM_INFO64,
+                        reinterpret_cast<host_info64_t>(&vm_info),
+                        &count) != KERN_SUCCESS) {
+    return false;
+  }
+  DCHECK_EQ(HOST_VM_INFO64_COUNT, count);
+
+  static_assert(PAGE_SIZE % 1024 == 0, "Invalid page size");
+  meminfo->free = saturated_cast<int>(
+      PAGE_SIZE / 1024 * (vm_info.free_count - vm_info.speculative_count));
+  meminfo->speculative =
+      saturated_cast<int>(PAGE_SIZE / 1024 * vm_info.speculative_count);
+  meminfo->file_backed =
+      saturated_cast<int>(PAGE_SIZE / 1024 * vm_info.external_page_count);
+  meminfo->purgeable =
+      saturated_cast<int>(PAGE_SIZE / 1024 * vm_info.purgeable_count);
+
+  return true;
+}
+
+// Both |size| and |address| are in-out parameters.
+// |info| is an output parameter, only valid on Success.
+MachVMRegionResult GetTopInfo(mach_port_t task,
+                              mach_vm_size_t* size,
+                              mach_vm_address_t* address,
+                              vm_region_top_info_data_t* info) {
+  mach_msg_type_number_t info_count = VM_REGION_TOP_INFO_COUNT;
+  mach_port_t object_name;
+  kern_return_t kr = mach_vm_region(task, address, size, VM_REGION_TOP_INFO,
+                                    reinterpret_cast<vm_region_info_t>(info),
+                                    &info_count, &object_name);
+  // The kernel always returns a null object for VM_REGION_TOP_INFO, but
+  // balance it with a deallocate in case this ever changes. See 10.9.2
+  // xnu-2422.90.20/osfmk/vm/vm_map.c vm_map_region.
+  mach_port_deallocate(task, object_name);
+  return ParseOutputFromMachVMRegion(kr);
+}
+
+MachVMRegionResult GetBasicInfo(mach_port_t task,
+                                mach_vm_size_t* size,
+                                mach_vm_address_t* address,
+                                vm_region_basic_info_64* info) {
+  mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT_64;
+  mach_port_t object_name;
+  kern_return_t kr = mach_vm_region(
+      task, address, size, VM_REGION_BASIC_INFO_64,
+      reinterpret_cast<vm_region_info_t>(info), &info_count, &object_name);
+  // The kernel always returns a null object for VM_REGION_BASIC_INFO_64, but
+  // balance it with a deallocate in case this ever changes. See 10.9.2
+  // xnu-2422.90.20/osfmk/vm/vm_map.c vm_map_region.
+  mach_port_deallocate(task, object_name);
+  return ParseOutputFromMachVMRegion(kr);
+}
+
+}  // namespace base
diff --git a/base/process/process_metrics_nacl.cc b/base/process/process_metrics_nacl.cc
new file mode 100644
index 0000000..025ffd5
--- /dev/null
+++ b/base/process/process_metrics_nacl.cc
@@ -0,0 +1,16 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_metrics.h"
+
+#include <stddef.h>
+#include <unistd.h>
+
+namespace base {
+
+size_t GetPageSize() {
+  return getpagesize();
+}
+
+}  // namespace base
diff --git a/base/process/process_metrics_openbsd.cc b/base/process/process_metrics_openbsd.cc
new file mode 100644
index 0000000..509ed0b
--- /dev/null
+++ b/base/process/process_metrics_openbsd.cc
@@ -0,0 +1,90 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_metrics.h"
+
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/param.h>
+#include <sys/sysctl.h>
+
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/process/process_metrics_iocounters.h"
+
+namespace base {
+
+// static
+std::unique_ptr<ProcessMetrics> ProcessMetrics::CreateProcessMetrics(
+    ProcessHandle process) {
+  return WrapUnique(new ProcessMetrics(process));
+}
+
+bool ProcessMetrics::GetIOCounters(IoCounters* io_counters) const {
+  return false;
+}
+
+static int GetProcessCPU(pid_t pid) {
+  struct kinfo_proc info;
+  size_t length;
+  int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, pid,
+                sizeof(struct kinfo_proc), 0 };
+
+  if (sysctl(mib, arraysize(mib), NULL, &length, NULL, 0) < 0)
+    return -1;
+
+  mib[5] = (length / sizeof(struct kinfo_proc));
+
+  if (sysctl(mib, arraysize(mib), &info, &length, NULL, 0) < 0)
+    return 0;
+
+  return info.p_pctcpu;
+}
+
+double ProcessMetrics::GetPlatformIndependentCPUUsage() {
+  TimeTicks time = TimeTicks::Now();
+
+  if (last_cpu_time_.is_zero()) {
+    // First call, just set the last values.
+    last_cpu_time_ = time;
+    return 0;
+  }
+
+  int cpu = GetProcessCPU(process_);
+
+  last_cpu_time_ = time;
+  double percentage = (cpu * 100.0) / FSCALE;
+
+  return percentage;
+}
+
+TimeDelta ProcessMetrics::GetCumulativeCPUUsage() {
+  NOTREACHED();
+  return TimeDelta();
+}
+
+ProcessMetrics::ProcessMetrics(ProcessHandle process)
+    : process_(process),
+      last_cpu_(0) {}
+
+size_t GetSystemCommitCharge() {
+  int mib[] = { CTL_VM, VM_METER };
+  int pagesize;
+  struct vmtotal vmtotal;
+  unsigned long mem_total, mem_free, mem_inactive;
+  size_t len = sizeof(vmtotal);
+
+  if (sysctl(mib, arraysize(mib), &vmtotal, &len, NULL, 0) < 0)
+    return 0;
+
+  pagesize = getpagesize();
+
+  // vmtotal reports its counters in pages; convert everything to bytes before
+  // subtracting so the units are consistent.
+  mem_total = static_cast<unsigned long>(vmtotal.t_vm) * pagesize;
+  mem_free = static_cast<unsigned long>(vmtotal.t_free) * pagesize;
+  mem_inactive =
+      static_cast<unsigned long>(vmtotal.t_vm - vmtotal.t_avm) * pagesize;
+
+  return mem_total - mem_free - mem_inactive;
+}
+
+}  // namespace base
diff --git a/base/process/process_metrics_posix.cc b/base/process/process_metrics_posix.cc
new file mode 100644
index 0000000..a09bbf2
--- /dev/null
+++ b/base/process/process_metrics_posix.cc
@@ -0,0 +1,116 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_metrics.h"
+
+#include <limits.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/time.h>
+#include <unistd.h>
+
+#include "base/logging.h"
+#include "build/build_config.h"
+
+#if !defined(OS_FUCHSIA)
+#include <sys/resource.h>
+#endif
+
+#if defined(OS_MACOSX)
+#include <malloc/malloc.h>
+#else
+#include <malloc.h>
+#endif
+
+namespace base {
+
+int64_t TimeValToMicroseconds(const struct timeval& tv) {
+  int64_t ret = tv.tv_sec;  // Avoid (int * int) integer overflow.
+  ret *= Time::kMicrosecondsPerSecond;
+  ret += tv.tv_usec;
+  return ret;
+}
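+
+// Worked example: tv = {2, 500000} (2 s plus 500000 us) yields
+// 2 * 1000000 + 500000 = 2500000. Widening tv_sec to int64_t before the
+// multiply is what avoids the overflow noted above.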
+
+ProcessMetrics::~ProcessMetrics() = default;
+
+#if !defined(OS_FUCHSIA)
+
+#if defined(OS_LINUX)
+static const rlim_t kSystemDefaultMaxFds = 8192;
+#elif defined(OS_MACOSX)
+static const rlim_t kSystemDefaultMaxFds = 256;
+#elif defined(OS_SOLARIS)
+static const rlim_t kSystemDefaultMaxFds = 8192;
+#elif defined(OS_FREEBSD)
+static const rlim_t kSystemDefaultMaxFds = 8192;
+#elif defined(OS_NETBSD)
+static const rlim_t kSystemDefaultMaxFds = 1024;
+#elif defined(OS_OPENBSD)
+static const rlim_t kSystemDefaultMaxFds = 256;
+#elif defined(OS_ANDROID)
+static const rlim_t kSystemDefaultMaxFds = 1024;
+#elif defined(OS_AIX)
+static const rlim_t kSystemDefaultMaxFds = 8192;
+#endif
+
+size_t GetMaxFds() {
+  rlim_t max_fds;
+  struct rlimit nofile;
+  if (getrlimit(RLIMIT_NOFILE, &nofile)) {
+    // getrlimit failed. Take a best guess.
+    max_fds = kSystemDefaultMaxFds;
+    RAW_LOG(ERROR, "getrlimit(RLIMIT_NOFILE) failed");
+  } else {
+    max_fds = nofile.rlim_cur;
+  }
+
+  if (max_fds > INT_MAX)
+    max_fds = INT_MAX;
+
+  return static_cast<size_t>(max_fds);
+}
+
+void IncreaseFdLimitTo(unsigned int max_descriptors) {
+  struct rlimit limits;
+  if (getrlimit(RLIMIT_NOFILE, &limits) == 0) {
+    unsigned int new_limit = max_descriptors;
+    if (max_descriptors <= limits.rlim_cur)
+      return;
+    if (limits.rlim_max > 0 && limits.rlim_max < max_descriptors) {
+      new_limit = limits.rlim_max;
+    }
+    limits.rlim_cur = new_limit;
+    if (setrlimit(RLIMIT_NOFILE, &limits) != 0) {
+      PLOG(INFO) << "Failed to set file descriptor limit";
+    }
+  } else {
+    PLOG(INFO) << "Failed to get file descriptor limit";
+  }
+}
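+
+// Usage sketch (illustrative only): a process that expects to hold many
+// descriptors might raise the soft limit toward the hard limit at startup:
+//
+//   IncreaseFdLimitTo(4096);      // Clamped to rlim_max if that is lower.
+//   size_t usable = GetMaxFds();  // Reflects the raised soft limit.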
+
+#endif  // !defined(OS_FUCHSIA)
+
+size_t GetPageSize() {
+  return getpagesize();
+}
+
+size_t ProcessMetrics::GetMallocUsage() {
+#if defined(OS_MACOSX) || defined(OS_IOS)
+  malloc_statistics_t stats = {0};
+  malloc_zone_statistics(nullptr, &stats);
+  return stats.size_in_use;
+#elif defined(OS_LINUX) || defined(OS_ANDROID)
+  struct mallinfo minfo = mallinfo();
+#if defined(USE_TCMALLOC)
+  return minfo.uordblks;
+#else
+  return minfo.hblkhd + minfo.arena;
+#endif
+#elif defined(OS_FUCHSIA)
+  // TODO(fuchsia): Not currently exposed. https://crbug.com/735087.
+  return 0;
+#endif
+}
+
+}  // namespace base
diff --git a/base/process/process_metrics_unittest.cc b/base/process/process_metrics_unittest.cc
new file mode 100644
index 0000000..eba543a
--- /dev/null
+++ b/base/process/process_metrics_unittest.cc
@@ -0,0 +1,633 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_metrics.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <sstream>
+#include <string>
+#include <vector>
+
+#include "base/bind.h"
+#include "base/command_line.h"
+#include "base/files/file.h"
+#include "base/files/file_util.h"
+#include "base/files/scoped_temp_dir.h"
+#include "base/macros.h"
+#include "base/memory/shared_memory.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/sys_info.h"
+#include "base/test/multiprocess_test.h"
+#include "base/threading/thread.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/multiprocess_func_list.h"
+
+#if defined(OS_MACOSX)
+#include <sys/mman.h>
+#endif
+
+namespace base {
+namespace debug {
+
+#if defined(OS_LINUX) || defined(OS_CHROMEOS) || defined(OS_WIN)
+namespace {
+
+void BusyWork(std::vector<std::string>* vec) {
+  int64_t test_value = 0;
+  for (int i = 0; i < 100000; ++i) {
+    ++test_value;
+    vec->push_back(Int64ToString(test_value));
+  }
+}
+
+}  // namespace
+#endif  // defined(OS_LINUX) || defined(OS_CHROMEOS) || defined(OS_WIN)
+
+// Tests for SystemMetrics.
+// Exists as a class so it can be a friend of SystemMetrics.
+class SystemMetricsTest : public testing::Test {
+ public:
+  SystemMetricsTest() = default;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(SystemMetricsTest);
+};
+
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+TEST_F(SystemMetricsTest, IsValidDiskName) {
+  const char invalid_input1[] = "";
+  const char invalid_input2[] = "s";
+  const char invalid_input3[] = "sdz+";
+  const char invalid_input4[] = "hda0";
+  const char invalid_input5[] = "mmcbl";
+  const char invalid_input6[] = "mmcblka";
+  const char invalid_input7[] = "mmcblkb";
+  const char invalid_input8[] = "mmmblk0";
+
+  EXPECT_FALSE(IsValidDiskName(invalid_input1));
+  EXPECT_FALSE(IsValidDiskName(invalid_input2));
+  EXPECT_FALSE(IsValidDiskName(invalid_input3));
+  EXPECT_FALSE(IsValidDiskName(invalid_input4));
+  EXPECT_FALSE(IsValidDiskName(invalid_input5));
+  EXPECT_FALSE(IsValidDiskName(invalid_input6));
+  EXPECT_FALSE(IsValidDiskName(invalid_input7));
+  EXPECT_FALSE(IsValidDiskName(invalid_input8));
+
+  const char valid_input1[] = "sda";
+  const char valid_input2[] = "sdaaaa";
+  const char valid_input3[] = "hdz";
+  const char valid_input4[] = "mmcblk0";
+  const char valid_input5[] = "mmcblk999";
+
+  EXPECT_TRUE(IsValidDiskName(valid_input1));
+  EXPECT_TRUE(IsValidDiskName(valid_input2));
+  EXPECT_TRUE(IsValidDiskName(valid_input3));
+  EXPECT_TRUE(IsValidDiskName(valid_input4));
+  EXPECT_TRUE(IsValidDiskName(valid_input5));
+}
+
+TEST_F(SystemMetricsTest, ParseMeminfo) {
+  SystemMemoryInfoKB meminfo;
+  const char invalid_input1[] = "abc";
+  const char invalid_input2[] = "MemTotal:";
+  // Partial file with no MemTotal.
+  const char invalid_input3[] =
+      "MemFree:         3913968 kB\n"
+      "Buffers:         2348340 kB\n"
+      "Cached:         49071596 kB\n"
+      "SwapCached:           12 kB\n"
+      "Active:         36393900 kB\n"
+      "Inactive:       21221496 kB\n"
+      "Active(anon):    5674352 kB\n"
+      "Inactive(anon):   633992 kB\n";
+  EXPECT_FALSE(ParseProcMeminfo(invalid_input1, &meminfo));
+  EXPECT_FALSE(ParseProcMeminfo(invalid_input2, &meminfo));
+  EXPECT_FALSE(ParseProcMeminfo(invalid_input3, &meminfo));
+
+  const char valid_input1[] =
+      "MemTotal:        3981504 kB\n"
+      "MemFree:          140764 kB\n"
+      "MemAvailable:     535413 kB\n"
+      "Buffers:          116480 kB\n"
+      "Cached:           406160 kB\n"
+      "SwapCached:        21304 kB\n"
+      "Active:          3152040 kB\n"
+      "Inactive:         472856 kB\n"
+      "Active(anon):    2972352 kB\n"
+      "Inactive(anon):   270108 kB\n"
+      "Active(file):     179688 kB\n"
+      "Inactive(file):   202748 kB\n"
+      "Unevictable:           0 kB\n"
+      "Mlocked:               0 kB\n"
+      "SwapTotal:       5832280 kB\n"
+      "SwapFree:        3672368 kB\n"
+      "Dirty:               184 kB\n"
+      "Writeback:             0 kB\n"
+      "AnonPages:       3101224 kB\n"
+      "Mapped:           142296 kB\n"
+      "Shmem:            140204 kB\n"
+      "Slab:              54212 kB\n"
+      "SReclaimable:      30936 kB\n"
+      "SUnreclaim:        23276 kB\n"
+      "KernelStack:        2464 kB\n"
+      "PageTables:        24812 kB\n"
+      "NFS_Unstable:          0 kB\n"
+      "Bounce:                0 kB\n"
+      "WritebackTmp:          0 kB\n"
+      "CommitLimit:     7823032 kB\n"
+      "Committed_AS:    7973536 kB\n"
+      "VmallocTotal:   34359738367 kB\n"
+      "VmallocUsed:      375940 kB\n"
+      "VmallocChunk:   34359361127 kB\n"
+      "DirectMap4k:       72448 kB\n"
+      "DirectMap2M:     4061184 kB\n";
+  // Output from a much older kernel where Active and Inactive aren't broken
+  // down into anon and file, and huge pages are enabled.
+  const char valid_input2[] =
+      "MemTotal:       255908 kB\n"
+      "MemFree:         69936 kB\n"
+      "Buffers:         15812 kB\n"
+      "Cached:         115124 kB\n"
+      "SwapCached:          0 kB\n"
+      "Active:          92700 kB\n"
+      "Inactive:        63792 kB\n"
+      "HighTotal:           0 kB\n"
+      "HighFree:            0 kB\n"
+      "LowTotal:       255908 kB\n"
+      "LowFree:         69936 kB\n"
+      "SwapTotal:      524280 kB\n"
+      "SwapFree:       524200 kB\n"
+      "Dirty:               4 kB\n"
+      "Writeback:           0 kB\n"
+      "Mapped:          42236 kB\n"
+      "Slab:            25912 kB\n"
+      "Committed_AS:   118680 kB\n"
+      "PageTables:       1236 kB\n"
+      "VmallocTotal:  3874808 kB\n"
+      "VmallocUsed:      1416 kB\n"
+      "VmallocChunk:  3872908 kB\n"
+      "HugePages_Total:     0\n"
+      "HugePages_Free:      0\n"
+      "Hugepagesize:     4096 kB\n";
+
+  EXPECT_TRUE(ParseProcMeminfo(valid_input1, &meminfo));
+  EXPECT_EQ(meminfo.total, 3981504);
+  EXPECT_EQ(meminfo.free, 140764);
+  EXPECT_EQ(meminfo.available, 535413);
+  EXPECT_EQ(meminfo.buffers, 116480);
+  EXPECT_EQ(meminfo.cached, 406160);
+  EXPECT_EQ(meminfo.active_anon, 2972352);
+  EXPECT_EQ(meminfo.active_file, 179688);
+  EXPECT_EQ(meminfo.inactive_anon, 270108);
+  EXPECT_EQ(meminfo.inactive_file, 202748);
+  EXPECT_EQ(meminfo.swap_total, 5832280);
+  EXPECT_EQ(meminfo.swap_free, 3672368);
+  EXPECT_EQ(meminfo.dirty, 184);
+  EXPECT_EQ(meminfo.reclaimable, 30936);
+#if defined(OS_CHROMEOS)
+  EXPECT_EQ(meminfo.shmem, 140204);
+  EXPECT_EQ(meminfo.slab, 54212);
+#endif
+  EXPECT_EQ(355725,
+            base::SysInfo::AmountOfAvailablePhysicalMemory(meminfo) / 1024);
+  // Simulate as if there is no MemAvailable.
+  meminfo.available = 0;
+  EXPECT_EQ(374448,
+            base::SysInfo::AmountOfAvailablePhysicalMemory(meminfo) / 1024);
+  meminfo = {};
+  EXPECT_TRUE(ParseProcMeminfo(valid_input2, &meminfo));
+  EXPECT_EQ(meminfo.total, 255908);
+  EXPECT_EQ(meminfo.free, 69936);
+  EXPECT_EQ(meminfo.available, 0);
+  EXPECT_EQ(meminfo.buffers, 15812);
+  EXPECT_EQ(meminfo.cached, 115124);
+  EXPECT_EQ(meminfo.swap_total, 524280);
+  EXPECT_EQ(meminfo.swap_free, 524200);
+  EXPECT_EQ(meminfo.dirty, 4);
+  EXPECT_EQ(69936,
+            base::SysInfo::AmountOfAvailablePhysicalMemory(meminfo) / 1024);
+}
+
+TEST_F(SystemMetricsTest, ParseVmstat) {
+  VmStatInfo vmstat;
+  // Part of /proc/vmstat from a 3.2 kernel with NUMA enabled.
+  const char valid_input1[] =
+      "nr_free_pages 905104\n"
+      "nr_inactive_anon 142478"
+      "nr_active_anon 1520046\n"
+      "nr_inactive_file 4481001\n"
+      "nr_active_file 8313439\n"
+      "nr_unevictable 5044\n"
+      "nr_mlock 5044\n"
+      "nr_anon_pages 1633780\n"
+      "nr_mapped 104742\n"
+      "nr_file_pages 12828218\n"
+      "nr_dirty 245\n"
+      "nr_writeback 0\n"
+      "nr_slab_reclaimable 831609\n"
+      "nr_slab_unreclaimable 41164\n"
+      "nr_page_table_pages 31470\n"
+      "nr_kernel_stack 1735\n"
+      "nr_unstable 0\n"
+      "nr_bounce 0\n"
+      "nr_vmscan_write 406\n"
+      "nr_vmscan_immediate_reclaim 281\n"
+      "nr_writeback_temp 0\n"
+      "nr_isolated_anon 0\n"
+      "nr_isolated_file 0\n"
+      "nr_shmem 28820\n"
+      "nr_dirtied 84674644\n"
+      "nr_written 75307109\n"
+      "nr_anon_transparent_hugepages 0\n"
+      "nr_dirty_threshold 1536206\n"
+      "nr_dirty_background_threshold 768103\n"
+      "pgpgin 30777108\n"
+      "pgpgout 319023278\n"
+      "pswpin 179\n"
+      "pswpout 406\n"
+      "pgalloc_dma 0\n"
+      "pgalloc_dma32 20833399\n"
+      "pgalloc_normal 1622609290\n"
+      "pgalloc_movable 0\n"
+      "pgfree 1644355583\n"
+      "pgactivate 75391882\n"
+      "pgdeactivate 4121019\n"
+      "pgfault 2542879679\n"
+      "pgmajfault 487192\n";
+  const char valid_input2[] =
+      "nr_free_pages 180125\n"
+      "nr_inactive_anon 51\n"
+      "nr_active_anon 38832\n"
+      "nr_inactive_file 50171\n"
+      "nr_active_file 47510\n"
+      "nr_unevictable 0\n"
+      "nr_mlock 0\n"
+      "nr_anon_pages 38825\n"
+      "nr_mapped 24043\n"
+      "nr_file_pages 97733\n"
+      "nr_dirty 0\n"
+      "nr_writeback 0\n"
+      "nr_slab_reclaimable 4032\n"
+      "nr_slab_unreclaimable 2848\n"
+      "nr_page_table_pages 1505\n"
+      "nr_kernel_stack 626\n"
+      "nr_unstable 0\n"
+      "nr_bounce 0\n"
+      "nr_vmscan_write 0\n"
+      "nr_vmscan_immediate_reclaim 0\n"
+      "nr_writeback_temp 0\n"
+      "nr_isolated_anon 0\n"
+      "nr_isolated_file 0\n"
+      "nr_shmem 58\n"
+      "nr_dirtied 435358\n"
+      "nr_written 401258\n"
+      "nr_anon_transparent_hugepages 0\n"
+      "nr_dirty_threshold 18566\n"
+      "nr_dirty_background_threshold 4641\n"
+      "pgpgin 299464\n"
+      "pgpgout 2437788\n"
+      "pswpin 12\n"
+      "pswpout 901\n"
+      "pgalloc_normal 144213030\n"
+      "pgalloc_high 164501274\n"
+      "pgalloc_movable 0\n"
+      "pgfree 308894908\n"
+      "pgactivate 239320\n"
+      "pgdeactivate 1\n"
+      "pgfault 716044601\n"
+      "pgmajfault 2023\n"
+      "pgrefill_normal 0\n"
+      "pgrefill_high 0\n"
+      "pgrefill_movable 0\n";
+  EXPECT_TRUE(ParseProcVmstat(valid_input1, &vmstat));
+  EXPECT_EQ(179LU, vmstat.pswpin);
+  EXPECT_EQ(406LU, vmstat.pswpout);
+  EXPECT_EQ(487192LU, vmstat.pgmajfault);
+  EXPECT_TRUE(ParseProcVmstat(valid_input2, &vmstat));
+  EXPECT_EQ(12LU, vmstat.pswpin);
+  EXPECT_EQ(901LU, vmstat.pswpout);
+  EXPECT_EQ(2023LU, vmstat.pgmajfault);
+
+  const char missing_pgmajfault_input[] =
+      "pswpin 12\n"
+      "pswpout 901\n";
+  EXPECT_FALSE(ParseProcVmstat(missing_pgmajfault_input, &vmstat));
+  const char empty_input[] = "";
+  EXPECT_FALSE(ParseProcVmstat(empty_input, &vmstat));
+}
+#endif  // defined(OS_LINUX) || defined(OS_ANDROID)
+
+#if defined(OS_LINUX) || defined(OS_CHROMEOS) || defined(OS_WIN)
+
+// Test that ProcessMetrics::GetPlatformIndependentCPUUsage() doesn't return
+// negative values when the number of threads running on the process decreases
+// between two successive calls to it.
+TEST_F(SystemMetricsTest, TestNoNegativeCpuUsage) {
+  ProcessHandle handle = GetCurrentProcessHandle();
+  std::unique_ptr<ProcessMetrics> metrics(
+      ProcessMetrics::CreateProcessMetrics(handle));
+
+  EXPECT_GE(metrics->GetPlatformIndependentCPUUsage(), 0.0);
+  Thread thread1("thread1");
+  Thread thread2("thread2");
+  Thread thread3("thread3");
+
+  thread1.StartAndWaitForTesting();
+  thread2.StartAndWaitForTesting();
+  thread3.StartAndWaitForTesting();
+
+  ASSERT_TRUE(thread1.IsRunning());
+  ASSERT_TRUE(thread2.IsRunning());
+  ASSERT_TRUE(thread3.IsRunning());
+
+  std::vector<std::string> vec1;
+  std::vector<std::string> vec2;
+  std::vector<std::string> vec3;
+
+  thread1.task_runner()->PostTask(FROM_HERE, BindOnce(&BusyWork, &vec1));
+  thread2.task_runner()->PostTask(FROM_HERE, BindOnce(&BusyWork, &vec2));
+  thread3.task_runner()->PostTask(FROM_HERE, BindOnce(&BusyWork, &vec3));
+
+  TimeDelta prev_cpu_usage = metrics->GetCumulativeCPUUsage();
+  EXPECT_GE(prev_cpu_usage, TimeDelta());
+  EXPECT_GE(metrics->GetPlatformIndependentCPUUsage(), 0.0);
+
+  thread1.Stop();
+  TimeDelta current_cpu_usage = metrics->GetCumulativeCPUUsage();
+  EXPECT_GE(current_cpu_usage, prev_cpu_usage);
+  prev_cpu_usage = current_cpu_usage;
+  EXPECT_GE(metrics->GetPlatformIndependentCPUUsage(), 0.0);
+
+  thread2.Stop();
+  current_cpu_usage = metrics->GetCumulativeCPUUsage();
+  EXPECT_GE(current_cpu_usage, prev_cpu_usage);
+  prev_cpu_usage = current_cpu_usage;
+  EXPECT_GE(metrics->GetPlatformIndependentCPUUsage(), 0.0);
+
+  thread3.Stop();
+  current_cpu_usage = metrics->GetCumulativeCPUUsage();
+  EXPECT_GE(current_cpu_usage, prev_cpu_usage);
+  EXPECT_GE(metrics->GetPlatformIndependentCPUUsage(), 0.0);
+}
+
+#endif  // defined(OS_LINUX) || defined(OS_CHROMEOS) || defined(OS_WIN)
+
+#if defined(OS_CHROMEOS)
+TEST_F(SystemMetricsTest, ParseZramMmStat) {
+  SwapInfo swapinfo;
+
+  const char invalid_input1[] = "aaa";
+  const char invalid_input2[] = "1 2 3 4 5 6";
+  const char invalid_input3[] = "a 2 3 4 5 6 7";
+  EXPECT_FALSE(ParseZramMmStat(invalid_input1, &swapinfo));
+  EXPECT_FALSE(ParseZramMmStat(invalid_input2, &swapinfo));
+  EXPECT_FALSE(ParseZramMmStat(invalid_input3, &swapinfo));
+
+  const char valid_input1[] =
+      "17715200 5008166 566062  0 1225715712  127 183842";
+  EXPECT_TRUE(ParseZramMmStat(valid_input1, &swapinfo));
+  EXPECT_EQ(17715200ULL, swapinfo.orig_data_size);
+  EXPECT_EQ(5008166ULL, swapinfo.compr_data_size);
+  EXPECT_EQ(566062ULL, swapinfo.mem_used_total);
+}
+
+TEST_F(SystemMetricsTest, ParseZramStat) {
+  SwapInfo swapinfo;
+
+  const char invalid_input1[] = "aaa";
+  const char invalid_input2[] = "1 2 3 4 5 6 7 8 9 10";
+  const char invalid_input3[] = "a 2 3 4 5 6 7 8 9 10 11";
+  EXPECT_FALSE(ParseZramStat(invalid_input1, &swapinfo));
+  EXPECT_FALSE(ParseZramStat(invalid_input2, &swapinfo));
+  EXPECT_FALSE(ParseZramStat(invalid_input3, &swapinfo));
+
+  const char valid_input1[] =
+      "299    0    2392    0    1    0    8    0    0    0    0";
+  EXPECT_TRUE(ParseZramStat(valid_input1, &swapinfo));
+  EXPECT_EQ(299ULL, swapinfo.num_reads);
+  EXPECT_EQ(1ULL, swapinfo.num_writes);
+}
+#endif  // defined(OS_CHROMEOS)
+
+#if defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX) || \
+    defined(OS_ANDROID)
+TEST(SystemMetrics2Test, GetSystemMemoryInfo) {
+  SystemMemoryInfoKB info;
+  EXPECT_TRUE(GetSystemMemoryInfo(&info));
+
+  // Ensure each field received a value.
+  EXPECT_GT(info.total, 0);
+#if defined(OS_WIN)
+  EXPECT_GT(info.avail_phys, 0);
+#else
+  EXPECT_GT(info.free, 0);
+#endif
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+  EXPECT_GT(info.buffers, 0);
+  EXPECT_GT(info.cached, 0);
+  EXPECT_GT(info.active_anon, 0);
+  EXPECT_GT(info.inactive_anon, 0);
+  EXPECT_GT(info.active_file, 0);
+  EXPECT_GT(info.inactive_file, 0);
+#endif  // defined(OS_LINUX) || defined(OS_ANDROID)
+
+  // All the values should be less than the total amount of memory.
+#if !defined(OS_WIN) && !defined(OS_IOS)
+  // TODO(crbug.com/711450): re-enable the following assertion on iOS.
+  EXPECT_LT(info.free, info.total);
+#endif
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+  EXPECT_LT(info.buffers, info.total);
+  EXPECT_LT(info.cached, info.total);
+  EXPECT_LT(info.active_anon, info.total);
+  EXPECT_LT(info.inactive_anon, info.total);
+  EXPECT_LT(info.active_file, info.total);
+  EXPECT_LT(info.inactive_file, info.total);
+#endif  // defined(OS_LINUX) || defined(OS_ANDROID)
+
+#if defined(OS_MACOSX) || defined(OS_IOS)
+  EXPECT_GT(info.file_backed, 0);
+#endif
+
+#if defined(OS_CHROMEOS)
+  // Chrome OS exposes shmem.
+  EXPECT_GT(info.shmem, 0);
+  EXPECT_LT(info.shmem, info.total);
+  // Chrome unit tests are not run on actual Chrome OS hardware, so gem_objects
+  // and gem_size cannot be tested here.
+#endif
+}
+#endif  // defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX) ||
+        // defined(OS_ANDROID)
+
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+TEST(ProcessMetricsTest, ParseProcStatCPU) {
+  // /proc/self/stat for a process running "top".
+  const char kTopStat[] = "960 (top) S 16230 960 16230 34818 960 "
+      "4202496 471 0 0 0 "
+      "12 16 0 0 "  // <- These are the goods.
+      "20 0 1 0 121946157 15077376 314 18446744073709551615 4194304 "
+      "4246868 140733983044336 18446744073709551615 140244213071219 "
+      "0 0 0 138047495 0 0 0 17 1 0 0 0 0 0";
+  EXPECT_EQ(12 + 16, ParseProcStatCPU(kTopStat));
+
+  // cat /proc/self/stat on a random other machine I have.
+  const char kSelfStat[] = "5364 (cat) R 5354 5364 5354 34819 5364 "
+      "0 142 0 0 0 "
+      "0 0 0 0 "  // <- No CPU, apparently.
+      "16 0 1 0 1676099790 2957312 114 4294967295 134512640 134528148 "
+      "3221224832 3221224344 3086339742 0 0 0 0 0 0 0 17 0 0 0";
+
+  EXPECT_EQ(0, ParseProcStatCPU(kSelfStat));
+
+  // Some weird long-running process with a weird name that I created for the
+  // purposes of this test.
+  const char kWeirdNameStat[] = "26115 (Hello) You ()))  ) R 24614 26115 24614"
+      " 34839 26115 4218880 227 0 0 0 "
+      "5186 11 0 0 "
+      "20 0 1 0 36933953 4296704 90 18446744073709551615 4194304 4196116 "
+      "140735857761568 140735857761160 4195644 0 0 0 0 0 0 0 17 14 0 0 0 0 0 "
+      "6295056 6295616 16519168 140735857770710 140735857770737 "
+      "140735857770737 140735857774557 0";
+  EXPECT_EQ(5186 + 11, ParseProcStatCPU(kWeirdNameStat));
+}
+#endif  // defined(OS_LINUX) || defined(OS_ANDROID)
+
+// Disable on Android because base_unittests runs inside a Dalvik VM that
+// starts and stops threads (crbug.com/175563).
+#if defined(OS_LINUX)
+// http://crbug.com/396455
+TEST(ProcessMetricsTest, DISABLED_GetNumberOfThreads) {
+  const ProcessHandle current = GetCurrentProcessHandle();
+  const int initial_threads = GetNumberOfThreads(current);
+  ASSERT_GT(initial_threads, 0);
+  const int kNumAdditionalThreads = 10;
+  {
+    std::unique_ptr<Thread> my_threads[kNumAdditionalThreads];
+    for (int i = 0; i < kNumAdditionalThreads; ++i) {
+      my_threads[i].reset(new Thread("GetNumberOfThreadsTest"));
+      my_threads[i]->Start();
+      ASSERT_EQ(GetNumberOfThreads(current), initial_threads + 1 + i);
+    }
+  }
+  // The Thread destructor will stop them.
+  ASSERT_EQ(initial_threads, GetNumberOfThreads(current));
+}
+#endif  // defined(OS_LINUX)
+
+#if defined(OS_LINUX)
+namespace {
+
+// Keep these in sync so the GetChildOpenFdCount test can refer to the correct
+// test main.
+#define ChildMain ChildFdCount
+#define ChildMainString "ChildFdCount"
+
+// Command line flag name and file name used for synchronization.
+const char kTempDirFlag[] = "temp-dir";
+const char kSignalClosed[] = "closed";
+
+bool SignalEvent(const FilePath& signal_dir, const char* signal_file) {
+  File file(signal_dir.AppendASCII(signal_file),
+            File::FLAG_CREATE | File::FLAG_WRITE);
+  return file.IsValid();
+}
+
+// Check whether an event was signaled.
+bool CheckEvent(const FilePath& signal_dir, const char* signal_file) {
+  File file(signal_dir.AppendASCII(signal_file),
+            File::FLAG_OPEN | File::FLAG_READ);
+  return file.IsValid();
+}
+
+// Busy-wait for an event to be signaled.
+void WaitForEvent(const FilePath& signal_dir, const char* signal_file) {
+  while (!CheckEvent(signal_dir, signal_file))
+    PlatformThread::Sleep(TimeDelta::FromMilliseconds(10));
+}
+
+// Subprocess to test the number of open file descriptors.
+MULTIPROCESS_TEST_MAIN(ChildMain) {
+  CommandLine* command_line = CommandLine::ForCurrentProcess();
+  const FilePath temp_path = command_line->GetSwitchValuePath(kTempDirFlag);
+  CHECK(DirectoryExists(temp_path));
+
+  // Try to close all the file descriptors, so the open count goes to 0.
+  for (size_t i = 0; i < 1000; ++i)
+    close(i);
+  CHECK(SignalEvent(temp_path, kSignalClosed));
+
+  // Wait to be terminated.
+  while (true)
+    PlatformThread::Sleep(TimeDelta::FromSeconds(1));
+  return 0;
+}
+
+}  // namespace
+
+TEST(ProcessMetricsTest, GetChildOpenFdCount) {
+  ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  const FilePath temp_path = temp_dir.GetPath();
+  CommandLine child_command_line(GetMultiProcessTestChildBaseCommandLine());
+  child_command_line.AppendSwitchPath(kTempDirFlag, temp_path);
+  Process child = SpawnMultiProcessTestChild(
+      ChildMainString, child_command_line, LaunchOptions());
+  ASSERT_TRUE(child.IsValid());
+  WaitForEvent(temp_path, kSignalClosed);
+
+  std::unique_ptr<ProcessMetrics> metrics(
+      ProcessMetrics::CreateProcessMetrics(child.Handle()));
+  EXPECT_EQ(0, metrics->GetOpenFdCount());
+  ASSERT_TRUE(child.Terminate(0, true));
+}
+#endif  // defined(OS_LINUX)
+
+#if defined(OS_ANDROID) || defined(OS_LINUX)
+
+TEST(ProcessMetricsTest, GetOpenFdCount) {
+  std::unique_ptr<base::ProcessMetrics> metrics(
+      base::ProcessMetrics::CreateProcessMetrics(
+          base::GetCurrentProcessHandle()));
+  int fd_count = metrics->GetOpenFdCount();
+  EXPECT_GT(fd_count, 0);
+  ScopedFILE file(fopen("/proc/self/statm", "r"));
+  EXPECT_TRUE(file);
+  int new_fd_count = metrics->GetOpenFdCount();
+  EXPECT_GT(new_fd_count, 0);
+  EXPECT_EQ(new_fd_count, fd_count + 1);
+}
+
+TEST(ProcessMetricsTestLinux, GetPageFaultCounts) {
+  std::unique_ptr<base::ProcessMetrics> process_metrics(
+      base::ProcessMetrics::CreateProcessMetrics(
+          base::GetCurrentProcessHandle()));
+
+  PageFaultCounts counts;
+  ASSERT_TRUE(process_metrics->GetPageFaultCounts(&counts));
+  ASSERT_GT(counts.minor, 0);
+  ASSERT_GE(counts.major, 0);
+
+  {
+    // Allocate and touch memory. Touching it is required to make sure that the
+    // page fault count goes up, as memory is typically mapped lazily.
+    const size_t kMappedSize = 4 * (1 << 20);
+    SharedMemory memory;
+    ASSERT_TRUE(memory.CreateAndMapAnonymous(kMappedSize));
+    memset(memory.memory(), 42, kMappedSize);
+    memory.Unmap();
+  }
+
+  PageFaultCounts counts_after;
+  ASSERT_TRUE(process_metrics->GetPageFaultCounts(&counts_after));
+  ASSERT_GT(counts_after.minor, counts.minor);
+  ASSERT_GE(counts_after.major, counts.major);
+}
+#endif  // defined(OS_ANDROID) || defined(OS_LINUX)
+
+}  // namespace debug
+}  // namespace base
diff --git a/base/process/process_metrics_win.cc b/base/process/process_metrics_win.cc
new file mode 100644
index 0000000..18ef58a
--- /dev/null
+++ b/base/process/process_metrics_win.cc
@@ -0,0 +1,207 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_metrics.h"
+
+#include <windows.h>  // Must be in front of other Windows header files.
+
+#include <psapi.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <winternl.h>
+
+#include <algorithm>
+
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/process/memory.h"
+#include "base/process/process_metrics_iocounters.h"
+#include "base/sys_info.h"
+
+namespace base {
+namespace {
+
+// System page size, in KB. This value remains constant on x86/x64
+// architectures.
+const int PAGESIZE_KB = 4;
+
+typedef NTSTATUS(WINAPI* NTQUERYSYSTEMINFORMATION)(
+    SYSTEM_INFORMATION_CLASS SystemInformationClass,
+    PVOID SystemInformation,
+    ULONG SystemInformationLength,
+    PULONG ReturnLength);
+
+}  // namespace
+
+ProcessMetrics::~ProcessMetrics() { }
+
+size_t GetMaxFds() {
+  // Windows is only limited by the amount of physical memory.
+  return std::numeric_limits<size_t>::max();
+}
+
+// static
+std::unique_ptr<ProcessMetrics> ProcessMetrics::CreateProcessMetrics(
+    ProcessHandle process) {
+  return WrapUnique(new ProcessMetrics(process));
+}
+
+namespace {
+
+class WorkingSetInformationBuffer {
+ public:
+  WorkingSetInformationBuffer() {}
+  ~WorkingSetInformationBuffer() { Clear(); }
+
+  bool Reserve(size_t size) {
+    Clear();
+    // Use UncheckedMalloc here because this can be called from code that
+    // handles low-memory conditions.
+    return UncheckedMalloc(size, reinterpret_cast<void**>(&buffer_));
+  }
+
+  const PSAPI_WORKING_SET_INFORMATION* operator->() const { return buffer_; }
+
+  size_t GetPageEntryCount() const { return number_of_entries; }
+
+  // This function is used to get page entries for a process.
+  bool QueryPageEntries(const ProcessHandle& process) {
+    int retries = 5;
+    number_of_entries = 4096;  // Just a guess.
+
+    for (;;) {
+      size_t buffer_size =
+          sizeof(PSAPI_WORKING_SET_INFORMATION) +
+          (number_of_entries * sizeof(PSAPI_WORKING_SET_BLOCK));
+
+      if (!Reserve(buffer_size))
+        return false;
+
+      // On success, |buffer_| is populated with info about the working set of
+      // |process|. On ERROR_BAD_LENGTH failure, increase the size of the
+      // buffer and try again.
+      if (QueryWorkingSet(process, buffer_, buffer_size))
+        break;  // Success
+
+      if (GetLastError() != ERROR_BAD_LENGTH)
+        return false;
+
+      number_of_entries = buffer_->NumberOfEntries;
+
+      // Maybe some entries are being added right now. Increase the buffer to
+      // take that into account. Increasing by 10% should generally be enough,
+      // especially considering the potentially low memory condition during the
+      // call (when called from OomMemoryDetails) and the potentially high
+      // number of entries (300K was observed in crash dumps).
+      number_of_entries *= 1.1;
+
+      if (--retries == 0) {
+        // If we're looping, eventually fail.
+        return false;
+      }
+    }
+
+    // TODO(chengx): Remove the comment and the logic below. It is no longer
+    // needed since we don't have Win2000 support.
+    // On Windows 2000 the function returns 1 even when the buffer is too small.
+    // The number of entries that we are going to parse is the minimum between
+    // the size we allocated and the real number of entries.
+    number_of_entries = std::min(number_of_entries,
+                                 static_cast<size_t>(buffer_->NumberOfEntries));
+
+    return true;
+  }
+
+ private:
+  void Clear() {
+    free(buffer_);
+    buffer_ = nullptr;
+  }
+
+  PSAPI_WORKING_SET_INFORMATION* buffer_ = nullptr;
+
+  // Number of page entries.
+  size_t number_of_entries = 0;
+
+  DISALLOW_COPY_AND_ASSIGN(WorkingSetInformationBuffer);
+};
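+
+// A sketch of how the grow-and-retry buffer above is meant to be driven
+// (illustrative only; ProcessEntry is a hypothetical caller-side helper):
+//
+//   WorkingSetInformationBuffer buffer;
+//   if (buffer.QueryPageEntries(process)) {
+//     for (size_t i = 0; i < buffer.GetPageEntryCount(); ++i)
+//       ProcessEntry(buffer->WorkingSetInfo[i]);
+//   }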
+
+}  // namespace
+
+TimeDelta ProcessMetrics::GetCumulativeCPUUsage() {
+  FILETIME creation_time;
+  FILETIME exit_time;
+  FILETIME kernel_time;
+  FILETIME user_time;
+
+  if (!GetProcessTimes(process_.Get(), &creation_time, &exit_time, &kernel_time,
+                       &user_time)) {
+    // We don't assert here because in some cases (such as in the Task Manager)
+    // we may call this function on a process that has just exited but we have
+    // not yet received the notification.
+    return TimeDelta();
+  }
+
+  return TimeDelta::FromFileTime(kernel_time) +
+         TimeDelta::FromFileTime(user_time);
+}
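+
+// Conceptual sketch: a CPU percentage is derived from two cumulative samples
+// taken a known wall-clock interval apart, which is, conceptually, what the
+// platform-independent wrapper does with this function:
+//
+//   TimeDelta before = metrics->GetCumulativeCPUUsage();
+//   TimeTicks start = TimeTicks::Now();
+//   // ... let the process run ...
+//   TimeDelta used = metrics->GetCumulativeCPUUsage() - before;
+//   double pct = 100.0 * used.InMicroseconds() /
+//                (TimeTicks::Now() - start).InMicroseconds();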
+
+bool ProcessMetrics::GetIOCounters(IoCounters* io_counters) const {
+  return GetProcessIoCounters(process_.Get(), io_counters) != FALSE;
+}
+
+ProcessMetrics::ProcessMetrics(ProcessHandle process) {
+  if (process) {
+    HANDLE duplicate_handle = INVALID_HANDLE_VALUE;
+    BOOL result = ::DuplicateHandle(::GetCurrentProcess(), process,
+                                    ::GetCurrentProcess(), &duplicate_handle,
+                                    PROCESS_QUERY_INFORMATION, FALSE, 0);
+    DPCHECK(result);
+    process_.Set(duplicate_handle);
+  }
+}
+
+size_t GetSystemCommitCharge() {
+  // Get the System Page Size.
+  SYSTEM_INFO system_info;
+  GetSystemInfo(&system_info);
+
+  PERFORMANCE_INFORMATION info;
+  if (!GetPerformanceInfo(&info, sizeof(info))) {
+    DLOG(ERROR) << "Failed to fetch internal performance info.";
+    return 0;
+  }
+  return (info.CommitTotal * system_info.dwPageSize) / 1024;
+}
+
+size_t GetPageSize() {
+  return PAGESIZE_KB * 1024;
+}
+
+// This function uses the following mapping between MEMORYSTATUSEX and
+// SystemMemoryInfoKB:
+//   ullTotalPhys ==> total
+//   ullAvailPhys ==> avail_phys
+//   ullTotalPageFile ==> swap_total
+//   ullAvailPageFile ==> swap_free
+bool GetSystemMemoryInfo(SystemMemoryInfoKB* meminfo) {
+  MEMORYSTATUSEX mem_status;
+  mem_status.dwLength = sizeof(mem_status);
+  if (!::GlobalMemoryStatusEx(&mem_status))
+    return false;
+
+  meminfo->total = mem_status.ullTotalPhys / 1024;
+  meminfo->avail_phys = mem_status.ullAvailPhys / 1024;
+  meminfo->swap_total = mem_status.ullTotalPageFile / 1024;
+  meminfo->swap_free = mem_status.ullAvailPageFile / 1024;
+
+  return true;
+}
+
+size_t ProcessMetrics::GetMallocUsage() {
+  // Unsupported, as getting malloc usage on Windows requires iterating through
+  // the heap, which is slow and can crash.
+  return 0;
+}
+
+}  // namespace base
diff --git a/base/process/process_posix.cc b/base/process/process_posix.cc
new file mode 100644
index 0000000..7645b78
--- /dev/null
+++ b/base/process/process_posix.cc
@@ -0,0 +1,378 @@
+// Copyright 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process.h"
+
+#include <errno.h>
+#include <signal.h>
+#include <stdint.h>
+#include <sys/resource.h>
+#include <sys/wait.h>
+
+#include "base/debug/activity_tracker.h"
+#include "base/files/scoped_file.h"
+#include "base/logging.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/process/kill.h"
+#include "base/threading/thread_restrictions.h"
+#include "build/build_config.h"
+
+#if defined(OS_MACOSX)
+#include <sys/event.h>
+#endif
+
+namespace {
+
+#if !defined(OS_NACL_NONSFI)
+
+bool WaitpidWithTimeout(base::ProcessHandle handle,
+                        int* status,
+                        base::TimeDelta wait) {
+  // This POSIX version of this function only guarantees that we wait no less
+  // than |wait| for the process to exit.  The child process may
+  // exit sometime before the timeout has ended but we may still block for up
+  // to 256 milliseconds after the fact.
+  //
+  // waitpid() has no direct support on POSIX for specifying a timeout, you can
+  // either ask it to block indefinitely or return immediately (WNOHANG).
+  // When a child process terminates a SIGCHLD signal is sent to the parent.
+  // Catching this signal would involve installing a signal handler which may
+  // affect other parts of the application and would be difficult to debug.
+  //
+  // Our strategy is to call waitpid() once up front to check if the process
+  // has already exited, otherwise to loop for |wait|, sleeping for
+  // at most 256 milliseconds each time using usleep() and then calling
+  // waitpid().  The amount of time we sleep starts out at 1 millisecond, and
+  // we double it every 4 sleep cycles.
+  //
+  // usleep() is specified to return early if a signal is received for which a
+  // handler has been installed.  This means that when a SIGCHLD is sent,
+  // usleep() may return early, depending on behavior external to this
+  // function.
+  //
+  // This function is used primarily for unit tests; if we want to use it in
+  // the application itself it would probably be best to examine other routes.
+
+  if (wait == base::TimeDelta::Max()) {
+    return HANDLE_EINTR(waitpid(handle, status, 0)) > 0;
+  }
+
+  pid_t ret_pid = HANDLE_EINTR(waitpid(handle, status, WNOHANG));
+  static const int64_t kMaxSleepInMicroseconds = 1 << 18;  // ~256 milliseconds.
+  int64_t max_sleep_time_usecs = 1 << 10;                  // ~1 milliseconds.
+  int64_t double_sleep_time = 0;
+
+  // If the process hasn't exited yet, then sleep and try again.
+  base::TimeTicks wakeup_time = base::TimeTicks::Now() + wait;
+  while (ret_pid == 0) {
+    base::TimeTicks now = base::TimeTicks::Now();
+    if (now > wakeup_time)
+      break;
+    // Guaranteed to be non-negative!
+    int64_t sleep_time_usecs = (wakeup_time - now).InMicroseconds();
+    // Sleep for a bit while we wait for the process to finish.
+    if (sleep_time_usecs > max_sleep_time_usecs)
+      sleep_time_usecs = max_sleep_time_usecs;
+
+    // usleep() will return early (with errno set to EINTR) on receipt of a
+    // signal such as SIGCHLD.
+    usleep(sleep_time_usecs);
+    ret_pid = HANDLE_EINTR(waitpid(handle, status, WNOHANG));
+
+    if ((max_sleep_time_usecs < kMaxSleepInMicroseconds) &&
+        (double_sleep_time++ % 4 == 0)) {
+      max_sleep_time_usecs *= 2;
+    }
+  }
+
+  return ret_pid > 0;
+}
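+
+// The backoff schedule above, written out: the sleep is ~1 ms once, then
+// ~2 ms four times, ~4 ms four times, and so on, capped at ~256 ms. The cap
+// bounds how long this can oversleep after the child actually exits.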
+
+#if defined(OS_MACOSX)
+// Using kqueue on Mac so that we can wait on non-child processes.
+// We can't use kqueues on child processes because we need to reap
+// our own children using wait.
+bool WaitForSingleNonChildProcess(base::ProcessHandle handle,
+                                  base::TimeDelta wait) {
+  DCHECK_GT(handle, 0);
+
+  base::ScopedFD kq(kqueue());
+  if (!kq.is_valid()) {
+    DPLOG(ERROR) << "kqueue";
+    return false;
+  }
+
+  struct kevent change = {0};
+  EV_SET(&change, handle, EVFILT_PROC, EV_ADD, NOTE_EXIT, 0, NULL);
+  int result = HANDLE_EINTR(kevent(kq.get(), &change, 1, NULL, 0, NULL));
+  if (result == -1) {
+    if (errno == ESRCH) {
+      // If the process wasn't found, it must be dead.
+      return true;
+    }
+
+    DPLOG(ERROR) << "kevent (setup " << handle << ")";
+    return false;
+  }
+
+  // Keep track of the elapsed time to be able to restart kevent if it's
+  // interrupted.
+  bool wait_forever = (wait == base::TimeDelta::Max());
+  base::TimeDelta remaining_delta;
+  base::TimeTicks deadline;
+  if (!wait_forever) {
+    remaining_delta = wait;
+    deadline = base::TimeTicks::Now() + remaining_delta;
+  }
+
+  result = -1;
+  struct kevent event = {0};
+
+  do {
+    struct timespec remaining_timespec;
+    struct timespec* remaining_timespec_ptr;
+    if (wait_forever) {
+      remaining_timespec_ptr = NULL;
+    } else {
+      remaining_timespec = remaining_delta.ToTimeSpec();
+      remaining_timespec_ptr = &remaining_timespec;
+    }
+
+    result = kevent(kq.get(), NULL, 0, &event, 1, remaining_timespec_ptr);
+
+    if (result == -1 && errno == EINTR) {
+      if (!wait_forever) {
+        remaining_delta = deadline - base::TimeTicks::Now();
+      }
+      result = 0;
+    } else {
+      break;
+    }
+  } while (wait_forever || remaining_delta > base::TimeDelta());
+
+  if (result < 0) {
+    DPLOG(ERROR) << "kevent (wait " << handle << ")";
+    return false;
+  } else if (result > 1) {
+    DLOG(ERROR) << "kevent (wait " << handle << "): unexpected result "
+                << result;
+    return false;
+  } else if (result == 0) {
+    // Timed out.
+    return false;
+  }
+
+  DCHECK_EQ(result, 1);
+
+  if (event.filter != EVFILT_PROC ||
+      (event.fflags & NOTE_EXIT) == 0 ||
+      event.ident != static_cast<uintptr_t>(handle)) {
+    DLOG(ERROR) << "kevent (wait " << handle
+                << "): unexpected event: filter=" << event.filter
+                << ", fflags=" << event.fflags
+                << ", ident=" << event.ident;
+    return false;
+  }
+
+  return true;
+}
+#endif  // OS_MACOSX
+
+bool WaitForExitWithTimeoutImpl(base::ProcessHandle handle,
+                                int* exit_code,
+                                base::TimeDelta timeout) {
+  const base::ProcessHandle our_pid = base::GetCurrentProcessHandle();
+  if (handle == our_pid) {
+    // We won't be able to wait for ourselves to exit.
+    return false;
+  }
+
+  const base::ProcessHandle parent_pid = base::GetParentProcessId(handle);
+  const bool exited = (parent_pid < 0);
+
+  if (!exited && parent_pid != our_pid) {
+#if defined(OS_MACOSX)
+    // On Mac we can wait on non-child processes.
+    return WaitForSingleNonChildProcess(handle, timeout);
+#else
+    // Currently on Linux we can't handle non-child processes.
+    NOTIMPLEMENTED();
+#endif  // OS_MACOSX
+  }
+
+  int status;
+  if (!WaitpidWithTimeout(handle, &status, timeout)) {
+    // If multiple threads wait on the same |handle| then one wait will succeed
+    // and the other will fail with errno set to ECHILD.
+    return exited || (errno == ECHILD);
+  }
+  if (WIFSIGNALED(status)) {
+    if (exit_code)
+      *exit_code = -1;
+    return true;
+  }
+  if (WIFEXITED(status)) {
+    if (exit_code)
+      *exit_code = WEXITSTATUS(status);
+    return true;
+  }
+  return exited;
+}
+#endif  // !defined(OS_NACL_NONSFI)
+
+}  // namespace
+
+namespace base {
+
+Process::Process(ProcessHandle handle) : process_(handle) {
+}
+
+Process::~Process() = default;
+
+Process::Process(Process&& other) : process_(other.process_) {
+  other.Close();
+}
+
+Process& Process::operator=(Process&& other) {
+  process_ = other.process_;
+  other.Close();
+  return *this;
+}
+
+// static
+Process Process::Current() {
+  return Process(GetCurrentProcessHandle());
+}
+
+// static
+Process Process::Open(ProcessId pid) {
+  if (pid == GetCurrentProcId())
+    return Current();
+
+  // On POSIX process handles are the same as PIDs.
+  return Process(pid);
+}
+
+// static
+Process Process::OpenWithExtraPrivileges(ProcessId pid) {
+  // On POSIX there are no privileges to set.
+  return Open(pid);
+}
+
+// static
+Process Process::DeprecatedGetProcessFromHandle(ProcessHandle handle) {
+  DCHECK_NE(handle, GetCurrentProcessHandle());
+  return Process(handle);
+}
+
+#if !defined(OS_LINUX) && !defined(OS_MACOSX) && !defined(OS_AIX)
+// static
+bool Process::CanBackgroundProcesses() {
+  return false;
+}
+#endif  // !defined(OS_LINUX) && !defined(OS_MACOSX) && !defined(OS_AIX)
+
+// static
+void Process::TerminateCurrentProcessImmediately(int exit_code) {
+  _exit(exit_code);
+}
+
+bool Process::IsValid() const {
+  return process_ != kNullProcessHandle;
+}
+
+ProcessHandle Process::Handle() const {
+  return process_;
+}
+
+Process Process::Duplicate() const {
+  if (is_current())
+    return Current();
+
+  return Process(process_);
+}
+
+ProcessId Process::Pid() const {
+  DCHECK(IsValid());
+  return GetProcId(process_);
+}
+
+bool Process::is_current() const {
+  return process_ == GetCurrentProcessHandle();
+}
+
+void Process::Close() {
+  process_ = kNullProcessHandle;
+  // Unless the process was terminated (so we waited on it) or its state was
+  // already collected with a wait from process_util, we're going to end up
+  // with a zombie when it does finally exit.
+}
+
+#if !defined(OS_NACL_NONSFI)
+bool Process::Terminate(int exit_code, bool wait) const {
+  // |exit_code| isn't supportable: on POSIX the exit code of a signaled
+  // process can't be chosen by the caller.
+  DCHECK(IsValid());
+  CHECK_GT(process_, 0);
+
+  bool did_terminate = kill(process_, SIGTERM) == 0;
+
+  if (wait && did_terminate) {
+    if (WaitForExitWithTimeout(TimeDelta::FromSeconds(60), nullptr))
+      return true;
+    did_terminate = kill(process_, SIGKILL) == 0;
+    if (did_terminate)
+      return WaitForExit(nullptr);
+  }
+
+  if (!did_terminate)
+    DPLOG(ERROR) << "Unable to terminate process " << process_;
+
+  return did_terminate;
+}
+#endif  // !defined(OS_NACL_NONSFI)
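+
+// Usage sketch (illustrative): Terminate(exit_code, /*wait=*/true)
+// encapsulates the common graceful-then-forceful shutdown, so a caller only
+// needs:
+//
+//   if (!child.Terminate(0, /*wait=*/true))
+//     DLOG(ERROR) << "Child " << child.Pid() << " did not terminate";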
+
+bool Process::WaitForExit(int* exit_code) const {
+  return WaitForExitWithTimeout(TimeDelta::Max(), exit_code);
+}
+
+bool Process::WaitForExitWithTimeout(TimeDelta timeout, int* exit_code) const {
+  if (!timeout.is_zero())
+    internal::AssertBaseSyncPrimitivesAllowed();
+
+  // Record the event that this thread is blocking upon (for hang diagnosis).
+  base::debug::ScopedProcessWaitActivity process_activity(this);
+
+  int local_exit_code;
+  bool exited = WaitForExitWithTimeoutImpl(Handle(), &local_exit_code, timeout);
+  if (exited) {
+    Exited(local_exit_code);
+    if (exit_code)
+      *exit_code = local_exit_code;
+  }
+  return exited;
+}
+
+void Process::Exited(int exit_code) const {}
+
+#if !defined(OS_LINUX) && !defined(OS_MACOSX) && !defined(OS_AIX)
+bool Process::IsProcessBackgrounded() const {
+  // See SetProcessBackgrounded().
+  DCHECK(IsValid());
+  return false;
+}
+
+bool Process::SetProcessBackgrounded(bool value) {
+  // Not implemented for POSIX systems other than Linux and Mac. With POSIX, if
+  // we were to lower the process priority we wouldn't be able to raise it back
+  // to its initial priority.
+  NOTIMPLEMENTED();
+  return false;
+}
+#endif  // !defined(OS_LINUX) && !defined(OS_MACOSX) && !defined(OS_AIX)
+
+int Process::GetPriority() const {
+  DCHECK(IsValid());
+  return getpriority(PRIO_PROCESS, process_);
+}
+
+}  // namespace base
diff --git a/base/process/process_unittest.cc b/base/process/process_unittest.cc
new file mode 100644
index 0000000..9f678d1
--- /dev/null
+++ b/base/process/process_unittest.cc
@@ -0,0 +1,330 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process.h"
+
+#include <utility>
+
+#include "base/at_exit.h"
+#include "base/process/kill.h"
+#include "base/test/multiprocess_test.h"
+#include "base/test/test_timeouts.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread_local.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/multiprocess_func_list.h"
+
+namespace {
+
+#if defined(OS_WIN)
+const int kExpectedStillRunningExitCode = 0x102;
+#else
+const int kExpectedStillRunningExitCode = 0;
+#endif
+
+#if defined(OS_MACOSX)
+// Fake port provider that returns the calling process's
+// task port, ignoring its argument.
+class FakePortProvider : public base::PortProvider {
+  mach_port_t TaskForPid(base::ProcessHandle process) const override {
+    return mach_task_self();
+  }
+};
+#endif
+
+}  // namespace
+
+namespace base {
+
+class ProcessTest : public MultiProcessTest {
+};
+
+TEST_F(ProcessTest, Create) {
+  Process process(SpawnChild("SimpleChildProcess"));
+  ASSERT_TRUE(process.IsValid());
+  ASSERT_FALSE(process.is_current());
+  EXPECT_NE(process.Pid(), kNullProcessId);
+  process.Close();
+  ASSERT_FALSE(process.IsValid());
+}
+
+TEST_F(ProcessTest, CreateCurrent) {
+  Process process = Process::Current();
+  ASSERT_TRUE(process.IsValid());
+  ASSERT_TRUE(process.is_current());
+  EXPECT_NE(process.Pid(), kNullProcessId);
+  process.Close();
+  ASSERT_FALSE(process.IsValid());
+}
+
+TEST_F(ProcessTest, Move) {
+  Process process1(SpawnChild("SimpleChildProcess"));
+  EXPECT_TRUE(process1.IsValid());
+
+  Process process2;
+  EXPECT_FALSE(process2.IsValid());
+
+  process2 = std::move(process1);
+  EXPECT_TRUE(process2.IsValid());
+  EXPECT_FALSE(process1.IsValid());
+  EXPECT_FALSE(process2.is_current());
+
+  Process process3 = Process::Current();
+  process2 = std::move(process3);
+  EXPECT_TRUE(process2.is_current());
+  EXPECT_TRUE(process2.IsValid());
+  EXPECT_FALSE(process3.IsValid());
+}
+
+TEST_F(ProcessTest, Duplicate) {
+  Process process1(SpawnChild("SimpleChildProcess"));
+  ASSERT_TRUE(process1.IsValid());
+
+  Process process2 = process1.Duplicate();
+  ASSERT_TRUE(process1.IsValid());
+  ASSERT_TRUE(process2.IsValid());
+  EXPECT_EQ(process1.Pid(), process2.Pid());
+  EXPECT_FALSE(process1.is_current());
+  EXPECT_FALSE(process2.is_current());
+
+  process1.Close();
+  ASSERT_TRUE(process2.IsValid());
+}
+
+TEST_F(ProcessTest, DuplicateCurrent) {
+  Process process1 = Process::Current();
+  ASSERT_TRUE(process1.IsValid());
+
+  Process process2 = process1.Duplicate();
+  ASSERT_TRUE(process1.IsValid());
+  ASSERT_TRUE(process2.IsValid());
+  EXPECT_EQ(process1.Pid(), process2.Pid());
+  EXPECT_TRUE(process1.is_current());
+  EXPECT_TRUE(process2.is_current());
+
+  process1.Close();
+  ASSERT_TRUE(process2.IsValid());
+}
+
+TEST_F(ProcessTest, DeprecatedGetProcessFromHandle) {
+  Process process1(SpawnChild("SimpleChildProcess"));
+  ASSERT_TRUE(process1.IsValid());
+
+  Process process2 = Process::DeprecatedGetProcessFromHandle(process1.Handle());
+  ASSERT_TRUE(process1.IsValid());
+  ASSERT_TRUE(process2.IsValid());
+  EXPECT_EQ(process1.Pid(), process2.Pid());
+  EXPECT_FALSE(process1.is_current());
+  EXPECT_FALSE(process2.is_current());
+
+  process1.Close();
+  ASSERT_TRUE(process2.IsValid());
+}
+
+MULTIPROCESS_TEST_MAIN(SleepyChildProcess) {
+  PlatformThread::Sleep(TestTimeouts::action_max_timeout());
+  return 0;
+}
+
+TEST_F(ProcessTest, Terminate) {
+  Process process(SpawnChild("SleepyChildProcess"));
+  ASSERT_TRUE(process.IsValid());
+
+  const int kDummyExitCode = 42;
+  int exit_code = kDummyExitCode;
+  EXPECT_EQ(TERMINATION_STATUS_STILL_RUNNING,
+            GetTerminationStatus(process.Handle(), &exit_code));
+  EXPECT_EQ(kExpectedStillRunningExitCode, exit_code);
+
+  exit_code = kDummyExitCode;
+  int kExpectedExitCode = 250;
+  process.Terminate(kExpectedExitCode, false);
+  process.WaitForExitWithTimeout(TestTimeouts::action_max_timeout(),
+                                 &exit_code);
+
+  EXPECT_NE(TERMINATION_STATUS_STILL_RUNNING,
+            GetTerminationStatus(process.Handle(), &exit_code));
+#if !defined(OS_POSIX) && !defined(OS_FUCHSIA)
+  // The POSIX implementation actually ignores the exit_code.
+  EXPECT_EQ(kExpectedExitCode, exit_code);
+#endif
+}
+
+void AtExitHandler(void*) {
+  // The at-exit handler should not run when the process exits via
+  // Process::TerminateCurrentProcessImmediately.
+  DCHECK(false);
+}
+
+class ThreadLocalObject {
+  ~ThreadLocalObject() {
+    // Thread-local storage should not be destroyed when the process exits via
+    // Process::TerminateCurrentProcessImmediately.
+    DCHECK(false);
+  }
+};
+
+MULTIPROCESS_TEST_MAIN(TerminateCurrentProcessImmediatelyWithCode0) {
+  base::ThreadLocalPointer<ThreadLocalObject> object;
+  base::AtExitManager::RegisterCallback(&AtExitHandler, nullptr);
+  Process::TerminateCurrentProcessImmediately(0);
+}
+
+TEST_F(ProcessTest, TerminateCurrentProcessImmediatelyWithZeroExitCode) {
+  Process process(SpawnChild("TerminateCurrentProcessImmediatelyWithCode0"));
+  ASSERT_TRUE(process.IsValid());
+  int exit_code = 42;
+  ASSERT_TRUE(process.WaitForExitWithTimeout(TestTimeouts::action_max_timeout(),
+                                             &exit_code));
+  EXPECT_EQ(0, exit_code);
+}
+
+MULTIPROCESS_TEST_MAIN(TerminateCurrentProcessImmediatelyWithCode250) {
+  Process::TerminateCurrentProcessImmediately(250);
+}
+
+TEST_F(ProcessTest, TerminateCurrentProcessImmediatelyWithNonZeroExitCode) {
+  Process process(SpawnChild("TerminateCurrentProcessImmediatelyWithCode250"));
+  ASSERT_TRUE(process.IsValid());
+  int exit_code = 42;
+  ASSERT_TRUE(process.WaitForExitWithTimeout(TestTimeouts::action_max_timeout(),
+                                             &exit_code));
+  EXPECT_EQ(250, exit_code);
+}
+
+MULTIPROCESS_TEST_MAIN(FastSleepyChildProcess) {
+  PlatformThread::Sleep(TestTimeouts::tiny_timeout() * 10);
+  return 0;
+}
+
+TEST_F(ProcessTest, WaitForExit) {
+  Process process(SpawnChild("FastSleepyChildProcess"));
+  ASSERT_TRUE(process.IsValid());
+
+  const int kDummyExitCode = 42;
+  int exit_code = kDummyExitCode;
+  EXPECT_TRUE(process.WaitForExit(&exit_code));
+  EXPECT_EQ(0, exit_code);
+}
+
+TEST_F(ProcessTest, WaitForExitWithTimeout) {
+  Process process(SpawnChild("SleepyChildProcess"));
+  ASSERT_TRUE(process.IsValid());
+
+  const int kDummyExitCode = 42;
+  int exit_code = kDummyExitCode;
+  TimeDelta timeout = TestTimeouts::tiny_timeout();
+  EXPECT_FALSE(process.WaitForExitWithTimeout(timeout, &exit_code));
+  EXPECT_EQ(kDummyExitCode, exit_code);
+
+  process.Terminate(kDummyExitCode, false);
+}
+
+// Ensure that the priority of a process is restored correctly after
+// backgrounding and restoring.
+// Note: a platform may not be willing or able to lower the priority of
+// a process. The calls to SetProcessBackgrounded should be no-ops then.
+TEST_F(ProcessTest, SetProcessBackgrounded) {
+  if (!Process::CanBackgroundProcesses())
+    return;
+  Process process(SpawnChild("SimpleChildProcess"));
+  int old_priority = process.GetPriority();
+#if defined(OS_WIN)
+  EXPECT_TRUE(process.SetProcessBackgrounded(true));
+  EXPECT_TRUE(process.IsProcessBackgrounded());
+  EXPECT_TRUE(process.SetProcessBackgrounded(false));
+  EXPECT_FALSE(process.IsProcessBackgrounded());
+#elif defined(OS_MACOSX)
+  // On the Mac, backgrounding a process requires a port to that process.
+  // In the browser it's available through the MachBroker class, which is not
+  // part of base. Additionally, there is an indefinite amount of time between
+  // spawning a process and receiving its port. Because this test just checks
+  // the ability to background/foreground a process, we can use the current
+  // process's port instead.
+  FakePortProvider provider;
+  EXPECT_TRUE(process.SetProcessBackgrounded(&provider, true));
+  EXPECT_TRUE(process.IsProcessBackgrounded(&provider));
+  EXPECT_TRUE(process.SetProcessBackgrounded(&provider, false));
+  EXPECT_FALSE(process.IsProcessBackgrounded(&provider));
+
+#else
+  process.SetProcessBackgrounded(true);
+  process.SetProcessBackgrounded(false);
+#endif
+  int new_priority = process.GetPriority();
+  EXPECT_EQ(old_priority, new_priority);
+}
+
+// Same as SetProcessBackgrounded, but applied to the current process, which
+// uses a different code path, at least on Windows.
+TEST_F(ProcessTest, SetProcessBackgroundedSelf) {
+  if (!Process::CanBackgroundProcesses())
+    return;
+  Process process = Process::Current();
+  int old_priority = process.GetPriority();
+#if defined(OS_WIN)
+  EXPECT_TRUE(process.SetProcessBackgrounded(true));
+  EXPECT_TRUE(process.IsProcessBackgrounded());
+  EXPECT_TRUE(process.SetProcessBackgrounded(false));
+  EXPECT_FALSE(process.IsProcessBackgrounded());
+#elif defined(OS_MACOSX)
+  FakePortProvider provider;
+  EXPECT_TRUE(process.SetProcessBackgrounded(&provider, true));
+  EXPECT_TRUE(process.IsProcessBackgrounded(&provider));
+  EXPECT_TRUE(process.SetProcessBackgrounded(&provider, false));
+  EXPECT_FALSE(process.IsProcessBackgrounded(&provider));
+#else
+  process.SetProcessBackgrounded(true);
+  process.SetProcessBackgrounded(false);
+#endif
+  int new_priority = process.GetPriority();
+  EXPECT_EQ(old_priority, new_priority);
+}
+
+// Consumers can use WaitForExitWithTimeout(base::TimeDelta(), nullptr) to
+// check whether a process is still running. This is not always safe because
+// process ids can be reused, which is why we don't export Process::IsRunning()
+// on all platforms. For the controlled scenarios in these test cases, though,
+// the behavior is well defined.
+TEST_F(ProcessTest, CurrentProcessIsRunning) {
+  EXPECT_FALSE(Process::Current().WaitForExitWithTimeout(
+      base::TimeDelta(), nullptr));
+}
+
+#if defined(OS_MACOSX)
+// On Mac OS X, we can detect whether a non-child process is running.
+TEST_F(ProcessTest, PredefinedProcessIsRunning) {
+  // Process 1 is /sbin/launchd; it should always be running.
+  EXPECT_FALSE(Process::Open(1).WaitForExitWithTimeout(
+      base::TimeDelta(), nullptr));
+}
+#endif
+
+TEST_F(ProcessTest, ChildProcessIsRunning) {
+  Process process(SpawnChild("SleepyChildProcess"));
+  EXPECT_FALSE(process.WaitForExitWithTimeout(
+      base::TimeDelta(), nullptr));
+  process.Terminate(0, true);
+  EXPECT_TRUE(process.WaitForExitWithTimeout(
+      base::TimeDelta(), nullptr));
+}
+
+#if defined(OS_CHROMEOS)
+
+// Tests that the function IsProcessBackgroundedCGroup() can parse the contents
+// of the /proc/<pid>/cgroup file successfully.
+TEST_F(ProcessTest, TestIsProcessBackgroundedCGroup) {
+  const char kNotBackgrounded[] = "5:cpuacct,cpu,cpuset:/daemons\n";
+  const char kBackgrounded[] =
+      "2:freezer:/chrome_renderers/to_be_frozen\n"
+      "1:cpu:/chrome_renderers/background\n";
+
+  EXPECT_FALSE(IsProcessBackgroundedCGroup(kNotBackgrounded));
+  EXPECT_TRUE(IsProcessBackgroundedCGroup(kBackgrounded));
+}
+
+#endif  // defined(OS_CHROMEOS)
+
+}  // namespace base
diff --git a/base/process/process_util_unittest.cc b/base/process/process_util_unittest.cc
new file mode 100644
index 0000000..8946669
--- /dev/null
+++ b/base/process/process_util_unittest.cc
@@ -0,0 +1,1140 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#define _CRT_SECURE_NO_WARNINGS
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <limits>
+
+#include "base/command_line.h"
+#include "base/debug/alias.h"
+#include "base/debug/stack_trace.h"
+#include "base/files/file_enumerator.h"
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/files/scoped_file.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/path_service.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/process/kill.h"
+#include "base/process/launch.h"
+#include "base/process/memory.h"
+#include "base/process/process.h"
+#include "base/process/process_metrics.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/test/multiprocess_test.h"
+#include "base/test/scoped_task_environment.h"
+#include "base/test/test_timeouts.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/multiprocess_func_list.h"
+
+#if defined(OS_LINUX)
+#include <malloc.h>
+#include <sched.h>
+#include <sys/syscall.h>
+#endif
+#if defined(OS_POSIX)
+#include <sys/resource.h>
+#endif
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include <dlfcn.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <sched.h>
+#include <signal.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#endif
+#if defined(OS_WIN)
+#include <windows.h>
+#endif
+#if defined(OS_MACOSX)
+#include <mach/vm_param.h>
+#include <malloc/malloc.h>
+#endif
+#if defined(OS_ANDROID)
+#include "third_party/lss/linux_syscall_support.h"
+#endif
+#if defined(OS_FUCHSIA)
+#include <fdio/limits.h>
+#include <zircon/process.h>
+#include <zircon/processargs.h>
+#include <zircon/syscalls.h>
+#include "base/base_paths_fuchsia.h"
+#endif
+
+namespace base {
+
+namespace {
+
+const char kSignalFileSlow[] = "SlowChildProcess.die";
+const char kSignalFileKill[] = "KilledChildProcess.die";
+const char kTestHelper[] = "test_child_process";
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+const char kSignalFileTerm[] = "TerminatedChildProcess.die";
+#endif
+
+#if defined(OS_FUCHSIA)
+const char kSignalFileClone[] = "ClonedTmpDir.die";
+#endif
+
+#if defined(OS_WIN)
+const int kExpectedStillRunningExitCode = 0x102;
+const int kExpectedKilledExitCode = 1;
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+const int kExpectedStillRunningExitCode = 0;
+#endif
+
+// Sleeps until the file |filename| is created.
+void WaitToDie(const char* filename) {
+  FILE* fp;
+  do {
+    PlatformThread::Sleep(TimeDelta::FromMilliseconds(10));
+    fp = fopen(filename, "r");
+  } while (!fp);
+  fclose(fp);
+}
+
+// Signals children they should die now.
+void SignalChildren(const char* filename) {
+  FILE* fp = fopen(filename, "w");
+  fclose(fp);
+}
+
+// Using a pipe to the child to wait for an event was considered, but
+// there were cases in the past where pipes caused problems (other
+// libraries closing the fds, child deadlocking). This is a simple
+// case, so it's not worth the risk.  Using wait loops is discouraged
+// in most instances.
+TerminationStatus WaitForChildTermination(ProcessHandle handle,
+                                          int* exit_code) {
+  // Now we wait until the result is something other than STILL_RUNNING.
+  TerminationStatus status = TERMINATION_STATUS_STILL_RUNNING;
+  const TimeDelta kInterval = TimeDelta::FromMilliseconds(20);
+  TimeDelta waited;
+  do {
+    status = GetTerminationStatus(handle, exit_code);
+    PlatformThread::Sleep(kInterval);
+    waited += kInterval;
+  } while (status == TERMINATION_STATUS_STILL_RUNNING &&
+           waited < TestTimeouts::action_max_timeout());
+
+  return status;
+}
+
+}  // namespace
+
+const int kSuccess = 0;
+
+class ProcessUtilTest : public MultiProcessTest {
+ public:
+  void SetUp() override {
+    ASSERT_TRUE(PathService::Get(DIR_ASSETS, &test_helper_path_));
+    test_helper_path_ = test_helper_path_.AppendASCII(kTestHelper);
+  }
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+  // Spawn a child process that counts how many file descriptors are open.
+  int CountOpenFDsInChild();
+#endif
+  // Converts the filename to a platform-specific file path.
+  // On Android, files cannot be created in arbitrary directories.
+  static std::string GetSignalFilePath(const char* filename);
+
+ protected:
+  base::FilePath test_helper_path_;
+};
+
+std::string ProcessUtilTest::GetSignalFilePath(const char* filename) {
+#if defined(OS_ANDROID) || defined(OS_FUCHSIA)
+  FilePath tmp_dir;
+  PathService::Get(DIR_TEMP, &tmp_dir);
+  tmp_dir = tmp_dir.Append(filename);
+  return tmp_dir.value();
+#else
+  return filename;
+#endif
+}
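+
+// For example, GetSignalFilePath(kSignalFileSlow) yields a path like
+// "/data/local/tmp/SlowChildProcess.die" on Android (the exact temp directory
+// shown here is illustrative and varies by device), and just
+// "SlowChildProcess.die", resolved against the current directory, elsewhere.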
+
+MULTIPROCESS_TEST_MAIN(SimpleChildProcess) {
+  return kSuccess;
+}
+
+// TODO(viettrungluu): This should be in a "MultiProcessTestTest".
+TEST_F(ProcessUtilTest, SpawnChild) {
+  Process process = SpawnChild("SimpleChildProcess");
+  ASSERT_TRUE(process.IsValid());
+  int exit_code;
+  EXPECT_TRUE(process.WaitForExitWithTimeout(TestTimeouts::action_max_timeout(),
+                                             &exit_code));
+}
+
+MULTIPROCESS_TEST_MAIN(SlowChildProcess) {
+  WaitToDie(ProcessUtilTest::GetSignalFilePath(kSignalFileSlow).c_str());
+  return kSuccess;
+}
+
+TEST_F(ProcessUtilTest, KillSlowChild) {
+  const std::string signal_file =
+      ProcessUtilTest::GetSignalFilePath(kSignalFileSlow);
+  remove(signal_file.c_str());
+  Process process = SpawnChild("SlowChildProcess");
+  ASSERT_TRUE(process.IsValid());
+  SignalChildren(signal_file.c_str());
+  int exit_code;
+  EXPECT_TRUE(process.WaitForExitWithTimeout(TestTimeouts::action_max_timeout(),
+                                             &exit_code));
+  remove(signal_file.c_str());
+}
+
+// Times out on Linux and Win, flakes on other platforms, http://crbug.com/95058
+TEST_F(ProcessUtilTest, DISABLED_GetTerminationStatusExit) {
+  const std::string signal_file =
+      ProcessUtilTest::GetSignalFilePath(kSignalFileSlow);
+  remove(signal_file.c_str());
+  Process process = SpawnChild("SlowChildProcess");
+  ASSERT_TRUE(process.IsValid());
+
+  int exit_code = 42;
+  EXPECT_EQ(TERMINATION_STATUS_STILL_RUNNING,
+            GetTerminationStatus(process.Handle(), &exit_code));
+  EXPECT_EQ(kExpectedStillRunningExitCode, exit_code);
+
+  SignalChildren(signal_file.c_str());
+  exit_code = 42;
+  TerminationStatus status =
+      WaitForChildTermination(process.Handle(), &exit_code);
+  EXPECT_EQ(TERMINATION_STATUS_NORMAL_TERMINATION, status);
+  EXPECT_EQ(kSuccess, exit_code);
+  remove(signal_file.c_str());
+}
+
+#if defined(OS_FUCHSIA)
+
+MULTIPROCESS_TEST_MAIN(CheckTmpFileExists) {
+  // Look through the filesystem to ensure that no other directories
+  // besides "tmp" are in the namespace.
+  base::FileEnumerator enumerator(
+      base::FilePath("/"), false,
+      base::FileEnumerator::FILES | base::FileEnumerator::DIRECTORIES);
+  base::FilePath next_path;
+  while (!(next_path = enumerator.Next()).empty()) {
+    if (next_path != base::FilePath("/tmp")) {
+      LOG(ERROR) << "Clone policy violation: found non-tmp directory "
+                 << next_path.MaybeAsASCII();
+      return 1;
+    }
+  }
+  WaitToDie(ProcessUtilTest::GetSignalFilePath(kSignalFileClone).c_str());
+  return kSuccess;
+}
+
+TEST_F(ProcessUtilTest, SelectivelyClonedDir) {
+  const std::string signal_file =
+      ProcessUtilTest::GetSignalFilePath(kSignalFileClone);
+  remove(signal_file.c_str());
+
+  LaunchOptions options;
+  options.paths_to_map.push_back(base::FilePath("/tmp"));
+  options.clone_flags = LP_CLONE_FDIO_STDIO;
+
+  Process process(SpawnChildWithOptions("CheckTmpFileExists", options));
+  ASSERT_TRUE(process.IsValid());
+
+  SignalChildren(signal_file.c_str());
+
+  int exit_code = 42;
+  EXPECT_TRUE(process.WaitForExit(&exit_code));
+  EXPECT_EQ(kSuccess, exit_code);
+}
+
+// Test that we can clone other directories. CheckTmpFileExists will return an
+// error code if it detects a directory other than "/tmp", so we can use that as
+// a signal that it successfully detected another entry in the root namespace.
+TEST_F(ProcessUtilTest, CloneAlternateDir) {
+  const std::string signal_file =
+      ProcessUtilTest::GetSignalFilePath(kSignalFileClone);
+  remove(signal_file.c_str());
+
+  LaunchOptions options;
+  options.paths_to_map.push_back(base::FilePath("/tmp"));
+  options.paths_to_map.push_back(base::FilePath("/data"));
+  options.clone_flags = LP_CLONE_FDIO_STDIO;
+
+  Process process(SpawnChildWithOptions("CheckTmpFileExists", options));
+  ASSERT_TRUE(process.IsValid());
+
+  SignalChildren(signal_file.c_str());
+
+  int exit_code = 42;
+  EXPECT_TRUE(process.WaitForExit(&exit_code));
+  EXPECT_EQ(1, exit_code);
+}
+
+#endif  // defined(OS_FUCHSIA)
+
+// On Android SpawnProcess() doesn't use LaunchProcess() and doesn't support
+// LaunchOptions::current_directory.
+#if !defined(OS_ANDROID)
+MULTIPROCESS_TEST_MAIN(CheckCwdProcess) {
+  FilePath expected;
+  CHECK(GetTempDir(&expected));
+  expected = MakeAbsoluteFilePath(expected);
+  CHECK(!expected.empty());
+
+  FilePath actual;
+  CHECK(GetCurrentDirectory(&actual));
+  actual = MakeAbsoluteFilePath(actual);
+  CHECK(!actual.empty());
+
+  CHECK(expected == actual) << "Expected: " << expected.value()
+                            << "  Actual: " << actual.value();
+  return kSuccess;
+}
+
+TEST_F(ProcessUtilTest, CurrentDirectory) {
+  // TODO(rickyz): Add support for passing arguments to multiprocess children,
+  // then create a special directory for this test.
+  FilePath tmp_dir;
+  ASSERT_TRUE(GetTempDir(&tmp_dir));
+
+  LaunchOptions options;
+  options.current_directory = tmp_dir;
+
+  Process process(SpawnChildWithOptions("CheckCwdProcess", options));
+  ASSERT_TRUE(process.IsValid());
+
+  int exit_code = 42;
+  EXPECT_TRUE(process.WaitForExit(&exit_code));
+  EXPECT_EQ(kSuccess, exit_code);
+}
+#endif  // !defined(OS_ANDROID)
+
+#if defined(OS_WIN)
+// TODO(cpu): figure out how to test this on other platforms.
+TEST_F(ProcessUtilTest, GetProcId) {
+  ProcessId id1 = GetProcId(GetCurrentProcess());
+  EXPECT_NE(0ul, id1);
+  Process process = SpawnChild("SimpleChildProcess");
+  ASSERT_TRUE(process.IsValid());
+  ProcessId id2 = process.Pid();
+  EXPECT_NE(0ul, id2);
+  EXPECT_NE(id1, id2);
+}
+#endif  // defined(OS_WIN)
+
+#if !defined(OS_MACOSX) && !defined(OS_ANDROID)
+// This test is disabled on Mac, since it's flaky due to ReportCrash
+// taking a variable amount of time to parse and load the debug and
+// symbol data for this unit test's executable before firing the
+// signal handler.
+//
+// TODO(gspencer): turn this test process into a very small program
+// with no symbols (instead of using the multiprocess testing
+// framework) to reduce the ReportCrash overhead.
+//
+// It is disabled on Android as MultiprocessTests are started as services that
+// the framework restarts on crashes.
+const char kSignalFileCrash[] = "CrashingChildProcess.die";
+
+MULTIPROCESS_TEST_MAIN(CrashingChildProcess) {
+  WaitToDie(ProcessUtilTest::GetSignalFilePath(kSignalFileCrash).c_str());
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+  // Have to disable the signal handler for segv so we can get a crash
+  // instead of an abnormal termination through the crash dump handler.
+  ::signal(SIGSEGV, SIG_DFL);
+#endif
+  // Make this process have a segmentation fault.
+  volatile int* oops = nullptr;
+  *oops = 0xDEAD;
+  return 1;
+}
+
+// This test intentionally crashes, so we don't need to run it under
+// AddressSanitizer.
+#if defined(ADDRESS_SANITIZER)
+#define MAYBE_GetTerminationStatusCrash DISABLED_GetTerminationStatusCrash
+#else
+#define MAYBE_GetTerminationStatusCrash GetTerminationStatusCrash
+#endif
+TEST_F(ProcessUtilTest, MAYBE_GetTerminationStatusCrash) {
+  const std::string signal_file =
+    ProcessUtilTest::GetSignalFilePath(kSignalFileCrash);
+  remove(signal_file.c_str());
+  Process process = SpawnChild("CrashingChildProcess");
+  ASSERT_TRUE(process.IsValid());
+
+  int exit_code = 42;
+  EXPECT_EQ(TERMINATION_STATUS_STILL_RUNNING,
+            GetTerminationStatus(process.Handle(), &exit_code));
+  EXPECT_EQ(kExpectedStillRunningExitCode, exit_code);
+
+  SignalChildren(signal_file.c_str());
+  exit_code = 42;
+  TerminationStatus status =
+      WaitForChildTermination(process.Handle(), &exit_code);
+  EXPECT_EQ(TERMINATION_STATUS_PROCESS_CRASHED, status);
+
+#if defined(OS_WIN)
+  EXPECT_EQ(static_cast<int>(0xc0000005), exit_code);
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  int signaled = WIFSIGNALED(exit_code);
+  EXPECT_NE(0, signaled);
+  int signal = WTERMSIG(exit_code);
+  EXPECT_EQ(SIGSEGV, signal);
+#endif
+
+  // Reset signal handlers back to "normal".
+  debug::EnableInProcessStackDumping();
+  remove(signal_file.c_str());
+}
+#endif  // !defined(OS_MACOSX) && !defined(OS_ANDROID)
+
+MULTIPROCESS_TEST_MAIN(KilledChildProcess) {
+  WaitToDie(ProcessUtilTest::GetSignalFilePath(kSignalFileKill).c_str());
+#if defined(OS_WIN)
+  // Kill ourselves.
+  HANDLE handle = ::OpenProcess(PROCESS_ALL_ACCESS, 0, ::GetCurrentProcessId());
+  ::TerminateProcess(handle, kExpectedKilledExitCode);
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  // Send a SIGKILL to this process, just like the OOM killer would.
+  ::kill(getpid(), SIGKILL);
+#endif
+  return 1;
+}
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+MULTIPROCESS_TEST_MAIN(TerminatedChildProcess) {
+  WaitToDie(ProcessUtilTest::GetSignalFilePath(kSignalFileTerm).c_str());
+  // Send a SIGTERM to this process.
+  ::kill(getpid(), SIGTERM);
+  return 1;
+}
+#endif  // defined(OS_POSIX) || defined(OS_FUCHSIA)
+
+TEST_F(ProcessUtilTest, GetTerminationStatusSigKill) {
+  const std::string signal_file =
+    ProcessUtilTest::GetSignalFilePath(kSignalFileKill);
+  remove(signal_file.c_str());
+  Process process = SpawnChild("KilledChildProcess");
+  ASSERT_TRUE(process.IsValid());
+
+  int exit_code = 42;
+  EXPECT_EQ(TERMINATION_STATUS_STILL_RUNNING,
+            GetTerminationStatus(process.Handle(), &exit_code));
+  EXPECT_EQ(kExpectedStillRunningExitCode, exit_code);
+
+  SignalChildren(signal_file.c_str());
+  exit_code = 42;
+  TerminationStatus status =
+      WaitForChildTermination(process.Handle(), &exit_code);
+#if defined(OS_CHROMEOS)
+  EXPECT_EQ(TERMINATION_STATUS_PROCESS_WAS_KILLED_BY_OOM, status);
+#else
+  EXPECT_EQ(TERMINATION_STATUS_PROCESS_WAS_KILLED, status);
+#endif
+
+#if defined(OS_WIN)
+  EXPECT_EQ(kExpectedKilledExitCode, exit_code);
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  int signaled = WIFSIGNALED(exit_code);
+  EXPECT_NE(0, signaled);
+  int signal = WTERMSIG(exit_code);
+  EXPECT_EQ(SIGKILL, signal);
+#endif
+  remove(signal_file.c_str());
+}
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+TEST_F(ProcessUtilTest, GetTerminationStatusSigTerm) {
+  const std::string signal_file =
+    ProcessUtilTest::GetSignalFilePath(kSignalFileTerm);
+  remove(signal_file.c_str());
+  Process process = SpawnChild("TerminatedChildProcess");
+  ASSERT_TRUE(process.IsValid());
+
+  int exit_code = 42;
+  EXPECT_EQ(TERMINATION_STATUS_STILL_RUNNING,
+            GetTerminationStatus(process.Handle(), &exit_code));
+  EXPECT_EQ(kExpectedStillRunningExitCode, exit_code);
+
+  SignalChildren(signal_file.c_str());
+  exit_code = 42;
+  TerminationStatus status =
+      WaitForChildTermination(process.Handle(), &exit_code);
+  EXPECT_EQ(TERMINATION_STATUS_PROCESS_WAS_KILLED, status);
+
+  int signaled = WIFSIGNALED(exit_code);
+  EXPECT_NE(0, signaled);
+  int signal = WTERMSIG(exit_code);
+  EXPECT_EQ(SIGTERM, signal);
+  remove(signal_file.c_str());
+}
+#endif  // defined(OS_POSIX) || defined(OS_FUCHSIA)
+
+TEST_F(ProcessUtilTest, EnsureTerminationUndying) {
+  test::ScopedTaskEnvironment task_environment;
+
+  Process child_process = SpawnChild("process_util_test_never_die");
+  ASSERT_TRUE(child_process.IsValid());
+
+  EnsureProcessTerminated(child_process.Duplicate());
+
+  // Allow a generous timeout, to cope with slow/loaded test bots.
+  EXPECT_TRUE(child_process.WaitForExitWithTimeout(
+      TestTimeouts::action_max_timeout(), nullptr));
+}
+
+MULTIPROCESS_TEST_MAIN(process_util_test_never_die) {
+  while (1) {
+    PlatformThread::Sleep(TimeDelta::FromSeconds(500));
+  }
+  return kSuccess;
+}
+
+TEST_F(ProcessUtilTest, EnsureTerminationGracefulExit) {
+  test::ScopedTaskEnvironment task_environment;
+
+  Process child_process = SpawnChild("process_util_test_die_immediately");
+  ASSERT_TRUE(child_process.IsValid());
+
+  // Wait for the child process to actually exit.
+  child_process.Duplicate().WaitForExitWithTimeout(
+      TestTimeouts::action_max_timeout(), nullptr);
+
+  EnsureProcessTerminated(child_process.Duplicate());
+
+  // Verify that the process is really, truly gone.
+  EXPECT_TRUE(child_process.WaitForExitWithTimeout(
+      TestTimeouts::action_max_timeout(), nullptr));
+}
+
+MULTIPROCESS_TEST_MAIN(process_util_test_die_immediately) {
+  return kSuccess;
+}
+
+#if defined(OS_WIN)
+// TODO(estade): if possible, port this test.
+TEST_F(ProcessUtilTest, LaunchAsUser) {
+  UserTokenHandle token;
+  ASSERT_TRUE(OpenProcessToken(GetCurrentProcess(), TOKEN_ALL_ACCESS, &token));
+  LaunchOptions options;
+  options.as_user = token;
+  EXPECT_TRUE(
+      LaunchProcess(MakeCmdLine("SimpleChildProcess"), options).IsValid());
+}
+
+static const char kEventToTriggerHandleSwitch[] = "event-to-trigger-handle";
+
+MULTIPROCESS_TEST_MAIN(TriggerEventChildProcess) {
+  std::string handle_value_string =
+      CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
+          kEventToTriggerHandleSwitch);
+  CHECK(!handle_value_string.empty());
+
+  uint64_t handle_value_uint64;
+  CHECK(StringToUint64(handle_value_string, &handle_value_uint64));
+  // Give ownership of the handle to |event|.
+  WaitableEvent event(
+      win::ScopedHandle(reinterpret_cast<HANDLE>(handle_value_uint64)));
+
+  event.Signal();
+
+  return 0;
+}
+
+TEST_F(ProcessUtilTest, InheritSpecifiedHandles) {
+  // Manually create the event, so that it can be inheritable.
+  SECURITY_ATTRIBUTES security_attributes = {};
+  security_attributes.nLength = static_cast<DWORD>(sizeof(security_attributes));
+  security_attributes.lpSecurityDescriptor = NULL;
+  security_attributes.bInheritHandle = true;
+
+  // Takes ownership of the event handle.
+  WaitableEvent event(
+      win::ScopedHandle(CreateEvent(&security_attributes, true, false, NULL)));
+  LaunchOptions options;
+  options.handles_to_inherit.emplace_back(event.handle());
+
+  CommandLine cmd_line = MakeCmdLine("TriggerEventChildProcess");
+  cmd_line.AppendSwitchASCII(
+      kEventToTriggerHandleSwitch,
+      NumberToString(reinterpret_cast<uint64_t>(event.handle())));
+
+  // Launch the process and wait for it to trigger the event.
+  ASSERT_TRUE(LaunchProcess(cmd_line, options).IsValid());
+  EXPECT_TRUE(event.TimedWait(TestTimeouts::action_max_timeout()));
+}
+#endif  // defined(OS_WIN)
+
+TEST_F(ProcessUtilTest, GetAppOutput) {
+  base::CommandLine command(test_helper_path_);
+  command.AppendArg("hello");
+  command.AppendArg("there");
+  command.AppendArg("good");
+  command.AppendArg("people");
+  std::string output;
+  EXPECT_TRUE(GetAppOutput(command, &output));
+  EXPECT_EQ("hello there good people", output);
+  output.clear();
+
+  const char* kEchoMessage = "blah";
+  command = base::CommandLine(test_helper_path_);
+  command.AppendArg("-x");
+  command.AppendArg("28");
+  command.AppendArg(kEchoMessage);
+  EXPECT_FALSE(GetAppOutput(command, &output));
+  EXPECT_EQ(kEchoMessage, output);
+}
+
+TEST_F(ProcessUtilTest, GetAppOutputWithExitCode) {
+  const char* kEchoMessage1 = "doge";
+  int exit_code = -1;
+  base::CommandLine command(test_helper_path_);
+  command.AppendArg(kEchoMessage1);
+  std::string output;
+  EXPECT_TRUE(GetAppOutputWithExitCode(command, &output, &exit_code));
+  EXPECT_EQ(kEchoMessage1, output);
+  EXPECT_EQ(0, exit_code);
+  output.clear();
+
+  const char* kEchoMessage2 = "pupper";
+  const int kExpectedExitCode = 42;
+  command = base::CommandLine(test_helper_path_);
+  command.AppendArg("-x");
+  command.AppendArg(base::IntToString(kExpectedExitCode));
+  command.AppendArg(kEchoMessage2);
+#if defined(OS_WIN)
+  // On Windows, anything that quits with a nonzero status code is handled as a
+  // "crash", so just ignore GetAppOutputWithExitCode's return value.
+  GetAppOutputWithExitCode(command, &output, &exit_code);
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  EXPECT_TRUE(GetAppOutputWithExitCode(command, &output, &exit_code));
+#endif
+  EXPECT_EQ(kEchoMessage2, output);
+  EXPECT_EQ(kExpectedExitCode, exit_code);
+}
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+
+namespace {
+
+// Returns the maximum number of files that a process can have open.
+// Returns 0 on error.
+int GetMaxFilesOpenInProcess() {
+#if defined(OS_FUCHSIA)
+  return FDIO_MAX_FD;
+#else
+  struct rlimit rlim;
+  if (getrlimit(RLIMIT_NOFILE, &rlim) != 0) {
+    return 0;
+  }
+
+  // rlim_t is a uint64_t; clip to the maximum int. We do this because FD
+  // numbers are ints, which are 32 bits on all supported platforms.
+  rlim_t max_int = static_cast<rlim_t>(std::numeric_limits<int32_t>::max());
+  if (rlim.rlim_cur > max_int) {
+    return max_int;
+  }
+
+  return rlim.rlim_cur;
+#endif  // defined(OS_FUCHSIA)
+}
+
+const int kChildPipe = 20;  // FD # for write end of pipe in child process.
+
+#if defined(OS_MACOSX)
+
+// <http://opensource.apple.com/source/xnu/xnu-2422.1.72/bsd/sys/guarded.h>
+#if !defined(_GUARDID_T)
+#define _GUARDID_T
+typedef __uint64_t guardid_t;
+#endif  // _GUARDID_T
+
+// From .../MacOSX10.9.sdk/usr/include/sys/syscall.h
+#if !defined(SYS_change_fdguard_np)
+#define SYS_change_fdguard_np 444
+#endif
+
+// <http://opensource.apple.com/source/xnu/xnu-2422.1.72/bsd/sys/guarded.h>
+#if !defined(GUARD_DUP)
+#define GUARD_DUP (1u << 1)
+#endif
+
+// <http://opensource.apple.com/source/xnu/xnu-2422.1.72/bsd/kern/kern_guarded.c?txt>
+//
+// Atomically replaces |guard|/|guardflags| with |nguard|/|nguardflags| on |fd|.
+int change_fdguard_np(int fd,
+                      const guardid_t *guard, u_int guardflags,
+                      const guardid_t *nguard, u_int nguardflags,
+                      int *fdflagsp) {
+  return syscall(SYS_change_fdguard_np, fd, guard, guardflags,
+                 nguard, nguardflags, fdflagsp);
+}
+
+// Attempts to set a file-descriptor guard on |fd|.  On success, removes the
+// guard and returns |true| to indicate that |fd| can be guarded.  Returning
+// |false| means either that |fd| is guarded by some other code, or (more
+// likely) that |fd| is invalid (EBADF).
+//
+// Starting with 10.9, libdispatch began setting GUARD_DUP on a file descriptor.
+// Unfortunately, it is spun up as part of +[NSApplication initialize], which is
+// not really something that Chromium can avoid using on OSX.  See
+// <http://crbug.com/338157>.  This function allows querying whether the file
+// descriptor is guarded before attempting to close it.
+bool CanGuardFd(int fd) {
+  // Saves the original flags to reset later.
+  int original_fdflags = 0;
+
+  // This can be any value at all; it just has to match up between the two
+  // calls.
+  const guardid_t kGuard = 15;
+
+  // Attempt to change the guard.  This can fail with EBADF if the file
+  // descriptor is bad, or EINVAL if the fd already has a guard set.
+  int ret =
+      change_fdguard_np(fd, NULL, 0, &kGuard, GUARD_DUP, &original_fdflags);
+  if (ret == -1)
+    return false;
+
+  // Remove the guard.  It should not be possible to fail to remove the guard
+  // we just added.
+  ret = change_fdguard_np(fd, &kGuard, GUARD_DUP, NULL, 0, &original_fdflags);
+  DPCHECK(ret == 0);
+
+  return true;
+}
+#endif  // defined(OS_MACOSX)
+
+}  // namespace
+
+MULTIPROCESS_TEST_MAIN(ProcessUtilsLeakFDChildProcess) {
+  // This child process counts the number of open FDs, then writes that
+  // number out to a pipe connected to the parent.
+  int num_open_files = 0;
+  int write_pipe = kChildPipe;
+  int max_files = GetMaxFilesOpenInProcess();
+  for (int i = STDERR_FILENO + 1; i < max_files; i++) {
+#if defined(OS_MACOSX)
+    // Ignore guarded or invalid file descriptors.
+    if (!CanGuardFd(i))
+      continue;
+#endif
+
+    if (i != kChildPipe) {
+      int fd;
+      if ((fd = HANDLE_EINTR(dup(i))) != -1) {
+        close(fd);
+        num_open_files += 1;
+      }
+    }
+  }
+
+  int written = HANDLE_EINTR(write(write_pipe, &num_open_files,
+                                   sizeof(num_open_files)));
+  DCHECK_EQ(static_cast<size_t>(written), sizeof(num_open_files));
+  int ret = IGNORE_EINTR(close(write_pipe));
+  DPCHECK(ret == 0);
+
+  return 0;
+}
+
+int ProcessUtilTest::CountOpenFDsInChild() {
+  int fds[2];
+  if (pipe(fds) < 0)
+    NOTREACHED();
+
+  LaunchOptions options;
+  options.fds_to_remap.emplace_back(fds[1], kChildPipe);
+  Process process =
+      SpawnChildWithOptions("ProcessUtilsLeakFDChildProcess", options);
+  CHECK(process.IsValid());
+  int ret = IGNORE_EINTR(close(fds[1]));
+  DPCHECK(ret == 0);
+
+  // Read the number of open files in the child process from the pipe.
+  int num_open_files = -1;
+  ssize_t bytes_read =
+      HANDLE_EINTR(read(fds[0], &num_open_files, sizeof(num_open_files)));
+  CHECK_EQ(bytes_read, static_cast<ssize_t>(sizeof(num_open_files)));
+
+#if defined(THREAD_SANITIZER)
+  // Compiler-based ThreadSanitizer makes this test slow.
+  TimeDelta timeout = TimeDelta::FromSeconds(3);
+#else
+  TimeDelta timeout = TimeDelta::FromSeconds(1);
+#endif
+  int exit_code;
+  CHECK(process.WaitForExitWithTimeout(timeout, &exit_code));
+  ret = IGNORE_EINTR(close(fds[0]));
+  DPCHECK(ret == 0);
+
+  return num_open_files;
+}
+
+#if defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER)
+// ProcessUtilTest.FDRemapping is flaky when run under xvfb-run on Precise.
+// The problem is 100% reproducible with both ASan and TSan.
+// See http://crbug.com/136720.
+#define MAYBE_FDRemapping DISABLED_FDRemapping
+#else
+#define MAYBE_FDRemapping FDRemapping
+#endif  // defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER)
+TEST_F(ProcessUtilTest, MAYBE_FDRemapping) {
+  int fds_before = CountOpenFDsInChild();
+
+  // Open some dummy fds to make sure they don't propagate over to the
+  // child process.
+  int dev_null = open("/dev/null", O_RDONLY);
+  DPCHECK(dev_null != -1);
+  int sockets[2];
+  int ret = socketpair(AF_UNIX, SOCK_STREAM, 0, sockets);
+  DPCHECK(ret == 0);
+
+  int fds_after = CountOpenFDsInChild();
+
+  ASSERT_EQ(fds_after, fds_before);
+
+  ret = IGNORE_EINTR(close(sockets[0]));
+  DPCHECK(ret == 0);
+  ret = IGNORE_EINTR(close(sockets[1]));
+  DPCHECK(ret == 0);
+  ret = IGNORE_EINTR(close(dev_null));
+  DPCHECK(ret == 0);
+}
+
+const char kPipeValue = '\xcc';
+MULTIPROCESS_TEST_MAIN(ProcessUtilsVerifyStdio) {
+  // Write to stdio so the parent process can observe output.
+  CHECK_EQ(1, HANDLE_EINTR(write(STDOUT_FILENO, &kPipeValue, 1)));
+
+  // Close all of the handles, to verify they are valid.
+  CHECK_EQ(0, IGNORE_EINTR(close(STDIN_FILENO)));
+  CHECK_EQ(0, IGNORE_EINTR(close(STDOUT_FILENO)));
+  CHECK_EQ(0, IGNORE_EINTR(close(STDERR_FILENO)));
+  return 0;
+}
+
+TEST_F(ProcessUtilTest, FDRemappingIncludesStdio) {
+  int dev_null = open("/dev/null", O_RDONLY);
+  ASSERT_LT(2, dev_null);
+
+  // Back up stdio and replace it with the write end of a pipe, for our
+  // child process to inherit.
+  int pipe_fds[2];
+  int result = pipe(pipe_fds);
+  ASSERT_EQ(0, result);
+  int backup_stdio = HANDLE_EINTR(dup(STDOUT_FILENO));
+  ASSERT_LE(0, backup_stdio);
+  result = dup2(pipe_fds[1], STDOUT_FILENO);
+  ASSERT_EQ(STDOUT_FILENO, result);
+
+  // Launch the test process, which should inherit our pipe stdio.
+  LaunchOptions options;
+  options.fds_to_remap.emplace_back(dev_null, dev_null);
+  Process process = SpawnChildWithOptions("ProcessUtilsVerifyStdio", options);
+  ASSERT_TRUE(process.IsValid());
+
+  // Restore stdio, so we can output stuff.
+  result = dup2(backup_stdio, STDOUT_FILENO);
+  ASSERT_EQ(STDOUT_FILENO, result);
+
+  // Close our copy of the write end of the pipe, so that the read()
+  // from the other end will see EOF if it wasn't copied to the child.
+  result = IGNORE_EINTR(close(pipe_fds[1]));
+  ASSERT_EQ(0, result);
+
+  result = IGNORE_EINTR(close(backup_stdio));
+  ASSERT_EQ(0, result);
+  result = IGNORE_EINTR(close(dev_null));
+  ASSERT_EQ(0, result);
+
+  // Read from the pipe to verify that it is connected to the child
+  // process' stdio.
+  char buf[16] = {};
+  EXPECT_EQ(1, HANDLE_EINTR(read(pipe_fds[0], buf, sizeof(buf))));
+  EXPECT_EQ(kPipeValue, buf[0]);
+
+  result = IGNORE_EINTR(close(pipe_fds[0]));
+  ASSERT_EQ(0, result);
+
+  int exit_code;
+  ASSERT_TRUE(
+      process.WaitForExitWithTimeout(TimeDelta::FromSeconds(5), &exit_code));
+  EXPECT_EQ(0, exit_code);
+}
+
+#if defined(OS_FUCHSIA)
+
+const uint16_t kStartupHandleId = 43;
+MULTIPROCESS_TEST_MAIN(ProcessUtilsVerifyHandle) {
+  zx_handle_t handle =
+      zx_get_startup_handle(PA_HND(PA_USER0, kStartupHandleId));
+  CHECK_NE(ZX_HANDLE_INVALID, handle);
+
+  // Write to the pipe so the parent process can observe output.
+  size_t bytes_written = 0;
+  zx_status_t result = zx_socket_write(handle, 0, &kPipeValue,
+                                       sizeof(kPipeValue), &bytes_written);
+  CHECK_EQ(ZX_OK, result);
+  CHECK_EQ(1u, bytes_written);
+
+  CHECK_EQ(ZX_OK, zx_handle_close(handle));
+  return 0;
+}
+
+TEST_F(ProcessUtilTest, LaunchWithHandleTransfer) {
+  // Create a pipe to pass to the child process.
+  zx_handle_t handles[2];
+  zx_status_t result =
+      zx_socket_create(ZX_SOCKET_STREAM, &handles[0], &handles[1]);
+  ASSERT_EQ(ZX_OK, result);
+
+  // Launch the test process, and pass it one end of the pipe.
+  LaunchOptions options;
+  options.handles_to_transfer.push_back(
+      {PA_HND(PA_USER0, kStartupHandleId), handles[0]});
+  Process process = SpawnChildWithOptions("ProcessUtilsVerifyHandle", options);
+  ASSERT_TRUE(process.IsValid());
+
+  // Read from the pipe to verify that the child received it.
+  zx_signals_t signals = 0;
+  result = zx_object_wait_one(
+      handles[1], ZX_SOCKET_READABLE | ZX_SOCKET_PEER_CLOSED,
+      (base::TimeTicks::Now() + TestTimeouts::action_timeout()).ToZxTime(),
+      &signals);
+  ASSERT_EQ(ZX_OK, result);
+  ASSERT_TRUE(signals & ZX_SOCKET_READABLE);
+
+  size_t bytes_read = 0;
+  char buf[16] = {0};
+  result = zx_socket_read(handles[1], 0, buf, sizeof(buf), &bytes_read);
+  EXPECT_EQ(ZX_OK, result);
+  EXPECT_EQ(1u, bytes_read);
+  EXPECT_EQ(kPipeValue, buf[0]);
+
+  CHECK_EQ(ZX_OK, zx_handle_close(handles[1]));
+
+  int exit_code;
+  ASSERT_TRUE(process.WaitForExitWithTimeout(TestTimeouts::action_timeout(),
+                                             &exit_code));
+  EXPECT_EQ(0, exit_code);
+}
+
+#endif  // defined(OS_FUCHSIA)
+
+namespace {
+
+std::string TestLaunchProcess(const std::vector<std::string>& args,
+                              const EnvironmentMap& env_changes,
+                              const bool clear_environ,
+                              const int clone_flags) {
+  int fds[2];
+  PCHECK(pipe(fds) == 0);
+
+  LaunchOptions options;
+  options.wait = true;
+  options.environ = env_changes;
+  options.clear_environ = clear_environ;
+  options.fds_to_remap.emplace_back(fds[1], 1);
+#if defined(OS_LINUX)
+  options.clone_flags = clone_flags;
+#else
+  CHECK_EQ(0, clone_flags);
+#endif  // defined(OS_LINUX)
+  EXPECT_TRUE(LaunchProcess(args, options).IsValid());
+  PCHECK(IGNORE_EINTR(close(fds[1])) == 0);
+
+  char buf[512];
+  const ssize_t n = HANDLE_EINTR(read(fds[0], buf, sizeof(buf)));
+
+  PCHECK(IGNORE_EINTR(close(fds[0])) == 0);
+
+  return std::string(buf, n);
+}
+
+const char kLargeString[] =
+    "0123456789012345678901234567890123456789012345678901234567890123456789"
+    "0123456789012345678901234567890123456789012345678901234567890123456789"
+    "0123456789012345678901234567890123456789012345678901234567890123456789"
+    "0123456789012345678901234567890123456789012345678901234567890123456789"
+    "0123456789012345678901234567890123456789012345678901234567890123456789"
+    "0123456789012345678901234567890123456789012345678901234567890123456789"
+    "0123456789012345678901234567890123456789012345678901234567890123456789";
+
+}  // namespace
+
+TEST_F(ProcessUtilTest, LaunchProcess) {
+  const int no_clone_flags = 0;
+  const bool no_clear_environ = false;
+  const char kBaseTest[] = "BASE_TEST";
+  const std::vector<std::string> kPrintEnvCommand = {test_helper_path_.value(),
+                                                     "-e", kBaseTest};
+
+  EnvironmentMap env_changes;
+  env_changes[kBaseTest] = "bar";
+  EXPECT_EQ("bar", TestLaunchProcess(kPrintEnvCommand, env_changes,
+                                     no_clear_environ, no_clone_flags));
+  env_changes.clear();
+
+  EXPECT_EQ(0, setenv(kBaseTest, "testing", 1 /* override */));
+  EXPECT_EQ("testing", TestLaunchProcess(kPrintEnvCommand, env_changes,
+                                         no_clear_environ, no_clone_flags));
+
+  env_changes[kBaseTest] = std::string();
+  EXPECT_EQ("", TestLaunchProcess(kPrintEnvCommand, env_changes,
+                                  no_clear_environ, no_clone_flags));
+
+  env_changes[kBaseTest] = "foo";
+  EXPECT_EQ("foo", TestLaunchProcess(kPrintEnvCommand, env_changes,
+                                     no_clear_environ, no_clone_flags));
+
+  env_changes.clear();
+  EXPECT_EQ(0, setenv(kBaseTest, kLargeString, 1 /* override */));
+  EXPECT_EQ(std::string(kLargeString),
+            TestLaunchProcess(kPrintEnvCommand, env_changes, no_clear_environ,
+                              no_clone_flags));
+
+  env_changes[kBaseTest] = "wibble";
+  EXPECT_EQ("wibble", TestLaunchProcess(kPrintEnvCommand, env_changes,
+                                        no_clear_environ, no_clone_flags));
+
+#if defined(OS_LINUX)
+  // Test a non-trivial value for clone_flags.
+  EXPECT_EQ("wibble", TestLaunchProcess(kPrintEnvCommand, env_changes,
+                                        no_clear_environ, CLONE_FS));
+
+  EXPECT_EQ("wibble",
+            TestLaunchProcess(kPrintEnvCommand, env_changes,
+                              true /* clear_environ */, no_clone_flags));
+  env_changes.clear();
+  EXPECT_EQ("", TestLaunchProcess(kPrintEnvCommand, env_changes,
+                                  true /* clear_environ */, no_clone_flags));
+#endif  // defined(OS_LINUX)
+}
+
+// There's no such thing as a parent process id on Fuchsia.
+#if !defined(OS_FUCHSIA)
+TEST_F(ProcessUtilTest, GetParentProcessId) {
+  ProcessId ppid = GetParentProcessId(GetCurrentProcessHandle());
+  EXPECT_EQ(ppid, static_cast<ProcessId>(getppid()));
+}
+#endif  // !defined(OS_FUCHSIA)
+
+#if !defined(OS_ANDROID) && !defined(OS_FUCHSIA)
+class WriteToPipeDelegate : public LaunchOptions::PreExecDelegate {
+ public:
+  explicit WriteToPipeDelegate(int fd) : fd_(fd) {}
+  ~WriteToPipeDelegate() override = default;
+  void RunAsyncSafe() override {
+    RAW_CHECK(HANDLE_EINTR(write(fd_, &kPipeValue, 1)) == 1);
+    RAW_CHECK(IGNORE_EINTR(close(fd_)) == 0);
+  }
+
+ private:
+  int fd_;
+  DISALLOW_COPY_AND_ASSIGN(WriteToPipeDelegate);
+};
+
+TEST_F(ProcessUtilTest, PreExecHook) {
+  int pipe_fds[2];
+  ASSERT_EQ(0, pipe(pipe_fds));
+
+  ScopedFD read_fd(pipe_fds[0]);
+  ScopedFD write_fd(pipe_fds[1]);
+
+  WriteToPipeDelegate write_to_pipe_delegate(write_fd.get());
+  LaunchOptions options;
+  options.fds_to_remap.emplace_back(write_fd.get(), write_fd.get());
+  options.pre_exec_delegate = &write_to_pipe_delegate;
+  Process process(SpawnChildWithOptions("SimpleChildProcess", options));
+  ASSERT_TRUE(process.IsValid());
+
+  write_fd.reset();
+  char c;
+  ASSERT_EQ(1, HANDLE_EINTR(read(read_fd.get(), &c, 1)));
+  EXPECT_EQ(c, kPipeValue);
+
+  int exit_code = 42;
+  EXPECT_TRUE(process.WaitForExit(&exit_code));
+  EXPECT_EQ(0, exit_code);
+}
+#endif  // !defined(OS_ANDROID) && !defined(OS_FUCHSIA)
+
+#endif  // !defined(OS_MACOSX) && !defined(OS_ANDROID)
+
+#if defined(OS_LINUX)
+MULTIPROCESS_TEST_MAIN(CheckPidProcess) {
+  const pid_t kInitPid = 1;
+  const pid_t pid = syscall(__NR_getpid);
+  CHECK(pid == kInitPid);
+  CHECK(getpid() == pid);
+  return kSuccess;
+}
+
+#if defined(CLONE_NEWUSER) && defined(CLONE_NEWPID)
+TEST_F(ProcessUtilTest, CloneFlags) {
+  if (!PathExists(FilePath("/proc/self/ns/user")) ||
+      !PathExists(FilePath("/proc/self/ns/pid"))) {
+    // User or PID namespaces are not supported.
+    return;
+  }
+
+  LaunchOptions options;
+  options.clone_flags = CLONE_NEWUSER | CLONE_NEWPID;
+
+  Process process(SpawnChildWithOptions("CheckPidProcess", options));
+  ASSERT_TRUE(process.IsValid());
+
+  int exit_code = 42;
+  EXPECT_TRUE(process.WaitForExit(&exit_code));
+  EXPECT_EQ(kSuccess, exit_code);
+}
+#endif  // defined(CLONE_NEWUSER) && defined(CLONE_NEWPID)
+
+TEST(ForkWithFlagsTest, UpdatesPidCache) {
+  // Warm up the libc pid cache, if there is one.
+  ASSERT_EQ(syscall(__NR_getpid), getpid());
+
+  pid_t ctid = 0;
+  const pid_t pid = ForkWithFlags(SIGCHLD | CLONE_CHILD_SETTID, nullptr, &ctid);
+  if (pid == 0) {
+    // In child.  Check both the raw getpid syscall and the libc getpid wrapper
+    // (which may rely on a pid cache).
+    RAW_CHECK(syscall(__NR_getpid) == ctid);
+    RAW_CHECK(getpid() == ctid);
+    _exit(kSuccess);
+  }
+
+  ASSERT_NE(-1, pid);
+  int status = 42;
+  ASSERT_EQ(pid, HANDLE_EINTR(waitpid(pid, &status, 0)));
+  ASSERT_TRUE(WIFEXITED(status));
+  EXPECT_EQ(kSuccess, WEXITSTATUS(status));
+}
+
+TEST_F(ProcessUtilTest, InvalidCurrentDirectory) {
+  LaunchOptions options;
+  options.current_directory = FilePath("/dev/null");
+
+  Process process(SpawnChildWithOptions("SimpleChildProcess", options));
+  ASSERT_TRUE(process.IsValid());
+
+  int exit_code = kSuccess;
+  EXPECT_TRUE(process.WaitForExit(&exit_code));
+  EXPECT_NE(kSuccess, exit_code);
+}
+#endif  // defined(OS_LINUX)
+
+}  // namespace base
diff --git a/base/process/process_win.cc b/base/process/process_win.cc
new file mode 100644
index 0000000..a2e614c
--- /dev/null
+++ b/base/process/process_win.cc
@@ -0,0 +1,225 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process.h"
+
+#include "base/debug/activity_tracker.h"
+#include "base/logging.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/process/kill.h"
+#include "base/threading/thread_restrictions.h"
+
+#include <windows.h>
+
+namespace {
+
+const DWORD kBasicProcessAccess =
+    PROCESS_TERMINATE | PROCESS_QUERY_INFORMATION | SYNCHRONIZE;
+
+}  // namespace
+
+namespace base {
+
+Process::Process(ProcessHandle handle)
+    : process_(handle), is_current_process_(false) {
+  CHECK_NE(handle, ::GetCurrentProcess());
+}
+
+Process::Process(Process&& other)
+    : process_(other.process_.Take()),
+      is_current_process_(other.is_current_process_) {
+  other.Close();
+}
+
+Process::~Process() {
+}
+
+Process& Process::operator=(Process&& other) {
+  DCHECK_NE(this, &other);
+  process_.Set(other.process_.Take());
+  is_current_process_ = other.is_current_process_;
+  other.Close();
+  return *this;
+}
+
+// static
+Process Process::Current() {
+  Process process;
+  process.is_current_process_ = true;
+  return process;
+}
+
+// static
+Process Process::Open(ProcessId pid) {
+  return Process(::OpenProcess(kBasicProcessAccess, FALSE, pid));
+}
+
+// static
+Process Process::OpenWithExtraPrivileges(ProcessId pid) {
+  DWORD access = kBasicProcessAccess | PROCESS_DUP_HANDLE | PROCESS_VM_READ;
+  return Process(::OpenProcess(access, FALSE, pid));
+}
+
+// static
+Process Process::OpenWithAccess(ProcessId pid, DWORD desired_access) {
+  return Process(::OpenProcess(desired_access, FALSE, pid));
+}
+
+// static
+Process Process::DeprecatedGetProcessFromHandle(ProcessHandle handle) {
+  DCHECK_NE(handle, ::GetCurrentProcess());
+  ProcessHandle out_handle;
+  if (!::DuplicateHandle(GetCurrentProcess(), handle,
+                         GetCurrentProcess(), &out_handle,
+                         0, FALSE, DUPLICATE_SAME_ACCESS)) {
+    return Process();
+  }
+  return Process(out_handle);
+}
+
+// static
+bool Process::CanBackgroundProcesses() {
+  return true;
+}
+
+// static
+void Process::TerminateCurrentProcessImmediately(int exit_code) {
+  ::TerminateProcess(GetCurrentProcess(), exit_code);
+  // There is some ambiguity over whether the call above can return. Rather
+  // than hit confusing crashes later on, we crash right here.
+  IMMEDIATE_CRASH();
+}
+
+bool Process::IsValid() const {
+  return process_.IsValid() || is_current();
+}
+
+ProcessHandle Process::Handle() const {
+  return is_current_process_ ? GetCurrentProcess() : process_.Get();
+}
+
+Process Process::Duplicate() const {
+  if (is_current())
+    return Current();
+
+  ProcessHandle out_handle;
+  if (!IsValid() || !::DuplicateHandle(GetCurrentProcess(),
+                                       Handle(),
+                                       GetCurrentProcess(),
+                                       &out_handle,
+                                       0,
+                                       FALSE,
+                                       DUPLICATE_SAME_ACCESS)) {
+    return Process();
+  }
+  return Process(out_handle);
+}
+
+ProcessId Process::Pid() const {
+  DCHECK(IsValid());
+  return GetProcId(Handle());
+}
+
+bool Process::is_current() const {
+  return is_current_process_;
+}
+
+void Process::Close() {
+  is_current_process_ = false;
+  if (!process_.IsValid())
+    return;
+
+  process_.Close();
+}
+
+bool Process::Terminate(int exit_code, bool wait) const {
+  constexpr DWORD kWaitMs = 60 * 1000;
+
+  // exit_code cannot be implemented.
+  DCHECK(IsValid());
+  bool result = (::TerminateProcess(Handle(), exit_code) != FALSE);
+  if (result) {
+    // The process may not end immediately due to pending I/O.
+    if (wait && ::WaitForSingleObject(Handle(), kWaitMs) != WAIT_OBJECT_0)
+      DPLOG(ERROR) << "Error waiting for process exit";
+    Exited(exit_code);
+  } else {
+    // The process can't be terminated, perhaps because it has already
+    // exited or is in the process of exiting. A non-zero timeout is necessary
+    // here for the same reasons as above.
+    DPLOG(ERROR) << "Unable to terminate process";
+    if (::WaitForSingleObject(Handle(), kWaitMs) == WAIT_OBJECT_0) {
+      DWORD actual_exit;
+      Exited(::GetExitCodeProcess(Handle(), &actual_exit) ? actual_exit
+                                                          : exit_code);
+      result = true;
+    }
+  }
+  return result;
+}
+
+bool Process::WaitForExit(int* exit_code) const {
+  return WaitForExitWithTimeout(TimeDelta::FromMilliseconds(INFINITE),
+                                exit_code);
+}
+
+bool Process::WaitForExitWithTimeout(TimeDelta timeout, int* exit_code) const {
+  if (!timeout.is_zero())
+    internal::AssertBaseSyncPrimitivesAllowed();
+
+  // Record the event that this thread is blocking upon (for hang diagnosis).
+  base::debug::ScopedProcessWaitActivity process_activity(this);
+
+  // Limit the timeout to INFINITE: saturated_cast clamps out-of-range
+  // millisecond values to the DWORD maximum, 0xFFFFFFFF, which is exactly
+  // INFINITE.
+  DWORD timeout_ms = saturated_cast<DWORD>(timeout.InMilliseconds());
+  if (::WaitForSingleObject(Handle(), timeout_ms) != WAIT_OBJECT_0)
+    return false;
+
+  DWORD temp_code;  // Don't clobber out-parameters in case of failure.
+  if (!::GetExitCodeProcess(Handle(), &temp_code))
+    return false;
+
+  if (exit_code)
+    *exit_code = temp_code;
+
+  Exited(temp_code);
+  return true;
+}
+
+void Process::Exited(int exit_code) const {
+  base::debug::GlobalActivityTracker::RecordProcessExitIfEnabled(Pid(),
+                                                                 exit_code);
+}
+
+bool Process::IsProcessBackgrounded() const {
+  DCHECK(IsValid());
+  DWORD priority = GetPriority();
+  if (priority == 0)
+    return false;  // Failure case.
+  return ((priority == BELOW_NORMAL_PRIORITY_CLASS) ||
+          (priority == IDLE_PRIORITY_CLASS));
+}
+
+bool Process::SetProcessBackgrounded(bool value) {
+  DCHECK(IsValid());
+  // Vista and above introduce a real background mode, which sets the priority
+  // class not only on the threads but also on the IO they generate.
+  // Unfortunately, it can only be set for the calling process.
+  DWORD priority;
+  if (is_current()) {
+    priority = value ? PROCESS_MODE_BACKGROUND_BEGIN :
+                       PROCESS_MODE_BACKGROUND_END;
+  } else {
+    priority = value ? IDLE_PRIORITY_CLASS : NORMAL_PRIORITY_CLASS;
+  }
+
+  return (::SetPriorityClass(Handle(), priority) != 0);
+}
+
+int Process::GetPriority() const {
+  DCHECK(IsValid());
+  return ::GetPriorityClass(Handle());
+}
+
+}  // namespace base
diff --git a/base/profiler/OWNERS b/base/profiler/OWNERS
new file mode 100644
index 0000000..81ff9fa
--- /dev/null
+++ b/base/profiler/OWNERS
@@ -0,0 +1,5 @@
+# Stack sampling profiler
+per-file native_stack_sampler*=wittman@chromium.org
+per-file stack_sampling_profiler*=wittman@chromium.org
+per-file test_support_library*=wittman@chromium.org
+per-file win32_stack_frame_unwinder*=wittman@chromium.org
diff --git a/base/profiler/native_stack_sampler.cc b/base/profiler/native_stack_sampler.cc
new file mode 100644
index 0000000..6eed54f
--- /dev/null
+++ b/base/profiler/native_stack_sampler.cc
@@ -0,0 +1,34 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/profiler/native_stack_sampler.h"
+
+#include "base/memory/ptr_util.h"
+
+namespace base {
+
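+// The requested byte size is rounded up to a whole number of uintptr_t words
+// so the copied stack stays word-aligned; e.g. a 9-byte request on a 64-bit
+// platform allocates two 8-byte words.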
+NativeStackSampler::StackBuffer::StackBuffer(size_t buffer_size)
+    : buffer_(new uintptr_t[(buffer_size + sizeof(uintptr_t) - 1) /
+                            sizeof(uintptr_t)]),
+      size_(buffer_size) {}
+
+NativeStackSampler::StackBuffer::~StackBuffer() = default;
+
+NativeStackSampler::NativeStackSampler() = default;
+
+NativeStackSampler::~NativeStackSampler() = default;
+
+std::unique_ptr<NativeStackSampler::StackBuffer>
+NativeStackSampler::CreateStackBuffer() {
+  size_t size = GetStackBufferSize();
+  if (size == 0)
+    return nullptr;
+  return std::make_unique<StackBuffer>(size);
+}
+
+NativeStackSamplerTestDelegate::~NativeStackSamplerTestDelegate() = default;
+
+NativeStackSamplerTestDelegate::NativeStackSamplerTestDelegate() = default;
+
+}  // namespace base
diff --git a/base/profiler/native_stack_sampler.h b/base/profiler/native_stack_sampler.h
new file mode 100644
index 0000000..ebd7c3c
--- /dev/null
+++ b/base/profiler/native_stack_sampler.h
@@ -0,0 +1,112 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_PROFILER_NATIVE_STACK_SAMPLER_H_
+#define BASE_PROFILER_NATIVE_STACK_SAMPLER_H_
+
+#include <memory>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/profiler/stack_sampling_profiler.h"
+#include "base/threading/platform_thread.h"
+
+namespace base {
+
+class NativeStackSamplerTestDelegate;
+
+// NativeStackSampler is an implementation detail of StackSamplingProfiler. It
+// abstracts the native implementation required to record a stack sample for a
+// given thread.
+class NativeStackSampler {
+ public:
+  // This class contains a buffer for stack copies that can be shared across
+  // multiple instances of NativeStackSampler.
+  class StackBuffer {
+   public:
+    explicit StackBuffer(size_t buffer_size);
+    ~StackBuffer();
+
+    void* buffer() const { return buffer_.get(); }
+    size_t size() const { return size_; }
+
+   private:
+    // The word-aligned buffer.
+    const std::unique_ptr<uintptr_t[]> buffer_;
+
+    // The size of the buffer.
+    const size_t size_;
+
+    DISALLOW_COPY_AND_ASSIGN(StackBuffer);
+  };
+
+  // The callback type used to add annotations to a sample during collection.
+  // This is passed to the native sampler to be applied at the most appropriate
+  // time. It is a simple function-pointer because the generated code must be
+  // completely predictable and do nothing that could acquire a mutex; a
+  // Callback object is code outside the control of this object and could,
+  // for example, acquire a mutex as part of allocating memory for a LOG
+  // message.
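+  //
+  // For illustration, an annotator could look like the sketch below
+  // (|g_process_phase| is a hypothetical lock-free atomic maintained
+  // elsewhere, and Sample's |process_milestones| member is assumed here; the
+  // body only reads state and writes to |sample|, so it takes no locks):
+  //
+  //   void AnnotateSample(StackSamplingProfiler::Sample* sample) {
+  //     sample->process_milestones = g_process_phase.load();
+  //   }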
+  using AnnotateCallback = void (*)(StackSamplingProfiler::Sample*);
+
+  virtual ~NativeStackSampler();
+
+  // Creates a stack sampler that records samples for |thread_id|. Returns
+  // null if this platform does not support stack sampling.
+  static std::unique_ptr<NativeStackSampler> Create(
+      PlatformThreadId thread_id,
+      AnnotateCallback annotator,
+      NativeStackSamplerTestDelegate* test_delegate);
+
+  // Gets the required size of the stack buffer.
+  static size_t GetStackBufferSize();
+
+  // Creates an instance of a stack buffer that can be used for calls to any
+  // NativeStackSampler object.
+  static std::unique_ptr<StackBuffer> CreateStackBuffer();
+
+  // The following functions are all called on the SamplingThread (not the
+  // thread being sampled).
+
+  // Notifies the sampler that we're starting to record a new profile. Modules
+  // shared across samples in the profile should be recorded in |modules|.
+  virtual void ProfileRecordingStarting(
+      std::vector<StackSamplingProfiler::Module>* modules) = 0;
+
+  // Records a stack sample to |sample|.
+  virtual void RecordStackSample(StackBuffer* stackbuffer,
+                                 StackSamplingProfiler::Sample* sample) = 0;
+
+  // Notifies the sampler that we've stopped recording the current
+  // profile.
+  virtual void ProfileRecordingStopped(StackBuffer* stackbuffer) = 0;
+
+ protected:
+  NativeStackSampler();
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(NativeStackSampler);
+};
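+
+// For illustration, the SamplingThread drives a sampler roughly as sketched
+// below (|sampler|, |stack_buffer|, |profile| and |still_sampling| are
+// hypothetical locals; error handling is omitted):
+//
+//   sampler->ProfileRecordingStarting(&profile.modules);
+//   while (still_sampling) {
+//     StackSamplingProfiler::Sample sample;
+//     sampler->RecordStackSample(stack_buffer.get(), &sample);
+//   }
+//   sampler->ProfileRecordingStopped(stack_buffer.get());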
+
+// NativeStackSamplerTestDelegate provides seams for test code to execute during
+// stack collection.
+class BASE_EXPORT NativeStackSamplerTestDelegate {
+ public:
+  virtual ~NativeStackSamplerTestDelegate();
+
+  // Called after copying the stack and resuming the target thread, but prior to
+  // walking the stack. Invoked on the SamplingThread.
+  virtual void OnPreStackWalk() = 0;
+
+ protected:
+  NativeStackSamplerTestDelegate();
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(NativeStackSamplerTestDelegate);
+};
+
+}  // namespace base
+
+#endif  // BASE_PROFILER_NATIVE_STACK_SAMPLER_H_
diff --git a/base/profiler/native_stack_sampler_mac.cc b/base/profiler/native_stack_sampler_mac.cc
new file mode 100644
index 0000000..a161173
--- /dev/null
+++ b/base/profiler/native_stack_sampler_mac.cc
@@ -0,0 +1,666 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/profiler/native_stack_sampler.h"
+
+#include <dlfcn.h>
+#include <libkern/OSByteOrder.h>
+#include <libunwind.h>
+#include <mach-o/compact_unwind_encoding.h>
+#include <mach-o/getsect.h>
+#include <mach-o/swap.h>
+#include <mach/kern_return.h>
+#include <mach/mach.h>
+#include <mach/thread_act.h>
+#include <mach/vm_map.h>
+#include <pthread.h>
+#include <sys/resource.h>
+#include <sys/syslimits.h>
+
+#include <algorithm>
+#include <map>
+#include <memory>
+
+#include "base/logging.h"
+#include "base/mac/mach_logging.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/strings/string_number_conversions.h"
+
+extern "C" {
+void _sigtramp(int, int, struct sigset*);
+}
+
+namespace base {
+
+namespace {
+
+// Maps a module's address range (half-open) in memory to an index in a separate
+// data structure.
+struct ModuleIndex {
+  ModuleIndex(uintptr_t start, uintptr_t end, size_t idx)
+      : base_address(start), end_address(end), index(idx) {}
+  // Base address of the represented module.
+  uintptr_t base_address;
+  // First address off the end of the represented module.
+  uintptr_t end_address;
+  // An index to the represented module in a separate container.
+  size_t index;
+};
+
+// Module identifiers ---------------------------------------------------------
+
+// Returns the unique build ID for a module loaded at |module_addr|. Returns the
+// empty string if the function fails to get the build ID.
+//
+// Build IDs are created by the concatenation of the module's GUID (Windows) /
+// UUID (Mac) and an "age" field that indicates how many times that GUID/UUID
+// has been reused. In Windows binaries, the "age" field is present in the
+// module header, but on the Mac, UUIDs are never reused and so the "age" value
+// appended to the UUID is always 0.
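+//
+// For example, a module whose LC_UUID payload hex-encodes to
+// "A1B2C3D4E5F60718A9B0C1D2E3F40516" (an invented value) gets the build ID
+// "A1B2C3D4E5F60718A9B0C1D2E3F405160".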
+std::string GetUniqueId(const void* module_addr) {
+  const mach_header_64* mach_header =
+      reinterpret_cast<const mach_header_64*>(module_addr);
+  DCHECK_EQ(MH_MAGIC_64, mach_header->magic);
+
+  size_t offset = sizeof(mach_header_64);
+  size_t offset_limit = sizeof(mach_header_64) + mach_header->sizeofcmds;
+  for (uint32_t i = 0; (i < mach_header->ncmds) &&
+                       (offset + sizeof(load_command) < offset_limit);
+       ++i) {
+    const load_command* current_cmd = reinterpret_cast<const load_command*>(
+        reinterpret_cast<const uint8_t*>(mach_header) + offset);
+
+    if (offset + current_cmd->cmdsize > offset_limit) {
+      // This command runs off the end of the command list. This is malformed.
+      return std::string();
+    }
+
+    if (current_cmd->cmd == LC_UUID) {
+      if (current_cmd->cmdsize < sizeof(uuid_command)) {
+        // This "UUID command" is too small. This is malformed.
+        return std::string();
+      }
+
+      const uuid_command* uuid_cmd =
+          reinterpret_cast<const uuid_command*>(current_cmd);
+      static_assert(sizeof(uuid_cmd->uuid) == sizeof(uuid_t),
+                    "UUID field of UUID command should be 16 bytes.");
+      // The ID consists of the UUID concatenated with the Mac's "age" value,
+      // which is always 0.
+      return HexEncode(&uuid_cmd->uuid, sizeof(uuid_cmd->uuid)) + "0";
+    }
+    offset += current_cmd->cmdsize;
+  }
+  return std::string();
+}
+
+// Returns the size of the __TEXT segment of the module loaded at |module_addr|.
+size_t GetModuleTextSize(const void* module_addr) {
+  const mach_header_64* mach_header =
+      reinterpret_cast<const mach_header_64*>(module_addr);
+  DCHECK_EQ(MH_MAGIC_64, mach_header->magic);
+
+  unsigned long module_size;
+  getsegmentdata(mach_header, SEG_TEXT, &module_size);
+
+  return module_size;
+}
+
+// Gets the index for the Module containing |instruction_pointer| in
+// |modules|, adding it if it's not already present. Returns
+// StackSamplingProfiler::Frame::kUnknownModuleIndex if no Module can be
+// determined for |instruction_pointer|.
+size_t GetModuleIndex(const uintptr_t instruction_pointer,
+                      std::vector<StackSamplingProfiler::Module>* modules,
+                      std::vector<ModuleIndex>* profile_module_index) {
+  // Check if |instruction_pointer| is in the address range of a module we've
+  // already seen.
+  auto module_index =
+      std::find_if(profile_module_index->begin(), profile_module_index->end(),
+                   [instruction_pointer](const ModuleIndex& index) {
+                     return instruction_pointer >= index.base_address &&
+                            instruction_pointer < index.end_address;
+                   });
+  if (module_index != profile_module_index->end()) {
+    return module_index->index;
+  }
+  Dl_info inf;
+  if (!dladdr(reinterpret_cast<const void*>(instruction_pointer), &inf))
+    return StackSamplingProfiler::Frame::kUnknownModuleIndex;
+
+  StackSamplingProfiler::Module module(
+      reinterpret_cast<uintptr_t>(inf.dli_fbase), GetUniqueId(inf.dli_fbase),
+      base::FilePath(inf.dli_fname));
+  modules->push_back(module);
+
+  uintptr_t base_module_address = reinterpret_cast<uintptr_t>(inf.dli_fbase);
+  size_t index = modules->size() - 1;
+  profile_module_index->emplace_back(
+      base_module_address,
+      base_module_address + GetModuleTextSize(inf.dli_fbase), index);
+  return index;
+}
+
+// Stack walking --------------------------------------------------------------
+
+// Fills |state| with |target_thread|'s context.
+//
+// Note that this is called while a thread is suspended. Make very very sure
+// that no shared resources (e.g. memory allocators) are used for the duration
+// of this function.
+bool GetThreadState(thread_act_t target_thread, x86_thread_state64_t* state) {
+  mach_msg_type_number_t count =
+      static_cast<mach_msg_type_number_t>(x86_THREAD_STATE64_COUNT);
+  return thread_get_state(target_thread, x86_THREAD_STATE64,
+                          reinterpret_cast<thread_state_t>(state),
+                          &count) == KERN_SUCCESS;
+}
+
+// If the value at |pointer| points to the original stack, rewrites it to point
+// to the corresponding location in the copied stack.
+//
+// Note that this is called while a thread is suspended. Make very very sure
+// that no shared resources (e.g. memory allocators) are used for the duration
+// of this function.
+uintptr_t RewritePointerIfInOriginalStack(
+    const uintptr_t* original_stack_bottom,
+    const uintptr_t* original_stack_top,
+    uintptr_t* stack_copy_bottom,
+    uintptr_t pointer) {
+  uintptr_t original_stack_bottom_int =
+      reinterpret_cast<uintptr_t>(original_stack_bottom);
+  uintptr_t original_stack_top_int =
+      reinterpret_cast<uintptr_t>(original_stack_top);
+  uintptr_t stack_copy_bottom_int =
+      reinterpret_cast<uintptr_t>(stack_copy_bottom);
+
+  if ((pointer < original_stack_bottom_int) ||
+      (pointer >= original_stack_top_int)) {
+    return pointer;
+  }
+
+  return stack_copy_bottom_int + (pointer - original_stack_bottom_int);
+}
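+
+// A worked example with hypothetical addresses: if the original stack spans
+// [0x7000, 0x8000) and its copy starts at 0x9000, a stack slot holding 0x7010
+// is rewritten to 0x9010, while a value outside that range (e.g. a return
+// address such as 0x1234) is returned unchanged.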
+
+// Copies the stack to a buffer while rewriting possible pointers to locations
+// within the stack to point to the corresponding locations in the copy. This is
+// necessary to handle stack frames with dynamic stack allocation, where a
+// pointer to the beginning of the dynamic allocation area is stored on the
+// stack and/or in a non-volatile register.
+//
+// Eager rewriting of anything that looks like a pointer to the stack, as done
+// in this function, does not adversely affect the stack unwinding. The only
+// other values on the stack the unwinding depends on are return addresses,
+// which should not point within the stack memory. The rewriting is guaranteed
+// to catch all pointers because the stacks are guaranteed by the ABI to be
+// sizeof(void*) aligned.
+//
+// Note that this is called while a thread is suspended. Make very very sure
+// that no shared resources (e.g. memory allocators) are used for the duration
+// of this function.
+void CopyStackAndRewritePointers(uintptr_t* stack_copy_bottom,
+                                 const uintptr_t* original_stack_bottom,
+                                 const uintptr_t* original_stack_top,
+                                 x86_thread_state64_t* thread_state)
+    NO_SANITIZE("address") {
+  size_t count = original_stack_top - original_stack_bottom;
+  for (size_t pos = 0; pos < count; ++pos) {
+    stack_copy_bottom[pos] = RewritePointerIfInOriginalStack(
+        original_stack_bottom, original_stack_top, stack_copy_bottom,
+        original_stack_bottom[pos]);
+  }
+
+  uint64_t* rewrite_registers[] = {&thread_state->__rbx, &thread_state->__rbp,
+                                   &thread_state->__rsp, &thread_state->__r12,
+                                   &thread_state->__r13, &thread_state->__r14,
+                                   &thread_state->__r15};
+  for (auto* reg : rewrite_registers) {
+    *reg = RewritePointerIfInOriginalStack(
+        original_stack_bottom, original_stack_top, stack_copy_bottom, *reg);
+  }
+}
+
+// Extracts the "frame offset" for a given frame from the compact unwind info.
+// A frame offset indicates the location of saved non-volatile registers in
+// relation to the frame pointer. See |mach-o/compact_unwind_encoding.h| for
+// details.
+uint32_t GetFrameOffset(int compact_unwind_info) {
+  // The frame offset lives in bits 16-23. This shifts it down by the number of
+  // leading zeroes in the mask, then masks with (1 << number of one bits in the
+  // mask) - 1, turning 0x00FF0000 into 0x000000FF. Adapted from |EXTRACT_BITS|
+  // in libunwind's CompactUnwinder.hpp.
+  return (
+      (compact_unwind_info >> __builtin_ctz(UNWIND_X86_64_RBP_FRAME_OFFSET)) &
+      (((1 << __builtin_popcount(UNWIND_X86_64_RBP_FRAME_OFFSET))) - 1));
+}
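+
+// For example, with UNWIND_X86_64_RBP_FRAME_OFFSET == 0x00FF0000, a
+// hypothetical encoding of 0x02320000 yields (0x02320000 >> 16) & 0xFF ==
+// 0x32, i.e. the saved registers live 0x32 pointer-sized words below the
+// frame pointer.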
+
+// True if the unwind from |leaf_frame_rip| may trigger a crash bug in
+// unw_init_local. If so, the stack walk should be aborted at the leaf frame.
+bool MayTriggerUnwInitLocalCrash(uint64_t leaf_frame_rip) {
+  // The issue here is a bug in unw_init_local that, in some unwinds, results in
+  // attempts to access memory at the address immediately following the address
+  // range of the library. When the library is the last of the mapped libraries
+  // that address is in a different memory region. Starting with 10.13.4 beta
+  // releases it appears that this region is sometimes either unmapped or mapped
+  // without read access, resulting in crashes on the attempted access. It's not
+  // clear what circumstances result in this situation; attempts to reproduce on
+  // a 10.13.4 beta did not trigger the issue.
+  //
+  // The workaround is to check if the memory address that would be accessed is
+  // readable, and if not, abort the stack walk before calling unw_init_local.
+  // As of 2018/03/19 about 0.1% of non-idle stacks on the UI and GPU main
+  // threads have a leaf frame in the last library. Since the issue appears to
+  // only occur some of the time it's expected that the quantity of lost samples
+  // will be lower than 0.1%, possibly significantly lower.
+  //
+  // TODO(lgrey): Add references above to LLVM/Radar bugs on unw_init_local once
+  // filed.
+  Dl_info info;
+  if (dladdr(reinterpret_cast<const void*>(leaf_frame_rip), &info) == 0)
+    return false;
+  uint64_t unused;
+  vm_size_t size = sizeof(unused);
+  return vm_read_overwrite(current_task(),
+                           reinterpret_cast<vm_address_t>(info.dli_fbase) +
+                               GetModuleTextSize(info.dli_fbase),
+                           sizeof(unused),
+                           reinterpret_cast<vm_address_t>(&unused), &size) != 0;
+}
+
+// Check if the cursor contains a valid-looking frame pointer for frame pointer
+// unwinds. If the stack frame has a frame pointer, stepping the cursor will
+// involve indexing memory access off of that pointer. In that case,
+// sanity-check the frame pointer register to ensure it's within bounds.
+//
+// Additionally, the stack frame might be in a prologue or epilogue, which can
+// cause a crash when the unwinder attempts to access non-volatile registers
+// that have not yet been pushed, or have already been popped from the
+// stack. libunwind will try to restore those registers using an offset from
+// the frame pointer. However, since we copy the stack from RSP up, any
+// locations below the stack pointer are before the beginning of the stack
+// buffer. Account for this by checking that the expected location is above the
+// stack pointer, and rejecting the sample if it isn't.
+bool HasValidRbp(unw_cursor_t* unwind_cursor, uintptr_t stack_top) {
+  unw_proc_info_t proc_info;
+  unw_get_proc_info(unwind_cursor, &proc_info);
+  if ((proc_info.format & UNWIND_X86_64_MODE_MASK) ==
+      UNWIND_X86_64_MODE_RBP_FRAME) {
+    unw_word_t rsp, rbp;
+    unw_get_reg(unwind_cursor, UNW_X86_64_RSP, &rsp);
+    unw_get_reg(unwind_cursor, UNW_X86_64_RBP, &rbp);
+    uint32_t offset = GetFrameOffset(proc_info.format) * sizeof(unw_word_t);
+    if (rbp < offset || (rbp - offset) < rsp || rbp > stack_top) {
+      return false;
+    }
+  }
+  return true;
+}
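+
+// Continuing the hypothetical encoding above: a frame offset of 0x32 words is
+// 0x190 bytes, so an rbp of 0x7FF0 passes the check only if
+// rsp <= 0x7FF0 - 0x190 and 0x7FF0 <= stack_top.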
+
+// Walks the stack represented by |unwind_context|, calling back to the provided
+// lambda for each frame. Returns false if an error occurred, otherwise returns
+// true.
+template <typename StackFrameCallback, typename ContinueUnwindPredicate>
+bool WalkStackFromContext(
+    unw_context_t* unwind_context,
+    size_t* frame_count,
+    std::vector<StackSamplingProfiler::Module>* current_modules,
+    std::vector<ModuleIndex>* profile_module_index,
+    const StackFrameCallback& callback,
+    const ContinueUnwindPredicate& continue_unwind) {
+  unw_cursor_t unwind_cursor;
+  unw_init_local(&unwind_cursor, unwind_context);
+
+  int step_result;
+  unw_word_t rip;
+  do {
+    ++(*frame_count);
+    unw_get_reg(&unwind_cursor, UNW_REG_IP, &rip);
+
+    // Ensure IP is in a module.
+    //
+    // Frameless unwinding (non-DWARF) works by fetching the function's
+    // stack size from the unwind encoding or stack, and adding it to the
+    // stack pointer to determine the function's return address.
+    //
+    // If we're in a function prologue or epilogue, the actual stack size
+    // may be smaller than it will be during the normal course of execution.
+    // When libunwind adds the expected stack size, it will look for the
+    // return address in the wrong place. This check should ensure that we
+    // bail before trying to deref a bad IP obtained this way in the previous
+    // frame.
+    size_t module_index =
+        GetModuleIndex(rip, current_modules, profile_module_index);
+    if (module_index == StackSamplingProfiler::Frame::kUnknownModuleIndex) {
+      return false;
+    }
+
+    callback(static_cast<uintptr_t>(rip), module_index);
+
+    if (!continue_unwind(&unwind_cursor))
+      return false;
+
+    step_result = unw_step(&unwind_cursor);
+  } while (step_result > 0);
+
+  if (step_result != 0)
+    return false;
+
+  return true;
+}
+
+const char* LibSystemKernelName() {
+  static char path[PATH_MAX];
+  static char* name = nullptr;
+  if (name)
+    return name;
+
+  Dl_info info;
+  dladdr(reinterpret_cast<void*>(_exit), &info);
+  strlcpy(path, info.dli_fname, PATH_MAX);
+  name = path;
+
+#if !defined(ADDRESS_SANITIZER)
+  DCHECK_EQ(std::string(name),
+            std::string("/usr/lib/system/libsystem_kernel.dylib"));
+#endif
+  return name;
+}
+
+void GetSigtrampRange(uintptr_t* start, uintptr_t* end) {
+  uintptr_t address = reinterpret_cast<uintptr_t>(&_sigtramp);
+  DCHECK(address != 0);
+
+  *start = address;
+
+  unw_context_t context;
+  unw_cursor_t cursor;
+  unw_proc_info_t info;
+
+  unw_getcontext(&context);
+  // Set the context's RIP to the beginning of sigtramp,
+  // +1 byte to work around a bug in 10.11 (crbug.com/764468).
+  context.data[16] = address + 1;
+  unw_init_local(&cursor, &context);
+  unw_get_proc_info(&cursor, &info);
+
+  DCHECK_EQ(info.start_ip, address);
+  *end = info.end_ip;
+}
+
+// Walks the stack represented by |thread_state|, calling back to the provided
+// lambda for each frame.
+template <typename StackFrameCallback, typename ContinueUnwindPredicate>
+void WalkStack(const x86_thread_state64_t& thread_state,
+               std::vector<StackSamplingProfiler::Module>* current_modules,
+               std::vector<ModuleIndex>* profile_module_index,
+               const StackFrameCallback& callback,
+               const ContinueUnwindPredicate& continue_unwind) {
+  size_t frame_count = 0;
+  // This uses libunwind to walk the stack. libunwind is designed to be used for
+  // a thread to walk its own stack. This creates two problems.
+
+  // Problem 1: There is no official way to create a unw_context other than to
+  // create it from the current state of the current thread's stack. To get
+  // around this, forge a context. A unw_context is just a copy of the 16 main
+  // registers followed by the instruction pointer, nothing more.
+  // Coincidentally, the first 17 items of the x86_thread_state64_t type are
+  // exactly those registers in exactly the same order, so just bulk copy them
+  // over.
+  unw_context_t unwind_context;
+  memcpy(&unwind_context, &thread_state, sizeof(uintptr_t) * 17);
+  bool result =
+      WalkStackFromContext(&unwind_context, &frame_count, current_modules,
+                           profile_module_index, callback, continue_unwind);
+
+  if (!result)
+    return;
+
+  if (frame_count == 1) {
+    // Problem 2: Because libunwind is designed to be triggered by user code on
+    // their own thread, if it hits a library that has no unwind info for the
+    // function that is being executed, it just stops. This isn't a problem in
+    // the normal case, but in this case, it's quite possible that the stack
+    // being walked is stopped in a function that bridges to the kernel and thus
+    // is missing the unwind info.
+
+    // For now, just unwind the single case where the thread is stopped in a
+    // function in libsystem_kernel.
+    uint64_t& rsp = unwind_context.data[7];
+    uint64_t& rip = unwind_context.data[16];
+    Dl_info info;
+    if (dladdr(reinterpret_cast<void*>(rip), &info) != 0 &&
+        strcmp(info.dli_fname, LibSystemKernelName()) == 0) {
+      rip = *reinterpret_cast<uint64_t*>(rsp);
+      rsp += 8;
+      WalkStackFromContext(&unwind_context, &frame_count, current_modules,
+                           profile_module_index, callback, continue_unwind);
+    }
+  }
+}
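+
+// An illustrative call with hypothetical callbacks: record every frame's
+// instruction pointer and never abort the unwind early.
+//
+//   WalkStack(thread_state, &modules, &module_index,
+//             [](uintptr_t ip, size_t module) { /* record the frame */ },
+//             [](unw_cursor_t*) { return true; });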
+
+// ScopedSuspendThread --------------------------------------------------------
+
+// Suspends a thread for the lifetime of the object.
+class ScopedSuspendThread {
+ public:
+  explicit ScopedSuspendThread(mach_port_t thread_port)
+      : thread_port_(thread_suspend(thread_port) == KERN_SUCCESS
+                         ? thread_port
+                         : MACH_PORT_NULL) {}
+
+  ~ScopedSuspendThread() {
+    if (!was_successful())
+      return;
+
+    kern_return_t kr = thread_resume(thread_port_);
+    MACH_CHECK(kr == KERN_SUCCESS, kr) << "thread_resume";
+  }
+
+  bool was_successful() const { return thread_port_ != MACH_PORT_NULL; }
+
+ private:
+  mach_port_t thread_port_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedSuspendThread);
+};
+
+// NativeStackSamplerMac ------------------------------------------------------
+
+class NativeStackSamplerMac : public NativeStackSampler {
+ public:
+  NativeStackSamplerMac(mach_port_t thread_port,
+                        AnnotateCallback annotator,
+                        NativeStackSamplerTestDelegate* test_delegate);
+  ~NativeStackSamplerMac() override;
+
+  // StackSamplingProfiler::NativeStackSampler:
+  void ProfileRecordingStarting(
+      std::vector<StackSamplingProfiler::Module>* modules) override;
+  void RecordStackSample(StackBuffer* stack_buffer,
+                         StackSamplingProfiler::Sample* sample) override;
+  void ProfileRecordingStopped(StackBuffer* stack_buffer) override;
+
+ private:
+  // Suspends the thread with |thread_port_|, copies its stack and resumes the
+  // thread, then records the stack frames and associated modules into |sample|.
+  void SuspendThreadAndRecordStack(StackBuffer* stack_buffer,
+                                   StackSamplingProfiler::Sample* sample);
+
+  // Weak reference: Mach port for thread being profiled.
+  mach_port_t thread_port_;
+
+  const AnnotateCallback annotator_;
+
+  NativeStackSamplerTestDelegate* const test_delegate_;
+
+  // The stack base address corresponding to |thread_port_|.
+  const void* const thread_stack_base_address_;
+
+  // Weak. Points to the modules associated with the profile being recorded
+  // between ProfileRecordingStarting() and ProfileRecordingStopped().
+  std::vector<StackSamplingProfiler::Module>* current_modules_ = nullptr;
+
+  // Maps a module's address range to the corresponding Module's index within
+  // current_modules_.
+  std::vector<ModuleIndex> profile_module_index_;
+
+  // The address range of |_sigtramp|, the signal trampoline function.
+  uintptr_t sigtramp_start_;
+  uintptr_t sigtramp_end_;
+
+  DISALLOW_COPY_AND_ASSIGN(NativeStackSamplerMac);
+};
+
+NativeStackSamplerMac::NativeStackSamplerMac(
+    mach_port_t thread_port,
+    AnnotateCallback annotator,
+    NativeStackSamplerTestDelegate* test_delegate)
+    : thread_port_(thread_port),
+      annotator_(annotator),
+      test_delegate_(test_delegate),
+      thread_stack_base_address_(
+          pthread_get_stackaddr_np(pthread_from_mach_thread_np(thread_port))) {
+  DCHECK(annotator_);
+
+  GetSigtrampRange(&sigtramp_start_, &sigtramp_end_);
+  // This class suspends threads, and those threads might be suspended in dyld.
+  // Therefore, any dynamically linked system functions used while threads are
+  // suspended are called here first, to make sure they are already resolved.
+  x86_thread_state64_t thread_state;
+  GetThreadState(thread_port_, &thread_state);
+}
+
+NativeStackSamplerMac::~NativeStackSamplerMac() {}
+
+void NativeStackSamplerMac::ProfileRecordingStarting(
+    std::vector<StackSamplingProfiler::Module>* modules) {
+  current_modules_ = modules;
+  profile_module_index_.clear();
+}
+
+void NativeStackSamplerMac::RecordStackSample(
+    StackBuffer* stack_buffer,
+    StackSamplingProfiler::Sample* sample) {
+  DCHECK(current_modules_);
+
+  SuspendThreadAndRecordStack(stack_buffer, sample);
+}
+
+void NativeStackSamplerMac::ProfileRecordingStopped(StackBuffer* stack_buffer) {
+  current_modules_ = nullptr;
+}
+
+void NativeStackSamplerMac::SuspendThreadAndRecordStack(
+    StackBuffer* stack_buffer,
+    StackSamplingProfiler::Sample* sample) {
+  x86_thread_state64_t thread_state;
+
+  // Copy the stack.
+
+  uintptr_t new_stack_top = 0;
+  {
+    // IMPORTANT NOTE: Do not do ANYTHING in this scope that might allocate
+    // memory, including indirectly via use of DCHECK/CHECK or other
+    // logging statements. Otherwise this code can deadlock on heap locks in the
+    // default heap acquired by the target thread before it was suspended.
+    ScopedSuspendThread suspend_thread(thread_port_);
+    if (!suspend_thread.was_successful())
+      return;
+
+    if (!GetThreadState(thread_port_, &thread_state))
+      return;
+    uintptr_t stack_top =
+        reinterpret_cast<uintptr_t>(thread_stack_base_address_);
+    uintptr_t stack_bottom = thread_state.__rsp;
+    if (stack_bottom >= stack_top)
+      return;
+    uintptr_t stack_size = stack_top - stack_bottom;
+
+    if (stack_size > stack_buffer->size())
+      return;
+
+    (*annotator_)(sample);
+
+    CopyStackAndRewritePointers(
+        reinterpret_cast<uintptr_t*>(stack_buffer->buffer()),
+        reinterpret_cast<uintptr_t*>(stack_bottom),
+        reinterpret_cast<uintptr_t*>(stack_top), &thread_state);
+
+    new_stack_top =
+        reinterpret_cast<uintptr_t>(stack_buffer->buffer()) + stack_size;
+  }  // ScopedSuspendThread
+
+  if (test_delegate_)
+    test_delegate_->OnPreStackWalk();
+
+  // Walk the stack and record it.
+
+  // Reserve enough memory for most stacks, to avoid repeated allocations.
+  // Approximately 99.9% of recorded stacks are 128 frames or fewer.
+  sample->frames.reserve(128);
+
+  auto* current_modules = current_modules_;
+  auto* profile_module_index = &profile_module_index_;
+
+  // Avoid an out-of-bounds read bug in libunwind that can crash us in some
+  // circumstances. If we're subject to that case, just record the first frame
+  // and bail. See MayTriggerUnwInitLocalCrash for details.
+  uintptr_t rip = thread_state.__rip;
+  if (MayTriggerUnwInitLocalCrash(rip)) {
+    sample->frames.emplace_back(
+        rip, GetModuleIndex(rip, current_modules, profile_module_index));
+    return;
+  }
+
+  const auto continue_predicate = [this,
+                                   new_stack_top](unw_cursor_t* unwind_cursor) {
+    // Don't continue if we're in sigtramp. Unwinding this from another thread
+    // is very fragile. It's a complex DWARF unwind that needs to restore the
+    // entire thread context which was saved by the kernel when the interrupt
+    // occurred.
+    unw_word_t rip;
+    unw_get_reg(unwind_cursor, UNW_REG_IP, &rip);
+    if (rip >= sigtramp_start_ && rip < sigtramp_end_)
+      return false;
+
+    // Don't continue if rbp appears to be invalid (due to a previous bad
+    // unwind).
+    return HasValidRbp(unwind_cursor, new_stack_top);
+  };
+
+  WalkStack(thread_state, current_modules, profile_module_index,
+            [sample, current_modules, profile_module_index](
+                uintptr_t frame_ip, size_t module_index) {
+              sample->frames.emplace_back(frame_ip, module_index);
+            },
+            continue_predicate);
+}
+
+}  // namespace
+
+std::unique_ptr<NativeStackSampler> NativeStackSampler::Create(
+    PlatformThreadId thread_id,
+    AnnotateCallback annotator,
+    NativeStackSamplerTestDelegate* test_delegate) {
+  return std::make_unique<NativeStackSamplerMac>(thread_id, annotator,
+                                                 test_delegate);
+}
+
+size_t NativeStackSampler::GetStackBufferSize() {
+  // In platform_thread_mac's GetDefaultThreadStackSize(), RLIMIT_STACK is used
+  // for all stacks, not just the main thread's, so it is good for use here.
+  struct rlimit stack_rlimit;
+  if (getrlimit(RLIMIT_STACK, &stack_rlimit) == 0 &&
+      stack_rlimit.rlim_cur != RLIM_INFINITY) {
+    return stack_rlimit.rlim_cur;
+  }
+
+  // If getrlimit somehow fails, return the default macOS main thread stack size
+  // of 8 MB (DFLSSIZ in <i386/vmparam.h>) with extra wiggle room.
+  return 12 * 1024 * 1024;
+}
+
+}  // namespace base
diff --git a/base/profiler/native_stack_sampler_posix.cc b/base/profiler/native_stack_sampler_posix.cc
new file mode 100644
index 0000000..1055d44
--- /dev/null
+++ b/base/profiler/native_stack_sampler_posix.cc
@@ -0,0 +1,20 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/profiler/native_stack_sampler.h"
+
+namespace base {
+
+std::unique_ptr<NativeStackSampler> NativeStackSampler::Create(
+    PlatformThreadId thread_id,
+    AnnotateCallback annotator,
+    NativeStackSamplerTestDelegate* test_delegate) {
+  return std::unique_ptr<NativeStackSampler>();
+}
+
+size_t NativeStackSampler::GetStackBufferSize() {
+  return 0;
+}
+
+}  // namespace base
diff --git a/base/profiler/native_stack_sampler_win.cc b/base/profiler/native_stack_sampler_win.cc
new file mode 100644
index 0000000..b53197d
--- /dev/null
+++ b/base/profiler/native_stack_sampler_win.cc
@@ -0,0 +1,562 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/profiler/native_stack_sampler.h"
+
+#include <objbase.h>
+#include <windows.h>
+#include <stddef.h>
+#include <winternl.h>
+
+#include <cstdlib>
+#include <map>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/profiler/win32_stack_frame_unwinder.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/time/time.h"
+#include "base/win/pe_image.h"
+#include "base/win/scoped_handle.h"
+
+namespace base {
+
+// Stack recording functions --------------------------------------------------
+
+namespace {
+
+// The thread environment block internal type.
+struct TEB {
+  NT_TIB Tib;
+  // Rest of struct is ignored.
+};
+
+// Returns the thread environment block pointer for |thread_handle|.
+const TEB* GetThreadEnvironmentBlock(HANDLE thread_handle) {
+  // Define the internal types we need to invoke NtQueryInformationThread.
+  enum THREAD_INFORMATION_CLASS { ThreadBasicInformation };
+
+  struct CLIENT_ID {
+    HANDLE UniqueProcess;
+    HANDLE UniqueThread;
+  };
+
+  struct THREAD_BASIC_INFORMATION {
+    NTSTATUS ExitStatus;
+    TEB* Teb;
+    CLIENT_ID ClientId;
+    KAFFINITY AffinityMask;
+    LONG Priority;
+    LONG BasePriority;
+  };
+
+  using NtQueryInformationThreadFunction =
+      NTSTATUS (WINAPI*)(HANDLE, THREAD_INFORMATION_CLASS, PVOID, ULONG,
+                         PULONG);
+
+  const NtQueryInformationThreadFunction nt_query_information_thread =
+      reinterpret_cast<NtQueryInformationThreadFunction>(
+          ::GetProcAddress(::GetModuleHandle(L"ntdll.dll"),
+                           "NtQueryInformationThread"));
+  if (!nt_query_information_thread)
+    return nullptr;
+
+  THREAD_BASIC_INFORMATION basic_info = {0};
+  NTSTATUS status =
+      nt_query_information_thread(thread_handle, ThreadBasicInformation,
+                                  &basic_info, sizeof(THREAD_BASIC_INFORMATION),
+                                  nullptr);
+  if (status != 0)
+    return nullptr;
+
+  return basic_info.Teb;
+}
+
+#if defined(_WIN64)
+// If the value at |pointer| points to the original stack, rewrites it to point
+// to the corresponding location in the copied stack.
+void RewritePointerIfInOriginalStack(uintptr_t top, uintptr_t bottom,
+                                     void* stack_copy, const void** pointer) {
+  const uintptr_t value = reinterpret_cast<uintptr_t>(*pointer);
+  if (value >= bottom && value < top) {
+    *pointer = reinterpret_cast<const void*>(
+        static_cast<unsigned char*>(stack_copy) + (value - bottom));
+  }
+}
+#endif
+
+void CopyMemoryFromStack(void* to, const void* from, size_t length)
+    NO_SANITIZE("address") {
+#if defined(ADDRESS_SANITIZER)
+  // The following loop is an inlined version of memcpy. The code must be
+  // inlined to avoid instrumentation when building with ASan
+  // (AddressSanitizer); otherwise the stack profiler generates false
+  // positives when walking the stack.
+  for (size_t pos = 0; pos < length; ++pos)
+    reinterpret_cast<char*>(to)[pos] = reinterpret_cast<const char*>(from)[pos];
+#else
+  std::memcpy(to, from, length);
+#endif
+}
+
+// Rewrites possible pointers to locations within the stack to point to the
+// corresponding locations in the copy, and rewrites the non-volatile registers
+// in |context| likewise. This is necessary to handle stack frames with dynamic
+// stack allocation, where a pointer to the beginning of the dynamic allocation
+// area is stored on the stack and/or in a non-volatile register.
+//
+// Eager rewriting of anything that looks like a pointer to the stack, as done
+// in this function, does not adversely affect the stack unwinding. The only
+// other values on the stack the unwinding depends on are return addresses,
+// which should not point within the stack memory. The rewriting is guaranteed
+// to catch all pointers because the stacks are guaranteed by the ABI to be
+// sizeof(void*) aligned.
+//
+// Note: this function must not access memory in the original stack as it may
+// have been changed or deallocated by this point. This is why |top| and
+// |bottom| are passed as uintptr_t.
+void RewritePointersToStackMemory(uintptr_t top, uintptr_t bottom,
+                                  CONTEXT* context, void* stack_copy) {
+#if defined(_WIN64)
+  DWORD64 CONTEXT::* const nonvolatile_registers[] = {
+    &CONTEXT::R12,
+    &CONTEXT::R13,
+    &CONTEXT::R14,
+    &CONTEXT::R15,
+    &CONTEXT::Rdi,
+    &CONTEXT::Rsi,
+    &CONTEXT::Rbx,
+    &CONTEXT::Rbp,
+    &CONTEXT::Rsp
+  };
+
+  // Rewrite pointers in the context.
+  for (size_t i = 0; i < arraysize(nonvolatile_registers); ++i) {
+    DWORD64* const reg = &(context->*nonvolatile_registers[i]);
+    RewritePointerIfInOriginalStack(top, bottom, stack_copy,
+                                    reinterpret_cast<const void**>(reg));
+  }
+
+  // Rewrite pointers on the stack.
+  const void** start = reinterpret_cast<const void**>(stack_copy);
+  const void** end = reinterpret_cast<const void**>(
+      reinterpret_cast<char*>(stack_copy) + (top - bottom));
+  for (const void** loc = start; loc < end; ++loc)
+    RewritePointerIfInOriginalStack(top, bottom, stack_copy, loc);
+#endif
+}
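+
+// For illustration (hypothetical addresses): if the original stack spanned
+// [0x7000, 0x8000) and Rbp held 0x7F20, the rewrite redirects Rbp to the same
+// relative offset within |stack_copy|; values outside that range, such as
+// return addresses, are left untouched.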
+
+// Movable type representing a recorded stack frame.
+struct RecordedFrame {
+  RecordedFrame() {}
+
+  RecordedFrame(RecordedFrame&& other)
+      : instruction_pointer(other.instruction_pointer),
+        module(std::move(other.module)) {
+  }
+
+  RecordedFrame& operator=(RecordedFrame&& other) {
+    instruction_pointer = other.instruction_pointer;
+    module = std::move(other.module);
+    return *this;
+  }
+
+  const void* instruction_pointer = nullptr;
+  ScopedModuleHandle module;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(RecordedFrame);
+};
+
+// Walks the stack represented by |context| from the current frame downwards,
+// recording the instruction pointer and associated module for each frame in
+// |stack|.
+void RecordStack(CONTEXT* context, std::vector<RecordedFrame>* stack) {
+#ifdef _WIN64
+  DCHECK(stack->empty());
+
+  // Reserve enough memory for most stacks, to avoid repeated
+  // allocations. Approximately 99.9% of recorded stacks are 128 frames or
+  // fewer.
+  stack->reserve(128);
+
+  Win32StackFrameUnwinder frame_unwinder;
+  while (context->Rip) {
+    const void* instruction_pointer =
+        reinterpret_cast<const void*>(context->Rip);
+    ScopedModuleHandle module;
+    if (!frame_unwinder.TryUnwind(context, &module))
+      return;
+    RecordedFrame frame;
+    frame.instruction_pointer = instruction_pointer;
+    frame.module = std::move(module);
+    stack->push_back(std::move(frame));
+  }
+#endif
+}
+
+// Gets the unique build ID for a module. Windows build IDs are created by
+// concatenating the GUID and AGE fields found in the headers of a module. The
+// GUID is stored in the first 16 bytes and the AGE is stored in the last 4
+// bytes. Returns the empty string if the function fails to get the build ID.
+//
+// Example:
+// dumpbin chrome.exe /headers | find "Format:"
+//   ... Format: RSDS, {16B2A428-1DED-442E-9A36-FCE8CBD29726}, 10, ...
+//
+// The resulting buildID string of this instance of chrome.exe is
+// "16B2A4281DED442E9A36FCE8CBD2972610".
+//
+// Note that the AGE field is encoded in decimal, not hex.
+std::string GetBuildIDForModule(HMODULE module_handle) {
+  GUID guid;
+  DWORD age;
+  win::PEImage(module_handle).GetDebugId(&guid, &age, /* pdb_file= */ nullptr);
+  const int kGUIDSize = 39;
+  std::wstring build_id;
+  int result =
+      ::StringFromGUID2(guid, WriteInto(&build_id, kGUIDSize), kGUIDSize);
+  if (result != kGUIDSize)
+    return std::string();
+  RemoveChars(build_id, L"{}-", &build_id);
+  build_id += StringPrintf(L"%d", age);
+  return WideToUTF8(build_id);
+}
+
+// ScopedDisablePriorityBoost -------------------------------------------------
+
+// Disables priority boost on a thread for the lifetime of the object.
+class ScopedDisablePriorityBoost {
+ public:
+  explicit ScopedDisablePriorityBoost(HANDLE thread_handle);
+  ~ScopedDisablePriorityBoost();
+
+ private:
+  HANDLE thread_handle_;
+  BOOL got_previous_boost_state_;
+  BOOL boost_state_was_disabled_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedDisablePriorityBoost);
+};
+
+ScopedDisablePriorityBoost::ScopedDisablePriorityBoost(HANDLE thread_handle)
+    : thread_handle_(thread_handle),
+      got_previous_boost_state_(false),
+      boost_state_was_disabled_(false) {
+  got_previous_boost_state_ =
+      ::GetThreadPriorityBoost(thread_handle_, &boost_state_was_disabled_);
+  if (got_previous_boost_state_) {
+    // Confusingly, TRUE disables priority boost.
+    ::SetThreadPriorityBoost(thread_handle_, TRUE);
+  }
+}
+
+ScopedDisablePriorityBoost::~ScopedDisablePriorityBoost() {
+  if (got_previous_boost_state_)
+    ::SetThreadPriorityBoost(thread_handle_, boost_state_was_disabled_);
+}
+
+// ScopedSuspendThread --------------------------------------------------------
+
+// Suspends a thread for the lifetime of the object.
+class ScopedSuspendThread {
+ public:
+  explicit ScopedSuspendThread(HANDLE thread_handle);
+  ~ScopedSuspendThread();
+
+  bool was_successful() const { return was_successful_; }
+
+ private:
+  HANDLE thread_handle_;
+  bool was_successful_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedSuspendThread);
+};
+
+ScopedSuspendThread::ScopedSuspendThread(HANDLE thread_handle)
+    : thread_handle_(thread_handle),
+      was_successful_(::SuspendThread(thread_handle) !=
+                      static_cast<DWORD>(-1)) {}
+
+ScopedSuspendThread::~ScopedSuspendThread() {
+  if (!was_successful_)
+    return;
+
+  // Disable the priority boost that the thread would otherwise receive on
+  // resume. We do this to avoid artificially altering the dynamics of the
+  // executing application any more than we already are by suspending and
+  // resuming the thread.
+  //
+  // Note that this can racily disable a priority boost that otherwise would
+  // have been given to the thread, if the thread is waiting on other wait
+  // conditions at the time of SuspendThread and those conditions are satisfied
+  // before priority boost is reenabled. The measured length of this window is
+  // ~100us, so this should occur fairly rarely.
+  ScopedDisablePriorityBoost disable_priority_boost(thread_handle_);
+  bool resume_thread_succeeded =
+      ::ResumeThread(thread_handle_) != static_cast<DWORD>(-1);
+  CHECK(resume_thread_succeeded) << "ResumeThread failed: " << GetLastError();
+}
+
+// Tests whether |stack_pointer| points to a location in the guard page.
+//
+// IMPORTANT NOTE: This function is invoked while the target thread is
+// suspended so it must not do any allocation from the default heap, including
+// indirectly via use of DCHECK/CHECK or other logging statements. Otherwise
+// this code can deadlock on heap locks in the default heap acquired by the
+// target thread before it was suspended.
+bool PointsToGuardPage(uintptr_t stack_pointer) {
+  MEMORY_BASIC_INFORMATION memory_info;
+  SIZE_T result = ::VirtualQuery(reinterpret_cast<LPCVOID>(stack_pointer),
+                                 &memory_info,
+                                 sizeof(memory_info));
+  return result != 0 && (memory_info.Protect & PAGE_GUARD);
+}
+
+// Suspends the thread with |thread_handle|, copies its stack and resumes the
+// thread, then records the stack frames and associated modules into |stack|.
+//
+// IMPORTANT NOTE: No allocations from the default heap may occur in the
+// ScopedSuspendThread scope, including indirectly via use of DCHECK/CHECK or
+// other logging statements. Otherwise this code can deadlock on heap locks in
+// the default heap acquired by the target thread before it was suspended.
+void SuspendThreadAndRecordStack(
+    HANDLE thread_handle,
+    const void* base_address,
+    void* stack_copy_buffer,
+    size_t stack_copy_buffer_size,
+    std::vector<RecordedFrame>* stack,
+    NativeStackSampler::AnnotateCallback annotator,
+    StackSamplingProfiler::Sample* sample,
+    NativeStackSamplerTestDelegate* test_delegate) {
+  DCHECK(stack->empty());
+
+  CONTEXT thread_context = {0};
+  thread_context.ContextFlags = CONTEXT_FULL;
+  // The stack bounds are saved to uintptr_ts for use outside
+  // ScopedSuspendThread, as the thread's memory is not safe to dereference
+  // beyond that point.
+  const uintptr_t top = reinterpret_cast<uintptr_t>(base_address);
+  uintptr_t bottom = 0u;
+
+  {
+    ScopedSuspendThread suspend_thread(thread_handle);
+
+    if (!suspend_thread.was_successful())
+      return;
+
+    if (!::GetThreadContext(thread_handle, &thread_context))
+      return;
+#if defined(_WIN64)
+    bottom = thread_context.Rsp;
+#else
+    bottom = thread_context.Esp;
+#endif
+
+    if ((top - bottom) > stack_copy_buffer_size)
+      return;
+
+    // Dereferencing a pointer in the guard page in a thread that doesn't own
+    // the stack results in a STATUS_GUARD_PAGE_VIOLATION exception and a crash.
+    // This occurs very rarely, but reliably over the population.
+    if (PointsToGuardPage(bottom))
+      return;
+
+    (*annotator)(sample);
+
+    CopyMemoryFromStack(stack_copy_buffer,
+                        reinterpret_cast<const void*>(bottom), top - bottom);
+  }
+
+  if (test_delegate)
+    test_delegate->OnPreStackWalk();
+
+  RewritePointersToStackMemory(top, bottom, &thread_context, stack_copy_buffer);
+
+  RecordStack(&thread_context, stack);
+}
+
+// NativeStackSamplerWin ------------------------------------------------------
+
+class NativeStackSamplerWin : public NativeStackSampler {
+ public:
+  NativeStackSamplerWin(win::ScopedHandle thread_handle,
+                        AnnotateCallback annotator,
+                        NativeStackSamplerTestDelegate* test_delegate);
+  ~NativeStackSamplerWin() override;
+
+  // StackSamplingProfiler::NativeStackSampler:
+  void ProfileRecordingStarting(
+      std::vector<StackSamplingProfiler::Module>* modules) override;
+  void RecordStackSample(StackBuffer* stack_buffer,
+                         StackSamplingProfiler::Sample* sample) override;
+  void ProfileRecordingStopped(StackBuffer* stack_buffer) override;
+
+ private:
+  // Attempts to query the module filename, base address, and id for
+  // |module_handle|, and store them in |module|. Returns true if it succeeded.
+  static bool GetModuleForHandle(HMODULE module_handle,
+                                 StackSamplingProfiler::Module* module);
+
+  // Gets the index for the Module corresponding to |module_handle| in
+  // |modules|, adding it if it's not already present. Returns
+  // StackSamplingProfiler::Frame::kUnknownModuleIndex if no Module can be
+  // determined for |module|.
+  size_t GetModuleIndex(HMODULE module_handle,
+                        std::vector<StackSamplingProfiler::Module>* modules);
+
+  // Copies the information represented by |stack| into |sample| and |modules|.
+  void CopyToSample(const std::vector<RecordedFrame>& stack,
+                    StackSamplingProfiler::Sample* sample,
+                    std::vector<StackSamplingProfiler::Module>* modules);
+
+  win::ScopedHandle thread_handle_;
+
+  const AnnotateCallback annotator_;
+
+  NativeStackSamplerTestDelegate* const test_delegate_;
+
+  // The stack base address corresponding to |thread_handle_|.
+  const void* const thread_stack_base_address_;
+
+  // Weak. Points to the modules associated with the profile being recorded
+  // between ProfileRecordingStarting() and ProfileRecordingStopped().
+  std::vector<StackSamplingProfiler::Module>* current_modules_;
+
+  // Maps a module handle to the corresponding Module's index within
+  // current_modules_.
+  std::map<HMODULE, size_t> profile_module_index_;
+
+  DISALLOW_COPY_AND_ASSIGN(NativeStackSamplerWin);
+};
+
+NativeStackSamplerWin::NativeStackSamplerWin(
+    win::ScopedHandle thread_handle,
+    AnnotateCallback annotator,
+    NativeStackSamplerTestDelegate* test_delegate)
+    : thread_handle_(thread_handle.Take()),
+      annotator_(annotator),
+      test_delegate_(test_delegate),
+      thread_stack_base_address_(
+          GetThreadEnvironmentBlock(thread_handle_.Get())->Tib.StackBase) {
+  DCHECK(annotator_);
+}
+
+NativeStackSamplerWin::~NativeStackSamplerWin() {
+}
+
+void NativeStackSamplerWin::ProfileRecordingStarting(
+    std::vector<StackSamplingProfiler::Module>* modules) {
+  current_modules_ = modules;
+  profile_module_index_.clear();
+}
+
+void NativeStackSamplerWin::RecordStackSample(
+    StackBuffer* stack_buffer,
+    StackSamplingProfiler::Sample* sample) {
+  DCHECK(stack_buffer);
+  DCHECK(current_modules_);
+
+  std::vector<RecordedFrame> stack;
+  SuspendThreadAndRecordStack(thread_handle_.Get(), thread_stack_base_address_,
+                              stack_buffer->buffer(), stack_buffer->size(),
+                              &stack, annotator_, sample, test_delegate_);
+  CopyToSample(stack, sample, current_modules_);
+}
+
+void NativeStackSamplerWin::ProfileRecordingStopped(StackBuffer* stack_buffer) {
+  current_modules_ = nullptr;
+}
+
+// static
+bool NativeStackSamplerWin::GetModuleForHandle(
+    HMODULE module_handle,
+    StackSamplingProfiler::Module* module) {
+  wchar_t module_name[MAX_PATH];
+  DWORD result_length =
+      GetModuleFileName(module_handle, module_name, arraysize(module_name));
+  if (result_length == 0)
+    return false;
+
+  module->filename = base::FilePath(module_name);
+
+  module->base_address = reinterpret_cast<uintptr_t>(module_handle);
+
+  module->id = GetBuildIDForModule(module_handle);
+  if (module->id.empty())
+    return false;
+
+  return true;
+}
+
+size_t NativeStackSamplerWin::GetModuleIndex(
+    HMODULE module_handle,
+    std::vector<StackSamplingProfiler::Module>* modules) {
+  if (!module_handle)
+    return StackSamplingProfiler::Frame::kUnknownModuleIndex;
+
+  auto loc = profile_module_index_.find(module_handle);
+  if (loc == profile_module_index_.end()) {
+    StackSamplingProfiler::Module module;
+    if (!GetModuleForHandle(module_handle, &module))
+      return StackSamplingProfiler::Frame::kUnknownModuleIndex;
+    modules->push_back(module);
+    loc = profile_module_index_.insert(std::make_pair(
+        module_handle, modules->size() - 1)).first;
+  }
+
+  return loc->second;
+}
+
+void NativeStackSamplerWin::CopyToSample(
+    const std::vector<RecordedFrame>& stack,
+    StackSamplingProfiler::Sample* sample,
+    std::vector<StackSamplingProfiler::Module>* modules) {
+  sample->frames.clear();
+  sample->frames.reserve(stack.size());
+
+  for (const RecordedFrame& frame : stack) {
+    sample->frames.push_back(StackSamplingProfiler::Frame(
+        reinterpret_cast<uintptr_t>(frame.instruction_pointer),
+        GetModuleIndex(frame.module.Get(), modules)));
+  }
+}
+
+}  // namespace
+
+std::unique_ptr<NativeStackSampler> NativeStackSampler::Create(
+    PlatformThreadId thread_id,
+    AnnotateCallback annotator,
+    NativeStackSamplerTestDelegate* test_delegate) {
+#if defined(_WIN64)
+  // Get the thread's handle.
+  HANDLE thread_handle = ::OpenThread(
+      THREAD_GET_CONTEXT | THREAD_SUSPEND_RESUME | THREAD_QUERY_INFORMATION,
+      FALSE,
+      thread_id);
+
+  if (thread_handle) {
+    return std::unique_ptr<NativeStackSampler>(new NativeStackSamplerWin(
+        win::ScopedHandle(thread_handle), annotator, test_delegate));
+  }
+#endif
+  return std::unique_ptr<NativeStackSampler>();
+}
+
+size_t NativeStackSampler::GetStackBufferSize() {
+  // The default Win32 reserved stack size is 1 MB and Chrome Windows threads
+  // currently always use the default, but this allows for expansion if it
+  // occurs. The size beyond the actual stack size consists of unallocated
+  // virtual memory pages so carries little cost (just a bit of wasted address
+  // space).
+  return 2 << 20;  // 2 MiB
+}
+
+}  // namespace base
diff --git a/base/profiler/stack_sampling_profiler.cc b/base/profiler/stack_sampling_profiler.cc
new file mode 100644
index 0000000..a8cddf0
--- /dev/null
+++ b/base/profiler/stack_sampling_profiler.cc
@@ -0,0 +1,890 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/profiler/stack_sampling_profiler.h"
+
+#include <algorithm>
+#include <map>
+#include <utility>
+
+#include "base/atomic_sequence_num.h"
+#include "base/atomicops.h"
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/callback.h"
+#include "base/lazy_instance.h"
+#include "base/location.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/memory/singleton.h"
+#include "base/profiler/native_stack_sampler.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/timer/elapsed_timer.h"
+
+namespace base {
+
+namespace {
+
+// This value is used to initialize the WaitableEvent object. This MUST BE set
+// to MANUAL for correct operation of the IsSignaled() call in Start(). See the
+// comment there for why.
+constexpr WaitableEvent::ResetPolicy kResetPolicy =
+    WaitableEvent::ResetPolicy::MANUAL;
+
+// This value is used when there is no collection in progress and thus no ID
+// for referencing the active collection to the SamplingThread.
+const int NULL_PROFILER_ID = -1;
+
+void ChangeAtomicFlags(subtle::Atomic32* flags,
+                       subtle::Atomic32 set,
+                       subtle::Atomic32 clear) {
+  DCHECK(set != 0 || clear != 0);
+  DCHECK_EQ(0, set & clear);
+
+  subtle::Atomic32 bits = subtle::NoBarrier_Load(flags);
+  while (true) {
+    subtle::Atomic32 existing =
+        subtle::NoBarrier_CompareAndSwap(flags, bits, (bits | set) & ~clear);
+    if (existing == bits)
+      break;
+    bits = existing;
+  }
+}
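+
+// Illustrative use, assuming hypothetical flag bits kEnabled == 1 and
+// kPending == 2: ChangeAtomicFlags(&flags, kEnabled, kPending) atomically
+// sets bit 0 and clears bit 1, retrying the compare-and-swap until no other
+// thread has modified |flags| concurrently.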
+
+}  // namespace
+
+// StackSamplingProfiler::Module ----------------------------------------------
+
+StackSamplingProfiler::Module::Module() : base_address(0u) {}
+StackSamplingProfiler::Module::Module(uintptr_t base_address,
+                                      const std::string& id,
+                                      const FilePath& filename)
+    : base_address(base_address), id(id), filename(filename) {}
+
+StackSamplingProfiler::Module::~Module() = default;
+
+// StackSamplingProfiler::Frame -----------------------------------------------
+
+StackSamplingProfiler::Frame::Frame(uintptr_t instruction_pointer,
+                                    size_t module_index)
+    : instruction_pointer(instruction_pointer), module_index(module_index) {}
+
+StackSamplingProfiler::Frame::~Frame() = default;
+
+StackSamplingProfiler::Frame::Frame()
+    : instruction_pointer(0), module_index(kUnknownModuleIndex) {
+}
+
+// StackSamplingProfiler::Sample ----------------------------------------------
+
+StackSamplingProfiler::Sample::Sample() = default;
+
+StackSamplingProfiler::Sample::Sample(const Sample& sample) = default;
+
+StackSamplingProfiler::Sample::~Sample() = default;
+
+StackSamplingProfiler::Sample::Sample(const Frame& frame) {
+  // Note: |frame| is a const reference, so this is necessarily a copy.
+  frames.push_back(frame);
+}
+
+StackSamplingProfiler::Sample::Sample(const std::vector<Frame>& frames)
+    : frames(frames) {}
+
+// StackSamplingProfiler::CallStackProfile ------------------------------------
+
+StackSamplingProfiler::CallStackProfile::CallStackProfile() = default;
+
+StackSamplingProfiler::CallStackProfile::CallStackProfile(
+    CallStackProfile&& other) = default;
+
+StackSamplingProfiler::CallStackProfile::~CallStackProfile() = default;
+
+StackSamplingProfiler::CallStackProfile&
+StackSamplingProfiler::CallStackProfile::operator=(CallStackProfile&& other) =
+    default;
+
+StackSamplingProfiler::CallStackProfile
+StackSamplingProfiler::CallStackProfile::CopyForTesting() const {
+  return CallStackProfile(*this);
+}
+
+StackSamplingProfiler::CallStackProfile::CallStackProfile(
+    const CallStackProfile& other) = default;
+
+// StackSamplingProfiler::SamplingThread --------------------------------------
+
+class StackSamplingProfiler::SamplingThread : public Thread {
+ public:
+  class TestAPI {
+   public:
+    // Reset the existing sampler. This will unfortunately create the object
+    // unnecessarily if it doesn't already exist but there's no way around that.
+    static void Reset();
+
+    // Disables inherent idle-shutdown behavior.
+    static void DisableIdleShutdown();
+
+    // Begins an idle shutdown as if the idle-timer had expired and waits for
+    // it to execute. Since the timer would have only been started at a time
+    // when the sampling thread actually was idle, this must be called only
+    // when it is known that there are no active sampling threads. If
+    // |simulate_intervening_add| is true then, when executed, the shutdown
+    // task will believe that a new collection has been added since it was
+    // posted.
+    static void ShutdownAssumingIdle(bool simulate_intervening_add);
+
+   private:
+    // Calls the sampling thread's ShutdownTask and then signals an event.
+    static void ShutdownTaskAndSignalEvent(SamplingThread* sampler,
+                                           int add_events,
+                                           WaitableEvent* event);
+  };
+
+  struct CollectionContext {
+    CollectionContext(int profiler_id,
+                      PlatformThreadId target,
+                      const SamplingParams& params,
+                      const CompletedCallback& callback,
+                      WaitableEvent* finished,
+                      std::unique_ptr<NativeStackSampler> sampler)
+        : profiler_id(profiler_id),
+          target(target),
+          params(params),
+          callback(callback),
+          finished(finished),
+          native_sampler(std::move(sampler)) {}
+    ~CollectionContext() = default;
+
+    // An identifier for the profiler associated with this collection, used to
+    // uniquely identify the collection to outside interests.
+    const int profiler_id;
+
+    const PlatformThreadId target;     // ID of the thread being sampled.
+    const SamplingParams params;       // Information about how to sample.
+    const CompletedCallback callback;  // Callback made when sampling complete.
+    WaitableEvent* const finished;     // Signaled when all sampling complete.
+
+    // Platform-specific module that does the actual sampling.
+    std::unique_ptr<NativeStackSampler> native_sampler;
+
+    // The absolute time for the next sample.
+    Time next_sample_time;
+
+    // The time that a profile was started, for calculating the total duration.
+    Time profile_start_time;
+
+    // Counters that indicate the current position along the acquisition.
+    int burst = 0;
+    int sample = 0;
+
+    // The collected stack samples. The active profile is always at the back().
+    CallStackProfiles profiles;
+
+    // Sequence number for generating new profiler ids.
+    static AtomicSequenceNumber next_profiler_id;
+  };
+
+  // Gets the single instance of this class.
+  static SamplingThread* GetInstance();
+
+  // Adds a new CollectionContext to the thread. This can be called externally
+  // from any thread. This returns an ID that can later be used to stop
+  // the sampling.
+  int Add(std::unique_ptr<CollectionContext> collection);
+
+  // Removes an active collection based on its ID, forcing it to run its
+  // callback if any data has been collected. This can be called externally
+  // from any thread.
+  void Remove(int id);
+
+ private:
+  friend class TestAPI;
+  friend struct DefaultSingletonTraits<SamplingThread>;
+
+  // The different states in which the sampling-thread can be.
+  enum ThreadExecutionState {
+    // The thread is not running because it has never been started. It will be
+    // started when a sampling request is received.
+    NOT_STARTED,
+
+    // The thread is running and processing tasks. This is the state when any
+    // sampling requests are active and during the "idle" period afterward
+    // before the thread is stopped.
+    RUNNING,
+
+    // Once all sampling requests have finished and the "idle" period has
+    // expired, the thread will be set to this state and its shutdown
+    // initiated. A call to Stop() must be made to ensure the previous thread
+    // has completely exited before calling Start() and moving back to the
+    // RUNNING state.
+    EXITING,
+  };
+
+  SamplingThread();
+  ~SamplingThread() override;
+
+  // Get task runner that is usable from the outside.
+  scoped_refptr<SingleThreadTaskRunner> GetOrCreateTaskRunnerForAdd();
+  scoped_refptr<SingleThreadTaskRunner> GetTaskRunner(
+      ThreadExecutionState* out_state);
+
+  // Get task runner that is usable from the sampling thread itself.
+  scoped_refptr<SingleThreadTaskRunner> GetTaskRunnerOnSamplingThread();
+
+  // Finishes a collection and reports collected data via callback. The
+  // collection's |finished| waitable event will be signaled. The |collection|
+  // should already have been removed from |active_collections_| by the caller,
+  // as this is needed to avoid flakiness in unit tests.
+  void FinishCollection(CollectionContext* collection);
+
+  // Records a single sample of a collection.
+  void RecordSample(CollectionContext* collection);
+
+  // Check if the sampling thread is idle and begin a shutdown if it is.
+  void ScheduleShutdownIfIdle();
+
+  // These methods are tasks that get posted to the internal message queue.
+  void AddCollectionTask(std::unique_ptr<CollectionContext> collection);
+  void RemoveCollectionTask(int id);
+  void PerformCollectionTask(int id);
+  void ShutdownTask(int add_events);
+
+  // Updates the |next_sample_time| time based on configured parameters.
+  // Returns true if there is a next sample or false if sampling is complete.
+  bool UpdateNextSampleTime(CollectionContext* collection);
+
+  // Thread:
+  void CleanUp() override;
+
+  // A stack-buffer used by the native sampler for its work. This buffer can
+  // be re-used for multiple native sampler objects so long as the API calls
+  // that take it are not called concurrently.
+  std::unique_ptr<NativeStackSampler::StackBuffer> stack_buffer_;
+
+  // A map of IDs to collection contexts. Because this class is a singleton
+  // that is never destroyed, context objects will never be destructed except
+  // by explicit action. Thus, it's acceptable to pass unretained pointers
+  // to these objects when posting tasks.
+  std::map<int, std::unique_ptr<CollectionContext>> active_collections_;
+
+  // State maintained about the current execution (or non-execution) of
+  // the thread. This state must always be accessed while holding the
+  // lock. A copy of the task-runner is maintained here for use by any
+  // calling thread; this is necessary because Thread's accessor for it is
+  // not itself thread-safe. The lock is also used to order calls to the
+  // Thread API (Start, Stop, StopSoon, & DetachFromSequence) so that
+  // multiple threads may make those calls.
+  Lock thread_execution_state_lock_;  // Protects all thread_execution_state_*
+  ThreadExecutionState thread_execution_state_ = NOT_STARTED;
+  scoped_refptr<SingleThreadTaskRunner> thread_execution_state_task_runner_;
+  bool thread_execution_state_disable_idle_shutdown_for_testing_ = false;
+
+  // A counter that notes adds of new collection requests. It is incremented
+  // when changes occur so that delayed shutdown tasks are able to detect if
+  // something new has happened while they were waiting. Like all
+  // "execution_state" vars, this must be accessed while holding
+  // |thread_execution_state_lock_|.
+  int thread_execution_state_add_events_ = 0;
+
+  DISALLOW_COPY_AND_ASSIGN(SamplingThread);
+};
+
+// static
+void StackSamplingProfiler::SamplingThread::TestAPI::Reset() {
+  SamplingThread* sampler = SamplingThread::GetInstance();
+
+  ThreadExecutionState state;
+  {
+    AutoLock lock(sampler->thread_execution_state_lock_);
+    state = sampler->thread_execution_state_;
+    DCHECK(sampler->active_collections_.empty());
+  }
+
+  // Stop the thread and wait for it to exit. This has to be done by the
+  // thread itself because it has taken ownership of its own lifetime.
+  if (state == RUNNING) {
+    ShutdownAssumingIdle(false);
+    state = EXITING;
+  }
+  // Make sure thread is cleaned up since state will be reset to NOT_STARTED.
+  if (state == EXITING)
+    sampler->Stop();
+
+  // Reset internal variables to the just-initialized state.
+  {
+    AutoLock lock(sampler->thread_execution_state_lock_);
+    sampler->thread_execution_state_ = NOT_STARTED;
+    sampler->thread_execution_state_task_runner_ = nullptr;
+    sampler->thread_execution_state_disable_idle_shutdown_for_testing_ = false;
+    sampler->thread_execution_state_add_events_ = 0;
+  }
+}
+
+// static
+void StackSamplingProfiler::SamplingThread::TestAPI::DisableIdleShutdown() {
+  SamplingThread* sampler = SamplingThread::GetInstance();
+
+  {
+    AutoLock lock(sampler->thread_execution_state_lock_);
+    sampler->thread_execution_state_disable_idle_shutdown_for_testing_ = true;
+  }
+}
+
+// static
+void StackSamplingProfiler::SamplingThread::TestAPI::ShutdownAssumingIdle(
+    bool simulate_intervening_add) {
+  SamplingThread* sampler = SamplingThread::GetInstance();
+
+  ThreadExecutionState state;
+  scoped_refptr<SingleThreadTaskRunner> task_runner =
+      sampler->GetTaskRunner(&state);
+  DCHECK_EQ(RUNNING, state);
+  DCHECK(task_runner);
+
+  int add_events;
+  {
+    AutoLock lock(sampler->thread_execution_state_lock_);
+    add_events = sampler->thread_execution_state_add_events_;
+    if (simulate_intervening_add)
+      ++sampler->thread_execution_state_add_events_;
+  }
+
+  WaitableEvent executed(WaitableEvent::ResetPolicy::MANUAL,
+                         WaitableEvent::InitialState::NOT_SIGNALED);
+  // PostTaskAndReply won't work because the thread and its associated
+  // message loop may be shut down.
+  task_runner->PostTask(
+      FROM_HERE, BindOnce(&ShutdownTaskAndSignalEvent, Unretained(sampler),
+                          add_events, Unretained(&executed)));
+  executed.Wait();
+}
+
+// static
+void StackSamplingProfiler::SamplingThread::TestAPI::ShutdownTaskAndSignalEvent(
+    SamplingThread* sampler,
+    int add_events,
+    WaitableEvent* event) {
+  sampler->ShutdownTask(add_events);
+  event->Signal();
+}
+
+AtomicSequenceNumber
+    StackSamplingProfiler::SamplingThread::CollectionContext::next_profiler_id;
+
+StackSamplingProfiler::SamplingThread::SamplingThread()
+    : Thread("StackSamplingProfiler") {}
+
+StackSamplingProfiler::SamplingThread::~SamplingThread() = default;
+
+StackSamplingProfiler::SamplingThread*
+StackSamplingProfiler::SamplingThread::GetInstance() {
+  return Singleton<SamplingThread, LeakySingletonTraits<SamplingThread>>::get();
+}
+
+int StackSamplingProfiler::SamplingThread::Add(
+    std::unique_ptr<CollectionContext> collection) {
+  // This is not to be run on the sampling thread.
+
+  int id = collection->profiler_id;
+  scoped_refptr<SingleThreadTaskRunner> task_runner =
+      GetOrCreateTaskRunnerForAdd();
+
+  task_runner->PostTask(
+      FROM_HERE, BindOnce(&SamplingThread::AddCollectionTask, Unretained(this),
+                          std::move(collection)));
+
+  return id;
+}
+
+void StackSamplingProfiler::SamplingThread::Remove(int id) {
+  // This is not to be run on the sampling thread.
+
+  ThreadExecutionState state;
+  scoped_refptr<SingleThreadTaskRunner> task_runner = GetTaskRunner(&state);
+  if (state != RUNNING)
+    return;
+  DCHECK(task_runner);
+
+  // This can fail if the thread exits between the acquisition of the task
+  // runner above and the call below. In that case, however, everything has
+  // stopped, so there's no need to try to stop it.
+  task_runner->PostTask(
+      FROM_HERE,
+      BindOnce(&SamplingThread::RemoveCollectionTask, Unretained(this), id));
+}
+
+scoped_refptr<SingleThreadTaskRunner>
+StackSamplingProfiler::SamplingThread::GetOrCreateTaskRunnerForAdd() {
+  AutoLock lock(thread_execution_state_lock_);
+
+  // The "add events" count is incremented here, which is why this method must
+  // only be called from Add().
+  ++thread_execution_state_add_events_;
+
+  if (thread_execution_state_ == RUNNING) {
+    DCHECK(thread_execution_state_task_runner_);
+    // This shouldn't be called from the sampling thread as it's inefficient.
+    // Use GetTaskRunnerOnSamplingThread() instead.
+    DCHECK_NE(GetThreadId(), PlatformThread::CurrentId());
+    return thread_execution_state_task_runner_;
+  }
+
+  if (thread_execution_state_ == EXITING) {
+    // StopSoon() was previously called to shut down the thread
+    // asynchronously. Stop() must now be called before calling Start() again
+    // to reset the thread state.
+    //
+    // We must allow blocking here to satisfy the Thread implementation, but in
+    // practice the Stop() call is unlikely to actually block. For this to
+    // happen a new profiling request would have to be made within the narrow
+    // window between StopSoon() and thread exit following the end of the 60
+    // second idle period.
+    ScopedAllowBlocking allow_blocking;
+    Stop();
+  }
+
+  DCHECK(!stack_buffer_);
+  stack_buffer_ = NativeStackSampler::CreateStackBuffer();
+
+  // The thread is not running. Start it and get the associated task runner.
+  // The runner has to be saved for future use because, though it can be used
+  // from any thread, it can be acquired via task_runner() only on the created
+  // thread or the thread that created it (i.e. this thread), for
+  // thread-safety reasons. SamplingThread works around this restriction by
+  // gating access to the saved runner with |thread_execution_state_lock_|.
+  Start();
+  thread_execution_state_ = RUNNING;
+  thread_execution_state_task_runner_ = Thread::task_runner();
+
+  // Detach the sampling thread from the "sequence" (i.e. thread) that
+  // started it so that it can be self-managed or stopped by another thread.
+  DetachFromSequence();
+
+  return thread_execution_state_task_runner_;
+}
+
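+// An illustrative summary of the execution-state transitions implemented in
+// GetOrCreateTaskRunnerForAdd() above and ShutdownTask() below: NOT_STARTED
+// becomes RUNNING on the first Add(); RUNNING becomes EXITING when an idle
+// shutdown task runs StopSoon(); EXITING becomes RUNNING when a later Add()
+// calls Stop() and then Start() again. All transitions happen while holding
+// |thread_execution_state_lock_|.
+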
+scoped_refptr<SingleThreadTaskRunner>
+StackSamplingProfiler::SamplingThread::GetTaskRunner(
+    ThreadExecutionState* out_state) {
+  AutoLock lock(thread_execution_state_lock_);
+  if (out_state)
+    *out_state = thread_execution_state_;
+  if (thread_execution_state_ == RUNNING) {
+    // This shouldn't be called from the sampling thread as it's inefficient.
+    // Use GetTaskRunnerOnSamplingThread() instead.
+    DCHECK_NE(GetThreadId(), PlatformThread::CurrentId());
+    DCHECK(thread_execution_state_task_runner_);
+  } else {
+    DCHECK(!thread_execution_state_task_runner_);
+  }
+
+  return thread_execution_state_task_runner_;
+}
+
+scoped_refptr<SingleThreadTaskRunner>
+StackSamplingProfiler::SamplingThread::GetTaskRunnerOnSamplingThread() {
+  // This should be called only from the sampling thread as it has limited
+  // accessibility.
+  DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
+
+  return Thread::task_runner();
+}
+
+void StackSamplingProfiler::SamplingThread::FinishCollection(
+    CollectionContext* collection) {
+  DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
+  DCHECK_EQ(0u, active_collections_.count(collection->profiler_id));
+
+  // If there is no duration for the final profile (because it was stopped),
+  // calculate it now.
+  if (!collection->profiles.empty() &&
+      collection->profiles.back().profile_duration == TimeDelta()) {
+    collection->profiles.back().profile_duration =
+        Time::Now() - collection->profile_start_time +
+        collection->params.sampling_interval;
+  }
+
+  // Extract some information so the callback and event-signalling can still
+  // be done after the collection has been removed from the list of "active"
+  // ones. This allows the controlling object (and tests using it) to be
+  // confident that the collection is fully finished when those things occur.
+  const CompletedCallback callback = collection->callback;
+  CallStackProfiles profiles = std::move(collection->profiles);
+  WaitableEvent* finished = collection->finished;
+
+  // Run the associated callback, passing the collected profiles.
+  callback.Run(std::move(profiles));
+
+  // Signal that this collection is finished.
+  finished->Signal();
+}
+
+void StackSamplingProfiler::SamplingThread::RecordSample(
+    CollectionContext* collection) {
+  DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
+  DCHECK(collection->native_sampler);
+
+  // If this is the first sample of a burst, a new Profile needs to be created
+  // and filled.
+  if (collection->sample == 0) {
+    collection->profiles.push_back(CallStackProfile());
+    CallStackProfile& profile = collection->profiles.back();
+    profile.sampling_period = collection->params.sampling_interval;
+    collection->profile_start_time = Time::Now();
+    collection->native_sampler->ProfileRecordingStarting(&profile.modules);
+  }
+
+  // The currently active profile being captured.
+  CallStackProfile& profile = collection->profiles.back();
+
+  // Record a single sample.
+  profile.samples.push_back(Sample());
+  collection->native_sampler->RecordStackSample(stack_buffer_.get(),
+                                                &profile.samples.back());
+
+  // If this is the last sample of a burst, record the total time.
+  if (collection->sample == collection->params.samples_per_burst - 1) {
+    profile.profile_duration = Time::Now() - collection->profile_start_time +
+                               collection->params.sampling_interval;
+    collection->native_sampler->ProfileRecordingStopped(stack_buffer_.get());
+  }
+}
+
+void StackSamplingProfiler::SamplingThread::ScheduleShutdownIfIdle() {
+  DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
+
+  if (!active_collections_.empty())
+    return;
+
+  int add_events;
+  {
+    AutoLock lock(thread_execution_state_lock_);
+    if (thread_execution_state_disable_idle_shutdown_for_testing_)
+      return;
+    add_events = thread_execution_state_add_events_;
+  }
+
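+  // Post a delayed shutdown task carrying a snapshot of the current "add
+  // events" count. If another collection is added while the task is pending,
+  // the count will no longer match and ShutdownTask will abort the shutdown.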
+  GetTaskRunnerOnSamplingThread()->PostDelayedTask(
+      FROM_HERE,
+      BindOnce(&SamplingThread::ShutdownTask, Unretained(this), add_events),
+      TimeDelta::FromSeconds(60));
+}
+
+void StackSamplingProfiler::SamplingThread::AddCollectionTask(
+    std::unique_ptr<CollectionContext> collection) {
+  DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
+
+  const int profiler_id = collection->profiler_id;
+  const TimeDelta initial_delay = collection->params.initial_delay;
+
+  active_collections_.insert(
+      std::make_pair(profiler_id, std::move(collection)));
+
+  GetTaskRunnerOnSamplingThread()->PostDelayedTask(
+      FROM_HERE,
+      BindOnce(&SamplingThread::PerformCollectionTask, Unretained(this),
+               profiler_id),
+      initial_delay);
+
+  // Another increment of "add events" serves to invalidate any pending
+  // shutdown tasks that may have been initiated between the Add() and this
+  // task running.
+  {
+    AutoLock lock(thread_execution_state_lock_);
+    ++thread_execution_state_add_events_;
+  }
+}
+
+void StackSamplingProfiler::SamplingThread::RemoveCollectionTask(int id) {
+  DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
+
+  auto found = active_collections_.find(id);
+  if (found == active_collections_.end())
+    return;
+
+  // Remove |collection| from |active_collections_|.
+  std::unique_ptr<CollectionContext> collection = std::move(found->second);
+  size_t count = active_collections_.erase(id);
+  DCHECK_EQ(1U, count);
+
+  FinishCollection(collection.get());
+  ScheduleShutdownIfIdle();
+}
+
+void StackSamplingProfiler::SamplingThread::PerformCollectionTask(int id) {
+  DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
+
+  auto found = active_collections_.find(id);
+
+  // The collection won't be found if it has already been removed (stopped).
+  if (found == active_collections_.end())
+    return;
+
+  CollectionContext* collection = found->second.get();
+
+  // Handle first-run with no "next time".
+  if (collection->next_sample_time == Time())
+    collection->next_sample_time = Time::Now();
+
+  // Do the collection of a single sample.
+  RecordSample(collection);
+
+  // Update the time of the next sample recording.
+  const bool collection_finished = !UpdateNextSampleTime(collection);
+  if (!collection_finished) {
+    bool success = GetTaskRunnerOnSamplingThread()->PostDelayedTask(
+        FROM_HERE,
+        BindOnce(&SamplingThread::PerformCollectionTask, Unretained(this), id),
+        std::max(collection->next_sample_time - Time::Now(), TimeDelta()));
+    DCHECK(success);
+    return;
+  }
+
+  // Take ownership of |collection| and remove it from the map so that the
+  // collection can be finished while no longer considered "active".
+  std::unique_ptr<CollectionContext> owned_collection =
+      std::move(found->second);
+  size_t count = active_collections_.erase(id);
+  DCHECK_EQ(1U, count);
+
+  // All capturing has completed so finish the collection.
+  FinishCollection(collection);
+  ScheduleShutdownIfIdle();
+}
+
+void StackSamplingProfiler::SamplingThread::ShutdownTask(int add_events) {
+  DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
+
+  // Holding this lock ensures that any attempt to start another job will
+  // get postponed until |thread_execution_state_| is updated, thus eliminating
+  // the race in starting a new thread while the previous one is exiting.
+  AutoLock lock(thread_execution_state_lock_);
+
+  // If the current count of creation requests doesn't match the passed count
+  // then other tasks have been created since this was posted. Abort shutdown.
+  if (thread_execution_state_add_events_ != add_events)
+    return;
+
+  // There can be no new AddCollectionTasks at this point because creating
+  // those always increments "add events". There may be other requests, like
+  // Remove, but it's okay to schedule the thread to stop once they've been
+  // executed (i.e. "soon").
+  DCHECK(active_collections_.empty());
+  StopSoon();
+
+  // StopSoon will have set the owning sequence (again) so it must be detached
+  // (again) in order for Stop/Start to be called (again) should more work
+  // come in. Holding the |thread_execution_state_lock_| ensures the necessary
+  // happens-after with regard to this detach and future Thread API calls.
+  DetachFromSequence();
+
+  // Set |thread_execution_state_| so the thread will be restarted when new
+  // work comes in. Clear |thread_execution_state_task_runner_| to avoid
+  // confusion.
+  thread_execution_state_ = EXITING;
+  thread_execution_state_task_runner_ = nullptr;
+  stack_buffer_.reset();
+}
+
+bool StackSamplingProfiler::SamplingThread::UpdateNextSampleTime(
+    CollectionContext* collection) {
+  // This will keep a consistent average interval between samples but will
+  // result in a constant series of acquisitions, thus nearly locking out the
+  // target thread, if the interval is smaller than the time it takes to
+  // actually acquire the sample. Anything sampling that quickly is going to
+  // be a problem anyway, so don't worry about it.
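+  //
+  // For example, with samples_per_burst = 2 and bursts = 2, successive calls
+  // advance |next_sample_time| by the sampling interval, then the burst
+  // interval, then the sampling interval again; the fourth call returns false
+  // to end the collection.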
+  if (++collection->sample < collection->params.samples_per_burst) {
+    collection->next_sample_time += collection->params.sampling_interval;
+    return true;
+  }
+
+  if (++collection->burst < collection->params.bursts) {
+    collection->sample = 0;
+    collection->next_sample_time += collection->params.burst_interval;
+    return true;
+  }
+
+  return false;
+}
+
+void StackSamplingProfiler::SamplingThread::CleanUp() {
+  DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
+
+  // There should be no collections remaining when the thread stops.
+  DCHECK(active_collections_.empty());
+
+  // Let the parent clean up.
+  Thread::CleanUp();
+}
+
+// StackSamplingProfiler ------------------------------------------------------
+
+// static
+void StackSamplingProfiler::TestAPI::Reset() {
+  SamplingThread::TestAPI::Reset();
+  ResetAnnotations();
+}
+
+// static
+void StackSamplingProfiler::TestAPI::ResetAnnotations() {
+  subtle::NoBarrier_Store(&process_milestones_, 0u);
+}
+
+// static
+bool StackSamplingProfiler::TestAPI::IsSamplingThreadRunning() {
+  return SamplingThread::GetInstance()->IsRunning();
+}
+
+// static
+void StackSamplingProfiler::TestAPI::DisableIdleShutdown() {
+  SamplingThread::TestAPI::DisableIdleShutdown();
+}
+
+// static
+void StackSamplingProfiler::TestAPI::PerformSamplingThreadIdleShutdown(
+    bool simulate_intervening_start) {
+  SamplingThread::TestAPI::ShutdownAssumingIdle(simulate_intervening_start);
+}
+
+subtle::Atomic32 StackSamplingProfiler::process_milestones_ = 0;
+
+StackSamplingProfiler::StackSamplingProfiler(
+    const SamplingParams& params,
+    const CompletedCallback& callback,
+    NativeStackSamplerTestDelegate* test_delegate)
+    : StackSamplingProfiler(base::PlatformThread::CurrentId(),
+                            params,
+                            callback,
+                            test_delegate) {}
+
+StackSamplingProfiler::StackSamplingProfiler(
+    PlatformThreadId thread_id,
+    const SamplingParams& params,
+    const CompletedCallback& callback,
+    NativeStackSamplerTestDelegate* test_delegate)
+    : thread_id_(thread_id),
+      params_(params),
+      completed_callback_(callback),
+      // The event starts "signaled" so code knows it's safe to start the
+      // thread, and "manual" so that it can be waited upon in multiple places.
+      profiling_inactive_(kResetPolicy, WaitableEvent::InitialState::SIGNALED),
+      profiler_id_(NULL_PROFILER_ID),
+      test_delegate_(test_delegate) {}
+
+StackSamplingProfiler::~StackSamplingProfiler() {
+  // Stop returns immediately but the shutdown runs asynchronously. There is a
+  // non-zero probability that one more sample will be taken after this call
+  // returns.
+  Stop();
+
+  // The behavior of sampling a thread that has exited is undefined and could
+  // cause Bad Things(tm) to occur. The safety model provided by this class is
+  // that an instance of this object is expected to live at least as long as
+  // the thread it is sampling. However, because the sampling is performed
+  // asynchronously by the SamplingThread, there is no way to guarantee this
+  // is true without waiting for it to signal that it has finished.
+  //
+  // The wait time should, at most, be only as long as it takes to collect one
+  // sample (~200us) or none at all if sampling has already completed.
+  ThreadRestrictions::ScopedAllowWait allow_wait;
+  profiling_inactive_.Wait();
+}
+
+void StackSamplingProfiler::Start() {
+  if (completed_callback_.is_null())
+    return;
+
+  std::unique_ptr<NativeStackSampler> native_sampler =
+      NativeStackSampler::Create(thread_id_, &RecordAnnotations,
+                                 test_delegate_);
+
+  if (!native_sampler)
+    return;
+
+  // The IsSignaled() check below requires that the WaitableEvent be manually
+  // reset, to avoid signaling the event in IsSignaled() itself.
+  static_assert(kResetPolicy == WaitableEvent::ResetPolicy::MANUAL,
+                "The reset policy must be set to MANUAL");
+
+  // If a previous profiling phase is still winding down, wait for it to
+  // complete. We can't use task posting for this coordination because the
+  // thread owning the profiler may not have a message loop.
+  if (!profiling_inactive_.IsSignaled())
+    profiling_inactive_.Wait();
+  profiling_inactive_.Reset();
+
+  DCHECK_EQ(NULL_PROFILER_ID, profiler_id_);
+  profiler_id_ = SamplingThread::GetInstance()->Add(
+      std::make_unique<SamplingThread::CollectionContext>(
+          SamplingThread::CollectionContext::next_profiler_id.GetNext(),
+          thread_id_, params_, completed_callback_, &profiling_inactive_,
+          std::move(native_sampler)));
+  DCHECK_NE(NULL_PROFILER_ID, profiler_id_);
+}
+
+void StackSamplingProfiler::Stop() {
+  SamplingThread::GetInstance()->Remove(profiler_id_);
+  profiler_id_ = NULL_PROFILER_ID;
+}
+
+// static
+void StackSamplingProfiler::SetProcessMilestone(int milestone) {
+  DCHECK_LE(0, milestone);
+  DCHECK_GT(static_cast<int>(sizeof(process_milestones_) * 8), milestone);
+  DCHECK_EQ(0, subtle::NoBarrier_Load(&process_milestones_) & (1 << milestone));
+  ChangeAtomicFlags(&process_milestones_, 1 << milestone, 0);
+}
+
+// static
+void StackSamplingProfiler::RecordAnnotations(Sample* sample) {
+  // The code inside this method must not do anything that could acquire a
+  // mutex, including allocating memory (which includes LOG messages) because
+  // that mutex could be held by a stopped thread, thus resulting in deadlock.
+  sample->process_milestones = subtle::NoBarrier_Load(&process_milestones_);
+}
+
+// StackSamplingProfiler::Frame global functions ------------------------------
+
+bool operator==(const StackSamplingProfiler::Module& a,
+                const StackSamplingProfiler::Module& b) {
+  return a.base_address == b.base_address && a.id == b.id &&
+      a.filename == b.filename;
+}
+
+bool operator==(const StackSamplingProfiler::Sample& a,
+                const StackSamplingProfiler::Sample& b) {
+  return a.process_milestones == b.process_milestones && a.frames == b.frames;
+}
+
+bool operator!=(const StackSamplingProfiler::Sample& a,
+                const StackSamplingProfiler::Sample& b) {
+  return !(a == b);
+}
+
+bool operator<(const StackSamplingProfiler::Sample& a,
+               const StackSamplingProfiler::Sample& b) {
+  if (a.process_milestones < b.process_milestones)
+    return true;
+  if (a.process_milestones > b.process_milestones)
+    return false;
+
+  return a.frames < b.frames;
+}
+
+bool operator==(const StackSamplingProfiler::Frame& a,
+                const StackSamplingProfiler::Frame& b) {
+  return a.instruction_pointer == b.instruction_pointer &&
+      a.module_index == b.module_index;
+}
+
+bool operator<(const StackSamplingProfiler::Frame& a,
+               const StackSamplingProfiler::Frame& b) {
+  return (a.module_index < b.module_index) ||
+      (a.module_index == b.module_index &&
+       a.instruction_pointer < b.instruction_pointer);
+}
+
+}  // namespace base
diff --git a/base/profiler/stack_sampling_profiler.h b/base/profiler/stack_sampling_profiler.h
new file mode 100644
index 0000000..2f9ade5
--- /dev/null
+++ b/base/profiler/stack_sampling_profiler.h
@@ -0,0 +1,331 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_PROFILER_STACK_SAMPLING_PROFILER_H_
+#define BASE_PROFILER_STACK_SAMPLING_PROFILER_H_
+
+#include <stddef.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/files/file_path.h"
+#include "base/macros.h"
+#include "base/strings/string16.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
+
+namespace base {
+
+class NativeStackSampler;
+class NativeStackSamplerTestDelegate;
+
+// StackSamplingProfiler periodically stops a thread to sample its stack, for
+// the purpose of collecting information about which code paths are
+// executing. This information is used in aggregate by UMA to identify hot
+// and/or janky code paths.
+//
+// Sample StackSamplingProfiler usage:
+//
+//   // Create and customize params as desired.
+//   base::StackSamplingProfiler::SamplingParams params;
+//
+//   // Provide a completed callback. To process the profiles within Chrome
+//   // rather than via UMA, use a custom callback that is safe to invoke on
+//   // any thread:
+//   base::StackSamplingProfiler::CompletedCallback
+//       thread_safe_callback = ...;
+//
+//   // Any thread's ID may be passed as the target.
+//   base::StackSamplingProfiler profiler(base::PlatformThread::CurrentId(),
+//                                        params, thread_safe_callback);
+//
+//   profiler.Start();
+//   // ... work being done on the target thread here ...
+//   profiler.Stop();  // optional; stops collection before it completes per params
+//
+// The default SamplingParams causes stacks to be recorded in a single burst
+// at a 10Hz sampling rate (one sample every 100ms) for a total of 30 seconds.
+// All of these parameters may be altered as desired.
+//
+// When all call stack profiles are complete, or the profiler is stopped, the
+// completed callback is called from a thread created by the profiler with the
+// collected profiles.
+//
+// The results of the profiling are passed to the completed callback and consist
+// of a vector of CallStackProfiles. Each CallStackProfile corresponds to a
+// burst as specified in SamplingParams and contains a set of Samples and
+// Modules. One Sample corresponds to a single recorded stack, and the Modules
+// record those modules associated with the recorded stack frames.
+class BASE_EXPORT StackSamplingProfiler {
+ public:
+  // Module represents the module (DLL or exe) corresponding to a stack frame.
+  struct BASE_EXPORT Module {
+    Module();
+    Module(uintptr_t base_address,
+           const std::string& id,
+           const FilePath& filename);
+    ~Module();
+
+    // Points to the base address of the module.
+    uintptr_t base_address;
+
+    // An opaque binary string that uniquely identifies a particular program
+    // version with high probability. This is parsed from headers of the loaded
+    // module.
+    // For binaries generated by GNU tools:
+    //   Contents of the .note.gnu.build-id field.
+    // On Windows:
+    //   GUID + AGE in the debug image headers of a module.
+    std::string id;
+
+    // The filename of the module.
+    FilePath filename;
+  };
+
+  // Frame represents an individual sampled stack frame with module information.
+  struct BASE_EXPORT Frame {
+    // Identifies an unknown module.
+    static const size_t kUnknownModuleIndex = static_cast<size_t>(-1);
+
+    Frame(uintptr_t instruction_pointer, size_t module_index);
+    ~Frame();
+
+    // Default constructor to satisfy IPC macros. Do not use explicitly.
+    Frame();
+
+    // The sampled instruction pointer within the function.
+    uintptr_t instruction_pointer;
+
+    // Index of the module in CallStackProfile::modules. We don't represent
+    // module state directly here to save space.
+    size_t module_index;
+  };
+
+  // Sample represents a set of stack frames with some extra information.
+  struct BASE_EXPORT Sample {
+    Sample();
+    Sample(const Sample& sample);
+    ~Sample();
+
+    // These constructors are used only during testing.
+    Sample(const Frame& frame);
+    Sample(const std::vector<Frame>& frames);
+
+    // The entire stack frame when the sample is taken.
+    std::vector<Frame> frames;
+
+    // A bit-field indicating which process milestones have passed. This can be
+    // used to tell where in the process lifetime the samples are taken. Just
+    // as a "lifetime" can only move forward, these bits mark the milestones of
+    // the processes life as they occur. Bits can be set but never reset. The
+    // actual definition of the individual bits is left to the user of this
+    // module.
+    uint32_t process_milestones = 0;
+  };
+
+  // CallStackProfile represents a set of samples.
+  struct BASE_EXPORT CallStackProfile {
+    CallStackProfile();
+    CallStackProfile(CallStackProfile&& other);
+    ~CallStackProfile();
+
+    CallStackProfile& operator=(CallStackProfile&& other);
+
+    CallStackProfile CopyForTesting() const;
+
+    std::vector<Module> modules;
+    std::vector<Sample> samples;
+
+    // Duration of this profile.
+    TimeDelta profile_duration;
+
+    // Time between samples.
+    TimeDelta sampling_period;
+
+   private:
+    // Copying is possible but expensive so disallow it except for internal use
+    // (i.e. CopyForTesting); use std::move instead.
+    CallStackProfile(const CallStackProfile& other);
+
+    DISALLOW_ASSIGN(CallStackProfile);
+  };
+
+  using CallStackProfiles = std::vector<CallStackProfile>;
+
+  // Represents parameters that configure the sampling.
+  struct BASE_EXPORT SamplingParams {
+    // Time to delay before first samples are taken.
+    TimeDelta initial_delay = TimeDelta::FromMilliseconds(0);
+
+    // Number of sampling bursts to perform.
+    int bursts = 1;
+
+    // Interval between sampling bursts. This is the desired duration from the
+    // start of one burst to the start of the next burst.
+    TimeDelta burst_interval = TimeDelta::FromSeconds(10);
+
+    // Number of samples to record per burst.
+    int samples_per_burst = 300;
+
+    // Interval between samples during a sampling burst. This is the desired
+    // duration from the start of one sample to the start of the next sample.
+    TimeDelta sampling_interval = TimeDelta::FromMilliseconds(100);
+  };
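+
+  // For illustration, a configuration that would record two bursts of ten
+  // samples each at 20Hz (one sample every 50ms):
+  //
+  //   SamplingParams params;
+  //   params.bursts = 2;
+  //   params.samples_per_burst = 10;
+  //   params.sampling_interval = TimeDelta::FromMilliseconds(50);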
+
+  // Testing support. These methods are static because they interact with the
+  // sampling thread, a singleton used by all StackSamplingProfiler objects.
+  // These methods can only be called by the same thread that started the
+  // sampling.
+  class BASE_EXPORT TestAPI {
+   public:
+    // Resets the internal state to that of a fresh start. This is necessary
+    // so that tests don't inherit state from previous tests.
+    static void Reset();
+
+    // Resets internal annotations (like process phase) to initial values.
+    static void ResetAnnotations();
+
+    // Returns whether the sampling thread is currently running or not.
+    static bool IsSamplingThreadRunning();
+
+    // Disables inherent idle-shutdown behavior.
+    static void DisableIdleShutdown();
+
+    // Initiates an idle shutdown task, as though the idle timer had expired,
+    // causing the thread to exit. There is no "idle" check so this must be
+    // called only when all sampling tasks have completed. This blocks until
+    // the task has been executed, though the actual stopping of the thread
+    // still happens asynchronously. Watch IsSamplingThreadRunning() to know
+    // when the thread has exited. If |simulate_intervening_start| is true then
+    // this method will make it appear to the shutdown task that a new profiler
+    // was started between when the idle-shutdown was initiated and when it
+    // runs.
+    static void PerformSamplingThreadIdleShutdown(
+        bool simulate_intervening_start);
+  };
+
+  // The callback type used to collect completed profiles. The passed |profiles|
+  // are move-only. Other threads, including the UI thread, may block on
+  // callback completion so this should run as quickly as possible.
+  //
+  // IMPORTANT NOTE: The callback is invoked on a thread the profiler
+  // constructs, rather than on the thread used to construct the profiler and
+  // set the callback, and thus the callback must be callable on any thread. For
+  // threads with message loops that create StackSamplingProfilers, posting a
+  // task to the message loop with the moved (i.e. std::move) profiles is a
+  // simple thread-safe callback implementation.
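+  //
+  // A minimal sketch of that pattern (the names here are illustrative, not
+  // part of this API), assuming the constructing thread has a task runner:
+  //
+  //   void OnCompleted(scoped_refptr<SingleThreadTaskRunner> task_runner,
+  //                    StackSamplingProfiler::CallStackProfiles profiles) {
+  //     task_runner->PostTask(
+  //         FROM_HERE, BindOnce(&ProcessProfiles, std::move(profiles)));
+  //   }
+  //   // ... bound via Bind(&OnCompleted, ThreadTaskRunnerHandle::Get()).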
+  using CompletedCallback = Callback<void(CallStackProfiles)>;
+
+  // Creates a profiler for the CURRENT thread that sends completed profiles
+  // to |callback|. An optional |test_delegate| can be supplied by tests.
+  // The caller must ensure that this object gets destroyed before the current
+  // thread exits.
+  StackSamplingProfiler(
+      const SamplingParams& params,
+      const CompletedCallback& callback,
+      NativeStackSamplerTestDelegate* test_delegate = nullptr);
+
+  // Creates a profiler for ANOTHER thread that sends completed profiles to
+  // |callback|. An optional |test_delegate| can be supplied by tests.
+  //
+  // IMPORTANT: The caller must ensure that the thread being sampled does not
+  // exit before this object gets destructed or Bad Things(tm) may occur.
+  StackSamplingProfiler(
+      PlatformThreadId thread_id,
+      const SamplingParams& params,
+      const CompletedCallback& callback,
+      NativeStackSamplerTestDelegate* test_delegate = nullptr);
+
+  // Stops any profiling currently taking place before destroying the profiler.
+  // This will block until the callback has been run if profiling has started
+  // but not already finished.
+  ~StackSamplingProfiler();
+
+  // Initializes the profiler and starts sampling. Might block on a
+  // WaitableEvent if this StackSamplingProfiler was previously started and
+  // recently stopped, while the previous profiling phase winds down.
+  void Start();
+
+  // Stops the profiler and any ongoing sampling. This method will return
+  // immediately with the callback being run asynchronously. At most one
+  // more stack sample will be taken after this method returns. Calling this
+  // function is optional; if not invoked profiling terminates when all the
+  // profiling bursts specified in the SamplingParams are completed or the
+  // profiler object is destroyed, whichever occurs first.
+  void Stop();
+
+  // Set the current system state that is recorded with each captured stack
+  // frame. This is thread-safe so can be called from anywhere. The parameter
+  // value should be from an enumeration of the appropriate type with values
+  // ranging from 0 to 31, inclusive. This sets bits within the Sample field
+  // |process_milestones|. The actual meanings of these bits are defined
+  // (globally) by the caller(s).
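+  //
+  // For example (the milestone meanings here are purely illustrative):
+  //
+  //   enum ProcessMilestone { MAIN_LOOP_START = 0, FIRST_PAINT = 1 };
+  //   StackSamplingProfiler::SetProcessMilestone(FIRST_PAINT);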
+  static void SetProcessMilestone(int milestone);
+
+ private:
+  friend class TestAPI;
+
+  // SamplingThread is a separate thread used to suspend and sample stacks from
+  // the target thread.
+  class SamplingThread;
+
+  // Adds annotations to a Sample.
+  static void RecordAnnotations(Sample* sample);
+
+  // This global variable holds the current system state and is recorded with
+  // every captured sample. The recording is done on a separate thread, which
+  // is why updates to it must be atomic. A PostTask to move the updates to
+  // that thread would skew the timing, and a lock could result in deadlock if
+  // the thread making a change was also being profiled and got stopped.
+  static subtle::Atomic32 process_milestones_;
+
+  // The thread whose stack will be sampled.
+  PlatformThreadId thread_id_;
+
+  const SamplingParams params_;
+
+  const CompletedCallback completed_callback_;
+
+  // This starts "signaled", is reset when sampling begins, and is signaled
+  // when that sampling is complete and the callback done.
+  WaitableEvent profiling_inactive_;
+
+  // Object that does the native sampling. This is created in Start() and
+  // passed to the sampling thread for use during the collection.
+  std::unique_ptr<NativeStackSampler> native_sampler_;
+
+  // An ID uniquely identifying this profiler to the sampling thread. This
+  // will be an internal "null" value when no collection has been started.
+  int profiler_id_;
+
+  // Stored until it can be passed to the NativeStackSampler created in Start().
+  NativeStackSamplerTestDelegate* const test_delegate_;
+
+  DISALLOW_COPY_AND_ASSIGN(StackSamplingProfiler);
+};
+
+// These operators permit types to be compared and used in a map of Samples, as
+// done in tests and by the metrics provider code.
+BASE_EXPORT bool operator==(const StackSamplingProfiler::Module& a,
+                            const StackSamplingProfiler::Module& b);
+BASE_EXPORT bool operator==(const StackSamplingProfiler::Sample& a,
+                            const StackSamplingProfiler::Sample& b);
+BASE_EXPORT bool operator!=(const StackSamplingProfiler::Sample& a,
+                            const StackSamplingProfiler::Sample& b);
+BASE_EXPORT bool operator<(const StackSamplingProfiler::Sample& a,
+                           const StackSamplingProfiler::Sample& b);
+BASE_EXPORT bool operator==(const StackSamplingProfiler::Frame& a,
+                            const StackSamplingProfiler::Frame& b);
+BASE_EXPORT bool operator<(const StackSamplingProfiler::Frame& a,
+                           const StackSamplingProfiler::Frame& b);
+
+}  // namespace base
+
+#endif  // BASE_PROFILER_STACK_SAMPLING_PROFILER_H_
diff --git a/base/profiler/stack_sampling_profiler_unittest.cc b/base/profiler/stack_sampling_profiler_unittest.cc
new file mode 100644
index 0000000..8fc25c9
--- /dev/null
+++ b/base/profiler/stack_sampling_profiler_unittest.cc
@@ -0,0 +1,1506 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <algorithm>
+#include <cstdlib>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "base/bind.h"
+#include "base/compiler_specific.h"
+#include "base/files/file_util.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/native_library.h"
+#include "base/path_service.h"
+#include "base/profiler/native_stack_sampler.h"
+#include "base/profiler/stack_sampling_profiler.h"
+#include "base/run_loop.h"
+#include "base/scoped_native_library.h"
+#include "base/strings/stringprintf.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/simple_thread.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_WIN)
+#include <intrin.h>
+#include <malloc.h>
+#include <windows.h>
+#else
+#include <alloca.h>
+#endif
+
+// STACK_SAMPLING_PROFILER_SUPPORTED is used to conditionally enable the tests
+// below for supported platforms (currently Win x64 and Mac x64).
+#if defined(_WIN64) || (defined(OS_MACOSX) && !defined(OS_IOS))
+#define STACK_SAMPLING_PROFILER_SUPPORTED 1
+#endif
+
+#if defined(OS_WIN)
+#pragma intrinsic(_ReturnAddress)
+#endif
+
+namespace base {
+
+#if defined(STACK_SAMPLING_PROFILER_SUPPORTED)
+#define PROFILER_TEST_F(TestClass, TestName) TEST_F(TestClass, TestName)
+#else
+#define PROFILER_TEST_F(TestClass, TestName) \
+  TEST_F(TestClass, DISABLED_##TestName)
+#endif
+
+using SamplingParams = StackSamplingProfiler::SamplingParams;
+using Frame = StackSamplingProfiler::Frame;
+using Frames = std::vector<StackSamplingProfiler::Frame>;
+using Module = StackSamplingProfiler::Module;
+using Sample = StackSamplingProfiler::Sample;
+using CallStackProfile = StackSamplingProfiler::CallStackProfile;
+using CallStackProfiles = StackSamplingProfiler::CallStackProfiles;
+
+namespace {
+
+// Configuration for the frames that appear on the stack.
+struct StackConfiguration {
+  enum Config { NORMAL, WITH_ALLOCA, WITH_OTHER_LIBRARY };
+
+  explicit StackConfiguration(Config config)
+      : StackConfiguration(config, nullptr) {
+    EXPECT_NE(config, WITH_OTHER_LIBRARY);
+  }
+
+  StackConfiguration(Config config, NativeLibrary library)
+      : config(config), library(library) {
+    EXPECT_TRUE(config != WITH_OTHER_LIBRARY || library);
+  }
+
+  Config config;
+
+  // Only used if config == WITH_OTHER_LIBRARY.
+  NativeLibrary library;
+};
+
+// Signature for a target function that is expected to appear in the stack. See
+// SignalAndWaitUntilSignaled() below. The return value should be a program
+// counter pointer near the end of the function.
+using TargetFunction = const void*(*)(WaitableEvent*, WaitableEvent*,
+                                      const StackConfiguration*);
+
+// A thread to target for profiling, whose stack is guaranteed to contain
+// SignalAndWaitUntilSignaled() when coordinated with the main thread.
+class TargetThread : public PlatformThread::Delegate {
+ public:
+  explicit TargetThread(const StackConfiguration& stack_config);
+
+  // PlatformThread::Delegate:
+  void ThreadMain() override;
+
+  // Waits for the thread to have started and be executing in
+  // SignalAndWaitUntilSignaled().
+  void WaitForThreadStart();
+
+  // Allows the thread to return from SignalAndWaitUntilSignaled() and finish
+  // execution.
+  void SignalThreadToFinish();
+
+  // This function is guaranteed to be executing between calls to
+  // WaitForThreadStart() and SignalThreadToFinish() when invoked with
+  // |thread_started_event_| and |finish_event_|. Returns a program counter
+  // value near the end of the function. May be invoked with null WaitableEvents
+  // to just return the program counter.
+  //
+  // This function is static so that we can get a straightforward address
+  // for it in one of the tests below, rather than dealing with the complexity
+  // of a member function pointer representation.
+  static const void* SignalAndWaitUntilSignaled(
+      WaitableEvent* thread_started_event,
+      WaitableEvent* finish_event,
+      const StackConfiguration* stack_config);
+
+  // Calls into SignalAndWaitUntilSignaled() after allocating memory on the
+  // stack with alloca.
+  static const void* CallWithAlloca(WaitableEvent* thread_started_event,
+                                    WaitableEvent* finish_event,
+                                    const StackConfiguration* stack_config);
+
+  // Calls into SignalAndWaitUntilSignaled() via a function in
+  // base_profiler_test_support_library.
+  static const void* CallThroughOtherLibrary(
+      WaitableEvent* thread_started_event,
+      WaitableEvent* finish_event,
+      const StackConfiguration* stack_config);
+
+  PlatformThreadId id() const { return id_; }
+
+ private:
+  struct TargetFunctionArgs {
+    WaitableEvent* thread_started_event;
+    WaitableEvent* finish_event;
+    const StackConfiguration* stack_config;
+  };
+
+  // Callback function to be provided when calling through the other library.
+  static void OtherLibraryCallback(void* arg);
+
+  // Returns the current program counter, or a value very close to it.
+  static const void* GetProgramCounter();
+
+  WaitableEvent thread_started_event_;
+  WaitableEvent finish_event_;
+  PlatformThreadId id_;
+  const StackConfiguration stack_config_;
+
+  DISALLOW_COPY_AND_ASSIGN(TargetThread);
+};
+
+TargetThread::TargetThread(const StackConfiguration& stack_config)
+    : thread_started_event_(WaitableEvent::ResetPolicy::AUTOMATIC,
+                            WaitableEvent::InitialState::NOT_SIGNALED),
+      finish_event_(WaitableEvent::ResetPolicy::AUTOMATIC,
+                    WaitableEvent::InitialState::NOT_SIGNALED),
+      id_(0),
+      stack_config_(stack_config) {}
+
+void TargetThread::ThreadMain() {
+  id_ = PlatformThread::CurrentId();
+  switch (stack_config_.config) {
+    case StackConfiguration::NORMAL:
+      SignalAndWaitUntilSignaled(&thread_started_event_, &finish_event_,
+                                 &stack_config_);
+      break;
+
+    case StackConfiguration::WITH_ALLOCA:
+      CallWithAlloca(&thread_started_event_, &finish_event_, &stack_config_);
+      break;
+
+    case StackConfiguration::WITH_OTHER_LIBRARY:
+      CallThroughOtherLibrary(&thread_started_event_, &finish_event_,
+                              &stack_config_);
+      break;
+  }
+}
+
+void TargetThread::WaitForThreadStart() {
+  thread_started_event_.Wait();
+}
+
+void TargetThread::SignalThreadToFinish() {
+  finish_event_.Signal();
+}
+
+// static
+// Disable inlining for this function so that it gets its own stack frame.
+NOINLINE const void* TargetThread::SignalAndWaitUntilSignaled(
+    WaitableEvent* thread_started_event,
+    WaitableEvent* finish_event,
+    const StackConfiguration* stack_config) {
+  if (thread_started_event && finish_event) {
+    thread_started_event->Signal();
+    finish_event->Wait();
+  }
+
+  // Volatile to prevent a tail call to GetProgramCounter().
+  const void* volatile program_counter = GetProgramCounter();
+  return program_counter;
+}
+
+// static
+// Disable inlining for this function so that it gets its own stack frame.
+NOINLINE const void* TargetThread::CallWithAlloca(
+    WaitableEvent* thread_started_event,
+    WaitableEvent* finish_event,
+    const StackConfiguration* stack_config) {
+  const size_t alloca_size = 100;
+  // Memset to 0 to generate a clean failure.
+  std::memset(alloca(alloca_size), 0, alloca_size);
+
+  SignalAndWaitUntilSignaled(thread_started_event, finish_event, stack_config);
+
+  // Volatile to prevent a tail call to GetProgramCounter().
+  const void* volatile program_counter = GetProgramCounter();
+  return program_counter;
+}
+
+// static
+NOINLINE const void* TargetThread::CallThroughOtherLibrary(
+    WaitableEvent* thread_started_event,
+    WaitableEvent* finish_event,
+    const StackConfiguration* stack_config) {
+  if (stack_config) {
+    // A function whose arguments are a function accepting void*, and a void*.
+    using InvokeCallbackFunction = void(*)(void (*)(void*), void*);
+    EXPECT_TRUE(stack_config->library);
+    InvokeCallbackFunction function = reinterpret_cast<InvokeCallbackFunction>(
+        GetFunctionPointerFromNativeLibrary(stack_config->library,
+                                            "InvokeCallbackFunction"));
+    EXPECT_TRUE(function);
+
+    TargetFunctionArgs args = {
+      thread_started_event,
+      finish_event,
+      stack_config
+    };
+    (*function)(&OtherLibraryCallback, &args);
+  }
+
+  // Volatile to prevent a tail call to GetProgramCounter().
+  const void* volatile program_counter = GetProgramCounter();
+  return program_counter;
+}
+
+// static
+void TargetThread::OtherLibraryCallback(void* arg) {
+  const TargetFunctionArgs* args = static_cast<TargetFunctionArgs*>(arg);
+  SignalAndWaitUntilSignaled(args->thread_started_event, args->finish_event,
+                             args->stack_config);
+  // Prevent tail call.
+  volatile int i = 0;
+  ALLOW_UNUSED_LOCAL(i);
+}
+
+// static
+// Disable inlining for this function so that it gets its own stack frame.
+NOINLINE const void* TargetThread::GetProgramCounter() {
+#if defined(OS_WIN)
+  return _ReturnAddress();
+#else
+  return __builtin_return_address(0);
+#endif
+}
+
+// Loads the other library, which defines a function to be called in the
+// WITH_OTHER_LIBRARY configuration.
+NativeLibrary LoadOtherLibrary() {
+  // The lambda gymnastics work around the fact that we can't use ASSERT_*
+  // macros in a function with a non-void return type.
+  const auto load = [](NativeLibrary* library) {
+    FilePath other_library_path;
+    ASSERT_TRUE(PathService::Get(DIR_EXE, &other_library_path));
+    other_library_path = other_library_path.AppendASCII(
+        GetNativeLibraryName("base_profiler_test_support_library"));
+    NativeLibraryLoadError load_error;
+    *library = LoadNativeLibrary(other_library_path, &load_error);
+    ASSERT_TRUE(*library) << "error loading " << other_library_path.value()
+                          << ": " << load_error.ToString();
+  };
+
+  NativeLibrary library = nullptr;
+  load(&library);
+  return library;
+}
+
+// Unloads |library| and returns when it has completed unloading. Unloading a
+// library is asynchronous on Windows, so simply calling UnloadNativeLibrary()
+// is insufficient to ensure it's been unloaded.
+void SynchronousUnloadNativeLibrary(NativeLibrary library) {
+  UnloadNativeLibrary(library);
+#if defined(OS_WIN)
+  // NativeLibrary is a typedef for HMODULE, which is actually the base address
+  // of the module.
+  uintptr_t module_base_address = reinterpret_cast<uintptr_t>(library);
+  HMODULE module_handle;
+  // Keep trying to get the module handle until the call fails.
+  while (::GetModuleHandleEx(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS |
+                             GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
+                             reinterpret_cast<LPCTSTR>(module_base_address),
+                             &module_handle) ||
+         ::GetLastError() != ERROR_MOD_NOT_FOUND) {
+    PlatformThread::Sleep(TimeDelta::FromMilliseconds(1));
+  }
+#elif defined(OS_MACOSX)
+// Unloading a library on the Mac is synchronous.
+#else
+  NOTIMPLEMENTED();
+#endif
+}
+
+// Called on the profiler thread when complete, to collect profiles.
+void SaveProfiles(CallStackProfiles* profiles,
+                  CallStackProfiles pending_profiles) {
+  *profiles = std::move(pending_profiles);
+}
+
+// Called on the profiler thread when complete. Collects profiles produced by
+// the profiler, and signals an event to let the main thread know that the
+// profiler is done.
+void SaveProfilesAndSignalEvent(CallStackProfiles* profiles,
+                                WaitableEvent* event,
+                                CallStackProfiles pending_profiles) {
+  *profiles = std::move(pending_profiles);
+  event->Signal();
+}
+
+// Executes the function with the target thread running and executing within
+// SignalAndWaitUntilSignaled(). Performs all necessary target thread startup
+// and shutdown work before and afterward.
+template <class Function>
+void WithTargetThread(Function function,
+                      const StackConfiguration& stack_config) {
+  TargetThread target_thread(stack_config);
+  PlatformThreadHandle target_thread_handle;
+  EXPECT_TRUE(PlatformThread::Create(0, &target_thread, &target_thread_handle));
+
+  target_thread.WaitForThreadStart();
+
+  function(target_thread.id());
+
+  target_thread.SignalThreadToFinish();
+
+  PlatformThread::Join(target_thread_handle);
+}
+
+template <class Function>
+void WithTargetThread(Function function) {
+  WithTargetThread(function, StackConfiguration(StackConfiguration::NORMAL));
+}
+
+struct TestProfilerInfo {
+  TestProfilerInfo(PlatformThreadId thread_id,
+                   const SamplingParams& params,
+                   NativeStackSamplerTestDelegate* delegate = nullptr)
+      : completed(WaitableEvent::ResetPolicy::MANUAL,
+                  WaitableEvent::InitialState::NOT_SIGNALED),
+        profiler(thread_id,
+                 params,
+                 Bind(&SaveProfilesAndSignalEvent,
+                      Unretained(&profiles),
+                      Unretained(&completed)),
+                 delegate) {}
+
+  // The declaration order here is important: members are destroyed in
+  // reverse order, so the objects being referenced must be declared before
+  // (and thus outlive) the objects referencing them.
+  CallStackProfiles profiles;
+  WaitableEvent completed;
+  StackSamplingProfiler profiler;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(TestProfilerInfo);
+};
+
+// Creates multiple profilers based on a vector of parameters.
+std::vector<std::unique_ptr<TestProfilerInfo>> CreateProfilers(
+    PlatformThreadId target_thread_id,
+    const std::vector<SamplingParams>& params) {
+  DCHECK(!params.empty());
+
+  std::vector<std::unique_ptr<TestProfilerInfo>> profilers;
+  for (size_t i = 0; i < params.size(); ++i) {
+    profilers.push_back(
+        std::make_unique<TestProfilerInfo>(target_thread_id, params[i]));
+  }
+
+  return profilers;
+}
+
+// Captures profiles as specified by |params| on the TargetThread, and returns
+// them in |profiles|. Waits up to |profiler_wait_time| for the profiler to
+// complete.
+void CaptureProfiles(const SamplingParams& params, TimeDelta profiler_wait_time,
+                     CallStackProfiles* profiles) {
+  WithTargetThread([&params, profiles,
+                    profiler_wait_time](PlatformThreadId target_thread_id) {
+    TestProfilerInfo info(target_thread_id, params);
+    info.profiler.Start();
+    info.completed.TimedWait(profiler_wait_time);
+    info.profiler.Stop();
+    info.completed.Wait();
+
+    *profiles = std::move(info.profiles);
+  });
+}
+
+// Waits for one of multiple samplings to complete.
+size_t WaitForSamplingComplete(
+    const std::vector<std::unique_ptr<TestProfilerInfo>>& infos) {
+  // Map unique_ptrs to something that WaitMany can accept.
+  std::vector<WaitableEvent*> sampling_completed_rawptrs(infos.size());
+  std::transform(infos.begin(), infos.end(), sampling_completed_rawptrs.begin(),
+                 [](const std::unique_ptr<TestProfilerInfo>& info) {
+                   return &info.get()->completed;
+                 });
+  // Wait for one profiler to finish.
+  return WaitableEvent::WaitMany(sampling_completed_rawptrs.data(),
+                                 sampling_completed_rawptrs.size());
+}
+
+// If this executable was linked with /INCREMENTAL (the default for non-official
+// debug and release builds on Windows), function addresses do not correspond to
+// function code itself, but instead to instructions in the Incremental Link
+// Table that jump to the functions. Checks for a jump instruction and if
+// present does a little decompilation to find the function's actual starting
+// address.
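+//
+// For example, with hypothetical addresses: an ILT entry at 0x1000 reading
+// "e9 fb 0f 00 00" is a relative jump with rel32 offset 0x0ffb; the next
+// instruction begins at 0x1005, so the actual function start is
+// 0x1005 + 0x0ffb = 0x2000.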
+const void* MaybeFixupFunctionAddressForILT(const void* function_address) {
+#if defined(_WIN64)
+  const unsigned char* opcode =
+      reinterpret_cast<const unsigned char*>(function_address);
+  if (*opcode == 0xe9) {
+    // This is a relative jump instruction. Assume we're in the ILT and compute
+    // the function start address from the instruction offset.
+    const int32_t* offset = reinterpret_cast<const int32_t*>(opcode + 1);
+    const unsigned char* next_instruction =
+        reinterpret_cast<const unsigned char*>(offset + 1);
+    return next_instruction + *offset;
+  }
+#endif
+  return function_address;
+}
+
+// Searches through the frames in |sample|, returning an iterator to the first
+// frame that has an instruction pointer within |target_function|. Returns
+// sample.end() if no such frames are found.
+Frames::const_iterator FindFirstFrameWithinFunction(
+    const Sample& sample,
+    TargetFunction target_function) {
+  uintptr_t function_start = reinterpret_cast<uintptr_t>(
+      MaybeFixupFunctionAddressForILT(reinterpret_cast<const void*>(
+          target_function)));
+  uintptr_t function_end =
+      reinterpret_cast<uintptr_t>(target_function(nullptr, nullptr, nullptr));
+  for (auto it = sample.frames.begin(); it != sample.frames.end(); ++it) {
+    if ((it->instruction_pointer >= function_start) &&
+        (it->instruction_pointer <= function_end))
+      return it;
+  }
+  return sample.frames.end();
+}
+
+// Formats a sample into a string that can be output for test diagnostics.
+std::string FormatSampleForDiagnosticOutput(
+    const Sample& sample,
+    const std::vector<Module>& modules) {
+  std::string output;
+  for (const Frame& frame : sample.frames) {
+    output += StringPrintf(
+        "0x%p %s\n", reinterpret_cast<const void*>(frame.instruction_pointer),
+        modules[frame.module_index].filename.AsUTF8Unsafe().c_str());
+  }
+  return output;
+}
+
+// Returns a duration that is longer than the test timeout. We would use
+// TimeDelta::Max() but https://crbug.com/465948.
+TimeDelta AVeryLongTimeDelta() { return TimeDelta::FromDays(1); }
+
+// Tests the scenario where the library is unloaded after copying the stack,
+// but before walking it. If |wait_until_unloaded| is true, ensures that the
+// asynchronous library unloading has completed before walking the stack. If
+// false, the unloading may still be occurring during the stack walk.
+void TestLibraryUnload(bool wait_until_unloaded) {
+  // Test delegate that supports intervening between the copying of the stack
+  // and the walking of the stack.
+  class StackCopiedSignaler : public NativeStackSamplerTestDelegate {
+   public:
+    StackCopiedSignaler(WaitableEvent* stack_copied,
+                        WaitableEvent* start_stack_walk,
+                        bool wait_to_walk_stack)
+        : stack_copied_(stack_copied),
+          start_stack_walk_(start_stack_walk),
+          wait_to_walk_stack_(wait_to_walk_stack) {}
+
+    void OnPreStackWalk() override {
+      stack_copied_->Signal();
+      if (wait_to_walk_stack_)
+        start_stack_walk_->Wait();
+    }
+
+   private:
+    WaitableEvent* const stack_copied_;
+    WaitableEvent* const start_stack_walk_;
+    const bool wait_to_walk_stack_;
+  };
+
+  SamplingParams params;
+  params.sampling_interval = TimeDelta::FromMilliseconds(0);
+  params.samples_per_burst = 1;
+
+  NativeLibrary other_library = LoadOtherLibrary();
+  TargetThread target_thread(StackConfiguration(
+      StackConfiguration::WITH_OTHER_LIBRARY,
+      other_library));
+
+  PlatformThreadHandle target_thread_handle;
+  EXPECT_TRUE(PlatformThread::Create(0, &target_thread, &target_thread_handle));
+
+  target_thread.WaitForThreadStart();
+
+  WaitableEvent sampling_thread_completed(
+      WaitableEvent::ResetPolicy::MANUAL,
+      WaitableEvent::InitialState::NOT_SIGNALED);
+  std::vector<CallStackProfile> profiles;
+  const StackSamplingProfiler::CompletedCallback callback =
+      Bind(&SaveProfilesAndSignalEvent, Unretained(&profiles),
+           Unretained(&sampling_thread_completed));
+  WaitableEvent stack_copied(WaitableEvent::ResetPolicy::MANUAL,
+                             WaitableEvent::InitialState::NOT_SIGNALED);
+  WaitableEvent start_stack_walk(WaitableEvent::ResetPolicy::MANUAL,
+                                 WaitableEvent::InitialState::NOT_SIGNALED);
+  StackCopiedSignaler test_delegate(&stack_copied, &start_stack_walk,
+                                    wait_until_unloaded);
+  StackSamplingProfiler profiler(target_thread.id(), params, callback,
+                                 &test_delegate);
+
+  profiler.Start();
+
+  // Wait for the stack to be copied and the target thread to be resumed.
+  stack_copied.Wait();
+
+  // Cause the target thread to finish, so that it's no longer executing code in
+  // the library we're about to unload.
+  target_thread.SignalThreadToFinish();
+  PlatformThread::Join(target_thread_handle);
+
+  // Unload the library now that it's not being used.
+  if (wait_until_unloaded)
+    SynchronousUnloadNativeLibrary(other_library);
+  else
+    UnloadNativeLibrary(other_library);
+
+  // Let the stack walk commence after unloading the library, if we're waiting
+  // on that event.
+  start_stack_walk.Signal();
+
+  // Wait for the sampling thread to complete and fill out |profiles|.
+  sampling_thread_completed.Wait();
+
+  // Look up the sample.
+  ASSERT_EQ(1u, profiles.size());
+  const CallStackProfile& profile = profiles[0];
+  ASSERT_EQ(1u, profile.samples.size());
+  const Sample& sample = profile.samples[0];
+
+  // Check that the stack contains a frame for
+  // TargetThread::SignalAndWaitUntilSignaled().
+  Frames::const_iterator end_frame = FindFirstFrameWithinFunction(
+      sample, &TargetThread::SignalAndWaitUntilSignaled);
+  ASSERT_TRUE(end_frame != sample.frames.end())
+      << "Function at "
+      << MaybeFixupFunctionAddressForILT(reinterpret_cast<const void*>(
+             &TargetThread::SignalAndWaitUntilSignaled))
+      << " was not found in stack:\n"
+      << FormatSampleForDiagnosticOutput(sample, profile.modules);
+
+  if (wait_until_unloaded) {
+    // The stack should look like this, resulting in one frame after
+    // SignalAndWaitUntilSignaled. The frame in the now-unloaded library is
+    // not recorded since we can't get module information.
+    //
+    // ... WaitableEvent and system frames ...
+    // TargetThread::SignalAndWaitUntilSignaled
+    // TargetThread::OtherLibraryCallback
+    EXPECT_EQ(2, sample.frames.end() - end_frame)
+        << "Stack:\n"
+        << FormatSampleForDiagnosticOutput(sample, profile.modules);
+  } else {
+    // We didn't wait for the asynchronous unloading to complete, so the
+    // results are non-deterministic: if the library finished unloading, we
+    // should have the same stack as in the |wait_until_unloaded| case; if
+    // not, we should have the full stack. The important thing is that we
+    // should not crash.
+
+    if (sample.frames.end() - end_frame == 2) {
+      // This is the same case as |wait_until_unloaded|.
+      return;
+    }
+
+    // Check that the stack contains a frame for
+    // TargetThread::CallThroughOtherLibrary().
+    Frames::const_iterator other_library_frame = FindFirstFrameWithinFunction(
+        sample, &TargetThread::CallThroughOtherLibrary);
+    ASSERT_TRUE(other_library_frame != sample.frames.end())
+        << "Function at "
+        << MaybeFixupFunctionAddressForILT(reinterpret_cast<const void*>(
+               &TargetThread::CallThroughOtherLibrary))
+        << " was not found in stack:\n"
+        << FormatSampleForDiagnosticOutput(sample, profile.modules);
+
+    // The stack should look like this, resulting in three frames between
+    // SignalAndWaitUntilSignaled and CallThroughOtherLibrary:
+    //
+    // ... WaitableEvent and system frames ...
+    // TargetThread::SignalAndWaitUntilSignaled
+    // TargetThread::OtherLibraryCallback
+    // InvokeCallbackFunction (in other library)
+    // TargetThread::CallThroughOtherLibrary
+    EXPECT_EQ(3, other_library_frame - end_frame)
+        << "Stack:\n"
+        << FormatSampleForDiagnosticOutput(sample, profile.modules);
+  }
+}
+
+// Provides a suitable (and clean) environment for the tests below. All tests
+// must use this fixture to ensure that proper clean-up is done, leaving the
+// environment usable by later tests.
+class StackSamplingProfilerTest : public testing::Test {
+ public:
+  void SetUp() override {
+    // The idle-shutdown time is too long for convenient (and accurate) testing.
+    // That behavior is checked instead by artificially triggering it through
+    // the TestAPI.
+    StackSamplingProfiler::TestAPI::DisableIdleShutdown();
+  }
+
+  void TearDown() override {
+    // Be a good citizen and clean up after ourselves. This also re-enables the
+    // idle-shutdown behavior.
+    StackSamplingProfiler::TestAPI::Reset();
+  }
+};
+
+}  // namespace
+
+// Checks that the basic expected information is present in a sampled call stack
+// profile.
+// macOS ASAN is not yet supported - crbug.com/718628.
+#if !(defined(ADDRESS_SANITIZER) && defined(OS_MACOSX))
+#define MAYBE_Basic Basic
+#else
+#define MAYBE_Basic DISABLED_Basic
+#endif
+PROFILER_TEST_F(StackSamplingProfilerTest, MAYBE_Basic) {
+  SamplingParams params;
+  params.sampling_interval = TimeDelta::FromMilliseconds(0);
+  params.samples_per_burst = 1;
+
+  std::vector<CallStackProfile> profiles;
+  CaptureProfiles(params, AVeryLongTimeDelta(), &profiles);
+
+  // Check that the profile and sample counts are correct, and that the module
+  // indices are in range.
+  ASSERT_EQ(1u, profiles.size());
+  const CallStackProfile& profile = profiles[0];
+  ASSERT_EQ(1u, profile.samples.size());
+  EXPECT_EQ(params.sampling_interval, profile.sampling_period);
+  const Sample& sample = profile.samples[0];
+  EXPECT_EQ(0u, sample.process_milestones);
+  for (const auto& frame : sample.frames) {
+    ASSERT_GE(frame.module_index, 0u);
+    ASSERT_LT(frame.module_index, profile.modules.size());
+  }
+
+  // Check that the stack contains a frame for
+  // TargetThread::SignalAndWaitUntilSignaled() and that the frame has this
+  // executable's module.
+  Frames::const_iterator loc = FindFirstFrameWithinFunction(
+      sample, &TargetThread::SignalAndWaitUntilSignaled);
+  ASSERT_TRUE(loc != sample.frames.end())
+      << "Function at "
+      << MaybeFixupFunctionAddressForILT(reinterpret_cast<const void*>(
+             &TargetThread::SignalAndWaitUntilSignaled))
+      << " was not found in stack:\n"
+      << FormatSampleForDiagnosticOutput(sample, profile.modules);
+  FilePath executable_path;
+  EXPECT_TRUE(PathService::Get(FILE_EXE, &executable_path));
+  EXPECT_EQ(executable_path,
+            MakeAbsoluteFilePath(profile.modules[loc->module_index].filename));
+}
+
+// Checks that annotations are recorded in samples.
+PROFILER_TEST_F(StackSamplingProfilerTest, Annotations) {
+  SamplingParams params;
+  params.sampling_interval = TimeDelta::FromMilliseconds(0);
+  params.samples_per_burst = 1;
+
+  // Check that a run picks up annotations.
+  StackSamplingProfiler::SetProcessMilestone(1);
+  std::vector<CallStackProfile> profiles1;
+  CaptureProfiles(params, AVeryLongTimeDelta(), &profiles1);
+  ASSERT_EQ(1u, profiles1.size());
+  const CallStackProfile& profile1 = profiles1[0];
+  ASSERT_EQ(1u, profile1.samples.size());
+  const Sample& sample1 = profile1.samples[0];
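+  // Milestone n is recorded as bit n of the annotation bitmask, so milestone
+  // 1 appears as 1u << 1.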
+  EXPECT_EQ(1u << 1, sample1.process_milestones);
+
+  // Run it a second time but with changed annotations. These annotations
+  // should appear in the first acquired sample.
+  StackSamplingProfiler::SetProcessMilestone(2);
+  std::vector<CallStackProfile> profiles2;
+  CaptureProfiles(params, AVeryLongTimeDelta(), &profiles2);
+  ASSERT_EQ(1u, profiles2.size());
+  const CallStackProfile& profile2 = profiles2[0];
+  ASSERT_EQ(1u, profile2.samples.size());
+  const Sample& sample2 = profile2.samples[0];
+  EXPECT_EQ(sample1.process_milestones | (1u << 2), sample2.process_milestones);
+}
+
+// Checks that the profiler handles stacks containing dynamically-allocated
+// stack memory.
+// macOS ASAN is not yet supported - crbug.com/718628.
+#if !(defined(ADDRESS_SANITIZER) && defined(OS_MACOSX))
+#define MAYBE_Alloca Alloca
+#else
+#define MAYBE_Alloca DISABLED_Alloca
+#endif
+PROFILER_TEST_F(StackSamplingProfilerTest, MAYBE_Alloca) {
+  SamplingParams params;
+  params.sampling_interval = TimeDelta::FromMilliseconds(0);
+  params.samples_per_burst = 1;
+
+  std::vector<CallStackProfile> profiles;
+  WithTargetThread(
+      [&params, &profiles](PlatformThreadId target_thread_id) {
+        WaitableEvent sampling_thread_completed(
+            WaitableEvent::ResetPolicy::MANUAL,
+            WaitableEvent::InitialState::NOT_SIGNALED);
+        const StackSamplingProfiler::CompletedCallback callback =
+            Bind(&SaveProfilesAndSignalEvent, Unretained(&profiles),
+                 Unretained(&sampling_thread_completed));
+        StackSamplingProfiler profiler(target_thread_id, params, callback);
+        profiler.Start();
+        sampling_thread_completed.Wait();
+      },
+      StackConfiguration(StackConfiguration::WITH_ALLOCA));
+
+  // Look up the sample.
+  ASSERT_EQ(1u, profiles.size());
+  const CallStackProfile& profile = profiles[0];
+  ASSERT_EQ(1u, profile.samples.size());
+  const Sample& sample = profile.samples[0];
+
+  // Check that the stack contains a frame for
+  // TargetThread::SignalAndWaitUntilSignaled().
+  Frames::const_iterator end_frame = FindFirstFrameWithinFunction(
+      sample, &TargetThread::SignalAndWaitUntilSignaled);
+  ASSERT_TRUE(end_frame != sample.frames.end())
+      << "Function at "
+      << MaybeFixupFunctionAddressForILT(reinterpret_cast<const void*>(
+             &TargetThread::SignalAndWaitUntilSignaled))
+      << " was not found in stack:\n"
+      << FormatSampleForDiagnosticOutput(sample, profile.modules);
+
+  // Check that the stack contains a frame for TargetThread::CallWithAlloca().
+  Frames::const_iterator alloca_frame =
+      FindFirstFrameWithinFunction(sample, &TargetThread::CallWithAlloca);
+  ASSERT_TRUE(alloca_frame != sample.frames.end())
+      << "Function at "
+      << MaybeFixupFunctionAddressForILT(
+             reinterpret_cast<const void*>(&TargetThread::CallWithAlloca))
+      << " was not found in stack:\n"
+      << FormatSampleForDiagnosticOutput(sample, profile.modules);
+
+  // These frames should be adjacent on the stack.
+  EXPECT_EQ(1, alloca_frame - end_frame)
+      << "Stack:\n"
+      << FormatSampleForDiagnosticOutput(sample, profile.modules);
+}
+
+// Checks that the expected number of profiles and samples are present in the
+// call stack profiles produced.
+PROFILER_TEST_F(StackSamplingProfilerTest, MultipleProfilesAndSamples) {
+  SamplingParams params;
+  params.burst_interval = params.sampling_interval =
+      TimeDelta::FromMilliseconds(0);
+  params.bursts = 2;
+  params.samples_per_burst = 3;
+
+  std::vector<CallStackProfile> profiles;
+  CaptureProfiles(params, AVeryLongTimeDelta(), &profiles);
+
+  ASSERT_EQ(2u, profiles.size());
+  EXPECT_EQ(3u, profiles[0].samples.size());
+  EXPECT_EQ(3u, profiles[1].samples.size());
+}
+
+// Checks that a profiler can stop/destruct without ever having started.
+PROFILER_TEST_F(StackSamplingProfilerTest, StopWithoutStarting) {
+  WithTargetThread([](PlatformThreadId target_thread_id) {
+    SamplingParams params;
+    params.sampling_interval = TimeDelta::FromMilliseconds(0);
+    params.samples_per_burst = 1;
+
+    CallStackProfiles profiles;
+    WaitableEvent sampling_completed(WaitableEvent::ResetPolicy::MANUAL,
+                                     WaitableEvent::InitialState::NOT_SIGNALED);
+    const StackSamplingProfiler::CompletedCallback callback =
+        Bind(&SaveProfilesAndSignalEvent, Unretained(&profiles),
+             Unretained(&sampling_completed));
+    StackSamplingProfiler profiler(target_thread_id, params, callback);
+
+    profiler.Stop();  // Constructed but never started.
+    EXPECT_FALSE(sampling_completed.IsSignaled());
+  });
+}
+
+// Checks that it's okay to stop a profiler before it finishes even when the
+// sampling thread continues to run.
+PROFILER_TEST_F(StackSamplingProfilerTest, StopSafely) {
+  // Test delegate that counts samples.
+  class SampleRecordedCounter : public NativeStackSamplerTestDelegate {
+   public:
+    SampleRecordedCounter() = default;
+
+    void OnPreStackWalk() override {
+      AutoLock lock(lock_);
+      ++count_;
+    }
+
+    size_t Get() {
+      AutoLock lock(lock_);
+      return count_;
+    }
+
+   private:
+    Lock lock_;
+    size_t count_ = 0;
+  };
+
+  WithTargetThread([](PlatformThreadId target_thread_id) {
+    SamplingParams params[2];
+
+    // Providing an initial delay makes it more likely that both will be
+    // scheduled before either starts to run. Once started, samples will
+    // run ordered by their scheduled, interleaved times regardless of the
+    // interval at which the thread wakes up.
+    params[0].initial_delay = TimeDelta::FromMilliseconds(10);
+    params[0].sampling_interval = TimeDelta::FromMilliseconds(1);
+    params[0].samples_per_burst = 100000;
+
+    params[1].initial_delay = TimeDelta::FromMilliseconds(10);
+    params[1].sampling_interval = TimeDelta::FromMilliseconds(1);
+    params[1].samples_per_burst = 100000;
+
+    SampleRecordedCounter samples_recorded[arraysize(params)];
+
+    TestProfilerInfo profiler_info0(target_thread_id, params[0],
+                                    &samples_recorded[0]);
+    TestProfilerInfo profiler_info1(target_thread_id, params[1],
+                                    &samples_recorded[1]);
+
+    profiler_info0.profiler.Start();
+    profiler_info1.profiler.Start();
+
+    // Wait for both to start accumulating samples. Using a WaitableEvent is
+    // possible but gets complicated later on because there's no way of knowing
+    // whether 0 or 1 additional samples will be taken after Stop() and thus
+    // no way of knowing how many Wait() calls to make on it.
+    while (samples_recorded[0].Get() == 0 || samples_recorded[1].Get() == 0)
+      PlatformThread::Sleep(TimeDelta::FromMilliseconds(1));
+
+    // Ensure that the first sampler can be safely stopped while the second
+    // continues to run. The stopped first profiler will still have a
+    // PerformCollectionTask pending that will do nothing when executed because
+    // the collection will have been removed by Stop().
+    profiler_info0.profiler.Stop();
+    profiler_info0.completed.Wait();
+    size_t count0 = samples_recorded[0].Get();
+    size_t count1 = samples_recorded[1].Get();
+
+    // Waiting for the second sampler to collect a couple of samples ensures
+    // that the pending PerformCollectionTask for the first has executed,
+    // because tasks are always ordered by their next scheduled time.
+    while (samples_recorded[1].Get() < count1 + 2)
+      PlatformThread::Sleep(TimeDelta::FromMilliseconds(1));
+
+    // Ensure that the first profiler didn't do anything since it was stopped.
+    EXPECT_EQ(count0, samples_recorded[0].Get());
+  });
+}
+
+// Checks that no call stack profiles are captured if the profiling is stopped
+// during the initial delay.
+PROFILER_TEST_F(StackSamplingProfilerTest, StopDuringInitialDelay) {
+  SamplingParams params;
+  params.initial_delay = TimeDelta::FromSeconds(60);
+
+  std::vector<CallStackProfile> profiles;
+  CaptureProfiles(params, TimeDelta::FromMilliseconds(0), &profiles);
+
+  EXPECT_TRUE(profiles.empty());
+}
+
+// Checks that the single completed call stack profile is captured if the
+// profiling is stopped between bursts.
+PROFILER_TEST_F(StackSamplingProfilerTest, StopDuringInterBurstInterval) {
+  SamplingParams params;
+  params.sampling_interval = TimeDelta::FromMilliseconds(0);
+  params.burst_interval = TimeDelta::FromSeconds(60);
+  params.bursts = 2;
+  params.samples_per_burst = 1;
+
+  std::vector<CallStackProfile> profiles;
+  CaptureProfiles(params, TimeDelta::FromMilliseconds(50), &profiles);
+
+  ASSERT_EQ(1u, profiles.size());
+  EXPECT_EQ(1u, profiles[0].samples.size());
+}
+
+// Checks that tasks can be stopped before completion and incomplete call stack
+// profiles are captured.
+PROFILER_TEST_F(StackSamplingProfilerTest, StopDuringInterSampleInterval) {
+  // Test delegate that counts samples.
+  class SampleRecordedEvent : public NativeStackSamplerTestDelegate {
+   public:
+    SampleRecordedEvent()
+        : sample_recorded_(WaitableEvent::ResetPolicy::MANUAL,
+                           WaitableEvent::InitialState::NOT_SIGNALED) {}
+
+    void OnPreStackWalk() override { sample_recorded_.Signal(); }
+
+    void WaitForSample() { sample_recorded_.Wait(); }
+
+   private:
+    WaitableEvent sample_recorded_;
+  };
+
+  WithTargetThread([](PlatformThreadId target_thread_id) {
+    SamplingParams params;
+
+    params.sampling_interval = AVeryLongTimeDelta();
+    params.samples_per_burst = 2;
+
+    SampleRecordedEvent samples_recorded;
+    TestProfilerInfo profiler_info(target_thread_id, params, &samples_recorded);
+
+    profiler_info.profiler.Start();
+
+    // Wait for profiler to start accumulating samples.
+    samples_recorded.WaitForSample();
+
+    // Ensure that it can stop safely.
+    profiler_info.profiler.Stop();
+    profiler_info.completed.Wait();
+
+    ASSERT_EQ(1u, profiler_info.profiles.size());
+    EXPECT_EQ(1u, profiler_info.profiles[0].samples.size());
+  });
+}
+
+// Checks that we can destroy the profiler while profiling.
+PROFILER_TEST_F(StackSamplingProfilerTest, DestroyProfilerWhileProfiling) {
+  SamplingParams params;
+  params.sampling_interval = TimeDelta::FromMilliseconds(10);
+
+  CallStackProfiles profiles;
+  WithTargetThread([&params, &profiles](PlatformThreadId target_thread_id) {
+    std::unique_ptr<StackSamplingProfiler> profiler;
+    profiler.reset(new StackSamplingProfiler(
+        target_thread_id, params, Bind(&SaveProfiles, Unretained(&profiles))));
+    profiler->Start();
+    profiler.reset();
+
+    // Wait longer than a sample interval to catch any use-after-free actions by
+    // the profiler thread.
+    PlatformThread::Sleep(TimeDelta::FromMilliseconds(50));
+  });
+}
+
+// Checks that the same profiler may be run multiple times.
+PROFILER_TEST_F(StackSamplingProfilerTest, CanRunMultipleTimes) {
+  WithTargetThread([](PlatformThreadId target_thread_id) {
+    SamplingParams params;
+    params.sampling_interval = TimeDelta::FromMilliseconds(0);
+    params.samples_per_burst = 1;
+
+    CallStackProfiles profiles;
+    WaitableEvent sampling_completed(WaitableEvent::ResetPolicy::MANUAL,
+                                     WaitableEvent::InitialState::NOT_SIGNALED);
+    const StackSamplingProfiler::CompletedCallback callback =
+        Bind(&SaveProfilesAndSignalEvent, Unretained(&profiles),
+             Unretained(&sampling_completed));
+    StackSamplingProfiler profiler(target_thread_id, params, callback);
+
+    // Just start and stop to execute code paths.
+    profiler.Start();
+    profiler.Stop();
+    sampling_completed.Wait();
+
+    // Ensure a second request will run and not block.
+    sampling_completed.Reset();
+    profiles.clear();
+    profiler.Start();
+    sampling_completed.Wait();
+    profiler.Stop();
+    ASSERT_EQ(1u, profiles.size());
+  });
+}
+
+// Checks that different profilers may be run, one after another.
+PROFILER_TEST_F(StackSamplingProfilerTest, CanRunMultipleProfilers) {
+  SamplingParams params;
+  params.sampling_interval = TimeDelta::FromMilliseconds(0);
+  params.samples_per_burst = 1;
+
+  std::vector<CallStackProfile> profiles;
+  CaptureProfiles(params, AVeryLongTimeDelta(), &profiles);
+  ASSERT_EQ(1u, profiles.size());
+
+  profiles.clear();
+  CaptureProfiles(params, AVeryLongTimeDelta(), &profiles);
+  ASSERT_EQ(1u, profiles.size());
+}
+
+// Checks that a sampler can be started while another is running.
+PROFILER_TEST_F(StackSamplingProfilerTest, MultipleStart) {
+  WithTargetThread([](PlatformThreadId target_thread_id) {
+    std::vector<SamplingParams> params(2);
+
+    params[0].initial_delay = AVeryLongTimeDelta();
+    params[0].samples_per_burst = 1;
+
+    params[1].sampling_interval = TimeDelta::FromMilliseconds(1);
+    params[1].samples_per_burst = 1;
+
+    std::vector<std::unique_ptr<TestProfilerInfo>> profiler_infos =
+        CreateProfilers(target_thread_id, params);
+
+    profiler_infos[0]->profiler.Start();
+    profiler_infos[1]->profiler.Start();
+    profiler_infos[1]->completed.Wait();
+    EXPECT_EQ(1u, profiler_infos[1]->profiles.size());
+  });
+}
+
+// Checks that the sampling thread can shut down.
+PROFILER_TEST_F(StackSamplingProfilerTest, SamplerIdleShutdown) {
+  SamplingParams params;
+  params.sampling_interval = TimeDelta::FromMilliseconds(0);
+  params.samples_per_burst = 1;
+
+  std::vector<CallStackProfile> profiles;
+  CaptureProfiles(params, AVeryLongTimeDelta(), &profiles);
+  ASSERT_EQ(1u, profiles.size());
+
+  // Capture thread should still be running at this point.
+  ASSERT_TRUE(StackSamplingProfiler::TestAPI::IsSamplingThreadRunning());
+
+  // Initiate an "idle" shutdown and ensure it happens. Idle-shutdown was
+  // disabled by the test fixture so the test will fail due to a timeout if
+  // it does not exit.
+  StackSamplingProfiler::TestAPI::PerformSamplingThreadIdleShutdown(false);
+
+  // While the shutdown has been initiated, the actual exit of the thread still
+  // happens asynchronously. Watch until the thread actually exits. This test
+  // will time out in the case of failure.
+  while (StackSamplingProfiler::TestAPI::IsSamplingThreadRunning())
+    PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(1));
+}
+
+// Checks that additional requests will restart a stopped profiler.
+PROFILER_TEST_F(StackSamplingProfilerTest,
+                WillRestartSamplerAfterIdleShutdown) {
+  SamplingParams params;
+  params.sampling_interval = TimeDelta::FromMilliseconds(0);
+  params.samples_per_burst = 1;
+
+  std::vector<CallStackProfile> profiles;
+  CaptureProfiles(params, AVeryLongTimeDelta(), &profiles);
+  ASSERT_EQ(1u, profiles.size());
+
+  // Capture thread should still be running at this point.
+  ASSERT_TRUE(StackSamplingProfiler::TestAPI::IsSamplingThreadRunning());
+
+  // Post a ShutdownTask on the sampling thread which, when executed, will
+  // mark the thread as EXITING and begin shutdown of the thread.
+  StackSamplingProfiler::TestAPI::PerformSamplingThreadIdleShutdown(false);
+
+  // Ensure another capture will start the sampling thread and run.
+  profiles.clear();
+  CaptureProfiles(params, AVeryLongTimeDelta(), &profiles);
+  ASSERT_EQ(1u, profiles.size());
+  EXPECT_TRUE(StackSamplingProfiler::TestAPI::IsSamplingThreadRunning());
+}
+
+// Checks that it's safe to stop a task after it's completed and the sampling
+// thread has shut down for being idle.
+PROFILER_TEST_F(StackSamplingProfilerTest, StopAfterIdleShutdown) {
+  WithTargetThread([](PlatformThreadId target_thread_id) {
+    SamplingParams params;
+
+    params.sampling_interval = TimeDelta::FromMilliseconds(1);
+    params.samples_per_burst = 1;
+
+    TestProfilerInfo profiler_info(target_thread_id, params);
+
+    profiler_info.profiler.Start();
+    profiler_info.completed.Wait();
+
+    // Capture thread should still be running at this point.
+    ASSERT_TRUE(StackSamplingProfiler::TestAPI::IsSamplingThreadRunning());
+
+    // Perform an idle shutdown.
+    StackSamplingProfiler::TestAPI::PerformSamplingThreadIdleShutdown(false);
+
+    // Stop should be safe though it's impossible to know at this moment if the
+    // sampling thread has completely exited or will just "stop soon".
+    profiler_info.profiler.Stop();
+  });
+}
+
+// Checks that profilers can run both before and after the sampling thread has
+// started.
+PROFILER_TEST_F(StackSamplingProfilerTest,
+                ProfileBeforeAndAfterSamplingThreadRunning) {
+  WithTargetThread([](PlatformThreadId target_thread_id) {
+    std::vector<SamplingParams> params(2);
+
+    params[0].initial_delay = AVeryLongTimeDelta();
+    params[0].sampling_interval = TimeDelta::FromMilliseconds(1);
+    params[0].samples_per_burst = 1;
+
+    params[1].initial_delay = TimeDelta::FromMilliseconds(0);
+    params[1].sampling_interval = TimeDelta::FromMilliseconds(1);
+    params[1].samples_per_burst = 1;
+
+    std::vector<std::unique_ptr<TestProfilerInfo>> profiler_infos =
+        CreateProfilers(target_thread_id, params);
+
+    // First profiler is started when there has never been a sampling thread.
+    EXPECT_FALSE(StackSamplingProfiler::TestAPI::IsSamplingThreadRunning());
+    profiler_infos[0]->profiler.Start();
+    // Second profiler is started when sampling thread is already running.
+    EXPECT_TRUE(StackSamplingProfiler::TestAPI::IsSamplingThreadRunning());
+    profiler_infos[1]->profiler.Start();
+
+    // Only the second profiler should finish before the test times out.
+    size_t completed_profiler = WaitForSamplingComplete(profiler_infos);
+    EXPECT_EQ(1U, completed_profiler);
+  });
+}
+
+// Checks that an idle-shutdown task will abort if a new profiler starts
+// between when it was posted and when it runs.
+PROFILER_TEST_F(StackSamplingProfilerTest, IdleShutdownAbort) {
+  WithTargetThread([](PlatformThreadId target_thread_id) {
+    SamplingParams params;
+
+    params.sampling_interval = TimeDelta::FromMilliseconds(1);
+    params.samples_per_burst = 1;
+
+    TestProfilerInfo profiler_info(target_thread_id, params);
+
+    profiler_info.profiler.Start();
+    profiler_info.completed.Wait();
+    EXPECT_EQ(1u, profiler_info.profiles.size());
+
+    // Perform an idle shutdown but simulate that a new capture is started
+    // before it can actually run.
+    StackSamplingProfiler::TestAPI::PerformSamplingThreadIdleShutdown(true);
+
+    // Though the shutdown-task has been executed, any actual exit of the
+    // thread is asynchronous, so there is no way to detect that it *didn't*
+    // exit except to wait a reasonable amount of time and then check. Since the
+    // thread was just running ("perform" blocked until it was), it should
+    // finish almost immediately and without any waiting for tasks or events.
+    PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(200));
+    EXPECT_TRUE(StackSamplingProfiler::TestAPI::IsSamplingThreadRunning());
+
+    // Ensure that it's still possible to run another sampler.
+    TestProfilerInfo another_info(target_thread_id, params);
+    another_info.profiler.Start();
+    another_info.completed.Wait();
+    EXPECT_EQ(1u, another_info.profiles.size());
+  });
+}
+
+// Checks that multiple synchronized sampling requests execute in parallel.
+PROFILER_TEST_F(StackSamplingProfilerTest, ConcurrentProfiling_InSync) {
+  WithTargetThread([](PlatformThreadId target_thread_id) {
+    std::vector<SamplingParams> params(2);
+
+    // Providing an initial delay makes it more likely that both will be
+    // scheduled before either starts to run. Once started, samples will
+    // run ordered by their scheduled, interleaved times regardless of the
+    // interval at which the thread wakes up. Thus, total execution time
+    // will be 10ms (delay) + 10x1ms (sampling) + 1/2 timer minimum interval.
+    params[0].initial_delay = TimeDelta::FromMilliseconds(10);
+    params[0].sampling_interval = TimeDelta::FromMilliseconds(1);
+    params[0].samples_per_burst = 9;
+
+    params[1].initial_delay = TimeDelta::FromMilliseconds(11);
+    params[1].sampling_interval = TimeDelta::FromMilliseconds(1);
+    params[1].samples_per_burst = 8;
+
+    std::vector<std::unique_ptr<TestProfilerInfo>> profiler_infos =
+        CreateProfilers(target_thread_id, params);
+
+    profiler_infos[0]->profiler.Start();
+    profiler_infos[1]->profiler.Start();
+
+    // Wait for one profiler to finish.
+    size_t completed_profiler = WaitForSamplingComplete(profiler_infos);
+    ASSERT_EQ(1u, profiler_infos[completed_profiler]->profiles.size());
+
+    size_t other_profiler = 1 - completed_profiler;
+    // Wait for the other profiler to finish.
+    profiler_infos[other_profiler]->completed.Wait();
+    ASSERT_EQ(1u, profiler_infos[other_profiler]->profiles.size());
+
+    // Ensure each got the correct number of samples.
+    EXPECT_EQ(9u, profiler_infos[0]->profiles[0].samples.size());
+    EXPECT_EQ(8u, profiler_infos[1]->profiles[0].samples.size());
+  });
+}
+
+// Checks that several mixed sampling requests execute in parallel.
+PROFILER_TEST_F(StackSamplingProfilerTest, ConcurrentProfiling_Mixed) {
+  WithTargetThread([](PlatformThreadId target_thread_id) {
+    std::vector<SamplingParams> params(3);
+
+    params[0].initial_delay = TimeDelta::FromMilliseconds(8);
+    params[0].sampling_interval = TimeDelta::FromMilliseconds(4);
+    params[0].samples_per_burst = 10;
+
+    params[1].initial_delay = TimeDelta::FromMilliseconds(9);
+    params[1].sampling_interval = TimeDelta::FromMilliseconds(3);
+    params[1].samples_per_burst = 10;
+
+    params[2].initial_delay = TimeDelta::FromMilliseconds(10);
+    params[2].sampling_interval = TimeDelta::FromMilliseconds(2);
+    params[2].samples_per_burst = 10;
+
+    std::vector<std::unique_ptr<TestProfilerInfo>> profiler_infos =
+        CreateProfilers(target_thread_id, params);
+
+    for (size_t i = 0; i < profiler_infos.size(); ++i)
+      profiler_infos[i]->profiler.Start();
+
+    // Wait for one profiler to finish.
+    size_t completed_profiler = WaitForSamplingComplete(profiler_infos);
+    EXPECT_EQ(1u, profiler_infos[completed_profiler]->profiles.size());
+    // Stop and destroy all profilers, always in the same order. Don't crash.
+    for (size_t i = 0; i < profiler_infos.size(); ++i)
+      profiler_infos[i]->profiler.Stop();
+    for (size_t i = 0; i < profiler_infos.size(); ++i)
+      profiler_infos[i].reset();
+  });
+}
+
+// Checks that a stack that runs through another library produces a stack with
+// the expected functions.
+// macOS ASAN is not yet supported - crbug.com/718628.
+#if !(defined(ADDRESS_SANITIZER) && defined(OS_MACOSX))
+#define MAYBE_OtherLibrary OtherLibrary
+#else
+#define MAYBE_OtherLibrary DISABLED_OtherLibrary
+#endif
+PROFILER_TEST_F(StackSamplingProfilerTest, MAYBE_OtherLibrary) {
+  SamplingParams params;
+  params.sampling_interval = TimeDelta::FromMilliseconds(0);
+  params.samples_per_burst = 1;
+
+  std::vector<CallStackProfile> profiles;
+  {
+    ScopedNativeLibrary other_library(LoadOtherLibrary());
+    WithTargetThread(
+        [&params, &profiles](PlatformThreadId target_thread_id) {
+          WaitableEvent sampling_thread_completed(
+              WaitableEvent::ResetPolicy::MANUAL,
+              WaitableEvent::InitialState::NOT_SIGNALED);
+          const StackSamplingProfiler::CompletedCallback callback =
+              Bind(&SaveProfilesAndSignalEvent, Unretained(&profiles),
+                   Unretained(&sampling_thread_completed));
+          StackSamplingProfiler profiler(target_thread_id, params, callback);
+          profiler.Start();
+          sampling_thread_completed.Wait();
+        },
+        StackConfiguration(StackConfiguration::WITH_OTHER_LIBRARY,
+                           other_library.get()));
+  }
+
+  // Look up the sample.
+  ASSERT_EQ(1u, profiles.size());
+  const CallStackProfile& profile = profiles[0];
+  ASSERT_EQ(1u, profile.samples.size());
+  const Sample& sample = profile.samples[0];
+
+  // Check that the stack contains a frame for
+  // TargetThread::CallThroughOtherLibrary().
+  Frames::const_iterator other_library_frame = FindFirstFrameWithinFunction(
+      sample, &TargetThread::CallThroughOtherLibrary);
+  ASSERT_TRUE(other_library_frame != sample.frames.end())
+      << "Function at "
+      << MaybeFixupFunctionAddressForILT(reinterpret_cast<const void*>(
+             &TargetThread::CallThroughOtherLibrary))
+      << " was not found in stack:\n"
+      << FormatSampleForDiagnosticOutput(sample, profile.modules);
+
+  // Check that the stack contains a frame for
+  // TargetThread::SignalAndWaitUntilSignaled().
+  Frames::const_iterator end_frame = FindFirstFrameWithinFunction(
+      sample, &TargetThread::SignalAndWaitUntilSignaled);
+  ASSERT_TRUE(end_frame != sample.frames.end())
+      << "Function at "
+      << MaybeFixupFunctionAddressForILT(reinterpret_cast<const void*>(
+             &TargetThread::SignalAndWaitUntilSignaled))
+      << " was not found in stack:\n"
+      << FormatSampleForDiagnosticOutput(sample, profile.modules);
+
+  // The stack should look like this, resulting in three frames between
+  // SignalAndWaitUntilSignaled and CallThroughOtherLibrary:
+  //
+  // ... WaitableEvent and system frames ...
+  // TargetThread::SignalAndWaitUntilSignaled
+  // TargetThread::OtherLibraryCallback
+  // InvokeCallbackFunction (in other library)
+  // TargetThread::CallThroughOtherLibrary
+  EXPECT_EQ(3, other_library_frame - end_frame)
+      << "Stack:\n" << FormatSampleForDiagnosticOutput(sample, profile.modules);
+}
+
+// Checks that a stack that runs through a library that is unloading produces a
+// stack, and doesn't crash.
+// Unloading is synchronous on the Mac, so this test is inapplicable.
+#if !defined(OS_MACOSX)
+#define MAYBE_UnloadingLibrary UnloadingLibrary
+#else
+#define MAYBE_UnloadingLibrary DISABLED_UnloadingLibrary
+#endif
+PROFILER_TEST_F(StackSamplingProfilerTest, MAYBE_UnloadingLibrary) {
+  TestLibraryUnload(false);
+}
+
+// Checks that a stack that runs through a library that has been unloaded
+// produces a stack, and doesn't crash.
+// macOS ASAN is not yet supported - crbug.com/718628.
+#if !(defined(ADDRESS_SANITIZER) && defined(OS_MACOSX))
+#define MAYBE_UnloadedLibrary UnloadedLibrary
+#else
+#define MAYBE_UnloadedLibrary DISABLED_UnloadedLibrary
+#endif
+PROFILER_TEST_F(StackSamplingProfilerTest, MAYBE_UnloadedLibrary) {
+  TestLibraryUnload(true);
+}
+
+// Checks that different threads can be sampled in parallel.
+PROFILER_TEST_F(StackSamplingProfilerTest, MultipleSampledThreads) {
+  // Create target threads. The extra parentheses around the StackConfiguration
+  // call are to avoid the most-vexing-parse problem.
+  TargetThread target_thread1((StackConfiguration(StackConfiguration::NORMAL)));
+  TargetThread target_thread2((StackConfiguration(StackConfiguration::NORMAL)));
+  PlatformThreadHandle target_thread_handle1, target_thread_handle2;
+  EXPECT_TRUE(
+      PlatformThread::Create(0, &target_thread1, &target_thread_handle1));
+  EXPECT_TRUE(
+      PlatformThread::Create(0, &target_thread2, &target_thread_handle2));
+  target_thread1.WaitForThreadStart();
+  target_thread2.WaitForThreadStart();
+
+  // Providing an initial delay makes it more likely that both will be
+  // scheduled before either starts to run. Once started, samples will
+  // run ordered by their scheduled, interleaved times regardless of the
+  // interval at which the thread wakes up.
+  SamplingParams params1, params2;
+  params1.initial_delay = TimeDelta::FromMilliseconds(10);
+  params1.sampling_interval = TimeDelta::FromMilliseconds(1);
+  params1.samples_per_burst = 9;
+  params2.initial_delay = TimeDelta::FromMilliseconds(10);
+  params2.sampling_interval = TimeDelta::FromMilliseconds(1);
+  params2.samples_per_burst = 8;
+
+  std::vector<CallStackProfile> profiles1, profiles2;
+
+  WaitableEvent sampling_thread_completed1(
+      WaitableEvent::ResetPolicy::MANUAL,
+      WaitableEvent::InitialState::NOT_SIGNALED);
+  const StackSamplingProfiler::CompletedCallback callback1 =
+      Bind(&SaveProfilesAndSignalEvent, Unretained(&profiles1),
+           Unretained(&sampling_thread_completed1));
+  StackSamplingProfiler profiler1(target_thread1.id(), params1, callback1);
+
+  WaitableEvent sampling_thread_completed2(
+      WaitableEvent::ResetPolicy::MANUAL,
+      WaitableEvent::InitialState::NOT_SIGNALED);
+  const StackSamplingProfiler::CompletedCallback callback2 =
+      Bind(&SaveProfilesAndSignalEvent, Unretained(&profiles2),
+           Unretained(&sampling_thread_completed2));
+  StackSamplingProfiler profiler2(target_thread2.id(), params2, callback2);
+
+  // Finally the real work.
+  profiler1.Start();
+  profiler2.Start();
+  sampling_thread_completed1.Wait();
+  sampling_thread_completed2.Wait();
+  ASSERT_EQ(1u, profiles1.size());
+  EXPECT_EQ(9u, profiles1[0].samples.size());
+  ASSERT_EQ(1u, profiles2.size());
+  EXPECT_EQ(8u, profiles2[0].samples.size());
+
+  target_thread1.SignalThreadToFinish();
+  target_thread2.SignalThreadToFinish();
+  PlatformThread::Join(target_thread_handle1);
+  PlatformThread::Join(target_thread_handle2);
+}
+
+// A simple thread that runs a profiler on another thread.
+class ProfilerThread : public SimpleThread {
+ public:
+  ProfilerThread(const std::string& name,
+                 PlatformThreadId thread_id,
+                 const SamplingParams& params)
+      : SimpleThread(name, Options()),
+        run_(WaitableEvent::ResetPolicy::MANUAL,
+             WaitableEvent::InitialState::NOT_SIGNALED),
+        completed_(WaitableEvent::ResetPolicy::MANUAL,
+                   WaitableEvent::InitialState::NOT_SIGNALED),
+        profiler_(thread_id,
+                  params,
+                  Bind(&SaveProfilesAndSignalEvent,
+                       Unretained(&profiles_),
+                       Unretained(&completed_))) {}
+
+  void Run() override {
+    run_.Wait();
+    profiler_.Start();
+  }
+
+  void Go() { run_.Signal(); }
+
+  void Wait() { completed_.Wait(); }
+
+  CallStackProfiles& profiles() { return profiles_; }
+
+ private:
+  WaitableEvent run_;
+
+  CallStackProfiles profiles_;
+  WaitableEvent completed_;
+  StackSamplingProfiler profiler_;
+};
+
+// Checks that different threads can run samplers in parallel.
+PROFILER_TEST_F(StackSamplingProfilerTest, MultipleProfilerThreads) {
+  WithTargetThread([](PlatformThreadId target_thread_id) {
+    // Providing an initial delay makes it more likely that both will be
+    // scheduled before either starts to run. Once started, samples will
+    // run ordered by their scheduled, interleaved times regardless of the
+    // interval at which the thread wakes up.
+    SamplingParams params1, params2;
+    params1.initial_delay = TimeDelta::FromMilliseconds(10);
+    params1.sampling_interval = TimeDelta::FromMilliseconds(1);
+    params1.samples_per_burst = 9;
+    params2.initial_delay = TimeDelta::FromMilliseconds(10);
+    params2.sampling_interval = TimeDelta::FromMilliseconds(1);
+    params2.samples_per_burst = 8;
+
+    // Start the profiler threads and give them a moment to get going.
+    ProfilerThread profiler_thread1("profiler1", target_thread_id, params1);
+    ProfilerThread profiler_thread2("profiler2", target_thread_id, params2);
+    profiler_thread1.Start();
+    profiler_thread2.Start();
+    PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(10));
+
+    // This will (approximately) synchronize the two threads.
+    profiler_thread1.Go();
+    profiler_thread2.Go();
+
+    // Wait for them both to finish and validate collection.
+    profiler_thread1.Wait();
+    profiler_thread2.Wait();
+    ASSERT_EQ(1u, profiler_thread1.profiles().size());
+    EXPECT_EQ(9u, profiler_thread1.profiles()[0].samples.size());
+    ASSERT_EQ(1u, profiler_thread2.profiles().size());
+    EXPECT_EQ(8u, profiler_thread2.profiles()[0].samples.size());
+
+    profiler_thread1.Join();
+    profiler_thread2.Join();
+  });
+}
+
+}  // namespace base
diff --git a/base/profiler/test_support_library.cc b/base/profiler/test_support_library.cc
new file mode 100644
index 0000000..035f8f7
--- /dev/null
+++ b/base/profiler/test_support_library.cc
@@ -0,0 +1,30 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Note: there is intentionally no header file associated with this library so
+// we don't risk implicitly demand loading it by accessing a symbol.
+
+#if defined(WIN32)
+#define BASE_PROFILER_TEST_SUPPORT_LIBRARY_EXPORT __declspec(dllexport)
+#else  // defined(WIN32)
+#define BASE_PROFILER_TEST_SUPPORT_LIBRARY_EXPORT \
+  __attribute__((visibility("default")))
+#endif
+
+namespace base {
+
+// Must be defined in an extern "C" block so we can look up the unmangled name.
+extern "C" {
+
+BASE_PROFILER_TEST_SUPPORT_LIBRARY_EXPORT void InvokeCallbackFunction(
+    void (*function)(void*),
+    void* arg) {
+  function(arg);
+  // Prevent a tail call, so this function keeps its own stack frame; the
+  // profiler tests expect to find InvokeCallbackFunction in sampled stacks.
+  volatile int i = 0;
+  i = 1;
+}
+
+}  // extern "C"
+
+}  // namespace base
diff --git a/base/profiler/win32_stack_frame_unwinder.cc b/base/profiler/win32_stack_frame_unwinder.cc
new file mode 100644
index 0000000..9e6ab39
--- /dev/null
+++ b/base/profiler/win32_stack_frame_unwinder.cc
@@ -0,0 +1,186 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/profiler/win32_stack_frame_unwinder.h"
+
+#include <windows.h>
+
+#include <utility>
+
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+
+namespace base {
+
+// Win32UnwindFunctions -------------------------------------------------------
+
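+// An arbitrary non-null sentinel value; it is compared by identity only and
+// is never passed to ::FreeLibrary() (see CloseHandle() below).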
+const HMODULE ModuleHandleTraits::kNonNullModuleForTesting =
+    reinterpret_cast<HMODULE>(static_cast<uintptr_t>(-1));
+
+// static
+bool ModuleHandleTraits::CloseHandle(HMODULE handle) {
+  if (handle == kNonNullModuleForTesting)
+    return true;
+
+  return ::FreeLibrary(handle) != 0;
+}
+
+// static
+bool ModuleHandleTraits::IsHandleValid(HMODULE handle) {
+  return handle != nullptr;
+}
+
+// static
+HMODULE ModuleHandleTraits::NullHandle() {
+  return nullptr;
+}
+
+namespace {
+
+// Implements the UnwindFunctions interface for the corresponding Win32
+// functions.
+class Win32UnwindFunctions : public Win32StackFrameUnwinder::UnwindFunctions {
+ public:
+  Win32UnwindFunctions();
+  ~Win32UnwindFunctions() override;
+
+  PRUNTIME_FUNCTION LookupFunctionEntry(DWORD64 program_counter,
+                                        PDWORD64 image_base) override;
+
+  void VirtualUnwind(DWORD64 image_base,
+                     DWORD64 program_counter,
+                     PRUNTIME_FUNCTION runtime_function,
+                     CONTEXT* context) override;
+
+  ScopedModuleHandle GetModuleForProgramCounter(
+      DWORD64 program_counter) override;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(Win32UnwindFunctions);
+};
+
+Win32UnwindFunctions::Win32UnwindFunctions() {}
+Win32UnwindFunctions::~Win32UnwindFunctions() {}
+
+PRUNTIME_FUNCTION Win32UnwindFunctions::LookupFunctionEntry(
+    DWORD64 program_counter,
+    PDWORD64 image_base) {
+#ifdef _WIN64
+  return RtlLookupFunctionEntry(program_counter, image_base, nullptr);
+#else
+  NOTREACHED();
+  return nullptr;
+#endif
+}
+
+void Win32UnwindFunctions::VirtualUnwind(DWORD64 image_base,
+                                         DWORD64 program_counter,
+                                         PRUNTIME_FUNCTION runtime_function,
+                                         CONTEXT* context) {
+#ifdef _WIN64
+  void* handler_data;
+  ULONG64 establisher_frame;
+  KNONVOLATILE_CONTEXT_POINTERS nvcontext = {};
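+  // UNW_FLAG_NHANDLER restores the register context one frame up without
+  // invoking any exception or termination handlers.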
+  RtlVirtualUnwind(UNW_FLAG_NHANDLER, image_base, program_counter,
+                   runtime_function, context, &handler_data,
+                   &establisher_frame, &nvcontext);
+#else
+  NOTREACHED();
+#endif
+}
+
+ScopedModuleHandle Win32UnwindFunctions::GetModuleForProgramCounter(
+    DWORD64 program_counter) {
+  HMODULE module_handle = nullptr;
+  // GetModuleHandleEx() increments the module reference count, which is then
+  // managed and ultimately decremented by ScopedModuleHandle.
+  if (!::GetModuleHandleEx(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS,
+                           reinterpret_cast<LPCTSTR>(program_counter),
+                           &module_handle)) {
+    const DWORD error = ::GetLastError();
+    DCHECK_EQ(ERROR_MOD_NOT_FOUND, static_cast<int>(error));
+  }
+  return ScopedModuleHandle(module_handle);
+}
+
+}  // namespace
+
+// Win32StackFrameUnwinder ----------------------------------------------------
+
+Win32StackFrameUnwinder::UnwindFunctions::~UnwindFunctions() {}
+Win32StackFrameUnwinder::UnwindFunctions::UnwindFunctions() {}
+
+Win32StackFrameUnwinder::Win32StackFrameUnwinder()
+    : Win32StackFrameUnwinder(WrapUnique(new Win32UnwindFunctions)) {}
+
+Win32StackFrameUnwinder::~Win32StackFrameUnwinder() {}
+
+bool Win32StackFrameUnwinder::TryUnwind(CONTEXT* context,
+                                        ScopedModuleHandle* module) {
+#ifdef _WIN64
+  ScopedModuleHandle frame_module =
+      unwind_functions_->GetModuleForProgramCounter(context->Rip);
+  if (!frame_module.IsValid()) {
+    // There's no loaded module containing the instruction pointer. This can be
+    // due to executing code that is not in a module. In particular,
+    // runtime-generated code associated with third-party injected DLLs
+    // typically is not in a module. It can also be due to the module having
+    // been unloaded since we recorded the stack.  In the latter case the
+    // function unwind information was part of the unloaded module, so it's not
+    // possible to unwind further.
+    //
+    // If a module was found, it's still theoretically possible for the detected
+    // module to be different from the one that was loaded when the stack
+    // was copied (i.e. if the module was unloaded and a different module loaded
+    // in overlapping memory). This likely would cause a crash, but has not been
+    // observed in practice.
+    return false;
+  }
+
+  ULONG64 image_base;
+  // Try to look up unwind metadata for the current function.
+  PRUNTIME_FUNCTION runtime_function =
+      unwind_functions_->LookupFunctionEntry(context->Rip, &image_base);
+
+  if (runtime_function) {
+    unwind_functions_->VirtualUnwind(image_base, context->Rip, runtime_function,
+                                     context);
+    at_top_frame_ = false;
+  } else {
+    if (at_top_frame_) {
+      at_top_frame_ = false;
+
+      // This is a leaf function (i.e. one that neither calls another function
+      // nor allocates any stack space itself), so the return address is at
+      // RSP.
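+      // Simulate a 'ret' instruction: pop the 8-byte return address into RIP
+      // and advance RSP past it.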
+      context->Rip = *reinterpret_cast<DWORD64*>(context->Rsp);
+      context->Rsp += 8;
+    } else {
+      // In theory we shouldn't get here, as it means we've encountered a
+      // function without unwind information below the top of the stack, which
+      // is forbidden by the Microsoft x64 calling convention.
+      //
+      // The one known case in Chrome code that executes this path occurs
+      // because of BoringSSL unwind information inconsistent with the actual
+      // function code. See https://crbug.com/542919.
+      //
+      // Note that dodgy third-party generated code that otherwise would enter
+      // this path should be caught by the module check above, since the code
+      // typically is located outside of a module.
+      return false;
+    }
+  }
+
+  module->Set(frame_module.Take());
+  return true;
+#else
+  NOTREACHED();
+  return false;
+#endif
+}
+
+Win32StackFrameUnwinder::Win32StackFrameUnwinder(
+    std::unique_ptr<UnwindFunctions> unwind_functions)
+    : at_top_frame_(true), unwind_functions_(std::move(unwind_functions)) {}
+
+}  // namespace base
diff --git a/base/profiler/win32_stack_frame_unwinder.h b/base/profiler/win32_stack_frame_unwinder.h
new file mode 100644
index 0000000..c92d50c
--- /dev/null
+++ b/base/profiler/win32_stack_frame_unwinder.h
@@ -0,0 +1,102 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_PROFILER_WIN32_STACK_FRAME_UNWINDER_H_
+#define BASE_PROFILER_WIN32_STACK_FRAME_UNWINDER_H_
+
+#include <windows.h>
+
+#include <memory>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/win/scoped_handle.h"
+
+namespace base {
+
+#if !defined(_WIN64)
+// Allows code to compile for x86. Actual support for x86 will require either
+// refactoring these interfaces or separate architecture-specific interfaces.
+struct RUNTIME_FUNCTION {
+  DWORD BeginAddress;
+  DWORD EndAddress;
+};
+using PRUNTIME_FUNCTION = RUNTIME_FUNCTION*;
+#endif  // !defined(_WIN64)
+
+// Traits class to adapt GenericScopedHandle for HMODULES.
+class ModuleHandleTraits : public win::HandleTraits {
+ public:
+  using Handle = HMODULE;
+
+  static bool BASE_EXPORT CloseHandle(HMODULE handle);
+  static bool BASE_EXPORT IsHandleValid(HMODULE handle);
+  static HMODULE BASE_EXPORT NullHandle();
+
+  BASE_EXPORT static const HMODULE kNonNullModuleForTesting;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ModuleHandleTraits);
+};
+
+// HMODULE is not really a handle, and has reference count semantics, so the
+// standard VerifierTraits does not apply.
+using ScopedModuleHandle =
+    win::GenericScopedHandle<ModuleHandleTraits, win::DummyVerifierTraits>;
+
+// Instances of this class are expected to be created and destroyed for each
+// stack unwinding. This class is not used while the target thread is suspended,
+// so may allocate from the default heap.
+class BASE_EXPORT Win32StackFrameUnwinder {
+ public:
+  // Interface for Win32 unwind-related functionality this class depends
+  // on. Provides a seam for testing.
+  class BASE_EXPORT UnwindFunctions {
+   public:
+    virtual ~UnwindFunctions();
+
+    virtual PRUNTIME_FUNCTION LookupFunctionEntry(DWORD64 program_counter,
+                                                  PDWORD64 image_base) = 0;
+    virtual void VirtualUnwind(DWORD64 image_base,
+                               DWORD64 program_counter,
+                               PRUNTIME_FUNCTION runtime_function,
+                               CONTEXT* context) = 0;
+
+    // Returns the module containing |program_counter|. Can return null if the
+    // module has been unloaded.
+    virtual ScopedModuleHandle GetModuleForProgramCounter(
+        DWORD64 program_counter) = 0;
+
+   protected:
+    UnwindFunctions();
+
+   private:
+    DISALLOW_COPY_AND_ASSIGN(UnwindFunctions);
+  };
+
+  Win32StackFrameUnwinder();
+  ~Win32StackFrameUnwinder();
+
+  // Attempts to unwind the frame represented by the stack and instruction
+  // pointers in |context|. If successful, updates |context| and provides the
+  // module associated with the frame in |module|.
+  bool TryUnwind(CONTEXT* context, ScopedModuleHandle* module);
+
+ private:
+  // This function is for internal and test purposes only.
+  Win32StackFrameUnwinder(std::unique_ptr<UnwindFunctions> unwind_functions);
+  friend class Win32StackFrameUnwinderTest;
+
+  // State associated with each stack unwinding.
+  bool at_top_frame_;
+  bool unwind_info_present_for_all_frames_;
+
+  std::unique_ptr<UnwindFunctions> unwind_functions_;
+
+  DISALLOW_COPY_AND_ASSIGN(Win32StackFrameUnwinder);
+};
+
+}  // namespace base
+
+#endif  // BASE_PROFILER_WIN32_STACK_FRAME_UNWINDER_H_
diff --git a/base/profiler/win32_stack_frame_unwinder_unittest.cc b/base/profiler/win32_stack_frame_unwinder_unittest.cc
new file mode 100644
index 0000000..cecfe22
--- /dev/null
+++ b/base/profiler/win32_stack_frame_unwinder_unittest.cc
@@ -0,0 +1,223 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/profiler/win32_stack_frame_unwinder.h"
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+class TestUnwindFunctions : public Win32StackFrameUnwinder::UnwindFunctions {
+ public:
+  TestUnwindFunctions();
+
+  PRUNTIME_FUNCTION LookupFunctionEntry(DWORD64 program_counter,
+                                        PDWORD64 image_base) override;
+  void VirtualUnwind(DWORD64 image_base,
+                     DWORD64 program_counter,
+                     PRUNTIME_FUNCTION runtime_function,
+                     CONTEXT* context) override;
+  ScopedModuleHandle GetModuleForProgramCounter(
+      DWORD64 program_counter) override;
+
+  // Instructs GetModuleForProgramCounter to return null on the next call.
+  void SetUnloadedModule();
+
+  // These functions set whether the next frame will have a RUNTIME_FUNCTION.
+  void SetHasRuntimeFunction(CONTEXT* context);
+  void SetNoRuntimeFunction(CONTEXT* context);
+
+ private:
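+  // Each call to LookupFunctionEntry() advances the fake image base by this
+  // amount (1 MiB), so successive frames appear to come from distinct modules.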
+  enum { kImageBaseIncrement = 1 << 20 };
+
+  static RUNTIME_FUNCTION* const kInvalidRuntimeFunction;
+
+  bool module_is_loaded_;
+  DWORD64 expected_program_counter_;
+  DWORD64 next_image_base_;
+  DWORD64 expected_image_base_;
+  RUNTIME_FUNCTION* next_runtime_function_;
+  std::vector<RUNTIME_FUNCTION> runtime_functions_;
+
+  DISALLOW_COPY_AND_ASSIGN(TestUnwindFunctions);
+};
+
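+// A poison value: VirtualUnwind() asserts that the test replaced it via
+// SetHasRuntimeFunction() or SetNoRuntimeFunction() before TryUnwind() ran.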
+RUNTIME_FUNCTION* const TestUnwindFunctions::kInvalidRuntimeFunction =
+    reinterpret_cast<RUNTIME_FUNCTION*>(static_cast<uintptr_t>(-1));
+
+TestUnwindFunctions::TestUnwindFunctions()
+    : module_is_loaded_(true),
+      expected_program_counter_(0),
+      next_image_base_(kImageBaseIncrement),
+      expected_image_base_(0),
+      next_runtime_function_(kInvalidRuntimeFunction) {
+}
+
+PRUNTIME_FUNCTION TestUnwindFunctions::LookupFunctionEntry(
+    DWORD64 program_counter,
+    PDWORD64 image_base) {
+  EXPECT_EQ(expected_program_counter_, program_counter);
+  *image_base = expected_image_base_ = next_image_base_;
+  next_image_base_ += kImageBaseIncrement;
+  RUNTIME_FUNCTION* return_value = next_runtime_function_;
+  next_runtime_function_ = kInvalidRuntimeFunction;
+  return return_value;
+}
+
+void TestUnwindFunctions::VirtualUnwind(DWORD64 image_base,
+                                        DWORD64 program_counter,
+                                        PRUNTIME_FUNCTION runtime_function,
+                                        CONTEXT* context) {
+  ASSERT_NE(kInvalidRuntimeFunction, runtime_function)
+      << "expected call to SetHasRuntimeFunction() or SetNoRuntimeFunction() "
+      << "before invoking TryUnwind()";
+  EXPECT_EQ(expected_image_base_, image_base);
+  expected_image_base_ = 0;
+  EXPECT_EQ(expected_program_counter_, program_counter);
+  expected_program_counter_ = 0;
+  // This function should only be called when LookupFunctionEntry returns
+  // a RUNTIME_FUNCTION.
+  EXPECT_EQ(&runtime_functions_.back(), runtime_function);
+}
+
+ScopedModuleHandle TestUnwindFunctions::GetModuleForProgramCounter(
+    DWORD64 program_counter) {
+  bool return_non_null_value = module_is_loaded_;
+  module_is_loaded_ = true;
+  return ScopedModuleHandle(return_non_null_value ?
+                            ModuleHandleTraits::kNonNullModuleForTesting :
+                            nullptr);
+}
+
+void TestUnwindFunctions::SetUnloadedModule() {
+  module_is_loaded_ = false;
+}
+
+void TestUnwindFunctions::SetHasRuntimeFunction(CONTEXT* context) {
+  RUNTIME_FUNCTION runtime_function = {};
+  runtime_function.BeginAddress = 16;
+  runtime_function.EndAddress = runtime_function.BeginAddress + 256;
+  runtime_functions_.push_back(runtime_function);
+  next_runtime_function_ = &runtime_functions_.back();
+
+  expected_program_counter_ = context->Rip =
+      next_image_base_ + runtime_function.BeginAddress + 8;
+}
+
+void TestUnwindFunctions::SetNoRuntimeFunction(CONTEXT* context) {
+  expected_program_counter_ = context->Rip = 100;
+  next_runtime_function_ = nullptr;
+}
+
+}  // namespace
+
+class Win32StackFrameUnwinderTest : public testing::Test {
+ protected:
+  Win32StackFrameUnwinderTest() {}
+
+  // This exists so that Win32StackFrameUnwinder's constructor can be private
+  // with a single friend declaration of this test fixture.
+  std::unique_ptr<Win32StackFrameUnwinder> CreateUnwinder();
+
+  // Weak pointer to the unwind functions used by the last created unwinder.
+  TestUnwindFunctions* unwind_functions_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(Win32StackFrameUnwinderTest);
+};
+
+std::unique_ptr<Win32StackFrameUnwinder>
+Win32StackFrameUnwinderTest::CreateUnwinder() {
+  std::unique_ptr<TestUnwindFunctions> unwind_functions(
+      new TestUnwindFunctions);
+  unwind_functions_ = unwind_functions.get();
+  return WrapUnique(
+      new Win32StackFrameUnwinder(std::move(unwind_functions)));
+}
+
+// Checks the case where all frames have unwind information.
+TEST_F(Win32StackFrameUnwinderTest, FramesWithUnwindInfo) {
+  std::unique_ptr<Win32StackFrameUnwinder> unwinder = CreateUnwinder();
+  CONTEXT context = {0};
+  ScopedModuleHandle module;
+
+  unwind_functions_->SetHasRuntimeFunction(&context);
+  EXPECT_TRUE(unwinder->TryUnwind(&context, &module));
+  EXPECT_TRUE(module.IsValid());
+
+  unwind_functions_->SetHasRuntimeFunction(&context);
+  module.Set(nullptr);
+  EXPECT_TRUE(unwinder->TryUnwind(&context, &module));
+  EXPECT_TRUE(module.IsValid());
+
+  unwind_functions_->SetHasRuntimeFunction(&context);
+  module.Set(nullptr);
+  EXPECT_TRUE(unwinder->TryUnwind(&context, &module));
+  EXPECT_TRUE(module.IsValid());
+}
+
+// Checks that an instruction pointer in an unloaded module fails to unwind.
+TEST_F(Win32StackFrameUnwinderTest, UnloadedModule) {
+  std::unique_ptr<Win32StackFrameUnwinder> unwinder = CreateUnwinder();
+  CONTEXT context = {0};
+  ScopedModuleHandle module;
+
+  unwind_functions_->SetUnloadedModule();
+  EXPECT_FALSE(unwinder->TryUnwind(&context, &module));
+}
+
+// Checks that the CONTEXT's stack pointer gets popped when the top frame has no
+// unwind information.
+TEST_F(Win32StackFrameUnwinderTest, FrameAtTopWithoutUnwindInfo) {
+  std::unique_ptr<Win32StackFrameUnwinder> unwinder = CreateUnwinder();
+  CONTEXT context = {0};
+  ScopedModuleHandle module;
+  DWORD64 next_ip = 0x0123456789abcdef;
+  DWORD64 original_rsp = reinterpret_cast<DWORD64>(&next_ip);
+  context.Rsp = original_rsp;
+
+  unwind_functions_->SetNoRuntimeFunction(&context);
+  EXPECT_TRUE(unwinder->TryUnwind(&context, &module));
+  EXPECT_EQ(next_ip, context.Rip);
+  EXPECT_EQ(original_rsp + 8, context.Rsp);
+  EXPECT_TRUE(module.IsValid());
+
+  unwind_functions_->SetHasRuntimeFunction(&context);
+  module.Set(nullptr);
+  EXPECT_TRUE(unwinder->TryUnwind(&context, &module));
+  EXPECT_TRUE(module.IsValid());
+
+  unwind_functions_->SetHasRuntimeFunction(&context);
+  module.Set(nullptr);
+  EXPECT_TRUE(unwinder->TryUnwind(&context, &module));
+  EXPECT_TRUE(module.IsValid());
+}
+
+// Checks that a frame below the top of the stack with missing unwind info
+// terminates the unwinding.
+TEST_F(Win32StackFrameUnwinderTest, FrameBelowTopWithoutUnwindInfo) {
+  {
+    // First stack, with a bad function below the top of the stack.
+    std::unique_ptr<Win32StackFrameUnwinder> unwinder = CreateUnwinder();
+    CONTEXT context = {0};
+    ScopedModuleHandle module;
+    unwind_functions_->SetHasRuntimeFunction(&context);
+    EXPECT_TRUE(unwinder->TryUnwind(&context, &module));
+    EXPECT_TRUE(module.IsValid());
+
+    unwind_functions_->SetNoRuntimeFunction(&context);
+    EXPECT_FALSE(unwinder->TryUnwind(&context, &module));
+  }
+}
+
+}  // namespace base
diff --git a/base/rand_util.cc b/base/rand_util.cc
new file mode 100644
index 0000000..5881ef2
--- /dev/null
+++ b/base/rand_util.cc
@@ -0,0 +1,82 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/rand_util.h"
+
+#include <limits.h>
+#include <math.h>
+#include <stdint.h>
+
+#include <algorithm>
+#include <limits>
+
+#include "base/logging.h"
+#include "base/strings/string_util.h"
+
+namespace base {
+
+uint64_t RandUint64() {
+  uint64_t number;
+  RandBytes(&number, sizeof(number));
+  return number;
+}
+
+int RandInt(int min, int max) {
+  DCHECK_LE(min, max);
+
+  uint64_t range = static_cast<uint64_t>(max) - min + 1;
+  // |range| is at most UINT_MAX + 1, so the result of RandGenerator(range)
+  // is at most UINT_MAX.  Hence it's safe to cast it from uint64_t to int64_t.
+  int result =
+      static_cast<int>(min + static_cast<int64_t>(base::RandGenerator(range)));
+  DCHECK_GE(result, min);
+  DCHECK_LE(result, max);
+  return result;
+}
+
+double RandDouble() {
+  return BitsToOpenEndedUnitInterval(base::RandUint64());
+}
+
+double BitsToOpenEndedUnitInterval(uint64_t bits) {
+  // We try to get maximum precision by masking out as many bits as will fit
+  // in the target type's mantissa, and raising it to an appropriate power to
+  // produce output in the range [0, 1).  For IEEE 754 doubles, the mantissa
+  // is expected to accommodate 53 bits.
+
+  static_assert(std::numeric_limits<double>::radix == 2,
+                "otherwise use scalbn");
+  static const int kBits = std::numeric_limits<double>::digits;
+  uint64_t random_bits = bits & ((UINT64_C(1) << kBits) - 1);
+  double result = ldexp(static_cast<double>(random_bits), -1 * kBits);
+  DCHECK_GE(result, 0.0);
+  DCHECK_LT(result, 1.0);
+  return result;
+}
+
+uint64_t RandGenerator(uint64_t range) {
+  DCHECK_GT(range, 0u);
+  // We must discard random results above this number, as they would
+  // make the random generator non-uniform (consider e.g. if
+  // MAX_UINT64 was 7 and |range| was 5, then a result of 1 would be twice
+  // as likely as a result of 3 or 4).
+  uint64_t max_acceptable_value =
+      (std::numeric_limits<uint64_t>::max() / range) * range - 1;
+
+  uint64_t value;
+  do {
+    value = base::RandUint64();
+  } while (value > max_acceptable_value);
+
+  return value % range;
+}
+
+std::string RandBytesAsString(size_t length) {
+  DCHECK_GT(length, 0u);
+  std::string result;
+  RandBytes(WriteInto(&result, length + 1), length);
+  return result;
+}
+
+}  // namespace base
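Editor's note: RandGenerator()'s rejection loop is the standard fix for modulo bias. A self-contained sketch of the same idea at 8-bit width, where the bias is easy to enumerate; rand8() is a deterministic stand-in for base::RandUint64(), not a base API:

#include <cstdint>
#include <random>

// Stand-in entropy source (deterministic here, for illustration only).
uint8_t rand8() {
  static std::mt19937 rng(1234);
  return static_cast<uint8_t>(rng() & 0xFF);
}

// Uniform value in [0, range): reject draws above the largest whole multiple
// of |range| minus one, exactly as RandGenerator() does at 64-bit width.
// E.g. for range == 5, a draw of 255 is discarded; otherwise 255 % 5 == 0
// would make a result of 0 slightly more likely than 3 or 4.
uint8_t Uniform8(uint8_t range) {
  const int max_acceptable = (256 / range) * range - 1;
  int value;
  do {
    value = rand8();
  } while (value > max_acceptable);
  return static_cast<uint8_t>(value % range);
}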
diff --git a/base/rand_util.h b/base/rand_util.h
new file mode 100644
index 0000000..03bf46f
--- /dev/null
+++ b/base/rand_util.h
@@ -0,0 +1,78 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_RAND_UTIL_H_
+#define BASE_RAND_UTIL_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <algorithm>
+#include <string>
+
+#include "base/base_export.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// Returns a random number in range [0, UINT64_MAX]. Thread-safe.
+BASE_EXPORT uint64_t RandUint64();
+
+// Returns a random number between min and max (inclusive). Thread-safe.
+BASE_EXPORT int RandInt(int min, int max);
+
+// Returns a random number in range [0, range).  Thread-safe.
+BASE_EXPORT uint64_t RandGenerator(uint64_t range);
+
+// Returns a random double in range [0, 1). Thread-safe.
+BASE_EXPORT double RandDouble();
+
+// Given input |bits|, convert with maximum precision to a double in
+// the range [0, 1). Thread-safe.
+BASE_EXPORT double BitsToOpenEndedUnitInterval(uint64_t bits);
+
+// Fills |output_length| bytes of |output| with random data. Thread-safe.
+//
+// Although implementations are required to use a cryptographically secure
+// random number source, code outside of base/ that relies on this should use
+// crypto::RandBytes instead to ensure the requirement is easily discoverable.
+BASE_EXPORT void RandBytes(void* output, size_t output_length);
+
+// Fills a string of length |length| with random data and returns it.
+// |length| should be nonzero. Thread-safe.
+//
+// Note that this is a variation of |RandBytes| with a different return type.
+// The returned string is likely not ASCII/UTF-8. Use with care.
+//
+// Although implementations are required to use a cryptographically secure
+// random number source, code outside of base/ that relies on this should use
+// crypto::RandBytes instead to ensure the requirement is easily discoverable.
+BASE_EXPORT std::string RandBytesAsString(size_t length);
+
+// An STL UniformRandomBitGenerator backed by RandUint64.
+// TODO(tzik): Consider replacing this with a faster implementation.
+class RandomBitGenerator {
+ public:
+  using result_type = uint64_t;
+  static constexpr result_type min() { return 0; }
+  static constexpr result_type max() { return UINT64_MAX; }
+  result_type operator()() const { return RandUint64(); }
+
+  RandomBitGenerator() = default;
+  ~RandomBitGenerator() = default;
+};
+
+// Shuffles [first, last) randomly. Thread-safe.
+template <typename Itr>
+void RandomShuffle(Itr first, Itr last) {
+  std::shuffle(first, last, RandomBitGenerator());
+}
+
+#if defined(OS_POSIX) && !defined(OS_FUCHSIA)
+BASE_EXPORT int GetUrandomFD();
+#endif
+
+}  // namespace base
+
+#endif  // BASE_RAND_UTIL_H_
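Editor's note: RandomBitGenerator satisfies the standard UniformRandomBitGenerator requirements (result_type, static min()/max(), operator()), which is what lets RandomShuffle() hand it to std::shuffle. A usage sketch:

#include <algorithm>
#include <vector>

#include "base/rand_util.h"

void ShuffleDeck() {
  std::vector<int> deck = {1, 2, 3, 4, 5};
  base::RandomShuffle(deck.begin(), deck.end());
  // Equivalent to calling the standard algorithm directly:
  std::shuffle(deck.begin(), deck.end(), base::RandomBitGenerator());
}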
diff --git a/base/rand_util_fuchsia.cc b/base/rand_util_fuchsia.cc
new file mode 100644
index 0000000..5f991d5
--- /dev/null
+++ b/base/rand_util_fuchsia.cc
@@ -0,0 +1,33 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/rand_util.h"
+
+#include <zircon/syscalls.h>
+
+#include <algorithm>
+
+#include "base/logging.h"
+
+namespace base {
+
+void RandBytes(void* output, size_t output_length) {
+  size_t remaining = output_length;
+  unsigned char* cur = reinterpret_cast<unsigned char*>(output);
+  while (remaining > 0) {
+    // The syscall has a maximum number of bytes that can be read at once.
+    size_t read_len =
+        std::min(remaining, static_cast<size_t>(ZX_CPRNG_DRAW_MAX_LEN));
+
+    size_t actual;
+    zx_status_t status = zx_cprng_draw(cur, read_len, &actual);
+    CHECK(status == ZX_OK && read_len == actual);
+
+    CHECK(remaining >= actual);
+    remaining -= actual;
+    cur += actual;
+  }
+}
+
+}  // namespace base
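Editor's note: the loop above is the general pattern for entropy sources with a per-call byte limit: clamp each request, then advance by however many bytes were actually produced. A standalone sketch; FillChunk() and kMaxChunk are illustrative stand-ins for zx_cprng_draw() and ZX_CPRNG_DRAW_MAX_LEN:

#include <algorithm>
#include <cstddef>

constexpr size_t kMaxChunk = 256;

// Illustrative source that can produce at most kMaxChunk bytes per call.
size_t FillChunk(unsigned char* out, size_t len) {
  for (size_t i = 0; i < len; ++i)
    out[i] = static_cast<unsigned char>(i);
  return len;  // A real source may legitimately return fewer bytes.
}

void FillAll(unsigned char* out, size_t total) {
  while (total > 0) {
    const size_t request = std::min(total, kMaxChunk);
    const size_t got = FillChunk(out, request);
    out += got;
    total -= got;
  }
}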
diff --git a/base/rand_util_nacl.cc b/base/rand_util_nacl.cc
new file mode 100644
index 0000000..b26b408
--- /dev/null
+++ b/base/rand_util_nacl.cc
@@ -0,0 +1,27 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/rand_util.h"
+
+#include <nacl/nacl_random.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/logging.h"
+
+namespace base {
+
+void RandBytes(void* output, size_t output_length) {
+  char* output_ptr = static_cast<char*>(output);
+  while (output_length > 0) {
+    size_t nread;
+    const int error = nacl_secure_random(output_ptr, output_length, &nread);
+    CHECK_EQ(error, 0);
+    CHECK_LE(nread, output_length);
+    output_ptr += nread;
+    output_length -= nread;
+  }
+}
+
+}  // namespace base
diff --git a/base/rand_util_posix.cc b/base/rand_util_posix.cc
new file mode 100644
index 0000000..2c1653d
--- /dev/null
+++ b/base/rand_util_posix.cc
@@ -0,0 +1,63 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/rand_util.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <unistd.h>
+
+#include "base/files/file_util.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/posix/eintr_wrapper.h"
+
+namespace {
+
+// We keep the file descriptor for /dev/urandom around so we don't need to
+// reopen it (which is expensive), and since we may not even be able to reopen
+// it if we are later put in a sandbox. This class wraps the file descriptor so
+// we can use LazyInstance to handle opening it on the first access.
+class URandomFd {
+ public:
+#if defined(OS_AIX)
+  // AIX has no 64-bit support for open flags such as
+  // O_CLOEXEC, O_NOFOLLOW and O_TTY_INIT.
+  URandomFd() : fd_(HANDLE_EINTR(open("/dev/urandom", O_RDONLY))) {
+    DCHECK_GE(fd_, 0) << "Cannot open /dev/urandom: " << errno;
+  }
+#else
+  URandomFd() : fd_(HANDLE_EINTR(open("/dev/urandom", O_RDONLY | O_CLOEXEC))) {
+    DCHECK_GE(fd_, 0) << "Cannot open /dev/urandom: " << errno;
+  }
+#endif
+
+  ~URandomFd() { close(fd_); }
+
+  int fd() const { return fd_; }
+
+ private:
+  const int fd_;
+};
+
+base::LazyInstance<URandomFd>::Leaky g_urandom_fd = LAZY_INSTANCE_INITIALIZER;
+
+}  // namespace
+
+namespace base {
+
+void RandBytes(void* output, size_t output_length) {
+  const int urandom_fd = g_urandom_fd.Pointer()->fd();
+  const bool success =
+      ReadFromFD(urandom_fd, static_cast<char*>(output), output_length);
+  CHECK(success);
+}
+
+int GetUrandomFD() {
+  return g_urandom_fd.Pointer()->fd();
+}
+
+}  // namespace base
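Editor's note: ReadFromFD() (from base/files/file_util.h) is expected to loop until the whole buffer is filled, since read(2) may return short counts. A sketch of that contract with EINTR retry folded in; this illustrates the expected behavior, not base's exact implementation:

#include <errno.h>
#include <unistd.h>

#include <cstddef>

bool ReadFully(int fd, char* buffer, size_t bytes) {
  size_t total = 0;
  while (total < bytes) {
    ssize_t rv = read(fd, buffer + total, bytes - total);
    if (rv < 0 && errno == EINTR)
      continue;  // Interrupted by a signal; retry.
    if (rv <= 0)
      return false;  // Error or unexpected EOF; the caller CHECKs.
    total += static_cast<size_t>(rv);
  }
  return true;
}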
diff --git a/base/rand_util_unittest.cc b/base/rand_util_unittest.cc
new file mode 100644
index 0000000..11a118a
--- /dev/null
+++ b/base/rand_util_unittest.cc
@@ -0,0 +1,170 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/rand_util.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <algorithm>
+#include <limits>
+#include <memory>
+
+#include "base/logging.h"
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+const int kIntMin = std::numeric_limits<int>::min();
+const int kIntMax = std::numeric_limits<int>::max();
+
+}  // namespace
+
+TEST(RandUtilTest, RandInt) {
+  EXPECT_EQ(base::RandInt(0, 0), 0);
+  EXPECT_EQ(base::RandInt(kIntMin, kIntMin), kIntMin);
+  EXPECT_EQ(base::RandInt(kIntMax, kIntMax), kIntMax);
+
+  // Check that the DCHECKS in RandInt() don't fire due to internal overflow.
+  // There was a 50% chance of that happening, so calling it 40 times means
+  // the chances of this passing by accident are tiny (9e-13).
+  for (int i = 0; i < 40; ++i)
+    base::RandInt(kIntMin, kIntMax);
+}
+
+TEST(RandUtilTest, RandDouble) {
+  // Force 64-bit precision, making sure we're not in an 80-bit FPU register.
+  volatile double number = base::RandDouble();
+  EXPECT_GT(1.0, number);
+  EXPECT_LE(0.0, number);
+}
+
+TEST(RandUtilTest, RandBytes) {
+  const size_t buffer_size = 50;
+  char buffer[buffer_size];
+  memset(buffer, 0, buffer_size);
+  base::RandBytes(buffer, buffer_size);
+  std::sort(buffer, buffer + buffer_size);
+  // The probability of seeing fewer than 25 unique bytes in 50 random bytes
+  // is below 10^-25.
+  EXPECT_GT(std::unique(buffer, buffer + buffer_size) - buffer, 25);
+}
+
+// Verify that calling base::RandBytes with an empty buffer doesn't fail.
+TEST(RandUtilTest, RandBytes0) {
+  base::RandBytes(nullptr, 0);
+}
+
+TEST(RandUtilTest, RandBytesAsString) {
+  std::string random_string = base::RandBytesAsString(1);
+  EXPECT_EQ(1U, random_string.size());
+  random_string = base::RandBytesAsString(145);
+  EXPECT_EQ(145U, random_string.size());
+  char accumulator = 0;
+  for (size_t i = 0; i < random_string.size(); ++i)
+    accumulator |= random_string[i];
+  // In theory this test can fail, but it won't before the universe dies of
+  // heat death.
+  EXPECT_NE(0, accumulator);
+}
+
+// Make sure that it is still appropriate to use RandGenerator in conjunction
+// with std::random_shuffle().
+TEST(RandUtilTest, RandGeneratorForRandomShuffle) {
+  EXPECT_EQ(base::RandGenerator(1), 0U);
+  EXPECT_LE(std::numeric_limits<ptrdiff_t>::max(),
+            std::numeric_limits<int64_t>::max());
+}
+
+TEST(RandUtilTest, RandGeneratorIsUniform) {
+  // Verify that RandGenerator has a uniform distribution. This is a
+  // regression test that consistently failed when RandGenerator was
+  // implemented this way:
+  //
+  //   return base::RandUint64() % max;
+  //
+  // A degenerate case for such an implementation is e.g. a top of
+  // range that is 2/3rds of the way to MAX_UINT64, in which case the
+  // bottom half of the range would be twice as likely to occur as the
+  // top half. A bit of calculus care of jar@ shows that the largest
+  // measurable delta is when the top of the range is 3/4ths of the
+  // way, so that's what we use in the test.
+  const uint64_t kTopOfRange =
+      (std::numeric_limits<uint64_t>::max() / 4ULL) * 3ULL;
+  const uint64_t kExpectedAverage = kTopOfRange / 2ULL;
+  const uint64_t kAllowedVariance = kExpectedAverage / 50ULL;  // +/- 2%
+  const int kMinAttempts = 1000;
+  const int kMaxAttempts = 1000000;
+
+  double cumulative_average = 0.0;
+  int count = 0;
+  while (count < kMaxAttempts) {
+    uint64_t value = base::RandGenerator(kTopOfRange);
+    cumulative_average = (count * cumulative_average + value) / (count + 1);
+
+    // Don't quit before things have had a chance to start converging, or we
+    // may get a false positive.
+    if (count > kMinAttempts &&
+        kExpectedAverage - kAllowedVariance < cumulative_average &&
+        cumulative_average < kExpectedAverage + kAllowedVariance) {
+      break;
+    }
+
+    ++count;
+  }
+
+  ASSERT_LT(count, kMaxAttempts) << "Expected average was " <<
+      kExpectedAverage << ", average ended at " << cumulative_average;
+}
+
+TEST(RandUtilTest, RandUint64ProducesBothValuesOfAllBits) {
+  // This tests to see that our underlying random generator is good
+  // enough, for some value of good enough.
+  uint64_t kAllZeros = 0ULL;
+  uint64_t kAllOnes = ~kAllZeros;
+  uint64_t found_ones = kAllZeros;
+  uint64_t found_zeros = kAllOnes;
+
+  for (size_t i = 0; i < 1000; ++i) {
+    uint64_t value = base::RandUint64();
+    found_ones |= value;
+    found_zeros &= value;
+
+    if (found_zeros == kAllZeros && found_ones == kAllOnes)
+      return;
+  }
+
+  FAIL() << "Didn't achieve all bit values in maximum number of tries.";
+}
+
+TEST(RandUtilTest, RandBytesLonger) {
+  // Fuchsia can only retrieve 256 bytes of entropy at a time, so make sure we
+  // handle longer requests than that.
+  std::string random_string0 = base::RandBytesAsString(255);
+  EXPECT_EQ(255u, random_string0.size());
+  std::string random_string1 = base::RandBytesAsString(1023);
+  EXPECT_EQ(1023u, random_string1.size());
+  std::string random_string2 = base::RandBytesAsString(4097);
+  EXPECT_EQ(4097u, random_string2.size());
+}
+
+// Benchmark test for RandBytes().  Disabled since it's intentionally slow and
+// does not test anything that isn't already tested by the existing RandBytes()
+// tests.
+TEST(RandUtilTest, DISABLED_RandBytesPerf) {
+  // Benchmark the performance of |kTestIterations| of RandBytes() using a
+  // buffer size of |kTestBufferSize|.
+  const int kTestIterations = 10;
+  const size_t kTestBufferSize = 1 * 1024 * 1024;
+
+  std::unique_ptr<uint8_t[]> buffer(new uint8_t[kTestBufferSize]);
+  const base::TimeTicks now = base::TimeTicks::Now();
+  for (int i = 0; i < kTestIterations; ++i)
+    base::RandBytes(buffer.get(), kTestBufferSize);
+  const base::TimeTicks end = base::TimeTicks::Now();
+
+  LOG(INFO) << "RandBytes(" << kTestBufferSize << ") took: "
+            << (end - now).InMicroseconds() << "µs";
+}
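Editor's note: the uniformity test above maintains its average incrementally rather than summing raw values (a running sum of uint64_t draws would overflow). The recurrence it uses: after n samples with mean m, folding in a new sample x gives m' = (n*m + x) / (n + 1). As a tiny standalone helper:

// Online mean update, as used by RandGeneratorIsUniform above.
double UpdateMean(double mean, int count, double sample) {
  return (count * mean + sample) / (count + 1);
}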
diff --git a/base/rand_util_win.cc b/base/rand_util_win.cc
new file mode 100644
index 0000000..e85c216
--- /dev/null
+++ b/base/rand_util_win.cc
@@ -0,0 +1,38 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/rand_util.h"
+
+#include <windows.h>
+#include <stddef.h>
+#include <stdint.h>
+
+// #define needed to link in RtlGenRandom(), a.k.a. SystemFunction036.  See the
+// "Community Additions" comment on MSDN here:
+// http://msdn.microsoft.com/en-us/library/windows/desktop/aa387694.aspx
+#define SystemFunction036 NTAPI SystemFunction036
+#include <NTSecAPI.h>
+#undef SystemFunction036
+
+#include <algorithm>
+#include <limits>
+
+#include "base/logging.h"
+
+namespace base {
+
+void RandBytes(void* output, size_t output_length) {
+  char* output_ptr = static_cast<char*>(output);
+  while (output_length > 0) {
+    const ULONG output_bytes_this_pass = static_cast<ULONG>(std::min(
+        output_length, static_cast<size_t>(std::numeric_limits<ULONG>::max())));
+    const bool success =
+        RtlGenRandom(output_ptr, output_bytes_this_pass) != FALSE;
+    CHECK(success);
+    output_length -= output_bytes_this_pass;
+    output_ptr += output_bytes_this_pass;
+  }
+}
+
+}  // namespace base
diff --git a/base/run_loop.cc b/base/run_loop.cc
new file mode 100644
index 0000000..3882f64
--- /dev/null
+++ b/base/run_loop.cc
@@ -0,0 +1,298 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/run_loop.h"
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/lazy_instance.h"
+#include "base/message_loop/message_loop.h"
+#include "base/single_thread_task_runner.h"
+#include "base/threading/thread_local.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace {
+
+LazyInstance<ThreadLocalPointer<RunLoop::Delegate>>::Leaky tls_delegate =
+    LAZY_INSTANCE_INITIALIZER;
+
+// Runs |closure| immediately if this is called on |task_runner|, otherwise
+// forwards |closure| to it.
+void ProxyToTaskRunner(scoped_refptr<SequencedTaskRunner> task_runner,
+                       OnceClosure closure) {
+  if (task_runner->RunsTasksInCurrentSequence()) {
+    std::move(closure).Run();
+    return;
+  }
+  task_runner->PostTask(FROM_HERE, std::move(closure));
+}
+
+}  // namespace
+
+RunLoop::Delegate::Delegate() {
+  // The Delegate can be created on another thread. It is only bound in
+  // RegisterDelegateForCurrentThread().
+  DETACH_FROM_THREAD(bound_thread_checker_);
+}
+
+RunLoop::Delegate::~Delegate() {
+  DCHECK_CALLED_ON_VALID_THREAD(bound_thread_checker_);
+  // A RunLoop::Delegate may be destroyed before it is bound; if so, it may
+  // still be on its creation thread (e.g. a Thread that fails to start) and
+  // shouldn't disrupt that thread's state.
+  if (bound_)
+    tls_delegate.Get().Set(nullptr);
+}
+
+bool RunLoop::Delegate::ShouldQuitWhenIdle() {
+  return active_run_loops_.top()->quit_when_idle_received_;
+}
+
+// static
+void RunLoop::RegisterDelegateForCurrentThread(Delegate* delegate) {
+  // Bind |delegate| to this thread.
+  DCHECK(!delegate->bound_);
+  DCHECK_CALLED_ON_VALID_THREAD(delegate->bound_thread_checker_);
+
+  // There can only be one RunLoop::Delegate per thread.
+  DCHECK(!tls_delegate.Get().Get())
+      << "Error: Multiple RunLoop::Delegates registered on the same thread.\n\n"
+         "Hint: You perhaps instantiated a second "
+         "MessageLoop/ScopedTaskEnvironment on a thread that already had one?";
+  tls_delegate.Get().Set(delegate);
+  delegate->bound_ = true;
+}
+
+RunLoop::RunLoop(Type type)
+    : delegate_(tls_delegate.Get().Get()),
+      type_(type),
+      origin_task_runner_(ThreadTaskRunnerHandle::Get()),
+      weak_factory_(this) {
+  DCHECK(delegate_) << "A RunLoop::Delegate must be bound to this thread prior "
+                       "to using RunLoop.";
+  DCHECK(origin_task_runner_);
+}
+
+RunLoop::~RunLoop() {
+  // TODO(gab): Fix bad usage and enable this check, http://crbug.com/715235.
+  // DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+}
+
+void RunLoop::Run() {
+  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+  if (!BeforeRun())
+    return;
+
+  // It is okay to access this RunLoop from another sequence while Run() is
+  // active as this RunLoop won't touch its state until after that returns (if
+  // the RunLoop's state is accessed while processing Run(), it will be re-bound
+  // to the accessing sequence for the remainder of that Run() -- accessing from
+  // multiple sequences is still disallowed).
+  DETACH_FROM_SEQUENCE(sequence_checker_);
+
+  DCHECK_EQ(this, delegate_->active_run_loops_.top());
+  const bool application_tasks_allowed =
+      delegate_->active_run_loops_.size() == 1U ||
+      type_ == Type::kNestableTasksAllowed;
+  delegate_->Run(application_tasks_allowed);
+
+  // Rebind this RunLoop to the current thread after Run().
+  DETACH_FROM_SEQUENCE(sequence_checker_);
+  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+  AfterRun();
+}
+
+void RunLoop::RunUntilIdle() {
+  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+  quit_when_idle_received_ = true;
+  Run();
+}
+
+void RunLoop::Quit() {
+  // Thread-safe.
+
+  // This can only be hit if run_loop->Quit() is called directly (QuitClosure()
+  // proxies through ProxyToTaskRunner() as it can only deref its WeakPtr on
+  // |origin_task_runner_|).
+  if (!origin_task_runner_->RunsTasksInCurrentSequence()) {
+    origin_task_runner_->PostTask(
+        FROM_HERE, base::BindOnce(&RunLoop::Quit, Unretained(this)));
+    return;
+  }
+
+  quit_called_ = true;
+  if (running_ && delegate_->active_run_loops_.top() == this) {
+    // This is the inner-most RunLoop, so quit now.
+    delegate_->Quit();
+  }
+}
+
+void RunLoop::QuitWhenIdle() {
+  // Thread-safe.
+
+  // This can only be hit if run_loop->QuitWhenIdle() is called directly
+  // (QuitWhenIdleClosure() proxies through ProxyToTaskRunner() as it can only
+  // deref its WeakPtr on |origin_task_runner_|).
+  if (!origin_task_runner_->RunsTasksInCurrentSequence()) {
+    origin_task_runner_->PostTask(
+        FROM_HERE, base::BindOnce(&RunLoop::QuitWhenIdle, Unretained(this)));
+    return;
+  }
+
+  quit_when_idle_received_ = true;
+}
+
+base::Closure RunLoop::QuitClosure() {
+  // TODO(gab): Fix bad usage and enable this check, http://crbug.com/715235.
+  // DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+  // Need to use ProxyToTaskRunner() as WeakPtrs vended from
+  // |weak_factory_| may only be accessed on |origin_task_runner_|.
+  // TODO(gab): It feels wrong that QuitClosure() is bound to a WeakPtr.
+  return base::Bind(&ProxyToTaskRunner, origin_task_runner_,
+                    base::Bind(&RunLoop::Quit, weak_factory_.GetWeakPtr()));
+}
+
+base::Closure RunLoop::QuitWhenIdleClosure() {
+  // TODO(gab): Fix bad usage and enable this check, http://crbug.com/715235.
+  // DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+  // Need to use ProxyToTaskRunner() as WeakPtrs vended from
+  // |weak_factory_| may only be accessed on |origin_task_runner_|.
+  // TODO(gab): It feels wrong that QuitWhenIdleClosure() is bound to a WeakPtr.
+  return base::Bind(
+      &ProxyToTaskRunner, origin_task_runner_,
+      base::Bind(&RunLoop::QuitWhenIdle, weak_factory_.GetWeakPtr()));
+}
+
+// static
+bool RunLoop::IsRunningOnCurrentThread() {
+  Delegate* delegate = tls_delegate.Get().Get();
+  return delegate && !delegate->active_run_loops_.empty();
+}
+
+// static
+bool RunLoop::IsNestedOnCurrentThread() {
+  Delegate* delegate = tls_delegate.Get().Get();
+  return delegate && delegate->active_run_loops_.size() > 1;
+}
+
+// static
+void RunLoop::AddNestingObserverOnCurrentThread(NestingObserver* observer) {
+  Delegate* delegate = tls_delegate.Get().Get();
+  DCHECK(delegate);
+  delegate->nesting_observers_.AddObserver(observer);
+}
+
+// static
+void RunLoop::RemoveNestingObserverOnCurrentThread(NestingObserver* observer) {
+  Delegate* delegate = tls_delegate.Get().Get();
+  DCHECK(delegate);
+  delegate->nesting_observers_.RemoveObserver(observer);
+}
+
+// static
+void RunLoop::QuitCurrentDeprecated() {
+  DCHECK(IsRunningOnCurrentThread());
+  tls_delegate.Get().Get()->active_run_loops_.top()->Quit();
+}
+
+// static
+void RunLoop::QuitCurrentWhenIdleDeprecated() {
+  DCHECK(IsRunningOnCurrentThread());
+  tls_delegate.Get().Get()->active_run_loops_.top()->QuitWhenIdle();
+}
+
+// static
+Closure RunLoop::QuitCurrentWhenIdleClosureDeprecated() {
+  return Bind(&RunLoop::QuitCurrentWhenIdleDeprecated);
+}
+
+#if DCHECK_IS_ON()
+RunLoop::ScopedDisallowRunningForTesting::ScopedDisallowRunningForTesting()
+    : current_delegate_(tls_delegate.Get().Get()),
+      previous_run_allowance_(
+          current_delegate_ ? current_delegate_->allow_running_for_testing_
+                            : false) {
+  if (current_delegate_)
+    current_delegate_->allow_running_for_testing_ = false;
+}
+
+RunLoop::ScopedDisallowRunningForTesting::~ScopedDisallowRunningForTesting() {
+  DCHECK_EQ(current_delegate_, tls_delegate.Get().Get());
+  if (current_delegate_)
+    current_delegate_->allow_running_for_testing_ = previous_run_allowance_;
+}
+#else   // DCHECK_IS_ON()
+// Defined out of line so that the compiler doesn't inline these, realize the
+// scope has no effect, and then emit an "unused variable" warning in
+// non-dcheck builds.
+RunLoop::ScopedDisallowRunningForTesting::ScopedDisallowRunningForTesting() =
+    default;
+RunLoop::ScopedDisallowRunningForTesting::~ScopedDisallowRunningForTesting() =
+    default;
+#endif  // DCHECK_IS_ON()
+
+bool RunLoop::BeforeRun() {
+  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+#if DCHECK_IS_ON()
+  DCHECK(delegate_->allow_running_for_testing_)
+      << "RunLoop::Run() isn't allowed in the scope of a "
+         "ScopedDisallowRunningForTesting. Hint: if mixing "
+         "TestMockTimeTaskRunners on same thread, use TestMockTimeTaskRunner's "
+         "API instead of RunLoop to drive individual task runners.";
+  DCHECK(!run_called_);
+  run_called_ = true;
+#endif  // DCHECK_IS_ON()
+
+  // Allow Quit to be called before Run.
+  if (quit_called_)
+    return false;
+
+  auto& active_run_loops_ = delegate_->active_run_loops_;
+  active_run_loops_.push(this);
+
+  const bool is_nested = active_run_loops_.size() > 1;
+
+  if (is_nested) {
+    for (auto& observer : delegate_->nesting_observers_)
+      observer.OnBeginNestedRunLoop();
+    if (type_ == Type::kNestableTasksAllowed)
+      delegate_->EnsureWorkScheduled();
+  }
+
+  running_ = true;
+  return true;
+}
+
+void RunLoop::AfterRun() {
+  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+  running_ = false;
+
+  auto& active_run_loops_ = delegate_->active_run_loops_;
+  DCHECK_EQ(active_run_loops_.top(), this);
+  active_run_loops_.pop();
+
+  RunLoop* previous_run_loop =
+      active_run_loops_.empty() ? nullptr : active_run_loops_.top();
+
+  if (previous_run_loop) {
+    for (auto& observer : delegate_->nesting_observers_)
+      observer.OnExitNestedRunLoop();
+  }
+
+  // Execute deferred Quit, if any:
+  if (previous_run_loop && previous_run_loop->quit_called_)
+    delegate_->Quit();
+}
+
+}  // namespace base
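Editor's note: the deferred-Quit logic at the bottom of AfterRun() is easiest to see in a concrete nesting scenario. Below is a sketch, assuming a RunLoop::Delegate (e.g. a MessageLoop) is already bound on the thread: the outer loop is quit from inside a nested one, and AfterRun() acts on the recorded quit once the nested loop unwinds.

#include "base/bind.h"
#include "base/location.h"
#include "base/run_loop.h"
#include "base/threading/thread_task_runner_handle.h"

void QuitOuterWhileNested(base::RunLoop* outer) {
  base::RunLoop nested(base::RunLoop::Type::kNestableTasksAllowed);
  // This closure runs while |nested| is on top of the stack, so Quit() only
  // records quit_called_ on |outer| instead of quitting the delegate.
  base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
                                                outer->QuitClosure());
  base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
                                                nested.QuitClosure());
  nested.Run();
  // AfterRun() pops |nested|, sees |outer|'s quit_called_, and quits it.
}

void Example() {
  base::RunLoop outer;
  base::ThreadTaskRunnerHandle::Get()->PostTask(
      FROM_HERE, base::BindOnce(&QuitOuterWhileNested, &outer));
  outer.Run();  // Returns once the nested loop has unwound.
}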
diff --git a/base/run_loop.h b/base/run_loop.h
new file mode 100644
index 0000000..719f928
--- /dev/null
+++ b/base/run_loop.h
@@ -0,0 +1,304 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_RUN_LOOP_H_
+#define BASE_RUN_LOOP_H_
+
+#include <utility>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/containers/stack.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/weak_ptr.h"
+#include "base/observer_list.h"
+#include "base/sequence_checker.h"
+#include "base/threading/thread_checker.h"
+#include "build/build_config.h"
+
+namespace base {
+#if defined(OS_ANDROID)
+class MessagePumpForUI;
+#endif
+
+#if defined(OS_IOS)
+class MessagePumpUIApplication;
+#endif
+
+class SingleThreadTaskRunner;
+
+// Helper class to run the RunLoop::Delegate associated with the current thread.
+// A RunLoop::Delegate must have been bound to this thread (ref.
+// RunLoop::RegisterDelegateForCurrentThread()) prior to using any of RunLoop's
+// member and static methods unless explicitly indicated otherwise (e.g.
+// IsRunning/IsNestedOnCurrentThread()). RunLoop::Run can only be called once
+// per RunLoop lifetime. Create a RunLoop on the stack and call Run/Quit to run
+// a nested RunLoop, but please do not use nested loops in production code!
+class BASE_EXPORT RunLoop {
+ public:
+  // The type of RunLoop: a kDefault RunLoop at the top-level (non-nested) will
+  // process system and application tasks assigned to its Delegate. When nested
+  // however a kDefault RunLoop will only process system tasks while a
+  // kNestableTasksAllowed RunLoop will continue to process application tasks
+  // even if nested.
+  //
+  // This is relevant in the case of recursive RunLoops. Some unwanted run loops
+  // may occur when using common controls or printer functions. By default,
+  // recursive task processing is disabled.
+  //
+  // In general, nestable RunLoops are to be avoided. They are dangerous and
+  // difficult to get right, so please use with extreme caution.
+  //
+  // A specific example where this makes a difference is:
+  // - The thread is running a RunLoop.
+  // - It receives a task #1 and executes it.
+  // - The task #1 implicitly starts a RunLoop, like a MessageBox in the unit
+  //   test. This can also be StartDoc or GetSaveFileName.
+  // - The thread receives a task #2 before or while in this second RunLoop.
+  // - With a kNestableTasksAllowed RunLoop, the task #2 will run right away.
+  //   Otherwise, it will get executed right after task #1 completes in the main
+  //   RunLoop.
+  enum class Type {
+    kDefault,
+    kNestableTasksAllowed,
+  };
+
+  RunLoop(Type type = Type::kDefault);
+  ~RunLoop();
+
+  // Run the current RunLoop::Delegate. This blocks until Quit is called. Before
+  // calling Run, be sure to grab the QuitClosure in order to stop the
+  // RunLoop::Delegate asynchronously.
+  void Run();
+
+  // Run the current RunLoop::Delegate until it doesn't find any tasks or
+  // messages in its queue (it goes idle). WARNING: This may never return! Only
+  // use this when repeating tasks such as animated web pages have been shut
+  // down.
+  void RunUntilIdle();
+
+  bool running() const {
+    DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+    return running_;
+  }
+
+  // Quit() quits an earlier call to Run() immediately. QuitWhenIdle() quits an
+  // earlier call to Run() when there aren't any tasks or messages in the queue.
+  //
+  // These methods are thread-safe but note that Quit() is best-effort when
+  // called from another thread (will quit soon but tasks that were already
+  // queued on this RunLoop will get to run first).
+  //
+  // There can be other nested RunLoops servicing the same task queue. Quitting
+  // one RunLoop has no bearing on the others. Quit() and QuitWhenIdle() can be
+  // called before, during or after Run(). If called before Run(), Run() will
+  // return immediately when called. Calling Quit() or QuitWhenIdle() after the
+  // RunLoop has already finished running has no effect.
+  //
+  // WARNING: You must NEVER assume that a call to Quit() or QuitWhenIdle() will
+  // terminate the targeted message loop. If a nested RunLoop continues
+  // running, the target may NEVER terminate. It is very easy to livelock (run
+  // forever) in such a case.
+  void Quit();
+  void QuitWhenIdle();
+
+  // Convenience methods to get a closure that safely calls Quit() or
+  // QuitWhenIdle() (has no effect if the RunLoop instance is gone).
+  //
+  // The resulting Closure is thread-safe (note however that invoking the
+  // QuitClosure() from a thread other than this RunLoop's will result in an
+  // asynchronous rather than immediate Quit()).
+  //
+  // Example:
+  //   RunLoop run_loop;
+  //   PostTask(run_loop.QuitClosure());
+  //   run_loop.Run();
+  base::Closure QuitClosure();
+  base::Closure QuitWhenIdleClosure();
+
+  // Returns true if there is an active RunLoop on this thread.
+  // Safe to call before RegisterDelegateForCurrentThread().
+  static bool IsRunningOnCurrentThread();
+
+  // Returns true if there is an active RunLoop on this thread and it's nested
+  // within another active RunLoop.
+  // Safe to call before RegisterDelegateForCurrentThread().
+  static bool IsNestedOnCurrentThread();
+
+  // A NestingObserver is notified when a nested RunLoop begins and ends.
+  class BASE_EXPORT NestingObserver {
+   public:
+    // Notified before a nested loop starts running work on the current thread.
+    virtual void OnBeginNestedRunLoop() = 0;
+    // Notified after a nested loop is done running work on the current thread.
+    virtual void OnExitNestedRunLoop() {}
+
+   protected:
+    virtual ~NestingObserver() = default;
+  };
+
+  static void AddNestingObserverOnCurrentThread(NestingObserver* observer);
+  static void RemoveNestingObserverOnCurrentThread(NestingObserver* observer);
+
+  // A RunLoop::Delegate is a generic interface that allows RunLoop to be
+  // separate from the underlying implementation of the message loop for this
+  // thread. It holds private state used by RunLoops on its associated thread.
+  // One and only one RunLoop::Delegate must be registered on a given thread
+  // via RunLoop::RegisterDelegateForCurrentThread() before RunLoop instances
+  // and RunLoop static methods can be used on it.
+  class BASE_EXPORT Delegate {
+   public:
+    Delegate();
+    virtual ~Delegate();
+
+    // Used by RunLoop to inform its Delegate to Run/Quit. Implementations are
+    // expected to keep on running synchronously from the Run() call until the
+    // eventual matching Quit() call. Upon receiving a Quit() call it should
+    // return from the Run() call as soon as possible without executing
+    // remaining tasks/messages. Run() calls can nest in which case each Quit()
+    // call should result in the topmost active Run() call returning. The only
+    // other trigger for Run() to return is the
+    // |should_quit_when_idle_callback_| which the Delegate should probe before
+    // sleeping when it becomes idle. |application_tasks_allowed| is true if
+    // this is the first Run() call on the stack or it was made from a nested
+    // RunLoop of Type::kNestableTasksAllowed (otherwise this Run() level should
+    // only process system tasks).
+    virtual void Run(bool application_tasks_allowed) = 0;
+    virtual void Quit() = 0;
+
+    // Invoked right before a RunLoop enters a nested Run() call on this
+    // Delegate iff this RunLoop is of type kNestableTasksAllowed. The Delegate
+    // should ensure that the upcoming Run() call will result in processing
+    // application tasks queued ahead of it without further probing. e.g.
+    // message pumps on some platforms, like Mac, need an explicit request to
+    // process application tasks when nested, otherwise they'll only wait for
+    // system messages.
+    virtual void EnsureWorkScheduled() = 0;
+
+   protected:
+    // Returns the result of this Delegate's |should_quit_when_idle_callback_|.
+    // "protected" so it can be invoked only by the Delegate itself.
+    bool ShouldQuitWhenIdle();
+
+   private:
+    // While the state is owned by the Delegate subclass, only RunLoop can use
+    // it.
+    friend class RunLoop;
+
+    // A vector-based stack is more memory efficient than the default
+    // deque-based stack as the active RunLoop stack isn't expected to ever
+    // have more than a few entries.
+    using RunLoopStack = base::stack<RunLoop*, std::vector<RunLoop*>>;
+
+    RunLoopStack active_run_loops_;
+    ObserverList<RunLoop::NestingObserver> nesting_observers_;
+
+#if DCHECK_IS_ON()
+    bool allow_running_for_testing_ = true;
+#endif
+
+    // True once this Delegate is bound to a thread via
+    // RegisterDelegateForCurrentThread().
+    bool bound_ = false;
+
+    // Thread-affine per its use of TLS.
+    THREAD_CHECKER(bound_thread_checker_);
+
+    DISALLOW_COPY_AND_ASSIGN(Delegate);
+  };
+
+  // Registers |delegate| on the current thread. Must be called once and only
+  // once per thread before using RunLoop methods on it. |delegate| is from then
+  // on forever bound to that thread (including its destruction).
+  static void RegisterDelegateForCurrentThread(Delegate* delegate);
+
+  // Quits the active RunLoop (when idle) -- there must be one. These were
+  // introduced as preferred temporary replacements for the long-deprecated
+  // MessageLoop::Quit(WhenIdle)(Closure) methods. Callers should properly plumb
+  // a reference to the appropriate RunLoop instance (or its QuitClosure)
+  // instead of using these in order to link Run()/Quit() to a single RunLoop
+  // instance and increase readability.
+  static void QuitCurrentDeprecated();
+  static void QuitCurrentWhenIdleDeprecated();
+  static Closure QuitCurrentWhenIdleClosureDeprecated();
+
+  // Run() will DCHECK if called while there's a ScopedDisallowRunningForTesting
+  // in scope on its thread. This is useful to add safety to some test
+  // constructs which allow multiple task runners to share the main thread in
+  // unit tests. While the main thread can be shared by multiple runners to
+  // deterministically fake multi threading, there can still only be a single
+  // RunLoop::Delegate per thread and RunLoop::Run() should only be invoked from
+  // it (or it would result in incorrectly driving TaskRunner A while in
+  // TaskRunner B's context).
+  class BASE_EXPORT ScopedDisallowRunningForTesting {
+   public:
+    ScopedDisallowRunningForTesting();
+    ~ScopedDisallowRunningForTesting();
+
+   private:
+#if DCHECK_IS_ON()
+    Delegate* current_delegate_;
+    const bool previous_run_allowance_;
+#endif  // DCHECK_IS_ON()
+
+    DISALLOW_COPY_AND_ASSIGN(ScopedDisallowRunningForTesting);
+  };
+
+ private:
+#if defined(OS_ANDROID)
+  // Android doesn't support the blocking RunLoop::Run, so it calls
+  // BeforeRun and AfterRun directly.
+  friend class base::MessagePumpForUI;
+#endif
+
+#if defined(OS_IOS)
+  // iOS doesn't support the blocking RunLoop::Run, so it calls
+  // BeforeRun directly.
+  friend class base::MessagePumpUIApplication;
+#endif
+
+  // Return false to abort the Run.
+  bool BeforeRun();
+  void AfterRun();
+
+  // A pointer to the RunLoop::Delegate for the thread driven by this RunLoop,
+  // cached for quick access without using TLS (also allows access to state
+  // from another sequence during Run(), ref. |sequence_checker_| below).
+  Delegate* delegate_;
+
+  const Type type_;
+
+#if DCHECK_IS_ON()
+  bool run_called_ = false;
+#endif
+
+  bool quit_called_ = false;
+  bool running_ = false;
+  // Used to record that QuitWhenIdle() was called on this RunLoop, meaning that
+  // the Delegate should quit Run() once it becomes idle (it's responsible for
+  // probing this state via ShouldQuitWhenIdle()). This state is stored here
+  // rather than pushed to Delegate to support nested RunLoops.
+  bool quit_when_idle_received_ = false;
+
+  // RunLoop is not thread-safe. Its state/methods, unless marked as such, may
+  // not be accessed from any other sequence than the thread it was constructed
+  // on. Exception: RunLoop can be safely accessed from one other sequence (or
+  // single parallel task) during Run() -- e.g. to Quit() without having to
+  // plumb ThreadTaskRunnerHandle::Get() throughout a test to repost the
+  // QuitClosure to the origin thread.
+  SEQUENCE_CHECKER(sequence_checker_);
+
+  const scoped_refptr<SingleThreadTaskRunner> origin_task_runner_;
+
+  // WeakPtrFactory for QuitClosure safety.
+  base::WeakPtrFactory<RunLoop> weak_factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(RunLoop);
+};
+
+}  // namespace base
+
+#endif  // BASE_RUN_LOOP_H_
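Editor's note: a usage sketch for NestingObserver, assuming a RunLoop::Delegate is already bound on the current thread; LoggingNestingObserver is an illustrative name, not a base class:

#include "base/logging.h"
#include "base/run_loop.h"

class LoggingNestingObserver : public base::RunLoop::NestingObserver {
 public:
  void OnBeginNestedRunLoop() override { LOG(INFO) << "nested run begins"; }
  void OnExitNestedRunLoop() override { LOG(INFO) << "nested run ends"; }
};

void ObserveNesting() {
  LoggingNestingObserver observer;
  base::RunLoop::AddNestingObserverOnCurrentThread(&observer);
  base::RunLoop run_loop;
  // Any task run by |run_loop| that spins its own RunLoop will trigger the
  // observer callbacks above.
  run_loop.RunUntilIdle();
  base::RunLoop::RemoveNestingObserverOnCurrentThread(&observer);
}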
diff --git a/base/run_loop_unittest.cc b/base/run_loop_unittest.cc
new file mode 100644
index 0000000..c7db14a
--- /dev/null
+++ b/base/run_loop_unittest.cc
@@ -0,0 +1,636 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/run_loop.h"
+
+#include <utility>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/containers/queue.h"
+#include "base/location.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/memory/ref_counted.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/test/gtest_util.h"
+#include "base/test/scoped_task_environment.h"
+#include "base/test/test_timeouts.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_checker_impl.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "build/build_config.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+void QuitWhenIdleTask(RunLoop* run_loop, int* counter) {
+  run_loop->QuitWhenIdle();
+  ++(*counter);
+}
+
+void ShouldRunTask(int* counter) {
+  ++(*counter);
+}
+
+void ShouldNotRunTask() {
+  ADD_FAILURE() << "Ran a task that shouldn't run.";
+}
+
+void RunNestedLoopTask(int* counter) {
+  RunLoop nested_run_loop(RunLoop::Type::kNestableTasksAllowed);
+
+  // This task should quit |nested_run_loop| but not the main RunLoop.
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce(&QuitWhenIdleTask, Unretained(&nested_run_loop),
+                          Unretained(counter)));
+
+  ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+      FROM_HERE, BindOnce(&ShouldNotRunTask), TimeDelta::FromDays(1));
+
+  nested_run_loop.Run();
+
+  ++(*counter);
+}
+
+// A simple SingleThreadTaskRunner that just queues undelayed tasks (and ignores
+// delayed tasks). Tasks can then be processed one by one by ProcessTask() which
+// will return true if it processed a task and false otherwise.
+class SimpleSingleThreadTaskRunner : public SingleThreadTaskRunner {
+ public:
+  SimpleSingleThreadTaskRunner() = default;
+
+  bool PostDelayedTask(const Location& from_here,
+                       OnceClosure task,
+                       base::TimeDelta delay) override {
+    if (delay > base::TimeDelta())
+      return false;
+    AutoLock auto_lock(tasks_lock_);
+    pending_tasks_.push(std::move(task));
+    return true;
+  }
+
+  bool PostNonNestableDelayedTask(const Location& from_here,
+                                  OnceClosure task,
+                                  base::TimeDelta delay) override {
+    return PostDelayedTask(from_here, std::move(task), delay);
+  }
+
+  bool RunsTasksInCurrentSequence() const override {
+    return origin_thread_checker_.CalledOnValidThread();
+  }
+
+  bool ProcessSingleTask() {
+    OnceClosure task;
+    {
+      AutoLock auto_lock(tasks_lock_);
+      if (pending_tasks_.empty())
+        return false;
+      task = std::move(pending_tasks_.front());
+      pending_tasks_.pop();
+    }
+    // It's important to Run() after pop() and outside the lock as |task| may
+    // run a nested loop which will re-enter ProcessSingleTask().
+    std::move(task).Run();
+    return true;
+  }
+
+ private:
+  ~SimpleSingleThreadTaskRunner() override = default;
+
+  Lock tasks_lock_;
+  base::queue<OnceClosure> pending_tasks_;
+
+  // RunLoop relies on the RunsTasksInCurrentSequence() signal. Use a
+  // ThreadCheckerImpl to be able to reliably provide that signal even in
+  // non-dcheck builds.
+  ThreadCheckerImpl origin_thread_checker_;
+
+  DISALLOW_COPY_AND_ASSIGN(SimpleSingleThreadTaskRunner);
+};
+
+// The basis of all TestDelegates: allows safely injecting a OnceClosure to be
+// run in the next idle phase of this delegate's Run() implementation. This can
+// be used to have code run on a thread that is otherwise livelocked in an idle
+// phase (sometimes a simple PostTask() won't do it -- e.g. when processing
+// application tasks is disallowed).
+class InjectableTestDelegate : public RunLoop::Delegate {
+ public:
+  void InjectClosureOnDelegate(OnceClosure closure) {
+    AutoLock auto_lock(closure_lock_);
+    closure_ = std::move(closure);
+  }
+
+  bool RunInjectedClosure() {
+    AutoLock auto_lock(closure_lock_);
+    if (closure_.is_null())
+      return false;
+    std::move(closure_).Run();
+    return true;
+  }
+
+ private:
+  Lock closure_lock_;
+  OnceClosure closure_;
+};
+
+// A simple test RunLoop::Delegate to exercise RunLoop logic independent of any
+// other base constructs. BindToCurrentThread() must be called before this
+// TestBoundDelegate is operational.
+class TestBoundDelegate final : public InjectableTestDelegate {
+ public:
+  TestBoundDelegate() = default;
+
+  // Makes this TestBoundDelegate become the RunLoop::Delegate and
+  // ThreadTaskRunnerHandle for this thread.
+  void BindToCurrentThread() {
+    thread_task_runner_handle_ =
+        std::make_unique<ThreadTaskRunnerHandle>(simple_task_runner_);
+    RunLoop::RegisterDelegateForCurrentThread(this);
+  }
+
+ private:
+  void Run(bool application_tasks_allowed) override {
+    if (nested_run_allowing_tasks_incoming_) {
+      EXPECT_TRUE(RunLoop::IsNestedOnCurrentThread());
+      EXPECT_TRUE(application_tasks_allowed);
+    } else if (RunLoop::IsNestedOnCurrentThread()) {
+      EXPECT_FALSE(application_tasks_allowed);
+    }
+    nested_run_allowing_tasks_incoming_ = false;
+
+    while (!should_quit_) {
+      if (application_tasks_allowed && simple_task_runner_->ProcessSingleTask())
+        continue;
+
+      if (ShouldQuitWhenIdle())
+        break;
+
+      if (RunInjectedClosure())
+        continue;
+
+      PlatformThread::YieldCurrentThread();
+    }
+    should_quit_ = false;
+  }
+
+  void Quit() override { should_quit_ = true; }
+
+  void EnsureWorkScheduled() override {
+    nested_run_allowing_tasks_incoming_ = true;
+  }
+
+  // True if the next invocation of Run() is expected to be from a
+  // kNestableTasksAllowed RunLoop.
+  bool nested_run_allowing_tasks_incoming_ = false;
+
+  scoped_refptr<SimpleSingleThreadTaskRunner> simple_task_runner_ =
+      MakeRefCounted<SimpleSingleThreadTaskRunner>();
+
+  std::unique_ptr<ThreadTaskRunnerHandle> thread_task_runner_handle_;
+
+  bool should_quit_ = false;
+};
+
+enum class RunLoopTestType {
+  // Runs all RunLoopTests under a ScopedTaskEnvironment to make sure real world
+  // scenarios work.
+  kRealEnvironment,
+
+  // Runs all RunLoopTests under a test RunLoop::Delegate to make sure the
+  // delegate interface fully works standalone.
+  kTestDelegate,
+};
+
+// The task environment for the RunLoopTest of a given type. A separate class
+// so it can be instantiated on the stack in the RunLoopTest fixture.
+class RunLoopTestEnvironment {
+ public:
+  RunLoopTestEnvironment(RunLoopTestType type) {
+    switch (type) {
+      case RunLoopTestType::kRealEnvironment: {
+        task_environment_ = std::make_unique<test::ScopedTaskEnvironment>();
+        break;
+      }
+      case RunLoopTestType::kTestDelegate: {
+        auto test_delegate = std::make_unique<TestBoundDelegate>();
+        test_delegate->BindToCurrentThread();
+        test_delegate_ = std::move(test_delegate);
+        break;
+      }
+    }
+  }
+
+ private:
+  // Instantiates one or the other based on the RunLoopTestType.
+  std::unique_ptr<test::ScopedTaskEnvironment> task_environment_;
+  std::unique_ptr<InjectableTestDelegate> test_delegate_;
+};
+
+class RunLoopTest : public testing::TestWithParam<RunLoopTestType> {
+ protected:
+  RunLoopTest() : test_environment_(GetParam()) {}
+
+  RunLoopTestEnvironment test_environment_;
+  RunLoop run_loop_;
+  int counter_ = 0;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(RunLoopTest);
+};
+
+}  // namespace
+
+TEST_P(RunLoopTest, QuitWhenIdle) {
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce(&QuitWhenIdleTask, Unretained(&run_loop_),
+                          Unretained(&counter_)));
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce(&ShouldRunTask, Unretained(&counter_)));
+  ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+      FROM_HERE, BindOnce(&ShouldNotRunTask), TimeDelta::FromDays(1));
+
+  run_loop_.Run();
+  EXPECT_EQ(2, counter_);
+}
+
+TEST_P(RunLoopTest, QuitWhenIdleNestedLoop) {
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce(&RunNestedLoopTask, Unretained(&counter_)));
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce(&QuitWhenIdleTask, Unretained(&run_loop_),
+                          Unretained(&counter_)));
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce(&ShouldRunTask, Unretained(&counter_)));
+  ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+      FROM_HERE, BindOnce(&ShouldNotRunTask), TimeDelta::FromDays(1));
+
+  run_loop_.Run();
+  EXPECT_EQ(4, counter_);
+}
+
+TEST_P(RunLoopTest, QuitWhenIdleClosure) {
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          run_loop_.QuitWhenIdleClosure());
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce(&ShouldRunTask, Unretained(&counter_)));
+  ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+      FROM_HERE, BindOnce(&ShouldNotRunTask), TimeDelta::FromDays(1));
+
+  run_loop_.Run();
+  EXPECT_EQ(1, counter_);
+}
+
+// Verify that the QuitWhenIdleClosure() can run after the RunLoop has been
+// deleted. It should have no effect.
+TEST_P(RunLoopTest, QuitWhenIdleClosureAfterRunLoopScope) {
+  Closure quit_when_idle_closure;
+  {
+    RunLoop run_loop;
+    quit_when_idle_closure = run_loop.QuitWhenIdleClosure();
+    run_loop.RunUntilIdle();
+  }
+  quit_when_idle_closure.Run();
+}
+
+// Verify that Quit can be executed from another sequence.
+TEST_P(RunLoopTest, QuitFromOtherSequence) {
+  Thread other_thread("test");
+  other_thread.Start();
+  scoped_refptr<SequencedTaskRunner> other_sequence =
+      other_thread.task_runner();
+
+  // Always expected to run before asynchronous Quit() kicks in.
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, base::BindOnce(&ShouldRunTask, Unretained(&counter_)));
+
+  WaitableEvent loop_was_quit(WaitableEvent::ResetPolicy::MANUAL,
+                              WaitableEvent::InitialState::NOT_SIGNALED);
+  other_sequence->PostTask(
+      FROM_HERE, base::BindOnce([](RunLoop* run_loop) { run_loop->Quit(); },
+                                Unretained(&run_loop_)));
+  other_sequence->PostTask(
+      FROM_HERE,
+      base::BindOnce(&WaitableEvent::Signal, base::Unretained(&loop_was_quit)));
+
+  // Anything that's posted after the Quit closure was posted back to this
+  // sequence shouldn't get a chance to run.
+  loop_was_quit.Wait();
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          base::BindOnce(&ShouldNotRunTask));
+
+  run_loop_.Run();
+
+  EXPECT_EQ(1, counter_);
+}
+
+// Verify that QuitClosure can be executed from another sequence.
+TEST_P(RunLoopTest, QuitFromOtherSequenceWithClosure) {
+  Thread other_thread("test");
+  other_thread.Start();
+  scoped_refptr<SequencedTaskRunner> other_sequence =
+      other_thread.task_runner();
+
+  // Always expected to run before asynchronous Quit() kicks in.
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, base::BindOnce(&ShouldRunTask, Unretained(&counter_)));
+
+  WaitableEvent loop_was_quit(WaitableEvent::ResetPolicy::MANUAL,
+                              WaitableEvent::InitialState::NOT_SIGNALED);
+  other_sequence->PostTask(FROM_HERE, run_loop_.QuitClosure());
+  other_sequence->PostTask(
+      FROM_HERE,
+      base::BindOnce(&WaitableEvent::Signal, base::Unretained(&loop_was_quit)));
+
+  // Anything that's posted after the Quit closure was posted back to this
+  // sequence shouldn't get a chance to run.
+  loop_was_quit.Wait();
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                          base::BindOnce(&ShouldNotRunTask));
+
+  run_loop_.Run();
+
+  EXPECT_EQ(1, counter_);
+}
+
+// Verify that Quit can be executed from another sequence even when the
+// Quit is racing with Run() -- i.e. forgo the WaitableEvent used above.
+TEST_P(RunLoopTest, QuitFromOtherSequenceRacy) {
+  Thread other_thread("test");
+  other_thread.Start();
+  scoped_refptr<SequencedTaskRunner> other_sequence =
+      other_thread.task_runner();
+
+  // Always expected to run before asynchronous Quit() kicks in.
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, base::BindOnce(&ShouldRunTask, Unretained(&counter_)));
+
+  other_sequence->PostTask(
+      FROM_HERE, base::BindOnce([](RunLoop* run_loop) { run_loop->Quit(); },
+                                Unretained(&run_loop_)));
+
+  run_loop_.Run();
+
+  EXPECT_EQ(1, counter_);
+}
+
+// Verify that QuitClosure can be executed from another sequence even when the
+// Quit is racing with Run() -- i.e. forgo the WaitableEvent used above.
+TEST_P(RunLoopTest, QuitFromOtherSequenceRacyWithClosure) {
+  Thread other_thread("test");
+  other_thread.Start();
+  scoped_refptr<SequencedTaskRunner> other_sequence =
+      other_thread.task_runner();
+
+  // Always expected to run before asynchronous Quit() kicks in.
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, base::BindOnce(&ShouldRunTask, Unretained(&counter_)));
+
+  other_sequence->PostTask(FROM_HERE, run_loop_.QuitClosure());
+
+  run_loop_.Run();
+
+  EXPECT_EQ(1, counter_);
+}
+
+// Verify that QuitWhenIdle can be executed from another sequence.
+TEST_P(RunLoopTest, QuitWhenIdleFromOtherSequence) {
+  Thread other_thread("test");
+  other_thread.Start();
+  scoped_refptr<SequencedTaskRunner> other_sequence =
+      other_thread.task_runner();
+
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, base::BindOnce(&ShouldRunTask, Unretained(&counter_)));
+
+  other_sequence->PostTask(
+      FROM_HERE,
+      base::BindOnce([](RunLoop* run_loop) { run_loop->QuitWhenIdle(); },
+                     Unretained(&run_loop_)));
+
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, base::BindOnce(&ShouldRunTask, Unretained(&counter_)));
+
+  run_loop_.Run();
+
+  // Regardless of the outcome of the race this thread shouldn't have been idle
+  // until the counter was ticked twice.
+  EXPECT_EQ(2, counter_);
+}
+
+// Verify that QuitWhenIdleClosure can be executed from another sequence.
+TEST_P(RunLoopTest, QuitWhenIdleFromOtherSequenceWithClosure) {
+  Thread other_thread("test");
+  other_thread.Start();
+  scoped_refptr<SequencedTaskRunner> other_sequence =
+      other_thread.task_runner();
+
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, base::BindOnce(&ShouldRunTask, Unretained(&counter_)));
+
+  other_sequence->PostTask(FROM_HERE, run_loop_.QuitWhenIdleClosure());
+
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, base::BindOnce(&ShouldRunTask, Unretained(&counter_)));
+
+  run_loop_.Run();
+
+  // Regardless of the outcome of the race this thread shouldn't have been idle
+  // until the counter was ticked twice.
+  EXPECT_EQ(2, counter_);
+}
+
+TEST_P(RunLoopTest, IsRunningOnCurrentThread) {
+  EXPECT_FALSE(RunLoop::IsRunningOnCurrentThread());
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE,
+      BindOnce([]() { EXPECT_TRUE(RunLoop::IsRunningOnCurrentThread()); }));
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, run_loop_.QuitClosure());
+  run_loop_.Run();
+}
+
+TEST_P(RunLoopTest, IsNestedOnCurrentThread) {
+  EXPECT_FALSE(RunLoop::IsNestedOnCurrentThread());
+
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce([]() {
+        EXPECT_FALSE(RunLoop::IsNestedOnCurrentThread());
+
+        RunLoop nested_run_loop(RunLoop::Type::kNestableTasksAllowed);
+
+        ThreadTaskRunnerHandle::Get()->PostTask(
+            FROM_HERE, BindOnce([]() {
+              EXPECT_TRUE(RunLoop::IsNestedOnCurrentThread());
+            }));
+        ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                                nested_run_loop.QuitClosure());
+
+        EXPECT_FALSE(RunLoop::IsNestedOnCurrentThread());
+        nested_run_loop.Run();
+        EXPECT_FALSE(RunLoop::IsNestedOnCurrentThread());
+      }));
+
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, run_loop_.QuitClosure());
+  run_loop_.Run();
+}
+
+namespace {
+
+class MockNestingObserver : public RunLoop::NestingObserver {
+ public:
+  MockNestingObserver() = default;
+
+  // RunLoop::NestingObserver:
+  MOCK_METHOD0(OnBeginNestedRunLoop, void());
+  MOCK_METHOD0(OnExitNestedRunLoop, void());
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MockNestingObserver);
+};
+
+class MockTask {
+ public:
+  MockTask() = default;
+  MOCK_METHOD0(Task, void());
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MockTask);
+};
+
+}  // namespace
+
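+// The StrictMock wrappers below turn any unexpected OnBeginNestedRunLoop(),
+// OnExitNestedRunLoop(), or Task() call into an immediate test failure, so
+// the InSequence block pins down the exact interleaving of nesting
+// notifications and mock tasks.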
+TEST_P(RunLoopTest, NestingObservers) {
+  testing::StrictMock<MockNestingObserver> nesting_observer;
+  testing::StrictMock<MockTask> mock_task_a;
+  testing::StrictMock<MockTask> mock_task_b;
+
+  RunLoop::AddNestingObserverOnCurrentThread(&nesting_observer);
+
+  const RepeatingClosure run_nested_loop = Bind([]() {
+    RunLoop nested_run_loop(RunLoop::Type::kNestableTasksAllowed);
+    ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                            nested_run_loop.QuitClosure());
+    nested_run_loop.Run();
+  });
+
+  // Generate a stack of nested RunLoops. OnBeginNestedRunLoop() is expected
+  // when beginning each nesting depth and OnExitNestedRunLoop() is expected
+  // when exiting each nesting depth. Each one of these tasks is ahead of the
+  // QuitClosures as those are only posted at the end of the queue when
+  // |run_nested_loop| is executed.
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, run_nested_loop);
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE,
+      base::BindOnce(&MockTask::Task, base::Unretained(&mock_task_a)));
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, run_nested_loop);
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE,
+      base::BindOnce(&MockTask::Task, base::Unretained(&mock_task_b)));
+
+  {
+    testing::InSequence in_sequence;
+    EXPECT_CALL(nesting_observer, OnBeginNestedRunLoop());
+    EXPECT_CALL(mock_task_a, Task());
+    EXPECT_CALL(nesting_observer, OnBeginNestedRunLoop());
+    EXPECT_CALL(mock_task_b, Task());
+    EXPECT_CALL(nesting_observer, OnExitNestedRunLoop()).Times(2);
+  }
+  run_loop_.RunUntilIdle();
+
+  RunLoop::RemoveNestingObserverOnCurrentThread(&nesting_observer);
+}
+
+TEST_P(RunLoopTest, DisallowRunningForTesting) {
+  RunLoop::ScopedDisallowRunningForTesting disallow_running;
+  EXPECT_DCHECK_DEATH({ run_loop_.RunUntilIdle(); });
+}
+
+TEST_P(RunLoopTest, ExpiredDisallowRunningForTesting) {
+  { RunLoop::ScopedDisallowRunningForTesting disallow_running; }
+  // Running should be fine after |disallow_running| goes out of scope.
+  run_loop_.RunUntilIdle();
+}
+
+INSTANTIATE_TEST_CASE_P(Real,
+                        RunLoopTest,
+                        testing::Values(RunLoopTestType::kRealEnvironment));
+INSTANTIATE_TEST_CASE_P(Mock,
+                        RunLoopTest,
+                        testing::Values(RunLoopTestType::kTestDelegate));
+
+TEST(RunLoopDeathTest, MustRegisterBeforeInstantiating) {
+  TestBoundDelegate unbound_test_delegate;
+  // RunLoop::RunLoop() should CHECK when fetching the ThreadTaskRunnerHandle,
+  // since the delegate above was never bound to this thread.
+  EXPECT_DEATH_IF_SUPPORTED({ RunLoop(); }, "");
+}
+
+TEST(RunLoopDelegateTest, NestableTasksDontRunInDefaultNestedLoops) {
+  TestBoundDelegate test_delegate;
+  test_delegate.BindToCurrentThread();
+
+  base::Thread other_thread("test");
+  other_thread.Start();
+
+  RunLoop main_loop;
+  // A nested run loop which isn't kNestableTasksAllowed.
+  RunLoop nested_run_loop(RunLoop::Type::kDefault);
+
+  bool nested_run_loop_ended = false;
+
+  // The first task on the main loop will result in a nested run loop. Since
+  // it's not kNestableTasksAllowed, no further task should be processed until
+  // it's quit.
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE,
+      BindOnce([](RunLoop* nested_run_loop) { nested_run_loop->Run(); },
+               Unretained(&nested_run_loop)));
+
+  // Post a task that will fail if it runs inside the nested run loop.
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce(
+                     [](const bool& nested_run_loop_ended,
+                        OnceClosure continuation_callback) {
+                       EXPECT_TRUE(nested_run_loop_ended);
+                       EXPECT_FALSE(RunLoop::IsNestedOnCurrentThread());
+                       std::move(continuation_callback).Run();
+                     },
+                     ConstRef(nested_run_loop_ended), main_loop.QuitClosure()));
+
+  // Post a task flipping the boolean bit for extra verification right before
+  // quitting |nested_run_loop|.
+  other_thread.task_runner()->PostDelayedTask(
+      FROM_HERE,
+      BindOnce(
+          [](bool* nested_run_loop_ended) {
+            EXPECT_FALSE(*nested_run_loop_ended);
+            *nested_run_loop_ended = true;
+          },
+          Unretained(&nested_run_loop_ended)),
+      TestTimeouts::tiny_timeout());
+  // Post an async delayed task to exit the run loop when idle. This confirms
+  // that (1) the test task only ran in the main loop after the nested loop
+  // exited and (2) the nested run loop actually considers itself idle while
+  // spinning. Note: The quit closure needs to be injected directly on the
+  // delegate, as invoking QuitWhenIdle() off-thread results in a thread
+  // bounce which will not be processed because of the very logic under test
+  // (nestable tasks don't run in |nested_run_loop|).
+  other_thread.task_runner()->PostDelayedTask(
+      FROM_HERE,
+      BindOnce(
+          [](TestBoundDelegate* test_delegate, OnceClosure injected_closure) {
+            test_delegate->InjectClosureOnDelegate(std::move(injected_closure));
+          },
+          Unretained(&test_delegate), nested_run_loop.QuitWhenIdleClosure()),
+      TestTimeouts::tiny_timeout());
+
+  main_loop.Run();
+}
+
+}  // namespace base
diff --git a/base/safe_numerics_unittest.cc b/base/safe_numerics_unittest.cc
new file mode 100644
index 0000000..44675cf
--- /dev/null
+++ b/base/safe_numerics_unittest.cc
@@ -0,0 +1,1640 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <limits>
+#include <type_traits>
+
+#include "base/compiler_specific.h"
+
+// WARNING: This block must come before the base/numerics headers are included.
+// These tests deliberately cause arithmetic boundary errors. If the compiler
+// is aggressive enough, it can detect these errors during constant folding,
+// so we disable the corresponding warnings.
+#if defined(OS_WIN)
+#pragma warning(disable : 4756)  // Arithmetic overflow.
+#pragma warning(disable : 4293)  // Invalid shift.
+#endif
+
+// This may not need to come before the base/numerics headers, but let's keep
+// it close to the MSVC equivalent.
+#if defined(__clang__)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Winteger-overflow"
+#endif
+
+#include "base/logging.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/numerics/safe_math.h"
+#include "base/test/gtest_util.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(COMPILER_MSVC) && defined(ARCH_CPU_32_BITS)
+#include <mmintrin.h>
+#endif
+
+namespace base {
+namespace internal {
+
+using std::numeric_limits;
+
+// This is a helper function for finding the maximum value in Src that can be
+// wholly represented as the destination floating-point type.
+template <typename Dst, typename Src>
+Dst GetMaxConvertibleToFloat() {
+  using DstLimits = numeric_limits<Dst>;
+  using SrcLimits = numeric_limits<Src>;
+  static_assert(SrcLimits::is_specialized, "Source must be numeric.");
+  static_assert(DstLimits::is_specialized, "Destination must be numeric.");
+  CHECK(DstLimits::is_iec559);
+
+  if (SrcLimits::digits <= DstLimits::digits &&
+      MaxExponent<Src>::value <= MaxExponent<Dst>::value)
+    return SrcLimits::max();
+  Src max = SrcLimits::max() / 2 + (SrcLimits::is_integer ? 1 : 0);
+  while (max != static_cast<Src>(static_cast<Dst>(max))) {
+    max /= 2;
+  }
+  return static_cast<Dst>(max);
+}
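+// For example, with Dst = float and Src = int32_t (assuming the usual 24-bit
+// float mantissa), the loop starts at (2^31 - 1) / 2 + 1 = 2^30, which
+// already round-trips through float exactly, so the function returns 2^30.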
+
+// Test the corner-case promotions used below.
+static_assert(IsIntegerArithmeticSafe<int32_t, int8_t, int8_t>::value, "");
+static_assert(IsIntegerArithmeticSafe<int32_t, int16_t, int8_t>::value, "");
+static_assert(IsIntegerArithmeticSafe<int32_t, int8_t, int16_t>::value, "");
+static_assert(!IsIntegerArithmeticSafe<int32_t, int32_t, int8_t>::value, "");
+static_assert(BigEnoughPromotion<int16_t, int8_t>::is_contained, "");
+static_assert(BigEnoughPromotion<int32_t, uint32_t>::is_contained, "");
+static_assert(BigEnoughPromotion<intmax_t, int8_t>::is_contained, "");
+static_assert(!BigEnoughPromotion<uintmax_t, int8_t>::is_contained, "");
+static_assert(
+    std::is_same<BigEnoughPromotion<int16_t, int8_t>::type, int16_t>::value,
+    "");
+static_assert(
+    std::is_same<BigEnoughPromotion<int32_t, uint32_t>::type, int64_t>::value,
+    "");
+static_assert(
+    std::is_same<BigEnoughPromotion<intmax_t, int8_t>::type, intmax_t>::value,
+    "");
+static_assert(
+    std::is_same<BigEnoughPromotion<uintmax_t, int8_t>::type, uintmax_t>::value,
+    "");
+static_assert(
+    std::is_same<FastIntegerArithmeticPromotion<int16_t, int8_t>::type,
+                 int32_t>::value,
+    "");
+static_assert(
+    std::is_same<FastIntegerArithmeticPromotion<int32_t, uint32_t>::type,
+                 int64_t>::value,
+    "");
+static_assert(
+    std::is_same<FastIntegerArithmeticPromotion<intmax_t, int8_t>::type,
+                 intmax_t>::value,
+    "");
+static_assert(
+    std::is_same<FastIntegerArithmeticPromotion<uintmax_t, int8_t>::type,
+                 uintmax_t>::value,
+    "");
+static_assert(FastIntegerArithmeticPromotion<int16_t, int8_t>::is_contained,
+              "");
+static_assert(FastIntegerArithmeticPromotion<int32_t, uint32_t>::is_contained,
+              "");
+static_assert(!FastIntegerArithmeticPromotion<intmax_t, int8_t>::is_contained,
+              "");
+static_assert(!FastIntegerArithmeticPromotion<uintmax_t, int8_t>::is_contained,
+              "");
+
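+// Helpers for extracting the raw underlying value so failure messages can
+// display it even when a result is invalid. The CheckedNumeric overload reads
+// the private |state_| member, which compiles only because CheckedNumeric
+// grants this function friend access for testing.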
+template <typename U>
+U GetNumericValueForTest(const CheckedNumeric<U>& src) {
+  return src.state_.value();
+}
+
+template <typename U>
+U GetNumericValueForTest(const ClampedNumeric<U>& src) {
+  return static_cast<U>(src);
+}
+
+template <typename U>
+U GetNumericValueForTest(const U& src) {
+  return src;
+}
+
+// Logs the ValueOrDie() failure instead of crashing.
+struct LogOnFailure {
+  template <typename T>
+  static T HandleFailure() {
+    LOG(WARNING) << "ValueOrDie() failed unexpectedly.";
+    return T();
+  }
+};
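+// LogOnFailure is plugged in below as the failure handler template argument,
+// e.g. src.template ValueOrDie<T, LogOnFailure>(): an invalid value then logs
+// a warning and yields a default-constructed T, letting the surrounding
+// EXPECT_EQ report the mismatch instead of terminating the test binary.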
+
+template <typename T>
+constexpr T GetValue(const T& src) {
+  return src;
+}
+
+template <typename T, typename U>
+constexpr T GetValueAsDest(const U& src) {
+  return static_cast<T>(src);
+}
+
+template <typename T>
+constexpr T GetValue(const CheckedNumeric<T>& src) {
+  return src.template ValueOrDie<T, LogOnFailure>();
+}
+
+template <typename T, typename U>
+constexpr T GetValueAsDest(const CheckedNumeric<U>& src) {
+  return src.template ValueOrDie<T, LogOnFailure>();
+}
+
+template <typename T>
+constexpr T GetValue(const ClampedNumeric<T>& src) {
+  return static_cast<T>(src);
+}
+
+template <typename T, typename U>
+constexpr T GetValueAsDest(const ClampedNumeric<U>& src) {
+  return static_cast<T>(src);
+}
+
+// Helper macros to wrap displaying the conversion types and line numbers.
+#define TEST_EXPECTED_VALIDITY(expected, actual)                           \
+  EXPECT_EQ(expected, (actual).template Cast<Dst>().IsValid())             \
+      << "Result test: Value " << GetNumericValueForTest(actual) << " as " \
+      << dst << " on line " << line
+
+#define TEST_EXPECTED_SUCCESS(actual) TEST_EXPECTED_VALIDITY(true, actual)
+#define TEST_EXPECTED_FAILURE(actual) TEST_EXPECTED_VALIDITY(false, actual)
+
+// We have to handle promotions, so infer the underlying type below from actual.
+#define TEST_EXPECTED_VALUE(expected, actual)                               \
+  EXPECT_EQ(GetValue(expected), GetValueAsDest<decltype(expected)>(actual)) \
+      << "Result test: Value " << GetNumericValueForTest(actual) << " as "  \
+      << dst << " on line " << line
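+
+// Usage sketch (hypothetical): within a helper templated on Dst that has
+// |dst| and |line| in scope, e.g. Dst = int8_t,
+//   TEST_EXPECTED_VALUE(2, CheckedNumeric<Dst>(1) + 1);
+//   TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(127) + 1);
+// expand to EXPECT_EQ checks that, on failure, also report the destination
+// type name and the line number of the originating call site.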
+
+// Test the simple pointer arithmetic overrides.
+template <typename Dst>
+void TestStrictPointerMath() {
+  Dst dummy_value = 0;
+  Dst* dummy_ptr = &dummy_value;
+  static const Dst kDummyOffset = 2;  // Don't want to go too far.
+  EXPECT_EQ(dummy_ptr + kDummyOffset,
+            dummy_ptr + StrictNumeric<Dst>(kDummyOffset));
+  EXPECT_EQ(dummy_ptr - kDummyOffset,
+            dummy_ptr - StrictNumeric<Dst>(kDummyOffset));
+  EXPECT_NE(dummy_ptr, dummy_ptr + StrictNumeric<Dst>(kDummyOffset));
+  EXPECT_NE(dummy_ptr, dummy_ptr - StrictNumeric<Dst>(kDummyOffset));
+  EXPECT_DEATH_IF_SUPPORTED(
+      dummy_ptr + StrictNumeric<size_t>(std::numeric_limits<size_t>::max()),
+      "");
+}
+
+// Signed integer arithmetic.
+template <typename Dst>
+static void TestSpecializedArithmetic(
+    const char* dst,
+    int line,
+    typename std::enable_if<numeric_limits<Dst>::is_integer &&
+                                numeric_limits<Dst>::is_signed,
+                            int>::type = 0) {
+  using DstLimits = SaturationDefaultLimits<Dst>;
+  TEST_EXPECTED_FAILURE(-CheckedNumeric<Dst>(DstLimits::lowest()));
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()).Abs());
+  TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(-1).Abs());
+  TEST_EXPECTED_VALUE(DstLimits::max(),
+                      MakeCheckedNum(-DstLimits::max()).Abs());
+
+  TEST_EXPECTED_VALUE(DstLimits::Overflow(),
+                      -ClampedNumeric<Dst>(DstLimits::lowest()));
+  TEST_EXPECTED_VALUE(DstLimits::Overflow(),
+                      ClampedNumeric<Dst>(DstLimits::lowest()).Abs());
+  TEST_EXPECTED_VALUE(1, ClampedNumeric<Dst>(-1).Abs());
+  TEST_EXPECTED_VALUE(DstLimits::max(),
+                      MakeClampedNum(-DstLimits::max()).Abs());
+
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::max()) + -1);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) + -1);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) +
+                        DstLimits::lowest());
+
+  TEST_EXPECTED_VALUE(DstLimits::max() - 1,
+                      ClampedNumeric<Dst>(DstLimits::max()) + -1);
+  TEST_EXPECTED_VALUE(DstLimits::Underflow(),
+                      ClampedNumeric<Dst>(DstLimits::lowest()) + -1);
+  TEST_EXPECTED_VALUE(
+      DstLimits::Underflow(),
+      ClampedNumeric<Dst>(DstLimits::lowest()) + DstLimits::lowest());
+
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) - 1);
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::lowest()) - -1);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) -
+                        DstLimits::lowest());
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) -
+                        DstLimits::max());
+
+  TEST_EXPECTED_VALUE(DstLimits::Underflow(),
+                      ClampedNumeric<Dst>(DstLimits::lowest()) - 1);
+  TEST_EXPECTED_VALUE(DstLimits::lowest() + 1,
+                      ClampedNumeric<Dst>(DstLimits::lowest()) - -1);
+  TEST_EXPECTED_VALUE(
+      DstLimits::Overflow(),
+      ClampedNumeric<Dst>(DstLimits::max()) - DstLimits::lowest());
+  TEST_EXPECTED_VALUE(
+      DstLimits::Underflow(),
+      ClampedNumeric<Dst>(DstLimits::lowest()) - DstLimits::max());
+
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) * 2);
+  TEST_EXPECTED_VALUE(DstLimits::Underflow(),
+                      ClampedNumeric<Dst>(DstLimits::lowest()) * 2);
+
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) / -1);
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(-1) / 2);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) * -1);
+  TEST_EXPECTED_VALUE(DstLimits::max(),
+                      CheckedNumeric<Dst>(DstLimits::lowest() + 1) * Dst(-1));
+  TEST_EXPECTED_VALUE(DstLimits::max(),
+                      CheckedNumeric<Dst>(-1) * Dst(DstLimits::lowest() + 1));
+  TEST_EXPECTED_VALUE(DstLimits::lowest(),
+                      CheckedNumeric<Dst>(DstLimits::lowest()) * Dst(1));
+  TEST_EXPECTED_VALUE(DstLimits::lowest(),
+                      CheckedNumeric<Dst>(1) * Dst(DstLimits::lowest()));
+  TEST_EXPECTED_VALUE(
+      typename std::make_unsigned<Dst>::type(0) - DstLimits::lowest(),
+      MakeCheckedNum(DstLimits::lowest()).UnsignedAbs());
+  TEST_EXPECTED_VALUE(DstLimits::max(),
+                      MakeCheckedNum(DstLimits::max()).UnsignedAbs());
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(0).UnsignedAbs());
+  TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1).UnsignedAbs());
+  TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(-1).UnsignedAbs());
+
+  TEST_EXPECTED_VALUE(DstLimits::Overflow(),
+                      ClampedNumeric<Dst>(DstLimits::lowest()) / -1);
+  TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>(-1) / 2);
+  TEST_EXPECTED_VALUE(DstLimits::Overflow(),
+                      ClampedNumeric<Dst>(DstLimits::lowest()) * -1);
+  TEST_EXPECTED_VALUE(DstLimits::max(),
+                      ClampedNumeric<Dst>(DstLimits::lowest() + 1) * Dst(-1));
+  TEST_EXPECTED_VALUE(DstLimits::max(),
+                      ClampedNumeric<Dst>(-1) * Dst(DstLimits::lowest() + 1));
+  TEST_EXPECTED_VALUE(DstLimits::lowest(),
+                      ClampedNumeric<Dst>(DstLimits::lowest()) * Dst(1));
+  TEST_EXPECTED_VALUE(DstLimits::lowest(),
+                      ClampedNumeric<Dst>(1) * Dst(DstLimits::lowest()));
+  TEST_EXPECTED_VALUE(
+      typename std::make_unsigned<Dst>::type(0) - DstLimits::lowest(),
+      MakeClampedNum(DstLimits::lowest()).UnsignedAbs());
+  TEST_EXPECTED_VALUE(DstLimits::max(),
+                      MakeClampedNum(DstLimits::max()).UnsignedAbs());
+  TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>(0).UnsignedAbs());
+  TEST_EXPECTED_VALUE(1, ClampedNumeric<Dst>(1).UnsignedAbs());
+  TEST_EXPECTED_VALUE(1, ClampedNumeric<Dst>(-1).UnsignedAbs());
+
+  // Modulus is legal only for integers.
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>() % 1);
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) % 1);
+  TEST_EXPECTED_VALUE(-1, CheckedNumeric<Dst>(-1) % 2);
+  TEST_EXPECTED_VALUE(-1, CheckedNumeric<Dst>(-1) % -2);
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(DstLimits::lowest()) % 2);
+  TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(DstLimits::max()) % 2);
+  // Test all the different modulus combinations.
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) % CheckedNumeric<Dst>(1));
+  TEST_EXPECTED_VALUE(0, 1 % CheckedNumeric<Dst>(1));
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) % 1);
+  CheckedNumeric<Dst> checked_dst = 1;
+  TEST_EXPECTED_VALUE(0, checked_dst %= 1);
+  // Test that division by 0 is avoided but returns an invalid result.
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) % 0);
+  // Test bit shifts.
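+  // |negative_one| is volatile so the compiler must emit the runtime range
+  // check instead of folding (or rejecting) the invalid shift at compile
+  // time.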
+  volatile Dst negative_one = -1;
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) << negative_one);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1)
+                        << (IntegerBitsPlusSign<Dst>::value - 1));
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(0)
+                        << IntegerBitsPlusSign<Dst>::value);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) << 1);
+  TEST_EXPECTED_VALUE(
+      static_cast<Dst>(1) << (IntegerBitsPlusSign<Dst>::value - 2),
+      CheckedNumeric<Dst>(1) << (IntegerBitsPlusSign<Dst>::value - 2));
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(0)
+                             << (IntegerBitsPlusSign<Dst>::value - 1));
+  TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) << 0);
+  TEST_EXPECTED_VALUE(2, CheckedNumeric<Dst>(1) << 1);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) >>
+                        IntegerBitsPlusSign<Dst>::value);
+  TEST_EXPECTED_VALUE(
+      0, CheckedNumeric<Dst>(1) >> (IntegerBitsPlusSign<Dst>::value - 1));
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) >> negative_one);
+
+  // Modulus is legal only for integers.
+  TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>() % 1);
+  TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>(1) % 1);
+  TEST_EXPECTED_VALUE(-1, ClampedNumeric<Dst>(-1) % 2);
+  TEST_EXPECTED_VALUE(-1, ClampedNumeric<Dst>(-1) % -2);
+  TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>(DstLimits::lowest()) % 2);
+  TEST_EXPECTED_VALUE(1, ClampedNumeric<Dst>(DstLimits::max()) % 2);
+  // Test all the different modulus combinations.
+  TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>(1) % ClampedNumeric<Dst>(1));
+  TEST_EXPECTED_VALUE(0, 1 % ClampedNumeric<Dst>(1));
+  TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>(1) % 1);
+  ClampedNumeric<Dst> clamped_dst = 1;
+  TEST_EXPECTED_VALUE(0, clamped_dst %= 1);
+  TEST_EXPECTED_VALUE(Dst(1), ClampedNumeric<Dst>(1) % 0);
+  // Test bit shifts.
+  TEST_EXPECTED_VALUE(DstLimits::Overflow(),
+                      ClampedNumeric<Dst>(1)
+                          << (IntegerBitsPlusSign<Dst>::value - 1U));
+  TEST_EXPECTED_VALUE(Dst(0), ClampedNumeric<Dst>(0)
+                                  << (IntegerBitsPlusSign<Dst>::value + 0U));
+  TEST_EXPECTED_VALUE(DstLimits::Overflow(),
+                      ClampedNumeric<Dst>(DstLimits::max()) << 1U);
+  TEST_EXPECTED_VALUE(
+      static_cast<Dst>(1) << (IntegerBitsPlusSign<Dst>::value - 2U),
+      ClampedNumeric<Dst>(1) << (IntegerBitsPlusSign<Dst>::value - 2U));
+  TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>(0)
+                             << (IntegerBitsPlusSign<Dst>::value - 1U));
+  TEST_EXPECTED_VALUE(1, ClampedNumeric<Dst>(1) << 0U);
+  TEST_EXPECTED_VALUE(2, ClampedNumeric<Dst>(1) << 1U);
+  TEST_EXPECTED_VALUE(
+      0, ClampedNumeric<Dst>(1) >> (IntegerBitsPlusSign<Dst>::value + 0U));
+  TEST_EXPECTED_VALUE(
+      0, ClampedNumeric<Dst>(1) >> (IntegerBitsPlusSign<Dst>::value - 1U));
+  TEST_EXPECTED_VALUE(
+      -1, ClampedNumeric<Dst>(-1) >> (IntegerBitsPlusSign<Dst>::value - 1U));
+  TEST_EXPECTED_VALUE(-1, ClampedNumeric<Dst>(DstLimits::lowest()) >>
+                              (IntegerBitsPlusSign<Dst>::value - 0U));
+
+  TestStrictPointerMath<Dst>();
+}
+
+// Unsigned integer arithmetic.
+template <typename Dst>
+static void TestSpecializedArithmetic(
+    const char* dst,
+    int line,
+    typename std::enable_if<numeric_limits<Dst>::is_integer &&
+                                !numeric_limits<Dst>::is_signed,
+                            int>::type = 0) {
+  using DstLimits = SaturationDefaultLimits<Dst>;
+  TEST_EXPECTED_SUCCESS(-CheckedNumeric<Dst>(DstLimits::lowest()));
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::lowest()).Abs());
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) + -1);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) - 1);
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(DstLimits::lowest()) * 2);
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) / 2);
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::lowest()).UnsignedAbs());
+  TEST_EXPECTED_SUCCESS(
+      CheckedNumeric<typename std::make_signed<Dst>::type>(
+          std::numeric_limits<typename std::make_signed<Dst>::type>::lowest())
+          .UnsignedAbs());
+  TEST_EXPECTED_VALUE(DstLimits::lowest(),
+                      MakeCheckedNum(DstLimits::lowest()).UnsignedAbs());
+  TEST_EXPECTED_VALUE(DstLimits::max(),
+                      MakeCheckedNum(DstLimits::max()).UnsignedAbs());
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(0).UnsignedAbs());
+  TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1).UnsignedAbs());
+
+  TEST_EXPECTED_VALUE(0, -ClampedNumeric<Dst>(DstLimits::lowest()));
+  TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>(DstLimits::lowest()).Abs());
+  TEST_EXPECTED_VALUE(DstLimits::Underflow(),
+                      ClampedNumeric<Dst>(DstLimits::lowest()) + -1);
+  TEST_EXPECTED_VALUE(DstLimits::Underflow(),
+                      ClampedNumeric<Dst>(DstLimits::lowest()) - 1);
+  TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>(DstLimits::lowest()) * 2);
+  TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>(1) / 2);
+  TEST_EXPECTED_VALUE(0,
+                      ClampedNumeric<Dst>(DstLimits::lowest()).UnsignedAbs());
+  TEST_EXPECTED_VALUE(
+      as_unsigned(
+          std::numeric_limits<typename std::make_signed<Dst>::type>::lowest()),
+      ClampedNumeric<typename std::make_signed<Dst>::type>(
+          std::numeric_limits<typename std::make_signed<Dst>::type>::lowest())
+          .UnsignedAbs());
+  TEST_EXPECTED_VALUE(DstLimits::lowest(),
+                      MakeClampedNum(DstLimits::lowest()).UnsignedAbs());
+  TEST_EXPECTED_VALUE(DstLimits::max(),
+                      MakeClampedNum(DstLimits::max()).UnsignedAbs());
+  TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>(0).UnsignedAbs());
+  TEST_EXPECTED_VALUE(1, ClampedNumeric<Dst>(1).UnsignedAbs());
+
+  // Modulus is legal only for integers.
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>() % 1);
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) % 1);
+  TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) % 2);
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(DstLimits::lowest()) % 2);
+  TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(DstLimits::max()) % 2);
+  // Test all the different modulus combinations.
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) % CheckedNumeric<Dst>(1));
+  TEST_EXPECTED_VALUE(0, 1 % CheckedNumeric<Dst>(1));
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) % 1);
+  CheckedNumeric<Dst> checked_dst = 1;
+  TEST_EXPECTED_VALUE(0, checked_dst %= 1);
+  // Test that division by 0 is avoided but returns an invalid result.
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) % 0);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1)
+                        << IntegerBitsPlusSign<Dst>::value);
+  // Test bit shifts.
+  volatile int negative_one = -1;
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) << negative_one);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1)
+                        << IntegerBitsPlusSign<Dst>::value);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(0)
+                        << IntegerBitsPlusSign<Dst>::value);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) << 1);
+  TEST_EXPECTED_VALUE(
+      static_cast<Dst>(1) << (IntegerBitsPlusSign<Dst>::value - 1),
+      CheckedNumeric<Dst>(1) << (IntegerBitsPlusSign<Dst>::value - 1));
+  TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) << 0);
+  TEST_EXPECTED_VALUE(2, CheckedNumeric<Dst>(1) << 1);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) >>
+                        IntegerBitsPlusSign<Dst>::value);
+  TEST_EXPECTED_VALUE(
+      0, CheckedNumeric<Dst>(1) >> (IntegerBitsPlusSign<Dst>::value - 1));
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) >> negative_one);
+  TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) & 1);
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) & 0);
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(0) & 1);
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(0) & 0);
+  TEST_EXPECTED_VALUE(std::numeric_limits<Dst>::max(),
+                      MakeCheckedNum(DstLimits::max()) & -1);
+  TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) | 1);
+  TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) | 0);
+  TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(0) | 1);
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(0) | 0);
+  TEST_EXPECTED_VALUE(std::numeric_limits<Dst>::max(),
+                      CheckedNumeric<Dst>(0) | static_cast<Dst>(-1));
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) ^ 1);
+  TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) ^ 0);
+  TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(0) ^ 1);
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(0) ^ 0);
+  TEST_EXPECTED_VALUE(std::numeric_limits<Dst>::max(),
+                      CheckedNumeric<Dst>(0) ^ static_cast<Dst>(-1));
+  TEST_EXPECTED_VALUE(DstLimits::max(), ~CheckedNumeric<Dst>(0));
+
+  // Modulus is legal only for integers.
+  TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>() % 1);
+  TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>(1) % 1);
+  TEST_EXPECTED_VALUE(1, ClampedNumeric<Dst>(1) % 2);
+  TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>(DstLimits::lowest()) % 2);
+  TEST_EXPECTED_VALUE(1, ClampedNumeric<Dst>(DstLimits::max()) % 2);
+  // Test all the different modulus combinations.
+  TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>(1) % ClampedNumeric<Dst>(1));
+  TEST_EXPECTED_VALUE(0, 1 % ClampedNumeric<Dst>(1));
+  TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>(1) % 1);
+  ClampedNumeric<Dst> clamped_dst = 1;
+  TEST_EXPECTED_VALUE(0, clamped_dst %= 1);
+  // Test that division by 0 is avoided; the clamped result passes the
+  // dividend through.
+  TEST_EXPECTED_VALUE(Dst(1), ClampedNumeric<Dst>(1) % 0);
+  // Test bit shifts.
+  TEST_EXPECTED_VALUE(DstLimits::Overflow(),
+                      ClampedNumeric<Dst>(1)
+                          << as_unsigned(IntegerBitsPlusSign<Dst>::value));
+  TEST_EXPECTED_VALUE(Dst(0), ClampedNumeric<Dst>(0) << as_unsigned(
+                                  IntegerBitsPlusSign<Dst>::value));
+  TEST_EXPECTED_VALUE(DstLimits::Overflow(),
+                      ClampedNumeric<Dst>(DstLimits::max()) << 1U);
+  TEST_EXPECTED_VALUE(
+      static_cast<Dst>(1) << (IntegerBitsPlusSign<Dst>::value - 1U),
+      ClampedNumeric<Dst>(1) << (IntegerBitsPlusSign<Dst>::value - 1U));
+  TEST_EXPECTED_VALUE(1, ClampedNumeric<Dst>(1) << 0U);
+  TEST_EXPECTED_VALUE(2, ClampedNumeric<Dst>(1) << 1U);
+  TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>(1) >>
+                             as_unsigned(IntegerBitsPlusSign<Dst>::value));
+  TEST_EXPECTED_VALUE(
+      0, ClampedNumeric<Dst>(1) >> (IntegerBitsPlusSign<Dst>::value - 1U));
+  TEST_EXPECTED_VALUE(1, ClampedNumeric<Dst>(1) & 1);
+  TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>(1) & 0);
+  TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>(0) & 1);
+  TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>(0) & 0);
+  TEST_EXPECTED_VALUE(std::numeric_limits<Dst>::max(),
+                      MakeClampedNum(DstLimits::max()) & -1);
+  TEST_EXPECTED_VALUE(1, ClampedNumeric<Dst>(1) | 1);
+  TEST_EXPECTED_VALUE(1, ClampedNumeric<Dst>(1) | 0);
+  TEST_EXPECTED_VALUE(1, ClampedNumeric<Dst>(0) | 1);
+  TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>(0) | 0);
+  TEST_EXPECTED_VALUE(std::numeric_limits<Dst>::max(),
+                      ClampedNumeric<Dst>(0) | static_cast<Dst>(-1));
+  TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>(1) ^ 1);
+  TEST_EXPECTED_VALUE(1, ClampedNumeric<Dst>(1) ^ 0);
+  TEST_EXPECTED_VALUE(1, ClampedNumeric<Dst>(0) ^ 1);
+  TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>(0) ^ 0);
+  TEST_EXPECTED_VALUE(std::numeric_limits<Dst>::max(),
+                      ClampedNumeric<Dst>(0) ^ static_cast<Dst>(-1));
+  TEST_EXPECTED_VALUE(DstLimits::max(), ~ClampedNumeric<Dst>(0));
+
+  TestStrictPointerMath<Dst>();
+}
+
+// Floating point arithmetic.
+template <typename Dst>
+void TestSpecializedArithmetic(
+    const char* dst,
+    int line,
+    typename std::enable_if<numeric_limits<Dst>::is_iec559, int>::type = 0) {
+  using DstLimits = SaturationDefaultLimits<Dst>;
+  TEST_EXPECTED_SUCCESS(-CheckedNumeric<Dst>(DstLimits::lowest()));
+
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::lowest()).Abs());
+  TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(-1).Abs());
+
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::lowest()) + -1);
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::max()) + 1);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) +
+                        DstLimits::lowest());
+
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) -
+                        DstLimits::lowest());
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) -
+                        DstLimits::max());
+
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) * 2);
+
+  TEST_EXPECTED_VALUE(-0.5, CheckedNumeric<Dst>(-1.0) / 2);
+
+  TEST_EXPECTED_VALUE(DstLimits::max(),
+                      -ClampedNumeric<Dst>(DstLimits::lowest()));
+
+  TEST_EXPECTED_VALUE(DstLimits::max(),
+                      ClampedNumeric<Dst>(DstLimits::lowest()).Abs());
+  TEST_EXPECTED_VALUE(1, ClampedNumeric<Dst>(-1).Abs());
+
+  TEST_EXPECTED_VALUE(DstLimits::lowest() - 1,
+                      ClampedNumeric<Dst>(DstLimits::lowest()) + -1);
+  TEST_EXPECTED_VALUE(DstLimits::max() + 1,
+                      ClampedNumeric<Dst>(DstLimits::max()) + 1);
+  TEST_EXPECTED_VALUE(
+      DstLimits::Underflow(),
+      ClampedNumeric<Dst>(DstLimits::lowest()) + DstLimits::lowest());
+
+  TEST_EXPECTED_VALUE(
+      DstLimits::Overflow(),
+      ClampedNumeric<Dst>(DstLimits::max()) - DstLimits::lowest());
+  TEST_EXPECTED_VALUE(
+      DstLimits::Underflow(),
+      ClampedNumeric<Dst>(DstLimits::lowest()) - DstLimits::max());
+
+  TEST_EXPECTED_VALUE(DstLimits::Underflow(),
+                      ClampedNumeric<Dst>(DstLimits::lowest()) * 2);
+
+  TEST_EXPECTED_VALUE(-0.5, ClampedNumeric<Dst>(-1.0) / 2);
+}
+
+// Generic arithmetic tests.
+template <typename Dst>
+static void TestArithmetic(const char* dst, int line) {
+  using DstLimits = SaturationDefaultLimits<Dst>;
+
+  EXPECT_EQ(true, CheckedNumeric<Dst>().IsValid());
+  EXPECT_EQ(false, CheckedNumeric<Dst>(CheckedNumeric<Dst>(DstLimits::max()) *
+                                       DstLimits::max())
+                       .IsValid());
+  EXPECT_EQ(static_cast<Dst>(0), CheckedNumeric<Dst>().ValueOrDie());
+  EXPECT_EQ(static_cast<Dst>(0), CheckedNumeric<Dst>().ValueOrDefault(1));
+  EXPECT_EQ(static_cast<Dst>(1),
+            CheckedNumeric<Dst>(CheckedNumeric<Dst>(DstLimits::max()) *
+                                DstLimits::max())
+                .ValueOrDefault(1));
+
+  // Test the operator combinations.
+  TEST_EXPECTED_VALUE(2, CheckedNumeric<Dst>(1) + CheckedNumeric<Dst>(1));
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) - CheckedNumeric<Dst>(1));
+  TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) * CheckedNumeric<Dst>(1));
+  TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) / CheckedNumeric<Dst>(1));
+  TEST_EXPECTED_VALUE(2, 1 + CheckedNumeric<Dst>(1));
+  TEST_EXPECTED_VALUE(0, 1 - CheckedNumeric<Dst>(1));
+  TEST_EXPECTED_VALUE(1, 1 * CheckedNumeric<Dst>(1));
+  TEST_EXPECTED_VALUE(1, 1 / CheckedNumeric<Dst>(1));
+  TEST_EXPECTED_VALUE(2, CheckedNumeric<Dst>(1) + 1);
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) - 1);
+  TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) * 1);
+  TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) / 1);
+  CheckedNumeric<Dst> checked_dst = 1;
+  TEST_EXPECTED_VALUE(2, checked_dst += 1);
+  checked_dst = 1;
+  TEST_EXPECTED_VALUE(0, checked_dst -= 1);
+  checked_dst = 1;
+  TEST_EXPECTED_VALUE(1, checked_dst *= 1);
+  checked_dst = 1;
+  TEST_EXPECTED_VALUE(1, checked_dst /= 1);
+
+  TEST_EXPECTED_VALUE(2, ClampedNumeric<Dst>(1) + ClampedNumeric<Dst>(1));
+  TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>(1) - ClampedNumeric<Dst>(1));
+  TEST_EXPECTED_VALUE(1, ClampedNumeric<Dst>(1) * ClampedNumeric<Dst>(1));
+  TEST_EXPECTED_VALUE(1, ClampedNumeric<Dst>(1) / ClampedNumeric<Dst>(1));
+  TEST_EXPECTED_VALUE(2, 1 + ClampedNumeric<Dst>(1));
+  TEST_EXPECTED_VALUE(0, 1 - ClampedNumeric<Dst>(1));
+  TEST_EXPECTED_VALUE(1, 1 * ClampedNumeric<Dst>(1));
+  TEST_EXPECTED_VALUE(1, 1 / ClampedNumeric<Dst>(1));
+  TEST_EXPECTED_VALUE(2, ClampedNumeric<Dst>(1) + 1);
+  TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>(1) - 1);
+  TEST_EXPECTED_VALUE(1, ClampedNumeric<Dst>(1) * 1);
+  TEST_EXPECTED_VALUE(1, ClampedNumeric<Dst>(1) / 1);
+  ClampedNumeric<Dst> clamped_dst = 1;
+  TEST_EXPECTED_VALUE(2, clamped_dst += 1);
+  clamped_dst = 1;
+  TEST_EXPECTED_VALUE(0, clamped_dst -= 1);
+  clamped_dst = 1;
+  TEST_EXPECTED_VALUE(1, clamped_dst *= 1);
+  clamped_dst = 1;
+  TEST_EXPECTED_VALUE(1, clamped_dst /= 1);
+
+  // Generic negation.
+  if (DstLimits::is_signed) {
+    TEST_EXPECTED_VALUE(0, -CheckedNumeric<Dst>());
+    TEST_EXPECTED_VALUE(-1, -CheckedNumeric<Dst>(1));
+    TEST_EXPECTED_VALUE(1, -CheckedNumeric<Dst>(-1));
+    TEST_EXPECTED_VALUE(static_cast<Dst>(DstLimits::max() * -1),
+                        -CheckedNumeric<Dst>(DstLimits::max()));
+
+    TEST_EXPECTED_VALUE(0, -ClampedNumeric<Dst>());
+    TEST_EXPECTED_VALUE(-1, -ClampedNumeric<Dst>(1));
+    TEST_EXPECTED_VALUE(1, -ClampedNumeric<Dst>(-1));
+    TEST_EXPECTED_VALUE(static_cast<Dst>(DstLimits::max() * -1),
+                        -ClampedNumeric<Dst>(DstLimits::max()));
+
+    // The runtime paths for saturated negation differ significantly from what
+    // gets evaluated at compile-time. Using a volatile value here forces the
+    // compiler to generate code rather than fold constant expressions.
+    volatile Dst value = Dst(0);
+    TEST_EXPECTED_VALUE(0, -MakeClampedNum(value));
+    value = Dst(1);
+    TEST_EXPECTED_VALUE(-1, -MakeClampedNum(value));
+    value = Dst(2);
+    TEST_EXPECTED_VALUE(-2, -MakeClampedNum(value));
+    value = Dst(-1);
+    TEST_EXPECTED_VALUE(1, -MakeClampedNum(value));
+    value = Dst(-2);
+    TEST_EXPECTED_VALUE(2, -MakeClampedNum(value));
+    value = DstLimits::max();
+    TEST_EXPECTED_VALUE(Dst(DstLimits::max() * -1), -MakeClampedNum(value));
+    value = Dst(-1 * DstLimits::max());
+    TEST_EXPECTED_VALUE(DstLimits::max(), -MakeClampedNum(value));
+    value = DstLimits::lowest();
+    TEST_EXPECTED_VALUE(DstLimits::max(), -MakeClampedNum(value));
+  }
+
+  // Generic absolute value.
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>().Abs());
+  TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1).Abs());
+  TEST_EXPECTED_VALUE(DstLimits::max(),
+                      CheckedNumeric<Dst>(DstLimits::max()).Abs());
+
+  TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>().Abs());
+  TEST_EXPECTED_VALUE(1, ClampedNumeric<Dst>(1).Abs());
+  TEST_EXPECTED_VALUE(DstLimits::max(),
+                      ClampedNumeric<Dst>(DstLimits::max()).Abs());
+
+  // Generic addition.
+  TEST_EXPECTED_VALUE(1, (CheckedNumeric<Dst>() + 1));
+  TEST_EXPECTED_VALUE(2, (CheckedNumeric<Dst>(1) + 1));
+  if (numeric_limits<Dst>::is_signed)
+    TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(-1) + 1));
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::lowest()) + 1);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) +
+                        DstLimits::max());
+
+  TEST_EXPECTED_VALUE(1, (ClampedNumeric<Dst>() + 1));
+  TEST_EXPECTED_VALUE(2, (ClampedNumeric<Dst>(1) + 1));
+  if (numeric_limits<Dst>::is_signed)
+    TEST_EXPECTED_VALUE(0, (ClampedNumeric<Dst>(-1) + 1));
+  TEST_EXPECTED_VALUE(DstLimits::lowest() + 1,
+                      ClampedNumeric<Dst>(DstLimits::lowest()) + 1);
+  TEST_EXPECTED_VALUE(DstLimits::Overflow(),
+                      ClampedNumeric<Dst>(DstLimits::max()) + DstLimits::max());
+
+  // Generic subtraction.
+  TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(1) - 1));
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::max()) - 1);
+  if (numeric_limits<Dst>::is_signed) {
+    TEST_EXPECTED_VALUE(-1, (CheckedNumeric<Dst>() - 1));
+    TEST_EXPECTED_VALUE(-2, (CheckedNumeric<Dst>(-1) - 1));
+  } else {
+    TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) - -1);
+  }
+
+  TEST_EXPECTED_VALUE(0, (ClampedNumeric<Dst>(1) - 1));
+  TEST_EXPECTED_VALUE(DstLimits::max() - 1,
+                      ClampedNumeric<Dst>(DstLimits::max()) - 1);
+  if (numeric_limits<Dst>::is_signed) {
+    TEST_EXPECTED_VALUE(-1, (ClampedNumeric<Dst>() - 1));
+    TEST_EXPECTED_VALUE(-2, (ClampedNumeric<Dst>(-1) - 1));
+  } else {
+    TEST_EXPECTED_VALUE(DstLimits::max(),
+                        ClampedNumeric<Dst>(DstLimits::max()) - -1);
+  }
+
+  // Generic multiplication.
+  TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>() * 1));
+  TEST_EXPECTED_VALUE(1, (CheckedNumeric<Dst>(1) * 1));
+  TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(0) * 0));
+  if (numeric_limits<Dst>::is_signed) {
+    TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(-1) * 0));
+    TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(0) * -1));
+    TEST_EXPECTED_VALUE(-2, (CheckedNumeric<Dst>(-1) * 2));
+  } else {
+    TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) * -2);
+    TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) *
+                          CheckedNumeric<uintmax_t>(-2));
+  }
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) *
+                        DstLimits::max());
+
+  TEST_EXPECTED_VALUE(0, (ClampedNumeric<Dst>() * 1));
+  TEST_EXPECTED_VALUE(1, (ClampedNumeric<Dst>(1) * 1));
+  TEST_EXPECTED_VALUE(0, (ClampedNumeric<Dst>(0) * 0));
+  if (numeric_limits<Dst>::is_signed) {
+    TEST_EXPECTED_VALUE(0, (ClampedNumeric<Dst>(-1) * 0));
+    TEST_EXPECTED_VALUE(0, (ClampedNumeric<Dst>(0) * -1));
+    TEST_EXPECTED_VALUE(-2, (ClampedNumeric<Dst>(-1) * 2));
+  } else {
+    TEST_EXPECTED_VALUE(DstLimits::Underflow(),
+                        ClampedNumeric<Dst>(DstLimits::max()) * -2);
+    TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>(DstLimits::max()) *
+                               ClampedNumeric<uintmax_t>(-2));
+  }
+  TEST_EXPECTED_VALUE(DstLimits::Overflow(),
+                      ClampedNumeric<Dst>(DstLimits::max()) * DstLimits::max());
+
+  // Generic division.
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>() / 1);
+  TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) / 1);
+  TEST_EXPECTED_VALUE(DstLimits::lowest() / 2,
+                      CheckedNumeric<Dst>(DstLimits::lowest()) / 2);
+  TEST_EXPECTED_VALUE(DstLimits::max() / 2,
+                      CheckedNumeric<Dst>(DstLimits::max()) / 2);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) / 0);
+
+  TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>() / 1);
+  TEST_EXPECTED_VALUE(1, ClampedNumeric<Dst>(1) / 1);
+  TEST_EXPECTED_VALUE(DstLimits::lowest() / 2,
+                      ClampedNumeric<Dst>(DstLimits::lowest()) / 2);
+  TEST_EXPECTED_VALUE(DstLimits::max() / 2,
+                      ClampedNumeric<Dst>(DstLimits::max()) / 2);
+  TEST_EXPECTED_VALUE(DstLimits::Overflow(), ClampedNumeric<Dst>(1) / 0);
+  TEST_EXPECTED_VALUE(DstLimits::Underflow(), ClampedNumeric<Dst>(-1) / 0);
+  TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>(0) / 0);
+
+  TestSpecializedArithmetic<Dst>(dst, line);
+}
+
+// Helper macro to wrap displaying the conversion types and line numbers.
+#define TEST_ARITHMETIC(Dst) TestArithmetic<Dst>(#Dst, __LINE__)
+
+TEST(SafeNumerics, SignedIntegerMath) {
+  TEST_ARITHMETIC(int8_t);
+  TEST_ARITHMETIC(int16_t);
+  TEST_ARITHMETIC(int);
+  TEST_ARITHMETIC(intptr_t);
+  TEST_ARITHMETIC(intmax_t);
+}
+
+TEST(SafeNumerics, UnsignedIntegerMath) {
+  TEST_ARITHMETIC(uint8_t);
+  TEST_ARITHMETIC(uint16_t);
+  TEST_ARITHMETIC(unsigned int);
+  TEST_ARITHMETIC(uintptr_t);
+  TEST_ARITHMETIC(uintmax_t);
+}
+
+TEST(SafeNumerics, FloatingPointMath) {
+  TEST_ARITHMETIC(float);
+  TEST_ARITHMETIC(double);
+}
+
+// Enumerates the five different conversion types we need to test.
+enum NumericConversionType {
+  SIGN_PRESERVING_VALUE_PRESERVING,
+  SIGN_PRESERVING_NARROW,
+  SIGN_TO_UNSIGN_WIDEN_OR_EQUAL,
+  SIGN_TO_UNSIGN_NARROW,
+  UNSIGN_TO_SIGN_NARROW_OR_EQUAL,
+};
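+
+// Illustrative pairings (assumed examples, not an exhaustive list):
+// int16_t -> int32_t is SIGN_PRESERVING_VALUE_PRESERVING, int32_t -> int16_t
+// is SIGN_PRESERVING_NARROW, int16_t -> uint32_t is
+// SIGN_TO_UNSIGN_WIDEN_OR_EQUAL, int32_t -> uint16_t is SIGN_TO_UNSIGN_NARROW,
+// and uint32_t -> int32_t is UNSIGN_TO_SIGN_NARROW_OR_EQUAL.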
+
+// Template covering the different conversion tests.
+template <typename Dst, typename Src, NumericConversionType conversion>
+struct TestNumericConversion {};
+
+enum RangeConstraint {
+  RANGE_VALID = 0x0,      // Value can be represented by the destination type.
+  RANGE_UNDERFLOW = 0x1,  // Value would underflow.
+  RANGE_OVERFLOW = 0x2,   // Value would overflow.
+  RANGE_INVALID = RANGE_UNDERFLOW | RANGE_OVERFLOW  // Invalid (i.e. NaN).
+};
+
+// These are some wrappers to make the tests a bit cleaner.
+constexpr RangeConstraint RangeCheckToEnum(const RangeCheck constraint) {
+  return static_cast<RangeConstraint>(
+      static_cast<int>(constraint.IsOverflowFlagSet()) << 1 |
+      static_cast<int>(constraint.IsUnderflowFlagSet()));
+}
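+// For instance, a RangeCheck with only the overflow flag set maps to
+// 0b10 == RANGE_OVERFLOW, and one with both flags set maps to
+// 0b11 == RANGE_INVALID (the NaN case).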
+
+// EXPECT_EQ wrappers providing specific detail on test failures.
+#define TEST_EXPECTED_RANGE(expected, actual)                               \
+  EXPECT_EQ(expected,                                                       \
+            RangeCheckToEnum(DstRangeRelationToSrcRange<Dst>(actual)))      \
+      << "Conversion test: " << src << " value " << actual << " to " << dst \
+      << " on line " << line
+
+template <typename Dst, typename Src>
+void TestStrictComparison(const char* dst, const char* src, int line) {
+  using DstLimits = numeric_limits<Dst>;
+  using SrcLimits = numeric_limits<Src>;
+  static_assert(StrictNumeric<Src>(SrcLimits::lowest()) < DstLimits::max(), "");
+  static_assert(StrictNumeric<Src>(SrcLimits::lowest()) < SrcLimits::max(), "");
+  static_assert(!(StrictNumeric<Src>(SrcLimits::lowest()) >= DstLimits::max()),
+                "");
+  static_assert(!(StrictNumeric<Src>(SrcLimits::lowest()) >= SrcLimits::max()),
+                "");
+  static_assert(StrictNumeric<Src>(SrcLimits::lowest()) <= DstLimits::max(),
+                "");
+  static_assert(StrictNumeric<Src>(SrcLimits::lowest()) <= SrcLimits::max(),
+                "");
+  static_assert(!(StrictNumeric<Src>(SrcLimits::lowest()) > DstLimits::max()),
+                "");
+  static_assert(!(StrictNumeric<Src>(SrcLimits::lowest()) > SrcLimits::max()),
+                "");
+  static_assert(StrictNumeric<Src>(SrcLimits::max()) > DstLimits::lowest(), "");
+  static_assert(StrictNumeric<Src>(SrcLimits::max()) > SrcLimits::lowest(), "");
+  static_assert(!(StrictNumeric<Src>(SrcLimits::max()) <= DstLimits::lowest()),
+                "");
+  static_assert(!(StrictNumeric<Src>(SrcLimits::max()) <= SrcLimits::lowest()),
+                "");
+  static_assert(StrictNumeric<Src>(SrcLimits::max()) >= DstLimits::lowest(),
+                "");
+  static_assert(StrictNumeric<Src>(SrcLimits::max()) >= SrcLimits::lowest(),
+                "");
+  static_assert(!(StrictNumeric<Src>(SrcLimits::max()) < DstLimits::lowest()),
+                "");
+  static_assert(!(StrictNumeric<Src>(SrcLimits::max()) < SrcLimits::lowest()),
+                "");
+  static_assert(StrictNumeric<Src>(static_cast<Src>(1)) == static_cast<Dst>(1),
+                "");
+  static_assert(StrictNumeric<Src>(static_cast<Src>(1)) != static_cast<Dst>(0),
+                "");
+  static_assert(StrictNumeric<Src>(SrcLimits::max()) != static_cast<Dst>(0),
+                "");
+  static_assert(StrictNumeric<Src>(SrcLimits::max()) != DstLimits::lowest(),
+                "");
+  static_assert(
+      !(StrictNumeric<Src>(static_cast<Src>(1)) != static_cast<Dst>(1)), "");
+  static_assert(
+      !(StrictNumeric<Src>(static_cast<Src>(1)) == static_cast<Dst>(0)), "");
+
+  // Due to differences in float handling between compilers, these aren't
+  // compile-time constants everywhere. So, we use run-time tests.
+  EXPECT_EQ(
+      SrcLimits::max(),
+      MakeCheckedNum(SrcLimits::max()).Max(DstLimits::lowest()).ValueOrDie());
+  EXPECT_EQ(
+      DstLimits::max(),
+      MakeCheckedNum(SrcLimits::lowest()).Max(DstLimits::max()).ValueOrDie());
+  EXPECT_EQ(
+      DstLimits::lowest(),
+      MakeCheckedNum(SrcLimits::max()).Min(DstLimits::lowest()).ValueOrDie());
+  EXPECT_EQ(
+      SrcLimits::lowest(),
+      MakeCheckedNum(SrcLimits::lowest()).Min(DstLimits::max()).ValueOrDie());
+  EXPECT_EQ(SrcLimits::lowest(), CheckMin(MakeStrictNum(1), MakeCheckedNum(0),
+                                          DstLimits::max(), SrcLimits::lowest())
+                                     .ValueOrDie());
+  EXPECT_EQ(DstLimits::max(), CheckMax(MakeStrictNum(1), MakeCheckedNum(0),
+                                       DstLimits::max(), SrcLimits::lowest())
+                                  .ValueOrDie());
+
+  EXPECT_EQ(SrcLimits::max(),
+            MakeClampedNum(SrcLimits::max()).Max(DstLimits::lowest()));
+  EXPECT_EQ(DstLimits::max(),
+            MakeClampedNum(SrcLimits::lowest()).Max(DstLimits::max()));
+  EXPECT_EQ(DstLimits::lowest(),
+            MakeClampedNum(SrcLimits::max()).Min(DstLimits::lowest()));
+  EXPECT_EQ(SrcLimits::lowest(),
+            MakeClampedNum(SrcLimits::lowest()).Min(DstLimits::max()));
+  EXPECT_EQ(SrcLimits::lowest(),
+            ClampMin(MakeStrictNum(1), MakeClampedNum(0), DstLimits::max(),
+                     SrcLimits::lowest()));
+  EXPECT_EQ(DstLimits::max(), ClampMax(MakeStrictNum(1), MakeClampedNum(0),
+                                       DstLimits::max(), SrcLimits::lowest()));
+
+  if (IsValueInRangeForNumericType<Dst>(SrcLimits::max())) {
+    TEST_EXPECTED_VALUE(Dst(SrcLimits::max()), (CommonMax<Dst, Src>()));
+    TEST_EXPECTED_VALUE(Dst(SrcLimits::max()),
+                        (CommonMaxOrMin<Dst, Src>(false)));
+  } else {
+    TEST_EXPECTED_VALUE(DstLimits::max(), (CommonMax<Dst, Src>()));
+    TEST_EXPECTED_VALUE(DstLimits::max(), (CommonMaxOrMin<Dst, Src>(false)));
+  }
+
+  if (IsValueInRangeForNumericType<Dst>(SrcLimits::lowest())) {
+    TEST_EXPECTED_VALUE(Dst(SrcLimits::lowest()), (CommonMin<Dst, Src>()));
+    TEST_EXPECTED_VALUE(Dst(SrcLimits::lowest()),
+                        (CommonMaxOrMin<Dst, Src>(true)));
+  } else {
+    TEST_EXPECTED_VALUE(DstLimits::lowest(), (CommonMin<Dst, Src>()));
+    TEST_EXPECTED_VALUE(DstLimits::lowest(), (CommonMaxOrMin<Dst, Src>(true)));
+  }
+}
+
+template <typename Dst, typename Src>
+struct TestNumericConversion<Dst, Src, SIGN_PRESERVING_VALUE_PRESERVING> {
+  static void Test(const char* dst, const char* src, int line) {
+    using SrcLimits = SaturationDefaultLimits<Src>;
+    using DstLimits = SaturationDefaultLimits<Dst>;
+    // Integral to floating.
+    static_assert((DstLimits::is_iec559 && SrcLimits::is_integer) ||
+                      // Not floating to integral and...
+                      (!(DstLimits::is_integer && SrcLimits::is_iec559) &&
+                       // Same sign, and the source is narrower or the same.
+                       ((SrcLimits::is_signed == DstLimits::is_signed &&
+                         MaxExponent<Dst>::value >= MaxExponent<Src>::value) ||
+                        // Or a signed destination and a source no wider.
+                        (DstLimits::is_signed &&
+                         MaxExponent<Dst>::value >= MaxExponent<Src>::value))),
+                  "Comparison must be sign preserving and value preserving");
+
+    TestStrictComparison<Dst, Src>(dst, src, line);
+
+    const CheckedNumeric<Dst> checked_dst = SrcLimits::max();
+    const ClampedNumeric<Dst> clamped_dst = SrcLimits::max();
+    TEST_EXPECTED_SUCCESS(checked_dst);
+    TEST_EXPECTED_VALUE(Dst(SrcLimits::max()), clamped_dst);
+    if (MaxExponent<Dst>::value > MaxExponent<Src>::value) {
+      if (MaxExponent<Dst>::value >= MaxExponent<Src>::value * 2 - 1) {
+        // Destination is at least twice as wide as the source.
+        TEST_EXPECTED_SUCCESS(SrcLimits::max() * checked_dst);
+        TEST_EXPECTED_VALUE(SrcLimits::max() * clamped_dst,
+                            Dst(SrcLimits::max()) * SrcLimits::max());
+      } else {  // Larger, but not at least twice as large.
+        TEST_EXPECTED_FAILURE(SrcLimits::max() * checked_dst);
+        TEST_EXPECTED_SUCCESS(checked_dst + 1);
+        TEST_EXPECTED_VALUE(DstLimits::Overflow(),
+                            SrcLimits::max() * clamped_dst);
+        TEST_EXPECTED_VALUE(Dst(SrcLimits::max()) + Dst(1),
+                            clamped_dst + Dst(1));
+      }
+    } else {  // Same width type.
+      TEST_EXPECTED_FAILURE(checked_dst + 1);
+      TEST_EXPECTED_VALUE(DstLimits::Overflow(), clamped_dst + Dst(1));
+    }
+
+    TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::max());
+    TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(1));
+    if (SrcLimits::is_iec559) {
+      TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::max() * static_cast<Src>(-1));
+      TEST_EXPECTED_RANGE(RANGE_OVERFLOW, SrcLimits::infinity());
+      TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::infinity() * -1);
+      TEST_EXPECTED_RANGE(RANGE_INVALID, SrcLimits::quiet_NaN());
+    } else if (numeric_limits<Src>::is_signed) {
+      // This block reverses the Src to Dst relationship so we don't have to
+      // complicate the test macros.
+      if (!std::is_same<Src, Dst>::value) {
+        TEST_EXPECTED_SUCCESS(CheckDiv(SrcLimits::lowest(), Dst(-1)));
+      }
+      TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(-1));
+      TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::lowest());
+    }
+  }
+};
+
+template <typename Dst, typename Src>
+struct TestNumericConversion<Dst, Src, SIGN_PRESERVING_NARROW> {
+  static void Test(const char* dst, const char* src, int line) {
+    using SrcLimits = SaturationDefaultLimits<Src>;
+    using DstLimits = SaturationDefaultLimits<Dst>;
+    static_assert(SrcLimits::is_signed == DstLimits::is_signed,
+                  "Destination and source sign must be the same");
+    static_assert(MaxExponent<Dst>::value <= MaxExponent<Src>::value,
+                  "Destination must be narrower than source");
+
+    TestStrictComparison<Dst, Src>(dst, src, line);
+
+    const CheckedNumeric<Dst> checked_dst;
+    TEST_EXPECTED_FAILURE(checked_dst + SrcLimits::max());
+    TEST_EXPECTED_VALUE(1, checked_dst + Src(1));
+    TEST_EXPECTED_FAILURE(checked_dst - SrcLimits::max());
+
+    ClampedNumeric<Dst> clamped_dst;
+    TEST_EXPECTED_VALUE(DstLimits::Overflow(), clamped_dst + SrcLimits::max());
+    TEST_EXPECTED_VALUE(1, clamped_dst + Src(1));
+    TEST_EXPECTED_VALUE(DstLimits::Underflow(), clamped_dst - SrcLimits::max());
+    clamped_dst += SrcLimits::max();
+    TEST_EXPECTED_VALUE(DstLimits::Overflow(), clamped_dst);
+    clamped_dst = DstLimits::max();
+    clamped_dst += SrcLimits::max();
+    TEST_EXPECTED_VALUE(DstLimits::Overflow(), clamped_dst);
+    clamped_dst = DstLimits::max();
+    clamped_dst -= SrcLimits::max();
+    TEST_EXPECTED_VALUE(DstLimits::Underflow(), clamped_dst);
+    clamped_dst = 0;
+
+    TEST_EXPECTED_RANGE(RANGE_OVERFLOW, SrcLimits::max());
+    TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(1));
+    if (SrcLimits::is_iec559) {
+      TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::max() * -1);
+      TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(-1));
+      TEST_EXPECTED_RANGE(RANGE_OVERFLOW, SrcLimits::infinity());
+      TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::infinity() * -1);
+      TEST_EXPECTED_RANGE(RANGE_INVALID, SrcLimits::quiet_NaN());
+      if (DstLimits::is_integer) {
+        if (SrcLimits::digits < DstLimits::digits) {
+          TEST_EXPECTED_RANGE(RANGE_OVERFLOW,
+                              static_cast<Src>(DstLimits::max()));
+        } else {
+          TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(DstLimits::max()));
+        }
+        TEST_EXPECTED_RANGE(
+            RANGE_VALID,
+            static_cast<Src>(GetMaxConvertibleToFloat<Src, Dst>()));
+        TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(DstLimits::lowest()));
+      }
+    } else if (SrcLimits::is_signed) {
+      TEST_EXPECTED_VALUE(-1, checked_dst - static_cast<Src>(1));
+      TEST_EXPECTED_VALUE(-1, clamped_dst - static_cast<Src>(1));
+      TEST_EXPECTED_VALUE(Src(Src(0) - DstLimits::lowest()),
+                          ClampDiv(DstLimits::lowest(), Src(-1)));
+      TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::lowest());
+      TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(-1));
+    } else {
+      TEST_EXPECTED_FAILURE(checked_dst - static_cast<Src>(1));
+      TEST_EXPECTED_VALUE(Dst(0), clamped_dst - static_cast<Src>(1));
+      TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::lowest());
+    }
+  }
+};
+
+template <typename Dst, typename Src>
+struct TestNumericConversion<Dst, Src, SIGN_TO_UNSIGN_WIDEN_OR_EQUAL> {
+  static void Test(const char* dst, const char* src, int line) {
+    using SrcLimits = SaturationDefaultLimits<Src>;
+    using DstLimits = SaturationDefaultLimits<Dst>;
+    static_assert(MaxExponent<Dst>::value >= MaxExponent<Src>::value,
+                  "Destination must be equal or wider than source.");
+    static_assert(SrcLimits::is_signed, "Source must be signed");
+    static_assert(!DstLimits::is_signed, "Destination must be unsigned");
+
+    TestStrictComparison<Dst, Src>(dst, src, line);
+
+    const CheckedNumeric<Dst> checked_dst;
+    TEST_EXPECTED_VALUE(SrcLimits::max(), checked_dst + SrcLimits::max());
+    TEST_EXPECTED_FAILURE(checked_dst + static_cast<Src>(-1));
+    TEST_EXPECTED_SUCCESS(checked_dst * static_cast<Src>(-1));
+    TEST_EXPECTED_FAILURE(checked_dst + SrcLimits::lowest());
+    TEST_EXPECTED_VALUE(Dst(0), CheckDiv(Dst(0), Src(-1)));
+
+    const ClampedNumeric<Dst> clamped_dst;
+    TEST_EXPECTED_VALUE(SrcLimits::max(), clamped_dst + SrcLimits::max());
+    TEST_EXPECTED_VALUE(DstLimits::Underflow(),
+                        clamped_dst + static_cast<Src>(-1));
+    TEST_EXPECTED_VALUE(0, clamped_dst * static_cast<Src>(-1));
+    TEST_EXPECTED_VALUE(DstLimits::Underflow(),
+                        clamped_dst + SrcLimits::lowest());
+
+    TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::lowest());
+    TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::max());
+    TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(1));
+    TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, static_cast<Src>(-1));
+  }
+};
+
+template <typename Dst, typename Src>
+struct TestNumericConversion<Dst, Src, SIGN_TO_UNSIGN_NARROW> {
+  static void Test(const char* dst, const char* src, int line) {
+    using SrcLimits = SaturationDefaultLimits<Src>;
+    using DstLimits = SaturationDefaultLimits<Dst>;
+    static_assert(MaxExponent<Dst>::value < MaxExponent<Src>::value,
+                  "Destination must be narrower than source.");
+    static_assert(SrcLimits::is_signed, "Source must be signed.");
+    static_assert(!DstLimits::is_signed, "Destination must be unsigned.");
+
+    TestStrictComparison<Dst, Src>(dst, src, line);
+
+    const CheckedNumeric<Dst> checked_dst;
+    TEST_EXPECTED_VALUE(1, checked_dst + static_cast<Src>(1));
+    TEST_EXPECTED_FAILURE(checked_dst + SrcLimits::max());
+    TEST_EXPECTED_FAILURE(checked_dst + static_cast<Src>(-1));
+    TEST_EXPECTED_FAILURE(checked_dst + SrcLimits::lowest());
+
+    ClampedNumeric<Dst> clamped_dst;
+    TEST_EXPECTED_VALUE(1, clamped_dst + static_cast<Src>(1));
+    TEST_EXPECTED_VALUE(DstLimits::Overflow(), clamped_dst + SrcLimits::max());
+    TEST_EXPECTED_VALUE(DstLimits::Underflow(),
+                        clamped_dst + static_cast<Src>(-1));
+    TEST_EXPECTED_VALUE(DstLimits::Underflow(),
+                        clamped_dst + SrcLimits::lowest());
+    clamped_dst += SrcLimits::max();
+    TEST_EXPECTED_VALUE(DstLimits::Overflow(), clamped_dst);
+    clamped_dst = DstLimits::max();
+    clamped_dst += SrcLimits::max();
+    TEST_EXPECTED_VALUE(DstLimits::Overflow(), clamped_dst);
+    clamped_dst = DstLimits::max();
+    clamped_dst -= SrcLimits::max();
+    TEST_EXPECTED_VALUE(DstLimits::Underflow(), clamped_dst);
+    clamped_dst = 0;
+
+    TEST_EXPECTED_RANGE(RANGE_OVERFLOW, SrcLimits::max());
+    TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(1));
+    TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, static_cast<Src>(-1));
+
+    // Additional saturation tests.
+    EXPECT_EQ(DstLimits::max(), saturated_cast<Dst>(SrcLimits::max()));
+    EXPECT_EQ(DstLimits::lowest(), saturated_cast<Dst>(SrcLimits::lowest()));
+
+    if (SrcLimits::is_iec559) {
+      EXPECT_EQ(Dst(0), saturated_cast<Dst>(SrcLimits::quiet_NaN()));
+
+      TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::max() * -1);
+      TEST_EXPECTED_RANGE(RANGE_OVERFLOW, SrcLimits::infinity());
+      TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::infinity() * -1);
+      TEST_EXPECTED_RANGE(RANGE_INVALID, SrcLimits::quiet_NaN());
+      if (DstLimits::is_integer) {
+        if (SrcLimits::digits < DstLimits::digits) {
+          TEST_EXPECTED_RANGE(RANGE_OVERFLOW,
+                              static_cast<Src>(DstLimits::max()));
+        } else {
+          TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(DstLimits::max()));
+        }
+        TEST_EXPECTED_RANGE(
+            RANGE_VALID,
+            static_cast<Src>(GetMaxConvertibleToFloat<Src, Dst>()));
+        TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(DstLimits::lowest()));
+      }
+    } else {
+      TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::lowest());
+    }
+  }
+};
+
+template <typename Dst, typename Src>
+struct TestNumericConversion<Dst, Src, UNSIGN_TO_SIGN_NARROW_OR_EQUAL> {
+  static void Test(const char* dst, const char* src, int line) {
+    using SrcLimits = SaturationDefaultLimits<Src>;
+    using DstLimits = SaturationDefaultLimits<Dst>;
+    static_assert(MaxExponent<Dst>::value <= MaxExponent<Src>::value,
+                  "Destination must be narrower or equal to source.");
+    static_assert(!SrcLimits::is_signed, "Source must be unsigned.");
+    static_assert(DstLimits::is_signed, "Destination must be signed.");
+
+    TestStrictComparison<Dst, Src>(dst, src, line);
+
+    const CheckedNumeric<Dst> checked_dst;
+    TEST_EXPECTED_VALUE(1, checked_dst + static_cast<Src>(1));
+    TEST_EXPECTED_FAILURE(checked_dst + SrcLimits::max());
+    TEST_EXPECTED_VALUE(SrcLimits::lowest(), checked_dst + SrcLimits::lowest());
+
+    const ClampedNumeric<Dst> clamped_dst;
+    TEST_EXPECTED_VALUE(1, clamped_dst + static_cast<Src>(1));
+    TEST_EXPECTED_VALUE(DstLimits::Overflow(), clamped_dst + SrcLimits::max());
+    TEST_EXPECTED_VALUE(SrcLimits::lowest(), clamped_dst + SrcLimits::lowest());
+
+    TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::lowest());
+    TEST_EXPECTED_RANGE(RANGE_OVERFLOW, SrcLimits::max());
+    TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(1));
+
+    // Additional saturation tests.
+    EXPECT_EQ(DstLimits::max(), saturated_cast<Dst>(SrcLimits::max()));
+    EXPECT_EQ(Dst(0), saturated_cast<Dst>(SrcLimits::lowest()));
+  }
+};
+
+// Helper macro that forwards the conversion type names and the invoking line
+// number so they can be displayed in failure messages.
+#define TEST_NUMERIC_CONVERSION(d, s, t) \
+  TestNumericConversion<d, s, t>::Test(#d, #s, __LINE__)
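+// For example, TEST_NUMERIC_CONVERSION(uint8_t, int, SIGN_TO_UNSIGN_NARROW)
+// expands to
+// TestNumericConversion<uint8_t, int, SIGN_TO_UNSIGN_NARROW>::Test(
+//     "uint8_t", "int", __LINE__).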
+
+TEST(SafeNumerics, IntMinOperations) {
+  TEST_NUMERIC_CONVERSION(int8_t, int8_t, SIGN_PRESERVING_VALUE_PRESERVING);
+  TEST_NUMERIC_CONVERSION(uint8_t, uint8_t, SIGN_PRESERVING_VALUE_PRESERVING);
+
+  TEST_NUMERIC_CONVERSION(int8_t, int16_t, SIGN_PRESERVING_NARROW);
+  TEST_NUMERIC_CONVERSION(int8_t, int, SIGN_PRESERVING_NARROW);
+  TEST_NUMERIC_CONVERSION(uint8_t, uint16_t, SIGN_PRESERVING_NARROW);
+  TEST_NUMERIC_CONVERSION(uint8_t, unsigned int, SIGN_PRESERVING_NARROW);
+  TEST_NUMERIC_CONVERSION(int8_t, float, SIGN_PRESERVING_NARROW);
+
+  TEST_NUMERIC_CONVERSION(uint8_t, int8_t, SIGN_TO_UNSIGN_WIDEN_OR_EQUAL);
+
+  TEST_NUMERIC_CONVERSION(uint8_t, int16_t, SIGN_TO_UNSIGN_NARROW);
+  TEST_NUMERIC_CONVERSION(uint8_t, int, SIGN_TO_UNSIGN_NARROW);
+  TEST_NUMERIC_CONVERSION(uint8_t, intmax_t, SIGN_TO_UNSIGN_NARROW);
+  TEST_NUMERIC_CONVERSION(uint8_t, float, SIGN_TO_UNSIGN_NARROW);
+
+  TEST_NUMERIC_CONVERSION(int8_t, uint16_t, UNSIGN_TO_SIGN_NARROW_OR_EQUAL);
+  TEST_NUMERIC_CONVERSION(int8_t, unsigned int, UNSIGN_TO_SIGN_NARROW_OR_EQUAL);
+  TEST_NUMERIC_CONVERSION(int8_t, uintmax_t, UNSIGN_TO_SIGN_NARROW_OR_EQUAL);
+}
+
+TEST(SafeNumerics, Int16Operations) {
+  TEST_NUMERIC_CONVERSION(int16_t, int16_t, SIGN_PRESERVING_VALUE_PRESERVING);
+  TEST_NUMERIC_CONVERSION(uint16_t, uint16_t, SIGN_PRESERVING_VALUE_PRESERVING);
+
+  TEST_NUMERIC_CONVERSION(int16_t, int, SIGN_PRESERVING_NARROW);
+  TEST_NUMERIC_CONVERSION(uint16_t, unsigned int, SIGN_PRESERVING_NARROW);
+  TEST_NUMERIC_CONVERSION(int16_t, float, SIGN_PRESERVING_NARROW);
+
+  TEST_NUMERIC_CONVERSION(uint16_t, int16_t, SIGN_TO_UNSIGN_WIDEN_OR_EQUAL);
+
+  TEST_NUMERIC_CONVERSION(uint16_t, int, SIGN_TO_UNSIGN_NARROW);
+  TEST_NUMERIC_CONVERSION(uint16_t, intmax_t, SIGN_TO_UNSIGN_NARROW);
+  TEST_NUMERIC_CONVERSION(uint16_t, float, SIGN_TO_UNSIGN_NARROW);
+
+  TEST_NUMERIC_CONVERSION(int16_t, unsigned int,
+                          UNSIGN_TO_SIGN_NARROW_OR_EQUAL);
+  TEST_NUMERIC_CONVERSION(int16_t, uintmax_t, UNSIGN_TO_SIGN_NARROW_OR_EQUAL);
+}
+
+TEST(SafeNumerics, IntOperations) {
+  TEST_NUMERIC_CONVERSION(int, int, SIGN_PRESERVING_VALUE_PRESERVING);
+  TEST_NUMERIC_CONVERSION(unsigned int, unsigned int,
+                          SIGN_PRESERVING_VALUE_PRESERVING);
+  TEST_NUMERIC_CONVERSION(int, int8_t, SIGN_PRESERVING_VALUE_PRESERVING);
+  TEST_NUMERIC_CONVERSION(unsigned int, uint8_t,
+                          SIGN_PRESERVING_VALUE_PRESERVING);
+  TEST_NUMERIC_CONVERSION(int, uint8_t, SIGN_PRESERVING_VALUE_PRESERVING);
+
+  TEST_NUMERIC_CONVERSION(int, intmax_t, SIGN_PRESERVING_NARROW);
+  TEST_NUMERIC_CONVERSION(unsigned int, uintmax_t, SIGN_PRESERVING_NARROW);
+  TEST_NUMERIC_CONVERSION(int, float, SIGN_PRESERVING_NARROW);
+  TEST_NUMERIC_CONVERSION(int, double, SIGN_PRESERVING_NARROW);
+
+  TEST_NUMERIC_CONVERSION(unsigned int, int, SIGN_TO_UNSIGN_WIDEN_OR_EQUAL);
+  TEST_NUMERIC_CONVERSION(unsigned int, int8_t, SIGN_TO_UNSIGN_WIDEN_OR_EQUAL);
+
+  TEST_NUMERIC_CONVERSION(unsigned int, intmax_t, SIGN_TO_UNSIGN_NARROW);
+  TEST_NUMERIC_CONVERSION(unsigned int, float, SIGN_TO_UNSIGN_NARROW);
+  TEST_NUMERIC_CONVERSION(unsigned int, double, SIGN_TO_UNSIGN_NARROW);
+
+  TEST_NUMERIC_CONVERSION(int, unsigned int, UNSIGN_TO_SIGN_NARROW_OR_EQUAL);
+  TEST_NUMERIC_CONVERSION(int, uintmax_t, UNSIGN_TO_SIGN_NARROW_OR_EQUAL);
+}
+
+TEST(SafeNumerics, IntMaxOperations) {
+  TEST_NUMERIC_CONVERSION(intmax_t, intmax_t, SIGN_PRESERVING_VALUE_PRESERVING);
+  TEST_NUMERIC_CONVERSION(uintmax_t, uintmax_t,
+                          SIGN_PRESERVING_VALUE_PRESERVING);
+  TEST_NUMERIC_CONVERSION(intmax_t, int, SIGN_PRESERVING_VALUE_PRESERVING);
+  TEST_NUMERIC_CONVERSION(uintmax_t, unsigned int,
+                          SIGN_PRESERVING_VALUE_PRESERVING);
+  TEST_NUMERIC_CONVERSION(intmax_t, unsigned int,
+                          SIGN_PRESERVING_VALUE_PRESERVING);
+  TEST_NUMERIC_CONVERSION(intmax_t, uint8_t, SIGN_PRESERVING_VALUE_PRESERVING);
+
+  TEST_NUMERIC_CONVERSION(intmax_t, float, SIGN_PRESERVING_NARROW);
+  TEST_NUMERIC_CONVERSION(intmax_t, double, SIGN_PRESERVING_NARROW);
+
+  TEST_NUMERIC_CONVERSION(uintmax_t, int, SIGN_TO_UNSIGN_WIDEN_OR_EQUAL);
+  TEST_NUMERIC_CONVERSION(uintmax_t, int8_t, SIGN_TO_UNSIGN_WIDEN_OR_EQUAL);
+
+  TEST_NUMERIC_CONVERSION(uintmax_t, float, SIGN_TO_UNSIGN_NARROW);
+  TEST_NUMERIC_CONVERSION(uintmax_t, double, SIGN_TO_UNSIGN_NARROW);
+
+  TEST_NUMERIC_CONVERSION(intmax_t, uintmax_t, UNSIGN_TO_SIGN_NARROW_OR_EQUAL);
+}
+
+TEST(SafeNumerics, FloatOperations) {
+  TEST_NUMERIC_CONVERSION(float, intmax_t, SIGN_PRESERVING_VALUE_PRESERVING);
+  TEST_NUMERIC_CONVERSION(float, uintmax_t, SIGN_PRESERVING_VALUE_PRESERVING);
+  TEST_NUMERIC_CONVERSION(float, int, SIGN_PRESERVING_VALUE_PRESERVING);
+  TEST_NUMERIC_CONVERSION(float, unsigned int,
+                          SIGN_PRESERVING_VALUE_PRESERVING);
+
+  TEST_NUMERIC_CONVERSION(float, double, SIGN_PRESERVING_NARROW);
+}
+
+TEST(SafeNumerics, DoubleOperations) {
+  TEST_NUMERIC_CONVERSION(double, intmax_t, SIGN_PRESERVING_VALUE_PRESERVING);
+  TEST_NUMERIC_CONVERSION(double, uintmax_t, SIGN_PRESERVING_VALUE_PRESERVING);
+  TEST_NUMERIC_CONVERSION(double, int, SIGN_PRESERVING_VALUE_PRESERVING);
+  TEST_NUMERIC_CONVERSION(double, unsigned int,
+                          SIGN_PRESERVING_VALUE_PRESERVING);
+}
+
+TEST(SafeNumerics, SizeTOperations) {
+  TEST_NUMERIC_CONVERSION(size_t, int, SIGN_TO_UNSIGN_WIDEN_OR_EQUAL);
+  TEST_NUMERIC_CONVERSION(int, size_t, UNSIGN_TO_SIGN_NARROW_OR_EQUAL);
+}
+
+// A one-off test to ensure StrictNumeric won't resolve to an incorrect type.
+// If this fails we'll just get a compiler error on an ambiguous overload.
+int TestOverload(int) {  // Overload fails.
+  return 0;
+}
+uint8_t TestOverload(uint8_t) {  // Overload fails.
+  return 0;
+}
+size_t TestOverload(size_t) {  // Overload succeeds.
+  return 0;
+}
+
+static_assert(
+    std::is_same<decltype(TestOverload(StrictNumeric<int>())), int>::value,
+    "");
+static_assert(std::is_same<decltype(TestOverload(StrictNumeric<size_t>())),
+                           size_t>::value,
+              "");
+
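+// Custom saturation handlers used below to exercise the saturated_cast
+// override mechanism: CastTest1 saturates to one-off-the-numeric-limit
+// values (and maps NaN to -1), while CastTest2 clamps everything into the
+// range [1, 10] (and maps NaN to 11).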
+template <typename T>
+struct CastTest1 {
+  static constexpr T NaN() { return -1; }
+  static constexpr T max() { return numeric_limits<T>::max() - 1; }
+  static constexpr T Overflow() { return max(); }
+  static constexpr T lowest() { return numeric_limits<T>::lowest() + 1; }
+  static constexpr T Underflow() { return lowest(); }
+};
+
+template <typename T>
+struct CastTest2 {
+  static constexpr T NaN() { return 11; }
+  static constexpr T max() { return 10; }
+  static constexpr T Overflow() { return max(); }
+  static constexpr T lowest() { return 1; }
+  static constexpr T Underflow() { return lowest(); }
+};
+
+TEST(SafeNumerics, CastTests) {
+// MSVC catches and warns that we're forcing saturation in these tests.
+// Since that's intentional, we need to shut this warning off.
+#if defined(COMPILER_MSVC)
+#pragma warning(disable : 4756)
+#endif
+
+  int small_positive = 1;
+  int small_negative = -1;
+  double double_small = 1.0;
+  double double_large = numeric_limits<double>::max();
+  double double_infinity = numeric_limits<float>::infinity();
+  double double_large_int = numeric_limits<int>::max();
+  double double_small_int = numeric_limits<int>::lowest();
+
+  // Just test that the casts compile, since the other tests cover logic.
+  EXPECT_EQ(0, checked_cast<int>(static_cast<size_t>(0)));
+  EXPECT_EQ(0, strict_cast<int>(static_cast<char>(0)));
+  EXPECT_EQ(0, strict_cast<int>(static_cast<unsigned char>(0)));
+  EXPECT_EQ(0U, strict_cast<unsigned>(static_cast<unsigned char>(0)));
+  EXPECT_EQ(1ULL, static_cast<uint64_t>(StrictNumeric<size_t>(1U)));
+  EXPECT_EQ(1ULL, static_cast<uint64_t>(SizeT(1U)));
+  EXPECT_EQ(1U, static_cast<size_t>(StrictNumeric<unsigned>(1U)));
+
+  EXPECT_TRUE(CheckedNumeric<uint64_t>(StrictNumeric<unsigned>(1U)).IsValid());
+  EXPECT_TRUE(CheckedNumeric<int>(StrictNumeric<unsigned>(1U)).IsValid());
+  EXPECT_FALSE(CheckedNumeric<unsigned>(StrictNumeric<int>(-1)).IsValid());
+
+  EXPECT_TRUE(IsValueNegative(-1));
+  EXPECT_TRUE(IsValueNegative(numeric_limits<int>::lowest()));
+  EXPECT_FALSE(IsValueNegative(numeric_limits<unsigned>::lowest()));
+  EXPECT_TRUE(IsValueNegative(numeric_limits<double>::lowest()));
+  EXPECT_FALSE(IsValueNegative(0));
+  EXPECT_FALSE(IsValueNegative(1));
+  EXPECT_FALSE(IsValueNegative(0u));
+  EXPECT_FALSE(IsValueNegative(1u));
+  EXPECT_FALSE(IsValueNegative(numeric_limits<int>::max()));
+  EXPECT_FALSE(IsValueNegative(numeric_limits<unsigned>::max()));
+  EXPECT_FALSE(IsValueNegative(numeric_limits<double>::max()));
+
+  // These casts and coercions will fail to compile:
+  // EXPECT_EQ(0, strict_cast<int>(static_cast<size_t>(0)));
+  // EXPECT_EQ(0, strict_cast<size_t>(static_cast<int>(0)));
+  // EXPECT_EQ(1ULL, StrictNumeric<size_t>(1));
+  // EXPECT_EQ(1, StrictNumeric<size_t>(1U));
+
+  // Test various saturation corner cases.
+  EXPECT_EQ(saturated_cast<int>(small_negative),
+            static_cast<int>(small_negative));
+  EXPECT_EQ(saturated_cast<int>(small_positive),
+            static_cast<int>(small_positive));
+  EXPECT_EQ(saturated_cast<unsigned>(small_negative), static_cast<unsigned>(0));
+  EXPECT_EQ(saturated_cast<int>(double_small), static_cast<int>(double_small));
+  EXPECT_EQ(saturated_cast<int>(double_large), numeric_limits<int>::max());
+  EXPECT_EQ(saturated_cast<float>(double_large), double_infinity);
+  EXPECT_EQ(saturated_cast<float>(-double_large), -double_infinity);
+  EXPECT_EQ(numeric_limits<int>::lowest(),
+            saturated_cast<int>(double_small_int));
+  EXPECT_EQ(numeric_limits<int>::max(), saturated_cast<int>(double_large_int));
+
+  // Test the saturated cast overrides.
+  using FloatLimits = numeric_limits<float>;
+  using IntLimits = numeric_limits<int>;
+  EXPECT_EQ(-1, (saturated_cast<int, CastTest1>(FloatLimits::quiet_NaN())));
+  EXPECT_EQ(CastTest1<int>::max(),
+            (saturated_cast<int, CastTest1>(FloatLimits::infinity())));
+  EXPECT_EQ(CastTest1<int>::max(),
+            (saturated_cast<int, CastTest1>(FloatLimits::max())));
+  EXPECT_EQ(CastTest1<int>::max(),
+            (saturated_cast<int, CastTest1>(float(IntLimits::max()))));
+  EXPECT_EQ(CastTest1<int>::lowest(),
+            (saturated_cast<int, CastTest1>(-FloatLimits::infinity())));
+  EXPECT_EQ(CastTest1<int>::lowest(),
+            (saturated_cast<int, CastTest1>(FloatLimits::lowest())));
+  EXPECT_EQ(0, (saturated_cast<int, CastTest1>(0.0)));
+  EXPECT_EQ(1, (saturated_cast<int, CastTest1>(1.0)));
+  EXPECT_EQ(-1, (saturated_cast<int, CastTest1>(-1.0)));
+  EXPECT_EQ(0, (saturated_cast<int, CastTest1>(0)));
+  EXPECT_EQ(1, (saturated_cast<int, CastTest1>(1)));
+  EXPECT_EQ(-1, (saturated_cast<int, CastTest1>(-1)));
+  EXPECT_EQ(CastTest1<int>::lowest(),
+            (saturated_cast<int, CastTest1>(float(IntLimits::lowest()))));
+  EXPECT_EQ(11, (saturated_cast<int, CastTest2>(FloatLimits::quiet_NaN())));
+  EXPECT_EQ(10, (saturated_cast<int, CastTest2>(FloatLimits::infinity())));
+  EXPECT_EQ(10, (saturated_cast<int, CastTest2>(FloatLimits::max())));
+  EXPECT_EQ(1, (saturated_cast<int, CastTest2>(-FloatLimits::infinity())));
+  EXPECT_EQ(1, (saturated_cast<int, CastTest2>(FloatLimits::lowest())));
+  EXPECT_EQ(1, (saturated_cast<int, CastTest2>(0U)));
+
+  float not_a_number = std::numeric_limits<float>::infinity() -
+                       std::numeric_limits<float>::infinity();
+  EXPECT_TRUE(std::isnan(not_a_number));
+  EXPECT_EQ(0, saturated_cast<int>(not_a_number));
+
+  // Test the CheckedNumeric value extractions functions.
+  auto int8_min = MakeCheckedNum(numeric_limits<int8_t>::lowest());
+  auto int8_max = MakeCheckedNum(numeric_limits<int8_t>::max());
+  auto double_max = MakeCheckedNum(numeric_limits<double>::max());
+  static_assert(
+      std::is_same<int16_t,
+                   decltype(int8_min.ValueOrDie<int16_t>())::type>::value,
+      "ValueOrDie returning incorrect type.");
+  static_assert(
+      std::is_same<int16_t,
+                   decltype(int8_min.ValueOrDefault<int16_t>(0))::type>::value,
+      "ValueOrDefault returning incorrect type.");
+  EXPECT_FALSE(IsValidForType<uint8_t>(int8_min));
+  EXPECT_TRUE(IsValidForType<uint8_t>(int8_max));
+  EXPECT_EQ(static_cast<int>(numeric_limits<int8_t>::lowest()),
+            ValueOrDieForType<int>(int8_min));
+  EXPECT_TRUE(IsValidForType<uint32_t>(int8_max));
+  EXPECT_EQ(static_cast<int>(numeric_limits<int8_t>::max()),
+            ValueOrDieForType<int>(int8_max));
+  EXPECT_EQ(0, ValueOrDefaultForType<int>(double_max, 0));
+  uint8_t uint8_dest = 0;
+  int16_t int16_dest = 0;
+  double double_dest = 0;
+  EXPECT_TRUE(int8_max.AssignIfValid(&uint8_dest));
+  EXPECT_EQ(static_cast<uint8_t>(numeric_limits<int8_t>::max()), uint8_dest);
+  EXPECT_FALSE(int8_min.AssignIfValid(&uint8_dest));
+  EXPECT_TRUE(int8_max.AssignIfValid(&int16_dest));
+  EXPECT_EQ(static_cast<int16_t>(numeric_limits<int8_t>::max()), int16_dest);
+  EXPECT_TRUE(int8_min.AssignIfValid(&int16_dest));
+  EXPECT_EQ(static_cast<int16_t>(numeric_limits<int8_t>::lowest()), int16_dest);
+  EXPECT_FALSE(double_max.AssignIfValid(&uint8_dest));
+  EXPECT_FALSE(double_max.AssignIfValid(&int16_dest));
+  EXPECT_TRUE(double_max.AssignIfValid(&double_dest));
+  EXPECT_EQ(numeric_limits<double>::max(), double_dest);
+  EXPECT_EQ(1, checked_cast<int>(StrictNumeric<int>(1)));
+  EXPECT_EQ(1, saturated_cast<int>(StrictNumeric<int>(1)));
+  EXPECT_EQ(1, strict_cast<int>(StrictNumeric<int>(1)));
+
+  enum class EnumTest { kOne = 1 };
+  EXPECT_EQ(1, checked_cast<int>(EnumTest::kOne));
+  EXPECT_EQ(1, saturated_cast<int>(EnumTest::kOne));
+  EXPECT_EQ(1, strict_cast<int>(EnumTest::kOne));
+}
+
+TEST(SafeNumerics, IsValueInRangeForNumericType) {
+  EXPECT_TRUE(IsValueInRangeForNumericType<uint32_t>(0));
+  EXPECT_TRUE(IsValueInRangeForNumericType<uint32_t>(1));
+  EXPECT_TRUE(IsValueInRangeForNumericType<uint32_t>(2));
+  EXPECT_FALSE(IsValueInRangeForNumericType<uint32_t>(-1));
+  EXPECT_TRUE(IsValueInRangeForNumericType<uint32_t>(0xffffffffu));
+  EXPECT_TRUE(IsValueInRangeForNumericType<uint32_t>(UINT64_C(0xffffffff)));
+  EXPECT_FALSE(IsValueInRangeForNumericType<uint32_t>(UINT64_C(0x100000000)));
+  EXPECT_FALSE(IsValueInRangeForNumericType<uint32_t>(UINT64_C(0x100000001)));
+  EXPECT_FALSE(IsValueInRangeForNumericType<uint32_t>(
+      std::numeric_limits<int32_t>::lowest()));
+  EXPECT_FALSE(IsValueInRangeForNumericType<uint32_t>(
+      std::numeric_limits<int64_t>::lowest()));
+
+  EXPECT_TRUE(IsValueInRangeForNumericType<int32_t>(0));
+  EXPECT_TRUE(IsValueInRangeForNumericType<int32_t>(1));
+  EXPECT_TRUE(IsValueInRangeForNumericType<int32_t>(2));
+  EXPECT_TRUE(IsValueInRangeForNumericType<int32_t>(-1));
+  EXPECT_TRUE(IsValueInRangeForNumericType<int32_t>(0x7fffffff));
+  EXPECT_TRUE(IsValueInRangeForNumericType<int32_t>(0x7fffffffu));
+  EXPECT_FALSE(IsValueInRangeForNumericType<int32_t>(0x80000000u));
+  EXPECT_FALSE(IsValueInRangeForNumericType<int32_t>(0xffffffffu));
+  EXPECT_FALSE(IsValueInRangeForNumericType<int32_t>(INT64_C(0x80000000)));
+  EXPECT_FALSE(IsValueInRangeForNumericType<int32_t>(INT64_C(0xffffffff)));
+  EXPECT_FALSE(IsValueInRangeForNumericType<int32_t>(INT64_C(0x100000000)));
+  EXPECT_TRUE(IsValueInRangeForNumericType<int32_t>(
+      std::numeric_limits<int32_t>::lowest()));
+  EXPECT_TRUE(IsValueInRangeForNumericType<int32_t>(
+      static_cast<int64_t>(std::numeric_limits<int32_t>::lowest())));
+  EXPECT_FALSE(IsValueInRangeForNumericType<int32_t>(
+      static_cast<int64_t>(std::numeric_limits<int32_t>::lowest()) - 1));
+  EXPECT_FALSE(IsValueInRangeForNumericType<int32_t>(
+      std::numeric_limits<int64_t>::lowest()));
+
+  EXPECT_TRUE(IsValueInRangeForNumericType<uint64_t>(0));
+  EXPECT_TRUE(IsValueInRangeForNumericType<uint64_t>(1));
+  EXPECT_TRUE(IsValueInRangeForNumericType<uint64_t>(2));
+  EXPECT_FALSE(IsValueInRangeForNumericType<uint64_t>(-1));
+  EXPECT_TRUE(IsValueInRangeForNumericType<uint64_t>(0xffffffffu));
+  EXPECT_TRUE(IsValueInRangeForNumericType<uint64_t>(UINT64_C(0xffffffff)));
+  EXPECT_TRUE(IsValueInRangeForNumericType<uint64_t>(UINT64_C(0x100000000)));
+  EXPECT_TRUE(IsValueInRangeForNumericType<uint64_t>(UINT64_C(0x100000001)));
+  EXPECT_FALSE(IsValueInRangeForNumericType<uint64_t>(
+      std::numeric_limits<int32_t>::lowest()));
+  EXPECT_FALSE(IsValueInRangeForNumericType<uint64_t>(INT64_C(-1)));
+  EXPECT_FALSE(IsValueInRangeForNumericType<uint64_t>(
+      std::numeric_limits<int64_t>::lowest()));
+
+  EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(0));
+  EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(1));
+  EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(2));
+  EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(-1));
+  EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(0x7fffffff));
+  EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(0x7fffffffu));
+  EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(0x80000000u));
+  EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(0xffffffffu));
+  EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(INT64_C(0x80000000)));
+  EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(INT64_C(0xffffffff)));
+  EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(INT64_C(0x100000000)));
+  EXPECT_TRUE(
+      IsValueInRangeForNumericType<int64_t>(INT64_C(0x7fffffffffffffff)));
+  EXPECT_TRUE(
+      IsValueInRangeForNumericType<int64_t>(UINT64_C(0x7fffffffffffffff)));
+  EXPECT_FALSE(
+      IsValueInRangeForNumericType<int64_t>(UINT64_C(0x8000000000000000)));
+  EXPECT_FALSE(
+      IsValueInRangeForNumericType<int64_t>(UINT64_C(0xffffffffffffffff)));
+  EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(
+      std::numeric_limits<int32_t>::lowest()));
+  EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(
+      static_cast<int64_t>(std::numeric_limits<int32_t>::lowest())));
+  EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(
+      std::numeric_limits<int64_t>::lowest()));
+}
+
+TEST(SafeNumerics, CompoundNumericOperations) {
+  CheckedNumeric<int> a = 1;
+  CheckedNumeric<int> b = 2;
+  CheckedNumeric<int> c = 3;
+  CheckedNumeric<int> d = 4;
+  a += b;
+  EXPECT_EQ(3, a.ValueOrDie());
+  a -= c;
+  EXPECT_EQ(0, a.ValueOrDie());
+  d /= b;
+  EXPECT_EQ(2, d.ValueOrDie());
+  d *= d;
+  EXPECT_EQ(4, d.ValueOrDie());
+
+  CheckedNumeric<int> too_large = std::numeric_limits<int>::max();
+  EXPECT_TRUE(too_large.IsValid());
+  too_large += d;
+  EXPECT_FALSE(too_large.IsValid());
+  too_large -= d;
+  EXPECT_FALSE(too_large.IsValid());
+  too_large /= d;
+  EXPECT_FALSE(too_large.IsValid());
+}
+
+TEST(SafeNumerics, VariadicNumericOperations) {
+  {  // Synthetic scope to avoid variable naming collisions.
+    auto a = CheckAdd(1, 2UL, MakeCheckedNum(3LL), 4).ValueOrDie();
+    EXPECT_EQ(static_cast<decltype(a)::type>(10), a);
+    auto b = CheckSub(MakeCheckedNum(20.0), 2UL, 4).ValueOrDie();
+    EXPECT_EQ(static_cast<decltype(b)::type>(14.0), b);
+    auto c = CheckMul(20.0, MakeCheckedNum(1), 5, 3UL).ValueOrDie();
+    EXPECT_EQ(static_cast<decltype(c)::type>(300.0), c);
+    auto d = CheckDiv(20.0, 2.0, MakeCheckedNum(5LL), -4).ValueOrDie();
+    EXPECT_EQ(static_cast<decltype(d)::type>(-.5), d);
+    auto e = CheckMod(MakeCheckedNum(20), 3).ValueOrDie();
+    EXPECT_EQ(static_cast<decltype(e)::type>(2), e);
+    auto f = CheckLsh(1, MakeCheckedNum(2)).ValueOrDie();
+    EXPECT_EQ(static_cast<decltype(f)::type>(4), f);
+    auto g = CheckRsh(4, MakeCheckedNum(2)).ValueOrDie();
+    EXPECT_EQ(static_cast<decltype(g)::type>(1), g);
+    auto h = CheckRsh(CheckAdd(1, 1, 1, 1), CheckSub(4, 2)).ValueOrDie();
+    EXPECT_EQ(static_cast<decltype(h)::type>(1), h);
+  }
+
+  {
+    auto a = ClampAdd(1, 2UL, MakeClampedNum(3LL), 4);
+    EXPECT_EQ(static_cast<decltype(a)::type>(10), a);
+    auto b = ClampSub(MakeClampedNum(20.0), 2UL, 4);
+    EXPECT_EQ(static_cast<decltype(b)::type>(14.0), b);
+    auto c = ClampMul(20.0, MakeClampedNum(1), 5, 3UL);
+    EXPECT_EQ(static_cast<decltype(c)::type>(300.0), c);
+    auto d = ClampDiv(20.0, 2.0, MakeClampedNum(5LL), -4);
+    EXPECT_EQ(static_cast<decltype(d)::type>(-.5), d);
+    auto e = ClampMod(MakeClampedNum(20), 3);
+    EXPECT_EQ(static_cast<decltype(e)::type>(2), e);
+    auto f = ClampLsh(1, MakeClampedNum(2U));
+    EXPECT_EQ(static_cast<decltype(f)::type>(4), f);
+    auto g = ClampRsh(4, MakeClampedNum(2U));
+    EXPECT_EQ(static_cast<decltype(g)::type>(1), g);
+    auto h = ClampRsh(ClampAdd(1, 1, 1, 1), ClampSub(4U, 2));
+    EXPECT_EQ(static_cast<decltype(h)::type>(1), h);
+  }
+}
+
+#if defined(__clang__)
+#pragma clang diagnostic pop  // -Winteger-overflow
+#endif
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/sampling_heap_profiler/OWNERS b/base/sampling_heap_profiler/OWNERS
new file mode 100644
index 0000000..87c9661
--- /dev/null
+++ b/base/sampling_heap_profiler/OWNERS
@@ -0,0 +1 @@
+alph@chromium.org
diff --git a/base/sampling_heap_profiler/sampling_heap_profiler.cc b/base/sampling_heap_profiler/sampling_heap_profiler.cc
new file mode 100644
index 0000000..3d7424b
--- /dev/null
+++ b/base/sampling_heap_profiler/sampling_heap_profiler.cc
@@ -0,0 +1,452 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sampling_heap_profiler/sampling_heap_profiler.h"
+
+#include <algorithm>
+#include <cmath>
+#include <utility>
+
+#include "base/allocator/allocator_shim.h"
+#include "base/allocator/buildflags.h"
+#include "base/allocator/partition_allocator/partition_alloc.h"
+#include "base/atomicops.h"
+#include "base/debug/stack_trace.h"
+#include "base/macros.h"
+#include "base/no_destructor.h"
+#include "base/partition_alloc_buildflags.h"
+#include "base/rand_util.h"
+#include "base/threading/thread_local_storage.h"
+#include "build/build_config.h"
+
+namespace base {
+
+using base::allocator::AllocatorDispatch;
+using base::subtle::Atomic32;
+using base::subtle::AtomicWord;
+
+namespace {
+
+// Controls how many top frames to skip when recording a call stack.
+// These frames correspond to the profiler's own frames.
+const uint32_t kSkipBaseAllocatorFrames = 2;
+
+const size_t kDefaultSamplingIntervalBytes = 128 * 1024;
+
+// When true, sample intervals are not randomized. Used for testing.
+bool g_deterministic;
+
+// A positive value if profiling is running, otherwise it's zero.
+Atomic32 g_running;
+
+// Number of lock-free accesses to the samples_ map (reads that cannot cause
+// rehashing) currently being performed.
+Atomic32 g_operations_in_flight;
+
+// Controls whether new incoming lock-free accesses are allowed.
+// When set to true, threads should not enter the lock-free paths.
+Atomic32 g_fast_path_is_closed;
+
+// Sampling interval parameter, the mean value for intervals between samples.
+AtomicWord g_sampling_interval = kDefaultSamplingIntervalBytes;
+
+// Last generated sample ordinal number.
+uint32_t g_last_sample_ordinal = 0;
+
+void (*g_hooks_install_callback)();
+Atomic32 g_hooks_installed;
+
+void* AllocFn(const AllocatorDispatch* self, size_t size, void* context) {
+  void* address = self->next->alloc_function(self->next, size, context);
+  SamplingHeapProfiler::RecordAlloc(address, size, kSkipBaseAllocatorFrames);
+  return address;
+}
+
+void* AllocZeroInitializedFn(const AllocatorDispatch* self,
+                             size_t n,
+                             size_t size,
+                             void* context) {
+  void* address =
+      self->next->alloc_zero_initialized_function(self->next, n, size, context);
+  SamplingHeapProfiler::RecordAlloc(address, n * size,
+                                    kSkipBaseAllocatorFrames);
+  return address;
+}
+
+void* AllocAlignedFn(const AllocatorDispatch* self,
+                     size_t alignment,
+                     size_t size,
+                     void* context) {
+  void* address =
+      self->next->alloc_aligned_function(self->next, alignment, size, context);
+  SamplingHeapProfiler::RecordAlloc(address, size, kSkipBaseAllocatorFrames);
+  return address;
+}
+
+void* ReallocFn(const AllocatorDispatch* self,
+                void* address,
+                size_t size,
+                void* context) {
+  // Note: size == 0 actually performs free.
+  SamplingHeapProfiler::RecordFree(address);
+  address = self->next->realloc_function(self->next, address, size, context);
+  SamplingHeapProfiler::RecordAlloc(address, size, kSkipBaseAllocatorFrames);
+  return address;
+}
+
+void FreeFn(const AllocatorDispatch* self, void* address, void* context) {
+  SamplingHeapProfiler::RecordFree(address);
+  self->next->free_function(self->next, address, context);
+}
+
+size_t GetSizeEstimateFn(const AllocatorDispatch* self,
+                         void* address,
+                         void* context) {
+  return self->next->get_size_estimate_function(self->next, address, context);
+}
+
+unsigned BatchMallocFn(const AllocatorDispatch* self,
+                       size_t size,
+                       void** results,
+                       unsigned num_requested,
+                       void* context) {
+  unsigned num_allocated = self->next->batch_malloc_function(
+      self->next, size, results, num_requested, context);
+  for (unsigned i = 0; i < num_allocated; ++i) {
+    SamplingHeapProfiler::RecordAlloc(results[i], size,
+                                      kSkipBaseAllocatorFrames);
+  }
+  return num_allocated;
+}
+
+void BatchFreeFn(const AllocatorDispatch* self,
+                 void** to_be_freed,
+                 unsigned num_to_be_freed,
+                 void* context) {
+  for (unsigned i = 0; i < num_to_be_freed; ++i)
+    SamplingHeapProfiler::RecordFree(to_be_freed[i]);
+  self->next->batch_free_function(self->next, to_be_freed, num_to_be_freed,
+                                  context);
+}
+
+void FreeDefiniteSizeFn(const AllocatorDispatch* self,
+                        void* address,
+                        size_t size,
+                        void* context) {
+  SamplingHeapProfiler::RecordFree(address);
+  self->next->free_definite_size_function(self->next, address, size, context);
+}
+
+AllocatorDispatch g_allocator_dispatch = {&AllocFn,
+                                          &AllocZeroInitializedFn,
+                                          &AllocAlignedFn,
+                                          &ReallocFn,
+                                          &FreeFn,
+                                          &GetSizeEstimateFn,
+                                          &BatchMallocFn,
+                                          &BatchFreeFn,
+                                          &FreeDefiniteSizeFn,
+                                          nullptr};
+
+#if BUILDFLAG(USE_PARTITION_ALLOC) && !defined(OS_NACL)
+
+void PartitionAllocHook(void* address, size_t size, const char*) {
+  SamplingHeapProfiler::RecordAlloc(address, size);
+}
+
+void PartitionFreeHook(void* address) {
+  SamplingHeapProfiler::RecordFree(address);
+}
+
+#endif  // BUILDFLAG(USE_PARTITION_ALLOC) && !defined(OS_NACL)
+
+ThreadLocalStorage::Slot& AccumulatedBytesTLS() {
+  static base::NoDestructor<base::ThreadLocalStorage::Slot>
+      accumulated_bytes_tls;
+  return *accumulated_bytes_tls;
+}
+
+}  // namespace
+
+SamplingHeapProfiler::Sample::Sample(size_t size,
+                                     size_t total,
+                                     uint32_t ordinal)
+    : size(size), total(total), ordinal(ordinal) {}
+
+SamplingHeapProfiler::Sample::Sample(const Sample&) = default;
+
+SamplingHeapProfiler::Sample::~Sample() = default;
+
+SamplingHeapProfiler* SamplingHeapProfiler::instance_;
+
+SamplingHeapProfiler::SamplingHeapProfiler() {
+  instance_ = this;
+}
+
+// static
+void SamplingHeapProfiler::InitTLSSlot() {
+  // Preallocate the TLS slot early, so it can't cause reentrancy issues
+  // when sampling is started.
+  ignore_result(AccumulatedBytesTLS().Get());
+}
+
+// static
+void SamplingHeapProfiler::InstallAllocatorHooksOnce() {
+  static bool hook_installed = InstallAllocatorHooks();
+  ignore_result(hook_installed);
+}
+
+// static
+bool SamplingHeapProfiler::InstallAllocatorHooks() {
+#if BUILDFLAG(USE_ALLOCATOR_SHIM)
+  base::allocator::InsertAllocatorDispatch(&g_allocator_dispatch);
+#else
+  ignore_result(g_allocator_dispatch);
+  DLOG(WARNING)
+      << "base::allocator shims are not available for memory sampling.";
+#endif  // BUILDFLAG(USE_ALLOCATOR_SHIM)
+
+#if BUILDFLAG(USE_PARTITION_ALLOC) && !defined(OS_NACL)
+  base::PartitionAllocHooks::SetAllocationHook(&PartitionAllocHook);
+  base::PartitionAllocHooks::SetFreeHook(&PartitionFreeHook);
+#endif  // BUILDFLAG(USE_PARTITION_ALLOC) && !defined(OS_NACL)
+
+  int32_t hooks_install_callback_has_been_set =
+      base::subtle::Acquire_CompareAndSwap(&g_hooks_installed, 0, 1);
+  if (hooks_install_callback_has_been_set)
+    g_hooks_install_callback();
+
+  return true;
+}
+
+// static
+void SamplingHeapProfiler::SetHooksInstallCallback(
+    void (*hooks_install_callback)()) {
+  CHECK(!g_hooks_install_callback && hooks_install_callback);
+  g_hooks_install_callback = hooks_install_callback;
+
+  int32_t profiler_has_already_been_initialized =
+      base::subtle::Release_CompareAndSwap(&g_hooks_installed, 0, 1);
+  if (profiler_has_already_been_initialized)
+    g_hooks_install_callback();
+}
+
+uint32_t SamplingHeapProfiler::Start() {
+  InstallAllocatorHooksOnce();
+  base::subtle::Barrier_AtomicIncrement(&g_running, 1);
+  return g_last_sample_ordinal;
+}
+
+void SamplingHeapProfiler::Stop() {
+  AtomicWord count = base::subtle::Barrier_AtomicIncrement(&g_running, -1);
+  CHECK_GE(count, 0);
+}
+
+void SamplingHeapProfiler::SetSamplingInterval(size_t sampling_interval) {
+  // TODO(alph): Reset the sample being collected if running.
+  base::subtle::Release_Store(&g_sampling_interval,
+                              static_cast<AtomicWord>(sampling_interval));
+}
+
+// static
+size_t SamplingHeapProfiler::GetNextSampleInterval(size_t interval) {
+  if (UNLIKELY(g_deterministic))
+    return interval;
+
+  // We sample with a Poisson process, with constant average sampling
+  // interval. This follows the exponential probability distribution with
+  // parameter λ = 1/interval where |interval| is the average number of bytes
+  // between samples.
+  // Let u be a uniformly distributed random number between 0 and 1, then
+  // next_sample = -ln(u) / λ
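+  // For example, with interval = 128 KiB and u = 0.5, this yields
+  // -ln(0.5) * 128 KiB ~ 0.69 * 128 KiB ~ 89 KiB until the next sample.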
+  double uniform = base::RandDouble();
+  double value = -log(uniform) * interval;
+  size_t min_value = sizeof(intptr_t);
+  // We limit the upper bound of a sample interval to make sure we don't have
+  // huge gaps in the sampling stream. The probability of hitting the upper
+  // bound is exp(-20) ~ 2e-9, so it should not skew the distribution.
+  size_t max_value = interval * 20;
+  if (UNLIKELY(value < min_value))
+    return min_value;
+  if (UNLIKELY(value > max_value))
+    return max_value;
+  return static_cast<size_t>(value);
+}
+
+// static
+void SamplingHeapProfiler::RecordAlloc(void* address,
+                                       size_t size,
+                                       uint32_t skip_frames) {
+  if (UNLIKELY(!base::subtle::NoBarrier_Load(&g_running)))
+    return;
+  if (UNLIKELY(base::ThreadLocalStorage::HasBeenDestroyed()))
+    return;
+
+  // TODO(alph): On MacOS it may call the hook several times for a single
+  // allocation. Handle the case.
+
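+  // The TLS slot holds the countdown to the next sample as a negative byte
+  // count; an allocation that brings it to zero or above triggers sampling.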
+  intptr_t accumulated_bytes =
+      reinterpret_cast<intptr_t>(AccumulatedBytesTLS().Get());
+  accumulated_bytes += size;
+  if (LIKELY(accumulated_bytes < 0)) {
+    AccumulatedBytesTLS().Set(reinterpret_cast<void*>(accumulated_bytes));
+    return;
+  }
+
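+  // A single large allocation may cover several mean intervals: attribute one
+  // sample per full interval covered, then draw randomized intervals for the
+  // remainder below.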
+  size_t mean_interval = base::subtle::NoBarrier_Load(&g_sampling_interval);
+  size_t samples = accumulated_bytes / mean_interval;
+  accumulated_bytes %= mean_interval;
+
+  do {
+    accumulated_bytes -= GetNextSampleInterval(mean_interval);
+    ++samples;
+  } while (accumulated_bytes >= 0);
+
+  AccumulatedBytesTLS().Set(reinterpret_cast<void*>(accumulated_bytes));
+
+  instance_->DoRecordAlloc(samples * mean_interval, size, address, skip_frames);
+}
+
+void SamplingHeapProfiler::RecordStackTrace(Sample* sample,
+                                            uint32_t skip_frames) {
+#if !defined(OS_NACL)
+  // TODO(alph): Consider using debug::TraceStackFramePointers. It should be
+  // somewhat faster than base::debug::StackTrace.
+  base::debug::StackTrace trace;
+  size_t count;
+  void* const* addresses = const_cast<void* const*>(trace.Addresses(&count));
+  const uint32_t kSkipProfilerOwnFrames = 2;
+  skip_frames += kSkipProfilerOwnFrames;
+  sample->stack.insert(
+      sample->stack.end(), &addresses[skip_frames],
+      &addresses[std::max(count, static_cast<size_t>(skip_frames))]);
+#endif
+}
+
+void SamplingHeapProfiler::DoRecordAlloc(size_t total_allocated,
+                                         size_t size,
+                                         void* address,
+                                         uint32_t skip_frames) {
+  if (entered_.Get())
+    return;
+  entered_.Set(true);
+  {
+    base::AutoLock lock(mutex_);
+
+    Sample sample(size, total_allocated, ++g_last_sample_ordinal);
+    RecordStackTrace(&sample, skip_frames);
+
+    if (MayRehashOnInsert()) {
+      // Close the fast path as inserting an element into samples_ may cause
+      // rehashing that invalidates iterators affecting all the concurrent
+      // readers.
+      base::subtle::Release_Store(&g_fast_path_is_closed, 1);
+      // Wait until all current readers leave.
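+      // (The inner no-barrier spin avoids issuing a memory barrier on every
+      // iteration; the outer acquire load re-checks with proper ordering.)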
+      while (base::subtle::Acquire_Load(&g_operations_in_flight)) {
+        while (base::subtle::NoBarrier_Load(&g_operations_in_flight)) {
+        }
+      }
+      samples_.emplace(address, std::move(sample));
+      // Open the fast path.
+      base::subtle::Release_Store(&g_fast_path_is_closed, 0);
+    } else {
+      samples_.emplace(address, std::move(sample));
+    }
+
+    for (auto* observer : observers_)
+      observer->SampleAdded(sample.ordinal, size, total_allocated);
+  }
+
+  entered_.Set(false);
+}
+
+// static
+void SamplingHeapProfiler::RecordFree(void* address) {
+  bool maybe_sampled = true;  // Pessimistically assume allocation was sampled.
+  base::subtle::Barrier_AtomicIncrement(&g_operations_in_flight, 1);
+  if (LIKELY(!base::subtle::NoBarrier_Load(&g_fast_path_is_closed))) {
+    maybe_sampled =
+        instance_->samples_.find(address) != instance_->samples_.end();
+  }
+  base::subtle::Barrier_AtomicIncrement(&g_operations_in_flight, -1);
+  if (maybe_sampled)
+    instance_->DoRecordFree(address);
+}
+
+void SamplingHeapProfiler::DoRecordFree(void* address) {
+  if (UNLIKELY(base::ThreadLocalStorage::HasBeenDestroyed()))
+    return;
+  if (entered_.Get())
+    return;
+  entered_.Set(true);
+  {
+    base::AutoLock lock(mutex_);
+    auto it = samples_.find(address);
+    if (it != samples_.end()) {
+      for (auto* observer : observers_)
+        observer->SampleRemoved(it->second.ordinal);
+      samples_.erase(it);
+    }
+  }
+  entered_.Set(false);
+}
+
+bool SamplingHeapProfiler::MayRehashOnInsert() {
+  size_t max_items_before_rehash =
+      std::floor(samples_.bucket_count() * samples_.max_load_factor());
+  // Conservatively use 2 instead of 1 to work around potential rounding
+  // errors.
+  return samples_.size() + 2 >= max_items_before_rehash;
+}
+
+// static
+SamplingHeapProfiler* SamplingHeapProfiler::GetInstance() {
+  static base::NoDestructor<SamplingHeapProfiler> instance;
+  return instance.get();
+}
+
+// static
+void SamplingHeapProfiler::SuppressRandomnessForTest(bool suppress) {
+  g_deterministic = suppress;
+}
+
+void SamplingHeapProfiler::AddSamplesObserver(SamplesObserver* observer) {
+  CHECK(!entered_.Get());
+  entered_.Set(true);
+  {
+    base::AutoLock lock(mutex_);
+    observers_.push_back(observer);
+  }
+  entered_.Set(false);
+}
+
+void SamplingHeapProfiler::RemoveSamplesObserver(SamplesObserver* observer) {
+  CHECK(!entered_.Get());
+  entered_.Set(true);
+  {
+    base::AutoLock lock(mutex_);
+    auto it = std::find(observers_.begin(), observers_.end(), observer);
+    CHECK(it != observers_.end());
+    observers_.erase(it);
+  }
+  entered_.Set(false);
+}
+
+std::vector<SamplingHeapProfiler::Sample> SamplingHeapProfiler::GetSamples(
+    uint32_t profile_id) {
+  CHECK(!entered_.Get());
+  entered_.Set(true);
+  std::vector<Sample> samples;
+  {
+    base::AutoLock lock(mutex_);
+    for (auto& it : samples_) {
+      Sample& sample = it.second;
+      if (sample.ordinal > profile_id)
+        samples.push_back(sample);
+    }
+  }
+  entered_.Set(false);
+  return samples;
+}
+
+}  // namespace base
diff --git a/base/sampling_heap_profiler/sampling_heap_profiler.h b/base/sampling_heap_profiler/sampling_heap_profiler.h
new file mode 100644
index 0000000..3f2f227
--- /dev/null
+++ b/base/sampling_heap_profiler/sampling_heap_profiler.h
@@ -0,0 +1,111 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SAMPLING_HEAP_PROFILER_SAMPLING_HEAP_PROFILER_H_
+#define BASE_SAMPLING_HEAP_PROFILER_SAMPLING_HEAP_PROFILER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <unordered_map>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/thread_local.h"
+
+namespace base {
+
+template <typename T>
+class NoDestructor;
+
+// This class implements sampling profiling of the native memory heap.
+// It hooks into base::allocator and base::PartitionAlloc.
+// When started, it selects and records allocation samples based on
+// the sampling_interval parameter.
+// The recorded samples can then be retrieved using the GetSamples method.
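+//
+// A minimal usage sketch, using the methods declared below:
+//
+//   SamplingHeapProfiler::InitTLSSlot();
+//   SamplingHeapProfiler* profiler = SamplingHeapProfiler::GetInstance();
+//   profiler->SetSamplingInterval(128 * 1024);  // Mean bytes between samples.
+//   uint32_t profile_id = profiler->Start();
+//   // ... run the workload being profiled ...
+//   std::vector<SamplingHeapProfiler::Sample> samples =
+//       profiler->GetSamples(profile_id);
+//   profiler->Stop();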
+class BASE_EXPORT SamplingHeapProfiler {
+ public:
+  class BASE_EXPORT Sample {
+   public:
+    Sample(const Sample&);
+    ~Sample();
+
+    size_t size;   // Allocation size.
+    size_t total;  // Total size attributed to the sample.
+    std::vector<void*> stack;
+
+   private:
+    friend class SamplingHeapProfiler;
+
+    Sample(size_t, size_t total, uint32_t ordinal);
+
+    uint32_t ordinal;
+  };
+
+  class SamplesObserver {
+   public:
+    virtual ~SamplesObserver() = default;
+    virtual void SampleAdded(uint32_t id, size_t size, size_t total) = 0;
+    virtual void SampleRemoved(uint32_t id) = 0;
+  };
+
+  // Must be called early during process initialization. It creates and
+  // reserves a TLS slot.
+  static void InitTLSSlot();
+
+  // This is an entry point for plugging in an external allocator.
+  // The profiler will invoke the provided callback upon initialization.
+  // The callback should install hooks onto the corresponding memory allocator
+  // and make them invoke SamplingHeapProfiler::RecordAlloc and
+  // SamplingHeapProfiler::RecordFree upon corresponding allocation events.
+  //
+  // If this method is called after the profiler is initialized, the callback
+  // is invoked right away.
+  static void SetHooksInstallCallback(void (*hooks_install_callback)());
+
+  void AddSamplesObserver(SamplesObserver*);
+  void RemoveSamplesObserver(SamplesObserver*);
+
+  uint32_t Start();
+  void Stop();
+  void SetSamplingInterval(size_t sampling_interval);
+  static void SuppressRandomnessForTest(bool suppress);
+
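+  // Returns the samples recorded since the Start() call that returned
+  // |profile_id|, i.e. samples whose ordinal is greater than |profile_id|.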
+  std::vector<Sample> GetSamples(uint32_t profile_id);
+
+  static void RecordAlloc(void* address, size_t, uint32_t skip_frames = 0);
+  static void RecordFree(void* address);
+
+  static SamplingHeapProfiler* GetInstance();
+
+ private:
+  SamplingHeapProfiler();
+  ~SamplingHeapProfiler() = delete;
+
+  static void InstallAllocatorHooksOnce();
+  static bool InstallAllocatorHooks();
+  static size_t GetNextSampleInterval(size_t base_interval);
+
+  void DoRecordAlloc(size_t total_allocated,
+                     size_t allocation_size,
+                     void* address,
+                     uint32_t skip_frames);
+  void DoRecordFree(void* address);
+  void RecordStackTrace(Sample*, uint32_t skip_frames);
+  bool MayRehashOnInsert();
+
+  base::ThreadLocalBoolean entered_;
+  base::Lock mutex_;
+  std::unordered_map<void*, Sample> samples_;
+  std::vector<SamplesObserver*> observers_;
+
+  static SamplingHeapProfiler* instance_;
+
+  friend class base::NoDestructor<SamplingHeapProfiler>;
+
+  DISALLOW_COPY_AND_ASSIGN(SamplingHeapProfiler);
+};
+
+}  // namespace base
+
+#endif  // BASE_SAMPLING_HEAP_PROFILER_SAMPLING_HEAP_PROFILER_H_
diff --git a/base/sampling_heap_profiler/sampling_heap_profiler_unittest.cc b/base/sampling_heap_profiler/sampling_heap_profiler_unittest.cc
new file mode 100644
index 0000000..6602e6c
--- /dev/null
+++ b/base/sampling_heap_profiler/sampling_heap_profiler_unittest.cc
@@ -0,0 +1,165 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sampling_heap_profiler/sampling_heap_profiler.h"
+
+#include <stdlib.h>
+
+#include <cinttypes>
+#include <map>
+
+#include "base/allocator/allocator_shim.h"
+#include "base/debug/alias.h"
+#include "base/logging.h"
+#include "base/threading/simple_thread.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+class SamplingHeapProfilerTest : public ::testing::Test {
+#if defined(OS_MACOSX)
+  void SetUp() override { allocator::InitializeAllocatorShim(); }
+#endif
+};
+
+class SamplesCollector : public SamplingHeapProfiler::SamplesObserver {
+ public:
+  explicit SamplesCollector(size_t watch_size) : watch_size_(watch_size) {}
+
+  void SampleAdded(uint32_t id, size_t size, size_t) override {
+    if (sample_added || size != watch_size_)
+      return;
+    sample_id_ = id;
+    sample_added = true;
+  }
+
+  void SampleRemoved(uint32_t id) override {
+    if (id == sample_id_)
+      sample_removed = true;
+  }
+
+  bool sample_added = false;
+  bool sample_removed = false;
+
+ private:
+  size_t watch_size_;
+  uint32_t sample_id_ = 0;
+};
+
+TEST_F(SamplingHeapProfilerTest, CollectSamples) {
+  SamplingHeapProfiler::InitTLSSlot();
+  SamplesCollector collector(10000);
+  SamplingHeapProfiler* profiler = SamplingHeapProfiler::GetInstance();
+  profiler->SuppressRandomnessForTest(true);
+  profiler->SetSamplingInterval(1024);
+  profiler->Start();
+  profiler->AddSamplesObserver(&collector);
+  void* volatile p = malloc(10000);
+  free(p);
+  profiler->Stop();
+  profiler->RemoveSamplesObserver(&collector);
+  CHECK(collector.sample_added);
+  CHECK(collector.sample_removed);
+}
+
+const int kNumberOfAllocations = 10000;
+
+NOINLINE void Allocate1() {
+  void* p = malloc(400);
+  base::debug::Alias(&p);
+}
+
+NOINLINE void Allocate2() {
+  void* p = malloc(700);
+  base::debug::Alias(&p);
+}
+
+NOINLINE void Allocate3() {
+  void* p = malloc(20480);
+  base::debug::Alias(&p);
+}
+
+class MyThread1 : public SimpleThread {
+ public:
+  MyThread1() : SimpleThread("MyThread1") {}
+  void Run() override {
+    for (int i = 0; i < kNumberOfAllocations; ++i)
+      Allocate1();
+  }
+};
+
+class MyThread2 : public SimpleThread {
+ public:
+  MyThread2() : SimpleThread("MyThread2") {}
+  void Run() override {
+    for (int i = 0; i < kNumberOfAllocations; ++i)
+      Allocate2();
+  }
+};
+
+void CheckAllocationPattern(void (*allocate_callback)()) {
+  SamplingHeapProfiler::InitTLSSlot();
+  SamplingHeapProfiler* profiler = SamplingHeapProfiler::GetInstance();
+  profiler->SuppressRandomnessForTest(false);
+  profiler->SetSamplingInterval(10240);
+  base::TimeTicks t0 = base::TimeTicks::Now();
+  std::map<size_t, size_t> sums;
+  const int iterations = 40;
+  for (int i = 0; i < iterations; ++i) {
+    uint32_t id = profiler->Start();
+    allocate_callback();
+    std::vector<SamplingHeapProfiler::Sample> samples =
+        profiler->GetSamples(id);
+    profiler->Stop();
+    std::map<size_t, size_t> buckets;
+    for (auto& sample : samples) {
+      buckets[sample.size] += sample.total;
+    }
+    for (auto& it : buckets) {
+      if (it.first != 400 && it.first != 700 && it.first != 20480)
+        continue;
+      sums[it.first] += it.second;
+      printf("%zu,", it.second);
+    }
+    printf("\n");
+  }
+
+  printf("Time taken %" PRIu64 "ms\n",
+         (base::TimeTicks::Now() - t0).InMilliseconds());
+
+  for (auto sum : sums) {
+    intptr_t expected = sum.first * kNumberOfAllocations;
+    intptr_t actual = sum.second / iterations;
+    printf("%zu:\tmean: %zu\trelative error: %.2f%%\n", sum.first, actual,
+           100. * (actual - expected) / expected);
+  }
+}
+
+// Manual tests to check precision of the sampling profiler.
+// Yes, they do leak lots of memory.
+
+TEST_F(SamplingHeapProfilerTest, DISABLED_ParallelLargeSmallStats) {
+  CheckAllocationPattern([]() {
+    SimpleThread* t1 = new MyThread1();
+    SimpleThread* t2 = new MyThread2();
+    t1->Start();
+    t2->Start();
+    for (int i = 0; i < kNumberOfAllocations; ++i)
+      Allocate3();
+    t1->Join();
+    t2->Join();
+  });
+}
+
+TEST_F(SamplingHeapProfilerTest, DISABLED_SequentialLargeSmallStats) {
+  CheckAllocationPattern([]() {
+    for (int i = 0; i < kNumberOfAllocations; ++i) {
+      Allocate1();
+      Allocate2();
+      Allocate3();
+    }
+  });
+}
+
+}  // namespace
+}  // namespace base
diff --git a/base/scoped_clear_errno.h b/base/scoped_clear_errno.h
new file mode 100644
index 0000000..585f6f7
--- /dev/null
+++ b/base/scoped_clear_errno.h
@@ -0,0 +1,34 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SCOPED_CLEAR_ERRNO_H_
+#define BASE_SCOPED_CLEAR_ERRNO_H_
+
+#include <errno.h>
+
+#include "base/macros.h"
+
+namespace base {
+
+// Simple scoper that saves the current value of errno, resets it to 0, and on
+// destruction restores the old value, unless a new error (a nonzero errno)
+// was set in the meantime, in which case that error is preserved.
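+//
+// A usage sketch (SomeCallThatMaySetErrno() is hypothetical):
+//
+//   errno = EBUSY;
+//   {
+//     ScopedClearErrno scoped_errno;  // errno is now 0.
+//     SomeCallThatMaySetErrno();      // errno reflects only this call.
+//   }
+//   // errno is EBUSY again here, unless the call set a nonzero errno.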
+class ScopedClearErrno {
+ public:
+  ScopedClearErrno() : old_errno_(errno) {
+    errno = 0;
+  }
+  ~ScopedClearErrno() {
+    if (errno == 0)
+      errno = old_errno_;
+  }
+
+ private:
+  const int old_errno_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedClearErrno);
+};
+
+}  // namespace base
+
+#endif  // BASE_SCOPED_CLEAR_ERRNO_H_
diff --git a/base/scoped_clear_errno_unittest.cc b/base/scoped_clear_errno_unittest.cc
new file mode 100644
index 0000000..8afb33e
--- /dev/null
+++ b/base/scoped_clear_errno_unittest.cc
@@ -0,0 +1,30 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <errno.h>
+
+#include "base/scoped_clear_errno.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(ScopedClearErrno, TestNoError) {
+  errno = 1;
+  {
+    ScopedClearErrno clear_error;
+    EXPECT_EQ(0, errno);
+  }
+  EXPECT_EQ(1, errno);
+}
+
+TEST(ScopedClearErrno, TestError) {
+  errno = 1;
+  {
+    ScopedClearErrno clear_error;
+    errno = 2;
+  }
+  EXPECT_EQ(2, errno);
+}
+
+}  // namespace base
diff --git a/base/scoped_generic.h b/base/scoped_generic.h
new file mode 100644
index 0000000..25e6208
--- /dev/null
+++ b/base/scoped_generic.h
@@ -0,0 +1,189 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SCOPED_GENERIC_H_
+#define BASE_SCOPED_GENERIC_H_
+
+#include <stdlib.h>
+
+#include <algorithm>
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+
+namespace base {
+
+// This class acts like unique_ptr with a custom deleter (although it is
+// slightly less fancy in some of the more esoteric respects) except that it
+// keeps a copy of the object rather than a pointer, and we require that the
+// contained object has some kind of "invalid" value.
+//
+// Defining a scoper based on this class allows you to get a scoper for
+// non-pointer types without having to write custom code for set, reset,
+// move, etc., while keeping the semantics that people are used to from
+// unique_ptr.
+//
+// It is intended that you will typedef this class with an appropriate deleter
+// to implement clean up tasks for objects that act like pointers from a
+// resource management standpoint but aren't, such as file descriptors and
+// various types of operating system handles. Using unique_ptr for these
+// things requires that you keep a pointer to the handle valid for the lifetime
+// of the scoper (which is easy to mess up).
+//
+// For an object to be able to be put into a ScopedGeneric, it must support
+// standard copyable semantics and have a specific "invalid" value. The traits
+// must define a free function and also the invalid value to assign for
+// default-constructed and released objects.
+//
+//   struct FooScopedTraits {
+//     // It's assumed that this is a fast inline function with little-to-no
+//     // penalty for duplicate calls. This must be a static function even
+//     // for stateful traits.
+//     static int InvalidValue() {
+//       return 0;
+//     }
+//
+//     // This free function will not be called if f == InvalidValue()!
+//     static void Free(int f) {
+//       ::FreeFoo(f);
+//     }
+//   };
+//
+//   typedef ScopedGeneric<int, FooScopedTraits> ScopedFoo;
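+//
+//   // A usage sketch (::OpenFoo() and UseFoo() are hypothetical):
+//   ScopedFoo foo(::OpenFoo());
+//   UseFoo(foo.get());
+//   // FooScopedTraits::Free() runs when |foo| goes out of scope, unless
+//   // foo.release() was called first.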
+template<typename T, typename Traits>
+class ScopedGeneric {
+ private:
+  // This must be first since it's used inline below.
+  //
+  // Use the empty base class optimization to allow us to have a D
+  // member, while avoiding any space overhead for it when D is an
+  // empty class.  See e.g. http://www.cantrip.org/emptyopt.html for a good
+  // discussion of this technique.
+  struct Data : public Traits {
+    explicit Data(const T& in) : generic(in) {}
+    Data(const T& in, const Traits& other) : Traits(other), generic(in) {}
+    T generic;
+  };
+
+ public:
+  typedef T element_type;
+  typedef Traits traits_type;
+
+  ScopedGeneric() : data_(traits_type::InvalidValue()) {}
+
+  // Constructor. Takes responsibility for freeing the resource associated with
+  // the object T.
+  explicit ScopedGeneric(const element_type& value) : data_(value) {}
+
+  // Constructor. Allows initialization of a stateful traits object.
+  ScopedGeneric(const element_type& value, const traits_type& traits)
+      : data_(value, traits) {
+  }
+
+  // Move constructor. Allows initialization from a ScopedGeneric rvalue.
+  ScopedGeneric(ScopedGeneric<T, Traits>&& rvalue)
+      : data_(rvalue.release(), rvalue.get_traits()) {
+  }
+
+  ~ScopedGeneric() {
+    FreeIfNecessary();
+  }
+
+  // operator=. Allows assignment from a ScopedGeneric rvalue.
+  ScopedGeneric& operator=(ScopedGeneric<T, Traits>&& rvalue) {
+    reset(rvalue.release());
+    return *this;
+  }
+
+  // Frees the currently owned object, if any. Then takes ownership of a new
+  // object, if given. Self-resets are not allowed, as with unique_ptr. See
+  // http://crbug.com/162971
+  void reset(const element_type& value = traits_type::InvalidValue()) {
+    if (data_.generic != traits_type::InvalidValue() && data_.generic == value)
+      abort();
+    FreeIfNecessary();
+    data_.generic = value;
+  }
+
+  void swap(ScopedGeneric& other) {
+    // Standard swap idiom: 'using std::swap' ensures that std::swap is
+    // present in the overload set, but we call swap unqualified so that
+    // any more-specific overloads can be used, if available.
+    using std::swap;
+    swap(static_cast<Traits&>(data_), static_cast<Traits&>(other.data_));
+    swap(data_.generic, other.data_.generic);
+  }
+
+  // Release the object. The return value is the current object held by this
+  // scoper. After this operation, the scoper will hold the invalid value and
+  // will no longer own the object.
+  element_type release() WARN_UNUSED_RESULT {
+    element_type old_generic = data_.generic;
+    data_.generic = traits_type::InvalidValue();
+    return old_generic;
+  }
+
+  // Returns a raw pointer to the object storage, to allow the scoper to be used
+  // to receive and manage out-parameter values. Implies reset().
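+  //
+  // Example (a sketch; CreateFoo() is a hypothetical C-style API that writes
+  // a new handle through an out-parameter):
+  //
+  //   ScopedFoo foo;
+  //   CreateFoo(foo.receive());  // Writes the new handle into the scoper.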
+  element_type* receive() WARN_UNUSED_RESULT {
+    reset();
+    return &data_.generic;
+  }
+
+  const element_type& get() const { return data_.generic; }
+
+  // Returns true if this object doesn't hold the special invalid value for
+  // the associated data type.
+  bool is_valid() const { return data_.generic != traits_type::InvalidValue(); }
+
+  bool operator==(const element_type& value) const {
+    return data_.generic == value;
+  }
+  bool operator!=(const element_type& value) const {
+    return data_.generic != value;
+  }
+
+  Traits& get_traits() { return data_; }
+  const Traits& get_traits() const { return data_; }
+
+ private:
+  void FreeIfNecessary() {
+    if (data_.generic != traits_type::InvalidValue()) {
+      data_.Free(data_.generic);
+      data_.generic = traits_type::InvalidValue();
+    }
+  }
+
+  // Forbid comparison. If U != T, it totally doesn't make sense, and if U ==
+  // T, it still doesn't make sense because you should never have the same
+  // object owned by two different ScopedGenerics.
+  template <typename T2, typename Traits2> bool operator==(
+      const ScopedGeneric<T2, Traits2>& p2) const;
+  template <typename T2, typename Traits2> bool operator!=(
+      const ScopedGeneric<T2, Traits2>& p2) const;
+
+  Data data_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedGeneric);
+};
+
+template<class T, class Traits>
+void swap(ScopedGeneric<T, Traits>& a,
+          ScopedGeneric<T, Traits>& b) {
+  a.swap(b);
+}
+
+template<class T, class Traits>
+bool operator==(const T& value, const ScopedGeneric<T, Traits>& scoped) {
+  return value == scoped.get();
+}
+
+template<class T, class Traits>
+bool operator!=(const T& value, const ScopedGeneric<T, Traits>& scoped) {
+  return value != scoped.get();
+}
+
+}  // namespace base
+
+#endif  // BASE_SCOPED_GENERIC_H_
diff --git a/base/scoped_generic_unittest.cc b/base/scoped_generic_unittest.cc
new file mode 100644
index 0000000..5a6abfb
--- /dev/null
+++ b/base/scoped_generic_unittest.cc
@@ -0,0 +1,172 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/scoped_generic.h"
+
+#include <utility>
+#include <vector>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+struct IntTraits {
+  explicit IntTraits(std::vector<int>* freed) : freed_ints(freed) {}
+
+  static int InvalidValue() {
+    return -1;
+  }
+  void Free(int value) {
+    freed_ints->push_back(value);
+  }
+
+  std::vector<int>* freed_ints;
+};
+
+typedef ScopedGeneric<int, IntTraits> ScopedInt;
+
+}  // namespace
+
+TEST(ScopedGenericTest, ScopedGeneric) {
+  std::vector<int> values_freed;
+  IntTraits traits(&values_freed);
+
+  // Invalid case, delete should not be called.
+  {
+    ScopedInt a(IntTraits::InvalidValue(), traits);
+  }
+  EXPECT_TRUE(values_freed.empty());
+
+  // Simple deleting case.
+  static const int kFirst = 0;
+  {
+    ScopedInt a(kFirst, traits);
+  }
+  ASSERT_EQ(1u, values_freed.size());
+  ASSERT_EQ(kFirst, values_freed[0]);
+  values_freed.clear();
+
+  // Release should return the right value and leave the object empty.
+  {
+    ScopedInt a(kFirst, traits);
+    EXPECT_EQ(kFirst, a.release());
+
+    ScopedInt b(IntTraits::InvalidValue(), traits);
+    EXPECT_EQ(IntTraits::InvalidValue(), b.release());
+  }
+  ASSERT_TRUE(values_freed.empty());
+
+  // Reset should free the old value, then the new one should go away when
+  // it goes out of scope.
+  static const int kSecond = 1;
+  {
+    ScopedInt b(kFirst, traits);
+    b.reset(kSecond);
+    ASSERT_EQ(1u, values_freed.size());
+    ASSERT_EQ(kFirst, values_freed[0]);
+  }
+  ASSERT_EQ(2u, values_freed.size());
+  ASSERT_EQ(kSecond, values_freed[1]);
+  values_freed.clear();
+
+  // Swap.
+  {
+    ScopedInt a(kFirst, traits);
+    ScopedInt b(kSecond, traits);
+    a.swap(b);
+    EXPECT_TRUE(values_freed.empty());  // Nothing should be freed.
+    EXPECT_EQ(kSecond, a.get());
+    EXPECT_EQ(kFirst, b.get());
+  }
+  // Values should be deleted in the opposite order.
+  ASSERT_EQ(2u, values_freed.size());
+  EXPECT_EQ(kFirst, values_freed[0]);
+  EXPECT_EQ(kSecond, values_freed[1]);
+  values_freed.clear();
+
+  // Move constructor.
+  {
+    ScopedInt a(kFirst, traits);
+    ScopedInt b(std::move(a));
+    EXPECT_TRUE(values_freed.empty());  // Nothing should be freed.
+    ASSERT_EQ(IntTraits::InvalidValue(), a.get());
+    ASSERT_EQ(kFirst, b.get());
+  }
+
+  ASSERT_EQ(1u, values_freed.size());
+  ASSERT_EQ(kFirst, values_freed[0]);
+  values_freed.clear();
+
+  // Move assign.
+  {
+    ScopedInt a(kFirst, traits);
+    ScopedInt b(kSecond, traits);
+    b = std::move(a);
+    ASSERT_EQ(1u, values_freed.size());
+    EXPECT_EQ(kSecond, values_freed[0]);
+    ASSERT_EQ(IntTraits::InvalidValue(), a.get());
+    ASSERT_EQ(kFirst, b.get());
+  }
+
+  ASSERT_EQ(2u, values_freed.size());
+  EXPECT_EQ(kFirst, values_freed[1]);
+  values_freed.clear();
+}
+
+TEST(ScopedGenericTest, Operators) {
+  std::vector<int> values_freed;
+  IntTraits traits(&values_freed);
+
+  static const int kFirst = 0;
+  static const int kSecond = 1;
+  {
+    ScopedInt a(kFirst, traits);
+    EXPECT_TRUE(a == kFirst);
+    EXPECT_FALSE(a != kFirst);
+    EXPECT_FALSE(a == kSecond);
+    EXPECT_TRUE(a != kSecond);
+
+    EXPECT_TRUE(kFirst == a);
+    EXPECT_FALSE(kFirst != a);
+    EXPECT_FALSE(kSecond == a);
+    EXPECT_TRUE(kSecond != a);
+  }
+
+  // is_valid().
+  {
+    ScopedInt a(kFirst, traits);
+    EXPECT_TRUE(a.is_valid());
+    a.reset();
+    EXPECT_FALSE(a.is_valid());
+  }
+}
+
+// Cheesy manual "no compile" test for validating changes by hand.
+#if 0
+TEST(ScopedGenericTest, NoCompile) {
+  // Assignment shouldn't work.
+  /*{
+    ScopedInt a(kFirst, traits);
+    ScopedInt b(a);
+  }*/
+
+  // Comparison shouldn't work.
+  /*{
+    ScopedInt a(kFirst, traits);
+    ScopedInt b(kFirst, traits);
+    if (a == b) {
+    }
+  }*/
+
+  // Implicit conversion to bool shouldn't work.
+  /*{
+    ScopedInt a(kFirst, traits);
+    bool result = a;
+  }*/
+}
+#endif
+
+}  // namespace base
diff --git a/base/scoped_native_library.cc b/base/scoped_native_library.cc
new file mode 100644
index 0000000..c94f262
--- /dev/null
+++ b/base/scoped_native_library.cc
@@ -0,0 +1,43 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/scoped_native_library.h"
+
+namespace base {
+
+ScopedNativeLibrary::ScopedNativeLibrary() : library_(nullptr) {}
+
+ScopedNativeLibrary::ScopedNativeLibrary(NativeLibrary library)
+    : library_(library) {
+}
+
+ScopedNativeLibrary::ScopedNativeLibrary(const FilePath& library_path) {
+  library_ = base::LoadNativeLibrary(library_path, nullptr);
+}
+
+ScopedNativeLibrary::~ScopedNativeLibrary() {
+  if (library_)
+    base::UnloadNativeLibrary(library_);
+}
+
+void* ScopedNativeLibrary::GetFunctionPointer(
+    const char* function_name) const {
+  if (!library_)
+    return nullptr;
+  return base::GetFunctionPointerFromNativeLibrary(library_, function_name);
+}
+
+void ScopedNativeLibrary::Reset(NativeLibrary library) {
+  if (library_)
+    base::UnloadNativeLibrary(library_);
+  library_ = library;
+}
+
+NativeLibrary ScopedNativeLibrary::Release() {
+  NativeLibrary result = library_;
+  library_ = nullptr;
+  return result;
+}
+
+}  // namespace base
diff --git a/base/scoped_native_library.h b/base/scoped_native_library.h
new file mode 100644
index 0000000..e58297b
--- /dev/null
+++ b/base/scoped_native_library.h
@@ -0,0 +1,55 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SCOPED_NATIVE_LIBRARY_H_
+#define BASE_SCOPED_NATIVE_LIBRARY_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/native_library.h"
+
+namespace base {
+
+class FilePath;
+
+// A class which encapsulates a base::NativeLibrary object so that it is
+// available only within a scope; the loaded library is automatically
+// unloaded in the destructor.
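+//
+// Example (a sketch; "libfoo.so" and "FooFunction" are hypothetical):
+//
+//   base::ScopedNativeLibrary library(
+//       base::FilePath(FILE_PATH_LITERAL("libfoo.so")));
+//   if (library.is_valid()) {
+//     auto* foo_function = reinterpret_cast<void (*)()>(
+//         library.GetFunctionPointer("FooFunction"));
+//     if (foo_function)
+//       foo_function();
+//   }
+//   // The library is unloaded when |library| goes out of scope.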
+class BASE_EXPORT ScopedNativeLibrary {
+ public:
+  // Initializes with a NULL library.
+  ScopedNativeLibrary();
+
+  // Takes ownership of the given library handle.
+  explicit ScopedNativeLibrary(NativeLibrary library);
+
+  // Opens the given library and manages its lifetime.
+  explicit ScopedNativeLibrary(const FilePath& library_path);
+
+  ~ScopedNativeLibrary();
+
+  // Returns true if there's a valid library loaded.
+  bool is_valid() const { return !!library_; }
+
+  NativeLibrary get() const { return library_; }
+
+  void* GetFunctionPointer(const char* function_name) const;
+
+  // Takes ownership of the given library handle. Any existing handle will
+  // be freed.
+  void Reset(NativeLibrary library);
+
+  // Returns the native library handle and removes it from this object. The
+  // caller must manage the lifetime of the handle.
+  NativeLibrary Release();
+
+ private:
+  NativeLibrary library_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedNativeLibrary);
+};
+
+}  // namespace base
+
+#endif  // BASE_SCOPED_NATIVE_LIBRARY_H_
diff --git a/base/scoped_native_library_unittest.cc b/base/scoped_native_library_unittest.cc
new file mode 100644
index 0000000..763b45f
--- /dev/null
+++ b/base/scoped_native_library_unittest.cc
@@ -0,0 +1,48 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/scoped_native_library.h"
+
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_WIN)
+#include "base/files/file_path.h"
+#include "base/strings/utf_string_conversions.h"
+#endif
+
+namespace base {
+
+// Tests whether or not a function pointer retrieved via ScopedNativeLibrary
+// is available only in a scope.
+TEST(ScopedNativeLibrary, Basic) {
+#if defined(OS_WIN)
+  // Get the pointer to DirectDrawCreate() from "ddraw.dll" and verify it
+  // is valid only in this scope.
+  // FreeLibrary() doesn't actually unload a DLL until its reference count
+  // becomes zero, i.e. the function pointer would still be valid if the DLL
+  // used in this test were also used by another part of this executable.
+  // So, this test uses "ddraw.dll", which is not used by Chrome at all but is
+  // installed on all versions of Windows.
+  const char kFunctionName[] = "DirectDrawCreate";
+  NativeLibrary native_library;
+  {
+    FilePath path(FilePath::FromUTF8Unsafe(GetNativeLibraryName("ddraw")));
+    native_library = LoadNativeLibrary(path, nullptr);
+    ScopedNativeLibrary library(native_library);
+    EXPECT_TRUE(library.is_valid());
+    EXPECT_EQ(native_library, library.get());
+    FARPROC test_function =
+        reinterpret_cast<FARPROC>(library.GetFunctionPointer(kFunctionName));
+    EXPECT_EQ(0, IsBadCodePtr(test_function));
+    EXPECT_EQ(
+        GetFunctionPointerFromNativeLibrary(native_library, kFunctionName),
+        test_function);
+  }
+  EXPECT_FALSE(
+      GetFunctionPointerFromNativeLibrary(native_library, kFunctionName));
+#endif
+}
+
+}  // namespace base
diff --git a/base/scoped_observer.h b/base/scoped_observer.h
new file mode 100644
index 0000000..7f1d6fb
--- /dev/null
+++ b/base/scoped_observer.h
@@ -0,0 +1,63 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SCOPED_OBSERVER_H_
+#define BASE_SCOPED_OBSERVER_H_
+
+#include <stddef.h>
+
+#include <algorithm>
+#include <vector>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/stl_util.h"
+
+// ScopedObserver is used to keep track of the set of sources an object has
+// attached itself to as an observer. When ScopedObserver is destroyed it
+// removes the object as an observer from all sources it has been added to.
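+//
+// Example (a sketch; FooSource and FooObserver are hypothetical types, with
+// FooSource exposing AddObserver()/RemoveObserver()):
+//
+//   class MyFooObserver : public FooObserver {
+//    public:
+//     explicit MyFooObserver(FooSource* source) : observer_(this) {
+//       observer_.Add(source);
+//     }
+//     // ~MyFooObserver() removes |this| from every source via |observer_|.
+//
+//    private:
+//     ScopedObserver<FooSource, FooObserver> observer_;
+//   };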
+template <class Source, class Observer>
+class ScopedObserver {
+ public:
+  explicit ScopedObserver(Observer* observer) : observer_(observer) {}
+
+  ~ScopedObserver() {
+    RemoveAll();
+  }
+
+  // Adds the object passed to the constructor as an observer on |source|.
+  void Add(Source* source) {
+    sources_.push_back(source);
+    source->AddObserver(observer_);
+  }
+
+  // Remove the object passed to the constructor as an observer from |source|.
+  void Remove(Source* source) {
+    auto it = std::find(sources_.begin(), sources_.end(), source);
+    DCHECK(it != sources_.end());
+    sources_.erase(it);
+    source->RemoveObserver(observer_);
+  }
+
+  void RemoveAll() {
+    for (size_t i = 0; i < sources_.size(); ++i)
+      sources_[i]->RemoveObserver(observer_);
+    sources_.clear();
+  }
+
+  bool IsObserving(Source* source) const {
+    return base::ContainsValue(sources_, source);
+  }
+
+  bool IsObservingSources() const { return !sources_.empty(); }
+
+ private:
+  Observer* observer_;
+
+  std::vector<Source*> sources_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedObserver);
+};
+
+#endif  // BASE_SCOPED_OBSERVER_H_
diff --git a/base/security_unittest.cc b/base/security_unittest.cc
new file mode 100644
index 0000000..13e9594
--- /dev/null
+++ b/base/security_unittest.cc
@@ -0,0 +1,173 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <fcntl.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include <algorithm>
+#include <limits>
+#include <memory>
+
+#include "base/allocator/buildflags.h"
+#include "base/files/file_util.h"
+#include "base/logging.h"
+#include "base/memory/free_deleter.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_POSIX)
+#include <sys/mman.h>
+#include <unistd.h>
+#endif
+
+using std::nothrow;
+using std::numeric_limits;
+
+namespace {
+
+// This function acts as a compiler optimization barrier. We use it to
+// prevent the compiler from making an expression a compile-time constant.
+// We also use it so that the compiler doesn't discard certain return values
+// as something we don't need (see the comment with calloc below).
+template <typename Type>
+NOINLINE Type HideValueFromCompiler(volatile Type value) {
+#if defined(__GNUC__)
+  // In a GCC compatible compiler (GCC or Clang), make this compiler barrier
+  // more robust than merely using "volatile".
+  __asm__ volatile ("" : "+r" (value));
+#endif  // __GNUC__
+  return value;
+}
+
+// TCMalloc, currently supported only on Linux/CrOS, supports malloc limits.
+// The tests below are disabled when either of these is defined:
+// - NO_TCMALLOC (should be defined if compiled with use_allocator!="tcmalloc")
+// - ADDRESS_SANITIZER, because it has its own memory allocator
+#if defined(OS_LINUX) && !defined(NO_TCMALLOC) && !defined(ADDRESS_SANITIZER)
+#define MALLOC_OVERFLOW_TEST(function) function
+#else
+#define MALLOC_OVERFLOW_TEST(function) DISABLED_##function
+#endif
+
+// There are platforms where these tests are known to fail. We would like to
+// be able to easily check the status on the bots, but marking tests as
+// FAILS_ is too clunky.
+void OverflowTestsSoftExpectTrue(bool overflow_detected) {
+  if (!overflow_detected) {
+#if defined(OS_LINUX) || defined(OS_ANDROID) || defined(OS_MACOSX)
+    // Sadly, on Linux, Android, and OSX we don't have a good story yet. Don't
+    // fail the test, but report.
+    printf("Platform has overflow: %s\n",
+           !overflow_detected ? "yes." : "no.");
+#else
+    // Otherwise, fail the test. (Note: EXPECTs are ok in subfunctions, ASSERTs
+    // aren't).
+    EXPECT_TRUE(overflow_detected);
+#endif
+  }
+}
+
+#if defined(OS_IOS) || defined(OS_FUCHSIA) || defined(ADDRESS_SANITIZER) || \
+    defined(THREAD_SANITIZER) || defined(MEMORY_SANITIZER)
+#define MAYBE_NewOverflow DISABLED_NewOverflow
+#else
+#define MAYBE_NewOverflow NewOverflow
+#endif
+// Test array[TooBig][X] and array[X][TooBig] allocations for int overflows.
+// IOS doesn't honor nothrow, so disable the test there.
+// TODO(https://crbug.com/828229): Fuchsia SDK exports an incorrect new[] that
+// gets picked up in Debug/component builds, breaking this test.
+// Disabled under XSan because asan aborts when new returns nullptr,
+// https://bugs.chromium.org/p/chromium/issues/detail?id=690271#c15
+TEST(SecurityTest, MAYBE_NewOverflow) {
+  const size_t kArraySize = 4096;
+  // We want something "dynamic" here, so that the compiler doesn't
+  // immediately reject crazy arrays.
+  const size_t kDynamicArraySize = HideValueFromCompiler(kArraySize);
+  const size_t kMaxSizeT = std::numeric_limits<size_t>::max();
+  const size_t kArraySize2 = kMaxSizeT / kArraySize + 10;
+  const size_t kDynamicArraySize2 = HideValueFromCompiler(kArraySize2);
+  {
+    std::unique_ptr<char[][kArraySize]> array_pointer(
+        new (nothrow) char[kDynamicArraySize2][kArraySize]);
+    // Prevent clang from optimizing away the whole test.
+    char* volatile p = reinterpret_cast<char*>(array_pointer.get());
+    OverflowTestsSoftExpectTrue(!p);
+  }
+  // On windows, the compiler prevents static array sizes of more than
+  // 0x7fffffff (error C2148).
+#if defined(OS_WIN) && defined(ARCH_CPU_64_BITS)
+  ALLOW_UNUSED_LOCAL(kDynamicArraySize);
+#else
+  {
+    std::unique_ptr<char[][kArraySize2]> array_pointer(
+        new (nothrow) char[kDynamicArraySize][kArraySize2]);
+    // Prevent clang from optimizing away the whole test.
+    char* volatile p = reinterpret_cast<char*>(array_pointer.get());
+    OverflowTestsSoftExpectTrue(!p);
+  }
+#endif  // !defined(OS_WIN) || !defined(ARCH_CPU_64_BITS)
+}
+
+#if defined(OS_LINUX) && defined(__x86_64__)
+// Check if ptr1 and ptr2 are separated by no more than |size| chars.
+bool ArePointersToSameArea(void* ptr1, void* ptr2, size_t size) {
+  ptrdiff_t ptr_diff = reinterpret_cast<char*>(std::max(ptr1, ptr2)) -
+                       reinterpret_cast<char*>(std::min(ptr1, ptr2));
+  return static_cast<size_t>(ptr_diff) <= size;
+}
+
+// Check if TCMalloc uses an underlying random memory allocator.
+TEST(SecurityTest, MALLOC_OVERFLOW_TEST(RandomMemoryAllocations)) {
+  const size_t kPageSize = 4096;  // We support x86_64 only.
+  // Check that malloc() returns an address that is neither the kernel's
+  // un-hinted mmap area, nor the current brk() area. The first malloc() may
+  // not be at a random address because TCMalloc will first exhaust any memory
+  // that it has allocated early on, before starting the sophisticated
+  // allocators.
+  void* default_mmap_heap_address =
+      mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE,
+           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+  ASSERT_NE(default_mmap_heap_address,
+            static_cast<void*>(MAP_FAILED));
+  ASSERT_EQ(munmap(default_mmap_heap_address, kPageSize), 0);
+  void* brk_heap_address = sbrk(0);
+  ASSERT_NE(brk_heap_address, reinterpret_cast<void*>(-1));
+  ASSERT_TRUE(brk_heap_address != nullptr);
+  // 1 MB should get us past what TCMalloc pre-allocated before initializing
+  // the sophisticated allocators.
+  const size_t kAllocSize = 1 << 20;
+  std::unique_ptr<char, base::FreeDeleter> ptr(
+      static_cast<char*>(malloc(kAllocSize)));
+  ASSERT_TRUE(ptr != nullptr);
+  // If two pointers are separated by less than 512MB, they are considered
+  // to be in the same area.
+  // Our random pointer could be anywhere within 0x3fffffffffff (46 bits),
+  // and we are checking that it's not within 1GB (30 bits) of two
+  // addresses (brk and mmap heap). We have roughly one chance out of
+  // 2^15 to flake.
+  const size_t kAreaRadius = 1 << 29;
+  bool in_default_mmap_heap = ArePointersToSameArea(
+      ptr.get(), default_mmap_heap_address, kAreaRadius);
+  EXPECT_FALSE(in_default_mmap_heap);
+
+  bool in_default_brk_heap = ArePointersToSameArea(
+      ptr.get(), brk_heap_address, kAreaRadius);
+  EXPECT_FALSE(in_default_brk_heap);
+
+  // In the implementation, we always mask our random addresses with
+  // kRandomMask, so we use it as an additional detection mechanism.
+  const uintptr_t kRandomMask = 0x3fffffffffffULL;
+  bool impossible_random_address =
+      reinterpret_cast<uintptr_t>(ptr.get()) & ~kRandomMask;
+  EXPECT_FALSE(impossible_random_address);
+}
+
+#endif  // defined(OS_LINUX) && defined(__x86_64__)
+
+}  // namespace
diff --git a/base/sequence_checker.h b/base/sequence_checker.h
new file mode 100644
index 0000000..48b593b
--- /dev/null
+++ b/base/sequence_checker.h
@@ -0,0 +1,88 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SEQUENCE_CHECKER_H_
+#define BASE_SEQUENCE_CHECKER_H_
+
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/sequence_checker_impl.h"
+
+// SequenceChecker is a helper class used to verify that some methods of a
+// class are called sequentially (for thread-safety).
+//
+// Use the macros below instead of the SequenceChecker directly so that the
+// unused member doesn't result in an extra byte (four when padded) per
+// instance in production.
+//
+// This class is much preferred to ThreadChecker for thread-safety checks.
+// ThreadChecker should only be used for classes that are truly thread-affine
+// (use thread-local storage or a third-party API that does).
+//
+// Usage:
+//   class MyClass {
+//    public:
+//     MyClass() {
+//       // It's sometimes useful to detach on construction for objects that are
+//       // constructed in one place and forever after used from another
+//       // sequence.
+//       DETACH_FROM_SEQUENCE(my_sequence_checker_);
+//     }
+//
+//     ~MyClass() {
+//       // SequenceChecker doesn't automatically check that it's destroyed on
+//       // its origin sequence, for the same reason it's sometimes detached in
+//       // the constructor. It's okay to destroy off sequence if the owner
+//       // otherwise knows usage on the associated sequence is done. If you're
+//       // not detaching in the constructor, you probably want to explicitly
+//       // check in the destructor.
+//       DCHECK_CALLED_ON_VALID_SEQUENCE(my_sequence_checker_);
+//     }
+//     void MyMethod() {
+//       DCHECK_CALLED_ON_VALID_SEQUENCE(my_sequence_checker_);
+//       ... (do stuff) ...
+//     }
+//
+//    private:
+//     SEQUENCE_CHECKER(my_sequence_checker_);
+//   }
+
+#if DCHECK_IS_ON()
+#define SEQUENCE_CHECKER(name) base::SequenceChecker name
+#define DCHECK_CALLED_ON_VALID_SEQUENCE(name) \
+  DCHECK((name).CalledOnValidSequence())
+#define DETACH_FROM_SEQUENCE(name) (name).DetachFromSequence()
+#else  // DCHECK_IS_ON()
+#define SEQUENCE_CHECKER(name)
+#define DCHECK_CALLED_ON_VALID_SEQUENCE(name) EAT_STREAM_PARAMETERS
+#define DETACH_FROM_SEQUENCE(name)
+#endif  // DCHECK_IS_ON()
+
+namespace base {
+
+// Do-nothing implementation, for use in release mode.
+//
+// Note: You should almost always use the SequenceChecker class (through the
+// above macros) to get the right version for your build configuration.
+class SequenceCheckerDoNothing {
+ public:
+  SequenceCheckerDoNothing() = default;
+  bool CalledOnValidSequence() const WARN_UNUSED_RESULT { return true; }
+  void DetachFromSequence() {}
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(SequenceCheckerDoNothing);
+};
+
+#if DCHECK_IS_ON()
+class SequenceChecker : public SequenceCheckerImpl {
+};
+#else
+class SequenceChecker : public SequenceCheckerDoNothing {
+};
+#endif  // DCHECK_IS_ON()
+
+}  // namespace base
+
+#endif  // BASE_SEQUENCE_CHECKER_H_
diff --git a/base/sequence_checker_impl.cc b/base/sequence_checker_impl.cc
new file mode 100644
index 0000000..daa774b
--- /dev/null
+++ b/base/sequence_checker_impl.cc
@@ -0,0 +1,51 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sequence_checker_impl.h"
+
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/sequence_token.h"
+#include "base/threading/thread_checker_impl.h"
+
+namespace base {
+
+class SequenceCheckerImpl::Core {
+ public:
+  Core() : sequence_token_(SequenceToken::GetForCurrentThread()) {}
+
+  ~Core() = default;
+
+  bool CalledOnValidSequence() const {
+    if (sequence_token_.IsValid())
+      return sequence_token_ == SequenceToken::GetForCurrentThread();
+
+    // SequenceChecker behaves as a ThreadChecker when it is not bound to a
+    // valid sequence token.
+    return thread_checker_.CalledOnValidThread();
+  }
+
+ private:
+  SequenceToken sequence_token_;
+
+  // Used when |sequence_token_| is invalid.
+  ThreadCheckerImpl thread_checker_;
+};
+
+SequenceCheckerImpl::SequenceCheckerImpl() : core_(std::make_unique<Core>()) {}
+SequenceCheckerImpl::~SequenceCheckerImpl() = default;
+
+bool SequenceCheckerImpl::CalledOnValidSequence() const {
+  AutoLock auto_lock(lock_);
+  if (!core_)
+    core_ = std::make_unique<Core>();
+  return core_->CalledOnValidSequence();
+}
+
+void SequenceCheckerImpl::DetachFromSequence() {
+  AutoLock auto_lock(lock_);
+  core_.reset();
+}
+
+}  // namespace base
diff --git a/base/sequence_checker_impl.h b/base/sequence_checker_impl.h
new file mode 100644
index 0000000..a54c388
--- /dev/null
+++ b/base/sequence_checker_impl.h
@@ -0,0 +1,48 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SEQUENCE_CHECKER_IMPL_H_
+#define BASE_SEQUENCE_CHECKER_IMPL_H_
+
+#include <memory>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+
+// Real implementation of SequenceChecker for use in debug mode or for temporary
+// use in release mode (e.g. to CHECK on a threading issue seen only in the
+// wild).
+//
+// Note: You should almost always use the SequenceChecker class to get the right
+// version for your build configuration.
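+//
+// Example of temporary release-mode use (a sketch):
+//
+//   class Foo {
+//     ...
+//     SequenceCheckerImpl sequence_checker_;  // Note: not SEQUENCE_CHECKER().
+//   };
+//
+//   void Foo::Bar() {
+//     CHECK(sequence_checker_.CalledOnValidSequence());
+//     ...
+//   }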
+class BASE_EXPORT SequenceCheckerImpl {
+ public:
+  SequenceCheckerImpl();
+  ~SequenceCheckerImpl();
+
+  // Returns true if called in sequence with previous calls to this method and
+  // the constructor.
+  bool CalledOnValidSequence() const WARN_UNUSED_RESULT;
+
+  // Unbinds the checker from the currently associated sequence. The checker
+  // will be re-bound on the next call to CalledOnValidSequence().
+  void DetachFromSequence();
+
+ private:
+  class Core;
+
+  // Guards all variables below.
+  mutable Lock lock_;
+  mutable std::unique_ptr<Core> core_;
+
+  DISALLOW_COPY_AND_ASSIGN(SequenceCheckerImpl);
+};
+
+}  // namespace base
+
+#endif  // BASE_SEQUENCE_CHECKER_IMPL_H_
diff --git a/base/sequence_checker_unittest.cc b/base/sequence_checker_unittest.cc
new file mode 100644
index 0000000..8d44f3e
--- /dev/null
+++ b/base/sequence_checker_unittest.cc
@@ -0,0 +1,181 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sequence_checker.h"
+
+#include <stddef.h>
+
+#include <memory>
+#include <string>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/callback_forward.h"
+#include "base/macros.h"
+#include "base/sequence_token.h"
+#include "base/single_thread_task_runner.h"
+#include "base/test/gtest_util.h"
+#include "base/threading/simple_thread.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+// Runs a callback on another thread.
+class RunCallbackThread : public SimpleThread {
+ public:
+  explicit RunCallbackThread(const Closure& callback)
+      : SimpleThread("RunCallbackThread"), callback_(callback) {
+    Start();
+    Join();
+  }
+
+ private:
+  // SimpleThread:
+  void Run() override { callback_.Run(); }
+
+  const Closure callback_;
+
+  DISALLOW_COPY_AND_ASSIGN(RunCallbackThread);
+};
+
+void ExpectCalledOnValidSequence(SequenceCheckerImpl* sequence_checker) {
+  ASSERT_TRUE(sequence_checker);
+
+  // This should bind |sequence_checker| to the current sequence if it wasn't
+  // already bound to a sequence.
+  EXPECT_TRUE(sequence_checker->CalledOnValidSequence());
+
+  // Since |sequence_checker| is now bound to the current sequence, another call
+  // to CalledOnValidSequence() should return true.
+  EXPECT_TRUE(sequence_checker->CalledOnValidSequence());
+}
+
+void ExpectCalledOnValidSequenceWithSequenceToken(
+    SequenceCheckerImpl* sequence_checker,
+    SequenceToken sequence_token) {
+  ScopedSetSequenceTokenForCurrentThread
+      scoped_set_sequence_token_for_current_thread(sequence_token);
+  ExpectCalledOnValidSequence(sequence_checker);
+}
+
+void ExpectNotCalledOnValidSequence(SequenceCheckerImpl* sequence_checker) {
+  ASSERT_TRUE(sequence_checker);
+  EXPECT_FALSE(sequence_checker->CalledOnValidSequence());
+}
+
+}  // namespace
+
+TEST(SequenceCheckerTest, CallsAllowedOnSameThreadNoSequenceToken) {
+  SequenceCheckerImpl sequence_checker;
+  EXPECT_TRUE(sequence_checker.CalledOnValidSequence());
+}
+
+TEST(SequenceCheckerTest, CallsAllowedOnSameThreadSameSequenceToken) {
+  ScopedSetSequenceTokenForCurrentThread
+      scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
+  SequenceCheckerImpl sequence_checker;
+  EXPECT_TRUE(sequence_checker.CalledOnValidSequence());
+}
+
+TEST(SequenceCheckerTest, CallsDisallowedOnDifferentThreadsNoSequenceToken) {
+  SequenceCheckerImpl sequence_checker;
+  RunCallbackThread thread(
+      Bind(&ExpectNotCalledOnValidSequence, Unretained(&sequence_checker)));
+}
+
+TEST(SequenceCheckerTest, CallsAllowedOnDifferentThreadsSameSequenceToken) {
+  const SequenceToken sequence_token(SequenceToken::Create());
+
+  ScopedSetSequenceTokenForCurrentThread
+      scoped_set_sequence_token_for_current_thread(sequence_token);
+  SequenceCheckerImpl sequence_checker;
+  EXPECT_TRUE(sequence_checker.CalledOnValidSequence());
+
+  RunCallbackThread thread(Bind(&ExpectCalledOnValidSequenceWithSequenceToken,
+                                Unretained(&sequence_checker), sequence_token));
+}
+
+TEST(SequenceCheckerTest, CallsDisallowedOnSameThreadDifferentSequenceToken) {
+  std::unique_ptr<SequenceCheckerImpl> sequence_checker;
+
+  {
+    ScopedSetSequenceTokenForCurrentThread
+        scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
+    sequence_checker.reset(new SequenceCheckerImpl);
+  }
+
+  {
+    // Different SequenceToken.
+    ScopedSetSequenceTokenForCurrentThread
+        scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
+    EXPECT_FALSE(sequence_checker->CalledOnValidSequence());
+  }
+
+  // No SequenceToken.
+  EXPECT_FALSE(sequence_checker->CalledOnValidSequence());
+}
+
+TEST(SequenceCheckerTest, DetachFromSequence) {
+  std::unique_ptr<SequenceCheckerImpl> sequence_checker;
+
+  {
+    ScopedSetSequenceTokenForCurrentThread
+        scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
+    sequence_checker.reset(new SequenceCheckerImpl);
+  }
+
+  sequence_checker->DetachFromSequence();
+
+  {
+    // Verify that CalledOnValidSequence() returns true when called with
+    // a different sequence token after a call to DetachFromSequence().
+    ScopedSetSequenceTokenForCurrentThread
+        scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
+    EXPECT_TRUE(sequence_checker->CalledOnValidSequence());
+  }
+}
+
+TEST(SequenceCheckerTest, DetachFromSequenceNoSequenceToken) {
+  SequenceCheckerImpl sequence_checker;
+  sequence_checker.DetachFromSequence();
+
+  // Verify that CalledOnValidSequence() returns true when called on a
+  // different thread after a call to DetachFromSequence().
+  RunCallbackThread thread(
+      Bind(&ExpectCalledOnValidSequence, Unretained(&sequence_checker)));
+
+  EXPECT_FALSE(sequence_checker.CalledOnValidSequence());
+}
+
+TEST(SequenceCheckerMacroTest, Macros) {
+  auto scope = std::make_unique<ScopedSetSequenceTokenForCurrentThread>(
+      SequenceToken::Create());
+  SEQUENCE_CHECKER(my_sequence_checker);
+
+  // Don't expect a DCHECK death when a SequenceChecker is used on the right
+  // sequence.
+  DCHECK_CALLED_ON_VALID_SEQUENCE(my_sequence_checker) << "Error message.";
+
+  scope.reset();
+
+#if DCHECK_IS_ON()
+  // Expect DCHECK death when used on a different sequence.
+  EXPECT_DCHECK_DEATH({
+    DCHECK_CALLED_ON_VALID_SEQUENCE(my_sequence_checker) << "Error message.";
+  });
+#else
+  // Happily no-ops on non-dcheck builds.
+  DCHECK_CALLED_ON_VALID_SEQUENCE(my_sequence_checker) << "Error message.";
+#endif
+
+  DETACH_FROM_SEQUENCE(my_sequence_checker);
+
+  // Don't expect a DCHECK death when a SequenceChecker is used for the first
+  // time after having been detached.
+  DCHECK_CALLED_ON_VALID_SEQUENCE(my_sequence_checker) << "Error message.";
+}
+
+}  // namespace base
diff --git a/base/sequence_token.cc b/base/sequence_token.cc
new file mode 100644
index 0000000..0bf2b44
--- /dev/null
+++ b/base/sequence_token.cc
@@ -0,0 +1,92 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sequence_token.h"
+
+#include "base/atomic_sequence_num.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/threading/thread_local.h"
+
+namespace base {
+
+namespace {
+
+base::AtomicSequenceNumber g_sequence_token_generator;
+
+base::AtomicSequenceNumber g_task_token_generator;
+
+LazyInstance<ThreadLocalPointer<const SequenceToken>>::Leaky
+    tls_current_sequence_token = LAZY_INSTANCE_INITIALIZER;
+
+LazyInstance<ThreadLocalPointer<const TaskToken>>::Leaky
+    tls_current_task_token = LAZY_INSTANCE_INITIALIZER;
+
+}  // namespace
+
+bool SequenceToken::operator==(const SequenceToken& other) const {
+  return token_ == other.token_ && IsValid();
+}
+
+bool SequenceToken::operator!=(const SequenceToken& other) const {
+  return !(*this == other);
+}
+
+bool SequenceToken::IsValid() const {
+  return token_ != kInvalidSequenceToken;
+}
+
+int SequenceToken::ToInternalValue() const {
+  return token_;
+}
+
+SequenceToken SequenceToken::Create() {
+  return SequenceToken(g_sequence_token_generator.GetNext());
+}
+
+SequenceToken SequenceToken::GetForCurrentThread() {
+  const SequenceToken* current_sequence_token =
+      tls_current_sequence_token.Get().Get();
+  return current_sequence_token ? *current_sequence_token : SequenceToken();
+}
+
+bool TaskToken::operator==(const TaskToken& other) const {
+  return token_ == other.token_ && IsValid();
+}
+
+bool TaskToken::operator!=(const TaskToken& other) const {
+  return !(*this == other);
+}
+
+bool TaskToken::IsValid() const {
+  return token_ != kInvalidTaskToken;
+}
+
+TaskToken TaskToken::Create() {
+  return TaskToken(g_task_token_generator.GetNext());
+}
+
+TaskToken TaskToken::GetForCurrentThread() {
+  const TaskToken* current_task_token = tls_current_task_token.Get().Get();
+  return current_task_token ? *current_task_token : TaskToken();
+}
+
+ScopedSetSequenceTokenForCurrentThread::ScopedSetSequenceTokenForCurrentThread(
+    const SequenceToken& sequence_token)
+    : sequence_token_(sequence_token), task_token_(TaskToken::Create()) {
+  DCHECK(!tls_current_sequence_token.Get().Get());
+  DCHECK(!tls_current_task_token.Get().Get());
+  tls_current_sequence_token.Get().Set(&sequence_token_);
+  tls_current_task_token.Get().Set(&task_token_);
+}
+
+ScopedSetSequenceTokenForCurrentThread::
+    ~ScopedSetSequenceTokenForCurrentThread() {
+  DCHECK_EQ(tls_current_sequence_token.Get().Get(), &sequence_token_);
+  DCHECK_EQ(tls_current_task_token.Get().Get(), &task_token_);
+  tls_current_sequence_token.Get().Set(nullptr);
+  tls_current_task_token.Get().Set(nullptr);
+}
+
+}  // namespace base
diff --git a/base/sequence_token.h b/base/sequence_token.h
new file mode 100644
index 0000000..6e7d191
--- /dev/null
+++ b/base/sequence_token.h
@@ -0,0 +1,115 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SEQUENCE_TOKEN_H_
+#define BASE_SEQUENCE_TOKEN_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+
+namespace base {
+
+// A token that identifies a series of sequenced tasks (i.e. tasks that run one
+// at a time in posting order).
+class BASE_EXPORT SequenceToken {
+ public:
+  // Instantiates an invalid SequenceToken.
+  SequenceToken() = default;
+
+  // Explicitly allow copy.
+  SequenceToken(const SequenceToken& other) = default;
+  SequenceToken& operator=(const SequenceToken& other) = default;
+
+  // An invalid SequenceToken is not equal to any other SequenceToken, including
+  // other invalid SequenceTokens.
+  bool operator==(const SequenceToken& other) const;
+  bool operator!=(const SequenceToken& other) const;
+
+  // Returns true if this is a valid SequenceToken.
+  bool IsValid() const;
+
+  // Returns the integer uniquely representing this SequenceToken. This method
+  // should only be used for tracing and debugging.
+  int ToInternalValue() const;
+
+  // Returns a valid SequenceToken which isn't equal to any previously returned
+  // SequenceToken.
+  static SequenceToken Create();
+
+  // Returns the SequenceToken associated with the task running on the current
+  // thread, as determined by the active ScopedSetSequenceTokenForCurrentThread
+  // if any.
+  static SequenceToken GetForCurrentThread();
+
+ private:
+  explicit SequenceToken(int token) : token_(token) {}
+
+  static constexpr int kInvalidSequenceToken = -1;
+  int token_ = kInvalidSequenceToken;
+};
+
+// A token that identifies a task.
+//
+// This is used by ThreadCheckerImpl to determine whether calls to
+// CalledOnValidThread() come from the same task and hence are deterministically
+// single-threaded (vs. calls coming from different sequenced or parallel tasks,
+// which may or may not run on the same thread).
+class BASE_EXPORT TaskToken {
+ public:
+  // Instantiates an invalid TaskToken.
+  TaskToken() = default;
+
+  // Explicitly allow copy.
+  TaskToken(const TaskToken& other) = default;
+  TaskToken& operator=(const TaskToken& other) = default;
+
+  // An invalid TaskToken is not equal to any other TaskToken, including
+  // other invalid TaskTokens.
+  bool operator==(const TaskToken& other) const;
+  bool operator!=(const TaskToken& other) const;
+
+  // Returns true if this is a valid TaskToken.
+  bool IsValid() const;
+
+  // In the scope of a ScopedSetSequenceTokenForCurrentThread, returns a valid
+  // TaskToken which isn't equal to any TaskToken returned in the scope of a
+  // different ScopedSetSequenceTokenForCurrentThread. Otherwise, returns an
+  // invalid TaskToken.
+  static TaskToken GetForCurrentThread();
+
+ private:
+  friend class ScopedSetSequenceTokenForCurrentThread;
+
+  explicit TaskToken(int token) : token_(token) {}
+
+  // Returns a valid TaskToken which isn't equal to any previously returned
+  // TaskToken. This is private as it is only meant to be instantiated by
+  // ScopedSetSequenceTokenForCurrentThread.
+  static TaskToken Create();
+
+  static constexpr int kInvalidTaskToken = -1;
+  int token_ = kInvalidTaskToken;
+};
+
+// Instantiate this in the scope where a single task runs.
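+//
+// Example (a sketch of how a task executor might use it):
+//
+//   void RunTask(base::OnceClosure task, base::SequenceToken token) {
+//     base::ScopedSetSequenceTokenForCurrentThread scoped_token(token);
+//     // While |scoped_token| is alive, SequenceToken::GetForCurrentThread()
+//     // returns |token|, which SequenceChecker uses to validate calls.
+//     std::move(task).Run();
+//   }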
+class BASE_EXPORT ScopedSetSequenceTokenForCurrentThread {
+ public:
+  // Throughout the lifetime of the constructed object,
+  // SequenceToken::GetForCurrentThread() will return |sequence_token| and
+  // TaskToken::GetForCurrentThread() will return a TaskToken which is not equal
+  // to any TaskToken returned in the scope of another
+  // ScopedSetSequenceTokenForCurrentThread.
+  ScopedSetSequenceTokenForCurrentThread(const SequenceToken& sequence_token);
+  ~ScopedSetSequenceTokenForCurrentThread();
+
+ private:
+  const SequenceToken sequence_token_;
+  const TaskToken task_token_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedSetSequenceTokenForCurrentThread);
+};
+
+}  // namespace base
+
+#endif  // BASE_SEQUENCE_TOKEN_H_
diff --git a/base/sequence_token_unittest.cc b/base/sequence_token_unittest.cc
new file mode 100644
index 0000000..2ed6878
--- /dev/null
+++ b/base/sequence_token_unittest.cc
@@ -0,0 +1,133 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sequence_token.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(SequenceTokenTest, IsValid) {
+  EXPECT_FALSE(SequenceToken().IsValid());
+  EXPECT_TRUE(SequenceToken::Create().IsValid());
+}
+
+TEST(SequenceTokenTest, OperatorEquals) {
+  const SequenceToken invalid_a;
+  const SequenceToken invalid_b;
+  const SequenceToken valid_a = SequenceToken::Create();
+  const SequenceToken valid_b = SequenceToken::Create();
+
+  EXPECT_FALSE(invalid_a == invalid_a);
+  EXPECT_FALSE(invalid_a == invalid_b);
+  EXPECT_FALSE(invalid_a == valid_a);
+  EXPECT_FALSE(invalid_a == valid_b);
+
+  EXPECT_FALSE(valid_a == invalid_a);
+  EXPECT_FALSE(valid_a == invalid_b);
+  EXPECT_EQ(valid_a, valid_a);
+  EXPECT_FALSE(valid_a == valid_b);
+}
+
+TEST(SequenceTokenTest, OperatorNotEquals) {
+  const SequenceToken invalid_a;
+  const SequenceToken invalid_b;
+  const SequenceToken valid_a = SequenceToken::Create();
+  const SequenceToken valid_b = SequenceToken::Create();
+
+  EXPECT_NE(invalid_a, invalid_a);
+  EXPECT_NE(invalid_a, invalid_b);
+  EXPECT_NE(invalid_a, valid_a);
+  EXPECT_NE(invalid_a, valid_b);
+
+  EXPECT_NE(valid_a, invalid_a);
+  EXPECT_NE(valid_a, invalid_b);
+  EXPECT_FALSE(valid_a != valid_a);
+  EXPECT_NE(valid_a, valid_b);
+}
+
+TEST(SequenceTokenTest, GetForCurrentThread) {
+  const SequenceToken token = SequenceToken::Create();
+
+  EXPECT_FALSE(SequenceToken::GetForCurrentThread().IsValid());
+
+  {
+    ScopedSetSequenceTokenForCurrentThread
+        scoped_set_sequence_token_for_current_thread(token);
+    EXPECT_TRUE(SequenceToken::GetForCurrentThread().IsValid());
+    EXPECT_EQ(token, SequenceToken::GetForCurrentThread());
+  }
+
+  EXPECT_FALSE(SequenceToken::GetForCurrentThread().IsValid());
+}
+
+TEST(SequenceTokenTest, ToInternalValue) {
+  const SequenceToken token1 = SequenceToken::Create();
+  const SequenceToken token2 = SequenceToken::Create();
+
+  // Confirm that internal values are unique.
+  EXPECT_NE(token1.ToInternalValue(), token2.ToInternalValue());
+}
+
+// Expect a default-constructed TaskToken to be invalid and not equal to
+// another invalid TaskToken.
+TEST(TaskTokenTest, InvalidDefaultConstructed) {
+  EXPECT_FALSE(TaskToken().IsValid());
+  EXPECT_NE(TaskToken(), TaskToken());
+}
+
+// Expect a TaskToken returned by TaskToken::GetForCurrentThread() outside the
+// scope of a ScopedSetSequenceTokenForCurrentThread to be invalid.
+TEST(TaskTokenTest, InvalidOutsideScope) {
+  EXPECT_FALSE(TaskToken::GetForCurrentThread().IsValid());
+}
+
+// Expect an invalid TaskToken not to be equal to a valid TaskToken.
+TEST(TaskTokenTest, ValidNotEqualsInvalid) {
+  ScopedSetSequenceTokenForCurrentThread
+      scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
+  TaskToken valid = TaskToken::GetForCurrentThread();
+  TaskToken invalid;
+  EXPECT_NE(valid, invalid);
+}
+
+// Expect TaskTokens returned by TaskToken::GetForCurrentThread() in the scope
+// of the same ScopedSetSequenceTokenForCurrentThread instance to be
+// valid and equal to each other.
+TEST(TaskTokenTest, EqualInSameScope) {
+  ScopedSetSequenceTokenForCurrentThread
+      scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
+
+  const TaskToken token_a = TaskToken::GetForCurrentThread();
+  const TaskToken token_b = TaskToken::GetForCurrentThread();
+
+  EXPECT_TRUE(token_a.IsValid());
+  EXPECT_TRUE(token_b.IsValid());
+  EXPECT_EQ(token_a, token_b);
+}
+
+// Expect TaskTokens returned by TaskToken::GetForCurrentThread() in the scope
+// of different ScopedSetSequenceTokenForCurrentThread instances to be
+// valid but not equal to each other.
+TEST(TaskTokenTest, NotEqualInDifferentScopes) {
+  TaskToken token_a;
+  TaskToken token_b;
+
+  {
+    ScopedSetSequenceTokenForCurrentThread
+        scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
+    token_a = TaskToken::GetForCurrentThread();
+  }
+  {
+    ScopedSetSequenceTokenForCurrentThread
+        scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
+    token_b = TaskToken::GetForCurrentThread();
+  }
+
+  EXPECT_TRUE(token_a.IsValid());
+  EXPECT_TRUE(token_b.IsValid());
+  EXPECT_NE(token_a, token_b);
+}
+
+}  // namespace base
diff --git a/base/sequenced_task_runner.cc b/base/sequenced_task_runner.cc
new file mode 100644
index 0000000..86771c6
--- /dev/null
+++ b/base/sequenced_task_runner.cc
@@ -0,0 +1,38 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sequenced_task_runner.h"
+
+#include <utility>
+
+#include "base/bind.h"
+
+namespace base {
+
+bool SequencedTaskRunner::PostNonNestableTask(const Location& from_here,
+                                              OnceClosure task) {
+  return PostNonNestableDelayedTask(from_here, std::move(task),
+                                    base::TimeDelta());
+}
+
+bool SequencedTaskRunner::DeleteOrReleaseSoonInternal(
+    const Location& from_here,
+    void (*deleter)(const void*),
+    const void* object) {
+  return PostNonNestableTask(from_here, BindOnce(deleter, object));
+}
+
+OnTaskRunnerDeleter::OnTaskRunnerDeleter(
+    scoped_refptr<SequencedTaskRunner> task_runner)
+    : task_runner_(std::move(task_runner)) {
+}
+
+OnTaskRunnerDeleter::~OnTaskRunnerDeleter() = default;
+
+OnTaskRunnerDeleter::OnTaskRunnerDeleter(OnTaskRunnerDeleter&&) = default;
+
+OnTaskRunnerDeleter& OnTaskRunnerDeleter::operator=(
+    OnTaskRunnerDeleter&&) = default;
+
+}  // namespace base
diff --git a/base/sequenced_task_runner.h b/base/sequenced_task_runner.h
new file mode 100644
index 0000000..53d21ad
--- /dev/null
+++ b/base/sequenced_task_runner.h
@@ -0,0 +1,176 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SEQUENCED_TASK_RUNNER_H_
+#define BASE_SEQUENCED_TASK_RUNNER_H_
+
+#include <memory>
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/sequenced_task_runner_helpers.h"
+#include "base/task_runner.h"
+
+namespace base {
+
+// A SequencedTaskRunner is a subclass of TaskRunner that provides
+// additional guarantees on the order in which tasks are started, as
+// well as guarantees that tasks run in sequence, i.e. one task
+// finishes before the next one starts.
+//
+// Summary
+// -------
+// Non-nested tasks with the same delay will run one by one in FIFO
+// order.
+//
+// Detailed guarantees
+// -------------------
+//
+// SequencedTaskRunner also adds additional methods for posting
+// non-nestable tasks.  In general, an implementation of TaskRunner
+// may expose task-running methods which are themselves callable from
+// within tasks.  A non-nestable task is one that is guaranteed to not
+// be run from within an already-running task.  Conversely, a nestable
+// task (the default) is a task that can be run from within an
+// already-running task.
+//
+// The guarantees of SequencedTaskRunner are as follows:
+//
+//   - Given two tasks T2 and T1, T2 will start after T1 starts if:
+//
+//       * T2 is posted after T1; and
+//       * T2 has equal or higher delay than T1; and
+//       * T2 is non-nestable or T1 is nestable.
+//
+//   - If T2 will start after T1 starts by the above guarantee, then
+//     T2 will start after T1 finishes and is destroyed if:
+//
+//       * T2 is non-nestable, or
+//       * T1 doesn't call any task-running methods.
+//
+//   - If T2 will start after T1 finishes by the above guarantee, then
+//     all memory changes in T1 and T1's destruction will be visible
+//     to T2.
+//
+//   - If T2 runs nested within T1 via a call to the task-running
+//     method M, then all memory changes in T1 up to the call to M
+//     will be visible to T2, and all memory changes in T2 will be
+//     visible to T1 from the return from M.
+//
+// Note that SequencedTaskRunner does not guarantee that tasks are run
+// on a single dedicated thread, although the above rules provide
+// most (but not all) of the same benefits.  If you do need to
+// guarantee that tasks are run on a single dedicated thread, see
+// SingleThreadTaskRunner (in single_thread_task_runner.h).
+//
+// Some corollaries to the above guarantees, assuming the tasks in
+// question don't call any task-running methods:
+//
+//   - Tasks posted via PostTask are run in FIFO order.
+//
+//   - Tasks posted via PostNonNestableTask are run in FIFO order.
+//
+//   - Tasks posted with the same delay and the same nestable state
+//     are run in FIFO order.
+//
+//   - A list of tasks with the same nestable state posted in order of
+//     non-decreasing delay is run in FIFO order.
+//
+//   - A list of tasks posted in order of non-decreasing delay with at
+//     most a single change in nestable state from nestable to
+//     non-nestable is run in FIFO order. (This is equivalent to the
+//     statement of the first guarantee above.)
+//
+// Some theoretical implementations of SequencedTaskRunner:
+//
+//   - A SequencedTaskRunner that wraps a regular TaskRunner but makes
+//     sure that only one task at a time is posted to the TaskRunner,
+//     with appropriate memory barriers in between tasks.
+//
+//   - A SequencedTaskRunner that, for each task, spawns a joinable
+//     thread to run that task and immediately quit, and then
+//     immediately joins that thread.
+//
+//   - A SequencedTaskRunner that stores the list of posted tasks and
+//     has a method Run() that runs each runnable task in FIFO order
+//     that can be called from any thread, but only if another
+//     (non-nested) Run() call isn't already happening.
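+//
+// A short sketch of the FIFO corollary (|runner| is assumed to be some
+// existing SequencedTaskRunner):
+//
+//   runner->PostTask(FROM_HERE, base::BindOnce(&TaskA));
+//   runner->PostTask(FROM_HERE, base::BindOnce(&TaskB));
+//   // TaskB will not start until TaskA has finished, though the two may
+//   // run on different threads.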
+class BASE_EXPORT SequencedTaskRunner : public TaskRunner {
+ public:
+  // The two PostNonNestable*Task methods below are like their
+  // nestable equivalents in TaskRunner, but they guarantee that the
+  // posted task will not run nested within an already-running task.
+  //
+  // A simple corollary is that posting a task as non-nestable can
+  // only delay when the task gets run.  That is, posting a task as
+  // non-nestable may not affect when the task gets run, or it could
+  // make it run later than it normally would, but it won't make it
+  // run earlier than it normally would.
+
+  // TODO(akalin): Get rid of the boolean return value for the methods
+  // below.
+
+  bool PostNonNestableTask(const Location& from_here, OnceClosure task);
+
+  virtual bool PostNonNestableDelayedTask(const Location& from_here,
+                                          OnceClosure task,
+                                          base::TimeDelta delay) = 0;
+
+  // Submits a non-nestable task to delete the given object.  Returns
+  // true if the object may be deleted at some point in the future,
+  // and false if the object definitely will not be deleted.
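+  //
+  // Example (a sketch; |runner| is a SequencedTaskRunner and Foo is a
+  // hypothetical class):
+  //
+  //   std::unique_ptr<Foo> foo = std::make_unique<Foo>();
+  //   runner->DeleteSoon(FROM_HERE, std::move(foo));
+  //   // ~Foo() will run as a non-nestable task on |runner|.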
+  template <class T>
+  bool DeleteSoon(const Location& from_here, const T* object) {
+    return DeleteOrReleaseSoonInternal(from_here, &DeleteHelper<T>::DoDelete,
+                                       object);
+  }
+
+  template <class T>
+  bool DeleteSoon(const Location& from_here, std::unique_ptr<T> object) {
+    return DeleteSoon(from_here, object.release());
+  }
+
+  // Submits a non-nestable task to release the given object.  Returns
+  // true if the object may be released at some point in the future,
+  // and false if the object definitely will not be released.
+  template <class T>
+  bool ReleaseSoon(const Location& from_here, const T* object) {
+    return DeleteOrReleaseSoonInternal(from_here, &ReleaseHelper<T>::DoRelease,
+                                       object);
+  }
+
+ protected:
+  ~SequencedTaskRunner() override = default;
+
+ private:
+  bool DeleteOrReleaseSoonInternal(const Location& from_here,
+                                   void (*deleter)(const void*),
+                                   const void* object);
+};
+
+// Sample usage with std::unique_ptr :
+// std::unique_ptr<Foo, base::OnTaskRunnerDeleter> ptr(
+//     new Foo, base::OnTaskRunnerDeleter(my_task_runner));
+//
+// For RefCounted see base::RefCountedDeleteOnSequence.
+struct BASE_EXPORT OnTaskRunnerDeleter {
+  explicit OnTaskRunnerDeleter(scoped_refptr<SequencedTaskRunner> task_runner);
+  ~OnTaskRunnerDeleter();
+
+  OnTaskRunnerDeleter(OnTaskRunnerDeleter&&);
+  OnTaskRunnerDeleter& operator=(OnTaskRunnerDeleter&&);
+
+  // For compatibility with std:: deleters.
+  template <typename T>
+  void operator()(const T* ptr) {
+    if (ptr)
+      task_runner_->DeleteSoon(FROM_HERE, ptr);
+  }
+
+  scoped_refptr<SequencedTaskRunner> task_runner_;
+};
+
+}  // namespace base
+
+#endif  // BASE_SEQUENCED_TASK_RUNNER_H_
diff --git a/base/sequenced_task_runner_helpers.h b/base/sequenced_task_runner_helpers.h
new file mode 100644
index 0000000..18ec0e2
--- /dev/null
+++ b/base/sequenced_task_runner_helpers.h
@@ -0,0 +1,42 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SEQUENCED_TASK_RUNNER_HELPERS_H_
+#define BASE_SEQUENCED_TASK_RUNNER_HELPERS_H_
+
+namespace base {
+
+class SequencedTaskRunner;
+
+// Template helpers which use function indirection to erase T from the
+// function signature while still remembering it so we can call the
+// correct destructor/release function.
+//
+// We use this trick so we don't need to include bind.h in a header
+// file like sequenced_task_runner.h. We also wrap the helpers in a
+// templated class to make it easier for users of DeleteSoon to
+// declare the helper as a friend.
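+//
+// For example (a sketch of the call site), SequencedTaskRunner::DeleteSoon<T>
+// passes &DeleteHelper<T>::DoDelete as a plain void(*)(const void*) together
+// with the object pointer; DoDelete casts the pointer back to const T* before
+// deleting it, so T never appears in the posted task's signature.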
+template <class T>
+class DeleteHelper {
+ private:
+  static void DoDelete(const void* object) {
+    delete static_cast<const T*>(object);
+  }
+
+  friend class SequencedTaskRunner;
+};
+
+template <class T>
+class ReleaseHelper {
+ private:
+  static void DoRelease(const void* object) {
+    static_cast<const T*>(object)->Release();
+  }
+
+  friend class SequencedTaskRunner;
+};
+
+}  // namespace base
+
+#endif  // BASE_SEQUENCED_TASK_RUNNER_HELPERS_H_
diff --git a/base/sequenced_task_runner_unittest.cc b/base/sequenced_task_runner_unittest.cc
new file mode 100644
index 0000000..4dcc7e5
--- /dev/null
+++ b/base/sequenced_task_runner_unittest.cc
@@ -0,0 +1,104 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sequenced_task_runner.h"
+
+#include <utility>
+
+#include "base/bind.h"
+#include "base/gtest_prod_util.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/threading/thread.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+class FlagOnDelete {
+ public:
+  FlagOnDelete(bool* deleted,
+               scoped_refptr<SequencedTaskRunner> expected_deletion_sequence)
+      : deleted_(deleted),
+        expected_deletion_sequence_(std::move(expected_deletion_sequence)) {}
+
+ private:
+  friend class DeleteHelper<FlagOnDelete>;
+  FRIEND_TEST_ALL_PREFIXES(SequencedTaskRunnerTest,
+                           OnTaskRunnerDeleterTargetStoppedEarly);
+
+  ~FlagOnDelete() {
+    EXPECT_FALSE(*deleted_);
+    *deleted_ = true;
+    if (expected_deletion_sequence_)
+      EXPECT_TRUE(expected_deletion_sequence_->RunsTasksInCurrentSequence());
+  }
+
+  bool* deleted_;
+  const scoped_refptr<SequencedTaskRunner> expected_deletion_sequence_;
+
+  DISALLOW_COPY_AND_ASSIGN(FlagOnDelete);
+};
+
+class SequencedTaskRunnerTest : public testing::Test {
+ protected:
+  SequencedTaskRunnerTest() : foreign_thread_("foreign") {}
+
+  void SetUp() override {
+    main_runner_ = message_loop_.task_runner();
+
+    foreign_thread_.Start();
+    foreign_runner_ = foreign_thread_.task_runner();
+  }
+
+  scoped_refptr<SequencedTaskRunner> main_runner_;
+  scoped_refptr<SequencedTaskRunner> foreign_runner_;
+
+  Thread foreign_thread_;
+
+ private:
+  MessageLoop message_loop_;
+
+  DISALLOW_COPY_AND_ASSIGN(SequencedTaskRunnerTest);
+};
+
+using SequenceBoundUniquePtr =
+    std::unique_ptr<FlagOnDelete, OnTaskRunnerDeleter>;
+
+TEST_F(SequencedTaskRunnerTest, OnTaskRunnerDeleterOnMainThread) {
+  bool deleted_on_main_thread = false;
+  SequenceBoundUniquePtr ptr(
+      new FlagOnDelete(&deleted_on_main_thread, main_runner_),
+      OnTaskRunnerDeleter(main_runner_));
+  EXPECT_FALSE(deleted_on_main_thread);
+  foreign_runner_->PostTask(
+      FROM_HERE, BindOnce([](SequenceBoundUniquePtr) {}, std::move(ptr)));
+
+  {
+    RunLoop run_loop;
+    foreign_runner_->PostTaskAndReply(FROM_HERE, BindOnce([] {}),
+                                      run_loop.QuitClosure());
+    run_loop.Run();
+  }
+  EXPECT_TRUE(deleted_on_main_thread);
+}
+
+TEST_F(SequencedTaskRunnerTest, OnTaskRunnerDeleterTargetStoppedEarly) {
+  bool deleted_on_main_thread = false;
+  FlagOnDelete* raw = new FlagOnDelete(&deleted_on_main_thread, main_runner_);
+  SequenceBoundUniquePtr ptr(raw, OnTaskRunnerDeleter(foreign_runner_));
+  EXPECT_FALSE(deleted_on_main_thread);
+
+  // Stopping the target ahead of deleting |ptr| should make its
+  // OnTaskRunnerDeleter a no-op.
+  foreign_thread_.Stop();
+  ptr = nullptr;
+  EXPECT_FALSE(deleted_on_main_thread);
+
+  delete raw;
+  EXPECT_TRUE(deleted_on_main_thread);
+}
+
+}  // namespace
+}  // namespace base
diff --git a/base/sha1.cc b/base/sha1.cc
new file mode 100644
index 0000000..a710001
--- /dev/null
+++ b/base/sha1.cc
@@ -0,0 +1,214 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sha1.h"
+
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+#include "base/sys_byteorder.h"
+
+namespace base {
+
+// Implementation of SHA-1. Only handles data in byte-sized blocks,
+// which simplifies the code a fair bit.
+
+// Identifier names follow notation in FIPS PUB 180-3, where you'll
+// also find a description of the algorithm:
+// http://csrc.nist.gov/publications/fips/fips180-3/fips180-3_final.pdf
+
+// Usage example:
+//
+// SecureHashAlgorithm sha;
+// while (there is data to hash)
+//   sha.Update(moredata, size of data);
+// sha.Final();
+// memcpy(somewhere, sha.Digest(), 20);
+//
+// To reuse the instance of sha, call sha.Init().
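+//
+// A concrete sketch of the above (|data|, |len|, and |kChunk| are assumed
+// locals, not part of this API):
+//
+//   SecureHashAlgorithm sha;
+//   for (size_t off = 0; off < len; off += kChunk)
+//     sha.Update(data + off, std::min(kChunk, len - off));
+//   sha.Final();
+//   unsigned char digest[20];  // 20 == SecureHashAlgorithm::kDigestSizeBytes
+//   memcpy(digest, sha.Digest(), sizeof(digest));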
+
+// TODO(jhawkins): Replace this implementation with a per-platform
+// implementation using each platform's crypto library.  See
+// http://crbug.com/47218
+
+class SecureHashAlgorithm {
+ public:
+  SecureHashAlgorithm() { Init(); }
+
+  static const int kDigestSizeBytes;
+
+  void Init();
+  void Update(const void* data, size_t nbytes);
+  void Final();
+
+  // 20 bytes of message digest.
+  const unsigned char* Digest() const {
+    return reinterpret_cast<const unsigned char*>(H);
+  }
+
+ private:
+  void Pad();
+  void Process();
+
+  uint32_t A, B, C, D, E;
+
+  uint32_t H[5];
+
+  union {
+    uint32_t W[80];
+    uint8_t M[64];
+  };
+
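+  // Index of the next free byte in |M|.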
+  uint32_t cursor;
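+  // Length of the message so far, in bits.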
+  uint64_t l;
+};
+
+static inline uint32_t f(uint32_t t, uint32_t B, uint32_t C, uint32_t D) {
+  if (t < 20) {
+    return (B & C) | ((~B) & D);
+  } else if (t < 40) {
+    return B ^ C ^ D;
+  } else if (t < 60) {
+    return (B & C) | (B & D) | (C & D);
+  } else {
+    return B ^ C ^ D;
+  }
+}
+
+static inline uint32_t S(uint32_t n, uint32_t X) {
+  return (X << n) | (X >> (32-n));
+}
+
+static inline uint32_t K(uint32_t t) {
+  if (t < 20) {
+    return 0x5a827999;
+  } else if (t < 40) {
+    return 0x6ed9eba1;
+  } else if (t < 60) {
+    return 0x8f1bbcdc;
+  } else {
+    return 0xca62c1d6;
+  }
+}
+
+const int SecureHashAlgorithm::kDigestSizeBytes = 20;
+
+void SecureHashAlgorithm::Init() {
+  A = 0;
+  B = 0;
+  C = 0;
+  D = 0;
+  E = 0;
+  cursor = 0;
+  l = 0;
+  H[0] = 0x67452301;
+  H[1] = 0xefcdab89;
+  H[2] = 0x98badcfe;
+  H[3] = 0x10325476;
+  H[4] = 0xc3d2e1f0;
+}
+
+void SecureHashAlgorithm::Final() {
+  Pad();
+  Process();
+
+  for (int t = 0; t < 5; ++t)
+    H[t] = ByteSwap(H[t]);
+}
+
+void SecureHashAlgorithm::Update(const void* data, size_t nbytes) {
+  const uint8_t* d = reinterpret_cast<const uint8_t*>(data);
+  while (nbytes--) {
+    M[cursor++] = *d++;
+    if (cursor >= 64)
+      Process();
+    l += 8;
+  }
+}
+
+void SecureHashAlgorithm::Pad() {
+  M[cursor++] = 0x80;
+
+  if (cursor > 64-8) {
+    // pad out to next block
+    while (cursor < 64)
+      M[cursor++] = 0;
+
+    Process();
+  }
+
+  while (cursor < 64-8)
+    M[cursor++] = 0;
+
+  M[cursor++] = (l >> 56) & 0xff;
+  M[cursor++] = (l >> 48) & 0xff;
+  M[cursor++] = (l >> 40) & 0xff;
+  M[cursor++] = (l >> 32) & 0xff;
+  M[cursor++] = (l >> 24) & 0xff;
+  M[cursor++] = (l >> 16) & 0xff;
+  M[cursor++] = (l >> 8) & 0xff;
+  M[cursor++] = l & 0xff;
+}
+
+void SecureHashAlgorithm::Process() {
+  uint32_t t;
+
+  // Each a...e corresponds to a section in the FIPS 180-3 algorithm.
+
+  // a.
+  //
+  // W and M are in a union, so no need to memcpy.
+  // memcpy(W, M, sizeof(M));
+  for (t = 0; t < 16; ++t)
+    W[t] = ByteSwap(W[t]);
+
+  // b.
+  for (t = 16; t < 80; ++t)
+    W[t] = S(1, W[t - 3] ^ W[t - 8] ^ W[t - 14] ^ W[t - 16]);
+
+  // c.
+  A = H[0];
+  B = H[1];
+  C = H[2];
+  D = H[3];
+  E = H[4];
+
+  // d.
+  for (t = 0; t < 80; ++t) {
+    uint32_t TEMP = S(5, A) + f(t, B, C, D) + E + W[t] + K(t);
+    E = D;
+    D = C;
+    C = S(30, B);
+    B = A;
+    A = TEMP;
+  }
+
+  // e.
+  H[0] += A;
+  H[1] += B;
+  H[2] += C;
+  H[3] += D;
+  H[4] += E;
+
+  cursor = 0;
+}
+
+std::string SHA1HashString(const std::string& str) {
+  char hash[SecureHashAlgorithm::kDigestSizeBytes];
+  SHA1HashBytes(reinterpret_cast<const unsigned char*>(str.c_str()),
+                str.length(), reinterpret_cast<unsigned char*>(hash));
+  return std::string(hash, SecureHashAlgorithm::kDigestSizeBytes);
+}
+
+void SHA1HashBytes(const unsigned char* data, size_t len,
+                   unsigned char* hash) {
+  SecureHashAlgorithm sha;
+  sha.Update(data, len);
+  sha.Final();
+
+  memcpy(hash, sha.Digest(), SecureHashAlgorithm::kDigestSizeBytes);
+}
+
+}  // namespace base
diff --git a/base/sha1.h b/base/sha1.h
new file mode 100644
index 0000000..902e301
--- /dev/null
+++ b/base/sha1.h
@@ -0,0 +1,31 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SHA1_H_
+#define BASE_SHA1_H_
+
+#include <stddef.h>
+
+#include <string>
+
+#include "base/base_export.h"
+
+namespace base {
+
+// These functions perform SHA-1 operations.
+
+static const size_t kSHA1Length = 20;  // Length in bytes of a SHA-1 hash.
+
+// Computes the SHA-1 hash of the input string |str| and returns the full
+// hash.
+BASE_EXPORT std::string SHA1HashString(const std::string& str);
+
+// Computes the SHA-1 hash of the |len| bytes in |data| and puts the hash
+// in |hash|. |hash| must be kSHA1Length bytes long.
+BASE_EXPORT void SHA1HashBytes(const unsigned char* data, size_t len,
+                               unsigned char* hash);
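+
+// Example (a sketch): the returned hash is 20 raw bytes, not a hex string.
+//
+//   std::string digest = base::SHA1HashString("abc");
+//   DCHECK_EQ(base::kSHA1Length, digest.size());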
+
+}  // namespace base
+
+#endif  // BASE_SHA1_H_
diff --git a/base/sha1_unittest.cc b/base/sha1_unittest.cc
new file mode 100644
index 0000000..ea9cf63
--- /dev/null
+++ b/base/sha1_unittest.cc
@@ -0,0 +1,109 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sha1.h"
+
+#include <stddef.h>
+
+#include <string>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+TEST(SHA1Test, Test1) {
+  // Example A.1 from FIPS 180-2: one-block message.
+  std::string input = "abc";
+
+  int expected[] = { 0xa9, 0x99, 0x3e, 0x36,
+                     0x47, 0x06, 0x81, 0x6a,
+                     0xba, 0x3e, 0x25, 0x71,
+                     0x78, 0x50, 0xc2, 0x6c,
+                     0x9c, 0xd0, 0xd8, 0x9d };
+
+  std::string output = base::SHA1HashString(input);
+  for (size_t i = 0; i < base::kSHA1Length; i++)
+    EXPECT_EQ(expected[i], output[i] & 0xFF);
+}
+
+TEST(SHA1Test, Test2) {
+  // Example A.2 from FIPS 180-2: multi-block message.
+  std::string input =
+      "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq";
+
+  int expected[] = { 0x84, 0x98, 0x3e, 0x44,
+                     0x1c, 0x3b, 0xd2, 0x6e,
+                     0xba, 0xae, 0x4a, 0xa1,
+                     0xf9, 0x51, 0x29, 0xe5,
+                     0xe5, 0x46, 0x70, 0xf1 };
+
+  std::string output = base::SHA1HashString(input);
+  for (size_t i = 0; i < base::kSHA1Length; i++)
+    EXPECT_EQ(expected[i], output[i] & 0xFF);
+}
+
+TEST(SHA1Test, Test3) {
+  // Example A.3 from FIPS 180-2: long message.
+  std::string input(1000000, 'a');
+
+  int expected[] = { 0x34, 0xaa, 0x97, 0x3c,
+                     0xd4, 0xc4, 0xda, 0xa4,
+                     0xf6, 0x1e, 0xeb, 0x2b,
+                     0xdb, 0xad, 0x27, 0x31,
+                     0x65, 0x34, 0x01, 0x6f };
+
+  std::string output = base::SHA1HashString(input);
+  for (size_t i = 0; i < base::kSHA1Length; i++)
+    EXPECT_EQ(expected[i], output[i] & 0xFF);
+}
+
+TEST(SHA1Test, Test1Bytes) {
+  // Example A.1 from FIPS 180-2: one-block message.
+  std::string input = "abc";
+  unsigned char output[base::kSHA1Length];
+
+  unsigned char expected[] = { 0xa9, 0x99, 0x3e, 0x36,
+                               0x47, 0x06, 0x81, 0x6a,
+                               0xba, 0x3e, 0x25, 0x71,
+                               0x78, 0x50, 0xc2, 0x6c,
+                               0x9c, 0xd0, 0xd8, 0x9d };
+
+  base::SHA1HashBytes(reinterpret_cast<const unsigned char*>(input.c_str()),
+                      input.length(), output);
+  for (size_t i = 0; i < base::kSHA1Length; i++)
+    EXPECT_EQ(expected[i], output[i]);
+}
+
+TEST(SHA1Test, Test2Bytes) {
+  // Example A.2 from FIPS 180-2: multi-block message.
+  std::string input =
+      "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq";
+  unsigned char output[base::kSHA1Length];
+
+  unsigned char expected[] = { 0x84, 0x98, 0x3e, 0x44,
+                               0x1c, 0x3b, 0xd2, 0x6e,
+                               0xba, 0xae, 0x4a, 0xa1,
+                               0xf9, 0x51, 0x29, 0xe5,
+                               0xe5, 0x46, 0x70, 0xf1 };
+
+  base::SHA1HashBytes(reinterpret_cast<const unsigned char*>(input.c_str()),
+                      input.length(), output);
+  for (size_t i = 0; i < base::kSHA1Length; i++)
+    EXPECT_EQ(expected[i], output[i]);
+}
+
+TEST(SHA1Test, Test3Bytes) {
+  // Example A.3 from FIPS 180-2: long message.
+  std::string input(1000000, 'a');
+  unsigned char output[base::kSHA1Length];
+
+  unsigned char expected[] = { 0x34, 0xaa, 0x97, 0x3c,
+                               0xd4, 0xc4, 0xda, 0xa4,
+                               0xf6, 0x1e, 0xeb, 0x2b,
+                               0xdb, 0xad, 0x27, 0x31,
+                               0x65, 0x34, 0x01, 0x6f };
+
+  base::SHA1HashBytes(reinterpret_cast<const unsigned char*>(input.c_str()),
+                      input.length(), output);
+  for (size_t i = 0; i < base::kSHA1Length; i++)
+    EXPECT_EQ(expected[i], output[i]);
+}
diff --git a/base/single_thread_task_runner.h b/base/single_thread_task_runner.h
new file mode 100644
index 0000000..4d6938e
--- /dev/null
+++ b/base/single_thread_task_runner.h
@@ -0,0 +1,36 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SINGLE_THREAD_TASK_RUNNER_H_
+#define BASE_SINGLE_THREAD_TASK_RUNNER_H_
+
+#include "base/base_export.h"
+#include "base/sequenced_task_runner.h"
+
+namespace base {
+
+// A SingleThreadTaskRunner is a SequencedTaskRunner with one more
+// guarantee; namely, that all tasks are run on a single dedicated
+// thread.  Most use cases require only a SequencedTaskRunner, unless
+// there is a specific need to run tasks on only a single thread.
+//
+// SingleThreadTaskRunner implementations might:
+//   - Post tasks to an existing thread's MessageLoop (see
+//     MessageLoop::task_runner()).
+//   - Create their own worker thread and MessageLoop to post tasks to.
+//   - Add tasks to a FIFO and signal to a non-MessageLoop thread for them to
+//     be processed. This allows TaskRunner-oriented code to run on threads
+//     running other kinds of message loop, e.g. Jingle threads.
+class BASE_EXPORT SingleThreadTaskRunner : public SequencedTaskRunner {
+ public:
+  // A more explicit alias to RunsTasksInCurrentSequence().
+  bool BelongsToCurrentThread() const { return RunsTasksInCurrentSequence(); }
+
+ protected:
+  ~SingleThreadTaskRunner() override = default;
+};
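+
+// Example (a sketch; |runner| is any SingleThreadTaskRunner and DoWork is an
+// assumed callback, not part of this header):
+//
+//   if (runner->BelongsToCurrentThread())
+//     DoWork();
+//   else
+//     runner->PostTask(FROM_HERE, base::BindOnce(&DoWork));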
+
+}  // namespace base
+
+#endif  // BASE_SINGLE_THREAD_TASK_RUNNER_H_
diff --git a/base/stl_util.h b/base/stl_util.h
new file mode 100644
index 0000000..6d521cc
--- /dev/null
+++ b/base/stl_util.h
@@ -0,0 +1,410 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Derived from google3/util/gtl/stl_util.h
+
+#ifndef BASE_STL_UTIL_H_
+#define BASE_STL_UTIL_H_
+
+#include <algorithm>
+#include <deque>
+#include <forward_list>
+#include <functional>
+#include <initializer_list>
+#include <iterator>
+#include <list>
+#include <map>
+#include <set>
+#include <string>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+
+#include "base/logging.h"
+#include "base/optional.h"
+
+namespace base {
+
+namespace internal {
+
+// Calls erase on iterators of matching elements.
+template <typename Container, typename Predicate>
+void IterateAndEraseIf(Container& container, Predicate pred) {
+  for (auto it = container.begin(); it != container.end();) {
+    if (pred(*it))
+      it = container.erase(it);
+    else
+      ++it;
+  }
+}
+
+}  // namespace internal
+
+// C++14 implementation of C++17's std::size():
+// http://en.cppreference.com/w/cpp/iterator/size
+template <typename Container>
+constexpr auto size(const Container& c) -> decltype(c.size()) {
+  return c.size();
+}
+
+template <typename T, size_t N>
+constexpr size_t size(const T (&array)[N]) noexcept {
+  return N;
+}
+
+// C++14 implementation of C++17's std::empty():
+// http://en.cppreference.com/w/cpp/iterator/empty
+template <typename Container>
+constexpr auto empty(const Container& c) -> decltype(c.empty()) {
+  return c.empty();
+}
+
+template <typename T, size_t N>
+constexpr bool empty(const T (&array)[N]) noexcept {
+  return false;
+}
+
+template <typename T>
+constexpr bool empty(std::initializer_list<T> il) noexcept {
+  return il.size() == 0;
+}
+
+// C++14 implementation of C++17's std::data():
+// http://en.cppreference.com/w/cpp/iterator/data
+template <typename Container>
+constexpr auto data(Container& c) -> decltype(c.data()) {
+  return c.data();
+}
+
+// std::basic_string::data() had no mutable overload prior to C++17 [1].
+// Hence this overload is provided.
+// Note: str[0] is safe even for empty strings, as they are guaranteed to be
+// null-terminated [2].
+//
+// [1] http://en.cppreference.com/w/cpp/string/basic_string/data
+// [2] http://en.cppreference.com/w/cpp/string/basic_string/operator_at
+template <typename CharT, typename Traits, typename Allocator>
+CharT* data(std::basic_string<CharT, Traits, Allocator>& str) {
+  return std::addressof(str[0]);
+}
+
+template <typename Container>
+constexpr auto data(const Container& c) -> decltype(c.data()) {
+  return c.data();
+}
+
+template <typename T, size_t N>
+constexpr T* data(T (&array)[N]) noexcept {
+  return array;
+}
+
+template <typename T>
+constexpr const T* data(std::initializer_list<T> il) noexcept {
+  return il.begin();
+}
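+
+// Example (a sketch): the size()/empty()/data() overloads above let generic
+// code treat C arrays and STL containers uniformly:
+//
+//   int raw[] = {1, 2, 3};
+//   std::vector<int> vec = {4, 5};
+//   static_assert(base::size(raw) == 3, "");
+//   DCHECK_EQ(2u, base::size(vec));
+//   DCHECK(base::data(raw) == raw && !base::empty(raw));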
+
+// Returns a const reference to the underlying container of a container adapter.
+// Works for std::priority_queue, std::queue, and std::stack.
+template <class A>
+const typename A::container_type& GetUnderlyingContainer(const A& adapter) {
+  struct ExposedAdapter : A {
+    using A::c;
+  };
+  return adapter.*&ExposedAdapter::c;
+}
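+
+// Example (a sketch):
+//
+//   std::stack<int> s({1, 2, 3});
+//   const std::deque<int>& d = GetUnderlyingContainer(s);  // {1, 2, 3}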
+
+// Clears internal memory of an STL object.
+// STL clear()/reserve(0) does not always free internal memory allocated.
+// This function uses swap/destructor to ensure the internal memory is freed.
+template<class T>
+void STLClearObject(T* obj) {
+  T tmp;
+  tmp.swap(*obj);
+  // Sometimes "T tmp" allocates objects with memory (arena implementation?).
+  // Hence using additional reserve(0) even if it doesn't always work.
+  obj->reserve(0);
+}
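+
+// Example (a sketch):
+//
+//   std::vector<char> blob(1024 * 1024);
+//   blob.clear();            // capacity is typically retained
+//   STLClearObject(&blob);   // the swap trick actually releases the capacity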
+
+// Counts the number of instances of val in a container.
+template <typename Container, typename T>
+typename std::iterator_traits<
+    typename Container::const_iterator>::difference_type
+STLCount(const Container& container, const T& val) {
+  return std::count(container.begin(), container.end(), val);
+}
+
+// Test to see if a set or map contains a particular key.
+// Returns true if the key is in the collection.
+template <typename Collection, typename Key>
+bool ContainsKey(const Collection& collection, const Key& key) {
+  return collection.find(key) != collection.end();
+}
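+
+// Example (a sketch):
+//
+//   std::map<std::string, int> counts = {{"a", 1}};
+//   DCHECK(base::ContainsKey(counts, "a"));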
+
+namespace internal {
+
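+// Detection idiom: |value| is true iff Collection defines a nested key_type.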
+template <typename Collection>
+class HasKeyType {
+  template <typename C>
+  static std::true_type test(typename C::key_type*);
+  template <typename C>
+  static std::false_type test(...);
+
+ public:
+  static constexpr bool value = decltype(test<Collection>(nullptr))::value;
+};
+
+}  // namespace internal
+
+// Test to see if a collection like a vector contains a particular value.
+// Returns true if the value is in the collection.
+// Don't use this on collections such as sets or maps. This is enforced by
+// disabling this method if the collection defines a key_type.
+template <typename Collection,
+          typename Value,
+          typename std::enable_if<!internal::HasKeyType<Collection>::value,
+                                  int>::type = 0>
+bool ContainsValue(const Collection& collection, const Value& value) {
+  return std::find(std::begin(collection), std::end(collection), value) !=
+         std::end(collection);
+}
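+
+// Example (a sketch):
+//
+//   std::vector<int> v = {1, 2, 3};
+//   DCHECK(base::ContainsValue(v, 2));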
+
+// Returns true if the container is sorted.
+template <typename Container>
+bool STLIsSorted(const Container& cont) {
+  // Note: Use reverse iterator on container to ensure we only require
+  // value_type to implement operator<.
+  return std::adjacent_find(cont.rbegin(), cont.rend(),
+                            std::less<typename Container::value_type>())
+      == cont.rend();
+}
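+
+// Example (a sketch; the comparison is non-strict, so equal neighbors are
+// still considered sorted):
+//
+//   std::vector<int> v = {1, 2, 2, 3};
+//   DCHECK(STLIsSorted(v));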
+
+// Returns a new ResultType containing the difference of two sorted containers.
+template <typename ResultType, typename Arg1, typename Arg2>
+ResultType STLSetDifference(const Arg1& a1, const Arg2& a2) {
+  DCHECK(STLIsSorted(a1));
+  DCHECK(STLIsSorted(a2));
+  ResultType difference;
+  std::set_difference(a1.begin(), a1.end(),
+                      a2.begin(), a2.end(),
+                      std::inserter(difference, difference.end()));
+  return difference;
+}
+
+// Returns a new ResultType containing the union of two sorted containers.
+template <typename ResultType, typename Arg1, typename Arg2>
+ResultType STLSetUnion(const Arg1& a1, const Arg2& a2) {
+  DCHECK(STLIsSorted(a1));
+  DCHECK(STLIsSorted(a2));
+  ResultType result;
+  std::set_union(a1.begin(), a1.end(),
+                 a2.begin(), a2.end(),
+                 std::inserter(result, result.end()));
+  return result;
+}
+
+// Returns a new ResultType containing the intersection of two sorted
+// containers.
+template <typename ResultType, typename Arg1, typename Arg2>
+ResultType STLSetIntersection(const Arg1& a1, const Arg2& a2) {
+  DCHECK(STLIsSorted(a1));
+  DCHECK(STLIsSorted(a2));
+  ResultType result;
+  std::set_intersection(a1.begin(), a1.end(),
+                        a2.begin(), a2.end(),
+                        std::inserter(result, result.end()));
+  return result;
+}
+
+// Returns true if the sorted container |a1| contains all elements of the sorted
+// container |a2|.
+template <typename Arg1, typename Arg2>
+bool STLIncludes(const Arg1& a1, const Arg2& a2) {
+  DCHECK(STLIsSorted(a1));
+  DCHECK(STLIsSorted(a2));
+  return std::includes(a1.begin(), a1.end(),
+                       a2.begin(), a2.end());
+}
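+
+// Example (a sketch) of the set operations above:
+//
+//   std::set<int> a = {1, 2, 3};
+//   std::set<int> b = {2, 3, 4};
+//   auto only_a = STLSetDifference<std::set<int>>(a, b);    // {1}
+//   auto both = STLSetIntersection<std::set<int>>(a, b);    // {2, 3}
+//   DCHECK(STLIncludes(a, both));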
+
+// Erase/EraseIf are based on the Library Fundamentals TS v2 erase/erase_if
+// http://en.cppreference.com/w/cpp/experimental/lib_extensions_2
+// They provide a generic way to erase elements from a container.
+// The functions here implement these for the standard containers until those
+// functions are available in the C++ standard.
+// For Chromium containers overloads should be defined in their own headers
+// (like standard containers).
+// Note: there is no std::erase for standard associative containers so we don't
+// have it either.
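+//
+// Example (a sketch):
+//
+//   std::vector<int> v = {1, 2, 2, 3};
+//   base::Erase(v, 2);                              // v == {1, 3}
+//   base::EraseIf(v, [](int x) { return x > 1; });  // v == {1}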
+
+template <typename CharT, typename Traits, typename Allocator, typename Value>
+void Erase(std::basic_string<CharT, Traits, Allocator>& container,
+           const Value& value) {
+  container.erase(std::remove(container.begin(), container.end(), value),
+                  container.end());
+}
+
+template <typename CharT, typename Traits, typename Allocator, class Predicate>
+void EraseIf(std::basic_string<CharT, Traits, Allocator>& container,
+             Predicate pred) {
+  container.erase(std::remove_if(container.begin(), container.end(), pred),
+                  container.end());
+}
+
+template <class T, class Allocator, class Value>
+void Erase(std::deque<T, Allocator>& container, const Value& value) {
+  container.erase(std::remove(container.begin(), container.end(), value),
+                  container.end());
+}
+
+template <class T, class Allocator, class Predicate>
+void EraseIf(std::deque<T, Allocator>& container, Predicate pred) {
+  container.erase(std::remove_if(container.begin(), container.end(), pred),
+                  container.end());
+}
+
+template <class T, class Allocator, class Value>
+void Erase(std::vector<T, Allocator>& container, const Value& value) {
+  container.erase(std::remove(container.begin(), container.end(), value),
+                  container.end());
+}
+
+template <class T, class Allocator, class Predicate>
+void EraseIf(std::vector<T, Allocator>& container, Predicate pred) {
+  container.erase(std::remove_if(container.begin(), container.end(), pred),
+                  container.end());
+}
+
+template <class T, class Allocator, class Value>
+void Erase(std::forward_list<T, Allocator>& container, const Value& value) {
+  // Unlike std::forward_list::remove, this function template accepts
+  // heterogeneous types and does not force a conversion to the container's
+  // value type before invoking the == operator.
+  container.remove_if([&](const T& cur) { return cur == value; });
+}
+
+template <class T, class Allocator, class Predicate>
+void EraseIf(std::forward_list<T, Allocator>& container, Predicate pred) {
+  container.remove_if(pred);
+}
+
+template <class T, class Allocator, class Value>
+void Erase(std::list<T, Allocator>& container, const Value& value) {
+  // Unlike std::list::remove, this function template accepts heterogeneous
+  // types and does not force a conversion to the container's value type before
+  // invoking the == operator.
+  container.remove_if([&](const T& cur) { return cur == value; });
+}
+
+template <class T, class Allocator, class Predicate>
+void EraseIf(std::list<T, Allocator>& container, Predicate pred) {
+  container.remove_if(pred);
+}
+
+template <class Key, class T, class Compare, class Allocator, class Predicate>
+void EraseIf(std::map<Key, T, Compare, Allocator>& container, Predicate pred) {
+  internal::IterateAndEraseIf(container, pred);
+}
+
+template <class Key, class T, class Compare, class Allocator, class Predicate>
+void EraseIf(std::multimap<Key, T, Compare, Allocator>& container,
+             Predicate pred) {
+  internal::IterateAndEraseIf(container, pred);
+}
+
+template <class Key, class Compare, class Allocator, class Predicate>
+void EraseIf(std::set<Key, Compare, Allocator>& container, Predicate pred) {
+  internal::IterateAndEraseIf(container, pred);
+}
+
+template <class Key, class Compare, class Allocator, class Predicate>
+void EraseIf(std::multiset<Key, Compare, Allocator>& container,
+             Predicate pred) {
+  internal::IterateAndEraseIf(container, pred);
+}
+
+template <class Key,
+          class T,
+          class Hash,
+          class KeyEqual,
+          class Allocator,
+          class Predicate>
+void EraseIf(std::unordered_map<Key, T, Hash, KeyEqual, Allocator>& container,
+             Predicate pred) {
+  internal::IterateAndEraseIf(container, pred);
+}
+
+template <class Key,
+          class T,
+          class Hash,
+          class KeyEqual,
+          class Allocator,
+          class Predicate>
+void EraseIf(
+    std::unordered_multimap<Key, T, Hash, KeyEqual, Allocator>& container,
+    Predicate pred) {
+  internal::IterateAndEraseIf(container, pred);
+}
+
+template <class Key,
+          class Hash,
+          class KeyEqual,
+          class Allocator,
+          class Predicate>
+void EraseIf(std::unordered_set<Key, Hash, KeyEqual, Allocator>& container,
+             Predicate pred) {
+  internal::IterateAndEraseIf(container, pred);
+}
+
+template <class Key,
+          class Hash,
+          class KeyEqual,
+          class Allocator,
+          class Predicate>
+void EraseIf(std::unordered_multiset<Key, Hash, KeyEqual, Allocator>& container,
+             Predicate pred) {
+  internal::IterateAndEraseIf(container, pred);
+}
+
+// A helper class to be used as the predicate with |EraseIf| to implement
+// in-place set intersection. Helps implement the algorithm of going through
+// each container an element at a time, erasing elements from the first
+// container if they aren't in the second container. Requires each container be
+// sorted. Note that the logic below appears inverted since it is returning
+// whether an element should be erased.
+template <class Collection>
+class IsNotIn {
+ public:
+  explicit IsNotIn(const Collection& collection)
+      : i_(collection.begin()), end_(collection.end()) {}
+
+  bool operator()(const typename Collection::value_type& x) {
+    while (i_ != end_ && *i_ < x)
+      ++i_;
+    if (i_ == end_)
+      return true;
+    if (*i_ == x) {
+      ++i_;
+      return false;
+    }
+    return true;
+  }
+
+ private:
+  typename Collection::const_iterator i_;
+  const typename Collection::const_iterator end_;
+};
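+
+// Example (a sketch; both containers must be sorted):
+//
+//   std::vector<int> v = {1, 2, 3, 4};
+//   const std::vector<int> keep = {2, 4};
+//   EraseIf(v, IsNotIn<std::vector<int>>(keep));  // v == {2, 4}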
+
+// Helper for returning the optional value's address, or nullptr.
+template <class T>
+T* OptionalOrNullptr(base::Optional<T>& optional) {
+  return optional.has_value() ? &optional.value() : nullptr;
+}
+
+template <class T>
+const T* OptionalOrNullptr(const base::Optional<T>& optional) {
+  return optional.has_value() ? &optional.value() : nullptr;
+}
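+
+// Example (a sketch):
+//
+//   base::Optional<int> maybe;
+//   int* p = base::OptionalOrNullptr(maybe);  // nullptr while |maybe| is empty
+//   maybe = 42;
+//   p = base::OptionalOrNullptr(maybe);       // now points at the stored 42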
+
+}  // namespace base
+
+#endif  // BASE_STL_UTIL_H_
diff --git a/base/stl_util_unittest.cc b/base/stl_util_unittest.cc
new file mode 100644
index 0000000..f13f881
--- /dev/null
+++ b/base/stl_util_unittest.cc
@@ -0,0 +1,612 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/stl_util.h"
+
+#include <array>
+#include <deque>
+#include <forward_list>
+#include <functional>
+#include <initializer_list>
+#include <iterator>
+#include <list>
+#include <map>
+#include <queue>
+#include <set>
+#include <stack>
+#include <string>
+#include <type_traits>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+
+#include "base/containers/queue.h"
+#include "base/strings/string16.h"
+#include "base/strings/utf_string_conversions.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+// Used as a test case to ensure the various base::STLXxx functions don't
+// require more than operators "<" and "==" on values stored in containers.
+class ComparableValue {
+ public:
+  explicit ComparableValue(int value) : value_(value) {}
+
+  bool operator==(const ComparableValue& rhs) const {
+    return value_ == rhs.value_;
+  }
+
+  bool operator<(const ComparableValue& rhs) const {
+    return value_ < rhs.value_;
+  }
+
+ private:
+  int value_;
+};
+
+template <typename Container>
+void RunEraseTest() {
+  const std::pair<Container, Container> test_data[] = {
+      {Container(), Container()}, {{1, 2, 3}, {1, 3}}, {{1, 2, 3, 2}, {1, 3}}};
+
+  for (auto test_case : test_data) {
+    base::Erase(test_case.first, 2);
+    EXPECT_EQ(test_case.second, test_case.first);
+  }
+}
+
+// This test is written for containers of std::pair<int, int> to support maps.
+template <typename Container>
+void RunEraseIfTest() {
+  struct {
+    Container input;
+    Container erase_even;
+    Container erase_odd;
+  } test_data[] = {
+      {Container(), Container(), Container()},
+      {{{1, 1}, {2, 2}, {3, 3}}, {{1, 1}, {3, 3}}, {{2, 2}}},
+      {{{1, 1}, {2, 2}, {3, 3}, {4, 4}}, {{1, 1}, {3, 3}}, {{2, 2}, {4, 4}}},
+  };
+
+  for (auto test_case : test_data) {
+    base::EraseIf(test_case.input, [](const std::pair<int, int>& elem) {
+      return !(elem.first & 1);
+    });
+    EXPECT_EQ(test_case.erase_even, test_case.input);
+  }
+
+  for (auto test_case : test_data) {
+    base::EraseIf(test_case.input, [](const std::pair<int, int>& elem) {
+      return elem.first & 1;
+    });
+    EXPECT_EQ(test_case.erase_odd, test_case.input);
+  }
+}
+
+struct CustomIntHash {
+  size_t operator()(int elem) const { return std::hash<int>()(elem) + 1; }
+};
+
+struct HashByFirst {
+  size_t operator()(const std::pair<int, int>& elem) const {
+    return std::hash<int>()(elem.first);
+  }
+};
+
+}  // namespace
+
+namespace base {
+namespace {
+
+TEST(STLUtilTest, Size) {
+  {
+    std::vector<int> vector = {1, 2, 3, 4, 5};
+    static_assert(
+        std::is_same<decltype(base::size(vector)),
+                     decltype(vector.size())>::value,
+        "base::size(vector) should have the same type as vector.size()");
+    EXPECT_EQ(vector.size(), base::size(vector));
+  }
+
+  {
+    std::string empty_str;
+    static_assert(
+        std::is_same<decltype(base::size(empty_str)),
+                     decltype(empty_str.size())>::value,
+        "base::size(empty_str) should have the same type as empty_str.size()");
+    EXPECT_EQ(0u, base::size(empty_str));
+  }
+
+  {
+    std::array<int, 4> array = {{1, 2, 3, 4}};
+    static_assert(
+        std::is_same<decltype(base::size(array)),
+                     decltype(array.size())>::value,
+        "base::size(array) should have the same type as array.size()");
+    static_assert(base::size(array) == array.size(),
+                  "base::size(array) should be equal to array.size()");
+  }
+
+  {
+    int array[] = {1, 2, 3};
+    static_assert(std::is_same<size_t, decltype(base::size(array))>::value,
+                  "base::size(array) should be of type size_t");
+    static_assert(3u == base::size(array), "base::size(array) should be 3");
+  }
+}
+
+TEST(STLUtilTest, Empty) {
+  {
+    std::vector<int> vector;
+    static_assert(
+        std::is_same<decltype(base::empty(vector)),
+                     decltype(vector.empty())>::value,
+        "base::empty(vector) should have the same type as vector.empty()");
+    EXPECT_EQ(vector.empty(), base::empty(vector));
+  }
+
+  {
+    std::array<int, 4> array = {{1, 2, 3, 4}};
+    static_assert(
+        std::is_same<decltype(base::empty(array)),
+                     decltype(array.empty())>::value,
+        "base::empty(array) should have the same type as array.empty()");
+    static_assert(base::empty(array) == array.empty(),
+                  "base::empty(array) should be equal to array.empty()");
+  }
+
+  {
+    int array[] = {1, 2, 3};
+    static_assert(std::is_same<bool, decltype(base::empty(array))>::value,
+                  "base::empty(array) should be of type bool");
+    static_assert(!base::empty(array), "base::empty(array) should be false");
+  }
+
+  {
+    constexpr std::initializer_list<int> il;
+    static_assert(std::is_same<bool, decltype(base::empty(il))>::value,
+                  "base::empty(il) should be of type bool");
+    static_assert(base::empty(il), "base::empty(il) should be true");
+  }
+}
+
+TEST(STLUtilTest, Data) {
+  {
+    std::vector<int> vector = {1, 2, 3, 4, 5};
+    static_assert(
+        std::is_same<decltype(base::data(vector)),
+                     decltype(vector.data())>::value,
+        "base::data(vector) should have the same type as vector.data()");
+    EXPECT_EQ(vector.data(), base::data(vector));
+  }
+
+  {
+    const std::string cstr = "const string";
+    static_assert(
+        std::is_same<decltype(base::data(cstr)), decltype(cstr.data())>::value,
+        "base::data(cstr) should have the same type as cstr.data()");
+
+    EXPECT_EQ(cstr.data(), base::data(cstr));
+  }
+
+  {
+    std::string str = "mutable string";
+    static_assert(std::is_same<decltype(base::data(str)), char*>::value,
+                  "base::data(str) should be of type char*");
+    EXPECT_EQ(str.data(), base::data(str));
+  }
+
+  {
+    std::string empty_str;
+    static_assert(std::is_same<decltype(base::data(empty_str)), char*>::value,
+                  "base::data(empty_str) should be of type char*");
+    EXPECT_EQ(empty_str.data(), base::data(empty_str));
+  }
+
+  {
+    std::array<int, 4> array = {{1, 2, 3, 4}};
+    static_assert(
+        std::is_same<decltype(base::data(array)),
+                     decltype(array.data())>::value,
+        "base::data(array) should have the same type as array.data()");
+    // std::array::data() is not constexpr prior to C++17, hence the runtime
+    // check.
+    EXPECT_EQ(array.data(), base::data(array));
+  }
+
+  {
+    constexpr int array[] = {1, 2, 3};
+    static_assert(std::is_same<const int*, decltype(base::data(array))>::value,
+                  "base::data(array) should be of type const int*");
+    static_assert(array == base::data(array),
+                  "base::data(array) should be array");
+  }
+
+  {
+    constexpr std::initializer_list<int> il;
+    static_assert(
+        std::is_same<decltype(il.begin()), decltype(base::data(il))>::value,
+        "base::data(il) should have the same type as il.begin()");
+    static_assert(il.begin() == base::data(il),
+                  "base::data(il) should be equal to il.begin()");
+  }
+}
+
+TEST(STLUtilTest, GetUnderlyingContainer) {
+  {
+    std::queue<int> queue({1, 2, 3, 4, 5});
+    static_assert(std::is_same<decltype(GetUnderlyingContainer(queue)),
+                               const std::deque<int>&>::value,
+                  "GetUnderlyingContainer(queue) should be of type deque");
+    EXPECT_THAT(GetUnderlyingContainer(queue),
+                testing::ElementsAre(1, 2, 3, 4, 5));
+  }
+
+  {
+    std::queue<int> queue;
+    EXPECT_THAT(GetUnderlyingContainer(queue), testing::ElementsAre());
+  }
+
+  {
+    base::queue<int> queue({1, 2, 3, 4, 5});
+    static_assert(
+        std::is_same<decltype(GetUnderlyingContainer(queue)),
+                     const base::circular_deque<int>&>::value,
+        "GetUnderlyingContainer(queue) should be of type circular_deque");
+    EXPECT_THAT(GetUnderlyingContainer(queue),
+                testing::ElementsAre(1, 2, 3, 4, 5));
+  }
+
+  {
+    std::vector<int> values = {1, 2, 3, 4, 5};
+    std::priority_queue<int> queue(values.begin(), values.end());
+    static_assert(std::is_same<decltype(GetUnderlyingContainer(queue)),
+                               const std::vector<int>&>::value,
+                  "GetUnderlyingContainer(queue) should be of type vector");
+    EXPECT_THAT(GetUnderlyingContainer(queue),
+                testing::UnorderedElementsAre(1, 2, 3, 4, 5));
+  }
+
+  {
+    std::stack<int> stack({1, 2, 3, 4, 5});
+    static_assert(std::is_same<decltype(GetUnderlyingContainer(stack)),
+                               const std::deque<int>&>::value,
+                  "GetUnderlyingContainer(stack) should be of type deque");
+    EXPECT_THAT(GetUnderlyingContainer(stack),
+                testing::ElementsAre(1, 2, 3, 4, 5));
+  }
+}
+
+TEST(STLUtilTest, STLIsSorted) {
+  {
+    std::set<int> set;
+    set.insert(24);
+    set.insert(1);
+    set.insert(12);
+    EXPECT_TRUE(STLIsSorted(set));
+  }
+
+  {
+    std::set<ComparableValue> set;
+    set.insert(ComparableValue(24));
+    set.insert(ComparableValue(1));
+    set.insert(ComparableValue(12));
+    EXPECT_TRUE(STLIsSorted(set));
+  }
+
+  {
+    std::vector<int> vector;
+    vector.push_back(1);
+    vector.push_back(1);
+    vector.push_back(4);
+    vector.push_back(64);
+    vector.push_back(12432);
+    EXPECT_TRUE(STLIsSorted(vector));
+    vector.back() = 1;
+    EXPECT_FALSE(STLIsSorted(vector));
+  }
+}
+
+TEST(STLUtilTest, STLSetDifference) {
+  std::set<int> a1;
+  a1.insert(1);
+  a1.insert(2);
+  a1.insert(3);
+  a1.insert(4);
+
+  std::set<int> a2;
+  a2.insert(3);
+  a2.insert(4);
+  a2.insert(5);
+  a2.insert(6);
+  a2.insert(7);
+
+  {
+    std::set<int> difference;
+    difference.insert(1);
+    difference.insert(2);
+    EXPECT_EQ(difference, STLSetDifference<std::set<int> >(a1, a2));
+  }
+
+  {
+    std::set<int> difference;
+    difference.insert(5);
+    difference.insert(6);
+    difference.insert(7);
+    EXPECT_EQ(difference, STLSetDifference<std::set<int> >(a2, a1));
+  }
+
+  {
+    std::vector<int> difference;
+    difference.push_back(1);
+    difference.push_back(2);
+    EXPECT_EQ(difference, STLSetDifference<std::vector<int> >(a1, a2));
+  }
+
+  {
+    std::vector<int> difference;
+    difference.push_back(5);
+    difference.push_back(6);
+    difference.push_back(7);
+    EXPECT_EQ(difference, STLSetDifference<std::vector<int> >(a2, a1));
+  }
+}
+
+TEST(STLUtilTest, STLSetUnion) {
+  std::set<int> a1;
+  a1.insert(1);
+  a1.insert(2);
+  a1.insert(3);
+  a1.insert(4);
+
+  std::set<int> a2;
+  a2.insert(3);
+  a2.insert(4);
+  a2.insert(5);
+  a2.insert(6);
+  a2.insert(7);
+
+  {
+    std::set<int> result;
+    result.insert(1);
+    result.insert(2);
+    result.insert(3);
+    result.insert(4);
+    result.insert(5);
+    result.insert(6);
+    result.insert(7);
+    EXPECT_EQ(result, STLSetUnion<std::set<int> >(a1, a2));
+  }
+
+  {
+    std::set<int> result;
+    result.insert(1);
+    result.insert(2);
+    result.insert(3);
+    result.insert(4);
+    result.insert(5);
+    result.insert(6);
+    result.insert(7);
+    EXPECT_EQ(result, STLSetUnion<std::set<int> >(a2, a1));
+  }
+
+  {
+    std::vector<int> result;
+    result.push_back(1);
+    result.push_back(2);
+    result.push_back(3);
+    result.push_back(4);
+    result.push_back(5);
+    result.push_back(6);
+    result.push_back(7);
+    EXPECT_EQ(result, STLSetUnion<std::vector<int> >(a1, a2));
+  }
+
+  {
+    std::vector<int> result;
+    result.push_back(1);
+    result.push_back(2);
+    result.push_back(3);
+    result.push_back(4);
+    result.push_back(5);
+    result.push_back(6);
+    result.push_back(7);
+    EXPECT_EQ(result, STLSetUnion<std::vector<int> >(a2, a1));
+  }
+}
+
+TEST(STLUtilTest, STLSetIntersection) {
+  std::set<int> a1;
+  a1.insert(1);
+  a1.insert(2);
+  a1.insert(3);
+  a1.insert(4);
+
+  std::set<int> a2;
+  a2.insert(3);
+  a2.insert(4);
+  a2.insert(5);
+  a2.insert(6);
+  a2.insert(7);
+
+  {
+    std::set<int> result;
+    result.insert(3);
+    result.insert(4);
+    EXPECT_EQ(result, STLSetIntersection<std::set<int> >(a1, a2));
+  }
+
+  {
+    std::set<int> result;
+    result.insert(3);
+    result.insert(4);
+    EXPECT_EQ(result, STLSetIntersection<std::set<int> >(a2, a1));
+  }
+
+  {
+    std::vector<int> result;
+    result.push_back(3);
+    result.push_back(4);
+    EXPECT_EQ(result, STLSetIntersection<std::vector<int> >(a1, a2));
+  }
+
+  {
+    std::vector<int> result;
+    result.push_back(3);
+    result.push_back(4);
+    EXPECT_EQ(result, STLSetIntersection<std::vector<int> >(a2, a1));
+  }
+}
+
+TEST(STLUtilTest, STLIncludes) {
+  std::set<int> a1;
+  a1.insert(1);
+  a1.insert(2);
+  a1.insert(3);
+  a1.insert(4);
+
+  std::set<int> a2;
+  a2.insert(3);
+  a2.insert(4);
+
+  std::set<int> a3;
+  a3.insert(3);
+  a3.insert(4);
+  a3.insert(5);
+
+  EXPECT_TRUE(STLIncludes<std::set<int> >(a1, a2));
+  EXPECT_FALSE(STLIncludes<std::set<int> >(a1, a3));
+  EXPECT_FALSE(STLIncludes<std::set<int> >(a2, a1));
+  EXPECT_FALSE(STLIncludes<std::set<int> >(a2, a3));
+  EXPECT_FALSE(STLIncludes<std::set<int> >(a3, a1));
+  EXPECT_TRUE(STLIncludes<std::set<int> >(a3, a2));
+}
+
+TEST(Erase, String) {
+  const std::pair<std::string, std::string> test_data[] = {
+      {"", ""}, {"abc", "bc"}, {"abca", "bc"},
+  };
+
+  for (auto test_case : test_data) {
+    Erase(test_case.first, 'a');
+    EXPECT_EQ(test_case.second, test_case.first);
+  }
+
+  for (auto test_case : test_data) {
+    EraseIf(test_case.first, [](char elem) { return elem < 'b'; });
+    EXPECT_EQ(test_case.second, test_case.first);
+  }
+}
+
+TEST(Erase, String16) {
+  std::pair<base::string16, base::string16> test_data[] = {
+      {base::string16(), base::string16()},
+      {UTF8ToUTF16("abc"), UTF8ToUTF16("bc")},
+      {UTF8ToUTF16("abca"), UTF8ToUTF16("bc")},
+  };
+
+  const base::string16 letters = UTF8ToUTF16("ab");
+  for (auto test_case : test_data) {
+    Erase(test_case.first, letters[0]);
+    EXPECT_EQ(test_case.second, test_case.first);
+  }
+
+  for (auto test_case : test_data) {
+    EraseIf(test_case.first, [&](short elem) { return elem < letters[1]; });
+    EXPECT_EQ(test_case.second, test_case.first);
+  }
+}
+
+TEST(Erase, Deque) {
+  RunEraseTest<std::deque<int>>();
+  RunEraseIfTest<std::deque<std::pair<int, int>>>();
+}
+
+TEST(Erase, Vector) {
+  RunEraseTest<std::vector<int>>();
+  RunEraseIfTest<std::vector<std::pair<int, int>>>();
+}
+
+TEST(Erase, ForwardList) {
+  RunEraseTest<std::forward_list<int>>();
+  RunEraseIfTest<std::forward_list<std::pair<int, int>>>();
+}
+
+TEST(Erase, List) {
+  RunEraseTest<std::list<int>>();
+  RunEraseIfTest<std::list<std::pair<int, int>>>();
+}
+
+TEST(Erase, Map) {
+  RunEraseIfTest<std::map<int, int>>();
+  RunEraseIfTest<std::map<int, int, std::greater<int>>>();
+}
+
+TEST(Erase, Multimap) {
+  RunEraseIfTest<std::multimap<int, int>>();
+  RunEraseIfTest<std::multimap<int, int, std::greater<int>>>();
+}
+
+TEST(Erase, Set) {
+  RunEraseIfTest<std::set<std::pair<int, int>>>();
+  RunEraseIfTest<
+      std::set<std::pair<int, int>, std::greater<std::pair<int, int>>>>();
+}
+
+TEST(Erase, Multiset) {
+  RunEraseIfTest<std::multiset<std::pair<int, int>>>();
+  RunEraseIfTest<
+      std::multiset<std::pair<int, int>, std::greater<std::pair<int, int>>>>();
+}
+
+TEST(Erase, UnorderedMap) {
+  RunEraseIfTest<std::unordered_map<int, int>>();
+  RunEraseIfTest<std::unordered_map<int, int, CustomIntHash>>();
+}
+
+TEST(Erase, UnorderedMultimap) {
+  RunEraseIfTest<std::unordered_multimap<int, int>>();
+  RunEraseIfTest<std::unordered_multimap<int, int, CustomIntHash>>();
+}
+
+TEST(Erase, UnorderedSet) {
+  RunEraseIfTest<std::unordered_set<std::pair<int, int>, HashByFirst>>();
+}
+
+TEST(Erase, UnorderedMultiset) {
+  RunEraseIfTest<std::unordered_multiset<std::pair<int, int>, HashByFirst>>();
+}
+
+TEST(Erase, IsNotIn) {
+  // Should keep both '2' but only one '4', like std::set_intersection.
+  std::vector<int> lhs = {0, 2, 2, 4, 4, 4, 6, 8, 10};
+  std::vector<int> rhs = {1, 2, 2, 4, 5, 6, 7};
+  std::vector<int> expected = {2, 2, 4, 6};
+  EraseIf(lhs, IsNotIn<std::vector<int>>(rhs));
+  EXPECT_EQ(expected, lhs);
+}
+
+TEST(ContainsValue, OrdinaryArrays) {
+  const char allowed_chars[] = {'a', 'b', 'c', 'd'};
+  EXPECT_TRUE(ContainsValue(allowed_chars, 'a'));
+  EXPECT_FALSE(ContainsValue(allowed_chars, 'z'));
+  EXPECT_FALSE(ContainsValue(allowed_chars, 0));
+
+  const char allowed_chars_including_nul[] = "abcd";
+  EXPECT_TRUE(ContainsValue(allowed_chars_including_nul, 0));
+}
+
+TEST(STLUtilTest, OptionalOrNullptr) {
+  Optional<float> optional;
+  EXPECT_EQ(nullptr, base::OptionalOrNullptr(optional));
+
+  optional = 0.1f;
+  EXPECT_EQ(&optional.value(), base::OptionalOrNullptr(optional));
+  EXPECT_NE(nullptr, base::OptionalOrNullptr(optional));
+}
+
+}  // namespace
+}  // namespace base
diff --git a/base/strings/OWNERS b/base/strings/OWNERS
new file mode 100644
index 0000000..5381872
--- /dev/null
+++ b/base/strings/OWNERS
@@ -0,0 +1,2 @@
+per-file safe_sprintf*=jln@chromium.org
+per-file safe_sprintf*=mdempsky@chromium.org
diff --git a/base/strings/char_traits.h b/base/strings/char_traits.h
new file mode 100644
index 0000000..b193e21
--- /dev/null
+++ b/base/strings/char_traits.h
@@ -0,0 +1,92 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_CHAR_TRAITS_H_
+#define BASE_STRINGS_CHAR_TRAITS_H_
+
+#include <stddef.h>
+
+#include "base/compiler_specific.h"
+
+namespace base {
+
+// constexpr version of http://en.cppreference.com/w/cpp/string/char_traits.
+// This currently just implements the bits needed to support a (mostly)
+// constexpr StringPiece.
+//
+// TODO(dcheng): Once we switch to C++17, most methods will become constexpr and
+// we can switch over to using the one in the standard library.
+template <typename T>
+struct CharTraits {
+  // Performs a lexicographical comparison of the first N characters of |s1|
+  // and |s2|. Returns 0 if equal, -1 if |s1| is less than |s2|, and 1 if
+  // |s1| is greater than |s2|.
+  static constexpr int compare(const T* s1, const T* s2, size_t n) noexcept;
+
+  // Returns the length of |s|, assuming null termination (and not including the
+  // terminating null).
+  static constexpr size_t length(const T* s) noexcept;
+};
+
+template <typename T>
+constexpr int CharTraits<T>::compare(const T* s1,
+                                     const T* s2,
+                                     size_t n) noexcept {
+  for (; n; --n, ++s1, ++s2) {
+    if (*s1 < *s2)
+      return -1;
+    if (*s1 > *s2)
+      return 1;
+  }
+  return 0;
+}
+
+template <typename T>
+constexpr size_t CharTraits<T>::length(const T* s) noexcept {
+  size_t i = 0;
+  for (; *s; ++s)
+    ++i;
+  return i;
+}
+
+// char specialization of CharTraits that can use clang's constexpr intrinsics,
+// where available.
+template <>
+struct CharTraits<char> {
+  static constexpr int compare(const char* s1,
+                               const char* s2,
+                               size_t n) noexcept;
+  static constexpr size_t length(const char* s) noexcept;
+};
+
+constexpr int CharTraits<char>::compare(const char* s1,
+                                        const char* s2,
+                                        size_t n) noexcept {
+#if HAS_FEATURE(cxx_constexpr_string_builtins)
+  return __builtin_memcmp(s1, s2, n);
+#else
+  for (; n; --n, ++s1, ++s2) {
+    if (*s1 < *s2)
+      return -1;
+    if (*s1 > *s2)
+      return 1;
+  }
+  return 0;
+#endif
+}
+
+constexpr size_t CharTraits<char>::length(const char* s) noexcept {
+#if defined(__clang__)
+  return __builtin_strlen(s);
+#else
+  size_t i = 0;
+  for (; *s; ++s)
+    ++i;
+  return i;
+#endif
+}
+
+}  // namespace base
+
+#endif  // BASE_STRINGS_CHAR_TRAITS_H_
diff --git a/base/strings/char_traits_unittest.cc b/base/strings/char_traits_unittest.cc
new file mode 100644
index 0000000..31c421b
--- /dev/null
+++ b/base/strings/char_traits_unittest.cc
@@ -0,0 +1,32 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/char_traits.h"
+#include "base/strings/string16.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(CharTraitsTest, CharCompare) {
+  static_assert(CharTraits<char>::compare("abc", "def", 3) == -1, "");
+  static_assert(CharTraits<char>::compare("def", "def", 3) == 0, "");
+  static_assert(CharTraits<char>::compare("ghi", "def", 3) == 1, "");
+}
+
+TEST(CharTraitsTest, CharLength) {
+  static_assert(CharTraits<char>::length("") == 0, "");
+  static_assert(CharTraits<char>::length("abc") == 3, "");
+}
+
+TEST(CharTraitsTest, Char16TCompare) {
+  static_assert(CharTraits<char16_t>::compare(u"abc", u"def", 3) == -1, "");
+  static_assert(CharTraits<char16_t>::compare(u"def", u"def", 3) == 0, "");
+  static_assert(CharTraits<char16_t>::compare(u"ghi", u"def", 3) == 1, "");
+}
+
+TEST(CharTraitsTest, Char16TLength) {
+  static_assert(CharTraits<char16_t>::length(u"abc") == 3, "");
+}
+
+}  // namespace base
diff --git a/base/strings/latin1_string_conversions.cc b/base/strings/latin1_string_conversions.cc
new file mode 100644
index 0000000..dca62ce
--- /dev/null
+++ b/base/strings/latin1_string_conversions.cc
@@ -0,0 +1,19 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/latin1_string_conversions.h"
+
+namespace base {
+
+string16 Latin1OrUTF16ToUTF16(size_t length,
+                              const Latin1Char* latin1,
+                              const char16* utf16) {
+  if (!length)
+    return string16();
+  if (latin1)
+    return string16(latin1, latin1 + length);
+  return string16(utf16, utf16 + length);
+}
+
+}  // namespace base
diff --git a/base/strings/latin1_string_conversions.h b/base/strings/latin1_string_conversions.h
new file mode 100644
index 0000000..42113ef
--- /dev/null
+++ b/base/strings/latin1_string_conversions.h
@@ -0,0 +1,34 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_LATIN1_STRING_CONVERSIONS_H_
+#define BASE_STRINGS_LATIN1_STRING_CONVERSIONS_H_
+
+#include <stddef.h>
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/strings/string16.h"
+
+namespace base {
+
+// This definition of Latin1Char matches the definition of LChar in Blink. We
+// use unsigned char rather than char to make it less tempting to mix and
+// match Latin-1 and UTF-8 characters.
+typedef unsigned char Latin1Char;
+
+// This somewhat odd function is designed to help us convert from Blink Strings
+// to string16. A Blink string is either backed by an array of Latin-1
+// characters or an array of UTF-16 characters. This function is called by
+// WebString::operator string16() to convert one or the other character array
+// to string16. This function is defined here rather than in WebString.h to
+// avoid binary bloat in all the callers of the conversion operator.
+BASE_EXPORT string16 Latin1OrUTF16ToUTF16(size_t length,
+                                          const Latin1Char* latin1,
+                                          const char16* utf16);
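+
+// Example (a sketch; for a non-empty string exactly one of |latin1| / |utf16|
+// is expected to be non-null):
+//
+//   string16 s = Latin1OrUTF16ToUTF16(length, latin1_data, nullptr);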
+
+}  // namespace base
+
+#endif  // BASE_STRINGS_LATIN1_STRING_CONVERSIONS_H_
diff --git a/base/strings/nullable_string16.cc b/base/strings/nullable_string16.cc
new file mode 100644
index 0000000..076b282
--- /dev/null
+++ b/base/strings/nullable_string16.cc
@@ -0,0 +1,33 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/nullable_string16.h"
+
+#include <ostream>
+#include <utility>
+
+namespace base {
+NullableString16::NullableString16() = default;
+NullableString16::NullableString16(const NullableString16& other) = default;
+NullableString16::NullableString16(NullableString16&& other) = default;
+
+NullableString16::NullableString16(const string16& string, bool is_null) {
+  if (!is_null)
+    string_.emplace(string);
+}
+
+NullableString16::NullableString16(Optional<string16> optional_string16)
+    : string_(std::move(optional_string16)) {}
+
+NullableString16::~NullableString16() = default;
+NullableString16& NullableString16::operator=(const NullableString16& other) =
+    default;
+NullableString16& NullableString16::operator=(NullableString16&& other) =
+    default;
+
+std::ostream& operator<<(std::ostream& out, const NullableString16& value) {
+  return value.is_null() ? out << "(null)" : out << value.string();
+}
+
+}  // namespace base
diff --git a/base/strings/nullable_string16.h b/base/strings/nullable_string16.h
new file mode 100644
index 0000000..abddee0
--- /dev/null
+++ b/base/strings/nullable_string16.h
@@ -0,0 +1,55 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_NULLABLE_STRING16_H_
+#define BASE_STRINGS_NULLABLE_STRING16_H_
+
+#include <iosfwd>
+
+#include "base/base_export.h"
+#include "base/optional.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_util.h"
+
+namespace base {
+
+// This class is a simple wrapper for string16 which also contains a null
+// state.  This should be used only where the difference between null and
+// empty is meaningful.
+class BASE_EXPORT NullableString16 {
+ public:
+  NullableString16();
+  NullableString16(const NullableString16& other);
+  NullableString16(NullableString16&& other);
+  NullableString16(const string16& string, bool is_null);
+  explicit NullableString16(Optional<string16> optional_string16);
+  ~NullableString16();
+
+  NullableString16& operator=(const NullableString16& other);
+  NullableString16& operator=(NullableString16&& other);
+
+  const string16& string() const {
+    return string_ ? *string_ : EmptyString16();
+  }
+  bool is_null() const { return !string_; }
+  const Optional<string16>& as_optional_string16() const { return string_; }
+
+ private:
+  Optional<string16> string_;
+};
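+
+// Example (a sketch): null and empty are distinct values:
+//
+//   NullableString16 null_str;                                  // is_null()
+//   NullableString16 empty_str(string16(), /*is_null=*/false);  // empty
+//   DCHECK(null_str != empty_str);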
+
+inline bool operator==(const NullableString16& a, const NullableString16& b) {
+  return a.as_optional_string16() == b.as_optional_string16();
+}
+
+inline bool operator!=(const NullableString16& a, const NullableString16& b) {
+  return !(a == b);
+}
+
+BASE_EXPORT std::ostream& operator<<(std::ostream& out,
+                                     const NullableString16& value);
+
+}  // namespace base
+
+#endif  // BASE_STRINGS_NULLABLE_STRING16_H_
diff --git a/base/strings/nullable_string16_unittest.cc b/base/strings/nullable_string16_unittest.cc
new file mode 100644
index 0000000..f02fdce
--- /dev/null
+++ b/base/strings/nullable_string16_unittest.cc
@@ -0,0 +1,35 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/nullable_string16.h"
+#include "base/strings/utf_string_conversions.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(NullableString16Test, DefaultConstructor) {
+  NullableString16 s;
+  EXPECT_TRUE(s.is_null());
+  EXPECT_EQ(string16(), s.string());
+}
+
+TEST(NullableString16Test, Equals) {
+  NullableString16 a(ASCIIToUTF16("hello"), false);
+  NullableString16 b(ASCIIToUTF16("hello"), false);
+  EXPECT_EQ(a, b);
+}
+
+TEST(NullableString16Test, NotEquals) {
+  NullableString16 a(ASCIIToUTF16("hello"), false);
+  NullableString16 b(ASCIIToUTF16("world"), false);
+  EXPECT_NE(a, b);
+}
+
+TEST(NullableString16Test, NotEqualsNull) {
+  NullableString16 a(ASCIIToUTF16("hello"), false);
+  NullableString16 b;
+  EXPECT_NE(a, b);
+}
+
+}  // namespace base
diff --git a/base/strings/old_utf_string_conversions.cc b/base/strings/old_utf_string_conversions.cc
new file mode 100644
index 0000000..5cab038
--- /dev/null
+++ b/base/strings/old_utf_string_conversions.cc
@@ -0,0 +1,262 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/old_utf_string_conversions.h"
+
+#include <stdint.h>
+
+#include "base/strings/string_piece.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversion_utils.h"
+#include "build/build_config.h"
+
+namespace base_old {
+
+using base::IsStringASCII;
+using base::ReadUnicodeCharacter;
+using base::WriteUnicodeCharacter;
+
+template<typename CHAR>
+void PrepareForUTF8Output(const CHAR* src,
+                          size_t src_len,
+                          std::string* output) {
+  output->clear();
+  if (src_len == 0)
+    return;
+  if (src[0] < 0x80) {
+    // Assume that the entire input will be ASCII.
+    output->reserve(src_len);
+  } else {
+    // Assume that the entire input is non-ASCII and will have 3 bytes per char.
+    output->reserve(src_len * 3);
+  }
+}
+
+template<typename STRING>
+void PrepareForUTF16Or32Output(const char* src,
+                               size_t src_len,
+                               STRING* output) {
+  output->clear();
+  if (src_len == 0)
+    return;
+  if (static_cast<unsigned char>(src[0]) < 0x80) {
+    // Assume the input is all ASCII, which means 1:1 correspondence.
+    output->reserve(src_len);
+  } else {
+    // Otherwise assume that the UTF-8 sequences will have 2 bytes for each
+    // character.
+    output->reserve(src_len / 2);
+  }
+}
+
+namespace {
+
+// Generalized Unicode converter -----------------------------------------------
+
+// Converts the given source Unicode character type to the given destination
+// Unicode character type as an STL string. The given input buffer and size
+// determine the source, and the given output STL string will be replaced by
+// the result.
+template <typename SRC_CHAR, typename DEST_STRING>
+bool ConvertUnicode(const SRC_CHAR* src, size_t src_len, DEST_STRING* output) {
+  // ICU requires 32-bit numbers.
+  bool success = true;
+  int32_t src_len32 = static_cast<int32_t>(src_len);
+  for (int32_t i = 0; i < src_len32; i++) {
+    uint32_t code_point;
+    if (ReadUnicodeCharacter(src, src_len32, &i, &code_point)) {
+      WriteUnicodeCharacter(code_point, output);
+    } else {
+      WriteUnicodeCharacter(0xFFFD, output);
+      success = false;
+    }
+  }
+
+  return success;
+}
+
+}  // namespace
+
+// UTF-8 <-> Wide --------------------------------------------------------------
+
+bool WideToUTF8(const wchar_t* src, size_t src_len, std::string* output) {
+  if (IsStringASCII(std::wstring(src, src_len))) {
+    output->assign(src, src + src_len);
+    return true;
+  } else {
+    PrepareForUTF8Output(src, src_len, output);
+    return ConvertUnicode(src, src_len, output);
+  }
+}
+
+std::string WideToUTF8(const std::wstring& wide) {
+  if (IsStringASCII(wide)) {
+    return std::string(wide.data(), wide.data() + wide.length());
+  }
+
+  std::string ret;
+  PrepareForUTF8Output(wide.data(), wide.length(), &ret);
+  ConvertUnicode(wide.data(), wide.length(), &ret);
+  return ret;
+}
+
+bool UTF8ToWide(const char* src, size_t src_len, std::wstring* output) {
+  if (IsStringASCII(StringPiece(src, src_len))) {
+    output->assign(src, src + src_len);
+    return true;
+  } else {
+    PrepareForUTF16Or32Output(src, src_len, output);
+    return ConvertUnicode(src, src_len, output);
+  }
+}
+
+std::wstring UTF8ToWide(StringPiece utf8) {
+  if (IsStringASCII(utf8)) {
+    return std::wstring(utf8.begin(), utf8.end());
+  }
+
+  std::wstring ret;
+  PrepareForUTF16Or32Output(utf8.data(), utf8.length(), &ret);
+  ConvertUnicode(utf8.data(), utf8.length(), &ret);
+  return ret;
+}
+
+// UTF-16 <-> Wide -------------------------------------------------------------
+
+#if defined(WCHAR_T_IS_UTF16)
+
+// When wide == UTF-16, these conversions are a NOP.
+bool WideToUTF16(const wchar_t* src, size_t src_len, string16* output) {
+  output->assign(src, src_len);
+  return true;
+}
+
+string16 WideToUTF16(const std::wstring& wide) {
+  return wide;
+}
+
+bool UTF16ToWide(const char16* src, size_t src_len, std::wstring* output) {
+  output->assign(src, src_len);
+  return true;
+}
+
+std::wstring UTF16ToWide(const string16& utf16) {
+  return utf16;
+}
+
+#elif defined(WCHAR_T_IS_UTF32)
+
+bool WideToUTF16(const wchar_t* src, size_t src_len, string16* output) {
+  output->clear();
+  // Assume that normally we won't have any non-BMP characters so the counts
+  // will be the same.
+  output->reserve(src_len);
+  return ConvertUnicode(src, src_len, output);
+}
+
+string16 WideToUTF16(const std::wstring& wide) {
+  string16 ret;
+  WideToUTF16(wide.data(), wide.length(), &ret);
+  return ret;
+}
+
+bool UTF16ToWide(const char16* src, size_t src_len, std::wstring* output) {
+  output->clear();
+  // Assume that normally we won't have any non-BMP characters so the counts
+  // will be the same.
+  output->reserve(src_len);
+  return ConvertUnicode(src, src_len, output);
+}
+
+std::wstring UTF16ToWide(const string16& utf16) {
+  std::wstring ret;
+  UTF16ToWide(utf16.data(), utf16.length(), &ret);
+  return ret;
+}
+
+#endif  // defined(WCHAR_T_IS_UTF32)
+
+// UTF16 <-> UTF8 --------------------------------------------------------------
+
+#if defined(WCHAR_T_IS_UTF32)
+
+bool UTF8ToUTF16(const char* src, size_t src_len, string16* output) {
+  if (IsStringASCII(StringPiece(src, src_len))) {
+    output->assign(src, src + src_len);
+    return true;
+  } else {
+    PrepareForUTF16Or32Output(src, src_len, output);
+    return ConvertUnicode(src, src_len, output);
+  }
+}
+
+string16 UTF8ToUTF16(StringPiece utf8) {
+  if (IsStringASCII(utf8)) {
+    return string16(utf8.begin(), utf8.end());
+  }
+
+  string16 ret;
+  PrepareForUTF16Or32Output(utf8.data(), utf8.length(), &ret);
+  // Ignore the success flag of this call; it will do the best it can for
+  // invalid input, which is what we want here.
+  ConvertUnicode(utf8.data(), utf8.length(), &ret);
+  return ret;
+}
+
+bool UTF16ToUTF8(const char16* src, size_t src_len, std::string* output) {
+  if (IsStringASCII(StringPiece16(src, src_len))) {
+    output->assign(src, src + src_len);
+    return true;
+  } else {
+    PrepareForUTF8Output(src, src_len, output);
+    return ConvertUnicode(src, src_len, output);
+  }
+}
+
+std::string UTF16ToUTF8(StringPiece16 utf16) {
+  std::string ret;
+  // Ignore the success flag of this call; it will do the best it can for
+  // invalid input, which is what we want here.
+  UTF16ToUTF8(utf16.data(), utf16.length(), &ret);
+  return ret;
+}
+
+#elif defined(WCHAR_T_IS_UTF16)
+// Easy case since we can use the "wide" versions we already wrote above.
+
+bool UTF8ToUTF16(const char* src, size_t src_len, string16* output) {
+  return UTF8ToWide(src, src_len, output);
+}
+
+string16 UTF8ToUTF16(StringPiece utf8) {
+  return UTF8ToWide(utf8);
+}
+
+bool UTF16ToUTF8(const char16* src, size_t src_len, std::string* output) {
+  return WideToUTF8(src, src_len, output);
+}
+
+std::string UTF16ToUTF8(StringPiece16 utf16) {
+  if (IsStringASCII(utf16))
+    return std::string(utf16.data(), utf16.data() + utf16.length());
+
+  std::string ret;
+  PrepareForUTF8Output(utf16.data(), utf16.length(), &ret);
+  ConvertUnicode(utf16.data(), utf16.length(), &ret);
+  return ret;
+}
+
+#endif
+
+string16 ASCIIToUTF16(StringPiece ascii) {
+  DCHECK(IsStringASCII(ascii)) << ascii;
+  return string16(ascii.begin(), ascii.end());
+}
+
+std::string UTF16ToASCII(StringPiece16 utf16) {
+  DCHECK(IsStringASCII(utf16)) << UTF16ToUTF8(utf16);
+  return std::string(utf16.begin(), utf16.end());
+}
+
+}  // namespace base_old
diff --git a/base/strings/old_utf_string_conversions.h b/base/strings/old_utf_string_conversions.h
new file mode 100644
index 0000000..2f0c6c5
--- /dev/null
+++ b/base/strings/old_utf_string_conversions.h
@@ -0,0 +1,64 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_OLD_UTF_STRING_CONVERSIONS_H_
+#define BASE_STRINGS_OLD_UTF_STRING_CONVERSIONS_H_
+
+#include <stddef.h>
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
+
+namespace base_old {
+
+using base::char16;
+using base::string16;
+using base::StringPiece16;
+using base::StringPiece;
+
+// These convert between UTF-8, -16, and -32 strings. They are potentially slow,
+// so avoid unnecessary conversions. The low-level versions return a boolean
+// indicating whether the conversion was 100% valid. Even when they return
+// false, they still do the best they can and put the result in the output
+// buffer. The versions that return strings ignore this error and just return
+// the best conversion possible.
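+//
+// For example (an illustrative sketch; |wide_str| and |wide_len| stand for
+// caller-provided data):
+//   std::string utf8;
+//   bool ok = WideToUTF8(wide_str, wide_len, &utf8);
+//   // |ok| is false if the input had invalid sequences, but |utf8| still
+//   // holds the best-effort conversion.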
+BASE_EXPORT bool WideToUTF8(const wchar_t* src,
+                            size_t src_len,
+                            std::string* output);
+BASE_EXPORT std::string WideToUTF8(const std::wstring& wide);
+BASE_EXPORT bool UTF8ToWide(const char* src,
+                            size_t src_len,
+                            std::wstring* output);
+BASE_EXPORT std::wstring UTF8ToWide(StringPiece utf8);
+
+BASE_EXPORT bool WideToUTF16(const wchar_t* src,
+                             size_t src_len,
+                             string16* output);
+BASE_EXPORT string16 WideToUTF16(const std::wstring& wide);
+BASE_EXPORT bool UTF16ToWide(const char16* src,
+                             size_t src_len,
+                             std::wstring* output);
+BASE_EXPORT std::wstring UTF16ToWide(const string16& utf16);
+
+BASE_EXPORT bool UTF8ToUTF16(const char* src, size_t src_len, string16* output);
+BASE_EXPORT string16 UTF8ToUTF16(StringPiece utf8);
+BASE_EXPORT bool UTF16ToUTF8(const char16* src,
+                             size_t src_len,
+                             std::string* output);
+BASE_EXPORT std::string UTF16ToUTF8(StringPiece16 utf16);
+
+// This converts an ASCII string, typically a hardcoded constant, to a UTF16
+// string.
+BASE_EXPORT string16 ASCIIToUTF16(StringPiece ascii);
+
+// Converts to 7-bit ASCII by truncating. The result must be known to be ASCII
+// beforehand.
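+// E.g. (illustrative): UTF16ToASCII(ASCIIToUTF16("abc")) == "abc".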
+BASE_EXPORT std::string UTF16ToASCII(StringPiece16 utf16);
+
+}  // namespace base_old
+
+#endif  // BASE_STRINGS_OLD_UTF_STRING_CONVERSIONS_H_
diff --git a/base/strings/pattern.cc b/base/strings/pattern.cc
new file mode 100644
index 0000000..f3de0af
--- /dev/null
+++ b/base/strings/pattern.cc
@@ -0,0 +1,155 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/pattern.h"
+
+#include "base/third_party/icu/icu_utf.h"
+
+namespace base {
+
+namespace {
+
+constexpr bool IsWildcard(base_icu::UChar32 character) {
+  return character == '*' || character == '?';
+}
+
+// Searches for the next subpattern of |pattern| in |string|, up to the given
+// |maximum_distance|. The subpattern extends from the start of |pattern| up to
+// the first wildcard character (or the end of the pattern). If the value of
+// |maximum_distance| is negative, the maximum distance is considered infinite.
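+// For example, with |pattern| = "foo*bar", the subpattern searched for first
+// is "foo" (everything up to the '*').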
+template <typename CHAR, typename NEXT>
+constexpr bool SearchForChars(const CHAR** pattern,
+                              const CHAR* pattern_end,
+                              const CHAR** string,
+                              const CHAR* string_end,
+                              int maximum_distance,
+                              NEXT next) {
+  const CHAR* pattern_start = *pattern;
+  const CHAR* string_start = *string;
+  bool escape = false;
+  while (true) {
+    if (*pattern == pattern_end) {
+      // If this is the end of the pattern, only accept the end of the string;
+      // anything else falls through to the mismatch case.
+      if (*string == string_end)
+        return true;
+    } else {
+      // If we have found a wildcard, we're done.
+      if (!escape && IsWildcard(**pattern))
+        return true;
+
+      // Check if the escape character is found. If so, skip it and move to the
+      // next character.
+      if (!escape && **pattern == '\\') {
+        escape = true;
+        next(pattern, pattern_end);
+        continue;
+      }
+
+      escape = false;
+
+      if (*string == string_end)
+        return false;
+
+      // Check if the chars match; if so, increment the ptrs.
+      const CHAR* pattern_next = *pattern;
+      const CHAR* string_next = *string;
+      base_icu::UChar32 pattern_char = next(&pattern_next, pattern_end);
+      if (pattern_char == next(&string_next, string_end) &&
+          pattern_char != CBU_SENTINEL) {
+        *pattern = pattern_next;
+        *string = string_next;
+        continue;
+      }
+    }
+
+    // Mismatch. If we have reached the maximum distance, return false;
+    // otherwise restart at the beginning of the pattern with the next
+    // character in the string.
+    // TODO(bauerb): This is a naive implementation of substring search, which
+    // could be implemented with a more efficient algorithm, e.g.
+    // Knuth-Morris-Pratt (at the expense of requiring preprocessing).
+    if (maximum_distance == 0)
+      return false;
+
+    // Because unlimited distance is represented as -1, the decrement below
+    // never makes it reach 0, so the maximum-distance check above never
+    // fails in the unlimited case.
+    maximum_distance--;
+    *pattern = pattern_start;
+    next(&string_start, string_end);
+    *string = string_start;
+  }
+}
+
+// Consumes consecutive wildcard characters (? or *). Returns the maximum number
+// of characters matched by the sequence of wildcards, or -1 if the wildcards
+// match an arbitrary number of characters (which is the case if it contains at
+// least one *).
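+// E.g. "??" yields 2, while "?*?" yields -1 because it contains a '*'.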
+template <typename CHAR, typename NEXT>
+constexpr int EatWildcards(const CHAR** pattern, const CHAR* end, NEXT next) {
+  int num_question_marks = 0;
+  bool has_asterisk = false;
+  while (*pattern != end) {
+    if (**pattern == '?') {
+      num_question_marks++;
+    } else if (**pattern == '*') {
+      has_asterisk = true;
+    } else {
+      break;
+    }
+
+    next(pattern, end);
+  }
+  return has_asterisk ? -1 : num_question_marks;
+}
+
+template <typename CHAR, typename NEXT>
+constexpr bool MatchPatternT(const CHAR* eval,
+                             const CHAR* eval_end,
+                             const CHAR* pattern,
+                             const CHAR* pattern_end,
+                             NEXT next) {
+  do {
+    int maximum_wildcard_length = EatWildcards(&pattern, pattern_end, next);
+    if (!SearchForChars(&pattern, pattern_end, &eval, eval_end,
+                        maximum_wildcard_length, next)) {
+      return false;
+    }
+  } while (pattern != pattern_end);
+  return true;
+}
+
+struct NextCharUTF8 {
+  base_icu::UChar32 operator()(const char** p, const char* end) {
+    base_icu::UChar32 c;
+    int offset = 0;
+    CBU8_NEXT(*p, offset, end - *p, c);
+    *p += offset;
+    return c;
+  }
+};
+
+struct NextCharUTF16 {
+  base_icu::UChar32 operator()(const char16** p, const char16* end) {
+    base_icu::UChar32 c;
+    int offset = 0;
+    CBU16_NEXT(*p, offset, end - *p, c);
+    *p += offset;
+    return c;
+  }
+};
+
+}  // namespace
+
+bool MatchPattern(StringPiece eval, StringPiece pattern) {
+  return MatchPatternT(eval.data(), eval.data() + eval.size(), pattern.data(),
+                       pattern.data() + pattern.size(), NextCharUTF8());
+}
+
+bool MatchPattern(StringPiece16 eval, StringPiece16 pattern) {
+  return MatchPatternT(eval.data(), eval.data() + eval.size(), pattern.data(),
+                       pattern.data() + pattern.size(), NextCharUTF16());
+}
+
+}  // namespace base
diff --git a/base/strings/pattern.h b/base/strings/pattern.h
new file mode 100644
index 0000000..b5172ab
--- /dev/null
+++ b/base/strings/pattern.h
@@ -0,0 +1,23 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_PATTERN_H_
+#define BASE_STRINGS_PATTERN_H_
+
+#include "base/base_export.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+// Returns true if the |string| passed in matches the |pattern|. The pattern
+// string can contain wildcards like * and ?.
+//
+// The backslash character (\) is an escape character for * and ?.
+// ? matches 0 or 1 character, while * matches 0 or more characters.
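+//
+// For example (mirroring the unit tests):
+//   MatchPattern("www.google.com", "*.com")   // true
+//   MatchPattern("Hello", "H?l?o")            // true
+//   MatchPattern("Hello*1234", "He??o\\*1*")  // true: "\\*" matches a literal '*'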
+BASE_EXPORT bool MatchPattern(StringPiece string, StringPiece pattern);
+BASE_EXPORT bool MatchPattern(StringPiece16 string, StringPiece16 pattern);
+
+}  // namespace base
+
+#endif  // BASE_STRINGS_PATTERN_H_
diff --git a/base/strings/pattern_unittest.cc b/base/strings/pattern_unittest.cc
new file mode 100644
index 0000000..8ec5495
--- /dev/null
+++ b/base/strings/pattern_unittest.cc
@@ -0,0 +1,52 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/pattern.h"
+#include "base/strings/utf_string_conversions.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(StringUtilTest, MatchPatternTest) {
+  EXPECT_TRUE(MatchPattern("www.google.com", "*.com"));
+  EXPECT_TRUE(MatchPattern("www.google.com", "*"));
+  EXPECT_FALSE(MatchPattern("www.google.com", "www*.g*.org"));
+  EXPECT_TRUE(MatchPattern("Hello", "H?l?o"));
+  EXPECT_FALSE(MatchPattern("www.google.com", "http://*)"));
+  EXPECT_FALSE(MatchPattern("www.msn.com", "*.COM"));
+  EXPECT_TRUE(MatchPattern("Hello*1234", "He??o\\*1*"));
+  EXPECT_FALSE(MatchPattern("", "*.*"));
+  EXPECT_TRUE(MatchPattern("", "*"));
+  EXPECT_TRUE(MatchPattern("", "?"));
+  EXPECT_TRUE(MatchPattern("", ""));
+  EXPECT_FALSE(MatchPattern("Hello", ""));
+  EXPECT_TRUE(MatchPattern("Hello*", "Hello*"));
+  EXPECT_TRUE(MatchPattern("abcd", "*???"));
+  EXPECT_FALSE(MatchPattern("abcd", "???"));
+  EXPECT_TRUE(MatchPattern("abcb", "a*b"));
+  EXPECT_FALSE(MatchPattern("abcb", "a?b"));
+
+  // Test UTF8 matching.
+  EXPECT_TRUE(MatchPattern("heart: \xe2\x99\xa0", "*\xe2\x99\xa0"));
+  EXPECT_TRUE(MatchPattern("heart: \xe2\x99\xa0.", "heart: ?."));
+  EXPECT_TRUE(MatchPattern("hearts: \xe2\x99\xa0\xe2\x99\xa0", "*"));
+  // Invalid sequences should be handled as a single invalid character.
+  EXPECT_TRUE(MatchPattern("invalid: \xef\xbf\xbe", "invalid: ?"));
+  // If the pattern has invalid characters, it shouldn't match anything.
+  EXPECT_FALSE(MatchPattern("\xf4\x90\x80\x80", "\xf4\x90\x80\x80"));
+
+  // Test UTF16 character matching.
+  EXPECT_TRUE(MatchPattern(UTF8ToUTF16("www.google.com"),
+                           UTF8ToUTF16("*.com")));
+  EXPECT_TRUE(MatchPattern(UTF8ToUTF16("Hello*1234"),
+                           UTF8ToUTF16("He??o\\*1*")));
+
+  // Some test cases that might cause naive implementations to exhibit
+  // exponential run time or fail.
+  EXPECT_TRUE(MatchPattern("Hello", "He********************************o"));
+  EXPECT_TRUE(MatchPattern("123456789012345678", "?????????????????*"));
+  EXPECT_TRUE(MatchPattern("aaaaaaaaaaab", "a*a*a*a*a*a*a*a*a*a*a*b"));
+}
+
+}  // namespace base
diff --git a/base/strings/safe_sprintf.cc b/base/strings/safe_sprintf.cc
new file mode 100644
index 0000000..4d695cf
--- /dev/null
+++ b/base/strings/safe_sprintf.cc
@@ -0,0 +1,686 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/safe_sprintf.h"
+
+#include <errno.h>
+#include <string.h>
+
+#include <limits>
+
+#include "base/macros.h"
+#include "build/build_config.h"
+
+#if !defined(NDEBUG)
+// In debug builds, we use RAW_CHECK() to print useful error messages, if
+// SafeSPrintf() is called with broken arguments.
+// As our contract promises that SafeSPrintf() can be called from any
+// restricted run-time context, it is not actually safe to call logging
+// functions from it; and we only ever do so for debug builds and hope for the
+// best. We should _never_ call any logging function other than RAW_CHECK(),
+// and we should _never_ include any logging code that is active in production
+// builds. Most notably, we should not include these logging functions in
+// unofficial release builds, even though those builds would otherwise have
+// DCHECKS() enabled.
+// In other words: please do not remove the #ifdef around this #include.
+// Instead, in production builds we opt for returning a degraded result,
+// whenever an error is encountered.
+// E.g. The broken function call
+//        SafeSPrintf("errno = %d (%x)", errno, strerror(errno))
+//      will print something like
+//        errno = 13 (%x)
+//      instead of
+//        errno = 13 (Access denied)
+//      In most of the anticipated use cases, that's probably the preferred
+//      behavior.
+#include "base/logging.h"
+#define DEBUG_CHECK RAW_CHECK
+#else
+#define DEBUG_CHECK(x) do { if (x) { } } while (0)
+#endif
+
+namespace base {
+namespace strings {
+
+// The code in this file is extremely careful to be async-signal-safe.
+//
+// Most obviously, we avoid calling any code that could dynamically allocate
+// memory. Doing so would almost certainly result in bugs and deadlocks.
+// We also avoid calling any other STL functions that could have unintended
+// side-effects involving memory allocation or access to other shared
+// resources.
+//
+// But on top of that, we also avoid calling other library functions, as many
+// of them have the side-effect of calling getenv() (in order to deal with
+// localization) or accessing errno. The latter sounds benign, but there are
+// several execution contexts where it isn't even possible to safely read let
+// alone write errno.
+//
+// The stated design goal of the SafeSPrintf() function is that it can be
+// called from any context that can safely call C or C++ code (i.e. anything
+// that doesn't require assembly code).
+//
+// For a brief overview of some but not all of the issues with async-signal-
+// safety, refer to:
+// http://pubs.opengroup.org/onlinepubs/009695399/functions/xsh_chap02_04.html
+
+namespace {
+const size_t kSSizeMaxConst = ((size_t)(ssize_t)-1) >> 1;
+
+const char kUpCaseHexDigits[]   = "0123456789ABCDEF";
+const char kDownCaseHexDigits[] = "0123456789abcdef";
+}
+
+#if defined(NDEBUG)
+// We would like to define kSSizeMax as std::numeric_limits<ssize_t>::max(),
+// but C++ doesn't allow us to do that for constants. Instead, we have to
+// use careful casting and shifting. We later use a static_assert to
+// verify that this worked correctly.
+namespace {
+const size_t kSSizeMax = kSSizeMaxConst;
+}
+#else  // defined(NDEBUG)
+// For efficiency, we really need kSSizeMax to be a constant. But for unit
+// tests, it should be adjustable. This allows us to verify edge cases without
+// having to fill the entire available address space. As a compromise, we make
+// kSSizeMax adjustable in debug builds, and then only compile that particular
+// part of the unit test in debug builds.
+namespace {
+static size_t kSSizeMax = kSSizeMaxConst;
+}
+
+namespace internal {
+void SetSafeSPrintfSSizeMaxForTest(size_t max) {
+  kSSizeMax = max;
+}
+
+size_t GetSafeSPrintfSSizeMaxForTest() {
+  return kSSizeMax;
+}
+}
+#endif  // defined(NDEBUG)
+
+namespace {
+class Buffer {
+ public:
+  // |buffer| is caller-allocated storage that SafeSPrintf() writes to. It
+  // has |size| bytes of writable storage. It is the caller's responsibility
+  // to ensure that the buffer is at least one byte in size, so that it fits
+  // the trailing NUL that will be added by the destructor. The buffer also
+  // must be smaller than or equal to kSSizeMax in size.
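+  // E.g. (illustrative): a Buffer constructed over "char out[16]" can hold
+  // at most 15 characters plus the trailing NUL.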
+  Buffer(char* buffer, size_t size)
+      : buffer_(buffer),
+        size_(size - 1),  // Account for trailing NUL byte
+        count_(0) {
+// MSVS2013's standard library doesn't mark max() as constexpr yet. cl.exe
+// supports static_cast but doesn't really implement constexpr yet so it doesn't
+// complain, but clang does.
+#if __cplusplus >= 201103 && !(defined(__clang__) && defined(OS_WIN))
+    static_assert(kSSizeMaxConst ==
+                      static_cast<size_t>(std::numeric_limits<ssize_t>::max()),
+                  "kSSizeMaxConst should be the max value of an ssize_t");
+#endif
+    DEBUG_CHECK(size > 0);
+    DEBUG_CHECK(size <= kSSizeMax);
+  }
+
+  ~Buffer() {
+    // The code calling the constructor guaranteed that there was enough space
+    // to store a trailing NUL -- and in debug builds, we are actually
+    // verifying this with DEBUG_CHECK()s in the constructor. So, we can
+    // always unconditionally write the NUL byte in the destructor.  We do not
+    // need to adjust the count_, as SafeSPrintf() copies snprintf() in not
+    // including the NUL byte in its return code.
+    *GetInsertionPoint() = '\000';
+  }
+
+  // Returns true, iff the buffer is filled all the way to |kSSizeMax-1|. The
+  // caller can now stop adding more data, as GetCount() has reached its
+  // maximum possible value.
+  inline bool OutOfAddressableSpace() const {
+    return count_ == static_cast<size_t>(kSSizeMax - 1);
+  }
+
+  // Returns the number of bytes that would have been emitted to |buffer_|
+  // if it was sized sufficiently large. This number can be larger than
+  // |size_|, if the caller provided an insufficiently large output buffer.
+  // But it will never be bigger than |kSSizeMax-1|.
+  inline ssize_t GetCount() const {
+    DEBUG_CHECK(count_ < kSSizeMax);
+    return static_cast<ssize_t>(count_);
+  }
+
+  // Emits one |ch| character into the |buffer_| and updates the |count_| of
+  // characters that are currently supposed to be in the buffer.
+  // Returns "false", iff the buffer was already full.
+  // N.B. |count_| increases even if no characters have been written. This is
+  // needed so that GetCount() can return the number of bytes that should
+  // have been allocated for the |buffer_|.
+  inline bool Out(char ch) {
+    if (size_ >= 1 && count_ < size_) {
+      buffer_[count_] = ch;
+      return IncrementCountByOne();
+    }
+    // |count_| still needs to be updated, even if the buffer has been
+    // filled completely. This allows SafeSPrintf() to return the number of
+    // bytes that should have been emitted.
+    IncrementCountByOne();
+    return false;
+  }
+
+  // Inserts |padding|-|len| bytes worth of padding into the |buffer_|.
+  // |count_| will also be incremented by the number of bytes that were meant
+  // to be emitted. The |pad| character is typically either a ' ' space
+  // or a '0' zero, but other non-NUL values are legal.
+  // Returns "false", iff the the |buffer_| filled up (i.e. |count_|
+  // overflowed |size_|) at any time during padding.
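+  // E.g. (illustrative): Pad('0', 5, 3) emits two '0' characters.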
+  inline bool Pad(char pad, size_t padding, size_t len) {
+    DEBUG_CHECK(pad);
+    DEBUG_CHECK(padding <= kSSizeMax);
+    for (; padding > len; --padding) {
+      if (!Out(pad)) {
+        if (--padding) {
+          IncrementCount(padding-len);
+        }
+        return false;
+      }
+    }
+    return true;
+  }
+
+  // POSIX doesn't define any async-signal-safe function for converting
+  // an integer to ASCII. Define our own version.
+  //
+  // This also gives us the ability to make the function a little more
+  // powerful and have it deal with |padding|, with truncation, and with
+  // predicting the length of the untruncated output.
+  //
+  // IToASCII() converts an integer |i| to ASCII.
+  //
+  // Unlike similar functions in the standard C library, it never appends a
+  // NUL character. This is left for the caller to do.
+  //
+  // While the function signature takes a signed int64_t, the code decides at
+  // run-time whether to treat the argument as signed (int64_t) or as unsigned
+  // (uint64_t) based on the value of |sign|.
+  //
+  // It supports |base|s 2 through 16. Only a |base| of 10 is allowed to have
+  // a |sign|. Otherwise, |i| is treated as unsigned.
+  //
+  // For bases larger than 10, |upcase| decides whether lower-case or upper-
+  // case letters should be used to designate digits greater than 10.
+  //
+  // Padding can be done with either '0' zeros or ' ' spaces. Padding has to
+  // be positive and will always be applied to the left of the output.
+  //
+  // Prepends a |prefix| to the number (e.g. "0x"). This prefix goes to
+  // the left of |padding|, if |pad| is '0'; and to the right of |padding|
+  // if |pad| is ' '.
+  //
+  // Returns "false", if the |buffer_| overflowed at any time.
+  bool IToASCII(bool sign, bool upcase, int64_t i, int base,
+                char pad, size_t padding, const char* prefix);
+
+ private:
+  // Increments |count_| by |inc| unless this would cause |count_| to
+  // overflow |kSSizeMax-1|. Returns "false", iff an overflow was detected;
+  // it then clamps |count_| to |kSSizeMax-1|.
+  inline bool IncrementCount(size_t inc) {
+    // "inc" is either 1 or a "padding" value. Padding is clamped at
+    // run-time to at most kSSizeMax-1. So, we know that "inc" is always in
+    // the range 1..kSSizeMax-1.
+    // This allows us to compute "kSSizeMax - 1 - inc" without incurring any
+    // integer overflows.
+    DEBUG_CHECK(inc <= kSSizeMax - 1);
+    if (count_ > kSSizeMax - 1 - inc) {
+      count_ = kSSizeMax - 1;
+      return false;
+    } else {
+      count_ += inc;
+      return true;
+    }
+  }
+
+  // Convenience method for the common case of incrementing |count_| by one.
+  inline bool IncrementCountByOne() {
+    return IncrementCount(1);
+  }
+
+  // Return the current insertion point into the buffer. This is typically
+  // at |buffer_| + |count_|, but could be before that if truncation
+  // happened. It always points to one byte past the last byte that was
+  // successfully placed into the |buffer_|.
+  inline char* GetInsertionPoint() const {
+    size_t idx = count_;
+    if (idx > size_) {
+      idx = size_;
+    }
+    return buffer_ + idx;
+  }
+
+  // User-provided buffer that will receive the fully formatted output string.
+  char* buffer_;
+
+  // Number of bytes that are available in the buffer excluding the trailing
+  // NUL byte that will be added by the destructor.
+  const size_t size_;
+
+  // Number of bytes that would have been emitted to the buffer, if the buffer
+  // was sufficiently big. This number always excludes the trailing NUL byte
+  // and it is guaranteed to never grow bigger than kSSizeMax-1.
+  size_t count_;
+
+  DISALLOW_COPY_AND_ASSIGN(Buffer);
+};
+
+
+bool Buffer::IToASCII(bool sign, bool upcase, int64_t i, int base,
+                      char pad, size_t padding, const char* prefix) {
+  // Sanity check for parameters. None of these should ever fail, but see
+  // above for the rationale why we can't call CHECK().
+  DEBUG_CHECK(base >= 2);
+  DEBUG_CHECK(base <= 16);
+  DEBUG_CHECK(!sign || base == 10);
+  DEBUG_CHECK(pad == '0' || pad == ' ');
+  DEBUG_CHECK(padding <= kSSizeMax);
+  DEBUG_CHECK(!(sign && prefix && *prefix));
+
+  // Handle negative numbers, if the caller indicated that |i| should be
+  // treated as a signed number; otherwise treat |i| as unsigned (even if the
+  // MSB is set!)
+  // Details are tricky, because of limited data-types, but equivalent pseudo-
+  // code would look like:
+  //   if (sign && i < 0)
+  //     prefix = "-";
+  //   num = abs(i);
+  int minint = 0;
+  uint64_t num;
+  if (sign && i < 0) {
+    prefix = "-";
+
+    // Turn our number positive.
+    if (i == std::numeric_limits<int64_t>::min()) {
+      // The most negative integer needs special treatment.
+      minint = 1;
+      num = static_cast<uint64_t>(-(i + 1));
+    } else {
+      // "Normal" negative numbers are easy.
+      num = static_cast<uint64_t>(-i);
+    }
+  } else {
+    num = static_cast<uint64_t>(i);
+  }
+
+  // If padding with '0' zero, emit the prefix or '-' character now. Otherwise,
+  // make the prefix accessible in reverse order, so that we can later output
+  // it right between padding and the number.
+  // We cannot choose the easier approach of just reversing the number, as that
+  // fails in situations where we need to truncate numbers that have padding
+  // and/or prefixes.
+  const char* reverse_prefix = nullptr;
+  if (prefix && *prefix) {
+    if (pad == '0') {
+      while (*prefix) {
+        if (padding) {
+          --padding;
+        }
+        Out(*prefix++);
+      }
+      prefix = nullptr;
+    } else {
+      for (reverse_prefix = prefix; *reverse_prefix; ++reverse_prefix) {
+      }
+    }
+  } else {
+    prefix = nullptr;
+  }
+  const size_t prefix_length = reverse_prefix - prefix;
+
+  // Loop until we have converted the entire number. Output at least one
+  // character (i.e. '0').
+  size_t start = count_;
+  size_t discarded = 0;
+  bool started = false;
+  do {
+    // Make sure there is still enough space left in our output buffer.
+    if (count_ >= size_) {
+      if (start < size_) {
+        // It is rare that we need to output a partial number. But if asked
+        // to do so, we will still make sure we output the correct number of
+        // leading digits.
+        // Since we are generating the digits in reverse order, we actually
+        // have to discard digits in the order that we have already emitted
+        // them. This is essentially equivalent to:
+        //   memmove(buffer_ + start, buffer_ + start + 1, size_ - start - 1)
+        for (char* move = buffer_ + start, *end = buffer_ + size_ - 1;
+             move < end;
+             ++move) {
+          *move = move[1];
+        }
+        ++discarded;
+        --count_;
+      } else if (count_ - size_ > 1) {
+        // Need to increment either |count_| or |discarded| to make progress.
+        // The latter is more efficient, as it eventually triggers fast
+        // handling of padding. But we have to ensure we don't accidentally
+        // change the overall state (i.e. switch the state-machine from
+        // discarding to non-discarding). |count_| needs to always stay
+        // bigger than |size_|.
+        --count_;
+        ++discarded;
+      }
+    }
+
+    // Output the next digit and (if necessary) compensate for the most
+    // negative integer needing special treatment. This works because,
+    // no matter the bit width of the integer, the most negative decimal
+    // integer always ends in 2, 4, 6, or 8.
+    if (!num && started) {
+      if (reverse_prefix > prefix) {
+        Out(*--reverse_prefix);
+      } else {
+        Out(pad);
+      }
+    } else {
+      started = true;
+      Out((upcase ? kUpCaseHexDigits : kDownCaseHexDigits)[num%base + minint]);
+    }
+
+    minint = 0;
+    num /= base;
+
+    // Add padding, if requested.
+    if (padding > 0) {
+      --padding;
+
+      // Performance optimization for when we are asked to output excessive
+      // padding, but our output buffer is limited in size.  Even if we output
+      // a 64bit number in binary, we would never write more than 64 plus
+      // prefix non-padding characters. So, once this limit has been passed,
+      // any further state change can be computed arithmetically; we know that
+      // by this time, our entire final output consists of padding characters
+      // that have all already been output.
+      if (discarded > 8*sizeof(num) + prefix_length) {
+        IncrementCount(padding);
+        padding = 0;
+      }
+    }
+  } while (num || padding || (reverse_prefix > prefix));
+
+  // Conversion to ASCII actually resulted in the digits being in reverse
+  // order. We can't easily generate them in forward order, as we can't tell
+  // the number of characters needed until we are done converting.
+  // So, now, we reverse the string (except for the possible '-' sign).
+  char* front = buffer_ + start;
+  char* back = GetInsertionPoint();
+  while (--back > front) {
+    char ch = *back;
+    *back = *front;
+    *front++ = ch;
+  }
+
+  IncrementCount(discarded);
+  return !discarded;
+}
+
+}  // anonymous namespace
+
+namespace internal {
+
+ssize_t SafeSNPrintf(char* buf, size_t sz, const char* fmt, const Arg* args,
+                     const size_t max_args) {
+  // Make sure that at least one NUL byte can be written, and that the buffer
+  // never overflows kSSizeMax. A buffer bigger than that would not only use
+  // up most or all of the address space, it would also result in a return
+  // code that cannot be represented.
+  if (static_cast<ssize_t>(sz) < 1) {
+    return -1;
+  } else if (sz > kSSizeMax) {
+    sz = kSSizeMax;
+  }
+
+  // Iterate over format string and interpret '%' arguments as they are
+  // encountered.
+  Buffer buffer(buf, sz);
+  size_t padding;
+  char pad;
+  for (unsigned int cur_arg = 0; *fmt && !buffer.OutOfAddressableSpace(); ) {
+    if (*fmt++ == '%') {
+      padding = 0;
+      pad = ' ';
+      char ch = *fmt++;
+    format_character_found:
+      switch (ch) {
+      case '0': case '1': case '2': case '3': case '4':
+      case '5': case '6': case '7': case '8': case '9':
+        // Found a width parameter. Convert to an integer value and store in
+        // "padding". If the leading digit is a zero, change the padding
+        // character from a space ' ' to a zero '0'.
+        pad = ch == '0' ? '0' : ' ';
+        for (;;) {
+          // The maximum allowed padding fills all the available address
+          // space and leaves just enough space to insert the trailing NUL.
+          const size_t max_padding = kSSizeMax - 1;
+          if (padding > max_padding/10 ||
+              10*padding > max_padding - (ch - '0')) {
+            DEBUG_CHECK(padding <= max_padding/10 &&
+                        10*padding <= max_padding - (ch - '0'));
+            // Integer overflow detected. Skip the rest of the width until
+            // we find the format character, then do the normal error handling.
+          padding_overflow:
+            padding = max_padding;
+            while ((ch = *fmt++) >= '0' && ch <= '9') {
+            }
+            if (cur_arg < max_args) {
+              ++cur_arg;
+            }
+            goto fail_to_expand;
+          }
+          padding = 10*padding + ch - '0';
+          if (padding > max_padding) {
+            // This doesn't happen for "sane" values of kSSizeMax. But once
+            // kSSizeMax gets smaller than about 10, our earlier range checks
+            // are incomplete. Unittests do trigger this artificial corner
+            // case.
+            DEBUG_CHECK(padding <= max_padding);
+            goto padding_overflow;
+          }
+          ch = *fmt++;
+          if (ch < '0' || ch > '9') {
+            // Reached the end of the width parameter. This is where the format
+            // character is found.
+            goto format_character_found;
+          }
+        }
+        break;
+      case 'c': {  // Output an ASCII character.
+        // Check that there are arguments left to be inserted.
+        if (cur_arg >= max_args) {
+          DEBUG_CHECK(cur_arg < max_args);
+          goto fail_to_expand;
+        }
+
+        // Check that the argument has the expected type.
+        const Arg& arg = args[cur_arg++];
+        if (arg.type != Arg::INT && arg.type != Arg::UINT) {
+          DEBUG_CHECK(arg.type == Arg::INT || arg.type == Arg::UINT);
+          goto fail_to_expand;
+        }
+
+        // Apply padding, if needed.
+        buffer.Pad(' ', padding, 1);
+
+        // Convert the argument to an ASCII character and output it.
+        char as_char = static_cast<char>(arg.integer.i);
+        if (!as_char) {
+          goto end_of_output_buffer;
+        }
+        buffer.Out(as_char);
+        break; }
+      case 'd':    // Output a possibly signed decimal value.
+      case 'o':    // Output an unsigned octal value.
+      case 'x':    // Output an unsigned hexadecimal value.
+      case 'X':
+      case 'p': {  // Output a pointer value.
+        // Check that there are arguments left to be inserted.
+        if (cur_arg >= max_args) {
+          DEBUG_CHECK(cur_arg < max_args);
+          goto fail_to_expand;
+        }
+
+        const Arg& arg = args[cur_arg++];
+        int64_t i;
+        const char* prefix = nullptr;
+        if (ch != 'p') {
+          // Check that the argument has the expected type.
+          if (arg.type != Arg::INT && arg.type != Arg::UINT) {
+            DEBUG_CHECK(arg.type == Arg::INT || arg.type == Arg::UINT);
+            goto fail_to_expand;
+          }
+          i = arg.integer.i;
+
+          if (ch != 'd') {
+            // The Arg() constructor automatically performed sign expansion on
+            // signed parameters. This is great when outputting a %d decimal
+            // number, but can result in unexpected leading 0xFF bytes when
+            // outputting a %x hexadecimal number. Mask bits, if necessary.
+            // We have to do this here, instead of in the Arg() constructor, as
+            // the Arg() constructor cannot tell whether we will output a %d
+            // or a %x. Only the latter should experience masking.
+            if (arg.integer.width < sizeof(int64_t)) {
+              i &= (1LL << (8*arg.integer.width)) - 1;
+            }
+          }
+        } else {
+          // Pointer values require an actual pointer or a string.
+          if (arg.type == Arg::POINTER) {
+            i = reinterpret_cast<uintptr_t>(arg.ptr);
+          } else if (arg.type == Arg::STRING) {
+            i = reinterpret_cast<uintptr_t>(arg.str);
+          } else if (arg.type == Arg::INT &&
+                     arg.integer.width == sizeof(NULL) &&
+                     arg.integer.i == 0) {  // Allow C++'s version of NULL
+            i = 0;
+          } else {
+            DEBUG_CHECK(arg.type == Arg::POINTER || arg.type == Arg::STRING);
+            goto fail_to_expand;
+          }
+
+          // Pointers always include the "0x" prefix.
+          prefix = "0x";
+        }
+
+        // Use IToASCII() to convert to ASCII representation. For decimal
+        // numbers, optionally print a sign. For hexadecimal numbers,
+        // distinguish between upper and lower case. %p addresses are always
+        // printed as upcase. Supports base 8, 10, and 16. Prints padding
+        // and/or prefixes, if so requested.
+        buffer.IToASCII(ch == 'd' && arg.type == Arg::INT,
+                        ch != 'x', i,
+                        ch == 'o' ? 8 : ch == 'd' ? 10 : 16,
+                        pad, padding, prefix);
+        break; }
+      case 's': {
+        // Check that there are arguments left to be inserted.
+        if (cur_arg >= max_args) {
+          DEBUG_CHECK(cur_arg < max_args);
+          goto fail_to_expand;
+        }
+
+        // Check that the argument has the expected type.
+        const Arg& arg = args[cur_arg++];
+        const char *s;
+        if (arg.type == Arg::STRING) {
+          s = arg.str ? arg.str : "<NULL>";
+        } else if (arg.type == Arg::INT && arg.integer.width == sizeof(NULL) &&
+                   arg.integer.i == 0) {  // Allow C++'s version of NULL
+          s = "<NULL>";
+        } else {
+          DEBUG_CHECK(arg.type == Arg::STRING);
+          goto fail_to_expand;
+        }
+
+        // Apply padding, if needed. This requires us to first check the
+        // length of the string that we are outputting.
+        if (padding) {
+          size_t len = 0;
+          for (const char* src = s; *src++; ) {
+            ++len;
+          }
+          buffer.Pad(' ', padding, len);
+        }
+
+        // Printing a string involves nothing more than copying it into the
+        // output buffer and making sure we don't output more bytes than
+        // available space; Out() takes care of doing that.
+        for (const char* src = s; *src; ) {
+          buffer.Out(*src++);
+        }
+        break; }
+      case '%':
+        // Quoted percent '%' character.
+        goto copy_verbatim;
+      fail_to_expand:
+        // C++ gives us tools to do type checking -- something that snprintf()
+        // could never really do. So, whenever we see arguments that don't
+        // match up with the format string, we refuse to output them. But
+        // since we have to be extremely conservative about being async-
+        // signal-safe, we are limited in the type of error handling that we
+        // can do in production builds (in debug builds we can use
+        // DEBUG_CHECK() and hope for the best). So, all we do is pass the
+        // format string unchanged. That should eventually get the user's
+        // attention; and in the meantime, it hopefully doesn't lose too much
+        // data.
+      default:
+        // Unknown or unsupported format character. Just copy verbatim to
+        // output.
+        buffer.Out('%');
+        DEBUG_CHECK(ch);
+        if (!ch) {
+          goto end_of_format_string;
+        }
+        buffer.Out(ch);
+        break;
+      }
+    } else {
+  copy_verbatim:
+    buffer.Out(fmt[-1]);
+    }
+  }
+ end_of_format_string:
+ end_of_output_buffer:
+  return buffer.GetCount();
+}
+
+}  // namespace internal
+
+ssize_t SafeSNPrintf(char* buf, size_t sz, const char* fmt) {
+  // Make sure that at least one NUL byte can be written, and that the buffer
+  // never overflows kSSizeMax. A buffer bigger than that would not only use
+  // up most or all of the address space, it would also result in a return
+  // code that cannot be represented.
+  if (static_cast<ssize_t>(sz) < 1) {
+    return -1;
+  } else if (sz > kSSizeMax) {
+    sz = kSSizeMax;
+  }
+
+  Buffer buffer(buf, sz);
+
+  // In the slow-path, we deal with errors by copying the contents of
+  // "fmt" unexpanded. This means, if there are no arguments passed, the
+  // SafeSPrintf() function always degenerates to a version of strncpy() that
+  // de-duplicates '%' characters.
+  const char* src = fmt;
+  for (; *src; ++src) {
+    buffer.Out(*src);
+    DEBUG_CHECK(src[0] != '%' || src[1] == '%');
+    if (src[0] == '%' && src[1] == '%') {
+      ++src;
+    }
+  }
+  return buffer.GetCount();
+}
+
+}  // namespace strings
+}  // namespace base
diff --git a/base/strings/safe_sprintf.h b/base/strings/safe_sprintf.h
new file mode 100644
index 0000000..01d649d
--- /dev/null
+++ b/base/strings/safe_sprintf.h
@@ -0,0 +1,246 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_SAFE_SPRINTF_H_
+#define BASE_STRINGS_SAFE_SPRINTF_H_
+
+#include "build/build_config.h"
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+// For ssize_t
+#include <unistd.h>
+#endif
+
+#include "base/base_export.h"
+
+namespace base {
+namespace strings {
+
+#if defined(COMPILER_MSVC)
+// Define ssize_t inside of our namespace.
+#if defined(_WIN64)
+typedef __int64 ssize_t;
+#else
+typedef long ssize_t;
+#endif
+#endif
+
+// SafeSPrintf() is a type-safe and completely self-contained version of
+// snprintf().
+//
+// SafeSNPrintf() is an alternative function signature that can be used when
+// not dealing with fixed-sized buffers. When possible, SafeSPrintf() should
+// always be used instead of SafeSNPrintf().
+//
+// These functions allow for formatting complicated messages from contexts that
+// require strict async-signal-safety. In fact, it is safe to call them from
+// any low-level execution context, as they are guaranteed to make no library
+// or system calls. It deliberately never touches "errno", either.
+//
+// The only exception to this rule is that in debug builds the code calls
+// RAW_CHECK() to help diagnose problems when the format string does not
+// match the rest of the arguments. In release builds, no CHECK()s are used,
+// and SafeSPrintf() instead returns an output string that expands only
+// those arguments that match their format characters. Mismatched arguments
+// are ignored.
+//
+// The code currently only supports a subset of format characters:
+//   %c, %o, %d, %x, %X, %p, and %s.
+//
+// SafeSPrintf() aims to be as liberal as reasonably possible. Integer-like
+// values of arbitrary width can be passed to all of the format characters
+// that expect integers. Thus, it is explicitly legal to pass an "int" to
+// "%c", and output will automatically look at the LSB only. It is also
+// explicitly legal to pass either signed or unsigned values, and the format
+// characters will automatically interpret the arguments accordingly.
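+// E.g. (illustrative): SafeSPrintf(buf, "%c", 0x41) outputs "A".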
+//
+// It is still not legal to mix-and-match integer-like values with pointer
+// values. For instance, you cannot pass a pointer to %x, nor can you pass an
+// integer to %p.
+//
+// The one exception is "0" zero being accepted by "%p". This works around
+// the problem of C++ defining NULL as an integer-like value.
+//
+// All format characters take an optional width parameter. This must be a
+// positive integer. For %d, %o, %x, %X and %p, if the width starts with
+// a leading '0', padding is done with '0' instead of ' ' characters.
+//
+// There are a few features of snprintf()-style format strings that
+// SafeSPrintf() does not support at this time.
+//
+// If an actual user showed up, there is no particularly strong reason these
+// features couldn't be added. But that assumes that the trade-offs between
+// complexity and utility are favorable.
+//
+// For example, adding support for negative padding widths, or for %n, would
+// likely be viewed positively. Both are clearly useful, low-risk, easy to
+// test, don't jeopardize the async-signal-safety of the code, and overall
+// have little impact on other parts of the SafeSPrintf() function.
+//
+// On the other hand, adding support for alternate forms, positional
+// arguments, grouping, wide characters, localization or floating point numbers
+// are all unlikely to ever be added.
+//
+// SafeSPrintf() and SafeSNPrintf() mimic the behavior of snprintf() and they
+// return the number of bytes needed to store the untruncated output. This
+// does *not* include the terminating NUL byte.
+//
+// They return -1 iff a fatal error happened. This typically can only happen
+// if the buffer size is a) negative, or b) zero (i.e. not even the NUL byte
+// can be written). The return value can never be larger than SSIZE_MAX-1.
+// This ensures that the caller can always add one to the signed return code
+// in order to determine the amount of storage that needs to be allocated.
+//
+// While the code supports type checking and while it is generally very careful
+// to avoid printing incorrect values, it tends to be conservative in printing
+// as much as possible, even when given incorrect parameters. Typically, in
+// case of an error, the format string will not be expanded. (i.e. something
+// like SafeSPrintf(buf, "%p %d", 1, 2) results in "%p 2"). See above for
+// the use of RAW_CHECK() in debug builds, though.
+//
+// Basic example:
+//   char buf[20];
+//   base::strings::SafeSPrintf(buf, "The answer: %2d", 42);
+//
+// Example with dynamically sized buffer (async-signal-safe). This code won't
+// work on Visual Studio, as it requires dynamically allocating arrays on the
+// stack. Consider picking a smaller value for |kMaxSize| if stack size is
+// limited and known. On the other hand, if the parameters to SafeSNPrintf()
+// are trusted and not controllable by the user, you can consider eliminating
+// the check for |kMaxSize| altogether. Its current value of SSIZE_MAX is
+// essentially a no-op that just illustrates how to implement an upper bound:
+//   const size_t kInitialSize = 128;
+//   const size_t kMaxSize = std::numeric_limits<ssize_t>::max();
+//   size_t size = kInitialSize;
+//   for (;;) {
+//     char buf[size];
+//     size = SafeSNPrintf(buf, size, "Error message \"%s\"\n", err) + 1;
+//     if (sizeof(buf) < kMaxSize && size > kMaxSize) {
+//       size = kMaxSize;
+//       continue;
+//     } else if (size > sizeof(buf))
+//       continue;
+//     write(2, buf, size-1);
+//     break;
+//   }
+
+namespace internal {
+// Helpers that use C++ overloading, templates, and specializations to deduce
+// and record type information from function arguments. This allows us to
+// later write a type-safe version of snprintf().
+
+struct Arg {
+  enum Type { INT, UINT, STRING, POINTER };
+
+  // Any integer-like value.
+  Arg(signed char c) : type(INT) {
+    integer.i = c;
+    integer.width = sizeof(char);
+  }
+  Arg(unsigned char c) : type(UINT) {
+    integer.i = c;
+    integer.width = sizeof(char);
+  }
+  Arg(signed short j) : type(INT) {
+    integer.i = j;
+    integer.width = sizeof(short);
+  }
+  Arg(unsigned short j) : type(UINT) {
+    integer.i = j;
+    integer.width = sizeof(short);
+  }
+  Arg(signed int j) : type(INT) {
+    integer.i = j;
+    integer.width = sizeof(int);
+  }
+  Arg(unsigned int j) : type(UINT) {
+    integer.i = j;
+    integer.width = sizeof(int);
+  }
+  Arg(signed long j) : type(INT) {
+    integer.i = j;
+    integer.width = sizeof(long);
+  }
+  Arg(unsigned long j) : type(UINT) {
+    integer.i = j;
+    integer.width = sizeof(long);
+  }
+  Arg(signed long long j) : type(INT) {
+    integer.i = j;
+    integer.width = sizeof(long long);
+  }
+  Arg(unsigned long long j) : type(UINT) {
+    integer.i = j;
+    integer.width = sizeof(long long);
+  }
+
+  // A C-style text string.
+  Arg(const char* s) : str(s), type(STRING) { }
+  Arg(char* s)       : str(s), type(STRING) { }
+
+  // Any pointer value that can be cast to a "void*".
+  template<class T> Arg(T* p) : ptr((void*)p), type(POINTER) { }
+
+  union {
+    // An integer-like value.
+    struct {
+      int64_t       i;
+      unsigned char width;
+    } integer;
+
+    // A C-style text string.
+    const char* str;
+
+    // A pointer to an arbitrary object.
+    const void* ptr;
+  };
+  const enum Type type;
+};
+
+// This is the internal function that performs the actual formatting of
+// an snprintf()-style format string.
+BASE_EXPORT ssize_t SafeSNPrintf(char* buf, size_t sz, const char* fmt,
+                                 const Arg* args, size_t max_args);
+
+#if !defined(NDEBUG)
+// In debug builds, allow unit tests to artificially lower the kSSizeMax
+// constant that is used as a hard upper-bound for all buffers. In normal
+// use, this constant should always be std::numeric_limits<ssize_t>::max().
+BASE_EXPORT void SetSafeSPrintfSSizeMaxForTest(size_t max);
+BASE_EXPORT size_t GetSafeSPrintfSSizeMaxForTest();
+#endif
+
+}  // namespace internal
+
+template<typename... Args>
+ssize_t SafeSNPrintf(char* buf, size_t N, const char* fmt, Args... args) {
+  // Use Arg() objects to record type information and then copy arguments to an
+  // array to make it easier to iterate over them.
+  const internal::Arg arg_array[] = { args... };
+  return internal::SafeSNPrintf(buf, N, fmt, arg_array, sizeof...(args));
+}
+
+template<size_t N, typename... Args>
+ssize_t SafeSPrintf(char (&buf)[N], const char* fmt, Args... args) {
+  // Use Arg() objects to record type information and then copy arguments to an
+  // array to make it easier to iterate over them.
+  const internal::Arg arg_array[] = { args... };
+  return internal::SafeSNPrintf(buf, N, fmt, arg_array, sizeof...(args));
+}
+
+// Fast-path when we don't actually need to substitute any arguments.
+BASE_EXPORT ssize_t SafeSNPrintf(char* buf, size_t N, const char* fmt);
+template<size_t N>
+inline ssize_t SafeSPrintf(char (&buf)[N], const char* fmt) {
+  return SafeSNPrintf(buf, N, fmt);
+}
+
+}  // namespace strings
+}  // namespace base
+
+#endif  // BASE_STRINGS_SAFE_SPRINTF_H_
diff --git a/base/strings/safe_sprintf_unittest.cc b/base/strings/safe_sprintf_unittest.cc
new file mode 100644
index 0000000..bb9908f
--- /dev/null
+++ b/base/strings/safe_sprintf_unittest.cc
@@ -0,0 +1,765 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/safe_sprintf.h"
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <limits>
+#include <memory>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// Death tests on Android are currently very flaky. No need to add more flaky
+// tests, as they just make it hard to spot real problems.
+// TODO(markus): See if the restrictions on Android can eventually be lifted.
+#if defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
+#define ALLOW_DEATH_TEST
+#endif
+
+namespace base {
+namespace strings {
+
+TEST(SafeSPrintfTest, Empty) {
+  char buf[2] = { 'X', 'X' };
+
+  // Negative buffer size should always result in an error.
+  EXPECT_EQ(-1, SafeSNPrintf(buf, static_cast<size_t>(-1), ""));
+  EXPECT_EQ('X', buf[0]);
+  EXPECT_EQ('X', buf[1]);
+
+  // Zero buffer size should always result in an error.
+  EXPECT_EQ(-1, SafeSNPrintf(buf, 0, ""));
+  EXPECT_EQ('X', buf[0]);
+  EXPECT_EQ('X', buf[1]);
+
+  // A one-byte buffer should always print a single NUL byte.
+  EXPECT_EQ(0, SafeSNPrintf(buf, 1, ""));
+  EXPECT_EQ(0, buf[0]);
+  EXPECT_EQ('X', buf[1]);
+  buf[0] = 'X';
+
+  // A larger buffer should leave the trailing bytes unchanged.
+  EXPECT_EQ(0, SafeSNPrintf(buf, 2, ""));
+  EXPECT_EQ(0, buf[0]);
+  EXPECT_EQ('X', buf[1]);
+  buf[0] = 'X';
+
+  // The same test using SafeSPrintf() instead of SafeSNPrintf().
+  EXPECT_EQ(0, SafeSPrintf(buf, ""));
+  EXPECT_EQ(0, buf[0]);
+  EXPECT_EQ('X', buf[1]);
+  buf[0] = 'X';
+}
+
+TEST(SafeSPrintfTest, NoArguments) {
+  // Output a text message that doesn't require any substitutions. This
+  // is roughly equivalent to calling strncpy() (but unlike strncpy(), it
+  // always adds a trailing NUL and always deduplicates '%' characters).
+  static const char text[] = "hello world";
+  char ref[20], buf[20];
+  memset(ref, 'X', sizeof(ref));
+  memcpy(buf, ref, sizeof(buf));
+
+  // A negative buffer size should always result in an error.
+  EXPECT_EQ(-1, SafeSNPrintf(buf, static_cast<size_t>(-1), text));
+  EXPECT_TRUE(!memcmp(buf, ref, sizeof(buf)));
+
+  // Zero buffer size should always result in an error.
+  EXPECT_EQ(-1, SafeSNPrintf(buf, 0, text));
+  EXPECT_TRUE(!memcmp(buf, ref, sizeof(buf)));
+
+  // A one-byte buffer should always print a single NUL byte.
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(text))-1, SafeSNPrintf(buf, 1, text));
+  EXPECT_EQ(0, buf[0]);
+  EXPECT_TRUE(!memcmp(buf+1, ref+1, sizeof(buf)-1));
+  memcpy(buf, ref, sizeof(buf));
+
+  // A larger (but limited) buffer should always leave the trailing bytes
+  // unchanged.
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(text))-1, SafeSNPrintf(buf, 2, text));
+  EXPECT_EQ(text[0], buf[0]);
+  EXPECT_EQ(0, buf[1]);
+  EXPECT_TRUE(!memcmp(buf+2, ref+2, sizeof(buf)-2));
+  memcpy(buf, ref, sizeof(buf));
+
+  // An unrestricted buffer length should always leave the trailing bytes
+  // unchanged.
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(text))-1,
+            SafeSNPrintf(buf, sizeof(buf), text));
+  EXPECT_EQ(std::string(text), std::string(buf));
+  EXPECT_TRUE(!memcmp(buf + sizeof(text), ref + sizeof(text),
+                      sizeof(buf) - sizeof(text)));
+  memcpy(buf, ref, sizeof(buf));
+
+  // The same test using SafeSPrintf() instead of SafeSNPrintf().
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(text))-1, SafeSPrintf(buf, text));
+  EXPECT_EQ(std::string(text), std::string(buf));
+  EXPECT_TRUE(!memcmp(buf + sizeof(text), ref + sizeof(text),
+                      sizeof(buf) - sizeof(text)));
+  memcpy(buf, ref, sizeof(buf));
+
+  // Check for deduplication of '%' percent characters.
+  EXPECT_EQ(1, SafeSPrintf(buf, "%%"));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%%%%"));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%%X"));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%%%%X"));
+#if defined(NDEBUG)
+  EXPECT_EQ(1, SafeSPrintf(buf, "%"));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%%%"));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%X"));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%%%X"));
+#elif defined(ALLOW_DEATH_TEST)
+  EXPECT_DEATH(SafeSPrintf(buf, "%"), "src.1. == '%'");
+  EXPECT_DEATH(SafeSPrintf(buf, "%%%"), "src.1. == '%'");
+  EXPECT_DEATH(SafeSPrintf(buf, "%X"), "src.1. == '%'");
+  EXPECT_DEATH(SafeSPrintf(buf, "%%%X"), "src.1. == '%'");
+#endif
+}
+
+TEST(SafeSPrintfTest, OneArgument) {
+  // Test basic single-argument single-character substitution.
+  const char text[] = "hello world";
+  const char fmt[]  = "hello%cworld";
+  char ref[20], buf[20];
+  memset(ref, 'X', sizeof(buf));
+  memcpy(buf, ref, sizeof(buf));
+
+  // A negative buffer size should always result in an error.
+  EXPECT_EQ(-1, SafeSNPrintf(buf, static_cast<size_t>(-1), fmt, ' '));
+  EXPECT_TRUE(!memcmp(buf, ref, sizeof(buf)));
+
+  // Zero buffer size should always result in an error.
+  EXPECT_EQ(-1, SafeSNPrintf(buf, 0, fmt, ' '));
+  EXPECT_TRUE(!memcmp(buf, ref, sizeof(buf)));
+
+  // A one-byte buffer should always print a single NUL byte.
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(text))-1,
+            SafeSNPrintf(buf, 1, fmt, ' '));
+  EXPECT_EQ(0, buf[0]);
+  EXPECT_TRUE(!memcmp(buf+1, ref+1, sizeof(buf)-1));
+  memcpy(buf, ref, sizeof(buf));
+
+  // A larger (but limited) buffer should always leave the trailing bytes
+  // unchanged.
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(text))-1,
+            SafeSNPrintf(buf, 2, fmt, ' '));
+  EXPECT_EQ(text[0], buf[0]);
+  EXPECT_EQ(0, buf[1]);
+  EXPECT_TRUE(!memcmp(buf+2, ref+2, sizeof(buf)-2));
+  memcpy(buf, ref, sizeof(buf));
+
+  // An unrestricted buffer length should always leave the trailing bytes
+  // unchanged.
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(text))-1,
+            SafeSNPrintf(buf, sizeof(buf), fmt, ' '));
+  EXPECT_EQ(std::string(text), std::string(buf));
+  EXPECT_TRUE(!memcmp(buf + sizeof(text), ref + sizeof(text),
+                      sizeof(buf) - sizeof(text)));
+  memcpy(buf, ref, sizeof(buf));
+
+  // The same test using SafeSPrintf() instead of SafeSNPrintf().
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(text))-1, SafeSPrintf(buf, fmt, ' '));
+  EXPECT_EQ(std::string(text), std::string(buf));
+  EXPECT_TRUE(!memcmp(buf + sizeof(text), ref + sizeof(text),
+                      sizeof(buf) - sizeof(text)));
+  memcpy(buf, ref, sizeof(buf));
+
+  // Check for deduplication of '%' percent characters.
+  EXPECT_EQ(1, SafeSPrintf(buf, "%%", 0));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%%%%", 0));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%Y", 0));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%%Y", 0));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%%%Y", 0));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%%%%Y", 0));
+#if defined(NDEBUG)
+  EXPECT_EQ(1, SafeSPrintf(buf, "%", 0));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%%%", 0));
+#elif defined(ALLOW_DEATH_TEST)
+  EXPECT_DEATH(SafeSPrintf(buf, "%", 0), "ch");
+  EXPECT_DEATH(SafeSPrintf(buf, "%%%", 0), "ch");
+#endif
+}
+
+TEST(SafeSPrintfTest, MissingArg) {
+#if defined(NDEBUG)
+  char buf[20];
+  EXPECT_EQ(3, SafeSPrintf(buf, "%c%c", 'A'));
+  EXPECT_EQ("A%c", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+  char buf[20];
+  EXPECT_DEATH(SafeSPrintf(buf, "%c%c", 'A'), "cur_arg < max_args");
+#endif
+}
+
+TEST(SafeSPrintfTest, ASANFriendlyBufferTest) {
+  // Print into a buffer that is sized exactly to size. ASAN can verify that
+  // nobody attempts to write past the end of the buffer.
+  // There is a more complicated test in PrintLongString() that covers a lot
+  // more edge cases, but it is also harder to debug in case of a failure.
+  const char kTestString[] = "This is a test";
+  std::unique_ptr<char[]> buf(new char[sizeof(kTestString)]);
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(kTestString) - 1),
+            SafeSNPrintf(buf.get(), sizeof(kTestString), kTestString));
+  EXPECT_EQ(std::string(kTestString), std::string(buf.get()));
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(kTestString) - 1),
+            SafeSNPrintf(buf.get(), sizeof(kTestString), "%s", kTestString));
+  EXPECT_EQ(std::string(kTestString), std::string(buf.get()));
+}
+
+TEST(SafeSPrintfTest, NArgs) {
+  // Pre-C++11 compilers have a different code path that can only print
+  // up to ten distinct arguments.
+  // We test both SafeSPrintf() and SafeSNPrintf(). This makes sure we don't
+  // have typos in the copy-n-pasted code that is needed to deal with various
+  // numbers of arguments.
+  char buf[12];
+  EXPECT_EQ(1, SafeSPrintf(buf, "%c", 1));
+  EXPECT_EQ("\1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%c%c", 1, 2));
+  EXPECT_EQ("\1\2", std::string(buf));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%c%c%c", 1, 2, 3));
+  EXPECT_EQ("\1\2\3", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%c%c%c%c", 1, 2, 3, 4));
+  EXPECT_EQ("\1\2\3\4", std::string(buf));
+  EXPECT_EQ(5, SafeSPrintf(buf, "%c%c%c%c%c", 1, 2, 3, 4, 5));
+  EXPECT_EQ("\1\2\3\4\5", std::string(buf));
+  EXPECT_EQ(6, SafeSPrintf(buf, "%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6));
+  EXPECT_EQ("\1\2\3\4\5\6", std::string(buf));
+  EXPECT_EQ(7, SafeSPrintf(buf, "%c%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6, 7));
+  EXPECT_EQ("\1\2\3\4\5\6\7", std::string(buf));
+  EXPECT_EQ(8, SafeSPrintf(buf, "%c%c%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6, 7, 8));
+  EXPECT_EQ("\1\2\3\4\5\6\7\10", std::string(buf));
+  EXPECT_EQ(9, SafeSPrintf(buf, "%c%c%c%c%c%c%c%c%c",
+                           1, 2, 3, 4, 5, 6, 7, 8, 9));
+  EXPECT_EQ("\1\2\3\4\5\6\7\10\11", std::string(buf));
+  EXPECT_EQ(10, SafeSPrintf(buf, "%c%c%c%c%c%c%c%c%c%c",
+                            1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
+
+  // Repeat all the tests with SafeSNPrintf() instead of SafeSPrintf().
+  EXPECT_EQ("\1\2\3\4\5\6\7\10\11\12", std::string(buf));
+  EXPECT_EQ(1, SafeSNPrintf(buf, 11, "%c", 1));
+  EXPECT_EQ("\1", std::string(buf));
+  EXPECT_EQ(2, SafeSNPrintf(buf, 11, "%c%c", 1, 2));
+  EXPECT_EQ("\1\2", std::string(buf));
+  EXPECT_EQ(3, SafeSNPrintf(buf, 11, "%c%c%c", 1, 2, 3));
+  EXPECT_EQ("\1\2\3", std::string(buf));
+  EXPECT_EQ(4, SafeSNPrintf(buf, 11, "%c%c%c%c", 1, 2, 3, 4));
+  EXPECT_EQ("\1\2\3\4", std::string(buf));
+  EXPECT_EQ(5, SafeSNPrintf(buf, 11, "%c%c%c%c%c", 1, 2, 3, 4, 5));
+  EXPECT_EQ("\1\2\3\4\5", std::string(buf));
+  EXPECT_EQ(6, SafeSNPrintf(buf, 11, "%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6));
+  EXPECT_EQ("\1\2\3\4\5\6", std::string(buf));
+  EXPECT_EQ(7, SafeSNPrintf(buf, 11, "%c%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6, 7));
+  EXPECT_EQ("\1\2\3\4\5\6\7", std::string(buf));
+  EXPECT_EQ(8, SafeSNPrintf(buf, 11, "%c%c%c%c%c%c%c%c",
+                            1, 2, 3, 4, 5, 6, 7, 8));
+  EXPECT_EQ("\1\2\3\4\5\6\7\10", std::string(buf));
+  EXPECT_EQ(9, SafeSNPrintf(buf, 11, "%c%c%c%c%c%c%c%c%c",
+                            1, 2, 3, 4, 5, 6, 7, 8, 9));
+  EXPECT_EQ("\1\2\3\4\5\6\7\10\11", std::string(buf));
+  EXPECT_EQ(10, SafeSNPrintf(buf, 11, "%c%c%c%c%c%c%c%c%c%c",
+                             1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
+  EXPECT_EQ("\1\2\3\4\5\6\7\10\11\12", std::string(buf));
+
+  EXPECT_EQ(11, SafeSPrintf(buf, "%c%c%c%c%c%c%c%c%c%c%c",
+                            1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11));
+  EXPECT_EQ("\1\2\3\4\5\6\7\10\11\12\13", std::string(buf));
+  EXPECT_EQ(11, SafeSNPrintf(buf, 12, "%c%c%c%c%c%c%c%c%c%c%c",
+                             1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11));
+  EXPECT_EQ("\1\2\3\4\5\6\7\10\11\12\13", std::string(buf));
+}
+
+TEST(SafeSPrintfTest, DataTypes) {
+  char buf[40];
+
+  // Bytes
+  EXPECT_EQ(1, SafeSPrintf(buf, "%d", (uint8_t)1));
+  EXPECT_EQ("1", std::string(buf));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%d", (uint8_t)-1));
+  EXPECT_EQ("255", std::string(buf));
+  EXPECT_EQ(1, SafeSPrintf(buf, "%d", (int8_t)1));
+  EXPECT_EQ("1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%d", (int8_t)-1));
+  EXPECT_EQ("-1", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%d", (int8_t)-128));
+  EXPECT_EQ("-128", std::string(buf));
+
+  // Half-words
+  EXPECT_EQ(1, SafeSPrintf(buf, "%d", (uint16_t)1));
+  EXPECT_EQ("1", std::string(buf));
+  EXPECT_EQ(5, SafeSPrintf(buf, "%d", (uint16_t)-1));
+  EXPECT_EQ("65535", std::string(buf));
+  EXPECT_EQ(1, SafeSPrintf(buf, "%d", (int16_t)1));
+  EXPECT_EQ("1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%d", (int16_t)-1));
+  EXPECT_EQ("-1", std::string(buf));
+  EXPECT_EQ(6, SafeSPrintf(buf, "%d", (int16_t)-32768));
+  EXPECT_EQ("-32768", std::string(buf));
+
+  // Words
+  EXPECT_EQ(1, SafeSPrintf(buf, "%d", (uint32_t)1));
+  EXPECT_EQ("1", std::string(buf));
+  EXPECT_EQ(10, SafeSPrintf(buf, "%d", (uint32_t)-1));
+  EXPECT_EQ("4294967295", std::string(buf));
+  EXPECT_EQ(1, SafeSPrintf(buf, "%d", (int32_t)1));
+  EXPECT_EQ("1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%d", (int32_t)-1));
+  EXPECT_EQ("-1", std::string(buf));
+  // Work-around for a limitation of C90
+  EXPECT_EQ(11, SafeSPrintf(buf, "%d", (int32_t)-2147483647-1));
+  EXPECT_EQ("-2147483648", std::string(buf));
+
+  // Quads
+  EXPECT_EQ(1, SafeSPrintf(buf, "%d", (uint64_t)1));
+  EXPECT_EQ("1", std::string(buf));
+  EXPECT_EQ(20, SafeSPrintf(buf, "%d", (uint64_t)-1));
+  EXPECT_EQ("18446744073709551615", std::string(buf));
+  EXPECT_EQ(1, SafeSPrintf(buf, "%d", (int64_t)1));
+  EXPECT_EQ("1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%d", (int64_t)-1));
+  EXPECT_EQ("-1", std::string(buf));
+  // Work-around for a limitation of C90
+  EXPECT_EQ(20, SafeSPrintf(buf, "%d", (int64_t)-9223372036854775807LL-1));
+  EXPECT_EQ("-9223372036854775808", std::string(buf));
+
+  // Strings (both const and mutable).
+  EXPECT_EQ(4, SafeSPrintf(buf, "test"));
+  EXPECT_EQ("test", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, buf));
+  EXPECT_EQ("test", std::string(buf));
+
+  // Pointer
+  char addr[20];
+  sprintf(addr, "0x%llX", (unsigned long long)(uintptr_t)buf);
+  SafeSPrintf(buf, "%p", buf);
+  EXPECT_EQ(std::string(addr), std::string(buf));
+  SafeSPrintf(buf, "%p", (const char *)buf);
+  EXPECT_EQ(std::string(addr), std::string(buf));
+  sprintf(addr, "0x%llX", (unsigned long long)(uintptr_t)sprintf);
+  SafeSPrintf(buf, "%p", sprintf);
+  EXPECT_EQ(std::string(addr), std::string(buf));
+
+  // Padding for pointers is a little more complicated because of the "0x"
+  // prefix. Padding with '0' zeros is relatively straightforward, but
+  // padding with ' ' spaces requires more effort.
+  sprintf(addr, "0x%017llX", (unsigned long long)(uintptr_t)buf);
+  SafeSPrintf(buf, "%019p", buf);
+  EXPECT_EQ(std::string(addr), std::string(buf));
+  sprintf(addr, "0x%llX", (unsigned long long)(uintptr_t)buf);
+  memset(addr, ' ',
+         (char*)memmove(addr + sizeof(addr) - strlen(addr) - 1,
+                        addr, strlen(addr)+1) - addr);
+  SafeSPrintf(buf, "%19p", buf);
+  EXPECT_EQ(std::string(addr), std::string(buf));
+}
+
+namespace {
+void PrintLongString(char* buf, size_t sz) {
+  // Output a reasonably complex expression into a limited-size buffer.
+  // At least one byte is available for writing the NUL character.
+  CHECK_GT(sz, static_cast<size_t>(0));
+
+  // Allocate slightly more space, so that we can verify that SafeSPrintf()
+  // never writes past the end of the buffer.
+  std::unique_ptr<char[]> tmp(new char[sz + 2]);
+  memset(tmp.get(), 'X', sz+2);
+
+  // Use SafeSPrintf() to output a complex list of arguments:
+  // - test padding and truncating %c single characters.
+  // - test truncating %s simple strings.
+  // - test mismatching arguments and truncating (for %d != %s).
+  // - test zero-padding and truncating %x hexadecimal numbers.
+  // - test outputting and truncating %d MININT.
+  // - test outputting and truncating %p arbitrary pointer values.
+  // - test outputting, padding and truncating NULL-pointer %s strings.
+  char* out = tmp.get();
+  size_t out_sz = sz;
+  size_t len;
+  for (std::unique_ptr<char[]> perfect_buf;;) {
+    size_t needed =
+        SafeSNPrintf(out, out_sz,
+#if defined(NDEBUG)
+                     "A%2cong %s: %d %010X %d %p%7s", 'l', "string", "",
+#else
+                     "A%2cong %s: %%d %010X %d %p%7s", 'l', "string",
+#endif
+                     0xDEADBEEF, std::numeric_limits<intptr_t>::min(),
+                     PrintLongString, static_cast<char*>(nullptr)) +
+        1;
+
+    // Various sanity checks:
+    // The number of characters needed to print the full string should always
+    // be greater than or equal to the number of bytes actually output.
+    len = strlen(tmp.get());
+    CHECK_GE(needed, len+1);
+
+    // The number of characters output should always fit into the buffer that
+    // was passed into SafeSPrintf().
+    CHECK_LT(len, out_sz);
+
+    // The output is always terminated with a NUL byte (actually, this test is
+    // always going to pass, as strlen() already verified this)
+    EXPECT_FALSE(tmp[len]);
+
+    // ASAN can check that we are not overwriting buffers, provided we make
+    // the buffer exactly the size that we expect to be written. After
+    // running SafeSNPrintf() the first time, it is possible to compute the
+    // correct buffer size for this test. So, allocate a second buffer and run
+    // the exact same SafeSNPrintf() command again.
+    if (!perfect_buf.get()) {
+      out_sz = std::min(needed, sz);
+      out = new char[out_sz];
+      perfect_buf.reset(out);
+    } else {
+      break;
+    }
+  }
+
+  // All trailing bytes are unchanged.
+  for (size_t i = len+1; i < sz+2; ++i)
+    EXPECT_EQ('X', tmp[i]);
+
+  // The text that was generated by SafeSPrintf() should always match the
+  // equivalent text generated by sprintf(). Please note that the format
+  // string for sprintf() is slightly different, as it does not have the
+  // benefit of getting type information from the C++ compiler.
+  //
+  // N.B.: It would be so much cleaner to use snprintf(). But unfortunately,
+  //       Visual Studio doesn't support this function, and the work-arounds
+  //       are all really awkward.
+  char ref[256];
+  CHECK_LE(sz, sizeof(ref));
+  sprintf(ref, "A long string: %%d 00DEADBEEF %lld 0x%llX <NULL>",
+          static_cast<long long>(std::numeric_limits<intptr_t>::min()),
+          static_cast<unsigned long long>(
+            reinterpret_cast<uintptr_t>(PrintLongString)));
+  ref[sz-1] = '\000';
+
+#if defined(NDEBUG)
+  const size_t kSSizeMax = std::numeric_limits<ssize_t>::max();
+#else
+  const size_t kSSizeMax = internal::GetSafeSPrintfSSizeMaxForTest();
+#endif
+
+  // Compare the output from SafeSPrintf() to the one from sprintf().
+  EXPECT_EQ(std::string(ref).substr(0, kSSizeMax-1), std::string(tmp.get()));
+
+  // We allocated a slightly larger buffer, so that we could perform some
+  // extra sanity checks. Now that the tests have all passed, we copy the
+  // data to the output buffer that the caller provided.
+  memcpy(buf, tmp.get(), len+1);
+}
+
+#if !defined(NDEBUG)
+class ScopedSafeSPrintfSSizeMaxSetter {
+ public:
+  ScopedSafeSPrintfSSizeMaxSetter(size_t sz) {
+    old_ssize_max_ = internal::GetSafeSPrintfSSizeMaxForTest();
+    internal::SetSafeSPrintfSSizeMaxForTest(sz);
+  }
+
+  ~ScopedSafeSPrintfSSizeMaxSetter() {
+    internal::SetSafeSPrintfSSizeMaxForTest(old_ssize_max_);
+  }
+
+ private:
+  size_t old_ssize_max_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedSafeSPrintfSSizeMaxSetter);
+};
+#endif
+
+}  // anonymous namespace
+
+TEST(SafeSPrintfTest, Truncation) {
+  // We use PrintLongString() to print a complex long string and then
+  // truncate to all possible lengths. This ends up exercising a lot of
+  // different code paths in SafeSPrintf() and IToASCII(), as truncation can
+  // happen in a lot of different states.
+  char ref[256];
+  PrintLongString(ref, sizeof(ref));
+  for (size_t i = strlen(ref)+1; i; --i) {
+    char buf[sizeof(ref)];
+    PrintLongString(buf, i);
+    EXPECT_EQ(std::string(ref, i - 1), std::string(buf));
+  }
+
+  // When compiling in debug mode, we have the ability to fake a small
+  // upper limit for the maximum value that can be stored in an ssize_t.
+  // SafeSPrintf() uses this upper limit to determine how many bytes it will
+  // write to the buffer, even if the caller claimed a bigger buffer size.
+  // Repeat the truncation test and verify that this other code path in
+  // SafeSPrintf() works correctly, too.
+#if !defined(NDEBUG)
+  for (size_t i = strlen(ref)+1; i > 1; --i) {
+    ScopedSafeSPrintfSSizeMaxSetter ssize_max_setter(i);
+    char buf[sizeof(ref)];
+    PrintLongString(buf, sizeof(buf));
+    EXPECT_EQ(std::string(ref, i - 1), std::string(buf));
+  }
+
+  // kSSizeMax is also used to constrain the maximum amount of padding, before
+  // SafeSPrintf() detects an error in the format string.
+  ScopedSafeSPrintfSSizeMaxSetter ssize_max_setter(100);
+  char buf[256];
+  EXPECT_EQ(99, SafeSPrintf(buf, "%99c", ' '));
+  EXPECT_EQ(std::string(99, ' '), std::string(buf));
+  *buf = '\000';
+#if defined(ALLOW_DEATH_TEST)
+  EXPECT_DEATH(SafeSPrintf(buf, "%100c", ' '), "padding <= max_padding");
+#endif
+  EXPECT_EQ(0, *buf);
+#endif
+}
+
+TEST(SafeSPrintfTest, Padding) {
+  char buf[40], fmt[40];
+
+  // Chars %c
+  EXPECT_EQ(1, SafeSPrintf(buf, "%c", 'A'));
+  EXPECT_EQ("A", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%2c", 'A'));
+  EXPECT_EQ(" A", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%02c", 'A'));
+  EXPECT_EQ(" A", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%-2c", 'A'));
+  EXPECT_EQ("%-2c", std::string(buf));
+  SafeSPrintf(fmt, "%%%dc", std::numeric_limits<ssize_t>::max() - 1);
+  EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1, SafeSPrintf(buf, fmt, 'A'));
+  SafeSPrintf(fmt, "%%%dc",
+              static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
+#if defined(NDEBUG)
+  EXPECT_EQ(2, SafeSPrintf(buf, fmt, 'A'));
+  EXPECT_EQ("%c", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+  EXPECT_DEATH(SafeSPrintf(buf, fmt, 'A'), "padding <= max_padding");
+#endif
+
+  // Octal %o
+  EXPECT_EQ(1, SafeSPrintf(buf, "%o", 1));
+  EXPECT_EQ("1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%2o", 1));
+  EXPECT_EQ(" 1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%02o", 1));
+  EXPECT_EQ("01", std::string(buf));
+  EXPECT_EQ(12, SafeSPrintf(buf, "%12o", -1));
+  EXPECT_EQ(" 37777777777", std::string(buf));
+  EXPECT_EQ(12, SafeSPrintf(buf, "%012o", -1));
+  EXPECT_EQ("037777777777", std::string(buf));
+  EXPECT_EQ(23, SafeSPrintf(buf, "%23o", -1LL));
+  EXPECT_EQ(" 1777777777777777777777", std::string(buf));
+  EXPECT_EQ(23, SafeSPrintf(buf, "%023o", -1LL));
+  EXPECT_EQ("01777777777777777777777", std::string(buf));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%2o", 0111));
+  EXPECT_EQ("111", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%-2o", 1));
+  EXPECT_EQ("%-2o", std::string(buf));
+  SafeSPrintf(fmt, "%%%do", std::numeric_limits<ssize_t>::max()-1);
+  EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+            SafeSNPrintf(buf, 4, fmt, 1));
+  EXPECT_EQ("   ", std::string(buf));
+  SafeSPrintf(fmt, "%%0%do", std::numeric_limits<ssize_t>::max()-1);
+  EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+            SafeSNPrintf(buf, 4, fmt, 1));
+  EXPECT_EQ("000", std::string(buf));
+  SafeSPrintf(fmt, "%%%do",
+              static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
+#if defined(NDEBUG)
+  EXPECT_EQ(2, SafeSPrintf(buf, fmt, 1));
+  EXPECT_EQ("%o", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+  EXPECT_DEATH(SafeSPrintf(buf, fmt, 1), "padding <= max_padding");
+#endif
+
+  // Decimals %d
+  EXPECT_EQ(1, SafeSPrintf(buf, "%d", 1));
+  EXPECT_EQ("1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%2d", 1));
+  EXPECT_EQ(" 1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%02d", 1));
+  EXPECT_EQ("01", std::string(buf));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%3d", -1));
+  EXPECT_EQ(" -1", std::string(buf));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%03d", -1));
+  EXPECT_EQ("-01", std::string(buf));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%2d", 111));
+  EXPECT_EQ("111", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%2d", -111));
+  EXPECT_EQ("-111", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%-2d", 1));
+  EXPECT_EQ("%-2d", std::string(buf));
+  SafeSPrintf(fmt, "%%%dd", std::numeric_limits<ssize_t>::max()-1);
+  EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+            SafeSNPrintf(buf, 4, fmt, 1));
+  EXPECT_EQ("   ", std::string(buf));
+  SafeSPrintf(fmt, "%%0%dd", std::numeric_limits<ssize_t>::max()-1);
+  EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+            SafeSNPrintf(buf, 4, fmt, 1));
+  EXPECT_EQ("000", std::string(buf));
+  SafeSPrintf(fmt, "%%%dd",
+              static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
+#if defined(NDEBUG)
+  EXPECT_EQ(2, SafeSPrintf(buf, fmt, 1));
+  EXPECT_EQ("%d", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+  EXPECT_DEATH(SafeSPrintf(buf, fmt, 1), "padding <= max_padding");
+#endif
+
+  // Hex %X
+  EXPECT_EQ(1, SafeSPrintf(buf, "%X", 1));
+  EXPECT_EQ("1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%2X", 1));
+  EXPECT_EQ(" 1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%02X", 1));
+  EXPECT_EQ("01", std::string(buf));
+  EXPECT_EQ(9, SafeSPrintf(buf, "%9X", -1));
+  EXPECT_EQ(" FFFFFFFF", std::string(buf));
+  EXPECT_EQ(9, SafeSPrintf(buf, "%09X", -1));
+  EXPECT_EQ("0FFFFFFFF", std::string(buf));
+  EXPECT_EQ(17, SafeSPrintf(buf, "%17X", -1LL));
+  EXPECT_EQ(" FFFFFFFFFFFFFFFF", std::string(buf));
+  EXPECT_EQ(17, SafeSPrintf(buf, "%017X", -1LL));
+  EXPECT_EQ("0FFFFFFFFFFFFFFFF", std::string(buf));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%2X", 0x111));
+  EXPECT_EQ("111", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%-2X", 1));
+  EXPECT_EQ("%-2X", std::string(buf));
+  SafeSPrintf(fmt, "%%%dX", std::numeric_limits<ssize_t>::max()-1);
+  EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+            SafeSNPrintf(buf, 4, fmt, 1));
+  EXPECT_EQ("   ", std::string(buf));
+  SafeSPrintf(fmt, "%%0%dX", std::numeric_limits<ssize_t>::max()-1);
+  EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+            SafeSNPrintf(buf, 4, fmt, 1));
+  EXPECT_EQ("000", std::string(buf));
+  SafeSPrintf(fmt, "%%%dX",
+              static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
+#if defined(NDEBUG)
+  EXPECT_EQ(2, SafeSPrintf(buf, fmt, 1));
+  EXPECT_EQ("%X", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+  EXPECT_DEATH(SafeSPrintf(buf, fmt, 1), "padding <= max_padding");
+#endif
+
+  // Pointer %p
+  EXPECT_EQ(3, SafeSPrintf(buf, "%p", (void*)1));
+  EXPECT_EQ("0x1", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%4p", (void*)1));
+  EXPECT_EQ(" 0x1", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%04p", (void*)1));
+  EXPECT_EQ("0x01", std::string(buf));
+  EXPECT_EQ(5, SafeSPrintf(buf, "%4p", (void*)0x111));
+  EXPECT_EQ("0x111", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%-2p", (void*)1));
+  EXPECT_EQ("%-2p", std::string(buf));
+  SafeSPrintf(fmt, "%%%dp", std::numeric_limits<ssize_t>::max()-1);
+  EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+            SafeSNPrintf(buf, 4, fmt, (void*)1));
+  EXPECT_EQ("   ", std::string(buf));
+  SafeSPrintf(fmt, "%%0%dp", std::numeric_limits<ssize_t>::max()-1);
+  EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+            SafeSNPrintf(buf, 4, fmt, (void*)1));
+  EXPECT_EQ("0x0", std::string(buf));
+  SafeSPrintf(fmt, "%%%dp",
+              static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
+#if defined(NDEBUG)
+  EXPECT_EQ(2, SafeSPrintf(buf, fmt, 1));
+  EXPECT_EQ("%p", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+  EXPECT_DEATH(SafeSPrintf(buf, fmt, 1), "padding <= max_padding");
+#endif
+
+  // String
+  EXPECT_EQ(1, SafeSPrintf(buf, "%s", "A"));
+  EXPECT_EQ("A", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%2s", "A"));
+  EXPECT_EQ(" A", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%02s", "A"));
+  EXPECT_EQ(" A", std::string(buf));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%2s", "AAA"));
+  EXPECT_EQ("AAA", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%-2s", "A"));
+  EXPECT_EQ("%-2s", std::string(buf));
+  SafeSPrintf(fmt, "%%%ds", std::numeric_limits<ssize_t>::max()-1);
+  EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+            SafeSNPrintf(buf, 4, fmt, "A"));
+  EXPECT_EQ("   ", std::string(buf));
+  SafeSPrintf(fmt, "%%0%ds", std::numeric_limits<ssize_t>::max()-1);
+  EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+            SafeSNPrintf(buf, 4, fmt, "A"));
+  EXPECT_EQ("   ", std::string(buf));
+  SafeSPrintf(fmt, "%%%ds",
+              static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
+#if defined(NDEBUG)
+  EXPECT_EQ(2, SafeSPrintf(buf, fmt, "A"));
+  EXPECT_EQ("%s", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+  EXPECT_DEATH(SafeSPrintf(buf, fmt, "A"), "padding <= max_padding");
+#endif
+}
+
+TEST(SafeSPrintfTest, EmbeddedNul) {
+  char buf[] = { 'X', 'X', 'X', 'X' };
+  EXPECT_EQ(2, SafeSPrintf(buf, "%3c", 0));
+  EXPECT_EQ(' ', buf[0]);
+  EXPECT_EQ(' ', buf[1]);
+  EXPECT_EQ(0,   buf[2]);
+  EXPECT_EQ('X', buf[3]);
+
+  // Check handling of a NUL format character. N.B. this takes two different
+  // code paths depending on whether we are actually passing arguments. If
+  // we don't have any arguments, we are running in the fast-path code, that
+  // looks (almost) like a strncpy().
+#if defined(NDEBUG)
+  EXPECT_EQ(2, SafeSPrintf(buf, "%%%"));
+  EXPECT_EQ("%%", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%%%", 0));
+  EXPECT_EQ("%%", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+  EXPECT_DEATH(SafeSPrintf(buf, "%%%"), "src.1. == '%'");
+  EXPECT_DEATH(SafeSPrintf(buf, "%%%", 0), "ch");
+#endif
+}
+
+TEST(SafeSPrintfTest, EmitNULL) {
+  char buf[40];
+#if defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wconversion-null"
+#endif
+  EXPECT_EQ(1, SafeSPrintf(buf, "%d", NULL));
+  EXPECT_EQ("0", std::string(buf));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%p", NULL));
+  EXPECT_EQ("0x0", std::string(buf));
+  EXPECT_EQ(6, SafeSPrintf(buf, "%s", NULL));
+  EXPECT_EQ("<NULL>", std::string(buf));
+#if defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+}
+
+TEST(SafeSPrintfTest, PointerSize) {
+  // The internal data representation is a 64-bit value, independent of the
+  // native word size. We want to perform sign-extension for signed integers,
+  // but we want to avoid doing so for pointer types. This could be a
+  // problem on systems where pointers are only 32 bits wide. This test
+  // verifies that there is no such problem.
+  char *str = reinterpret_cast<char *>(0x80000000u);
+  void *ptr = str;
+  char buf[40];
+  EXPECT_EQ(10, SafeSPrintf(buf, "%p", str));
+  EXPECT_EQ("0x80000000", std::string(buf));
+  EXPECT_EQ(10, SafeSPrintf(buf, "%p", ptr));
+  EXPECT_EQ("0x80000000", std::string(buf));
+}
+
+}  // namespace strings
+}  // namespace base
diff --git a/base/strings/strcat.cc b/base/strings/strcat.cc
new file mode 100644
index 0000000..3d5b2ca
--- /dev/null
+++ b/base/strings/strcat.cc
@@ -0,0 +1,81 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/strcat.h"
+
+namespace base {
+
+namespace {
+
+// Reserves additional capacity in the given string, growing by at least 2x.
+// Used by StrAppend().
+//
+// The "at least 2x" growing rule duplicates the exponential growth of
+// std::string. The problem is that most implementations of reserve() will grow
+// exactly to the requested amount instead of exponentially growing as would
+// happen when appending normally. If we didn't do this, an append after the
+// call to StrAppend() would definitely cause a reallocation, and loops with
+// StrAppend() calls would have O(n^2) complexity to execute. Instead, we want
+// StrAppend() to have the same semantics as std::string::append().
+//
+// If the string is empty, we assume that exponential growth is not necessary.
+template <typename String>
+void ReserveAdditional(String* str, typename String::size_type additional) {
+  str->reserve(std::max(str->size() + additional, str->size() * 2));
+}
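+
+// Worked illustration (hypothetical numbers): appending one character to a
+// 100-character string reserves max(101, 200) == 200, so a loop like
+//
+//   std::string s;
+//   for (int i = 0; i < 1000; ++i)
+//     StrAppend(&s, {"x"});
+//
+// reallocates O(log n) times rather than on every iteration.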
+
+template <typename DestString, typename InputString>
+void StrAppendT(DestString* dest, span<const InputString> pieces) {
+  size_t additional_size = 0;
+  for (const auto& cur : pieces)
+    additional_size += cur.size();
+  ReserveAdditional(dest, additional_size);
+
+  for (const auto& cur : pieces)
+    dest->append(cur.data(), cur.size());
+}
+
+}  // namespace
+
+std::string StrCat(span<const StringPiece> pieces) {
+  std::string result;
+  StrAppendT(&result, pieces);
+  return result;
+}
+
+string16 StrCat(span<const StringPiece16> pieces) {
+  string16 result;
+  StrAppendT(&result, pieces);
+  return result;
+}
+
+std::string StrCat(span<const std::string> pieces) {
+  std::string result;
+  StrAppendT(&result, pieces);
+  return result;
+}
+
+string16 StrCat(span<const string16> pieces) {
+  string16 result;
+  StrAppendT(&result, pieces);
+  return result;
+}
+
+void StrAppend(std::string* dest, span<const StringPiece> pieces) {
+  StrAppendT(dest, pieces);
+}
+
+void StrAppend(string16* dest, span<const StringPiece16> pieces) {
+  StrAppendT(dest, pieces);
+}
+
+void StrAppend(std::string* dest, span<const std::string> pieces) {
+  StrAppendT(dest, pieces);
+}
+
+void StrAppend(string16* dest, span<const string16> pieces) {
+  StrAppendT(dest, pieces);
+}
+
+}  // namespace base
diff --git a/base/strings/strcat.h b/base/strings/strcat.h
new file mode 100644
index 0000000..44c6211
--- /dev/null
+++ b/base/strings/strcat.h
@@ -0,0 +1,99 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_STRCAT_H_
+#define BASE_STRINGS_STRCAT_H_
+
+#include <initializer_list>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/containers/span.h"
+#include "base/strings/string_piece.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+// To resolve a conflict with Win32 API StrCat macro.
+#include "base/win/windows_types.h"
+#endif
+
+namespace base {
+
+// StrCat ----------------------------------------------------------------------
+//
+// StrCat is a function to perform concatenation on a sequence of strings.
+// It is preferable to a sequence of "a + b + c" because it is both faster and
+// generates less code.
+//
+//   std::string result = base::StrCat({"foo ", result, "\nfoo ", bar});
+//
+// To join an array of strings with a separator, see base::JoinString in
+// base/strings/string_util.h.
+//
+// MORE INFO
+//
+// StrCat can see all arguments at once, so it can allocate one return buffer
+// of exactly the right size and copy once, as opposed to a sequence of
+// operator+ which generates a series of temporary strings, copying as it goes.
+// And by using StringPiece arguments, StrCat can avoid creating temporary
+// string objects for char* constants.
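+//
+// For instance (an illustrative comparison, with made-up variables):
+//
+//   std::string joined = StrCat({a, b, c});  // One allocation, one pass.
+//   std::string joined2 = a + b + c;         // May allocate per operator+.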
+//
+// ALTERNATIVES
+//
+// Internal Google / Abseil has a similar StrCat function. That version takes
+// an overloaded number of arguments instead of an initializer list
+// (overflowing to an initializer list for many arguments). We don't have any
+// legacy requirements, and using only initializer_list is simpler and
+// generates roughly the same amount of code at the call sites.
+//
+// Abseil's StrCat also allows numbers by using an intermediate class that can
+// be implicitly constructed from either a string or various number types. This
+// class formats the numbers into a static buffer for increased performance,
+// and the call sites look nice.
+//
+// As written, Abseil's helper class for numbers generates slightly more code
+// than the raw StringPiece version. We can de-inline the helper class'
+// constructors which will cause the StringPiece constructors to be de-inlined
+// for this call and generate slightly less code. This is something we can
+// explore more in the future.
+
+BASE_EXPORT std::string StrCat(span<const StringPiece> pieces);
+BASE_EXPORT string16 StrCat(span<const StringPiece16> pieces);
+BASE_EXPORT std::string StrCat(span<const std::string> pieces);
+BASE_EXPORT string16 StrCat(span<const string16> pieces);
+
+// Initializer list forwards to the array version.
+inline std::string StrCat(std::initializer_list<StringPiece> pieces) {
+  return StrCat(make_span(pieces.begin(), pieces.size()));
+}
+inline string16 StrCat(std::initializer_list<StringPiece16> pieces) {
+  return StrCat(make_span(pieces.begin(), pieces.size()));
+}
+
+// StrAppend -------------------------------------------------------------------
+//
+// Appends a sequence of strings to a destination. Prefer:
+//   StrAppend(&foo, ...);
+// over:
+//   foo += StrCat(...);
+// because it avoids a temporary string allocation and copy.
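+//
+// A sketch of typical usage (variable names are made up):
+//
+//   std::string message = "error: ";
+//   StrAppend(&message, {file_name, ":", line_number_str, ": ", details});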
+
+BASE_EXPORT void StrAppend(std::string* dest, span<const StringPiece> pieces);
+BASE_EXPORT void StrAppend(string16* dest, span<const StringPiece16> pieces);
+BASE_EXPORT void StrAppend(std::string* dest, span<const std::string> pieces);
+BASE_EXPORT void StrAppend(string16* dest, span<const string16> pieces);
+
+// Initializer list forwards to the array version.
+inline void StrAppend(std::string* dest,
+                      std::initializer_list<StringPiece> pieces) {
+  return StrAppend(dest, make_span(pieces.begin(), pieces.size()));
+}
+inline void StrAppend(string16* dest,
+                      std::initializer_list<StringPiece16> pieces) {
+  return StrAppend(dest, make_span(pieces.begin(), pieces.size()));
+}
+
+}  // namespace base
+
+#endif  // BASE_STRINGS_STRCAT_H_
diff --git a/base/strings/strcat_unittest.cc b/base/strings/strcat_unittest.cc
new file mode 100644
index 0000000..cf2db51
--- /dev/null
+++ b/base/strings/strcat_unittest.cc
@@ -0,0 +1,67 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/strcat.h"
+#include "base/strings/utf_string_conversions.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(StrCat, 8Bit) {
+  EXPECT_EQ("", StrCat({""}));
+  EXPECT_EQ("1", StrCat({"1"}));
+  EXPECT_EQ("122", StrCat({"1", "22"}));
+  EXPECT_EQ("122333", StrCat({"1", "22", "333"}));
+  EXPECT_EQ("1223334444", StrCat({"1", "22", "333", "4444"}));
+  EXPECT_EQ("122333444455555", StrCat({"1", "22", "333", "4444", "55555"}));
+}
+
+TEST(StrCat, 16Bit) {
+  string16 arg1 = ASCIIToUTF16("1");
+  string16 arg2 = ASCIIToUTF16("22");
+  string16 arg3 = ASCIIToUTF16("333");
+
+  EXPECT_EQ(ASCIIToUTF16(""), StrCat({string16()}));
+  EXPECT_EQ(ASCIIToUTF16("1"), StrCat({arg1}));
+  EXPECT_EQ(ASCIIToUTF16("122"), StrCat({arg1, arg2}));
+  EXPECT_EQ(ASCIIToUTF16("122333"), StrCat({arg1, arg2, arg3}));
+}
+
+TEST(StrAppend, 8Bit) {
+  std::string result;
+
+  result = "foo";
+  StrAppend(&result, {std::string()});
+  EXPECT_EQ("foo", result);
+
+  result = "foo";
+  StrAppend(&result, {"1"});
+  EXPECT_EQ("foo1", result);
+
+  result = "foo";
+  StrAppend(&result, {"1", "22", "333"});
+  EXPECT_EQ("foo122333", result);
+}
+
+TEST(StrAppend, 16Bit) {
+  string16 arg1 = ASCIIToUTF16("1");
+  string16 arg2 = ASCIIToUTF16("22");
+  string16 arg3 = ASCIIToUTF16("333");
+
+  string16 result;
+
+  result = ASCIIToUTF16("foo");
+  StrAppend(&result, {string16()});
+  EXPECT_EQ(ASCIIToUTF16("foo"), result);
+
+  result = ASCIIToUTF16("foo");
+  StrAppend(&result, {arg1});
+  EXPECT_EQ(ASCIIToUTF16("foo1"), result);
+
+  result = ASCIIToUTF16("foo");
+  StrAppend(&result, {arg1, arg2, arg3});
+  EXPECT_EQ(ASCIIToUTF16("foo122333"), result);
+}
+
+}  // namespace base
diff --git a/base/strings/string16.cc b/base/strings/string16.cc
new file mode 100644
index 0000000..2abb0e5
--- /dev/null
+++ b/base/strings/string16.cc
@@ -0,0 +1,87 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/string16.h"
+
+#if defined(WCHAR_T_IS_UTF16) && !defined(_AIX)
+
+#error This file should not be used on 2-byte wchar_t systems
+// If this winds up being needed on 2-byte wchar_t systems, either the
+// definitions below can be used, or the host system's wide character
+// functions like wmemcmp can be wrapped.
+
+#elif defined(WCHAR_T_IS_UTF32)
+
+#include <ostream>
+
+#include "base/strings/utf_string_conversions.h"
+
+namespace base {
+
+int c16memcmp(const char16* s1, const char16* s2, size_t n) {
+  // We cannot call memcmp because that changes the semantics.
+  while (n-- > 0) {
+    if (*s1 != *s2) {
+      // We cannot use (*s1 - *s2) because char16 is unsigned.
+      return ((*s1 < *s2) ? -1 : 1);
+    }
+    ++s1;
+    ++s2;
+  }
+  return 0;
+}
+
+size_t c16len(const char16* s) {
+  const char16 *s_orig = s;
+  while (*s) {
+    ++s;
+  }
+  return s - s_orig;
+}
+
+const char16* c16memchr(const char16* s, char16 c, size_t n) {
+  while (n-- > 0) {
+    if (*s == c) {
+      return s;
+    }
+    ++s;
+  }
+  return nullptr;
+}
+
+char16* c16memmove(char16* s1, const char16* s2, size_t n) {
+  return static_cast<char16*>(memmove(s1, s2, n * sizeof(char16)));
+}
+
+char16* c16memcpy(char16* s1, const char16* s2, size_t n) {
+  return static_cast<char16*>(memcpy(s1, s2, n * sizeof(char16)));
+}
+
+char16* c16memset(char16* s, char16 c, size_t n) {
+  char16 *s_orig = s;
+  while (n-- > 0) {
+    *s = c;
+    ++s;
+  }
+  return s_orig;
+}
+
+namespace string16_internals {
+
+std::ostream& operator<<(std::ostream& out, const string16& str) {
+  return out << UTF16ToUTF8(str);
+}
+
+void PrintTo(const string16& str, std::ostream* out) {
+  *out << str;
+}
+
+}  // namespace string16_internals
+
+}  // namespace base
+
+template class std::
+    basic_string<base::char16, base::string16_internals::string16_char_traits>;
+
+#endif  // WCHAR_T_IS_UTF32
diff --git a/base/strings/string16.h b/base/strings/string16.h
new file mode 100644
index 0000000..a86baa2
--- /dev/null
+++ b/base/strings/string16.h
@@ -0,0 +1,219 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_STRING16_H_
+#define BASE_STRINGS_STRING16_H_
+
+// WHAT:
+// A version of std::basic_string that provides 2-byte characters even when
+// wchar_t is not implemented as a 2-byte type. You can access this class as
+// string16. We also define char16, which string16 is based upon.
+//
+// WHY:
+// On Windows, wchar_t is 2 bytes, and it can conveniently handle UTF-16/UCS-2
+// data. Plenty of existing code operates on strings encoded as UTF-16.
+//
+// On many other platforms, sizeof(wchar_t) is 4 bytes by default. We can make
+// it 2 bytes by using the GCC flag -fshort-wchar. But then std::wstring fails
+// at run time, because it calls some functions (like wcslen) that come from
+// the system's native C library -- which was built with a 4-byte wchar_t!
+// It's wasteful to use 4-byte wchar_t strings to carry UTF-16 data, and it's
+// entirely improper on those systems where the encoding of wchar_t is defined
+// as UTF-32.
+//
+// Here, we define string16, which is similar to std::wstring but replaces all
+// libc functions with custom, 2-byte-char compatible routines. It is capable
+// of carrying UTF-16-encoded data.
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+
+#include <functional>
+#include <string>
+
+#include "base/base_export.h"
+#include "build/build_config.h"
+
+#if defined(WCHAR_T_IS_UTF16)
+
+namespace base {
+
+typedef wchar_t char16;
+typedef std::wstring string16;
+
+}  // namespace base
+
+#elif defined(WCHAR_T_IS_UTF32)
+
+#include <wchar.h>  // for mbstate_t
+
+namespace base {
+
+typedef uint16_t char16;
+
+// char16 versions of the functions required by string16_char_traits; these
+// are based on the wide character functions of similar names ("w" or "wcs"
+// instead of "c16").
+BASE_EXPORT int c16memcmp(const char16* s1, const char16* s2, size_t n);
+BASE_EXPORT size_t c16len(const char16* s);
+BASE_EXPORT const char16* c16memchr(const char16* s, char16 c, size_t n);
+BASE_EXPORT char16* c16memmove(char16* s1, const char16* s2, size_t n);
+BASE_EXPORT char16* c16memcpy(char16* s1, const char16* s2, size_t n);
+BASE_EXPORT char16* c16memset(char16* s, char16 c, size_t n);
+
+// This namespace contains the implementation of base::string16 along with
+// things that need to be found via argument-dependent lookup from a
+// base::string16.
+namespace string16_internals {
+
+struct string16_char_traits {
+  typedef char16 char_type;
+  typedef int int_type;
+
+  // int_type needs to be able to hold each possible value of char_type, and in
+  // addition, the distinct value of eof().
+  static_assert(sizeof(int_type) > sizeof(char_type),
+                "int must be larger than 16 bits wide");
+
+  typedef std::streamoff off_type;
+  typedef mbstate_t state_type;
+  typedef std::fpos<state_type> pos_type;
+
+  static void assign(char_type& c1, const char_type& c2) {
+    c1 = c2;
+  }
+
+  static bool eq(const char_type& c1, const char_type& c2) {
+    return c1 == c2;
+  }
+  static bool lt(const char_type& c1, const char_type& c2) {
+    return c1 < c2;
+  }
+
+  static int compare(const char_type* s1, const char_type* s2, size_t n) {
+    return c16memcmp(s1, s2, n);
+  }
+
+  static size_t length(const char_type* s) {
+    return c16len(s);
+  }
+
+  static const char_type* find(const char_type* s, size_t n,
+                               const char_type& a) {
+    return c16memchr(s, a, n);
+  }
+
+  static char_type* move(char_type* s1, const char_type* s2, size_t n) {
+    return c16memmove(s1, s2, n);
+  }
+
+  static char_type* copy(char_type* s1, const char_type* s2, size_t n) {
+    return c16memcpy(s1, s2, n);
+  }
+
+  static char_type* assign(char_type* s, size_t n, char_type a) {
+    return c16memset(s, a, n);
+  }
+
+  static int_type not_eof(const int_type& c) {
+    return eq_int_type(c, eof()) ? 0 : c;
+  }
+
+  static char_type to_char_type(const int_type& c) {
+    return char_type(c);
+  }
+
+  static int_type to_int_type(const char_type& c) {
+    return int_type(c);
+  }
+
+  static bool eq_int_type(const int_type& c1, const int_type& c2) {
+    return c1 == c2;
+  }
+
+  static int_type eof() {
+    return static_cast<int_type>(EOF);
+  }
+};
+
+}  // namespace string16_internals
+
+typedef std::basic_string<char16,
+                          base::string16_internals::string16_char_traits>
+    string16;
+
+namespace string16_internals {
+
+BASE_EXPORT extern std::ostream& operator<<(std::ostream& out,
+                                            const string16& str);
+
+// This is required by googletest to print a readable output on test failures.
+BASE_EXPORT extern void PrintTo(const string16& str, std::ostream* out);
+
+}  // namespace string16_internals
+
+}  // namespace base
+
+// The string class will be explicitly instantiated only once, in string16.cc.
+//
+// std::basic_string<> in GNU libstdc++ contains a static data member,
+// _S_empty_rep_storage, to represent empty strings.  When an operation such
+// as assignment or destruction is performed on a string, causing its existing
+// data member to be invalidated, it must not be freed if this static data
+// member is being used.  Otherwise, it counts as an attempt to free static
+// (and not allocated) data, which is a memory error.
+//
+// Generally, due to C++ template magic, _S_empty_rep_storage will be marked
+// as a coalesced symbol, meaning that the linker will combine multiple
+// instances into a single one when generating output.
+//
+// If a string class is used by multiple shared libraries, a problem occurs.
+// Each library will get its own copy of _S_empty_rep_storage.  When strings
+// are passed across a library boundary for alteration or destruction, memory
+// errors will result.  GNU libstdc++ contains a configuration option,
+// --enable-fully-dynamic-string (_GLIBCXX_FULLY_DYNAMIC_STRING), which
+// disables the static data member optimization, but it's a good optimization
+// and non-STL code is generally at the mercy of the system's STL
+// configuration.  Fully-dynamic strings are not the default for GNU libstdc++
+// libstdc++ itself or for the libstdc++ installations on the systems we care
+// about, such as Mac OS X and relevant flavors of Linux.
+//
+// See also http://gcc.gnu.org/bugzilla/show_bug.cgi?id=24196 .
+//
+// To avoid problems, string classes need to be explicitly instantiated only
+// once, in exactly one library.  All other string users see it via an "extern"
+// declaration.  This is precisely how GNU libstdc++ handles
+// std::basic_string<char> (string) and std::basic_string<wchar_t> (wstring).
+//
+// This also works around a Mac OS X linker bug in ld64-85.2.1 (Xcode 3.1.2),
+// in which the linker does not fully coalesce symbols when dead code
+// stripping is enabled.  This bug causes the memory errors described above
+// to occur even when a std::basic_string<> does not cross shared library
+// boundaries, such as in statically-linked executables.
+//
+// TODO(mark): File this bug with Apple and update this note with a bug number.
+
+extern template class BASE_EXPORT
+    std::basic_string<base::char16,
+                      base::string16_internals::string16_char_traits>;
+
+// Specialize std::hash for base::string16. Although the style guide forbids
+// this in general, it is necessary for consistency with WCHAR_T_IS_UTF16
+// platforms, where base::string16 is a type alias for std::wstring.
+namespace std {
+template <>
+struct hash<base::string16> {
+  std::size_t operator()(const base::string16& s) const {
+    std::size_t result = 0;
+    for (base::char16 c : s)
+      result = (result * 131) + c;
+    return result;
+  }
+};
+}  // namespace std
+
+#endif  // WCHAR_T_IS_UTF32
+
+#endif  // BASE_STRINGS_STRING16_H_
diff --git a/base/strings/string16_unittest.cc b/base/strings/string16_unittest.cc
new file mode 100644
index 0000000..0d2ca80
--- /dev/null
+++ b/base/strings/string16_unittest.cc
@@ -0,0 +1,66 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <sstream>
+#include <unordered_set>
+
+#include "base/strings/string16.h"
+
+#include "base/strings/utf_string_conversions.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+// We define a custom operator<< for string16 so we can use it with logging.
+// This tests that conversion.
+TEST(String16Test, OutputStream) {
+  // Basic stream test.
+  {
+    std::ostringstream stream;
+    stream << "Empty '" << string16() << "' standard '"
+           << string16(ASCIIToUTF16("Hello, world")) << "'";
+    EXPECT_STREQ("Empty '' standard 'Hello, world'",
+                 stream.str().c_str());
+  }
+
+  // Interesting edge cases.
+  {
+    // These should each get converted to the invalid character: EF BF BD.
+    string16 initial_surrogate;
+    initial_surrogate.push_back(0xd800);
+    string16 final_surrogate;
+    final_surrogate.push_back(0xdc00);
+
+    // Old italic A = U+10300, will get converted to: F0 90 8C 80 'z'.
+    string16 surrogate_pair;
+    surrogate_pair.push_back(0xd800);
+    surrogate_pair.push_back(0xdf00);
+    surrogate_pair.push_back('z');
+
+    // Will get converted to the invalid char + 's': EF BF BD 's'.
+    string16 unterminated_surrogate;
+    unterminated_surrogate.push_back(0xd800);
+    unterminated_surrogate.push_back('s');
+
+    std::ostringstream stream;
+    stream << initial_surrogate << "," << final_surrogate << ","
+           << surrogate_pair << "," << unterminated_surrogate;
+
+    EXPECT_STREQ("\xef\xbf\xbd,\xef\xbf\xbd,\xf0\x90\x8c\x80z,\xef\xbf\xbds",
+                 stream.str().c_str());
+  }
+}
+
+TEST(String16Test, Hash) {
+  string16 str1 = ASCIIToUTF16("hello");
+  string16 str2 = ASCIIToUTF16("world");
+
+  std::unordered_set<string16> set;
+
+  set.insert(str1);
+  EXPECT_EQ(1u, set.count(str1));
+  EXPECT_EQ(0u, set.count(str2));
+}
+
+}  // namespace base
diff --git a/base/strings/string16_unittest.nc b/base/strings/string16_unittest.nc
new file mode 100644
index 0000000..5186a45
--- /dev/null
+++ b/base/strings/string16_unittest.nc
@@ -0,0 +1,25 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is a "No Compile Test".
+// http://dev.chromium.org/developers/testing/no-compile-tests
+
+#include "base/strings/string16.h"
+
+#if defined(NCTEST_NO_KOENIG_LOOKUP_FOR_STRING16)  // [r"use of undeclared identifier 'ShouldNotBeFound'"]
+
+// base::string16 is declared as a typedef. It should not cause other functions
+// in base to be found via Argument-dependent lookup.
+
+namespace base {
+void ShouldNotBeFound(const base::string16& arg) {}
+}
+
+// Intentionally not in base:: namespace.
+void WontCompile() {
+  base::string16 s;
+  ShouldNotBeFound(s);
+}
+
+#endif
diff --git a/base/strings/string_number_conversions.cc b/base/strings/string_number_conversions.cc
new file mode 100644
index 0000000..86fa2e3
--- /dev/null
+++ b/base/strings/string_number_conversions.cc
@@ -0,0 +1,501 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/string_number_conversions.h"
+
+#include <ctype.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <wctype.h>
+
+#include <limits>
+#include <type_traits>
+
+#include "base/logging.h"
+#include "base/numerics/safe_math.h"
+#include "base/scoped_clear_errno.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/third_party/dmg_fp/dmg_fp.h"
+
+namespace base {
+
+namespace {
+
+template <typename STR, typename INT>
+struct IntToStringT {
+  static STR IntToString(INT value) {
+    // Each bit needs log10(2) ~= 0.3 decimal digits, i.e. log10(2**8) ~= 2.4
+    // digits per byte. So round up to allocate 3 output characters per byte,
+    // plus 1 for '-'.
+    const size_t kOutputBufSize =
+        3 * sizeof(INT) + std::numeric_limits<INT>::is_signed;
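+    // Worked example: for a signed 4-byte int32_t this yields 3 * 4 + 1 == 13
+    // characters, comfortably covering "-2147483648" (11 characters; no
+    // terminating NUL is needed because the result is built as STR(i, end)).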
+
+    // Create the string in a temporary buffer, write it back to front, and
+    // then return the substr of what we ended up using.
+    using CHR = typename STR::value_type;
+    CHR outbuf[kOutputBufSize];
+
+    // The ValueOrDie call below can never fail, because UnsignedAbs is valid
+    // for all valid inputs.
+    typename std::make_unsigned<INT>::type res =
+        CheckedNumeric<INT>(value).UnsignedAbs().ValueOrDie();
+
+    CHR* end = outbuf + kOutputBufSize;
+    CHR* i = end;
+    do {
+      --i;
+      DCHECK(i != outbuf);
+      *i = static_cast<CHR>((res % 10) + '0');
+      res /= 10;
+    } while (res != 0);
+    if (IsValueNegative(value)) {
+      --i;
+      DCHECK(i != outbuf);
+      *i = static_cast<CHR>('-');
+    }
+    return STR(i, end);
+  }
+};
+
+// Utility to convert a character to a digit in a given base
+template<typename CHAR, int BASE, bool BASE_LTE_10> class BaseCharToDigit {
+};
+
+// Faster specialization for bases <= 10
+template<typename CHAR, int BASE> class BaseCharToDigit<CHAR, BASE, true> {
+ public:
+  static bool Convert(CHAR c, uint8_t* digit) {
+    if (c >= '0' && c < '0' + BASE) {
+      *digit = static_cast<uint8_t>(c - '0');
+      return true;
+    }
+    return false;
+  }
+};
+
+// Specialization for bases where 10 < base <= 36
+template<typename CHAR, int BASE> class BaseCharToDigit<CHAR, BASE, false> {
+ public:
+  static bool Convert(CHAR c, uint8_t* digit) {
+    if (c >= '0' && c <= '9') {
+      *digit = c - '0';
+    } else if (c >= 'a' && c < 'a' + BASE - 10) {
+      *digit = c - 'a' + 10;
+    } else if (c >= 'A' && c < 'A' + BASE - 10) {
+      *digit = c - 'A' + 10;
+    } else {
+      return false;
+    }
+    return true;
+  }
+};
+
+template <int BASE, typename CHAR>
+bool CharToDigit(CHAR c, uint8_t* digit) {
+  return BaseCharToDigit<CHAR, BASE, BASE <= 10>::Convert(c, digit);
+}
+
+// There is an IsUnicodeWhitespace for wchars defined in string_util.h, but it
+// is locale-independent, whereas the functions we are replacing were
+// locale-dependent. TBD what is desired, but for the moment let's not
+// introduce a change in behavior.
+template<typename CHAR> class WhitespaceHelper {
+};
+
+template<> class WhitespaceHelper<char> {
+ public:
+  static bool Invoke(char c) {
+    return 0 != isspace(static_cast<unsigned char>(c));
+  }
+};
+
+template<> class WhitespaceHelper<char16> {
+ public:
+  static bool Invoke(char16 c) {
+    return 0 != iswspace(c);
+  }
+};
+
+template<typename CHAR> bool LocalIsWhitespace(CHAR c) {
+  return WhitespaceHelper<CHAR>::Invoke(c);
+}
+
+// IteratorRangeToNumberTraits should provide:
+//  - a typedef for iterator_type, the iterator type used as input.
+//  - a typedef for value_type, the target numeric type.
+//  - static functions min, max (returning the minimum and maximum permitted
+//    values)
+//  - constant kBase, the base in which to interpret the input
+template<typename IteratorRangeToNumberTraits>
+class IteratorRangeToNumber {
+ public:
+  typedef IteratorRangeToNumberTraits traits;
+  typedef typename traits::iterator_type const_iterator;
+  typedef typename traits::value_type value_type;
+
+  // Generalized iterator-range-to-number conversion.
+  static bool Invoke(const_iterator begin,
+                     const_iterator end,
+                     value_type* output) {
+    bool valid = true;
+
+    while (begin != end && LocalIsWhitespace(*begin)) {
+      valid = false;
+      ++begin;
+    }
+
+    if (begin != end && *begin == '-') {
+      if (!std::numeric_limits<value_type>::is_signed) {
+        *output = 0;
+        valid = false;
+      } else if (!Negative::Invoke(begin + 1, end, output)) {
+        valid = false;
+      }
+    } else {
+      if (begin != end && *begin == '+') {
+        ++begin;
+      }
+      if (!Positive::Invoke(begin, end, output)) {
+        valid = false;
+      }
+    }
+
+    return valid;
+  }
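+
+  // Note that |*output| is always written, even when false is returned; the
+  // caller sees the clamped or partially parsed value (see the header).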
+
+ private:
+  // Sign provides:
+  //  - a static function, CheckBounds, that determines whether the next digit
+  //    causes an overflow/underflow
+  //  - a static function, Increment, that appends the next digit appropriately
+  //    according to the sign of the number being parsed.
+  template<typename Sign>
+  class Base {
+   public:
+    static bool Invoke(const_iterator begin, const_iterator end,
+                       typename traits::value_type* output) {
+      *output = 0;
+
+      if (begin == end) {
+        return false;
+      }
+
+      // Note: no performance difference was found when using template
+      // specialization to remove this check in bases other than 16.
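+      // For example, a base-16 input "0x1A" is parsed from '1' onwards.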
+      if (traits::kBase == 16 && end - begin > 2 && *begin == '0' &&
+          (*(begin + 1) == 'x' || *(begin + 1) == 'X')) {
+        begin += 2;
+      }
+
+      for (const_iterator current = begin; current != end; ++current) {
+        uint8_t new_digit = 0;
+
+        if (!CharToDigit<traits::kBase>(*current, &new_digit)) {
+          return false;
+        }
+
+        if (current != begin) {
+          if (!Sign::CheckBounds(output, new_digit)) {
+            return false;
+          }
+          *output *= traits::kBase;
+        }
+
+        Sign::Increment(new_digit, output);
+      }
+      return true;
+    }
+  };
+
+  class Positive : public Base<Positive> {
+   public:
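+    // For int in base 10, CheckBounds rejects a tenth digit once *output
+    // exceeds 214748364, or equals it with a next digit above 7, since the
+    // result would pass INT_MAX = 2147483647; *output is clamped to max().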
+    static bool CheckBounds(value_type* output, uint8_t new_digit) {
+      if (*output > static_cast<value_type>(traits::max() / traits::kBase) ||
+          (*output == static_cast<value_type>(traits::max() / traits::kBase) &&
+           new_digit > traits::max() % traits::kBase)) {
+        *output = traits::max();
+        return false;
+      }
+      return true;
+    }
+    static void Increment(uint8_t increment, value_type* output) {
+      *output += increment;
+    }
+  };
+
+  class Negative : public Base<Negative> {
+   public:
+    static bool CheckBounds(value_type* output, uint8_t new_digit) {
+      if (*output < traits::min() / traits::kBase ||
+          (*output == traits::min() / traits::kBase &&
+           new_digit > 0 - traits::min() % traits::kBase)) {
+        *output = traits::min();
+        return false;
+      }
+      return true;
+    }
+    static void Increment(uint8_t increment, value_type* output) {
+      *output -= increment;
+    }
+  };
+};
+
+template<typename ITERATOR, typename VALUE, int BASE>
+class BaseIteratorRangeToNumberTraits {
+ public:
+  typedef ITERATOR iterator_type;
+  typedef VALUE value_type;
+  static value_type min() {
+    return std::numeric_limits<value_type>::min();
+  }
+  static value_type max() {
+    return std::numeric_limits<value_type>::max();
+  }
+  static const int kBase = BASE;
+};
+
+template<typename ITERATOR>
+class BaseHexIteratorRangeToIntTraits
+    : public BaseIteratorRangeToNumberTraits<ITERATOR, int, 16> {
+};
+
+template <typename ITERATOR>
+class BaseHexIteratorRangeToUIntTraits
+    : public BaseIteratorRangeToNumberTraits<ITERATOR, uint32_t, 16> {};
+
+template <typename ITERATOR>
+class BaseHexIteratorRangeToInt64Traits
+    : public BaseIteratorRangeToNumberTraits<ITERATOR, int64_t, 16> {};
+
+template <typename ITERATOR>
+class BaseHexIteratorRangeToUInt64Traits
+    : public BaseIteratorRangeToNumberTraits<ITERATOR, uint64_t, 16> {};
+
+typedef BaseHexIteratorRangeToIntTraits<StringPiece::const_iterator>
+    HexIteratorRangeToIntTraits;
+
+typedef BaseHexIteratorRangeToUIntTraits<StringPiece::const_iterator>
+    HexIteratorRangeToUIntTraits;
+
+typedef BaseHexIteratorRangeToInt64Traits<StringPiece::const_iterator>
+    HexIteratorRangeToInt64Traits;
+
+typedef BaseHexIteratorRangeToUInt64Traits<StringPiece::const_iterator>
+    HexIteratorRangeToUInt64Traits;
+
+template <typename VALUE, int BASE>
+class StringPieceToNumberTraits
+    : public BaseIteratorRangeToNumberTraits<StringPiece::const_iterator,
+                                             VALUE,
+                                             BASE> {
+};
+
+template <typename VALUE>
+bool StringToIntImpl(StringPiece input, VALUE* output) {
+  return IteratorRangeToNumber<StringPieceToNumberTraits<VALUE, 10> >::Invoke(
+      input.begin(), input.end(), output);
+}
+
+template <typename VALUE, int BASE>
+class StringPiece16ToNumberTraits
+    : public BaseIteratorRangeToNumberTraits<StringPiece16::const_iterator,
+                                             VALUE,
+                                             BASE> {
+};
+
+template <typename VALUE>
+bool String16ToIntImpl(StringPiece16 input, VALUE* output) {
+  return IteratorRangeToNumber<StringPiece16ToNumberTraits<VALUE, 10> >::Invoke(
+      input.begin(), input.end(), output);
+}
+
+}  // namespace
+
+std::string NumberToString(int value) {
+  return IntToStringT<std::string, int>::IntToString(value);
+}
+
+string16 NumberToString16(int value) {
+  return IntToStringT<string16, int>::IntToString(value);
+}
+
+std::string NumberToString(unsigned value) {
+  return IntToStringT<std::string, unsigned>::IntToString(value);
+}
+
+string16 NumberToString16(unsigned value) {
+  return IntToStringT<string16, unsigned>::IntToString(value);
+}
+
+std::string NumberToString(long value) {
+  return IntToStringT<std::string, long>::IntToString(value);
+}
+
+string16 NumberToString16(long value) {
+  return IntToStringT<string16, long>::IntToString(value);
+}
+
+std::string NumberToString(unsigned long value) {
+  return IntToStringT<std::string, unsigned long>::IntToString(value);
+}
+
+string16 NumberToString16(unsigned long value) {
+  return IntToStringT<string16, unsigned long>::IntToString(value);
+}
+
+std::string NumberToString(long long value) {
+  return IntToStringT<std::string, long long>::IntToString(value);
+}
+
+string16 NumberToString16(long long value) {
+  return IntToStringT<string16, long long>::IntToString(value);
+}
+
+std::string NumberToString(unsigned long long value) {
+  return IntToStringT<std::string, unsigned long long>::IntToString(value);
+}
+
+string16 NumberToString16(unsigned long long value) {
+  return IntToStringT<string16, unsigned long long>::IntToString(value);
+}
+
+std::string NumberToString(double value) {
+  // According to g_fmt.cc, it is sufficient to declare a buffer of size 32.
+  char buffer[32];
+  dmg_fp::g_fmt(buffer, value);
+  return std::string(buffer);
+}
+
+base::string16 NumberToString16(double value) {
+  // According to g_fmt.cc, it is sufficient to declare a buffer of size 32.
+  char buffer[32];
+  dmg_fp::g_fmt(buffer, value);
+
+  // The number will be ASCII. This creates the string using the "input
+  // iterator" variant which promotes from 8-bit to 16-bit via "=".
+  return base::string16(&buffer[0], &buffer[strlen(buffer)]);
+}
+
+bool StringToInt(StringPiece input, int* output) {
+  return StringToIntImpl(input, output);
+}
+
+bool StringToInt(StringPiece16 input, int* output) {
+  return String16ToIntImpl(input, output);
+}
+
+bool StringToUint(StringPiece input, unsigned* output) {
+  return StringToIntImpl(input, output);
+}
+
+bool StringToUint(StringPiece16 input, unsigned* output) {
+  return String16ToIntImpl(input, output);
+}
+
+bool StringToInt64(StringPiece input, int64_t* output) {
+  return StringToIntImpl(input, output);
+}
+
+bool StringToInt64(StringPiece16 input, int64_t* output) {
+  return String16ToIntImpl(input, output);
+}
+
+bool StringToUint64(StringPiece input, uint64_t* output) {
+  return StringToIntImpl(input, output);
+}
+
+bool StringToUint64(StringPiece16 input, uint64_t* output) {
+  return String16ToIntImpl(input, output);
+}
+
+bool StringToSizeT(StringPiece input, size_t* output) {
+  return StringToIntImpl(input, output);
+}
+
+bool StringToSizeT(StringPiece16 input, size_t* output) {
+  return String16ToIntImpl(input, output);
+}
+
+bool StringToDouble(const std::string& input, double* output) {
+  // Thread-safe?  It is on at least Mac, Linux, and Windows.
+  ScopedClearErrno clear_errno;
+
+  char* endptr = nullptr;
+  *output = dmg_fp::strtod(input.c_str(), &endptr);
+
+  // Cases to return false:
+  //  - If errno is ERANGE, there was an overflow or underflow.
+  //  - If the input string is empty, there was nothing to parse.
+  //  - If endptr does not point to the end of the string, there are either
+  //    characters remaining in the string after a parsed number, or the string
+  //    does not begin with a parseable number.  endptr is compared to the
+  //    expected end given the string's stated length to correctly catch cases
+  //    where the string contains embedded NUL characters.
+//  - If the first character is a space, there was leading whitespace.
+  return errno == 0 &&
+         !input.empty() &&
+         input.c_str() + input.length() == endptr &&
+         !isspace(input[0]);
+}
+
+// Note: if you need to add String16ToDouble, first ask yourself if it's
+// really necessary. If it is, probably the best implementation here is to
+// convert to 8-bit and then use the 8-bit version.
+
+// Note: if you need to add an iterator range version of StringToDouble, first
+// ask yourself if it's really necessary. If it is, probably the best
+// implementation here is to instantiate a string and use the string version.
+
+std::string HexEncode(const void* bytes, size_t size) {
+  static const char kHexChars[] = "0123456789ABCDEF";
+
+  // Each input byte creates two output hex characters.
+  std::string ret(size * 2, '\0');
+
+  for (size_t i = 0; i < size; ++i) {
+    char b = reinterpret_cast<const char*>(bytes)[i];
+    ret[(i * 2)] = kHexChars[(b >> 4) & 0xf];
+    ret[(i * 2) + 1] = kHexChars[b & 0xf];
+  }
+  return ret;
+}
+
+bool HexStringToInt(StringPiece input, int* output) {
+  return IteratorRangeToNumber<HexIteratorRangeToIntTraits>::Invoke(
+    input.begin(), input.end(), output);
+}
+
+bool HexStringToUInt(StringPiece input, uint32_t* output) {
+  return IteratorRangeToNumber<HexIteratorRangeToUIntTraits>::Invoke(
+      input.begin(), input.end(), output);
+}
+
+bool HexStringToInt64(StringPiece input, int64_t* output) {
+  return IteratorRangeToNumber<HexIteratorRangeToInt64Traits>::Invoke(
+    input.begin(), input.end(), output);
+}
+
+bool HexStringToUInt64(StringPiece input, uint64_t* output) {
+  return IteratorRangeToNumber<HexIteratorRangeToUInt64Traits>::Invoke(
+      input.begin(), input.end(), output);
+}
+
+bool HexStringToBytes(StringPiece input, std::vector<uint8_t>* output) {
+  DCHECK_EQ(output->size(), 0u);
+  size_t count = input.size();
+  if (count == 0 || (count % 2) != 0)
+    return false;
+  for (uintptr_t i = 0; i < count / 2; ++i) {
+    uint8_t msb = 0;  // most significant 4 bits
+    uint8_t lsb = 0;  // least significant 4 bits
+    if (!CharToDigit<16>(input[i * 2], &msb) ||
+        !CharToDigit<16>(input[i * 2 + 1], &lsb)) {
+      return false;
+    }
+    output->push_back((msb << 4) | lsb);
+  }
+  return true;
+}
+
+}  // namespace base
diff --git a/base/strings/string_number_conversions.h b/base/strings/string_number_conversions.h
new file mode 100644
index 0000000..057b60a
--- /dev/null
+++ b/base/strings/string_number_conversions.h
@@ -0,0 +1,166 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_STRING_NUMBER_CONVERSIONS_H_
+#define BASE_STRINGS_STRING_NUMBER_CONVERSIONS_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
+#include "build/build_config.h"
+
+// ----------------------------------------------------------------------------
+// IMPORTANT MESSAGE FROM YOUR SPONSOR
+//
+// This file contains no "wstring" variants. New code should use string16. If
+// you need to make old code work, use the UTF8 version and convert. Please do
+// not add wstring variants.
+//
+// Please do not add "convenience" functions for converting strings to integers
+// that return the value and ignore success/failure. That encourages people to
+// write code that doesn't properly handle the error conditions.
+//
+// DO NOT use these functions in any UI unless it's NOT localized on purpose.
+// Instead, use base::MessageFormatter for a complex message with numbers
+// (integer, float, double) embedded or base::Format{Number,Double,Percent} to
+// just format a single number/percent. Note that some languages use native
+// digits instead of ASCII digits while others use a group separator or decimal
+// point different from ',' and '.'. Using these functions in the UI would
+// cause numbers to be formatted in a non-native way.
+// ----------------------------------------------------------------------------
+
+namespace base {
+
+// Number -> string conversions ------------------------------------------------
+
+// Ignores locale! See warning above.
+BASE_EXPORT std::string NumberToString(int value);
+BASE_EXPORT string16 NumberToString16(int value);
+BASE_EXPORT std::string NumberToString(unsigned int value);
+BASE_EXPORT string16 NumberToString16(unsigned int value);
+BASE_EXPORT std::string NumberToString(long value);
+BASE_EXPORT string16 NumberToString16(long value);
+BASE_EXPORT std::string NumberToString(unsigned long value);
+BASE_EXPORT string16 NumberToString16(unsigned long value);
+BASE_EXPORT std::string NumberToString(long long value);
+BASE_EXPORT string16 NumberToString16(long long value);
+BASE_EXPORT std::string NumberToString(unsigned long long value);
+BASE_EXPORT string16 NumberToString16(unsigned long long value);
+BASE_EXPORT std::string NumberToString(double value);
+BASE_EXPORT string16 NumberToString16(double value);
+
+// Type-specific naming for backwards compatibility.
+//
+// TODO(brettw) these should be removed and callers converted to the overloaded
+// "NumberToString" variant.
+inline std::string IntToString(int value) {
+  return NumberToString(value);
+}
+inline string16 IntToString16(int value) {
+  return NumberToString16(value);
+}
+inline std::string UintToString(unsigned value) {
+  return NumberToString(value);
+}
+inline string16 UintToString16(unsigned value) {
+  return NumberToString16(value);
+}
+inline std::string Int64ToString(int64_t value) {
+  return NumberToString(value);
+}
+inline string16 Int64ToString16(int64_t value) {
+  return NumberToString16(value);
+}
+
+// String -> number conversions ------------------------------------------------
+
+// Perform a best-effort conversion of the input string to a numeric type,
+// setting |*output| to the result of the conversion.  Returns true for
+// "perfect" conversions; returns false in the following cases:
+//  - Overflow. |*output| will be set to the maximum value supported
+//    by the data type.
+//  - Underflow. |*output| will be set to the minimum value supported
+//    by the data type.
+//  - Trailing characters in the string after parsing the number.  |*output|
+//    will be set to the value of the number that was parsed.
+//  - Leading whitespace in the string before parsing the number. |*output| will
+//    be set to the value of the number that was parsed.
+//  - No characters parseable as a number at the beginning of the string.
+//    |*output| will be set to 0.
+//  - Empty string.  |*output| will be set to 0.
+// WARNING: Will write to |output| even when returning false.
+//          Read the comments above carefully.
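+//
+// For example: StringToInt("42abc", &value) writes 42 and returns false, and
+// StringToInt("2147483648", &value) writes INT_MAX and returns false.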
+BASE_EXPORT bool StringToInt(StringPiece input, int* output);
+BASE_EXPORT bool StringToInt(StringPiece16 input, int* output);
+
+BASE_EXPORT bool StringToUint(StringPiece input, unsigned* output);
+BASE_EXPORT bool StringToUint(StringPiece16 input, unsigned* output);
+
+BASE_EXPORT bool StringToInt64(StringPiece input, int64_t* output);
+BASE_EXPORT bool StringToInt64(StringPiece16 input, int64_t* output);
+
+BASE_EXPORT bool StringToUint64(StringPiece input, uint64_t* output);
+BASE_EXPORT bool StringToUint64(StringPiece16 input, uint64_t* output);
+
+BASE_EXPORT bool StringToSizeT(StringPiece input, size_t* output);
+BASE_EXPORT bool StringToSizeT(StringPiece16 input, size_t* output);
+
+// For floating-point conversions, only conversions of input strings in decimal
+// form are defined to work.  Behavior with strings representing floating-point
+// numbers in hexadecimal, and strings representing non-finite values (such as
+// NaN and inf) is undefined.  Otherwise, these behave the same as the integral
+// variants.  This expects the input string to NOT be specific to the locale.
+// If your input is locale specific, use ICU to read the number.
+// WARNING: Will write to |output| even when returning false.
+//          Read the comments here and above StringToInt() carefully.
+BASE_EXPORT bool StringToDouble(const std::string& input, double* output);
+
+// Hex encoding ----------------------------------------------------------------
+
+// Returns a hex string representation of a binary buffer. The returned hex
+// string will be in upper case. This function does not check if |size| is
+// within reasonable limits since it's written with trusted data in mind.  If
+// you suspect that the data you want to format might be large, note that the
+// absolute maximum for |size| is
+//   std::numeric_limits<size_t>::max() / 2
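+//
+// For example, HexEncode("\x01\xff", 2) returns "01FF".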
+BASE_EXPORT std::string HexEncode(const void* bytes, size_t size);
+
+// Best effort conversion, see StringToInt above for restrictions.
+// Will only successfully parse hex values that fit into |output|, i.e.
+// -0x80000000 <= |input| <= 0x7FFFFFFF.
+BASE_EXPORT bool HexStringToInt(StringPiece input, int* output);
+
+// Best effort conversion, see StringToInt above for restrictions.
+// Will only successfully parse hex values that fit into |output|, i.e.
+// 0x00000000 <= |input| <= 0xFFFFFFFF.
+// The string is not required to start with 0x.
+BASE_EXPORT bool HexStringToUInt(StringPiece input, uint32_t* output);
+
+// Best effort conversion, see StringToInt above for restrictions.
+// Will only successfully parse hex values that fit into |output|, i.e.
+// -0x8000000000000000 <= |input| <= 0x7FFFFFFFFFFFFFFF.
+BASE_EXPORT bool HexStringToInt64(StringPiece input, int64_t* output);
+
+// Best effort conversion, see StringToInt above for restrictions.
+// Will only successfully parse hex values that fit into |output|, i.e.
+// 0x0000000000000000 <= |input| <= 0xFFFFFFFFFFFFFFFF.
+// The string is not required to start with 0x.
+BASE_EXPORT bool HexStringToUInt64(StringPiece input, uint64_t* output);
+
+// Similar to the previous functions, except that output is a vector of bytes.
+// |*output| will contain as many bytes as were successfully parsed prior to the
+// error.  There is no overflow, but input.size() must be evenly divisible by 2.
+// Leading 0x or +/- are not allowed.
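+// For example, "f00d" yields {0xF0, 0x0D}, while "f0od" fails after having
+// appended 0xF0.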
+BASE_EXPORT bool HexStringToBytes(StringPiece input,
+                                  std::vector<uint8_t>* output);
+
+}  // namespace base
+
+#endif  // BASE_STRINGS_STRING_NUMBER_CONVERSIONS_H_
diff --git a/base/strings/string_number_conversions_fuzzer.cc b/base/strings/string_number_conversions_fuzzer.cc
new file mode 100644
index 0000000..2fed7de
--- /dev/null
+++ b/base/strings/string_number_conversions_fuzzer.cc
@@ -0,0 +1,67 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <string>
+#include <vector>
+
+#include "base/strings/string_number_conversions.h"
+
+// Entry point for LibFuzzer.
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+  base::StringPiece string_piece_input(reinterpret_cast<const char*>(data),
+                                       size);
+  std::string string_input(reinterpret_cast<const char*>(data), size);
+
+  int out_int;
+  base::StringToInt(string_piece_input, &out_int);
+  unsigned out_uint;
+  base::StringToUint(string_piece_input, &out_uint);
+  int64_t out_int64;
+  base::StringToInt64(string_piece_input, &out_int64);
+  uint64_t out_uint64;
+  base::StringToUint64(string_piece_input, &out_uint64);
+  size_t out_size;
+  base::StringToSizeT(string_piece_input, &out_size);
+
+  // Test for StringPiece16 if size is even.
+  if (size % 2 == 0) {
+    base::StringPiece16 string_piece_input16(
+        reinterpret_cast<const base::char16*>(data), size / 2);
+
+    base::StringToInt(string_piece_input16, &out_int);
+    base::StringToUint(string_piece_input16, &out_uint);
+    base::StringToInt64(string_piece_input16, &out_int64);
+    base::StringToUint64(string_piece_input16, &out_uint64);
+    base::StringToSizeT(string_piece_input16, &out_size);
+  }
+
+  double out_double;
+  base::StringToDouble(string_input, &out_double);
+
+  base::HexStringToInt(string_piece_input, &out_int);
+  base::HexStringToUInt(string_piece_input, &out_uint);
+  base::HexStringToInt64(string_piece_input, &out_int64);
+  base::HexStringToUInt64(string_piece_input, &out_uint64);
+  std::vector<uint8_t> out_bytes;
+  base::HexStringToBytes(string_piece_input, &out_bytes);
+
+  base::HexEncode(data, size);
+
+  // Convert the numbers back to strings.
+  base::NumberToString(out_int);
+  base::NumberToString16(out_int);
+  base::NumberToString(out_uint);
+  base::NumberToString16(out_uint);
+  base::NumberToString(out_int64);
+  base::NumberToString16(out_int64);
+  base::NumberToString(out_uint64);
+  base::NumberToString16(out_uint64);
+  base::NumberToString(out_double);
+  base::NumberToString16(out_double);
+
+  return 0;
+}
diff --git a/base/strings/string_number_conversions_unittest.cc b/base/strings/string_number_conversions_unittest.cc
new file mode 100644
index 0000000..d969450
--- /dev/null
+++ b/base/strings/string_number_conversions_unittest.cc
@@ -0,0 +1,905 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/string_number_conversions.h"
+
+#include <errno.h>
+#include <limits.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+
+#include <cmath>
+#include <limits>
+
+#include "base/bit_cast.h"
+#include "base/format_macros.h"
+#include "base/macros.h"
+#include "base/strings/stringprintf.h"
+#include "base/strings/utf_string_conversions.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+template <typename INT>
+struct NumberToStringTest {
+  INT num;
+  const char* sexpected;  // expected output for the signed value
+  const char* uexpected;  // expected output when |num| is cast to unsigned
+};
+
+}  // namespace
+
+TEST(StringNumberConversionsTest, NumberToString) {
+  static const NumberToStringTest<int> int_tests[] = {
+      {0, "0", "0"},
+      {-1, "-1", "4294967295"},
+      {std::numeric_limits<int>::max(), "2147483647", "2147483647"},
+      {std::numeric_limits<int>::min(), "-2147483648", "2147483648"},
+  };
+  static const NumberToStringTest<int64_t> int64_tests[] = {
+      {0, "0", "0"},
+      {-1, "-1", "18446744073709551615"},
+      {
+          std::numeric_limits<int64_t>::max(), "9223372036854775807",
+          "9223372036854775807",
+      },
+      {std::numeric_limits<int64_t>::min(), "-9223372036854775808",
+       "9223372036854775808"},
+  };
+
+  for (size_t i = 0; i < arraysize(int_tests); ++i) {
+    const NumberToStringTest<int>& test = int_tests[i];
+    EXPECT_EQ(NumberToString(test.num), test.sexpected);
+    EXPECT_EQ(NumberToString16(test.num), UTF8ToUTF16(test.sexpected));
+    EXPECT_EQ(NumberToString(static_cast<unsigned>(test.num)), test.uexpected);
+    EXPECT_EQ(NumberToString16(static_cast<unsigned>(test.num)),
+              UTF8ToUTF16(test.uexpected));
+  }
+  for (size_t i = 0; i < arraysize(int64_tests); ++i) {
+    const NumberToStringTest<int64_t>& test = int64_tests[i];
+    EXPECT_EQ(NumberToString(test.num), test.sexpected);
+    EXPECT_EQ(NumberToString16(test.num), UTF8ToUTF16(test.sexpected));
+    EXPECT_EQ(NumberToString(static_cast<uint64_t>(test.num)), test.uexpected);
+    EXPECT_EQ(NumberToString16(static_cast<uint64_t>(test.num)),
+              UTF8ToUTF16(test.uexpected));
+  }
+}
+
+TEST(StringNumberConversionsTest, Uint64ToString) {
+  static const struct {
+    uint64_t input;
+    std::string output;
+  } cases[] = {
+      {0, "0"},
+      {42, "42"},
+      {INT_MAX, "2147483647"},
+      {std::numeric_limits<uint64_t>::max(), "18446744073709551615"},
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i)
+    EXPECT_EQ(cases[i].output, NumberToString(cases[i].input));
+}
+
+TEST(StringNumberConversionsTest, SizeTToString) {
+  size_t size_t_max = std::numeric_limits<size_t>::max();
+  std::string size_t_max_string = StringPrintf("%" PRIuS, size_t_max);
+
+  static const struct {
+    size_t input;
+    std::string output;
+  } cases[] = {
+    {0, "0"},
+    {9, "9"},
+    {42, "42"},
+    {INT_MAX, "2147483647"},
+    {2147483648U, "2147483648"},
+#if SIZE_MAX > 4294967295U
+    {99999999999U, "99999999999"},
+#endif
+    {size_t_max, size_t_max_string},
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i)
+    EXPECT_EQ(cases[i].output, NumberToString(cases[i].input));
+}
+
+TEST(StringNumberConversionsTest, StringToInt) {
+  static const struct {
+    std::string input;
+    int output;
+    bool success;
+  } cases[] = {
+    {"0", 0, true},
+    {"42", 42, true},
+    {"42\x99", 42, false},
+    {"\x99" "42\x99", 0, false},
+    {"-2147483648", INT_MIN, true},
+    {"2147483647", INT_MAX, true},
+    {"", 0, false},
+    {" 42", 42, false},
+    {"42 ", 42, false},
+    {"\t\n\v\f\r 42", 42, false},
+    {"blah42", 0, false},
+    {"42blah", 42, false},
+    {"blah42blah", 0, false},
+    {"-273.15", -273, false},
+    {"+98.6", 98, false},
+    {"--123", 0, false},
+    {"++123", 0, false},
+    {"-+123", 0, false},
+    {"+-123", 0, false},
+    {"-", 0, false},
+    {"-2147483649", INT_MIN, false},
+    {"-99999999999", INT_MIN, false},
+    {"2147483648", INT_MAX, false},
+    {"99999999999", INT_MAX, false},
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    int output = cases[i].output ^ 1;  // Ensure StringToInt wrote something.
+    EXPECT_EQ(cases[i].success, StringToInt(cases[i].input, &output));
+    EXPECT_EQ(cases[i].output, output);
+
+    string16 utf16_input = UTF8ToUTF16(cases[i].input);
+    output = cases[i].output ^ 1;  // Ensure StringToInt wrote something.
+    EXPECT_EQ(cases[i].success, StringToInt(utf16_input, &output));
+    EXPECT_EQ(cases[i].output, output);
+  }
+
+  // One additional test to verify conversion of numbers in strings with
+  // embedded NUL characters.  The NUL and extra data after it should be
+  // interpreted as junk after the number.
+  const char input[] = "6\06";
+  std::string input_string(input, arraysize(input) - 1);
+  int output;
+  EXPECT_FALSE(StringToInt(input_string, &output));
+  EXPECT_EQ(6, output);
+
+  string16 utf16_input = UTF8ToUTF16(input_string);
+  output = 0;
+  EXPECT_FALSE(StringToInt(utf16_input, &output));
+  EXPECT_EQ(6, output);
+
+  output = 0;
+  const char16 negative_wide_input[] = { 0xFF4D, '4', '2', 0};
+  EXPECT_FALSE(StringToInt(string16(negative_wide_input), &output));
+  EXPECT_EQ(0, output);
+}
+
+TEST(StringNumberConversionsTest, StringToUint) {
+  static const struct {
+    std::string input;
+    unsigned output;
+    bool success;
+  } cases[] = {
+    {"0", 0, true},
+    {"42", 42, true},
+    {"42\x99", 42, false},
+    {"\x99" "42\x99", 0, false},
+    {"-2147483648", 0, false},
+    {"2147483647", INT_MAX, true},
+    {"", 0, false},
+    {" 42", 42, false},
+    {"42 ", 42, false},
+    {"\t\n\v\f\r 42", 42, false},
+    {"blah42", 0, false},
+    {"42blah", 42, false},
+    {"blah42blah", 0, false},
+    {"-273.15", 0, false},
+    {"+98.6", 98, false},
+    {"--123", 0, false},
+    {"++123", 0, false},
+    {"-+123", 0, false},
+    {"+-123", 0, false},
+    {"-", 0, false},
+    {"-2147483649", 0, false},
+    {"-99999999999", 0, false},
+    {"4294967295", UINT_MAX, true},
+    {"4294967296", UINT_MAX, false},
+    {"99999999999", UINT_MAX, false},
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    unsigned output =
+        cases[i].output ^ 1;  // Ensure StringToUint wrote something.
+    EXPECT_EQ(cases[i].success, StringToUint(cases[i].input, &output));
+    EXPECT_EQ(cases[i].output, output);
+
+    string16 utf16_input = UTF8ToUTF16(cases[i].input);
+    output = cases[i].output ^ 1;  // Ensure StringToUint wrote something.
+    EXPECT_EQ(cases[i].success, StringToUint(utf16_input, &output));
+    EXPECT_EQ(cases[i].output, output);
+  }
+
+  // One additional test to verify conversion of numbers in strings with
+  // embedded NUL characters.  The NUL and extra data after it should be
+  // interpreted as junk after the number.
+  const char input[] = "6\06";
+  std::string input_string(input, arraysize(input) - 1);
+  unsigned output;
+  EXPECT_FALSE(StringToUint(input_string, &output));
+  EXPECT_EQ(6U, output);
+
+  string16 utf16_input = UTF8ToUTF16(input_string);
+  output = 0;
+  EXPECT_FALSE(StringToUint(utf16_input, &output));
+  EXPECT_EQ(6U, output);
+
+  output = 0;
+  const char16 negative_wide_input[] = { 0xFF4D, '4', '2', 0};
+  EXPECT_FALSE(StringToUint(string16(negative_wide_input), &output));
+  EXPECT_EQ(0U, output);
+}
+
+TEST(StringNumberConversionsTest, StringToInt64) {
+  static const struct {
+    std::string input;
+    int64_t output;
+    bool success;
+  } cases[] = {
+      {"0", 0, true},
+      {"42", 42, true},
+      {"-2147483648", INT_MIN, true},
+      {"2147483647", INT_MAX, true},
+      {"-2147483649", INT64_C(-2147483649), true},
+      {"-99999999999", INT64_C(-99999999999), true},
+      {"2147483648", INT64_C(2147483648), true},
+      {"99999999999", INT64_C(99999999999), true},
+      {"9223372036854775807", std::numeric_limits<int64_t>::max(), true},
+      {"-9223372036854775808", std::numeric_limits<int64_t>::min(), true},
+      {"09", 9, true},
+      {"-09", -9, true},
+      {"", 0, false},
+      {" 42", 42, false},
+      {"42 ", 42, false},
+      {"0x42", 0, false},
+      {"\t\n\v\f\r 42", 42, false},
+      {"blah42", 0, false},
+      {"42blah", 42, false},
+      {"blah42blah", 0, false},
+      {"-273.15", -273, false},
+      {"+98.6", 98, false},
+      {"--123", 0, false},
+      {"++123", 0, false},
+      {"-+123", 0, false},
+      {"+-123", 0, false},
+      {"-", 0, false},
+      {"-9223372036854775809", std::numeric_limits<int64_t>::min(), false},
+      {"-99999999999999999999", std::numeric_limits<int64_t>::min(), false},
+      {"9223372036854775808", std::numeric_limits<int64_t>::max(), false},
+      {"99999999999999999999", std::numeric_limits<int64_t>::max(), false},
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    int64_t output = 0;
+    EXPECT_EQ(cases[i].success, StringToInt64(cases[i].input, &output));
+    EXPECT_EQ(cases[i].output, output);
+
+    string16 utf16_input = UTF8ToUTF16(cases[i].input);
+    output = 0;
+    EXPECT_EQ(cases[i].success, StringToInt64(utf16_input, &output));
+    EXPECT_EQ(cases[i].output, output);
+  }
+
+  // One additional test to verify conversion of numbers in strings with
+  // embedded NUL characters.  The NUL and extra data after it should be
+  // interpreted as junk after the number.
+  const char input[] = "6\06";
+  std::string input_string(input, arraysize(input) - 1);
+  int64_t output;
+  EXPECT_FALSE(StringToInt64(input_string, &output));
+  EXPECT_EQ(6, output);
+
+  string16 utf16_input = UTF8ToUTF16(input_string);
+  output = 0;
+  EXPECT_FALSE(StringToInt64(utf16_input, &output));
+  EXPECT_EQ(6, output);
+}
+
+TEST(StringNumberConversionsTest, StringToUint64) {
+  static const struct {
+    std::string input;
+    uint64_t output;
+    bool success;
+  } cases[] = {
+      {"0", 0, true},
+      {"42", 42, true},
+      {"-2147483648", 0, false},
+      {"2147483647", INT_MAX, true},
+      {"-2147483649", 0, false},
+      {"-99999999999", 0, false},
+      {"2147483648", UINT64_C(2147483648), true},
+      {"99999999999", UINT64_C(99999999999), true},
+      {"9223372036854775807", std::numeric_limits<int64_t>::max(), true},
+      {"-9223372036854775808", 0, false},
+      {"09", 9, true},
+      {"-09", 0, false},
+      {"", 0, false},
+      {" 42", 42, false},
+      {"42 ", 42, false},
+      {"0x42", 0, false},
+      {"\t\n\v\f\r 42", 42, false},
+      {"blah42", 0, false},
+      {"42blah", 42, false},
+      {"blah42blah", 0, false},
+      {"-273.15", 0, false},
+      {"+98.6", 98, false},
+      {"--123", 0, false},
+      {"++123", 0, false},
+      {"-+123", 0, false},
+      {"+-123", 0, false},
+      {"-", 0, false},
+      {"-9223372036854775809", 0, false},
+      {"-99999999999999999999", 0, false},
+      {"9223372036854775808", UINT64_C(9223372036854775808), true},
+      {"99999999999999999999", std::numeric_limits<uint64_t>::max(), false},
+      {"18446744073709551615", std::numeric_limits<uint64_t>::max(), true},
+      {"18446744073709551616", std::numeric_limits<uint64_t>::max(), false},
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    uint64_t output = 0;
+    EXPECT_EQ(cases[i].success, StringToUint64(cases[i].input, &output));
+    EXPECT_EQ(cases[i].output, output);
+
+    string16 utf16_input = UTF8ToUTF16(cases[i].input);
+    output = 0;
+    EXPECT_EQ(cases[i].success, StringToUint64(utf16_input, &output));
+    EXPECT_EQ(cases[i].output, output);
+  }
+
+  // One additional test to verify conversion of numbers in strings with
+  // embedded NUL characters.  The NUL and extra data after it should be
+  // interpreted as junk after the number.
+  const char input[] = "6\06";
+  std::string input_string(input, arraysize(input) - 1);
+  uint64_t output;
+  EXPECT_FALSE(StringToUint64(input_string, &output));
+  EXPECT_EQ(6U, output);
+
+  string16 utf16_input = UTF8ToUTF16(input_string);
+  output = 0;
+  EXPECT_FALSE(StringToUint64(utf16_input, &output));
+  EXPECT_EQ(6U, output);
+}
+
+TEST(StringNumberConversionsTest, StringToSizeT) {
+  size_t size_t_max = std::numeric_limits<size_t>::max();
+  std::string size_t_max_string = StringPrintf("%" PRIuS, size_t_max);
+
+  static const struct {
+    std::string input;
+    size_t output;
+    bool success;
+  } cases[] = {
+    {"0", 0, true},
+    {"42", 42, true},
+    {"-2147483648", 0, false},
+    {"2147483647", INT_MAX, true},
+    {"-2147483649", 0, false},
+    {"-99999999999", 0, false},
+    {"2147483648", 2147483648U, true},
+#if SIZE_MAX > 4294967295U
+    {"99999999999", 99999999999U, true},
+#endif
+    {"-9223372036854775808", 0, false},
+    {"09", 9, true},
+    {"-09", 0, false},
+    {"", 0, false},
+    {" 42", 42, false},
+    {"42 ", 42, false},
+    {"0x42", 0, false},
+    {"\t\n\v\f\r 42", 42, false},
+    {"blah42", 0, false},
+    {"42blah", 42, false},
+    {"blah42blah", 0, false},
+    {"-273.15", 0, false},
+    {"+98.6", 98, false},
+    {"--123", 0, false},
+    {"++123", 0, false},
+    {"-+123", 0, false},
+    {"+-123", 0, false},
+    {"-", 0, false},
+    {"-9223372036854775809", 0, false},
+    {"-99999999999999999999", 0, false},
+    {"999999999999999999999999", size_t_max, false},
+    {size_t_max_string, size_t_max, true},
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    size_t output = 0;
+    EXPECT_EQ(cases[i].success, StringToSizeT(cases[i].input, &output));
+    EXPECT_EQ(cases[i].output, output);
+
+    string16 utf16_input = UTF8ToUTF16(cases[i].input);
+    output = 0;
+    EXPECT_EQ(cases[i].success, StringToSizeT(utf16_input, &output));
+    EXPECT_EQ(cases[i].output, output);
+  }
+
+  // One additional test to verify conversion of numbers in strings with
+  // embedded NUL characters.  The NUL and extra data after it should be
+  // interpreted as junk after the number.
+  const char input[] = "6\06";
+  std::string input_string(input, arraysize(input) - 1);
+  size_t output;
+  EXPECT_FALSE(StringToSizeT(input_string, &output));
+  EXPECT_EQ(6U, output);
+
+  string16 utf16_input = UTF8ToUTF16(input_string);
+  output = 0;
+  EXPECT_FALSE(StringToSizeT(utf16_input, &output));
+  EXPECT_EQ(6U, output);
+}
+
+TEST(StringNumberConversionsTest, HexStringToInt) {
+  static const struct {
+    std::string input;
+    int64_t output;
+    bool success;
+  } cases[] = {
+    {"0", 0, true},
+    {"42", 66, true},
+    {"-42", -66, true},
+    {"+42", 66, true},
+    {"7fffffff", INT_MAX, true},
+    {"-80000000", INT_MIN, true},
+    {"80000000", INT_MAX, false},  // Overflow test.
+    {"-80000001", INT_MIN, false},  // Underflow test.
+    {"0x42", 66, true},
+    {"-0x42", -66, true},
+    {"+0x42", 66, true},
+    {"0x7fffffff", INT_MAX, true},
+    {"-0x80000000", INT_MIN, true},
+    {"-80000000", INT_MIN, true},
+    {"80000000", INT_MAX, false},  // Overflow test.
+    {"-80000001", INT_MIN, false},  // Underflow test.
+    {"0x0f", 15, true},
+    {"0f", 15, true},
+    {" 45", 0x45, false},
+    {"\t\n\v\f\r 0x45", 0x45, false},
+    {" 45", 0x45, false},
+    {"45 ", 0x45, false},
+    {"45:", 0x45, false},
+    {"efgh", 0xef, false},
+    {"0xefgh", 0xef, false},
+    {"hgfe", 0, false},
+    {"-", 0, false},
+    {"", 0, false},
+    {"0x", 0, false},
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    int output = 0;
+    EXPECT_EQ(cases[i].success, HexStringToInt(cases[i].input, &output));
+    EXPECT_EQ(cases[i].output, output);
+  }
+  // One additional test to verify conversion of numbers in strings with
+  // embedded NUL characters.  The NUL and extra data after it should be
+  // interpreted as junk after the number.
+  const char input[] = "0xc0ffee\0" "9";
+  std::string input_string(input, arraysize(input) - 1);
+  int output;
+  EXPECT_FALSE(HexStringToInt(input_string, &output));
+  EXPECT_EQ(0xc0ffee, output);
+}
+
+TEST(StringNumberConversionsTest, HexStringToUInt) {
+  static const struct {
+    std::string input;
+    uint32_t output;
+    bool success;
+  } cases[] = {
+      {"0", 0, true},
+      {"42", 0x42, true},
+      {"-42", 0, false},
+      {"+42", 0x42, true},
+      {"7fffffff", INT_MAX, true},
+      {"-80000000", 0, false},
+      {"ffffffff", 0xffffffff, true},
+      {"DeadBeef", 0xdeadbeef, true},
+      {"0x42", 0x42, true},
+      {"-0x42", 0, false},
+      {"+0x42", 0x42, true},
+      {"0x7fffffff", INT_MAX, true},
+      {"-0x80000000", 0, false},
+      {"0xffffffff", std::numeric_limits<uint32_t>::max(), true},
+      {"0XDeadBeef", 0xdeadbeef, true},
+      {"0x7fffffffffffffff", std::numeric_limits<uint32_t>::max(),
+       false},  // Overflow test.
+      {"-0x8000000000000000", 0, false},
+      {"0x8000000000000000", std::numeric_limits<uint32_t>::max(),
+       false},  // Overflow test.
+      {"-0x8000000000000001", 0, false},
+      {"0xFFFFFFFFFFFFFFFF", std::numeric_limits<uint32_t>::max(),
+       false},  // Overflow test.
+      {"FFFFFFFFFFFFFFFF", std::numeric_limits<uint32_t>::max(),
+       false},  // Overflow test.
+      {"0x0000000000000000", 0, true},
+      {"0000000000000000", 0, true},
+      {"1FFFFFFFFFFFFFFFF", std::numeric_limits<uint32_t>::max(),
+       false},  // Overflow test.
+      {"0x0f", 0x0f, true},
+      {"0f", 0x0f, true},
+      {" 45", 0x45, false},
+      {"\t\n\v\f\r 0x45", 0x45, false},
+      {" 45", 0x45, false},
+      {"45 ", 0x45, false},
+      {"45:", 0x45, false},
+      {"efgh", 0xef, false},
+      {"0xefgh", 0xef, false},
+      {"hgfe", 0, false},
+      {"-", 0, false},
+      {"", 0, false},
+      {"0x", 0, false},
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    uint32_t output = 0;
+    EXPECT_EQ(cases[i].success, HexStringToUInt(cases[i].input, &output));
+    EXPECT_EQ(cases[i].output, output);
+  }
+  // One additional test to verify conversion of numbers in strings with
+  // embedded NUL characters.  The NUL and extra data after it should be
+  // interpreted as junk after the number.
+  const char input[] = "0xc0ffee\0" "9";
+  std::string input_string(input, arraysize(input) - 1);
+  uint32_t output;
+  EXPECT_FALSE(HexStringToUInt(input_string, &output));
+  EXPECT_EQ(0xc0ffeeU, output);
+}
+
+TEST(StringNumberConversionsTest, HexStringToInt64) {
+  static const struct {
+    std::string input;
+    int64_t output;
+    bool success;
+  } cases[] = {
+      {"0", 0, true},
+      {"42", 66, true},
+      {"-42", -66, true},
+      {"+42", 66, true},
+      {"40acd88557b", INT64_C(4444444448123), true},
+      {"7fffffff", INT_MAX, true},
+      {"-80000000", INT_MIN, true},
+      {"ffffffff", 0xffffffff, true},
+      {"DeadBeef", 0xdeadbeef, true},
+      {"0x42", 66, true},
+      {"-0x42", -66, true},
+      {"+0x42", 66, true},
+      {"0x40acd88557b", INT64_C(4444444448123), true},
+      {"0x7fffffff", INT_MAX, true},
+      {"-0x80000000", INT_MIN, true},
+      {"0xffffffff", 0xffffffff, true},
+      {"0XDeadBeef", 0xdeadbeef, true},
+      {"0x7fffffffffffffff", std::numeric_limits<int64_t>::max(), true},
+      {"-0x8000000000000000", std::numeric_limits<int64_t>::min(), true},
+      {"0x8000000000000000", std::numeric_limits<int64_t>::max(),
+       false},  // Overflow test.
+      {"-0x8000000000000001", std::numeric_limits<int64_t>::min(),
+       false},  // Underflow test.
+      {"0x0f", 15, true},
+      {"0f", 15, true},
+      {" 45", 0x45, false},
+      {"\t\n\v\f\r 0x45", 0x45, false},
+      {" 45", 0x45, false},
+      {"45 ", 0x45, false},
+      {"45:", 0x45, false},
+      {"efgh", 0xef, false},
+      {"0xefgh", 0xef, false},
+      {"hgfe", 0, false},
+      {"-", 0, false},
+      {"", 0, false},
+      {"0x", 0, false},
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    int64_t output = 0;
+    EXPECT_EQ(cases[i].success, HexStringToInt64(cases[i].input, &output));
+    EXPECT_EQ(cases[i].output, output);
+  }
+  // One additional test to verify conversion of numbers in strings with
+  // embedded NUL characters.  The NUL and extra data after it should be
+  // interpreted as junk after the number.
+  const char input[] = "0xc0ffee\0" "9";
+  std::string input_string(input, arraysize(input) - 1);
+  int64_t output;
+  EXPECT_FALSE(HexStringToInt64(input_string, &output));
+  EXPECT_EQ(0xc0ffee, output);
+}
+
+TEST(StringNumberConversionsTest, HexStringToUInt64) {
+  static const struct {
+    std::string input;
+    uint64_t output;
+    bool success;
+  } cases[] = {
+      {"0", 0, true},
+      {"42", 66, true},
+      {"-42", 0, false},
+      {"+42", 66, true},
+      {"40acd88557b", INT64_C(4444444448123), true},
+      {"7fffffff", INT_MAX, true},
+      {"-80000000", 0, false},
+      {"ffffffff", 0xffffffff, true},
+      {"DeadBeef", 0xdeadbeef, true},
+      {"0x42", 66, true},
+      {"-0x42", 0, false},
+      {"+0x42", 66, true},
+      {"0x40acd88557b", INT64_C(4444444448123), true},
+      {"0x7fffffff", INT_MAX, true},
+      {"-0x80000000", 0, false},
+      {"0xffffffff", 0xffffffff, true},
+      {"0XDeadBeef", 0xdeadbeef, true},
+      {"0x7fffffffffffffff", std::numeric_limits<int64_t>::max(), true},
+      {"-0x8000000000000000", 0, false},
+      {"0x8000000000000000", UINT64_C(0x8000000000000000), true},
+      {"-0x8000000000000001", 0, false},
+      {"0xFFFFFFFFFFFFFFFF", std::numeric_limits<uint64_t>::max(), true},
+      {"FFFFFFFFFFFFFFFF", std::numeric_limits<uint64_t>::max(), true},
+      {"0x0000000000000000", 0, true},
+      {"0000000000000000", 0, true},
+      {"1FFFFFFFFFFFFFFFF", std::numeric_limits<uint64_t>::max(),
+       false},  // Overflow test.
+      {"0x0f", 15, true},
+      {"0f", 15, true},
+      {" 45", 0x45, false},
+      {"\t\n\v\f\r 0x45", 0x45, false},
+      {" 45", 0x45, false},
+      {"45 ", 0x45, false},
+      {"45:", 0x45, false},
+      {"efgh", 0xef, false},
+      {"0xefgh", 0xef, false},
+      {"hgfe", 0, false},
+      {"-", 0, false},
+      {"", 0, false},
+      {"0x", 0, false},
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    uint64_t output = 0;
+    EXPECT_EQ(cases[i].success, HexStringToUInt64(cases[i].input, &output));
+    EXPECT_EQ(cases[i].output, output);
+  }
+  // One additional test to verify conversion of numbers in strings with
+  // embedded NUL characters.  The NUL and extra data after it should be
+  // interpreted as junk after the number.
+  const char input[] = "0xc0ffee\0" "9";
+  std::string input_string(input, arraysize(input) - 1);
+  uint64_t output;
+  EXPECT_FALSE(HexStringToUInt64(input_string, &output));
+  EXPECT_EQ(0xc0ffeeU, output);
+}
+
+TEST(StringNumberConversionsTest, HexStringToBytes) {
+  static const struct {
+    const std::string input;
+    const char* output;
+    size_t output_len;
+    bool success;
+  } cases[] = {
+    {"0", "", 0, false},  // odd number of characters fails
+    {"00", "\0", 1, true},
+    {"42", "\x42", 1, true},
+    {"-42", "", 0, false},  // any non-hex value fails
+    {"+42", "", 0, false},
+    {"7fffffff", "\x7f\xff\xff\xff", 4, true},
+    {"80000000", "\x80\0\0\0", 4, true},
+    {"deadbeef", "\xde\xad\xbe\xef", 4, true},
+    {"DeadBeef", "\xde\xad\xbe\xef", 4, true},
+    {"0x42", "", 0, false},  // leading 0x fails (x is not hex)
+    {"0f", "\xf", 1, true},
+    {"45  ", "\x45", 1, false},
+    {"efgh", "\xef", 1, false},
+    {"", "", 0, false},
+    {"0123456789ABCDEF", "\x01\x23\x45\x67\x89\xAB\xCD\xEF", 8, true},
+    {"0123456789ABCDEF012345",
+     "\x01\x23\x45\x67\x89\xAB\xCD\xEF\x01\x23\x45", 11, true},
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    std::vector<uint8_t> output;
+    std::vector<uint8_t> compare;
+    EXPECT_EQ(cases[i].success, HexStringToBytes(cases[i].input, &output)) <<
+        i << ": " << cases[i].input;
+    for (size_t j = 0; j < cases[i].output_len; ++j)
+      compare.push_back(static_cast<uint8_t>(cases[i].output[j]));
+    ASSERT_EQ(output.size(), compare.size()) << i << ": " << cases[i].input;
+    EXPECT_TRUE(std::equal(output.begin(), output.end(), compare.begin())) <<
+        i << ": " << cases[i].input;
+  }
+}
+
+TEST(StringNumberConversionsTest, StringToDouble) {
+  static const struct {
+    std::string input;
+    double output;
+    bool success;
+  } cases[] = {
+    // Test different forms of zero.
+    {"0", 0.0, true},
+    {"+0", 0.0, true},
+    {"-0", 0.0, true},
+    {"0.0", 0.0, true},
+    {"000000000000000000000000000000.0", 0.0, true},
+    {"0.000000000000000000000000000", 0.0, true},
+
+    // Test the answer.
+    {"42", 42.0, true},
+    {"-42", -42.0, true},
+
+    // Test variances of an ordinary number.
+    {"123.45", 123.45, true},
+    {"-123.45", -123.45, true},
+    {"+123.45", 123.45, true},
+
+    // Test different forms of representation.
+    {"2.99792458e8", 299792458.0, true},
+    {"149597870.691E+3", 149597870691.0, true},
+    {"6.", 6.0, true},
+
+    // Test around the largest/smallest value that a double can represent.
+    {"9e307", 9e307, true},
+    {"1.7976e308", 1.7976e308, true},
+    {"1.7977e308", HUGE_VAL, false},
+    {"1.797693134862315807e+308", HUGE_VAL, true},
+    {"1.797693134862315808e+308", HUGE_VAL, false},
+    {"9e308", HUGE_VAL, false},
+    {"9e309", HUGE_VAL, false},
+    {"9e999", HUGE_VAL, false},
+    {"9e1999", HUGE_VAL, false},
+    {"9e19999", HUGE_VAL, false},
+    {"9e99999999999999999999", HUGE_VAL, false},
+    {"-9e307", -9e307, true},
+    {"-1.7976e308", -1.7976e308, true},
+    {"-1.7977e308", -HUGE_VAL, false},
+    {"-1.797693134862315807e+308", -HUGE_VAL, true},
+    {"-1.797693134862315808e+308", -HUGE_VAL, false},
+    {"-9e308", -HUGE_VAL, false},
+    {"-9e309", -HUGE_VAL, false},
+    {"-9e999", -HUGE_VAL, false},
+    {"-9e1999", -HUGE_VAL, false},
+    {"-9e19999", -HUGE_VAL, false},
+    {"-9e99999999999999999999", -HUGE_VAL, false},
+
+    // Test more exponents.
+    {"1e-2", 0.01, true},
+    {"42 ", 42.0, false},
+    {" 1e-2", 0.01, false},
+    {"1e-2 ", 0.01, false},
+    {"-1E-7", -0.0000001, true},
+    {"01e02", 100, true},
+    {"2.3e15", 2.3e15, true},
+    {"100e-309", 100e-309, true},
+
+    // Test some invalid cases.
+    {"\t\n\v\f\r -123.45e2", -12345.0, false},
+    {"+123 e4", 123.0, false},
+    {"123e ", 123.0, false},
+    {"123e", 123.0, false},
+    {" 2.99", 2.99, false},
+    {"1e3.4", 1000.0, false},
+    {"nothing", 0.0, false},
+    {"-", 0.0, false},
+    {"+", 0.0, false},
+    {"", 0.0, false},
+
+    // crbug.org/588726
+    {"-0.0010000000000000000000000000000000000000001e-256",
+     -1.0000000000000001e-259, true},
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    double output;
+    errno = 1;
+    EXPECT_EQ(cases[i].success, StringToDouble(cases[i].input, &output));
+    if (cases[i].success)
+      EXPECT_EQ(1, errno) << i;  // confirm that errno is unchanged.
+    EXPECT_DOUBLE_EQ(cases[i].output, output);
+  }
+
+  // One additional test to verify conversion of numbers in strings with
+  // embedded NUL characters.  The NUL and extra data after it should be
+  // interpreted as junk after the number.
+  const char input[] = "3.14\0" "159";
+  std::string input_string(input, arraysize(input) - 1);
+  double output;
+  EXPECT_FALSE(StringToDouble(input_string, &output));
+  EXPECT_DOUBLE_EQ(3.14, output);
+}
+
+TEST(StringNumberConversionsTest, DoubleToString) {
+  static const struct {
+    double input;
+    const char* expected;
+  } cases[] = {
+    {0.0, "0"},
+    {1.25, "1.25"},
+    {1.33518e+012, "1.33518e+12"},
+    {1.33489e+012, "1.33489e+12"},
+    {1.33505e+012, "1.33505e+12"},
+    {1.33545e+009, "1335450000"},
+    {1.33503e+009, "1335030000"},
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    EXPECT_EQ(cases[i].expected, NumberToString(cases[i].input));
+    EXPECT_EQ(cases[i].expected, UTF16ToUTF8(NumberToString16(cases[i].input)));
+  }
+
+  // The following two values were seen in crashes in the wild.
+  const char input_bytes[8] = {0, 0, 0, 0, '\xee', '\x6d', '\x73', '\x42'};
+  double input = 0;
+  memcpy(&input, input_bytes, arraysize(input_bytes));
+  EXPECT_EQ("1335179083776", NumberToString(input));
+  const char input_bytes2[8] =
+      {0, 0, 0, '\xa0', '\xda', '\x6c', '\x73', '\x42'};
+  input = 0;
+  memcpy(&input, input_bytes2, arraysize(input_bytes2));
+  EXPECT_EQ("1334890332160", NumberToString(input));
+}
+
+TEST(StringNumberConversionsTest, HexEncode) {
+  std::string hex(HexEncode(nullptr, 0));
+  EXPECT_EQ(hex.length(), 0U);
+  unsigned char bytes[] = {0x01, 0xff, 0x02, 0xfe, 0x03, 0x80, 0x81};
+  hex = HexEncode(bytes, sizeof(bytes));
+  EXPECT_EQ(hex.compare("01FF02FE038081"), 0);
+}
+
+// Test cases of known-bad strtod conversions that motivated the use of dmg_fp.
+// See https://bugs.chromium.org/p/chromium/issues/detail?id=593512.
+TEST(StringNumberConversionsTest, StrtodFailures) {
+  static const struct {
+    const char* input;
+    uint64_t expected;
+  } cases[] = {
+      // http://www.exploringbinary.com/incorrectly-rounded-conversions-in-visual-c-plus-plus/
+      {"9214843084008499", 0x43405e6cec57761aULL},
+      {"0.500000000000000166533453693773481063544750213623046875",
+       0x3fe0000000000002ULL},
+      {"30078505129381147446200", 0x44997a3c7271b021ULL},
+      {"1777820000000000000001", 0x4458180d5bad2e3eULL},
+      {"0.500000000000000166547006220929549868969843373633921146392822265625",
+       0x3fe0000000000002ULL},
+      {"0.50000000000000016656055874808561867439493653364479541778564453125",
+       0x3fe0000000000002ULL},
+      {"0.3932922657273", 0x3fd92bb352c4623aULL},
+
+      // http://www.exploringbinary.com/incorrectly-rounded-conversions-in-gcc-and-glibc/
+      {"0.500000000000000166533453693773481063544750213623046875",
+       0x3fe0000000000002ULL},
+      {"3.518437208883201171875e13", 0x42c0000000000002ULL},
+      {"62.5364939768271845828", 0x404f44abd5aa7ca4ULL},
+      {"8.10109172351e-10", 0x3e0bd5cbaef0fd0cULL},
+      {"1.50000000000000011102230246251565404236316680908203125",
+       0x3ff8000000000000ULL},
+      {"9007199254740991.4999999999999999999999999999999995",
+       0x433fffffffffffffULL},
+
+      // http://www.exploringbinary.com/incorrect-decimal-to-floating-point-conversion-in-sqlite/
+      {"1e-23", 0x3b282db34012b251ULL},
+      {"8.533e+68", 0x4e3fa69165a8eea2ULL},
+      {"4.1006e-184", 0x19dbe0d1c7ea60c9ULL},
+      {"9.998e+307", 0x7fe1cc0a350ca87bULL},
+      {"9.9538452227e-280", 0x0602117ae45cde43ULL},
+      {"6.47660115e-260", 0x0a1fdd9e333badadULL},
+      {"7.4e+47", 0x49e033d7eca0adefULL},
+      {"5.92e+48", 0x4a1033d7eca0adefULL},
+      {"7.35e+66", 0x4dd172b70eababa9ULL},
+      {"8.32116e+55", 0x4b8b2628393e02cdULL},
+  };
+
+  for (const auto& test : cases) {
+    double output;
+    EXPECT_TRUE(StringToDouble(test.input, &output));
+    EXPECT_EQ(bit_cast<uint64_t>(output), test.expected);
+  }
+}
+
+}  // namespace base
diff --git a/base/strings/string_piece.cc b/base/strings/string_piece.cc
new file mode 100644
index 0000000..c82a223
--- /dev/null
+++ b/base/strings/string_piece.cc
@@ -0,0 +1,453 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Copied from strings/stringpiece.cc with modifications
+
+#include "base/strings/string_piece.h"
+
+#include <limits.h>
+
+#include <algorithm>
+#include <ostream>
+
+#include "base/logging.h"
+
+namespace base {
+namespace {
+
+// For each character in characters_wanted, sets the index corresponding
+// to the ASCII code of that character to true in |table|.  This is used by
+// the find_.*_of methods below to tell whether or not a character is in
+// the lookup table in constant time.
+// The argument `table' must be an array that is large enough to hold all
+// the possible values of an unsigned char.  Thus it should be declared
+// as follows:
+//   bool table[UCHAR_MAX + 1]
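+// For example, after BuildLookupTable("abc", table), table['a'], table['b']
+// and table['c'] are true and all other entries are left untouched.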
+inline void BuildLookupTable(const StringPiece& characters_wanted,
+                             bool* table) {
+  const size_t length = characters_wanted.length();
+  const char* const data = characters_wanted.data();
+  for (size_t i = 0; i < length; ++i) {
+    table[static_cast<unsigned char>(data[i])] = true;
+  }
+}
+
+}  // namespace
+
+// MSVC doesn't like complex extern templates and DLLs.
+#if !defined(COMPILER_MSVC)
+template class BasicStringPiece<std::string>;
+template class BasicStringPiece<string16>;
+#endif
+
+bool operator==(const StringPiece& x, const StringPiece& y) {
+  if (x.size() != y.size())
+    return false;
+
+  return CharTraits<StringPiece::value_type>::compare(x.data(), y.data(),
+                                                      x.size()) == 0;
+}
+
+std::ostream& operator<<(std::ostream& o, const StringPiece& piece) {
+  o.write(piece.data(), static_cast<std::streamsize>(piece.size()));
+  return o;
+}
+
+namespace internal {
+
+template<typename STR>
+void CopyToStringT(const BasicStringPiece<STR>& self, STR* target) {
+  if (self.empty())
+    target->clear();
+  else
+    target->assign(self.data(), self.size());
+}
+
+void CopyToString(const StringPiece& self, std::string* target) {
+  CopyToStringT(self, target);
+}
+
+void CopyToString(const StringPiece16& self, string16* target) {
+  CopyToStringT(self, target);
+}
+
+template<typename STR>
+void AppendToStringT(const BasicStringPiece<STR>& self, STR* target) {
+  if (!self.empty())
+    target->append(self.data(), self.size());
+}
+
+void AppendToString(const StringPiece& self, std::string* target) {
+  AppendToStringT(self, target);
+}
+
+void AppendToString(const StringPiece16& self, string16* target) {
+  AppendToStringT(self, target);
+}
+
+template<typename STR>
+size_t copyT(const BasicStringPiece<STR>& self,
+             typename STR::value_type* buf,
+             size_t n,
+             size_t pos) {
+  size_t ret = std::min(self.size() - pos, n);
+  memcpy(buf, self.data() + pos, ret * sizeof(typename STR::value_type));
+  return ret;
+}
+
+size_t copy(const StringPiece& self, char* buf, size_t n, size_t pos) {
+  return copyT(self, buf, n, pos);
+}
+
+size_t copy(const StringPiece16& self, char16* buf, size_t n, size_t pos) {
+  return copyT(self, buf, n, pos);
+}
+
+template<typename STR>
+size_t findT(const BasicStringPiece<STR>& self,
+             const BasicStringPiece<STR>& s,
+             size_t pos) {
+  if (pos > self.size())
+    return BasicStringPiece<STR>::npos;
+
+  typename BasicStringPiece<STR>::const_iterator result =
+      std::search(self.begin() + pos, self.end(), s.begin(), s.end());
+  const size_t xpos = static_cast<size_t>(result - self.begin());
+  return xpos + s.size() <= self.size() ? xpos : BasicStringPiece<STR>::npos;
+}
+
+size_t find(const StringPiece& self, const StringPiece& s, size_t pos) {
+  return findT(self, s, pos);
+}
+
+size_t find(const StringPiece16& self, const StringPiece16& s, size_t pos) {
+  return findT(self, s, pos);
+}
+
+template<typename STR>
+size_t findT(const BasicStringPiece<STR>& self,
+             typename STR::value_type c,
+             size_t pos) {
+  if (pos >= self.size())
+    return BasicStringPiece<STR>::npos;
+
+  typename BasicStringPiece<STR>::const_iterator result =
+      std::find(self.begin() + pos, self.end(), c);
+  return result != self.end() ?
+      static_cast<size_t>(result - self.begin()) : BasicStringPiece<STR>::npos;
+}
+
+size_t find(const StringPiece& self, char c, size_t pos) {
+  return findT(self, c, pos);
+}
+
+size_t find(const StringPiece16& self, char16 c, size_t pos) {
+  return findT(self, c, pos);
+}
+
+template<typename STR>
+size_t rfindT(const BasicStringPiece<STR>& self,
+              const BasicStringPiece<STR>& s,
+              size_t pos) {
+  if (self.size() < s.size())
+    return BasicStringPiece<STR>::npos;
+
+  if (s.empty())
+    return std::min(self.size(), pos);
+
+  typename BasicStringPiece<STR>::const_iterator last =
+      self.begin() + std::min(self.size() - s.size(), pos) + s.size();
+  typename BasicStringPiece<STR>::const_iterator result =
+      std::find_end(self.begin(), last, s.begin(), s.end());
+  return result != last ?
+      static_cast<size_t>(result - self.begin()) : BasicStringPiece<STR>::npos;
+}
+
+size_t rfind(const StringPiece& self, const StringPiece& s, size_t pos) {
+  return rfindT(self, s, pos);
+}
+
+size_t rfind(const StringPiece16& self, const StringPiece16& s, size_t pos) {
+  return rfindT(self, s, pos);
+}
+
+template<typename STR>
+size_t rfindT(const BasicStringPiece<STR>& self,
+              typename STR::value_type c,
+              size_t pos) {
+  if (self.size() == 0)
+    return BasicStringPiece<STR>::npos;
+
+  for (size_t i = std::min(pos, self.size() - 1); ; --i) {
+    if (self.data()[i] == c)
+      return i;
+    if (i == 0)
+      break;
+  }
+  return BasicStringPiece<STR>::npos;
+}
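+
+// Note: the reverse-scanning loops in this file test "if (i == 0) break;" at
+// the bottom of the body rather than using "i >= 0" as the loop condition,
+// because size_t is unsigned and decrementing past zero would wrap around
+// rather than terminate.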
+
+size_t rfind(const StringPiece& self, char c, size_t pos) {
+  return rfindT(self, c, pos);
+}
+
+size_t rfind(const StringPiece16& self, char16 c, size_t pos) {
+  return rfindT(self, c, pos);
+}
+
+// 8-bit version using lookup table.
+size_t find_first_of(const StringPiece& self,
+                     const StringPiece& s,
+                     size_t pos) {
+  if (self.size() == 0 || s.size() == 0)
+    return StringPiece::npos;
+
+  // Avoid the cost of BuildLookupTable() for a single-character search.
+  if (s.size() == 1)
+    return find(self, s.data()[0], pos);
+
+  bool lookup[UCHAR_MAX + 1] = { false };
+  BuildLookupTable(s, lookup);
+  for (size_t i = pos; i < self.size(); ++i) {
+    if (lookup[static_cast<unsigned char>(self.data()[i])]) {
+      return i;
+    }
+  }
+  return StringPiece::npos;
+}
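+
+// Complexity note: with the lookup table, the 8-bit version above runs in
+// O(self.size() + s.size()). The 16-bit version below is O(self.size() *
+// s.size()); a 65536-entry table per call would usually cost more than the
+// brute-force scan for the short character sets these functions typically
+// receive.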
+
+// 16-bit brute force version.
+size_t find_first_of(const StringPiece16& self,
+                     const StringPiece16& s,
+                     size_t pos) {
+  StringPiece16::const_iterator found =
+      std::find_first_of(self.begin() + pos, self.end(), s.begin(), s.end());
+  if (found == self.end())
+    return StringPiece16::npos;
+  return found - self.begin();
+}
+
+// 8-bit version using lookup table.
+size_t find_first_not_of(const StringPiece& self,
+                         const StringPiece& s,
+                         size_t pos) {
+  if (self.size() == 0)
+    return StringPiece::npos;
+
+  if (s.size() == 0)
+    return 0;
+
+  // Avoid the cost of BuildLookupTable() for a single-character search.
+  if (s.size() == 1)
+    return find_first_not_of(self, s.data()[0], pos);
+
+  bool lookup[UCHAR_MAX + 1] = { false };
+  BuildLookupTable(s, lookup);
+  for (size_t i = pos; i < self.size(); ++i) {
+    if (!lookup[static_cast<unsigned char>(self.data()[i])]) {
+      return i;
+    }
+  }
+  return StringPiece::npos;
+}
+
+// 16-bit brute-force version.
+size_t find_first_not_of(const StringPiece16& self,
+                         const StringPiece16& s,
+                         size_t pos) {
+  if (self.size() == 0)
+    return StringPiece16::npos;
+
+  for (size_t self_i = pos; self_i < self.size(); ++self_i) {
+    bool found = false;
+    for (size_t s_i = 0; s_i < s.size(); ++s_i) {
+      if (self[self_i] == s[s_i]) {
+        found = true;
+        break;
+      }
+    }
+    if (!found)
+      return self_i;
+  }
+  return StringPiece16::npos;
+}
+
+template<typename STR>
+size_t find_first_not_ofT(const BasicStringPiece<STR>& self,
+                          typename STR::value_type c,
+                          size_t pos) {
+  if (self.size() == 0)
+    return BasicStringPiece<STR>::npos;
+
+  for (; pos < self.size(); ++pos) {
+    if (self.data()[pos] != c) {
+      return pos;
+    }
+  }
+  return BasicStringPiece<STR>::npos;
+}
+
+size_t find_first_not_of(const StringPiece& self,
+                         char c,
+                         size_t pos) {
+  return find_first_not_ofT(self, c, pos);
+}
+
+size_t find_first_not_of(const StringPiece16& self,
+                         char16 c,
+                         size_t pos) {
+  return find_first_not_ofT(self, c, pos);
+}
+
+// 8-bit version using lookup table.
+size_t find_last_of(const StringPiece& self, const StringPiece& s, size_t pos) {
+  if (self.size() == 0 || s.size() == 0)
+    return StringPiece::npos;
+
+  // Avoid the cost of BuildLookupTable() for a single-character search.
+  if (s.size() == 1)
+    return rfind(self, s.data()[0], pos);
+
+  bool lookup[UCHAR_MAX + 1] = { false };
+  BuildLookupTable(s, lookup);
+  for (size_t i = std::min(pos, self.size() - 1); ; --i) {
+    if (lookup[static_cast<unsigned char>(self.data()[i])])
+      return i;
+    if (i == 0)
+      break;
+  }
+  return StringPiece::npos;
+}
+
+// 16-bit brute-force version.
+size_t find_last_of(const StringPiece16& self,
+                    const StringPiece16& s,
+                    size_t pos) {
+  if (self.size() == 0)
+    return StringPiece16::npos;
+
+  for (size_t self_i = std::min(pos, self.size() - 1); ; --self_i) {
+    for (size_t s_i = 0; s_i < s.size(); s_i++) {
+      if (self.data()[self_i] == s[s_i])
+        return self_i;
+    }
+    if (self_i == 0)
+      break;
+  }
+  return StringPiece16::npos;
+}
+
+// 8-bit version using lookup table.
+size_t find_last_not_of(const StringPiece& self,
+                        const StringPiece& s,
+                        size_t pos) {
+  if (self.size() == 0)
+    return StringPiece::npos;
+
+  size_t i = std::min(pos, self.size() - 1);
+  if (s.size() == 0)
+    return i;
+
+  // Avoid the cost of BuildLookupTable() for a single-character search.
+  if (s.size() == 1)
+    return find_last_not_of(self, s.data()[0], pos);
+
+  bool lookup[UCHAR_MAX + 1] = { false };
+  BuildLookupTable(s, lookup);
+  for (; ; --i) {
+    if (!lookup[static_cast<unsigned char>(self.data()[i])])
+      return i;
+    if (i == 0)
+      break;
+  }
+  return StringPiece::npos;
+}
+
+// 16-bit brute-force version.
+size_t find_last_not_of(const StringPiece16& self,
+                        const StringPiece16& s,
+                        size_t pos) {
+  if (self.size() == 0)
+    return StringPiece16::npos;
+
+  for (size_t self_i = std::min(pos, self.size() - 1); ; --self_i) {
+    bool found = false;
+    for (size_t s_i = 0; s_i < s.size(); s_i++) {
+      if (self.data()[self_i] == s[s_i]) {
+        found = true;
+        break;
+      }
+    }
+    if (!found)
+      return self_i;
+    if (self_i == 0)
+      break;
+  }
+  return StringPiece16::npos;
+}
+
+template<typename STR>
+size_t find_last_not_ofT(const BasicStringPiece<STR>& self,
+                         typename STR::value_type c,
+                         size_t pos) {
+  if (self.size() == 0)
+    return BasicStringPiece<STR>::npos;
+
+  for (size_t i = std::min(pos, self.size() - 1); ; --i) {
+    if (self.data()[i] != c)
+      return i;
+    if (i == 0)
+      break;
+  }
+  return BasicStringPiece<STR>::npos;
+}
+
+size_t find_last_not_of(const StringPiece& self,
+                        char c,
+                        size_t pos) {
+  return find_last_not_ofT(self, c, pos);
+}
+
+size_t find_last_not_of(const StringPiece16& self,
+                        char16 c,
+                        size_t pos) {
+  return find_last_not_ofT(self, c, pos);
+}
+
+template<typename STR>
+BasicStringPiece<STR> substrT(const BasicStringPiece<STR>& self,
+                              size_t pos,
+                              size_t n) {
+  if (pos > self.size()) pos = self.size();
+  if (n > self.size() - pos) n = self.size() - pos;
+  return BasicStringPiece<STR>(self.data() + pos, n);
+}
+
+StringPiece substr(const StringPiece& self,
+                   size_t pos,
+                   size_t n) {
+  return substrT(self, pos, n);
+}
+
+StringPiece16 substr(const StringPiece16& self,
+                     size_t pos,
+                     size_t n) {
+  return substrT(self, pos, n);
+}
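+
+// Unlike std::string::substr(), which throws std::out_of_range when
+// pos > size(), substrT() clamps |pos| and |n| to the valid range, so e.g.
+// substr(StringPiece("abc"), 99, 2) yields an empty piece.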
+
+#if DCHECK_IS_ON()
+void AssertIteratorsInOrder(std::string::const_iterator begin,
+                            std::string::const_iterator end) {
+  DCHECK(begin <= end) << "StringPiece iterators swapped or invalid.";
+}
+void AssertIteratorsInOrder(string16::const_iterator begin,
+                            string16::const_iterator end) {
+  DCHECK(begin <= end) << "StringPiece iterators swapped or invalid.";
+}
+#endif
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/strings/string_piece.h b/base/strings/string_piece.h
new file mode 100644
index 0000000..775ea7c
--- /dev/null
+++ b/base/strings/string_piece.h
@@ -0,0 +1,483 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Copied from strings/stringpiece.h with modifications
+//
+// A string-like object that points to a sized piece of memory.
+//
+// You can use StringPiece as a function or method parameter.  A StringPiece
+// parameter can receive a double-quoted string literal argument, a "const
+// char*" argument, a string argument, or a StringPiece argument with no data
+// copying.  Systematic use of StringPiece for arguments reduces data
+// copies and strlen() calls.
+//
+// Prefer passing StringPieces by value:
+//   void MyFunction(StringPiece arg);
+// If circumstances require, you may also pass by const reference:
+//   void MyFunction(const StringPiece& arg);  // not preferred
+// Both of these have the same lifetime semantics.  Passing by value
+// generates slightly smaller code.  For more discussion, Googlers can see
+// the thread go/stringpiecebyvalue on c-users.
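+//
+// For example (a hypothetical caller, shown only for illustration):
+//
+//   void TakesPiece(StringPiece s);
+//
+//   TakesPiece("a literal");            // no std::string temporary is built
+//   TakesPiece(some_std_string);        // the character data is not copied
+//   TakesPiece(StringPiece(buf, len));  // any sized region of memory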
+
+#ifndef BASE_STRINGS_STRING_PIECE_H_
+#define BASE_STRINGS_STRING_PIECE_H_
+
+#include <stddef.h>
+
+#include <iosfwd>
+#include <string>
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/strings/char_traits.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_piece_forward.h"
+
+namespace base {
+
+// internal --------------------------------------------------------------------
+
+// Many of the StringPiece functions use different implementations for the
+// 8-bit and 16-bit versions, and we don't want lots of template expansions in
+// this (very common) header that will slow down compilation.
+//
+// So here we define overloaded functions called by the StringPiece template.
+// For those that share an implementation, the two versions will expand to a
+// template internal to the .cc file.
+namespace internal {
+
+BASE_EXPORT void CopyToString(const StringPiece& self, std::string* target);
+BASE_EXPORT void CopyToString(const StringPiece16& self, string16* target);
+
+BASE_EXPORT void AppendToString(const StringPiece& self, std::string* target);
+BASE_EXPORT void AppendToString(const StringPiece16& self, string16* target);
+
+BASE_EXPORT size_t copy(const StringPiece& self,
+                        char* buf,
+                        size_t n,
+                        size_t pos);
+BASE_EXPORT size_t copy(const StringPiece16& self,
+                        char16* buf,
+                        size_t n,
+                        size_t pos);
+
+BASE_EXPORT size_t find(const StringPiece& self,
+                        const StringPiece& s,
+                        size_t pos);
+BASE_EXPORT size_t find(const StringPiece16& self,
+                        const StringPiece16& s,
+                        size_t pos);
+BASE_EXPORT size_t find(const StringPiece& self,
+                        char c,
+                        size_t pos);
+BASE_EXPORT size_t find(const StringPiece16& self,
+                        char16 c,
+                        size_t pos);
+
+BASE_EXPORT size_t rfind(const StringPiece& self,
+                         const StringPiece& s,
+                         size_t pos);
+BASE_EXPORT size_t rfind(const StringPiece16& self,
+                         const StringPiece16& s,
+                         size_t pos);
+BASE_EXPORT size_t rfind(const StringPiece& self,
+                         char c,
+                         size_t pos);
+BASE_EXPORT size_t rfind(const StringPiece16& self,
+                         char16 c,
+                         size_t pos);
+
+BASE_EXPORT size_t find_first_of(const StringPiece& self,
+                                 const StringPiece& s,
+                                 size_t pos);
+BASE_EXPORT size_t find_first_of(const StringPiece16& self,
+                                 const StringPiece16& s,
+                                 size_t pos);
+
+BASE_EXPORT size_t find_first_not_of(const StringPiece& self,
+                                     const StringPiece& s,
+                                     size_t pos);
+BASE_EXPORT size_t find_first_not_of(const StringPiece16& self,
+                                     const StringPiece16& s,
+                                     size_t pos);
+BASE_EXPORT size_t find_first_not_of(const StringPiece& self,
+                                     char c,
+                                     size_t pos);
+BASE_EXPORT size_t find_first_not_of(const StringPiece16& self,
+                                     char16 c,
+                                     size_t pos);
+
+BASE_EXPORT size_t find_last_of(const StringPiece& self,
+                                const StringPiece& s,
+                                size_t pos);
+BASE_EXPORT size_t find_last_of(const StringPiece16& self,
+                                const StringPiece16& s,
+                                size_t pos);
+BASE_EXPORT size_t find_last_of(const StringPiece& self,
+                                char c,
+                                size_t pos);
+BASE_EXPORT size_t find_last_of(const StringPiece16& self,
+                                char16 c,
+                                size_t pos);
+
+BASE_EXPORT size_t find_last_not_of(const StringPiece& self,
+                                    const StringPiece& s,
+                                    size_t pos);
+BASE_EXPORT size_t find_last_not_of(const StringPiece16& self,
+                                    const StringPiece16& s,
+                                    size_t pos);
+BASE_EXPORT size_t find_last_not_of(const StringPiece16& self,
+                                    char16 c,
+                                    size_t pos);
+BASE_EXPORT size_t find_last_not_of(const StringPiece& self,
+                                    char c,
+                                    size_t pos);
+
+BASE_EXPORT StringPiece substr(const StringPiece& self,
+                               size_t pos,
+                               size_t n);
+BASE_EXPORT StringPiece16 substr(const StringPiece16& self,
+                                 size_t pos,
+                                 size_t n);
+
+#if DCHECK_IS_ON()
+// Asserts that begin <= end to catch some errors with iterator usage.
+BASE_EXPORT void AssertIteratorsInOrder(std::string::const_iterator begin,
+                                        std::string::const_iterator end);
+BASE_EXPORT void AssertIteratorsInOrder(string16::const_iterator begin,
+                                        string16::const_iterator end);
+#endif
+
+}  // namespace internal
+
+// BasicStringPiece ------------------------------------------------------------
+
+// Defines the types, methods, operators, and data members common to both
+// StringPiece and StringPiece16. Do not refer to this class directly, but
+// rather to BasicStringPiece, StringPiece, or StringPiece16.
+//
+// This is templatized by string class type rather than character type, so
+// BasicStringPiece<std::string> or BasicStringPiece<base::string16>.
+template <typename STRING_TYPE> class BasicStringPiece {
+ public:
+  // Standard STL container boilerplate.
+  typedef size_t size_type;
+  typedef typename STRING_TYPE::value_type value_type;
+  typedef const value_type* pointer;
+  typedef const value_type& reference;
+  typedef const value_type& const_reference;
+  typedef ptrdiff_t difference_type;
+  typedef const value_type* const_iterator;
+  typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
+
+  static const size_type npos;
+
+ public:
+  // We provide non-explicit singleton constructors so users can pass
+  // in a "const char*" or a "string" wherever a "StringPiece" is
+  // expected (likewise for char16, string16, StringPiece16).
+  constexpr BasicStringPiece() : ptr_(NULL), length_(0) {}
+  // TODO(dcheng): Construction from nullptr is not allowed for
+  // std::basic_string_view, so remove the special handling for it.
+  // Note: This doesn't just use STRING_TYPE::traits_type::length(), since that
+  // isn't constexpr until C++17.
+  constexpr BasicStringPiece(const value_type* str)
+      : ptr_(str), length_(!str ? 0 : CharTraits<value_type>::length(str)) {}
+  BasicStringPiece(const STRING_TYPE& str)
+      : ptr_(str.data()), length_(str.size()) {}
+  constexpr BasicStringPiece(const value_type* offset, size_type len)
+      : ptr_(offset), length_(len) {}
+  BasicStringPiece(const typename STRING_TYPE::const_iterator& begin,
+                   const typename STRING_TYPE::const_iterator& end) {
+#if DCHECK_IS_ON()
+    // This assertion is done out-of-line to avoid bringing in logging.h and
+    // instantiating logging macros for every instantiation.
+    internal::AssertIteratorsInOrder(begin, end);
+#endif
+    length_ = static_cast<size_t>(std::distance(begin, end));
+
+    // The length test before assignment is to avoid dereferencing an iterator
+    // that may point to the end() of a string.
+    ptr_ = length_ > 0 ? &*begin : nullptr;
+  }
+
+  // data() may return a pointer to a buffer with embedded NULs, and the
+  // returned buffer may or may not be null terminated.  Therefore it is
+  // typically a mistake to pass data() to a routine that expects a NUL
+  // terminated string.
+  constexpr const value_type* data() const { return ptr_; }
+  constexpr size_type size() const { return length_; }
+  constexpr size_type length() const { return length_; }
+  bool empty() const { return length_ == 0; }
+
+  void clear() {
+    ptr_ = NULL;
+    length_ = 0;
+  }
+  void set(const value_type* data, size_type len) {
+    ptr_ = data;
+    length_ = len;
+  }
+  void set(const value_type* str) {
+    ptr_ = str;
+    length_ = str ? STRING_TYPE::traits_type::length(str) : 0;
+  }
+
+  constexpr value_type operator[](size_type i) const {
+    CHECK(i < length_);
+    return ptr_[i];
+  }
+
+  value_type front() const {
+    CHECK_NE(0UL, length_);
+    return ptr_[0];
+  }
+
+  value_type back() const {
+    CHECK_NE(0UL, length_);
+    return ptr_[length_ - 1];
+  }
+
+  constexpr void remove_prefix(size_type n) {
+    CHECK(n <= length_);
+    ptr_ += n;
+    length_ -= n;
+  }
+
+  constexpr void remove_suffix(size_type n) {
+    CHECK(n <= length_);
+    length_ -= n;
+  }
+
+  constexpr int compare(BasicStringPiece x) const noexcept {
+    int r = CharTraits<value_type>::compare(
+        ptr_, x.ptr_, (length_ < x.length_ ? length_ : x.length_));
+    if (r == 0) {
+      if (length_ < x.length_) r = -1;
+      else if (length_ > x.length_) r = +1;
+    }
+    return r;
+  }
+
+  // This is the style of conversion preferred by std::string_view in C++17.
+  explicit operator STRING_TYPE() const { return as_string(); }
+
+  STRING_TYPE as_string() const {
+    // std::string doesn't like to take a NULL pointer even with a 0 size.
+    return empty() ? STRING_TYPE() : STRING_TYPE(data(), size());
+  }
+
+  const_iterator begin() const { return ptr_; }
+  const_iterator end() const { return ptr_ + length_; }
+  const_reverse_iterator rbegin() const {
+    return const_reverse_iterator(ptr_ + length_);
+  }
+  const_reverse_iterator rend() const {
+    return const_reverse_iterator(ptr_);
+  }
+
+  size_type max_size() const { return length_; }
+  size_type capacity() const { return length_; }
+
+  // Sets the value of the given string target type to be the current string.
+  // This saves a temporary over doing |a = b.as_string()|.
+  void CopyToString(STRING_TYPE* target) const {
+    internal::CopyToString(*this, target);
+  }
+
+  void AppendToString(STRING_TYPE* target) const {
+    internal::AppendToString(*this, target);
+  }
+
+  size_type copy(value_type* buf, size_type n, size_type pos = 0) const {
+    return internal::copy(*this, buf, n, pos);
+  }
+
+  // Does "this" start with "x"
+  constexpr bool starts_with(BasicStringPiece x) const noexcept {
+    return (
+        (this->length_ >= x.length_) &&
+        (CharTraits<value_type>::compare(this->ptr_, x.ptr_, x.length_) == 0));
+  }
+
+  // Does "this" end with "x"
+  constexpr bool ends_with(BasicStringPiece x) const noexcept {
+    return ((this->length_ >= x.length_) &&
+            (CharTraits<value_type>::compare(
+                 this->ptr_ + (this->length_ - x.length_), x.ptr_, x.length_) ==
+             0));
+  }
+
+  // find: Search for a character or substring at a given offset.
+  size_type find(const BasicStringPiece<STRING_TYPE>& s,
+                 size_type pos = 0) const {
+    return internal::find(*this, s, pos);
+  }
+  size_type find(value_type c, size_type pos = 0) const {
+    return internal::find(*this, c, pos);
+  }
+
+  // rfind: Reverse find.
+  size_type rfind(const BasicStringPiece& s,
+                  size_type pos = BasicStringPiece::npos) const {
+    return internal::rfind(*this, s, pos);
+  }
+  size_type rfind(value_type c, size_type pos = BasicStringPiece::npos) const {
+    return internal::rfind(*this, c, pos);
+  }
+
+  // find_first_of: Find the first occurrence of one of a set of characters.
+  size_type find_first_of(const BasicStringPiece& s,
+                          size_type pos = 0) const {
+    return internal::find_first_of(*this, s, pos);
+  }
+  size_type find_first_of(value_type c, size_type pos = 0) const {
+    return find(c, pos);
+  }
+
+  // find_first_not_of: Find the first character not in the given set.
+  size_type find_first_not_of(const BasicStringPiece& s,
+                              size_type pos = 0) const {
+    return internal::find_first_not_of(*this, s, pos);
+  }
+  size_type find_first_not_of(value_type c, size_type pos = 0) const {
+    return internal::find_first_not_of(*this, c, pos);
+  }
+
+  // find_last_of: Find the last occurrence of one of a set of characters.
+  size_type find_last_of(const BasicStringPiece& s,
+                         size_type pos = BasicStringPiece::npos) const {
+    return internal::find_last_of(*this, s, pos);
+  }
+  size_type find_last_of(value_type c,
+                         size_type pos = BasicStringPiece::npos) const {
+    return rfind(c, pos);
+  }
+
+  // find_last_not_of: Find the last character not in the given set.
+  size_type find_last_not_of(const BasicStringPiece& s,
+                             size_type pos = BasicStringPiece::npos) const {
+    return internal::find_last_not_of(*this, s, pos);
+  }
+  size_type find_last_not_of(value_type c,
+                             size_type pos = BasicStringPiece::npos) const {
+    return internal::find_last_not_of(*this, c, pos);
+  }
+
+  // substr.
+  BasicStringPiece substr(size_type pos,
+                          size_type n = BasicStringPiece::npos) const {
+    return internal::substr(*this, pos, n);
+  }
+
+ protected:
+  const value_type* ptr_;
+  size_type length_;
+};
+
+template <typename STRING_TYPE>
+const typename BasicStringPiece<STRING_TYPE>::size_type
+BasicStringPiece<STRING_TYPE>::npos =
+    typename BasicStringPiece<STRING_TYPE>::size_type(-1);
+
+// MSVC doesn't like complex extern templates and DLLs.
+#if !defined(COMPILER_MSVC)
+extern template class BASE_EXPORT BasicStringPiece<std::string>;
+extern template class BASE_EXPORT BasicStringPiece<string16>;
+#endif
+
+// StringPiece operators -------------------------------------------------------
+
+BASE_EXPORT bool operator==(const StringPiece& x, const StringPiece& y);
+
+inline bool operator!=(const StringPiece& x, const StringPiece& y) {
+  return !(x == y);
+}
+
+inline bool operator<(const StringPiece& x, const StringPiece& y) {
+  const int r = CharTraits<StringPiece::value_type>::compare(
+      x.data(), y.data(), (x.size() < y.size() ? x.size() : y.size()));
+  return ((r < 0) || ((r == 0) && (x.size() < y.size())));
+}
+
+inline bool operator>(const StringPiece& x, const StringPiece& y) {
+  return y < x;
+}
+
+inline bool operator<=(const StringPiece& x, const StringPiece& y) {
+  return !(x > y);
+}
+
+inline bool operator>=(const StringPiece& x, const StringPiece& y) {
+  return !(x < y);
+}
+
+// StringPiece16 operators -----------------------------------------------------
+
+inline bool operator==(const StringPiece16& x, const StringPiece16& y) {
+  if (x.size() != y.size())
+    return false;
+
+  return CharTraits<StringPiece16::value_type>::compare(x.data(), y.data(),
+                                                        x.size()) == 0;
+}
+
+inline bool operator!=(const StringPiece16& x, const StringPiece16& y) {
+  return !(x == y);
+}
+
+inline bool operator<(const StringPiece16& x, const StringPiece16& y) {
+  const int r = CharTraits<StringPiece16::value_type>::compare(
+      x.data(), y.data(), (x.size() < y.size() ? x.size() : y.size()));
+  return ((r < 0) || ((r == 0) && (x.size() < y.size())));
+}
+
+inline bool operator>(const StringPiece16& x, const StringPiece16& y) {
+  return y < x;
+}
+
+inline bool operator<=(const StringPiece16& x, const StringPiece16& y) {
+  return !(x > y);
+}
+
+inline bool operator>=(const StringPiece16& x, const StringPiece16& y) {
+  return !(x < y);
+}
+
+BASE_EXPORT std::ostream& operator<<(std::ostream& o,
+                                     const StringPiece& piece);
+
+// Hashing ---------------------------------------------------------------------
+
+// We provide appropriate hash functions so StringPiece and StringPiece16 can
+// be used as keys in hash sets and maps.
+
+// This hash function is copied from base/strings/string16.h. We don't use the
+// ones already defined for string and string16 directly because it would
+// require the string constructors to be called, which we don't want.
+#define HASH_STRING_PIECE(StringPieceType, string_piece)         \
+  std::size_t result = 0;                                        \
+  for (StringPieceType::const_iterator i = string_piece.begin(); \
+       i != string_piece.end(); ++i)                             \
+    result = (result * 131) + *i;                                \
+  return result;
+
+struct StringPieceHash {
+  std::size_t operator()(const StringPiece& sp) const {
+    HASH_STRING_PIECE(StringPiece, sp);
+  }
+};
+struct StringPiece16Hash {
+  std::size_t operator()(const StringPiece16& sp16) const {
+    HASH_STRING_PIECE(StringPiece16, sp16);
+  }
+};
+struct WStringPieceHash {
+  std::size_t operator()(const WStringPiece& wsp) const {
+    HASH_STRING_PIECE(WStringPiece, wsp);
+  }
+};
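+
+// A minimal usage sketch (hypothetical, for illustration only). Note that the
+// container stores only pointers into the keys' backing memory, which must
+// outlive the container:
+//
+//   std::unordered_map<base::StringPiece, int, base::StringPieceHash> counts;
+//   counts[base::StringPiece("foo")]++;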
+
+}  // namespace base
+
+#endif  // BASE_STRINGS_STRING_PIECE_H_
diff --git a/base/strings/string_piece_forward.h b/base/strings/string_piece_forward.h
new file mode 100644
index 0000000..b50b980
--- /dev/null
+++ b/base/strings/string_piece_forward.h
@@ -0,0 +1,24 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Forward declaration of StringPiece types from base/strings/string_piece.h
+
+#ifndef BASE_STRINGS_STRING_PIECE_FORWARD_H_
+#define BASE_STRINGS_STRING_PIECE_FORWARD_H_
+
+#include <string>
+
+#include "base/strings/string16.h"
+
+namespace base {
+
+template <typename STRING_TYPE>
+class BasicStringPiece;
+typedef BasicStringPiece<std::string> StringPiece;
+typedef BasicStringPiece<string16> StringPiece16;
+typedef BasicStringPiece<std::wstring> WStringPiece;
+
+}  // namespace base
+
+#endif  // BASE_STRINGS_STRING_PIECE_FORWARD_H_
diff --git a/base/strings/string_piece_unittest.cc b/base/strings/string_piece_unittest.cc
new file mode 100644
index 0000000..17d0897
--- /dev/null
+++ b/base/strings/string_piece_unittest.cc
@@ -0,0 +1,805 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+
+#include <string>
+
+#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/utf_string_conversions.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+template <typename T>
+class CommonStringPieceTest : public ::testing::Test {
+ public:
+  static const T as_string(const char* input) {
+    return T(input);
+  }
+  static const T& as_string(const T& input) {
+    return input;
+  }
+};
+
+template <>
+class CommonStringPieceTest<string16> : public ::testing::Test {
+ public:
+  static const string16 as_string(const char* input) {
+    return ASCIIToUTF16(input);
+  }
+  static const string16 as_string(const std::string& input) {
+    return ASCIIToUTF16(input);
+  }
+};
+
+typedef ::testing::Types<std::string, string16> SupportedStringTypes;
+
+TYPED_TEST_CASE(CommonStringPieceTest, SupportedStringTypes);
+
+TYPED_TEST(CommonStringPieceTest, CheckComparisonOperators) {
+#define CMP_Y(op, x, y)                                                    \
+  {                                                                        \
+    TypeParam lhs(TestFixture::as_string(x));                              \
+    TypeParam rhs(TestFixture::as_string(y));                              \
+    ASSERT_TRUE( (BasicStringPiece<TypeParam>((lhs.c_str())) op            \
+                  BasicStringPiece<TypeParam>((rhs.c_str()))));            \
+    ASSERT_TRUE( (BasicStringPiece<TypeParam>((lhs.c_str())).compare(      \
+                      BasicStringPiece<TypeParam>((rhs.c_str()))) op 0));  \
+  }
+
+#define CMP_N(op, x, y)                                                    \
+  {                                                                        \
+    TypeParam lhs(TestFixture::as_string(x));                              \
+    TypeParam rhs(TestFixture::as_string(y));                              \
+    ASSERT_FALSE( (BasicStringPiece<TypeParam>((lhs.c_str())) op           \
+                  BasicStringPiece<TypeParam>((rhs.c_str()))));            \
+    ASSERT_FALSE( (BasicStringPiece<TypeParam>((lhs.c_str())).compare(     \
+                      BasicStringPiece<TypeParam>((rhs.c_str()))) op 0));  \
+  }
+
+  CMP_Y(==, "",   "");
+  CMP_Y(==, "a",  "a");
+  CMP_Y(==, "aa", "aa");
+  CMP_N(==, "a",  "");
+  CMP_N(==, "",   "a");
+  CMP_N(==, "a",  "b");
+  CMP_N(==, "a",  "aa");
+  CMP_N(==, "aa", "a");
+
+  CMP_N(!=, "",   "");
+  CMP_N(!=, "a",  "a");
+  CMP_N(!=, "aa", "aa");
+  CMP_Y(!=, "a",  "");
+  CMP_Y(!=, "",   "a");
+  CMP_Y(!=, "a",  "b");
+  CMP_Y(!=, "a",  "aa");
+  CMP_Y(!=, "aa", "a");
+
+  CMP_Y(<, "a",  "b");
+  CMP_Y(<, "a",  "aa");
+  CMP_Y(<, "aa", "b");
+  CMP_Y(<, "aa", "bb");
+  CMP_N(<, "a",  "a");
+  CMP_N(<, "b",  "a");
+  CMP_N(<, "aa", "a");
+  CMP_N(<, "b",  "aa");
+  CMP_N(<, "bb", "aa");
+
+  CMP_Y(<=, "a",  "a");
+  CMP_Y(<=, "a",  "b");
+  CMP_Y(<=, "a",  "aa");
+  CMP_Y(<=, "aa", "b");
+  CMP_Y(<=, "aa", "bb");
+  CMP_N(<=, "b",  "a");
+  CMP_N(<=, "aa", "a");
+  CMP_N(<=, "b",  "aa");
+  CMP_N(<=, "bb", "aa");
+
+  CMP_N(>=, "a",  "b");
+  CMP_N(>=, "a",  "aa");
+  CMP_N(>=, "aa", "b");
+  CMP_N(>=, "aa", "bb");
+  CMP_Y(>=, "a",  "a");
+  CMP_Y(>=, "b",  "a");
+  CMP_Y(>=, "aa", "a");
+  CMP_Y(>=, "b",  "aa");
+  CMP_Y(>=, "bb", "aa");
+
+  CMP_N(>, "a",  "a");
+  CMP_N(>, "a",  "b");
+  CMP_N(>, "a",  "aa");
+  CMP_N(>, "aa", "b");
+  CMP_N(>, "aa", "bb");
+  CMP_Y(>, "b",  "a");
+  CMP_Y(>, "aa", "a");
+  CMP_Y(>, "b",  "aa");
+  CMP_Y(>, "bb", "aa");
+
+  std::string x;
+  for (int i = 0; i < 256; i++) {
+    x += 'a';
+    std::string y = x;
+    CMP_Y(==, x, y);
+    for (int j = 0; j < i; j++) {
+      std::string z = x;
+      z[j] = 'b';       // Differs in position 'j'
+      CMP_N(==, x, z);
+    }
+  }
+
+#undef CMP_Y
+#undef CMP_N
+}
+
+TYPED_TEST(CommonStringPieceTest, CheckSTL) {
+  TypeParam alphabet(TestFixture::as_string("abcdefghijklmnopqrstuvwxyz"));
+  TypeParam abc(TestFixture::as_string("abc"));
+  TypeParam xyz(TestFixture::as_string("xyz"));
+  TypeParam foobar(TestFixture::as_string("foobar"));
+
+  BasicStringPiece<TypeParam> a(alphabet);
+  BasicStringPiece<TypeParam> b(abc);
+  BasicStringPiece<TypeParam> c(xyz);
+  BasicStringPiece<TypeParam> d(foobar);
+  BasicStringPiece<TypeParam> e;
+  TypeParam temp(TestFixture::as_string("123"));
+  temp += static_cast<typename TypeParam::value_type>(0);
+  temp += TestFixture::as_string("456");
+  BasicStringPiece<TypeParam> f(temp);
+
+  ASSERT_EQ(a[6], static_cast<typename TypeParam::value_type>('g'));
+  ASSERT_EQ(b[0], static_cast<typename TypeParam::value_type>('a'));
+  ASSERT_EQ(c[2], static_cast<typename TypeParam::value_type>('z'));
+  ASSERT_EQ(f[3], static_cast<typename TypeParam::value_type>('\0'));
+  ASSERT_EQ(f[5], static_cast<typename TypeParam::value_type>('5'));
+
+  ASSERT_EQ(*d.data(), static_cast<typename TypeParam::value_type>('f'));
+  ASSERT_EQ(d.data()[5], static_cast<typename TypeParam::value_type>('r'));
+  ASSERT_EQ(e.data(), nullptr);
+
+  ASSERT_EQ(*a.begin(), static_cast<typename TypeParam::value_type>('a'));
+  ASSERT_EQ(*(b.begin() + 2), static_cast<typename TypeParam::value_type>('c'));
+  ASSERT_EQ(*(c.end() - 1), static_cast<typename TypeParam::value_type>('z'));
+
+  ASSERT_EQ(*a.rbegin(), static_cast<typename TypeParam::value_type>('z'));
+  ASSERT_EQ(*(b.rbegin() + 2),
+            static_cast<typename TypeParam::value_type>('a'));
+  ASSERT_EQ(*(c.rend() - 1), static_cast<typename TypeParam::value_type>('x'));
+  ASSERT_EQ(a.rbegin() + 26, a.rend());
+
+  ASSERT_EQ(a.size(), 26U);
+  ASSERT_EQ(b.size(), 3U);
+  ASSERT_EQ(c.size(), 3U);
+  ASSERT_EQ(d.size(), 6U);
+  ASSERT_EQ(e.size(), 0U);
+  ASSERT_EQ(f.size(), 7U);
+
+  ASSERT_TRUE(!d.empty());
+  ASSERT_TRUE(d.begin() != d.end());
+  ASSERT_EQ(d.begin() + 6, d.end());
+
+  ASSERT_TRUE(e.empty());
+  ASSERT_EQ(e.begin(), e.end());
+
+  d.clear();
+  ASSERT_EQ(d.size(), 0U);
+  ASSERT_TRUE(d.empty());
+  ASSERT_EQ(d.data(), nullptr);
+  ASSERT_EQ(d.begin(), d.end());
+
+  ASSERT_GE(a.max_size(), a.capacity());
+  ASSERT_GE(a.capacity(), a.size());
+}
+
+TYPED_TEST(CommonStringPieceTest, CheckFind) {
+  typedef BasicStringPiece<TypeParam> Piece;
+
+  TypeParam alphabet(TestFixture::as_string("abcdefghijklmnopqrstuvwxyz"));
+  TypeParam abc(TestFixture::as_string("abc"));
+  TypeParam xyz(TestFixture::as_string("xyz"));
+  TypeParam foobar(TestFixture::as_string("foobar"));
+
+  BasicStringPiece<TypeParam> a(alphabet);
+  BasicStringPiece<TypeParam> b(abc);
+  BasicStringPiece<TypeParam> c(xyz);
+  BasicStringPiece<TypeParam> d(foobar);
+
+  d.clear();
+  Piece e;
+  TypeParam temp(TestFixture::as_string("123"));
+  temp.push_back('\0');
+  temp += TestFixture::as_string("456");
+  Piece f(temp);
+
+  typename TypeParam::value_type buf[4] = { '%', '%', '%', '%' };
+  ASSERT_EQ(a.copy(buf, 4), 4U);
+  ASSERT_EQ(buf[0], a[0]);
+  ASSERT_EQ(buf[1], a[1]);
+  ASSERT_EQ(buf[2], a[2]);
+  ASSERT_EQ(buf[3], a[3]);
+  ASSERT_EQ(a.copy(buf, 3, 7), 3U);
+  ASSERT_EQ(buf[0], a[7]);
+  ASSERT_EQ(buf[1], a[8]);
+  ASSERT_EQ(buf[2], a[9]);
+  ASSERT_EQ(buf[3], a[3]);
+  ASSERT_EQ(c.copy(buf, 99), 3U);
+  ASSERT_EQ(buf[0], c[0]);
+  ASSERT_EQ(buf[1], c[1]);
+  ASSERT_EQ(buf[2], c[2]);
+  ASSERT_EQ(buf[3], a[3]);
+
+  ASSERT_EQ(Piece::npos, TypeParam::npos);
+
+  ASSERT_EQ(a.find(b), 0U);
+  ASSERT_EQ(a.find(b, 1), Piece::npos);
+  ASSERT_EQ(a.find(c), 23U);
+  ASSERT_EQ(a.find(c, 9), 23U);
+  ASSERT_EQ(a.find(c, Piece::npos), Piece::npos);
+  ASSERT_EQ(b.find(c), Piece::npos);
+  ASSERT_EQ(b.find(c, Piece::npos), Piece::npos);
+  ASSERT_EQ(a.find(d), 0U);
+  ASSERT_EQ(a.find(e), 0U);
+  ASSERT_EQ(a.find(d, 12), 12U);
+  ASSERT_EQ(a.find(e, 17), 17U);
+  TypeParam not_found(TestFixture::as_string("xx not found bb"));
+  Piece g(not_found);
+  ASSERT_EQ(a.find(g), Piece::npos);
+  // empty string nonsense
+  ASSERT_EQ(d.find(b), Piece::npos);
+  ASSERT_EQ(e.find(b), Piece::npos);
+  ASSERT_EQ(d.find(b, 4), Piece::npos);
+  ASSERT_EQ(e.find(b, 7), Piece::npos);
+
+  size_t empty_search_pos = TypeParam().find(TypeParam());
+  ASSERT_EQ(d.find(d), empty_search_pos);
+  ASSERT_EQ(d.find(e), empty_search_pos);
+  ASSERT_EQ(e.find(d), empty_search_pos);
+  ASSERT_EQ(e.find(e), empty_search_pos);
+  ASSERT_EQ(d.find(d, 4), std::string().find(std::string(), 4));
+  ASSERT_EQ(d.find(e, 4), std::string().find(std::string(), 4));
+  ASSERT_EQ(e.find(d, 4), std::string().find(std::string(), 4));
+  ASSERT_EQ(e.find(e, 4), std::string().find(std::string(), 4));
+
+  ASSERT_EQ(a.find('a'), 0U);
+  ASSERT_EQ(a.find('c'), 2U);
+  ASSERT_EQ(a.find('z'), 25U);
+  ASSERT_EQ(a.find('$'), Piece::npos);
+  ASSERT_EQ(a.find('\0'), Piece::npos);
+  ASSERT_EQ(f.find('\0'), 3U);
+  ASSERT_EQ(f.find('3'), 2U);
+  ASSERT_EQ(f.find('5'), 5U);
+  ASSERT_EQ(g.find('o'), 4U);
+  ASSERT_EQ(g.find('o', 4), 4U);
+  ASSERT_EQ(g.find('o', 5), 8U);
+  ASSERT_EQ(a.find('b', 5), Piece::npos);
+  // empty string nonsense
+  ASSERT_EQ(d.find('\0'), Piece::npos);
+  ASSERT_EQ(e.find('\0'), Piece::npos);
+  ASSERT_EQ(d.find('\0', 4), Piece::npos);
+  ASSERT_EQ(e.find('\0', 7), Piece::npos);
+  ASSERT_EQ(d.find('x'), Piece::npos);
+  ASSERT_EQ(e.find('x'), Piece::npos);
+  ASSERT_EQ(d.find('x', 4), Piece::npos);
+  ASSERT_EQ(e.find('x', 7), Piece::npos);
+
+  ASSERT_EQ(a.rfind(b), 0U);
+  ASSERT_EQ(a.rfind(b, 1), 0U);
+  ASSERT_EQ(a.rfind(c), 23U);
+  ASSERT_EQ(a.rfind(c, 22U), Piece::npos);
+  ASSERT_EQ(a.rfind(c, 1U), Piece::npos);
+  ASSERT_EQ(a.rfind(c, 0U), Piece::npos);
+  ASSERT_EQ(b.rfind(c), Piece::npos);
+  ASSERT_EQ(b.rfind(c, 0U), Piece::npos);
+  ASSERT_EQ(a.rfind(d), static_cast<size_t>(a.as_string().rfind(TypeParam())));
+  ASSERT_EQ(a.rfind(e), a.as_string().rfind(TypeParam()));
+  ASSERT_EQ(a.rfind(d), static_cast<size_t>(TypeParam(a).rfind(TypeParam())));
+  ASSERT_EQ(a.rfind(e), TypeParam(a).rfind(TypeParam()));
+  ASSERT_EQ(a.rfind(d, 12), 12U);
+  ASSERT_EQ(a.rfind(e, 17), 17U);
+  ASSERT_EQ(a.rfind(g), Piece::npos);
+  ASSERT_EQ(d.rfind(b), Piece::npos);
+  ASSERT_EQ(e.rfind(b), Piece::npos);
+  ASSERT_EQ(d.rfind(b, 4), Piece::npos);
+  ASSERT_EQ(e.rfind(b, 7), Piece::npos);
+  // empty string nonsense
+  ASSERT_EQ(d.rfind(d, 4), std::string().rfind(std::string()));
+  ASSERT_EQ(e.rfind(d, 7), std::string().rfind(std::string()));
+  ASSERT_EQ(d.rfind(e, 4), std::string().rfind(std::string()));
+  ASSERT_EQ(e.rfind(e, 7), std::string().rfind(std::string()));
+  ASSERT_EQ(d.rfind(d), std::string().rfind(std::string()));
+  ASSERT_EQ(e.rfind(d), std::string().rfind(std::string()));
+  ASSERT_EQ(d.rfind(e), std::string().rfind(std::string()));
+  ASSERT_EQ(e.rfind(e), std::string().rfind(std::string()));
+
+  ASSERT_EQ(g.rfind('o'), 8U);
+  ASSERT_EQ(g.rfind('q'), Piece::npos);
+  ASSERT_EQ(g.rfind('o', 8), 8U);
+  ASSERT_EQ(g.rfind('o', 7), 4U);
+  ASSERT_EQ(g.rfind('o', 3), Piece::npos);
+  ASSERT_EQ(f.rfind('\0'), 3U);
+  ASSERT_EQ(f.rfind('\0', 12), 3U);
+  ASSERT_EQ(f.rfind('3'), 2U);
+  ASSERT_EQ(f.rfind('5'), 5U);
+  // empty string nonsense
+  ASSERT_EQ(d.rfind('o'), Piece::npos);
+  ASSERT_EQ(e.rfind('o'), Piece::npos);
+  ASSERT_EQ(d.rfind('o', 4), Piece::npos);
+  ASSERT_EQ(e.rfind('o', 7), Piece::npos);
+
+  TypeParam one_two_three_four(TestFixture::as_string("one,two:three;four"));
+  TypeParam comma_colon(TestFixture::as_string(",:"));
+  ASSERT_EQ(3U, Piece(one_two_three_four).find_first_of(comma_colon));
+  ASSERT_EQ(a.find_first_of(b), 0U);
+  ASSERT_EQ(a.find_first_of(b, 0), 0U);
+  ASSERT_EQ(a.find_first_of(b, 1), 1U);
+  ASSERT_EQ(a.find_first_of(b, 2), 2U);
+  ASSERT_EQ(a.find_first_of(b, 3), Piece::npos);
+  ASSERT_EQ(a.find_first_of(c), 23U);
+  ASSERT_EQ(a.find_first_of(c, 23), 23U);
+  ASSERT_EQ(a.find_first_of(c, 24), 24U);
+  ASSERT_EQ(a.find_first_of(c, 25), 25U);
+  ASSERT_EQ(a.find_first_of(c, 26), Piece::npos);
+  ASSERT_EQ(g.find_first_of(b), 13U);
+  ASSERT_EQ(g.find_first_of(c), 0U);
+  ASSERT_EQ(a.find_first_of(f), Piece::npos);
+  ASSERT_EQ(f.find_first_of(a), Piece::npos);
+  // empty string nonsense
+  ASSERT_EQ(a.find_first_of(d), Piece::npos);
+  ASSERT_EQ(a.find_first_of(e), Piece::npos);
+  ASSERT_EQ(d.find_first_of(b), Piece::npos);
+  ASSERT_EQ(e.find_first_of(b), Piece::npos);
+  ASSERT_EQ(d.find_first_of(d), Piece::npos);
+  ASSERT_EQ(e.find_first_of(d), Piece::npos);
+  ASSERT_EQ(d.find_first_of(e), Piece::npos);
+  ASSERT_EQ(e.find_first_of(e), Piece::npos);
+
+  ASSERT_EQ(a.find_first_not_of(b), 3U);
+  ASSERT_EQ(a.find_first_not_of(c), 0U);
+  ASSERT_EQ(b.find_first_not_of(a), Piece::npos);
+  ASSERT_EQ(c.find_first_not_of(a), Piece::npos);
+  ASSERT_EQ(f.find_first_not_of(a), 0U);
+  ASSERT_EQ(a.find_first_not_of(f), 0U);
+  ASSERT_EQ(a.find_first_not_of(d), 0U);
+  ASSERT_EQ(a.find_first_not_of(e), 0U);
+  // empty string nonsense
+  ASSERT_EQ(d.find_first_not_of(a), Piece::npos);
+  ASSERT_EQ(e.find_first_not_of(a), Piece::npos);
+  ASSERT_EQ(d.find_first_not_of(d), Piece::npos);
+  ASSERT_EQ(e.find_first_not_of(d), Piece::npos);
+  ASSERT_EQ(d.find_first_not_of(e), Piece::npos);
+  ASSERT_EQ(e.find_first_not_of(e), Piece::npos);
+
+  TypeParam equals(TestFixture::as_string("===="));
+  Piece h(equals);
+  ASSERT_EQ(h.find_first_not_of('='), Piece::npos);
+  ASSERT_EQ(h.find_first_not_of('=', 3), Piece::npos);
+  ASSERT_EQ(h.find_first_not_of('\0'), 0U);
+  ASSERT_EQ(g.find_first_not_of('x'), 2U);
+  ASSERT_EQ(f.find_first_not_of('\0'), 0U);
+  ASSERT_EQ(f.find_first_not_of('\0', 3), 4U);
+  ASSERT_EQ(f.find_first_not_of('\0', 2), 2U);
+  // empty string nonsense
+  ASSERT_EQ(d.find_first_not_of('x'), Piece::npos);
+  ASSERT_EQ(e.find_first_not_of('x'), Piece::npos);
+  ASSERT_EQ(d.find_first_not_of('\0'), Piece::npos);
+  ASSERT_EQ(e.find_first_not_of('\0'), Piece::npos);
+
+  //  Piece g("xx not found bb");
+  TypeParam fifty_six(TestFixture::as_string("56"));
+  Piece i(fifty_six);
+  ASSERT_EQ(h.find_last_of(a), Piece::npos);
+  ASSERT_EQ(g.find_last_of(a), g.size()-1);
+  ASSERT_EQ(a.find_last_of(b), 2U);
+  ASSERT_EQ(a.find_last_of(c), a.size()-1);
+  ASSERT_EQ(f.find_last_of(i), 6U);
+  ASSERT_EQ(a.find_last_of('a'), 0U);
+  ASSERT_EQ(a.find_last_of('b'), 1U);
+  ASSERT_EQ(a.find_last_of('z'), 25U);
+  ASSERT_EQ(a.find_last_of('a', 5), 0U);
+  ASSERT_EQ(a.find_last_of('b', 5), 1U);
+  ASSERT_EQ(a.find_last_of('b', 0), Piece::npos);
+  ASSERT_EQ(a.find_last_of('z', 25), 25U);
+  ASSERT_EQ(a.find_last_of('z', 24), Piece::npos);
+  ASSERT_EQ(f.find_last_of(i, 5), 5U);
+  ASSERT_EQ(f.find_last_of(i, 6), 6U);
+  ASSERT_EQ(f.find_last_of(a, 4), Piece::npos);
+  // empty string nonsense
+  ASSERT_EQ(f.find_last_of(d), Piece::npos);
+  ASSERT_EQ(f.find_last_of(e), Piece::npos);
+  ASSERT_EQ(f.find_last_of(d, 4), Piece::npos);
+  ASSERT_EQ(f.find_last_of(e, 4), Piece::npos);
+  ASSERT_EQ(d.find_last_of(d), Piece::npos);
+  ASSERT_EQ(d.find_last_of(e), Piece::npos);
+  ASSERT_EQ(e.find_last_of(d), Piece::npos);
+  ASSERT_EQ(e.find_last_of(e), Piece::npos);
+  ASSERT_EQ(d.find_last_of(f), Piece::npos);
+  ASSERT_EQ(e.find_last_of(f), Piece::npos);
+  ASSERT_EQ(d.find_last_of(d, 4), Piece::npos);
+  ASSERT_EQ(d.find_last_of(e, 4), Piece::npos);
+  ASSERT_EQ(e.find_last_of(d, 4), Piece::npos);
+  ASSERT_EQ(e.find_last_of(e, 4), Piece::npos);
+  ASSERT_EQ(d.find_last_of(f, 4), Piece::npos);
+  ASSERT_EQ(e.find_last_of(f, 4), Piece::npos);
+
+  ASSERT_EQ(a.find_last_not_of(b), a.size()-1);
+  ASSERT_EQ(a.find_last_not_of(c), 22U);
+  ASSERT_EQ(b.find_last_not_of(a), Piece::npos);
+  ASSERT_EQ(b.find_last_not_of(b), Piece::npos);
+  ASSERT_EQ(f.find_last_not_of(i), 4U);
+  ASSERT_EQ(a.find_last_not_of(c, 24), 22U);
+  ASSERT_EQ(a.find_last_not_of(b, 3), 3U);
+  ASSERT_EQ(a.find_last_not_of(b, 2), Piece::npos);
+  // empty string nonsense
+  ASSERT_EQ(f.find_last_not_of(d), f.size()-1);
+  ASSERT_EQ(f.find_last_not_of(e), f.size()-1);
+  ASSERT_EQ(f.find_last_not_of(d, 4), 4U);
+  ASSERT_EQ(f.find_last_not_of(e, 4), 4U);
+  ASSERT_EQ(d.find_last_not_of(d), Piece::npos);
+  ASSERT_EQ(d.find_last_not_of(e), Piece::npos);
+  ASSERT_EQ(e.find_last_not_of(d), Piece::npos);
+  ASSERT_EQ(e.find_last_not_of(e), Piece::npos);
+  ASSERT_EQ(d.find_last_not_of(f), Piece::npos);
+  ASSERT_EQ(e.find_last_not_of(f), Piece::npos);
+  ASSERT_EQ(d.find_last_not_of(d, 4), Piece::npos);
+  ASSERT_EQ(d.find_last_not_of(e, 4), Piece::npos);
+  ASSERT_EQ(e.find_last_not_of(d, 4), Piece::npos);
+  ASSERT_EQ(e.find_last_not_of(e, 4), Piece::npos);
+  ASSERT_EQ(d.find_last_not_of(f, 4), Piece::npos);
+  ASSERT_EQ(e.find_last_not_of(f, 4), Piece::npos);
+
+  ASSERT_EQ(h.find_last_not_of('x'), h.size() - 1);
+  ASSERT_EQ(h.find_last_not_of('='), Piece::npos);
+  ASSERT_EQ(b.find_last_not_of('c'), 1U);
+  ASSERT_EQ(h.find_last_not_of('x', 2), 2U);
+  ASSERT_EQ(h.find_last_not_of('=', 2), Piece::npos);
+  ASSERT_EQ(b.find_last_not_of('b', 1), 0U);
+  // empty string nonsense
+  ASSERT_EQ(d.find_last_not_of('x'), Piece::npos);
+  ASSERT_EQ(e.find_last_not_of('x'), Piece::npos);
+  ASSERT_EQ(d.find_last_not_of('\0'), Piece::npos);
+  ASSERT_EQ(e.find_last_not_of('\0'), Piece::npos);
+
+  ASSERT_EQ(a.substr(0, 3), b);
+  ASSERT_EQ(a.substr(23), c);
+  ASSERT_EQ(a.substr(23, 3), c);
+  ASSERT_EQ(a.substr(23, 99), c);
+  ASSERT_EQ(a.substr(0), a);
+  ASSERT_EQ(a.substr(3, 2), TestFixture::as_string("de"));
+  // empty string nonsense
+  ASSERT_EQ(a.substr(99, 2), e);
+  ASSERT_EQ(d.substr(99), e);
+  ASSERT_EQ(d.substr(0, 99), e);
+  ASSERT_EQ(d.substr(99, 99), e);
+}
+
+TYPED_TEST(CommonStringPieceTest, CheckCustom) {
+  TypeParam foobar(TestFixture::as_string("foobar"));
+  BasicStringPiece<TypeParam> a(foobar);
+  TypeParam s1(TestFixture::as_string("123"));
+  s1 += static_cast<typename TypeParam::value_type>('\0');
+  s1 += TestFixture::as_string("456");
+  BasicStringPiece<TypeParam> b(s1);
+  BasicStringPiece<TypeParam> e;
+  TypeParam s2;
+
+  // remove_prefix
+  BasicStringPiece<TypeParam> c(a);
+  c.remove_prefix(3);
+  ASSERT_EQ(c, TestFixture::as_string("bar"));
+  c = a;
+  c.remove_prefix(0);
+  ASSERT_EQ(c, a);
+  c.remove_prefix(c.size());
+  ASSERT_EQ(c, e);
+
+  // remove_suffix
+  c = a;
+  c.remove_suffix(3);
+  ASSERT_EQ(c, TestFixture::as_string("foo"));
+  c = a;
+  c.remove_suffix(0);
+  ASSERT_EQ(c, a);
+  c.remove_suffix(c.size());
+  ASSERT_EQ(c, e);
+
+  // set
+  c.set(foobar.c_str());
+  ASSERT_EQ(c, a);
+  c.set(foobar.c_str(), 6);
+  ASSERT_EQ(c, a);
+  c.set(foobar.c_str(), 0);
+  ASSERT_EQ(c, e);
+  c.set(foobar.c_str(), 7);  // Note, has an embedded NULL
+  ASSERT_NE(c, a);
+
+  // as_string
+  TypeParam s3(a.as_string().c_str(), 7);  // Note, has an embedded NULL
+  ASSERT_EQ(c, s3);
+  TypeParam s4(e.as_string());
+  ASSERT_TRUE(s4.empty());
+
+  // operator STRING_TYPE()
+  TypeParam s5(TypeParam(a).c_str(), 7);  // Note, has an embedded NULL
+  ASSERT_EQ(c, s5);
+  TypeParam s6(e);
+  ASSERT_TRUE(s6.empty());
+}
+
+TEST(StringPieceTest, CheckCustom) {
+  StringPiece a("foobar");
+  std::string s1("123");
+  s1 += '\0';
+  s1 += "456";
+  StringPiece b(s1);
+  StringPiece e;
+  std::string s2;
+
+  // CopyToString
+  a.CopyToString(&s2);
+  ASSERT_EQ(s2.size(), 6U);
+  ASSERT_EQ(s2, "foobar");
+  b.CopyToString(&s2);
+  ASSERT_EQ(s2.size(), 7U);
+  ASSERT_EQ(s1, s2);
+  e.CopyToString(&s2);
+  ASSERT_TRUE(s2.empty());
+
+  // AppendToString
+  s2.erase();
+  a.AppendToString(&s2);
+  ASSERT_EQ(s2.size(), 6U);
+  ASSERT_EQ(s2, "foobar");
+  a.AppendToString(&s2);
+  ASSERT_EQ(s2.size(), 12U);
+  ASSERT_EQ(s2, "foobarfoobar");
+
+  // starts_with
+  ASSERT_TRUE(a.starts_with(a));
+  ASSERT_TRUE(a.starts_with("foo"));
+  ASSERT_TRUE(a.starts_with(e));
+  ASSERT_TRUE(b.starts_with(s1));
+  ASSERT_TRUE(b.starts_with(b));
+  ASSERT_TRUE(b.starts_with(e));
+  ASSERT_TRUE(e.starts_with(""));
+  ASSERT_TRUE(!a.starts_with(b));
+  ASSERT_TRUE(!b.starts_with(a));
+  ASSERT_TRUE(!e.starts_with(a));
+
+  // ends with
+  ASSERT_TRUE(a.ends_with(a));
+  ASSERT_TRUE(a.ends_with("bar"));
+  ASSERT_TRUE(a.ends_with(e));
+  ASSERT_TRUE(b.ends_with(s1));
+  ASSERT_TRUE(b.ends_with(b));
+  ASSERT_TRUE(b.ends_with(e));
+  ASSERT_TRUE(e.ends_with(""));
+  ASSERT_TRUE(!a.ends_with(b));
+  ASSERT_TRUE(!b.ends_with(a));
+  ASSERT_TRUE(!e.ends_with(a));
+
+  StringPiece c;
+  c.set("foobar", 6);
+  ASSERT_EQ(c, a);
+  c.set("foobar", 0);
+  ASSERT_EQ(c, e);
+  c.set("foobar", 7);
+  ASSERT_NE(c, a);
+}
+
+TYPED_TEST(CommonStringPieceTest, CheckNULL) {
+  // We used to crash here, but now we don't.
+  BasicStringPiece<TypeParam> s(nullptr);
+  ASSERT_EQ(s.data(), nullptr);
+  ASSERT_EQ(s.size(), 0U);
+
+  s.set(nullptr);
+  ASSERT_EQ(s.data(), nullptr);
+  ASSERT_EQ(s.size(), 0U);
+
+  TypeParam str(s);
+  ASSERT_EQ(str.length(), 0U);
+  ASSERT_EQ(str, TypeParam());
+
+  str = s.as_string();
+  ASSERT_EQ(str.length(), 0U);
+  ASSERT_EQ(str, TypeParam());
+}
+
+TYPED_TEST(CommonStringPieceTest, CheckComparisons2) {
+  TypeParam alphabet(TestFixture::as_string("abcdefghijklmnopqrstuvwxyz"));
+  TypeParam alphabet_z(TestFixture::as_string("abcdefghijklmnopqrstuvwxyzz"));
+  TypeParam alphabet_y(TestFixture::as_string("abcdefghijklmnopqrstuvwxyy"));
+  BasicStringPiece<TypeParam> abc(alphabet);
+
+  // Check comparison operations on strings longer than 4 bytes.
+  ASSERT_EQ(abc, BasicStringPiece<TypeParam>(alphabet));
+  ASSERT_EQ(abc.compare(BasicStringPiece<TypeParam>(alphabet)), 0);
+
+  ASSERT_TRUE(abc < BasicStringPiece<TypeParam>(alphabet_z));
+  ASSERT_LT(abc.compare(BasicStringPiece<TypeParam>(alphabet_z)), 0);
+
+  ASSERT_TRUE(abc > BasicStringPiece<TypeParam>(alphabet_y));
+  ASSERT_GT(abc.compare(BasicStringPiece<TypeParam>(alphabet_y)), 0);
+}
+
+// Test operations only supported by std::string version.
+TEST(StringPieceTest, CheckComparisons2) {
+  StringPiece abc("abcdefghijklmnopqrstuvwxyz");
+
+  // starts_with
+  ASSERT_TRUE(abc.starts_with(abc));
+  ASSERT_TRUE(abc.starts_with("abcdefghijklm"));
+  ASSERT_TRUE(!abc.starts_with("abcdefguvwxyz"));
+
+  // ends_with
+  ASSERT_TRUE(abc.ends_with(abc));
+  ASSERT_TRUE(!abc.ends_with("abcdefguvwxyz"));
+  ASSERT_TRUE(abc.ends_with("nopqrstuvwxyz"));
+}
+
+TYPED_TEST(CommonStringPieceTest, StringCompareNotAmbiguous) {
+  ASSERT_TRUE(TestFixture::as_string("hello").c_str() ==
+              TestFixture::as_string("hello"));
+  ASSERT_TRUE(TestFixture::as_string("hello").c_str() <
+              TestFixture::as_string("world"));
+}
+
+TYPED_TEST(CommonStringPieceTest, HeterogenousStringPieceEquals) {
+  TypeParam hello(TestFixture::as_string("hello"));
+
+  ASSERT_EQ(BasicStringPiece<TypeParam>(hello), hello);
+  ASSERT_EQ(hello.c_str(), BasicStringPiece<TypeParam>(hello));
+}
+
+// string16-specific stuff
+TEST(StringPiece16Test, CheckSTL) {
+  // Check some non-ASCII characters.
+  string16 fifth(ASCIIToUTF16("123"));
+  fifth.push_back(0x0000);
+  fifth.push_back(0xd8c5);
+  fifth.push_back(0xdffe);
+  StringPiece16 f(fifth);
+
+  ASSERT_EQ(f[3], '\0');
+  ASSERT_EQ(f[5], static_cast<char16>(0xdffe));
+
+  ASSERT_EQ(f.size(), 6U);
+}
+
+
+TEST(StringPiece16Test, CheckConversion) {
+  // Make sure that we can convert from UTF-8 to UTF-16 and back. We use the
+  // G clef character (U+1D11E), which is four UTF-8 bytes and a UTF-16
+  // surrogate pair, to test this.
+  ASSERT_EQ(
+      UTF16ToUTF8(
+          StringPiece16(UTF8ToUTF16("\xf0\x9d\x84\x9e")).as_string()),
+      "\xf0\x9d\x84\x9e");
+}
+
+TYPED_TEST(CommonStringPieceTest, CheckConstructors) {
+  TypeParam str(TestFixture::as_string("hello world"));
+  TypeParam empty;
+
+  ASSERT_EQ(str, BasicStringPiece<TypeParam>(str));
+  ASSERT_EQ(str, BasicStringPiece<TypeParam>(str.c_str()));
+  ASSERT_TRUE(TestFixture::as_string("hello") ==
+              BasicStringPiece<TypeParam>(str.c_str(), 5));
+  ASSERT_EQ(
+      empty,
+      BasicStringPiece<TypeParam>(
+          str.c_str(),
+          static_cast<typename BasicStringPiece<TypeParam>::size_type>(0)));
+  ASSERT_EQ(empty, BasicStringPiece<TypeParam>(nullptr));
+  ASSERT_TRUE(
+      empty ==
+      BasicStringPiece<TypeParam>(
+          nullptr,
+          static_cast<typename BasicStringPiece<TypeParam>::size_type>(0)));
+  ASSERT_EQ(empty, BasicStringPiece<TypeParam>());
+  ASSERT_EQ(str, BasicStringPiece<TypeParam>(str.begin(), str.end()));
+  ASSERT_EQ(empty, BasicStringPiece<TypeParam>(str.begin(), str.begin()));
+  ASSERT_EQ(empty, BasicStringPiece<TypeParam>(empty));
+  ASSERT_EQ(empty, BasicStringPiece<TypeParam>(empty.begin(), empty.end()));
+}
+
+TEST(StringPieceTest, ConstexprCtor) {
+  {
+    constexpr StringPiece piece;
+    std::ignore = piece;
+  }
+
+  {
+    constexpr StringPiece piece("abc");
+    std::ignore = piece;
+  }
+
+  {
+    constexpr StringPiece piece("abc", 2);
+    std::ignore = piece;
+  }
+}
+
+TEST(StringPieceTest, ConstexprData) {
+  {
+    constexpr StringPiece piece;
+    static_assert(piece.data() == nullptr, "");
+  }
+
+  {
+    constexpr StringPiece piece("abc");
+    static_assert(piece.data()[0] == 'a', "");
+    static_assert(piece.data()[1] == 'b', "");
+    static_assert(piece.data()[2] == 'c', "");
+  }
+
+  {
+    constexpr StringPiece piece("def", 2);
+    static_assert(piece.data()[0] == 'd', "");
+    static_assert(piece.data()[1] == 'e', "");
+  }
+}
+
+TEST(StringPieceTest, ConstexprSize) {
+  {
+    constexpr StringPiece piece;
+    static_assert(piece.size() == 0, "");
+  }
+
+  {
+    constexpr StringPiece piece("abc");
+    static_assert(piece.size() == 3, "");
+  }
+
+  {
+    constexpr StringPiece piece("def", 2);
+    static_assert(piece.size() == 2, "");
+  }
+}
+
+TEST(StringPieceTest, Compare) {
+  constexpr StringPiece piece = "def";
+
+  static_assert(piece.compare("ab") == 1, "");
+  static_assert(piece.compare("abc") == 1, "");
+  static_assert(piece.compare("abcd") == 1, "");
+  static_assert(piece.compare("de") == 1, "");
+  static_assert(piece.compare("def") == 0, "");
+  static_assert(piece.compare("defg") == -1, "");
+  static_assert(piece.compare("gh") == -1, "");
+  static_assert(piece.compare("ghi") == -1, "");
+  static_assert(piece.compare("ghij") == -1, "");
+}
+
+TEST(StringPieceTest, StartsWith) {
+  constexpr StringPiece piece("abc");
+
+  static_assert(piece.starts_with(""), "");
+  static_assert(piece.starts_with("a"), "");
+  static_assert(piece.starts_with("ab"), "");
+  static_assert(piece.starts_with("abc"), "");
+
+  static_assert(!piece.starts_with("b"), "");
+  static_assert(!piece.starts_with("bc"), "");
+
+  static_assert(!piece.starts_with("abcd"), "");
+}
+
+TEST(StringPieceTest, EndsWith) {
+  constexpr StringPiece piece("abc");
+
+  static_assert(piece.ends_with(""), "");
+  static_assert(piece.ends_with("c"), "");
+  static_assert(piece.ends_with("bc"), "");
+  static_assert(piece.ends_with("abc"), "");
+
+  static_assert(!piece.ends_with("a"), "");
+  static_assert(!piece.ends_with("ab"), "");
+
+  static_assert(!piece.ends_with("abcd"), "");
+}
+
+}  // namespace base
diff --git a/base/strings/string_split.cc b/base/strings/string_split.cc
new file mode 100644
index 0000000..a8180b2
--- /dev/null
+++ b/base/strings/string_split.cc
@@ -0,0 +1,268 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/string_split.h"
+
+#include <stddef.h>
+
+#include "base/logging.h"
+#include "base/strings/string_util.h"
+#include "base/third_party/icu/icu_utf.h"
+
+namespace base {
+
+namespace {
+
+// PieceToOutputType converts a StringPiece as needed to a given output type,
+// which is either the same StringPiece type (a no-op) or the corresponding
+// non-piece string type.
+//
+// The default converter is a no-op; it is used when OutputType is the
+// matching StringPiece type.
+template<typename Str, typename OutputType>
+OutputType PieceToOutputType(BasicStringPiece<Str> piece) {
+  return piece;
+}
+template<>  // Convert StringPiece to std::string
+std::string PieceToOutputType<std::string, std::string>(StringPiece piece) {
+  return piece.as_string();
+}
+template<>  // Convert StringPiece16 to string16.
+string16 PieceToOutputType<string16, string16>(StringPiece16 piece) {
+  return piece.as_string();
+}
+
+// Returns either the ASCII or UTF-16 whitespace.
+template<typename Str> BasicStringPiece<Str> WhitespaceForType();
+template<> StringPiece16 WhitespaceForType<string16>() {
+  return kWhitespaceUTF16;
+}
+template<> StringPiece WhitespaceForType<std::string>() {
+  return kWhitespaceASCII;
+}
+
+// Optimize the single-character case to call find() on the string instead,
+// since this is the common case and can be made faster. This could have been
+// done with template specialization too, but would have been less clear.
+//
+// There is no corresponding FindFirstNotOf because StringPiece already
+// implements these different versions that do the optimized searching.
+size_t FindFirstOf(StringPiece piece, char c, size_t pos) {
+  return piece.find(c, pos);
+}
+size_t FindFirstOf(StringPiece16 piece, char16 c, size_t pos) {
+  return piece.find(c, pos);
+}
+size_t FindFirstOf(StringPiece piece, StringPiece one_of, size_t pos) {
+  return piece.find_first_of(one_of, pos);
+}
+size_t FindFirstOf(StringPiece16 piece, StringPiece16 one_of, size_t pos) {
+  return piece.find_first_of(one_of, pos);
+}
+
+// General string splitter template. Can take 8- or 16-bit input, can produce
+// the corresponding string or StringPiece output, and can take single- or
+// multiple-character delimiters.
+//
+// DelimiterType is either a character (Str::value_type) or a string piece of
+// multiple characters (BasicStringPiece<Str>). StringPiece has a version of
+// find for both of these cases, and the single-character version is the most
+// common and can be implemented faster, which is why this is a template.
+template<typename Str, typename OutputStringType, typename DelimiterType>
+static std::vector<OutputStringType> SplitStringT(
+    BasicStringPiece<Str> str,
+    DelimiterType delimiter,
+    WhitespaceHandling whitespace,
+    SplitResult result_type) {
+  std::vector<OutputStringType> result;
+  if (str.empty())
+    return result;
+
+  size_t start = 0;
+  while (start != Str::npos) {
+    size_t end = FindFirstOf(str, delimiter, start);
+
+    BasicStringPiece<Str> piece;
+    if (end == Str::npos) {
+      piece = str.substr(start);
+      start = Str::npos;
+    } else {
+      piece = str.substr(start, end - start);
+      start = end + 1;
+    }
+
+    if (whitespace == TRIM_WHITESPACE)
+      piece = TrimString(piece, WhitespaceForType<Str>(), TRIM_ALL);
+
+    if (result_type == SPLIT_WANT_ALL || !piece.empty())
+      result.push_back(PieceToOutputType<Str, OutputStringType>(piece));
+  }
+  return result;
+}
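+
+// For illustration, with a single-character delimiter:
+//
+//   SplitStringT<std::string, std::string, char>(
+//       "a,,b", ',', KEEP_WHITESPACE, SPLIT_WANT_ALL)       -> {"a", "", "b"}
+//   SplitStringT<std::string, std::string, char>(
+//       "a,,b", ',', KEEP_WHITESPACE, SPLIT_WANT_NONEMPTY)  -> {"a", "b"}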
+
+bool AppendStringKeyValue(StringPiece input,
+                          char delimiter,
+                          StringPairs* result) {
+  // Always append a new item regardless of success (it might be empty). The
+  // code below will copy the strings directly into the result pair.
+  result->resize(result->size() + 1);
+  auto& result_pair = result->back();
+
+  // Find the delimiter.
+  size_t end_key_pos = input.find_first_of(delimiter);
+  if (end_key_pos == std::string::npos) {
+    DVLOG(1) << "cannot find delimiter in: " << input;
+    return false;    // No delimiter.
+  }
+  input.substr(0, end_key_pos).CopyToString(&result_pair.first);
+
+  // Find the value string.
+  StringPiece remains = input.substr(end_key_pos, input.size() - end_key_pos);
+  size_t begin_value_pos = remains.find_first_not_of(delimiter);
+  if (begin_value_pos == StringPiece::npos) {
+    DVLOG(1) << "cannot parse value from input: " << input;
+    return false;   // No value.
+  }
+  remains.substr(begin_value_pos, remains.size() - begin_value_pos)
+      .CopyToString(&result_pair.second);
+
+  return true;
+}
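+
+// For illustration (cf. the MultipleKeyValueDelimiters test): repeated
+// delimiters between key and value are absorbed, so
+// AppendStringKeyValue("key1:::value1", ':', &pairs) appends
+// ("key1", "value1") and returns true, while AppendStringKeyValue on input
+// with no delimiter appends ("", "") and returns false.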
+
+template <typename Str, typename OutputStringType>
+void SplitStringUsingSubstrT(BasicStringPiece<Str> input,
+                             BasicStringPiece<Str> delimiter,
+                             WhitespaceHandling whitespace,
+                             SplitResult result_type,
+                             std::vector<OutputStringType>* result) {
+  using Piece = BasicStringPiece<Str>;
+  using size_type = typename Piece::size_type;
+
+  result->clear();
+  for (size_type begin_index = 0, end_index = 0; end_index != Piece::npos;
+       begin_index = end_index + delimiter.size()) {
+    end_index = input.find(delimiter, begin_index);
+    Piece term = end_index == Piece::npos
+                     ? input.substr(begin_index)
+                     : input.substr(begin_index, end_index - begin_index);
+
+    if (whitespace == TRIM_WHITESPACE)
+      term = TrimString(term, WhitespaceForType<Str>(), TRIM_ALL);
+
+    if (result_type == SPLIT_WANT_ALL || !term.empty())
+      result->push_back(PieceToOutputType<Str, OutputStringType>(term));
+  }
+}
+
+}  // namespace
+
+std::vector<std::string> SplitString(StringPiece input,
+                                     StringPiece separators,
+                                     WhitespaceHandling whitespace,
+                                     SplitResult result_type) {
+  if (separators.size() == 1) {
+    return SplitStringT<std::string, std::string, char>(
+        input, separators[0], whitespace, result_type);
+  }
+  return SplitStringT<std::string, std::string, StringPiece>(
+      input, separators, whitespace, result_type);
+}
+
+std::vector<string16> SplitString(StringPiece16 input,
+                                  StringPiece16 separators,
+                                  WhitespaceHandling whitespace,
+                                  SplitResult result_type) {
+  if (separators.size() == 1) {
+    return SplitStringT<string16, string16, char16>(
+        input, separators[0], whitespace, result_type);
+  }
+  return SplitStringT<string16, string16, StringPiece16>(
+      input, separators, whitespace, result_type);
+}
+
+std::vector<StringPiece> SplitStringPiece(StringPiece input,
+                                          StringPiece separators,
+                                          WhitespaceHandling whitespace,
+                                          SplitResult result_type) {
+  if (separators.size() == 1) {
+    return SplitStringT<std::string, StringPiece, char>(
+        input, separators[0], whitespace, result_type);
+  }
+  return SplitStringT<std::string, StringPiece, StringPiece>(
+      input, separators, whitespace, result_type);
+}
+
+std::vector<StringPiece16> SplitStringPiece(StringPiece16 input,
+                                            StringPiece16 separators,
+                                            WhitespaceHandling whitespace,
+                                            SplitResult result_type) {
+  if (separators.size() == 1) {
+    return SplitStringT<string16, StringPiece16, char16>(
+        input, separators[0], whitespace, result_type);
+  }
+  return SplitStringT<string16, StringPiece16, StringPiece16>(
+      input, separators, whitespace, result_type);
+}
+
+bool SplitStringIntoKeyValuePairs(StringPiece input,
+                                  char key_value_delimiter,
+                                  char key_value_pair_delimiter,
+                                  StringPairs* key_value_pairs) {
+  key_value_pairs->clear();
+
+  std::vector<StringPiece> pairs = SplitStringPiece(
+      input, std::string(1, key_value_pair_delimiter),
+      TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
+  key_value_pairs->reserve(pairs.size());
+
+  bool success = true;
+  for (const StringPiece& pair : pairs) {
+    if (!AppendStringKeyValue(pair, key_value_delimiter, key_value_pairs)) {
+      // Don't return here, to allow for pairs without associated
+      // value or key; just record that the split failed.
+      success = false;
+    }
+  }
+  return success;
+}
+
+std::vector<string16> SplitStringUsingSubstr(StringPiece16 input,
+                                             StringPiece16 delimiter,
+                                             WhitespaceHandling whitespace,
+                                             SplitResult result_type) {
+  std::vector<string16> result;
+  SplitStringUsingSubstrT(input, delimiter, whitespace, result_type, &result);
+  return result;
+}
+
+std::vector<std::string> SplitStringUsingSubstr(StringPiece input,
+                                                StringPiece delimiter,
+                                                WhitespaceHandling whitespace,
+                                                SplitResult result_type) {
+  std::vector<std::string> result;
+  SplitStringUsingSubstrT(input, delimiter, whitespace, result_type, &result);
+  return result;
+}
+
+std::vector<StringPiece16> SplitStringPieceUsingSubstr(
+    StringPiece16 input,
+    StringPiece16 delimiter,
+    WhitespaceHandling whitespace,
+    SplitResult result_type) {
+  std::vector<StringPiece16> result;
+  SplitStringUsingSubstrT(input, delimiter, whitespace, result_type, &result);
+  return result;
+}
+
+std::vector<StringPiece> SplitStringPieceUsingSubstr(
+    StringPiece input,
+    StringPiece delimiter,
+    WhitespaceHandling whitespace,
+    SplitResult result_type) {
+  std::vector<StringPiece> result;
+  SplitStringUsingSubstrT(input, delimiter, whitespace, result_type, &result);
+  return result;
+}
+
+}  // namespace base
diff --git a/base/strings/string_split.h b/base/strings/string_split.h
new file mode 100644
index 0000000..24b9dfa
--- /dev/null
+++ b/base/strings/string_split.h
@@ -0,0 +1,129 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_STRING_SPLIT_H_
+#define BASE_STRINGS_STRING_SPLIT_H_
+
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+enum WhitespaceHandling {
+  KEEP_WHITESPACE,
+  TRIM_WHITESPACE,
+};
+
+enum SplitResult {
+  // Strictly return all results.
+  //
+  // If the input is ",," and the separator is ',', this will return a
+  // vector of three empty strings.
+  SPLIT_WANT_ALL,
+
+  // Only nonempty results will be added to the results. Multiple separators
+  // will be coalesced. Separators at the beginning and end of the input will
+  // be ignored. With TRIM_WHITESPACE, whitespace-only results will be dropped.
+  //
+  // If the input is ",," and the separator is ',', this will return an empty
+  // vector.
+  SPLIT_WANT_NONEMPTY,
+};
+
+// Split the given string on ANY of the given separators, returning copies of
+// the result.
+//
+// To split on either commas or semicolons, keeping all whitespace:
+//
+//   std::vector<std::string> tokens = base::SplitString(
+//       input, ",;", base::KEEP_WHITESPACE, base::SPLIT_WANT_ALL);
+BASE_EXPORT std::vector<std::string> SplitString(
+    StringPiece input,
+    StringPiece separators,
+    WhitespaceHandling whitespace,
+    SplitResult result_type);
+BASE_EXPORT std::vector<string16> SplitString(
+    StringPiece16 input,
+    StringPiece16 separators,
+    WhitespaceHandling whitespace,
+    SplitResult result_type);
+
+// Like SplitString above except it returns a vector of StringPieces which
+// reference the original buffer without copying. Although you have to be
+// careful to keep the original string unmodified, this provides an efficient
+// way to iterate through tokens in a string.
+//
+// To iterate through all whitespace-separated tokens in an input string:
+//
+//   for (const auto& cur :
+//        base::SplitStringPiece(input, base::kWhitespaceASCII,
+//                               base::KEEP_WHITESPACE,
+//                               base::SPLIT_WANT_NONEMPTY)) {
+//     ...
+BASE_EXPORT std::vector<StringPiece> SplitStringPiece(
+    StringPiece input,
+    StringPiece separators,
+    WhitespaceHandling whitespace,
+    SplitResult result_type);
+BASE_EXPORT std::vector<StringPiece16> SplitStringPiece(
+    StringPiece16 input,
+    StringPiece16 separators,
+    WhitespaceHandling whitespace,
+    SplitResult result_type);
+
+using StringPairs = std::vector<std::pair<std::string, std::string>>;
+
+// Splits |input| into key-value pairs according to the given delimiters and
+// removes whitespace leading each key and trailing each value. Returns true
+// only if each pair has a non-empty key and value. |key_value_pairs| will
+// include ("","") pairs for entries without |key_value_delimiter|.
+BASE_EXPORT bool SplitStringIntoKeyValuePairs(StringPiece input,
+                                              char key_value_delimiter,
+                                              char key_value_pair_delimiter,
+                                              StringPairs* key_value_pairs);
+
+// Similar to SplitString, but use a substring delimiter instead of a list of
+// characters that are all possible delimiters.
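+//
+// For example, splitting "a::b" with delimiter "::" yields {"a", "b"},
+// whereas SplitString with separators ":" would yield {"a", "", "b"}.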
+BASE_EXPORT std::vector<string16> SplitStringUsingSubstr(
+    StringPiece16 input,
+    StringPiece16 delimiter,
+    WhitespaceHandling whitespace,
+    SplitResult result_type);
+BASE_EXPORT std::vector<std::string> SplitStringUsingSubstr(
+    StringPiece input,
+    StringPiece delimiter,
+    WhitespaceHandling whitespace,
+    SplitResult result_type);
+
+// Like SplitStringUsingSubstr above except it returns a vector of StringPieces
+// which reference the original buffer without copying. Although you have to be
+// careful to keep the original string unmodified, this provides an efficient
+// way to iterate through tokens in a string.
+//
+// To iterate through all newline-separated tokens in an input string:
+//
+//   for (const auto& cur :
+//        base::SplitStringUsingSubstr(input, "\r\n",
+//                                     base::KEEP_WHITESPACE,
+//                                     base::SPLIT_WANT_NONEMPTY)) {
+//     ...
+BASE_EXPORT std::vector<StringPiece16> SplitStringPieceUsingSubstr(
+    StringPiece16 input,
+    StringPiece16 delimiter,
+    WhitespaceHandling whitespace,
+    SplitResult result_type);
+BASE_EXPORT std::vector<StringPiece> SplitStringPieceUsingSubstr(
+    StringPiece input,
+    StringPiece delimiter,
+    WhitespaceHandling whitespace,
+    SplitResult result_type);
+
+}  // namespace base
+
+#endif  // BASE_STRINGS_STRING_SPLIT_H_
diff --git a/base/strings/string_split_unittest.cc b/base/strings/string_split_unittest.cc
new file mode 100644
index 0000000..bf09aa5
--- /dev/null
+++ b/base/strings/string_split_unittest.cc
@@ -0,0 +1,386 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/string_split.h"
+
+#include <stddef.h>
+
+#include "base/macros.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::ElementsAre;
+
+namespace base {
+
+class SplitStringIntoKeyValuePairsTest : public testing::Test {
+ protected:
+  base::StringPairs kv_pairs;
+};
+
+TEST_F(SplitStringIntoKeyValuePairsTest, EmptyString) {
+  EXPECT_TRUE(SplitStringIntoKeyValuePairs(std::string(),
+                                           ':',  // Key-value delimiter
+                                           ',',  // Key-value pair delimiter
+                                           &kv_pairs));
+  EXPECT_TRUE(kv_pairs.empty());
+}
+
+TEST_F(SplitStringIntoKeyValuePairsTest, MissingKeyValueDelimiter) {
+  EXPECT_FALSE(SplitStringIntoKeyValuePairs("key1,key2:value2",
+                                            ':',  // Key-value delimiter
+                                            ',',  // Key-value pair delimiter
+                                            &kv_pairs));
+  ASSERT_EQ(2U, kv_pairs.size());
+  EXPECT_TRUE(kv_pairs[0].first.empty());
+  EXPECT_TRUE(kv_pairs[0].second.empty());
+  EXPECT_EQ("key2", kv_pairs[1].first);
+  EXPECT_EQ("value2", kv_pairs[1].second);
+}
+
+TEST_F(SplitStringIntoKeyValuePairsTest, EmptyKeyWithKeyValueDelimiter) {
+  EXPECT_TRUE(SplitStringIntoKeyValuePairs(":value1,key2:value2",
+                                           ':',  // Key-value delimiter
+                                           ',',  // Key-value pair delimiter
+                                           &kv_pairs));
+  ASSERT_EQ(2U, kv_pairs.size());
+  EXPECT_TRUE(kv_pairs[0].first.empty());
+  EXPECT_EQ("value1", kv_pairs[0].second);
+  EXPECT_EQ("key2", kv_pairs[1].first);
+  EXPECT_EQ("value2", kv_pairs[1].second);
+}
+
+TEST_F(SplitStringIntoKeyValuePairsTest, TrailingAndLeadingPairDelimiter) {
+  EXPECT_TRUE(SplitStringIntoKeyValuePairs(",key1:value1,key2:value2,",
+                                           ':',   // Key-value delimiter
+                                           ',',   // Key-value pair delimiter
+                                           &kv_pairs));
+  ASSERT_EQ(2U, kv_pairs.size());
+  EXPECT_EQ("key1", kv_pairs[0].first);
+  EXPECT_EQ("value1", kv_pairs[0].second);
+  EXPECT_EQ("key2", kv_pairs[1].first);
+  EXPECT_EQ("value2", kv_pairs[1].second);
+}
+
+TEST_F(SplitStringIntoKeyValuePairsTest, EmptyPair) {
+  EXPECT_TRUE(SplitStringIntoKeyValuePairs("key1:value1,,key3:value3",
+                                           ':',   // Key-value delimiter
+                                           ',',   // Key-value pair delimiter
+                                           &kv_pairs));
+  ASSERT_EQ(2U, kv_pairs.size());
+  EXPECT_EQ("key1", kv_pairs[0].first);
+  EXPECT_EQ("value1", kv_pairs[0].second);
+  EXPECT_EQ("key3", kv_pairs[1].first);
+  EXPECT_EQ("value3", kv_pairs[1].second);
+}
+
+TEST_F(SplitStringIntoKeyValuePairsTest, EmptyValue) {
+  EXPECT_FALSE(SplitStringIntoKeyValuePairs("key1:,key2:value2",
+                                            ':',   // Key-value delimiter
+                                            ',',   // Key-value pair delimiter
+                                            &kv_pairs));
+  ASSERT_EQ(2U, kv_pairs.size());
+  EXPECT_EQ("key1", kv_pairs[0].first);
+  EXPECT_EQ("", kv_pairs[0].second);
+  EXPECT_EQ("key2", kv_pairs[1].first);
+  EXPECT_EQ("value2", kv_pairs[1].second);
+}
+
+TEST_F(SplitStringIntoKeyValuePairsTest, UntrimmedWhitespace) {
+  EXPECT_TRUE(SplitStringIntoKeyValuePairs("key1 : value1",
+                                           ':',  // Key-value delimiter
+                                           ',',  // Key-value pair delimiter
+                                           &kv_pairs));
+  ASSERT_EQ(1U, kv_pairs.size());
+  EXPECT_EQ("key1 ", kv_pairs[0].first);
+  EXPECT_EQ(" value1", kv_pairs[0].second);
+}
+
+TEST_F(SplitStringIntoKeyValuePairsTest, TrimmedWhitespace) {
+  EXPECT_TRUE(SplitStringIntoKeyValuePairs("key1:value1 , key2:value2",
+                                           ':',   // Key-value delimiter
+                                           ',',   // Key-value pair delimiter
+                                           &kv_pairs));
+  ASSERT_EQ(2U, kv_pairs.size());
+  EXPECT_EQ("key1", kv_pairs[0].first);
+  EXPECT_EQ("value1", kv_pairs[0].second);
+  EXPECT_EQ("key2", kv_pairs[1].first);
+  EXPECT_EQ("value2", kv_pairs[1].second);
+}
+
+TEST_F(SplitStringIntoKeyValuePairsTest, MultipleKeyValueDelimiters) {
+  EXPECT_TRUE(SplitStringIntoKeyValuePairs("key1:::value1,key2:value2",
+                                           ':',   // Key-value delimiter
+                                           ',',   // Key-value pair delimiter
+                                           &kv_pairs));
+  ASSERT_EQ(2U, kv_pairs.size());
+  EXPECT_EQ("key1", kv_pairs[0].first);
+  EXPECT_EQ("value1", kv_pairs[0].second);
+  EXPECT_EQ("key2", kv_pairs[1].first);
+  EXPECT_EQ("value2", kv_pairs[1].second);
+}
+
+TEST_F(SplitStringIntoKeyValuePairsTest, OnlySplitAtGivenSeparator) {
+  std::string a("a ?!@#$%^&*()_+:/{}\\\t\nb");
+  EXPECT_TRUE(SplitStringIntoKeyValuePairs(a + "X" + a + "Y" + a + "X" + a,
+                                           'X',  // Key-value delimiter
+                                           'Y',  // Key-value pair delimiter
+                                           &kv_pairs));
+  ASSERT_EQ(2U, kv_pairs.size());
+  EXPECT_EQ(a, kv_pairs[0].first);
+  EXPECT_EQ(a, kv_pairs[0].second);
+  EXPECT_EQ(a, kv_pairs[1].first);
+  EXPECT_EQ(a, kv_pairs[1].second);
+}
+
+TEST_F(SplitStringIntoKeyValuePairsTest, DelimiterInValue) {
+  EXPECT_TRUE(SplitStringIntoKeyValuePairs("key1:va:ue1,key2:value2",
+                                           ':',   // Key-value delimiter
+                                           ',',   // Key-value pair delimiter
+                                           &kv_pairs));
+  ASSERT_EQ(2U, kv_pairs.size());
+  EXPECT_EQ("key1", kv_pairs[0].first);
+  EXPECT_EQ("va:ue1", kv_pairs[0].second);
+  EXPECT_EQ("key2", kv_pairs[1].first);
+  EXPECT_EQ("value2", kv_pairs[1].second);
+}
+
+TEST(SplitStringUsingSubstrTest, EmptyString) {
+  std::vector<std::string> results = SplitStringUsingSubstr(
+      std::string(), "DELIMITER", TRIM_WHITESPACE, SPLIT_WANT_ALL);
+  ASSERT_EQ(1u, results.size());
+  EXPECT_THAT(results, ElementsAre(""));
+}
+
+TEST(StringUtilTest, SplitString_Basics) {
+  std::vector<std::string> r;
+
+  r = SplitString(std::string(), ",:;", KEEP_WHITESPACE, SPLIT_WANT_ALL);
+  EXPECT_TRUE(r.empty());
+
+  // Empty separator list
+  r = SplitString("hello, world", "", KEEP_WHITESPACE, SPLIT_WANT_ALL);
+  ASSERT_EQ(1u, r.size());
+  EXPECT_EQ("hello, world", r[0]);
+
+  // Should split on any of the separators.
+  r = SplitString("::,,;;", ",:;", KEEP_WHITESPACE, SPLIT_WANT_ALL);
+  ASSERT_EQ(7u, r.size());
+  for (auto str : r)
+    ASSERT_TRUE(str.empty());
+
+  r = SplitString("red, green; blue:", ",:;", TRIM_WHITESPACE,
+                  SPLIT_WANT_NONEMPTY);
+  ASSERT_EQ(3u, r.size());
+  EXPECT_EQ("red", r[0]);
+  EXPECT_EQ("green", r[1]);
+  EXPECT_EQ("blue", r[2]);
+
+  // Want to split a string along whitespace sequences.
+  r = SplitString("  red green   \tblue\n", " \t\n", TRIM_WHITESPACE,
+                  SPLIT_WANT_NONEMPTY);
+  ASSERT_EQ(3u, r.size());
+  EXPECT_EQ("red", r[0]);
+  EXPECT_EQ("green", r[1]);
+  EXPECT_EQ("blue", r[2]);
+
+  // Weird case of splitting on spaces but not trimming.
+  r = SplitString(" red ", " ", TRIM_WHITESPACE, SPLIT_WANT_ALL);
+  ASSERT_EQ(3u, r.size());
+  EXPECT_EQ("", r[0]);  // Before the first space.
+  EXPECT_EQ("red", r[1]);
+  EXPECT_EQ("", r[2]);  // After the last space.
+}
+
+TEST(StringUtilTest, SplitString_WhitespaceAndResultType) {
+  std::vector<std::string> r;
+
+  // Empty input handling.
+  r = SplitString(std::string(), ",", KEEP_WHITESPACE, SPLIT_WANT_ALL);
+  EXPECT_TRUE(r.empty());
+  r = SplitString(std::string(), ",", KEEP_WHITESPACE, SPLIT_WANT_NONEMPTY);
+  EXPECT_TRUE(r.empty());
+
+  // Input string is space and we're trimming.
+  r = SplitString(" ", ",", TRIM_WHITESPACE, SPLIT_WANT_ALL);
+  ASSERT_EQ(1u, r.size());
+  EXPECT_EQ("", r[0]);
+  r = SplitString(" ", ",", TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
+  EXPECT_TRUE(r.empty());
+
+  // Test all 4 combinations of flags on ", ,".
+  r = SplitString(", ,", ",", KEEP_WHITESPACE, SPLIT_WANT_ALL);
+  ASSERT_EQ(3u, r.size());
+  EXPECT_EQ("", r[0]);
+  EXPECT_EQ(" ", r[1]);
+  EXPECT_EQ("", r[2]);
+  r = SplitString(", ,", ",", KEEP_WHITESPACE, SPLIT_WANT_NONEMPTY);
+  ASSERT_EQ(1u, r.size());
+  ASSERT_EQ(" ", r[0]);
+  r = SplitString(", ,", ",", TRIM_WHITESPACE, SPLIT_WANT_ALL);
+  ASSERT_EQ(3u, r.size());
+  EXPECT_EQ("", r[0]);
+  EXPECT_EQ("", r[1]);
+  EXPECT_EQ("", r[2]);
+  r = SplitString(", ,", ",", TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
+  ASSERT_TRUE(r.empty());
+}
+
+TEST(SplitStringUsingSubstrTest, StringWithNoDelimiter) {
+  std::vector<std::string> results = SplitStringUsingSubstr(
+      "alongwordwithnodelimiter", "DELIMITER", TRIM_WHITESPACE,
+      SPLIT_WANT_ALL);
+  ASSERT_EQ(1u, results.size());
+  EXPECT_THAT(results, ElementsAre("alongwordwithnodelimiter"));
+}
+
+TEST(SplitStringUsingSubstrTest, LeadingDelimitersSkipped) {
+  std::vector<std::string> results = SplitStringUsingSubstr(
+      "DELIMITERDELIMITERDELIMITERoneDELIMITERtwoDELIMITERthree",
+      "DELIMITER", TRIM_WHITESPACE, SPLIT_WANT_ALL);
+  ASSERT_EQ(6u, results.size());
+  EXPECT_THAT(results, ElementsAre("", "", "", "one", "two", "three"));
+}
+
+TEST(SplitStringUsingSubstrTest, ConsecutiveDelimitersSkipped) {
+  std::vector<std::string> results = SplitStringUsingSubstr(
+      "unoDELIMITERDELIMITERDELIMITERdosDELIMITERtresDELIMITERDELIMITERcuatro",
+      "DELIMITER", TRIM_WHITESPACE, SPLIT_WANT_ALL);
+  ASSERT_EQ(7u, results.size());
+  EXPECT_THAT(results, ElementsAre("uno", "", "", "dos", "tres", "", "cuatro"));
+}
+
+TEST(SplitStringUsingSubstrTest, TrailingDelimitersSkipped) {
+  std::vector<std::string> results = SplitStringUsingSubstr(
+      "unDELIMITERdeuxDELIMITERtroisDELIMITERquatreDELIMITERDELIMITERDELIMITER",
+      "DELIMITER", TRIM_WHITESPACE, SPLIT_WANT_ALL);
+  ASSERT_EQ(7u, results.size());
+  EXPECT_THAT(
+      results, ElementsAre("un", "deux", "trois", "quatre", "", "", ""));
+}
+
+TEST(SplitStringPieceUsingSubstrTest, StringWithNoDelimiter) {
+  std::vector<base::StringPiece> results =
+      SplitStringPieceUsingSubstr("alongwordwithnodelimiter", "DELIMITER",
+                                  base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
+  ASSERT_EQ(1u, results.size());
+  EXPECT_THAT(results, ElementsAre("alongwordwithnodelimiter"));
+}
+
+TEST(SplitStringPieceUsingSubstrTest, LeadingDelimitersSkipped) {
+  std::vector<base::StringPiece> results = SplitStringPieceUsingSubstr(
+      "DELIMITERDELIMITERDELIMITERoneDELIMITERtwoDELIMITERthree", "DELIMITER",
+      base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
+  ASSERT_EQ(6u, results.size());
+  EXPECT_THAT(results, ElementsAre("", "", "", "one", "two", "three"));
+}
+
+TEST(SplitStringPieceUsingSubstrTest, ConsecutiveDelimitersSkipped) {
+  std::vector<base::StringPiece> results = SplitStringPieceUsingSubstr(
+      "unoDELIMITERDELIMITERDELIMITERdosDELIMITERtresDELIMITERDELIMITERcuatro",
+      "DELIMITER", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
+  ASSERT_EQ(7u, results.size());
+  EXPECT_THAT(results, ElementsAre("uno", "", "", "dos", "tres", "", "cuatro"));
+}
+
+TEST(SplitStringPieceUsingSubstrTest, TrailingDelimitersSkipped) {
+  std::vector<base::StringPiece> results = SplitStringPieceUsingSubstr(
+      "unDELIMITERdeuxDELIMITERtroisDELIMITERquatreDELIMITERDELIMITERDELIMITER",
+      "DELIMITER", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
+  ASSERT_EQ(7u, results.size());
+  EXPECT_THAT(results,
+              ElementsAre("un", "deux", "trois", "quatre", "", "", ""));
+}
+
+TEST(SplitStringPieceUsingSubstrTest, KeepWhitespace) {
+  std::vector<base::StringPiece> results = SplitStringPieceUsingSubstr(
+      "un DELIMITERdeux\tDELIMITERtrois\nDELIMITERquatre", "DELIMITER",
+      base::KEEP_WHITESPACE, base::SPLIT_WANT_ALL);
+  ASSERT_EQ(4u, results.size());
+  EXPECT_THAT(results, ElementsAre("un ", "deux\t", "trois\n", "quatre"));
+}
+
+TEST(SplitStringPieceUsingSubstrTest, TrimWhitespace) {
+  std::vector<base::StringPiece> results = SplitStringPieceUsingSubstr(
+      "un DELIMITERdeux\tDELIMITERtrois\nDELIMITERquatre", "DELIMITER",
+      base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
+  ASSERT_EQ(4u, results.size());
+  EXPECT_THAT(results, ElementsAre("un", "deux", "trois", "quatre"));
+}
+
+TEST(SplitStringPieceUsingSubstrTest, SplitWantAll) {
+  std::vector<base::StringPiece> results = SplitStringPieceUsingSubstr(
+      "unDELIMITERdeuxDELIMITERtroisDELIMITERDELIMITER", "DELIMITER",
+      base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
+  ASSERT_EQ(5u, results.size());
+  EXPECT_THAT(results, ElementsAre("un", "deux", "trois", "", ""));
+}
+
+TEST(SplitStringPieceUsingSubstrTest, SplitWantNonEmpty) {
+  std::vector<base::StringPiece> results = SplitStringPieceUsingSubstr(
+      "unDELIMITERdeuxDELIMITERtroisDELIMITERDELIMITER", "DELIMITER",
+      base::TRIM_WHITESPACE, base::SPLIT_WANT_NONEMPTY);
+  ASSERT_EQ(3u, results.size());
+  EXPECT_THAT(results, ElementsAre("un", "deux", "trois"));
+}
+
+TEST(StringSplitTest, StringSplitKeepWhitespace) {
+  std::vector<std::string> r;
+
+  r = SplitString("   ", "*", base::KEEP_WHITESPACE, base::SPLIT_WANT_ALL);
+  ASSERT_EQ(1U, r.size());
+  EXPECT_EQ(r[0], "   ");
+
+  r = SplitString("\t  \ta\t ", "\t", base::KEEP_WHITESPACE,
+                  base::SPLIT_WANT_ALL);
+  ASSERT_EQ(4U, r.size());
+  EXPECT_EQ(r[0], "");
+  EXPECT_EQ(r[1], "  ");
+  EXPECT_EQ(r[2], "a");
+  EXPECT_EQ(r[3], " ");
+
+  r = SplitString("\ta\t\nb\tcc", "\n", base::KEEP_WHITESPACE,
+                  base::SPLIT_WANT_ALL);
+  ASSERT_EQ(2U, r.size());
+  EXPECT_EQ(r[0], "\ta\t");
+  EXPECT_EQ(r[1], "b\tcc");
+}
+
+TEST(StringSplitTest, SplitStringAlongWhitespace) {
+  struct TestData {
+    const char* input;
+    const size_t expected_result_count;
+    const char* output1;
+    const char* output2;
+  } data[] = {
+    { "a",       1, "a",  ""   },
+    { " ",       0, "",   ""   },
+    { " a",      1, "a",  ""   },
+    { " ab ",    1, "ab", ""   },
+    { " ab c",   2, "ab", "c"  },
+    { " ab c ",  2, "ab", "c"  },
+    { " ab cd",  2, "ab", "cd" },
+    { " ab cd ", 2, "ab", "cd" },
+    { " \ta\t",  1, "a",  ""   },
+    { " b\ta\t", 2, "b",  "a"  },
+    { " b\tat",  2, "b",  "at" },
+    { "b\tat",   2, "b",  "at" },
+    { "b\t at",  2, "b",  "at" },
+  };
+  for (size_t i = 0; i < arraysize(data); ++i) {
+    std::vector<std::string> results = base::SplitString(
+        data[i].input, kWhitespaceASCII, base::KEEP_WHITESPACE,
+        base::SPLIT_WANT_NONEMPTY);
+    ASSERT_EQ(data[i].expected_result_count, results.size());
+    if (data[i].expected_result_count > 0)
+      ASSERT_EQ(data[i].output1, results[0]);
+    if (data[i].expected_result_count > 1)
+      ASSERT_EQ(data[i].output2, results[1]);
+  }
+}
+
+}  // namespace base
diff --git a/base/strings/string_tokenizer.h b/base/strings/string_tokenizer.h
new file mode 100644
index 0000000..72fc016
--- /dev/null
+++ b/base/strings/string_tokenizer.h
@@ -0,0 +1,258 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_STRING_TOKENIZER_H_
+#define BASE_STRINGS_STRING_TOKENIZER_H_
+
+#include <algorithm>
+#include <string>
+
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+// StringTokenizerT is a simple string tokenizer class.  It works like an
+// iterator that, with each step (see the GetNext method), updates members
+// that refer to the next token in the input string.  The user may optionally
+// configure the tokenizer to return delimiters.
+//
+// EXAMPLE 1:
+//
+//   char input[] = "this is a test";
+//   CStringTokenizer t(input, input + strlen(input), " ");
+//   while (t.GetNext()) {
+//     printf("%s\n", t.token().c_str());
+//   }
+//
+// Output:
+//
+//   this
+//   is
+//   a
+//   test
+//
+//
+// EXAMPLE 2:
+//
+//   std::string input = "no-cache=\"foo, bar\", private";
+//   StringTokenizer t(input, ", ");
+//   t.set_quote_chars("\"");
+//   while (t.GetNext()) {
+//     printf("%s\n", t.token().c_str());
+//   }
+//
+// Output:
+//
+//   no-cache="foo, bar"
+//   private
+//
+//
+// EXAMPLE 3:
+//
+//   bool next_is_option = false, next_is_value = false;
+//   std::string input = "text/html; charset=UTF-8; foo=bar";
+//   StringTokenizer t(input, "; =");
+//   t.set_options(StringTokenizer::RETURN_DELIMS);
+//   while (t.GetNext()) {
+//     if (t.token_is_delim()) {
+//       switch (*t.token_begin()) {
+//         case ';':
+//           next_is_option = true;
+//           break;
+//         case '=':
+//           next_is_value = true;
+//           break;
+//       }
+//     } else {
+//       const char* label;
+//       if (next_is_option) {
+//         label = "option-name";
+//         next_is_option = false;
+//       } else if (next_is_value) {
+//         label = "option-value";
+//         next_is_value = false;
+//       } else {
+//         label = "mime-type";
+//       }
+//       printf("%s: %s\n", label, t.token().c_str());
+//     }
+//   }
+//
+//
+template <class str, class const_iterator>
+class StringTokenizerT {
+ public:
+  typedef typename str::value_type char_type;
+
+  // Options that may be passed to set_options().
+  enum {
+    // Specifies that delimiters should be returned as tokens.
+    RETURN_DELIMS = 1 << 0,
+  };
+
+  // The string object must live longer than the tokenizer. In particular, this
+  // should not be constructed with a temporary. The deleted rvalue constructor
+  // blocks the most obvious instances of this (e.g. passing a string literal to
+  // the constructor), but caution must still be exercised.
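+  //
+  // For example, StringTokenizerT(std::string("tmp"), delims) does not
+  // compile: the temporary would bind to the deleted rvalue overload below.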
+  StringTokenizerT(const str& string,
+                   const str& delims) {
+    Init(string.begin(), string.end(), delims);
+  }
+
+  // Don't allow temporary strings to be used with string tokenizer, since
+  // Init() would otherwise save iterators to a temporary string.
+  StringTokenizerT(str&&, const str& delims) = delete;
+
+  StringTokenizerT(const_iterator string_begin,
+                   const_iterator string_end,
+                   const str& delims) {
+    Init(string_begin, string_end, delims);
+  }
+
+  // Set the options for this tokenizer.  By default, this is 0.
+  void set_options(int options) { options_ = options; }
+
+  // Set the characters to regard as quotes.  By default, this is empty.  When
+  // a quote char is encountered, the tokenizer will switch into a mode where
+  // it ignores delimiters that it finds.  It switches out of this mode once it
+  // finds another instance of the quote char.  If a backslash is encountered
+  // within a quoted string, then the next character is skipped.
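+  //
+  // For example (cf. the ParseQuotedString test), tokenizing
+  // "foo bar 'hello world' baz" on " " with quote chars "'" yields the
+  // tokens: foo, bar, 'hello world', baz.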
+  void set_quote_chars(const str& quotes) { quotes_ = quotes; }
+
+  // Call this method to advance the tokenizer to the next token.  This
+  // returns false if the tokenizer is complete.  This method must be called
+  // before calling any of the token* methods.
+  bool GetNext() {
+    if (quotes_.empty() && options_ == 0)
+      return QuickGetNext();
+    else
+      return FullGetNext();
+  }
+
+  // Start iterating through tokens from the beginning of the string.
+  void Reset() {
+    token_end_ = start_pos_;
+  }
+
+  // Returns true if the current token is a delimiter.  When the tokenizer is
+  // configured with the RETURN_DELIMS option (via set_options()), this
+  // method can be used to check if the returned token is actually a
+  // delimiter.
+  bool token_is_delim() const { return token_is_delim_; }
+
+  // If GetNext() returned true, then these methods may be used to read the
+  // value of the token.
+  const_iterator token_begin() const { return token_begin_; }
+  const_iterator token_end() const { return token_end_; }
+  str token() const { return str(token_begin_, token_end_); }
+  BasicStringPiece<str> token_piece() const {
+    return BasicStringPiece<str>(&*token_begin_,
+                                 std::distance(token_begin_, token_end_));
+  }
+
+ private:
+  void Init(const_iterator string_begin,
+            const_iterator string_end,
+            const str& delims) {
+    start_pos_ = string_begin;
+    token_begin_ = string_begin;
+    token_end_ = string_begin;
+    end_ = string_end;
+    delims_ = delims;
+    options_ = 0;
+    token_is_delim_ = false;
+  }
+
+  // Implementation of GetNext() for when we have no quote characters. We have
+  // two separate implementations because AdvanceOne() is a hot spot in large
+  // text files with large tokens.
+  bool QuickGetNext() {
+    token_is_delim_ = false;
+    for (;;) {
+      token_begin_ = token_end_;
+      if (token_end_ == end_)
+        return false;
+      ++token_end_;
+      if (delims_.find(*token_begin_) == str::npos)
+        break;
+      // else skip over delimiter.
+    }
+    while (token_end_ != end_ && delims_.find(*token_end_) == str::npos)
+      ++token_end_;
+    return true;
+  }
+
+  // Implementation of GetNext() for when we have to take quotes into account.
+  bool FullGetNext() {
+    AdvanceState state;
+    token_is_delim_ = false;
+    for (;;) {
+      token_begin_ = token_end_;
+      if (token_end_ == end_)
+        return false;
+      ++token_end_;
+      if (AdvanceOne(&state, *token_begin_))
+        break;
+      if (options_ & RETURN_DELIMS) {
+        token_is_delim_ = true;
+        return true;
+      }
+      // else skip over delimiter.
+    }
+    while (token_end_ != end_ && AdvanceOne(&state, *token_end_))
+      ++token_end_;
+    return true;
+  }
+
+  bool IsDelim(char_type c) const {
+    return delims_.find(c) != str::npos;
+  }
+
+  bool IsQuote(char_type c) const {
+    return quotes_.find(c) != str::npos;
+  }
+
+  struct AdvanceState {
+    bool in_quote;
+    bool in_escape;
+    char_type quote_char;
+    AdvanceState() : in_quote(false), in_escape(false), quote_char('\0') {}
+  };
+
+  // Returns true if a delimiter was not hit.
+  bool AdvanceOne(AdvanceState* state, char_type c) {
+    if (state->in_quote) {
+      if (state->in_escape) {
+        state->in_escape = false;
+      } else if (c == '\\') {
+        state->in_escape = true;
+      } else if (c == state->quote_char) {
+        state->in_quote = false;
+      }
+    } else {
+      if (IsDelim(c))
+        return false;
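+      // quote_char is assigned unconditionally here; in_quote only becomes
+      // true when the character is actually a quote.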
+      state->in_quote = IsQuote(state->quote_char = c);
+    }
+    return true;
+  }
+
+  const_iterator start_pos_;
+  const_iterator token_begin_;
+  const_iterator token_end_;
+  const_iterator end_;
+  str delims_;
+  str quotes_;
+  int options_;
+  bool token_is_delim_;
+};
+
+typedef StringTokenizerT<std::string, std::string::const_iterator>
+    StringTokenizer;
+typedef StringTokenizerT<std::wstring, std::wstring::const_iterator>
+    WStringTokenizer;
+typedef StringTokenizerT<std::string, const char*> CStringTokenizer;
+
+}  // namespace base
+
+#endif  // BASE_STRINGS_STRING_TOKENIZER_H_
diff --git a/base/strings/string_tokenizer_fuzzer.cc b/base/strings/string_tokenizer_fuzzer.cc
new file mode 100644
index 0000000..917041b
--- /dev/null
+++ b/base/strings/string_tokenizer_fuzzer.cc
@@ -0,0 +1,56 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <string>
+
+#include "base/strings/string_tokenizer.h"
+
+void GetAllTokens(base::StringTokenizer& t) {
+  while (t.GetNext()) {
+    (void)t.token();
+  }
+}
+
+// Entry point for LibFuzzer.
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+  uint8_t size_t_bytes = sizeof(size_t);
+  if (size < size_t_bytes + 1) {
+    return 0;
+  }
+
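+  // Input layout, as consumed below: [sizeof(size_t) bytes: length seed]
+  // [pattern_size bytes: delimiter pattern] [remainder: string to tokenize].
+  //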
+  // Derive the pattern size from the leading bytes and the remaining length;
+  // otherwise most inputs would bail out early and fuzzing would be
+  // inefficient.
+  size_t pattern_size =
+      *reinterpret_cast<const size_t*>(data) % (size - size_t_bytes);
+
+  std::string pattern(reinterpret_cast<const char*>(data + size_t_bytes),
+                      pattern_size);
+  std::string input(
+      reinterpret_cast<const char*>(data + size_t_bytes + pattern_size),
+      size - pattern_size - size_t_bytes);
+
+  // Exercise quote_chars and options as well. Otherwise full coverage
+  // is impossible, since IsQuote(), FullGetNext(), and other functions
+  // would never be called.
+  base::StringTokenizer t(input, pattern);
+  GetAllTokens(t);
+
+  base::StringTokenizer t_quote(input, pattern);
+  t_quote.set_quote_chars("\"");
+  GetAllTokens(t_quote);
+
+  base::StringTokenizer t_options(input, pattern);
+  t_options.set_options(base::StringTokenizer::RETURN_DELIMS);
+  GetAllTokens(t_options);
+
+  base::StringTokenizer t_quote_and_options(input, pattern);
+  t_quote_and_options.set_quote_chars("\"");
+  t_quote_and_options.set_options(base::StringTokenizer::RETURN_DELIMS);
+  GetAllTokens(t_quote_and_options);
+
+  return 0;
+}
diff --git a/base/strings/string_tokenizer_unittest.cc b/base/strings/string_tokenizer_unittest.cc
new file mode 100644
index 0000000..d391845
--- /dev/null
+++ b/base/strings/string_tokenizer_unittest.cc
@@ -0,0 +1,234 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/string_tokenizer.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+using std::string;
+
+namespace base {
+
+namespace {
+
+TEST(StringTokenizerTest, Simple) {
+  string input = "this is a test";
+  StringTokenizer t(input, " ");
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("this"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("is"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("a"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("test"), t.token());
+
+  EXPECT_FALSE(t.GetNext());
+}
+
+TEST(StringTokenizerTest, Reset) {
+  string input = "this is a test";
+  StringTokenizer t(input, " ");
+
+  for (int i = 0; i < 2; ++i) {
+    EXPECT_TRUE(t.GetNext());
+    EXPECT_EQ(string("this"), t.token());
+
+    EXPECT_TRUE(t.GetNext());
+    EXPECT_EQ(string("is"), t.token());
+
+    EXPECT_TRUE(t.GetNext());
+    EXPECT_EQ(string("a"), t.token());
+
+    EXPECT_TRUE(t.GetNext());
+    EXPECT_EQ(string("test"), t.token());
+
+    EXPECT_FALSE(t.GetNext());
+    t.Reset();
+  }
+}
+
+TEST(StringTokenizerTest, RetDelims) {
+  string input = "this is a test";
+  StringTokenizer t(input, " ");
+  t.set_options(StringTokenizer::RETURN_DELIMS);
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("this"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string(" "), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("is"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string(" "), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("a"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string(" "), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("test"), t.token());
+
+  EXPECT_FALSE(t.GetNext());
+}
+
+TEST(StringTokenizerTest, ManyDelims) {
+  string input = "this: is, a-test";
+  StringTokenizer t(input, ": ,-");
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("this"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("is"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("a"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("test"), t.token());
+
+  EXPECT_FALSE(t.GetNext());
+}
+
+TEST(StringTokenizerTest, ParseHeader) {
+  string input = "Content-Type: text/html ; charset=UTF-8";
+  StringTokenizer t(input, ": ;=");
+  t.set_options(StringTokenizer::RETURN_DELIMS);
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_FALSE(t.token_is_delim());
+  EXPECT_EQ(string("Content-Type"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_TRUE(t.token_is_delim());
+  EXPECT_EQ(string(":"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_TRUE(t.token_is_delim());
+  EXPECT_EQ(string(" "), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_FALSE(t.token_is_delim());
+  EXPECT_EQ(string("text/html"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_TRUE(t.token_is_delim());
+  EXPECT_EQ(string(" "), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_TRUE(t.token_is_delim());
+  EXPECT_EQ(string(";"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_TRUE(t.token_is_delim());
+  EXPECT_EQ(string(" "), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_FALSE(t.token_is_delim());
+  EXPECT_EQ(string("charset"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_TRUE(t.token_is_delim());
+  EXPECT_EQ(string("="), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_FALSE(t.token_is_delim());
+  EXPECT_EQ(string("UTF-8"), t.token());
+
+  EXPECT_FALSE(t.GetNext());
+  EXPECT_FALSE(t.token_is_delim());
+}
+
+TEST(StringTokenizerTest, ParseQuotedString) {
+  string input = "foo bar 'hello world' baz";
+  StringTokenizer t(input, " ");
+  t.set_quote_chars("'");
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("foo"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("bar"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("'hello world'"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("baz"), t.token());
+
+  EXPECT_FALSE(t.GetNext());
+}
+
+TEST(StringTokenizerTest, ParseQuotedString_Malformed) {
+  string input = "bar 'hello wo";
+  StringTokenizer t(input, " ");
+  t.set_quote_chars("'");
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("bar"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("'hello wo"), t.token());
+
+  EXPECT_FALSE(t.GetNext());
+}
+
+TEST(StringTokenizerTest, ParseQuotedString_Multiple) {
+  string input = "bar 'hel\"lo\" wo' baz\"";
+  StringTokenizer t(input, " ");
+  t.set_quote_chars("'\"");
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("bar"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("'hel\"lo\" wo'"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("baz\""), t.token());
+
+  EXPECT_FALSE(t.GetNext());
+}
+
+TEST(StringTokenizerTest, ParseQuotedString_EscapedQuotes) {
+  string input = "foo 'don\\'t do that'";
+  StringTokenizer t(input, " ");
+  t.set_quote_chars("'");
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("foo"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("'don\\'t do that'"), t.token());
+
+  EXPECT_FALSE(t.GetNext());
+}
+
+TEST(StringTokenizerTest, ParseQuotedString_EscapedQuotes2) {
+  string input = "foo='a, b', bar";
+  StringTokenizer t(input, ", ");
+  t.set_quote_chars("'");
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("foo='a, b'"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("bar"), t.token());
+
+  EXPECT_FALSE(t.GetNext());
+}
+
+}  // namespace
+
+}  // namespace base
diff --git a/base/strings/string_util.cc b/base/strings/string_util.cc
new file mode 100644
index 0000000..32e5ff2
--- /dev/null
+++ b/base/strings/string_util.cc
@@ -0,0 +1,1124 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/string_util.h"
+
+#include <ctype.h>
+#include <errno.h>
+#include <math.h>
+#include <stdarg.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <wchar.h>
+#include <wctype.h>
+
+#include <algorithm>
+#include <limits>
+#include <vector>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/singleton.h"
+#include "base/strings/utf_string_conversion_utils.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/third_party/icu/icu_utf.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace {
+
+// Force the singleton used by EmptyString[16] to be a unique type. This
+// prevents other code that might accidentally use Singleton<string> from
+// getting our internal one.
+struct EmptyStrings {
+  EmptyStrings() = default;
+  const std::string s;
+  const string16 s16;
+
+  static EmptyStrings* GetInstance() {
+    return Singleton<EmptyStrings>::get();
+  }
+};
+
+// Used by ReplaceStringPlaceholders to track the position in the string of
+// replaced parameters.
+struct ReplacementOffset {
+  ReplacementOffset(uintptr_t parameter, size_t offset)
+      : parameter(parameter),
+        offset(offset) {}
+
+  // Index of the parameter.
+  uintptr_t parameter;
+
+  // Starting position in the string.
+  size_t offset;
+};
+
+static bool CompareParameter(const ReplacementOffset& elem1,
+                             const ReplacementOffset& elem2) {
+  return elem1.parameter < elem2.parameter;
+}
+
+// Overloaded function to append one string onto the end of another. Having a
+// separate overload for |source| as both string and StringPiece allows for more
+// efficient usage from functions templated to work with either type (avoiding a
+// redundant call to the BasicStringPiece constructor in both cases).
+template <typename string_type>
+inline void AppendToString(string_type* target, const string_type& source) {
+  target->append(source);
+}
+
+template <typename string_type>
+inline void AppendToString(string_type* target,
+                           const BasicStringPiece<string_type>& source) {
+  source.AppendToString(target);
+}
+
+// Assuming that a pointer is the size of a "machine word",
+// uintptr_t is an integer type that is also a machine word.
+typedef uintptr_t MachineWord;
+const uintptr_t kMachineWordAlignmentMask = sizeof(MachineWord) - 1;
+
+inline bool IsAlignedToMachineWord(const void* pointer) {
+  return !(reinterpret_cast<MachineWord>(pointer) & kMachineWordAlignmentMask);
+}
+
+template<typename T> inline T* AlignToMachineWord(T* pointer) {
+  return reinterpret_cast<T*>(reinterpret_cast<MachineWord>(pointer) &
+                              ~kMachineWordAlignmentMask);
+}
+
+template<size_t size, typename CharacterType> struct NonASCIIMask;
+template<> struct NonASCIIMask<4, char16> {
+    static inline uint32_t value() { return 0xFF80FF80U; }
+};
+template<> struct NonASCIIMask<4, char> {
+    static inline uint32_t value() { return 0x80808080U; }
+};
+template<> struct NonASCIIMask<8, char16> {
+    static inline uint64_t value() { return 0xFF80FF80FF80FF80ULL; }
+};
+template<> struct NonASCIIMask<8, char> {
+    static inline uint64_t value() { return 0x8080808080808080ULL; }
+};
+#if defined(WCHAR_T_IS_UTF32)
+template<> struct NonASCIIMask<4, wchar_t> {
+    static inline uint32_t value() { return 0xFFFFFF80U; }
+};
+template<> struct NonASCIIMask<8, wchar_t> {
+    static inline uint64_t value() { return 0xFFFFFF80FFFFFF80ULL; }
+};
+#endif  // WCHAR_T_IS_UTF32
+
+}  // namespace
+
+bool IsWprintfFormatPortable(const wchar_t* format) {
+  for (const wchar_t* position = format; *position != '\0'; ++position) {
+    if (*position == '%') {
+      bool in_specification = true;
+      bool modifier_l = false;
+      while (in_specification) {
+        // Eat up characters until reaching a known specifier.
+        if (*++position == '\0') {
+          // The format string ended in the middle of a specification.  Call
+          // it portable because no unportable specifications were found.  The
+          // string is equally broken on all platforms.
+          return true;
+        }
+
+        if (*position == 'l') {
+          // 'l' is the only thing that can save the 's' and 'c' specifiers.
+          modifier_l = true;
+        } else if (((*position == 's' || *position == 'c') && !modifier_l) ||
+                   *position == 'S' || *position == 'C' || *position == 'F' ||
+                   *position == 'D' || *position == 'O' || *position == 'U') {
+          // Not portable.
+          return false;
+        }
+
+        if (wcschr(L"diouxXeEfgGaAcspn%", *position)) {
+          // Portable, keep scanning the rest of the format string.
+          in_specification = false;
+        }
+      }
+    }
+  }
+
+  return true;
+}
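+
+// For example, IsWprintfFormatPortable(L"%d %ls") is true while
+// IsWprintfFormatPortable(L"%s") is false: %s in a wprintf format consumes a
+// char* on POSIX systems but a wchar_t* on Windows, so only %ls is safe.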
+
+namespace {
+
+template<typename StringType>
+StringType ToLowerASCIIImpl(BasicStringPiece<StringType> str) {
+  StringType ret;
+  ret.reserve(str.size());
+  for (size_t i = 0; i < str.size(); i++)
+    ret.push_back(ToLowerASCII(str[i]));
+  return ret;
+}
+
+template<typename StringType>
+StringType ToUpperASCIIImpl(BasicStringPiece<StringType> str) {
+  StringType ret;
+  ret.reserve(str.size());
+  for (size_t i = 0; i < str.size(); i++)
+    ret.push_back(ToUpperASCII(str[i]));
+  return ret;
+}
+
+}  // namespace
+
+std::string ToLowerASCII(StringPiece str) {
+  return ToLowerASCIIImpl<std::string>(str);
+}
+
+string16 ToLowerASCII(StringPiece16 str) {
+  return ToLowerASCIIImpl<string16>(str);
+}
+
+std::string ToUpperASCII(StringPiece str) {
+  return ToUpperASCIIImpl<std::string>(str);
+}
+
+string16 ToUpperASCII(StringPiece16 str) {
+  return ToUpperASCIIImpl<string16>(str);
+}
+
+template<class StringType>
+int CompareCaseInsensitiveASCIIT(BasicStringPiece<StringType> a,
+                                 BasicStringPiece<StringType> b) {
+  // Find the first characters that aren't equal and compare them.  If the end
+  // of one of the strings is found before a nonequal character, the lengths
+  // of the strings are compared.
+  size_t i = 0;
+  while (i < a.length() && i < b.length()) {
+    typename StringType::value_type lower_a = ToLowerASCII(a[i]);
+    typename StringType::value_type lower_b = ToLowerASCII(b[i]);
+    if (lower_a < lower_b)
+      return -1;
+    if (lower_a > lower_b)
+      return 1;
+    i++;
+  }
+
+  // End of one string hit before finding a different character. Expect the
+  // common case to be "strings equal" at this point so check that first.
+  if (a.length() == b.length())
+    return 0;
+
+  if (a.length() < b.length())
+    return -1;
+  return 1;
+}
+
+int CompareCaseInsensitiveASCII(StringPiece a, StringPiece b) {
+  return CompareCaseInsensitiveASCIIT<std::string>(a, b);
+}
+
+int CompareCaseInsensitiveASCII(StringPiece16 a, StringPiece16 b) {
+  return CompareCaseInsensitiveASCIIT<string16>(a, b);
+}
+
+bool EqualsCaseInsensitiveASCII(StringPiece a, StringPiece b) {
+  if (a.length() != b.length())
+    return false;
+  return CompareCaseInsensitiveASCIIT<std::string>(a, b) == 0;
+}
+
+bool EqualsCaseInsensitiveASCII(StringPiece16 a, StringPiece16 b) {
+  if (a.length() != b.length())
+    return false;
+  return CompareCaseInsensitiveASCIIT<string16>(a, b) == 0;
+}
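+
+// For example, CompareCaseInsensitiveASCII("AbC", "abc") == 0 and
+// EqualsCaseInsensitiveASCII("Host", "hOST") is true. Only A-Z/a-z are
+// folded, so non-ASCII letters never compare equal across case.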
+
+const std::string& EmptyString() {
+  return EmptyStrings::GetInstance()->s;
+}
+
+const string16& EmptyString16() {
+  return EmptyStrings::GetInstance()->s16;
+}
+
+template <class StringType>
+bool ReplaceCharsT(const StringType& input,
+                   BasicStringPiece<StringType> find_any_of_these,
+                   BasicStringPiece<StringType> replace_with,
+                   StringType* output);
+
+bool ReplaceChars(const string16& input,
+                  StringPiece16 replace_chars,
+                  const string16& replace_with,
+                  string16* output) {
+  return ReplaceCharsT(input, replace_chars, StringPiece16(replace_with),
+                       output);
+}
+
+bool ReplaceChars(const std::string& input,
+                  StringPiece replace_chars,
+                  const std::string& replace_with,
+                  std::string* output) {
+  return ReplaceCharsT(input, replace_chars, StringPiece(replace_with), output);
+}
+
+bool RemoveChars(const string16& input,
+                 StringPiece16 remove_chars,
+                 string16* output) {
+  return ReplaceCharsT(input, remove_chars, StringPiece16(), output);
+}
+
+bool RemoveChars(const std::string& input,
+                 StringPiece remove_chars,
+                 std::string* output) {
+  return ReplaceCharsT(input, remove_chars, StringPiece(), output);
+}
+
+template<typename Str>
+TrimPositions TrimStringT(const Str& input,
+                          BasicStringPiece<Str> trim_chars,
+                          TrimPositions positions,
+                          Str* output) {
+  // Find the edges of leading/trailing whitespace as desired. Need to use
+  // a StringPiece version of input to be able to call find* on it with the
+  // StringPiece version of trim_chars (normally trim_chars will be a
+  // constant, so this avoids making a copy).
+  BasicStringPiece<Str> input_piece(input);
+  const size_t last_char = input.length() - 1;
+  const size_t first_good_char = (positions & TRIM_LEADING) ?
+      input_piece.find_first_not_of(trim_chars) : 0;
+  const size_t last_good_char = (positions & TRIM_TRAILING) ?
+      input_piece.find_last_not_of(trim_chars) : last_char;
+
+  // When the string was all trimmed, report that we stripped off characters
+  // from whichever position the caller was interested in. For empty input, we
+  // stripped no characters, but we still need to clear |output|.
+  if (input.empty() ||
+      (first_good_char == Str::npos) || (last_good_char == Str::npos)) {
+    bool input_was_empty = input.empty();  // in case output == &input
+    output->clear();
+    return input_was_empty ? TRIM_NONE : positions;
+  }
+
+  // Trim.
+  *output =
+      input.substr(first_good_char, last_good_char - first_good_char + 1);
+
+  // Return where we trimmed from.
+  return static_cast<TrimPositions>(
+      ((first_good_char == 0) ? TRIM_NONE : TRIM_LEADING) |
+      ((last_good_char == last_char) ? TRIM_NONE : TRIM_TRAILING));
+}
+
+bool TrimString(const string16& input,
+                StringPiece16 trim_chars,
+                string16* output) {
+  return TrimStringT(input, trim_chars, TRIM_ALL, output) != TRIM_NONE;
+}
+
+bool TrimString(const std::string& input,
+                StringPiece trim_chars,
+                std::string* output) {
+  return TrimStringT(input, trim_chars, TRIM_ALL, output) != TRIM_NONE;
+}
+
+template<typename Str>
+BasicStringPiece<Str> TrimStringPieceT(BasicStringPiece<Str> input,
+                                       BasicStringPiece<Str> trim_chars,
+                                       TrimPositions positions) {
+  size_t begin = (positions & TRIM_LEADING) ?
+      input.find_first_not_of(trim_chars) : 0;
+  size_t end = (positions & TRIM_TRAILING) ?
+      input.find_last_not_of(trim_chars) + 1 : input.size();
+  return input.substr(begin, end - begin);
+}
+
+StringPiece16 TrimString(StringPiece16 input,
+                         StringPiece16 trim_chars,
+                         TrimPositions positions) {
+  return TrimStringPieceT(input, trim_chars, positions);
+}
+
+StringPiece TrimString(StringPiece input,
+                       StringPiece trim_chars,
+                       TrimPositions positions) {
+  return TrimStringPieceT(input, trim_chars, positions);
+}
+
+void TruncateUTF8ToByteSize(const std::string& input,
+                            const size_t byte_size,
+                            std::string* output) {
+  DCHECK(output);
+  if (byte_size > input.length()) {
+    *output = input;
+    return;
+  }
+  DCHECK_LE(byte_size,
+            static_cast<uint32_t>(std::numeric_limits<int32_t>::max()));
+  // Note: This cast is necessary because CBU8_NEXT uses int32_t indices.
+  int32_t truncation_length = static_cast<int32_t>(byte_size);
+  int32_t char_index = truncation_length - 1;
+  const char* data = input.data();
+
+  // Using CBU8, we will move backwards from the truncation point
+  // to the beginning of the string looking for a valid UTF8
+  // character.  Once a full UTF8 character is found, we will
+  // truncate the string to the end of that character.
+  while (char_index >= 0) {
+    int32_t prev = char_index;
+    base_icu::UChar32 code_point = 0;
+    CBU8_NEXT(data, char_index, truncation_length, code_point);
+    if (!IsValidCharacter(code_point) ||
+        !IsValidCodepoint(code_point)) {
+      char_index = prev - 1;
+    } else {
+      break;
+    }
+  }
+
+  if (char_index >= 0)
+    *output = input.substr(0, char_index);
+  else
+    output->clear();
+}
+
+TrimPositions TrimWhitespace(const string16& input,
+                             TrimPositions positions,
+                             string16* output) {
+  return TrimStringT(input, StringPiece16(kWhitespaceUTF16), positions, output);
+}
+
+StringPiece16 TrimWhitespace(StringPiece16 input,
+                             TrimPositions positions) {
+  return TrimStringPieceT(input, StringPiece16(kWhitespaceUTF16), positions);
+}
+
+TrimPositions TrimWhitespaceASCII(const std::string& input,
+                                  TrimPositions positions,
+                                  std::string* output) {
+  return TrimStringT(input, StringPiece(kWhitespaceASCII), positions, output);
+}
+
+StringPiece TrimWhitespaceASCII(StringPiece input, TrimPositions positions) {
+  return TrimStringPieceT(input, StringPiece(kWhitespaceASCII), positions);
+}
+
+template<typename STR>
+STR CollapseWhitespaceT(const STR& text,
+                        bool trim_sequences_with_line_breaks) {
+  STR result;
+  result.resize(text.size());
+
+  // Set flags to pretend we're already in a trimmed whitespace sequence, so we
+  // will trim any leading whitespace.
+  bool in_whitespace = true;
+  bool already_trimmed = true;
+
+  int chars_written = 0;
+  for (typename STR::const_iterator i(text.begin()); i != text.end(); ++i) {
+    if (IsUnicodeWhitespace(*i)) {
+      if (!in_whitespace) {
+        // Reduce all whitespace sequences to a single space.
+        in_whitespace = true;
+        result[chars_written++] = L' ';
+      }
+      if (trim_sequences_with_line_breaks && !already_trimmed &&
+          ((*i == '\n') || (*i == '\r'))) {
+        // Whitespace sequences containing CR or LF are eliminated entirely.
+        already_trimmed = true;
+        --chars_written;
+      }
+    } else {
+      // Non-whitespace characters are copied straight across.
+      in_whitespace = false;
+      already_trimmed = false;
+      result[chars_written++] = *i;
+    }
+  }
+
+  if (in_whitespace && !already_trimmed) {
+    // Any trailing whitespace is eliminated.
+    --chars_written;
+  }
+
+  result.resize(chars_written);
+  return result;
+}
+
+string16 CollapseWhitespace(const string16& text,
+                            bool trim_sequences_with_line_breaks) {
+  return CollapseWhitespaceT(text, trim_sequences_with_line_breaks);
+}
+
+std::string CollapseWhitespaceASCII(const std::string& text,
+                                    bool trim_sequences_with_line_breaks) {
+  return CollapseWhitespaceT(text, trim_sequences_with_line_breaks);
+}
+
+bool ContainsOnlyChars(StringPiece input, StringPiece characters) {
+  return input.find_first_not_of(characters) == StringPiece::npos;
+}
+
+bool ContainsOnlyChars(StringPiece16 input, StringPiece16 characters) {
+  return input.find_first_not_of(characters) == StringPiece16::npos;
+}
+
+template <class Char>
+inline bool DoIsStringASCII(const Char* characters, size_t length) {
+  MachineWord all_char_bits = 0;
+  const Char* end = characters + length;
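+
+  // All three loops below OR every character into |all_char_bits|; a single
+  // test against the non-ASCII mask at the end then tells whether any
+  // character in the string had bits outside the ASCII range.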
+
+  // Prologue: align the input.
+  while (!IsAlignedToMachineWord(characters) && characters != end) {
+    all_char_bits |= *characters;
+    ++characters;
+  }
+
+  // Scan the aligned middle of the string one machine word at a time.
+  const Char* word_end = AlignToMachineWord(end);
+  const size_t loop_increment = sizeof(MachineWord) / sizeof(Char);
+  while (characters < word_end) {
+    all_char_bits |= *(reinterpret_cast<const MachineWord*>(characters));
+    characters += loop_increment;
+  }
+
+  // Process the remaining bytes.
+  while (characters != end) {
+    all_char_bits |= *characters;
+    ++characters;
+  }
+
+  MachineWord non_ascii_bit_mask =
+      NonASCIIMask<sizeof(MachineWord), Char>::value();
+  return !(all_char_bits & non_ascii_bit_mask);
+}
+
+bool IsStringASCII(StringPiece str) {
+  return DoIsStringASCII(str.data(), str.length());
+}
+
+bool IsStringASCII(StringPiece16 str) {
+  return DoIsStringASCII(str.data(), str.length());
+}
+
+#if defined(WCHAR_T_IS_UTF32)
+bool IsStringASCII(WStringPiece str) {
+  return DoIsStringASCII(str.data(), str.length());
+}
+#endif
+
+bool IsStringUTF8(StringPiece str) {
+  const char *src = str.data();
+  int32_t src_len = static_cast<int32_t>(str.length());
+  int32_t char_index = 0;
+
+  while (char_index < src_len) {
+    int32_t code_point;
+    CBU8_NEXT(src, char_index, src_len, code_point);
+    if (!IsValidCharacter(code_point))
+      return false;
+  }
+  return true;
+}
+
+// Implementation note: Normally this function will be called with a hardcoded
+// constant for the lowercase_ascii parameter. Constructing a StringPiece from
+// a C constant requires running strlen, so the result will be two passes
+// through the buffers, one to find the length of lowercase_ascii, and one to
+// compare each letter.
+//
+// This function could have taken a const char* to avoid this and only do one
+// pass through the string. But the strlen is faster than the case-insensitive
+// compares and lets us early-exit in the case that the strings are different
+// lengths (will often be the case for non-matches). So whether one approach or
+// the other will be faster depends on the case.
+//
+// The hardcoded strings are typically very short so it doesn't matter, and the
+// string piece gives additional flexibility for the caller (doesn't have to be
+// null terminated) so we choose the StringPiece route.
+template<typename Str>
+static inline bool DoLowerCaseEqualsASCII(BasicStringPiece<Str> str,
+                                          StringPiece lowercase_ascii) {
+  if (str.size() != lowercase_ascii.size())
+    return false;
+  for (size_t i = 0; i < str.size(); i++) {
+    if (ToLowerASCII(str[i]) != lowercase_ascii[i])
+      return false;
+  }
+  return true;
+}
+
+bool LowerCaseEqualsASCII(StringPiece str, StringPiece lowercase_ascii) {
+  return DoLowerCaseEqualsASCII<std::string>(str, lowercase_ascii);
+}
+
+bool LowerCaseEqualsASCII(StringPiece16 str, StringPiece lowercase_ascii) {
+  return DoLowerCaseEqualsASCII<string16>(str, lowercase_ascii);
+}
+
+bool EqualsASCII(StringPiece16 str, StringPiece ascii) {
+  if (str.length() != ascii.length())
+    return false;
+  return std::equal(ascii.begin(), ascii.end(), str.begin());
+}
+
+template<typename Str>
+bool StartsWithT(BasicStringPiece<Str> str,
+                 BasicStringPiece<Str> search_for,
+                 CompareCase case_sensitivity) {
+  if (search_for.size() > str.size())
+    return false;
+
+  BasicStringPiece<Str> source = str.substr(0, search_for.size());
+
+  switch (case_sensitivity) {
+    case CompareCase::SENSITIVE:
+      return source == search_for;
+
+    case CompareCase::INSENSITIVE_ASCII:
+      return std::equal(
+          search_for.begin(), search_for.end(),
+          source.begin(),
+          CaseInsensitiveCompareASCII<typename Str::value_type>());
+
+    default:
+      NOTREACHED();
+      return false;
+  }
+}
+
+bool StartsWith(StringPiece str,
+                StringPiece search_for,
+                CompareCase case_sensitivity) {
+  return StartsWithT<std::string>(str, search_for, case_sensitivity);
+}
+
+bool StartsWith(StringPiece16 str,
+                StringPiece16 search_for,
+                CompareCase case_sensitivity) {
+  return StartsWithT<string16>(str, search_for, case_sensitivity);
+}
+
+template <typename Str>
+bool EndsWithT(BasicStringPiece<Str> str,
+               BasicStringPiece<Str> search_for,
+               CompareCase case_sensitivity) {
+  if (search_for.size() > str.size())
+    return false;
+
+  BasicStringPiece<Str> source = str.substr(str.size() - search_for.size(),
+                                            search_for.size());
+
+  switch (case_sensitivity) {
+    case CompareCase::SENSITIVE:
+      return source == search_for;
+
+    case CompareCase::INSENSITIVE_ASCII:
+      return std::equal(
+          source.begin(), source.end(),
+          search_for.begin(),
+          CaseInsensitiveCompareASCII<typename Str::value_type>());
+
+    default:
+      NOTREACHED();
+      return false;
+  }
+}
+
+bool EndsWith(StringPiece str,
+              StringPiece search_for,
+              CompareCase case_sensitivity) {
+  return EndsWithT<std::string>(str, search_for, case_sensitivity);
+}
+
+bool EndsWith(StringPiece16 str,
+              StringPiece16 search_for,
+              CompareCase case_sensitivity) {
+  return EndsWithT<string16>(str, search_for, case_sensitivity);
+}
+
+char HexDigitToInt(wchar_t c) {
+  DCHECK(IsHexDigit(c));
+  if (c >= '0' && c <= '9')
+    return static_cast<char>(c - '0');
+  if (c >= 'A' && c <= 'F')
+    return static_cast<char>(c - 'A' + 10);
+  if (c >= 'a' && c <= 'f')
+    return static_cast<char>(c - 'a' + 10);
+  return 0;
+}
+
+bool IsUnicodeWhitespace(wchar_t c) {
+  // kWhitespaceWide is a NULL-terminated string
+  for (const wchar_t* cur = kWhitespaceWide; *cur; ++cur) {
+    if (*cur == c)
+      return true;
+  }
+  return false;
+}
+
+static const char* const kByteStringsUnlocalized[] = {
+  " B",
+  " kB",
+  " MB",
+  " GB",
+  " TB",
+  " PB"
+};
+
+string16 FormatBytesUnlocalized(int64_t bytes) {
+  double unit_amount = static_cast<double>(bytes);
+  size_t dimension = 0;
+  const int kKilo = 1024;
+  while (unit_amount >= kKilo &&
+         dimension < arraysize(kByteStringsUnlocalized) - 1) {
+    unit_amount /= kKilo;
+    dimension++;
+  }
+
+  char buf[64];
+  if (bytes != 0 && dimension > 0 && unit_amount < 100) {
+    base::snprintf(buf, arraysize(buf), "%.1lf%s", unit_amount,
+                   kByteStringsUnlocalized[dimension]);
+  } else {
+    base::snprintf(buf, arraysize(buf), "%.0lf%s", unit_amount,
+                   kByteStringsUnlocalized[dimension]);
+  }
+
+  return ASCIIToUTF16(buf);
+}
+
+// A Matcher for DoReplaceMatchesAfterOffset() that matches substrings.
+template <class StringType>
+struct SubstringMatcher {
+  BasicStringPiece<StringType> find_this;
+
+  size_t Find(const StringType& input, size_t pos) {
+    return input.find(find_this.data(), pos, find_this.length());
+  }
+  size_t MatchSize() { return find_this.length(); }
+};
+
+// A Matcher for DoReplaceMatchesAfterOffset() that matches single characters.
+template <class StringType>
+struct CharacterMatcher {
+  BasicStringPiece<StringType> find_any_of_these;
+
+  size_t Find(const StringType& input, size_t pos) {
+    return input.find_first_of(find_any_of_these.data(), pos,
+                               find_any_of_these.length());
+  }
+  constexpr size_t MatchSize() { return 1; }
+};
+
+enum class ReplaceType { REPLACE_ALL, REPLACE_FIRST };
+
+// Runs in O(n) time in the length of |str|, and transforms the string without
+// reallocating when possible. Returns |true| if any matches were found.
+//
+// This is parameterized on a |Matcher| traits type, so that it can be the
+// implementation for both ReplaceChars() and ReplaceSubstringsAfterOffset().
+template <class StringType, class Matcher>
+bool DoReplaceMatchesAfterOffset(StringType* str,
+                                 size_t initial_offset,
+                                 Matcher matcher,
+                                 BasicStringPiece<StringType> replace_with,
+                                 ReplaceType replace_type) {
+  using CharTraits = typename StringType::traits_type;
+
+  const size_t find_length = matcher.MatchSize();
+  if (!find_length)
+    return false;
+
+  // If the find string doesn't appear, there's nothing to do.
+  size_t first_match = matcher.Find(*str, initial_offset);
+  if (first_match == StringType::npos)
+    return false;
+
+  // If we're only replacing one instance, there's no need to do anything
+  // complicated.
+  const size_t replace_length = replace_with.length();
+  if (replace_type == ReplaceType::REPLACE_FIRST) {
+    str->replace(first_match, find_length, replace_with.data(), replace_length);
+    return true;
+  }
+
+  // If the find and replace strings are the same length, we can simply use
+  // replace() on each instance, and finish the entire operation in O(n) time.
+  if (find_length == replace_length) {
+    auto* buffer = &((*str)[0]);
+    for (size_t offset = first_match; offset != StringType::npos;
+         offset = matcher.Find(*str, offset + replace_length)) {
+      CharTraits::copy(buffer + offset, replace_with.data(), replace_length);
+    }
+    return true;
+  }
+
+  // Since the find and replace strings aren't the same length, a loop like the
+  // one above would be O(n^2) in the worst case, as replace() will shift the
+  // entire remaining string each time. We need to be more clever to keep things
+  // O(n).
+  //
+  // When the string is being shortened, it's possible to just shift the matches
+  // down in one pass while finding, and truncate the length at the end of the
+  // search.
+  //
+  // If the string is being lengthened, more work is required. The strategy used
+  // here is to make two find() passes through the string. The first pass counts
+  // the number of matches to determine the new size. The second pass will
+  // either construct the new string into a new buffer (if the existing buffer
+  // lacked capacity), or else -- if there is room -- create a region of scratch
+  // space after |first_match| by shifting the tail of the string to a higher
+  // index, and doing in-place moves from the tail to lower indices thereafter.
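+  //
+  // A small worked example (values chosen purely for illustration): replacing
+  // "ab" with "xyz" in "1ab2ab3" finds two matches, so |expansion| is 2 and
+  // the final length is 9. When the buffer already has capacity, the tail
+  // after the first match is shifted right, leaving "1ab2a2ab3", and the
+  // copy/move loop below then writes each "xyz" and moves the intervening
+  // text down, producing "1xyz2xyz3".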
+  size_t str_length = str->length();
+  size_t expansion = 0;
+  if (replace_length > find_length) {
+    // This operation lengthens the string; determine the new length by counting
+    // matches.
+    const size_t expansion_per_match = (replace_length - find_length);
+    size_t num_matches = 0;
+    for (size_t match = first_match; match != StringType::npos;
+         match = matcher.Find(*str, match + find_length)) {
+      expansion += expansion_per_match;
+      ++num_matches;
+    }
+    const size_t final_length = str_length + expansion;
+
+    if (str->capacity() < final_length) {
+      // If we'd have to allocate a new buffer to grow the string, build the
+      // result directly into the new allocation via append().
+      StringType src(str->get_allocator());
+      str->swap(src);
+      str->reserve(final_length);
+
+      size_t pos = 0;
+      for (size_t match = first_match;; match = matcher.Find(src, pos)) {
+        str->append(src, pos, match - pos);
+        str->append(replace_with.data(), replace_length);
+        pos = match + find_length;
+
+        // A mid-loop test/break enables skipping the final Find() call; the
+        // number of matches is known, so don't search past the last one.
+        if (!--num_matches)
+          break;
+      }
+
+      // Handle substring after the final match.
+      str->append(src, pos, str_length - pos);
+      return true;
+    }
+
+    // Prepare for the copy/move loop below -- expand the string to its final
+    // size by shifting the data after the first match to the end of the resized
+    // string.
+    size_t shift_src = first_match + find_length;
+    size_t shift_dst = shift_src + expansion;
+
+    // Big |expansion| factors (relative to |str_length|) require padding up to
+    // |shift_dst|.
+    if (shift_dst > str_length)
+      str->resize(shift_dst);
+
+    str->replace(shift_dst, str_length - shift_src, *str, shift_src,
+                 str_length - shift_src);
+    str_length = final_length;
+  }
+
+  // We can alternate replacement and move operations. This won't overwrite the
+  // unsearched region of the string so long as |write_offset| <= |read_offset|;
+  // that condition is always satisfied because:
+  //
+  //   (a) If the string is being shortened, |expansion| is zero and
+  //       |write_offset| grows slower than |read_offset|.
+  //
+  //   (b) If the string is being lengthened, |write_offset| grows faster than
+  //       |read_offset|, but |expansion| is big enough so that |write_offset|
+  //       will only catch up to |read_offset| at the point of the last match.
+  auto* buffer = &((*str)[0]);
+  size_t write_offset = first_match;
+  size_t read_offset = first_match + expansion;
+  do {
+    if (replace_length) {
+      CharTraits::copy(buffer + write_offset, replace_with.data(),
+                       replace_length);
+      write_offset += replace_length;
+    }
+    read_offset += find_length;
+
+    // min() clamps StringType::npos (the largest unsigned value) to str_length.
+    size_t match = std::min(matcher.Find(*str, read_offset), str_length);
+
+    size_t length = match - read_offset;
+    if (length) {
+      CharTraits::move(buffer + write_offset, buffer + read_offset, length);
+      write_offset += length;
+      read_offset += length;
+    }
+  } while (read_offset < str_length);
+
+  // If we're shortening the string, truncate it now.
+  str->resize(write_offset);
+  return true;
+}
+
+template <class StringType>
+bool ReplaceCharsT(const StringType& input,
+                   BasicStringPiece<StringType> find_any_of_these,
+                   BasicStringPiece<StringType> replace_with,
+                   StringType* output) {
+  // Commonly, this is called with output and input being the same string; in
+  // that case, this assignment is inexpensive.
+  *output = input;
+
+  return DoReplaceMatchesAfterOffset(
+      output, 0, CharacterMatcher<StringType>{find_any_of_these}, replace_with,
+      ReplaceType::REPLACE_ALL);
+}
+
+void ReplaceFirstSubstringAfterOffset(string16* str,
+                                      size_t start_offset,
+                                      StringPiece16 find_this,
+                                      StringPiece16 replace_with) {
+  DoReplaceMatchesAfterOffset(str, start_offset,
+                              SubstringMatcher<string16>{find_this},
+                              replace_with, ReplaceType::REPLACE_FIRST);
+}
+
+void ReplaceFirstSubstringAfterOffset(std::string* str,
+                                      size_t start_offset,
+                                      StringPiece find_this,
+                                      StringPiece replace_with) {
+  DoReplaceMatchesAfterOffset(str, start_offset,
+                              SubstringMatcher<std::string>{find_this},
+                              replace_with, ReplaceType::REPLACE_FIRST);
+}
+
+void ReplaceSubstringsAfterOffset(string16* str,
+                                  size_t start_offset,
+                                  StringPiece16 find_this,
+                                  StringPiece16 replace_with) {
+  DoReplaceMatchesAfterOffset(str, start_offset,
+                              SubstringMatcher<string16>{find_this},
+                              replace_with, ReplaceType::REPLACE_ALL);
+}
+
+void ReplaceSubstringsAfterOffset(std::string* str,
+                                  size_t start_offset,
+                                  StringPiece find_this,
+                                  StringPiece replace_with) {
+  DoReplaceMatchesAfterOffset(str, start_offset,
+                              SubstringMatcher<std::string>{find_this},
+                              replace_with, ReplaceType::REPLACE_ALL);
+}
+
+template <class string_type>
+inline typename string_type::value_type* WriteIntoT(string_type* str,
+                                                    size_t length_with_null) {
+  DCHECK_GT(length_with_null, 1u);
+  str->reserve(length_with_null);
+  str->resize(length_with_null - 1);
+  return &((*str)[0]);
+}
+
+char* WriteInto(std::string* str, size_t length_with_null) {
+  return WriteIntoT(str, length_with_null);
+}
+
+char16* WriteInto(string16* str, size_t length_with_null) {
+  return WriteIntoT(str, length_with_null);
+}
+
+#if defined(_MSC_VER) && !defined(__clang__)
+// Work around VC++ code-gen bug. https://crbug.com/804884
+#pragma optimize("", off)
+#endif
+
+// Generic version for all JoinString overloads. |list_type| must be a sequence
+// (std::vector or std::initializer_list) of strings/StringPieces (std::string,
+// string16, StringPiece or StringPiece16). |string_type| is either std::string
+// or string16.
+template <typename list_type, typename string_type>
+static string_type JoinStringT(const list_type& parts,
+                               BasicStringPiece<string_type> sep) {
+  if (parts.size() == 0)
+    return string_type();
+
+  // Pre-allocate the eventual size of the string. Start with the size of all of
+  // the separators (note that this *assumes* parts.size() > 0).
+  size_t total_size = (parts.size() - 1) * sep.size();
+  for (const auto& part : parts)
+    total_size += part.size();
+  string_type result;
+  result.reserve(total_size);
+
+  auto iter = parts.begin();
+  DCHECK(iter != parts.end());
+  AppendToString(&result, *iter);
+  ++iter;
+
+  for (; iter != parts.end(); ++iter) {
+    sep.AppendToString(&result);
+    // Using the overloaded AppendToString allows this template function to work
+    // on both strings and StringPieces without creating an intermediate
+    // StringPiece object.
+    AppendToString(&result, *iter);
+  }
+
+  // Sanity-check that we pre-allocated correctly.
+  DCHECK_EQ(total_size, result.size());
+
+  return result;
+}
+
+std::string JoinString(const std::vector<std::string>& parts,
+                       StringPiece separator) {
+  return JoinStringT(parts, separator);
+}
+
+string16 JoinString(const std::vector<string16>& parts,
+                    StringPiece16 separator) {
+  return JoinStringT(parts, separator);
+}
+
+#if defined(_MSC_VER) && !defined(__clang__)
+// Work around VC++ code-gen bug. https://crbug.com/804884
+#pragma optimize("", on)
+#endif
+
+std::string JoinString(const std::vector<StringPiece>& parts,
+                       StringPiece separator) {
+  return JoinStringT(parts, separator);
+}
+
+string16 JoinString(const std::vector<StringPiece16>& parts,
+                    StringPiece16 separator) {
+  return JoinStringT(parts, separator);
+}
+
+std::string JoinString(std::initializer_list<StringPiece> parts,
+                       StringPiece separator) {
+  return JoinStringT(parts, separator);
+}
+
+string16 JoinString(std::initializer_list<StringPiece16> parts,
+                    StringPiece16 separator) {
+  return JoinStringT(parts, separator);
+}
+
+template<class FormatStringType, class OutStringType>
+OutStringType DoReplaceStringPlaceholders(
+    const FormatStringType& format_string,
+    const std::vector<OutStringType>& subst,
+    std::vector<size_t>* offsets) {
+  size_t substitutions = subst.size();
+  DCHECK_LT(substitutions, 10U);
+
+  size_t sub_length = 0;
+  for (const auto& cur : subst)
+    sub_length += cur.length();
+
+  OutStringType formatted;
+  formatted.reserve(format_string.length() + sub_length);
+
+  std::vector<ReplacementOffset> r_offsets;
+  for (auto i = format_string.begin(); i != format_string.end(); ++i) {
+    if ('$' == *i) {
+      if (i + 1 != format_string.end()) {
+        ++i;
+        if ('$' == *i) {
+          while (i != format_string.end() && '$' == *i) {
+            formatted.push_back('$');
+            ++i;
+          }
+          --i;
+        } else {
+          if (*i < '1' || *i > '9') {
+            DLOG(ERROR) << "Invalid placeholder: $" << *i;
+            continue;
+          }
+          uintptr_t index = *i - '1';
+          if (offsets) {
+            ReplacementOffset r_offset(index,
+                                       static_cast<int>(formatted.size()));
+            r_offsets.insert(
+                std::upper_bound(r_offsets.begin(), r_offsets.end(), r_offset,
+                                 &CompareParameter),
+                r_offset);
+          }
+          if (index < substitutions)
+            formatted.append(subst.at(index));
+        }
+      }
+    } else {
+      formatted.push_back(*i);
+    }
+  }
+  if (offsets) {
+    for (const auto& cur : r_offsets)
+      offsets->push_back(cur.offset);
+  }
+  return formatted;
+}
+
+string16 ReplaceStringPlaceholders(const string16& format_string,
+                                   const std::vector<string16>& subst,
+                                   std::vector<size_t>* offsets) {
+  return DoReplaceStringPlaceholders(format_string, subst, offsets);
+}
+
+std::string ReplaceStringPlaceholders(StringPiece format_string,
+                                      const std::vector<std::string>& subst,
+                                      std::vector<size_t>* offsets) {
+  return DoReplaceStringPlaceholders(format_string, subst, offsets);
+}
+
+string16 ReplaceStringPlaceholders(const string16& format_string,
+                                   const string16& a,
+                                   size_t* offset) {
+  std::vector<size_t> offsets;
+  std::vector<string16> subst;
+  subst.push_back(a);
+  string16 result = ReplaceStringPlaceholders(format_string, subst, &offsets);
+
+  DCHECK_EQ(1U, offsets.size());
+  if (offset)
+    *offset = offsets[0];
+  return result;
+}
+
+// The following code is compatible with the OpenBSD lcpy interface.  See:
+//   http://www.gratisoft.us/todd/papers/strlcpy.html
+//   ftp://ftp.openbsd.org/pub/OpenBSD/src/lib/libc/string/{wcs,str}lcpy.c
+
+namespace {
+
+template <typename CHAR>
+size_t lcpyT(CHAR* dst, const CHAR* src, size_t dst_size) {
+  for (size_t i = 0; i < dst_size; ++i) {
+    if ((dst[i] = src[i]) == 0)  // We hit and copied the terminating NULL.
+      return i;
+  }
+
+  // We copied |dst_size| characters without hitting the terminating NULL, so
+  // overwrite the last one to NULL terminate.
+  if (dst_size != 0)
+    dst[dst_size - 1] = 0;
+
+  // Count the rest of |src| and return its length in characters.
+  while (src[dst_size]) ++dst_size;
+  return dst_size;
+}
+
+}  // namespace
+
+size_t strlcpy(char* dst, const char* src, size_t dst_size) {
+  return lcpyT<char>(dst, src, dst_size);
+}
+size_t wcslcpy(wchar_t* dst, const wchar_t* src, size_t dst_size) {
+  return lcpyT<wchar_t>(dst, src, dst_size);
+}
+
+}  // namespace base
diff --git a/base/strings/string_util.h b/base/strings/string_util.h
new file mode 100644
index 0000000..d6780ec
--- /dev/null
+++ b/base/strings/string_util.h
@@ -0,0 +1,485 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// This file defines utility functions for working with strings.
+
+#ifndef BASE_STRINGS_STRING_UTIL_H_
+#define BASE_STRINGS_STRING_UTIL_H_
+
+#include <ctype.h>
+#include <stdarg.h>   // va_list
+#include <stddef.h>
+#include <stdint.h>
+
+#include <initializer_list>
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"  // For implicit conversions.
+#include "build/build_config.h"
+
+namespace base {
+
+// C standard-library functions that aren't cross-platform are provided as
+// "base::...", and their prototypes are listed below. These functions are
+// then implemented as inline calls to the platform-specific equivalents in the
+// platform-specific headers.
+
+// Wrapper for vsnprintf that always null-terminates and always returns the
+// number of characters that would be in an untruncated formatted
+// string, even when truncation occurs.
+int vsnprintf(char* buffer, size_t size, const char* format, va_list arguments)
+    PRINTF_FORMAT(3, 0);
+
+// Some of these implementations need to be inlined.
+
+// We separate the declaration from the implementation of this inline
+// function just so the PRINTF_FORMAT works.
+inline int snprintf(char* buffer,
+                    size_t size,
+                    _Printf_format_string_ const char* format,
+                    ...) PRINTF_FORMAT(3, 4);
+inline int snprintf(char* buffer,
+                    size_t size,
+                    _Printf_format_string_ const char* format,
+                    ...) {
+  va_list arguments;
+  va_start(arguments, format);
+  int result = vsnprintf(buffer, size, format, arguments);
+  va_end(arguments);
+  return result;
+}
+
+// BSD-style safe and consistent string copy functions.
+// Copies |src| to |dst|, where |dst_size| is the total allocated size of |dst|.
+// Copies at most |dst_size|-1 characters, and always NULL terminates |dst|, as
+// long as |dst_size| is not 0.  Returns the length of |src| in characters.
+// If the return value is >= dst_size, then the output was truncated.
+// NOTE: All sizes are in number of characters, NOT in bytes.
+BASE_EXPORT size_t strlcpy(char* dst, const char* src, size_t dst_size);
+BASE_EXPORT size_t wcslcpy(wchar_t* dst, const wchar_t* src, size_t dst_size);
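+//
+// For example (illustrative), truncation can be detected like this:
+//   char buf[16];
+//   if (base::strlcpy(buf, input.c_str(), arraysize(buf)) >= arraysize(buf)) {
+//     // |buf| holds a truncated, NULL-terminated copy of |input|.
+//   }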
+
+// Scan a wprintf format string to determine whether it's portable across a
+// variety of systems.  This function only checks that the conversion
+// specifiers used by the format string are supported and have the same meaning
+// on a variety of systems.  It doesn't check for other errors that might occur
+// within a format string.
+//
+// Nonportable conversion specifiers for wprintf are:
+//  - 's' and 'c' without an 'l' length modifier.  %s and %c operate on char
+//     data on all systems except Windows, which treats them as wchar_t data.
+//     Use %ls and %lc for wchar_t data instead.
+//  - 'S' and 'C', which operate on wchar_t data on all systems except Windows,
+//     which treats them as char data.  Use %ls and %lc for wchar_t data
+//     instead.
+//  - 'F', which is not identified by Windows wprintf documentation.
+//  - 'D', 'O', and 'U', which are deprecated and not available on all systems.
+//     Use %ld, %lo, and %lu instead.
+//
+// Note that there is no portable conversion specifier for char data when
+// working with wprintf.
+//
+// This function is intended to be called from base::vswprintf.
+BASE_EXPORT bool IsWprintfFormatPortable(const wchar_t* format);
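+//
+// For example (illustrative): IsWprintfFormatPortable(L"%ls:%d") is true,
+// while IsWprintfFormatPortable(L"%s") is false, because %s means char data
+// on POSIX systems but wchar_t data on Windows.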
+
+// ASCII-specific tolower.  The standard library's tolower is locale sensitive,
+// so we don't want to use it here.
+inline char ToLowerASCII(char c) {
+  return (c >= 'A' && c <= 'Z') ? (c + ('a' - 'A')) : c;
+}
+inline char16 ToLowerASCII(char16 c) {
+  return (c >= 'A' && c <= 'Z') ? (c + ('a' - 'A')) : c;
+}
+
+// ASCII-specific toupper.  The standard library's toupper is locale sensitive,
+// so we don't want to use it here.
+inline char ToUpperASCII(char c) {
+  return (c >= 'a' && c <= 'z') ? (c + ('A' - 'a')) : c;
+}
+inline char16 ToUpperASCII(char16 c) {
+  return (c >= 'a' && c <= 'z') ? (c + ('A' - 'a')) : c;
+}
+
+// Converts the given string to its ASCII-lowercase equivalent.
+BASE_EXPORT std::string ToLowerASCII(StringPiece str);
+BASE_EXPORT string16 ToLowerASCII(StringPiece16 str);
+
+// Converts the given string to its ASCII-uppercase equivalent.
+BASE_EXPORT std::string ToUpperASCII(StringPiece str);
+BASE_EXPORT string16 ToUpperASCII(StringPiece16 str);
+
+// Functor for case-insensitive ASCII comparisons for STL algorithms like
+// std::search.
+//
+// Note that a full Unicode version of this functor is not possible to write
+// because case mappings might change the number of characters, depend on
+// context (combining accents), and require handling UTF-16. If you need
+// proper Unicode support, use base::i18n::ToLower/FoldCase and then just
+// use a normal operator== on the result.
+template<typename Char> struct CaseInsensitiveCompareASCII {
+ public:
+  bool operator()(Char x, Char y) const {
+    return ToLowerASCII(x) == ToLowerASCII(y);
+  }
+};
+
+// Like strcasecmp for case-insensitive ASCII characters only. Returns:
+//   -1  (a < b)
+//    0  (a == b)
+//    1  (a > b)
+// (unlike strcasecmp, which can return values of greater magnitude). For
+// full Unicode support, use base::i18n::ToLower or base::i18n::FoldCase
+// and then just call the normal string operators on the result.
+BASE_EXPORT int CompareCaseInsensitiveASCII(StringPiece a, StringPiece b);
+BASE_EXPORT int CompareCaseInsensitiveASCII(StringPiece16 a, StringPiece16 b);
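+//
+// For example (illustrative): CompareCaseInsensitiveASCII("abc", "ABD")
+// returns -1, because 'c' < 'd' after lower-casing.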
+
+// Equality for ASCII case-insensitive comparisons. For full Unicode support,
+// use base::i18n::ToLower or base::i18n::FoldCase and then compare with either
+// == or !=.
+BASE_EXPORT bool EqualsCaseInsensitiveASCII(StringPiece a, StringPiece b);
+BASE_EXPORT bool EqualsCaseInsensitiveASCII(StringPiece16 a, StringPiece16 b);
+
+// These threadsafe functions return references to globally unique empty
+// strings.
+//
+// It is likely faster to construct a new empty string object (just a few
+// instructions to set the length to 0) than to get the empty string singleton
+// returned by these functions (which requires threadsafe singleton access).
+//
+// Therefore, DO NOT USE THESE AS A GENERAL-PURPOSE SUBSTITUTE FOR DEFAULT
+// CONSTRUCTORS. There is only one case where you should use these: functions
+// which need to return a string by reference (e.g. as a class member
+// accessor), and don't have an empty string to use (e.g. in an error case).
+// These should not be used as initializers, function arguments, or return
+// values for functions which return by value or outparam.
+BASE_EXPORT const std::string& EmptyString();
+BASE_EXPORT const string16& EmptyString16();
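+//
+// A sketch of the one legitimate use (hypothetical accessor):
+//   const std::string& Entry::title() const {
+//     return title_ ? *title_ : EmptyString();
+//   }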
+
+// Contains the set of characters representing whitespace in the corresponding
+// encoding. Null-terminated. The ASCII versions are the whitespace characters
+// as defined by HTML5, and don't include control characters.
+BASE_EXPORT extern const wchar_t kWhitespaceWide[];  // Includes Unicode.
+BASE_EXPORT extern const char16 kWhitespaceUTF16[];  // Includes Unicode.
+BASE_EXPORT extern const char kWhitespaceASCII[];
+BASE_EXPORT extern const char16 kWhitespaceASCIIAs16[];  // No Unicode.
+
+// Null-terminated string representing the UTF-8 byte order mark.
+BASE_EXPORT extern const char kUtf8ByteOrderMark[];
+
+// Removes characters in |remove_chars| from anywhere in |input|.  Returns true
+// if any characters were removed.  |remove_chars| must be null-terminated.
+// NOTE: Safe to use the same variable for both |input| and |output|.
+BASE_EXPORT bool RemoveChars(const string16& input,
+                             StringPiece16 remove_chars,
+                             string16* output);
+BASE_EXPORT bool RemoveChars(const std::string& input,
+                             StringPiece remove_chars,
+                             std::string* output);
+
+// Replaces characters in |replace_chars| from anywhere in |input| with
+// |replace_with|.  Each character in |replace_chars| will be replaced with
+// the |replace_with| string.  Returns true if any characters were replaced.
+// |replace_chars| must be null-terminated.
+// NOTE: Safe to use the same variable for both |input| and |output|.
+BASE_EXPORT bool ReplaceChars(const string16& input,
+                              StringPiece16 replace_chars,
+                              const string16& replace_with,
+                              string16* output);
+BASE_EXPORT bool ReplaceChars(const std::string& input,
+                              StringPiece replace_chars,
+                              const std::string& replace_with,
+                              std::string* output);
+
+enum TrimPositions {
+  TRIM_NONE     = 0,
+  TRIM_LEADING  = 1 << 0,
+  TRIM_TRAILING = 1 << 1,
+  TRIM_ALL      = TRIM_LEADING | TRIM_TRAILING,
+};
+
+// Removes characters in |trim_chars| from the beginning and end of |input|.
+// The 8-bit version only works on 8-bit characters, not UTF-8. Returns true if
+// any characters were removed.
+//
+// It is safe to use the same variable for both |input| and |output| (this is
+// the normal usage to trim in-place).
+BASE_EXPORT bool TrimString(const string16& input,
+                            StringPiece16 trim_chars,
+                            string16* output);
+BASE_EXPORT bool TrimString(const std::string& input,
+                            StringPiece trim_chars,
+                            std::string* output);
+
+// StringPiece versions of the above. The returned pieces refer to the original
+// buffer.
+BASE_EXPORT StringPiece16 TrimString(StringPiece16 input,
+                                     StringPiece16 trim_chars,
+                                     TrimPositions positions);
+BASE_EXPORT StringPiece TrimString(StringPiece input,
+                                   StringPiece trim_chars,
+                                   TrimPositions positions);
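+//
+// For example (illustrative):
+//   TrimString("  *comment*  ", " *", TRIM_ALL) returns "comment".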
+
+// Truncates a string to the nearest UTF-8 character that will leave
+// the string less than or equal to the specified byte size.
+BASE_EXPORT void TruncateUTF8ToByteSize(const std::string& input,
+                                        const size_t byte_size,
+                                        std::string* output);
+
+// Trims any whitespace from either end of the input string.
+//
+// The StringPiece versions return a substring referencing the input buffer.
+// The ASCII versions look only for ASCII whitespace.
+//
+// The std::string versions return where whitespace was found.
+// NOTE: Safe to use the same variable for both input and output.
+BASE_EXPORT TrimPositions TrimWhitespace(const string16& input,
+                                         TrimPositions positions,
+                                         string16* output);
+BASE_EXPORT StringPiece16 TrimWhitespace(StringPiece16 input,
+                                         TrimPositions positions);
+BASE_EXPORT TrimPositions TrimWhitespaceASCII(const std::string& input,
+                                              TrimPositions positions,
+                                              std::string* output);
+BASE_EXPORT StringPiece TrimWhitespaceASCII(StringPiece input,
+                                            TrimPositions positions);
+
+// Searches for CR or LF characters.  Removes all contiguous whitespace
+// strings that contain them.  This is useful when trying to deal with text
+// copied from terminals.
+// Returns |text|, with the following three transformations:
+// (1) Leading and trailing whitespace is trimmed.
+// (2) If |trim_sequences_with_line_breaks| is true, any other whitespace
+//     sequences containing a CR or LF are trimmed.
+// (3) All other whitespace sequences are converted to single spaces.
+BASE_EXPORT string16 CollapseWhitespace(
+    const string16& text,
+    bool trim_sequences_with_line_breaks);
+BASE_EXPORT std::string CollapseWhitespaceASCII(
+    const std::string& text,
+    bool trim_sequences_with_line_breaks);
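+//
+// For example (illustrative), given text = "  a \n b ":
+//   CollapseWhitespaceASCII(text, false) returns "a b", while
+//   CollapseWhitespaceASCII(text, true) returns "ab", because the whitespace
+//   sequence containing the LF is removed entirely.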
+
+// Returns true if |input| is empty or contains only characters found in
+// |characters|.
+BASE_EXPORT bool ContainsOnlyChars(StringPiece input, StringPiece characters);
+BASE_EXPORT bool ContainsOnlyChars(StringPiece16 input,
+                                   StringPiece16 characters);
+
+// Returns true if the specified string matches the criteria. How can a wide
+// string be 8-bit or UTF8? It contains only characters that are < 256 (in the
+// first case) or characters that use only 8-bits and whose 8-bit
+// representation looks like a UTF-8 string (the second case).
+//
+// Note that IsStringUTF8 checks not only if the input is structurally
+// valid but also if it doesn't contain any non-character codepoint
+// (e.g. U+FFFE). It's done on purpose because all the existing callers want
+// to have the maximum 'discriminating' power from other encodings. If
+// there's a use case for just checking the structural validity, we have to
+// add a new function for that.
+//
+// IsStringASCII assumes the input is likely all ASCII, and does not exit
+// early if it is not.
+BASE_EXPORT bool IsStringUTF8(StringPiece str);
+BASE_EXPORT bool IsStringASCII(StringPiece str);
+BASE_EXPORT bool IsStringASCII(StringPiece16 str);
+#if defined(WCHAR_T_IS_UTF32)
+BASE_EXPORT bool IsStringASCII(WStringPiece str);
+#endif
+
+// Compare the lower-case form of the given string against the given
+// previously-lower-cased ASCII string (typically a constant).
+BASE_EXPORT bool LowerCaseEqualsASCII(StringPiece str,
+                                      StringPiece lowercase_ascii);
+BASE_EXPORT bool LowerCaseEqualsASCII(StringPiece16 str,
+                                      StringPiece lowercase_ascii);
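+//
+// For example (illustrative):
+//   LowerCaseEqualsASCII(header_value, "chunked")
+// avoids lower-casing |header_value| into a temporary string first.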
+
+// Performs a case-sensitive string compare of the given 16-bit string against
+// the given 8-bit ASCII string (typically a constant). The behavior is
+// undefined if the |ascii| string is not ASCII.
+BASE_EXPORT bool EqualsASCII(StringPiece16 str, StringPiece ascii);
+
+// Indicates case sensitivity of comparisons. Only ASCII case insensitivity
+// is supported. Full Unicode case-insensitive conversions would need to go in
+// base/i18n so it can use ICU.
+//
+// If you need to do Unicode-aware case-insensitive StartsWith/EndsWith, it's
+// best to call base::i18n::ToLower() or base::i18n::FoldCase() (see
+// base/i18n/case_conversion.h for usage advice) on the arguments, and then
+// pass the results to a case-sensitive comparison.
+enum class CompareCase {
+  SENSITIVE,
+  INSENSITIVE_ASCII,
+};
+
+BASE_EXPORT bool StartsWith(StringPiece str,
+                            StringPiece search_for,
+                            CompareCase case_sensitivity);
+BASE_EXPORT bool StartsWith(StringPiece16 str,
+                            StringPiece16 search_for,
+                            CompareCase case_sensitivity);
+BASE_EXPORT bool EndsWith(StringPiece str,
+                          StringPiece search_for,
+                          CompareCase case_sensitivity);
+BASE_EXPORT bool EndsWith(StringPiece16 str,
+                          StringPiece16 search_for,
+                          CompareCase case_sensitivity);
+
+// Determines the type of ASCII character, independent of locale (the C
+// library versions will change based on locale).
+template <typename Char>
+inline bool IsAsciiWhitespace(Char c) {
+  return c == ' ' || c == '\r' || c == '\n' || c == '\t';
+}
+template <typename Char>
+inline bool IsAsciiAlpha(Char c) {
+  return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z');
+}
+template <typename Char>
+inline bool IsAsciiUpper(Char c) {
+  return c >= 'A' && c <= 'Z';
+}
+template <typename Char>
+inline bool IsAsciiLower(Char c) {
+  return c >= 'a' && c <= 'z';
+}
+template <typename Char>
+inline bool IsAsciiDigit(Char c) {
+  return c >= '0' && c <= '9';
+}
+
+template <typename Char>
+inline bool IsHexDigit(Char c) {
+  return (c >= '0' && c <= '9') ||
+         (c >= 'A' && c <= 'F') ||
+         (c >= 'a' && c <= 'f');
+}
+
+// Returns the integer corresponding to the given hex character. For example:
+//    '4' -> 4
+//    'a' -> 10
+//    'B' -> 11
+// Assumes the input is a valid hex character. DCHECKs in debug builds if not.
+BASE_EXPORT char HexDigitToInt(wchar_t c);
+
+// Returns true if it's a Unicode whitespace character.
+BASE_EXPORT bool IsUnicodeWhitespace(wchar_t c);
+
+// Return a byte string in human-readable format with a unit suffix. Not
+// appropriate for use in any UI; use of FormatBytes and friends in ui/base is
+// highly recommended instead. TODO(avi): Figure out how to get callers to use
+// FormatBytes instead; remove this.
+BASE_EXPORT string16 FormatBytesUnlocalized(int64_t bytes);
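+//
+// For example (illustrative): FormatBytesUnlocalized(1536) returns "1.5 kB",
+// and FormatBytesUnlocalized(512) returns "512 B".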
+
+// Starting at |start_offset| (usually 0), replace the first instance of
+// |find_this| with |replace_with|.
+BASE_EXPORT void ReplaceFirstSubstringAfterOffset(
+    base::string16* str,
+    size_t start_offset,
+    StringPiece16 find_this,
+    StringPiece16 replace_with);
+BASE_EXPORT void ReplaceFirstSubstringAfterOffset(
+    std::string* str,
+    size_t start_offset,
+    StringPiece find_this,
+    StringPiece replace_with);
+
+// Starting at |start_offset| (usually 0), look through |str| and replace all
+// instances of |find_this| with |replace_with|.
+//
+// This does entire substrings; use std::replace in <algorithm> for single
+// characters, for example:
+//   std::replace(str.begin(), str.end(), 'a', 'b');
+BASE_EXPORT void ReplaceSubstringsAfterOffset(
+    string16* str,
+    size_t start_offset,
+    StringPiece16 find_this,
+    StringPiece16 replace_with);
+BASE_EXPORT void ReplaceSubstringsAfterOffset(
+    std::string* str,
+    size_t start_offset,
+    StringPiece find_this,
+    StringPiece replace_with);
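+//
+// For example (illustrative):
+//   std::string s("aaa");
+//   ReplaceSubstringsAfterOffset(&s, 0, "a", "bb");       // s == "bbbbbb"
+//   ReplaceFirstSubstringAfterOffset(&s, 0, "bb", "c");   // s == "cbbbb"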
+
+// Reserves enough memory in |str| to accommodate |length_with_null| characters,
+// sets the size of |str| to |length_with_null - 1| characters, and returns a
+// pointer to the underlying contiguous array of characters.  This is typically
+// used when calling a function that writes results into a character array, but
+// the caller wants the data to be managed by a string-like object.  It is
+// convenient in that it can be used inline in the call, and fast in that it
+// avoids copying the results of the call from a char* into a string.
+//
+// |length_with_null| must be at least 2, since otherwise the underlying string
+// would have size 0, and trying to access &((*str)[0]) in that case can result
+// in a number of problems.
+//
+// Internally, this takes linear time because the resize() call 0-fills the
+// underlying array for potentially all
+// (|length_with_null - 1| * sizeof(string_type::value_type)) bytes.  Ideally we
+// could avoid this aspect of the resize() call, as we expect the caller to
+// immediately write over this memory, but there is no other way to set the size
+// of the string, and not doing that will mean people who access |str| rather
+// than str.c_str() will get back a string of whatever size |str| had on entry
+// to this function (probably 0).
+BASE_EXPORT char* WriteInto(std::string* str, size_t length_with_null);
+BASE_EXPORT char16* WriteInto(string16* str, size_t length_with_null);
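+//
+// A usage sketch (|read_data| is a hypothetical C API that writes exactly
+// |len| characters into the supplied buffer):
+//   std::string buffer;
+//   read_data(WriteInto(&buffer, len + 1), len);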
+
+// Does the opposite of SplitString()/SplitStringPiece(). Joins a vector or list
+// of strings into a single string, inserting |separator| (which may be empty)
+// in between all elements.
+//
+// If possible, callers should build a vector of StringPieces and use the
+// StringPiece variant, so that they do not create unnecessary copies of
+// strings. For example, instead of using SplitString, modifying the vector,
+// then using JoinString, use SplitStringPiece followed by JoinString so that no
+// copies of those strings are created until the final join operation.
+//
+// Use StrCat (in base/strings/strcat.h) if you don't need a separator.
+BASE_EXPORT std::string JoinString(const std::vector<std::string>& parts,
+                                   StringPiece separator);
+BASE_EXPORT string16 JoinString(const std::vector<string16>& parts,
+                                StringPiece16 separator);
+BASE_EXPORT std::string JoinString(const std::vector<StringPiece>& parts,
+                                   StringPiece separator);
+BASE_EXPORT string16 JoinString(const std::vector<StringPiece16>& parts,
+                                StringPiece16 separator);
+// Explicit initializer_list overloads are required to break ambiguity when used
+// with a literal initializer list (otherwise the compiler would not be able to
+// decide between the string and StringPiece overloads).
+BASE_EXPORT std::string JoinString(std::initializer_list<StringPiece> parts,
+                                   StringPiece separator);
+BASE_EXPORT string16 JoinString(std::initializer_list<StringPiece16> parts,
+                                StringPiece16 separator);
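+//
+// For example (illustrative):
+//   JoinString({"a", "b", "c"}, ", ") returns "a, b, c".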
+
+// Replace $1-$2-$3..$9 in the format string with values from |subst|.
+// Additionally, any number of consecutive '$' characters is replaced by that
+// number less one. E.g. $$->$, $$$->$$, etc. The offsets parameter here can be
+// NULL. This only allows you to use up to nine replacements.
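+//
+// For example (illustrative), with subst = {"Alice", "Bob"}:
+//   "$2 met $1"  ->  "Bob met Alice"
+//   "$$1"        ->  "$1"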
+BASE_EXPORT string16 ReplaceStringPlaceholders(
+    const string16& format_string,
+    const std::vector<string16>& subst,
+    std::vector<size_t>* offsets);
+
+BASE_EXPORT std::string ReplaceStringPlaceholders(
+    StringPiece format_string,
+    const std::vector<std::string>& subst,
+    std::vector<size_t>* offsets);
+
+// Single-string shortcut for ReplaceStringPlaceholders. |offset| may be NULL.
+BASE_EXPORT string16 ReplaceStringPlaceholders(const string16& format_string,
+                                               const string16& a,
+                                               size_t* offset);
+
+}  // namespace base
+
+#if defined(OS_WIN)
+#include "base/strings/string_util_win.h"
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include "base/strings/string_util_posix.h"
+#else
+#error Define string operations appropriately for your platform
+#endif
+
+#endif  // BASE_STRINGS_STRING_UTIL_H_
diff --git a/base/strings/string_util_constants.cc b/base/strings/string_util_constants.cc
new file mode 100644
index 0000000..aba1b12
--- /dev/null
+++ b/base/strings/string_util_constants.cc
@@ -0,0 +1,67 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/string_util.h"
+
+namespace base {
+
+#define WHITESPACE_UNICODE \
+  0x0009, /* CHARACTER TABULATION */      \
+  0x000A, /* LINE FEED (LF) */            \
+  0x000B, /* LINE TABULATION */           \
+  0x000C, /* FORM FEED (FF) */            \
+  0x000D, /* CARRIAGE RETURN (CR) */      \
+  0x0020, /* SPACE */                     \
+  0x0085, /* NEXT LINE (NEL) */           \
+  0x00A0, /* NO-BREAK SPACE */            \
+  0x1680, /* OGHAM SPACE MARK */          \
+  0x2000, /* EN QUAD */                   \
+  0x2001, /* EM QUAD */                   \
+  0x2002, /* EN SPACE */                  \
+  0x2003, /* EM SPACE */                  \
+  0x2004, /* THREE-PER-EM SPACE */        \
+  0x2005, /* FOUR-PER-EM SPACE */         \
+  0x2006, /* SIX-PER-EM SPACE */          \
+  0x2007, /* FIGURE SPACE */              \
+  0x2008, /* PUNCTUATION SPACE */         \
+  0x2009, /* THIN SPACE */                \
+  0x200A, /* HAIR SPACE */                \
+  0x2028, /* LINE SEPARATOR */            \
+  0x2029, /* PARAGRAPH SEPARATOR */       \
+  0x202F, /* NARROW NO-BREAK SPACE */     \
+  0x205F, /* MEDIUM MATHEMATICAL SPACE */ \
+  0x3000, /* IDEOGRAPHIC SPACE */         \
+  0
+
+const wchar_t kWhitespaceWide[] = {
+  WHITESPACE_UNICODE
+};
+
+const char16 kWhitespaceUTF16[] = {
+  WHITESPACE_UNICODE
+};
+
+const char kWhitespaceASCII[] = {
+  0x09,    // CHARACTER TABULATION
+  0x0A,    // LINE FEED (LF)
+  0x0B,    // LINE TABULATION
+  0x0C,    // FORM FEED (FF)
+  0x0D,    // CARRIAGE RETURN (CR)
+  0x20,    // SPACE
+  0
+};
+
+const char16 kWhitespaceASCIIAs16[] = {
+  0x09,    // CHARACTER TABULATION
+  0x0A,    // LINE FEED (LF)
+  0x0B,    // LINE TABULATION
+  0x0C,    // FORM FEED (FF)
+  0x0D,    // CARRIAGE RETURN (CR)
+  0x20,    // SPACE
+  0
+};
+
+const char kUtf8ByteOrderMark[] = "\xEF\xBB\xBF";
+
+}  // namespace base
diff --git a/base/strings/string_util_posix.h b/base/strings/string_util_posix.h
new file mode 100644
index 0000000..8299118
--- /dev/null
+++ b/base/strings/string_util_posix.h
@@ -0,0 +1,37 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_STRING_UTIL_POSIX_H_
+#define BASE_STRINGS_STRING_UTIL_POSIX_H_
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <string.h>
+#include <wchar.h>
+
+#include "base/logging.h"
+
+namespace base {
+
+// Chromium code style is to not use malloc'd strings; this is only for use
+// for interaction with APIs that require it.
+inline char* strdup(const char* str) {
+  return ::strdup(str);
+}
+
+inline int vsnprintf(char* buffer, size_t size,
+                     const char* format, va_list arguments) {
+  return ::vsnprintf(buffer, size, format, arguments);
+}
+
+inline int vswprintf(wchar_t* buffer, size_t size,
+                     const wchar_t* format, va_list arguments) {
+  DCHECK(IsWprintfFormatPortable(format));
+  return ::vswprintf(buffer, size, format, arguments);
+}
+
+}  // namespace base
+
+#endif  // BASE_STRINGS_STRING_UTIL_POSIX_H_
diff --git a/base/strings/string_util_unittest.cc b/base/strings/string_util_unittest.cc
new file mode 100644
index 0000000..509889e
--- /dev/null
+++ b/base/strings/string_util_unittest.cc
@@ -0,0 +1,1379 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/string_util.h"
+
+#include <math.h>
+#include <stdarg.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <algorithm>
+
+#include "base/macros.h"
+#include "base/strings/string16.h"
+#include "base/strings/utf_string_conversions.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::ElementsAre;
+
+namespace base {
+
+static const struct trim_case {
+  const wchar_t* input;
+  const TrimPositions positions;
+  const wchar_t* output;
+  const TrimPositions return_value;
+} trim_cases[] = {
+  {L" Google Video ", TRIM_LEADING, L"Google Video ", TRIM_LEADING},
+  {L" Google Video ", TRIM_TRAILING, L" Google Video", TRIM_TRAILING},
+  {L" Google Video ", TRIM_ALL, L"Google Video", TRIM_ALL},
+  {L"Google Video", TRIM_ALL, L"Google Video", TRIM_NONE},
+  {L"", TRIM_ALL, L"", TRIM_NONE},
+  {L"  ", TRIM_LEADING, L"", TRIM_LEADING},
+  {L"  ", TRIM_TRAILING, L"", TRIM_TRAILING},
+  {L"  ", TRIM_ALL, L"", TRIM_ALL},
+  {L"\t\rTest String\n", TRIM_ALL, L"Test String", TRIM_ALL},
+  {L"\x2002Test String\x00A0\x3000", TRIM_ALL, L"Test String", TRIM_ALL},
+};
+
+static const struct trim_case_ascii {
+  const char* input;
+  const TrimPositions positions;
+  const char* output;
+  const TrimPositions return_value;
+} trim_cases_ascii[] = {
+  {" Google Video ", TRIM_LEADING, "Google Video ", TRIM_LEADING},
+  {" Google Video ", TRIM_TRAILING, " Google Video", TRIM_TRAILING},
+  {" Google Video ", TRIM_ALL, "Google Video", TRIM_ALL},
+  {"Google Video", TRIM_ALL, "Google Video", TRIM_NONE},
+  {"", TRIM_ALL, "", TRIM_NONE},
+  {"  ", TRIM_LEADING, "", TRIM_LEADING},
+  {"  ", TRIM_TRAILING, "", TRIM_TRAILING},
+  {"  ", TRIM_ALL, "", TRIM_ALL},
+  {"\t\rTest String\n", TRIM_ALL, "Test String", TRIM_ALL},
+};
+
+namespace {
+
+// Helper used to test TruncateUTF8ToByteSize.
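+// Returns true iff the call changed the string's length, i.e. truncated it.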
+bool Truncated(const std::string& input,
+               const size_t byte_size,
+               std::string* output) {
+  size_t prev = input.length();
+  TruncateUTF8ToByteSize(input, byte_size, output);
+  return prev != output->length();
+}
+
+}  // namespace
+
+TEST(StringUtilTest, TruncateUTF8ToByteSize) {
+  std::string output;
+
+  // Empty strings and invalid byte_size arguments
+  EXPECT_FALSE(Truncated(std::string(), 0, &output));
+  EXPECT_EQ(output, "");
+  EXPECT_TRUE(Truncated("\xe1\x80\xbf", 0, &output));
+  EXPECT_EQ(output, "");
+  EXPECT_FALSE(Truncated("\xe1\x80\xbf", static_cast<size_t>(-1), &output));
+  EXPECT_FALSE(Truncated("\xe1\x80\xbf", 4, &output));
+
+  // Test correct truncation of valid UTF-8.
+  EXPECT_TRUE(Truncated("abc", 2, &output));
+  EXPECT_EQ(output, "ab");
+  EXPECT_TRUE(Truncated("\xc2\x81\xc2\x81", 2, &output));
+  EXPECT_EQ(output.compare("\xc2\x81"), 0);
+  EXPECT_TRUE(Truncated("\xc2\x81\xc2\x81", 3, &output));
+  EXPECT_EQ(output.compare("\xc2\x81"), 0);
+  EXPECT_FALSE(Truncated("\xc2\x81\xc2\x81", 4, &output));
+  EXPECT_EQ(output.compare("\xc2\x81\xc2\x81"), 0);
+
+  {
+    const char array[] = "\x00\x00\xc2\x81\xc2\x81";
+    const std::string array_string(array, arraysize(array));
+    EXPECT_TRUE(Truncated(array_string, 4, &output));
+    EXPECT_EQ(output.compare(std::string("\x00\x00\xc2\x81", 4)), 0);
+  }
+
+  {
+    const char array[] = "\x00\xc2\x81\xc2\x81";
+    const std::string array_string(array, arraysize(array));
+    EXPECT_TRUE(Truncated(array_string, 4, &output));
+    EXPECT_EQ(output.compare(std::string("\x00\xc2\x81", 3)), 0);
+  }
+
+  // Testing invalid UTF8
+  EXPECT_TRUE(Truncated("\xed\xa0\x80\xed\xbf\xbf", 6, &output));
+  EXPECT_EQ(output.compare(""), 0);
+  EXPECT_TRUE(Truncated("\xed\xa0\x8f", 3, &output));
+  EXPECT_EQ(output.compare(""), 0);
+  EXPECT_TRUE(Truncated("\xed\xbf\xbf", 3, &output));
+  EXPECT_EQ(output.compare(""), 0);
+
+  // Testing invalid UTF8 mixed with valid UTF8
+  EXPECT_FALSE(Truncated("\xe1\x80\xbf", 3, &output));
+  EXPECT_EQ(output.compare("\xe1\x80\xbf"), 0);
+  EXPECT_FALSE(Truncated("\xf1\x80\xa0\xbf", 4, &output));
+  EXPECT_EQ(output.compare("\xf1\x80\xa0\xbf"), 0);
+  EXPECT_FALSE(Truncated("a\xc2\x81\xe1\x80\xbf\xf1\x80\xa0\xbf",
+              10, &output));
+  EXPECT_EQ(output.compare("a\xc2\x81\xe1\x80\xbf\xf1\x80\xa0\xbf"), 0);
+  EXPECT_TRUE(Truncated("a\xc2\x81\xe1\x80\xbf\xf1""a""\x80\xa0",
+              10, &output));
+  EXPECT_EQ(output.compare("a\xc2\x81\xe1\x80\xbf\xf1""a"), 0);
+  EXPECT_FALSE(Truncated("\xef\xbb\xbf" "abc", 6, &output));
+  EXPECT_EQ(output.compare("\xef\xbb\xbf" "abc"), 0);
+
+  // Overlong sequences
+  EXPECT_TRUE(Truncated("\xc0\x80", 2, &output));
+  EXPECT_EQ(output.compare(""), 0);
+  EXPECT_TRUE(Truncated("\xc1\x80\xc1\x81", 4, &output));
+  EXPECT_EQ(output.compare(""), 0);
+  EXPECT_TRUE(Truncated("\xe0\x80\x80", 3, &output));
+  EXPECT_EQ(output.compare(""), 0);
+  EXPECT_TRUE(Truncated("\xe0\x82\x80", 3, &output));
+  EXPECT_EQ(output.compare(""), 0);
+  EXPECT_TRUE(Truncated("\xe0\x9f\xbf", 3, &output));
+  EXPECT_EQ(output.compare(""), 0);
+  EXPECT_TRUE(Truncated("\xf0\x80\x80\x8D", 4, &output));
+  EXPECT_EQ(output.compare(""), 0);
+  EXPECT_TRUE(Truncated("\xf0\x80\x82\x91", 4, &output));
+  EXPECT_EQ(output.compare(""), 0);
+  EXPECT_TRUE(Truncated("\xf0\x80\xa0\x80", 4, &output));
+  EXPECT_EQ(output.compare(""), 0);
+  EXPECT_TRUE(Truncated("\xf0\x8f\xbb\xbf", 4, &output));
+  EXPECT_EQ(output.compare(""), 0);
+  EXPECT_TRUE(Truncated("\xf8\x80\x80\x80\xbf", 5, &output));
+  EXPECT_EQ(output.compare(""), 0);
+  EXPECT_TRUE(Truncated("\xfc\x80\x80\x80\xa0\xa5", 6, &output));
+  EXPECT_EQ(output.compare(""), 0);
+
+  // Beyond U+10FFFF (the upper limit of Unicode codespace)
+  EXPECT_TRUE(Truncated("\xf4\x90\x80\x80", 4, &output));
+  EXPECT_EQ(output.compare(""), 0);
+  EXPECT_TRUE(Truncated("\xf8\xa0\xbf\x80\xbf", 5, &output));
+  EXPECT_EQ(output.compare(""), 0);
+  EXPECT_TRUE(Truncated("\xfc\x9c\xbf\x80\xbf\x80", 6, &output));
+  EXPECT_EQ(output.compare(""), 0);
+
+  // BOMs in UTF-16(BE|LE) and UTF-32(BE|LE)
+  EXPECT_TRUE(Truncated("\xfe\xff", 2, &output));
+  EXPECT_EQ(output.compare(""), 0);
+  EXPECT_TRUE(Truncated("\xff\xfe", 2, &output));
+  EXPECT_EQ(output.compare(""), 0);
+
+  {
+    const char array[] = "\x00\x00\xfe\xff";
+    const std::string array_string(array, arraysize(array));
+    EXPECT_TRUE(Truncated(array_string, 4, &output));
+    EXPECT_EQ(output.compare(std::string("\x00\x00", 2)), 0);
+  }
+
+  // Variants on the previous test
+  {
+    const char array[] = "\xff\xfe\x00\x00";
+    const std::string array_string(array, 4);
+    EXPECT_FALSE(Truncated(array_string, 4, &output));
+    EXPECT_EQ(output.compare(std::string("\xff\xfe\x00\x00", 4)), 0);
+  }
+  {
+    const char array[] = "\xff\x00\x00\xfe";
+    const std::string array_string(array, arraysize(array));
+    EXPECT_TRUE(Truncated(array_string, 4, &output));
+    EXPECT_EQ(output.compare(std::string("\xff\x00\x00", 3)), 0);
+  }
+
+  // Non-characters : U+xxFFF[EF] where xx is 0x00 through 0x10 and <FDD0,FDEF>
+  EXPECT_TRUE(Truncated("\xef\xbf\xbe", 3, &output));
+  EXPECT_EQ(output.compare(""), 0);
+  EXPECT_TRUE(Truncated("\xf0\x8f\xbf\xbe", 4, &output));
+  EXPECT_EQ(output.compare(""), 0);
+  EXPECT_TRUE(Truncated("\xf3\xbf\xbf\xbf", 4, &output));
+  EXPECT_EQ(output.compare(""), 0);
+  EXPECT_TRUE(Truncated("\xef\xb7\x90", 3, &output));
+  EXPECT_EQ(output.compare(""), 0);
+  EXPECT_TRUE(Truncated("\xef\xb7\xaf", 3, &output));
+  EXPECT_EQ(output.compare(""), 0);
+
+  // Strings in legacy encodings. Some happen to be valid as UTF-8, but in
+  // real data most are not.
+  EXPECT_TRUE(Truncated("caf\xe9", 4, &output));
+  EXPECT_EQ(output.compare("caf"), 0);
+  EXPECT_TRUE(Truncated("\xb0\xa1\xb0\xa2", 4, &output));
+  EXPECT_EQ(output.compare(""), 0);
+  EXPECT_FALSE(Truncated("\xa7\x41\xa6\x6e", 4, &output));
+  EXPECT_EQ(output.compare("\xa7\x41\xa6\x6e"), 0);
+  EXPECT_TRUE(Truncated("\xa7\x41\xa6\x6e\xd9\xee\xe4\xee", 7,
+              &output));
+  EXPECT_EQ(output.compare("\xa7\x41\xa6\x6e"), 0);
+
+  // Testing using the same string as input and output.
+  EXPECT_FALSE(Truncated(output, 4, &output));
+  EXPECT_EQ(output.compare("\xa7\x41\xa6\x6e"), 0);
+  EXPECT_TRUE(Truncated(output, 3, &output));
+  EXPECT_EQ(output.compare("\xa7\x41"), 0);
+
+  // "abc" with U+201[CD] in windows-125[0-8]
+  EXPECT_TRUE(Truncated("\x93" "abc\x94", 5, &output));
+  EXPECT_EQ(output.compare("\x93" "abc"), 0);
+
+  // U+0639 U+064E U+0644 U+064E in ISO-8859-6
+  EXPECT_TRUE(Truncated("\xd9\xee\xe4\xee", 4, &output));
+  EXPECT_EQ(output.compare(""), 0);
+
+  // U+03B3 U+03B5 U+03B9 U+03AC in ISO-8859-7
+  EXPECT_TRUE(Truncated("\xe3\xe5\xe9\xdC", 4, &output));
+  EXPECT_EQ(output.compare(""), 0);
+}
+
+TEST(StringUtilTest, TrimWhitespace) {
+  string16 output;  // Allow contents to carry over to the next test case
+  for (size_t i = 0; i < arraysize(trim_cases); ++i) {
+    const trim_case& value = trim_cases[i];
+    EXPECT_EQ(value.return_value,
+              TrimWhitespace(WideToUTF16(value.input), value.positions,
+                             &output));
+    EXPECT_EQ(WideToUTF16(value.output), output);
+  }
+
+  // Test that TrimWhitespace() can take the same string for input and output
+  output = ASCIIToUTF16("  This is a test \r\n");
+  EXPECT_EQ(TRIM_ALL, TrimWhitespace(output, TRIM_ALL, &output));
+  EXPECT_EQ(ASCIIToUTF16("This is a test"), output);
+
+  // Once more, but with a string of whitespace
+  output = ASCIIToUTF16("  \r\n");
+  EXPECT_EQ(TRIM_ALL, TrimWhitespace(output, TRIM_ALL, &output));
+  EXPECT_EQ(string16(), output);
+
+  std::string output_ascii;
+  for (size_t i = 0; i < arraysize(trim_cases_ascii); ++i) {
+    const trim_case_ascii& value = trim_cases_ascii[i];
+    EXPECT_EQ(value.return_value,
+              TrimWhitespaceASCII(value.input, value.positions, &output_ascii));
+    EXPECT_EQ(value.output, output_ascii);
+  }
+}
+
+static const struct collapse_case {
+  const wchar_t* input;
+  const bool trim;
+  const wchar_t* output;
+} collapse_cases[] = {
+  {L" Google Video ", false, L"Google Video"},
+  {L"Google Video", false, L"Google Video"},
+  {L"", false, L""},
+  {L"  ", false, L""},
+  {L"\t\rTest String\n", false, L"Test String"},
+  {L"\x2002Test String\x00A0\x3000", false, L"Test String"},
+  {L"    Test     \n  \t String    ", false, L"Test String"},
+  {L"\x2002Test\x1680 \x2028 \tString\x00A0\x3000", false, L"Test String"},
+  {L"   Test String", false, L"Test String"},
+  {L"Test String    ", false, L"Test String"},
+  {L"Test String", false, L"Test String"},
+  {L"", true, L""},
+  {L"\n", true, L""},
+  {L"  \r  ", true, L""},
+  {L"\nFoo", true, L"Foo"},
+  {L"\r  Foo  ", true, L"Foo"},
+  {L" Foo bar ", true, L"Foo bar"},
+  {L"  \tFoo  bar  \n", true, L"Foo bar"},
+  {L" a \r b\n c \r\n d \t\re \t f \n ", true, L"abcde f"},
+};
+
+TEST(StringUtilTest, CollapseWhitespace) {
+  for (size_t i = 0; i < arraysize(collapse_cases); ++i) {
+    const collapse_case& value = collapse_cases[i];
+    EXPECT_EQ(WideToUTF16(value.output),
+              CollapseWhitespace(WideToUTF16(value.input), value.trim));
+  }
+}
+
+static const struct collapse_case_ascii {
+  const char* input;
+  const bool trim;
+  const char* output;
+} collapse_cases_ascii[] = {
+  {" Google Video ", false, "Google Video"},
+  {"Google Video", false, "Google Video"},
+  {"", false, ""},
+  {"  ", false, ""},
+  {"\t\rTest String\n", false, "Test String"},
+  {"    Test     \n  \t String    ", false, "Test String"},
+  {"   Test String", false, "Test String"},
+  {"Test String    ", false, "Test String"},
+  {"Test String", false, "Test String"},
+  {"", true, ""},
+  {"\n", true, ""},
+  {"  \r  ", true, ""},
+  {"\nFoo", true, "Foo"},
+  {"\r  Foo  ", true, "Foo"},
+  {" Foo bar ", true, "Foo bar"},
+  {"  \tFoo  bar  \n", true, "Foo bar"},
+  {" a \r b\n c \r\n d \t\re \t f \n ", true, "abcde f"},
+};
+
+TEST(StringUtilTest, CollapseWhitespaceASCII) {
+  for (size_t i = 0; i < arraysize(collapse_cases_ascii); ++i) {
+    const collapse_case_ascii& value = collapse_cases_ascii[i];
+    EXPECT_EQ(value.output, CollapseWhitespaceASCII(value.input, value.trim));
+  }
+}
+
+TEST(StringUtilTest, IsStringUTF8) {
+  EXPECT_TRUE(IsStringUTF8("abc"));
+  EXPECT_TRUE(IsStringUTF8("\xc2\x81"));
+  EXPECT_TRUE(IsStringUTF8("\xe1\x80\xbf"));
+  EXPECT_TRUE(IsStringUTF8("\xf1\x80\xa0\xbf"));
+  EXPECT_TRUE(IsStringUTF8("a\xc2\x81\xe1\x80\xbf\xf1\x80\xa0\xbf"));
+  EXPECT_TRUE(IsStringUTF8("\xef\xbb\xbf" "abc"));  // UTF-8 BOM
+
+  // surrogate code points
+  EXPECT_FALSE(IsStringUTF8("\xed\xa0\x80\xed\xbf\xbf"));
+  EXPECT_FALSE(IsStringUTF8("\xed\xa0\x8f"));
+  EXPECT_FALSE(IsStringUTF8("\xed\xbf\xbf"));
+
+  // overlong sequences
+  EXPECT_FALSE(IsStringUTF8("\xc0\x80"));  // U+0000
+  EXPECT_FALSE(IsStringUTF8("\xc1\x80\xc1\x81"));  // "AB"
+  EXPECT_FALSE(IsStringUTF8("\xe0\x80\x80"));  // U+0000
+  EXPECT_FALSE(IsStringUTF8("\xe0\x82\x80"));  // U+0080
+  EXPECT_FALSE(IsStringUTF8("\xe0\x9f\xbf"));  // U+07ff
+  EXPECT_FALSE(IsStringUTF8("\xf0\x80\x80\x8D"));  // U+000D
+  EXPECT_FALSE(IsStringUTF8("\xf0\x80\x82\x91"));  // U+0091
+  EXPECT_FALSE(IsStringUTF8("\xf0\x80\xa0\x80"));  // U+0800
+  EXPECT_FALSE(IsStringUTF8("\xf0\x8f\xbb\xbf"));  // U+FEFF (BOM)
+  EXPECT_FALSE(IsStringUTF8("\xf8\x80\x80\x80\xbf"));  // U+003F
+  EXPECT_FALSE(IsStringUTF8("\xfc\x80\x80\x80\xa0\xa5"));  // U+00A5
+
+  // Beyond U+10FFFF (the upper limit of Unicode codespace)
+  EXPECT_FALSE(IsStringUTF8("\xf4\x90\x80\x80"));  // U+110000
+  EXPECT_FALSE(IsStringUTF8("\xf8\xa0\xbf\x80\xbf"));  // 5 bytes
+  EXPECT_FALSE(IsStringUTF8("\xfc\x9c\xbf\x80\xbf\x80"));  // 6 bytes
+
+  // BOMs in UTF-16(BE|LE) and UTF-32(BE|LE)
+  EXPECT_FALSE(IsStringUTF8("\xfe\xff"));
+  EXPECT_FALSE(IsStringUTF8("\xff\xfe"));
+  EXPECT_FALSE(IsStringUTF8(std::string("\x00\x00\xfe\xff", 4)));
+  EXPECT_FALSE(IsStringUTF8("\xff\xfe\x00\x00"));
+
+  // Non-characters : U+xxFFF[EF] where xx is 0x00 through 0x10 and <FDD0,FDEF>
+  EXPECT_FALSE(IsStringUTF8("\xef\xbf\xbe"));  // U+FFFE)
+  EXPECT_FALSE(IsStringUTF8("\xf0\x8f\xbf\xbe"));  // U+1FFFE
+  EXPECT_FALSE(IsStringUTF8("\xf3\xbf\xbf\xbf"));  // U+10FFFF
+  EXPECT_FALSE(IsStringUTF8("\xef\xb7\x90"));  // U+FDD0
+  EXPECT_FALSE(IsStringUTF8("\xef\xb7\xaf"));  // U+FDEF
+  // Strings in legacy encodings. We can certainly make up strings
+  // in a legacy encoding that are valid in UTF-8, but in real data,
+  // most of them are invalid as UTF-8.
+  EXPECT_FALSE(IsStringUTF8("caf\xe9"));  // cafe with U+00E9 in ISO-8859-1
+  EXPECT_FALSE(IsStringUTF8("\xb0\xa1\xb0\xa2"));  // U+AC00, U+AC001 in EUC-KR
+  EXPECT_FALSE(IsStringUTF8("\xa7\x41\xa6\x6e"));  // U+4F60 U+597D in Big5
+  // "abc" with U+201[CD] in windows-125[0-8]
+  EXPECT_FALSE(IsStringUTF8("\x93" "abc\x94"));
+  // U+0639 U+064E U+0644 U+064E in ISO-8859-6
+  EXPECT_FALSE(IsStringUTF8("\xd9\xee\xe4\xee"));
+  // U+03B3 U+03B5 U+03B9 U+03AC in ISO-8859-7
+  EXPECT_FALSE(IsStringUTF8("\xe3\xe5\xe9\xdC"));
+
+  // Check that embedded NULs are supported. The first uses the canonical UTF-8
+  // representation, and the second uses a 2-byte sequence. The second version
+  // is invalid UTF-8 since UTF-8 states that the shortest encoding for a
+  // given codepoint must be used.
+  static const char kEmbeddedNull[] = "embedded\0null";
+  EXPECT_TRUE(IsStringUTF8(
+      std::string(kEmbeddedNull, sizeof(kEmbeddedNull))));
+  EXPECT_FALSE(IsStringUTF8("embedded\xc0\x80U+0000"));
+}
+
+TEST(StringUtilTest, IsStringASCII) {
+  static char char_ascii[] =
+      "0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF";
+  static char16 char16_ascii[] = {
+      '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', 'A',
+      'B', 'C', 'D', 'E', 'F', '0', '1', '2', '3', '4', '5', '6',
+      '7', '8', '9', '0', 'A', 'B', 'C', 'D', 'E', 'F', 0 };
+  static std::wstring wchar_ascii(
+      L"0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF");
+
+  // Test a variety of the fragment start positions and lengths in order to
+  // make sure that bit masking in IsStringASCII works correctly.
+  // Also, test that a non-ASCII character will be detected regardless of its
+  // position inside the string.
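+  // (IsStringASCII may scan a machine word at a time, so unaligned prefixes
+  // and short tails are the interesting cases to cover.)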
+  {
+    const size_t string_length = arraysize(char_ascii) - 1;
+    for (size_t offset = 0; offset < 8; ++offset) {
+      for (size_t len = 0, max_len = string_length - offset; len < max_len;
+           ++len) {
+        EXPECT_TRUE(IsStringASCII(StringPiece(char_ascii + offset, len)));
+        for (size_t char_pos = offset; char_pos < len; ++char_pos) {
+          char_ascii[char_pos] |= '\x80';
+          EXPECT_FALSE(IsStringASCII(StringPiece(char_ascii + offset, len)));
+          char_ascii[char_pos] &= ~'\x80';
+        }
+      }
+    }
+  }
+
+  {
+    const size_t string_length = arraysize(char16_ascii) - 1;
+    for (size_t offset = 0; offset < 4; ++offset) {
+      for (size_t len = 0, max_len = string_length - offset; len < max_len;
+           ++len) {
+        EXPECT_TRUE(IsStringASCII(StringPiece16(char16_ascii + offset, len)));
+        for (size_t char_pos = offset; char_pos < len; ++char_pos) {
+          char16_ascii[char_pos] |= 0x80;
+          EXPECT_FALSE(
+              IsStringASCII(StringPiece16(char16_ascii + offset, len)));
+          char16_ascii[char_pos] &= ~0x80;
+          // Also test when the upper half is non-zero.
+          char16_ascii[char_pos] |= 0x100;
+          EXPECT_FALSE(
+              IsStringASCII(StringPiece16(char16_ascii + offset, len)));
+          char16_ascii[char_pos] &= ~0x100;
+        }
+      }
+    }
+  }
+
+  {
+    const size_t string_length = wchar_ascii.length();
+    for (size_t len = 0; len < string_length; ++len) {
+      EXPECT_TRUE(IsStringASCII(wchar_ascii.substr(0, len)));
+      for (size_t char_pos = 0; char_pos < len; ++char_pos) {
+        wchar_ascii[char_pos] |= 0x80;
+        EXPECT_FALSE(
+            IsStringASCII(wchar_ascii.substr(0, len)));
+        wchar_ascii[char_pos] &= ~0x80;
+        wchar_ascii[char_pos] |= 0x100;
+        EXPECT_FALSE(
+            IsStringASCII(wchar_ascii.substr(0, len)));
+        wchar_ascii[char_pos] &= ~0x100;
+#if defined(WCHAR_T_IS_UTF32)
+        wchar_ascii[char_pos] |= 0x10000;
+        EXPECT_FALSE(
+            IsStringASCII(wchar_ascii.substr(0, len)));
+        wchar_ascii[char_pos] &= ~0x10000;
+#endif  // WCHAR_T_IS_UTF32
+      }
+    }
+  }
+}
+
+TEST(StringUtilTest, ConvertASCII) {
+  static const char* const char_cases[] = {
+    "Google Video",
+    "Hello, world\n",
+    "0123ABCDwxyz \a\b\t\r\n!+,.~"
+  };
+
+  static const wchar_t* const wchar_cases[] = {
+    L"Google Video",
+    L"Hello, world\n",
+    L"0123ABCDwxyz \a\b\t\r\n!+,.~"
+  };
+
+  for (size_t i = 0; i < arraysize(char_cases); ++i) {
+    EXPECT_TRUE(IsStringASCII(char_cases[i]));
+    string16 utf16 = ASCIIToUTF16(char_cases[i]);
+    EXPECT_EQ(WideToUTF16(wchar_cases[i]), utf16);
+
+    std::string ascii = UTF16ToASCII(WideToUTF16(wchar_cases[i]));
+    EXPECT_EQ(char_cases[i], ascii);
+  }
+
+  EXPECT_FALSE(IsStringASCII("Google \x80Video"));
+
+  // Convert empty strings.
+  string16 empty16;
+  std::string empty;
+  EXPECT_EQ(empty, UTF16ToASCII(empty16));
+  EXPECT_EQ(empty16, ASCIIToUTF16(empty));
+
+  // Convert strings with an embedded NUL character.
+  const char chars_with_nul[] = "test\0string";
+  const int length_with_nul = arraysize(chars_with_nul) - 1;
+  std::string string_with_nul(chars_with_nul, length_with_nul);
+  string16 string16_with_nul = ASCIIToUTF16(string_with_nul);
+  EXPECT_EQ(static_cast<string16::size_type>(length_with_nul),
+            string16_with_nul.length());
+  std::string narrow_with_nul = UTF16ToASCII(string16_with_nul);
+  EXPECT_EQ(static_cast<std::string::size_type>(length_with_nul),
+            narrow_with_nul.length());
+  EXPECT_EQ(0, string_with_nul.compare(narrow_with_nul));
+}
+
+TEST(StringUtilTest, ToLowerASCII) {
+  EXPECT_EQ('c', ToLowerASCII('C'));
+  EXPECT_EQ('c', ToLowerASCII('c'));
+  EXPECT_EQ('2', ToLowerASCII('2'));
+
+  EXPECT_EQ(static_cast<char16>('c'), ToLowerASCII(static_cast<char16>('C')));
+  EXPECT_EQ(static_cast<char16>('c'), ToLowerASCII(static_cast<char16>('c')));
+  EXPECT_EQ(static_cast<char16>('2'), ToLowerASCII(static_cast<char16>('2')));
+
+  EXPECT_EQ("cc2", ToLowerASCII("Cc2"));
+  EXPECT_EQ(ASCIIToUTF16("cc2"), ToLowerASCII(ASCIIToUTF16("Cc2")));
+}
+
+TEST(StringUtilTest, ToUpperASCII) {
+  EXPECT_EQ('C', ToUpperASCII('C'));
+  EXPECT_EQ('C', ToUpperASCII('c'));
+  EXPECT_EQ('2', ToUpperASCII('2'));
+
+  EXPECT_EQ(static_cast<char16>('C'), ToUpperASCII(static_cast<char16>('C')));
+  EXPECT_EQ(static_cast<char16>('C'), ToUpperASCII(static_cast<char16>('c')));
+  EXPECT_EQ(static_cast<char16>('2'), ToUpperASCII(static_cast<char16>('2')));
+
+  EXPECT_EQ("CC2", ToUpperASCII("Cc2"));
+  EXPECT_EQ(ASCIIToUTF16("CC2"), ToUpperASCII(ASCIIToUTF16("Cc2")));
+}
+
+TEST(StringUtilTest, LowerCaseEqualsASCII) {
+  static const struct {
+    const char*    src_a;
+    const char*    dst;
+  } lowercase_cases[] = {
+    { "FoO", "foo" },
+    { "foo", "foo" },
+    { "FOO", "foo" },
+  };
+
+  for (size_t i = 0; i < arraysize(lowercase_cases); ++i) {
+    EXPECT_TRUE(LowerCaseEqualsASCII(ASCIIToUTF16(lowercase_cases[i].src_a),
+                                     lowercase_cases[i].dst));
+    EXPECT_TRUE(LowerCaseEqualsASCII(lowercase_cases[i].src_a,
+                                     lowercase_cases[i].dst));
+  }
+}
+
+TEST(StringUtilTest, FormatBytesUnlocalized) {
+  static const struct {
+    int64_t bytes;
+    const char* expected;
+  } cases[] = {
+    // Expected behavior: we show one post-decimal digit when we have
+    // under two pre-decimal digits, except in cases where it makes no
+    // sense (zero, or values still expressed in bytes).
+    // Since we switch units once we cross the 1000 mark, this keeps
+    // the display of file sizes or bytes consistently around three
+    // digits.
+    {0, "0 B"},
+    {512, "512 B"},
+    {1024*1024, "1.0 MB"},
+    {1024*1024*1024, "1.0 GB"},
+    {10LL*1024*1024*1024, "10.0 GB"},
+    {99LL*1024*1024*1024, "99.0 GB"},
+    {105LL*1024*1024*1024, "105 GB"},
+    {105LL*1024*1024*1024 + 500LL*1024*1024, "105 GB"},
+    {~(1LL << 63), "8192 PB"},
+
+    {99*1024 + 103, "99.1 kB"},
+    {1024*1024 + 103, "1.0 MB"},
+    {1024*1024 + 205 * 1024, "1.2 MB"},
+    {1024*1024*1024 + (927 * 1024*1024), "1.9 GB"},
+    {10LL*1024*1024*1024, "10.0 GB"},
+    {100LL*1024*1024*1024, "100 GB"},
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    EXPECT_EQ(ASCIIToUTF16(cases[i].expected),
+              FormatBytesUnlocalized(cases[i].bytes));
+  }
+}
+
+TEST(StringUtilTest, ReplaceSubstringsAfterOffset) {
+  static const struct {
+    StringPiece str;
+    size_t start_offset;
+    StringPiece find_this;
+    StringPiece replace_with;
+    StringPiece expected;
+  } cases[] = {
+      {"aaa", 0, "", "b", "aaa"},
+      {"aaa", 1, "", "b", "aaa"},
+      {"aaa", 0, "a", "b", "bbb"},
+      {"aaa", 0, "aa", "b", "ba"},
+      {"aaa", 0, "aa", "bbb", "bbba"},
+      {"aaaaa", 0, "aa", "b", "bba"},
+      {"ababaaababa", 0, "aba", "", "baaba"},
+      {"ababaaababa", 0, "aba", "_", "_baa_ba"},
+      {"ababaaababa", 0, "aba", "__", "__baa__ba"},
+      {"ababaaababa", 0, "aba", "___", "___baa___ba"},
+      {"ababaaababa", 0, "aba", "____", "____baa____ba"},
+      {"ababaaababa", 0, "aba", "_____", "_____baa_____ba"},
+      {"abb", 0, "ab", "a", "ab"},
+      {"Removing some substrings inging", 0, "ing", "", "Remov some substrs "},
+      {"Not found", 0, "x", "0", "Not found"},
+      {"Not found again", 5, "x", "0", "Not found again"},
+      {" Making it much longer ", 0, " ", "Four score and seven years ago",
+       "Four score and seven years agoMakingFour score and seven years agoit"
+       "Four score and seven years agomuchFour score and seven years agolonger"
+       "Four score and seven years ago"},
+      {" Making it much much much much shorter ", 0,
+       "Making it much much much much shorter", "", "  "},
+      {"so much much much much much very much much much shorter", 0, "much ",
+       "", "so very shorter"},
+      {"Invalid offset", 9999, "t", "foobar", "Invalid offset"},
+      {"Replace me only me once", 9, "me ", "", "Replace me only once"},
+      {"abababab", 2, "ab", "c", "abccc"},
+      {"abababab", 1, "ab", "c", "abccc"},
+      {"abababab", 1, "aba", "c", "abcbab"},
+  };
+
+  // base::string16 variant
+  for (const auto& scenario : cases) {
+    string16 str = ASCIIToUTF16(scenario.str);
+    ReplaceSubstringsAfterOffset(&str, scenario.start_offset,
+                                 ASCIIToUTF16(scenario.find_this),
+                                 ASCIIToUTF16(scenario.replace_with));
+    EXPECT_EQ(ASCIIToUTF16(scenario.expected), str);
+  }
+
+  // std::string with insufficient capacity: expansion must realloc the buffer.
+  for (const auto& scenario : cases) {
+    std::string str = scenario.str.as_string();
+    str.shrink_to_fit();  // This is nonbinding, but it's the best we've got.
+    ReplaceSubstringsAfterOffset(&str, scenario.start_offset,
+                                 scenario.find_this, scenario.replace_with);
+    EXPECT_EQ(scenario.expected, str);
+  }
+
+  // std::string with ample capacity: should be possible to grow in-place.
+  for (const auto& scenario : cases) {
+    std::string str = scenario.str.as_string();
+    str.reserve(std::max(scenario.str.length(), scenario.expected.length()) *
+                2);
+
+    ReplaceSubstringsAfterOffset(&str, scenario.start_offset,
+                                 scenario.find_this, scenario.replace_with);
+    EXPECT_EQ(scenario.expected, str);
+  }
+}
+
+TEST(StringUtilTest, ReplaceFirstSubstringAfterOffset) {
+  static const struct {
+    const char* str;
+    string16::size_type start_offset;
+    const char* find_this;
+    const char* replace_with;
+    const char* expected;
+  } cases[] = {
+    {"aaa", 0, "a", "b", "baa"},
+    {"abb", 0, "ab", "a", "ab"},
+    {"Removing some substrings inging", 0, "ing", "",
+      "Remov some substrings inging"},
+    {"Not found", 0, "x", "0", "Not found"},
+    {"Not found again", 5, "x", "0", "Not found again"},
+    {" Making it much longer ", 0, " ", "Four score and seven years ago",
+     "Four score and seven years agoMaking it much longer "},
+    {"Invalid offset", 9999, "t", "foobar", "Invalid offset"},
+    {"Replace me only me once", 4, "me ", "", "Replace only me once"},
+    {"abababab", 2, "ab", "c", "abcabab"},
+  };
+
+  for (size_t i = 0; i < arraysize(cases); i++) {
+    string16 str = ASCIIToUTF16(cases[i].str);
+    ReplaceFirstSubstringAfterOffset(&str, cases[i].start_offset,
+                                     ASCIIToUTF16(cases[i].find_this),
+                                     ASCIIToUTF16(cases[i].replace_with));
+    EXPECT_EQ(ASCIIToUTF16(cases[i].expected), str);
+  }
+}
+
+TEST(StringUtilTest, HexDigitToInt) {
+  EXPECT_EQ(0, HexDigitToInt('0'));
+  EXPECT_EQ(1, HexDigitToInt('1'));
+  EXPECT_EQ(2, HexDigitToInt('2'));
+  EXPECT_EQ(3, HexDigitToInt('3'));
+  EXPECT_EQ(4, HexDigitToInt('4'));
+  EXPECT_EQ(5, HexDigitToInt('5'));
+  EXPECT_EQ(6, HexDigitToInt('6'));
+  EXPECT_EQ(7, HexDigitToInt('7'));
+  EXPECT_EQ(8, HexDigitToInt('8'));
+  EXPECT_EQ(9, HexDigitToInt('9'));
+  EXPECT_EQ(10, HexDigitToInt('A'));
+  EXPECT_EQ(11, HexDigitToInt('B'));
+  EXPECT_EQ(12, HexDigitToInt('C'));
+  EXPECT_EQ(13, HexDigitToInt('D'));
+  EXPECT_EQ(14, HexDigitToInt('E'));
+  EXPECT_EQ(15, HexDigitToInt('F'));
+
+  // Verify the lower case as well.
+  EXPECT_EQ(10, HexDigitToInt('a'));
+  EXPECT_EQ(11, HexDigitToInt('b'));
+  EXPECT_EQ(12, HexDigitToInt('c'));
+  EXPECT_EQ(13, HexDigitToInt('d'));
+  EXPECT_EQ(14, HexDigitToInt('e'));
+  EXPECT_EQ(15, HexDigitToInt('f'));
+}
+
+TEST(StringUtilTest, JoinString) {
+  std::string separator(", ");
+  std::vector<std::string> parts;
+  EXPECT_EQ(std::string(), JoinString(parts, separator));
+
+  parts.push_back(std::string());
+  EXPECT_EQ(std::string(), JoinString(parts, separator));
+  parts.clear();
+
+  parts.push_back("a");
+  EXPECT_EQ("a", JoinString(parts, separator));
+
+  parts.push_back("b");
+  parts.push_back("c");
+  EXPECT_EQ("a, b, c", JoinString(parts, separator));
+
+  parts.push_back(std::string());
+  EXPECT_EQ("a, b, c, ", JoinString(parts, separator));
+  parts.push_back(" ");
+  EXPECT_EQ("a|b|c|| ", JoinString(parts, "|"));
+}
+
+TEST(StringUtilTest, JoinString16) {
+  string16 separator = ASCIIToUTF16(", ");
+  std::vector<string16> parts;
+  EXPECT_EQ(string16(), JoinString(parts, separator));
+
+  parts.push_back(string16());
+  EXPECT_EQ(string16(), JoinString(parts, separator));
+  parts.clear();
+
+  parts.push_back(ASCIIToUTF16("a"));
+  EXPECT_EQ(ASCIIToUTF16("a"), JoinString(parts, separator));
+
+  parts.push_back(ASCIIToUTF16("b"));
+  parts.push_back(ASCIIToUTF16("c"));
+  EXPECT_EQ(ASCIIToUTF16("a, b, c"), JoinString(parts, separator));
+
+  parts.push_back(ASCIIToUTF16(""));
+  EXPECT_EQ(ASCIIToUTF16("a, b, c, "), JoinString(parts, separator));
+  parts.push_back(ASCIIToUTF16(" "));
+  EXPECT_EQ(ASCIIToUTF16("a|b|c|| "), JoinString(parts, ASCIIToUTF16("|")));
+}
+
+TEST(StringUtilTest, JoinStringPiece) {
+  std::string separator(", ");
+  std::vector<StringPiece> parts;
+  EXPECT_EQ(std::string(), JoinString(parts, separator));
+
+  // Test empty first part (https://crbug.com/698073).
+  parts.push_back(StringPiece());
+  EXPECT_EQ(std::string(), JoinString(parts, separator));
+  parts.clear();
+
+  parts.push_back("a");
+  EXPECT_EQ("a", JoinString(parts, separator));
+
+  parts.push_back("b");
+  parts.push_back("c");
+  EXPECT_EQ("a, b, c", JoinString(parts, separator));
+
+  parts.push_back(StringPiece());
+  EXPECT_EQ("a, b, c, ", JoinString(parts, separator));
+  parts.push_back(" ");
+  EXPECT_EQ("a|b|c|| ", JoinString(parts, "|"));
+}
+
+TEST(StringUtilTest, JoinStringPiece16) {
+  string16 separator = ASCIIToUTF16(", ");
+  std::vector<StringPiece16> parts;
+  EXPECT_EQ(string16(), JoinString(parts, separator));
+
+  // Test empty first part (https://crbug.com/698073).
+  parts.push_back(StringPiece16());
+  EXPECT_EQ(string16(), JoinString(parts, separator));
+  parts.clear();
+
+  const string16 kA = ASCIIToUTF16("a");
+  parts.push_back(kA);
+  EXPECT_EQ(ASCIIToUTF16("a"), JoinString(parts, separator));
+
+  const string16 kB = ASCIIToUTF16("b");
+  parts.push_back(kB);
+  const string16 kC = ASCIIToUTF16("c");
+  parts.push_back(kC);
+  EXPECT_EQ(ASCIIToUTF16("a, b, c"), JoinString(parts, separator));
+
+  parts.push_back(StringPiece16());
+  EXPECT_EQ(ASCIIToUTF16("a, b, c, "), JoinString(parts, separator));
+  const string16 kSpace = ASCIIToUTF16(" ");
+  parts.push_back(kSpace);
+  EXPECT_EQ(ASCIIToUTF16("a|b|c|| "), JoinString(parts, ASCIIToUTF16("|")));
+}
+
+TEST(StringUtilTest, JoinStringInitializerList) {
+  std::string separator(", ");
+  EXPECT_EQ(std::string(), JoinString({}, separator));
+
+  // Test empty first part (https://crbug.com/698073).
+  EXPECT_EQ(std::string(), JoinString({StringPiece()}, separator));
+
+  // With const char*s.
+  EXPECT_EQ("a", JoinString({"a"}, separator));
+  EXPECT_EQ("a, b, c", JoinString({"a", "b", "c"}, separator));
+  EXPECT_EQ("a, b, c, ", JoinString({"a", "b", "c", StringPiece()}, separator));
+  EXPECT_EQ("a|b|c|| ", JoinString({"a", "b", "c", StringPiece(), " "}, "|"));
+
+  // With std::strings.
+  const std::string kA = "a";
+  const std::string kB = "b";
+  EXPECT_EQ("a, b", JoinString({kA, kB}, separator));
+
+  // With StringPieces.
+  const StringPiece kPieceA = kA;
+  const StringPiece kPieceB = kB;
+  EXPECT_EQ("a, b", JoinString({kPieceA, kPieceB}, separator));
+}
+
+TEST(StringUtilTest, JoinStringInitializerList16) {
+  string16 separator = ASCIIToUTF16(", ");
+  EXPECT_EQ(string16(), JoinString({}, separator));
+
+  // Test empty first part (https://crbug.com/698073).
+  EXPECT_EQ(string16(), JoinString({StringPiece16()}, separator));
+
+  // With string16s.
+  const string16 kA = ASCIIToUTF16("a");
+  EXPECT_EQ(ASCIIToUTF16("a"), JoinString({kA}, separator));
+
+  const string16 kB = ASCIIToUTF16("b");
+  const string16 kC = ASCIIToUTF16("c");
+  EXPECT_EQ(ASCIIToUTF16("a, b, c"), JoinString({kA, kB, kC}, separator));
+
+  EXPECT_EQ(ASCIIToUTF16("a, b, c, "),
+            JoinString({kA, kB, kC, StringPiece16()}, separator));
+  const string16 kSpace = ASCIIToUTF16(" ");
+  EXPECT_EQ(
+      ASCIIToUTF16("a|b|c|| "),
+      JoinString({kA, kB, kC, StringPiece16(), kSpace}, ASCIIToUTF16("|")));
+
+  // With StringPiece16s.
+  const StringPiece16 kPieceA = kA;
+  const StringPiece16 kPieceB = kB;
+  EXPECT_EQ(ASCIIToUTF16("a, b"), JoinString({kPieceA, kPieceB}, separator));
+}
+
+TEST(StringUtilTest, StartsWith) {
+  EXPECT_TRUE(StartsWith("javascript:url", "javascript",
+                         base::CompareCase::SENSITIVE));
+  EXPECT_FALSE(StartsWith("JavaScript:url", "javascript",
+                          base::CompareCase::SENSITIVE));
+  EXPECT_TRUE(StartsWith("javascript:url", "javascript",
+                         base::CompareCase::INSENSITIVE_ASCII));
+  EXPECT_TRUE(StartsWith("JavaScript:url", "javascript",
+                         base::CompareCase::INSENSITIVE_ASCII));
+  EXPECT_FALSE(StartsWith("java", "javascript", base::CompareCase::SENSITIVE));
+  EXPECT_FALSE(StartsWith("java", "javascript",
+                          base::CompareCase::INSENSITIVE_ASCII));
+  EXPECT_FALSE(StartsWith(std::string(), "javascript",
+                          base::CompareCase::INSENSITIVE_ASCII));
+  EXPECT_FALSE(StartsWith(std::string(), "javascript",
+                          base::CompareCase::SENSITIVE));
+  EXPECT_TRUE(StartsWith("java", std::string(),
+                         base::CompareCase::INSENSITIVE_ASCII));
+  EXPECT_TRUE(StartsWith("java", std::string(), base::CompareCase::SENSITIVE));
+
+  EXPECT_TRUE(StartsWith(ASCIIToUTF16("javascript:url"),
+                         ASCIIToUTF16("javascript"),
+                         base::CompareCase::SENSITIVE));
+  EXPECT_FALSE(StartsWith(ASCIIToUTF16("JavaScript:url"),
+                          ASCIIToUTF16("javascript"),
+                          base::CompareCase::SENSITIVE));
+  EXPECT_TRUE(StartsWith(ASCIIToUTF16("javascript:url"),
+                         ASCIIToUTF16("javascript"),
+                         base::CompareCase::INSENSITIVE_ASCII));
+  EXPECT_TRUE(StartsWith(ASCIIToUTF16("JavaScript:url"),
+                         ASCIIToUTF16("javascript"),
+                         base::CompareCase::INSENSITIVE_ASCII));
+  EXPECT_FALSE(StartsWith(ASCIIToUTF16("java"), ASCIIToUTF16("javascript"),
+                          base::CompareCase::SENSITIVE));
+  EXPECT_FALSE(StartsWith(ASCIIToUTF16("java"), ASCIIToUTF16("javascript"),
+                          base::CompareCase::INSENSITIVE_ASCII));
+  EXPECT_FALSE(StartsWith(string16(), ASCIIToUTF16("javascript"),
+                          base::CompareCase::INSENSITIVE_ASCII));
+  EXPECT_FALSE(StartsWith(string16(), ASCIIToUTF16("javascript"),
+                          base::CompareCase::SENSITIVE));
+  EXPECT_TRUE(StartsWith(ASCIIToUTF16("java"), string16(),
+                         base::CompareCase::INSENSITIVE_ASCII));
+  EXPECT_TRUE(StartsWith(ASCIIToUTF16("java"), string16(),
+                         base::CompareCase::SENSITIVE));
+}
+
+TEST(StringUtilTest, EndsWith) {
+  EXPECT_TRUE(EndsWith(ASCIIToUTF16("Foo.plugin"), ASCIIToUTF16(".plugin"),
+                       base::CompareCase::SENSITIVE));
+  EXPECT_FALSE(EndsWith(ASCIIToUTF16("Foo.Plugin"), ASCIIToUTF16(".plugin"),
+                        base::CompareCase::SENSITIVE));
+  EXPECT_TRUE(EndsWith(ASCIIToUTF16("Foo.plugin"), ASCIIToUTF16(".plugin"),
+                       base::CompareCase::INSENSITIVE_ASCII));
+  EXPECT_TRUE(EndsWith(ASCIIToUTF16("Foo.Plugin"), ASCIIToUTF16(".plugin"),
+                       base::CompareCase::INSENSITIVE_ASCII));
+  EXPECT_FALSE(EndsWith(ASCIIToUTF16(".plug"), ASCIIToUTF16(".plugin"),
+                        base::CompareCase::SENSITIVE));
+  EXPECT_FALSE(EndsWith(ASCIIToUTF16(".plug"), ASCIIToUTF16(".plugin"),
+                        base::CompareCase::INSENSITIVE_ASCII));
+  EXPECT_FALSE(EndsWith(ASCIIToUTF16("Foo.plugin Bar"), ASCIIToUTF16(".plugin"),
+                        base::CompareCase::SENSITIVE));
+  EXPECT_FALSE(EndsWith(ASCIIToUTF16("Foo.plugin Bar"), ASCIIToUTF16(".plugin"),
+                        base::CompareCase::INSENSITIVE_ASCII));
+  EXPECT_FALSE(EndsWith(string16(), ASCIIToUTF16(".plugin"),
+                        base::CompareCase::INSENSITIVE_ASCII));
+  EXPECT_FALSE(EndsWith(string16(), ASCIIToUTF16(".plugin"),
+                        base::CompareCase::SENSITIVE));
+  EXPECT_TRUE(EndsWith(ASCIIToUTF16("Foo.plugin"), string16(),
+                       base::CompareCase::INSENSITIVE_ASCII));
+  EXPECT_TRUE(EndsWith(ASCIIToUTF16("Foo.plugin"), string16(),
+                       base::CompareCase::SENSITIVE));
+  EXPECT_TRUE(EndsWith(ASCIIToUTF16(".plugin"), ASCIIToUTF16(".plugin"),
+                       base::CompareCase::INSENSITIVE_ASCII));
+  EXPECT_TRUE(EndsWith(ASCIIToUTF16(".plugin"), ASCIIToUTF16(".plugin"),
+                       base::CompareCase::SENSITIVE));
+  EXPECT_TRUE(
+      EndsWith(string16(), string16(), base::CompareCase::INSENSITIVE_ASCII));
+  EXPECT_TRUE(EndsWith(string16(), string16(), base::CompareCase::SENSITIVE));
+}
+
+TEST(StringUtilTest, GetStringFWithOffsets) {
+  std::vector<string16> subst;
+  subst.push_back(ASCIIToUTF16("1"));
+  subst.push_back(ASCIIToUTF16("2"));
+  std::vector<size_t> offsets;
+
+  ReplaceStringPlaceholders(ASCIIToUTF16("Hello, $1. Your number is $2."),
+                            subst,
+                            &offsets);
+  EXPECT_EQ(2U, offsets.size());
+  EXPECT_EQ(7U, offsets[0]);
+  EXPECT_EQ(25U, offsets[1]);
+  offsets.clear();
+
+  ReplaceStringPlaceholders(ASCIIToUTF16("Hello, $2. Your number is $1."),
+                            subst,
+                            &offsets);
+  EXPECT_EQ(2U, offsets.size());
+  EXPECT_EQ(25U, offsets[0]);
+  EXPECT_EQ(7U, offsets[1]);
+  offsets.clear();
+}
+
+TEST(StringUtilTest, ReplaceStringPlaceholdersTooFew) {
+  // Test that ReplaceStringPlaceholders() works as expected when there are
+  // fewer substitutions than placeholders.
+  std::vector<string16> subst;
+  subst.push_back(ASCIIToUTF16("9a"));
+  subst.push_back(ASCIIToUTF16("8b"));
+  subst.push_back(ASCIIToUTF16("7c"));
+
+  string16 formatted =
+      ReplaceStringPlaceholders(
+          ASCIIToUTF16("$1a,$2b,$3c,$4d,$5e,$6f,$1g,$2h,$3i"), subst, nullptr);
+
+  EXPECT_EQ(ASCIIToUTF16("9aa,8bb,7cc,d,e,f,9ag,8bh,7ci"), formatted);
+}
+
+TEST(StringUtilTest, ReplaceStringPlaceholders) {
+  std::vector<string16> subst;
+  subst.push_back(ASCIIToUTF16("9a"));
+  subst.push_back(ASCIIToUTF16("8b"));
+  subst.push_back(ASCIIToUTF16("7c"));
+  subst.push_back(ASCIIToUTF16("6d"));
+  subst.push_back(ASCIIToUTF16("5e"));
+  subst.push_back(ASCIIToUTF16("4f"));
+  subst.push_back(ASCIIToUTF16("3g"));
+  subst.push_back(ASCIIToUTF16("2h"));
+  subst.push_back(ASCIIToUTF16("1i"));
+
+  string16 formatted =
+      ReplaceStringPlaceholders(
+          ASCIIToUTF16("$1a,$2b,$3c,$4d,$5e,$6f,$7g,$8h,$9i"), subst, nullptr);
+
+  EXPECT_EQ(ASCIIToUTF16("9aa,8bb,7cc,6dd,5ee,4ff,3gg,2hh,1ii"), formatted);
+}
+
+TEST(StringUtilTest, ReplaceStringPlaceholdersNetExpansionWithContraction) {
+  // In this test, some of the substitutions are shorter than the placeholders,
+  // but overall the string gets longer.
+  std::vector<string16> subst;
+  subst.push_back(ASCIIToUTF16("9a____"));
+  subst.push_back(ASCIIToUTF16("B"));
+  subst.push_back(ASCIIToUTF16("7c___"));
+  subst.push_back(ASCIIToUTF16("d"));
+  subst.push_back(ASCIIToUTF16("5e____"));
+  subst.push_back(ASCIIToUTF16("F"));
+  subst.push_back(ASCIIToUTF16("3g___"));
+  subst.push_back(ASCIIToUTF16("h"));
+  subst.push_back(ASCIIToUTF16("1i_____"));
+
+  string16 original = ASCIIToUTF16("$1a,$2b,$3c,$4d,$5e,$6f,$7g,$8h,$9i");
+  string16 expected =
+      ASCIIToUTF16("9a____a,Bb,7c___c,dd,5e____e,Ff,3g___g,hh,1i_____i");
+
+  EXPECT_EQ(expected, ReplaceStringPlaceholders(original, subst, nullptr));
+
+  std::vector<size_t> offsets;
+  EXPECT_EQ(expected, ReplaceStringPlaceholders(original, subst, &offsets));
+  std::vector<size_t> expected_offsets = {0, 8, 11, 18, 21, 29, 32, 39, 42};
+  EXPECT_EQ(offsets.size(), subst.size());
+  EXPECT_EQ(expected_offsets, offsets);
+  for (size_t i = 0; i < offsets.size(); i++) {
+    EXPECT_EQ(expected.substr(expected_offsets[i], subst[i].length()),
+              subst[i]);
+  }
+}
+
+TEST(StringUtilTest, ReplaceStringPlaceholdersNetContractionWithExpansion) {
+  // In this test, some of the substitutions are longer than the placeholders,
+  // but overall the string gets smaller. Additionally, the placeholders appear
+  // in a permuted order.
+  std::vector<string16> subst;
+  subst.push_back(ASCIIToUTF16("z"));
+  subst.push_back(ASCIIToUTF16("y"));
+  subst.push_back(ASCIIToUTF16("XYZW"));
+  subst.push_back(ASCIIToUTF16("x"));
+  subst.push_back(ASCIIToUTF16("w"));
+
+  string16 formatted =
+      ReplaceStringPlaceholders(ASCIIToUTF16("$3_$4$2$1$5"), subst, nullptr);
+
+  EXPECT_EQ(ASCIIToUTF16("XYZW_xyzw"), formatted);
+}
+
+TEST(StringUtilTest, ReplaceStringPlaceholdersOneDigit) {
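+  // Placeholder indices are a single digit: "$16" parses as $1 followed by a
+  // literal '6'.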
+  std::vector<string16> subst;
+  subst.push_back(ASCIIToUTF16("1a"));
+  string16 formatted =
+      ReplaceStringPlaceholders(ASCIIToUTF16(" $16 "), subst, nullptr);
+  EXPECT_EQ(ASCIIToUTF16(" 1a6 "), formatted);
+}
+
+TEST(StringUtilTest, ReplaceStringPlaceholdersInvalidPlaceholder) {
+  std::vector<string16> subst;
+  subst.push_back(ASCIIToUTF16("1a"));
+  string16 formatted =
+      ReplaceStringPlaceholders(ASCIIToUTF16("+$-+$A+$1+"), subst, nullptr);
+  EXPECT_EQ(ASCIIToUTF16("+++1a+"), formatted);
+}
+
+TEST(StringUtilTest, StdStringReplaceStringPlaceholders) {
+  std::vector<std::string> subst;
+  subst.push_back("9a");
+  subst.push_back("8b");
+  subst.push_back("7c");
+  subst.push_back("6d");
+  subst.push_back("5e");
+  subst.push_back("4f");
+  subst.push_back("3g");
+  subst.push_back("2h");
+  subst.push_back("1i");
+
+  std::string formatted =
+      ReplaceStringPlaceholders(
+          "$1a,$2b,$3c,$4d,$5e,$6f,$7g,$8h,$9i", subst, nullptr);
+
+  EXPECT_EQ("9aa,8bb,7cc,6dd,5ee,4ff,3gg,2hh,1ii", formatted);
+}
+
+TEST(StringUtilTest, StdStringReplaceStringPlaceholdersMultipleMatches) {
+  std::vector<std::string> subst;
+  subst.push_back("4");   // Referenced twice.
+  subst.push_back("?");   // Unreferenced.
+  subst.push_back("!");   // Unreferenced.
+  subst.push_back("16");  // Referenced once.
+
+  std::string original = "$1 * $1 == $4";
+  std::string expected = "4 * 4 == 16";
+  EXPECT_EQ(expected, ReplaceStringPlaceholders(original, subst, nullptr));
+  std::vector<size_t> offsets;
+  EXPECT_EQ(expected, ReplaceStringPlaceholders(original, subst, &offsets));
+  std::vector<size_t> expected_offsets = {0, 4, 9};
+  EXPECT_EQ(expected_offsets, offsets);
+}
+
+TEST(StringUtilTest, ReplaceStringPlaceholdersConsecutiveDollarSigns) {
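+  // A run of consecutive '$' characters acts as an escape: one fewer '$' is
+  // emitted and the digit that follows is not treated as a placeholder.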
+  std::vector<std::string> subst;
+  subst.push_back("a");
+  subst.push_back("b");
+  subst.push_back("c");
+  EXPECT_EQ(ReplaceStringPlaceholders("$$1 $$$2 $$$$3", subst, nullptr),
+            "$1 $$2 $$$3");
+}
+
+TEST(StringUtilTest, LcpyTest) {
+  // Test the normal case where we fit in our buffer.
+  {
+    char dst[10];
+    wchar_t wdst[10];
+    EXPECT_EQ(7U, strlcpy(dst, "abcdefg", arraysize(dst)));
+    EXPECT_EQ(0, memcmp(dst, "abcdefg", 8));
+    EXPECT_EQ(7U, wcslcpy(wdst, L"abcdefg", arraysize(wdst)));
+    EXPECT_EQ(0, memcmp(wdst, L"abcdefg", sizeof(wchar_t) * 8));
+  }
+
+  // Test dst_size == 0: nothing should be written to |dst|, and the return
+  // value should be the equivalent of strlen(src).
+  {
+    char dst[2] = {1, 2};
+    wchar_t wdst[2] = {1, 2};
+    EXPECT_EQ(7U, strlcpy(dst, "abcdefg", 0));
+    EXPECT_EQ(1, dst[0]);
+    EXPECT_EQ(2, dst[1]);
+    EXPECT_EQ(7U, wcslcpy(wdst, L"abcdefg", 0));
+    EXPECT_EQ(static_cast<wchar_t>(1), wdst[0]);
+    EXPECT_EQ(static_cast<wchar_t>(2), wdst[1]);
+  }
+
+  // Test the case where we _just_ completely fit, including the null.
+  {
+    char dst[8];
+    wchar_t wdst[8];
+    EXPECT_EQ(7U, strlcpy(dst, "abcdefg", arraysize(dst)));
+    EXPECT_EQ(0, memcmp(dst, "abcdefg", 8));
+    EXPECT_EQ(7U, wcslcpy(wdst, L"abcdefg", arraysize(wdst)));
+    EXPECT_EQ(0, memcmp(wdst, L"abcdefg", sizeof(wchar_t) * 8));
+  }
+
+  // Test the case where we are one smaller, so we can't fit the null.
+  {
+    char dst[7];
+    wchar_t wdst[7];
+    EXPECT_EQ(7U, strlcpy(dst, "abcdefg", arraysize(dst)));
+    EXPECT_EQ(0, memcmp(dst, "abcdef", 7));
+    EXPECT_EQ(7U, wcslcpy(wdst, L"abcdefg", arraysize(wdst)));
+    EXPECT_EQ(0, memcmp(wdst, L"abcdef", sizeof(wchar_t) * 7));
+  }
+
+  // Test the case where we are just too small.
+  {
+    char dst[3];
+    wchar_t wdst[3];
+    EXPECT_EQ(7U, strlcpy(dst, "abcdefg", arraysize(dst)));
+    EXPECT_EQ(0, memcmp(dst, "ab", 3));
+    EXPECT_EQ(7U, wcslcpy(wdst, L"abcdefg", arraysize(wdst)));
+    EXPECT_EQ(0, memcmp(wdst, L"ab", sizeof(wchar_t) * 3));
+  }
+}
+
+TEST(StringUtilTest, WprintfFormatPortabilityTest) {
+  static const struct {
+    const wchar_t* input;
+    bool portable;
+  } cases[] = {
+    { L"%ls", true },
+    { L"%s", false },
+    { L"%S", false },
+    { L"%lS", false },
+    { L"Hello, %s", false },
+    { L"%lc", true },
+    { L"%c", false },
+    { L"%C", false },
+    { L"%lC", false },
+    { L"%ls %s", false },
+    { L"%s %ls", false },
+    { L"%s %ls %s", false },
+    { L"%f", true },
+    { L"%f %F", false },
+    { L"%d %D", false },
+    { L"%o %O", false },
+    { L"%u %U", false },
+    { L"%f %d %o %u", true },
+    { L"%-8d (%02.1f%)", true },
+    { L"% 10s", false },
+    { L"% 10ls", true }
+  };
+  for (size_t i = 0; i < arraysize(cases); ++i)
+    EXPECT_EQ(cases[i].portable, IsWprintfFormatPortable(cases[i].input));
+}
+
+TEST(StringUtilTest, RemoveChars) {
+  const char kRemoveChars[] = "-/+*";
+  std::string input = "A-+bc/d!*";
+  EXPECT_TRUE(RemoveChars(input, kRemoveChars, &input));
+  EXPECT_EQ("Abcd!", input);
+
+  // No characters match kRemoveChars.
+  EXPECT_FALSE(RemoveChars(input, kRemoveChars, &input));
+  EXPECT_EQ("Abcd!", input);
+
+  // Empty string.
+  input.clear();
+  EXPECT_FALSE(RemoveChars(input, kRemoveChars, &input));
+  EXPECT_EQ(std::string(), input);
+}
+
+TEST(StringUtilTest, ReplaceChars) {
+  struct TestData {
+    const char* input;
+    const char* replace_chars;
+    const char* replace_with;
+    const char* output;
+    bool result;
+  } cases[] = {
+      {"", "", "", "", false},
+      {"t", "t", "t", "t", true},
+      {"a", "b", "c", "a", false},
+      {"b", "b", "c", "c", true},
+      {"bob", "b", "p", "pop", true},
+      {"bob", "o", "i", "bib", true},
+      {"test", "", "", "test", false},
+      {"test", "", "!", "test", false},
+      {"test", "z", "!", "test", false},
+      {"test", "e", "!", "t!st", true},
+      {"test", "e", "!?", "t!?st", true},
+      {"test", "ez", "!", "t!st", true},
+      {"test", "zed", "!?", "t!?st", true},
+      {"test", "t", "!?", "!?es!?", true},
+      {"test", "et", "!>", "!>!>s!>", true},
+      {"test", "zest", "!", "!!!!", true},
+      {"test", "szt", "!", "!e!!", true},
+      {"test", "t", "test", "testestest", true},
+      {"tetst", "t", "test", "testeteststest", true},
+      {"ttttttt", "t", "-", "-------", true},
+      {"aAaAaAAaAAa", "A", "", "aaaaa", true},
+      {"xxxxxxxxxx", "x", "", "", true},
+      {"xxxxxxxxxx", "x", "x", "xxxxxxxxxx", true},
+      {"xxxxxxxxxx", "x", "y-", "y-y-y-y-y-y-y-y-y-y-", true},
+      {"xxxxxxxxxx", "x", "xy", "xyxyxyxyxyxyxyxyxyxy", true},
+      {"xxxxxxxxxx", "x", "zyx", "zyxzyxzyxzyxzyxzyxzyxzyxzyxzyx", true},
+      {"xaxxaxxxaxxxax", "x", "xy", "xyaxyxyaxyxyxyaxyxyxyaxy", true},
+      {"-xaxxaxxxaxxxax-", "x", "xy", "-xyaxyxyaxyxyxyaxyxyxyaxy-", true},
+  };
+
+  for (const TestData& scenario : cases) {
+    // Test with separate output and input vars.
+    std::string output;
+    bool result = ReplaceChars(scenario.input, scenario.replace_chars,
+                               scenario.replace_with, &output);
+    EXPECT_EQ(scenario.result, result) << scenario.input;
+    EXPECT_EQ(scenario.output, output);
+  }
+
+  for (const TestData& scenario : cases) {
+    // Test with an input/output var of limited capacity.
+    std::string input_output = scenario.input;
+    input_output.shrink_to_fit();
+    bool result = ReplaceChars(input_output, scenario.replace_chars,
+                               scenario.replace_with, &input_output);
+    EXPECT_EQ(scenario.result, result) << scenario.input;
+    EXPECT_EQ(scenario.output, input_output);
+  }
+
+  for (const TestData& scenario : cases) {
+    // Test with an input/output var of ample capacity; should
+    // not realloc.
+    std::string input_output = scenario.input;
+    input_output.reserve(strlen(scenario.output) * 2);
+    const void* original_buffer = input_output.data();
+    bool result = ReplaceChars(input_output, scenario.replace_chars,
+                               scenario.replace_with, &input_output);
+    EXPECT_EQ(scenario.result, result) << scenario.input;
+    EXPECT_EQ(scenario.output, input_output);
+    EXPECT_EQ(original_buffer, input_output.data());
+  }
+}
+
+TEST(StringUtilTest, ContainsOnlyChars) {
+  // With an empty list of characters, everything but the empty string should
+  // return false.
+  EXPECT_TRUE(ContainsOnlyChars(std::string(), std::string()));
+  EXPECT_FALSE(ContainsOnlyChars("Hello", std::string()));
+
+  EXPECT_TRUE(ContainsOnlyChars(std::string(), "1234"));
+  EXPECT_TRUE(ContainsOnlyChars("1", "1234"));
+  EXPECT_TRUE(ContainsOnlyChars("1", "4321"));
+  EXPECT_TRUE(ContainsOnlyChars("123", "4321"));
+  EXPECT_FALSE(ContainsOnlyChars("123a", "4321"));
+
+  EXPECT_TRUE(ContainsOnlyChars(std::string(), kWhitespaceASCII));
+  EXPECT_TRUE(ContainsOnlyChars(" ", kWhitespaceASCII));
+  EXPECT_TRUE(ContainsOnlyChars("\t", kWhitespaceASCII));
+  EXPECT_TRUE(ContainsOnlyChars("\t \r \n  ", kWhitespaceASCII));
+  EXPECT_FALSE(ContainsOnlyChars("a", kWhitespaceASCII));
+  EXPECT_FALSE(ContainsOnlyChars("\thello\r \n  ", kWhitespaceASCII));
+
+  EXPECT_TRUE(ContainsOnlyChars(string16(), kWhitespaceUTF16));
+  EXPECT_TRUE(ContainsOnlyChars(ASCIIToUTF16(" "), kWhitespaceUTF16));
+  EXPECT_TRUE(ContainsOnlyChars(ASCIIToUTF16("\t"), kWhitespaceUTF16));
+  EXPECT_TRUE(ContainsOnlyChars(ASCIIToUTF16("\t \r \n  "), kWhitespaceUTF16));
+  EXPECT_FALSE(ContainsOnlyChars(ASCIIToUTF16("a"), kWhitespaceUTF16));
+  EXPECT_FALSE(ContainsOnlyChars(ASCIIToUTF16("\thello\r \n  "),
+                                  kWhitespaceUTF16));
+}
+
+TEST(StringUtilTest, CompareCaseInsensitiveASCII) {
+  EXPECT_EQ(0, CompareCaseInsensitiveASCII("", ""));
+  EXPECT_EQ(0, CompareCaseInsensitiveASCII("Asdf", "aSDf"));
+
+  // Differing lengths.
+  EXPECT_EQ(-1, CompareCaseInsensitiveASCII("Asdf", "aSDfA"));
+  EXPECT_EQ(1, CompareCaseInsensitiveASCII("AsdfA", "aSDf"));
+
+  // Differing values.
+  EXPECT_EQ(-1, CompareCaseInsensitiveASCII("AsdfA", "aSDfb"));
+  EXPECT_EQ(1, CompareCaseInsensitiveASCII("Asdfb", "aSDfA"));
+}
+
+TEST(StringUtilTest, EqualsCaseInsensitiveASCII) {
+  EXPECT_TRUE(EqualsCaseInsensitiveASCII("", ""));
+  EXPECT_TRUE(EqualsCaseInsensitiveASCII("Asdf", "aSDF"));
+  EXPECT_FALSE(EqualsCaseInsensitiveASCII("bsdf", "aSDF"));
+  EXPECT_FALSE(EqualsCaseInsensitiveASCII("Asdf", "aSDFz"));
+}
+
+TEST(StringUtilTest, IsUnicodeWhitespace) {
+  // NOT unicode white space.
+  EXPECT_FALSE(IsUnicodeWhitespace(L'\0'));
+  EXPECT_FALSE(IsUnicodeWhitespace(L'A'));
+  EXPECT_FALSE(IsUnicodeWhitespace(L'0'));
+  EXPECT_FALSE(IsUnicodeWhitespace(L'.'));
+  EXPECT_FALSE(IsUnicodeWhitespace(L';'));
+  EXPECT_FALSE(IsUnicodeWhitespace(L'\x4100'));
+
+  // Actual unicode whitespace.
+  EXPECT_TRUE(IsUnicodeWhitespace(L' '));
+  EXPECT_TRUE(IsUnicodeWhitespace(L'\xa0'));
+  EXPECT_TRUE(IsUnicodeWhitespace(L'\x3000'));
+  EXPECT_TRUE(IsUnicodeWhitespace(L'\t'));
+  EXPECT_TRUE(IsUnicodeWhitespace(L'\r'));
+  EXPECT_TRUE(IsUnicodeWhitespace(L'\v'));
+  EXPECT_TRUE(IsUnicodeWhitespace(L'\f'));
+  EXPECT_TRUE(IsUnicodeWhitespace(L'\n'));
+}
+
+class WriteIntoTest : public testing::Test {
+ protected:
+  static void WritesCorrectly(size_t num_chars) {
+    std::string buffer;
+    char kOriginal[] = "supercali";
+    strncpy(WriteInto(&buffer, num_chars + 1), kOriginal, num_chars);
+    // Using std::string(buffer.c_str()) instead of |buffer| truncates the
+    // string at the first \0.
+    EXPECT_EQ(std::string(kOriginal,
+                          std::min(num_chars, arraysize(kOriginal) - 1)),
+              std::string(buffer.c_str()));
+    EXPECT_EQ(num_chars, buffer.size());
+  }
+};
+
+TEST_F(WriteIntoTest, WriteInto) {
+  // Validate that WriteInto reserves enough space and
+  // sizes a string correctly.
+  WritesCorrectly(1);
+  WritesCorrectly(2);
+  WritesCorrectly(5000);
+
+  // Validate that WriteInto doesn't modify other strings
+  // when using a Copy-on-Write implementation.
+  const char kLive[] = "live";
+  const char kDead[] = "dead";
+  const std::string live = kLive;
+  std::string dead = live;
+  strncpy(WriteInto(&dead, 5), kDead, 4);
+  EXPECT_EQ(kDead, dead);
+  EXPECT_EQ(4u, dead.size());
+  EXPECT_EQ(kLive, live);
+  EXPECT_EQ(4u, live.size());
+}
+
+}  // namespace base
diff --git a/base/strings/string_util_win.h b/base/strings/string_util_win.h
new file mode 100644
index 0000000..7f260bf
--- /dev/null
+++ b/base/strings/string_util_win.h
@@ -0,0 +1,44 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_STRING_UTIL_WIN_H_
+#define BASE_STRINGS_STRING_UTIL_WIN_H_
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <string.h>
+#include <wchar.h>
+
+#include "base/logging.h"
+
+namespace base {
+
+// Chromium code style is to not use malloc'd strings; this is provided only
+// for interaction with APIs that require it.
+inline char* strdup(const char* str) {
+  return _strdup(str);
+}
+
+inline int vsnprintf(char* buffer, size_t size,
+                     const char* format, va_list arguments) {
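+  // vsnprintf_s returns a negative value when the output does not fit, so
+  // fall back to _vscprintf, which computes the length the fully-formatted
+  // string would require; this matches the C99 vsnprintf return contract.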
+  int length = vsnprintf_s(buffer, size, size - 1, format, arguments);
+  if (length < 0)
+    return _vscprintf(format, arguments);
+  return length;
+}
+
+inline int vswprintf(wchar_t* buffer, size_t size,
+                     const wchar_t* format, va_list arguments) {
+  DCHECK(IsWprintfFormatPortable(format));
+
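+  // As with vsnprintf() above, _vsnwprintf_s reports truncation with a
+  // negative return, and _vscwprintf supplies the would-be length.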
+  int length = _vsnwprintf_s(buffer, size, size - 1, format, arguments);
+  if (length < 0)
+    return _vscwprintf(format, arguments);
+  return length;
+}
+
+}  // namespace base
+
+#endif  // BASE_STRINGS_STRING_UTIL_WIN_H_
diff --git a/base/strings/stringize_macros.h b/base/strings/stringize_macros.h
new file mode 100644
index 0000000..d4e2707
--- /dev/null
+++ b/base/strings/stringize_macros.h
@@ -0,0 +1,31 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// This file defines preprocessor macros for stringizing preprocessor
+// symbols (or their output) and manipulating preprocessor symbols
+// that define strings.
+
+#ifndef BASE_STRINGS_STRINGIZE_MACROS_H_
+#define BASE_STRINGS_STRINGIZE_MACROS_H_
+
+#include "build/build_config.h"
+
+// This is not very useful as it does not expand defined symbols if
+// called directly. Use its counterpart without the _NO_EXPANSION
+// suffix, below.
+#define STRINGIZE_NO_EXPANSION(x) #x
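+//
+// For example, with "#define A FOO", STRINGIZE_NO_EXPANSION(A) produces "A".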
+
+// Use this to quote the provided parameter, first expanding it if it
+// is a preprocessor symbol.
+//
+// For example, if:
+//   #define A FOO
+//   #define B(x) myobj->FunctionCall(x)
+//
+// Then:
+//   STRINGIZE(A) produces "FOO"
+//   STRINGIZE(B(y)) produces "myobj->FunctionCall(y)"
+#define STRINGIZE(x) STRINGIZE_NO_EXPANSION(x)
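+
+// Usage sketch (illustrative; MAJOR and MINOR are hypothetical defines): the
+// two-level expansion above is what lets numeric defines become string
+// literals at compile time.
+//
+//   #define MAJOR 1
+//   #define MINOR 9
+//   STRINGIZE(MAJOR) "." STRINGIZE(MINOR)  // yields the literal "1.9"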
+
+#endif  // BASE_STRINGS_STRINGIZE_MACROS_H_
diff --git a/base/strings/stringize_macros_unittest.cc b/base/strings/stringize_macros_unittest.cc
new file mode 100644
index 0000000..d7f9e56
--- /dev/null
+++ b/base/strings/stringize_macros_unittest.cc
@@ -0,0 +1,29 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/stringize_macros.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+// Macros as per documentation in header file.
+#define PREPROCESSOR_UTIL_UNITTEST_A FOO
+#define PREPROCESSOR_UTIL_UNITTEST_B(x) myobj->FunctionCall(x)
+#define PREPROCESSOR_UTIL_UNITTEST_C "foo"
+
+TEST(StringizeTest, Ansi) {
+  EXPECT_STREQ(
+      "PREPROCESSOR_UTIL_UNITTEST_A",
+      STRINGIZE_NO_EXPANSION(PREPROCESSOR_UTIL_UNITTEST_A));
+  EXPECT_STREQ(
+      "PREPROCESSOR_UTIL_UNITTEST_B(y)",
+      STRINGIZE_NO_EXPANSION(PREPROCESSOR_UTIL_UNITTEST_B(y)));
+  EXPECT_STREQ(
+      "PREPROCESSOR_UTIL_UNITTEST_C",
+      STRINGIZE_NO_EXPANSION(PREPROCESSOR_UTIL_UNITTEST_C));
+
+  EXPECT_STREQ("FOO", STRINGIZE(PREPROCESSOR_UTIL_UNITTEST_A));
+  EXPECT_STREQ("myobj->FunctionCall(y)",
+               STRINGIZE(PREPROCESSOR_UTIL_UNITTEST_B(y)));
+  EXPECT_STREQ("\"foo\"", STRINGIZE(PREPROCESSOR_UTIL_UNITTEST_C));
+}
diff --git a/base/strings/stringprintf.cc b/base/strings/stringprintf.cc
new file mode 100644
index 0000000..415845d
--- /dev/null
+++ b/base/strings/stringprintf.cc
@@ -0,0 +1,189 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/stringprintf.h"
+
+#include <errno.h>
+#include <stddef.h>
+
+#include <vector>
+
+#include "base/macros.h"
+#include "base/scoped_clear_errno.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace {
+
+// Overloaded wrappers around vsnprintf and vswprintf. The buf_size parameter
+// is the size of the buffer. These return the number of characters in the
+// formatted string excluding the NUL terminator. If the buffer is not
+// large enough to accommodate the formatted string without truncation, they
+// return the number of characters that would be in the fully-formatted string
+// (vsnprintf, and vswprintf on Windows), or -1 (vswprintf on POSIX platforms).
+inline int vsnprintfT(char* buffer,
+                      size_t buf_size,
+                      const char* format,
+                      va_list argptr) {
+  return base::vsnprintf(buffer, buf_size, format, argptr);
+}
+
+#if defined(OS_WIN)
+inline int vsnprintfT(wchar_t* buffer,
+                      size_t buf_size,
+                      const wchar_t* format,
+                      va_list argptr) {
+  return base::vswprintf(buffer, buf_size, format, argptr);
+}
+#endif
+
+// Templatized backend for StringPrintF/StringAppendF. This does not finalize
+// the va_list, the caller is expected to do that.
+template <class StringType>
+static void StringAppendVT(StringType* dst,
+                           const typename StringType::value_type* format,
+                           va_list ap) {
+  // First try with a small fixed size buffer.
+  // This buffer size should be kept in sync with StringUtilTest.GrowBoundary
+  // and StringUtilTest.StringPrintfBounds.
+  typename StringType::value_type stack_buf[1024];
+
+  va_list ap_copy;
+  va_copy(ap_copy, ap);
+
+#if !defined(OS_WIN)
+  ScopedClearErrno clear_errno;
+#endif
+  int result = vsnprintfT(stack_buf, arraysize(stack_buf), format, ap_copy);
+  va_end(ap_copy);
+
+  if (result >= 0 && result < static_cast<int>(arraysize(stack_buf))) {
+    // It fit.
+    dst->append(stack_buf, result);
+    return;
+  }
+
+  // Repeatedly increase buffer size until it fits.
+  int mem_length = arraysize(stack_buf);
+  while (true) {
+    if (result < 0) {
+#if defined(OS_WIN)
+      // On Windows, vsnprintfT always returns the number of characters in a
+      // fully-formatted string, so if we reach this point, something else is
+      // wrong and no amount of buffer-doubling is going to fix it.
+      return;
+#else
+      if (errno != 0 && errno != EOVERFLOW)
+        return;
+      // Try doubling the buffer size.
+      mem_length *= 2;
+#endif
+    } else {
+      // We need exactly "result + 1" characters.
+      mem_length = result + 1;
+    }
+
+    if (mem_length > 32 * 1024 * 1024) {
+      // That should be plenty, don't try anything larger.  This protects
+      // against huge allocations when using vsnprintfT implementations that
+      // return -1 for reasons other than overflow without setting errno.
+      DLOG(WARNING) << "Unable to printf the requested string due to size.";
+      return;
+    }
+
+    std::vector<typename StringType::value_type> mem_buf(mem_length);
+
+    // NOTE: You can only use a va_list once.  Since we're in a while loop, we
+    // need to make a new copy each time so we don't use up the original.
+    va_copy(ap_copy, ap);
+    result = vsnprintfT(&mem_buf[0], mem_length, format, ap_copy);
+    va_end(ap_copy);
+
+    if ((result >= 0) && (result < mem_length)) {
+      // It fit.
+      dst->append(&mem_buf[0], result);
+      return;
+    }
+  }
+}
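+
+// Worked example (illustrative): formatting a 5000-character result on POSIX
+// first tries the 1024-entry stack buffer; vsnprintf truncates but returns
+// 5000, so the next pass allocates exactly result + 1 = 5001 entries and
+// succeeds. The doubling path is only taken when the formatter returns -1
+// without reporting a length (vswprintf on POSIX).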
+
+}  // namespace
+
+std::string StringPrintf(const char* format, ...) {
+  va_list ap;
+  va_start(ap, format);
+  std::string result;
+  StringAppendV(&result, format, ap);
+  va_end(ap);
+  return result;
+}
+
+#if defined(OS_WIN)
+std::wstring StringPrintf(const wchar_t* format, ...) {
+  va_list ap;
+  va_start(ap, format);
+  std::wstring result;
+  StringAppendV(&result, format, ap);
+  va_end(ap);
+  return result;
+}
+#endif
+
+std::string StringPrintV(const char* format, va_list ap) {
+  std::string result;
+  StringAppendV(&result, format, ap);
+  return result;
+}
+
+const std::string& SStringPrintf(std::string* dst, const char* format, ...) {
+  va_list ap;
+  va_start(ap, format);
+  dst->clear();
+  StringAppendV(dst, format, ap);
+  va_end(ap);
+  return *dst;
+}
+
+#if defined(OS_WIN)
+const std::wstring& SStringPrintf(std::wstring* dst,
+                                  const wchar_t* format, ...) {
+  va_list ap;
+  va_start(ap, format);
+  dst->clear();
+  StringAppendV(dst, format, ap);
+  va_end(ap);
+  return *dst;
+}
+#endif
+
+void StringAppendF(std::string* dst, const char* format, ...) {
+  va_list ap;
+  va_start(ap, format);
+  StringAppendV(dst, format, ap);
+  va_end(ap);
+}
+
+#if defined(OS_WIN)
+void StringAppendF(std::wstring* dst, const wchar_t* format, ...) {
+  va_list ap;
+  va_start(ap, format);
+  StringAppendV(dst, format, ap);
+  va_end(ap);
+}
+#endif
+
+void StringAppendV(std::string* dst, const char* format, va_list ap) {
+  StringAppendVT(dst, format, ap);
+}
+
+#if defined(OS_WIN)
+void StringAppendV(std::wstring* dst, const wchar_t* format, va_list ap) {
+  StringAppendVT(dst, format, ap);
+}
+#endif
+
+}  // namespace base
diff --git a/base/strings/stringprintf.h b/base/strings/stringprintf.h
new file mode 100644
index 0000000..7a75d89
--- /dev/null
+++ b/base/strings/stringprintf.h
@@ -0,0 +1,66 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_STRINGPRINTF_H_
+#define BASE_STRINGS_STRINGPRINTF_H_
+
+#include <stdarg.h>   // va_list
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// Return a C++ string given printf-like input.
+BASE_EXPORT std::string StringPrintf(_Printf_format_string_ const char* format,
+                                     ...)
+    PRINTF_FORMAT(1, 2) WARN_UNUSED_RESULT;
+#if defined(OS_WIN)
+BASE_EXPORT std::wstring StringPrintf(
+    _Printf_format_string_ const wchar_t* format,
+    ...) WPRINTF_FORMAT(1, 2) WARN_UNUSED_RESULT;
+#endif
+
+// Return a C++ string given vprintf-like input.
+BASE_EXPORT std::string StringPrintV(const char* format, va_list ap)
+    PRINTF_FORMAT(1, 0) WARN_UNUSED_RESULT;
+
+// Store result into a supplied string and return it.
+BASE_EXPORT const std::string& SStringPrintf(
+    std::string* dst,
+    _Printf_format_string_ const char* format,
+    ...) PRINTF_FORMAT(2, 3);
+#if defined(OS_WIN)
+BASE_EXPORT const std::wstring& SStringPrintf(
+    std::wstring* dst,
+    _Printf_format_string_ const wchar_t* format,
+    ...) WPRINTF_FORMAT(2, 3);
+#endif
+
+// Append result to a supplied string.
+BASE_EXPORT void StringAppendF(std::string* dst,
+                               _Printf_format_string_ const char* format,
+                               ...) PRINTF_FORMAT(2, 3);
+#if defined(OS_WIN)
+BASE_EXPORT void StringAppendF(std::wstring* dst,
+                               _Printf_format_string_ const wchar_t* format,
+                               ...) WPRINTF_FORMAT(2, 3);
+#endif
+
+// Lower-level routine that takes a va_list and appends to a specified
+// string.  All other routines are just convenience wrappers around it.
+BASE_EXPORT void StringAppendV(std::string* dst, const char* format, va_list ap)
+    PRINTF_FORMAT(2, 0);
+#if defined(OS_WIN)
+BASE_EXPORT void StringAppendV(std::wstring* dst,
+                               const wchar_t* format, va_list ap)
+    WPRINTF_FORMAT(2, 0);
+#endif
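+
+// Usage sketch (illustrative; |count| and |dir| are hypothetical locals):
+//
+//   std::string msg = base::StringPrintf("%d entries", count);
+//   base::StringAppendF(&msg, " under %s", dir.c_str());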
+
+}  // namespace base
+
+#endif  // BASE_STRINGS_STRINGPRINTF_H_
diff --git a/base/strings/stringprintf_unittest.cc b/base/strings/stringprintf_unittest.cc
new file mode 100644
index 0000000..3d43e8c
--- /dev/null
+++ b/base/strings/stringprintf_unittest.cc
@@ -0,0 +1,182 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/stringprintf.h"
+
+#include <errno.h>
+#include <stddef.h>
+
+#include "base/macros.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+// A helper for the StringAppendV test that follows.
+//
+// Just forwards its args to StringAppendV.
+static void StringAppendVTestHelper(std::string* out, const char* format, ...) {
+  va_list ap;
+  va_start(ap, format);
+  StringAppendV(out, format, ap);
+  va_end(ap);
+}
+
+}  // namespace
+
+TEST(StringPrintfTest, StringPrintfEmpty) {
+  EXPECT_EQ("", StringPrintf("%s", ""));
+}
+
+TEST(StringPrintfTest, StringPrintfMisc) {
+  EXPECT_EQ("123hello w", StringPrintf("%3d%2s %1c", 123, "hello", 'w'));
+#if defined(OS_WIN)
+  EXPECT_EQ(L"123hello w", StringPrintf(L"%3d%2ls %1lc", 123, L"hello", 'w'));
+#endif
+}
+
+TEST(StringPrintfTest, StringAppendfEmptyString) {
+  std::string value("Hello");
+  StringAppendF(&value, "%s", "");
+  EXPECT_EQ("Hello", value);
+
+#if defined(OS_WIN)
+  std::wstring valuew(L"Hello");
+  StringAppendF(&valuew, L"%ls", L"");
+  EXPECT_EQ(L"Hello", valuew);
+#endif
+}
+
+TEST(StringPrintfTest, StringAppendfString) {
+  std::string value("Hello");
+  StringAppendF(&value, " %s", "World");
+  EXPECT_EQ("Hello World", value);
+
+#if defined(OS_WIN)
+  std::wstring valuew(L"Hello");
+  StringAppendF(&valuew, L" %ls", L"World");
+  EXPECT_EQ(L"Hello World", valuew);
+#endif
+}
+
+TEST(StringPrintfTest, StringAppendfInt) {
+  std::string value("Hello");
+  StringAppendF(&value, " %d", 123);
+  EXPECT_EQ("Hello 123", value);
+
+#if defined(OS_WIN)
+  std::wstring valuew(L"Hello");
+  StringAppendF(&valuew, L" %d", 123);
+  EXPECT_EQ(L"Hello 123", valuew);
+#endif
+}
+
+// Make sure that lengths exactly around the initial buffer size are handled
+// correctly.
+TEST(StringPrintfTest, StringPrintfBounds) {
+  const int kSrcLen = 1026;
+  char src[kSrcLen];
+  for (size_t i = 0; i < arraysize(src); i++)
+    src[i] = 'A';
+
+  wchar_t srcw[kSrcLen];
+  for (size_t i = 0; i < arraysize(srcw); i++)
+    srcw[i] = 'A';
+
+  for (int i = 1; i < 3; i++) {
+    src[kSrcLen - i] = 0;
+    std::string out;
+    SStringPrintf(&out, "%s", src);
+    EXPECT_STREQ(src, out.c_str());
+
+#if defined(OS_WIN)
+    srcw[kSrcLen - i] = 0;
+    std::wstring outw;
+    SStringPrintf(&outw, L"%ls", srcw);
+    EXPECT_STREQ(srcw, outw.c_str());
+#endif
+  }
+}
+
+// Test very large sprintfs that will cause the buffer to grow.
+TEST(StringPrintfTest, Grow) {
+  char src[1026];
+  for (size_t i = 0; i < arraysize(src); i++)
+    src[i] = 'A';
+  src[1025] = 0;
+
+  const char fmt[] = "%sB%sB%sB%sB%sB%sB%s";
+
+  std::string out;
+  SStringPrintf(&out, fmt, src, src, src, src, src, src, src);
+
+  const int kRefSize = 320000;
+  char* ref = new char[kRefSize];
+#if defined(OS_WIN)
+  sprintf_s(ref, kRefSize, fmt, src, src, src, src, src, src, src);
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  snprintf(ref, kRefSize, fmt, src, src, src, src, src, src, src);
+#endif
+
+  EXPECT_STREQ(ref, out.c_str());
+  delete[] ref;
+}
+
+TEST(StringPrintfTest, StringAppendV) {
+  std::string out;
+  StringAppendVTestHelper(&out, "%d foo %s", 1, "bar");
+  EXPECT_EQ("1 foo bar", out);
+}
+
+// Test the boundary condition for the size of string_util's internal
+// buffer.
+TEST(StringPrintfTest, GrowBoundary) {
+  const int kStringUtilBufLen = 1024;
+  // Our buffer should be one larger than StringAppendVT's stack buffer, plus
+  // one more character for the NUL terminator.
+  const int kBufLen = kStringUtilBufLen + 1 + 1;
+  char src[kBufLen];
+  for (int i = 0; i < kBufLen - 1; ++i)
+    src[i] = 'a';
+  src[kBufLen - 1] = 0;
+
+  std::string out;
+  SStringPrintf(&out, "%s", src);
+
+  EXPECT_STREQ(src, out.c_str());
+}
+
+#if defined(OS_WIN)
+// vswprintf in Visual Studio 2013 fails when given U+FFFF. This tests that the
+// failure case is gracefully handled. In Visual Studio 2015 the bad character
+// is passed through.
+TEST(StringPrintfTest, Invalid) {
+  wchar_t invalid[2];
+  invalid[0] = 0xffff;
+  invalid[1] = 0;
+
+  std::wstring out;
+  SStringPrintf(&out, L"%ls", invalid);
+#if _MSC_VER >= 1900
+  EXPECT_STREQ(invalid, out.c_str());
+#else
+  EXPECT_STREQ(L"", out.c_str());
+#endif
+}
+#endif
+
+// Test that StringPrintf and StringAppendV do not change errno.
+TEST(StringPrintfTest, StringPrintfErrno) {
+  errno = 1;
+  EXPECT_EQ("", StringPrintf("%s", ""));
+  EXPECT_EQ(1, errno);
+  std::string out;
+  StringAppendVTestHelper(&out, "%d foo %s", 1, "bar");
+  EXPECT_EQ(1, errno);
+}
+
+}  // namespace base
diff --git a/base/strings/sys_string_conversions.h b/base/strings/sys_string_conversions.h
new file mode 100644
index 0000000..1ad0307
--- /dev/null
+++ b/base/strings/sys_string_conversions.h
@@ -0,0 +1,84 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_SYS_STRING_CONVERSIONS_H_
+#define BASE_STRINGS_SYS_STRING_CONVERSIONS_H_
+
+// Provides system-dependent string type conversions for cases where it's
+// necessary to not use ICU. Generally, you should not need this in Chrome,
+// but it is used in some shared code. Dependencies should be minimal.
+
+#include <stdint.h>
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
+#include "build/build_config.h"
+
+#if defined(OS_MACOSX)
+#include <CoreFoundation/CoreFoundation.h>
+#ifdef __OBJC__
+@class NSString;
+#else
+class NSString;
+#endif
+#endif  // OS_MACOSX
+
+namespace base {
+
+// Converts between wide and UTF-8 representations of a string. On error, the
+// result is system-dependent.
+BASE_EXPORT std::string SysWideToUTF8(const std::wstring& wide);
+BASE_EXPORT std::wstring SysUTF8ToWide(StringPiece utf8);
+
+// Converts between wide and the system multi-byte representations of a string.
+// DANGER: This will lose information and can change (on Windows, this can
+// change between reboots).
+BASE_EXPORT std::string SysWideToNativeMB(const std::wstring& wide);
+BASE_EXPORT std::wstring SysNativeMBToWide(StringPiece native_mb);
+
+// Windows-specific ------------------------------------------------------------
+
+#if defined(OS_WIN)
+
+// Converts between 8-bit and wide strings, using the given code page. The
+// code page identifier is one accepted by the Windows function
+// MultiByteToWideChar().
+BASE_EXPORT std::wstring SysMultiByteToWide(StringPiece mb, uint32_t code_page);
+BASE_EXPORT std::string SysWideToMultiByte(const std::wstring& wide,
+                                           uint32_t code_page);
+
+#endif  // defined(OS_WIN)
+
+// Mac-specific ----------------------------------------------------------------
+
+#if defined(OS_MACOSX)
+
+// Converts between STL strings and CFStringRefs/NSStrings.
+
+// Creates a string, and returns it with a refcount of 1. You are responsible
+// for releasing it. Returns NULL on failure.
+BASE_EXPORT CFStringRef SysUTF8ToCFStringRef(const std::string& utf8);
+BASE_EXPORT CFStringRef SysUTF16ToCFStringRef(const string16& utf16);
+
+// Same, but returns an autoreleased NSString.
+BASE_EXPORT NSString* SysUTF8ToNSString(const std::string& utf8);
+BASE_EXPORT NSString* SysUTF16ToNSString(const string16& utf16);
+
+// Converts a CFStringRef to an STL string. Returns an empty string on failure.
+BASE_EXPORT std::string SysCFStringRefToUTF8(CFStringRef ref);
+BASE_EXPORT string16 SysCFStringRefToUTF16(CFStringRef ref);
+
+// Same, but accepts NSString input. Converts nil NSString* to the appropriate
+// string type of length 0.
+BASE_EXPORT std::string SysNSStringToUTF8(NSString* ref);
+BASE_EXPORT string16 SysNSStringToUTF16(NSString* ref);
+
+#endif  // defined(OS_MACOSX)
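+
+// Usage sketch (illustrative): UTF-8 <-> wide conversions round-trip.
+//
+//   std::wstring wide = base::SysUTF8ToWide("\xe4\xbd\xa0\xe5\xa5\xbd");
+//   std::string utf8 = base::SysWideToUTF8(wide);  // original bytes back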
+
+}  // namespace base
+
+#endif  // BASE_STRINGS_SYS_STRING_CONVERSIONS_H_
diff --git a/base/strings/sys_string_conversions_mac.mm b/base/strings/sys_string_conversions_mac.mm
new file mode 100644
index 0000000..637d941
--- /dev/null
+++ b/base/strings/sys_string_conversions_mac.mm
@@ -0,0 +1,187 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/sys_string_conversions.h"
+
+#import <Foundation/Foundation.h>
+#include <stddef.h>
+
+#include <vector>
+
+#include "base/mac/foundation_util.h"
+#include "base/mac/scoped_cftyperef.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+namespace {
+
+// Convert the supplied CFString into the specified encoding, and return it as
+// an STL string of the template type.  Returns an empty string on failure.
+//
+// Do not assert in this function since it is used by the assertion code!
+template<typename StringType>
+static StringType CFStringToSTLStringWithEncodingT(CFStringRef cfstring,
+                                                   CFStringEncoding encoding) {
+  CFIndex length = CFStringGetLength(cfstring);
+  if (length == 0)
+    return StringType();
+
+  CFRange whole_string = CFRangeMake(0, length);
+  CFIndex out_size;
+  CFIndex converted = CFStringGetBytes(cfstring,
+                                       whole_string,
+                                       encoding,
+                                       0,      // lossByte
+                                       false,  // isExternalRepresentation
+                                       NULL,   // buffer
+                                       0,      // maxBufLen
+                                       &out_size);
+  if (converted == 0 || out_size == 0)
+    return StringType();
+
+  // out_size is the number of UInt8-sized units needed in the destination.
+  // A buffer allocated as UInt8 units might not be properly aligned to
+  // contain elements of StringType::value_type.  Use a container for the
+  // proper value_type, and convert out_size by figuring the number of
+  // value_type elements per UInt8.  Leave room for a NUL terminator.
+  typename StringType::size_type elements =
+      out_size * sizeof(UInt8) / sizeof(typename StringType::value_type) + 1;
+
+  std::vector<typename StringType::value_type> out_buffer(elements);
+  converted = CFStringGetBytes(cfstring,
+                               whole_string,
+                               encoding,
+                               0,      // lossByte
+                               false,  // isExternalRepresentation
+                               reinterpret_cast<UInt8*>(&out_buffer[0]),
+                               out_size,
+                               NULL);  // usedBufLen
+  if (converted == 0)
+    return StringType();
+
+  out_buffer[elements - 1] = '\0';
+  return StringType(&out_buffer[0], elements - 1);
+}
+
+// Given an STL string |in| with an encoding specified by |in_encoding|,
+// convert it to |out_encoding| and return it as an STL string of the
+// |OutStringType| template type.  Returns an empty string on failure.
+//
+// Do not assert in this function since it is used by the assertion code!
+template<typename InStringType, typename OutStringType>
+static OutStringType STLStringToSTLStringWithEncodingsT(
+    const InStringType& in,
+    CFStringEncoding in_encoding,
+    CFStringEncoding out_encoding) {
+  typename InStringType::size_type in_length = in.length();
+  if (in_length == 0)
+    return OutStringType();
+
+  base::ScopedCFTypeRef<CFStringRef> cfstring(CFStringCreateWithBytesNoCopy(
+      NULL,
+      reinterpret_cast<const UInt8*>(in.data()),
+      in_length * sizeof(typename InStringType::value_type),
+      in_encoding,
+      false,
+      kCFAllocatorNull));
+  if (!cfstring)
+    return OutStringType();
+
+  return CFStringToSTLStringWithEncodingT<OutStringType>(cfstring,
+                                                         out_encoding);
+}
+
+// Given an STL string |in| with an encoding specified by |in_encoding|,
+// return it as a CFStringRef.  Returns NULL on failure.
+template<typename StringType>
+static CFStringRef STLStringToCFStringWithEncodingsT(
+    const StringType& in,
+    CFStringEncoding in_encoding) {
+  typename StringType::size_type in_length = in.length();
+  if (in_length == 0)
+    return CFSTR("");
+
+  return CFStringCreateWithBytes(kCFAllocatorDefault,
+                                 reinterpret_cast<const UInt8*>(in.data()),
+                                 in_length *
+                                   sizeof(typename StringType::value_type),
+                                 in_encoding,
+                                 false);
+}
+
+// Specify the byte ordering explicitly, otherwise CFString will be confused
+// when strings don't carry BOMs, as they typically won't.
+static const CFStringEncoding kNarrowStringEncoding = kCFStringEncodingUTF8;
+#ifdef __BIG_ENDIAN__
+static const CFStringEncoding kMediumStringEncoding = kCFStringEncodingUTF16BE;
+static const CFStringEncoding kWideStringEncoding = kCFStringEncodingUTF32BE;
+#elif defined(__LITTLE_ENDIAN__)
+static const CFStringEncoding kMediumStringEncoding = kCFStringEncodingUTF16LE;
+static const CFStringEncoding kWideStringEncoding = kCFStringEncodingUTF32LE;
+#endif  // __LITTLE_ENDIAN__
+
+}  // namespace
+
+// Do not assert in this function since it is used by the assertion code!
+std::string SysWideToUTF8(const std::wstring& wide) {
+  return STLStringToSTLStringWithEncodingsT<std::wstring, std::string>(
+      wide, kWideStringEncoding, kNarrowStringEncoding);
+}
+
+// Do not assert in this function since it is used by the assertion code!
+std::wstring SysUTF8ToWide(StringPiece utf8) {
+  return STLStringToSTLStringWithEncodingsT<StringPiece, std::wstring>(
+      utf8, kNarrowStringEncoding, kWideStringEncoding);
+}
+
+std::string SysWideToNativeMB(const std::wstring& wide) {
+  return SysWideToUTF8(wide);
+}
+
+std::wstring SysNativeMBToWide(StringPiece native_mb) {
+  return SysUTF8ToWide(native_mb);
+}
+
+CFStringRef SysUTF8ToCFStringRef(const std::string& utf8) {
+  return STLStringToCFStringWithEncodingsT(utf8, kNarrowStringEncoding);
+}
+
+CFStringRef SysUTF16ToCFStringRef(const string16& utf16) {
+  return STLStringToCFStringWithEncodingsT(utf16, kMediumStringEncoding);
+}
+
+NSString* SysUTF8ToNSString(const std::string& utf8) {
+  return (NSString*)base::mac::CFTypeRefToNSObjectAutorelease(
+      SysUTF8ToCFStringRef(utf8));
+}
+
+NSString* SysUTF16ToNSString(const string16& utf16) {
+  return (NSString*)base::mac::CFTypeRefToNSObjectAutorelease(
+      SysUTF16ToCFStringRef(utf16));
+}
+
+std::string SysCFStringRefToUTF8(CFStringRef ref) {
+  return CFStringToSTLStringWithEncodingT<std::string>(ref,
+                                                       kNarrowStringEncoding);
+}
+
+string16 SysCFStringRefToUTF16(CFStringRef ref) {
+  return CFStringToSTLStringWithEncodingT<string16>(ref,
+                                                    kMediumStringEncoding);
+}
+
+std::string SysNSStringToUTF8(NSString* nsstring) {
+  if (!nsstring)
+    return std::string();
+  return SysCFStringRefToUTF8(reinterpret_cast<CFStringRef>(nsstring));
+}
+
+string16 SysNSStringToUTF16(NSString* nsstring) {
+  if (!nsstring)
+    return string16();
+  return SysCFStringRefToUTF16(reinterpret_cast<CFStringRef>(nsstring));
+}
+
+}  // namespace base
diff --git a/base/strings/sys_string_conversions_mac_unittest.mm b/base/strings/sys_string_conversions_mac_unittest.mm
new file mode 100644
index 0000000..4750a9a
--- /dev/null
+++ b/base/strings/sys_string_conversions_mac_unittest.mm
@@ -0,0 +1,21 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import <Foundation/Foundation.h>
+
+#include "base/strings/string16.h"
+#include "base/strings/sys_string_conversions.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(SysStrings, ConversionsFromNSString) {
+  EXPECT_STREQ("Hello, world!", SysNSStringToUTF8(@"Hello, world!").c_str());
+
+  // Conversions should be able to handle a NULL value without crashing.
+  EXPECT_STREQ("", SysNSStringToUTF8(nil).c_str());
+  EXPECT_EQ(string16(), SysNSStringToUTF16(nil));
+}
+
+}  // namespace base
diff --git a/base/strings/sys_string_conversions_posix.cc b/base/strings/sys_string_conversions_posix.cc
new file mode 100644
index 0000000..cfa7b76
--- /dev/null
+++ b/base/strings/sys_string_conversions_posix.cc
@@ -0,0 +1,163 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/sys_string_conversions.h"
+
+#include <stddef.h>
+#include <wchar.h>
+
+#include "base/strings/string_piece.h"
+#include "base/strings/utf_string_conversions.h"
+#include "build/build_config.h"
+
+namespace base {
+
+std::string SysWideToUTF8(const std::wstring& wide) {
+  // In theory this should be using the system-provided conversion rather
+  // than our ICU, but this will do for now.
+  return WideToUTF8(wide);
+}
+std::wstring SysUTF8ToWide(StringPiece utf8) {
+  // In theory this should be using the system-provided conversion rather
+  // than our ICU, but this will do for now.
+  std::wstring out;
+  UTF8ToWide(utf8.data(), utf8.size(), &out);
+  return out;
+}
+
+#if defined(SYSTEM_NATIVE_UTF8) || defined(OS_ANDROID)
+// TODO(port): Consider reverting the OS_ANDROID special case when we have
+// wcrtomb() support and a better understanding of what calls these routines.
+
+std::string SysWideToNativeMB(const std::wstring& wide) {
+  return WideToUTF8(wide);
+}
+
+std::wstring SysNativeMBToWide(StringPiece native_mb) {
+  return SysUTF8ToWide(native_mb);
+}
+
+#else
+
+std::string SysWideToNativeMB(const std::wstring& wide) {
+  mbstate_t ps;
+
+  // Calculate the number of multi-byte characters.  We walk through the string
+  // without writing the output, counting the number of multi-byte characters.
+  size_t num_out_chars = 0;
+  memset(&ps, 0, sizeof(ps));
+  for (size_t i = 0; i < wide.size(); ++i) {
+    const wchar_t src = wide[i];
+    // Use a temp buffer since calling wcrtomb with an output of NULL does not
+    // calculate the output length.
+    char buf[16];
+    // Skip NULLs to avoid wcrtomb's special handling of them.
+    size_t res = src ? wcrtomb(buf, src, &ps) : 0;
+    switch (res) {
+      // Handle any errors and return an empty string.
+      case static_cast<size_t>(-1):
+        return std::string();
+        break;
+      case 0:
+        // We hit an embedded null byte, keep going.
+        ++num_out_chars;
+        break;
+      default:
+        num_out_chars += res;
+        break;
+    }
+  }
+
+  if (num_out_chars == 0)
+    return std::string();
+
+  std::string out;
+  out.resize(num_out_chars);
+
+  // We walk the input string again, with |i| tracking the index of the
+  // wide input, and |j| tracking the multi-byte output.
+  memset(&ps, 0, sizeof(ps));
+  for (size_t i = 0, j = 0; i < wide.size(); ++i) {
+    const wchar_t src = wide[i];
+    // We don't want wcrtomb to do its funkiness for embedded NULLs.
+    size_t res = src ? wcrtomb(&out[j], src, &ps) : 0;
+    switch (res) {
+      // Handle any errors and return an empty string.
+      case static_cast<size_t>(-1):
+        return std::string();
+        break;
+      case 0:
+        // We hit an embedded null byte, keep going.
+        ++j;  // Output is already zeroed.
+        break;
+      default:
+        j += res;
+        break;
+    }
+  }
+
+  return out;
+}
+
+std::wstring SysNativeMBToWide(StringPiece native_mb) {
+  mbstate_t ps;
+
+  // Calculate the number of wide characters.  We walk through the string
+  // without writing the output, counting the number of wide characters.
+  size_t num_out_chars = 0;
+  memset(&ps, 0, sizeof(ps));
+  for (size_t i = 0; i < native_mb.size(); ) {
+    const char* src = native_mb.data() + i;
+    size_t res = mbrtowc(nullptr, src, native_mb.size() - i, &ps);
+    switch (res) {
+      // Handle any errors and return an empty string.
+      case static_cast<size_t>(-2):
+      case static_cast<size_t>(-1):
+        return std::wstring();
+        break;
+      case 0:
+        // We hit an embedded null byte, keep going.
+        i += 1;
+        FALLTHROUGH;
+      default:
+        i += res;
+        ++num_out_chars;
+        break;
+    }
+  }
+
+  if (num_out_chars == 0)
+    return std::wstring();
+
+  std::wstring out;
+  out.resize(num_out_chars);
+
+  memset(&ps, 0, sizeof(ps));  // Clear the shift state.
+  // We walk the input string again, with |i| tracking the index of the
+  // multi-byte input, and |j| tracking the wide output.
+  for (size_t i = 0, j = 0; i < native_mb.size(); ++j) {
+    const char* src = native_mb.data() + i;
+    wchar_t* dst = &out[j];
+    size_t res = mbrtowc(dst, src, native_mb.size() - i, &ps);
+    switch (res) {
+      // Handle any errors and return an empty string.
+      case static_cast<size_t>(-2):
+      case static_cast<size_t>(-1):
+        return std::wstring();
+        break;
+      case 0:
+        i += 1;  // Skip null byte.
+        break;
+      default:
+        i += res;
+        break;
+    }
+  }
+
+  return out;
+}
+
+#endif  // defined(SYSTEM_NATIVE_UTF8) || defined(OS_ANDROID)
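+
+// Worked example (illustrative): converting L"a\x4f60" in a UTF-8 locale
+// first counts output bytes with wcrtomb() into a scratch buffer
+// (1 + 3 = 4), then resizes the result to 4 bytes and converts again in
+// place. Embedded NULs bypass wcrtomb() and are emitted as single zero
+// bytes, so they cannot cut the output short.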
+
+}  // namespace base
diff --git a/base/strings/sys_string_conversions_unittest.cc b/base/strings/sys_string_conversions_unittest.cc
new file mode 100644
index 0000000..f5ffaec
--- /dev/null
+++ b/base/strings/sys_string_conversions_unittest.cc
@@ -0,0 +1,196 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+
+#include <string>
+
+#include "base/macros.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/sys_string_conversions.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/test/scoped_locale.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#ifdef WCHAR_T_IS_UTF32
+static const std::wstring kSysWideOldItalicLetterA = L"\x10300";
+#else
+static const std::wstring kSysWideOldItalicLetterA = L"\xd800\xdf00";
+#endif
+
+namespace base {
+
+TEST(SysStrings, SysWideToUTF8) {
+  EXPECT_EQ("Hello, world", SysWideToUTF8(L"Hello, world"));
+  EXPECT_EQ("\xe4\xbd\xa0\xe5\xa5\xbd", SysWideToUTF8(L"\x4f60\x597d"));
+
+  // >16 bits
+  EXPECT_EQ("\xF0\x90\x8C\x80", SysWideToUTF8(kSysWideOldItalicLetterA));
+
+  // Error case. When Windows finds a UTF-16 character going off the end of
+  // a string, it just converts that literal value to UTF-8, even though this
+  // is invalid.
+  //
+  // This is what XP does, but Vista has different behavior, so we don't bother
+  // verifying it:
+  // EXPECT_EQ("\xE4\xBD\xA0\xED\xA0\x80zyxw",
+  //           SysWideToUTF8(L"\x4f60\xd800zyxw"));
+
+  // Test embedded NULLs.
+  std::wstring wide_null(L"a");
+  wide_null.push_back(0);
+  wide_null.push_back('b');
+
+  std::string expected_null("a");
+  expected_null.push_back(0);
+  expected_null.push_back('b');
+
+  EXPECT_EQ(expected_null, SysWideToUTF8(wide_null));
+}
+
+TEST(SysStrings, SysUTF8ToWide) {
+  EXPECT_EQ(L"Hello, world", SysUTF8ToWide("Hello, world"));
+  EXPECT_EQ(L"\x4f60\x597d", SysUTF8ToWide("\xe4\xbd\xa0\xe5\xa5\xbd"));
+  // >16 bits
+  EXPECT_EQ(kSysWideOldItalicLetterA, SysUTF8ToWide("\xF0\x90\x8C\x80"));
+
+  // Error case. When Windows finds an invalid UTF-8 character, it just skips
+  // it. This seems weird because it's inconsistent with the reverse conversion.
+  //
+  // This is what XP does, but Vista has different behavior, so we don't bother
+  // verifying it:
+  // EXPECT_EQ(L"\x4f60zyxw", SysUTF8ToWide("\xe4\xbd\xa0\xe5\xa5zyxw"));
+
+  // Test embedded NULLs.
+  std::string utf8_null("a");
+  utf8_null.push_back(0);
+  utf8_null.push_back('b');
+
+  std::wstring expected_null(L"a");
+  expected_null.push_back(0);
+  expected_null.push_back('b');
+
+  EXPECT_EQ(expected_null, SysUTF8ToWide(utf8_null));
+}
+
+#if defined(OS_LINUX)  // Tests depend on setting a specific Linux locale.
+
+TEST(SysStrings, SysWideToNativeMB) {
+#if !defined(SYSTEM_NATIVE_UTF8)
+  ScopedLocale locale("en_US.UTF-8");
+#endif
+  EXPECT_EQ("Hello, world", SysWideToNativeMB(L"Hello, world"));
+  EXPECT_EQ("\xe4\xbd\xa0\xe5\xa5\xbd", SysWideToNativeMB(L"\x4f60\x597d"));
+
+  // >16 bits
+  EXPECT_EQ("\xF0\x90\x8C\x80", SysWideToNativeMB(kSysWideOldItalicLetterA));
+
+  // Error case. When Windows finds a UTF-16 character going off the end of
+  // a string, it just converts that literal value to UTF-8, even though this
+  // is invalid.
+  //
+  // This is what XP does, but Vista has different behavior, so we don't bother
+  // verifying it:
+  // EXPECT_EQ("\xE4\xBD\xA0\xED\xA0\x80zyxw",
+  //           SysWideToNativeMB(L"\x4f60\xd800zyxw"));
+
+  // Test embedded NULLs.
+  std::wstring wide_null(L"a");
+  wide_null.push_back(0);
+  wide_null.push_back('b');
+
+  std::string expected_null("a");
+  expected_null.push_back(0);
+  expected_null.push_back('b');
+
+  EXPECT_EQ(expected_null, SysWideToNativeMB(wide_null));
+}
+
+// We assume the test is running in a UTF8 locale.
+TEST(SysStrings, SysNativeMBToWide) {
+#if !defined(SYSTEM_NATIVE_UTF8)
+  ScopedLocale locale("en_US.UTF-8");
+#endif
+  EXPECT_EQ(L"Hello, world", SysNativeMBToWide("Hello, world"));
+  EXPECT_EQ(L"\x4f60\x597d", SysNativeMBToWide("\xe4\xbd\xa0\xe5\xa5\xbd"));
+  // >16 bits
+  EXPECT_EQ(kSysWideOldItalicLetterA, SysNativeMBToWide("\xF0\x90\x8C\x80"));
+
+  // Error case. When Windows finds an invalid UTF-8 character, it just skips
+  // it. This seems weird because it's inconsistent with the reverse conversion.
+  //
+  // This is what XP does, but Vista has different behavior, so we don't bother
+  // verifying it:
+  // EXPECT_EQ(L"\x4f60zyxw", SysNativeMBToWide("\xe4\xbd\xa0\xe5\xa5zyxw"));
+
+  // Test embedded NULLs.
+  std::string utf8_null("a");
+  utf8_null.push_back(0);
+  utf8_null.push_back('b');
+
+  std::wstring expected_null(L"a");
+  expected_null.push_back(0);
+  expected_null.push_back('b');
+
+  EXPECT_EQ(expected_null, SysNativeMBToWide(utf8_null));
+}
+
+static const wchar_t* const kConvertRoundtripCases[] = {
+  L"Google Video",
+  // "网页 图片 资讯更多 »"
+  L"\x7f51\x9875\x0020\x56fe\x7247\x0020\x8d44\x8baf\x66f4\x591a\x0020\x00bb",
+  //  "Παγκόσμιος Ιστός"
+  L"\x03a0\x03b1\x03b3\x03ba\x03cc\x03c3\x03bc\x03b9"
+  L"\x03bf\x03c2\x0020\x0399\x03c3\x03c4\x03cc\x03c2",
+  // "Поиск страниц на русском"
+  L"\x041f\x043e\x0438\x0441\x043a\x0020\x0441\x0442"
+  L"\x0440\x0430\x043d\x0438\x0446\x0020\x043d\x0430"
+  L"\x0020\x0440\x0443\x0441\x0441\x043a\x043e\x043c",
+  // "전체서비스"
+  L"\xc804\xccb4\xc11c\xbe44\xc2a4",
+
+  // Test characters that take more than 16 bits. This will depend on whether
+  // wchar_t is 16 or 32 bits.
+#if defined(WCHAR_T_IS_UTF16)
+  L"\xd800\xdf00",
+  // Mathematical Alphanumeric Symbols (U+011d40 - U+011d44: A, B, C, D, E).
+  L"\xd807\xdd40\xd807\xdd41\xd807\xdd42\xd807\xdd43\xd807\xdd44",
+#elif defined(WCHAR_T_IS_UTF32)
+  L"\x10300",
+  // Mathematical Alphanumeric Symbols (U+011d40 - U+011d44: A, B, C, D, E).
+  L"\x11d40\x11d41\x11d42\x11d43\x11d44",
+#endif
+};
+
+
+TEST(SysStrings, SysNativeMBAndWide) {
+#if !defined(SYSTEM_NATIVE_UTF8)
+  ScopedLocale locale("en_US.UTF-8");
+#endif
+  for (size_t i = 0; i < arraysize(kConvertRoundtripCases); ++i) {
+    std::wstring wide = kConvertRoundtripCases[i];
+    std::wstring trip = SysNativeMBToWide(SysWideToNativeMB(wide));
+    EXPECT_EQ(wide.size(), trip.size());
+    EXPECT_EQ(wide, trip);
+  }
+
+  // We assume our test is running in UTF-8, so double check through ICU.
+  for (size_t i = 0; i < arraysize(kConvertRoundtripCases); ++i) {
+    std::wstring wide = kConvertRoundtripCases[i];
+    std::wstring trip = SysNativeMBToWide(WideToUTF8(wide));
+    EXPECT_EQ(wide.size(), trip.size());
+    EXPECT_EQ(wide, trip);
+  }
+
+  for (size_t i = 0; i < arraysize(kConvertRoundtripCases); ++i) {
+    std::wstring wide = kConvertRoundtripCases[i];
+    std::wstring trip = UTF8ToWide(SysWideToNativeMB(wide));
+    EXPECT_EQ(wide.size(), trip.size());
+    EXPECT_EQ(wide, trip);
+  }
+}
+#endif  // OS_LINUX
+
+}  // namespace base
diff --git a/base/strings/sys_string_conversions_win.cc b/base/strings/sys_string_conversions_win.cc
new file mode 100644
index 0000000..356064f
--- /dev/null
+++ b/base/strings/sys_string_conversions_win.cc
@@ -0,0 +1,71 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/sys_string_conversions.h"
+
+#include <windows.h>
+#include <stdint.h>
+
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+// Do not assert in this function since it is used by the assertion code!
+std::string SysWideToUTF8(const std::wstring& wide) {
+  return SysWideToMultiByte(wide, CP_UTF8);
+}
+
+// Do not assert in this function since it is used by the assertion code!
+std::wstring SysUTF8ToWide(StringPiece utf8) {
+  return SysMultiByteToWide(utf8, CP_UTF8);
+}
+
+std::string SysWideToNativeMB(const std::wstring& wide) {
+  return SysWideToMultiByte(wide, CP_ACP);
+}
+
+std::wstring SysNativeMBToWide(StringPiece native_mb) {
+  return SysMultiByteToWide(native_mb, CP_ACP);
+}
+
+// Do not assert in this function since it is used by the assertion code!
+std::wstring SysMultiByteToWide(StringPiece mb, uint32_t code_page) {
+  if (mb.empty())
+    return std::wstring();
+
+  int mb_length = static_cast<int>(mb.length());
+  // Compute the length of the buffer.
+  int charcount = MultiByteToWideChar(code_page, 0,
+                                      mb.data(), mb_length, NULL, 0);
+  if (charcount == 0)
+    return std::wstring();
+
+  std::wstring wide;
+  wide.resize(charcount);
+  MultiByteToWideChar(code_page, 0, mb.data(), mb_length, &wide[0], charcount);
+
+  return wide;
+}
+
+// Do not assert in this function since it is used by the assertion code!
+std::string SysWideToMultiByte(const std::wstring& wide, uint32_t code_page) {
+  int wide_length = static_cast<int>(wide.length());
+  if (wide_length == 0)
+    return std::string();
+
+  // Compute the length of the buffer we'll need.
+  int charcount = WideCharToMultiByte(code_page, 0, wide.data(), wide_length,
+                                      NULL, 0, NULL, NULL);
+  if (charcount == 0)
+    return std::string();
+
+  std::string mb;
+  mb.resize(charcount);
+  WideCharToMultiByte(code_page, 0, wide.data(), wide_length,
+                      &mb[0], charcount, NULL, NULL);
+
+  return mb;
+}
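+
+// Worked example (illustrative): the null-buffer call is the standard Win32
+// sizing idiom. SysMultiByteToWide("\xe4\xbd\xa0", CP_UTF8) first gets a
+// count of 1 UTF-16 unit from MultiByteToWideChar(), resizes the wstring to
+// 1, then converts into it, yielding L"\x4f60".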
+
+}  // namespace base
diff --git a/base/strings/utf_offset_string_conversions.cc b/base/strings/utf_offset_string_conversions.cc
new file mode 100644
index 0000000..b91ee03
--- /dev/null
+++ b/base/strings/utf_offset_string_conversions.cc
@@ -0,0 +1,268 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/utf_offset_string_conversions.h"
+
+#include <stdint.h>
+
+#include <algorithm>
+#include <memory>
+
+#include "base/logging.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/utf_string_conversion_utils.h"
+
+namespace base {
+
+OffsetAdjuster::Adjustment::Adjustment(size_t original_offset,
+                                       size_t original_length,
+                                       size_t output_length)
+    : original_offset(original_offset),
+      original_length(original_length),
+      output_length(output_length) {
+}
+
+// static
+void OffsetAdjuster::AdjustOffsets(const Adjustments& adjustments,
+                                   std::vector<size_t>* offsets_for_adjustment,
+                                   size_t limit) {
+  DCHECK(offsets_for_adjustment);
+  for (std::vector<size_t>::iterator i(offsets_for_adjustment->begin());
+       i != offsets_for_adjustment->end(); ++i)
+    AdjustOffset(adjustments, &(*i), limit);
+}
+
+// static
+void OffsetAdjuster::AdjustOffset(const Adjustments& adjustments,
+                                  size_t* offset,
+                                  size_t limit) {
+  DCHECK(offset);
+  if (*offset == string16::npos)
+    return;
+  int adjustment = 0;
+  for (Adjustments::const_iterator i = adjustments.begin();
+       i != adjustments.end(); ++i) {
+    if (*offset <= i->original_offset)
+      break;
+    if (*offset < (i->original_offset + i->original_length)) {
+      *offset = string16::npos;
+      return;
+    }
+    adjustment += static_cast<int>(i->original_length - i->output_length);
+  }
+  *offset -= adjustment;
+
+  if (*offset > limit)
+    *offset = string16::npos;
+}
+
+// static
+void OffsetAdjuster::UnadjustOffsets(
+    const Adjustments& adjustments,
+    std::vector<size_t>* offsets_for_unadjustment) {
+  if (!offsets_for_unadjustment || adjustments.empty())
+    return;
+  for (std::vector<size_t>::iterator i(offsets_for_unadjustment->begin());
+       i != offsets_for_unadjustment->end(); ++i)
+    UnadjustOffset(adjustments, &(*i));
+}
+
+// static
+void OffsetAdjuster::UnadjustOffset(const Adjustments& adjustments,
+                                    size_t* offset) {
+  if (*offset == string16::npos)
+    return;
+  int adjustment = 0;
+  for (Adjustments::const_iterator i = adjustments.begin();
+       i != adjustments.end(); ++i) {
+    if (*offset + adjustment <= i->original_offset)
+      break;
+    adjustment += static_cast<int>(i->original_length - i->output_length);
+    if ((*offset + adjustment) <
+        (i->original_offset + i->original_length)) {
+      *offset = string16::npos;
+      return;
+    }
+  }
+  *offset += adjustment;
+}
+
+// static
+void OffsetAdjuster::MergeSequentialAdjustments(
+    const Adjustments& first_adjustments,
+    Adjustments* adjustments_on_adjusted_string) {
+  Adjustments::iterator adjusted_iter = adjustments_on_adjusted_string->begin();
+  Adjustments::const_iterator first_iter = first_adjustments.begin();
+  // Simultaneously iterate over all |adjustments_on_adjusted_string| and
+  // |first_adjustments|, adding adjustments to or correcting the adjustments
+  // in |adjustments_on_adjusted_string| as we go.  |shift| keeps track of the
+  // current number of characters collapsed by |first_adjustments| up to this
+  // point.  |currently_collapsing| keeps track of the number of characters
+  // collapsed by |first_adjustments| into the current |adjusted_iter|'s
+  // length.  These are characters that will change |shift| as soon as we're
+  // done processing the current |adjusted_iter|; they are not yet reflected in
+  // |shift|.
+  size_t shift = 0;
+  size_t currently_collapsing = 0;
+  while (adjusted_iter != adjustments_on_adjusted_string->end()) {
+    if ((first_iter == first_adjustments.end()) ||
+        ((adjusted_iter->original_offset + shift +
+          adjusted_iter->original_length) <= first_iter->original_offset)) {
+      // Entire |adjusted_iter| (accounting for its shift and including its
+      // whole original length) comes before |first_iter|.
+      //
+      // Correct the offset at |adjusted_iter| and move onto the next
+      // adjustment that needs revising.
+      adjusted_iter->original_offset += shift;
+      shift += currently_collapsing;
+      currently_collapsing = 0;
+      ++adjusted_iter;
+    } else if ((adjusted_iter->original_offset + shift) >
+               first_iter->original_offset) {
+      // |first_iter| comes before the |adjusted_iter| (as adjusted by |shift|).
+
+      // It's not possible for the adjustments to overlap.  (It shouldn't
+      // be possible that we have an |adjusted_iter->original_offset| that,
+      // when adjusted by the computed |shift|, is in the middle of
+      // |first_iter|'s output's length.  After all, that would mean the
+      // current adjustment_on_adjusted_string somehow points to an offset
+      // that was supposed to have been eliminated by the first set of
+      // adjustments.)
+      DCHECK_LE(first_iter->original_offset + first_iter->output_length,
+                adjusted_iter->original_offset + shift);
+
+      // Add the |first_adjustment_iter| to the full set of adjustments while
+      // making sure |adjusted_iter| continues pointing to the same element.
+      // We do this by inserting the |first_adjustment_iter| right before
+      // |adjusted_iter|, then incrementing |adjusted_iter| so it points to
+      // the following element.
+      shift += first_iter->original_length - first_iter->output_length;
+      adjusted_iter = adjustments_on_adjusted_string->insert(
+          adjusted_iter, *first_iter);
+      ++adjusted_iter;
+      ++first_iter;
+    } else {
+      // The first adjustment adjusted something that then got further adjusted
+      // by the second set of adjustments.  In other words, |first_iter| points
+      // to something in the range covered by |adjusted_iter|'s length (after
+      // accounting for |shift|).  Precisely,
+      //   adjusted_iter->original_offset + shift
+      //   <=
+      //   first_iter->original_offset
+      //   <=
+      //   adjusted_iter->original_offset + shift +
+      //       adjusted_iter->original_length
+
+      // Modify the current |adjusted_iter| to include whatever collapsing
+      // happened in |first_iter|, then advance to the next |first_adjustments|
+      // because we dealt with the current one.
+      const int collapse = static_cast<int>(first_iter->original_length) -
+          static_cast<int>(first_iter->output_length);
+      // This function does not know how to deal with a string that expands and
+      // then gets modified, only strings that collapse and then get modified.
+      DCHECK_GT(collapse, 0);
+      adjusted_iter->original_length += collapse;
+      currently_collapsing += collapse;
+      ++first_iter;
+    }
+  }
+  DCHECK_EQ(0u, currently_collapsing);
+  if (first_iter != first_adjustments.end()) {
+    // Only first adjustments are left.  These do not need to be modified.
+    // (Their offsets are already correct with respect to the original string.)
+    // Append them all.
+    DCHECK(adjusted_iter == adjustments_on_adjusted_string->end());
+    adjustments_on_adjusted_string->insert(
+        adjustments_on_adjusted_string->end(), first_iter,
+        first_adjustments.end());
+  }
+}
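+
+// Worked example (illustrative): if |first_adjustments| holds {0, 4, 2}
+// (a 4-byte UTF-8 sequence became 2 UTF-16 units) and the second pass
+// recorded {0, 2, 1} against the intermediate string, merging grows the
+// second adjustment's original_length by the collapse (2), producing
+// {0, 4, 1}: offsets into the original string now adjust in one step.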
+
+// Converts the given source Unicode character type to the given destination
+// Unicode character type as an STL string. The given input buffer and size
+// determine the source, and the given output STL string will be replaced by
+// the result.  If non-NULL, |adjustments| is set to reflect all the
+// alterations to the string that are not one-character-to-one-character.
+// It will always be sorted by increasing offset.
+template<typename SrcChar, typename DestStdString>
+bool ConvertUnicode(const SrcChar* src,
+                    size_t src_len,
+                    DestStdString* output,
+                    OffsetAdjuster::Adjustments* adjustments) {
+  if (adjustments)
+    adjustments->clear();
+  // ICU requires 32-bit numbers.
+  bool success = true;
+  int32_t src_len32 = static_cast<int32_t>(src_len);
+  for (int32_t i = 0; i < src_len32; i++) {
+    uint32_t code_point;
+    size_t original_i = i;
+    size_t chars_written = 0;
+    if (ReadUnicodeCharacter(src, src_len32, &i, &code_point)) {
+      chars_written = WriteUnicodeCharacter(code_point, output);
+    } else {
+      chars_written = WriteUnicodeCharacter(0xFFFD, output);
+      success = false;
+    }
+
+    // Only bother writing an adjustment if this modification changed the
+    // length of this character.
+    // NOTE: ReadUnicodeCharacter() adjusts |i| to point _at_ the last
+    // character read, not after it (so that incrementing it in the loop
+    // increment will place it at the right location), so we need to account
+    // for that in determining the amount that was read.
+    if (adjustments && ((i - original_i + 1) != chars_written)) {
+      adjustments->push_back(OffsetAdjuster::Adjustment(
+          original_i, i - original_i + 1, chars_written));
+    }
+  }
+  return success;
+}
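+
+// Worked example (illustrative): converting the 4-byte UTF-8 sequence
+// "\xF0\x90\x8C\x80" (U+10300) to UTF-16 writes a surrogate pair, so the
+// loop records the adjustment {original_offset 0, original_length 4,
+// output_length 2}.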
+
+bool UTF8ToUTF16WithAdjustments(
+    const char* src,
+    size_t src_len,
+    string16* output,
+    base::OffsetAdjuster::Adjustments* adjustments) {
+  PrepareForUTF16Or32Output(src, src_len, output);
+  return ConvertUnicode(src, src_len, output, adjustments);
+}
+
+string16 UTF8ToUTF16WithAdjustments(
+    const base::StringPiece& utf8,
+    base::OffsetAdjuster::Adjustments* adjustments) {
+  string16 result;
+  UTF8ToUTF16WithAdjustments(utf8.data(), utf8.length(), &result, adjustments);
+  return result;
+}
+
+string16 UTF8ToUTF16AndAdjustOffsets(
+    const base::StringPiece& utf8,
+    std::vector<size_t>* offsets_for_adjustment) {
+  for (size_t& offset : *offsets_for_adjustment) {
+    if (offset > utf8.length())
+      offset = string16::npos;
+  }
+  OffsetAdjuster::Adjustments adjustments;
+  string16 result = UTF8ToUTF16WithAdjustments(utf8, &adjustments);
+  OffsetAdjuster::AdjustOffsets(adjustments, offsets_for_adjustment);
+  return result;
+}
+
+std::string UTF16ToUTF8AndAdjustOffsets(
+    const base::StringPiece16& utf16,
+    std::vector<size_t>* offsets_for_adjustment) {
+  for (size_t& offset : *offsets_for_adjustment) {
+    if (offset > utf16.length())
+      offset = string16::npos;
+  }
+  std::string result;
+  PrepareForUTF8Output(utf16.data(), utf16.length(), &result);
+  OffsetAdjuster::Adjustments adjustments;
+  ConvertUnicode(utf16.data(), utf16.length(), &result, &adjustments);
+  OffsetAdjuster::AdjustOffsets(adjustments, offsets_for_adjustment);
+  return result;
+}
+
+}  // namespace base
diff --git a/base/strings/utf_offset_string_conversions.h b/base/strings/utf_offset_string_conversions.h
new file mode 100644
index 0000000..f741955
--- /dev/null
+++ b/base/strings/utf_offset_string_conversions.h
@@ -0,0 +1,114 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_UTF_OFFSET_STRING_CONVERSIONS_H_
+#define BASE_STRINGS_UTF_OFFSET_STRING_CONVERSIONS_H_
+
+#include <stddef.h>
+
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+// A helper class and associated data structures to adjust offsets into a
+// string in response to various adjustments one might do to that string
+// (e.g., eliminating a range).  For details on offsets, see the comments by
+// the AdjustOffsets() function below.
+class BASE_EXPORT OffsetAdjuster {
+ public:
+  struct BASE_EXPORT Adjustment {
+    Adjustment(size_t original_offset,
+               size_t original_length,
+               size_t output_length);
+
+    size_t original_offset;
+    size_t original_length;
+    size_t output_length;
+  };
+  typedef std::vector<Adjustment> Adjustments;
+
+  // Adjusts all offsets in |offsets_for_adjustment| to reflect the adjustments
+  // recorded in |adjustments|.  Adjusted offsets greater than |limit| will be
+  // set to string16::npos.
+  //
+  // Offsets represents insertion/selection points between characters: if |src|
+  // is "abcd", then 0 is before 'a', 2 is between 'b' and 'c', and 4 is at the
+  // end of the string.  Valid input offsets range from 0 to |src_len|.  On
+  // exit, each offset will have been modified to point at the same logical
+  // position in the output string.  If an offset cannot be successfully
+  // adjusted (e.g., because it points into the middle of a multibyte sequence),
+  // it will be set to string16::npos.
+  static void AdjustOffsets(const Adjustments& adjustments,
+                            std::vector<size_t>* offsets_for_adjustment,
+                            size_t limit = string16::npos);
+
+  // Adjusts the single |offset| to reflect the adjustments recorded in
+  // |adjustments|.
+  static void AdjustOffset(const Adjustments& adjustments,
+                           size_t* offset,
+                           size_t limit = string16::npos);
+
+  // Adjusts all offsets in |offsets_for_unadjustment| to reflect the reverse
+  // of the adjustments recorded in |adjustments|.  In other words, the offsets
+  // provided represent offsets into an adjusted string and the caller wants
+  // to know the offsets they correspond to in the original string.  If an
+  // offset cannot be successfully unadjusted (e.g., because it points into
+  // the middle of a multibyte sequence), it will be set to string16::npos.
+  static void UnadjustOffsets(const Adjustments& adjustments,
+                              std::vector<size_t>* offsets_for_unadjustment);
+
+  // Adjusts the single |offset| to reflect the reverse of the adjustments
+  // recorded in |adjustments|.
+  static void UnadjustOffset(const Adjustments& adjustments,
+                             size_t* offset);
+
+  // Combines two sequential sets of adjustments, storing the combined revised
+  // adjustments in |adjustments_on_adjusted_string|.  That is, suppose a
+  // string was altered in some way, with the alterations recorded as
+  // adjustments in |first_adjustments|.  Then suppose the resulting string is
+  // further altered, with the alterations recorded as adjustments stored in
+  // |adjustments_on_adjusted_string|, with the offsets recorded in these
+  // adjustments being with respect to the intermediate string.  This function
+  // combines the two sets of adjustments into one, storing the result in
+  // |adjustments_on_adjusted_string|, whose offsets are correct with respect
+  // to the original string.
+  //
+  // Assumes both parameters are sorted by increasing offset.
+  //
+  // WARNING: Only supports |first_adjustments| that involve collapsing ranges
+  // of text, not expanding ranges.
+  static void MergeSequentialAdjustments(
+      const Adjustments& first_adjustments,
+      Adjustments* adjustments_on_adjusted_string);
+};
+
+// Like the conversions in utf_string_conversions.h, but also fills in an
+// |adjustments| parameter that reflects the alterations done to the string.
+// It may be NULL.
+BASE_EXPORT bool UTF8ToUTF16WithAdjustments(
+    const char* src,
+    size_t src_len,
+    string16* output,
+    base::OffsetAdjuster::Adjustments* adjustments);
+BASE_EXPORT string16 UTF8ToUTF16WithAdjustments(
+    const base::StringPiece& utf8,
+    base::OffsetAdjuster::Adjustments* adjustments);
+// As above, but instead internally examines the adjustments and applies them
+// to |offsets_for_adjustment|.  Input offsets greater than the length of the
+// input string will be set to string16::npos.  See comments by AdjustOffsets().
+BASE_EXPORT string16 UTF8ToUTF16AndAdjustOffsets(
+    const base::StringPiece& utf8,
+    std::vector<size_t>* offsets_for_adjustment);
+BASE_EXPORT std::string UTF16ToUTF8AndAdjustOffsets(
+    const base::StringPiece16& utf16,
+    std::vector<size_t>* offsets_for_adjustment);
+
+}  // namespace base
+
+#endif  // BASE_STRINGS_UTF_OFFSET_STRING_CONVERSIONS_H_
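As a quick orientation for this API, here is a minimal usage sketch (illustrative only, not part of this change; the string and offsets are made up) showing how a caller keeps a caret position valid across a UTF-8 to UTF-16 conversion:

#include <vector>

#include "base/strings/string16.h"
#include "base/strings/utf_offset_string_conversions.h"

void CaretExample() {
  // Two 3-byte UTF-8 sequences followed by one ASCII byte.
  const char kUtf8[] = "\xe4\xbd\xa0\xe5\xa5\xbd" "x";
  std::vector<size_t> offsets;
  offsets.push_back(6);  // Caret before 'x' (byte offset in the UTF-8 input).
  base::string16 utf16 = base::UTF8ToUTF16AndAdjustOffsets(kUtf8, &offsets);
  // Each 3-byte sequence became one UTF-16 code unit, so offsets[0] == 2:
  // the caret is still before 'x' in the converted string.
}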
diff --git a/base/strings/utf_offset_string_conversions_unittest.cc b/base/strings/utf_offset_string_conversions_unittest.cc
new file mode 100644
index 0000000..c5ce647
--- /dev/null
+++ b/base/strings/utf_offset_string_conversions_unittest.cc
@@ -0,0 +1,300 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+
+#include <algorithm>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/utf_offset_string_conversions.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+static const size_t kNpos = string16::npos;
+
+}  // namespace
+
+TEST(UTFOffsetStringConversionsTest, AdjustOffset) {
+  struct UTF8ToUTF16Case {
+    const char* utf8;
+    size_t input_offset;
+    size_t output_offset;
+  } utf8_to_utf16_cases[] = {
+    {"", 0, 0},
+    {"", kNpos, kNpos},
+    {"\xe4\xbd\xa0\xe5\xa5\xbd", 1, kNpos},
+    {"\xe4\xbd\xa0\xe5\xa5\xbd", 3, 1},
+    {"\xed\xb0\x80z", 3, 3},
+    {"A\xF0\x90\x8C\x80z", 1, 1},
+    {"A\xF0\x90\x8C\x80z", 2, kNpos},
+    {"A\xF0\x90\x8C\x80z", 5, 3},
+    {"A\xF0\x90\x8C\x80z", 6, 4},
+    {"A\xF0\x90\x8C\x80z", kNpos, kNpos},
+  };
+  for (size_t i = 0; i < arraysize(utf8_to_utf16_cases); ++i) {
+    const size_t offset = utf8_to_utf16_cases[i].input_offset;
+    std::vector<size_t> offsets;
+    offsets.push_back(offset);
+    UTF8ToUTF16AndAdjustOffsets(utf8_to_utf16_cases[i].utf8, &offsets);
+    EXPECT_EQ(utf8_to_utf16_cases[i].output_offset, offsets[0]);
+  }
+
+  struct UTF16ToUTF8Case {
+    char16 utf16[10];
+    size_t input_offset;
+    size_t output_offset;
+  } utf16_to_utf8_cases[] = {
+      {{}, 0, 0},
+      // Converted to 3-byte utf-8 sequences
+      {{0x5909, 0x63DB}, 3, kNpos},
+      {{0x5909, 0x63DB}, 2, 6},
+      {{0x5909, 0x63DB}, 1, 3},
+      {{0x5909, 0x63DB}, 0, 0},
+      // Converted to 2-byte utf-8 sequences
+      {{'A', 0x00bc, 0x00be, 'z'}, 1, 1},
+      {{'A', 0x00bc, 0x00be, 'z'}, 2, 3},
+      {{'A', 0x00bc, 0x00be, 'z'}, 3, 5},
+      {{'A', 0x00bc, 0x00be, 'z'}, 4, 6},
+      // Surrogate pair
+      {{'A', 0xd800, 0xdf00, 'z'}, 1, 1},
+      {{'A', 0xd800, 0xdf00, 'z'}, 2, kNpos},
+      {{'A', 0xd800, 0xdf00, 'z'}, 3, 5},
+      {{'A', 0xd800, 0xdf00, 'z'}, 4, 6},
+  };
+  for (size_t i = 0; i < arraysize(utf16_to_utf8_cases); ++i) {
+    size_t offset = utf16_to_utf8_cases[i].input_offset;
+    std::vector<size_t> offsets;
+    offsets.push_back(offset);
+    UTF16ToUTF8AndAdjustOffsets(utf16_to_utf8_cases[i].utf16, &offsets);
+    EXPECT_EQ(utf16_to_utf8_cases[i].output_offset, offsets[0]) << i;
+  }
+}
+
+TEST(UTFOffsetStringConversionsTest, LimitOffsets) {
+  const OffsetAdjuster::Adjustments kNoAdjustments;
+  const size_t kLimit = 10;
+  const size_t kItems = 20;
+  std::vector<size_t> size_ts;
+  for (size_t t = 0; t < kItems; ++t) {
+    size_ts.push_back(t);
+    OffsetAdjuster::AdjustOffset(kNoAdjustments, &size_ts.back(), kLimit);
+  }
+  size_t unlimited_count = 0;
+  for (std::vector<size_t>::iterator ti = size_ts.begin(); ti != size_ts.end();
+       ++ti) {
+    if (*ti != kNpos)
+      ++unlimited_count;
+  }
+  EXPECT_EQ(11U, unlimited_count);
+
+  // Reverse the values in the vector and try again.
+  size_ts.clear();
+  for (size_t t = kItems; t > 0; --t) {
+    size_ts.push_back(t - 1);
+    OffsetAdjuster::AdjustOffset(kNoAdjustments, &size_ts.back(), kLimit);
+  }
+  unlimited_count = 0;
+  for (std::vector<size_t>::iterator ti = size_ts.begin(); ti != size_ts.end();
+       ++ti) {
+    if (*ti != kNpos)
+      ++unlimited_count;
+  }
+  EXPECT_EQ(11U, unlimited_count);
+}
+
+TEST(UTFOffsetStringConversionsTest, AdjustOffsets) {
+  // Imagine we have strings as shown in the following cases where the
+  // X's represent encoded characters.
+  // 1: abcXXXdef ==> abcXdef
+  {
+    std::vector<size_t> offsets;
+    for (size_t t = 0; t <= 9; ++t)
+      offsets.push_back(t);
+    OffsetAdjuster::Adjustments adjustments;
+    adjustments.push_back(OffsetAdjuster::Adjustment(3, 3, 1));
+    OffsetAdjuster::AdjustOffsets(adjustments, &offsets);
+    size_t expected_1[] = {0, 1, 2, 3, kNpos, kNpos, 4, 5, 6, 7};
+    EXPECT_EQ(offsets.size(), arraysize(expected_1));
+    for (size_t i = 0; i < arraysize(expected_1); ++i)
+      EXPECT_EQ(expected_1[i], offsets[i]);
+  }
+
+  // 2: XXXaXXXXbcXXXXXXXdefXXX ==> XaXXbcXXXXdefX
+  {
+    std::vector<size_t> offsets;
+    for (size_t t = 0; t <= 23; ++t)
+      offsets.push_back(t);
+    OffsetAdjuster::Adjustments adjustments;
+    adjustments.push_back(OffsetAdjuster::Adjustment(0, 3, 1));
+    adjustments.push_back(OffsetAdjuster::Adjustment(4, 4, 2));
+    adjustments.push_back(OffsetAdjuster::Adjustment(10, 7, 4));
+    adjustments.push_back(OffsetAdjuster::Adjustment(20, 3, 1));
+    OffsetAdjuster::AdjustOffsets(adjustments, &offsets);
+    size_t expected_2[] = {
+      0, kNpos, kNpos, 1, 2, kNpos, kNpos, kNpos, 4, 5, 6, kNpos, kNpos, kNpos,
+      kNpos, kNpos, kNpos, 10, 11, 12, 13, kNpos, kNpos, 14
+    };
+    EXPECT_EQ(offsets.size(), arraysize(expected_2));
+    for (size_t i = 0; i < arraysize(expected_2); ++i)
+      EXPECT_EQ(expected_2[i], offsets[i]);
+  }
+
+  // 3: XXXaXXXXbcdXXXeXX ==> aXXXXbcdXXXe
+  {
+    std::vector<size_t> offsets;
+    for (size_t t = 0; t <= 17; ++t)
+      offsets.push_back(t);
+    OffsetAdjuster::Adjustments adjustments;
+    adjustments.push_back(OffsetAdjuster::Adjustment(0, 3, 0));
+    adjustments.push_back(OffsetAdjuster::Adjustment(4, 4, 4));
+    adjustments.push_back(OffsetAdjuster::Adjustment(11, 3, 3));
+    adjustments.push_back(OffsetAdjuster::Adjustment(15, 2, 0));
+    OffsetAdjuster::AdjustOffsets(adjustments, &offsets);
+    size_t expected_3[] = {
+      0, kNpos, kNpos, 0, 1, kNpos, kNpos, kNpos, 5, 6, 7, 8, kNpos, kNpos, 11,
+      12, kNpos, 12
+    };
+    EXPECT_EQ(offsets.size(), arraysize(expected_3));
+    for (size_t i = 0; i < arraysize(expected_3); ++i)
+      EXPECT_EQ(expected_3[i], offsets[i]);
+  }
+}
+
+TEST(UTFOffsetStringConversionsTest, UnadjustOffsets) {
+  // Imagine we have strings as shown in the following cases where the
+  // X's represent encoded characters.
+  // 1: abcXXXdef ==> abcXdef
+  {
+    std::vector<size_t> offsets;
+    for (size_t t = 0; t <= 7; ++t)
+      offsets.push_back(t);
+    OffsetAdjuster::Adjustments adjustments;
+    adjustments.push_back(OffsetAdjuster::Adjustment(3, 3, 1));
+    OffsetAdjuster::UnadjustOffsets(adjustments, &offsets);
+    size_t expected_1[] = {0, 1, 2, 3, 6, 7, 8, 9};
+    EXPECT_EQ(offsets.size(), arraysize(expected_1));
+    for (size_t i = 0; i < arraysize(expected_1); ++i)
+      EXPECT_EQ(expected_1[i], offsets[i]);
+  }
+
+  // 2: XXXaXXXXbcXXXXXXXdefXXX ==> XaXXbcXXXXdefX
+  {
+    std::vector<size_t> offsets;
+    for (size_t t = 0; t <= 14; ++t)
+      offsets.push_back(t);
+    OffsetAdjuster::Adjustments adjustments;
+    adjustments.push_back(OffsetAdjuster::Adjustment(0, 3, 1));
+    adjustments.push_back(OffsetAdjuster::Adjustment(4, 4, 2));
+    adjustments.push_back(OffsetAdjuster::Adjustment(10, 7, 4));
+    adjustments.push_back(OffsetAdjuster::Adjustment(20, 3, 1));
+    OffsetAdjuster::UnadjustOffsets(adjustments, &offsets);
+    size_t expected_2[] = {
+      0, 3, 4, kNpos, 8, 9, 10, kNpos, kNpos, kNpos, 17, 18, 19, 20, 23
+    };
+    EXPECT_EQ(offsets.size(), arraysize(expected_2));
+    for (size_t i = 0; i < arraysize(expected_2); ++i)
+      EXPECT_EQ(expected_2[i], offsets[i]);
+  }
+
+  // 3: XXXaXXXXbcdXXXeXX ==> aXXXXbcdXXXe
+  {
+    std::vector<size_t> offsets;
+    for (size_t t = 0; t <= 12; ++t)
+      offsets.push_back(t);
+    OffsetAdjuster::Adjustments adjustments;
+    adjustments.push_back(OffsetAdjuster::Adjustment(0, 3, 0));
+    adjustments.push_back(OffsetAdjuster::Adjustment(4, 4, 4));
+    adjustments.push_back(OffsetAdjuster::Adjustment(11, 3, 3));
+    adjustments.push_back(OffsetAdjuster::Adjustment(15, 2, 0));
+    OffsetAdjuster::UnadjustOffsets(adjustments, &offsets);
+    size_t expected_3[] = {
+      0,  // this could just as easily be 3
+      4, kNpos, kNpos, kNpos, 8, 9, 10, 11, kNpos, kNpos, 14,
+      15  // this could just as easily be 17
+    };
+    EXPECT_EQ(offsets.size(), arraysize(expected_3));
+    for (size_t i = 0; i < arraysize(expected_3); ++i)
+      EXPECT_EQ(expected_3[i], offsets[i]);
+  }
+}
+
+// MergeSequentialAdjustments is used by net/base/escape.{h,cc} and
+// net/base/net_util.{h,cc}.  The two tests EscapeTest.AdjustOffset and
+// NetUtilTest.FormatUrlWithOffsets test its behavior extensively.  This
+// is simply a short, additional test.
+TEST(UTFOffsetStringConversionsTest, MergeSequentialAdjustments) {
+  // Pretend the input string is "abcdefghijklmnopqrstuvwxyz".
+
+  // Set up |first_adjustments| to
+  // - remove the leading "a"
+  // - combine the "bc" into one character (call it ".")
+  // - remove the "f"
+  // - remove the "tuv"
+  // The resulting string should be ".deghijklmnopqrswxyz".
+  OffsetAdjuster::Adjustments first_adjustments;
+  first_adjustments.push_back(OffsetAdjuster::Adjustment(0, 1, 0));
+  first_adjustments.push_back(OffsetAdjuster::Adjustment(1, 2, 1));
+  first_adjustments.push_back(OffsetAdjuster::Adjustment(5, 1, 0));
+  first_adjustments.push_back(OffsetAdjuster::Adjustment(19, 3, 0));
+
+  // Set up |adjustments_on_adjusted_string| to
+  // - combine the "." character that replaced "bc" with "d" into one character
+  //   (call it "?")
+  // - remove the "egh"
+  // - expand the "i" into two characters (call them "12")
+  // - combine the "jkl" into one character (call it "@")
+  // - expand the "z" into two characters (call it "34")
+  // The resulting string should be "?12@mnopqrswxy34".
+  OffsetAdjuster::Adjustments adjustments_on_adjusted_string;
+  adjustments_on_adjusted_string.push_back(OffsetAdjuster::Adjustment(
+      0, 2, 1));
+  adjustments_on_adjusted_string.push_back(OffsetAdjuster::Adjustment(
+      2, 3, 0));
+  adjustments_on_adjusted_string.push_back(OffsetAdjuster::Adjustment(
+      5, 1, 2));
+  adjustments_on_adjusted_string.push_back(OffsetAdjuster::Adjustment(
+      6, 3, 1));
+  adjustments_on_adjusted_string.push_back(OffsetAdjuster::Adjustment(
+      19, 1, 2));
+
+  // Now merge the adjustments and check the results.
+  OffsetAdjuster::MergeSequentialAdjustments(first_adjustments,
+                                             &adjustments_on_adjusted_string);
+  // The merged adjustments should look like
+  // - combine abcd into "?"
+  //   - note: it's also reasonable for the Merge function to instead produce
+  //     two adjustments instead of this, one to remove a and another to
+  //     combine bcd into "?".  This test verifies the current behavior.
+  // - remove efgh
+  // - expand i into "12"
+  // - combine jkl into "@"
+  // - remove tuv
+  // - expand z into "34"
+  ASSERT_EQ(6u, adjustments_on_adjusted_string.size());
+  EXPECT_EQ(0u, adjustments_on_adjusted_string[0].original_offset);
+  EXPECT_EQ(4u, adjustments_on_adjusted_string[0].original_length);
+  EXPECT_EQ(1u, adjustments_on_adjusted_string[0].output_length);
+  EXPECT_EQ(4u, adjustments_on_adjusted_string[1].original_offset);
+  EXPECT_EQ(4u, adjustments_on_adjusted_string[1].original_length);
+  EXPECT_EQ(0u, adjustments_on_adjusted_string[1].output_length);
+  EXPECT_EQ(8u, adjustments_on_adjusted_string[2].original_offset);
+  EXPECT_EQ(1u, adjustments_on_adjusted_string[2].original_length);
+  EXPECT_EQ(2u, adjustments_on_adjusted_string[2].output_length);
+  EXPECT_EQ(9u, adjustments_on_adjusted_string[3].original_offset);
+  EXPECT_EQ(3u, adjustments_on_adjusted_string[3].original_length);
+  EXPECT_EQ(1u, adjustments_on_adjusted_string[3].output_length);
+  EXPECT_EQ(19u, adjustments_on_adjusted_string[4].original_offset);
+  EXPECT_EQ(3u, adjustments_on_adjusted_string[4].original_length);
+  EXPECT_EQ(0u, adjustments_on_adjusted_string[4].output_length);
+  EXPECT_EQ(25u, adjustments_on_adjusted_string[5].original_offset);
+  EXPECT_EQ(1u, adjustments_on_adjusted_string[5].original_length);
+  EXPECT_EQ(2u, adjustments_on_adjusted_string[5].output_length);
+}
+
+}  // namespace base
diff --git a/base/strings/utf_string_conversion_utils.cc b/base/strings/utf_string_conversion_utils.cc
new file mode 100644
index 0000000..f7682c1
--- /dev/null
+++ b/base/strings/utf_string_conversion_utils.cc
@@ -0,0 +1,155 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/utf_string_conversion_utils.h"
+
+#include "base/third_party/icu/icu_utf.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// ReadUnicodeCharacter --------------------------------------------------------
+
+bool ReadUnicodeCharacter(const char* src,
+                          int32_t src_len,
+                          int32_t* char_index,
+                          uint32_t* code_point_out) {
+  // U8_NEXT expects to be able to use -1 to signal an error, so we must
+  // use a signed type for code_point.  But this function returns false
+  // on error anyway, so code_point_out is unsigned.
+  int32_t code_point;
+  CBU8_NEXT(src, *char_index, src_len, code_point);
+  *code_point_out = static_cast<uint32_t>(code_point);
+
+  // The ICU macro above moves to the next char; we want to point to the last
+  // char consumed.
+  (*char_index)--;
+
+  // Validate the decoded value.
+  return IsValidCodepoint(code_point);
+}
+
+bool ReadUnicodeCharacter(const char16* src,
+                          int32_t src_len,
+                          int32_t* char_index,
+                          uint32_t* code_point) {
+  if (CBU16_IS_SURROGATE(src[*char_index])) {
+    if (!CBU16_IS_SURROGATE_LEAD(src[*char_index]) ||
+        *char_index + 1 >= src_len ||
+        !CBU16_IS_TRAIL(src[*char_index + 1])) {
+      // Invalid surrogate pair.
+      return false;
+    }
+
+    // Valid surrogate pair.
+    *code_point = CBU16_GET_SUPPLEMENTARY(src[*char_index],
+                                          src[*char_index + 1]);
+    (*char_index)++;
+  } else {
+    // Not a surrogate, just one 16-bit word.
+    *code_point = src[*char_index];
+  }
+
+  return IsValidCodepoint(*code_point);
+}
+
+#if defined(WCHAR_T_IS_UTF32)
+bool ReadUnicodeCharacter(const wchar_t* src,
+                          int32_t src_len,
+                          int32_t* char_index,
+                          uint32_t* code_point) {
+  // Conversion is easy since the source is 32-bit.
+  *code_point = src[*char_index];
+
+  // Validate the value.
+  return IsValidCodepoint(*code_point);
+}
+#endif  // defined(WCHAR_T_IS_UTF32)
+
+// WriteUnicodeCharacter -------------------------------------------------------
+
+size_t WriteUnicodeCharacter(uint32_t code_point, std::string* output) {
+  if (code_point <= 0x7f) {
+    // Fast path the common case of one byte.
+    output->push_back(static_cast<char>(code_point));
+    return 1;
+  }
+
+  // CBU8_APPEND_UNSAFE can append up to 4 bytes.
+  size_t char_offset = output->length();
+  size_t original_char_offset = char_offset;
+  output->resize(char_offset + CBU8_MAX_LENGTH);
+
+  CBU8_APPEND_UNSAFE(&(*output)[0], char_offset, code_point);
+
+  // CBU8_APPEND_UNSAFE will advance our pointer past the inserted character, so
+  // it will represent the new length of the string.
+  output->resize(char_offset);
+  return char_offset - original_char_offset;
+}
+
+size_t WriteUnicodeCharacter(uint32_t code_point, string16* output) {
+  if (CBU16_LENGTH(code_point) == 1) {
+    // This code point is in the Basic Multilingual Plane (BMP).
+    output->push_back(static_cast<char16>(code_point));
+    return 1;
+  }
+  // Non-BMP characters use a double-character encoding.
+  size_t char_offset = output->length();
+  output->resize(char_offset + CBU16_MAX_LENGTH);
+  CBU16_APPEND_UNSAFE(&(*output)[0], char_offset, code_point);
+  return CBU16_MAX_LENGTH;
+}
+
+// Generalized Unicode converter -----------------------------------------------
+
+template<typename CHAR>
+void PrepareForUTF8Output(const CHAR* src,
+                          size_t src_len,
+                          std::string* output) {
+  output->clear();
+  if (src_len == 0)
+    return;
+  if (src[0] < 0x80) {
+    // Assume that the entire input will be ASCII.
+    output->reserve(src_len);
+  } else {
+    // Assume that the entire input is non-ASCII and will have 3 bytes per char.
+    output->reserve(src_len * 3);
+  }
+}
+
+// Instantiate versions we know callers will need.
+#if !defined(OS_WIN)
+// wchar_t and char16 are the same thing on Windows.
+template void PrepareForUTF8Output(const wchar_t*, size_t, std::string*);
+#endif
+template void PrepareForUTF8Output(const char16*, size_t, std::string*);
+
+template<typename STRING>
+void PrepareForUTF16Or32Output(const char* src,
+                               size_t src_len,
+                               STRING* output) {
+  output->clear();
+  if (src_len == 0)
+    return;
+  if (static_cast<unsigned char>(src[0]) < 0x80) {
+    // Assume the input is all ASCII, which means 1:1 correspondence.
+    output->reserve(src_len);
+  } else {
+    // Otherwise assume that the UTF-8 sequences will have 2 bytes for each
+    // character.
+    output->reserve(src_len / 2);
+  }
+}
+
+// Instantiate versions we know callers will need.
+#if !defined(OS_WIN)
+// std::wstring and string16 are the same thing on Windows.
+template void PrepareForUTF16Or32Output(const char*, size_t, std::wstring*);
+#endif
+template void PrepareForUTF16Or32Output(const char*, size_t, string16*);
+
+}  // namespace base
diff --git a/base/strings/utf_string_conversion_utils.h b/base/strings/utf_string_conversion_utils.h
new file mode 100644
index 0000000..2d95870
--- /dev/null
+++ b/base/strings/utf_string_conversion_utils.h
@@ -0,0 +1,100 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_UTF_STRING_CONVERSION_UTILS_H_
+#define BASE_STRINGS_UTF_STRING_CONVERSION_UTILS_H_
+
+// Low-level UTF handling functions. Most code will want to use the functions
+// in utf_string_conversions.h.
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/base_export.h"
+#include "base/strings/string16.h"
+
+namespace base {
+
+inline bool IsValidCodepoint(uint32_t code_point) {
+  // Excludes the surrogate code points ([0xD800, 0xDFFF]) and
+  // codepoints larger than 0x10FFFF (the highest codepoint allowed).
+  // Non-characters and unassigned codepoints are allowed.
+  return code_point < 0xD800u ||
+         (code_point >= 0xE000u && code_point <= 0x10FFFFu);
+}
+
+inline bool IsValidCharacter(uint32_t code_point) {
+  // Excludes non-characters (U+FDD0..U+FDEF, and all codepoints ending in
+  // 0xFFFE or 0xFFFF) from the set of valid code points.
+  return code_point < 0xD800u || (code_point >= 0xE000u &&
+      code_point < 0xFDD0u) || (code_point > 0xFDEFu &&
+      code_point <= 0x10FFFFu && (code_point & 0xFFFEu) != 0xFFFEu);
+}
+
+// ReadUnicodeCharacter --------------------------------------------------------
+
+// Reads a UTF-8 stream, placing the next code point into the given output
+// |*code_point|. |src| represents the entire string to read, and |*char_index|
+// is the character offset within the string to start reading at. |*char_index|
+// will be updated to index the last character read, such that incrementing it
+// (as in a for loop) will take the reader to the next character.
+//
+// Returns true on success. On false, |*code_point| will be invalid.
+BASE_EXPORT bool ReadUnicodeCharacter(const char* src,
+                                      int32_t src_len,
+                                      int32_t* char_index,
+                                      uint32_t* code_point_out);
+
+// Reads a UTF-16 character. The usage is the same as the 8-bit version above.
+BASE_EXPORT bool ReadUnicodeCharacter(const char16* src,
+                                      int32_t src_len,
+                                      int32_t* char_index,
+                                      uint32_t* code_point);
+
+#if defined(WCHAR_T_IS_UTF32)
+// Reads UTF-32 character. The usage is the same as the 8-bit version above.
+BASE_EXPORT bool ReadUnicodeCharacter(const wchar_t* src,
+                                      int32_t src_len,
+                                      int32_t* char_index,
+                                      uint32_t* code_point);
+#endif  // defined(WCHAR_T_IS_UTF32)
+
+// WriteUnicodeCharacter -------------------------------------------------------
+
+// Appends a UTF-8 character to the given 8-bit string.  Returns the number of
+// bytes written.
+BASE_EXPORT size_t WriteUnicodeCharacter(uint32_t code_point,
+                                         std::string* output);
+
+// Appends the given code point as a UTF-16 character to the given 16-bit
+// string.  Returns the number of 16-bit values written.
+BASE_EXPORT size_t WriteUnicodeCharacter(uint32_t code_point, string16* output);
+
+#if defined(WCHAR_T_IS_UTF32)
+// Appends the given UTF-32 character to the given 32-bit string.  Returns the
+// number of 32-bit values written.
+inline size_t WriteUnicodeCharacter(uint32_t code_point, std::wstring* output) {
+  // This is the easy case, just append the character.
+  output->push_back(code_point);
+  return 1;
+}
+#endif  // defined(WCHAR_T_IS_UTF32)
+
+// Generalized Unicode converter -----------------------------------------------
+
+// Guesses the length of the output in UTF-8 in bytes, clears that output
+// string, and reserves that amount of space.  We assume that the input
+// character types are unsigned, which will be true for UTF-16 and -32 on our
+// systems.
+template<typename CHAR>
+void PrepareForUTF8Output(const CHAR* src, size_t src_len, std::string* output);
+
+// Prepares an output buffer (containing either UTF-16 or -32 data) given some
+// UTF-8 input that will be converted to it.  See PrepareForUTF8Output().
+template<typename STRING>
+void PrepareForUTF16Or32Output(const char* src, size_t src_len, STRING* output);
+
+}  // namespace base
+
+#endif  // BASE_STRINGS_UTF_STRING_CONVERSION_UTILS_H_
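To show how these primitives compose, here is a sketch (not part of this change) of a hand-rolled UTF-8 to UTF-16 loop built on the functions declared above; the real conversions in utf_string_conversions.cc follow the same pattern:

#include <stdint.h>

#include <string>

#include "base/strings/string16.h"
#include "base/strings/utf_string_conversion_utils.h"

// Converts |src| to UTF-16, substituting U+FFFD for invalid sequences.
// Returns false if any substitution was made.
bool Utf8ToUtf16Sketch(const std::string& src, base::string16* out) {
  base::PrepareForUTF16Or32Output(src.data(), src.length(), out);
  const int32_t src_len = static_cast<int32_t>(src.length());
  bool success = true;
  for (int32_t i = 0; i < src_len; ++i) {
    uint32_t code_point;
    // On return, |i| indexes the last char consumed, so the loop's ++i
    // advances to the start of the next sequence.
    if (!base::ReadUnicodeCharacter(src.data(), src_len, &i, &code_point)) {
      success = false;
      code_point = 0xFFFD;  // REPLACEMENT CHARACTER
    }
    base::WriteUnicodeCharacter(code_point, out);
  }
  return success;
}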
diff --git a/base/strings/utf_string_conversions.cc b/base/strings/utf_string_conversions.cc
new file mode 100644
index 0000000..89acc38
--- /dev/null
+++ b/base/strings/utf_string_conversions.cc
@@ -0,0 +1,333 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/utf_string_conversions.h"
+
+#include <stdint.h>
+
+#include "base/strings/string_piece.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversion_utils.h"
+#include "base/third_party/icu/icu_utf.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace {
+
+constexpr int32_t kErrorCodePoint = 0xFFFD;
+
+// Size coefficient ----------------------------------------------------------
+// The maximum number of codeunits in the destination encoding corresponding to
+// one codeunit in the source encoding.
+
+template <typename SrcChar, typename DestChar>
+struct SizeCoefficient {
+  static_assert(sizeof(SrcChar) < sizeof(DestChar),
+                "Default case: from a smaller encoding to the bigger one");
+
+  // ASCII symbols are encoded by one codeunit in all encodings.
+  static constexpr int value = 1;
+};
+
+template <>
+struct SizeCoefficient<char16, char> {
+  // One UTF-16 codeunit corresponds to at most 3 codeunits in UTF-8.
+  static constexpr int value = 3;
+};
+
+#if defined(WCHAR_T_IS_UTF32)
+template <>
+struct SizeCoefficient<wchar_t, char> {
+  // UTF-8 uses at most 4 codeunits per character.
+  static constexpr int value = 4;
+};
+
+template <>
+struct SizeCoefficient<wchar_t, char16> {
+  // UTF-16 uses at most 2 codeunits per character.
+  static constexpr int value = 2;
+};
+#endif  // defined(WCHAR_T_IS_UTF32)
+
+template <typename SrcChar, typename DestChar>
+constexpr int size_coefficient_v =
+    SizeCoefficient<std::decay_t<SrcChar>, std::decay_t<DestChar>>::value;
+
+// UnicodeAppendUnsafe --------------------------------------------------------
+// Function overloads that write code_point to the output string. Output string
+// has to have enough space for the codepoint.
+
+void UnicodeAppendUnsafe(char* out, int32_t* size, uint32_t code_point) {
+  CBU8_APPEND_UNSAFE(out, *size, code_point);
+}
+
+void UnicodeAppendUnsafe(char16* out, int32_t* size, uint32_t code_point) {
+  CBU16_APPEND_UNSAFE(out, *size, code_point);
+}
+
+#if defined(WCHAR_T_IS_UTF32)
+
+void UnicodeAppendUnsafe(wchar_t* out, int32_t* size, uint32_t code_point) {
+  out[(*size)++] = code_point;
+}
+
+#endif  // defined(WCHAR_T_IS_UTF32)
+
+// DoUTFConversion ------------------------------------------------------------
+// Main driver of UTFConversion specialized for different Src encodings.
+// dest has to have enough room for the converted text.
+
+template <typename DestChar>
+bool DoUTFConversion(const char* src,
+                     int32_t src_len,
+                     DestChar* dest,
+                     int32_t* dest_len) {
+  bool success = true;
+
+  for (int32_t i = 0; i < src_len;) {
+    int32_t code_point;
+    CBU8_NEXT(src, i, src_len, code_point);
+
+    if (!IsValidCodepoint(code_point)) {
+      success = false;
+      code_point = kErrorCodePoint;
+    }
+
+    UnicodeAppendUnsafe(dest, dest_len, code_point);
+  }
+
+  return success;
+}
+
+template <typename DestChar>
+bool DoUTFConversion(const char16* src,
+                     int32_t src_len,
+                     DestChar* dest,
+                     int32_t* dest_len) {
+  bool success = true;
+
+  auto ConvertSingleChar = [&success](char16 in) -> int32_t {
+    if (!CBU16_IS_SINGLE(in) || !IsValidCodepoint(in)) {
+      success = false;
+      return kErrorCodePoint;
+    }
+    return in;
+  };
+
+  int32_t i = 0;
+
+  // Always have another symbol in order to avoid checking boundaries in the
+  // middle of the surrogate pair.
+  while (i < src_len - 1) {
+    int32_t code_point;
+
+    if (CBU16_IS_LEAD(src[i]) && CBU16_IS_TRAIL(src[i + 1])) {
+      code_point = CBU16_GET_SUPPLEMENTARY(src[i], src[i + 1]);
+      if (!IsValidCodepoint(code_point)) {
+        code_point = kErrorCodePoint;
+        success = false;
+      }
+      i += 2;
+    } else {
+      code_point = ConvertSingleChar(src[i]);
+      ++i;
+    }
+
+    UnicodeAppendUnsafe(dest, dest_len, code_point);
+  }
+
+  if (i < src_len)
+    UnicodeAppendUnsafe(dest, dest_len, ConvertSingleChar(src[i]));
+
+  return success;
+}
+
+#if defined(WCHAR_T_IS_UTF32)
+
+template <typename DestChar>
+bool DoUTFConversion(const wchar_t* src,
+                     int32_t src_len,
+                     DestChar* dest,
+                     int32_t* dest_len) {
+  bool success = true;
+
+  for (int32_t i = 0; i < src_len; ++i) {
+    int32_t code_point = src[i];
+
+    if (!IsValidCodepoint(code_point)) {
+      success = false;
+      code_point = kErrorCodePoint;
+    }
+
+    UnicodeAppendUnsafe(dest, dest_len, code_point);
+  }
+
+  return success;
+}
+
+#endif  // defined(WCHAR_T_IS_UTF32)
+
+// UTFConversion --------------------------------------------------------------
+// Function template for generating all UTF conversions.
+
+template <typename InputString, typename DestString>
+bool UTFConversion(const InputString& src_str, DestString* dest_str) {
+  if (IsStringASCII(src_str)) {
+    dest_str->assign(src_str.begin(), src_str.end());
+    return true;
+  }
+
+  dest_str->resize(src_str.length() *
+                   size_coefficient_v<typename InputString::value_type,
+                                      typename DestString::value_type>);
+
+  // Empty string is ASCII => it's OK to call operator[].
+  auto* dest = &(*dest_str)[0];
+
+  // ICU requires 32 bit numbers.
+  int32_t src_len32 = static_cast<int32_t>(src_str.length());
+  int32_t dest_len32 = 0;
+
+  bool res = DoUTFConversion(src_str.data(), src_len32, dest, &dest_len32);
+
+  dest_str->resize(dest_len32);
+  dest_str->shrink_to_fit();
+
+  return res;
+}
+
+}  // namespace
+
+// UTF16 <-> UTF8 --------------------------------------------------------------
+
+bool UTF8ToUTF16(const char* src, size_t src_len, string16* output) {
+  return UTFConversion(StringPiece(src, src_len), output);
+}
+
+string16 UTF8ToUTF16(StringPiece utf8) {
+  string16 ret;
+  // Ignore the success flag of this call, it will do the best it can for
+  // invalid input, which is what we want here.
+  UTF8ToUTF16(utf8.data(), utf8.size(), &ret);
+  return ret;
+}
+
+bool UTF16ToUTF8(const char16* src, size_t src_len, std::string* output) {
+  return UTFConversion(StringPiece16(src, src_len), output);
+}
+
+std::string UTF16ToUTF8(StringPiece16 utf16) {
+  std::string ret;
+  // Ignore the success flag of this call, it will do the best it can for
+  // invalid input, which is what we want here.
+  UTF16ToUTF8(utf16.data(), utf16.length(), &ret);
+  return ret;
+}
+
+// UTF-16 <-> Wide -------------------------------------------------------------
+
+#if defined(WCHAR_T_IS_UTF16)
+// When wide == UTF-16 the conversions are a NOP.
+
+bool WideToUTF16(const wchar_t* src, size_t src_len, string16* output) {
+  output->assign(src, src_len);
+  return true;
+}
+
+string16 WideToUTF16(WStringPiece wide) {
+  return wide.as_string();
+}
+
+bool UTF16ToWide(const char16* src, size_t src_len, std::wstring* output) {
+  output->assign(src, src_len);
+  return true;
+}
+
+std::wstring UTF16ToWide(StringPiece16 utf16) {
+  return utf16.as_string();
+}
+
+#elif defined(WCHAR_T_IS_UTF32)
+
+bool WideToUTF16(const wchar_t* src, size_t src_len, string16* output) {
+  return UTFConversion(base::WStringPiece(src, src_len), output);
+}
+
+string16 WideToUTF16(WStringPiece wide) {
+  string16 ret;
+  // Ignore the success flag of this call, it will do the best it can for
+  // invalid input, which is what we want here.
+  WideToUTF16(wide.data(), wide.length(), &ret);
+  return ret;
+}
+
+bool UTF16ToWide(const char16* src, size_t src_len, std::wstring* output) {
+  return UTFConversion(StringPiece16(src, src_len), output);
+}
+
+std::wstring UTF16ToWide(StringPiece16 utf16) {
+  std::wstring ret;
+  // Ignore the success flag of this call, it will do the best it can for
+  // invalid input, which is what we want here.
+  UTF16ToWide(utf16.data(), utf16.length(), &ret);
+  return ret;
+}
+
+#endif  // defined(WCHAR_T_IS_UTF32)
+
+// UTF-8 <-> Wide --------------------------------------------------------------
+
+// UTF8ToWide is the same code, regardless of whether wide is 16 or 32 bits.
+
+bool UTF8ToWide(const char* src, size_t src_len, std::wstring* output) {
+  return UTFConversion(StringPiece(src, src_len), output);
+}
+
+std::wstring UTF8ToWide(StringPiece utf8) {
+  std::wstring ret;
+  // Ignore the success flag of this call, it will do the best it can for
+  // invalid input, which is what we want here.
+  UTF8ToWide(utf8.data(), utf8.length(), &ret);
+  return ret;
+}
+
+#if defined(WCHAR_T_IS_UTF16)
+// Easy case since we can use the "utf" versions we already wrote above.
+
+bool WideToUTF8(const wchar_t* src, size_t src_len, std::string* output) {
+  return UTF16ToUTF8(src, src_len, output);
+}
+
+std::string WideToUTF8(WStringPiece wide) {
+  return UTF16ToUTF8(wide);
+}
+
+#elif defined(WCHAR_T_IS_UTF32)
+
+bool WideToUTF8(const wchar_t* src, size_t src_len, std::string* output) {
+  return UTFConversion(WStringPiece(src, src_len), output);
+}
+
+std::string WideToUTF8(WStringPiece wide) {
+  std::string ret;
+  // Ignore the success flag of this call, it will do the best it can for
+  // invalid input, which is what we want here.
+  WideToUTF8(wide.data(), wide.length(), &ret);
+  return ret;
+}
+
+#endif  // defined(WCHAR_T_IS_UTF32)
+
+string16 ASCIIToUTF16(StringPiece ascii) {
+  DCHECK(IsStringASCII(ascii)) << ascii;
+  return string16(ascii.begin(), ascii.end());
+}
+
+std::string UTF16ToASCII(StringPiece16 utf16) {
+  DCHECK(IsStringASCII(utf16)) << UTF16ToUTF8(utf16);
+  return std::string(utf16.begin(), utf16.end());
+}
+
+}  // namespace base
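The surrogate-pair handling above leans on the CBU16_* macros from base/third_party/icu; for reference, the arithmetic they implement is the standard UTF-16 encoding. This snippet is illustrative only and not part of the change:

#include <assert.h>
#include <stdint.h>

int main() {
  const uint32_t kCodePoint = 0x10300;  // Same non-BMP char as in the tests.
  // Encode: subtract 0x10000 and split the remaining 20 bits in half.
  const uint16_t lead = 0xD800 + ((kCodePoint - 0x10000) >> 10);
  const uint16_t trail = 0xDC00 + ((kCodePoint - 0x10000) & 0x3FF);
  assert(lead == 0xD800 && trail == 0xDF00);
  // Decode: recombine the two 10-bit halves (what CBU16_GET_SUPPLEMENTARY
  // computes).
  const uint32_t decoded =
      0x10000 + ((static_cast<uint32_t>(lead - 0xD800) << 10) |
                 (trail - 0xDC00));
  assert(decoded == kCodePoint);
  return 0;
}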
diff --git a/base/strings/utf_string_conversions.h b/base/strings/utf_string_conversions.h
new file mode 100644
index 0000000..14f94ac
--- /dev/null
+++ b/base/strings/utf_string_conversions.h
@@ -0,0 +1,54 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_UTF_STRING_CONVERSIONS_H_
+#define BASE_STRINGS_UTF_STRING_CONVERSIONS_H_
+
+#include <stddef.h>
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+// These convert between UTF-8, -16, and -32 strings. They are potentially slow,
+// so avoid unnecessary conversions. The low-level versions return a boolean
+// indicating whether the conversion was 100% valid. In this case, it will still
+// do the best it can and put the result in the output buffer. The versions that
+// return strings ignore this error and just return the best conversion
+// possible.
+BASE_EXPORT bool WideToUTF8(const wchar_t* src, size_t src_len,
+                            std::string* output);
+BASE_EXPORT std::string WideToUTF8(WStringPiece wide);
+BASE_EXPORT bool UTF8ToWide(const char* src, size_t src_len,
+                            std::wstring* output);
+BASE_EXPORT std::wstring UTF8ToWide(StringPiece utf8);
+
+BASE_EXPORT bool WideToUTF16(const wchar_t* src, size_t src_len,
+                             string16* output);
+BASE_EXPORT string16 WideToUTF16(WStringPiece wide);
+BASE_EXPORT bool UTF16ToWide(const char16* src, size_t src_len,
+                             std::wstring* output);
+BASE_EXPORT std::wstring UTF16ToWide(StringPiece16 utf16);
+
+BASE_EXPORT bool UTF8ToUTF16(const char* src, size_t src_len, string16* output);
+BASE_EXPORT string16 UTF8ToUTF16(StringPiece utf8);
+BASE_EXPORT bool UTF16ToUTF8(const char16* src, size_t src_len,
+                             std::string* output);
+BASE_EXPORT std::string UTF16ToUTF8(StringPiece16 utf16);
+
+// This converts an ASCII string, typically a hardcoded constant, to a UTF16
+// string.
+BASE_EXPORT string16 ASCIIToUTF16(StringPiece ascii);
+
+// Converts to 7-bit ASCII by truncating. The result must be known to be ASCII
+// beforehand.
+BASE_EXPORT std::string UTF16ToASCII(StringPiece16 utf16);
+
+}  // namespace base
+
+#endif  // BASE_STRINGS_UTF_STRING_CONVERSIONS_H_
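A usage sketch for this header (illustrative, not part of the change): the pointer/length overloads report whether the input was fully valid, while the StringPiece overloads return a best-effort result:

#include "base/strings/string16.h"
#include "base/strings/utf_string_conversions.h"

void ConversionExample() {
  base::string16 out;

  // Valid input: returns true and fills |out|.
  bool ok = base::UTF8ToUTF16("hello", 5, &out);
  // ok == true

  // Truncated UTF-8 sequence: |out| is still filled (with U+FFFD), but the
  // return value reports the invalid input.
  ok = base::UTF8ToUTF16("\xe4\xbd", 2, &out);
  // ok == false

  // The convenience overload swallows the flag and returns best effort.
  base::string16 best_effort = base::UTF8ToUTF16("\xe4\xbd");
}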
diff --git a/base/strings/utf_string_conversions_fuzzer.cc b/base/strings/utf_string_conversions_fuzzer.cc
new file mode 100644
index 0000000..37d4be2
--- /dev/null
+++ b/base/strings/utf_string_conversions_fuzzer.cc
@@ -0,0 +1,56 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
+
+std::string output_std_string;
+std::wstring output_std_wstring;
+base::string16 output_string16;
+
+// Entry point for LibFuzzer.
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+  base::StringPiece string_piece_input(reinterpret_cast<const char*>(data),
+                                       size);
+
+  base::UTF8ToWide(string_piece_input);
+  base::UTF8ToWide(reinterpret_cast<const char*>(data), size,
+                   &output_std_wstring);
+  base::UTF8ToUTF16(string_piece_input);
+  base::UTF8ToUTF16(reinterpret_cast<const char*>(data), size,
+                    &output_string16);
+
+  // Test for char16.
+  if (size % 2 == 0) {
+    base::StringPiece16 string_piece_input16(
+        reinterpret_cast<const base::char16*>(data), size / 2);
+    base::UTF16ToWide(output_string16);
+    base::UTF16ToWide(reinterpret_cast<const base::char16*>(data), size / 2,
+                      &output_std_wstring);
+    base::UTF16ToUTF8(string_piece_input16);
+    base::UTF16ToUTF8(reinterpret_cast<const base::char16*>(data), size / 2,
+                      &output_std_string);
+  }
+
+  // Test for wchar_t.
+  size_t wchar_t_size = sizeof(wchar_t);
+  if (size % wchar_t_size == 0) {
+    base::WideToUTF8(output_std_wstring);
+    base::WideToUTF8(reinterpret_cast<const wchar_t*>(data),
+                     size / wchar_t_size, &output_std_string);
+    base::WideToUTF16(output_std_wstring);
+    base::WideToUTF16(reinterpret_cast<const wchar_t*>(data),
+                      size / wchar_t_size, &output_string16);
+  }
+
+  // Test for ASCII. This condition is needed to avoid hitting instant CHECK
+  // failures.
+  if (base::IsStringASCII(string_piece_input)) {
+    output_string16 = base::ASCIIToUTF16(string_piece_input);
+    base::StringPiece16 string_piece_input16(output_string16);
+    base::UTF16ToASCII(string_piece_input16);
+  }
+
+  return 0;
+}
diff --git a/base/strings/utf_string_conversions_regression_fuzzer.cc b/base/strings/utf_string_conversions_regression_fuzzer.cc
new file mode 100644
index 0000000..ca6b4a2
--- /dev/null
+++ b/base/strings/utf_string_conversions_regression_fuzzer.cc
@@ -0,0 +1,105 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/logging.h"
+#include "base/strings/old_utf_string_conversions.h"
+#include "base/strings/utf_string_conversions.h"
+
+namespace {
+
+void UTF8ToCheck(const uint8_t* data, size_t size) {
+  const auto* src = reinterpret_cast<const char*>(data);
+  const size_t src_len = size;
+
+  // UTF16
+  {
+    base::string16 new_out;
+    bool new_res = base::UTF8ToUTF16(src, src_len, &new_out);
+
+    base::string16 old_out;
+    bool old_res = base_old::UTF8ToUTF16(src, src_len, &old_out);
+
+    CHECK(new_res == old_res);
+    CHECK(new_out == old_out);
+  }
+
+  // Wide
+  {
+    std::wstring new_out;
+    bool new_res = base::UTF8ToWide(src, src_len, &new_out);
+
+    std::wstring old_out;
+    bool old_res = base_old::UTF8ToWide(src, src_len, &old_out);
+
+    CHECK(new_res == old_res);
+    CHECK(new_out == old_out);
+  }
+}
+
+void UTF16ToCheck(const uint8_t* data, size_t size) {
+  const auto* src = reinterpret_cast<const base::char16*>(data);
+  const size_t src_len = size / 2;
+
+  // UTF8
+  {
+    std::string new_out;
+    bool new_res = base::UTF16ToUTF8(src, src_len, &new_out);
+
+    std::string old_out;
+    bool old_res = base_old::UTF16ToUTF8(src, src_len, &old_out);
+
+    CHECK(new_res == old_res);
+    CHECK(new_out == old_out);
+  }
+
+  // Wide
+  {
+    std::wstring new_out;
+    bool new_res = base::UTF16ToWide(src, src_len, &new_out);
+
+    std::wstring old_out;
+    bool old_res = base_old::UTF16ToWide(src, src_len, &old_out);
+
+    CHECK(new_res == old_res);
+    CHECK(new_out == old_out);
+  }
+}
+
+void WideToCheck(const uint8_t* data, size_t size) {
+  const auto* src = reinterpret_cast<const wchar_t*>(data);
+  const size_t src_len = size / 4;  // It's OK even if Wide is 16-bit.
+
+  // UTF8
+  {
+    std::string new_out;
+    bool new_res = base::WideToUTF8(src, src_len, &new_out);
+
+    std::string old_out;
+    bool old_res = base_old::WideToUTF8(src, src_len, &old_out);
+
+    CHECK(new_res == old_res);
+    CHECK(new_out == old_out);
+  }
+
+  // UTF16
+  {
+    base::string16 new_out;
+    bool new_res = base::WideToUTF16(src, src_len, &new_out);
+
+    base::string16 old_out;
+    bool old_res = base_old::WideToUTF16(src, src_len, &old_out);
+
+    CHECK(new_res == old_res);
+    CHECK(new_out == old_out);
+  }
+}
+
+}  // namespace
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+  UTF8ToCheck(data, size);
+  UTF16ToCheck(data, size);
+  WideToCheck(data, size);
+  return 0;
+}
diff --git a/base/strings/utf_string_conversions_unittest.cc b/base/strings/utf_string_conversions_unittest.cc
new file mode 100644
index 0000000..6f5e60c
--- /dev/null
+++ b/base/strings/utf_string_conversions_unittest.cc
@@ -0,0 +1,211 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+const wchar_t* const kConvertRoundtripCases[] = {
+  L"Google Video",
+  // "网页 图片 资讯更多 »"
+  L"\x7f51\x9875\x0020\x56fe\x7247\x0020\x8d44\x8baf\x66f4\x591a\x0020\x00bb",
+  //  "Παγκόσμιος Ιστός"
+  L"\x03a0\x03b1\x03b3\x03ba\x03cc\x03c3\x03bc\x03b9"
+  L"\x03bf\x03c2\x0020\x0399\x03c3\x03c4\x03cc\x03c2",
+  // "Поиск страниц на русском"
+  L"\x041f\x043e\x0438\x0441\x043a\x0020\x0441\x0442"
+  L"\x0440\x0430\x043d\x0438\x0446\x0020\x043d\x0430"
+  L"\x0020\x0440\x0443\x0441\x0441\x043a\x043e\x043c",
+  // "전체서비스"
+  L"\xc804\xccb4\xc11c\xbe44\xc2a4",
+
+  // Test characters that take more than 16 bits. This will depend on whether
+  // wchar_t is 16 or 32 bits.
+#if defined(WCHAR_T_IS_UTF16)
+  L"\xd800\xdf00",
+  // Mathematical Alphanumeric Symbols (U+11D40 - U+11D44: A, B, C, D, E).
+  L"\xd807\xdd40\xd807\xdd41\xd807\xdd42\xd807\xdd43\xd807\xdd44",
+#elif defined(WCHAR_T_IS_UTF32)
+  L"\x10300",
+  // Mathematical Alphanumeric Symbols (U+11D40 - U+11D44: A, B, C, D, E).
+  L"\x11d40\x11d41\x11d42\x11d43\x11d44",
+#endif
+};
+
+}  // namespace
+
+TEST(UTFStringConversionsTest, ConvertUTF8AndWide) {
+  // We round-trip all the wide strings through UTF-8 to make sure everything
+  // agrees on the conversion. This uses the stream operators to test them
+  // simultaneously.
+  for (size_t i = 0; i < arraysize(kConvertRoundtripCases); ++i) {
+    std::ostringstream utf8;
+    utf8 << WideToUTF8(kConvertRoundtripCases[i]);
+    std::wostringstream wide;
+    wide << UTF8ToWide(utf8.str());
+
+    EXPECT_EQ(kConvertRoundtripCases[i], wide.str());
+  }
+}
+
+TEST(UTFStringConversionsTest, ConvertUTF8AndWideEmptyString) {
+  // An empty std::wstring should be converted to an empty std::string,
+  // and vice versa.
+  std::wstring wempty;
+  std::string empty;
+  EXPECT_EQ(empty, WideToUTF8(wempty));
+  EXPECT_EQ(wempty, UTF8ToWide(empty));
+}
+
+TEST(UTFStringConversionsTest, ConvertUTF8ToWide) {
+  struct UTF8ToWideCase {
+    const char* utf8;
+    const wchar_t* wide;
+    bool success;
+  } convert_cases[] = {
+    // Regular UTF-8 input.
+    {"\xe4\xbd\xa0\xe5\xa5\xbd", L"\x4f60\x597d", true},
+    // Non-character is passed through.
+    {"\xef\xbf\xbfHello", L"\xffffHello", true},
+    // Truncated UTF-8 sequence.
+    {"\xe4\xa0\xe5\xa5\xbd", L"\xfffd\x597d", false},
+    // Truncated off the end.
+    {"\xe5\xa5\xbd\xe4\xa0", L"\x597d\xfffd", false},
+    // Non-shortest-form UTF-8.
+    {"\xf0\x84\xbd\xa0\xe5\xa5\xbd", L"\xfffd\xfffd\xfffd\xfffd\x597d", false},
+    // This UTF-8 character decodes to a UTF-16 surrogate, which is illegal.
+    {"\xed\xb0\x80", L"\xfffd\xfffd\xfffd", false},
+    // Non-BMP characters. The second is a non-character regarded as valid.
+    // The result will either be in UTF-16 or UTF-32.
+#if defined(WCHAR_T_IS_UTF16)
+    {"A\xF0\x90\x8C\x80z", L"A\xd800\xdf00z", true},
+    {"A\xF4\x8F\xBF\xBEz", L"A\xdbff\xdffez", true},
+#elif defined(WCHAR_T_IS_UTF32)
+    {"A\xF0\x90\x8C\x80z", L"A\x10300z", true},
+    {"A\xF4\x8F\xBF\xBEz", L"A\x10fffez", true},
+#endif
+  };
+
+  for (size_t i = 0; i < arraysize(convert_cases); i++) {
+    std::wstring converted;
+    EXPECT_EQ(convert_cases[i].success,
+              UTF8ToWide(convert_cases[i].utf8,
+                         strlen(convert_cases[i].utf8),
+                         &converted));
+    std::wstring expected(convert_cases[i].wide);
+    EXPECT_EQ(expected, converted);
+  }
+
+  // Manually test an embedded NULL.
+  std::wstring converted;
+  EXPECT_TRUE(UTF8ToWide("\00Z\t", 3, &converted));
+  ASSERT_EQ(3U, converted.length());
+  EXPECT_EQ(static_cast<wchar_t>(0), converted[0]);
+  EXPECT_EQ('Z', converted[1]);
+  EXPECT_EQ('\t', converted[2]);
+
+  // Make sure that conversion replaces, not appends.
+  EXPECT_TRUE(UTF8ToWide("B", 1, &converted));
+  ASSERT_EQ(1U, converted.length());
+  EXPECT_EQ('B', converted[0]);
+}
+
+#if defined(WCHAR_T_IS_UTF16)
+// This test is only valid when wchar_t == UTF-16.
+TEST(UTFStringConversionsTest, ConvertUTF16ToUTF8) {
+  struct WideToUTF8Case {
+    const wchar_t* utf16;
+    const char* utf8;
+    bool success;
+  } convert_cases[] = {
+    // Regular UTF-16 input.
+    {L"\x4f60\x597d", "\xe4\xbd\xa0\xe5\xa5\xbd", true},
+    // Test a non-BMP character.
+    {L"\xd800\xdf00", "\xF0\x90\x8C\x80", true},
+    // Non-characters are passed through.
+    {L"\xffffHello", "\xEF\xBF\xBFHello", true},
+    {L"\xdbff\xdffeHello", "\xF4\x8F\xBF\xBEHello", true},
+    // The first character is a truncated UTF-16 character.
+    {L"\xd800\x597d", "\xef\xbf\xbd\xe5\xa5\xbd", false},
+    // Truncated at the end.
+    {L"\x597d\xd800", "\xe5\xa5\xbd\xef\xbf\xbd", false},
+  };
+
+  for (const auto& test : convert_cases) {
+    std::string converted;
+    EXPECT_EQ(test.success,
+              WideToUTF8(test.utf16, wcslen(test.utf16), &converted));
+    std::string expected(test.utf8);
+    EXPECT_EQ(expected, converted);
+  }
+}
+
+#elif defined(WCHAR_T_IS_UTF32)
+// This test is only valid when wchar_t == UTF-32.
+TEST(UTFStringConversionsTest, ConvertUTF32ToUTF8) {
+  struct WideToUTF8Case {
+    const wchar_t* utf32;
+    const char* utf8;
+    bool success;
+  } convert_cases[] = {
+    // Regular 16-bit input.
+    {L"\x4f60\x597d", "\xe4\xbd\xa0\xe5\xa5\xbd", true},
+    // Test a non-BMP character.
+    {L"A\x10300z", "A\xF0\x90\x8C\x80z", true},
+    // Non-characters are passed through.
+    {L"\xffffHello", "\xEF\xBF\xBFHello", true},
+    {L"\x10fffeHello", "\xF4\x8F\xBF\xBEHello", true},
+    // Invalid Unicode code points.
+    {L"\xfffffffHello", "\xEF\xBF\xBDHello", false},
+    // The first character is a truncated UTF-16 character.
+    {L"\xd800\x597d", "\xef\xbf\xbd\xe5\xa5\xbd", false},
+    {L"\xdc01Hello", "\xef\xbf\xbdHello", false},
+  };
+
+  for (const auto& test : convert_cases) {
+    std::string converted;
+    EXPECT_EQ(test.success,
+              WideToUTF8(test.utf32, wcslen(test.utf32), &converted));
+    std::string expected(test.utf8);
+    EXPECT_EQ(expected, converted);
+  }
+}
+#endif  // defined(WCHAR_T_IS_UTF32)
+
+TEST(UTFStringConversionsTest, ConvertMultiString) {
+  static char16 multi16[] = {
+    'f', 'o', 'o', '\0',
+    'b', 'a', 'r', '\0',
+    'b', 'a', 'z', '\0',
+    '\0'
+  };
+  static char multi[] = {
+    'f', 'o', 'o', '\0',
+    'b', 'a', 'r', '\0',
+    'b', 'a', 'z', '\0',
+    '\0'
+  };
+  string16 multistring16;
+  memcpy(WriteInto(&multistring16, arraysize(multi16)), multi16,
+                   sizeof(multi16));
+  EXPECT_EQ(arraysize(multi16) - 1, multistring16.length());
+  std::string expected;
+  memcpy(WriteInto(&expected, arraysize(multi)), multi, sizeof(multi));
+  EXPECT_EQ(arraysize(multi) - 1, expected.length());
+  const std::string& converted = UTF16ToUTF8(multistring16);
+  EXPECT_EQ(arraysize(multi) - 1, converted.length());
+  EXPECT_EQ(expected, converted);
+}
+
+}  // namespace base
diff --git a/base/supports_user_data.cc b/base/supports_user_data.cc
new file mode 100644
index 0000000..43ab21a
--- /dev/null
+++ b/base/supports_user_data.cc
@@ -0,0 +1,51 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/supports_user_data.h"
+
+namespace base {
+
+SupportsUserData::SupportsUserData() {
+  // Harmless to construct on a different execution sequence to subsequent
+  // usage.
+  sequence_checker_.DetachFromSequence();
+}
+
+SupportsUserData::Data* SupportsUserData::GetUserData(const void* key) const {
+  DCHECK(sequence_checker_.CalledOnValidSequence());
+  // Avoid null keys; they are too vulnerable to collision.
+  DCHECK(key);
+  DataMap::const_iterator found = user_data_.find(key);
+  if (found != user_data_.end())
+    return found->second.get();
+  return nullptr;
+}
+
+void SupportsUserData::SetUserData(const void* key,
+                                   std::unique_ptr<Data> data) {
+  DCHECK(sequence_checker_.CalledOnValidSequence());
+  // Avoid null keys; they are too vulnerable to collision.
+  DCHECK(key);
+  user_data_[key] = std::move(data);
+}
+
+void SupportsUserData::RemoveUserData(const void* key) {
+  DCHECK(sequence_checker_.CalledOnValidSequence());
+  user_data_.erase(key);
+}
+
+void SupportsUserData::DetachFromSequence() {
+  sequence_checker_.DetachFromSequence();
+}
+
+SupportsUserData::~SupportsUserData() {
+  DCHECK(sequence_checker_.CalledOnValidSequence() || user_data_.empty());
+  DataMap local_user_data;
+  user_data_.swap(local_user_data);
+  // Now this->user_data_ is empty, and any destructors called transitively from
+  // the destruction of |local_user_data| will see it that way instead of
+  // examining a being-destroyed object.
+}
+
+}  // namespace base
diff --git a/base/supports_user_data.h b/base/supports_user_data.h
new file mode 100644
index 0000000..356c973
--- /dev/null
+++ b/base/supports_user_data.h
@@ -0,0 +1,87 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SUPPORTS_USER_DATA_H_
+#define BASE_SUPPORTS_USER_DATA_H_
+
+#include <map>
+#include <memory>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/sequence_checker.h"
+
+// TODO(gab): Removing this include causes IWYU failures in other headers,
+// remove it in a follow-up CL.
+#include "base/threading/thread_checker.h"
+
+namespace base {
+
+// This is a helper for classes that want to allow users to stash random data by
+// key. At destruction all the objects will be destructed.
+class BASE_EXPORT SupportsUserData {
+ public:
+  SupportsUserData();
+
+  // Derive from this class and add your own data members to associate extra
+  // information with this object. Alternatively, add this as a public base
+  // class to any class with a virtual destructor.
+  class BASE_EXPORT Data {
+   public:
+    virtual ~Data() = default;
+  };
+
+  // The user data allows the clients to associate data with this object.
+  // Multiple user data values can be stored under different keys.
+  // This object will TAKE OWNERSHIP of the given data pointer, and will
+  // delete the object if it is changed or the object is destroyed.
+  // |key| must not be null--that value is too vulnerable for collision.
+  Data* GetUserData(const void* key) const;
+  void SetUserData(const void* key, std::unique_ptr<Data> data);
+  void RemoveUserData(const void* key);
+
+  // SupportsUserData is not thread-safe, and on debug build will assert it is
+  // only used on one execution sequence. Calling this method allows the caller
+  // to hand the SupportsUserData instance across execution sequences. Use only
+  // if you are taking full control of the synchronization of that hand over.
+  void DetachFromSequence();
+
+ protected:
+  virtual ~SupportsUserData();
+
+ private:
+  using DataMap = std::map<const void*, std::unique_ptr<Data>>;
+
+  // Externally-defined data accessible by key.
+  DataMap user_data_;
+  // Guards usage of |user_data_|.
+  SequenceChecker sequence_checker_;
+
+  DISALLOW_COPY_AND_ASSIGN(SupportsUserData);
+};
+
+// Adapter class that releases a refcounted object when the
+// SupportsUserData::Data object is deleted.
+template <typename T>
+class UserDataAdapter : public base::SupportsUserData::Data {
+ public:
+  static T* Get(const SupportsUserData* supports_user_data, const void* key) {
+    UserDataAdapter* data =
+      static_cast<UserDataAdapter*>(supports_user_data->GetUserData(key));
+    return data ? static_cast<T*>(data->object_.get()) : NULL;
+  }
+
+  UserDataAdapter(T* object) : object_(object) {}
+  T* release() { return object_.release(); }
+
+ private:
+  scoped_refptr<T> object_;
+
+  DISALLOW_COPY_AND_ASSIGN(UserDataAdapter);
+};
+
+}  // namespace base
+
+#endif  // BASE_SUPPORTS_USER_DATA_H_
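A minimal usage sketch for SupportsUserData (illustrative; Host, MyData, and the key are hypothetical names, not part of this change):

#include <memory>

#include "base/supports_user_data.h"

// A hypothetical host object that lets clients attach per-instance state.
class Host : public base::SupportsUserData {
 public:
  ~Host() override = default;
};

// The key's address, not its contents, identifies the slot.
const char kMyDataKey[] = "my-data";

struct MyData : public base::SupportsUserData::Data {
  explicit MyData(int value) : value(value) {}
  int value;
};

void Attach(Host* host) {
  host->SetUserData(&kMyDataKey, std::make_unique<MyData>(42));
}

int Read(const Host* host) {
  auto* data = static_cast<MyData*>(host->GetUserData(&kMyDataKey));
  return data ? data->value : 0;  // |data| is destroyed along with |host|.
}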
diff --git a/base/supports_user_data_unittest.cc b/base/supports_user_data_unittest.cc
new file mode 100644
index 0000000..2e0a724
--- /dev/null
+++ b/base/supports_user_data_unittest.cc
@@ -0,0 +1,40 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/supports_user_data.h"
+
+#include <vector>
+
+#include "base/memory/ptr_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+struct TestSupportsUserData : public SupportsUserData {};
+
+struct UsesItself : public SupportsUserData::Data {
+  UsesItself(SupportsUserData* supports_user_data, const void* key)
+      : supports_user_data_(supports_user_data),
+        key_(key) {
+  }
+
+  ~UsesItself() override {
+    EXPECT_EQ(nullptr, supports_user_data_->GetUserData(key_));
+  }
+
+  SupportsUserData* supports_user_data_;
+  const void* key_;
+};
+
+TEST(SupportsUserDataTest, ClearWorksRecursively) {
+  TestSupportsUserData supports_user_data;
+  char key = 0;
+  supports_user_data.SetUserData(
+      &key, std::make_unique<UsesItself>(&supports_user_data, &key));
+  // Destruction of supports_user_data runs the actual test.
+}
+
+}  // namespace
+}  // namespace base
diff --git a/base/sync_socket.h b/base/sync_socket.h
new file mode 100644
index 0000000..42db9a2
--- /dev/null
+++ b/base/sync_socket.h
@@ -0,0 +1,161 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SYNC_SOCKET_H_
+#define BASE_SYNC_SOCKET_H_
+
+// A socket abstraction used for sending and receiving plain
+// data.  Because the receiving is blocking, they can be used to perform
+// rudimentary cross-process synchronization with low latency.
+
+#include <stddef.h>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/process/process_handle.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#endif
+#include <sys/types.h>
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include "base/file_descriptor_posix.h"
+#endif
+
+namespace base {
+
+class BASE_EXPORT SyncSocket {
+ public:
+#if defined(OS_WIN)
+  typedef HANDLE Handle;
+  typedef Handle TransitDescriptor;
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  typedef int Handle;
+  typedef FileDescriptor TransitDescriptor;
+#endif
+  static const Handle kInvalidHandle;
+
+  SyncSocket();
+
+  // Creates a SyncSocket from a Handle.  Used in transport.
+  explicit SyncSocket(Handle handle) : handle_(handle) {}
+  virtual ~SyncSocket();
+
+  // Initializes and connects a pair of sockets.
+  // |socket_a| and |socket_b| must not hold a valid handle.  Upon successful
+  // return, the sockets will both be valid and connected.
+  static bool CreatePair(SyncSocket* socket_a, SyncSocket* socket_b);
+
+  // Extracts the |Handle| from a |TransitDescriptor|.
+  static Handle UnwrapHandle(const TransitDescriptor& descriptor);
+
+  // Prepares a |TransitDescriptor| which wraps |Handle| used for transit.
+  // This is used to prepare the underlying shared resource before passing back
+  // the handle to be used by the peer process.
+  bool PrepareTransitDescriptor(ProcessHandle peer_process_handle,
+                                TransitDescriptor* descriptor);
+
+  // Closes the SyncSocket.  Returns true on success, false on failure.
+  virtual bool Close();
+
+  // Sends the message to the remote peer of the SyncSocket.
+  // Note it is not safe to send messages from the same socket handle by
+  // multiple threads simultaneously.
+  // buffer is a pointer to the data to send.
+  // length is the length of the data to send (must be non-zero).
+  // Returns the number of bytes sent, or 0 upon failure.
+  virtual size_t Send(const void* buffer, size_t length);
+
+  // Receives a message from a SyncSocket.
+  // buffer is a pointer to the buffer to receive data.
+  // length is the number of bytes of data to receive (must be non-zero).
+  // Returns the number of bytes received, or 0 upon failure.
+  virtual size_t Receive(void* buffer, size_t length);
+
+  // Same as Receive() but only blocks for data until |timeout| has elapsed or
+  // |length| bytes have been received.  Currently only timeouts less than one
+  // second are allowed.  Returns the number of bytes read.
+  virtual size_t ReceiveWithTimeout(void* buffer,
+                                    size_t length,
+                                    TimeDelta timeout);
+
+  // Returns the number of bytes available. If non-zero, Receive() will not
+  // block when called.
+  virtual size_t Peek();
+
+  // Returns the contained handle without transferring ownership.  Used when
+  // transferring between processes.
+  Handle handle() const { return handle_; }
+
+  // Extracts and takes ownership of the contained handle.
+  Handle Release();
+
+ protected:
+  Handle handle_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(SyncSocket);
+};
+
+// Derives from SyncSocket and adds support for shutting down the socket from
+// another thread while a blocking Receive or Send is being done from the
+// thread that owns the socket.
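+//
+// A sketch of the cancellation pattern (illustrative only, not part of this
+// change; ReaderThread is a hypothetical function run on another thread):
+//
+//   void ReaderThread(base::CancelableSyncSocket* socket) {
+//     char byte;
+//     // Returns 0 once Shutdown() is called from another thread.
+//     socket->Receive(&byte, 1);
+//   }
+//   ...
+//   socket.Shutdown();  // Unblocks the Receive() above.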
+class BASE_EXPORT CancelableSyncSocket : public SyncSocket {
+ public:
+  CancelableSyncSocket();
+  explicit CancelableSyncSocket(Handle handle);
+  ~CancelableSyncSocket() override = default;
+
+  // Initializes a pair of cancelable sockets.  See documentation for
+  // SyncSocket::CreatePair for more details.
+  static bool CreatePair(CancelableSyncSocket* socket_a,
+                         CancelableSyncSocket* socket_b);
+
+  // A way to shut down a socket even if another thread is currently performing
+  // a blocking Receive or Send.
+  bool Shutdown();
+
+#if defined(OS_WIN)
+  // Since the Linux and Mac implementations actually use a socket, shutting
+  // them down from another thread is pretty simple - we can just call
+  // shutdown().  However, the Windows implementation relies on named pipes,
+  // and before Vista there was no supported way to cancel a blocking
+  // synchronous Read.  So, for Windows only, we override these SyncSocket
+  // methods in order to support shutting down the 'socket'.
+  bool Close() override;
+  size_t Receive(void* buffer, size_t length) override;
+  size_t ReceiveWithTimeout(void* buffer,
+                            size_t length,
+                            TimeDelta timeout) override;
+#endif
+
+  // Send() is overridden to catch cases where the remote end is not responding
+  // and we fill the local socket buffer. When the buffer is full, this
+  // implementation of Send() will not block indefinitely as
+  // SyncSocket::Send will, but instead return 0, as no bytes could be sent.
+  // Note that the socket will not be closed in this case.
+  size_t Send(const void* buffer, size_t length) override;
+
+ private:
+#if defined(OS_WIN)
+  WaitableEvent shutdown_event_;
+  WaitableEvent file_operation_;
+#endif
+  DISALLOW_COPY_AND_ASSIGN(CancelableSyncSocket);
+};
+
+#if defined(OS_WIN) && !defined(COMPONENT_BUILD)
+// TODO(cpu): remove this once chrome is split in two dlls.
+__declspec(selectany)
+    const SyncSocket::Handle SyncSocket::kInvalidHandle = INVALID_HANDLE_VALUE;
+#endif
+
+}  // namespace base
+
+#endif  // BASE_SYNC_SOCKET_H_
diff --git a/base/sync_socket_nacl.cc b/base/sync_socket_nacl.cc
new file mode 100644
index 0000000..19a20be
--- /dev/null
+++ b/base/sync_socket_nacl.cc
@@ -0,0 +1,105 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sync_socket.h"
+
+#include <errno.h>
+#include <limits.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "base/logging.h"
+
+namespace base {
+
+const SyncSocket::Handle SyncSocket::kInvalidHandle = -1;
+
+SyncSocket::SyncSocket() : handle_(kInvalidHandle) {
+}
+
+SyncSocket::~SyncSocket() {
+  Close();
+}
+
+// static
+bool SyncSocket::CreatePair(SyncSocket* socket_a, SyncSocket* socket_b) {
+  return false;
+}
+
+// static
+SyncSocket::Handle SyncSocket::UnwrapHandle(
+    const SyncSocket::TransitDescriptor& descriptor) {
+  // TODO(xians): Still unclear how NaCl uses SyncSocket.
+  // See http://crbug.com/409656
+  NOTIMPLEMENTED();
+  return SyncSocket::kInvalidHandle;
+}
+
+bool SyncSocket::PrepareTransitDescriptor(
+    ProcessHandle peer_process_handle,
+    SyncSocket::TransitDescriptor* descriptor) {
+  // TODO(xians): Still unclear how NaCl uses SyncSocket.
+  // See http://crbug.com/409656
+  NOTIMPLEMENTED();
+  return false;
+}
+
+bool SyncSocket::Close() {
+  if (handle_ != kInvalidHandle) {
+    if (close(handle_) < 0)
+      DPLOG(ERROR) << "close";
+    handle_ = kInvalidHandle;
+  }
+  return true;
+}
+
+size_t SyncSocket::Send(const void* buffer, size_t length) {
+  const ssize_t bytes_written = write(handle_, buffer, length);
+  return bytes_written > 0 ? bytes_written : 0;
+}
+
+size_t SyncSocket::Receive(void* buffer, size_t length) {
+  const ssize_t bytes_read = read(handle_, buffer, length);
+  return bytes_read > 0 ? bytes_read : 0;
+}
+
+size_t SyncSocket::ReceiveWithTimeout(void* buffer, size_t length, TimeDelta) {
+  NOTIMPLEMENTED();
+  return 0;
+}
+
+size_t SyncSocket::Peek() {
+  NOTIMPLEMENTED();
+  return 0;
+}
+
+SyncSocket::Handle SyncSocket::Release() {
+  Handle r = handle_;
+  handle_ = kInvalidHandle;
+  return r;
+}
+
+CancelableSyncSocket::CancelableSyncSocket() {
+}
+
+CancelableSyncSocket::CancelableSyncSocket(Handle handle)
+    : SyncSocket(handle) {
+}
+
+size_t CancelableSyncSocket::Send(const void* buffer, size_t length) {
+  return SyncSocket::Send(buffer, length);
+}
+
+bool CancelableSyncSocket::Shutdown() {
+  return SyncSocket::Close();
+}
+
+// static
+bool CancelableSyncSocket::CreatePair(CancelableSyncSocket* socket_a,
+                                      CancelableSyncSocket* socket_b) {
+  return SyncSocket::CreatePair(socket_a, socket_b);
+}
+
+}  // namespace base
diff --git a/base/sync_socket_posix.cc b/base/sync_socket_posix.cc
new file mode 100644
index 0000000..ff1e0e6
--- /dev/null
+++ b/base/sync_socket_posix.cc
@@ -0,0 +1,253 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sync_socket.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <poll.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#if defined(OS_SOLARIS)
+#include <sys/filio.h>
+#endif
+
+#include <algorithm>
+
+#include "base/files/file_util.h"
+#include "base/logging.h"
+#include "base/threading/thread_restrictions.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace {
+// To avoid users sending negative message lengths to Send/Receive
+// we clamp message lengths, which are size_t, to no more than INT_MAX.
+const size_t kMaxMessageLength = static_cast<size_t>(INT_MAX);
+
+// Writes |length| bytes of |buffer| into |handle|.  Returns the number of
+// bytes written or zero on error.  |length| must be greater than 0.
+size_t SendHelper(SyncSocket::Handle handle,
+                  const void* buffer,
+                  size_t length) {
+  DCHECK_GT(length, 0u);
+  DCHECK_LE(length, kMaxMessageLength);
+  DCHECK_NE(handle, SyncSocket::kInvalidHandle);
+  const char* charbuffer = static_cast<const char*>(buffer);
+  return WriteFileDescriptor(handle, charbuffer, length)
+             ? static_cast<size_t>(length)
+             : 0;
+}
+
+bool CloseHandle(SyncSocket::Handle handle) {
+  if (handle != SyncSocket::kInvalidHandle && close(handle) < 0) {
+    DPLOG(ERROR) << "close";
+    return false;
+  }
+
+  return true;
+}
+
+}  // namespace
+
+const SyncSocket::Handle SyncSocket::kInvalidHandle = -1;
+
+SyncSocket::SyncSocket() : handle_(kInvalidHandle) {}
+
+SyncSocket::~SyncSocket() {
+  Close();
+}
+
+// static
+bool SyncSocket::CreatePair(SyncSocket* socket_a, SyncSocket* socket_b) {
+  DCHECK_NE(socket_a, socket_b);
+  DCHECK_EQ(socket_a->handle_, kInvalidHandle);
+  DCHECK_EQ(socket_b->handle_, kInvalidHandle);
+
+#if defined(OS_MACOSX)
+  int nosigpipe = 1;
+#endif  // defined(OS_MACOSX)
+
+  Handle handles[2] = { kInvalidHandle, kInvalidHandle };
+  if (socketpair(AF_UNIX, SOCK_STREAM, 0, handles) != 0) {
+    CloseHandle(handles[0]);
+    CloseHandle(handles[1]);
+    return false;
+  }
+
+#if defined(OS_MACOSX)
+  // On OSX an attempt to read or write to a closed socket may generate a
+  // SIGPIPE rather than returning -1.  setsockopt will shut this off.
+  if (0 != setsockopt(handles[0], SOL_SOCKET, SO_NOSIGPIPE,
+                      &nosigpipe, sizeof nosigpipe) ||
+      0 != setsockopt(handles[1], SOL_SOCKET, SO_NOSIGPIPE,
+                      &nosigpipe, sizeof nosigpipe)) {
+    CloseHandle(handles[0]);
+    CloseHandle(handles[1]);
+    return false;
+  }
+#endif
+
+  // Copy the handles out for successful return.
+  socket_a->handle_ = handles[0];
+  socket_b->handle_ = handles[1];
+
+  return true;
+}
+
+// static
+SyncSocket::Handle SyncSocket::UnwrapHandle(
+    const TransitDescriptor& descriptor) {
+  return descriptor.fd;
+}
+
+bool SyncSocket::PrepareTransitDescriptor(ProcessHandle peer_process_handle,
+                                          TransitDescriptor* descriptor) {
+  descriptor->fd = handle();
+  descriptor->auto_close = false;
+  return descriptor->fd != kInvalidHandle;
+}
+
+bool SyncSocket::Close() {
+  const bool retval = CloseHandle(handle_);
+  handle_ = kInvalidHandle;
+  return retval;
+}
+
+size_t SyncSocket::Send(const void* buffer, size_t length) {
+  AssertBlockingAllowed();
+  return SendHelper(handle_, buffer, length);
+}
+
+size_t SyncSocket::Receive(void* buffer, size_t length) {
+  AssertBlockingAllowed();
+  DCHECK_GT(length, 0u);
+  DCHECK_LE(length, kMaxMessageLength);
+  DCHECK_NE(handle_, kInvalidHandle);
+  char* charbuffer = static_cast<char*>(buffer);
+  if (ReadFromFD(handle_, charbuffer, length))
+    return length;
+  return 0;
+}
+
+size_t SyncSocket::ReceiveWithTimeout(void* buffer,
+                                      size_t length,
+                                      TimeDelta timeout) {
+  AssertBlockingAllowed();
+  DCHECK_GT(length, 0u);
+  DCHECK_LE(length, kMaxMessageLength);
+  DCHECK_NE(handle_, kInvalidHandle);
+
+  // Only timeouts greater than zero and less than one second are allowed.
+  DCHECK_GT(timeout.InMicroseconds(), 0);
+  DCHECK_LT(timeout.InMicroseconds(),
+            TimeDelta::FromSeconds(1).InMicroseconds());
+
+  // Track the start time so we can reduce the timeout as data is read.
+  TimeTicks start_time = TimeTicks::Now();
+  const TimeTicks finish_time = start_time + timeout;
+
+  struct pollfd pollfd;
+  pollfd.fd = handle_;
+  pollfd.events = POLLIN;
+  pollfd.revents = 0;
+
+  size_t bytes_read_total = 0;
+  while (bytes_read_total < length) {
+    const TimeDelta this_timeout = finish_time - TimeTicks::Now();
+    const int timeout_ms =
+        static_cast<int>(this_timeout.InMillisecondsRoundedUp());
+    if (timeout_ms <= 0)
+      break;
+    const int poll_result = poll(&pollfd, 1, timeout_ms);
+    // Handle EINTR manually since we need to update the timeout value.
+    if (poll_result == -1 && errno == EINTR)
+      continue;
+    // Return if other type of error or a timeout.
+    if (poll_result <= 0)
+      return bytes_read_total;
+
+    // poll() only tells us that data is ready for reading, not how much.  We
+    // must Peek() for the amount ready for reading to avoid blocking.
+    // At hang up (POLLHUP), the write end has been closed and there might still
+    // be data to be read.
+    // No special handling is needed for error (POLLERR); we can let any of the
+    // following operations fail and handle it there.
+    DCHECK(pollfd.revents & (POLLIN | POLLHUP | POLLERR)) << pollfd.revents;
+    const size_t bytes_to_read = std::min(Peek(), length - bytes_read_total);
+
+    // There may be zero bytes to read if the socket at the other end closed.
+    if (!bytes_to_read)
+      return bytes_read_total;
+
+    const size_t bytes_received =
+        Receive(static_cast<char*>(buffer) + bytes_read_total, bytes_to_read);
+    bytes_read_total += bytes_received;
+    if (bytes_received != bytes_to_read)
+      return bytes_read_total;
+  }
+
+  return bytes_read_total;
+}
+
+size_t SyncSocket::Peek() {
+  DCHECK_NE(handle_, kInvalidHandle);
+  int number_chars = 0;
+  if (ioctl(handle_, FIONREAD, &number_chars) == -1) {
+    // If there is an error in ioctl, signal that the channel would block.
+    return 0;
+  }
+  DCHECK_GE(number_chars, 0);
+  return number_chars;
+}
+
+SyncSocket::Handle SyncSocket::Release() {
+  Handle r = handle_;
+  handle_ = kInvalidHandle;
+  return r;
+}
+
+CancelableSyncSocket::CancelableSyncSocket() = default;
+CancelableSyncSocket::CancelableSyncSocket(Handle handle)
+    : SyncSocket(handle) {
+}
+
+bool CancelableSyncSocket::Shutdown() {
+  DCHECK_NE(handle_, kInvalidHandle);
+  return HANDLE_EINTR(shutdown(handle_, SHUT_RDWR)) >= 0;
+}
+
+size_t CancelableSyncSocket::Send(const void* buffer, size_t length) {
+  DCHECK_GT(length, 0u);
+  DCHECK_LE(length, kMaxMessageLength);
+  DCHECK_NE(handle_, kInvalidHandle);
+
+  const int flags = fcntl(handle_, F_GETFL);
+  if (flags != -1 && (flags & O_NONBLOCK) == 0) {
+    // Set the socket to non-blocking mode for sending if its original mode
+    // is blocking.
+    fcntl(handle_, F_SETFL, flags | O_NONBLOCK);
+  }
+
+  const size_t len = SendHelper(handle_, buffer, length);
+
+  if (flags != -1 && (flags & O_NONBLOCK) == 0) {
+    // Restore the original flags.
+    fcntl(handle_, F_SETFL, flags);
+  }
+
+  return len;
+}
+
+// static
+bool CancelableSyncSocket::CreatePair(CancelableSyncSocket* socket_a,
+                                      CancelableSyncSocket* socket_b) {
+  return SyncSocket::CreatePair(socket_a, socket_b);
+}
+
+}  // namespace base
diff --git a/base/sync_socket_unittest.cc b/base/sync_socket_unittest.cc
new file mode 100644
index 0000000..fdcd9a1
--- /dev/null
+++ b/base/sync_socket_unittest.cc
@@ -0,0 +1,190 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sync_socket.h"
+
+#include "base/macros.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/simple_thread.h"
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+constexpr TimeDelta kReceiveTimeout = TimeDelta::FromMilliseconds(750);
+
+class HangingReceiveThread : public DelegateSimpleThread::Delegate {
+ public:
+  explicit HangingReceiveThread(SyncSocket* socket, bool with_timeout)
+      : socket_(socket),
+        thread_(this, "HangingReceiveThread"),
+        with_timeout_(with_timeout),
+        started_event_(WaitableEvent::ResetPolicy::MANUAL,
+                       WaitableEvent::InitialState::NOT_SIGNALED),
+        done_event_(WaitableEvent::ResetPolicy::MANUAL,
+                    WaitableEvent::InitialState::NOT_SIGNALED) {
+    thread_.Start();
+  }
+
+  ~HangingReceiveThread() override = default;
+
+  void Run() override {
+    int data = 0;
+    ASSERT_EQ(socket_->Peek(), 0u);
+
+    started_event_.Signal();
+
+    if (with_timeout_) {
+      ASSERT_EQ(0u, socket_->ReceiveWithTimeout(&data, sizeof(data),
+                                                kReceiveTimeout));
+    } else {
+      ASSERT_EQ(0u, socket_->Receive(&data, sizeof(data)));
+    }
+
+    done_event_.Signal();
+  }
+
+  void Stop() {
+    thread_.Join();
+  }
+
+  WaitableEvent* started_event() { return &started_event_; }
+  WaitableEvent* done_event() { return &done_event_; }
+
+ private:
+  SyncSocket* socket_;
+  DelegateSimpleThread thread_;
+  bool with_timeout_;
+  WaitableEvent started_event_;
+  WaitableEvent done_event_;
+
+  DISALLOW_COPY_AND_ASSIGN(HangingReceiveThread);
+};
+
+// Tests sending data between two SyncSockets. Uses ASSERT() and thus will exit
+// early upon failure.  Callers should use ASSERT_NO_FATAL_FAILURE() if testing
+// continues after return.
+void SendReceivePeek(SyncSocket* socket_a, SyncSocket* socket_b) {
+  int received = 0;
+  const int kSending = 123;
+  static_assert(sizeof(kSending) == sizeof(received), "invalid data size");
+
+  ASSERT_EQ(0u, socket_a->Peek());
+  ASSERT_EQ(0u, socket_b->Peek());
+
+  // Verify |socket_a| can send to |socket_b| and |socket_b| can Receive from
+  // |socket_a|.
+  ASSERT_EQ(sizeof(kSending), socket_a->Send(&kSending, sizeof(kSending)));
+  ASSERT_EQ(sizeof(kSending), socket_b->Peek());
+  ASSERT_EQ(sizeof(kSending), socket_b->Receive(&received, sizeof(kSending)));
+  ASSERT_EQ(kSending, received);
+
+  ASSERT_EQ(0u, socket_a->Peek());
+  ASSERT_EQ(0u, socket_b->Peek());
+
+  // Now verify the reverse.
+  received = 0;
+  ASSERT_EQ(sizeof(kSending), socket_b->Send(&kSending, sizeof(kSending)));
+  ASSERT_EQ(sizeof(kSending), socket_a->Peek());
+  ASSERT_EQ(sizeof(kSending), socket_a->Receive(&received, sizeof(kSending)));
+  ASSERT_EQ(kSending, received);
+
+  ASSERT_EQ(0u, socket_a->Peek());
+  ASSERT_EQ(0u, socket_b->Peek());
+
+  ASSERT_TRUE(socket_a->Close());
+  ASSERT_TRUE(socket_b->Close());
+}
+
+}  // namespace
+
+class SyncSocketTest : public testing::Test {
+ public:
+  void SetUp() override {
+    ASSERT_TRUE(SyncSocket::CreatePair(&socket_a_, &socket_b_));
+  }
+
+ protected:
+  SyncSocket socket_a_;
+  SyncSocket socket_b_;
+};
+
+TEST_F(SyncSocketTest, NormalSendReceivePeek) {
+  SendReceivePeek(&socket_a_, &socket_b_);
+}
+
+TEST_F(SyncSocketTest, ClonedSendReceivePeek) {
+  SyncSocket socket_c(socket_a_.Release());
+  SyncSocket socket_d(socket_b_.Release());
+  SendReceivePeek(&socket_c, &socket_d);
+}
+
+class CancelableSyncSocketTest : public testing::Test {
+ public:
+  void SetUp() override {
+    ASSERT_TRUE(CancelableSyncSocket::CreatePair(&socket_a_, &socket_b_));
+  }
+
+ protected:
+  CancelableSyncSocket socket_a_;
+  CancelableSyncSocket socket_b_;
+};
+
+TEST_F(CancelableSyncSocketTest, NormalSendReceivePeek) {
+  SendReceivePeek(&socket_a_, &socket_b_);
+}
+
+TEST_F(CancelableSyncSocketTest, ClonedSendReceivePeek) {
+  CancelableSyncSocket socket_c(socket_a_.Release());
+  CancelableSyncSocket socket_d(socket_b_.Release());
+  SendReceivePeek(&socket_c, &socket_d);
+}
+
+TEST_F(CancelableSyncSocketTest, ShutdownCancelsReceive) {
+  HangingReceiveThread thread(&socket_b_, /* with_timeout = */ false);
+
+  // Wait for the thread to be started. Note that this doesn't guarantee that
+  // Receive() is called before Shutdown().
+  thread.started_event()->Wait();
+
+  EXPECT_TRUE(socket_b_.Shutdown());
+  EXPECT_TRUE(thread.done_event()->TimedWait(kReceiveTimeout));
+
+  thread.Stop();
+}
+
+TEST_F(CancelableSyncSocketTest, ShutdownCancelsReceiveWithTimeout) {
+  HangingReceiveThread thread(&socket_b_, /* with_timeout = */ true);
+
+  // Wait for the thread to be started. Note that this doesn't guarantee that
+  // Receive() is called before Shutdown().
+  thread.started_event()->Wait();
+
+  EXPECT_TRUE(socket_b_.Shutdown());
+  EXPECT_TRUE(thread.done_event()->TimedWait(kReceiveTimeout));
+
+  thread.Stop();
+}
+
+TEST_F(CancelableSyncSocketTest, ReceiveAfterShutdown) {
+  socket_a_.Shutdown();
+  int data = 0;
+  EXPECT_EQ(0u, socket_a_.Receive(&data, sizeof(data)));
+}
+
+TEST_F(CancelableSyncSocketTest, ReceiveWithTimeoutAfterShutdown) {
+  socket_a_.Shutdown();
+  TimeTicks start = TimeTicks::Now();
+  int data = 0;
+  EXPECT_EQ(0u,
+            socket_a_.ReceiveWithTimeout(&data, sizeof(data), kReceiveTimeout));
+
+  // Ensure the receive didn't just timeout.
+  EXPECT_LT(TimeTicks::Now() - start, kReceiveTimeout);
+}
+
+}  // namespace base
diff --git a/base/sync_socket_win.cc b/base/sync_socket_win.cc
new file mode 100644
index 0000000..905d0a2
--- /dev/null
+++ b/base/sync_socket_win.cc
@@ -0,0 +1,356 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sync_socket.h"
+
+#include <limits.h>
+#include <stddef.h>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/rand_util.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/win/scoped_handle.h"
+
+namespace base {
+
+using win::ScopedHandle;
+
+namespace {
+// IMPORTANT: do not change how this name is generated because it will break
+// in sandboxed scenarios as we might have by-name policies that allow pipe
+// creation. Also keep the secure random number generation.
+const wchar_t kPipeNameFormat[] = L"\\\\.\\pipe\\chrome.sync.%u.%u.%lu";
+const size_t kPipePathMax = arraysize(kPipeNameFormat) + (3 * 10) + 1;
+
+// To avoid users sending negative message lengths to Send/Receive
+// we clamp message lengths, which are size_t, to no more than INT_MAX.
+const size_t kMaxMessageLength = static_cast<size_t>(INT_MAX);
+
+const int kOutBufferSize = 4096;
+const int kInBufferSize = 4096;
+const int kDefaultTimeoutMilliSeconds = 1000;
+
+bool CreatePairImpl(HANDLE* socket_a, HANDLE* socket_b, bool overlapped) {
+  DCHECK_NE(socket_a, socket_b);
+  DCHECK_EQ(*socket_a, SyncSocket::kInvalidHandle);
+  DCHECK_EQ(*socket_b, SyncSocket::kInvalidHandle);
+
+  wchar_t name[kPipePathMax];
+  ScopedHandle handle_a;
+  DWORD flags = PIPE_ACCESS_DUPLEX | FILE_FLAG_FIRST_PIPE_INSTANCE;
+  if (overlapped)
+    flags |= FILE_FLAG_OVERLAPPED;
+
+  do {
+    unsigned long rnd_name;
+    RandBytes(&rnd_name, sizeof(rnd_name));
+
+    swprintf(name, kPipePathMax,
+             kPipeNameFormat,
+             GetCurrentProcessId(),
+             GetCurrentThreadId(),
+             rnd_name);
+
+    handle_a.Set(CreateNamedPipeW(
+        name,
+        flags,
+        PIPE_TYPE_BYTE | PIPE_READMODE_BYTE,
+        1,
+        kOutBufferSize,
+        kInBufferSize,
+        kDefaultTimeoutMilliSeconds,
+        NULL));
+  } while (!handle_a.IsValid() &&
+           (GetLastError() == ERROR_PIPE_BUSY));
+
+  if (!handle_a.IsValid()) {
+    NOTREACHED();
+    return false;
+  }
+
+  // The SECURITY_ANONYMOUS flag means that the server side (handle_a) cannot
+  // impersonate the client (handle_b). This allows us not to care which side
+  // ends up in which side of a privilege boundary.
+  flags = SECURITY_SQOS_PRESENT | SECURITY_ANONYMOUS;
+  if (overlapped)
+    flags |= FILE_FLAG_OVERLAPPED;
+
+  ScopedHandle handle_b(CreateFileW(name,
+                                    GENERIC_READ | GENERIC_WRITE,
+                                    0,          // no sharing.
+                                    NULL,       // default security attributes.
+                                    OPEN_EXISTING,  // opens existing pipe.
+                                    flags,
+                                    NULL));     // no template file.
+  if (!handle_b.IsValid()) {
+    DPLOG(ERROR) << "CreateFileW failed";
+    return false;
+  }
+
+  if (!ConnectNamedPipe(handle_a.Get(), NULL)) {
+    DWORD error = GetLastError();
+    if (error != ERROR_PIPE_CONNECTED) {
+      DPLOG(ERROR) << "ConnectNamedPipe failed";
+      return false;
+    }
+  }
+
+  *socket_a = handle_a.Take();
+  *socket_b = handle_b.Take();
+
+  return true;
+}
+
+// Inline helper to avoid having the cast everywhere.
+DWORD GetNextChunkSize(size_t current_pos, size_t max_size) {
+  // The following statement is for 64 bit portability.
+  return static_cast<DWORD>(((max_size - current_pos) <= UINT_MAX) ?
+      (max_size - current_pos) : UINT_MAX);
+}
+
+// Template function that supports calling ReadFile or WriteFile in an
+// overlapped fashion and waits for IO completion.  The function also waits
+// on an event that can be used to cancel the operation.  If the operation
+// is cancelled, the function returns and closes the relevant socket object.
+template <typename BufferType, typename Function>
+size_t CancelableFileOperation(Function operation,
+                               HANDLE file,
+                               BufferType* buffer,
+                               size_t length,
+                               WaitableEvent* io_event,
+                               WaitableEvent* cancel_event,
+                               CancelableSyncSocket* socket,
+                               DWORD timeout_in_ms) {
+  AssertBlockingAllowed();
+  // The buffer must be byte size or the length check won't make much sense.
+  static_assert(sizeof(buffer[0]) == sizeof(char), "incorrect buffer type");
+  DCHECK_GT(length, 0u);
+  DCHECK_LE(length, kMaxMessageLength);
+  DCHECK_NE(file, SyncSocket::kInvalidHandle);
+
+  // Track the finish time so we can calculate the timeout as data is read.
+  TimeTicks current_time, finish_time;
+  if (timeout_in_ms != INFINITE) {
+    current_time = TimeTicks::Now();
+    finish_time =
+        current_time + base::TimeDelta::FromMilliseconds(timeout_in_ms);
+  }
+
+  size_t count = 0;
+  do {
+    // The OVERLAPPED structure will be modified by ReadFile or WriteFile.
+    OVERLAPPED ol = { 0 };
+    ol.hEvent = io_event->handle();
+
+    const DWORD chunk = GetNextChunkSize(count, length);
+    // This is either the ReadFile or WriteFile call depending on whether
+    // we're receiving or sending data.
+    DWORD len = 0;
+    const BOOL operation_ok = operation(
+        file, static_cast<BufferType*>(buffer) + count, chunk, &len, &ol);
+    if (!operation_ok) {
+      if (::GetLastError() == ERROR_IO_PENDING) {
+        HANDLE events[] = { io_event->handle(), cancel_event->handle() };
+        const int wait_result = WaitForMultipleObjects(
+            arraysize(events), events, FALSE,
+            timeout_in_ms == INFINITE ?
+                timeout_in_ms :
+                static_cast<DWORD>(
+                    (finish_time - current_time).InMilliseconds()));
+        if (wait_result != WAIT_OBJECT_0 + 0) {
+          // CancelIo() doesn't synchronously cancel outstanding IO, only marks
+          // outstanding IO for cancellation. We must call GetOverlappedResult()
+          // below to ensure in flight writes complete before returning.
+          CancelIo(file);
+        }
+
+        // We set the |bWait| parameter to TRUE for GetOverlappedResult() to
+        // ensure writes are complete before returning.
+        if (!GetOverlappedResult(file, &ol, &len, TRUE))
+          len = 0;
+
+        if (wait_result == WAIT_OBJECT_0 + 1) {
+          DVLOG(1) << "Shutdown was signaled. Closing socket.";
+          socket->Close();
+          return count;
+        }
+
+        // Timeouts will be handled by the while() condition below since
+        // GetOverlappedResult() may complete successfully after CancelIo().
+        DCHECK(wait_result == WAIT_OBJECT_0 + 0 || wait_result == WAIT_TIMEOUT);
+      } else {
+        break;
+      }
+    }
+
+    count += len;
+
+    // Quit the operation if we can't write/read anymore.
+    if (len != chunk)
+      break;
+
+    // Since TimeTicks::Now() is expensive, only bother updating the time if we
+    // have more work to do.
+    if (timeout_in_ms != INFINITE && count < length)
+      current_time = base::TimeTicks::Now();
+  } while (count < length &&
+           (timeout_in_ms == INFINITE || current_time < finish_time));
+
+  return count;
+}
+
+}  // namespace
+
+#if defined(COMPONENT_BUILD)
+const SyncSocket::Handle SyncSocket::kInvalidHandle = INVALID_HANDLE_VALUE;
+#endif
+
+SyncSocket::SyncSocket() : handle_(kInvalidHandle) {}
+
+SyncSocket::~SyncSocket() {
+  Close();
+}
+
+// static
+bool SyncSocket::CreatePair(SyncSocket* socket_a, SyncSocket* socket_b) {
+  return CreatePairImpl(&socket_a->handle_, &socket_b->handle_, false);
+}
+
+// static
+SyncSocket::Handle SyncSocket::UnwrapHandle(
+    const TransitDescriptor& descriptor) {
+  return descriptor;
+}
+
+bool SyncSocket::PrepareTransitDescriptor(ProcessHandle peer_process_handle,
+                                          TransitDescriptor* descriptor) {
+  DCHECK(descriptor);
+  if (!::DuplicateHandle(GetCurrentProcess(), handle(), peer_process_handle,
+                         descriptor, 0, FALSE, DUPLICATE_SAME_ACCESS)) {
+    DPLOG(ERROR) << "Cannot duplicate socket handle for peer process.";
+    return false;
+  }
+  return true;
+}
+
+bool SyncSocket::Close() {
+  if (handle_ == kInvalidHandle)
+    return true;
+
+  const BOOL result = CloseHandle(handle_);
+  handle_ = kInvalidHandle;
+  return result == TRUE;
+}
+
+size_t SyncSocket::Send(const void* buffer, size_t length) {
+  AssertBlockingAllowed();
+  DCHECK_GT(length, 0u);
+  DCHECK_LE(length, kMaxMessageLength);
+  DCHECK_NE(handle_, kInvalidHandle);
+  size_t count = 0;
+  while (count < length) {
+    DWORD len;
+    DWORD chunk = GetNextChunkSize(count, length);
+    if (::WriteFile(handle_, static_cast<const char*>(buffer) + count, chunk,
+                    &len, NULL) == FALSE) {
+      return count;
+    }
+    count += len;
+  }
+  return count;
+}
+
+size_t SyncSocket::ReceiveWithTimeout(void* buffer,
+                                      size_t length,
+                                      TimeDelta timeout) {
+  NOTIMPLEMENTED();
+  return 0;
+}
+
+size_t SyncSocket::Receive(void* buffer, size_t length) {
+  AssertBlockingAllowed();
+  DCHECK_GT(length, 0u);
+  DCHECK_LE(length, kMaxMessageLength);
+  DCHECK_NE(handle_, kInvalidHandle);
+  size_t count = 0;
+  while (count < length) {
+    DWORD len;
+    DWORD chunk = GetNextChunkSize(count, length);
+    if (::ReadFile(handle_, static_cast<char*>(buffer) + count, chunk, &len,
+                   NULL) == FALSE) {
+      return count;
+    }
+    count += len;
+  }
+  return count;
+}
+
+size_t SyncSocket::Peek() {
+  DWORD available = 0;
+  PeekNamedPipe(handle_, NULL, 0, NULL, &available, NULL);
+  return available;
+}
+
+SyncSocket::Handle SyncSocket::Release() {
+  Handle r = handle_;
+  handle_ = kInvalidHandle;
+  return r;
+}
+
+CancelableSyncSocket::CancelableSyncSocket()
+    : shutdown_event_(base::WaitableEvent::ResetPolicy::MANUAL,
+                      base::WaitableEvent::InitialState::NOT_SIGNALED),
+      file_operation_(base::WaitableEvent::ResetPolicy::MANUAL,
+                      base::WaitableEvent::InitialState::NOT_SIGNALED) {}
+
+CancelableSyncSocket::CancelableSyncSocket(Handle handle)
+    : SyncSocket(handle),
+      shutdown_event_(base::WaitableEvent::ResetPolicy::MANUAL,
+                      base::WaitableEvent::InitialState::NOT_SIGNALED),
+      file_operation_(base::WaitableEvent::ResetPolicy::MANUAL,
+                      base::WaitableEvent::InitialState::NOT_SIGNALED) {}
+
+bool CancelableSyncSocket::Shutdown() {
+  // This doesn't shut down the pipe immediately, but subsequent Receive or Send
+  // methods will fail straight away.
+  shutdown_event_.Signal();
+  return true;
+}
+
+bool CancelableSyncSocket::Close() {
+  const bool result = SyncSocket::Close();
+  shutdown_event_.Reset();
+  return result;
+}
+
+size_t CancelableSyncSocket::Send(const void* buffer, size_t length) {
+  static const DWORD kWaitTimeOutInMs = 500;
+  return CancelableFileOperation(
+      &::WriteFile, handle_, reinterpret_cast<const char*>(buffer), length,
+      &file_operation_, &shutdown_event_, this, kWaitTimeOutInMs);
+}
+
+size_t CancelableSyncSocket::Receive(void* buffer, size_t length) {
+  return CancelableFileOperation(
+      &::ReadFile, handle_, reinterpret_cast<char*>(buffer), length,
+      &file_operation_, &shutdown_event_, this, INFINITE);
+}
+
+size_t CancelableSyncSocket::ReceiveWithTimeout(void* buffer,
+                                                size_t length,
+                                                TimeDelta timeout) {
+  return CancelableFileOperation(&::ReadFile, handle_,
+                                 reinterpret_cast<char*>(buffer), length,
+                                 &file_operation_, &shutdown_event_, this,
+                                 static_cast<DWORD>(timeout.InMilliseconds()));
+}
+
+// static
+bool CancelableSyncSocket::CreatePair(CancelableSyncSocket* socket_a,
+                                      CancelableSyncSocket* socket_b) {
+  return CreatePairImpl(&socket_a->handle_, &socket_b->handle_, true);
+}
+
+}  // namespace base
diff --git a/base/synchronization/atomic_flag.cc b/base/synchronization/atomic_flag.cc
new file mode 100644
index 0000000..8c2018d
--- /dev/null
+++ b/base/synchronization/atomic_flag.cc
@@ -0,0 +1,32 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/synchronization/atomic_flag.h"
+
+#include "base/logging.h"
+
+namespace base {
+
+AtomicFlag::AtomicFlag() {
+  // It doesn't matter where the AtomicFlag is built so long as it's always
+  // Set() from the same sequence afterwards. Note: the sequencing requirements
+  // are necessary for IsSet()'s callers to know which sequence's memory
+  // operations they are synchronized with.
+  set_sequence_checker_.DetachFromSequence();
+}
+
+void AtomicFlag::Set() {
+  DCHECK(set_sequence_checker_.CalledOnValidSequence());
+  base::subtle::Release_Store(&flag_, 1);
+}
+
+bool AtomicFlag::IsSet() const {
+  return base::subtle::Acquire_Load(&flag_) != 0;
+}
+
+void AtomicFlag::UnsafeResetForTesting() {
+  base::subtle::Release_Store(&flag_, 0);
+}
+
+}  // namespace base
diff --git a/base/synchronization/atomic_flag.h b/base/synchronization/atomic_flag.h
new file mode 100644
index 0000000..ff175e1
--- /dev/null
+++ b/base/synchronization/atomic_flag.h
@@ -0,0 +1,44 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SYNCHRONIZATION_ATOMIC_FLAG_H_
+#define BASE_SYNCHRONIZATION_ATOMIC_FLAG_H_
+
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/sequence_checker.h"
+
+namespace base {
+
+// A flag that can safely be set from one thread and read from other threads.
+//
+// This class IS NOT intended for synchronization between threads.
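+//
+// A minimal sketch (illustrative only, not part of this change):
+//
+//   base::AtomicFlag cancelled;
+//   // Worker thread: poll the flag between units of work.
+//   while (!cancelled.IsSet())
+//     DoUnitOfWork();  // Hypothetical unit of work.
+//   // Owning sequence: request cancellation.
+//   cancelled.Set();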
+class BASE_EXPORT AtomicFlag {
+ public:
+  AtomicFlag();
+  ~AtomicFlag() = default;
+
+  // Set the flag. Must always be called from the same sequence.
+  void Set();
+
+  // Returns true iff the flag was set. If this returns true, the current
+  // thread is guaranteed to be synchronized with all memory operations on the
+  // sequence which invoked Set() up until at least the first call to Set() on
+  // it.
+  bool IsSet() const;
+
+  // Resets the flag. Be careful when using this: callers might not expect
+  // IsSet() to return false after returning true once.
+  void UnsafeResetForTesting();
+
+ private:
+  base::subtle::Atomic32 flag_ = 0;
+  SequenceChecker set_sequence_checker_;
+
+  DISALLOW_COPY_AND_ASSIGN(AtomicFlag);
+};
+
+}  // namespace base
+
+#endif  // BASE_SYNCHRONIZATION_ATOMIC_FLAG_H_
diff --git a/base/synchronization/atomic_flag_unittest.cc b/base/synchronization/atomic_flag_unittest.cc
new file mode 100644
index 0000000..f7daafa
--- /dev/null
+++ b/base/synchronization/atomic_flag_unittest.cc
@@ -0,0 +1,135 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/synchronization/atomic_flag.h"
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/test/gtest_util.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+void ExpectSetFlagDeath(AtomicFlag* flag) {
+  ASSERT_TRUE(flag);
+  EXPECT_DCHECK_DEATH(flag->Set());
+}
+
+// Busy waits (to explicitly avoid using synchronization constructs that would
+// defeat the purpose of testing atomics) until |tested_flag| is set and then
+// verifies that non-atomic |*expected_after_flag| is true and sets |*done_flag|
+// before returning if it's non-null.
+void BusyWaitUntilFlagIsSet(AtomicFlag* tested_flag, bool* expected_after_flag,
+                            AtomicFlag* done_flag) {
+  while (!tested_flag->IsSet())
+    PlatformThread::YieldCurrentThread();
+
+  EXPECT_TRUE(*expected_after_flag);
+  if (done_flag)
+    done_flag->Set();
+}
+
+}  // namespace
+
+TEST(AtomicFlagTest, SimpleSingleThreadedTest) {
+  AtomicFlag flag;
+  ASSERT_FALSE(flag.IsSet());
+  flag.Set();
+  ASSERT_TRUE(flag.IsSet());
+}
+
+TEST(AtomicFlagTest, DoubleSetTest) {
+  AtomicFlag flag;
+  ASSERT_FALSE(flag.IsSet());
+  flag.Set();
+  ASSERT_TRUE(flag.IsSet());
+  flag.Set();
+  ASSERT_TRUE(flag.IsSet());
+}
+
+TEST(AtomicFlagTest, ReadFromDifferentThread) {
+  // |tested_flag| is the one being tested below.
+  AtomicFlag tested_flag;
+  // |expected_after_flag| is used to confirm that sequential consistency is
+  // obtained around |tested_flag|.
+  bool expected_after_flag = false;
+  // |reset_flag| is used to confirm the test flows as intended without using
+  // synchronization constructs which would defeat the purpose of exercising
+  // atomics.
+  AtomicFlag reset_flag;
+
+  Thread thread("AtomicFlagTest.ReadFromDifferentThread");
+  ASSERT_TRUE(thread.Start());
+  thread.task_runner()->PostTask(FROM_HERE,
+                                 BindOnce(&BusyWaitUntilFlagIsSet, &tested_flag,
+                                          &expected_after_flag, &reset_flag));
+
+  // To verify that IsSet() fetches the flag's value from memory every time it
+  // is called (not just the first time that it is called on a thread), sleep
+  // before setting the flag.
+  PlatformThread::Sleep(TimeDelta::FromMilliseconds(20));
+
+  // |expected_after_flag| is used to verify that all memory operations
+  // performed before |tested_flag| is Set() are visible to threads that can see
+  // IsSet().
+  expected_after_flag = true;
+  tested_flag.Set();
+
+  // Sleep again to give the busy loop time to observe the flag and verify
+  // expectations.
+  PlatformThread::Sleep(TimeDelta::FromMilliseconds(20));
+
+  // Use |reset_flag| to confirm that the above completed (which the rest of
+  // this test assumes).
+  while (!reset_flag.IsSet())
+    PlatformThread::YieldCurrentThread();
+
+  tested_flag.UnsafeResetForTesting();
+  EXPECT_FALSE(tested_flag.IsSet());
+  expected_after_flag = false;
+
+  // Perform the same test again after the controlled UnsafeResetForTesting(),
+  // |thread| is guaranteed to be synchronized past the
+  // |UnsafeResetForTesting()| call when the task runs per the implicit
+  // synchronization in the post task mechanism.
+  thread.task_runner()->PostTask(FROM_HERE,
+                                 BindOnce(&BusyWaitUntilFlagIsSet, &tested_flag,
+                                          &expected_after_flag, nullptr));
+
+  PlatformThread::Sleep(TimeDelta::FromMilliseconds(20));
+
+  expected_after_flag = true;
+  tested_flag.Set();
+
+  // The |thread|'s destructor will block until the posted task completes, so
+  // the test will time out if it fails to see the flag be set.
+}
+
+TEST(AtomicFlagTest, SetOnDifferentSequenceDeathTest) {
+  // Checks that Set() can't be called from another sequence after being called
+  // on this one. AtomicFlag should die on a DCHECK if Set() is called again
+  // from another sequence.
+
+  // Note: flag must be declared before the Thread so that its destructor runs
+  // later. Otherwise there's a race between destructing flag and running
+  // ExpectSetFlagDeath.
+  AtomicFlag flag;
+
+  ::testing::FLAGS_gtest_death_test_style = "threadsafe";
+  Thread t("AtomicFlagTest.SetOnDifferentThreadDeathTest");
+  ASSERT_TRUE(t.Start());
+  EXPECT_TRUE(t.WaitUntilThreadStarted());
+
+  flag.Set();
+  t.task_runner()->PostTask(FROM_HERE, BindOnce(&ExpectSetFlagDeath, &flag));
+}
+
+}  // namespace base
diff --git a/base/synchronization/cancellation_flag.h b/base/synchronization/cancellation_flag.h
new file mode 100644
index 0000000..39094e2
--- /dev/null
+++ b/base/synchronization/cancellation_flag.h
@@ -0,0 +1,20 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SYNCHRONIZATION_CANCELLATION_FLAG_H_
+#define BASE_SYNCHRONIZATION_CANCELLATION_FLAG_H_
+
+#include "base/synchronization/atomic_flag.h"
+
+namespace base {
+
+// Use inheritance instead of "using" to allow forward declaration of "class
+// CancellationFlag".
+// TODO(fdoray): Replace CancellationFlag with AtomicFlag throughout the
+// codebase and delete this file. crbug.com/630251
+class CancellationFlag : public AtomicFlag {};
+
+}  // namespace base
+
+#endif  // BASE_SYNCHRONIZATION_CANCELLATION_FLAG_H_
diff --git a/base/synchronization/condition_variable.h b/base/synchronization/condition_variable.h
new file mode 100644
index 0000000..dfcf813
--- /dev/null
+++ b/base/synchronization/condition_variable.h
@@ -0,0 +1,124 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// ConditionVariable wraps pthreads condition variable synchronization or, on
+// Windows, simulates it.  This functionality is very helpful for having
+// several threads wait for an event, as is common with a thread pool managed
+// by a master.  The meaning of such an event in the (worker) thread pool
+// scenario is that additional tasks are now available for processing.  It is
+// used in Chrome in the DNS prefetching system to notify worker threads that
+// a queue now has items (tasks) which need to be tended to.  A related use
+// would have a pool manager waiting on a ConditionVariable, waiting for a
+// thread in the pool to announce (signal) that there is now more room in a
+// (bounded size) communications queue for the manager to deposit tasks, or,
+// as a second example, that the queue of tasks is completely empty and all
+// workers are waiting.
+//
+// USAGE NOTE 1: spurious signal events are possible with this and
+// most implementations of condition variables.  As a result, be
+// *sure* to retest your condition before proceeding.  The following
+// is a good example of doing this correctly:
+//
+// while (!work_to_be_done()) Wait(...);
+//
+// In contrast do NOT do the following:
+//
+// if (!work_to_be_done()) Wait(...);  // Don't do this.
+//
+// Especially avoid the above if you are relying on some other thread only
+// issuing a signal *if* there is work to do.  There can/will
+// be spurious signals.  Recheck state on waiting thread before
+// assuming the signal was intentional. Caveat caller ;-).
+//
+// USAGE NOTE 2: Broadcast() frees up all waiting threads at once,
+// which leads to contention for the locks they all held when they
+// called Wait().  This results in POOR performance.  A much better
+// approach to getting a lot of threads out of Wait() is to have each
+// thread (upon exiting Wait()) call Signal() to free up another
+// Wait'ing thread.  Look at condition_variable_unittest.cc for
+// both examples.
+//
+// Broadcast() can be used nicely during teardown, as it gets the job
+// done, and leaves no sleeping threads... and performance is less
+// critical at that point.
+//
+// The semantics of Broadcast() are carefully crafted so that *all*
+// threads that were waiting when the request was made will indeed
+// get signaled.  Some implementations mess up, and don't signal them
+// all, while others allow the wait to be effectively turned off (for
+// a time, while waiting threads come around).  This implementation
+// appears correct, as it will not "lose" any signals, and will guarantee
+// that all threads get signaled by Broadcast().
+//
+// This implementation offers support for "performance" in its selection of
+// which thread to revive.  Performance, in direct contrast with "fairness,"
+// assures that the thread that most recently began to Wait() is selected by
+// Signal to revive.  Fairness would (if publicly supported) assure that the
+// thread that has Wait()ed the longest is selected. The default policy
+// may improve performance, as the selected thread may have a greater chance of
+// having some of its stack data in various CPU caches.
+//
+// For a discussion of the many very subtle implementation details, see the FAQ
+// at the end of condition_variable_win.cc.
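+//
+// A sketch of the recommended producer/consumer pattern (illustrative only,
+// not part of this change; Task and |queue| are hypothetical):
+//
+//   Lock lock;
+//   ConditionVariable cv(&lock);
+//   std::deque<Task> queue;
+//
+//   // Consumer:
+//   {
+//     AutoLock auto_lock(lock);
+//     while (queue.empty())  // Retest the condition: wakeups can be spurious.
+//       cv.Wait();
+//     Task task = std::move(queue.front());
+//     queue.pop_front();
+//   }
+//
+//   // Producer:
+//   {
+//     AutoLock auto_lock(lock);
+//     queue.push_back(std::move(task));
+//   }
+//   cv.Signal();  // Wake one waiter; use Broadcast() to wake them all.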
+
+#ifndef BASE_SYNCHRONIZATION_CONDITION_VARIABLE_H_
+#define BASE_SYNCHRONIZATION_CONDITION_VARIABLE_H_
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include <pthread.h>
+#endif
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/synchronization/lock.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include "base/win/windows_types.h"
+#endif
+
+namespace base {
+
+class TimeDelta;
+
+class BASE_EXPORT ConditionVariable {
+ public:
+  // Construct a cv for use with ONLY one user lock.
+  explicit ConditionVariable(Lock* user_lock);
+
+  ~ConditionVariable();
+
+  // Wait() releases the caller's critical section atomically as it starts to
+  // sleep, and then reacquires it when it is signaled. The wait functions are
+  // susceptible to spurious wakeups. (See usage note 1 for more details.)
+  void Wait();
+  void TimedWait(const TimeDelta& max_time);
+
+  // Broadcast() revives all waiting threads. (See usage note 2 for more
+  // details.)
+  void Broadcast();
+  // Signal() revives one waiting thread.
+  void Signal();
+
+ private:
+#if defined(OS_WIN)
+  CHROME_CONDITION_VARIABLE cv_;
+  CHROME_SRWLOCK* const srwlock_;
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  pthread_cond_t condition_;
+  pthread_mutex_t* user_mutex_;
+#endif
+
+#if DCHECK_IS_ON()
+  base::Lock* const user_lock_;  // Needed to adjust shadow lock state on wait.
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
+};
+
+}  // namespace base
+
+#endif  // BASE_SYNCHRONIZATION_CONDITION_VARIABLE_H_
diff --git a/base/synchronization/condition_variable_posix.cc b/base/synchronization/condition_variable_posix.cc
new file mode 100644
index 0000000..f263252
--- /dev/null
+++ b/base/synchronization/condition_variable_posix.cc
@@ -0,0 +1,142 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/synchronization/condition_variable.h"
+
+#include <errno.h>
+#include <stdint.h>
+#include <sys/time.h>
+
+#include "base/synchronization/lock.h"
+#include "base/threading/scoped_blocking_call.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+
+namespace base {
+
+ConditionVariable::ConditionVariable(Lock* user_lock)
+    : user_mutex_(user_lock->lock_.native_handle())
+#if DCHECK_IS_ON()
+    , user_lock_(user_lock)
+#endif
+{
+  int rv = 0;
+  // http://crbug.com/293736
+  // NaCl doesn't support monotonic clock based absolute deadlines.
+  // On older Android platform versions, it's supported through the
+  // non-standard pthread_cond_timedwait_monotonic_np. Newer platform
+  // versions have pthread_condattr_setclock.
+  // Mac can use relative time deadlines.
+#if !defined(OS_MACOSX) && !defined(OS_NACL) && \
+      !(defined(OS_ANDROID) && defined(HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC))
+  pthread_condattr_t attrs;
+  rv = pthread_condattr_init(&attrs);
+  DCHECK_EQ(0, rv);
+  pthread_condattr_setclock(&attrs, CLOCK_MONOTONIC);
+  rv = pthread_cond_init(&condition_, &attrs);
+  pthread_condattr_destroy(&attrs);
+#else
+  rv = pthread_cond_init(&condition_, NULL);
+#endif
+  DCHECK_EQ(0, rv);
+}
+
+ConditionVariable::~ConditionVariable() {
+#if defined(OS_MACOSX)
+  // This hack is necessary to avoid a fatal pthreads subsystem bug in the
+  // Darwin kernel. http://crbug.com/517681.
+  {
+    base::Lock lock;
+    base::AutoLock l(lock);
+    struct timespec ts;
+    ts.tv_sec = 0;
+    ts.tv_nsec = 1;
+    pthread_cond_timedwait_relative_np(&condition_, lock.lock_.native_handle(),
+                                       &ts);
+  }
+#endif
+
+  int rv = pthread_cond_destroy(&condition_);
+  DCHECK_EQ(0, rv);
+}
+
+void ConditionVariable::Wait() {
+  internal::AssertBaseSyncPrimitivesAllowed();
+  ScopedBlockingCall scoped_blocking_call(BlockingType::MAY_BLOCK);
+#if DCHECK_IS_ON()
+  user_lock_->CheckHeldAndUnmark();
+#endif
+  int rv = pthread_cond_wait(&condition_, user_mutex_);
+  DCHECK_EQ(0, rv);
+#if DCHECK_IS_ON()
+  user_lock_->CheckUnheldAndMark();
+#endif
+}
+
+void ConditionVariable::TimedWait(const TimeDelta& max_time) {
+  internal::AssertBaseSyncPrimitivesAllowed();
+  ScopedBlockingCall scoped_blocking_call(BlockingType::MAY_BLOCK);
+  int64_t usecs = max_time.InMicroseconds();
+  struct timespec relative_time;
+  relative_time.tv_sec = usecs / Time::kMicrosecondsPerSecond;
+  relative_time.tv_nsec =
+      (usecs % Time::kMicrosecondsPerSecond) * Time::kNanosecondsPerMicrosecond;
+
+#if DCHECK_IS_ON()
+  user_lock_->CheckHeldAndUnmark();
+#endif
+
+#if defined(OS_MACOSX)
+  int rv = pthread_cond_timedwait_relative_np(
+      &condition_, user_mutex_, &relative_time);
+#else
+  // The timeout argument to pthread_cond_timedwait is in absolute time.
+  struct timespec absolute_time;
+#if defined(OS_NACL)
+  // See comment in constructor for why this is different in NaCl.
+  struct timeval now;
+  gettimeofday(&now, NULL);
+  absolute_time.tv_sec = now.tv_sec;
+  absolute_time.tv_nsec = now.tv_usec * Time::kNanosecondsPerMicrosecond;
+#else
+  struct timespec now;
+  clock_gettime(CLOCK_MONOTONIC, &now);
+  absolute_time.tv_sec = now.tv_sec;
+  absolute_time.tv_nsec = now.tv_nsec;
+#endif
+
+  absolute_time.tv_sec += relative_time.tv_sec;
+  absolute_time.tv_nsec += relative_time.tv_nsec;
+  absolute_time.tv_sec += absolute_time.tv_nsec / Time::kNanosecondsPerSecond;
+  absolute_time.tv_nsec %= Time::kNanosecondsPerSecond;
+  DCHECK_GE(absolute_time.tv_sec, now.tv_sec);  // Overflow paranoia
+
+#if defined(OS_ANDROID) && defined(HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC)
+  int rv = pthread_cond_timedwait_monotonic_np(
+      &condition_, user_mutex_, &absolute_time);
+#else
+  int rv = pthread_cond_timedwait(&condition_, user_mutex_, &absolute_time);
+#endif  // OS_ANDROID && HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC
+#endif  // OS_MACOSX
+
+  // On failure, we only expect the CV to timeout. Any other error value means
+  // that we've unexpectedly woken up.
+  DCHECK(rv == 0 || rv == ETIMEDOUT);
+#if DCHECK_IS_ON()
+  user_lock_->CheckUnheldAndMark();
+#endif
+}
+
+void ConditionVariable::Broadcast() {
+  int rv = pthread_cond_broadcast(&condition_);
+  DCHECK_EQ(0, rv);
+}
+
+void ConditionVariable::Signal() {
+  int rv = pthread_cond_signal(&condition_);
+  DCHECK_EQ(0, rv);
+}
+
+}  // namespace base
diff --git a/base/synchronization/condition_variable_unittest.cc b/base/synchronization/condition_variable_unittest.cc
new file mode 100644
index 0000000..705257a
--- /dev/null
+++ b/base/synchronization/condition_variable_unittest.cc
@@ -0,0 +1,768 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Multi-threaded tests of ConditionVariable class.
+
+#include "base/synchronization/condition_variable.h"
+
+#include <time.h>
+
+#include <algorithm>
+#include <memory>
+#include <vector>
+
+#include "base/bind.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/spin_wait.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_collision_warner.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/platform_test.h"
+
+namespace base {
+
+namespace {
+//------------------------------------------------------------------------------
+// Define our test class, with several common variables.
+//------------------------------------------------------------------------------
+
+class ConditionVariableTest : public PlatformTest {
+ public:
+  const TimeDelta kZeroMs;
+  const TimeDelta kTenMs;
+  const TimeDelta kThirtyMs;
+  const TimeDelta kFortyFiveMs;
+  const TimeDelta kSixtyMs;
+  const TimeDelta kOneHundredMs;
+
+  ConditionVariableTest()
+      : kZeroMs(TimeDelta::FromMilliseconds(0)),
+        kTenMs(TimeDelta::FromMilliseconds(10)),
+        kThirtyMs(TimeDelta::FromMilliseconds(30)),
+        kFortyFiveMs(TimeDelta::FromMilliseconds(45)),
+        kSixtyMs(TimeDelta::FromMilliseconds(60)),
+        kOneHundredMs(TimeDelta::FromMilliseconds(100)) {
+  }
+};
+
+//------------------------------------------------------------------------------
+// Define a class that will control activities in several multi-threaded tests.
+// The general structure of multi-threaded tests is that a test case will
+// construct an instance of a WorkQueue.  The WorkQueue will spin up some
+// threads and control them throughout their lifetime, as well as maintaining
+// a central repository of the worker threads' activity.  Finally, the
+// WorkQueue will command the worker threads to terminate.  At that point, the
+// test cases will validate that the WorkQueue has records showing that the
+// desired activities were performed.
+//------------------------------------------------------------------------------
+
+// Callers are responsible for synchronizing access to the following class.
+// The WorkQueue::lock_, as accessed via WorkQueue::lock(), should be used for
+// all synchronized access.
+class WorkQueue : public PlatformThread::Delegate {
+ public:
+  explicit WorkQueue(int thread_count);
+  ~WorkQueue() override;
+
+  // PlatformThread::Delegate interface.
+  void ThreadMain() override;
+
+  //----------------------------------------------------------------------------
+  // Worker threads only call the following methods.
+  // They should use the lock to get exclusive access.
+  int GetThreadId();  // Get an ID assigned to a thread.
+  bool EveryIdWasAllocated() const;  // Indicates that all IDs were handed out.
+  TimeDelta GetAnAssignment(int thread_id);  // Get a work task duration.
+  void WorkIsCompleted(int thread_id);
+
+  int task_count() const;
+  bool allow_help_requests() const;  // Workers can signal more workers.
+  bool shutdown() const;  // Check if shutdown has been requested.
+
+  void thread_shutting_down();
+
+  //----------------------------------------------------------------------------
+  // Worker threads can call these without needing to acquire the lock.
+  Lock* lock();
+
+  ConditionVariable* work_is_available();
+  ConditionVariable* all_threads_have_ids();
+  ConditionVariable* no_more_tasks();
+
+  //----------------------------------------------------------------------------
+  // The rest of the methods are for use by the controlling master thread (the
+  // test case code).
+  void ResetHistory();
+  int GetMinCompletionsByWorkerThread() const;
+  int GetMaxCompletionsByWorkerThread() const;
+  int GetNumThreadsTakingAssignments() const;
+  int GetNumThreadsCompletingTasks() const;
+  int GetNumberOfCompletedTasks() const;
+
+  void SetWorkTime(TimeDelta delay);
+  void SetTaskCount(int count);
+  void SetAllowHelp(bool allow);
+
+  // The following must be called without locking, and will spin wait until the
+  // threads are all in a wait state.
+  void SpinUntilAllThreadsAreWaiting();
+  void SpinUntilTaskCountLessThan(int task_count);
+
+  // Caller must acquire lock before calling.
+  void SetShutdown();
+
+  // Compares the |shutdown_task_count_| to the |thread_count| and returns true
+  // if they are equal.  This check will acquire the |lock_| so the caller
+  // should not hold the lock when calling this method.
+  bool ThreadSafeCheckShutdown(int thread_count);
+
+ private:
+  // Both worker threads and controller use the following to synchronize.
+  Lock lock_;
+  ConditionVariable work_is_available_;  // To tell threads there is work.
+
+  // Conditions to notify the controlling process (if it is interested).
+  ConditionVariable all_threads_have_ids_;  // All threads are running.
+  ConditionVariable no_more_tasks_;  // Task count is zero.
+
+  const int thread_count_;
+  int waiting_thread_count_;
+  std::unique_ptr<PlatformThreadHandle[]> thread_handles_;
+  std::vector<int> assignment_history_;  // Number of assignments per worker.
+  std::vector<int> completion_history_;  // Number of completions per worker.
+  int thread_started_counter_;  // Used to issue unique id to workers.
+  int shutdown_task_count_;  // Number of threads that have acknowledged shutdown.
+  int task_count_;  // Number of assignment tasks waiting to be processed.
+  TimeDelta worker_delay_;  // Time each task takes to complete.
+  bool allow_help_requests_;  // Workers can signal more workers.
+  bool shutdown_;  // Set when threads need to terminate.
+
+  DFAKE_MUTEX(locked_methods_);
+};
+
+//------------------------------------------------------------------------------
+// The next section contains the actual tests.
+//------------------------------------------------------------------------------
+
+TEST_F(ConditionVariableTest, StartupShutdownTest) {
+  Lock lock;
+
+  // First try trivial startup/shutdown.
+  {
+    ConditionVariable cv1(&lock);
+  }  // Call for cv1 destruction.
+
+  // Exercise with at least a few waits.
+  ConditionVariable cv(&lock);
+
+  lock.Acquire();
+  cv.TimedWait(kTenMs);  // Wait for 10 ms.
+  cv.TimedWait(kTenMs);  // Wait for 10 ms.
+  lock.Release();
+
+  lock.Acquire();
+  cv.TimedWait(kTenMs);  // Wait for 10 ms.
+  cv.TimedWait(kTenMs);  // Wait for 10 ms.
+  cv.TimedWait(kTenMs);  // Wait for 10 ms.
+  lock.Release();
+}  // Call for cv destruction.
+
+TEST_F(ConditionVariableTest, TimeoutTest) {
+  Lock lock;
+  ConditionVariable cv(&lock);
+  lock.Acquire();
+
+  TimeTicks start = TimeTicks::Now();
+  const TimeDelta kWaitTime = TimeDelta::FromMilliseconds(300);
+  // Allow for clock rate granularity.
+  const TimeDelta kFudgeTime = TimeDelta::FromMilliseconds(50);
+
+  cv.TimedWait(kWaitTime + kFudgeTime);
+  TimeDelta duration = TimeTicks::Now() - start;
+  // We can't use EXPECT_GE here as the TimeDelta class does not support the
+  // required stream conversion.
+  EXPECT_TRUE(duration >= kWaitTime);
+
+  lock.Release();
+}
+
+#if defined(OS_POSIX) && !defined(OS_FUCHSIA)
+const int kDiscontinuitySeconds = 2;
+
+void BackInTime(Lock* lock) {
+  AutoLock auto_lock(*lock);
+
+  timeval tv;
+  gettimeofday(&tv, nullptr);
+  tv.tv_sec -= kDiscontinuitySeconds;
+  settimeofday(&tv, nullptr);
+}
+
+// Tests that TimedWait ignores changes to the system clock.
+// Test is disabled by default, because it needs to run as root to muck with the
+// system clock.
+// http://crbug.com/293736
+TEST_F(ConditionVariableTest, DISABLED_TimeoutAcrossSetTimeOfDay) {
+  timeval tv;
+  gettimeofday(&tv, nullptr);
+  tv.tv_sec += kDiscontinuitySeconds;
+  if (settimeofday(&tv, nullptr) < 0) {
+    PLOG(ERROR) << "Could not set time of day. Run as root?";
+    return;
+  }
+
+  Lock lock;
+  ConditionVariable cv(&lock);
+  lock.Acquire();
+
+  Thread thread("Helper");
+  thread.Start();
+  thread.task_runner()->PostTask(FROM_HERE, base::BindOnce(&BackInTime, &lock));
+
+  TimeTicks start = TimeTicks::Now();
+  const TimeDelta kWaitTime = TimeDelta::FromMilliseconds(300);
+  // Allow for clock rate granularity.
+  const TimeDelta kFudgeTime = TimeDelta::FromMilliseconds(50);
+
+  cv.TimedWait(kWaitTime + kFudgeTime);
+  TimeDelta duration = TimeTicks::Now() - start;
+
+  thread.Stop();
+  // We can't use EXPECT_GE here as the TimeDelta class does not support the
+  // required stream conversion.
+  EXPECT_TRUE(duration >= kWaitTime);
+  EXPECT_TRUE(duration <= TimeDelta::FromSeconds(kDiscontinuitySeconds));
+
+  lock.Release();
+}
+#endif  // defined(OS_POSIX) && !defined(OS_FUCHSIA)
+
+// Suddenly got flaky on Win, see http://crbug.com/10607 (starting at
+// comment #15).
+#if defined(OS_WIN)
+#define MAYBE_MultiThreadConsumerTest DISABLED_MultiThreadConsumerTest
+#else
+#define MAYBE_MultiThreadConsumerTest MultiThreadConsumerTest
+#endif
+// Test serial task servicing, as well as two parallel task servicing methods.
+TEST_F(ConditionVariableTest, MAYBE_MultiThreadConsumerTest) {
+  const int kThreadCount = 10;
+  WorkQueue queue(kThreadCount);  // Start the threads.
+
+  const int kTaskCount = 10;  // Number of tasks in each mini-test here.
+
+  Time start_time;  // Used to time task processing.
+
+  {
+    base::AutoLock auto_lock(*queue.lock());
+    while (!queue.EveryIdWasAllocated())
+      queue.all_threads_have_ids()->Wait();
+  }
+
+  // If threads aren't in a wait state, they may start to gobble up tasks in
+  // parallel, short-circuiting (breaking) this test.
+  queue.SpinUntilAllThreadsAreWaiting();
+
+  {
+    // Since we have no tasks yet, all threads should be waiting by now.
+    base::AutoLock auto_lock(*queue.lock());
+    EXPECT_EQ(0, queue.GetNumThreadsTakingAssignments());
+    EXPECT_EQ(0, queue.GetNumThreadsCompletingTasks());
+    EXPECT_EQ(0, queue.task_count());
+    EXPECT_EQ(0, queue.GetMaxCompletionsByWorkerThread());
+    EXPECT_EQ(0, queue.GetMinCompletionsByWorkerThread());
+    EXPECT_EQ(0, queue.GetNumberOfCompletedTasks());
+
+    // Set up to make each task include getting help from another worker, so
+    // that the work gets done in parallel.
+    queue.ResetHistory();
+    queue.SetTaskCount(kTaskCount);
+    queue.SetWorkTime(kThirtyMs);
+    queue.SetAllowHelp(true);
+
+    start_time = Time::Now();
+  }
+
+  queue.work_is_available()->Signal();  // But each worker can signal another.
+  // Wait till we at least start to handle tasks (and we're not all waiting).
+  queue.SpinUntilTaskCountLessThan(kTaskCount);
+  // Wait to allow all the workers to get done.
+  queue.SpinUntilAllThreadsAreWaiting();
+
+  {
+    // Wait until all work tasks have at least been assigned.
+    base::AutoLock auto_lock(*queue.lock());
+    while (queue.task_count())
+      queue.no_more_tasks()->Wait();
+
+    // To avoid racy assumptions, we'll just assert that at least 2 threads
+    // did work.  We know that the first worker should have gone to sleep, and
+    // hence a second worker should have gotten an assignment.
+    EXPECT_LE(2, queue.GetNumThreadsTakingAssignments());
+    EXPECT_EQ(kTaskCount, queue.GetNumberOfCompletedTasks());
+
+    // Broadcast to ask all workers to help; with only 3 tasks, just a few
+    // will actually do the work.
+    queue.ResetHistory();
+    queue.SetTaskCount(3);
+    queue.SetWorkTime(kThirtyMs);
+    queue.SetAllowHelp(false);
+  }
+  queue.work_is_available()->Broadcast();  // Make them all try.
+  // Wait till we at least start to handle tasks (and we're not all waiting).
+  queue.SpinUntilTaskCountLessThan(3);
+  // Wait to allow the 3 workers to get done.
+  queue.SpinUntilAllThreadsAreWaiting();
+
+  {
+    base::AutoLock auto_lock(*queue.lock());
+    EXPECT_EQ(3, queue.GetNumThreadsTakingAssignments());
+    EXPECT_EQ(3, queue.GetNumThreadsCompletingTasks());
+    EXPECT_EQ(0, queue.task_count());
+    EXPECT_EQ(1, queue.GetMaxCompletionsByWorkerThread());
+    EXPECT_EQ(0, queue.GetMinCompletionsByWorkerThread());
+    EXPECT_EQ(3, queue.GetNumberOfCompletedTasks());
+
+    // Set up to make each task get help from another worker.
+    queue.ResetHistory();
+    queue.SetTaskCount(3);
+    queue.SetWorkTime(kThirtyMs);
+    queue.SetAllowHelp(true);  // Allow (unnecessary) help requests.
+  }
+  queue.work_is_available()->Broadcast();  // Signal all threads.
+  // Wait till we at least start to handle tasks (and we're not all waiting).
+  queue.SpinUntilTaskCountLessThan(3);
+  // Wait to allow the 3 workers to get done.
+  queue.SpinUntilAllThreadsAreWaiting();
+
+  {
+    base::AutoLock auto_lock(*queue.lock());
+    EXPECT_EQ(3, queue.GetNumThreadsTakingAssignments());
+    EXPECT_EQ(3, queue.GetNumThreadsCompletingTasks());
+    EXPECT_EQ(0, queue.task_count());
+    EXPECT_EQ(1, queue.GetMaxCompletionsByWorkerThread());
+    EXPECT_EQ(0, queue.GetMinCompletionsByWorkerThread());
+    EXPECT_EQ(3, queue.GetNumberOfCompletedTasks());
+
+    // Set up to make each task get help from another worker.
+    queue.ResetHistory();
+    queue.SetTaskCount(20);  // 2 tasks per thread.
+    queue.SetWorkTime(kThirtyMs);
+    queue.SetAllowHelp(true);
+  }
+  queue.work_is_available()->Signal();  // But each worker can signal another.
+  // Wait till we at least start to handle tasks (and we're not all waiting).
+  queue.SpinUntilTaskCountLessThan(20);
+  // Wait to allow the 10 workers to get done.
+  queue.SpinUntilAllThreadsAreWaiting();  // Should take about 60 ms.
+
+  {
+    base::AutoLock auto_lock(*queue.lock());
+    EXPECT_EQ(10, queue.GetNumThreadsTakingAssignments());
+    EXPECT_EQ(10, queue.GetNumThreadsCompletingTasks());
+    EXPECT_EQ(0, queue.task_count());
+    EXPECT_EQ(20, queue.GetNumberOfCompletedTasks());
+
+    // Same as last test, but with Broadcast().
+    queue.ResetHistory();
+    queue.SetTaskCount(20);  // 2 tasks per thread.
+    queue.SetWorkTime(kThirtyMs);
+    queue.SetAllowHelp(true);
+  }
+  queue.work_is_available()->Broadcast();
+  // Wait till we at least start to handle tasks (and we're not all waiting).
+  queue.SpinUntilTaskCountLessThan(20);
+  // Wait to allow the 10 workers to get done.
+  queue.SpinUntilAllThreadsAreWaiting();  // Should take about 60 ms.
+
+  {
+    base::AutoLock auto_lock(*queue.lock());
+    EXPECT_EQ(10, queue.GetNumThreadsTakingAssignments());
+    EXPECT_EQ(10, queue.GetNumThreadsCompletingTasks());
+    EXPECT_EQ(0, queue.task_count());
+    EXPECT_EQ(20, queue.GetNumberOfCompletedTasks());
+
+    queue.SetShutdown();
+  }
+  queue.work_is_available()->Broadcast();  // Force check for shutdown.
+
+  SPIN_FOR_TIMEDELTA_OR_UNTIL_TRUE(TimeDelta::FromMinutes(1),
+                                   queue.ThreadSafeCheckShutdown(kThreadCount));
+}
+
+TEST_F(ConditionVariableTest, LargeFastTaskTest) {
+  const int kThreadCount = 200;
+  WorkQueue queue(kThreadCount);  // Start the threads.
+
+  Lock private_lock;  // Used locally for master to wait.
+  base::AutoLock private_held_lock(private_lock);
+  ConditionVariable private_cv(&private_lock);
+
+  {
+    base::AutoLock auto_lock(*queue.lock());
+    while (!queue.EveryIdWasAllocated())
+      queue.all_threads_have_ids()->Wait();
+  }
+
+  // Wait a bit more to allow threads to reach their wait state.
+  queue.SpinUntilAllThreadsAreWaiting();
+
+  {
+    // Since we have no tasks, all threads should be waiting by now.
+    base::AutoLock auto_lock(*queue.lock());
+    EXPECT_EQ(0, queue.GetNumThreadsTakingAssignments());
+    EXPECT_EQ(0, queue.GetNumThreadsCompletingTasks());
+    EXPECT_EQ(0, queue.task_count());
+    EXPECT_EQ(0, queue.GetMaxCompletionsByWorkerThread());
+    EXPECT_EQ(0, queue.GetMinCompletionsByWorkerThread());
+    EXPECT_EQ(0, queue.GetNumberOfCompletedTasks());
+
+    // Set up to make all workers do (an average of) 20 tasks.
+    queue.ResetHistory();
+    queue.SetTaskCount(20 * kThreadCount);
+    queue.SetWorkTime(kFortyFiveMs);
+    queue.SetAllowHelp(false);
+  }
+  queue.work_is_available()->Broadcast();  // Start up all threads.
+  // Wait until we've handed out all tasks.
+  {
+    base::AutoLock auto_lock(*queue.lock());
+    while (queue.task_count() != 0)
+      queue.no_more_tasks()->Wait();
+  }
+
+  // Wait till the last of the tasks complete.
+  queue.SpinUntilAllThreadsAreWaiting();
+
+  {
+    // With Broadcast(), every thread should have participated, but with
+    // racing they may not all have done equal numbers of tasks.
+    base::AutoLock auto_lock(*queue.lock());
+    EXPECT_EQ(kThreadCount, queue.GetNumThreadsTakingAssignments());
+    EXPECT_EQ(kThreadCount, queue.GetNumThreadsCompletingTasks());
+    EXPECT_EQ(0, queue.task_count());
+    EXPECT_LE(20, queue.GetMaxCompletionsByWorkerThread());
+    EXPECT_EQ(20 * kThreadCount, queue.GetNumberOfCompletedTasks());
+
+    // Set up to make all workers do (an average of) 4 tasks.
+    queue.ResetHistory();
+    queue.SetTaskCount(kThreadCount * 4);
+    queue.SetWorkTime(kFortyFiveMs);
+    queue.SetAllowHelp(true);  // Might outperform Broadcast().
+  }
+  queue.work_is_available()->Signal();  // Start up one thread.
+
+  // Wait until we've handed out all tasks
+  {
+    base::AutoLock auto_lock(*queue.lock());
+    while (queue.task_count() != 0)
+      queue.no_more_tasks()->Wait();
+  }
+
+  // Wait till the last of the tasks complete.
+  queue.SpinUntilAllThreadsAreWaiting();
+
+  {
+    // With Signal(), every thread should have participated, but with racing
+    // they may not all have done four tasks.
+    base::AutoLock auto_lock(*queue.lock());
+    EXPECT_EQ(kThreadCount, queue.GetNumThreadsTakingAssignments());
+    EXPECT_EQ(kThreadCount, queue.GetNumThreadsCompletingTasks());
+    EXPECT_EQ(0, queue.task_count());
+    EXPECT_LE(4, queue.GetMaxCompletionsByWorkerThread());
+    EXPECT_EQ(4 * kThreadCount, queue.GetNumberOfCompletedTasks());
+
+    queue.SetShutdown();
+  }
+  queue.work_is_available()->Broadcast();  // Force check for shutdown.
+
+  // Wait for shutdowns to complete.
+  SPIN_FOR_TIMEDELTA_OR_UNTIL_TRUE(TimeDelta::FromMinutes(1),
+                                   queue.ThreadSafeCheckShutdown(kThreadCount));
+}
+
+//------------------------------------------------------------------------------
+// Finally we provide the implementation for the methods in the WorkQueue class.
+//------------------------------------------------------------------------------
+
+WorkQueue::WorkQueue(int thread_count)
+  : lock_(),
+    work_is_available_(&lock_),
+    all_threads_have_ids_(&lock_),
+    no_more_tasks_(&lock_),
+    thread_count_(thread_count),
+    waiting_thread_count_(0),
+    thread_handles_(new PlatformThreadHandle[thread_count]),
+    assignment_history_(thread_count),
+    completion_history_(thread_count),
+    thread_started_counter_(0),
+    shutdown_task_count_(0),
+    task_count_(0),
+    allow_help_requests_(false),
+    shutdown_(false) {
+  EXPECT_GE(thread_count_, 1);
+  ResetHistory();
+  SetTaskCount(0);
+  SetWorkTime(TimeDelta::FromMilliseconds(30));
+
+  for (int i = 0; i < thread_count_; ++i) {
+    PlatformThreadHandle pth;
+    EXPECT_TRUE(PlatformThread::Create(0, this, &pth));
+    thread_handles_[i] = pth;
+  }
+}
+
+WorkQueue::~WorkQueue() {
+  {
+    base::AutoLock auto_lock(lock_);
+    SetShutdown();
+  }
+  work_is_available_.Broadcast();  // Tell them all to terminate.
+
+  for (int i = 0; i < thread_count_; ++i) {
+    PlatformThread::Join(thread_handles_[i]);
+  }
+  EXPECT_EQ(0, waiting_thread_count_);
+}
+
+int WorkQueue::GetThreadId() {
+  DFAKE_SCOPED_RECURSIVE_LOCK(locked_methods_);
+  DCHECK(!EveryIdWasAllocated());
+  return thread_started_counter_++;  // Give out Unique IDs.
+}
+
+bool WorkQueue::EveryIdWasAllocated() const {
+  DFAKE_SCOPED_RECURSIVE_LOCK(locked_methods_);
+  return thread_count_ == thread_started_counter_;
+}
+
+TimeDelta WorkQueue::GetAnAssignment(int thread_id) {
+  DFAKE_SCOPED_RECURSIVE_LOCK(locked_methods_);
+  DCHECK_LT(0, task_count_);
+  assignment_history_[thread_id]++;
+  if (0 == --task_count_) {
+    no_more_tasks_.Signal();
+  }
+  return worker_delay_;
+}
+
+void WorkQueue::WorkIsCompleted(int thread_id) {
+  DFAKE_SCOPED_RECURSIVE_LOCK(locked_methods_);
+  completion_history_[thread_id]++;
+}
+
+int WorkQueue::task_count() const {
+  DFAKE_SCOPED_RECURSIVE_LOCK(locked_methods_);
+  return task_count_;
+}
+
+bool WorkQueue::allow_help_requests() const {
+  DFAKE_SCOPED_RECURSIVE_LOCK(locked_methods_);
+  return allow_help_requests_;
+}
+
+bool WorkQueue::shutdown() const {
+  lock_.AssertAcquired();
+  DFAKE_SCOPED_RECURSIVE_LOCK(locked_methods_);
+  return shutdown_;
+}
+
+// Because this method is called from the test's main thread, we need to
+// actually take the lock.  Worker threads will call the thread_shutting_down()
+// method with the lock already acquired.
+bool WorkQueue::ThreadSafeCheckShutdown(int thread_count) {
+  bool all_shutdown;
+  base::AutoLock auto_lock(lock_);
+  {
+    // Declare in scope so DFAKE is guaranteed to be destroyed before AutoLock.
+    DFAKE_SCOPED_RECURSIVE_LOCK(locked_methods_);
+    all_shutdown = (shutdown_task_count_ == thread_count);
+  }
+  return all_shutdown;
+}
+
+void WorkQueue::thread_shutting_down() {
+  lock_.AssertAcquired();
+  DFAKE_SCOPED_RECURSIVE_LOCK(locked_methods_);
+  shutdown_task_count_++;
+}
+
+Lock* WorkQueue::lock() {
+  return &lock_;
+}
+
+ConditionVariable* WorkQueue::work_is_available() {
+  return &work_is_available_;
+}
+
+ConditionVariable* WorkQueue::all_threads_have_ids() {
+  return &all_threads_have_ids_;
+}
+
+ConditionVariable* WorkQueue::no_more_tasks() {
+  return &no_more_tasks_;
+}
+
+void WorkQueue::ResetHistory() {
+  for (int i = 0; i < thread_count_; ++i) {
+    assignment_history_[i] = 0;
+    completion_history_[i] = 0;
+  }
+}
+
+int WorkQueue::GetMinCompletionsByWorkerThread() const {
+  int minimum = completion_history_[0];
+  for (int i = 0; i < thread_count_; ++i)
+    minimum = std::min(minimum, completion_history_[i]);
+  return minimum;
+}
+
+int WorkQueue::GetMaxCompletionsByWorkerThread() const {
+  int maximum = completion_history_[0];
+  for (int i = 0; i < thread_count_; ++i)
+    maximum = std::max(maximum, completion_history_[i]);
+  return maximum;
+}
+
+int WorkQueue::GetNumThreadsTakingAssignments() const {
+  int count = 0;
+  for (int i = 0; i < thread_count_; ++i)
+    if (assignment_history_[i])
+      count++;
+  return count;
+}
+
+int WorkQueue::GetNumThreadsCompletingTasks() const {
+  int count = 0;
+  for (int i = 0; i < thread_count_; ++i)
+    if (completion_history_[i])
+      count++;
+  return count;
+}
+
+int WorkQueue::GetNumberOfCompletedTasks() const {
+  int total = 0;
+  for (int i = 0; i < thread_count_; ++i)
+    total += completion_history_[i];
+  return total;
+}
+
+void WorkQueue::SetWorkTime(TimeDelta delay) {
+  worker_delay_ = delay;
+}
+
+void WorkQueue::SetTaskCount(int count) {
+  task_count_ = count;
+}
+
+void WorkQueue::SetAllowHelp(bool allow) {
+  allow_help_requests_ = allow;
+}
+
+void WorkQueue::SetShutdown() {
+  lock_.AssertAcquired();
+  shutdown_ = true;
+}
+
+void WorkQueue::SpinUntilAllThreadsAreWaiting() {
+  while (true) {
+    {
+      base::AutoLock auto_lock(lock_);
+      if (waiting_thread_count_ == thread_count_)
+        break;
+    }
+    PlatformThread::Sleep(TimeDelta::FromMilliseconds(30));
+  }
+}
+
+void WorkQueue::SpinUntilTaskCountLessThan(int task_count) {
+  while (true) {
+    {
+      base::AutoLock auto_lock(lock_);
+      if (task_count_ < task_count)
+        break;
+    }
+    PlatformThread::Sleep(TimeDelta::FromMilliseconds(30));
+  }
+}
+
+//------------------------------------------------------------------------------
+// Define the standard worker task. Several tests will spin out many of these
+// threads.
+//------------------------------------------------------------------------------
+
+// The multithread tests involve several threads with a task to perform as
+// directed by an instance of the class WorkQueue.
+// The task is to:
+// a) Check to see if there are more tasks (there is a task counter).
+//    a1) Wait on condition variable if there are no tasks currently.
+// b) Call a function to see what should be done.
+// c) Do some computation based on the number of milliseconds returned in (b).
+// d) go back to (a).
+
+// WorkQueue::ThreadMain() implements the above task for all threads.
+// It calls the controlling object to tell the creator about progress, and to
+// ask about tasks.
+
+void WorkQueue::ThreadMain() {
+  int thread_id;
+  {
+    base::AutoLock auto_lock(lock_);
+    thread_id = GetThreadId();
+    if (EveryIdWasAllocated())
+      all_threads_have_ids()->Signal();  // Tell creator we're ready.
+  }
+
+  Lock private_lock;  // Used to waste time on "our work".
+  while (1) {  // This is the main consumer loop.
+    TimeDelta work_time;
+    bool could_use_help;
+    {
+      base::AutoLock auto_lock(lock_);
+      while (0 == task_count() && !shutdown()) {
+        ++waiting_thread_count_;
+        work_is_available()->Wait();
+        --waiting_thread_count_;
+      }
+      if (shutdown()) {
+        // Ack the notification of a shutdown message back to the controller.
+        thread_shutting_down();
+        return;  // Terminate.
+      }
+      // Get our task duration from the queue.
+      work_time = GetAnAssignment(thread_id);
+      could_use_help = (task_count() > 0) && allow_help_requests();
+    }  // Release lock
+
+    // Do work (outside of the locked region).
+    if (could_use_help)
+      work_is_available()->Signal();  // Get help from other threads.
+
+    if (work_time > TimeDelta::FromMilliseconds(0)) {
+      // We could just sleep(), but we'll instead further exercise the
+      // condition variable class, and do a timed wait.
+      base::AutoLock auto_lock(private_lock);
+      ConditionVariable private_cv(&private_lock);
+      private_cv.TimedWait(work_time);  // Unsynchronized waiting.
+    }
+
+    {
+      base::AutoLock auto_lock(lock_);
+      // Send notification that we completed our "work."
+      WorkIsCompleted(thread_id);
+    }
+  }
+}
+
+}  // namespace
+
+}  // namespace base
diff --git a/base/synchronization/condition_variable_win.cc b/base/synchronization/condition_variable_win.cc
new file mode 100644
index 0000000..ddaef07
--- /dev/null
+++ b/base/synchronization/condition_variable_win.cc
@@ -0,0 +1,65 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/synchronization/condition_variable.h"
+
+#include "base/synchronization/lock.h"
+#include "base/threading/scoped_blocking_call.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/time/time.h"
+
+#include <windows.h>
+
+namespace base {
+
+ConditionVariable::ConditionVariable(Lock* user_lock)
+    : srwlock_(user_lock->lock_.native_handle())
+#if DCHECK_IS_ON()
+    , user_lock_(user_lock)
+#endif
+{
+  DCHECK(user_lock);
+  InitializeConditionVariable(reinterpret_cast<PCONDITION_VARIABLE>(&cv_));
+}
+
+ConditionVariable::~ConditionVariable() = default;
+
+void ConditionVariable::Wait() {
+  TimedWait(TimeDelta::FromMilliseconds(INFINITE));
+}
+
+void ConditionVariable::TimedWait(const TimeDelta& max_time) {
+  internal::AssertBaseSyncPrimitivesAllowed();
+  ScopedBlockingCall scoped_blocking_call(BlockingType::MAY_BLOCK);
+  DWORD timeout = static_cast<DWORD>(max_time.InMilliseconds());
+
+#if DCHECK_IS_ON()
+  user_lock_->CheckHeldAndUnmark();
+#endif
+
+  if (!SleepConditionVariableSRW(reinterpret_cast<PCONDITION_VARIABLE>(&cv_),
+                                 reinterpret_cast<PSRWLOCK>(srwlock_), timeout,
+                                 0)) {
+    // On failure, we only expect the CV to time out. Any other error value means
+    // that we've unexpectedly woken up.
+    // Note that WAIT_TIMEOUT != ERROR_TIMEOUT. WAIT_TIMEOUT is used with the
+    // WaitFor* family of functions as a direct return value. ERROR_TIMEOUT is
+    // used with GetLastError().
+    DCHECK_EQ(static_cast<DWORD>(ERROR_TIMEOUT), GetLastError());
+  }
+
+#if DCHECK_IS_ON()
+  user_lock_->CheckUnheldAndMark();
+#endif
+}
+
+void ConditionVariable::Broadcast() {
+  WakeAllConditionVariable(reinterpret_cast<PCONDITION_VARIABLE>(&cv_));
+}
+
+void ConditionVariable::Signal() {
+  WakeConditionVariable(reinterpret_cast<PCONDITION_VARIABLE>(&cv_));
+}
+
+}  // namespace base
diff --git a/base/synchronization/lock.cc b/base/synchronization/lock.cc
new file mode 100644
index 0000000..03297ad
--- /dev/null
+++ b/base/synchronization/lock.cc
@@ -0,0 +1,38 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is used for debugging assertion support.  The Lock class
+// is functionally a wrapper around the LockImpl class, so the only
+// real intelligence in the class is in the debugging logic.
+
+#include "base/synchronization/lock.h"
+
+#if DCHECK_IS_ON()
+
+namespace base {
+
+Lock::Lock() : lock_() {
+}
+
+Lock::~Lock() {
+  DCHECK(owning_thread_ref_.is_null());
+}
+
+void Lock::AssertAcquired() const {
+  DCHECK(owning_thread_ref_ == PlatformThread::CurrentRef());
+}
+
+void Lock::CheckHeldAndUnmark() {
+  DCHECK(owning_thread_ref_ == PlatformThread::CurrentRef());
+  owning_thread_ref_ = PlatformThreadRef();
+}
+
+void Lock::CheckUnheldAndMark() {
+  DCHECK(owning_thread_ref_.is_null());
+  owning_thread_ref_ = PlatformThread::CurrentRef();
+}
+
+}  // namespace base
+
+#endif  // DCHECK_IS_ON()
diff --git a/base/synchronization/lock.h b/base/synchronization/lock.h
new file mode 100644
index 0000000..d1c647c
--- /dev/null
+++ b/base/synchronization/lock.h
@@ -0,0 +1,151 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SYNCHRONIZATION_LOCK_H_
+#define BASE_SYNCHRONIZATION_LOCK_H_
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/synchronization/lock_impl.h"
+#include "base/threading/platform_thread.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// A convenient wrapper for an OS specific critical section.  The only real
+// intelligence in this class is its debug-mode support for the
+// AssertAcquired() method.
+class BASE_EXPORT Lock {
+ public:
+#if !DCHECK_IS_ON()
+  // Optimized wrapper implementation.
+  Lock() : lock_() {}
+  ~Lock() {}
+  void Acquire() { lock_.Lock(); }
+  void Release() { lock_.Unlock(); }
+
+  // If the lock is not held, take it and return true. If the lock is already
+  // held by another thread, immediately return false. This must not be called
+  // by a thread already holding the lock (what happens is undefined and an
+  // assertion may fail).
+  bool Try() { return lock_.Try(); }
+
+  // Null implementation if not debug.
+  void AssertAcquired() const {}
+#else
+  Lock();
+  ~Lock();
+
+  // NOTE: We do not permit recursive locks and will commonly fire a DCHECK() if
+  // a thread attempts to acquire the lock a second time (while already holding
+  // it).
+  void Acquire() {
+    lock_.Lock();
+    CheckUnheldAndMark();
+  }
+  void Release() {
+    CheckHeldAndUnmark();
+    lock_.Unlock();
+  }
+
+  bool Try() {
+    bool rv = lock_.Try();
+    if (rv) {
+      CheckUnheldAndMark();
+    }
+    return rv;
+  }
+
+  void AssertAcquired() const;
+#endif  // DCHECK_IS_ON()
+
+  // Whether Lock mitigates priority inversion when used from different thread
+  // priorities.
+  static bool HandlesMultipleThreadPriorities() {
+#if defined(OS_WIN)
+    // Windows mitigates priority inversion by randomly boosting the priority of
+    // ready threads.
+    // https://msdn.microsoft.com/library/windows/desktop/ms684831.aspx
+    return true;
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+    // POSIX mitigates priority inversion by setting the priority of a thread
+    // holding a Lock to the maximum priority of any other thread waiting on it.
+    return internal::LockImpl::PriorityInheritanceAvailable();
+#else
+#error Unsupported platform
+#endif
+  }
+
+  // Both Windows and POSIX implementations of ConditionVariable need to be
+  // able to see our lock and tweak our debugging counters, as they release and
+  // acquire locks inside of their condition variable APIs.
+  friend class ConditionVariable;
+
+ private:
+#if DCHECK_IS_ON()
+  // Members and routines taking care of lock assertions.
+  // Note that this checks for recursive locks and allows them
+  // if the variable is set.  This is allowed by the underlying implementation
+  // on Windows but not on POSIX, so we're doing unneeded checks on POSIX.
+  // It's worth it to share the code.
+  void CheckHeldAndUnmark();
+  void CheckUnheldAndMark();
+
+  // All private data is implicitly protected by lock_.
+  // Be VERY careful to only access members under that lock.
+  base::PlatformThreadRef owning_thread_ref_;
+#endif  // DCHECK_IS_ON()
+
+  // Platform specific underlying lock implementation.
+  internal::LockImpl lock_;
+
+  DISALLOW_COPY_AND_ASSIGN(Lock);
+};
+
+// A helper class that acquires the given Lock while the AutoLock is in scope.
+class AutoLock {
+ public:
+  struct AlreadyAcquired {};
+
+  explicit AutoLock(Lock& lock) : lock_(lock) {
+    lock_.Acquire();
+  }
+
+  AutoLock(Lock& lock, const AlreadyAcquired&) : lock_(lock) {
+    lock_.AssertAcquired();
+  }
+
+  ~AutoLock() {
+    lock_.AssertAcquired();
+    lock_.Release();
+  }
+
+ private:
+  Lock& lock_;
+  DISALLOW_COPY_AND_ASSIGN(AutoLock);
+};
+
+// AutoUnlock is a helper that will Release() the |lock| argument in the
+// constructor, and re-Acquire() it in the destructor.
+class AutoUnlock {
+ public:
+  explicit AutoUnlock(Lock& lock) : lock_(lock) {
+    // We require our caller to have the lock.
+    lock_.AssertAcquired();
+    lock_.Release();
+  }
+
+  ~AutoUnlock() {
+    lock_.Acquire();
+  }
+
+ private:
+  Lock& lock_;
+  DISALLOW_COPY_AND_ASSIGN(AutoUnlock);
+};
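+
+// A brief usage sketch (illustrative only; |shared_counter| is a hypothetical
+// variable, not part of this API):
+//
+//   base::Lock lock;
+//   int shared_counter = 0;
+//   {
+//     base::AutoLock auto_lock(lock);  // Acquires |lock|.
+//     shared_counter++;                // Safe: |lock| is held.
+//     {
+//       base::AutoUnlock auto_unlock(lock);  // Temporarily releases |lock|.
+//       // ... do work that must not hold |lock| ...
+//     }  // Re-acquires |lock|.
+//   }  // Releases |lock|.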
+
+}  // namespace base
+
+#endif  // BASE_SYNCHRONIZATION_LOCK_H_
diff --git a/base/synchronization/lock_impl.h b/base/synchronization/lock_impl.h
new file mode 100644
index 0000000..221d763
--- /dev/null
+++ b/base/synchronization/lock_impl.h
@@ -0,0 +1,78 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SYNCHRONIZATION_LOCK_IMPL_H_
+#define BASE_SYNCHRONIZATION_LOCK_IMPL_H_
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include "base/win/windows_types.h"
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include <errno.h>
+#include <pthread.h>
+#include <string.h>
+#endif
+
+namespace base {
+namespace internal {
+
+// This class implements the underlying platform-specific lock mechanism
+// used for the Lock class.  Most users should not use LockImpl directly, but
+// should instead use Lock.
+class BASE_EXPORT LockImpl {
+ public:
+#if defined(OS_WIN)
+  using NativeHandle = CHROME_SRWLOCK;
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  using NativeHandle = pthread_mutex_t;
+#endif
+
+  LockImpl();
+  ~LockImpl();
+
+  // If the lock is not held, take it and return true.  If the lock is already
+  // held by something else, immediately return false.
+  bool Try();
+
+  // Take the lock, blocking until it is available if necessary.
+  void Lock();
+
+  // Release the lock.  This must only be called by the lock's holder: after
+  // a successful call to Try, or a call to Lock.
+  inline void Unlock();
+
+  // Return the native underlying lock.
+  // TODO(awalker): refactor lock and condition variables so that this is
+  // unnecessary.
+  NativeHandle* native_handle() { return &native_handle_; }
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+  // Whether this lock will attempt to use priority inheritance.
+  static bool PriorityInheritanceAvailable();
+#endif
+
+ private:
+  NativeHandle native_handle_;
+
+  DISALLOW_COPY_AND_ASSIGN(LockImpl);
+};
+
+#if defined(OS_WIN)
+void LockImpl::Unlock() {
+  ::ReleaseSRWLockExclusive(reinterpret_cast<PSRWLOCK>(&native_handle_));
+}
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+void LockImpl::Unlock() {
+  int rv = pthread_mutex_unlock(&native_handle_);
+  DCHECK_EQ(rv, 0) << ". " << strerror(rv);
+}
+#endif
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_SYNCHRONIZATION_LOCK_IMPL_H_
diff --git a/base/synchronization/lock_impl_posix.cc b/base/synchronization/lock_impl_posix.cc
new file mode 100644
index 0000000..7571f68
--- /dev/null
+++ b/base/synchronization/lock_impl_posix.cc
@@ -0,0 +1,133 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/synchronization/lock_impl.h"
+
+#include <string>
+
+#include "base/debug/activity_tracker.h"
+#include "base/logging.h"
+#include "base/posix/safe_strerror.h"
+#include "base/strings/stringprintf.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/synchronization_buildflags.h"
+#include "build/build_config.h"
+
+namespace base {
+namespace internal {
+
+namespace {
+
+#if DCHECK_IS_ON()
+const char* AdditionalHintForSystemErrorCode(int error_code) {
+  switch (error_code) {
+    case EINVAL:
+      return "Hint: This is often related to a use-after-free.";
+    default:
+      return "";
+  }
+}
+#endif  // DCHECK_IS_ON()
+
+std::string SystemErrorCodeToString(int error_code) {
+#if DCHECK_IS_ON()
+  return base::safe_strerror(error_code) + ". " +
+         AdditionalHintForSystemErrorCode(error_code);
+#else   // DCHECK_IS_ON()
+  return std::string();
+#endif  // DCHECK_IS_ON()
+}
+
+}  // namespace
+
+// Determines which platforms can consider using priority inheritance locks. Use
+// this define for platform code that may not compile if priority inheritance
+// locks aren't available. For this platform code,
+// PRIORITY_INHERITANCE_LOCKS_POSSIBLE() is a necessary but insufficient check.
+// Lock::PriorityInheritanceAvailable() must still be checked, as the code may
+// compile but the underlying platform may not correctly support priority
+// inheritance locks.
+#if defined(OS_NACL) || defined(OS_ANDROID) || defined(OS_FUCHSIA)
+#define PRIORITY_INHERITANCE_LOCKS_POSSIBLE() 0
+#else
+#define PRIORITY_INHERITANCE_LOCKS_POSSIBLE() 1
+#endif
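+
+// A sketch of the intended double check (illustrative only), mirroring the
+// use in LockImpl::LockImpl() below:
+//
+//   #if PRIORITY_INHERITANCE_LOCKS_POSSIBLE()
+//   if (LockImpl::PriorityInheritanceAvailable()) {
+//     // Safe to request PTHREAD_PRIO_INHERIT here.
+//   }
+//   #endif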
+
+LockImpl::LockImpl() {
+  pthread_mutexattr_t mta;
+  int rv = pthread_mutexattr_init(&mta);
+  DCHECK_EQ(rv, 0) << ". " << SystemErrorCodeToString(rv);
+#if PRIORITY_INHERITANCE_LOCKS_POSSIBLE()
+  if (PriorityInheritanceAvailable()) {
+    rv = pthread_mutexattr_setprotocol(&mta, PTHREAD_PRIO_INHERIT);
+    DCHECK_EQ(rv, 0) << ". " << SystemErrorCodeToString(rv);
+  }
+#endif
+#ifndef NDEBUG
+  // In debug, set up attributes for lock error checking.
+  rv = pthread_mutexattr_settype(&mta, PTHREAD_MUTEX_ERRORCHECK);
+  DCHECK_EQ(rv, 0) << ". " << SystemErrorCodeToString(rv);
+#endif
+  rv = pthread_mutex_init(&native_handle_, &mta);
+  DCHECK_EQ(rv, 0) << ". " << SystemErrorCodeToString(rv);
+  rv = pthread_mutexattr_destroy(&mta);
+  DCHECK_EQ(rv, 0) << ". " << SystemErrorCodeToString(rv);
+}
+
+LockImpl::~LockImpl() {
+  int rv = pthread_mutex_destroy(&native_handle_);
+  DCHECK_EQ(rv, 0) << ". " << SystemErrorCodeToString(rv);
+}
+
+bool LockImpl::Try() {
+  int rv = pthread_mutex_trylock(&native_handle_);
+  DCHECK(rv == 0 || rv == EBUSY) << ". " << SystemErrorCodeToString(rv);
+  return rv == 0;
+}
+
+void LockImpl::Lock() {
+  // The ScopedLockAcquireActivity below is relatively expensive and so its
+  // actions can become significant due to the very large number of locks
+  // that tend to be used throughout the build. To avoid this cost in the
+  // vast majority of the calls, simply "try" the lock first and only do the
+  // (tracked) blocking call if that fails. Since "try" itself is a system
+  // call, and thus also somewhat expensive, don't bother with it unless
+  // tracking is actually enabled.
+  if (base::debug::GlobalActivityTracker::IsEnabled())
+    if (Try())
+      return;
+
+  base::debug::ScopedLockAcquireActivity lock_activity(this);
+  int rv = pthread_mutex_lock(&native_handle_);
+  DCHECK_EQ(rv, 0) << ". " << SystemErrorCodeToString(rv);
+}
+
+// static
+bool LockImpl::PriorityInheritanceAvailable() {
+#if BUILDFLAG(ENABLE_MUTEX_PRIORITY_INHERITANCE)
+  return true;
+#elif PRIORITY_INHERITANCE_LOCKS_POSSIBLE() && defined(OS_MACOSX)
+  return true;
+#else
+  // Security concerns prevent the use of priority inheritance mutexes on Linux.
+  //   * CVE-2010-0622 - Linux < 2.6.33-rc7, wake_futex_pi possible DoS.
+  //     https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2010-0622
+  //   * CVE-2012-6647 - Linux < 3.5.1, futex_wait_requeue_pi possible DoS.
+  //     https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2012-6647
+  //   * CVE-2014-3153 - Linux <= 3.14.5, futex_requeue, privilege escalation.
+  //     https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2014-3153
+  //
+  // If the above were all addressed, we still need a runtime check to deal with
+  // the bug below.
+  //   * glibc Bug 14652: https://sourceware.org/bugzilla/show_bug.cgi?id=14652
+  //     Fixed in glibc 2.17.
+  //     Priority inheritance mutexes may deadlock with condition variables
+  //     during reacquisition of the mutex after the condition variable is
+  //     signalled.
+  return false;
+#endif
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/synchronization/lock_impl_win.cc b/base/synchronization/lock_impl_win.cc
new file mode 100644
index 0000000..e0c4e9d
--- /dev/null
+++ b/base/synchronization/lock_impl_win.cc
@@ -0,0 +1,40 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/synchronization/lock_impl.h"
+
+#include "base/debug/activity_tracker.h"
+
+#include <windows.h>
+
+namespace base {
+namespace internal {
+
+LockImpl::LockImpl() : native_handle_(SRWLOCK_INIT) {}
+
+LockImpl::~LockImpl() = default;
+
+bool LockImpl::Try() {
+  return !!::TryAcquireSRWLockExclusive(
+      reinterpret_cast<PSRWLOCK>(&native_handle_));
+}
+
+void LockImpl::Lock() {
+  // The ScopedLockAcquireActivity below is relatively expensive and so its
+  // actions can become significant due to the very large number of locks
+  // that tend to be used throughout the build. To avoid this cost in the
+  // vast majority of the calls, simply "try" the lock first and only do the
+  // (tracked) blocking call if that fails. Since "try" itself is a system
+  // call, and thus also somewhat expensive, don't bother with it unless
+  // tracking is actually enabled.
+  if (base::debug::GlobalActivityTracker::IsEnabled())
+    if (Try())
+      return;
+
+  base::debug::ScopedLockAcquireActivity lock_activity(this);
+  ::AcquireSRWLockExclusive(reinterpret_cast<PSRWLOCK>(&native_handle_));
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/synchronization/lock_unittest.cc b/base/synchronization/lock_unittest.cc
new file mode 100644
index 0000000..1e2f998
--- /dev/null
+++ b/base/synchronization/lock_unittest.cc
@@ -0,0 +1,257 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/synchronization/lock.h"
+
+#include <stdlib.h>
+
+#include "base/compiler_specific.h"
+#include "base/debug/activity_tracker.h"
+#include "base/macros.h"
+#include "base/threading/platform_thread.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+// Basic test to make sure that Acquire()/Release()/Try() don't crash ----------
+
+class BasicLockTestThread : public PlatformThread::Delegate {
+ public:
+  explicit BasicLockTestThread(Lock* lock) : lock_(lock), acquired_(0) {}
+
+  void ThreadMain() override {
+    for (int i = 0; i < 10; i++) {
+      lock_->Acquire();
+      acquired_++;
+      lock_->Release();
+    }
+    for (int i = 0; i < 10; i++) {
+      lock_->Acquire();
+      acquired_++;
+      PlatformThread::Sleep(TimeDelta::FromMilliseconds(rand() % 20));
+      lock_->Release();
+    }
+    for (int i = 0; i < 10; i++) {
+      if (lock_->Try()) {
+        acquired_++;
+        PlatformThread::Sleep(TimeDelta::FromMilliseconds(rand() % 20));
+        lock_->Release();
+      }
+    }
+  }
+
+  int acquired() const { return acquired_; }
+
+ private:
+  Lock* lock_;
+  int acquired_;
+
+  DISALLOW_COPY_AND_ASSIGN(BasicLockTestThread);
+};
+
+TEST(LockTest, Basic) {
+  Lock lock;
+  BasicLockTestThread thread(&lock);
+  PlatformThreadHandle handle;
+
+  ASSERT_TRUE(PlatformThread::Create(0, &thread, &handle));
+
+  int acquired = 0;
+  for (int i = 0; i < 5; i++) {
+    lock.Acquire();
+    acquired++;
+    lock.Release();
+  }
+  for (int i = 0; i < 10; i++) {
+    lock.Acquire();
+    acquired++;
+    PlatformThread::Sleep(TimeDelta::FromMilliseconds(rand() % 20));
+    lock.Release();
+  }
+  for (int i = 0; i < 10; i++) {
+    if (lock.Try()) {
+      acquired++;
+      PlatformThread::Sleep(TimeDelta::FromMilliseconds(rand() % 20));
+      lock.Release();
+    }
+  }
+  for (int i = 0; i < 5; i++) {
+    lock.Acquire();
+    acquired++;
+    PlatformThread::Sleep(TimeDelta::FromMilliseconds(rand() % 20));
+    lock.Release();
+  }
+
+  PlatformThread::Join(handle);
+
+  EXPECT_GE(acquired, 20);
+  EXPECT_GE(thread.acquired(), 20);
+}
+
+// Test that Try() works as expected -------------------------------------------
+
+class TryLockTestThread : public PlatformThread::Delegate {
+ public:
+  explicit TryLockTestThread(Lock* lock) : lock_(lock), got_lock_(false) {}
+
+  void ThreadMain() override {
+    got_lock_ = lock_->Try();
+    if (got_lock_)
+      lock_->Release();
+  }
+
+  bool got_lock() const { return got_lock_; }
+
+ private:
+  Lock* lock_;
+  bool got_lock_;
+
+  DISALLOW_COPY_AND_ASSIGN(TryLockTestThread);
+};
+
+TEST(LockTest, TryLock) {
+  Lock lock;
+
+  ASSERT_TRUE(lock.Try());
+  // We now have the lock....
+
+  // This thread will not be able to get the lock.
+  {
+    TryLockTestThread thread(&lock);
+    PlatformThreadHandle handle;
+
+    ASSERT_TRUE(PlatformThread::Create(0, &thread, &handle));
+
+    PlatformThread::Join(handle);
+
+    ASSERT_FALSE(thread.got_lock());
+  }
+
+  lock.Release();
+
+  // This thread will....
+  {
+    TryLockTestThread thread(&lock);
+    PlatformThreadHandle handle;
+
+    ASSERT_TRUE(PlatformThread::Create(0, &thread, &handle));
+
+    PlatformThread::Join(handle);
+
+    ASSERT_TRUE(thread.got_lock());
+    // But it released it....
+    ASSERT_TRUE(lock.Try());
+  }
+
+  lock.Release();
+}
+
+TEST(LockTest, TryTrackedLock) {
+  // Enable the activity tracker.
+  debug::GlobalActivityTracker::CreateWithLocalMemory(64 << 10, 0, "", 3, 0);
+
+  Lock lock;
+
+  ASSERT_TRUE(lock.Try());
+  // We now have the lock....
+
+  // This thread will not be able to get the lock.
+  {
+    TryLockTestThread thread(&lock);
+    PlatformThreadHandle handle;
+
+    ASSERT_TRUE(PlatformThread::Create(0, &thread, &handle));
+
+    PlatformThread::Join(handle);
+
+    ASSERT_FALSE(thread.got_lock());
+  }
+
+  lock.Release();
+
+  // This thread will....
+  {
+    TryLockTestThread thread(&lock);
+    PlatformThreadHandle handle;
+
+    ASSERT_TRUE(PlatformThread::Create(0, &thread, &handle));
+
+    PlatformThread::Join(handle);
+
+    ASSERT_TRUE(thread.got_lock());
+    // But it released it....
+    ASSERT_TRUE(lock.Try());
+  }
+
+  lock.Release();
+  debug::GlobalActivityTracker::ReleaseForTesting();
+}
+
+// Tests that locks actually exclude -------------------------------------------
+
+class MutexLockTestThread : public PlatformThread::Delegate {
+ public:
+  MutexLockTestThread(Lock* lock, int* value) : lock_(lock), value_(value) {}
+
+  // Static helper which can also be called from the main thread.
+  static void DoStuff(Lock* lock, int* value) {
+    for (int i = 0; i < 40; i++) {
+      lock->Acquire();
+      int v = *value;
+      PlatformThread::Sleep(TimeDelta::FromMilliseconds(rand() % 10));
+      *value = v + 1;
+      lock->Release();
+    }
+  }
+
+  void ThreadMain() override { DoStuff(lock_, value_); }
+
+ private:
+  Lock* lock_;
+  int* value_;
+
+  DISALLOW_COPY_AND_ASSIGN(MutexLockTestThread);
+};
+
+TEST(LockTest, MutexTwoThreads) {
+  Lock lock;
+  int value = 0;
+
+  MutexLockTestThread thread(&lock, &value);
+  PlatformThreadHandle handle;
+
+  ASSERT_TRUE(PlatformThread::Create(0, &thread, &handle));
+
+  MutexLockTestThread::DoStuff(&lock, &value);
+
+  PlatformThread::Join(handle);
+
+  EXPECT_EQ(2 * 40, value);
+}
+
+TEST(LockTest, MutexFourThreads) {
+  Lock lock;
+  int value = 0;
+
+  MutexLockTestThread thread1(&lock, &value);
+  MutexLockTestThread thread2(&lock, &value);
+  MutexLockTestThread thread3(&lock, &value);
+  PlatformThreadHandle handle1;
+  PlatformThreadHandle handle2;
+  PlatformThreadHandle handle3;
+
+  ASSERT_TRUE(PlatformThread::Create(0, &thread1, &handle1));
+  ASSERT_TRUE(PlatformThread::Create(0, &thread2, &handle2));
+  ASSERT_TRUE(PlatformThread::Create(0, &thread3, &handle3));
+
+  MutexLockTestThread::DoStuff(&lock, &value);
+
+  PlatformThread::Join(handle1);
+  PlatformThread::Join(handle2);
+  PlatformThread::Join(handle3);
+
+  EXPECT_EQ(4 * 40, value);
+}
+
+}  // namespace base
diff --git a/base/synchronization/spin_wait.h b/base/synchronization/spin_wait.h
new file mode 100644
index 0000000..9b147cd
--- /dev/null
+++ b/base/synchronization/spin_wait.h
@@ -0,0 +1,50 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file provides a macro ONLY for use in testing.
+// DO NOT USE IN PRODUCTION CODE.  There are much better ways to wait.
+
+// This code is very helpful in testing multi-threaded code, without depending
+// on almost any primitives.  This is especially useful if you are testing
+// those primitive multi-threaded constructs.
+
+// We provide a simple one argument spin wait (for 1 second), and a generic
+// spin wait (for longer periods of time).
+
+#ifndef BASE_SYNCHRONIZATION_SPIN_WAIT_H_
+#define BASE_SYNCHRONIZATION_SPIN_WAIT_H_
+
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
+
+// Provide a macro that will wait no longer than 1 second for an asynchronous
+// change in the value of an expression.
+// A typical use would be:
+//
+//   SPIN_FOR_1_SECOND_OR_UNTIL_TRUE(0 == f(x));
+//
+// The expression will be evaluated repeatedly until it is true, or until
+// the time (1 second) expires.
+// Since tests generally have a 5 second watchdog timer, this spin loop is
+// typically used to get the padding needed on a given test platform to assure
+// that the test passes, even if load varies, and external events vary.
+
+#define SPIN_FOR_1_SECOND_OR_UNTIL_TRUE(expression) \
+    SPIN_FOR_TIMEDELTA_OR_UNTIL_TRUE(base::TimeDelta::FromSeconds(1), \
+                                     (expression))
+
+#define SPIN_FOR_TIMEDELTA_OR_UNTIL_TRUE(delta, expression) \
+  do { \
+    base::TimeTicks start = base::TimeTicks::Now(); \
+    const base::TimeDelta kTimeout = delta; \
+    while (!(expression)) { \
+      if (kTimeout < base::TimeTicks::Now() - start) { \
+        EXPECT_LE((base::TimeTicks::Now() - start).InMilliseconds(), \
+                  kTimeout.InMilliseconds()) << "Timed out"; \
+        break; \
+      } \
+      base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(50)); \
+    } \
+  } while (0)
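+
+// The generic form takes an explicit deadline. For example (illustrative
+// only; AllWorkersDone() is a hypothetical predicate):
+//
+//   SPIN_FOR_TIMEDELTA_OR_UNTIL_TRUE(base::TimeDelta::FromMinutes(1),
+//                                    AllWorkersDone());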
+
+#endif  // BASE_SYNCHRONIZATION_SPIN_WAIT_H_
diff --git a/base/synchronization/waitable_event.h b/base/synchronization/waitable_event.h
new file mode 100644
index 0000000..836adc0
--- /dev/null
+++ b/base/synchronization/waitable_event.h
@@ -0,0 +1,284 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SYNCHRONIZATION_WAITABLE_EVENT_H_
+#define BASE_SYNCHRONIZATION_WAITABLE_EVENT_H_
+
+#include <stddef.h>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include "base/win/scoped_handle.h"
+#elif defined(OS_MACOSX)
+#include <mach/mach.h>
+
+#include <list>
+#include <memory>
+
+#include "base/callback_forward.h"
+#include "base/mac/scoped_mach_port.h"
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/lock.h"
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include <list>
+#include <utility>
+
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/lock.h"
+#endif
+
+namespace base {
+
+class TimeDelta;
+class TimeTicks;
+
+// A WaitableEvent can be a useful thread synchronization tool when you want to
+// allow one thread to wait for another thread to finish some work. For
+// non-Windows systems, this can only be used from within a single address
+// space.
+//
+// Use a WaitableEvent when you would otherwise use a Lock+ConditionVariable to
+// protect a simple boolean value.  However, if you find yourself using a
+// WaitableEvent in conjunction with a Lock to wait for a more complex state
+// change (e.g., for an item to be added to a queue), then you should probably
+// be using a ConditionVariable instead of a WaitableEvent.
+//
+// NOTE: On Windows, this class provides a subset of the functionality afforded
+// by a Windows event object.  This is intentional.  If you are writing Windows
+// specific code and you need other features of a Windows event, then you might
+// be better off just using a Windows event directly.
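+//
+// A minimal usage sketch (illustrative only; the worker-thread plumbing is
+// assumed, not part of this API):
+//
+//   base::WaitableEvent done(
+//       base::WaitableEvent::ResetPolicy::MANUAL,
+//       base::WaitableEvent::InitialState::NOT_SIGNALED);
+//   // Worker thread, when finished:  done.Signal();
+//   // Waiting thread:
+//   done.Wait();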
+class BASE_EXPORT WaitableEvent {
+ public:
+  // Indicates whether a WaitableEvent should automatically reset the event
+  // state after a single waiting thread has been released or remain signaled
+  // until Reset() is manually invoked.
+  enum class ResetPolicy { MANUAL, AUTOMATIC };
+
+  // Indicates whether a new WaitableEvent should start in a signaled state or
+  // not.
+  enum class InitialState { SIGNALED, NOT_SIGNALED };
+
+  // Constructs a WaitableEvent with policy and initial state as detailed in
+  // the above enums.
+  WaitableEvent(ResetPolicy reset_policy = ResetPolicy::MANUAL,
+                InitialState initial_state = InitialState::NOT_SIGNALED);
+
+#if defined(OS_WIN)
+  // Create a WaitableEvent from an Event HANDLE which has already been
+  // created. This object takes ownership of the HANDLE and will close it when
+  // deleted.
+  explicit WaitableEvent(win::ScopedHandle event_handle);
+#endif
+
+  ~WaitableEvent();
+
+  // Put the event in the un-signaled state.
+  void Reset();
+
+  // Put the event in the signaled state, causing any thread blocked on Wait
+  // to be woken up.
+  void Signal();
+
+  // Returns true if the event is in the signaled state, else false.  If this
+  // is not a manual reset event, then this test will cause a reset.
+  bool IsSignaled();
+
+  // Wait indefinitely for the event to be signaled. Wait's return "happens
+  // after" |Signal| has completed. This means that it's safe for a
+  // WaitableEvent to synchronise its own destruction, like this:
+  //
+  //   WaitableEvent *e = new WaitableEvent;
+  //   SendToOtherThread(e);
+  //   e->Wait();
+  //   delete e;
+  void Wait();
+
+  // Wait up until wait_delta has passed for the event to be signaled.  Returns
+  // true if the event was signaled.
+  //
+  // TimedWait can synchronise its own destruction like |Wait|.
+  bool TimedWait(const TimeDelta& wait_delta);
+
+  // Wait up until end_time deadline has passed for the event to be signaled.
+  // Return true if the event was signaled.
+  //
+  // TimedWaitUntil can synchronise its own destruction like |Wait|.
+  bool TimedWaitUntil(const TimeTicks& end_time);
+
+#if defined(OS_WIN)
+  HANDLE handle() const { return handle_.Get(); }
+#endif
+
+  // Wait, synchronously, on multiple events.
+  //   waitables: an array of WaitableEvent pointers
+  //   count: the number of elements in @waitables
+  //
+  // returns: the index of a WaitableEvent which has been signaled.
+  //
+  // You MUST NOT delete any of the WaitableEvent objects while this wait is
+  // happening, however WaitMany's return "happens after" the |Signal| call
+  // that caused it has completed, like |Wait|.
+  //
+  // If more than one WaitableEvent is signaled to unblock WaitMany, the lowest
+  // index among them is returned.
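+  //
+  // Example (illustrative only; |e1| and |e2| are hypothetical events):
+  //
+  //   WaitableEvent* waitables[] = {&e1, &e2};
+  //   size_t index = WaitableEvent::WaitMany(waitables, 2);
+  //   // |waitables[index]| was signaled.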
+  static size_t WaitMany(WaitableEvent** waitables, size_t count);
+
+  // For asynchronous waiting, see WaitableEventWatcher
+
+  // This is a private helper class. It's here because it's used by friends of
+  // this class (such as WaitableEventWatcher) to be able to enqueue elements
+  // of the wait-list
+  class Waiter {
+   public:
+    // Signal the waiter to wake up.
+    //
+    // Consider the case of a Waiter which is in multiple WaitableEvent's
+    // wait-lists. Each WaitableEvent is automatic-reset and two of them are
+    // signaled at the same time. Now, each will wake only the first waiter in
+    // the wake-list before resetting. However, if those two waiters happen to
+    // be the same object (as can happen if another thread didn't have a chance
+    // to dequeue the waiter from the other wait-list in time), two auto-resets
+    // will have happened, but only one waiter has been signaled!
+    //
+    // Because of this, a Waiter may "reject" a wake by returning false. In
+    // this case, the auto-reset WaitableEvent shouldn't act as if anything has
+    // been notified.
+    virtual bool Fire(WaitableEvent* signaling_event) = 0;
+
+    // Waiters may implement this in order to provide an extra condition for
+    // two Waiters to be considered equal. In WaitableEvent::Dequeue, if the
+    // pointers match then this function is called as a final check. See the
+    // comments in ~Handle for why.
+    virtual bool Compare(void* tag) = 0;
+
+   protected:
+    virtual ~Waiter() = default;
+  };
+
+ private:
+  friend class WaitableEventWatcher;
+
+#if defined(OS_WIN)
+  win::ScopedHandle handle_;
+#elif defined(OS_MACOSX)
+  // Prior to macOS 10.12, a TYPE_MACH_RECV dispatch source may not be invoked
+  // immediately. If a WaitableEventWatcher is used on a manual-reset event,
+  // and another thread that is Wait()ing on the event calls Reset()
+  // immediately after waking up, the watcher may not receive the callback.
+  // On macOS 10.12 and higher, dispatch delivery is reliable. But for OSes
+  // prior, a lock-protected list of callbacks is used for manual-reset event
+  // watchers. Automatic-reset events are not prone to this issue, since the
+  // first thread to wake will claim the event.
+  static bool UseSlowWatchList(ResetPolicy policy);
+
+  // Peeks the message queue named by |port| and returns true if a message
+  // is present and false if not. If |dequeue| is true, the message will be
+  // drained from the queue. If |dequeue| is false, the queue will only be
+  // peeked. |port| must be a receive right.
+  static bool PeekPort(mach_port_t port, bool dequeue);
+
+  // The Mach receive right is waited on by both WaitableEvent and
+  // WaitableEventWatcher. It is valid to signal and then delete an event, and
+  // a watcher should still be notified. If the right were to be destroyed
+  // immediately, the watcher would not receive the signal. Because Mach
+  // receive rights cannot have a user refcount greater than one, the right
+  // must be reference-counted manually.
+  class ReceiveRight : public RefCountedThreadSafe<ReceiveRight> {
+   public:
+    ReceiveRight(mach_port_t name, bool create_slow_watch_list);
+
+    mach_port_t Name() const { return right_.get(); }
+
+    // This structure is used iff UseSlowWatchList() is true. See the comment
+    // in Signal() for details.
+    struct WatchList {
+      WatchList();
+      ~WatchList();
+
+      // The lock protects a list of closures to be run when the event is
+      // Signal()ed. The closures are invoked on the signaling thread, so they
+      // must be safe to be called from any thread.
+      Lock lock;
+      std::list<OnceClosure> list;
+    };
+
+    WatchList* SlowWatchList() const { return slow_watch_list_.get(); }
+
+   private:
+    friend class RefCountedThreadSafe<ReceiveRight>;
+    ~ReceiveRight();
+
+    mac::ScopedMachReceiveRight right_;
+
+    // This is allocated iff UseSlowWatchList() is true. It is created on the
+    // heap to avoid performing initialization when not using the slow path.
+    std::unique_ptr<WatchList> slow_watch_list_;
+
+    DISALLOW_COPY_AND_ASSIGN(ReceiveRight);
+  };
+
+  const ResetPolicy policy_;
+
+  // The receive right for the event.
+  scoped_refptr<ReceiveRight> receive_right_;
+
+  // The send right used to signal the event. This can be disposed of with
+  // the event, unlike the receive right, since a deleted event cannot be
+  // signaled.
+  mac::ScopedMachSendRight send_right_;
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  // On Windows, you must not close a HANDLE which is currently being waited on.
+  // The MSDN documentation says that the resulting behaviour is 'undefined'.
+  // To solve that issue each WaitableEventWatcher duplicates the given event
+  // handle.
+
+  // However, if we were to include the following members
+  // directly then, on POSIX, one couldn't use WaitableEventWatcher to watch an
+  // event which gets deleted. This mismatch has bitten us several times now,
+  // so we have a kernel of the WaitableEvent, which is reference counted.
+  // WaitableEventWatchers may then take a reference and thus match the Windows
+  // behaviour.
+  struct WaitableEventKernel :
+      public RefCountedThreadSafe<WaitableEventKernel> {
+   public:
+    WaitableEventKernel(ResetPolicy reset_policy, InitialState initial_state);
+
+    bool Dequeue(Waiter* waiter, void* tag);
+
+    base::Lock lock_;
+    const bool manual_reset_;
+    bool signaled_;
+    std::list<Waiter*> waiters_;
+
+   private:
+    friend class RefCountedThreadSafe<WaitableEventKernel>;
+    ~WaitableEventKernel();
+  };
+
+  typedef std::pair<WaitableEvent*, size_t> WaiterAndIndex;
+
+  // When dealing with arrays of WaitableEvent*, we want to sort by the address
+  // of the WaitableEvent in order to have a globally consistent locking order.
+  // In that case we keep them, in sorted order, in an array of pairs where the
+  // second element is the index of the WaitableEvent in the original,
+  // unsorted, array.
+  static size_t EnqueueMany(WaiterAndIndex* waitables,
+                            size_t count, Waiter* waiter);
+
+  bool SignalAll();
+  bool SignalOne();
+  void Enqueue(Waiter* waiter);
+
+  scoped_refptr<WaitableEventKernel> kernel_;
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(WaitableEvent);
+};
+
+}  // namespace base
+
+#endif  // BASE_SYNCHRONIZATION_WAITABLE_EVENT_H_
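For reference, a minimal sketch (illustrative only, not part of the CL) of the
WaitMany contract documented above: when several events are signaled, the
lowest index is returned, and only that event is consumed if it is auto-reset.

// Illustrative only; uses just the interface declared above.
#include "base/synchronization/waitable_event.h"

size_t WaitManySketch() {
  using base::WaitableEvent;
  WaitableEvent a(WaitableEvent::ResetPolicy::AUTOMATIC,
                  WaitableEvent::InitialState::NOT_SIGNALED);
  WaitableEvent b(WaitableEvent::ResetPolicy::AUTOMATIC,
                  WaitableEvent::InitialState::NOT_SIGNALED);
  a.Signal();
  b.Signal();
  WaitableEvent* both[] = {&a, &b};
  // Returns 0: both are signaled, so the lowest index wins, and only |a| is
  // auto-reset; |b| stays signaled.
  return WaitableEvent::WaitMany(both, 2);
}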
diff --git a/base/synchronization/waitable_event_mac.cc b/base/synchronization/waitable_event_mac.cc
new file mode 100644
index 0000000..ad6f8cb
--- /dev/null
+++ b/base/synchronization/waitable_event_mac.cc
@@ -0,0 +1,359 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/synchronization/waitable_event.h"
+
+#include <dispatch/dispatch.h>
+#include <mach/mach.h>
+#include <sys/event.h>
+
+#include "base/debug/activity_tracker.h"
+#include "base/files/scoped_file.h"
+#include "base/mac/dispatch_source_mach.h"
+#include "base/mac/mac_util.h"
+#include "base/mac/mach_logging.h"
+#include "base/mac/scoped_dispatch_object.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/threading/scoped_blocking_call.h"
+#include "base/threading/thread_restrictions.h"
+#include "build/build_config.h"
+
+namespace base {
+
+WaitableEvent::WaitableEvent(ResetPolicy reset_policy,
+                             InitialState initial_state)
+    : policy_(reset_policy) {
+  mach_port_options_t options{};
+  options.flags = MPO_INSERT_SEND_RIGHT;
+  options.mpl.mpl_qlimit = 1;
+
+  mach_port_t name;
+  kern_return_t kr = mach_port_construct(mach_task_self(), &options, 0, &name);
+  MACH_CHECK(kr == KERN_SUCCESS, kr) << "mach_port_construct";
+
+  receive_right_ = new ReceiveRight(name, UseSlowWatchList(policy_));
+  send_right_.reset(name);
+
+  if (initial_state == InitialState::SIGNALED)
+    Signal();
+}
+
+WaitableEvent::~WaitableEvent() = default;
+
+void WaitableEvent::Reset() {
+  PeekPort(receive_right_->Name(), true);
+}
+
+void WaitableEvent::Signal() {
+  // If using the slow watch-list, copy the watchers to a local. After
+  // mach_msg(), the event object may be deleted by an awoken thread.
+  const bool use_slow_path = UseSlowWatchList(policy_);
+  ReceiveRight* receive_right = nullptr;  // Manually reference counted.
+  std::unique_ptr<std::list<OnceClosure>> watch_list;
+  if (use_slow_path) {
+    // To avoid a race condition of a WaitableEventWatcher getting added
+    // while another thread is in this method, hold the watch-list lock for
+    // the duration of mach_msg(). This requires ref-counting the
+    // |receive_right_| object that contains it, in case the event is deleted
+    // by a waiting thread after mach_msg().
+    receive_right = receive_right_.get();
+    receive_right->AddRef();
+
+    ReceiveRight::WatchList* slow_watch_list = receive_right->SlowWatchList();
+    slow_watch_list->lock.Acquire();
+
+    if (!slow_watch_list->list.empty()) {
+      watch_list.reset(new std::list<OnceClosure>());
+      std::swap(*watch_list, slow_watch_list->list);
+    }
+  }
+
+  mach_msg_empty_send_t msg{};
+  msg.header.msgh_bits = MACH_MSGH_BITS_REMOTE(MACH_MSG_TYPE_COPY_SEND);
+  msg.header.msgh_size = sizeof(msg);
+  msg.header.msgh_remote_port = send_right_.get();
+  // If the event is already signaled, this will time out because the queue
+  // has a length of one.
+  kern_return_t kr =
+      mach_msg(&msg.header, MACH_SEND_MSG | MACH_SEND_TIMEOUT, sizeof(msg), 0,
+               MACH_PORT_NULL, 0, MACH_PORT_NULL);
+  MACH_CHECK(kr == KERN_SUCCESS || kr == MACH_SEND_TIMED_OUT, kr) << "mach_msg";
+
+  if (use_slow_path) {
+    // If a WaitableEventWatcher were to start watching when the event is
+    // signaled, it runs the callback immediately without adding it to the
+    // list. Therefore the watch list can only be non-empty if the event is
+    // newly signaled.
+    if (watch_list.get()) {
+      MACH_CHECK(kr == KERN_SUCCESS, kr);
+      for (auto& watcher : *watch_list) {
+        std::move(watcher).Run();
+      }
+    }
+
+    receive_right->SlowWatchList()->lock.Release();
+    receive_right->Release();
+  }
+}
+
+bool WaitableEvent::IsSignaled() {
+  return PeekPort(receive_right_->Name(), policy_ == ResetPolicy::AUTOMATIC);
+}
+
+void WaitableEvent::Wait() {
+  bool result = TimedWaitUntil(TimeTicks::Max());
+  DCHECK(result) << "TimedWait() should never fail with infinite timeout";
+}
+
+bool WaitableEvent::TimedWait(const TimeDelta& wait_delta) {
+  return TimedWaitUntil(TimeTicks::Now() + wait_delta);
+}
+
+bool WaitableEvent::TimedWaitUntil(const TimeTicks& end_time) {
+  internal::AssertBaseSyncPrimitivesAllowed();
+  ScopedBlockingCall scoped_blocking_call(BlockingType::MAY_BLOCK);
+  // Record the event that this thread is blocking upon (for hang diagnosis).
+  debug::ScopedEventWaitActivity event_activity(this);
+
+  TimeDelta wait_time = end_time - TimeTicks::Now();
+  if (wait_time < TimeDelta()) {
+    // A negative delta would be treated by the system as indefinite, but
+    // it needs to be treated as a poll instead.
+    wait_time = TimeDelta();
+  }
+
+  mach_msg_empty_rcv_t msg{};
+  msg.header.msgh_local_port = receive_right_->Name();
+
+  mach_msg_option_t options = MACH_RCV_MSG;
+
+  mach_msg_timeout_t timeout = 0;
+  if (!end_time.is_max()) {
+    options |= MACH_RCV_TIMEOUT;
+    timeout = wait_time.InMillisecondsRoundedUp();
+  }
+
+  mach_msg_size_t rcv_size = sizeof(msg);
+  if (policy_ == ResetPolicy::MANUAL) {
+    // To avoid dequeing the message, receive with a size of 0 and set
+    // MACH_RCV_LARGE to keep the message in the queue.
+    options |= MACH_RCV_LARGE;
+    rcv_size = 0;
+  }
+
+  kern_return_t kr = mach_msg(&msg.header, options, 0, rcv_size,
+                              receive_right_->Name(), timeout, MACH_PORT_NULL);
+  if (kr == KERN_SUCCESS) {
+    return true;
+  } else if (rcv_size == 0 && kr == MACH_RCV_TOO_LARGE) {
+    return true;
+  } else {
+    MACH_CHECK(kr == MACH_RCV_TIMED_OUT, kr) << "mach_msg";
+    return false;
+  }
+}
+
+// static
+bool WaitableEvent::UseSlowWatchList(ResetPolicy policy) {
+#if defined(OS_IOS)
+  const bool use_slow_path = false;
+#else
+  static bool use_slow_path = !mac::IsAtLeastOS10_12();
+#endif
+  return policy == ResetPolicy::MANUAL && use_slow_path;
+}
+
+// static
+size_t WaitableEvent::WaitMany(WaitableEvent** raw_waitables, size_t count) {
+  internal::AssertBaseSyncPrimitivesAllowed();
+  DCHECK(count) << "Cannot wait on no events";
+  ScopedBlockingCall scoped_blocking_call(BlockingType::MAY_BLOCK);
+  // Record an event (the first) that this thread is blocking upon.
+  debug::ScopedEventWaitActivity event_activity(raw_waitables[0]);
+
+  // On macOS 10.11+, using Mach port sets may cause system instability, per
+  // https://crbug.com/756102. On macOS 10.12+, a kqueue can be used
+  // instead to work around that. On macOS 10.9 and 10.10, kqueue only works
+  // for port sets, so port sets are just used directly. On macOS 10.11,
+  // libdispatch sources are used. Therefore, there are three different
+  // primitives that can be used to implement WaitMany. Which one to use is
+  // selected at run-time by OS version checks.
+  enum WaitManyPrimitive {
+    KQUEUE,
+    DISPATCH,
+    PORT_SET,
+  };
+#if defined(OS_IOS)
+  const WaitManyPrimitive kPrimitive = PORT_SET;
+#else
+  const WaitManyPrimitive kPrimitive =
+      mac::IsAtLeastOS10_12() ? KQUEUE
+                              : (mac::IsOS10_11() ? DISPATCH : PORT_SET);
+#endif
+  if (kPrimitive == KQUEUE) {
+    std::vector<kevent64_s> events(count);
+    for (size_t i = 0; i < count; ++i) {
+      EV_SET64(&events[i], raw_waitables[i]->receive_right_->Name(),
+               EVFILT_MACHPORT, EV_ADD, 0, 0, i, 0, 0);
+    }
+
+    std::vector<kevent64_s> out_events(count);
+
+    ScopedFD wait_many(kqueue());
+    PCHECK(wait_many.is_valid()) << "kqueue";
+
+    int rv = HANDLE_EINTR(kevent64(wait_many.get(), events.data(), count,
+                                   out_events.data(), count, 0, nullptr));
+    PCHECK(rv > 0) << "kevent64";
+
+    size_t triggered = -1;
+    for (size_t i = 0; i < static_cast<size_t>(rv); ++i) {
+      // WaitMany should return the lowest index in |raw_waitables| that was
+      // triggered.
+      size_t index = static_cast<size_t>(out_events[i].udata);
+      triggered = std::min(triggered, index);
+    }
+
+    if (raw_waitables[triggered]->policy_ == ResetPolicy::AUTOMATIC) {
+      // The message needs to be dequeued to reset the event.
+      PeekPort(raw_waitables[triggered]->receive_right_->Name(), true);
+    }
+
+    return triggered;
+  } else if (kPrimitive == DISPATCH) {
+    // Each item in |raw_waitables| will be watched using a dispatch source
+    // scheduled on the serial |queue|. The first one to be invoked will
+    // signal the |semaphore| that this method will wait on.
+    ScopedDispatchObject<dispatch_queue_t> queue(dispatch_queue_create(
+        "org.chromium.base.WaitableEvent.WaitMany", DISPATCH_QUEUE_SERIAL));
+    ScopedDispatchObject<dispatch_semaphore_t> semaphore(
+        dispatch_semaphore_create(0));
+
+    // Block capture references. |signaled| will identify the index in
+    // |raw_waitables| whose source was invoked.
+    dispatch_semaphore_t semaphore_ref = semaphore.get();
+    const size_t kUnsignaled = -1;
+    __block size_t signaled = kUnsignaled;
+
+    // Create a MACH_RECV dispatch source for each event. These must be
+    // destroyed before the |queue| and |semaphore|.
+    std::vector<std::unique_ptr<DispatchSourceMach>> sources;
+    for (size_t i = 0; i < count; ++i) {
+      const bool auto_reset =
+          raw_waitables[i]->policy_ == WaitableEvent::ResetPolicy::AUTOMATIC;
+      // The block will copy a reference to |right|.
+      scoped_refptr<WaitableEvent::ReceiveRight> right =
+          raw_waitables[i]->receive_right_;
+      auto source =
+          std::make_unique<DispatchSourceMach>(queue, right->Name(), ^{
+            // After the semaphore is signaled, another event may be signaled
+            // the source may have its block put on the |queue|. WaitMany
+            // should only report (and auto-reset) one event, so the first
+            // event to signal is reported.
+            if (signaled == kUnsignaled) {
+              signaled = i;
+              if (auto_reset) {
+                PeekPort(right->Name(), true);
+              }
+              dispatch_semaphore_signal(semaphore_ref);
+            }
+          });
+      source->Resume();
+      sources.push_back(std::move(source));
+    }
+
+    dispatch_semaphore_wait(semaphore, DISPATCH_TIME_FOREVER);
+    DCHECK_NE(signaled, kUnsignaled);
+    return signaled;
+  } else {
+    DCHECK_EQ(kPrimitive, PORT_SET);
+
+    kern_return_t kr;
+
+    mac::ScopedMachPortSet port_set;
+    {
+      mach_port_t name;
+      kr =
+          mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, &name);
+      MACH_CHECK(kr == KERN_SUCCESS, kr) << "mach_port_allocate";
+      port_set.reset(name);
+    }
+
+    for (size_t i = 0; i < count; ++i) {
+      kr = mach_port_insert_member(mach_task_self(),
+                                   raw_waitables[i]->receive_right_->Name(),
+                                   port_set.get());
+      MACH_CHECK(kr == KERN_SUCCESS, kr) << "index " << i;
+    }
+
+    mach_msg_empty_rcv_t msg{};
+    // Wait on the port set. Only specify space enough for the header, to
+    // identify which port in the set is signaled. Otherwise, receiving from the
+    // port set may dequeue a message for a manual-reset event object, which
+    // would cause it to be reset.
+    kr = mach_msg(&msg.header,
+                  MACH_RCV_MSG | MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY, 0,
+                  sizeof(msg.header), port_set.get(), 0, MACH_PORT_NULL);
+    MACH_CHECK(kr == MACH_RCV_TOO_LARGE, kr) << "mach_msg";
+
+    for (size_t i = 0; i < count; ++i) {
+      WaitableEvent* event = raw_waitables[i];
+      if (msg.header.msgh_local_port == event->receive_right_->Name()) {
+        if (event->policy_ == ResetPolicy::AUTOMATIC) {
+          // The message needs to be dequeued to reset the event.
+          PeekPort(msg.header.msgh_local_port, true);
+        }
+        return i;
+      }
+    }
+
+    NOTREACHED();
+    return 0;
+  }
+}
+
+// static
+bool WaitableEvent::PeekPort(mach_port_t port, bool dequeue) {
+  if (dequeue) {
+    mach_msg_empty_rcv_t msg{};
+    msg.header.msgh_local_port = port;
+    kern_return_t kr = mach_msg(&msg.header, MACH_RCV_MSG | MACH_RCV_TIMEOUT, 0,
+                                sizeof(msg), port, 0, MACH_PORT_NULL);
+    if (kr == KERN_SUCCESS) {
+      return true;
+    } else {
+      MACH_CHECK(kr == MACH_RCV_TIMED_OUT, kr) << "mach_msg";
+      return false;
+    }
+  } else {
+    mach_port_seqno_t seqno = 0;
+    mach_msg_size_t size;
+    mach_msg_id_t id;
+    mach_msg_trailer_t trailer;
+    mach_msg_type_number_t trailer_size = sizeof(trailer);
+    kern_return_t kr = mach_port_peek(
+        mach_task_self(), port, MACH_RCV_TRAILER_TYPE(MACH_RCV_TRAILER_NULL),
+        &seqno, &size, &id, reinterpret_cast<mach_msg_trailer_info_t>(&trailer),
+        &trailer_size);
+    if (kr == KERN_SUCCESS) {
+      return true;
+    } else {
+      MACH_CHECK(kr == KERN_FAILURE, kr) << "mach_port_peek";
+      return false;
+    }
+  }
+}
+
+WaitableEvent::ReceiveRight::ReceiveRight(mach_port_t name,
+                                          bool create_slow_watch_list)
+    : right_(name),
+      slow_watch_list_(create_slow_watch_list ? new WatchList() : nullptr) {}
+
+WaitableEvent::ReceiveRight::~ReceiveRight() = default;
+
+WaitableEvent::ReceiveRight::WatchList::WatchList() = default;
+
+WaitableEvent::ReceiveRight::WatchList::~WatchList() = default;
+
+}  // namespace base
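The Mach implementation above hinges on one trick: a port constructed with
mpl_qlimit == 1 acts as a binary flag. Distilled into an illustrative sketch
(the |send| right is an assumed input; error handling elided):

#include <mach/mach.h>

// |send| is assumed to be a valid send right to a port created with
// mpl_qlimit == 1, as in the WaitableEvent constructor above.
kern_return_t SignalSketch(mach_port_t send) {
  mach_msg_empty_send_t msg{};
  msg.header.msgh_bits = MACH_MSGH_BITS_REMOTE(MACH_MSG_TYPE_COPY_SEND);
  msg.header.msgh_size = sizeof(msg);
  msg.header.msgh_remote_port = send;
  // With a queue limit of one, a second non-blocking send times out instead
  // of queuing, so the port never stores more than one "signal".
  // KERN_SUCCESS means newly signaled; MACH_SEND_TIMED_OUT means the event
  // was already signaled.
  return mach_msg(&msg.header, MACH_SEND_MSG | MACH_SEND_TIMEOUT, sizeof(msg),
                  0, MACH_PORT_NULL, /*timeout=*/0, MACH_PORT_NULL);
}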
diff --git a/base/synchronization/waitable_event_perftest.cc b/base/synchronization/waitable_event_perftest.cc
new file mode 100644
index 0000000..1888077
--- /dev/null
+++ b/base/synchronization/waitable_event_perftest.cc
@@ -0,0 +1,178 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/synchronization/waitable_event.h"
+
+#include <numeric>
+
+#include "base/threading/simple_thread.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/perf/perf_test.h"
+
+namespace base {
+
+namespace {
+
+class TraceWaitableEvent {
+ public:
+  TraceWaitableEvent(size_t samples)
+      : event_(WaitableEvent::ResetPolicy::AUTOMATIC,
+               WaitableEvent::InitialState::NOT_SIGNALED),
+        samples_(samples) {
+    signal_times_.reserve(samples);
+    wait_times_.reserve(samples);
+  }
+
+  ~TraceWaitableEvent() = default;
+
+  void Signal() {
+    TimeTicks start = TimeTicks::Now();
+    event_.Signal();
+    signal_times_.push_back(TimeTicks::Now() - start);
+  }
+
+  void Wait() {
+    TimeTicks start = TimeTicks::Now();
+    event_.Wait();
+    wait_times_.push_back(TimeTicks::Now() - start);
+  }
+
+  bool TimedWaitUntil(const TimeTicks& end_time) {
+    TimeTicks start = TimeTicks::Now();
+    bool signaled = event_.TimedWaitUntil(end_time);
+    wait_times_.push_back(TimeTicks::Now() - start);
+    return signaled;
+  }
+
+  bool IsSignaled() { return event_.IsSignaled(); }
+
+  const std::vector<TimeDelta>& signal_times() const { return signal_times_; }
+  const std::vector<TimeDelta>& wait_times() const { return wait_times_; }
+  size_t samples() const { return samples_; }
+
+ private:
+  WaitableEvent event_;
+
+  std::vector<TimeDelta> signal_times_;
+  std::vector<TimeDelta> wait_times_;
+
+  const size_t samples_;
+
+  DISALLOW_COPY_AND_ASSIGN(TraceWaitableEvent);
+};
+
+class SignalerThread : public SimpleThread {
+ public:
+  SignalerThread(TraceWaitableEvent* waiter, TraceWaitableEvent* signaler)
+      : SimpleThread("WaitableEventPerfTest signaler"),
+        waiter_(waiter),
+        signaler_(signaler) {}
+
+  ~SignalerThread() override = default;
+
+  void Run() override {
+    while (!stop_event_.IsSignaled()) {
+      if (waiter_)
+        waiter_->Wait();
+      if (signaler_)
+        signaler_->Signal();
+    }
+  }
+
+  // Signals the thread to stop on the next iteration of its loop (which
+  // will happen immediately if no |waiter_| is present or it is signaled).
+  void RequestStop() { stop_event_.Signal(); }
+
+ private:
+  WaitableEvent stop_event_{WaitableEvent::ResetPolicy::MANUAL,
+                            WaitableEvent::InitialState::NOT_SIGNALED};
+  TraceWaitableEvent* waiter_;
+  TraceWaitableEvent* signaler_;
+  DISALLOW_COPY_AND_ASSIGN(SignalerThread);
+};
+
+void PrintPerfWaitableEvent(const TraceWaitableEvent* event,
+                            const std::string& modifier,
+                            const std::string& trace) {
+  TimeDelta signal_time = std::accumulate(
+      event->signal_times().begin(), event->signal_times().end(), TimeDelta());
+  TimeDelta wait_time = std::accumulate(event->wait_times().begin(),
+                                        event->wait_times().end(), TimeDelta());
+  perf_test::PrintResult(
+      "signal_time", modifier, trace,
+      static_cast<size_t>(signal_time.InNanoseconds()) / event->samples(),
+      "ns/sample", true);
+  perf_test::PrintResult(
+      "wait_time", modifier, trace,
+      static_cast<size_t>(wait_time.InNanoseconds()) / event->samples(),
+      "ns/sample", true);
+}
+
+}  // namespace
+
+TEST(WaitableEventPerfTest, SingleThread) {
+  const size_t kSamples = 1000;
+
+  TraceWaitableEvent event(kSamples);
+
+  for (size_t i = 0; i < kSamples; ++i) {
+    event.Signal();
+    event.Wait();
+  }
+
+  PrintPerfWaitableEvent(&event, "", "singlethread-1000-samples");
+}
+
+TEST(WaitableEventPerfTest, MultipleThreads) {
+  const size_t kSamples = 1000;
+
+  TraceWaitableEvent waiter(kSamples);
+  TraceWaitableEvent signaler(kSamples);
+
+  // The other thread will wait and signal on the respective opposite events.
+  SignalerThread thread(&signaler, &waiter);
+  thread.Start();
+
+  for (size_t i = 0; i < kSamples; ++i) {
+    signaler.Signal();
+    waiter.Wait();
+  }
+
+  // Signal the stop event and then make sure the signaler event it is
+  // waiting on is also signaled.
+  thread.RequestStop();
+  signaler.Signal();
+
+  thread.Join();
+
+  PrintPerfWaitableEvent(&waiter, "_waiter", "multithread-1000-samples");
+  PrintPerfWaitableEvent(&signaler, "_signaler", "multithread-1000-samples");
+}
+
+TEST(WaitableEventPerfTest, Throughput) {
+  // Reserve a lot of sample space.
+  const size_t kCapacity = 500000;
+  TraceWaitableEvent event(kCapacity);
+
+  SignalerThread thread(nullptr, &event);
+  thread.Start();
+
+  TimeTicks end_time = TimeTicks::Now() + TimeDelta::FromSeconds(1);
+  size_t count = 0;
+  while (event.TimedWaitUntil(end_time)) {
+    ++count;
+  }
+
+  thread.RequestStop();
+  thread.Join();
+
+  perf_test::PrintResult("counts", "", "throughput", count, "signals", true);
+  PrintPerfWaitableEvent(&event, "", "throughput");
+
+  // Make sure that allocation didn't happen during the test.
+  EXPECT_LE(event.signal_times().capacity(), kCapacity);
+  EXPECT_LE(event.wait_times().capacity(), kCapacity);
+}
+
+}  // namespace base
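TraceWaitableEvent above follows the usual micro-benchmark pattern: sample a
monotonic clock around the call, record the delta, and reserve sample storage
up front so the timed region never allocates. The pattern in isolation, as an
illustrative sketch:

#include <vector>

#include "base/synchronization/waitable_event.h"
#include "base/time/time.h"

// |samples| is assumed to have had reserve() called on it, so push_back()
// cannot allocate while the clock is running.
void MeasureSignal(base::WaitableEvent* event,
                   std::vector<base::TimeDelta>* samples) {
  base::TimeTicks start = base::TimeTicks::Now();
  event->Signal();
  samples->push_back(base::TimeTicks::Now() - start);
}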
diff --git a/base/synchronization/waitable_event_posix.cc b/base/synchronization/waitable_event_posix.cc
new file mode 100644
index 0000000..9799e7d
--- /dev/null
+++ b/base/synchronization/waitable_event_posix.cc
@@ -0,0 +1,436 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+
+#include <algorithm>
+#include <limits>
+#include <vector>
+
+#include "base/debug/activity_tracker.h"
+#include "base/logging.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/scoped_blocking_call.h"
+#include "base/threading/thread_restrictions.h"
+
+// -----------------------------------------------------------------------------
+// A WaitableEvent on POSIX is implemented as a wait-list. Currently we don't
+// support cross-process events (where one process can signal an event which
+// others are waiting on). Because of this, we can avoid having one thread per
+// listener in several cases.
+//
+// The WaitableEvent maintains a list of waiters, protected by a lock. Each
+// waiter is either an async wait, in which case we have a Task and the
+// MessageLoop to run it on, or a blocking wait, in which case we have the
+// condition variable to signal.
+//
+// Waiting involves grabbing the lock and adding oneself to the wait list. Async
+// waits can be canceled, which means grabbing the lock and removing oneself
+// from the list.
+//
+// Waiting on multiple events is handled by adding a single, synchronous wait to
+// the wait-list of many events. An event passes a pointer to itself when
+// firing a waiter and so we can store that pointer to find out which event
+// triggered.
+// -----------------------------------------------------------------------------
+
+namespace base {
+
+// -----------------------------------------------------------------------------
+// Construction and the simple operations. All state lives in the shared,
+// reference-counted kernel.
+// -----------------------------------------------------------------------------
+WaitableEvent::WaitableEvent(ResetPolicy reset_policy,
+                             InitialState initial_state)
+    : kernel_(new WaitableEventKernel(reset_policy, initial_state)) {}
+
+WaitableEvent::~WaitableEvent() = default;
+
+void WaitableEvent::Reset() {
+  base::AutoLock locked(kernel_->lock_);
+  kernel_->signaled_ = false;
+}
+
+void WaitableEvent::Signal() {
+  base::AutoLock locked(kernel_->lock_);
+
+  if (kernel_->signaled_)
+    return;
+
+  if (kernel_->manual_reset_) {
+    SignalAll();
+    kernel_->signaled_ = true;
+  } else {
+    // In the case of auto reset, if no waiters were woken, we remain
+    // signaled.
+    if (!SignalOne())
+      kernel_->signaled_ = true;
+  }
+}
+
+bool WaitableEvent::IsSignaled() {
+  base::AutoLock locked(kernel_->lock_);
+
+  const bool result = kernel_->signaled_;
+  if (result && !kernel_->manual_reset_)
+    kernel_->signaled_ = false;
+  return result;
+}
+
+// -----------------------------------------------------------------------------
+// Synchronous waits
+
+// -----------------------------------------------------------------------------
+// This is a synchronous waiter. The thread is waiting on the given condition
+// variable and the fired flag in this object.
+// -----------------------------------------------------------------------------
+class SyncWaiter : public WaitableEvent::Waiter {
+ public:
+  SyncWaiter()
+      : fired_(false), signaling_event_(nullptr), lock_(), cv_(&lock_) {}
+
+  bool Fire(WaitableEvent* signaling_event) override {
+    base::AutoLock locked(lock_);
+
+    if (fired_)
+      return false;
+
+    fired_ = true;
+    signaling_event_ = signaling_event;
+
+    cv_.Broadcast();
+
+    // Unlike AsyncWaiter objects, SyncWaiter objects are stack-allocated on
+    // the blocking thread's stack.  There is no |delete this;| in Fire.  The
+    // SyncWaiter object is destroyed when it goes out of scope.
+
+    return true;
+  }
+
+  WaitableEvent* signaling_event() const {
+    return signaling_event_;
+  }
+
+  // ---------------------------------------------------------------------------
+  // These waiters are always stack allocated and don't delete themselves. Thus
+  // there's no problem and the ABA tag is the same as the object pointer.
+  // ---------------------------------------------------------------------------
+  bool Compare(void* tag) override { return this == tag; }
+
+  // ---------------------------------------------------------------------------
+  // Called with lock held.
+  // ---------------------------------------------------------------------------
+  bool fired() const {
+    return fired_;
+  }
+
+  // ---------------------------------------------------------------------------
+  // During a TimedWait, we need a way to make sure that an auto-reset
+  // WaitableEvent doesn't think that this event has been signaled between
+  // unlocking it and removing it from the wait-list. Called with lock held.
+  // ---------------------------------------------------------------------------
+  void Disable() {
+    fired_ = true;
+  }
+
+  base::Lock* lock() {
+    return &lock_;
+  }
+
+  base::ConditionVariable* cv() {
+    return &cv_;
+  }
+
+ private:
+  bool fired_;
+  WaitableEvent* signaling_event_;  // The WaitableEvent which woke us
+  base::Lock lock_;
+  base::ConditionVariable cv_;
+};
+
+void WaitableEvent::Wait() {
+  bool result = TimedWaitUntil(TimeTicks::Max());
+  DCHECK(result) << "TimedWait() should never fail with infinite timeout";
+}
+
+bool WaitableEvent::TimedWait(const TimeDelta& wait_delta) {
+  // TimeTicks takes care of overflow including the cases when wait_delta
+  // is a maximum value.
+  return TimedWaitUntil(TimeTicks::Now() + wait_delta);
+}
+
+bool WaitableEvent::TimedWaitUntil(const TimeTicks& end_time) {
+  internal::AssertBaseSyncPrimitivesAllowed();
+  ScopedBlockingCall scoped_blocking_call(BlockingType::MAY_BLOCK);
+  // Record the event that this thread is blocking upon (for hang diagnosis).
+  base::debug::ScopedEventWaitActivity event_activity(this);
+
+  const bool finite_time = !end_time.is_max();
+
+  kernel_->lock_.Acquire();
+  if (kernel_->signaled_) {
+    if (!kernel_->manual_reset_) {
+      // In this case we were signaled when we had no waiters. Now that
+      // someone has waited upon us, we can automatically reset.
+      kernel_->signaled_ = false;
+    }
+
+    kernel_->lock_.Release();
+    return true;
+  }
+
+  SyncWaiter sw;
+  sw.lock()->Acquire();
+
+  Enqueue(&sw);
+  kernel_->lock_.Release();
+  // We are violating the locking order here by holding the SyncWaiter lock
+  // but not the WaitableEvent lock. However, this is safe because we don't
+  // acquire |lock_| again before releasing the SyncWaiter lock.
+
+  for (;;) {
+    const TimeTicks current_time(TimeTicks::Now());
+
+    if (sw.fired() || (finite_time && current_time >= end_time)) {
+      const bool return_value = sw.fired();
+
+      // We can't acquire |lock_| before releasing the SyncWaiter lock
+      // (because of the locking order). However, in between the two, a signal
+      // could be fired and |sw| would accept it, yet we would still return
+      // false, so the signal would be lost on an auto-reset WaitableEvent.
+      // Thus we call Disable(), which makes SyncWaiter::Fire() return false.
+      sw.Disable();
+      sw.lock()->Release();
+
+      // This is a bug that has been enshrined in the interface of
+      // WaitableEvent now: |Dequeue| is called even when |sw.fired()| is true,
+      // even though it'll always return false in that case. However, taking
+      // the lock ensures that |Signal| has completed before we return and
+      // means that a WaitableEvent can synchronise its own destruction.
+      kernel_->lock_.Acquire();
+      kernel_->Dequeue(&sw, &sw);
+      kernel_->lock_.Release();
+
+      return return_value;
+    }
+
+    if (finite_time) {
+      const TimeDelta max_wait(end_time - current_time);
+      sw.cv()->TimedWait(max_wait);
+    } else {
+      sw.cv()->Wait();
+    }
+  }
+}
+
+// -----------------------------------------------------------------------------
+// Synchronous waiting on multiple objects.
+
+static bool  // StrictWeakOrdering
+cmp_fst_addr(const std::pair<WaitableEvent*, size_t>& a,
+             const std::pair<WaitableEvent*, size_t>& b) {
+  return a.first < b.first;
+}
+
+// static
+size_t WaitableEvent::WaitMany(WaitableEvent** raw_waitables,
+                               size_t count) {
+  internal::AssertBaseSyncPrimitivesAllowed();
+  DCHECK(count) << "Cannot wait on no events";
+  ScopedBlockingCall scoped_blocking_call(BlockingType::MAY_BLOCK);
+  // Record an event (the first) that this thread is blocking upon.
+  base::debug::ScopedEventWaitActivity event_activity(raw_waitables[0]);
+
+  // We need to acquire the locks in a globally consistent order. Thus we sort
+  // the array of waitables by address. We actually sort pairs of (event,
+  // original index) so that we can map back to the original index values
+  // later.
+  std::vector<std::pair<WaitableEvent*, size_t> > waitables;
+  waitables.reserve(count);
+  for (size_t i = 0; i < count; ++i)
+    waitables.push_back(std::make_pair(raw_waitables[i], i));
+
+  DCHECK_EQ(count, waitables.size());
+
+  sort(waitables.begin(), waitables.end(), cmp_fst_addr);
+
+  // The set of waitables must be distinct. Since we have just sorted by
+  // address, we can check this cheaply by comparing pairs of consecutive
+  // elements.
+  for (size_t i = 0; i < waitables.size() - 1; ++i) {
+    DCHECK(waitables[i].first != waitables[i+1].first);
+  }
+
+  SyncWaiter sw;
+
+  const size_t r = EnqueueMany(&waitables[0], count, &sw);
+  if (r < count) {
+    // One of the events is already signaled. The SyncWaiter has not been
+    // enqueued anywhere.
+    return waitables[r].second;
+  }
+
+  // At this point, we hold the locks on all the WaitableEvents and we have
+  // enqueued our waiter in them all.
+  sw.lock()->Acquire();
+    // Release the WaitableEvent locks in the reverse order
+    for (size_t i = 0; i < count; ++i) {
+      waitables[count - (1 + i)].first->kernel_->lock_.Release();
+    }
+
+    for (;;) {
+      if (sw.fired())
+        break;
+
+      sw.cv()->Wait();
+    }
+  sw.lock()->Release();
+
+  // The address of the WaitableEvent which fired is stored in the SyncWaiter.
+  WaitableEvent *const signaled_event = sw.signaling_event();
+  // This will store the index of the raw_waitables which fired.
+  size_t signaled_index = 0;
+
+  // Take the locks of each WaitableEvent in turn (except the signaled one) and
+  // remove our SyncWaiter from the wait-list
+  for (size_t i = 0; i < count; ++i) {
+    if (raw_waitables[i] != signaled_event) {
+      raw_waitables[i]->kernel_->lock_.Acquire();
+        // There's no possible ABA issue with the address of the SyncWaiter here
+        // because it lives on the stack. Thus the tag value is just the pointer
+        // value again.
+        raw_waitables[i]->kernel_->Dequeue(&sw, &sw);
+      raw_waitables[i]->kernel_->lock_.Release();
+    } else {
+      // By taking this lock here we ensure that |Signal| has completed by the
+      // time we return, because |Signal| holds this lock. This matches the
+      // behaviour of |Wait| and |TimedWait|.
+      raw_waitables[i]->kernel_->lock_.Acquire();
+      raw_waitables[i]->kernel_->lock_.Release();
+      signaled_index = i;
+    }
+  }
+
+  return signaled_index;
+}
+
+// -----------------------------------------------------------------------------
+// If return value == count:
+//   The locks of the WaitableEvents have been taken in order and the Waiter has
+//   been enqueued in the wait-list of each. None of the WaitableEvents are
+//   currently signaled
+// else:
+//   None of the WaitableEvent locks are held. The Waiter has not been enqueued
+//   in any of them and the return value is the index of the WaitableEvent which
+//   was signaled with the lowest input index from the original WaitMany call.
+// -----------------------------------------------------------------------------
+// static
+size_t WaitableEvent::EnqueueMany(std::pair<WaitableEvent*, size_t>* waitables,
+                                  size_t count,
+                                  Waiter* waiter) {
+  size_t winner = count;
+  size_t winner_index = count;
+  for (size_t i = 0; i < count; ++i) {
+    auto& kernel = waitables[i].first->kernel_;
+    kernel->lock_.Acquire();
+    if (kernel->signaled_ && waitables[i].second < winner) {
+      winner = waitables[i].second;
+      winner_index = i;
+    }
+  }
+
+  // No events signaled. All locks acquired. Enqueue the Waiter on all of them
+  // and return.
+  if (winner == count) {
+    for (size_t i = 0; i < count; ++i)
+      waitables[i].first->Enqueue(waiter);
+    return count;
+  }
+
+  // Unlock in reverse order and possibly clear the chosen winner's signal
+  // before returning its index.
+  for (size_t i = count; i != 0; --i) {
+    auto& kernel = waitables[i - 1].first->kernel_;
+    if (waitables[i - 1].second == winner) {
+      if (!kernel->manual_reset_)
+        kernel->signaled_ = false;
+    }
+    kernel->lock_.Release();
+  }
+
+  return winner_index;
+}
+
+// -----------------------------------------------------------------------------
+
+
+// -----------------------------------------------------------------------------
+// Private functions...
+
+WaitableEvent::WaitableEventKernel::WaitableEventKernel(
+    ResetPolicy reset_policy,
+    InitialState initial_state)
+    : manual_reset_(reset_policy == ResetPolicy::MANUAL),
+      signaled_(initial_state == InitialState::SIGNALED) {}
+
+WaitableEvent::WaitableEventKernel::~WaitableEventKernel() = default;
+
+// -----------------------------------------------------------------------------
+// Wake all waiting waiters. Called with lock held.
+// -----------------------------------------------------------------------------
+bool WaitableEvent::SignalAll() {
+  bool signaled_at_least_one = false;
+
+  for (std::list<Waiter*>::iterator
+       i = kernel_->waiters_.begin(); i != kernel_->waiters_.end(); ++i) {
+    if ((*i)->Fire(this))
+      signaled_at_least_one = true;
+  }
+
+  kernel_->waiters_.clear();
+  return signaled_at_least_one;
+}
+
+// ---------------------------------------------------------------------------
+// Try to wake a single waiter. Return true if one was woken. Called with lock
+// held.
+// ---------------------------------------------------------------------------
+bool WaitableEvent::SignalOne() {
+  for (;;) {
+    if (kernel_->waiters_.empty())
+      return false;
+
+    const bool r = (*kernel_->waiters_.begin())->Fire(this);
+    kernel_->waiters_.pop_front();
+    if (r)
+      return true;
+  }
+}
+
+// -----------------------------------------------------------------------------
+// Add a waiter to the list of those waiting. Called with lock held.
+// -----------------------------------------------------------------------------
+void WaitableEvent::Enqueue(Waiter* waiter) {
+  kernel_->waiters_.push_back(waiter);
+}
+
+// -----------------------------------------------------------------------------
+// Remove a waiter from the list of those waiting. Return true if the waiter was
+// actually removed. Called with lock held.
+// -----------------------------------------------------------------------------
+bool WaitableEvent::WaitableEventKernel::Dequeue(Waiter* waiter, void* tag) {
+  for (std::list<Waiter*>::iterator
+       i = waiters_.begin(); i != waiters_.end(); ++i) {
+    if (*i == waiter && (*i)->Compare(tag)) {
+      waiters_.erase(i);
+      return true;
+    }
+  }
+
+  return false;
+}
+
+// -----------------------------------------------------------------------------
+
+}  // namespace base
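WaitMany/EnqueueMany above avoid deadlock by always taking the per-event locks
in one global order, namely the events' addresses. The rule in isolation, as
an illustrative sketch:

#include <algorithm>
#include <vector>

#include "base/synchronization/lock.h"

// Acquire every lock in a single global order (by address), so two threads
// locking overlapping sets of events can never each hold a lock the other
// wants; release in the reverse order, mirroring WaitMany() above.
void AcquireAndReleaseInAddressOrder(std::vector<base::Lock*> locks) {
  std::sort(locks.begin(), locks.end());
  for (base::Lock* lock : locks)
    lock->Acquire();
  for (auto it = locks.rbegin(); it != locks.rend(); ++it)
    (*it)->Release();
}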
diff --git a/base/synchronization/waitable_event_unittest.cc b/base/synchronization/waitable_event_unittest.cc
new file mode 100644
index 0000000..e1d2683
--- /dev/null
+++ b/base/synchronization/waitable_event_unittest.cc
@@ -0,0 +1,274 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/synchronization/waitable_event.h"
+
+#include <stddef.h>
+
+#include <algorithm>
+
+#include "base/compiler_specific.h"
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(WaitableEventTest, ManualBasics) {
+  WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+                      WaitableEvent::InitialState::NOT_SIGNALED);
+
+  EXPECT_FALSE(event.IsSignaled());
+
+  event.Signal();
+  EXPECT_TRUE(event.IsSignaled());
+  EXPECT_TRUE(event.IsSignaled());
+
+  event.Reset();
+  EXPECT_FALSE(event.IsSignaled());
+  EXPECT_FALSE(event.TimedWait(TimeDelta::FromMilliseconds(10)));
+
+  event.Signal();
+  event.Wait();
+  EXPECT_TRUE(event.TimedWait(TimeDelta::FromMilliseconds(10)));
+}
+
+TEST(WaitableEventTest, ManualInitiallySignaled) {
+  WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+                      WaitableEvent::InitialState::SIGNALED);
+
+  EXPECT_TRUE(event.IsSignaled());
+  EXPECT_TRUE(event.IsSignaled());
+
+  event.Reset();
+
+  EXPECT_FALSE(event.IsSignaled());
+  EXPECT_FALSE(event.IsSignaled());
+
+  event.Signal();
+
+  event.Wait();
+  EXPECT_TRUE(event.IsSignaled());
+  EXPECT_TRUE(event.IsSignaled());
+}
+
+TEST(WaitableEventTest, AutoBasics) {
+  WaitableEvent event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                      WaitableEvent::InitialState::NOT_SIGNALED);
+
+  EXPECT_FALSE(event.IsSignaled());
+
+  event.Signal();
+  EXPECT_TRUE(event.IsSignaled());
+  EXPECT_FALSE(event.IsSignaled());
+
+  event.Reset();
+  EXPECT_FALSE(event.IsSignaled());
+  EXPECT_FALSE(event.TimedWait(TimeDelta::FromMilliseconds(10)));
+
+  event.Signal();
+  event.Wait();
+  EXPECT_FALSE(event.TimedWait(TimeDelta::FromMilliseconds(10)));
+
+  event.Signal();
+  EXPECT_TRUE(event.TimedWait(TimeDelta::FromMilliseconds(10)));
+}
+
+TEST(WaitableEventTest, AutoInitiallySignaled) {
+  WaitableEvent event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                      WaitableEvent::InitialState::SIGNALED);
+
+  EXPECT_TRUE(event.IsSignaled());
+  EXPECT_FALSE(event.IsSignaled());
+
+  event.Signal();
+
+  EXPECT_TRUE(event.IsSignaled());
+  EXPECT_FALSE(event.IsSignaled());
+}
+
+TEST(WaitableEventTest, WaitManyShortcut) {
+  WaitableEvent* ev[5];
+  for (unsigned i = 0; i < 5; ++i) {
+    ev[i] = new WaitableEvent(WaitableEvent::ResetPolicy::AUTOMATIC,
+                              WaitableEvent::InitialState::NOT_SIGNALED);
+  }
+
+  ev[3]->Signal();
+  EXPECT_EQ(WaitableEvent::WaitMany(ev, 5), 3u);
+
+  ev[3]->Signal();
+  EXPECT_EQ(WaitableEvent::WaitMany(ev, 5), 3u);
+
+  ev[4]->Signal();
+  EXPECT_EQ(WaitableEvent::WaitMany(ev, 5), 4u);
+
+  ev[0]->Signal();
+  EXPECT_EQ(WaitableEvent::WaitMany(ev, 5), 0u);
+
+  for (unsigned i = 0; i < 5; ++i)
+    delete ev[i];
+}
+
+TEST(WaitableEventTest, WaitManyLeftToRight) {
+  WaitableEvent* ev[5];
+  for (size_t i = 0; i < 5; ++i) {
+    ev[i] = new WaitableEvent(WaitableEvent::ResetPolicy::AUTOMATIC,
+                              WaitableEvent::InitialState::NOT_SIGNALED);
+  }
+
+  // Test for consistent left-to-right return behavior across all permutations
+  // of the input array. This is to verify that only the indices -- and not
+  // the WaitableEvents' addresses -- are relevant in determining who wins when
+  // multiple events are signaled.
+
+  std::sort(ev, ev + 5);
+  do {
+    ev[0]->Signal();
+    ev[1]->Signal();
+    EXPECT_EQ(0u, WaitableEvent::WaitMany(ev, 5));
+
+    ev[2]->Signal();
+    EXPECT_EQ(1u, WaitableEvent::WaitMany(ev, 5));
+    EXPECT_EQ(2u, WaitableEvent::WaitMany(ev, 5));
+
+    ev[3]->Signal();
+    ev[4]->Signal();
+    ev[0]->Signal();
+    EXPECT_EQ(0u, WaitableEvent::WaitMany(ev, 5));
+    EXPECT_EQ(3u, WaitableEvent::WaitMany(ev, 5));
+    ev[2]->Signal();
+    EXPECT_EQ(2u, WaitableEvent::WaitMany(ev, 5));
+    EXPECT_EQ(4u, WaitableEvent::WaitMany(ev, 5));
+  } while (std::next_permutation(ev, ev + 5));
+
+  for (size_t i = 0; i < 5; ++i)
+    delete ev[i];
+}
+
+class WaitableEventSignaler : public PlatformThread::Delegate {
+ public:
+  WaitableEventSignaler(TimeDelta delay, WaitableEvent* event)
+      : delay_(delay),
+        event_(event) {
+  }
+
+  void ThreadMain() override {
+    PlatformThread::Sleep(delay_);
+    event_->Signal();
+  }
+
+ private:
+  const TimeDelta delay_;
+  WaitableEvent* event_;
+};
+
+// Tests that a WaitableEvent can be safely deleted when |Wait| is done without
+// additional synchronization.
+TEST(WaitableEventTest, WaitAndDelete) {
+  WaitableEvent* ev =
+      new WaitableEvent(WaitableEvent::ResetPolicy::AUTOMATIC,
+                        WaitableEvent::InitialState::NOT_SIGNALED);
+
+  WaitableEventSignaler signaler(TimeDelta::FromMilliseconds(10), ev);
+  PlatformThreadHandle thread;
+  PlatformThread::Create(0, &signaler, &thread);
+
+  ev->Wait();
+  delete ev;
+
+  PlatformThread::Join(thread);
+}
+
+// Tests that a WaitableEvent can be safely deleted when |WaitMany| is done
+// without additional synchronization.
+TEST(WaitableEventTest, WaitMany) {
+  WaitableEvent* ev[5];
+  for (unsigned i = 0; i < 5; ++i) {
+    ev[i] = new WaitableEvent(WaitableEvent::ResetPolicy::AUTOMATIC,
+                              WaitableEvent::InitialState::NOT_SIGNALED);
+  }
+
+  WaitableEventSignaler signaler(TimeDelta::FromMilliseconds(10), ev[2]);
+  PlatformThreadHandle thread;
+  PlatformThread::Create(0, &signaler, &thread);
+
+  size_t index = WaitableEvent::WaitMany(ev, 5);
+
+  for (unsigned i = 0; i < 5; ++i)
+    delete ev[i];
+
+  PlatformThread::Join(thread);
+  EXPECT_EQ(2u, index);
+}
+
+// Tests that using TimeDelta::Max() on TimedWait() is not the same as passing
+// a timeout of 0. (crbug.com/465948)
+TEST(WaitableEventTest, TimedWait) {
+  WaitableEvent* ev =
+      new WaitableEvent(WaitableEvent::ResetPolicy::AUTOMATIC,
+                        WaitableEvent::InitialState::NOT_SIGNALED);
+
+  TimeDelta thread_delay = TimeDelta::FromMilliseconds(10);
+  WaitableEventSignaler signaler(thread_delay, ev);
+  PlatformThreadHandle thread;
+  TimeTicks start = TimeTicks::Now();
+  PlatformThread::Create(0, &signaler, &thread);
+
+  EXPECT_TRUE(ev->TimedWait(TimeDelta::Max()));
+  EXPECT_GE(TimeTicks::Now() - start, thread_delay);
+  delete ev;
+
+  PlatformThread::Join(thread);
+}
+
+// Tests that a sub-ms TimedWait doesn't time out promptly.
+TEST(WaitableEventTest, SubMsTimedWait) {
+  WaitableEvent ev(WaitableEvent::ResetPolicy::AUTOMATIC,
+                   WaitableEvent::InitialState::NOT_SIGNALED);
+
+  TimeDelta delay = TimeDelta::FromMicroseconds(900);
+  TimeTicks start_time = TimeTicks::Now();
+  ev.TimedWait(delay);
+  EXPECT_GE(TimeTicks::Now() - start_time, delay);
+}
+
+// Tests that TimedWaitUntil can be safely used with various end_time deadline
+// values.
+TEST(WaitableEventTest, TimedWaitUntil) {
+  WaitableEvent ev(WaitableEvent::ResetPolicy::AUTOMATIC,
+                   WaitableEvent::InitialState::NOT_SIGNALED);
+
+  TimeTicks start_time(TimeTicks::Now());
+  TimeDelta delay = TimeDelta::FromMilliseconds(10);
+
+  // Should be OK to wait for the current time or time in the past.
+  // That should end promptly and be equivalent to IsSignaled().
+  EXPECT_FALSE(ev.TimedWaitUntil(start_time));
+  EXPECT_FALSE(ev.TimedWaitUntil(start_time - delay));
+
+  // Should be OK to wait for zero TimeTicks().
+  EXPECT_FALSE(ev.TimedWaitUntil(TimeTicks()));
+
+  // Waiting for a time in the future shouldn't end before the deadline
+  // if the event isn't signaled.
+  EXPECT_FALSE(ev.TimedWaitUntil(start_time + delay));
+  EXPECT_GE(TimeTicks::Now() - start_time, delay);
+
+  // Test that passing TimeTicks::Max to TimedWaitUntil is valid and isn't
+  // the same as passing TimeTicks(). Also verifies that signaling event
+  // ends the wait promptly.
+  WaitableEventSignaler signaler(delay, &ev);
+  PlatformThreadHandle thread;
+  start_time = TimeTicks::Now();
+  PlatformThread::Create(0, &signaler, &thread);
+
+  EXPECT_TRUE(ev.TimedWaitUntil(TimeTicks::Max()));
+  EXPECT_GE(TimeTicks::Now() - start_time, delay);
+
+  PlatformThread::Join(thread);
+}
+
+}  // namespace base
diff --git a/base/synchronization/waitable_event_watcher.h b/base/synchronization/waitable_event_watcher.h
new file mode 100644
index 0000000..51728e3
--- /dev/null
+++ b/base/synchronization/waitable_event_watcher.h
@@ -0,0 +1,160 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SYNCHRONIZATION_WAITABLE_EVENT_WATCHER_H_
+#define BASE_SYNCHRONIZATION_WAITABLE_EVENT_WATCHER_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/sequenced_task_runner.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include "base/win/object_watcher.h"
+#include "base/win/scoped_handle.h"
+#elif defined(OS_MACOSX)
+#include <dispatch/dispatch.h>
+
+#include "base/mac/scoped_dispatch_object.h"
+#include "base/memory/weak_ptr.h"
+#include "base/synchronization/waitable_event.h"
+#else
+#include "base/sequence_checker.h"
+#include "base/synchronization/waitable_event.h"
+#endif
+
+#if !defined(OS_WIN)
+#include "base/callback.h"
+#endif
+
+namespace base {
+
+class Flag;
+class AsyncWaiter;
+class WaitableEvent;
+
+// This class provides a way to wait on a WaitableEvent asynchronously.
+//
+// Each instance of this object can be waiting on a single WaitableEvent. When
+// the waitable event is signaled, a callback is invoked on the sequence that
+// called StartWatching(). This callback can be deleted by deleting the waiter.
+//
+// Typical usage:
+//
+//   class MyClass {
+//    public:
+//     void DoStuffWhenSignaled(WaitableEvent *waitable_event) {
+//       watcher_.StartWatching(
+//           waitable_event,
+//           base::BindOnce(&MyClass::OnWaitableEventSignaled, this),
+//           base::SequencedTaskRunnerHandle::Get());
+//     }
+//    private:
+//     void OnWaitableEventSignaled(WaitableEvent* waitable_event) {
+//       // OK, time to do stuff!
+//     }
+//     base::WaitableEventWatcher watcher_;
+//   };
+//
+// In the above example, MyClass wants to "do stuff" when waitable_event
+// becomes signaled. WaitableEventWatcher makes this task easy. When MyClass
+// goes out of scope, the watcher_ will be destroyed, and there is no need to
+// worry about OnWaitableEventSignaled being called on a deleted MyClass
+// pointer.
+//
+// BEWARE: With automatically reset WaitableEvents, a signal may be lost if it
+// occurs just before a WaitableEventWatcher is deleted. There is currently no
+// safe way to stop watching an automatic reset WaitableEvent without possibly
+// missing a signal.
+//
+// NOTE: you /are/ allowed to delete the WaitableEvent while still waiting on
+// it with a Watcher. But pay attention: if the event was signaled and deleted
+// right after, the callback may be called with a deleted WaitableEvent
+// pointer.
+
+class BASE_EXPORT WaitableEventWatcher
+#if defined(OS_WIN)
+    : public win::ObjectWatcher::Delegate
+#endif
+{
+ public:
+  using EventCallback = OnceCallback<void(WaitableEvent*)>;
+
+  WaitableEventWatcher();
+
+#if defined(OS_WIN)
+  ~WaitableEventWatcher() override;
+#else
+  ~WaitableEventWatcher();
+#endif
+
+  // When |event| is signaled, |callback| is called on the sequence that called
+  // StartWatching().
+  // |task_runner| is used for asynchronous executions of calling |callback|.
+  bool StartWatching(WaitableEvent* event,
+                     EventCallback callback,
+                     scoped_refptr<SequencedTaskRunner> task_runner);
+
+  // Cancel the current watch. Must be called from the same sequence which
+  // started the watch.
+  //
+  // Does nothing if no event is being watched, nor if the watch has completed.
+  // The callback will *not* be called for the current watch after this
+  // function returns. Since the callback runs on the same sequence as this
+  // function, it cannot be called during this function either.
+  void StopWatching();
+
+ private:
+#if defined(OS_WIN)
+  void OnObjectSignaled(HANDLE h) override;
+
+  // Duplicated handle of the event passed to StartWatching().
+  win::ScopedHandle duplicated_event_handle_;
+
+  // A watcher for |duplicated_event_handle_|. The handle MUST outlive
+  // |watcher_|.
+  win::ObjectWatcher watcher_;
+
+  EventCallback callback_;
+  WaitableEvent* event_ = nullptr;
+#elif defined(OS_MACOSX)
+  // Invokes the callback and resets the source. Must be called on the task
+  // runner on which StartWatching() was called.
+  void InvokeCallback();
+
+  // Closure bound to the event being watched. This is null if nothing is
+  // being watched.
+  OnceClosure callback_;
+
+  // A reference to the receive right that is kept alive while a watcher
+  // is waiting. Null if no event is being watched.
+  scoped_refptr<WaitableEvent::ReceiveRight> receive_right_;
+
+  // A TYPE_MACH_RECV dispatch source on |receive_right_|. When a receive event
+  // is delivered, the message queue will be peeked and the bound |callback_|
+  // may be run. This will be null if nothing is currently being watched.
+  ScopedDispatchObject<dispatch_source_t> source_;
+
+  // Used to vend a weak pointer for calling InvokeCallback() from the
+  // |source_| event handler.
+  WeakPtrFactory<WaitableEventWatcher> weak_ptr_factory_;
+#else
+  // Instantiated in StartWatching(). Set before the callback runs. Reset in
+  // StopWatching() or StartWatching().
+  scoped_refptr<Flag> cancel_flag_;
+
+  // Enqueued in the wait list of the watched WaitableEvent.
+  AsyncWaiter* waiter_ = nullptr;
+
+  // Kernel of the watched WaitableEvent.
+  scoped_refptr<WaitableEvent::WaitableEventKernel> kernel_;
+
+  // Ensures that StartWatching() and StopWatching() are called on the same
+  // sequence.
+  SequenceChecker sequence_checker_;
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(WaitableEventWatcher);
+};
+
+}  // namespace base
+
+#endif  // BASE_SYNCHRONIZATION_WAITABLE_EVENT_WATCHER_H_
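A compilable expansion of the class-comment example above (illustrative
sketch; base::Unretained(this) is assumed safe only because |watcher_| is
destroyed together with MyClass, which cancels any pending watch):

#include "base/bind.h"
#include "base/synchronization/waitable_event.h"
#include "base/synchronization/waitable_event_watcher.h"
#include "base/threading/sequenced_task_runner_handle.h"

class MyClass {
 public:
  void DoStuffWhenSignaled(base::WaitableEvent* waitable_event) {
    watcher_.StartWatching(
        waitable_event,
        base::BindOnce(&MyClass::OnWaitableEventSignaled,
                       base::Unretained(this)),
        base::SequencedTaskRunnerHandle::Get());
  }

 private:
  void OnWaitableEventSignaled(base::WaitableEvent* waitable_event) {
    // |watcher_| dies with |this|, canceling the watch, so the
    // Unretained(this) above cannot be invoked after destruction.
  }

  base::WaitableEventWatcher watcher_;
};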
diff --git a/base/synchronization/waitable_event_watcher_mac.cc b/base/synchronization/waitable_event_watcher_mac.cc
new file mode 100644
index 0000000..772fd10
--- /dev/null
+++ b/base/synchronization/waitable_event_watcher_mac.cc
@@ -0,0 +1,113 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/synchronization/waitable_event_watcher.h"
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/threading/sequenced_task_runner_handle.h"
+
+namespace base {
+
+WaitableEventWatcher::WaitableEventWatcher() : weak_ptr_factory_(this) {}
+
+WaitableEventWatcher::~WaitableEventWatcher() {
+  StopWatching();
+}
+
+bool WaitableEventWatcher::StartWatching(
+    WaitableEvent* event,
+    EventCallback callback,
+    scoped_refptr<SequencedTaskRunner> task_runner) {
+  DCHECK(task_runner->RunsTasksInCurrentSequence());
+  DCHECK(!source_ || dispatch_source_testcancel(source_));
+
+  // Keep a reference to the receive right, so that if the event is deleted
+  // out from under the watcher, a signal can still be observed.
+  receive_right_ = event->receive_right_;
+
+  callback_ = BindOnce(std::move(callback), event);
+
+  // Locals for capture by the block. Accessing anything through the |this| or
+  // |event| pointers is not safe, since either may have been deleted by the
+  // time the handler block is invoked.
+  WeakPtr<WaitableEventWatcher> weak_this = weak_ptr_factory_.GetWeakPtr();
+  const bool auto_reset =
+      event->policy_ == WaitableEvent::ResetPolicy::AUTOMATIC;
+
+  // Auto-reset events always use a dispatch source. Manual-reset events
+  // only do so if dispatch provides reliable delivery, otherwise a manual
+  // watcher list is used.
+  if (!WaitableEvent::UseSlowWatchList(event->policy_)) {
+    // Use the global concurrent queue here, since it is only used to thunk
+    // to the real callback on the target task runner.
+    source_.reset(dispatch_source_create(
+        DISPATCH_SOURCE_TYPE_MACH_RECV, receive_right_->Name(), 0,
+        dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0)));
+
+    // Additional locals for block capture.
+    dispatch_source_t source = source_.get();
+    mach_port_t name = receive_right_->Name();
+
+    dispatch_source_set_event_handler(source_, ^{
+      // For automatic-reset events, only fire the callback if this watcher
+      // can claim/dequeue the event. For manual-reset events, all watchers can
+      // be called back.
+      if (auto_reset && !WaitableEvent::PeekPort(name, true)) {
+        return;
+      }
+
+      // The event has been consumed. A watcher is one-shot, so cancel the
+      // source to prevent receiving future event signals.
+      dispatch_source_cancel(source);
+
+      task_runner->PostTask(
+          FROM_HERE,
+          BindOnce(&WaitableEventWatcher::InvokeCallback, weak_this));
+    });
+    dispatch_resume(source_);
+  } else {
+    // The |event->watch_list_| closures can be run from any thread, so bind
+    // the callback as an invocation of PostTask.
+    OnceClosure watcher =
+        BindOnce(IgnoreResult(&TaskRunner::PostTask), task_runner, FROM_HERE,
+                 BindOnce(&WaitableEventWatcher::InvokeCallback, weak_this));
+
+    // Hold an additional reference to the ReceiveRight, in case |watcher|
+    // runs and deletes the event while the lock is held.
+    // Hold the lock for the duration of IsSignaled() so that if Signal()
+    // is called by another thread, it waits for this to be added to the
+    // watch list.
+    scoped_refptr<WaitableEvent::ReceiveRight> receive_right(receive_right_);
+    AutoLock lock(receive_right->SlowWatchList()->lock);
+    if (event->IsSignaled()) {
+      std::move(watcher).Run();
+      return true;
+    }
+    receive_right_->SlowWatchList()->list.push_back(std::move(watcher));
+  }
+
+  return true;
+}
+
+void WaitableEventWatcher::StopWatching() {
+  callback_.Reset();
+  receive_right_ = nullptr;
+  if (source_) {
+    dispatch_source_cancel(source_);
+    source_.reset();
+  }
+}
+
+void WaitableEventWatcher::InvokeCallback() {
+  // The callback can be null if StopWatching() is called between signaling
+  // and the |callback_| getting run on the target task runner.
+  if (callback_.is_null())
+    return;
+  source_.reset();
+  receive_right_ = nullptr;
+  std::move(callback_).Run();
+}
+
+}  // namespace base
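
For orientation, a minimal sketch of how a caller might drive the watcher API
implemented above. The event, callback, and task-runner choices are
illustrative, not part of this change:

    base::WaitableEvent event(base::WaitableEvent::ResetPolicy::AUTOMATIC,
                              base::WaitableEvent::InitialState::NOT_SIGNALED);
    base::WaitableEventWatcher watcher;
    watcher.StartWatching(
        &event,
        base::BindOnce([](base::WaitableEvent* e) { /* e was signaled */ }),
        base::SequencedTaskRunnerHandle::Get());
    event.Signal();  // InvokeCallback() is posted to the current sequence.
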
diff --git a/base/synchronization/waitable_event_watcher_posix.cc b/base/synchronization/waitable_event_watcher_posix.cc
new file mode 100644
index 0000000..2b296da
--- /dev/null
+++ b/base/synchronization/waitable_event_watcher_posix.cc
@@ -0,0 +1,234 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/synchronization/waitable_event_watcher.h"
+
+#include <utility>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/sequenced_task_runner_handle.h"
+
+namespace base {
+
+// -----------------------------------------------------------------------------
+// WaitableEventWatcher (async waits).
+//
+// The basic design is that we add an AsyncWaiter to the wait-list of the event.
+// That AsyncWaiter has a pointer to SequencedTaskRunner, and a Task to be
+// posted to it. The task ends up calling the callback when it runs on the
+// sequence.
+//
+// Since the wait can be canceled, we have a thread-safe Flag object which is
+// set when the wait has been canceled. At each stage in the above, we check the
+// flag before going onto the next stage. Since the wait may only be canceled in
+// the sequence which runs the Task, we are assured that the callback cannot be
+// called after canceling...
+
+// -----------------------------------------------------------------------------
+// A thread-safe, reference-counted, write-once flag.
+// -----------------------------------------------------------------------------
+class Flag : public RefCountedThreadSafe<Flag> {
+ public:
+  Flag() { flag_ = false; }
+
+  void Set() {
+    AutoLock locked(lock_);
+    flag_ = true;
+  }
+
+  bool value() const {
+    AutoLock locked(lock_);
+    return flag_;
+  }
+
+ private:
+  friend class RefCountedThreadSafe<Flag>;
+  ~Flag() = default;
+
+  mutable Lock lock_;
+  bool flag_;
+
+  DISALLOW_COPY_AND_ASSIGN(Flag);
+};
+
+// -----------------------------------------------------------------------------
+// This is an asynchronous waiter which posts a task to a SequencedTaskRunner
+// when fired. An AsyncWaiter may only be in a single wait-list.
+// -----------------------------------------------------------------------------
+class AsyncWaiter : public WaitableEvent::Waiter {
+ public:
+  AsyncWaiter(scoped_refptr<SequencedTaskRunner> task_runner,
+              base::OnceClosure callback,
+              Flag* flag)
+      : task_runner_(std::move(task_runner)),
+        callback_(std::move(callback)),
+        flag_(flag) {}
+
+  bool Fire(WaitableEvent* event) override {
+    // Post the callback if we haven't been cancelled.
+    if (!flag_->value())
+      task_runner_->PostTask(FROM_HERE, std::move(callback_));
+
+    // We are removed from the wait-list by the WaitableEvent itself. It only
+    // remains to delete ourselves.
+    delete this;
+
+    // We can always return true because an AsyncWaiter is never in two
+    // different wait-lists at the same time.
+    return true;
+  }
+
+  // See StopWatching for discussion
+  bool Compare(void* tag) override { return tag == flag_.get(); }
+
+ private:
+  const scoped_refptr<SequencedTaskRunner> task_runner_;
+  base::OnceClosure callback_;
+  const scoped_refptr<Flag> flag_;
+};
+
+// -----------------------------------------------------------------------------
+// For async waits we need to run a callback on a sequence. We do this by
+// posting an AsyncCallbackHelper task, which calls the callback and keeps track
+// of when the event is canceled.
+// -----------------------------------------------------------------------------
+void AsyncCallbackHelper(Flag* flag,
+                         WaitableEventWatcher::EventCallback callback,
+                         WaitableEvent* event) {
+  // Runs on the sequence that called StartWatching().
+  if (!flag->value()) {
+    // This is to let the WaitableEventWatcher know that the event has occurred.
+    flag->Set();
+    std::move(callback).Run(event);
+  }
+}
+
+WaitableEventWatcher::WaitableEventWatcher() {
+  sequence_checker_.DetachFromSequence();
+}
+
+WaitableEventWatcher::~WaitableEventWatcher() {
+  // The destructor may be called from a different sequence than StartWatching()
+  // when there is no active watch. To avoid triggering a DCHECK in
+  // StopWatching(), do not call it when there is no active watch.
+  if (cancel_flag_ && !cancel_flag_->value())
+    StopWatching();
+}
+
+// -----------------------------------------------------------------------------
+// The Handle is how the user cancels a wait. After deleting the Handle we
+// ensure that the delegate cannot be called.
+// -----------------------------------------------------------------------------
+bool WaitableEventWatcher::StartWatching(
+    WaitableEvent* event,
+    EventCallback callback,
+    scoped_refptr<SequencedTaskRunner> task_runner) {
+  DCHECK(sequence_checker_.CalledOnValidSequence());
+
+  // A user may call StartWatching from within the callback function. In this
+  // case, we won't know that we have finished watching, except that the Flag
+  // will have been set in AsyncCallbackHelper().
+  if (cancel_flag_.get() && cancel_flag_->value())
+    cancel_flag_ = nullptr;
+
+  DCHECK(!cancel_flag_) << "StartWatching called while still watching";
+
+  cancel_flag_ = new Flag;
+  OnceClosure internal_callback =
+      base::BindOnce(&AsyncCallbackHelper, base::RetainedRef(cancel_flag_),
+                     std::move(callback), event);
+  WaitableEvent::WaitableEventKernel* kernel = event->kernel_.get();
+
+  AutoLock locked(kernel->lock_);
+
+  if (kernel->signaled_) {
+    if (!kernel->manual_reset_)
+      kernel->signaled_ = false;
+
+    // No hairpinning - we can't call the delegate directly here. We have to
+    // post a task to |task_runner| as usual.
+    task_runner->PostTask(FROM_HERE, std::move(internal_callback));
+    return true;
+  }
+
+  kernel_ = kernel;
+  waiter_ = new AsyncWaiter(std::move(task_runner),
+                            std::move(internal_callback), cancel_flag_.get());
+  event->Enqueue(waiter_);
+
+  return true;
+}
+
+void WaitableEventWatcher::StopWatching() {
+  DCHECK(sequence_checker_.CalledOnValidSequence());
+
+  if (!cancel_flag_.get())  // if not currently watching...
+    return;
+
+  if (cancel_flag_->value()) {
+    // In this case, the event has fired, but we haven't figured that out yet.
+    // The WaitableEvent may have been deleted too.
+    cancel_flag_ = nullptr;
+    return;
+  }
+
+  if (!kernel_.get()) {
+    // We have no kernel. This means that we never enqueued a Waiter on an
+    // event because the event was already signaled when StartWatching was
+    // called.
+    //
+    // In this case, a task was enqueued on the MessageLoop and will run.
+    // We set the flag in case the task hasn't yet run. The flag will stop the
+    // delegate from getting called. If the task has run, then we have the
+    // last reference to the flag and it will be deleted immediately after.
+    cancel_flag_->Set();
+    cancel_flag_ = nullptr;
+    return;
+  }
+
+  AutoLock locked(kernel_->lock_);
+  // We have a lock on the kernel. No one else can signal the event while we
+  // have it.
+
+  // We have a possible ABA issue here. If Dequeue was to compare only the
+  // pointer values then it's possible that the AsyncWaiter could have been
+  // fired, freed and the memory reused for a different Waiter which was
+  // enqueued in the same wait-list. We would think that that waiter was our
+  // AsyncWaiter and remove it.
+  //
+  // To stop this, Dequeue also takes a tag argument which is passed to the
+  // virtual Compare function before the two are considered a match. So we need
+  // a tag which is good for the lifetime of this handle: the Flag. Since we
+  // have a reference to the Flag, its memory cannot be reused while this object
+  // still exists. So if we find a waiter with the correct pointer value, and
+  // which shares a Flag pointer, we have a real match.
+  if (kernel_->Dequeue(waiter_, cancel_flag_.get())) {
+    // Case 2: the waiter hasn't been signaled yet; it was still on the wait
+    // list. We've removed it, thus we can delete it and the task (which cannot
+    // have been enqueued with the MessageLoop because the waiter was never
+    // signaled).
+    delete waiter_;
+    cancel_flag_ = nullptr;
+    return;
+  }
+
+  // Case 3: the waiter isn't on the wait-list, thus it was signaled. It may not
+  // have run yet, so we set the flag to tell it not to bother enqueuing the
+  // task on the SequencedTaskRunner, but to delete it instead. The Waiter
+  // deletes itself once run.
+  cancel_flag_->Set();
+  cancel_flag_ = nullptr;
+
+  // If the waiter has already run then the task has been enqueued. If the Task
+  // hasn't yet run, the flag will stop the delegate from getting called. (This
+  // is thread safe because one may only delete a Handle from the sequence that
+  // called StartWatching()).
+  //
+  // If the delegate has already been called then we have nothing to do. The
+  // task has been deleted by the MessageLoop.
+}
+
+}  // namespace base
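
The cancellation handshake above can be hard to follow; condensed into a
sketch using the names defined in this file (simplified control flow, not
verbatim code):

    // StartWatching(): create the write-once flag and hand it to the waiter.
    scoped_refptr<Flag> flag(new Flag);
    // The AsyncWaiter, when fired, posts AsyncCallbackHelper(flag, ...).
    // StopWatching(): setting the flag neutralizes a posted-but-unrun task.
    flag->Set();
    // AsyncCallbackHelper(): checks the flag before running the callback.
    if (!flag->value()) { /* run the user callback */ }  // Skipped after Set().
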
diff --git a/base/synchronization/waitable_event_watcher_unittest.cc b/base/synchronization/waitable_event_watcher_unittest.cc
new file mode 100644
index 0000000..ec056ef
--- /dev/null
+++ b/base/synchronization/waitable_event_watcher_unittest.cc
@@ -0,0 +1,429 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/synchronization/waitable_event_watcher.h"
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/sequenced_task_runner_handle.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+// The message loops on which each waitable event watcher should be tested.
+const MessageLoop::Type testing_message_loops[] = {
+  MessageLoop::TYPE_DEFAULT,
+  MessageLoop::TYPE_IO,
+#if !defined(OS_IOS)  // iOS does not allow direct running of the UI loop.
+  MessageLoop::TYPE_UI,
+#endif
+};
+
+void QuitWhenSignaled(WaitableEvent* event) {
+  RunLoop::QuitCurrentWhenIdleDeprecated();
+}
+
+class DecrementCountContainer {
+ public:
+  explicit DecrementCountContainer(int* counter) : counter_(counter) {}
+  void OnWaitableEventSignaled(WaitableEvent* object) {
+    // NOTE: |object| may already be deleted.
+    --(*counter_);
+  }
+
+ private:
+  int* counter_;
+};
+
+}  // namespace
+
+class WaitableEventWatcherTest
+    : public testing::TestWithParam<MessageLoop::Type> {};
+
+TEST_P(WaitableEventWatcherTest, BasicSignalManual) {
+  MessageLoop message_loop(GetParam());
+
+  // A manual-reset event that is not yet signaled.
+  WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+                      WaitableEvent::InitialState::NOT_SIGNALED);
+
+  WaitableEventWatcher watcher;
+  watcher.StartWatching(&event, BindOnce(&QuitWhenSignaled),
+                        SequencedTaskRunnerHandle::Get());
+
+  event.Signal();
+
+  RunLoop().Run();
+
+  EXPECT_TRUE(event.IsSignaled());
+}
+
+TEST_P(WaitableEventWatcherTest, BasicSignalAutomatic) {
+  MessageLoop message_loop(GetParam());
+
+  WaitableEvent event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                      WaitableEvent::InitialState::NOT_SIGNALED);
+
+  WaitableEventWatcher watcher;
+  watcher.StartWatching(&event, BindOnce(&QuitWhenSignaled),
+                        SequencedTaskRunnerHandle::Get());
+
+  event.Signal();
+
+  RunLoop().Run();
+
+  // The WaitableEventWatcher consumes the event signal.
+  EXPECT_FALSE(event.IsSignaled());
+}
+
+TEST_P(WaitableEventWatcherTest, BasicCancel) {
+  MessageLoop message_loop(GetParam());
+
+  // A manual-reset event that is not yet signaled.
+  WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+                      WaitableEvent::InitialState::NOT_SIGNALED);
+
+  WaitableEventWatcher watcher;
+
+  watcher.StartWatching(&event, BindOnce(&QuitWhenSignaled),
+                        SequencedTaskRunnerHandle::Get());
+
+  watcher.StopWatching();
+}
+
+TEST_P(WaitableEventWatcherTest, CancelAfterSet) {
+  MessageLoop message_loop(GetParam());
+
+  // A manual-reset event that is not yet signaled.
+  WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+                      WaitableEvent::InitialState::NOT_SIGNALED);
+
+  WaitableEventWatcher watcher;
+
+  int counter = 1;
+  DecrementCountContainer delegate(&counter);
+  WaitableEventWatcher::EventCallback callback = BindOnce(
+      &DecrementCountContainer::OnWaitableEventSignaled, Unretained(&delegate));
+  watcher.StartWatching(&event, std::move(callback),
+                        SequencedTaskRunnerHandle::Get());
+
+  event.Signal();
+
+  // Let the background thread do its business
+  PlatformThread::Sleep(TimeDelta::FromMilliseconds(30));
+
+  watcher.StopWatching();
+
+  RunLoop().RunUntilIdle();
+
+  // Our delegate should not have fired.
+  EXPECT_EQ(1, counter);
+}
+
+TEST_P(WaitableEventWatcherTest, OutlivesMessageLoop) {
+  // Simulate a MessageLoop that dies before a WaitableEventWatcher. This
+  // ordinarily doesn't happen when people use the Thread class, but it can
+  // happen when people use the Singleton pattern or atexit.
+  WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+                      WaitableEvent::InitialState::NOT_SIGNALED);
+  {
+    std::unique_ptr<WaitableEventWatcher> watcher;
+    {
+      MessageLoop message_loop(GetParam());
+      watcher = std::make_unique<WaitableEventWatcher>();
+
+      watcher->StartWatching(&event, BindOnce(&QuitWhenSignaled),
+                             SequencedTaskRunnerHandle::Get());
+    }
+  }
+}
+
+TEST_P(WaitableEventWatcherTest, SignaledAtStartManual) {
+  MessageLoop message_loop(GetParam());
+
+  WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+                      WaitableEvent::InitialState::SIGNALED);
+
+  WaitableEventWatcher watcher;
+  watcher.StartWatching(&event, BindOnce(&QuitWhenSignaled),
+                        SequencedTaskRunnerHandle::Get());
+
+  RunLoop().Run();
+
+  EXPECT_TRUE(event.IsSignaled());
+}
+
+TEST_P(WaitableEventWatcherTest, SignaledAtStartAutomatic) {
+  MessageLoop message_loop(GetParam());
+
+  WaitableEvent event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                      WaitableEvent::InitialState::SIGNALED);
+
+  WaitableEventWatcher watcher;
+  watcher.StartWatching(&event, BindOnce(&QuitWhenSignaled),
+                        SequencedTaskRunnerHandle::Get());
+
+  RunLoop().Run();
+
+  // The watcher consumes the event signal.
+  EXPECT_FALSE(event.IsSignaled());
+}
+
+TEST_P(WaitableEventWatcherTest, StartWatchingInCallback) {
+  MessageLoop message_loop(GetParam());
+
+  WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+                      WaitableEvent::InitialState::NOT_SIGNALED);
+
+  WaitableEventWatcher watcher;
+  watcher.StartWatching(
+      &event,
+      BindOnce(
+          [](WaitableEventWatcher* watcher, WaitableEvent* event) {
+            // |event| is manual, so the second watcher will run
+            // immediately.
+            watcher->StartWatching(event, BindOnce(&QuitWhenSignaled),
+                                   SequencedTaskRunnerHandle::Get());
+          },
+          &watcher),
+      SequencedTaskRunnerHandle::Get());
+
+  event.Signal();
+
+  RunLoop().Run();
+}
+
+TEST_P(WaitableEventWatcherTest, MultipleWatchersManual) {
+  MessageLoop message_loop(GetParam());
+
+  WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+                      WaitableEvent::InitialState::NOT_SIGNALED);
+
+  int counter1 = 0;
+  int counter2 = 0;
+
+  auto callback = [](RunLoop* run_loop, int* counter, WaitableEvent* event) {
+    ++(*counter);
+    run_loop->QuitWhenIdle();
+  };
+
+  RunLoop run_loop;
+
+  WaitableEventWatcher watcher1;
+  watcher1.StartWatching(
+      &event, BindOnce(callback, Unretained(&run_loop), Unretained(&counter1)),
+      SequencedTaskRunnerHandle::Get());
+
+  WaitableEventWatcher watcher2;
+  watcher2.StartWatching(
+      &event, BindOnce(callback, Unretained(&run_loop), Unretained(&counter2)),
+      SequencedTaskRunnerHandle::Get());
+
+  event.Signal();
+  run_loop.Run();
+
+  EXPECT_EQ(1, counter1);
+  EXPECT_EQ(1, counter2);
+  EXPECT_TRUE(event.IsSignaled());
+}
+
+// Tests that only one async waiter gets called back for an auto-reset event.
+TEST_P(WaitableEventWatcherTest, MultipleWatchersAutomatic) {
+  MessageLoop message_loop(GetParam());
+
+  WaitableEvent event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                      WaitableEvent::InitialState::NOT_SIGNALED);
+
+  int counter1 = 0;
+  int counter2 = 0;
+
+  auto callback = [](RunLoop** run_loop, int* counter, WaitableEvent* event) {
+    ++(*counter);
+    (*run_loop)->QuitWhenIdle();
+  };
+
+  // The same RunLoop instance cannot be Run more than once, and it is
+  // undefined which watcher will get called back first. Have the callback
+  // quit the loop through |current_run_loop|, which is updated before each
+  // Run.
+  RunLoop* current_run_loop;
+
+  WaitableEventWatcher watcher1;
+  watcher1.StartWatching(
+      &event,
+      BindOnce(callback, Unretained(&current_run_loop), Unretained(&counter1)),
+      SequencedTaskRunnerHandle::Get());
+
+  WaitableEventWatcher watcher2;
+  watcher2.StartWatching(
+      &event,
+      BindOnce(callback, Unretained(&current_run_loop), Unretained(&counter2)),
+      SequencedTaskRunnerHandle::Get());
+
+  event.Signal();
+  {
+    RunLoop run_loop;
+    current_run_loop = &run_loop;
+    run_loop.Run();
+  }
+
+  // Only one of the waiters should have been signaled.
+  EXPECT_TRUE((counter1 == 1) ^ (counter2 == 1));
+
+  EXPECT_FALSE(event.IsSignaled());
+
+  event.Signal();
+  {
+    RunLoop run_loop;
+    current_run_loop = &run_loop;
+    run_loop.Run();
+  }
+
+  EXPECT_FALSE(event.IsSignaled());
+
+  // The other watcher should have been signaled.
+  EXPECT_EQ(1, counter1);
+  EXPECT_EQ(1, counter2);
+}
+
+// To help detect errors around deleting WaitableEventWatcher, an additional
+// bool parameter is used to test sleeping between watching and deletion.
+class WaitableEventWatcherDeletionTest
+    : public testing::TestWithParam<std::tuple<MessageLoop::Type, bool>> {};
+
+TEST_P(WaitableEventWatcherDeletionTest, DeleteUnder) {
+  MessageLoop::Type message_loop_type;
+  bool delay_after_delete;
+  std::tie(message_loop_type, delay_after_delete) = GetParam();
+
+  // Delete the WaitableEvent out from under the Watcher. This is explicitly
+  // allowed by the interface.
+
+  MessageLoop message_loop(message_loop_type);
+
+  {
+    WaitableEventWatcher watcher;
+
+    auto* event = new WaitableEvent(WaitableEvent::ResetPolicy::AUTOMATIC,
+                                    WaitableEvent::InitialState::NOT_SIGNALED);
+
+    watcher.StartWatching(event, BindOnce(&QuitWhenSignaled),
+                          SequencedTaskRunnerHandle::Get());
+
+    if (delay_after_delete) {
+      // On Windows, this sleep() improves the chance of catching some
+      // problems. It postpones the destruction of |watcher| (which
+      // immediately cancels the wait) and gives the background thread
+      // created by the OS some time to run. Unfortunately, that thread is
+      // under OS control and we can't manipulate it directly.
+      PlatformThread::Sleep(TimeDelta::FromMilliseconds(30));
+    }
+
+    delete event;
+  }
+}
+
+TEST_P(WaitableEventWatcherDeletionTest, SignalAndDelete) {
+  MessageLoop::Type message_loop_type;
+  bool delay_after_delete;
+  std::tie(message_loop_type, delay_after_delete) = GetParam();
+
+  // Signal and immediately delete the WaitableEvent out from under the Watcher.
+
+  MessageLoop message_loop(message_loop_type);
+
+  {
+    WaitableEventWatcher watcher;
+
+    auto event = std::make_unique<WaitableEvent>(
+        WaitableEvent::ResetPolicy::AUTOMATIC,
+        WaitableEvent::InitialState::NOT_SIGNALED);
+
+    watcher.StartWatching(event.get(), BindOnce(&QuitWhenSignaled),
+                          SequencedTaskRunnerHandle::Get());
+    event->Signal();
+    event.reset();
+
+    if (delay_after_delete) {
+      // On Windows, this sleep() improves the chance of catching some
+      // problems. It postpones the destruction of |watcher| (which
+      // immediately cancels the wait) and gives the background thread
+      // created by the OS some time to run. Unfortunately, that thread is
+      // under OS control and we can't manipulate it directly.
+      PlatformThread::Sleep(TimeDelta::FromMilliseconds(30));
+    }
+
+    // Wait for the watcher callback.
+    RunLoop().Run();
+  }
+}
+
+// Tests deleting the WaitableEventWatcher between signaling the event and
+// when the callback should be run.
+TEST_P(WaitableEventWatcherDeletionTest, DeleteWatcherBeforeCallback) {
+  MessageLoop::Type message_loop_type;
+  bool delay_after_delete;
+  std::tie(message_loop_type, delay_after_delete) = GetParam();
+
+  MessageLoop message_loop(message_loop_type);
+  scoped_refptr<SingleThreadTaskRunner> task_runner =
+      message_loop.task_runner();
+
+  // Flag used to ensure that the |watcher_callback| never runs.
+  bool did_callback = false;
+
+  WaitableEvent event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                      WaitableEvent::InitialState::NOT_SIGNALED);
+  auto watcher = std::make_unique<WaitableEventWatcher>();
+
+  // Queue up a series of tasks:
+  // 1. StartWatching the WaitableEvent
+  // 2. Signal the event (which will result in another task getting posted to
+  //    the |task_runner|)
+  // 3. Delete the WaitableEventWatcher
+  // 4. The WaitableEventWatcher callback from #2 would run, but must not,
+  //    because the watcher was deleted in #3.
+
+  WaitableEventWatcher::EventCallback watcher_callback = BindOnce(
+      [](bool* did_callback, WaitableEvent*) {
+        *did_callback = true;
+      },
+      Unretained(&did_callback));
+
+  task_runner->PostTask(
+      FROM_HERE, BindOnce(IgnoreResult(&WaitableEventWatcher::StartWatching),
+                          Unretained(watcher.get()), Unretained(&event),
+                          std::move(watcher_callback), task_runner));
+  task_runner->PostTask(FROM_HERE,
+                        BindOnce(&WaitableEvent::Signal, Unretained(&event)));
+  task_runner->DeleteSoon(FROM_HERE, std::move(watcher));
+  if (delay_after_delete) {
+    task_runner->PostTask(FROM_HERE, BindOnce(&PlatformThread::Sleep,
+                                              TimeDelta::FromMilliseconds(30)));
+  }
+
+  RunLoop().RunUntilIdle();
+
+  EXPECT_FALSE(did_callback);
+}
+
+INSTANTIATE_TEST_CASE_P(,
+                        WaitableEventWatcherTest,
+                        testing::ValuesIn(testing_message_loops));
+
+INSTANTIATE_TEST_CASE_P(
+    ,
+    WaitableEventWatcherDeletionTest,
+    testing::Combine(testing::ValuesIn(testing_message_loops),
+                     testing::Bool()));
+
+}  // namespace base
diff --git a/base/synchronization/waitable_event_watcher_win.cc b/base/synchronization/waitable_event_watcher_win.cc
new file mode 100644
index 0000000..6003fd4
--- /dev/null
+++ b/base/synchronization/waitable_event_watcher_win.cc
@@ -0,0 +1,61 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/synchronization/waitable_event_watcher.h"
+
+#include "base/compiler_specific.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/win/object_watcher.h"
+
+#include <windows.h>
+
+namespace base {
+
+WaitableEventWatcher::WaitableEventWatcher() = default;
+
+WaitableEventWatcher::~WaitableEventWatcher() {}
+
+bool WaitableEventWatcher::StartWatching(
+    WaitableEvent* event,
+    EventCallback callback,
+    scoped_refptr<SequencedTaskRunner> task_runner) {
+  DCHECK(event);
+  callback_ = std::move(callback);
+  event_ = event;
+
+  // Duplicate and hold the event handle until the callback is run or
+  // watching is stopped.
+  HANDLE handle = nullptr;
+  if (!::DuplicateHandle(::GetCurrentProcess(),  // hSourceProcessHandle
+                         event->handle(),
+                         ::GetCurrentProcess(),  // hTargetProcessHandle
+                         &handle,
+                         0,      // dwDesiredAccess ignored due to SAME_ACCESS
+                         FALSE,  // !bInheritHandle
+                         DUPLICATE_SAME_ACCESS)) {
+    return false;
+  }
+  duplicated_event_handle_.Set(handle);
+  return watcher_.StartWatchingOnce(handle, this);
+}
+
+void WaitableEventWatcher::StopWatching() {
+  callback_.Reset();
+  event_ = nullptr;
+  watcher_.StopWatching();
+  duplicated_event_handle_.Close();
+}
+
+void WaitableEventWatcher::OnObjectSignaled(HANDLE h) {
+  DCHECK_EQ(duplicated_event_handle_.Get(), h);
+  WaitableEvent* event = event_;
+  EventCallback callback = std::move(callback_);
+  event_ = nullptr;
+  duplicated_event_handle_.Close();
+  DCHECK(event);
+
+  std::move(callback).Run(event);
+}
+
+}  // namespace base
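
The DuplicateHandle() call above is what lets a watch outlive deletion of the
WaitableEvent. A standalone Win32 sketch of that lifetime property
(illustrative only, not part of this change):

    HANDLE original = ::CreateEvent(nullptr, TRUE, FALSE, nullptr);
    HANDLE dup = nullptr;
    ::DuplicateHandle(::GetCurrentProcess(), original, ::GetCurrentProcess(),
                      &dup, 0, FALSE, DUPLICATE_SAME_ACCESS);
    ::CloseHandle(original);  // The kernel object stays alive through |dup|.
    ::SetEvent(dup);          // Still valid; a registered wait would fire.
    ::CloseHandle(dup);       // Last handle closed: the object is destroyed.
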
diff --git a/base/synchronization/waitable_event_win.cc b/base/synchronization/waitable_event_win.cc
new file mode 100644
index 0000000..d04a5a6
--- /dev/null
+++ b/base/synchronization/waitable_event_win.cc
@@ -0,0 +1,165 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/synchronization/waitable_event.h"
+
+#include <windows.h>
+#include <stddef.h>
+
+#include <algorithm>
+#include <utility>
+
+#include "base/debug/activity_tracker.h"
+#include "base/logging.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/threading/scoped_blocking_call.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/time/time.h"
+
+namespace base {
+
+WaitableEvent::WaitableEvent(ResetPolicy reset_policy,
+                             InitialState initial_state)
+    : handle_(CreateEvent(nullptr,
+                          reset_policy == ResetPolicy::MANUAL,
+                          initial_state == InitialState::SIGNALED,
+                          nullptr)) {
+  // We're probably going to crash anyway if this is ever NULL, so we might as
+  // well make our stack reports more informative by crashing here.
+  CHECK(handle_.IsValid());
+}
+
+WaitableEvent::WaitableEvent(win::ScopedHandle handle)
+    : handle_(std::move(handle)) {
+  CHECK(handle_.IsValid()) << "Tried to create WaitableEvent from NULL handle";
+}
+
+WaitableEvent::~WaitableEvent() = default;
+
+void WaitableEvent::Reset() {
+  ResetEvent(handle_.Get());
+}
+
+void WaitableEvent::Signal() {
+  SetEvent(handle_.Get());
+}
+
+bool WaitableEvent::IsSignaled() {
+  DWORD result = WaitForSingleObject(handle_.Get(), 0);
+  DCHECK(result == WAIT_OBJECT_0 || result == WAIT_TIMEOUT)
+      << "Unexpected WaitForSingleObject result " << result;
+  return result == WAIT_OBJECT_0;
+}
+
+void WaitableEvent::Wait() {
+  internal::AssertBaseSyncPrimitivesAllowed();
+  ScopedBlockingCall scoped_blocking_call(BlockingType::MAY_BLOCK);
+  // Record the event that this thread is blocking upon (for hang diagnosis).
+  base::debug::ScopedEventWaitActivity event_activity(this);
+
+  DWORD result = WaitForSingleObject(handle_.Get(), INFINITE);
+  // It is most unexpected that this should ever fail. Help consumers learn
+  // about it if it ever does.
+  DPCHECK(result != WAIT_FAILED);
+  DCHECK_EQ(WAIT_OBJECT_0, result);
+}
+
+namespace {
+
+// Helper function called from TimedWait and TimedWaitUntil.
+bool WaitUntil(HANDLE handle, const TimeTicks& now, const TimeTicks& end_time) {
+  ScopedBlockingCall scoped_blocking_call(BlockingType::MAY_BLOCK);
+
+  TimeDelta delta = end_time - now;
+  DCHECK_GT(delta, TimeDelta());
+
+  do {
+    // On Windows, waiting for less than 1 ms results in WaitForSingleObject
+    // returning promptly, which may cause the calling code to spin.
+    // We need to ensure that we specify at least the minimum possible 1 ms
+    // delay unless the initial timeout was exactly zero.
+    delta = std::max(delta, TimeDelta::FromMilliseconds(1));
+    // Truncate the timeout to milliseconds.
+    DWORD timeout_ms = saturated_cast<DWORD>(delta.InMilliseconds());
+    DWORD result = WaitForSingleObject(handle, timeout_ms);
+    DCHECK(result == WAIT_OBJECT_0 || result == WAIT_TIMEOUT)
+        << "Unexpected WaitForSingleObject result " << result;
+    switch (result) {
+      case WAIT_OBJECT_0:
+        return true;
+      case WAIT_TIMEOUT:
+        // TimedWait can time out earlier than the specified delay on
+        // Windows. To make this consistent with the POSIX implementation,
+        // guarantee that we don't return before |end_time| by waiting again
+        // for the remaining time.
+        delta = end_time - TimeTicks::Now();
+        break;
+    }
+  } while (delta > TimeDelta());
+  return false;
+}
+
+}  // namespace
+
+bool WaitableEvent::TimedWait(const TimeDelta& wait_delta) {
+  DCHECK_GE(wait_delta, TimeDelta());
+  if (wait_delta.is_zero())
+    return IsSignaled();
+
+  internal::AssertBaseSyncPrimitivesAllowed();
+  // Record the event that this thread is blocking upon (for hang diagnosis).
+  base::debug::ScopedEventWaitActivity event_activity(this);
+
+  TimeTicks now(TimeTicks::Now());
+  // TimeTicks takes care of overflow, including the case when |wait_delta|
+  // is the maximum value.
+  return WaitUntil(handle_.Get(), now, now + wait_delta);
+}
+
+bool WaitableEvent::TimedWaitUntil(const TimeTicks& end_time) {
+  if (end_time.is_null())
+    return IsSignaled();
+
+  internal::AssertBaseSyncPrimitivesAllowed();
+  // Record the event that this thread is blocking upon (for hang diagnosis).
+  base::debug::ScopedEventWaitActivity event_activity(this);
+
+  TimeTicks now(TimeTicks::Now());
+  if (end_time <= now)
+    return IsSignaled();
+
+  return WaitUntil(handle_.Get(), now, end_time);
+}
+
+// static
+size_t WaitableEvent::WaitMany(WaitableEvent** events, size_t count) {
+  DCHECK(count) << "Cannot wait on no events";
+
+  internal::AssertBaseSyncPrimitivesAllowed();
+  ScopedBlockingCall scoped_blocking_call(BlockingType::MAY_BLOCK);
+  // Record an event (the first) that this thread is blocking upon.
+  base::debug::ScopedEventWaitActivity event_activity(events[0]);
+
+  HANDLE handles[MAXIMUM_WAIT_OBJECTS];
+  CHECK_LE(count, static_cast<size_t>(MAXIMUM_WAIT_OBJECTS))
+      << "Can only wait on " << MAXIMUM_WAIT_OBJECTS << " with WaitMany";
+
+  for (size_t i = 0; i < count; ++i)
+    handles[i] = events[i]->handle();
+
+  // The cast is safe because count is small - see the CHECK above.
+  DWORD result =
+      WaitForMultipleObjects(static_cast<DWORD>(count),
+                             handles,
+                             FALSE,      // don't wait for all the objects
+                             INFINITE);  // no timeout
+  if (result >= WAIT_OBJECT_0 + count) {
+    DPLOG(FATAL) << "WaitForMultipleObjects failed";
+    return 0;
+  }
+
+  return result - WAIT_OBJECT_0;
+}
+
+}  // namespace base
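
A brief usage sketch of the WaitMany() contract implemented above (the events
are hypothetical):

    base::WaitableEvent a(base::WaitableEvent::ResetPolicy::AUTOMATIC,
                          base::WaitableEvent::InitialState::NOT_SIGNALED);
    base::WaitableEvent b(base::WaitableEvent::ResetPolicy::AUTOMATIC,
                          base::WaitableEvent::InitialState::NOT_SIGNALED);
    base::WaitableEvent* events[] = {&a, &b};
    b.Signal();
    size_t index = base::WaitableEvent::WaitMany(events, 2);
    // index == 1: the return value is the offset of the signaled event,
    // computed as result - WAIT_OBJECT_0 on Windows.
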
diff --git a/base/sys_byteorder.h b/base/sys_byteorder.h
new file mode 100644
index 0000000..9ee1827
--- /dev/null
+++ b/base/sys_byteorder.h
@@ -0,0 +1,139 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This header defines cross-platform ByteSwap() implementations for 16, 32 and
+// 64-bit values, and NetToHostXX() / HostToNetXX() functions equivalent to
+// the traditional ntohX() and htonX() functions.
+// Use the functions defined here rather than using the platform-specific
+// functions directly.
+
+#ifndef BASE_SYS_BYTEORDER_H_
+#define BASE_SYS_BYTEORDER_H_
+
+#include <stdint.h>
+
+#include "base/logging.h"
+#include "build/build_config.h"
+
+#if defined(COMPILER_MSVC)
+#include <stdlib.h>
+#endif
+
+namespace base {
+
+// Returns a value with all bytes in |x| swapped, i.e. reverses the endianness.
+inline uint16_t ByteSwap(uint16_t x) {
+#if defined(COMPILER_MSVC)
+  return _byteswap_ushort(x);
+#else
+  return __builtin_bswap16(x);
+#endif
+}
+
+inline uint32_t ByteSwap(uint32_t x) {
+#if defined(COMPILER_MSVC)
+  return _byteswap_ulong(x);
+#else
+  return __builtin_bswap32(x);
+#endif
+}
+
+inline uint64_t ByteSwap(uint64_t x) {
+#if defined(COMPILER_MSVC)
+  return _byteswap_uint64(x);
+#else
+  return __builtin_bswap64(x);
+#endif
+}
+
+inline uintptr_t ByteSwapUintPtrT(uintptr_t x) {
+  // We do it this way because some build configurations are ILP32 even when
+  // defined(ARCH_CPU_64_BITS). Unfortunately, we can't use sizeof in #ifs. But,
+  // because these conditionals are constexprs, the irrelevant branches will
+  // likely be optimized away, so this construction should not result in code
+  // bloat.
+  if (sizeof(uintptr_t) == 4) {
+    return ByteSwap(static_cast<uint32_t>(x));
+  } else if (sizeof(uintptr_t) == 8) {
+    return ByteSwap(static_cast<uint64_t>(x));
+  } else {
+    NOTREACHED();
+  }
+}
+
+// Converts the bytes in |x| from host order (endianness) to little endian, and
+// returns the result.
+inline uint16_t ByteSwapToLE16(uint16_t x) {
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+  return x;
+#else
+  return ByteSwap(x);
+#endif
+}
+inline uint32_t ByteSwapToLE32(uint32_t x) {
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+  return x;
+#else
+  return ByteSwap(x);
+#endif
+}
+inline uint64_t ByteSwapToLE64(uint64_t x) {
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+  return x;
+#else
+  return ByteSwap(x);
+#endif
+}
+
+// Converts the bytes in |x| from network to host order (endianness), and
+// returns the result.
+inline uint16_t NetToHost16(uint16_t x) {
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+  return ByteSwap(x);
+#else
+  return x;
+#endif
+}
+inline uint32_t NetToHost32(uint32_t x) {
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+  return ByteSwap(x);
+#else
+  return x;
+#endif
+}
+inline uint64_t NetToHost64(uint64_t x) {
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+  return ByteSwap(x);
+#else
+  return x;
+#endif
+}
+
+// Converts the bytes in |x| from host to network order (endianness), and
+// returns the result.
+inline uint16_t HostToNet16(uint16_t x) {
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+  return ByteSwap(x);
+#else
+  return x;
+#endif
+}
+inline uint32_t HostToNet32(uint32_t x) {
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+  return ByteSwap(x);
+#else
+  return x;
+#endif
+}
+inline uint64_t HostToNet64(uint64_t x) {
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+  return ByteSwap(x);
+#else
+  return x;
+#endif
+}
+
+}  // namespace base
+
+#endif  // BASE_SYS_BYTEORDER_H_
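
As a usage sketch, decoding a big-endian (network-order) length prefix with
the helpers above; the function name and buffer layout are illustrative:

    #include <stdint.h>
    #include <cstring>

    #include "base/sys_byteorder.h"

    uint32_t ReadLengthPrefix(const uint8_t* buf) {
      uint32_t big_endian_value;
      // memcpy avoids unaligned reads and strict-aliasing violations.
      std::memcpy(&big_endian_value, buf, sizeof(big_endian_value));
      return base::NetToHost32(big_endian_value);  // Network -> host order.
    }
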
diff --git a/base/sys_byteorder_unittest.cc b/base/sys_byteorder_unittest.cc
new file mode 100644
index 0000000..8167be3
--- /dev/null
+++ b/base/sys_byteorder_unittest.cc
@@ -0,0 +1,142 @@
+// Copyright (c) 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sys_byteorder.h"
+
+#include <stdint.h>
+
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+const uint16_t k16BitTestData = 0xaabb;
+const uint16_t k16BitSwappedTestData = 0xbbaa;
+const uint32_t k32BitTestData = 0xaabbccdd;
+const uint32_t k32BitSwappedTestData = 0xddccbbaa;
+const uint64_t k64BitTestData = 0xaabbccdd44332211;
+const uint64_t k64BitSwappedTestData = 0x11223344ddccbbaa;
+
+}  // namespace
+
+TEST(ByteOrderTest, ByteSwap16) {
+  uint16_t swapped = base::ByteSwap(k16BitTestData);
+  EXPECT_EQ(k16BitSwappedTestData, swapped);
+  uint16_t reswapped = base::ByteSwap(swapped);
+  EXPECT_EQ(k16BitTestData, reswapped);
+}
+
+TEST(ByteOrderTest, ByteSwap32) {
+  uint32_t swapped = base::ByteSwap(k32BitTestData);
+  EXPECT_EQ(k32BitSwappedTestData, swapped);
+  uint32_t reswapped = base::ByteSwap(swapped);
+  EXPECT_EQ(k32BitTestData, reswapped);
+}
+
+TEST(ByteOrderTest, ByteSwap64) {
+  uint64_t swapped = base::ByteSwap(k64BitTestData);
+  EXPECT_EQ(k64BitSwappedTestData, swapped);
+  uint64_t reswapped = base::ByteSwap(swapped);
+  EXPECT_EQ(k64BitTestData, reswapped);
+}
+
+TEST(ByteOrderTest, ByteSwapUintPtrT) {
+#if defined(ARCH_CPU_64_BITS)
+  const uintptr_t test_data = static_cast<uintptr_t>(k64BitTestData);
+  const uintptr_t swapped_test_data =
+      static_cast<uintptr_t>(k64BitSwappedTestData);
+#elif defined(ARCH_CPU_32_BITS)
+  const uintptr_t test_data = static_cast<uintptr_t>(k32BitTestData);
+  const uintptr_t swapped_test_data =
+      static_cast<uintptr_t>(k32BitSwappedTestData);
+#else
+#error architecture not supported
+#endif
+
+  uintptr_t swapped = base::ByteSwapUintPtrT(test_data);
+  EXPECT_EQ(swapped_test_data, swapped);
+  uintptr_t reswapped = base::ByteSwapUintPtrT(swapped);
+  EXPECT_EQ(test_data, reswapped);
+}
+
+TEST(ByteOrderTest, ByteSwapToLE16) {
+  uint16_t le = base::ByteSwapToLE16(k16BitTestData);
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+  EXPECT_EQ(k16BitTestData, le);
+#else
+  EXPECT_EQ(k16BitSwappedTestData, le);
+#endif
+}
+
+TEST(ByteOrderTest, ByteSwapToLE32) {
+  uint32_t le = base::ByteSwapToLE32(k32BitTestData);
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+  EXPECT_EQ(k32BitTestData, le);
+#else
+  EXPECT_EQ(k32BitSwappedTestData, le);
+#endif
+}
+
+TEST(ByteOrderTest, ByteSwapToLE64) {
+  uint64_t le = base::ByteSwapToLE64(k64BitTestData);
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+  EXPECT_EQ(k64BitTestData, le);
+#else
+  EXPECT_EQ(k64BitSwappedTestData, le);
+#endif
+}
+
+TEST(ByteOrderTest, NetToHost16) {
+  uint16_t h = base::NetToHost16(k16BitTestData);
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+  EXPECT_EQ(k16BitSwappedTestData, h);
+#else
+  EXPECT_EQ(k16BitTestData, h);
+#endif
+}
+
+TEST(ByteOrderTest, NetToHost32) {
+  uint32_t h = base::NetToHost32(k32BitTestData);
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+  EXPECT_EQ(k32BitSwappedTestData, h);
+#else
+  EXPECT_EQ(k32BitTestData, h);
+#endif
+}
+
+TEST(ByteOrderTest, NetToHost64) {
+  uint64_t h = base::NetToHost64(k64BitTestData);
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+  EXPECT_EQ(k64BitSwappedTestData, h);
+#else
+  EXPECT_EQ(k64BitTestData, h);
+#endif
+}
+
+TEST(ByteOrderTest, HostToNet16) {
+  uint16_t n = base::HostToNet16(k16BitTestData);
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+  EXPECT_EQ(k16BitSwappedTestData, n);
+#else
+  EXPECT_EQ(k16BitTestData, n);
+#endif
+}
+
+TEST(ByteOrderTest, HostToNet32) {
+  uint32_t n = base::HostToNet32(k32BitTestData);
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+  EXPECT_EQ(k32BitSwappedTestData, n);
+#else
+  EXPECT_EQ(k32BitTestData, n);
+#endif
+}
+
+TEST(ByteOrderTest, HostToNet64) {
+  uint64_t n = base::HostToNet64(k64BitTestData);
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+  EXPECT_EQ(k64BitSwappedTestData, n);
+#else
+  EXPECT_EQ(k64BitTestData, n);
+#endif
+}
diff --git a/base/sys_info.cc b/base/sys_info.cc
new file mode 100644
index 0000000..379d7f2
--- /dev/null
+++ b/base/sys_info.cc
@@ -0,0 +1,94 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sys_info.h"
+
+#include <algorithm>
+
+#include "base/base_switches.h"
+#include "base/command_line.h"
+#include "base/lazy_instance.h"
+#include "base/sys_info_internal.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+
+namespace base {
+namespace {
+static const int kLowMemoryDeviceThresholdMB = 512;
+}  // namespace
+
+// static
+int64_t SysInfo::AmountOfPhysicalMemory() {
+  if (base::CommandLine::ForCurrentProcess()->HasSwitch(
+          switches::kEnableLowEndDeviceMode)) {
+    return kLowMemoryDeviceThresholdMB * 1024 * 1024;
+  }
+
+  return AmountOfPhysicalMemoryImpl();
+}
+
+// static
+int64_t SysInfo::AmountOfAvailablePhysicalMemory() {
+  if (base::CommandLine::ForCurrentProcess()->HasSwitch(
+          switches::kEnableLowEndDeviceMode)) {
+    // Estimate the available memory by subtracting our memory used estimate
+    // from the fake |kLowMemoryDeviceThresholdMB| limit.
+    size_t memory_used =
+        AmountOfPhysicalMemoryImpl() - AmountOfAvailablePhysicalMemoryImpl();
+    size_t memory_limit = kLowMemoryDeviceThresholdMB * 1024 * 1024;
+    // std::min ensures no underflow, as |memory_used| can be > |memory_limit|.
+    return memory_limit - std::min(memory_used, memory_limit);
+  }
+
+  return AmountOfAvailablePhysicalMemoryImpl();
+}
+
+// static
+bool SysInfo::IsLowEndDevice() {
+  if (base::CommandLine::ForCurrentProcess()->HasSwitch(
+          switches::kEnableLowEndDeviceMode)) {
+    return true;
+  }
+
+  return IsLowEndDeviceImpl();
+}
+
+#if !defined(OS_ANDROID)
+
+bool DetectLowEndDevice() {
+  CommandLine* command_line = CommandLine::ForCurrentProcess();
+  if (command_line->HasSwitch(switches::kEnableLowEndDeviceMode))
+    return true;
+  if (command_line->HasSwitch(switches::kDisableLowEndDeviceMode))
+    return false;
+
+  int ram_size_mb = SysInfo::AmountOfPhysicalMemoryMB();
+  return (ram_size_mb > 0 && ram_size_mb <= kLowMemoryDeviceThresholdMB);
+}
+
+static LazyInstance<
+  internal::LazySysInfoValue<bool, DetectLowEndDevice> >::Leaky
+  g_lazy_low_end_device = LAZY_INSTANCE_INITIALIZER;
+
+// static
+bool SysInfo::IsLowEndDeviceImpl() {
+  return g_lazy_low_end_device.Get().value();
+}
+#endif
+
+#if !defined(OS_MACOSX) && !defined(OS_ANDROID)
+// static
+std::string SysInfo::HardwareModelName() {
+  return std::string();
+}
+#endif
+
+// static
+base::TimeDelta SysInfo::Uptime() {
+  // This code relies on an implementation detail of TimeTicks::Now() - that
+  // its return value happens to coincide with the system uptime value in
+  // microseconds, on Win/Mac/iOS/Linux/ChromeOS and Android.
+  int64_t uptime_in_microseconds = TimeTicks::Now().ToInternalValue();
+  return base::TimeDelta::FromMicroseconds(uptime_in_microseconds);
+}
+
+}  // namespace base
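
A sketch of the simulated-low-end path above, e.g. from a test. The switch
constant comes from base/base_switches.h, and 512 is
kLowMemoryDeviceThresholdMB:

    base::CommandLine::ForCurrentProcess()->AppendSwitch(
        switches::kEnableLowEndDeviceMode);
    CHECK(base::SysInfo::IsLowEndDevice());
    CHECK_EQ(512, base::SysInfo::AmountOfPhysicalMemoryMB());
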
diff --git a/base/sys_info.h b/base/sys_info.h
new file mode 100644
index 0000000..6e58715
--- /dev/null
+++ b/base/sys_info.h
@@ -0,0 +1,185 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SYS_INFO_H_
+#define BASE_SYS_INFO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <map>
+#include <string>
+
+#include "base/base_export.h"
+#include "base/files/file_path.h"
+#include "base/gtest_prod_util.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace debug {
+FORWARD_DECLARE_TEST(SystemMetricsTest, ParseMeminfo);
+}
+
+struct SystemMemoryInfoKB;
+
+class BASE_EXPORT SysInfo {
+ public:
+  // Return the number of logical processors/cores on the current machine.
+  static int NumberOfProcessors();
+
+  // Return the number of bytes of physical memory on the current machine.
+  static int64_t AmountOfPhysicalMemory();
+
+  // Return the number of bytes of currently available physical memory on
+  // the machine, i.e. the amount of memory that can be allocated without any
+  // significant impact on the system. Allocating it may cause the OS to free
+  // inactive file-backed and/or speculative file-backed memory.
+  static int64_t AmountOfAvailablePhysicalMemory();
+
+  // Return the number of bytes of virtual memory of this process. A return
+  // value of zero means that there is no limit on the available virtual
+  // memory.
+  static int64_t AmountOfVirtualMemory();
+
+  // Return the number of megabytes of physical memory on the current machine.
+  static int AmountOfPhysicalMemoryMB() {
+    return static_cast<int>(AmountOfPhysicalMemory() / 1024 / 1024);
+  }
+
+  // Return the number of megabytes of available virtual memory, or zero if it
+  // is unlimited.
+  static int AmountOfVirtualMemoryMB() {
+    return static_cast<int>(AmountOfVirtualMemory() / 1024 / 1024);
+  }
+
+  // Return the available disk space in bytes on the volume containing |path|,
+  // or -1 on failure.
+  static int64_t AmountOfFreeDiskSpace(const FilePath& path);
+
+  // Return the total disk space in bytes on the volume containing |path|, or -1
+  // on failure.
+  static int64_t AmountOfTotalDiskSpace(const FilePath& path);
+
+  // Returns system uptime.
+  static TimeDelta Uptime();
+
+  // Returns a descriptive string for the current machine model or an empty
+  // string if the machine model is unknown or an error occurred.
+  // e.g. "MacPro1,1" on Mac, "iPhone9,3" on iOS or "Nexus 5" on Android. Only
+  // implemented on OS X, iOS, Android, and Chrome OS. This returns an empty
+  // string on other platforms.
+  static std::string HardwareModelName();
+
+  // Returns the name of the host operating system.
+  static std::string OperatingSystemName();
+
+  // Returns the version of the host operating system.
+  static std::string OperatingSystemVersion();
+
+  // Retrieves detailed numeric values for the OS version.
+  // DON'T USE THIS ON THE MAC OR WINDOWS to determine the current OS release
+  // for OS version-specific feature checks and workarounds. If you must use
+  // an OS version check instead of a feature check, use the base::mac::IsOS*
+  // family from base/mac/mac_util.h, or base::win::GetVersion from
+  // base/win/windows_version.h.
+  static void OperatingSystemVersionNumbers(int32_t* major_version,
+                                            int32_t* minor_version,
+                                            int32_t* bugfix_version);
+
+  // Returns the architecture of the running operating system.
+  // Exact return value may differ across platforms.
+  // e.g. a 32-bit x86 kernel on a 64-bit capable CPU will return "x86",
+  //      whereas an x86-64 kernel on the same CPU will return "x86_64".
+  static std::string OperatingSystemArchitecture();
+
+  // Avoid using this. Use base/cpu.h to get information about the CPU instead.
+  // http://crbug.com/148884
+  // Returns the CPU model name of the system. If it cannot be determined,
+  // an empty string is returned.
+  static std::string CPUModelName();
+
+  // Return the smallest amount of memory (in bytes) which the VM system will
+  // allocate.
+  static size_t VMAllocationGranularity();
+
+#if defined(OS_CHROMEOS)
+  typedef std::map<std::string, std::string> LsbReleaseMap;
+
+  // Returns the contents of /etc/lsb-release as a map.
+  static const LsbReleaseMap& GetLsbReleaseMap();
+
+  // If |key| is present in the LsbReleaseMap, sets |value| and returns true.
+  static bool GetLsbReleaseValue(const std::string& key, std::string* value);
+
+  // Convenience function for GetLsbReleaseValue("CHROMEOS_RELEASE_BOARD",...).
+  // Returns "unknown" if CHROMEOS_RELEASE_BOARD is not set. Otherwise, returns
+  // the full name of the board. Note that the returned value often differs
+  // between developers' systems and devices that use official builds. E.g. for
+  // a developer-built image, the function could return 'glimmer', while in an
+  // official build, it may be something like 'glimmer-signed-mp-v4keys'.
+  //
+  // NOTE: Strings returned by this function should be treated as opaque values
+  // within Chrome (e.g. for reporting metrics elsewhere). If you need to make
+  // Chrome behave differently for different Chrome OS devices, either directly
+  // check for the hardware feature that you care about (preferred) or add a
+  // command-line flag to Chrome and pass it from session_manager (based on
+  // whether a USE flag is set or not). See https://goo.gl/BbBkzg for more
+  // details.
+  static std::string GetLsbReleaseBoard();
+
+  // DEPRECATED: Please see GetLsbReleaseBoard's comment.
+  // Convenience function for GetLsbReleaseBoard() removing trailing "-signed-*"
+  // if present. Returns "unknown" if CHROMEOS_RELEASE_BOARD is not set.
+  // TODO(derat): Delete this after October 2017.
+  static std::string GetStrippedReleaseBoard();
+
+  // Returns the creation time of /etc/lsb-release. (Used to get the date and
+  // time of the Chrome OS build).
+  static Time GetLsbReleaseTime();
+
+  // Returns true when actually running in a Chrome OS environment.
+  static bool IsRunningOnChromeOS();
+
+  // Test method to force re-parsing of lsb-release.
+  static void SetChromeOSVersionInfoForTest(const std::string& lsb_release,
+                                            const Time& lsb_release_time);
+#endif  // defined(OS_CHROMEOS)
+
+#if defined(OS_ANDROID)
+  // Returns the Android build's codename.
+  static std::string GetAndroidBuildCodename();
+
+  // Returns the Android build ID.
+  static std::string GetAndroidBuildID();
+
+  static int DalvikHeapSizeMB();
+  static int DalvikHeapGrowthLimitMB();
+#endif  // defined(OS_ANDROID)
+
+  // Returns true if this is a low-end device.
+  // Low-end device refers to devices having a very low amount of total
+  // system memory, typically <= 1GB.
+  // See also SysUtils.java, method isLowEndDevice.
+  static bool IsLowEndDevice();
+
+ private:
+  FRIEND_TEST_ALL_PREFIXES(SysInfoTest, AmountOfAvailablePhysicalMemory);
+  FRIEND_TEST_ALL_PREFIXES(debug::SystemMetricsTest, ParseMeminfo);
+
+  static int64_t AmountOfPhysicalMemoryImpl();
+  static int64_t AmountOfAvailablePhysicalMemoryImpl();
+  static bool IsLowEndDeviceImpl();
+
+#if defined(OS_LINUX) || defined(OS_ANDROID) || defined(OS_AIX)
+  static int64_t AmountOfAvailablePhysicalMemory(
+      const SystemMemoryInfoKB& meminfo);
+#endif
+};
+
+}  // namespace base
+
+#endif  // BASE_SYS_INFO_H_
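
A short sketch of typical SysInfo queries declared above; the path is
illustrative and return values vary by machine:

    int32_t major, minor, bugfix;
    base::SysInfo::OperatingSystemVersionNumbers(&major, &minor, &bugfix);
    int ram_mb = base::SysInfo::AmountOfPhysicalMemoryMB();
    int64_t free_bytes = base::SysInfo::AmountOfFreeDiskSpace(
        base::FilePath(FILE_PATH_LITERAL("/")));
    // free_bytes is -1 on failure, per the contract above.
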
diff --git a/base/sys_info_android.cc b/base/sys_info_android.cc
new file mode 100644
index 0000000..7704796
--- /dev/null
+++ b/base/sys_info_android.cc
@@ -0,0 +1,241 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sys_info.h"
+
+#include <dlfcn.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/system_properties.h>
+
+#include <algorithm>
+#include <limits>
+
+#include "base/android/jni_android.h"
+#include "base/android/sys_utils.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/stringprintf.h"
+#include "base/sys_info_internal.h"
+
+#if (__ANDROID_API__ >= 21 /* 5.0 - Lollipop */)
+
+namespace {
+
+typedef int (SystemPropertyGetFunction)(const char*, char*);
+
+SystemPropertyGetFunction* DynamicallyLoadRealSystemPropertyGet() {
+  // libc.so should already be open, get a handle to it.
+  void* handle = dlopen("libc.so", RTLD_NOLOAD);
+  if (!handle) {
+    LOG(FATAL) << "Cannot dlopen libc.so: " << dlerror();
+  }
+  SystemPropertyGetFunction* real_system_property_get =
+      reinterpret_cast<SystemPropertyGetFunction*>(
+          dlsym(handle, "__system_property_get"));
+  if (!real_system_property_get) {
+    LOG(FATAL) << "Cannot resolve __system_property_get(): " << dlerror();
+  }
+  return real_system_property_get;
+}
+
+static base::LazyInstance<base::internal::LazySysInfoValue<
+    SystemPropertyGetFunction*, DynamicallyLoadRealSystemPropertyGet> >::Leaky
+    g_lazy_real_system_property_get = LAZY_INSTANCE_INITIALIZER;
+
+}  // namespace
+
+// Android 'L' removes __system_property_get from the NDK, however it is still
+// a hidden symbol in libc. Until we remove all calls of __system_property_get
+// from Chrome we work around this by defining a weak stub here, which uses
+// dlsym to resolve the hidden symbol and ensures that Chrome uses the real
+// system implementation when loaded.  http://crbug.com/392191.
+BASE_EXPORT int __system_property_get(const char* name, char* value) {
+  return g_lazy_real_system_property_get.Get().value()(name, value);
+}
+
+#endif
+
+namespace {
+
+// Default version of Android to fall back to when actual version numbers
+// cannot be acquired. Use the latest Android release with a higher bug fix
+// version to avoid unnecessarily comparison errors with the latest release.
+// This should be manually kept up to date on each Android release.
+const int kDefaultAndroidMajorVersion = 8;
+const int kDefaultAndroidMinorVersion = 1;
+const int kDefaultAndroidBugfixVersion = 99;
+
+// Get and parse out the OS version numbers from the system properties.
+// Note that if parsing fails, the "default" version is returned as a fallback.
+void GetOsVersionStringAndNumbers(std::string* version_string,
+                                  int32_t* major_version,
+                                  int32_t* minor_version,
+                                  int32_t* bugfix_version) {
+  // Read the version number string out from the properties.
+  char os_version_str[PROP_VALUE_MAX];
+  __system_property_get("ro.build.version.release", os_version_str);
+
+  if (os_version_str[0]) {
+    // Try to parse out the version numbers from the string.
+    int num_read = sscanf(os_version_str, "%d.%d.%d", major_version,
+                          minor_version, bugfix_version);
+
+    if (num_read > 0) {
+      // If we don't have a full set of version numbers, make the extras 0.
+      if (num_read < 2)
+        *minor_version = 0;
+      if (num_read < 3)
+        *bugfix_version = 0;
+      *version_string = std::string(os_version_str);
+      return;
+    }
+  }
+
+  // For some reason, we couldn't parse the version number string.
+  *major_version = kDefaultAndroidMajorVersion;
+  *minor_version = kDefaultAndroidMinorVersion;
+  *bugfix_version = kDefaultAndroidBugfixVersion;
+  *version_string = ::base::StringPrintf("%d.%d.%d", *major_version,
+                                         *minor_version, *bugfix_version);
+}
+
+// Parses a system property (specified with unit 'k','m' or 'g').
+// Returns a value in bytes.
+// Returns -1 if the string could not be parsed.
+int64_t ParseSystemPropertyBytes(const base::StringPiece& str) {
+  const int64_t KB = 1024;
+  const int64_t MB = 1024 * KB;
+  const int64_t GB = 1024 * MB;
+  if (str.size() == 0u)
+    return -1;
+  int64_t unit_multiplier = 1;
+  size_t length = str.size();
+  if (str[length - 1] == 'k') {
+    unit_multiplier = KB;
+    length--;
+  } else if (str[length - 1] == 'm') {
+    unit_multiplier = MB;
+    length--;
+  } else if (str[length - 1] == 'g') {
+    unit_multiplier = GB;
+    length--;
+  }
+  int64_t result = 0;
+  bool parsed = base::StringToInt64(str.substr(0, length), &result);
+  bool negative = result <= 0;
+  bool overflow =
+      result >= std::numeric_limits<int64_t>::max() / unit_multiplier;
+  if (!parsed || negative || overflow)
+    return -1;
+  return result * unit_multiplier;
+}
+
+int GetDalvikHeapSizeMB() {
+  char heap_size_str[PROP_VALUE_MAX];
+  __system_property_get("dalvik.vm.heapsize", heap_size_str);
+  // The dalvik.vm.heapsize property is writable by a root user.
+  // Clamp it to a reasonable range as a sanity check;
+  // a typical Android device will never have less than 48MB.
+  const int64_t MB = 1024 * 1024;
+  int64_t result = ParseSystemPropertyBytes(heap_size_str);
+  if (result == -1) {
+     // We should consider not exposing these values if they are not reliable.
+     LOG(ERROR) << "Can't parse dalvik.vm.heapsize: " << heap_size_str;
+     result = base::SysInfo::AmountOfPhysicalMemoryMB() / 3;
+  }
+  result =
+      std::min<int64_t>(std::max<int64_t>(32 * MB, result), 1024 * MB) / MB;
+  return static_cast<int>(result);
+}
+
+int GetDalvikHeapGrowthLimitMB() {
+  char heap_size_str[PROP_VALUE_MAX];
+  __system_property_get("dalvik.vm.heapgrowthlimit", heap_size_str);
+  // The dalvik.vm.heapgrowthlimit property is writable by a root user.
+  // Clamp it to a reasonable range as a sanity check;
+  // a typical Android device will never have less than 24MB.
+  const int64_t MB = 1024 * 1024;
+  int64_t result = ParseSystemPropertyBytes(heap_size_str);
+  if (result == -1) {
+     // We should consider not exposing these values if they are not reliable.
+     LOG(ERROR) << "Can't parse dalvik.vm.heapgrowthlimit: " << heap_size_str;
+     result = base::SysInfo::AmountOfPhysicalMemoryMB() / 6;
+  }
+  result = std::min<int64_t>(std::max<int64_t>(16 * MB, result), 512 * MB) / MB;
+  return static_cast<int>(result);
+}
+
+}  // anonymous namespace
+
+namespace base {
+
+std::string SysInfo::HardwareModelName() {
+  char device_model_str[PROP_VALUE_MAX];
+  __system_property_get("ro.product.model", device_model_str);
+  return std::string(device_model_str);
+}
+
+std::string SysInfo::OperatingSystemName() {
+  return "Android";
+}
+
+std::string SysInfo::OperatingSystemVersion() {
+  std::string version_string;
+  int32_t major, minor, bugfix;
+  GetOsVersionStringAndNumbers(&version_string, &major, &minor, &bugfix);
+  return version_string;
+}
+
+void SysInfo::OperatingSystemVersionNumbers(int32_t* major_version,
+                                            int32_t* minor_version,
+                                            int32_t* bugfix_version) {
+  std::string version_string;
+  GetOsVersionStringAndNumbers(&version_string, major_version, minor_version,
+                               bugfix_version);
+}
+
+std::string SysInfo::GetAndroidBuildCodename() {
+  char os_version_codename_str[PROP_VALUE_MAX];
+  __system_property_get("ro.build.version.codename", os_version_codename_str);
+  return std::string(os_version_codename_str);
+}
+
+std::string SysInfo::GetAndroidBuildID() {
+  char os_build_id_str[PROP_VALUE_MAX];
+  __system_property_get("ro.build.id", os_build_id_str);
+  return std::string(os_build_id_str);
+}
+
+int SysInfo::DalvikHeapSizeMB() {
+  static int heap_size = GetDalvikHeapSizeMB();
+  return heap_size;
+}
+
+int SysInfo::DalvikHeapGrowthLimitMB() {
+  static int heap_growth_limit = GetDalvikHeapGrowthLimitMB();
+  return heap_growth_limit;
+}
+
+static base::LazyInstance<
+    base::internal::LazySysInfoValue<bool,
+        android::SysUtils::IsLowEndDeviceFromJni> >::Leaky
+    g_lazy_low_end_device = LAZY_INSTANCE_INITIALIZER;
+
+bool SysInfo::IsLowEndDeviceImpl() {
+  // This code might be used in environments that do not have a Java
+  // environment. Note that we need to call the Java version here: a complete
+  // native implementation exists in sys_info.cc, but calling it here would
+  // mean the Java code and the native code could use different
+  // implementations, which could give different results. The Java code also
+  // cannot depend on the native code, since it might not be loaded yet.
+  if (!base::android::IsVMInitialized())
+    return false;
+  return g_lazy_low_end_device.Get().value();
+}
+
+}  // namespace base
diff --git a/base/sys_info_chromeos.cc b/base/sys_info_chromeos.cc
new file mode 100644
index 0000000..b9ec2c9
--- /dev/null
+++ b/base/sys_info_chromeos.cc
@@ -0,0 +1,228 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sys_info.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+
+#include "base/environment.h"
+#include "base/files/file.h"
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/lazy_instance.h"
+#include "base/macros.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_tokenizer.h"
+#include "base/strings/string_util.h"
+#include "base/threading/thread_restrictions.h"
+
+namespace base {
+
+namespace {
+
+const char* const kLinuxStandardBaseVersionKeys[] = {
+  "CHROMEOS_RELEASE_VERSION",
+  "GOOGLE_RELEASE",
+  "DISTRIB_RELEASE",
+};
+
+const char kChromeOsReleaseNameKey[] = "CHROMEOS_RELEASE_NAME";
+
+const char* const kChromeOsReleaseNames[] = {
+  "Chrome OS",
+  "Chromium OS",
+};
+
+const char kLinuxStandardBaseReleaseFile[] = "/etc/lsb-release";
+
+const char kLsbReleaseKey[] = "LSB_RELEASE";
+const char kLsbReleaseTimeKey[] = "LSB_RELEASE_TIME";  // Seconds since epoch
+
+const char kLsbReleaseSourceKey[] = "lsb-release";
+const char kLsbReleaseSourceEnv[] = "env";
+const char kLsbReleaseSourceFile[] = "file";
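+
+// For reference (illustrative; not an exhaustive list of keys), the parsed
+// /etc/lsb-release content typically looks like:
+//
+//   CHROMEOS_RELEASE_NAME=Chrome OS
+//   CHROMEOS_RELEASE_VERSION=1.2.3.4
+//   CHROMEOS_RELEASE_BOARD=glimmer
+//
+// ParseLsbRelease() below splits such content on '=' and '\n' into
+// lsb_release_map_.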
+
+class ChromeOSVersionInfo {
+ public:
+  ChromeOSVersionInfo() {
+    Parse();
+  }
+
+  void Parse() {
+    lsb_release_map_.clear();
+    major_version_ = 0;
+    minor_version_ = 0;
+    bugfix_version_ = 0;
+    is_running_on_chromeos_ = false;
+
+    std::string lsb_release, lsb_release_time_str;
+    std::unique_ptr<Environment> env(Environment::Create());
+    bool parsed_from_env =
+        env->GetVar(kLsbReleaseKey, &lsb_release) &&
+        env->GetVar(kLsbReleaseTimeKey, &lsb_release_time_str);
+    if (parsed_from_env) {
+      double us = 0;
+      if (StringToDouble(lsb_release_time_str, &us))
+        lsb_release_time_ = Time::FromDoubleT(us);
+    } else {
+      // If the LSB_RELEASE and LSB_RELEASE_TIME environment variables are not
+      // set, fall back to a blocking read of the lsb_release file. This
+      // should only happen in non-Chrome OS environments.
+      ThreadRestrictions::ScopedAllowIO allow_io;
+      FilePath path(kLinuxStandardBaseReleaseFile);
+      ReadFileToString(path, &lsb_release);
+      File::Info fileinfo;
+      if (GetFileInfo(path, &fileinfo))
+        lsb_release_time_ = fileinfo.creation_time;
+    }
+    ParseLsbRelease(lsb_release);
+    // For debugging:
+    lsb_release_map_[kLsbReleaseSourceKey] =
+        parsed_from_env ? kLsbReleaseSourceEnv : kLsbReleaseSourceFile;
+  }
+
+  bool GetLsbReleaseValue(const std::string& key, std::string* value) {
+    SysInfo::LsbReleaseMap::const_iterator iter = lsb_release_map_.find(key);
+    if (iter == lsb_release_map_.end())
+      return false;
+    *value = iter->second;
+    return true;
+  }
+
+  void GetVersionNumbers(int32_t* major_version,
+                         int32_t* minor_version,
+                         int32_t* bugfix_version) {
+    *major_version = major_version_;
+    *minor_version = minor_version_;
+    *bugfix_version = bugfix_version_;
+  }
+
+  const Time& lsb_release_time() const { return lsb_release_time_; }
+  const SysInfo::LsbReleaseMap& lsb_release_map() const {
+    return lsb_release_map_;
+  }
+  bool is_running_on_chromeos() const { return is_running_on_chromeos_; }
+
+ private:
+  void ParseLsbRelease(const std::string& lsb_release) {
+    // Parse and cache the lsb_release key/value pairs. There should only be
+    // a handful of entries, so the overhead for this will be small, and it
+    // can be useful for debugging.
+    base::StringPairs pairs;
+    SplitStringIntoKeyValuePairs(lsb_release, '=', '\n', &pairs);
+    for (size_t i = 0; i < pairs.size(); ++i) {
+      std::string key, value;
+      TrimWhitespaceASCII(pairs[i].first, TRIM_ALL, &key);
+      TrimWhitespaceASCII(pairs[i].second, TRIM_ALL, &value);
+      if (key.empty())
+        continue;
+      lsb_release_map_[key] = value;
+    }
+    // Parse the version from the first matching recognized version key.
+    std::string version;
+    for (size_t i = 0; i < arraysize(kLinuxStandardBaseVersionKeys); ++i) {
+      std::string key = kLinuxStandardBaseVersionKeys[i];
+      if (GetLsbReleaseValue(key, &version) && !version.empty())
+        break;
+    }
+    StringTokenizer tokenizer(version, ".");
+    if (tokenizer.GetNext()) {
+      StringToInt(tokenizer.token_piece(), &major_version_);
+    }
+    if (tokenizer.GetNext()) {
+      StringToInt(tokenizer.token_piece(), &minor_version_);
+    }
+    if (tokenizer.GetNext()) {
+      StringToInt(tokenizer.token_piece(), &bugfix_version_);
+    }
+
+    // Check release name for Chrome OS.
+    std::string release_name;
+    if (GetLsbReleaseValue(kChromeOsReleaseNameKey, &release_name)) {
+      for (size_t i = 0; i < arraysize(kChromeOsReleaseNames); ++i) {
+        if (release_name == kChromeOsReleaseNames[i]) {
+          is_running_on_chromeos_ = true;
+          break;
+        }
+      }
+    }
+  }
+
+  Time lsb_release_time_;
+  SysInfo::LsbReleaseMap lsb_release_map_;
+  int32_t major_version_;
+  int32_t minor_version_;
+  int32_t bugfix_version_;
+  bool is_running_on_chromeos_;
+};
+
+static LazyInstance<ChromeOSVersionInfo>::Leaky
+    g_chrome_os_version_info = LAZY_INSTANCE_INITIALIZER;
+
+ChromeOSVersionInfo& GetChromeOSVersionInfo() {
+  return g_chrome_os_version_info.Get();
+}
+
+}  // namespace
+
+// static
+void SysInfo::OperatingSystemVersionNumbers(int32_t* major_version,
+                                            int32_t* minor_version,
+                                            int32_t* bugfix_version) {
+  return GetChromeOSVersionInfo().GetVersionNumbers(
+      major_version, minor_version, bugfix_version);
+}
+
+// static
+const SysInfo::LsbReleaseMap& SysInfo::GetLsbReleaseMap() {
+  return GetChromeOSVersionInfo().lsb_release_map();
+}
+
+// static
+bool SysInfo::GetLsbReleaseValue(const std::string& key, std::string* value) {
+  return GetChromeOSVersionInfo().GetLsbReleaseValue(key, value);
+}
+
+// static
+std::string SysInfo::GetLsbReleaseBoard() {
+  const char kMachineInfoBoard[] = "CHROMEOS_RELEASE_BOARD";
+  std::string board;
+  if (!GetLsbReleaseValue(kMachineInfoBoard, &board))
+    board = "unknown";
+  return board;
+}
+
+// static
+std::string SysInfo::GetStrippedReleaseBoard() {
+  std::string board = GetLsbReleaseBoard();
+  const size_t index = board.find("-signed-");
+  if (index != std::string::npos)
+    board.resize(index);
+
+  return base::ToLowerASCII(board);
+}
+
+// static
+Time SysInfo::GetLsbReleaseTime() {
+  return GetChromeOSVersionInfo().lsb_release_time();
+}
+
+// static
+bool SysInfo::IsRunningOnChromeOS() {
+  return GetChromeOSVersionInfo().is_running_on_chromeos();
+}
+
+// static
+void SysInfo::SetChromeOSVersionInfoForTest(const std::string& lsb_release,
+                                            const Time& lsb_release_time) {
+  std::unique_ptr<Environment> env(Environment::Create());
+  env->SetVar(kLsbReleaseKey, lsb_release);
+  env->SetVar(kLsbReleaseTimeKey, NumberToString(lsb_release_time.ToDoubleT()));
+  g_chrome_os_version_info.Get().Parse();
+}
+
+}  // namespace base
diff --git a/base/sys_info_freebsd.cc b/base/sys_info_freebsd.cc
new file mode 100644
index 0000000..8591655
--- /dev/null
+++ b/base/sys_info_freebsd.cc
@@ -0,0 +1,38 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sys_info.h"
+
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/sysctl.h>
+
+#include "base/logging.h"
+
+namespace base {
+
+int64_t SysInfo::AmountOfPhysicalMemoryImpl() {
+  // Initialize to -1 so that a failed sysctlbyname() call is detected below
+  // instead of reading uninitialized values.
+  int pages = -1;
+  int page_size = -1;
+  size_t size = sizeof(pages);
+  sysctlbyname("vm.stats.vm.v_page_count", &pages, &size, NULL, 0);
+  sysctlbyname("vm.stats.vm.v_page_size", &page_size, &size, NULL, 0);
+  if (pages == -1 || page_size == -1) {
+    NOTREACHED();
+    return 0;
+  }
+  return static_cast<int64_t>(pages) * page_size;
+}
+
+// static
+uint64_t SysInfo::MaxSharedMemorySize() {
+  size_t limit;
+  size_t size = sizeof(limit);
+  if (sysctlbyname("kern.ipc.shmmax", &limit, &size, NULL, 0) < 0) {
+    NOTREACHED();
+    return 0;
+  }
+  return static_cast<uint64_t>(limit);
+}
+
+}  // namespace base
diff --git a/base/sys_info_fuchsia.cc b/base/sys_info_fuchsia.cc
new file mode 100644
index 0000000..081a55d
--- /dev/null
+++ b/base/sys_info_fuchsia.cc
@@ -0,0 +1,35 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sys_info.h"
+
+#include <zircon/syscalls.h>
+
+#include "base/logging.h"
+
+namespace base {
+
+// static
+int64_t SysInfo::AmountOfPhysicalMemoryImpl() {
+  return zx_system_get_physmem();
+}
+
+// static
+int64_t SysInfo::AmountOfAvailablePhysicalMemoryImpl() {
+  // TODO(fuchsia): https://crbug.com/706592 This is not exposed.
+  NOTREACHED();
+  return 0;
+}
+
+// static
+int SysInfo::NumberOfProcessors() {
+  return zx_system_get_num_cpus();
+}
+
+// static
+int64_t SysInfo::AmountOfVirtualMemory() {
+  return 0;
+}
+
+}  // namespace base
diff --git a/base/sys_info_internal.h b/base/sys_info_internal.h
new file mode 100644
index 0000000..2168e9f
--- /dev/null
+++ b/base/sys_info_internal.h
@@ -0,0 +1,34 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SYS_INFO_INTERNAL_H_
+#define BASE_SYS_INFO_INTERNAL_H_
+
+#include "base/macros.h"
+
+namespace base {
+
+namespace internal {
+
+template<typename T, T (*F)(void)>
+class LazySysInfoValue {
+ public:
+  LazySysInfoValue()
+      : value_(F()) { }
+
+  ~LazySysInfoValue() = default;
+
+  T value() { return value_; }
+
+ private:
+  const T value_;
+
+  DISALLOW_COPY_AND_ASSIGN(LazySysInfoValue);
+};
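+
+// Usage sketch (illustrative only): LazySysInfoValue is meant to be wrapped
+// in a LazyInstance so that an expensive system query F() runs at most once:
+//
+//   int64_t ExpensiveQuery();  // Hypothetical query function.
+//   base::LazyInstance<
+//       base::internal::LazySysInfoValue<int64_t, ExpensiveQuery>>::Leaky
+//       g_value = LAZY_INSTANCE_INITIALIZER;
+//
+//   // g_value.Get().value() returns the cached result of ExpensiveQuery().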
+
+}  // namespace internal
+
+}  // namespace base
+
+#endif  // BASE_SYS_INFO_INTERNAL_H_
diff --git a/base/sys_info_ios.mm b/base/sys_info_ios.mm
new file mode 100644
index 0000000..60a7531
--- /dev/null
+++ b/base/sys_info_ios.mm
@@ -0,0 +1,130 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sys_info.h"
+
+#include <mach/mach.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <sys/sysctl.h>
+#include <sys/types.h>
+#import <UIKit/UIKit.h>
+
+#include "base/logging.h"
+#include "base/mac/scoped_mach_port.h"
+#include "base/mac/scoped_nsautorelease_pool.h"
+#include "base/macros.h"
+#include "base/process/process_metrics.h"
+#include "base/strings/sys_string_conversions.h"
+
+namespace base {
+
+namespace {
+
+// Queries sysctlbyname() for the given key and returns the value from the
+// system or the empty string on failure.
+std::string GetSysctlValue(const char* key_name) {
+  char value[256];
+  size_t len = arraysize(value);
+  if (sysctlbyname(key_name, &value, &len, nullptr, 0) == 0) {
+    DCHECK_GE(len, 1u);
+    DCHECK_EQ('\0', value[len - 1]);
+    return std::string(value, len - 1);
+  }
+  return std::string();
+}
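+
+// Usage sketch (illustrative only):
+//
+//   std::string cpu = GetSysctlValue("machdep.cpu.brand_string");
+//   // |cpu| is empty if the sysctl key is unavailable.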
+
+}  // namespace
+
+// static
+std::string SysInfo::OperatingSystemName() {
+  static dispatch_once_t get_system_name_once;
+  static std::string* system_name;
+  dispatch_once(&get_system_name_once, ^{
+      base::mac::ScopedNSAutoreleasePool pool;
+      system_name = new std::string(
+          SysNSStringToUTF8([[UIDevice currentDevice] systemName]));
+  });
+  // Example of returned value: 'iPhone OS' on both iPad and iPhone
+  // running iOS 5.1.1.
+  return *system_name;
+}
+
+// static
+std::string SysInfo::OperatingSystemVersion() {
+  static dispatch_once_t get_system_version_once;
+  static std::string* system_version;
+  dispatch_once(&get_system_version_once, ^{
+      base::mac::ScopedNSAutoreleasePool pool;
+      system_version = new std::string(
+          SysNSStringToUTF8([[UIDevice currentDevice] systemVersion]));
+  });
+  return *system_version;
+}
+
+// static
+void SysInfo::OperatingSystemVersionNumbers(int32_t* major_version,
+                                            int32_t* minor_version,
+                                            int32_t* bugfix_version) {
+  base::mac::ScopedNSAutoreleasePool pool;
+  std::string system_version = OperatingSystemVersion();
+  if (!system_version.empty()) {
+    // Try to parse out the version numbers from the string.
+    int num_read = sscanf(system_version.c_str(), "%d.%d.%d", major_version,
+                          minor_version, bugfix_version);
+    if (num_read < 1)
+      *major_version = 0;
+    if (num_read < 2)
+      *minor_version = 0;
+    if (num_read < 3)
+      *bugfix_version = 0;
+  }
+}
+
+// static
+int64_t SysInfo::AmountOfPhysicalMemoryImpl() {
+  struct host_basic_info hostinfo;
+  mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
+  base::mac::ScopedMachSendRight host(mach_host_self());
+  int result = host_info(host.get(),
+                         HOST_BASIC_INFO,
+                         reinterpret_cast<host_info_t>(&hostinfo),
+                         &count);
+  if (result != KERN_SUCCESS) {
+    NOTREACHED();
+    return 0;
+  }
+  DCHECK_EQ(HOST_BASIC_INFO_COUNT, count);
+  return static_cast<int64_t>(hostinfo.max_mem);
+}
+
+// static
+int64_t SysInfo::AmountOfAvailablePhysicalMemoryImpl() {
+  SystemMemoryInfoKB info;
+  if (!GetSystemMemoryInfo(&info))
+    return 0;
+  // We should add inactive file-backed memory also but there is no such
+  // information from iOS unfortunately.
+  return static_cast<int64_t>(info.free + info.speculative) * 1024;
+}
+
+// static
+std::string SysInfo::CPUModelName() {
+  return GetSysctlValue("machdep.cpu.brand_string");
+}
+
+// static
+std::string SysInfo::HardwareModelName() {
+#if TARGET_OS_SIMULATOR
+  // On the simulator, "hw.machine" returns "i386" or "x86_64" which doesn't
+  // match the expected format, so supply a fake string here.
+  return "Simulator1,1";
+#else
+  // Note: This uses "hw.machine" instead of "hw.model" like the Mac code,
+  // because "hw.model" doesn't always return the right string on some devices.
+  return GetSysctlValue("hw.machine");
+#endif
+}
+
+}  // namespace base
diff --git a/base/sys_info_linux.cc b/base/sys_info_linux.cc
new file mode 100644
index 0000000..b1fecff
--- /dev/null
+++ b/base/sys_info_linux.cc
@@ -0,0 +1,94 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sys_info.h"
+
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <limits>
+#include <sstream>
+
+#include "base/files/file_util.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/process/process_metrics.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/sys_info_internal.h"
+#include "build/build_config.h"
+
+namespace {
+
+int64_t AmountOfMemory(int pages_name) {
+  long pages = sysconf(pages_name);
+  long page_size = sysconf(_SC_PAGESIZE);
+  if (pages == -1 || page_size == -1) {
+    NOTREACHED();
+    return 0;
+  }
+  return static_cast<int64_t>(pages) * page_size;
+}
+
+int64_t AmountOfPhysicalMemory() {
+  return AmountOfMemory(_SC_PHYS_PAGES);
+}
+
+base::LazyInstance<
+    base::internal::LazySysInfoValue<int64_t, AmountOfPhysicalMemory>>::Leaky
+    g_lazy_physical_memory = LAZY_INSTANCE_INITIALIZER;
+
+}  // namespace
+
+namespace base {
+
+// static
+int64_t SysInfo::AmountOfPhysicalMemoryImpl() {
+  return g_lazy_physical_memory.Get().value();
+}
+
+// static
+int64_t SysInfo::AmountOfAvailablePhysicalMemoryImpl() {
+  SystemMemoryInfoKB info;
+  if (!GetSystemMemoryInfo(&info))
+    return 0;
+  return AmountOfAvailablePhysicalMemory(info);
+}
+
+// static
+int64_t SysInfo::AmountOfAvailablePhysicalMemory(
+    const SystemMemoryInfoKB& info) {
+  // See details here:
+  // https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=34e431b0ae398fc54ea69ff85ec700722c9da773
+  // The fallback logic (when there is no MemAvailable) would be more precise
+  // if we had info about zones watermarks (/proc/zoneinfo).
+  int64_t res_kb = info.available != 0
+                       ? info.available - info.active_file
+                       : info.free + info.reclaimable + info.inactive_file;
+  return res_kb * 1024;
+}
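+
+// Worked example (illustrative numbers): with MemAvailable = 4,000,000 kB and
+// Active(file) = 500,000 kB, this returns (4000000 - 500000) * 1024 bytes.
+// Without MemAvailable (info.available == 0), e.g. MemFree = 1,000,000 kB,
+// SReclaimable = 200,000 kB and Inactive(file) = 300,000 kB, it returns
+// (1000000 + 200000 + 300000) * 1024 bytes.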
+
+// static
+std::string SysInfo::CPUModelName() {
+#if defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL)
+  const char kCpuModelPrefix[] = "Hardware";
+#else
+  const char kCpuModelPrefix[] = "model name";
+#endif
+  std::string contents;
+  ReadFileToString(FilePath("/proc/cpuinfo"), &contents);
+  DCHECK(!contents.empty());
+  if (!contents.empty()) {
+    std::istringstream iss(contents);
+    std::string line;
+    while (std::getline(iss, line)) {
+      if (line.compare(0, strlen(kCpuModelPrefix), kCpuModelPrefix) == 0) {
+        size_t pos = line.find(": ");
+        return line.substr(pos + 2);
+      }
+    }
+  }
+  return std::string();
+}
+
+}  // namespace base
diff --git a/base/sys_info_mac.mm b/base/sys_info_mac.mm
new file mode 100644
index 0000000..89bebb8
--- /dev/null
+++ b/base/sys_info_mac.mm
@@ -0,0 +1,108 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sys_info.h"
+
+#include <ApplicationServices/ApplicationServices.h>
+#include <CoreServices/CoreServices.h>
+#import <Foundation/Foundation.h>
+#include <mach/mach_host.h>
+#include <mach/mach_init.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/sysctl.h>
+#include <sys/types.h>
+
+#include "base/logging.h"
+#include "base/mac/mac_util.h"
+#include "base/mac/scoped_mach_port.h"
+#import "base/mac/sdk_forward_declarations.h"
+#include "base/macros.h"
+#include "base/process/process_metrics.h"
+#include "base/strings/stringprintf.h"
+
+namespace base {
+
+namespace {
+
+// Queries sysctlbyname() for the given key and returns the value from the
+// system or the empty string on failure.
+std::string GetSysctlValue(const char* key_name) {
+  char value[256];
+  size_t len = arraysize(value);
+  if (sysctlbyname(key_name, &value, &len, nullptr, 0) == 0) {
+    DCHECK_GE(len, 1u);
+    DCHECK_EQ('\0', value[len - 1]);
+    return std::string(value, len - 1);
+  }
+  return std::string();
+}
+
+}  // namespace
+
+// static
+std::string SysInfo::OperatingSystemName() {
+  return "Mac OS X";
+}
+
+// static
+std::string SysInfo::OperatingSystemVersion() {
+  int32_t major, minor, bugfix;
+  OperatingSystemVersionNumbers(&major, &minor, &bugfix);
+  return base::StringPrintf("%d.%d.%d", major, minor, bugfix);
+}
+
+// static
+void SysInfo::OperatingSystemVersionNumbers(int32_t* major_version,
+                                            int32_t* minor_version,
+                                            int32_t* bugfix_version) {
+  if (@available(macOS 10.10, *)) {
+    NSOperatingSystemVersion version =
+        [[NSProcessInfo processInfo] operatingSystemVersion];
+    *major_version = version.majorVersion;
+    *minor_version = version.minorVersion;
+    *bugfix_version = version.patchVersion;
+  } else {
+    NOTREACHED();
+  }
+}
+
+// static
+int64_t SysInfo::AmountOfPhysicalMemoryImpl() {
+  struct host_basic_info hostinfo;
+  mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
+  base::mac::ScopedMachSendRight host(mach_host_self());
+  int result = host_info(host.get(),
+                         HOST_BASIC_INFO,
+                         reinterpret_cast<host_info_t>(&hostinfo),
+                         &count);
+  if (result != KERN_SUCCESS) {
+    NOTREACHED();
+    return 0;
+  }
+  DCHECK_EQ(HOST_BASIC_INFO_COUNT, count);
+  return static_cast<int64_t>(hostinfo.max_mem);
+}
+
+// static
+int64_t SysInfo::AmountOfAvailablePhysicalMemoryImpl() {
+  SystemMemoryInfoKB info;
+  if (!GetSystemMemoryInfo(&info))
+    return 0;
+  // We should add inactive file-backed memory also but there is no such
+  // information from Mac OS unfortunately.
+  return static_cast<int64_t>(info.free + info.speculative) * 1024;
+}
+
+// static
+std::string SysInfo::CPUModelName() {
+  return GetSysctlValue("machdep.cpu.brand_string");
+}
+
+// static
+std::string SysInfo::HardwareModelName() {
+  return GetSysctlValue("hw.model");
+}
+
+}  // namespace base
diff --git a/base/sys_info_openbsd.cc b/base/sys_info_openbsd.cc
new file mode 100644
index 0000000..5a1ad56
--- /dev/null
+++ b/base/sys_info_openbsd.cc
@@ -0,0 +1,80 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sys_info.h"
+
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/param.h>
+#include <sys/shm.h>
+#include <sys/sysctl.h>
+#include <unistd.h>
+
+#include "base/logging.h"
+#include "base/macros.h"
+
+namespace {
+
+int64_t AmountOfMemory(int pages_name) {
+  long pages = sysconf(pages_name);
+  long page_size = sysconf(_SC_PAGESIZE);
+  if (pages == -1 || page_size == -1) {
+    NOTREACHED();
+    return 0;
+  }
+  return static_cast<int64_t>(pages) * page_size;
+}
+
+}  // namespace
+
+namespace base {
+
+// static
+int SysInfo::NumberOfProcessors() {
+  int mib[] = { CTL_HW, HW_NCPU };
+  int ncpu;
+  size_t size = sizeof(ncpu);
+  if (sysctl(mib, arraysize(mib), &ncpu, &size, NULL, 0) < 0) {
+    NOTREACHED();
+    return 1;
+  }
+  return ncpu;
+}
+
+// static
+int64_t SysInfo::AmountOfPhysicalMemoryImpl() {
+  return AmountOfMemory(_SC_PHYS_PAGES);
+}
+
+// static
+int64_t SysInfo::AmountOfAvailablePhysicalMemoryImpl() {
+  // We should add inactive file-backed memory also but there is no such
+  // information from OpenBSD unfortunately.
+  return AmountOfMemory(_SC_AVPHYS_PAGES);
+}
+
+// static
+uint64_t SysInfo::MaxSharedMemorySize() {
+  int mib[] = { CTL_KERN, KERN_SHMINFO, KERN_SHMINFO_SHMMAX };
+  size_t limit;
+  size_t size = sizeof(limit);
+  if (sysctl(mib, arraysize(mib), &limit, &size, NULL, 0) < 0) {
+    NOTREACHED();
+    return 0;
+  }
+  return static_cast<uint64_t>(limit);
+}
+
+// static
+std::string SysInfo::CPUModelName() {
+  int mib[] = { CTL_HW, HW_MODEL };
+  char name[256];
+  size_t len = arraysize(name);
+  if (sysctl(mib, arraysize(mib), name, &len, NULL, 0) < 0) {
+    NOTREACHED();
+    return std::string();
+  }
+  return name;
+}
+
+}  // namespace base
diff --git a/base/sys_info_posix.cc b/base/sys_info_posix.cc
new file mode 100644
index 0000000..f6fcd10
--- /dev/null
+++ b/base/sys_info_posix.cc
@@ -0,0 +1,240 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sys_info.h"
+
+#include <errno.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/param.h>
+#include <sys/utsname.h>
+#include <unistd.h>
+
+#include <limits>
+
+#include "base/files/file_util.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/sys_info_internal.h"
+#include "base/threading/thread_restrictions.h"
+#include "build/build_config.h"
+
+#if !defined(OS_FUCHSIA)
+#include <sys/resource.h>
+#endif
+
+#if defined(OS_ANDROID)
+#include <sys/vfs.h>
+#define statvfs statfs  // Android uses a statvfs-like statfs struct and call.
+#else
+#include <sys/statvfs.h>
+#endif
+
+#if defined(OS_LINUX)
+#include <linux/magic.h>
+#include <sys/vfs.h>
+#endif
+
+namespace {
+
+#if !defined(OS_OPENBSD) && !defined(OS_FUCHSIA)
+int NumberOfProcessors() {
+  // sysconf returns the number of "logical" (not "physical") processors on
+  // both Mac and Linux, so this is the maximum number of available "logical"
+  // processors.
+  //
+  // Note that the number of "currently online" processors may be fewer than
+  // the value NumberOfProcessors() returns. On some platforms the kernel may
+  // take some processors offline intermittently to save power when the
+  // system load is low.
+  //
+  // One common use case that needs the processor count is creating the
+  // optimal number of threads. Such a use should plan according to the
+  // number of "max available" processors instead of "currently online" ones.
+  // The kernel should be smart enough to bring all processors online when it
+  // has a sufficient number of threads waiting to run.
+  long res = sysconf(_SC_NPROCESSORS_CONF);
+  if (res == -1) {
+    NOTREACHED();
+    return 1;
+  }
+
+  return static_cast<int>(res);
+}
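+
+// Note (illustrative): _SC_NPROCESSORS_CONF counts configured processors,
+// while _SC_NPROCESSORS_ONLN would count only currently-online ones:
+//
+//   long configured = sysconf(_SC_NPROCESSORS_CONF);  // e.g. 8
+//   long online = sysconf(_SC_NPROCESSORS_ONLN);      // e.g. 4 under low load
+//   // configured >= online on kernels that park CPUs to save power.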
+
+base::LazyInstance<
+    base::internal::LazySysInfoValue<int, NumberOfProcessors> >::Leaky
+    g_lazy_number_of_processors = LAZY_INSTANCE_INITIALIZER;
+#endif  // !defined(OS_OPENBSD) && !defined(OS_FUCHSIA)
+
+#if !defined(OS_FUCHSIA)
+int64_t AmountOfVirtualMemory() {
+  struct rlimit limit;
+  int result = getrlimit(RLIMIT_DATA, &limit);
+  if (result != 0) {
+    NOTREACHED();
+    return 0;
+  }
+  return limit.rlim_cur == RLIM_INFINITY ? 0 : limit.rlim_cur;
+}
+
+base::LazyInstance<
+    base::internal::LazySysInfoValue<int64_t, AmountOfVirtualMemory>>::Leaky
+    g_lazy_virtual_memory = LAZY_INSTANCE_INITIALIZER;
+#endif  // !defined(OS_FUCHSIA)
+
+#if defined(OS_LINUX)
+bool IsStatsZeroIfUnlimited(const base::FilePath& path) {
+  struct statfs stats;
+
+  if (HANDLE_EINTR(statfs(path.value().c_str(), &stats)) != 0)
+    return false;
+
+  switch (stats.f_type) {
+    case TMPFS_MAGIC:
+    case HUGETLBFS_MAGIC:
+    case RAMFS_MAGIC:
+      return true;
+  }
+  return false;
+}
+#endif
+
+bool GetDiskSpaceInfo(const base::FilePath& path,
+                      int64_t* available_bytes,
+                      int64_t* total_bytes) {
+  struct statvfs stats;
+  if (HANDLE_EINTR(statvfs(path.value().c_str(), &stats)) != 0)
+    return false;
+
+#if defined(OS_LINUX)
+  const bool zero_size_means_unlimited =
+      stats.f_blocks == 0 && IsStatsZeroIfUnlimited(path);
+#else
+  const bool zero_size_means_unlimited = false;
+#endif
+
+  if (available_bytes) {
+    *available_bytes =
+        zero_size_means_unlimited
+            ? std::numeric_limits<int64_t>::max()
+            : static_cast<int64_t>(stats.f_bavail) * stats.f_frsize;
+  }
+
+  if (total_bytes) {
+    *total_bytes = zero_size_means_unlimited
+                       ? std::numeric_limits<int64_t>::max()
+                       : static_cast<int64_t>(stats.f_blocks) * stats.f_frsize;
+  }
+  return true;
+}
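+
+// Usage sketch (illustrative only): either out-pointer may be null when the
+// caller needs only one of the two values:
+//
+//   int64_t available = 0;
+//   if (GetDiskSpaceInfo(base::FilePath("/tmp"), &available, nullptr))
+//     VLOG(1) << "/tmp has " << available << " bytes available";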
+
+}  // namespace
+
+namespace base {
+
+#if !defined(OS_OPENBSD) && !defined(OS_FUCHSIA)
+int SysInfo::NumberOfProcessors() {
+  return g_lazy_number_of_processors.Get().value();
+}
+#endif
+
+#if !defined(OS_FUCHSIA)
+// static
+int64_t SysInfo::AmountOfVirtualMemory() {
+  return g_lazy_virtual_memory.Get().value();
+}
+#endif
+
+// static
+int64_t SysInfo::AmountOfFreeDiskSpace(const FilePath& path) {
+  AssertBlockingAllowed();
+
+  int64_t available;
+  if (!GetDiskSpaceInfo(path, &available, nullptr))
+    return -1;
+  return available;
+}
+
+// static
+int64_t SysInfo::AmountOfTotalDiskSpace(const FilePath& path) {
+  AssertBlockingAllowed();
+
+  int64_t total;
+  if (!GetDiskSpaceInfo(path, nullptr, &total))
+    return -1;
+  return total;
+}
+
+#if !defined(OS_MACOSX) && !defined(OS_ANDROID)
+// static
+std::string SysInfo::OperatingSystemName() {
+  struct utsname info;
+  if (uname(&info) < 0) {
+    NOTREACHED();
+    return std::string();
+  }
+  return std::string(info.sysname);
+}
+#endif
+
+#if !defined(OS_MACOSX) && !defined(OS_ANDROID)
+// static
+std::string SysInfo::OperatingSystemVersion() {
+  struct utsname info;
+  if (uname(&info) < 0) {
+    NOTREACHED();
+    return std::string();
+  }
+  return std::string(info.release);
+}
+#endif
+
+#if !defined(OS_MACOSX) && !defined(OS_ANDROID) && !defined(OS_CHROMEOS)
+// static
+void SysInfo::OperatingSystemVersionNumbers(int32_t* major_version,
+                                            int32_t* minor_version,
+                                            int32_t* bugfix_version) {
+  struct utsname info;
+  if (uname(&info) < 0) {
+    NOTREACHED();
+    *major_version = 0;
+    *minor_version = 0;
+    *bugfix_version = 0;
+    return;
+  }
+  int num_read = sscanf(info.release, "%d.%d.%d", major_version, minor_version,
+                        bugfix_version);
+  if (num_read < 1)
+    *major_version = 0;
+  if (num_read < 2)
+    *minor_version = 0;
+  if (num_read < 3)
+    *bugfix_version = 0;
+}
+#endif
+
+// static
+std::string SysInfo::OperatingSystemArchitecture() {
+  struct utsname info;
+  if (uname(&info) < 0) {
+    NOTREACHED();
+    return std::string();
+  }
+  std::string arch(info.machine);
+  if (arch == "i386" || arch == "i486" || arch == "i586" || arch == "i686") {
+    arch = "x86";
+  } else if (arch == "amd64") {
+    arch = "x86_64";
+  } else if (std::string(info.sysname) == "AIX") {
+    arch = "ppc64";
+  }
+  return arch;
+}
+
+// static
+size_t SysInfo::VMAllocationGranularity() {
+  return getpagesize();
+}
+
+}  // namespace base
diff --git a/base/sys_info_unittest.cc b/base/sys_info_unittest.cc
new file mode 100644
index 0000000..e97ab57
--- /dev/null
+++ b/base/sys_info_unittest.cc
@@ -0,0 +1,212 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdint.h>
+
+#include "base/environment.h"
+#include "base/files/file_util.h"
+#include "base/process/process_metrics.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_split.h"
+#include "base/sys_info.h"
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/platform_test.h"
+
+namespace base {
+
+using SysInfoTest = PlatformTest;
+
+TEST_F(SysInfoTest, NumProcs) {
+  // We aren't actually testing that it's correct, just that it's sane.
+  EXPECT_GE(SysInfo::NumberOfProcessors(), 1);
+}
+
+TEST_F(SysInfoTest, AmountOfMem) {
+  // We aren't actually testing that it's correct, just that it's sane.
+  EXPECT_GT(SysInfo::AmountOfPhysicalMemory(), 0);
+  EXPECT_GT(SysInfo::AmountOfPhysicalMemoryMB(), 0);
+  // The maximal amount of virtual memory can be zero, which means unlimited.
+  EXPECT_GE(SysInfo::AmountOfVirtualMemory(), 0);
+}
+
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+#if defined(OS_LINUX)
+#define MAYBE_AmountOfAvailablePhysicalMemory \
+  DISABLED_AmountOfAvailablePhysicalMemory
+#else
+#define MAYBE_AmountOfAvailablePhysicalMemory AmountOfAvailablePhysicalMemory
+#endif  // defined(OS_LINUX)
+TEST_F(SysInfoTest, MAYBE_AmountOfAvailablePhysicalMemory) {
+  // Note: info is in _K_bytes.
+  SystemMemoryInfoKB info;
+  ASSERT_TRUE(GetSystemMemoryInfo(&info));
+  EXPECT_GT(info.free, 0);
+
+  if (info.available != 0) {
+    // If there is MemAvailable from kernel.
+    EXPECT_LT(info.available, info.total);
+    const int64_t amount = SysInfo::AmountOfAvailablePhysicalMemory(info);
+    // We aren't actually testing that it's correct, just that it's sane.
+    EXPECT_GT(amount, static_cast<int64_t>(info.free) * 1024);
+    EXPECT_LT(amount / 1024, info.available);
+    // Simulate as if there is no MemAvailable.
+    info.available = 0;
+  }
+
+  // There is no MemAvailable. Check the fallback logic.
+  const int64_t amount = SysInfo::AmountOfAvailablePhysicalMemory(info);
+  // We aren't actually testing that it's correct, just that it's sane.
+  EXPECT_GT(amount, static_cast<int64_t>(info.free) * 1024);
+  EXPECT_LT(amount / 1024, info.total);
+}
+#endif  // defined(OS_LINUX) || defined(OS_ANDROID)
+
+TEST_F(SysInfoTest, AmountOfFreeDiskSpace) {
+  // We aren't actually testing that it's correct, just that it's sane.
+  FilePath tmp_path;
+  ASSERT_TRUE(GetTempDir(&tmp_path));
+  EXPECT_GE(SysInfo::AmountOfFreeDiskSpace(tmp_path), 0) << tmp_path.value();
+}
+
+TEST_F(SysInfoTest, AmountOfTotalDiskSpace) {
+  // We aren't actually testing that it's correct, just that it's sane.
+  FilePath tmp_path;
+  ASSERT_TRUE(GetTempDir(&tmp_path));
+  EXPECT_GT(SysInfo::AmountOfTotalDiskSpace(tmp_path), 0) << tmp_path.value();
+}
+
+#if defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX)
+TEST_F(SysInfoTest, OperatingSystemVersionNumbers) {
+  int32_t os_major_version = -1;
+  int32_t os_minor_version = -1;
+  int32_t os_bugfix_version = -1;
+  SysInfo::OperatingSystemVersionNumbers(&os_major_version,
+                                         &os_minor_version,
+                                         &os_bugfix_version);
+  EXPECT_GT(os_major_version, -1);
+  EXPECT_GT(os_minor_version, -1);
+  EXPECT_GT(os_bugfix_version, -1);
+}
+#endif
+
+TEST_F(SysInfoTest, Uptime) {
+  TimeDelta up_time_1 = SysInfo::Uptime();
+  // Uptime() is implemented internally using TimeTicks::Now(), whose
+  // documented system resolution is 1-15ms. Sleep a little longer than that.
+  PlatformThread::Sleep(TimeDelta::FromMilliseconds(20));
+  TimeDelta up_time_2 = SysInfo::Uptime();
+  EXPECT_GT(up_time_1.InMicroseconds(), 0);
+  EXPECT_GT(up_time_2.InMicroseconds(), up_time_1.InMicroseconds());
+}
+
+#if defined(OS_MACOSX)
+TEST_F(SysInfoTest, HardwareModelNameFormatMacAndiOS) {
+  std::string hardware_model = SysInfo::HardwareModelName();
+  ASSERT_FALSE(hardware_model.empty());
+  // Check that the model is of the expected format "Foo,Bar" where "Bar" is
+  // a number.
+  std::vector<StringPiece> pieces =
+      SplitStringPiece(hardware_model, ",", KEEP_WHITESPACE, SPLIT_WANT_ALL);
+  ASSERT_EQ(2u, pieces.size()) << hardware_model;
+  int value;
+  EXPECT_TRUE(StringToInt(pieces[1], &value)) << hardware_model;
+}
+#endif
+
+#if defined(OS_CHROMEOS)
+
+TEST_F(SysInfoTest, GoogleChromeOSVersionNumbers) {
+  int32_t os_major_version = -1;
+  int32_t os_minor_version = -1;
+  int32_t os_bugfix_version = -1;
+  const char kLsbRelease[] =
+      "FOO=1234123.34.5\n"
+      "CHROMEOS_RELEASE_VERSION=1.2.3.4\n";
+  SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease, Time());
+  SysInfo::OperatingSystemVersionNumbers(&os_major_version,
+                                         &os_minor_version,
+                                         &os_bugfix_version);
+  EXPECT_EQ(1, os_major_version);
+  EXPECT_EQ(2, os_minor_version);
+  EXPECT_EQ(3, os_bugfix_version);
+}
+
+TEST_F(SysInfoTest, GoogleChromeOSVersionNumbersFirst) {
+  int32_t os_major_version = -1;
+  int32_t os_minor_version = -1;
+  int32_t os_bugfix_version = -1;
+  const char kLsbRelease[] =
+      "CHROMEOS_RELEASE_VERSION=1.2.3.4\n"
+      "FOO=1234123.34.5\n";
+  SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease, Time());
+  SysInfo::OperatingSystemVersionNumbers(&os_major_version,
+                                         &os_minor_version,
+                                         &os_bugfix_version);
+  EXPECT_EQ(1, os_major_version);
+  EXPECT_EQ(2, os_minor_version);
+  EXPECT_EQ(3, os_bugfix_version);
+}
+
+TEST_F(SysInfoTest, GoogleChromeOSNoVersionNumbers) {
+  int32_t os_major_version = -1;
+  int32_t os_minor_version = -1;
+  int32_t os_bugfix_version = -1;
+  const char kLsbRelease[] = "FOO=1234123.34.5\n";
+  SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease, Time());
+  SysInfo::OperatingSystemVersionNumbers(&os_major_version,
+                                         &os_minor_version,
+                                         &os_bugfix_version);
+  EXPECT_EQ(0, os_major_version);
+  EXPECT_EQ(0, os_minor_version);
+  EXPECT_EQ(0, os_bugfix_version);
+}
+
+TEST_F(SysInfoTest, GoogleChromeOSLsbReleaseTime) {
+  const char kLsbRelease[] = "CHROMEOS_RELEASE_VERSION=1.2.3.4";
+  // Use a fake time that can be safely displayed as a string.
+  const Time lsb_release_time(Time::FromDoubleT(12345.6));
+  SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease, lsb_release_time);
+  Time parsed_lsb_release_time = SysInfo::GetLsbReleaseTime();
+  EXPECT_DOUBLE_EQ(lsb_release_time.ToDoubleT(),
+                   parsed_lsb_release_time.ToDoubleT());
+}
+
+TEST_F(SysInfoTest, IsRunningOnChromeOS) {
+  SysInfo::SetChromeOSVersionInfoForTest("", Time());
+  EXPECT_FALSE(SysInfo::IsRunningOnChromeOS());
+
+  const char kLsbRelease1[] =
+      "CHROMEOS_RELEASE_NAME=Non Chrome OS\n"
+      "CHROMEOS_RELEASE_VERSION=1.2.3.4\n";
+  SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease1, Time());
+  EXPECT_FALSE(SysInfo::IsRunningOnChromeOS());
+
+  const char kLsbRelease2[] =
+      "CHROMEOS_RELEASE_NAME=Chrome OS\n"
+      "CHROMEOS_RELEASE_VERSION=1.2.3.4\n";
+  SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease2, Time());
+  EXPECT_TRUE(SysInfo::IsRunningOnChromeOS());
+
+  const char kLsbRelease3[] =
+      "CHROMEOS_RELEASE_NAME=Chromium OS\n";
+  SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease3, Time());
+  EXPECT_TRUE(SysInfo::IsRunningOnChromeOS());
+}
+
+TEST_F(SysInfoTest, GetStrippedReleaseBoard) {
+  const char* kLsbRelease1 = "CHROMEOS_RELEASE_BOARD=Glimmer\n";
+  SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease1, Time());
+  EXPECT_EQ("glimmer", SysInfo::GetStrippedReleaseBoard());
+
+  const char* kLsbRelease2 = "CHROMEOS_RELEASE_BOARD=glimmer-signed-mp-v4keys";
+  SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease2, Time());
+  EXPECT_EQ("glimmer", SysInfo::GetStrippedReleaseBoard());
+}
+
+#endif  // defined(OS_CHROMEOS)
+
+}  // namespace base
diff --git a/base/sys_info_win.cc b/base/sys_info_win.cc
new file mode 100644
index 0000000..0945549
--- /dev/null
+++ b/base/sys_info_win.cc
@@ -0,0 +1,163 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sys_info.h"
+
+#include <windows.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <limits>
+
+#include "base/files/file_path.h"
+#include "base/logging.h"
+#include "base/process/process_metrics.h"
+#include "base/strings/stringprintf.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/win/windows_version.h"
+
+namespace {
+
+int64_t AmountOfMemory(DWORDLONG MEMORYSTATUSEX::*memory_field) {
+  MEMORYSTATUSEX memory_info;
+  memory_info.dwLength = sizeof(memory_info);
+  if (!GlobalMemoryStatusEx(&memory_info)) {
+    NOTREACHED();
+    return 0;
+  }
+
+  int64_t rv = static_cast<int64_t>(memory_info.*memory_field);
+  return rv < 0 ? std::numeric_limits<int64_t>::max() : rv;
+}
+
+bool GetDiskSpaceInfo(const base::FilePath& path,
+                      int64_t* available_bytes,
+                      int64_t* total_bytes) {
+  ULARGE_INTEGER available;
+  ULARGE_INTEGER total;
+  ULARGE_INTEGER free;
+  if (!GetDiskFreeSpaceExW(path.value().c_str(), &available, &total, &free))
+    return false;
+
+  if (available_bytes) {
+    *available_bytes = static_cast<int64_t>(available.QuadPart);
+    if (*available_bytes < 0)
+      *available_bytes = std::numeric_limits<int64_t>::max();
+  }
+  if (total_bytes) {
+    *total_bytes = static_cast<int64_t>(total.QuadPart);
+    if (*total_bytes < 0)
+      *total_bytes = std::numeric_limits<int64_t>::max();
+  }
+  return true;
+}
+
+}  // namespace
+
+namespace base {
+
+// static
+int SysInfo::NumberOfProcessors() {
+  return win::OSInfo::GetInstance()->processors();
+}
+
+// static
+int64_t SysInfo::AmountOfPhysicalMemoryImpl() {
+  return AmountOfMemory(&MEMORYSTATUSEX::ullTotalPhys);
+}
+
+// static
+int64_t SysInfo::AmountOfAvailablePhysicalMemoryImpl() {
+  SystemMemoryInfoKB info;
+  if (!GetSystemMemoryInfo(&info))
+    return 0;
+  return static_cast<int64_t>(info.avail_phys) * 1024;
+}
+
+// static
+int64_t SysInfo::AmountOfVirtualMemory() {
+  return AmountOfMemory(&MEMORYSTATUSEX::ullTotalVirtual);
+}
+
+// static
+int64_t SysInfo::AmountOfFreeDiskSpace(const FilePath& path) {
+  AssertBlockingAllowed();
+
+  int64_t available;
+  if (!GetDiskSpaceInfo(path, &available, nullptr))
+    return -1;
+  return available;
+}
+
+// static
+int64_t SysInfo::AmountOfTotalDiskSpace(const FilePath& path) {
+  AssertBlockingAllowed();
+
+  int64_t total;
+  if (!GetDiskSpaceInfo(path, nullptr, &total))
+    return -1;
+  return total;
+}
+
+std::string SysInfo::OperatingSystemName() {
+  return "Windows NT";
+}
+
+// static
+std::string SysInfo::OperatingSystemVersion() {
+  win::OSInfo* os_info = win::OSInfo::GetInstance();
+  win::OSInfo::VersionNumber version_number = os_info->version_number();
+  std::string version(StringPrintf("%d.%d.%d", version_number.major,
+                                   version_number.minor,
+                                   version_number.build));
+  win::OSInfo::ServicePack service_pack = os_info->service_pack();
+  if (service_pack.major != 0) {
+    version += StringPrintf(" SP%d", service_pack.major);
+    if (service_pack.minor != 0)
+      version += StringPrintf(".%d", service_pack.minor);
+  }
+  return version;
+}
+
+// TODO: Implement OperatingSystemVersionComplete, which would include
+// patchlevel/service pack number.
+// See chrome/browser/feedback/feedback_util.h, FeedbackUtil::SetOSVersion.
+
+// static
+std::string SysInfo::OperatingSystemArchitecture() {
+  win::OSInfo::WindowsArchitecture arch =
+      win::OSInfo::GetInstance()->architecture();
+  switch (arch) {
+    case win::OSInfo::X86_ARCHITECTURE:
+      return "x86";
+    case win::OSInfo::X64_ARCHITECTURE:
+      return "x86_64";
+    case win::OSInfo::IA64_ARCHITECTURE:
+      return "ia64";
+    default:
+      return "";
+  }
+}
+
+// static
+std::string SysInfo::CPUModelName() {
+  return win::OSInfo::GetInstance()->processor_model_name();
+}
+
+// static
+size_t SysInfo::VMAllocationGranularity() {
+  return win::OSInfo::GetInstance()->allocation_granularity();
+}
+
+// static
+void SysInfo::OperatingSystemVersionNumbers(int32_t* major_version,
+                                            int32_t* minor_version,
+                                            int32_t* bugfix_version) {
+  win::OSInfo* os_info = win::OSInfo::GetInstance();
+  *major_version = os_info->version_number().major;
+  *minor_version = os_info->version_number().minor;
+  *bugfix_version = 0;
+}
+
+}  // namespace base
diff --git a/base/syslog_logging.cc b/base/syslog_logging.cc
new file mode 100644
index 0000000..03c2b5e
--- /dev/null
+++ b/base/syslog_logging.cc
@@ -0,0 +1,118 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/syslog_logging.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#include "base/bind.h"
+#include "base/callback_helpers.h"
+#include "base/debug/stack_trace.h"
+#elif defined(OS_LINUX)
+// <syslog.h> defines a LOG_WARNING macro that could conflict with
+// base::LOG_WARNING.
+#include <syslog.h>
+#undef LOG_WARNING
+#endif
+
+#include <ostream>
+#include <string>
+
+namespace logging {
+
+#if defined(OS_WIN)
+
+namespace {
+
+std::string* g_event_source_name = nullptr;
+uint16_t g_category = 0;
+uint32_t g_event_id = 0;
+
+}  // namespace
+
+void SetEventSource(const std::string& name,
+                    uint16_t category,
+                    uint32_t event_id) {
+  DCHECK_EQ(nullptr, g_event_source_name);
+  g_event_source_name = new std::string(name);
+  g_category = category;
+  g_event_id = event_id;
+}
+
+#endif  // defined(OS_WIN)
+
+EventLogMessage::EventLogMessage(const char* file,
+                                 int line,
+                                 LogSeverity severity)
+    : log_message_(file, line, severity) {
+}
+
+EventLogMessage::~EventLogMessage() {
+#if defined(OS_WIN)
+  // If g_event_source_name is nullptr (which it is by default), SYSLOG will
+  // degrade gracefully to a regular LOG. If you see this happening, you are
+  // most probably using SYSLOG before calling SetEventSource.
+  if (g_event_source_name == nullptr)
+    return;
+
+  HANDLE event_log_handle =
+      RegisterEventSourceA(nullptr, g_event_source_name->c_str());
+  if (event_log_handle == nullptr) {
+    stream() << " !!NOT ADDED TO EVENTLOG!!";
+    return;
+  }
+
+  base::ScopedClosureRunner auto_deregister(
+      base::Bind(base::IgnoreResult(&DeregisterEventSource), event_log_handle));
+  std::string message(log_message_.str());
+  WORD log_type = EVENTLOG_ERROR_TYPE;
+  switch (log_message_.severity()) {
+    case LOG_INFO:
+      log_type = EVENTLOG_INFORMATION_TYPE;
+      break;
+    case LOG_WARNING:
+      log_type = EVENTLOG_WARNING_TYPE;
+      break;
+    case LOG_ERROR:
+    case LOG_FATAL:
+      // The price of getting the stack trace is not worth the hassle for
+      // non-error conditions.
+      base::debug::StackTrace trace;
+      message.append(trace.ToString());
+      log_type = EVENTLOG_ERROR_TYPE;
+      break;
+  }
+  LPCSTR strings[1] = {message.data()};
+  if (!ReportEventA(event_log_handle, log_type, g_category, g_event_id, nullptr,
+                    1, 0, strings, nullptr)) {
+    stream() << " !!NOT ADDED TO EVENTLOG!!";
+  }
+#elif defined(OS_LINUX)
+  const char kEventSource[] = "chrome";
+  openlog(kEventSource, LOG_NOWAIT | LOG_PID, LOG_USER);
+  // We can't use the severity names defined in syslog.h because they collide
+  // with the names of our own severity levels. Therefore we use the raw
+  // numeric values; see sys/syslog.h for reference.
+  int priority = 3;
+  switch (log_message_.severity()) {
+    case LOG_INFO:
+      priority = 6;
+      break;
+    case LOG_WARNING:
+      priority = 4;
+      break;
+    case LOG_ERROR:
+      priority = 3;
+      break;
+    case LOG_FATAL:
+      priority = 2;
+      break;
+  }
+  syslog(priority, "%s", log_message_.str().c_str());
+  closelog();
+#endif  // defined(OS_WIN)
+}
+
+}  // namespace logging
diff --git a/base/syslog_logging.h b/base/syslog_logging.h
new file mode 100644
index 0000000..736a5b2
--- /dev/null
+++ b/base/syslog_logging.h
@@ -0,0 +1,50 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SYSLOG_LOGGING_H_
+#define BASE_SYSLOG_LOGGING_H_
+
+#include <stdint.h>
+
+#include <iosfwd>
+#include <string>
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "build/build_config.h"
+
+namespace logging {
+
+// Keep in mind that the syslog is always active regardless of the logging
+// level and applied flags. Use it only for important information that a
+// system administrator might need to maintain the browser installation.
+#define SYSLOG_STREAM(severity) \
+  COMPACT_GOOGLE_LOG_EX_ ## severity(EventLogMessage).stream()
+#define SYSLOG(severity) \
+  SYSLOG_STREAM(severity)
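+
+// Usage sketch (illustrative only):
+//
+//   SYSLOG(ERROR) << "Policy fetch failed";
+//
+// On Windows this is reported to the Application event log (after
+// SetEventSource() has been called); on Linux it goes to syslog.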
+
+#if defined(OS_WIN)
+// Sets the name, category and event ID of the event source for logging to
+// the Windows Event Log. Call this function once before using the SYSLOG
+// macro; otherwise it will behave as a regular LOG macro.
+void BASE_EXPORT SetEventSource(const std::string& name,
+                                uint16_t category,
+                                uint32_t event_id);
+#endif  // defined(OS_WIN)
+
+// Creates a formatted message in the system event log: the Application event
+// log on Windows and the messages log file (via syslog) on POSIX systems.
+class BASE_EXPORT EventLogMessage {
+ public:
+  EventLogMessage(const char* file, int line, LogSeverity severity);
+
+  ~EventLogMessage();
+
+  std::ostream& stream() { return log_message_.stream(); }
+
+ private:
+  LogMessage log_message_;
+
+  DISALLOW_COPY_AND_ASSIGN(EventLogMessage);
+};
+
+}  // namespace logging
+
+#endif  // BASE_SYSLOG_LOGGING_H_
diff --git a/base/system_monitor/system_monitor.cc b/base/system_monitor/system_monitor.cc
new file mode 100644
index 0000000..71e4f07
--- /dev/null
+++ b/base/system_monitor/system_monitor.cc
@@ -0,0 +1,51 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/system_monitor/system_monitor.h"
+
+#include <utility>
+
+#include "base/logging.h"
+#include "base/time/time.h"
+
+namespace base {
+
+static SystemMonitor* g_system_monitor = nullptr;
+
+SystemMonitor::SystemMonitor()
+    : devices_changed_observer_list_(
+          new ObserverListThreadSafe<DevicesChangedObserver>()) {
+  DCHECK(!g_system_monitor);
+  g_system_monitor = this;
+}
+
+SystemMonitor::~SystemMonitor() {
+  DCHECK_EQ(this, g_system_monitor);
+  g_system_monitor = nullptr;
+}
+
+// static
+SystemMonitor* SystemMonitor::Get() {
+  return g_system_monitor;
+}
+
+void SystemMonitor::ProcessDevicesChanged(DeviceType device_type) {
+  NotifyDevicesChanged(device_type);
+}
+
+void SystemMonitor::AddDevicesChangedObserver(DevicesChangedObserver* obs) {
+  devices_changed_observer_list_->AddObserver(obs);
+}
+
+void SystemMonitor::RemoveDevicesChangedObserver(DevicesChangedObserver* obs) {
+  devices_changed_observer_list_->RemoveObserver(obs);
+}
+
+void SystemMonitor::NotifyDevicesChanged(DeviceType device_type) {
+  DVLOG(1) << "DevicesChanged with device type " << device_type;
+  devices_changed_observer_list_->Notify(
+      FROM_HERE, &DevicesChangedObserver::OnDevicesChanged, device_type);
+}
+
+}  // namespace base
diff --git a/base/system_monitor/system_monitor.h b/base/system_monitor/system_monitor.h
new file mode 100644
index 0000000..7f21e47
--- /dev/null
+++ b/base/system_monitor/system_monitor.h
@@ -0,0 +1,75 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SYSTEM_MONITOR_SYSTEM_MONITOR_H_
+#define BASE_SYSTEM_MONITOR_SYSTEM_MONITOR_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/observer_list_threadsafe.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// Class for monitoring various system-related subsystems
+// such as power management, network status, etc.
+// TODO(mbelshe):  Add support beyond just power management.
+class BASE_EXPORT SystemMonitor {
+ public:
+  // Types of devices whose changes (such as add/remove) need to be monitored.
+  enum DeviceType {
+    DEVTYPE_AUDIO,          // Audio device, e.g., microphone.
+    DEVTYPE_VIDEO_CAPTURE,  // Video capture device, e.g., webcam.
+    DEVTYPE_UNKNOWN,        // Other devices.
+  };
+
+  // Create SystemMonitor. Only one SystemMonitor instance per application
+  // is allowed.
+  SystemMonitor();
+  ~SystemMonitor();
+
+  // Get the application-wide SystemMonitor (if not present, returns nullptr).
+  static SystemMonitor* Get();
+
+  class BASE_EXPORT DevicesChangedObserver {
+   public:
+    // Notification that the devices connected to the system have changed.
+    // This is only implemented on Windows currently.
+    virtual void OnDevicesChanged(DeviceType device_type) {}
+
+   protected:
+    virtual ~DevicesChangedObserver() = default;
+  };
+
+  // Add a new observer.
+  // Can be called from any thread.
+  // Must not be called from within a notification callback.
+  void AddDevicesChangedObserver(DevicesChangedObserver* obs);
+
+  // Remove an existing observer.
+  // Can be called from any thread.
+  // Must not be called from within a notification callback.
+  void RemoveDevicesChangedObserver(DevicesChangedObserver* obs);
+
+  // The ProcessFoo() style methods are a broken pattern and should not
+  // be copied. Any significant addition to this class is blocked on
+  // refactoring to improve the state of affairs. See http://crbug.com/149059
+
+  // Cross-platform handling of a device change event.
+  void ProcessDevicesChanged(DeviceType device_type);
+
+ private:
+  // Functions to trigger notifications.
+  void NotifyDevicesChanged(DeviceType device_type);
+
+  scoped_refptr<ObserverListThreadSafe<DevicesChangedObserver> >
+      devices_changed_observer_list_;
+
+  DISALLOW_COPY_AND_ASSIGN(SystemMonitor);
+};
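+
+// Usage sketch (illustrative only):
+//
+//   class MyObserver : public base::SystemMonitor::DevicesChangedObserver {
+//    public:
+//     void OnDevicesChanged(base::SystemMonitor::DeviceType type) override {
+//       // React to the added/removed device.
+//     }
+//   };
+//
+//   MyObserver observer;
+//   if (base::SystemMonitor::Get())
+//     base::SystemMonitor::Get()->AddDevicesChangedObserver(&observer);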
+
+}  // namespace base
+
+#endif  // BASE_SYSTEM_MONITOR_SYSTEM_MONITOR_H_
diff --git a/base/system_monitor/system_monitor_unittest.cc b/base/system_monitor/system_monitor_unittest.cc
new file mode 100644
index 0000000..8963f7b
--- /dev/null
+++ b/base/system_monitor/system_monitor_unittest.cc
@@ -0,0 +1,55 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/system_monitor/system_monitor.h"
+
+#include "base/macros.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/test/mock_devices_changed_observer.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+class SystemMonitorTest : public testing::Test {
+ protected:
+  SystemMonitorTest() {
+    system_monitor_.reset(new SystemMonitor);
+  }
+
+  MessageLoop message_loop_;
+  std::unique_ptr<SystemMonitor> system_monitor_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(SystemMonitorTest);
+};
+
+TEST_F(SystemMonitorTest, DeviceChangeNotifications) {
+  const int kObservers = 5;
+
+  testing::Sequence mock_sequencer[kObservers];
+  MockDevicesChangedObserver observers[kObservers];
+  for (int index = 0; index < kObservers; ++index) {
+    system_monitor_->AddDevicesChangedObserver(&observers[index]);
+
+    EXPECT_CALL(observers[index],
+                OnDevicesChanged(SystemMonitor::DEVTYPE_UNKNOWN))
+        .Times(3)
+        .InSequence(mock_sequencer[index]);
+  }
+
+  system_monitor_->ProcessDevicesChanged(SystemMonitor::DEVTYPE_UNKNOWN);
+  RunLoop().RunUntilIdle();
+
+  system_monitor_->ProcessDevicesChanged(SystemMonitor::DEVTYPE_UNKNOWN);
+  system_monitor_->ProcessDevicesChanged(SystemMonitor::DEVTYPE_UNKNOWN);
+  RunLoop().RunUntilIdle();
+}
+
+}  // namespace
+
+}  // namespace base
diff --git a/base/task/OWNERS b/base/task/OWNERS
new file mode 100644
index 0000000..0f3ad5e
--- /dev/null
+++ b/base/task/OWNERS
@@ -0,0 +1,6 @@
+fdoray@chromium.org
+gab@chromium.org
+robliao@chromium.org
+
+# TEAM: scheduler-dev@chromium.org
+# COMPONENT: Internals>TaskScheduler
diff --git a/base/task/README.md b/base/task/README.md
new file mode 100644
index 0000000..0db116a
--- /dev/null
+++ b/base/task/README.md
@@ -0,0 +1,12 @@
+This directory has the following layout (WIP):
+- base/task/: public APIs for posting tasks and managing task queues.
+- base/task/task_scheduler/: implementation of the TaskScheduler.
+- base/task/sequence_manager/: implementation of the SequenceManager.
+
+Apart from embedders that explicitly manage TaskScheduler and/or
+SequenceManager instances for their process/threads, the vast majority of
+users should only need the APIs in base/task/.
+
+Documentation:
+- [Threading and tasks](https://chromium.googlesource.com/chromium/src/+/lkcr/docs/threading_and_tasks.md)
+- [Callbacks](https://chromium.googlesource.com/chromium/src/+/lkcr/docs/callback.md)
diff --git a/base/task/cancelable_task_tracker.cc b/base/task/cancelable_task_tracker.cc
new file mode 100644
index 0000000..f304da8
--- /dev/null
+++ b/base/task/cancelable_task_tracker.cc
@@ -0,0 +1,176 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/cancelable_task_tracker.h"
+
+#include <stddef.h>
+
+#include <utility>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/callback_helpers.h"
+#include "base/location.h"
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/cancellation_flag.h"
+#include "base/task_runner.h"
+#include "base/threading/sequenced_task_runner_handle.h"
+
+namespace base {
+
+namespace {
+
+void RunIfNotCanceled(const CancellationFlag* flag, OnceClosure task) {
+  if (!flag->IsSet())
+    std::move(task).Run();
+}
+
+void RunIfNotCanceledThenUntrack(const CancellationFlag* flag,
+                                 OnceClosure task,
+                                 OnceClosure untrack) {
+  RunIfNotCanceled(flag, std::move(task));
+  std::move(untrack).Run();
+}
+
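+// |cleanup_runner| is deliberately unused: it is bound into the callback via
+// Owned() purely so that destroying the last copy of the callback destroys the
+// ScopedClosureRunner, which in turn runs the untrack-and-delete closure.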
+bool IsCanceled(const CancellationFlag* flag,
+                ScopedClosureRunner* cleanup_runner) {
+  return flag->IsSet();
+}
+
+void RunAndDeleteFlag(OnceClosure closure, const CancellationFlag* flag) {
+  std::move(closure).Run();
+  delete flag;
+}
+
+void RunOrPostToTaskRunner(TaskRunner* task_runner, OnceClosure closure) {
+  if (task_runner->RunsTasksInCurrentSequence())
+    std::move(closure).Run();
+  else
+    task_runner->PostTask(FROM_HERE, std::move(closure));
+}
+
+}  // namespace
+
+// static
+const CancelableTaskTracker::TaskId CancelableTaskTracker::kBadTaskId = 0;
+
+CancelableTaskTracker::CancelableTaskTracker()
+    : next_id_(1), weak_factory_(this) {}
+
+CancelableTaskTracker::~CancelableTaskTracker() {
+  DCHECK(sequence_checker_.CalledOnValidSequence());
+
+  TryCancelAll();
+}
+
+CancelableTaskTracker::TaskId CancelableTaskTracker::PostTask(
+    TaskRunner* task_runner,
+    const Location& from_here,
+    OnceClosure task) {
+  DCHECK(sequence_checker_.CalledOnValidSequence());
+
+  return PostTaskAndReply(task_runner, from_here, std::move(task), DoNothing());
+}
+
+CancelableTaskTracker::TaskId CancelableTaskTracker::PostTaskAndReply(
+    TaskRunner* task_runner,
+    const Location& from_here,
+    OnceClosure task,
+    OnceClosure reply) {
+  DCHECK(sequence_checker_.CalledOnValidSequence());
+
+  // We need a SequencedTaskRunnerHandle to run |reply|.
+  DCHECK(SequencedTaskRunnerHandle::IsSet());
+
+  // Owned by reply callback below.
+  CancellationFlag* flag = new CancellationFlag();
+
+  TaskId id = next_id_;
+  next_id_++;  // int64_t is big enough that we ignore the potential overflow.
+
+  OnceClosure untrack_closure =
+      BindOnce(&CancelableTaskTracker::Untrack, weak_factory_.GetWeakPtr(), id);
+  bool success = task_runner->PostTaskAndReply(
+      from_here, BindOnce(&RunIfNotCanceled, flag, std::move(task)),
+      BindOnce(&RunIfNotCanceledThenUntrack, Owned(flag), std::move(reply),
+               std::move(untrack_closure)));
+
+  if (!success)
+    return kBadTaskId;
+
+  Track(id, flag);
+  return id;
+}
+
+CancelableTaskTracker::TaskId CancelableTaskTracker::NewTrackedTaskId(
+    IsCanceledCallback* is_canceled_cb) {
+  DCHECK(sequence_checker_.CalledOnValidSequence());
+  DCHECK(SequencedTaskRunnerHandle::IsSet());
+
+  TaskId id = next_id_;
+  next_id_++;  // int64_t is big enough that we ignore the potential overflow.
+
+  // Will be deleted by |untrack_and_delete_flag| after Untrack().
+  CancellationFlag* flag = new CancellationFlag();
+
+  OnceClosure untrack_and_delete_flag = BindOnce(
+      &RunAndDeleteFlag,
+      BindOnce(&CancelableTaskTracker::Untrack, weak_factory_.GetWeakPtr(), id),
+      flag);
+
+  // Will always run |untrack_and_delete_flag| on the current sequence.
+  ScopedClosureRunner* untrack_and_delete_flag_runner =
+      new ScopedClosureRunner(BindOnce(
+          &RunOrPostToTaskRunner, RetainedRef(SequencedTaskRunnerHandle::Get()),
+          std::move(untrack_and_delete_flag)));
+
+  *is_canceled_cb =
+      Bind(&IsCanceled, flag, Owned(untrack_and_delete_flag_runner));
+
+  Track(id, flag);
+  return id;
+}
+
+void CancelableTaskTracker::TryCancel(TaskId id) {
+  DCHECK(sequence_checker_.CalledOnValidSequence());
+
+  const auto it = task_flags_.find(id);
+  if (it == task_flags_.end()) {
+    // Two possibilities:
+    //
+    //   1. The task has already been untracked.
+    //   2. The TaskId is bad or unknown.
+    //
+    // Since this function is best-effort, it's OK to ignore these.
+    return;
+  }
+  it->second->Set();
+}
+
+void CancelableTaskTracker::TryCancelAll() {
+  DCHECK(sequence_checker_.CalledOnValidSequence());
+  for (const auto& it : task_flags_)
+    it.second->Set();
+  weak_factory_.InvalidateWeakPtrs();
+  task_flags_.clear();
+}
+
+bool CancelableTaskTracker::HasTrackedTasks() const {
+  DCHECK(sequence_checker_.CalledOnValidSequence());
+  return !task_flags_.empty();
+}
+
+void CancelableTaskTracker::Track(TaskId id, CancellationFlag* flag) {
+  DCHECK(sequence_checker_.CalledOnValidSequence());
+  bool success = task_flags_.insert(std::make_pair(id, flag)).second;
+  DCHECK(success);
+}
+
+void CancelableTaskTracker::Untrack(TaskId id) {
+  DCHECK(sequence_checker_.CalledOnValidSequence());
+  size_t num = task_flags_.erase(id);
+  DCHECK_EQ(1u, num);
+}
+
+}  // namespace base
diff --git a/base/task/cancelable_task_tracker.h b/base/task/cancelable_task_tracker.h
new file mode 100644
index 0000000..e5e6b5e
--- /dev/null
+++ b/base/task/cancelable_task_tracker.h
@@ -0,0 +1,158 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// CancelableTaskTracker posts tasks (in the form of a Closure) to a
+// TaskRunner, and is able to cancel the task later if it's not needed
+// anymore.  On destruction, CancelableTaskTracker will cancel all
+// tracked tasks.
+//
+// Each cancelable task can be associated with a reply (also a Closure). After
+// the task is run on the TaskRunner, |reply| will be posted back to
+// the originating TaskRunner.
+//
+// NOTE:
+//
+// CancelableCallback (base/cancelable_callback.h) and WeakPtr binding are
+// preferred solutions for canceling a task. However, they don't support
+// cancelation from another sequence, which is sometimes a performance-critical
+// requirement, e.g. canceling a database lookup task on the DB thread when the
+// user changes the input text. If a best-effort cancelation of a task from
+// another sequence is performance-critical, CancelableTaskTracker is
+// appropriate; otherwise use one of the other mechanisms.
+//
+// THREAD-SAFETY:
+//
+// 1. A CancelableTaskTracker object must be created, used, and destroyed on a
+//    single sequence.
+//
+// 2. It's safe to destroy a CancelableTaskTracker while there are outstanding
+//    tasks. This is commonly used to cancel all outstanding tasks.
+//
+// 3. The task is deleted on the target sequence, and the reply is deleted on
+//    the originating sequence.
+//
+// 4. IsCanceledCallback can be run or deleted on any sequence.
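+//
+// EXAMPLE (a sketch only; |tracker_|, |db_task_runner| and the Lookup*
+// functions are hypothetical):
+//
+//   CancelableTaskTracker::TaskId id = tracker_.PostTaskAndReply(
+//       db_task_runner.get(), FROM_HERE,
+//       BindOnce(&LookupInDB, text),
+//       BindOnce(&LookupDone));
+//   ...
+//   tracker_.TryCancel(id);  // Best-effort: if |task| hasn't started yet,
+//                            // neither closure runs.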
+#ifndef BASE_TASK_CANCELABLE_TASK_TRACKER_H_
+#define BASE_TASK_CANCELABLE_TASK_TRACKER_H_
+
+#include <stdint.h>
+
+#include <utility>
+
+#include "base/base_export.h"
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/containers/small_map.h"
+#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
+#include "base/post_task_and_reply_with_result_internal.h"
+#include "base/sequence_checker.h"
+
+namespace base {
+
+class CancellationFlag;
+class Location;
+class TaskRunner;
+
+class BASE_EXPORT CancelableTaskTracker {
+ public:
+  // All values except kBadTaskId are valid.
+  typedef int64_t TaskId;
+  static const TaskId kBadTaskId;
+
+  typedef Callback<bool()> IsCanceledCallback;
+
+  CancelableTaskTracker();
+
+  // Cancels all tracked tasks.
+  ~CancelableTaskTracker();
+
+  TaskId PostTask(TaskRunner* task_runner,
+                  const Location& from_here,
+                  OnceClosure task);
+
+  TaskId PostTaskAndReply(TaskRunner* task_runner,
+                          const Location& from_here,
+                          OnceClosure task,
+                          OnceClosure reply);
+
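+  // Like PostTaskAndReply(), but passes the return value of |task| to |reply|.
+  // For example (a sketch; ReadFile()/OnFileRead() are hypothetical):
+  //
+  //   tracker_.PostTaskAndReplyWithResult(
+  //       file_task_runner.get(), FROM_HERE,
+  //       BindOnce(&ReadFile, path),
+  //       BindOnce(&OnFileRead));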
+  template <typename TaskReturnType, typename ReplyArgType>
+  TaskId PostTaskAndReplyWithResult(TaskRunner* task_runner,
+                                    const Location& from_here,
+                                    OnceCallback<TaskReturnType()> task,
+                                    OnceCallback<void(ReplyArgType)> reply) {
+    TaskReturnType* result = new TaskReturnType();
+    return PostTaskAndReply(
+        task_runner, from_here,
+        BindOnce(&internal::ReturnAsParamAdapter<TaskReturnType>,
+                 std::move(task), Unretained(result)),
+        BindOnce(&internal::ReplyAdapter<TaskReturnType, ReplyArgType>,
+                 std::move(reply), Owned(result)));
+  }
+
+  // Callback version of PostTaskAndReplyWithResult above.
+  // Though RepeatingCallback is convertible to OnceCallback, we need this
+  // overload since template deduction and object conversion cannot both be
+  // applied during overload resolution.
+  // TODO(tzik): Update all callers of the Callback version to use OnceCallback.
+  template <typename TaskReturnType, typename ReplyArgType>
+  TaskId PostTaskAndReplyWithResult(TaskRunner* task_runner,
+                                    const Location& from_here,
+                                    Callback<TaskReturnType()> task,
+                                    Callback<void(ReplyArgType)> reply) {
+    return PostTaskAndReplyWithResult(
+        task_runner, from_here,
+        static_cast<OnceCallback<TaskReturnType()>>(std::move(task)),
+        static_cast<OnceCallback<void(ReplyArgType)>>(std::move(reply)));
+  }
+
+  // Creates a tracked TaskId and an associated IsCanceledCallback. The client
+  // can later call TryCancel() with the returned TaskId, and run
+  // |is_canceled_cb| from any thread to check whether the TaskId is canceled.
+  //
+  // The returned task ID is tracked until the last copy of
+  // |is_canceled_cb| is destroyed.
+  //
+  // Note: this function exists to address special cancelation requirements in
+  // existing code. You SHOULD NOT need it in new code.
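+  //
+  // Example (a sketch; the polling job is hypothetical):
+  //
+  //   CancelableTaskTracker::IsCanceledCallback is_canceled;
+  //   CancelableTaskTracker::TaskId id =
+  //       tracker_.NewTrackedTaskId(&is_canceled);
+  //   // Hand |is_canceled| to a job that polls it from any thread.
+  //   tracker_.TryCancel(id);  // Subsequent is_canceled.Run() returns true.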
+  TaskId NewTrackedTaskId(IsCanceledCallback* is_canceled_cb);
+
+  // After calling this function, |task| and |reply| will not run. If the
+  // cancelation happens when |task| is running or has finished running,
+  // |reply| will not run. If |reply| is running or has finished running,
+  // cancelation is a no-op.
+  //
+  // Note: it's OK to cancel a |task| more than once; later calls are no-ops.
+  void TryCancel(TaskId id);
+
+  // It's OK to call this function more than once; later calls are no-ops.
+  void TryCancelAll();
+
+  // Returns true iff there are in-flight tasks that are still being
+  // tracked.
+  bool HasTrackedTasks() const;
+
+ private:
+  void Track(TaskId id, CancellationFlag* flag);
+  void Untrack(TaskId id);
+
+  // Typically the number of tasks is 0-2 and occasionally 3-4. But since
+  // this is a general API that could be used in unexpected ways, use a
+  // small_map instead of a flat_map to avoid falling over if there are many
+  // tasks.
+  small_map<std::map<TaskId, CancellationFlag*>, 4> task_flags_;
+
+  TaskId next_id_;
+  SequenceChecker sequence_checker_;
+
+  WeakPtrFactory<CancelableTaskTracker> weak_factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(CancelableTaskTracker);
+};
+
+}  // namespace base
+
+#endif  // BASE_TASK_CANCELABLE_TASK_TRACKER_H_
diff --git a/base/task/cancelable_task_tracker_unittest.cc b/base/task/cancelable_task_tracker_unittest.cc
new file mode 100644
index 0000000..c75adc4
--- /dev/null
+++ b/base/task/cancelable_task_tracker_unittest.cc
@@ -0,0 +1,403 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/cancelable_task_tracker.h"
+
+#include <cstddef>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/weak_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
+#include "base/test/gtest_util.h"
+#include "base/test/test_simple_task_runner.h"
+#include "base/threading/thread.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+class CancelableTaskTrackerTest : public testing::Test {
+ protected:
+  ~CancelableTaskTrackerTest() override { RunCurrentLoopUntilIdle(); }
+
+  void RunCurrentLoopUntilIdle() {
+    RunLoop run_loop;
+    run_loop.RunUntilIdle();
+  }
+
+  CancelableTaskTracker task_tracker_;
+
+ private:
+  // Needed by CancelableTaskTracker methods.
+  MessageLoop message_loop_;
+};
+
+void AddFailureAt(const Location& location) {
+  ADD_FAILURE_AT(location.file_name(), location.line_number());
+}
+
+// Returns a closure that fails if run.
+Closure MakeExpectedNotRunClosure(const Location& location) {
+  return Bind(&AddFailureAt, location);
+}
+
+// A helper class for MakeExpectedRunClosure() that fails if it is
+// destroyed without Run() having been called.  This class may be used
+// from multiple threads as long as Run() is called at most once
+// before destruction.
+class RunChecker {
+ public:
+  explicit RunChecker(const Location& location)
+      : location_(location), called_(false) {}
+
+  ~RunChecker() {
+    if (!called_) {
+      ADD_FAILURE_AT(location_.file_name(), location_.line_number());
+    }
+  }
+
+  void Run() { called_ = true; }
+
+ private:
+  Location location_;
+  bool called_;
+};
+
+// Returns a closure that fails on destruction if it hasn't been run.
+Closure MakeExpectedRunClosure(const Location& location) {
+  return Bind(&RunChecker::Run, Owned(new RunChecker(location)));
+}
+
+}  // namespace
+
+// With the task tracker, post a task, a task with a reply, and get a
+// new task id without canceling any of them.  The tasks and the reply
+// should run and the "is canceled" callback should return false.
+TEST_F(CancelableTaskTrackerTest, NoCancel) {
+  Thread worker_thread("worker thread");
+  ASSERT_TRUE(worker_thread.Start());
+
+  ignore_result(task_tracker_.PostTask(worker_thread.task_runner().get(),
+                                       FROM_HERE,
+                                       MakeExpectedRunClosure(FROM_HERE)));
+
+  ignore_result(task_tracker_.PostTaskAndReply(
+      worker_thread.task_runner().get(), FROM_HERE,
+      MakeExpectedRunClosure(FROM_HERE), MakeExpectedRunClosure(FROM_HERE)));
+
+  CancelableTaskTracker::IsCanceledCallback is_canceled;
+  ignore_result(task_tracker_.NewTrackedTaskId(&is_canceled));
+
+  worker_thread.Stop();
+
+  RunCurrentLoopUntilIdle();
+
+  EXPECT_FALSE(is_canceled.Run());
+}
+
+// Post a task with the task tracker but cancel it before running the
+// task runner.  The task should not run.
+TEST_F(CancelableTaskTrackerTest, CancelPostedTask) {
+  scoped_refptr<TestSimpleTaskRunner> test_task_runner(
+      new TestSimpleTaskRunner());
+
+  CancelableTaskTracker::TaskId task_id = task_tracker_.PostTask(
+      test_task_runner.get(), FROM_HERE, MakeExpectedNotRunClosure(FROM_HERE));
+  EXPECT_NE(CancelableTaskTracker::kBadTaskId, task_id);
+
+  EXPECT_EQ(1U, test_task_runner->NumPendingTasks());
+
+  task_tracker_.TryCancel(task_id);
+
+  test_task_runner->RunUntilIdle();
+}
+
+// Post a task with reply with the task tracker and cancel it before
+// running the task runner.  Neither the task nor the reply should
+// run.
+TEST_F(CancelableTaskTrackerTest, CancelPostedTaskAndReply) {
+  scoped_refptr<TestSimpleTaskRunner> test_task_runner(
+      new TestSimpleTaskRunner());
+
+  CancelableTaskTracker::TaskId task_id =
+      task_tracker_.PostTaskAndReply(test_task_runner.get(),
+                                     FROM_HERE,
+                                     MakeExpectedNotRunClosure(FROM_HERE),
+                                     MakeExpectedNotRunClosure(FROM_HERE));
+  EXPECT_NE(CancelableTaskTracker::kBadTaskId, task_id);
+
+  task_tracker_.TryCancel(task_id);
+
+  test_task_runner->RunUntilIdle();
+}
+
+// Post a task with reply with the task tracker and cancel it after
+// running the task runner but before running the current message
+// loop.  The task should run but the reply should not.
+TEST_F(CancelableTaskTrackerTest, CancelReply) {
+  scoped_refptr<TestSimpleTaskRunner> test_task_runner(
+      new TestSimpleTaskRunner());
+
+  CancelableTaskTracker::TaskId task_id =
+      task_tracker_.PostTaskAndReply(test_task_runner.get(),
+                                     FROM_HERE,
+                                     MakeExpectedRunClosure(FROM_HERE),
+                                     MakeExpectedNotRunClosure(FROM_HERE));
+  EXPECT_NE(CancelableTaskTracker::kBadTaskId, task_id);
+
+  test_task_runner->RunUntilIdle();
+
+  task_tracker_.TryCancel(task_id);
+}
+
+// Post a task with reply with the task tracker on a worker thread and
+// cancel it before running the current message loop.  The task should
+// run but the reply should not.
+TEST_F(CancelableTaskTrackerTest, CancelReplyDifferentThread) {
+  Thread worker_thread("worker thread");
+  ASSERT_TRUE(worker_thread.Start());
+
+  CancelableTaskTracker::TaskId task_id = task_tracker_.PostTaskAndReply(
+      worker_thread.task_runner().get(), FROM_HERE, DoNothing(),
+      MakeExpectedNotRunClosure(FROM_HERE));
+  EXPECT_NE(CancelableTaskTracker::kBadTaskId, task_id);
+
+  task_tracker_.TryCancel(task_id);
+
+  worker_thread.Stop();
+}
+
+void ExpectIsCanceled(
+    const CancelableTaskTracker::IsCanceledCallback& is_canceled,
+    bool expected_is_canceled) {
+  EXPECT_EQ(expected_is_canceled, is_canceled.Run());
+}
+
+// Create a new task ID and check its status on a separate thread
+// before and after canceling.  The is-canceled callback should be
+// thread-safe (i.e., nothing should blow up).
+TEST_F(CancelableTaskTrackerTest, NewTrackedTaskIdDifferentThread) {
+  CancelableTaskTracker::IsCanceledCallback is_canceled;
+  CancelableTaskTracker::TaskId task_id =
+      task_tracker_.NewTrackedTaskId(&is_canceled);
+
+  EXPECT_FALSE(is_canceled.Run());
+
+  Thread other_thread("other thread");
+  ASSERT_TRUE(other_thread.Start());
+  other_thread.task_runner()->PostTask(
+      FROM_HERE, BindOnce(&ExpectIsCanceled, is_canceled, false));
+  other_thread.Stop();
+
+  task_tracker_.TryCancel(task_id);
+
+  ASSERT_TRUE(other_thread.Start());
+  other_thread.task_runner()->PostTask(
+      FROM_HERE, BindOnce(&ExpectIsCanceled, is_canceled, true));
+  other_thread.Stop();
+}
+
+// With the task tracker, post a task, a task with a reply, get a new
+// task id, and then cancel all of them.  None of the tasks nor the
+// reply should run and the "is canceled" callback should return
+// true.
+TEST_F(CancelableTaskTrackerTest, CancelAll) {
+  scoped_refptr<TestSimpleTaskRunner> test_task_runner(
+      new TestSimpleTaskRunner());
+
+  ignore_result(task_tracker_.PostTask(
+      test_task_runner.get(), FROM_HERE, MakeExpectedNotRunClosure(FROM_HERE)));
+
+  ignore_result(
+      task_tracker_.PostTaskAndReply(test_task_runner.get(),
+                                     FROM_HERE,
+                                     MakeExpectedNotRunClosure(FROM_HERE),
+                                     MakeExpectedNotRunClosure(FROM_HERE)));
+
+  CancelableTaskTracker::IsCanceledCallback is_canceled;
+  ignore_result(task_tracker_.NewTrackedTaskId(&is_canceled));
+
+  task_tracker_.TryCancelAll();
+
+  test_task_runner->RunUntilIdle();
+
+  RunCurrentLoopUntilIdle();
+
+  EXPECT_TRUE(is_canceled.Run());
+}
+
+// Post a task and a task with a reply to a task tracker with a narrower
+// scope, and get a new task id from the fixture's longer-lived tracker.
+// Destroying the inner tracker should cancel its tasks and reply, while the
+// fixture tracker's "is canceled" callback should still return false.
+TEST_F(CancelableTaskTrackerTest, DestructionCancelsAll) {
+  scoped_refptr<TestSimpleTaskRunner> test_task_runner(
+      new TestSimpleTaskRunner());
+
+  CancelableTaskTracker::IsCanceledCallback is_canceled;
+
+  {
+    // Create another task tracker with a smaller scope.
+    CancelableTaskTracker task_tracker;
+
+    ignore_result(task_tracker.PostTask(test_task_runner.get(),
+                                        FROM_HERE,
+                                        MakeExpectedNotRunClosure(FROM_HERE)));
+
+    ignore_result(
+        task_tracker.PostTaskAndReply(test_task_runner.get(),
+                                      FROM_HERE,
+                                      MakeExpectedNotRunClosure(FROM_HERE),
+                                      MakeExpectedNotRunClosure(FROM_HERE)));
+
+    ignore_result(task_tracker_.NewTrackedTaskId(&is_canceled));
+  }
+
+  test_task_runner->RunUntilIdle();
+
+  RunCurrentLoopUntilIdle();
+
+  EXPECT_FALSE(is_canceled.Run());
+}
+
+// Post a task and cancel it. HasTrackedTasks() should return false as soon as
+// TryCancelAll() is called.
+TEST_F(CancelableTaskTrackerTest, HasTrackedTasksPost) {
+  scoped_refptr<TestSimpleTaskRunner> test_task_runner(
+      new TestSimpleTaskRunner());
+
+  EXPECT_FALSE(task_tracker_.HasTrackedTasks());
+
+  ignore_result(task_tracker_.PostTask(
+      test_task_runner.get(), FROM_HERE, MakeExpectedNotRunClosure(FROM_HERE)));
+
+  task_tracker_.TryCancelAll();
+
+  EXPECT_FALSE(task_tracker_.HasTrackedTasks());
+
+  test_task_runner->RunUntilIdle();
+  RunCurrentLoopUntilIdle();
+}
+
+// Post a task with a reply and cancel it. HasTrackedTasks() should return false
+// as soon as TryCancelAll() is called.
+TEST_F(CancelableTaskTrackerTest, HasTrackedTasksPostWithReply) {
+  scoped_refptr<TestSimpleTaskRunner> test_task_runner(
+      new TestSimpleTaskRunner());
+
+  EXPECT_FALSE(task_tracker_.HasTrackedTasks());
+
+  ignore_result(
+      task_tracker_.PostTaskAndReply(test_task_runner.get(),
+                                     FROM_HERE,
+                                     MakeExpectedNotRunClosure(FROM_HERE),
+                                     MakeExpectedNotRunClosure(FROM_HERE)));
+
+  task_tracker_.TryCancelAll();
+
+  EXPECT_FALSE(task_tracker_.HasTrackedTasks());
+
+  test_task_runner->RunUntilIdle();
+  RunCurrentLoopUntilIdle();
+}
+
+// Create a new tracked task ID. HasTrackedTasks() should return false as soon
+// as TryCancelAll() is called.
+TEST_F(CancelableTaskTrackerTest, HasTrackedTasksIsCancelled) {
+  EXPECT_FALSE(task_tracker_.HasTrackedTasks());
+
+  CancelableTaskTracker::IsCanceledCallback is_canceled;
+  ignore_result(task_tracker_.NewTrackedTaskId(&is_canceled));
+
+  task_tracker_.TryCancelAll();
+
+  EXPECT_FALSE(task_tracker_.HasTrackedTasks());
+}
+
+// The death tests below make sure that calling task tracker member
+// functions from a thread different from its owner thread DCHECKs in
+// debug mode.
+
+class CancelableTaskTrackerDeathTest : public CancelableTaskTrackerTest {
+ protected:
+  CancelableTaskTrackerDeathTest() {
+    // The default style "fast" does not support multi-threaded tests.
+    ::testing::FLAGS_gtest_death_test_style = "threadsafe";
+  }
+};
+
+// Runs |fn| with |task_tracker|, expecting it to crash in debug mode.
+void MaybeRunDeadlyTaskTrackerMemberFunction(
+    CancelableTaskTracker* task_tracker,
+    const Callback<void(CancelableTaskTracker*)>& fn) {
+  EXPECT_DCHECK_DEATH(fn.Run(task_tracker));
+}
+
+void PostDoNothingTask(CancelableTaskTracker* task_tracker) {
+  ignore_result(task_tracker->PostTask(
+      scoped_refptr<TestSimpleTaskRunner>(new TestSimpleTaskRunner()).get(),
+      FROM_HERE, DoNothing()));
+}
+
+TEST_F(CancelableTaskTrackerDeathTest, PostFromDifferentThread) {
+  Thread bad_thread("bad thread");
+  ASSERT_TRUE(bad_thread.Start());
+
+  bad_thread.task_runner()->PostTask(
+      FROM_HERE,
+      BindOnce(&MaybeRunDeadlyTaskTrackerMemberFunction,
+               Unretained(&task_tracker_), Bind(&PostDoNothingTask)));
+}
+
+void TryCancel(CancelableTaskTracker::TaskId task_id,
+               CancelableTaskTracker* task_tracker) {
+  task_tracker->TryCancel(task_id);
+}
+
+TEST_F(CancelableTaskTrackerDeathTest, CancelOnDifferentThread) {
+  scoped_refptr<TestSimpleTaskRunner> test_task_runner(
+      new TestSimpleTaskRunner());
+
+  Thread bad_thread("bad thread");
+  ASSERT_TRUE(bad_thread.Start());
+
+  CancelableTaskTracker::TaskId task_id =
+      task_tracker_.PostTask(test_task_runner.get(), FROM_HERE, DoNothing());
+  EXPECT_NE(CancelableTaskTracker::kBadTaskId, task_id);
+
+  bad_thread.task_runner()->PostTask(
+      FROM_HERE,
+      BindOnce(&MaybeRunDeadlyTaskTrackerMemberFunction,
+               Unretained(&task_tracker_), Bind(&TryCancel, task_id)));
+
+  test_task_runner->RunUntilIdle();
+}
+
+TEST_F(CancelableTaskTrackerDeathTest, CancelAllOnDifferentThread) {
+  scoped_refptr<TestSimpleTaskRunner> test_task_runner(
+      new TestSimpleTaskRunner());
+
+  Thread bad_thread("bad thread");
+  ASSERT_TRUE(bad_thread.Start());
+
+  CancelableTaskTracker::TaskId task_id =
+      task_tracker_.PostTask(test_task_runner.get(), FROM_HERE, DoNothing());
+  EXPECT_NE(CancelableTaskTracker::kBadTaskId, task_id);
+
+  bad_thread.task_runner()->PostTask(
+      FROM_HERE, BindOnce(&MaybeRunDeadlyTaskTrackerMemberFunction,
+                          Unretained(&task_tracker_),
+                          Bind(&CancelableTaskTracker::TryCancelAll)));
+
+  test_task_runner->RunUntilIdle();
+}
+
+}  // namespace base
diff --git a/base/task/sequence_manager/OWNERS b/base/task/sequence_manager/OWNERS
new file mode 100644
index 0000000..ac6eae8
--- /dev/null
+++ b/base/task/sequence_manager/OWNERS
@@ -0,0 +1,6 @@
+altimin@chromium.org
+alexclarke@chromium.org
+skyostil@chromium.org
+
+# TEAM: scheduler-dev@chromium.org
+# COMPONENT: Blink>Scheduling
diff --git a/base/task/task_scheduler/OWNERS b/base/task/task_scheduler/OWNERS
new file mode 100644
index 0000000..0f3ad5e
--- /dev/null
+++ b/base/task/task_scheduler/OWNERS
@@ -0,0 +1,6 @@
+fdoray@chromium.org
+gab@chromium.org
+robliao@chromium.org
+
+# TEAM: scheduler-dev@chromium.org
+# COMPONENT: Internals>TaskScheduler
diff --git a/base/task_runner.cc b/base/task_runner.cc
new file mode 100644
index 0000000..aae9f9e
--- /dev/null
+++ b/base/task_runner.cc
@@ -0,0 +1,66 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_runner.h"
+
+#include <utility>
+
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/threading/post_task_and_reply_impl.h"
+
+namespace base {
+
+namespace {
+
+// TODO(akalin): There's only one other implementation of
+// PostTaskAndReplyImpl in WorkerPool.  Investigate whether it'll be
+// possible to merge the two.
+class PostTaskAndReplyTaskRunner : public internal::PostTaskAndReplyImpl {
+ public:
+  explicit PostTaskAndReplyTaskRunner(TaskRunner* destination);
+
+ private:
+  bool PostTask(const Location& from_here, OnceClosure task) override;
+
+  // Non-owning.
+  TaskRunner* destination_;
+};
+
+PostTaskAndReplyTaskRunner::PostTaskAndReplyTaskRunner(
+    TaskRunner* destination) : destination_(destination) {
+  DCHECK(destination_);
+}
+
+bool PostTaskAndReplyTaskRunner::PostTask(const Location& from_here,
+                                          OnceClosure task) {
+  return destination_->PostTask(from_here, std::move(task));
+}
+
+}  // namespace
+
+bool TaskRunner::PostTask(const Location& from_here, OnceClosure task) {
+  return PostDelayedTask(from_here, std::move(task), base::TimeDelta());
+}
+
+bool TaskRunner::PostTaskAndReply(const Location& from_here,
+                                  OnceClosure task,
+                                  OnceClosure reply) {
+  return PostTaskAndReplyTaskRunner(this).PostTaskAndReply(
+      from_here, std::move(task), std::move(reply));
+}
+
+TaskRunner::TaskRunner() = default;
+
+TaskRunner::~TaskRunner() = default;
+
+void TaskRunner::OnDestruct() const {
+  delete this;
+}
+
+void TaskRunnerTraits::Destruct(const TaskRunner* task_runner) {
+  task_runner->OnDestruct();
+}
+
+}  // namespace base
diff --git a/base/task_runner.h b/base/task_runner.h
new file mode 100644
index 0000000..e4c6b41
--- /dev/null
+++ b/base/task_runner.h
@@ -0,0 +1,158 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_RUNNER_H_
+#define BASE_TASK_RUNNER_H_
+
+#include <stddef.h>
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/location.h"
+#include "base/memory/ref_counted.h"
+#include "base/time/time.h"
+
+namespace base {
+
+struct TaskRunnerTraits;
+
+// A TaskRunner is an object that runs posted tasks (in the form of
+// Closure objects).  The TaskRunner interface provides a way of
+// decoupling task posting from the mechanics of how each task will be
+// run.  TaskRunner provides very weak guarantees as to how posted
+// tasks are run (or if they're run at all).  In particular, it only
+// guarantees:
+//
+//   - Posting a task will not run it synchronously.  That is, no
+//     Post*Task method will call task.Run() directly.
+//
+//   - Increasing the delay can only delay when the task gets run.
+//     That is, increasing the delay may not affect when the task gets
+//     run, or it could make it run later than it normally would, but
+//     it won't make it run earlier than it normally would.
+//
+// TaskRunner does not guarantee the order in which posted tasks are
+// run, whether tasks overlap, or whether they're run on a particular
+// thread.  Also it does not guarantee a memory model for shared data
+// between tasks.  (In other words, you should use your own
+// synchronization/locking primitives if you need to share data
+// between tasks.)
+//
+// Implementations of TaskRunner should be thread-safe in that all
+// methods must be safe to call on any thread.  Ownership semantics
+// for TaskRunners are in general not clear, which is why the
+// interface itself is RefCountedThreadSafe.
+//
+// Some theoretical implementations of TaskRunner:
+//
+//   - A TaskRunner that uses a thread pool to run posted tasks.
+//
+//   - A TaskRunner that, for each task, spawns a non-joinable thread
+//     to run that task and immediately quit.
+//
+//   - A TaskRunner that stores the list of posted tasks and has a
+//     method Run() that runs each runnable task in random order.
+class BASE_EXPORT TaskRunner
+    : public RefCountedThreadSafe<TaskRunner, TaskRunnerTraits> {
+ public:
+  // Posts the given task to be run.  Returns true if the task may be
+  // run at some point in the future, and false if the task definitely
+  // will not be run.
+  //
+  // Equivalent to PostDelayedTask(from_here, task, 0).
+  bool PostTask(const Location& from_here, OnceClosure task);
+
+  // Like PostTask, but tries to run the posted task only after |delay|
+  // has passed. Implementations should use a tick clock, rather than
+  // wall-clock time, to implement |delay|.
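+  //
+  // For example (a sketch), to run |closure| roughly one second from now:
+  //
+  //   task_runner->PostDelayedTask(FROM_HERE, std::move(closure),
+  //                                base::TimeDelta::FromSeconds(1));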
+  virtual bool PostDelayedTask(const Location& from_here,
+                               OnceClosure task,
+                               base::TimeDelta delay) = 0;
+
+  // Returns true iff tasks posted to this TaskRunner are sequenced
+  // with this call.
+  //
+  // In particular:
+  // - Returns true if this is a SequencedTaskRunner to which the
+  //   current task was posted.
+  // - Returns true if this is a SequencedTaskRunner bound to the
+  //   same sequence as the SequencedTaskRunner to which the current
+  //   task was posted.
+  // - Returns true if this is a SingleThreadTaskRunner bound to
+  //   the current thread.
+  // TODO(http://crbug.com/665062):
+  //   This API doesn't make sense for parallel TaskRunners.
+  //   Introduce alternate static APIs for documentation purposes of "this runs
+  //   in pool X", have RunsTasksInCurrentSequence() return false for parallel
+  //   TaskRunners, and ultimately move this method down to SequencedTaskRunner.
+  virtual bool RunsTasksInCurrentSequence() const = 0;
+
+  // Posts |task| on the current TaskRunner.  On completion, |reply|
+  // is posted to the thread that called PostTaskAndReply().  Both
+  // |task| and |reply| are guaranteed to be deleted on the thread
+  // from which PostTaskAndReply() is invoked.  This allows objects
+  // that must be deleted on the originating thread to be bound into
+  // the |task| and |reply| Closures.  In particular, it can be useful
+  // to use WeakPtr<> in the |reply| Closure so that the reply
+  // operation can be canceled. See the following pseudo-code:
+  //
+  // class DataBuffer : public RefCountedThreadSafe<DataBuffer> {
+  //  public:
+  //   // Called to add data into a buffer.
+  //   void AddData(void* buf, size_t length);
+  //   ...
+  // };
+  //
+  //
+  // class DataLoader : public SupportsWeakPtr<DataLoader> {
+  //  public:
+  //    void GetData() {
+  //      scoped_refptr<DataBuffer> buffer = new DataBuffer();
+  //      target_thread_.task_runner()->PostTaskAndReply(
+  //          FROM_HERE,
+  //          base::Bind(&DataBuffer::AddData, buffer),
+  //          base::Bind(&DataLoader::OnDataReceived, AsWeakPtr(), buffer));
+  //    }
+  //
+  //  private:
+  //    void OnDataReceived(scoped_refptr<DataBuffer> buffer) {
+  //      // Do something with buffer.
+  //    }
+  // };
+  //
+  //
+  // Things to notice:
+  //   * Results of |task| are shared with |reply| by binding a shared argument
+  //     (a DataBuffer instance).
+  //   * The DataLoader object has no special thread safety.
+  //   * The DataLoader object can be deleted while |task| is still running,
+  //     and the reply will cancel itself safely because it is bound to a
+  //     WeakPtr<>.
+  bool PostTaskAndReply(const Location& from_here,
+                        OnceClosure task,
+                        OnceClosure reply);
+
+ protected:
+  friend struct TaskRunnerTraits;
+
+  // Only the Windows debug build seems to need this: see
+  // http://crbug.com/112250.
+  friend class RefCountedThreadSafe<TaskRunner, TaskRunnerTraits>;
+
+  TaskRunner();
+  virtual ~TaskRunner();
+
+  // Called when this object should be destroyed.  By default simply
+  // deletes |this|, but can be overridden to do something else, like
+  // delete on a certain thread.
+  virtual void OnDestruct() const;
+};
+
+struct BASE_EXPORT TaskRunnerTraits {
+  static void Destruct(const TaskRunner* task_runner);
+};
+
+}  // namespace base
+
+#endif  // BASE_TASK_RUNNER_H_
diff --git a/base/task_runner_util.h b/base/task_runner_util.h
new file mode 100644
index 0000000..d79f5b8
--- /dev/null
+++ b/base/task_runner_util.h
@@ -0,0 +1,67 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_RUNNER_UTIL_H_
+#define BASE_TASK_RUNNER_UTIL_H_
+
+#include <utility>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/callback.h"
+#include "base/logging.h"
+#include "base/post_task_and_reply_with_result_internal.h"
+#include "base/task_runner.h"
+
+namespace base {
+
+// When you have these methods
+//
+//   R DoWorkAndReturn();
+//   void Callback(const R& result);
+//
+// and want to call them in a PostTaskAndReply kind of fashion where the
+// result of DoWorkAndReturn is passed to the Callback, you can use
+// PostTaskAndReplyWithResult as in this example:
+//
+// PostTaskAndReplyWithResult(
+//     target_thread_.task_runner(),
+//     FROM_HERE,
+//     BindOnce(&DoWorkAndReturn),
+//     BindOnce(&Callback));
+template <typename TaskReturnType, typename ReplyArgType>
+bool PostTaskAndReplyWithResult(TaskRunner* task_runner,
+                                const Location& from_here,
+                                OnceCallback<TaskReturnType()> task,
+                                OnceCallback<void(ReplyArgType)> reply) {
+  DCHECK(task);
+  DCHECK(reply);
+  TaskReturnType* result = new TaskReturnType();
+  return task_runner->PostTaskAndReply(
+      from_here,
+      BindOnce(&internal::ReturnAsParamAdapter<TaskReturnType>, std::move(task),
+               result),
+      BindOnce(&internal::ReplyAdapter<TaskReturnType, ReplyArgType>,
+               std::move(reply), Owned(result)));
+}
+
+// Callback version of PostTaskAndReplyWithResult above.
+// Though RepeatingCallback is convertible to OnceCallback, we need this
+// overload since template deduction and object conversion cannot both be
+// applied during overload resolution.
+// TODO(crbug.com/714018): Update all callers of the Callback version to use
+// OnceCallback.
+template <typename TaskReturnType, typename ReplyArgType>
+bool PostTaskAndReplyWithResult(TaskRunner* task_runner,
+                                const Location& from_here,
+                                Callback<TaskReturnType()> task,
+                                Callback<void(ReplyArgType)> reply) {
+  return PostTaskAndReplyWithResult(
+      task_runner, from_here, OnceCallback<TaskReturnType()>(std::move(task)),
+      OnceCallback<void(ReplyArgType)>(std::move(reply)));
+}
+
+}  // namespace base
+
+#endif  // BASE_TASK_RUNNER_UTIL_H_
diff --git a/base/task_runner_util_unittest.cc b/base/task_runner_util_unittest.cc
new file mode 100644
index 0000000..44baad4
--- /dev/null
+++ b/base/task_runner_util_unittest.cc
@@ -0,0 +1,125 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_runner_util.h"
+
+#include <memory>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/location.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+int ReturnFortyTwo() {
+  return 42;
+}
+
+void StoreValue(int* destination, int value) {
+  *destination = value;
+}
+
+void StoreDoubleValue(double* destination, double value) {
+  *destination = value;
+}
+
+int g_foo_destruct_count = 0;
+int g_foo_free_count = 0;
+
+struct Foo {
+  ~Foo() {
+    ++g_foo_destruct_count;
+  }
+};
+
+std::unique_ptr<Foo> CreateFoo() {
+  return std::unique_ptr<Foo>(new Foo);
+}
+
+void ExpectFoo(std::unique_ptr<Foo> foo) {
+  EXPECT_TRUE(foo.get());
+  std::unique_ptr<Foo> local_foo(std::move(foo));
+  EXPECT_TRUE(local_foo.get());
+  EXPECT_FALSE(foo.get());
+}
+
+struct FooDeleter {
+  void operator()(Foo* foo) const {
+    ++g_foo_free_count;
+    delete foo;
+  }
+};
+
+std::unique_ptr<Foo, FooDeleter> CreateScopedFoo() {
+  return std::unique_ptr<Foo, FooDeleter>(new Foo);
+}
+
+void ExpectScopedFoo(std::unique_ptr<Foo, FooDeleter> foo) {
+  EXPECT_TRUE(foo.get());
+  std::unique_ptr<Foo, FooDeleter> local_foo(std::move(foo));
+  EXPECT_TRUE(local_foo.get());
+  EXPECT_FALSE(foo.get());
+}
+
+}  // namespace
+
+TEST(TaskRunnerHelpersTest, PostTaskAndReplyWithResult) {
+  int result = 0;
+
+  MessageLoop message_loop;
+  PostTaskAndReplyWithResult(message_loop.task_runner().get(), FROM_HERE,
+                             Bind(&ReturnFortyTwo),
+                             Bind(&StoreValue, &result));
+
+  RunLoop().RunUntilIdle();
+
+  EXPECT_EQ(42, result);
+}
+
+TEST(TaskRunnerHelpersTest, PostTaskAndReplyWithResultImplicitConvert) {
+  double result = 0;
+
+  MessageLoop message_loop;
+  PostTaskAndReplyWithResult(message_loop.task_runner().get(), FROM_HERE,
+                             Bind(&ReturnFortyTwo),
+                             Bind(&StoreDoubleValue, &result));
+
+  RunLoop().RunUntilIdle();
+
+  EXPECT_DOUBLE_EQ(42.0, result);
+}
+
+TEST(TaskRunnerHelpersTest, PostTaskAndReplyWithResultPassed) {
+  g_foo_destruct_count = 0;
+  g_foo_free_count = 0;
+
+  MessageLoop message_loop;
+  PostTaskAndReplyWithResult(message_loop.task_runner().get(), FROM_HERE,
+                             Bind(&CreateFoo), Bind(&ExpectFoo));
+
+  RunLoop().RunUntilIdle();
+
+  EXPECT_EQ(1, g_foo_destruct_count);
+  EXPECT_EQ(0, g_foo_free_count);
+}
+
+TEST(TaskRunnerHelpersTest, PostTaskAndReplyWithResultPassedFreeProc) {
+  g_foo_destruct_count = 0;
+  g_foo_free_count = 0;
+
+  MessageLoop message_loop;
+  PostTaskAndReplyWithResult(message_loop.task_runner().get(), FROM_HERE,
+                             Bind(&CreateScopedFoo), Bind(&ExpectScopedFoo));
+
+  RunLoop().RunUntilIdle();
+
+  EXPECT_EQ(1, g_foo_destruct_count);
+  EXPECT_EQ(1, g_foo_free_count);
+}
+
+}  // namespace base
diff --git a/base/task_scheduler/OWNERS b/base/task_scheduler/OWNERS
new file mode 100644
index 0000000..0f3ad5e
--- /dev/null
+++ b/base/task_scheduler/OWNERS
@@ -0,0 +1,6 @@
+fdoray@chromium.org
+gab@chromium.org
+robliao@chromium.org
+
+# TEAM: scheduler-dev@chromium.org
+# COMPONENT: Internals>TaskScheduler
diff --git a/base/task_scheduler/can_schedule_sequence_observer.h b/base/task_scheduler/can_schedule_sequence_observer.h
new file mode 100644
index 0000000..f2b0551
--- /dev/null
+++ b/base/task_scheduler/can_schedule_sequence_observer.h
@@ -0,0 +1,27 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_CAN_SCHEDULE_SEQUENCE_OBSERVER_H_
+#define BASE_TASK_SCHEDULER_CAN_SCHEDULE_SEQUENCE_OBSERVER_H_
+
+#include "base/task_scheduler/sequence.h"
+
+namespace base {
+namespace internal {
+
+class CanScheduleSequenceObserver {
+ public:
+  // Called when |sequence| can be scheduled. It is expected that
+  // TaskTracker::RunNextTask() will be called with |sequence| as argument after
+  // this is called.
+  virtual void OnCanScheduleSequence(scoped_refptr<Sequence> sequence) = 0;
+
+ protected:
+  virtual ~CanScheduleSequenceObserver() = default;
+};
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_CAN_SCHEDULE_SEQUENCE_OBSERVER_H_
diff --git a/base/task_scheduler/delayed_task_manager.cc b/base/task_scheduler/delayed_task_manager.cc
new file mode 100644
index 0000000..86a6721
--- /dev/null
+++ b/base/task_scheduler/delayed_task_manager.cc
@@ -0,0 +1,95 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/delayed_task_manager.h"
+
+#include <algorithm>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/task_runner.h"
+#include "base/task_scheduler/task.h"
+
+namespace base {
+namespace internal {
+
+DelayedTaskManager::DelayedTaskManager(
+    std::unique_ptr<const TickClock> tick_clock)
+    : tick_clock_(std::move(tick_clock)) {
+  DCHECK(tick_clock_);
+}
+
+DelayedTaskManager::~DelayedTaskManager() = default;
+
+void DelayedTaskManager::Start(
+    scoped_refptr<TaskRunner> service_thread_task_runner) {
+  DCHECK(service_thread_task_runner);
+
+  decltype(tasks_added_before_start_) tasks_added_before_start;
+
+  {
+    AutoSchedulerLock auto_lock(lock_);
+    DCHECK(!service_thread_task_runner_);
+    DCHECK(!started_.IsSet());
+    service_thread_task_runner_ = std::move(service_thread_task_runner);
+    tasks_added_before_start = std::move(tasks_added_before_start_);
+    // |service_thread_task_runner_| must not change after |started_| is set
+    // (cf. comment above |lock_| in header file).
+    started_.Set();
+  }
+
+  const TimeTicks now = tick_clock_->NowTicks();
+  for (auto& task_and_callback : tasks_added_before_start) {
+    const TimeDelta delay =
+        std::max(TimeDelta(), task_and_callback.first.delayed_run_time - now);
+    AddDelayedTaskNow(std::move(task_and_callback.first), delay,
+                      std::move(task_and_callback.second));
+  }
+}
+
+void DelayedTaskManager::AddDelayedTask(
+    Task task,
+    PostTaskNowCallback post_task_now_callback) {
+  DCHECK(task.task);
+
+  const TimeDelta delay = task.delay;
+  DCHECK(!delay.is_zero());
+
+  // Use CHECK instead of DCHECK to crash earlier. See http://crbug.com/711167
+  // for details.
+  CHECK(task.task);
+
+  // If |started_| is set, the DelayedTaskManager is in a stable state and
+  // AddDelayedTaskNow() can be called without synchronization. Otherwise, it is
+  // necessary to acquire |lock_| and recheck.
+  if (started_.IsSet()) {
+    AddDelayedTaskNow(std::move(task), delay,
+                      std::move(post_task_now_callback));
+  } else {
+    AutoSchedulerLock auto_lock(lock_);
+    if (started_.IsSet()) {
+      AddDelayedTaskNow(std::move(task), delay,
+                        std::move(post_task_now_callback));
+    } else {
+      tasks_added_before_start_.push_back(
+          {std::move(task), std::move(post_task_now_callback)});
+    }
+  }
+}
+
+void DelayedTaskManager::AddDelayedTaskNow(
+    Task task,
+    TimeDelta delay,
+    PostTaskNowCallback post_task_now_callback) {
+  DCHECK(task.task);
+  DCHECK(started_.IsSet());
+  // TODO(fdoray): Use |task->delayed_run_time| on the service thread
+  // MessageLoop rather than recomputing it from |delay|.
+  service_thread_task_runner_->PostDelayedTask(
+      FROM_HERE, BindOnce(std::move(post_task_now_callback), std::move(task)),
+      delay);
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/task_scheduler/delayed_task_manager.h b/base/task_scheduler/delayed_task_manager.h
new file mode 100644
index 0000000..c48aeb1
--- /dev/null
+++ b/base/task_scheduler/delayed_task_manager.h
@@ -0,0 +1,80 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_DELAYED_TASK_MANAGER_H_
+#define BASE_TASK_SCHEDULER_DELAYED_TASK_MANAGER_H_
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/atomic_flag.h"
+#include "base/task_scheduler/scheduler_lock.h"
+#include "base/time/default_tick_clock.h"
+#include "base/time/tick_clock.h"
+
+namespace base {
+
+class TaskRunner;
+
+namespace internal {
+
+struct Task;
+
+// The DelayedTaskManager forwards tasks to their post-task callbacks when they
+// become ripe for execution. Tasks are not forwarded before Start() is called.
+// This class is thread-safe.
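+//
+// Usage sketch (OnTaskReadyToRun is a hypothetical free function that takes a
+// Task and runs or schedules it):
+//
+//   DelayedTaskManager manager;
+//   manager.Start(service_thread_task_runner);
+//   manager.AddDelayedTask(std::move(delayed_task),
+//                          BindOnce(&OnTaskReadyToRun));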
+class BASE_EXPORT DelayedTaskManager {
+ public:
+  // A callback that posts |task| for execution immediately.
+  using PostTaskNowCallback = OnceCallback<void(Task task)>;
+
+  // |tick_clock| can be specified for testing.
+  DelayedTaskManager(std::unique_ptr<const TickClock> tick_clock =
+                         std::make_unique<DefaultTickClock>());
+  ~DelayedTaskManager();
+
+  // Starts the delayed task manager, allowing past and future tasks to be
+  // forwarded to their callbacks as they become ripe for execution.
+  // |service_thread_task_runner| posts tasks to the TaskScheduler service
+  // thread.
+  void Start(scoped_refptr<TaskRunner> service_thread_task_runner);
+
+  // Schedules a call to |post_task_now_callback| with |task| as argument when
+  // |task| is ripe for execution and Start() has been called.
+  void AddDelayedTask(Task task, PostTaskNowCallback post_task_now_callback);
+
+ private:
+  // Schedules a call to |post_task_now_callback| with |task| as argument when
+  // |delay| expires. Start() must have been called before this.
+  void AddDelayedTaskNow(Task task,
+                         TimeDelta delay,
+                         PostTaskNowCallback post_task_now_callback);
+
+  const std::unique_ptr<const TickClock> tick_clock_;
+
+  AtomicFlag started_;
+
+  // Synchronizes access to all members below before |started_| is set. Once
+  // |started_| is set:
+  // - |service_thread_task_runner_| does not change, so it can be read without
+  //   holding the lock.
+  // - |tasks_added_before_start_| isn't accessed anymore.
+  SchedulerLock lock_;
+
+  scoped_refptr<TaskRunner> service_thread_task_runner_;
+  std::vector<std::pair<Task, PostTaskNowCallback>> tasks_added_before_start_;
+
+  DISALLOW_COPY_AND_ASSIGN(DelayedTaskManager);
+};
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_DELAYED_TASK_MANAGER_H_
diff --git a/base/task_scheduler/delayed_task_manager_unittest.cc b/base/task_scheduler/delayed_task_manager_unittest.cc
new file mode 100644
index 0000000..67c797a
--- /dev/null
+++ b/base/task_scheduler/delayed_task_manager_unittest.cc
@@ -0,0 +1,209 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/delayed_task_manager.h"
+
+#include <memory>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/memory/ptr_util.h"
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/task_scheduler/task.h"
+#include "base/test/bind_test_util.h"
+#include "base/test/test_mock_time_task_runner.h"
+#include "base/threading/thread.h"
+#include "base/time/time.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace internal {
+namespace {
+
+constexpr TimeDelta kLongDelay = TimeDelta::FromHours(1);
+
+class MockTask {
+ public:
+  MOCK_METHOD0(Run, void());
+};
+
+void RunTask(Task task) {
+  std::move(task.task).Run();
+}
+
+class TaskSchedulerDelayedTaskManagerTest : public testing::Test {
+ protected:
+  TaskSchedulerDelayedTaskManagerTest()
+      : delayed_task_manager_(
+            service_thread_task_runner_->DeprecatedGetMockTickClock()),
+        task_(FROM_HERE,
+              BindOnce(&MockTask::Run, Unretained(&mock_task_)),
+              TaskTraits(),
+              kLongDelay) {
+    // The constructor of Task computes |delayed_run_time| by adding |delay| to
+    // the real time. Recompute it by adding |delay| to the mock time.
+    task_.delayed_run_time =
+        service_thread_task_runner_->GetMockTickClock()->NowTicks() +
+        kLongDelay;
+  }
+  ~TaskSchedulerDelayedTaskManagerTest() override = default;
+
+  const scoped_refptr<TestMockTimeTaskRunner> service_thread_task_runner_ =
+      MakeRefCounted<TestMockTimeTaskRunner>();
+  DelayedTaskManager delayed_task_manager_;
+  testing::StrictMock<MockTask> mock_task_;
+  Task task_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(TaskSchedulerDelayedTaskManagerTest);
+};
+
+}  // namespace
+
+// Verify that a delayed task isn't forwarded before Start().
+TEST_F(TaskSchedulerDelayedTaskManagerTest, DelayedTaskDoesNotRunBeforeStart) {
+  // Send |task| to the DelayedTaskManager.
+  delayed_task_manager_.AddDelayedTask(std::move(task_), BindOnce(&RunTask));
+
+  // Fast-forward time until the task is ripe for execution. Since Start() has
+  // not been called, the task should not be forwarded to RunTask() (MockTask is
+  // a StrictMock without expectations so test will fail if RunTask() runs it).
+  service_thread_task_runner_->FastForwardBy(kLongDelay);
+}
+
+// Verify that a delayed task added before Start() and whose delay expires after
+// Start() is forwarded when its delay expires.
+TEST_F(TaskSchedulerDelayedTaskManagerTest,
+       DelayedTaskPostedBeforeStartExpiresAfterStartRunsOnExpire) {
+  // Send |task| to the DelayedTaskManager.
+  delayed_task_manager_.AddDelayedTask(std::move(task_), BindOnce(&RunTask));
+
+  delayed_task_manager_.Start(service_thread_task_runner_);
+
+  // Run tasks on the service thread. Don't expect any forwarding to
+  // RunTask() since the task isn't ripe for execution.
+  service_thread_task_runner_->RunUntilIdle();
+
+  // Fast-forward time until the task is ripe for execution. Expect the task to
+  // be forwarded to RunTask().
+  EXPECT_CALL(mock_task_, Run());
+  service_thread_task_runner_->FastForwardBy(kLongDelay);
+}
+
+// Verify that a delayed task added before Start() and whose delay expires
+// before Start() is forwarded when Start() is called.
+TEST_F(TaskSchedulerDelayedTaskManagerTest,
+       DelayedTaskPostedBeforeStartExpiresBeforeStartRunsOnStart) {
+  // Send |task| to the DelayedTaskManager.
+  delayed_task_manager_.AddDelayedTask(std::move(task_), BindOnce(&RunTask));
+
+  // Run tasks on the service thread. Don't expect any forwarding to
+  // RunTask() since the task isn't ripe for execution.
+  service_thread_task_runner_->RunUntilIdle();
+
+  // Fast-forward time until the task is ripe for execution. Don't expect the
+  // task to be forwarded since Start() hasn't been called yet.
+  service_thread_task_runner_->FastForwardBy(kLongDelay);
+
+  // Start the DelayedTaskManager. Expect the task to be forwarded to RunTask().
+  EXPECT_CALL(mock_task_, Run());
+  delayed_task_manager_.Start(service_thread_task_runner_);
+  service_thread_task_runner_->RunUntilIdle();
+}
+
+// Verify that a delayed task added after Start() isn't forwarded before it is
+// ripe for execution.
+TEST_F(TaskSchedulerDelayedTaskManagerTest, DelayedTaskDoesNotRunTooEarly) {
+  delayed_task_manager_.Start(service_thread_task_runner_);
+
+  // Send |task| to the DelayedTaskManager.
+  delayed_task_manager_.AddDelayedTask(std::move(task_), BindOnce(&RunTask));
+
+  // Run tasks that are ripe for execution. Don't expect any forwarding to
+  // RunTask().
+  service_thread_task_runner_->RunUntilIdle();
+}
+
+// Verify that a delayed task added after Start() is forwarded when it is ripe
+// for execution.
+TEST_F(TaskSchedulerDelayedTaskManagerTest, DelayedTaskRunsAfterDelay) {
+  delayed_task_manager_.Start(service_thread_task_runner_);
+
+  // Send |task| to the DelayedTaskManager.
+  delayed_task_manager_.AddDelayedTask(std::move(task_), BindOnce(&RunTask));
+
+  // Fast-forward time. Expect the task to be forwarded to RunTask().
+  EXPECT_CALL(mock_task_, Run());
+  service_thread_task_runner_->FastForwardBy(kLongDelay);
+}
+
+// Verify that multiple delayed tasks added after Start() are forwarded when
+// they are ripe for execution.
+TEST_F(TaskSchedulerDelayedTaskManagerTest, DelayedTasksRunAfterDelay) {
+  delayed_task_manager_.Start(service_thread_task_runner_);
+
+  testing::StrictMock<MockTask> mock_task_a;
+  Task task_a(FROM_HERE, BindOnce(&MockTask::Run, Unretained(&mock_task_a)),
+              TaskTraits(), TimeDelta::FromHours(1));
+
+  testing::StrictMock<MockTask> mock_task_b;
+  Task task_b(FROM_HERE, BindOnce(&MockTask::Run, Unretained(&mock_task_b)),
+              TaskTraits(), TimeDelta::FromHours(2));
+
+  testing::StrictMock<MockTask> mock_task_c;
+  Task task_c(FROM_HERE, BindOnce(&MockTask::Run, Unretained(&mock_task_c)),
+              TaskTraits(), TimeDelta::FromHours(1));
+
+  // Send tasks to the DelayedTaskManager.
+  delayed_task_manager_.AddDelayedTask(std::move(task_a), BindOnce(&RunTask));
+  delayed_task_manager_.AddDelayedTask(std::move(task_b), BindOnce(&RunTask));
+  delayed_task_manager_.AddDelayedTask(std::move(task_c), BindOnce(&RunTask));
+
+  // Run tasks that are ripe for execution on the service thread. Don't expect
+  // any call to RunTask().
+  service_thread_task_runner_->RunUntilIdle();
+
+  // Fast-forward time. Expect |task_a| and |task_c| to be forwarded to
+  // RunTask().
+  EXPECT_CALL(mock_task_a, Run());
+  EXPECT_CALL(mock_task_c, Run());
+  service_thread_task_runner_->FastForwardBy(TimeDelta::FromHours(1));
+  testing::Mock::VerifyAndClear(&mock_task_a);
+  testing::Mock::VerifyAndClear(&mock_task_c);
+
+  // Fast-forward time. Expect |task_b| to be forwarded to RunTask().
+  EXPECT_CALL(mock_task_b, Run());
+  service_thread_task_runner_->FastForwardBy(TimeDelta::FromHours(1));
+  testing::Mock::VerifyAndClear(&mock_task_b);
+}
+
+TEST_F(TaskSchedulerDelayedTaskManagerTest, PostTaskDuringStart) {
+  Thread other_thread("Test");
+  other_thread.StartAndWaitForTesting();
+
+  WaitableEvent task_posted;
+
+  other_thread.task_runner()->PostTask(FROM_HERE, BindLambdaForTesting([&]() {
+                                         delayed_task_manager_.AddDelayedTask(
+                                             std::move(task_),
+                                             BindOnce(&RunTask));
+                                         task_posted.Signal();
+                                       }));
+
+  delayed_task_manager_.Start(service_thread_task_runner_);
+
+  // This test exercises the race between AddDelayedTask() and Start(), but it
+  // still needs synchronization to ensure the final verification doesn't run
+  // before the task itself is posted.
+  task_posted.Wait();
+
+  // Fast-forward time. Expect the task to be forwarded to RunTask().
+  EXPECT_CALL(mock_task_, Run());
+  service_thread_task_runner_->FastForwardBy(kLongDelay);
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/task_scheduler/environment_config.cc b/base/task_scheduler/environment_config.cc
new file mode 100644
index 0000000..393b591
--- /dev/null
+++ b/base/task_scheduler/environment_config.cc
@@ -0,0 +1,19 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/environment_config.h"
+
+namespace base {
+namespace internal {
+
+size_t GetEnvironmentIndexForTraits(const TaskTraits& traits) {
+  const bool is_background =
+      traits.priority() == base::TaskPriority::BACKGROUND;
+  if (traits.may_block() || traits.with_base_sync_primitives())
+    return is_background ? BACKGROUND_BLOCKING : FOREGROUND_BLOCKING;
+  return is_background ? BACKGROUND : FOREGROUND;
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/task_scheduler/environment_config.h b/base/task_scheduler/environment_config.h
new file mode 100644
index 0000000..54f2ff3
--- /dev/null
+++ b/base/task_scheduler/environment_config.h
@@ -0,0 +1,46 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_ENVIRONMENT_CONFIG_H_
+#define BASE_TASK_SCHEDULER_ENVIRONMENT_CONFIG_H_
+
+#include <stddef.h>
+
+#include "base/base_export.h"
+#include "base/task_scheduler/task_traits.h"
+#include "base/threading/thread.h"
+
+namespace base {
+namespace internal {
+
+enum EnvironmentType {
+  BACKGROUND = 0,
+  BACKGROUND_BLOCKING,
+  FOREGROUND,
+  FOREGROUND_BLOCKING,
+  ENVIRONMENT_COUNT  // Always last.
+};
+
+// Order must match the EnvironmentType enum.
+constexpr struct {
+  // The threads and histograms of this environment will be labeled with
+  // the task scheduler name concatenated to this.
+  const char* name_suffix;
+
+  // Preferred priority for threads in this environment; the actual thread
+  // priority depends on shutdown state and platform capabilities.
+  ThreadPriority priority_hint;
+} kEnvironmentParams[] = {
+    {"Background", base::ThreadPriority::BACKGROUND},
+    {"BackgroundBlocking", base::ThreadPriority::BACKGROUND},
+    {"Foreground", base::ThreadPriority::NORMAL},
+    {"ForegroundBlocking", base::ThreadPriority::NORMAL},
+};
+
+size_t BASE_EXPORT GetEnvironmentIndexForTraits(const TaskTraits& traits);
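+
+// Example (a sketch, not part of the API): a trait set that may block and has
+// BACKGROUND priority maps to the BACKGROUND_BLOCKING environment, whose
+// threads carry the "BackgroundBlocking" suffix:
+//
+//   size_t index = GetEnvironmentIndexForTraits(
+//       TaskTraits(MayBlock(), TaskPriority::BACKGROUND));
+//   // index == BACKGROUND_BLOCKING and
+//   // kEnvironmentParams[index].name_suffix == "BackgroundBlocking".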
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_ENVIRONMENT_CONFIG_H_
diff --git a/base/task_scheduler/initialization_util.cc b/base/task_scheduler/initialization_util.cc
new file mode 100644
index 0000000..7accd19
--- /dev/null
+++ b/base/task_scheduler/initialization_util.cc
@@ -0,0 +1,22 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/initialization_util.h"
+
+#include <algorithm>
+#include <cmath>
+
+#include "base/sys_info.h"
+
+namespace base {
+
+int RecommendedMaxNumberOfThreadsInPool(int min,
+                                        int max,
+                                        double cores_multiplier,
+                                        int offset) {
+  const int num_of_cores = SysInfo::NumberOfProcessors();
+  const int threads =
+      static_cast<int>(std::ceil(num_of_cores * cores_multiplier)) + offset;
+  return std::min(max, std::max(min, threads));
+}
+
+}  // namespace base
diff --git a/base/task_scheduler/initialization_util.h b/base/task_scheduler/initialization_util.h
new file mode 100644
index 0000000..c3bd9e7
--- /dev/null
+++ b/base/task_scheduler/initialization_util.h
@@ -0,0 +1,21 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_INITIALIZATION_UTIL_H_
+#define BASE_TASK_SCHEDULER_INITIALIZATION_UTIL_H_
+
+#include "base/base_export.h"
+
+namespace base {
+
+// Computes a value that may be used as the maximum number of threads in a
+// TaskScheduler pool. Developers may use other methods to choose this maximum.
+BASE_EXPORT int RecommendedMaxNumberOfThreadsInPool(int min,
+                                                    int max,
+                                                    double cores_multiplier,
+                                                    int offset);
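+
+// Worked example (a sketch): on a machine where SysInfo::NumberOfProcessors()
+// returns 16,
+//
+//   RecommendedMaxNumberOfThreadsInPool(8, 32, 0.6, 0);
+//
+// yields ceil(16 * 0.6) + 0 = 10, which already lies within [8, 32], so 10 is
+// returned. The result is always clamped to [min, max].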
+
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_INITIALIZATION_UTIL_H_
diff --git a/base/task_scheduler/lazy_task_runner.cc b/base/task_scheduler/lazy_task_runner.cc
new file mode 100644
index 0000000..218d02b
--- /dev/null
+++ b/base/task_scheduler/lazy_task_runner.cc
@@ -0,0 +1,122 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/lazy_task_runner.h"
+
+#include <utility>
+
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/task_scheduler/post_task.h"
+
+namespace base {
+namespace internal {
+
+namespace {
+ScopedLazyTaskRunnerListForTesting* g_scoped_lazy_task_runner_list_for_testing =
+    nullptr;
+}  // namespace
+
+template <typename TaskRunnerType, bool com_sta>
+void LazyTaskRunner<TaskRunnerType, com_sta>::Reset() {
+  subtle::AtomicWord state = subtle::Acquire_Load(&state_);
+
+  DCHECK_NE(state, kLazyInstanceStateCreating) << "Race: all threads should be "
+                                                  "unwound in unittests before "
+                                                  "resetting TaskRunners.";
+
+  // Return if no reference is held by this instance.
+  if (!state)
+    return;
+
+  // Release the reference acquired in Get().
+  SequencedTaskRunner* task_runner = reinterpret_cast<TaskRunnerType*>(state);
+  task_runner->Release();
+
+  // Clear the state.
+  subtle::NoBarrier_Store(&state_, 0);
+}
+
+template <>
+scoped_refptr<SequencedTaskRunner>
+LazyTaskRunner<SequencedTaskRunner, false>::Create() {
+  // It is invalid to specify a SingleThreadTaskRunnerThreadMode with a
+  // LazySequencedTaskRunner.
+  DCHECK_EQ(thread_mode_, SingleThreadTaskRunnerThreadMode::SHARED);
+
+  return CreateSequencedTaskRunnerWithTraits(traits_);
+}
+
+template <>
+scoped_refptr<SingleThreadTaskRunner>
+LazyTaskRunner<SingleThreadTaskRunner, false>::Create() {
+  return CreateSingleThreadTaskRunnerWithTraits(traits_, thread_mode_);
+}
+
+#if defined(OS_WIN)
+template <>
+scoped_refptr<SingleThreadTaskRunner>
+LazyTaskRunner<SingleThreadTaskRunner, true>::Create() {
+  return CreateCOMSTATaskRunnerWithTraits(traits_, thread_mode_);
+}
+#endif
+
+// static
+template <typename TaskRunnerType, bool com_sta>
+TaskRunnerType* LazyTaskRunner<TaskRunnerType, com_sta>::CreateRaw(
+    void* void_self) {
+  auto self =
+      reinterpret_cast<LazyTaskRunner<TaskRunnerType, com_sta>*>(void_self);
+
+  scoped_refptr<TaskRunnerType> task_runner = self->Create();
+
+  // Acquire a reference to the TaskRunner. The reference will either
+  // never be released or be released in Reset(). The reference is not
+  // managed by a scoped_refptr because adding a scoped_refptr member to
+  // LazyTaskRunner would prevent its static initialization.
+  task_runner->AddRef();
+
+  // Reset this instance when the current
+  // ScopedLazyTaskRunnerListForTesting is destroyed, if any.
+  if (g_scoped_lazy_task_runner_list_for_testing) {
+    g_scoped_lazy_task_runner_list_for_testing->AddCallback(BindOnce(
+        &LazyTaskRunner<TaskRunnerType, com_sta>::Reset, Unretained(self)));
+  }
+
+  return task_runner.get();
+}
+
+template <typename TaskRunnerType, bool com_sta>
+scoped_refptr<TaskRunnerType> LazyTaskRunner<TaskRunnerType, com_sta>::Get() {
+  return WrapRefCounted(subtle::GetOrCreateLazyPointer(
+      &state_, &LazyTaskRunner<TaskRunnerType, com_sta>::CreateRaw,
+      reinterpret_cast<void*>(this), nullptr, nullptr));
+}
+
+template class LazyTaskRunner<SequencedTaskRunner, false>;
+template class LazyTaskRunner<SingleThreadTaskRunner, false>;
+
+#if defined(OS_WIN)
+template class LazyTaskRunner<SingleThreadTaskRunner, true>;
+#endif
+
+ScopedLazyTaskRunnerListForTesting::ScopedLazyTaskRunnerListForTesting() {
+  DCHECK(!g_scoped_lazy_task_runner_list_for_testing);
+  g_scoped_lazy_task_runner_list_for_testing = this;
+}
+
+ScopedLazyTaskRunnerListForTesting::~ScopedLazyTaskRunnerListForTesting() {
+  internal::AutoSchedulerLock auto_lock(lock_);
+  for (auto& callback : callbacks_)
+    std::move(callback).Run();
+  g_scoped_lazy_task_runner_list_for_testing = nullptr;
+}
+
+void ScopedLazyTaskRunnerListForTesting::AddCallback(OnceClosure callback) {
+  internal::AutoSchedulerLock auto_lock(lock_);
+  callbacks_.push_back(std::move(callback));
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/task_scheduler/lazy_task_runner.h b/base/task_scheduler/lazy_task_runner.h
new file mode 100644
index 0000000..7fcbddf
--- /dev/null
+++ b/base/task_scheduler/lazy_task_runner.h
@@ -0,0 +1,218 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_LAZY_TASK_RUNNER_H_
+#define BASE_TASK_SCHEDULER_LAZY_TASK_RUNNER_H_
+
+#include <vector>
+
+#include "base/atomicops.h"
+#include "base/callback.h"
+#include "base/compiler_specific.h"
+#include "base/lazy_instance_helpers.h"
+#include "base/sequenced_task_runner.h"
+#include "base/single_thread_task_runner.h"
+#include "base/task_scheduler/scheduler_lock.h"
+#include "base/task_scheduler/single_thread_task_runner_thread_mode.h"
+#include "base/task_scheduler/task_traits.h"
+#include "build/build_config.h"
+
+// Lazy(Sequenced|SingleThread|COMSTA)TaskRunner lazily creates a TaskRunner.
+//
+// Lazy(Sequenced|SingleThread|COMSTA)TaskRunner is meant to be instantiated in
+// an anonymous namespace (no static initializer is generated) and used to post
+// tasks to the same sequence/thread from pieces of code that don't have a
+// better way of sharing a TaskRunner. It is important to use this class
+// instead of a self-managed global variable or LazyInstance so that the
+// TaskRunners do not outlive the scope of the ScopedTaskEnvironment in unit
+// tests (otherwise the next test in the same process will die in use-after-
+// frees).
+//
+// IMPORTANT: Only use this API as a last resort. Prefer storing a
+// (Sequenced|SingleThread)TaskRunner returned by
+// base::Create(Sequenced|SingleThread|COMSTA)TaskRunnerWithTraits() as a member
+// on an object accessible by all PostTask() call sites.
+//
+// Example usage 1:
+//
+// namespace {
+// base::LazySequencedTaskRunner g_sequenced_task_runner =
+//     LAZY_SEQUENCED_TASK_RUNNER_INITIALIZER(
+//         base::TaskTraits(base::MayBlock(),
+//                          base::TaskPriority::USER_VISIBLE));
+// }  // namespace
+//
+// void SequencedFunction() {
+//   // Different invocations of this function post to the same
+//   // MayBlock() SequencedTaskRunner.
+//   g_sequenced_task_runner.Get()->PostTask(FROM_HERE, base::BindOnce(...));
+// }
+//
+// Example usage 2:
+//
+// namespace {
+// base::LazySequencedTaskRunner g_sequenced_task_runner =
+//     LAZY_SEQUENCED_TASK_RUNNER_INITIALIZER({base::MayBlock()});
+// }  // namespace
+//
+// // Code from different files can access the SequencedTaskRunner via this
+// // function.
+// scoped_refptr<base::SequencedTaskRunner> GetTaskRunner() {
+//   return g_sequenced_task_runner.Get();
+// }
+
+namespace base {
+
+namespace internal {
+template <typename TaskRunnerType, bool com_sta>
+class BASE_EXPORT LazyTaskRunner;
+}  // namespace internal
+
+// Lazy SequencedTaskRunner.
+using LazySequencedTaskRunner =
+    internal::LazyTaskRunner<SequencedTaskRunner, false>;
+
+// Lazy SingleThreadTaskRunner.
+using LazySingleThreadTaskRunner =
+    internal::LazyTaskRunner<SingleThreadTaskRunner, false>;
+
+#if defined(OS_WIN)
+// Lazy COM-STA enabled SingleThreadTaskRunner.
+using LazyCOMSTATaskRunner =
+    internal::LazyTaskRunner<SingleThreadTaskRunner, true>;
+#endif
+
+// Helper macros to generate a variable name by concatenation.
+#define LAZY_TASK_RUNNER_CONCATENATE_INTERNAL2(a, b) a##b
+#define LAZY_TASK_RUNNER_CONCATENATE_INTERNAL(a, b) \
+  LAZY_TASK_RUNNER_CONCATENATE_INTERNAL2(a, b)
+
+// Use the macros below to initialize a LazyTaskRunner. These macros verify that
+// their arguments are constexpr, which is important to prevent the generation
+// of a static initializer.
+
+// |traits| are TaskTraits used when creating the SequencedTaskRunner.
+#define LAZY_SEQUENCED_TASK_RUNNER_INITIALIZER(traits)                 \
+  base::LazySequencedTaskRunner::CreateInternal(traits);               \
+  ALLOW_UNUSED_TYPE constexpr base::TaskTraits                         \
+      LAZY_TASK_RUNNER_CONCATENATE_INTERNAL(kVerifyTraitsAreConstexpr, \
+                                            __LINE__) = traits
+
+// |traits| are TaskTraits used when creating the SingleThreadTaskRunner.
+// |thread_mode| specifies whether the SingleThreadTaskRunner can share its
+// thread with other SingleThreadTaskRunners.
+#define LAZY_SINGLE_THREAD_TASK_RUNNER_INITIALIZER(traits, thread_mode)   \
+  base::LazySingleThreadTaskRunner::CreateInternal(traits, thread_mode);  \
+  ALLOW_UNUSED_TYPE constexpr base::TaskTraits                            \
+      LAZY_TASK_RUNNER_CONCATENATE_INTERNAL(kVerifyTraitsAreConstexpr,    \
+                                            __LINE__) = traits;           \
+  ALLOW_UNUSED_TYPE constexpr base::SingleThreadTaskRunnerThreadMode      \
+      LAZY_TASK_RUNNER_CONCATENATE_INTERNAL(kVerifyThreadModeIsConstexpr, \
+                                            __LINE__) = thread_mode
+
+// |traits| are TaskTraits used when creating the COM STA
+// SingleThreadTaskRunner. |thread_mode| specifies whether the COM STA
+// SingleThreadTaskRunner can share its thread with other
+// SingleThreadTaskRunners.
+#define LAZY_COM_STA_TASK_RUNNER_INITIALIZER(traits, thread_mode)         \
+  base::LazyCOMSTATaskRunner::CreateInternal(traits, thread_mode);        \
+  ALLOW_UNUSED_TYPE constexpr base::TaskTraits                            \
+      LAZY_TASK_RUNNER_CONCATENATE_INTERNAL(kVerifyTraitsAreConstexpr,    \
+                                            __LINE__) = traits;           \
+  ALLOW_UNUSED_TYPE constexpr base::SingleThreadTaskRunnerThreadMode      \
+      LAZY_TASK_RUNNER_CONCATENATE_INTERNAL(kVerifyThreadModeIsConstexpr, \
+                                            __LINE__) = thread_mode
+
+namespace internal {
+
+template <typename TaskRunnerType, bool com_sta>
+class BASE_EXPORT LazyTaskRunner {
+ public:
+  // Use the macros above rather than a direct call to this.
+  //
+  // |traits| are TaskTraits to use to create the TaskRunner. If this
+  // LazyTaskRunner is specialized to create a SingleThreadTaskRunner,
+  // |thread_mode| specifies whether the SingleThreadTaskRunner can share its
+  // thread with other SingleThreadTaskRunners. Otherwise, it is unused.
+  static constexpr LazyTaskRunner CreateInternal(
+      const TaskTraits& traits,
+      SingleThreadTaskRunnerThreadMode thread_mode =
+          SingleThreadTaskRunnerThreadMode::SHARED) {
+    return LazyTaskRunner(traits, thread_mode);
+  }
+
+  // Returns the TaskRunner held by this instance. Creates it if it didn't
+  // already exist. Thread-safe.
+  scoped_refptr<TaskRunnerType> Get();
+
+ private:
+  constexpr LazyTaskRunner(const TaskTraits& traits,
+                           SingleThreadTaskRunnerThreadMode thread_mode =
+                               SingleThreadTaskRunnerThreadMode::SHARED)
+      : traits_(traits), thread_mode_(thread_mode) {}
+
+  // Releases the TaskRunner held by this instance.
+  void Reset();
+
+  // Creates and returns a new TaskRunner.
+  scoped_refptr<TaskRunnerType> Create();
+
+  // Creates a new TaskRunner via Create(), adds an explicit ref to it, and
+  // returns it raw. Used as an adapter for lazy instance helpers. Static and
+  // takes |this| as an explicit param to match the void* signature of
+  // GetOrCreateLazyPointer().
+  static TaskRunnerType* CreateRaw(void* void_self);
+
+  // TaskTraits to create the TaskRunner.
+  const TaskTraits traits_;
+
+  // SingleThreadTaskRunnerThreadMode to create the TaskRunner.
+  const SingleThreadTaskRunnerThreadMode thread_mode_;
+
+  // Can have 3 states:
+  // - This instance does not hold a TaskRunner: 0
+  // - This instance is creating a TaskRunner: kLazyInstanceStateCreating
+  // - This instance holds a TaskRunner: Pointer to the TaskRunner.
+  // LazyInstance's internals are reused to handle transition between states.
+  subtle::AtomicWord state_ = 0;
+
+  // No DISALLOW_COPY_AND_ASSIGN since that prevents static initialization with
+  // Visual Studio (warning C4592: 'symbol will be dynamically initialized
+  // (implementation limitation)').
+};
+
+// When a LazyTaskRunner becomes active (invokes Get()), it adds a callback to
+// the current ScopedLazyTaskRunnerListForTesting, if any. Callbacks run when
+// the ScopedLazyTaskRunnerListForTesting is destroyed. In a test process, a
+// ScopedLazyTaskRunnerListForTesting must be instantiated before any
+// LazyTaskRunner becomes active.
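+//
+// Usage sketch (in a test harness, before any LazyTaskRunner becomes active):
+//
+//   {
+//     internal::ScopedLazyTaskRunnerListForTesting scoped_list;
+//     // Tests run here; each LazyTaskRunner that becomes active registers a
+//     // Reset() callback with |scoped_list|.
+//   }  // All TaskRunner references held by LazyTaskRunners are released.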
+class BASE_EXPORT ScopedLazyTaskRunnerListForTesting {
+ public:
+  ScopedLazyTaskRunnerListForTesting();
+  ~ScopedLazyTaskRunnerListForTesting();
+
+ private:
+  friend class LazyTaskRunner<SequencedTaskRunner, false>;
+  friend class LazyTaskRunner<SingleThreadTaskRunner, false>;
+
+#if defined(OS_WIN)
+  friend class LazyTaskRunner<SingleThreadTaskRunner, true>;
+#endif
+
+  // Add |callback| to the list of callbacks to run on destruction.
+  void AddCallback(OnceClosure callback);
+
+  // Synchronizes accesses to |callbacks_|.
+  SchedulerLock lock_;
+
+  // List of callbacks to run on destruction.
+  std::vector<OnceClosure> callbacks_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedLazyTaskRunnerListForTesting);
+};
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_LAZY_TASK_RUNNER_H_
diff --git a/base/task_scheduler/lazy_task_runner_unittest.cc b/base/task_scheduler/lazy_task_runner_unittest.cc
new file mode 100644
index 0000000..3ca09c9
--- /dev/null
+++ b/base/task_scheduler/lazy_task_runner_unittest.cc
@@ -0,0 +1,199 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/lazy_task_runner.h"
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/sequence_checker_impl.h"
+#include "base/task_scheduler/scoped_set_task_priority_for_current_thread.h"
+#include "base/test/scoped_task_environment.h"
+#include "base/threading/thread_checker_impl.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_WIN)
+#include "base/win/com_init_util.h"
+#endif
+
+namespace base {
+
+namespace {
+
+LazySequencedTaskRunner g_sequenced_task_runner_user_visible =
+    LAZY_SEQUENCED_TASK_RUNNER_INITIALIZER({TaskPriority::USER_VISIBLE});
+LazySequencedTaskRunner g_sequenced_task_runner_user_blocking =
+    LAZY_SEQUENCED_TASK_RUNNER_INITIALIZER({TaskPriority::USER_BLOCKING});
+
+LazySingleThreadTaskRunner g_single_thread_task_runner_user_visible =
+    LAZY_SINGLE_THREAD_TASK_RUNNER_INITIALIZER(
+        {TaskPriority::USER_VISIBLE},
+        SingleThreadTaskRunnerThreadMode::SHARED);
+LazySingleThreadTaskRunner g_single_thread_task_runner_user_blocking =
+    LAZY_SINGLE_THREAD_TASK_RUNNER_INITIALIZER(
+        {TaskPriority::USER_BLOCKING},
+        SingleThreadTaskRunnerThreadMode::SHARED);
+
+#if defined(OS_WIN)
+LazyCOMSTATaskRunner g_com_sta_task_runner_user_visible =
+    LAZY_COM_STA_TASK_RUNNER_INITIALIZER(
+        {TaskPriority::USER_VISIBLE},
+        SingleThreadTaskRunnerThreadMode::SHARED);
+LazyCOMSTATaskRunner g_com_sta_task_runner_user_blocking =
+    LAZY_COM_STA_TASK_RUNNER_INITIALIZER(
+        {TaskPriority::USER_BLOCKING},
+        SingleThreadTaskRunnerThreadMode::SHARED);
+#endif  // defined(OS_WIN)
+
+void InitCheckers(SequenceCheckerImpl* sequence_checker,
+                  ThreadCheckerImpl* thread_checker) {
+  sequence_checker->DetachFromSequence();
+  EXPECT_TRUE(sequence_checker->CalledOnValidSequence());
+  thread_checker->DetachFromThread();
+  EXPECT_TRUE(thread_checker->CalledOnValidThread());
+}
+
+void ExpectSequencedEnvironment(SequenceCheckerImpl* sequence_checker,
+                                ThreadCheckerImpl* thread_checker,
+                                TaskPriority expected_priority) {
+  EXPECT_TRUE(sequence_checker->CalledOnValidSequence());
+  EXPECT_FALSE(thread_checker->CalledOnValidThread());
+  EXPECT_EQ(expected_priority, internal::GetTaskPriorityForCurrentThread());
+}
+
+void ExpectSingleThreadEnvironment(SequenceCheckerImpl* sequence_checker,
+                                   ThreadCheckerImpl* thread_checker,
+                                   TaskPriority expected_priority
+#if defined(OS_WIN)
+                                   ,
+                                   bool expect_com_sta = false
+#endif
+                                   ) {
+  EXPECT_TRUE(sequence_checker->CalledOnValidSequence());
+  EXPECT_TRUE(thread_checker->CalledOnValidThread());
+  EXPECT_EQ(expected_priority, internal::GetTaskPriorityForCurrentThread());
+
+#if defined(OS_WIN)
+  if (expect_com_sta)
+    win::AssertComApartmentType(win::ComApartmentType::STA);
+#endif
+}
+
+class TaskSchedulerLazyTaskRunnerEnvironmentTest : public testing::Test {
+ protected:
+  TaskSchedulerLazyTaskRunnerEnvironmentTest() = default;
+
+  void TestTaskRunnerEnvironment(scoped_refptr<SequencedTaskRunner> task_runner,
+                                 bool expect_single_thread,
+                                 TaskPriority expected_priority
+#if defined(OS_WIN)
+                                 ,
+                                 bool expect_com_sta = false
+#endif
+                                 ) {
+    SequenceCheckerImpl sequence_checker;
+    ThreadCheckerImpl thread_checker;
+    task_runner->PostTask(FROM_HERE,
+                          BindOnce(&InitCheckers, Unretained(&sequence_checker),
+                                   Unretained(&thread_checker)));
+    scoped_task_environment_.RunUntilIdle();
+
+    OnceClosure task =
+        expect_single_thread
+            ? BindOnce(&ExpectSingleThreadEnvironment,
+                       Unretained(&sequence_checker),
+                       Unretained(&thread_checker), expected_priority
+#if defined(OS_WIN)
+                       ,
+                       expect_com_sta
+#endif
+                       )
+            : BindOnce(&ExpectSequencedEnvironment,
+                       Unretained(&sequence_checker),
+                       Unretained(&thread_checker), expected_priority);
+    task_runner->PostTask(FROM_HERE, std::move(task));
+    scoped_task_environment_.RunUntilIdle();
+  }
+
+  test::ScopedTaskEnvironment scoped_task_environment_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(TaskSchedulerLazyTaskRunnerEnvironmentTest);
+};
+
+}  // namespace
+
+TEST_F(TaskSchedulerLazyTaskRunnerEnvironmentTest,
+       LazySequencedTaskRunnerUserVisible) {
+  TestTaskRunnerEnvironment(g_sequenced_task_runner_user_visible.Get(), false,
+                            TaskPriority::USER_VISIBLE);
+}
+
+TEST_F(TaskSchedulerLazyTaskRunnerEnvironmentTest,
+       LazySequencedTaskRunnerUserBlocking) {
+  TestTaskRunnerEnvironment(g_sequenced_task_runner_user_blocking.Get(), false,
+                            TaskPriority::USER_BLOCKING);
+}
+
+TEST_F(TaskSchedulerLazyTaskRunnerEnvironmentTest,
+       LazySingleThreadTaskRunnerUserVisible) {
+  TestTaskRunnerEnvironment(g_single_thread_task_runner_user_visible.Get(),
+                            true, TaskPriority::USER_VISIBLE);
+}
+
+TEST_F(TaskSchedulerLazyTaskRunnerEnvironmentTest,
+       LazySingleThreadTaskRunnerUserBlocking) {
+  TestTaskRunnerEnvironment(g_single_thread_task_runner_user_blocking.Get(),
+                            true, TaskPriority::USER_BLOCKING);
+}
+
+#if defined(OS_WIN)
+TEST_F(TaskSchedulerLazyTaskRunnerEnvironmentTest,
+       LazyCOMSTATaskRunnerUserVisible) {
+  TestTaskRunnerEnvironment(g_com_sta_task_runner_user_visible.Get(), true,
+                            TaskPriority::USER_VISIBLE, true);
+}
+
+TEST_F(TaskSchedulerLazyTaskRunnerEnvironmentTest,
+       LazyCOMSTATaskRunnerUserBlocking) {
+  TestTaskRunnerEnvironment(g_com_sta_task_runner_user_blocking.Get(), true,
+                            TaskPriority::USER_BLOCKING, true);
+}
+#endif  // defined(OS_WIN)
+
+TEST(TaskSchedulerLazyTaskRunnerTest, LazySequencedTaskRunnerReset) {
+  for (int i = 0; i < 2; ++i) {
+    test::ScopedTaskEnvironment scoped_task_environment;
+    // If the TaskRunner isn't released when the test::ScopedTaskEnvironment
+    // goes out of scope, the second invocation of the line below will access a
+    // deleted TaskScheduler and crash.
+    g_sequenced_task_runner_user_visible.Get()->PostTask(FROM_HERE,
+                                                         DoNothing());
+  }
+}
+
+TEST(TaskSchedulerLazyTaskRunnerTest, LazySingleThreadTaskRunnerReset) {
+  for (int i = 0; i < 2; ++i) {
+    test::ScopedTaskEnvironment scoped_task_environment;
+    // If the TaskRunner isn't released when the test::ScopedTaskEnvironment
+    // goes out of scope, the second invocation of the line below will access a
+    // deleted TaskScheduler and crash.
+    g_single_thread_task_runner_user_visible.Get()->PostTask(FROM_HERE,
+                                                             DoNothing());
+  }
+}
+
+#if defined(OS_WIN)
+TEST(TaskSchedulerLazyTaskRunnerTest, LazyCOMSTATaskRunnerReset) {
+  for (int i = 0; i < 2; ++i) {
+    test::ScopedTaskEnvironment scoped_task_environment;
+    // If the TaskRunner isn't released when the test::ScopedTaskEnvironment
+    // goes out of scope, the second invocation of the line below will access a
+    // deleted TaskScheduler and crash.
+    g_com_sta_task_runner_user_visible.Get()->PostTask(FROM_HERE, DoNothing());
+  }
+}
+#endif  // defined(OS_WIN)
+
+}  // namespace base
diff --git a/base/task_scheduler/platform_native_worker_pool_win.cc b/base/task_scheduler/platform_native_worker_pool_win.cc
new file mode 100644
index 0000000..ebad3a3
--- /dev/null
+++ b/base/task_scheduler/platform_native_worker_pool_win.cc
@@ -0,0 +1,110 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/platform_native_worker_pool_win.h"
+
+#include "base/task_scheduler/task_tracker.h"
+
+namespace base {
+namespace internal {
+
+PlatformNativeWorkerPoolWin::PlatformNativeWorkerPoolWin(
+    TrackedRef<TaskTracker> task_tracker,
+    DelayedTaskManager* delayed_task_manager)
+    : SchedulerWorkerPool(task_tracker, delayed_task_manager) {}
+
+PlatformNativeWorkerPoolWin::~PlatformNativeWorkerPoolWin() {
+#if DCHECK_IS_ON()
+  // Verify that JoinForTesting() has been called to ensure that there is no
+  // outstanding work. Otherwise, work may try to dereference an invalid
+  // pointer to this class.
+  DCHECK(join_for_testing_returned_.IsSet());
+#endif
+  ::DestroyThreadpoolEnvironment(&environment_);
+  ::CloseThreadpoolWork(work_);
+  ::CloseThreadpool(pool_);
+}
+
+void PlatformNativeWorkerPoolWin::Start() {
+  ::InitializeThreadpoolEnvironment(&environment_);
+
+  pool_ = ::CreateThreadpool(nullptr);
+  DCHECK(pool_) << "LastError: " << ::GetLastError();
+  ::SetThreadpoolThreadMinimum(pool_, 1);
+  ::SetThreadpoolThreadMaximum(pool_, 256);
+
+  work_ = ::CreateThreadpoolWork(&RunNextSequence, this, &environment_);
+  DCHECK(work_) << "LastError: " << GetLastError();
+  ::SetThreadpoolCallbackPool(&environment_, pool_);
+
+  size_t local_num_sequences_before_start;
+  {
+    auto transaction(priority_queue_.BeginTransaction());
+    DCHECK(!started_);
+    started_ = true;
+    local_num_sequences_before_start = transaction->Size();
+  }
+
+  // Schedule sequences added to |priority_queue_| before Start().
+  for (size_t i = 0; i < local_num_sequences_before_start; ++i)
+    ::SubmitThreadpoolWork(work_);
+}
+
+void PlatformNativeWorkerPoolWin::JoinForTesting() {
+  ::WaitForThreadpoolWorkCallbacks(work_, true);
+#if DCHECK_IS_ON()
+  DCHECK(!join_for_testing_returned_.IsSet());
+  join_for_testing_returned_.Set();
+#endif
+}
+
+// static
+void CALLBACK PlatformNativeWorkerPoolWin::RunNextSequence(
+    PTP_CALLBACK_INSTANCE,
+    void* scheduler_worker_pool_windows_impl,
+    PTP_WORK) {
+  auto* worker_pool = static_cast<PlatformNativeWorkerPoolWin*>(
+      scheduler_worker_pool_windows_impl);
+
+  worker_pool->BindToCurrentThread();
+
+  scoped_refptr<Sequence> sequence = worker_pool->GetWork();
+  DCHECK(sequence);
+
+  sequence = worker_pool->task_tracker_->RunAndPopNextTask(
+      std::move(sequence), worker_pool);
+
+  // Re-enqueue sequence and then submit another task to the Windows thread
+  // pool.
+  if (sequence)
+    worker_pool->OnCanScheduleSequence(std::move(sequence));
+
+  worker_pool->UnbindFromCurrentThread();
+}
+
+scoped_refptr<Sequence> PlatformNativeWorkerPoolWin::GetWork() {
+  auto transaction(priority_queue_.BeginTransaction());
+
+  // The PQ should never be empty here as there's a 1:1 correspondence between
+  // a call to ScheduleSequence()/SubmitThreadpoolWork() and GetWork().
+  DCHECK(!transaction->IsEmpty());
+  return transaction->PopSequence();
+}
+
+void PlatformNativeWorkerPoolWin::OnCanScheduleSequence(
+    scoped_refptr<Sequence> sequence) {
+  const SequenceSortKey sequence_sort_key = sequence->GetSortKey();
+  auto transaction(priority_queue_.BeginTransaction());
+
+  transaction->Push(std::move(sequence), sequence_sort_key);
+  if (started_) {
+    // TODO(fdoray): Handle priorities by having different work objects and
+    // using ::SetThreadpoolCallbackPriority() and
+    // ::SetThreadpoolCallbackRunsLong().
+    ::SubmitThreadpoolWork(work_);
+  }
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/task_scheduler/platform_native_worker_pool_win.h b/base/task_scheduler/platform_native_worker_pool_win.h
new file mode 100644
index 0000000..be903b5
--- /dev/null
+++ b/base/task_scheduler/platform_native_worker_pool_win.h
@@ -0,0 +1,90 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_PLATFORM_NATIVE_WORKER_POOL_WIN_H_
+#define BASE_TASK_SCHEDULER_PLATFORM_NATIVE_WORKER_POOL_WIN_H_
+
+#include <windows.h>
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/synchronization/atomic_flag.h"
+#include "base/task_scheduler/priority_queue.h"
+#include "base/task_scheduler/scheduler_worker_pool.h"
+
+namespace base {
+namespace internal {
+
+// A SchedulerWorkerPool implementation backed by the Windows Thread Pool API.
+//
+// Windows Thread Pool API official documentation:
+// https://msdn.microsoft.com/en-us/library/windows/desktop/ms686766(v=vs.85).aspx
+//
+// Blog posts on the Windows Thread Pool API:
+// https://msdn.microsoft.com/magazine/hh335066.aspx
+// https://msdn.microsoft.com/magazine/hh394144.aspx
+// https://msdn.microsoft.com/magazine/hh456398.aspx
+// https://msdn.microsoft.com/magazine/hh547107.aspx
+// https://msdn.microsoft.com/magazine/hh580731.aspx
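+//
+// In a nutshell, this class drives the following sequence of API calls (a
+// sketch mirroring Start(), OnCanScheduleSequence(), JoinForTesting() and the
+// destructor below; |Callback| stands for a function with RunNextSequence()'s
+// signature and |context| for the pointer passed back to it):
+//
+//   PTP_POOL pool = ::CreateThreadpool(nullptr);
+//   TP_CALLBACK_ENVIRON environment = {};
+//   ::InitializeThreadpoolEnvironment(&environment);
+//   ::SetThreadpoolCallbackPool(&environment, pool);
+//   PTP_WORK work = ::CreateThreadpoolWork(&Callback, context, &environment);
+//   ::SubmitThreadpoolWork(work);  // Once per Sequence that is ready to run.
+//   ::WaitForThreadpoolWorkCallbacks(work, /*fCancelPendingCallbacks=*/true);
+//   ::CloseThreadpoolWork(work);
+//   ::CloseThreadpool(pool);
+//   ::DestroyThreadpoolEnvironment(&environment);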
+class BASE_EXPORT PlatformNativeWorkerPoolWin : public SchedulerWorkerPool {
+ public:
+  PlatformNativeWorkerPoolWin(TrackedRef<TaskTracker> task_tracker,
+                              DelayedTaskManager* delayed_task_manager);
+
+  // Destroying a PlatformNativeWorkerPoolWin is not allowed in
+  // production; it is always leaked. In tests, it can only be destroyed after
+  // JoinForTesting() has returned.
+  ~PlatformNativeWorkerPoolWin() override;
+
+  // Starts the worker pool and allows tasks to begin running.
+  void Start();
+
+  // SchedulerWorkerPool:
+  void JoinForTesting() override;
+
+ private:
+  // Callback that gets run by |pool_|. It runs a task off the next sequence on
+  // the |priority_queue_|.
+  static void CALLBACK RunNextSequence(PTP_CALLBACK_INSTANCE,
+                                       void* scheduler_worker_pool_windows_impl,
+                                       PTP_WORK);
+
+  // SchedulerWorkerPool:
+  void OnCanScheduleSequence(scoped_refptr<Sequence> sequence) override;
+
+  // Returns the top Sequence off the |priority_queue_|. The |priority_queue_|
+  // is never empty when this is called: there is a 1:1 correspondence between
+  // a call to ScheduleSequence()/SubmitThreadpoolWork() and a call to this.
+  scoped_refptr<Sequence> GetWork();
+
+  // Thread pool object that |work_| gets executed on.
+  PTP_POOL pool_ = nullptr;
+
+  // Callback environment. |pool_| is associated with |environment_| so that
+  // work objects using this environment run on |pool_|.
+  TP_CALLBACK_ENVIRON environment_ = {};
+
+  // Work object that executes RunNextSequence. It has a pointer to the current
+  // |PlatformNativeWorkerPoolWin| and a pointer to |environment_| bound to
+  // it.
+  PTP_WORK work_ = nullptr;
+
+  // PriorityQueue from which all threads of this worker pool get work.
+  PriorityQueue priority_queue_;
+
+  // Indicates whether the pool has been started yet. This is only accessed
+  // under |priority_queue_|'s lock.
+  bool started_ = false;
+
+#if DCHECK_IS_ON()
+  // Set once JoinForTesting() has returned.
+  AtomicFlag join_for_testing_returned_;
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(PlatformNativeWorkerPoolWin);
+};
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_PLATFORM_NATIVE_WORKER_POOL_WIN_H_
diff --git a/base/task_scheduler/post_task.cc b/base/task_scheduler/post_task.cc
new file mode 100644
index 0000000..15210a5
--- /dev/null
+++ b/base/task_scheduler/post_task.cc
@@ -0,0 +1,132 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/post_task.h"
+
+#include <utility>
+
+#include "base/logging.h"
+#include "base/task_scheduler/scoped_set_task_priority_for_current_thread.h"
+#include "base/task_scheduler/task_scheduler.h"
+#include "base/threading/post_task_and_reply_impl.h"
+
+namespace base {
+
+namespace {
+
+class PostTaskAndReplyWithTraitsTaskRunner
+    : public internal::PostTaskAndReplyImpl {
+ public:
+  explicit PostTaskAndReplyWithTraitsTaskRunner(const TaskTraits& traits)
+      : traits_(traits) {}
+
+ private:
+  bool PostTask(const Location& from_here, OnceClosure task) override {
+    PostTaskWithTraits(from_here, traits_, std::move(task));
+    return true;
+  }
+
+  const TaskTraits traits_;
+};
+
+// Returns TaskTraits based on |traits|. If TaskPriority hasn't been set
+// explicitly in |traits|, the returned TaskTraits have the current
+// TaskPriority.
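+//
+// For instance (a sketch): a task posted with {MayBlock()} from within a
+// USER_VISIBLE task runs with {MayBlock(), TaskPriority::USER_VISIBLE}, while
+// a task posted with an explicit priority keeps that priority.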
+TaskTraits GetTaskTraitsWithExplicitPriority(const TaskTraits& traits) {
+  if (traits.priority_set_explicitly())
+    return traits;
+  return TaskTraits::Override(traits,
+                              {internal::GetTaskPriorityForCurrentThread()});
+}
+
+}  // namespace
+
+void PostTask(const Location& from_here, OnceClosure task) {
+  PostDelayedTask(from_here, std::move(task), TimeDelta());
+}
+
+void PostDelayedTask(const Location& from_here,
+                     OnceClosure task,
+                     TimeDelta delay) {
+  PostDelayedTaskWithTraits(from_here, TaskTraits(), std::move(task), delay);
+}
+
+void PostTaskAndReply(const Location& from_here,
+                      OnceClosure task,
+                      OnceClosure reply) {
+  PostTaskWithTraitsAndReply(from_here, TaskTraits(), std::move(task),
+                             std::move(reply));
+}
+
+void PostTaskWithTraits(const Location& from_here,
+                        const TaskTraits& traits,
+                        OnceClosure task) {
+  PostDelayedTaskWithTraits(from_here, traits, std::move(task), TimeDelta());
+}
+
+void PostDelayedTaskWithTraits(const Location& from_here,
+                               const TaskTraits& traits,
+                               OnceClosure task,
+                               TimeDelta delay) {
+  DCHECK(TaskScheduler::GetInstance())
+      << "Ref. Prerequisite section of post_task.h.\n\n"
+         "Hint: if this is in a unit test, you're likely merely missing a "
+         "base::test::ScopedTaskEnvironment member in your fixture.\n";
+  TaskScheduler::GetInstance()->PostDelayedTaskWithTraits(
+      from_here, GetTaskTraitsWithExplicitPriority(traits), std::move(task),
+      delay);
+}
+
+void PostTaskWithTraitsAndReply(const Location& from_here,
+                                const TaskTraits& traits,
+                                OnceClosure task,
+                                OnceClosure reply) {
+  PostTaskAndReplyWithTraitsTaskRunner(traits).PostTaskAndReply(
+      from_here, std::move(task), std::move(reply));
+}
+
+scoped_refptr<TaskRunner> CreateTaskRunnerWithTraits(const TaskTraits& traits) {
+  DCHECK(TaskScheduler::GetInstance())
+      << "Ref. Prerequisite section of post_task.h.\n\n"
+         "Hint: if this is in a unit test, you're likely merely missing a "
+         "base::test::ScopedTaskEnvironment member in your fixture.\n";
+  return TaskScheduler::GetInstance()->CreateTaskRunnerWithTraits(
+      GetTaskTraitsWithExplicitPriority(traits));
+}
+
+scoped_refptr<SequencedTaskRunner> CreateSequencedTaskRunnerWithTraits(
+    const TaskTraits& traits) {
+  DCHECK(TaskScheduler::GetInstance())
+      << "Ref. Prerequisite section of post_task.h.\n\n"
+         "Hint: if this is in a unit test, you're likely merely missing a "
+         "base::test::ScopedTaskEnvironment member in your fixture.\n";
+  return TaskScheduler::GetInstance()->CreateSequencedTaskRunnerWithTraits(
+      GetTaskTraitsWithExplicitPriority(traits));
+}
+
+scoped_refptr<SingleThreadTaskRunner> CreateSingleThreadTaskRunnerWithTraits(
+    const TaskTraits& traits,
+    SingleThreadTaskRunnerThreadMode thread_mode) {
+  DCHECK(TaskScheduler::GetInstance())
+      << "Ref. Prerequisite section of post_task.h.\n\n"
+         "Hint: if this is in a unit test, you're likely merely missing a "
+         "base::test::ScopedTaskEnvironment member in your fixture.\n";
+  return TaskScheduler::GetInstance()->CreateSingleThreadTaskRunnerWithTraits(
+      GetTaskTraitsWithExplicitPriority(traits), thread_mode);
+}
+
+#if defined(OS_WIN)
+scoped_refptr<SingleThreadTaskRunner> CreateCOMSTATaskRunnerWithTraits(
+    const TaskTraits& traits,
+    SingleThreadTaskRunnerThreadMode thread_mode) {
+  DCHECK(TaskScheduler::GetInstance())
+      << "Ref. Prerequisite section of post_task.h.\n\n"
+         "Hint: if this is in a unit test, you're likely merely missing a "
+         "base::test::ScopedTaskEnvironment member in your fixture.\n";
+  return TaskScheduler::GetInstance()->CreateCOMSTATaskRunnerWithTraits(
+      GetTaskTraitsWithExplicitPriority(traits), thread_mode);
+}
+#endif  // defined(OS_WIN)
+
+}  // namespace base
diff --git a/base/task_scheduler/post_task.h b/base/task_scheduler/post_task.h
new file mode 100644
index 0000000..d757c85
--- /dev/null
+++ b/base/task_scheduler/post_task.h
@@ -0,0 +1,225 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_POST_TASK_H_
+#define BASE_TASK_SCHEDULER_POST_TASK_H_
+
+#include <utility>
+
+#include "base/base_export.h"
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/location.h"
+#include "base/memory/ref_counted.h"
+#include "base/post_task_and_reply_with_result_internal.h"
+#include "base/sequenced_task_runner.h"
+#include "base/single_thread_task_runner.h"
+#include "base/task_runner.h"
+#include "base/task_scheduler/single_thread_task_runner_thread_mode.h"
+#include "base/task_scheduler/task_traits.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// This is the preferred interface to post tasks to the TaskScheduler.
+//
+// To post a simple one-off task with default traits:
+//     PostTask(FROM_HERE, Bind(...));
+//
+// To post a high priority one-off task to respond to a user interaction:
+//     PostTaskWithTraits(
+//         FROM_HERE,
+//         {TaskPriority::USER_BLOCKING},
+//         Bind(...));
+//
+// To post tasks that must run in sequence with default traits:
+//     scoped_refptr<SequencedTaskRunner> task_runner =
+//         CreateSequencedTaskRunnerWithTraits(TaskTraits());
+//     task_runner->PostTask(FROM_HERE, Bind(...));
+//     task_runner->PostTask(FROM_HERE, Bind(...));
+//
+// To post tasks that may block, must run in sequence and can be skipped on
+// shutdown:
+//     scoped_refptr<SequencedTaskRunner> task_runner =
+//         CreateSequencedTaskRunnerWithTraits(
+//             {MayBlock(), TaskShutdownBehavior::SKIP_ON_SHUTDOWN});
+//     task_runner->PostTask(FROM_HERE, Bind(...));
+//     task_runner->PostTask(FROM_HERE, Bind(...));
+//
+// The default traits apply to tasks that:
+//     (1) don't block (ref. MayBlock() and WithBaseSyncPrimitives()),
+//     (2) prefer inheriting the current priority to specifying their own, and
+//     (3) can either block shutdown or be skipped on shutdown
+//         (TaskScheduler implementation is free to choose a fitting default).
+// Explicit traits must be specified for tasks for which these loose
+// requirements are not sufficient.
+//
+// Tasks posted through functions below will run on threads owned by the
+// registered TaskScheduler (i.e. not on the main thread). Tasks posted through
+// functions below with a delay may be coalesced (i.e. delays may be adjusted to
+// reduce the number of wakeups and hence power consumption).
+//
+// Prerequisite: A TaskScheduler must have been registered for the current
+// process via TaskScheduler::SetInstance() before the functions below are
+// valid. This is typically done during the initialization phase in each
+// process. If your code is not running in that phase, you most likely don't
+// have to worry about this. You will encounter DCHECKs or nullptr dereferences
+// if this is violated. For tests, prefer base::test::ScopedTaskEnvironment.
+
+// Posts |task| to the TaskScheduler. Calling this is equivalent to calling
+// PostTaskWithTraits with plain TaskTraits.
+BASE_EXPORT void PostTask(const Location& from_here, OnceClosure task);
+
+// Posts |task| to the TaskScheduler. |task| will not run before |delay|
+// expires. Calling this is equivalent to calling PostDelayedTaskWithTraits with
+// plain TaskTraits.
+//
+// Use PostDelayedTaskWithTraits to specify a BACKGROUND priority if the task
+// doesn't have to run as soon as |delay| expires.
+BASE_EXPORT void PostDelayedTask(const Location& from_here,
+                                 OnceClosure task,
+                                 TimeDelta delay);
+
+// Posts |task| to the TaskScheduler and posts |reply| on the caller's execution
+// context (i.e. same sequence or thread and same TaskTraits if applicable) when
+// |task| completes. Calling this is equivalent to calling
+// PostTaskWithTraitsAndReply with plain TaskTraits. Can only be called when
+// SequencedTaskRunnerHandle::IsSet().
+BASE_EXPORT void PostTaskAndReply(const Location& from_here,
+                                  OnceClosure task,
+                                  OnceClosure reply);
+
+// Posts |task| to the TaskScheduler and posts |reply| with the return value of
+// |task| as argument on the caller's execution context (i.e. same sequence or
+// thread and same TaskTraits if applicable) when |task| completes. Calling this
+// is equivalent to calling PostTaskWithTraitsAndReplyWithResult with plain
+// TaskTraits. Can only be called when SequencedTaskRunnerHandle::IsSet().
+template <typename TaskReturnType, typename ReplyArgType>
+void PostTaskAndReplyWithResult(const Location& from_here,
+                                OnceCallback<TaskReturnType()> task,
+                                OnceCallback<void(ReplyArgType)> reply) {
+  PostTaskWithTraitsAndReplyWithResult(from_here, TaskTraits(), std::move(task),
+                                       std::move(reply));
+}
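+
+// Usage sketch for PostTaskAndReplyWithResult (|ComputeValue| and |UseValue|
+// are hypothetical free functions; the former runs on the scheduler and the
+// latter runs on the current sequence with the former's return value):
+//
+//   PostTaskAndReplyWithResult(FROM_HERE,
+//                              BindOnce(&ComputeValue),
+//                              BindOnce(&UseValue));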
+
+// Callback version of PostTaskAndReplyWithResult above.
+// Though RepeatingCallback is convertible to OnceCallback, we need this
+// overload because overload resolution cannot perform template argument
+// deduction and the Callback-to-OnceCallback conversion at the same time.
+// TODO(tzik): Update all callers of the Callback version to use OnceCallback.
+template <typename TaskReturnType, typename ReplyArgType>
+void PostTaskAndReplyWithResult(const Location& from_here,
+                                Callback<TaskReturnType()> task,
+                                Callback<void(ReplyArgType)> reply) {
+  PostTaskAndReplyWithResult(
+      from_here, OnceCallback<TaskReturnType()>(std::move(task)),
+      OnceCallback<void(ReplyArgType)>(std::move(reply)));
+}
+
+// Posts |task| with specific |traits| to the TaskScheduler.
+BASE_EXPORT void PostTaskWithTraits(const Location& from_here,
+                                    const TaskTraits& traits,
+                                    OnceClosure task);
+
+// Posts |task| with specific |traits| to the TaskScheduler. |task| will not run
+// before |delay| expires.
+//
+// Specify a BACKGROUND priority via |traits| if the task doesn't have to run as
+// soon as |delay| expires.
+BASE_EXPORT void PostDelayedTaskWithTraits(const Location& from_here,
+                                           const TaskTraits& traits,
+                                           OnceClosure task,
+                                           TimeDelta delay);
+
+// Posts |task| with specific |traits| to the TaskScheduler and posts |reply| on
+// the caller's execution context (i.e. same sequence or thread and same
+// TaskTraits if applicable) when |task| completes. Can only be called when
+// SequencedTaskRunnerHandle::IsSet().
+BASE_EXPORT void PostTaskWithTraitsAndReply(const Location& from_here,
+                                            const TaskTraits& traits,
+                                            OnceClosure task,
+                                            OnceClosure reply);
+
+// Posts |task| with specific |traits| to the TaskScheduler and posts |reply|
+// with the return value of |task| as argument on the caller's execution context
+// (i.e. same sequence or thread and same TaskTraits if applicable) when |task|
+// completes. Can only be called when SequencedTaskRunnerHandle::IsSet().
+template <typename TaskReturnType, typename ReplyArgType>
+void PostTaskWithTraitsAndReplyWithResult(
+    const Location& from_here,
+    const TaskTraits& traits,
+    OnceCallback<TaskReturnType()> task,
+    OnceCallback<void(ReplyArgType)> reply) {
+  TaskReturnType* result = new TaskReturnType();
+  return PostTaskWithTraitsAndReply(
+      from_here, traits,
+      BindOnce(&internal::ReturnAsParamAdapter<TaskReturnType>, std::move(task),
+               result),
+      BindOnce(&internal::ReplyAdapter<TaskReturnType, ReplyArgType>,
+               std::move(reply), Owned(result)));
+}
+
+// Callback version of PostTaskWithTraitsAndReplyWithResult above.
+// Though RepeatingCallback is convertible to OnceCallback, we need this
+// overload because overload resolution cannot perform template argument
+// deduction and the Callback-to-OnceCallback conversion at the same time.
+// TODO(tzik): Update all callers of the Callback version to use OnceCallback.
+template <typename TaskReturnType, typename ReplyArgType>
+void PostTaskWithTraitsAndReplyWithResult(const Location& from_here,
+                                          const TaskTraits& traits,
+                                          Callback<TaskReturnType()> task,
+                                          Callback<void(ReplyArgType)> reply) {
+  PostTaskWithTraitsAndReplyWithResult(
+      from_here, traits, OnceCallback<TaskReturnType()>(std::move(task)),
+      OnceCallback<void(ReplyArgType)>(std::move(reply)));
+}
+
+// Returns a TaskRunner whose PostTask invocations result in scheduling tasks
+// using |traits|. Tasks may run in any order and in parallel.
+BASE_EXPORT scoped_refptr<TaskRunner> CreateTaskRunnerWithTraits(
+    const TaskTraits& traits);
+
+// Returns a SequencedTaskRunner whose PostTask invocations result in scheduling
+// tasks using |traits|. Tasks run one at a time in posting order.
+BASE_EXPORT scoped_refptr<SequencedTaskRunner>
+CreateSequencedTaskRunnerWithTraits(const TaskTraits& traits);
+
+// Returns a SingleThreadTaskRunner whose PostTask invocations result in
+// scheduling tasks using |traits| on a thread determined by |thread_mode|. See
+// base/task_scheduler/single_thread_task_runner_thread_mode.h for |thread_mode|
+// details. Tasks run on a single thread in posting order.
+//
+// If all you need is to make sure that tasks don't run concurrently (e.g.
+// because they access a data structure which is not thread-safe), use
+// CreateSequencedTaskRunnerWithTraits(). Only use this if you rely on a thread-
+// affine API (it might be safer to assume thread-affinity when dealing with
+// under-documented third-party APIs, e.g. other OS') or share data across tasks
+// using thread-local storage.
+BASE_EXPORT scoped_refptr<SingleThreadTaskRunner>
+CreateSingleThreadTaskRunnerWithTraits(
+    const TaskTraits& traits,
+    SingleThreadTaskRunnerThreadMode thread_mode =
+        SingleThreadTaskRunnerThreadMode::SHARED);
+
+#if defined(OS_WIN)
+// Returns a SingleThreadTaskRunner whose PostTask invocations result in
+// scheduling tasks using |traits| in a COM Single-Threaded Apartment on a
+// thread determined by |thread_mode|. See
+// base/task_scheduler/single_thread_task_runner_thread_mode.h for |thread_mode|
+// details. Tasks run in the same Single-Threaded Apartment in posting order for
+// the returned SingleThreadTaskRunner. There is not necessarily a one-to-one
+// correspondence between SingleThreadTaskRunners and Single-Threaded
+// Apartments. The implementation is free to share apartments or create new
+// apartments as necessary. In either case, care should be taken to make sure
+// COM pointers are not smuggled across apartments.
+BASE_EXPORT scoped_refptr<SingleThreadTaskRunner>
+CreateCOMSTATaskRunnerWithTraits(const TaskTraits& traits,
+                                 SingleThreadTaskRunnerThreadMode thread_mode =
+                                     SingleThreadTaskRunnerThreadMode::SHARED);
+#endif  // defined(OS_WIN)
+
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_POST_TASK_H_
diff --git a/base/task_scheduler/priority_queue.cc b/base/task_scheduler/priority_queue.cc
new file mode 100644
index 0000000..59e9d3f
--- /dev/null
+++ b/base/task_scheduler/priority_queue.cc
@@ -0,0 +1,109 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/priority_queue.h"
+
+#include <utility>
+
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+
+namespace base {
+namespace internal {
+
+// A class combining a Sequence and the SequenceSortKey that determines its
+// position in a PriorityQueue. Instances are only mutable via take_sequence()
+// which can only be called once and renders its instance invalid after the
+// call.
+class PriorityQueue::SequenceAndSortKey {
+ public:
+  SequenceAndSortKey(scoped_refptr<Sequence> sequence,
+                     const SequenceSortKey& sort_key)
+      : sequence_(std::move(sequence)), sort_key_(sort_key) {
+    DCHECK(sequence_);
+  }
+
+  // Note: while |sequence_| should always be non-null post-move (i.e. we
+  // shouldn't be moving an invalid SequenceAndSortKey around), there can't be a
+  // DCHECK(sequence_) on moves as the Windows STL moves elements on pop instead
+  // of overwriting them: resulting in the move of a SequenceAndSortKey with a
+  // null |sequence_| in Transaction::Pop()'s implementation.
+  SequenceAndSortKey(SequenceAndSortKey&& other) = default;
+  SequenceAndSortKey& operator=(SequenceAndSortKey&& other) = default;
+
+  // Extracts |sequence_| from this object. This object is invalid after this
+  // call.
+  scoped_refptr<Sequence> take_sequence() {
+    DCHECK(sequence_);
+    return std::move(sequence_);
+  }
+
+  // Compares this SequenceAndSortKey to |other| based on their respective
+  // |sort_key_|.
+  bool operator<(const SequenceAndSortKey& other) const {
+    return sort_key_ < other.sort_key_;
+  }
+  bool operator>(const SequenceAndSortKey& other) const {
+    return other < *this;
+  }
+
+  const SequenceSortKey& sort_key() const { return sort_key_; }
+
+ private:
+  scoped_refptr<Sequence> sequence_;
+  SequenceSortKey sort_key_;
+
+  DISALLOW_COPY_AND_ASSIGN(SequenceAndSortKey);
+};
+
+PriorityQueue::Transaction::Transaction(PriorityQueue* outer_queue)
+    : auto_lock_(outer_queue->container_lock_), outer_queue_(outer_queue) {
+}
+
+PriorityQueue::Transaction::~Transaction() = default;
+
+void PriorityQueue::Transaction::Push(
+    scoped_refptr<Sequence> sequence,
+    const SequenceSortKey& sequence_sort_key) {
+  outer_queue_->container_.emplace(std::move(sequence), sequence_sort_key);
+}
+
+const SequenceSortKey& PriorityQueue::Transaction::PeekSortKey() const {
+  DCHECK(!IsEmpty());
+  return outer_queue_->container_.top().sort_key();
+}
+
+scoped_refptr<Sequence> PriorityQueue::Transaction::PopSequence() {
+  DCHECK(!IsEmpty());
+
+  // The const_cast on top() is okay since the SequenceAndSortKey is
+  // transactionally being popped from |container_| right after and taking its
+  // Sequence does not alter its sort order (a requirement for the Windows STL's
+  // consistency debug-checks for std::priority_queue::top()).
+  scoped_refptr<Sequence> sequence =
+      const_cast<PriorityQueue::SequenceAndSortKey&>(
+          outer_queue_->container_.top())
+          .take_sequence();
+  outer_queue_->container_.pop();
+  return sequence;
+}
+
+bool PriorityQueue::Transaction::IsEmpty() const {
+  return outer_queue_->container_.empty();
+}
+
+size_t PriorityQueue::Transaction::Size() const {
+  return outer_queue_->container_.size();
+}
+
+PriorityQueue::PriorityQueue() = default;
+
+PriorityQueue::~PriorityQueue() = default;
+
+std::unique_ptr<PriorityQueue::Transaction> PriorityQueue::BeginTransaction() {
+  return WrapUnique(new Transaction(this));
+}
+
+}  // namespace internal
+}  // namespace base
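PopSequence()'s const_cast deserves a closer look. Below is a minimal
standalone sketch of the same move-from-top idiom on a plain
std::priority_queue; the Item type and PopPayload() are illustrative only and
not part of base/.

#include <memory>
#include <queue>
#include <utility>

struct Item {
  int sort_key;                  // Determines heap order.
  std::unique_ptr<int> payload;  // Move-only state to extract.
  bool operator<(const Item& other) const { return sort_key < other.sort_key; }
};

std::unique_ptr<int> PopPayload(std::priority_queue<Item>& queue) {
  // top() returns a const reference; casting away const is safe here because
  // taking |payload| does not change |sort_key|, so the heap invariant holds
  // for the brief window before pop() removes the element.
  std::unique_ptr<int> payload =
      std::move(const_cast<Item&>(queue.top()).payload);
  queue.pop();
  return payload;
}

// Usage: queue.push({3, std::make_unique<int>(42)});
//        std::unique_ptr<int> p = PopPayload(queue);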
diff --git a/base/task_scheduler/priority_queue.h b/base/task_scheduler/priority_queue.h
new file mode 100644
index 0000000..d882364
--- /dev/null
+++ b/base/task_scheduler/priority_queue.h
@@ -0,0 +1,104 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_PRIORITY_QUEUE_H_
+#define BASE_TASK_SCHEDULER_PRIORITY_QUEUE_H_
+
+#include <memory>
+#include <queue>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/task_scheduler/scheduler_lock.h"
+#include "base/task_scheduler/sequence.h"
+#include "base/task_scheduler/sequence_sort_key.h"
+
+namespace base {
+namespace internal {
+
+// A PriorityQueue holds Sequences of Tasks. This class is thread-safe.
+class BASE_EXPORT PriorityQueue {
+ public:
+  // A Transaction can perform multiple operations atomically on a
+  // PriorityQueue. While a Transaction is alive, it is guaranteed that nothing
+  // else will access the PriorityQueue.
+  //
+  // A Worker needs to be able to Peek sequences from both its PriorityQueues
+  // (single-threaded and shared) and then Pop the sequence with the highest
+  // priority. If the Peek and the Pop are done through the same Transaction, it
+  // is guaranteed that the PriorityQueue hasn't changed between the 2
+  // operations.
+  class BASE_EXPORT Transaction {
+   public:
+    ~Transaction();
+
+    // Inserts |sequence| in the PriorityQueue with |sequence_sort_key|.
+    // Note: |sequence_sort_key| is required as a parameter instead of being
+    // extracted from |sequence| in Push() to avoid this Transaction having a
+    // lock interdependency with |sequence|.
+    void Push(scoped_refptr<Sequence> sequence,
+              const SequenceSortKey& sequence_sort_key);
+
+    // Returns a reference to the SequenceSortKey representing the priority of
+    // the highest pending task in this PriorityQueue. The reference becomes
+    // invalid the next time that this PriorityQueue is modified.
+    // Cannot be called on an empty PriorityQueue.
+    const SequenceSortKey& PeekSortKey() const;
+
+    // Removes and returns the highest priority Sequence in this PriorityQueue.
+    // Cannot be called on an empty PriorityQueue.
+    scoped_refptr<Sequence> PopSequence();
+
+    // Returns true if the PriorityQueue is empty.
+    bool IsEmpty() const;
+
+    // Returns the number of Sequences in the PriorityQueue.
+    size_t Size() const;
+
+   private:
+    friend class PriorityQueue;
+
+    explicit Transaction(PriorityQueue* outer_queue);
+
+    // Holds the lock of |outer_queue_| for the lifetime of this Transaction.
+    AutoSchedulerLock auto_lock_;
+
+    PriorityQueue* const outer_queue_;
+
+    DISALLOW_COPY_AND_ASSIGN(Transaction);
+  };
+
+  PriorityQueue();
+
+  ~PriorityQueue();
+
+  // Begins a Transaction. This method cannot be called on a thread which
+  // already has an active Transaction: |container_lock_| is a SchedulerLock
+  // with no allowed predecessor, so acquiring it while another scheduler lock
+  // is held DCHECKs (see scheduler_lock.h).
+  std::unique_ptr<Transaction> BeginTransaction();
+
+  const SchedulerLock* container_lock() const { return &container_lock_; }
+
+ private:
+  // A class combining a Sequence and the SequenceSortKey that determines its
+  // position in a PriorityQueue.
+  class SequenceAndSortKey;
+
+  using ContainerType = std::priority_queue<SequenceAndSortKey>;
+
+  // Synchronizes access to |container_|.
+  SchedulerLock container_lock_;
+
+  ContainerType container_;
+
+  DISALLOW_COPY_AND_ASSIGN(PriorityQueue);
+};
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_PRIORITY_QUEUE_H_
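One point worth spelling out: std::priority_queue is a max-heap driven by
operator<, so PeekSortKey()/PopSequence() surface the sequence whose sort key
compares greatest. For the queue to return the highest-priority sequence
first, SequenceSortKey's operator< must rank more urgent keys as greater
(which the unit test below confirms: USER_BLOCKING pops before USER_VISIBLE
before BACKGROUND). A toy illustration of that max-heap behavior, where
ToySortKey is a stand-in and not the real SequenceSortKey:

#include <cassert>
#include <queue>

struct ToySortKey {
  int priority;  // Larger value == more urgent.
  bool operator<(const ToySortKey& other) const {
    return priority < other.priority;
  }
};

int main() {
  std::priority_queue<ToySortKey> pq;
  pq.push({1});
  pq.push({3});
  pq.push({2});
  assert(pq.top().priority == 3);  // The most urgent key surfaces first.
}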
diff --git a/base/task_scheduler/priority_queue_unittest.cc b/base/task_scheduler/priority_queue_unittest.cc
new file mode 100644
index 0000000..9dc4d13
--- /dev/null
+++ b/base/task_scheduler/priority_queue_unittest.cc
@@ -0,0 +1,172 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/priority_queue.h"
+
+#include <memory>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/task_scheduler/sequence.h"
+#include "base/task_scheduler/task.h"
+#include "base/task_scheduler/task_traits.h"
+#include "base/test/gtest_util.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/simple_thread.h"
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace internal {
+
+namespace {
+
+class ThreadBeginningTransaction : public SimpleThread {
+ public:
+  explicit ThreadBeginningTransaction(PriorityQueue* priority_queue)
+      : SimpleThread("ThreadBeginningTransaction"),
+        priority_queue_(priority_queue),
+        transaction_began_(WaitableEvent::ResetPolicy::MANUAL,
+                           WaitableEvent::InitialState::NOT_SIGNALED) {}
+
+  // SimpleThread:
+  void Run() override {
+    std::unique_ptr<PriorityQueue::Transaction> transaction =
+        priority_queue_->BeginTransaction();
+    transaction_began_.Signal();
+  }
+
+  void ExpectTransactionDoesNotBegin() {
+    // After a few milliseconds, the call to BeginTransaction() should not have
+    // returned.
+    EXPECT_FALSE(
+        transaction_began_.TimedWait(TimeDelta::FromMilliseconds(250)));
+  }
+
+ private:
+  PriorityQueue* const priority_queue_;
+  WaitableEvent transaction_began_;
+
+  DISALLOW_COPY_AND_ASSIGN(ThreadBeginningTransaction);
+};
+
+}  // namespace
+
+TEST(TaskSchedulerPriorityQueueTest, PushPopPeek) {
+  // Create test sequences.
+  scoped_refptr<Sequence> sequence_a(new Sequence);
+  sequence_a->PushTask(Task(FROM_HERE, DoNothing(),
+                            TaskTraits(TaskPriority::USER_VISIBLE),
+                            TimeDelta()));
+  SequenceSortKey sort_key_a = sequence_a->GetSortKey();
+
+  scoped_refptr<Sequence> sequence_b(new Sequence);
+  sequence_b->PushTask(Task(FROM_HERE, DoNothing(),
+                            TaskTraits(TaskPriority::USER_BLOCKING),
+                            TimeDelta()));
+  SequenceSortKey sort_key_b = sequence_b->GetSortKey();
+
+  scoped_refptr<Sequence> sequence_c(new Sequence);
+  sequence_c->PushTask(Task(FROM_HERE, DoNothing(),
+                            TaskTraits(TaskPriority::USER_BLOCKING),
+                            TimeDelta()));
+  SequenceSortKey sort_key_c = sequence_c->GetSortKey();
+
+  scoped_refptr<Sequence> sequence_d(new Sequence);
+  sequence_d->PushTask(Task(FROM_HERE, DoNothing(),
+                            TaskTraits(TaskPriority::BACKGROUND), TimeDelta()));
+  SequenceSortKey sort_key_d = sequence_d->GetSortKey();
+
+  // Create a PriorityQueue and a Transaction.
+  PriorityQueue pq;
+  auto transaction(pq.BeginTransaction());
+  EXPECT_TRUE(transaction->IsEmpty());
+
+  // Push |sequence_a| in the PriorityQueue. It becomes the sequence with the
+  // highest priority.
+  transaction->Push(sequence_a, sort_key_a);
+  EXPECT_EQ(sort_key_a, transaction->PeekSortKey());
+
+  // Push |sequence_b| in the PriorityQueue. It becomes the sequence with the
+  // highest priority.
+  transaction->Push(sequence_b, sort_key_b);
+  EXPECT_EQ(sort_key_b, transaction->PeekSortKey());
+
+  // Push |sequence_c| in the PriorityQueue. |sequence_b| is still the sequence
+  // with the highest priority.
+  transaction->Push(sequence_c, sort_key_c);
+  EXPECT_EQ(sort_key_b, transaction->PeekSortKey());
+
+  // Push |sequence_d| in the PriorityQueue. |sequence_b| is still the sequence
+  // with the highest priority.
+  transaction->Push(sequence_d, sort_key_d);
+  EXPECT_EQ(sort_key_b, transaction->PeekSortKey());
+
+  // Pop |sequence_b| from the PriorityQueue. |sequence_c| becomes the sequence
+  // with the highest priority.
+  EXPECT_EQ(sequence_b, transaction->PopSequence());
+  EXPECT_EQ(sort_key_c, transaction->PeekSortKey());
+
+  // Pop |sequence_c| from the PriorityQueue. |sequence_a| becomes the sequence
+  // with the highest priority.
+  EXPECT_EQ(sequence_c, transaction->PopSequence());
+  EXPECT_EQ(sort_key_a, transaction->PeekSortKey());
+
+  // Pop |sequence_a| from the PriorityQueue. |sequence_d| becomes the sequence
+  // with the highest priority.
+  EXPECT_EQ(sequence_a, transaction->PopSequence());
+  EXPECT_EQ(sort_key_d, transaction->PeekSortKey());
+
+  // Pop |sequence_d| from the PriorityQueue. It is now empty.
+  EXPECT_EQ(sequence_d, transaction->PopSequence());
+  EXPECT_TRUE(transaction->IsEmpty());
+}
+
+// Check that creating Transactions on the same thread for 2 unrelated
+// PriorityQueues causes a crash.
+TEST(TaskSchedulerPriorityQueueTest, IllegalTwoTransactionsSameThread) {
+  PriorityQueue pq_a;
+  PriorityQueue pq_b;
+
+  EXPECT_DCHECK_DEATH(
+      {
+        std::unique_ptr<PriorityQueue::Transaction> transaction_a =
+            pq_a.BeginTransaction();
+        std::unique_ptr<PriorityQueue::Transaction> transaction_b =
+            pq_b.BeginTransaction();
+      });
+}
+
+// Check that it is possible to begin multiple Transactions for the same
+// PriorityQueue on different threads. The call to BeginTransaction() on the
+// second thread should block until the Transaction has ended on the first
+// thread.
+TEST(TaskSchedulerPriorityQueueTest, TwoTransactionsTwoThreads) {
+  PriorityQueue pq;
+
+  // Call BeginTransaction() on this thread and keep the Transaction alive.
+  std::unique_ptr<PriorityQueue::Transaction> transaction =
+      pq.BeginTransaction();
+
+  // Call BeginTransaction() on another thread.
+  ThreadBeginningTransaction thread_beginning_transaction(&pq);
+  thread_beginning_transaction.Start();
+
+  // After a few milliseconds, the call to BeginTransaction() on the other
+  // thread should not have returned.
+  thread_beginning_transaction.ExpectTransactionDoesNotBegin();
+
+  // End the Transaction on the current thread.
+  transaction.reset();
+
+  // The other thread should exit after its call to BeginTransaction() returns.
+  thread_beginning_transaction.Join();
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/task_scheduler/scheduler_lock.h b/base/task_scheduler/scheduler_lock.h
new file mode 100644
index 0000000..c969eb1
--- /dev/null
+++ b/base/task_scheduler/scheduler_lock.h
@@ -0,0 +1,88 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_SCHEDULER_LOCK_H_
+#define BASE_TASK_SCHEDULER_SCHEDULER_LOCK_H_
+
+#include <memory>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/synchronization/lock.h"
+#include "base/task_scheduler/scheduler_lock_impl.h"
+
+namespace base {
+namespace internal {
+
+// SchedulerLock should be used anywhere a lock would be used in the scheduler.
+// When DCHECK_IS_ON(), lock checking occurs. Otherwise, SchedulerLock is
+// equivalent to base::Lock.
+//
+// The shape of SchedulerLock is as follows:
+// SchedulerLock()
+//     Default constructor, no predecessor lock.
+//     DCHECKs
+//         On Acquisition if any scheduler lock is acquired on this thread.
+//
+// SchedulerLock(const SchedulerLock* predecessor)
+//     Constructor that specifies an allowed predecessor for that lock.
+//     DCHECKs
+//         On Construction if |predecessor| forms a predecessor lock cycle.
+//         On Acquisition if the previous lock acquired on the thread is not
+//             |predecessor|. Okay if there was no previous lock acquired.
+//
+// void Acquire()
+//     Acquires the lock.
+//
+// void Release()
+//     Releases the lock.
+//
+// void AssertAcquired()
+//     DCHECKs if the lock is not acquired.
+//
+// std::unique_ptr<ConditionVariable> CreateConditionVariable()
+//     Creates a condition variable using this as a lock.
+
+#if DCHECK_IS_ON()
+class SchedulerLock : public SchedulerLockImpl {
+ public:
+  SchedulerLock() = default;
+  explicit SchedulerLock(const SchedulerLock* predecessor)
+      : SchedulerLockImpl(predecessor) {}
+};
+#else  // DCHECK_IS_ON()
+class SchedulerLock : public Lock {
+ public:
+  SchedulerLock() = default;
+  explicit SchedulerLock(const SchedulerLock*) {}
+
+  std::unique_ptr<ConditionVariable> CreateConditionVariable() {
+    return std::unique_ptr<ConditionVariable>(new ConditionVariable(this));
+  }
+};
+#endif  // DCHECK_IS_ON()
+
+// Provides the same functionality as base::AutoLock for SchedulerLock.
+class AutoSchedulerLock {
+ public:
+  explicit AutoSchedulerLock(SchedulerLock& lock) : lock_(lock) {
+    lock_.Acquire();
+  }
+
+  ~AutoSchedulerLock() {
+    lock_.AssertAcquired();
+    lock_.Release();
+  }
+
+ private:
+  SchedulerLock& lock_;
+
+  DISALLOW_COPY_AND_ASSIGN(AutoSchedulerLock);
+};
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_SCHEDULER_LOCK_H_
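A minimal usage sketch of the predecessor convention documented above,
assuming DCHECK_IS_ON(); ExampleScheduler and its members are illustrative
only:

#include "base/task_scheduler/scheduler_lock.h"

class ExampleScheduler {
 public:
  void DoLockedWork() {
    base::internal::AutoSchedulerLock outer(outer_lock_);
    // Legal: |outer_lock_| is |inner_lock_|'s declared predecessor and is the
    // last scheduler lock acquired on this thread.
    base::internal::AutoSchedulerLock inner(inner_lock_);
    // ... work under both locks ...
  }

 private:
  base::internal::SchedulerLock outer_lock_;
  base::internal::SchedulerLock inner_lock_{&outer_lock_};
};

Acquiring |inner_lock_| with no scheduler lock held is also legal; the DCHECK
only fires when some other scheduler lock is the most recent acquisition on
the thread.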
diff --git a/base/task_scheduler/scheduler_lock_impl.cc b/base/task_scheduler/scheduler_lock_impl.cc
new file mode 100644
index 0000000..d60f259
--- /dev/null
+++ b/base/task_scheduler/scheduler_lock_impl.cc
@@ -0,0 +1,156 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/scheduler_lock_impl.h"
+
+#include <algorithm>
+#include <unordered_map>
+#include <vector>
+
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread_local_storage.h"
+
+namespace base {
+namespace internal {
+
+namespace {
+
+class SafeAcquisitionTracker {
+ public:
+  SafeAcquisitionTracker() : tls_acquired_locks_(&OnTLSDestroy) {}
+
+  void RegisterLock(
+      const SchedulerLockImpl* const lock,
+      const SchedulerLockImpl* const predecessor) {
+    DCHECK_NE(lock, predecessor) << "Reentrant locks are unsupported.";
+    AutoLock auto_lock(allowed_predecessor_map_lock_);
+    allowed_predecessor_map_[lock] = predecessor;
+    AssertSafePredecessor(lock);
+  }
+
+  void UnregisterLock(const SchedulerLockImpl* const lock) {
+    AutoLock auto_lock(allowed_predecessor_map_lock_);
+    allowed_predecessor_map_.erase(lock);
+  }
+
+  void RecordAcquisition(const SchedulerLockImpl* const lock) {
+    AssertSafeAcquire(lock);
+    GetAcquiredLocksOnCurrentThread()->push_back(lock);
+  }
+
+  void RecordRelease(const SchedulerLockImpl* const lock) {
+    LockVector* acquired_locks = GetAcquiredLocksOnCurrentThread();
+    const auto iter_at_lock =
+        std::find(acquired_locks->begin(), acquired_locks->end(), lock);
+    DCHECK(iter_at_lock != acquired_locks->end());
+    acquired_locks->erase(iter_at_lock);
+  }
+
+ private:
+  using LockVector = std::vector<const SchedulerLockImpl*>;
+  using PredecessorMap = std::unordered_map<
+      const SchedulerLockImpl*, const SchedulerLockImpl*>;
+
+  // This asserts that the lock is safe to acquire. This means that this should
+  // be run before actually recording the acquisition.
+  void AssertSafeAcquire(const SchedulerLockImpl* const lock) {
+    const LockVector* acquired_locks = GetAcquiredLocksOnCurrentThread();
+
+    // If the thread currently holds no locks, this is inherently safe.
+    if (acquired_locks->empty())
+      return;
+
+    // Otherwise, make sure that the previous lock acquired is an allowed
+    // predecessor.
+    AutoLock auto_lock(allowed_predecessor_map_lock_);
+    // Using at() is exception-safe here as |lock| was registered already.
+    const SchedulerLockImpl* allowed_predecessor =
+        allowed_predecessor_map_.at(lock);
+    DCHECK_EQ(acquired_locks->back(), allowed_predecessor);
+  }
+
+  // Asserts that |lock|'s registered predecessor is safe. Because
+  // SchedulerLocks are registered at construction time and any predecessor
+  // specified on a SchedulerLock must already exist, the first registered
+  // SchedulerLock in a potential chain must have a null predecessor and is thus
+  // cycle-free. Any subsequent SchedulerLock with a predecessor must come from
+  // the set of registered SchedulerLocks. Since the registered SchedulerLocks
+  // only contain cycle-free SchedulerLocks, this subsequent SchedulerLock is
+  // itself cycle-free and may be safely added to the registered SchedulerLock
+  // set.
+  void AssertSafePredecessor(const SchedulerLockImpl* lock) const {
+    allowed_predecessor_map_lock_.AssertAcquired();
+    // Using at() is exception-safe here as |lock| was registered already.
+    const SchedulerLockImpl* predecessor = allowed_predecessor_map_.at(lock);
+    if (predecessor) {
+      DCHECK(allowed_predecessor_map_.find(predecessor) !=
+             allowed_predecessor_map_.end())
+          << "SchedulerLock was registered before its predecessor. "
+          << "Potential cycle detected";
+    }
+  }
+
+  LockVector* GetAcquiredLocksOnCurrentThread() {
+    if (!tls_acquired_locks_.Get())
+      tls_acquired_locks_.Set(new LockVector);
+
+    return reinterpret_cast<LockVector*>(tls_acquired_locks_.Get());
+  }
+
+  static void OnTLSDestroy(void* value) {
+    delete reinterpret_cast<LockVector*>(value);
+  }
+
+  // Synchronizes access to |allowed_predecessor_map_|.
+  Lock allowed_predecessor_map_lock_;
+
+  // A map of allowed predecessors.
+  PredecessorMap allowed_predecessor_map_;
+
+  // A thread-local slot holding a vector of locks currently acquired on the
+  // current thread.
+  ThreadLocalStorage::Slot tls_acquired_locks_;
+
+  DISALLOW_COPY_AND_ASSIGN(SafeAcquisitionTracker);
+};
+
+LazyInstance<SafeAcquisitionTracker>::Leaky g_safe_acquisition_tracker =
+    LAZY_INSTANCE_INITIALIZER;
+
+}  // namespace
+
+SchedulerLockImpl::SchedulerLockImpl() : SchedulerLockImpl(nullptr) {}
+
+SchedulerLockImpl::SchedulerLockImpl(const SchedulerLockImpl* predecessor) {
+  g_safe_acquisition_tracker.Get().RegisterLock(this, predecessor);
+}
+
+SchedulerLockImpl::~SchedulerLockImpl() {
+  g_safe_acquisition_tracker.Get().UnregisterLock(this);
+}
+
+void SchedulerLockImpl::Acquire() {
+  lock_.Acquire();
+  g_safe_acquisition_tracker.Get().RecordAcquisition(this);
+}
+
+void SchedulerLockImpl::Release() {
+  lock_.Release();
+  g_safe_acquisition_tracker.Get().RecordRelease(this);
+}
+
+void SchedulerLockImpl::AssertAcquired() const {
+  lock_.AssertAcquired();
+}
+
+std::unique_ptr<ConditionVariable>
+SchedulerLockImpl::CreateConditionVariable() {
+  return std::unique_ptr<ConditionVariable>(new ConditionVariable(&lock_));
+}
+
+}  // namespace internal
+}  // namespace base
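The tracker above boils down to a per-thread stack of held locks plus a
process-wide predecessor map. A simplified sketch of that scheme, using C++11
thread_local in place of base::ThreadLocalStorage::Slot and omitting the map
lock that the real tracker needs for cross-thread registration; ToyTracker is
illustrative only:

#include <algorithm>
#include <cassert>
#include <unordered_map>
#include <vector>

class ToyTracker {
 public:
  void RegisterLock(const void* lock, const void* predecessor) {
    predecessors_[lock] = predecessor;
  }

  void RecordAcquisition(const void* lock) {
    // A lock may be acquired when no lock is held, or directly after its
    // registered predecessor.
    assert(held_.empty() || held_.back() == predecessors_.at(lock));
    held_.push_back(lock);
  }

  void RecordRelease(const void* lock) {
    // Mirrors the real tracker: erase by find, so release order need not be
    // strictly LIFO.
    auto it = std::find(held_.begin(), held_.end(), lock);
    assert(it != held_.end());
    held_.erase(it);
  }

 private:
  std::unordered_map<const void*, const void*> predecessors_;
  static thread_local std::vector<const void*> held_;
};

thread_local std::vector<const void*> ToyTracker::held_;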
diff --git a/base/task_scheduler/scheduler_lock_impl.h b/base/task_scheduler/scheduler_lock_impl.h
new file mode 100644
index 0000000..65699bb
--- /dev/null
+++ b/base/task_scheduler/scheduler_lock_impl.h
@@ -0,0 +1,46 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_SCHEDULER_LOCK_IMPL_H_
+#define BASE_TASK_SCHEDULER_SCHEDULER_LOCK_IMPL_H_
+
+#include <memory>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+
+class ConditionVariable;
+
+namespace internal {
+
+// A regular lock with simple deadlock correctness checking.
+// This lock tracks all registered locks to make sure that locks are only
+// acquired in an allowed predecessor order.
+// See scheduler_lock.h for details.
+class BASE_EXPORT SchedulerLockImpl {
+ public:
+  SchedulerLockImpl();
+  explicit SchedulerLockImpl(const SchedulerLockImpl* predecessor);
+  ~SchedulerLockImpl();
+
+  void Acquire();
+  void Release();
+
+  void AssertAcquired() const;
+
+  std::unique_ptr<ConditionVariable> CreateConditionVariable();
+
+ private:
+  Lock lock_;
+
+  DISALLOW_COPY_AND_ASSIGN(SchedulerLockImpl);
+};
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_SCHEDULER_LOCK_IMPL_H_
diff --git a/base/task_scheduler/scheduler_lock_unittest.cc b/base/task_scheduler/scheduler_lock_unittest.cc
new file mode 100644
index 0000000..5518247
--- /dev/null
+++ b/base/task_scheduler/scheduler_lock_unittest.cc
@@ -0,0 +1,296 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/scheduler_lock.h"
+
+#include <stdlib.h>
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/rand_util.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/test/gtest_util.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/simple_thread.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace internal {
+namespace {
+
+// Adapted from base::Lock's BasicLockTestThread to make sure
+// Acquire()/Release() don't crash.
+class BasicLockTestThread : public SimpleThread {
+ public:
+  explicit BasicLockTestThread(SchedulerLock* lock)
+      : SimpleThread("BasicLockTestThread"),
+        lock_(lock),
+        acquired_(0) {}
+
+  int acquired() const { return acquired_; }
+
+ private:
+  void Run() override {
+    for (int i = 0; i < 10; i++) {
+      lock_->Acquire();
+      acquired_++;
+      lock_->Release();
+    }
+    for (int i = 0; i < 10; i++) {
+      lock_->Acquire();
+      acquired_++;
+      PlatformThread::Sleep(TimeDelta::FromMilliseconds(base::RandInt(0, 19)));
+      lock_->Release();
+    }
+  }
+
+  SchedulerLock* const lock_;
+  int acquired_;
+
+  DISALLOW_COPY_AND_ASSIGN(BasicLockTestThread);
+};
+
+class BasicLockAcquireAndWaitThread : public SimpleThread {
+ public:
+  explicit BasicLockAcquireAndWaitThread(SchedulerLock* lock)
+      : SimpleThread("BasicLockAcquireAndWaitThread"),
+        lock_(lock),
+        lock_acquire_event_(WaitableEvent::ResetPolicy::AUTOMATIC,
+                            WaitableEvent::InitialState::NOT_SIGNALED),
+        main_thread_continue_event_(WaitableEvent::ResetPolicy::AUTOMATIC,
+                                    WaitableEvent::InitialState::NOT_SIGNALED) {
+  }
+
+  void WaitForLockAcquisition() {
+    lock_acquire_event_.Wait();
+  }
+
+  void ContinueMain() {
+    main_thread_continue_event_.Signal();
+  }
+
+ private:
+  void Run() override {
+    lock_->Acquire();
+    lock_acquire_event_.Signal();
+    main_thread_continue_event_.Wait();
+    lock_->Release();
+  }
+
+  SchedulerLock* const lock_;
+  WaitableEvent lock_acquire_event_;
+  WaitableEvent main_thread_continue_event_;
+
+  DISALLOW_COPY_AND_ASSIGN(BasicLockAcquireAndWaitThread);
+};
+
+TEST(TaskSchedulerLock, Basic) {
+  SchedulerLock lock;
+  BasicLockTestThread thread(&lock);
+
+  thread.Start();
+
+  int acquired = 0;
+  for (int i = 0; i < 5; i++) {
+    lock.Acquire();
+    acquired++;
+    lock.Release();
+  }
+  for (int i = 0; i < 10; i++) {
+    lock.Acquire();
+    acquired++;
+    PlatformThread::Sleep(TimeDelta::FromMilliseconds(base::RandInt(0, 19)));
+    lock.Release();
+  }
+  for (int i = 0; i < 5; i++) {
+    lock.Acquire();
+    acquired++;
+    PlatformThread::Sleep(TimeDelta::FromMilliseconds(base::RandInt(0, 19)));
+    lock.Release();
+  }
+
+  thread.Join();
+
+  EXPECT_EQ(acquired, 20);
+  EXPECT_EQ(thread.acquired(), 20);
+}
+
+TEST(TaskSchedulerLock, AcquirePredecessor) {
+  SchedulerLock predecessor;
+  SchedulerLock lock(&predecessor);
+  predecessor.Acquire();
+  lock.Acquire();
+  lock.Release();
+  predecessor.Release();
+}
+
+TEST(TaskSchedulerLock, AcquirePredecessorWrongOrder) {
+  SchedulerLock predecessor;
+  SchedulerLock lock(&predecessor);
+  EXPECT_DCHECK_DEATH({
+    lock.Acquire();
+    predecessor.Acquire();
+  });
+}
+
+TEST(TaskSchedulerLock, AcquireNonPredecessor) {
+  SchedulerLock lock1;
+  SchedulerLock lock2;
+  EXPECT_DCHECK_DEATH({
+    lock1.Acquire();
+    lock2.Acquire();
+  });
+}
+
+TEST(TaskSchedulerLock, AcquireMultipleLocksInOrder) {
+  SchedulerLock lock1;
+  SchedulerLock lock2(&lock1);
+  SchedulerLock lock3(&lock2);
+  lock1.Acquire();
+  lock2.Acquire();
+  lock3.Acquire();
+  lock3.Release();
+  lock2.Release();
+  lock1.Release();
+}
+
+TEST(TaskSchedulerLock, AcquireMultipleLocksInTheMiddleOfAChain) {
+  SchedulerLock lock1;
+  SchedulerLock lock2(&lock1);
+  SchedulerLock lock3(&lock2);
+  lock2.Acquire();
+  lock3.Acquire();
+  lock3.Release();
+  lock2.Release();
+}
+
+TEST(TaskSchedulerLock, AcquireMultipleLocksNoTransitivity) {
+  SchedulerLock lock1;
+  SchedulerLock lock2(&lock1);
+  SchedulerLock lock3(&lock2);
+  EXPECT_DCHECK_DEATH({
+    lock1.Acquire();
+    lock3.Acquire();
+  });
+}
+
+TEST(TaskSchedulerLock, AcquireLocksDifferentThreadsSafely) {
+  SchedulerLock lock1;
+  SchedulerLock lock2;
+  BasicLockAcquireAndWaitThread thread(&lock1);
+  thread.Start();
+
+  lock2.Acquire();
+  thread.WaitForLockAcquisition();
+  thread.ContinueMain();
+  thread.Join();
+  lock2.Release();
+}
+
+TEST(TaskSchedulerLock,
+     AcquireLocksWithPredecessorDifferentThreadsSafelyPredecessorFirst) {
+  // A lock and its predecessor may be safely acquired on different threads.
+  // This Thread                Other Thread
+  // predecessor.Acquire()
+  //                            lock.Acquire()
+  // predecessor.Release()
+  //                            lock.Release()
+  SchedulerLock predecessor;
+  SchedulerLock lock(&predecessor);
+  predecessor.Acquire();
+  BasicLockAcquireAndWaitThread thread(&lock);
+  thread.Start();
+  thread.WaitForLockAcquisition();
+  predecessor.Release();
+  thread.ContinueMain();
+  thread.Join();
+}
+
+TEST(TaskSchedulerLock,
+     AcquireLocksWithPredecessorDifferentThreadsSafelyPredecessorLast) {
+  // A lock and its predecessor may be safely acquired on different threads.
+  // This Thread                Other Thread
+  // lock.Acquire()
+  //                            predecessor.Acquire()
+  // lock.Release()
+  //                            predecessor.Release()
+  SchedulerLock predecessor;
+  SchedulerLock lock(&predecessor);
+  lock.Acquire();
+  BasicLockAcquireAndWaitThread thread(&predecessor);
+  thread.Start();
+  thread.WaitForLockAcquisition();
+  lock.Release();
+  thread.ContinueMain();
+  thread.Join();
+}
+
+TEST(TaskSchedulerLock,
+     AcquireLocksWithPredecessorDifferentThreadsSafelyNoInterference) {
+  // Acquisition of an unrelated lock on another thread should not affect a
+  // legal lock acquisition with a predecessor on this thread.
+  // This Thread                Other Thread
+  // predecessor.Acquire()
+  //                            unrelated.Acquire()
+  // lock.Acquire()
+  //                            unrelated.Release()
+  // lock.Release()
+  // predecessor.Release()
+  SchedulerLock predecessor;
+  SchedulerLock lock(&predecessor);
+  predecessor.Acquire();
+  SchedulerLock unrelated;
+  BasicLockAcquireAndWaitThread thread(&unrelated);
+  thread.Start();
+  thread.WaitForLockAcquisition();
+  lock.Acquire();
+  thread.ContinueMain();
+  thread.Join();
+  lock.Release();
+  predecessor.Release();
+}
+
+TEST(TaskSchedulerLock, SelfReferentialLock) {
+  struct SelfReferentialLock {
+    SelfReferentialLock() : lock(&lock) {}
+
+    SchedulerLock lock;
+  };
+
+  EXPECT_DCHECK_DEATH({ SelfReferentialLock lock; });
+}
+
+TEST(TaskSchedulerLock, PredecessorCycle) {
+  struct LockCycle {
+    LockCycle() : lock1(&lock2), lock2(&lock1) {}
+
+    SchedulerLock lock1;
+    SchedulerLock lock2;
+  };
+
+  EXPECT_DCHECK_DEATH({ LockCycle cycle; });
+}
+
+TEST(TaskSchedulerLock, PredecessorLongerCycle) {
+  struct LockCycle {
+    LockCycle()
+        : lock1(&lock5),
+          lock2(&lock1),
+          lock3(&lock2),
+          lock4(&lock3),
+          lock5(&lock4) {}
+
+    SchedulerLock lock1;
+    SchedulerLock lock2;
+    SchedulerLock lock3;
+    SchedulerLock lock4;
+    SchedulerLock lock5;
+  };
+
+  EXPECT_DCHECK_DEATH({ LockCycle cycle; });
+}
+
+}  // namespace
+}  // namespace internal
+}  // namespace base
diff --git a/base/task_scheduler/scheduler_single_thread_task_runner_manager.cc b/base/task_scheduler/scheduler_single_thread_task_runner_manager.cc
new file mode 100644
index 0000000..5928f41
--- /dev/null
+++ b/base/task_scheduler/scheduler_single_thread_task_runner_manager.cc
@@ -0,0 +1,652 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/scheduler_single_thread_task_runner_manager.h"
+
+#include <algorithm>
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/memory/ptr_util.h"
+#include "base/single_thread_task_runner.h"
+#include "base/strings/stringprintf.h"
+#include "base/synchronization/atomic_flag.h"
+#include "base/task_scheduler/delayed_task_manager.h"
+#include "base/task_scheduler/scheduler_worker.h"
+#include "base/task_scheduler/sequence.h"
+#include "base/task_scheduler/task.h"
+#include "base/task_scheduler/task_tracker.h"
+#include "base/task_scheduler/task_traits.h"
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+
+#include "base/win/scoped_com_initializer.h"
+#endif  // defined(OS_WIN)
+
+namespace base {
+namespace internal {
+
+namespace {
+
+// Boolean indicating whether there's a SchedulerSingleThreadTaskRunnerManager
+// instance alive in this process. This variable should only be set when the
+// SchedulerSingleThreadTaskRunnerManager instance is brought up (on the main
+// thread; before any tasks are posted) and reset when the instance is
+// brought down (i.e., only when unit tests tear down the task environment and
+// never in production). This makes the variable const while worker threads are
+// up and as such it doesn't need to be atomic. It is used to tell when a task
+// is posted from the main thread after the task environment was brought down in
+// unit tests so that SchedulerSingleThreadTaskRunnerManager bound TaskRunners
+// can return false on PostTask, letting such callers know they should complete
+// necessary work synchronously. Note: |!g_manager_is_alive| is generally
+// equivalent to |!TaskScheduler::GetInstance()| but has the advantage of being
+// valid in task_scheduler unit tests that don't instantiate a full
+// TaskScheduler.
+bool g_manager_is_alive = false;
+
+// Allows for checking the PlatformThread::CurrentRef() against a set
+// PlatformThreadRef atomically without using locks.
+class AtomicThreadRefChecker {
+ public:
+  AtomicThreadRefChecker() = default;
+  ~AtomicThreadRefChecker() = default;
+
+  void Set() {
+    thread_ref_ = PlatformThread::CurrentRef();
+    is_set_.Set();
+  }
+
+  bool IsCurrentThreadSameAsSetThread() {
+    return is_set_.IsSet() && thread_ref_ == PlatformThread::CurrentRef();
+  }
+
+ private:
+  AtomicFlag is_set_;
+  PlatformThreadRef thread_ref_;
+
+  DISALLOW_COPY_AND_ASSIGN(AtomicThreadRefChecker);
+};
+
+class SchedulerWorkerDelegate : public SchedulerWorker::Delegate {
+ public:
+  SchedulerWorkerDelegate(const std::string& thread_name,
+                          SchedulerWorker::ThreadLabel thread_label)
+      : thread_name_(thread_name), thread_label_(thread_label) {}
+
+  void set_worker(SchedulerWorker* worker) {
+    DCHECK(!worker_);
+    worker_ = worker;
+  }
+
+  // SchedulerWorker::Delegate:
+  void OnCanScheduleSequence(scoped_refptr<Sequence> sequence) override {
+    DCHECK(worker_);
+    ReEnqueueSequence(std::move(sequence));
+    worker_->WakeUp();
+  }
+
+  SchedulerWorker::ThreadLabel GetThreadLabel() const final {
+    return thread_label_;
+  }
+
+  void OnMainEntry(const SchedulerWorker* /* worker */) override {
+    thread_ref_checker_.Set();
+    PlatformThread::SetName(thread_name_);
+  }
+
+  scoped_refptr<Sequence> GetWork(SchedulerWorker* worker) override {
+    AutoSchedulerLock auto_lock(sequence_lock_);
+    bool has_work = has_work_;
+    has_work_ = false;
+    return has_work ? sequence_ : nullptr;
+  }
+
+  void DidRunTask() override {}
+
+  void ReEnqueueSequence(scoped_refptr<Sequence> sequence) override {
+    AutoSchedulerLock auto_lock(sequence_lock_);
+    // We've shut down, so no-op this work request. Any sequence cleanup will
+    // occur in the caller's context.
+    if (!sequence_)
+      return;
+
+    DCHECK_EQ(sequence, sequence_);
+    DCHECK(!has_work_);
+    has_work_ = true;
+  }
+
+  TimeDelta GetSleepTimeout() override { return TimeDelta::Max(); }
+
+  bool RunsTasksInCurrentSequence() {
+    // We check the thread ref instead of the sequence for the benefit of COM
+    // callbacks which may execute without a sequence context.
+    return thread_ref_checker_.IsCurrentThreadSameAsSetThread();
+  }
+
+  void OnMainExit(SchedulerWorker* /* worker */) override {
+    // Move |sequence_| to |local_sequence| so that if we have the last
+    // reference to the sequence we don't destroy it (and its tasks) within
+    // |sequence_lock_|.
+    scoped_refptr<Sequence> local_sequence;
+    {
+      AutoSchedulerLock auto_lock(sequence_lock_);
+      // To reclaim skipped tasks on shutdown, we null out the sequence to allow
+      // the tasks to destroy themselves.
+      local_sequence = std::move(sequence_);
+    }
+  }
+
+  // SchedulerWorkerDelegate:
+
+  // Consumers should release their sequence reference as soon as possible to
+  // ensure timely cleanup for general shutdown.
+  scoped_refptr<Sequence> sequence() {
+    AutoSchedulerLock auto_lock(sequence_lock_);
+    return sequence_;
+  }
+
+ private:
+  const std::string thread_name_;
+  const SchedulerWorker::ThreadLabel thread_label_;
+
+  // The SchedulerWorker that has |this| as a delegate. Must be set before
+  // starting or posting a task to the SchedulerWorker, because it's used in
+  // OnMainEntry() and OnCanScheduleSequence() (called when a sequence held up
+  // by WillScheduleSequence() in PostTaskNow() can be scheduled).
+  SchedulerWorker* worker_ = nullptr;
+
+  // Synchronizes access to |sequence_| and |has_work_|.
+  SchedulerLock sequence_lock_;
+  scoped_refptr<Sequence> sequence_ = new Sequence;
+  bool has_work_ = false;
+
+  AtomicThreadRefChecker thread_ref_checker_;
+
+  DISALLOW_COPY_AND_ASSIGN(SchedulerWorkerDelegate);
+};
+
+#if defined(OS_WIN)
+
+class SchedulerWorkerCOMDelegate : public SchedulerWorkerDelegate {
+ public:
+  SchedulerWorkerCOMDelegate(const std::string& thread_name,
+                             SchedulerWorker::ThreadLabel thread_label,
+                             TrackedRef<TaskTracker> task_tracker)
+      : SchedulerWorkerDelegate(thread_name, thread_label),
+        task_tracker_(std::move(task_tracker)) {}
+
+  ~SchedulerWorkerCOMDelegate() override { DCHECK(!scoped_com_initializer_); }
+
+  // SchedulerWorker::Delegate:
+  void OnMainEntry(const SchedulerWorker* worker) override {
+    SchedulerWorkerDelegate::OnMainEntry(worker);
+
+    scoped_com_initializer_ = std::make_unique<win::ScopedCOMInitializer>();
+  }
+
+  scoped_refptr<Sequence> GetWork(SchedulerWorker* worker) override {
+    // The scheme below covers the following scenarios:
+    // * Only SchedulerWorkerDelegate::GetWork() has work:
+    //   Always return the sequence from GetWork().
+    // * Only the Windows Message Queue has work:
+    //   Always return the sequence from GetWorkFromWindowsMessageQueue();
+    // * Both SchedulerWorkerDelegate::GetWork() and the Windows Message Queue
+    //   have work:
+    //   Process sequences from each source round-robin style.
+    scoped_refptr<Sequence> sequence;
+    if (get_work_first_) {
+      sequence = SchedulerWorkerDelegate::GetWork(worker);
+      if (sequence)
+        get_work_first_ = false;
+    }
+
+    if (!sequence) {
+      sequence = GetWorkFromWindowsMessageQueue();
+      if (sequence)
+        get_work_first_ = true;
+    }
+
+    if (!sequence && !get_work_first_) {
+      // This case is important if we checked the Windows Message Queue first
+      // and found there was no work. We don't want to return null immediately
+      // as that could cause the thread to go to sleep while work is waiting via
+      // SchedulerWorkerDelegate::GetWork().
+      sequence = SchedulerWorkerDelegate::GetWork(worker);
+    }
+    return sequence;
+  }
+
+  void OnMainExit(SchedulerWorker* /* worker */) override {
+    scoped_com_initializer_.reset();
+  }
+
+  void WaitForWork(WaitableEvent* wake_up_event) override {
+    DCHECK(wake_up_event);
+    const TimeDelta sleep_time = GetSleepTimeout();
+    const DWORD milliseconds_wait =
+        sleep_time.is_max() ? INFINITE : sleep_time.InMilliseconds();
+    const HANDLE wake_up_event_handle = wake_up_event->handle();
+    MsgWaitForMultipleObjectsEx(1, &wake_up_event_handle, milliseconds_wait,
+                                QS_ALLINPUT, 0);
+  }
+
+ private:
+  scoped_refptr<Sequence> GetWorkFromWindowsMessageQueue() {
+    MSG msg;
+    if (PeekMessage(&msg, nullptr, 0, 0, PM_REMOVE) != FALSE) {
+      Task pump_message_task(FROM_HERE,
+                             Bind(
+                                 [](MSG msg) {
+                                   TranslateMessage(&msg);
+                                   DispatchMessage(&msg);
+                                 },
+                                 std::move(msg)),
+                             TaskTraits(MayBlock()), TimeDelta());
+      if (task_tracker_->WillPostTask(pump_message_task)) {
+        bool was_empty =
+            message_pump_sequence_->PushTask(std::move(pump_message_task));
+        DCHECK(was_empty) << "GetWorkFromWindowsMessageQueue() does not expect "
+                             "queueing of pump tasks.";
+        return message_pump_sequence_;
+      }
+    }
+    return nullptr;
+  }
+
+  bool get_work_first_ = true;
+  const scoped_refptr<Sequence> message_pump_sequence_ = new Sequence;
+  const TrackedRef<TaskTracker> task_tracker_;
+  std::unique_ptr<win::ScopedCOMInitializer> scoped_com_initializer_;
+
+  DISALLOW_COPY_AND_ASSIGN(SchedulerWorkerCOMDelegate);
+};
+
+#endif  // defined(OS_WIN)
+
+}  // namespace
+
+class SchedulerSingleThreadTaskRunnerManager::SchedulerSingleThreadTaskRunner
+    : public SingleThreadTaskRunner {
+ public:
+  // Constructs a SchedulerSingleThreadTaskRunner that indirectly controls the
+  // lifetime of a dedicated |worker| for |traits|.
+  SchedulerSingleThreadTaskRunner(
+      SchedulerSingleThreadTaskRunnerManager* const outer,
+      const TaskTraits& traits,
+      SchedulerWorker* worker,
+      SingleThreadTaskRunnerThreadMode thread_mode)
+      : outer_(outer),
+        traits_(traits),
+        worker_(worker),
+        thread_mode_(thread_mode) {
+    DCHECK(outer_);
+    DCHECK(worker_);
+  }
+
+  // SingleThreadTaskRunner:
+  bool PostDelayedTask(const Location& from_here,
+                       OnceClosure closure,
+                       TimeDelta delay) override {
+    if (!g_manager_is_alive)
+      return false;
+
+    Task task(from_here, std::move(closure), traits_, delay);
+    task.single_thread_task_runner_ref = this;
+
+    if (!outer_->task_tracker_->WillPostTask(task))
+      return false;
+
+    if (task.delayed_run_time.is_null()) {
+      PostTaskNow(std::move(task));
+    } else {
+      outer_->delayed_task_manager_->AddDelayedTask(
+          std::move(task),
+          BindOnce(&SchedulerSingleThreadTaskRunner::PostTaskNow,
+                   Unretained(this)));
+    }
+    return true;
+  }
+
+  bool PostNonNestableDelayedTask(const Location& from_here,
+                                  OnceClosure closure,
+                                  TimeDelta delay) override {
+    // Tasks are never nested within the task scheduler.
+    return PostDelayedTask(from_here, std::move(closure), delay);
+  }
+
+  bool RunsTasksInCurrentSequence() const override {
+    if (!g_manager_is_alive)
+      return false;
+    return GetDelegate()->RunsTasksInCurrentSequence();
+  }
+
+ private:
+  ~SchedulerSingleThreadTaskRunner() override {
+    // Only unregister if this is a DEDICATED SingleThreadTaskRunner. SHARED
+    // task runner SchedulerWorkers are managed separately as they are reused.
+    // |g_manager_is_alive| avoids a use-after-free should this
+    // SchedulerSingleThreadTaskRunner outlive its manager. It is safe to access
+    // |g_manager_is_alive| without synchronization primitives as it is const
+    // for the lifetime of the manager and ~SchedulerSingleThreadTaskRunner()
+    // either happens prior to the end of JoinForTesting() (which happens-before
+    // manager's destruction) or on main thread after the task environment's
+    // entire destruction (which happens-after the manager's destruction). Yes,
+    // there's a theoretical use case where the last ref to this
+    // SchedulerSingleThreadTaskRunner is handed to a thread not controlled by
+    // task_scheduler and that this ends up causing
+    // ~SchedulerSingleThreadTaskRunner() to race with
+    // ~SchedulerSingleThreadTaskRunnerManager() but this is intentionally not
+    // supported (and it doesn't matter in production where we leak the task
+    // environment for such reasons). TSan should catch this weird paradigm
+    // should anyone elect to use it in a unit test and the error would point
+    // here.
+    if (g_manager_is_alive &&
+        thread_mode_ == SingleThreadTaskRunnerThreadMode::DEDICATED) {
+      outer_->UnregisterSchedulerWorker(worker_);
+    }
+  }
+
+  void PostTaskNow(Task task) {
+    scoped_refptr<Sequence> sequence = GetDelegate()->sequence();
+    // If |sequence| is null, then the thread is effectively gone (either
+    // shutdown or joined).
+    if (!sequence)
+      return;
+
+    const bool sequence_was_empty = sequence->PushTask(std::move(task));
+    if (sequence_was_empty) {
+      sequence = outer_->task_tracker_->WillScheduleSequence(
+          std::move(sequence), GetDelegate());
+      if (sequence) {
+        GetDelegate()->ReEnqueueSequence(std::move(sequence));
+        worker_->WakeUp();
+      }
+    }
+  }
+
+  SchedulerWorkerDelegate* GetDelegate() const {
+    return static_cast<SchedulerWorkerDelegate*>(worker_->delegate());
+  }
+
+  SchedulerSingleThreadTaskRunnerManager* const outer_;
+  const TaskTraits traits_;
+  SchedulerWorker* const worker_;
+  const SingleThreadTaskRunnerThreadMode thread_mode_;
+
+  DISALLOW_COPY_AND_ASSIGN(SchedulerSingleThreadTaskRunner);
+};
+
+SchedulerSingleThreadTaskRunnerManager::SchedulerSingleThreadTaskRunnerManager(
+    TrackedRef<TaskTracker> task_tracker,
+    DelayedTaskManager* delayed_task_manager)
+    : task_tracker_(std::move(task_tracker)),
+      delayed_task_manager_(delayed_task_manager) {
+  DCHECK(task_tracker_);
+  DCHECK(delayed_task_manager_);
+#if defined(OS_WIN)
+  static_assert(arraysize(shared_com_scheduler_workers_) ==
+                    arraysize(shared_scheduler_workers_),
+                "The size of |shared_com_scheduler_workers_| must match "
+                "|shared_scheduler_workers_|");
+  static_assert(arraysize(shared_com_scheduler_workers_[0]) ==
+                    arraysize(shared_scheduler_workers_[0]),
+                "The size of |shared_com_scheduler_workers_| must match "
+                "|shared_scheduler_workers_|");
+#endif  // defined(OS_WIN)
+  DCHECK(!g_manager_is_alive);
+  g_manager_is_alive = true;
+}
+
+SchedulerSingleThreadTaskRunnerManager::
+    ~SchedulerSingleThreadTaskRunnerManager() {
+  DCHECK(g_manager_is_alive);
+  g_manager_is_alive = false;
+}
+
+void SchedulerSingleThreadTaskRunnerManager::Start(
+    SchedulerWorkerObserver* scheduler_worker_observer) {
+  DCHECK(!scheduler_worker_observer_);
+  scheduler_worker_observer_ = scheduler_worker_observer;
+
+  decltype(workers_) workers_to_start;
+  {
+    AutoSchedulerLock auto_lock(lock_);
+    started_ = true;
+    workers_to_start = workers_;
+  }
+
+  // Start workers that were created before this method was called. Other
+  // workers are started as they are created.
+  for (scoped_refptr<SchedulerWorker> worker : workers_to_start) {
+    worker->Start(scheduler_worker_observer_);
+    worker->WakeUp();
+  }
+}
+
+scoped_refptr<SingleThreadTaskRunner>
+SchedulerSingleThreadTaskRunnerManager::CreateSingleThreadTaskRunnerWithTraits(
+    const TaskTraits& traits,
+    SingleThreadTaskRunnerThreadMode thread_mode) {
+  return CreateTaskRunnerWithTraitsImpl<SchedulerWorkerDelegate>(traits,
+                                                                 thread_mode);
+}
+
+#if defined(OS_WIN)
+scoped_refptr<SingleThreadTaskRunner>
+SchedulerSingleThreadTaskRunnerManager::CreateCOMSTATaskRunnerWithTraits(
+    const TaskTraits& traits,
+    SingleThreadTaskRunnerThreadMode thread_mode) {
+  return CreateTaskRunnerWithTraitsImpl<SchedulerWorkerCOMDelegate>(
+      traits, thread_mode);
+}
+#endif  // defined(OS_WIN)
+
+// static
+SchedulerSingleThreadTaskRunnerManager::ContinueOnShutdown
+SchedulerSingleThreadTaskRunnerManager::TraitsToContinueOnShutdown(
+    const TaskTraits& traits) {
+  if (traits.shutdown_behavior() == TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN)
+    return IS_CONTINUE_ON_SHUTDOWN;
+  return IS_NOT_CONTINUE_ON_SHUTDOWN;
+}
+
+template <typename DelegateType>
+scoped_refptr<
+    SchedulerSingleThreadTaskRunnerManager::SchedulerSingleThreadTaskRunner>
+SchedulerSingleThreadTaskRunnerManager::CreateTaskRunnerWithTraitsImpl(
+    const TaskTraits& traits,
+    SingleThreadTaskRunnerThreadMode thread_mode) {
+  DCHECK(thread_mode != SingleThreadTaskRunnerThreadMode::SHARED ||
+         !traits.with_base_sync_primitives())
+      << "Using WithBaseSyncPrimitives() on a shared SingleThreadTaskRunner "
+         "may cause deadlocks. Either reevaluate your usage (e.g. use "
+         "SequencedTaskRunner) or use "
+         "SingleThreadTaskRunnerThreadMode::DEDICATED.";
+  // To simplify the code, |dedicated_worker| is a local-only variable that
+  // allows the code to treat both the DEDICATED and SHARED cases similarly for
+  // SingleThreadTaskRunnerThreadMode. In DEDICATED, the scoped_refptr is backed
+  // by a local variable and in SHARED, the scoped_refptr is backed by a member
+  // variable.
+  SchedulerWorker* dedicated_worker = nullptr;
+  SchedulerWorker*& worker =
+      thread_mode == SingleThreadTaskRunnerThreadMode::DEDICATED
+          ? dedicated_worker
+          : GetSharedSchedulerWorkerForTraits<DelegateType>(traits);
+  bool new_worker = false;
+  bool started;
+  {
+    AutoSchedulerLock auto_lock(lock_);
+    if (!worker) {
+      const auto& environment_params =
+          kEnvironmentParams[GetEnvironmentIndexForTraits(traits)];
+      std::string worker_name;
+      if (thread_mode == SingleThreadTaskRunnerThreadMode::SHARED)
+        worker_name += "Shared";
+      worker_name += environment_params.name_suffix;
+      worker = CreateAndRegisterSchedulerWorker<DelegateType>(
+          worker_name, thread_mode, environment_params.priority_hint);
+      new_worker = true;
+    }
+    started = started_;
+  }
+
+  if (new_worker && started)
+    worker->Start(scheduler_worker_observer_);
+
+  return MakeRefCounted<SchedulerSingleThreadTaskRunner>(this, traits, worker,
+                                                         thread_mode);
+}
+
+void SchedulerSingleThreadTaskRunnerManager::JoinForTesting() {
+  decltype(workers_) local_workers;
+  {
+    AutoSchedulerLock auto_lock(lock_);
+    local_workers = std::move(workers_);
+  }
+
+  for (const auto& worker : local_workers)
+    worker->JoinForTesting();
+
+  {
+    AutoSchedulerLock auto_lock(lock_);
+    DCHECK(workers_.empty())
+        << "New worker(s) unexpectedly registered during join.";
+    workers_ = std::move(local_workers);
+  }
+
+  // Release shared SchedulerWorkers at the end so they get joined above. If
+  // this call happens before the joins, the SchedulerWorkers are effectively
+  // detached and may outlive the SchedulerSingleThreadTaskRunnerManager.
+  ReleaseSharedSchedulerWorkers();
+}
+
+template <>
+std::unique_ptr<SchedulerWorkerDelegate>
+SchedulerSingleThreadTaskRunnerManager::CreateSchedulerWorkerDelegate<
+    SchedulerWorkerDelegate>(const std::string& name,
+                             int id,
+                             SingleThreadTaskRunnerThreadMode thread_mode) {
+  return std::make_unique<SchedulerWorkerDelegate>(
+      StringPrintf("TaskSchedulerSingleThread%s%d", name.c_str(), id),
+      thread_mode == SingleThreadTaskRunnerThreadMode::DEDICATED
+          ? SchedulerWorker::ThreadLabel::DEDICATED
+          : SchedulerWorker::ThreadLabel::SHARED);
+}
+
+#if defined(OS_WIN)
+template <>
+std::unique_ptr<SchedulerWorkerDelegate>
+SchedulerSingleThreadTaskRunnerManager::CreateSchedulerWorkerDelegate<
+    SchedulerWorkerCOMDelegate>(const std::string& name,
+                                int id,
+                                SingleThreadTaskRunnerThreadMode thread_mode) {
+  return std::make_unique<SchedulerWorkerCOMDelegate>(
+      StringPrintf("TaskSchedulerSingleThreadCOMSTA%s%d", name.c_str(), id),
+      thread_mode == SingleThreadTaskRunnerThreadMode::DEDICATED
+          ? SchedulerWorker::ThreadLabel::DEDICATED_COM
+          : SchedulerWorker::ThreadLabel::SHARED_COM,
+      task_tracker_);
+}
+#endif  // defined(OS_WIN)
+
+template <typename DelegateType>
+SchedulerWorker*
+SchedulerSingleThreadTaskRunnerManager::CreateAndRegisterSchedulerWorker(
+    const std::string& name,
+    SingleThreadTaskRunnerThreadMode thread_mode,
+    ThreadPriority priority_hint) {
+  lock_.AssertAcquired();
+  int id = next_worker_id_++;
+  std::unique_ptr<SchedulerWorkerDelegate> delegate =
+      CreateSchedulerWorkerDelegate<DelegateType>(name, id, thread_mode);
+  SchedulerWorkerDelegate* delegate_raw = delegate.get();
+  scoped_refptr<SchedulerWorker> worker = MakeRefCounted<SchedulerWorker>(
+      priority_hint, std::move(delegate), task_tracker_);
+  delegate_raw->set_worker(worker.get());
+  workers_.emplace_back(std::move(worker));
+  return workers_.back().get();
+}
+
+template <>
+SchedulerWorker*&
+SchedulerSingleThreadTaskRunnerManager::GetSharedSchedulerWorkerForTraits<
+    SchedulerWorkerDelegate>(const TaskTraits& traits) {
+  return shared_scheduler_workers_[GetEnvironmentIndexForTraits(traits)]
+                                  [TraitsToContinueOnShutdown(traits)];
+}
+
+#if defined(OS_WIN)
+template <>
+SchedulerWorker*&
+SchedulerSingleThreadTaskRunnerManager::GetSharedSchedulerWorkerForTraits<
+    SchedulerWorkerCOMDelegate>(const TaskTraits& traits) {
+  return shared_com_scheduler_workers_[GetEnvironmentIndexForTraits(traits)]
+                                      [TraitsToContinueOnShutdown(traits)];
+}
+#endif  // defined(OS_WIN)
+
+void SchedulerSingleThreadTaskRunnerManager::UnregisterSchedulerWorker(
+    SchedulerWorker* worker) {
+  // Cleanup uses a SchedulerLock, so call Cleanup() after releasing
+  // |lock_|.
+  scoped_refptr<SchedulerWorker> worker_to_destroy;
+  {
+    AutoSchedulerLock auto_lock(lock_);
+
+    // Skip when joining (the join logic takes care of the rest).
+    if (workers_.empty())
+      return;
+
+    auto worker_iter =
+        std::find_if(workers_.begin(), workers_.end(),
+                     [worker](const scoped_refptr<SchedulerWorker>& candidate) {
+                       return candidate.get() == worker;
+                     });
+    DCHECK(worker_iter != workers_.end());
+    worker_to_destroy = std::move(*worker_iter);
+    workers_.erase(worker_iter);
+  }
+  worker_to_destroy->Cleanup();
+}
+
+void SchedulerSingleThreadTaskRunnerManager::ReleaseSharedSchedulerWorkers() {
+  decltype(shared_scheduler_workers_) local_shared_scheduler_workers;
+#if defined(OS_WIN)
+  decltype(shared_com_scheduler_workers_) local_shared_com_scheduler_workers;
+#endif
+  {
+    AutoSchedulerLock auto_lock(lock_);
+    for (size_t i = 0; i < arraysize(shared_scheduler_workers_); ++i) {
+      for (size_t j = 0; j < arraysize(shared_scheduler_workers_[i]); ++j) {
+        local_shared_scheduler_workers[i][j] = shared_scheduler_workers_[i][j];
+        shared_scheduler_workers_[i][j] = nullptr;
+#if defined(OS_WIN)
+        local_shared_com_scheduler_workers[i][j] =
+            shared_com_scheduler_workers_[i][j];
+        shared_com_scheduler_workers_[i][j] = nullptr;
+#endif
+      }
+    }
+  }
+
+  for (size_t i = 0; i < arraysize(local_shared_scheduler_workers); ++i) {
+    for (size_t j = 0; j < arraysize(local_shared_scheduler_workers[i]); ++j) {
+      if (local_shared_scheduler_workers[i][j])
+        UnregisterSchedulerWorker(local_shared_scheduler_workers[i][j]);
+#if defined(OS_WIN)
+      if (local_shared_com_scheduler_workers[i][j])
+        UnregisterSchedulerWorker(local_shared_com_scheduler_workers[i][j]);
+#endif
+    }
+  }
+}
+
+}  // namespace internal
+}  // namespace base
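SchedulerWorkerCOMDelegate::GetWork() above interleaves two work sources. The
same round-robin structure, reduced to a standalone sketch: fetch_a and
fetch_b are placeholders for SchedulerWorkerDelegate::GetWork() and the
Windows Message Queue respectively, and Work is assumed to be pointer-like
(contextually convertible to bool, like scoped_refptr<Sequence>).

#include <functional>

template <typename Work>
Work GetWorkRoundRobin(bool& a_first,
                       const std::function<Work()>& fetch_a,
                       const std::function<Work()>& fetch_b) {
  Work work{};
  if (a_first) {
    work = fetch_a();
    if (work)
      a_first = false;  // Source B gets the next turn.
  }
  if (!work) {
    work = fetch_b();
    if (work)
      a_first = true;  // Source A gets the next turn.
  }
  if (!work && !a_first) {
    // B was consulted first and was empty; fall back to A rather than letting
    // the worker sleep while A still has pending work.
    work = fetch_a();
  }
  return work;
}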
diff --git a/base/task_scheduler/scheduler_single_thread_task_runner_manager.h b/base/task_scheduler/scheduler_single_thread_task_runner_manager.h
new file mode 100644
index 0000000..b25230d
--- /dev/null
+++ b/base/task_scheduler/scheduler_single_thread_task_runner_manager.h
@@ -0,0 +1,155 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_SCHEDULER_SINGLE_THREAD_TASK_RUNNER_MANAGER_H_
+#define BASE_TASK_SCHEDULER_SCHEDULER_SINGLE_THREAD_TASK_RUNNER_MANAGER_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/task_scheduler/environment_config.h"
+#include "base/task_scheduler/scheduler_lock.h"
+#include "base/task_scheduler/single_thread_task_runner_thread_mode.h"
+#include "base/task_scheduler/tracked_ref.h"
+#include "base/threading/platform_thread.h"
+#include "build/build_config.h"
+
+namespace base {
+
+class TaskTraits;
+class SchedulerWorkerObserver;
+class SingleThreadTaskRunner;
+
+namespace internal {
+
+class DelayedTaskManager;
+class SchedulerWorker;
+class TaskTracker;
+
+namespace {
+
+class SchedulerWorkerDelegate;
+
+}  // namespace
+
+// Manages a pool of threads which are each associated with one or more
+// SingleThreadTaskRunners.
+//
+// SingleThreadTaskRunners using SingleThreadTaskRunnerThreadMode::SHARED are
+// backed by shared SchedulerWorkers for each COM+task environment combination.
+// These workers are lazily instantiated and then only reclaimed during
+// JoinForTesting().
+//
+// No threads are created (and hence no tasks can run) before Start() is called.
+//
+// This class is thread-safe.
+class BASE_EXPORT SchedulerSingleThreadTaskRunnerManager final {
+ public:
+  SchedulerSingleThreadTaskRunnerManager(
+      TrackedRef<TaskTracker> task_tracker,
+      DelayedTaskManager* delayed_task_manager);
+  ~SchedulerSingleThreadTaskRunnerManager();
+
+  // Starts threads for existing SingleThreadTaskRunners and allows threads to
+  // be started when SingleThreadTaskRunners are created in the future. If
+  // specified, |scheduler_worker_observer| will be notified when a worker
+  // enters and exits its main function. It must not be destroyed before
+  // JoinForTesting() has returned (must never be destroyed in production).
+  void Start(SchedulerWorkerObserver* scheduler_worker_observer = nullptr);
+
+  // Creates a SingleThreadTaskRunner which runs tasks with |traits| on a thread
+  // named "TaskSchedulerSingleThread[Shared]" +
+  // kEnvironmentParams[GetEnvironmentIndexForTraits(traits)].name_suffix +
+  // index.
+  scoped_refptr<SingleThreadTaskRunner> CreateSingleThreadTaskRunnerWithTraits(
+      const TaskTraits& traits,
+      SingleThreadTaskRunnerThreadMode thread_mode);
+
+#if defined(OS_WIN)
+  // Creates a SingleThreadTaskRunner which runs tasks with |traits| on a COM
+  // STA thread named "TaskSchedulerSingleThreadCOMSTA[Shared]" +
+  // kEnvironmentParams[GetEnvironmentIndexForTraits(traits)].name_suffix +
+  // index.
+  scoped_refptr<SingleThreadTaskRunner> CreateCOMSTATaskRunnerWithTraits(
+      const TaskTraits& traits,
+      SingleThreadTaskRunnerThreadMode thread_mode);
+#endif  // defined(OS_WIN)
+
+  void JoinForTesting();
+
+ private:
+  class SchedulerSingleThreadTaskRunner;
+
+  enum ContinueOnShutdown {
+    IS_CONTINUE_ON_SHUTDOWN,
+    IS_NOT_CONTINUE_ON_SHUTDOWN,
+    CONTINUE_ON_SHUTDOWN_COUNT,
+  };
+
+  static ContinueOnShutdown TraitsToContinueOnShutdown(
+      const TaskTraits& traits);
+
+  template <typename DelegateType>
+  scoped_refptr<SchedulerSingleThreadTaskRunner> CreateTaskRunnerWithTraitsImpl(
+      const TaskTraits& traits,
+      SingleThreadTaskRunnerThreadMode thread_mode);
+
+  template <typename DelegateType>
+  std::unique_ptr<SchedulerWorkerDelegate> CreateSchedulerWorkerDelegate(
+      const std::string& name,
+      int id,
+      SingleThreadTaskRunnerThreadMode thread_mode);
+
+  template <typename DelegateType>
+  SchedulerWorker* CreateAndRegisterSchedulerWorker(
+      const std::string& name,
+      SingleThreadTaskRunnerThreadMode thread_mode,
+      ThreadPriority priority_hint);
+
+  template <typename DelegateType>
+  SchedulerWorker*& GetSharedSchedulerWorkerForTraits(const TaskTraits& traits);
+
+  void UnregisterSchedulerWorker(SchedulerWorker* worker);
+
+  void ReleaseSharedSchedulerWorkers();
+
+  const TrackedRef<TaskTracker> task_tracker_;
+  DelayedTaskManager* const delayed_task_manager_;
+
+  // Optional observer notified when a worker enters and exits its main
+  // function. Set in Start() and never modified afterwards.
+  SchedulerWorkerObserver* scheduler_worker_observer_ = nullptr;
+
+  // Synchronizes access to all members below.
+  SchedulerLock lock_;
+  std::vector<scoped_refptr<SchedulerWorker>> workers_;
+  int next_worker_id_ = 0;
+
+  // Workers for SingleThreadTaskRunnerThreadMode::SHARED tasks. It is
+  // important to have separate threads for CONTINUE_ON_SHUTDOWN and non-
+  // CONTINUE_ON_SHUTDOWN to avoid being in a situation where a
+  // CONTINUE_ON_SHUTDOWN task effectively blocks shutdown by preventing a
+  // BLOCK_SHUTDOWN task from being scheduled. https://crbug.com/829786
+  SchedulerWorker* shared_scheduler_workers_[ENVIRONMENT_COUNT]
+                                            [CONTINUE_ON_SHUTDOWN_COUNT] = {};
+#if defined(OS_WIN)
+  SchedulerWorker* shared_com_scheduler_workers_[ENVIRONMENT_COUNT]
+                                                [CONTINUE_ON_SHUTDOWN_COUNT] =
+                                                    {};
+#endif  // defined(OS_WIN)
+
+  // Set to true when Start() is called.
+  bool started_ = false;
+
+  DISALLOW_COPY_AND_ASSIGN(SchedulerSingleThreadTaskRunnerManager);
+};
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_SCHEDULER_SINGLE_THREAD_TASK_RUNNER_MANAGER_H_
diff --git a/base/task_scheduler/scheduler_single_thread_task_runner_manager_unittest.cc b/base/task_scheduler/scheduler_single_thread_task_runner_manager_unittest.cc
new file mode 100644
index 0000000..52d99f6
--- /dev/null
+++ b/base/task_scheduler/scheduler_single_thread_task_runner_manager_unittest.cc
@@ -0,0 +1,676 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/scheduler_single_thread_task_runner_manager.h"
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/memory/ptr_util.h"
+#include "base/synchronization/atomic_flag.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/task_scheduler/delayed_task_manager.h"
+#include "base/task_scheduler/post_task.h"
+#include "base/task_scheduler/scheduler_worker_pool_params.h"
+#include "base/task_scheduler/task_tracker.h"
+#include "base/task_scheduler/task_traits.h"
+#include "base/test/gtest_util.h"
+#include "base/test/test_timeouts.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/simple_thread.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_restrictions.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+
+#include "base/win/com_init_util.h"
+#include "base/win/current_module.h"
+#endif  // defined(OS_WIN)
+
+namespace base {
+namespace internal {
+
+namespace {
+
+class TaskSchedulerSingleThreadTaskRunnerManagerTest : public testing::Test {
+ public:
+  TaskSchedulerSingleThreadTaskRunnerManagerTest()
+      : service_thread_("TaskSchedulerServiceThread") {}
+
+  void SetUp() override {
+    service_thread_.Start();
+    delayed_task_manager_.Start(service_thread_.task_runner());
+    single_thread_task_runner_manager_ =
+        std::make_unique<SchedulerSingleThreadTaskRunnerManager>(
+            task_tracker_.GetTrackedRef(), &delayed_task_manager_);
+    StartSingleThreadTaskRunnerManagerFromSetUp();
+  }
+
+  void TearDown() override {
+    if (single_thread_task_runner_manager_)
+      TearDownSingleThreadTaskRunnerManager();
+    service_thread_.Stop();
+  }
+
+ protected:
+  virtual void StartSingleThreadTaskRunnerManagerFromSetUp() {
+    single_thread_task_runner_manager_->Start();
+  }
+
+  virtual void TearDownSingleThreadTaskRunnerManager() {
+    single_thread_task_runner_manager_->JoinForTesting();
+    single_thread_task_runner_manager_.reset();
+  }
+
+  Thread service_thread_;
+  TaskTracker task_tracker_ = {"Test"};
+  DelayedTaskManager delayed_task_manager_;
+  std::unique_ptr<SchedulerSingleThreadTaskRunnerManager>
+      single_thread_task_runner_manager_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(TaskSchedulerSingleThreadTaskRunnerManagerTest);
+};
+
+void CaptureThreadRef(PlatformThreadRef* thread_ref) {
+  ASSERT_TRUE(thread_ref);
+  *thread_ref = PlatformThread::CurrentRef();
+}
+
+void CaptureThreadPriority(ThreadPriority* thread_priority) {
+  ASSERT_TRUE(thread_priority);
+  *thread_priority = PlatformThread::GetCurrentThreadPriority();
+}
+
+void CaptureThreadName(std::string* thread_name) {
+  *thread_name = PlatformThread::GetName();
+}
+
+void ShouldNotRun() {
+  ADD_FAILURE() << "Ran a task that shouldn't run.";
+}
+
+}  // namespace
+
+TEST_F(TaskSchedulerSingleThreadTaskRunnerManagerTest, DifferentThreadsUsed) {
+  scoped_refptr<SingleThreadTaskRunner> task_runner_1 =
+      single_thread_task_runner_manager_
+          ->CreateSingleThreadTaskRunnerWithTraits(
+              {TaskShutdownBehavior::BLOCK_SHUTDOWN},
+              SingleThreadTaskRunnerThreadMode::DEDICATED);
+  scoped_refptr<SingleThreadTaskRunner> task_runner_2 =
+      single_thread_task_runner_manager_
+          ->CreateSingleThreadTaskRunnerWithTraits(
+              {TaskShutdownBehavior::BLOCK_SHUTDOWN},
+              SingleThreadTaskRunnerThreadMode::DEDICATED);
+
+  PlatformThreadRef thread_ref_1;
+  task_runner_1->PostTask(FROM_HERE,
+                          BindOnce(&CaptureThreadRef, &thread_ref_1));
+  PlatformThreadRef thread_ref_2;
+  task_runner_2->PostTask(FROM_HERE,
+                          BindOnce(&CaptureThreadRef, &thread_ref_2));
+
+  task_tracker_.Shutdown();
+
+  ASSERT_FALSE(thread_ref_1.is_null());
+  ASSERT_FALSE(thread_ref_2.is_null());
+  EXPECT_NE(thread_ref_1, thread_ref_2);
+}
+
+TEST_F(TaskSchedulerSingleThreadTaskRunnerManagerTest, SameThreadUsed) {
+  scoped_refptr<SingleThreadTaskRunner> task_runner_1 =
+      single_thread_task_runner_manager_
+          ->CreateSingleThreadTaskRunnerWithTraits(
+              {TaskShutdownBehavior::BLOCK_SHUTDOWN},
+              SingleThreadTaskRunnerThreadMode::SHARED);
+  scoped_refptr<SingleThreadTaskRunner> task_runner_2 =
+      single_thread_task_runner_manager_
+          ->CreateSingleThreadTaskRunnerWithTraits(
+              {TaskShutdownBehavior::BLOCK_SHUTDOWN},
+              SingleThreadTaskRunnerThreadMode::SHARED);
+
+  PlatformThreadRef thread_ref_1;
+  task_runner_1->PostTask(FROM_HERE,
+                          BindOnce(&CaptureThreadRef, &thread_ref_1));
+  PlatformThreadRef thread_ref_2;
+  task_runner_2->PostTask(FROM_HERE,
+                          BindOnce(&CaptureThreadRef, &thread_ref_2));
+
+  task_tracker_.Shutdown();
+
+  ASSERT_FALSE(thread_ref_1.is_null());
+  ASSERT_FALSE(thread_ref_2.is_null());
+  EXPECT_EQ(thread_ref_1, thread_ref_2);
+}
+
+TEST_F(TaskSchedulerSingleThreadTaskRunnerManagerTest,
+       RunsTasksInCurrentSequence) {
+  scoped_refptr<SingleThreadTaskRunner> task_runner_1 =
+      single_thread_task_runner_manager_
+          ->CreateSingleThreadTaskRunnerWithTraits(
+              {TaskShutdownBehavior::BLOCK_SHUTDOWN},
+              SingleThreadTaskRunnerThreadMode::DEDICATED);
+  scoped_refptr<SingleThreadTaskRunner> task_runner_2 =
+      single_thread_task_runner_manager_
+          ->CreateSingleThreadTaskRunnerWithTraits(
+              {TaskShutdownBehavior::BLOCK_SHUTDOWN},
+              SingleThreadTaskRunnerThreadMode::DEDICATED);
+
+  EXPECT_FALSE(task_runner_1->RunsTasksInCurrentSequence());
+  EXPECT_FALSE(task_runner_2->RunsTasksInCurrentSequence());
+
+  task_runner_1->PostTask(
+      FROM_HERE,
+      BindOnce(
+          [](scoped_refptr<SingleThreadTaskRunner> task_runner_1,
+             scoped_refptr<SingleThreadTaskRunner> task_runner_2) {
+            EXPECT_TRUE(task_runner_1->RunsTasksInCurrentSequence());
+            EXPECT_FALSE(task_runner_2->RunsTasksInCurrentSequence());
+          },
+          task_runner_1, task_runner_2));
+
+  task_runner_2->PostTask(
+      FROM_HERE,
+      BindOnce(
+          [](scoped_refptr<SingleThreadTaskRunner> task_runner_1,
+             scoped_refptr<SingleThreadTaskRunner> task_runner_2) {
+            EXPECT_FALSE(task_runner_1->RunsTasksInCurrentSequence());
+            EXPECT_TRUE(task_runner_2->RunsTasksInCurrentSequence());
+          },
+          task_runner_1, task_runner_2));
+
+  task_tracker_.Shutdown();
+}
+
+TEST_F(TaskSchedulerSingleThreadTaskRunnerManagerTest,
+       SharedWithBaseSyncPrimitivesDCHECKs) {
+  testing::GTEST_FLAG(death_test_style) = "threadsafe";
+  EXPECT_DCHECK_DEATH({
+    single_thread_task_runner_manager_->CreateSingleThreadTaskRunnerWithTraits(
+        {WithBaseSyncPrimitives()}, SingleThreadTaskRunnerThreadMode::SHARED);
+  });
+}
+
+// Regression test for https://crbug.com/829786
+TEST_F(TaskSchedulerSingleThreadTaskRunnerManagerTest,
+       ContinueOnShutdownDoesNotBlockBlockShutdown) {
+  WaitableEvent task_has_started(WaitableEvent::ResetPolicy::MANUAL,
+                                 WaitableEvent::InitialState::NOT_SIGNALED);
+  WaitableEvent task_can_continue(WaitableEvent::ResetPolicy::MANUAL,
+                                  WaitableEvent::InitialState::NOT_SIGNALED);
+
+  // Post a CONTINUE_ON_SHUTDOWN task that waits on
+  // |task_can_continue| to a shared SingleThreadTaskRunner.
+  single_thread_task_runner_manager_
+      ->CreateSingleThreadTaskRunnerWithTraits(
+          {TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN},
+          SingleThreadTaskRunnerThreadMode::SHARED)
+      ->PostTask(FROM_HERE, base::BindOnce(
+                                [](WaitableEvent* task_has_started,
+                                   WaitableEvent* task_can_continue) {
+                                  task_has_started->Signal();
+                                  ScopedAllowBaseSyncPrimitivesForTesting
+                                      allow_base_sync_primitives;
+                                  task_can_continue->Wait();
+                                },
+                                Unretained(&task_has_started),
+                                Unretained(&task_can_continue)));
+
+  task_has_started.Wait();
+
+  // Post a BLOCK_SHUTDOWN task to a shared SingleThreadTaskRunner.
+  single_thread_task_runner_manager_
+      ->CreateSingleThreadTaskRunnerWithTraits(
+          {TaskShutdownBehavior::BLOCK_SHUTDOWN},
+          SingleThreadTaskRunnerThreadMode::SHARED)
+      ->PostTask(FROM_HERE, DoNothing());
+
+  // Shutdown should not hang even though the first task hasn't finished.
+  task_tracker_.Shutdown();
+
+  // Let the first task finish.
+  task_can_continue.Signal();
+
+  // Tear down from the test body to prevent accesses to |task_can_continue|
+  // after it goes out of scope.
+  TearDownSingleThreadTaskRunnerManager();
+}
+
+namespace {
+
+class TaskSchedulerSingleThreadTaskRunnerManagerCommonTest
+    : public TaskSchedulerSingleThreadTaskRunnerManagerTest,
+      public ::testing::WithParamInterface<SingleThreadTaskRunnerThreadMode> {
+ public:
+  TaskSchedulerSingleThreadTaskRunnerManagerCommonTest() = default;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(
+      TaskSchedulerSingleThreadTaskRunnerManagerCommonTest);
+};
+
+}  // namespace
+
+TEST_P(TaskSchedulerSingleThreadTaskRunnerManagerCommonTest,
+       PrioritySetCorrectly) {
+  // Why are events used here instead of the task tracker?
+  // Shutting down can cause priorities to get raised. This means we have to use
+  // events to determine when a task is run.
+  scoped_refptr<SingleThreadTaskRunner> task_runner_background =
+      single_thread_task_runner_manager_
+          ->CreateSingleThreadTaskRunnerWithTraits({TaskPriority::BACKGROUND},
+                                                   GetParam());
+  scoped_refptr<SingleThreadTaskRunner> task_runner_normal =
+      single_thread_task_runner_manager_
+          ->CreateSingleThreadTaskRunnerWithTraits({TaskPriority::USER_VISIBLE},
+                                                   GetParam());
+
+  ThreadPriority thread_priority_background;
+  task_runner_background->PostTask(
+      FROM_HERE, BindOnce(&CaptureThreadPriority, &thread_priority_background));
+  WaitableEvent waitable_event_background(
+      WaitableEvent::ResetPolicy::MANUAL,
+      WaitableEvent::InitialState::NOT_SIGNALED);
+  task_runner_background->PostTask(
+      FROM_HERE,
+      BindOnce(&WaitableEvent::Signal, Unretained(&waitable_event_background)));
+
+  ThreadPriority thread_priority_normal;
+  task_runner_normal->PostTask(
+      FROM_HERE, BindOnce(&CaptureThreadPriority, &thread_priority_normal));
+  WaitableEvent waitable_event_normal(
+      WaitableEvent::ResetPolicy::MANUAL,
+      WaitableEvent::InitialState::NOT_SIGNALED);
+  task_runner_normal->PostTask(
+      FROM_HERE,
+      BindOnce(&WaitableEvent::Signal, Unretained(&waitable_event_normal)));
+
+  waitable_event_background.Wait();
+  waitable_event_normal.Wait();
+
+  if (Lock::HandlesMultipleThreadPriorities() &&
+      PlatformThread::CanIncreaseCurrentThreadPriority()) {
+    EXPECT_EQ(ThreadPriority::BACKGROUND, thread_priority_background);
+  } else {
+    EXPECT_EQ(ThreadPriority::NORMAL, thread_priority_background);
+  }
+  EXPECT_EQ(ThreadPriority::NORMAL, thread_priority_normal);
+}
+
+TEST_P(TaskSchedulerSingleThreadTaskRunnerManagerCommonTest, ThreadNamesSet) {
+  constexpr TaskTraits foo_traits = {TaskPriority::BACKGROUND,
+                                     TaskShutdownBehavior::BLOCK_SHUTDOWN};
+  scoped_refptr<SingleThreadTaskRunner> foo_task_runner =
+      single_thread_task_runner_manager_
+          ->CreateSingleThreadTaskRunnerWithTraits(foo_traits, GetParam());
+  std::string foo_captured_name;
+  foo_task_runner->PostTask(FROM_HERE,
+                            BindOnce(&CaptureThreadName, &foo_captured_name));
+
+  constexpr TaskTraits user_blocking_traits = {
+      TaskPriority::USER_BLOCKING, MayBlock(),
+      TaskShutdownBehavior::BLOCK_SHUTDOWN};
+  scoped_refptr<SingleThreadTaskRunner> user_blocking_task_runner =
+      single_thread_task_runner_manager_
+          ->CreateSingleThreadTaskRunnerWithTraits(user_blocking_traits,
+                                                   GetParam());
+
+  std::string user_blocking_captured_name;
+  user_blocking_task_runner->PostTask(
+      FROM_HERE, BindOnce(&CaptureThreadName, &user_blocking_captured_name));
+
+  task_tracker_.Shutdown();
+
+  EXPECT_NE(std::string::npos,
+            foo_captured_name.find(
+                kEnvironmentParams[GetEnvironmentIndexForTraits(foo_traits)]
+                    .name_suffix));
+  EXPECT_NE(
+      std::string::npos,
+      user_blocking_captured_name.find(
+          kEnvironmentParams[GetEnvironmentIndexForTraits(user_blocking_traits)]
+              .name_suffix));
+
+  if (GetParam() == SingleThreadTaskRunnerThreadMode::DEDICATED) {
+    EXPECT_EQ(std::string::npos, foo_captured_name.find("Shared"));
+    EXPECT_EQ(std::string::npos, user_blocking_captured_name.find("Shared"));
+  } else {
+    EXPECT_NE(std::string::npos, foo_captured_name.find("Shared"));
+    EXPECT_NE(std::string::npos, user_blocking_captured_name.find("Shared"));
+  }
+}
+
+TEST_P(TaskSchedulerSingleThreadTaskRunnerManagerCommonTest,
+       PostTaskAfterShutdown) {
+  auto task_runner =
+      single_thread_task_runner_manager_
+          ->CreateSingleThreadTaskRunnerWithTraits(TaskTraits(), GetParam());
+  task_tracker_.Shutdown();
+  EXPECT_FALSE(task_runner->PostTask(FROM_HERE, BindOnce(&ShouldNotRun)));
+}
+
+// Verify that a Task runs shortly after its delay expires.
+TEST_P(TaskSchedulerSingleThreadTaskRunnerManagerCommonTest, PostDelayedTask) {
+  TimeTicks start_time = TimeTicks::Now();
+
+  WaitableEvent task_ran(WaitableEvent::ResetPolicy::AUTOMATIC,
+                         WaitableEvent::InitialState::NOT_SIGNALED);
+  auto task_runner =
+      single_thread_task_runner_manager_
+          ->CreateSingleThreadTaskRunnerWithTraits(TaskTraits(), GetParam());
+
+  // Wait until the task runner is up and running to make sure the test below is
+  // solely timing the delayed task, not bringing up a physical thread.
+  task_runner->PostTask(
+      FROM_HERE, BindOnce(&WaitableEvent::Signal, Unretained(&task_ran)));
+  task_ran.Wait();
+  ASSERT_TRUE(!task_ran.IsSignaled());
+
+  // Post a task with a short delay.
+  EXPECT_TRUE(task_runner->PostDelayedTask(
+      FROM_HERE, BindOnce(&WaitableEvent::Signal, Unretained(&task_ran)),
+      TestTimeouts::tiny_timeout()));
+
+  // Wait until the task runs.
+  task_ran.Wait();
+
+  // Expect the task to run after its delay expires, but no more than 250 ms
+  // after that.
+  const TimeDelta actual_delay = TimeTicks::Now() - start_time;
+  EXPECT_GE(actual_delay, TestTimeouts::tiny_timeout());
+  EXPECT_LT(actual_delay,
+            TimeDelta::FromMilliseconds(250) + TestTimeouts::tiny_timeout());
+}
+
+// Verify that posting tasks after the single-thread manager is destroyed fails
+// but doesn't crash.
+TEST_P(TaskSchedulerSingleThreadTaskRunnerManagerCommonTest,
+       PostTaskAfterDestroy) {
+  auto task_runner =
+      single_thread_task_runner_manager_
+          ->CreateSingleThreadTaskRunnerWithTraits(TaskTraits(), GetParam());
+  EXPECT_TRUE(task_runner->PostTask(FROM_HERE, DoNothing()));
+  task_tracker_.Shutdown();
+  TearDownSingleThreadTaskRunnerManager();
+  EXPECT_FALSE(task_runner->PostTask(FROM_HERE, BindOnce(&ShouldNotRun)));
+}
+
+INSTANTIATE_TEST_CASE_P(
+    AllModes,
+    TaskSchedulerSingleThreadTaskRunnerManagerCommonTest,
+    ::testing::Values(SingleThreadTaskRunnerThreadMode::SHARED,
+                      SingleThreadTaskRunnerThreadMode::DEDICATED));
+
+namespace {
+
+class CallJoinFromDifferentThread : public SimpleThread {
+ public:
+  CallJoinFromDifferentThread(
+      SchedulerSingleThreadTaskRunnerManager* manager_to_join)
+      : SimpleThread("SchedulerSingleThreadTaskRunnerManagerJoinThread"),
+        manager_to_join_(manager_to_join),
+        run_started_event_(WaitableEvent::ResetPolicy::MANUAL,
+                           WaitableEvent::InitialState::NOT_SIGNALED) {}
+
+  ~CallJoinFromDifferentThread() override = default;
+
+  void Run() override {
+    run_started_event_.Signal();
+    manager_to_join_->JoinForTesting();
+  }
+
+  void WaitForRunToStart() { run_started_event_.Wait(); }
+
+ private:
+  SchedulerSingleThreadTaskRunnerManager* const manager_to_join_;
+  WaitableEvent run_started_event_;
+
+  DISALLOW_COPY_AND_ASSIGN(CallJoinFromDifferentThread);
+};
+
+class TaskSchedulerSingleThreadTaskRunnerManagerJoinTest
+    : public TaskSchedulerSingleThreadTaskRunnerManagerTest {
+ public:
+  TaskSchedulerSingleThreadTaskRunnerManagerJoinTest() = default;
+  ~TaskSchedulerSingleThreadTaskRunnerManagerJoinTest() override = default;
+
+ protected:
+  void TearDownSingleThreadTaskRunnerManager() override {
+    // The tests themselves are responsible for calling JoinForTesting().
+    single_thread_task_runner_manager_.reset();
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(TaskSchedulerSingleThreadTaskRunnerManagerJoinTest);
+};
+
+}  // namespace
+
+TEST_F(TaskSchedulerSingleThreadTaskRunnerManagerJoinTest, ConcurrentJoin) {
+  // Exercises the codepath where the workers are unavailable for unregistration
+  // because of a Join call.
+  WaitableEvent task_running(WaitableEvent::ResetPolicy::MANUAL,
+                             WaitableEvent::InitialState::NOT_SIGNALED);
+  WaitableEvent task_blocking(WaitableEvent::ResetPolicy::MANUAL,
+                              WaitableEvent::InitialState::NOT_SIGNALED);
+
+  {
+    auto task_runner = single_thread_task_runner_manager_
+                           ->CreateSingleThreadTaskRunnerWithTraits(
+                               {WithBaseSyncPrimitives()},
+                               SingleThreadTaskRunnerThreadMode::DEDICATED);
+    EXPECT_TRUE(task_runner->PostTask(
+        FROM_HERE,
+        BindOnce(&WaitableEvent::Signal, Unretained(&task_running))));
+    EXPECT_TRUE(task_runner->PostTask(
+        FROM_HERE, BindOnce(&WaitableEvent::Wait, Unretained(&task_blocking))));
+  }
+
+  task_running.Wait();
+  CallJoinFromDifferentThread join_from_different_thread(
+      single_thread_task_runner_manager_.get());
+  join_from_different_thread.Start();
+  join_from_different_thread.WaitForRunToStart();
+  task_blocking.Signal();
+  join_from_different_thread.Join();
+}
+
+TEST_F(TaskSchedulerSingleThreadTaskRunnerManagerJoinTest,
+       ConcurrentJoinExtraSkippedTask) {
+  // Tests to make sure that tasks are properly cleaned up at Join, allowing
+  // SingleThreadTaskRunners to unregister themselves.
+  WaitableEvent task_running(WaitableEvent::ResetPolicy::MANUAL,
+                             WaitableEvent::InitialState::NOT_SIGNALED);
+  WaitableEvent task_blocking(WaitableEvent::ResetPolicy::MANUAL,
+                              WaitableEvent::InitialState::NOT_SIGNALED);
+
+  {
+    auto task_runner = single_thread_task_runner_manager_
+                           ->CreateSingleThreadTaskRunnerWithTraits(
+                               {WithBaseSyncPrimitives()},
+                               SingleThreadTaskRunnerThreadMode::DEDICATED);
+    EXPECT_TRUE(task_runner->PostTask(
+        FROM_HERE,
+        BindOnce(&WaitableEvent::Signal, Unretained(&task_running))));
+    EXPECT_TRUE(task_runner->PostTask(
+        FROM_HERE, BindOnce(&WaitableEvent::Wait, Unretained(&task_blocking))));
+    EXPECT_TRUE(task_runner->PostTask(FROM_HERE, DoNothing()));
+  }
+
+  task_running.Wait();
+  CallJoinFromDifferentThread join_from_different_thread(
+      single_thread_task_runner_manager_.get());
+  join_from_different_thread.Start();
+  join_from_different_thread.WaitForRunToStart();
+  task_blocking.Signal();
+  join_from_different_thread.Join();
+}
+
+#if defined(OS_WIN)
+
+TEST_P(TaskSchedulerSingleThreadTaskRunnerManagerCommonTest,
+       COMSTAInitialized) {
+  scoped_refptr<SingleThreadTaskRunner> com_task_runner =
+      single_thread_task_runner_manager_->CreateCOMSTATaskRunnerWithTraits(
+          {TaskShutdownBehavior::BLOCK_SHUTDOWN}, GetParam());
+
+  com_task_runner->PostTask(FROM_HERE, BindOnce(&win::AssertComApartmentType,
+                                                win::ComApartmentType::STA));
+
+  task_tracker_.Shutdown();
+}
+
+TEST_F(TaskSchedulerSingleThreadTaskRunnerManagerTest, COMSTASameThreadUsed) {
+  scoped_refptr<SingleThreadTaskRunner> task_runner_1 =
+      single_thread_task_runner_manager_->CreateCOMSTATaskRunnerWithTraits(
+          {TaskShutdownBehavior::BLOCK_SHUTDOWN},
+          SingleThreadTaskRunnerThreadMode::SHARED);
+  scoped_refptr<SingleThreadTaskRunner> task_runner_2 =
+      single_thread_task_runner_manager_->CreateCOMSTATaskRunnerWithTraits(
+          {TaskShutdownBehavior::BLOCK_SHUTDOWN},
+          SingleThreadTaskRunnerThreadMode::SHARED);
+
+  PlatformThreadRef thread_ref_1;
+  task_runner_1->PostTask(FROM_HERE,
+                          BindOnce(&CaptureThreadRef, &thread_ref_1));
+  PlatformThreadRef thread_ref_2;
+  task_runner_2->PostTask(FROM_HERE,
+                          BindOnce(&CaptureThreadRef, &thread_ref_2));
+
+  task_tracker_.Shutdown();
+
+  ASSERT_FALSE(thread_ref_1.is_null());
+  ASSERT_FALSE(thread_ref_2.is_null());
+  EXPECT_EQ(thread_ref_1, thread_ref_2);
+}
+
+namespace {
+
+const wchar_t* const kTestWindowClassName =
+    L"TaskSchedulerSingleThreadTaskRunnerManagerTestWinMessageWindow";
+
+class TaskSchedulerSingleThreadTaskRunnerManagerTestWin
+    : public TaskSchedulerSingleThreadTaskRunnerManagerTest {
+ public:
+  TaskSchedulerSingleThreadTaskRunnerManagerTestWin() = default;
+
+  void SetUp() override {
+    TaskSchedulerSingleThreadTaskRunnerManagerTest::SetUp();
+    register_class_succeeded_ = RegisterTestWindowClass();
+    ASSERT_TRUE(register_class_succeeded_);
+  }
+
+  void TearDown() override {
+    if (register_class_succeeded_)
+      ::UnregisterClass(kTestWindowClassName, CURRENT_MODULE());
+
+    TaskSchedulerSingleThreadTaskRunnerManagerTest::TearDown();
+  }
+
+  HWND CreateTestWindow() {
+    return CreateWindow(kTestWindowClassName, kTestWindowClassName, 0, 0, 0, 0,
+                        0, HWND_MESSAGE, nullptr, CURRENT_MODULE(), nullptr);
+  }
+
+ private:
+  bool RegisterTestWindowClass() {
+    WNDCLASSEX window_class = {};
+    window_class.cbSize = sizeof(window_class);
+    window_class.lpfnWndProc = &::DefWindowProc;
+    window_class.hInstance = CURRENT_MODULE();
+    window_class.lpszClassName = kTestWindowClassName;
+    return !!::RegisterClassEx(&window_class);
+  }
+
+  bool register_class_succeeded_ = false;
+
+  DISALLOW_COPY_AND_ASSIGN(TaskSchedulerSingleThreadTaskRunnerManagerTestWin);
+};
+
+}  // namespace
+
+TEST_F(TaskSchedulerSingleThreadTaskRunnerManagerTestWin, PumpsMessages) {
+  scoped_refptr<SingleThreadTaskRunner> com_task_runner =
+      single_thread_task_runner_manager_->CreateCOMSTATaskRunnerWithTraits(
+          {TaskShutdownBehavior::BLOCK_SHUTDOWN},
+          SingleThreadTaskRunnerThreadMode::DEDICATED);
+  HWND hwnd = nullptr;
+  // HWNDs process messages on the thread that created them, so we have to
+  // create them within the context of the task runner to properly simulate a
+  // COM callback.
+  com_task_runner->PostTask(
+      FROM_HERE,
+      BindOnce(
+          [](TaskSchedulerSingleThreadTaskRunnerManagerTestWin* test_harness,
+             HWND* hwnd) { *hwnd = test_harness->CreateTestWindow(); },
+          Unretained(this), &hwnd));
+
+  task_tracker_.FlushForTesting();
+
+  ASSERT_NE(hwnd, nullptr);
+  // If the message pump isn't running, we will hang here. This simulates how
+  // COM would receive a callback with its own message HWND.
+  SendMessage(hwnd, WM_USER, 0, 0);
+
+  com_task_runner->PostTask(
+      FROM_HERE, BindOnce([](HWND hwnd) { ::DestroyWindow(hwnd); }, hwnd));
+
+  task_tracker_.Shutdown();
+}
+
+#endif  // defined(OS_WIN)
+
+namespace {
+
+class TaskSchedulerSingleThreadTaskRunnerManagerStartTest
+    : public TaskSchedulerSingleThreadTaskRunnerManagerTest {
+ public:
+  TaskSchedulerSingleThreadTaskRunnerManagerStartTest() = default;
+
+ private:
+  void StartSingleThreadTaskRunnerManagerFromSetUp() override {
+    // Start() is called in the test body rather than in SetUp().
+  }
+
+  DISALLOW_COPY_AND_ASSIGN(TaskSchedulerSingleThreadTaskRunnerManagerStartTest);
+};
+
+}  // namespace
+
+// Verify that a task posted before Start() doesn't run until Start() is called.
+TEST_F(TaskSchedulerSingleThreadTaskRunnerManagerStartTest,
+       PostTaskBeforeStart) {
+  AtomicFlag manager_started;
+  WaitableEvent task_finished(WaitableEvent::ResetPolicy::MANUAL,
+                              WaitableEvent::InitialState::NOT_SIGNALED);
+  single_thread_task_runner_manager_
+      ->CreateSingleThreadTaskRunnerWithTraits(
+          TaskTraits(), SingleThreadTaskRunnerThreadMode::DEDICATED)
+      ->PostTask(
+          FROM_HERE,
+          BindOnce(
+              [](WaitableEvent* task_finished, AtomicFlag* manager_started) {
+                // The task should not run before Start().
+                EXPECT_TRUE(manager_started->IsSet());
+                task_finished->Signal();
+              },
+              Unretained(&task_finished), Unretained(&manager_started)));
+
+  // Wait a little bit to make sure that the task doesn't run before Start().
+  // Note: This test won't catch a case where the task runs between setting
+  // |manager_started| and calling Start(). However, we expect the test to be
+  // flaky if the tested code allows that to happen.
+  PlatformThread::Sleep(TestTimeouts::tiny_timeout());
+  manager_started.Set();
+  single_thread_task_runner_manager_->Start();
+
+  // Wait for the task to complete to keep |manager_started| alive.
+  task_finished.Wait();
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/task_scheduler/scheduler_worker.cc b/base/task_scheduler/scheduler_worker.cc
new file mode 100644
index 0000000..d3b8c0c
--- /dev/null
+++ b/base/task_scheduler/scheduler_worker.cc
@@ -0,0 +1,353 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/scheduler_worker.h"
+
+#include <stddef.h>
+
+#include <utility>
+
+#include "base/compiler_specific.h"
+#include "base/debug/alias.h"
+#include "base/logging.h"
+#include "base/task_scheduler/scheduler_worker_observer.h"
+#include "base/task_scheduler/task_tracker.h"
+#include "base/trace_event/trace_event.h"
+
+#if defined(OS_MACOSX)
+#include "base/mac/scoped_nsautorelease_pool.h"
+#elif defined(OS_WIN)
+#include "base/win/com_init_check_hook.h"
+#include "base/win/scoped_com_initializer.h"
+#endif
+
+namespace base {
+namespace internal {
+
+void SchedulerWorker::Delegate::WaitForWork(WaitableEvent* wake_up_event) {
+  DCHECK(wake_up_event);
+  const TimeDelta sleep_time = GetSleepTimeout();
+  if (sleep_time.is_max()) {
+    // Calling TimedWait with TimeDelta::Max is not recommended per
+    // http://crbug.com/465948.
+    wake_up_event->Wait();
+  } else {
+    wake_up_event->TimedWait(sleep_time);
+  }
+}
+
+SchedulerWorker::SchedulerWorker(
+    ThreadPriority priority_hint,
+    std::unique_ptr<Delegate> delegate,
+    TrackedRef<TaskTracker> task_tracker,
+    const SchedulerLock* predecessor_lock,
+    SchedulerBackwardCompatibility backward_compatibility)
+    : thread_lock_(predecessor_lock),
+      delegate_(std::move(delegate)),
+      task_tracker_(std::move(task_tracker)),
+      priority_hint_(priority_hint),
+      current_thread_priority_(GetDesiredThreadPriority())
+#if defined(OS_WIN) && !defined(COM_INIT_CHECK_HOOK_ENABLED)
+      ,
+      backward_compatibility_(backward_compatibility)
+#endif
+{
+  DCHECK(delegate_);
+  DCHECK(task_tracker_);
+}
+
+bool SchedulerWorker::Start(
+    SchedulerWorkerObserver* scheduler_worker_observer) {
+  AutoSchedulerLock auto_lock(thread_lock_);
+  DCHECK(thread_handle_.is_null());
+
+  if (should_exit_.IsSet())
+    return true;
+
+  DCHECK(!scheduler_worker_observer_);
+  scheduler_worker_observer_ = scheduler_worker_observer;
+
+  self_ = this;
+
+  constexpr size_t kDefaultStackSize = 0;
+  PlatformThread::CreateWithPriority(kDefaultStackSize, this, &thread_handle_,
+                                     current_thread_priority_);
+
+  if (thread_handle_.is_null()) {
+    self_ = nullptr;
+    return false;
+  }
+
+  return true;
+}
+
+void SchedulerWorker::WakeUp() {
+  // Calling WakeUp() after Cleanup() or Join() is wrong because the
+  // SchedulerWorker cannot run more tasks.
+  DCHECK(!join_called_for_testing_.IsSet());
+  DCHECK(!should_exit_.IsSet());
+  wake_up_event_.Signal();
+}
+
+void SchedulerWorker::JoinForTesting() {
+  DCHECK(!join_called_for_testing_.IsSet());
+  join_called_for_testing_.Set();
+  wake_up_event_.Signal();
+
+  PlatformThreadHandle thread_handle;
+
+  {
+    AutoSchedulerLock auto_lock(thread_lock_);
+    DCHECK(!thread_handle_.is_null());
+    thread_handle = thread_handle_;
+    // Reset |thread_handle_| so it isn't joined by the destructor.
+    thread_handle_ = PlatformThreadHandle();
+  }
+
+  PlatformThread::Join(thread_handle);
+}
+
+bool SchedulerWorker::ThreadAliveForTesting() const {
+  AutoSchedulerLock auto_lock(thread_lock_);
+  return !thread_handle_.is_null();
+}
+
+SchedulerWorker::~SchedulerWorker() {
+  AutoSchedulerLock auto_lock(thread_lock_);
+
+  // If |thread_handle_| wasn't joined, detach it.
+  if (!thread_handle_.is_null()) {
+    DCHECK(!join_called_for_testing_.IsSet());
+    PlatformThread::Detach(thread_handle_);
+  }
+}
+
+void SchedulerWorker::Cleanup() {
+  DCHECK(!should_exit_.IsSet());
+  should_exit_.Set();
+  wake_up_event_.Signal();
+}
+
+bool SchedulerWorker::ShouldExit() const {
+  // The ordering of the checks is important below. This SchedulerWorker may be
+  // released and outlive |task_tracker_| in unit tests. However, when the
+  // SchedulerWorker is released, |should_exit_| will be set, so check that
+  // first.
+  return should_exit_.IsSet() || join_called_for_testing_.IsSet() ||
+         task_tracker_->IsShutdownComplete();
+}
+
+ThreadPriority SchedulerWorker::GetDesiredThreadPriority() const {
+  // All threads have a NORMAL priority when Lock doesn't handle multiple thread
+  // priorities.
+  if (!Lock::HandlesMultipleThreadPriorities())
+    return ThreadPriority::NORMAL;
+
+  // To avoid shutdown hangs, disallow a priority below NORMAL during shutdown.
+  // If thread priority cannot be increased, never allow a priority below
+  // NORMAL.
+  if (static_cast<int>(priority_hint_) <
+          static_cast<int>(ThreadPriority::NORMAL) &&
+      (task_tracker_->HasShutdownStarted() ||
+       !PlatformThread::CanIncreaseCurrentThreadPriority())) {
+    return ThreadPriority::NORMAL;
+  }
+
+  return priority_hint_;
+}
+
+void SchedulerWorker::UpdateThreadPriority(
+    ThreadPriority desired_thread_priority) {
+  if (desired_thread_priority == current_thread_priority_)
+    return;
+
+  PlatformThread::SetCurrentThreadPriority(desired_thread_priority);
+  current_thread_priority_ = desired_thread_priority;
+}
+
+void SchedulerWorker::ThreadMain() {
+  if (priority_hint_ == ThreadPriority::BACKGROUND) {
+    switch (delegate_->GetThreadLabel()) {
+      case ThreadLabel::POOLED:
+        RunBackgroundPooledWorker();
+        return;
+      case ThreadLabel::SHARED:
+        RunBackgroundSharedWorker();
+        return;
+      case ThreadLabel::DEDICATED:
+        RunBackgroundDedicatedWorker();
+        return;
+#if defined(OS_WIN)
+      case ThreadLabel::SHARED_COM:
+        RunBackgroundSharedCOMWorker();
+        return;
+      case ThreadLabel::DEDICATED_COM:
+        RunBackgroundDedicatedCOMWorker();
+        return;
+#endif  // defined(OS_WIN)
+    }
+  }
+
+  switch (delegate_->GetThreadLabel()) {
+    case ThreadLabel::POOLED:
+      RunPooledWorker();
+      return;
+    case ThreadLabel::SHARED:
+      RunSharedWorker();
+      return;
+    case ThreadLabel::DEDICATED:
+      RunDedicatedWorker();
+      return;
+#if defined(OS_WIN)
+    case ThreadLabel::SHARED_COM:
+      RunSharedCOMWorker();
+      return;
+    case ThreadLabel::DEDICATED_COM:
+      RunDedicatedCOMWorker();
+      return;
+#endif  // defined(OS_WIN)
+  }
+}
+
+NOINLINE void SchedulerWorker::RunPooledWorker() {
+  const int line_number = __LINE__;
+  RunWorker();
+  base::debug::Alias(&line_number);
+}
+
+NOINLINE void SchedulerWorker::RunBackgroundPooledWorker() {
+  const int line_number = __LINE__;
+  RunWorker();
+  base::debug::Alias(&line_number);
+}
+
+NOINLINE void SchedulerWorker::RunSharedWorker() {
+  const int line_number = __LINE__;
+  RunWorker();
+  base::debug::Alias(&line_number);
+}
+
+NOINLINE void SchedulerWorker::RunBackgroundSharedWorker() {
+  const int line_number = __LINE__;
+  RunWorker();
+  base::debug::Alias(&line_number);
+}
+
+NOINLINE void SchedulerWorker::RunDedicatedWorker() {
+  const int line_number = __LINE__;
+  RunWorker();
+  base::debug::Alias(&line_number);
+}
+
+NOINLINE void SchedulerWorker::RunBackgroundDedicatedWorker() {
+  const int line_number = __LINE__;
+  RunWorker();
+  base::debug::Alias(&line_number);
+}
+
+#if defined(OS_WIN)
+NOINLINE void SchedulerWorker::RunSharedCOMWorker() {
+  const int line_number = __LINE__;
+  RunWorker();
+  base::debug::Alias(&line_number);
+}
+
+NOINLINE void SchedulerWorker::RunBackgroundSharedCOMWorker() {
+  const int line_number = __LINE__;
+  RunWorker();
+  base::debug::Alias(&line_number);
+}
+
+NOINLINE void SchedulerWorker::RunDedicatedCOMWorker() {
+  const int line_number = __LINE__;
+  RunWorker();
+  base::debug::Alias(&line_number);
+}
+
+NOINLINE void SchedulerWorker::RunBackgroundDedicatedCOMWorker() {
+  const int line_number = __LINE__;
+  RunWorker();
+  base::debug::Alias(&line_number);
+}
+#endif  // defined(OS_WIN)
+
+void SchedulerWorker::RunWorker() {
+  DCHECK_EQ(self_, this);
+  TRACE_EVENT_BEGIN0("task_scheduler", "SchedulerWorkerThread active");
+
+  if (scheduler_worker_observer_)
+    scheduler_worker_observer_->OnSchedulerWorkerMainEntry();
+
+  delegate_->OnMainEntry(this);
+
+  // A SchedulerWorker starts out waiting for work.
+  {
+    TRACE_EVENT_END0("task_scheduler", "SchedulerWorkerThread active");
+    delegate_->WaitForWork(&wake_up_event_);
+    TRACE_EVENT_BEGIN0("task_scheduler", "SchedulerWorkerThread active");
+  }
+
+// When defined(COM_INIT_CHECK_HOOK_ENABLED), ignore
+// SchedulerBackwardCompatibility::INIT_COM_STA to find incorrect uses of
+// COM that should be running in a COM STA Task Runner.
+#if defined(OS_WIN) && !defined(COM_INIT_CHECK_HOOK_ENABLED)
+  std::unique_ptr<win::ScopedCOMInitializer> com_initializer;
+  if (backward_compatibility_ == SchedulerBackwardCompatibility::INIT_COM_STA)
+    com_initializer = std::make_unique<win::ScopedCOMInitializer>();
+#endif
+
+  while (!ShouldExit()) {
+#if defined(OS_MACOSX)
+    mac::ScopedNSAutoreleasePool autorelease_pool;
+#endif
+
+    UpdateThreadPriority(GetDesiredThreadPriority());
+
+    // Get the sequence containing the next task to execute.
+    scoped_refptr<Sequence> sequence = delegate_->GetWork(this);
+    if (!sequence) {
+      // Exit immediately if GetWork() resulted in detaching this worker.
+      if (ShouldExit())
+        break;
+
+      TRACE_EVENT_END0("task_scheduler", "SchedulerWorkerThread active");
+      delegate_->WaitForWork(&wake_up_event_);
+      TRACE_EVENT_BEGIN0("task_scheduler", "SchedulerWorkerThread active");
+      continue;
+    }
+
+    sequence =
+        task_tracker_->RunAndPopNextTask(std::move(sequence), delegate_.get());
+
+    delegate_->DidRunTask();
+
+    // Re-enqueue |sequence| if allowed by RunAndPopNextTask().
+    if (sequence)
+      delegate_->ReEnqueueSequence(std::move(sequence));
+
+    // Calling WakeUp() guarantees that this SchedulerWorker will run Tasks from
+    // Sequences returned by the GetWork() method of |delegate_| until it
+    // returns nullptr. Resetting |wake_up_event_| here doesn't break this
+    // invariant and avoids a useless loop iteration before going to sleep if
+    // WakeUp() is called while this SchedulerWorker is awake.
+    wake_up_event_.Reset();
+  }
+
+  // Important: It is unsafe to access unowned state (e.g. |task_tracker_|)
+  // after invoking OnMainExit().
+
+  delegate_->OnMainExit(this);
+
+  if (scheduler_worker_observer_)
+    scheduler_worker_observer_->OnSchedulerWorkerMainExit();
+
+  // Release the self-reference to |this|. This can result in deleting |this|
+  // and as such no more member accesses should be made after this point.
+  self_ = nullptr;
+
+  TRACE_EVENT_END0("task_scheduler", "SchedulerWorkerThread active");
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/task_scheduler/scheduler_worker.h b/base/task_scheduler/scheduler_worker.h
new file mode 100644
index 0000000..8bcfcb3
--- /dev/null
+++ b/base/task_scheduler/scheduler_worker.h
@@ -0,0 +1,250 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_SCHEDULER_WORKER_H_
+#define BASE_TASK_SCHEDULER_SCHEDULER_WORKER_H_
+
+#include <memory>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/atomic_flag.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/task_scheduler/can_schedule_sequence_observer.h"
+#include "base/task_scheduler/scheduler_lock.h"
+#include "base/task_scheduler/scheduler_worker_params.h"
+#include "base/task_scheduler/sequence.h"
+#include "base/task_scheduler/tracked_ref.h"
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include "base/win/com_init_check_hook.h"
+#endif
+
+namespace base {
+
+class SchedulerWorkerObserver;
+
+namespace internal {
+
+class TaskTracker;
+
+// A worker that manages a single thread to run Tasks from Sequences returned
+// by a delegate.
+//
+// A SchedulerWorker starts out sleeping. It is woken up by a call to WakeUp().
+// After a wake-up, a SchedulerWorker runs Tasks from Sequences returned by the
+// GetWork() method of its delegate as long as it doesn't return nullptr. It
+// also periodically checks with its TaskTracker whether shutdown has completed
+// and exits when it has.
+//
+// This class is thread-safe.
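+//
+// A sketch of the expected lifecycle (illustrative only; |delegate| and
+// |task_tracker| are assumptions of this example):
+//
+//   auto worker = MakeRefCounted<SchedulerWorker>(
+//       ThreadPriority::NORMAL, std::move(delegate), task_tracker);
+//   worker->Start();
+//   worker->WakeUp();  // Runs Tasks until GetWork() returns nullptr.
+//   worker->Cleanup();  // Or JoinForTesting(); see method comments below.
+//   worker = nullptr;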
+class BASE_EXPORT SchedulerWorker
+    : public RefCountedThreadSafe<SchedulerWorker>,
+      public PlatformThread::Delegate {
+ public:
+  // Labels this SchedulerWorker's association. This doesn't affect any logic
+  // but will add a stack frame labeling this thread for ease of stack trace
+  // identification.
+  enum class ThreadLabel {
+    POOLED,
+    SHARED,
+    DEDICATED,
+#if defined(OS_WIN)
+    SHARED_COM,
+    DEDICATED_COM,
+#endif  // defined(OS_WIN)
+  };
+
+  // Delegate interface for SchedulerWorker. All methods except
+  // OnCanScheduleSequence() (inherited from CanScheduleSequenceObserver) are
+  // called from the thread managed by the SchedulerWorker instance.
+  class BASE_EXPORT Delegate : public CanScheduleSequenceObserver {
+   public:
+    ~Delegate() override = default;
+
+    // Returns the ThreadLabel the Delegate wants its SchedulerWorkers' stacks
+    // to be labeled with.
+    virtual ThreadLabel GetThreadLabel() const = 0;
+
+    // Called by |worker|'s thread when it enters its main function.
+    virtual void OnMainEntry(const SchedulerWorker* worker) = 0;
+
+    // Called by |worker|'s thread to get a Sequence from which to run a Task.
+    virtual scoped_refptr<Sequence> GetWork(SchedulerWorker* worker) = 0;
+
+    // Called by the SchedulerWorker after it ran a task.
+    virtual void DidRunTask() = 0;
+
+    // Called when |sequence| isn't empty after the SchedulerWorker pops a Task
+    // from it. |sequence| is the last Sequence returned by GetWork().
+    //
+    // TODO(fdoray): Rename to RescheduleSequence() to match TaskTracker
+    // terminology.
+    virtual void ReEnqueueSequence(scoped_refptr<Sequence> sequence) = 0;
+
+    // Called to determine how long to sleep before the next call to GetWork().
+    // GetWork() may be called before this timeout expires if the worker's
+    // WakeUp() method is called.
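+    // A delegate that never reclaims its thread can simply return
+    // TimeDelta::Max(); the default WaitForWork() below treats this as an
+    // untimed wait.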
+    virtual TimeDelta GetSleepTimeout() = 0;
+
+    // Called by the SchedulerWorker's thread to wait for work. Override this
+    // method if the thread in question needs special handling to go to sleep.
+    // |wake_up_event| is a manually resettable event and is signaled on
+    // SchedulerWorker::WakeUp().
+    virtual void WaitForWork(WaitableEvent* wake_up_event);
+
+    // Called by |worker|'s thread right before the main function exits. The
+    // Delegate is free to release any associated resources in this call. It is
+    // guaranteed that SchedulerWorker won't access the Delegate or the
+    // TaskTracker after calling OnMainExit() on the Delegate.
+    virtual void OnMainExit(SchedulerWorker* worker) {}
+  };
+
+  // Creates a SchedulerWorker that runs Tasks from Sequences returned by
+  // |delegate|. No actual thread will be created for this SchedulerWorker
+  // before Start() is called. |priority_hint| is the preferred thread priority;
+  // the actual thread priority depends on shutdown state and platform
+  // capabilities. |task_tracker| is used to handle shutdown behavior of Tasks.
+  // |predecessor_lock| is a lock that is allowed to be held when calling
+  // methods on this SchedulerWorker. |backward_compatibility| indicates
+  // whether backward compatibility is enabled. Either JoinForTesting() or
+  // Cleanup() must be called before releasing the last external reference.
+  SchedulerWorker(ThreadPriority priority_hint,
+                  std::unique_ptr<Delegate> delegate,
+                  TrackedRef<TaskTracker> task_tracker,
+                  const SchedulerLock* predecessor_lock = nullptr,
+                  SchedulerBackwardCompatibility backward_compatibility =
+                      SchedulerBackwardCompatibility::DISABLED);
+
+  // Creates a thread to back the SchedulerWorker. The thread will be in a wait
+  // state pending a WakeUp() call. No thread will be created if Cleanup() was
+  // called. If specified, |scheduler_worker_observer| will be notified when the
+  // worker enters and exits its main function. It must not be destroyed before
+  // JoinForTesting() has returned (must never be destroyed in production).
+  // Returns true on success.
+  bool Start(SchedulerWorkerObserver* scheduler_worker_observer = nullptr);
+
+  // Wakes up this SchedulerWorker if it wasn't already awake. After this is
+  // called, this SchedulerWorker will run Tasks from Sequences returned by the
+  // GetWork() method of its delegate until it returns nullptr. No-op if Start()
+  // wasn't called. DCHECKs if called after Start() has failed or after
+  // Cleanup() has been called.
+  void WakeUp();
+
+  SchedulerWorker::Delegate* delegate() { return delegate_.get(); }
+
+  // Joins this SchedulerWorker. If a Task is already running, it will be
+  // allowed to complete its execution. This can only be called once.
+  //
+  // Note: A thread that detaches before JoinForTesting() is called may still be
+  // running after JoinForTesting() returns. However, it can't run tasks after
+  // JoinForTesting() returns.
+  void JoinForTesting();
+
+  // Returns true if the worker is alive.
+  bool ThreadAliveForTesting() const;
+
+  // Makes a request to cleanup the worker. This may be called from any thread.
+  // The caller is expected to release its reference to this object after
+  // calling Cleanup(). Further method calls after Cleanup() returns are
+  // undefined.
+  //
+  // Expected Usage:
+  //   scoped_refptr<SchedulerWorker> worker_ = /* Existing Worker */
+  //   worker_->Cleanup();
+  //   worker_ = nullptr;
+  void Cleanup();
+
+ private:
+  friend class RefCountedThreadSafe<SchedulerWorker>;
+  class Thread;
+
+  ~SchedulerWorker() override;
+
+  bool ShouldExit() const;
+
+  // Returns the thread priority to use based on the priority hint, current
+  // shutdown state, and platform capabilities.
+  ThreadPriority GetDesiredThreadPriority() const;
+
+  // Changes the thread priority to |desired_thread_priority|. Must be called on
+  // the thread managed by |this|.
+  void UpdateThreadPriority(ThreadPriority desired_thread_priority);
+
+  // PlatformThread::Delegate:
+  void ThreadMain() override;
+
+  // Dummy frames to act as "RunLabeledWorker()" (see RunWorker() below). Their
+  // impl is aliased to prevent the compiler/linker from optimizing them out.
+  void RunPooledWorker();
+  void RunBackgroundPooledWorker();
+  void RunSharedWorker();
+  void RunBackgroundSharedWorker();
+  void RunDedicatedWorker();
+  void RunBackgroundDedicatedWorker();
+#if defined(OS_WIN)
+  void RunSharedCOMWorker();
+  void RunBackgroundSharedCOMWorker();
+  void RunDedicatedCOMWorker();
+  void RunBackgroundDedicatedCOMWorker();
+#endif  // defined(OS_WIN)
+
+  // The real main, invoked through:
+  //     ThreadMain() -> RunLabeledWorker() -> RunWorker().
+  // "RunLabeledWorker()" is a dummy frame based on ThreadLabel+ThreadPriority
+  // and used to easily identify threads in stack traces.
+  void RunWorker();
+
+  // Self-reference to prevent destruction of |this| while the thread is alive.
+  // Set in Start() before creating the thread. Reset in ThreadMain() before the
+  // thread exits. No lock required because the first access occurs before the
+  // thread is created and the second access occurs on the thread.
+  scoped_refptr<SchedulerWorker> self_;
+
+  // Synchronizes access to |thread_handle_|.
+  mutable SchedulerLock thread_lock_;
+
+  // Handle for the thread managed by |this|.
+  PlatformThreadHandle thread_handle_;
+
+  // Event to wake up the thread managed by |this|.
+  WaitableEvent wake_up_event_{WaitableEvent::ResetPolicy::AUTOMATIC,
+                               WaitableEvent::InitialState::NOT_SIGNALED};
+
+  // Whether the thread should exit. Set by Cleanup().
+  AtomicFlag should_exit_;
+
+  const std::unique_ptr<Delegate> delegate_;
+  const TrackedRef<TaskTracker> task_tracker_;
+
+  // Optional observer notified when a worker enters and exits its main
+  // function. Set in Start() and never modified afterwards.
+  SchedulerWorkerObserver* scheduler_worker_observer_ = nullptr;
+
+  // Desired thread priority.
+  const ThreadPriority priority_hint_;
+
+  // Actual thread priority. Can be different than |priority_hint_| depending on
+  // system capabilities and shutdown state. No lock required because all post-
+  // construction accesses occur on the thread.
+  ThreadPriority current_thread_priority_;
+
+#if defined(OS_WIN) && !defined(COM_INIT_CHECK_HOOK_ENABLED)
+  const SchedulerBackwardCompatibility backward_compatibility_;
+#endif
+
+  // Set once JoinForTesting() has been called.
+  AtomicFlag join_called_for_testing_;
+
+  DISALLOW_COPY_AND_ASSIGN(SchedulerWorker);
+};
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_SCHEDULER_WORKER_H_
diff --git a/base/task_scheduler/scheduler_worker_observer.h b/base/task_scheduler/scheduler_worker_observer.h
new file mode 100644
index 0000000..5e6fc8f
--- /dev/null
+++ b/base/task_scheduler/scheduler_worker_observer.h
@@ -0,0 +1,27 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_SCHEDULER_WORKER_OBSERVER_H_
+#define BASE_TASK_SCHEDULER_SCHEDULER_WORKER_OBSERVER_H_
+
+namespace base {
+
+// Interface to observe entry and exit of the main function of a TaskScheduler
+// worker.
+class SchedulerWorkerObserver {
+ public:
+  virtual ~SchedulerWorkerObserver() = default;
+
+  // Invoked at the beginning of the main function of a TaskScheduler worker,
+  // before any task runs.
+  virtual void OnSchedulerWorkerMainEntry() = 0;
+
+  // Invoked at the end of the main function of a TaskScheduler worker, when it
+  // can no longer run tasks.
+  virtual void OnSchedulerWorkerMainExit() = 0;
+};
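+
+// A minimal implementation might look like this (hypothetical, for
+// illustration only):
+//
+//   class CountingSchedulerWorkerObserver : public SchedulerWorkerObserver {
+//    public:
+//     void OnSchedulerWorkerMainEntry() override { ++entries_; }
+//     void OnSchedulerWorkerMainExit() override { ++exits_; }
+//
+//    private:
+//     std::atomic_int entries_{0};
+//     std::atomic_int exits_{0};
+//   };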
+
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_SCHEDULER_WORKER_OBSERVER_H_
diff --git a/base/task_scheduler/scheduler_worker_params.h b/base/task_scheduler/scheduler_worker_params.h
new file mode 100644
index 0000000..ea753ff
--- /dev/null
+++ b/base/task_scheduler/scheduler_worker_params.h
@@ -0,0 +1,24 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_SCHEDULER_WORKER_PARAMS_H_
+#define BASE_TASK_SCHEDULER_SCHEDULER_WORKER_PARAMS_H_
+
+namespace base {
+
+enum class SchedulerBackwardCompatibility {
+  // No backward compatibility.
+  DISABLED,
+
+  // On Windows, initialize COM STA to mimic SequencedWorkerPool and
+  // BrowserThreadImpl. Behaves like DISABLED on other platforms.
+  // TODO(fdoray): Get rid of this and force tasks that care about a
+  // CoInitialized environment to request one explicitly (via an upcoming
+  // execution mode).
+  INIT_COM_STA,
+};
+
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_SCHEDULER_WORKER_PARAMS_H_
diff --git a/base/task_scheduler/scheduler_worker_pool.cc b/base/task_scheduler/scheduler_worker_pool.cc
new file mode 100644
index 0000000..1a5c35d
--- /dev/null
+++ b/base/task_scheduler/scheduler_worker_pool.cc
@@ -0,0 +1,219 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/scheduler_worker_pool.h"
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/lazy_instance.h"
+#include "base/task_scheduler/delayed_task_manager.h"
+#include "base/task_scheduler/task_tracker.h"
+#include "base/threading/thread_local.h"
+
+namespace base {
+namespace internal {
+
+namespace {
+
+// The number of SchedulerWorkerPools that are alive in this process. This
+// variable should only be incremented when the SchedulerWorkerPool instances
+// are brought up (on the main thread; before any tasks are posted) and
+// decremented when the same instances are brought down (i.e., only when unit
+// tests tear down the task environment and never in production). This makes the
+// variable const while worker threads are up and as such it doesn't need to be
+// atomic. It is used to tell when a task is posted from the main thread after
+// the task environment was brought down in unit tests so that
+// SchedulerWorkerPool-bound TaskRunners can return false on PostTask, letting
+// such callers know they should complete necessary work synchronously. Note:
+// |!g_active_pools_count| is generally equivalent to
+// |!TaskScheduler::GetInstance()| but has the advantage of being valid in
+// task_scheduler unit tests that don't instantiate a full TaskScheduler.
+int g_active_pools_count = 0;
+
+// SchedulerWorkerPool that owns the current thread, if any.
+LazyInstance<ThreadLocalPointer<const SchedulerWorkerPool>>::Leaky
+    tls_current_worker_pool = LAZY_INSTANCE_INITIALIZER;
+
+const SchedulerWorkerPool* GetCurrentWorkerPool() {
+  return tls_current_worker_pool.Get().Get();
+}
+
+}  // namespace
+
+// A task runner that runs tasks in parallel.
+class SchedulerParallelTaskRunner : public TaskRunner {
+ public:
+  // Constructs a SchedulerParallelTaskRunner which can be used to post tasks so
+  // long as |worker_pool| is alive.
+  // TODO(robliao): Find a concrete way to manage |worker_pool|'s memory.
+  SchedulerParallelTaskRunner(const TaskTraits& traits,
+                              SchedulerWorkerPool* worker_pool)
+      : traits_(traits), worker_pool_(worker_pool) {
+    DCHECK(worker_pool_);
+  }
+
+  // TaskRunner:
+  bool PostDelayedTask(const Location& from_here,
+                       OnceClosure closure,
+                       TimeDelta delay) override {
+    if (!g_active_pools_count)
+      return false;
+
+    // Post the task as part of a one-off single-task Sequence.
+    return worker_pool_->PostTaskWithSequence(
+        Task(from_here, std::move(closure), traits_, delay),
+        MakeRefCounted<Sequence>());
+  }
+
+  bool RunsTasksInCurrentSequence() const override {
+    return GetCurrentWorkerPool() == worker_pool_;
+  }
+
+ private:
+  ~SchedulerParallelTaskRunner() override = default;
+
+  const TaskTraits traits_;
+  SchedulerWorkerPool* const worker_pool_;
+
+  DISALLOW_COPY_AND_ASSIGN(SchedulerParallelTaskRunner);
+};
+
+// A task runner that runs tasks in sequence.
+class SchedulerSequencedTaskRunner : public SequencedTaskRunner {
+ public:
+  // Constructs a SchedulerSequencedTaskRunner which can be used to post tasks
+  // so long as |worker_pool| is alive.
+  // TODO(robliao): Find a concrete way to manage |worker_pool|'s memory.
+  SchedulerSequencedTaskRunner(const TaskTraits& traits,
+                               SchedulerWorkerPool* worker_pool)
+      : traits_(traits), worker_pool_(worker_pool) {
+    DCHECK(worker_pool_);
+  }
+
+  // SequencedTaskRunner:
+  bool PostDelayedTask(const Location& from_here,
+                       OnceClosure closure,
+                       TimeDelta delay) override {
+    if (!g_active_pools_count)
+      return false;
+
+    Task task(from_here, std::move(closure), traits_, delay);
+    task.sequenced_task_runner_ref = this;
+
+    // Post the task as part of |sequence_|.
+    return worker_pool_->PostTaskWithSequence(std::move(task), sequence_);
+  }
+
+  bool PostNonNestableDelayedTask(const Location& from_here,
+                                  OnceClosure closure,
+                                  base::TimeDelta delay) override {
+    // Tasks are never nested within the task scheduler.
+    return PostDelayedTask(from_here, std::move(closure), delay);
+  }
+
+  bool RunsTasksInCurrentSequence() const override {
+    return sequence_->token() == SequenceToken::GetForCurrentThread();
+  }
+
+ private:
+  ~SchedulerSequencedTaskRunner() override = default;
+
+  // Sequence for all Tasks posted through this TaskRunner.
+  const scoped_refptr<Sequence> sequence_ = MakeRefCounted<Sequence>();
+
+  const TaskTraits traits_;
+  SchedulerWorkerPool* const worker_pool_;
+
+  DISALLOW_COPY_AND_ASSIGN(SchedulerSequencedTaskRunner);
+};
+
+scoped_refptr<TaskRunner> SchedulerWorkerPool::CreateTaskRunnerWithTraits(
+    const TaskTraits& traits) {
+  return MakeRefCounted<SchedulerParallelTaskRunner>(traits, this);
+}
+
+scoped_refptr<SequencedTaskRunner>
+SchedulerWorkerPool::CreateSequencedTaskRunnerWithTraits(
+    const TaskTraits& traits) {
+  return MakeRefCounted<SchedulerSequencedTaskRunner>(traits, this);
+}
+
+bool SchedulerWorkerPool::PostTaskWithSequence(
+    Task task,
+    scoped_refptr<Sequence> sequence) {
+  DCHECK(task.task);
+  DCHECK(sequence);
+
+  if (!task_tracker_->WillPostTask(task))
+    return false;
+
+  if (task.delayed_run_time.is_null()) {
+    PostTaskWithSequenceNow(std::move(task), std::move(sequence));
+  } else {
+    // Use CHECK instead of DCHECK to crash earlier. See http://crbug.com/711167
+    // for details.
+    CHECK(task.task);
+    delayed_task_manager_->AddDelayedTask(
+        std::move(task), BindOnce(
+                             [](scoped_refptr<Sequence> sequence,
+                                SchedulerWorkerPool* worker_pool, Task task) {
+                               worker_pool->PostTaskWithSequenceNow(
+                                   std::move(task), std::move(sequence));
+                             },
+                             std::move(sequence), Unretained(this)));
+  }
+
+  return true;
+}
+
+SchedulerWorkerPool::SchedulerWorkerPool(
+    TrackedRef<TaskTracker> task_tracker,
+    DelayedTaskManager* delayed_task_manager)
+    : task_tracker_(std::move(task_tracker)),
+      delayed_task_manager_(delayed_task_manager) {
+  DCHECK(task_tracker_);
+  DCHECK(delayed_task_manager_);
+  ++g_active_pools_count;
+}
+
+SchedulerWorkerPool::~SchedulerWorkerPool() {
+  --g_active_pools_count;
+  DCHECK_GE(g_active_pools_count, 0);
+}
+
+void SchedulerWorkerPool::BindToCurrentThread() {
+  DCHECK(!GetCurrentWorkerPool());
+  tls_current_worker_pool.Get().Set(this);
+}
+
+void SchedulerWorkerPool::UnbindFromCurrentThread() {
+  DCHECK(GetCurrentWorkerPool());
+  tls_current_worker_pool.Get().Set(nullptr);
+}
+
+void SchedulerWorkerPool::PostTaskWithSequenceNow(
+    Task task,
+    scoped_refptr<Sequence> sequence) {
+  DCHECK(task.task);
+  DCHECK(sequence);
+
+  // Confirm that |task| is ready to run (its delayed run time is either null or
+  // in the past).
+  DCHECK_LE(task.delayed_run_time, TimeTicks::Now());
+
+  const bool sequence_was_empty = sequence->PushTask(std::move(task));
+  if (sequence_was_empty) {
+    // Try to schedule |sequence| if it was empty before |task| was inserted
+    // into it. Otherwise, one of these must be true:
+    // - |sequence| is already scheduled, or,
+    // - The pool is running a Task from |sequence|. The pool is expected to
+    //   reschedule |sequence| once it's done running the Task.
+    sequence = task_tracker_->WillScheduleSequence(std::move(sequence), this);
+    if (sequence)
+      OnCanScheduleSequence(std::move(sequence));
+  }
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/task_scheduler/scheduler_worker_pool.h b/base/task_scheduler/scheduler_worker_pool.h
new file mode 100644
index 0000000..de5329e
--- /dev/null
+++ b/base/task_scheduler/scheduler_worker_pool.h
@@ -0,0 +1,79 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_SCHEDULER_WORKER_POOL_H_
+#define BASE_TASK_SCHEDULER_SCHEDULER_WORKER_POOL_H_
+
+#include "base/base_export.h"
+#include "base/memory/ref_counted.h"
+#include "base/sequenced_task_runner.h"
+#include "base/task_runner.h"
+#include "base/task_scheduler/can_schedule_sequence_observer.h"
+#include "base/task_scheduler/sequence.h"
+#include "base/task_scheduler/task.h"
+#include "base/task_scheduler/task_traits.h"
+#include "base/task_scheduler/tracked_ref.h"
+
+namespace base {
+namespace internal {
+
+class DelayedTaskManager;
+class TaskTracker;
+
+// Interface for a worker pool.
+class BASE_EXPORT SchedulerWorkerPool : public CanScheduleSequenceObserver {
+ public:
+  ~SchedulerWorkerPool() override;
+
+  // Returns a TaskRunner whose PostTask invocations result in scheduling tasks
+  // in this SchedulerWorkerPool using |traits|. Tasks may run in any order and
+  // in parallel.
+  scoped_refptr<TaskRunner> CreateTaskRunnerWithTraits(
+      const TaskTraits& traits);
+
+  // Returns a SequencedTaskRunner whose PostTask invocations result in
+  // scheduling tasks in this SchedulerWorkerPool using |traits|. Tasks run one
+  // at a time in posting order.
+  scoped_refptr<SequencedTaskRunner> CreateSequencedTaskRunnerWithTraits(
+      const TaskTraits& traits);
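+
+  // Illustrative use of the factory methods above (a sketch; the traits
+  // value is an arbitrary example and DoSomething() is hypothetical):
+  //   scoped_refptr<SequencedTaskRunner> runner =
+  //       pool->CreateSequencedTaskRunnerWithTraits(
+  //           {TaskPriority::USER_VISIBLE});
+  //   runner->PostTask(FROM_HERE, BindOnce(&DoSomething));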
+
+  // Posts |task| to be executed by this SchedulerWorkerPool as part of
+  // |sequence|. |task| won't be executed before its delayed run time, if any.
+  // Returns true if |task| is posted.
+  bool PostTaskWithSequence(Task task, scoped_refptr<Sequence> sequence);
+
+  // Registers the worker pool in TLS.
+  void BindToCurrentThread();
+
+  // Resets the worker pool in TLS.
+  void UnbindFromCurrentThread();
+
+  // Prevents new tasks from starting to run and waits for currently running
+  // tasks to complete their execution. It is guaranteed that no thread will do
+  // work on behalf of this SchedulerWorkerPool after this returns. It is
+  // invalid to post a task once this is called. TaskTracker::Flush() can be
+  // called before this to complete existing tasks, which might otherwise post a
+  // task during JoinForTesting(). This can only be called once.
+  virtual void JoinForTesting() = 0;
+
+ protected:
+  SchedulerWorkerPool(TrackedRef<TaskTracker> task_tracker,
+                      DelayedTaskManager* delayed_task_manager);
+
+  // Posts |task| to be executed by this SchedulerWorkerPool as part of
+  // |sequence|. This must only be called after |task| has gone through
+  // PostTaskWithSequence() and after |task|'s delayed run time.
+  void PostTaskWithSequenceNow(Task task, scoped_refptr<Sequence> sequence);
+
+  const TrackedRef<TaskTracker> task_tracker_;
+  DelayedTaskManager* const delayed_task_manager_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(SchedulerWorkerPool);
+};
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_SCHEDULER_WORKER_POOL_H_
diff --git a/base/task_scheduler/scheduler_worker_pool_impl.cc b/base/task_scheduler/scheduler_worker_pool_impl.cc
new file mode 100644
index 0000000..b309bbd
--- /dev/null
+++ b/base/task_scheduler/scheduler_worker_pool_impl.cc
@@ -0,0 +1,989 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/scheduler_worker_pool_impl.h"
+
+#include <stddef.h>
+
+#include <algorithm>
+#include <utility>
+
+#include "base/atomicops.h"
+#include "base/auto_reset.h"
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/compiler_specific.h"
+#include "base/location.h"
+#include "base/memory/ptr_util.h"
+#include "base/metrics/histogram.h"
+#include "base/sequence_token.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/task_scheduler/scheduler_worker_pool_params.h"
+#include "base/task_scheduler/task_tracker.h"
+#include "base/task_scheduler/task_traits.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/scoped_blocking_call.h"
+#include "base/threading/thread_checker.h"
+#include "base/threading/thread_restrictions.h"
+
+#if defined(OS_WIN)
+#include "base/win/scoped_com_initializer.h"
+#include "base/win/scoped_windows_thread_environment.h"
+#include "base/win/scoped_winrt_initializer.h"
+#include "base/win/windows_version.h"
+#endif  // defined(OS_WIN)
+
+namespace base {
+namespace internal {
+
+constexpr TimeDelta SchedulerWorkerPoolImpl::kBlockedWorkersPollPeriod;
+
+namespace {
+
+constexpr char kPoolNameSuffix[] = "Pool";
+constexpr char kDetachDurationHistogramPrefix[] =
+    "TaskScheduler.DetachDuration.";
+constexpr char kNumTasksBeforeDetachHistogramPrefix[] =
+    "TaskScheduler.NumTasksBeforeDetach.";
+constexpr char kNumTasksBetweenWaitsHistogramPrefix[] =
+    "TaskScheduler.NumTasksBetweenWaits.";
+constexpr size_t kMaxNumberOfWorkers = 256;
+
+// Only used in DCHECKs.
+bool ContainsWorker(const std::vector<scoped_refptr<SchedulerWorker>>& workers,
+                    const SchedulerWorker* worker) {
+  auto it = std::find_if(workers.begin(), workers.end(),
+                         [worker](const scoped_refptr<SchedulerWorker>& i) {
+                           return i.get() == worker;
+                         });
+  return it != workers.end();
+}
+
+}  // namespace
+
+class SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl
+    : public SchedulerWorker::Delegate,
+      public BlockingObserver {
+ public:
+  // |outer| owns the worker for which this delegate is constructed.
+  SchedulerWorkerDelegateImpl(TrackedRef<SchedulerWorkerPoolImpl> outer);
+  ~SchedulerWorkerDelegateImpl() override;
+
+  // SchedulerWorker::Delegate:
+  void OnCanScheduleSequence(scoped_refptr<Sequence> sequence) override;
+  SchedulerWorker::ThreadLabel GetThreadLabel() const override;
+  void OnMainEntry(const SchedulerWorker* worker) override;
+  scoped_refptr<Sequence> GetWork(SchedulerWorker* worker) override;
+  void DidRunTask() override;
+  void ReEnqueueSequence(scoped_refptr<Sequence> sequence) override;
+  TimeDelta GetSleepTimeout() override;
+  void OnMainExit(SchedulerWorker* worker) override;
+
+  // Sets |is_on_idle_workers_stack_| to be true and DCHECKs that |worker|
+  // is indeed on the idle workers stack.
+  void SetIsOnIdleWorkersStackLockRequired(SchedulerWorker* worker);
+
+  // Sets |is_on_idle_workers_stack_| to be false and DCHECKs that |worker|
+  // isn't on the idle workers stack.
+  void UnSetIsOnIdleWorkersStackLockRequired(SchedulerWorker* worker);
+
+// DCHECKs that |worker| is on the idle workers stack and
+// |is_on_idle_workers_stack_| is true.
+#if DCHECK_IS_ON()
+  void AssertIsOnIdleWorkersStackLockRequired(SchedulerWorker* worker) const;
+#else
+  void AssertIsOnIdleWorkersStackLockRequired(SchedulerWorker* worker) const {}
+#endif
+
+  // BlockingObserver:
+  void BlockingStarted(BlockingType blocking_type) override;
+  void BlockingTypeUpgraded() override;
+  void BlockingEnded() override;
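+  //
+  // These overrides are invoked by ScopedBlockingCall via the
+  // BlockingObserver registered for this thread in OnMainEntry(). Sketch of
+  // the driving scope inside a task (illustrative, not code in this file):
+  //   {
+  //     ScopedBlockingCall scoped_blocking_call(BlockingType::MAY_BLOCK);
+  //     // ... blocking I/O ...
+  //   }  // BlockingStarted(MAY_BLOCK) at entry, BlockingEnded() at exit.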
+
+  void MayBlockEntered();
+  void WillBlockEntered();
+
+  // Returns true iff this worker has been within a MAY_BLOCK ScopedBlockingCall
+  // for more than |outer_->MayBlockThreshold()|. The worker capacity must be
+  // incremented if this returns true.
+  bool MustIncrementWorkerCapacityLockRequired();
+
+ private:
+  // Returns true if |worker| is allowed to cleanup and remove itself from the
+  // pool. Called from GetWork() when no work is available.
+  bool CanCleanupLockRequired(const SchedulerWorker* worker) const;
+
+  // Calls cleanup on |worker| and removes it from the pool. Called from
+  // GetWork() when no work is available and CanCleanupLockRequired() returns
+  // true.
+  void CleanupLockRequired(SchedulerWorker* worker);
+
+  // Called in GetWork() when a worker becomes idle.
+  void OnWorkerBecomesIdleLockRequired(SchedulerWorker* worker);
+
+  const TrackedRef<SchedulerWorkerPoolImpl> outer_;
+
+  // Time of the last detach.
+  TimeTicks last_detach_time_;
+
+  // Number of tasks executed since the last time the
+  // TaskScheduler.NumTasksBetweenWaits histogram was recorded.
+  size_t num_tasks_since_last_wait_ = 0;
+
+  // Number of tasks executed since the last time the
+  // TaskScheduler.NumTasksBeforeDetach histogram was recorded.
+  size_t num_tasks_since_last_detach_ = 0;
+
+  // Whether the worker holding this delegate is on the idle worker's stack.
+  // Access synchronized by |outer_->lock_|.
+  bool is_on_idle_workers_stack_ = true;
+
+  // Whether |outer_->worker_capacity_| was incremented due to a
+  // ScopedBlockingCall on the thread. Access synchronized by |outer_->lock_|.
+  bool incremented_worker_capacity_since_blocked_ = false;
+
+  // Time when MayBlockEntered() was last called. Reset when BlockingEnded()
+  // is called. Access synchronized by |outer_->lock_|.
+  TimeTicks may_block_start_time_;
+
+  // Whether this worker is currently running a task (i.e. GetWork() has
+  // returned a non-empty sequence and DidRunTask() hasn't been called yet).
+  bool is_running_task_ = false;
+
+#if defined(OS_WIN)
+  std::unique_ptr<win::ScopedWindowsThreadEnvironment> win_thread_environment_;
+#endif  // defined(OS_WIN)
+
+  // Verifies that specific calls are always made from the worker thread.
+  THREAD_CHECKER(worker_thread_checker_);
+
+  DISALLOW_COPY_AND_ASSIGN(SchedulerWorkerDelegateImpl);
+};
+
+SchedulerWorkerPoolImpl::SchedulerWorkerPoolImpl(
+    StringPiece histogram_label,
+    StringPiece pool_label,
+    ThreadPriority priority_hint,
+    TrackedRef<TaskTracker> task_tracker,
+    DelayedTaskManager* delayed_task_manager)
+    : SchedulerWorkerPool(std::move(task_tracker), delayed_task_manager),
+      pool_label_(pool_label.as_string()),
+      priority_hint_(priority_hint),
+      lock_(shared_priority_queue_.container_lock()),
+      idle_workers_stack_cv_for_testing_(lock_.CreateConditionVariable()),
+      // Mimics the UMA_HISTOGRAM_LONG_TIMES macro.
+      detach_duration_histogram_(Histogram::FactoryTimeGet(
+          JoinString({kDetachDurationHistogramPrefix, histogram_label,
+                      kPoolNameSuffix},
+                     ""),
+          TimeDelta::FromMilliseconds(1),
+          TimeDelta::FromHours(1),
+          50,
+          HistogramBase::kUmaTargetedHistogramFlag)),
+      // Mimics the UMA_HISTOGRAM_COUNTS_1000 macro. When a worker runs more
+      // than 1000 tasks before detaching, there is no need to know the exact
+      // number of tasks that ran.
+      num_tasks_before_detach_histogram_(Histogram::FactoryGet(
+          JoinString({kNumTasksBeforeDetachHistogramPrefix, histogram_label,
+                      kPoolNameSuffix},
+                     ""),
+          1,
+          1000,
+          50,
+          HistogramBase::kUmaTargetedHistogramFlag)),
+      // Mimics the UMA_HISTOGRAM_COUNTS_100 macro. A SchedulerWorker is
+      // expected to run between zero and a few tens of tasks between waits.
+      // When it runs more than 100 tasks, there is no need to know the exact
+      // number of tasks that ran.
+      num_tasks_between_waits_histogram_(Histogram::FactoryGet(
+          JoinString({kNumTasksBetweenWaitsHistogramPrefix, histogram_label,
+                      kPoolNameSuffix},
+                     ""),
+          1,
+          100,
+          50,
+          HistogramBase::kUmaTargetedHistogramFlag)),
+      tracked_ref_factory_(this) {
+  DCHECK(!histogram_label.empty());
+  DCHECK(!pool_label_.empty());
+}
+
+void SchedulerWorkerPoolImpl::Start(
+    const SchedulerWorkerPoolParams& params,
+    scoped_refptr<TaskRunner> service_thread_task_runner,
+    SchedulerWorkerObserver* scheduler_worker_observer,
+    WorkerEnvironment worker_environment) {
+  AutoSchedulerLock auto_lock(lock_);
+
+  DCHECK(workers_.empty());
+
+  worker_capacity_ = params.max_threads();
+  initial_worker_capacity_ = worker_capacity_;
+  DCHECK_LE(initial_worker_capacity_, kMaxNumberOfWorkers);
+  suggested_reclaim_time_ = params.suggested_reclaim_time();
+  backward_compatibility_ = params.backward_compatibility();
+  worker_environment_ = worker_environment;
+
+  service_thread_task_runner_ = std::move(service_thread_task_runner);
+
+  DCHECK(!scheduler_worker_observer_);
+  scheduler_worker_observer_ = scheduler_worker_observer;
+
+  // The initial number of workers is |num_wake_ups_before_start_| + 1 to try
+  // to keep at least one standby thread at all times (capacity permitting).
+  const int num_initial_workers = std::min(num_wake_ups_before_start_ + 1,
+                                           static_cast<int>(worker_capacity_));
+  workers_.reserve(num_initial_workers);
+
+  for (int index = 0; index < num_initial_workers; ++index) {
+    SchedulerWorker* worker =
+        CreateRegisterAndStartSchedulerWorkerLockRequired();
+
+    // CHECK that the first worker can be started (assume that failure means
+    // that threads can't be created on this machine).
+    CHECK(worker || index > 0);
+
+    if (worker) {
+      SchedulerWorkerDelegateImpl* delegate =
+          static_cast<SchedulerWorkerDelegateImpl*>(worker->delegate());
+      if (index < num_wake_ups_before_start_) {
+        delegate->UnSetIsOnIdleWorkersStackLockRequired(worker);
+        worker->WakeUp();
+      } else {
+        idle_workers_stack_.Push(worker);
+        delegate->AssertIsOnIdleWorkersStackLockRequired(worker);
+      }
+    }
+  }
+}
+
+SchedulerWorkerPoolImpl::~SchedulerWorkerPoolImpl() {
+  // A SchedulerWorkerPoolImpl should only ever be deleted:
+  //  1) In tests, after JoinForTesting().
+  //  2) In production, iff initialization failed.
+  // In both cases |workers_| should be empty.
+  DCHECK(workers_.empty());
+}
+
+void SchedulerWorkerPoolImpl::OnCanScheduleSequence(
+    scoped_refptr<Sequence> sequence) {
+  const auto sequence_sort_key = sequence->GetSortKey();
+  shared_priority_queue_.BeginTransaction()->Push(std::move(sequence),
+                                                  sequence_sort_key);
+
+  WakeUpOneWorker();
+}
+
+void SchedulerWorkerPoolImpl::GetHistograms(
+    std::vector<const HistogramBase*>* histograms) const {
+  histograms->push_back(detach_duration_histogram_);
+  histograms->push_back(num_tasks_between_waits_histogram_);
+}
+
+int SchedulerWorkerPoolImpl::GetMaxConcurrentNonBlockedTasksDeprecated() const {
+#if DCHECK_IS_ON()
+  AutoSchedulerLock auto_lock(lock_);
+  DCHECK_NE(initial_worker_capacity_, 0U)
+      << "GetMaxConcurrentTasksDeprecated() should only be called after the "
+      << "worker pool has started.";
+#endif
+  return initial_worker_capacity_;
+}
+
+void SchedulerWorkerPoolImpl::WaitForWorkersIdleForTesting(size_t n) {
+  AutoSchedulerLock auto_lock(lock_);
+
+  DCHECK_EQ(0U, num_workers_cleaned_up_for_testing_)
+      << "Workers detached prior to waiting for a specific number of idle "
+         "workers. Doing the wait under such conditions is flaky.";
+
+  WaitForWorkersIdleLockRequiredForTesting(n);
+}
+
+void SchedulerWorkerPoolImpl::WaitForAllWorkersIdleForTesting() {
+  AutoSchedulerLock auto_lock(lock_);
+  WaitForWorkersIdleLockRequiredForTesting(workers_.size());
+}
+
+void SchedulerWorkerPoolImpl::WaitForWorkersCleanedUpForTesting(size_t n) {
+  AutoSchedulerLock auto_lock(lock_);
+
+  DCHECK_EQ(0U, num_workers_cleaned_up_for_testing_)
+      << "Called WaitForWorkersCleanedUpForTesting() after some workers had "
+         "already cleaned up on their own.";
+
+  DCHECK(!num_workers_cleaned_up_for_testing_cv_)
+      << "Called WaitForWorkersCleanedUpForTesting() multiple times in the "
+         "same test.";
+
+  num_workers_cleaned_up_for_testing_cv_ = lock_.CreateConditionVariable();
+
+  while (num_workers_cleaned_up_for_testing_ < n)
+    num_workers_cleaned_up_for_testing_cv_->Wait();
+}
+
+void SchedulerWorkerPoolImpl::JoinForTesting() {
+#if DCHECK_IS_ON()
+  join_for_testing_started_.Set();
+#endif
+
+  decltype(workers_) workers_copy;
+  {
+    AutoSchedulerLock auto_lock(lock_);
+
+    DCHECK_GT(workers_.size(), size_t(0)) << "Joined an unstarted worker pool.";
+
+    // Ensure SchedulerWorkers in |workers_| do not attempt to cleanup while
+    // being joined.
+    worker_cleanup_disallowed_for_testing_ = true;
+
+    // Make a copy of the SchedulerWorkers so that we can call
+    // SchedulerWorker::JoinForTesting() without holding |lock_| since
+    // SchedulerWorkers may need to access |workers_|.
+    workers_copy = workers_;
+  }
+  for (const auto& worker : workers_copy)
+    worker->JoinForTesting();
+
+  AutoSchedulerLock auto_lock(lock_);
+  DCHECK(workers_ == workers_copy);
+  // Release |workers_| to clear their TrackedRef against |this|.
+  workers_.clear();
+}
+
+size_t SchedulerWorkerPoolImpl::NumberOfWorkersForTesting() const {
+  AutoSchedulerLock auto_lock(lock_);
+  return workers_.size();
+}
+
+size_t SchedulerWorkerPoolImpl::GetWorkerCapacityForTesting() const {
+  AutoSchedulerLock auto_lock(lock_);
+  return worker_capacity_;
+}
+
+size_t SchedulerWorkerPoolImpl::NumberOfIdleWorkersForTesting() const {
+  AutoSchedulerLock auto_lock(lock_);
+  return idle_workers_stack_.Size();
+}
+
+void SchedulerWorkerPoolImpl::MaximizeMayBlockThresholdForTesting() {
+  maximum_blocked_threshold_for_testing_.Set();
+}
+
+SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::
+    SchedulerWorkerDelegateImpl(TrackedRef<SchedulerWorkerPoolImpl> outer)
+    : outer_(std::move(outer)) {
+  // Bound in OnMainEntry().
+  DETACH_FROM_THREAD(worker_thread_checker_);
+}
+
+// OnMainExit() handles the thread-affine cleanup; SchedulerWorkerDelegateImpl
+// can thereafter safely be deleted from any thread.
+SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::
+    ~SchedulerWorkerDelegateImpl() = default;
+
+void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::
+    OnCanScheduleSequence(scoped_refptr<Sequence> sequence) {
+  outer_->OnCanScheduleSequence(std::move(sequence));
+}
+
+SchedulerWorker::ThreadLabel
+SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::GetThreadLabel() const {
+  return SchedulerWorker::ThreadLabel::POOLED;
+}
+
+void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::OnMainEntry(
+    const SchedulerWorker* worker) {
+  DCHECK_CALLED_ON_VALID_THREAD(worker_thread_checker_);
+
+  {
+#if DCHECK_IS_ON()
+    AutoSchedulerLock auto_lock(outer_->lock_);
+    DCHECK(ContainsWorker(outer_->workers_, worker));
+#endif
+  }
+
+#if defined(OS_WIN)
+  if (outer_->worker_environment_ == WorkerEnvironment::COM_MTA) {
+    if (win::GetVersion() >= win::VERSION_WIN8) {
+      win_thread_environment_ = std::make_unique<win::ScopedWinrtInitializer>();
+    } else {
+      win_thread_environment_ = std::make_unique<win::ScopedCOMInitializer>(
+          win::ScopedCOMInitializer::kMTA);
+    }
+    DCHECK(win_thread_environment_->Succeeded());
+  }
+#endif  // defined(OS_WIN)
+
+  DCHECK_EQ(num_tasks_since_last_wait_, 0U);
+
+  PlatformThread::SetName(
+      StringPrintf("TaskScheduler%sWorker", outer_->pool_label_.c_str()));
+
+  outer_->BindToCurrentThread();
+  SetBlockingObserverForCurrentThread(this);
+}
+
+scoped_refptr<Sequence>
+SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::GetWork(
+    SchedulerWorker* worker) {
+  DCHECK_CALLED_ON_VALID_THREAD(worker_thread_checker_);
+
+  DCHECK(!is_running_task_);
+  {
+    AutoSchedulerLock auto_lock(outer_->lock_);
+
+    DCHECK(ContainsWorker(outer_->workers_, worker));
+
+    // Calling GetWork() when |is_on_idle_workers_stack_| is true indicates
+    // that we must've reached GetWork() because the WaitableEvent timed out,
+    // in which case we return no work and possibly clean up the worker.
+    DCHECK_EQ(is_on_idle_workers_stack_,
+              outer_->idle_workers_stack_.Contains(worker));
+    if (is_on_idle_workers_stack_) {
+      if (CanCleanupLockRequired(worker))
+        CleanupLockRequired(worker);
+      return nullptr;
+    }
+
+    // Excess workers should not get work until they are no longer excess (i.e.
+    // worker capacity increases or another worker cleans up). This ensures that
+    // if we have excess workers in the pool, they get a chance to no longer be
+    // excess before being cleaned up.
+    if (outer_->NumberOfExcessWorkersLockRequired() >
+        outer_->idle_workers_stack_.Size()) {
+      OnWorkerBecomesIdleLockRequired(worker);
+      return nullptr;
+    }
+  }
+  scoped_refptr<Sequence> sequence;
+  {
+    std::unique_ptr<PriorityQueue::Transaction> shared_transaction(
+        outer_->shared_priority_queue_.BeginTransaction());
+
+    if (shared_transaction->IsEmpty()) {
+      // |shared_transaction| is kept alive while |worker| is added to
+      // |idle_workers_stack_| to avoid this race:
+      // 1. This thread creates a Transaction, finds |shared_priority_queue_|
+      //    empty and ends the Transaction.
+      // 2. Other thread creates a Transaction, inserts a Sequence into
+      //    |shared_priority_queue_| and ends the Transaction. This can't happen
+      //    if the Transaction of step 1 is still active because there
+      //    can only be one active Transaction per PriorityQueue at a time.
+      // 3. Other thread calls WakeUpOneWorker(). No thread is woken up because
+      //    |idle_workers_stack_| is empty.
+      // 4. This thread adds itself to |idle_workers_stack_| and goes to sleep.
+      //    No thread runs the Sequence inserted in step 2.
+      AutoSchedulerLock auto_lock(outer_->lock_);
+
+      OnWorkerBecomesIdleLockRequired(worker);
+      return nullptr;
+    }
+    sequence = shared_transaction->PopSequence();
+  }
+  DCHECK(sequence);
+#if DCHECK_IS_ON()
+  {
+    AutoSchedulerLock auto_lock(outer_->lock_);
+    DCHECK(!outer_->idle_workers_stack_.Contains(worker));
+  }
+#endif
+
+  is_running_task_ = true;
+  return sequence;
+}
+
+void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::DidRunTask() {
+  DCHECK_CALLED_ON_VALID_THREAD(worker_thread_checker_);
+
+  DCHECK(may_block_start_time_.is_null());
+  DCHECK(!incremented_worker_capacity_since_blocked_);
+  DCHECK(is_running_task_);
+  is_running_task_ = false;
+
+  ++num_tasks_since_last_wait_;
+  ++num_tasks_since_last_detach_;
+}
+
+void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::
+    ReEnqueueSequence(scoped_refptr<Sequence> sequence) {
+  DCHECK_CALLED_ON_VALID_THREAD(worker_thread_checker_);
+
+  const SequenceSortKey sequence_sort_key = sequence->GetSortKey();
+  outer_->shared_priority_queue_.BeginTransaction()->Push(std::move(sequence),
+                                                          sequence_sort_key);
+  // This worker will soon call GetWork(). Therefore, there is no need to wake
+  // up a worker to run the sequence that was just inserted into
+  // |outer_->shared_priority_queue_|.
+}
+
+TimeDelta SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::
+    GetSleepTimeout() {
+  DCHECK_CALLED_ON_VALID_THREAD(worker_thread_checker_);
+  return outer_->suggested_reclaim_time_;
+}
+
+bool SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::
+    CanCleanupLockRequired(const SchedulerWorker* worker) const {
+  DCHECK_CALLED_ON_VALID_THREAD(worker_thread_checker_);
+
+  return worker != outer_->PeekAtIdleWorkersStackLockRequired() &&
+         LIKELY(!outer_->worker_cleanup_disallowed_for_testing_);
+}
+
+void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::CleanupLockRequired(
+    SchedulerWorker* worker) {
+  DCHECK_CALLED_ON_VALID_THREAD(worker_thread_checker_);
+
+  outer_->lock_.AssertAcquired();
+  outer_->num_tasks_before_detach_histogram_->Add(num_tasks_since_last_detach_);
+  outer_->cleanup_timestamps_.push(TimeTicks::Now());
+  worker->Cleanup();
+  outer_->RemoveFromIdleWorkersStackLockRequired(worker);
+
+  // Remove the worker from |workers_|.
+  auto worker_iter =
+      std::find(outer_->workers_.begin(), outer_->workers_.end(), worker);
+  DCHECK(worker_iter != outer_->workers_.end());
+  outer_->workers_.erase(worker_iter);
+
+  ++outer_->num_workers_cleaned_up_for_testing_;
+  if (outer_->num_workers_cleaned_up_for_testing_cv_)
+    outer_->num_workers_cleaned_up_for_testing_cv_->Signal();
+}
+
+void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::
+    OnWorkerBecomesIdleLockRequired(SchedulerWorker* worker) {
+  DCHECK_CALLED_ON_VALID_THREAD(worker_thread_checker_);
+
+  outer_->lock_.AssertAcquired();
+  // Record the TaskScheduler.NumTasksBetweenWaits histogram. After GetWork()
+  // returns nullptr, the SchedulerWorker will perform a wait on its
+  // WaitableEvent, so we record here how many tasks ran since the last wait.
+  outer_->num_tasks_between_waits_histogram_->Add(num_tasks_since_last_wait_);
+  num_tasks_since_last_wait_ = 0;
+  outer_->AddToIdleWorkersStackLockRequired(worker);
+  SetIsOnIdleWorkersStackLockRequired(worker);
+}
+
+void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::OnMainExit(
+    SchedulerWorker* worker) {
+  DCHECK_CALLED_ON_VALID_THREAD(worker_thread_checker_);
+
+#if DCHECK_IS_ON()
+  {
+    bool shutdown_complete = outer_->task_tracker_->IsShutdownComplete();
+    AutoSchedulerLock auto_lock(outer_->lock_);
+
+    // |worker| should already have been removed from the idle workers stack
+    // and |workers_| by the time the thread is about to exit (except when the
+    // pool is no longer going to be used, in which case it's fine for there
+    // to be invalid workers in the pool).
+    if (!shutdown_complete && !outer_->join_for_testing_started_.IsSet()) {
+      DCHECK(!outer_->idle_workers_stack_.Contains(worker));
+      DCHECK(!ContainsWorker(outer_->workers_, worker));
+    }
+  }
+#endif
+
+#if defined(OS_WIN)
+  win_thread_environment_.reset();
+#endif  // defined(OS_WIN)
+}
+
+void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::
+    SetIsOnIdleWorkersStackLockRequired(SchedulerWorker* worker) {
+  outer_->lock_.AssertAcquired();
+  DCHECK(!is_on_idle_workers_stack_);
+  DCHECK(outer_->idle_workers_stack_.Contains(worker));
+  is_on_idle_workers_stack_ = true;
+}
+
+void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::
+    UnSetIsOnIdleWorkersStackLockRequired(SchedulerWorker* worker) {
+  outer_->lock_.AssertAcquired();
+  DCHECK(is_on_idle_workers_stack_);
+  DCHECK(!outer_->idle_workers_stack_.Contains(worker));
+  is_on_idle_workers_stack_ = false;
+}
+
+#if DCHECK_IS_ON()
+void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::
+    AssertIsOnIdleWorkersStackLockRequired(SchedulerWorker* worker) const {
+  outer_->lock_.AssertAcquired();
+  DCHECK(is_on_idle_workers_stack_);
+  DCHECK(outer_->idle_workers_stack_.Contains(worker));
+}
+#endif
+
+void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::BlockingStarted(
+    BlockingType blocking_type) {
+  DCHECK_CALLED_ON_VALID_THREAD(worker_thread_checker_);
+
+  // Blocking calls made outside of tasks should not influence the capacity
+  // count as no task is running.
+  if (!is_running_task_)
+    return;
+
+  switch (blocking_type) {
+    case BlockingType::MAY_BLOCK:
+      MayBlockEntered();
+      break;
+    case BlockingType::WILL_BLOCK:
+      WillBlockEntered();
+      break;
+  }
+}
+
+void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::
+    BlockingTypeUpgraded() {
+  DCHECK_CALLED_ON_VALID_THREAD(worker_thread_checker_);
+
+  {
+    AutoSchedulerLock auto_lock(outer_->lock_);
+
+    // Don't do anything if a MAY_BLOCK ScopedBlockingCall instantiated in the
+    // same scope already caused the worker capacity to be incremented.
+    if (incremented_worker_capacity_since_blocked_)
+      return;
+
+    // Cancel the effect of a MAY_BLOCK ScopedBlockingCall instantiated in the
+    // same scope.
+    if (!may_block_start_time_.is_null()) {
+      may_block_start_time_ = TimeTicks();
+      --outer_->num_pending_may_block_workers_;
+    }
+  }
+
+  WillBlockEntered();
+}
+
+void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::BlockingEnded() {
+  DCHECK_CALLED_ON_VALID_THREAD(worker_thread_checker_);
+
+  // Ignore blocking calls made outside of tasks.
+  if (!is_running_task_)
+    return;
+
+  AutoSchedulerLock auto_lock(outer_->lock_);
+  if (incremented_worker_capacity_since_blocked_) {
+    outer_->DecrementWorkerCapacityLockRequired();
+  } else {
+    DCHECK(!may_block_start_time_.is_null());
+    --outer_->num_pending_may_block_workers_;
+  }
+
+  incremented_worker_capacity_since_blocked_ = false;
+  may_block_start_time_ = TimeTicks();
+}
+
+void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::MayBlockEntered() {
+  DCHECK_CALLED_ON_VALID_THREAD(worker_thread_checker_);
+
+  {
+    AutoSchedulerLock auto_lock(outer_->lock_);
+
+    DCHECK(!incremented_worker_capacity_since_blocked_);
+    DCHECK(may_block_start_time_.is_null());
+    may_block_start_time_ = TimeTicks::Now();
+    ++outer_->num_pending_may_block_workers_;
+  }
+  outer_->PostAdjustWorkerCapacityTaskIfNeeded();
+}
+
+void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::WillBlockEntered() {
+  DCHECK_CALLED_ON_VALID_THREAD(worker_thread_checker_);
+
+  bool wake_up_allowed = false;
+  {
+    std::unique_ptr<PriorityQueue::Transaction> shared_transaction(
+        outer_->shared_priority_queue_.BeginTransaction());
+    AutoSchedulerLock auto_lock(outer_->lock_);
+
+    DCHECK(!incremented_worker_capacity_since_blocked_);
+    DCHECK(may_block_start_time_.is_null());
+    incremented_worker_capacity_since_blocked_ = true;
+    outer_->IncrementWorkerCapacityLockRequired();
+
+    // If the number of workers was less than the old worker capacity, PostTask
+    // would've handled creating extra workers during WakeUpOneWorker.
+    // Therefore, we don't need to do anything here.
+    if (outer_->workers_.size() < outer_->worker_capacity_ - 1)
+      return;
+
+    if (shared_transaction->IsEmpty()) {
+      outer_->MaintainAtLeastOneIdleWorkerLockRequired();
+    } else {
+      // TODO(crbug.com/757897): We may create extra workers in this case:
+      // |workers_.size()| was equal to the old |worker_capacity_|, we had
+      // multiple ScopedBlockingCalls in parallel and we had work on the PQ.
+      wake_up_allowed = outer_->WakeUpOneWorkerLockRequired();
+      // |wake_up_allowed| is true when the pool is started, and a WILL_BLOCK
+      // scope cannot be entered before the pool starts.
+      DCHECK(wake_up_allowed);
+    }
+  }
+  // TODO(crbug.com/813857): This can be better handled in the PostTask()
+  // codepath. We really only should do this if there are tasks pending.
+  if (wake_up_allowed)
+    outer_->PostAdjustWorkerCapacityTaskIfNeeded();
+}
+
+bool SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::
+    MustIncrementWorkerCapacityLockRequired() {
+  outer_->lock_.AssertAcquired();
+
+  if (!incremented_worker_capacity_since_blocked_ &&
+      !may_block_start_time_.is_null() &&
+      TimeTicks::Now() - may_block_start_time_ >= outer_->MayBlockThreshold()) {
+    incremented_worker_capacity_since_blocked_ = true;
+
+    // Reset |may_block_start_time_| so that BlockingEnded() knows that it
+    // doesn't have to decrement |outer_->num_pending_may_block_workers_|.
+    may_block_start_time_ = TimeTicks();
+    --outer_->num_pending_may_block_workers_;
+
+    return true;
+  }
+
+  return false;
+}
+
+void SchedulerWorkerPoolImpl::WaitForWorkersIdleLockRequiredForTesting(
+    size_t n) {
+  lock_.AssertAcquired();
+
+  // Make sure workers do not cleanup while watching the idle count.
+  AutoReset<bool> ban_cleanups(&worker_cleanup_disallowed_for_testing_, true);
+
+  while (idle_workers_stack_.Size() < n)
+    idle_workers_stack_cv_for_testing_->Wait();
+}
+
+bool SchedulerWorkerPoolImpl::WakeUpOneWorkerLockRequired() {
+  lock_.AssertAcquired();
+
+  if (workers_.empty()) {
+    ++num_wake_ups_before_start_;
+    return false;
+  }
+
+  // Ensure that there is one worker that can run tasks on top of the idle
+  // stack, capacity permitting.
+  MaintainAtLeastOneIdleWorkerLockRequired();
+
+  // If the worker on top of the idle stack can run tasks, wake it up.
+  if (NumberOfExcessWorkersLockRequired() < idle_workers_stack_.Size()) {
+    SchedulerWorker* worker = idle_workers_stack_.Pop();
+    if (worker) {
+      SchedulerWorkerDelegateImpl* delegate =
+          static_cast<SchedulerWorkerDelegateImpl*>(worker->delegate());
+      delegate->UnSetIsOnIdleWorkersStackLockRequired(worker);
+      worker->WakeUp();
+    }
+  }
+
+  // Ensure that there is one worker that can run tasks on top of the idle
+  // stack, capacity permitting.
+  MaintainAtLeastOneIdleWorkerLockRequired();
+
+  return true;
+}
+
+void SchedulerWorkerPoolImpl::WakeUpOneWorker() {
+  bool wake_up_allowed;
+  {
+    AutoSchedulerLock auto_lock(lock_);
+    wake_up_allowed = WakeUpOneWorkerLockRequired();
+  }
+  if (wake_up_allowed)
+    PostAdjustWorkerCapacityTaskIfNeeded();
+}
+
+void SchedulerWorkerPoolImpl::MaintainAtLeastOneIdleWorkerLockRequired() {
+  lock_.AssertAcquired();
+
+  if (workers_.size() == kMaxNumberOfWorkers)
+    return;
+  DCHECK_LT(workers_.size(), kMaxNumberOfWorkers);
+
+  if (idle_workers_stack_.IsEmpty() && workers_.size() < worker_capacity_) {
+    SchedulerWorker* new_worker =
+        CreateRegisterAndStartSchedulerWorkerLockRequired();
+    if (new_worker)
+      idle_workers_stack_.Push(new_worker);
+  }
+}
+
+void SchedulerWorkerPoolImpl::AddToIdleWorkersStackLockRequired(
+    SchedulerWorker* worker) {
+  lock_.AssertAcquired();
+
+  DCHECK(!idle_workers_stack_.Contains(worker));
+  idle_workers_stack_.Push(worker);
+
+  DCHECK_LE(idle_workers_stack_.Size(), workers_.size());
+
+  idle_workers_stack_cv_for_testing_->Broadcast();
+}
+
+const SchedulerWorker*
+SchedulerWorkerPoolImpl::PeekAtIdleWorkersStackLockRequired() const {
+  lock_.AssertAcquired();
+  return idle_workers_stack_.Peek();
+}
+
+void SchedulerWorkerPoolImpl::RemoveFromIdleWorkersStackLockRequired(
+    SchedulerWorker* worker) {
+  lock_.AssertAcquired();
+  idle_workers_stack_.Remove(worker);
+}
+
+SchedulerWorker*
+SchedulerWorkerPoolImpl::CreateRegisterAndStartSchedulerWorkerLockRequired() {
+  lock_.AssertAcquired();
+
+  DCHECK_LT(workers_.size(), worker_capacity_);
+  DCHECK_LT(workers_.size(), kMaxNumberOfWorkers);
+  // SchedulerWorker needs |lock_| as a predecessor for its thread lock
+  // because in WakeUpOneWorker, |lock_| is first acquired and then
+  // the thread lock is acquired when WakeUp is called on the worker.
+  scoped_refptr<SchedulerWorker> worker = MakeRefCounted<SchedulerWorker>(
+      priority_hint_,
+      std::make_unique<SchedulerWorkerDelegateImpl>(
+          tracked_ref_factory_.GetTrackedRef()),
+      task_tracker_, &lock_, backward_compatibility_);
+
+  if (!worker->Start(scheduler_worker_observer_))
+    return nullptr;
+
+  workers_.push_back(worker);
+  DCHECK_LE(workers_.size(), worker_capacity_);
+
+  if (!cleanup_timestamps_.empty()) {
+    detach_duration_histogram_->AddTime(TimeTicks::Now() -
+                                        cleanup_timestamps_.top());
+    cleanup_timestamps_.pop();
+  }
+  return worker.get();
+}
+
+size_t SchedulerWorkerPoolImpl::NumberOfExcessWorkersLockRequired() const {
+  lock_.AssertAcquired();
+  return std::max<int>(0, workers_.size() - worker_capacity_);
+}
+
+void SchedulerWorkerPoolImpl::AdjustWorkerCapacity() {
+  DCHECK(service_thread_task_runner_->RunsTasksInCurrentSequence());
+
+  std::unique_ptr<PriorityQueue::Transaction> shared_transaction(
+      shared_priority_queue_.BeginTransaction());
+  AutoSchedulerLock auto_lock(lock_);
+
+  const size_t original_worker_capacity = worker_capacity_;
+
+  // Increment worker capacity for each worker that has been within a MAY_BLOCK
+  // ScopedBlockingCall for more than MayBlockThreshold().
+  for (scoped_refptr<SchedulerWorker> worker : workers_) {
+    // The delegates of workers inside a SchedulerWorkerPoolImpl should be
+    // SchedulerWorkerDelegateImpls.
+    SchedulerWorkerDelegateImpl* delegate =
+        static_cast<SchedulerWorkerDelegateImpl*>(worker->delegate());
+    if (delegate->MustIncrementWorkerCapacityLockRequired())
+      IncrementWorkerCapacityLockRequired();
+  }
+
+  // Wake up a worker per pending sequence, capacity permitting.
+  const size_t num_pending_sequences = shared_transaction->Size();
+  const size_t num_wake_ups_needed = std::min(
+      worker_capacity_ - original_worker_capacity, num_pending_sequences);
+
+  for (size_t i = 0; i < num_wake_ups_needed; ++i) {
+    // No need to call PostAdjustWorkerCapacityTaskIfNeeded() as the caller will
+    // take care of that for us.
+    WakeUpOneWorkerLockRequired();
+  }
+
+  MaintainAtLeastOneIdleWorkerLockRequired();
+}
+
+TimeDelta SchedulerWorkerPoolImpl::MayBlockThreshold() const {
+  if (maximum_blocked_threshold_for_testing_.IsSet())
+    return TimeDelta::Max();
+  // This value was set unscientifically based on intuition and may be adjusted
+  // in the future. This value is smaller than |kBlockedWorkersPollPeriod|
+  // because we hope that when multiple workers block around the same time, a
+  // single AdjustWorkerCapacity() call will perform all the necessary capacity
+  // adjustments.
+  return TimeDelta::FromMilliseconds(10);
+}
+
+void SchedulerWorkerPoolImpl::PostAdjustWorkerCapacityTaskIfNeeded() {
+  {
+    AutoSchedulerLock auto_lock(lock_);
+    if (polling_worker_capacity_ ||
+        !ShouldPeriodicallyAdjustWorkerCapacityLockRequired()) {
+      return;
+    }
+    polling_worker_capacity_ = true;
+  }
+  service_thread_task_runner_->PostDelayedTask(
+      FROM_HERE,
+      BindOnce(&SchedulerWorkerPoolImpl::AdjustWorkerCapacityTaskFunction,
+               Unretained(this)),
+      kBlockedWorkersPollPeriod);
+}
+
+void SchedulerWorkerPoolImpl::AdjustWorkerCapacityTaskFunction() {
+  DCHECK(service_thread_task_runner_->RunsTasksInCurrentSequence());
+
+  AdjustWorkerCapacity();
+  {
+    AutoSchedulerLock auto_lock(lock_);
+    DCHECK(polling_worker_capacity_);
+
+    if (!ShouldPeriodicallyAdjustWorkerCapacityLockRequired()) {
+      polling_worker_capacity_ = false;
+      return;
+    }
+  }
+  service_thread_task_runner_->PostDelayedTask(
+      FROM_HERE,
+      BindOnce(&SchedulerWorkerPoolImpl::AdjustWorkerCapacityTaskFunction,
+               Unretained(this)),
+      kBlockedWorkersPollPeriod);
+}
+
+bool SchedulerWorkerPoolImpl::
+    ShouldPeriodicallyAdjustWorkerCapacityLockRequired() {
+  lock_.AssertAcquired();
+  // AdjustWorkerCapacity() must be called periodically when (1) there are no
+  // idle workers that can do work and (2) there are workers that are within
+  // the scope of a MAY_BLOCK ScopedBlockingCall but haven't caused a capacity
+  // increment yet.
+  //
+  // - When (1) is false: A newly posted task will run on one of the idle
+  //   workers that are allowed to do work. There is no hurry to increase
+  //   capacity.
+  // - When (2) is false: AdjustWorkerCapacity() would be a no-op.
+  const int idle_workers_that_can_do_work =
+      idle_workers_stack_.Size() - NumberOfExcessWorkersLockRequired();
+  return idle_workers_that_can_do_work <= 0 &&
+         num_pending_may_block_workers_ > 0;
+}
+
+void SchedulerWorkerPoolImpl::DecrementWorkerCapacityLockRequired() {
+  lock_.AssertAcquired();
+  --worker_capacity_;
+}
+
+void SchedulerWorkerPoolImpl::IncrementWorkerCapacityLockRequired() {
+  lock_.AssertAcquired();
+  ++worker_capacity_;
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/task_scheduler/scheduler_worker_pool_impl.h b/base/task_scheduler/scheduler_worker_pool_impl.h
new file mode 100644
index 0000000..d9a169b
--- /dev/null
+++ b/base/task_scheduler/scheduler_worker_pool_impl.h
@@ -0,0 +1,341 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_SCHEDULER_WORKER_POOL_IMPL_H_
+#define BASE_TASK_SCHEDULER_SCHEDULER_WORKER_POOL_IMPL_H_
+
+#include <stddef.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/containers/stack.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/strings/string_piece.h"
+#include "base/synchronization/atomic_flag.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/task_runner.h"
+#include "base/task_scheduler/priority_queue.h"
+#include "base/task_scheduler/scheduler_lock.h"
+#include "base/task_scheduler/scheduler_worker.h"
+#include "base/task_scheduler/scheduler_worker_pool.h"
+#include "base/task_scheduler/scheduler_worker_stack.h"
+#include "base/task_scheduler/sequence.h"
+#include "base/task_scheduler/task.h"
+#include "base/task_scheduler/tracked_ref.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+
+namespace base {
+
+class HistogramBase;
+class SchedulerWorkerObserver;
+class SchedulerWorkerPoolParams;
+
+namespace internal {
+
+class DelayedTaskManager;
+class TaskTracker;
+
+// A pool of workers that run Tasks.
+//
+// The pool doesn't create threads until Start() is called. Tasks can be posted
+// at any time but will not run until after Start() is called.
+//
+// This class is thread-safe.
+class BASE_EXPORT SchedulerWorkerPoolImpl : public SchedulerWorkerPool {
+ public:
+  enum class WorkerEnvironment {
+    // No special worker environment required.
+    NONE,
+#if defined(OS_WIN)
+    // Initialize a COM MTA on the worker.
+    COM_MTA,
+#endif  // defined(OS_WIN)
+  };
+
+  // Constructs a pool without workers.
+  //
+  // |histogram_label| is used to label the pool's histograms ("TaskScheduler."
+  // + histogram_name + "." + |histogram_label| + extra suffixes), it must not
+  // be empty. |pool_label| is used to label the pool's threads, it must not be
+  // empty. |priority_hint| is the preferred thread priority; the actual thread
+  // priority depends on shutdown state and platform capabilities.
+  // |task_tracker| keeps track of tasks. |delayed_task_manager| handles tasks
+  // posted with a delay.
+  SchedulerWorkerPoolImpl(StringPiece histogram_label,
+                          StringPiece pool_label,
+                          ThreadPriority priority_hint,
+                          TrackedRef<TaskTracker> task_tracker,
+                          DelayedTaskManager* delayed_task_manager);
+
+  // Creates workers following the |params| specification, allowing existing and
+  // future tasks to run. Uses |service_thread_task_runner| to monitor for
+  // blocked threads in the pool. If specified, |scheduler_worker_observer| will
+  // be notified when a worker enters and exits its main function. It must not
+  // be destroyed before JoinForTesting() has returned (must never be destroyed
+  // in production). |worker_environment| specifies the environment in which
+  // tasks are executed. Can only be called once. CHECKs on failure.
+  void Start(const SchedulerWorkerPoolParams& params,
+             scoped_refptr<TaskRunner> service_thread_task_runner,
+             SchedulerWorkerObserver* scheduler_worker_observer,
+             WorkerEnvironment worker_environment);
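+
+  // Illustrative call (a sketch; parameter values are arbitrary examples,
+  // see scheduler_worker_pool_params.h for the exact
+  // SchedulerWorkerPoolParams signature):
+  //   pool.Start(SchedulerWorkerPoolParams(4, TimeDelta::FromSeconds(30)),
+  //              service_thread_task_runner, nullptr,
+  //              WorkerEnvironment::NONE);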
+
+  // Destroying a SchedulerWorkerPoolImpl is not allowed in production; it is
+  // always leaked. In tests, it can only be destroyed after JoinForTesting()
+  // has returned.
+  ~SchedulerWorkerPoolImpl() override;
+
+  // SchedulerWorkerPool:
+  void JoinForTesting() override;
+
+  const HistogramBase* num_tasks_before_detach_histogram() const {
+    return num_tasks_before_detach_histogram_;
+  }
+
+  const HistogramBase* num_tasks_between_waits_histogram() const {
+    return num_tasks_between_waits_histogram_;
+  }
+
+  void GetHistograms(std::vector<const HistogramBase*>* histograms) const;
+
+  // Returns the maximum number of non-blocked tasks that can run concurrently
+  // in this pool.
+  //
+  // TODO(fdoray): Remove this method. https://crbug.com/687264
+  int GetMaxConcurrentNonBlockedTasksDeprecated() const;
+
+  // Waits until at least |n| workers are idle. Note that although workers are
+  // disallowed from cleaning up during this call, tests using a custom
+  // |suggested_reclaim_time_| need to be careful to invoke this swiftly after
+  // unblocking the waited-upon workers: if a worker is already detached by
+  // the time this is invoked, it will never make it onto the idle stack and
+  // this call will hang.
+  void WaitForWorkersIdleForTesting(size_t n);
+
+  // Waits until all workers are idle.
+  void WaitForAllWorkersIdleForTesting();
+
+  // Waits until |n| workers have cleaned up. Tests that use this must:
+  //  - Invoke WaitForWorkersCleanedUpForTesting(n) well before any workers
+  //    have had time to clean up.
+  //  - Have a long enough |suggested_reclaim_time_| to strengthen the above.
+  //  - Only invoke this once (currently doesn't support waiting for multiple
+  //    cleanup phases in the same test).
+  void WaitForWorkersCleanedUpForTesting(size_t n);
+
+  // Returns the number of workers in this worker pool.
+  size_t NumberOfWorkersForTesting() const;
+
+  // Returns |worker_capacity_|.
+  size_t GetWorkerCapacityForTesting() const;
+
+  // Returns the number of workers that are idle (i.e. not running tasks).
+  size_t NumberOfIdleWorkersForTesting() const;
+
+  // Sets the MayBlock waiting threshold to TimeDelta::Max().
+  void MaximizeMayBlockThresholdForTesting();
+
+ private:
+  class SchedulerWorkerDelegateImpl;
+
+  // Friend tests so that they can access |kBlockedWorkersPollPeriod| and
+  // MayBlockThreshold().
+  friend class TaskSchedulerWorkerPoolBlockingTest;
+  friend class TaskSchedulerWorkerPoolMayBlockTest;
+
+  // The period between calls to AdjustWorkerCapacity() when the pool is at
+  // capacity. This value was set unscientifically based on intuition and may be
+  // adjusted in the future.
+  static constexpr TimeDelta kBlockedWorkersPollPeriod =
+      TimeDelta::FromMilliseconds(50);
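+
+  // Consequently, a worker blocked in a MAY_BLOCK scope is compensated for
+  // within roughly MayBlockThreshold() + kBlockedWorkersPollPeriod of
+  // entering the scope (a poll may fire just before the threshold elapses,
+  // deferring the capacity increment to the next poll).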
+
+  // SchedulerWorkerPool:
+  void OnCanScheduleSequence(scoped_refptr<Sequence> sequence) override;
+
+  // Waits until at least |n| workers are idle. |lock_| must be held to call
+  // this function.
+  void WaitForWorkersIdleLockRequiredForTesting(size_t n);
+
+  // Wakes up the last worker from this worker pool to go idle, if any.
+  void WakeUpOneWorker();
+
+  // Performs the same action as WakeUpOneWorker() except asserts |lock_| is
+  // acquired rather than acquires it and returns true if worker wakeups are
+  // permitted.
+  bool WakeUpOneWorkerLockRequired();
+
+  // Adds a worker, if needed, to maintain one idle worker, |worker_capacity_|
+  // permitting.
+  void MaintainAtLeastOneIdleWorkerLockRequired();
+
+  // Adds |worker| to |idle_workers_stack_|.
+  void AddToIdleWorkersStackLockRequired(SchedulerWorker* worker);
+
+  // Peeks from |idle_workers_stack_|.
+  const SchedulerWorker* PeekAtIdleWorkersStackLockRequired() const;
+
+  // Removes |worker| from |idle_workers_stack_|.
+  void RemoveFromIdleWorkersStackLockRequired(SchedulerWorker* worker);
+
+  // Returns true if worker cleanup is permitted.
+  bool CanWorkerCleanupForTestingLockRequired();
+
+  // Tries to add a new SchedulerWorker to the pool. Returns the new
+  // SchedulerWorker on success, nullptr otherwise. Cannot be called before
+  // Start(). Must be called under the protection of |lock_|.
+  SchedulerWorker* CreateRegisterAndStartSchedulerWorkerLockRequired();
+
+  // Returns the number of workers in the pool that should not run tasks due to
+  // the pool being over worker capacity.
+  size_t NumberOfExcessWorkersLockRequired() const;
+
+  // Examines the list of SchedulerWorkers and increments |worker_capacity_| for
+  // each worker that has been within the scope of a MAY_BLOCK
+  // ScopedBlockingCall for more than MayBlockThreshold().
+  void AdjustWorkerCapacity();
+
+  // Returns the threshold after which the worker capacity is increased to
+  // compensate for a worker that is within a MAY_BLOCK ScopedBlockingCall.
+  TimeDelta MayBlockThreshold() const;
+
+  // Starts calling AdjustWorkerCapacity() periodically on
+  // |service_thread_task_runner_| if not already requested.
+  void PostAdjustWorkerCapacityTaskIfNeeded();
+
+  // Calls AdjustWorkerCapacity() and schedules it again as necessary. May only
+  // be called from the service thread.
+  void AdjustWorkerCapacityTaskFunction();
+
+  // Returns true if AdjustWorkerCapacity() should periodically be called on
+  // |service_thread_task_runner_|.
+  bool ShouldPeriodicallyAdjustWorkerCapacityLockRequired();
+
+  void DecrementWorkerCapacityLockRequired();
+  void IncrementWorkerCapacityLockRequired();
+
+  const std::string pool_label_;
+  const ThreadPriority priority_hint_;
+
+  // PriorityQueue from which all threads of this worker pool get work.
+  PriorityQueue shared_priority_queue_;
+
+  // Suggested reclaim time for workers. Initialized by Start(). Never modified
+  // afterwards (i.e. can be read without synchronization after Start()).
+  TimeDelta suggested_reclaim_time_;
+
+  SchedulerBackwardCompatibility backward_compatibility_;
+
+  // Synchronizes accesses to |workers_|, |worker_capacity_|,
+  // |num_pending_may_block_workers_|, |idle_workers_stack_|,
+  // |idle_workers_stack_cv_for_testing_|, |num_wake_ups_before_start_|,
+  // |cleanup_timestamps_|, |polling_worker_capacity_|,
+  // |worker_cleanup_disallowed_for_testing_|,
+  // |num_workers_cleaned_up_for_testing_|,
+  // |SchedulerWorkerDelegateImpl::is_on_idle_workers_stack_|,
+  // |SchedulerWorkerDelegateImpl::incremented_worker_capacity_since_blocked_|
+  // and |SchedulerWorkerDelegateImpl::may_block_start_time_|. Has
+  // |shared_priority_queue_|'s lock as its predecessor so that a worker can be
+  // pushed to |idle_workers_stack_| within the scope of a Transaction (more
+  // details in GetWork()).
+  mutable SchedulerLock lock_;
+
+  // All workers owned by this worker pool.
+  std::vector<scoped_refptr<SchedulerWorker>> workers_;
+
+  // Workers can be added as needed up until there are |worker_capacity_|
+  // workers.
+  size_t worker_capacity_ = 0;
+
+  // Initial value of |worker_capacity_| as set in Start().
+  size_t initial_worker_capacity_ = 0;
+
+  // Number of workers that are within the scope of a MAY_BLOCK
+  // ScopedBlockingCall but haven't caused a worker capacity increase yet.
+  int num_pending_may_block_workers_ = 0;
+
+  // Environment to be initialized per worker.
+  WorkerEnvironment worker_environment_ = WorkerEnvironment::NONE;
+
+  // Stack of idle workers. Initially, all workers are on this stack. A worker
+  // is removed from the stack before its WakeUp() function is called and when
+  // it receives work from GetWork() (a worker calls GetWork() when its sleep
+  // timeout expires, even if its WakeUp() method hasn't been called). A worker
+  // is pushed on this stack when it receives nullptr from GetWork().
+  SchedulerWorkerStack idle_workers_stack_;
+
+  // Signaled when a worker is added to the idle workers stack.
+  std::unique_ptr<ConditionVariable> idle_workers_stack_cv_for_testing_;
+
+  // Number of wake ups that occurred before Start(). Never modified after
+  // Start() (i.e. can be read without synchronization after Start()).
+  int num_wake_ups_before_start_ = 0;
+
+  // Stack that contains the timestamps of when workers get cleaned up.
+  // Timestamps get popped off the stack as new workers are added.
+  base::stack<TimeTicks, std::vector<TimeTicks>> cleanup_timestamps_;
+
+  // Whether we are currently polling for necessary adjustments to
+  // |worker_capacity_|.
+  bool polling_worker_capacity_ = false;
+
+  // Indicates to the delegates that workers are not permitted to clean up.
+  bool worker_cleanup_disallowed_for_testing_ = false;
+
+  // Counts the number of workers cleaned up since Start(). Tests with a custom
+  // |suggested_reclaim_time_| can wait on a specific number of workers being
+  // cleaned up via WaitForWorkersCleanedUpForTesting().
+  size_t num_workers_cleaned_up_for_testing_ = 0;
+
+  // Signaled, if non-null, when |num_workers_cleaned_up_for_testing_| is
+  // incremented.
+  std::unique_ptr<ConditionVariable> num_workers_cleaned_up_for_testing_cv_;
+
+  // Set during tests to make MayBlockThreshold() return the maximum
+  // TimeDelta.
+  AtomicFlag maximum_blocked_threshold_for_testing_;
+
+#if DCHECK_IS_ON()
+  // Set at the start of JoinForTesting().
+  AtomicFlag join_for_testing_started_;
+#endif
+
+  // TaskScheduler.DetachDuration.[worker pool name] histogram. Intentionally
+  // leaked.
+  HistogramBase* const detach_duration_histogram_;
+
+  // TaskScheduler.NumTasksBeforeDetach.[worker pool name] histogram.
+  // Intentionally leaked.
+  HistogramBase* const num_tasks_before_detach_histogram_;
+
+  // TaskScheduler.NumTasksBetweenWaits.[worker pool name] histogram.
+  // Intentionally leaked.
+  HistogramBase* const num_tasks_between_waits_histogram_;
+
+  scoped_refptr<TaskRunner> service_thread_task_runner_;
+
+  // Optional observer notified when a worker enters and exits its main
+  // function. Set in Start() and never modified afterwards.
+  SchedulerWorkerObserver* scheduler_worker_observer_ = nullptr;
+
+  // Ensures recently cleaned-up workers (see
+  // SchedulerWorkerDelegateImpl::CleanupLockRequired()) have had time to exit,
+  // as they hold a raw reference to |this| (and to TaskTracker) which could
+  // otherwise result in racy use-after-frees: once cleaned up, a worker is no
+  // longer part of |workers_| and hence isn't explicitly joined in
+  // JoinForTesting(). See https://crbug.com/810464.
+  TrackedRefFactory<SchedulerWorkerPoolImpl> tracked_ref_factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(SchedulerWorkerPoolImpl);
+};
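+
+// A minimal usage sketch, mirroring scheduler_worker_pool_impl_unittest.cc
+// (illustrative only, not normative API documentation):
+//
+//   SchedulerWorkerPoolImpl pool("PoolName", "A", ThreadPriority::NORMAL,
+//                                task_tracker.GetTrackedRef(),
+//                                &delayed_task_manager);
+//   pool.Start(SchedulerWorkerPoolParams(4, TimeDelta::Max()),
+//              service_thread_task_runner, nullptr,
+//              SchedulerWorkerPoolImpl::WorkerEnvironment::NONE);
+//   pool.CreateTaskRunnerWithTraits({})->PostTask(FROM_HERE, DoNothing());
+//   pool.JoinForTesting();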
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_SCHEDULER_WORKER_POOL_IMPL_H_
diff --git a/base/task_scheduler/scheduler_worker_pool_impl_unittest.cc b/base/task_scheduler/scheduler_worker_pool_impl_unittest.cc
new file mode 100644
index 0000000..5f099b3
--- /dev/null
+++ b/base/task_scheduler/scheduler_worker_pool_impl_unittest.cc
@@ -0,0 +1,1566 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/scheduler_worker_pool_impl.h"
+
+#include <stddef.h>
+
+#include <memory>
+#include <unordered_set>
+#include <vector>
+
+#include "base/atomicops.h"
+#include "base/barrier_closure.h"
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/memory/ref_counted.h"
+#include "base/metrics/histogram.h"
+#include "base/metrics/histogram_samples.h"
+#include "base/metrics/statistics_recorder.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/task_runner.h"
+#include "base/task_scheduler/delayed_task_manager.h"
+#include "base/task_scheduler/scheduler_worker_pool_params.h"
+#include "base/task_scheduler/sequence.h"
+#include "base/task_scheduler/sequence_sort_key.h"
+#include "base/task_scheduler/task_tracker.h"
+#include "base/task_scheduler/test_task_factory.h"
+#include "base/task_scheduler/test_utils.h"
+#include "base/test/gtest_util.h"
+#include "base/test/test_simple_task_runner.h"
+#include "base/test/test_timeouts.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/scoped_blocking_call.h"
+#include "base/threading/simple_thread.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_checker_impl.h"
+#include "base/threading/thread_local_storage.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_WIN)
+#include "base/win/com_init_util.h"
+#endif  // defined(OS_WIN)
+
+namespace base {
+namespace internal {
+namespace {
+
+constexpr size_t kNumWorkersInWorkerPool = 4;
+constexpr size_t kNumThreadsPostingTasks = 4;
+constexpr size_t kNumTasksPostedPerThread = 150;
+// This can't be lower because Windows' WaitableEvent wakes up too early when a
+// small timeout is used. This results in many spurious wake-ups before a
+// worker is allowed to clean up.
+constexpr TimeDelta kReclaimTimeForCleanupTests =
+    TimeDelta::FromMilliseconds(500);
+
+// Waits on |event| in a scope where the blocking observer is null, to avoid
+// affecting the worker capacity.
+void WaitWithoutBlockingObserver(WaitableEvent* event) {
+  internal::ScopedClearBlockingObserverForTesting clear_blocking_observer;
+  event->Wait();
+}
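+
+// Example (the pattern used throughout this file): bind the helper so a posted
+// task blocks on an event without affecting capacity adjustments:
+//
+//   task_runner->PostTask(
+//       FROM_HERE, BindOnce(&WaitWithoutBlockingObserver, Unretained(&event)));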
+
+class TaskSchedulerWorkerPoolImplTestBase {
+ protected:
+  TaskSchedulerWorkerPoolImplTestBase()
+      : service_thread_("TaskSchedulerServiceThread") {}
+
+  void CommonSetUp() {
+    CreateAndStartWorkerPool(TimeDelta::Max(), kNumWorkersInWorkerPool);
+  }
+
+  void CommonTearDown() {
+    service_thread_.Stop();
+    task_tracker_.FlushForTesting();
+    worker_pool_->WaitForAllWorkersIdleForTesting();
+    worker_pool_->JoinForTesting();
+  }
+
+  void CreateWorkerPool() {
+    ASSERT_FALSE(worker_pool_);
+    service_thread_.Start();
+    delayed_task_manager_.Start(service_thread_.task_runner());
+    worker_pool_ = std::make_unique<SchedulerWorkerPoolImpl>(
+        "TestWorkerPool", "A", ThreadPriority::NORMAL,
+        task_tracker_.GetTrackedRef(), &delayed_task_manager_);
+    ASSERT_TRUE(worker_pool_);
+  }
+
+  virtual void StartWorkerPool(TimeDelta suggested_reclaim_time,
+                               size_t num_workers) {
+    ASSERT_TRUE(worker_pool_);
+    worker_pool_->Start(
+        SchedulerWorkerPoolParams(num_workers, suggested_reclaim_time),
+        service_thread_.task_runner(), nullptr,
+        SchedulerWorkerPoolImpl::WorkerEnvironment::NONE);
+  }
+
+  void CreateAndStartWorkerPool(TimeDelta suggested_reclaim_time,
+                                size_t num_workers) {
+    CreateWorkerPool();
+    StartWorkerPool(suggested_reclaim_time, num_workers);
+  }
+
+  Thread service_thread_;
+  TaskTracker task_tracker_ = {"Test"};
+
+  std::unique_ptr<SchedulerWorkerPoolImpl> worker_pool_;
+
+ private:
+  DelayedTaskManager delayed_task_manager_;
+
+  DISALLOW_COPY_AND_ASSIGN(TaskSchedulerWorkerPoolImplTestBase);
+};
+
+class TaskSchedulerWorkerPoolImplTest
+    : public TaskSchedulerWorkerPoolImplTestBase,
+      public testing::Test {
+ protected:
+  TaskSchedulerWorkerPoolImplTest() = default;
+
+  void SetUp() override { TaskSchedulerWorkerPoolImplTestBase::CommonSetUp(); }
+
+  void TearDown() override {
+    TaskSchedulerWorkerPoolImplTestBase::CommonTearDown();
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(TaskSchedulerWorkerPoolImplTest);
+};
+
+class TaskSchedulerWorkerPoolImplTestParam
+    : public TaskSchedulerWorkerPoolImplTestBase,
+      public testing::TestWithParam<test::ExecutionMode> {
+ protected:
+  TaskSchedulerWorkerPoolImplTestParam() = default;
+
+  void SetUp() override { TaskSchedulerWorkerPoolImplTestBase::CommonSetUp(); }
+
+  void TearDown() override {
+    TaskSchedulerWorkerPoolImplTestBase::CommonTearDown();
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(TaskSchedulerWorkerPoolImplTestParam);
+};
+
+using PostNestedTask = test::TestTaskFactory::PostNestedTask;
+
+class ThreadPostingTasksWaitIdle : public SimpleThread {
+ public:
+  // Constructs a thread that posts tasks to |worker_pool| through an
+  // |execution_mode| task runner. The thread waits until all workers in
+  // |worker_pool| are idle before posting a new task.
+  ThreadPostingTasksWaitIdle(SchedulerWorkerPoolImpl* worker_pool,
+                             test::ExecutionMode execution_mode)
+      : SimpleThread("ThreadPostingTasksWaitIdle"),
+        worker_pool_(worker_pool),
+        factory_(CreateTaskRunnerWithExecutionMode(worker_pool, execution_mode),
+                 execution_mode) {
+    DCHECK(worker_pool_);
+  }
+
+  const test::TestTaskFactory* factory() const { return &factory_; }
+
+ private:
+  void Run() override {
+    EXPECT_FALSE(factory_.task_runner()->RunsTasksInCurrentSequence());
+
+    for (size_t i = 0; i < kNumTasksPostedPerThread; ++i) {
+      worker_pool_->WaitForAllWorkersIdleForTesting();
+      EXPECT_TRUE(factory_.PostTask(PostNestedTask::NO, Closure()));
+    }
+  }
+
+  SchedulerWorkerPoolImpl* const worker_pool_;
+  const scoped_refptr<TaskRunner> task_runner_;
+  test::TestTaskFactory factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(ThreadPostingTasksWaitIdle);
+};
+
+}  // namespace
+
+TEST_P(TaskSchedulerWorkerPoolImplTestParam, PostTasksWaitAllWorkersIdle) {
+  // Create threads to post tasks. To verify that workers can sleep and be woken
+  // up when new tasks are posted, wait for all workers to become idle before
+  // posting a new task.
+  std::vector<std::unique_ptr<ThreadPostingTasksWaitIdle>>
+      threads_posting_tasks;
+  for (size_t i = 0; i < kNumThreadsPostingTasks; ++i) {
+    threads_posting_tasks.push_back(
+        std::make_unique<ThreadPostingTasksWaitIdle>(worker_pool_.get(),
+                                                     GetParam()));
+    threads_posting_tasks.back()->Start();
+  }
+
+  // Wait for all tasks to run.
+  for (const auto& thread_posting_tasks : threads_posting_tasks) {
+    thread_posting_tasks->Join();
+    thread_posting_tasks->factory()->WaitForAllTasksToRun();
+  }
+
+  // Wait until all workers are idle to be sure that no task accesses its
+  // TestTaskFactory after |thread_posting_tasks| is destroyed.
+  worker_pool_->WaitForAllWorkersIdleForTesting();
+}
+
+TEST_P(TaskSchedulerWorkerPoolImplTestParam, PostTasksWithOneAvailableWorker) {
+  // Post blocking tasks to keep all workers busy except one until |event| is
+  // signaled. Use different factories so that tasks are added to different
+  // sequences and can run simultaneously when the execution mode is SEQUENCED.
+  WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+                      WaitableEvent::InitialState::NOT_SIGNALED);
+  std::vector<std::unique_ptr<test::TestTaskFactory>> blocked_task_factories;
+  for (size_t i = 0; i < (kNumWorkersInWorkerPool - 1); ++i) {
+    blocked_task_factories.push_back(std::make_unique<test::TestTaskFactory>(
+        CreateTaskRunnerWithExecutionMode(worker_pool_.get(), GetParam()),
+        GetParam()));
+    EXPECT_TRUE(blocked_task_factories.back()->PostTask(
+        PostNestedTask::NO,
+        BindOnce(&WaitWithoutBlockingObserver, Unretained(&event))));
+    blocked_task_factories.back()->WaitForAllTasksToRun();
+  }
+
+  // Post |kNumTasksPostedPerThread| tasks that should all run even though only
+  // one worker in |worker_pool_| isn't busy.
+  test::TestTaskFactory short_task_factory(
+      CreateTaskRunnerWithExecutionMode(worker_pool_.get(), GetParam()),
+      GetParam());
+  for (size_t i = 0; i < kNumTasksPostedPerThread; ++i)
+    EXPECT_TRUE(short_task_factory.PostTask(PostNestedTask::NO, Closure()));
+  short_task_factory.WaitForAllTasksToRun();
+
+  // Release tasks waiting on |event|.
+  event.Signal();
+
+  // Wait until all workers are idle to be sure that no task accesses
+  // its TestTaskFactory after it is destroyed.
+  worker_pool_->WaitForAllWorkersIdleForTesting();
+}
+
+TEST_P(TaskSchedulerWorkerPoolImplTestParam, Saturate) {
+  // Verify that it is possible to have |kNumWorkersInWorkerPool|
+  // tasks/sequences running simultaneously. Use different factories so that the
+  // blocking tasks are added to different sequences and can run simultaneously
+  // when the execution mode is SEQUENCED.
+  WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+                      WaitableEvent::InitialState::NOT_SIGNALED);
+  std::vector<std::unique_ptr<test::TestTaskFactory>> factories;
+  for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) {
+    factories.push_back(std::make_unique<test::TestTaskFactory>(
+        CreateTaskRunnerWithExecutionMode(worker_pool_.get(), GetParam()),
+        GetParam()));
+    EXPECT_TRUE(factories.back()->PostTask(
+        PostNestedTask::NO,
+        BindOnce(&WaitWithoutBlockingObserver, Unretained(&event))));
+    factories.back()->WaitForAllTasksToRun();
+  }
+
+  // Release tasks waiting on |event|.
+  event.Signal();
+
+  // Wait until all workers are idle to be sure that no task accesses
+  // its TestTaskFactory after it is destroyed.
+  worker_pool_->WaitForAllWorkersIdleForTesting();
+}
+
+#if defined(OS_WIN)
+TEST_P(TaskSchedulerWorkerPoolImplTestParam, NoEnvironment) {
+  // Verify that COM is not initialized in a SchedulerWorkerPoolImpl initialized
+  // with SchedulerWorkerPoolImpl::WorkerEnvironment::NONE.
+  scoped_refptr<TaskRunner> task_runner =
+      CreateTaskRunnerWithExecutionMode(worker_pool_.get(), GetParam());
+
+  WaitableEvent task_running(WaitableEvent::ResetPolicy::MANUAL,
+                             WaitableEvent::InitialState::NOT_SIGNALED);
+  task_runner->PostTask(
+      FROM_HERE, BindOnce(
+                     [](WaitableEvent* task_running) {
+                       win::AssertComApartmentType(win::ComApartmentType::NONE);
+                       task_running->Signal();
+                     },
+                     &task_running));
+
+  task_running.Wait();
+
+  worker_pool_->WaitForAllWorkersIdleForTesting();
+}
+#endif  // defined(OS_WIN)
+
+INSTANTIATE_TEST_CASE_P(Parallel,
+                        TaskSchedulerWorkerPoolImplTestParam,
+                        ::testing::Values(test::ExecutionMode::PARALLEL));
+INSTANTIATE_TEST_CASE_P(Sequenced,
+                        TaskSchedulerWorkerPoolImplTestParam,
+                        ::testing::Values(test::ExecutionMode::SEQUENCED));
+
+#if defined(OS_WIN)
+
+namespace {
+
+class TaskSchedulerWorkerPoolImplTestCOMMTAParam
+    : public TaskSchedulerWorkerPoolImplTestBase,
+      public testing::TestWithParam<test::ExecutionMode> {
+ protected:
+  TaskSchedulerWorkerPoolImplTestCOMMTAParam() = default;
+
+  void SetUp() override { TaskSchedulerWorkerPoolImplTestBase::CommonSetUp(); }
+
+  void TearDown() override {
+    TaskSchedulerWorkerPoolImplTestBase::CommonTearDown();
+  }
+
+ private:
+  void StartWorkerPool(TimeDelta suggested_reclaim_time,
+                       size_t num_workers) override {
+    ASSERT_TRUE(worker_pool_);
+    worker_pool_->Start(
+        SchedulerWorkerPoolParams(num_workers, suggested_reclaim_time),
+        service_thread_.task_runner(), nullptr,
+        SchedulerWorkerPoolImpl::WorkerEnvironment::COM_MTA);
+  }
+
+  DISALLOW_COPY_AND_ASSIGN(TaskSchedulerWorkerPoolImplTestCOMMTAParam);
+};
+
+}  // namespace
+
+TEST_P(TaskSchedulerWorkerPoolImplTestCOMMTAParam, COMMTAInitialized) {
+  // Verify that SchedulerWorkerPoolImpl workers have a COM MTA available.
+  scoped_refptr<TaskRunner> task_runner =
+      CreateTaskRunnerWithExecutionMode(worker_pool_.get(), GetParam());
+
+  WaitableEvent task_running(WaitableEvent::ResetPolicy::MANUAL,
+                             WaitableEvent::InitialState::NOT_SIGNALED);
+  task_runner->PostTask(
+      FROM_HERE, BindOnce(
+                     [](WaitableEvent* task_running) {
+                       win::AssertComApartmentType(win::ComApartmentType::MTA);
+                       task_running->Signal();
+                     },
+                     &task_running));
+
+  task_running.Wait();
+
+  worker_pool_->WaitForAllWorkersIdleForTesting();
+}
+
+INSTANTIATE_TEST_CASE_P(Parallel,
+                        TaskSchedulerWorkerPoolImplTestCOMMTAParam,
+                        ::testing::Values(test::ExecutionMode::PARALLEL));
+INSTANTIATE_TEST_CASE_P(Sequenced,
+                        TaskSchedulerWorkerPoolImplTestCOMMTAParam,
+                        ::testing::Values(test::ExecutionMode::SEQUENCED));
+
+#endif  // defined(OS_WIN)
+
+namespace {
+
+class TaskSchedulerWorkerPoolImplPostTaskBeforeStartTest
+    : public TaskSchedulerWorkerPoolImplTest {
+ public:
+  void SetUp() override {
+    CreateWorkerPool();
+    // Let the test start the worker pool.
+  }
+};
+
+void TaskPostedBeforeStart(PlatformThreadRef* platform_thread_ref,
+                           WaitableEvent* task_running,
+                           WaitableEvent* barrier) {
+  *platform_thread_ref = PlatformThread::CurrentRef();
+  task_running->Signal();
+  WaitWithoutBlockingObserver(barrier);
+}
+
+}  // namespace
+
+// Verify that 2 tasks posted before Start() to a SchedulerWorkerPoolImpl with
+// more than 2 workers run on different workers when Start() is called.
+TEST_F(TaskSchedulerWorkerPoolImplPostTaskBeforeStartTest,
+       PostTasksBeforeStart) {
+  PlatformThreadRef task_1_thread_ref;
+  PlatformThreadRef task_2_thread_ref;
+  WaitableEvent task_1_running(WaitableEvent::ResetPolicy::MANUAL,
+                               WaitableEvent::InitialState::NOT_SIGNALED);
+  WaitableEvent task_2_running(WaitableEvent::ResetPolicy::MANUAL,
+                               WaitableEvent::InitialState::NOT_SIGNALED);
+
+  // This event is used to prevent a task from completing before the other task
+  // starts running. If that happened, both tasks could run on the same worker
+  // and this test couldn't verify that the correct number of workers were woken
+  // up.
+  WaitableEvent barrier(WaitableEvent::ResetPolicy::MANUAL,
+                        WaitableEvent::InitialState::NOT_SIGNALED);
+
+  worker_pool_->CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()})
+      ->PostTask(
+          FROM_HERE,
+          BindOnce(&TaskPostedBeforeStart, Unretained(&task_1_thread_ref),
+                   Unretained(&task_1_running), Unretained(&barrier)));
+  worker_pool_->CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()})
+      ->PostTask(
+          FROM_HERE,
+          BindOnce(&TaskPostedBeforeStart, Unretained(&task_2_thread_ref),
+                   Unretained(&task_2_running), Unretained(&barrier)));
+
+  // Workers should not be created and tasks should not run before the pool is
+  // started.
+  EXPECT_EQ(0U, worker_pool_->NumberOfWorkersForTesting());
+  EXPECT_FALSE(task_1_running.IsSignaled());
+  EXPECT_FALSE(task_2_running.IsSignaled());
+
+  StartWorkerPool(TimeDelta::Max(), kNumWorkersInWorkerPool);
+
+  // Tasks should run shortly after the pool is started.
+  task_1_running.Wait();
+  task_2_running.Wait();
+
+  // Tasks should run on different threads.
+  EXPECT_NE(task_1_thread_ref, task_2_thread_ref);
+
+  barrier.Signal();
+  task_tracker_.FlushForTesting();
+}
+
+// Verify that posting many tasks before Start() will cause the number of
+// workers to grow to |worker_capacity_| during Start().
+TEST_F(TaskSchedulerWorkerPoolImplPostTaskBeforeStartTest, PostManyTasks) {
+  scoped_refptr<TaskRunner> task_runner =
+      worker_pool_->CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()});
+  constexpr size_t kNumTasksPosted = 2 * kNumWorkersInWorkerPool;
+  for (size_t i = 0; i < kNumTasksPosted; ++i)
+    task_runner->PostTask(FROM_HERE, DoNothing());
+
+  EXPECT_EQ(0U, worker_pool_->NumberOfWorkersForTesting());
+
+  StartWorkerPool(TimeDelta::Max(), kNumWorkersInWorkerPool);
+  ASSERT_GT(kNumTasksPosted, worker_pool_->GetWorkerCapacityForTesting());
+  EXPECT_EQ(kNumWorkersInWorkerPool,
+            worker_pool_->GetWorkerCapacityForTesting());
+
+  EXPECT_EQ(worker_pool_->NumberOfWorkersForTesting(),
+            worker_pool_->GetWorkerCapacityForTesting());
+}
+
+namespace {
+
+constexpr size_t kMagicTlsValue = 42;
+
+class TaskSchedulerWorkerPoolCheckTlsReuse
+    : public TaskSchedulerWorkerPoolImplTest {
+ public:
+  void SetTlsValueAndWait() {
+    slot_.Set(reinterpret_cast<void*>(kMagicTlsValue));
+    WaitWithoutBlockingObserver(&waiter_);
+  }
+
+  void CountZeroTlsValuesAndWait(WaitableEvent* count_waiter) {
+    if (!slot_.Get())
+      subtle::NoBarrier_AtomicIncrement(&zero_tls_values_, 1);
+
+    count_waiter->Signal();
+    WaitWithoutBlockingObserver(&waiter_);
+  }
+
+ protected:
+  TaskSchedulerWorkerPoolCheckTlsReuse() :
+      waiter_(WaitableEvent::ResetPolicy::MANUAL,
+              WaitableEvent::InitialState::NOT_SIGNALED) {}
+
+  void SetUp() override {
+    CreateAndStartWorkerPool(kReclaimTimeForCleanupTests,
+                             kNumWorkersInWorkerPool);
+  }
+
+  subtle::Atomic32 zero_tls_values_ = 0;
+
+  WaitableEvent waiter_;
+
+ private:
+  ThreadLocalStorage::Slot slot_;
+
+  DISALLOW_COPY_AND_ASSIGN(TaskSchedulerWorkerPoolCheckTlsReuse);
+};
+
+}  // namespace
+
+// Checks that at least one worker has been cleaned up by checking the TLS.
+TEST_F(TaskSchedulerWorkerPoolCheckTlsReuse, CheckCleanupWorkers) {
+  // Saturate the workers and mark each worker's thread with a magic TLS value.
+  std::vector<std::unique_ptr<test::TestTaskFactory>> factories;
+  for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) {
+    factories.push_back(std::make_unique<test::TestTaskFactory>(
+        worker_pool_->CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()}),
+        test::ExecutionMode::PARALLEL));
+    ASSERT_TRUE(factories.back()->PostTask(
+        PostNestedTask::NO,
+        Bind(&TaskSchedulerWorkerPoolCheckTlsReuse::SetTlsValueAndWait,
+             Unretained(this))));
+    factories.back()->WaitForAllTasksToRun();
+  }
+
+  // Release tasks waiting on |waiter_|.
+  waiter_.Signal();
+  worker_pool_->WaitForAllWorkersIdleForTesting();
+
+  // All workers should be done running by now, so reset for the next phase.
+  waiter_.Reset();
+
+  // Wait for the worker pool to clean up at least one worker.
+  worker_pool_->WaitForWorkersCleanedUpForTesting(1U);
+
+  // Saturate the pool again and count the worker threads that do not have the
+  // magic TLS value. If the value is missing, the task ran on a new worker.
+  std::vector<std::unique_ptr<WaitableEvent>> count_waiters;
+  for (auto& factory : factories) {
+    count_waiters.push_back(std::make_unique<WaitableEvent>(
+        WaitableEvent::ResetPolicy::MANUAL,
+        WaitableEvent::InitialState::NOT_SIGNALED));
+    ASSERT_TRUE(factory->PostTask(
+          PostNestedTask::NO,
+          Bind(&TaskSchedulerWorkerPoolCheckTlsReuse::CountZeroTlsValuesAndWait,
+               Unretained(this),
+               count_waiters.back().get())));
+    factory->WaitForAllTasksToRun();
+  }
+
+  // Wait for all counters to complete.
+  for (auto& count_waiter : count_waiters)
+    count_waiter->Wait();
+
+  EXPECT_GT(subtle::NoBarrier_Load(&zero_tls_values_), 0);
+
+  // Release tasks waiting on |waiter_|.
+  waiter_.Signal();
+}
+
+namespace {
+
+class TaskSchedulerWorkerPoolHistogramTest
+    : public TaskSchedulerWorkerPoolImplTest {
+ public:
+  TaskSchedulerWorkerPoolHistogramTest() = default;
+
+ protected:
+  // Override SetUp() to allow every test case to initialize a worker pool with
+  // its own arguments.
+  void SetUp() override {}
+
+  // Floods |worker_pool_| with one task per worker, each of which blocks until
+  // |continue_event| is signaled. Every worker in the pool is blocked on
+  // |continue_event| when this method returns. Note: this helper can easily be
+  // generalized to be useful in other tests, but it's here for now because
+  // it's only used in TaskSchedulerWorkerPoolHistogramTest at the moment.
+  void FloodPool(WaitableEvent* continue_event) {
+    ASSERT_FALSE(continue_event->IsSignaled());
+
+    auto task_runner =
+        worker_pool_->CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()});
+
+    const auto pool_capacity = worker_pool_->GetWorkerCapacityForTesting();
+
+    WaitableEvent workers_flooded(WaitableEvent::ResetPolicy::MANUAL,
+                                  WaitableEvent::InitialState::NOT_SIGNALED);
+    RepeatingClosure all_workers_running_barrier = BarrierClosure(
+        pool_capacity,
+        BindOnce(&WaitableEvent::Signal, Unretained(&workers_flooded)));
+    for (size_t i = 0; i < pool_capacity; ++i) {
+      task_runner->PostTask(
+          FROM_HERE,
+          BindOnce(
+              [](OnceClosure on_running, WaitableEvent* continue_event) {
+                std::move(on_running).Run();
+                WaitWithoutBlockingObserver(continue_event);
+              },
+              all_workers_running_barrier, continue_event));
+    }
+    workers_flooded.Wait();
+  }
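+
+  // Note: BarrierClosure(n, done) used above returns a RepeatingClosure that
+  // runs |done| after it has itself been run n times; this is how FloodPool()
+  // detects that all |pool_capacity| tasks have started running.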
+
+ private:
+  std::unique_ptr<StatisticsRecorder> statistics_recorder_ =
+      StatisticsRecorder::CreateTemporaryForTesting();
+
+  DISALLOW_COPY_AND_ASSIGN(TaskSchedulerWorkerPoolHistogramTest);
+};
+
+}  // namespace
+
+TEST_F(TaskSchedulerWorkerPoolHistogramTest, NumTasksBetweenWaits) {
+  WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+                      WaitableEvent::InitialState::NOT_SIGNALED);
+  CreateAndStartWorkerPool(TimeDelta::Max(), kNumWorkersInWorkerPool);
+  auto task_runner = worker_pool_->CreateSequencedTaskRunnerWithTraits(
+      {WithBaseSyncPrimitives()});
+
+  // Post a task.
+  task_runner->PostTask(
+      FROM_HERE, BindOnce(&WaitWithoutBlockingObserver, Unretained(&event)));
+
+  // Post 2 more tasks while the first task hasn't completed its execution. It
+  // is guaranteed that these tasks will run immediately after the first task,
+  // without allowing the worker to sleep.
+  task_runner->PostTask(FROM_HERE, DoNothing());
+  task_runner->PostTask(FROM_HERE, DoNothing());
+
+  // Allow tasks to run and wait until the SchedulerWorker is idle.
+  event.Signal();
+  worker_pool_->WaitForAllWorkersIdleForTesting();
+
+  // Wake up the SchedulerWorker that just became idle by posting a task and
+  // wait until it becomes idle again. The SchedulerWorker should record the
+  // TaskScheduler.NumTasksBetweenWaits.* histogram on wake up.
+  task_runner->PostTask(FROM_HERE, DoNothing());
+  worker_pool_->WaitForAllWorkersIdleForTesting();
+
+  // Verify that counts were recorded to the histogram as expected.
+  const auto* histogram = worker_pool_->num_tasks_between_waits_histogram();
+  EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(0));
+  EXPECT_EQ(1, histogram->SnapshotSamples()->GetCount(3));
+  EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(10));
+}
+
+// Verifies that NumTasksBetweenWaits histogram is logged as expected across
+// idle and cleanup periods.
+TEST_F(TaskSchedulerWorkerPoolHistogramTest,
+       NumTasksBetweenWaitsWithIdlePeriodAndCleanup) {
+  WaitableEvent tasks_can_exit_event(WaitableEvent::ResetPolicy::MANUAL,
+                                     WaitableEvent::InitialState::NOT_SIGNALED);
+  CreateAndStartWorkerPool(kReclaimTimeForCleanupTests,
+                           kNumWorkersInWorkerPool);
+
+  WaitableEvent workers_continue(WaitableEvent::ResetPolicy::MANUAL,
+                                 WaitableEvent::InitialState::NOT_SIGNALED);
+
+  FloodPool(&workers_continue);
+
+  const auto* histogram = worker_pool_->num_tasks_between_waits_histogram();
+
+  // NumTasksBetweenWaits shouldn't be logged until idle.
+  EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(0));
+  EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(1));
+  EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(10));
+
+  // Make all workers go idle.
+  workers_continue.Signal();
+  worker_pool_->WaitForAllWorkersIdleForTesting();
+
+  // All workers should have reported a single hit in the "1" bucket, since the
+  // histogram is reported when a worker goes idle and each worker processed
+  // exactly 1 task per the controlled flooding logic above.
+  EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(0));
+  EXPECT_EQ(static_cast<int>(kNumWorkersInWorkerPool),
+            histogram->SnapshotSamples()->GetCount(1));
+  EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(10));
+
+  worker_pool_->WaitForWorkersCleanedUpForTesting(kNumWorkersInWorkerPool - 1);
+
+  EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(0));
+  EXPECT_EQ(static_cast<int>(kNumWorkersInWorkerPool),
+            histogram->SnapshotSamples()->GetCount(1));
+  EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(10));
+
+  // Flooding the pool once again (without letting any workers go idle)
+  // shouldn't affect the counts either.
+
+  workers_continue.Reset();
+  FloodPool(&workers_continue);
+
+  EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(0));
+  EXPECT_EQ(static_cast<int>(kNumWorkersInWorkerPool),
+            histogram->SnapshotSamples()->GetCount(1));
+  EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(10));
+
+  workers_continue.Signal();
+  worker_pool_->WaitForAllWorkersIdleForTesting();
+}
+
+TEST_F(TaskSchedulerWorkerPoolHistogramTest, NumTasksBeforeCleanup) {
+  CreateWorkerPool();
+  auto histogrammed_thread_task_runner =
+      worker_pool_->CreateSequencedTaskRunnerWithTraits(
+          {WithBaseSyncPrimitives()});
+
+  // Post 3 tasks and hold the thread to control idle thread stack ordering.
+  // This test assumes |histogrammed_thread_task_runner| gets assigned the same
+  // thread for each of its tasks.
+  PlatformThreadRef thread_ref;
+  histogrammed_thread_task_runner->PostTask(
+      FROM_HERE, BindOnce(
+                     [](PlatformThreadRef* thread_ref) {
+                       ASSERT_TRUE(thread_ref);
+                       *thread_ref = PlatformThread::CurrentRef();
+                     },
+                     Unretained(&thread_ref)));
+  histogrammed_thread_task_runner->PostTask(
+      FROM_HERE, BindOnce(
+                     [](PlatformThreadRef* thread_ref) {
+                       ASSERT_FALSE(thread_ref->is_null());
+                       EXPECT_EQ(*thread_ref, PlatformThread::CurrentRef());
+                     },
+                     Unretained(&thread_ref)));
+
+  WaitableEvent cleanup_thread_running(
+      WaitableEvent::ResetPolicy::MANUAL,
+      WaitableEvent::InitialState::NOT_SIGNALED);
+  WaitableEvent cleanup_thread_continue(
+      WaitableEvent::ResetPolicy::MANUAL,
+      WaitableEvent::InitialState::NOT_SIGNALED);
+  histogrammed_thread_task_runner->PostTask(
+      FROM_HERE,
+      BindOnce(
+          [](PlatformThreadRef* thread_ref,
+             WaitableEvent* cleanup_thread_running,
+             WaitableEvent* cleanup_thread_continue) {
+            ASSERT_FALSE(thread_ref->is_null());
+            EXPECT_EQ(*thread_ref, PlatformThread::CurrentRef());
+            cleanup_thread_running->Signal();
+            WaitWithoutBlockingObserver(cleanup_thread_continue);
+          },
+          Unretained(&thread_ref), Unretained(&cleanup_thread_running),
+          Unretained(&cleanup_thread_continue)));
+
+  // Start the worker pool with 2 workers, to avoid depending on the scheduler's
+  // logic to always keep one extra idle worker.
+  //
+  // The pool is started after the 3 initial tasks have been posted to ensure
+  // that they are scheduled on the same worker. If the tasks could run as they
+  // are posted, there would be a chance that:
+  // 1. Worker #1:        Runs a task and empties the sequence, without adding
+  //                      itself to the idle stack yet.
+  // 2. Posting thread:   Posts another task to the now empty sequence. Wakes
+  //                      up a new worker, since worker #1 isn't on the idle
+  //                      stack yet.
+  // 3. Worker #2:        Runs the task, violating the expectation that the 3
+  //                      initial tasks run on the same worker.
+  constexpr size_t kTwoWorkers = 2;
+  StartWorkerPool(kReclaimTimeForCleanupTests, kTwoWorkers);
+
+  // Wait until the 3rd task is scheduled.
+  cleanup_thread_running.Wait();
+
+  // To allow the SchedulerWorker associated with
+  // |histogrammed_thread_task_runner| to clean up, make sure it isn't on top
+  // of the idle stack: wake up another SchedulerWorker via
+  // |task_runner_for_top_idle|. The |histogrammed_thread_task_runner| worker
+  // releases and goes idle first, then the |task_runner_for_top_idle| worker
+  // releases and goes idle on top of it, leaving the first worker free to
+  // clean up when its reclaim time expires.
+  WaitableEvent top_idle_thread_running(
+      WaitableEvent::ResetPolicy::MANUAL,
+      WaitableEvent::InitialState::NOT_SIGNALED);
+  WaitableEvent top_idle_thread_continue(
+      WaitableEvent::ResetPolicy::MANUAL,
+      WaitableEvent::InitialState::NOT_SIGNALED);
+  auto task_runner_for_top_idle =
+      worker_pool_->CreateSequencedTaskRunnerWithTraits(
+          {WithBaseSyncPrimitives()});
+  task_runner_for_top_idle->PostTask(
+      FROM_HERE, BindOnce(
+                     [](PlatformThreadRef thread_ref,
+                        WaitableEvent* top_idle_thread_running,
+                        WaitableEvent* top_idle_thread_continue) {
+                       ASSERT_FALSE(thread_ref.is_null());
+                       EXPECT_NE(thread_ref, PlatformThread::CurrentRef())
+                           << "Worker reused. Worker will not cleanup and the "
+                              "histogram value will be wrong.";
+                       top_idle_thread_running->Signal();
+                       WaitWithoutBlockingObserver(top_idle_thread_continue);
+                     },
+                     thread_ref, Unretained(&top_idle_thread_running),
+                     Unretained(&top_idle_thread_continue)));
+  top_idle_thread_running.Wait();
+  EXPECT_EQ(0U, worker_pool_->NumberOfIdleWorkersForTesting());
+  cleanup_thread_continue.Signal();
+  // Wait for the cleanup thread to also become idle.
+  worker_pool_->WaitForWorkersIdleForTesting(1U);
+  top_idle_thread_continue.Signal();
+  // Allow the thread processing the |histogrammed_thread_task_runner| work to
+  // cleanup.
+  worker_pool_->WaitForWorkersCleanedUpForTesting(1U);
+
+  // Verify that counts were recorded to the histogram as expected.
+  const auto* histogram = worker_pool_->num_tasks_before_detach_histogram();
+  EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(0));
+  EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(1));
+  EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(2));
+  EXPECT_EQ(1, histogram->SnapshotSamples()->GetCount(3));
+  EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(4));
+  EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(5));
+  EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(6));
+  EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(10));
+}
+
+TEST(TaskSchedulerWorkerPoolStandbyPolicyTest, InitOne) {
+  TaskTracker task_tracker("Test");
+  DelayedTaskManager delayed_task_manager;
+  scoped_refptr<TaskRunner> service_thread_task_runner =
+      MakeRefCounted<TestSimpleTaskRunner>();
+  delayed_task_manager.Start(service_thread_task_runner);
+  auto worker_pool = std::make_unique<SchedulerWorkerPoolImpl>(
+      "OnePolicyWorkerPool", "A", ThreadPriority::NORMAL,
+      task_tracker.GetTrackedRef(), &delayed_task_manager);
+  worker_pool->Start(SchedulerWorkerPoolParams(8U, TimeDelta::Max()),
+                     service_thread_task_runner, nullptr,
+                     SchedulerWorkerPoolImpl::WorkerEnvironment::NONE);
+  ASSERT_TRUE(worker_pool);
+  EXPECT_EQ(1U, worker_pool->NumberOfWorkersForTesting());
+  worker_pool->JoinForTesting();
+}
+
+// Verify the SchedulerWorkerPoolImpl keeps at least one idle standby thread,
+// capacity permitting.
+TEST(TaskSchedulerWorkerPoolStandbyPolicyTest, VerifyStandbyThread) {
+  constexpr size_t kWorkerCapacity = 3;
+
+  TaskTracker task_tracker("Test");
+  DelayedTaskManager delayed_task_manager;
+  scoped_refptr<TaskRunner> service_thread_task_runner =
+      MakeRefCounted<TestSimpleTaskRunner>();
+  delayed_task_manager.Start(service_thread_task_runner);
+  auto worker_pool = std::make_unique<SchedulerWorkerPoolImpl>(
+      "StandbyThreadWorkerPool", "A", ThreadPriority::NORMAL,
+      task_tracker.GetTrackedRef(), &delayed_task_manager);
+  worker_pool->Start(
+      SchedulerWorkerPoolParams(kWorkerCapacity, kReclaimTimeForCleanupTests),
+      service_thread_task_runner, nullptr,
+      SchedulerWorkerPoolImpl::WorkerEnvironment::NONE);
+  ASSERT_TRUE(worker_pool);
+  EXPECT_EQ(1U, worker_pool->NumberOfWorkersForTesting());
+
+  auto task_runner =
+      worker_pool->CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()});
+
+  WaitableEvent thread_running(WaitableEvent::ResetPolicy::AUTOMATIC,
+                               WaitableEvent::InitialState::NOT_SIGNALED);
+  WaitableEvent thread_continue(WaitableEvent::ResetPolicy::MANUAL,
+                                WaitableEvent::InitialState::NOT_SIGNALED);
+
+  RepeatingClosure closure = BindRepeating(
+      [](WaitableEvent* thread_running, WaitableEvent* thread_continue) {
+        thread_running->Signal();
+        WaitWithoutBlockingObserver(thread_continue);
+      },
+      Unretained(&thread_running), Unretained(&thread_continue));
+
+  // There should be one idle thread until we reach worker capacity.
+  for (size_t i = 0; i < kWorkerCapacity; ++i) {
+    EXPECT_EQ(i + 1, worker_pool->NumberOfWorkersForTesting());
+    task_runner->PostTask(FROM_HERE, closure);
+    thread_running.Wait();
+  }
+
+  // There should not be an extra idle thread if it means going above capacity.
+  EXPECT_EQ(kWorkerCapacity, worker_pool->NumberOfWorkersForTesting());
+
+  thread_continue.Signal();
+  // Wait long enough for all but one worker to clean up.
+  worker_pool->WaitForWorkersCleanedUpForTesting(kWorkerCapacity - 1);
+  EXPECT_EQ(1U, worker_pool->NumberOfWorkersForTesting());
+  // Give a worker extra time to clean up: none should, as the pool is expected
+  // to keep a worker ready regardless of how long it has been idle.
+  PlatformThread::Sleep(kReclaimTimeForCleanupTests);
+  EXPECT_EQ(1U, worker_pool->NumberOfWorkersForTesting());
+
+  worker_pool->JoinForTesting();
+}
+
+namespace {
+
+enum class OptionalBlockingType {
+  NO_BLOCK,
+  MAY_BLOCK,
+  WILL_BLOCK,
+};
+
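+// Describes a ScopedBlockingCall that may nest a second one: |first| is the
+// outer call's BlockingType, |second| optionally specifies a nested call, and
+// |behaves_as| is the effective blocking behavior the tests expect from the
+// combination.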
+struct NestedBlockingType {
+  NestedBlockingType(BlockingType first_in,
+                     OptionalBlockingType second_in,
+                     BlockingType behaves_as_in)
+      : first(first_in), second(second_in), behaves_as(behaves_as_in) {}
+
+  BlockingType first;
+  OptionalBlockingType second;
+  BlockingType behaves_as;
+};
+
+class NestedScopedBlockingCall {
+ public:
+  explicit NestedScopedBlockingCall(
+      const NestedBlockingType& nested_blocking_type)
+      : first_scoped_blocking_call_(nested_blocking_type.first),
+        second_scoped_blocking_call_(
+            nested_blocking_type.second == OptionalBlockingType::WILL_BLOCK
+                ? std::make_unique<ScopedBlockingCall>(BlockingType::WILL_BLOCK)
+                : (nested_blocking_type.second ==
+                           OptionalBlockingType::MAY_BLOCK
+                       ? std::make_unique<ScopedBlockingCall>(
+                             BlockingType::MAY_BLOCK)
+                       : nullptr)) {}
+
+ private:
+  ScopedBlockingCall first_scoped_blocking_call_;
+  std::unique_ptr<ScopedBlockingCall> second_scoped_blocking_call_;
+
+  DISALLOW_COPY_AND_ASSIGN(NestedScopedBlockingCall);
+};
+
+}  // namespace
+
+class TaskSchedulerWorkerPoolBlockingTest
+    : public TaskSchedulerWorkerPoolImplTestBase,
+      public testing::TestWithParam<NestedBlockingType> {
+ public:
+  TaskSchedulerWorkerPoolBlockingTest()
+      : blocking_thread_running_(WaitableEvent::ResetPolicy::AUTOMATIC,
+                                 WaitableEvent::InitialState::NOT_SIGNALED),
+        blocking_thread_continue_(WaitableEvent::ResetPolicy::MANUAL,
+                                  WaitableEvent::InitialState::NOT_SIGNALED) {}
+
+  static std::string ParamInfoToString(
+      ::testing::TestParamInfo<NestedBlockingType> param_info) {
+    std::string str = param_info.param.first == BlockingType::MAY_BLOCK
+                          ? "MAY_BLOCK"
+                          : "WILL_BLOCK";
+    if (param_info.param.second == OptionalBlockingType::MAY_BLOCK)
+      str += "_MAY_BLOCK";
+    else if (param_info.param.second == OptionalBlockingType::WILL_BLOCK)
+      str += "_WILL_BLOCK";
+    return str;
+  }
+
+  void SetUp() override {
+    TaskSchedulerWorkerPoolImplTestBase::CommonSetUp();
+    task_runner_ =
+        worker_pool_->CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()});
+  }
+
+  void TearDown() override {
+    TaskSchedulerWorkerPoolImplTestBase::CommonTearDown();
+  }
+
+ protected:
+  // Saturates the worker pool with a task that first blocks, waits to be
+  // unblocked, then exits.
+  void SaturateWithBlockingTasks(
+      const NestedBlockingType& nested_blocking_type) {
+    RepeatingClosure blocking_thread_running_closure =
+        BarrierClosure(kNumWorkersInWorkerPool,
+                       BindOnce(&WaitableEvent::Signal,
+                                Unretained(&blocking_thread_running_)));
+
+    for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) {
+      task_runner_->PostTask(
+          FROM_HERE,
+          BindOnce(
+              [](Closure* blocking_thread_running_closure,
+                 WaitableEvent* blocking_thread_continue_,
+                 const NestedBlockingType& nested_blocking_type) {
+                NestedScopedBlockingCall nested_scoped_blocking_call(
+                    nested_blocking_type);
+                blocking_thread_running_closure->Run();
+                WaitWithoutBlockingObserver(blocking_thread_continue_);
+              },
+              Unretained(&blocking_thread_running_closure),
+              Unretained(&blocking_thread_continue_), nested_blocking_type));
+    }
+    blocking_thread_running_.Wait();
+  }
+
+  // Returns how long it may take for a change to |worker_capacity_| to occur
+  // after a task becomes blocked.
+  TimeDelta GetWorkerCapacityChangeSleepTime() {
+    return std::max(SchedulerWorkerPoolImpl::kBlockedWorkersPollPeriod,
+                    worker_pool_->MayBlockThreshold()) +
+           TestTimeouts::tiny_timeout();
+  }
+
+  // Waits indefinitely, until |worker_pool_|'s worker capacity increases to
+  // |expected_worker_capacity|.
+  void ExpectWorkerCapacityIncreasesTo(size_t expected_worker_capacity) {
+    size_t capacity = worker_pool_->GetWorkerCapacityForTesting();
+    while (capacity != expected_worker_capacity) {
+      PlatformThread::Sleep(GetWorkerCapacityChangeSleepTime());
+      size_t new_capacity = worker_pool_->GetWorkerCapacityForTesting();
+      ASSERT_GE(new_capacity, capacity);
+      capacity = new_capacity;
+    }
+  }
+
+  // Unblocks tasks posted by SaturateWithBlockingTasks().
+  void UnblockTasks() { blocking_thread_continue_.Signal(); }
+
+  scoped_refptr<TaskRunner> task_runner_;
+
+ private:
+  WaitableEvent blocking_thread_running_;
+  WaitableEvent blocking_thread_continue_;
+
+  DISALLOW_COPY_AND_ASSIGN(TaskSchedulerWorkerPoolBlockingTest);
+};
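+
+// Expected behavior exercised by the tests below (a summary inferred from the
+// assertions): a WILL_BLOCK ScopedBlockingCall increases worker capacity
+// immediately, whereas a MAY_BLOCK call only does so once MayBlockThreshold()
+// has elapsed; hence ExpectWorkerCapacityIncreasesTo() polls in the MAY_BLOCK
+// case, and ThreadBlockUnblockPremature expects no increase when the scope
+// exits before the threshold.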
+
+// Verify that BlockingScopeEntered() causes worker capacity to increase and
+// creates a worker if needed. Also verify that BlockingScopeExited() decreases
+// worker capacity after an increase.
+TEST_P(TaskSchedulerWorkerPoolBlockingTest, ThreadBlockedUnblocked) {
+  ASSERT_EQ(worker_pool_->GetWorkerCapacityForTesting(),
+            kNumWorkersInWorkerPool);
+
+  SaturateWithBlockingTasks(GetParam());
+  if (GetParam().behaves_as == BlockingType::MAY_BLOCK)
+    ExpectWorkerCapacityIncreasesTo(2 * kNumWorkersInWorkerPool);
+  // A range of possible number of workers is accepted because of
+  // crbug.com/757897.
+  EXPECT_GE(worker_pool_->NumberOfWorkersForTesting(),
+            kNumWorkersInWorkerPool + 1);
+  EXPECT_LE(worker_pool_->NumberOfWorkersForTesting(),
+            2 * kNumWorkersInWorkerPool);
+  EXPECT_EQ(worker_pool_->GetWorkerCapacityForTesting(),
+            2 * kNumWorkersInWorkerPool);
+
+  UnblockTasks();
+  task_tracker_.FlushForTesting();
+  EXPECT_EQ(worker_pool_->GetWorkerCapacityForTesting(),
+            kNumWorkersInWorkerPool);
+}
+
+// Verify that tasks posted in a saturated pool before a ScopedBlockingCall will
+// execute after ScopedBlockingCall is instantiated.
+TEST_P(TaskSchedulerWorkerPoolBlockingTest, PostBeforeBlocking) {
+  WaitableEvent thread_running(WaitableEvent::ResetPolicy::AUTOMATIC,
+                               WaitableEvent::InitialState::NOT_SIGNALED);
+  WaitableEvent thread_can_block(WaitableEvent::ResetPolicy::MANUAL,
+                                 WaitableEvent::InitialState::NOT_SIGNALED);
+  WaitableEvent thread_continue(WaitableEvent::ResetPolicy::MANUAL,
+                                WaitableEvent::InitialState::NOT_SIGNALED);
+
+  for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) {
+    task_runner_->PostTask(
+        FROM_HERE,
+        BindOnce(
+            [](const NestedBlockingType& nested_blocking_type,
+               WaitableEvent* thread_running, WaitableEvent* thread_can_block,
+               WaitableEvent* thread_continue) {
+              thread_running->Signal();
+              WaitWithoutBlockingObserver(thread_can_block);
+
+              NestedScopedBlockingCall nested_scoped_blocking_call(
+                  nested_blocking_type);
+              WaitWithoutBlockingObserver(thread_continue);
+            },
+            GetParam(), Unretained(&thread_running),
+            Unretained(&thread_can_block), Unretained(&thread_continue)));
+    thread_running.Wait();
+  }
+
+  // All workers should be occupied and the pool should be saturated. Workers
+  // have not entered ScopedBlockingCall yet.
+  EXPECT_EQ(worker_pool_->NumberOfWorkersForTesting(), kNumWorkersInWorkerPool);
+  EXPECT_EQ(worker_pool_->GetWorkerCapacityForTesting(),
+            kNumWorkersInWorkerPool);
+
+  WaitableEvent extra_thread_running(WaitableEvent::ResetPolicy::MANUAL,
+                                     WaitableEvent::InitialState::NOT_SIGNALED);
+  WaitableEvent extra_threads_continue(
+      WaitableEvent::ResetPolicy::MANUAL,
+      WaitableEvent::InitialState::NOT_SIGNALED);
+  RepeatingClosure extra_threads_running_barrier = BarrierClosure(
+      kNumWorkersInWorkerPool,
+      BindOnce(&WaitableEvent::Signal, Unretained(&extra_thread_running)));
+  for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) {
+    task_runner_->PostTask(FROM_HERE,
+                           BindOnce(
+                               [](Closure* extra_threads_running_barrier,
+                                  WaitableEvent* extra_threads_continue) {
+                                 extra_threads_running_barrier->Run();
+                                 WaitWithoutBlockingObserver(
+                                     extra_threads_continue);
+                               },
+                               Unretained(&extra_threads_running_barrier),
+                               Unretained(&extra_threads_continue)));
+  }
+
+  // Allow tasks to enter ScopedBlockingCall. Workers should be created for the
+  // tasks we just posted.
+  thread_can_block.Signal();
+  if (GetParam().behaves_as == BlockingType::MAY_BLOCK)
+    ExpectWorkerCapacityIncreasesTo(2 * kNumWorkersInWorkerPool);
+
+  // Should not block forever.
+  extra_thread_running.Wait();
+  EXPECT_EQ(worker_pool_->NumberOfWorkersForTesting(),
+            2 * kNumWorkersInWorkerPool);
+  extra_threads_continue.Signal();
+
+  thread_continue.Signal();
+  task_tracker_.FlushForTesting();
+}
+
+// Verify that workers become idle when the pool is over-capacity and that
+// those workers do no work.
+TEST_P(TaskSchedulerWorkerPoolBlockingTest, WorkersIdleWhenOverCapacity) {
+  ASSERT_EQ(worker_pool_->GetWorkerCapacityForTesting(),
+            kNumWorkersInWorkerPool);
+
+  SaturateWithBlockingTasks(GetParam());
+  if (GetParam().behaves_as == BlockingType::MAY_BLOCK)
+    ExpectWorkerCapacityIncreasesTo(2 * kNumWorkersInWorkerPool);
+  EXPECT_EQ(worker_pool_->GetWorkerCapacityForTesting(),
+            2 * kNumWorkersInWorkerPool);
+  // A range of possible number of workers is accepted because of
+  // crbug.com/757897.
+  EXPECT_GE(worker_pool_->NumberOfWorkersForTesting(),
+            kNumWorkersInWorkerPool + 1);
+  EXPECT_LE(worker_pool_->NumberOfWorkersForTesting(),
+            2 * kNumWorkersInWorkerPool);
+
+  WaitableEvent thread_running(WaitableEvent::ResetPolicy::AUTOMATIC,
+                               WaitableEvent::InitialState::NOT_SIGNALED);
+  WaitableEvent thread_continue(WaitableEvent::ResetPolicy::MANUAL,
+                                WaitableEvent::InitialState::NOT_SIGNALED);
+
+  RepeatingClosure thread_running_barrier = BarrierClosure(
+      kNumWorkersInWorkerPool,
+      BindOnce(&WaitableEvent::Signal, Unretained(&thread_running)));
+  // Posting these tasks should cause new workers to be created.
+  for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) {
+    auto callback = BindOnce(
+        [](Closure* thread_running_barrier, WaitableEvent* thread_continue) {
+          thread_running_barrier->Run();
+          WaitWithoutBlockingObserver(thread_continue);
+        },
+        Unretained(&thread_running_barrier), Unretained(&thread_continue));
+    task_runner_->PostTask(FROM_HERE, std::move(callback));
+  }
+  thread_running.Wait();
+
+  ASSERT_EQ(worker_pool_->NumberOfIdleWorkersForTesting(), 0U);
+  EXPECT_EQ(worker_pool_->NumberOfWorkersForTesting(),
+            2 * kNumWorkersInWorkerPool);
+
+  AtomicFlag is_exiting;
+  // These tasks should not get executed until after other tasks become
+  // unblocked.
+  for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) {
+    task_runner_->PostTask(FROM_HERE, BindOnce(
+                                          [](AtomicFlag* is_exiting) {
+                                            EXPECT_TRUE(is_exiting->IsSet());
+                                          },
+                                          Unretained(&is_exiting)));
+  }
+
+  // The original |kNumWorkersInWorkerPool| will finish their tasks after being
+  // unblocked. There will be work in the work queue, but the pool should now
+  // be over-capacity and workers will become idle.
+  UnblockTasks();
+  worker_pool_->WaitForWorkersIdleForTesting(kNumWorkersInWorkerPool);
+  EXPECT_EQ(worker_pool_->NumberOfIdleWorkersForTesting(),
+            kNumWorkersInWorkerPool);
+
+  // Posting more tasks should not cause workers that are idle because the pool
+  // is over capacity to begin doing work.
+  for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) {
+    task_runner_->PostTask(FROM_HERE, BindOnce(
+                                          [](AtomicFlag* is_exiting) {
+                                            EXPECT_TRUE(is_exiting->IsSet());
+                                          },
+                                          Unretained(&is_exiting)));
+  }
+
+  // Give time for those idle workers to possibly do work (which should not
+  // happen).
+  PlatformThread::Sleep(TestTimeouts::tiny_timeout());
+
+  is_exiting.Set();
+  // Unblocks the new workers.
+  thread_continue.Signal();
+  task_tracker_.FlushForTesting();
+}
+
+INSTANTIATE_TEST_CASE_P(
+    ,
+    TaskSchedulerWorkerPoolBlockingTest,
+    ::testing::Values(NestedBlockingType(BlockingType::MAY_BLOCK,
+                                         OptionalBlockingType::NO_BLOCK,
+                                         BlockingType::MAY_BLOCK),
+                      NestedBlockingType(BlockingType::WILL_BLOCK,
+                                         OptionalBlockingType::NO_BLOCK,
+                                         BlockingType::WILL_BLOCK),
+                      NestedBlockingType(BlockingType::MAY_BLOCK,
+                                         OptionalBlockingType::WILL_BLOCK,
+                                         BlockingType::WILL_BLOCK),
+                      NestedBlockingType(BlockingType::WILL_BLOCK,
+                                         OptionalBlockingType::MAY_BLOCK,
+                                         BlockingType::WILL_BLOCK)),
+    TaskSchedulerWorkerPoolBlockingTest::ParamInfoToString);
+
+// Verify that if a thread enters the scope of a MAY_BLOCK ScopedBlockingCall,
+// but exits the scope before MayBlockThreshold() is reached, the worker
+// capacity does not increase.
+TEST_F(TaskSchedulerWorkerPoolBlockingTest, ThreadBlockUnblockPremature) {
+  ASSERT_EQ(worker_pool_->GetWorkerCapacityForTesting(),
+            kNumWorkersInWorkerPool);
+
+  TimeDelta worker_capacity_change_sleep = GetWorkerCapacityChangeSleepTime();
+  worker_pool_->MaximizeMayBlockThresholdForTesting();
+
+  SaturateWithBlockingTasks(NestedBlockingType(BlockingType::MAY_BLOCK,
+                                               OptionalBlockingType::NO_BLOCK,
+                                               BlockingType::MAY_BLOCK));
+  PlatformThread::Sleep(worker_capacity_change_sleep);
+  EXPECT_EQ(worker_pool_->NumberOfWorkersForTesting(), kNumWorkersInWorkerPool);
+  EXPECT_EQ(worker_pool_->GetWorkerCapacityForTesting(),
+            kNumWorkersInWorkerPool);
+
+  UnblockTasks();
+  task_tracker_.FlushForTesting();
+  EXPECT_EQ(worker_pool_->GetWorkerCapacityForTesting(),
+            kNumWorkersInWorkerPool);
+}
+
+// Verify that if worker capacity is incremented because of a MAY_BLOCK
+// ScopedBlockingCall, it isn't incremented again when there is a nested
+// WILL_BLOCK ScopedBlockingCall.
+TEST_F(TaskSchedulerWorkerPoolBlockingTest,
+       MayBlockIncreaseCapacityNestedWillBlock) {
+  ASSERT_EQ(worker_pool_->GetWorkerCapacityForTesting(),
+            kNumWorkersInWorkerPool);
+  auto task_runner =
+      worker_pool_->CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()});
+  WaitableEvent can_return(WaitableEvent::ResetPolicy::MANUAL,
+                           WaitableEvent::InitialState::NOT_SIGNALED);
+
+  // Saturate the pool so that a MAY_BLOCK ScopedBlockingCall would increment
+  // the worker capacity.
+  for (size_t i = 0; i < kNumWorkersInWorkerPool - 1; ++i) {
+    task_runner->PostTask(FROM_HERE, BindOnce(&WaitWithoutBlockingObserver,
+                                              Unretained(&can_return)));
+  }
+
+  WaitableEvent can_instantiate_will_block(
+      WaitableEvent::ResetPolicy::MANUAL,
+      WaitableEvent::InitialState::NOT_SIGNALED);
+  WaitableEvent did_instantiate_will_block(
+      WaitableEvent::ResetPolicy::MANUAL,
+      WaitableEvent::InitialState::NOT_SIGNALED);
+
+  // Post a task that instantiates a MAY_BLOCK ScopedBlockingCall.
+  task_runner->PostTask(
+      FROM_HERE,
+      BindOnce(
+          [](WaitableEvent* can_instantiate_will_block,
+             WaitableEvent* did_instantiate_will_block,
+             WaitableEvent* can_return) {
+            ScopedBlockingCall may_block(BlockingType::MAY_BLOCK);
+            WaitWithoutBlockingObserver(can_instantiate_will_block);
+            ScopedBlockingCall will_block(BlockingType::WILL_BLOCK);
+            did_instantiate_will_block->Signal();
+            WaitWithoutBlockingObserver(can_return);
+          },
+          Unretained(&can_instantiate_will_block),
+          Unretained(&did_instantiate_will_block), Unretained(&can_return)));
+
+  // After a short delay, worker capacity should be incremented.
+  ExpectWorkerCapacityIncreasesTo(kNumWorkersInWorkerPool + 1);
+
+  // Wait until the task instantiates a WILL_BLOCK ScopedBlockingCall.
+  can_instantiate_will_block.Signal();
+  did_instantiate_will_block.Wait();
+
+  // Worker capacity shouldn't be incremented again.
+  EXPECT_EQ(kNumWorkersInWorkerPool + 1,
+            worker_pool_->GetWorkerCapacityForTesting());
+
+  // Tear down.
+  can_return.Signal();
+  task_tracker_.FlushForTesting();
+  EXPECT_EQ(worker_pool_->GetWorkerCapacityForTesting(),
+            kNumWorkersInWorkerPool);
+}
+
+// Verify that workers that become idle due to the pool being over capacity
+// will eventually clean up.
+TEST(TaskSchedulerWorkerPoolOverWorkerCapacityTest, VerifyCleanup) {
+  constexpr size_t kWorkerCapacity = 3;
+
+  TaskTracker task_tracker("Test");
+  DelayedTaskManager delayed_task_manager;
+  scoped_refptr<TaskRunner> service_thread_task_runner =
+      MakeRefCounted<TestSimpleTaskRunner>();
+  delayed_task_manager.Start(service_thread_task_runner);
+  SchedulerWorkerPoolImpl worker_pool(
+      "OverWorkerCapacityTestWorkerPool", "A", ThreadPriority::NORMAL,
+      task_tracker.GetTrackedRef(), &delayed_task_manager);
+  worker_pool.Start(
+      SchedulerWorkerPoolParams(kWorkerCapacity, kReclaimTimeForCleanupTests),
+      service_thread_task_runner, nullptr,
+      SchedulerWorkerPoolImpl::WorkerEnvironment::NONE);
+
+  scoped_refptr<TaskRunner> task_runner =
+      worker_pool.CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()});
+
+  WaitableEvent threads_running(WaitableEvent::ResetPolicy::AUTOMATIC,
+                                WaitableEvent::InitialState::NOT_SIGNALED);
+  WaitableEvent threads_continue(WaitableEvent::ResetPolicy::MANUAL,
+                                 WaitableEvent::InitialState::NOT_SIGNALED);
+  RepeatingClosure threads_running_barrier = BarrierClosure(
+      kWorkerCapacity,
+      BindOnce(&WaitableEvent::Signal, Unretained(&threads_running)));
+
+  WaitableEvent blocked_call_continue(
+      WaitableEvent::ResetPolicy::MANUAL,
+      WaitableEvent::InitialState::NOT_SIGNALED);
+
+  RepeatingClosure closure = BindRepeating(
+      [](Closure* threads_running_barrier, WaitableEvent* threads_continue,
+         WaitableEvent* blocked_call_continue) {
+        threads_running_barrier->Run();
+        {
+          ScopedBlockingCall scoped_blocking_call(BlockingType::WILL_BLOCK);
+          WaitWithoutBlockingObserver(blocked_call_continue);
+        }
+        WaitWithoutBlockingObserver(threads_continue);
+      },
+      Unretained(&threads_running_barrier), Unretained(&threads_continue),
+      Unretained(&blocked_call_continue));
+
+  for (size_t i = 0; i < kWorkerCapacity; ++i)
+    task_runner->PostTask(FROM_HERE, closure);
+
+  threads_running.Wait();
+
+  WaitableEvent extra_threads_running(
+      WaitableEvent::ResetPolicy::AUTOMATIC,
+      WaitableEvent::InitialState::NOT_SIGNALED);
+  WaitableEvent extra_threads_continue(
+      WaitableEvent::ResetPolicy::MANUAL,
+      WaitableEvent::InitialState::NOT_SIGNALED);
+
+  RepeatingClosure extra_threads_running_barrier = BarrierClosure(
+      kWorkerCapacity,
+      BindOnce(&WaitableEvent::Signal, Unretained(&extra_threads_running)));
+  // These tasks should run on the extra workers created when the worker
+  // capacity was increased.
+  for (size_t i = 0; i < kWorkerCapacity; ++i) {
+    task_runner->PostTask(FROM_HERE,
+                          BindOnce(
+                              [](Closure* extra_threads_running_barrier,
+                                 WaitableEvent* extra_threads_continue) {
+                                extra_threads_running_barrier->Run();
+                                WaitWithoutBlockingObserver(
+                                    extra_threads_continue);
+                              },
+                              Unretained(&extra_threads_running_barrier),
+                              Unretained(&extra_threads_continue)));
+  }
+  extra_threads_running.Wait();
+
+  ASSERT_EQ(kWorkerCapacity * 2, worker_pool.NumberOfWorkersForTesting());
+  EXPECT_EQ(kWorkerCapacity * 2, worker_pool.GetWorkerCapacityForTesting());
+  blocked_call_continue.Signal();
+  extra_threads_continue.Signal();
+
+  // Periodically post tasks to ensure that posting tasks does not prevent
+  // workers that are idle due to the pool being over capacity from cleaning up.
+  for (int i = 0; i < 16; ++i) {
+    task_runner->PostDelayedTask(FROM_HERE, DoNothing(),
+                                 kReclaimTimeForCleanupTests * i * 0.5);
+  }
+
+  // Note: one worker above capacity will not get cleaned up since it's on the
+  // top of the idle stack.
+  worker_pool.WaitForWorkersCleanedUpForTesting(kWorkerCapacity - 1);
+  EXPECT_EQ(kWorkerCapacity + 1, worker_pool.NumberOfWorkersForTesting());
+
+  threads_continue.Signal();
+
+  worker_pool.JoinForTesting();
+}
+
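+// BarrierClosure, as used by these tests, returns a RepeatingClosure that runs
+// its done-closure once it has been invoked N times. A minimal sketch of the
+// pattern (|all_ready| is an illustrative WaitableEvent):
+//
+//   RepeatingClosure barrier = BarrierClosure(
+//       3, BindOnce(&WaitableEvent::Signal, Unretained(&all_ready)));
+//   // Each of three workers calls barrier.Run(); the third call signals
+//   // |all_ready|.
+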
+// Verify that the maximum number of workers is 256 and that hitting the max
+// leaves the pool in a valid state with regard to worker capacity.
+TEST_F(TaskSchedulerWorkerPoolBlockingTest, MaximumWorkersTest) {
+  constexpr size_t kMaxNumberOfWorkers = 256;
+  constexpr size_t kNumExtraTasks = 10;
+
+  WaitableEvent early_blocking_thread_running(
+      WaitableEvent::ResetPolicy::MANUAL,
+      WaitableEvent::InitialState::NOT_SIGNALED);
+  RepeatingClosure early_threads_barrier_closure =
+      BarrierClosure(kMaxNumberOfWorkers,
+                     BindOnce(&WaitableEvent::Signal,
+                              Unretained(&early_blocking_thread_running)));
+
+  WaitableEvent early_threads_finished(
+      WaitableEvent::ResetPolicy::MANUAL,
+      WaitableEvent::InitialState::NOT_SIGNALED);
+  RepeatingClosure early_threads_finished_barrier = BarrierClosure(
+      kMaxNumberOfWorkers,
+      BindOnce(&WaitableEvent::Signal, Unretained(&early_threads_finished)));
+
+  WaitableEvent early_release_thread_continue(
+      WaitableEvent::ResetPolicy::MANUAL,
+      WaitableEvent::InitialState::NOT_SIGNALED);
+
+  // Post ScopedBlockingCall tasks to hit the worker cap.
+  for (size_t i = 0; i < kMaxNumberOfWorkers; ++i) {
+    task_runner_->PostTask(FROM_HERE,
+                           BindOnce(
+                               [](Closure* early_threads_barrier_closure,
+                                  WaitableEvent* early_release_thread_continue,
+                                  Closure* early_threads_finished) {
+                                 {
+                                   ScopedBlockingCall scoped_blocking_call(
+                                       BlockingType::WILL_BLOCK);
+                                   early_threads_barrier_closure->Run();
+                                   WaitWithoutBlockingObserver(
+                                       early_release_thread_continue);
+                                 }
+                                 early_threads_finished->Run();
+                               },
+                               Unretained(&early_threads_barrier_closure),
+                               Unretained(&early_release_thread_continue),
+                               Unretained(&early_threads_finished_barrier)));
+  }
+
+  early_blocking_thread_running.Wait();
+  EXPECT_EQ(worker_pool_->GetWorkerCapacityForTesting(),
+            kNumWorkersInWorkerPool + kMaxNumberOfWorkers);
+
+  WaitableEvent late_release_thread_continue(
+      WaitableEvent::ResetPolicy::MANUAL,
+      WaitableEvent::InitialState::NOT_SIGNALED);
+
+  WaitableEvent late_blocking_thread_running(
+      WaitableEvent::ResetPolicy::MANUAL,
+      WaitableEvent::InitialState::NOT_SIGNALED);
+  RepeatingClosure late_threads_barrier_closure = BarrierClosure(
+      kNumExtraTasks, BindOnce(&WaitableEvent::Signal,
+                               Unretained(&late_blocking_thread_running)));
+
+  // Post additional tasks. Note: |kMaxNumberOfWorkers| tasks should already be
+  // running. These additional tasks cannot run yet because the pool is already
+  // at its maximum worker cap.
+  for (size_t i = 0; i < kNumExtraTasks; ++i) {
+    task_runner_->PostTask(
+        FROM_HERE,
+        BindOnce(
+            [](Closure* late_threads_barrier_closure,
+               WaitableEvent* late_release_thread_continue) {
+              ScopedBlockingCall scoped_blocking_call(BlockingType::WILL_BLOCK);
+              late_threads_barrier_closure->Run();
+              WaitWithoutBlockingObserver(late_release_thread_continue);
+            },
+            Unretained(&late_threads_barrier_closure),
+            Unretained(&late_release_thread_continue)));
+  }
+
+  // Give the pool time to (incorrectly) exceed the max number of workers.
+  PlatformThread::Sleep(TestTimeouts::tiny_timeout());
+  EXPECT_LE(worker_pool_->NumberOfWorkersForTesting(), kMaxNumberOfWorkers);
+
+  early_release_thread_continue.Signal();
+  early_threads_finished.Wait();
+  late_blocking_thread_running.Wait();
+
+  WaitableEvent final_tasks_running(WaitableEvent::ResetPolicy::MANUAL,
+                                    WaitableEvent::InitialState::NOT_SIGNALED);
+  WaitableEvent final_tasks_continue(WaitableEvent::ResetPolicy::MANUAL,
+                                     WaitableEvent::InitialState::NOT_SIGNALED);
+  RepeatingClosure final_tasks_running_barrier = BarrierClosure(
+      kNumWorkersInWorkerPool,
+      BindOnce(&WaitableEvent::Signal, Unretained(&final_tasks_running)));
+
+  // Verify that we are still able to saturate the pool.
+  for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) {
+    task_runner_->PostTask(
+        FROM_HERE,
+        BindOnce(
+            [](Closure* closure, WaitableEvent* final_tasks_continue) {
+              closure->Run();
+              WaitWithoutBlockingObserver(final_tasks_continue);
+            },
+            Unretained(&final_tasks_running_barrier),
+            Unretained(&final_tasks_continue)));
+  }
+  final_tasks_running.Wait();
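+  // Each of the |kNumExtraTasks| WILL_BLOCK tasks still blocked above
+  // contributes one capacity increment on top of the base capacity.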
+  EXPECT_EQ(worker_pool_->GetWorkerCapacityForTesting(),
+            kNumWorkersInWorkerPool + kNumExtraTasks);
+  late_release_thread_continue.Signal();
+  final_tasks_continue.Signal();
+  task_tracker_.FlushForTesting();
+}
+
+// Verify that worker detachment doesn't race with worker cleanup; regression
+// test for https://crbug.com/810464.
+TEST(TaskSchedulerWorkerPoolTest, RacyCleanup) {
+#if defined(OS_FUCHSIA)
+  // Fuchsia + QEMU doesn't deal well with *many* threads being
+  // created/destroyed at once: https://crbug.com/816575.
+  constexpr size_t kWorkerCapacity = 16;
+#else   // defined(OS_FUCHSIA)
+  constexpr size_t kWorkerCapacity = 256;
+#endif  // defined(OS_FUCHSIA)
+  constexpr TimeDelta kReclaimTimeForRacyCleanupTest =
+      TimeDelta::FromMilliseconds(10);
+
+  TaskTracker task_tracker("Test");
+  DelayedTaskManager delayed_task_manager;
+  scoped_refptr<TaskRunner> service_thread_task_runner =
+      MakeRefCounted<TestSimpleTaskRunner>();
+  delayed_task_manager.Start(service_thread_task_runner);
+  SchedulerWorkerPoolImpl worker_pool(
+      "RacyCleanupTestWorkerPool", "A", ThreadPriority::NORMAL,
+      task_tracker.GetTrackedRef(), &delayed_task_manager);
+  worker_pool.Start(SchedulerWorkerPoolParams(kWorkerCapacity,
+                                              kReclaimTimeForRacyCleanupTest),
+                    service_thread_task_runner, nullptr,
+                    SchedulerWorkerPoolImpl::WorkerEnvironment::NONE);
+
+  scoped_refptr<TaskRunner> task_runner =
+      worker_pool.CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()});
+
+  WaitableEvent threads_running(WaitableEvent::ResetPolicy::AUTOMATIC,
+                                WaitableEvent::InitialState::NOT_SIGNALED);
+  WaitableEvent unblock_threads(WaitableEvent::ResetPolicy::MANUAL,
+                                WaitableEvent::InitialState::NOT_SIGNALED);
+  RepeatingClosure threads_running_barrier = BarrierClosure(
+      kWorkerCapacity,
+      BindOnce(&WaitableEvent::Signal, Unretained(&threads_running)));
+
+  for (size_t i = 0; i < kWorkerCapacity; ++i) {
+    task_runner->PostTask(
+        FROM_HERE,
+        BindOnce(
+            [](OnceClosure on_running, WaitableEvent* unblock_threads) {
+              std::move(on_running).Run();
+              WaitWithoutBlockingObserver(unblock_threads);
+            },
+            threads_running_barrier, Unretained(&unblock_threads)));
+  }
+
+  // Wait for all workers to be ready and release them all at once.
+  threads_running.Wait();
+  unblock_threads.Signal();
+
+  // Sleep so that this wakes up precisely when all workers are about to try to
+  // clean up after being idle.
+  PlatformThread::Sleep(kReclaimTimeForRacyCleanupTest);
+
+  worker_pool.JoinForTesting();
+
+  // Unwinding this test will be racy if worker cleanup can race with
+  // SchedulerWorkerPoolImpl destruction: https://crbug.com/810464.
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/task_scheduler/scheduler_worker_pool_params.cc b/base/task_scheduler/scheduler_worker_pool_params.cc
new file mode 100644
index 0000000..db85569
--- /dev/null
+++ b/base/task_scheduler/scheduler_worker_pool_params.cc
@@ -0,0 +1,23 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/scheduler_worker_pool_params.h"
+
+namespace base {
+
+SchedulerWorkerPoolParams::SchedulerWorkerPoolParams(
+    int max_threads,
+    TimeDelta suggested_reclaim_time,
+    SchedulerBackwardCompatibility backward_compatibility)
+    : max_threads_(max_threads),
+      suggested_reclaim_time_(suggested_reclaim_time),
+      backward_compatibility_(backward_compatibility) {}
+
+SchedulerWorkerPoolParams::SchedulerWorkerPoolParams(
+    const SchedulerWorkerPoolParams& other) = default;
+
+SchedulerWorkerPoolParams& SchedulerWorkerPoolParams::operator=(
+    const SchedulerWorkerPoolParams& other) = default;
+
+}  // namespace base
diff --git a/base/task_scheduler/scheduler_worker_pool_params.h b/base/task_scheduler/scheduler_worker_pool_params.h
new file mode 100644
index 0000000..928d3b4
--- /dev/null
+++ b/base/task_scheduler/scheduler_worker_pool_params.h
@@ -0,0 +1,43 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_SCHEDULER_WORKER_POOL_PARAMS_H_
+#define BASE_TASK_SCHEDULER_SCHEDULER_WORKER_POOL_PARAMS_H_
+
+#include "base/task_scheduler/scheduler_worker_params.h"
+#include "base/time/time.h"
+
+namespace base {
+
+class BASE_EXPORT SchedulerWorkerPoolParams final {
+ public:
+  // Constructs a set of params used to initialize a pool. The pool will contain
+  // up to |max_threads| threads. |suggested_reclaim_time| suggests how long the
+  // pool should wait before reclaiming idle threads; the pool is free to ignore
+  // this value for performance or correctness reasons. |backward_compatibility|
+  // indicates whether backward compatibility is enabled.
+  SchedulerWorkerPoolParams(
+      int max_threads,
+      TimeDelta suggested_reclaim_time,
+      SchedulerBackwardCompatibility backward_compatibility =
+          SchedulerBackwardCompatibility::DISABLED);
+
+  SchedulerWorkerPoolParams(const SchedulerWorkerPoolParams& other);
+  SchedulerWorkerPoolParams& operator=(const SchedulerWorkerPoolParams& other);
+
+  int max_threads() const { return max_threads_; }
+  TimeDelta suggested_reclaim_time() const { return suggested_reclaim_time_; }
+  SchedulerBackwardCompatibility backward_compatibility() const {
+    return backward_compatibility_;
+  }
+
+ private:
+  int max_threads_;
+  TimeDelta suggested_reclaim_time_;
+  SchedulerBackwardCompatibility backward_compatibility_;
+};
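+
+// Usage sketch (illustrative only; mirrors how the unit tests elsewhere in
+// this patch construct pools): a pool of up to 4 threads with a suggested
+// 30-second idle-thread reclaim time.
+//
+//   SchedulerWorkerPoolParams params(4, TimeDelta::FromSeconds(30));
+//   DCHECK_EQ(4, params.max_threads());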
+
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_SCHEDULER_WORKER_POOL_PARAMS_H_
diff --git a/base/task_scheduler/scheduler_worker_pool_unittest.cc b/base/task_scheduler/scheduler_worker_pool_unittest.cc
new file mode 100644
index 0000000..717409b
--- /dev/null
+++ b/base/task_scheduler/scheduler_worker_pool_unittest.cc
@@ -0,0 +1,345 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/scheduler_worker_pool.h"
+
+#include <memory>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/location.h"
+#include "base/memory/ref_counted.h"
+#include "base/task_runner.h"
+#include "base/task_scheduler/delayed_task_manager.h"
+#include "base/task_scheduler/scheduler_worker_pool_impl.h"
+#include "base/task_scheduler/scheduler_worker_pool_params.h"
+#include "base/task_scheduler/task_tracker.h"
+#include "base/task_scheduler/task_traits.h"
+#include "base/task_scheduler/test_task_factory.h"
+#include "base/task_scheduler/test_utils.h"
+#include "base/test/test_timeouts.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/simple_thread.h"
+#include "base/threading/thread.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_WIN)
+#include "base/task_scheduler/platform_native_worker_pool_win.h"
+#endif
+
+namespace base {
+namespace internal {
+
+namespace {
+
+constexpr size_t kNumWorkersInWorkerPool = 4;
+constexpr size_t kNumThreadsPostingTasks = 4;
+constexpr size_t kNumTasksPostedPerThread = 150;
+
+enum class PoolType {
+  GENERIC,
+#if defined(OS_WIN)
+  WINDOWS,
+#endif
+};
+
+struct PoolExecutionType {
+  PoolType pool_type;
+  test::ExecutionMode execution_mode;
+};
+
+using PostNestedTask = test::TestTaskFactory::PostNestedTask;
+
+class ThreadPostingTasks : public SimpleThread {
+ public:
+  // Constructs a thread that posts |kNumTasksPostedPerThread| tasks to
+  // |worker_pool| through a task runner with the given |execution_mode|. If
+  // |post_nested_task| is YES, each task posted by this thread posts another
+  // task when it runs.
+  ThreadPostingTasks(SchedulerWorkerPool* worker_pool,
+                     test::ExecutionMode execution_mode,
+                     PostNestedTask post_nested_task)
+      : SimpleThread("ThreadPostingTasks"),
+        worker_pool_(worker_pool),
+        post_nested_task_(post_nested_task),
+        factory_(test::CreateTaskRunnerWithExecutionMode(worker_pool,
+                                                         execution_mode),
+                 execution_mode) {
+    DCHECK(worker_pool_);
+  }
+
+  const test::TestTaskFactory* factory() const { return &factory_; }
+
+ private:
+  void Run() override {
+    EXPECT_FALSE(factory_.task_runner()->RunsTasksInCurrentSequence());
+
+    for (size_t i = 0; i < kNumTasksPostedPerThread; ++i)
+      EXPECT_TRUE(factory_.PostTask(post_nested_task_, Closure()));
+  }
+
+  SchedulerWorkerPool* const worker_pool_;
+  const PostNestedTask post_nested_task_;
+  test::TestTaskFactory factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(ThreadPostingTasks);
+};
+
+class TaskSchedulerWorkerPoolTest
+    : public testing::TestWithParam<PoolExecutionType> {
+ protected:
+  TaskSchedulerWorkerPoolTest()
+      : service_thread_("TaskSchedulerServiceThread") {}
+
+  void SetUp() override {
+    service_thread_.Start();
+    delayed_task_manager_.Start(service_thread_.task_runner());
+    CreateWorkerPool();
+  }
+
+  void TearDown() override {
+    service_thread_.Stop();
+    if (worker_pool_)
+      worker_pool_->JoinForTesting();
+  }
+
+  void CreateWorkerPool() {
+    ASSERT_FALSE(worker_pool_);
+    switch (GetParam().pool_type) {
+      case PoolType::GENERIC:
+        worker_pool_ = std::make_unique<SchedulerWorkerPoolImpl>(
+            "TestWorkerPool", "A", ThreadPriority::NORMAL,
+            task_tracker_.GetTrackedRef(), &delayed_task_manager_);
+        break;
+#if defined(OS_WIN)
+      case PoolType::WINDOWS:
+        worker_pool_ = std::make_unique<PlatformNativeWorkerPoolWin>(
+            task_tracker_.GetTrackedRef(), &delayed_task_manager_);
+        break;
+#endif
+    }
+    ASSERT_TRUE(worker_pool_);
+  }
+
+  void StartWorkerPool() {
+    ASSERT_TRUE(worker_pool_);
+    switch (GetParam().pool_type) {
+      case PoolType::GENERIC: {
+        SchedulerWorkerPoolImpl* scheduler_worker_pool_impl =
+            static_cast<SchedulerWorkerPoolImpl*>(worker_pool_.get());
+        scheduler_worker_pool_impl->Start(
+            SchedulerWorkerPoolParams(kNumWorkersInWorkerPool,
+                                      TimeDelta::Max()),
+            service_thread_.task_runner(), nullptr,
+            SchedulerWorkerPoolImpl::WorkerEnvironment::NONE);
+        break;
+      }
+#if defined(OS_WIN)
+      case PoolType::WINDOWS: {
+        PlatformNativeWorkerPoolWin* scheduler_worker_pool_windows_impl =
+            static_cast<PlatformNativeWorkerPoolWin*>(worker_pool_.get());
+        scheduler_worker_pool_windows_impl->Start();
+        break;
+      }
+#endif
+    }
+  }
+
+  Thread service_thread_;
+  TaskTracker task_tracker_ = {"Test"};
+  DelayedTaskManager delayed_task_manager_;
+
+  std::unique_ptr<SchedulerWorkerPool> worker_pool_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(TaskSchedulerWorkerPoolTest);
+};
+
+void ShouldNotRun() {
+  ADD_FAILURE() << "Ran a task that shouldn't run.";
+}
+
+}  // namespace
+
+TEST_P(TaskSchedulerWorkerPoolTest, PostTasks) {
+  StartWorkerPool();
+  // Create threads to post tasks.
+  std::vector<std::unique_ptr<ThreadPostingTasks>> threads_posting_tasks;
+  for (size_t i = 0; i < kNumThreadsPostingTasks; ++i) {
+    threads_posting_tasks.push_back(std::make_unique<ThreadPostingTasks>(
+        worker_pool_.get(), GetParam().execution_mode, PostNestedTask::NO));
+    threads_posting_tasks.back()->Start();
+  }
+
+  // Wait for all tasks to run.
+  for (const auto& thread_posting_tasks : threads_posting_tasks) {
+    thread_posting_tasks->Join();
+    thread_posting_tasks->factory()->WaitForAllTasksToRun();
+  }
+
+  // Flush the task tracker to be sure that no task accesses its TestTaskFactory
+  // after |threads_posting_tasks| is destroyed.
+  task_tracker_.FlushForTesting();
+}
+
+TEST_P(TaskSchedulerWorkerPoolTest, NestedPostTasks) {
+  StartWorkerPool();
+  // Create threads to post tasks. Each task posted by these threads will post
+  // another task when it runs.
+  std::vector<std::unique_ptr<ThreadPostingTasks>> threads_posting_tasks;
+  for (size_t i = 0; i < kNumThreadsPostingTasks; ++i) {
+    threads_posting_tasks.push_back(std::make_unique<ThreadPostingTasks>(
+        worker_pool_.get(), GetParam().execution_mode, PostNestedTask::YES));
+    threads_posting_tasks.back()->Start();
+  }
+
+  // Wait for all tasks to run.
+  for (const auto& thread_posting_tasks : threads_posting_tasks) {
+    thread_posting_tasks->Join();
+    thread_posting_tasks->factory()->WaitForAllTasksToRun();
+  }
+
+  // Flush the task tracker to be sure that no task accesses its TestTaskFactory
+  // after |threads_posting_tasks| is destroyed.
+  task_tracker_.FlushForTesting();
+}
+
+// Verify that a Task can't be posted after shutdown.
+TEST_P(TaskSchedulerWorkerPoolTest, PostTaskAfterShutdown) {
+  StartWorkerPool();
+  auto task_runner = test::CreateTaskRunnerWithExecutionMode(
+      worker_pool_.get(), GetParam().execution_mode);
+  task_tracker_.Shutdown();
+  EXPECT_FALSE(task_runner->PostTask(FROM_HERE, BindOnce(&ShouldNotRun)));
+}
+
+// Verify that posting tasks after the pool was destroyed fails but doesn't
+// crash.
+TEST_P(TaskSchedulerWorkerPoolTest, PostAfterDestroy) {
+  StartWorkerPool();
+  auto task_runner = test::CreateTaskRunnerWithExecutionMode(
+      worker_pool_.get(), GetParam().execution_mode);
+  EXPECT_TRUE(task_runner->PostTask(FROM_HERE, DoNothing()));
+  task_tracker_.Shutdown();
+  worker_pool_->JoinForTesting();
+  worker_pool_.reset();
+  EXPECT_FALSE(task_runner->PostTask(FROM_HERE, BindOnce(&ShouldNotRun)));
+}
+
+// Verify that a Task runs shortly after its delay expires.
+TEST_P(TaskSchedulerWorkerPoolTest, PostDelayedTask) {
+  StartWorkerPool();
+
+  WaitableEvent task_ran(WaitableEvent::ResetPolicy::AUTOMATIC,
+                         WaitableEvent::InitialState::NOT_SIGNALED);
+
+  auto task_runner = test::CreateTaskRunnerWithExecutionMode(
+      worker_pool_.get(), GetParam().execution_mode);
+
+  // Wait until the task runner is up and running to make sure the test below is
+  // solely timing the delayed task, not bringing up a physical thread.
+  task_runner->PostTask(
+      FROM_HERE, BindOnce(&WaitableEvent::Signal, Unretained(&task_ran)));
+  task_ran.Wait();
+  ASSERT_FALSE(task_ran.IsSignaled());
+
+  // Post a task with a short delay.
+  TimeTicks start_time = TimeTicks::Now();
+  EXPECT_TRUE(task_runner->PostDelayedTask(
+      FROM_HERE, BindOnce(&WaitableEvent::Signal, Unretained(&task_ran)),
+      TestTimeouts::tiny_timeout()));
+
+  // Wait until the task runs.
+  task_ran.Wait();
+
+  // Expect the task to run after its delay expires, but no more than 250
+  // ms after that.
+  const TimeDelta actual_delay = TimeTicks::Now() - start_time;
+  EXPECT_GE(actual_delay, TestTimeouts::tiny_timeout());
+  EXPECT_LT(actual_delay,
+            TimeDelta::FromMilliseconds(250) + TestTimeouts::tiny_timeout());
+}
+
+// Verify that the RunsTasksInCurrentSequence() method of a SEQUENCED TaskRunner
+// returns false when called from a task that isn't part of the sequence. Note:
+// Tests that use TestTaskFactory already verify that
+// RunsTasksInCurrentSequence() returns true when appropriate, so this test
+// complements them to get full coverage of that method.
+TEST_P(TaskSchedulerWorkerPoolTest, SequencedRunsTasksInCurrentSequence) {
+  StartWorkerPool();
+  auto task_runner = test::CreateTaskRunnerWithExecutionMode(
+      worker_pool_.get(), GetParam().execution_mode);
+  auto sequenced_task_runner =
+      worker_pool_->CreateSequencedTaskRunnerWithTraits(TaskTraits());
+
+  WaitableEvent task_ran(WaitableEvent::ResetPolicy::MANUAL,
+                         WaitableEvent::InitialState::NOT_SIGNALED);
+  task_runner->PostTask(
+      FROM_HERE,
+      BindOnce(
+          [](scoped_refptr<TaskRunner> sequenced_task_runner,
+             WaitableEvent* task_ran) {
+            EXPECT_FALSE(sequenced_task_runner->RunsTasksInCurrentSequence());
+            task_ran->Signal();
+          },
+          sequenced_task_runner, Unretained(&task_ran)));
+  task_ran.Wait();
+}
+
+// Verify that tasks posted before Start run after Start.
+TEST_P(TaskSchedulerWorkerPoolTest, PostBeforeStart) {
+  WaitableEvent task_1_running(WaitableEvent::ResetPolicy::MANUAL,
+                               WaitableEvent::InitialState::NOT_SIGNALED);
+  WaitableEvent task_2_running(WaitableEvent::ResetPolicy::MANUAL,
+                               WaitableEvent::InitialState::NOT_SIGNALED);
+
+  scoped_refptr<TaskRunner> task_runner =
+      worker_pool_->CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()});
+
+  task_runner->PostTask(
+      FROM_HERE, BindOnce(&WaitableEvent::Signal, Unretained(&task_1_running)));
+  task_runner->PostTask(
+      FROM_HERE, BindOnce(&WaitableEvent::Signal, Unretained(&task_2_running)));
+
+  // Workers should not be created and tasks should not run before the pool is
+  // started. The sleep gives the tasks time to run if they were (incorrectly)
+  // going to.
+  PlatformThread::Sleep(TestTimeouts::tiny_timeout());
+  EXPECT_FALSE(task_1_running.IsSignaled());
+  EXPECT_FALSE(task_2_running.IsSignaled());
+
+  StartWorkerPool();
+
+  // Tasks should run shortly after the pool is started.
+  task_1_running.Wait();
+  task_2_running.Wait();
+
+  task_tracker_.FlushForTesting();
+}
+
+INSTANTIATE_TEST_CASE_P(GenericParallel,
+                        TaskSchedulerWorkerPoolTest,
+                        ::testing::Values(PoolExecutionType{
+                            PoolType::GENERIC, test::ExecutionMode::PARALLEL}));
+INSTANTIATE_TEST_CASE_P(GenericSequenced,
+                        TaskSchedulerWorkerPoolTest,
+                        ::testing::Values(PoolExecutionType{
+                            PoolType::GENERIC,
+                            test::ExecutionMode::SEQUENCED}));
+
+#if defined(OS_WIN)
+INSTANTIATE_TEST_CASE_P(WinParallel,
+                        TaskSchedulerWorkerPoolTest,
+                        ::testing::Values(PoolExecutionType{
+                            PoolType::WINDOWS, test::ExecutionMode::PARALLEL}));
+INSTANTIATE_TEST_CASE_P(WinSequenced,
+                        TaskSchedulerWorkerPoolTest,
+                        ::testing::Values(PoolExecutionType{
+                            PoolType::WINDOWS,
+                            test::ExecutionMode::SEQUENCED}));
+#endif
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/task_scheduler/scheduler_worker_stack.cc b/base/task_scheduler/scheduler_worker_stack.cc
new file mode 100644
index 0000000..e5a0ab1
--- /dev/null
+++ b/base/task_scheduler/scheduler_worker_stack.cc
@@ -0,0 +1,49 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/scheduler_worker_stack.h"
+
+#include <algorithm>
+
+#include "base/logging.h"
+#include "base/stl_util.h"
+
+namespace base {
+namespace internal {
+
+SchedulerWorkerStack::SchedulerWorkerStack() = default;
+
+SchedulerWorkerStack::~SchedulerWorkerStack() = default;
+
+void SchedulerWorkerStack::Push(SchedulerWorker* worker) {
+  DCHECK(!Contains(worker)) << "SchedulerWorker already on stack";
+  stack_.push_back(worker);
+}
+
+SchedulerWorker* SchedulerWorkerStack::Pop() {
+  if (IsEmpty())
+    return nullptr;
+  SchedulerWorker* const worker = stack_.back();
+  stack_.pop_back();
+  return worker;
+}
+
+SchedulerWorker* SchedulerWorkerStack::Peek() const {
+  if (IsEmpty())
+    return nullptr;
+  return stack_.back();
+}
+
+bool SchedulerWorkerStack::Contains(const SchedulerWorker* worker) const {
+  return ContainsValue(stack_, worker);
+}
+
+void SchedulerWorkerStack::Remove(const SchedulerWorker* worker) {
+  auto it = std::find(stack_.begin(), stack_.end(), worker);
+  if (it != stack_.end())
+    stack_.erase(it);
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/task_scheduler/scheduler_worker_stack.h b/base/task_scheduler/scheduler_worker_stack.h
new file mode 100644
index 0000000..b96fc5a
--- /dev/null
+++ b/base/task_scheduler/scheduler_worker_stack.h
@@ -0,0 +1,62 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_SCHEDULER_WORKER_STACK_H_
+#define BASE_TASK_SCHEDULER_SCHEDULER_WORKER_STACK_H_
+
+#include <stddef.h>
+
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+
+namespace base {
+namespace internal {
+
+class SchedulerWorker;
+
+// A stack of SchedulerWorkers. Supports removal of arbitrary SchedulerWorkers.
+// DCHECKs when a SchedulerWorker is inserted multiple times. SchedulerWorkers
+// are not owned by the stack. Push() is amortized O(1). Pop(), Peek(), Size()
+// and IsEmpty() are O(1). Contains() and Remove() are O(n).
+// This class is NOT thread-safe.
+class BASE_EXPORT SchedulerWorkerStack {
+ public:
+  SchedulerWorkerStack();
+  ~SchedulerWorkerStack();
+
+  // Inserts |worker| at the top of the stack. |worker| must not already be on
+  // the stack.
+  void Push(SchedulerWorker* worker);
+
+  // Removes the top SchedulerWorker from the stack and returns it.
+  // Returns nullptr if the stack is empty.
+  SchedulerWorker* Pop();
+
+  // Returns the top SchedulerWorker from the stack, nullptr if empty.
+  SchedulerWorker* Peek() const;
+
+  // Returns true if |worker| is already on the stack.
+  bool Contains(const SchedulerWorker* worker) const;
+
+  // Removes |worker| from the stack.
+  void Remove(const SchedulerWorker* worker);
+
+  // Returns the number of SchedulerWorkers on the stack.
+  size_t Size() const { return stack_.size(); }
+
+  // Returns true if the stack is empty.
+  bool IsEmpty() const { return stack_.empty(); }
+
+ private:
+  std::vector<SchedulerWorker*> stack_;
+
+  DISALLOW_COPY_AND_ASSIGN(SchedulerWorkerStack);
+};
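+
+// Usage sketch (illustrative; the workers are owned elsewhere, e.g. by a
+// worker pool):
+//
+//   SchedulerWorkerStack idle_workers;
+//   idle_workers.Push(worker_a);                // O(1) amortized.
+//   idle_workers.Push(worker_b);
+//   SchedulerWorker* top = idle_workers.Pop();  // LIFO: returns |worker_b|.
+//   idle_workers.Remove(worker_a);              // O(n) linear scan.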
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_SCHEDULER_WORKER_STACK_H_
diff --git a/base/task_scheduler/scheduler_worker_stack_unittest.cc b/base/task_scheduler/scheduler_worker_stack_unittest.cc
new file mode 100644
index 0000000..8707874
--- /dev/null
+++ b/base/task_scheduler/scheduler_worker_stack_unittest.cc
@@ -0,0 +1,253 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/scheduler_worker_stack.h"
+
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/memory/ref_counted.h"
+#include "base/task_scheduler/scheduler_worker.h"
+#include "base/task_scheduler/sequence.h"
+#include "base/task_scheduler/task_tracker.h"
+#include "base/test/gtest_util.h"
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace internal {
+
+namespace {
+
+class MockSchedulerWorkerDelegate : public SchedulerWorker::Delegate {
+ public:
+  void OnCanScheduleSequence(scoped_refptr<Sequence> sequence) override {
+    ADD_FAILURE() << "Unexpected call to OnCanScheduleSequence().";
+  }
+  SchedulerWorker::ThreadLabel GetThreadLabel() const override {
+    return SchedulerWorker::ThreadLabel::DEDICATED;
+  }
+  void OnMainEntry(const SchedulerWorker* worker) override {}
+  scoped_refptr<Sequence> GetWork(SchedulerWorker* worker) override {
+    return nullptr;
+  }
+  void DidRunTask() override {
+    ADD_FAILURE() << "Unexpected call to DidRunTask()";
+  }
+  void ReEnqueueSequence(scoped_refptr<Sequence> sequence) override {
+    ADD_FAILURE() << "Unexpected call to ReEnqueueSequence()";
+  }
+  TimeDelta GetSleepTimeout() override {
+    return TimeDelta::Max();
+  }
+};
+
+class TaskSchedulerWorkerStackTest : public testing::Test {
+ protected:
+  void SetUp() override {
+    worker_a_ = MakeRefCounted<SchedulerWorker>(
+        ThreadPriority::NORMAL, WrapUnique(new MockSchedulerWorkerDelegate),
+        task_tracker_.GetTrackedRef());
+    ASSERT_TRUE(worker_a_);
+    worker_b_ = MakeRefCounted<SchedulerWorker>(
+        ThreadPriority::NORMAL, WrapUnique(new MockSchedulerWorkerDelegate),
+        task_tracker_.GetTrackedRef());
+    ASSERT_TRUE(worker_b_);
+    worker_c_ = MakeRefCounted<SchedulerWorker>(
+        ThreadPriority::NORMAL, WrapUnique(new MockSchedulerWorkerDelegate),
+        task_tracker_.GetTrackedRef());
+    ASSERT_TRUE(worker_c_);
+  }
+
+ private:
+  TaskTracker task_tracker_ = {"Test"};
+
+ protected:
+  scoped_refptr<SchedulerWorker> worker_a_;
+  scoped_refptr<SchedulerWorker> worker_b_;
+  scoped_refptr<SchedulerWorker> worker_c_;
+};
+
+}  // namespace
+
+// Verify that Push() and Pop() add/remove values in LIFO order.
+TEST_F(TaskSchedulerWorkerStackTest, PushPop) {
+  SchedulerWorkerStack stack;
+  EXPECT_EQ(nullptr, stack.Pop());
+
+  EXPECT_TRUE(stack.IsEmpty());
+  EXPECT_EQ(0U, stack.Size());
+
+  stack.Push(worker_a_.get());
+  EXPECT_FALSE(stack.IsEmpty());
+  EXPECT_EQ(1U, stack.Size());
+
+  stack.Push(worker_b_.get());
+  EXPECT_FALSE(stack.IsEmpty());
+  EXPECT_EQ(2U, stack.Size());
+
+  stack.Push(worker_c_.get());
+  EXPECT_FALSE(stack.IsEmpty());
+  EXPECT_EQ(3U, stack.Size());
+
+  EXPECT_EQ(worker_c_.get(), stack.Pop());
+  EXPECT_FALSE(stack.IsEmpty());
+  EXPECT_EQ(2U, stack.Size());
+
+  stack.Push(worker_c_.get());
+  EXPECT_FALSE(stack.IsEmpty());
+  EXPECT_EQ(3U, stack.Size());
+
+  EXPECT_EQ(worker_c_.get(), stack.Pop());
+  EXPECT_FALSE(stack.IsEmpty());
+  EXPECT_EQ(2U, stack.Size());
+
+  EXPECT_EQ(worker_b_.get(), stack.Pop());
+  EXPECT_FALSE(stack.IsEmpty());
+  EXPECT_EQ(1U, stack.Size());
+
+  EXPECT_EQ(worker_a_.get(), stack.Pop());
+  EXPECT_TRUE(stack.IsEmpty());
+  EXPECT_EQ(0U, stack.Size());
+
+  EXPECT_EQ(nullptr, stack.Pop());
+}
+
+// Verify that Peek() returns the correct values in LIFO order.
+TEST_F(TaskSchedulerWorkerStackTest, PeekPop) {
+  SchedulerWorkerStack stack;
+  EXPECT_EQ(nullptr, stack.Peek());
+
+  EXPECT_TRUE(stack.IsEmpty());
+  EXPECT_EQ(0U, stack.Size());
+
+  stack.Push(worker_a_.get());
+  EXPECT_EQ(worker_a_.get(), stack.Peek());
+  EXPECT_FALSE(stack.IsEmpty());
+  EXPECT_EQ(1U, stack.Size());
+
+  stack.Push(worker_b_.get());
+  EXPECT_EQ(worker_b_.get(), stack.Peek());
+  EXPECT_FALSE(stack.IsEmpty());
+  EXPECT_EQ(2U, stack.Size());
+
+  stack.Push(worker_c_.get());
+  EXPECT_EQ(worker_c_.get(), stack.Peek());
+  EXPECT_FALSE(stack.IsEmpty());
+  EXPECT_EQ(3U, stack.Size());
+
+  EXPECT_EQ(worker_c_.get(), stack.Pop());
+  EXPECT_EQ(worker_b_.get(), stack.Peek());
+  EXPECT_FALSE(stack.IsEmpty());
+  EXPECT_EQ(2U, stack.Size());
+
+  EXPECT_EQ(worker_b_.get(), stack.Pop());
+  EXPECT_EQ(worker_a_.get(), stack.Peek());
+  EXPECT_FALSE(stack.IsEmpty());
+  EXPECT_EQ(1U, stack.Size());
+
+  EXPECT_EQ(worker_a_.get(), stack.Pop());
+  EXPECT_TRUE(stack.IsEmpty());
+  EXPECT_EQ(0U, stack.Size());
+
+  EXPECT_EQ(nullptr, stack.Peek());
+}
+
+// Verify that Contains() returns true for workers on the stack.
+TEST_F(TaskSchedulerWorkerStackTest, Contains) {
+  SchedulerWorkerStack stack;
+  EXPECT_FALSE(stack.Contains(worker_a_.get()));
+  EXPECT_FALSE(stack.Contains(worker_b_.get()));
+  EXPECT_FALSE(stack.Contains(worker_c_.get()));
+
+  stack.Push(worker_a_.get());
+  EXPECT_TRUE(stack.Contains(worker_a_.get()));
+  EXPECT_FALSE(stack.Contains(worker_b_.get()));
+  EXPECT_FALSE(stack.Contains(worker_c_.get()));
+
+  stack.Push(worker_b_.get());
+  EXPECT_TRUE(stack.Contains(worker_a_.get()));
+  EXPECT_TRUE(stack.Contains(worker_b_.get()));
+  EXPECT_FALSE(stack.Contains(worker_c_.get()));
+
+  stack.Push(worker_c_.get());
+  EXPECT_TRUE(stack.Contains(worker_a_.get()));
+  EXPECT_TRUE(stack.Contains(worker_b_.get()));
+  EXPECT_TRUE(stack.Contains(worker_c_.get()));
+
+  stack.Pop();
+  EXPECT_TRUE(stack.Contains(worker_a_.get()));
+  EXPECT_TRUE(stack.Contains(worker_b_.get()));
+  EXPECT_FALSE(stack.Contains(worker_c_.get()));
+
+  stack.Pop();
+  EXPECT_TRUE(stack.Contains(worker_a_.get()));
+  EXPECT_FALSE(stack.Contains(worker_b_.get()));
+  EXPECT_FALSE(stack.Contains(worker_c_.get()));
+
+  stack.Pop();
+  EXPECT_FALSE(stack.Contains(worker_a_.get()));
+  EXPECT_FALSE(stack.Contains(worker_b_.get()));
+  EXPECT_FALSE(stack.Contains(worker_c_.get()));
+}
+
+// Verify that a value can be removed by Remove().
+TEST_F(TaskSchedulerWorkerStackTest, Remove) {
+  SchedulerWorkerStack stack;
+  EXPECT_TRUE(stack.IsEmpty());
+  EXPECT_EQ(0U, stack.Size());
+
+  stack.Push(worker_a_.get());
+  EXPECT_FALSE(stack.IsEmpty());
+  EXPECT_EQ(1U, stack.Size());
+
+  stack.Push(worker_b_.get());
+  EXPECT_FALSE(stack.IsEmpty());
+  EXPECT_EQ(2U, stack.Size());
+
+  stack.Push(worker_c_.get());
+  EXPECT_FALSE(stack.IsEmpty());
+  EXPECT_EQ(3U, stack.Size());
+
+  stack.Remove(worker_b_.get());
+  EXPECT_FALSE(stack.IsEmpty());
+  EXPECT_EQ(2U, stack.Size());
+
+  EXPECT_EQ(worker_c_.get(), stack.Pop());
+  EXPECT_FALSE(stack.IsEmpty());
+  EXPECT_EQ(1U, stack.Size());
+
+  EXPECT_EQ(worker_a_.get(), stack.Pop());
+  EXPECT_TRUE(stack.IsEmpty());
+  EXPECT_EQ(0U, stack.Size());
+}
+
+// Verify that a value can be pushed again after it has been removed.
+TEST_F(TaskSchedulerWorkerStackTest, PushAfterRemove) {
+  SchedulerWorkerStack stack;
+  EXPECT_EQ(0U, stack.Size());
+  EXPECT_TRUE(stack.IsEmpty());
+
+  stack.Push(worker_a_.get());
+  EXPECT_EQ(1U, stack.Size());
+  EXPECT_FALSE(stack.IsEmpty());
+
+  stack.Remove(worker_a_.get());
+  EXPECT_EQ(0U, stack.Size());
+  EXPECT_TRUE(stack.IsEmpty());
+
+  stack.Push(worker_a_.get());
+  EXPECT_EQ(1U, stack.Size());
+  EXPECT_FALSE(stack.IsEmpty());
+}
+
+// Verify that Push() DCHECKs when a value is inserted twice.
+TEST_F(TaskSchedulerWorkerStackTest, PushTwice) {
+  SchedulerWorkerStack stack;
+  stack.Push(worker_a_.get());
+  EXPECT_DCHECK_DEATH({ stack.Push(worker_a_.get()); });
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/task_scheduler/scheduler_worker_unittest.cc b/base/task_scheduler/scheduler_worker_unittest.cc
new file mode 100644
index 0000000..cbeb355
--- /dev/null
+++ b/base/task_scheduler/scheduler_worker_unittest.cc
@@ -0,0 +1,907 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/scheduler_worker.h"
+
+#include <stddef.h>
+
+#include <memory>
+#include <vector>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/task_scheduler/scheduler_lock.h"
+#include "base/task_scheduler/scheduler_worker_observer.h"
+#include "base/task_scheduler/sequence.h"
+#include "base/task_scheduler/task.h"
+#include "base/task_scheduler/task_tracker.h"
+#include "base/task_scheduler/test_utils.h"
+#include "base/test/test_timeouts.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/simple_thread.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_WIN)
+#include <objbase.h>
+
+#include "base/win/com_init_check_hook.h"
+#endif
+
+using testing::_;
+using testing::Mock;
+using testing::Ne;
+using testing::StrictMock;
+
+namespace base {
+namespace internal {
+namespace {
+
+const size_t kNumSequencesPerTest = 150;
+
+class SchedulerWorkerDefaultDelegate : public SchedulerWorker::Delegate {
+ public:
+  SchedulerWorkerDefaultDelegate() = default;
+
+  // SchedulerWorker::Delegate:
+  void OnCanScheduleSequence(scoped_refptr<Sequence> sequence) override {
+    ADD_FAILURE() << "Unexpected call to OnCanScheduleSequence().";
+  }
+  SchedulerWorker::ThreadLabel GetThreadLabel() const override {
+    return SchedulerWorker::ThreadLabel::DEDICATED;
+  }
+  void OnMainEntry(const SchedulerWorker* worker) override {}
+  scoped_refptr<Sequence> GetWork(SchedulerWorker* worker) override {
+    return nullptr;
+  }
+  void DidRunTask() override {
+    ADD_FAILURE() << "Unexpected call to DidRunTask()";
+  }
+  void ReEnqueueSequence(scoped_refptr<Sequence> sequence) override {
+    ADD_FAILURE() << "Unexpected call to ReEnqueueSequence()";
+  }
+  TimeDelta GetSleepTimeout() override { return TimeDelta::Max(); }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(SchedulerWorkerDefaultDelegate);
+};
+
+// The test parameter is the number of Tasks per Sequence returned by GetWork().
+class TaskSchedulerWorkerTest : public testing::TestWithParam<size_t> {
+ protected:
+  TaskSchedulerWorkerTest()
+      : main_entry_called_(WaitableEvent::ResetPolicy::MANUAL,
+                           WaitableEvent::InitialState::NOT_SIGNALED),
+        num_get_work_cv_(lock_.CreateConditionVariable()),
+        worker_set_(WaitableEvent::ResetPolicy::MANUAL,
+                    WaitableEvent::InitialState::NOT_SIGNALED) {}
+
+  void SetUp() override {
+    worker_ = MakeRefCounted<SchedulerWorker>(
+        ThreadPriority::NORMAL,
+        std::make_unique<TestSchedulerWorkerDelegate>(this),
+        task_tracker_.GetTrackedRef());
+    ASSERT_TRUE(worker_);
+    worker_->Start();
+    worker_set_.Signal();
+    main_entry_called_.Wait();
+  }
+
+  void TearDown() override {
+    // |worker_| needs to be released before ~TaskTracker() as it holds a
+    // TrackedRef to it.
+    worker_->JoinForTesting();
+    worker_ = nullptr;
+  }
+
+  size_t TasksPerSequence() const { return GetParam(); }
+
+  // Wait until GetWork() has been called |num_get_work| times.
+  void WaitForNumGetWork(size_t num_get_work) {
+    AutoSchedulerLock auto_lock(lock_);
+    while (num_get_work_ < num_get_work)
+      num_get_work_cv_->Wait();
+  }
+
+  void SetMaxGetWork(size_t max_get_work) {
+    AutoSchedulerLock auto_lock(lock_);
+    max_get_work_ = max_get_work;
+  }
+
+  void SetNumSequencesToCreate(size_t num_sequences_to_create) {
+    AutoSchedulerLock auto_lock(lock_);
+    EXPECT_EQ(0U, num_sequences_to_create_);
+    num_sequences_to_create_ = num_sequences_to_create;
+  }
+
+  size_t NumRunTasks() {
+    AutoSchedulerLock auto_lock(lock_);
+    return num_run_tasks_;
+  }
+
+  std::vector<scoped_refptr<Sequence>> CreatedSequences() {
+    AutoSchedulerLock auto_lock(lock_);
+    return created_sequences_;
+  }
+
+  std::vector<scoped_refptr<Sequence>> EnqueuedSequences() {
+    AutoSchedulerLock auto_lock(lock_);
+    return re_enqueued_sequences_;
+  }
+
+  scoped_refptr<SchedulerWorker> worker_;
+
+ private:
+  class TestSchedulerWorkerDelegate : public SchedulerWorkerDefaultDelegate {
+   public:
+    TestSchedulerWorkerDelegate(TaskSchedulerWorkerTest* outer)
+        : outer_(outer) {}
+
+    ~TestSchedulerWorkerDelegate() override {
+      EXPECT_FALSE(IsCallToDidRunTaskExpected());
+    }
+
+    // SchedulerWorker::Delegate:
+    void OnMainEntry(const SchedulerWorker* worker) override {
+      outer_->worker_set_.Wait();
+      EXPECT_EQ(outer_->worker_.get(), worker);
+      EXPECT_FALSE(IsCallToDidRunTaskExpected());
+
+      // Without synchronization, OnMainEntry() could be called twice without
+      // generating an error.
+      AutoSchedulerLock auto_lock(outer_->lock_);
+      EXPECT_FALSE(outer_->main_entry_called_.IsSignaled());
+      outer_->main_entry_called_.Signal();
+    }
+
+    scoped_refptr<Sequence> GetWork(SchedulerWorker* worker) override {
+      EXPECT_FALSE(IsCallToDidRunTaskExpected());
+      EXPECT_EQ(outer_->worker_.get(), worker);
+
+      {
+        AutoSchedulerLock auto_lock(outer_->lock_);
+
+        // Increment the number of times that this method has been called.
+        ++outer_->num_get_work_;
+        outer_->num_get_work_cv_->Signal();
+
+        // Verify that this method isn't called more times than expected.
+        EXPECT_LE(outer_->num_get_work_, outer_->max_get_work_);
+
+        // Check if a Sequence should be returned.
+        if (outer_->num_sequences_to_create_ == 0)
+          return nullptr;
+        --outer_->num_sequences_to_create_;
+      }
+
+      // Create a Sequence with TasksPerSequence() Tasks.
+      scoped_refptr<Sequence> sequence(new Sequence);
+      for (size_t i = 0; i < outer_->TasksPerSequence(); ++i) {
+        Task task(FROM_HERE,
+                  BindOnce(&TaskSchedulerWorkerTest::RunTaskCallback,
+                           Unretained(outer_)),
+                  TaskTraits(), TimeDelta());
+        EXPECT_TRUE(outer_->task_tracker_.WillPostTask(task));
+        sequence->PushTask(std::move(task));
+      }
+
+      ExpectCallToDidRunTask();
+
+      {
+        // Add the Sequence to the vector of created Sequences.
+        AutoSchedulerLock auto_lock(outer_->lock_);
+        outer_->created_sequences_.push_back(sequence);
+      }
+
+      sequence = outer_->task_tracker_.WillScheduleSequence(std::move(sequence),
+                                                            nullptr);
+      EXPECT_TRUE(sequence);
+      return sequence;
+    }
+
+    void DidRunTask() override {
+      AutoSchedulerLock auto_lock(expect_did_run_task_lock_);
+      EXPECT_TRUE(expect_did_run_task_);
+      expect_did_run_task_ = false;
+    }
+
+    // This override verifies that |sequence| contains the expected number of
+    // Tasks and adds it to |re_enqueued_sequences_|. Unlike a normal
+    // ReEnqueueSequence implementation, it doesn't reinsert |sequence| into a
+    // queue for further execution.
+    void ReEnqueueSequence(scoped_refptr<Sequence> sequence) override {
+      EXPECT_FALSE(IsCallToDidRunTaskExpected());
+      EXPECT_GT(outer_->TasksPerSequence(), 1U);
+
+      // Verify that |sequence| contains TasksPerSequence() - 1 Tasks.
+      for (size_t i = 0; i < outer_->TasksPerSequence() - 1; ++i) {
+        EXPECT_TRUE(sequence->TakeTask());
+        EXPECT_EQ(i == outer_->TasksPerSequence() - 2, sequence->Pop());
+      }
+
+      // Add |sequence| to |re_enqueued_sequences_|.
+      AutoSchedulerLock auto_lock(outer_->lock_);
+      outer_->re_enqueued_sequences_.push_back(std::move(sequence));
+      EXPECT_LE(outer_->re_enqueued_sequences_.size(),
+                outer_->created_sequences_.size());
+    }
+
+   private:
+    // Expect a call to DidRunTask() before the next call to any other method of
+    // this delegate.
+    void ExpectCallToDidRunTask() {
+      AutoSchedulerLock auto_lock(expect_did_run_task_lock_);
+      expect_did_run_task_ = true;
+    }
+
+    bool IsCallToDidRunTaskExpected() const {
+      AutoSchedulerLock auto_lock(expect_did_run_task_lock_);
+      return expect_did_run_task_;
+    }
+
+    TaskSchedulerWorkerTest* outer_;
+
+    // Synchronizes access to |expect_did_run_task_|.
+    mutable SchedulerLock expect_did_run_task_lock_;
+
+    // Whether the next method called on this delegate should be DidRunTask().
+    bool expect_did_run_task_ = false;
+
+    DISALLOW_COPY_AND_ASSIGN(TestSchedulerWorkerDelegate);
+  };
+
+  void RunTaskCallback() {
+    AutoSchedulerLock auto_lock(lock_);
+    ++num_run_tasks_;
+    EXPECT_LE(num_run_tasks_, created_sequences_.size());
+  }
+
+  TaskTracker task_tracker_ = {"Test"};
+
+  // Synchronizes access to all members below.
+  mutable SchedulerLock lock_;
+
+  // Signaled once OnMainEntry() has been called.
+  WaitableEvent main_entry_called_;
+
+  // Number of Sequences that should be created by GetWork(). When this
+  // is 0, GetWork() returns nullptr.
+  size_t num_sequences_to_create_ = 0;
+
+  // Number of times that GetWork() has been called.
+  size_t num_get_work_ = 0;
+
+  // Maximum number of times that GetWork() can be called.
+  size_t max_get_work_ = 0;
+
+  // Condition variable signaled when |num_get_work_| is incremented.
+  std::unique_ptr<ConditionVariable> num_get_work_cv_;
+
+  // Sequences created by GetWork().
+  std::vector<scoped_refptr<Sequence>> created_sequences_;
+
+  // Sequences passed to ReEnqueueSequence().
+  std::vector<scoped_refptr<Sequence>> re_enqueued_sequences_;
+
+  // Number of times that RunTaskCallback() has been called.
+  size_t num_run_tasks_ = 0;
+
+  // Signaled after |worker_| is set.
+  WaitableEvent worker_set_;
+
+  DISALLOW_COPY_AND_ASSIGN(TaskSchedulerWorkerTest);
+};
+
+}  // namespace
+
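+// The delegate protocol exercised by these tests, roughly: the worker
+// repeatedly calls GetWork(); for each Sequence returned it runs one Task,
+// calls DidRunTask(), and hands a still-non-empty Sequence back through
+// ReEnqueueSequence(). A sketch of one worker iteration (illustrative
+// pseudocode; the real loop in scheduler_worker.cc differs in detail):
+//
+//   while (scoped_refptr<Sequence> sequence = delegate->GetWork(worker)) {
+//     // Run the Sequence's next Task via the TaskTracker (details omitted).
+//     delegate->DidRunTask();
+//     if (!sequence->Pop())  // Pop() returns true once |sequence| is empty.
+//       delegate->ReEnqueueSequence(std::move(sequence));
+//   }
+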
+// Verify that when GetWork() continuously returns Sequences, all Tasks in these
+// Sequences run successfully. The test wakes up the SchedulerWorker once.
+TEST_P(TaskSchedulerWorkerTest, ContinuousWork) {
+  // Set GetWork() to return |kNumSequencesPerTest| Sequences before starting to
+  // return nullptr.
+  SetNumSequencesToCreate(kNumSequencesPerTest);
+
+  // Expect |kNumSequencesPerTest| calls to GetWork() in which it returns a
+  // Sequence and one call in which it returns nullptr.
+  const size_t kExpectedNumGetWork = kNumSequencesPerTest + 1;
+  SetMaxGetWork(kExpectedNumGetWork);
+
+  // Wake up |worker_| and wait until GetWork() has been invoked the
+  // expected amount of times.
+  worker_->WakeUp();
+  WaitForNumGetWork(kExpectedNumGetWork);
+
+  // All tasks should have run.
+  EXPECT_EQ(kNumSequencesPerTest, NumRunTasks());
+
+  // If Sequences returned by GetWork() contain more than one Task, they aren't
+  // empty after the worker pops Tasks from them and thus should be returned to
+  // ReEnqueueSequence().
+  if (TasksPerSequence() > 1)
+    EXPECT_EQ(CreatedSequences(), EnqueuedSequences());
+  else
+    EXPECT_TRUE(EnqueuedSequences().empty());
+}
+
+// Verify that when GetWork() alternates between returning a Sequence and
+// returning nullptr, all Tasks in the returned Sequences run successfully. The
+// test wakes up the SchedulerWorker once for each Sequence.
+TEST_P(TaskSchedulerWorkerTest, IntermittentWork) {
+  for (size_t i = 0; i < kNumSequencesPerTest; ++i) {
+    // Set GetWork() to return 1 Sequence before starting to return
+    // nullptr.
+    SetNumSequencesToCreate(1);
+
+    // Expect |i + 1| calls to GetWork() in which it returns a Sequence and
+    // |i + 1| calls in which it returns nullptr.
+    const size_t expected_num_get_work = 2 * (i + 1);
+    SetMaxGetWork(expected_num_get_work);
+
+    // Wake up |worker_| and wait until GetWork() has been invoked
+    // the expected amount of times.
+    worker_->WakeUp();
+    WaitForNumGetWork(expected_num_get_work);
+
+    // The Task posted in this iteration should have run.
+    EXPECT_EQ(i + 1, NumRunTasks());
+
+    // If Sequences returned by GetWork() contain more than one Task, they
+    // aren't empty after the worker pops Tasks from them and thus should be
+    // returned to ReEnqueueSequence().
+    if (TasksPerSequence() > 1)
+      EXPECT_EQ(CreatedSequences(), EnqueuedSequences());
+    else
+      EXPECT_TRUE(EnqueuedSequences().empty());
+  }
+}
+
+INSTANTIATE_TEST_CASE_P(OneTaskPerSequence,
+                        TaskSchedulerWorkerTest,
+                        ::testing::Values(1));
+INSTANTIATE_TEST_CASE_P(TwoTasksPerSequence,
+                        TaskSchedulerWorkerTest,
+                        ::testing::Values(2));
+
+namespace {
+
+class ControllableCleanupDelegate : public SchedulerWorkerDefaultDelegate {
+ public:
+  class Controls : public RefCountedThreadSafe<Controls> {
+   public:
+    Controls()
+        : work_running_(WaitableEvent::ResetPolicy::MANUAL,
+                        WaitableEvent::InitialState::SIGNALED),
+          work_processed_(WaitableEvent::ResetPolicy::MANUAL,
+                          WaitableEvent::InitialState::NOT_SIGNALED),
+          cleanup_requested_(WaitableEvent::ResetPolicy::MANUAL,
+                             WaitableEvent::InitialState::NOT_SIGNALED),
+          destroyed_(WaitableEvent::ResetPolicy::MANUAL,
+                     WaitableEvent::InitialState::NOT_SIGNALED),
+          exited_(WaitableEvent::ResetPolicy::MANUAL,
+                  WaitableEvent::InitialState::NOT_SIGNALED) {}
+
+    void HaveWorkBlock() { work_running_.Reset(); }
+
+    void UnblockWork() { work_running_.Signal(); }
+
+    void WaitForWorkToRun() { work_processed_.Wait(); }
+
+    void WaitForCleanupRequest() { cleanup_requested_.Wait(); }
+
+    void WaitForDelegateDestroy() { destroyed_.Wait(); }
+
+    void WaitForMainExit() { exited_.Wait(); }
+
+    void set_expect_get_work(bool expect_get_work) {
+      expect_get_work_ = expect_get_work;
+    }
+
+    void ResetState() {
+      work_running_.Signal();
+      work_processed_.Reset();
+      cleanup_requested_.Reset();
+      exited_.Reset();
+      work_requested_ = false;
+    }
+
+    void set_can_cleanup(bool can_cleanup) { can_cleanup_ = can_cleanup; }
+
+   private:
+    friend class ControllableCleanupDelegate;
+    friend class RefCountedThreadSafe<Controls>;
+    ~Controls() = default;
+
+    WaitableEvent work_running_;
+    WaitableEvent work_processed_;
+    WaitableEvent cleanup_requested_;
+    WaitableEvent destroyed_;
+    WaitableEvent exited_;
+
+    bool expect_get_work_ = true;
+    bool can_cleanup_ = false;
+    bool work_requested_ = false;
+
+    DISALLOW_COPY_AND_ASSIGN(Controls);
+  };
+
+  ControllableCleanupDelegate(TaskTracker* task_tracker)
+      : task_tracker_(task_tracker), controls_(new Controls()) {}
+
+  ~ControllableCleanupDelegate() override { controls_->destroyed_.Signal(); }
+
+  scoped_refptr<Sequence> GetWork(SchedulerWorker* worker) override {
+    EXPECT_TRUE(controls_->expect_get_work_);
+
+    // Sends one item of work that signals |work_processed_|. On subsequent
+    // calls, returns nullptr to indicate there's no more work to be done.
+    if (controls_->work_requested_) {
+      if (CanCleanup(worker)) {
+        OnCleanup();
+        worker->Cleanup();
+        controls_->set_expect_get_work(false);
+      }
+      return nullptr;
+    }
+
+    controls_->work_requested_ = true;
+    scoped_refptr<Sequence> sequence(new Sequence);
+    Task task(
+        FROM_HERE,
+        BindOnce(
+            [](WaitableEvent* work_processed, WaitableEvent* work_running) {
+              work_processed->Signal();
+              work_running->Wait();
+            },
+            Unretained(&controls_->work_processed_),
+            Unretained(&controls_->work_running_)),
+        {WithBaseSyncPrimitives(), TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN},
+        TimeDelta());
+    EXPECT_TRUE(task_tracker_->WillPostTask(task));
+    sequence->PushTask(std::move(task));
+    sequence =
+        task_tracker_->WillScheduleSequence(std::move(sequence), nullptr);
+    EXPECT_TRUE(sequence);
+    return sequence;
+  }
+
+  void DidRunTask() override {}
+
+  void OnMainExit(SchedulerWorker* worker) override {
+    controls_->exited_.Signal();
+  }
+
+  bool CanCleanup(SchedulerWorker* worker) {
+    // Save |can_cleanup_| before signaling |cleanup_requested_| so that a
+    // caller waiting on |cleanup_requested_| can let this thread go back to
+    // sleep and then time out.
+    bool can_cleanup = controls_->can_cleanup_;
+    controls_->cleanup_requested_.Signal();
+    return can_cleanup;
+  }
+
+  void OnCleanup() {
+    EXPECT_TRUE(controls_->can_cleanup_);
+    EXPECT_TRUE(controls_->cleanup_requested_.IsSignaled());
+  }
+
+  // ControllableCleanupDelegate:
+  scoped_refptr<Controls> controls() { return controls_; }
+
+ private:
+  scoped_refptr<Sequence> work_sequence_;
+  TaskTracker* const task_tracker_;
+  scoped_refptr<Controls> controls_;
+
+  DISALLOW_COPY_AND_ASSIGN(ControllableCleanupDelegate);
+};
+
+class MockedControllableCleanupDelegate : public ControllableCleanupDelegate {
+ public:
+  MockedControllableCleanupDelegate(TaskTracker* task_tracker)
+      : ControllableCleanupDelegate(task_tracker) {}
+  ~MockedControllableCleanupDelegate() override = default;
+
+  // SchedulerWorker::Delegate:
+  MOCK_METHOD1(OnMainEntry, void(const SchedulerWorker* worker));
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MockedControllableCleanupDelegate);
+};
+
+}  // namespace
+
+// Verify that calling SchedulerWorker::Cleanup() from GetWork() causes
+// the SchedulerWorker's thread to exit.
+TEST(TaskSchedulerWorkerTest, WorkerCleanupFromGetWork) {
+  TaskTracker task_tracker("Test");
+  // Will be owned by SchedulerWorker.
+  MockedControllableCleanupDelegate* delegate =
+      new StrictMock<MockedControllableCleanupDelegate>(&task_tracker);
+  scoped_refptr<ControllableCleanupDelegate::Controls> controls =
+      delegate->controls();
+  controls->set_can_cleanup(true);
+  EXPECT_CALL(*delegate, OnMainEntry(_));
+  auto worker = MakeRefCounted<SchedulerWorker>(ThreadPriority::NORMAL,
+                                                WrapUnique(delegate),
+                                                task_tracker.GetTrackedRef());
+  worker->Start();
+  worker->WakeUp();
+  controls->WaitForWorkToRun();
+  Mock::VerifyAndClear(delegate);
+  controls->WaitForMainExit();
+}
+
+TEST(TaskSchedulerWorkerTest, WorkerCleanupDuringWork) {
+  TaskTracker task_tracker("Test");
+  // Will be owned by SchedulerWorker.
+  // No mock here as that's reasonably covered by other tests and the delegate
+  // may be destroyed on a different thread. Mocks aren't designed with that in
+  // mind.
+  std::unique_ptr<ControllableCleanupDelegate> delegate =
+      std::make_unique<ControllableCleanupDelegate>(&task_tracker);
+  scoped_refptr<ControllableCleanupDelegate::Controls> controls =
+      delegate->controls();
+
+  controls->HaveWorkBlock();
+
+  auto worker = MakeRefCounted<SchedulerWorker>(ThreadPriority::NORMAL,
+                                                std::move(delegate),
+                                                task_tracker.GetTrackedRef());
+  worker->Start();
+  worker->WakeUp();
+
+  controls->WaitForWorkToRun();
+  worker->Cleanup();
+  worker = nullptr;
+  controls->UnblockWork();
+  controls->WaitForDelegateDestroy();
+}
+
+TEST(TaskSchedulerWorkerTest, WorkerCleanupDuringWait) {
+  TaskTracker task_tracker("Test");
+  // Will be owned by SchedulerWorker.
+  // No mock here as that's reasonably covered by other tests and the delegate
+  // may be destroyed on a different thread. Mocks aren't designed with that in
+  // mind.
+  std::unique_ptr<ControllableCleanupDelegate> delegate =
+      std::make_unique<ControllableCleanupDelegate>(&task_tracker);
+  scoped_refptr<ControllableCleanupDelegate::Controls> controls =
+      delegate->controls();
+
+  auto worker = MakeRefCounted<SchedulerWorker>(ThreadPriority::NORMAL,
+                                                std::move(delegate),
+                                                task_tracker.GetTrackedRef());
+  worker->Start();
+  worker->WakeUp();
+
+  controls->WaitForCleanupRequest();
+  worker->Cleanup();
+  worker = nullptr;
+  controls->WaitForDelegateDestroy();
+}
+
+TEST(TaskSchedulerWorkerTest, WorkerCleanupDuringShutdown) {
+  TaskTracker task_tracker("Test");
+  // Will be owned by SchedulerWorker.
+  // No mock here as that's reasonably covered by other tests and the delegate
+  // may be destroyed on a different thread. Mocks aren't designed with that in
+  // mind.
+  std::unique_ptr<ControllableCleanupDelegate> delegate =
+      std::make_unique<ControllableCleanupDelegate>(&task_tracker);
+  scoped_refptr<ControllableCleanupDelegate::Controls> controls =
+      delegate->controls();
+
+  controls->HaveWorkBlock();
+
+  auto worker = MakeRefCounted<SchedulerWorker>(ThreadPriority::NORMAL,
+                                                std::move(delegate),
+                                                task_tracker.GetTrackedRef());
+  worker->Start();
+  worker->WakeUp();
+
+  controls->WaitForWorkToRun();
+  task_tracker.Shutdown();
+  worker->Cleanup();
+  worker = nullptr;
+  controls->UnblockWork();
+  controls->WaitForDelegateDestroy();
+}
+
+// Verify that Start() is a no-op after Cleanup().
+TEST(TaskSchedulerWorkerTest, CleanupBeforeStart) {
+  TaskTracker task_tracker("Test");
+  // Will be owned by SchedulerWorker.
+  // No mock here as that's reasonably covered by other tests and the delegate
+  // may be destroyed on a different thread. Mocks aren't designed with that in
+  // mind.
+  std::unique_ptr<ControllableCleanupDelegate> delegate =
+      std::make_unique<ControllableCleanupDelegate>(&task_tracker);
+  scoped_refptr<ControllableCleanupDelegate::Controls> controls =
+      delegate->controls();
+  controls->set_expect_get_work(false);
+
+  auto worker = MakeRefCounted<SchedulerWorker>(ThreadPriority::NORMAL,
+                                                std::move(delegate),
+                                                task_tracker.GetTrackedRef());
+
+  worker->Cleanup();
+  worker->Start();
+
+  EXPECT_FALSE(worker->ThreadAliveForTesting());
+}
+
+namespace {
+
+class CallJoinFromDifferentThread : public SimpleThread {
+ public:
+  CallJoinFromDifferentThread(SchedulerWorker* worker_to_join)
+      : SimpleThread("SchedulerWorkerJoinThread"),
+        worker_to_join_(worker_to_join),
+        run_started_event_(WaitableEvent::ResetPolicy::MANUAL,
+                           WaitableEvent::InitialState::NOT_SIGNALED) {}
+
+  ~CallJoinFromDifferentThread() override = default;
+
+  void Run() override {
+    run_started_event_.Signal();
+    worker_to_join_->JoinForTesting();
+  }
+
+  void WaitForRunToStart() { run_started_event_.Wait(); }
+
+ private:
+  SchedulerWorker* const worker_to_join_;
+  WaitableEvent run_started_event_;
+  DISALLOW_COPY_AND_ASSIGN(CallJoinFromDifferentThread);
+};
+
+}  // namespace
+
+TEST(TaskSchedulerWorkerTest, WorkerCleanupDuringJoin) {
+  TaskTracker task_tracker("Test");
+  // Will be owned by SchedulerWorker.
+  // No mock here as that's reasonably covered by other tests and the delegate
+  // may be destroyed on a different thread. Mocks aren't designed with that in
+  // mind.
+  std::unique_ptr<ControllableCleanupDelegate> delegate =
+      std::make_unique<ControllableCleanupDelegate>(&task_tracker);
+  scoped_refptr<ControllableCleanupDelegate::Controls> controls =
+      delegate->controls();
+
+  controls->HaveWorkBlock();
+
+  auto worker = MakeRefCounted<SchedulerWorker>(ThreadPriority::NORMAL,
+                                                std::move(delegate),
+                                                task_tracker.GetTrackedRef());
+  worker->Start();
+  worker->WakeUp();
+
+  controls->WaitForWorkToRun();
+  CallJoinFromDifferentThread join_from_different_thread(worker.get());
+  join_from_different_thread.Start();
+  join_from_different_thread.WaitForRunToStart();
+  // Sleep here to give the other thread a chance to call JoinForTesting().
+  // Receiving a signal that Run() was called doesn't mean JoinForTesting() was
+  // necessarily called, and we can't signal after JoinForTesting() as
+  // JoinForTesting() blocks until we call UnblockWork().
+  PlatformThread::Sleep(TestTimeouts::tiny_timeout());
+  worker->Cleanup();
+  worker = nullptr;
+  controls->UnblockWork();
+  controls->WaitForDelegateDestroy();
+  join_from_different_thread.Join();
+}
+
+namespace {
+
+class ExpectThreadPriorityDelegate : public SchedulerWorkerDefaultDelegate {
+ public:
+  ExpectThreadPriorityDelegate()
+      : priority_verified_in_get_work_event_(
+            WaitableEvent::ResetPolicy::AUTOMATIC,
+            WaitableEvent::InitialState::NOT_SIGNALED),
+        expected_thread_priority_(ThreadPriority::BACKGROUND) {}
+
+  void SetExpectedThreadPriority(ThreadPriority expected_thread_priority) {
+    expected_thread_priority_ = expected_thread_priority;
+  }
+
+  void WaitForPriorityVerifiedInGetWork() {
+    priority_verified_in_get_work_event_.Wait();
+  }
+
+  // SchedulerWorker::Delegate:
+  void OnMainEntry(const SchedulerWorker* worker) override {
+    VerifyThreadPriority();
+  }
+  scoped_refptr<Sequence> GetWork(SchedulerWorker* worker) override {
+    VerifyThreadPriority();
+    priority_verified_in_get_work_event_.Signal();
+    return nullptr;
+  }
+
+ private:
+  void VerifyThreadPriority() {
+    AutoSchedulerLock auto_lock(expected_thread_priority_lock_);
+    EXPECT_EQ(expected_thread_priority_,
+              PlatformThread::GetCurrentThreadPriority());
+  }
+
+  // Signaled after GetWork() has verified the priority of the worker thread.
+  WaitableEvent priority_verified_in_get_work_event_;
+
+  // Synchronizes access to |expected_thread_priority_|.
+  SchedulerLock expected_thread_priority_lock_;
+
+  // Expected thread priority for the next call to OnMainEntry() or GetWork().
+  ThreadPriority expected_thread_priority_;
+
+  DISALLOW_COPY_AND_ASSIGN(ExpectThreadPriorityDelegate);
+};
+
+}  // namespace
+
+TEST(TaskSchedulerWorkerTest, BumpPriorityOfAliveThreadDuringShutdown) {
+  TaskTracker task_tracker("Test");
+
+  std::unique_ptr<ExpectThreadPriorityDelegate> delegate(
+      new ExpectThreadPriorityDelegate);
+  ExpectThreadPriorityDelegate* delegate_raw = delegate.get();
+  delegate_raw->SetExpectedThreadPriority(
+      PlatformThread::CanIncreaseCurrentThreadPriority()
+          ? ThreadPriority::BACKGROUND
+          : ThreadPriority::NORMAL);
+
+  auto worker = MakeRefCounted<SchedulerWorker>(ThreadPriority::BACKGROUND,
+                                                std::move(delegate),
+                                                task_tracker.GetTrackedRef());
+  worker->Start();
+
+  // Verify that the initial thread priority is BACKGROUND (or NORMAL if thread
+  // priority can't be increased).
+  worker->WakeUp();
+  delegate_raw->WaitForPriorityVerifiedInGetWork();
+
+  // Verify that the thread priority is bumped to NORMAL during shutdown.
+  delegate_raw->SetExpectedThreadPriority(ThreadPriority::NORMAL);
+  task_tracker.SetHasShutdownStartedForTesting();
+  worker->WakeUp();
+  delegate_raw->WaitForPriorityVerifiedInGetWork();
+
+  worker->JoinForTesting();
+}
+
+namespace {
+
+class VerifyCallsToObserverDelegate : public SchedulerWorkerDefaultDelegate {
+ public:
+  VerifyCallsToObserverDelegate(test::MockSchedulerWorkerObserver* observer)
+      : observer_(observer) {}
+
+  // SchedulerWorker::Delegate:
+  void OnMainEntry(const SchedulerWorker* worker) override {
+    Mock::VerifyAndClear(observer_);
+  }
+
+  void OnMainExit(SchedulerWorker* worker) override {
+    EXPECT_CALL(*observer_, OnSchedulerWorkerMainExit());
+  }
+
+ private:
+  test::MockSchedulerWorkerObserver* const observer_;
+
+  DISALLOW_COPY_AND_ASSIGN(VerifyCallsToObserverDelegate);
+};
+
+}  // namespace
+
+// Verify that the SchedulerWorkerObserver is notified when the worker enters
+// and exits its main function.
+TEST(TaskSchedulerWorkerTest, SchedulerWorkerObserver) {
+  StrictMock<test::MockSchedulerWorkerObserver> observer;
+  {
+    TaskTracker task_tracker("Test");
+    auto delegate = std::make_unique<VerifyCallsToObserverDelegate>(&observer);
+    auto worker = MakeRefCounted<SchedulerWorker>(ThreadPriority::NORMAL,
+                                                  std::move(delegate),
+                                                  task_tracker.GetTrackedRef());
+
+    EXPECT_CALL(observer, OnSchedulerWorkerMainEntry());
+    worker->Start(&observer);
+    worker->Cleanup();
+    worker = nullptr;
+  }
+  Mock::VerifyAndClear(&observer);
+}
+
+#if defined(OS_WIN)
+
+namespace {
+
+class CoInitializeDelegate : public SchedulerWorkerDefaultDelegate {
+ public:
+  CoInitializeDelegate()
+      : get_work_returned_(WaitableEvent::ResetPolicy::MANUAL,
+                           WaitableEvent::InitialState::NOT_SIGNALED) {}
+
+  scoped_refptr<Sequence> GetWork(SchedulerWorker* worker) override {
+    EXPECT_FALSE(get_work_returned_.IsSignaled());
+    EXPECT_EQ(E_UNEXPECTED, coinitialize_hresult_);
+
+    coinitialize_hresult_ = CoInitializeEx(nullptr, COINIT_APARTMENTTHREADED);
+    if (SUCCEEDED(coinitialize_hresult_))
+      CoUninitialize();
+
+    get_work_returned_.Signal();
+    return nullptr;
+  }
+
+  void WaitUntilGetWorkReturned() { get_work_returned_.Wait(); }
+
+  HRESULT coinitialize_hresult() const { return coinitialize_hresult_; }
+
+ private:
+  WaitableEvent get_work_returned_;
+  HRESULT coinitialize_hresult_ = E_UNEXPECTED;
+
+  DISALLOW_COPY_AND_ASSIGN(CoInitializeDelegate);
+};
+
+}  // namespace
+
+TEST(TaskSchedulerWorkerTest, BackwardCompatibilityEnabled) {
+  TaskTracker task_tracker("Test");
+  auto delegate = std::make_unique<CoInitializeDelegate>();
+  CoInitializeDelegate* const delegate_raw = delegate.get();
+
+  // Create a worker with backward compatibility ENABLED. Wake it up and wait
+  // until GetWork() returns.
+  auto worker = MakeRefCounted<SchedulerWorker>(
+      ThreadPriority::NORMAL, std::move(delegate), task_tracker.GetTrackedRef(),
+      nullptr, SchedulerBackwardCompatibility::INIT_COM_STA);
+  worker->Start();
+  worker->WakeUp();
+  delegate_raw->WaitUntilGetWorkReturned();
+
+  // The call to CoInitializeEx() should have returned S_FALSE to indicate that
+  // the COM library was already initialized on the thread.
+  // See SchedulerWorker::Thread::ThreadMain for why we expect two different
+  // results here.
+#if defined(COM_INIT_CHECK_HOOK_ENABLED)
+  EXPECT_EQ(S_OK, delegate_raw->coinitialize_hresult());
+#else
+  EXPECT_EQ(S_FALSE, delegate_raw->coinitialize_hresult());
+#endif
+
+  worker->JoinForTesting();
+}
+
+TEST(TaskSchedulerWorkerTest, BackwardCompatibilityDisabled) {
+  TaskTracker task_tracker("Test");
+  auto delegate = std::make_unique<CoInitializeDelegate>();
+  CoInitializeDelegate* const delegate_raw = delegate.get();
+
+  // Create a worker with backward compatibility DISABLED. Wake it up and wait
+  // until GetWork() returns.
+  auto worker = MakeRefCounted<SchedulerWorker>(
+      ThreadPriority::NORMAL, std::move(delegate), task_tracker.GetTrackedRef(),
+      nullptr, SchedulerBackwardCompatibility::DISABLED);
+  worker->Start();
+  worker->WakeUp();
+  delegate_raw->WaitUntilGetWorkReturned();
+
+  // The call to CoInitializeEx() should have returned S_OK to indicate that the
+  // COM library wasn't already initialized on the thread.
+  EXPECT_EQ(S_OK, delegate_raw->coinitialize_hresult());
+
+  worker->JoinForTesting();
+}
+
+#endif  // defined(OS_WIN)
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/task_scheduler/scoped_set_task_priority_for_current_thread.cc b/base/task_scheduler/scoped_set_task_priority_for_current_thread.cc
new file mode 100644
index 0000000..a163863
--- /dev/null
+++ b/base/task_scheduler/scoped_set_task_priority_for_current_thread.cc
@@ -0,0 +1,41 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/scoped_set_task_priority_for_current_thread.h"
+
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/threading/thread_local.h"
+
+namespace base {
+namespace internal {
+
+namespace {
+
+LazyInstance<ThreadLocalPointer<const TaskPriority>>::Leaky
+    tls_task_priority_for_current_thread = LAZY_INSTANCE_INITIALIZER;
+
+}  // namespace
+
+ScopedSetTaskPriorityForCurrentThread::ScopedSetTaskPriorityForCurrentThread(
+    TaskPriority priority)
+    : priority_(priority) {
+  DCHECK(!tls_task_priority_for_current_thread.Get().Get());
+  tls_task_priority_for_current_thread.Get().Set(&priority_);
+}
+
+ScopedSetTaskPriorityForCurrentThread::
+    ~ScopedSetTaskPriorityForCurrentThread() {
+  DCHECK_EQ(&priority_, tls_task_priority_for_current_thread.Get().Get());
+  tls_task_priority_for_current_thread.Get().Set(nullptr);
+}
+
+TaskPriority GetTaskPriorityForCurrentThread() {
+  const TaskPriority* priority =
+      tls_task_priority_for_current_thread.Get().Get();
+  return priority ? *priority : TaskPriority::USER_VISIBLE;
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/task_scheduler/scoped_set_task_priority_for_current_thread.h b/base/task_scheduler/scoped_set_task_priority_for_current_thread.h
new file mode 100644
index 0000000..4508911
--- /dev/null
+++ b/base/task_scheduler/scoped_set_task_priority_for_current_thread.h
@@ -0,0 +1,36 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_SCOPED_SET_TASK_PRIORITY_FOR_CURRENT_THREAD_H_
+#define BASE_TASK_SCHEDULER_SCOPED_SET_TASK_PRIORITY_FOR_CURRENT_THREAD_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/task_scheduler/task_traits.h"
+
+namespace base {
+namespace internal {
+
+class BASE_EXPORT ScopedSetTaskPriorityForCurrentThread {
+ public:
+  // Within the scope of this object, GetTaskPriorityForCurrentThread() will
+  // return |priority|.
+  ScopedSetTaskPriorityForCurrentThread(TaskPriority priority);
+  ~ScopedSetTaskPriorityForCurrentThread();
+
+ private:
+  const TaskPriority priority_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedSetTaskPriorityForCurrentThread);
+};
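+
+// Usage sketch (illustrative only; mirrors the unit test accompanying this
+// file):
+//   {
+//     ScopedSetTaskPriorityForCurrentThread scoped_priority(
+//         TaskPriority::USER_BLOCKING);
+//     // GetTaskPriorityForCurrentThread() now returns USER_BLOCKING.
+//   }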
+
+// Returns the priority of the TaskScheduler task running on the current thread,
+// or TaskPriority::USER_VISIBLE if no TaskScheduler task is running on the
+// current thread.
+BASE_EXPORT TaskPriority GetTaskPriorityForCurrentThread();
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_SCOPED_SET_TASK_PRIORITY_FOR_CURRENT_THREAD_H_
diff --git a/base/task_scheduler/scoped_set_task_priority_for_current_thread_unittest.cc b/base/task_scheduler/scoped_set_task_priority_for_current_thread_unittest.cc
new file mode 100644
index 0000000..c497af6
--- /dev/null
+++ b/base/task_scheduler/scoped_set_task_priority_for_current_thread_unittest.cc
@@ -0,0 +1,26 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/scoped_set_task_priority_for_current_thread.h"
+
+#include "base/task_scheduler/task_traits.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace internal {
+
+TEST(TaskSchedulerScopedSetTaskPriorityForCurrentThreadTest,
+     ScopedSetTaskPriorityForCurrentThread) {
+  EXPECT_EQ(TaskPriority::USER_VISIBLE, GetTaskPriorityForCurrentThread());
+  {
+    ScopedSetTaskPriorityForCurrentThread
+        scoped_set_task_priority_for_current_thread(
+            TaskPriority::USER_BLOCKING);
+    EXPECT_EQ(TaskPriority::USER_BLOCKING, GetTaskPriorityForCurrentThread());
+  }
+  EXPECT_EQ(TaskPriority::USER_VISIBLE, GetTaskPriorityForCurrentThread());
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/task_scheduler/sequence.cc b/base/task_scheduler/sequence.cc
new file mode 100644
index 0000000..4737f8e
--- /dev/null
+++ b/base/task_scheduler/sequence.cc
@@ -0,0 +1,87 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/sequence.h"
+
+#include <utility>
+
+#include "base/logging.h"
+#include "base/time/time.h"
+
+namespace base {
+namespace internal {
+
+Sequence::Sequence() = default;
+
+bool Sequence::PushTask(Task task) {
+  // Use CHECK instead of DCHECK to crash earlier. See http://crbug.com/711167
+  // for details.
+  CHECK(task.task);
+  DCHECK(task.sequenced_time.is_null());
+  task.sequenced_time = base::TimeTicks::Now();
+
+  AutoSchedulerLock auto_lock(lock_);
+  ++num_tasks_per_priority_[static_cast<int>(task.traits.priority())];
+  queue_.push(std::move(task));
+
+  // Return true if the sequence was empty before the push.
+  return queue_.size() == 1;
+}
+
+Optional<Task> Sequence::TakeTask() {
+  AutoSchedulerLock auto_lock(lock_);
+  DCHECK(!queue_.empty());
+  DCHECK(queue_.front().task);
+
+  const int priority_index = static_cast<int>(queue_.front().traits.priority());
+  DCHECK_GT(num_tasks_per_priority_[priority_index], 0U);
+  --num_tasks_per_priority_[priority_index];
+
+  return std::move(queue_.front());
+}
+
+TaskTraits Sequence::PeekTaskTraits() const {
+  AutoSchedulerLock auto_lock(lock_);
+  DCHECK(!queue_.empty());
+  DCHECK(queue_.front().task);
+  return queue_.front().traits;
+}
+
+bool Sequence::Pop() {
+  AutoSchedulerLock auto_lock(lock_);
+  DCHECK(!queue_.empty());
+  DCHECK(!queue_.front().task);
+  queue_.pop();
+  return queue_.empty();
+}
+
+SequenceSortKey Sequence::GetSortKey() const {
+  TaskPriority priority = TaskPriority::LOWEST;
+  base::TimeTicks next_task_sequenced_time;
+
+  {
+    AutoSchedulerLock auto_lock(lock_);
+    DCHECK(!queue_.empty());
+
+    // Find the highest task priority in the sequence.
+    const int highest_priority_index = static_cast<int>(TaskPriority::HIGHEST);
+    const int lowest_priority_index = static_cast<int>(TaskPriority::LOWEST);
+    for (int i = highest_priority_index; i > lowest_priority_index; --i) {
+      if (num_tasks_per_priority_[i] > 0) {
+        priority = static_cast<TaskPriority>(i);
+        break;
+      }
+    }
+
+    // Save the sequenced time of the next task in the sequence.
+    next_task_sequenced_time = queue_.front().sequenced_time;
+  }
+
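+  // Note (illustrative): the key can pair values from different tasks. For a
+  // sequence holding a BACKGROUND task sequenced at t=1000 followed by a
+  // USER_BLOCKING task sequenced at t=2000, this returns
+  // {USER_BLOCKING, t=1000}.
+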
+  return SequenceSortKey(priority, next_task_sequenced_time);
+}
+
+Sequence::~Sequence() = default;
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/task_scheduler/sequence.h b/base/task_scheduler/sequence.h
new file mode 100644
index 0000000..ec5e8c1
--- /dev/null
+++ b/base/task_scheduler/sequence.h
@@ -0,0 +1,107 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_SEQUENCE_H_
+#define BASE_TASK_SCHEDULER_SEQUENCE_H_
+
+#include <stddef.h>
+
+#include "base/base_export.h"
+#include "base/containers/queue.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/optional.h"
+#include "base/sequence_token.h"
+#include "base/task_scheduler/scheduler_lock.h"
+#include "base/task_scheduler/sequence_sort_key.h"
+#include "base/task_scheduler/task.h"
+#include "base/task_scheduler/task_traits.h"
+#include "base/threading/sequence_local_storage_map.h"
+
+namespace base {
+namespace internal {
+
+// A Sequence holds slots each containing up to a single Task that must be
+// executed in posting order.
+//
+// In comments below, an "empty Sequence" is a Sequence with no slot.
+//
+// Note: there is a known refcounted-ownership cycle in the Scheduler
+// architecture: Sequence -> Task -> TaskRunner -> Sequence -> ...
+// This is okay so long as the other owners of Sequence (PriorityQueue and
+// SchedulerWorker in alternation and
+// SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::GetWork()
+// temporarily) keep running it (and taking Tasks from it as a result). A
+// dangling reference cycle would only occur should they release their reference
+// to it while it's not empty. In other words, it is only correct for them to
+// release it after Pop() returns true to indicate it was made empty by that
+// call (in which case the next PushTask() will return true to indicate to
+// the caller that the Sequence should be re-enqueued for execution).
+//
+// This class is thread-safe.
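+//
+// Usage sketch (illustrative only; scheduler integration omitted and |task|
+// assumed to be a valid Task built by the caller):
+//   scoped_refptr<Sequence> sequence = MakeRefCounted<Sequence>();
+//   const bool was_empty = sequence->PushTask(std::move(task));
+//   // If |was_empty|, the caller re-enqueues |sequence| for execution.
+//   Optional<Task> front_task = sequence->TakeTask();
+//   std::move(front_task->task).Run();
+//   const bool now_empty = sequence->Pop();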
+class BASE_EXPORT Sequence : public RefCountedThreadSafe<Sequence> {
+ public:
+  Sequence();
+
+  // Adds |task| in a new slot at the end of the Sequence. Returns true if the
+  // Sequence was empty before this operation.
+  bool PushTask(Task task);
+
+  // Transfers ownership of the Task in the front slot of the Sequence to the
+  // caller. The front slot of the Sequence will remain empty until Pop() is
+  // called. Cannot be called on an empty Sequence or a Sequence whose front
+  // slot is already empty.
+  //
+  // Because this method cannot be called on an empty Sequence, the returned
+  // Optional<Task> is never nullopt. An Optional is used in preparation for the
+  // merge between TaskScheduler and TaskQueueManager (in Blink).
+  // https://crbug.com/783309
+  Optional<Task> TakeTask();
+
+  // Returns the TaskTraits of the Task in front of the Sequence. Cannot be
+  // called on an empty Sequence or on a Sequence whose front slot is empty.
+  TaskTraits PeekTaskTraits() const;
+
+  // Removes the front slot of the Sequence. The front slot must have been
+  // emptied by TakeTask() before this is called. Cannot be called on an empty
+  // Sequence. Returns true if the Sequence is empty after this operation.
+  bool Pop();
+
+  // Returns a SequenceSortKey representing the priority of the Sequence. Cannot
+  // be called on an empty Sequence.
+  SequenceSortKey GetSortKey() const;
+
+  // Returns a token that uniquely identifies this Sequence.
+  const SequenceToken& token() const { return token_; }
+
+  SequenceLocalStorageMap* sequence_local_storage() {
+    return &sequence_local_storage_;
+  }
+
+ private:
+  friend class RefCountedThreadSafe<Sequence>;
+  ~Sequence();
+
+  const SequenceToken token_ = SequenceToken::Create();
+
+  // Synchronizes access to all members.
+  mutable SchedulerLock lock_;
+
+  // Queue of tasks to execute.
+  base::queue<Task> queue_;
+
+  // Number of tasks contained in the Sequence for each priority.
+  size_t num_tasks_per_priority_[static_cast<int>(TaskPriority::HIGHEST) + 1] =
+      {};
+
+  // Holds data stored through the SequenceLocalStorageSlot API.
+  SequenceLocalStorageMap sequence_local_storage_;
+
+  DISALLOW_COPY_AND_ASSIGN(Sequence);
+};
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_SEQUENCE_H_
diff --git a/base/task_scheduler/sequence_sort_key.cc b/base/task_scheduler/sequence_sort_key.cc
new file mode 100644
index 0000000..e356c8b
--- /dev/null
+++ b/base/task_scheduler/sequence_sort_key.cc
@@ -0,0 +1,29 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/sequence_sort_key.h"
+
+namespace base {
+namespace internal {
+
+SequenceSortKey::SequenceSortKey(TaskPriority priority,
+                                 TimeTicks next_task_sequenced_time)
+    : priority_(priority),
+      next_task_sequenced_time_(next_task_sequenced_time) {}
+
+bool SequenceSortKey::operator<(const SequenceSortKey& other) const {
+  // This SequenceSortKey is considered less important than |other| if it has a
+  // lower priority or if it has the same priority but its next task was posted
+  // later than |other|'s.
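+  // For example, a BACKGROUND key is less than any USER_BLOCKING key, and of
+  // two USER_BLOCKING keys, the one whose next task was sequenced at t=2000 is
+  // less than the one sequenced at t=1000 (exercised by
+  // sequence_sort_key_unittest.cc in this CL).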
+  const int priority_diff =
+      static_cast<int>(priority_) - static_cast<int>(other.priority_);
+  if (priority_diff < 0)
+    return true;
+  if (priority_diff > 0)
+    return false;
+  return next_task_sequenced_time_ > other.next_task_sequenced_time_;
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/task_scheduler/sequence_sort_key.h b/base/task_scheduler/sequence_sort_key.h
new file mode 100644
index 0000000..2e126c5
--- /dev/null
+++ b/base/task_scheduler/sequence_sort_key.h
@@ -0,0 +1,52 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_SEQUENCE_SORT_KEY_H_
+#define BASE_TASK_SCHEDULER_SEQUENCE_SORT_KEY_H_
+
+#include "base/base_export.h"
+#include "base/task_scheduler/task_traits.h"
+#include "base/time/time.h"
+
+namespace base {
+namespace internal {
+
+// An immutable but assignable representation of the priority of a Sequence.
+class BASE_EXPORT SequenceSortKey final {
+ public:
+  SequenceSortKey(TaskPriority priority, TimeTicks next_task_sequenced_time);
+
+  TaskPriority priority() const { return priority_; }
+  TimeTicks next_task_sequenced_time() const {
+    return next_task_sequenced_time_;
+  }
+
+  bool operator<(const SequenceSortKey& other) const;
+  bool operator>(const SequenceSortKey& other) const { return other < *this; }
+
+  bool operator==(const SequenceSortKey& other) const {
+    return priority_ == other.priority_ &&
+           next_task_sequenced_time_ == other.next_task_sequenced_time_;
+  }
+  bool operator!=(const SequenceSortKey& other) const {
+    return !(other == *this);
+  }
+
+ private:
+  // The private section allows this class to keep its immutable property while
+  // being copy-assignable (i.e. instead of making its members const).
+
+  // Highest task priority in the sequence at the time this sort key was
+  // created.
+  TaskPriority priority_;
+
+  // Sequenced time of the next task to run in the sequence at the time this
+  // sort key was created.
+  TimeTicks next_task_sequenced_time_;
+};
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_SEQUENCE_SORT_KEY_H_
diff --git a/base/task_scheduler/sequence_sort_key_unittest.cc b/base/task_scheduler/sequence_sort_key_unittest.cc
new file mode 100644
index 0000000..2c1d80d
--- /dev/null
+++ b/base/task_scheduler/sequence_sort_key_unittest.cc
@@ -0,0 +1,243 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/sequence_sort_key.h"
+
+#include "base/task_scheduler/task_traits.h"
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace internal {
+
+TEST(TaskSchedulerSequenceSortKeyTest, OperatorLessThan) {
+  SequenceSortKey key_a(TaskPriority::USER_BLOCKING,
+                        TimeTicks::FromInternalValue(1000));
+  SequenceSortKey key_b(TaskPriority::USER_BLOCKING,
+                        TimeTicks::FromInternalValue(2000));
+  SequenceSortKey key_c(TaskPriority::USER_VISIBLE,
+                        TimeTicks::FromInternalValue(1000));
+  SequenceSortKey key_d(TaskPriority::USER_VISIBLE,
+                        TimeTicks::FromInternalValue(2000));
+  SequenceSortKey key_e(TaskPriority::BACKGROUND,
+                        TimeTicks::FromInternalValue(1000));
+  SequenceSortKey key_f(TaskPriority::BACKGROUND,
+                        TimeTicks::FromInternalValue(2000));
+
+  EXPECT_FALSE(key_a < key_a);
+  EXPECT_LT(key_b, key_a);
+  EXPECT_LT(key_c, key_a);
+  EXPECT_LT(key_d, key_a);
+  EXPECT_LT(key_e, key_a);
+  EXPECT_LT(key_f, key_a);
+
+  EXPECT_FALSE(key_a < key_b);
+  EXPECT_FALSE(key_b < key_b);
+  EXPECT_LT(key_c, key_b);
+  EXPECT_LT(key_d, key_b);
+  EXPECT_LT(key_e, key_b);
+  EXPECT_LT(key_f, key_b);
+
+  EXPECT_FALSE(key_a < key_c);
+  EXPECT_FALSE(key_b < key_c);
+  EXPECT_FALSE(key_c < key_c);
+  EXPECT_LT(key_d, key_c);
+  EXPECT_LT(key_e, key_c);
+  EXPECT_LT(key_f, key_c);
+
+  EXPECT_FALSE(key_a < key_d);
+  EXPECT_FALSE(key_b < key_d);
+  EXPECT_FALSE(key_c < key_d);
+  EXPECT_FALSE(key_d < key_d);
+  EXPECT_LT(key_e, key_d);
+  EXPECT_LT(key_f, key_d);
+
+  EXPECT_FALSE(key_a < key_e);
+  EXPECT_FALSE(key_b < key_e);
+  EXPECT_FALSE(key_c < key_e);
+  EXPECT_FALSE(key_d < key_e);
+  EXPECT_FALSE(key_e < key_e);
+  EXPECT_LT(key_f, key_e);
+
+  EXPECT_FALSE(key_a < key_f);
+  EXPECT_FALSE(key_b < key_f);
+  EXPECT_FALSE(key_c < key_f);
+  EXPECT_FALSE(key_d < key_f);
+  EXPECT_FALSE(key_e < key_f);
+  EXPECT_FALSE(key_f < key_f);
+}
+
+TEST(TaskSchedulerSequenceSortKeyTest, OperatorGreaterThan) {
+  SequenceSortKey key_a(TaskPriority::USER_BLOCKING,
+                        TimeTicks::FromInternalValue(1000));
+  SequenceSortKey key_b(TaskPriority::USER_BLOCKING,
+                        TimeTicks::FromInternalValue(2000));
+  SequenceSortKey key_c(TaskPriority::USER_VISIBLE,
+                        TimeTicks::FromInternalValue(1000));
+  SequenceSortKey key_d(TaskPriority::USER_VISIBLE,
+                        TimeTicks::FromInternalValue(2000));
+  SequenceSortKey key_e(TaskPriority::BACKGROUND,
+                        TimeTicks::FromInternalValue(1000));
+  SequenceSortKey key_f(TaskPriority::BACKGROUND,
+                        TimeTicks::FromInternalValue(2000));
+
+  EXPECT_FALSE(key_a > key_a);
+  EXPECT_FALSE(key_b > key_a);
+  EXPECT_FALSE(key_c > key_a);
+  EXPECT_FALSE(key_d > key_a);
+  EXPECT_FALSE(key_e > key_a);
+  EXPECT_FALSE(key_f > key_a);
+
+  EXPECT_GT(key_a, key_b);
+  EXPECT_FALSE(key_b > key_b);
+  EXPECT_FALSE(key_c > key_b);
+  EXPECT_FALSE(key_d > key_b);
+  EXPECT_FALSE(key_e > key_b);
+  EXPECT_FALSE(key_f > key_b);
+
+  EXPECT_GT(key_a, key_c);
+  EXPECT_GT(key_b, key_c);
+  EXPECT_FALSE(key_c > key_c);
+  EXPECT_FALSE(key_d > key_c);
+  EXPECT_FALSE(key_e > key_c);
+  EXPECT_FALSE(key_f > key_c);
+
+  EXPECT_GT(key_a, key_d);
+  EXPECT_GT(key_b, key_d);
+  EXPECT_GT(key_c, key_d);
+  EXPECT_FALSE(key_d > key_d);
+  EXPECT_FALSE(key_e > key_d);
+  EXPECT_FALSE(key_f > key_d);
+
+  EXPECT_GT(key_a, key_e);
+  EXPECT_GT(key_b, key_e);
+  EXPECT_GT(key_c, key_e);
+  EXPECT_GT(key_d, key_e);
+  EXPECT_FALSE(key_e > key_e);
+  EXPECT_FALSE(key_f > key_e);
+
+  EXPECT_GT(key_a, key_f);
+  EXPECT_GT(key_b, key_f);
+  EXPECT_GT(key_c, key_f);
+  EXPECT_GT(key_d, key_f);
+  EXPECT_GT(key_e, key_f);
+  EXPECT_FALSE(key_f > key_f);
+}
+
+TEST(TaskSchedulerSequenceSortKeyTest, OperatorEqual) {
+  SequenceSortKey key_a(TaskPriority::USER_BLOCKING,
+                        TimeTicks::FromInternalValue(1000));
+  SequenceSortKey key_b(TaskPriority::USER_BLOCKING,
+                        TimeTicks::FromInternalValue(2000));
+  SequenceSortKey key_c(TaskPriority::USER_VISIBLE,
+                        TimeTicks::FromInternalValue(1000));
+  SequenceSortKey key_d(TaskPriority::USER_VISIBLE,
+                        TimeTicks::FromInternalValue(2000));
+  SequenceSortKey key_e(TaskPriority::BACKGROUND,
+                        TimeTicks::FromInternalValue(1000));
+  SequenceSortKey key_f(TaskPriority::BACKGROUND,
+                        TimeTicks::FromInternalValue(2000));
+
+  EXPECT_EQ(key_a, key_a);
+  EXPECT_FALSE(key_b == key_a);
+  EXPECT_FALSE(key_c == key_a);
+  EXPECT_FALSE(key_d == key_a);
+  EXPECT_FALSE(key_e == key_a);
+  EXPECT_FALSE(key_f == key_a);
+
+  EXPECT_FALSE(key_a == key_b);
+  EXPECT_EQ(key_b, key_b);
+  EXPECT_FALSE(key_c == key_b);
+  EXPECT_FALSE(key_d == key_b);
+  EXPECT_FALSE(key_e == key_b);
+  EXPECT_FALSE(key_f == key_b);
+
+  EXPECT_FALSE(key_a == key_c);
+  EXPECT_FALSE(key_b == key_c);
+  EXPECT_EQ(key_c, key_c);
+  EXPECT_FALSE(key_d == key_c);
+  EXPECT_FALSE(key_e == key_c);
+  EXPECT_FALSE(key_f == key_c);
+
+  EXPECT_FALSE(key_a == key_d);
+  EXPECT_FALSE(key_b == key_d);
+  EXPECT_FALSE(key_c == key_d);
+  EXPECT_EQ(key_d, key_d);
+  EXPECT_FALSE(key_e == key_d);
+  EXPECT_FALSE(key_f == key_d);
+
+  EXPECT_FALSE(key_a == key_e);
+  EXPECT_FALSE(key_b == key_e);
+  EXPECT_FALSE(key_c == key_e);
+  EXPECT_FALSE(key_d == key_e);
+  EXPECT_EQ(key_e, key_e);
+  EXPECT_FALSE(key_f == key_e);
+
+  EXPECT_FALSE(key_a == key_f);
+  EXPECT_FALSE(key_b == key_f);
+  EXPECT_FALSE(key_c == key_f);
+  EXPECT_FALSE(key_d == key_f);
+  EXPECT_FALSE(key_e == key_f);
+  EXPECT_EQ(key_f, key_f);
+}
+
+TEST(TaskSchedulerSequenceSortKeyTest, OperatorNotEqual) {
+  SequenceSortKey key_a(TaskPriority::USER_BLOCKING,
+                        TimeTicks::FromInternalValue(1000));
+  SequenceSortKey key_b(TaskPriority::USER_BLOCKING,
+                        TimeTicks::FromInternalValue(2000));
+  SequenceSortKey key_c(TaskPriority::USER_VISIBLE,
+                        TimeTicks::FromInternalValue(1000));
+  SequenceSortKey key_d(TaskPriority::USER_VISIBLE,
+                        TimeTicks::FromInternalValue(2000));
+  SequenceSortKey key_e(TaskPriority::BACKGROUND,
+                        TimeTicks::FromInternalValue(1000));
+  SequenceSortKey key_f(TaskPriority::BACKGROUND,
+                        TimeTicks::FromInternalValue(2000));
+
+  EXPECT_FALSE(key_a != key_a);
+  EXPECT_NE(key_b, key_a);
+  EXPECT_NE(key_c, key_a);
+  EXPECT_NE(key_d, key_a);
+  EXPECT_NE(key_e, key_a);
+  EXPECT_NE(key_f, key_a);
+
+  EXPECT_NE(key_a, key_b);
+  EXPECT_FALSE(key_b != key_b);
+  EXPECT_NE(key_c, key_b);
+  EXPECT_NE(key_d, key_b);
+  EXPECT_NE(key_e, key_b);
+  EXPECT_NE(key_f, key_b);
+
+  EXPECT_NE(key_a, key_c);
+  EXPECT_NE(key_b, key_c);
+  EXPECT_FALSE(key_c != key_c);
+  EXPECT_NE(key_d, key_c);
+  EXPECT_NE(key_e, key_c);
+  EXPECT_NE(key_f, key_c);
+
+  EXPECT_NE(key_a, key_d);
+  EXPECT_NE(key_b, key_d);
+  EXPECT_NE(key_c, key_d);
+  EXPECT_FALSE(key_d != key_d);
+  EXPECT_NE(key_e, key_d);
+  EXPECT_NE(key_f, key_d);
+
+  EXPECT_NE(key_a, key_e);
+  EXPECT_NE(key_b, key_e);
+  EXPECT_NE(key_c, key_e);
+  EXPECT_NE(key_d, key_e);
+  EXPECT_FALSE(key_e != key_e);
+  EXPECT_NE(key_f, key_e);
+
+  EXPECT_NE(key_a, key_f);
+  EXPECT_NE(key_b, key_f);
+  EXPECT_NE(key_c, key_f);
+  EXPECT_NE(key_d, key_f);
+  EXPECT_NE(key_e, key_f);
+  EXPECT_FALSE(key_f != key_f);
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/task_scheduler/sequence_unittest.cc b/base/task_scheduler/sequence_unittest.cc
new file mode 100644
index 0000000..86d1547
--- /dev/null
+++ b/base/task_scheduler/sequence_unittest.cc
@@ -0,0 +1,172 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/sequence.h"
+
+#include <utility>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/memory/ptr_util.h"
+#include "base/test/gtest_util.h"
+#include "base/time/time.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace internal {
+
+namespace {
+
+class MockTask {
+ public:
+  MOCK_METHOD0(Run, void());
+};
+
+Task CreateTask(MockTask* mock_task) {
+  return Task(FROM_HERE, BindOnce(&MockTask::Run, Unretained(mock_task)),
+              {TaskPriority::BACKGROUND}, TimeDelta());
+}
+
+void ExpectMockTask(MockTask* mock_task, Task* task) {
+  EXPECT_CALL(*mock_task, Run());
+  std::move(task->task).Run();
+  testing::Mock::VerifyAndClear(mock_task);
+}
+
+}  // namespace
+
+TEST(TaskSchedulerSequenceTest, PushTakeRemove) {
+  testing::StrictMock<MockTask> mock_task_a;
+  testing::StrictMock<MockTask> mock_task_b;
+  testing::StrictMock<MockTask> mock_task_c;
+  testing::StrictMock<MockTask> mock_task_d;
+  testing::StrictMock<MockTask> mock_task_e;
+
+  scoped_refptr<Sequence> sequence = MakeRefCounted<Sequence>();
+
+  // Push task A in the sequence. PushTask() should return true since it's the
+  // first task.
+  EXPECT_TRUE(sequence->PushTask(CreateTask(&mock_task_a)));
+
+  // Push tasks B, C and D in the sequence. PushTask() should return false
+  // since there is already a task in the sequence.
+  EXPECT_FALSE(sequence->PushTask(CreateTask(&mock_task_b)));
+  EXPECT_FALSE(sequence->PushTask(CreateTask(&mock_task_c)));
+  EXPECT_FALSE(sequence->PushTask(CreateTask(&mock_task_d)));
+
+  // Take the task in front of the sequence. It should be task A.
+  Optional<Task> task = sequence->TakeTask();
+  ExpectMockTask(&mock_task_a, &task.value());
+  EXPECT_FALSE(task->sequenced_time.is_null());
+
+  // Remove the empty slot. Task B should now be in front.
+  EXPECT_FALSE(sequence->Pop());
+  task = sequence->TakeTask();
+  ExpectMockTask(&mock_task_b, &task.value());
+  EXPECT_FALSE(task->sequenced_time.is_null());
+
+  // Remove the empty slot. Task C should now be in front.
+  EXPECT_FALSE(sequence->Pop());
+  task = sequence->TakeTask();
+  ExpectMockTask(&mock_task_c, &task.value());
+  EXPECT_FALSE(task->sequenced_time.is_null());
+
+  // Remove the empty slot.
+  EXPECT_FALSE(sequence->Pop());
+
+  // Push task E in the sequence.
+  EXPECT_FALSE(sequence->PushTask(CreateTask(&mock_task_e)));
+
+  // Task D should be in front.
+  task = sequence->TakeTask();
+  ExpectMockTask(&mock_task_d, &task.value());
+  EXPECT_FALSE(task->sequenced_time.is_null());
+
+  // Remove the empty slot. Task E should now be in front.
+  EXPECT_FALSE(sequence->Pop());
+  task = sequence->TakeTask();
+  ExpectMockTask(&mock_task_e, &task.value());
+  EXPECT_FALSE(task->sequenced_time.is_null());
+
+  // Remove the empty slot. The sequence should now be empty.
+  EXPECT_TRUE(sequence->Pop());
+}
+
+// Verifies the sort key of a sequence that contains one BACKGROUND task.
+TEST(TaskSchedulerSequenceTest, GetSortKeyBackground) {
+  // Create a sequence with a BACKGROUND task.
+  Task background_task(FROM_HERE, DoNothing(), {TaskPriority::BACKGROUND},
+                       TimeDelta());
+  scoped_refptr<Sequence> background_sequence = MakeRefCounted<Sequence>();
+  background_sequence->PushTask(std::move(background_task));
+
+  // Get the sort key.
+  const SequenceSortKey background_sort_key = background_sequence->GetSortKey();
+
+  // Take the task from the sequence, so that its sequenced time is available
+  // for the check below.
+  auto take_background_task = background_sequence->TakeTask();
+
+  // Verify the sort key.
+  EXPECT_EQ(TaskPriority::BACKGROUND, background_sort_key.priority());
+  EXPECT_EQ(take_background_task->sequenced_time,
+            background_sort_key.next_task_sequenced_time());
+
+  // Pop for correctness.
+  background_sequence->Pop();
+}
+
+// Same as TaskSchedulerSequenceTest.GetSortKeyBackground, but with a
+// USER_VISIBLE task.
+TEST(TaskSchedulerSequenceTest, GetSortKeyForeground) {
+  // Create a sequence with a USER_VISIBLE task.
+  Task foreground_task(FROM_HERE, DoNothing(), {TaskPriority::USER_VISIBLE},
+                       TimeDelta());
+  scoped_refptr<Sequence> foreground_sequence = MakeRefCounted<Sequence>();
+  foreground_sequence->PushTask(std::move(foreground_task));
+
+  // Get the sort key.
+  const SequenceSortKey foreground_sort_key = foreground_sequence->GetSortKey();
+
+  // Take the task from the sequence, so that its sequenced time is available
+  // for the check below.
+  auto take_foreground_task = foreground_sequence->TakeTask();
+
+  // Verify the sort key.
+  EXPECT_EQ(TaskPriority::USER_VISIBLE, foreground_sort_key.priority());
+  EXPECT_EQ(take_foreground_task->sequenced_time,
+            foreground_sort_key.next_task_sequenced_time());
+
+  // Pop for correctness.
+  foreground_sequence->Pop();
+}
+
+// Verify that a DCHECK fires if Pop() is called on a sequence whose front slot
+// isn't empty.
+TEST(TaskSchedulerSequenceTest, PopNonEmptyFrontSlot) {
+  scoped_refptr<Sequence> sequence = MakeRefCounted<Sequence>();
+  sequence->PushTask(Task(FROM_HERE, DoNothing(), TaskTraits(), TimeDelta()));
+
+  EXPECT_DCHECK_DEATH({ sequence->Pop(); });
+}
+
+// Verify that a DCHECK fires if TakeTask() is called on a sequence whose front
+// slot is empty.
+TEST(TaskSchedulerSequenceTest, TakeEmptyFrontSlot) {
+  scoped_refptr<Sequence> sequence = MakeRefCounted<Sequence>();
+  sequence->PushTask(Task(FROM_HERE, DoNothing(), TaskTraits(), TimeDelta()));
+
+  EXPECT_TRUE(sequence->TakeTask());
+  EXPECT_DCHECK_DEATH({ sequence->TakeTask(); });
+}
+
+// Verify that a DCHECK fires if TakeTask() is called on an empty sequence.
+TEST(TaskSchedulerSequenceTest, TakeEmptySequence) {
+  scoped_refptr<Sequence> sequence = MakeRefCounted<Sequence>();
+  EXPECT_DCHECK_DEATH({ sequence->TakeTask(); });
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/task_scheduler/service_thread.cc b/base/task_scheduler/service_thread.cc
new file mode 100644
index 0000000..40f217f
--- /dev/null
+++ b/base/task_scheduler/service_thread.cc
@@ -0,0 +1,53 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/service_thread.h"
+
+#include "base/debug/alias.h"
+#include "base/task_scheduler/post_task.h"
+#include "base/task_scheduler/task_tracker.h"
+#include "base/task_scheduler/task_traits.h"
+#include "base/time/time.h"
+
+namespace base {
+namespace internal {
+
+ServiceThread::ServiceThread(const TaskTracker* task_tracker)
+    : Thread("TaskSchedulerServiceThread"), task_tracker_(task_tracker) {}
+
+void ServiceThread::Init() {
+  if (task_tracker_) {
+    heartbeat_latency_timer_.Start(
+        FROM_HERE, TimeDelta::FromSeconds(5),
+        BindRepeating(&ServiceThread::PerformHeartbeatLatencyReport,
+                      Unretained(this)));
+  }
+}
+
+NOINLINE void ServiceThread::Run(RunLoop* run_loop) {
+  const int line_number = __LINE__;
+  Thread::Run(run_loop);
+  base::debug::Alias(&line_number);
+}
+
+void ServiceThread::PerformHeartbeatLatencyReport() const {
+  static constexpr TaskTraits kReportedTraits[] = {
+      {TaskPriority::BACKGROUND},    {TaskPriority::BACKGROUND, MayBlock()},
+      {TaskPriority::USER_VISIBLE},  {TaskPriority::USER_VISIBLE, MayBlock()},
+      {TaskPriority::USER_BLOCKING}, {TaskPriority::USER_BLOCKING, MayBlock()}};
+
+  for (auto& traits : kReportedTraits) {
+    // Post through the static API to time the full stack. Use a new Now() for
+    // every set of traits in case PostTaskWithTraits() itself is slow.
+    base::PostTaskWithTraits(
+        FROM_HERE, traits,
+        BindOnce(&TaskTracker::RecordLatencyHistogram,
+                 Unretained(task_tracker_),
+                 TaskTracker::LatencyHistogramType::HEARTBEAT_LATENCY, traits,
+                 TimeTicks::Now()));
+  }
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/task_scheduler/service_thread.h b/base/task_scheduler/service_thread.h
new file mode 100644
index 0000000..f9b23fa
--- /dev/null
+++ b/base/task_scheduler/service_thread.h
@@ -0,0 +1,54 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_SERVICE_THREAD_H_
+#define BASE_TASK_SCHEDULER_SERVICE_THREAD_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/threading/thread.h"
+#include "base/timer/timer.h"
+
+namespace base {
+namespace internal {
+
+class TaskTracker;
+
+// The TaskScheduler's ServiceThread is a mostly idle thread that is responsible
+// for handling async events (e.g. delayed tasks and async I/O). Its role is to
+// merely forward such events to their destination (hence staying mostly idle
+// and highly responsive).
+// It aliases Thread::Run() to enforce that ServiceThread::Run() be on the stack
+// and make it easier to identify the service thread in stack traces.
+class BASE_EXPORT ServiceThread : public Thread {
+ public:
+  // Constructs a ServiceThread which will report latency metrics through
+  // |task_tracker| if non-null. In that case, this ServiceThread will assume a
+  // registered TaskScheduler instance and that |task_tracker| will outlive this
+  // ServiceThread.
+  explicit ServiceThread(const TaskTracker* task_tracker);
+
+ private:
+  // Thread:
+  void Init() override;
+  void Run(RunLoop* run_loop) override;
+
+  // Kicks off async tasks which will record a histogram on the latency of
+  // various traits.
+  void PerformHeartbeatLatencyReport() const;
+
+  const TaskTracker* const task_tracker_;
+
+  // Fires a recurring heartbeat task to record latency histograms which are
+  // independent from any execution sequence. This is done on the service thread
+  // to avoid all external dependencies (even main thread).
+  base::RepeatingTimer heartbeat_latency_timer_;
+
+  DISALLOW_COPY_AND_ASSIGN(ServiceThread);
+};
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_SERVICE_THREAD_H_
diff --git a/base/task_scheduler/service_thread_unittest.cc b/base/task_scheduler/service_thread_unittest.cc
new file mode 100644
index 0000000..9f61f9b
--- /dev/null
+++ b/base/task_scheduler/service_thread_unittest.cc
@@ -0,0 +1,96 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/service_thread.h"
+
+#include <string>
+
+#include "base/bind.h"
+#include "base/debug/stack_trace.h"
+#include "base/task_scheduler/task_scheduler.h"
+#include "base/task_scheduler/task_scheduler_impl.h"
+#include "base/test/histogram_tester.h"
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace internal {
+
+namespace {
+
+// Verifies that |query| is found on the current stack. Ignores failures if this
+// configuration doesn't have symbols.
+void VerifyHasStringOnStack(const std::string& query) {
+  const std::string stack = debug::StackTrace().ToString();
+  SCOPED_TRACE(stack);
+  const bool found_on_stack = stack.find(query) != std::string::npos;
+  const bool stack_has_symbols =
+      stack.find("SchedulerWorker") != std::string::npos;
+  EXPECT_TRUE(found_on_stack || !stack_has_symbols) << query;
+}
+
+}  // namespace
+
+#if defined(OS_POSIX)
+// Many POSIX bots flakily crash on |debug::StackTrace().ToString()|,
+// https://crbug.com/840429.
+#define MAYBE_StackHasIdentifyingFrame DISABLED_StackHasIdentifyingFrame
+#else
+#define MAYBE_StackHasIdentifyingFrame StackHasIdentifyingFrame
+#endif
+
+TEST(TaskSchedulerServiceThreadTest, MAYBE_StackHasIdentifyingFrame) {
+  ServiceThread service_thread(nullptr);
+  service_thread.Start();
+
+  service_thread.task_runner()->PostTask(
+      FROM_HERE, BindOnce(&VerifyHasStringOnStack, "ServiceThread"));
+
+  service_thread.FlushForTesting();
+}
+
+// Integration test verifying that a service thread running in a fully
+// integrated TaskScheduler environment results in reporting
+// HeartbeatLatencyMicroseconds metrics.
+TEST(TaskSchedulerServiceThreadIntegrationTest, HeartbeatLatencyReport) {
+  TaskScheduler::SetInstance(
+      std::make_unique<internal::TaskSchedulerImpl>("Test"));
+  TaskScheduler::GetInstance()->StartWithDefaultParams();
+
+  static constexpr const char* kExpectedMetrics[] = {
+      "TaskScheduler.HeartbeatLatencyMicroseconds.Test."
+      "UserBlockingTaskPriority",
+      "TaskScheduler.HeartbeatLatencyMicroseconds.Test."
+      "UserBlockingTaskPriority_MayBlock",
+      "TaskScheduler.HeartbeatLatencyMicroseconds.Test."
+      "UserVisibleTaskPriority",
+      "TaskScheduler.HeartbeatLatencyMicroseconds.Test."
+      "UserVisibleTaskPriority_MayBlock",
+      "TaskScheduler.HeartbeatLatencyMicroseconds.Test."
+      "BackgroundTaskPriority",
+      "TaskScheduler.HeartbeatLatencyMicroseconds.Test."
+      "BackgroundTaskPriority_MayBlock"};
+
+  constexpr TimeDelta kReasonableTimeout = TimeDelta::FromSeconds(6);
+  constexpr TimeDelta kBusyWaitTime = TimeDelta::FromMilliseconds(100);
+
+  const TimeTicks start_time = TimeTicks::Now();
+
+  HistogramTester tester;
+  for (const char* expected_metric : kExpectedMetrics) {
+    while (tester.GetAllSamples(expected_metric).empty()) {
+      if (TimeTicks::Now() - start_time > kReasonableTimeout)
+        LOG(WARNING) << "Waiting a while for " << expected_metric;
+      PlatformThread::Sleep(kBusyWaitTime);
+    }
+  }
+
+  TaskScheduler::GetInstance()->JoinForTesting();
+  TaskScheduler::SetInstance(nullptr);
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/task_scheduler/single_thread_task_runner_thread_mode.h b/base/task_scheduler/single_thread_task_runner_thread_mode.h
new file mode 100644
index 0000000..6ed4228
--- /dev/null
+++ b/base/task_scheduler/single_thread_task_runner_thread_mode.h
@@ -0,0 +1,25 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_SINGLE_THREAD_TASK_RUNNER_THREAD_MODE_H_
+#define BASE_TASK_SCHEDULER_SINGLE_THREAD_TASK_RUNNER_THREAD_MODE_H_
+
+namespace base {
+
+enum class SingleThreadTaskRunnerThreadMode {
+  // Allow the SingleThreadTaskRunner's thread to be shared with others,
+  // allowing for efficient use of thread resources when this
+  // SingleThreadTaskRunner is idle. This is the default mode and is
+  // recommended for most code.
+  SHARED,
+  // Dedicate a single thread for this SingleThreadTaskRunner. No other tasks
+  // from any other source will run on the thread backing the
+  // SingleThreadTaskRunner. Use sparingly as this reserves an entire thread for
+  // this SingleThreadTaskRunner.
+  DEDICATED,
+};
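+
+// Usage sketch (illustrative; assumes CreateSingleThreadTaskRunnerWithTraits()
+// as declared in base/task_scheduler/post_task.h):
+//   auto task_runner = CreateSingleThreadTaskRunnerWithTraits(
+//       {MayBlock()}, SingleThreadTaskRunnerThreadMode::SHARED);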
+
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_SINGLE_THREAD_TASK_RUNNER_THREAD_MODE_H_
diff --git a/base/task_scheduler/task.cc b/base/task_scheduler/task.cc
new file mode 100644
index 0000000..563bb1e
--- /dev/null
+++ b/base/task_scheduler/task.cc
@@ -0,0 +1,66 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/task.h"
+
+#include <utility>
+
+#include "base/atomic_sequence_num.h"
+#include "base/critical_closure.h"
+
+namespace base {
+namespace internal {
+
+namespace {
+
+AtomicSequenceNumber g_sequence_nums_for_tracing;
+
+}  // namespace
+
+Task::Task(const Location& posted_from,
+           OnceClosure task,
+           const TaskTraits& traits,
+           TimeDelta delay)
+    : PendingTask(
+          posted_from,
+          traits.shutdown_behavior() == TaskShutdownBehavior::BLOCK_SHUTDOWN
+              ? MakeCriticalClosure(std::move(task))
+              : std::move(task),
+          delay.is_zero() ? TimeTicks() : TimeTicks::Now() + delay,
+          Nestable::kNonNestable),
+      // Prevent a delayed BLOCK_SHUTDOWN task from blocking shutdown before it
+      // starts running by changing its shutdown behavior to SKIP_ON_SHUTDOWN.
+      traits(
+          (!delay.is_zero() &&
+           traits.shutdown_behavior() == TaskShutdownBehavior::BLOCK_SHUTDOWN)
+              ? TaskTraits::Override(traits,
+                                     {TaskShutdownBehavior::SKIP_ON_SHUTDOWN})
+              : traits),
+      delay(delay) {
+  // TaskScheduler doesn't use |sequence_num| but tracing (toplevel.flow) relies
+  // on it being unique. While this subtle dependency is a bit overreaching,
+  // TaskScheduler is the only task system that doesn't use |sequence_num| and
+  // the dependent code rarely changes so this isn't worth a big change and
+  // faking it here isn't too bad for now (posting tasks is full of atomic ops
+  // already).
+  this->sequence_num = g_sequence_nums_for_tracing.GetNext();
+}
+
+// This should be "= default but MSVC has trouble with "noexcept = default" in
+// this case.
+Task::Task(Task&& other) noexcept
+    : PendingTask(std::move(other)),
+      traits(other.traits),
+      delay(other.delay),
+      sequenced_time(other.sequenced_time),
+      sequenced_task_runner_ref(std::move(other.sequenced_task_runner_ref)),
+      single_thread_task_runner_ref(
+          std::move(other.single_thread_task_runner_ref)) {}
+
+Task::~Task() = default;
+
+Task& Task::operator=(Task&& other) = default;
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/task_scheduler/task.h b/base/task_scheduler/task.h
new file mode 100644
index 0000000..3e937a8
--- /dev/null
+++ b/base/task_scheduler/task.h
@@ -0,0 +1,74 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_TASK_H_
+#define BASE_TASK_SCHEDULER_TASK_H_
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/location.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/pending_task.h"
+#include "base/sequenced_task_runner.h"
+#include "base/single_thread_task_runner.h"
+#include "base/task_scheduler/task_traits.h"
+#include "base/time/time.h"
+
+namespace base {
+namespace internal {
+
+// A task is a unit of work inside the task scheduler. Support for tracing and
+// profiling inherited from PendingTask.
+struct BASE_EXPORT Task : public PendingTask {
+  // |posted_from| is the site the task was posted from. |task| is the closure
+  // to run. |traits| is metadata about the task. |delay| is a delay that
+  // must expire before the Task runs. If |delay| is non-zero and the shutdown
+  // behavior in |traits| is BLOCK_SHUTDOWN, the shutdown behavior is
+  // automatically adjusted to SKIP_ON_SHUTDOWN.
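+  // For example (illustrative), a Task constructed with
+  // {TaskShutdownBehavior::BLOCK_SHUTDOWN} and a one-second delay reports
+  // TaskShutdownBehavior::SKIP_ON_SHUTDOWN from |traits|.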
+  Task(const Location& posted_from,
+       OnceClosure task,
+       const TaskTraits& traits,
+       TimeDelta delay);
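+
+  // Illustrative sketch of the delay adjustment described above (DoWork is a
+  // placeholder):
+  //   Task task(FROM_HERE, BindOnce(&DoWork),
+  //             {TaskShutdownBehavior::BLOCK_SHUTDOWN},
+  //             TimeDelta::FromSeconds(5));
+  //   // task.traits.shutdown_behavior() is now SKIP_ON_SHUTDOWN because the
+  //   // task is delayed.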
+
+  // Task is move-only to avoid mistakes that cause reference counts to be
+  // accidentally bumped.
+  Task(Task&& other) noexcept;
+
+  ~Task();
+
+  Task& operator=(Task&& other);
+
+  // The TaskTraits of this task.
+  TaskTraits traits;
+
+  // The delay that must expire before the task runs.
+  TimeDelta delay;
+
+  // The time at which the task was inserted in its sequence. For an undelayed
+  // task, this happens at post time. For a delayed task, this happens some
+  // time after the task's delay has expired. If the task hasn't been inserted
+  // in a sequence yet, this defaults to a null TimeTicks.
+  TimeTicks sequenced_time;
+
+  // A reference to the SequencedTaskRunner or SingleThreadTaskRunner that
+  // posted this task, if any. Used to set ThreadTaskRunnerHandle and/or
+  // SequencedTaskRunnerHandle while the task is running.
+  // Note: this creates an ownership cycle
+  //   Sequence -> Task -> TaskRunner -> Sequence -> ...
+  // but that's okay as it's broken when the Task is popped from its Sequence
+  // after being executed which means this cycle forces the TaskRunner to stick
+  // around until all its tasks have been executed which is a requirement to
+  // support TaskRunnerHandles.
+  scoped_refptr<SequencedTaskRunner> sequenced_task_runner_ref;
+  scoped_refptr<SingleThreadTaskRunner> single_thread_task_runner_ref;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(Task);
+};
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_TASK_H_
diff --git a/base/task_scheduler/task_scheduler.cc b/base/task_scheduler/task_scheduler.cc
new file mode 100644
index 0000000..6d20ead
--- /dev/null
+++ b/base/task_scheduler/task_scheduler.cc
@@ -0,0 +1,86 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/task_scheduler.h"
+
+#include <algorithm>
+
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/sys_info.h"
+#include "base/task_scheduler/scheduler_worker_pool_params.h"
+#include "base/task_scheduler/task_scheduler_impl.h"
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
+
+namespace base {
+
+namespace {
+
+// |g_task_scheduler| is intentionally leaked on shutdown.
+TaskScheduler* g_task_scheduler = nullptr;
+
+}  // namespace
+
+TaskScheduler::InitParams::InitParams(
+    const SchedulerWorkerPoolParams& background_worker_pool_params_in,
+    const SchedulerWorkerPoolParams& background_blocking_worker_pool_params_in,
+    const SchedulerWorkerPoolParams& foreground_worker_pool_params_in,
+    const SchedulerWorkerPoolParams& foreground_blocking_worker_pool_params_in,
+    SharedWorkerPoolEnvironment shared_worker_pool_environment_in)
+    : background_worker_pool_params(background_worker_pool_params_in),
+      background_blocking_worker_pool_params(
+          background_blocking_worker_pool_params_in),
+      foreground_worker_pool_params(foreground_worker_pool_params_in),
+      foreground_blocking_worker_pool_params(
+          foreground_blocking_worker_pool_params_in),
+      shared_worker_pool_environment(shared_worker_pool_environment_in) {}
+
+TaskScheduler::InitParams::~InitParams() = default;
+
+#if !defined(OS_NACL)
+// static
+void TaskScheduler::CreateAndStartWithDefaultParams(StringPiece name) {
+  Create(name);
+  GetInstance()->StartWithDefaultParams();
+}
+
+void TaskScheduler::StartWithDefaultParams() {
+  // Values were chosen so that:
+  // * There are few background threads.
+  // * Background threads never outnumber foreground threads.
+  // * The system is utilized maximally by foreground threads.
+  // * The main thread is assumed to be busy; cap foreground workers at
+  //   |num_cores - 1|.
+  const int num_cores = SysInfo::NumberOfProcessors();
+  constexpr int kBackgroundMaxThreads = 1;
+  constexpr int kBackgroundBlockingMaxThreads = 2;
+  const int kForegroundMaxThreads = std::max(1, num_cores - 1);
+  const int kForegroundBlockingMaxThreads = std::max(2, num_cores - 1);
+
+  constexpr TimeDelta kSuggestedReclaimTime = TimeDelta::FromSeconds(30);
+
+  Start({{kBackgroundMaxThreads, kSuggestedReclaimTime},
+         {kBackgroundBlockingMaxThreads, kSuggestedReclaimTime},
+         {kForegroundMaxThreads, kSuggestedReclaimTime},
+         {kForegroundBlockingMaxThreads, kSuggestedReclaimTime}});
+}
+#endif  // !defined(OS_NACL)
+
+// static
+void TaskScheduler::Create(StringPiece name) {
+  SetInstance(std::make_unique<internal::TaskSchedulerImpl>(name));
+}
+
+// static
+void TaskScheduler::SetInstance(std::unique_ptr<TaskScheduler> task_scheduler) {
+  delete g_task_scheduler;
+  g_task_scheduler = task_scheduler.release();
+}
+
+// static
+TaskScheduler* TaskScheduler::GetInstance() {
+  return g_task_scheduler;
+}
+
+}  // namespace base
diff --git a/base/task_scheduler/task_scheduler.h b/base/task_scheduler/task_scheduler.h
new file mode 100644
index 0000000..cb6d097
--- /dev/null
+++ b/base/task_scheduler/task_scheduler.h
@@ -0,0 +1,247 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_TASK_SCHEDULER_H_
+#define BASE_TASK_SCHEDULER_TASK_SCHEDULER_H_
+
+#include <memory>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/gtest_prod_util.h"
+#include "base/memory/ref_counted.h"
+#include "base/sequenced_task_runner.h"
+#include "base/single_thread_task_runner.h"
+#include "base/strings/string_piece.h"
+#include "base/task_runner.h"
+#include "base/task_scheduler/scheduler_worker_pool_params.h"
+#include "base/task_scheduler/single_thread_task_runner_thread_mode.h"
+#include "base/task_scheduler/task_traits.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+
+namespace gin {
+class V8Platform;
+}
+
+namespace content {
+// Can't use the FRIEND_TEST_ALL_PREFIXES macro because the test is in a
+// different namespace.
+class BrowserMainLoopTest_CreateThreadsInSingleProcess_Test;
+}  // namespace content
+
+namespace base {
+
+class HistogramBase;
+class Location;
+class SchedulerWorkerObserver;
+
+// Interface for a task scheduler and static methods to manage the instance used
+// by the post_task.h API.
+//
+// The task scheduler doesn't create threads until Start() is called. Tasks can
+// be posted at any time but will not run until after Start() is called.
+//
+// The instance methods of this class are thread-safe.
+//
+// Note: All base/task_scheduler users should go through post_task.h instead of
+// TaskScheduler except for the one callsite per process which manages the
+// process's instance.
+class BASE_EXPORT TaskScheduler {
+ public:
+  struct BASE_EXPORT InitParams {
+    enum class SharedWorkerPoolEnvironment {
+      // Use the default environment (no environment).
+      DEFAULT,
+#if defined(OS_WIN)
+      // Place the worker in a COM MTA.
+      COM_MTA,
+#endif  // defined(OS_WIN)
+    };
+
+    InitParams(
+        const SchedulerWorkerPoolParams& background_worker_pool_params_in,
+        const SchedulerWorkerPoolParams&
+            background_blocking_worker_pool_params_in,
+        const SchedulerWorkerPoolParams& foreground_worker_pool_params_in,
+        const SchedulerWorkerPoolParams&
+            foreground_blocking_worker_pool_params_in,
+        SharedWorkerPoolEnvironment shared_worker_pool_environment_in =
+            SharedWorkerPoolEnvironment::DEFAULT);
+    ~InitParams();
+
+    SchedulerWorkerPoolParams background_worker_pool_params;
+    SchedulerWorkerPoolParams background_blocking_worker_pool_params;
+    SchedulerWorkerPoolParams foreground_worker_pool_params;
+    SchedulerWorkerPoolParams foreground_blocking_worker_pool_params;
+    SharedWorkerPoolEnvironment shared_worker_pool_environment;
+  };
+
+  // Destroying a TaskScheduler is not allowed in production; it is always
+  // leaked. In tests, it should only be destroyed after JoinForTesting() has
+  // returned.
+  virtual ~TaskScheduler() = default;
+
+  // Allows the task scheduler to create threads and run tasks following the
+  // |init_params| specification.
+  //
+  // If specified, |scheduler_worker_observer| will be notified when a worker
+  // enters and exits its main function. It must not be destroyed before
+  // JoinForTesting() has returned (must never be destroyed in production).
+  //
+  // CHECKs on failure.
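+  //
+  // Illustrative sketch (worker counts and reclaim time are arbitrary):
+  //   TaskScheduler::GetInstance()->Start(
+  //       {{1, TimeDelta::FromSeconds(30)},    // background
+  //        {2, TimeDelta::FromSeconds(30)},    // background blocking
+  //        {4, TimeDelta::FromSeconds(30)},    // foreground
+  //        {8, TimeDelta::FromSeconds(30)}});  // foreground blocking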
+  virtual void Start(
+      const InitParams& init_params,
+      SchedulerWorkerObserver* scheduler_worker_observer = nullptr) = 0;
+
+  // Posts |task| with a |delay| and specific |traits|. |delay| can be zero.
+  // For one-off tasks that don't require a TaskRunner.
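+  //
+  // Illustrative sketch (DoOneOffWork is a placeholder):
+  //   TaskScheduler::GetInstance()->PostDelayedTaskWithTraits(
+  //       FROM_HERE, {TaskPriority::BACKGROUND},
+  //       BindOnce(&DoOneOffWork), TimeDelta::FromSeconds(1));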
+  virtual void PostDelayedTaskWithTraits(const Location& from_here,
+                                         const TaskTraits& traits,
+                                         OnceClosure task,
+                                         TimeDelta delay) = 0;
+
+  // Returns a TaskRunner whose PostTask invocations result in scheduling tasks
+  // using |traits|. Tasks may run in any order and in parallel.
+  virtual scoped_refptr<TaskRunner> CreateTaskRunnerWithTraits(
+      const TaskTraits& traits) = 0;
+
+  // Returns a SequencedTaskRunner whose PostTask invocations result in
+  // scheduling tasks using |traits|. Tasks run one at a time in posting order.
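+  //
+  // Illustrative sketch (DoSequencedWork is a placeholder):
+  //   scoped_refptr<SequencedTaskRunner> runner =
+  //       TaskScheduler::GetInstance()->CreateSequencedTaskRunnerWithTraits(
+  //           {MayBlock()});
+  //   runner->PostTask(FROM_HERE, BindOnce(&DoSequencedWork));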
+  virtual scoped_refptr<SequencedTaskRunner>
+  CreateSequencedTaskRunnerWithTraits(const TaskTraits& traits) = 0;
+
+  // Returns a SingleThreadTaskRunner whose PostTask invocations result in
+  // scheduling tasks using |traits|. Tasks run on a single thread in posting
+  // order.
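+  //
+  // Illustrative sketch:
+  //   scoped_refptr<SingleThreadTaskRunner> runner =
+  //       TaskScheduler::GetInstance()
+  //           ->CreateSingleThreadTaskRunnerWithTraits(
+  //               {TaskPriority::USER_VISIBLE},
+  //               SingleThreadTaskRunnerThreadMode::SHARED);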
+  virtual scoped_refptr<SingleThreadTaskRunner>
+  CreateSingleThreadTaskRunnerWithTraits(
+      const TaskTraits& traits,
+      SingleThreadTaskRunnerThreadMode thread_mode) = 0;
+
+#if defined(OS_WIN)
+  // Returns a SingleThreadTaskRunner whose PostTask invocations result in
+  // scheduling tasks using |traits| in a COM Single-Threaded Apartment. Tasks
+  // run in the same Single-Threaded Apartment in posting order for the returned
+  // SingleThreadTaskRunner. There is not necessarily a one-to-one
+  // correspondence between SingleThreadTaskRunners and Single-Threaded
+  // Apartments. The implementation is free to share apartments or create new
+  // apartments as necessary. In either case, care should be taken to make sure
+  // COM pointers are not smuggled across apartments.
+  virtual scoped_refptr<SingleThreadTaskRunner>
+  CreateCOMSTATaskRunnerWithTraits(
+      const TaskTraits& traits,
+      SingleThreadTaskRunnerThreadMode thread_mode) = 0;
+#endif  // defined(OS_WIN)
+
+  // Returns a vector of all histograms available in this task scheduler.
+  virtual std::vector<const HistogramBase*> GetHistograms() const = 0;
+
+  // Synchronously shuts down the scheduler. Once this is called, only tasks
+  // posted with the BLOCK_SHUTDOWN behavior will be run. When this returns:
+  // - All SKIP_ON_SHUTDOWN tasks that were already running have completed their
+  //   execution.
+  // - All posted BLOCK_SHUTDOWN tasks have completed their execution.
+  // - CONTINUE_ON_SHUTDOWN tasks might still be running.
+  // Note that an implementation can keep threads and other resources alive to
+  // support running CONTINUE_ON_SHUTDOWN after this returns. This can only be
+  // called once.
+  virtual void Shutdown() = 0;
+
+  // Waits until there are no pending undelayed tasks. May be called in tests
+  // to validate that a condition is met after all undelayed tasks have run.
+  //
+  // Does not wait for delayed tasks. Waits for undelayed tasks posted from
+  // other threads during the call. Returns immediately when shutdown completes.
+  virtual void FlushForTesting() = 0;
+
+  // Returns immediately and calls |flush_callback| when there are no
+  // incomplete undelayed tasks. |flush_callback| may be called back on any
+  // thread and should not perform a lot of work. May be used when additional
+  // work on the current thread needs to be performed during a flush. Only one
+  // FlushAsyncForTesting() may be pending at any given time.
+  virtual void FlushAsyncForTesting(OnceClosure flush_callback) = 0;
+
+  // Joins all threads. Tasks that are already running are allowed to complete
+  // their execution. This can only be called once. Using this task scheduler
+  // instance to create task runners or post tasks is not permitted during or
+  // after this call.
+  virtual void JoinForTesting() = 0;
+
+// CreateAndStartWithDefaultParams(), Create(), and SetInstance() register a
+// TaskScheduler to handle tasks posted through the post_task.h API for this
+// process.
+//
+// Processes that need to initialize TaskScheduler with custom params or that
+// need to allow tasks to be posted before the TaskScheduler creates its
+// threads should use Create() followed by Start(). Other processes can use
+// CreateAndStartWithDefaultParams().
+//
+// A registered TaskScheduler is only deleted when a new TaskScheduler is
+// registered. The last registered TaskScheduler is leaked on shutdown. The
+// methods below must not be called when TaskRunners created by a previous
+// TaskScheduler are still alive. The methods are not thread-safe; proper
+// synchronization is required to use the post_task.h API after registering a
+// new TaskScheduler.
+
+#if !defined(OS_NACL)
+  // Creates and starts a task scheduler using default params. |name| is used to
+  // label histograms; it must not be empty. It should identify the component
+  // that calls this. Start() is called by this method; it is invalid to call it
+  // again afterwards. CHECKs on failure. For tests, prefer
+  // base::test::ScopedTaskEnvironment (ensures isolation).
+  static void CreateAndStartWithDefaultParams(StringPiece name);
+
+  // Same as CreateAndStartWithDefaultParams() but allows callers to split the
+  // Create() and StartWithDefaultParams() calls.
+  void StartWithDefaultParams();
+#endif  // !defined(OS_NACL)
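+
+  // Illustrative process startup sketches for the two paths described above:
+  //   // Simple path:
+  //   TaskScheduler::CreateAndStartWithDefaultParams("MyProcess");
+  //
+  //   // Split path, when tasks may be posted before threads should exist:
+  //   TaskScheduler::Create("MyProcess");
+  //   ...  // Tasks posted now are queued until Start().
+  //   TaskScheduler::GetInstance()->StartWithDefaultParams();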
+
+  // Creates a ready-to-start task scheduler. |name| is used to label
+  // histograms; it must not be empty. It should identify the component that
+  // creates the TaskScheduler. The task scheduler doesn't create threads until
+  // Start() is called. Tasks can be posted at any time but will not run until
+  // after Start() is called. For tests, prefer
+  // base::test::ScopedTaskEnvironment (ensures isolation).
+  static void Create(StringPiece name);
+
+  // Registers |task_scheduler| to handle tasks posted through the post_task.h
+  // API for this process. For tests, prefer base::test::ScopedTaskEnvironment
+  // (ensures isolation).
+  static void SetInstance(std::unique_ptr<TaskScheduler> task_scheduler);
+
+  // Retrieves the TaskScheduler set via SetInstance() or
+  // CreateAndStartWithDefaultParams(). This should be used very
+  // rarely; most users of TaskScheduler should use the post_task.h API. In
+  // particular, refrain from doing
+  //   if (!TaskScheduler::GetInstance()) {
+  //     TaskScheduler::SetInstance(...);
+  //     base::PostTask(...);
+  //   }
+  // Instead, make sure to SetInstance() early in one deterministic place in
+  // the process' initialization phase.
+  // When in doubt, consult with //base/task_scheduler/OWNERS.
+  static TaskScheduler* GetInstance();
+
+ private:
+  friend class gin::V8Platform;
+  friend class content::BrowserMainLoopTest_CreateThreadsInSingleProcess_Test;
+
+  // Returns the maximum number of non-single-threaded non-blocked tasks posted
+  // with |traits| that can run concurrently in this TaskScheduler.
+  //
+  // Do not use this method. To process n items, post n tasks that each process
+  // 1 item rather than GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated()
+  // tasks that each process
+  // n/GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated() items.
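+  //
+  // Illustrative sketch of the preferred pattern (uses the post_task.h
+  // PostTaskWithTraits() helper; ProcessItem is a placeholder):
+  //   for (auto& item : items)
+  //     PostTaskWithTraits(FROM_HERE, traits,
+  //                        BindOnce(&ProcessItem, std::move(item)));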
+  //
+  // TODO(fdoray): Remove this method. https://crbug.com/687264
+  virtual int GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated(
+      const TaskTraits& traits) const = 0;
+};
+
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_TASK_SCHEDULER_H_
diff --git a/base/task_scheduler/task_scheduler_impl.cc b/base/task_scheduler/task_scheduler_impl.cc
new file mode 100644
index 0000000..a5ab06c
--- /dev/null
+++ b/base/task_scheduler/task_scheduler_impl.cc
@@ -0,0 +1,234 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/task_scheduler_impl.h"
+
+#include <string>
+#include <utility>
+
+#include "base/compiler_specific.h"
+#include "base/message_loop/message_loop.h"
+#include "base/metrics/field_trial_params.h"
+#include "base/strings/string_util.h"
+#include "base/task_scheduler/delayed_task_manager.h"
+#include "base/task_scheduler/environment_config.h"
+#include "base/task_scheduler/scheduler_worker_pool_params.h"
+#include "base/task_scheduler/sequence.h"
+#include "base/task_scheduler/sequence_sort_key.h"
+#include "base/task_scheduler/service_thread.h"
+#include "base/task_scheduler/task.h"
+#include "base/task_scheduler/task_tracker.h"
+#include "base/time/time.h"
+
+namespace base {
+namespace internal {
+
+TaskSchedulerImpl::TaskSchedulerImpl(StringPiece histogram_label)
+    : TaskSchedulerImpl(histogram_label,
+                        std::make_unique<TaskTrackerImpl>(histogram_label)) {}
+
+TaskSchedulerImpl::TaskSchedulerImpl(
+    StringPiece histogram_label,
+    std::unique_ptr<TaskTrackerImpl> task_tracker)
+    : task_tracker_(std::move(task_tracker)),
+      service_thread_(std::make_unique<ServiceThread>(task_tracker_.get())),
+      single_thread_task_runner_manager_(task_tracker_->GetTrackedRef(),
+                                         &delayed_task_manager_) {
+  DCHECK(!histogram_label.empty());
+
+  static_assert(arraysize(worker_pools_) == ENVIRONMENT_COUNT,
+                "The size of |worker_pools_| must match ENVIRONMENT_COUNT.");
+  static_assert(
+      arraysize(kEnvironmentParams) == ENVIRONMENT_COUNT,
+      "The size of |kEnvironmentParams| must match ENVIRONMENT_COUNT.");
+
+  for (int environment_type = 0; environment_type < ENVIRONMENT_COUNT;
+       ++environment_type) {
+    worker_pools_[environment_type] = std::make_unique<SchedulerWorkerPoolImpl>(
+        JoinString(
+            {histogram_label, kEnvironmentParams[environment_type].name_suffix},
+            "."),
+        kEnvironmentParams[environment_type].name_suffix,
+        kEnvironmentParams[environment_type].priority_hint,
+        task_tracker_->GetTrackedRef(), &delayed_task_manager_);
+  }
+}
+
+TaskSchedulerImpl::~TaskSchedulerImpl() {
+#if DCHECK_IS_ON()
+  DCHECK(join_for_testing_returned_.IsSet());
+#endif
+}
+
+void TaskSchedulerImpl::Start(
+    const TaskScheduler::InitParams& init_params,
+    SchedulerWorkerObserver* scheduler_worker_observer) {
+  // This is set in Start() and not in the constructor because variation params
+  // are usually not ready when TaskSchedulerImpl is instantiated in a process.
+  if (base::GetFieldTrialParamValue("BrowserScheduler",
+                                    "AllTasksUserBlocking") == "true") {
+    all_tasks_user_blocking_.Set();
+  }
+
+  // Start the service thread. On platforms that support it (POSIX except NaCL
+  // SFI), the service thread runs a MessageLoopForIO which is used to support
+  // FileDescriptorWatcher in the scope in which tasks run.
+  ServiceThread::Options service_thread_options;
+  service_thread_options.message_loop_type =
+#if defined(OS_POSIX) && !defined(OS_NACL_SFI)
+      MessageLoop::TYPE_IO;
+#else
+      MessageLoop::TYPE_DEFAULT;
+#endif
+  service_thread_options.timer_slack = TIMER_SLACK_MAXIMUM;
+  CHECK(service_thread_->StartWithOptions(service_thread_options));
+
+#if defined(OS_POSIX) && !defined(OS_NACL_SFI)
+  // Needs to happen after starting the service thread to get its
+  // message_loop().
+  task_tracker_->set_watch_file_descriptor_message_loop(
+      static_cast<MessageLoopForIO*>(service_thread_->message_loop()));
+
+#if DCHECK_IS_ON()
+  task_tracker_->set_service_thread_handle(service_thread_->GetThreadHandle());
+#endif  // DCHECK_IS_ON()
+#endif  // defined(OS_POSIX) && !defined(OS_NACL_SFI)
+
+  // Needs to happen after starting the service thread to get its task_runner().
+  scoped_refptr<TaskRunner> service_thread_task_runner =
+      service_thread_->task_runner();
+  delayed_task_manager_.Start(service_thread_task_runner);
+
+  single_thread_task_runner_manager_.Start(scheduler_worker_observer);
+
+  const SchedulerWorkerPoolImpl::WorkerEnvironment worker_environment =
+#if defined(OS_WIN)
+      init_params.shared_worker_pool_environment ==
+              InitParams::SharedWorkerPoolEnvironment::COM_MTA
+          ? SchedulerWorkerPoolImpl::WorkerEnvironment::COM_MTA
+          : SchedulerWorkerPoolImpl::WorkerEnvironment::NONE;
+#else
+      SchedulerWorkerPoolImpl::WorkerEnvironment::NONE;
+#endif
+
+  worker_pools_[BACKGROUND]->Start(
+      init_params.background_worker_pool_params, service_thread_task_runner,
+      scheduler_worker_observer, worker_environment);
+  worker_pools_[BACKGROUND_BLOCKING]->Start(
+      init_params.background_blocking_worker_pool_params,
+      service_thread_task_runner, scheduler_worker_observer,
+      worker_environment);
+  worker_pools_[FOREGROUND]->Start(
+      init_params.foreground_worker_pool_params, service_thread_task_runner,
+      scheduler_worker_observer, worker_environment);
+  worker_pools_[FOREGROUND_BLOCKING]->Start(
+      init_params.foreground_blocking_worker_pool_params,
+      service_thread_task_runner, scheduler_worker_observer,
+      worker_environment);
+}
+
+void TaskSchedulerImpl::PostDelayedTaskWithTraits(const Location& from_here,
+                                                  const TaskTraits& traits,
+                                                  OnceClosure task,
+                                                  TimeDelta delay) {
+  // Post |task| as part of a one-off single-task Sequence.
+  const TaskTraits new_traits = SetUserBlockingPriorityIfNeeded(traits);
+  GetWorkerPoolForTraits(new_traits)
+      ->PostTaskWithSequence(
+          Task(from_here, std::move(task), new_traits, delay),
+          MakeRefCounted<Sequence>());
+}
+
+scoped_refptr<TaskRunner> TaskSchedulerImpl::CreateTaskRunnerWithTraits(
+    const TaskTraits& traits) {
+  const TaskTraits new_traits = SetUserBlockingPriorityIfNeeded(traits);
+  return GetWorkerPoolForTraits(new_traits)
+      ->CreateTaskRunnerWithTraits(new_traits);
+}
+
+scoped_refptr<SequencedTaskRunner>
+TaskSchedulerImpl::CreateSequencedTaskRunnerWithTraits(
+    const TaskTraits& traits) {
+  const TaskTraits new_traits = SetUserBlockingPriorityIfNeeded(traits);
+  return GetWorkerPoolForTraits(new_traits)
+      ->CreateSequencedTaskRunnerWithTraits(new_traits);
+}
+
+scoped_refptr<SingleThreadTaskRunner>
+TaskSchedulerImpl::CreateSingleThreadTaskRunnerWithTraits(
+    const TaskTraits& traits,
+    SingleThreadTaskRunnerThreadMode thread_mode) {
+  return single_thread_task_runner_manager_
+      .CreateSingleThreadTaskRunnerWithTraits(
+          SetUserBlockingPriorityIfNeeded(traits), thread_mode);
+}
+
+#if defined(OS_WIN)
+scoped_refptr<SingleThreadTaskRunner>
+TaskSchedulerImpl::CreateCOMSTATaskRunnerWithTraits(
+    const TaskTraits& traits,
+    SingleThreadTaskRunnerThreadMode thread_mode) {
+  return single_thread_task_runner_manager_.CreateCOMSTATaskRunnerWithTraits(
+      SetUserBlockingPriorityIfNeeded(traits), thread_mode);
+}
+#endif  // defined(OS_WIN)
+
+std::vector<const HistogramBase*> TaskSchedulerImpl::GetHistograms() const {
+  std::vector<const HistogramBase*> histograms;
+  for (const auto& worker_pool : worker_pools_)
+    worker_pool->GetHistograms(&histograms);
+
+  return histograms;
+}
+
+int TaskSchedulerImpl::GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated(
+    const TaskTraits& traits) const {
+  return GetWorkerPoolForTraits(traits)
+      ->GetMaxConcurrentNonBlockedTasksDeprecated();
+}
+
+void TaskSchedulerImpl::Shutdown() {
+  // TODO(fdoray): Increase the priority of BACKGROUND tasks blocking shutdown.
+  task_tracker_->Shutdown();
+}
+
+void TaskSchedulerImpl::FlushForTesting() {
+  task_tracker_->FlushForTesting();
+}
+
+void TaskSchedulerImpl::FlushAsyncForTesting(OnceClosure flush_callback) {
+  task_tracker_->FlushAsyncForTesting(std::move(flush_callback));
+}
+
+void TaskSchedulerImpl::JoinForTesting() {
+#if DCHECK_IS_ON()
+  DCHECK(!join_for_testing_returned_.IsSet());
+#endif
+  // The service thread must be stopped before the workers are joined;
+  // otherwise, tasks scheduled by the DelayedTaskManager might be posted
+  // between joining those workers and stopping the service thread, which will
+  // cause a CHECK. See
+  // https://crbug.com/771701.
+  service_thread_->Stop();
+  single_thread_task_runner_manager_.JoinForTesting();
+  for (const auto& worker_pool : worker_pools_)
+    worker_pool->JoinForTesting();
+#if DCHECK_IS_ON()
+  join_for_testing_returned_.Set();
+#endif
+}
+
+SchedulerWorkerPoolImpl* TaskSchedulerImpl::GetWorkerPoolForTraits(
+    const TaskTraits& traits) const {
+  return worker_pools_[GetEnvironmentIndexForTraits(traits)].get();
+}
+
+TaskTraits TaskSchedulerImpl::SetUserBlockingPriorityIfNeeded(
+    const TaskTraits& traits) const {
+  return all_tasks_user_blocking_.IsSet()
+             ? TaskTraits::Override(traits, {TaskPriority::USER_BLOCKING})
+             : traits;
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/task_scheduler/task_scheduler_impl.h b/base/task_scheduler/task_scheduler_impl.h
new file mode 100644
index 0000000..81a5a87
--- /dev/null
+++ b/base/task_scheduler/task_scheduler_impl.h
@@ -0,0 +1,132 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_TASK_SCHEDULER_IMPL_H_
+#define BASE_TASK_SCHEDULER_TASK_SCHEDULER_IMPL_H_
+
+#include <memory>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/memory/ref_counted.h"
+#include "base/strings/string_piece.h"
+#include "base/synchronization/atomic_flag.h"
+#include "base/task_scheduler/delayed_task_manager.h"
+#include "base/task_scheduler/scheduler_single_thread_task_runner_manager.h"
+#include "base/task_scheduler/scheduler_worker_pool_impl.h"
+#include "base/task_scheduler/single_thread_task_runner_thread_mode.h"
+#include "base/task_scheduler/task_scheduler.h"
+#include "base/task_scheduler/task_tracker.h"
+#include "base/task_scheduler/task_traits.h"
+#include "build/build_config.h"
+
+#if defined(OS_POSIX) && !defined(OS_NACL_SFI)
+#include "base/task_scheduler/task_tracker_posix.h"
+#endif
+
+#if defined(OS_WIN)
+#include "base/win/com_init_check_hook.h"
+#endif
+
+namespace base {
+
+class HistogramBase;
+class Thread;
+
+namespace internal {
+
+// Default TaskScheduler implementation. This class is thread-safe.
+class BASE_EXPORT TaskSchedulerImpl : public TaskScheduler {
+ public:
+  using TaskTrackerImpl =
+#if defined(OS_POSIX) && !defined(OS_NACL_SFI)
+      TaskTrackerPosix;
+#else
+      TaskTracker;
+#endif
+
+  // Creates a TaskSchedulerImpl with a production TaskTracker.
+  // |histogram_label| is used to label histograms; it must not be empty.
+  explicit TaskSchedulerImpl(StringPiece histogram_label);
+
+  // For testing only. Creates a TaskSchedulerImpl with a custom TaskTracker.
+  TaskSchedulerImpl(StringPiece histogram_label,
+                    std::unique_ptr<TaskTrackerImpl> task_tracker);
+
+  ~TaskSchedulerImpl() override;
+
+  // TaskScheduler:
+  void Start(const TaskScheduler::InitParams& init_params,
+             SchedulerWorkerObserver* scheduler_worker_observer) override;
+  void PostDelayedTaskWithTraits(const Location& from_here,
+                                 const TaskTraits& traits,
+                                 OnceClosure task,
+                                 TimeDelta delay) override;
+  scoped_refptr<TaskRunner> CreateTaskRunnerWithTraits(
+      const TaskTraits& traits) override;
+  scoped_refptr<SequencedTaskRunner> CreateSequencedTaskRunnerWithTraits(
+      const TaskTraits& traits) override;
+  scoped_refptr<SingleThreadTaskRunner> CreateSingleThreadTaskRunnerWithTraits(
+      const TaskTraits& traits,
+      SingleThreadTaskRunnerThreadMode thread_mode) override;
+#if defined(OS_WIN)
+  scoped_refptr<SingleThreadTaskRunner> CreateCOMSTATaskRunnerWithTraits(
+      const TaskTraits& traits,
+      SingleThreadTaskRunnerThreadMode thread_mode) override;
+#endif  // defined(OS_WIN)
+  std::vector<const HistogramBase*> GetHistograms() const override;
+  int GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated(
+      const TaskTraits& traits) const override;
+  void Shutdown() override;
+  void FlushForTesting() override;
+  void FlushAsyncForTesting(OnceClosure flush_callback) override;
+  void JoinForTesting() override;
+
+ private:
+  // Returns the worker pool that runs Tasks with |traits|.
+  SchedulerWorkerPoolImpl* GetWorkerPoolForTraits(
+      const TaskTraits& traits) const;
+
+  // Returns |traits|, with priority set to TaskPriority::USER_BLOCKING if
+  // |all_tasks_user_blocking_| is set.
+  TaskTraits SetUserBlockingPriorityIfNeeded(const TaskTraits& traits) const;
+
+  const std::unique_ptr<TaskTrackerImpl> task_tracker_;
+  std::unique_ptr<Thread> service_thread_;
+  DelayedTaskManager delayed_task_manager_;
+  SchedulerSingleThreadTaskRunnerManager single_thread_task_runner_manager_;
+
+  // Indicates that all tasks are handled as if they had been posted with
+  // TaskPriority::USER_BLOCKING. Since this is set in Start(), it doesn't apply
+  // to tasks posted before Start() or to tasks posted to TaskRunners created
+  // before Start().
+  //
+  // TODO(fdoray): Remove after experiment. https://crbug.com/757022
+  AtomicFlag all_tasks_user_blocking_;
+
+  // There are 4 SchedulerWorkerPoolImpl in this array to match the 4
+  // SchedulerWorkerPoolParams in TaskScheduler::InitParams.
+  std::unique_ptr<SchedulerWorkerPoolImpl> worker_pools_[4];
+
+#if DCHECK_IS_ON()
+  // Set once JoinForTesting() has returned.
+  AtomicFlag join_for_testing_returned_;
+#endif
+
+#if defined(OS_WIN) && defined(COM_INIT_CHECK_HOOK_ENABLED)
+  // Provides COM initialization verification for supported builds.
+  base::win::ComInitCheckHook com_init_check_hook_;
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(TaskSchedulerImpl);
+};
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_TASK_SCHEDULER_IMPL_H_
diff --git a/base/task_scheduler/task_scheduler_impl_unittest.cc b/base/task_scheduler/task_scheduler_impl_unittest.cc
new file mode 100644
index 0000000..4fe4a25
--- /dev/null
+++ b/base/task_scheduler/task_scheduler_impl_unittest.cc
@@ -0,0 +1,823 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/task_scheduler_impl.h"
+
+#include <stddef.h>
+
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/callback.h"
+#include "base/debug/stack_trace.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/metrics/field_trial.h"
+#include "base/metrics/field_trial_params.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/task_scheduler/scheduler_worker_observer.h"
+#include "base/task_scheduler/scheduler_worker_pool_params.h"
+#include "base/task_scheduler/task_traits.h"
+#include "base/task_scheduler/test_task_factory.h"
+#include "base/task_scheduler/test_utils.h"
+#include "base/test/test_timeouts.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/sequence_local_storage_slot.h"
+#include "base/threading/simple_thread.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_POSIX)
+#include <unistd.h>
+
+#include "base/debug/leak_annotations.h"
+#include "base/files/file_descriptor_watcher_posix.h"
+#include "base/files/file_util.h"
+#include "base/posix/eintr_wrapper.h"
+#endif  // defined(OS_POSIX)
+
+#if defined(OS_WIN)
+#include "base/win/com_init_util.h"
+#endif  // defined(OS_WIN)
+
+namespace base {
+namespace internal {
+
+namespace {
+
+struct TraitsExecutionModePair {
+  TraitsExecutionModePair(const TaskTraits& traits,
+                          test::ExecutionMode execution_mode)
+      : traits(traits), execution_mode(execution_mode) {}
+
+  TaskTraits traits;
+  test::ExecutionMode execution_mode;
+};
+
+#if DCHECK_IS_ON()
+// Returns whether I/O calls are allowed on the current thread.
+bool GetIOAllowed() {
+  const bool previous_value = ThreadRestrictions::SetIOAllowed(true);
+  ThreadRestrictions::SetIOAllowed(previous_value);
+  return previous_value;
+}
+#endif
+
+// Verify that the current thread priority and I/O restrictions are appropriate
+// to run a Task with |traits|.
+// Note: ExecutionMode is verified inside TestTaskFactory.
+void VerifyTaskEnvironment(const TaskTraits& traits) {
+  const bool supports_background_priority =
+      Lock::HandlesMultipleThreadPriorities() &&
+      PlatformThread::CanIncreaseCurrentThreadPriority();
+
+  EXPECT_EQ(supports_background_priority &&
+                    traits.priority() == TaskPriority::BACKGROUND
+                ? ThreadPriority::BACKGROUND
+                : ThreadPriority::NORMAL,
+            PlatformThread::GetCurrentThreadPriority());
+
+#if DCHECK_IS_ON()
+  // The #if above is required because GetIOAllowed() always returns true when
+  // !DCHECK_IS_ON(), even when |traits| don't allow file I/O.
+  EXPECT_EQ(traits.may_block(), GetIOAllowed());
+#endif
+
+  // Verify that the thread the task is running on is named as expected.
+  const std::string current_thread_name(PlatformThread::GetName());
+  EXPECT_NE(std::string::npos, current_thread_name.find("TaskScheduler"));
+  EXPECT_NE(std::string::npos,
+            current_thread_name.find(
+                traits.priority() == TaskPriority::BACKGROUND ? "Background"
+                                                              : "Foreground"));
+  EXPECT_EQ(traits.may_block(),
+            current_thread_name.find("Blocking") != std::string::npos);
+}
+
+void VerifyTaskEnvironmentAndSignalEvent(const TaskTraits& traits,
+                                         WaitableEvent* event) {
+  DCHECK(event);
+  VerifyTaskEnvironment(traits);
+  event->Signal();
+}
+
+void VerifyTimeAndTaskEnvironmentAndSignalEvent(const TaskTraits& traits,
+                                                TimeTicks expected_time,
+                                                WaitableEvent* event) {
+  DCHECK(event);
+  EXPECT_LE(expected_time, TimeTicks::Now());
+  VerifyTaskEnvironment(traits);
+  event->Signal();
+}
+
+scoped_refptr<TaskRunner> CreateTaskRunnerWithTraitsAndExecutionMode(
+    TaskScheduler* scheduler,
+    const TaskTraits& traits,
+    test::ExecutionMode execution_mode,
+    SingleThreadTaskRunnerThreadMode default_single_thread_task_runner_mode =
+        SingleThreadTaskRunnerThreadMode::SHARED) {
+  switch (execution_mode) {
+    case test::ExecutionMode::PARALLEL:
+      return scheduler->CreateTaskRunnerWithTraits(traits);
+    case test::ExecutionMode::SEQUENCED:
+      return scheduler->CreateSequencedTaskRunnerWithTraits(traits);
+    case test::ExecutionMode::SINGLE_THREADED: {
+      return scheduler->CreateSingleThreadTaskRunnerWithTraits(
+          traits, default_single_thread_task_runner_mode);
+    }
+  }
+  ADD_FAILURE() << "Unknown ExecutionMode";
+  return nullptr;
+}
+
+class ThreadPostingTasks : public SimpleThread {
+ public:
+  // Creates a thread that posts Tasks to |scheduler| with |traits| and
+  // |execution_mode|.
+  ThreadPostingTasks(TaskSchedulerImpl* scheduler,
+                     const TaskTraits& traits,
+                     test::ExecutionMode execution_mode)
+      : SimpleThread("ThreadPostingTasks"),
+        traits_(traits),
+        factory_(CreateTaskRunnerWithTraitsAndExecutionMode(scheduler,
+                                                            traits,
+                                                            execution_mode),
+                 execution_mode) {}
+
+  void WaitForAllTasksToRun() { factory_.WaitForAllTasksToRun(); }
+
+ private:
+  void Run() override {
+    EXPECT_FALSE(factory_.task_runner()->RunsTasksInCurrentSequence());
+
+    const size_t kNumTasksPerThread = 150;
+    for (size_t i = 0; i < kNumTasksPerThread; ++i) {
+      factory_.PostTask(test::TestTaskFactory::PostNestedTask::NO,
+                        Bind(&VerifyTaskEnvironment, traits_));
+    }
+  }
+
+  const TaskTraits traits_;
+  test::TestTaskFactory factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(ThreadPostingTasks);
+};
+
+// Returns a vector with a TraitsExecutionModePair for each valid
+// combination of {ExecutionMode, TaskPriority, MayBlock()}.
+std::vector<TraitsExecutionModePair> GetTraitsExecutionModePairs() {
+  std::vector<TraitsExecutionModePair> params;
+
+  const test::ExecutionMode execution_modes[] = {
+      test::ExecutionMode::PARALLEL, test::ExecutionMode::SEQUENCED,
+      test::ExecutionMode::SINGLE_THREADED};
+
+  for (test::ExecutionMode execution_mode : execution_modes) {
+    for (size_t priority_index = static_cast<size_t>(TaskPriority::LOWEST);
+         priority_index <= static_cast<size_t>(TaskPriority::HIGHEST);
+         ++priority_index) {
+      const TaskPriority priority = static_cast<TaskPriority>(priority_index);
+      params.push_back(TraitsExecutionModePair({priority}, execution_mode));
+      params.push_back(
+          TraitsExecutionModePair({priority, MayBlock()}, execution_mode));
+    }
+  }
+
+  return params;
+}
+
+class TaskSchedulerImplTest
+    : public testing::TestWithParam<TraitsExecutionModePair> {
+ protected:
+  TaskSchedulerImplTest() : scheduler_("Test"), field_trial_list_(nullptr) {}
+
+  void EnableAllTasksUserBlocking() {
+    constexpr char kFieldTrialName[] = "BrowserScheduler";
+    constexpr char kFieldTrialTestGroup[] = "DummyGroup";
+    std::map<std::string, std::string> variation_params;
+    variation_params["AllTasksUserBlocking"] = "true";
+    base::AssociateFieldTrialParams(kFieldTrialName, kFieldTrialTestGroup,
+                                    variation_params);
+    base::FieldTrialList::CreateFieldTrial(kFieldTrialName,
+                                           kFieldTrialTestGroup);
+  }
+
+  void set_scheduler_worker_observer(
+      SchedulerWorkerObserver* scheduler_worker_observer) {
+    scheduler_worker_observer_ = scheduler_worker_observer;
+  }
+
+  void StartTaskScheduler() {
+    constexpr TimeDelta kSuggestedReclaimTime = TimeDelta::FromSeconds(30);
+    constexpr int kMaxNumBackgroundThreads = 1;
+    constexpr int kMaxNumBackgroundBlockingThreads = 3;
+    constexpr int kMaxNumForegroundThreads = 4;
+    constexpr int kMaxNumForegroundBlockingThreads = 12;
+
+    scheduler_.Start(
+        {{kMaxNumBackgroundThreads, kSuggestedReclaimTime},
+         {kMaxNumBackgroundBlockingThreads, kSuggestedReclaimTime},
+         {kMaxNumForegroundThreads, kSuggestedReclaimTime},
+         {kMaxNumForegroundBlockingThreads, kSuggestedReclaimTime}},
+        scheduler_worker_observer_);
+  }
+
+  void TearDown() override {
+    if (did_tear_down_)
+      return;
+
+    scheduler_.FlushForTesting();
+    scheduler_.JoinForTesting();
+    did_tear_down_ = true;
+  }
+
+  TaskSchedulerImpl scheduler_;
+
+ private:
+  base::FieldTrialList field_trial_list_;
+  SchedulerWorkerObserver* scheduler_worker_observer_ = nullptr;
+  bool did_tear_down_ = false;
+
+  DISALLOW_COPY_AND_ASSIGN(TaskSchedulerImplTest);
+};
+
+}  // namespace
+
+// Verifies that a Task posted via PostDelayedTaskWithTraits with parameterized
+// TaskTraits and no delay runs on a thread with the expected priority and I/O
+// restrictions. The ExecutionMode parameter is ignored by this test.
+TEST_P(TaskSchedulerImplTest, PostDelayedTaskWithTraitsNoDelay) {
+  StartTaskScheduler();
+  WaitableEvent task_ran(WaitableEvent::ResetPolicy::MANUAL,
+                         WaitableEvent::InitialState::NOT_SIGNALED);
+  scheduler_.PostDelayedTaskWithTraits(
+      FROM_HERE, GetParam().traits,
+      BindOnce(&VerifyTaskEnvironmentAndSignalEvent, GetParam().traits,
+               Unretained(&task_ran)),
+      TimeDelta());
+  task_ran.Wait();
+}
+
+// Verifies that a Task posted via PostDelayedTaskWithTraits with parameterized
+// TaskTraits and a non-zero delay runs on a thread with the expected priority
+// and I/O restrictions after the delay expires. The ExecutionMode parameter is
+// ignored by this test.
+TEST_P(TaskSchedulerImplTest, PostDelayedTaskWithTraitsWithDelay) {
+  StartTaskScheduler();
+  WaitableEvent task_ran(WaitableEvent::ResetPolicy::MANUAL,
+                         WaitableEvent::InitialState::NOT_SIGNALED);
+  scheduler_.PostDelayedTaskWithTraits(
+      FROM_HERE, GetParam().traits,
+      BindOnce(&VerifyTimeAndTaskEnvironmentAndSignalEvent, GetParam().traits,
+               TimeTicks::Now() + TestTimeouts::tiny_timeout(),
+               Unretained(&task_ran)),
+      TestTimeouts::tiny_timeout());
+  task_ran.Wait();
+}
+
+// Verifies that Tasks posted via a TaskRunner with parameterized TaskTraits and
+// ExecutionMode run on a thread with the expected priority and I/O restrictions
+// and respect the characteristics of their ExecutionMode.
+TEST_P(TaskSchedulerImplTest, PostTasksViaTaskRunner) {
+  StartTaskScheduler();
+  test::TestTaskFactory factory(
+      CreateTaskRunnerWithTraitsAndExecutionMode(&scheduler_, GetParam().traits,
+                                                 GetParam().execution_mode),
+      GetParam().execution_mode);
+  EXPECT_FALSE(factory.task_runner()->RunsTasksInCurrentSequence());
+
+  const size_t kNumTasksPerTest = 150;
+  for (size_t i = 0; i < kNumTasksPerTest; ++i) {
+    factory.PostTask(test::TestTaskFactory::PostNestedTask::NO,
+                     Bind(&VerifyTaskEnvironment, GetParam().traits));
+  }
+
+  factory.WaitForAllTasksToRun();
+}
+
+// Verifies that a task posted via PostDelayedTaskWithTraits without a delay
+// doesn't run before Start() is called.
+TEST_P(TaskSchedulerImplTest, PostDelayedTaskWithTraitsNoDelayBeforeStart) {
+  WaitableEvent task_running(WaitableEvent::ResetPolicy::MANUAL,
+                             WaitableEvent::InitialState::NOT_SIGNALED);
+  scheduler_.PostDelayedTaskWithTraits(
+      FROM_HERE, GetParam().traits,
+      BindOnce(&VerifyTaskEnvironmentAndSignalEvent, GetParam().traits,
+               Unretained(&task_running)),
+      TimeDelta());
+
+  // Wait a little bit to make sure that the task doesn't run before Start().
+  // Note: This test won't catch a case where the task runs just after the check
+  // and before Start(). However, we expect the test to be flaky if the tested
+  // code allows that to happen.
+  PlatformThread::Sleep(TestTimeouts::tiny_timeout());
+  EXPECT_FALSE(task_running.IsSignaled());
+
+  StartTaskScheduler();
+  task_running.Wait();
+}
+
+// Verifies that a task posted via PostDelayedTaskWithTraits with a delay
+// doesn't run before Start() is called.
+TEST_P(TaskSchedulerImplTest, PostDelayedTaskWithTraitsWithDelayBeforeStart) {
+  WaitableEvent task_running(WaitableEvent::ResetPolicy::MANUAL,
+                             WaitableEvent::InitialState::NOT_SIGNALED);
+  scheduler_.PostDelayedTaskWithTraits(
+      FROM_HERE, GetParam().traits,
+      BindOnce(&VerifyTimeAndTaskEnvironmentAndSignalEvent, GetParam().traits,
+               TimeTicks::Now() + TestTimeouts::tiny_timeout(),
+               Unretained(&task_running)),
+      TestTimeouts::tiny_timeout());
+
+  // Wait a little bit to make sure that the task doesn't run before Start().
+  // Note: This test won't catch a case where the task runs just after the check
+  // and before Start(). However, we expect the test to be flaky if the tested
+  // code allows that to happen.
+  PlatformThread::Sleep(TestTimeouts::tiny_timeout());
+  EXPECT_FALSE(task_running.IsSignaled());
+
+  StartTaskScheduler();
+  task_running.Wait();
+}
+
+// Verifies that a task posted via a TaskRunner doesn't run before Start() is
+// called.
+TEST_P(TaskSchedulerImplTest, PostTaskViaTaskRunnerBeforeStart) {
+  WaitableEvent task_running(WaitableEvent::ResetPolicy::MANUAL,
+                             WaitableEvent::InitialState::NOT_SIGNALED);
+  CreateTaskRunnerWithTraitsAndExecutionMode(&scheduler_, GetParam().traits,
+                                             GetParam().execution_mode)
+      ->PostTask(FROM_HERE,
+                 BindOnce(&VerifyTaskEnvironmentAndSignalEvent,
+                          GetParam().traits, Unretained(&task_running)));
+
+  // Wait a little bit to make sure that the task doesn't run before Start().
+  // Note: This test won't catch a case where the task runs just after the check
+  // and before Start(). However, we expect the test to be flaky if the tested
+  // code allows that to happen.
+  PlatformThread::Sleep(TestTimeouts::tiny_timeout());
+  EXPECT_FALSE(task_running.IsSignaled());
+
+  StartTaskScheduler();
+
+  // This should not hang if the task runs after Start().
+  task_running.Wait();
+}
+
+// Verify that all tasks posted to a TaskRunner after Start() run in a
+// USER_BLOCKING environment when the AllTasksUserBlocking variation param of
+// the BrowserScheduler experiment is true.
+TEST_P(TaskSchedulerImplTest, AllTasksAreUserBlockingTaskRunner) {
+  EnableAllTasksUserBlocking();
+  StartTaskScheduler();
+
+  WaitableEvent task_running(WaitableEvent::ResetPolicy::MANUAL,
+                             WaitableEvent::InitialState::NOT_SIGNALED);
+  CreateTaskRunnerWithTraitsAndExecutionMode(&scheduler_, GetParam().traits,
+                                             GetParam().execution_mode)
+      ->PostTask(FROM_HERE,
+                 BindOnce(&VerifyTaskEnvironmentAndSignalEvent,
+                          TaskTraits::Override(GetParam().traits,
+                                               {TaskPriority::USER_BLOCKING}),
+                          Unretained(&task_running)));
+  task_running.Wait();
+}
+
+// Verify that all tasks posted via PostDelayedTaskWithTraits() after Start()
+// run in a USER_BLOCKING environment when the AllTasksUserBlocking variation
+// param of the BrowserScheduler experiment is true.
+TEST_P(TaskSchedulerImplTest, AllTasksAreUserBlocking) {
+  EnableAllTasksUserBlocking();
+  StartTaskScheduler();
+
+  WaitableEvent task_running(WaitableEvent::ResetPolicy::MANUAL,
+                             WaitableEvent::InitialState::NOT_SIGNALED);
+  // Ignore |GetParam().execution_mode| in this test.
+  scheduler_.PostDelayedTaskWithTraits(
+      FROM_HERE, GetParam().traits,
+      BindOnce(&VerifyTaskEnvironmentAndSignalEvent,
+               TaskTraits::Override(GetParam().traits,
+                                    {TaskPriority::USER_BLOCKING}),
+               Unretained(&task_running)),
+      TimeDelta());
+  task_running.Wait();
+}
+
+// Verifies that FlushAsyncForTesting() calls back correctly for all trait and
+// execution mode pairs.
+TEST_P(TaskSchedulerImplTest, FlushAsyncForTestingSimple) {
+  StartTaskScheduler();
+
+  WaitableEvent unblock_task(WaitableEvent::ResetPolicy::MANUAL,
+                             WaitableEvent::InitialState::NOT_SIGNALED);
+  CreateTaskRunnerWithTraitsAndExecutionMode(
+      &scheduler_,
+      TaskTraits::Override(GetParam().traits, {WithBaseSyncPrimitives()}),
+      GetParam().execution_mode, SingleThreadTaskRunnerThreadMode::DEDICATED)
+      ->PostTask(FROM_HERE,
+                 BindOnce(&WaitableEvent::Wait, Unretained(&unblock_task)));
+
+  WaitableEvent flush_event(WaitableEvent::ResetPolicy::MANUAL,
+                            WaitableEvent::InitialState::NOT_SIGNALED);
+  scheduler_.FlushAsyncForTesting(
+      BindOnce(&WaitableEvent::Signal, Unretained(&flush_event)));
+  PlatformThread::Sleep(TestTimeouts::tiny_timeout());
+  EXPECT_FALSE(flush_event.IsSignaled());
+
+  unblock_task.Signal();
+
+  flush_event.Wait();
+}
+
+INSTANTIATE_TEST_CASE_P(OneTraitsExecutionModePair,
+                        TaskSchedulerImplTest,
+                        ::testing::ValuesIn(GetTraitsExecutionModePairs()));
+
+// Spawns threads that simultaneously post Tasks to TaskRunners with various
+// TaskTraits and ExecutionModes. Verifies that each Task runs on a thread with
+// the expected priority and I/O restrictions and respects the characteristics
+// of its ExecutionMode.
+TEST_F(TaskSchedulerImplTest, MultipleTraitsExecutionModePairs) {
+  StartTaskScheduler();
+  std::vector<std::unique_ptr<ThreadPostingTasks>> threads_posting_tasks;
+  for (const auto& traits_execution_mode_pair : GetTraitsExecutionModePairs()) {
+    threads_posting_tasks.push_back(WrapUnique(
+        new ThreadPostingTasks(&scheduler_, traits_execution_mode_pair.traits,
+                               traits_execution_mode_pair.execution_mode)));
+    threads_posting_tasks.back()->Start();
+  }
+
+  for (const auto& thread : threads_posting_tasks) {
+    thread->WaitForAllTasksToRun();
+    thread->Join();
+  }
+}
+
+TEST_F(TaskSchedulerImplTest,
+       GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated) {
+  StartTaskScheduler();
+  EXPECT_EQ(1, scheduler_.GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated(
+                   {TaskPriority::BACKGROUND}));
+  EXPECT_EQ(3, scheduler_.GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated(
+                   {MayBlock(), TaskPriority::BACKGROUND}));
+  EXPECT_EQ(4, scheduler_.GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated(
+                   {TaskPriority::USER_VISIBLE}));
+  EXPECT_EQ(12, scheduler_.GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated(
+                    {MayBlock(), TaskPriority::USER_VISIBLE}));
+  EXPECT_EQ(4, scheduler_.GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated(
+                   {TaskPriority::USER_BLOCKING}));
+  EXPECT_EQ(12, scheduler_.GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated(
+                    {MayBlock(), TaskPriority::USER_BLOCKING}));
+}
+
+// Verify that the RunsTasksInCurrentSequence() method of a SequencedTaskRunner
+// returns false when called from a task that isn't part of the sequence.
+TEST_F(TaskSchedulerImplTest, SequencedRunsTasksInCurrentSequence) {
+  StartTaskScheduler();
+  auto single_thread_task_runner =
+      scheduler_.CreateSingleThreadTaskRunnerWithTraits(
+          TaskTraits(), SingleThreadTaskRunnerThreadMode::SHARED);
+  auto sequenced_task_runner =
+      scheduler_.CreateSequencedTaskRunnerWithTraits(TaskTraits());
+
+  WaitableEvent task_ran(WaitableEvent::ResetPolicy::MANUAL,
+                         WaitableEvent::InitialState::NOT_SIGNALED);
+  single_thread_task_runner->PostTask(
+      FROM_HERE,
+      BindOnce(
+          [](scoped_refptr<TaskRunner> sequenced_task_runner,
+             WaitableEvent* task_ran) {
+            EXPECT_FALSE(sequenced_task_runner->RunsTasksInCurrentSequence());
+            task_ran->Signal();
+          },
+          sequenced_task_runner, Unretained(&task_ran)));
+  task_ran.Wait();
+}
+
+// Verify that the RunsTasksInCurrentSequence() method of a
+// SingleThreadTaskRunner returns false when called from a task that isn't part
+// of the sequence.
+TEST_F(TaskSchedulerImplTest, SingleThreadRunsTasksInCurrentSequence) {
+  StartTaskScheduler();
+  auto sequenced_task_runner =
+      scheduler_.CreateSequencedTaskRunnerWithTraits(TaskTraits());
+  auto single_thread_task_runner =
+      scheduler_.CreateSingleThreadTaskRunnerWithTraits(
+          TaskTraits(), SingleThreadTaskRunnerThreadMode::SHARED);
+
+  WaitableEvent task_ran(WaitableEvent::ResetPolicy::MANUAL,
+                         WaitableEvent::InitialState::NOT_SIGNALED);
+  sequenced_task_runner->PostTask(
+      FROM_HERE,
+      BindOnce(
+          [](scoped_refptr<TaskRunner> single_thread_task_runner,
+             WaitableEvent* task_ran) {
+            EXPECT_FALSE(
+                single_thread_task_runner->RunsTasksInCurrentSequence());
+            task_ran->Signal();
+          },
+          single_thread_task_runner, Unretained(&task_ran)));
+  task_ran.Wait();
+}
+
+#if defined(OS_WIN)
+TEST_F(TaskSchedulerImplTest, COMSTATaskRunnersRunWithCOMSTA) {
+  StartTaskScheduler();
+  auto com_sta_task_runner = scheduler_.CreateCOMSTATaskRunnerWithTraits(
+      TaskTraits(), SingleThreadTaskRunnerThreadMode::SHARED);
+
+  WaitableEvent task_ran(WaitableEvent::ResetPolicy::MANUAL,
+                         WaitableEvent::InitialState::NOT_SIGNALED);
+  com_sta_task_runner->PostTask(
+      FROM_HERE, Bind(
+                     [](WaitableEvent* task_ran) {
+                       win::AssertComApartmentType(win::ComApartmentType::STA);
+                       task_ran->Signal();
+                     },
+                     Unretained(&task_ran)));
+  task_ran.Wait();
+}
+#endif  // defined(OS_WIN)
+
+TEST_F(TaskSchedulerImplTest, DelayedTasksNotRunAfterShutdown) {
+  StartTaskScheduler();
+  // As with delayed tasks in general, this is racy. If the task does happen to
+  // run after Shutdown within the timeout, it will fail this test.
+  //
+  // The timeout should be long enough to ensure that the delayed task did not
+  // run. 2x is generally good enough.
+  //
+  // A non-racy way to do this would be to post two sequenced tasks:
+  // 1) Regular Post Task: A WaitableEvent.Wait
+  // 2) Delayed Task: ADD_FAILURE()
+  // and signaling the WaitableEvent after Shutdown() on a different thread
+  // since Shutdown() will block. However, the cost of managing this extra
+  // thread was deemed to be too great for the unlikely race.
+  scheduler_.PostDelayedTaskWithTraits(FROM_HERE, TaskTraits(),
+                                       BindOnce([]() { ADD_FAILURE(); }),
+                                       TestTimeouts::tiny_timeout());
+  scheduler_.Shutdown();
+  PlatformThread::Sleep(TestTimeouts::tiny_timeout() * 2);
+}
+
+#if defined(OS_POSIX)
+
+TEST_F(TaskSchedulerImplTest, FileDescriptorWatcherNoOpsAfterShutdown) {
+  StartTaskScheduler();
+
+  int pipes[2];
+  ASSERT_EQ(0, pipe(pipes));
+
+  scoped_refptr<TaskRunner> blocking_task_runner =
+      scheduler_.CreateSequencedTaskRunnerWithTraits(
+          {TaskShutdownBehavior::BLOCK_SHUTDOWN});
+  blocking_task_runner->PostTask(
+      FROM_HERE,
+      BindOnce(
+          [](int read_fd) {
+            std::unique_ptr<FileDescriptorWatcher::Controller> controller =
+                FileDescriptorWatcher::WatchReadable(
+                    read_fd, BindRepeating([]() { NOTREACHED(); }));
+
+            // This test is for components that intentionally leak their
+            // watchers at shutdown. We can't clean |controller| up because its
+            // destructor will assert that it's being called from the correct
+            // sequence. After the task scheduler is shutdown, it is not
+            // possible to run tasks on this sequence.
+            //
+            // Note: Do not inline the controller.release() call into the
+            //       ANNOTATE_LEAKING_OBJECT_PTR as the annotation is removed
+            //       by the preprocessor in non-LEAK_SANITIZER builds,
+            //       effectively breaking this test.
+            ANNOTATE_LEAKING_OBJECT_PTR(controller.get());
+            controller.release();
+          },
+          pipes[0]));
+
+  scheduler_.Shutdown();
+
+  constexpr char kByte = '!';
+  ASSERT_TRUE(WriteFileDescriptor(pipes[1], &kByte, sizeof(kByte)));
+
+  // Give a chance for the file watcher to fire before closing the handles.
+  PlatformThread::Sleep(TestTimeouts::tiny_timeout());
+
+  EXPECT_EQ(0, IGNORE_EINTR(close(pipes[0])));
+  EXPECT_EQ(0, IGNORE_EINTR(close(pipes[1])));
+}
+#endif  // defined(OS_POSIX)
+
+// Verify that tasks posted on the same sequence access the same values on
+// SequenceLocalStorage, and tasks on different sequences see different values.
+TEST_F(TaskSchedulerImplTest, SequenceLocalStorage) {
+  StartTaskScheduler();
+
+  SequenceLocalStorageSlot<int> slot;
+  auto sequenced_task_runner1 =
+      scheduler_.CreateSequencedTaskRunnerWithTraits(TaskTraits());
+  auto sequenced_task_runner2 =
+      scheduler_.CreateSequencedTaskRunnerWithTraits(TaskTraits());
+
+  sequenced_task_runner1->PostTask(
+      FROM_HERE,
+      BindOnce([](SequenceLocalStorageSlot<int>* slot) { slot->Set(11); },
+               &slot));
+
+  sequenced_task_runner1->PostTask(FROM_HERE,
+                                   BindOnce(
+                                       [](SequenceLocalStorageSlot<int>* slot) {
+                                         EXPECT_EQ(slot->Get(), 11);
+                                       },
+                                       &slot));
+
+  sequenced_task_runner2->PostTask(FROM_HERE,
+                                   BindOnce(
+                                       [](SequenceLocalStorageSlot<int>* slot) {
+                                         EXPECT_NE(slot->Get(), 11);
+                                       },
+                                       &slot));
+
+  scheduler_.FlushForTesting();
+}
+
+TEST_F(TaskSchedulerImplTest, FlushAsyncNoTasks) {
+  StartTaskScheduler();
+  bool called_back = false;
+  scheduler_.FlushAsyncForTesting(
+      BindOnce([](bool* called_back) { *called_back = true; },
+               Unretained(&called_back)));
+  EXPECT_TRUE(called_back);
+}
+
+namespace {
+
+// Verifies that |query| is found on the current stack. Ignores failures if this
+// configuration doesn't have symbols.
+void VerifyHasStringOnStack(const std::string& query) {
+  const std::string stack = debug::StackTrace().ToString();
+  SCOPED_TRACE(stack);
+  const bool found_on_stack = stack.find(query) != std::string::npos;
+  const bool stack_has_symbols =
+      stack.find("SchedulerWorker") != std::string::npos;
+  EXPECT_TRUE(found_on_stack || !stack_has_symbols) << query;
+}
+
+}  // namespace
+
+#if defined(OS_POSIX)
+// Many POSIX bots flakily crash on |debug::StackTrace().ToString()|,
+// https://crbug.com/840429.
+#define MAYBE_IdentifiableStacks DISABLED_IdentifiableStacks
+#else
+#define MAYBE_IdentifiableStacks IdentifiableStacks
+#endif
+
+// Integration test that verifies that workers have a frame on their stacks
+// which easily identifies the type of worker (useful to diagnose issues from
+// logs without memory dumps).
+TEST_F(TaskSchedulerImplTest, MAYBE_IdentifiableStacks) {
+  StartTaskScheduler();
+
+  scheduler_.CreateSequencedTaskRunnerWithTraits({})->PostTask(
+      FROM_HERE, BindOnce(&VerifyHasStringOnStack, "RunPooledWorker"));
+  scheduler_.CreateSequencedTaskRunnerWithTraits({TaskPriority::BACKGROUND})
+      ->PostTask(FROM_HERE, BindOnce(&VerifyHasStringOnStack,
+                                     "RunBackgroundPooledWorker"));
+
+  scheduler_
+      .CreateSingleThreadTaskRunnerWithTraits(
+          {}, SingleThreadTaskRunnerThreadMode::SHARED)
+      ->PostTask(FROM_HERE,
+                 BindOnce(&VerifyHasStringOnStack, "RunSharedWorker"));
+  scheduler_
+      .CreateSingleThreadTaskRunnerWithTraits(
+          {TaskPriority::BACKGROUND}, SingleThreadTaskRunnerThreadMode::SHARED)
+      ->PostTask(FROM_HERE, BindOnce(&VerifyHasStringOnStack,
+                                     "RunBackgroundSharedWorker"));
+
+  scheduler_
+      .CreateSingleThreadTaskRunnerWithTraits(
+          {}, SingleThreadTaskRunnerThreadMode::DEDICATED)
+      ->PostTask(FROM_HERE,
+                 BindOnce(&VerifyHasStringOnStack, "RunDedicatedWorker"));
+  scheduler_
+      .CreateSingleThreadTaskRunnerWithTraits(
+          {TaskPriority::BACKGROUND},
+          SingleThreadTaskRunnerThreadMode::DEDICATED)
+      ->PostTask(FROM_HERE, BindOnce(&VerifyHasStringOnStack,
+                                     "RunBackgroundDedicatedWorker"));
+
+#if defined(OS_WIN)
+  scheduler_
+      .CreateCOMSTATaskRunnerWithTraits(
+          {}, SingleThreadTaskRunnerThreadMode::SHARED)
+      ->PostTask(FROM_HERE,
+                 BindOnce(&VerifyHasStringOnStack, "RunSharedCOMWorker"));
+  scheduler_
+      .CreateCOMSTATaskRunnerWithTraits(
+          {TaskPriority::BACKGROUND}, SingleThreadTaskRunnerThreadMode::SHARED)
+      ->PostTask(FROM_HERE, BindOnce(&VerifyHasStringOnStack,
+                                     "RunBackgroundSharedCOMWorker"));
+
+  scheduler_
+      .CreateCOMSTATaskRunnerWithTraits(
+          {}, SingleThreadTaskRunnerThreadMode::DEDICATED)
+      ->PostTask(FROM_HERE,
+                 BindOnce(&VerifyHasStringOnStack, "RunDedicatedCOMWorker"));
+  scheduler_
+      .CreateCOMSTATaskRunnerWithTraits(
+          {TaskPriority::BACKGROUND},
+          SingleThreadTaskRunnerThreadMode::DEDICATED)
+      ->PostTask(FROM_HERE, BindOnce(&VerifyHasStringOnStack,
+                                     "RunBackgroundDedicatedCOMWorker"));
+#endif  // defined(OS_WIN)
+
+  scheduler_.FlushForTesting();
+}
+
+TEST_F(TaskSchedulerImplTest, SchedulerWorkerObserver) {
+  testing::StrictMock<test::MockSchedulerWorkerObserver> observer;
+  set_scheduler_worker_observer(&observer);
+
+// 4 workers should be created for the 4 pools. After that, 8 threads should
+// be created for single-threaded work (16 on Windows).
+#if defined(OS_WIN)
+  constexpr int kExpectedNumWorkers = 20;
+#else
+  constexpr int kExpectedNumWorkers = 12;
+#endif
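+  // Breakdown: 4 pool workers, then one thread per single-thread runner
+  // created below. The 4 SHARED runners cover distinct (priority, MayBlock)
+  // combinations, so each gets its own shared thread, and the 4 DEDICATED
+  // runners get one dedicated thread each (doubled by the COM variants on
+  // Windows).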
+  EXPECT_CALL(observer, OnSchedulerWorkerMainEntry())
+      .Times(kExpectedNumWorkers);
+
+  StartTaskScheduler();
+
+  std::vector<scoped_refptr<SingleThreadTaskRunner>> task_runners;
+
+  task_runners.push_back(scheduler_.CreateSingleThreadTaskRunnerWithTraits(
+      {TaskPriority::BACKGROUND}, SingleThreadTaskRunnerThreadMode::SHARED));
+  task_runners.push_back(scheduler_.CreateSingleThreadTaskRunnerWithTraits(
+      {TaskPriority::BACKGROUND, MayBlock()},
+      SingleThreadTaskRunnerThreadMode::SHARED));
+  task_runners.push_back(scheduler_.CreateSingleThreadTaskRunnerWithTraits(
+      {TaskPriority::USER_BLOCKING}, SingleThreadTaskRunnerThreadMode::SHARED));
+  task_runners.push_back(scheduler_.CreateSingleThreadTaskRunnerWithTraits(
+      {TaskPriority::USER_BLOCKING, MayBlock()},
+      SingleThreadTaskRunnerThreadMode::SHARED));
+
+  task_runners.push_back(scheduler_.CreateSingleThreadTaskRunnerWithTraits(
+      {TaskPriority::BACKGROUND}, SingleThreadTaskRunnerThreadMode::DEDICATED));
+  task_runners.push_back(scheduler_.CreateSingleThreadTaskRunnerWithTraits(
+      {TaskPriority::BACKGROUND, MayBlock()},
+      SingleThreadTaskRunnerThreadMode::DEDICATED));
+  task_runners.push_back(scheduler_.CreateSingleThreadTaskRunnerWithTraits(
+      {TaskPriority::USER_BLOCKING},
+      SingleThreadTaskRunnerThreadMode::DEDICATED));
+  task_runners.push_back(scheduler_.CreateSingleThreadTaskRunnerWithTraits(
+      {TaskPriority::USER_BLOCKING, MayBlock()},
+      SingleThreadTaskRunnerThreadMode::DEDICATED));
+
+#if defined(OS_WIN)
+  task_runners.push_back(scheduler_.CreateCOMSTATaskRunnerWithTraits(
+      {TaskPriority::BACKGROUND}, SingleThreadTaskRunnerThreadMode::SHARED));
+  task_runners.push_back(scheduler_.CreateCOMSTATaskRunnerWithTraits(
+      {TaskPriority::BACKGROUND, MayBlock()},
+      SingleThreadTaskRunnerThreadMode::SHARED));
+  task_runners.push_back(scheduler_.CreateCOMSTATaskRunnerWithTraits(
+      {TaskPriority::USER_BLOCKING}, SingleThreadTaskRunnerThreadMode::SHARED));
+  task_runners.push_back(scheduler_.CreateCOMSTATaskRunnerWithTraits(
+      {TaskPriority::USER_BLOCKING, MayBlock()},
+      SingleThreadTaskRunnerThreadMode::SHARED));
+
+  task_runners.push_back(scheduler_.CreateCOMSTATaskRunnerWithTraits(
+      {TaskPriority::BACKGROUND}, SingleThreadTaskRunnerThreadMode::DEDICATED));
+  task_runners.push_back(scheduler_.CreateCOMSTATaskRunnerWithTraits(
+      {TaskPriority::BACKGROUND, MayBlock()},
+      SingleThreadTaskRunnerThreadMode::DEDICATED));
+  task_runners.push_back(scheduler_.CreateCOMSTATaskRunnerWithTraits(
+      {TaskPriority::USER_BLOCKING},
+      SingleThreadTaskRunnerThreadMode::DEDICATED));
+  task_runners.push_back(scheduler_.CreateCOMSTATaskRunnerWithTraits(
+      {TaskPriority::USER_BLOCKING, MayBlock()},
+      SingleThreadTaskRunnerThreadMode::DEDICATED));
+#endif
+
+  for (auto& task_runner : task_runners)
+    task_runner->PostTask(FROM_HERE, DoNothing());
+
+  EXPECT_CALL(observer, OnSchedulerWorkerMainExit()).Times(kExpectedNumWorkers);
+
+  // Allow single-threaded workers to be released.
+  task_runners.clear();
+
+  TearDown();
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/task_scheduler/task_tracker.cc b/base/task_scheduler/task_tracker.cc
new file mode 100644
index 0000000..ab46b9e
--- /dev/null
+++ b/base/task_scheduler/task_tracker.cc
@@ -0,0 +1,841 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/task_tracker.h"
+
+#include <limits>
+#include <string>
+#include <vector>
+
+#include "base/base_switches.h"
+#include "base/callback.h"
+#include "base/command_line.h"
+#include "base/json/json_writer.h"
+#include "base/memory/ptr_util.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/sequence_token.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/task_scheduler/scoped_set_task_priority_for_current_thread.h"
+#include "base/threading/sequence_local_storage_map.h"
+#include "base/threading/sequenced_task_runner_handle.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/time/time.h"
+#include "base/trace_event/trace_event.h"
+#include "base/values.h"
+
+namespace base {
+namespace internal {
+
+namespace {
+
+constexpr char kParallelExecutionMode[] = "parallel";
+constexpr char kSequencedExecutionMode[] = "sequenced";
+constexpr char kSingleThreadExecutionMode[] = "single thread";
+
+// An immutable copy of a scheduler task's info required by tracing.
+class TaskTracingInfo : public trace_event::ConvertableToTraceFormat {
+ public:
+  TaskTracingInfo(const TaskTraits& task_traits,
+                  const char* execution_mode,
+                  const SequenceToken& sequence_token)
+      : task_traits_(task_traits),
+        execution_mode_(execution_mode),
+        sequence_token_(sequence_token) {}
+
+  // trace_event::ConvertableToTraceFormat implementation.
+  void AppendAsTraceFormat(std::string* out) const override;
+
+ private:
+  const TaskTraits task_traits_;
+  const char* const execution_mode_;
+  const SequenceToken sequence_token_;
+
+  DISALLOW_COPY_AND_ASSIGN(TaskTracingInfo);
+};
+
+void TaskTracingInfo::AppendAsTraceFormat(std::string* out) const {
+  DictionaryValue dict;
+
+  dict.SetString("task_priority",
+                 base::TaskPriorityToString(task_traits_.priority()));
+  dict.SetString("execution_mode", execution_mode_);
+  if (execution_mode_ != kParallelExecutionMode)
+    dict.SetInteger("sequence_token", sequence_token_.ToInternalValue());
+
+  std::string tmp;
+  JSONWriter::Write(dict, &tmp);
+  out->append(tmp);
+}
+
+// These names convey that a Task is posted to/run by the task scheduler
+// without revealing its implementation details.
+constexpr char kQueueFunctionName[] = "TaskScheduler PostTask";
+constexpr char kRunFunctionName[] = "TaskScheduler RunTask";
+
+constexpr char kTaskSchedulerFlowTracingCategory[] =
+    TRACE_DISABLED_BY_DEFAULT("task_scheduler.flow");
+
+// Constructs a histogram to track latency, logged as
+// "TaskScheduler.{histogram_name}.{histogram_label}.{task_type_suffix}".
+HistogramBase* GetLatencyHistogram(StringPiece histogram_name,
+                                   StringPiece histogram_label,
+                                   StringPiece task_type_suffix) {
+  DCHECK(!histogram_name.empty());
+  DCHECK(!histogram_label.empty());
+  DCHECK(!task_type_suffix.empty());
+  // Mimics the UMA_HISTOGRAM_HIGH_RESOLUTION_CUSTOM_TIMES macro. The minimum
+  // and maximum were chosen to place the 1ms mark at around 70% of the bucket
+  // range, giving us good info for tasks that have a latency below 1ms (most
+  // of them) and enough info to assess how bad the latency is for tasks that
+  // exceed this threshold.
+  const std::string histogram = JoinString(
+      {"TaskScheduler", histogram_name, histogram_label, task_type_suffix},
+      ".");
+  return Histogram::FactoryMicrosecondsTimeGet(
+      histogram, TimeDelta::FromMicroseconds(1),
+      TimeDelta::FromMilliseconds(20), 50,
+      HistogramBase::kUmaTargetedHistogramFlag);
+}
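+// For example, with a hypothetical "Renderer" label,
+//   GetLatencyHistogram("TaskLatencyMicroseconds", "Renderer",
+//                       "UserBlockingTaskPriority")
+// records to "TaskScheduler.TaskLatencyMicroseconds.Renderer.
+// UserBlockingTaskPriority" with 50 buckets spanning 1 us to 20 ms.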
+
+// Upper bound for the
+// TaskScheduler.BlockShutdownTasksPostedDuringShutdown histogram.
+constexpr HistogramBase::Sample kMaxBlockShutdownTasksPostedDuringShutdown =
+    1000;
+
+void RecordNumBlockShutdownTasksPostedDuringShutdown(
+    HistogramBase::Sample value) {
+  UMA_HISTOGRAM_CUSTOM_COUNTS(
+      "TaskScheduler.BlockShutdownTasksPostedDuringShutdown", value, 1,
+      kMaxBlockShutdownTasksPostedDuringShutdown, 50);
+}
+
+// Returns the maximum number of TaskPriority::BACKGROUND sequences that can be
+// scheduled concurrently based on command line flags.
+int GetMaxNumScheduledBackgroundSequences() {
+  // The CommandLine might not be initialized if TaskScheduler is initialized
+  // in a dynamic library which doesn't have access to argc/argv.
+  if (CommandLine::InitializedForCurrentProcess() &&
+      CommandLine::ForCurrentProcess()->HasSwitch(
+          switches::kDisableBackgroundTasks)) {
+    return 0;
+  }
+  return std::numeric_limits<int>::max();
+}
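+// For example, a process launched with --disable-background-tasks gets a cap
+// of 0, so WillScheduleSequence() preempts every TaskPriority::BACKGROUND
+// sequence until the cap is lifted (e.g. at shutdown; see PerformShutdown()).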
+
+}  // namespace
+
+// Atomic internal state used by TaskTracker. Sequential consistency shouldn't
+// be assumed from these calls (i.e. a thread reading
+// |HasShutdownStarted() == true| isn't guaranteed to see all writes made before
+// |StartShutdown()| on the thread that invoked it).
+class TaskTracker::State {
+ public:
+  State() = default;
+
+  // Sets a flag indicating that shutdown has started. Returns true if there are
+  // tasks blocking shutdown. Can only be called once.
+  bool StartShutdown() {
+    const auto new_value =
+        subtle::NoBarrier_AtomicIncrement(&bits_, kShutdownHasStartedMask);
+
+    // Check that the "shutdown has started" bit isn't zero. It would wrap
+    // back to zero if StartShutdown() were called twice.
+    DCHECK(new_value & kShutdownHasStartedMask);
+
+    const auto num_tasks_blocking_shutdown =
+        new_value >> kNumTasksBlockingShutdownBitOffset;
+    return num_tasks_blocking_shutdown != 0;
+  }
+
+  // Returns true if shutdown has started.
+  bool HasShutdownStarted() const {
+    return subtle::NoBarrier_Load(&bits_) & kShutdownHasStartedMask;
+  }
+
+  // Returns true if there are tasks blocking shutdown.
+  bool AreTasksBlockingShutdown() const {
+    const auto num_tasks_blocking_shutdown =
+        subtle::NoBarrier_Load(&bits_) >> kNumTasksBlockingShutdownBitOffset;
+    DCHECK_GE(num_tasks_blocking_shutdown, 0);
+    return num_tasks_blocking_shutdown != 0;
+  }
+
+  // Increments the number of tasks blocking shutdown. Returns true if shutdown
+  // has started.
+  bool IncrementNumTasksBlockingShutdown() {
+#if DCHECK_IS_ON()
+    // Verify that no overflow will occur.
+    const auto num_tasks_blocking_shutdown =
+        subtle::NoBarrier_Load(&bits_) >> kNumTasksBlockingShutdownBitOffset;
+    DCHECK_LT(num_tasks_blocking_shutdown,
+              std::numeric_limits<subtle::Atomic32>::max() -
+                  kNumTasksBlockingShutdownIncrement);
+#endif
+
+    const auto new_bits = subtle::NoBarrier_AtomicIncrement(
+        &bits_, kNumTasksBlockingShutdownIncrement);
+    return new_bits & kShutdownHasStartedMask;
+  }
+
+  // Decrements the number of tasks blocking shutdown. Returns true if shutdown
+  // has started and the number of tasks blocking shutdown becomes zero.
+  bool DecrementNumTasksBlockingShutdown() {
+    const auto new_bits = subtle::NoBarrier_AtomicIncrement(
+        &bits_, -kNumTasksBlockingShutdownIncrement);
+    const bool shutdown_has_started = new_bits & kShutdownHasStartedMask;
+    const auto num_tasks_blocking_shutdown =
+        new_bits >> kNumTasksBlockingShutdownBitOffset;
+    DCHECK_GE(num_tasks_blocking_shutdown, 0);
+    return shutdown_has_started && num_tasks_blocking_shutdown == 0;
+  }
+
+ private:
+  static constexpr subtle::Atomic32 kShutdownHasStartedMask = 1;
+  static constexpr subtle::Atomic32 kNumTasksBlockingShutdownBitOffset = 1;
+  static constexpr subtle::Atomic32 kNumTasksBlockingShutdownIncrement =
+      1 << kNumTasksBlockingShutdownBitOffset;
+
+  // The LSB indicates whether shutdown has started. The other bits count the
+  // number of tasks blocking shutdown.
+  // No barriers are required to read/write |bits_| as this class is only used
+  // as an atomic state checker, it doesn't provide sequential consistency
+  // guarantees w.r.t. external state. Sequencing of the TaskTracker::State
+  // operations themselves is guaranteed by the AtomicIncrement RMW (read-
+  // modify-write) semantics however. For example, if two threads are racing to
+  // call IncrementNumTasksBlockingShutdown() and StartShutdown() respectively,
+  // either the first thread will win and the StartShutdown() call will see the
+  // blocking task or the second thread will win and
+  // IncrementNumTasksBlockingShutdown() will know that shutdown has started.
+  subtle::Atomic32 bits_ = 0;
+
+  DISALLOW_COPY_AND_ASSIGN(State);
+};
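+// Worked example of the |bits_| encoding above (illustrative values only):
+//   State state;                                // bits_ == 0b000
+//   state.IncrementNumTasksBlockingShutdown();  // bits_ == 0b010, 1 task
+//   state.IncrementNumTasksBlockingShutdown();  // bits_ == 0b100, 2 tasks
+//   state.StartShutdown();                      // bits_ == 0b101, returns true
+//   state.DecrementNumTasksBlockingShutdown();  // bits_ == 0b011, false
+//   state.DecrementNumTasksBlockingShutdown();  // bits_ == 0b001, true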
+
+struct TaskTracker::PreemptedBackgroundSequence {
+  PreemptedBackgroundSequence() = default;
+  PreemptedBackgroundSequence(scoped_refptr<Sequence> sequence_in,
+                              TimeTicks next_task_sequenced_time_in,
+                              CanScheduleSequenceObserver* observer_in)
+      : sequence(std::move(sequence_in)),
+        next_task_sequenced_time(next_task_sequenced_time_in),
+        observer(observer_in) {}
+  PreemptedBackgroundSequence(PreemptedBackgroundSequence&& other) = default;
+  ~PreemptedBackgroundSequence() = default;
+  PreemptedBackgroundSequence& operator=(PreemptedBackgroundSequence&& other) =
+      default;
+  bool operator<(const PreemptedBackgroundSequence& other) const {
+    return next_task_sequenced_time < other.next_task_sequenced_time;
+  }
+  bool operator>(const PreemptedBackgroundSequence& other) const {
+    return next_task_sequenced_time > other.next_task_sequenced_time;
+  }
+
+  // A background sequence waiting to be scheduled.
+  scoped_refptr<Sequence> sequence;
+
+  // The sequenced time of the next task in |sequence|.
+  TimeTicks next_task_sequenced_time;
+
+  // An observer to notify when |sequence| can be scheduled.
+  CanScheduleSequenceObserver* observer = nullptr;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(PreemptedBackgroundSequence);
+};
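+// Note: operator>() above is what the std::greater comparator of
+// |preempted_background_sequences_| (declared in task_tracker.h) invokes, so
+// top() always yields the sequence whose next task was posted the earliest.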
+
+TaskTracker::TaskTracker(StringPiece histogram_label)
+    : TaskTracker(histogram_label, GetMaxNumScheduledBackgroundSequences()) {}
+
+TaskTracker::TaskTracker(StringPiece histogram_label,
+                         int max_num_scheduled_background_sequences)
+    : state_(new State),
+      flush_cv_(flush_lock_.CreateConditionVariable()),
+      shutdown_lock_(&flush_lock_),
+      max_num_scheduled_background_sequences_(
+          max_num_scheduled_background_sequences),
+      task_latency_histograms_{
+          {GetLatencyHistogram("TaskLatencyMicroseconds",
+                               histogram_label,
+                               "BackgroundTaskPriority"),
+           GetLatencyHistogram("TaskLatencyMicroseconds",
+                               histogram_label,
+                               "BackgroundTaskPriority_MayBlock")},
+          {GetLatencyHistogram("TaskLatencyMicroseconds",
+                               histogram_label,
+                               "UserVisibleTaskPriority"),
+           GetLatencyHistogram("TaskLatencyMicroseconds",
+                               histogram_label,
+                               "UserVisibleTaskPriority_MayBlock")},
+          {GetLatencyHistogram("TaskLatencyMicroseconds",
+                               histogram_label,
+                               "UserBlockingTaskPriority"),
+           GetLatencyHistogram("TaskLatencyMicroseconds",
+                               histogram_label,
+                               "UserBlockingTaskPriority_MayBlock")}},
+      heartbeat_latency_histograms_{
+          {GetLatencyHistogram("HeartbeatLatencyMicroseconds",
+                               histogram_label,
+                               "BackgroundTaskPriority"),
+           GetLatencyHistogram("HeartbeatLatencyMicroseconds",
+                               histogram_label,
+                               "BackgroundTaskPriority_MayBlock")},
+          {GetLatencyHistogram("HeartbeatLatencyMicroseconds",
+                               histogram_label,
+                               "UserVisibleTaskPriority"),
+           GetLatencyHistogram("HeartbeatLatencyMicroseconds",
+                               histogram_label,
+                               "UserVisibleTaskPriority_MayBlock")},
+          {GetLatencyHistogram("HeartbeatLatencyMicroseconds",
+                               histogram_label,
+                               "UserBlockingTaskPriority"),
+           GetLatencyHistogram("HeartbeatLatencyMicroseconds",
+                               histogram_label,
+                               "UserBlockingTaskPriority_MayBlock")}},
+      tracked_ref_factory_(this) {
+  // Confirm that all |task_latency_histograms_| have been initialized above.
+  DCHECK(*(&task_latency_histograms_[static_cast<int>(TaskPriority::HIGHEST) +
+                                     1][0] -
+           1));
+}
+
+TaskTracker::~TaskTracker() = default;
+
+void TaskTracker::Shutdown() {
+  PerformShutdown();
+  DCHECK(IsShutdownComplete());
+
+  // Unblock FlushForTesting() and perform the FlushAsyncForTesting callback
+  // when shutdown completes.
+  {
+    AutoSchedulerLock auto_lock(flush_lock_);
+    flush_cv_->Signal();
+  }
+  CallFlushCallbackForTesting();
+}
+
+void TaskTracker::FlushForTesting() {
+  AutoSchedulerLock auto_lock(flush_lock_);
+  while (subtle::Acquire_Load(&num_incomplete_undelayed_tasks_) != 0 &&
+         !IsShutdownComplete()) {
+    flush_cv_->Wait();
+  }
+}
+
+void TaskTracker::FlushAsyncForTesting(OnceClosure flush_callback) {
+  DCHECK(flush_callback);
+  {
+    AutoSchedulerLock auto_lock(flush_lock_);
+    DCHECK(!flush_callback_for_testing_)
+        << "Only one FlushAsyncForTesting() may be pending at any time.";
+    flush_callback_for_testing_ = std::move(flush_callback);
+  }
+
+  if (subtle::Acquire_Load(&num_incomplete_undelayed_tasks_) == 0 ||
+      IsShutdownComplete()) {
+    CallFlushCallbackForTesting();
+  }
+}
+
+bool TaskTracker::WillPostTask(const Task& task) {
+  DCHECK(task.task);
+
+  if (!BeforePostTask(task.traits.shutdown_behavior()))
+    return false;
+
+  if (task.delayed_run_time.is_null())
+    subtle::NoBarrier_AtomicIncrement(&num_incomplete_undelayed_tasks_, 1);
+
+  {
+    TRACE_EVENT_WITH_FLOW0(
+        kTaskSchedulerFlowTracingCategory, kQueueFunctionName,
+        TRACE_ID_MANGLE(task_annotator_.GetTaskTraceID(task)),
+        TRACE_EVENT_FLAG_FLOW_OUT);
+  }
+
+  task_annotator_.DidQueueTask(nullptr, task);
+
+  return true;
+}
+
+scoped_refptr<Sequence> TaskTracker::WillScheduleSequence(
+    scoped_refptr<Sequence> sequence,
+    CanScheduleSequenceObserver* observer) {
+  const SequenceSortKey sort_key = sequence->GetSortKey();
+
+  // A foreground sequence can always be scheduled.
+  if (sort_key.priority() != TaskPriority::BACKGROUND)
+    return sequence;
+
+  // It is convenient not to have to specify an observer when scheduling
+  // foreground sequences in tests.
+  DCHECK(observer);
+
+  AutoSchedulerLock auto_lock(background_lock_);
+
+  if (num_scheduled_background_sequences_ <
+      max_num_scheduled_background_sequences_) {
+    ++num_scheduled_background_sequences_;
+    return sequence;
+  }
+
+  preempted_background_sequences_.emplace(
+      std::move(sequence), sort_key.next_task_sequenced_time(), observer);
+  return nullptr;
+}
+
+scoped_refptr<Sequence> TaskTracker::RunAndPopNextTask(
+    scoped_refptr<Sequence> sequence,
+    CanScheduleSequenceObserver* observer) {
+  DCHECK(sequence);
+
+  // Run the next task in |sequence|.
+  Optional<Task> task = sequence->TakeTask();
+  // TODO(fdoray): Support TakeTask() returning null. https://crbug.com/783309
+  DCHECK(task);
+
+  const TaskShutdownBehavior shutdown_behavior =
+      task->traits.shutdown_behavior();
+  const TaskPriority task_priority = task->traits.priority();
+  const bool can_run_task = BeforeRunTask(shutdown_behavior);
+  const bool is_delayed = !task->delayed_run_time.is_null();
+
+  RunOrSkipTask(std::move(task.value()), sequence.get(), can_run_task);
+  if (can_run_task)
+    AfterRunTask(shutdown_behavior);
+
+  if (!is_delayed)
+    DecrementNumIncompleteUndelayedTasks();
+
+  const bool sequence_is_empty_after_pop = sequence->Pop();
+
+  // Never reschedule a Sequence emptied by Pop(). By contract, the next
+  // poster to make it non-empty is responsible for scheduling it.
+  if (sequence_is_empty_after_pop)
+    sequence = nullptr;
+
+  if (task_priority == TaskPriority::BACKGROUND) {
+    // Allow |sequence| to be rescheduled only if its next task is set to run
+    // earlier than the earliest currently preempted sequence.
+    return ManageBackgroundSequencesAfterRunningTask(std::move(sequence),
+                                                     observer);
+  }
+
+  return sequence;
+}
+
+bool TaskTracker::HasShutdownStarted() const {
+  return state_->HasShutdownStarted();
+}
+
+bool TaskTracker::IsShutdownComplete() const {
+  AutoSchedulerLock auto_lock(shutdown_lock_);
+  return shutdown_event_ && shutdown_event_->IsSignaled();
+}
+
+void TaskTracker::SetHasShutdownStartedForTesting() {
+  AutoSchedulerLock auto_lock(shutdown_lock_);
+
+  // Create a dummy |shutdown_event_| to satisfy TaskTracker's expectation of
+  // its existence during shutdown (e.g. in OnBlockingShutdownTasksComplete()).
+  shutdown_event_.reset(
+      new WaitableEvent(WaitableEvent::ResetPolicy::MANUAL,
+                        WaitableEvent::InitialState::NOT_SIGNALED));
+
+  state_->StartShutdown();
+}
+
+void TaskTracker::RecordLatencyHistogram(
+    LatencyHistogramType latency_histogram_type,
+    TaskTraits task_traits,
+    TimeTicks posted_time) const {
+  const TimeDelta task_latency = TimeTicks::Now() - posted_time;
+
+  DCHECK(latency_histogram_type == LatencyHistogramType::TASK_LATENCY ||
+         latency_histogram_type == LatencyHistogramType::HEARTBEAT_LATENCY);
+  auto& histograms =
+      latency_histogram_type == LatencyHistogramType::TASK_LATENCY
+          ? task_latency_histograms_
+          : heartbeat_latency_histograms_;
+  histograms[static_cast<int>(task_traits.priority())]
+            [task_traits.may_block() || task_traits.with_base_sync_primitives()
+                 ? 1
+                 : 0]
+                ->AddTimeMicrosecondsGranularity(task_latency);
+}
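+// Indexing example (assuming the TaskPriority ordering BACKGROUND <
+// USER_VISIBLE < USER_BLOCKING used by the constructor's initializers): a
+// task posted with {TaskPriority::USER_BLOCKING, MayBlock()} is recorded to
+// histograms[2][1], i.e. the "UserBlockingTaskPriority_MayBlock" histogram.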
+
+void TaskTracker::RunOrSkipTask(Task task,
+                                Sequence* sequence,
+                                bool can_run_task) {
+  RecordLatencyHistogram(LatencyHistogramType::TASK_LATENCY, task.traits,
+                         task.sequenced_time);
+
+  const bool previous_singleton_allowed =
+      ThreadRestrictions::SetSingletonAllowed(
+          task.traits.shutdown_behavior() !=
+          TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN);
+  const bool previous_io_allowed =
+      ThreadRestrictions::SetIOAllowed(task.traits.may_block());
+  const bool previous_wait_allowed = ThreadRestrictions::SetWaitAllowed(
+      task.traits.with_base_sync_primitives());
+
+  {
+    const SequenceToken& sequence_token = sequence->token();
+    DCHECK(sequence_token.IsValid());
+    ScopedSetSequenceTokenForCurrentThread
+        scoped_set_sequence_token_for_current_thread(sequence_token);
+    ScopedSetTaskPriorityForCurrentThread
+        scoped_set_task_priority_for_current_thread(task.traits.priority());
+    ScopedSetSequenceLocalStorageMapForCurrentThread
+        scoped_set_sequence_local_storage_map_for_current_thread(
+            sequence->sequence_local_storage());
+
+    // Set up TaskRunnerHandle as expected for the scope of the task.
+    std::unique_ptr<SequencedTaskRunnerHandle> sequenced_task_runner_handle;
+    std::unique_ptr<ThreadTaskRunnerHandle> single_thread_task_runner_handle;
+    DCHECK(!task.sequenced_task_runner_ref ||
+           !task.single_thread_task_runner_ref);
+    if (task.sequenced_task_runner_ref) {
+      sequenced_task_runner_handle.reset(
+          new SequencedTaskRunnerHandle(task.sequenced_task_runner_ref));
+    } else if (task.single_thread_task_runner_ref) {
+      single_thread_task_runner_handle.reset(
+          new ThreadTaskRunnerHandle(task.single_thread_task_runner_ref));
+    }
+
+    if (can_run_task) {
+      TRACE_TASK_EXECUTION(kRunFunctionName, task);
+
+      const char* const execution_mode =
+          task.single_thread_task_runner_ref
+              ? kSingleThreadExecutionMode
+              : (task.sequenced_task_runner_ref ? kSequencedExecutionMode
+                                                : kParallelExecutionMode);
+      // TODO(gab): In a better world this would be tacked on as an extra arg
+      // to the trace event generated above. This is not possible however until
+      // http://crbug.com/652692 is resolved.
+      TRACE_EVENT1("task_scheduler", "TaskTracker::RunTask", "task_info",
+                   std::make_unique<TaskTracingInfo>(
+                       task.traits, execution_mode, sequence_token));
+
+      {
+        // Put this in its own scope so it precedes rather than overlaps with
+        // RunTask() in the trace view.
+        TRACE_EVENT_WITH_FLOW0(
+            kTaskSchedulerFlowTracingCategory, kQueueFunctionName,
+            TRACE_ID_MANGLE(task_annotator_.GetTaskTraceID(task)),
+            TRACE_EVENT_FLAG_FLOW_IN);
+      }
+
+      task_annotator_.RunTask(nullptr, &task);
+    }
+
+    // Make sure the arguments bound to the callback are deleted within the
+    // scope in which the callback runs.
+    task.task = OnceClosure();
+  }
+
+  ThreadRestrictions::SetWaitAllowed(previous_wait_allowed);
+  ThreadRestrictions::SetIOAllowed(previous_io_allowed);
+  ThreadRestrictions::SetSingletonAllowed(previous_singleton_allowed);
+}
+
+void TaskTracker::PerformShutdown() {
+  {
+    AutoSchedulerLock auto_lock(shutdown_lock_);
+
+    // This method can only be called once.
+    DCHECK(!shutdown_event_);
+    DCHECK(!num_block_shutdown_tasks_posted_during_shutdown_);
+    DCHECK(!state_->HasShutdownStarted());
+
+    shutdown_event_.reset(
+        new WaitableEvent(WaitableEvent::ResetPolicy::MANUAL,
+                          WaitableEvent::InitialState::NOT_SIGNALED));
+
+    const bool tasks_are_blocking_shutdown = state_->StartShutdown();
+
+    // From now on, if a thread causes the number of tasks blocking shutdown
+    // to become zero, it will call OnBlockingShutdownTasksComplete().
+
+    if (!tasks_are_blocking_shutdown) {
+      // If another thread posts a BLOCK_SHUTDOWN task at this moment, it will
+      // block until this method releases |shutdown_lock_|. Then, it will fail
+      // DCHECK(!shutdown_event_->IsSignaled()). This is the desired behavior
+      // because posting a BLOCK_SHUTDOWN task when TaskTracker::Shutdown() has
+      // started and no tasks are blocking shutdown isn't allowed.
+      shutdown_event_->Signal();
+      return;
+    }
+  }
+
+  // Remove the cap on the maximum number of background sequences that can be
+  // scheduled concurrently. Done after starting shutdown to ensure that non-
+  // BLOCK_SHUTDOWN sequences don't get a chance to run and that BLOCK_SHUTDOWN
+  // sequences run on threads running with a normal priority.
+  SetMaxNumScheduledBackgroundSequences(std::numeric_limits<int>::max());
+
+  // It is safe to access |shutdown_event_| without holding |shutdown_lock_|
+  // because the pointer never changes after being set above.
+  {
+    base::ThreadRestrictions::ScopedAllowWait allow_wait;
+    shutdown_event_->Wait();
+  }
+
+  {
+    AutoSchedulerLock auto_lock(shutdown_lock_);
+
+    // Record TaskScheduler.BlockShutdownTasksPostedDuringShutdown if fewer
+    // than |kMaxBlockShutdownTasksPostedDuringShutdown| BLOCK_SHUTDOWN tasks
+    // were posted during shutdown. Otherwise, the histogram has already been
+    // recorded in BeforePostTask().
+    if (num_block_shutdown_tasks_posted_during_shutdown_ <
+        kMaxBlockShutdownTasksPostedDuringShutdown) {
+      RecordNumBlockShutdownTasksPostedDuringShutdown(
+          num_block_shutdown_tasks_posted_during_shutdown_);
+    }
+  }
+}
+
+void TaskTracker::SetMaxNumScheduledBackgroundSequences(
+    int max_num_scheduled_background_sequences) {
+  std::vector<PreemptedBackgroundSequence> sequences_to_schedule;
+
+  {
+    AutoSchedulerLock auto_lock(background_lock_);
+    max_num_scheduled_background_sequences_ =
+        max_num_scheduled_background_sequences;
+
+    while (num_scheduled_background_sequences_ <
+               max_num_scheduled_background_sequences &&
+           !preempted_background_sequences_.empty()) {
+      sequences_to_schedule.push_back(
+          GetPreemptedBackgroundSequenceToScheduleLockRequired());
+    }
+  }
+
+  for (auto& sequence_to_schedule : sequences_to_schedule)
+    SchedulePreemptedBackgroundSequence(std::move(sequence_to_schedule));
+}
+
+TaskTracker::PreemptedBackgroundSequence
+TaskTracker::GetPreemptedBackgroundSequenceToScheduleLockRequired() {
+  background_lock_.AssertAcquired();
+  DCHECK(!preempted_background_sequences_.empty());
+
+  ++num_scheduled_background_sequences_;
+  DCHECK_LE(num_scheduled_background_sequences_,
+            max_num_scheduled_background_sequences_);
+
+  // The const_cast on top() is okay since the PreemptedBackgroundSequence is
+  // transactionally popped from |preempted_background_sequences_| right
+  // after, and the move doesn't alter the sort order (a requirement for the
+  // Windows STL's consistency debug-checks for std::priority_queue::top()).
+  PreemptedBackgroundSequence popped_sequence =
+      std::move(const_cast<PreemptedBackgroundSequence&>(
+          preempted_background_sequences_.top()));
+  preempted_background_sequences_.pop();
+  return popped_sequence;
+}
+
+void TaskTracker::SchedulePreemptedBackgroundSequence(
+    PreemptedBackgroundSequence sequence_to_schedule) {
+  DCHECK(sequence_to_schedule.observer);
+  sequence_to_schedule.observer->OnCanScheduleSequence(
+      std::move(sequence_to_schedule.sequence));
+}
+
+#if DCHECK_IS_ON()
+bool TaskTracker::IsPostingBlockShutdownTaskAfterShutdownAllowed() {
+  return false;
+}
+#endif
+
+bool TaskTracker::HasIncompleteUndelayedTasksForTesting() const {
+  return subtle::Acquire_Load(&num_incomplete_undelayed_tasks_) != 0;
+}
+
+bool TaskTracker::BeforePostTask(TaskShutdownBehavior shutdown_behavior) {
+  if (shutdown_behavior == TaskShutdownBehavior::BLOCK_SHUTDOWN) {
+    // BLOCK_SHUTDOWN tasks block shutdown between the moment they are posted
+    // and the moment they complete their execution.
+    const bool shutdown_started = state_->IncrementNumTasksBlockingShutdown();
+
+    if (shutdown_started) {
+      AutoSchedulerLock auto_lock(shutdown_lock_);
+
+      // A BLOCK_SHUTDOWN task posted after shutdown has completed is an
+      // ordering bug. This aims to catch those early.
+      DCHECK(shutdown_event_);
+      if (shutdown_event_->IsSignaled()) {
+#if DCHECK_IS_ON()
+// clang-format off
+        // TODO(robliao): http://crbug.com/698140. Since the service thread
+        // doesn't stop processing its own tasks at shutdown, we may still
+        // attempt to post a BLOCK_SHUTDOWN task in response to a
+        // FileDescriptorWatcher. Same is true for FilePathWatcher
+        // (http://crbug.com/728235). Until it's possible for such services to
+        // post to non-BLOCK_SHUTDOWN sequences which are themselves funneled to
+        // the main execution sequence (a future plan for the post_task.h API),
+        // this DCHECK will be flaky and must be disabled.
+        // DCHECK(IsPostingBlockShutdownTaskAfterShutdownAllowed());
+// clang-format on
+#endif
+        state_->DecrementNumTasksBlockingShutdown();
+        return false;
+      }
+
+      ++num_block_shutdown_tasks_posted_during_shutdown_;
+
+      if (num_block_shutdown_tasks_posted_during_shutdown_ ==
+          kMaxBlockShutdownTasksPostedDuringShutdown) {
+        // Record the TaskScheduler.BlockShutdownTasksPostedDuringShutdown
+        // histogram as soon as its upper bound is hit. That way, a value will
+        // be recorded even if an infinite number of BLOCK_SHUTDOWN tasks are
+        // posted, preventing shutdown from completing.
+        RecordNumBlockShutdownTasksPostedDuringShutdown(
+            num_block_shutdown_tasks_posted_during_shutdown_);
+      }
+    }
+
+    return true;
+  }
+
+  // A non-BLOCK_SHUTDOWN task is allowed to be posted iff shutdown hasn't
+  // started.
+  return !state_->HasShutdownStarted();
+}
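+// Summary of the posting policy implemented above ("yes" = post allowed):
+//                         before     shutdown   shutdown
+//                         shutdown   started    complete
+//   BLOCK_SHUTDOWN        yes        yes        no (see TODO above)
+//   SKIP_ON_SHUTDOWN      yes        no         no
+//   CONTINUE_ON_SHUTDOWN  yes        no         no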
+
+bool TaskTracker::BeforeRunTask(TaskShutdownBehavior shutdown_behavior) {
+  switch (shutdown_behavior) {
+    case TaskShutdownBehavior::BLOCK_SHUTDOWN: {
+      // The number of tasks blocking shutdown has been incremented when the
+      // task was posted.
+      DCHECK(state_->AreTasksBlockingShutdown());
+
+      // Trying to run a BLOCK_SHUTDOWN task after shutdown has completed is
+      // unexpected: either the task shouldn't have been posted (shutdown had
+      // already completed), or it should still be blocking shutdown (it was
+      // posted before shutdown completed).
+      DCHECK(!state_->HasShutdownStarted() || !IsShutdownComplete());
+
+      return true;
+    }
+
+    case TaskShutdownBehavior::SKIP_ON_SHUTDOWN: {
+      // SKIP_ON_SHUTDOWN tasks block shutdown while they are running.
+      const bool shutdown_started = state_->IncrementNumTasksBlockingShutdown();
+
+      if (shutdown_started) {
+        // The SKIP_ON_SHUTDOWN task isn't allowed to run during shutdown.
+        // Decrement the number of tasks blocking shutdown that was wrongly
+        // incremented.
+        const bool shutdown_started_and_no_tasks_block_shutdown =
+            state_->DecrementNumTasksBlockingShutdown();
+        if (shutdown_started_and_no_tasks_block_shutdown)
+          OnBlockingShutdownTasksComplete();
+
+        return false;
+      }
+
+      return true;
+    }
+
+    case TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN: {
+      return !state_->HasShutdownStarted();
+    }
+  }
+
+  NOTREACHED();
+  return false;
+}
+
+void TaskTracker::AfterRunTask(TaskShutdownBehavior shutdown_behavior) {
+  if (shutdown_behavior == TaskShutdownBehavior::BLOCK_SHUTDOWN ||
+      shutdown_behavior == TaskShutdownBehavior::SKIP_ON_SHUTDOWN) {
+    const bool shutdown_started_and_no_tasks_block_shutdown =
+        state_->DecrementNumTasksBlockingShutdown();
+    if (shutdown_started_and_no_tasks_block_shutdown)
+      OnBlockingShutdownTasksComplete();
+  }
+}
+
+void TaskTracker::OnBlockingShutdownTasksComplete() {
+  AutoSchedulerLock auto_lock(shutdown_lock_);
+
+  // This method can only be called after shutdown has started.
+  DCHECK(state_->HasShutdownStarted());
+  DCHECK(shutdown_event_);
+
+  shutdown_event_->Signal();
+}
+
+void TaskTracker::DecrementNumIncompleteUndelayedTasks() {
+  const auto new_num_incomplete_undelayed_tasks =
+      subtle::Barrier_AtomicIncrement(&num_incomplete_undelayed_tasks_, -1);
+  DCHECK_GE(new_num_incomplete_undelayed_tasks, 0);
+  if (new_num_incomplete_undelayed_tasks == 0) {
+    {
+      AutoSchedulerLock auto_lock(flush_lock_);
+      flush_cv_->Signal();
+    }
+    CallFlushCallbackForTesting();
+  }
+}
+
+scoped_refptr<Sequence> TaskTracker::ManageBackgroundSequencesAfterRunningTask(
+    scoped_refptr<Sequence> just_ran_sequence,
+    CanScheduleSequenceObserver* observer) {
+  const TimeTicks next_task_sequenced_time =
+      just_ran_sequence
+          ? just_ran_sequence->GetSortKey().next_task_sequenced_time()
+          : TimeTicks();
+  PreemptedBackgroundSequence sequence_to_schedule;
+
+  {
+    AutoSchedulerLock auto_lock(background_lock_);
+
+    DCHECK(preempted_background_sequences_.empty() ||
+           num_scheduled_background_sequences_ ==
+               max_num_scheduled_background_sequences_);
+    --num_scheduled_background_sequences_;
+
+    if (just_ran_sequence) {
+      if (preempted_background_sequences_.empty() ||
+          preempted_background_sequences_.top().next_task_sequenced_time >
+              next_task_sequenced_time) {
+        ++num_scheduled_background_sequences_;
+        return just_ran_sequence;
+      }
+
+      preempted_background_sequences_.emplace(
+          std::move(just_ran_sequence), next_task_sequenced_time, observer);
+    }
+
+    if (!preempted_background_sequences_.empty()) {
+      sequence_to_schedule =
+          GetPreemptedBackgroundSequenceToScheduleLockRequired();
+    }
+  }
+
+  // |sequence_to_schedule.sequence| may be null if there was no preempted
+  // background sequence.
+  if (sequence_to_schedule.sequence)
+    SchedulePreemptedBackgroundSequence(std::move(sequence_to_schedule));
+
+  return nullptr;
+}
+
+void TaskTracker::CallFlushCallbackForTesting() {
+  OnceClosure flush_callback;
+  {
+    AutoSchedulerLock auto_lock(flush_lock_);
+    flush_callback = std::move(flush_callback_for_testing_);
+  }
+  if (flush_callback)
+    std::move(flush_callback).Run();
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/task_scheduler/task_tracker.h b/base/task_scheduler/task_tracker.h
new file mode 100644
index 0000000..760a8f7
--- /dev/null
+++ b/base/task_scheduler/task_tracker.h
@@ -0,0 +1,359 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_TASK_TRACKER_H_
+#define BASE_TASK_SCHEDULER_TASK_TRACKER_H_
+
+#include <functional>
+#include <memory>
+#include <queue>
+
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/callback_forward.h"
+#include "base/debug/task_annotator.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/metrics/histogram_base.h"
+#include "base/strings/string_piece.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/task_scheduler/can_schedule_sequence_observer.h"
+#include "base/task_scheduler/scheduler_lock.h"
+#include "base/task_scheduler/sequence.h"
+#include "base/task_scheduler/task.h"
+#include "base/task_scheduler/task_traits.h"
+#include "base/task_scheduler/tracked_ref.h"
+
+namespace base {
+
+class ConditionVariable;
+class HistogramBase;
+
+namespace internal {
+
+// TaskTracker enforces policies that determine whether:
+// - A task can be added to a sequence (WillPostTask).
+// - A sequence can be scheduled (WillScheduleSequence).
+// - The next task in a scheduled sequence can run (RunAndPopNextTask).
+// TaskTracker also sets up the environment to run a task (RunAndPopNextTask)
+// and records metrics and trace events. This class is thread-safe.
+//
+// Life of a sequence:
+// (possible states: IDLE, PREEMPTED, SCHEDULED, RUNNING)
+//
+//                            Create a sequence
+//                                   |
+//  ------------------------> Sequence is IDLE
+//  |                                |
+//  |                     Add a task to the sequence
+//  |            (allowed by TaskTracker::WillPostTask)
+//  |                                |
+//  |              TaskTracker::WillScheduleSequence
+//  |           _____________________|_____________________
+//  |           |                                          |
+//  |    Returns true                                Returns false
+//  |           |                                          |
+//  |           |                                Sequence is PREEMPTED <----
+//  |           |                                          |               |
+//  |           |                            Eventually,                   |
+//  |           |                            CanScheduleSequenceObserver   |
+//  |           |                            is notified that the          |
+//  |           |                            sequence can be scheduled.    |
+//  |           |__________________________________________|               |
+//  |                               |                                      |
+//  |                   (*) Sequence is SCHEDULED                          |
+//  |                               |                                      |
+//  |                A thread is ready to run the next                     |
+//  |                      task in the sequence                            |
+//  |                               |                                      |
+//  |                TaskTracker::RunAndPopNextTask                        |
+//  |                A task from the sequence is run                       |
+//  |                      Sequence is RUNNING                             |
+//  |                               |                                      |
+//  |         ______________________|____                                  |
+//  |         |                          |                                 |
+//  |   Sequence is empty      Sequence has more tasks                     |
+//  |_________|             _____________|_______________                  |
+//                          |                            |                 |
+//                   Sequence can be            Sequence cannot be         |
+//                   scheduled                  scheduled at this          |
+//                          |                   moment                     |
+//                   Go back to (*)                      |_________________|
+//
+//
+// Note: A background task is a task posted with TaskPriority::BACKGROUND. A
+// foreground task is a task posted with TaskPriority::USER_VISIBLE or
+// TaskPriority::USER_BLOCKING.
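+//
+// Minimal usage sketch (illustrative only; assumes a Task |task|, a Sequence
+// |sequence|, and a CanScheduleSequenceObserver |observer| built elsewhere;
+// the "Browser" label is made up):
+//   TaskTracker tracker("Browser");
+//   if (tracker.WillPostTask(task))
+//     sequence->PushTask(std::move(task));
+//   sequence = tracker.WillScheduleSequence(std::move(sequence), &observer);
+//   if (sequence)
+//     sequence = tracker.RunAndPopNextTask(std::move(sequence), &observer);
+//   tracker.Shutdown();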
+class BASE_EXPORT TaskTracker {
+ public:
+  // |histogram_label| is used to label histograms; it must not be empty.
+  // The first constructor sets the maximum number of TaskPriority::BACKGROUND
+  // sequences that can be scheduled concurrently to 0 if the
+  // --disable-background-tasks flag is specified, max() otherwise. The second
+  // constructor sets it to |max_num_scheduled_background_sequences|.
+  TaskTracker(StringPiece histogram_label);
+  TaskTracker(StringPiece histogram_label,
+              int max_num_scheduled_background_sequences);
+
+  virtual ~TaskTracker();
+
+  // Synchronously shuts down the scheduler. Once this is called, only tasks
+  // posted with the BLOCK_SHUTDOWN behavior will be run. Returns when:
+  // - All SKIP_ON_SHUTDOWN tasks that were already running have completed their
+  //   execution.
+  // - All posted BLOCK_SHUTDOWN tasks have completed their execution.
+  // CONTINUE_ON_SHUTDOWN tasks still may be running after Shutdown returns.
+  // This can only be called once.
+  void Shutdown();
+
+  // Waits until there are no incomplete undelayed tasks. May be called in tests
+  // to validate that a condition is met after all undelayed tasks have run.
+  //
+  // Does not wait for delayed tasks. Waits for undelayed tasks posted from
+  // other threads during the call. Returns immediately when shutdown completes.
+  void FlushForTesting();
+
+  // Returns immediately, and calls |flush_callback| when there are no
+  // incomplete undelayed tasks. |flush_callback| may be called back on any
+  // thread and should not perform a lot of work. May be used when additional
+  // work on the current thread needs to be performed during a flush. Only one
+  // FlushAsyncForTesting() may be pending at any given time.
+  void FlushAsyncForTesting(OnceClosure flush_callback);
+
+  // Informs this TaskTracker that |task| is about to be posted. Returns true if
+  // this operation is allowed (|task| should be posted if-and-only-if it is).
+  bool WillPostTask(const Task& task);
+
+  // Informs this TaskTracker that |sequence| is about to be scheduled. If this
+  // returns |sequence|, it is expected that RunAndPopNextTask() will soon be
+  // called with |sequence| as argument. Otherwise, RunAndPopNextTask() must not
+  // be called with |sequence| as argument until |observer| is notified that
+  // |sequence| can be scheduled (the caller doesn't need to keep a pointer to
+  // |sequence|; it will be included in the notification to |observer|).
+  // WillPostTask() must have allowed the task in front of |sequence| to be
+  // posted before this is called. |observer| is only required if the priority
+  // of |sequence| is TaskPriority::BACKGROUND.
+  scoped_refptr<Sequence> WillScheduleSequence(
+      scoped_refptr<Sequence> sequence,
+      CanScheduleSequenceObserver* observer);
+
+  // Runs the next task in |sequence| unless the current shutdown state prevents
+  // that. Then, pops the task from |sequence| (even if it didn't run). Returns
+  // |sequence| if it can be rescheduled immediately. If |sequence| is non-empty
+  // after popping a task from it but it can't be rescheduled immediately, it
+  // will be handed back to |observer| when it can be rescheduled.
+  // WillPostTask() must have allowed the task in front of |sequence| to be
+  // posted before this is called. Also, WillScheduleSequence(),
+  // RunAndPopNextTask() or CanScheduleSequenceObserver::OnCanScheduleSequence()
+  // must have allowed |sequence| to be (re)scheduled.
+  scoped_refptr<Sequence> RunAndPopNextTask(
+      scoped_refptr<Sequence> sequence,
+      CanScheduleSequenceObserver* observer);
+
+  // Returns true once shutdown has started (Shutdown() has been called but
+  // might not have returned). Note: sequential consistency with the thread
+  // calling Shutdown() (or SetHasShutdownStartedForTesting()) isn't guaranteed
+  // by this call.
+  bool HasShutdownStarted() const;
+
+  // Returns true if shutdown has completed (Shutdown() has returned).
+  bool IsShutdownComplete() const;
+
+  enum class LatencyHistogramType {
+    // Records the latency of each individual task posted through TaskTracker.
+    TASK_LATENCY,
+    // Records the latency of heartbeat tasks which are independent of current
+    // workload. These avoid a bias towards TASK_LATENCY reporting that high-
+    // priority tasks are "slower" than regular tasks because high-priority
+    // tasks tend to be correlated with heavy workloads.
+    HEARTBEAT_LATENCY,
+  };
+
+  // Causes HasShutdownStarted() to return true. Unlike when Shutdown() returns,
+  // IsShutdownComplete() won't return true after this returns. Shutdown()
+  // cannot be called after this.
+  void SetHasShutdownStartedForTesting();
+
+  // Records |Now() - posted_time| to the appropriate |latency_histogram_type|
+  // based on |task_traits|.
+  void RecordLatencyHistogram(LatencyHistogramType latency_histogram_type,
+                              TaskTraits task_traits,
+                              TimeTicks posted_time) const;
+
+  TrackedRef<TaskTracker> GetTrackedRef() {
+    return tracked_ref_factory_.GetTrackedRef();
+  }
+
+ protected:
+  // Runs and deletes |task| if |can_run_task| is true. Otherwise, just deletes
+  // |task|. |task| is always deleted in the environment where it runs or would
+  // have run. |sequence| is the sequence from which |task| was extracted. An
+  // override is expected to call its parent's implementation but is free to
+  // perform extra work before and after doing so.
+  virtual void RunOrSkipTask(Task task, Sequence* sequence, bool can_run_task);
+
+#if DCHECK_IS_ON()
+  // Returns true if this context should be exempt from blocking shutdown
+  // DCHECKs.
+  // TODO(robliao): Remove when http://crbug.com/698140 is fixed.
+  virtual bool IsPostingBlockShutdownTaskAfterShutdownAllowed();
+#endif
+
+  // Returns true if there are undelayed tasks that haven't completed their
+  // execution (still queued or in progress). If it returns false, the side-
+  // effects of all completed tasks are guaranteed to be visible to the caller.
+  bool HasIncompleteUndelayedTasksForTesting() const;
+
+ private:
+  class State;
+  struct PreemptedBackgroundSequence;
+
+  void PerformShutdown();
+
+  // Updates the maximum number of background sequences that can be scheduled
+  // concurrently to |max_num_scheduled_background_sequences|. Then, schedules
+  // as many preempted background sequences as allowed by the new value.
+  void SetMaxNumScheduledBackgroundSequences(
+      int max_num_scheduled_background_sequences);
+
+  // Pops the next sequence in |preempted_background_sequences_| and increments
+  // |num_scheduled_background_sequences_|. Must only be called in the scope of
+  // |background_lock_|, with |preempted_background_sequences_| non-empty. The
+  // caller must forward the returned sequence to the associated
+  // CanScheduleSequenceObserver as soon as |background_lock_| is released.
+  PreemptedBackgroundSequence
+  GetPreemptedBackgroundSequenceToScheduleLockRequired();
+
+  // Schedules |sequence_to_schedule.sequence| using
+  // |sequence_to_schedule.observer|. Does not verify that the sequence is
+  // allowed to be scheduled.
+  void SchedulePreemptedBackgroundSequence(
+      PreemptedBackgroundSequence sequence_to_schedule);
+
+  // Called before WillPostTask() informs the tracing system that a task has
+  // been posted. Updates |num_tasks_blocking_shutdown_| if necessary and
+  // returns true if the current shutdown state allows the task to be posted.
+  bool BeforePostTask(TaskShutdownBehavior shutdown_behavior);
+
+  // Called before a task with |shutdown_behavior| is run by RunTask(). Updates
+  // |num_tasks_blocking_shutdown_| if necessary and returns true if the current
+  // shutdown state allows the task to be run.
+  bool BeforeRunTask(TaskShutdownBehavior shutdown_behavior);
+
+  // Called after a task with |shutdown_behavior| has been run by RunTask().
+  // Updates |num_tasks_blocking_shutdown_| and signals |shutdown_cv_| if
+  // necessary.
+  void AfterRunTask(TaskShutdownBehavior shutdown_behavior);
+
+  // Called when the number of tasks blocking shutdown becomes zero after
+  // shutdown has started.
+  void OnBlockingShutdownTasksComplete();
+
+  // Decrements the number of incomplete undelayed tasks and signals |flush_cv_|
+  // if it reaches zero.
+  void DecrementNumIncompleteUndelayedTasks();
+
+  // To be called after running a background task from |just_ran_sequence|.
+  // Performs the following actions:
+  //  - If |just_ran_sequence| is non-null:
+  //    - returns it if it should be rescheduled by the caller of
+  //      RunAndPopNextTask(), i.e. its next task is set to run earlier than the
+  //      earliest currently preempted sequence.
+  //    - Otherwise |just_ran_sequence| is preempted and the next preempted
+  //      sequence is scheduled (|observer| will be notified when
+  //      |just_ran_sequence| should be scheduled again).
+  //  - If |just_ran_sequence| is null (RunAndPopNextTask() just popped the last
+  //    task from it):
+  //    - the next preempted sequence (if any) is scheduled.
+  //  - In all cases: adjusts the number of scheduled background sequences
+  //    accordingly.
+  scoped_refptr<Sequence> ManageBackgroundSequencesAfterRunningTask(
+      scoped_refptr<Sequence> just_ran_sequence,
+      CanScheduleSequenceObserver* observer);
+
+  // Calls |flush_callback_for_testing_| if one is available in a lock-safe
+  // manner.
+  void CallFlushCallbackForTesting();
+
+  debug::TaskAnnotator task_annotator_;
+
+  // Number of tasks blocking shutdown and boolean indicating whether shutdown
+  // has started.
+  const std::unique_ptr<State> state_;
+
+  // Number of undelayed tasks that haven't completed their execution. Is
+  // decremented with a memory barrier after a task runs. Is accessed with an
+  // acquire memory barrier in FlushForTesting(). The memory barriers ensure
+  // that the memory written by flushed tasks is visible when FlushForTesting()
+  // returns.
+  subtle::Atomic32 num_incomplete_undelayed_tasks_ = 0;
+
+  // Lock associated with |flush_cv_|. Partially synchronizes access to
+  // |num_incomplete_undelayed_tasks_|. Full synchronization isn't needed
+  // because it's atomic, but synchronization is needed to coordinate waking and
+  // sleeping at the right time. Fully synchronizes access to
+  // |flush_callback_for_testing_|.
+  mutable SchedulerLock flush_lock_;
+
+  // Signaled when |num_incomplete_undelayed_tasks_| is zero or reaches zero,
+  // or when shutdown completes.
+  const std::unique_ptr<ConditionVariable> flush_cv_;
+
+  // Invoked if non-null when |num_incomplete_undelayed_tasks_| is zero or when
+  // shutdown completes.
+  OnceClosure flush_callback_for_testing_;
+
+  // Synchronizes access to shutdown related members below.
+  mutable SchedulerLock shutdown_lock_;
+
+  // Event instantiated when shutdown starts and signaled when shutdown
+  // completes.
+  std::unique_ptr<WaitableEvent> shutdown_event_;
+
+  // Synchronizes accesses to |preempted_background_sequences_|,
+  // |max_num_scheduled_background_sequences_| and
+  // |num_scheduled_background_sequences_|.
+  SchedulerLock background_lock_;
+
+  // A priority queue of sequences that are waiting to be scheduled. Use
+  // std::greater so that the sequence which contains the task that has been
+  // posted the earliest is on top of the priority queue.
+  std::priority_queue<PreemptedBackgroundSequence,
+                      std::vector<PreemptedBackgroundSequence>,
+                      std::greater<PreemptedBackgroundSequence>>
+      preempted_background_sequences_;
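+
+  // Illustration of the std::greater ordering (a sketch; assumes operator>
+  // on PreemptedBackgroundSequence compares the next task's sequenced time):
+  //
+  //   std::priority_queue<int, std::vector<int>, std::greater<int>> q;
+  //   q.push(3);
+  //   q.push(1);
+  //   q.push(2);
+  //   // q.top() == 1: with std::greater, the smallest element (i.e. the
+  //   // earliest sequenced time) sits on top and is scheduled first.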
+
+  // Maximum number of background sequences that can be scheduled
+  // concurrently.
+  int max_num_scheduled_background_sequences_;
+
+  // Number of currently scheduled background sequences.
+  int num_scheduled_background_sequences_ = 0;
+
+  // TaskScheduler.TaskLatencyMicroseconds.* and
+  // TaskScheduler.HeartbeatLatencyMicroseconds.* histograms. The first index is
+  // a TaskPriority. The second index is 0 for non-blocking tasks, 1 for
+  // blocking tasks. Intentionally leaked.
+  // TODO(scheduler-dev): Consider using STATIC_HISTOGRAM_POINTER_GROUP for
+  // these.
+  static constexpr int kNumTaskPriorities =
+      static_cast<int>(TaskPriority::HIGHEST) + 1;
+  HistogramBase* const task_latency_histograms_[kNumTaskPriorities][2];
+  HistogramBase* const heartbeat_latency_histograms_[kNumTaskPriorities][2];
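+
+  // Example lookup under the indexing scheme above (hypothetical call site;
+  // |priority|, |may_block| and |latency| are assumed locals):
+  //
+  //   HistogramBase* histogram =
+  //       task_latency_histograms_[static_cast<int>(priority)]
+  //                               [may_block ? 1 : 0];
+  //   histogram->Add(static_cast<HistogramBase::Sample>(
+  //       latency.InMicroseconds()));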
+
+  // Number of BLOCK_SHUTDOWN tasks posted during shutdown.
+  HistogramBase::Sample num_block_shutdown_tasks_posted_during_shutdown_ = 0;
+
+  // Ensures all state (e.g. dangling cleaned up workers) is coalesced before
+  // destroying the TaskTracker (e.g. in test environments).
+  // Ref. https://crbug.com/827615.
+  TrackedRefFactory<TaskTracker> tracked_ref_factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(TaskTracker);
+};
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_TASK_TRACKER_H_
diff --git a/base/task_scheduler/task_tracker_posix.cc b/base/task_scheduler/task_tracker_posix.cc
new file mode 100644
index 0000000..8289d90
--- /dev/null
+++ b/base/task_scheduler/task_tracker_posix.cc
@@ -0,0 +1,33 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/task_tracker_posix.h"
+
+#include <utility>
+
+#include "base/files/file_descriptor_watcher_posix.h"
+
+namespace base {
+namespace internal {
+
+TaskTrackerPosix::TaskTrackerPosix(StringPiece name) : TaskTracker(name) {}
+
+TaskTrackerPosix::~TaskTrackerPosix() = default;
+
+void TaskTrackerPosix::RunOrSkipTask(Task task,
+                                     Sequence* sequence,
+                                     bool can_run_task) {
+  DCHECK(watch_file_descriptor_message_loop_);
+  FileDescriptorWatcher file_descriptor_watcher(
+      watch_file_descriptor_message_loop_);
+  TaskTracker::RunOrSkipTask(std::move(task), sequence, can_run_task);
+}
+
+#if DCHECK_IS_ON()
+bool TaskTrackerPosix::IsPostingBlockShutdownTaskAfterShutdownAllowed() {
+  return service_thread_handle_.is_equal(PlatformThread::CurrentHandle());
+}
+#endif
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/task_scheduler/task_tracker_posix.h b/base/task_scheduler/task_tracker_posix.h
new file mode 100644
index 0000000..4689f7a
--- /dev/null
+++ b/base/task_scheduler/task_tracker_posix.h
@@ -0,0 +1,74 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_TASK_TRACKER_POSIX_H_
+#define BASE_TASK_SCHEDULER_TASK_TRACKER_POSIX_H_
+
+#include <memory>
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/task_scheduler/task_tracker.h"
+#include "base/threading/platform_thread.h"
+
+namespace base {
+
+class MessageLoopForIO;
+
+namespace internal {
+
+struct Task;
+
+// A TaskTracker that instantiates a FileDescriptorWatcher in the scope in which
+// a task runs. Used on all POSIX platforms except NaCl SFI.
+// set_watch_file_descriptor_message_loop() must be called before the
+// TaskTracker can run tasks.
+class BASE_EXPORT TaskTrackerPosix : public TaskTracker {
+ public:
+  TaskTrackerPosix(StringPiece name);
+  ~TaskTrackerPosix() override;
+
+  // Sets the MessageLoopForIO with which to setup FileDescriptorWatcher in the
+  // scope in which tasks run. Must be called before starting to run tasks.
+  // External synchronization is required between a call to this and a call to
+  // RunTask().
+  void set_watch_file_descriptor_message_loop(
+      MessageLoopForIO* watch_file_descriptor_message_loop) {
+    watch_file_descriptor_message_loop_ = watch_file_descriptor_message_loop;
+  }
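+
+  // A minimal setup sketch (hypothetical call site, e.g. scheduler startup;
+  // |service_thread| is assumed to run a MessageLoop of TYPE_IO):
+  //
+  //   TaskTrackerPosix tracker("MyScheduler");
+  //   tracker.set_watch_file_descriptor_message_loop(
+  //       static_cast<MessageLoopForIO*>(service_thread->message_loop()));
+  //   // Only after this call may the tracker run tasks.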
+
+#if DCHECK_IS_ON()
+  // TODO(robliao): http://crbug.com/698140. This addresses service thread tasks
+  // that could run after the task scheduler has shut down. Anything from the
+  // service thread is exempted from the task scheduler shutdown DCHECKs.
+  void set_service_thread_handle(
+      const PlatformThreadHandle& service_thread_handle) {
+    DCHECK(!service_thread_handle.is_null());
+    service_thread_handle_ = service_thread_handle;
+  }
+#endif
+
+ protected:
+  // TaskTracker:
+  void RunOrSkipTask(Task task, Sequence* sequence, bool can_run_task) override;
+
+ private:
+#if DCHECK_IS_ON()
+  bool IsPostingBlockShutdownTaskAfterShutdownAllowed() override;
+#endif
+
+  MessageLoopForIO* watch_file_descriptor_message_loop_ = nullptr;
+
+#if DCHECK_IS_ON()
+  PlatformThreadHandle service_thread_handle_;
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(TaskTrackerPosix);
+};
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_TASK_TRACKER_POSIX_H_
diff --git a/base/task_scheduler/task_tracker_posix_unittest.cc b/base/task_scheduler/task_tracker_posix_unittest.cc
new file mode 100644
index 0000000..d8849de
--- /dev/null
+++ b/base/task_scheduler/task_tracker_posix_unittest.cc
@@ -0,0 +1,101 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/task_tracker_posix.h"
+
+#include <unistd.h>
+
+#include <utility>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/files/file_descriptor_watcher_posix.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/memory/ref_counted.h"
+#include "base/message_loop/message_loop.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/run_loop.h"
+#include "base/sequence_token.h"
+#include "base/task_scheduler/task.h"
+#include "base/task_scheduler/task_traits.h"
+#include "base/task_scheduler/test_utils.h"
+#include "base/test/null_task_runner.h"
+#include "base/threading/thread.h"
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace internal {
+
+namespace {
+
+class TaskSchedulerTaskTrackerPosixTest : public testing::Test {
+ public:
+  TaskSchedulerTaskTrackerPosixTest() : service_thread_("ServiceThread") {
+    Thread::Options service_thread_options;
+    service_thread_options.message_loop_type = MessageLoop::TYPE_IO;
+    service_thread_.StartWithOptions(service_thread_options);
+    tracker_.set_watch_file_descriptor_message_loop(
+        static_cast<MessageLoopForIO*>(service_thread_.message_loop()));
+  }
+
+ protected:
+  Thread service_thread_;
+  TaskTrackerPosix tracker_ = {"Test"};
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(TaskSchedulerTaskTrackerPosixTest);
+};
+
+}  // namespace
+
+// Verify that TaskTrackerPosix runs a Task it receives.
+TEST_F(TaskSchedulerTaskTrackerPosixTest, RunTask) {
+  bool did_run = false;
+  Task task(FROM_HERE,
+            Bind([](bool* did_run) { *did_run = true; }, Unretained(&did_run)),
+            TaskTraits(), TimeDelta());
+
+  EXPECT_TRUE(tracker_.WillPostTask(task));
+
+  auto sequence = test::CreateSequenceWithTask(std::move(task));
+  EXPECT_EQ(sequence, tracker_.WillScheduleSequence(sequence, nullptr));
+  // Expect RunAndPopNextTask to return nullptr since |sequence| is empty after
+  // popping a task from it.
+  EXPECT_FALSE(tracker_.RunAndPopNextTask(sequence, nullptr));
+
+  EXPECT_TRUE(did_run);
+}
+
+// Verify that FileDescriptorWatcher::WatchReadable() can be called from a task
+// running in TaskTrackerPosix without a crash.
+TEST_F(TaskSchedulerTaskTrackerPosixTest, FileDescriptorWatcher) {
+  int fds[2];
+  ASSERT_EQ(0, pipe(fds));
+  Task task(FROM_HERE,
+            Bind(IgnoreResult(&FileDescriptorWatcher::WatchReadable), fds[0],
+                 DoNothing()),
+            TaskTraits(), TimeDelta());
+  // FileDescriptorWatcher::WatchReadable needs a SequencedTaskRunnerHandle.
+  task.sequenced_task_runner_ref = MakeRefCounted<NullTaskRunner>();
+
+  EXPECT_TRUE(tracker_.WillPostTask(task));
+
+  auto sequence = test::CreateSequenceWithTask(std::move(task));
+  EXPECT_EQ(sequence, tracker_.WillScheduleSequence(sequence, nullptr));
+  // Expect RunAndPopNextTask to return nullptr since |sequence| is empty after
+  // popping a task from it.
+  EXPECT_FALSE(tracker_.RunAndPopNextTask(sequence, nullptr));
+
+  // Join the service thread to make sure that the read watch is registered and
+  // unregistered before file descriptors are closed.
+  service_thread_.Stop();
+
+  EXPECT_EQ(0, IGNORE_EINTR(close(fds[0])));
+  EXPECT_EQ(0, IGNORE_EINTR(close(fds[1])));
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/task_scheduler/task_tracker_unittest.cc b/base/task_scheduler/task_tracker_unittest.cc
new file mode 100644
index 0000000..ea8a3c1
--- /dev/null
+++ b/base/task_scheduler/task_tracker_unittest.cc
@@ -0,0 +1,1368 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/task_tracker.h"
+
+#include <stdint.h>
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/callback.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/memory/ref_counted.h"
+#include "base/metrics/histogram_base.h"
+#include "base/metrics/histogram_samples.h"
+#include "base/metrics/statistics_recorder.h"
+#include "base/sequence_token.h"
+#include "base/sequenced_task_runner.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/atomic_flag.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/task_scheduler/scheduler_lock.h"
+#include "base/task_scheduler/task.h"
+#include "base/task_scheduler/task_traits.h"
+#include "base/task_scheduler/test_utils.h"
+#include "base/test/gtest_util.h"
+#include "base/test/histogram_tester.h"
+#include "base/test/test_simple_task_runner.h"
+#include "base/test/test_timeouts.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/sequenced_task_runner_handle.h"
+#include "base/threading/simple_thread.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace internal {
+
+namespace {
+
+constexpr size_t kLoadTestNumIterations = 75;
+
+class MockCanScheduleSequenceObserver : public CanScheduleSequenceObserver {
+ public:
+  void OnCanScheduleSequence(scoped_refptr<Sequence> sequence) override {
+    MockOnCanScheduleSequence(sequence.get());
+  }
+
+  MOCK_METHOD1(MockOnCanScheduleSequence, void(Sequence*));
+};
+
+// Invokes a closure asynchronously.
+class CallbackThread : public SimpleThread {
+ public:
+  explicit CallbackThread(const Closure& closure)
+      : SimpleThread("CallbackThread"), closure_(closure) {}
+
+  // Returns true once the callback returns.
+  bool has_returned() { return has_returned_.IsSet(); }
+
+ private:
+  void Run() override {
+    closure_.Run();
+    has_returned_.Set();
+  }
+
+  const Closure closure_;
+  AtomicFlag has_returned_;
+
+  DISALLOW_COPY_AND_ASSIGN(CallbackThread);
+};
+
+class ThreadPostingAndRunningTask : public SimpleThread {
+ public:
+  enum class Action {
+    WILL_POST,
+    RUN,
+    WILL_POST_AND_RUN,
+  };
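+
+  // WILL_POST: only call tracker->WillPostTask() on the referenced task.
+  // RUN: only run the owned task (requires the Task-by-value constructor).
+  // WILL_POST_AND_RUN: post the task, then run it if posting succeeded.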
+
+  ThreadPostingAndRunningTask(TaskTracker* tracker,
+                              Task* task,
+                              Action action,
+                              bool expect_post_succeeds)
+      : SimpleThread("ThreadPostingAndRunningTask"),
+        tracker_(tracker),
+        owned_task_(FROM_HERE, OnceClosure(), TaskTraits(), TimeDelta()),
+        task_(task),
+        action_(action),
+        expect_post_succeeds_(expect_post_succeeds) {
+    EXPECT_TRUE(task_);
+
+    // Ownership of the Task is required to run it.
+    EXPECT_NE(Action::RUN, action_);
+    EXPECT_NE(Action::WILL_POST_AND_RUN, action_);
+  }
+
+  ThreadPostingAndRunningTask(TaskTracker* tracker,
+                              Task task,
+                              Action action,
+                              bool expect_post_succeeds)
+      : SimpleThread("ThreadPostingAndRunningTask"),
+        tracker_(tracker),
+        owned_task_(std::move(task)),
+        task_(&owned_task_),
+        action_(action),
+        expect_post_succeeds_(expect_post_succeeds) {
+    EXPECT_TRUE(owned_task_.task);
+  }
+
+ private:
+  void Run() override {
+    bool post_succeeded = true;
+    if (action_ == Action::WILL_POST || action_ == Action::WILL_POST_AND_RUN) {
+      post_succeeded = tracker_->WillPostTask(*task_);
+      EXPECT_EQ(expect_post_succeeds_, post_succeeded);
+    }
+    if (post_succeeded &&
+        (action_ == Action::RUN || action_ == Action::WILL_POST_AND_RUN)) {
+      EXPECT_TRUE(owned_task_.task);
+
+      testing::StrictMock<MockCanScheduleSequenceObserver>
+          never_notified_observer;
+      auto sequence = tracker_->WillScheduleSequence(
+          test::CreateSequenceWithTask(std::move(owned_task_)),
+          &never_notified_observer);
+      ASSERT_TRUE(sequence);
+      // Expect RunAndPopNextTask to return nullptr since |sequence| is empty
+      // after popping a task from it.
+      EXPECT_FALSE(tracker_->RunAndPopNextTask(std::move(sequence),
+                                               &never_notified_observer));
+    }
+  }
+
+  TaskTracker* const tracker_;
+  Task owned_task_;
+  Task* task_;
+  const Action action_;
+  const bool expect_post_succeeds_;
+
+  DISALLOW_COPY_AND_ASSIGN(ThreadPostingAndRunningTask);
+};
+
+class ScopedSetSingletonAllowed {
+ public:
+  ScopedSetSingletonAllowed(bool singleton_allowed)
+      : previous_value_(
+            ThreadRestrictions::SetSingletonAllowed(singleton_allowed)) {}
+  ~ScopedSetSingletonAllowed() {
+    ThreadRestrictions::SetSingletonAllowed(previous_value_);
+  }
+
+ private:
+  const bool previous_value_;
+};
+
+class TaskSchedulerTaskTrackerTest
+    : public testing::TestWithParam<TaskShutdownBehavior> {
+ protected:
+  TaskSchedulerTaskTrackerTest() = default;
+
+  // Creates a task with |shutdown_behavior|.
+  Task CreateTask(TaskShutdownBehavior shutdown_behavior) {
+    return Task(
+        FROM_HERE,
+        Bind(&TaskSchedulerTaskTrackerTest::RunTaskCallback, Unretained(this)),
+        TaskTraits(shutdown_behavior), TimeDelta());
+  }
+
+  void DispatchAndRunTaskWithTracker(Task task) {
+    auto sequence = tracker_.WillScheduleSequence(
+        test::CreateSequenceWithTask(std::move(task)),
+        &never_notified_observer_);
+    ASSERT_TRUE(sequence);
+    tracker_.RunAndPopNextTask(std::move(sequence), &never_notified_observer_);
+  }
+
+  // Calls tracker_.Shutdown() on a new thread. When this returns, the
+  // Shutdown() method has been entered on the new thread, but it hasn't
+  // necessarily returned.
+  void CallShutdownAsync() {
+    ASSERT_FALSE(thread_calling_shutdown_);
+    thread_calling_shutdown_.reset(new CallbackThread(
+        Bind(&TaskTracker::Shutdown, Unretained(&tracker_))));
+    thread_calling_shutdown_->Start();
+    while (!tracker_.HasShutdownStarted())
+      PlatformThread::YieldCurrentThread();
+  }
+
+  void WaitForAsyncIsShutdownComplete() {
+    ASSERT_TRUE(thread_calling_shutdown_);
+    thread_calling_shutdown_->Join();
+    EXPECT_TRUE(thread_calling_shutdown_->has_returned());
+    EXPECT_TRUE(tracker_.IsShutdownComplete());
+  }
+
+  void VerifyAsyncShutdownInProgress() {
+    ASSERT_TRUE(thread_calling_shutdown_);
+    EXPECT_FALSE(thread_calling_shutdown_->has_returned());
+    EXPECT_TRUE(tracker_.HasShutdownStarted());
+    EXPECT_FALSE(tracker_.IsShutdownComplete());
+  }
+
+  // Calls tracker_.FlushForTesting() on a new thread.
+  void CallFlushFromAnotherThread() {
+    ASSERT_FALSE(thread_calling_flush_);
+    thread_calling_flush_.reset(new CallbackThread(
+        Bind(&TaskTracker::FlushForTesting, Unretained(&tracker_))));
+    thread_calling_flush_->Start();
+  }
+
+  void WaitForAsyncFlushReturned() {
+    ASSERT_TRUE(thread_calling_flush_);
+    thread_calling_flush_->Join();
+    EXPECT_TRUE(thread_calling_flush_->has_returned());
+  }
+
+  void VerifyAsyncFlushInProgress() {
+    ASSERT_TRUE(thread_calling_flush_);
+    EXPECT_FALSE(thread_calling_flush_->has_returned());
+  }
+
+  size_t NumTasksExecuted() {
+    AutoSchedulerLock auto_lock(lock_);
+    return num_tasks_executed_;
+  }
+
+  TaskTracker tracker_ = {"Test"};
+  testing::StrictMock<MockCanScheduleSequenceObserver> never_notified_observer_;
+
+ private:
+  void RunTaskCallback() {
+    AutoSchedulerLock auto_lock(lock_);
+    ++num_tasks_executed_;
+  }
+
+  std::unique_ptr<CallbackThread> thread_calling_shutdown_;
+  std::unique_ptr<CallbackThread> thread_calling_flush_;
+
+  // Synchronizes accesses to |num_tasks_executed_|.
+  SchedulerLock lock_;
+
+  size_t num_tasks_executed_ = 0;
+
+  DISALLOW_COPY_AND_ASSIGN(TaskSchedulerTaskTrackerTest);
+};
+
+#define WAIT_FOR_ASYNC_SHUTDOWN_COMPLETED() \
+  do {                                      \
+    SCOPED_TRACE("");                       \
+    WaitForAsyncIsShutdownComplete();       \
+  } while (false)
+
+#define VERIFY_ASYNC_SHUTDOWN_IN_PROGRESS() \
+  do {                                      \
+    SCOPED_TRACE("");                       \
+    VerifyAsyncShutdownInProgress();        \
+  } while (false)
+
+#define WAIT_FOR_ASYNC_FLUSH_RETURNED() \
+  do {                                  \
+    SCOPED_TRACE("");                   \
+    WaitForAsyncFlushReturned();        \
+  } while (false)
+
+#define VERIFY_ASYNC_FLUSH_IN_PROGRESS() \
+  do {                                   \
+    SCOPED_TRACE("");                    \
+    VerifyAsyncFlushInProgress();        \
+  } while (false)
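+
+// Each macro above wraps its helper in do/while(false) so it can be used as a
+// single statement, and SCOPED_TRACE("") attaches the call site's file and
+// line to any failure reported from inside the helper.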
+
+}  // namespace
+
+TEST_P(TaskSchedulerTaskTrackerTest, WillPostAndRunBeforeShutdown) {
+  Task task(CreateTask(GetParam()));
+
+  // Inform |tracker_| that |task| will be posted.
+  EXPECT_TRUE(tracker_.WillPostTask(task));
+
+  // Run the task.
+  EXPECT_EQ(0U, NumTasksExecuted());
+
+  DispatchAndRunTaskWithTracker(std::move(task));
+  EXPECT_EQ(1U, NumTasksExecuted());
+
+  // Shutdown() shouldn't block.
+  tracker_.Shutdown();
+}
+
+TEST_P(TaskSchedulerTaskTrackerTest, WillPostAndRunLongTaskBeforeShutdown) {
+  // Create a task that signals |task_running| and blocks until |task_barrier|
+  // is signaled.
+  WaitableEvent task_running(WaitableEvent::ResetPolicy::AUTOMATIC,
+                             WaitableEvent::InitialState::NOT_SIGNALED);
+  WaitableEvent task_barrier(WaitableEvent::ResetPolicy::AUTOMATIC,
+                             WaitableEvent::InitialState::NOT_SIGNALED);
+  Task blocked_task(
+      FROM_HERE,
+      Bind(
+          [](WaitableEvent* task_running, WaitableEvent* task_barrier) {
+            task_running->Signal();
+            task_barrier->Wait();
+          },
+          Unretained(&task_running), Unretained(&task_barrier)),
+      TaskTraits(WithBaseSyncPrimitives(), GetParam()), TimeDelta());
+
+  // Inform |tracker_| that |blocked_task| will be posted.
+  EXPECT_TRUE(tracker_.WillPostTask(blocked_task));
+
+  // Create a thread to run the task. Wait until the task starts running.
+  ThreadPostingAndRunningTask thread_running_task(
+      &tracker_, std::move(blocked_task),
+      ThreadPostingAndRunningTask::Action::RUN, false);
+  thread_running_task.Start();
+  task_running.Wait();
+
+  // Initiate shutdown after the task has started to run.
+  CallShutdownAsync();
+
+  if (GetParam() == TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN) {
+    // Shutdown should complete even with a CONTINUE_ON_SHUTDOWN in progress.
+    WAIT_FOR_ASYNC_SHUTDOWN_COMPLETED();
+  } else {
+    // Shutdown should block with any non CONTINUE_ON_SHUTDOWN task in progress.
+    VERIFY_ASYNC_SHUTDOWN_IN_PROGRESS();
+  }
+
+  // Unblock the task.
+  task_barrier.Signal();
+  thread_running_task.Join();
+
+  // Shutdown should now complete for a non CONTINUE_ON_SHUTDOWN task.
+  if (GetParam() != TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN)
+    WAIT_FOR_ASYNC_SHUTDOWN_COMPLETED();
+}
+
+TEST_P(TaskSchedulerTaskTrackerTest, WillPostBeforeShutdownRunDuringShutdown) {
+  // Inform |tracker_| that a task will be posted.
+  Task task(CreateTask(GetParam()));
+  EXPECT_TRUE(tracker_.WillPostTask(task));
+
+  // Inform |tracker_| that a BLOCK_SHUTDOWN task will be posted just to
+  // block shutdown.
+  Task block_shutdown_task(CreateTask(TaskShutdownBehavior::BLOCK_SHUTDOWN));
+  EXPECT_TRUE(tracker_.WillPostTask(block_shutdown_task));
+
+  // Call Shutdown() asynchronously.
+  CallShutdownAsync();
+  VERIFY_ASYNC_SHUTDOWN_IN_PROGRESS();
+
+  // Try to run |task|. It should only run if it's BLOCK_SHUTDOWN. Otherwise it
+  // should be discarded.
+  EXPECT_EQ(0U, NumTasksExecuted());
+  const bool should_run = GetParam() == TaskShutdownBehavior::BLOCK_SHUTDOWN;
+
+  DispatchAndRunTaskWithTracker(std::move(task));
+  EXPECT_EQ(should_run ? 1U : 0U, NumTasksExecuted());
+  VERIFY_ASYNC_SHUTDOWN_IN_PROGRESS();
+
+  // Unblock shutdown by running the remaining BLOCK_SHUTDOWN task.
+  DispatchAndRunTaskWithTracker(std::move(block_shutdown_task));
+  EXPECT_EQ(should_run ? 2U : 1U, NumTasksExecuted());
+  WAIT_FOR_ASYNC_SHUTDOWN_COMPLETED();
+}
+
+TEST_P(TaskSchedulerTaskTrackerTest, WillPostBeforeShutdownRunAfterShutdown) {
+  // Inform |tracker_| that a task will be posted.
+  Task task(CreateTask(GetParam()));
+  EXPECT_TRUE(tracker_.WillPostTask(task));
+
+  // Call Shutdown() asynchronously.
+  CallShutdownAsync();
+  EXPECT_EQ(0U, NumTasksExecuted());
+
+  if (GetParam() == TaskShutdownBehavior::BLOCK_SHUTDOWN) {
+    VERIFY_ASYNC_SHUTDOWN_IN_PROGRESS();
+
+    // Run the task to unblock shutdown.
+    DispatchAndRunTaskWithTracker(std::move(task));
+    EXPECT_EQ(1U, NumTasksExecuted());
+    WAIT_FOR_ASYNC_SHUTDOWN_COMPLETED();
+
+    // It is not possible to test running a BLOCK_SHUTDOWN task posted before
+    // shutdown after shutdown because Shutdown() won't return if there are
+    // pending BLOCK_SHUTDOWN tasks.
+  } else {
+    WAIT_FOR_ASYNC_SHUTDOWN_COMPLETED();
+
+    // The task shouldn't be allowed to run after shutdown.
+    DispatchAndRunTaskWithTracker(std::move(task));
+    EXPECT_EQ(0U, NumTasksExecuted());
+  }
+}
+
+TEST_P(TaskSchedulerTaskTrackerTest, WillPostAndRunDuringShutdown) {
+  // Inform |tracker_| that a BLOCK_SHUTDOWN task will be posted just to
+  // block shutdown.
+  Task block_shutdown_task(CreateTask(TaskShutdownBehavior::BLOCK_SHUTDOWN));
+  EXPECT_TRUE(tracker_.WillPostTask(block_shutdown_task));
+
+  // Call Shutdown() asynchronously.
+  CallShutdownAsync();
+  VERIFY_ASYNC_SHUTDOWN_IN_PROGRESS();
+
+  if (GetParam() == TaskShutdownBehavior::BLOCK_SHUTDOWN) {
+    // Inform |tracker_| that a BLOCK_SHUTDOWN task will be posted.
+    Task task(CreateTask(GetParam()));
+    EXPECT_TRUE(tracker_.WillPostTask(task));
+
+    // Run the BLOCK_SHUTDOWN task.
+    EXPECT_EQ(0U, NumTasksExecuted());
+    DispatchAndRunTaskWithTracker(std::move(task));
+    EXPECT_EQ(1U, NumTasksExecuted());
+  } else {
+    // It shouldn't be allowed to post a non BLOCK_SHUTDOWN task.
+    Task task(CreateTask(GetParam()));
+    EXPECT_FALSE(tracker_.WillPostTask(task));
+
+    // Don't try to run the task, because it wasn't allowed to be posted.
+  }
+
+  // Unblock shutdown by running |block_shutdown_task|.
+  VERIFY_ASYNC_SHUTDOWN_IN_PROGRESS();
+  DispatchAndRunTaskWithTracker(std::move(block_shutdown_task));
+  EXPECT_EQ(GetParam() == TaskShutdownBehavior::BLOCK_SHUTDOWN ? 2U : 1U,
+            NumTasksExecuted());
+  WAIT_FOR_ASYNC_SHUTDOWN_COMPLETED();
+}
+
+TEST_P(TaskSchedulerTaskTrackerTest, WillPostAfterShutdown) {
+  tracker_.Shutdown();
+
+  Task task(CreateTask(GetParam()));
+
+  // |tracker_| shouldn't allow a task to be posted after shutdown.
+  EXPECT_FALSE(tracker_.WillPostTask(task));
+}
+
+// Verify that BLOCK_SHUTDOWN and SKIP_ON_SHUTDOWN tasks can
+// AssertSingletonAllowed() but CONTINUE_ON_SHUTDOWN tasks can't.
+TEST_P(TaskSchedulerTaskTrackerTest, SingletonAllowed) {
+  const bool can_use_singletons =
+      (GetParam() != TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN);
+
+  Task task(FROM_HERE, BindOnce(&ThreadRestrictions::AssertSingletonAllowed),
+            TaskTraits(GetParam()), TimeDelta());
+  EXPECT_TRUE(tracker_.WillPostTask(task));
+
+  // Set the singleton allowed bit to the opposite of what it is expected to be
+  // when |tracker| runs |task| to verify that |tracker| actually sets the
+  // correct value.
+  ScopedSetSingletonAllowed scoped_singleton_allowed(!can_use_singletons);
+
+  // Running the task should fail iff the task isn't allowed to use singletons.
+  if (can_use_singletons) {
+    DispatchAndRunTaskWithTracker(std::move(task));
+  } else {
+    EXPECT_DCHECK_DEATH({ DispatchAndRunTaskWithTracker(std::move(task)); });
+  }
+}
+
+// Verify that AssertBlockingAllowed() succeeds only for a MayBlock() task.
+TEST_P(TaskSchedulerTaskTrackerTest, IOAllowed) {
+  // Unset the IO allowed bit. Expect TaskTracker to set it before running a
+  // task with the MayBlock() trait.
+  ThreadRestrictions::SetIOAllowed(false);
+  Task task_with_may_block(FROM_HERE, Bind([]() {
+                             // Shouldn't fail.
+                             AssertBlockingAllowed();
+                           }),
+                           TaskTraits(MayBlock(), GetParam()), TimeDelta());
+  EXPECT_TRUE(tracker_.WillPostTask(task_with_may_block));
+  DispatchAndRunTaskWithTracker(std::move(task_with_may_block));
+
+  // Set the IO allowed bit. Expect TaskTracker to unset it before running a
+  // task without the MayBlock() trait.
+  ThreadRestrictions::SetIOAllowed(true);
+  Task task_without_may_block(
+      FROM_HERE,
+      Bind([]() { EXPECT_DCHECK_DEATH({ AssertBlockingAllowed(); }); }),
+      TaskTraits(GetParam()), TimeDelta());
+  EXPECT_TRUE(tracker_.WillPostTask(task_without_may_block));
+  DispatchAndRunTaskWithTracker(std::move(task_without_may_block));
+}
+
+static void RunTaskRunnerHandleVerificationTask(TaskTracker* tracker,
+                                                Task verify_task) {
+  // Pretend |verify_task| is posted to respect TaskTracker's contract.
+  EXPECT_TRUE(tracker->WillPostTask(verify_task));
+
+  // Confirm that the test conditions are right (no TaskRunnerHandles set
+  // already).
+  EXPECT_FALSE(ThreadTaskRunnerHandle::IsSet());
+  EXPECT_FALSE(SequencedTaskRunnerHandle::IsSet());
+
+  testing::StrictMock<MockCanScheduleSequenceObserver> never_notified_observer;
+  auto sequence = tracker->WillScheduleSequence(
+      test::CreateSequenceWithTask(std::move(verify_task)),
+      &never_notified_observer);
+  ASSERT_TRUE(sequence);
+  tracker->RunAndPopNextTask(std::move(sequence), &never_notified_observer);
+
+  // TaskRunnerHandle state is reset outside of task's scope.
+  EXPECT_FALSE(ThreadTaskRunnerHandle::IsSet());
+  EXPECT_FALSE(SequencedTaskRunnerHandle::IsSet());
+}
+
+static void VerifyNoTaskRunnerHandle() {
+  EXPECT_FALSE(ThreadTaskRunnerHandle::IsSet());
+  EXPECT_FALSE(SequencedTaskRunnerHandle::IsSet());
+}
+
+TEST_P(TaskSchedulerTaskTrackerTest, TaskRunnerHandleIsNotSetOnParallel) {
+  // Create a task that will verify that TaskRunnerHandles are not set in its
+  // scope, since no TaskRunner ref is set on it.
+  Task verify_task(FROM_HERE, BindOnce(&VerifyNoTaskRunnerHandle),
+                   TaskTraits(GetParam()), TimeDelta());
+
+  RunTaskRunnerHandleVerificationTask(&tracker_, std::move(verify_task));
+}
+
+static void VerifySequencedTaskRunnerHandle(
+    const SequencedTaskRunner* expected_task_runner) {
+  EXPECT_FALSE(ThreadTaskRunnerHandle::IsSet());
+  EXPECT_TRUE(SequencedTaskRunnerHandle::IsSet());
+  EXPECT_EQ(expected_task_runner, SequencedTaskRunnerHandle::Get());
+}
+
+TEST_P(TaskSchedulerTaskTrackerTest,
+       SequencedTaskRunnerHandleIsSetOnSequenced) {
+  scoped_refptr<SequencedTaskRunner> test_task_runner(new TestSimpleTaskRunner);
+
+  // Create a task that will verify that SequencedTaskRunnerHandle is properly
+  // set to |test_task_runner| in its scope per |sequenced_task_runner_ref|
+  // being set on it.
+  Task verify_task(FROM_HERE,
+                   BindOnce(&VerifySequencedTaskRunnerHandle,
+                            Unretained(test_task_runner.get())),
+                   TaskTraits(GetParam()), TimeDelta());
+  verify_task.sequenced_task_runner_ref = test_task_runner;
+
+  RunTaskRunnerHandleVerificationTask(&tracker_, std::move(verify_task));
+}
+
+static void VerifyThreadTaskRunnerHandle(
+    const SingleThreadTaskRunner* expected_task_runner) {
+  EXPECT_TRUE(ThreadTaskRunnerHandle::IsSet());
+  // SequencedTaskRunnerHandle is also set whenever ThreadTaskRunnerHandle is
+  // set for the thread.
+  EXPECT_TRUE(SequencedTaskRunnerHandle::IsSet());
+  EXPECT_EQ(expected_task_runner, ThreadTaskRunnerHandle::Get());
+}
+
+TEST_P(TaskSchedulerTaskTrackerTest,
+       ThreadTaskRunnerHandleIsSetOnSingleThreaded) {
+  scoped_refptr<SingleThreadTaskRunner> test_task_runner(
+      new TestSimpleTaskRunner);
+
+  // Create a task that will verify that ThreadTaskRunnerHandle is properly set
+  // to |test_task_runner| in its scope per |single_thread_task_runner_ref|
+  // being set on it.
+  Task verify_task(FROM_HERE,
+                   BindOnce(&VerifyThreadTaskRunnerHandle,
+                            Unretained(test_task_runner.get())),
+                   TaskTraits(GetParam()), TimeDelta());
+  verify_task.single_thread_task_runner_ref = test_task_runner;
+
+  RunTaskRunnerHandleVerificationTask(&tracker_, std::move(verify_task));
+}
+
+TEST_P(TaskSchedulerTaskTrackerTest, FlushPendingDelayedTask) {
+  const Task delayed_task(FROM_HERE, DoNothing(), TaskTraits(GetParam()),
+                          TimeDelta::FromDays(1));
+  tracker_.WillPostTask(delayed_task);
+  // FlushForTesting() should return even if the delayed task didn't run.
+  tracker_.FlushForTesting();
+}
+
+TEST_P(TaskSchedulerTaskTrackerTest, FlushAsyncForTestingPendingDelayedTask) {
+  const Task delayed_task(FROM_HERE, DoNothing(), TaskTraits(GetParam()),
+                          TimeDelta::FromDays(1));
+  tracker_.WillPostTask(delayed_task);
+  // FlushAsyncForTesting() should callback even if the delayed task didn't run.
+  bool called_back = false;
+  tracker_.FlushAsyncForTesting(
+      BindOnce([](bool* called_back) { *called_back = true; },
+               Unretained(&called_back)));
+  EXPECT_TRUE(called_back);
+}
+
+TEST_P(TaskSchedulerTaskTrackerTest, FlushPendingUndelayedTask) {
+  Task undelayed_task(FROM_HERE, DoNothing(), TaskTraits(GetParam()),
+                      TimeDelta());
+  tracker_.WillPostTask(undelayed_task);
+
+  // FlushForTesting() shouldn't return before the undelayed task runs.
+  CallFlushFromAnotherThread();
+  PlatformThread::Sleep(TestTimeouts::tiny_timeout());
+  VERIFY_ASYNC_FLUSH_IN_PROGRESS();
+
+  // FlushForTesting() should return after the undelayed task runs.
+  DispatchAndRunTaskWithTracker(std::move(undelayed_task));
+  WAIT_FOR_ASYNC_FLUSH_RETURNED();
+}
+
+TEST_P(TaskSchedulerTaskTrackerTest, FlushAsyncForTestingPendingUndelayedTask) {
+  Task undelayed_task(FROM_HERE, DoNothing(), TaskTraits(GetParam()),
+                      TimeDelta());
+  tracker_.WillPostTask(undelayed_task);
+
+  // FlushAsyncForTesting() shouldn't callback before the undelayed task runs.
+  WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+                      WaitableEvent::InitialState::NOT_SIGNALED);
+  tracker_.FlushAsyncForTesting(
+      BindOnce(&WaitableEvent::Signal, Unretained(&event)));
+  PlatformThread::Sleep(TestTimeouts::tiny_timeout());
+  EXPECT_FALSE(event.IsSignaled());
+
+  // FlushAsyncForTesting() should callback after the undelayed task runs.
+  DispatchAndRunTaskWithTracker(std::move(undelayed_task));
+  event.Wait();
+}
+
+TEST_P(TaskSchedulerTaskTrackerTest, PostTaskDuringFlush) {
+  Task undelayed_task(FROM_HERE, DoNothing(), TaskTraits(GetParam()),
+                      TimeDelta());
+  tracker_.WillPostTask(undelayed_task);
+
+  // FlushForTesting() shouldn't return before the undelayed task runs.
+  CallFlushFromAnotherThread();
+  PlatformThread::Sleep(TestTimeouts::tiny_timeout());
+  VERIFY_ASYNC_FLUSH_IN_PROGRESS();
+
+  // Simulate posting another undelayed task.
+  Task other_undelayed_task(FROM_HERE, DoNothing(), TaskTraits(GetParam()),
+                            TimeDelta());
+  tracker_.WillPostTask(other_undelayed_task);
+
+  // Run the first undelayed task.
+  DispatchAndRunTaskWithTracker(std::move(undelayed_task));
+
+  // FlushForTesting() shouldn't return before the second undelayed task runs.
+  PlatformThread::Sleep(TestTimeouts::tiny_timeout());
+  VERIFY_ASYNC_FLUSH_IN_PROGRESS();
+
+  // FlushForTesting() should return after the second undelayed task runs.
+  DispatchAndRunTaskWithTracker(std::move(other_undelayed_task));
+  WAIT_FOR_ASYNC_FLUSH_RETURNED();
+}
+
+TEST_P(TaskSchedulerTaskTrackerTest, PostTaskDuringFlushAsyncForTesting) {
+  Task undelayed_task(FROM_HERE, DoNothing(), TaskTraits(GetParam()),
+                      TimeDelta());
+  tracker_.WillPostTask(undelayed_task);
+
+  // FlushAsyncForTesting() shouldn't callback before the undelayed task runs.
+  WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+                      WaitableEvent::InitialState::NOT_SIGNALED);
+  tracker_.FlushAsyncForTesting(
+      BindOnce(&WaitableEvent::Signal, Unretained(&event)));
+  PlatformThread::Sleep(TestTimeouts::tiny_timeout());
+  EXPECT_FALSE(event.IsSignaled());
+
+  // Simulate posting another undelayed task.
+  Task other_undelayed_task(FROM_HERE, DoNothing(), TaskTraits(GetParam()),
+                            TimeDelta());
+  tracker_.WillPostTask(other_undelayed_task);
+
+  // Run the first undelayed task.
+  DispatchAndRunTaskWithTracker(std::move(undelayed_task));
+
+  // FlushAsyncForTesting() shouldn't callback before the second undelayed task
+  // runs.
+  PlatformThread::Sleep(TestTimeouts::tiny_timeout());
+  EXPECT_FALSE(event.IsSignaled());
+
+  // FlushAsyncForTesting() should callback after the second undelayed task
+  // runs.
+  DispatchAndRunTaskWithTracker(std::move(other_undelayed_task));
+  event.Wait();
+}
+
+TEST_P(TaskSchedulerTaskTrackerTest, RunDelayedTaskDuringFlush) {
+  // Simulate posting a delayed and an undelayed task.
+  Task delayed_task(FROM_HERE, DoNothing(), TaskTraits(GetParam()),
+                    TimeDelta::FromDays(1));
+  tracker_.WillPostTask(delayed_task);
+  Task undelayed_task(FROM_HERE, DoNothing(), TaskTraits(GetParam()),
+                      TimeDelta());
+  tracker_.WillPostTask(undelayed_task);
+
+  // FlushForTesting() shouldn't return before the undelayed task runs.
+  CallFlushFromAnotherThread();
+  PlatformThread::Sleep(TestTimeouts::tiny_timeout());
+  VERIFY_ASYNC_FLUSH_IN_PROGRESS();
+
+  // Run the delayed task.
+  DispatchAndRunTaskWithTracker(std::move(delayed_task));
+
+  // FlushForTesting() shouldn't return since there is still a pending undelayed
+  // task.
+  PlatformThread::Sleep(TestTimeouts::tiny_timeout());
+  VERIFY_ASYNC_FLUSH_IN_PROGRESS();
+
+  // Run the undelayed task.
+  DispatchAndRunTaskWithTracker(std::move(undelayed_task));
+
+  // FlushForTesting() should now return.
+  WAIT_FOR_ASYNC_FLUSH_RETURNED();
+}
+
+TEST_P(TaskSchedulerTaskTrackerTest, RunDelayedTaskDuringFlushAsyncForTesting) {
+  // Simulate posting a delayed and an undelayed task.
+  Task delayed_task(FROM_HERE, DoNothing(), TaskTraits(GetParam()),
+                    TimeDelta::FromDays(1));
+  tracker_.WillPostTask(delayed_task);
+  Task undelayed_task(FROM_HERE, DoNothing(), TaskTraits(GetParam()),
+                      TimeDelta());
+  tracker_.WillPostTask(undelayed_task);
+
+  // FlushAsyncForTesting() shouldn't callback before the undelayed task runs.
+  WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+                      WaitableEvent::InitialState::NOT_SIGNALED);
+  tracker_.FlushAsyncForTesting(
+      BindOnce(&WaitableEvent::Signal, Unretained(&event)));
+  PlatformThread::Sleep(TestTimeouts::tiny_timeout());
+  EXPECT_FALSE(event.IsSignaled());
+
+  // Run the delayed task.
+  DispatchAndRunTaskWithTracker(std::move(delayed_task));
+
+  // FlushAsyncForTesting() shouldn't callback since there is still a pending
+  // undelayed task.
+  PlatformThread::Sleep(TestTimeouts::tiny_timeout());
+  EXPECT_FALSE(event.IsSignaled());
+
+  // Run the undelayed task.
+  DispatchAndRunTaskWithTracker(std::move(undelayed_task));
+
+  // FlushAsyncForTesting() should now callback.
+  event.Wait();
+}
+
+TEST_P(TaskSchedulerTaskTrackerTest, FlushAfterShutdown) {
+  if (GetParam() == TaskShutdownBehavior::BLOCK_SHUTDOWN)
+    return;
+
+  // Simulate posting a task.
+  Task undelayed_task(FROM_HERE, DoNothing(), TaskTraits(GetParam()),
+                      TimeDelta());
+  tracker_.WillPostTask(undelayed_task);
+
+  // Shutdown() should return immediately since there are no pending
+  // BLOCK_SHUTDOWN tasks.
+  tracker_.Shutdown();
+
+  // FlushForTesting() should return immediately after shutdown, even if an
+  // undelayed task hasn't run.
+  tracker_.FlushForTesting();
+}
+
+TEST_P(TaskSchedulerTaskTrackerTest, FlushAfterShutdownAsync) {
+  if (GetParam() == TaskShutdownBehavior::BLOCK_SHUTDOWN)
+    return;
+
+  // Simulate posting a task.
+  Task undelayed_task(FROM_HERE, DoNothing(), TaskTraits(GetParam()),
+                      TimeDelta());
+  tracker_.WillPostTask(undelayed_task);
+
+  // Shutdown() should return immediately since there are no pending
+  // BLOCK_SHUTDOWN tasks.
+  tracker_.Shutdown();
+
+  // FlushAsyncForTesting() should callback immediately after shutdown, even if
+  // an undelayed task hasn't run.
+  bool called_back = false;
+  tracker_.FlushAsyncForTesting(
+      BindOnce([](bool* called_back) { *called_back = true; },
+               Unretained(&called_back)));
+  EXPECT_TRUE(called_back);
+}
+
+TEST_P(TaskSchedulerTaskTrackerTest, ShutdownDuringFlush) {
+  if (GetParam() == TaskShutdownBehavior::BLOCK_SHUTDOWN)
+    return;
+
+  // Simulate posting a task.
+  Task undelayed_task(FROM_HERE, DoNothing(), TaskTraits(GetParam()),
+                      TimeDelta());
+  tracker_.WillPostTask(undelayed_task);
+
+  // FlushForTesting() shouldn't return before the undelayed task runs or
+  // shutdown completes.
+  CallFlushFromAnotherThread();
+  PlatformThread::Sleep(TestTimeouts::tiny_timeout());
+  VERIFY_ASYNC_FLUSH_IN_PROGRESS();
+
+  // Shutdown() should return immediately since there are no pending
+  // BLOCK_SHUTDOWN tasks.
+  tracker_.Shutdown();
+
+  // FlushForTesting() should now return, even if an undelayed task hasn't run.
+  WAIT_FOR_ASYNC_FLUSH_RETURNED();
+}
+
+TEST_P(TaskSchedulerTaskTrackerTest, ShutdownDuringFlushAsyncForTesting) {
+  if (GetParam() == TaskShutdownBehavior::BLOCK_SHUTDOWN)
+    return;
+
+  // Simulate posting a task.
+  Task undelayed_task(FROM_HERE, DoNothing(), TaskTraits(GetParam()),
+                      TimeDelta());
+  tracker_.WillPostTask(undelayed_task);
+
+  // FlushAsyncForTesting() shouldn't callback before the undelayed task runs or
+  // shutdown completes.
+  WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+                      WaitableEvent::InitialState::NOT_SIGNALED);
+  tracker_.FlushAsyncForTesting(
+      BindOnce(&WaitableEvent::Signal, Unretained(&event)));
+  PlatformThread::Sleep(TestTimeouts::tiny_timeout());
+  EXPECT_FALSE(event.IsSignaled());
+
+  // Shutdown() should return immediately since there are no pending
+  // BLOCK_SHUTDOWN tasks.
+  tracker_.Shutdown();
+
+  // FlushAsyncForTesting() should now callback, even if an undelayed task
+  // hasn't run.
+  event.Wait();
+}
+
+TEST_P(TaskSchedulerTaskTrackerTest, DoublePendingFlushAsyncForTestingFails) {
+  Task undelayed_task(FROM_HERE, DoNothing(), TaskTraits(GetParam()),
+                      TimeDelta());
+  tracker_.WillPostTask(undelayed_task);
+
+  // FlushAsyncForTesting() shouldn't callback before the undelayed task runs.
+  bool called_back = false;
+  tracker_.FlushAsyncForTesting(
+      BindOnce([](bool* called_back) { *called_back = true; },
+               Unretained(&called_back)));
+  EXPECT_FALSE(called_back);
+  EXPECT_DCHECK_DEATH({ tracker_.FlushAsyncForTesting(BindOnce([]() {})); });
+}
+
+INSTANTIATE_TEST_CASE_P(
+    ContinueOnShutdown,
+    TaskSchedulerTaskTrackerTest,
+    ::testing::Values(TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN));
+INSTANTIATE_TEST_CASE_P(
+    SkipOnShutdown,
+    TaskSchedulerTaskTrackerTest,
+    ::testing::Values(TaskShutdownBehavior::SKIP_ON_SHUTDOWN));
+INSTANTIATE_TEST_CASE_P(
+    BlockShutdown,
+    TaskSchedulerTaskTrackerTest,
+    ::testing::Values(TaskShutdownBehavior::BLOCK_SHUTDOWN));
+
+namespace {
+
+void ExpectSequenceToken(SequenceToken sequence_token) {
+  EXPECT_EQ(sequence_token, SequenceToken::GetForCurrentThread());
+}
+
+}  // namespace
+
+// Verify that SequenceToken::GetForCurrentThread() returns the Sequence's token
+// when a Task runs.
+TEST_F(TaskSchedulerTaskTrackerTest, CurrentSequenceToken) {
+  scoped_refptr<Sequence> sequence = MakeRefCounted<Sequence>();
+
+  const SequenceToken sequence_token = sequence->token();
+  Task task(FROM_HERE, Bind(&ExpectSequenceToken, sequence_token), TaskTraits(),
+            TimeDelta());
+  tracker_.WillPostTask(task);
+
+  sequence->PushTask(std::move(task));
+
+  EXPECT_FALSE(SequenceToken::GetForCurrentThread().IsValid());
+  sequence = tracker_.WillScheduleSequence(std::move(sequence),
+                                           &never_notified_observer_);
+  ASSERT_TRUE(sequence);
+  tracker_.RunAndPopNextTask(std::move(sequence), &never_notified_observer_);
+  EXPECT_FALSE(SequenceToken::GetForCurrentThread().IsValid());
+}
+
+TEST_F(TaskSchedulerTaskTrackerTest, LoadWillPostAndRunBeforeShutdown) {
+  // Post and run tasks asynchronously.
+  std::vector<std::unique_ptr<ThreadPostingAndRunningTask>> threads;
+
+  for (size_t i = 0; i < kLoadTestNumIterations; ++i) {
+    threads.push_back(std::make_unique<ThreadPostingAndRunningTask>(
+        &tracker_, CreateTask(TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN),
+        ThreadPostingAndRunningTask::Action::WILL_POST_AND_RUN, true));
+    threads.back()->Start();
+
+    threads.push_back(std::make_unique<ThreadPostingAndRunningTask>(
+        &tracker_, CreateTask(TaskShutdownBehavior::SKIP_ON_SHUTDOWN),
+        ThreadPostingAndRunningTask::Action::WILL_POST_AND_RUN, true));
+    threads.back()->Start();
+
+    threads.push_back(std::make_unique<ThreadPostingAndRunningTask>(
+        &tracker_, CreateTask(TaskShutdownBehavior::BLOCK_SHUTDOWN),
+        ThreadPostingAndRunningTask::Action::WILL_POST_AND_RUN, true));
+    threads.back()->Start();
+  }
+
+  for (const auto& thread : threads)
+    thread->Join();
+
+  // Expect all tasks to be executed.
+  EXPECT_EQ(kLoadTestNumIterations * 3, NumTasksExecuted());
+
+  // Should return immediately because no tasks are blocking shutdown.
+  tracker_.Shutdown();
+}
+
+TEST_F(TaskSchedulerTaskTrackerTest,
+       LoadWillPostBeforeShutdownAndRunDuringShutdown) {
+  // Post tasks asynchronously.
+  std::vector<Task> tasks_continue_on_shutdown;
+  std::vector<Task> tasks_skip_on_shutdown;
+  std::vector<Task> tasks_block_shutdown;
+  for (size_t i = 0; i < kLoadTestNumIterations; ++i) {
+    tasks_continue_on_shutdown.push_back(
+        CreateTask(TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN));
+    tasks_skip_on_shutdown.push_back(
+        CreateTask(TaskShutdownBehavior::SKIP_ON_SHUTDOWN));
+    tasks_block_shutdown.push_back(
+        CreateTask(TaskShutdownBehavior::BLOCK_SHUTDOWN));
+  }
+
+  std::vector<std::unique_ptr<ThreadPostingAndRunningTask>> post_threads;
+  for (size_t i = 0; i < kLoadTestNumIterations; ++i) {
+    post_threads.push_back(std::make_unique<ThreadPostingAndRunningTask>(
+        &tracker_, &tasks_continue_on_shutdown[i],
+        ThreadPostingAndRunningTask::Action::WILL_POST, true));
+    post_threads.back()->Start();
+
+    post_threads.push_back(std::make_unique<ThreadPostingAndRunningTask>(
+        &tracker_, &tasks_skip_on_shutdown[i],
+        ThreadPostingAndRunningTask::Action::WILL_POST, true));
+    post_threads.back()->Start();
+
+    post_threads.push_back(std::make_unique<ThreadPostingAndRunningTask>(
+        &tracker_, &tasks_block_shutdown[i],
+        ThreadPostingAndRunningTask::Action::WILL_POST, true));
+    post_threads.back()->Start();
+  }
+
+  for (const auto& thread : post_threads)
+    thread->Join();
+
+  // Call Shutdown() asynchronously.
+  CallShutdownAsync();
+
+  // Run tasks asynchronously.
+  std::vector<std::unique_ptr<ThreadPostingAndRunningTask>> run_threads;
+  for (size_t i = 0; i < kLoadTestNumIterations; ++i) {
+    run_threads.push_back(std::make_unique<ThreadPostingAndRunningTask>(
+        &tracker_, std::move(tasks_continue_on_shutdown[i]),
+        ThreadPostingAndRunningTask::Action::RUN, false));
+    run_threads.back()->Start();
+
+    run_threads.push_back(std::make_unique<ThreadPostingAndRunningTask>(
+        &tracker_, std::move(tasks_skip_on_shutdown[i]),
+        ThreadPostingAndRunningTask::Action::RUN, false));
+    run_threads.back()->Start();
+
+    run_threads.push_back(std::make_unique<ThreadPostingAndRunningTask>(
+        &tracker_, std::move(tasks_block_shutdown[i]),
+        ThreadPostingAndRunningTask::Action::RUN, false));
+    run_threads.back()->Start();
+  }
+
+  for (const auto& thread : run_threads)
+    thread->Join();
+
+  WAIT_FOR_ASYNC_SHUTDOWN_COMPLETED();
+
+  // Expect BLOCK_SHUTDOWN tasks to have been executed.
+  EXPECT_EQ(kLoadTestNumIterations, NumTasksExecuted());
+}
+
+TEST_F(TaskSchedulerTaskTrackerTest, LoadWillPostAndRunDuringShutdown) {
+  // Inform |tracker_| that a BLOCK_SHUTDOWN task will be posted just to
+  // block shutdown.
+  Task block_shutdown_task(CreateTask(TaskShutdownBehavior::BLOCK_SHUTDOWN));
+  EXPECT_TRUE(tracker_.WillPostTask(block_shutdown_task));
+
+  // Call Shutdown() asynchronously.
+  CallShutdownAsync();
+
+  // Post and run tasks asynchronously.
+  std::vector<std::unique_ptr<ThreadPostingAndRunningTask>> threads;
+
+  for (size_t i = 0; i < kLoadTestNumIterations; ++i) {
+    threads.push_back(std::make_unique<ThreadPostingAndRunningTask>(
+        &tracker_, CreateTask(TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN),
+        ThreadPostingAndRunningTask::Action::WILL_POST_AND_RUN, false));
+    threads.back()->Start();
+
+    threads.push_back(std::make_unique<ThreadPostingAndRunningTask>(
+        &tracker_, CreateTask(TaskShutdownBehavior::SKIP_ON_SHUTDOWN),
+        ThreadPostingAndRunningTask::Action::WILL_POST_AND_RUN, false));
+    threads.back()->Start();
+
+    threads.push_back(std::make_unique<ThreadPostingAndRunningTask>(
+        &tracker_, CreateTask(TaskShutdownBehavior::BLOCK_SHUTDOWN),
+        ThreadPostingAndRunningTask::Action::WILL_POST_AND_RUN, true));
+    threads.back()->Start();
+  }
+
+  for (const auto& thread : threads)
+    thread->Join();
+
+  // Expect BLOCK_SHUTDOWN tasks to have been executed.
+  EXPECT_EQ(kLoadTestNumIterations, NumTasksExecuted());
+
+  // Shutdown() shouldn't return before |block_shutdown_task| is executed.
+  VERIFY_ASYNC_SHUTDOWN_IN_PROGRESS();
+
+  // Unblock shutdown by running |block_shutdown_task|.
+  DispatchAndRunTaskWithTracker(std::move(block_shutdown_task));
+  EXPECT_EQ(kLoadTestNumIterations + 1, NumTasksExecuted());
+  WAIT_FOR_ASYNC_SHUTDOWN_COMPLETED();
+}
+
+// Verify that RunAndPopNextTask() returns the sequence from which it ran a task
+// when it can be rescheduled.
+TEST_F(TaskSchedulerTaskTrackerTest,
+       RunAndPopNextTaskReturnsSequenceToReschedule) {
+  Task task_1(FROM_HERE, DoNothing(), TaskTraits(), TimeDelta());
+  EXPECT_TRUE(tracker_.WillPostTask(task_1));
+  Task task_2(FROM_HERE, DoNothing(), TaskTraits(), TimeDelta());
+  EXPECT_TRUE(tracker_.WillPostTask(task_2));
+
+  scoped_refptr<Sequence> sequence =
+      test::CreateSequenceWithTask(std::move(task_1));
+  sequence->PushTask(std::move(task_2));
+  EXPECT_EQ(sequence, tracker_.WillScheduleSequence(sequence, nullptr));
+
+  EXPECT_EQ(sequence, tracker_.RunAndPopNextTask(sequence, nullptr));
+}
+
+// Verify that WillScheduleSequence() returns nullptr when it receives a
+// background sequence and the maximum number of background sequences that can
+// be scheduled concurrently is reached. Verify that an observer is notified
+// when a background sequence can be scheduled (i.e. when one of the previously
+// scheduled background sequences has run).
+TEST_F(TaskSchedulerTaskTrackerTest,
+       WillScheduleBackgroundSequenceWithMaxBackgroundSequences) {
+  constexpr int kMaxNumScheduledBackgroundSequences = 2;
+  TaskTracker tracker("Test", kMaxNumScheduledBackgroundSequences);
+
+  // Simulate posting |kMaxNumScheduledBackgroundSequences| background tasks
+  // and scheduling the associated sequences. This should succeed.
+  std::vector<scoped_refptr<Sequence>> scheduled_sequences;
+  testing::StrictMock<MockCanScheduleSequenceObserver> never_notified_observer;
+  for (int i = 0; i < kMaxNumScheduledBackgroundSequences; ++i) {
+    Task task(FROM_HERE, DoNothing(), TaskTraits(TaskPriority::BACKGROUND),
+              TimeDelta());
+    EXPECT_TRUE(tracker.WillPostTask(task));
+    scoped_refptr<Sequence> sequence =
+        test::CreateSequenceWithTask(std::move(task));
+    EXPECT_EQ(sequence,
+              tracker.WillScheduleSequence(sequence, &never_notified_observer));
+    scheduled_sequences.push_back(std::move(sequence));
+  }
+
+  // Simulate posting extra background tasks and scheduling the associated
+  // sequences. This should fail because the maximum number of background
+  // sequences that can be scheduled concurrently is already reached.
+  std::vector<std::unique_ptr<bool>> extra_tasks_did_run;
+  std::vector<
+      std::unique_ptr<testing::StrictMock<MockCanScheduleSequenceObserver>>>
+      extra_observers;
+  std::vector<scoped_refptr<Sequence>> extra_sequences;
+  for (int i = 0; i < kMaxNumScheduledBackgroundSequences; ++i) {
+    extra_tasks_did_run.push_back(std::make_unique<bool>());
+    Task extra_task(
+        FROM_HERE,
+        BindOnce([](bool* extra_task_did_run) { *extra_task_did_run = true; },
+                 Unretained(extra_tasks_did_run.back().get())),
+        TaskTraits(TaskPriority::BACKGROUND), TimeDelta());
+    EXPECT_TRUE(tracker.WillPostTask(extra_task));
+    extra_sequences.push_back(
+        test::CreateSequenceWithTask(std::move(extra_task)));
+    extra_observers.push_back(
+        std::make_unique<
+            testing::StrictMock<MockCanScheduleSequenceObserver>>());
+    EXPECT_EQ(nullptr,
+              tracker.WillScheduleSequence(extra_sequences.back(),
+                                           extra_observers.back().get()));
+  }
+
+  // Run the sequences scheduled at the beginning of the test. Expect an
+  // observer from |extra_observers| to be notified every time a task finishes
+  // running.
+  for (int i = 0; i < kMaxNumScheduledBackgroundSequences; ++i) {
+    EXPECT_CALL(*extra_observers[i].get(),
+                MockOnCanScheduleSequence(extra_sequences[i].get()));
+    EXPECT_FALSE(tracker.RunAndPopNextTask(scheduled_sequences[i],
+                                           &never_notified_observer));
+    testing::Mock::VerifyAndClear(extra_observers[i].get());
+  }
+
+  // Run the extra sequences.
+  for (int i = 0; i < kMaxNumScheduledBackgroundSequences; ++i) {
+    EXPECT_FALSE(*extra_tasks_did_run[i]);
+    EXPECT_FALSE(tracker.RunAndPopNextTask(extra_sequences[i],
+                                           &never_notified_observer));
+    EXPECT_TRUE(*extra_tasks_did_run[i]);
+  }
+}
+
+namespace {
+
+void SetBool(bool* arg) {
+  ASSERT_TRUE(arg);
+  EXPECT_FALSE(*arg);
+  *arg = true;
+}
+
+}  // namespace
+
+// Verify that RunAndPopNextTask() doesn't reschedule the background sequence it
+// was assigned if there is a preempted background sequence with an earlier
+// sequence time (compared to the next task in the sequence assigned to
+// RunAndPopNextTask()).
+TEST_F(TaskSchedulerTaskTrackerTest,
+       RunNextBackgroundTaskWithEarlierPendingBackgroundTask) {
+  constexpr int kMaxNumScheduledBackgroundSequences = 1;
+  TaskTracker tracker("Test", kMaxNumScheduledBackgroundSequences);
+  testing::StrictMock<MockCanScheduleSequenceObserver> never_notified_observer;
+
+  // Simulate posting a background task and scheduling the associated sequence.
+  // This should succeed.
+  bool task_a_1_did_run = false;
+  Task task_a_1(FROM_HERE, BindOnce(&SetBool, Unretained(&task_a_1_did_run)),
+                TaskTraits(TaskPriority::BACKGROUND), TimeDelta());
+  EXPECT_TRUE(tracker.WillPostTask(task_a_1));
+  scoped_refptr<Sequence> sequence_a =
+      test::CreateSequenceWithTask(std::move(task_a_1));
+  EXPECT_EQ(sequence_a,
+            tracker.WillScheduleSequence(sequence_a, &never_notified_observer));
+
+  // Simulate posting an extra background task and scheduling the associated
+  // sequence. This should fail because the maximum number of background
+  // sequences that can be scheduled concurrently is already reached.
+  bool task_b_1_did_run = false;
+  Task task_b_1(FROM_HERE, BindOnce(&SetBool, Unretained(&task_b_1_did_run)),
+                TaskTraits(TaskPriority::BACKGROUND), TimeDelta());
+  EXPECT_TRUE(tracker.WillPostTask(task_b_1));
+  scoped_refptr<Sequence> sequence_b =
+      test::CreateSequenceWithTask(std::move(task_b_1));
+  testing::StrictMock<MockCanScheduleSequenceObserver> task_b_1_observer;
+  EXPECT_FALSE(tracker.WillScheduleSequence(sequence_b, &task_b_1_observer));
+
+  // Wait to be sure that the sequenced time of |task_a_2| is after the
+  // sequenced time of |task_b_1|.
+  PlatformThread::Sleep(TestTimeouts::tiny_timeout());
+
+  // Post an extra background task in |sequence_a|.
+  bool task_a_2_did_run = false;
+  Task task_a_2(FROM_HERE, BindOnce(&SetBool, Unretained(&task_a_2_did_run)),
+                TaskTraits(TaskPriority::BACKGROUND), TimeDelta());
+  EXPECT_TRUE(tracker.WillPostTask(task_a_2));
+  sequence_a->PushTask(std::move(task_a_2));
+
+  // Run the first task in |sequence_a|. RunAndPopNextTask() should return
+  // nullptr since |sequence_a| can't be rescheduled immediately.
+  // |task_b_1_observer| should be notified that |sequence_b| can be scheduled.
+  testing::StrictMock<MockCanScheduleSequenceObserver> task_a_2_observer;
+  EXPECT_CALL(task_b_1_observer, MockOnCanScheduleSequence(sequence_b.get()));
+  EXPECT_FALSE(tracker.RunAndPopNextTask(sequence_a, &task_a_2_observer));
+  testing::Mock::VerifyAndClear(&task_b_1_observer);
+  EXPECT_TRUE(task_a_1_did_run);
+
+  // Run the first task in |sequence_b|. RunAndPopNextTask() should return
+  // nullptr since |sequence_b| is empty after popping a task from it.
+  // |task_a_2_observer| should be notified that |sequence_a| can be
+  // scheduled.
+  EXPECT_CALL(task_a_2_observer, MockOnCanScheduleSequence(sequence_a.get()));
+  EXPECT_FALSE(tracker.RunAndPopNextTask(sequence_b, &never_notified_observer));
+  testing::Mock::VerifyAndClear(&task_a_2_observer);
+  EXPECT_TRUE(task_b_1_did_run);
+
+  // Run the remaining task in |sequence_a|. RunAndPopNextTask() should return
+  // nullptr since |sequence_a| is empty after popping a task from it. No
+  // observer should be notified.
+  EXPECT_FALSE(tracker.RunAndPopNextTask(sequence_a, &never_notified_observer));
+  EXPECT_TRUE(task_a_2_did_run);
+}
+
+// Verify that preempted background sequences are scheduled when shutdown
+// starts.
+TEST_F(TaskSchedulerTaskTrackerTest,
+       SchedulePreemptedBackgroundSequencesOnShutdown) {
+  constexpr int kMaxNumScheduledBackgroundSequences = 0;
+  TaskTracker tracker("Test", kMaxNumScheduledBackgroundSequences);
+  testing::StrictMock<MockCanScheduleSequenceObserver> observer;
+
+  // Simulate scheduling background sequences. TaskTracker should preempt all
+  // of them since the maximum number of concurrently scheduled background
+  // sequences is zero.
+  std::vector<scoped_refptr<Sequence>> preempted_sequences;
+  for (int i = 0; i < 3; ++i) {
+    Task task(FROM_HERE, DoNothing(),
+              TaskTraits(TaskPriority::BACKGROUND,
+                         TaskShutdownBehavior::BLOCK_SHUTDOWN),
+              TimeDelta());
+    EXPECT_TRUE(tracker.WillPostTask(task));
+    scoped_refptr<Sequence> sequence =
+        test::CreateSequenceWithTask(std::move(task));
+    EXPECT_FALSE(tracker.WillScheduleSequence(sequence, &observer));
+    preempted_sequences.push_back(std::move(sequence));
+
+    // Wait to be sure that tasks have different |sequenced_time|.
+    PlatformThread::Sleep(TestTimeouts::tiny_timeout());
+  }
+
+  // Perform shutdown. Expect |preempted_sequences| to be scheduled in posting
+  // order.
+  {
+    testing::InSequence in_sequence;
+    for (auto& preempted_sequence : preempted_sequences) {
+      EXPECT_CALL(observer, MockOnCanScheduleSequence(preempted_sequence.get()))
+          .WillOnce(testing::Invoke([&tracker](Sequence* sequence) {
+            // Run the task to unblock shutdown.
+            tracker.RunAndPopNextTask(sequence, nullptr);
+          }));
+    }
+    tracker.Shutdown();
+  }
+}
+
+namespace {
+
+class WaitAllowedTestThread : public SimpleThread {
+ public:
+  WaitAllowedTestThread() : SimpleThread("WaitAllowedTestThread") {}
+
+ private:
+  void Run() override {
+    auto task_tracker = std::make_unique<TaskTracker>("Test");
+
+    // Waiting is allowed by default. Expect TaskTracker to disallow it before
+    // running a task without the WithBaseSyncPrimitives() trait.
+    internal::AssertBaseSyncPrimitivesAllowed();
+    Task task_without_sync_primitives(
+        FROM_HERE, Bind([]() {
+          EXPECT_DCHECK_DEATH({ internal::AssertBaseSyncPrimitivesAllowed(); });
+        }),
+        TaskTraits(), TimeDelta());
+    EXPECT_TRUE(task_tracker->WillPostTask(task_without_sync_primitives));
+    testing::StrictMock<MockCanScheduleSequenceObserver>
+        never_notified_observer;
+    auto sequence_without_sync_primitives = task_tracker->WillScheduleSequence(
+        test::CreateSequenceWithTask(std::move(task_without_sync_primitives)),
+        &never_notified_observer);
+    ASSERT_TRUE(sequence_without_sync_primitives);
+    task_tracker->RunAndPopNextTask(std::move(sequence_without_sync_primitives),
+                                    &never_notified_observer);
+
+    // Disallow waiting. Expect TaskTracker to allow it before running a task
+    // with the WithBaseSyncPrimitives() trait.
+    ThreadRestrictions::DisallowWaiting();
+    Task task_with_sync_primitives(
+        FROM_HERE, Bind([]() {
+          // Shouldn't fail.
+          internal::AssertBaseSyncPrimitivesAllowed();
+        }),
+        TaskTraits(WithBaseSyncPrimitives()), TimeDelta());
+    EXPECT_TRUE(task_tracker->WillPostTask(task_with_sync_primitives));
+    auto sequence_with_sync_primitives = task_tracker->WillScheduleSequence(
+        test::CreateSequenceWithTask(std::move(task_with_sync_primitives)),
+        &never_notified_observer);
+    ASSERT_TRUE(sequence_with_sync_primitives);
+    task_tracker->RunAndPopNextTask(std::move(sequence_with_sync_primitives),
+                                    &never_notified_observer);
+
+    ScopedAllowBaseSyncPrimitivesForTesting
+        allow_wait_in_task_tracker_destructor;
+    task_tracker.reset();
+  }
+
+  DISALLOW_COPY_AND_ASSIGN(WaitAllowedTestThread);
+};
+
+}  // namespace
+
+// Verify that AssertBaseSyncPrimitivesAllowed() succeeds only in a
+// WithBaseSyncPrimitives() task.
+TEST(TaskSchedulerTaskTrackerWaitAllowedTest, WaitAllowed) {
+  // Run the test on a separate thread since it is not possible to reset the
+  // "wait allowed" bit of a thread without being a friend of
+  // ThreadRestrictions.
+  testing::GTEST_FLAG(death_test_style) = "threadsafe";
+  WaitAllowedTestThread wait_allowed_test_thread;
+  wait_allowed_test_thread.Start();
+  wait_allowed_test_thread.Join();
+}
+
+// Verify that TaskScheduler.TaskLatency.* histograms are correctly recorded
+// when a task runs.
+TEST(TaskSchedulerTaskTrackerHistogramTest, TaskLatency) {
+  auto statistics_recorder = StatisticsRecorder::CreateTemporaryForTesting();
+
+  TaskTracker tracker("Test");
+  testing::StrictMock<MockCanScheduleSequenceObserver> never_notified_observer;
+
+  struct {
+    const TaskTraits traits;
+    const char* const expected_histogram;
+  } static constexpr kTests[] = {
+      {{TaskPriority::BACKGROUND},
+       "TaskScheduler.TaskLatencyMicroseconds.Test."
+       "BackgroundTaskPriority"},
+      {{MayBlock(), TaskPriority::BACKGROUND},
+       "TaskScheduler.TaskLatencyMicroseconds.Test."
+       "BackgroundTaskPriority_MayBlock"},
+      {{WithBaseSyncPrimitives(), TaskPriority::BACKGROUND},
+       "TaskScheduler.TaskLatencyMicroseconds.Test."
+       "BackgroundTaskPriority_MayBlock"},
+      {{TaskPriority::USER_VISIBLE},
+       "TaskScheduler.TaskLatencyMicroseconds.Test."
+       "UserVisibleTaskPriority"},
+      {{MayBlock(), TaskPriority::USER_VISIBLE},
+       "TaskScheduler.TaskLatencyMicroseconds.Test."
+       "UserVisibleTaskPriority_MayBlock"},
+      {{WithBaseSyncPrimitives(), TaskPriority::USER_VISIBLE},
+       "TaskScheduler.TaskLatencyMicroseconds.Test."
+       "UserVisibleTaskPriority_MayBlock"},
+      {{TaskPriority::USER_BLOCKING},
+       "TaskScheduler.TaskLatencyMicroseconds.Test."
+       "UserBlockingTaskPriority"},
+      {{MayBlock(), TaskPriority::USER_BLOCKING},
+       "TaskScheduler.TaskLatencyMicroseconds.Test."
+       "UserBlockingTaskPriority_MayBlock"},
+      {{WithBaseSyncPrimitives(), TaskPriority::USER_BLOCKING},
+       "TaskScheduler.TaskLatencyMicroseconds.Test."
+       "UserBlockingTaskPriority_MayBlock"}};
+
+  for (const auto& test : kTests) {
+    Task task(FROM_HERE, DoNothing(), test.traits, TimeDelta());
+    ASSERT_TRUE(tracker.WillPostTask(task));
+
+    HistogramTester tester;
+
+    auto sequence = tracker.WillScheduleSequence(
+        test::CreateSequenceWithTask(std::move(task)),
+        &never_notified_observer);
+    ASSERT_TRUE(sequence);
+    tracker.RunAndPopNextTask(std::move(sequence), &never_notified_observer);
+    tester.ExpectTotalCount(test.expected_histogram, 1);
+  }
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/task_scheduler/task_traits.cc b/base/task_scheduler/task_traits.cc
new file mode 100644
index 0000000..e82e303
--- /dev/null
+++ b/base/task_scheduler/task_traits.cc
@@ -0,0 +1,53 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/task_traits.h"
+
+#include <stddef.h>
+
+#include <ostream>
+
+#include "base/logging.h"
+
+namespace base {
+
+const char* TaskPriorityToString(TaskPriority task_priority) {
+  switch (task_priority) {
+    case TaskPriority::BACKGROUND:
+      return "BACKGROUND";
+    case TaskPriority::USER_VISIBLE:
+      return "USER_VISIBLE";
+    case TaskPriority::USER_BLOCKING:
+      return "USER_BLOCKING";
+  }
+  NOTREACHED();
+  return "";
+}
+
+const char* TaskShutdownBehaviorToString(
+    TaskShutdownBehavior shutdown_behavior) {
+  switch (shutdown_behavior) {
+    case TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN:
+      return "CONTINUE_ON_SHUTDOWN";
+    case TaskShutdownBehavior::SKIP_ON_SHUTDOWN:
+      return "SKIP_ON_SHUTDOWN";
+    case TaskShutdownBehavior::BLOCK_SHUTDOWN:
+      return "BLOCK_SHUTDOWN";
+  }
+  NOTREACHED();
+  return "";
+}
+
+std::ostream& operator<<(std::ostream& os, const TaskPriority& task_priority) {
+  os << TaskPriorityToString(task_priority);
+  return os;
+}
+
+std::ostream& operator<<(std::ostream& os,
+                         const TaskShutdownBehavior& shutdown_behavior) {
+  os << TaskShutdownBehaviorToString(shutdown_behavior);
+  return os;
+}
+
+}  // namespace base
diff --git a/base/task_scheduler/task_traits.h b/base/task_scheduler/task_traits.h
new file mode 100644
index 0000000..a4a41fe
--- /dev/null
+++ b/base/task_scheduler/task_traits.h
@@ -0,0 +1,247 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_TASK_TRAITS_H_
+#define BASE_TASK_SCHEDULER_TASK_TRAITS_H_
+
+#include <stdint.h>
+
+#include <iosfwd>
+#include <type_traits>
+
+#include "base/base_export.h"
+#include "base/task_scheduler/task_traits_details.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// Valid priorities supported by the task scheduler. Note: internal algorithms
+// depend on priorities being expressed as a continuous zero-based list from
+// lowest to highest priority. Users of this API shouldn't otherwise care
+// about, nor use, the underlying values.
+enum class TaskPriority {
+  // This will always be equal to the lowest priority available.
+  LOWEST = 0,
+  // User won't notice if this task takes an arbitrarily long time to complete.
+  BACKGROUND = LOWEST,
+  // This task affects UI or responsiveness of future user interactions. It is
+  // not an immediate response to a user interaction.
+  // Examples:
+  // - Updating the UI to reflect progress on a long task.
+  // - Loading data that might be shown in the UI after a future user
+  //   interaction.
+  USER_VISIBLE,
+  // This task affects UI immediately after a user interaction.
+  // Example: Generating data shown in the UI immediately after a click.
+  USER_BLOCKING,
+  // This will always be equal to the highest priority available.
+  HIGHEST = USER_BLOCKING,
+};
+
+// Valid shutdown behaviors supported by the task scheduler.
+enum class TaskShutdownBehavior {
+  // Tasks posted with this mode which have not started executing before
+  // shutdown is initiated will never run. Tasks with this mode running at
+  // shutdown will be ignored (the worker will not be joined).
+  //
+  // This option provides a nice way to post stuff you don't want blocking
+  // shutdown. For example, you might be doing a slow DNS lookup and if it's
+  // blocked on the OS, you may not want to stop shutdown, since the result
+  // doesn't really matter at that point.
+  //
+  // However, you need to be very careful what you do in your callback when you
+  // use this option. Since the thread will continue to run until the OS
+  // terminates the process, the app can be in the process of tearing down when
+  // you're running. This means any singletons or global objects you use may
+  // suddenly become invalid out from under you. For this reason, it's best to
+  // use this only for slow but simple operations like the DNS example.
+  CONTINUE_ON_SHUTDOWN,
+
+  // Tasks posted with this mode that have not started executing at
+  // shutdown will never run. However, any task that has already begun
+  // executing when shutdown is invoked will be allowed to continue and
+  // will block shutdown until completion.
+  //
+  // Note: Because TaskScheduler::Shutdown() may block while these tasks are
+  // executing, care must be taken to ensure that they do not block on the
+  // thread that called TaskScheduler::Shutdown(), as this may lead to deadlock.
+  SKIP_ON_SHUTDOWN,
+
+  // Tasks posted with this mode before shutdown is complete will block shutdown
+  // until they're executed. Generally, this should be used only to save
+  // critical user data.
+  //
+  // Note: Tasks with BACKGROUND priority that block shutdown will be promoted
+  // to USER_VISIBLE priority during shutdown.
+  BLOCK_SHUTDOWN,
+};
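+
+// E.g. (an illustrative sketch, not part of the original header; assumes
+// //base/task_scheduler/post_task.h and a hypothetical SaveCriticalUserData
+// closure):
+//   base::PostTaskWithTraits(
+//       FROM_HERE, {base::TaskShutdownBehavior::BLOCK_SHUTDOWN},
+//       base::BindOnce(&SaveCriticalUserData));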
+
+// Tasks with this trait may block. This includes but is not limited to tasks
+// that wait on synchronous file I/O operations: read or write a file from disk,
+// interact with a pipe or a socket, rename or delete a file, enumerate files in
+// a directory, etc. This trait isn't required for the mere use of locks. For
+// tasks that block on base/ synchronization primitives, see the
+// WithBaseSyncPrimitives trait.
+struct MayBlock {};
+
+// DEPRECATED. Use base::ScopedAllowBaseSyncPrimitives(ForTesting) instead.
+//
+// Tasks with this trait will pass base::AssertBaseSyncPrimitivesAllowed(),
+// i.e. will be allowed to call the following methods:
+// - base::WaitableEvent::Wait
+// - base::ConditionVariable::Wait
+// - base::PlatformThread::Join
+// - base::PlatformThread::Sleep
+// - base::Process::WaitForExit
+// - base::Process::WaitForExitWithTimeout
+//
+// Tasks should generally not use these methods.
+//
+// Instead of waiting on a WaitableEvent or a ConditionVariable, put the work
+// that should happen after the wait in a callback and post that callback from
+// where the WaitableEvent or ConditionVariable would have been signaled. If
+// something needs to be scheduled after many tasks have executed, use
+// base::BarrierClosure.
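+//
+// E.g., instead of (an illustrative sketch; |event|, |task_runner| and
+// DoWorkAfterSignal() are hypothetical):
+//   event.Wait();
+//   DoWorkAfterSignal();
+// post the continuation from the code that would otherwise have signaled
+// |event|:
+//   task_runner->PostTask(FROM_HERE, base::BindOnce(&DoWorkAfterSignal));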
+//
+// On Windows, join processes asynchronously using base::win::ObjectWatcher.
+//
+// MayBlock() must be specified in conjunction with this trait if and only if,
+// after removing all usage of the methods listed above, the labeled tasks
+// would still be tasks that may block (per MayBlock()'s definition).
+//
+// When in doubt, consult //base/task_scheduler/OWNERS.
+struct WithBaseSyncPrimitives {};
+
+// Describes immutable metadata for a single task or a group of tasks.
+class BASE_EXPORT TaskTraits {
+ private:
+  // ValidTrait ensures TaskTraits' constructor only accepts appropriate types.
+  struct ValidTrait {
+    ValidTrait(TaskPriority) {}
+    ValidTrait(TaskShutdownBehavior) {}
+    ValidTrait(MayBlock) {}
+    ValidTrait(WithBaseSyncPrimitives) {}
+  };
+
+ public:
+  // Invoking this constructor without arguments produces TaskTraits that are
+  // appropriate for tasks that
+  //     (1) don't block (ref. MayBlock() and WithBaseSyncPrimitives()),
+  //     (2) prefer inheriting the current priority to specifying their own, and
+  //     (3) can either block shutdown or be skipped on shutdown
+  //         (TaskScheduler implementation is free to choose a fitting default).
+  //
+  // To get TaskTraits for tasks that require stricter guarantees and/or know
+  // the specific TaskPriority appropriate for them, provide arguments of type
+  // TaskPriority, TaskShutdownBehavior, MayBlock, and/or WithBaseSyncPrimitives
+  // in any order to the constructor.
+  //
+  // E.g.
+  // constexpr base::TaskTraits default_traits = {};
+  // constexpr base::TaskTraits user_visible_traits =
+  //     {base::TaskPriority::USER_VISIBLE};
+  // constexpr base::TaskTraits user_visible_may_block_traits = {
+  //     base::TaskPriority::USER_VISIBLE, base::MayBlock()};
+  // constexpr base::TaskTraits other_user_visible_may_block_traits = {
+  //     base::MayBlock(), base::TaskPriority::USER_VISIBLE};
+  template <class... ArgTypes,
+            class CheckArgumentsAreValid = internal::InitTypes<
+                decltype(ValidTrait(std::declval<ArgTypes>()))...>>
+  constexpr TaskTraits(ArgTypes... args)
+      : priority_set_explicitly_(
+            internal::HasArgOfType<TaskPriority, ArgTypes...>::value),
+        priority_(internal::GetValueFromArgList(
+            internal::EnumArgGetter<TaskPriority, TaskPriority::USER_VISIBLE>(),
+            args...)),
+        shutdown_behavior_set_explicitly_(
+            internal::HasArgOfType<TaskShutdownBehavior, ArgTypes...>::value),
+        shutdown_behavior_(internal::GetValueFromArgList(
+            internal::EnumArgGetter<TaskShutdownBehavior,
+                                    TaskShutdownBehavior::SKIP_ON_SHUTDOWN>(),
+            args...)),
+        may_block_(internal::GetValueFromArgList(
+            internal::BooleanArgGetter<MayBlock>(),
+            args...)),
+        with_base_sync_primitives_(internal::GetValueFromArgList(
+            internal::BooleanArgGetter<WithBaseSyncPrimitives>(),
+            args...)) {}
+
+  constexpr TaskTraits(const TaskTraits& other) = default;
+  TaskTraits& operator=(const TaskTraits& other) = default;
+
+  // Returns TaskTraits constructed by combining |left| and |right|. If a trait
+  // is specified in both |left| and |right|, the returned TaskTraits will have
+  // the value from |right|.
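+  //
+  // E.g. (an illustrative sketch, not from the original comment):
+  //   constexpr TaskTraits left = {TaskPriority::BACKGROUND, MayBlock()};
+  //   constexpr TaskTraits right = {TaskPriority::USER_BLOCKING};
+  //   // Override(left, right) has USER_BLOCKING priority and may_block() is
+  //   // still true.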
+  static constexpr TaskTraits Override(const TaskTraits& left,
+                                       const TaskTraits& right) {
+    return TaskTraits(left, right);
+  }
+
+  // Returns true if the priority was set explicitly.
+  constexpr bool priority_set_explicitly() const {
+    return priority_set_explicitly_;
+  }
+
+  // Returns the priority of tasks with these traits.
+  constexpr TaskPriority priority() const { return priority_; }
+
+  // Returns true if the shutdown behavior was set explicitly.
+  constexpr bool shutdown_behavior_set_explicitly() const {
+    return shutdown_behavior_set_explicitly_;
+  }
+
+  // Returns the shutdown behavior of tasks with these traits.
+  constexpr TaskShutdownBehavior shutdown_behavior() const {
+    return shutdown_behavior_;
+  }
+
+  // Returns true if tasks with these traits may block.
+  constexpr bool may_block() const { return may_block_; }
+
+  // Returns true if tasks with these traits may use base/ sync primitives.
+  constexpr bool with_base_sync_primitives() const {
+    return with_base_sync_primitives_;
+  }
+
+ private:
+  constexpr TaskTraits(const TaskTraits& left, const TaskTraits& right)
+      : priority_set_explicitly_(left.priority_set_explicitly_ ||
+                                 right.priority_set_explicitly_),
+        priority_(right.priority_set_explicitly_ ? right.priority_
+                                                 : left.priority_),
+        shutdown_behavior_set_explicitly_(
+            left.shutdown_behavior_set_explicitly_ ||
+            right.shutdown_behavior_set_explicitly_),
+        shutdown_behavior_(right.shutdown_behavior_set_explicitly_
+                               ? right.shutdown_behavior_
+                               : left.shutdown_behavior_),
+        may_block_(left.may_block_ || right.may_block_),
+        with_base_sync_primitives_(left.with_base_sync_primitives_ ||
+                                   right.with_base_sync_primitives_) {}
+
+  bool priority_set_explicitly_;
+  TaskPriority priority_;
+  bool shutdown_behavior_set_explicitly_;
+  TaskShutdownBehavior shutdown_behavior_;
+  bool may_block_;
+  bool with_base_sync_primitives_;
+};
+
+// Returns string literals for the enums defined in this file. These methods
+// should only be used for tracing and debugging.
+BASE_EXPORT const char* TaskPriorityToString(TaskPriority task_priority);
+BASE_EXPORT const char* TaskShutdownBehaviorToString(
+    TaskShutdownBehavior shutdown_behavior);
+
+// Stream operators so that the enums defined in this file can be used in
+// DCHECK and EXPECT statements.
+BASE_EXPORT std::ostream& operator<<(std::ostream& os,
+                                     const TaskPriority& task_priority);
+BASE_EXPORT std::ostream& operator<<(
+    std::ostream& os,
+    const TaskShutdownBehavior& shutdown_behavior);
+
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_TASK_TRAITS_H_
diff --git a/base/task_scheduler/task_traits_details.h b/base/task_scheduler/task_traits_details.h
new file mode 100644
index 0000000..05fb605
--- /dev/null
+++ b/base/task_scheduler/task_traits_details.h
@@ -0,0 +1,128 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_TASK_TRAITS_DETAILS_H_
+#define BASE_TASK_SCHEDULER_TASK_TRAITS_DETAILS_H_
+
+#include <type_traits>
+#include <utility>
+
+namespace base {
+namespace internal {
+
+// HasArgOfType<CheckedType, ArgTypes...>::value is true iff a type in ArgTypes
+// matches CheckedType.
+template <class...>
+struct HasArgOfType : std::false_type {};
+template <class CheckedType, class FirstArgType, class... ArgTypes>
+struct HasArgOfType<CheckedType, FirstArgType, ArgTypes...>
+    : std::conditional<std::is_same<CheckedType, FirstArgType>::value,
+                       std::true_type,
+                       HasArgOfType<CheckedType, ArgTypes...>>::type {};
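+
+// E.g. (illustrative; assumes the trait types from task_traits.h):
+//   static_assert(HasArgOfType<MayBlock, TaskPriority, MayBlock>::value, "");
+//   static_assert(!HasArgOfType<MayBlock, TaskPriority>::value, "");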
+
+// When the following call is made:
+//    GetValueFromArgListImpl(CallFirstTag(), GetterType(), args...);
+// If |args| is empty, the compiler selects the first overload. This overload
+// returns getter.GetDefaultValue(). If |args| is not empty, the compiler
+// prefers using the second overload because the type of the first argument
+// matches exactly. This overload returns getter.GetValueFromArg(first_arg),
+// where |first_arg| is the first element in |args|. If
+// getter.GetValueFromArg(first_arg) isn't defined, the compiler uses the third
+// overload instead. This overload discards the first argument in |args| and
+// makes a recursive call to GetValueFromArgListImpl() with CallFirstTag() as
+// first argument.
+
+// Tag dispatching.
+struct CallSecondTag {};
+struct CallFirstTag : CallSecondTag {};
+
+// Overload 1: Default value.
+template <class GetterType>
+constexpr typename GetterType::ValueType GetValueFromArgListImpl(
+    CallFirstTag,
+    GetterType getter) {
+  return getter.GetDefaultValue();
+}
+
+// Overload 2: Get value from first argument. Check that no argument in |args|
+// has the same type as |first_arg|.
+template <class GetterType,
+          class FirstArgType,
+          class... ArgTypes,
+          class TestGetValueFromArgDefined =
+              decltype(std::declval<GetterType>().GetValueFromArg(
+                  std::declval<FirstArgType>()))>
+constexpr typename GetterType::ValueType GetValueFromArgListImpl(
+    CallFirstTag,
+    GetterType getter,
+    const FirstArgType& first_arg,
+    const ArgTypes&... args) {
+  static_assert(!HasArgOfType<FirstArgType, ArgTypes...>::value,
+                "Multiple arguments of the same type were provided to the "
+                "constructor of TaskTraits.");
+  return getter.GetValueFromArg(first_arg);
+}
+
+// Overload 3: Discard first argument.
+template <class GetterType, class FirstArgType, class... ArgTypes>
+constexpr typename GetterType::ValueType GetValueFromArgListImpl(
+    CallSecondTag,
+    GetterType getter,
+    const FirstArgType&,
+    const ArgTypes&... args) {
+  return GetValueFromArgListImpl(CallFirstTag(), getter, args...);
+}
+
+// If there is an argument |arg_of_type| of type GetterType::ArgType in |args|,
+// returns getter.GetValueFromArg(arg_of_type). If there is more than one
+// argument of type GetterType::ArgType in |args|, generates a compile-time
+// error. Otherwise, returns getter.GetDefaultValue().
+//
+// |getter| must provide:
+//
+// ValueType:
+//     The return type of GetValueFromArgList().
+//
+// ArgType:
+//     The type of the argument from which GetValueFromArgList() derives its
+//     return value.
+//
+// ValueType GetValueFromArg(ArgType):
+//     Converts an argument of type ArgType into a value returned by
+//     GetValueFromArgList().
+//
+// ValueType GetDefaultValue():
+//     Returns the value returned by GetValueFromArgList() if none of its
+//     arguments is of type ArgType.
+template <class GetterType, class... ArgTypes>
+constexpr typename GetterType::ValueType GetValueFromArgList(
+    GetterType getter,
+    const ArgTypes&... args) {
+  return GetValueFromArgListImpl(CallFirstTag(), getter, args...);
+}
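+
+// E.g. (illustrative, using the getters defined below):
+//   GetValueFromArgList(BooleanArgGetter<MayBlock>(),
+//                       TaskPriority::BACKGROUND, MayBlock())
+// returns true: overload 3 discards the TaskPriority argument, then overload 2
+// matches the MayBlock argument. With no trait arguments,
+//   GetValueFromArgList(BooleanArgGetter<MayBlock>())
+// returns false, the getter's default value (overload 1).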
+
+template <typename ArgType>
+struct BooleanArgGetter {
+  using ValueType = bool;
+  constexpr ValueType GetValueFromArg(ArgType) const { return true; }
+  constexpr ValueType GetDefaultValue() const { return false; }
+};
+
+template <typename ArgType, ArgType DefaultValue>
+struct EnumArgGetter {
+  using ValueType = ArgType;
+  constexpr ValueType GetValueFromArg(ArgType arg) const { return arg; }
+  constexpr ValueType GetDefaultValue() const { return DefaultValue; }
+};
+
+// Allows instantiation of multiple types in one statement. Used to prevent
+// instantiation of the constructor of TaskTraits with inappropriate argument
+// types.
+template <class...>
+struct InitTypes {};
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_TASK_TRAITS_DETAILS_H_
diff --git a/base/task_scheduler/task_traits_unittest.cc b/base/task_scheduler/task_traits_unittest.cc
new file mode 100644
index 0000000..2a35048
--- /dev/null
+++ b/base/task_scheduler/task_traits_unittest.cc
@@ -0,0 +1,175 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/task_traits.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(TaskSchedulerTaskTraitsTest, Default) {
+  constexpr TaskTraits traits = {};
+  EXPECT_FALSE(traits.priority_set_explicitly());
+  EXPECT_EQ(TaskPriority::USER_VISIBLE, traits.priority());
+  EXPECT_EQ(TaskShutdownBehavior::SKIP_ON_SHUTDOWN, traits.shutdown_behavior());
+  EXPECT_FALSE(traits.may_block());
+  EXPECT_FALSE(traits.with_base_sync_primitives());
+}
+
+TEST(TaskSchedulerTaskTraitsTest, TaskPriority) {
+  constexpr TaskTraits traits = {TaskPriority::BACKGROUND};
+  EXPECT_TRUE(traits.priority_set_explicitly());
+  EXPECT_EQ(TaskPriority::BACKGROUND, traits.priority());
+  EXPECT_EQ(TaskShutdownBehavior::SKIP_ON_SHUTDOWN, traits.shutdown_behavior());
+  EXPECT_FALSE(traits.may_block());
+  EXPECT_FALSE(traits.with_base_sync_primitives());
+}
+
+TEST(TaskSchedulerTaskTraitsTest, TaskShutdownBehavior) {
+  constexpr TaskTraits traits = {TaskShutdownBehavior::BLOCK_SHUTDOWN};
+  EXPECT_FALSE(traits.priority_set_explicitly());
+  EXPECT_EQ(TaskPriority::USER_VISIBLE, traits.priority());
+  EXPECT_EQ(TaskShutdownBehavior::BLOCK_SHUTDOWN, traits.shutdown_behavior());
+  EXPECT_FALSE(traits.may_block());
+  EXPECT_FALSE(traits.with_base_sync_primitives());
+}
+
+TEST(TaskSchedulerTaskTraitsTest, MayBlock) {
+  constexpr TaskTraits traits = {MayBlock()};
+  EXPECT_FALSE(traits.priority_set_explicitly());
+  EXPECT_EQ(TaskPriority::USER_VISIBLE, traits.priority());
+  EXPECT_EQ(TaskShutdownBehavior::SKIP_ON_SHUTDOWN, traits.shutdown_behavior());
+  EXPECT_TRUE(traits.may_block());
+  EXPECT_FALSE(traits.with_base_sync_primitives());
+}
+
+TEST(TaskSchedulerTaskTraitsTest, WithBaseSyncPrimitives) {
+  constexpr TaskTraits traits = {WithBaseSyncPrimitives()};
+  EXPECT_FALSE(traits.priority_set_explicitly());
+  EXPECT_EQ(TaskPriority::USER_VISIBLE, traits.priority());
+  EXPECT_EQ(TaskShutdownBehavior::SKIP_ON_SHUTDOWN, traits.shutdown_behavior());
+  EXPECT_FALSE(traits.may_block());
+  EXPECT_TRUE(traits.with_base_sync_primitives());
+}
+
+TEST(TaskSchedulerTaskTraitsTest, MultipleTraits) {
+  constexpr TaskTraits traits = {TaskPriority::BACKGROUND,
+                                 TaskShutdownBehavior::BLOCK_SHUTDOWN,
+                                 MayBlock(), WithBaseSyncPrimitives()};
+  EXPECT_TRUE(traits.priority_set_explicitly());
+  EXPECT_EQ(TaskPriority::BACKGROUND, traits.priority());
+  EXPECT_EQ(TaskShutdownBehavior::BLOCK_SHUTDOWN, traits.shutdown_behavior());
+  EXPECT_TRUE(traits.may_block());
+  EXPECT_TRUE(traits.with_base_sync_primitives());
+}
+
+TEST(TaskSchedulerTaskTraitsTest, Copy) {
+  constexpr TaskTraits traits = {TaskPriority::BACKGROUND,
+                                 TaskShutdownBehavior::BLOCK_SHUTDOWN,
+                                 MayBlock(), WithBaseSyncPrimitives()};
+  constexpr TaskTraits traits_copy(traits);
+  EXPECT_EQ(traits.priority_set_explicitly(),
+            traits_copy.priority_set_explicitly());
+  EXPECT_EQ(traits.priority(), traits_copy.priority());
+  EXPECT_EQ(traits.shutdown_behavior(), traits_copy.shutdown_behavior());
+  EXPECT_EQ(traits.may_block(), traits_copy.may_block());
+  EXPECT_EQ(traits.with_base_sync_primitives(),
+            traits_copy.with_base_sync_primitives());
+}
+
+TEST(TaskSchedulerTaskTraitsTest, OverridePriority) {
+  constexpr TaskTraits left = {TaskPriority::BACKGROUND};
+  constexpr TaskTraits right = {TaskPriority::USER_BLOCKING};
+  constexpr TaskTraits overridden = TaskTraits::Override(left, right);
+  EXPECT_TRUE(overridden.priority_set_explicitly());
+  EXPECT_EQ(TaskPriority::USER_BLOCKING, overridden.priority());
+  EXPECT_FALSE(overridden.shutdown_behavior_set_explicitly());
+  EXPECT_EQ(TaskShutdownBehavior::SKIP_ON_SHUTDOWN,
+            overridden.shutdown_behavior());
+  EXPECT_FALSE(overridden.may_block());
+  EXPECT_FALSE(overridden.with_base_sync_primitives());
+}
+
+TEST(TaskSchedulerTaskTraitsTest, OverrideShutdownBehavior) {
+  constexpr TaskTraits left = {TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN};
+  constexpr TaskTraits right = {TaskShutdownBehavior::BLOCK_SHUTDOWN};
+  constexpr TaskTraits overridden = TaskTraits::Override(left, right);
+  EXPECT_FALSE(overridden.priority_set_explicitly());
+  EXPECT_EQ(TaskPriority::USER_VISIBLE, overridden.priority());
+  EXPECT_TRUE(overridden.shutdown_behavior_set_explicitly());
+  EXPECT_EQ(TaskShutdownBehavior::BLOCK_SHUTDOWN,
+            overridden.shutdown_behavior());
+  EXPECT_FALSE(overridden.may_block());
+  EXPECT_FALSE(overridden.with_base_sync_primitives());
+}
+
+TEST(TaskSchedulerTaskTraitsTest, OverrideMayBlock) {
+  {
+    constexpr TaskTraits left = {MayBlock()};
+    constexpr TaskTraits right = {};
+    constexpr TaskTraits overridden = TaskTraits::Override(left, right);
+    EXPECT_FALSE(overridden.priority_set_explicitly());
+    EXPECT_EQ(TaskPriority::USER_VISIBLE, overridden.priority());
+    EXPECT_FALSE(overridden.shutdown_behavior_set_explicitly());
+    EXPECT_EQ(TaskShutdownBehavior::SKIP_ON_SHUTDOWN,
+              overridden.shutdown_behavior());
+    EXPECT_TRUE(overridden.may_block());
+    EXPECT_FALSE(overridden.with_base_sync_primitives());
+  }
+  {
+    constexpr TaskTraits left = {};
+    constexpr TaskTraits right = {MayBlock()};
+    constexpr TaskTraits overridden = TaskTraits::Override(left, right);
+    EXPECT_FALSE(overridden.priority_set_explicitly());
+    EXPECT_EQ(TaskPriority::USER_VISIBLE, overridden.priority());
+    EXPECT_FALSE(overridden.shutdown_behavior_set_explicitly());
+    EXPECT_EQ(TaskShutdownBehavior::SKIP_ON_SHUTDOWN,
+              overridden.shutdown_behavior());
+    EXPECT_TRUE(overridden.may_block());
+    EXPECT_FALSE(overridden.with_base_sync_primitives());
+  }
+}
+
+TEST(TaskSchedulerTaskTraitsTest, OverrideWithBaseSyncPrimitives) {
+  {
+    constexpr TaskTraits left = {WithBaseSyncPrimitives()};
+    constexpr TaskTraits right = {};
+    constexpr TaskTraits overridden = TaskTraits::Override(left, right);
+    EXPECT_FALSE(overridden.priority_set_explicitly());
+    EXPECT_EQ(TaskPriority::USER_VISIBLE, overridden.priority());
+    EXPECT_FALSE(overridden.shutdown_behavior_set_explicitly());
+    EXPECT_EQ(TaskShutdownBehavior::SKIP_ON_SHUTDOWN,
+              overridden.shutdown_behavior());
+    EXPECT_FALSE(overridden.may_block());
+    EXPECT_TRUE(overridden.with_base_sync_primitives());
+  }
+  {
+    constexpr TaskTraits left = {};
+    constexpr TaskTraits right = {WithBaseSyncPrimitives()};
+    constexpr TaskTraits overridden = TaskTraits::Override(left, right);
+    EXPECT_FALSE(overridden.priority_set_explicitly());
+    EXPECT_EQ(TaskPriority::USER_VISIBLE, overridden.priority());
+    EXPECT_FALSE(overridden.shutdown_behavior_set_explicitly());
+    EXPECT_EQ(TaskShutdownBehavior::SKIP_ON_SHUTDOWN,
+              overridden.shutdown_behavior());
+    EXPECT_FALSE(overridden.may_block());
+    EXPECT_TRUE(overridden.with_base_sync_primitives());
+  }
+}
+
+TEST(TaskSchedulerTaskTraitsTest, OverrideMultipleTraits) {
+  constexpr TaskTraits left = {MayBlock(), TaskPriority::BACKGROUND,
+                               TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN};
+  constexpr TaskTraits right = {WithBaseSyncPrimitives(),
+                                TaskPriority::USER_BLOCKING};
+  constexpr TaskTraits overridden = TaskTraits::Override(left, right);
+  EXPECT_TRUE(overridden.priority_set_explicitly());
+  EXPECT_EQ(right.priority(), overridden.priority());
+  EXPECT_TRUE(overridden.shutdown_behavior_set_explicitly());
+  EXPECT_EQ(left.shutdown_behavior(), overridden.shutdown_behavior());
+  EXPECT_TRUE(overridden.may_block());
+  EXPECT_TRUE(overridden.with_base_sync_primitives());
+}
+
+}  // namespace base
diff --git a/base/task_scheduler/task_traits_unittest.nc b/base/task_scheduler/task_traits_unittest.nc
new file mode 100644
index 0000000..97f9c4b
--- /dev/null
+++ b/base/task_scheduler/task_traits_unittest.nc
@@ -0,0 +1,31 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is a "No Compile Test" suite.
+// http://dev.chromium.org/developers/testing/no-compile-tests
+
+#include "base/task_scheduler/task_traits.h"
+
+namespace base {
+
+#if defined(NCTEST_TASK_TRAITS_MULTIPLE_MAY_BLOCK)  // [r"Multiple arguments of the same type were provided to the constructor of TaskTraits."]
+constexpr TaskTraits traits = {MayBlock(), MayBlock()};
+#elif defined(NCTEST_TASK_TRAITS_MULTIPLE_WITH_BASE_SYNC_PRIMITIVES)  // [r"Multiple arguments of the same type were provided to the constructor of TaskTraits."]
+constexpr TaskTraits traits = {WithBaseSyncPrimitives(),
+                               WithBaseSyncPrimitives()};
+#elif defined(NCTEST_TASK_TRAITS_MULTIPLE_TASK_PRIORITY)  // [r"Multiple arguments of the same type were provided to the constructor of TaskTraits."]
+constexpr TaskTraits traits = {TaskPriority::BACKGROUND,
+                               TaskPriority::USER_BLOCKING};
+#elif defined(NCTEST_TASK_TRAITS_MULTIPLE_SHUTDOWN_BEHAVIOR)  // [r"Multiple arguments of the same type were provided to the constructor of TaskTraits."]
+constexpr TaskTraits traits = {TaskShutdownBehavior::BLOCK_SHUTDOWN,
+                               TaskShutdownBehavior::BLOCK_SHUTDOWN};
+#elif defined(NCTEST_TASK_TRAITS_MULTIPLE_SAME_TYPE_MIX)  // [r"Multiple arguments of the same type were provided to the constructor of TaskTraits."]
+constexpr TaskTraits traits = {TaskShutdownBehavior::BLOCK_SHUTDOWN,
+                               MayBlock(),
+                               TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN};
+#elif defined(NCTEST_TASK_TRAITS_INVALID_TYPE)  // [r"no matching constructor for initialization of 'const base::TaskTraits'"]
+constexpr TaskTraits traits = {TaskShutdownBehavior::BLOCK_SHUTDOWN, true};
+#endif
+
+}  // namespace base
diff --git a/base/task_scheduler/task_unittest.cc b/base/task_scheduler/task_unittest.cc
new file mode 100644
index 0000000..31a59de
--- /dev/null
+++ b/base/task_scheduler/task_unittest.cc
@@ -0,0 +1,60 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/task.h"
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/location.h"
+#include "base/task_scheduler/task_traits.h"
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace internal {
+
+// Verify that the shutdown behavior of a BLOCK_SHUTDOWN delayed task is
+// adjusted to SKIP_ON_SHUTDOWN. The shutdown behavior of other delayed tasks
+// should not change.
+TEST(TaskSchedulerTaskTest, ShutdownBehaviorChangeWithDelay) {
+  Task continue_on_shutdown(FROM_HERE, DoNothing(),
+                            {TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN},
+                            TimeDelta::FromSeconds(1));
+  EXPECT_EQ(TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN,
+            continue_on_shutdown.traits.shutdown_behavior());
+
+  Task skip_on_shutdown(FROM_HERE, DoNothing(),
+                        {TaskShutdownBehavior::SKIP_ON_SHUTDOWN},
+                        TimeDelta::FromSeconds(1));
+  EXPECT_EQ(TaskShutdownBehavior::SKIP_ON_SHUTDOWN,
+            skip_on_shutdown.traits.shutdown_behavior());
+
+  Task block_shutdown(FROM_HERE, DoNothing(),
+                      {TaskShutdownBehavior::BLOCK_SHUTDOWN},
+                      TimeDelta::FromSeconds(1));
+  EXPECT_EQ(TaskShutdownBehavior::SKIP_ON_SHUTDOWN,
+            block_shutdown.traits.shutdown_behavior());
+}
+
+// Verify that the shutdown behavior of undelayed tasks is not adjusted.
+TEST(TaskSchedulerTaskTest, NoShutdownBehaviorChangeNoDelay) {
+  Task continue_on_shutdown(FROM_HERE, DoNothing(),
+                            {TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN},
+                            TimeDelta());
+  EXPECT_EQ(TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN,
+            continue_on_shutdown.traits.shutdown_behavior());
+
+  Task skip_on_shutdown(FROM_HERE, DoNothing(),
+                        {TaskShutdownBehavior::SKIP_ON_SHUTDOWN}, TimeDelta());
+  EXPECT_EQ(TaskShutdownBehavior::SKIP_ON_SHUTDOWN,
+            skip_on_shutdown.traits.shutdown_behavior());
+
+  Task block_shutdown(FROM_HERE, DoNothing(),
+                      {TaskShutdownBehavior::BLOCK_SHUTDOWN}, TimeDelta());
+  EXPECT_EQ(TaskShutdownBehavior::BLOCK_SHUTDOWN,
+            block_shutdown.traits.shutdown_behavior());
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/task_scheduler/test_task_factory.cc b/base/task_scheduler/test_task_factory.cc
new file mode 100644
index 0000000..0867547
--- /dev/null
+++ b/base/task_scheduler/test_task_factory.cc
@@ -0,0 +1,106 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/test_task_factory.h"
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/callback.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/sequenced_task_runner_handle.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace internal {
+namespace test {
+
+TestTaskFactory::TestTaskFactory(scoped_refptr<TaskRunner> task_runner,
+                                 ExecutionMode execution_mode)
+    : cv_(&lock_),
+      task_runner_(std::move(task_runner)),
+      execution_mode_(execution_mode) {
+  // Detach |thread_checker_| from the current thread. It will be attached to
+  // the first thread that calls ThreadCheckerImpl::CalledOnValidThread().
+  thread_checker_.DetachFromThread();
+}
+
+TestTaskFactory::~TestTaskFactory() {
+  WaitForAllTasksToRun();
+}
+
+bool TestTaskFactory::PostTask(PostNestedTask post_nested_task,
+                               OnceClosure after_task_closure) {
+  AutoLock auto_lock(lock_);
+  return task_runner_->PostTask(
+      FROM_HERE, BindOnce(&TestTaskFactory::RunTaskCallback, Unretained(this),
+                          num_posted_tasks_++, post_nested_task,
+                          std::move(after_task_closure)));
+}
+
+void TestTaskFactory::WaitForAllTasksToRun() const {
+  AutoLock auto_lock(lock_);
+  while (ran_tasks_.size() < num_posted_tasks_)
+    cv_.Wait();
+}
+
+void TestTaskFactory::RunTaskCallback(size_t task_index,
+                                      PostNestedTask post_nested_task,
+                                      OnceClosure after_task_closure) {
+  if (post_nested_task == PostNestedTask::YES)
+    PostTask(PostNestedTask::NO, Closure());
+
+  EXPECT_TRUE(task_runner_->RunsTasksInCurrentSequence());
+
+  // Verify TaskRunnerHandles are set as expected in the task's scope.
+  switch (execution_mode_) {
+    case ExecutionMode::PARALLEL:
+      EXPECT_FALSE(ThreadTaskRunnerHandle::IsSet());
+      EXPECT_FALSE(SequencedTaskRunnerHandle::IsSet());
+      break;
+    case ExecutionMode::SEQUENCED:
+      EXPECT_FALSE(ThreadTaskRunnerHandle::IsSet());
+      EXPECT_TRUE(SequencedTaskRunnerHandle::IsSet());
+      EXPECT_EQ(task_runner_, SequencedTaskRunnerHandle::Get());
+      break;
+    case ExecutionMode::SINGLE_THREADED:
+      // SequencedTaskRunnerHandle inherits from ThreadTaskRunnerHandle so
+      // both are expected to be "set" in the SINGLE_THREADED case.
+      EXPECT_TRUE(ThreadTaskRunnerHandle::IsSet());
+      EXPECT_TRUE(SequencedTaskRunnerHandle::IsSet());
+      EXPECT_EQ(task_runner_, ThreadTaskRunnerHandle::Get());
+      EXPECT_EQ(task_runner_, SequencedTaskRunnerHandle::Get());
+      break;
+  }
+
+  {
+    AutoLock auto_lock(lock_);
+
+    DCHECK_LE(task_index, num_posted_tasks_);
+
+    if ((execution_mode_ == ExecutionMode::SINGLE_THREADED ||
+         execution_mode_ == ExecutionMode::SEQUENCED) &&
+        task_index != ran_tasks_.size()) {
+      ADD_FAILURE() << "A task didn't run in the expected order.";
+    }
+
+    if (execution_mode_ == ExecutionMode::SINGLE_THREADED)
+      EXPECT_TRUE(thread_checker_.CalledOnValidThread());
+
+    if (ran_tasks_.find(task_index) != ran_tasks_.end())
+      ADD_FAILURE() << "A task ran more than once.";
+    ran_tasks_.insert(task_index);
+
+    cv_.Signal();
+  }
+
+  if (!after_task_closure.is_null())
+    std::move(after_task_closure).Run();
+}
+
+}  // namespace test
+}  // namespace internal
+}  // namespace base
diff --git a/base/task_scheduler/test_task_factory.h b/base/task_scheduler/test_task_factory.h
new file mode 100644
index 0000000..300b7bf
--- /dev/null
+++ b/base/task_scheduler/test_task_factory.h
@@ -0,0 +1,99 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_TEST_TASK_FACTORY_H_
+#define BASE_TASK_SCHEDULER_TEST_TASK_FACTORY_H_
+
+#include <stddef.h>
+
+#include <unordered_set>
+
+#include "base/callback_forward.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/synchronization/lock.h"
+#include "base/task_runner.h"
+#include "base/task_scheduler/task_traits.h"
+#include "base/task_scheduler/test_utils.h"
+#include "base/threading/thread_checker_impl.h"
+
+namespace base {
+namespace internal {
+namespace test {
+
+// A TestTaskFactory posts tasks to a TaskRunner and verifies that they run as
+// expected. Generates a test failure when:
+// - The RunsTasksInCurrentSequence() method of the TaskRunner returns false on
+//   a thread on which a Task is run.
+// - The TaskRunnerHandles set in the context of the task don't match what's
+//   expected for the tested ExecutionMode.
+// - The ExecutionMode of the TaskRunner is SEQUENCED or SINGLE_THREADED and
+//   Tasks don't run in posting order.
+// - The ExecutionMode of the TaskRunner is SINGLE_THREADED and Tasks don't run
+//   on the same thread.
+// - A Task runs more than once.
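+//
+// Usage sketch (illustrative only; |task_runner| is assumed to be a sequenced
+// TaskRunner created by the code under test):
+//   TestTaskFactory factory(task_runner, ExecutionMode::SEQUENCED);
+//   factory.PostTask(TestTaskFactory::PostNestedTask::NO, OnceClosure());
+//   factory.WaitForAllTasksToRun();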
+class TestTaskFactory {
+ public:
+  enum class PostNestedTask {
+    YES,
+    NO,
+  };
+
+  // Constructs a TestTaskFactory that posts tasks to |task_runner|.
+  // |execution_mode| is the ExecutionMode of |task_runner|.
+  TestTaskFactory(scoped_refptr<TaskRunner> task_runner,
+                  ExecutionMode execution_mode);
+
+  ~TestTaskFactory();
+
+  // Posts a task. The posted task will:
+  // - Post a new task if |post_nested_task| is YES. The nested task won't run
+  //   |after_task_closure|.
+  // - Verify conditions in which the task runs (see potential failures above).
+  // - Run |after_task_closure| if it is not null.
+  bool PostTask(PostNestedTask post_nested_task,
+                OnceClosure after_task_closure);
+
+  // Waits for all tasks posted by PostTask() to start running. It is not
+  // guaranteed that the tasks have completed their execution when this returns.
+  void WaitForAllTasksToRun() const;
+
+  const TaskRunner* task_runner() const { return task_runner_.get(); }
+
+ private:
+  void RunTaskCallback(size_t task_index,
+                       PostNestedTask post_nested_task,
+                       OnceClosure after_task_closure);
+
+  // Synchronizes access to all members.
+  mutable Lock lock_;
+
+  // Condition variable signaled when a task runs.
+  mutable ConditionVariable cv_;
+
+  // Task runner through which this factory posts tasks.
+  const scoped_refptr<TaskRunner> task_runner_;
+
+  // Execution mode of |task_runner_|.
+  const ExecutionMode execution_mode_;
+
+  // Number of tasks posted by PostTask().
+  size_t num_posted_tasks_ = 0;
+
+  // Indexes of tasks that ran.
+  std::unordered_set<size_t> ran_tasks_;
+
+  // Used to verify that all tasks run on the same thread when |execution_mode_|
+  // is SINGLE_THREADED.
+  ThreadCheckerImpl thread_checker_;
+
+  DISALLOW_COPY_AND_ASSIGN(TestTaskFactory);
+};
+
+}  // namespace test
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_TEST_TASK_FACTORY_H_
diff --git a/base/task_scheduler/test_utils.cc b/base/task_scheduler/test_utils.cc
new file mode 100644
index 0000000..eb509f8
--- /dev/null
+++ b/base/task_scheduler/test_utils.cc
@@ -0,0 +1,45 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/test_utils.h"
+
+#include <utility>
+
+#include "base/task_scheduler/scheduler_worker_pool.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace internal {
+namespace test {
+
+MockSchedulerWorkerObserver::MockSchedulerWorkerObserver() = default;
+MockSchedulerWorkerObserver::~MockSchedulerWorkerObserver() = default;
+
+scoped_refptr<Sequence> CreateSequenceWithTask(Task task) {
+  scoped_refptr<Sequence> sequence = MakeRefCounted<Sequence>();
+  sequence->PushTask(std::move(task));
+  return sequence;
+}
+
+scoped_refptr<TaskRunner> CreateTaskRunnerWithExecutionMode(
+    SchedulerWorkerPool* worker_pool,
+    test::ExecutionMode execution_mode) {
+  // Allow tasks posted to the returned TaskRunner to wait on a WaitableEvent.
+  const TaskTraits traits = {WithBaseSyncPrimitives()};
+  switch (execution_mode) {
+    case test::ExecutionMode::PARALLEL:
+      return worker_pool->CreateTaskRunnerWithTraits(traits);
+    case test::ExecutionMode::SEQUENCED:
+      return worker_pool->CreateSequencedTaskRunnerWithTraits(traits);
+    default:
+      // ExecutionMode::SINGLE_THREADED is not supported by this helper.
+      break;
+  }
+  ADD_FAILURE() << "Unexpected ExecutionMode";
+  return nullptr;
+}
+
+}  // namespace test
+}  // namespace internal
+}  // namespace base
diff --git a/base/task_scheduler/test_utils.h b/base/task_scheduler/test_utils.h
new file mode 100644
index 0000000..42e4eed
--- /dev/null
+++ b/base/task_scheduler/test_utils.h
@@ -0,0 +1,52 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_TEST_UTILS_H_
+#define BASE_TASK_SCHEDULER_TEST_UTILS_H_
+
+#include "base/memory/ref_counted.h"
+#include "base/task_runner.h"
+#include "base/task_scheduler/scheduler_worker_observer.h"
+#include "base/task_scheduler/sequence.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace base {
+namespace internal {
+
+class SchedulerWorkerPool;
+struct Task;
+
+namespace test {
+
+class MockSchedulerWorkerObserver : public SchedulerWorkerObserver {
+ public:
+  MockSchedulerWorkerObserver();
+  ~MockSchedulerWorkerObserver();
+
+  MOCK_METHOD0(OnSchedulerWorkerMainEntry, void());
+  MOCK_METHOD0(OnSchedulerWorkerMainExit, void());
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MockSchedulerWorkerObserver);
+};
+
+// An enumeration of possible task scheduler TaskRunner types. Used to
+// parametrize relevant task_scheduler tests.
+enum class ExecutionMode { PARALLEL, SEQUENCED, SINGLE_THREADED };
+
+// Creates a Sequence and pushes |task| to it. Returns that sequence.
+scoped_refptr<Sequence> CreateSequenceWithTask(Task task);
+
+// Creates a TaskRunner that posts tasks to |worker_pool| with the
+// |execution_mode| execution mode and the WithBaseSyncPrimitives() trait.
+// Caveat: this does not support ExecutionMode::SINGLE_THREADED.
+scoped_refptr<TaskRunner> CreateTaskRunnerWithExecutionMode(
+    SchedulerWorkerPool* worker_pool,
+    test::ExecutionMode execution_mode);
+
+}  // namespace test
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_TEST_UTILS_H_
diff --git a/base/task_scheduler/tracked_ref.h b/base/task_scheduler/tracked_ref.h
new file mode 100644
index 0000000..d99a345
--- /dev/null
+++ b/base/task_scheduler/tracked_ref.h
@@ -0,0 +1,173 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_TRACKED_REF_H_
+#define BASE_TASK_SCHEDULER_TRACKED_REF_H_
+
+#include <memory>
+
+#include "base/atomic_ref_count.h"
+#include "base/gtest_prod_util.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/synchronization/waitable_event.h"
+
+namespace base {
+namespace internal {
+
+// TrackedRefs are effectively a ref-counting scheme for objects that have a
+// single owner.
+//
+// Deletion is still controlled by the single owner but ~T() itself will block
+// until all the TrackedRefs handed by its TrackedRefFactory have been released
+// (by ~TrackedRef<T>()).
+//
+// Just like WeakPtrFactory: TrackedRefFactory<T> should be the last member of T
+// to ensure ~TrackedRefFactory<T>() runs first in ~T().
+//
+// The owner of a T should hence be certain that the last TrackedRefs to T are
+// already gone or on their way out before destroying it, or ~T() will hang
+// (indicating a bug in the tear-down logic; proper refcounting, on the other
+// hand, would result in a leak).
+//
+// TrackedRefFactory only makes sense to use on types that are always leaked in
+// production but need to be torn down in tests (blocking destruction is
+// impractical in production -- ref. ScopedAllowBaseSyncPrimitivesForTesting
+// below).
+//
+// Why would we ever need such a thing? In task_scheduler there is a clear
+// ownership hierarchy with mostly single owners and little refcounting. In
+// production nothing is ever torn down so this isn't a problem. In tests
+// however we must JoinForTesting(). At that point, all the raw back T* refs
+// used by the worker threads are problematic because they can result in use-
+// after-frees if a worker outlives the deletion of its corresponding
+// TaskScheduler/TaskTracker/SchedulerWorkerPool/etc.
+//
+// JoinForTesting() isn't so hard when all workers are managed. But with cleanup
+// semantics (reclaiming a worker that has been idle for too long) it becomes
+// tricky because workers can go unaccounted for before they exit their main
+// function (https://crbug.com/827615).
+//
+// For that reason and to clearly document the ownership model, task_scheduler
+// uses TrackedRefs.
+//
+// On top of being a clearer ownership model than proper refcounting, a hang in
+// tear down in a test with out-of-order tear down logic is much preferred to
+// letting its worker thread and associated constructs outlive the test
+// (potentially resulting in flakes in unrelated tests running later in the same
+// process).
+//
+// Note: While there's nothing task_scheduler specific about TrackedRefs it
+// requires an ownership model where all the TrackedRefs are released on other
+// threads in sync with ~T(). This isn't a typical use case beyond shutting down
+// TaskScheduler in tests and as such this is kept internal here for now.
+
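+// A minimal usage sketch (illustrative only; |Driver| is a hypothetical type):
+//
+//   class Driver {
+//    public:
+//     Driver() : tracked_ref_factory_(this) {}
+//     TrackedRef<Driver> GetTrackedRef() {
+//       return tracked_ref_factory_.GetTrackedRef();
+//     }
+//
+//    private:
+//     // Last member: destroyed first, so ~Driver() blocks until every
+//     // outstanding TrackedRef<Driver> has been released.
+//     TrackedRefFactory<Driver> tracked_ref_factory_;
+//   };
+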
+template <class T>
+class TrackedRefFactory;
+
+// TrackedRef<T> can be used like a T*.
+template <class T>
+class TrackedRef {
+ public:
+  // Moveable and copyable.
+  TrackedRef(TrackedRef<T>&& other)
+      : ptr_(other.ptr_), factory_(other.factory_) {
+    // Null out |other|'s factory so its destructor doesn't decrement
+    // |live_tracked_refs_|.
+    other.factory_ = nullptr;
+  }
+  TrackedRef(const TrackedRef<T>& other)
+      : ptr_(other.ptr_), factory_(other.factory_) {
+    factory_->live_tracked_refs_.Increment();
+  }
+
+  // Intentionally not assignable for now because it makes the logic slightly
+  // convoluted and it's not a use case that makes sense for the types using
+  // this at the moment.
+  TrackedRef& operator=(TrackedRef<T>&& other) = delete;
+  TrackedRef& operator=(const TrackedRef<T>& other) = delete;
+
+  ~TrackedRef() {
+    if (factory_ && !factory_->live_tracked_refs_.Decrement()) {
+      DCHECK(factory_->ready_to_destroy_);
+      DCHECK(!factory_->ready_to_destroy_->IsSignaled());
+      factory_->ready_to_destroy_->Signal();
+    }
+  }
+
+  T& operator*() const { return *ptr_; }
+
+  T* operator->() const { return ptr_; }
+
+  explicit operator bool() const { return ptr_ != nullptr; }
+
+ private:
+  friend class TrackedRefFactory<T>;
+
+  TrackedRef(T* ptr, TrackedRefFactory<T>* factory)
+      : ptr_(ptr), factory_(factory) {
+    factory_->live_tracked_refs_.Increment();
+  }
+
+  T* ptr_;
+  TrackedRefFactory<T>* factory_;
+};
+
+// TrackedRefFactory<T> should be the last member of T.
+template <class T>
+class TrackedRefFactory {
+ public:
+  TrackedRefFactory(T* ptr)
+      : ptr_(ptr), self_ref_(WrapUnique(new TrackedRef<T>(ptr_, this))) {
+    DCHECK(ptr_);
+  }
+
+  ~TrackedRefFactory() {
+    // Enter the destruction phase.
+    ready_to_destroy_ = std::make_unique<WaitableEvent>(
+        WaitableEvent::ResetPolicy::MANUAL,
+        WaitableEvent::InitialState::NOT_SIGNALED);
+
+    // Release self-ref (if this was the last one it will signal the event right
+    // away).
+    self_ref_.reset();
+
+    ready_to_destroy_->Wait();
+  }
+
+  TrackedRef<T> GetTrackedRef() {
+    // TrackedRefs cannot be obtained after |live_tracked_refs_| has already
+    // reached zero. In other words, the owner of a TrackedRefFactory shouldn't
+    // vend new TrackedRefs while it's being destroyed (owners of TrackedRefs
+    // may still copy/move their refs around during the destruction phase).
+    DCHECK(!live_tracked_refs_.IsZero());
+    return TrackedRef<T>(ptr_, this);
+  }
+
+ private:
+  friend class TrackedRef<T>;
+  FRIEND_TEST_ALL_PREFIXES(TrackedRefTest, CopyAndMoveSemantics);
+
+  T* const ptr_;
+
+  // The number of live TrackedRefs vended by this factory.
+  AtomicRefCount live_tracked_refs_{0};
+
+  // Non-null during the destruction phase. Signaled once |live_tracked_refs_|
+  // reaches 0. Note: while this could be a direct member, only initializing it
+  // in the destruction phase avoids keeping a handle open for the entire
+  // session.
+  std::unique_ptr<WaitableEvent> ready_to_destroy_;
+
+  // TrackedRefFactory holds a TrackedRef as well to prevent
+  // |live_tracked_refs_| from ever reaching zero before ~TrackedRefFactory().
+  std::unique_ptr<TrackedRef<T>> self_ref_;
+
+  DISALLOW_COPY_AND_ASSIGN(TrackedRefFactory);
+};
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_TRACKED_REF_H_
diff --git a/base/task_scheduler/tracked_ref_unittest.cc b/base/task_scheduler/tracked_ref_unittest.cc
new file mode 100644
index 0000000..b793c07
--- /dev/null
+++ b/base/task_scheduler/tracked_ref_unittest.cc
@@ -0,0 +1,150 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/tracked_ref.h"
+
+#include <memory>
+
+#include "base/bind.h"
+#include "base/macros.h"
+#include "base/synchronization/atomic_flag.h"
+#include "base/test/test_timeouts.h"
+#include "base/threading/thread.h"
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace internal {
+
+namespace {
+
+class ObjectWithTrackedRefs {
+ public:
+  ObjectWithTrackedRefs() : tracked_ref_factory_(this) {}
+  ~ObjectWithTrackedRefs() { under_destruction_.Set(); }
+
+  TrackedRef<ObjectWithTrackedRefs> GetTrackedRef() {
+    return tracked_ref_factory_.GetTrackedRef();
+  }
+
+  bool under_destruction() const { return under_destruction_.IsSet(); }
+
+ private:
+  // True once ~ObjectWithTrackedRefs() has been initiated.
+  AtomicFlag under_destruction_;
+
+  TrackedRefFactory<ObjectWithTrackedRefs> tracked_ref_factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(ObjectWithTrackedRefs);
+};
+
+}  // namespace
+
+// Test that an object with a TrackedRefFactory can be destroyed by a single
+// owner but that its destruction will be blocked on the TrackedRefs being
+// released.
+TEST(TrackedRefTest, TrackedRefObjectDeletion) {
+  Thread thread("TrackedRefTestThread");
+  thread.Start();
+
+  std::unique_ptr<ObjectWithTrackedRefs> obj =
+      std::make_unique<ObjectWithTrackedRefs>();
+
+  TimeTicks begin = TimeTicks::Now();
+
+  thread.task_runner()->PostDelayedTask(
+      FROM_HERE,
+      BindOnce(
+          [](TrackedRef<ObjectWithTrackedRefs> obj) {
+            // By the time this kicks in, the object should already be under
+            // destruction, but blocked on this TrackedRef being released. This
+            // is technically racy (main thread has to run |obj.reset()| and
+            // this thread has to observe the side-effects before this delayed
+            // task fires). If this ever flakes, this expectation could be
+            // turned into a while (!obj->under_destruction()); loop, but until
+            // it's proven flaky in practice, this expectation is more readable
+            // and diagnosable than a hang.
+            EXPECT_TRUE(obj->under_destruction());
+          },
+          obj->GetTrackedRef()),
+      TestTimeouts::tiny_timeout());
+
+  // This should kick off destruction but block until the above task resolves
+  // and releases the TrackedRef.
+  obj.reset();
+  EXPECT_GE(TimeTicks::Now() - begin, TestTimeouts::tiny_timeout());
+}
+
+TEST(TrackedRefTest, ManyThreadsRacing) {
+  constexpr int kNumThreads = 16;
+  std::vector<std::unique_ptr<Thread>> threads;
+  for (int i = 0; i < kNumThreads; ++i) {
+    threads.push_back(std::make_unique<Thread>("TrackedRefTestThread"));
+    threads.back()->StartAndWaitForTesting();
+  }
+
+  std::unique_ptr<ObjectWithTrackedRefs> obj =
+      std::make_unique<ObjectWithTrackedRefs>();
+
+  // Send a TrackedRef to each thread.
+  for (auto& thread : threads) {
+    thread->task_runner()->PostTask(
+        FROM_HERE, BindOnce(
+                       [](TrackedRef<ObjectWithTrackedRefs> obj) {
+                         // Confirm it's still safe to
+                         // dereference |obj| (and, bonus, that
+                         // playing with TrackedRefs some more
+                         // isn't problematic).
+                         EXPECT_TRUE(obj->GetTrackedRef());
+                       },
+                       obj->GetTrackedRef()));
+  }
+
+  // Initiate destruction racily with the above tasks' execution (they will
+  // crash if TrackedRefs aren't WAI).
+  obj.reset();
+}
+
+// Test that instantiating and deleting a TrackedRefFactory without ever taking
+// a TrackedRef on it is fine.
+TEST(TrackedRefTest, NoTrackedRefs) {
+  ObjectWithTrackedRefs obj;
+}
+
+namespace {
+void ConsumesTrackedRef(TrackedRef<ObjectWithTrackedRefs> obj) {}
+}  // namespace
+
+// Test that destroying a TrackedRefFactory which had TrackedRefs in the past
+// that are already gone is WAI.
+TEST(TrackedRefTest, NoPendingTrackedRefs) {
+  ObjectWithTrackedRefs obj;
+  ConsumesTrackedRef(obj.GetTrackedRef());
+}
+
+TEST(TrackedRefTest, CopyAndMoveSemantics) {
+  struct Foo {
+    Foo() : factory(this) {}
+    TrackedRefFactory<Foo> factory;
+  };
+  Foo foo;
+
+  EXPECT_EQ(1, foo.factory.live_tracked_refs_.SubtleRefCountForDebug());
+
+  {
+    TrackedRef<Foo> plain = foo.factory.GetTrackedRef();
+    EXPECT_EQ(2, foo.factory.live_tracked_refs_.SubtleRefCountForDebug());
+
+    TrackedRef<Foo> copy_constructed(plain);
+    EXPECT_EQ(3, foo.factory.live_tracked_refs_.SubtleRefCountForDebug());
+
+    TrackedRef<Foo> moved_constructed(std::move(copy_constructed));
+    EXPECT_EQ(3, foo.factory.live_tracked_refs_.SubtleRefCountForDebug());
+  }
+
+  EXPECT_EQ(1, foo.factory.live_tracked_refs_.SubtleRefCountForDebug());
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/template_util.h b/base/template_util.h
new file mode 100644
index 0000000..8544aa2
--- /dev/null
+++ b/base/template_util.h
@@ -0,0 +1,153 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEMPLATE_UTIL_H_
+#define BASE_TEMPLATE_UTIL_H_
+
+#include <stddef.h>
+#include <iosfwd>
+#include <iterator>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include "build/build_config.h"
+
+// Some versions of libstdc++ have partial support for type_traits, but miss a
+// smaller subset while removing some of the older non-standard stuff. Assume
+// that all versions below 5.0 fall in this category, along with one 5.0
+// experimental release. Test for this by consulting the compiler major
+// version, the only reliable option available, so theoretically this could
+// fail should you attempt to mix an earlier version of libstdc++ with
+// GCC 5 or later. But that's unlikely to work out, especially as GCC5 changed
+// its ABI.
+#define CR_GLIBCXX_5_0_0 20150123
+#if (defined(__GNUC__) && __GNUC__ < 5) || \
+    (defined(__GLIBCXX__) && __GLIBCXX__ == CR_GLIBCXX_5_0_0)
+#define CR_USE_FALLBACKS_FOR_OLD_EXPERIMENTAL_GLIBCXX
+#endif
+
+// This works around using gcc with libc++, which has some incompatibilities:
+// - is_trivially_* doesn't work: https://llvm.org/bugs/show_bug.cgi?id=27538
+// TODO(danakj): Remove this when android builders are all using a newer version
+// of gcc, or the android ndk is updated to a newer libc++ that works with older
+// gcc versions.
+#if !defined(__clang__) && defined(_LIBCPP_VERSION)
+#define CR_USE_FALLBACKS_FOR_GCC_WITH_LIBCXX
+#endif
+
+namespace base {
+
+template <class T> struct is_non_const_reference : std::false_type {};
+template <class T> struct is_non_const_reference<T&> : std::true_type {};
+template <class T> struct is_non_const_reference<const T&> : std::false_type {};
+
+namespace internal {
+
+// Implementation detail of base::void_t below.
+template <typename...>
+struct make_void {
+  using type = void;
+};
+
+}  // namespace internal
+
+// base::void_t is an implementation of std::void_t from C++17.
+//
+// We use |base::internal::make_void| as a helper struct to avoid a C++14
+// defect:
+//   http://en.cppreference.com/w/cpp/types/void_t
+//   http://open-std.org/JTC1/SC22/WG21/docs/cwg_defects.html#1558
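+//
+// A usage sketch (|HasFoo| and foo() are illustrative, not part of this
+// header): void_t enables the detection idiom, e.g. detecting whether a type
+// has a foo() member:
+//
+//   template <typename T, typename = void>
+//   struct HasFoo : std::false_type {};
+//   template <typename T>
+//   struct HasFoo<T, void_t<decltype(std::declval<T>().foo())>>
+//       : std::true_type {};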
+template <typename... Ts>
+using void_t = typename ::base::internal::make_void<Ts...>::type;
+
+namespace internal {
+
+// Uses expression SFINAE to detect whether using operator<< would work.
+template <typename T, typename = void>
+struct SupportsOstreamOperator : std::false_type {};
+template <typename T>
+struct SupportsOstreamOperator<T,
+                               decltype(void(std::declval<std::ostream&>()
+                                             << std::declval<T>()))>
+    : std::true_type {};
+
+// Used to detect whether the given type is an iterator. This is normally used
+// with std::enable_if to provide disambiguation for functions that take
+// templatized iterators as input.
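+//
+// An illustrative sketch (AssignFromRange is hypothetical): an overload can be
+// restricted to iterator arguments like so:
+//
+//   template <typename Iter,
+//             typename = std::enable_if_t<is_iterator<Iter>::value>>
+//   void AssignFromRange(Iter first, Iter last);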
+template <typename T, typename = void>
+struct is_iterator : std::false_type {};
+
+template <typename T>
+struct is_iterator<T,
+                   void_t<typename std::iterator_traits<T>::iterator_category>>
+    : std::true_type {};
+
+}  // namespace internal
+
+// is_trivially_copyable is especially hard to get right.
+// - Older versions of libstdc++ will fail to have it like they do for other
+//   type traits. This has become a subset of the second point, but used to be
+//   handled independently.
+// - An experimental release of gcc includes most of type_traits but misses
+//   is_trivially_copyable, so we still have to avoid using libstdc++ in this
+//   case, which is covered by CR_USE_FALLBACKS_FOR_OLD_EXPERIMENTAL_GLIBCXX.
+// - When compiling libc++ from before r239653 with a gcc compiler,
+//   std::is_trivially_copyable can fail. So we need to work around that by not
+//   using the one in libc++ in this case. This is covered by the
+//   CR_USE_FALLBACKS_FOR_GCC_WITH_LIBCXX define, and is discussed in
+//   https://llvm.org/bugs/show_bug.cgi?id=27538#c1 where they point out that
+//   in libc++'s commit r239653 this is fixed by libc++ checking for gcc 5.1.
+// - In both of the above cases we are using the gcc compiler. When defining
+//   this ourselves via compiler intrinsics, the __is_trivially_copyable()
+//   intrinsic is not available on gcc before version 5.1 (see the discussion in
+//   https://llvm.org/bugs/show_bug.cgi?id=27538#c1 again), so we must check for
+//   that version.
+// - When __is_trivially_copyable() is not available because we are on gcc older
+//   than 5.1, we need to fall back to something, so we use __has_trivial_copy()
+//   instead based on what was done one-off in bit_cast() previously.
+
+// TODO(crbug.com/554293): Remove this when all platforms have this in the std
+// namespace and it works with gcc as needed.
+#if defined(CR_USE_FALLBACKS_FOR_OLD_EXPERIMENTAL_GLIBCXX) || \
+    defined(CR_USE_FALLBACKS_FOR_GCC_WITH_LIBCXX)
+template <typename T>
+struct is_trivially_copyable {
+// TODO(danakj): Remove this when android builders are all using a newer version
+// of gcc, or the android ndk is updated to a newer libc++ that does this for
+// us.
+#if _GNUC_VER >= 501
+  static constexpr bool value = __is_trivially_copyable(T);
+#else
+  static constexpr bool value =
+      __has_trivial_copy(T) && __has_trivial_destructor(T);
+#endif
+};
+#else
+template <class T>
+using is_trivially_copyable = std::is_trivially_copyable<T>;
+#endif
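+
+// Illustrative use (|Dest| is hypothetical): callers such as bit_cast() can
+// assert on the trait:
+//
+//   static_assert(base::is_trivially_copyable<Dest>::value,
+//                 "destination type must be trivially copyable");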
+
+#if defined(__GNUC__) && !defined(__clang__) && __GNUC__ <= 7
+// Workaround for g++ 7 and earlier.
+// Due to https://gcc.gnu.org/bugzilla/show_bug.cgi?id=80654, without this
+// Optional<std::vector<T>> where T is non-copyable causes a compile error.
+// As we know it is not trivially copy constructible, explicitly declare so.
+template <typename T>
+struct is_trivially_copy_constructible
+    : std::is_trivially_copy_constructible<T> {};
+
+template <typename... T>
+struct is_trivially_copy_constructible<std::vector<T...>> : std::false_type {};
+#else
+// Otherwise use std::is_trivially_copy_constructible as is.
+template <typename T>
+using is_trivially_copy_constructible = std::is_trivially_copy_constructible<T>;
+#endif
+
+}  // namespace base
+
+#undef CR_USE_FALLBACKS_FOR_GCC_WITH_LIBCXX
+#undef CR_USE_FALLBACKS_FOR_OLD_EXPERIMENTAL_GLIBCXX
+
+#endif  // BASE_TEMPLATE_UTIL_H_
diff --git a/base/template_util_unittest.cc b/base/template_util_unittest.cc
new file mode 100644
index 0000000..2c42445
--- /dev/null
+++ b/base/template_util_unittest.cc
@@ -0,0 +1,106 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/template_util.h"
+
+#include <string>
+
+#include "base/containers/flat_tree.h"
+#include "base/test/move_only_int.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+enum SimpleEnum { SIMPLE_ENUM };
+enum EnumWithExplicitType : uint64_t { ENUM_WITH_EXPLICIT_TYPE };
+enum class ScopedEnum { SCOPED_ENUM };
+enum class ScopedEnumWithOperator { SCOPED_ENUM_WITH_OPERATOR };
+std::ostream& operator<<(std::ostream& os, ScopedEnumWithOperator v) {
+  return os;
+}
+struct SimpleStruct {};
+struct StructWithOperator {};
+std::ostream& operator<<(std::ostream& os, const StructWithOperator& v) {
+  return os;
+}
+
+// is_non_const_reference<Type>
+static_assert(!is_non_const_reference<int>::value, "IsNonConstReference");
+static_assert(!is_non_const_reference<const int&>::value,
+              "IsNonConstReference");
+static_assert(is_non_const_reference<int&>::value, "IsNonConstReference");
+
+// A few standard types that definitely support printing.
+static_assert(internal::SupportsOstreamOperator<int>::value,
+              "ints should be printable");
+static_assert(internal::SupportsOstreamOperator<const char*>::value,
+              "C strings should be printable");
+static_assert(internal::SupportsOstreamOperator<std::string>::value,
+              "std::string should be printable");
+
+// operator<< support on various kinds of enums.
+static_assert(internal::SupportsOstreamOperator<SimpleEnum>::value,
+              "simple enum should be printable by value");
+static_assert(internal::SupportsOstreamOperator<const SimpleEnum&>::value,
+              "simple enum should be printable by const ref");
+static_assert(internal::SupportsOstreamOperator<EnumWithExplicitType>::value,
+              "enum with explicit type should be printable by value");
+static_assert(
+    internal::SupportsOstreamOperator<const EnumWithExplicitType&>::value,
+    "enum with explicit type should be printable by const ref");
+static_assert(!internal::SupportsOstreamOperator<ScopedEnum>::value,
+              "scoped enum should not be printable by value");
+static_assert(!internal::SupportsOstreamOperator<const ScopedEnum&>::value,
+              "simple enum should not be printable by const ref");
+static_assert(internal::SupportsOstreamOperator<ScopedEnumWithOperator>::value,
+              "scoped enum with operator<< should be printable by value");
+static_assert(
+    internal::SupportsOstreamOperator<const ScopedEnumWithOperator&>::value,
+    "scoped enum with operator<< should be printable by const ref");
+
+// operator<< support on structs.
+static_assert(!internal::SupportsOstreamOperator<SimpleStruct>::value,
+              "simple struct should not be printable by value");
+static_assert(!internal::SupportsOstreamOperator<const SimpleStruct&>::value,
+              "simple struct should not be printable by const ref");
+static_assert(internal::SupportsOstreamOperator<StructWithOperator>::value,
+              "struct with operator<< should be printable by value");
+static_assert(
+    internal::SupportsOstreamOperator<const StructWithOperator&>::value,
+    "struct with operator<< should be printable by const ref");
+
+// base::is_trivially_copyable
+class TrivialCopy {
+ public:
+  TrivialCopy(int d) : data_(d) {}
+
+ protected:
+  int data_;
+};
+
+class TrivialCopyButWithDestructor : public TrivialCopy {
+ public:
+  TrivialCopyButWithDestructor(int d) : TrivialCopy(d) {}
+  ~TrivialCopyButWithDestructor() { data_ = 0; }
+};
+
+static_assert(base::is_trivially_copyable<TrivialCopy>::value,
+              "TrivialCopy should be detected as trivially copyable");
+static_assert(!base::is_trivially_copyable<TrivialCopyButWithDestructor>::value,
+              "TrivialCopyButWithDestructor should not be detected as "
+              "trivially copyable");
+
+class NoCopy {
+ public:
+  NoCopy(const NoCopy&) = delete;
+};
+
+static_assert(
+    !base::is_trivially_copy_constructible<std::vector<NoCopy>>::value,
+    "is_trivially_copy_constructible<std::vector<T>> must be compiled.");
+
+}  // namespace
+
+}  // namespace base
diff --git a/base/test/BUILD.gn b/base/test/BUILD.gn
new file mode 100644
index 0000000..576729c
--- /dev/null
+++ b/base/test/BUILD.gn
@@ -0,0 +1,443 @@
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/compiled_action.gni")
+import("//build/config/ui.gni")
+import("//build/config/nacl/config.gni")
+
+if (is_android) {
+  import("//build/config/android/rules.gni")
+}
+
+static_library("test_config") {
+  testonly = true
+  sources = [
+    "test_switches.cc",
+    "test_switches.h",
+    "test_timeouts.cc",
+    "test_timeouts.h",
+  ]
+  deps = [
+    "//base",
+  ]
+}
+
+static_library("test_support") {
+  testonly = true
+  sources = [
+    "../trace_event/trace_config_memory_test_util.h",
+    "android/java_handler_thread_helpers.cc",
+    "android/java_handler_thread_helpers.h",
+    "android/url_utils.cc",
+    "android/url_utils.h",
+    "bind_test_util.h",
+    "copy_only_int.h",
+    "fuzzed_data_provider.cc",
+    "fuzzed_data_provider.h",
+    "gtest_util.cc",
+    "gtest_util.h",
+    "gtest_xml_unittest_result_printer.cc",
+    "gtest_xml_unittest_result_printer.h",
+    "gtest_xml_util.cc",
+    "gtest_xml_util.h",
+    "histogram_tester.cc",
+    "histogram_tester.h",
+    "icu_test_util.cc",
+    "icu_test_util.h",
+    "ios/wait_util.h",
+    "ios/wait_util.mm",
+    "launcher/test_result.cc",
+    "launcher/test_result.h",
+    "launcher/test_results_tracker.h",
+    "launcher/unit_test_launcher.h",
+    "mock_callback.h",
+    "mock_chrome_application_mac.h",
+    "mock_chrome_application_mac.mm",
+    "mock_devices_changed_observer.cc",
+    "mock_devices_changed_observer.h",
+    "mock_entropy_provider.cc",
+    "mock_entropy_provider.h",
+    "mock_log.cc",
+    "mock_log.h",
+    "move_only_int.h",
+    "multiprocess_test.h",
+    "multiprocess_test_android.cc",
+    "null_task_runner.cc",
+    "null_task_runner.h",
+    "perf_log.cc",
+    "perf_log.h",
+    "perf_test_suite.cc",
+    "perf_test_suite.h",
+    "perf_time_logger.cc",
+    "perf_time_logger.h",
+    "power_monitor_test_base.cc",
+    "power_monitor_test_base.h",
+    "scoped_command_line.cc",
+    "scoped_command_line.h",
+    "scoped_environment_variable_override.cc",
+    "scoped_environment_variable_override.h",
+    "scoped_feature_list.cc",
+    "scoped_feature_list.h",
+    "scoped_mock_time_message_loop_task_runner.cc",
+    "scoped_mock_time_message_loop_task_runner.h",
+    "scoped_path_override.cc",
+    "scoped_path_override.h",
+    "scoped_task_environment.cc",
+    "scoped_task_environment.h",
+    "sequenced_task_runner_test_template.cc",
+    "sequenced_task_runner_test_template.h",
+    "simple_test_clock.cc",
+    "simple_test_clock.h",
+    "simple_test_tick_clock.cc",
+    "simple_test_tick_clock.h",
+    "task_runner_test_template.cc",
+    "task_runner_test_template.h",
+    "test_discardable_memory_allocator.cc",
+    "test_discardable_memory_allocator.h",
+    "test_file_util.cc",
+    "test_file_util.h",
+    "test_file_util_android.cc",
+    "test_file_util_linux.cc",
+    "test_file_util_mac.cc",
+    "test_file_util_win.cc",
+    "test_io_thread.cc",
+    "test_io_thread.h",
+    "test_listener_ios.h",
+    "test_listener_ios.mm",
+    "test_message_loop.cc",
+    "test_message_loop.h",
+    "test_mock_time_task_runner.cc",
+    "test_mock_time_task_runner.h",
+    "test_pending_task.cc",
+    "test_pending_task.h",
+    "test_reg_util_win.cc",
+    "test_reg_util_win.h",
+    "test_shared_memory_util.cc",
+    "test_shared_memory_util.h",
+    "test_shortcut_win.cc",
+    "test_shortcut_win.h",
+    "test_simple_task_runner.cc",
+    "test_simple_task_runner.h",
+    "test_suite.cc",
+    "test_suite.h",
+    "test_support_android.cc",
+    "test_support_android.h",
+    "test_support_ios.h",
+    "test_support_ios.mm",
+    "test_ui_thread_android.cc",
+    "test_ui_thread_android.h",
+    "thread_test_helper.cc",
+    "thread_test_helper.h",
+    "trace_event_analyzer.cc",
+    "trace_event_analyzer.h",
+    "trace_to_file.cc",
+    "trace_to_file.h",
+    "user_action_tester.cc",
+    "user_action_tester.h",
+    "values_test_util.cc",
+    "values_test_util.h",
+  ]
+
+  if (is_ios) {
+    sources += [ "launcher/unit_test_launcher_ios.cc" ]
+  } else if (!is_nacl_nonsfi) {
+    sources += [
+      "launcher/test_launcher.cc",
+      "launcher/test_launcher.h",
+      "launcher/test_launcher_tracer.cc",
+      "launcher/test_launcher_tracer.h",
+      "launcher/test_results_tracker.cc",
+      "launcher/unit_test_launcher.cc",
+      "multiprocess_test.cc",
+    ]
+  }
+
+  configs += [ "//build/config:precompiled_headers" ]
+
+  data = [
+    # The isolate needs this script for setting up the test. It's not actually
+    # needed to run this target locally.
+    "//testing/test_env.py",
+  ]
+
+  public_deps = [
+    ":test_config",
+    "//base",
+    "//base:base_static",
+    "//base:i18n",
+  ]
+  deps = [
+    "//base/third_party/dynamic_annotations",
+    "//testing/gmock",
+    "//testing/gtest",
+    "//third_party/icu:icuuc",
+    "//third_party/libxml",
+  ]
+
+  if (is_posix || is_fuchsia) {
+    sources += [
+      "scoped_locale.cc",
+      "scoped_locale.h",
+      "test_file_util_posix.cc",
+    ]
+  }
+
+  if (is_linux) {
+    public_deps += [ ":fontconfig_util_linux" ]
+    data_deps = [
+      "//third_party/test_fonts",
+    ]
+    if (current_toolchain == host_toolchain) {
+      data_deps += [ ":do_generate_fontconfig_caches" ]
+      data += [ "$root_out_dir/fontconfig_caches/" ]
+    }
+  }
+
+  if (is_ios) {
+    set_sources_assignment_filter([])
+    sources += [ "test_file_util_mac.cc" ]
+    set_sources_assignment_filter(sources_assignment_filter)
+  }
+
+  if (is_mac) {
+    libs = [ "AppKit.framework" ]
+  }
+
+  if (is_android) {
+    set_sources_assignment_filter([])
+    sources += [ "test_file_util_linux.cc" ]
+    set_sources_assignment_filter(sources_assignment_filter)
+    deps += [
+      ":base_unittests_jni_headers",
+      ":test_support_jni_headers",
+    ]
+    public_deps += [ ":test_support_java" ]
+  }
+
+  if (is_nacl_nonsfi) {
+    sources += [
+      "launcher/test_launcher.h",
+      "launcher/test_result.h",
+      "launcher/unit_test_launcher.h",
+      "launcher/unit_test_launcher_nacl_nonsfi.cc",
+    ]
+    sources -= [
+      "gtest_xml_util.cc",
+      "gtest_xml_util.h",
+      "icu_test_util.cc",
+      "icu_test_util.h",
+      "perf_test_suite.cc",
+      "perf_test_suite.h",
+      "scoped_path_override.cc",
+      "scoped_path_override.h",
+      "test_discardable_memory_allocator.cc",
+      "test_discardable_memory_allocator.h",
+      "test_file_util.cc",
+      "test_file_util.h",
+      "test_file_util_posix.cc",
+      "test_suite.cc",
+      "test_suite.h",
+      "trace_to_file.cc",
+      "trace_to_file.h",
+    ]
+    public_deps -= [ "//base:i18n" ]
+    deps -= [
+      "//third_party/icu:icuuc",
+      "//third_party/libxml",
+    ]
+  }
+}
+
+config("perf_test_config") {
+  defines = [ "PERF_TEST" ]
+}
+
+# This is a source set instead of a static library because it seems like some
+# linkers get confused when "main" is in a static library, and if you link to
+# this, you always want the object file anyway.
+source_set("test_support_perf") {
+  testonly = true
+  sources = [
+    "run_all_perftests.cc",
+  ]
+  deps = [
+    ":test_support",
+    "//base",
+    "//testing/gtest",
+  ]
+
+  public_configs = [ ":perf_test_config" ]
+}
+
+static_library("test_launcher_nacl_nonsfi") {
+  testonly = true
+  sources = [
+    "launcher/test_launcher_nacl_nonsfi.cc",
+    "launcher/test_launcher_nacl_nonsfi.h",
+  ]
+  deps = [
+    ":test_support",
+  ]
+}
+
+static_library("run_all_unittests") {
+  testonly = true
+  sources = [
+    "run_all_unittests.cc",
+  ]
+  deps = [
+    ":test_support",
+  ]
+}
+
+# These sources are linked into both the base_unittests binary and the test
+# shared library target below.
+source_set("native_library_test_utils") {
+  testonly = true
+  sources = [
+    "native_library_test_utils.cc",
+    "native_library_test_utils.h",
+  ]
+}
+
+# This shared library is dynamically loaded by NativeLibrary unittests.
+shared_library("test_shared_library") {
+  testonly = true
+  sources = [
+    "test_shared_library.cc",
+  ]
+
+  deps = [
+    ":native_library_test_utils",
+  ]
+}
+
+static_library("run_all_base_unittests") {
+  # Only targets in base should depend on this, targets outside base
+  # should depend on run_all_unittests above.
+  visibility = [ "//base/*" ]
+  testonly = true
+  sources = [
+    "run_all_base_unittests.cc",
+  ]
+  deps = [
+    ":test_support",
+  ]
+}
+
+if (is_linux) {
+  source_set("fontconfig_util_linux") {
+    sources = [
+      "fontconfig_util_linux.cc",
+      "fontconfig_util_linux.h",
+    ]
+    deps = [
+      "//base",
+      "//third_party/fontconfig",
+    ]
+  }
+
+  if (current_toolchain == host_toolchain) {
+    executable("generate_fontconfig_caches") {
+      testonly = true
+      sources = [
+        "generate_fontconfig_caches.cc",
+      ]
+      deps = [
+        ":fontconfig_util_linux",
+        "//base",
+        "//build/config:exe_and_shlib_deps",
+      ]
+    }
+
+    compiled_action("do_generate_fontconfig_caches") {
+      testonly = true
+      tool = ":generate_fontconfig_caches"
+      data_deps = [
+        "//third_party/test_fonts",
+      ]
+      args = []
+      outputs = [
+        "$root_out_dir/fontconfig_caches/STAMP",
+      ]
+    }
+  }
+
+  shared_library("malloc_wrapper") {
+    testonly = true
+    sources = [
+      "malloc_wrapper.cc",
+    ]
+    deps = [
+      "//base",
+      "//build/config:exe_and_shlib_deps",
+    ]
+  }
+}
+
+if (is_android) {
+  generate_jni("base_unittests_jni_headers") {
+    sources = [
+      "android/java/src/org/chromium/base/ContentUriTestUtils.java",
+      "android/java/src/org/chromium/base/JavaHandlerThreadHelpers.java",
+      "android/java/src/org/chromium/base/TestUiThread.java",
+    ]
+    jni_package = "base"
+  }
+
+  generate_jni("test_support_jni_headers") {
+    sources = [
+      "android/java/src/org/chromium/base/MainReturnCodeResult.java",
+      "android/java/src/org/chromium/base/MultiprocessTestClientLauncher.java",
+      "android/javatests/src/org/chromium/base/test/util/UrlUtils.java",
+    ]
+    jni_package = "base"
+  }
+
+  android_library("test_support_java") {
+    testonly = true
+    deps = [
+      "//base:base_java",
+      "//testing/android/native_test:native_main_runner_java",
+      "//third_party/android_tools:android_support_annotations_java",
+      "//third_party/jsr-305:jsr_305_javalib",
+    ]
+    srcjar_deps = [ ":test_support_java_aidl" ]
+    java_files = [
+      "android/java/src/org/chromium/base/MainReturnCodeResult.java",
+      "android/java/src/org/chromium/base/MultiprocessTestClientLauncher.java",
+      "android/java/src/org/chromium/base/MultiprocessTestClientService.java",
+      "android/java/src/org/chromium/base/MultiprocessTestClientService0.java",
+      "android/java/src/org/chromium/base/MultiprocessTestClientService1.java",
+      "android/java/src/org/chromium/base/MultiprocessTestClientService2.java",
+      "android/java/src/org/chromium/base/MultiprocessTestClientService3.java",
+      "android/java/src/org/chromium/base/MultiprocessTestClientService4.java",
+      "android/java/src/org/chromium/base/MultiprocessTestClientServiceDelegate.java",
+    ]
+  }
+
+  android_aidl("test_support_java_aidl") {
+    testonly = true
+    import_include = [
+      "android/java/src",
+      "//base/android/java/src",
+    ]
+    sources = [
+      "android/java/src/org/chromium/base/ITestCallback.aidl",
+      "android/java/src/org/chromium/base/ITestController.aidl",
+    ]
+  }
+}
+
+# Trivial executable which outputs space-delimited argv to stdout,
+# used for testing.
+executable("test_child_process") {
+  testonly = true
+  sources = [
+    "test_child_process.cc",
+  ]
+  deps = [
+    "//build/config:exe_and_shlib_deps",
+  ]
+}
diff --git a/base/test/DEPS b/base/test/DEPS
new file mode 100644
index 0000000..5827c26
--- /dev/null
+++ b/base/test/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+  "+third_party/libxml",
+]
diff --git a/base/test/OWNERS b/base/test/OWNERS
new file mode 100644
index 0000000..6807748
--- /dev/null
+++ b/base/test/OWNERS
@@ -0,0 +1,16 @@
+per-file *task_scheduler*=file://base/task_scheduler/OWNERS
+
+# Metrics-related test utilities:
+per-file *histogram_tester*=file://base/metrics/OWNERS
+per-file *scoped_feature_list*=file://base/metrics/OWNERS
+per-file *user_action_tester*=file://base/metrics/OWNERS
+
+# Tracing test utilities:
+per-file trace_*=file://base/trace_event/OWNERS
+
+# For Android-specific changes:
+per-file *android*=file://base/test/android/OWNERS
+per-file BUILD.gn=file://base/test/android/OWNERS
+
+# Linux fontconfig changes:
+per-file *fontconfig*=file://base/nix/OWNERS
diff --git a/base/test/android/OWNERS b/base/test/android/OWNERS
new file mode 100644
index 0000000..2b0078b
--- /dev/null
+++ b/base/test/android/OWNERS
@@ -0,0 +1,4 @@
+jbudorick@chromium.org
+file://base/android/OWNERS
+
+# COMPONENT: Test>Android
diff --git a/base/test/android/java/src/org/chromium/base/ContentUriTestUtils.java b/base/test/android/java/src/org/chromium/base/ContentUriTestUtils.java
new file mode 100644
index 0000000..fe9d540
--- /dev/null
+++ b/base/test/android/java/src/org/chromium/base/ContentUriTestUtils.java
@@ -0,0 +1,46 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base;
+
+import android.content.ContentValues;
+import android.database.Cursor;
+import android.net.Uri;
+import android.provider.MediaStore;
+
+import org.chromium.base.annotations.CalledByNative;
+
+/**
+ * Utilities for testing operations on content URIs.
+ */
+public class ContentUriTestUtils {
+    /**
+     * Insert an image into the MediaStore, and return the content URI. If the
+     * image already exists in the MediaStore, just retrieve the URI.
+     *
+     * @param path Path to the image file.
+     * @return Content URI of the image.
+     */
+    @CalledByNative
+    private static String insertImageIntoMediaStore(String path) {
+        // Check whether the content URI exists.
+        Cursor c = ContextUtils.getApplicationContext().getContentResolver().query(
+                MediaStore.Images.Media.EXTERNAL_CONTENT_URI,
+                new String[] {MediaStore.Video.VideoColumns._ID},
+                MediaStore.Images.Media.DATA + " LIKE ?", new String[] {path}, null);
+        if (c != null && c.getCount() > 0) {
+            c.moveToFirst();
+            int id = c.getInt(0);
+            return Uri.withAppendedPath(
+                    MediaStore.Images.Media.EXTERNAL_CONTENT_URI, "" + id).toString();
+        }
+
+        // Insert the content URI into MediaStore.
+        ContentValues values = new ContentValues();
+        values.put(MediaStore.MediaColumns.DATA, path);
+        Uri uri = ContextUtils.getApplicationContext().getContentResolver().insert(
+                MediaStore.Images.Media.EXTERNAL_CONTENT_URI, values);
+        return uri.toString();
+    }
+}
diff --git a/base/test/android/java/src/org/chromium/base/ITestCallback.aidl b/base/test/android/java/src/org/chromium/base/ITestCallback.aidl
new file mode 100644
index 0000000..dd208d5
--- /dev/null
+++ b/base/test/android/java/src/org/chromium/base/ITestCallback.aidl
@@ -0,0 +1,23 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base;
+
+import org.chromium.base.ITestController;
+import org.chromium.base.process_launcher.FileDescriptorInfo;
+
+/**
+ * This interface is called by the child process to pass its controller to its parent.
+ */
+interface ITestCallback {
+  oneway void childConnected(ITestController controller);
+
+  /**
+   * Invoked by the service to notify that the main method returned.
+   * IMPORTANT! Should not be marked oneway as the caller will terminate the running process
+   * after this call. Marking it oneway would make the call asynchronous and the process
+   * could terminate before the call was actually sent.
+   */
+  void mainReturned(int returnCode);
+}
diff --git a/base/test/android/java/src/org/chromium/base/ITestController.aidl b/base/test/android/java/src/org/chromium/base/ITestController.aidl
new file mode 100644
index 0000000..d927ee5
--- /dev/null
+++ b/base/test/android/java/src/org/chromium/base/ITestController.aidl
@@ -0,0 +1,25 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base;
+
+import org.chromium.base.process_launcher.FileDescriptorInfo;
+
+/**
+ * This interface is used to control child processes.
+ */
+interface ITestController {
+  /**
+   * Forces the service process to terminate and blocks until the process stops.
+   * @param exitCode the exit code the process should terminate with.
+   * @return always true; a return value is only present to force the call to be synchronous.
+   */
+  boolean forceStopSynchronous(int exitCode);
+
+  /**
+   * Forces the service process to terminate.
+   * @param exitCode the exit code the process should terminate with.
+   */
+  oneway void forceStop(int exitCode);
+}
diff --git a/base/test/android/java/src/org/chromium/base/JavaHandlerThreadHelpers.java b/base/test/android/java/src/org/chromium/base/JavaHandlerThreadHelpers.java
new file mode 100644
index 0000000..3da7ba8
--- /dev/null
+++ b/base/test/android/java/src/org/chromium/base/JavaHandlerThreadHelpers.java
@@ -0,0 +1,63 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base;
+
+import android.os.Handler;
+
+import org.chromium.base.annotations.CalledByNative;
+import org.chromium.base.annotations.CalledByNativeUnchecked;
+import org.chromium.base.annotations.JNINamespace;
+
+import java.util.concurrent.atomic.AtomicBoolean;
+
+@JNINamespace("base::android")
+class JavaHandlerThreadHelpers {
+    private static class TestException extends Exception {}
+
+    // This is executed as part of base_unittests. This tests that JavaHandlerThread can be used
+    // by itself without attaching to its native peer.
+    @CalledByNative
+    private static JavaHandlerThread testAndGetJavaHandlerThread() {
+        final AtomicBoolean taskExecuted = new AtomicBoolean();
+        final Object lock = new Object();
+        Runnable runnable = new Runnable() {
+            @Override
+            public void run() {
+                synchronized (lock) {
+                    taskExecuted.set(true);
+                    lock.notifyAll();
+                }
+            }
+        };
+
+        JavaHandlerThread thread = new JavaHandlerThread("base_unittests_java");
+        thread.maybeStart();
+
+        Handler handler = new Handler(thread.getLooper());
+        handler.post(runnable);
+        synchronized (lock) {
+            while (!taskExecuted.get()) {
+                try {
+                    lock.wait();
+                } catch (InterruptedException e) {
+                    // ignore interrupts
+                }
+            }
+        }
+
+        return thread;
+    }
+
+    @CalledByNativeUnchecked
+    private static void throwException() throws TestException {
+        throw new TestException();
+    }
+
+    @CalledByNative
+    private static boolean isExceptionTestException(Throwable exception) {
+        if (exception == null) return false;
+        return exception instanceof TestException;
+    }
+}
diff --git a/base/test/android/java/src/org/chromium/base/MainReturnCodeResult.java b/base/test/android/java/src/org/chromium/base/MainReturnCodeResult.java
new file mode 100644
index 0000000..9756c97
--- /dev/null
+++ b/base/test/android/java/src/org/chromium/base/MainReturnCodeResult.java
@@ -0,0 +1,40 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base;
+
+import org.chromium.base.annotations.CalledByNative;
+import org.chromium.base.annotations.JNINamespace;
+
+/**
+ * Contains the result of a native main method that ran in a child process.
+ */
+@JNINamespace("base::android")
+public final class MainReturnCodeResult {
+    private final int mMainReturnCode;
+    private final boolean mTimedOut;
+
+    public static MainReturnCodeResult createMainResult(int returnCode) {
+        return new MainReturnCodeResult(returnCode, false /* timedOut */);
+    }
+
+    public static MainReturnCodeResult createTimeoutMainResult() {
+        return new MainReturnCodeResult(0, true /* timedOut */);
+    }
+
+    private MainReturnCodeResult(int mainReturnCode, boolean timedOut) {
+        mMainReturnCode = mainReturnCode;
+        mTimedOut = timedOut;
+    }
+
+    @CalledByNative
+    public int getReturnCode() {
+        return mMainReturnCode;
+    }
+
+    @CalledByNative
+    public boolean hasTimedOut() {
+        return mTimedOut;
+    }
+}
diff --git a/base/test/android/java/src/org/chromium/base/MultiprocessTestClientLauncher.java b/base/test/android/java/src/org/chromium/base/MultiprocessTestClientLauncher.java
new file mode 100644
index 0000000..d0b1850
--- /dev/null
+++ b/base/test/android/java/src/org/chromium/base/MultiprocessTestClientLauncher.java
@@ -0,0 +1,383 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base;
+
+import android.os.Handler;
+import android.os.HandlerThread;
+import android.os.Looper;
+import android.os.ParcelFileDescriptor;
+import android.os.RemoteException;
+import android.util.SparseArray;
+
+import org.chromium.base.annotations.CalledByNative;
+import org.chromium.base.annotations.JNINamespace;
+import org.chromium.base.process_launcher.ChildConnectionAllocator;
+import org.chromium.base.process_launcher.ChildProcessConnection;
+import org.chromium.base.process_launcher.ChildProcessLauncher;
+import org.chromium.base.process_launcher.FileDescriptorInfo;
+import org.chromium.base.process_launcher.IChildProcessService;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.FutureTask;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.ReentrantLock;
+
+import javax.annotation.concurrent.GuardedBy;
+
+/**
+ * Helper class for launching test client processes for multiprocess unit tests.
+ */
+@JNINamespace("base::android")
+public final class MultiprocessTestClientLauncher {
+    private static final String TAG = "cr_MProcTCLauncher";
+
+    private static final int CONNECTION_TIMEOUT_MS = 10 * 1000;
+
+    private static final SparseArray<MultiprocessTestClientLauncher> sPidToLauncher =
+            new SparseArray<>();
+
+    private static final SparseArray<Integer> sPidToMainResult = new SparseArray<>();
+
+    private static final Object sLauncherHandlerInitLock = new Object();
+    private static Handler sLauncherHandler;
+
+    private static ChildConnectionAllocator sConnectionAllocator;
+
+    private final ITestCallback.Stub mCallback = new ITestCallback.Stub() {
+        @Override
+        public void childConnected(ITestController controller) {
+            mTestController = controller;
+            // This method can be called before onConnectionEstablished below has set the PID.
+            // Wait for mPid to be set before notifying.
+            try {
+                mPidReceived.await();
+            } catch (InterruptedException ie) {
+                Log.e(TAG, "Interrupted while waiting for connection PID.");
+                return;
+            }
+            // Now we are fully initialized, notify clients.
+            mConnectedLock.lock();
+            try {
+                mConnected = true;
+                mConnectedCondition.signal();
+            } finally {
+                mConnectedLock.unlock();
+            }
+        }
+
+        @Override
+        public void mainReturned(int returnCode) {
+            mMainReturnCodeLock.lock();
+            try {
+                mMainReturnCode = returnCode;
+                mMainReturnCodeCondition.signal();
+            } finally {
+                mMainReturnCodeLock.unlock();
+            }
+
+            // Also store the return code in a map, as the connection might get disconnected
+            // before waitForMainToReturn is called and we would then have no other way to
+            // retrieve the return code.
+            sPidToMainResult.put(mPid, returnCode);
+        }
+    };
+
+    private final ChildProcessLauncher.Delegate mLauncherDelegate =
+            new ChildProcessLauncher.Delegate() {
+                @Override
+                public void onConnectionEstablished(ChildProcessConnection connection) {
+                    assert isRunningOnLauncherThread();
+                    int pid = connection.getPid();
+                    sPidToLauncher.put(pid, MultiprocessTestClientLauncher.this);
+                    mPid = pid;
+                    mPidReceived.countDown();
+                }
+
+                @Override
+                public void onConnectionLost(ChildProcessConnection connection) {
+                    assert isRunningOnLauncherThread();
+                    assert sPidToLauncher.get(connection.getPid())
+                            == MultiprocessTestClientLauncher.this;
+                    sPidToLauncher.remove(connection.getPid());
+                }
+            };
+
+    private final CountDownLatch mPidReceived = new CountDownLatch(1);
+
+    private final ChildProcessLauncher mLauncher;
+
+    private final ReentrantLock mConnectedLock = new ReentrantLock();
+    private final Condition mConnectedCondition = mConnectedLock.newCondition();
+    @GuardedBy("mConnectedLock")
+    private boolean mConnected;
+
+    private IChildProcessService mService = null;
+    private int mPid;
+    private ITestController mTestController;
+
+    private final ReentrantLock mMainReturnCodeLock = new ReentrantLock();
+    private final Condition mMainReturnCodeCondition = mMainReturnCodeLock.newCondition();
+    // The return code returned by the service's main method.
+    // null if the service has not sent it yet.
+    @GuardedBy("mMainReturnCodeLock")
+    private Integer mMainReturnCode;
+
+    private MultiprocessTestClientLauncher(String[] commandLine, FileDescriptorInfo[] filesToMap) {
+        assert isRunningOnLauncherThread();
+
+        if (sConnectionAllocator == null) {
+            sConnectionAllocator = ChildConnectionAllocator.create(
+                    ContextUtils.getApplicationContext(), sLauncherHandler, null,
+                    "org.chromium.native_test", "org.chromium.base.MultiprocessTestClientService",
+                    "org.chromium.native_test.NUM_TEST_CLIENT_SERVICES", false /* bindToCaller */,
+                    false /* bindAsExternalService */, false /* useStrongBinding */);
+        }
+        mLauncher = new ChildProcessLauncher(sLauncherHandler, mLauncherDelegate, commandLine,
+                filesToMap, sConnectionAllocator, Arrays.asList(mCallback));
+    }
+
+    private boolean waitForConnection(long timeoutMs) {
+        assert !isRunningOnLauncherThread();
+
+        long timeoutNs = TimeUnit.MILLISECONDS.toNanos(timeoutMs);
+        mConnectedLock.lock();
+        try {
+            while (!mConnected) {
+                if (timeoutNs <= 0L) {
+                    return false;
+                }
+                try {
+                    mConnectedCondition.awaitNanos(timeoutNs);
+                } catch (InterruptedException ie) {
+                    Log.e(TAG, "Interrupted while waiting for connection.");
+                }
+            }
+        } finally {
+            mConnectedLock.unlock();
+        }
+        return true;
+    }
+
+    private Integer getMainReturnCode(long timeoutMs) {
+        assert isRunningOnLauncherThread();
+
+        long timeoutNs = TimeUnit.MILLISECONDS.toNanos(timeoutMs);
+        mMainReturnCodeLock.lock();
+        try {
+            while (mMainReturnCode == null) {
+                if (timeoutNs <= 0L) {
+                    return null;
+                }
+                try {
+                    timeoutNs = mMainReturnCodeCondition.awaitNanos(timeoutNs);
+                } catch (InterruptedException ie) {
+                    Log.e(TAG, "Interrupted while waiting for main return code.");
+                }
+            }
+            return mMainReturnCode;
+        } finally {
+            mMainReturnCodeLock.unlock();
+        }
+    }
+
+    /**
+     * Spawns and connects to a child process.
+     * May not be called from the main thread.
+     *
+     * @param commandLine the child process command line argv.
+     * @return the PID of the started process or 0 if the process could not be started.
+     */
+    @CalledByNative
+    private static int launchClient(
+            final String[] commandLine, final FileDescriptorInfo[] filesToMap) {
+        initLauncherThread();
+
+        final MultiprocessTestClientLauncher launcher =
+                runOnLauncherAndGetResult(new Callable<MultiprocessTestClientLauncher>() {
+                    @Override
+                    public MultiprocessTestClientLauncher call() {
+                        return createAndStartLauncherOnLauncherThread(commandLine, filesToMap);
+                    }
+                });
+        if (launcher == null) {
+            return 0;
+        }
+
+        if (!launcher.waitForConnection(CONNECTION_TIMEOUT_MS)) {
+            return 0; // Timed out.
+        }
+
+        return runOnLauncherAndGetResult(new Callable<Integer>() {
+            @Override
+            public Integer call() {
+                int pid = launcher.mLauncher.getPid();
+                assert pid > 0;
+                sPidToLauncher.put(pid, launcher);
+                return pid;
+            }
+        });
+    }
+
+    private static MultiprocessTestClientLauncher createAndStartLauncherOnLauncherThread(
+            String[] commandLine, FileDescriptorInfo[] filesToMap) {
+        assert isRunningOnLauncherThread();
+
+        MultiprocessTestClientLauncher launcher =
+                new MultiprocessTestClientLauncher(commandLine, filesToMap);
+        if (!launcher.mLauncher.start(
+                    true /* setupConnection */, true /* queueIfNoFreeConnection */)) {
+            return null;
+        }
+
+        return launcher;
+    }
+
+    /**
+     * Blocks until the main method invoked by a previous call to launchClient terminates or until
+     * the specified time-out expires.
+     * Returns immediately if main has already returned.
+     * @param pid the process ID that was returned by the call to launchClient
+     * @param timeoutMs the timeout in milliseconds after which the method returns even if main has
+     *        not returned.
+     * @return the return code returned by the main method, or whether it timed out.
+     */
+    @CalledByNative
+    private static MainReturnCodeResult waitForMainToReturn(final int pid, final int timeoutMs) {
+        return runOnLauncherAndGetResult(new Callable<MainReturnCodeResult>() {
+            @Override
+            public MainReturnCodeResult call() {
+                return waitForMainToReturnOnLauncherThread(pid, timeoutMs);
+            }
+        });
+    }
+
+    private static MainReturnCodeResult waitForMainToReturnOnLauncherThread(
+            int pid, int timeoutMs) {
+        assert isRunningOnLauncherThread();
+
+        MultiprocessTestClientLauncher launcher = sPidToLauncher.get(pid);
+        // The launcher can be null if it got cleaned-up (because the connection was lost) before
+        // this gets called.
+        if (launcher != null) {
+            Integer mainResult = launcher.getMainReturnCode(timeoutMs);
+            return mainResult == null ? MainReturnCodeResult.createTimeoutMainResult()
+                                      : MainReturnCodeResult.createMainResult(mainResult);
+        }
+
+        Integer mainResult = sPidToMainResult.get(pid);
+        if (mainResult == null) {
+            Log.e(TAG, "waitForMainToReturn called on unknown connection for pid " + pid);
+            return null;
+        }
+        sPidToMainResult.remove(pid);
+        return MainReturnCodeResult.createMainResult(mainResult);
+    }
+
+    @CalledByNative
+    private static boolean terminate(final int pid, final int exitCode, final boolean wait) {
+        return runOnLauncherAndGetResult(new Callable<Boolean>() {
+            @Override
+            public Boolean call() {
+                return terminateOnLauncherThread(pid, exitCode, wait);
+            }
+        });
+    }
+
+    private static boolean terminateOnLauncherThread(int pid, int exitCode, boolean wait) {
+        assert isRunningOnLauncherThread();
+
+        MultiprocessTestClientLauncher launcher = sPidToLauncher.get(pid);
+        if (launcher == null) {
+            Log.e(TAG, "terminate called on unknown launcher for pid " + pid);
+            return false;
+        }
+        try {
+            if (wait) {
+                launcher.mTestController.forceStopSynchronous(exitCode);
+            } else {
+                launcher.mTestController.forceStop(exitCode);
+            }
+        } catch (RemoteException e) {
+            // We expect this failure, since the forceStop's service implementation calls
+            // System.exit().
+        }
+        return true;
+    }
+
+    private static void initLauncherThread() {
+        synchronized (sLauncherHandlerInitLock) {
+            if (sLauncherHandler != null) return;
+
+            HandlerThread launcherThread = new HandlerThread("LauncherThread");
+            launcherThread.start();
+            sLauncherHandler = new Handler(launcherThread.getLooper());
+        }
+    }
+
+    /** Does not take ownership of fds. */
+    @CalledByNative
+    private static FileDescriptorInfo[] makeFdInfoArray(int[] keys, int[] fds) {
+        FileDescriptorInfo[] fdInfos = new FileDescriptorInfo[keys.length];
+        for (int i = 0; i < keys.length; i++) {
+            FileDescriptorInfo fdInfo = makeFdInfo(keys[i], fds[i]);
+            if (fdInfo == null) {
+                Log.e(TAG, "Failed to make file descriptor (" + keys[i] + ", " + fds[i] + ").");
+                return null;
+            }
+            fdInfos[i] = fdInfo;
+        }
+        return fdInfos;
+    }
+
+    private static FileDescriptorInfo makeFdInfo(int id, int fd) {
+        ParcelFileDescriptor parcelableFd = null;
+        try {
+            parcelableFd = ParcelFileDescriptor.fromFd(fd);
+        } catch (IOException e) {
+            Log.e(TAG, "Invalid FD provided for process connection, aborting connection.", e);
+            return null;
+        }
+        return new FileDescriptorInfo(id, parcelableFd, 0 /* offset */, 0 /* size */);
+    }
+
+    private static boolean isRunningOnLauncherThread() {
+        return sLauncherHandler.getLooper() == Looper.myLooper();
+    }
+
+    private static void runOnLauncherThreadBlocking(final Runnable runnable) {
+        assert !isRunningOnLauncherThread();
+        final Semaphore done = new Semaphore(0);
+        sLauncherHandler.post(new Runnable() {
+            @Override
+            public void run() {
+                runnable.run();
+                done.release();
+            }
+        });
+        done.acquireUninterruptibly();
+    }
+
+    private static <R> R runOnLauncherAndGetResult(Callable<R> callable) {
+        if (isRunningOnLauncherThread()) {
+            try {
+                return callable.call();
+            } catch (Exception e) {
+                throw new RuntimeException(e);
+            }
+        }
+        try {
+            FutureTask<R> task = new FutureTask<R>(callable);
+            sLauncherHandler.post(task);
+            return task.get();
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
+}
diff --git a/base/test/android/java/src/org/chromium/base/MultiprocessTestClientService.java b/base/test/android/java/src/org/chromium/base/MultiprocessTestClientService.java
new file mode 100644
index 0000000..9b50001
--- /dev/null
+++ b/base/test/android/java/src/org/chromium/base/MultiprocessTestClientService.java
@@ -0,0 +1,14 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base;
+
+import org.chromium.base.process_launcher.ChildProcessService;
+
+/** The service implementation used to host all multiprocess test client code. */
+public class MultiprocessTestClientService extends ChildProcessService {
+    public MultiprocessTestClientService() {
+        super(new MultiprocessTestClientServiceDelegate());
+    }
+}
diff --git a/base/test/android/java/src/org/chromium/base/MultiprocessTestClientService0.java b/base/test/android/java/src/org/chromium/base/MultiprocessTestClientService0.java
new file mode 100644
index 0000000..6bdd867
--- /dev/null
+++ b/base/test/android/java/src/org/chromium/base/MultiprocessTestClientService0.java
@@ -0,0 +1,10 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base;
+
+/**
+ * A subclass used only to differentiate different test client service process instances.
+ */
+public class MultiprocessTestClientService0 extends MultiprocessTestClientService {}
diff --git a/base/test/android/java/src/org/chromium/base/MultiprocessTestClientService1.java b/base/test/android/java/src/org/chromium/base/MultiprocessTestClientService1.java
new file mode 100644
index 0000000..69827f0
--- /dev/null
+++ b/base/test/android/java/src/org/chromium/base/MultiprocessTestClientService1.java
@@ -0,0 +1,10 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base;
+
+/**
+ * A subclass used only to differentiate different test client service process instances.
+ */
+public class MultiprocessTestClientService1 extends MultiprocessTestClientService {}
diff --git a/base/test/android/java/src/org/chromium/base/MultiprocessTestClientService2.java b/base/test/android/java/src/org/chromium/base/MultiprocessTestClientService2.java
new file mode 100644
index 0000000..aad11f1
--- /dev/null
+++ b/base/test/android/java/src/org/chromium/base/MultiprocessTestClientService2.java
@@ -0,0 +1,10 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base;
+
+/**
+ * A subclass used only to differentiate different test client service process instances.
+ */
+public class MultiprocessTestClientService2 extends MultiprocessTestClientService {}
diff --git a/base/test/android/java/src/org/chromium/base/MultiprocessTestClientService3.java b/base/test/android/java/src/org/chromium/base/MultiprocessTestClientService3.java
new file mode 100644
index 0000000..20d2561
--- /dev/null
+++ b/base/test/android/java/src/org/chromium/base/MultiprocessTestClientService3.java
@@ -0,0 +1,10 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base;
+
+/**
+ * A subclass used only to differentiate different test client service process instances.
+ */
+public class MultiprocessTestClientService3 extends MultiprocessTestClientService {}
diff --git a/base/test/android/java/src/org/chromium/base/MultiprocessTestClientService4.java b/base/test/android/java/src/org/chromium/base/MultiprocessTestClientService4.java
new file mode 100644
index 0000000..4b14551
--- /dev/null
+++ b/base/test/android/java/src/org/chromium/base/MultiprocessTestClientService4.java
@@ -0,0 +1,10 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base;
+
+/**
+ * A subclass used only to differentiate different test client service process instances.
+ */
+public class MultiprocessTestClientService4 extends MultiprocessTestClientService {}
diff --git a/base/test/android/java/src/org/chromium/base/MultiprocessTestClientServiceDelegate.java b/base/test/android/java/src/org/chromium/base/MultiprocessTestClientServiceDelegate.java
new file mode 100644
index 0000000..83ccaca
--- /dev/null
+++ b/base/test/android/java/src/org/chromium/base/MultiprocessTestClientServiceDelegate.java
@@ -0,0 +1,99 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base;
+
+import android.content.Context;
+import android.content.Intent;
+import android.os.Bundle;
+import android.os.IBinder;
+import android.os.RemoteException;
+import android.util.SparseArray;
+
+import org.chromium.base.library_loader.LibraryLoader;
+import org.chromium.base.library_loader.LibraryProcessType;
+import org.chromium.base.library_loader.ProcessInitException;
+import org.chromium.base.process_launcher.ChildProcessServiceDelegate;
+import org.chromium.native_test.MainRunner;
+
+import java.util.List;
+
+/** Implementation of the ChildProcessServiceDelegate used for the Multiprocess tests. */
+public class MultiprocessTestClientServiceDelegate implements ChildProcessServiceDelegate {
+    private static final String TAG = "MPTestCSDelegate";
+
+    private ITestCallback mTestCallback;
+
+    private final ITestController.Stub mTestController = new ITestController.Stub() {
+        @Override
+        public boolean forceStopSynchronous(int exitCode) {
+            System.exit(exitCode);
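+            // Unreachable in practice: System.exit() does not return. The return statement
+            // below only satisfies the compiler.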
+            return true;
+        }
+
+        @Override
+        public void forceStop(int exitCode) {
+            System.exit(exitCode);
+        }
+    };
+
+    @Override
+    public void onServiceCreated() {
+        PathUtils.setPrivateDataDirectorySuffix("chrome_multiprocess_test_client_service");
+    }
+
+    @Override
+    public void onServiceBound(Intent intent) {}
+
+    @Override
+    public void onConnectionSetup(Bundle connectionBundle, List<IBinder> callbacks) {
+        mTestCallback = ITestCallback.Stub.asInterface(callbacks.get(0));
+    }
+
+    @Override
+    public void onDestroy() {}
+
+    @Override
+    public void preloadNativeLibrary(Context hostContext) {
+        try {
+            LibraryLoader.get(LibraryProcessType.PROCESS_CHILD).preloadNow();
+        } catch (ProcessInitException pie) {
+            Log.w(TAG, "Unable to preload native libraries.", pie);
+        }
+    }
+
+    @Override
+    public boolean loadNativeLibrary(Context hostContext) {
+        try {
+            LibraryLoader.get(LibraryProcessType.PROCESS_CHILD).loadNow();
+            return true;
+        } catch (ProcessInitException pie) {
+            Log.e(TAG, "Unable to load native libraries.", pie);
+            return false;
+        }
+    }
+
+    @Override
+    public SparseArray<String> getFileDescriptorsIdsToKeys() {
+        return null;
+    }
+
+    @Override
+    public void onBeforeMain() {
+        try {
+            mTestCallback.childConnected(mTestController);
+        } catch (RemoteException re) {
+            Log.e(TAG, "Failed to notify parent process of connection.");
+        }
+    }
+
+    @Override
+    public void runMain() {
+        int result = MainRunner.runMain(CommandLine.getJavaSwitchesOrNull());
+        try {
+            mTestCallback.mainReturned(result);
+        } catch (RemoteException re) {
+            Log.e(TAG, "Failed to notify parent process of main returning.");
+        }
+    }
+}
diff --git a/base/test/android/java/src/org/chromium/base/OWNERS b/base/test/android/java/src/org/chromium/base/OWNERS
new file mode 100644
index 0000000..89442ab
--- /dev/null
+++ b/base/test/android/java/src/org/chromium/base/OWNERS
@@ -0,0 +1,2 @@
+per-file *.aidl=set noparent
+per-file *.aidl=file://ipc/SECURITY_OWNERS
\ No newline at end of file
diff --git a/base/test/android/java/src/org/chromium/base/TestUiThread.java b/base/test/android/java/src/org/chromium/base/TestUiThread.java
new file mode 100644
index 0000000..237c0ec
--- /dev/null
+++ b/base/test/android/java/src/org/chromium/base/TestUiThread.java
@@ -0,0 +1,51 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base;
+
+import android.os.Looper;
+
+import org.chromium.base.annotations.CalledByNative;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+/**
+ * Set up a thread as the Chromium UI Thread, and run its looper. This is intended for C++ unit
+ * tests (e.g. the net unit tests) that don't run with the UI thread as their main looper, but
+ * test code that, on Android, uses UI thread events and so needs a running UI thread.
+ */
+@ThreadSafe
+public class TestUiThread {
+    private static final AtomicBoolean sStarted = new AtomicBoolean(false);
+    private static final String TAG = "cr.TestUiThread";
+
+    @CalledByNative
+    private static void loop() {
+        // {@link ThreadUtils#setUiThread(Looper)} can only be called once in a test run, so do this
+        // once, and leave it running.
+        if (sStarted.getAndSet(true)) return;
+
+        final CountDownLatch startLatch = new CountDownLatch(1);
+        new Thread(new Runnable() {
+
+            @Override
+            public void run() {
+                Looper.prepare();
+                ThreadUtils.setUiThread(Looper.myLooper());
+                startLatch.countDown();
+                Looper.loop();
+            }
+
+        }).start();
+
+        try {
+            startLatch.await();
+        } catch (InterruptedException e) {
+            Log.e(TAG, "Failed to set UI Thread");
+        }
+    }
+}
diff --git a/base/test/android/java_handler_thread_helpers.cc b/base/test/android/java_handler_thread_helpers.cc
new file mode 100644
index 0000000..925dc9d
--- /dev/null
+++ b/base/test/android/java_handler_thread_helpers.cc
@@ -0,0 +1,39 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/android/java_handler_thread_helpers.h"
+
+#include "base/android/java_handler_thread.h"
+#include "base/message_loop/message_loop_current.h"
+#include "base/synchronization/waitable_event.h"
+#include "jni/JavaHandlerThreadHelpers_jni.h"
+
+namespace base {
+namespace android {
+
+// static
+std::unique_ptr<JavaHandlerThread> JavaHandlerThreadHelpers::CreateJavaFirst() {
+  return std::make_unique<JavaHandlerThread>(
+      Java_JavaHandlerThreadHelpers_testAndGetJavaHandlerThread(
+          base::android::AttachCurrentThread()));
+}
+
+// static
+void JavaHandlerThreadHelpers::ThrowExceptionAndAbort(WaitableEvent* event) {
+  JNIEnv* env = AttachCurrentThread();
+  Java_JavaHandlerThreadHelpers_throwException(env);
+  DCHECK(HasException(env));
+  base::MessageLoopCurrentForUI::Get()->Abort();
+  event->Signal();
+}
+
+// static
+bool JavaHandlerThreadHelpers::IsExceptionTestException(
+    ScopedJavaLocalRef<jthrowable> exception) {
+  JNIEnv* env = AttachCurrentThread();
+  return Java_JavaHandlerThreadHelpers_isExceptionTestException(env, exception);
+}
+
+}  // namespace android
+}  // namespace base
diff --git a/base/test/android/java_handler_thread_helpers.h b/base/test/android/java_handler_thread_helpers.h
new file mode 100644
index 0000000..5f05cbc
--- /dev/null
+++ b/base/test/android/java_handler_thread_helpers.h
@@ -0,0 +1,42 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ANDROID_JAVA_HANDLER_THREAD_FOR_TESTING_H_
+#define BASE_ANDROID_JAVA_HANDLER_THREAD_FOR_TESTING_H_
+
+#include <jni.h>
+
+#include <memory>
+
+#include "base/android/scoped_java_ref.h"
+
+namespace base {
+
+class WaitableEvent;
+
+namespace android {
+
+class JavaHandlerThread;
+
+// Test-only helpers for working with JavaHandlerThread.
+class JavaHandlerThreadHelpers {
+ public:
+  // Create the Java peer first and test that it works before connecting to the
+  // native object.
+  static std::unique_ptr<JavaHandlerThread> CreateJavaFirst();
+
+  static void ThrowExceptionAndAbort(WaitableEvent* event);
+
+  static bool IsExceptionTestException(
+      ScopedJavaLocalRef<jthrowable> exception);
+
+ private:
+  JavaHandlerThreadHelpers() = default;
+  ~JavaHandlerThreadHelpers() = default;
+};
+
+}  // namespace android
+}  // namespace base
+
+#endif  // BASE_ANDROID_JAVA_HANDLER_THREAD_FOR_TESTING_H_
diff --git a/base/test/android/javatests/src/org/chromium/base/test/BaseChromiumAndroidJUnitRunner.java b/base/test/android/javatests/src/org/chromium/base/test/BaseChromiumAndroidJUnitRunner.java
new file mode 100644
index 0000000..59ab519
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/BaseChromiumAndroidJUnitRunner.java
@@ -0,0 +1,282 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test;
+
+import android.app.Activity;
+import android.app.Application;
+import android.app.Instrumentation;
+import android.content.Context;
+import android.content.pm.InstrumentationInfo;
+import android.content.pm.PackageManager;
+import android.content.pm.PackageManager.NameNotFoundException;
+import android.os.Bundle;
+import android.support.test.InstrumentationRegistry;
+import android.support.test.internal.runner.RunnerArgs;
+import android.support.test.internal.runner.TestExecutor;
+import android.support.test.internal.runner.TestLoader;
+import android.support.test.internal.runner.TestRequest;
+import android.support.test.internal.runner.TestRequestBuilder;
+import android.support.test.runner.AndroidJUnitRunner;
+
+import dalvik.system.DexFile;
+
+import org.chromium.base.BuildConfig;
+import org.chromium.base.Log;
+import org.chromium.base.annotations.MainDex;
+import org.chromium.base.multidex.ChromiumMultiDexInstaller;
+
+import java.io.IOException;
+import java.lang.reflect.Field;
+import java.util.Enumeration;
+
+/**
+ * A custom AndroidJUnitRunner that supports multidex installation and listing out test
+ * information.
+ *
+ * This class is the equivalent of BaseChromiumInstrumentationTestRunner in JUnit3. Please
+ * be aware that this is not a class runner. It is declared in the test apk's
+ * AndroidManifest.xml, in an <instrumentation> element.
+ *
+ * TODO(yolandyan): remove this class after all tests are converted to JUnit4. Use class runner
+ * for test listing.
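+ *
+ * As a rough sketch, the manifest declaration might look like the following (the target
+ * package name is illustrative, not taken from a real manifest):
+ * <pre>
+ * &lt;instrumentation
+ *     android:name="org.chromium.base.test.BaseChromiumAndroidJUnitRunner"
+ *     android:targetPackage="org.example.apk_under_test"
+ *     android:label="Tests for org.example.apk_under_test"/&gt;
+ * </pre>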
+ */
+@MainDex
+public class BaseChromiumAndroidJUnitRunner extends AndroidJUnitRunner {
+    private static final String LIST_ALL_TESTS_FLAG =
+            "org.chromium.base.test.BaseChromiumAndroidJUnitRunner.TestList";
+    private static final String LIST_TESTS_PACKAGE_FLAG =
+            "org.chromium.base.test.BaseChromiumAndroidJUnitRunner.TestListPackage";
+    /**
+     * This flag is supported by AndroidJUnitRunner.
+     *
+     * See the following page for details:
+     * https://developer.android.com/reference/android/support/test/runner/AndroidJUnitRunner.html
+     */
+    private static final String ARGUMENT_TEST_PACKAGE = "package";
+
+    /**
+     * The following arguments correspond to AndroidJUnitRunner command-line arguments:
+     * `annotation`: run only tests with the given annotation
+     * `notAnnotation`: run all tests except the ones with the given annotation
+     * `log`: run in log-only mode, do not execute tests
+     *
+     * For more details, see
+     * https://developer.android.com/reference/android/support/test/runner/AndroidJUnitRunner.html
+     */
+    private static final String ARGUMENT_ANNOTATION = "annotation";
+    private static final String ARGUMENT_NOT_ANNOTATION = "notAnnotation";
+    private static final String ARGUMENT_LOG_ONLY = "log";
+
+    private static final String TAG = "BaseJUnitRunner";
+
+    @Override
+    public Application newApplication(ClassLoader cl, String className, Context context)
+            throws ClassNotFoundException, IllegalAccessException, InstantiationException {
+        // The multidex support library doesn't currently support having the test apk be multidex
+        // as well as the under-test apk being multidex. If MultiDex.install() is called for both,
+        // then re-extraction is triggered every time due to the support library caching only a
+        // single timestamp & crc.
+        //
+        // Attempt to install test apk multidex only if the apk-under-test is not multidex.
+        // It will likely continue to be true that the two are mutually exclusive because:
+        // * ProGuard enabled =>
+        //      Under-test apk is single dex.
+        //      Test apk duplicates under-test classes, so may need multidex.
+        // * ProGuard disabled =>
+        //      Under-test apk might be multidex
+        //      Test apk does not duplicate classes, so does not need multidex.
+        // https://crbug.com/824523
+        if (!BuildConfig.IS_MULTIDEX_ENABLED) {
+            ChromiumMultiDexInstaller.install(new BaseChromiumRunnerCommon.MultiDexContextWrapper(
+                    getContext(), getTargetContext()));
+            BaseChromiumRunnerCommon.reorderDexPathElements(cl, getContext(), getTargetContext());
+        }
+        return super.newApplication(cl, className, context);
+    }
+
+    /**
+     * Adds a TestListInstrumentationRunListener when the arguments ask the runner to list test
+     * information.
+     *
+     * When the arguments contain the test-listing flag, the running mechanism is equivalent to
+     * that of {@link android.support.test.runner.AndroidJUnitRunner#onStart()} except that it
+     * adds only a TestListInstrumentationRunListener to monitor the tests.
+     */
+    @Override
+    public void onStart() {
+        Bundle arguments = InstrumentationRegistry.getArguments();
+        if (arguments != null && arguments.getString(LIST_ALL_TESTS_FLAG) != null) {
+            Log.w(TAG,
+                    String.format("Runner will list out tests info in JSON without running tests. "
+                                    + "Arguments: %s",
+                            arguments.toString()));
+            listTests(); // Intentionally not calling super.onStart() to avoid additional work.
+        } else {
+            if (arguments != null && arguments.getString(ARGUMENT_LOG_ONLY) != null) {
+                Log.e(TAG,
+                        String.format("Runner will log the tests without running tests."
+                                        + " If this cause a test run to fail, please report to"
+                                        + " crbug.com/754015. Arguments: %s",
+                                arguments.toString()));
+            }
+            super.onStart();
+        }
+    }
+
+    // TODO(yolandyan): Move this to test harness side once this class gets removed
+    private void addTestListPackage(Bundle bundle) {
+        PackageManager pm = getContext().getPackageManager();
+        InstrumentationInfo info;
+        try {
+            info = pm.getInstrumentationInfo(getComponentName(), PackageManager.GET_META_DATA);
+        } catch (NameNotFoundException e) {
+            Log.e(TAG, String.format("Could not find component %s", getComponentName()));
+            throw new RuntimeException(e);
+        }
+        Bundle metaDataBundle = info.metaData;
+        if (metaDataBundle != null && metaDataBundle.getString(LIST_TESTS_PACKAGE_FLAG) != null) {
+            bundle.putString(
+                    ARGUMENT_TEST_PACKAGE, metaDataBundle.getString(LIST_TESTS_PACKAGE_FLAG));
+        }
+    }
+
+    private void listTests() {
+        Bundle results = new Bundle();
+        TestListInstrumentationRunListener listener = new TestListInstrumentationRunListener();
+        try {
+            TestExecutor.Builder executorBuilder = new TestExecutor.Builder(this);
+            executorBuilder.addRunListener(listener);
+            Bundle junit3Arguments = new Bundle(InstrumentationRegistry.getArguments());
+            junit3Arguments.putString(ARGUMENT_NOT_ANNOTATION, "org.junit.runner.RunWith");
+            addTestListPackage(junit3Arguments);
+            TestRequest listJUnit3TestRequest = createListTestRequest(junit3Arguments);
+            results = executorBuilder.build().execute(listJUnit3TestRequest);
+
+            Bundle junit4Arguments = new Bundle(InstrumentationRegistry.getArguments());
+            junit4Arguments.putString(ARGUMENT_ANNOTATION, "org.junit.runner.RunWith");
+            addTestListPackage(junit4Arguments);
+
+            // Do not use Log runner from android test support.
+            //
+            // Test logging and execution skipping is handled by BaseJUnit4ClassRunner,
+            // having ARGUMENT_LOG_ONLY in argument bundle here causes AndroidJUnitRunner
+            // to use its own log-only class runner instead of BaseJUnit4ClassRunner.
+            junit4Arguments.remove(ARGUMENT_LOG_ONLY);
+
+            TestRequest listJUnit4TestRequest = createListTestRequest(junit4Arguments);
+            results.putAll(executorBuilder.build().execute(listJUnit4TestRequest));
+            listener.saveTestsToJson(
+                    InstrumentationRegistry.getArguments().getString(LIST_ALL_TESTS_FLAG));
+        } catch (IOException | RuntimeException e) {
+            String msg = "Fatal exception when running tests";
+            Log.e(TAG, msg, e);
+            // report the exception to instrumentation out
+            results.putString(Instrumentation.REPORT_KEY_STREAMRESULT,
+                    msg + "\n" + Log.getStackTraceString(e));
+        }
+        finish(Activity.RESULT_OK, results);
+    }
+
+    private TestRequest createListTestRequest(Bundle arguments) {
+        RunnerArgs runnerArgs =
+                new RunnerArgs.Builder().fromManifest(this).fromBundle(arguments).build();
+        TestRequestBuilder builder = new IncrementalInstallTestRequestBuilder(this, arguments);
+        builder.addFromRunnerArgs(runnerArgs);
+        builder.addApkToScan(getContext().getPackageCodePath());
+        return builder.build();
+    }
+
+    static boolean shouldListTests(Bundle arguments) {
+        return arguments != null && arguments.getString(LIST_ALL_TESTS_FLAG) != null;
+    }
+
+    /**
+     * Wraps TestRequestBuilder to make it work with incremental install.
+     */
+    private static class IncrementalInstallTestRequestBuilder extends TestRequestBuilder {
+        boolean mHasClassList;
+
+        public IncrementalInstallTestRequestBuilder(Instrumentation instr, Bundle bundle) {
+            super(instr, bundle);
+        }
+
+        @Override
+        public TestRequestBuilder addTestClass(String className) {
+            mHasClassList = true;
+            return super.addTestClass(className);
+        }
+
+        @Override
+        public TestRequestBuilder addTestMethod(String testClassName, String testMethodName) {
+            mHasClassList = true;
+            return super.addTestMethod(testClassName, testMethodName);
+        }
+
+        @Override
+        public TestRequest build() {
+            // See crbug.com/841695. TestLoader.isTestClass is incorrectly deciding that
+            // InstrumentationTestSuite is a test class.
+            removeTestClass("android.test.InstrumentationTestSuite");
+            // If a test class was requested, then no need to iterate class loader.
+            if (mHasClassList) {
+                return super.build();
+            }
+            maybeScanIncrementalClasspath();
+            return super.build();
+        }
+
+        private void maybeScanIncrementalClasspath() {
+            DexFile[] incrementalJars = null;
+            try {
+                Class<?> bootstrapClass =
+                        Class.forName("org.chromium.incrementalinstall.BootstrapApplication");
+                incrementalJars =
+                        (DexFile[]) bootstrapClass.getDeclaredField("sIncrementalDexFiles")
+                                .get(null);
+            } catch (Exception e) {
+                // Not an incremental apk.
+            }
+            if (incrementalJars != null) {
+                // builder.addApkToScan uses new DexFile(path) under the hood, which on Dalvik
+                // OSes assumes that the optimized dex is in the default location (and crashes
+                // when it is not). Perform our own dex file scanning instead as a workaround.
+                addTestClasses(incrementalJars, this);
+            }
+        }
+
+        private boolean startsWithAny(String str, String[] prefixes) {
+            for (String prefix : prefixes) {
+                if (str.startsWith(prefix)) {
+                    return true;
+                }
+            }
+            return false;
+        }
+
+        private void addTestClasses(DexFile[] dexFiles, TestRequestBuilder builder) {
+            Log.i(TAG, "Scanning incremental classpath.");
+            String[] excludedPrefixes;
+            try {
+                Field excludedPackagesField =
+                        TestRequestBuilder.class.getDeclaredField("DEFAULT_EXCLUDED_PACKAGES");
+                excludedPackagesField.setAccessible(true);
+                excludedPrefixes = (String[]) excludedPackagesField.get(null);
+            } catch (Exception e) {
+                throw new RuntimeException(e);
+            }
+
+            // Mirror TestRequestBuilder.getClassNamesFromClassPath().
+            TestLoader loader = new TestLoader();
+            for (DexFile dexFile : dexFiles) {
+                Enumeration<String> classNames = dexFile.entries();
+                while (classNames.hasMoreElements()) {
+                    String className = classNames.nextElement();
+                    if (!className.contains("$") && !startsWithAny(className, excludedPrefixes)
+                            && loader.loadIfTest(className) != null) {
+                        addTestClass(className);
+                    }
+                }
+            }
+        }
+    }
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/BaseChromiumRunnerCommon.java b/base/test/android/javatests/src/org/chromium/base/test/BaseChromiumRunnerCommon.java
new file mode 100644
index 0000000..e5eb273
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/BaseChromiumRunnerCommon.java
@@ -0,0 +1,162 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test;
+
+import android.content.Context;
+import android.content.ContextWrapper;
+import android.content.SharedPreferences;
+import android.content.pm.ApplicationInfo;
+import android.content.pm.PackageManager;
+import android.support.v4.content.ContextCompat;
+
+import org.chromium.android.support.PackageManagerWrapper;
+import org.chromium.base.Log;
+import org.chromium.base.annotations.MainDex;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.Serializable;
+import java.lang.reflect.Field;
+import java.util.Arrays;
+import java.util.Comparator;
+
+/**
+ *  Functionality common to the JUnit3 and JUnit4 runners.
+ */
+@MainDex
+class BaseChromiumRunnerCommon {
+    private static final String TAG = "base_test";
+
+    /**
+     *  A ContextWrapper that allows multidex test APKs to extract secondary dexes into
+     *  the APK under test's data directory.
+     */
+    @MainDex
+    static class MultiDexContextWrapper extends ContextWrapper {
+        private Context mAppContext;
+
+        MultiDexContextWrapper(Context instrContext, Context appContext) {
+            super(instrContext);
+            mAppContext = appContext;
+        }
+
+        @Override
+        public File getFilesDir() {
+            return mAppContext.getFilesDir();
+        }
+
+        @Override
+        public SharedPreferences getSharedPreferences(String name, int mode) {
+            return mAppContext.getSharedPreferences(name, mode);
+        }
+
+        @Override
+        public PackageManager getPackageManager() {
+            return new PackageManagerWrapper(super.getPackageManager()) {
+                @Override
+                public ApplicationInfo getApplicationInfo(String packageName, int flags) {
+                    try {
+                        ApplicationInfo ai = super.getApplicationInfo(packageName, flags);
+                        if (packageName.equals(getPackageName())) {
+                            File dataDir = new File(
+                                    ContextCompat.getCodeCacheDir(mAppContext), "test-multidex");
+                            if (!dataDir.exists() && !dataDir.mkdirs()) {
+                                throw new IOException(String.format(
+                                        "Unable to create test multidex directory \"%s\"",
+                                        dataDir.getPath()));
+                            }
+                            ai.dataDir = dataDir.getPath();
+                        }
+                        return ai;
+                    } catch (Exception e) {
+                        Log.e(TAG, "Failed to get application info for %s", packageName, e);
+                    }
+                    return null;
+                }
+            };
+        }
+    }
+
+    /**
+     * Ensure all test dex entries precede app dex entries.
+     *
+     * @param cl ClassLoader to modify. Assumed to be a derivative of
+     *        {@link dalvik.system.BaseDexClassLoader}. If this isn't
+     *        the case, reordering will fail.
+     */
+    static void reorderDexPathElements(ClassLoader cl, Context context, Context targetContext) {
+        try {
+            Log.i(TAG,
+                    "Reordering dex files. If you're building a multidex test APK and see a "
+                            + "class resolving to an unexpected implementation, this may be why.");
+            Field pathListField = findField(cl, "pathList");
+            Object dexPathList = pathListField.get(cl);
+            Field dexElementsField = findField(dexPathList, "dexElements");
+            Object[] dexElementsList = (Object[]) dexElementsField.get(dexPathList);
+            Arrays.sort(dexElementsList,
+                    new DexListReorderingComparator(
+                            context.getPackageName(), targetContext.getPackageName()));
+            dexElementsField.set(dexPathList, dexElementsList);
+        } catch (Exception e) {
+            Log.e(TAG, "Failed to reorder dex elements for testing.", e);
+        }
+    }
+
+    /**
+     *  Comparator for sorting dex list entries.
+     *
+     *  Using this to sort a list of dex list entries will result in the following order:
+     *   - Strings that contain neither the test package nor the app package in lexicographical
+     *     order.
+     *   - Strings that contain the test package in lexicographical order.
+     *   - Strings that contain the app package but not the test package in lexicographical order.
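+     *
+     *  For example (paths are illustrative), with test package "org.foo.tests" and app package
+     *  "org.foo.app", the entries
+     *    [/data/app/org.foo.tests-1/base.apk, /data/app/org.foo.app-1/base.apk,
+     *     /system/framework/android.test.runner.jar]
+     *  would sort to
+     *    [/system/framework/android.test.runner.jar, /data/app/org.foo.tests-1/base.apk,
+     *     /data/app/org.foo.app-1/base.apk].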
+     */
+    private static class DexListReorderingComparator implements Comparator<Object>, Serializable {
+        private String mTestPackage;
+        private String mAppPackage;
+
+        public DexListReorderingComparator(String testPackage, String appPackage) {
+            mTestPackage = testPackage;
+            mAppPackage = appPackage;
+        }
+
+        @Override
+        public int compare(Object o1, Object o2) {
+            String s1 = o1.toString();
+            String s2 = o2.toString();
+            if (s1.contains(mTestPackage)) {
+                if (!s2.contains(mTestPackage)) {
+                    if (s2.contains(mAppPackage)) {
+                        return -1;
+                    } else {
+                        return 1;
+                    }
+                }
+            } else if (s1.contains(mAppPackage)) {
+                if (s2.contains(mTestPackage)) {
+                    return 1;
+                } else if (!s2.contains(mAppPackage)) {
+                    return 1;
+                }
+            } else if (s2.contains(mTestPackage) || s2.contains(mAppPackage)) {
+                return -1;
+            }
+            return s1.compareTo(s2);
+        }
+    }
+
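+    /**
+     * Walks up the class hierarchy of {@code instance} looking for a declared field named
+     * {@code name}, making it accessible before returning it.
+     */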
+    private static Field findField(Object instance, String name) throws NoSuchFieldException {
+        for (Class<?> clazz = instance.getClass(); clazz != null; clazz = clazz.getSuperclass()) {
+            try {
+                Field f = clazz.getDeclaredField(name);
+                f.setAccessible(true);
+                return f;
+            } catch (NoSuchFieldException e) {
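+                // Not declared on this class; try the superclass next.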
+            }
+        }
+        throw new NoSuchFieldException(
+                "Unable to find field " + name + " in " + instance.getClass());
+    }
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/BaseJUnit4ClassRunner.java b/base/test/android/javatests/src/org/chromium/base/test/BaseJUnit4ClassRunner.java
new file mode 100644
index 0000000..102e082
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/BaseJUnit4ClassRunner.java
@@ -0,0 +1,242 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test;
+
+import static org.chromium.base.test.BaseChromiumAndroidJUnitRunner.shouldListTests;
+
+import android.content.Context;
+import android.support.test.InstrumentationRegistry;
+import android.support.test.internal.runner.junit4.AndroidJUnit4ClassRunner;
+import android.support.test.internal.util.AndroidRunnerParams;
+
+import org.junit.runner.Description;
+import org.junit.runner.notification.RunNotifier;
+import org.junit.runners.model.FrameworkMethod;
+import org.junit.runners.model.InitializationError;
+import org.junit.runners.model.Statement;
+
+import org.chromium.base.CollectionUtil;
+import org.chromium.base.CommandLine;
+import org.chromium.base.ContextUtils;
+import org.chromium.base.Log;
+import org.chromium.base.test.BaseTestResult.PreTestHook;
+import org.chromium.base.test.util.DisableIfSkipCheck;
+import org.chromium.base.test.util.ManualSkipCheck;
+import org.chromium.base.test.util.MinAndroidSdkLevelSkipCheck;
+import org.chromium.base.test.util.RestrictionSkipCheck;
+import org.chromium.base.test.util.SkipCheck;
+
+import java.io.File;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ *  A custom runner for JUnit4 tests that checks requirements to conditionally ignore tests.
+ *
+ *  This ClassRunner imports from AndroidJUnit4ClassRunner, which is a hidden but accessible
+ *  class. The reason is that the default JUnit4 runner for Android, AndroidJUnit4, is a final
+ *  class. We need to extend an inheritable class to change {@link #runChild}
+ *  and {@link #isIgnored} to add SkipChecks and PreTestHooks.
+ */
+public class BaseJUnit4ClassRunner extends AndroidJUnit4ClassRunner {
+    private static final String TAG = "BaseJUnit4ClassRunner";
+    private final List<SkipCheck> mSkipChecks;
+    private final List<PreTestHook> mPreTestHooks;
+
+    private static final String EXTRA_TRACE_FILE =
+            "org.chromium.base.test.BaseJUnit4ClassRunner.TraceFile";
+
+    /**
+     * Creates a BaseJUnit4ClassRunner to run {@code klass} and initialize values.
+     *
+     * @throws InitializationError if the test class is malformed
+     */
+    public BaseJUnit4ClassRunner(final Class<?> klass) throws InitializationError {
+        this(klass, null, null);
+    }
+
+    /**
+     * Creates a BaseJUnit4ClassRunner to run {@code klass} and initialize values.
+     *
+     * To add more SkipChecks or PreTestHooks in a subclass, create Lists of checks and hooks
+     * and pass them into the super constructor. If you want to make a subclass extendable by
+     * other class runners, you also have to create a constructor similar to the following one
+     * that merges the default checks and hooks with the checks and hooks passed in to the
+     * constructor.
+     *
+     * <pre>
+     * <code>
+     * e.g.
+     * public class ChildRunner extends BaseJUnit4ClassRunner {
+     *     public ChildRunner(final Class<?> klass) throws InitializationError {
+     *         this(klass, null, null);
+     *     }
+     *
+     *     public ChildRunner(
+     *             final Class<?> klass, List<SkipCheck> checks, List<PreTestHook> hooks)
+     *             throws InitializationError {
+     *         super(klass, mergeList(checks, defaultSkipChecks()),
+     *                 mergeList(hooks, defaultPreTestHooks()));
+     *     }
+     *
+     *     public List<SkipCheck> defaultSkipChecks() {...}
+     *
+     *     public List<PreTestHook> defaultPreTestHooks() {...}
+     * }
+     * </code>
+     * </pre>
+     *
+     * @throws InitializationError if the test class is malformed
+     */
+    public BaseJUnit4ClassRunner(
+            final Class<?> klass, List<SkipCheck> checks, List<PreTestHook> hooks)
+            throws InitializationError {
+        super(klass,
+                new AndroidRunnerParams(InstrumentationRegistry.getInstrumentation(),
+                        InstrumentationRegistry.getArguments(), false, 0L, false));
+
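+        // The trace output path is supplied as an instrumentation argument, e.g. (illustrative
+        // path):
+        //   adb shell am instrument -w \
+        //       -e org.chromium.base.test.BaseJUnit4ClassRunner.TraceFile \
+        //       /sdcard/Download/test_trace.json <test_package>/<runner>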
+        String traceOutput = InstrumentationRegistry.getArguments().getString(EXTRA_TRACE_FILE);
+
+        if (traceOutput != null) {
+            File traceOutputFile = new File(traceOutput);
+            File traceOutputDir = traceOutputFile.getParentFile();
+
+            if (traceOutputDir != null) {
+                if (traceOutputDir.exists() || traceOutputDir.mkdirs()) {
+                    TestTraceEvent.enable(traceOutputFile);
+                }
+            }
+        }
+
+        mSkipChecks = mergeList(checks, defaultSkipChecks());
+        mPreTestHooks = mergeList(hooks, defaultPreTestHooks());
+    }
+
+    /**
+     * Merge two List into a new ArrayList.
+     *
+     * Used to merge the default SkipChecks/PreTestHooks with the subclasses's
+     * SkipChecks/PreTestHooks.
+     */
+    protected static final <T> List<T> mergeList(List<T> listA, List<T> listB) {
+        List<T> l = new ArrayList<>();
+        if (listA != null) {
+            l.addAll(listA);
+        }
+        if (listB != null) {
+            l.addAll(listB);
+        }
+        return l;
+    }
+
+    @Override
+    protected void collectInitializationErrors(List<Throwable> errors) {
+        super.collectInitializationErrors(errors);
+        // Log any initialization errors to help debugging, as the host-side test runner can get
+        // confused by the thrown exception.
+        if (!errors.isEmpty()) {
+            Log.e(TAG, "Initialization errors in %s: %s", getTestClass().getName(), errors);
+        }
+    }
+
+    /**
+     * Change this static function to add or take out default {@code SkipCheck}s.
+     */
+    private static List<SkipCheck> defaultSkipChecks() {
+        return CollectionUtil.newArrayList(
+                new RestrictionSkipCheck(InstrumentationRegistry.getTargetContext()),
+                new MinAndroidSdkLevelSkipCheck(), new DisableIfSkipCheck(), new ManualSkipCheck());
+    }
+
+    /**
+     * Change this static function to add or take out default {@code PreTestHook}s.
+     */
+    private static List<PreTestHook> defaultPreTestHooks() {
+        return null;
+    }
+
+    /**
+     * Evaluate whether a FrameworkMethod is ignored based on {@code SkipCheck}s.
+     */
+    @Override
+    protected boolean isIgnored(FrameworkMethod method) {
+        return super.isIgnored(method) || shouldSkip(method);
+    }
+
+    /**
+     * Runs the tests, either executing them or merely listing them, based on bundle arguments.
+     */
+    @Override
+    public void run(RunNotifier notifier) {
+        ContextUtils.initApplicationContext(
+                InstrumentationRegistry.getTargetContext().getApplicationContext());
+        if (shouldListTests(InstrumentationRegistry.getArguments())) {
+            for (Description child : getDescription().getChildren()) {
+                notifier.fireTestStarted(child);
+                notifier.fireTestFinished(child);
+            }
+        } else {
+            if (!CommandLine.isInitialized()) {
+                initCommandLineForTest();
+            }
+            super.run(notifier);
+        }
+    }
+
+    /**
+     * Override this method to change how the test class runner initializes command-line flags.
+     */
+    protected void initCommandLineForTest() {
+        CommandLine.init(null);
+    }
+
+    @Override
+    protected void runChild(FrameworkMethod method, RunNotifier notifier) {
+        String testName = method.getName();
+        TestTraceEvent.begin(testName);
+
+        runPreTestHooks(method);
+
+        super.runChild(method, notifier);
+
+        TestTraceEvent.end(testName);
+
+        // A new instance of BaseJUnit4ClassRunner is created on the device
+        // for each new method, so runChild will only be called once. Thus, we
+        // can disable tracing, and dump the output, once we get here.
+        TestTraceEvent.disable();
+    }
+
+    /**
+     * Loops through all the {@code PreTestHook}s and runs them.
+     */
+    private void runPreTestHooks(FrameworkMethod frameworkMethod) {
+        Method testMethod = frameworkMethod.getMethod();
+        Context targetContext = InstrumentationRegistry.getTargetContext();
+        for (PreTestHook hook : mPreTestHooks) {
+            hook.run(targetContext, testMethod);
+        }
+    }
+
+    /**
+     * Loops through all the {@code SkipCheck}s to determine whether a test should be ignored.
+     */
+    private boolean shouldSkip(FrameworkMethod method) {
+        for (SkipCheck s : mSkipChecks) {
+            if (s.shouldSkip(method)) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    /*
+     * Overrides this method to take a screenshot of the failure before tear-down functions run.
+     */
+    @Override
+    protected Statement withAfters(FrameworkMethod method, Object test, Statement base) {
+        return super.withAfters(method, test, new ScreenshotOnFailureStatement(base));
+    }
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/BaseTestResult.java b/base/test/android/javatests/src/org/chromium/base/test/BaseTestResult.java
new file mode 100644
index 0000000..a80e0cc
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/BaseTestResult.java
@@ -0,0 +1,137 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test;
+
+import android.app.Instrumentation;
+import android.content.Context;
+import android.os.Bundle;
+import android.os.SystemClock;
+
+import junit.framework.TestCase;
+import junit.framework.TestResult;
+
+import org.chromium.base.Log;
+import org.chromium.base.test.util.SkipCheck;
+
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * A test result that can skip tests.
+ */
+public class BaseTestResult extends TestResult {
+    private static final String TAG = "base_test";
+
+    private static final int SLEEP_INTERVAL_MS = 50;
+    private static final int WAIT_DURATION_MS = 5000;
+
+    private final Instrumentation mInstrumentation;
+    private final List<SkipCheck> mSkipChecks;
+    private final List<PreTestHook> mPreTestHooks;
+
+    /**
+     * Creates an instance of BaseTestResult.
+     */
+    public BaseTestResult(Instrumentation instrumentation) {
+        mSkipChecks = new ArrayList<>();
+        mPreTestHooks = new ArrayList<>();
+        mInstrumentation = instrumentation;
+    }
+
+    /**
+     * An interface for classes that have some code to run before a test. They run after
+     * {@link SkipCheck}s. Provides access to the test method (and the annotations defined for it)
+     * and the instrumentation context.
+     */
+    public interface PreTestHook {
+        /**
+         * @param targetContext the instrumentation context that will be used during the test.
+         * @param testMethod the test method to be run.
+         */
+        public void run(Context targetContext, Method testMethod);
+    }
+
+    /**
+     * Adds a check for whether a test should run.
+     *
+     * @param skipCheck The check to add.
+     */
+    public void addSkipCheck(SkipCheck skipCheck) {
+        mSkipChecks.add(skipCheck);
+    }
+
+    /**
+     * Adds a hook that will be executed before each test that runs.
+     *
+     * @param preTestHook The hook to add.
+     */
+    public void addPreTestHook(PreTestHook preTestHook) {
+        mPreTestHooks.add(preTestHook);
+    }
+
+    protected boolean shouldSkip(TestCase test) {
+        for (SkipCheck s : mSkipChecks) {
+            if (s.shouldSkip(test)) return true;
+        }
+        return false;
+    }
+
+    private void runPreTestHooks(TestCase test) {
+        try {
+            Method testMethod = test.getClass().getMethod(test.getName());
+            Context targetContext = getTargetContext();
+
+            for (PreTestHook hook : mPreTestHooks) {
+                hook.run(targetContext, testMethod);
+            }
+        } catch (NoSuchMethodException e) {
+            Log.e(TAG, "Unable to run pre test hooks.", e);
+        }
+    }
+
+    @Override
+    protected void run(TestCase test) {
+        runPreTestHooks(test);
+
+        if (shouldSkip(test)) {
+            startTest(test);
+
+            Bundle skipResult = new Bundle();
+            skipResult.putString("class", test.getClass().getName());
+            skipResult.putString("test", test.getName());
+            skipResult.putBoolean("test_skipped", true);
+            mInstrumentation.sendStatus(0, skipResult);
+
+            endTest(test);
+        } else {
+            super.run(test);
+        }
+    }
+
+    /**
+     * Gets the target context.
+     *
+     * On older versions of Android, getTargetContext() may initially return null, so we have to
+     * wait for it to become available.
+     *
+     * @return The target {@link Context} if available; null otherwise.
+     */
+    public Context getTargetContext() {
+        Context targetContext = mInstrumentation.getTargetContext();
+        try {
+            long startTime = SystemClock.uptimeMillis();
+            // TODO(jbudorick): Convert this to CriteriaHelper once that moves to base/.
+            while (targetContext == null
+                    && SystemClock.uptimeMillis() - startTime < WAIT_DURATION_MS) {
+                Thread.sleep(SLEEP_INTERVAL_MS);
+                targetContext = mInstrumentation.getTargetContext();
+            }
+        } catch (InterruptedException e) {
+            Log.e(TAG, "Interrupted while attempting to initialize the command line.");
+        }
+        return targetContext;
+    }
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/ScreenshotOnFailureStatement.java b/base/test/android/javatests/src/org/chromium/base/test/ScreenshotOnFailureStatement.java
new file mode 100644
index 0000000..397e8ab
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/ScreenshotOnFailureStatement.java
@@ -0,0 +1,83 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test;
+
+import android.support.test.InstrumentationRegistry;
+import android.support.test.uiautomator.UiDevice;
+
+import org.junit.runners.model.Statement;
+
+import org.chromium.base.Log;
+
+import java.io.File;
+
+/**
+ * Statement that captures a screenshot if the |base| statement fails.
+ *
+ * If the screenshot-file instrumentation argument (EXTRA_SCREENSHOT_FILE below) is given,
+ * this |Statement| will save a screenshot to the specified path in the case of a test failure.
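+ *
+ * For example, the argument might be passed on the am instrument command line like this
+ * (the output path is illustrative):
+ * <pre>
+ * adb shell am instrument -w \
+ *     -e org.chromium.base.test.ScreenshotOnFailureStatement.ScreenshotFile \
+ *     /sdcard/test_screenshots/failure.png <test_package>/<runner>
+ * </pre>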
+ */
+public class ScreenshotOnFailureStatement extends Statement {
+    private static final String TAG = "ScreenshotOnFail";
+
+    private static final String EXTRA_SCREENSHOT_FILE =
+            "org.chromium.base.test.ScreenshotOnFailureStatement.ScreenshotFile";
+
+    private final Statement mBase;
+
+    public ScreenshotOnFailureStatement(final Statement base) {
+        mBase = base;
+    }
+
+    @Override
+    public void evaluate() throws Throwable {
+        try {
+            mBase.evaluate();
+        } catch (Throwable e) {
+            takeScreenshot();
+            throw e;
+        }
+    }
+
+    private void takeScreenshot() {
+        String screenshotFilePath =
+                InstrumentationRegistry.getArguments().getString(EXTRA_SCREENSHOT_FILE);
+        if (screenshotFilePath == null) {
+            Log.d(TAG,
+                    String.format("Did not save screenshot of failure. Must specify %s "
+                                    + "instrumentation argument to enable this feature.",
+                            EXTRA_SCREENSHOT_FILE));
+            return;
+        }
+
+        UiDevice uiDevice = null;
+        try {
+            uiDevice = UiDevice.getInstance(InstrumentationRegistry.getInstrumentation());
+        } catch (RuntimeException ex) {
+            Log.d(TAG, "Failed to initialize UiDevice", ex);
+            return;
+        }
+
+        File screenshotFile = new File(screenshotFilePath);
+        File screenshotDir = screenshotFile.getParentFile();
+        if (screenshotDir == null) {
+            Log.d(TAG,
+                    String.format(
+                            "Failed to create parent directory for %s. Can't save screenshot.",
+                            screenshotFile));
+            return;
+        }
+        if (!screenshotDir.exists()) {
+            if (!screenshotDir.mkdirs()) {
+                Log.d(TAG,
+                        String.format(
+                                "Failed to create %s. Can't save screenshot.", screenshotDir));
+                return;
+            }
+        }
+        Log.d(TAG, String.format("Saving screenshot of test failure, %s", screenshotFile));
+        uiDevice.takeScreenshot(screenshotFile);
+    }
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/SetUpStatement.java b/base/test/android/javatests/src/org/chromium/base/test/SetUpStatement.java
new file mode 100644
index 0000000..30ac2b6
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/SetUpStatement.java
@@ -0,0 +1,35 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test;
+
+import org.junit.rules.TestRule;
+import org.junit.runners.model.Statement;
+
+/**
+ * Custom Statement for SetUpTestRules.
+ *
+ * Calls {@link SetUpTestRule#setUp} before evaluating the base {@link Statement} if
+ * {@link SetUpTestRule#shouldSetUp} is true.
+ */
+public class SetUpStatement extends Statement {
+    private final Statement mBase;
+    private final SetUpTestRule<? extends TestRule> mSetUpTestRule;
+    private final boolean mShouldSetUp;
+
+    public SetUpStatement(
+            final Statement base, SetUpTestRule<? extends TestRule> callback, boolean shouldSetUp) {
+        mBase = base;
+        mSetUpTestRule = callback;
+        mShouldSetUp = shouldSetUp;
+    }
+
+    @Override
+    public void evaluate() throws Throwable {
+        if (mShouldSetUp) {
+            mSetUpTestRule.setUp();
+        }
+        mBase.evaluate();
+    }
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/SetUpTestRule.java b/base/test/android/javatests/src/org/chromium/base/test/SetUpTestRule.java
new file mode 100644
index 0000000..57dd8db
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/SetUpTestRule.java
@@ -0,0 +1,35 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test;
+
+import org.junit.rules.TestRule;
+
+/**
+ * An interface for TestRules that can be configured to automatically run set-up logic prior
+ * to &#064;Before.
+ *
+ * TestRules that implement this interface should return a {@link SetUpStatement} from their
+ * {@link TestRule#apply} method.
+ *
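+ * A minimal implementing rule might look like the following sketch (MySetUpTestRule is
+ * illustrative, not a class in this package):
+ * <pre>
+ * <code>
+ * public class MySetUpTestRule implements TestRule, SetUpTestRule<MySetUpTestRule> {
+ *     private boolean mShouldSetUp;
+ *
+ *     &#064;Override
+ *     public MySetUpTestRule shouldSetUp(boolean runSetUp) {
+ *         mShouldSetUp = runSetUp;
+ *         return this;
+ *     }
+ *
+ *     &#064;Override
+ *     public void setUp() { ... }
+ *
+ *     &#064;Override
+ *     public Statement apply(Statement base, Description description) {
+ *         return new SetUpStatement(base, this, mShouldSetUp);
+ *     }
+ * }
+ * </code>
+ * </pre>
+ *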
+ * @param <T> TestRule type that implements this SetUpTestRule
+ */
+public interface SetUpTestRule<T extends TestRule> {
+    /**
+     * Sets whether the TestRule should run setUp automatically.
+     *
+     * This allows the TestRule to be declared in a test like this:
+     * <code>
+     * &#064;Rule TestRule mRule = new MySetUpTestRule().shouldSetUp(true);
+     * </code>
+     *
+     * @return itself to chain up the calls for convenience
+     */
+    T shouldSetUp(boolean runSetUp);
+
+    /**
+     * Specify the setUp action in this method.
+     */
+    void setUp();
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/TestChildProcessConnection.java b/base/test/android/javatests/src/org/chromium/base/test/TestChildProcessConnection.java
new file mode 100644
index 0000000..ae91b44
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/TestChildProcessConnection.java
@@ -0,0 +1,87 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test;
+
+import android.content.ComponentName;
+import android.content.Intent;
+import android.os.Bundle;
+
+import org.chromium.base.process_launcher.ChildProcessConnection;
+
+/** An implementation of ChildProcessConnection that does not connect to a real service. */
+public class TestChildProcessConnection extends ChildProcessConnection {
+    private static class MockChildServiceConnection
+            implements ChildProcessConnection.ChildServiceConnection {
+        private boolean mBound;
+
+        @Override
+        public boolean bind() {
+            mBound = true;
+            return true;
+        }
+
+        @Override
+        public void unbind() {
+            mBound = false;
+        }
+
+        @Override
+        public boolean isBound() {
+            return mBound;
+        }
+    }
+
+    private int mPid;
+    private boolean mConnected;
+    private ServiceCallback mServiceCallback;
+
+    /**
+     * Creates a mock binding corresponding to a real ManagedChildProcessConnection after the
+     * connection is established: with the initial binding bound and no strong binding.
+     */
+    public TestChildProcessConnection(ComponentName serviceName, boolean bindToCaller,
+            boolean bindAsExternalService, Bundle serviceBundle) {
+        super(null /* context */, serviceName, bindToCaller, bindAsExternalService, serviceBundle,
+                new ChildServiceConnectionFactory() {
+                    @Override
+                    public ChildServiceConnection createConnection(Intent bindIntent, int bindFlags,
+                            ChildServiceConnectionDelegate delegate) {
+                        return new MockChildServiceConnection();
+                    }
+                });
+    }
+
+    public void setPid(int pid) {
+        mPid = pid;
+    }
+
+    @Override
+    public int getPid() {
+        return mPid;
+    }
+
+    // We don't have a real service so we have to mock the connection status.
+    @Override
+    public void start(boolean useStrongBinding, ServiceCallback serviceCallback) {
+        super.start(useStrongBinding, serviceCallback);
+        mConnected = true;
+        mServiceCallback = serviceCallback;
+    }
+
+    @Override
+    public void stop() {
+        super.stop();
+        mConnected = false;
+    }
+
+    @Override
+    public boolean isConnected() {
+        return mConnected;
+    }
+
+    public ServiceCallback getServiceCallback() {
+        return mServiceCallback;
+    }
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/TestListInstrumentationRunListener.java b/base/test/android/javatests/src/org/chromium/base/test/TestListInstrumentationRunListener.java
new file mode 100644
index 0000000..8cde570
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/TestListInstrumentationRunListener.java
@@ -0,0 +1,142 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test;
+
+import android.support.test.internal.runner.listener.InstrumentationRunListener;
+
+import org.json.JSONArray;
+import org.json.JSONObject;
+import org.junit.runner.Description;
+
+import org.chromium.base.Log;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStreamWriter;
+import java.io.Writer;
+import java.lang.annotation.Annotation;
+import java.lang.reflect.Method;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * A RunListener that lists out all the test information into a JSON file.
+ */
+public class TestListInstrumentationRunListener extends InstrumentationRunListener {
+    private static final String TAG = "TestListRunListener";
+    private static final Set<String> SKIP_METHODS = new HashSet<>(
+            Arrays.asList(new String[] {"toString", "hashCode", "annotationType", "equals"}));
+
+    private final Map<Class<?>, JSONObject> mTestClassJsonMap = new HashMap<>();
+
+    /**
+     * Stores the test method description in a Map when each test starts.
+     */
+    @Override
+    public void testStarted(Description desc) throws Exception {
+        if (mTestClassJsonMap.containsKey(desc.getTestClass())) {
+            ((JSONArray) mTestClassJsonMap.get(desc.getTestClass()).get("methods"))
+                .put(getTestMethodJSON(desc));
+        } else {
+            Class<?> testClass = desc.getTestClass();
+            mTestClassJsonMap.put(desc.getTestClass(), new JSONObject()
+                    .put("class", testClass.getName())
+                    .put("superclass", testClass.getSuperclass().getName())
+                    .put("annotations",
+                            getAnnotationJSON(Arrays.asList(testClass.getAnnotations())))
+                    .put("methods", new JSONArray().put(getTestMethodJSON(desc))));
+        }
+    }
+
+    /**
+     * Creates a JSONArray with all the test class JSONObjects and saves it to the given output
+     * path.
+     */
+    public void saveTestsToJson(String outputPath) throws IOException {
+        Writer writer = null;
+        File file = new File(outputPath);
+        try {
+            writer = new OutputStreamWriter(new FileOutputStream(file), "UTF-8");
+            JSONArray allTestClassesJSON = new JSONArray(mTestClassJsonMap.values());
+            writer.write(allTestClassesJSON.toString());
+        } catch (IOException e) {
+            Log.e(TAG, "failed to write json to file", e);
+            throw e;
+        } finally {
+            if (writer != null) {
+                try {
+                    writer.close();
+                } catch (IOException e) {
+                    // Intentionally ignore IOException when closing writer
+                }
+            }
+        }
+    }
+
+    /**
+     * Returns a JSONObject that represents the Description of a method.
+     */
+    static JSONObject getTestMethodJSON(Description desc) throws Exception {
+        return new JSONObject()
+                .put("method", desc.getMethodName())
+                .put("annotations", getAnnotationJSON(desc.getAnnotations()));
+    }
+
+    /**
+     * Creates a JSONObject that represents a collection of annotations.
+     *
+     * For example, for the following group of annotations for ExampleClass
+     * <code>
+     * @A
+     * @B(message = "hello", level = 3)
+     * public class ExampleClass() {}
+     * </code>
+     *
+     * This method would return a JSONObject as such:
+     * <code>
+     * {
+     *   "A": {},
+     *   "B": {
+     *     "message": "hello",
+     *     "level": "3"
+     *   }
+     * }
+     * </code>
+     *
+     * The method accomplishes this by looping through each annotation and reflectively calling
+     * the annotation's methods to get the element values, skipping methods such as "equals()"
+     * or "hashCode()".
+     */
+    static JSONObject getAnnotationJSON(Collection<Annotation> annotations)
+            throws Exception {
+        JSONObject annotationsJsons = new JSONObject();
+        for (Annotation a : annotations) {
+            JSONObject elementJsonObject = new JSONObject();
+            for (Method method : a.annotationType().getMethods()) {
+                if (SKIP_METHODS.contains(method.getName())) {
+                    continue;
+                }
+                try {
+                    Object value = method.invoke(a);
+                    if (value == null) {
+                        elementJsonObject.put(method.getName(), null);
+                    } else {
+                        elementJsonObject.put(method.getName(),
+                                value.getClass().isArray()
+                                        ? new JSONArray(Arrays.asList((Object[]) value))
+                                        : value.toString());
+                    }
+                } catch (IllegalArgumentException e) {
+                    // Ignore annotation elements whose values cannot be read reflectively.
+                }
+            }
+            annotationsJsons.put(a.annotationType().getSimpleName(), elementJsonObject);
+        }
+        return annotationsJsons;
+    }
+}
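
The listener only collects descriptions; a harness must run the tests and then call saveTestsToJson(). A minimal sketch of that wiring using plain JUnit 4 APIs (JUnitCore, Request); the nested test class and the output path are hypothetical:

import org.junit.Test;
import org.junit.runner.JUnitCore;
import org.junit.runner.Request;

import org.chromium.base.test.TestListInstrumentationRunListener;

public class ListTestsSketch {
    /** A trivial, hypothetical test class to list. */
    public static class MyExampleTest {
        @Test
        public void testNothing() {}
    }

    public static void main(String[] args) throws Exception {
        TestListInstrumentationRunListener listener =
                new TestListInstrumentationRunListener();
        JUnitCore core = new JUnitCore();
        core.addListener(listener); // InstrumentationRunListener extends RunListener.

        // testStarted() fills the per-class JSON map as each test begins.
        core.run(Request.aClass(MyExampleTest.class));

        // Writes a JSON array of objects shaped like:
        // [{"class": "...", "superclass": "...", "annotations": {...}, "methods": [...]}]
        listener.saveTestsToJson("/sdcard/test_list.json"); // hypothetical path
    }
}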
diff --git a/base/test/android/javatests/src/org/chromium/base/test/TestTraceEvent.java b/base/test/android/javatests/src/org/chromium/base/test/TestTraceEvent.java
new file mode 100644
index 0000000..5e0f6b3
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/TestTraceEvent.java
@@ -0,0 +1,168 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test;
+
+import org.json.JSONArray;
+import org.json.JSONException;
+import org.json.JSONObject;
+
+import org.chromium.base.Log;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.PrintStream;
+
+/**
+ * TestTraceEvent is a modified version of TraceEvent, intended for tracing test runs.
+ */
+public class TestTraceEvent {
+    private static final String TAG = "TestTraceEvent";
+
+    /** The event types understood by the trace scripts. */
+    private enum EventType {
+        BEGIN("B"),
+        END("E"),
+        INSTANT("I");
+
+        private final String mTypeStr;
+
+        EventType(String typeStr) {
+            mTypeStr = typeStr;
+        }
+
+        @Override
+        public String toString() {
+            return mTypeStr;
+        }
+    }
+
+    // Locks internal fields.
+    private static final Object sLock = new Object();
+
+    private static File sOutputFile;
+
+    private static boolean sEnabled;
+
+    // A list of trace event strings.
+    private static JSONArray sTraceStrings;
+
+    /**
+     * Enable tracing, and set a specific output file. If tracing was previously enabled and
+     * disabled, that data is cleared.
+     *
+     * @param outputFile Which file to append the trace data to.
+     */
+    public static void enable(File outputFile) {
+        synchronized (sLock) {
+            if (sEnabled) return;
+
+            sEnabled = true;
+            sOutputFile = outputFile;
+            sTraceStrings = new JSONArray();
+        }
+    }
+
+    /**
+     * Disable tracing and dump the collected trace data to the output file.
+     */
+    public static void disable() {
+        synchronized (sLock) {
+            if (!sEnabled) return;
+
+            sEnabled = false;
+            dumpTraceOutput();
+            sTraceStrings = null;
+        }
+    }
+
+    /**
+     * @return True if tracing is enabled, false otherwise.
+     */
+    public static boolean isEnabled() {
+        synchronized (sLock) {
+            return sEnabled;
+        }
+    }
+
+    /**
+     * Record an "instant" trace event. E.g. "screen update happened".
+     */
+    public static void instant(String name) {
+        synchronized (sLock) {
+            if (!sEnabled) return;
+
+            saveTraceString(name, name.hashCode(), EventType.INSTANT);
+        }
+    }
+
+    /**
+     * Record an "begin" trace event. Begin trace events should have a matching end event (recorded
+     * by calling {@link #end(String)}).
+     */
+    public static void begin(String name) {
+        synchronized (sLock) {
+            if (!sEnabled) return;
+
+            saveTraceString(name, name.hashCode(), EventType.BEGIN);
+        }
+    }
+
+    /**
+     * Record an "end" trace event, to match a begin event (recorded by calling {@link
+     * #begin(String)}). The time delta between begin and end is usually interesting to graph code.
+     */
+    public static void end(String name) {
+        synchronized (sLock) {
+            if (!sEnabled) return;
+
+            saveTraceString(name, name.hashCode(), EventType.END);
+        }
+    }
+
+    /**
+     * Save a trace event as a JSON dict.
+     *
+     * @param name The name of the trace event.
+     * @param id An identifier for the event, to be saved as the thread ID.
+     * @param type the type of trace event (B, E, I).
+     */
+    private static void saveTraceString(String name, long id, EventType type) {
+        // We use System.currentTimeMillis() because it agrees with the value of
+        // the $EPOCHREALTIME environment variable. The Python test runner code
+        // uses that variable to synchronize timing.
+        long timeMicroseconds = System.currentTimeMillis() * 1000;
+
+        try {
+            JSONObject traceObj = new JSONObject();
+            traceObj.put("cat", "Java");
+            traceObj.put("ts", timeMicroseconds);
+            traceObj.put("ph", type);
+            traceObj.put("name", name);
+            traceObj.put("tid", id);
+
+            sTraceStrings.put(traceObj);
+        } catch (JSONException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    /**
+     * Dump all tracing data saved so far to the output file.
+     * Output as JSON for parsing convenience.
+     */
+    private static void dumpTraceOutput() {
+        try {
+            PrintStream stream = new PrintStream(new FileOutputStream(sOutputFile, true));
+            try {
+                stream.print(sTraceStrings);
+            } finally {
+                stream.close();
+            }
+        } catch (FileNotFoundException ex) {
+            Log.e(TAG, "Unable to dump trace data to output file.");
+        }
+    }
+}
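
A minimal sketch of the intended lifecycle: enable with an output file, emit begin/end/instant events around interesting work, then disable to flush the accumulated JSON to disk. The path is hypothetical.

import java.io.File;

import org.chromium.base.test.TestTraceEvent;

public class TraceSketch {
    public static void runTracedWork() {
        TestTraceEvent.enable(new File("/sdcard/test_trace.json")); // hypothetical path

        TestTraceEvent.begin("setUp");
        // ... expensive setup being measured ...
        TestTraceEvent.end("setUp");

        TestTraceEvent.instant("screen update happened");

        // disable() appends the accumulated JSON events to the output file.
        TestTraceEvent.disable();
    }
}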
diff --git a/base/test/android/javatests/src/org/chromium/base/test/params/BaseJUnit4RunnerDelegate.java b/base/test/android/javatests/src/org/chromium/base/test/params/BaseJUnit4RunnerDelegate.java
new file mode 100644
index 0000000..c0dcd46
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/params/BaseJUnit4RunnerDelegate.java
@@ -0,0 +1,42 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.params;
+
+import org.junit.runners.model.FrameworkMethod;
+import org.junit.runners.model.InitializationError;
+
+import org.chromium.base.test.BaseJUnit4ClassRunner;
+import org.chromium.base.test.params.ParameterizedRunner.ParameterizedTestInstantiationException;
+
+import java.util.List;
+
+/**
+ * Class runner delegate that extends BaseJUnit4ClassRunner
+ */
+public final class BaseJUnit4RunnerDelegate
+        extends BaseJUnit4ClassRunner implements ParameterizedRunnerDelegate {
+    private ParameterizedRunnerDelegateCommon mDelegateCommon;
+
+    public BaseJUnit4RunnerDelegate(Class<?> klass,
+            ParameterizedRunnerDelegateCommon delegateCommon) throws InitializationError {
+        super(klass);
+        mDelegateCommon = delegateCommon;
+    }
+
+    @Override
+    public void collectInitializationErrors(List<Throwable> errors) {
+        ParameterizedRunnerDelegateCommon.collectInitializationErrors(errors);
+    }
+
+    @Override
+    public List<FrameworkMethod> computeTestMethods() {
+        return mDelegateCommon.computeTestMethods();
+    }
+
+    @Override
+    public Object createTest() throws ParameterizedTestInstantiationException {
+        return mDelegateCommon.createTest();
+    }
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/params/BlockJUnit4RunnerDelegate.java b/base/test/android/javatests/src/org/chromium/base/test/params/BlockJUnit4RunnerDelegate.java
new file mode 100644
index 0000000..7c948bb
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/params/BlockJUnit4RunnerDelegate.java
@@ -0,0 +1,42 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.params;
+
+import org.junit.runners.BlockJUnit4ClassRunner;
+import org.junit.runners.model.FrameworkMethod;
+import org.junit.runners.model.InitializationError;
+
+import org.chromium.base.test.params.ParameterizedRunner.ParameterizedTestInstantiationException;
+
+import java.util.List;
+
+/**
+ * Parameterized class runner delegate that extends BlockJUnit4ClassRunner
+ */
+public final class BlockJUnit4RunnerDelegate
+        extends BlockJUnit4ClassRunner implements ParameterizedRunnerDelegate {
+    private ParameterizedRunnerDelegateCommon mDelegateCommon;
+
+    public BlockJUnit4RunnerDelegate(Class<?> klass,
+            ParameterizedRunnerDelegateCommon delegateCommon) throws InitializationError {
+        super(klass);
+        mDelegateCommon = delegateCommon;
+    }
+
+    @Override
+    public void collectInitializationErrors(List<Throwable> errors) {
+        ParameterizedRunnerDelegateCommon.collectInitializationErrors(errors);
+    }
+
+    @Override
+    public List<FrameworkMethod> computeTestMethods() {
+        return mDelegateCommon.computeTestMethods();
+    }
+
+    @Override
+    public Object createTest() throws ParameterizedTestInstantiationException {
+        return mDelegateCommon.createTest();
+    }
+}
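
A hedged sketch of a test class opting into this delegate via @UseRunnerDelegate (defined later in this CL); without the annotation, ParameterizedRunner falls back to BaseJUnit4RunnerDelegate. The test class, names, and values are illustrative only.

import org.junit.Test;
import org.junit.runner.RunWith;

import org.chromium.base.test.params.BlockJUnit4RunnerDelegate;
import org.chromium.base.test.params.ParameterAnnotations.ClassParameter;
import org.chromium.base.test.params.ParameterAnnotations.UseRunnerDelegate;
import org.chromium.base.test.params.ParameterSet;
import org.chromium.base.test.params.ParameterizedRunner;

import java.util.Arrays;
import java.util.List;

// Runs on the vanilla JUnit4 runner instead of BaseJUnit4ClassRunner.
@RunWith(ParameterizedRunner.class)
@UseRunnerDelegate(BlockJUnit4RunnerDelegate.class)
public class PlainParameterizedTest {
    @ClassParameter
    private static List<ParameterSet> sClassParams = Arrays.asList(
            new ParameterSet().value("a").name("A"),
            new ParameterSet().value("b").name("B"));

    private final String mInput;

    public PlainParameterizedTest(String input) {
        mInput = input;
    }

    @Test
    public void testInput() {
        // Runs once per ParameterSet, with mInput = "a", then "b".
    }
}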
diff --git a/base/test/android/javatests/src/org/chromium/base/test/params/MethodParamAnnotationRule.java b/base/test/android/javatests/src/org/chromium/base/test/params/MethodParamAnnotationRule.java
new file mode 100644
index 0000000..2986b96
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/params/MethodParamAnnotationRule.java
@@ -0,0 +1,62 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.params;
+
+import org.junit.runners.model.Statement;
+
+import org.chromium.base.test.params.ParameterAnnotations.UseMethodParameterAfter;
+import org.chromium.base.test.params.ParameterAnnotations.UseMethodParameterBefore;
+
+import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Processes {@link UseMethodParameterBefore} and {@link UseMethodParameterAfter} annotations to run
+ * the corresponding methods. To use, add an instance to the test class and annotate it with
+ * {@code @}{@link org.junit.Rule Rule}.
+ */
+public class MethodParamAnnotationRule extends MethodParamRule {
+    @Override
+    protected Statement applyParameterAndValues(final Statement base, Object target,
+            Class<? extends ParameterProvider> parameterProvider, List<Object> values) {
+        final List<Method> beforeMethods = new ArrayList<>();
+        final List<Method> afterMethods = new ArrayList<>();
+        for (Method m : target.getClass().getDeclaredMethods()) {
+            if (!m.getReturnType().equals(Void.TYPE)) continue;
+            if (!Modifier.isPublic(m.getModifiers())) continue;
+
+            UseMethodParameterBefore beforeAnnotation =
+                    m.getAnnotation(UseMethodParameterBefore.class);
+            if (beforeAnnotation != null && beforeAnnotation.value().equals(parameterProvider)) {
+                beforeMethods.add(m);
+            }
+
+            UseMethodParameterAfter afterAnnotation =
+                    m.getAnnotation(UseMethodParameterAfter.class);
+            if (afterAnnotation != null && afterAnnotation.value().equals(parameterProvider)) {
+                afterMethods.add(m);
+            }
+        }
+
+        if (beforeMethods.isEmpty() && afterMethods.isEmpty()) return base;
+
+        return new Statement() {
+            @Override
+            public void evaluate() throws Throwable {
+                for (Method m : beforeMethods) {
+                    m.invoke(target, values.toArray());
+                }
+
+                base.evaluate();
+
+                for (Method m : afterMethods) {
+                    m.invoke(target, values.toArray());
+                }
+            }
+        };
+    }
+}
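
A hedged sketch of the rule in use. The before/after hooks receive the same values as the test itself, so their signatures must match the ParameterSet width; the provider, names, and values are illustrative.

import org.junit.Rule;
import org.junit.Test;
import org.junit.runner.RunWith;

import org.chromium.base.test.params.MethodParamAnnotationRule;
import org.chromium.base.test.params.ParameterAnnotations.UseMethodParameter;
import org.chromium.base.test.params.ParameterAnnotations.UseMethodParameterAfter;
import org.chromium.base.test.params.ParameterAnnotations.UseMethodParameterBefore;
import org.chromium.base.test.params.ParameterProvider;
import org.chromium.base.test.params.ParameterSet;
import org.chromium.base.test.params.ParameterizedRunner;

import java.util.Arrays;

@RunWith(ParameterizedRunner.class)
public class MethodParamRuleSketchTest {
    /** Must be public with a public default constructor. */
    public static class MyParams implements ParameterProvider {
        @Override
        public Iterable<ParameterSet> getParameters() {
            return Arrays.asList(new ParameterSet().value("x").name("X"),
                    new ParameterSet().value("y").name("Y"));
        }
    }

    @Rule
    public MethodParamAnnotationRule mMethodParamRule = new MethodParamAnnotationRule();

    @UseMethodParameterBefore(MyParams.class)
    public void setUpWithValue(String value) {
        // Runs before each test annotated with @UseMethodParameter(MyParams.class).
    }

    @Test
    @UseMethodParameter(MyParams.class)
    public void testWithValue(String value) {
        // Runs once per ParameterSet: value = "x", then "y".
    }

    @UseMethodParameterAfter(MyParams.class)
    public void tearDownWithValue(String value) {
        // Runs after each such test.
    }
}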
diff --git a/base/test/android/javatests/src/org/chromium/base/test/params/MethodParamRule.java b/base/test/android/javatests/src/org/chromium/base/test/params/MethodParamRule.java
new file mode 100644
index 0000000..440831a
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/params/MethodParamRule.java
@@ -0,0 +1,35 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.params;
+
+import org.junit.rules.MethodRule;
+import org.junit.runners.model.FrameworkMethod;
+import org.junit.runners.model.Statement;
+
+import org.chromium.base.test.params.ParameterAnnotations.UseMethodParameter;
+
+import java.util.List;
+
+/**
+ * Abstract base class for rules that are applied to test methods using
+ * {@link org.chromium.base.test.params.ParameterAnnotations.UseMethodParameter method parameters}.
+ */
+public abstract class MethodParamRule implements MethodRule {
+    @Override
+    public Statement apply(final Statement base, FrameworkMethod method, Object target) {
+        UseMethodParameter useParameterProvider = method.getAnnotation(UseMethodParameter.class);
+        if (useParameterProvider == null) return base;
+        Class<? extends ParameterProvider> parameterProvider = useParameterProvider.value();
+
+        if (!(method instanceof ParameterizedFrameworkMethod)) return base;
+        ParameterSet parameters = ((ParameterizedFrameworkMethod) method).getParameterSet();
+        List<Object> values = parameters.getValues();
+
+        return applyParameterAndValues(base, target, parameterProvider, values);
+    }
+
+    protected abstract Statement applyParameterAndValues(final Statement base, Object target,
+            Class<? extends ParameterProvider> parameterProvider, List<Object> values);
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/params/ParameterAnnotations.java b/base/test/android/javatests/src/org/chromium/base/test/params/ParameterAnnotations.java
new file mode 100644
index 0000000..7918369
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/params/ParameterAnnotations.java
@@ -0,0 +1,78 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.params;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * Annotations for Parameterized Tests
+ */
+public class ParameterAnnotations {
+    /**
+     * Annotation for test methods to indicate associated {@link ParameterProvider}.
+     * Note: the class referred to must be public and have a public default constructor.
+     */
+    @Retention(RetentionPolicy.RUNTIME)
+    @Target(ElementType.METHOD)
+    public @interface UseMethodParameter {
+        Class<? extends ParameterProvider> value();
+    }
+
+    /**
+     * Annotation for methods that should be called before running a test with method parameters.
+     *
+     * In order to use this, add a {@link MethodParamAnnotationRule} annotated with
+     * {@code @}{@link org.junit.Rule Rule} to your test class.
+     * @see ParameterProvider
+     * @see UseMethodParameterAfter
+     */
+    @Retention(RetentionPolicy.RUNTIME)
+    @Target(ElementType.METHOD)
+    public @interface UseMethodParameterBefore {
+        Class<? extends ParameterProvider> value();
+    }
+
+    /**
+     * Annotation for methods that should be called after running a test with method parameters.
+     *
+     * In order to use this, add a {@link MethodParamAnnotationRule} annotated with
+     * {@code @}{@link org.junit.Rule Rule} to your test class.
+     * @see ParameterProvider
+     * @see UseMethodParameterBefore
+     */
+    @Retention(RetentionPolicy.RUNTIME)
+    @Target(ElementType.METHOD)
+    public @interface UseMethodParameterAfter {
+        Class<? extends ParameterProvider> value();
+    }
+
+    /**
+     * Annotation for a static field of a {@code List<ParameterSet>} for the entire test class.
+     */
+    @Retention(RetentionPolicy.RUNTIME)
+    @Target(ElementType.FIELD)
+    public @interface ClassParameter {}
+
+    /**
+     * Annotation for a static field of a {@code List<ParameterSet>} for a TestRule.
+     */
+    @Retention(RetentionPolicy.RUNTIME)
+    @Target(ElementType.FIELD)
+    public @interface RuleParameter {}
+
+    /**
+     * Annotation for a test class; it specifies which ParameterizedRunnerDelegate to use.
+     *
+     * The default ParameterizedRunnerDelegate is BaseJUnit4RunnerDelegate.class.
+     */
+    @Retention(RetentionPolicy.RUNTIME)
+    @Target(ElementType.TYPE)
+    public @interface UseRunnerDelegate {
+        Class<? extends ParameterizedRunnerDelegate> value();
+    }
+}
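
Putting the annotations together: a hedged sketch of one test class mixing class-level and method-level parameterization. Each class ParameterSet yields one runner, and each method ParameterSet expands the annotated method; the class, names, and values are illustrative.

import org.junit.Test;
import org.junit.runner.RunWith;

import org.chromium.base.test.params.ParameterAnnotations.ClassParameter;
import org.chromium.base.test.params.ParameterAnnotations.UseMethodParameter;
import org.chromium.base.test.params.ParameterProvider;
import org.chromium.base.test.params.ParameterSet;
import org.chromium.base.test.params.ParameterizedRunner;

import java.util.Arrays;
import java.util.List;

@RunWith(ParameterizedRunner.class)
public class MixedParameterSketchTest {
    @ClassParameter
    private static List<ParameterSet> sClassParams = Arrays.asList(
            new ParameterSet().value(true).name("Enabled"),
            new ParameterSet().value(false).name("Disabled"));

    /** Must be public with a public default constructor. */
    public static class SizeParams implements ParameterProvider {
        @Override
        public Iterable<ParameterSet> getParameters() {
            return Arrays.asList(new ParameterSet().value(1).name("Small"),
                    new ParameterSet().value(100).name("Large"));
        }
    }

    private final boolean mFeatureEnabled;

    public MixedParameterSketchTest(boolean featureEnabled) {
        mFeatureEnabled = featureEnabled;
    }

    @Test
    @UseMethodParameter(SizeParams.class)
    public void testSize(int size) {
        // Runs four times in total: {Enabled,Disabled} x {Small,Large}.
    }
}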
diff --git a/base/test/android/javatests/src/org/chromium/base/test/params/ParameterProvider.java b/base/test/android/javatests/src/org/chromium/base/test/params/ParameterProvider.java
new file mode 100644
index 0000000..9bf27bd
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/params/ParameterProvider.java
@@ -0,0 +1,11 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.params;
+
+/**
+ * Generator used to generate arguments for parameterized test methods.
+ * @see ParameterAnnotations.UseMethodParameter
+ */
+public interface ParameterProvider { Iterable<ParameterSet> getParameters(); }
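
Since the factory instantiates providers reflectively, an implementation must be public with a public no-argument constructor. A hedged sketch of one provider; the locale values are illustrative:

import org.chromium.base.test.params.ParameterProvider;
import org.chromium.base.test.params.ParameterSet;

import java.util.Arrays;

/** Supplies locales to tests annotated with @UseMethodParameter(LocaleProvider.class). */
public class LocaleProvider implements ParameterProvider {
    @Override
    public Iterable<ParameterSet> getParameters() {
        return Arrays.asList(
                new ParameterSet().value("en-US").name("English"),
                new ParameterSet().value("de-DE").name("German"));
    }
}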
diff --git a/base/test/android/javatests/src/org/chromium/base/test/params/ParameterSet.java b/base/test/android/javatests/src/org/chromium/base/test/params/ParameterSet.java
new file mode 100644
index 0000000..1cdb576
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/params/ParameterSet.java
@@ -0,0 +1,129 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.params;
+
+import org.junit.Assert;
+
+import java.io.File;
+import java.net.URI;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.Callable;
+
+/**
+ * A set of parameters for one *SINGLE* test method or test class constructor.
+ *
+ * For example, <code>new ParameterSet().value("a", "b")</code> is intended for
+ * a test method/constructor that takes in two strings as arguments.
+ * <code>public void testSimple(String a, String b) {...}</code>
+ * or
+ * <code>public MyTestClass(String a, String b) {...}</code>
+ *
+ * To parameterize testSimple or MyTestClass's tests, create multiple ParameterSets
+ * <code>
+ * static List<ParameterSet> sAllParameterSets = new ArrayList<>();
+ * static {
+ *   sAllParameterSets.add(new ParameterSet().value("a", "b");
+ *   sAllParameterSets.add(new ParameterSet().value("c", "d");
+ * }
+ */
+public class ParameterSet {
+    private List<Object> mValues;
+    private String mName;
+
+    public ParameterSet() {}
+
+    public ParameterSet value(Object firstArg, Object... objects) {
+        List<Object> parameterList = new ArrayList<Object>();
+        parameterList.add(firstArg);
+        parameterList.addAll(Arrays.asList(objects));
+        Assert.assertTrue(
+                "Can not create ParameterSet with no parameters", parameterList.size() != 0);
+        mValues = validateAndCopy(parameterList);
+        return this;
+    }
+
+    public ParameterSet name(String name) {
+        mName = name;
+        return this;
+    }
+
+    @Override
+    public String toString() {
+        if (mValues == null) {
+            return "null";
+        }
+        return Arrays.toString(mValues.toArray());
+    }
+
+    private List<Object> validateAndCopy(List<Object> values) {
+        List<Object> tempValues = new ArrayList<>();
+        for (Object o : values) {
+            if (o == null) {
+                tempValues.add(null);
+            } else {
+                if (o.getClass().isPrimitive() || ACCEPTABLE_TYPES.contains(o.getClass())
+                        || o instanceof Callable) {
+                    tempValues.add(o);
+                } else {
+                    // TODO(yolandyan): maybe come up with way to support
+                    // complex object while handling immutability at the
+                    // same time
+                    throw new IllegalArgumentException("Type \"%s\" is not supported in"
+                            + " parameterized testing at this time. Accepted types include"
+                            + " all primitive types along with "
+                            + Arrays.toString(ACCEPTABLE_TYPES.toArray(
+                                      new String[ACCEPTABLE_TYPES.size()])));
+                }
+            }
+        }
+        return Collections.unmodifiableList(tempValues);
+    }
+
+    String getName() {
+        if (mName == null) {
+            return "";
+        }
+        return mName;
+    }
+
+    List<Object> getValues() {
+        return mValues;
+    }
+
+    int size() {
+        if (mValues == null) return 0;
+        return mValues.size();
+    }
+
+    private static final Set<Class<?>> ACCEPTABLE_TYPES = getAcceptableTypes();
+
+    /**
+     * Any immutable class is acceptable.
+     */
+    private static Set<Class<?>> getAcceptableTypes() {
+        Set<Class<?>> ret = new HashSet<Class<?>>();
+        ret.add(Boolean.class);
+        ret.add(Byte.class);
+        ret.add(Character.class);
+        ret.add(Class.class);
+        ret.add(Double.class);
+        ret.add(File.class);
+        ret.add(Float.class);
+        ret.add(Integer.class);
+        ret.add(Long.class);
+        ret.add(Short.class);
+        ret.add(String.class);
+        ret.add(URI.class);
+        ret.add(URL.class);
+        ret.add(Void.class);
+        return ret;
+    }
+}
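
ParameterSet values are restricted to the immutable types listed above; a hedged sketch of passing a complex object anyway by wrapping it in a Callable, which validateAndCopy() accepts. MyComplexObject is hypothetical.

import java.util.concurrent.Callable;

import org.chromium.base.test.params.ParameterSet;

public class ParameterSetSketch {
    /** Hypothetical complex object that would otherwise be rejected. */
    public static class MyComplexObject {}

    public static ParameterSet makeSet() {
        // The int boxes to Integer, an accepted type; the Callable defers
        // construction of the complex object until the test invokes it.
        Callable<MyComplexObject> factory = new Callable<MyComplexObject>() {
            @Override
            public MyComplexObject call() {
                return new MyComplexObject();
            }
        };
        return new ParameterSet().value("label", 42, factory).name("WithFactory");
    }
}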
diff --git a/base/test/android/javatests/src/org/chromium/base/test/params/ParameterizedFrameworkMethod.java b/base/test/android/javatests/src/org/chromium/base/test/params/ParameterizedFrameworkMethod.java
new file mode 100644
index 0000000..f3333b5
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/params/ParameterizedFrameworkMethod.java
@@ -0,0 +1,94 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.params;
+
+import org.junit.runners.model.FrameworkMethod;
+
+import java.lang.annotation.Annotation;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+
+/**
+ * Custom FrameworkMethod that includes a {@code ParameterSet} that
+ * represents the parameters for this test method
+ */
+public class ParameterizedFrameworkMethod extends FrameworkMethod {
+    private ParameterSet mParameterSet;
+    private String mName;
+
+    public ParameterizedFrameworkMethod(
+            Method method, ParameterSet parameterSet, String classParameterSetName) {
+        super(method);
+        mParameterSet = parameterSet;
+        String postFix = "";
+        if (classParameterSetName != null && !classParameterSetName.isEmpty()) {
+            postFix += "_" + classParameterSetName;
+        }
+        if (parameterSet != null && !parameterSet.getName().isEmpty()) {
+            postFix += "_" + parameterSet.getName();
+        }
+        // postFix, when non-empty, already starts with an underscore.
+        mName = method.getName() + postFix;
+    }
+
+    @Override
+    public String getName() {
+        return mName;
+    }
+
+    @Override
+    public Object invokeExplosively(Object target, Object... params) throws Throwable {
+        if (mParameterSet != null) {
+            return super.invokeExplosively(target, mParameterSet.getValues().toArray());
+        }
+        return super.invokeExplosively(target, params);
+    }
+
+    static List<FrameworkMethod> wrapAllFrameworkMethods(
+            Collection<FrameworkMethod> frameworkMethods, String classParameterSetName) {
+        List<FrameworkMethod> results = new ArrayList<>();
+        for (FrameworkMethod frameworkMethod : frameworkMethods) {
+            results.add(new ParameterizedFrameworkMethod(
+                    frameworkMethod.getMethod(), null, classParameterSetName));
+        }
+        return results;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (obj instanceof ParameterizedFrameworkMethod) {
+            ParameterizedFrameworkMethod method = (ParameterizedFrameworkMethod) obj;
+            return super.equals(obj) && method.getParameterSet().equals(getParameterSet())
+                    && method.getName().equals(getName());
+        }
+        return false;
+    }
+
+    /**
+     * Override hashCode to distinguish two ParameterizedFrameworkMethods backed by the same
+     * Method object.
+     */
+    @Override
+    public int hashCode() {
+        int result = 17;
+        result = 31 * result + super.hashCode();
+        result = 31 * result + getName().hashCode();
+        if (getParameterSet() != null) {
+            result = 31 * result + getParameterSet().hashCode();
+        }
+        return result;
+    }
+
+    Annotation[] getTestAnnotations() {
+        // TODO(yolandyan): add annotation from the ParameterSet, enable
+        // test writing to add SkipCheck for an individual parameter
+        return getMethod().getAnnotations();
+    }
+
+    public ParameterSet getParameterSet() {
+        return mParameterSet;
+    }
+}
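
A hedged sketch of how display names compose: the class ParameterSet name and then the method ParameterSet name are appended as underscore-separated suffixes. SomeTest and the set names are hypothetical.

import java.lang.reflect.Method;

import org.junit.Test;

import org.chromium.base.test.params.ParameterSet;
import org.chromium.base.test.params.ParameterizedFrameworkMethod;

public class NamingSketch {
    /** Hypothetical test class. */
    public static class SomeTest {
        @Test
        public void testFoo() {}
    }

    public static void main(String[] args) throws Exception {
        Method method = SomeTest.class.getMethod("testFoo");
        ParameterizedFrameworkMethod pfm = new ParameterizedFrameworkMethod(
                method, new ParameterSet().value(1).name("One"), "Fast");
        // Class set name, then method set name:
        System.out.println(pfm.getName()); // "testFoo_Fast_One"
    }
}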
diff --git a/base/test/android/javatests/src/org/chromium/base/test/params/ParameterizedRunner.java b/base/test/android/javatests/src/org/chromium/base/test/params/ParameterizedRunner.java
new file mode 100644
index 0000000..113f176
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/params/ParameterizedRunner.java
@@ -0,0 +1,220 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.params;
+
+import org.junit.Test;
+import org.junit.runner.Runner;
+import org.junit.runners.BlockJUnit4ClassRunner;
+import org.junit.runners.Suite;
+import org.junit.runners.model.FrameworkField;
+import org.junit.runners.model.TestClass;
+
+import org.chromium.base.test.params.ParameterAnnotations.ClassParameter;
+import org.chromium.base.test.params.ParameterAnnotations.UseMethodParameter;
+import org.chromium.base.test.params.ParameterAnnotations.UseRunnerDelegate;
+import org.chromium.base.test.params.ParameterizedRunnerDelegateFactory.ParameterizedRunnerDelegateInstantiationException;
+
+import java.lang.reflect.Modifier;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Locale;
+
+/**
+ * ParameterizedRunner generates a list of runners, one for each class parameter set in a test
+ * class.
+ *
+ * ParameterizedRunner looks for the {@code @ClassParameter} annotation in the test class and
+ * generates one ParameterizedRunnerDelegate runner for each ParameterSet.
+ */
+public final class ParameterizedRunner extends Suite {
+    private final List<Runner> mRunners;
+
+    /**
+     * Create a ParameterizedRunner to run the test class.
+     *
+     * @param klass the Class of the test class; the test class should be atomic
+     *              (extend only Object)
+     */
+    public ParameterizedRunner(Class<?> klass) throws Throwable {
+        super(klass, Collections.emptyList()); // pass in empty list of runners
+        validate();
+        mRunners = createRunners(getTestClass());
+    }
+
+    @Override
+    protected List<Runner> getChildren() {
+        return mRunners;
+    }
+
+    /**
+     * ParentRunner calls collectInitializationErrors() to check for errors in the test class.
+     * Parameterized tests are written in unconventional ways; therefore, this method is
+     * overridden and validation is done separately.
+     */
+    @Override
+    protected void collectInitializationErrors(List<Throwable> errors) {
+        // Do not call super collectInitializationErrors
+    }
+
+    private void validate() throws Throwable {
+        validateNoNonStaticInnerClass();
+        validateOnlyOneConstructor();
+        validateInstanceMethods();
+        validateOnlyOneClassParameterField();
+        validateAtLeastOneParameterSetField();
+    }
+
+    private void validateNoNonStaticInnerClass() throws Exception {
+        if (getTestClass().isANonStaticInnerClass()) {
+            throw new Exception("The inner class " + getTestClass().getName() + " is not static.");
+        }
+    }
+
+    private void validateOnlyOneConstructor() throws Exception {
+        if (!hasOneConstructor()) {
+            throw new Exception("Test class should have exactly one public constructor");
+        }
+    }
+
+    private boolean hasOneConstructor() {
+        return getTestClass().getJavaClass().getConstructors().length == 1;
+    }
+
+    private void validateOnlyOneClassParameterField() {
+        if (getTestClass().getAnnotatedFields(ClassParameter.class).size() > 1) {
+            throw new IllegalParameterArgumentException(String.format(Locale.getDefault(),
+                    "%s class has more than one @ClassParameter, only one is allowed",
+                    getTestClass().getName()));
+        }
+    }
+
+    private void validateAtLeastOneParameterSetField() {
+        if (getTestClass().getAnnotatedFields(ClassParameter.class).isEmpty()
+                && getTestClass().getAnnotatedMethods(UseMethodParameter.class).isEmpty()) {
+            throw new IllegalArgumentException(String.format(Locale.getDefault(),
+                    "%s has no field annotated with @ClassParameter or method annotated with"
+                            + "@UseMethodParameter; it should not use ParameterizedRunner",
+                    getTestClass().getName()));
+        }
+    }
+
+    private void validateInstanceMethods() throws Exception {
+        if (getTestClass().getAnnotatedMethods(Test.class).size() == 0) {
+            throw new Exception("No runnable methods");
+        }
+    }
+
+    /**
+     * Return a list of runner delegates through ParameterizedRunnerDelegateFactory.
+     *
+     * For class parameter set: each class can only have one list of class parameter sets.
+     * Each parameter set will be used to create one runner.
+     *
+     * For method parameter sets: a single list of method parameter sets is associated with
+     * a string tag; an immutable map of string to parameter set list will be created and
+     * passed into factory for each runner delegate to create multiple tests. Only one
+     * Runner will be created for a method that uses @UseMethodParameter, regardless of the
+     * number of ParameterSets in the associated list.
+     *
+     * @return a list of runners
+     * @throws ParameterizedRunnerDelegateInstantiationException if the runner delegate can not
+     *         be instantiated reflectively through its constructor
+     * @throws IllegalAccessException if the fields in the test class are not accessible
+     */
+    static List<Runner> createRunners(TestClass testClass)
+            throws IllegalAccessException, ParameterizedRunnerDelegateInstantiationException {
+        List<ParameterSet> classParameterSetList;
+        if (testClass.getAnnotatedFields(ClassParameter.class).isEmpty()) {
+            classParameterSetList = new ArrayList<>();
+            classParameterSetList.add(null);
+        } else {
+            classParameterSetList = getParameterSetList(
+                    testClass.getAnnotatedFields(ClassParameter.class).get(0), testClass);
+            validateWidth(classParameterSetList);
+        }
+
+        Class<? extends ParameterizedRunnerDelegate> runnerDelegateClass =
+                getRunnerDelegateClass(testClass);
+        ParameterizedRunnerDelegateFactory factory = new ParameterizedRunnerDelegateFactory();
+        List<Runner> runnersForTestClass = new ArrayList<>();
+        for (ParameterSet classParameterSet : classParameterSetList) {
+            BlockJUnit4ClassRunner runner = (BlockJUnit4ClassRunner) factory.createRunner(
+                    testClass, classParameterSet, runnerDelegateClass);
+            runnersForTestClass.add(runner);
+        }
+        return runnersForTestClass;
+    }
+
+    /**
+     * Return an unmodifiable list of ParameterSet through a FrameworkField
+     */
+    private static List<ParameterSet> getParameterSetList(FrameworkField field, TestClass testClass)
+            throws IllegalAccessException {
+        field.getField().setAccessible(true);
+        if (!Modifier.isStatic(field.getField().getModifiers())) {
+            throw new IllegalParameterArgumentException(String.format(Locale.getDefault(),
+                    "ParameterSetList fields must be static, this field %s in %s is not",
+                    field.getName(), testClass.getName()));
+        }
+        if (!(field.get(testClass.getJavaClass()) instanceof List)) {
+            throw new IllegalArgumentException(String.format(Locale.getDefault(),
+                    "Fields with @ClassParameter annotations must be an instance of List, "
+                            + "this field %s in %s is not list",
+                    field.getName(), testClass.getName()));
+        }
+        @SuppressWarnings("unchecked") // checked above
+        List<ParameterSet> result = (List<ParameterSet>) field.get(testClass.getJavaClass());
+        return Collections.unmodifiableList(result);
+    }
+
+    static void validateWidth(Iterable<ParameterSet> parameterSetList) {
+        int lastSize = -1;
+        for (ParameterSet set : parameterSetList) {
+            if (set.size() == 0) {
+                throw new IllegalParameterArgumentException(
+                        "No parameter is added to method ParameterSet");
+            }
+            if (lastSize == -1 || set.size() == lastSize) {
+                lastSize = set.size();
+            } else {
+                throw new IllegalParameterArgumentException(String.format(Locale.getDefault(),
+                        "All ParameterSets in a list of ParameterSet must have equal"
+                                + " length. The current ParameterSet (%s) contains %d parameters,"
+                                + " while previous ParameterSet contains %d parameters",
+                        Arrays.toString(set.getValues().toArray()), set.size(), lastSize));
+            }
+        }
+    }
+
+    /**
+     * Get the runner delegate class for the test class if {@code @UseRunnerDelegate} is used.
+     * The default runner delegate is BaseJUnit4RunnerDelegate.class
+     */
+    private static Class<? extends ParameterizedRunnerDelegate> getRunnerDelegateClass(
+            TestClass testClass) {
+        if (testClass.getAnnotation(UseRunnerDelegate.class) != null) {
+            return testClass.getAnnotation(UseRunnerDelegate.class).value();
+        }
+        return BaseJUnit4RunnerDelegate.class;
+    }
+
+    static class IllegalParameterArgumentException extends IllegalArgumentException {
+        IllegalParameterArgumentException(String msg) {
+            super(msg);
+        }
+    }
+
+    public static class ParameterizedTestInstantiationException extends Exception {
+        ParameterizedTestInstantiationException(
+                TestClass testClass, String parameterSetString, Exception e) {
+            super(String.format(
+                          "Test class %s can not be initiated, the provided parameters are %s,"
+                                  + " the required parameter types are %s",
+                          testClass.getJavaClass().toString(), parameterSetString,
+                          Arrays.toString(testClass.getOnlyConstructor().getParameterTypes())),
+                    e);
+        }
+    }
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/params/ParameterizedRunnerDelegate.java b/base/test/android/javatests/src/org/chromium/base/test/params/ParameterizedRunnerDelegate.java
new file mode 100644
index 0000000..d3698a9
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/params/ParameterizedRunnerDelegate.java
@@ -0,0 +1,36 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.params;
+
+import org.junit.runners.model.FrameworkMethod;
+
+import org.chromium.base.test.params.ParameterizedRunner.ParameterizedTestInstantiationException;
+
+import java.util.List;
+
+/**
+ * This interface defines the methods that need to be overridden for a Runner to
+ * be used by ParameterizedRunner to generate individual runners for parameters.
+ *
+ * To create a ParameterizedRunnerDelegate, extend any BlockJUnit4ClassRunner
+ * subclass. You can copy the entire implementation from
+ * org.chromium.base.test.params.BaseJUnit4RunnerDelegate.
+ */
+public interface ParameterizedRunnerDelegate {
+    /**
+     * Override to use DelegateCommon's implementation
+     */
+    void collectInitializationErrors(List<Throwable> errors);
+
+    /**
+     * Override to use DelegateCommon's implementation
+     */
+    List<FrameworkMethod> computeTestMethods();
+
+    /**
+     * Override to use DelegateCommon's implementation
+     */
+    Object createTest() throws ParameterizedTestInstantiationException;
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/params/ParameterizedRunnerDelegateCommon.java b/base/test/android/javatests/src/org/chromium/base/test/params/ParameterizedRunnerDelegateCommon.java
new file mode 100644
index 0000000..f25e2b2
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/params/ParameterizedRunnerDelegateCommon.java
@@ -0,0 +1,69 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.params;
+
+import org.junit.runners.model.FrameworkMethod;
+import org.junit.runners.model.TestClass;
+
+import org.chromium.base.test.params.ParameterizedRunner.ParameterizedTestInstantiationException;
+
+import java.lang.reflect.InvocationTargetException;
+import java.util.List;
+
+/**
+ * Parameterized runner delegate common that implements the methods that need to be
+ * delegated for parameterization purposes.
+ */
+public final class ParameterizedRunnerDelegateCommon {
+    private final TestClass mTestClass;
+    private final ParameterSet mClassParameterSet;
+    private final List<FrameworkMethod> mParameterizedFrameworkMethodList;
+
+    public ParameterizedRunnerDelegateCommon(TestClass testClass, ParameterSet classParameterSet,
+            List<FrameworkMethod> parameterizedFrameworkMethods) {
+        mTestClass = testClass;
+        mClassParameterSet = classParameterSet;
+        mParameterizedFrameworkMethodList = parameterizedFrameworkMethods;
+    }
+
+    /**
+     * Do not do any validation here because running the default class runner's
+     * collectInitializationErrors fails due to the overridden computeTestMethods relying on a
+     * local member variable.
+     *
+     * The validation needed for parameterized tests is already done by ParameterizedRunner.
+     */
+    public static void collectInitializationErrors(
+            @SuppressWarnings("unused") List<Throwable> errors) {}
+
+    public List<FrameworkMethod> computeTestMethods() {
+        return mParameterizedFrameworkMethodList;
+    }
+
+    private void throwInstantiationException(Exception e)
+            throws ParameterizedTestInstantiationException {
+        String parameterSetString =
+                mClassParameterSet == null ? "null" : mClassParameterSet.toString();
+        throw new ParameterizedTestInstantiationException(mTestClass, parameterSetString, e);
+    }
+
+    public Object createTest() throws ParameterizedTestInstantiationException {
+        try {
+            if (mClassParameterSet == null) {
+                return mTestClass.getOnlyConstructor().newInstance();
+            }
+            return mTestClass.getOnlyConstructor().newInstance(
+                    mClassParameterSet.getValues().toArray());
+        } catch (InstantiationException e) {
+            throwInstantiationException(e);
+        } catch (IllegalAccessException e) {
+            throwInstantiationException(e);
+        } catch (InvocationTargetException e) {
+            throwInstantiationException(e);
+        }
+        assert false;
+        return null;
+    }
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/params/ParameterizedRunnerDelegateFactory.java b/base/test/android/javatests/src/org/chromium/base/test/params/ParameterizedRunnerDelegateFactory.java
new file mode 100644
index 0000000..f829981
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/params/ParameterizedRunnerDelegateFactory.java
@@ -0,0 +1,115 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.params;
+
+import org.junit.Test;
+import org.junit.runners.model.FrameworkMethod;
+import org.junit.runners.model.TestClass;
+
+import org.chromium.base.test.params.ParameterAnnotations.UseMethodParameter;
+
+import java.lang.reflect.InvocationTargetException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * Factory to generate delegate class runners for ParameterizedRunner
+ */
+public class ParameterizedRunnerDelegateFactory {
+    /**
+     * Create a runner that implements ParameterizedRunnerDelegate and extends
+     * BlockJUnit4ClassRunner.
+     *
+     * @param testClass the TestClass object for the current test class
+     * @param classParameterSet A parameter set for test constructor arguments
+     * @param parameterizedRunnerDelegateClass the parameterized runner delegate class specified
+     *                                         through {@code @UseRunnerDelegate}
+     */
+    <T extends ParameterizedRunnerDelegate> T createRunner(TestClass testClass,
+            ParameterSet classParameterSet, Class<T> parameterizedRunnerDelegateClass)
+            throws ParameterizedRunnerDelegateInstantiationException {
+        String testMethodPostfix = classParameterSet == null ? null : classParameterSet.getName();
+        List<FrameworkMethod> unmodifiableFrameworkMethodList =
+                generateUnmodifiableFrameworkMethodList(testClass, testMethodPostfix);
+        ParameterizedRunnerDelegateCommon delegateCommon = new ParameterizedRunnerDelegateCommon(
+                testClass, classParameterSet, unmodifiableFrameworkMethodList);
+        try {
+            return parameterizedRunnerDelegateClass
+                    .getDeclaredConstructor(Class.class, ParameterizedRunnerDelegateCommon.class)
+                    .newInstance(testClass.getJavaClass(), delegateCommon);
+        } catch (Exception e) {
+            throw new ParameterizedRunnerDelegateInstantiationException(
+                    parameterizedRunnerDelegateClass.toString(), e);
+        }
+    }
+
+    /**
+     * Match each test method annotated by @UseMethodParameter(X) with the
+     * ParameterSets supplied by the ParameterProvider X.
+     *
+     * @param testClass a {@code TestClass} that wraps around the actual java
+     *            test class
+     * @param postFix a name postfix for each test
+     * @return a list of ParameterizedFrameworkMethod
+     */
+    static List<FrameworkMethod> generateUnmodifiableFrameworkMethodList(
+            TestClass testClass, String postFix) {
+        // Represent the list of all ParameterizedFrameworkMethod in this test class
+        List<FrameworkMethod> returnList = new ArrayList<>();
+
+        for (FrameworkMethod method : testClass.getAnnotatedMethods(Test.class)) {
+            if (method.getMethod().isAnnotationPresent(UseMethodParameter.class)) {
+                Iterable<ParameterSet> parameterSets =
+                        getParameters(method.getAnnotation(UseMethodParameter.class).value());
+                returnList.addAll(createParameterizedMethods(method, parameterSets, postFix));
+            } else {
+                // If test method is not parameterized (does not have UseMethodParameter annotation)
+                returnList.add(new ParameterizedFrameworkMethod(method.getMethod(), null, postFix));
+            }
+        }
+
+        return Collections.unmodifiableList(returnList);
+    }
+
+    /**
+     * Exception caused by instantiating the provided Runner delegate.
+     * Potentially caused by not overriding the collectInitializationErrors() method
+     * to be empty.
+     */
+    public static class ParameterizedRunnerDelegateInstantiationException extends Exception {
+        private ParameterizedRunnerDelegateInstantiationException(
+                String runnerDelegateClass, Exception e) {
+            super(String.format("Current class runner delegate %s can not be instantiated.",
+                          runnerDelegateClass),
+                    e);
+        }
+    }
+
+    private static Iterable<ParameterSet> getParameters(Class<? extends ParameterProvider> clazz) {
+        ParameterProvider parameterProvider;
+        try {
+            parameterProvider = clazz.getDeclaredConstructor().newInstance();
+        } catch (IllegalAccessException e) {
+            throw new IllegalStateException("Failed instantiating " + clazz.getCanonicalName(), e);
+        } catch (InstantiationException e) {
+            throw new IllegalStateException("Failed instantiating " + clazz.getCanonicalName(), e);
+        } catch (NoSuchMethodException e) {
+            throw new IllegalStateException("Failed instantiating " + clazz.getCanonicalName(), e);
+        } catch (InvocationTargetException e) {
+            throw new IllegalStateException("Failed instantiating " + clazz.getCanonicalName(), e);
+        }
+        return parameterProvider.getParameters();
+    }
+
+    private static List<FrameworkMethod> createParameterizedMethods(
+            FrameworkMethod baseMethod, Iterable<ParameterSet> parameterSetList, String suffix) {
+        ParameterizedRunner.validateWidth(parameterSetList);
+        List<FrameworkMethod> returnList = new ArrayList<>();
+        for (ParameterSet set : parameterSetList) {
+            returnList.add(new ParameterizedFrameworkMethod(baseMethod.getMethod(), set, suffix));
+        }
+        return returnList;
+    }
+}
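
generateUnmodifiableFrameworkMethodList() is package-private, so a sketch of the expansion has to live in the same package; the provider and test class here are hypothetical. One @UseMethodParameter method with two ParameterSets expands into two framework methods.

package org.chromium.base.test.params;

import org.junit.Test;
import org.junit.runners.model.FrameworkMethod;
import org.junit.runners.model.TestClass;

import org.chromium.base.test.params.ParameterAnnotations.UseMethodParameter;

import java.util.Arrays;
import java.util.List;

public class ExpansionSketch {
    /** Must be public with a public default constructor. */
    public static class CountsProvider implements ParameterProvider {
        @Override
        public Iterable<ParameterSet> getParameters() {
            return Arrays.asList(new ParameterSet().value(1).name("One"),
                    new ParameterSet().value(2).name("Two"));
        }
    }

    /** Hypothetical test class to expand. */
    public static class ExpansionTest {
        @Test
        @UseMethodParameter(CountsProvider.class)
        public void testCount(int count) {}
    }

    public static void main(String[] args) {
        List<FrameworkMethod> methods =
                ParameterizedRunnerDelegateFactory.generateUnmodifiableFrameworkMethodList(
                        new TestClass(ExpansionTest.class), "");
        // One ParameterizedFrameworkMethod per ParameterSet:
        // "testCount_One" and "testCount_Two".
        System.out.println(methods.size()); // 2
    }
}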
diff --git a/base/test/android/javatests/src/org/chromium/base/test/util/AdvancedMockContext.java b/base/test/android/javatests/src/org/chromium/base/test/util/AdvancedMockContext.java
new file mode 100644
index 0000000..c8117f7
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/util/AdvancedMockContext.java
@@ -0,0 +1,118 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.util;
+
+import android.content.ComponentCallbacks;
+import android.content.ContentResolver;
+import android.content.Context;
+import android.content.ContextWrapper;
+import android.content.SharedPreferences;
+import android.test.mock.MockContentResolver;
+import android.test.mock.MockContext;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * ContextWrapper that adds functionality for SharedPreferences and a way to set and retrieve flags.
+ */
+public class AdvancedMockContext extends ContextWrapper {
+
+    private final MockContentResolver mMockContentResolver = new MockContentResolver();
+
+    private final Map<String, SharedPreferences> mSharedPreferences =
+            new HashMap<String, SharedPreferences>();
+
+    private final Map<String, Boolean> mFlags = new HashMap<String, Boolean>();
+
+    public AdvancedMockContext(Context base) {
+        super(base);
+    }
+
+    public AdvancedMockContext() {
+        super(new MockContext());
+    }
+
+    @Override
+    public String getPackageName() {
+        return getBaseContext().getPackageName();
+    }
+
+    @Override
+    public Context getApplicationContext() {
+        return this;
+    }
+
+    @Override
+    public ContentResolver getContentResolver() {
+        return mMockContentResolver;
+    }
+
+    public MockContentResolver getMockContentResolver() {
+        return mMockContentResolver;
+    }
+
+    @Override
+    public SharedPreferences getSharedPreferences(String name, int mode) {
+        synchronized (mSharedPreferences) {
+            if (!mSharedPreferences.containsKey(name)) {
+                // Auto-create shared preferences to mimic Android Context behavior
+                mSharedPreferences.put(name, new InMemorySharedPreferences());
+            }
+            return mSharedPreferences.get(name);
+        }
+    }
+
+    @Override
+    public void registerComponentCallbacks(ComponentCallbacks callback) {
+        getBaseContext().registerComponentCallbacks(callback);
+    }
+
+    @Override
+    public void unregisterComponentCallbacks(ComponentCallbacks callback) {
+        getBaseContext().unregisterComponentCallbacks(callback);
+    }
+
+    public void addSharedPreferences(String name, Map<String, Object> data) {
+        synchronized (mSharedPreferences) {
+            mSharedPreferences.put(name, new InMemorySharedPreferences(data));
+        }
+    }
+
+    public void setFlag(String key) {
+        mFlags.put(key, true);
+    }
+
+    public void clearFlag(String key) {
+        mFlags.remove(key);
+    }
+
+    public boolean isFlagSet(String key) {
+        return mFlags.containsKey(key) && mFlags.get(key);
+    }
+
+    /**
+     * Builder for maps of type {@code Map<String, Object>} to be used with
+     * {@link #addSharedPreferences(String, java.util.Map)}.
+     */
+    public static class MapBuilder {
+
+        private final Map<String, Object> mData = new HashMap<String, Object>();
+
+        public static MapBuilder create() {
+            return new MapBuilder();
+        }
+
+        public MapBuilder add(String key, Object value) {
+            mData.put(key, value);
+            return this;
+        }
+
+        public Map<String, Object> build() {
+            return mData;
+        }
+
+    }
+}
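
A hedged sketch of the context in a test: preferences seeded through MapBuilder stay in memory, and flags give tests a simple toggle. All names and values here are illustrative.

import android.content.Context;
import android.content.SharedPreferences;

import org.chromium.base.test.util.AdvancedMockContext;

public class MockContextSketch {
    public static void demo(Context targetContext) {
        AdvancedMockContext context = new AdvancedMockContext(targetContext);

        // Seed an in-memory preferences file; nothing touches disk.
        context.addSharedPreferences("prefs_file",
                AdvancedMockContext.MapBuilder.create()
                        .add("first_run_complete", true)
                        .add("user_name", "test-user")
                        .build());

        SharedPreferences prefs =
                context.getSharedPreferences("prefs_file", Context.MODE_PRIVATE);
        boolean firstRun = prefs.getBoolean("first_run_complete", false); // true

        // Flags offer a lightweight signal between the test and code under test.
        context.setFlag("network_stubbed");
        boolean stubbed = context.isFlagSet("network_stubbed"); // true
    }
}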
diff --git a/base/test/android/javatests/src/org/chromium/base/test/util/AnnotationProcessingUtils.java b/base/test/android/javatests/src/org/chromium/base/test/util/AnnotationProcessingUtils.java
new file mode 100644
index 0000000..d335412
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/util/AnnotationProcessingUtils.java
@@ -0,0 +1,259 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.util;
+
+import android.support.annotation.Nullable;
+
+import org.junit.runner.Description;
+
+import org.chromium.base.VisibleForTesting;
+
+import java.lang.annotation.Annotation;
+import java.lang.reflect.AnnotatedElement;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Queue;
+import java.util.Set;
+
+/**
+ * Utility class to help with processing annotations, going around the code to collect them, etc.
+ */
+public abstract class AnnotationProcessingUtils {
+    /**
+     * Returns the closest instance of the requested annotation or null if there is none.
+     * See {@link AnnotationExtractor} for context of "closest".
+     */
+    @SuppressWarnings("unchecked")
+    public static <A extends Annotation> A getAnnotation(Description description, Class<A> clazz) {
+        AnnotationExtractor extractor = new AnnotationExtractor(clazz);
+        return (A) extractor.getClosest(extractor.getMatchingAnnotations(description));
+    }
+
+    /**
+     * Returns the closest instance of the requested annotation or null if there is none.
+     * See {@link AnnotationExtractor} for context of "closest".
+     */
+    @SuppressWarnings("unchecked")
+    public static <A extends Annotation> A getAnnotation(AnnotatedElement element, Class<A> clazz) {
+        AnnotationExtractor extractor = new AnnotationExtractor(clazz);
+        return (A) extractor.getClosest(extractor.getMatchingAnnotations(element));
+    }
+
+    /** See {@link AnnotationExtractor} for details about the output sorting order. */
+    @SuppressWarnings("unchecked")
+    public static <A extends Annotation> List<A> getAnnotations(
+            Description description, Class<A> annotationType) {
+        return (List<A>) new AnnotationExtractor(annotationType)
+                .getMatchingAnnotations(description);
+    }
+
+    /** See {@link AnnotationExtractor} for details about the output sorting order. */
+    @SuppressWarnings("unchecked")
+    public static <A extends Annotation> List<A> getAnnotations(
+            AnnotatedElement annotatedElement, Class<A> annotationType) {
+        return (List<A>) new AnnotationExtractor(annotationType)
+                .getMatchingAnnotations(annotatedElement);
+    }
+
+    private static boolean isChromiumAnnotation(Annotation annotation) {
+        Package pkg = annotation.annotationType().getPackage();
+        return pkg != null && pkg.getName().startsWith("org.chromium");
+    }
+
+    /**
+     * Processes various types of annotated elements ({@link Class}es, {@link Annotation}s,
+     * {@link Description}s, etc.) and extracts the targeted annotations from it. The output will be
+     * sorted in BFS-like order.
+     *
+     * For example, for a method we would get in reverse order:
+     * - the method annotations,
+     * - the meta-annotations present on the method annotations,
+     * - the class annotations,
+     * - the meta-annotations present on the class annotations,
+     * - the annotations present on the super class,
+     * - the meta-annotations present on the super class annotations,
+     * - etc.
+     *
+     * When multiple annotations are targeted, if more than one is picked up at a given level (for
+     * example directly on the method), they will be returned in the reverse order that they were
+     * provided to the constructor.
+     *
+     * Note: We return the annotations in reverse order because we assume that if some processing
+     * is going to be made on related annotations, the later annotations would likely override
+     * modifications made by the former.
+     *
+     * Note: While resolving meta-annotations, we don't expand the exploration to annotation
+     * types that have already been visited. Please file a bug and assign to dgn@ if you think it
+     * caused an issue.
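+     *
+     * Illustration (editor's sketch, hypothetical annotations):
+     * <pre>
+     * &#64;Foo class Base {}
+     * &#64;Bar class Derived extends Base {
+     *     &#64;Foo public void test() {}
+     * }
+     * // new AnnotationExtractor(Foo.class, Bar.class)
+     * //         .getMatchingAnnotations(Derived.class.getMethod("test"))
+     * // yields [&#64;Foo (Base), &#64;Bar (Derived), &#64;Foo (test)], closest last.
+     * </pre>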
+     */
+    public static class AnnotationExtractor {
+        private final List<Class<? extends Annotation>> mAnnotationTypes;
+        private final Comparator<Class<? extends Annotation>> mAnnotationTypeComparator;
+        private final Comparator<Annotation> mAnnotationComparator;
+
+        @SafeVarargs
+        public AnnotationExtractor(Class<? extends Annotation>... additionalTypes) {
+            this(Arrays.asList(additionalTypes));
+        }
+
+        public AnnotationExtractor(List<Class<? extends Annotation>> additionalTypes) {
+            assert !additionalTypes.isEmpty();
+            mAnnotationTypes = Collections.unmodifiableList(additionalTypes);
+            mAnnotationTypeComparator =
+                    (t1, t2) -> mAnnotationTypes.indexOf(t1) - mAnnotationTypes.indexOf(t2);
+            mAnnotationComparator = (t1, t2)
+                    -> mAnnotationTypeComparator.compare(t1.annotationType(), t2.annotationType());
+        }
+
+        public List<Annotation> getMatchingAnnotations(Description description) {
+            return getMatchingAnnotations(new AnnotatedNode.DescriptionNode(description));
+        }
+
+        public List<Annotation> getMatchingAnnotations(AnnotatedElement annotatedElement) {
+            AnnotatedNode annotatedNode;
+            if (annotatedElement instanceof Method) {
+                annotatedNode = new AnnotatedNode.MethodNode((Method) annotatedElement);
+            } else if (annotatedElement instanceof Class) {
+                annotatedNode = new AnnotatedNode.ClassNode((Class<?>) annotatedElement);
+            } else {
+                throw new IllegalArgumentException("Unsupported type for " + annotatedElement);
+            }
+
+            return getMatchingAnnotations(annotatedNode);
+        }
+
+        /**
+         * For a given list obtained from the extractor, returns the {@link Annotation} that would
+         * be closest from the extraction point, or {@code null} if the list is empty.
+         */
+        @Nullable
+        public Annotation getClosest(List<Annotation> annotationList) {
+            return annotationList.isEmpty() ? null : annotationList.get(annotationList.size() - 1);
+        }
+
+        @VisibleForTesting
+        Comparator<Class<? extends Annotation>> getTypeComparator() {
+            return mAnnotationTypeComparator;
+        }
+
+        private List<Annotation> getMatchingAnnotations(AnnotatedNode annotatedNode) {
+            List<Annotation> collectedAnnotations = new ArrayList<>();
+            Queue<Annotation> workingSet = new LinkedList<>();
+            Set<Class<? extends Annotation>> visited = new HashSet<>();
+
+            AnnotatedNode currentAnnotationLayer = annotatedNode;
+            while (currentAnnotationLayer != null) {
+                queueAnnotations(currentAnnotationLayer.getAnnotations(), workingSet);
+
+                while (!workingSet.isEmpty()) {
+                    sweepAnnotations(collectedAnnotations, workingSet, visited);
+                }
+
+                currentAnnotationLayer = currentAnnotationLayer.getParent();
+            }
+
+            return collectedAnnotations;
+        }
+
+        private void queueAnnotations(List<Annotation> annotations, Queue<Annotation> workingSet) {
+            Collections.sort(annotations, mAnnotationComparator);
+            workingSet.addAll(annotations);
+        }
+
+        private void sweepAnnotations(List<Annotation> collectedAnnotations,
+                Queue<Annotation> workingSet, Set<Class<? extends Annotation>> visited) {
+            // 1. Grab node at the front of the working set.
+            Annotation annotation = workingSet.remove();
+
+            // 2. If it's an annotation of interest, put it aside for the output.
+            if (mAnnotationTypes.contains(annotation.annotationType())) {
+                collectedAnnotations.add(0, annotation);
+            }
+
+            // 3. Check whether we can skip some redundant iterations and avoid cycles.
+            if (!visited.add(annotation.annotationType())) return;
+            if (!isChromiumAnnotation(annotation)) return;
+
+            // 4. Expand the working set
+            queueAnnotations(Arrays.asList(annotation.annotationType().getDeclaredAnnotations()),
+                    workingSet);
+        }
+    }
+
+    /**
+     * Abstraction to hide differences between Class, Method and Description with regards to their
+     * annotations and what should be analyzed next.
+     */
+    private static abstract class AnnotatedNode {
+        @Nullable
+        abstract AnnotatedNode getParent();
+
+        abstract List<Annotation> getAnnotations();
+
+        static class DescriptionNode extends AnnotatedNode {
+            final Description mDescription;
+
+            DescriptionNode(Description description) {
+                mDescription = description;
+            }
+
+            @Nullable
+            @Override
+            AnnotatedNode getParent() {
+                return new ClassNode(mDescription.getTestClass());
+            }
+
+            @Override
+            List<Annotation> getAnnotations() {
+                return new ArrayList<>(mDescription.getAnnotations());
+            }
+        }
+
+        static class ClassNode extends AnnotatedNode {
+            final Class<?> mClass;
+
+            ClassNode(Class<?> clazz) {
+                mClass = clazz;
+            }
+
+            @Nullable
+            @Override
+            AnnotatedNode getParent() {
+                Class<?> superClass = mClass.getSuperclass();
+                return superClass == null ? null : new ClassNode(superClass);
+            }
+
+            @Override
+            List<Annotation> getAnnotations() {
+                return Arrays.asList(mClass.getDeclaredAnnotations());
+            }
+        }
+
+        static class MethodNode extends AnnotatedNode {
+            final Method mMethod;
+
+            MethodNode(Method method) {
+                mMethod = method;
+            }
+
+            @Nullable
+            @Override
+            AnnotatedNode getParent() {
+                return new ClassNode(mMethod.getDeclaringClass());
+            }
+
+            @Override
+            List<Annotation> getAnnotations() {
+                return Arrays.asList(mMethod.getDeclaredAnnotations());
+            }
+        }
+    }
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/util/AnnotationRule.java b/base/test/android/javatests/src/org/chromium/base/test/util/AnnotationRule.java
new file mode 100644
index 0000000..a361ac3
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/util/AnnotationRule.java
@@ -0,0 +1,139 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.util;
+
+import android.support.annotation.CallSuper;
+import android.support.annotation.Nullable;
+
+import org.junit.rules.ExternalResource;
+import org.junit.runner.Description;
+import org.junit.runners.model.Statement;
+
+import java.lang.annotation.Annotation;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.ListIterator;
+
+/**
+ * Test rule that collects specific annotations to help with test set up and tear down. It is set up
+ * with a list of annotations to look for and exposes the ones picked up on the test through
+ * {@link #getAnnotations()} and related methods.
+ *
+ * Note: The rule always applies, whether it picked up annotations or not.
+ *
+ * Usage:
+ *
+ * <pre>
+ * public class Test {
+ *    &#64;Rule
+ *    public AnnotationRule rule = new AnnotationRule(Foo.class) {
+ *          &#64;Override
+ *          protected void before() { ... }
+ *
+ *          &#64;Override
+ *          protected void after() { ... }
+ *    };
+ *
+ *    &#64;Test
+ *    &#64;Foo
+ *    public void myTest() { ... }
+ * }
+ * </pre>
+ *
+ * It can also be used to trigger for multiple annotations:
+ *
+ * <pre>
+ * &#64;DisableFoo
+ * public class Test {
+ *    &#64;Rule
+ *    public AnnotationRule rule = new AnnotationRule(EnableFoo.class, DisableFoo.class) {
+ *          &#64;Override
+ *          protected void before() {
+ *            // Loops through all the picked up annotations. For myTest(), it would process
+ *            // DisableFoo first, then EnableFoo.
+ *            for (Annotation annotation : getAnnotations()) {
+ *                if (annotation instanceof EnableFoo) { ... }
+ *                else if (annotation instanceof DisableFoo) { ... }
+ *            }
+ *          }
+ *
+ *          &#64;Override
+ *          protected void after() {
+ *            // For myTest(), would return EnableFoo as it's directly set on the method.
+ *            Annotation a = getClosestAnnotation();
+ *            ...
+ *          }
+ *    };
+ *
+ *    &#64;Test
+ *    &#64;EnableFoo
+ *    public void myTest() { ... }
+ * }
+ * </pre>
+ *
+ * @see AnnotationProcessingUtils.AnnotationExtractor
+ */
+public abstract class AnnotationRule extends ExternalResource {
+    private final AnnotationProcessingUtils.AnnotationExtractor mAnnotationExtractor;
+    private List<Annotation> mCollectedAnnotations;
+    private Description mTestDescription;
+
+    @SafeVarargs
+    public AnnotationRule(Class<? extends Annotation> firstAnnotationType,
+            Class<? extends Annotation>... additionalTypes) {
+        List<Class<? extends Annotation>> mAnnotationTypes = new ArrayList<>();
+        mAnnotationTypes.add(firstAnnotationType);
+        mAnnotationTypes.addAll(Arrays.asList(additionalTypes));
+        mAnnotationExtractor = new AnnotationProcessingUtils.AnnotationExtractor(mAnnotationTypes);
+    }
+
+    @CallSuper
+    @Override
+    public Statement apply(Statement base, Description description) {
+        mTestDescription = description;
+
+        mCollectedAnnotations = mAnnotationExtractor.getMatchingAnnotations(description);
+
+        // Return the wrapped statement to execute before() and after().
+        return super.apply(base, description);
+    }
+
+    /** @return {@link Description} of the current test. */
+    protected Description getTestDescription() {
+        return mTestDescription;
+    }
+
+    /**
+     * @return The collected annotations that match the declared type(s).
+     * @throws NullPointerException if this is called before annotations have been collected;
+     * collection happens when the rule is applied to the {@link Statement}.
+     */
+    protected List<Annotation> getAnnotations() {
+        return Collections.unmodifiableList(mCollectedAnnotations);
+    }
+
+    /**
+     * @return The closest annotation matching the provided type, or {@code null} if there is none.
+     */
+    @SuppressWarnings("unchecked")
+    protected @Nullable <A extends Annotation> A getAnnotation(Class<A> annotationType) {
+        ListIterator<Annotation> iteratorFromEnd =
+                mCollectedAnnotations.listIterator(mCollectedAnnotations.size());
+        while (iteratorFromEnd.hasPrevious()) {
+            Annotation annotation = iteratorFromEnd.previous();
+            if (annotationType.isAssignableFrom(annotation.annotationType())) {
+                return (A) annotation;
+            }
+        }
+        return null;
+    }
+
+    protected @Nullable Annotation getClosestAnnotation() {
+        if (mCollectedAnnotations.isEmpty()) return null;
+        return mCollectedAnnotations.get(mCollectedAnnotations.size() - 1);
+    }
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/util/CallbackHelper.java b/base/test/android/javatests/src/org/chromium/base/test/util/CallbackHelper.java
new file mode 100644
index 0000000..bf064c4
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/util/CallbackHelper.java
@@ -0,0 +1,252 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.util;
+
+import static org.chromium.base.test.util.ScalableTimeout.scaleTimeout;
+
+import org.junit.Assert;
+
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+/**
+ * A helper class that encapsulates listening and blocking for callbacks.
+ *
+ * Sample usage:
+ *
+ * // Let us assume that this interface is defined by some piece of production code and is used
+ * // to communicate events that occur in that piece of code. Let us further assume that the
+ * // production code runs on the main thread while the test code runs on a separate test thread.
+ * // An instance that implements this interface would be injected by the test code so the test
+ * // can observe the methods being called on the other thread.
+ * interface Delegate {
+ *     void onOperationFailed(String errorMessage);
+ *     void onDataPersisted();
+ * }
+ *
+ * // This is the inner class you'd write in your test case to later inject into the production
+ * // code.
+ * class TestDelegate implements Delegate {
+ *     // This is the preferred way to create a helper that stores the parameters it receives
+ *     // when called by production code.
+ *     public static class OnOperationFailedHelper extends CallbackHelper {
+ *         private String mErrorMessage;
+ *
+ *         public String getErrorMessage() {
+ *             assert getCallCount() > 0;
+ *             return mErrorMessage;
+ *         }
+ *
+ *         public void notifyCalled(String errorMessage) {
+ *             mErrorMessage = errorMessage;
+ *             // It's important to call this after all parameter assignments.
+ *             notifyCalled();
+ *         }
+ *     }
+ *
+ *     // There should be one CallbackHelper instance per method.
+ *     private OnOperationFailedHelper mOnOperationFailedHelper;
+ *     private CallbackHelper mOnDataPersistedHelper;
+ *
+ *     public OnOperationFailedHelper getOnOperationFailedHelper() {
+ *         return mOnOperationFailedHelper;
+ *     }
+ *
+ *     public CallbackHelper getOnDataPersistedHelper() {
+ *         return mOnDataPersistedHelper;
+ *     }
+ *
+ *     @Override
+ *     public void onOperationFailed(String errorMessage) {
+ *         mOnOperationFailedHelper.notifyCalled(errorMessage);
+ *     }
+ *
+ *     @Override
+ *     public void onDataPersisted() {
+ *         mOnDataPersistedHelper.notifyCalled();
+ *     }
+ * }
+ *
+ * // This is a sample test case.
+ * public void testCase() throws Exception {
+ *     // Create the TestDelegate to inject into production code.
+ *     TestDelegate delegate = new TestDelegate();
+ *     // Create the production class instance that is being tested and inject the test delegate.
+ *     CodeUnderTest codeUnderTest = new CodeUnderTest();
+ *     codeUnderTest.setDelegate(delegate);
+ *
+ *     // Typically you'd get the current call count before performing the operation you expect to
+ *     // trigger the callback. There can't be any callbacks 'in flight' at this moment, otherwise
+ *     // the call count is unpredictable and the test will be flaky.
+ *     int onOperationFailedCallCount = delegate.getOnOperationFailedHelper().getCallCount();
+ *     codeUnderTest.doSomethingThatEndsUpCallingOnOperationFailedFromAnotherThread();
+ *     // It's safe to do other stuff here, if needed.
+ *     ....
+ *     // Wait for the callback if it hadn't been called yet, otherwise return immediately. This
+ *     // can throw an exception if the callback doesn't arrive within the timeout.
+ *     delegate.getOnOperationFailedHelper().waitForCallback(onOperationFailedCallCount);
+ *     // Access to method parameters is now safe.
+ *     assertEquals("server error", delegate.getOnOperationFailedHelper().getErrorMessage());
+ *
+ *     // Being able to pass the helper around lets us build methods which encapsulate commonly
+ *     // performed tasks.
+ *     doSomeOperationAndWait(codeUnderTest, delegate.getOnOperationFailedHelper());
+ *
+ *     // The helper can be reused for as many calls as needed, just be sure to get the count each
+ *     // time.
+ *     onOperationFailedCallCount = delegate.getOnOperationFailedHelper().getCallCount();
+ *     codeUnderTest.doSomethingElseButStillFailOnAnotherThread();
+ *     delegate.getOnOperationFailedHelper().waitForCallback(onOperationFailedCallCount);
+ *
+ *     // It is also possible to use more than one helper at a time.
+ *     onOperationFailedCallCount = delegate.getOnOperationFailedHelper().getCallCount();
+ *     int onDataPersistedCallCount = delegate.getOnDataPersistedHelper().getCallCount();
+ *     codeUnderTest.doSomethingThatPersistsDataButFailsInSomeOtherWayOnAnotherThread();
+ *     delegate.getOnDataPersistedHelper().waitForCallback(onDataPersistedCallCount);
+ *     delegate.getOnOperationFailedHelper().waitForCallback(onOperationFailedCallCount);
+ * }
+ *
+ * // Shows how to turn an async operation + completion callback into a synchronous operation.
+ * private void doSomeOperationAndWait(final CodeUnderTest underTest,
+ *         CallbackHelper operationHelper) throws InterruptedException, TimeoutException {
+ *     final int callCount = operationHelper.getCallCount();
+ *     getInstrumentation().runOnMainSync(new Runnable() {
+ *         @Override
+ *         public void run() {
+ *             // This schedules a call to a method on the injected TestDelegate. The TestDelegate
+ *             // implementation will then call operationHelper.notifyCalled().
+ *             underTest.operation();
+ *         }
+ *      });
+ *      operationHelper.waitForCallback(callCount);
+ * }
+ *
+ */
+public class CallbackHelper {
+    /** The default timeout (in seconds) to wait for a callback. */
+    public static final long WAIT_TIMEOUT_SECONDS = scaleTimeout(5);
+
+    private final Object mLock = new Object();
+    private int mCallCount;
+    private String mFailureString;
+
+    /**
+     * Gets the number of times the callback has been called.
+     *
+     * The call count can be used with the waitForCallback() method, indicating a point
+     * in time after which the caller wishes to record calls to the callback.
+     *
+     * In order to wait for a callback caused by X, the call count should be obtained
+     * before X occurs.
+     *
+     * NOTE: any call to the callback that occurs after the call count is obtained
+     * will cause the corresponding wait call to resume execution. The call count
+     * is intended to 'catch' callbacks that occur after X but before waitForCallback()
+     * is called.
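+     *
+     * Typical pattern (condensed from the class comment above):
+     * <pre>
+     * int count = helper.getCallCount();
+     * triggerTheCallbackSomehow();
+     * helper.waitForCallback(count);
+     * </pre>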
+     */
+    public int getCallCount() {
+        synchronized (mLock) {
+            return mCallCount;
+        }
+    }
+
+    /**
+     * Blocks until the callback is called the specified number of
+     * times, or throws an exception if the specified time frame is exceeded.
+     *
+     * This will wait for a callback to be called a specified number of times after
+     * the point in time at which the call count was obtained.  The method will return
+     * immediately if a call occurred the specified number of times after the
+     * call count was obtained but before the method was called, otherwise the method will
+     * block until the specified call count is reached.
+     *
+     * @param msg The error message to use if the callback times out.
+     * @param currentCallCount the value obtained by calling getCallCount().
+     * @param numberOfCallsToWaitFor number of calls (counting since
+     *                               currentCallCount was obtained) that we will wait for.
+     * @param timeout timeout value. We will wait the specified amount of time for a single
+     *                callback to occur so the method call may block up to
+     *                <code>numberOfCallsToWaitFor * timeout</code> units.
+     * @param unit timeout unit.
+     * @throws InterruptedException
+     * @throws TimeoutException Thrown if the expected number of calls does not occur within the
+     *                          timeout.
+     */
+    public void waitForCallback(String msg, int currentCallCount, int numberOfCallsToWaitFor,
+            long timeout, TimeUnit unit) throws InterruptedException, TimeoutException {
+        assert numberOfCallsToWaitFor > 0;
+        synchronized (mLock) {
+            // Read mCallCount under the lock so the sanity check is not racy.
+            assert mCallCount >= currentCallCount;
+            int callCountWhenDoneWaiting = currentCallCount + numberOfCallsToWaitFor;
+            while (callCountWhenDoneWaiting > mCallCount) {
+                int callCountBeforeWait = mCallCount;
+                mLock.wait(unit.toMillis(timeout));
+                if (mFailureString != null) {
+                    String s = mFailureString;
+                    mFailureString = null;
+                    Assert.fail(s);
+                }
+                if (callCountBeforeWait == mCallCount) {
+                    throw new TimeoutException(msg == null ? "waitForCallback timed out!" : msg);
+                }
+            }
+        }
+    }
+
+    /**
+     * @see #waitForCallback(String, int, int, long, TimeUnit)
+     */
+    public void waitForCallback(int currentCallCount, int numberOfCallsToWaitFor, long timeout,
+            TimeUnit unit) throws InterruptedException, TimeoutException {
+        waitForCallback(null, currentCallCount, numberOfCallsToWaitFor, timeout, unit);
+    }
+
+    /**
+     * @see #waitForCallback(String, int, int, long, TimeUnit)
+     */
+    public void waitForCallback(int currentCallCount, int numberOfCallsToWaitFor)
+            throws InterruptedException, TimeoutException {
+        waitForCallback(null, currentCallCount, numberOfCallsToWaitFor,
+                WAIT_TIMEOUT_SECONDS, TimeUnit.SECONDS);
+    }
+
+    /**
+     * @see #waitForCallback(String, int, int, long, TimeUnit)
+     */
+    public void waitForCallback(String msg, int currentCallCount)
+            throws InterruptedException, TimeoutException {
+        waitForCallback(msg, currentCallCount, 1, WAIT_TIMEOUT_SECONDS, TimeUnit.SECONDS);
+    }
+
+    /**
+     * @see #waitForCallback(String, int, int, long, TimeUnit)
+     */
+    public void waitForCallback(int currentCallCount)
+            throws InterruptedException, TimeoutException {
+        waitForCallback(null, currentCallCount, 1, WAIT_TIMEOUT_SECONDS, TimeUnit.SECONDS);
+    }
+
+    /**
+     * Should be called when the callback associated with this helper object is called.
+     */
+    public void notifyCalled() {
+        synchronized (mLock) {
+            mCallCount++;
+            mLock.notifyAll();
+        }
+    }
+
+    /**
+     * Should be called when the callback associated with this helper object wants to
+     * indicate a failure.
+     *
+     * @param s The failure message.
+     */
+    public void notifyFailed(String s) {
+        synchronized (mLock) {
+            mFailureString = s;
+            mLock.notifyAll();
+        }
+    }
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/util/CommandLineFlags.java b/base/test/android/javatests/src/org/chromium/base/test/util/CommandLineFlags.java
new file mode 100644
index 0000000..71ef8e9
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/util/CommandLineFlags.java
@@ -0,0 +1,188 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.util;
+
+import android.text.TextUtils;
+
+import org.junit.Assert;
+import org.junit.Rule;
+
+import org.chromium.base.CommandLine;
+import org.chromium.base.CommandLineInitUtil;
+import org.chromium.base.test.BaseTestResult.PreTestHook;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Inherited;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+import java.lang.reflect.AnnotatedElement;
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+/**
+ * Provides annotations related to command-line flag handling.
+ *
+ * Uses of these annotations on a derived class will take precedence over uses on its base classes,
+ * so a derived class can add a command-line flag that a base class has removed (or vice versa).
+ * Similarly, uses of these annotations on a test method will take precedence over uses on the
+ * containing class.
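+ * <p>
+ * For example (editor's sketch, hypothetical flags):
+ *
+ * <pre>
+ *     &#64;CommandLineFlags.Add({"foo", "bar"})
+ *     public class BaseTest {}
+ *
+ *     &#64;CommandLineFlags.Remove("bar")
+ *     public class DerivedTest extends BaseTest {
+ *         &#64;Test
+ *         &#64;CommandLineFlags.Add("baz")
+ *         public void myTest() {} // Runs with "foo" and "baz", but without "bar".
+ *     }
+ * </pre>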
+ * <p>
+ * These annotations may also be used on JUnit4 Rule classes and on their base classes. Note,
+ * however, that the annotation processor only looks at the declared type of the Rule, not its
+ * actual type, so, for example, in:
+ *
+ * <pre>
+ *     &#64Rule
+ *     TestRule mRule = new ChromeActivityTestRule();
+ * </pre>
+ *
+ * the processor will only look for CommandLineFlags annotations on TestRule, not for
+ * CommandLineFlags annotations on ChromeActivityTestRule.
+ * <p>
+ * In addition a rule may not remove flags added by an independently invoked rule, although it may
+ * remove flags added by its base classes.
+ * <p>
+ * Uses of these annotations on the test class or methods take precedence over uses on Rule classes.
+ * <p>
+ * Note that this class should never be instantiated.
+ */
+public final class CommandLineFlags {
+    private static final String DISABLE_FEATURES = "disable-features";
+    private static final String ENABLE_FEATURES = "enable-features";
+
+    /**
+     * Adds command-line flags to the {@link org.chromium.base.CommandLine} for this test.
+     */
+    @Inherited
+    @Retention(RetentionPolicy.RUNTIME)
+    @Target({ElementType.METHOD, ElementType.TYPE})
+    public @interface Add {
+        String[] value();
+    }
+
+    /**
+     * Removes command-line flags from the {@link org.chromium.base.CommandLine} from this test.
+     *
+     * Note that this can only remove flags added via {@link Add} above.
+     */
+    @Inherited
+    @Retention(RetentionPolicy.RUNTIME)
+    @Target({ElementType.METHOD, ElementType.TYPE})
+    public @interface Remove {
+        String[] value();
+    }
+
+    /**
+     * Sets up the CommandLine with the appropriate flags.
+     *
+     * This will add the difference of the sets of flags specified by {@link CommandLineFlags.Add}
+     * and {@link CommandLineFlags.Remove} to the {@link org.chromium.base.CommandLine}. Note that
+     * trying to remove a flag set externally, i.e. by the command-line flags file, will not work.
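+     *
+     * For instance (editor's illustration), {@code enable-features=A} on the class and
+     * {@code enable-features=B} on the method are merged into a single
+     * {@code enable-features=A,B} switch rather than trampling each other.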
+     */
+    public static void setUp(AnnotatedElement element) {
+        CommandLine.reset();
+        CommandLineInitUtil.initCommandLine(getTestCmdLineFile());
+        Set<String> enableFeatures = new HashSet<String>();
+        Set<String> disableFeatures = new HashSet<String>();
+        Set<String> flags = getFlags(element);
+        for (String flag : flags) {
+            String[] parsedFlags = flag.split("=", 2);
+            if (parsedFlags.length == 1) {
+                CommandLine.getInstance().appendSwitch(flag);
+            } else if (ENABLE_FEATURES.equals(parsedFlags[0])) {
+                // We collect enable/disable features flags separately and aggregate them because
+                // they may be specified multiple times, in which case the values will trample each
+                // other.
+                Collections.addAll(enableFeatures, parsedFlags[1].split(","));
+            } else if (DISABLE_FEATURES.equals(parsedFlags[0])) {
+                Collections.addAll(disableFeatures, parsedFlags[1].split(","));
+            } else {
+                CommandLine.getInstance().appendSwitchWithValue(parsedFlags[0], parsedFlags[1]);
+            }
+        }
+
+        if (enableFeatures.size() > 0) {
+            CommandLine.getInstance().appendSwitchWithValue(
+                    ENABLE_FEATURES, TextUtils.join(",", enableFeatures));
+        }
+        if (disableFeatures.size() > 0) {
+            CommandLine.getInstance().appendSwitchWithValue(
+                    DISABLE_FEATURES, TextUtils.join(",", disableFeatures));
+        }
+    }
+
+    private static Set<String> getFlags(AnnotatedElement element) {
+        Set<String> ruleFlags = new HashSet<>();
+        updateFlagsForElement(element, ruleFlags);
+        return ruleFlags;
+    }
+
+    private static void updateFlagsForElement(AnnotatedElement element, Set<String> flags) {
+        if (element instanceof Class<?>) {
+            // Get flags from rules within the class.
+            for (Field field : ((Class<?>) element).getFields()) {
+                if (field.isAnnotationPresent(Rule.class)) {
+                    // The order in which fields are returned is undefined, so, for consistency,
+                    // a rule must not remove a flag added by a different rule. Ensure this by
+                    // initially getting the flags into a new set.
+                    Set<String> ruleFlags = getFlags(field.getType());
+                    flags.addAll(ruleFlags);
+                }
+            }
+            for (Method method : ((Class<?>) element).getMethods()) {
+                if (method.isAnnotationPresent(Rule.class)) {
+                    // The order in which methods are returned is undefined, so, for consistency,
+                    // a rule must not remove a flag added by a different rule. Ensure this by
+                    // initially getting the flags into a new set.
+                    Set<String> ruleFlags = getFlags(method.getReturnType());
+                    flags.addAll(ruleFlags);
+                }
+            }
+        }
+
+        // Add the flags from the parent. Override any flags defined by the rules.
+        AnnotatedElement parent = (element instanceof Method)
+                ? ((Method) element).getDeclaringClass()
+                : ((Class<?>) element).getSuperclass();
+        if (parent != null) updateFlagsForElement(parent, flags);
+
+        // Flags on the element itself override all other flag sources.
+        if (element.isAnnotationPresent(CommandLineFlags.Add.class)) {
+            flags.addAll(
+                    Arrays.asList(element.getAnnotation(CommandLineFlags.Add.class).value()));
+        }
+
+        if (element.isAnnotationPresent(CommandLineFlags.Remove.class)) {
+            List<String> flagsToRemove =
+                    Arrays.asList(element.getAnnotation(CommandLineFlags.Remove.class).value());
+            for (String flagToRemove : flagsToRemove) {
+                // If your test fails here, you have tried to remove a command-line flag via
+                // CommandLineFlags.Remove that was loaded into CommandLine via something other
+                // than CommandLineFlags.Add (probably the command-line flag file).
+                Assert.assertFalse("Unable to remove command-line flag \"" + flagToRemove + "\".",
+                        CommandLine.getInstance().hasSwitch(flagToRemove));
+            }
+            flags.removeAll(flagsToRemove);
+        }
+    }
+
+    private CommandLineFlags() {
+        throw new AssertionError("CommandLineFlags is a non-instantiable class");
+    }
+
+    public static PreTestHook getRegistrationHook() {
+        return (targetContext, testMethod) -> CommandLineFlags.setUp(testMethod);
+    }
+
+    public static String getTestCmdLineFile() {
+        return "test-cmdline-file";
+    }
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/util/DisableIf.java b/base/test/android/javatests/src/org/chromium/base/test/util/DisableIf.java
new file mode 100644
index 0000000..c0303b6
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/util/DisableIf.java
@@ -0,0 +1,49 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.util;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * Annotations to support conditional test disabling.
+ *
+ * These annotations should only be used to disable tests that are temporarily failing
+ * in some configurations. If a test should never run at all in some configurations, use
+ * {@link Restriction}.
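+ *
+ * Example (editor's sketch):
+ *
+ * <pre>
+ *     &#64;Test
+ *     &#64;DisableIf.Build(sdk_is_less_than = 21, message = "Flaky below L")
+ *     public void testSomething() {}
+ * </pre>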
+ */
+public class DisableIf {
+
+    /** Conditional disabling based on {@link android.os.Build}.
+     */
+    @Target({ElementType.METHOD, ElementType.TYPE})
+    @Retention(RetentionPolicy.RUNTIME)
+    public static @interface Build {
+        String message() default "";
+
+        int sdk_is_greater_than() default 0;
+        int sdk_is_less_than() default Integer.MAX_VALUE;
+
+        String supported_abis_includes() default "";
+
+        String hardware_is() default "";
+
+        String product_name_includes() default "";
+    }
+
+    @Target({ElementType.METHOD, ElementType.TYPE})
+    @Retention(RetentionPolicy.RUNTIME)
+    public static @interface Device {
+        /**
+         * @return A list of disabled types.
+         */
+        public String[] type();
+    }
+
+    /* Objects of this type should not be created. */
+    private DisableIf() {}
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/util/DisableIfSkipCheck.java b/base/test/android/javatests/src/org/chromium/base/test/util/DisableIfSkipCheck.java
new file mode 100644
index 0000000..e46b979
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/util/DisableIfSkipCheck.java
@@ -0,0 +1,84 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.util;
+
+import android.os.Build;
+
+import org.junit.runners.model.FrameworkMethod;
+
+import org.chromium.base.Log;
+
+import java.util.Arrays;
+
+/**
+ * Checks for conditional disables.
+ *
+ * Currently, this only includes checks against a few {@link android.os.Build} values.
+ */
+public class DisableIfSkipCheck extends SkipCheck {
+
+    private static final String TAG = "cr_base_test";
+
+    @Override
+    public boolean shouldSkip(FrameworkMethod method) {
+        if (method == null) return true;
+        for (DisableIf.Build v : AnnotationProcessingUtils.getAnnotations(
+                     method.getMethod(), DisableIf.Build.class)) {
+            if (abi(v) && hardware(v) && product(v) && sdk(v)) {
+                if (!v.message().isEmpty()) {
+                    Log.i(TAG, "%s is disabled: %s", method.getName(), v.message());
+                }
+                return true;
+            }
+        }
+
+        for (DisableIf.Device d : AnnotationProcessingUtils.getAnnotations(
+                     method.getMethod(), DisableIf.Device.class)) {
+            for (String deviceType : d.type()) {
+                if (deviceTypeApplies(deviceType)) {
+                    Log.i(TAG, "Test " + method.getDeclaringClass().getName() + "#"
+                            + method.getName() + " disabled because of "
+                            + d);
+                    return true;
+                }
+            }
+        }
+
+        return false;
+    }
+
+    @SuppressWarnings("deprecation")
+    private boolean abi(DisableIf.Build v) {
+        if (v.supported_abis_includes().isEmpty()) return true;
+
+        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
+            return Arrays.asList(Build.SUPPORTED_ABIS).contains(
+                    v.supported_abis_includes());
+        } else {
+            return Build.CPU_ABI.equals(v.supported_abis_includes())
+                    || Build.CPU_ABI2.equals(v.supported_abis_includes());
+        }
+    }
+
+    private boolean hardware(DisableIf.Build v) {
+        return v.hardware_is().isEmpty() || Build.HARDWARE.equals(v.hardware_is());
+    }
+
+    private boolean product(DisableIf.Build v) {
+        return v.product_name_includes().isEmpty()
+                || Build.PRODUCT.contains(v.product_name_includes());
+    }
+
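+    // Note: both bounds are exclusive, so sdk_is_greater_than(19) matches SDK 20 and above,
+    // and sdk_is_less_than(21) matches SDK 20 and below.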
+    private boolean sdk(DisableIf.Build v) {
+        return Build.VERSION.SDK_INT > v.sdk_is_greater_than()
+                && Build.VERSION.SDK_INT < v.sdk_is_less_than();
+    }
+
+    protected boolean deviceTypeApplies(String type) {
+        return false;
+    }
+
+}
+
diff --git a/base/test/android/javatests/src/org/chromium/base/test/util/DisabledTest.java b/base/test/android/javatests/src/org/chromium/base/test/util/DisabledTest.java
new file mode 100644
index 0000000..a3e4e8e
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/util/DisabledTest.java
@@ -0,0 +1,22 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.util;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * This annotation is for disabled tests.
+ * <p>
+ * Tests with this annotation will not be run on any of the normal bots.
+ * Please note that they might eventually run on a special bot.
+ */
+@Target({ElementType.METHOD, ElementType.TYPE})
+@Retention(RetentionPolicy.RUNTIME)
+public @interface DisabledTest {
+    String message() default "";
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/util/EnormousTest.java b/base/test/android/javatests/src/org/chromium/base/test/util/EnormousTest.java
new file mode 100644
index 0000000..af483ec
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/util/EnormousTest.java
@@ -0,0 +1,24 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.util;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * This annotation is for enormous tests.
+ * <p>
+ * Examples of enormous tests are tests that depend on external web sites or
+ * tests that are long running.
+ * <p>
+ * Such tests are likely NOT reliable enough to run on tree closing bots and
+ * should only be run on FYI bots.
+ */
+@Target(ElementType.METHOD)
+@Retention(RetentionPolicy.RUNTIME)
+public @interface EnormousTest {
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/util/Feature.java b/base/test/android/javatests/src/org/chromium/base/test/util/Feature.java
new file mode 100644
index 0000000..1bc9226
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/util/Feature.java
@@ -0,0 +1,29 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.util;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * The Java instrumentation tests are normally fairly large (in terms of
+ * dependencies), and the test suite ends up containing a large number of
+ * tests that are not trivial to filter / group just by their names.
+ * Instead, we use this annotation: each test should be annotated as:
+ *     @Feature({"Foo", "Bar"})
+ * in order for the test runner scripts to be able to filter and group
+ * them accordingly (for instance, this enables us to run all tests that exercise
+ * feature Foo).
+ */
+@Target(ElementType.METHOD)
+@Retention(RetentionPolicy.RUNTIME)
+public @interface Feature {
+    /**
+     * @return A list of feature names.
+     */
+    public String[] value();
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/util/FlakyTest.java b/base/test/android/javatests/src/org/chromium/base/test/util/FlakyTest.java
new file mode 100644
index 0000000..83f8e9f
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/util/FlakyTest.java
@@ -0,0 +1,22 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.util;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * This annotation is for flaky tests.
+ * <p>
+ * Tests with this annotation will not be run on any of the normal bots.
+ * Please note that they might eventually run on a special bot.
+ */
+@Target({ElementType.METHOD, ElementType.TYPE})
+@Retention(RetentionPolicy.RUNTIME)
+public @interface FlakyTest {
+    String message() default "";
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/util/InMemorySharedPreferences.java b/base/test/android/javatests/src/org/chromium/base/test/util/InMemorySharedPreferences.java
new file mode 100644
index 0000000..2587d72
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/util/InMemorySharedPreferences.java
@@ -0,0 +1,238 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.util;
+
+import android.content.SharedPreferences;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * An implementation of SharedPreferences that can be used in tests.
+ * <p/>
+ * It keeps all state in memory, and there is no difference between apply() and commit().
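+ * <p>
+ * Example (editor's sketch):
+ * <pre>
+ * SharedPreferences prefs = new InMemorySharedPreferences();
+ * prefs.edit().putInt("count", 1).apply();
+ * assert prefs.getInt("count", 0) == 1;
+ * </pre>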
+ */
+public class InMemorySharedPreferences implements SharedPreferences {
+
+    // Guarded on its own monitor.
+    private final Map<String, Object> mData;
+
+    public InMemorySharedPreferences() {
+        mData = new HashMap<String, Object>();
+    }
+
+    public InMemorySharedPreferences(Map<String, Object> data) {
+        mData = data;
+    }
+
+    @Override
+    public Map<String, ?> getAll() {
+        synchronized (mData) {
+            return Collections.unmodifiableMap(mData);
+        }
+    }
+
+    @Override
+    public String getString(String key, String defValue) {
+        synchronized (mData) {
+            if (mData.containsKey(key)) {
+                return (String) mData.get(key);
+            }
+        }
+        return defValue;
+    }
+
+    @SuppressWarnings("unchecked")
+    @Override
+    public Set<String> getStringSet(String key, Set<String> defValues) {
+        synchronized (mData) {
+            if (mData.containsKey(key)) {
+                return Collections.unmodifiableSet((Set<String>) mData.get(key));
+            }
+        }
+        return defValues;
+    }
+
+    @Override
+    public int getInt(String key, int defValue) {
+        synchronized (mData) {
+            if (mData.containsKey(key)) {
+                return (Integer) mData.get(key);
+            }
+        }
+        return defValue;
+    }
+
+    @Override
+    public long getLong(String key, long defValue) {
+        synchronized (mData) {
+            if (mData.containsKey(key)) {
+                return (Long) mData.get(key);
+            }
+        }
+        return defValue;
+    }
+
+    @Override
+    public float getFloat(String key, float defValue) {
+        synchronized (mData) {
+            if (mData.containsKey(key)) {
+                return (Float) mData.get(key);
+            }
+        }
+        return defValue;
+    }
+
+    @Override
+    public boolean getBoolean(String key, boolean defValue) {
+        synchronized (mData) {
+            if (mData.containsKey(key)) {
+                return (Boolean) mData.get(key);
+            }
+        }
+        return defValue;
+    }
+
+    @Override
+    public boolean contains(String key) {
+        synchronized (mData) {
+            return mData.containsKey(key);
+        }
+    }
+
+    @Override
+    public SharedPreferences.Editor edit() {
+        return new InMemoryEditor();
+    }
+
+    @Override
+    public void registerOnSharedPreferenceChangeListener(
+            SharedPreferences.OnSharedPreferenceChangeListener
+                    listener) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public void unregisterOnSharedPreferenceChangeListener(
+            SharedPreferences.OnSharedPreferenceChangeListener listener) {
+        throw new UnsupportedOperationException();
+    }
+
+    private class InMemoryEditor implements SharedPreferences.Editor {
+
+        // All guarded by |mChanges|
+        private boolean mClearCalled;
+        private volatile boolean mApplyCalled;
+        private final Map<String, Object> mChanges = new HashMap<String, Object>();
+
+        @Override
+        public SharedPreferences.Editor putString(String key, String value) {
+            synchronized (mChanges) {
+                if (mApplyCalled) throw new IllegalStateException();
+                mChanges.put(key, value);
+                return this;
+            }
+        }
+
+        @Override
+        public SharedPreferences.Editor putStringSet(String key, Set<String> values) {
+            synchronized (mChanges) {
+                if (mApplyCalled) throw new IllegalStateException();
+                mChanges.put(key, values);
+                return this;
+            }
+        }
+
+        @Override
+        public SharedPreferences.Editor putInt(String key, int value) {
+            synchronized (mChanges) {
+                if (mApplyCalled) throw new IllegalStateException();
+                mChanges.put(key, value);
+                return this;
+            }
+        }
+
+        @Override
+        public SharedPreferences.Editor putLong(String key, long value) {
+            synchronized (mChanges) {
+                if (mApplyCalled) throw new IllegalStateException();
+                mChanges.put(key, value);
+                return this;
+            }
+        }
+
+        @Override
+        public SharedPreferences.Editor putFloat(String key, float value) {
+            synchronized (mChanges) {
+                if (mApplyCalled) throw new IllegalStateException();
+                mChanges.put(key, value);
+                return this;
+            }
+        }
+
+        @Override
+        public SharedPreferences.Editor putBoolean(String key, boolean value) {
+            synchronized (mChanges) {
+                if (mApplyCalled) throw new IllegalStateException();
+                mChanges.put(key, value);
+                return this;
+            }
+        }
+
+        @Override
+        public SharedPreferences.Editor remove(String key) {
+            synchronized (mChanges) {
+                if (mApplyCalled) throw new IllegalStateException();
+                // Magic value for removes
+                mChanges.put(key, this);
+                return this;
+            }
+        }
+
+        @Override
+        public SharedPreferences.Editor clear() {
+            synchronized (mChanges) {
+                if (mApplyCalled) throw new IllegalStateException();
+                mClearCalled = true;
+                return this;
+            }
+        }
+
+        @Override
+        public boolean commit() {
+            apply();
+            return true;
+        }
+
+        @Override
+        public void apply() {
+            synchronized (mData) {
+                synchronized (mChanges) {
+                    if (mApplyCalled) throw new IllegalStateException();
+                    if (mClearCalled) {
+                        mData.clear();
+                    }
+                    for (Map.Entry<String, Object> entry : mChanges.entrySet()) {
+                        String key = entry.getKey();
+                        Object value = entry.getValue();
+                        if (value == this) {
+                            // Special value for removal
+                            mData.remove(key);
+                        } else {
+                            mData.put(key, value);
+                        }
+                    }
+                    // The real shared prefs clears out the temporaries allowing the caller to
+                    // reuse the Editor instance, however this is undocumented behavior and subtle
+                    // to read, so instead we just ban any future use of this instance.
+                    mApplyCalled = true;
+                }
+            }
+        }
+    }
+
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/util/InstrumentationUtils.java b/base/test/android/javatests/src/org/chromium/base/test/util/InstrumentationUtils.java
new file mode 100644
index 0000000..20cfd9d
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/util/InstrumentationUtils.java
@@ -0,0 +1,32 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.util;
+
+import android.app.Instrumentation;
+
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.FutureTask;
+
+/**
+ * Utility methods built around the android.app.Instrumentation class.
+ */
+public final class InstrumentationUtils {
+
+    private InstrumentationUtils() {
+    }
+
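+    /**
+     * Runs {@code callable} on the main thread via the given {@link Instrumentation} and blocks
+     * until it completes, returning its result or rethrowing whatever it threw. Usage sketch
+     * (editor's illustration, hypothetical activity):
+     * <pre>
+     * String title = InstrumentationUtils.runOnMainSyncAndGetResult(
+     *         getInstrumentation(), () -> activity.getTitle().toString());
+     * </pre>
+     */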
+    public static <R> R runOnMainSyncAndGetResult(Instrumentation instrumentation,
+            Callable<R> callable) throws Throwable {
+        FutureTask<R> task = new FutureTask<R>(callable);
+        instrumentation.runOnMainSync(task);
+        try {
+            return task.get();
+        } catch (ExecutionException e) {
+            // Unwrap the cause of the exception and re-throw it.
+            throw e.getCause();
+        }
+    }
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/util/IntegrationTest.java b/base/test/android/javatests/src/org/chromium/base/test/util/IntegrationTest.java
new file mode 100644
index 0000000..8b6550d
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/util/IntegrationTest.java
@@ -0,0 +1,26 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.util;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * This annotation is for integration tests.
+ * <p>
+ * Examples of integration tests are tests that rely on real instances of the
+ * application's services and components (e.g. Search) to test the system as
+ * a whole. These tests may use additional command-line flags to configure the
+ * existing backends to use.
+ * <p>
+ * Such tests are likely NOT reliable enough to run on tree closing bots and
+ * should only be run on FYI bots.
+ */
+@Target(ElementType.METHOD)
+@Retention(RetentionPolicy.RUNTIME)
+public @interface IntegrationTest {
+}
\ No newline at end of file
diff --git a/base/test/android/javatests/src/org/chromium/base/test/util/Manual.java b/base/test/android/javatests/src/org/chromium/base/test/util/Manual.java
new file mode 100644
index 0000000..31f3977
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/util/Manual.java
@@ -0,0 +1,21 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.util;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * This annotation can be used to mark a test that should only be run manually.
+ * <p>
+ * Tests with this annotation will not be run on bots, because they take too long
+ * or need manual monitoring.
+ */
+@Target(ElementType.METHOD)
+@Retention(RetentionPolicy.RUNTIME)
+public @interface Manual {
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/util/ManualSkipCheck.java b/base/test/android/javatests/src/org/chromium/base/test/util/ManualSkipCheck.java
new file mode 100644
index 0000000..a916bdf
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/util/ManualSkipCheck.java
@@ -0,0 +1,18 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.util;
+
+import org.junit.runners.model.FrameworkMethod;
+
+/**
+ * Skips any test methods annotated with {@code @}{@link Manual}.
+ */
+public class ManualSkipCheck extends SkipCheck {
+    @Override
+    public boolean shouldSkip(FrameworkMethod testMethod) {
+        return !AnnotationProcessingUtils.getAnnotations(testMethod.getMethod(), Manual.class)
+                        .isEmpty();
+    }
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/util/Matchers.java b/base/test/android/javatests/src/org/chromium/base/test/util/Matchers.java
new file mode 100644
index 0000000..fc9d689
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/util/Matchers.java
@@ -0,0 +1,44 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.util;
+
+import org.hamcrest.CoreMatchers;
+import org.hamcrest.Description;
+import org.hamcrest.Matcher;
+import org.hamcrest.TypeSafeMatcher;
+
+/**
+ * Helper class containing Hamcrest matchers.
+ */
+public class Matchers extends CoreMatchers {
+    private static class GreaterThanOrEqualTo<T extends Comparable<T>>
+            extends TypeSafeMatcher<T> {
+
+        private final T mComparisonValue;
+
+        public GreaterThanOrEqualTo(T comparisonValue) {
+            mComparisonValue = comparisonValue;
+        }
+
+        @Override
+        public void describeTo(Description description) {
+            description.appendText("greater than or equal to ").appendValue(mComparisonValue);
+        }
+
+        @Override
+        protected boolean matchesSafely(T item) {
+            return item.compareTo(mComparisonValue) >= 0;
+        }
+    }
+
+    /**
+     * @param <T> A Comparable type.
+     * @param comparisonValue The value to be compared against.
+     * @return A matcher that expects the value to be greater than or equal to the
+     *         |comparisonValue|.
+     */
+    public static <T extends Comparable<T>> Matcher<T> greaterThanOrEqualTo(T comparisonValue) {
+        return new GreaterThanOrEqualTo<>(comparisonValue);
+    }
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/util/MetricsUtils.java b/base/test/android/javatests/src/org/chromium/base/test/util/MetricsUtils.java
new file mode 100644
index 0000000..c4664d6
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/util/MetricsUtils.java
@@ -0,0 +1,43 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.util;
+
+import org.chromium.base.metrics.RecordHistogram;
+
+/**
+ * Helpers for testing UMA metrics.
+ */
+public class MetricsUtils {
+    /**
+     * Helper class that snapshots the given bucket of the given UMA histogram on its creation,
+     * allowing the number of samples recorded during its lifetime to be inspected.
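+     *
+     * Example (editor's illustration, hypothetical histogram name):
+     * <pre>
+     * HistogramDelta delta = new HistogramDelta("Hypothetical.Histogram", 1);
+     * // ... exercise code that records samples ...
+     * assert delta.getDelta() == 1;
+     * </pre>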
+     */
+    public static class HistogramDelta {
+        private final String mHistogram;
+        private final int mSampleValue;
+
+        private final int mInitialCount;
+
+        private int get() {
+            return RecordHistogram.getHistogramValueCountForTesting(mHistogram, mSampleValue);
+        }
+
+        /**
+         * Snapshots the given bucket of the given histogram.
+         * @param histogram name of the histogram to snapshot
+         * @param sampleValue the bucket that contains this value will be snapshotted
+         */
+        public HistogramDelta(String histogram, int sampleValue) {
+            mHistogram = histogram;
+            mSampleValue = sampleValue;
+            mInitialCount = get();
+        }
+
+        /** Returns the number of samples recorded in the snapshotted bucket since creation. */
+        public int getDelta() {
+            return get() - mInitialCount;
+        }
+    }
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/util/MinAndroidSdkLevel.java b/base/test/android/javatests/src/org/chromium/base/test/util/MinAndroidSdkLevel.java
new file mode 100644
index 0000000..13e2578
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/util/MinAndroidSdkLevel.java
@@ -0,0 +1,19 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.util;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Inherited;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+@Inherited
+@Retention(RetentionPolicy.RUNTIME)
+@Target({ElementType.METHOD, ElementType.TYPE})
+public @interface MinAndroidSdkLevel {
+    int value() default 0;
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/util/MinAndroidSdkLevelSkipCheck.java b/base/test/android/javatests/src/org/chromium/base/test/util/MinAndroidSdkLevelSkipCheck.java
new file mode 100644
index 0000000..8b07c0f
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/util/MinAndroidSdkLevelSkipCheck.java
@@ -0,0 +1,43 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.util;
+
+import android.os.Build;
+
+import org.junit.runners.model.FrameworkMethod;
+
+import org.chromium.base.Log;
+
+/**
+ * Checks the device's SDK level against any specified minimum requirement.
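+ *
+ * <p>For example (a usage sketch; 21 is Lollipop):
+ * <pre>
+ * {@code @}MinAndroidSdkLevel(21)
+ * public void testOnlyOnLollipopAndAbove() {}
+ * </pre>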
+ */
+public class MinAndroidSdkLevelSkipCheck extends SkipCheck {
+
+    private static final String TAG = "base_test";
+
+    /**
+     * If {@link MinAndroidSdkLevel} is present, checks its value
+     * against the device's SDK level.
+     *
+     * @param frameworkMethod The test method to check.
+     * @return true if the device's SDK level is below the specified minimum.
+     */
+    @Override
+    public boolean shouldSkip(FrameworkMethod frameworkMethod) {
+        int minSdkLevel = 0;
+        for (MinAndroidSdkLevel m : AnnotationProcessingUtils.getAnnotations(
+                     frameworkMethod.getMethod(), MinAndroidSdkLevel.class)) {
+            minSdkLevel = Math.max(minSdkLevel, m.value());
+        }
+        if (Build.VERSION.SDK_INT < minSdkLevel) {
+            Log.i(TAG, "Test " + frameworkMethod.getDeclaringClass().getName() + "#"
+                    + frameworkMethod.getName() + " is not enabled at SDK level "
+                    + Build.VERSION.SDK_INT + ".");
+            return true;
+        }
+        return false;
+    }
+
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/util/Restriction.java b/base/test/android/javatests/src/org/chromium/base/test/util/Restriction.java
new file mode 100644
index 0000000..f39bfbd
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/util/Restriction.java
@@ -0,0 +1,37 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.util;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * An annotation for listing restrictions for a test method. For example, if a test method is only
+ * applicable on a phone with small memory:
+ *     @Restriction({RESTRICTION_TYPE_PHONE, RESTRICTION_TYPE_SMALL_MEMORY})
+ * Test classes are free to define restrictions and enforce them using reflection at runtime.
+ */
+@Target({ElementType.METHOD, ElementType.TYPE})
+@Retention(RetentionPolicy.RUNTIME)
+public @interface Restriction {
+    /** Specifies the test is only valid on low end devices that have less memory. */
+    public static final String RESTRICTION_TYPE_LOW_END_DEVICE = "Low_End_Device";
+
+    /** Specifies the test is only valid on non-low end devices. */
+    public static final String RESTRICTION_TYPE_NON_LOW_END_DEVICE = "Non_Low_End_Device";
+
+    /** Specifies the test is only valid on a device that can reach the internet. */
+    public static final String RESTRICTION_TYPE_INTERNET = "Internet";
+
+    /** Specifies the test is only valid on a device that has a camera. */
+    public static final String RESTRICTION_TYPE_HAS_CAMERA = "Has_Camera";
+
+    /**
+     * @return A list of restrictions.
+     */
+    public String[] value();
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/util/RestrictionSkipCheck.java b/base/test/android/javatests/src/org/chromium/base/test/util/RestrictionSkipCheck.java
new file mode 100644
index 0000000..a27dd1f
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/util/RestrictionSkipCheck.java
@@ -0,0 +1,78 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.util;
+
+import android.content.Context;
+import android.net.ConnectivityManager;
+import android.net.NetworkInfo;
+import android.text.TextUtils;
+
+import org.junit.runners.model.FrameworkMethod;
+
+import org.chromium.base.Log;
+import org.chromium.base.SysUtils;
+
+/**
+ * Checks if any restrictions exist and skips the test if it meets those restrictions.
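+ *
+ * <p>Construction sketch (the target Context typically comes from InstrumentationRegistry):
+ * <pre>
+ * RestrictionSkipCheck check =
+ *         new RestrictionSkipCheck(InstrumentationRegistry.getTargetContext());
+ * </pre>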
+ */
+public class RestrictionSkipCheck extends SkipCheck {
+
+    private static final String TAG = "base_test";
+
+    private final Context mTargetContext;
+
+    public RestrictionSkipCheck(Context targetContext) {
+        mTargetContext = targetContext;
+    }
+
+    protected Context getTargetContext() {
+        return mTargetContext;
+    }
+
+    @Override
+    public boolean shouldSkip(FrameworkMethod frameworkMethod) {
+        if (frameworkMethod == null) return true;
+
+        for (Restriction restriction : AnnotationProcessingUtils.getAnnotations(
+                     frameworkMethod.getMethod(), Restriction.class)) {
+            for (String restrictionVal : restriction.value()) {
+                if (restrictionApplies(restrictionVal)) {
+                    Log.i(TAG, "Test " + frameworkMethod.getDeclaringClass().getName() + "#"
+                            + frameworkMethod.getName() + " skipped because of restriction "
+                            + restriction);
+                    return true;
+                }
+            }
+        }
+        return false;
+    }
+
+    protected boolean restrictionApplies(String restriction) {
+        if (TextUtils.equals(restriction, Restriction.RESTRICTION_TYPE_LOW_END_DEVICE)
+                && !SysUtils.isLowEndDevice()) {
+            return true;
+        }
+        if (TextUtils.equals(restriction, Restriction.RESTRICTION_TYPE_NON_LOW_END_DEVICE)
+                && SysUtils.isLowEndDevice()) {
+            return true;
+        }
+        if (TextUtils.equals(restriction, Restriction.RESTRICTION_TYPE_INTERNET)
+                && !isNetworkAvailable()) {
+            return true;
+        }
+        if (TextUtils.equals(restriction, Restriction.RESTRICTION_TYPE_HAS_CAMERA)
+                && !SysUtils.hasCamera(mTargetContext)) {
+            return true;
+        }
+        return false;
+    }
+
+    private boolean isNetworkAvailable() {
+        final ConnectivityManager connectivityManager = (ConnectivityManager)
+                mTargetContext.getSystemService(Context.CONNECTIVITY_SERVICE);
+        final NetworkInfo activeNetworkInfo = connectivityManager.getActiveNetworkInfo();
+        return activeNetworkInfo != null && activeNetworkInfo.isConnected();
+    }
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/util/RetryOnFailure.java b/base/test/android/javatests/src/org/chromium/base/test/util/RetryOnFailure.java
new file mode 100644
index 0000000..eb98008
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/util/RetryOnFailure.java
@@ -0,0 +1,25 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.util;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+// Note this annotation may be a NOOP. Check http://crbug.com/797002 for latest status (also see
+// http://crbug.com/619055). Current default behavior is to retry all tests on failure.
+/**
+ * Marks a test as flaky; it should be retried on failure. The test is
+ * considered passed by the test script if any retry succeeds.
+ *
+ * Long term, this should be merged with @FlakyTest. But @FlakyTest has a
+ * specific meaning that is currently different from RetryOnFailure.
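+ *
+ * A usage sketch (the message text is free-form and hypothetical):
+ *   {@code @}RetryOnFailure(message = "Flaky on slow bots; see the tracking bug")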
+ */
+@Target({ElementType.METHOD, ElementType.TYPE})
+@Retention(RetentionPolicy.RUNTIME)
+public @interface RetryOnFailure {
+    String message() default "";
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/util/ScalableTimeout.java b/base/test/android/javatests/src/org/chromium/base/test/util/ScalableTimeout.java
new file mode 100644
index 0000000..7a815c0
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/util/ScalableTimeout.java
@@ -0,0 +1,29 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.util;
+
+/**
+ * Utility class for scaling various timeouts by a common factor.
+ * For example, to run tests under slow memory tools, you might do
+ * something like this:
+ *   adb shell "echo 20.0 > /data/local/tmp/chrome_timeout_scale"
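+ *
+ * A call-site sketch (TIMEOUT_MS is a hypothetical constant):
+ *   private static final long TIMEOUT_MS = ScalableTimeout.scaleTimeout(5000L);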
+ */
+public class ScalableTimeout {
+    private static Double sTimeoutScale;
+    public static final String PROPERTY_FILE = "/data/local/tmp/chrome_timeout_scale";
+
+    public static long scaleTimeout(long timeout) {
+        if (sTimeoutScale == null) {
+            try {
+                char[] data = TestFileUtil.readUtf8File(PROPERTY_FILE, 32);
+                sTimeoutScale = Double.parseDouble(new String(data));
+            } catch (Exception e) {
+                // NumberFormatException, FileNotFoundException, IOException
+                sTimeoutScale = 1.0;
+            }
+        }
+        return (long) (timeout * sTimeoutScale);
+    }
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/util/SkipCheck.java b/base/test/android/javatests/src/org/chromium/base/test/util/SkipCheck.java
new file mode 100644
index 0000000..d1dd7be
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/util/SkipCheck.java
@@ -0,0 +1,49 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.util;
+
+import junit.framework.TestCase;
+
+import org.junit.runners.model.FrameworkMethod;
+
+import org.chromium.base.Log;
+
+import java.lang.reflect.Method;
+
+/**
+ * Checks whether a test case should be skipped.
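+ *
+ * A minimal subclass sketch (MyUnsupported is a hypothetical annotation):
+ * <pre>
+ * public class MySkipCheck extends SkipCheck {
+ *     {@code @}Override
+ *     public boolean shouldSkip(FrameworkMethod testMethod) {
+ *         return testMethod.getAnnotation(MyUnsupported.class) != null;
+ *     }
+ * }
+ * </pre>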
+ */
+public abstract class SkipCheck {
+
+    private static final String TAG = "base_test";
+
+    /**
+     *
+     * Checks whether the given test method should be skipped.
+     *
+     * @param testMethod The test method to check.
+     * @return Whether the test case should be skipped.
+     */
+    public abstract boolean shouldSkip(FrameworkMethod testMethod);
+
+    /**
+     *
+     * Checks whether the given test case should be skipped.
+     *
+     * @param testCase The test case to check.
+     * @return Whether the test case should be skipped.
+     */
+    public boolean shouldSkip(TestCase testCase) {
+        try {
+            Method m = testCase.getClass().getMethod(testCase.getName(), (Class[]) null);
+            return shouldSkip(new FrameworkMethod(m));
+        } catch (NoSuchMethodException e) {
+            Log.e(TAG, "Unable to find %s in %s", testCase.getName(),
+                    testCase.getClass().getName(), e);
+            return false;
+        }
+    }
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/util/TestFileUtil.java b/base/test/android/javatests/src/org/chromium/base/test/util/TestFileUtil.java
new file mode 100644
index 0000000..6d89121
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/util/TestFileUtil.java
@@ -0,0 +1,85 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.util;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.OutputStreamWriter;
+import java.io.Reader;
+import java.io.Writer;
+import java.util.Arrays;
+
+/**
+ * Utility class for dealing with files for test.
+ */
+public class TestFileUtil {
+    public static void createNewHtmlFile(String name, String title, String body)
+            throws IOException {
+        createNewHtmlFile(new File(name), title, body);
+    }
+
+    public static void createNewHtmlFile(File file, String title, String body)
+            throws IOException {
+        if (!file.createNewFile()) {
+            throw new IOException("File \"" + file.getAbsolutePath() + "\" already exists");
+        }
+
+        Writer writer = null;
+        try {
+            writer = new OutputStreamWriter(new FileOutputStream(file), "UTF-8");
+            writer.write("<html><meta charset=\"UTF-8\" />"
+                    + "     <head><title>" + title + "</title></head>"
+                    + "     <body>"
+                    + (body != null ? body : "")
+                    + "     </body>"
+                    + "   </html>");
+        } finally {
+            if (writer != null) {
+                writer.close();
+            }
+        }
+    }
+
+    public static void deleteFile(String name) {
+        deleteFile(new File(name));
+    }
+
+    public static void deleteFile(File file) {
+        boolean deleted = file.delete();
+        assert (deleted || !file.exists());
+    }
+
+    /**
+     * @param fileName the file to read in.
+     * @param sizeLimit cap on the file size: will throw an exception if exceeded
+     * @return Array of chars read from the file
+     * @throws FileNotFoundException if the file does not exist
+     * @throws IOException error encountered accessing the file
+     */
+    public static char[] readUtf8File(String fileName, int sizeLimit) throws
+            FileNotFoundException, IOException {
+        Reader reader = null;
+        try {
+            File f = new File(fileName);
+            if (f.length() > sizeLimit) {
+                throw new IOException("File " + fileName + " length " + f.length()
+                        + " exceeds limit " + sizeLimit);
+            }
+            char[] buffer = new char[(int) f.length()];
+            reader = new InputStreamReader(new FileInputStream(f), "UTF-8");
+            int charsRead = reader.read(buffer);
+            // Debug check that we've exhausted the input stream (will fail e.g. if the
+            // file grew after we inspected its length).
+            assert !reader.ready();
+            return charsRead < buffer.length ? Arrays.copyOfRange(buffer, 0, charsRead) : buffer;
+        } finally {
+            if (reader != null) reader.close();
+        }
+    }
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/util/TestThread.java b/base/test/android/javatests/src/org/chromium/base/test/util/TestThread.java
new file mode 100644
index 0000000..4f62969
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/util/TestThread.java
@@ -0,0 +1,143 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.util;
+
+import android.os.Handler;
+import android.os.Looper;
+
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ * This class is useful when writing instrumentation tests that exercise code that posts tasks
+ * (to the same thread).
+ * Since the test code is run in a single thread, the posted tasks are never executed.
+ * The TestThread class lets you run that code on a specific thread synchronously and flush the
+ * message loop on that thread.
+ *
+ * Example of test using this:
+ *
+ * public void testMyAwesomeClass() {
+ *   TestThread testThread = new TestThread();
+ *   testThread.startAndWaitForReadyState();
+ *
+ *   testThread.runOnTestThreadSyncAndProcessPendingTasks(new Runnable() {
+ *       @Override
+ *       public void run() {
+ *           MyAwesomeClass.doStuffAsync();
+ *       }
+ *   });
+ *   // Once we get there we know doStuffAsync has been executed and all the tasks it posted.
+ *   assertTrue(MyAwesomeClass.stuffWasDone());
+ * }
+ *
+ * Notes:
+ * - this is only for tasks posted to the same thread. In any case, if you were posting to a
+ *   different thread, you'd probably need to set that other thread up.
+ * - this only supports tasks posted using Handler.post(), it won't work with postDelayed and
+ *   postAtTime.
+ * - if your test instantiates an object and that object is the one doing the posting of tasks, you
+ *   probably want to instantiate it on the test thread as it might create the Handler it posts
+ *   tasks to in the constructor.
+ */
+public class TestThread extends Thread {
+    private final Object mThreadReadyLock;
+    private AtomicBoolean mThreadReady;
+    private Handler mMainThreadHandler;
+    private Handler mTestThreadHandler;
+
+    public TestThread() {
+        mMainThreadHandler = new Handler();
+        // We can't use the AtomicBoolean as the lock or findbugs will freak out...
+        mThreadReadyLock = new Object();
+        mThreadReady = new AtomicBoolean();
+    }
+
+    @Override
+    public void run() {
+        Looper.prepare();
+        mTestThreadHandler = new Handler();
+        mTestThreadHandler.post(new Runnable() {
+            @Override
+            public void run() {
+                synchronized (mThreadReadyLock) {
+                    mThreadReady.set(true);
+                    mThreadReadyLock.notify();
+                }
+            }
+        });
+        Looper.loop();
+    }
+
+    /**
+     * Starts this TestThread and blocks until it's ready to accept calls.
+     */
+    public void startAndWaitForReadyState() {
+        checkOnMainThread();
+        start();
+        synchronized (mThreadReadyLock) {
+            try {
+                // Note the mThreadReady and the while loop are not really needed.
+                // They are there so findbugs doesn't report warnings.
+                while (!mThreadReady.get()) {
+                    mThreadReadyLock.wait();
+                }
+            } catch (InterruptedException ie) {
+                System.err.println("Error starting TestThread.");
+                ie.printStackTrace();
+            }
+        }
+    }
+
+    /**
+     * Runs the passed Runnable synchronously on the TestThread and returns when all pending
+     * runnables have been executed.
+     * Should be called from the main thread.
+     */
+    public void runOnTestThreadSyncAndProcessPendingTasks(Runnable r) {
+        checkOnMainThread();
+
+        runOnTestThreadSync(r);
+
+        // Run another task; when it's done it means all pending tasks have executed.
+        runOnTestThreadSync(null);
+    }
+
+    /**
+     * Runs the passed Runnable on the test thread and blocks until it has finished executing.
+     * Should be called from the main thread.
+     * @param r The runnable to be executed.
+     */
+    public void runOnTestThreadSync(final Runnable r) {
+        checkOnMainThread();
+        final Object lock = new Object();
+        // taskExecuted is not really needed since we are only on one thread; it is here to
+        // appease findbugs.
+        final AtomicBoolean taskExecuted = new AtomicBoolean();
+        mTestThreadHandler.post(new Runnable() {
+            @Override
+            public void run() {
+                if (r != null) r.run();
+                synchronized (lock) {
+                    taskExecuted.set(true);
+                    lock.notify();
+                }
+            }
+        });
+        synchronized (lock) {
+            try {
+                while (!taskExecuted.get()) {
+                    lock.wait();
+                }
+            } catch (InterruptedException ie) {
+                ie.printStackTrace();
+            }
+        }
+    }
+
+    private void checkOnMainThread() {
+        assert Looper.myLooper() == mMainThreadHandler.getLooper();
+    }
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/util/TimeoutScale.java b/base/test/android/javatests/src/org/chromium/base/test/util/TimeoutScale.java
new file mode 100644
index 0000000..5aee05e
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/util/TimeoutScale.java
@@ -0,0 +1,22 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.util;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * This annotation can be used to scale a specific test timeout.
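+ *
+ * For example (a sketch), {@code @}TimeoutScale(2) asks the harness to double the annotated
+ * test's timeout.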
+ */
+@Target(ElementType.METHOD)
+@Retention(RetentionPolicy.RUNTIME)
+public @interface TimeoutScale {
+    /**
+     * @return A number to scale the test timeout.
+     */
+    public int value();
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/util/UrlUtils.java b/base/test/android/javatests/src/org/chromium/base/test/util/UrlUtils.java
new file mode 100644
index 0000000..9ca3fcc
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/util/UrlUtils.java
@@ -0,0 +1,84 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.util;
+
+import org.junit.Assert;
+
+import org.chromium.base.PathUtils;
+import org.chromium.base.annotations.CalledByNative;
+import org.chromium.base.annotations.MainDex;
+
+/**
+ * Collection of URL utilities.
+ */
+@MainDex
+public class UrlUtils {
+    private static final String DATA_DIR = "/chrome/test/data/";
+
+    /**
+     * Construct the full path of a test data file.
+     * @param path Pathname relative to external/chrome/test/data
+     */
+    public static String getTestFilePath(String path) {
+        // TODO(jbudorick): Remove DATA_DIR once everything has been isolated. crbug/400499
+        return getIsolatedTestFilePath(DATA_DIR + path);
+    }
+
+    // TODO(jbudorick): Remove this function once everything has been isolated and switched back
+    // to getTestFilePath. crbug/400499
+    /**
+     * Construct the full path of a test data file.
+     * @param path Pathname relative to external/
+     */
+    public static String getIsolatedTestFilePath(String path) {
+        return getIsolatedTestRoot() + "/" + path;
+    }
+
+    /**
+     * Returns the root of the test data directory.
+     */
+    @CalledByNative
+    public static String getIsolatedTestRoot() {
+        return PathUtils.getExternalStorageDirectory() + "/chromium_tests_root";
+    }
+
+    /**
+     * Construct a suitable URL for loading a test data file.
+     * @param path Pathname relative to external/chrome/test/data
+     */
+    public static String getTestFileUrl(String path) {
+        return "file://" + getTestFilePath(path);
+    }
+
+    // TODO(jbudorick): Remove this function once everything has been isolated and switched back
+    // to getTestFileUrl. crbug/400499
+    /**
+     * Construct a suitable URL for loading a test data file.
+     * @param path Pathname relative to external/
+     */
+    public static String getIsolatedTestFileUrl(String path) {
+        return "file://" + getIsolatedTestFilePath(path);
+    }
+
+    /**
+     * Construct a data:text/html URI for loading inline HTML.
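+     *
+     * A worked sketch: encodeHtmlDataUri("&lt;b&gt;hi&lt;/b&gt;") returns
+     * "data:text/html;utf-8,%3Cb%3Ehi%3C%2Fb%3E".
+     *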
+     * @param html An unencoded HTML
+     * @return String A URI that contains the given HTML
+     */
+    public static String encodeHtmlDataUri(String html) {
+        try {
+            // URLEncoder encodes into application/x-www-form-encoded, so
+            // ' '->'+' needs to be undone and replaced with ' '->'%20'
+            // to match the Data URI requirements.
+            String encoded =
+                    "data:text/html;utf-8," + java.net.URLEncoder.encode(html, "UTF-8");
+            encoded = encoded.replace("+", "%20");
+            return encoded;
+        } catch (java.io.UnsupportedEncodingException e) {
+            Assert.fail("Unsupported encoding: " + e.getMessage());
+            return null;
+        }
+    }
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/util/UserActionTester.java b/base/test/android/javatests/src/org/chromium/base/test/util/UserActionTester.java
new file mode 100644
index 0000000..88e3551
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/util/UserActionTester.java
@@ -0,0 +1,51 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.util;
+
+import org.chromium.base.ThreadUtils;
+import org.chromium.base.metrics.RecordUserAction;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * A utility class that records UserActions.
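+ *
+ * <p>A usage sketch:
+ * <pre>
+ * UserActionTester actionTester = new UserActionTester();
+ * // ... run code under test that records user actions ...
+ * List&lt;String&gt; actions = actionTester.getActions();
+ * actionTester.tearDown();
+ * </pre>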
+ */
+public class UserActionTester implements RecordUserAction.UserActionCallback {
+    private List<String> mActions;
+
+    public UserActionTester() {
+        mActions = new ArrayList<>();
+        ThreadUtils.runOnUiThreadBlocking(new Runnable() {
+            @Override
+            public void run() {
+                RecordUserAction.setActionCallbackForTesting(UserActionTester.this);
+            }
+        });
+    }
+
+    public void tearDown() {
+        ThreadUtils.runOnUiThreadBlocking(new Runnable() {
+            @Override
+            public void run() {
+                RecordUserAction.removeActionCallbackForTesting();
+            }
+        });
+    }
+
+    @Override
+    public void onActionRecorded(String action) {
+        mActions.add(action);
+    }
+
+    public List<String> getActions() {
+        return mActions;
+    }
+
+    @Override
+    public String toString() {
+        return "Actions: " + mActions.toString();
+    }
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/util/parameter/CommandLineParameter.java b/base/test/android/javatests/src/org/chromium/base/test/util/parameter/CommandLineParameter.java
new file mode 100644
index 0000000..e6f5506
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/util/parameter/CommandLineParameter.java
@@ -0,0 +1,32 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.util.parameter;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Inherited;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * The annotation for parameterizing CommandLineFlags in JUnit3 instrumentation tests.
+ *
+ * E.g. if you add the following annotation to your test class:
+ *
+ * <code>
+ * @CommandLineParameter({"", FLAG_A, FLAG_B})
+ * public class MyTestClass
+ * </code>
+ *
+ * The test harness will run the test three times, each time with one of the flags added to the
+ * command line file.
+ */
+@Inherited
+@Retention(RetentionPolicy.RUNTIME)
+@Target({ElementType.METHOD, ElementType.TYPE})
+public @interface CommandLineParameter {
+    String[] value() default {};
+}
diff --git a/base/test/android/javatests/src/org/chromium/base/test/util/parameter/SkipCommandLineParameterization.java b/base/test/android/javatests/src/org/chromium/base/test/util/parameter/SkipCommandLineParameterization.java
new file mode 100644
index 0000000..2181031
--- /dev/null
+++ b/base/test/android/javatests/src/org/chromium/base/test/util/parameter/SkipCommandLineParameterization.java
@@ -0,0 +1,23 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+package org.chromium.base.test.util.parameter;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Inherited;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * BaseJUnit4ClassRunner and the host-side test harness skip command line parameterization for
+ * test classes or methods annotated with SkipCommandLineParameterization.
+ *
+ * This is usually used by tests that run in only one configuration, e.g. WebView javatests that
+ * run only in sandboxed mode or single process mode.
+ */
+@Inherited
+@Retention(RetentionPolicy.RUNTIME)
+@Target({ElementType.METHOD, ElementType.TYPE})
+public @interface SkipCommandLineParameterization {}
diff --git a/base/test/android/junit/src/org/chromium/base/test/BaseRobolectricTestRunner.java b/base/test/android/junit/src/org/chromium/base/test/BaseRobolectricTestRunner.java
new file mode 100644
index 0000000..3ca756a
--- /dev/null
+++ b/base/test/android/junit/src/org/chromium/base/test/BaseRobolectricTestRunner.java
@@ -0,0 +1,49 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test;
+
+import org.junit.runners.model.InitializationError;
+import org.robolectric.DefaultTestLifecycle;
+import org.robolectric.RuntimeEnvironment;
+import org.robolectric.TestLifecycle;
+
+import org.chromium.base.ApplicationStatus;
+import org.chromium.base.CommandLine;
+import org.chromium.base.ContextUtils;
+import org.chromium.testing.local.LocalRobolectricTestRunner;
+
+import java.lang.reflect.Method;
+
+/**
+ * A Robolectric Test Runner that initializes base globals.
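+ *
+ * <p>Usage sketch (MyRobolectricTest is hypothetical):
+ * <pre>
+ * {@code @}RunWith(BaseRobolectricTestRunner.class)
+ * public class MyRobolectricTest { ... }
+ * </pre>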
+ */
+public class BaseRobolectricTestRunner extends LocalRobolectricTestRunner {
+    /**
+     * Enables a per-test setUp / tearDown hook.
+     */
+    public static class BaseTestLifecycle extends DefaultTestLifecycle {
+        @Override
+        public void beforeTest(Method method) {
+            ContextUtils.initApplicationContextForTests(RuntimeEnvironment.application);
+            CommandLine.init(null);
+            super.beforeTest(method);
+        }
+
+        @Override
+        public void afterTest(Method method) {
+            ApplicationStatus.destroyForJUnitTests();
+            super.afterTest(method);
+        }
+    }
+
+    public BaseRobolectricTestRunner(Class<?> testClass) throws InitializationError {
+        super(testClass);
+    }
+
+    @Override
+    protected Class<? extends TestLifecycle> getTestLifecycleClass() {
+        return BaseTestLifecycle.class;
+    }
+}
diff --git a/base/test/android/junit/src/org/chromium/base/test/SetUpStatementTest.java b/base/test/android/junit/src/org/chromium/base/test/SetUpStatementTest.java
new file mode 100644
index 0000000..722bd1a
--- /dev/null
+++ b/base/test/android/junit/src/org/chromium/base/test/SetUpStatementTest.java
@@ -0,0 +1,64 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test;
+
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.rules.TestRule;
+import org.junit.runner.RunWith;
+import org.junit.runners.BlockJUnit4ClassRunner;
+import org.junit.runners.model.Statement;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Tests that SetUpStatement works as intended with SetUpTestRule.
+ */
+@RunWith(BlockJUnit4ClassRunner.class)
+public class SetUpStatementTest {
+    private Statement mBase;
+    private SetUpTestRule<TestRule> mRule;
+    private List<Integer> mList;
+
+    @Before
+    public void setUp() {
+        mBase = new Statement() {
+            @Override
+            public void evaluate() {
+                mList.add(1);
+            }
+        };
+        mList = new ArrayList<>();
+        mRule = new SetUpTestRule<TestRule>() {
+            @Override
+            public void setUp() {
+                mList.add(0);
+            }
+
+            @Override
+            public TestRule shouldSetUp(boolean toSetUp) {
+                return null;
+            }
+        };
+    }
+
+    @Test
+    public void testSetUpStatementShouldSetUp() throws Throwable {
+        SetUpStatement statement = new SetUpStatement(mBase, mRule, true);
+        statement.evaluate();
+        Integer[] expected = {0, 1};
+        Assert.assertArrayEquals(expected, mList.toArray());
+    }
+
+    @Test
+    public void testSetUpStatementShouldNotSetUp() throws Throwable {
+        SetUpStatement statement = new SetUpStatement(mBase, mRule, false);
+        statement.evaluate();
+        Integer[] expected = {1};
+        Assert.assertArrayEquals(expected, mList.toArray());
+    }
+}
diff --git a/base/test/android/junit/src/org/chromium/base/test/TestListInstrumentationRunListenerTest.java b/base/test/android/junit/src/org/chromium/base/test/TestListInstrumentationRunListenerTest.java
new file mode 100644
index 0000000..63fa560
--- /dev/null
+++ b/base/test/android/junit/src/org/chromium/base/test/TestListInstrumentationRunListenerTest.java
@@ -0,0 +1,119 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test;
+
+import static org.chromium.base.test.TestListInstrumentationRunListener.getAnnotationJSON;
+import static org.chromium.base.test.TestListInstrumentationRunListener.getTestMethodJSON;
+
+import org.json.JSONObject;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.Description;
+import org.junit.runner.RunWith;
+import org.robolectric.annotation.Config;
+
+import org.chromium.base.test.util.CommandLineFlags;
+
+import java.util.Arrays;
+
+/**
+ * Robolectric test to ensure that the static methods in TestListInstrumentationRunListener work
+ * properly.
+ */
+@RunWith(BaseRobolectricTestRunner.class)
+@Config(manifest = Config.NONE)
+public class TestListInstrumentationRunListenerTest {
+    @CommandLineFlags.Add("hello")
+    private static class ParentClass {
+        public void testA() {}
+
+        @CommandLineFlags.Add("world")
+        public void testB() {}
+    }
+
+    @CommandLineFlags.Remove("hello")
+    private static class ChildClass extends ParentClass {
+    }
+
+    @Test
+    public void testGetTestMethodJSON_testA() throws Throwable {
+        Description desc = Description.createTestDescription(
+                ParentClass.class, "testA",
+                ParentClass.class.getMethod("testA").getAnnotations());
+        JSONObject json = getTestMethodJSON(desc);
+        String expectedJsonString =
+                "{"
+                + "'method': 'testA',"
+                + "'annotations': {}"
+                + "}";
+        expectedJsonString = expectedJsonString
+            .replaceAll("\\s", "")
+            .replaceAll("'", "\"");
+        Assert.assertEquals(expectedJsonString, json.toString());
+    }
+
+    @Test
+    public void testGetTestMethodJSON_testB() throws Throwable {
+        Description desc = Description.createTestDescription(
+                ParentClass.class, "testB",
+                ParentClass.class.getMethod("testB").getAnnotations());
+        JSONObject json = getTestMethodJSON(desc);
+        String expectedJsonString =
+                "{"
+                + "'method': 'testB',"
+                + "'annotations': {"
+                + "  'Add': {"
+                + "    'value': ['world']"
+                + "    }"
+                + "  }"
+                + "}";
+        expectedJsonString = expectedJsonString
+            .replaceAll("\\s", "")
+            .replaceAll("'", "\"");
+        Assert.assertEquals(expectedJsonString, json.toString());
+    }
+
+    @Test
+    public void testGetTestMethodJSONForInheritedClass() throws Throwable {
+        Description desc = Description.createTestDescription(
+                ChildClass.class, "testB",
+                ChildClass.class.getMethod("testB").getAnnotations());
+        JSONObject json = getTestMethodJSON(desc);
+        String expectedJsonString =
+                "{"
+                + "'method': 'testB',"
+                + "'annotations': {"
+                + "  'Add': {"
+                + "    'value': ['world']"
+                + "    }"
+                + "  }"
+                + "}";
+        expectedJsonString = expectedJsonString
+            .replaceAll("\\s", "")
+            .replaceAll("'", "\"");
+        Assert.assertEquals(expectedJsonString, json.toString());
+    }
+
+    @Test
+    public void testGetAnnotationJSONForParentClass() throws Throwable {
+        JSONObject json = getAnnotationJSON(Arrays.asList(ParentClass.class.getAnnotations()));
+        String expectedJsonString = "{'Add':{'value':['hello']}}";
+        expectedJsonString = expectedJsonString
+            .replaceAll("\\s", "")
+            .replaceAll("'", "\"");
+        Assert.assertEquals(expectedJsonString, json.toString());
+    }
+
+    @Test
+    public void testGetAnnotationJSONForChildClass() throws Throwable {
+        JSONObject json = getAnnotationJSON(Arrays.asList(ChildClass.class.getAnnotations()));
+        String expectedJsonString = "{'Add':{'value':['hello']},'Remove':{'value':['hello']}}";
+        expectedJsonString = expectedJsonString
+            .replaceAll("\\s", "")
+            .replaceAll("'", "\"");
+        Assert.assertEquals(expectedJsonString, json.toString());
+    }
+}
diff --git a/base/test/android/junit/src/org/chromium/base/test/params/ExampleParameterizedTest.java b/base/test/android/junit/src/org/chromium/base/test/params/ExampleParameterizedTest.java
new file mode 100644
index 0000000..6ffccad
--- /dev/null
+++ b/base/test/android/junit/src/org/chromium/base/test/params/ExampleParameterizedTest.java
@@ -0,0 +1,105 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.params;
+
+import org.junit.Assert;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.MethodRule;
+import org.junit.runner.RunWith;
+
+import org.chromium.base.test.params.ParameterAnnotations.ClassParameter;
+import org.chromium.base.test.params.ParameterAnnotations.UseMethodParameter;
+import org.chromium.base.test.params.ParameterAnnotations.UseMethodParameterAfter;
+import org.chromium.base.test.params.ParameterAnnotations.UseMethodParameterBefore;
+import org.chromium.base.test.params.ParameterAnnotations.UseRunnerDelegate;
+
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * Example test that uses ParameterizedRunner
+ */
+@RunWith(ParameterizedRunner.class)
+@UseRunnerDelegate(BlockJUnit4RunnerDelegate.class)
+public class ExampleParameterizedTest {
+    @ClassParameter
+    private static List<ParameterSet> sClassParams =
+            Arrays.asList(new ParameterSet().value("hello", "world").name("HelloWorld"),
+                    new ParameterSet().value("Xxxx", "Yyyy").name("XxxxYyyy"),
+                    new ParameterSet().value("aa", "yy").name("AaYy"));
+
+    public static class MethodParamsA implements ParameterProvider {
+        private static List<ParameterSet> sMethodParamA =
+                Arrays.asList(new ParameterSet().value(1, 2).name("OneTwo"),
+                        new ParameterSet().value(2, 3).name("TwoThree"),
+                        new ParameterSet().value(3, 4).name("ThreeFour"));
+
+        @Override
+        public List<ParameterSet> getParameters() {
+            return sMethodParamA;
+        }
+    }
+
+    public static class MethodParamsB implements ParameterProvider {
+        private static List<ParameterSet> sMethodParamB =
+                Arrays.asList(new ParameterSet().value("a", "b").name("Ab"),
+                        new ParameterSet().value("b", "c").name("Bc"),
+                        new ParameterSet().value("c", "d").name("Cd"),
+                        new ParameterSet().value("d", "e").name("De"));
+
+        @Override
+        public List<ParameterSet> getParameters() {
+            return sMethodParamB;
+        }
+    }
+
+    private String mStringA;
+    private String mStringB;
+
+    public ExampleParameterizedTest(String a, String b) {
+        mStringA = a;
+        mStringB = b;
+    }
+
+    @Test
+    public void testSimple() {
+        Assert.assertEquals(
+                "A and B string length aren't equal", mStringA.length(), mStringB.length());
+    }
+
+    @Rule
+    public MethodRule mMethodParamAnnotationProcessor = new MethodParamAnnotationRule();
+
+    private Integer mSum;
+
+    @UseMethodParameterBefore(MethodParamsA.class)
+    public void setupWithOnlyA(int intA, int intB) {
+        mSum = intA + intB;
+    }
+
+    @Test
+    @UseMethodParameter(MethodParamsA.class)
+    public void testWithOnlyA(int intA, int intB) {
+        Assert.assertEquals(intA + 1, intB);
+        Assert.assertEquals(mSum, Integer.valueOf(intA + intB));
+        mSum = null;
+    }
+
+    private String mConcatenation;
+
+    @Test
+    @UseMethodParameter(MethodParamsB.class)
+    public void testWithOnlyB(String a, String b) {
+        Assert.assertTrue(!a.equals(b));
+        mConcatenation = a + b;
+    }
+
+    @UseMethodParameterAfter(MethodParamsB.class)
+    public void teardownWithOnlyB(String a, String b) {
+        Assert.assertEquals(mConcatenation, a + b);
+        mConcatenation = null;
+    }
+}
diff --git a/base/test/android/junit/src/org/chromium/base/test/params/ParameterizedRunnerDelegateCommonTest.java b/base/test/android/junit/src/org/chromium/base/test/params/ParameterizedRunnerDelegateCommonTest.java
new file mode 100644
index 0000000..6d854c5
--- /dev/null
+++ b/base/test/android/junit/src/org/chromium/base/test/params/ParameterizedRunnerDelegateCommonTest.java
@@ -0,0 +1,77 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.params;
+
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.BlockJUnit4ClassRunner;
+import org.junit.runners.model.TestClass;
+
+import org.chromium.base.test.params.ParameterizedRunner.ParameterizedTestInstantiationException;
+
+import java.util.Collections;
+
+@RunWith(BlockJUnit4ClassRunner.class)
+public class ParameterizedRunnerDelegateCommonTest {
+    /**
+     * Create a test object using the list of class parameter set
+     *
+     * @param testClass the {@link TestClass} object for current test class
+     * @param classParameterSet the parameter set needed for the test class constructor
+     */
+    private static Object createTest(TestClass testClass, ParameterSet classParameterSet)
+            throws ParameterizedTestInstantiationException {
+        return new ParameterizedRunnerDelegateCommon(
+                testClass, classParameterSet, Collections.emptyList())
+                .createTest();
+    }
+
+    static class BadTestClassWithMoreThanOneConstructor {
+        public BadTestClassWithMoreThanOneConstructor() {}
+        @SuppressWarnings("unused")
+        public BadTestClassWithMoreThanOneConstructor(String argument) {}
+    }
+
+    static class BadTestClassWithTwoArgumentConstructor {
+        @SuppressWarnings("unused")
+        public BadTestClassWithTwoArgumentConstructor(int a, int b) {}
+    }
+
+    static abstract class BadTestClassAbstract {
+        public BadTestClassAbstract() {}
+    }
+
+    static class BadTestClassConstructorThrows {
+        public BadTestClassConstructorThrows() {
+            throw new RuntimeException();
+        }
+    }
+
+    @Test(expected = IllegalArgumentException.class)
+    public void testCreateTestWithMoreThanOneConstructor() throws Throwable {
+        TestClass testClass = new TestClass(BadTestClassWithMoreThanOneConstructor.class);
+        createTest(testClass, null);
+    }
+
+    @Test(expected = IllegalArgumentException.class)
+    public void testCreateTestWithIncorrectArguments() throws Throwable {
+        TestClass testClass = new TestClass(BadTestClassWithTwoArgumentConstructor.class);
+        ParameterSet pSet = new ParameterSet().value(1, 2, 3);
+        createTest(testClass, pSet);
+    }
+
+    @Test(expected = ParameterizedTestInstantiationException.class)
+    public void testCreateTestWithAbstractClass() throws ParameterizedTestInstantiationException {
+        TestClass testClass = new TestClass(BadTestClassAbstract.class);
+        createTest(testClass, null);
+    }
+
+    @Test(expected = ParameterizedTestInstantiationException.class)
+    public void testCreateTestWithThrowingConstructor()
+            throws ParameterizedTestInstantiationException {
+        TestClass testClass = new TestClass(BadTestClassConstructorThrows.class);
+        createTest(testClass, null);
+    }
+}
diff --git a/base/test/android/junit/src/org/chromium/base/test/params/ParameterizedRunnerDelegateFactoryTest.java b/base/test/android/junit/src/org/chromium/base/test/params/ParameterizedRunnerDelegateFactoryTest.java
new file mode 100644
index 0000000..723382d
--- /dev/null
+++ b/base/test/android/junit/src/org/chromium/base/test/params/ParameterizedRunnerDelegateFactoryTest.java
@@ -0,0 +1,133 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.params;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.BlockJUnit4ClassRunner;
+import org.junit.runners.model.FrameworkMethod;
+import org.junit.runners.model.InitializationError;
+import org.junit.runners.model.TestClass;
+
+import org.chromium.base.test.params.ParameterAnnotations.UseMethodParameter;
+import org.chromium.base.test.params.ParameterizedRunnerDelegateFactory.ParameterizedRunnerDelegateInstantiationException;
+
+import java.lang.reflect.Method;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Test for org.chromium.base.test.params.ParameterizedRunnerDelegateFactory
+ */
+@RunWith(BlockJUnit4ClassRunner.class)
+public class ParameterizedRunnerDelegateFactoryTest {
+    /**
+     * This RunnerDelegate calls `super.collectInitializationErrors()` and would
+     * cause BlockJUnit4ClassRunner to validate test classes.
+     */
+    public static class BadExampleRunnerDelegate
+            extends BlockJUnit4ClassRunner implements ParameterizedRunnerDelegate {
+        public static class LalaTestClass {}
+
+        private final List<FrameworkMethod> mParameterizedFrameworkMethodList;
+
+        BadExampleRunnerDelegate(Class<?> klass,
+                List<FrameworkMethod> parameterizedFrameworkMethods) throws InitializationError {
+            super(klass);
+            mParameterizedFrameworkMethodList = parameterizedFrameworkMethods;
+        }
+
+        @Override
+        public void collectInitializationErrors(List<Throwable> errors) {
+            super.collectInitializationErrors(errors); // This is wrong!!
+        }
+
+        @Override
+        public List<FrameworkMethod> computeTestMethods() {
+            return mParameterizedFrameworkMethodList;
+        }
+
+        @Override
+        public Object createTest() {
+            return null;
+        }
+    }
+
+    static class ExampleTestClass {
+        static class MethodParamsA implements ParameterProvider {
+            @Override
+            public Iterable<ParameterSet> getParameters() {
+                return Arrays.asList(
+                        new ParameterSet().value("a").name("testWithValue_a"),
+                        new ParameterSet().value("b").name("testWithValue_b")
+                );
+            }
+        }
+
+        @SuppressWarnings("unused")
+        @UseMethodParameter(MethodParamsA.class)
+        @Test
+        public void testA(String a) {}
+
+        static class MethodParamsB implements ParameterProvider {
+            @Override
+            public Iterable<ParameterSet> getParameters() {
+                return Arrays.asList(
+                        new ParameterSet().value(1).name("testWithValue_1"),
+                        new ParameterSet().value(2).name("testWithValue_2"),
+                        new ParameterSet().value(3).name("testWithValue_3")
+                );
+            }
+        }
+
+        @SuppressWarnings("unused")
+        @UseMethodParameter(MethodParamsB.class)
+        @Test
+        public void testB(int b) {}
+
+        @Test
+        public void testByMyself() {}
+    }
+
+    /**
+     * This test validates that ParameterizedRunnerDelegateFactory throws an exception when
+     * a runner delegate does not properly override the collectInitializationErrors method
+     * (i.e. it still calls super, which triggers BlockJUnit4ClassRunner validation).
+     */
+    @Test(expected = ParameterizedRunnerDelegateInstantiationException.class)
+    public void testBadRunnerDelegateWithIncorrectValidationCall() throws Throwable {
+        ParameterizedRunnerDelegateFactory factory = new ParameterizedRunnerDelegateFactory();
+        TestClass testClass = new TestClass(BadExampleRunnerDelegate.LalaTestClass.class);
+        factory.createRunner(testClass, null, BadExampleRunnerDelegate.class);
+    }
+
+    @Test
+    public void testGenerateParameterizedFrameworkMethod() throws Throwable {
+        List<FrameworkMethod> methods =
+                ParameterizedRunnerDelegateFactory.generateUnmodifiableFrameworkMethodList(
+                        new TestClass(ExampleTestClass.class), "");
+
+        Assert.assertEquals(methods.size(), 6);
+
+        Map<String, Method> expectedTests = new HashMap<>();
+        Method testMethodA = ExampleTestClass.class.getDeclaredMethod("testA", String.class);
+        Method testMethodB = ExampleTestClass.class.getDeclaredMethod("testB", int.class);
+        Method testMethodByMyself = ExampleTestClass.class.getDeclaredMethod("testByMyself");
+        expectedTests.put("testA__testWithValue_a", testMethodA);
+        expectedTests.put("testA__testWithValue_b", testMethodA);
+        expectedTests.put("testB__testWithValue_1", testMethodB);
+        expectedTests.put("testB__testWithValue_2", testMethodB);
+        expectedTests.put("testB__testWithValue_3", testMethodB);
+        expectedTests.put("testByMyself", testMethodByMyself);
+        for (FrameworkMethod method : methods) {
+            Assert.assertNotNull(expectedTests.get(method.getName()));
+            Assert.assertEquals(expectedTests.get(method.getName()), method.getMethod());
+            expectedTests.remove(method.getName());
+        }
+        Assert.assertTrue(expectedTests.isEmpty());
+    }
+}
diff --git a/base/test/android/junit/src/org/chromium/base/test/params/ParameterizedRunnerTest.java b/base/test/android/junit/src/org/chromium/base/test/params/ParameterizedRunnerTest.java
new file mode 100644
index 0000000..170ff69
--- /dev/null
+++ b/base/test/android/junit/src/org/chromium/base/test/params/ParameterizedRunnerTest.java
@@ -0,0 +1,108 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.params;
+
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.BlockJUnit4ClassRunner;
+
+import org.chromium.base.test.params.ParameterAnnotations.ClassParameter;
+import org.chromium.base.test.params.ParameterAnnotations.UseRunnerDelegate;
+import org.chromium.base.test.params.ParameterizedRunner.IllegalParameterArgumentException;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Test for org.chromium.base.test.params.ParameterizedRunner
+ */
+@RunWith(BlockJUnit4ClassRunner.class)
+public class ParameterizedRunnerTest {
+    @UseRunnerDelegate(BlockJUnit4RunnerDelegate.class)
+    public static class BadTestClassWithMoreThanOneConstructor {
+        @ClassParameter
+        static List<ParameterSet> sClassParams = new ArrayList<>();
+
+        public BadTestClassWithMoreThanOneConstructor() {}
+
+        public BadTestClassWithMoreThanOneConstructor(String x) {}
+    }
+
+    @UseRunnerDelegate(BlockJUnit4RunnerDelegate.class)
+    public static class BadTestClassWithNonListParameters {
+        @ClassParameter
+        static String[] sMethodParamA = {"1", "2"};
+
+        @Test
+        public void test() {}
+    }
+
+    @UseRunnerDelegate(BlockJUnit4RunnerDelegate.class)
+    public static class BadTestClassWithoutNeedForParameterization {
+        @Test
+        public void test() {}
+    }
+
+    @UseRunnerDelegate(BlockJUnit4RunnerDelegate.class)
+    public static class BadTestClassWithNonStaticParameterSetList {
+        @ClassParameter
+        public List<ParameterSet> mClassParams = new ArrayList<>();
+
+        @Test
+        public void test() {}
+    }
+
+    @UseRunnerDelegate(BlockJUnit4RunnerDelegate.class)
+    public static class BadTestClassWithMultipleClassParameter {
+        @ClassParameter
+        private static List<ParameterSet> sParamA = new ArrayList<>();
+
+        @ClassParameter
+        private static List<ParameterSet> sParamB = new ArrayList<>();
+    }
+
+    @Test(expected = ParameterizedRunner.IllegalParameterArgumentException.class)
+    public void testEmptyParameterSet() {
+        List<ParameterSet> paramList = new ArrayList<>();
+        paramList.add(new ParameterSet());
+        ParameterizedRunner.validateWidth(paramList);
+    }
+
+    @Test(expected = ParameterizedRunner.IllegalParameterArgumentException.class)
+    public void testUnequalWidthParameterSetList() {
+        List<ParameterSet> paramList = new ArrayList<>();
+        paramList.add(new ParameterSet().value(1, 2));
+        paramList.add(new ParameterSet().value(3, 4, 5));
+        ParameterizedRunner.validateWidth(paramList);
+    }
+
+    @Test(expected = ParameterizedRunner.IllegalParameterArgumentException.class)
+    public void testUnequalWidthParameterSetListWithNull() {
+        List<ParameterSet> paramList = new ArrayList<>();
+        paramList.add(new ParameterSet().value(null));
+        paramList.add(new ParameterSet().value(1, 2));
+        ParameterizedRunner.validateWidth(paramList);
+    }
+
+    @Test(expected = IllegalArgumentException.class)
+    public void testBadClassWithNonListParameters() throws Throwable {
+        new ParameterizedRunner(BadTestClassWithNonListParameters.class);
+    }
+
+    @Test(expected = IllegalParameterArgumentException.class)
+    public void testBadClassWithNonStaticParameterSetList() throws Throwable {
+        new ParameterizedRunner(BadTestClassWithNonStaticParameterSetList.class);
+    }
+
+    @Test(expected = IllegalArgumentException.class)
+    public void testBadClassWithoutNeedForParameterization() throws Throwable {
+        new ParameterizedRunner(BadTestClassWithoutNeedForParameterization.class);
+    }
+
+    @Test(expected = Exception.class)
+    public void testBadClassWithMoreThanOneConstructor() throws Throwable {
+        new ParameterizedRunner(BadTestClassWithMoreThanOneConstructor.class);
+    }
+}
diff --git a/base/test/android/junit/src/org/chromium/base/test/params/ParameterizedTestNameTest.java b/base/test/android/junit/src/org/chromium/base/test/params/ParameterizedTestNameTest.java
new file mode 100644
index 0000000..e79f5c5
--- /dev/null
+++ b/base/test/android/junit/src/org/chromium/base/test/params/ParameterizedTestNameTest.java
@@ -0,0 +1,201 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.params;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runner.Runner;
+import org.junit.runners.BlockJUnit4ClassRunner;
+import org.junit.runners.model.FrameworkMethod;
+import org.junit.runners.model.TestClass;
+
+import org.chromium.base.test.params.ParameterAnnotations.ClassParameter;
+import org.chromium.base.test.params.ParameterAnnotations.UseMethodParameter;
+import org.chromium.base.test.params.ParameterAnnotations.UseRunnerDelegate;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.LinkedList;
+import java.util.List;
+
+/**
+ * Test to verify that parameterized test names and test method Descriptions are generated
+ * properly (e.g. "test__Hello" for a ParameterSet named "Hello").
+ */
+@RunWith(BlockJUnit4ClassRunner.class)
+public class ParameterizedTestNameTest {
+    @UseRunnerDelegate(BlockJUnit4RunnerDelegate.class)
+    public static class TestClassWithClassParameterAppendName {
+        @ClassParameter
+        static List<ParameterSet> sAllName = Arrays.asList(
+                new ParameterSet().value("hello").name("Hello"),
+                new ParameterSet().value("world").name("World")
+        );
+
+        public TestClassWithClassParameterAppendName(String a) {}
+
+        @Test
+        public void test() {}
+    }
+
+    @UseRunnerDelegate(BlockJUnit4RunnerDelegate.class)
+    public static class TestClassWithClassParameterDefaultName {
+        @ClassParameter
+        static List<ParameterSet> sAllName = Arrays.asList(
+                new ParameterSet().value("hello"),
+                new ParameterSet().value("world")
+        );
+
+        public TestClassWithClassParameterDefaultName(String a) {}
+
+        @Test
+        public void test() {}
+    }
+
+    @UseRunnerDelegate(BlockJUnit4RunnerDelegate.class)
+    public static class TestClassWithMethodParameter {
+        static class AppendNameParams implements ParameterProvider {
+            @Override
+            public Iterable<ParameterSet> getParameters() {
+                return Arrays.asList(
+                        new ParameterSet().value("hello").name("Hello"),
+                        new ParameterSet().value("world").name("World")
+                );
+            }
+        }
+
+        static class DefaultNameParams implements ParameterProvider {
+            @Override
+            public Iterable<ParameterSet> getParameters() {
+                return Arrays.asList(
+                        new ParameterSet().value("hello"),
+                        new ParameterSet().value("world")
+                );
+            }
+        }
+
+        @UseMethodParameter(AppendNameParams.class)
+        @Test
+        public void test(String a) {}
+
+        @UseMethodParameter(DefaultNameParams.class)
+        @Test
+        public void testDefaultName(String b) {}
+    }
+
+    @UseRunnerDelegate(BlockJUnit4RunnerDelegate.class)
+    public static class TestClassWithMixedParameter {
+        @ClassParameter
+        static List<ParameterSet> sAllName = Arrays.asList(
+                new ParameterSet().value("hello").name("Hello"),
+                new ParameterSet().value("world").name("World")
+        );
+
+        static class AppendNameParams implements ParameterProvider {
+            @Override
+            public Iterable<ParameterSet> getParameters() {
+                return Arrays.asList(
+                        new ParameterSet().value("1").name("A"),
+                        new ParameterSet().value("2").name("B")
+                );
+            }
+        }
+
+        public TestClassWithMixedParameter(String a) {}
+
+        @UseMethodParameter(AppendNameParams.class)
+        @Test
+        public void testA(String a) {}
+
+        @Test
+        public void test() {}
+    }
+
+    @Test
+    public void testClassParameterAppendName() throws Throwable {
+        List<Runner> runners = ParameterizedRunner.createRunners(
+                new TestClass(TestClassWithClassParameterAppendName.class));
+        List<String> expectedTestNames =
+                new LinkedList<String>(Arrays.asList("test__Hello", "test__World"));
+        List<String> computedMethodNames = new ArrayList<>();
+        for (Runner r : runners) {
+            BlockJUnit4RunnerDelegate castedRunner = (BlockJUnit4RunnerDelegate) r;
+            for (FrameworkMethod method : castedRunner.computeTestMethods()) {
+                computedMethodNames.add(method.getName());
+                Assert.assertTrue("This test name is not expected: " + method.getName(),
+                        expectedTestNames.contains(method.getName()));
+                expectedTestNames.remove(method.getName());
+            }
+        }
+        Assert.assertTrue(
+                String.format(
+                        "These names were provided: %s, these expected names are not found: %s",
+                        Arrays.toString(computedMethodNames.toArray()),
+                        Arrays.toString(expectedTestNames.toArray())),
+                expectedTestNames.isEmpty());
+    }
+
+    @Test
+    public void testClassParameterDefaultName() throws Throwable {
+        List<Runner> runners = ParameterizedRunner.createRunners(
+                new TestClass(TestClassWithClassParameterDefaultName.class));
+        List<String> expectedTestNames = new LinkedList<String>(Arrays.asList("test", "test"));
+        for (Runner r : runners) {
+            @SuppressWarnings("unchecked")
+            BlockJUnit4RunnerDelegate castedRunner = (BlockJUnit4RunnerDelegate) r;
+            for (FrameworkMethod method : castedRunner.computeTestMethods()) {
+                Assert.assertTrue("This test name is not expected: " + method.getName(),
+                        expectedTestNames.contains(method.getName()));
+                expectedTestNames.remove(method.getName());
+            }
+        }
+        Assert.assertTrue("These expected names are not found: "
+                        + Arrays.toString(expectedTestNames.toArray()),
+                expectedTestNames.isEmpty());
+    }
+
+    @Test
+    public void testMethodParameter() throws Throwable {
+        List<Runner> runners = ParameterizedRunner.createRunners(
+                new TestClass(TestClassWithMethodParameter.class));
+        List<String> expectedTestNames = new LinkedList<String>(
+                Arrays.asList("test__Hello", "test__World", "testDefaultName", "testDefaultName"));
+        for (Runner r : runners) {
+            BlockJUnit4RunnerDelegate castedRunner = (BlockJUnit4RunnerDelegate) r;
+            for (FrameworkMethod method : castedRunner.computeTestMethods()) {
+                Assert.assertTrue("This test name is not expected: " + method.getName(),
+                        expectedTestNames.contains(method.getName()));
+                expectedTestNames.remove(method.getName());
+            }
+        }
+        Assert.assertTrue("These expected names are not found: "
+                        + Arrays.toString(expectedTestNames.toArray()),
+                expectedTestNames.isEmpty());
+    }
+
+    @Test
+    public void testMixedParameterTestA() throws Throwable {
+        List<Runner> runners =
+                ParameterizedRunner.createRunners(new TestClass(TestClassWithMixedParameter.class));
+        List<String> expectedTestNames =
+                new LinkedList<String>(Arrays.asList("testA__Hello_A", "testA__World_A",
+                        "testA__Hello_B", "testA__World_B", "test__Hello", "test__World"));
+        for (Runner r : runners) {
+            BlockJUnit4RunnerDelegate castedRunner = (BlockJUnit4RunnerDelegate) r;
+            for (FrameworkMethod method : castedRunner.computeTestMethods()) {
+                Assert.assertTrue("This test name is not expected: " + method.getName(),
+                        expectedTestNames.contains(method.getName()));
+                expectedTestNames.remove(method.getName());
+            }
+        }
+        Assert.assertTrue("These expected names are not found: "
+                        + Arrays.toString(expectedTestNames.toArray()),
+                expectedTestNames.isEmpty());
+    }
+}
diff --git a/base/test/android/junit/src/org/chromium/base/test/util/AnnotationProcessingUtilsTest.java b/base/test/android/junit/src/org/chromium/base/test/util/AnnotationProcessingUtilsTest.java
new file mode 100644
index 0000000..9acd141
--- /dev/null
+++ b/base/test/android/junit/src/org/chromium/base/test/util/AnnotationProcessingUtilsTest.java
@@ -0,0 +1,377 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.util;
+
+import static org.hamcrest.Matchers.contains;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.fail;
+import static org.junit.runner.Description.createTestDescription;
+
+import org.junit.Ignore;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TestRule;
+import org.junit.runner.Description;
+import org.junit.runner.RunWith;
+import org.junit.runners.BlockJUnit4ClassRunner;
+import org.junit.runners.model.FrameworkMethod;
+import org.junit.runners.model.InitializationError;
+import org.junit.runners.model.Statement;
+
+import org.chromium.base.test.util.AnnotationProcessingUtils.AnnotationExtractor;
+
+import java.lang.annotation.Annotation;
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.List;
+
+/** Test for {@link AnnotationProcessingUtils}. */
+@RunWith(BlockJUnit4ClassRunner.class)
+public class AnnotationProcessingUtilsTest {
+    @Test
+    public void testGetTargetAnnotation_NotOnClassNorMethod() {
+        TargetAnnotation retrievedAnnotation;
+
+        retrievedAnnotation = AnnotationProcessingUtils.getAnnotation(
+                createTestDescription(
+                        ClassWithoutTargetAnnotation.class, "methodWithoutAnnotation"),
+                TargetAnnotation.class);
+        assertNull(retrievedAnnotation);
+    }
+
+    @Test
+    public void testGetTargetAnnotation_NotOnClassButOnMethod() {
+        TargetAnnotation retrievedAnnotation;
+
+        retrievedAnnotation = AnnotationProcessingUtils.getAnnotation(
+                getTest(ClassWithoutTargetAnnotation.class, "methodWithTargetAnnotation"),
+                TargetAnnotation.class);
+        assertNotNull(retrievedAnnotation);
+    }
+
+    @Test
+    public void testGetTargetAnnotation_NotOnClassDifferentOneOnMethod() {
+        TargetAnnotation retrievedAnnotation;
+
+        retrievedAnnotation = AnnotationProcessingUtils.getAnnotation(
+                getTest(ClassWithoutTargetAnnotation.class, "methodWithAnnotatedAnnotation"),
+                TargetAnnotation.class);
+        assertNull(retrievedAnnotation);
+    }
+
+    @Test
+    public void testGetTargetAnnotation_OnClassButNotOnMethod() {
+        TargetAnnotation retrievedAnnotation;
+
+        retrievedAnnotation = AnnotationProcessingUtils.getAnnotation(
+                getTest(ClassWithAnnotation.class, "methodWithoutAnnotation"),
+                TargetAnnotation.class);
+        assertNotNull(retrievedAnnotation);
+        assertEquals(Location.Class, retrievedAnnotation.value());
+    }
+
+    @Test
+    public void testGetTargetAnnotation_OnClassAndMethod() {
+        TargetAnnotation retrievedAnnotation;
+
+        retrievedAnnotation = AnnotationProcessingUtils.getAnnotation(
+                getTest(ClassWithAnnotation.class, "methodWithTargetAnnotation"),
+                TargetAnnotation.class);
+        assertNotNull(retrievedAnnotation);
+        assertEquals(Location.Method, retrievedAnnotation.value());
+    }
+
+    @Test
+    @Ignore("Rules not supported yet.")
+    public void testGetTargetAnnotation_OnRuleButNotOnMethod() {
+        TargetAnnotation retrievedAnnotation;
+
+        retrievedAnnotation = AnnotationProcessingUtils.getAnnotation(
+                getTest(ClassWithRule.class, "methodWithoutAnnotation"), TargetAnnotation.class);
+        assertNotNull(retrievedAnnotation);
+        assertEquals(Location.Rule, retrievedAnnotation.value());
+    }
+
+    @Test
+    @Ignore("Rules not supported yet.")
+    public void testGetTargetAnnotation_OnRuleAndMethod() {
+        TargetAnnotation retrievedAnnotation;
+
+        retrievedAnnotation = AnnotationProcessingUtils.getAnnotation(
+                getTest(ClassWithRule.class, "methodWithTargetAnnotation"), TargetAnnotation.class);
+        assertNotNull(retrievedAnnotation);
+        assertEquals(Location.Method, retrievedAnnotation.value());
+    }
+
+    @Test
+    public void testGetMetaAnnotation_Indirectly() {
+        MetaAnnotation retrievedAnnotation;
+
+        retrievedAnnotation = AnnotationProcessingUtils.getAnnotation(
+                getTest(ClassWithoutTargetAnnotation.class, "methodWithAnnotatedAnnotation"),
+                MetaAnnotation.class);
+        assertNotNull(retrievedAnnotation);
+    }
+
+    @Test
+    public void testGetAllTargetAnnotations() {
+        List<TargetAnnotation> retrievedAnnotations;
+
+        retrievedAnnotations = AnnotationProcessingUtils.getAnnotations(
+                getTest(ClassWithAnnotation.class, "methodWithTargetAnnotation"),
+                TargetAnnotation.class);
+        assertEquals(2, retrievedAnnotations.size());
+        assertEquals(Location.Class, retrievedAnnotations.get(0).value());
+        assertEquals(Location.Method, retrievedAnnotations.get(1).value());
+    }
+
+    @Test
+    public void testGetAllTargetAnnotations_OnParentClass() {
+        List<TargetAnnotation> retrievedAnnotations;
+
+        retrievedAnnotations = AnnotationProcessingUtils.getAnnotations(
+                getTest(DerivedClassWithoutAnnotation.class, "newMethodWithoutAnnotation"),
+                TargetAnnotation.class);
+        assertEquals(1, retrievedAnnotations.size());
+        assertEquals(Location.Class, retrievedAnnotations.get(0).value());
+    }
+
+    @Test
+    public void testGetAllTargetAnnotations_OnDerivedMethodAndParentClass() {
+        List<TargetAnnotation> retrievedAnnotations;
+
+        retrievedAnnotations = AnnotationProcessingUtils.getAnnotations(
+                getTest(DerivedClassWithoutAnnotation.class, "newMethodWithTargetAnnotation"),
+                TargetAnnotation.class);
+        assertEquals(2, retrievedAnnotations.size());
+        assertEquals(Location.Class, retrievedAnnotations.get(0).value());
+        assertEquals(Location.DerivedMethod, retrievedAnnotations.get(1).value());
+    }
+
+    @Test
+    public void testGetAllTargetAnnotations_OnDerivedMethodAndParentClassAndMethod() {
+        List<TargetAnnotation> retrievedAnnotations;
+
+        retrievedAnnotations = AnnotationProcessingUtils.getAnnotations(
+                getTest(DerivedClassWithoutAnnotation.class, "methodWithTargetAnnotation"),
+                TargetAnnotation.class);
+        // The annotation on the base implementation of the overridden method should not be
+        // picked up; only the derived method's annotation is. Overriding test methods should be
+        // rare in practice anyway.
+        assertEquals(2, retrievedAnnotations.size());
+        assertEquals(Location.Class, retrievedAnnotations.get(0).value());
+        assertEquals(Location.DerivedMethod, retrievedAnnotations.get(1).value());
+    }
+
+    @Test
+    public void testGetAllTargetAnnotations_OnDerivedParentAndParentClass() {
+        List<TargetAnnotation> retrievedAnnotations;
+
+        retrievedAnnotations = AnnotationProcessingUtils.getAnnotations(
+                getTest(DerivedClassWithAnnotation.class, "methodWithoutAnnotation"),
+                TargetAnnotation.class);
+        assertEquals(2, retrievedAnnotations.size());
+        assertEquals(Location.Class, retrievedAnnotations.get(0).value());
+        assertEquals(Location.DerivedClass, retrievedAnnotations.get(1).value());
+    }
+
+    @Test
+    public void testGetAllAnnotations() {
+        List<Annotation> annotations;
+
+        AnnotationExtractor annotationExtractor = new AnnotationExtractor(
+                TargetAnnotation.class, MetaAnnotation.class, AnnotatedAnnotation.class);
+        annotations = annotationExtractor.getMatchingAnnotations(
+                getTest(DerivedClassWithAnnotation.class, "methodWithTwoAnnotations"));
+        assertEquals(5, annotations.size());
+
+        // Retrieved annotation order:
+        // On Parent Class
+        assertEquals(TargetAnnotation.class, annotations.get(0).annotationType());
+        assertEquals(Location.Class, ((TargetAnnotation) annotations.get(0)).value());
+
+        // On Class
+        assertEquals(TargetAnnotation.class, annotations.get(1).annotationType());
+        assertEquals(Location.DerivedClass, ((TargetAnnotation) annotations.get(1)).value());
+
+        // Meta-annotations from method
+        assertEquals(MetaAnnotation.class, annotations.get(2).annotationType());
+
+        // On Method
+        assertEquals(AnnotatedAnnotation.class, annotations.get(3).annotationType());
+        assertEquals(TargetAnnotation.class, annotations.get(4).annotationType());
+        assertEquals(Location.DerivedMethod, ((TargetAnnotation) annotations.get(4)).value());
+    }
+
+    @SuppressWarnings("unchecked")
+    @Test
+    public void testAnnotationExtractorSortOrder_UnknownAnnotations() {
+        AnnotationExtractor annotationExtractor = new AnnotationExtractor(Target.class);
+        Comparator<Class<? extends Annotation>> comparator =
+                annotationExtractor.getTypeComparator();
+        List<Class<? extends Annotation>> testList =
+                Arrays.asList(Rule.class, Test.class, Override.class, Target.class, Rule.class);
+        testList.sort(comparator);
+        assertThat("Unknown annotations should not be reordered and come before the known ones.",
+                testList,
+                contains(Rule.class, Test.class, Override.class, Rule.class, Target.class));
+    }
+
+    @SuppressWarnings("unchecked")
+    @Test
+    public void testAnnotationExtractorSortOrder_KnownAnnotations() {
+        AnnotationExtractor annotationExtractor =
+                new AnnotationExtractor(Test.class, Target.class, Rule.class);
+        Comparator<Class<? extends Annotation>> comparator =
+                annotationExtractor.getTypeComparator();
+        List<Class<? extends Annotation>> testList =
+                Arrays.asList(Rule.class, Test.class, Override.class, Target.class, Rule.class);
+        testList.sort(comparator);
+        assertThat(
+                "Known annotations should be sorted in the same order as provided to the extractor",
+                testList,
+                contains(Override.class, Test.class, Target.class, Rule.class, Rule.class));
+    }
+
+    private static Description getTest(Class<?> klass, String testName) {
+        Description description = null;
+        try {
+            description = new DummyTestRunner(klass).describe(testName);
+        } catch (InitializationError initializationError) {
+            initializationError.printStackTrace();
+            fail("DummyTestRunner initialization failed:" + initializationError.getMessage());
+        }
+        if (description == null) {
+            fail("Not test named '" + testName + "' in class" + klass.getSimpleName());
+        }
+        return description;
+    }
+
+    // region Test Data: Annotations and dummy test classes
+    private enum Location { Unspecified, Class, Method, Rule, DerivedClass, DerivedMethod }
+
+    @Retention(RetentionPolicy.RUNTIME)
+    @Target({ElementType.TYPE, ElementType.METHOD})
+    private @interface TargetAnnotation {
+        Location value() default Location.Unspecified;
+    }
+
+    @Retention(RetentionPolicy.RUNTIME)
+    @Target({ElementType.ANNOTATION_TYPE, ElementType.TYPE, ElementType.METHOD})
+    private @interface MetaAnnotation {}
+
+    @Retention(RetentionPolicy.RUNTIME)
+    @Target({ElementType.TYPE, ElementType.METHOD})
+    @MetaAnnotation
+    private @interface AnnotatedAnnotation {}
+
+    private @interface SimpleAnnotation {}
+
+    @SimpleAnnotation
+    private static class ClassWithoutTargetAnnotation {
+        @Test
+        public void methodWithoutAnnotation() {}
+
+        @Test
+        @TargetAnnotation
+        public void methodWithTargetAnnotation() {}
+
+        @Test
+        @AnnotatedAnnotation
+        public void methodWithAnnotatedAnnotation() {}
+    }
+
+    @TargetAnnotation(Location.Class)
+    private static class ClassWithAnnotation {
+        @Test
+        public void methodWithoutAnnotation() {}
+
+        @Test
+        @TargetAnnotation(Location.Method)
+        public void methodWithTargetAnnotation() {}
+
+        @Test
+        @MetaAnnotation
+        public void methodWithMetaAnnotation() {}
+
+        @Test
+        @AnnotatedAnnotation
+        public void methodWithAnnotatedAnnotation() {}
+    }
+
+    private static class DerivedClassWithoutAnnotation extends ClassWithAnnotation {
+        @Test
+        public void newMethodWithoutAnnotation() {}
+
+        @Test
+        @TargetAnnotation(Location.DerivedMethod)
+        public void newMethodWithTargetAnnotation() {}
+
+        @Test
+        @Override
+        @TargetAnnotation(Location.DerivedMethod)
+        public void methodWithTargetAnnotation() {}
+    }
+
+    @TargetAnnotation(Location.DerivedClass)
+    private static class DerivedClassWithAnnotation extends ClassWithAnnotation {
+        @Test
+        public void newMethodWithoutAnnotation() {}
+
+        @Test
+        @AnnotatedAnnotation
+        @TargetAnnotation(Location.DerivedMethod)
+        public void methodWithTwoAnnotations() {}
+    }
+
+    private static class ClassWithRule {
+        @Rule
+        Rule1 mRule = new Rule1();
+
+        @Test
+        public void methodWithoutAnnotation() {}
+
+        @Test
+        @TargetAnnotation
+        public void methodWithTargetAnnotation() {}
+    }
+
+    @TargetAnnotation(Location.Rule)
+    @MetaAnnotation
+    private static class Rule1 implements TestRule {
+        @Override
+        public Statement apply(Statement statement, Description description) {
+            return null;
+        }
+    }
+
+    private static class DummyTestRunner extends BlockJUnit4ClassRunner {
+        public DummyTestRunner(Class<?> klass) throws InitializationError {
+            super(klass);
+        }
+
+        @Override
+        protected void collectInitializationErrors(List<Throwable> errors) {
+            // Do nothing. BlockJUnit4ClassRunner requires the class to be public, but we don't
+            // want/need it.
+        }
+
+        public Description describe(String testName) {
+            List<FrameworkMethod> tests = getTestClass().getAnnotatedMethods(Test.class);
+            for (FrameworkMethod testMethod : tests) {
+                if (testMethod.getName().equals(testName)) return describeChild(testMethod);
+            }
+            return null;
+        }
+    }
+
+    // endregion
+}
diff --git a/base/test/android/junit/src/org/chromium/base/test/util/DisableIfTest.java b/base/test/android/junit/src/org/chromium/base/test/util/DisableIfTest.java
new file mode 100644
index 0000000..a147435
--- /dev/null
+++ b/base/test/android/junit/src/org/chromium/base/test/util/DisableIfTest.java
@@ -0,0 +1,193 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.util;
+
+import android.os.Build;
+
+import junit.framework.TestCase;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.robolectric.annotation.Config;
+import org.robolectric.util.ReflectionHelpers;
+
+import org.chromium.base.test.BaseRobolectricTestRunner;
+
+/** Unit tests for the DisableIf annotation and its SkipCheck implementation. */
+@RunWith(BaseRobolectricTestRunner.class)
+@Config(manifest = Config.NONE, sdk = 21)
+public class DisableIfTest {
+    @Test
+    public void testSdkIsLessThanAndIsLessThan() {
+        TestCase sdkIsLessThan = new TestCase("sdkIsLessThan") {
+            @DisableIf.Build(sdk_is_less_than = 22)
+            public void sdkIsLessThan() {}
+        };
+        Assert.assertTrue(new DisableIfSkipCheck().shouldSkip(sdkIsLessThan));
+    }
+
+    @Test
+    public void testSdkIsLessThanButIsEqual() {
+        TestCase sdkIsEqual = new TestCase("sdkIsEqual") {
+            @DisableIf.Build(sdk_is_less_than = 21)
+            public void sdkIsEqual() {}
+        };
+        Assert.assertFalse(new DisableIfSkipCheck().shouldSkip(sdkIsEqual));
+    }
+
+    @Test
+    public void testSdkIsLessThanButIsGreaterThan() {
+        TestCase sdkIsGreaterThan = new TestCase("sdkIsGreaterThan") {
+            @DisableIf.Build(sdk_is_less_than = 20)
+            public void sdkIsGreaterThan() {}
+        };
+        Assert.assertFalse(new DisableIfSkipCheck().shouldSkip(sdkIsGreaterThan));
+    }
+
+    @Test
+    public void testSdkIsGreaterThanButIsLessThan() {
+        TestCase sdkIsLessThan = new TestCase("sdkIsLessThan") {
+            @DisableIf.Build(sdk_is_greater_than = 22)
+            public void sdkIsLessThan() {}
+        };
+        Assert.assertFalse(new DisableIfSkipCheck().shouldSkip(sdkIsLessThan));
+    }
+
+    @Test
+    public void testSdkIsGreaterThanButIsEqual() {
+        TestCase sdkIsEqual = new TestCase("sdkIsEqual") {
+            @DisableIf.Build(sdk_is_greater_than = 21)
+            public void sdkIsEqual() {}
+        };
+        Assert.assertFalse(new DisableIfSkipCheck().shouldSkip(sdkIsEqual));
+    }
+
+    @Test
+    public void testSdkIsGreaterThanAndIsGreaterThan() {
+        TestCase sdkIsGreaterThan = new TestCase("sdkIsGreaterThan") {
+            @DisableIf.Build(sdk_is_greater_than = 20)
+            public void sdkIsGreaterThan() {}
+        };
+        Assert.assertTrue(new DisableIfSkipCheck().shouldSkip(sdkIsGreaterThan));
+    }
+
+    @Test
+    public void testSupportedAbiIncludesAndCpuAbiMatches() {
+        TestCase supportedAbisCpuAbiMatch = new TestCase("supportedAbisCpuAbiMatch") {
+            @DisableIf.Build(supported_abis_includes = "foo")
+            public void supportedAbisCpuAbiMatch() {}
+        };
+        String[] originalAbis = Build.SUPPORTED_ABIS;
+        try {
+            ReflectionHelpers.setStaticField(Build.class, "SUPPORTED_ABIS",
+                    new String[] {"foo", "bar"});
+            Assert.assertTrue(new DisableIfSkipCheck().shouldSkip(supportedAbisCpuAbiMatch));
+        } finally {
+            ReflectionHelpers.setStaticField(Build.class, "SUPPORTED_ABIS", originalAbis);
+        }
+    }
+
+    @Test
+    public void testSupportedAbiIncludesAndCpuAbi2Matches() {
+        TestCase supportedAbisCpuAbi2Match = new TestCase("supportedAbisCpuAbi2Match") {
+            @DisableIf.Build(supported_abis_includes = "bar")
+            public void supportedAbisCpuAbi2Match() {}
+        };
+        String[] originalAbis = Build.SUPPORTED_ABIS;
+        try {
+            ReflectionHelpers.setStaticField(Build.class, "SUPPORTED_ABIS",
+                    new String[] {"foo", "bar"});
+            Assert.assertTrue(new DisableIfSkipCheck().shouldSkip(supportedAbisCpuAbi2Match));
+        } finally {
+            ReflectionHelpers.setStaticField(Build.class, "SUPPORTED_ABIS", originalAbis);
+        }
+    }
+
+    @Test
+    public void testSupportedAbiIncludesButNoMatch() {
+        TestCase supportedAbisNoMatch = new TestCase("supportedAbisNoMatch") {
+            @DisableIf.Build(supported_abis_includes = "baz")
+            public void supportedAbisNoMatch() {}
+        };
+        String[] originalAbis = Build.SUPPORTED_ABIS;
+        try {
+            ReflectionHelpers.setStaticField(Build.class, "SUPPORTED_ABIS",
+                    new String[] {"foo", "bar"});
+            Assert.assertFalse(new DisableIfSkipCheck().shouldSkip(supportedAbisNoMatch));
+        } finally {
+            ReflectionHelpers.setStaticField(Build.class, "SUPPORTED_ABIS", originalAbis);
+        }
+    }
+
+    @Test
+    public void testHardwareIsMatches() {
+        TestCase hardwareIsMatches = new TestCase("hardwareIsMatches") {
+            @DisableIf.Build(hardware_is = "hammerhead")
+            public void hardwareIsMatches() {}
+        };
+        String originalHardware = Build.HARDWARE;
+        try {
+            ReflectionHelpers.setStaticField(Build.class, "HARDWARE", "hammerhead");
+            Assert.assertTrue(new DisableIfSkipCheck().shouldSkip(hardwareIsMatches));
+        } finally {
+            ReflectionHelpers.setStaticField(Build.class, "HARDWARE", originalHardware);
+        }
+    }
+
+    @Test
+    public void testHardwareIsDoesntMatch() {
+        TestCase hardwareIsDoesntMatch = new TestCase("hardwareIsDoesntMatch") {
+            @DisableIf.Build(hardware_is = "hammerhead")
+            public void hardwareIsDoesntMatch() {}
+        };
+        String originalHardware = Build.HARDWARE;
+        try {
+            ReflectionHelpers.setStaticField(Build.class, "HARDWARE", "mako");
+            Assert.assertFalse(new DisableIfSkipCheck().shouldSkip(hardwareIsDoesntMatch));
+        } finally {
+            ReflectionHelpers.setStaticField(Build.class, "HARDWARE", originalHardware);
+        }
+    }
+
+    @DisableIf.Build(supported_abis_includes = "foo")
+    private static class DisableIfSuperclassTestCase extends TestCase {
+        public DisableIfSuperclassTestCase(String name) {
+            super(name);
+        }
+    }
+
+    @DisableIf.Build(hardware_is = "hammerhead")
+    private static class DisableIfTestCase extends DisableIfSuperclassTestCase {
+        public DisableIfTestCase(String name) {
+            super(name);
+        }
+        public void sampleTestMethod() {}
+    }
+
+    @Test
+    public void testDisableClass() {
+        TestCase sampleTestMethod = new DisableIfTestCase("sampleTestMethod");
+        String originalHardware = Build.HARDWARE;
+        try {
+            ReflectionHelpers.setStaticField(Build.class, "HARDWARE", "hammerhead");
+            Assert.assertTrue(new DisableIfSkipCheck().shouldSkip(sampleTestMethod));
+        } finally {
+            ReflectionHelpers.setStaticField(Build.class, "HARDWARE", originalHardware);
+        }
+    }
+
+    @Test
+    public void testDisableSuperClass() {
+        TestCase sampleTestMethod = new DisableIfTestCase("sampleTestMethod");
+        String[] originalAbis = Build.SUPPORTED_ABIS;
+        try {
+            ReflectionHelpers.setStaticField(Build.class, "SUPPORTED_ABIS", new String[] {"foo"});
+            Assert.assertTrue(new DisableIfSkipCheck().shouldSkip(sampleTestMethod));
+        } finally {
+            ReflectionHelpers.setStaticField(Build.class, "SUPPORTED_ABIS", originalAbis);
+        }
+    }
+}
diff --git a/base/test/android/junit/src/org/chromium/base/test/util/ManualSkipCheckTest.java b/base/test/android/junit/src/org/chromium/base/test/util/ManualSkipCheckTest.java
new file mode 100644
index 0000000..0c3e955
--- /dev/null
+++ b/base/test/android/junit/src/org/chromium/base/test/util/ManualSkipCheckTest.java
@@ -0,0 +1,145 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.util;
+
+import static org.hamcrest.Matchers.contains;
+import static org.hamcrest.Matchers.isIn;
+
+import android.app.Instrumentation;
+import android.content.Context;
+import android.os.Bundle;
+import android.support.test.InstrumentationRegistry;
+
+import org.junit.Assert;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ErrorCollector;
+import org.junit.rules.ExternalResource;
+import org.junit.rules.TestRule;
+import org.junit.runner.Description;
+import org.junit.runner.RunWith;
+import org.junit.runner.notification.Failure;
+import org.junit.runner.notification.RunListener;
+import org.junit.runner.notification.RunNotifier;
+import org.junit.runners.model.FrameworkMethod;
+import org.robolectric.RuntimeEnvironment;
+import org.robolectric.annotation.Config;
+
+import org.chromium.base.test.BaseJUnit4ClassRunner;
+import org.chromium.base.test.BaseRobolectricTestRunner;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Unit tests for skipping tests annotated with {@code @}{@link Manual}.
+ */
+@RunWith(BaseRobolectricTestRunner.class)
+@Config(manifest = Config.NONE)
+public class ManualSkipCheckTest {
+    /**
+     * Example test class.
+     */
+    public static class ManualTest {
+        @Test
+        @Manual
+        public void manualTest() {}
+
+        @Test
+        public void nonManualTest() {}
+    }
+
+    @Test
+    public void testManual() throws NoSuchMethodException {
+        FrameworkMethod method = new FrameworkMethod(ManualTest.class.getMethod("manualTest"));
+        Assert.assertTrue(new ManualSkipCheck().shouldSkip(method));
+    }
+
+    @Test
+    public void testNonManual() throws NoSuchMethodException {
+        FrameworkMethod method = new FrameworkMethod(ManualTest.class.getMethod("nonManualTest"));
+        Assert.assertFalse(new ManualSkipCheck().shouldSkip(method));
+    }
+
+    private static class MockRunListener extends RunListener {
+        private List<Description> mRunTests = new ArrayList<>();
+        private List<Description> mSkippedTests = new ArrayList<>();
+
+        public List<Description> getRunTests() {
+            return mRunTests;
+        }
+
+        public List<Description> getSkippedTests() {
+            return mSkippedTests;
+        }
+
+        @Override
+        public void testStarted(Description description) throws Exception {
+            mRunTests.add(description);
+        }
+
+        @Override
+        public void testFinished(Description description) throws Exception {
+            Assert.assertThat(description, isIn(mRunTests));
+        }
+
+        @Override
+        public void testFailure(Failure failure) throws Exception {
+            Assert.fail(failure.toString());
+        }
+
+        @Override
+        public void testAssumptionFailure(Failure failure) {
+            Assert.fail(failure.toString());
+        }
+
+        @Override
+        public void testIgnored(Description description) throws Exception {
+            mSkippedTests.add(description);
+        }
+    }
+
+    /**
+     * Registers a fake {@link Instrumentation} so that class runners for instrumentation tests can
+     * be run even in Robolectric tests.
+     */
+    private static class MockInstrumentationRule extends ExternalResource {
+        @Override
+        protected void before() throws Throwable {
+            Instrumentation instrumentation = new Instrumentation() {
+                @Override
+                public Context getTargetContext() {
+                    return RuntimeEnvironment.application;
+                }
+            };
+            InstrumentationRegistry.registerInstance(instrumentation, new Bundle());
+        }
+
+        @Override
+        protected void after() {
+            InstrumentationRegistry.registerInstance(null, new Bundle());
+        }
+    }
+
+    @Rule
+    public TestRule mMockInstrumentationRule = new MockInstrumentationRule();
+
+    @Rule
+    public ErrorCollector mErrorCollector = new ErrorCollector();
+
+    @Test
+    public void testWithTestRunner() throws Exception {
+        // TODO(bauerb): Using Mockito mock() or spy() throws a ClassCastException.
+        MockRunListener runListener = new MockRunListener();
+        RunNotifier runNotifier = new RunNotifier();
+        runNotifier.addListener(runListener);
+        new BaseJUnit4ClassRunner(ManualTest.class).run(runNotifier);
+
+        mErrorCollector.checkThat(runListener.getRunTests(),
+                contains(Description.createTestDescription(ManualTest.class, "nonManualTest")));
+        mErrorCollector.checkThat(runListener.getSkippedTests(),
+                contains(Description.createTestDescription(ManualTest.class, "manualTest")));
+    }
+}
diff --git a/base/test/android/junit/src/org/chromium/base/test/util/MinAndroidSdkLevelSkipCheckTest.java b/base/test/android/junit/src/org/chromium/base/test/util/MinAndroidSdkLevelSkipCheckTest.java
new file mode 100644
index 0000000..050cb10
--- /dev/null
+++ b/base/test/android/junit/src/org/chromium/base/test/util/MinAndroidSdkLevelSkipCheckTest.java
@@ -0,0 +1,95 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.util;
+
+import junit.framework.TestCase;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.robolectric.annotation.Config;
+
+import org.chromium.base.test.BaseRobolectricTestRunner;
+
+/** Unit tests for MinAndroidSdkLevelSkipCheck. */
+@RunWith(BaseRobolectricTestRunner.class)
+@Config(manifest = Config.NONE, sdk = 18)
+public class MinAndroidSdkLevelSkipCheckTest {
+    private static class UnannotatedBaseClass extends TestCase {
+        public UnannotatedBaseClass(String name) {
+            super(name);
+        }
+        @MinAndroidSdkLevel(17) public void min17Method() {}
+        @MinAndroidSdkLevel(20) public void min20Method() {}
+    }
+
+    @MinAndroidSdkLevel(17)
+    private static class Min17Class extends UnannotatedBaseClass {
+        public Min17Class(String name) {
+            super(name);
+        }
+        public void unannotatedMethod() {}
+    }
+
+    @MinAndroidSdkLevel(20)
+    private static class Min20Class extends UnannotatedBaseClass {
+        public Min20Class(String name) {
+            super(name);
+        }
+        public void unannotatedMethod() {}
+    }
+
+    private static class ExtendsMin17Class extends Min17Class {
+        public ExtendsMin17Class(String name) {
+            super(name);
+        }
+        @Override
+        public void unannotatedMethod() {}
+    }
+
+    private static class ExtendsMin20Class extends Min20Class {
+        public ExtendsMin20Class(String name) {
+            super(name);
+        }
+        @Override
+        public void unannotatedMethod() {}
+    }
+
+    @Test
+    public void testAnnotatedMethodAboveMin() {
+        Assert.assertFalse(new MinAndroidSdkLevelSkipCheck().shouldSkip(
+                new UnannotatedBaseClass("min17Method")));
+    }
+
+    @Test
+    public void testAnnotatedMethodBelowMin() {
+        Assert.assertTrue(new MinAndroidSdkLevelSkipCheck().shouldSkip(
+                new UnannotatedBaseClass("min20Method")));
+    }
+
+    @Test
+    public void testAnnotatedClassAboveMin() {
+        Assert.assertFalse(new MinAndroidSdkLevelSkipCheck().shouldSkip(
+                new Min17Class("unannotatedMethod")));
+    }
+
+    @Test
+    public void testAnnotatedClassBelowMin() {
+        Assert.assertTrue(new MinAndroidSdkLevelSkipCheck().shouldSkip(
+                new Min20Class("unannotatedMethod")));
+    }
+
+    @Test
+    public void testAnnotatedSuperclassAboveMin() {
+        Assert.assertFalse(new MinAndroidSdkLevelSkipCheck().shouldSkip(
+                new ExtendsMin17Class("unannotatedMethod")));
+    }
+
+    @Test
+    public void testAnnotatedSuperclassBelowMin() {
+        Assert.assertTrue(new MinAndroidSdkLevelSkipCheck().shouldSkip(
+                new ExtendsMin20Class("unannotatedMethod")));
+    }
+}
diff --git a/base/test/android/junit/src/org/chromium/base/test/util/RestrictionSkipCheckTest.java b/base/test/android/junit/src/org/chromium/base/test/util/RestrictionSkipCheckTest.java
new file mode 100644
index 0000000..86285de
--- /dev/null
+++ b/base/test/android/junit/src/org/chromium/base/test/util/RestrictionSkipCheckTest.java
@@ -0,0 +1,129 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.util;
+
+import android.text.TextUtils;
+
+import junit.framework.TestCase;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.robolectric.annotation.Config;
+
+import org.chromium.base.test.BaseRobolectricTestRunner;
+
+/** Unit tests for RestrictionSkipCheck. */
+@RunWith(BaseRobolectricTestRunner.class)
+@Config(manifest = Config.NONE)
+public class RestrictionSkipCheckTest {
+    private static final String TEST_RESTRICTION_APPLIES =
+            "org.chromium.base.test.util.RestrictionSkipCheckTest.TEST_RESTRICTION_APPLIES";
+    private static final String TEST_RESTRICTION_DOES_NOT_APPLY =
+            "org.chromium.base.test.util.RestrictionSkipCheckTest.TEST_RESTRICTION_DOES_NOT_APPLY";
+
+    private static class TestRestrictionSkipCheck extends RestrictionSkipCheck {
+        public TestRestrictionSkipCheck() {
+            super(null);
+        }
+        @Override
+        protected boolean restrictionApplies(String restriction) {
+            return TextUtils.equals(restriction, TEST_RESTRICTION_APPLIES);
+        }
+    }
+
+    private static class UnannotatedBaseClass extends TestCase {
+        public UnannotatedBaseClass(String name) {
+            super(name);
+        }
+        @Restriction({TEST_RESTRICTION_APPLIES}) public void restrictedMethod() {}
+        @Restriction({TEST_RESTRICTION_DOES_NOT_APPLY}) public void unrestrictedMethod() {}
+    }
+
+    @Restriction({TEST_RESTRICTION_APPLIES})
+    private static class RestrictedClass extends UnannotatedBaseClass {
+        public RestrictedClass(String name) {
+            super(name);
+        }
+        public void unannotatedMethod() {}
+    }
+
+    @Restriction({TEST_RESTRICTION_DOES_NOT_APPLY})
+    private static class UnrestrictedClass extends UnannotatedBaseClass {
+        public UnrestrictedClass(String name) {
+            super(name);
+        }
+        public void unannotatedMethod() {}
+    }
+
+    @Restriction({
+            TEST_RESTRICTION_APPLIES,
+            TEST_RESTRICTION_DOES_NOT_APPLY})
+    private static class MultipleRestrictionsRestrictedClass extends UnannotatedBaseClass {
+        public MultipleRestrictionsRestrictedClass(String name) {
+            super(name);
+        }
+        public void unannotatedMethod() {}
+    }
+
+    private static class ExtendsRestrictedClass extends RestrictedClass {
+        public ExtendsRestrictedClass(String name) {
+            super(name);
+        }
+        @Override
+        public void unannotatedMethod() {}
+    }
+
+    private static class ExtendsUnrestrictedClass extends UnrestrictedClass {
+        public ExtendsUnrestrictedClass(String name) {
+            super(name);
+        }
+        @Override
+        public void unannotatedMethod() {}
+    }
+
+    @Test
+    public void testMethodRestricted() {
+        Assert.assertTrue(new TestRestrictionSkipCheck().shouldSkip(
+                new UnannotatedBaseClass("restrictedMethod")));
+    }
+
+    @Test
+    public void testMethodUnrestricted() {
+        Assert.assertFalse(new TestRestrictionSkipCheck().shouldSkip(
+                new UnannotatedBaseClass("unrestrictedMethod")));
+    }
+
+    @Test
+    public void testClassRestricted() {
+        Assert.assertTrue(new TestRestrictionSkipCheck().shouldSkip(
+                new RestrictedClass("unannotatedMethod")));
+    }
+
+    @Test
+    public void testClassUnrestricted() {
+        Assert.assertFalse(new TestRestrictionSkipCheck().shouldSkip(
+                new UnrestrictedClass("unannotatedMethod")));
+    }
+
+    @Test
+    public void testMultipleRestrictionsClassRestricted() {
+        Assert.assertTrue(new TestRestrictionSkipCheck().shouldSkip(
+                new MultipleRestrictionsRestrictedClass("unannotatedMethod")));
+    }
+
+    @Test
+    public void testSuperclassRestricted() {
+        Assert.assertTrue(new TestRestrictionSkipCheck().shouldSkip(
+                new ExtendsRestrictedClass("unannotatedMethod")));
+    }
+
+    @Test
+    public void testSuperclassUnrestricted() {
+        Assert.assertFalse(new TestRestrictionSkipCheck().shouldSkip(
+                new ExtendsUnrestrictedClass("unannotatedMethod")));
+    }
+}
diff --git a/base/test/android/junit/src/org/chromium/base/test/util/SkipCheckTest.java b/base/test/android/junit/src/org/chromium/base/test/util/SkipCheckTest.java
new file mode 100644
index 0000000..51c7516
--- /dev/null
+++ b/base/test/android/junit/src/org/chromium/base/test/util/SkipCheckTest.java
@@ -0,0 +1,130 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.test.util;
+
+import junit.framework.TestCase;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.model.FrameworkMethod;
+import org.robolectric.annotation.Config;
+
+import org.chromium.base.test.BaseRobolectricTestRunner;
+
+import java.lang.annotation.Annotation;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.reflect.AnnotatedElement;
+import java.lang.reflect.Method;
+import java.util.List;
+
+/** Unit tests for SkipCheck. */
+@RunWith(BaseRobolectricTestRunner.class)
+@Config(manifest = Config.NONE)
+public class SkipCheckTest {
+    private static class TestableSkipCheck extends SkipCheck {
+        public static <T extends Annotation> List<T> getAnnotationsForTesting(
+                AnnotatedElement element, Class<T> annotationClass) {
+            return AnnotationProcessingUtils.getAnnotations(element, annotationClass);
+        }
+
+        @Override
+        public boolean shouldSkip(FrameworkMethod m) {
+            return false;
+        }
+    }
+
+    @Retention(RetentionPolicy.RUNTIME)
+    private @interface TestAnnotation {}
+
+    @TestAnnotation
+    private class AnnotatedBaseClass {
+        public void unannotatedMethod() {}
+        @TestAnnotation public void annotatedMethod() {}
+    }
+
+    private class ExtendsAnnotatedBaseClass extends AnnotatedBaseClass {
+        public void anotherUnannotatedMethod() {}
+    }
+
+    private class ExtendsTestCaseClass extends TestCase {
+        public ExtendsTestCaseClass(String name) {
+            super(name);
+        }
+        public void testMethodA() {}
+    }
+
+    private class UnannotatedBaseClass {
+        public void unannotatedMethod() {}
+        @TestAnnotation public void annotatedMethod() {}
+    }
+
+    @Test
+    public void getAnnotationsForClassNone() {
+        List<TestAnnotation> annotations = TestableSkipCheck.getAnnotationsForTesting(
+                UnannotatedBaseClass.class, TestAnnotation.class);
+        Assert.assertEquals(0, annotations.size());
+    }
+
+    @Test
+    public void getAnnotationsForClassOnClass() {
+        List<TestAnnotation> annotations = TestableSkipCheck.getAnnotationsForTesting(
+                AnnotatedBaseClass.class, TestAnnotation.class);
+        Assert.assertEquals(1, annotations.size());
+    }
+
+    @Test
+    public void getAnnotationsForClassOnSuperclass() {
+        List<TestAnnotation> annotations = TestableSkipCheck.getAnnotationsForTesting(
+                ExtendsAnnotatedBaseClass.class, TestAnnotation.class);
+        Assert.assertEquals(1, annotations.size());
+    }
+
+    @Test
+    public void getAnnotationsForMethodNone() throws NoSuchMethodException {
+        Method testMethod = UnannotatedBaseClass.class.getMethod("unannotatedMethod",
+                (Class[]) null);
+        List<TestAnnotation> annotations = TestableSkipCheck.getAnnotationsForTesting(
+                testMethod, TestAnnotation.class);
+        Assert.assertEquals(0, annotations.size());
+    }
+
+    @Test
+    public void getAnnotationsForMethodOnMethod() throws NoSuchMethodException {
+        Method testMethod = UnannotatedBaseClass.class.getMethod("annotatedMethod",
+                (Class[]) null);
+        List<TestAnnotation> annotations = TestableSkipCheck.getAnnotationsForTesting(
+                testMethod, TestAnnotation.class);
+        Assert.assertEquals(1, annotations.size());
+    }
+
+    @Test
+    public void getAnnotationsForMethodOnClass() throws NoSuchMethodException {
+        Method testMethod = AnnotatedBaseClass.class.getMethod("unannotatedMethod",
+                (Class[]) null);
+        List<TestAnnotation> annotations = TestableSkipCheck.getAnnotationsForTesting(
+                testMethod, TestAnnotation.class);
+        Assert.assertEquals(1, annotations.size());
+    }
+
+    @Test
+    public void getAnnotationsForMethodOnSuperclass() throws NoSuchMethodException {
+        Method testMethod = ExtendsAnnotatedBaseClass.class.getMethod("unannotatedMethod",
+                (Class[]) null);
+        List<TestAnnotation> annotations = TestableSkipCheck.getAnnotationsForTesting(
+                testMethod, TestAnnotation.class);
+        Assert.assertEquals(1, annotations.size());
+    }
+
+    @Test
+    public void getAnnotationsOverlapping() throws NoSuchMethodException {
+        Method testMethod = AnnotatedBaseClass.class.getMethod("annotatedMethod",
+                (Class[]) null);
+        List<TestAnnotation> annotations = TestableSkipCheck.getAnnotationsForTesting(
+                testMethod, TestAnnotation.class);
+        Assert.assertEquals(2, annotations.size());
+    }
+}
diff --git a/base/test/android/url_utils.cc b/base/test/android/url_utils.cc
new file mode 100644
index 0000000..7d2a8ed
--- /dev/null
+++ b/base/test/android/url_utils.cc
@@ -0,0 +1,24 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/android/url_utils.h"
+
+#include "base/android/jni_string.h"
+#include "base/android/scoped_java_ref.h"
+#include "jni/UrlUtils_jni.h"
+
+namespace base {
+namespace android {
+
+FilePath GetIsolatedTestRoot() {
+  JNIEnv* env = base::android::AttachCurrentThread();
+  ScopedJavaLocalRef<jstring> jtest_data_dir =
+      Java_UrlUtils_getIsolatedTestRoot(env);
+  base::FilePath test_data_dir(
+      base::android::ConvertJavaStringToUTF8(env, jtest_data_dir));
+  return test_data_dir;
+}
+
+}  // namespace android
+}  // namespace base
diff --git a/base/test/android/url_utils.h b/base/test/android/url_utils.h
new file mode 100644
index 0000000..3769bd2
--- /dev/null
+++ b/base/test/android/url_utils.h
@@ -0,0 +1,23 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_ANDROID_URL_UTILS_H_
+#define BASE_TEST_ANDROID_URL_UTILS_H_
+
+#include <jni.h>
+
+#include "base/base_export.h"
+#include "base/files/file_path.h"
+
+namespace base {
+namespace android {
+
+// Returns the root of the test data directory. This function calls into the
+// Java class UrlUtils through the JNI bridge.
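+//
+// Illustrative usage (a sketch; the file name is hypothetical):
+//
+//   base::FilePath root = base::android::GetIsolatedTestRoot();
+//   base::FilePath config = root.AppendASCII("test_config.json");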
+BASE_EXPORT FilePath GetIsolatedTestRoot();
+
+}  // namespace android
+}  // namespace base
+
+#endif  // BASE_TEST_ANDROID_URL_UTILS_H_
diff --git a/base/test/bind_test_util.h b/base/test/bind_test_util.h
new file mode 100644
index 0000000..0dfcb46
--- /dev/null
+++ b/base/test/bind_test_util.h
@@ -0,0 +1,36 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_BIND_TEST_UTIL_H_
+#define BASE_TEST_BIND_TEST_UTIL_H_
+
+#include "base/bind.h"
+
+namespace base {
+namespace internal {
+
+template <typename F, typename Signature>
+struct BindLambdaHelper;
+
+template <typename F, typename R, typename... Args>
+struct BindLambdaHelper<F, R(Args...)> {
+  static R Run(const std::decay_t<F>& f, Args... args) {
+    return f(std::forward<Args>(args)...);
+  }
+};
+
+}  // namespace internal
+
+// A variant of Bind() that can bind capturing lambdas for testing. Binding
+// extra arguments is not supported, since the lambda itself can capture them.
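+//
+// Illustrative usage (a sketch; the returned value is a RepeatingCallback
+// whose signature matches the lambda's call operator):
+//
+//   int total = 0;
+//   auto cb = base::BindLambdaForTesting([&](int delta) { total += delta; });
+//   cb.Run(5);  // total is now 5.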
+template <typename F>
+decltype(auto) BindLambdaForTesting(F&& f) {
+  using Signature = internal::ExtractCallableRunType<std::decay_t<F>>;
+  return BindRepeating(&internal::BindLambdaHelper<F, Signature>::Run,
+                       std::forward<F>(f));
+}
+
+}  // namespace base
+
+#endif  // BASE_TEST_BIND_TEST_UTIL_H_
diff --git a/base/test/copy_only_int.h b/base/test/copy_only_int.h
new file mode 100644
index 0000000..4e482c9
--- /dev/null
+++ b/base/test/copy_only_int.h
@@ -0,0 +1,55 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_COPY_ONLY_INT_H_
+#define BASE_TEST_COPY_ONLY_INT_H_
+
+#include "base/macros.h"
+
+namespace base {
+
+// A copy-only (not movable) class that holds an integer. This is designed for
+// testing containers. See also MoveOnlyInt.
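+//
+// Illustrative usage (a sketch, not part of this header):
+//
+//   std::vector<CopyOnlyInt> v;
+//   CopyOnlyInt x(42);
+//   v.push_back(x);                 // OK: copying is allowed.
+//   // v.push_back(std::move(x));  // Would not compile: move is deleted.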
+class CopyOnlyInt {
+ public:
+  explicit CopyOnlyInt(int data = 1) : data_(data) {}
+  CopyOnlyInt(const CopyOnlyInt& other) = default;
+  ~CopyOnlyInt() { data_ = 0; }
+
+  friend bool operator==(const CopyOnlyInt& lhs, const CopyOnlyInt& rhs) {
+    return lhs.data_ == rhs.data_;
+  }
+
+  friend bool operator!=(const CopyOnlyInt& lhs, const CopyOnlyInt& rhs) {
+    return !operator==(lhs, rhs);
+  }
+
+  friend bool operator<(const CopyOnlyInt& lhs, const CopyOnlyInt& rhs) {
+    return lhs.data_ < rhs.data_;
+  }
+
+  friend bool operator>(const CopyOnlyInt& lhs, const CopyOnlyInt& rhs) {
+    return rhs < lhs;
+  }
+
+  friend bool operator<=(const CopyOnlyInt& lhs, const CopyOnlyInt& rhs) {
+    return !(rhs < lhs);
+  }
+
+  friend bool operator>=(const CopyOnlyInt& lhs, const CopyOnlyInt& rhs) {
+    return !(lhs < rhs);
+  }
+
+  int data() const { return data_; }
+
+ private:
+  volatile int data_;
+
+  CopyOnlyInt(CopyOnlyInt&&) = delete;
+  CopyOnlyInt& operator=(CopyOnlyInt&) = delete;
+};
+
+}  // namespace base
+
+#endif  // BASE_TEST_COPY_ONLY_INT_H_
diff --git a/base/test/data/file_util/binary_file.bin b/base/test/data/file_util/binary_file.bin
new file mode 100644
index 0000000..f53cc82
--- /dev/null
+++ b/base/test/data/file_util/binary_file.bin
Binary files differ
diff --git a/base/test/data/file_util/binary_file_diff.bin b/base/test/data/file_util/binary_file_diff.bin
new file mode 100644
index 0000000..103b26d
--- /dev/null
+++ b/base/test/data/file_util/binary_file_diff.bin
Binary files differ
diff --git a/base/test/data/file_util/binary_file_same.bin b/base/test/data/file_util/binary_file_same.bin
new file mode 100644
index 0000000..f53cc82
--- /dev/null
+++ b/base/test/data/file_util/binary_file_same.bin
Binary files differ
diff --git a/base/test/data/file_util/blank_line.txt b/base/test/data/file_util/blank_line.txt
new file mode 100644
index 0000000..8892069
--- /dev/null
+++ b/base/test/data/file_util/blank_line.txt
@@ -0,0 +1,3 @@
+The next line is blank.
+
+But this one isn't.
diff --git a/base/test/data/file_util/blank_line_crlf.txt b/base/test/data/file_util/blank_line_crlf.txt
new file mode 100644
index 0000000..3aefe52
--- /dev/null
+++ b/base/test/data/file_util/blank_line_crlf.txt
@@ -0,0 +1,3 @@
+The next line is blank.

+

+But this one isn't.

diff --git a/base/test/data/file_util/crlf.txt b/base/test/data/file_util/crlf.txt
new file mode 100644
index 0000000..0e62728
--- /dev/null
+++ b/base/test/data/file_util/crlf.txt
@@ -0,0 +1 @@
+This file is the same.

diff --git a/base/test/data/file_util/different.txt b/base/test/data/file_util/different.txt
new file mode 100644
index 0000000..5b9f9c4
--- /dev/null
+++ b/base/test/data/file_util/different.txt
@@ -0,0 +1 @@
+This file is different.
diff --git a/base/test/data/file_util/different_first.txt b/base/test/data/file_util/different_first.txt
new file mode 100644
index 0000000..8661d66
--- /dev/null
+++ b/base/test/data/file_util/different_first.txt
@@ -0,0 +1 @@
+this file is the same.
diff --git a/base/test/data/file_util/different_last.txt b/base/test/data/file_util/different_last.txt
new file mode 100644
index 0000000..e8b3e5a
--- /dev/null
+++ b/base/test/data/file_util/different_last.txt
@@ -0,0 +1 @@
+This file is the same. 
\ No newline at end of file
diff --git a/base/test/data/file_util/empty1.txt b/base/test/data/file_util/empty1.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/base/test/data/file_util/empty1.txt
diff --git a/base/test/data/file_util/empty2.txt b/base/test/data/file_util/empty2.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/base/test/data/file_util/empty2.txt
diff --git a/base/test/data/file_util/first1.txt b/base/test/data/file_util/first1.txt
new file mode 100644
index 0000000..2c6e300
--- /dev/null
+++ b/base/test/data/file_util/first1.txt
@@ -0,0 +1,2 @@
+The first line is the same.
+The second line is different.
diff --git a/base/test/data/file_util/first2.txt b/base/test/data/file_util/first2.txt
new file mode 100644
index 0000000..e39b5ec
--- /dev/null
+++ b/base/test/data/file_util/first2.txt
@@ -0,0 +1,2 @@
+The first line is the same.
+The second line is not.
diff --git a/base/test/data/file_util/original.txt b/base/test/data/file_util/original.txt
new file mode 100644
index 0000000..4422f57
--- /dev/null
+++ b/base/test/data/file_util/original.txt
@@ -0,0 +1 @@
+This file is the same.
diff --git a/base/test/data/file_util/red.png b/base/test/data/file_util/red.png
new file mode 100644
index 0000000..0806141
--- /dev/null
+++ b/base/test/data/file_util/red.png
Binary files differ
diff --git a/base/test/data/file_util/same.txt b/base/test/data/file_util/same.txt
new file mode 100644
index 0000000..4422f57
--- /dev/null
+++ b/base/test/data/file_util/same.txt
@@ -0,0 +1 @@
+This file is the same.
diff --git a/base/test/data/file_util/same_length.txt b/base/test/data/file_util/same_length.txt
new file mode 100644
index 0000000..157405c
--- /dev/null
+++ b/base/test/data/file_util/same_length.txt
@@ -0,0 +1 @@
+This file is not same.
diff --git a/base/test/data/file_util/shortened.txt b/base/test/data/file_util/shortened.txt
new file mode 100644
index 0000000..2bee82c
--- /dev/null
+++ b/base/test/data/file_util/shortened.txt
@@ -0,0 +1 @@
+This file is the
\ No newline at end of file
diff --git a/base/test/data/file_version_info_unittest/FileVersionInfoTest1.dll b/base/test/data/file_version_info_unittest/FileVersionInfoTest1.dll
new file mode 100755
index 0000000..bdf8dc0
--- /dev/null
+++ b/base/test/data/file_version_info_unittest/FileVersionInfoTest1.dll
Binary files differ
diff --git a/base/test/data/file_version_info_unittest/FileVersionInfoTest2.dll b/base/test/data/file_version_info_unittest/FileVersionInfoTest2.dll
new file mode 100755
index 0000000..51e7966
--- /dev/null
+++ b/base/test/data/file_version_info_unittest/FileVersionInfoTest2.dll
Binary files differ
diff --git a/base/test/data/json/bom_feff.json b/base/test/data/json/bom_feff.json
new file mode 100644
index 0000000..b05ae50
--- /dev/null
+++ b/base/test/data/json/bom_feff.json
@@ -0,0 +1,10 @@
+{

+  "appName": {

+    "message": "Gmail",

+    "description": "App name."

+  },

+  "appDesc": {

+    "message": "بريد إلكتروني يوفر إمكانية البحث مع مقدار أقل من الرسائل غير المرغوب فيها.", 

+    "description":"App description."

+  }

+}
\ No newline at end of file
diff --git a/base/test/data/pe_image/pe_image_test_32.dll b/base/test/data/pe_image/pe_image_test_32.dll
new file mode 100755
index 0000000..539d631
--- /dev/null
+++ b/base/test/data/pe_image/pe_image_test_32.dll
Binary files differ
diff --git a/base/test/data/pe_image/pe_image_test_64.dll b/base/test/data/pe_image/pe_image_test_64.dll
new file mode 100755
index 0000000..8801e23
--- /dev/null
+++ b/base/test/data/pe_image/pe_image_test_64.dll
Binary files differ
diff --git a/base/test/data/serializer_nested_test.json b/base/test/data/serializer_nested_test.json
new file mode 100644
index 0000000..cfea8e8
--- /dev/null
+++ b/base/test/data/serializer_nested_test.json
@@ -0,0 +1,17 @@
+{
+   "bool": true,
+   "dict": {
+      "bool": true,
+      "dict": {
+         "bees": "knees",
+         "cats": "meow"
+      },
+      "foos": "bar",
+      "list": [ 3.4, "second", null ]
+   },
+   "int": 42,
+   "list": [ 1, 2 ],
+   "null": null,
+   "real": 3.14,
+   "string": "hello"
+}
diff --git a/base/test/data/serializer_test.json b/base/test/data/serializer_test.json
new file mode 100644
index 0000000..446925e
--- /dev/null
+++ b/base/test/data/serializer_test.json
@@ -0,0 +1,8 @@
+{
+   "bool": true,
+   "int": 42,
+   "list": [ 1, 2 ],
+   "null": null,
+   "real": 3.14,
+   "string": "hello"
+}
diff --git a/base/test/data/serializer_test_nowhitespace.json b/base/test/data/serializer_test_nowhitespace.json
new file mode 100644
index 0000000..a1afdc5
--- /dev/null
+++ b/base/test/data/serializer_test_nowhitespace.json
@@ -0,0 +1 @@
+{"bool":true,"int":42,"list":[1,2],"null":null,"real":3.14,"string":"hello"}
\ No newline at end of file
diff --git a/base/test/fontconfig_util_linux.cc b/base/test/fontconfig_util_linux.cc
new file mode 100644
index 0000000..8bd7c92
--- /dev/null
+++ b/base/test/fontconfig_util_linux.cc
@@ -0,0 +1,496 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/fontconfig_util_linux.h"
+
+#include <fontconfig/fontconfig.h>
+
+#include "base/base_paths.h"
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/path_service.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+
+namespace base {
+
+namespace {
+
+const char kFontsConfTemplate[] = R"(<?xml version="1.0"?>
+<!DOCTYPE fontconfig SYSTEM "fonts.dtd">
+<fontconfig>
+
+  <!-- Cache location. -->
+  <cachedir>$1</cachedir>
+
+  <!-- GCS-synced fonts. -->
+  <dir>$2</dir>
+
+  <!-- Default properties. -->
+  <match target="font">
+    <edit name="embeddedbitmap" mode="append_last">
+      <bool>false</bool>
+    </edit>
+  </match>
+
+  <!-- TODO(thomasanderson): Figure out why this is necessary. -->
+  <match target="pattern">
+    <test name="family" compare="eq">
+      <string>Tinos</string>
+    </test>
+    <test name="prgname" compare="eq">
+      <string>chromevox_tests</string>
+    </test>
+    <edit name="hintstyle" mode="assign">
+      <const>hintslight</const>
+    </edit>
+  </match>
+
+  <match target="pattern">
+    <test qual="any" name="family">
+      <string>Times</string>
+    </test>
+    <edit name="family" mode="assign">
+      <string>Tinos</string>
+    </edit>
+  </match>
+
+  <match target="pattern">
+    <test qual="any" name="family">
+      <string>sans</string>
+    </test>
+    <edit name="family" mode="assign">
+      <string>DejaVu Sans</string>
+    </edit>
+  </match>
+
+  <match target="pattern">
+    <test qual="any" name="family">
+      <string>sans serif</string>
+    </test>
+    <edit name="family" mode="assign">
+      <string>Arimo</string>
+    </edit>
+  </match>
+
+  <!-- Some layout tests specify Helvetica as a family and we need to make sure
+       that we don't fallback to Tinos for them -->
+  <match target="pattern">
+    <test qual="any" name="family">
+      <string>Helvetica</string>
+    </test>
+    <edit name="family" mode="assign">
+      <string>Arimo</string>
+    </edit>
+  </match>
+
+  <match target="pattern">
+    <test qual="any" name="family">
+      <string>sans-serif</string>
+    </test>
+    <edit name="family" mode="assign">
+      <string>Arimo</string>
+    </edit>
+  </match>
+
+  <match target="pattern">
+    <test qual="any" name="family">
+      <string>serif</string>
+    </test>
+    <edit name="family" mode="assign">
+      <string>Tinos</string>
+    </edit>
+  </match>
+
+  <match target="pattern">
+    <test qual="any" name="family">
+      <string>mono</string>
+    </test>
+    <edit name="family" mode="assign">
+      <string>Cousine</string>
+    </edit>
+  </match>
+
+  <match target="pattern">
+    <test qual="any" name="family">
+      <string>monospace</string>
+    </test>
+    <edit name="family" mode="assign">
+      <string>Cousine</string>
+    </edit>
+  </match>
+
+  <match target="pattern">
+    <test qual="any" name="family">
+      <string>Courier</string>
+    </test>
+    <edit name="family" mode="assign">
+      <string>Cousine</string>
+    </edit>
+  </match>
+
+  <match target="pattern">
+    <test qual="any" name="family">
+      <string>cursive</string>
+    </test>
+    <edit name="family" mode="assign">
+      <string>Comic Sans MS</string>
+    </edit>
+  </match>
+
+  <match target="pattern">
+    <test qual="any" name="family">
+      <string>fantasy</string>
+    </test>
+    <edit name="family" mode="assign">
+      <string>Impact</string>
+    </edit>
+  </match>
+
+  <match target="pattern">
+    <test qual="any" name="family">
+      <string>Monaco</string>
+    </test>
+    <edit name="family" mode="assign">
+      <string>Tinos</string>
+    </edit>
+  </match>
+
+  <match target="pattern">
+    <test qual="any" name="family">
+      <string>Arial</string>
+    </test>
+    <edit name="family" mode="assign">
+      <string>Arimo</string>
+    </edit>
+  </match>
+
+  <match target="pattern">
+    <test qual="any" name="family">
+      <string>Courier New</string>
+    </test>
+    <edit name="family" mode="assign">
+      <string>Cousine</string>
+    </edit>
+  </match>
+
+  <match target="pattern">
+    <test qual="any" name="family">
+      <string>Georgia</string>
+    </test>
+    <edit name="family" mode="assign">
+      <string>Gelasio</string>
+    </edit>
+  </match>
+
+  <match target="pattern">
+    <test qual="any" name="family">
+      <string>Times New Roman</string>
+    </test>
+    <edit name="family" mode="assign">
+      <string>Tinos</string>
+    </edit>
+  </match>
+
+  <match target="pattern">
+    <test qual="any" name="family">
+      <string>Verdana</string>
+    </test>
+    <!-- NOT metrically compatible! -->
+    <edit name="family" mode="assign">
+      <string>Arimo</string>
+    </edit>
+  </match>
+
+  <!-- TODO(thomasanderson): Move these configs to be test-specific. -->
+  <match target="pattern">
+    <test name="family" compare="eq">
+      <string>NonAntiAliasedSans</string>
+    </test>
+    <edit name="family" mode="assign">
+      <string>Arimo</string>
+    </edit>
+    <edit name="antialias" mode="assign">
+      <bool>false</bool>
+    </edit>
+  </match>
+
+  <match target="pattern">
+    <test name="family" compare="eq">
+      <string>SlightHintedGeorgia</string>
+    </test>
+    <edit name="family" mode="assign">
+      <string>Gelasio</string>
+    </edit>
+    <edit name="hintstyle" mode="assign">
+      <const>hintslight</const>
+    </edit>
+  </match>
+
+  <match target="pattern">
+    <test name="family" compare="eq">
+      <string>NonHintedSans</string>
+    </test>
+    <edit name="family" mode="assign">
+      <string>Arimo</string>
+    </edit>
+    <!-- These deliberately contradict each other. The 'hinting' preference
+         should take priority -->
+    <edit name="hintstyle" mode="assign">
+      <const>hintfull</const>
+    </edit>
+   <edit name="hinting" mode="assign">
+      <bool>false</bool>
+    </edit>
+  </match>
+
+  <match target="pattern">
+    <test name="family" compare="eq">
+      <string>AutohintedSerif</string>
+    </test>
+    <edit name="family" mode="assign">
+      <string>Arimo</string>
+    </edit>
+    <edit name="autohint" mode="assign">
+      <bool>true</bool>
+    </edit>
+    <edit name="hintstyle" mode="assign">
+      <const>hintmedium</const>
+    </edit>
+  </match>
+
+  <match target="pattern">
+    <test name="family" compare="eq">
+      <string>HintedSerif</string>
+    </test>
+    <edit name="family" mode="assign">
+      <string>Arimo</string>
+    </edit>
+    <edit name="autohint" mode="assign">
+      <bool>false</bool>
+    </edit>
+    <edit name="hintstyle" mode="assign">
+      <const>hintmedium</const>
+    </edit>
+  </match>
+
+  <match target="pattern">
+    <test name="family" compare="eq">
+      <string>FullAndAutoHintedSerif</string>
+    </test>
+    <edit name="family" mode="assign">
+      <string>Arimo</string>
+    </edit>
+    <edit name="autohint" mode="assign">
+      <bool>true</bool>
+    </edit>
+    <edit name="hintstyle" mode="assign">
+      <const>hintfull</const>
+    </edit>
+  </match>
+
+  <match target="pattern">
+    <test name="family" compare="eq">
+      <string>SubpixelEnabledArial</string>
+    </test>
+    <edit name="family" mode="assign">
+      <string>Arimo</string>
+    </edit>
+    <edit name="rgba" mode="assign">
+      <const>rgb</const>
+    </edit>
+  </match>
+
+  <match target="pattern">
+    <test name="family" compare="eq">
+      <string>SubpixelDisabledArial</string>
+    </test>
+    <edit name="family" mode="assign">
+      <string>Arimo</string>
+    </edit>
+    <edit name="rgba" mode="assign">
+      <const>none</const>
+    </edit>
+  </match>
+
+  <match target="pattern">
+    <!-- FontConfig doesn't currently provide a well-defined way to turn on
+         subpixel positioning.  This is just an arbitrary pattern to use after
+         turning subpixel positioning on globally to ensure that we don't have
+         issues with our style getting cached for other tests. -->
+    <test name="family" compare="eq">
+      <string>SubpixelPositioning</string>
+    </test>
+    <edit name="family" mode="assign">
+      <string>Tinos</string>
+    </edit>
+  </match>
+
+  <match target="pattern">
+    <!-- See comments above -->
+    <test name="family" compare="eq">
+      <string>SubpixelPositioningAhem</string>
+    </test>
+    <edit name="family" mode="assign">
+      <string>ahem</string>
+    </edit>
+  </match>
+
+  <match target="pattern">
+    <test name="family" compare="eq">
+      <string>SlightHintedTimesNewRoman</string>
+    </test>
+    <edit name="family" mode="assign">
+      <string>Tinos</string>
+    </edit>
+    <edit name="hintstyle" mode="assign">
+      <const>hintslight</const>
+    </edit>
+  </match>
+
+  <!-- When we encounter a character that the current font doesn't
+       support, gfx::GetFallbackFontForChar() returns the first font
+       that does have a glyph for the character. The list of fonts is
+       sorted by a pattern that includes the current locale, but doesn't
+       include a font family (which means that the fallback font depends
+       on the locale but not on the current font).
+
+       DejaVu Sans is commonly the only font that supports some
+       characters, such as "⇧", and even when other candidates are
+       available, DejaVu Sans is commonly first among them, because of
+       the way Fontconfig is ordinarily configured. For example, the
+       configuration in the Fontconfig source lists DejaVu Sans under the
+       sans-serif generic family, and appends sans-serif to patterns
+       that don't already include a generic family (such as the pattern
+       in gfx::GetFallbackFontForChar()).
+
+       To get the same fallback font in the layout tests, we could
+       duplicate this configuration here, or more directly, simply
+       append DejaVu Sans to all patterns. -->
+  <match target="pattern">
+    <edit name="family" mode="append_last">
+      <string>DejaVu Sans</string>
+    </edit>
+  </match>
+
+</fontconfig>
+)";
+
+}  // namespace
+
+void SetUpFontconfig() {
+  FilePath dir_module;
+  PathService::Get(DIR_MODULE, &dir_module);
+  FilePath font_cache = dir_module.Append("fontconfig_caches");
+  FilePath test_fonts = dir_module.Append("test_fonts");
+  std::string fonts_conf = ReplaceStringPlaceholders(
+      kFontsConfTemplate, {font_cache.value(), test_fonts.value()}, nullptr);
+
+  FcConfig* config = FcConfigCreate();
+  CHECK(config);
+#if FC_VERSION >= 21205
+  CHECK(FcConfigParseAndLoadFromMemory(
+      config, reinterpret_cast<const FcChar8*>(fonts_conf.c_str()), FcTrue));
+#else
+  FilePath temp;
+  CHECK(CreateTemporaryFile(&temp));
+  CHECK(WriteFile(temp, fonts_conf.c_str(), fonts_conf.size()));
+  CHECK(FcConfigParseAndLoad(
+      config, reinterpret_cast<const FcChar8*>(temp.value().c_str()), FcTrue));
+  CHECK(DeleteFile(temp, false));
+#endif
+  CHECK(FcConfigBuildFonts(config));
+  CHECK(FcConfigSetCurrent(config));
+
+  // Decrement the reference count for |config|.  It's now owned by fontconfig.
+  FcConfigDestroy(config);
+}
+
+void TearDownFontconfig() {
+  FcFini();
+}
+
+bool LoadFontIntoFontconfig(const FilePath& path) {
+  if (!PathExists(path)) {
+    LOG(ERROR) << "You are missing " << path.value() << ". Try re-running "
+               << "build/install-build-deps.sh. "
+               << "Please make sure that "
+               << "third_party/test_fonts/ has downloaded "
+               << "and extracted the test_fonts."
+               << "Also see "
+               << "https://chromium.googlesource.com/chromium/src/+/master/"
+               << "docs/layout_tests_linux.md";
+    return false;
+  }
+
+  if (!FcConfigAppFontAddFile(
+          NULL, reinterpret_cast<const FcChar8*>(path.value().c_str()))) {
+    LOG(ERROR) << "Failed to load font " << path.value();
+    return false;
+  }
+
+  return true;
+}
+
+bool LoadConfigFileIntoFontconfig(const FilePath& path) {
+  // Unlike other FcConfig functions, FcConfigParseAndLoad() doesn't default to
+  // the current config when passed NULL, so the current config must be passed
+  // explicitly.
+  if (!FcConfigParseAndLoad(
+          FcConfigGetCurrent(),
+          reinterpret_cast<const FcChar8*>(path.value().c_str()), FcTrue)) {
+    LOG(ERROR) << "Fontconfig failed to load " << path.value();
+    return false;
+  }
+  return true;
+}
+
+bool LoadConfigDataIntoFontconfig(const FilePath& temp_dir,
+                                  const std::string& data) {
+  FilePath path;
+  if (!CreateTemporaryFileInDir(temp_dir, &path)) {
+    PLOG(ERROR) << "Unable to create temporary file in " << temp_dir.value();
+    return false;
+  }
+  if (WriteFile(path, data.data(), data.size()) !=
+      static_cast<int>(data.size())) {
+    PLOG(ERROR) << "Unable to write config data to " << path.value();
+    return false;
+  }
+  return LoadConfigFileIntoFontconfig(path);
+}
+
+std::string CreateFontconfigEditStanza(const std::string& name,
+                                       const std::string& type,
+                                       const std::string& value) {
+  return StringPrintf(
+      "    <edit name=\"%s\" mode=\"assign\">\n"
+      "      <%s>%s</%s>\n"
+      "    </edit>\n",
+      name.c_str(), type.c_str(), value.c_str(), type.c_str());
+}
+
+std::string CreateFontconfigTestStanza(const std::string& name,
+                                       const std::string& op,
+                                       const std::string& type,
+                                       const std::string& value) {
+  return StringPrintf(
+      "    <test name=\"%s\" compare=\"%s\" qual=\"any\">\n"
+      "      <%s>%s</%s>\n"
+      "    </test>\n",
+      name.c_str(), op.c_str(), type.c_str(), value.c_str(), type.c_str());
+}
+
+std::string CreateFontconfigAliasStanza(const std::string& original_family,
+                                        const std::string& preferred_family) {
+  return StringPrintf(
+      "  <alias>\n"
+      "    <family>%s</family>\n"
+      "    <prefer><family>%s</family></prefer>\n"
+      "  </alias>\n",
+      original_family.c_str(), preferred_family.c_str());
+}
+
+}  // namespace base
diff --git a/base/test/fontconfig_util_linux.h b/base/test/fontconfig_util_linux.h
new file mode 100644
index 0000000..ac7037a
--- /dev/null
+++ b/base/test/fontconfig_util_linux.h
@@ -0,0 +1,51 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_FONTCONFIG_UTIL_LINUX_H_
+#define BASE_TEST_FONTCONFIG_UTIL_LINUX_H_
+
+#include <stddef.h>
+
+#include <string>
+
+namespace base {
+class FilePath;
+
+// Initializes Fontconfig with a custom configuration suitable for tests.
+void SetUpFontconfig();
+
+// Deinitializes Fontconfig.
+void TearDownFontconfig();
+
+// Loads the font file at |path| into the current config, returning true on
+// success.
+bool LoadFontIntoFontconfig(const FilePath& path);
+
+// Instructs Fontconfig to load |path|, an XML configuration file, into the
+// current config, returning true on success.
+bool LoadConfigFileIntoFontconfig(const FilePath& path);
+
+// Writes |data| to a file in |temp_dir| and passes it to
+// LoadConfigFileIntoFontconfig().
+bool LoadConfigDataIntoFontconfig(const FilePath& temp_dir,
+                                  const std::string& data);
+
+// Returns a Fontconfig <edit> stanza.
+std::string CreateFontconfigEditStanza(const std::string& name,
+                                       const std::string& type,
+                                       const std::string& value);
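+
+// For example (illustrative), CreateFontconfigEditStanza("antialias", "bool",
+// "false") produces:
+//     <edit name="antialias" mode="assign">
+//       <bool>false</bool>
+//     </edit>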
+
+// Returns a Fontconfig <test> stanza.
+std::string CreateFontconfigTestStanza(const std::string& name,
+                                       const std::string& op,
+                                       const std::string& type,
+                                       const std::string& value);
+
+// Returns a Fontconfig <alias> stanza.
+std::string CreateFontconfigAliasStanza(const std::string& original_family,
+                                        const std::string& preferred_family);
+
+}  // namespace base
+
+#endif  // BASE_TEST_FONTCONFIG_UTIL_LINUX_H_
diff --git a/base/test/fuzzed_data_provider.cc b/base/test/fuzzed_data_provider.cc
new file mode 100644
index 0000000..b2d443a
--- /dev/null
+++ b/base/test/fuzzed_data_provider.cc
@@ -0,0 +1,98 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/fuzzed_data_provider.h"
+
+#include <algorithm>
+#include <limits>
+
+#include "base/logging.h"
+
+namespace base {
+
+FuzzedDataProvider::FuzzedDataProvider(const uint8_t* data, size_t size)
+    : remaining_data_(reinterpret_cast<const char*>(data), size) {}
+
+FuzzedDataProvider::~FuzzedDataProvider() = default;
+
+std::string FuzzedDataProvider::ConsumeBytes(size_t num_bytes) {
+  num_bytes = std::min(num_bytes, remaining_data_.length());
+  StringPiece result(remaining_data_.data(), num_bytes);
+  remaining_data_ = remaining_data_.substr(num_bytes);
+  return result.as_string();
+}
+
+std::string FuzzedDataProvider::ConsumeRemainingBytes() {
+  return ConsumeBytes(remaining_data_.length());
+}
+
+uint32_t FuzzedDataProvider::ConsumeUint32InRange(uint32_t min, uint32_t max) {
+  CHECK_LE(min, max);
+
+  uint32_t range = max - min;
+  uint32_t offset = 0;
+  uint32_t result = 0;
+
+  while (offset < 32 && (range >> offset) > 0 && !remaining_data_.empty()) {
+    // Pull bytes off the end of the seed data. Experimentally, this seems to
+    // allow the fuzzer to more easily explore the input space. This makes
+    // sense, since it works by modifying inputs that caused new code to run,
+    // and this data is often used to encode length of data read by
+    // ConsumeBytes. Separating out read lengths makes it easier to modify the
+    // contents of the data that is actually read.
+    uint8_t next_byte = remaining_data_.back();
+    remaining_data_.remove_suffix(1);
+    result = (result << 8) | next_byte;
+    offset += 8;
+  }
+
+  // Avoid division by 0 in the case where |range + 1| overflows to 0.
+  if (range == std::numeric_limits<uint32_t>::max())
+    return result;
+
+  return min + result % (range + 1);
+}
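+
+// Worked example (illustrative): for ConsumeUint32InRange(10, 19) with 0x01 as
+// the last remaining byte, one byte is consumed (the range of 9 fits in 8
+// bits), giving result == 1 and a return value of 10 + 1 % 10 == 11.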
+
+std::string FuzzedDataProvider::ConsumeRandomLengthString(size_t max_length) {
+  // Reads bytes from start of |remaining_data_|. Maps "\\" to "\", and maps "\"
+  // followed by anything else to the end of the string. As a result of this
+  // logic, a fuzzer can insert characters into the string, and the string will
+  // be lengthened to include those new characters, resulting in a more stable
+  // fuzzer than picking the length of a string independently from picking its
+  // contents.
+  std::string out;
+  for (size_t i = 0; i < max_length && !remaining_data_.empty(); ++i) {
+    char next = remaining_data_[0];
+    remaining_data_.remove_prefix(1);
+    if (next == '\\' && !remaining_data_.empty()) {
+      next = remaining_data_[0];
+      remaining_data_.remove_prefix(1);
+      if (next != '\\')
+        return out;
+    }
+    out += next;
+  }
+  return out;
+}
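+
+// Escaping example for the above (illustrative): input bytes "ab\\cd" (a
+// doubled backslash) produce "ab\cd" and consume all six bytes, while
+// "ab\Xcd" stops at the escape and returns "ab", leaving "cd" unconsumed.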
+
+int FuzzedDataProvider::ConsumeInt32InRange(int min, int max) {
+  CHECK_LE(min, max);
+
+  uint32_t range = max - min;
+  return min + ConsumeUint32InRange(0, range);
+}
+
+bool FuzzedDataProvider::ConsumeBool() {
+  return (ConsumeUint8() & 0x01) == 0x01;
+}
+
+uint8_t FuzzedDataProvider::ConsumeUint8() {
+  return ConsumeUint32InRange(0, 0xFF);
+}
+
+uint16_t FuzzedDataProvider::ConsumeUint16() {
+  return ConsumeUint32InRange(0, 0xFFFF);
+}
+
+}  // namespace base
diff --git a/base/test/fuzzed_data_provider.h b/base/test/fuzzed_data_provider.h
new file mode 100644
index 0000000..425c820
--- /dev/null
+++ b/base/test/fuzzed_data_provider.h
@@ -0,0 +1,80 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_FUZZED_DATA_PROVIDER_H_
+#define BASE_TEST_FUZZED_DATA_PROVIDER_H_
+
+#include <stdint.h>
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+// Utility class to break up fuzzer input for multiple consumers. Whenever run
+// on the same input, provides the same output, as long as its methods are
+// called in the same order, with the same arguments.
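+//
+// Usage sketch (assumed libFuzzer target, not part of this header):
+//   extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+//     base::FuzzedDataProvider provider(data, size);
+//     bool use_flag = provider.ConsumeBool();
+//     std::string payload = provider.ConsumeRemainingBytes();
+//     // ... feed |use_flag| and |payload| to the code under test ...
+//     return 0;
+//   }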
+class FuzzedDataProvider {
+ public:
+  // |data| is an array of length |size| that the FuzzedDataProvider wraps to
+  // provide more granular access. |data| must outlive the FuzzedDataProvider.
+  FuzzedDataProvider(const uint8_t* data, size_t size);
+  ~FuzzedDataProvider();
+
+  // Returns a std::string containing |num_bytes| of input data. If fewer than
+  // |num_bytes| of data remain, returns a shorter std::string containing all
+  // of the data that's left.
+  std::string ConsumeBytes(size_t num_bytes);
+
+  // Returns a std::string containing all remaining bytes of the input data.
+  std::string ConsumeRemainingBytes();
+
+  // Returns a std::string of length from 0 to |max_length|. When it runs out of
+  // input data, returns what remains of the input. Designed to be more stable
+  // with respect to a fuzzer inserting characters than just picking a random
+  // length and then consuming that many bytes with ConsumeBytes().
+  std::string ConsumeRandomLengthString(size_t max_length);
+
+  // Returns a number in the range [min, max] by consuming bytes from the input
+  // data. The value might not be uniformly distributed in the given range. If
+  // there's no input data left, always returns |min|. |min| must be less than
+  // or equal to |max|.
+  uint32_t ConsumeUint32InRange(uint32_t min, uint32_t max);
+  int ConsumeInt32InRange(int min, int max);
+
+  // Returns a bool, or false when no data remains.
+  bool ConsumeBool();
+
+  // Returns a uint8_t from the input or 0 if nothing remains. This is
+  // equivalent to ConsumeUint32InRange(0, 0xFF).
+  uint8_t ConsumeUint8();
+
+  // Returns a uint16_t from the input. If fewer than 2 bytes of data remain,
+  // the most significant bytes are filled with 0. This is equivalent to
+  // ConsumeUint32InRange(0, 0xFFFF).
+  uint16_t ConsumeUint16();
+
+  // Returns a value from |array|, consuming as many bytes as needed to do so.
+  // |array| must be a fixed-size array. Equivalent to
+  // array[ConsumeUint32InRange(0, size - 1)].
+  template <typename Type, size_t size>
+  Type PickValueInArray(Type (&array)[size]) {
+    return array[ConsumeUint32InRange(0, size - 1)];
+  }
+
+  // Reports the remaining bytes available for fuzzed input.
+  size_t remaining_bytes() { return remaining_data_.length(); }
+
+ private:
+  StringPiece remaining_data_;
+
+  DISALLOW_COPY_AND_ASSIGN(FuzzedDataProvider);
+};
+
+}  // namespace base
+
+#endif  // BASE_TEST_FUZZED_DATA_PROVIDER_H_
diff --git a/base/test/generate_fontconfig_caches.cc b/base/test/generate_fontconfig_caches.cc
new file mode 100644
index 0000000..f12eb48
--- /dev/null
+++ b/base/test/generate_fontconfig_caches.cc
@@ -0,0 +1,24 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string>
+
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/path_service.h"
+#include "base/test/fontconfig_util_linux.h"
+
+int main(void) {
+  base::SetUpFontconfig();
+  base::TearDownFontconfig();
+
+  base::FilePath dir_module;
+  CHECK(base::PathService::Get(base::DIR_MODULE, &dir_module));
+  base::FilePath fontconfig_caches = dir_module.Append("fontconfig_caches");
+  CHECK(base::DirectoryExists(fontconfig_caches));
+  base::FilePath stamp = fontconfig_caches.Append("STAMP");
+  CHECK_EQ(0, base::WriteFile(stamp, "", 0));
+
+  return 0;
+}
diff --git a/base/test/gtest_util.cc b/base/test/gtest_util.cc
new file mode 100644
index 0000000..e5d38f4
--- /dev/null
+++ b/base/test/gtest_util.cc
@@ -0,0 +1,111 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/gtest_util.h"
+
+#include <stddef.h>
+
+#include <memory>
+
+#include "base/files/file_path.h"
+#include "base/json/json_file_value_serializer.h"
+#include "base/strings/string_util.h"
+#include "base/values.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TestIdentifier::TestIdentifier() = default;
+
+TestIdentifier::TestIdentifier(const TestIdentifier& other) = default;
+
+std::string FormatFullTestName(const std::string& test_case_name,
+                               const std::string& test_name) {
+  return test_case_name + "." + test_name;
+}
+
+std::string TestNameWithoutDisabledPrefix(const std::string& full_test_name) {
+  std::string test_name_no_disabled(full_test_name);
+  ReplaceSubstringsAfterOffset(&test_name_no_disabled, 0, "DISABLED_", "");
+  return test_name_no_disabled;
+}
+
+std::vector<TestIdentifier> GetCompiledInTests() {
+  testing::UnitTest* const unit_test = testing::UnitTest::GetInstance();
+
+  std::vector<TestIdentifier> tests;
+  for (int i = 0; i < unit_test->total_test_case_count(); ++i) {
+    const testing::TestCase* test_case = unit_test->GetTestCase(i);
+    for (int j = 0; j < test_case->total_test_count(); ++j) {
+      const testing::TestInfo* test_info = test_case->GetTestInfo(j);
+      TestIdentifier test_data;
+      test_data.test_case_name = test_case->name();
+      test_data.test_name = test_info->name();
+      test_data.file = test_info->file();
+      test_data.line = test_info->line();
+      tests.push_back(test_data);
+    }
+  }
+  return tests;
+}
+
+bool WriteCompiledInTestsToFile(const FilePath& path) {
+  std::vector<TestIdentifier> tests(GetCompiledInTests());
+
+  ListValue root;
+  for (size_t i = 0; i < tests.size(); ++i) {
+    std::unique_ptr<DictionaryValue> test_info(new DictionaryValue);
+    test_info->SetString("test_case_name", tests[i].test_case_name);
+    test_info->SetString("test_name", tests[i].test_name);
+    test_info->SetString("file", tests[i].file);
+    test_info->SetInteger("line", tests[i].line);
+    root.Append(std::move(test_info));
+  }
+
+  JSONFileValueSerializer serializer(path);
+  return serializer.Serialize(root);
+}
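+
+// The serialized output is a JSON list of objects, e.g. (illustrative values):
+//   [ { "test_case_name": "FooTest", "test_name": "Bar",
+//       "file": "foo_unittest.cc", "line": 42 } ]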
+
+bool ReadTestNamesFromFile(const FilePath& path,
+                           std::vector<TestIdentifier>* output) {
+  JSONFileValueDeserializer deserializer(path);
+  int error_code = 0;
+  std::string error_message;
+  std::unique_ptr<base::Value> value =
+      deserializer.Deserialize(&error_code, &error_message);
+  if (!value.get())
+    return false;
+
+  base::ListValue* tests = nullptr;
+  if (!value->GetAsList(&tests))
+    return false;
+
+  std::vector<base::TestIdentifier> result;
+  for (base::ListValue::iterator i = tests->begin(); i != tests->end(); ++i) {
+    base::DictionaryValue* test = nullptr;
+    if (!i->GetAsDictionary(&test))
+      return false;
+
+    TestIdentifier test_data;
+
+    if (!test->GetStringASCII("test_case_name", &test_data.test_case_name))
+      return false;
+
+    if (!test->GetStringASCII("test_name", &test_data.test_name))
+      return false;
+
+    if (!test->GetStringASCII("file", &test_data.file))
+      return false;
+
+    if (!test->GetInteger("line", &test_data.line))
+      return false;
+
+    result.push_back(test_data);
+  }
+
+  output->swap(result);
+  return true;
+}
+
+}  // namespace base
diff --git a/base/test/gtest_util.h b/base/test/gtest_util.h
new file mode 100644
index 0000000..df2bce9
--- /dev/null
+++ b/base/test/gtest_util.h
@@ -0,0 +1,84 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_GTEST_UTIL_H_
+#define BASE_TEST_GTEST_UTIL_H_
+
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// EXPECT/ASSERT_DCHECK_DEATH is intended to replace EXPECT/ASSERT_DEBUG_DEATH
+// when the death is expected to be caused by a DCHECK. Contrary to
+// EXPECT/ASSERT_DEBUG_DEATH however, it doesn't execute the statement in non-
+// dcheck builds as DCHECKs are intended to catch things that should never
+// happen and as such executing the statement results in undefined behavior
+// (|statement| is compiled in unsupported configurations nonetheless).
+// Death tests misbehave on Android.
+#if DCHECK_IS_ON() && defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
+
+// EXPECT/ASSERT_DCHECK_DEATH tests verify that a DCHECK is hit ("Check failed"
+// is part of the error message), but intentionally do not expose the gtest
+// death test's full |regex| parameter to avoid users having to verify the exact
+// syntax of the error message produced by the DCHECK.
+#define EXPECT_DCHECK_DEATH(statement) EXPECT_DEATH(statement, "Check failed")
+#define ASSERT_DCHECK_DEATH(statement) ASSERT_DEATH(statement, "Check failed")
+
+#else
+// DCHECK_IS_ON() && defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
+
+#define EXPECT_DCHECK_DEATH(statement) \
+    GTEST_UNSUPPORTED_DEATH_TEST(statement, "Check failed", )
+#define ASSERT_DCHECK_DEATH(statement) \
+    GTEST_UNSUPPORTED_DEATH_TEST(statement, "Check failed", return)
+
+#endif
+// DCHECK_IS_ON() && defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
+
+namespace base {
+
+class FilePath;
+
+struct TestIdentifier {
+  TestIdentifier();
+  TestIdentifier(const TestIdentifier& other);
+
+  std::string test_case_name;
+  std::string test_name;
+  std::string file;
+  int line;
+};
+
+// Constructs a full test name given a test case name and a test name,
+// e.g. for test case "A" and test name "B" returns "A.B".
+std::string FormatFullTestName(const std::string& test_case_name,
+                               const std::string& test_name);
+
+// Returns the full test name with the "DISABLED_" prefix stripped out.
+// e.g. for the full test names "A.DISABLED_B", "DISABLED_A.B", and
+// "DISABLED_A.DISABLED_B", returns "A.B".
+std::string TestNameWithoutDisabledPrefix(const std::string& full_test_name);
+
+// Returns a vector of gtest-based tests compiled into the
+// current executable.
+std::vector<TestIdentifier> GetCompiledInTests();
+
+// Writes the list of gtest-based tests compiled into the
+// current executable as a JSON file. Returns true on success.
+bool WriteCompiledInTestsToFile(const FilePath& path) WARN_UNUSED_RESULT;
+
+// Reads the list of gtest-based tests from |path| into |output|.
+// Returns true on success.
+bool ReadTestNamesFromFile(
+    const FilePath& path,
+    std::vector<TestIdentifier>* output) WARN_UNUSED_RESULT;
+
+}  // namespace base
+
+#endif  // BASE_TEST_GTEST_UTIL_H_
diff --git a/base/test/gtest_xml_unittest_result_printer.cc b/base/test/gtest_xml_unittest_result_printer.cc
new file mode 100644
index 0000000..558a986
--- /dev/null
+++ b/base/test/gtest_xml_unittest_result_printer.cc
@@ -0,0 +1,162 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/gtest_xml_unittest_result_printer.h"
+
+#include <algorithm>
+#include <cstdlib>
+
+#include "base/base64.h"
+#include "base/command_line.h"
+#include "base/files/file_util.h"
+#include "base/logging.h"
+#include "base/test/test_switches.h"
+#include "base/time/time.h"
+
+namespace base {
+
+namespace {
+const int kDefaultTestPartResultsLimit = 10;
+
+const char kTestPartResultsLimitExceeded[] =
+    "Test part results limit exceeded. Use "
+    "--test-launcher-test-part-results-limit to increase or disable limit.";
+}  // namespace
+
+XmlUnitTestResultPrinter::XmlUnitTestResultPrinter()
+    : output_file_(nullptr), open_failed_(false) {}
+
+XmlUnitTestResultPrinter::~XmlUnitTestResultPrinter() {
+  if (output_file_ && !open_failed_) {
+    fprintf(output_file_, "</testsuites>\n");
+    fflush(output_file_);
+    CloseFile(output_file_);
+  }
+}
+
+bool XmlUnitTestResultPrinter::Initialize(const FilePath& output_file_path) {
+  DCHECK(!output_file_);
+  output_file_ = OpenFile(output_file_path, "w");
+  if (!output_file_) {
+    // If the file open fails, point the output at stderr. In current usage
+    // our caller CHECKs the result of this function, and that CHECK emits a
+    // LogMessage which comes back to this object, causing a (double) crash.
+    // By pointing at stderr, there might be some indication of what's going
+    // wrong. See https://crbug.com/736783.
+    output_file_ = stderr;
+    open_failed_ = true;
+    return false;
+  }
+
+  fprintf(output_file_,
+          "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<testsuites>\n");
+  fflush(output_file_);
+
+  return true;
+}
+
+void XmlUnitTestResultPrinter::OnAssert(const char* file,
+                                        int line,
+                                        const std::string& summary,
+                                        const std::string& message) {
+  WriteTestPartResult(file, line, testing::TestPartResult::kFatalFailure,
+                      summary, message);
+}
+
+void XmlUnitTestResultPrinter::OnTestCaseStart(
+    const testing::TestCase& test_case) {
+  fprintf(output_file_, "  <testsuite>\n");
+  fflush(output_file_);
+}
+
+void XmlUnitTestResultPrinter::OnTestStart(
+    const testing::TestInfo& test_info) {
+  // This is our custom extension - it helps to recognize which test was
+  // running when the test binary crashed. Note that we cannot even open the
+  // <testcase> tag here - its attributes, e.g. the run time of the test, are
+  // only known once the test ends.
+  fprintf(output_file_,
+          "    <x-teststart name=\"%s\" classname=\"%s\" />\n",
+          test_info.name(),
+          test_info.test_case_name());
+  fflush(output_file_);
+}
+
+void XmlUnitTestResultPrinter::OnTestEnd(const testing::TestInfo& test_info) {
+  fprintf(output_file_,
+          "    <testcase name=\"%s\" status=\"run\" time=\"%.3f\""
+          " classname=\"%s\">\n",
+          test_info.name(),
+          static_cast<double>(test_info.result()->elapsed_time()) /
+              Time::kMillisecondsPerSecond,
+          test_info.test_case_name());
+  if (test_info.result()->Failed()) {
+    fprintf(output_file_,
+            "      <failure message=\"\" type=\"\"></failure>\n");
+  }
+
+  int limit = test_info.result()->total_part_count();
+  if (CommandLine::ForCurrentProcess()->HasSwitch(
+          switches::kTestLauncherTestPartResultsLimit)) {
+    std::string limit_str =
+        CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
+            switches::kTestLauncherTestPartResultsLimit);
+    int test_part_results_limit = std::strtol(limit_str.c_str(), nullptr, 10);
+    if (test_part_results_limit >= 0)
+      limit = std::min(limit, test_part_results_limit);
+  } else {
+    limit = std::min(limit, kDefaultTestPartResultsLimit);
+  }
+
+  for (int i = 0; i < limit; ++i) {
+    const auto& test_part_result = test_info.result()->GetTestPartResult(i);
+    WriteTestPartResult(test_part_result.file_name(),
+                        test_part_result.line_number(), test_part_result.type(),
+                        test_part_result.summary(), test_part_result.message());
+  }
+
+  if (test_info.result()->total_part_count() > limit) {
+    WriteTestPartResult(
+        "<unknown>", 0, testing::TestPartResult::kNonFatalFailure,
+        kTestPartResultsLimitExceeded, kTestPartResultsLimitExceeded);
+  }
+
+  fprintf(output_file_, "    </testcase>\n");
+  fflush(output_file_);
+}
+
+void XmlUnitTestResultPrinter::OnTestCaseEnd(
+    const testing::TestCase& test_case) {
+  fprintf(output_file_, "  </testsuite>\n");
+  fflush(output_file_);
+}
+
+void XmlUnitTestResultPrinter::WriteTestPartResult(
+    const char* file,
+    int line,
+    testing::TestPartResult::Type result_type,
+    const std::string& summary,
+    const std::string& message) {
+  const char* type = "unknown";
+  switch (result_type) {
+    case testing::TestPartResult::kSuccess:
+      type = "success";
+      break;
+    case testing::TestPartResult::kNonFatalFailure:
+      type = "failure";
+      break;
+    case testing::TestPartResult::kFatalFailure:
+      type = "fatal_failure";
+      break;
+  }
+  std::string summary_encoded;
+  Base64Encode(summary, &summary_encoded);
+  std::string message_encoded;
+  Base64Encode(message, &message_encoded);
+  fprintf(output_file_,
+          "      <x-test-result-part type=\"%s\" file=\"%s\" line=\"%d\">\n"
+          "        <summary>%s</summary>\n"
+          "        <message>%s</message>\n"
+          "      </x-test-result-part>\n",
+          type, file, line, summary_encoded.c_str(), message_encoded.c_str());
+  fflush(output_file_);
+}
+
+}  // namespace base
diff --git a/base/test/gtest_xml_unittest_result_printer.h b/base/test/gtest_xml_unittest_result_printer.h
new file mode 100644
index 0000000..9340382
--- /dev/null
+++ b/base/test/gtest_xml_unittest_result_printer.h
@@ -0,0 +1,55 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_GTEST_XML_UNITTEST_RESULT_PRINTER_H_
+#define BASE_TEST_GTEST_XML_UNITTEST_RESULT_PRINTER_H_
+
+#include <stdio.h>
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+class FilePath;
+
+// Generates an XML output file. Format is very close to GTest, but has
+// extensions needed by the test launcher.
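+//
+// Sketch of the emitted shape (values elided; see the .cc for details):
+//   <testsuites>
+//     <testsuite>
+//       <x-teststart name="..." classname="..." />
+//       <testcase name="..." status="run" time="..." classname="...">
+//         <x-test-result-part type="..." file="..." line="...">
+//           <summary>(base64)</summary>
+//           <message>(base64)</message>
+//         </x-test-result-part>
+//       </testcase>
+//     </testsuite>
+//   </testsuites>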
+class XmlUnitTestResultPrinter : public testing::EmptyTestEventListener {
+ public:
+  XmlUnitTestResultPrinter();
+  ~XmlUnitTestResultPrinter() override;
+
+  // Must be called before adding as a listener. Returns true on success.
+  bool Initialize(const FilePath& output_file_path) WARN_UNUSED_RESULT;
+
+  // CHECK/DCHECK failed. Print file/line and message to the XML.
+  void OnAssert(const char* file,
+                int line,
+                const std::string& summary,
+                const std::string& message);
+
+ private:
+  // testing::EmptyTestEventListener:
+  void OnTestCaseStart(const testing::TestCase& test_case) override;
+  void OnTestStart(const testing::TestInfo& test_info) override;
+  void OnTestEnd(const testing::TestInfo& test_info) override;
+  void OnTestCaseEnd(const testing::TestCase& test_case) override;
+
+  void WriteTestPartResult(const char* file,
+                           int line,
+                           testing::TestPartResult::Type type,
+                           const std::string& summary,
+                           const std::string& message);
+
+  FILE* output_file_;
+  bool open_failed_;
+
+  DISALLOW_COPY_AND_ASSIGN(XmlUnitTestResultPrinter);
+};
+
+}  // namespace base
+
+#endif  // BASE_TEST_GTEST_XML_UNITTEST_RESULT_PRINTER_H_
diff --git a/base/test/gtest_xml_util.cc b/base/test/gtest_xml_util.cc
new file mode 100644
index 0000000..37104e8
--- /dev/null
+++ b/base/test/gtest_xml_util.cc
@@ -0,0 +1,234 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/gtest_xml_util.h"
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#include "base/base64.h"
+#include "base/files/file_util.h"
+#include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/stringprintf.h"
+#include "base/test/gtest_util.h"
+#include "base/test/launcher/test_launcher.h"
+#include "third_party/libxml/chromium/libxml_utils.h"
+
+namespace base {
+
+namespace {
+
+// This is used for the xml parser to report errors. This assumes the context
+// is a pointer to a std::string where the error message should be appended.
+static void XmlErrorFunc(void *context, const char *message, ...) {
+  va_list args;
+  va_start(args, message);
+  std::string* error = static_cast<std::string*>(context);
+  StringAppendV(error, message, args);
+  va_end(args);
+}
+
+}  // namespace
+
+bool ProcessGTestOutput(const base::FilePath& output_file,
+                        std::vector<TestResult>* results,
+                        bool* crashed) {
+  DCHECK(results);
+
+  std::string xml_contents;
+  if (!ReadFileToString(output_file, &xml_contents))
+    return false;
+
+  // Silence XML errors - otherwise they go to stderr.
+  std::string xml_errors;
+  ScopedXmlErrorFunc error_func(&xml_errors, &XmlErrorFunc);
+
+  XmlReader xml_reader;
+  if (!xml_reader.Load(xml_contents))
+    return false;
+
+  enum {
+    STATE_INIT,
+    STATE_TESTSUITE,
+    STATE_TESTCASE,
+    STATE_TEST_RESULT,
+    STATE_FAILURE,
+    STATE_END,
+  } state = STATE_INIT;
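+
+  // Transition sketch (mirrors the switch below): INIT -> TESTSUITE on
+  // <testsuites>; TESTSUITE -> TESTCASE on <testsuite>, or -> END on
+  // </testsuites>; TESTCASE -> FAILURE on <failure>, -> TEST_RESULT on
+  // <x-test-result-part>, or back to TESTSUITE on </testsuite>; FAILURE and
+  // TEST_RESULT return to TESTCASE on their closing elements.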
+
+  while (xml_reader.Read()) {
+    xml_reader.SkipToElement();
+    std::string node_name(xml_reader.NodeName());
+
+    switch (state) {
+      case STATE_INIT:
+        if (node_name == "testsuites" && !xml_reader.IsClosingElement())
+          state = STATE_TESTSUITE;
+        else
+          return false;
+        break;
+      case STATE_TESTSUITE:
+        if (node_name == "testsuites" && xml_reader.IsClosingElement())
+          state = STATE_END;
+        else if (node_name == "testsuite" && !xml_reader.IsClosingElement())
+          state = STATE_TESTCASE;
+        else
+          return false;
+        break;
+      case STATE_TESTCASE:
+        if (node_name == "testsuite" && xml_reader.IsClosingElement()) {
+          state = STATE_TESTSUITE;
+        } else if (node_name == "x-teststart" &&
+                   !xml_reader.IsClosingElement()) {
+          // This is our custom extension that helps recognize which test was
+          // running when the test binary crashed.
+          TestResult result;
+
+          std::string test_case_name;
+          if (!xml_reader.NodeAttribute("classname", &test_case_name))
+            return false;
+          std::string test_name;
+          if (!xml_reader.NodeAttribute("name", &test_name))
+            return false;
+          result.full_name = FormatFullTestName(test_case_name, test_name);
+
+          result.elapsed_time = TimeDelta();
+
+          // Assume the test crashed - we can correct that later.
+          result.status = TestResult::TEST_CRASH;
+
+          results->push_back(result);
+        } else if (node_name == "testcase" && !xml_reader.IsClosingElement()) {
+          std::string test_status;
+          if (!xml_reader.NodeAttribute("status", &test_status))
+            return false;
+
+          if (test_status != "run" && test_status != "notrun")
+            return false;
+          if (test_status != "run")
+            break;
+
+          TestResult result;
+
+          std::string test_case_name;
+          if (!xml_reader.NodeAttribute("classname", &test_case_name))
+            return false;
+          std::string test_name;
+          if (!xml_reader.NodeAttribute("name", &test_name))
+            return false;
+          result.full_name = test_case_name + "." + test_name;
+
+          std::string test_time_str;
+          if (!xml_reader.NodeAttribute("time", &test_time_str))
+            return false;
+          result.elapsed_time = TimeDelta::FromMicroseconds(
+              static_cast<int64_t>(strtod(test_time_str.c_str(), nullptr) *
+                                   Time::kMicrosecondsPerSecond));
+
+          result.status = TestResult::TEST_SUCCESS;
+
+          if (!results->empty() &&
+              results->back().full_name == result.full_name &&
+              results->back().status == TestResult::TEST_CRASH) {
+            // Erase the fail-safe "crashed" result - now we know the test did
+            // not crash.
+            results->pop_back();
+          }
+
+          results->push_back(result);
+        } else if (node_name == "failure" && !xml_reader.IsClosingElement()) {
+          std::string failure_message;
+          if (!xml_reader.NodeAttribute("message", &failure_message))
+            return false;
+
+          DCHECK(!results->empty());
+          results->back().status = TestResult::TEST_FAILURE;
+
+          state = STATE_FAILURE;
+        } else if (node_name == "testcase" && xml_reader.IsClosingElement()) {
+          // Deliberately empty.
+        } else if (node_name == "x-test-result-part" &&
+                   !xml_reader.IsClosingElement()) {
+          std::string result_type;
+          if (!xml_reader.NodeAttribute("type", &result_type))
+            return false;
+
+          std::string file_name;
+          if (!xml_reader.NodeAttribute("file", &file_name))
+            return false;
+
+          std::string line_number_str;
+          if (!xml_reader.NodeAttribute("line", &line_number_str))
+            return false;
+
+          int line_number;
+          if (!StringToInt(line_number_str, &line_number))
+            return false;
+
+          TestResultPart::Type type;
+          if (!TestResultPart::TypeFromString(result_type, &type))
+            return false;
+
+          TestResultPart test_result_part;
+          test_result_part.type = type;
+          test_result_part.file_name = file_name;
+          test_result_part.line_number = line_number;
+          DCHECK(!results->empty());
+          results->back().test_result_parts.push_back(test_result_part);
+
+          state = STATE_TEST_RESULT;
+        } else {
+          return false;
+        }
+        break;
+      case STATE_TEST_RESULT:
+        if (node_name == "summary" && !xml_reader.IsClosingElement()) {
+          std::string summary;
+          if (!xml_reader.ReadElementContent(&summary))
+            return false;
+
+          if (!Base64Decode(summary, &summary))
+            return false;
+
+          DCHECK(!results->empty());
+          DCHECK(!results->back().test_result_parts.empty());
+          results->back().test_result_parts.back().summary = summary;
+        } else if (node_name == "summary" && xml_reader.IsClosingElement()) {
+        } else if (node_name == "message" && !xml_reader.IsClosingElement()) {
+          std::string message;
+          if (!xml_reader.ReadElementContent(&message))
+            return false;
+
+          if (!Base64Decode(message, &message))
+            return false;
+
+          DCHECK(!results->empty());
+          DCHECK(!results->back().test_result_parts.empty());
+          results->back().test_result_parts.back().message = message;
+        } else if (node_name == "message" && xml_reader.IsClosingElement()) {
+        } else if (node_name == "x-test-result-part" &&
+                   xml_reader.IsClosingElement()) {
+          state = STATE_TESTCASE;
+        } else {
+          return false;
+        }
+        break;
+      case STATE_FAILURE:
+        if (node_name == "failure" && xml_reader.IsClosingElement())
+          state = STATE_TESTCASE;
+        else
+          return false;
+        break;
+      case STATE_END:
+        // If we are here and there are still XML elements, the file has wrong
+        // format.
+        return false;
+    }
+  }
+
+  *crashed = (state != STATE_END);
+  return true;
+}
+
+}  // namespace base
diff --git a/base/test/gtest_xml_util.h b/base/test/gtest_xml_util.h
new file mode 100644
index 0000000..b023f80
--- /dev/null
+++ b/base/test/gtest_xml_util.h
@@ -0,0 +1,27 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_GTEST_XML_UTIL_H_
+#define BASE_TEST_GTEST_XML_UTIL_H_
+
+#include <vector>
+
+#include "base/compiler_specific.h"
+
+namespace base {
+
+class FilePath;
+struct TestResult;
+
+// Produces a vector of test results based on a GTest output file.
+// Returns true iff the output file exists and has been successfully parsed.
+// On successful return |crashed| is set to true if the test results
+// are valid but incomplete.
+bool ProcessGTestOutput(const base::FilePath& output_file,
+                        std::vector<TestResult>* results,
+                        bool* crashed) WARN_UNUSED_RESULT;
+
+}  // namespace base
+
+#endif  // BASE_TEST_GTEST_XML_UTIL_H_
diff --git a/base/test/histogram_tester.cc b/base/test/histogram_tester.cc
new file mode 100644
index 0000000..2a63b8c
--- /dev/null
+++ b/base/test/histogram_tester.cc
@@ -0,0 +1,200 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/histogram_tester.h"
+
+#include <stddef.h>
+
+#include "base/metrics/histogram.h"
+#include "base/metrics/histogram_samples.h"
+#include "base/metrics/metrics_hashes.h"
+#include "base/metrics/sample_map.h"
+#include "base/metrics/statistics_recorder.h"
+#include "base/strings/string_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+HistogramTester::HistogramTester() {
+  // Record any histogram data that exists when the object is created so it can
+  // be subtracted later.
+  for (const auto* const histogram : StatisticsRecorder::GetHistograms()) {
+    histograms_snapshot_[histogram->histogram_name()] =
+        histogram->SnapshotSamples();
+  }
+}
+
+HistogramTester::~HistogramTester() = default;
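+
+// Usage sketch (assumed test code, not part of this file):
+//   base::HistogramTester tester;  // Snapshots existing samples here.
+//   RecordSomethingThatLogsHistograms();  // Hypothetical code under test.
+//   tester.ExpectUniqueSample("My.Histogram", /*sample=*/1, /*count=*/1);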
+
+void HistogramTester::ExpectUniqueSample(
+    const std::string& name,
+    HistogramBase::Sample sample,
+    HistogramBase::Count expected_count) const {
+  HistogramBase* histogram = StatisticsRecorder::FindHistogram(name);
+  EXPECT_NE(nullptr, histogram) << "Histogram \"" << name
+                                << "\" does not exist.";
+
+  if (histogram) {
+    std::unique_ptr<HistogramSamples> samples = histogram->SnapshotSamples();
+    CheckBucketCount(name, sample, expected_count, *samples);
+    CheckTotalCount(name, expected_count, *samples);
+  }
+}
+
+void HistogramTester::ExpectBucketCount(
+    const std::string& name,
+    HistogramBase::Sample sample,
+    HistogramBase::Count expected_count) const {
+  HistogramBase* histogram = StatisticsRecorder::FindHistogram(name);
+  EXPECT_NE(nullptr, histogram) << "Histogram \"" << name
+                                << "\" does not exist.";
+
+  if (histogram) {
+    std::unique_ptr<HistogramSamples> samples = histogram->SnapshotSamples();
+    CheckBucketCount(name, sample, expected_count, *samples);
+  }
+}
+
+void HistogramTester::ExpectTotalCount(const std::string& name,
+                                       HistogramBase::Count count) const {
+  HistogramBase* histogram = StatisticsRecorder::FindHistogram(name);
+  if (histogram) {
+    std::unique_ptr<HistogramSamples> samples = histogram->SnapshotSamples();
+    CheckTotalCount(name, count, *samples);
+  } else {
+    // No histogram means there were zero samples.
+    EXPECT_EQ(count, 0) << "Histogram \"" << name << "\" does not exist.";
+  }
+}
+
+void HistogramTester::ExpectTimeBucketCount(const std::string& name,
+                                            TimeDelta sample,
+                                            HistogramBase::Count count) const {
+  ExpectBucketCount(name, sample.InMilliseconds(), count);
+}
+
+std::vector<Bucket> HistogramTester::GetAllSamples(
+    const std::string& name) const {
+  std::vector<Bucket> samples;
+  std::unique_ptr<HistogramSamples> snapshot =
+      GetHistogramSamplesSinceCreation(name);
+  if (snapshot) {
+    for (auto it = snapshot->Iterator(); !it->Done(); it->Next()) {
+      HistogramBase::Sample sample;
+      HistogramBase::Count count;
+      it->Get(&sample, nullptr, &count);
+      samples.push_back(Bucket(sample, count));
+    }
+  }
+  return samples;
+}
+
+HistogramBase::Count HistogramTester::GetBucketCount(
+    const std::string& name,
+    HistogramBase::Sample sample) const {
+  HistogramBase* histogram = StatisticsRecorder::FindHistogram(name);
+  EXPECT_NE(nullptr, histogram)
+      << "Histogram \"" << name << "\" does not exist.";
+  HistogramBase::Count count = 0;
+  if (histogram) {
+    std::unique_ptr<HistogramSamples> samples = histogram->SnapshotSamples();
+    GetBucketCountForSamples(name, sample, *samples, &count);
+  }
+  return count;
+}
+
+void HistogramTester::GetBucketCountForSamples(
+    const std::string& name,
+    HistogramBase::Sample sample,
+    const HistogramSamples& samples,
+    HistogramBase::Count* count) const {
+  *count = samples.GetCount(sample);
+  auto histogram_data = histograms_snapshot_.find(name);
+  if (histogram_data != histograms_snapshot_.end())
+    *count -= histogram_data->second->GetCount(sample);
+}
+
+HistogramTester::CountsMap HistogramTester::GetTotalCountsForPrefix(
+    const std::string& prefix) const {
+  EXPECT_TRUE(prefix.find('.') != std::string::npos)
+      << "|prefix| ought to contain at least one period, to avoid matching too"
+      << " many histograms.";
+
+  CountsMap result;
+
+  // Find candidate matches by using the logic built into GetSnapshot().
+  for (const HistogramBase* histogram : StatisticsRecorder::GetHistograms()) {
+    if (!StartsWith(histogram->histogram_name(), prefix,
+                    CompareCase::SENSITIVE)) {
+      continue;
+    }
+    std::unique_ptr<HistogramSamples> new_samples =
+        GetHistogramSamplesSinceCreation(histogram->histogram_name());
+    // Omit unchanged histograms from the result.
+    if (new_samples->TotalCount()) {
+      result[histogram->histogram_name()] = new_samples->TotalCount();
+    }
+  }
+  return result;
+}
+
+std::unique_ptr<HistogramSamples>
+HistogramTester::GetHistogramSamplesSinceCreation(
+    const std::string& histogram_name) const {
+  HistogramBase* histogram = StatisticsRecorder::FindHistogram(histogram_name);
+  // Whether the histogram exists or not may not depend on the current test
+  // calling this method, but rather on which tests ran before and possibly
+  // generated a histogram or not (see http://crbug.com/473689). To provide a
+  // response which is independent of the previously run tests, this method
+  // creates empty samples in the absence of the histogram, rather than
+  // returning null.
+  if (!histogram) {
+    return std::unique_ptr<HistogramSamples>(
+        new SampleMap(HashMetricName(histogram_name)));
+  }
+  std::unique_ptr<HistogramSamples> named_samples =
+      histogram->SnapshotSamples();
+  auto original_samples_it = histograms_snapshot_.find(histogram_name);
+  if (original_samples_it != histograms_snapshot_.end())
+    named_samples->Subtract(*original_samples_it->second.get());
+  return named_samples;
+}
+
+void HistogramTester::CheckBucketCount(const std::string& name,
+                                       HistogramBase::Sample sample,
+                                       HistogramBase::Count expected_count,
+                                       const HistogramSamples& samples) const {
+  int actual_count;
+  GetBucketCountForSamples(name, sample, samples, &actual_count);
+
+  EXPECT_EQ(expected_count, actual_count)
+      << "Histogram \"" << name
+      << "\" does not have the right number of samples (" << expected_count
+      << ") in the expected bucket (" << sample << "). It has (" << actual_count
+      << ").";
+}
+
+void HistogramTester::CheckTotalCount(const std::string& name,
+                                      HistogramBase::Count expected_count,
+                                      const HistogramSamples& samples) const {
+  int actual_count = samples.TotalCount();
+  auto histogram_data = histograms_snapshot_.find(name);
+  if (histogram_data != histograms_snapshot_.end())
+    actual_count -= histogram_data->second->TotalCount();
+
+  EXPECT_EQ(expected_count, actual_count)
+      << "Histogram \"" << name
+      << "\" does not have the right total number of samples ("
+      << expected_count << "). It has (" << actual_count << ").";
+}
+
+bool Bucket::operator==(const Bucket& other) const {
+  return min == other.min && count == other.count;
+}
+
+void PrintTo(const Bucket& bucket, std::ostream* os) {
+  *os << "Bucket " << bucket.min << ": " << bucket.count;
+}
+
+}  // namespace base
diff --git a/base/test/histogram_tester.h b/base/test/histogram_tester.h
new file mode 100644
index 0000000..8019931
--- /dev/null
+++ b/base/test/histogram_tester.h
@@ -0,0 +1,174 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_HISTOGRAM_TESTER_H_
+#define BASE_TEST_HISTOGRAM_TESTER_H_
+
+#include <map>
+#include <memory>
+#include <ostream>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "base/macros.h"
+#include "base/metrics/histogram.h"
+#include "base/metrics/histogram_base.h"
+#include "base/time/time.h"
+
+namespace base {
+
+struct Bucket;
+class HistogramSamples;
+
+// HistogramTester provides a simple interface for examining histograms, UMA
+// or otherwise. Tests can use this interface to verify that histogram data is
+// getting logged as intended.
+//
+// Note: When using this class from a browser test, one might have to call
+// SubprocessMetricsProvider::MergeHistogramDeltasForTesting() to sync the
+// histogram data between the renderer and browser processes. If it is in a
+// content browser test, then content::FetchHistogramsFromChildProcesses()
+// should be used to achieve that.
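+//
+// A minimal usage sketch (the histogram name is illustrative):
+//   base::HistogramTester histogram_tester;
+//   UMA_HISTOGRAM_BOOLEAN("MyMetric.Example", true);  // Code under test.
+//   histogram_tester.ExpectUniqueSample("MyMetric.Example", /*sample=*/1,
+//                                       /*expected_count=*/1);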
+class HistogramTester {
+ public:
+  using CountsMap = std::map<std::string, HistogramBase::Count>;
+
+  // Takes a snapshot of all current histograms counts.
+  HistogramTester();
+  ~HistogramTester();
+
+  // We know the exact number of samples in a bucket, and that no other bucket
+  // should have samples. Measures the diff from the snapshot taken when this
+  // object was constructed.
+  void ExpectUniqueSample(const std::string& name,
+                          HistogramBase::Sample sample,
+                          HistogramBase::Count expected_count) const;
+  template <typename T>
+  void ExpectUniqueSample(const std::string& name,
+                          T sample,
+                          HistogramBase::Count expected_count) const {
+    ExpectUniqueSample(name, static_cast<HistogramBase::Sample>(sample),
+                       expected_count);
+  }
+
+  // We know the exact number of samples in a bucket, but other buckets may
+  // have samples as well. Measures the diff from the snapshot taken when this
+  // object was constructed.
+  void ExpectBucketCount(const std::string& name,
+                         HistogramBase::Sample sample,
+                         HistogramBase::Count expected_count) const;
+  template <typename T>
+  void ExpectBucketCount(const std::string& name,
+                         T sample,
+                         HistogramBase::Count expected_count) const {
+    ExpectBucketCount(name, static_cast<HistogramBase::Sample>(sample),
+                      expected_count);
+  }
+
+  // We don't know the values of the samples, but we know how many there are.
+  // This measures the diff from the snapshot taken when this object was
+  // constructed.
+  void ExpectTotalCount(const std::string& name,
+                        HistogramBase::Count count) const;
+
+  // We know the exact number of samples for buckets corresponding to a time
+  // interval. Other intervals may have samples too.
+  void ExpectTimeBucketCount(const std::string& name,
+                             TimeDelta sample,
+                             HistogramBase::Count count) const;
+
+  // Returns a list of all of the buckets recorded since creation of this
+  // object, as vector<Bucket>, where the Bucket represents the min boundary of
+  // the bucket and the count of samples recorded to that bucket since creation.
+  //
+  // Example usage, using gMock:
+  //   EXPECT_THAT(histogram_tester.GetAllSamples("HistogramName"),
+  //               ElementsAre(Bucket(1, 5), Bucket(2, 10), Bucket(3, 5)));
+  //
+  // If you build the expected list programmatically, you can use ContainerEq:
+  //   EXPECT_THAT(histogram_tester.GetAllSamples("HistogramName"),
+  //               ContainerEq(expected_buckets));
+  //
+  // or EXPECT_EQ if you prefer not to depend on gMock, at the expense of a
+  // slightly less helpful failure message:
+  //   EXPECT_EQ(expected_buckets,
+  //             histogram_tester.GetAllSamples("HistogramName"));
+  std::vector<Bucket> GetAllSamples(const std::string& name) const;
+
+  // Returns the value of the |sample| bucket for the histogram |name|.
+  HistogramBase::Count GetBucketCount(const std::string& name,
+                                      HistogramBase::Sample sample) const;
+
+  // Finds histograms whose names start with |prefix|, and returns them along
+  // with the counts of any samples added since the creation of this object.
+  // Histograms that are unchanged are omitted from the result. The return value
+  // is a map whose keys are the histogram name, and whose values are the sample
+  // count.
+  //
+  // This is useful for cases where the code under test is choosing among a
+  // family of related histograms and incrementing one of them. Typically you
+  // should pass the result of this function directly to EXPECT_THAT.
+  //
+  // Example usage, using gmock (which produces better failure messages):
+  //   #include "testing/gmock/include/gmock/gmock.h"
+  // ...
+  //   base::HistogramTester::CountsMap expected_counts;
+  //   expected_counts["MyMetric.A"] = 1;
+  //   expected_counts["MyMetric.B"] = 1;
+  //   EXPECT_THAT(histogram_tester.GetTotalCountsForPrefix("MyMetric."),
+  //               testing::ContainerEq(expected_counts));
+  CountsMap GetTotalCountsForPrefix(const std::string& prefix) const;
+
+  // Access a modified HistogramSamples containing only what has been logged
+  // to the histogram since the creation of this object.
+  std::unique_ptr<HistogramSamples> GetHistogramSamplesSinceCreation(
+      const std::string& histogram_name) const;
+
+ private:
+  // Verifies and asserts that the value in the |sample| bucket matches the
+  // |expected_count|. The bucket's current value is determined from |samples|
+  // and is modified based on the snapshot stored for histogram |name|.
+  void CheckBucketCount(const std::string& name,
+                        HistogramBase::Sample sample,
+                        Histogram::Count expected_count,
+                        const HistogramSamples& samples) const;
+
+  // Verifies that the total number of values recorded for the histogram |name|
+  // is |expected_count|. This is checked against |samples| minus the snapshot
+  // that was taken for |name|.
+  void CheckTotalCount(const std::string& name,
+                       Histogram::Count expected_count,
+                       const HistogramSamples& samples) const;
+
+  // Sets the value for |count| to be the value in the |sample| bucket. The
+  // bucket's current value is determined from |samples| and is modified based
+  // on the snapshot stored for histogram |name|.
+  void GetBucketCountForSamples(const std::string& name,
+                                HistogramBase::Sample sample,
+                                const HistogramSamples& samples,
+                                HistogramBase::Count* count) const;
+
+  // Used to determine the histogram changes made during this instance's
+  // lifecycle.
+  std::map<std::string, std::unique_ptr<HistogramSamples>> histograms_snapshot_;
+
+  DISALLOW_COPY_AND_ASSIGN(HistogramTester);
+};
+
+struct Bucket {
+  Bucket(HistogramBase::Sample min, HistogramBase::Count count)
+      : min(min), count(count) {}
+
+  bool operator==(const Bucket& other) const;
+
+  HistogramBase::Sample min;
+  HistogramBase::Count count;
+};
+
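+// gTest discovers PrintTo() via argument-dependent lookup, so assertion
+// failure messages render buckets as e.g. "Bucket 2: 5" instead of raw bytes.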
+void PrintTo(const Bucket& value, std::ostream* os);
+
+}  // namespace base
+
+#endif  // BASE_TEST_HISTOGRAM_TESTER_H_
diff --git a/base/test/histogram_tester_unittest.cc b/base/test/histogram_tester_unittest.cc
new file mode 100644
index 0000000..e49ed97
--- /dev/null
+++ b/base/test/histogram_tester_unittest.cc
@@ -0,0 +1,130 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/histogram_tester.h"
+
+#include <memory>
+
+#include "base/metrics/histogram_macros.h"
+#include "base/metrics/histogram_samples.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::ElementsAre;
+using ::testing::IsEmpty;
+
+namespace {
+
+const char kHistogram1[] = "Test1";
+const char kHistogram2[] = "Test2";
+const char kHistogram3[] = "Test3";
+const char kHistogram4[] = "Test4";
+const char kHistogram5[] = "Test5";
+
+}  // namespace
+
+namespace base {
+
+typedef testing::Test HistogramTesterTest;
+
+TEST_F(HistogramTesterTest, Scope) {
+  // Record a histogram before the creation of the recorder.
+  UMA_HISTOGRAM_BOOLEAN(kHistogram1, true);
+
+  HistogramTester tester;
+
+  // Verify that no histogram is recorded.
+  tester.ExpectTotalCount(kHistogram1, 0);
+
+  // Record a histogram after the creation of the recorder.
+  UMA_HISTOGRAM_BOOLEAN(kHistogram1, true);
+
+  // Verify that one histogram is recorded.
+  std::unique_ptr<HistogramSamples> samples(
+      tester.GetHistogramSamplesSinceCreation(kHistogram1));
+  EXPECT_TRUE(samples);
+  EXPECT_EQ(1, samples->TotalCount());
+}
+
+TEST_F(HistogramTesterTest, GetHistogramSamplesSinceCreationNotNull) {
+  // Choose the histogram name to be unique, to ensure nothing was recorded
+  // for it so far.
+  static const char kHistogram[] =
+      "GetHistogramSamplesSinceCreationNotNullHistogram";
+  HistogramTester tester;
+
+  // Verify that the returned samples are empty but not null.
+  std::unique_ptr<HistogramSamples> samples(
+      tester.GetHistogramSamplesSinceCreation(kHistogram));
+  EXPECT_TRUE(samples);
+  tester.ExpectTotalCount(kHistogram, 0);
+}
+
+TEST_F(HistogramTesterTest, TestUniqueSample) {
+  HistogramTester tester;
+
+  // Record into a sample thrice
+  UMA_HISTOGRAM_COUNTS_100(kHistogram2, 2);
+  UMA_HISTOGRAM_COUNTS_100(kHistogram2, 2);
+  UMA_HISTOGRAM_COUNTS_100(kHistogram2, 2);
+
+  tester.ExpectUniqueSample(kHistogram2, 2, 3);
+}
+
+TEST_F(HistogramTesterTest, TestBucketsSample) {
+  HistogramTester tester;
+
+  // Record five samples into two buckets.
+  UMA_HISTOGRAM_COUNTS_100(kHistogram3, 2);
+  UMA_HISTOGRAM_COUNTS_100(kHistogram3, 2);
+  UMA_HISTOGRAM_COUNTS_100(kHistogram3, 2);
+  UMA_HISTOGRAM_COUNTS_100(kHistogram3, 2);
+  UMA_HISTOGRAM_COUNTS_100(kHistogram3, 3);
+
+  tester.ExpectBucketCount(kHistogram3, 2, 4);
+  tester.ExpectBucketCount(kHistogram3, 3, 1);
+
+  tester.ExpectTotalCount(kHistogram3, 5);
+}
+
+TEST_F(HistogramTesterTest, TestBucketsSampleWithScope) {
+  // Record two samples, one before the tester is created and one after.
+  UMA_HISTOGRAM_COUNTS_100(kHistogram4, 2);
+
+  HistogramTester tester;
+  UMA_HISTOGRAM_COUNTS_100(kHistogram4, 3);
+
+  tester.ExpectBucketCount(kHistogram4, 2, 0);
+  tester.ExpectBucketCount(kHistogram4, 3, 1);
+
+  tester.ExpectTotalCount(kHistogram4, 1);
+}
+
+TEST_F(HistogramTesterTest, TestGetAllSamples) {
+  HistogramTester tester;
+  UMA_HISTOGRAM_ENUMERATION(kHistogram5, 2, 5);
+  UMA_HISTOGRAM_ENUMERATION(kHistogram5, 3, 5);
+  UMA_HISTOGRAM_ENUMERATION(kHistogram5, 3, 5);
+  UMA_HISTOGRAM_ENUMERATION(kHistogram5, 5, 5);
+
+  EXPECT_THAT(tester.GetAllSamples(kHistogram5),
+              ElementsAre(Bucket(2, 1), Bucket(3, 2), Bucket(5, 1)));
+}
+
+TEST_F(HistogramTesterTest, TestGetAllSamples_NoSamples) {
+  HistogramTester tester;
+  EXPECT_THAT(tester.GetAllSamples(kHistogram5), IsEmpty());
+}
+
+TEST_F(HistogramTesterTest, TestGetTotalCountsForPrefix) {
+  HistogramTester tester;
+  UMA_HISTOGRAM_ENUMERATION("Test1.Test2.Test3", 2, 5);
+
+  // Regression check for bug https://crbug.com/659977.
+  EXPECT_TRUE(tester.GetTotalCountsForPrefix("Test2.").empty());
+
+  EXPECT_EQ(1u, tester.GetTotalCountsForPrefix("Test1.").size());
+}
+
+}  // namespace base
diff --git a/base/test/icu_test_util.cc b/base/test/icu_test_util.cc
new file mode 100644
index 0000000..a6f3e55
--- /dev/null
+++ b/base/test/icu_test_util.cc
@@ -0,0 +1,39 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/icu_test_util.h"
+
+#include "base/base_switches.h"
+#include "base/command_line.h"
+#include "base/i18n/icu_util.h"
+#include "base/i18n/rtl.h"
+#include "third_party/icu/source/common/unicode/uloc.h"
+
+namespace base {
+namespace test {
+
+ScopedRestoreICUDefaultLocale::ScopedRestoreICUDefaultLocale()
+    : ScopedRestoreICUDefaultLocale(std::string()) {}
+
+ScopedRestoreICUDefaultLocale::ScopedRestoreICUDefaultLocale(
+    const std::string& locale)
+    : default_locale_(uloc_getDefault()) {
+  if (!locale.empty())
+    i18n::SetICUDefaultLocale(locale.data());
+}
+
+ScopedRestoreICUDefaultLocale::~ScopedRestoreICUDefaultLocale() {
+  i18n::SetICUDefaultLocale(default_locale_.data());
+}
+
+void InitializeICUForTesting() {
+  if (!CommandLine::ForCurrentProcess()->HasSwitch(
+          switches::kTestDoNotInitializeIcu)) {
+    i18n::AllowMultipleInitializeCallsForTesting();
+    i18n::InitializeICU();
+  }
+}
+
+}  // namespace test
+}  // namespace base
diff --git a/base/test/icu_test_util.h b/base/test/icu_test_util.h
new file mode 100644
index 0000000..1a6e47d
--- /dev/null
+++ b/base/test/icu_test_util.h
@@ -0,0 +1,35 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_ICU_TEST_UTIL_H_
+#define BASE_TEST_ICU_TEST_UTIL_H_
+
+#include <string>
+
+#include "base/macros.h"
+
+namespace base {
+namespace test {
+
+// In unit tests, prefer ScopedRestoreICUDefaultLocale over
+// calling base::i18n::SetICUDefaultLocale() directly. This scoper makes it
+// harder to accidentally forget to reset the locale.
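+//
+// A typical use (the locale string is illustrative):
+//   {
+//     base::test::ScopedRestoreICUDefaultLocale restore_locale("de_DE");
+//     // Code sensitive to the ICU default locale runs here.
+//   }  // The previous default locale is restored on scope exit.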
+class ScopedRestoreICUDefaultLocale {
+ public:
+  ScopedRestoreICUDefaultLocale();
+  explicit ScopedRestoreICUDefaultLocale(const std::string& locale);
+  ~ScopedRestoreICUDefaultLocale();
+
+ private:
+  const std::string default_locale_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedRestoreICUDefaultLocale);
+};
+
+void InitializeICUForTesting();
+
+}  // namespace test
+}  // namespace base
+
+#endif  // BASE_TEST_ICU_TEST_UTIL_H_
diff --git a/base/test/ios/OWNERS b/base/test/ios/OWNERS
new file mode 100644
index 0000000..40a68c7
--- /dev/null
+++ b/base/test/ios/OWNERS
@@ -0,0 +1 @@
+rohitrao@chromium.org
diff --git a/base/test/ios/wait_util.h b/base/test/ios/wait_util.h
new file mode 100644
index 0000000..e0c4c27
--- /dev/null
+++ b/base/test/ios/wait_util.h
@@ -0,0 +1,50 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_IOS_WAIT_UTIL_H_
+#define BASE_TEST_IOS_WAIT_UTIL_H_
+
+#include "base/ios/block_types.h"
+#include "base/time/time.h"
+
+namespace base {
+namespace test {
+namespace ios {
+
+// Runs |action| if non-nil. Then, until either |condition| is true or |timeout|
+// expires, repeatedly runs the current NSRunLoop and the current MessageLoop
+// (if |run_message_loop| is true). |condition| may be nil if there is no
+// condition to wait for; in that case the NSRunLoop and current MessageLoop
+// will be run until |timeout| expires. DCHECKs if |condition| is non-nil and
+// |timeout| expires before |condition| becomes true. If |timeout| is zero, a
+// reasonable default is used. Returns the time spent in the function.
+// DEPRECATED - Do not use in new code. http://crbug.com/784735
+TimeDelta TimeUntilCondition(ProceduralBlock action,
+                             ConditionBlock condition,
+                             bool run_message_loop,
+                             TimeDelta timeout);
+
+// Same as TimeUntilCondition, but doesn't run an action.
+// DEPRECATED - Do not use in new code. http://crbug.com/784735
+void WaitUntilCondition(ConditionBlock condition,
+                        bool run_message_loop,
+                        TimeDelta timeout);
+// DEPRECATED - Do not use in new code. http://crbug.com/784735
+void WaitUntilCondition(ConditionBlock condition);
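+//
+// Example use of WaitUntilCondition (the asynchronous helper is hypothetical):
+//   __block BOOL done = NO;
+//   StartSomeAsyncWork(^{ done = YES; });
+//   WaitUntilCondition(^bool {
+//     return done;
+//   });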
+
+// Lets the run loop of the current thread process other messages
+// within the given maximum delay. This method may return before max_delay
+// elapsed.
+void SpinRunLoopWithMaxDelay(TimeDelta max_delay);
+
+// Lets the run loop of the current thread process other messages
+// within the given minimum delay. This method returns after |min_delay|
+// elapsed.
+void SpinRunLoopWithMinDelay(TimeDelta min_delay);
+
+}  // namespace ios
+}  // namespace test
+}  // namespace base
+
+#endif  // BASE_TEST_IOS_WAIT_UTIL_H_
diff --git a/base/test/ios/wait_util.mm b/base/test/ios/wait_util.mm
new file mode 100644
index 0000000..39a4115
--- /dev/null
+++ b/base/test/ios/wait_util.mm
@@ -0,0 +1,70 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import "base/test/ios/wait_util.h"
+
+#import <Foundation/Foundation.h>
+
+#include "base/logging.h"
+#include "base/mac/scoped_nsobject.h"
+#include "base/run_loop.h"
+#include "base/test/test_timeouts.h"
+#include "base/timer/elapsed_timer.h"
+
+namespace base {
+namespace test {
+namespace ios {
+
+TimeDelta TimeUntilCondition(ProceduralBlock action,
+                             ConditionBlock condition,
+                             bool run_message_loop,
+                             TimeDelta timeout) {
+  ElapsedTimer timer;
+  if (action)
+    action();
+  if (timeout.is_zero())
+    timeout = TestTimeouts::action_timeout();
+  const TimeDelta spin_delay(TimeDelta::FromMilliseconds(10));
+  bool condition_evaluation_result = false;
+  while (timer.Elapsed() < timeout &&
+         (!condition || !(condition_evaluation_result = condition()))) {
+    SpinRunLoopWithMaxDelay(spin_delay);
+    if (run_message_loop)
+      RunLoop().RunUntilIdle();
+  }
+  const TimeDelta elapsed = timer.Elapsed();
+  // If this DCHECK is ever hit, check if |action| is doing something that is
+  // taking an unreasonably long time, or if |condition| does not come
+  // true quickly enough. Increase |timeout| only if necessary.
+  DCHECK(!condition || condition_evaluation_result);
+  return elapsed;
+}
+
+void WaitUntilCondition(ConditionBlock condition,
+                        bool run_message_loop,
+                        TimeDelta timeout) {
+  TimeUntilCondition(nil, condition, run_message_loop, timeout);
+}
+
+void WaitUntilCondition(ConditionBlock condition) {
+  WaitUntilCondition(condition, false, TimeDelta());
+}
+
+void SpinRunLoopWithMaxDelay(TimeDelta max_delay) {
+  scoped_nsobject<NSDate> beforeDate(
+      [[NSDate alloc] initWithTimeIntervalSinceNow:max_delay.InSecondsF()]);
+  [[NSRunLoop currentRunLoop] runMode:NSDefaultRunLoopMode
+                           beforeDate:beforeDate];
+}
+
+void SpinRunLoopWithMinDelay(TimeDelta min_delay) {
+  ElapsedTimer timer;
+  while (timer.Elapsed() < min_delay) {
+    SpinRunLoopWithMaxDelay(TimeDelta::FromMilliseconds(10));
+  }
+}
+
+}  // namespace ios
+}  // namespace test
+}  // namespace base
diff --git a/base/test/launcher/test_launcher.cc b/base/test/launcher/test_launcher.cc
new file mode 100644
index 0000000..71eb1ad
--- /dev/null
+++ b/base/test/launcher/test_launcher.cc
@@ -0,0 +1,1349 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/launcher/test_launcher.h"
+
+#include <stdio.h>
+
+#include <algorithm>
+#include <map>
+#include <utility>
+
+#include "base/at_exit.h"
+#include "base/bind.h"
+#include "base/command_line.h"
+#include "base/environment.h"
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/files/scoped_file.h"
+#include "base/format_macros.h"
+#include "base/hash.h"
+#include "base/lazy_instance.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/process/kill.h"
+#include "base/process/launch.h"
+#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
+#include "base/strings/pattern.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringize_macros.h"
+#include "base/strings/stringprintf.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/sys_info.h"
+#include "base/task_scheduler/post_task.h"
+#include "base/task_scheduler/task_scheduler.h"
+#include "base/test/gtest_util.h"
+#include "base/test/launcher/test_launcher_tracer.h"
+#include "base/test/launcher/test_results_tracker.h"
+#include "base/test/test_switches.h"
+#include "base/test/test_timeouts.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_POSIX)
+#include <fcntl.h>
+
+#include "base/files/file_descriptor_watcher_posix.h"
+#endif
+
+#if defined(OS_MACOSX)
+#include "base/mac/scoped_nsautorelease_pool.h"
+#endif
+
+#if defined(OS_WIN)
+#include "base/win/windows_version.h"
+#endif
+
+#if defined(OS_FUCHSIA)
+// TODO(scottmg): For temporary code in OnOutputTimeout().
+#include <zircon/syscalls.h>
+#include <zircon/syscalls/object.h>
+#include "base/fuchsia/default_job.h"
+#endif
+
+namespace base {
+
+// See https://groups.google.com/a/chromium.org/d/msg/chromium-dev/nkdTP7sstSc/uT3FaE_sgkAJ .
+using ::operator<<;
+
+// The environment variable name for the total number of test shards.
+const char kTestTotalShards[] = "GTEST_TOTAL_SHARDS";
+// The environment variable name for the test shard index.
+const char kTestShardIndex[] = "GTEST_SHARD_INDEX";
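+//
+// For example, a run split across three machines would set
+// GTEST_TOTAL_SHARDS=3 on all of them and GTEST_SHARD_INDEX to 0, 1 and 2
+// respectively; each launcher then runs only its own slice of the test list.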
+
+namespace {
+
+// Global tag for test runs where the results are incomplete or unreliable
+// for any reason, e.g. early exit because of too many broken tests.
+const char kUnreliableResultsTag[] = "UNRELIABLE_RESULTS";
+
+// Maximum time of no output after which we print list of processes still
+// running. This deliberately doesn't use TestTimeouts (which is otherwise
+// a recommended solution), because they can be increased. This would defeat
+// the purpose of this timeout, which is 1) to avoid buildbot "no output for
+// X seconds" timeout killing the process 2) help communicate status of
+// the test launcher to people looking at the output (no output for a long
+// time is mysterious and gives no info about what is happening) 3) help
+// debugging in case the process hangs anyway.
+constexpr TimeDelta kOutputTimeout = TimeDelta::FromSeconds(15);
+
+// Limit of output snippet lines when printing to stdout.
+// Avoids flooding the logs with an amount of output that gums up
+// the infrastructure.
+const size_t kOutputSnippetLinesLimit = 5000;
+
+// Limit of output snippet size. Exceeding this limit
+// results in truncating the output and failing the test.
+const size_t kOutputSnippetBytesLimit = 300 * 1024;
+
+// Set of live launched test processes, with a corresponding lock (callers are
+// allowed to launch processes on different threads).
+Lock* GetLiveProcessesLock() {
+  static auto* lock = new Lock;
+  return lock;
+}
+
+std::map<ProcessHandle, CommandLine>* GetLiveProcesses() {
+  static auto* map = new std::map<ProcessHandle, CommandLine>;
+  return map;
+}
+
+// Performance trace generator.
+TestLauncherTracer* GetTestLauncherTracer() {
+  static auto* tracer = new TestLauncherTracer;
+  return tracer;
+}
+
+// Creates and starts a TaskScheduler with |num_parallel_jobs| workers
+// dedicated to foreground blocking tasks (this corresponds to the traits used
+// to launch and wait for child processes).
+void CreateAndStartTaskScheduler(int num_parallel_jobs) {
+  // These values are taken from TaskScheduler::StartWithDefaultParams(), which
+  // is not used directly to allow a custom number of threads in the foreground
+  // blocking pool.
+  constexpr int kMaxBackgroundThreads = 1;
+  constexpr int kMaxBackgroundBlockingThreads = 2;
+  const int max_foreground_threads =
+      std::max(1, base::SysInfo::NumberOfProcessors());
+  constexpr base::TimeDelta kSuggestedReclaimTime =
+      base::TimeDelta::FromSeconds(30);
+  base::TaskScheduler::Create("TestLauncher");
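+  // The four worker pool params below are given in the order background,
+  // background blocking, foreground, foreground blocking (the pool ordering
+  // assumed by TaskScheduler::Start() at the time of writing).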
+  base::TaskScheduler::GetInstance()->Start(
+      {{kMaxBackgroundThreads, kSuggestedReclaimTime},
+       {kMaxBackgroundBlockingThreads, kSuggestedReclaimTime},
+       {max_foreground_threads, kSuggestedReclaimTime},
+       {num_parallel_jobs, kSuggestedReclaimTime}});
+}
+
+// TODO(fuchsia): Fuchsia does not have POSIX signals, but equivalent
+// functionality will probably be necessary eventually. See
+// https://crbug.com/706592.
+#if defined(OS_POSIX) && !defined(OS_FUCHSIA)
+// Self-pipe that makes it possible to do complex shutdown handling
+// outside of the signal handler.
+int g_shutdown_pipe[2] = { -1, -1 };
+
+void ShutdownPipeSignalHandler(int signal) {
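+  // write() is async-signal-safe; the actual shutdown work happens later in
+  // OnShutdownPipeReadable(), outside of signal-handler context.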
+  HANDLE_EINTR(write(g_shutdown_pipe[1], "q", 1));
+}
+
+void KillSpawnedTestProcesses() {
+  // Keep the lock until exiting the process to prevent further processes
+  // from being spawned.
+  AutoLock lock(*GetLiveProcessesLock());
+
+  fprintf(stdout, "Sending SIGTERM to %" PRIuS " child processes... ",
+          GetLiveProcesses()->size());
+  fflush(stdout);
+
+  for (const auto& pair : *GetLiveProcesses()) {
+    // A negative pid sends the signal to the entire process group.
+    kill((-1) * (pair.first), SIGTERM);
+  }
+
+  fprintf(stdout,
+          "done.\nGiving processes a chance to terminate cleanly... ");
+  fflush(stdout);
+
+  PlatformThread::Sleep(TimeDelta::FromMilliseconds(500));
+
+  fprintf(stdout, "done.\n");
+  fflush(stdout);
+
+  fprintf(stdout, "Sending SIGKILL to %" PRIuS " child processes... ",
+          GetLiveProcesses()->size());
+  fflush(stdout);
+
+  for (const auto& pair : *GetLiveProcesses()) {
+    // A negative pid sends the signal to the entire process group.
+    kill((-1) * (pair.first), SIGKILL);
+  }
+
+  fprintf(stdout, "done.\n");
+  fflush(stdout);
+}
+#endif  // defined(OS_POSIX) && !defined(OS_FUCHSIA)
+
+// Parses the environment variable |var| as an Int32. If it is unset, returns
+// true. If it is set, unsets it and then converts it to an Int32 before
+// returning it in |result|. Returns true on success.
+bool TakeInt32FromEnvironment(const char* const var, int32_t* result) {
+  std::unique_ptr<Environment> env(Environment::Create());
+  std::string str_val;
+
+  if (!env->GetVar(var, &str_val))
+    return true;
+
+  if (!env->UnSetVar(var)) {
+    LOG(ERROR) << "Invalid environment: we could not unset " << var << ".\n";
+    return false;
+  }
+
+  if (!StringToInt(str_val, result)) {
+    LOG(ERROR) << "Invalid environment: " << var << " is not an integer.\n";
+    return false;
+  }
+
+  return true;
+}
+
+// Unsets the environment variable |name| and returns true on success.
+// Also returns true if the variable just doesn't exist.
+bool UnsetEnvironmentVariableIfExists(const std::string& name) {
+  std::unique_ptr<Environment> env(Environment::Create());
+  std::string str_val;
+  if (!env->GetVar(name, &str_val))
+    return true;
+  return env->UnSetVar(name);
+}
+
+// Returns true if bot mode has been requested, i.e. defaults optimized
+// for continuous integration bots. This way developers don't have to remember
+// special command-line flags.
+bool BotModeEnabled() {
+  std::unique_ptr<Environment> env(Environment::Create());
+  return CommandLine::ForCurrentProcess()->HasSwitch(
+      switches::kTestLauncherBotMode) ||
+      env->HasVar("CHROMIUM_TEST_LAUNCHER_BOT_MODE");
+}
+
+// Returns the command line to use after gtest-specific processing
+// and applying |wrapper|.
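+// For example (the switch values and wrapper are illustrative):
+//   in:  out/unit_tests --gtest_repeat=2 --gtest_output=xml:o.xml --foo
+//   out: wrapper out/unit_tests --foo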
+CommandLine PrepareCommandLineForGTest(const CommandLine& command_line,
+                                       const std::string& wrapper) {
+  CommandLine new_command_line(command_line.GetProgram());
+  CommandLine::SwitchMap switches = command_line.GetSwitches();
+
+  // Strip out gtest_repeat flag - this is handled by the launcher process.
+  switches.erase(kGTestRepeatFlag);
+
+  // Don't try to write the final XML report in child processes.
+  switches.erase(kGTestOutputFlag);
+
+  for (CommandLine::SwitchMap::const_iterator iter = switches.begin();
+       iter != switches.end(); ++iter) {
+    new_command_line.AppendSwitchNative((*iter).first, (*iter).second);
+  }
+
+  // Prepend the wrapper after the last CommandLine quasi-copy operation.
+  // CommandLine does not really support removing switches well, and trying to
+  // do that on a CommandLine with a wrapper is known to break.
+  // TODO(phajdan.jr): Consider making CommandLine support removing switches.
+#if defined(OS_WIN)
+  new_command_line.PrependWrapper(ASCIIToUTF16(wrapper));
+#elif defined(OS_POSIX)
+  new_command_line.PrependWrapper(wrapper);
+#endif
+
+  return new_command_line;
+}
+
+// Launches a child process using |command_line|. If the child process is still
+// running after |timeout|, it is terminated and |*was_timeout| is set to true.
+// Returns the exit code of the process.
+int LaunchChildTestProcessWithOptions(const CommandLine& command_line,
+                                      const LaunchOptions& options,
+                                      int flags,
+                                      TimeDelta timeout,
+                                      ProcessLifetimeObserver* observer,
+                                      bool* was_timeout) {
+  TimeTicks start_time(TimeTicks::Now());
+#if defined(OS_FUCHSIA)  // TODO(scottmg): https://crbug.com/755282
+  const bool kOnBot = getenv("CHROME_HEADLESS") != nullptr;
+#endif  // OS_FUCHSIA
+
+#if defined(OS_POSIX) && !defined(OS_FUCHSIA)
+  // Make sure an option we rely on is present - see LaunchChildGTestProcess.
+  DCHECK(options.new_process_group);
+#endif
+
+  LaunchOptions new_options(options);
+
+#if defined(OS_WIN)
+  DCHECK(!new_options.job_handle);
+
+  win::ScopedHandle job_handle;
+  if (flags & TestLauncher::USE_JOB_OBJECTS) {
+    job_handle.Set(CreateJobObject(NULL, NULL));
+    if (!job_handle.IsValid()) {
+      LOG(ERROR) << "Could not create JobObject.";
+      return -1;
+    }
+
+    DWORD job_flags = JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE;
+
+    // Allow break-away from job since sandbox and few other places rely on it
+    // on Windows versions prior to Windows 8 (which supports nested jobs).
+    if (win::GetVersion() < win::VERSION_WIN8 &&
+        flags & TestLauncher::ALLOW_BREAKAWAY_FROM_JOB) {
+      job_flags |= JOB_OBJECT_LIMIT_BREAKAWAY_OK;
+    }
+
+    if (!SetJobObjectLimitFlags(job_handle.Get(), job_flags)) {
+      LOG(ERROR) << "Could not SetJobObjectLimitFlags.";
+      return -1;
+    }
+
+    new_options.job_handle = job_handle.Get();
+  }
+#elif defined(OS_FUCHSIA)
+  DCHECK(!new_options.job_handle);
+
+  ScopedZxHandle job_handle;
+  zx_status_t result = zx_job_create(GetDefaultJob(), 0, job_handle.receive());
+  CHECK_EQ(ZX_OK, result) << "zx_job_create: " << zx_status_get_string(result);
+  new_options.job_handle = job_handle.get();
+#endif  // defined(OS_FUCHSIA)
+
+#if defined(OS_LINUX)
+  // To prevent accidental privilege sharing to an untrusted child, processes
+  // are started with PR_SET_NO_NEW_PRIVS. Do not set that here, since this
+  // new child will be privileged and trusted.
+  new_options.allow_new_privs = true;
+#endif
+
+  Process process;
+
+  {
+    // Note how we grab the lock before the process possibly gets created.
+    // This ensures that when the lock is held, ALL the processes are registered
+    // in the set.
+    AutoLock lock(*GetLiveProcessesLock());
+
+#if defined(OS_WIN)
+    // Allow the handle used to capture stdout and stderr to be inherited by
+    // the child. Note that this is done under GetLiveProcessesLock() to
+    // ensure that only the desired child receives the handle.
+    if (new_options.stdout_handle) {
+      ::SetHandleInformation(new_options.stdout_handle, HANDLE_FLAG_INHERIT,
+                             HANDLE_FLAG_INHERIT);
+    }
+#endif
+
+    process = LaunchProcess(command_line, new_options);
+
+#if defined(OS_WIN)
+    // Revoke inheritance so that the handle isn't leaked into other children.
+    // Note that this is done under GetLiveProcessesLock() to ensure that only
+    // the desired child receives the handle.
+    if (new_options.stdout_handle)
+      ::SetHandleInformation(new_options.stdout_handle, HANDLE_FLAG_INHERIT, 0);
+#endif
+
+    if (!process.IsValid())
+      return -1;
+
+#if defined(OS_FUCHSIA)  // TODO(scottmg): https://crbug.com/755282
+    if (kOnBot) {
+      LOG(ERROR) << base::StringPrintf("adding %x to live process list",
+                                       process.Handle());
+    }
+#endif  // OS_FUCHSIA
+
+    // TODO(rvargas) crbug.com/417532: Don't store process handles.
+    GetLiveProcesses()->insert(std::make_pair(process.Handle(), command_line));
+  }
+
+  if (observer)
+    observer->OnLaunched(process.Handle(), process.Pid());
+
+  int exit_code = 0;
+  bool did_exit = false;
+
+  {
+    base::ScopedAllowBaseSyncPrimitivesForTesting allow_base_sync_primitives;
+    did_exit = process.WaitForExitWithTimeout(timeout, &exit_code);
+  }
+
+  if (!did_exit) {
+    if (observer)
+      observer->OnTimedOut(command_line);
+
+    *was_timeout = true;
+    exit_code = -1;  // Set a non-zero exit code to signal a failure.
+
+#if defined(OS_FUCHSIA)  // TODO(scottmg): https://crbug.com/755282
+    if (kOnBot) {
+      LOG(ERROR) << base::StringPrintf("about to process.Terminate() %x",
+                                       process.Handle());
+    }
+
+    // TODO(crbug.com/799268): Remove once we have debugged timed-out/hung
+    // test job processes.
+    LOG(ERROR) << "Dumping threads in process " << process.Pid();
+
+    CommandLine threads_cmdline(base::FilePath("/boot/bin/threads"));
+    threads_cmdline.AppendArg(IntToString(process.Pid()));
+
+    LaunchOptions threads_options;
+    threads_options.wait = true;
+    LaunchProcess(threads_cmdline, threads_options);
+#endif  // OS_FUCHSIA
+    {
+      base::ScopedAllowBaseSyncPrimitivesForTesting allow_base_sync_primitives;
+      // Ensure that the process terminates.
+      process.Terminate(-1, true);
+    }
+  }
+
+  {
+    // Note how we grab the lock before issuing a possibly broad process kill.
+    // Other code parts that grab the lock kill processes, so avoid trying
+    // to do that twice and trigger all kinds of log messages.
+    AutoLock lock(*GetLiveProcessesLock());
+
+#if defined(OS_FUCHSIA)
+    // TODO(scottmg): https://crbug.com/755282
+    if (kOnBot) {
+      LOG(ERROR) << base::StringPrintf("going to zx_task_kill(job) for %x",
+                                       process.Handle());
+    }
+
+    CHECK_EQ(zx_task_kill(job_handle.get()), ZX_OK);
+#elif defined(OS_POSIX)
+    if (exit_code != 0) {
+      // On POSIX, in case the test does not exit cleanly, either due to a crash
+      // or due to it timing out, we need to clean up any child processes that
+      // it might have created. On Windows, child processes are automatically
+      // cleaned up using JobObjects.
+      KillProcessGroup(process.Handle());
+    }
+#endif
+
+#if defined(OS_FUCHSIA)  // TODO(scottmg): https://crbug.com/755282
+    if (kOnBot) {
+      LOG(ERROR) << base::StringPrintf("removing %x from live process list",
+                                       process.Handle());
+    }
+#endif  // OS_FUCHSIA
+    GetLiveProcesses()->erase(process.Handle());
+  }
+
+  GetTestLauncherTracer()->RecordProcessExecution(
+      start_time, TimeTicks::Now() - start_time);
+
+  return exit_code;
+}
+
+void DoLaunchChildTestProcess(
+    const CommandLine& command_line,
+    TimeDelta timeout,
+    const TestLauncher::LaunchOptions& test_launch_options,
+    bool redirect_stdio,
+    SingleThreadTaskRunner* task_runner,
+    std::unique_ptr<ProcessLifetimeObserver> observer) {
+  TimeTicks start_time = TimeTicks::Now();
+
+  ScopedFILE output_file;
+  FilePath output_filename;
+  if (redirect_stdio) {
+    FILE* raw_output_file = CreateAndOpenTemporaryFile(&output_filename);
+    output_file.reset(raw_output_file);
+    CHECK(output_file);
+  }
+
+  LaunchOptions options;
+#if defined(OS_WIN)
+  options.inherit_mode = test_launch_options.inherit_mode;
+  options.handles_to_inherit = test_launch_options.handles_to_inherit;
+  if (redirect_stdio) {
+    HANDLE handle =
+        reinterpret_cast<HANDLE>(_get_osfhandle(_fileno(output_file.get())));
+    CHECK_NE(INVALID_HANDLE_VALUE, handle);
+    options.stdin_handle = INVALID_HANDLE_VALUE;
+    options.stdout_handle = handle;
+    options.stderr_handle = handle;
+    // See LaunchOptions.stdout_handle comments for why this compares against
+    // FILE_TYPE_CHAR.
+    if (options.inherit_mode == base::LaunchOptions::Inherit::kSpecific &&
+        GetFileType(handle) != FILE_TYPE_CHAR) {
+      options.handles_to_inherit.push_back(handle);
+    }
+  }
+#elif defined(OS_POSIX)
+  options.fds_to_remap = test_launch_options.fds_to_remap;
+  if (redirect_stdio) {
+    int output_file_fd = fileno(output_file.get());
+    CHECK_LE(0, output_file_fd);
+    options.fds_to_remap.push_back(
+        std::make_pair(output_file_fd, STDOUT_FILENO));
+    options.fds_to_remap.push_back(
+        std::make_pair(output_file_fd, STDERR_FILENO));
+  }
+
+#if !defined(OS_FUCHSIA)
+  options.new_process_group = true;
+#endif
+#if defined(OS_LINUX)
+  options.kill_on_parent_death = true;
+#endif
+
+#endif  // defined(OS_POSIX)
+
+  bool was_timeout = false;
+  int exit_code = LaunchChildTestProcessWithOptions(
+      command_line, options, test_launch_options.flags, timeout, observer.get(),
+      &was_timeout);
+
+  std::string output_file_contents;
+  if (redirect_stdio) {
+    fflush(output_file.get());
+    output_file.reset();
+    CHECK(ReadFileToString(output_filename, &output_file_contents))
+        << output_filename;
+
+    if (!DeleteFile(output_filename, false)) {
+      // This needs to be non-fatal at least for Windows.
+      LOG(WARNING) << "Failed to delete " << output_filename.AsUTF8Unsafe();
+    }
+  }
+
+  // Invoke OnCompleted on the thread it originated from, not on a worker
+  // pool thread.
+  task_runner->PostTask(
+      FROM_HERE,
+      BindOnce(&ProcessLifetimeObserver::OnCompleted, std::move(observer),
+               exit_code, TimeTicks::Now() - start_time, was_timeout,
+               output_file_contents));
+}
+
+}  // namespace
+
+const char kGTestBreakOnFailure[] = "gtest_break_on_failure";
+const char kGTestFilterFlag[] = "gtest_filter";
+const char kGTestFlagfileFlag[] = "gtest_flagfile";
+const char kGTestHelpFlag[]   = "gtest_help";
+const char kGTestListTestsFlag[] = "gtest_list_tests";
+const char kGTestRepeatFlag[] = "gtest_repeat";
+const char kGTestRunDisabledTestsFlag[] = "gtest_also_run_disabled_tests";
+const char kGTestOutputFlag[] = "gtest_output";
+
+TestLauncherDelegate::~TestLauncherDelegate() = default;
+
+TestLauncher::LaunchOptions::LaunchOptions() = default;
+TestLauncher::LaunchOptions::LaunchOptions(const LaunchOptions& other) =
+    default;
+TestLauncher::LaunchOptions::~LaunchOptions() = default;
+
+TestLauncher::TestLauncher(TestLauncherDelegate* launcher_delegate,
+                           size_t parallel_jobs)
+    : launcher_delegate_(launcher_delegate),
+      total_shards_(1),
+      shard_index_(0),
+      cycles_(1),
+      test_found_count_(0),
+      test_started_count_(0),
+      test_finished_count_(0),
+      test_success_count_(0),
+      test_broken_count_(0),
+      retry_count_(0),
+      retry_limit_(0),
+      force_run_broken_tests_(false),
+      run_result_(true),
+      watchdog_timer_(FROM_HERE,
+                      kOutputTimeout,
+                      this,
+                      &TestLauncher::OnOutputTimeout),
+      parallel_jobs_(parallel_jobs) {}
+
+TestLauncher::~TestLauncher() {
+  base::TaskScheduler::GetInstance()->Shutdown();
+}
+
+bool TestLauncher::Run() {
+  if (!Init())
+    return false;
+
+  // Value of |cycles_| changes after each iteration. Keep track of the
+  // original value.
+  int requested_cycles = cycles_;
+
+// TODO(fuchsia): Fuchsia does not have POSIX signals. Something similar to
+// this will likely need to be implemented. See https://crbug.com/706592.
+#if defined(OS_POSIX) && !defined(OS_FUCHSIA)
+  CHECK_EQ(0, pipe(g_shutdown_pipe));
+
+  struct sigaction action;
+  memset(&action, 0, sizeof(action));
+  sigemptyset(&action.sa_mask);
+  action.sa_handler = &ShutdownPipeSignalHandler;
+
+  CHECK_EQ(0, sigaction(SIGINT, &action, nullptr));
+  CHECK_EQ(0, sigaction(SIGQUIT, &action, nullptr));
+  CHECK_EQ(0, sigaction(SIGTERM, &action, nullptr));
+
+  auto controller = base::FileDescriptorWatcher::WatchReadable(
+      g_shutdown_pipe[0],
+      base::Bind(&TestLauncher::OnShutdownPipeReadable, Unretained(this)));
+#endif  // defined(OS_POSIX) && !defined(OS_FUCHSIA)
+
+  // Start the watchdog timer.
+  watchdog_timer_.Reset();
+
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce(&TestLauncher::RunTestIteration, Unretained(this)));
+
+  RunLoop().Run();
+
+  if (requested_cycles != 1)
+    results_tracker_.PrintSummaryOfAllIterations();
+
+  MaybeSaveSummaryAsJSON(std::vector<std::string>());
+
+  return run_result_;
+}
+
+void TestLauncher::LaunchChildGTestProcess(
+    const CommandLine& command_line,
+    const std::string& wrapper,
+    TimeDelta timeout,
+    const LaunchOptions& options,
+    std::unique_ptr<ProcessLifetimeObserver> observer) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  // Record the exact command line used to launch the child.
+  CommandLine new_command_line(
+      PrepareCommandLineForGTest(command_line, wrapper));
+
+  // When running in parallel mode we need to redirect stdio to avoid mixed-up
+  // output. We also always redirect on the bots to get the test output into
+  // the JSON summary.
+  bool redirect_stdio = (parallel_jobs_ > 1) || BotModeEnabled();
+
+  PostTaskWithTraits(
+      FROM_HERE, {MayBlock(), TaskShutdownBehavior::BLOCK_SHUTDOWN},
+      BindOnce(&DoLaunchChildTestProcess, new_command_line, timeout, options,
+               redirect_stdio, RetainedRef(ThreadTaskRunnerHandle::Get()),
+               std::move(observer)));
+}
+
+void TestLauncher::OnTestFinished(const TestResult& original_result) {
+  ++test_finished_count_;
+
+  TestResult result(original_result);
+
+  if (result.output_snippet.length() > kOutputSnippetBytesLimit) {
+    if (result.status == TestResult::TEST_SUCCESS)
+      result.status = TestResult::TEST_EXCESSIVE_OUTPUT;
+
+    // Keep the top and bottom of the log and truncate the middle part.
+    result.output_snippet =
+        result.output_snippet.substr(0, kOutputSnippetBytesLimit / 2) + "\n" +
+        StringPrintf("<truncated (%" PRIuS " bytes)>\n",
+                     result.output_snippet.length()) +
+        result.output_snippet.substr(result.output_snippet.length() -
+                                     kOutputSnippetBytesLimit / 2) +
+        "\n";
+  }
+
+  bool print_snippet = false;
+  std::string print_test_stdio("auto");
+  if (CommandLine::ForCurrentProcess()->HasSwitch(
+          switches::kTestLauncherPrintTestStdio)) {
+    print_test_stdio = CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
+        switches::kTestLauncherPrintTestStdio);
+  }
+  if (print_test_stdio == "auto") {
+    print_snippet = (result.status != TestResult::TEST_SUCCESS);
+  } else if (print_test_stdio == "always") {
+    print_snippet = true;
+  } else if (print_test_stdio == "never") {
+    print_snippet = false;
+  } else {
+    LOG(WARNING) << "Invalid value of " << switches::kTestLauncherPrintTestStdio
+                 << ": " << print_test_stdio;
+  }
+  if (print_snippet) {
+    std::vector<base::StringPiece> snippet_lines =
+        SplitStringPiece(result.output_snippet, "\n", base::KEEP_WHITESPACE,
+                         base::SPLIT_WANT_ALL);
+    if (snippet_lines.size() > kOutputSnippetLinesLimit) {
+      size_t truncated_size = snippet_lines.size() - kOutputSnippetLinesLimit;
+      snippet_lines.erase(
+          snippet_lines.begin(),
+          snippet_lines.begin() + truncated_size);
+      snippet_lines.insert(snippet_lines.begin(), "<truncated>");
+    }
+    fprintf(stdout, "%s", base::JoinString(snippet_lines, "\n").c_str());
+    fflush(stdout);
+  }
+
+  if (result.status == TestResult::TEST_SUCCESS) {
+    ++test_success_count_;
+  } else {
+    tests_to_retry_.insert(result.full_name);
+  }
+
+  results_tracker_.AddTestResult(result);
+
+  // TODO(phajdan.jr): Align counter (padding).
+  std::string status_line(
+      StringPrintf("[%" PRIuS "/%" PRIuS "] %s ",
+                   test_finished_count_,
+                   test_started_count_,
+                   result.full_name.c_str()));
+  if (result.completed()) {
+    status_line.append(StringPrintf("(%" PRId64 " ms)",
+                                    result.elapsed_time.InMilliseconds()));
+  } else if (result.status == TestResult::TEST_TIMEOUT) {
+    status_line.append("(TIMED OUT)");
+  } else if (result.status == TestResult::TEST_CRASH) {
+    status_line.append("(CRASHED)");
+  } else if (result.status == TestResult::TEST_SKIPPED) {
+    status_line.append("(SKIPPED)");
+  } else if (result.status == TestResult::TEST_UNKNOWN) {
+    status_line.append("(UNKNOWN)");
+  } else {
+    // Fail very loudly so it's not ignored.
+    CHECK(false) << "Unhandled test result status: " << result.status;
+  }
+  fprintf(stdout, "%s\n", status_line.c_str());
+  fflush(stdout);
+
+  // We just printed a status line, reset the watchdog timer.
+  watchdog_timer_.Reset();
+
+  // Do not waste time on timeouts. We include tests with unknown results here
+  // because sometimes (e.g. hang in between unit tests) that's how a timeout
+  // gets reported.
+  if (result.status == TestResult::TEST_TIMEOUT ||
+      result.status == TestResult::TEST_UNKNOWN) {
+    test_broken_count_++;
+  }
+  size_t broken_threshold =
+      std::max(static_cast<size_t>(20), test_found_count_ / 10);
+  if (!force_run_broken_tests_ && test_broken_count_ >= broken_threshold) {
+    fprintf(stdout, "Too many badly broken tests (%" PRIuS "), exiting now.\n",
+            test_broken_count_);
+    fflush(stdout);
+
+#if defined(OS_POSIX) && !defined(OS_FUCHSIA)
+    KillSpawnedTestProcesses();
+#endif  // defined(OS_POSIX) && !defined(OS_FUCHSIA)
+
+    MaybeSaveSummaryAsJSON({"BROKEN_TEST_EARLY_EXIT", kUnreliableResultsTag});
+
+    exit(1);
+  }
+
+  if (test_finished_count_ != test_started_count_)
+    return;
+
+  if (tests_to_retry_.empty() || retry_count_ >= retry_limit_) {
+    OnTestIterationFinished();
+    return;
+  }
+
+  if (!force_run_broken_tests_ && tests_to_retry_.size() >= broken_threshold) {
+    fprintf(stdout,
+            "Too many failing tests (%" PRIuS "), skipping retries.\n",
+            tests_to_retry_.size());
+    fflush(stdout);
+
+    results_tracker_.AddGlobalTag("BROKEN_TEST_SKIPPED_RETRIES");
+    results_tracker_.AddGlobalTag(kUnreliableResultsTag);
+
+    OnTestIterationFinished();
+    return;
+  }
+
+  retry_count_++;
+
+  std::vector<std::string> test_names(tests_to_retry_.begin(),
+                                      tests_to_retry_.end());
+
+  tests_to_retry_.clear();
+
+  size_t retry_started_count = launcher_delegate_->RetryTests(this, test_names);
+  if (retry_started_count == 0) {
+    // Signal failure, but continue to run all requested test iterations.
+    // With the summary of all iterations at the end this is a good default.
+    run_result_ = false;
+
+    OnTestIterationFinished();
+    return;
+  }
+
+  fprintf(stdout, "Retrying %" PRIuS " test%s (retry #%" PRIuS ")\n",
+          retry_started_count,
+          retry_started_count > 1 ? "s" : "",
+          retry_count_);
+  fflush(stdout);
+
+  test_started_count_ += retry_started_count;
+}
+
+// Helper used to parse test filter files. Syntax is documented in
+// //testing/buildbot/filters/README.md .
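+//
+// Example filter file contents (the test names are illustrative):
+//   # Run all FooTest cases except the flaky one.
+//   FooTest.*
+//   -FooTest.FlakyCase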
+bool LoadFilterFile(const FilePath& file_path,
+                    std::vector<std::string>* positive_filter,
+                    std::vector<std::string>* negative_filter) {
+  std::string file_content;
+  if (!ReadFileToString(file_path, &file_content)) {
+    LOG(ERROR) << "Failed to read the filter file.";
+    return false;
+  }
+
+  std::vector<std::string> filter_lines = SplitString(
+      file_content, "\n", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
+  int line_num = 0;
+  for (const std::string& filter_line : filter_lines) {
+    line_num++;
+
+    size_t hash_pos = filter_line.find('#');
+
+    // If the '#' symbol is not at the beginning of the line and is not
+    // preceded by a space, the comment was likely unintentional.
+    if (hash_pos != std::string::npos && hash_pos > 0 &&
+        filter_line[hash_pos - 1] != ' ') {
+      LOG(WARNING) << "Content of line " << line_num << " in " << file_path
+                   << " after # is treated as a comment, " << filter_line;
+    }
+
+    // Strip comments and whitespace from each line.
+    std::string trimmed_line =
+        TrimWhitespaceASCII(filter_line.substr(0, hash_pos), TRIM_ALL)
+            .as_string();
+
+    if (trimmed_line.substr(0, 2) == "//") {
+      LOG(ERROR) << "Line " << line_num << " in " << file_path
+                 << " starts with //, use # for comments.";
+      return false;
+    }
+
+    // Skip lines that are empty after stripping comments and whitespace.
+    if (trimmed_line.empty())
+      continue;
+
+    if (trimmed_line[0] == '-')
+      negative_filter->push_back(trimmed_line.substr(1));
+    else
+      positive_filter->push_back(trimmed_line);
+  }
+
+  return true;
+}
+
+bool TestLauncher::Init() {
+  const CommandLine* command_line = CommandLine::ForCurrentProcess();
+
+  // Initialize sharding. Command line takes precedence over legacy environment
+  // variables.
+  if (command_line->HasSwitch(switches::kTestLauncherTotalShards) &&
+      command_line->HasSwitch(switches::kTestLauncherShardIndex)) {
+    if (!StringToInt(
+            command_line->GetSwitchValueASCII(
+                switches::kTestLauncherTotalShards),
+            &total_shards_)) {
+      LOG(ERROR) << "Invalid value for " << switches::kTestLauncherTotalShards;
+      return false;
+    }
+    if (!StringToInt(
+            command_line->GetSwitchValueASCII(
+                switches::kTestLauncherShardIndex),
+            &shard_index_)) {
+      LOG(ERROR) << "Invalid value for " << switches::kTestLauncherShardIndex;
+      return false;
+    }
+    fprintf(stdout,
+            "Using sharding settings from command line. This is shard %d/%d\n",
+            shard_index_, total_shards_);
+    fflush(stdout);
+  } else {
+    if (!TakeInt32FromEnvironment(kTestTotalShards, &total_shards_))
+      return false;
+    if (!TakeInt32FromEnvironment(kTestShardIndex, &shard_index_))
+      return false;
+    fprintf(stdout,
+            "Using sharding settings from environment. This is shard %d/%d\n",
+            shard_index_, total_shards_);
+    fflush(stdout);
+  }
+  if (shard_index_ < 0 ||
+      total_shards_ < 0 ||
+      shard_index_ >= total_shards_) {
+    LOG(ERROR) << "Invalid sharding settings: we require 0 <= "
+               << kTestShardIndex << " < " << kTestTotalShards
+               << ", but you have " << kTestShardIndex << "=" << shard_index_
+               << ", " << kTestTotalShards << "=" << total_shards_ << ".\n";
+    return false;
+  }
+
+  // Make sure we don't pass any sharding-related environment to the child
+  // processes. This test launcher implements the sharding completely.
+  CHECK(UnsetEnvironmentVariableIfExists("GTEST_TOTAL_SHARDS"));
+  CHECK(UnsetEnvironmentVariableIfExists("GTEST_SHARD_INDEX"));
+
+  if (command_line->HasSwitch(kGTestRepeatFlag) &&
+      !StringToInt(command_line->GetSwitchValueASCII(kGTestRepeatFlag),
+                   &cycles_)) {
+    LOG(ERROR) << "Invalid value for " << kGTestRepeatFlag;
+    return false;
+  }
+
+  if (command_line->HasSwitch(switches::kTestLauncherRetryLimit)) {
+    int retry_limit = -1;
+    if (!StringToInt(command_line->GetSwitchValueASCII(
+                         switches::kTestLauncherRetryLimit), &retry_limit) ||
+        retry_limit < 0) {
+      LOG(ERROR) << "Invalid value for " << switches::kTestLauncherRetryLimit;
+      return false;
+    }
+
+    retry_limit_ = retry_limit;
+  } else if (!command_line->HasSwitch(kGTestFilterFlag) || BotModeEnabled()) {
+    // Retry failures 3 times by default if we are running all of the tests or
+    // in bot mode.
+    retry_limit_ = 3;
+  }
+
+  if (command_line->HasSwitch(switches::kTestLauncherForceRunBrokenTests))
+    force_run_broken_tests_ = true;
+
+  fprintf(stdout, "Using %" PRIuS " parallel jobs.\n", parallel_jobs_);
+  fflush(stdout);
+
+  CreateAndStartTaskScheduler(static_cast<int>(parallel_jobs_));
+
+  std::vector<std::string> positive_file_filter;
+  std::vector<std::string> positive_gtest_filter;
+
+  if (command_line->HasSwitch(switches::kTestLauncherFilterFile)) {
+    base::FilePath filter_file_path = base::MakeAbsoluteFilePath(
+        command_line->GetSwitchValuePath(switches::kTestLauncherFilterFile));
+    if (!LoadFilterFile(filter_file_path, &positive_file_filter,
+                        &negative_test_filter_))
+      return false;
+  }
+
+  // Split --gtest_filter at '-', if there is one, to separate into
+  // positive filter and negative filter portions.
+  std::string filter = command_line->GetSwitchValueASCII(kGTestFilterFlag);
+  size_t dash_pos = filter.find('-');
+  if (dash_pos == std::string::npos) {
+    positive_gtest_filter =
+        SplitString(filter, ":", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
+  } else {
+    // Everything up to the dash.
+    positive_gtest_filter =
+        SplitString(filter.substr(0, dash_pos), ":", base::TRIM_WHITESPACE,
+                    base::SPLIT_WANT_ALL);
+
+    // Everything after the dash.
+    for (std::string pattern :
+         SplitString(filter.substr(dash_pos + 1), ":", base::TRIM_WHITESPACE,
+                     base::SPLIT_WANT_ALL)) {
+      negative_test_filter_.push_back(pattern);
+    }
+  }
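+  // For example (hypothetical test names), --gtest_filter=Foo.*:Bar.*-Foo.Bad
+  // yields positive_gtest_filter = {"Foo.*", "Bar.*"} and appends "Foo.Bad"
+  // to negative_test_filter_.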
+
+  if (!launcher_delegate_->GetTests(&tests_)) {
+    LOG(ERROR) << "Failed to get list of tests.";
+    return false;
+  }
+
+  CombinePositiveTestFilters(std::move(positive_gtest_filter),
+                             std::move(positive_file_filter));
+
+  if (!results_tracker_.Init(*command_line)) {
+    LOG(ERROR) << "Failed to initialize test results tracker.";
+    return false;
+  }
+
+#if defined(NDEBUG)
+  results_tracker_.AddGlobalTag("MODE_RELEASE");
+#else
+  results_tracker_.AddGlobalTag("MODE_DEBUG");
+#endif
+
+  // Operating systems (sorted alphabetically).
+  // Note that they can deliberately overlap, e.g. OS_LINUX is a subset
+  // of OS_POSIX.
+#if defined(OS_ANDROID)
+  results_tracker_.AddGlobalTag("OS_ANDROID");
+#endif
+
+#if defined(OS_BSD)
+  results_tracker_.AddGlobalTag("OS_BSD");
+#endif
+
+#if defined(OS_FREEBSD)
+  results_tracker_.AddGlobalTag("OS_FREEBSD");
+#endif
+
+#if defined(OS_FUCHSIA)
+  results_tracker_.AddGlobalTag("OS_FUCHSIA");
+#endif
+
+#if defined(OS_IOS)
+  results_tracker_.AddGlobalTag("OS_IOS");
+#endif
+
+#if defined(OS_LINUX)
+  results_tracker_.AddGlobalTag("OS_LINUX");
+#endif
+
+#if defined(OS_MACOSX)
+  results_tracker_.AddGlobalTag("OS_MACOSX");
+#endif
+
+#if defined(OS_NACL)
+  results_tracker_.AddGlobalTag("OS_NACL");
+#endif
+
+#if defined(OS_OPENBSD)
+  results_tracker_.AddGlobalTag("OS_OPENBSD");
+#endif
+
+#if defined(OS_POSIX)
+  results_tracker_.AddGlobalTag("OS_POSIX");
+#endif
+
+#if defined(OS_SOLARIS)
+  results_tracker_.AddGlobalTag("OS_SOLARIS");
+#endif
+
+#if defined(OS_WIN)
+  results_tracker_.AddGlobalTag("OS_WIN");
+#endif
+
+  // CPU-related tags.
+#if defined(ARCH_CPU_32_BITS)
+  results_tracker_.AddGlobalTag("CPU_32_BITS");
+#endif
+
+#if defined(ARCH_CPU_64_BITS)
+  results_tracker_.AddGlobalTag("CPU_64_BITS");
+#endif
+
+  return true;
+}
+
+void TestLauncher::CombinePositiveTestFilters(
+    std::vector<std::string> filter_a,
+    std::vector<std::string> filter_b) {
+  has_at_least_one_positive_filter_ = !filter_a.empty() || !filter_b.empty();
+  if (!has_at_least_one_positive_filter_) {
+    return;
+  }
+  // If two positive filters are present, only run tests that match a pattern
+  // in both filters.
+  if (!filter_a.empty() && !filter_b.empty()) {
+    for (size_t i = 0; i < tests_.size(); i++) {
+      std::string test_name =
+          FormatFullTestName(tests_[i].test_case_name, tests_[i].test_name);
+      bool found_a = false;
+      bool found_b = false;
+      for (size_t k = 0; k < filter_a.size(); ++k) {
+        found_a = found_a || MatchPattern(test_name, filter_a[k]);
+      }
+      for (size_t k = 0; k < filter_b.size(); ++k) {
+        found_b = found_b || MatchPattern(test_name, filter_b[k]);
+      }
+      if (found_a && found_b) {
+        positive_test_filter_.push_back(test_name);
+      }
+    }
+  } else if (!filter_a.empty()) {
+    positive_test_filter_ = std::move(filter_a);
+  } else {
+    positive_test_filter_ = std::move(filter_b);
+  }
+}
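+
+// For example (hypothetical patterns), with filter_a = {"Foo.*"} and
+// filter_b = {"*.Bar"}, only tests matching a pattern in both lists - such
+// as "Foo.Bar" - end up in positive_test_filter_.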
+
+void TestLauncher::RunTests() {
+  std::vector<std::string> test_names;
+  const CommandLine* command_line = CommandLine::ForCurrentProcess();
+  for (const TestIdentifier& test_id : tests_) {
+    std::string test_name =
+        FormatFullTestName(test_id.test_case_name, test_id.test_name);
+
+    results_tracker_.AddTest(test_name);
+
+    if (test_name.find("DISABLED") != std::string::npos) {
+      results_tracker_.AddDisabledTest(test_name);
+
+      // Skip disabled tests unless explicitly requested.
+      if (!command_line->HasSwitch(kGTestRunDisabledTestsFlag))
+        continue;
+    }
+
+    if (!launcher_delegate_->ShouldRunTest(test_id.test_case_name,
+                                           test_id.test_name)) {
+      continue;
+    }
+
+    // Count tests in the binary, before we apply filter and sharding.
+    test_found_count_++;
+
+    std::string test_name_no_disabled =
+        TestNameWithoutDisabledPrefix(test_name);
+
+    // Skip tests that don't match the filter (if given).
+    if (has_at_least_one_positive_filter_) {
+      bool found = false;
+      for (auto filter : positive_test_filter_) {
+        if (MatchPattern(test_name, filter) ||
+            MatchPattern(test_name_no_disabled, filter)) {
+          found = true;
+          break;
+        }
+      }
+
+      if (!found)
+        continue;
+    }
+    if (!negative_test_filter_.empty()) {
+      bool excluded = false;
+      for (auto filter : negative_test_filter_) {
+        if (MatchPattern(test_name, filter) ||
+            MatchPattern(test_name_no_disabled, filter)) {
+          excluded = true;
+          break;
+        }
+      }
+
+      if (excluded)
+        continue;
+    }
+
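+    // Distribute tests among shards by hashing the test name. For example
+    // (illustrative values), with total_shards_ == 4 and shard_index_ == 1,
+    // this shard runs exactly those tests where Hash(test_name) % 4 == 1.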
+    if (Hash(test_name) % total_shards_ != static_cast<uint32_t>(shard_index_))
+      continue;
+
+    // Report test locations after applying all filters, so that we report test
+    // locations only for those tests that were run as part of this shard.
+    results_tracker_.AddTestLocation(test_name, test_id.file, test_id.line);
+
+    test_names.push_back(test_name);
+  }
+
+  // Save an early test summary in case the launcher crashes or gets killed.
+  MaybeSaveSummaryAsJSON({"EARLY_SUMMARY", kUnreliableResultsTag});
+
+  test_started_count_ = launcher_delegate_->RunTests(this, test_names);
+
+  if (test_started_count_ == 0) {
+    fprintf(stdout, "0 tests run\n");
+    fflush(stdout);
+
+    // No tests have actually been started, so kick off the next iteration.
+    ThreadTaskRunnerHandle::Get()->PostTask(
+        FROM_HERE, BindOnce(&TestLauncher::RunTestIteration, Unretained(this)));
+  }
+}
+
+void TestLauncher::RunTestIteration() {
+  const bool stop_on_failure =
+      CommandLine::ForCurrentProcess()->HasSwitch(kGTestBreakOnFailure);
+  if (cycles_ == 0 ||
+      (stop_on_failure && test_success_count_ != test_finished_count_)) {
+    RunLoop::QuitCurrentWhenIdleDeprecated();
+    return;
+  }
+
+  // Special value "-1" means "repeat indefinitely".
+  cycles_ = (cycles_ == -1) ? cycles_ : cycles_ - 1;
+
+  test_found_count_ = 0;
+  test_started_count_ = 0;
+  test_finished_count_ = 0;
+  test_success_count_ = 0;
+  test_broken_count_ = 0;
+  retry_count_ = 0;
+  tests_to_retry_.clear();
+  results_tracker_.OnTestIterationStarting();
+
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce(&TestLauncher::RunTests, Unretained(this)));
+}
+
+#if defined(OS_POSIX) && !defined(OS_FUCHSIA)
+// I/O watcher for the reading end of the self-pipe above.
+// Terminates any launched child processes and exits the process.
+void TestLauncher::OnShutdownPipeReadable() {
+  fprintf(stdout, "\nCaught signal. Killing spawned test processes...\n");
+  fflush(stdout);
+
+  KillSpawnedTestProcesses();
+
+  MaybeSaveSummaryAsJSON({"CAUGHT_TERMINATION_SIGNAL", kUnreliableResultsTag});
+
+  // The signal would normally kill the process, so exit now.
+  _exit(1);
+}
+#endif  // defined(OS_POSIX) && !defined(OS_FUCHSIA)
+
+void TestLauncher::MaybeSaveSummaryAsJSON(
+    const std::vector<std::string>& additional_tags) {
+  const CommandLine* command_line = CommandLine::ForCurrentProcess();
+  if (command_line->HasSwitch(switches::kTestLauncherSummaryOutput)) {
+    FilePath summary_path(command_line->GetSwitchValuePath(
+                              switches::kTestLauncherSummaryOutput));
+    if (!results_tracker_.SaveSummaryAsJSON(summary_path, additional_tags)) {
+      LOG(ERROR) << "Failed to save test launcher output summary.";
+    }
+  }
+  if (command_line->HasSwitch(switches::kTestLauncherTrace)) {
+    FilePath trace_path(
+        command_line->GetSwitchValuePath(switches::kTestLauncherTrace));
+    if (!GetTestLauncherTracer()->Dump(trace_path)) {
+      LOG(ERROR) << "Failed to save test launcher trace.";
+    }
+  }
+}
+
+void TestLauncher::OnTestIterationFinished() {
+  TestResultsTracker::TestStatusMap tests_by_status(
+      results_tracker_.GetTestStatusMapForCurrentIteration());
+  if (!tests_by_status[TestResult::TEST_UNKNOWN].empty())
+    results_tracker_.AddGlobalTag(kUnreliableResultsTag);
+
+  // When we retry tests, success is determined by having nothing more
+  // to retry (everything eventually passed), as opposed to having
+  // no failures at all.
+  if (tests_to_retry_.empty()) {
+    fprintf(stdout, "SUCCESS: all tests passed.\n");
+    fflush(stdout);
+  } else {
+    // Signal failure, but continue to run all requested test iterations.
+    // With the summary of all iterations at the end this is a good default.
+    run_result_ = false;
+  }
+
+  results_tracker_.PrintSummaryOfCurrentIteration();
+
+  // Kick off the next iteration.
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce(&TestLauncher::RunTestIteration, Unretained(this)));
+}
+
+void TestLauncher::OnOutputTimeout() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  AutoLock lock(*GetLiveProcessesLock());
+
+  fprintf(stdout, "Still waiting for the following processes to finish:\n");
+
+  for (const auto& pair : *GetLiveProcesses()) {
+#if defined(OS_WIN)
+    fwprintf(stdout, L"\t%s\n", pair.second.GetCommandLineString().c_str());
+#else
+    fprintf(stdout, "\t%s\n", pair.second.GetCommandLineString().c_str());
+#endif
+
+#if defined(OS_FUCHSIA)
+    // TODO(scottmg): Temporary code to try to identify why child processes
+    // appear to not be terminated after a timeout correctly.
+    // https://crbug.com/750370 and https://crbug.com/738275.
+
+    zx_info_process_t proc_info = {};
+    zx_status_t status =
+        zx_object_get_info(pair.first, ZX_INFO_PROCESS, &proc_info,
+                           sizeof(proc_info), nullptr, nullptr);
+    if (status != ZX_OK) {
+      fprintf(stdout, "zx_object_get_info failed for '%s', status=%d\n",
+              pair.second.GetCommandLineString().c_str(), status);
+    } else {
+      fprintf(stdout, "  return_code=%d\n", proc_info.return_code);
+      fprintf(stdout, "  started=%d\n", proc_info.started);
+      fprintf(stdout, "  exited=%d\n", proc_info.exited);
+      fprintf(stdout, "  debugger_attached=%d\n", proc_info.debugger_attached);
+    }
+#endif  // OS_FUCHSIA
+  }
+
+  fflush(stdout);
+
+  // Arm the timer again - otherwise it would fire only once.
+  watchdog_timer_.Reset();
+}
+
+size_t NumParallelJobs() {
+  const CommandLine* command_line = CommandLine::ForCurrentProcess();
+  if (command_line->HasSwitch(switches::kTestLauncherJobs)) {
+    // If the number of test launcher jobs was specified, return that number.
+    size_t jobs = 0U;
+
+    if (!StringToSizeT(
+            command_line->GetSwitchValueASCII(switches::kTestLauncherJobs),
+            &jobs) ||
+        !jobs) {
+      LOG(ERROR) << "Invalid value for " << switches::kTestLauncherJobs;
+      return 0U;
+    }
+    return jobs;
+  }
+  if (command_line->HasSwitch(kGTestFilterFlag) && !BotModeEnabled()) {
+    // Do not run jobs in parallel by default if we are running a subset of
+    // the tests and if bot mode is off.
+    return 1U;
+  }
+
+  // Default to the number of processor cores.
+  return base::checked_cast<size_t>(SysInfo::NumberOfProcessors());
+}
+
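+// For illustration, given a |result| for a hypothetical failed test "Foo.Bar"
+// and |full_output| containing:
+//
+//   [ RUN      ] Foo.Bar
+//   some output...
+//   [  FAILED  ] Foo.Bar (12 ms)
+//   [ RUN      ] Foo.Baz
+//
+// this function returns the three lines from "[ RUN      ] Foo.Bar" up to
+// and including the "[  FAILED  ] Foo.Bar" line.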
+std::string GetTestOutputSnippet(const TestResult& result,
+                                 const std::string& full_output) {
+  size_t run_pos = full_output.find(std::string("[ RUN      ] ") +
+                                    result.full_name);
+  if (run_pos == std::string::npos)
+    return std::string();
+
+  size_t end_pos = full_output.find(std::string("[  FAILED  ] ") +
+                                    result.full_name,
+                                    run_pos);
+  // Only clip the snippet to the "OK" message if the test really
+  // succeeded. It still might have e.g. crashed after printing it.
+  if (end_pos == std::string::npos &&
+      result.status == TestResult::TEST_SUCCESS) {
+    end_pos = full_output.find(std::string("[       OK ] ") +
+                               result.full_name,
+                               run_pos);
+  }
+  if (end_pos != std::string::npos) {
+    size_t newline_pos = full_output.find("\n", end_pos);
+    if (newline_pos != std::string::npos)
+      end_pos = newline_pos + 1;
+  }
+
+  std::string snippet(full_output.substr(run_pos));
+  if (end_pos != std::string::npos)
+    snippet = full_output.substr(run_pos, end_pos - run_pos);
+
+  return snippet;
+}
+
+}  // namespace base
diff --git a/base/test/launcher/test_launcher.h b/base/test/launcher/test_launcher.h
new file mode 100644
index 0000000..88b9f1f
--- /dev/null
+++ b/base/test/launcher/test_launcher.h
@@ -0,0 +1,262 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_LAUNCHER_TEST_LAUNCHER_H_
+#define BASE_TEST_LAUNCHER_TEST_LAUNCHER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/process/launch.h"
+#include "base/test/gtest_util.h"
+#include "base/test/launcher/test_result.h"
+#include "base/test/launcher/test_results_tracker.h"
+#include "base/threading/thread_checker.h"
+#include "base/time/time.h"
+#include "base/timer/timer.h"
+#include "build/build_config.h"
+
+namespace base {
+
+class CommandLine;
+struct LaunchOptions;
+class TestLauncher;
+
+// Constants for GTest command-line flags.
+extern const char kGTestFilterFlag[];
+extern const char kGTestFlagfileFlag[];
+extern const char kGTestHelpFlag[];
+extern const char kGTestListTestsFlag[];
+extern const char kGTestRepeatFlag[];
+extern const char kGTestRunDisabledTestsFlag[];
+extern const char kGTestOutputFlag[];
+
+// Interface for use with LaunchTests that abstracts away the exact details
+// of which tests are run and how.
+class TestLauncherDelegate {
+ public:
+  // Called to get names of tests available for running. The delegate
+  // must put the result in |output| and return true on success.
+  virtual bool GetTests(std::vector<TestIdentifier>* output) = 0;
+
+  // Called before a test is considered for running. If it returns false,
+  // the test is not run. If it returns true, the test will be run provided
+  // it is part of the current shard.
+  virtual bool ShouldRunTest(const std::string& test_case_name,
+                             const std::string& test_name) = 0;
+
+  // Called to make the delegate run the specified tests. The delegate must
+  // return the number of tests it actually starts (which can be smaller
+  // than, equal to, or larger than the size of |test_names|). It must also
+  // call |test_launcher|'s OnTestFinished method once for each test run,
+  // regardless of its success.
+  virtual size_t RunTests(TestLauncher* test_launcher,
+                          const std::vector<std::string>& test_names) = 0;
+
+  // Called to make the delegate retry the specified tests. The delegate must
+  // return the number of tests it actually retries (which can be smaller
+  // than, equal to, or larger than the size of |test_names|). It must also
+  // call |test_launcher|'s OnTestFinished method once for each retried test,
+  // regardless of its success.
+  virtual size_t RetryTests(TestLauncher* test_launcher,
+                            const std::vector<std::string>& test_names) = 0;
+
+ protected:
+  virtual ~TestLauncherDelegate();
+};
+
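+// A minimal sketch of a delegate, for illustration only (the class name and
+// trivial bodies are hypothetical; a real delegate would launch child gtest
+// processes and call test_launcher->OnTestFinished() for each result):
+//
+//   class NullTestLauncherDelegate : public TestLauncherDelegate {
+//    public:
+//     bool GetTests(std::vector<TestIdentifier>* output) override {
+//       return true;  // Reports an empty test list.
+//     }
+//     bool ShouldRunTest(const std::string& test_case_name,
+//                        const std::string& test_name) override {
+//       return true;
+//     }
+//     size_t RunTests(TestLauncher* test_launcher,
+//                     const std::vector<std::string>& test_names) override {
+//       return 0;  // No tests started; the launcher proceeds to the next
+//                  // iteration.
+//     }
+//     size_t RetryTests(TestLauncher* test_launcher,
+//                       const std::vector<std::string>& test_names) override {
+//       return 0;
+//     }
+//   };
+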
+// An observer of child process lifetime events generated by
+// LaunchChildGTestProcess.
+class ProcessLifetimeObserver {
+ public:
+  virtual ~ProcessLifetimeObserver() = default;
+
+  // Invoked once the child process is started. |handle| is a handle to the
+  // child process and |id| is its pid. NOTE: this method is invoked on the
+  // thread the process is launched on immediately after it is launched. The
+  // caller owns the ProcessHandle.
+  virtual void OnLaunched(ProcessHandle handle, ProcessId id) {}
+
+  // Invoked when a test process exceeds its runtime, immediately before it is
+  // terminated. |command_line| is the command line used to launch the process.
+  // NOTE: this method is invoked on the thread the process is launched on.
+  virtual void OnTimedOut(const CommandLine& command_line) {}
+
+  // Invoked after a child process finishes, reporting the process |exit_code|,
+  // child process |elapsed_time|, whether or not the process was terminated as
+  // a result of a timeout, and the output of the child (stdout and stderr
+  // together). NOTE: this method is invoked on the same thread as
+  // LaunchChildGTestProcess.
+  virtual void OnCompleted(int exit_code,
+                           TimeDelta elapsed_time,
+                           bool was_timeout,
+                           const std::string& output) {}
+
+ protected:
+  ProcessLifetimeObserver() = default;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ProcessLifetimeObserver);
+};
+
+// Launches tests using a TestLauncherDelegate.
+class TestLauncher {
+ public:
+  // Flags controlling behavior of LaunchChildGTestProcess.
+  enum LaunchChildGTestProcessFlags {
+    // Allows usage of job objects on Windows. Helps properly clean up child
+    // processes.
+    USE_JOB_OBJECTS = (1 << 0),
+
+    // Allows breakaway from job on Windows. May result in some child processes
+    // not being properly terminated after launcher dies if these processes
+    // fail to cooperate.
+    ALLOW_BREAKAWAY_FROM_JOB = (1 << 1),
+  };
+
+  struct LaunchOptions {
+    LaunchOptions();
+    LaunchOptions(const LaunchOptions& other);
+    ~LaunchOptions();
+
+    int flags = 0;
+    // These mirror values in base::LaunchOptions, see it for details.
+#if defined(OS_WIN)
+    base::LaunchOptions::Inherit inherit_mode =
+        base::LaunchOptions::Inherit::kSpecific;
+    base::HandlesToInheritVector handles_to_inherit;
+#elif defined(OS_POSIX)
+    FileHandleMappingVector fds_to_remap;
+#endif
+  };
+
+  // Constructor. |parallel_jobs| is the limit of simultaneous parallel test
+  // jobs.
+  TestLauncher(TestLauncherDelegate* launcher_delegate, size_t parallel_jobs);
+  ~TestLauncher();
+
+  // Runs the launcher. Must be called at most once.
+  bool Run() WARN_UNUSED_RESULT;
+
+  // Launches a child process (assumed to be gtest-based binary) using
+  // |command_line|. If |wrapper| is not empty, it is prepended to the final
+  // command line. |observer|, if not null, is used to convey process lifetime
+  // events to the caller. |observer| is destroyed after its OnCompleted
+  // method is invoked.
+  void LaunchChildGTestProcess(
+      const CommandLine& command_line,
+      const std::string& wrapper,
+      TimeDelta timeout,
+      const LaunchOptions& options,
+      std::unique_ptr<ProcessLifetimeObserver> observer);
+
+  // Called when a test has finished running.
+  void OnTestFinished(const TestResult& result);
+
+ private:
+  bool Init() WARN_UNUSED_RESULT;
+
+  // Runs all tests in current iteration.
+  void RunTests();
+
+  void CombinePositiveTestFilters(std::vector<std::string> filter_a,
+                                  std::vector<std::string> filter_b);
+
+  void RunTestIteration();
+
+#if defined(OS_POSIX)
+  void OnShutdownPipeReadable();
+#endif
+
+  // Saves test results summary as JSON if requested from command line.
+  void MaybeSaveSummaryAsJSON(const std::vector<std::string>& additional_tags);
+
+  // Called when a test iteration is finished.
+  void OnTestIterationFinished();
+
+  // Called by the delay timer when no output was made for a while.
+  void OnOutputTimeout();
+
+  // Make sure we don't accidentally call the wrong methods e.g. on the worker
+  // pool thread. Should be the first member so that it's destroyed last: when
+  // destroying other members, especially the worker pool, we may check that
+  // the code is running on the correct thread.
+  ThreadChecker thread_checker_;
+
+  TestLauncherDelegate* launcher_delegate_;
+
+  // Support for outer sharding, just like gtest does.
+  int32_t total_shards_;  // Total number of outer shards, at least one.
+  int32_t shard_index_;   // Index of shard the launcher is to run.
+
+  int cycles_;  // Number of remaining test iterations, or -1 for infinite.
+
+  // Test filters (empty means no filter).
+  bool has_at_least_one_positive_filter_;
+  std::vector<std::string> positive_test_filter_;
+  std::vector<std::string> negative_test_filter_;
+
+  // Tests to use (cached result of TestLauncherDelegate::GetTests).
+  std::vector<TestIdentifier> tests_;
+
+  // Number of tests found in this binary.
+  size_t test_found_count_;
+
+  // Number of tests started in this iteration.
+  size_t test_started_count_;
+
+  // Number of tests finished in this iteration.
+  size_t test_finished_count_;
+
+  // Number of tests successfully finished in this iteration.
+  size_t test_success_count_;
+
+  // Number of tests either timing out or having an unknown result,
+  // likely indicating a more systemic problem if widespread.
+  size_t test_broken_count_;
+
+  // Number of retries in this iteration.
+  size_t retry_count_;
+
+  // Maximum number of retries per iteration.
+  size_t retry_limit_;
+
+  // If true, the launcher will neither exit early nor skip retries, even if
+  // too many tests are broken.
+  bool force_run_broken_tests_;
+
+  // Tests to retry in this iteration.
+  std::set<std::string> tests_to_retry_;
+
+  // Result to be returned from Run.
+  bool run_result_;
+
+  TestResultsTracker results_tracker_;
+
+  // Watchdog timer to make sure we do not go without output for too long.
+  DelayTimer watchdog_timer_;
+
+  // Number of jobs to run in parallel.
+  size_t parallel_jobs_;
+
+  DISALLOW_COPY_AND_ASSIGN(TestLauncher);
+};
+
+// Return the number of parallel jobs to use, or 0U in case of error.
+size_t NumParallelJobs();
+
+// Extracts the part of |full_output| that applies to |result|.
+std::string GetTestOutputSnippet(const TestResult& result,
+                                 const std::string& full_output);
+
+}  // namespace base
+
+#endif  // BASE_TEST_LAUNCHER_TEST_LAUNCHER_H_
diff --git a/base/test/launcher/test_launcher_nacl_nonsfi.cc b/base/test/launcher/test_launcher_nacl_nonsfi.cc
new file mode 100644
index 0000000..bdc4f67
--- /dev/null
+++ b/base/test/launcher/test_launcher_nacl_nonsfi.cc
@@ -0,0 +1,170 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <inttypes.h>
+#include <stdio.h>
+
+#include <string>
+
+#include "base/command_line.h"
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/message_loop/message_loop.h"
+#include "base/path_service.h"
+#include "base/process/launch.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/string_tokenizer.h"
+#include "base/strings/string_util.h"
+#include "base/sys_info.h"
+#include "base/test/launcher/test_launcher.h"
+#include "base/test/launcher/unit_test_launcher.h"
+#include "base/test/test_switches.h"
+#include "base/test/test_timeouts.h"
+#include "build/build_config.h"
+
+#if defined(OS_POSIX)
+#include "base/files/file_descriptor_watcher_posix.h"
+#endif
+
+namespace base {
+
+namespace {
+
+const char kHelpFlag[] = "help";
+
+void PrintUsage() {
+  fprintf(stdout,
+          "Runs tests using the gtest framework, each batch of tests being\n"
+          "run in their own process. Supported command-line flags:\n"
+          "\n"
+          " Common flags:\n"
+          "  --gtest_filter=...\n"
+          "    Runs a subset of tests (see --gtest_help for more info).\n"
+          "\n"
+          "  --help\n"
+          "    Shows this message.\n"
+          "\n"
+          " Other flags:\n"
+          "  --test-launcher-retry-limit=N\n"
+          "    Sets the limit of test retries on failures to N.\n"
+          "\n"
+          "  --test-launcher-summary-output=PATH\n"
+          "    Saves a JSON machine-readable summary of the run.\n"
+          "\n"
+          "  --test-launcher-print-test-stdio=auto|always|never\n"
+          "    Controls when full test output is printed.\n"
+          "    auto means to print it when the test failed.\n"
+          "\n"
+          "  --test-launcher-total-shards=N\n"
+          "    Sets the total number of shards to N.\n"
+          "\n"
+          "  --test-launcher-shard-index=N\n"
+          "    Sets the shard index to run to N (from 0 to TOTAL - 1).\n");
+  fflush(stdout);
+}
+
+class NonSfiUnitTestPlatformDelegate : public base::UnitTestPlatformDelegate {
+ public:
+  NonSfiUnitTestPlatformDelegate() = default;
+
+  bool Init(const std::string& test_binary) {
+    base::FilePath dir_exe;
+    if (!PathService::Get(base::DIR_EXE, &dir_exe)) {
+      LOG(ERROR) << "Failed to get directory of the current executable.";
+      return false;
+    }
+
+    test_path_ = dir_exe.AppendASCII(test_binary);
+    return true;
+  }
+
+ private:
+  bool CreateResultsFile(base::FilePath* path) override {
+    if (!base::CreateNewTempDirectory(base::FilePath::StringType(), path))
+      return false;
+    *path = path->AppendASCII("test_results.xml");
+    return true;
+  }
+
+  bool CreateTemporaryFile(base::FilePath* path) override { return false; }
+
+  bool GetTests(std::vector<base::TestIdentifier>* output) override {
+    base::FilePath output_file;
+    if (!base::CreateTemporaryFile(&output_file)) {
+      LOG(ERROR) << "Failed to create a temp file.";
+      return false;
+    }
+
+    base::CommandLine cmd_line(test_path_);
+    cmd_line.AppendSwitchPath(switches::kTestLauncherListTests, output_file);
+
+    base::LaunchOptions launch_options;
+    launch_options.wait = true;
+
+    if (!base::LaunchProcess(cmd_line, launch_options).IsValid())
+      return false;
+
+    return base::ReadTestNamesFromFile(output_file, output);
+  }
+
+  std::string GetWrapperForChildGTestProcess() override {
+    return std::string();
+  }
+
+  base::CommandLine GetCommandLineForChildGTestProcess(
+      const std::vector<std::string>& test_names,
+      const base::FilePath& output_file,
+      const base::FilePath& flag_file) override {
+    base::CommandLine cmd_line(test_path_);
+    cmd_line.AppendSwitchPath(
+        switches::kTestLauncherOutput, output_file);
+    cmd_line.AppendSwitchASCII(
+        base::kGTestFilterFlag, base::JoinString(test_names, ":"));
+    return cmd_line;
+  }
+
+  void RelaunchTests(base::TestLauncher* test_launcher,
+                     const std::vector<std::string>& test_names,
+                     int launch_flags) override {
+    RunUnitTestsBatch(test_launcher, this, test_names, launch_flags);
+  }
+
+  base::FilePath test_path_;
+};
+
+}  // namespace
+
+int TestLauncherNonSfiMain(const std::string& test_binary) {
+  if (base::CommandLine::ForCurrentProcess()->HasSwitch(kHelpFlag)) {
+    PrintUsage();
+    return 0;
+  }
+
+  base::TimeTicks start_time(base::TimeTicks::Now());
+
+  TestTimeouts::Initialize();
+
+  base::MessageLoopForIO message_loop;
+#if defined(OS_POSIX)
+  FileDescriptorWatcher file_descriptor_watcher(&message_loop);
+#endif
+
+  NonSfiUnitTestPlatformDelegate platform_delegate;
+  if (!platform_delegate.Init(test_binary)) {
+    fprintf(stderr, "Failed to initialize test launcher.\n");
+    fflush(stderr);
+    return 1;
+  }
+
+  base::UnitTestLauncherDelegate delegate(&platform_delegate, 10, true);
+  base::TestLauncher launcher(&delegate, base::SysInfo::NumberOfProcessors());
+  bool success = launcher.Run();
+
+  fprintf(stdout, "Tests took %" PRId64 " seconds.\n",
+          (base::TimeTicks::Now() - start_time).InSeconds());
+  fflush(stdout);
+  return success ? 0 : 1;
+}
+
+}  // namespace base
diff --git a/base/test/launcher/test_launcher_nacl_nonsfi.h b/base/test/launcher/test_launcher_nacl_nonsfi.h
new file mode 100644
index 0000000..6cb3785
--- /dev/null
+++ b/base/test/launcher/test_launcher_nacl_nonsfi.h
@@ -0,0 +1,17 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_LAUNCHER_TEST_LAUNCHER_NACL_NONSFI_H_
+#define BASE_TEST_LAUNCHER_TEST_LAUNCHER_NACL_NONSFI_H_
+
+#include <string>
+
+namespace base {
+
+// Launches the NaCl Non-SFI test binary |test_binary|.
+int TestLauncherNonSfiMain(const std::string& test_binary);
+
+}  // namespace base
+
+#endif  // BASE_TEST_LAUNCHER_TEST_LAUNCHER_NACL_NONSFI_H_
diff --git a/base/test/launcher/test_launcher_tracer.cc b/base/test/launcher/test_launcher_tracer.cc
new file mode 100644
index 0000000..d525df7
--- /dev/null
+++ b/base/test/launcher/test_launcher_tracer.cc
@@ -0,0 +1,53 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/launcher/test_launcher_tracer.h"
+
+#include "base/json/json_file_value_serializer.h"
+#include "base/strings/stringprintf.h"
+#include "base/values.h"
+
+namespace base {
+
+TestLauncherTracer::TestLauncherTracer()
+    : trace_start_time_(TimeTicks::Now()) {}
+
+TestLauncherTracer::~TestLauncherTracer() = default;
+
+void TestLauncherTracer::RecordProcessExecution(TimeTicks start_time,
+                                                TimeDelta duration) {
+  AutoLock lock(lock_);
+
+  Event event;
+  event.name = StringPrintf("process #%zu", events_.size());
+  event.timestamp = start_time;
+  event.duration = duration;
+  event.thread_id = PlatformThread::CurrentId();
+  events_.push_back(event);
+}
+
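+// Dumps the recorded events in the Chrome trace viewer's JSON array format.
+// For illustration, a single recorded execution might serialize roughly as
+// (hypothetical values):
+//
+//   [ { "name": "process #0", "ph": "X", "ts": 0, "dur": 1500,
+//       "tid": 12345, "pid": 0 } ]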
+bool TestLauncherTracer::Dump(const FilePath& path) {
+  AutoLock lock(lock_);
+
+  std::unique_ptr<ListValue> json_events(new ListValue);
+  for (const Event& event : events_) {
+    std::unique_ptr<DictionaryValue> json_event(new DictionaryValue);
+    json_event->SetString("name", event.name);
+    json_event->SetString("ph", "X");
+    json_event->SetInteger(
+        "ts", (event.timestamp - trace_start_time_).InMicroseconds());
+    json_event->SetInteger("dur", event.duration.InMicroseconds());
+    json_event->SetInteger("tid", event.thread_id);
+
+    // Add fake values required by the trace viewer.
+    json_event->SetInteger("pid", 0);
+
+    json_events->Append(std::move(json_event));
+  }
+
+  JSONFileValueSerializer serializer(path);
+  return serializer.Serialize(*json_events);
+}
+
+}  // namespace base
diff --git a/base/test/launcher/test_launcher_tracer.h b/base/test/launcher/test_launcher_tracer.h
new file mode 100644
index 0000000..58bc1b0
--- /dev/null
+++ b/base/test/launcher/test_launcher_tracer.h
@@ -0,0 +1,55 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_LAUNCHER_TEST_LAUNCHER_TRACER_H_
+#define BASE_TEST_LAUNCHER_TEST_LAUNCHER_TRACER_H_
+
+#include <string>
+#include <vector>
+
+#include "base/synchronization/lock.h"
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
+
+namespace base {
+
+class FilePath;
+
+// Records traces of test execution, e.g. to analyze performance.
+// Thread safe.
+class TestLauncherTracer {
+ public:
+  TestLauncherTracer();
+  ~TestLauncherTracer();
+
+  // Records an event corresponding to test process execution.
+  void RecordProcessExecution(TimeTicks start_time, TimeDelta duration);
+
+  // Dumps trace data as JSON. Returns true on success.
+  bool Dump(const FilePath& path) WARN_UNUSED_RESULT;
+
+ private:
+  // Simplified version of base::TraceEvent.
+  struct Event {
+    std::string name;            // Displayed name.
+    TimeTicks timestamp;         // Timestamp when this event began.
+    TimeDelta duration;          // How long the event lasted.
+    PlatformThreadId thread_id;  // Thread ID where event was reported.
+  };
+
+  // Timestamp when tracing started.
+  TimeTicks trace_start_time_;
+
+  // Log of trace events.
+  std::vector<Event> events_;
+
+  // Lock to protect all member variables.
+  Lock lock_;
+
+  DISALLOW_COPY_AND_ASSIGN(TestLauncherTracer);
+};
+
+}  // namespace base
+
+#endif  // BASE_TEST_LAUNCHER_TEST_LAUNCHER_TRACER_H_
diff --git a/base/test/launcher/test_result.cc b/base/test/launcher/test_result.cc
new file mode 100644
index 0000000..9f37a2b
--- /dev/null
+++ b/base/test/launcher/test_result.cc
@@ -0,0 +1,96 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/launcher/test_result.h"
+
+#include <stddef.h>
+
+#include "base/logging.h"
+
+namespace base {
+
+TestResultPart::TestResultPart() = default;
+TestResultPart::~TestResultPart() = default;
+
+TestResultPart::TestResultPart(const TestResultPart& other) = default;
+TestResultPart::TestResultPart(TestResultPart&& other) = default;
+TestResultPart& TestResultPart::operator=(const TestResultPart& other) =
+    default;
+TestResultPart& TestResultPart::operator=(TestResultPart&& other) = default;
+
+// static
+bool TestResultPart::TypeFromString(const std::string& str, Type* type) {
+  if (str == "success")
+    *type = kSuccess;
+  else if (str == "failure")
+    *type = kNonFatalFailure;
+  else if (str == "fatal_failure")
+    *type = kFatalFailure;
+  else
+    return false;
+  return true;
+}
+
+std::string TestResultPart::TypeAsString() const {
+  switch (type) {
+    case kSuccess:
+      return "success";
+    case kNonFatalFailure:
+      return "failure";
+    case kFatalFailure:
+      return "fatal_failure";
+    default:
+      NOTREACHED();
+  }
+  return "unknown";
+}
+
+TestResult::TestResult() : status(TEST_UNKNOWN) {
+}
+
+TestResult::~TestResult() = default;
+
+TestResult::TestResult(const TestResult& other) = default;
+TestResult::TestResult(TestResult&& other) = default;
+TestResult& TestResult::operator=(const TestResult& other) = default;
+TestResult& TestResult::operator=(TestResult&& other) = default;
+
+std::string TestResult::StatusAsString() const {
+  switch (status) {
+    case TEST_UNKNOWN:
+      return "UNKNOWN";
+    case TEST_SUCCESS:
+      return "SUCCESS";
+    case TEST_FAILURE:
+      return "FAILURE";
+    case TEST_FAILURE_ON_EXIT:
+      return "FAILURE_ON_EXIT";
+    case TEST_CRASH:
+      return "CRASH";
+    case TEST_TIMEOUT:
+      return "TIMEOUT";
+    case TEST_SKIPPED:
+      return "SKIPPED";
+    case TEST_EXCESSIVE_OUTPUT:
+      return "EXCESSIVE_OUTPUT";
+      // Rely on compiler warnings to ensure all possible values are handled.
+  }
+
+  NOTREACHED();
+  return std::string();
+}
+
+std::string TestResult::GetTestName() const {
+  size_t dot_pos = full_name.find('.');
+  CHECK_NE(dot_pos, std::string::npos);
+  return full_name.substr(dot_pos + 1);
+}
+
+std::string TestResult::GetTestCaseName() const {
+  size_t dot_pos = full_name.find('.');
+  CHECK_NE(dot_pos, std::string::npos);
+  return full_name.substr(0, dot_pos);
+}
+
+}  // namespace base
diff --git a/base/test/launcher/test_result.h b/base/test/launcher/test_result.h
new file mode 100644
index 0000000..07338b3
--- /dev/null
+++ b/base/test/launcher/test_result.h
@@ -0,0 +1,104 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_LAUNCHER_TEST_RESULT_H_
+#define BASE_TEST_LAUNCHER_TEST_RESULT_H_
+
+#include <string>
+#include <vector>
+
+#include "base/time/time.h"
+
+namespace base {
+
+// Structure containing the result of a single EXPECT/ASSERT/SUCCESS.
+struct TestResultPart {
+  enum Type {
+    kSuccess,          // SUCCESS
+    kNonFatalFailure,  // EXPECT
+    kFatalFailure,     // ASSERT
+  };
+  Type type;
+
+  TestResultPart();
+  ~TestResultPart();
+
+  TestResultPart(const TestResultPart& other);
+  TestResultPart(TestResultPart&& other);
+  TestResultPart& operator=(const TestResultPart& other);
+  TestResultPart& operator=(TestResultPart&& other);
+
+  // Convert type to string and back.
+  static bool TypeFromString(const std::string& str, Type* type);
+  std::string TypeAsString() const;
+
+  // Filename and line of EXPECT/ASSERT.
+  std::string file_name;
+  int line_number;
+
+  // Message without stacktrace, etc.
+  std::string summary;
+
+  // Complete message.
+  std::string message;
+};
+
+// Structure containing result of a single test.
+struct TestResult {
+  enum Status {
+    TEST_UNKNOWN,           // Status not set.
+    TEST_SUCCESS,           // Test passed.
+    TEST_FAILURE,           // Assertion failure (e.g. EXPECT_TRUE, not DCHECK).
+    TEST_FAILURE_ON_EXIT,   // Passed but executable exit code was non-zero.
+    TEST_TIMEOUT,           // Test timed out and was killed.
+    TEST_CRASH,             // Test crashed (includes CHECK/DCHECK failures).
+    TEST_SKIPPED,           // Test skipped (not run at all).
+    TEST_EXCESSIVE_OUTPUT,  // Test exceeded output limit.
+  };
+
+  TestResult();
+  ~TestResult();
+
+  TestResult(const TestResult& other);
+  TestResult(TestResult&& other);
+  TestResult& operator=(const TestResult& other);
+  TestResult& operator=(TestResult&& other);
+
+  // Returns the test status as string (e.g. for display).
+  std::string StatusAsString() const;
+
+  // Returns the test name (e.g. "B" for "A.B").
+  std::string GetTestName() const;
+
+  // Returns the test case name (e.g. "A" for "A.B").
+  std::string GetTestCaseName() const;
+
+  // Returns true if the test has completed (i.e. the test binary exited
+  // normally, possibly with an exit code indicating failure, but didn't crash
+  // or time out in the middle of the test).
+  bool completed() const {
+    return status == TEST_SUCCESS ||
+        status == TEST_FAILURE ||
+        status == TEST_FAILURE_ON_EXIT ||
+        status == TEST_EXCESSIVE_OUTPUT;
+  }
+
+  // Full name of the test (e.g. "A.B").
+  std::string full_name;
+
+  Status status;
+
+  // Time it took to run the test.
+  base::TimeDelta elapsed_time;
+
+  // Output of just this test (optional).
+  std::string output_snippet;
+
+  // Information about failed expectations.
+  std::vector<TestResultPart> test_result_parts;
+};
+
+}  // namespace base
+
+#endif  // BASE_TEST_LAUNCHER_TEST_RESULT_H_
diff --git a/base/test/launcher/test_results_tracker.cc b/base/test/launcher/test_results_tracker.cc
new file mode 100644
index 0000000..a7e590c
--- /dev/null
+++ b/base/test/launcher/test_results_tracker.cc
@@ -0,0 +1,541 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/launcher/test_results_tracker.h"
+
+#include <stddef.h>
+
+#include <memory>
+#include <utility>
+
+#include "base/base64.h"
+#include "base/command_line.h"
+#include "base/files/file.h"
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/format_macros.h"
+#include "base/json/json_writer.h"
+#include "base/json/string_escape.h"
+#include "base/logging.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/test/gtest_util.h"
+#include "base/test/launcher/test_launcher.h"
+#include "base/time/time.h"
+#include "base/values.h"
+
+namespace base {
+
+namespace {
+
+// The default output file for XML output.
+const FilePath::CharType kDefaultOutputFile[] = FILE_PATH_LITERAL(
+    "test_detail.xml");
+
+// Converts the given time to a date string in the ISO 8601 format, without
+// the timezone information.
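+// For example, a Time for 2013-03-01 08:21:17 UTC would format as
+// "2013-03-01T08:21:17" (illustrative value).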
+// TODO(xyzzyz): Find a good place in Chromium to put it and refactor all uses
+// to point to it.
+std::string FormatTimeAsIso8601(Time time) {
+  Time::Exploded exploded;
+  time.UTCExplode(&exploded);
+  return StringPrintf("%04d-%02d-%02dT%02d:%02d:%02d",
+                      exploded.year,
+                      exploded.month,
+                      exploded.day_of_month,
+                      exploded.hour,
+                      exploded.minute,
+                      exploded.second);
+}
+
+struct TestSuiteResultsAggregator {
+  TestSuiteResultsAggregator()
+      : tests(0), failures(0), disabled(0), errors(0) {}
+
+  void Add(const TestResult& result) {
+    tests++;
+    elapsed_time += result.elapsed_time;
+
+    switch (result.status) {
+      case TestResult::TEST_SUCCESS:
+        break;
+      case TestResult::TEST_FAILURE:
+        failures++;
+        break;
+      case TestResult::TEST_EXCESSIVE_OUTPUT:
+      case TestResult::TEST_FAILURE_ON_EXIT:
+      case TestResult::TEST_TIMEOUT:
+      case TestResult::TEST_CRASH:
+      case TestResult::TEST_UNKNOWN:
+        errors++;
+        break;
+      case TestResult::TEST_SKIPPED:
+        disabled++;
+        break;
+    }
+  }
+
+  int tests;
+  int failures;
+  int disabled;
+  int errors;
+
+  TimeDelta elapsed_time;
+};
+
+}  // namespace
+
+TestResultsTracker::TestResultsTracker() : iteration_(-1), out_(nullptr) {}
+
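+// On destruction, writes gtest-compatible XML to |out_| (if --gtest_output
+// was given). For illustration, a run with one passing test might produce
+// roughly (hypothetical values):
+//
+//   <?xml version="1.0" encoding="UTF-8"?>
+//   <testsuites name="AllTests" tests="1" failures="0" disabled="0"
+//       errors="0" time="0.010" timestamp="2013-03-01T08:21:17">
+//     <testsuite name="FooTest" tests="1" failures="0" disabled="0"
+//         errors="0" time="0.010" timestamp="2013-03-01T08:21:17">
+//       <testcase name="Bar" status="run" time="0.010" classname="FooTest"/>
+//     </testsuite>
+//   </testsuites>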
+TestResultsTracker::~TestResultsTracker() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  if (!out_)
+    return;
+
+  // Maps test case names to test results.
+  typedef std::map<std::string, std::vector<TestResult> > TestCaseMap;
+  TestCaseMap test_case_map;
+
+  TestSuiteResultsAggregator all_tests_aggregator;
+  for (const PerIterationData::ResultsMap::value_type& i
+           : per_iteration_data_[iteration_].results) {
+    // Use the last test result as the final one.
+    TestResult result = i.second.test_results.back();
+    test_case_map[result.GetTestCaseName()].push_back(result);
+    all_tests_aggregator.Add(result);
+  }
+
+  fprintf(out_, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
+  fprintf(out_,
+          "<testsuites name=\"AllTests\" tests=\"%d\" failures=\"%d\""
+          " disabled=\"%d\" errors=\"%d\" time=\"%.3f\" timestamp=\"%s\">\n",
+          all_tests_aggregator.tests, all_tests_aggregator.failures,
+          all_tests_aggregator.disabled, all_tests_aggregator.errors,
+          all_tests_aggregator.elapsed_time.InSecondsF(),
+          FormatTimeAsIso8601(Time::Now()).c_str());
+
+  for (const TestCaseMap::value_type& i : test_case_map) {
+    const std::string testsuite_name = i.first;
+    const std::vector<TestResult>& results = i.second;
+
+    TestSuiteResultsAggregator aggregator;
+    for (const TestResult& result : results) {
+      aggregator.Add(result);
+    }
+    fprintf(out_,
+            "  <testsuite name=\"%s\" tests=\"%d\" "
+            "failures=\"%d\" disabled=\"%d\" errors=\"%d\" time=\"%.3f\" "
+            "timestamp=\"%s\">\n",
+            testsuite_name.c_str(), aggregator.tests, aggregator.failures,
+            aggregator.disabled, aggregator.errors,
+            aggregator.elapsed_time.InSecondsF(),
+            FormatTimeAsIso8601(Time::Now()).c_str());
+
+    for (const TestResult& result : results) {
+      fprintf(out_, "    <testcase name=\"%s\" status=\"run\" time=\"%.3f\""
+              " classname=\"%s\">\n",
+              result.GetTestName().c_str(),
+              result.elapsed_time.InSecondsF(),
+              result.GetTestCaseName().c_str());
+      if (result.status != TestResult::TEST_SUCCESS) {
+        // The actual failure message is not propagated up to here, as it's
+        // too much work to escape it properly, and in case of failure one
+        // almost always needs to look at the full log anyway.
+        fprintf(out_, "      <failure message=\"\" type=\"\"></failure>\n");
+      }
+      fprintf(out_, "    </testcase>\n");
+    }
+    fprintf(out_, "  </testsuite>\n");
+  }
+
+  fprintf(out_, "</testsuites>\n");
+  fclose(out_);
+}
+
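+// Initializes XML output based on --gtest_output. For illustration
+// (hypothetical paths): --gtest_output=xml:/tmp/results/ writes to
+// /tmp/results/<binary name>.xml, --gtest_output=xml:/tmp/r.xml writes to
+// /tmp/r.xml, and a bare --gtest_output=xml falls back to test_detail.xml.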
+bool TestResultsTracker::Init(const CommandLine& command_line) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  // Prevent initializing twice.
+  if (out_) {
+    NOTREACHED();
+    return false;
+  }
+
+  if (!command_line.HasSwitch(kGTestOutputFlag))
+    return true;
+
+  std::string flag = command_line.GetSwitchValueASCII(kGTestOutputFlag);
+  size_t colon_pos = flag.find(':');
+  FilePath path;
+  if (colon_pos != std::string::npos) {
+    FilePath flag_path =
+        command_line.GetSwitchValuePath(kGTestOutputFlag);
+    FilePath::StringType path_string = flag_path.value();
+    path = FilePath(path_string.substr(colon_pos + 1));
+    // If the given path ends with '/', consider it a directory.
+    // Note: this does NOT check that the directory (or file) actually exists
+    // (the behavior is the same as gtest's).
+    if (path.EndsWithSeparator()) {
+      FilePath executable = command_line.GetProgram().BaseName();
+      path = path.Append(executable.ReplaceExtension(
+                             FilePath::StringType(FILE_PATH_LITERAL("xml"))));
+    }
+  }
+  if (path.value().empty())
+    path = FilePath(kDefaultOutputFile);
+  FilePath dir_name = path.DirName();
+  if (!DirectoryExists(dir_name)) {
+    LOG(WARNING) << "The output directory does not exist. "
+                 << "Creating the directory: " << dir_name.value();
+    // Create the directory if necessary (because gtest does the same).
+    if (!CreateDirectory(dir_name)) {
+      LOG(ERROR) << "Failed to created directory " << dir_name.value();
+      return false;
+    }
+  }
+  out_ = OpenFile(path, "w");
+  if (!out_) {
+    LOG(ERROR) << "Cannot open output file: "
+               << path.value() << ".";
+    return false;
+  }
+
+  return true;
+}
+
+void TestResultsTracker::OnTestIterationStarting() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  // Start with a fresh state for new iteration.
+  iteration_++;
+  per_iteration_data_.push_back(PerIterationData());
+}
+
+void TestResultsTracker::AddTest(const std::string& test_name) {
+  // Record test names without the DISABLED_ prefix so that they are easy
+  // to compare with regular test names, e.g. before or after disabling.
+  all_tests_.insert(TestNameWithoutDisabledPrefix(test_name));
+}
+
+void TestResultsTracker::AddDisabledTest(const std::string& test_name) {
+  // Record disabled test names without DISABLED_ prefix so that they are easy
+  // to compare with regular test names, e.g. before or after disabling.
+  disabled_tests_.insert(TestNameWithoutDisabledPrefix(test_name));
+}
+
+void TestResultsTracker::AddTestLocation(const std::string& test_name,
+                                         const std::string& file,
+                                         int line) {
+  test_locations_.insert(std::make_pair(test_name, CodeLocation(file, line)));
+}
+
+void TestResultsTracker::AddTestResult(const TestResult& result) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  // Record results under test names without the DISABLED_ prefix so that
+  // they are easy to compare with regular test names, e.g. before or after
+  // disabling.
+  per_iteration_data_[iteration_].results[
+      TestNameWithoutDisabledPrefix(result.full_name)].test_results.push_back(
+          result);
+}
+
+void TestResultsTracker::PrintSummaryOfCurrentIteration() const {
+  TestStatusMap tests_by_status(GetTestStatusMapForCurrentIteration());
+
+  PrintTests(tests_by_status[TestResult::TEST_FAILURE].begin(),
+             tests_by_status[TestResult::TEST_FAILURE].end(),
+             "failed");
+  PrintTests(tests_by_status[TestResult::TEST_FAILURE_ON_EXIT].begin(),
+             tests_by_status[TestResult::TEST_FAILURE_ON_EXIT].end(),
+             "failed on exit");
+  PrintTests(tests_by_status[TestResult::TEST_EXCESSIVE_OUTPUT].begin(),
+             tests_by_status[TestResult::TEST_EXCESSIVE_OUTPUT].end(),
+             "produced excessive output");
+  PrintTests(tests_by_status[TestResult::TEST_TIMEOUT].begin(),
+             tests_by_status[TestResult::TEST_TIMEOUT].end(),
+             "timed out");
+  PrintTests(tests_by_status[TestResult::TEST_CRASH].begin(),
+             tests_by_status[TestResult::TEST_CRASH].end(),
+             "crashed");
+  PrintTests(tests_by_status[TestResult::TEST_SKIPPED].begin(),
+             tests_by_status[TestResult::TEST_SKIPPED].end(),
+             "skipped");
+  PrintTests(tests_by_status[TestResult::TEST_UNKNOWN].begin(),
+             tests_by_status[TestResult::TEST_UNKNOWN].end(),
+             "had unknown result");
+}
+
+void TestResultsTracker::PrintSummaryOfAllIterations() const {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  TestStatusMap tests_by_status(GetTestStatusMapForAllIterations());
+
+  fprintf(stdout, "Summary of all test iterations:\n");
+  fflush(stdout);
+
+  PrintTests(tests_by_status[TestResult::TEST_FAILURE].begin(),
+             tests_by_status[TestResult::TEST_FAILURE].end(),
+             "failed");
+  PrintTests(tests_by_status[TestResult::TEST_FAILURE_ON_EXIT].begin(),
+             tests_by_status[TestResult::TEST_FAILURE_ON_EXIT].end(),
+             "failed on exit");
+  PrintTests(tests_by_status[TestResult::TEST_EXCESSIVE_OUTPUT].begin(),
+             tests_by_status[TestResult::TEST_EXCESSIVE_OUTPUT].end(),
+             "produced excessive output");
+  PrintTests(tests_by_status[TestResult::TEST_TIMEOUT].begin(),
+             tests_by_status[TestResult::TEST_TIMEOUT].end(),
+             "timed out");
+  PrintTests(tests_by_status[TestResult::TEST_CRASH].begin(),
+             tests_by_status[TestResult::TEST_CRASH].end(),
+             "crashed");
+  PrintTests(tests_by_status[TestResult::TEST_SKIPPED].begin(),
+             tests_by_status[TestResult::TEST_SKIPPED].end(),
+             "skipped");
+  PrintTests(tests_by_status[TestResult::TEST_UNKNOWN].begin(),
+             tests_by_status[TestResult::TEST_UNKNOWN].end(),
+             "had unknown result");
+
+  fprintf(stdout, "End of the summary.\n");
+  fflush(stdout);
+}
+
+void TestResultsTracker::AddGlobalTag(const std::string& tag) {
+  global_tags_.insert(tag);
+}
+
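+// For illustration, the saved summary has roughly this shape (hypothetical
+// values, some keys omitted):
+//
+//   {
+//     "global_tags": ["OS_LINUX", "CPU_64_BITS", "MODE_DEBUG"],
+//     "all_tests": ["FooTest.Bar"],
+//     "disabled_tests": [],
+//     "per_iteration_data": [
+//       { "FooTest.Bar": [ { "status": "SUCCESS", "elapsed_time_ms": 10,
+//                            "output_snippet": "...", "result_parts": [] } ] }
+//     ],
+//     "test_locations": { "FooTest.Bar": { "file": "foo_test.cc",
+//                                          "line": 12 } }
+//   }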
+bool TestResultsTracker::SaveSummaryAsJSON(
+    const FilePath& path,
+    const std::vector<std::string>& additional_tags) const {
+  std::unique_ptr<DictionaryValue> summary_root(new DictionaryValue);
+
+  std::unique_ptr<ListValue> global_tags(new ListValue);
+  for (const auto& global_tag : global_tags_) {
+    global_tags->AppendString(global_tag);
+  }
+  for (const auto& tag : additional_tags) {
+    global_tags->AppendString(tag);
+  }
+  summary_root->Set("global_tags", std::move(global_tags));
+
+  std::unique_ptr<ListValue> all_tests(new ListValue);
+  for (const auto& test : all_tests_) {
+    all_tests->AppendString(test);
+  }
+  summary_root->Set("all_tests", std::move(all_tests));
+
+  std::unique_ptr<ListValue> disabled_tests(new ListValue);
+  for (const auto& disabled_test : disabled_tests_) {
+    disabled_tests->AppendString(disabled_test);
+  }
+  summary_root->Set("disabled_tests", std::move(disabled_tests));
+
+  std::unique_ptr<ListValue> per_iteration_data(new ListValue);
+
+  for (int i = 0; i <= iteration_; i++) {
+    std::unique_ptr<DictionaryValue> current_iteration_data(
+        new DictionaryValue);
+
+    for (PerIterationData::ResultsMap::const_iterator j =
+             per_iteration_data_[i].results.begin();
+         j != per_iteration_data_[i].results.end();
+         ++j) {
+      std::unique_ptr<ListValue> test_results(new ListValue);
+
+      for (size_t k = 0; k < j->second.test_results.size(); k++) {
+        const TestResult& test_result = j->second.test_results[k];
+
+        std::unique_ptr<DictionaryValue> test_result_value(new DictionaryValue);
+
+        test_result_value->SetString("status", test_result.StatusAsString());
+        test_result_value->SetInteger(
+            "elapsed_time_ms",
+            static_cast<int>(test_result.elapsed_time.InMilliseconds()));
+
+        bool lossless_snippet = false;
+        if (IsStringUTF8(test_result.output_snippet)) {
+          test_result_value->SetString(
+              "output_snippet", test_result.output_snippet);
+          lossless_snippet = true;
+        } else {
+          test_result_value->SetString(
+              "output_snippet",
+              "<non-UTF-8 snippet, see output_snippet_base64>");
+        }
+
+        // TODO(phajdan.jr): Fix typo in JSON key (losless -> lossless)
+        // making sure not to break any consumers of this data.
+        test_result_value->SetBoolean("losless_snippet", lossless_snippet);
+
+        // Also include the raw version (base64-encoded so that it can be
+        // safely JSON-serialized - there are no guarantees about the
+        // character encoding of the snippet). This can be a very useful
+        // piece of information when debugging a test failure related to
+        // character encoding.
+        std::string base64_output_snippet;
+        Base64Encode(test_result.output_snippet, &base64_output_snippet);
+        test_result_value->SetString("output_snippet_base64",
+                                     base64_output_snippet);
+
+        std::unique_ptr<ListValue> test_result_parts(new ListValue);
+        for (const TestResultPart& result_part :
+             test_result.test_result_parts) {
+          std::unique_ptr<DictionaryValue> result_part_value(
+              new DictionaryValue);
+          result_part_value->SetString("type", result_part.TypeAsString());
+          result_part_value->SetString("file", result_part.file_name);
+          result_part_value->SetInteger("line", result_part.line_number);
+
+          bool lossless_summary = IsStringUTF8(result_part.summary);
+          if (lossless_summary) {
+            result_part_value->SetString("summary", result_part.summary);
+          } else {
+            result_part_value->SetString(
+                "summary", "<non-UTF-8 snippet, see summary_base64>");
+          }
+          result_part_value->SetBoolean("lossless_summary", lossless_summary);
+
+          std::string encoded_summary;
+          Base64Encode(result_part.summary, &encoded_summary);
+          result_part_value->SetString("summary_base64", encoded_summary);
+
+          bool lossless_message = IsStringUTF8(result_part.message);
+          if (lossless_message) {
+            result_part_value->SetString("message", result_part.message);
+          } else {
+            result_part_value->SetString(
+                "message", "<non-UTF-8 snippet, see message_base64>");
+          }
+          result_part_value->SetBoolean("lossless_message", lossless_message);
+
+          std::string encoded_message;
+          Base64Encode(result_part.message, &encoded_message);
+          result_part_value->SetString("message_base64", encoded_message);
+
+          test_result_parts->Append(std::move(result_part_value));
+        }
+        test_result_value->Set("result_parts", std::move(test_result_parts));
+
+        test_results->Append(std::move(test_result_value));
+      }
+
+      current_iteration_data->SetWithoutPathExpansion(j->first,
+                                                      std::move(test_results));
+    }
+    per_iteration_data->Append(std::move(current_iteration_data));
+  }
+  summary_root->Set("per_iteration_data", std::move(per_iteration_data));
+
+  std::unique_ptr<DictionaryValue> test_locations(new DictionaryValue);
+  for (const auto& item : test_locations_) {
+    std::string test_name = item.first;
+    CodeLocation location = item.second;
+    std::unique_ptr<DictionaryValue> location_value(new DictionaryValue);
+    location_value->SetString("file", location.file);
+    location_value->SetInteger("line", location.line);
+    test_locations->SetWithoutPathExpansion(test_name,
+                                            std::move(location_value));
+  }
+  summary_root->Set("test_locations", std::move(test_locations));
+
+  std::string json;
+  if (!JSONWriter::Write(*summary_root, &json))
+    return false;
+
+  File output(path, File::FLAG_CREATE_ALWAYS | File::FLAG_WRITE);
+  if (!output.IsValid())
+    return false;
+
+  int json_size = static_cast<int>(json.size());
+  if (output.WriteAtCurrentPos(json.data(), json_size) != json_size) {
+    return false;
+  }
+
+  // File::Flush() will call fsync(). This is important on Fuchsia to ensure
+  // that the file is written to the disk - the system running under qemu will
+  // shut down shortly after the test completes. On Fuchsia fsync() times out
+  // after 15 seconds. Apparently this may not be enough in some cases,
+  // particularly when running net_unittests on buildbots, see
+  // https://crbug.com/796318. Try calling fsync() more than once to work
+  // around this issue.
+  //
+  // TODO(sergeyu): Figure out a better solution.
+  int flush_attempts_left = 4;
+  while (flush_attempts_left-- > 0) {
+    if (output.Flush())
+      return true;
+    LOG(ERROR) << "fsync() failed when saving test output summary. "
+               << ((flush_attempts_left > 0) ? "Retrying." : "Giving up.");
+  }
+
+  return false;
+}
+
+TestResultsTracker::TestStatusMap
+    TestResultsTracker::GetTestStatusMapForCurrentIteration() const {
+  TestStatusMap tests_by_status;
+  GetTestStatusForIteration(iteration_, &tests_by_status);
+  return tests_by_status;
+}
+
+TestResultsTracker::TestStatusMap
+    TestResultsTracker::GetTestStatusMapForAllIterations() const {
+  TestStatusMap tests_by_status;
+  for (int i = 0; i <= iteration_; i++)
+    GetTestStatusForIteration(i, &tests_by_status);
+  return tests_by_status;
+}
+
+void TestResultsTracker::GetTestStatusForIteration(
+    int iteration, TestStatusMap* map) const {
+  for (PerIterationData::ResultsMap::const_iterator j =
+           per_iteration_data_[iteration].results.begin();
+       j != per_iteration_data_[iteration].results.end();
+       ++j) {
+    // Use the last test result as the final one.
+    const TestResult& result = j->second.test_results.back();
+    (*map)[result.status].insert(result.full_name);
+  }
+}
+
+// Utility function to print a list of test names. Uses iterators to be
+// compatible with different containers, like vector and set.
+template<typename InputIterator>
+void TestResultsTracker::PrintTests(InputIterator first,
+                                    InputIterator last,
+                                    const std::string& description) const {
+  size_t count = std::distance(first, last);
+  if (count == 0)
+    return;
+
+  fprintf(stdout,
+          "%" PRIuS " test%s %s:\n",
+          count,
+          count != 1 ? "s" : "",
+          description.c_str());
+  for (InputIterator it = first; it != last; ++it) {
+    const std::string& test_name = *it;
+    const auto location_it = test_locations_.find(test_name);
+    DCHECK(location_it != test_locations_.end()) << test_name;
+    const CodeLocation& location = location_it->second;
+    fprintf(stdout, "    %s (%s:%d)\n", test_name.c_str(),
+            location.file.c_str(), location.line);
+  }
+  fflush(stdout);
+}
+
+TestResultsTracker::AggregateTestResult::AggregateTestResult() = default;
+
+TestResultsTracker::AggregateTestResult::AggregateTestResult(
+    const AggregateTestResult& other) = default;
+
+TestResultsTracker::AggregateTestResult::~AggregateTestResult() = default;
+
+TestResultsTracker::PerIterationData::PerIterationData() = default;
+
+TestResultsTracker::PerIterationData::PerIterationData(
+    const PerIterationData& other) = default;
+
+TestResultsTracker::PerIterationData::~PerIterationData() = default;
+
+}  // namespace base
diff --git a/base/test/launcher/test_results_tracker.h b/base/test/launcher/test_results_tracker.h
new file mode 100644
index 0000000..d89821d
--- /dev/null
+++ b/base/test/launcher/test_results_tracker.h
@@ -0,0 +1,149 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_LAUNCHER_TEST_RESULTS_TRACKER_H_
+#define BASE_TEST_LAUNCHER_TEST_RESULTS_TRACKER_H_
+
+#include <map>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/test/launcher/test_result.h"
+#include "base/threading/thread_checker.h"
+
+namespace base {
+
+class CommandLine;
+class FilePath;
+
+// A helper class to output results.
+// Note: as XML is currently the only format supported by gtest, we don't
+// check the output format (e.g. the "xml:" prefix) here and always output
+// an XML file.
+// Note: we don't output per-test-case or total summary info like
+// total failed_test_count, disabled_test_count, elapsed_time and so on.
+// Only individual tests (testcase elements in the XML) have the correct
+// failed/disabled/elapsed_time information; they don't include detailed
+// failure messages either.
+class TestResultsTracker {
+ public:
+  TestResultsTracker();
+  ~TestResultsTracker();
+
+  // Initialize the result tracker. Must be called exactly once before
+  // calling any other methods. Returns true on success.
+  bool Init(const CommandLine& command_line) WARN_UNUSED_RESULT;
+
+  // Called when a test iteration is starting.
+  void OnTestIterationStarting();
+
+  // Adds |test_name| to the set of discovered tests (this includes all tests
+  // present in the executable, not necessarily run).
+  void AddTest(const std::string& test_name);
+
+  // Adds |test_name| to the set of disabled tests.
+  void AddDisabledTest(const std::string& test_name);
+
+  // Adds the code location for |test_name|.
+  void AddTestLocation(const std::string& test_name,
+                       const std::string& file,
+                       int line);
+
+  // Adds |result| to the stored test results.
+  void AddTestResult(const TestResult& result);
+
+  // Prints a summary of the current test iteration to stdout.
+  void PrintSummaryOfCurrentIteration() const;
+
+  // Prints a summary of all test iterations (not just the last one) to stdout.
+  void PrintSummaryOfAllIterations() const;
+
+  // Adds a string tag to the JSON summary. This is intended to indicate
+  // conditions that affect the entire test run, as opposed to individual tests.
+  void AddGlobalTag(const std::string& tag);
+
+  // Saves a JSON summary of the results of all test iterations to |path|.
+  // Adds |additional_tags| to the summary (just for this invocation). Returns
+  // true on success.
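+  //
+  // For reference, a sketch of the emitted JSON, derived from the
+  // implementation (field order is illustrative, not guaranteed):
+  //   {
+  //     "global_tags": ["tag1", ...],
+  //     "all_tests": ["Suite.Test", ...],
+  //     "disabled_tests": ["Suite.DISABLED_Test", ...],
+  //     "per_iteration_data": [{"Suite.Test": [{"status": "...",
+  //         "elapsed_time_ms": 0, "output_snippet": "...",
+  //         "output_snippet_base64": "...", "losless_snippet": true,
+  //         "result_parts": [...]}]}],
+  //     "test_locations": {"Suite.Test": {"file": "...", "line": 0}}
+  //   }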
+  bool SaveSummaryAsJSON(
+      const FilePath& path,
+      const std::vector<std::string>& additional_tags) const WARN_UNUSED_RESULT;
+
+  // Map where keys are test result statuses, and values are sets of tests
+  // which finished with that status.
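+  // For example, tests_by_status[TestResult::TEST_FAILURE] is the set of
+  // full names of tests whose final result had that status.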
+  typedef std::map<TestResult::Status, std::set<std::string> > TestStatusMap;
+
+  // Returns a test status map (see above) for the current test iteration.
+  TestStatusMap GetTestStatusMapForCurrentIteration() const;
+
+  // Returns a test status map (see above) for all test iterations.
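+  // Note: a test whose status changed between iterations appears once per
+  // status it ever had; later iterations do not erase earlier entries.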
+  TestStatusMap GetTestStatusMapForAllIterations() const;
+
+ private:
+  void GetTestStatusForIteration(int iteration, TestStatusMap* map) const;
+
+  template<typename InputIterator>
+  void PrintTests(InputIterator first,
+                  InputIterator last,
+                  const std::string& description) const;
+
+  struct AggregateTestResult {
+    AggregateTestResult();
+    AggregateTestResult(const AggregateTestResult& other);
+    ~AggregateTestResult();
+
+    std::vector<TestResult> test_results;
+  };
+
+  struct PerIterationData {
+    PerIterationData();
+    PerIterationData(const PerIterationData& other);
+    ~PerIterationData();
+
+    // Aggregate test results grouped by full test name.
+    typedef std::map<std::string, AggregateTestResult> ResultsMap;
+    ResultsMap results;
+  };
+
+  struct CodeLocation {
+    CodeLocation(const std::string& f, int l) : file(f), line(l) {
+    }
+
+    std::string file;
+    int line;
+  };
+
+  ThreadChecker thread_checker_;
+
+  // Set of global tags, i.e. strings indicating conditions that apply to
+  // the entire test run.
+  std::set<std::string> global_tags_;
+
+  // Set of all test names discovered in the current executable.
+  std::set<std::string> all_tests_;
+
+  std::map<std::string, CodeLocation> test_locations_;
+
+  // Set of all disabled tests in the current executable.
+  std::set<std::string> disabled_tests_;
+
+  // Store test results for each iteration.
+  std::vector<PerIterationData> per_iteration_data_;
+
+  // Index of current iteration (starting from 0). -1 before the first
+  // iteration.
+  int iteration_;
+
+  // File handle of output file (can be NULL if no file).
+  FILE* out_;
+
+  DISALLOW_COPY_AND_ASSIGN(TestResultsTracker);
+};
+
+}  // namespace base
+
+#endif  // BASE_TEST_LAUNCHER_TEST_RESULTS_TRACKER_H_
diff --git a/base/test/launcher/unit_test_launcher.cc b/base/test/launcher/unit_test_launcher.cc
new file mode 100644
index 0000000..1d4439c
--- /dev/null
+++ b/base/test/launcher/unit_test_launcher.cc
@@ -0,0 +1,750 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/launcher/unit_test_launcher.h"
+
+#include <map>
+#include <memory>
+#include <utility>
+
+#include "base/base_switches.h"
+#include "base/bind.h"
+#include "base/callback_helpers.h"
+#include "base/command_line.h"
+#include "base/compiler_specific.h"
+#include "base/debug/debugger.h"
+#include "base/files/file_util.h"
+#include "base/files/scoped_temp_dir.h"
+#include "base/format_macros.h"
+#include "base/location.h"
+#include "base/macros.h"
+#include "base/message_loop/message_loop.h"
+#include "base/sequence_checker.h"
+#include "base/single_thread_task_runner.h"
+#include "base/stl_util.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_util.h"
+#include "base/sys_info.h"
+#include "base/test/gtest_xml_util.h"
+#include "base/test/launcher/test_launcher.h"
+#include "base/test/test_switches.h"
+#include "base/test/test_timeouts.h"
+#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
+#include "base/threading/thread_checker.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_POSIX)
+#include "base/files/file_descriptor_watcher_posix.h"
+#endif
+
+namespace base {
+
+namespace {
+
+// This constant controls how many tests are run in a single batch by default.
+const size_t kDefaultTestBatchLimit = 10;
+
+const char kHelpFlag[] = "help";
+
+// Flag to run all tests in a single process.
+const char kSingleProcessTestsFlag[] = "single-process-tests";
+
+void PrintUsage() {
+  fprintf(stdout,
+          "Runs tests using the gtest framework, each batch of tests being\n"
+          "run in their own process. Supported command-line flags:\n"
+          "\n"
+          " Common flags:\n"
+          "  --gtest_filter=...\n"
+          "    Runs a subset of tests (see --gtest_help for more info).\n"
+          "\n"
+          "  --help\n"
+          "    Shows this message.\n"
+          "\n"
+          "  --gtest_help\n"
+          "    Shows the gtest help message.\n"
+          "\n"
+          "  --test-launcher-jobs=N\n"
+          "    Sets the number of parallel test jobs to N.\n"
+          "\n"
+          "  --single-process-tests\n"
+          "    Runs the tests and the launcher in the same process. Useful\n"
+          "    for debugging a specific test in a debugger.\n"
+          "\n"
+          " Other flags:\n"
+          "  --test-launcher-filter-file=PATH\n"
+          "    Like --gtest_filter, but read the test filter from PATH.\n"
+          "    One pattern per line; lines starting with '-' are exclusions.\n"
+          "    See also //testing/buildbot/filters/README.md file.\n"
+          "\n"
+          "  --test-launcher-batch-limit=N\n"
+          "    Sets the limit of test batch to run in a single process to N.\n"
+          "\n"
+          "  --test-launcher-debug-launcher\n"
+          "    Disables autodetection of debuggers and similar tools,\n"
+          "    making it possible to use them to debug launcher itself.\n"
+          "\n"
+          "  --test-launcher-retry-limit=N\n"
+          "    Sets the limit of test retries on failures to N.\n"
+          "\n"
+          "  --test-launcher-summary-output=PATH\n"
+          "    Saves a JSON machine-readable summary of the run.\n"
+          "\n"
+          "  --test-launcher-print-test-stdio=auto|always|never\n"
+          "    Controls when full test output is printed.\n"
+          "    auto means to print it when the test failed.\n"
+          "\n"
+          "  --test-launcher-test-part-results-limit=N\n"
+          "    Sets the limit of failed EXPECT/ASSERT entries in the xml and\n"
+          "    JSON outputs per test to N (default N=10). Negative value \n"
+          "    will disable this limit.\n"
+          "\n"
+          "  --test-launcher-total-shards=N\n"
+          "    Sets the total number of shards to N.\n"
+          "\n"
+          "  --test-launcher-shard-index=N\n"
+          "    Sets the shard index to run to N (from 0 to TOTAL - 1).\n");
+  fflush(stdout);
+}
+
+class DefaultUnitTestPlatformDelegate : public UnitTestPlatformDelegate {
+ public:
+  DefaultUnitTestPlatformDelegate() = default;
+
+ private:
+  // UnitTestPlatformDelegate:
+  bool GetTests(std::vector<TestIdentifier>* output) override {
+    *output = GetCompiledInTests();
+    return true;
+  }
+
+  bool CreateResultsFile(base::FilePath* path) override {
+    if (!CreateNewTempDirectory(FilePath::StringType(), path))
+      return false;
+    *path = path->AppendASCII("test_results.xml");
+    return true;
+  }
+
+  bool CreateTemporaryFile(base::FilePath* path) override {
+    if (!temp_dir_.IsValid() && !temp_dir_.CreateUniqueTempDir())
+      return false;
+    return CreateTemporaryFileInDir(temp_dir_.GetPath(), path);
+  }
+
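+  // The resulting child command line looks roughly like this (paths are
+  // illustrative only):
+  //   out/foo_unittests --test-launcher-output=/tmp/.../test_results.xml
+  //       --gtest_flagfile=/tmp/.../flagfile --single-process-tests
+  // where the flag file contains a single line such as:
+  //   --gtest_filter=Suite.Test1:Suite.Test2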
+  CommandLine GetCommandLineForChildGTestProcess(
+      const std::vector<std::string>& test_names,
+      const base::FilePath& output_file,
+      const base::FilePath& flag_file) override {
+    CommandLine new_cmd_line(*CommandLine::ForCurrentProcess());
+
+    CHECK(base::PathExists(flag_file));
+
+    std::string long_flags(
+        std::string("--") + kGTestFilterFlag + "=" +
+        JoinString(test_names, ":"));
+    CHECK_EQ(static_cast<int>(long_flags.size()),
+             WriteFile(flag_file, long_flags.data(),
+                       static_cast<int>(long_flags.size())));
+
+    new_cmd_line.AppendSwitchPath(switches::kTestLauncherOutput, output_file);
+    new_cmd_line.AppendSwitchPath(kGTestFlagfileFlag, flag_file);
+    new_cmd_line.AppendSwitch(kSingleProcessTestsFlag);
+
+    return new_cmd_line;
+  }
+
+  std::string GetWrapperForChildGTestProcess() override {
+    return std::string();
+  }
+
+  void RelaunchTests(TestLauncher* test_launcher,
+                     const std::vector<std::string>& test_names,
+                     int launch_flags) override {
+    // Relaunch requested tests in parallel, but use only a single
+    // test per batch for more precise results (crashes, etc.).
+    for (const std::string& test_name : test_names) {
+      std::vector<std::string> batch;
+      batch.push_back(test_name);
+      RunUnitTestsBatch(test_launcher, this, batch, launch_flags);
+    }
+  }
+
+  ScopedTempDir temp_dir_;
+
+  DISALLOW_COPY_AND_ASSIGN(DefaultUnitTestPlatformDelegate);
+};
+
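+// Returns true and leaves |result| unchanged if |switch_name| is not present
+// on the command line; otherwise parses its value into |result|. Returns
+// false if the value is malformed or negative.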
+bool GetSwitchValueAsInt(const std::string& switch_name, int* result) {
+  if (!CommandLine::ForCurrentProcess()->HasSwitch(switch_name))
+    return true;
+
+  std::string switch_value =
+      CommandLine::ForCurrentProcess()->GetSwitchValueASCII(switch_name);
+  if (!StringToInt(switch_value, result) || *result < 0) {
+    LOG(ERROR) << "Invalid value for " << switch_name << ": " << switch_value;
+    return false;
+  }
+
+  return true;
+}
+
+int LaunchUnitTestsInternal(RunTestSuiteCallback run_test_suite,
+                            size_t parallel_jobs,
+                            int default_batch_limit,
+                            bool use_job_objects,
+                            OnceClosure gtest_init) {
+#if defined(OS_ANDROID)
+  // We can't easily fork on Android, so just run the test suite directly.
+  return std::move(run_test_suite).Run();
+#else
+  bool force_single_process = false;
+  if (CommandLine::ForCurrentProcess()->HasSwitch(
+          switches::kTestLauncherDebugLauncher)) {
+    fprintf(stdout, "Forcing test launcher debugging mode.\n");
+    fflush(stdout);
+  } else {
+    if (base::debug::BeingDebugged()) {
+      fprintf(stdout,
+              "Debugger detected, switching to single process mode.\n"
+              "Pass --test-launcher-debug-launcher to debug the launcher "
+              "itself.\n");
+      fflush(stdout);
+      force_single_process = true;
+    }
+  }
+
+  if (CommandLine::ForCurrentProcess()->HasSwitch(kGTestHelpFlag) ||
+      CommandLine::ForCurrentProcess()->HasSwitch(kGTestListTestsFlag) ||
+      CommandLine::ForCurrentProcess()->HasSwitch(kSingleProcessTestsFlag) ||
+      CommandLine::ForCurrentProcess()->HasSwitch(
+          switches::kTestChildProcess) ||
+      force_single_process) {
+    return std::move(run_test_suite).Run();
+  }
+#endif
+
+  if (CommandLine::ForCurrentProcess()->HasSwitch(kHelpFlag)) {
+    PrintUsage();
+    return 0;
+  }
+
+  TimeTicks start_time(TimeTicks::Now());
+
+  std::move(gtest_init).Run();
+  TestTimeouts::Initialize();
+
+  int batch_limit = default_batch_limit;
+  if (!GetSwitchValueAsInt(switches::kTestLauncherBatchLimit, &batch_limit))
+    return 1;
+
+  fprintf(stdout,
+          "IMPORTANT DEBUGGING NOTE: batches of tests are run inside their\n"
+          "own process. For debugging a test inside a debugger, use the\n"
+          "--gtest_filter=<your_test_name> flag along with\n"
+          "--single-process-tests.\n");
+  fflush(stdout);
+
+  MessageLoopForIO message_loop;
+#if defined(OS_POSIX)
+  FileDescriptorWatcher file_descriptor_watcher(&message_loop);
+#endif
+
+  DefaultUnitTestPlatformDelegate platform_delegate;
+  UnitTestLauncherDelegate delegate(
+      &platform_delegate, batch_limit, use_job_objects);
+  TestLauncher launcher(&delegate, parallel_jobs);
+  bool success = launcher.Run();
+
+  fprintf(stdout, "Tests took %" PRId64 " seconds.\n",
+          (TimeTicks::Now() - start_time).InSeconds());
+  fflush(stdout);
+
+  return (success ? 0 : 1);
+}
+
+void InitGoogleTestChar(int* argc, char** argv) {
+  testing::InitGoogleTest(argc, argv);
+}
+
+#if defined(OS_WIN)
+void InitGoogleTestWChar(int* argc, wchar_t** argv) {
+  testing::InitGoogleTest(argc, argv);
+}
+#endif  // defined(OS_WIN)
+
+// Interprets test results and reports them to the test launcher. Returns
+// true on success.
+bool ProcessTestResults(
+    TestLauncher* test_launcher,
+    const std::vector<std::string>& test_names,
+    const base::FilePath& output_file,
+    const std::string& output,
+    int exit_code,
+    bool was_timeout,
+    std::vector<std::string>* tests_to_relaunch) {
+  std::vector<TestResult> test_results;
+  bool crashed = false;
+  bool have_test_results =
+      ProcessGTestOutput(output_file, &test_results, &crashed);
+
+  bool called_any_callback = false;
+
+  if (have_test_results) {
+    // TODO(phajdan.jr): Check for duplicates and mismatches between
+    // the results we got from XML file and tests we intended to run.
+    std::map<std::string, TestResult> results_map;
+    for (size_t i = 0; i < test_results.size(); i++)
+      results_map[test_results[i].full_name] = test_results[i];
+
+    bool had_interrupted_test = false;
+
+    // Results to be reported back to the test launcher.
+    std::vector<TestResult> final_results;
+
+    for (size_t i = 0; i < test_names.size(); i++) {
+      if (ContainsKey(results_map, test_names[i])) {
+        TestResult test_result = results_map[test_names[i]];
+        if (test_result.status == TestResult::TEST_CRASH) {
+          had_interrupted_test = true;
+
+          if (was_timeout) {
+            // Fix up the test status: we forcibly kill the child process
+            // after the timeout, so from XML results it looks just like
+            // a crash.
+            test_result.status = TestResult::TEST_TIMEOUT;
+          }
+        } else if (test_result.status == TestResult::TEST_SUCCESS ||
+                   test_result.status == TestResult::TEST_FAILURE) {
+          // We run multiple tests in a batch with a timeout applied
+          // to the entire batch. It is possible that with other tests
+          // running quickly some tests take longer than the per-test timeout.
+          // For consistent handling of tests independent of order and other
+          // factors, mark them as timing out.
+          if (test_result.elapsed_time >
+              TestTimeouts::test_launcher_timeout()) {
+            test_result.status = TestResult::TEST_TIMEOUT;
+          }
+        }
+        test_result.output_snippet = GetTestOutputSnippet(test_result, output);
+        final_results.push_back(test_result);
+      } else if (had_interrupted_test) {
+        tests_to_relaunch->push_back(test_names[i]);
+      } else {
+        // TODO(phajdan.jr): Explicitly pass the info that the test didn't
+        // run for a mysterious reason.
+        LOG(ERROR) << "no test result for " << test_names[i];
+        TestResult test_result;
+        test_result.full_name = test_names[i];
+        test_result.status = TestResult::TEST_UNKNOWN;
+        test_result.output_snippet = GetTestOutputSnippet(test_result, output);
+        final_results.push_back(test_result);
+      }
+    }
+
+    // TODO(phajdan.jr): Handle the case where processing XML output
+    // indicates a crash but none of the test results is marked as crashing.
+
+    if (final_results.empty())
+      return false;
+
+    bool has_non_success_test = false;
+    for (size_t i = 0; i < final_results.size(); i++) {
+      if (final_results[i].status != TestResult::TEST_SUCCESS) {
+        has_non_success_test = true;
+        break;
+      }
+    }
+
+    if (!has_non_success_test && exit_code != 0) {
+      // This is a somewhat surprising case: all tests are marked as
+      // successful, but the exit code was not zero. This can happen e.g.
+      // under memory tools that report leaks this way. Mark all tests as
+      // failing on exit; for more precise info they'd need to be retried
+      // serially.
+      for (size_t i = 0; i < final_results.size(); i++)
+        final_results[i].status = TestResult::TEST_FAILURE_ON_EXIT;
+    }
+
+    for (size_t i = 0; i < final_results.size(); i++) {
+      // Fix the output snippet after possible changes to the test result.
+      final_results[i].output_snippet =
+          GetTestOutputSnippet(final_results[i], output);
+      test_launcher->OnTestFinished(final_results[i]);
+      called_any_callback = true;
+    }
+  } else {
+    fprintf(stdout,
+            "Failed to get out-of-band test success data, "
+            "dumping full stdio below:\n%s\n",
+            output.c_str());
+    fflush(stdout);
+
+    // We do not have reliable details about test results (parsing test
+    // stdout is known to be unreliable).
+    if (test_names.size() == 1) {
+      // There is only one test. Try to determine status by exit code.
+      const std::string& test_name = test_names.front();
+      TestResult test_result;
+      test_result.full_name = test_name;
+
+      if (was_timeout) {
+        test_result.status = TestResult::TEST_TIMEOUT;
+      } else if (exit_code != 0) {
+        test_result.status = TestResult::TEST_FAILURE;
+      } else {
+        // It's a strange case: the test executed successfully, but we
+        // failed to read the machine-readable report for it.
+        test_result.status = TestResult::TEST_UNKNOWN;
+      }
+
+      test_launcher->OnTestFinished(test_result);
+      called_any_callback = true;
+    } else {
+      // There is more than one test. Retry them individually.
+      for (const std::string& test_name : test_names)
+        tests_to_relaunch->push_back(test_name);
+    }
+  }
+
+  return called_any_callback;
+}
+
+class UnitTestProcessLifetimeObserver : public ProcessLifetimeObserver {
+ public:
+  ~UnitTestProcessLifetimeObserver() override {
+    DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+  }
+
+  TestLauncher* test_launcher() { return test_launcher_; }
+  UnitTestPlatformDelegate* platform_delegate() { return platform_delegate_; }
+  const std::vector<std::string>& test_names() { return test_names_; }
+  int launch_flags() { return launch_flags_; }
+  const FilePath& output_file() { return output_file_; }
+  const FilePath& flag_file() { return flag_file_; }
+
+ protected:
+  UnitTestProcessLifetimeObserver(TestLauncher* test_launcher,
+                                  UnitTestPlatformDelegate* platform_delegate,
+                                  const std::vector<std::string>& test_names,
+                                  int launch_flags,
+                                  const FilePath& output_file,
+                                  const FilePath& flag_file)
+      : ProcessLifetimeObserver(),
+        test_launcher_(test_launcher),
+        platform_delegate_(platform_delegate),
+        test_names_(test_names),
+        launch_flags_(launch_flags),
+        output_file_(output_file),
+        flag_file_(flag_file) {}
+
+  SEQUENCE_CHECKER(sequence_checker_);
+
+ private:
+  TestLauncher* const test_launcher_;
+  UnitTestPlatformDelegate* const platform_delegate_;
+  const std::vector<std::string> test_names_;
+  const int launch_flags_;
+  const FilePath output_file_;
+  const FilePath flag_file_;
+
+  DISALLOW_COPY_AND_ASSIGN(UnitTestProcessLifetimeObserver);
+};
+
+class ParallelUnitTestProcessLifetimeObserver
+    : public UnitTestProcessLifetimeObserver {
+ public:
+  ParallelUnitTestProcessLifetimeObserver(
+      TestLauncher* test_launcher,
+      UnitTestPlatformDelegate* platform_delegate,
+      const std::vector<std::string>& test_names,
+      int launch_flags,
+      const FilePath& output_file,
+      const FilePath& flag_file)
+      : UnitTestProcessLifetimeObserver(test_launcher,
+                                        platform_delegate,
+                                        test_names,
+                                        launch_flags,
+                                        output_file,
+                                        flag_file) {}
+  ~ParallelUnitTestProcessLifetimeObserver() override = default;
+
+ private:
+  // ProcessLifetimeObserver:
+  void OnCompleted(int exit_code,
+                   TimeDelta elapsed_time,
+                   bool was_timeout,
+                   const std::string& output) override;
+
+  DISALLOW_COPY_AND_ASSIGN(ParallelUnitTestProcessLifetimeObserver);
+};
+
+void ParallelUnitTestProcessLifetimeObserver::OnCompleted(
+    int exit_code,
+    TimeDelta elapsed_time,
+    bool was_timeout,
+    const std::string& output) {
+  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+  std::vector<std::string> tests_to_relaunch;
+  ProcessTestResults(test_launcher(), test_names(), output_file(), output,
+                     exit_code, was_timeout, &tests_to_relaunch);
+
+  if (!tests_to_relaunch.empty()) {
+    platform_delegate()->RelaunchTests(test_launcher(), tests_to_relaunch,
+                                       launch_flags());
+  }
+
+  // The temporary file's directory is also temporary.
+  DeleteFile(output_file().DirName(), true);
+  if (!flag_file().empty())
+    DeleteFile(flag_file(), false);
+}
+
+class SerialUnitTestProcessLifetimeObserver
+    : public UnitTestProcessLifetimeObserver {
+ public:
+  SerialUnitTestProcessLifetimeObserver(
+      TestLauncher* test_launcher,
+      UnitTestPlatformDelegate* platform_delegate,
+      const std::vector<std::string>& test_names,
+      int launch_flags,
+      const FilePath& output_file,
+      const FilePath& flag_file,
+      std::vector<std::string>&& next_test_names)
+      : UnitTestProcessLifetimeObserver(test_launcher,
+                                        platform_delegate,
+                                        test_names,
+                                        launch_flags,
+                                        output_file,
+                                        flag_file),
+        next_test_names_(std::move(next_test_names)) {}
+  ~SerialUnitTestProcessLifetimeObserver() override = default;
+
+ private:
+  // ProcessLifetimeObserver:
+  void OnCompleted(int exit_code,
+                   TimeDelta elapsed_time,
+                   bool was_timeout,
+                   const std::string& output) override;
+
+  std::vector<std::string> next_test_names_;
+
+  DISALLOW_COPY_AND_ASSIGN(SerialUnitTestProcessLifetimeObserver);
+};
+
+void SerialUnitTestProcessLifetimeObserver::OnCompleted(
+    int exit_code,
+    TimeDelta elapsed_time,
+    bool was_timeout,
+    const std::string& output) {
+  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+  std::vector<std::string> tests_to_relaunch;
+  bool called_any_callbacks =
+      ProcessTestResults(test_launcher(), test_names(), output_file(), output,
+                         exit_code, was_timeout, &tests_to_relaunch);
+
+  // There is only one test, so there cannot be other tests to relaunch
+  // due to a crash.
+  DCHECK(tests_to_relaunch.empty());
+
+  // There is only one test, so we should have called back with its result.
+  DCHECK(called_any_callbacks);
+
+  // The temporary file's directory is also temporary.
+  DeleteFile(output_file().DirName(), true);
+
+  if (!flag_file().empty())
+    DeleteFile(flag_file(), false);
+
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE,
+      BindOnce(&RunUnitTestsSerially, test_launcher(), platform_delegate(),
+               std::move(next_test_names_), launch_flags()));
+}
+
+}  // namespace
+
+int LaunchUnitTests(int argc,
+                    char** argv,
+                    RunTestSuiteCallback run_test_suite) {
+  CommandLine::Init(argc, argv);
+  size_t parallel_jobs = NumParallelJobs();
+  if (parallel_jobs == 0U) {
+    return 1;
+  }
+  return LaunchUnitTestsInternal(std::move(run_test_suite), parallel_jobs,
+                                 kDefaultTestBatchLimit, true,
+                                 BindOnce(&InitGoogleTestChar, &argc, argv));
+}
+
+int LaunchUnitTestsSerially(int argc,
+                            char** argv,
+                            RunTestSuiteCallback run_test_suite) {
+  CommandLine::Init(argc, argv);
+  return LaunchUnitTestsInternal(std::move(run_test_suite), 1U,
+                                 kDefaultTestBatchLimit, true,
+                                 BindOnce(&InitGoogleTestChar, &argc, argv));
+}
+
+int LaunchUnitTestsWithOptions(int argc,
+                               char** argv,
+                               size_t parallel_jobs,
+                               int default_batch_limit,
+                               bool use_job_objects,
+                               RunTestSuiteCallback run_test_suite) {
+  CommandLine::Init(argc, argv);
+  return LaunchUnitTestsInternal(std::move(run_test_suite), parallel_jobs,
+                                 default_batch_limit, use_job_objects,
+                                 BindOnce(&InitGoogleTestChar, &argc, argv));
+}
+
+#if defined(OS_WIN)
+int LaunchUnitTests(int argc,
+                    wchar_t** argv,
+                    bool use_job_objects,
+                    RunTestSuiteCallback run_test_suite) {
+  // Windows CommandLine::Init ignores argv anyway.
+  CommandLine::Init(argc, NULL);
+  size_t parallel_jobs = NumParallelJobs();
+  if (parallel_jobs == 0U) {
+    return 1;
+  }
+  return LaunchUnitTestsInternal(std::move(run_test_suite), parallel_jobs,
+                                 kDefaultTestBatchLimit, use_job_objects,
+                                 BindOnce(&InitGoogleTestWChar, &argc, argv));
+}
+#endif  // defined(OS_WIN)
+
+void RunUnitTestsSerially(
+    TestLauncher* test_launcher,
+    UnitTestPlatformDelegate* platform_delegate,
+    const std::vector<std::string>& test_names,
+    int launch_flags) {
+  if (test_names.empty())
+    return;
+
+  // Create a dedicated temporary directory to store the XML result data
+  // for each run, to ensure a clean state and make it possible to launch
+  // multiple processes in parallel.
+  FilePath output_file;
+  CHECK(platform_delegate->CreateResultsFile(&output_file));
+  FilePath flag_file;
+  platform_delegate->CreateTemporaryFile(&flag_file);
+
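+  // Run only the last test now; the observer re-posts RunUnitTestsSerially
+  // with the remaining names once this child process completes, so the list
+  // is consumed one test (and one child process) at a time.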
+  auto observer = std::make_unique<SerialUnitTestProcessLifetimeObserver>(
+      test_launcher, platform_delegate,
+      std::vector<std::string>(1, test_names.back()), launch_flags, output_file,
+      flag_file,
+      std::vector<std::string>(test_names.begin(), test_names.end() - 1));
+
+  CommandLine cmd_line(platform_delegate->GetCommandLineForChildGTestProcess(
+      observer->test_names(), output_file, flag_file));
+
+  TestLauncher::LaunchOptions launch_options;
+  launch_options.flags = launch_flags;
+  test_launcher->LaunchChildGTestProcess(
+      cmd_line, platform_delegate->GetWrapperForChildGTestProcess(),
+      TestTimeouts::test_launcher_timeout(), launch_options,
+      std::move(observer));
+}
+
+void RunUnitTestsBatch(
+    TestLauncher* test_launcher,
+    UnitTestPlatformDelegate* platform_delegate,
+    const std::vector<std::string>& test_names,
+    int launch_flags) {
+  if (test_names.empty())
+    return;
+
+  // Create a dedicated temporary directory to store the XML result data
+  // for each run, to ensure a clean state and make it possible to launch
+  // multiple processes in parallel.
+  FilePath output_file;
+  CHECK(platform_delegate->CreateResultsFile(&output_file));
+  FilePath flag_file;
+  platform_delegate->CreateTemporaryFile(&flag_file);
+
+  auto observer = std::make_unique<ParallelUnitTestProcessLifetimeObserver>(
+      test_launcher, platform_delegate, test_names, launch_flags, output_file,
+      flag_file);
+
+  CommandLine cmd_line(platform_delegate->GetCommandLineForChildGTestProcess(
+      test_names, output_file, flag_file));
+
+  // Adjust the timeout depending on how many tests we're running
+  // (note that e.g. the last batch of tests will be smaller).
+  // TODO(phajdan.jr): Consider an adaptive timeout, which can change
+  // depending on how many tests ran and how many remain.
+  // Note: do NOT parse the child's stdout to do that; it's known to be
+  // unreliable (e.g. buffering issues can mix up the output).
+  TimeDelta timeout = test_names.size() * TestTimeouts::test_launcher_timeout();
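+  // For example (numbers illustrative): a batch of 10 tests with a 45 s
+  // per-test launcher timeout yields a 450 s timeout for the whole batch.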
+
+  TestLauncher::LaunchOptions options;
+  options.flags = launch_flags;
+  test_launcher->LaunchChildGTestProcess(
+      cmd_line, platform_delegate->GetWrapperForChildGTestProcess(), timeout,
+      options, std::move(observer));
+}
+
+UnitTestLauncherDelegate::UnitTestLauncherDelegate(
+    UnitTestPlatformDelegate* platform_delegate,
+    size_t batch_limit,
+    bool use_job_objects)
+    : platform_delegate_(platform_delegate),
+      batch_limit_(batch_limit),
+      use_job_objects_(use_job_objects) {
+}
+
+UnitTestLauncherDelegate::~UnitTestLauncherDelegate() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+}
+
+bool UnitTestLauncherDelegate::GetTests(std::vector<TestIdentifier>* output) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  return platform_delegate_->GetTests(output);
+}
+
+bool UnitTestLauncherDelegate::ShouldRunTest(const std::string& test_case_name,
+                                             const std::string& test_name) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  // There is no additional logic to disable specific tests.
+  return true;
+}
+
+size_t UnitTestLauncherDelegate::RunTests(
+    TestLauncher* test_launcher,
+    const std::vector<std::string>& test_names) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  int launch_flags = use_job_objects_ ? TestLauncher::USE_JOB_OBJECTS : 0;
+
+  std::vector<std::string> batch;
+  for (size_t i = 0; i < test_names.size(); i++) {
+    batch.push_back(test_names[i]);
+
+    // Use 0 to indicate unlimited batch size.
+    if (batch.size() >= batch_limit_ && batch_limit_ != 0) {
+      RunUnitTestsBatch(test_launcher, platform_delegate_, batch, launch_flags);
+      batch.clear();
+    }
+  }
+
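+  // Launch whatever remains as a final (possibly smaller) batch; with
+  // batch_limit_ == 0 this is the only launch and covers all tests.
+  // RunUnitTestsBatch() is a no-op for an empty |batch|.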
+  RunUnitTestsBatch(test_launcher, platform_delegate_, batch, launch_flags);
+
+  return test_names.size();
+}
+
+size_t UnitTestLauncherDelegate::RetryTests(
+    TestLauncher* test_launcher,
+    const std::vector<std::string>& test_names) {
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE,
+      BindOnce(&RunUnitTestsSerially, test_launcher, platform_delegate_,
+               test_names,
+               use_job_objects_ ? TestLauncher::USE_JOB_OBJECTS : 0));
+  return test_names.size();
+}
+
+}  // namespace base
diff --git a/base/test/launcher/unit_test_launcher.h b/base/test/launcher/unit_test_launcher.h
new file mode 100644
index 0000000..0d1c21e
--- /dev/null
+++ b/base/test/launcher/unit_test_launcher.h
@@ -0,0 +1,134 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_LAUNCHER_UNIT_TEST_LAUNCHER_H_
+#define BASE_TEST_LAUNCHER_UNIT_TEST_LAUNCHER_H_
+
+#include <stddef.h>
+
+#include <string>
+#include <vector>
+
+#include "base/callback.h"
+#include "base/files/file_path.h"
+#include "base/macros.h"
+#include "base/test/launcher/test_launcher.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// Callback that runs a test suite and returns its exit code.
+using RunTestSuiteCallback = OnceCallback<int(void)>;
+
+// Launches unit tests in the given test suite. Returns the exit code.
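+// A hypothetical caller (assuming a TestSuite-style run method; names are
+// illustrative):
+//   int main(int argc, char** argv) {
+//     base::TestSuite test_suite(argc, argv);
+//     return base::LaunchUnitTests(
+//         argc, argv,
+//         base::BindOnce(&base::TestSuite::Run,
+//                        base::Unretained(&test_suite)));
+//   }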
+int LaunchUnitTests(int argc, char** argv, RunTestSuiteCallback run_test_suite);
+
+// Same as above, but always runs tests serially.
+int LaunchUnitTestsSerially(int argc,
+                            char** argv,
+                            RunTestSuiteCallback run_test_suite);
+
+// Launches unit tests in the given test suite. Returns the exit code.
+// |parallel_jobs| is the number of parallel test jobs.
+// |default_batch_limit| is the default size of test batch
+// (use 0 to disable batching).
+// |use_job_objects| determines whether to use job objects.
+int LaunchUnitTestsWithOptions(int argc,
+                               char** argv,
+                               size_t parallel_jobs,
+                               int default_batch_limit,
+                               bool use_job_objects,
+                               RunTestSuiteCallback run_test_suite);
+
+#if defined(OS_WIN)
+// Launches unit tests in the given test suite. Returns the exit code.
+// |use_job_objects| determines whether to use job objects.
+int LaunchUnitTests(int argc,
+                    wchar_t** argv,
+                    bool use_job_objects,
+                    RunTestSuiteCallback run_test_suite);
+#endif  // defined(OS_WIN)
+
+// Delegate to abstract away platform differences for unit tests.
+class UnitTestPlatformDelegate {
+ public:
+  // Called to get names of tests available for running. The delegate
+  // must put the result in |output| and return true on success.
+  virtual bool GetTests(std::vector<TestIdentifier>* output) = 0;
+
+  // Called to create a temporary file for storing test results. The delegate
+  // must put the resulting path in |path| and return true on success.
+  virtual bool CreateResultsFile(base::FilePath* path) = 0;
+
+  // Called to create a new temporary file. The delegate must put the resulting
+  // path in |path| and return true on success.
+  virtual bool CreateTemporaryFile(base::FilePath* path) = 0;
+
+  // Returns the command line for a child GTest process based on the command
+  // line of the current process. |test_names| is a vector of full test names
+  // (e.g. "A.B"), |output_file| is the path to the GTest XML output file.
+  virtual CommandLine GetCommandLineForChildGTestProcess(
+      const std::vector<std::string>& test_names,
+      const base::FilePath& output_file,
+      const base::FilePath& flag_file) = 0;
+
+  // Returns the wrapper to use for the child GTest process. An empty string
+  // means no wrapper.
+  virtual std::string GetWrapperForChildGTestProcess() = 0;
+
+  // Relaunches tests, e.g. after a crash.
+  virtual void RelaunchTests(TestLauncher* test_launcher,
+                             const std::vector<std::string>& test_names,
+                             int launch_flags) = 0;
+
+ protected:
+  ~UnitTestPlatformDelegate() = default;
+};
+
+// Runs tests serially, each in its own process.
+void RunUnitTestsSerially(TestLauncher* test_launcher,
+                          UnitTestPlatformDelegate* platform_delegate,
+                          const std::vector<std::string>& test_names,
+                          int launch_flags);
+
+// Runs tests in batches (each batch in its own process).
+void RunUnitTestsBatch(TestLauncher* test_launcher,
+                       UnitTestPlatformDelegate* platform_delegate,
+                       const std::vector<std::string>& test_names,
+                       int launch_flags);
+
+// Test launcher delegate for unit tests (mostly to support batching).
+class UnitTestLauncherDelegate : public TestLauncherDelegate {
+ public:
+  UnitTestLauncherDelegate(UnitTestPlatformDelegate* delegate,
+                           size_t batch_limit,
+                           bool use_job_objects);
+  ~UnitTestLauncherDelegate() override;
+
+ private:
+  // TestLauncherDelegate:
+  bool GetTests(std::vector<TestIdentifier>* output) override;
+  bool ShouldRunTest(const std::string& test_case_name,
+                     const std::string& test_name) override;
+  size_t RunTests(TestLauncher* test_launcher,
+                  const std::vector<std::string>& test_names) override;
+  size_t RetryTests(TestLauncher* test_launcher,
+                    const std::vector<std::string>& test_names) override;
+
+  ThreadChecker thread_checker_;
+
+  UnitTestPlatformDelegate* platform_delegate_;
+
+  // Maximum number of tests to run in a single batch.
+  size_t batch_limit_;
+
+  // Determines whether we use job objects on Windows.
+  bool use_job_objects_;
+
+  DISALLOW_COPY_AND_ASSIGN(UnitTestLauncherDelegate);
+};
+
+}  // namespace base
+
+#endif  // BASE_TEST_LAUNCHER_UNIT_TEST_LAUNCHER_H_
diff --git a/base/test/launcher/unit_test_launcher_ios.cc b/base/test/launcher/unit_test_launcher_ios.cc
new file mode 100644
index 0000000..0bb31f7
--- /dev/null
+++ b/base/test/launcher/unit_test_launcher_ios.cc
@@ -0,0 +1,42 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/launcher/unit_test_launcher.h"
+
+#include "base/command_line.h"
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/logging.h"
+#include "base/mac/foundation_util.h"
+#include "base/test/gtest_util.h"
+#include "base/test/test_switches.h"
+
+namespace base {
+
+int LaunchUnitTests(int argc,
+                    char** argv,
+                    RunTestSuiteCallback run_test_suite) {
+  CHECK(CommandLine::InitializedForCurrentProcess() ||
+        CommandLine::Init(argc, argv));
+  const CommandLine* command_line = CommandLine::ForCurrentProcess();
+  if (command_line->HasSwitch(switches::kTestLauncherListTests)) {
+    FilePath list_path(command_line->GetSwitchValuePath(
+        switches::kTestLauncherListTests));
+    if (WriteCompiledInTestsToFile(list_path)) {
+      return 0;
+    } else {
+      LOG(ERROR) << "Failed to write list of tests.";
+      return 1;
+    }
+  } else if (command_line->HasSwitch(
+                 switches::kTestLauncherPrintWritablePath)) {
+    fprintf(stdout, "%s", mac::GetUserLibraryPath().value().c_str());
+    fflush(stdout);
+    return 0;
+  }
+
+  return std::move(run_test_suite).Run();
+}
+
+}  // namespace base
diff --git a/base/test/launcher/unit_test_launcher_nacl_nonsfi.cc b/base/test/launcher/unit_test_launcher_nacl_nonsfi.cc
new file mode 100644
index 0000000..237a3da
--- /dev/null
+++ b/base/test/launcher/unit_test_launcher_nacl_nonsfi.cc
@@ -0,0 +1,51 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/launcher/unit_test_launcher.h"
+
+#include "base/command_line.h"
+#include "base/files/file_util.h"
+#include "base/test/gtest_util.h"
+#include "base/test/gtest_xml_unittest_result_printer.h"
+#include "base/test/test_switches.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+int LaunchUnitTests(int argc,
+                    char** argv,
+                    RunTestSuiteCallback run_test_suite) {
+  CHECK(CommandLine::InitializedForCurrentProcess() ||
+        CommandLine::Init(argc, argv));
+  const CommandLine* command_line = CommandLine::ForCurrentProcess();
+  if (command_line->HasSwitch(switches::kTestLauncherListTests)) {
+    // Dump the list of all tests into a file.
+    FilePath list_path(
+        command_line->GetSwitchValuePath(switches::kTestLauncherListTests));
+    if (!WriteCompiledInTestsToFile(list_path)) {
+      LOG(ERROR) << "Failed to write list of tests.";
+      return 1;
+    }
+
+    // Successfully done.
+    return 0;
+  }
+
+  // Register the XML output printer if the --test-launcher-output flag is set.
+  if (command_line->HasSwitch(switches::kTestLauncherOutput)) {
+    FilePath output_path = command_line->GetSwitchValuePath(
+        switches::kTestLauncherOutput);
+    if (PathExists(output_path)) {
+      LOG(WARNING) << "Test launcher output path exists. Do not override";
+    } else {
+      XmlUnitTestResultPrinter* printer = new XmlUnitTestResultPrinter;
+      CHECK(printer->Initialize(output_path));
+      testing::UnitTest::GetInstance()->listeners().Append(printer);
+    }
+  }
+
+  return std::move(run_test_suite).Run();
+}
+
+}  // namespace base
diff --git a/base/test/malloc_wrapper.cc b/base/test/malloc_wrapper.cc
new file mode 100644
index 0000000..eb280a3
--- /dev/null
+++ b/base/test/malloc_wrapper.cc
@@ -0,0 +1,11 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "malloc_wrapper.h"
+
+#include <stdlib.h>
+
+void* MallocWrapper(size_t size) {
+  return malloc(size);
+}
diff --git a/base/test/malloc_wrapper.h b/base/test/malloc_wrapper.h
new file mode 100644
index 0000000..d06228d
--- /dev/null
+++ b/base/test/malloc_wrapper.h
@@ -0,0 +1,21 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_MALLOC_WRAPPER_H_
+#define BASE_TEST_MALLOC_WRAPPER_H_
+
+#include <stddef.h>
+
+// BASE_EXPORT depends on COMPONENT_BUILD.
+// This will always be a separate shared library, so don't use BASE_EXPORT here.
+#if defined(WIN32)
+#define MALLOC_WRAPPER_EXPORT __declspec(dllexport)
+#else
+#define MALLOC_WRAPPER_EXPORT __attribute__((visibility("default")))
+#endif  // defined(WIN32)
+
+// Calls malloc directly.
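+//
+// A hypothetical use: tests that need an allocation the compiler cannot see
+// through (e.g. allocator interception tests) can load this library at
+// runtime and call MallocWrapper() instead of calling malloc() directly.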
+MALLOC_WRAPPER_EXPORT void* MallocWrapper(size_t size);
+
+#endif  // BASE_TEST_MALLOC_WRAPPER_H_
diff --git a/base/test/mock_callback.h b/base/test/mock_callback.h
new file mode 100644
index 0000000..7ac4d34
--- /dev/null
+++ b/base/test/mock_callback.h
@@ -0,0 +1,366 @@
+// This file was GENERATED by command:
+//     pump.py mock_callback.h.pump
+// DO NOT EDIT BY HAND!!!
+
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Analogous to GMock's built-in MockFunction, but for base::Callback instead of
+// std::function. It takes the full callback type as a parameter, so that it can
+// support both OnceCallback and RepeatingCallback.
+//
+// Use:
+//   using FooCallback = base::Callback<int(std::string)>;
+//
+//   TEST(FooTest, RunsCallbackWithBarArgument) {
+//     base::MockCallback<FooCallback> callback;
+//     EXPECT_CALL(callback, Run("bar")).WillOnce(Return(1));
+//     Foo(callback.Get());
+//   }
+//
+// Can be used with StrictMock and NiceMock. Caller must ensure that it outlives
+// any base::Callback obtained from it.
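+//
+// A OnceCallback variant works the same way (a minimal sketch):
+//   base::MockCallback<base::OnceCallback<void(int)>> cb;
+//   EXPECT_CALL(cb, Run(42));
+//   std::move(cb.Get()).Run(42);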
+
+#ifndef BASE_TEST_MOCK_CALLBACK_H_
+#define BASE_TEST_MOCK_CALLBACK_H_
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/macros.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace base {
+
+// clang-format off
+
+template <typename F>
+class MockCallback;
+
+template <typename R>
+class MockCallback<Callback<R()>> {
+ public:
+  MockCallback() = default;
+  MOCK_METHOD0_T(Run, R());
+
+  Callback<R()> Get() {
+    return Bind(&MockCallback::Run, Unretained(this));
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MockCallback);
+};
+
+template <typename R>
+class MockCallback<OnceCallback<R()>> {
+ public:
+  MockCallback() = default;
+  MOCK_METHOD0_T(Run, R());
+
+  OnceCallback<R()> Get() {
+    return BindOnce(&MockCallback::Run, Unretained(this));
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MockCallback);
+};
+
+template <typename R, typename A1>
+class MockCallback<Callback<R(A1)>> {
+ public:
+  MockCallback() = default;
+  MOCK_METHOD1_T(Run, R(A1));
+
+  Callback<R(A1)> Get() {
+    return Bind(&MockCallback::Run, Unretained(this));
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MockCallback);
+};
+
+template <typename R, typename A1>
+class MockCallback<OnceCallback<R(A1)>> {
+ public:
+  MockCallback() = default;
+  MOCK_METHOD1_T(Run, R(A1));
+
+  OnceCallback<R(A1)> Get() {
+    return BindOnce(&MockCallback::Run, Unretained(this));
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MockCallback);
+};
+
+template <typename R, typename A1, typename A2>
+class MockCallback<Callback<R(A1, A2)>> {
+ public:
+  MockCallback() = default;
+  MOCK_METHOD2_T(Run, R(A1, A2));
+
+  Callback<R(A1, A2)> Get() {
+    return Bind(&MockCallback::Run, Unretained(this));
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MockCallback);
+};
+
+template <typename R, typename A1, typename A2>
+class MockCallback<OnceCallback<R(A1, A2)>> {
+ public:
+  MockCallback() = default;
+  MOCK_METHOD2_T(Run, R(A1, A2));
+
+  OnceCallback<R(A1, A2)> Get() {
+    return BindOnce(&MockCallback::Run, Unretained(this));
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MockCallback);
+};
+
+template <typename R, typename A1, typename A2, typename A3>
+class MockCallback<Callback<R(A1, A2, A3)>> {
+ public:
+  MockCallback() = default;
+  MOCK_METHOD3_T(Run, R(A1, A2, A3));
+
+  Callback<R(A1, A2, A3)> Get() {
+    return Bind(&MockCallback::Run, Unretained(this));
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MockCallback);
+};
+
+template <typename R, typename A1, typename A2, typename A3>
+class MockCallback<OnceCallback<R(A1, A2, A3)>> {
+ public:
+  MockCallback() = default;
+  MOCK_METHOD3_T(Run, R(A1, A2, A3));
+
+  OnceCallback<R(A1, A2, A3)> Get() {
+    return BindOnce(&MockCallback::Run, Unretained(this));
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MockCallback);
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4>
+class MockCallback<Callback<R(A1, A2, A3, A4)>> {
+ public:
+  MockCallback() = default;
+  MOCK_METHOD4_T(Run, R(A1, A2, A3, A4));
+
+  Callback<R(A1, A2, A3, A4)> Get() {
+    return Bind(&MockCallback::Run, Unretained(this));
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MockCallback);
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4>
+class MockCallback<OnceCallback<R(A1, A2, A3, A4)>> {
+ public:
+  MockCallback() = default;
+  MOCK_METHOD4_T(Run, R(A1, A2, A3, A4));
+
+  OnceCallback<R(A1, A2, A3, A4)> Get() {
+    return BindOnce(&MockCallback::Run, Unretained(this));
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MockCallback);
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+    typename A5>
+class MockCallback<Callback<R(A1, A2, A3, A4, A5)>> {
+ public:
+  MockCallback() = default;
+  MOCK_METHOD5_T(Run, R(A1, A2, A3, A4, A5));
+
+  Callback<R(A1, A2, A3, A4, A5)> Get() {
+    return Bind(&MockCallback::Run, Unretained(this));
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MockCallback);
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+    typename A5>
+class MockCallback<OnceCallback<R(A1, A2, A3, A4, A5)>> {
+ public:
+  MockCallback() = default;
+  MOCK_METHOD5_T(Run, R(A1, A2, A3, A4, A5));
+
+  OnceCallback<R(A1, A2, A3, A4, A5)> Get() {
+    return BindOnce(&MockCallback::Run, Unretained(this));
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MockCallback);
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+    typename A5, typename A6>
+class MockCallback<Callback<R(A1, A2, A3, A4, A5, A6)>> {
+ public:
+  MockCallback() = default;
+  MOCK_METHOD6_T(Run, R(A1, A2, A3, A4, A5, A6));
+
+  Callback<R(A1, A2, A3, A4, A5, A6)> Get() {
+    return Bind(&MockCallback::Run, Unretained(this));
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MockCallback);
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+    typename A5, typename A6>
+class MockCallback<OnceCallback<R(A1, A2, A3, A4, A5, A6)>> {
+ public:
+  MockCallback() = default;
+  MOCK_METHOD6_T(Run, R(A1, A2, A3, A4, A5, A6));
+
+  OnceCallback<R(A1, A2, A3, A4, A5, A6)> Get() {
+    return BindOnce(&MockCallback::Run, Unretained(this));
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MockCallback);
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+    typename A5, typename A6, typename A7>
+class MockCallback<Callback<R(A1, A2, A3, A4, A5, A6, A7)>> {
+ public:
+  MockCallback() = default;
+  MOCK_METHOD7_T(Run, R(A1, A2, A3, A4, A5, A6, A7));
+
+  Callback<R(A1, A2, A3, A4, A5, A6, A7)> Get() {
+    return Bind(&MockCallback::Run, Unretained(this));
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MockCallback);
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+    typename A5, typename A6, typename A7>
+class MockCallback<OnceCallback<R(A1, A2, A3, A4, A5, A6, A7)>> {
+ public:
+  MockCallback() = default;
+  MOCK_METHOD7_T(Run, R(A1, A2, A3, A4, A5, A6, A7));
+
+  OnceCallback<R(A1, A2, A3, A4, A5, A6, A7)> Get() {
+    return BindOnce(&MockCallback::Run, Unretained(this));
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MockCallback);
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+    typename A5, typename A6, typename A7, typename A8>
+class MockCallback<Callback<R(A1, A2, A3, A4, A5, A6, A7, A8)>> {
+ public:
+  MockCallback() = default;
+  MOCK_METHOD8_T(Run, R(A1, A2, A3, A4, A5, A6, A7, A8));
+
+  Callback<R(A1, A2, A3, A4, A5, A6, A7, A8)> Get() {
+    return Bind(&MockCallback::Run, Unretained(this));
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MockCallback);
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+    typename A5, typename A6, typename A7, typename A8>
+class MockCallback<OnceCallback<R(A1, A2, A3, A4, A5, A6, A7, A8)>> {
+ public:
+  MockCallback() = default;
+  MOCK_METHOD8_T(Run, R(A1, A2, A3, A4, A5, A6, A7, A8));
+
+  OnceCallback<R(A1, A2, A3, A4, A5, A6, A7, A8)> Get() {
+    return BindOnce(&MockCallback::Run, Unretained(this));
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MockCallback);
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+    typename A5, typename A6, typename A7, typename A8, typename A9>
+class MockCallback<Callback<R(A1, A2, A3, A4, A5, A6, A7, A8, A9)>> {
+ public:
+  MockCallback() = default;
+  MOCK_METHOD9_T(Run, R(A1, A2, A3, A4, A5, A6, A7, A8, A9));
+
+  Callback<R(A1, A2, A3, A4, A5, A6, A7, A8, A9)> Get() {
+    return Bind(&MockCallback::Run, Unretained(this));
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MockCallback);
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+    typename A5, typename A6, typename A7, typename A8, typename A9>
+class MockCallback<OnceCallback<R(A1, A2, A3, A4, A5, A6, A7, A8, A9)>> {
+ public:
+  MockCallback() = default;
+  MOCK_METHOD9_T(Run, R(A1, A2, A3, A4, A5, A6, A7, A8, A9));
+
+  OnceCallback<R(A1, A2, A3, A4, A5, A6, A7, A8, A9)> Get() {
+    return BindOnce(&MockCallback::Run, Unretained(this));
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MockCallback);
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+    typename A5, typename A6, typename A7, typename A8, typename A9,
+    typename A10>
+class MockCallback<Callback<R(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10)>> {
+ public:
+  MockCallback() = default;
+  MOCK_METHOD10_T(Run, R(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10));
+
+  Callback<R(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10)> Get() {
+    return Bind(&MockCallback::Run, Unretained(this));
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MockCallback);
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+    typename A5, typename A6, typename A7, typename A8, typename A9,
+    typename A10>
+class MockCallback<OnceCallback<R(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10)>> {
+ public:
+  MockCallback() = default;
+  MOCK_METHOD10_T(Run, R(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10));
+
+  OnceCallback<R(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10)> Get() {
+    return BindOnce(&MockCallback::Run, Unretained(this));
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MockCallback);
+};
+
+// clang-format on
+
+}  // namespace base
+
+#endif  // BASE_TEST_MOCK_CALLBACK_H_
diff --git a/base/test/mock_callback.h.pump b/base/test/mock_callback.h.pump
new file mode 100644
index 0000000..3372789
--- /dev/null
+++ b/base/test/mock_callback.h.pump
@@ -0,0 +1,85 @@
+$$ This is a pump file for generating file templates.  Pump is a Python
+$$ script that is part of the Google Test suite of utilities.  A description
+$$ can be found here:
+$$
+$$ https://github.com/google/googletest/blob/master/googletest/docs/PumpManual.md
+$$
+$$ MAX_ARITY controls the number of arguments that MockCallback supports.
+$$ It is chosen to match the number of arguments GMock supports.
+$var MAX_ARITY = 10
+$$
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Analogous to GMock's built-in MockFunction, but for base::Callback instead of
+// std::function. It takes the full callback type as a parameter, so that it can
+// support both OnceCallback and RepeatingCallback.
+//
+// Use:
+//   using FooCallback = base::Callback<int(std::string)>;
+//
+//   TEST(FooTest, RunsCallbackWithBarArgument) {
+//     base::MockCallback<FooCallback> callback;
+//     EXPECT_CALL(callback, Run("bar")).WillOnce(Return(1));
+//     Foo(callback.Get());
+//   }
+//
+// Can be used with StrictMock and NiceMock. Caller must ensure that it outlives
+// any base::Callback obtained from it.
+
+#ifndef BASE_TEST_MOCK_CALLBACK_H_
+#define BASE_TEST_MOCK_CALLBACK_H_
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/macros.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace base {
+
+// clang-format off
+
+template <typename F>
+class MockCallback;
+
+$range i 0..MAX_ARITY
+$for i [[
+$range j 1..i
+$var run_type = [[R($for j, [[A$j]])]]
+
+template <typename R$for j [[, typename A$j]]>
+class MockCallback<Callback<$run_type>> {
+ public:
+  MockCallback() = default;
+  MOCK_METHOD$(i)_T(Run, $run_type);
+
+  Callback<$run_type> Get() {
+    return Bind(&MockCallback::Run, Unretained(this));
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MockCallback);
+};
+
+template <typename R$for j [[, typename A$j]]>
+class MockCallback<OnceCallback<$run_type>> {
+ public:
+  MockCallback() = default;
+  MOCK_METHOD$(i)_T(Run, $run_type);
+
+  OnceCallback<$run_type> Get() {
+    return BindOnce(&MockCallback::Run, Unretained(this));
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MockCallback);
+};
+
+]]
+
+// clang-format on
+
+}  // namespace base
+
+#endif  // BASE_TEST_MOCK_CALLBACK_H_
diff --git a/base/test/mock_callback_unittest.cc b/base/test/mock_callback_unittest.cc
new file mode 100644
index 0000000..c5f109f
--- /dev/null
+++ b/base/test/mock_callback_unittest.cc
@@ -0,0 +1,59 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/mock_callback.h"
+
+#include "base/callback.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+using testing::InSequence;
+using testing::Return;
+
+namespace base {
+namespace {
+
+TEST(MockCallbackTest, ZeroArgs) {
+  MockCallback<Closure> mock_closure;
+  EXPECT_CALL(mock_closure, Run());
+  mock_closure.Get().Run();
+
+  MockCallback<Callback<int()>> mock_int_callback;
+  {
+    InSequence sequence;
+    EXPECT_CALL(mock_int_callback, Run()).WillOnce(Return(42));
+    EXPECT_CALL(mock_int_callback, Run()).WillOnce(Return(88));
+  }
+  EXPECT_EQ(42, mock_int_callback.Get().Run());
+  EXPECT_EQ(88, mock_int_callback.Get().Run());
+}
+
+TEST(MockCallbackTest, WithArgs) {
+  MockCallback<Callback<int(int, int)>> mock_two_int_callback;
+  EXPECT_CALL(mock_two_int_callback, Run(1, 2)).WillOnce(Return(42));
+  EXPECT_CALL(mock_two_int_callback, Run(0, 0)).WillRepeatedly(Return(-1));
+  Callback<int(int, int)> two_int_callback = mock_two_int_callback.Get();
+  EXPECT_EQ(-1, two_int_callback.Run(0, 0));
+  EXPECT_EQ(42, two_int_callback.Run(1, 2));
+  EXPECT_EQ(-1, two_int_callback.Run(0, 0));
+}
+
+TEST(MockCallbackTest, ZeroArgsOnce) {
+  MockCallback<OnceClosure> mock_closure;
+  EXPECT_CALL(mock_closure, Run());
+  mock_closure.Get().Run();
+
+  MockCallback<OnceCallback<int()>> mock_int_callback;
+  EXPECT_CALL(mock_int_callback, Run()).WillOnce(Return(88));
+  EXPECT_EQ(88, mock_int_callback.Get().Run());
+}
+
+TEST(MockCallbackTest, WithArgsOnce) {
+  MockCallback<OnceCallback<int(int, int)>> mock_two_int_callback;
+  EXPECT_CALL(mock_two_int_callback, Run(1, 2)).WillOnce(Return(42));
+  OnceCallback<int(int, int)> two_int_callback = mock_two_int_callback.Get();
+  EXPECT_EQ(42, std::move(two_int_callback).Run(1, 2));
+}
+
+}  // namespace
+}  // namespace base
diff --git a/base/test/mock_chrome_application_mac.h b/base/test/mock_chrome_application_mac.h
new file mode 100644
index 0000000..ffa3080
--- /dev/null
+++ b/base/test/mock_chrome_application_mac.h
@@ -0,0 +1,33 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_MOCK_CHROME_APPLICATION_MAC_H_
+#define BASE_TEST_MOCK_CHROME_APPLICATION_MAC_H_
+
+#if defined(__OBJC__)
+
+#import <AppKit/AppKit.h>
+
+#include "base/mac/scoped_sending_event.h"
+#include "base/message_loop/message_pump_mac.h"
+
+// A basic implementation of CrAppProtocol and
+// CrAppControlProtocol. This can be used in tests that need an
+// NSApplication and use a runloop, or which need a ScopedSendingEvent
+// when handling a nested event loop.
+@interface MockCrApp : NSApplication<CrAppProtocol,
+                                     CrAppControlProtocol> {
+ @private
+  BOOL handlingSendEvent_;
+}
+@end
+
+#endif
+
+// To be used to instantiate MockCrApp from C++ code.
+namespace mock_cr_app {
+void RegisterMockCrApp();
+}  // namespace mock_cr_app
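+
+// A minimal usage sketch (test setup code; call this before anything creates
+// a UI message pump):
+//   mock_cr_app::RegisterMockCrApp();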
+
+#endif  // BASE_TEST_MOCK_CHROME_APPLICATION_MAC_H_
diff --git a/base/test/mock_chrome_application_mac.mm b/base/test/mock_chrome_application_mac.mm
new file mode 100644
index 0000000..0890553
--- /dev/null
+++ b/base/test/mock_chrome_application_mac.mm
@@ -0,0 +1,44 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/mock_chrome_application_mac.h"
+
+#include "base/auto_reset.h"
+#include "base/logging.h"
+
+@implementation MockCrApp
+
++ (NSApplication*)sharedApplication {
+  NSApplication* app = [super sharedApplication];
+  DCHECK([app conformsToProtocol:@protocol(CrAppControlProtocol)])
+      << "Existing NSApp (class " << [[app className] UTF8String]
+      << ") does not conform to required protocol.";
+  DCHECK(base::MessagePumpMac::UsingCrApp())
+      << "MessagePumpMac::Create() was called before "
+      << "+[MockCrApp sharedApplication]";
+  return app;
+}
+
+- (void)sendEvent:(NSEvent*)event {
+  base::AutoReset<BOOL> scoper(&handlingSendEvent_, YES);
+  [super sendEvent:event];
+}
+
+- (void)setHandlingSendEvent:(BOOL)handlingSendEvent {
+  handlingSendEvent_ = handlingSendEvent;
+}
+
+- (BOOL)isHandlingSendEvent {
+  return handlingSendEvent_;
+}
+
+@end
+
+namespace mock_cr_app {
+
+void RegisterMockCrApp() {
+  [MockCrApp sharedApplication];
+}
+
+}  // namespace mock_cr_app
diff --git a/base/test/mock_devices_changed_observer.cc b/base/test/mock_devices_changed_observer.cc
new file mode 100644
index 0000000..9fc57cd
--- /dev/null
+++ b/base/test/mock_devices_changed_observer.cc
@@ -0,0 +1,13 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/mock_devices_changed_observer.h"
+
+namespace base {
+
+MockDevicesChangedObserver::MockDevicesChangedObserver() = default;
+
+MockDevicesChangedObserver::~MockDevicesChangedObserver() = default;
+
+}  // namespace base
diff --git a/base/test/mock_devices_changed_observer.h b/base/test/mock_devices_changed_observer.h
new file mode 100644
index 0000000..0734fb4
--- /dev/null
+++ b/base/test/mock_devices_changed_observer.h
@@ -0,0 +1,31 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_MOCK_DEVICES_CHANGED_OBSERVER_H_
+#define BASE_TEST_MOCK_DEVICES_CHANGED_OBSERVER_H_
+
+#include <string>
+
+#include "base/macros.h"
+#include "base/system_monitor/system_monitor.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace base {
+
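+// gMock-based implementation of SystemMonitor::DevicesChangedObserver for
+// tests. A minimal sketch:
+//   MockDevicesChangedObserver observer;
+//   EXPECT_CALL(observer, OnDevicesChanged(testing::_));
+//   // ... trigger a device-change notification on the SystemMonitor ...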
+class MockDevicesChangedObserver
+    : public base::SystemMonitor::DevicesChangedObserver {
+ public:
+  MockDevicesChangedObserver();
+  ~MockDevicesChangedObserver() override;
+
+  MOCK_METHOD1(OnDevicesChanged,
+               void(base::SystemMonitor::DeviceType device_type));
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MockDevicesChangedObserver);
+};
+
+}  // namespace base
+
+#endif  // BASE_TEST_MOCK_DEVICES_CHANGED_OBSERVER_H_
diff --git a/base/test/mock_entropy_provider.cc b/base/test/mock_entropy_provider.cc
new file mode 100644
index 0000000..f3fd2a4
--- /dev/null
+++ b/base/test/mock_entropy_provider.cc
@@ -0,0 +1,20 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/mock_entropy_provider.h"
+
+namespace base {
+
+MockEntropyProvider::MockEntropyProvider() : entropy_value_(0.5) {}
+MockEntropyProvider::MockEntropyProvider(double entropy_value)
+    : entropy_value_(entropy_value) {}
+MockEntropyProvider::~MockEntropyProvider() = default;
+
+double MockEntropyProvider::GetEntropyForTrial(
+    const std::string& trial_name,
+    uint32_t randomization_seed) const {
+  return entropy_value_;
+}
+
+}  // namespace base
diff --git a/base/test/mock_entropy_provider.h b/base/test/mock_entropy_provider.h
new file mode 100644
index 0000000..ca2b4bc
--- /dev/null
+++ b/base/test/mock_entropy_provider.h
@@ -0,0 +1,32 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_MOCK_ENTROPY_PROVIDER_H_
+#define BASE_TEST_MOCK_ENTROPY_PROVIDER_H_
+
+#include <stdint.h>
+
+#include "base/metrics/field_trial.h"
+
+namespace base {
+
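+// An EntropyProvider that always returns a fixed value, making FieldTrial
+// group assignment deterministic in tests. A minimal sketch:
+//   MockEntropyProvider entropy_provider(0.9);
+//   // Pass |entropy_provider| wherever a FieldTrial::EntropyProvider is
+//   // expected when creating trials under test.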
+class MockEntropyProvider : public base::FieldTrial::EntropyProvider {
+ public:
+  MockEntropyProvider();
+  explicit MockEntropyProvider(double entropy_value);
+  ~MockEntropyProvider() override;
+
+  // base::FieldTrial::EntropyProvider:
+  double GetEntropyForTrial(const std::string& trial_name,
+                            uint32_t randomization_seed) const override;
+
+ private:
+  double entropy_value_;
+
+  DISALLOW_COPY_AND_ASSIGN(MockEntropyProvider);
+};
+
+}  // namespace base
+
+#endif  // BASE_TEST_MOCK_ENTROPY_PROVIDER_H_
diff --git a/base/test/mock_log.cc b/base/test/mock_log.cc
new file mode 100644
index 0000000..a09000d
--- /dev/null
+++ b/base/test/mock_log.cc
@@ -0,0 +1,68 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/mock_log.h"
+
+namespace base {
+namespace test {
+
+// static
+MockLog* MockLog::g_instance_ = nullptr;
+Lock MockLog::g_lock;
+
+MockLog::MockLog() : is_capturing_logs_(false) {
+}
+
+MockLog::~MockLog() {
+  if (is_capturing_logs_) {
+    StopCapturingLogs();
+  }
+}
+
+void MockLog::StartCapturingLogs() {
+  AutoLock scoped_lock(g_lock);
+
+  // We don't use CHECK(), which can generate a new LOG message, and
+  // thus can confuse MockLog objects or other registered
+  // LogSinks.
+  RAW_CHECK(!is_capturing_logs_);
+  RAW_CHECK(!g_instance_);
+
+  is_capturing_logs_ = true;
+  g_instance_ = this;
+  previous_handler_ = logging::GetLogMessageHandler();
+  logging::SetLogMessageHandler(LogMessageHandler);
+}
+
+void MockLog::StopCapturingLogs() {
+  AutoLock scoped_lock(g_lock);
+
+  // We don't use CHECK(), which can generate a new LOG message, and
+  // thus can confuse MockLog objects or other registered
+  // LogSinks.
+  RAW_CHECK(is_capturing_logs_);
+  RAW_CHECK(g_instance_ == this);
+
+  is_capturing_logs_ = false;
+  logging::SetLogMessageHandler(previous_handler_);
+  g_instance_ = nullptr;
+}
+
+// static
+bool MockLog::LogMessageHandler(int severity,
+                                const char* file,
+                                int line,
+                                size_t message_start,
+                                const std::string& str) {
+  // gMock guarantees thread-safety for calling a mocked method
+  // (https://github.com/google/googlemock/blob/master/googlemock/docs/CookBook.md#using-google-mock-and-threads)
+  // but we also need to make sure that Start/StopCapturingLogs are synchronized
+  // with LogMessageHandler.
+  AutoLock scoped_lock(g_lock);
+
+  return g_instance_->Log(severity, file, line, message_start, str);
+}
+
+}  // namespace test
+}  // namespace base
diff --git a/base/test/mock_log.h b/base/test/mock_log.h
new file mode 100644
index 0000000..cda2fcd
--- /dev/null
+++ b/base/test/mock_log.h
@@ -0,0 +1,100 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_MOCK_LOG_H_
+#define BASE_TEST_MOCK_LOG_H_
+
+#include <stddef.h>
+
+#include <string>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/synchronization/lock.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace base {
+namespace test {
+
+// A MockLog object intercepts LOG() messages issued during its lifespan.  Using
+// this together with gMock, it's very easy to test how a piece of code calls
+// LOG().  The typical usage:
+//
+//   TEST(FooTest, LogsCorrectly) {
+//     MockLog log;
+//
+//     // We expect the WARNING "Something bad!" exactly twice.
+//     EXPECT_CALL(log, Log(WARNING, _, "Something bad!"))
+//         .Times(2);
+//
+//     // We allow foo.cc to call LOG(INFO) any number of times.
+//     EXPECT_CALL(log, Log(INFO, HasSubstr("/foo.cc"), _))
+//         .Times(AnyNumber());
+//
+//     log.StartCapturingLogs();  // Call this after done setting expectations.
+//     Foo();  // Exercises the code under test.
+//   }
+//
+// CAVEAT: base/logging does not allow a thread to call LOG() again when it's
+// already inside a LOG() call.  Doing so will cause a deadlock.  Therefore,
+// it's the user's responsibility to not call LOG() in an action triggered by
+// MockLog::Log().  You may call RAW_LOG() instead.
+class MockLog {
+ public:
+  // Creates a MockLog object that is not capturing logs.  If it started
+  // capturing immediately, other threads that already exist and are logging
+  // would race with the test, as the user hasn't had a chance to set up
+  // expectations on this object yet (calling a mock method before setting the
+  // expectation is UNDEFINED behavior).
+  MockLog();
+
+  // When the object is destructed, it stops intercepting logs.
+  ~MockLog();
+
+  // Starts log capturing if the object isn't already doing so.
+  // Otherwise crashes.
+  void StartCapturingLogs();
+
+  // Stops log capturing if the object is capturing logs.  Otherwise crashes.
+  void StopCapturingLogs();
+
+  // Log method is invoked for every log message before it's sent to other log
+  // destinations (if any).  The method should return true to signal that it
+  // handled the message and the message should not be sent to other log
+  // destinations.
+  MOCK_METHOD5(Log,
+               bool(int severity,
+                    const char* file,
+                    int line,
+                    size_t message_start,
+                    const std::string& str));
+
+ private:
+  // The currently active mock log.
+  static MockLog* g_instance_;
+
+  // Lock protecting access to g_instance_.
+  static Lock g_lock;
+
+  // Static function which is set as the logging message handler.
+  // Called once for each message.
+  static bool LogMessageHandler(int severity,
+                                const char* file,
+                                int line,
+                                size_t message_start,
+                                const std::string& str);
+
+  // True if this object is currently capturing logs.
+  bool is_capturing_logs_;
+
+  // The previous handler to restore when the MockLog is destroyed.
+  logging::LogMessageHandlerFunction previous_handler_;
+
+  DISALLOW_COPY_AND_ASSIGN(MockLog);
+};
+
+}  // namespace test
+}  // namespace base
+
+#endif  // BASE_TEST_MOCK_LOG_H_
diff --git a/base/test/move_only_int.h b/base/test/move_only_int.h
new file mode 100644
index 0000000..6e90983
--- /dev/null
+++ b/base/test/move_only_int.h
@@ -0,0 +1,68 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_MOVE_ONLY_INT_H_
+#define BASE_TEST_MOVE_ONLY_INT_H_
+
+#include "base/macros.h"
+
+namespace base {
+
+// A move-only class that holds an integer. This is designed for testing
+// containers. See also CopyOnlyInt.
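+//
+// A minimal sketch of the intended use:
+//   std::vector<MoveOnlyInt> v;
+//   v.push_back(MoveOnlyInt(42));  // OK: move construction.
+//   // MoveOnlyInt copy = v[0];    // Does not compile: copying is disallowed.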
+class MoveOnlyInt {
+ public:
+  explicit MoveOnlyInt(int data = 1) : data_(data) {}
+  MoveOnlyInt(MoveOnlyInt&& other) : data_(other.data_) { other.data_ = 0; }
+  ~MoveOnlyInt() { data_ = 0; }
+
+  MoveOnlyInt& operator=(MoveOnlyInt&& other) {
+    data_ = other.data_;
+    other.data_ = 0;
+    return *this;
+  }
+
+  friend bool operator==(const MoveOnlyInt& lhs, const MoveOnlyInt& rhs) {
+    return lhs.data_ == rhs.data_;
+  }
+
+  friend bool operator!=(const MoveOnlyInt& lhs, const MoveOnlyInt& rhs) {
+    return !operator==(lhs, rhs);
+  }
+
+  friend bool operator<(const MoveOnlyInt& lhs, int rhs) {
+    return lhs.data_ < rhs;
+  }
+
+  friend bool operator<(int lhs, const MoveOnlyInt& rhs) {
+    return lhs < rhs.data_;
+  }
+
+  friend bool operator<(const MoveOnlyInt& lhs, const MoveOnlyInt& rhs) {
+    return lhs.data_ < rhs.data_;
+  }
+
+  friend bool operator>(const MoveOnlyInt& lhs, const MoveOnlyInt& rhs) {
+    return rhs < lhs;
+  }
+
+  friend bool operator<=(const MoveOnlyInt& lhs, const MoveOnlyInt& rhs) {
+    return !(rhs < lhs);
+  }
+
+  friend bool operator>=(const MoveOnlyInt& lhs, const MoveOnlyInt& rhs) {
+    return !(lhs < rhs);
+  }
+
+  int data() const { return data_; }
+
+ private:
+  volatile int data_;
+
+  DISALLOW_COPY_AND_ASSIGN(MoveOnlyInt);
+};
+
+}  // namespace base
+
+#endif  // BASE_TEST_MOVE_ONLY_INT_H_
diff --git a/base/test/multiprocess_test.cc b/base/test/multiprocess_test.cc
new file mode 100644
index 0000000..46556f7
--- /dev/null
+++ b/base/test/multiprocess_test.cc
@@ -0,0 +1,74 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/multiprocess_test.h"
+
+#include "base/base_switches.h"
+#include "base/command_line.h"
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/threading/thread_restrictions.h"
+#include "build/build_config.h"
+
+namespace base {
+
+#if !defined(OS_ANDROID)
+Process SpawnMultiProcessTestChild(const std::string& procname,
+                                   const CommandLine& base_command_line,
+                                   const LaunchOptions& options) {
+  CommandLine command_line(base_command_line);
+  // TODO(viettrungluu): See comment above |MakeCmdLine()| in the header file.
+  // This is a temporary hack, since |MakeCmdLine()| has to provide a full
+  // command line.
+  if (!command_line.HasSwitch(switches::kTestChildProcess))
+    command_line.AppendSwitchASCII(switches::kTestChildProcess, procname);
+
+  return LaunchProcess(command_line, options);
+}
+
+bool WaitForMultiprocessTestChildExit(const Process& process,
+                                      TimeDelta timeout,
+                                      int* exit_code) {
+  return process.WaitForExitWithTimeout(timeout, exit_code);
+}
+
+bool TerminateMultiProcessTestChild(const Process& process,
+                                    int exit_code,
+                                    bool wait) {
+  return process.Terminate(exit_code, wait);
+}
+
+#endif  // !defined(OS_ANDROID)
+
+CommandLine GetMultiProcessTestChildBaseCommandLine() {
+  base::ScopedAllowBlockingForTesting allow_blocking;
+  CommandLine cmd_line = *CommandLine::ForCurrentProcess();
+  cmd_line.SetProgram(MakeAbsoluteFilePath(cmd_line.GetProgram()));
+  return cmd_line;
+}
+
+// MultiProcessTest ------------------------------------------------------------
+
+MultiProcessTest::MultiProcessTest() = default;
+
+Process MultiProcessTest::SpawnChild(const std::string& procname) {
+  LaunchOptions options;
+#if defined(OS_WIN)
+  options.start_hidden = true;
+#endif
+  return SpawnChildWithOptions(procname, options);
+}
+
+Process MultiProcessTest::SpawnChildWithOptions(const std::string& procname,
+                                                const LaunchOptions& options) {
+  return SpawnMultiProcessTestChild(procname, MakeCmdLine(procname), options);
+}
+
+CommandLine MultiProcessTest::MakeCmdLine(const std::string& procname) {
+  CommandLine command_line = GetMultiProcessTestChildBaseCommandLine();
+  command_line.AppendSwitchASCII(switches::kTestChildProcess, procname);
+  return command_line;
+}
+
+}  // namespace base
diff --git a/base/test/multiprocess_test.h b/base/test/multiprocess_test.h
new file mode 100644
index 0000000..7c00d37
--- /dev/null
+++ b/base/test/multiprocess_test.h
@@ -0,0 +1,146 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_MULTIPROCESS_TEST_H_
+#define BASE_TEST_MULTIPROCESS_TEST_H_
+
+#include <string>
+
+#include "base/macros.h"
+#include "base/process/launch.h"
+#include "base/process/process.h"
+#include "build/build_config.h"
+#include "testing/platform_test.h"
+
+namespace base {
+
+class CommandLine;
+
+// Helpers to spawn a child for a multiprocess test and execute a designated
+// function. Use these when you already have another base class for your test
+// fixture, but you want (some of) your tests to be multiprocess (otherwise you
+// may just want to derive your fixture from |MultiProcessTest|, below).
+//
+// Use these helpers as follows:
+//
+//   TEST_F(MyTest, ATest) {
+//     CommandLine command_line(
+//         base::GetMultiProcessTestChildBaseCommandLine());
+//     // Maybe add our own switches to |command_line|....
+//
+//     LaunchOptions options;
+//     // Maybe set some options (e.g., |start_hidden| on Windows)....
+//
+//     // Start a child process and run |a_test_func|.
+//     base::Process test_child_process =
+//         base::SpawnMultiProcessTestChild("a_test_func", command_line,
+//                                          options);
+//
+//     // Do stuff involving |test_child_process| and the child process....
+//
+//     int rv = -1;
+//     ASSERT_TRUE(base::WaitForMultiprocessTestChildExit(test_child_process,
+//         TestTimeouts::action_timeout(), &rv));
+//     EXPECT_EQ(0, rv);
+//   }
+//
+//   // Note: |MULTIPROCESS_TEST_MAIN()| is defined in
+//   // testing/multi_process_function_list.h.
+//   MULTIPROCESS_TEST_MAIN(a_test_func) {
+//     // Code here runs in a child process....
+//     return 0;
+//   }
+//
+// If you need to terminate the child process, use the
+// TerminateMultiProcessTestChild method to ensure that the test will work on
+// Android.
+
+// Spawns a child process and executes the function |procname| declared using
+// |MULTIPROCESS_TEST_MAIN()| or |MULTIPROCESS_TEST_MAIN_WITH_SETUP()|.
+// |command_line| should be as provided by
+// |GetMultiProcessTestChildBaseCommandLine()| (below), possibly with arguments
+// added. Note: On Windows, you probably want to set |options.start_hidden|.
+Process SpawnMultiProcessTestChild(const std::string& procname,
+                                   const CommandLine& command_line,
+                                   const LaunchOptions& options);
+
+// Gets the base command line for |SpawnMultiProcessTestChild()|. To this, you
+// may add any flags needed for your child process.
+CommandLine GetMultiProcessTestChildBaseCommandLine();
+
+// Waits for the child process to exit. Returns true if the process exited
+// within |timeout| and sets |exit_code| if non null.
+bool WaitForMultiprocessTestChildExit(const Process& process,
+                                      TimeDelta timeout,
+                                      int* exit_code);
+
+// Terminates |process| with |exit_code|. If |wait| is true, this call blocks
+// until the process actually terminates.
+bool TerminateMultiProcessTestChild(const Process& process,
+                                    int exit_code,
+                                    bool wait);
+
+// MultiProcessTest ------------------------------------------------------------
+
+// A MultiProcessTest is a test class which makes it easier to
+// write a test which requires code running out of process.
+//
+// To create a multiprocess test simply follow these steps:
+//
+// 1) Derive your test from MultiProcessTest. Example:
+//
+//    class MyTest : public MultiProcessTest {
+//    };
+//
+//    TEST_F(MyTest, TestCaseName) {
+//      ...
+//    }
+//
+// 2) Create a mainline function for the child processes and include
+//    testing/multiprocess_func_list.h.
+//    See the declaration of the MULTIPROCESS_TEST_MAIN macro
+//    in that file for an example.
+// 3) Call SpawnChild("foo"), where "foo" is the name of
+//    the function you wish to run in the child processes.
+// That's it!
+class MultiProcessTest : public PlatformTest {
+ public:
+  MultiProcessTest();
+
+ protected:
+  // Run a child process.
+  // 'procname' is the name of a function which the child will
+  // execute.  It must be exported from this library in order to
+  // run.
+  //
+  // Example signature:
+  //    extern "C" int __declspec(dllexport) FooBar() {
+  //         // do client work here
+  //    }
+  //
+  // Returns the child process.
+  Process SpawnChild(const std::string& procname);
+
+  // Run a child process using the given launch options.
+  //
+  // Note: On Windows, you probably want to set |options.start_hidden|.
+  Process SpawnChildWithOptions(const std::string& procname,
+                                const LaunchOptions& options);
+
+  // Set up the command line used to spawn the child process.
+  // Override this to add things to the command line (calling this first in the
+  // override).
+  // Note that currently some tests rely on this providing a full command line,
+  // which they then use directly with |LaunchProcess()|.
+  // TODO(viettrungluu): Remove this and add a virtual
+  // |ModifyChildCommandLine()|; make the two divergent uses more sane.
+  virtual CommandLine MakeCmdLine(const std::string& procname);
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MultiProcessTest);
+};
+
+}  // namespace base
+
+#endif  // BASE_TEST_MULTIPROCESS_TEST_H_
diff --git a/base/test/multiprocess_test_android.cc b/base/test/multiprocess_test_android.cc
new file mode 100644
index 0000000..4108593
--- /dev/null
+++ b/base/test/multiprocess_test_android.cc
@@ -0,0 +1,86 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/multiprocess_test.h"
+
+#include <string.h>
+#include <vector>
+
+#include "base/android/jni_android.h"
+#include "base/android/jni_array.h"
+#include "base/android/scoped_java_ref.h"
+#include "base/base_switches.h"
+#include "base/command_line.h"
+#include "base/logging.h"
+#include "jni/MainReturnCodeResult_jni.h"
+#include "jni/MultiprocessTestClientLauncher_jni.h"
+
+namespace base {
+
+// A very basic implementation for Android. On Android, tests can run in an
+// APK, and there is no executable to exec*. This implementation does the bare
+// minimum to execute the method specified by procname (in the child process).
+//  - All options except |fds_to_remap| are ignored.
+//
+// NOTE: This MUST NOT run on the main thread of the NativeTest application.
+Process SpawnMultiProcessTestChild(const std::string& procname,
+                                   const CommandLine& base_command_line,
+                                   const LaunchOptions& options) {
+  JNIEnv* env = android::AttachCurrentThread();
+  DCHECK(env);
+
+  std::vector<int> fd_keys;
+  std::vector<int> fd_fds;
+  for (auto& iter : options.fds_to_remap) {
+    fd_keys.push_back(iter.second);
+    fd_fds.push_back(iter.first);
+  }
+
+  android::ScopedJavaLocalRef<jobjectArray> fds =
+      android::Java_MultiprocessTestClientLauncher_makeFdInfoArray(
+          env, base::android::ToJavaIntArray(env, fd_keys),
+          base::android::ToJavaIntArray(env, fd_fds));
+
+  CommandLine command_line(base_command_line);
+  if (!command_line.HasSwitch(switches::kTestChildProcess)) {
+    command_line.AppendSwitchASCII(switches::kTestChildProcess, procname);
+  }
+
+  android::ScopedJavaLocalRef<jobjectArray> j_argv =
+      android::ToJavaArrayOfStrings(env, command_line.argv());
+  jint pid = android::Java_MultiprocessTestClientLauncher_launchClient(
+      env, j_argv, fds);
+  return Process(pid);
+}
+
+bool WaitForMultiprocessTestChildExit(const Process& process,
+                                      TimeDelta timeout,
+                                      int* exit_code) {
+  JNIEnv* env = android::AttachCurrentThread();
+  DCHECK(env);
+
+  base::android::ScopedJavaLocalRef<jobject> result_code =
+      android::Java_MultiprocessTestClientLauncher_waitForMainToReturn(
+          env, process.Pid(), static_cast<int32_t>(timeout.InMilliseconds()));
+  if (result_code.is_null() ||
+      Java_MainReturnCodeResult_hasTimedOut(env, result_code)) {
+    return false;
+  }
+  if (exit_code) {
+    *exit_code = Java_MainReturnCodeResult_getReturnCode(env, result_code);
+  }
+  return true;
+}
+
+bool TerminateMultiProcessTestChild(const Process& process,
+                                    int exit_code,
+                                    bool wait) {
+  JNIEnv* env = android::AttachCurrentThread();
+  DCHECK(env);
+
+  return android::Java_MultiprocessTestClientLauncher_terminate(
+      env, process.Pid(), exit_code, wait);
+}
+
+}  // namespace base
diff --git a/base/test/native_library_test_utils.cc b/base/test/native_library_test_utils.cc
new file mode 100644
index 0000000..adcb1b0
--- /dev/null
+++ b/base/test/native_library_test_utils.cc
@@ -0,0 +1,19 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/native_library_test_utils.h"
+
+namespace {
+
+int g_static_value = 0;
+
+}  // namespace
+
+extern "C" {
+
+int g_native_library_exported_value = 0;
+
+int NativeLibraryTestIncrement() { return ++g_static_value; }
+
+}  // extern "C"
diff --git a/base/test/native_library_test_utils.h b/base/test/native_library_test_utils.h
new file mode 100644
index 0000000..e26fd1a
--- /dev/null
+++ b/base/test/native_library_test_utils.h
@@ -0,0 +1,26 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_NATIVE_LIBRARY_TEST_UTILS_H_
+#define BASE_TEST_NATIVE_LIBRARY_TEST_UTILS_H_
+
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#define NATIVE_LIBRARY_TEST_ALWAYS_EXPORT __declspec(dllexport)
+#else
+#define NATIVE_LIBRARY_TEST_ALWAYS_EXPORT __attribute__((visibility("default")))
+#endif
+
+extern "C" {
+
+extern NATIVE_LIBRARY_TEST_ALWAYS_EXPORT int g_native_library_exported_value;
+
+// A function which increments an internal counter and returns its new value.
+// The first call returns 1, the second 2, and so on.
+NATIVE_LIBRARY_TEST_ALWAYS_EXPORT int NativeLibraryTestIncrement();
+
+}  // extern "C"
+
+#endif  // BASE_TEST_NATIVE_LIBRARY_TEST_UTILS_H_
diff --git a/base/test/null_task_runner.cc b/base/test/null_task_runner.cc
new file mode 100644
index 0000000..dfa26fa
--- /dev/null
+++ b/base/test/null_task_runner.cc
@@ -0,0 +1,29 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/null_task_runner.h"
+
+namespace base {
+
+NullTaskRunner::NullTaskRunner() = default;
+
+NullTaskRunner::~NullTaskRunner() = default;
+
+bool NullTaskRunner::PostDelayedTask(const Location& from_here,
+                                     OnceClosure task,
+                                     base::TimeDelta delay) {
+  return false;
+}
+
+bool NullTaskRunner::PostNonNestableDelayedTask(const Location& from_here,
+                                                OnceClosure task,
+                                                base::TimeDelta delay) {
+  return false;
+}
+
+bool NullTaskRunner::RunsTasksInCurrentSequence() const {
+  return true;
+}
+
+}  // namespace base
diff --git a/base/test/null_task_runner.h b/base/test/null_task_runner.h
new file mode 100644
index 0000000..c11ab6b
--- /dev/null
+++ b/base/test/null_task_runner.h
@@ -0,0 +1,39 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_NULL_TASK_RUNNER_H_
+#define BASE_TEST_NULL_TASK_RUNNER_H_
+
+#include "base/callback.h"
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/single_thread_task_runner.h"
+
+namespace base {
+
+// Helper class for tests that need to provide an implementation of a
+// *TaskRunner class but don't actually care about tasks being run.
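+//
+// A minimal sketch:
+//   auto runner = base::MakeRefCounted<base::NullTaskRunner>();
+//   // PostTask() returns false and the closure is never run.
+//   runner->PostTask(FROM_HERE, base::BindOnce([] {}));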
+
+class NullTaskRunner : public base::SingleThreadTaskRunner {
+ public:
+  NullTaskRunner();
+
+  bool PostDelayedTask(const Location& from_here,
+                       base::OnceClosure task,
+                       base::TimeDelta delay) override;
+  bool PostNonNestableDelayedTask(const Location& from_here,
+                                  base::OnceClosure task,
+                                  base::TimeDelta delay) override;
+  // Always returns true to avoid triggering DCHECKs.
+  bool RunsTasksInCurrentSequence() const override;
+
+ protected:
+  ~NullTaskRunner() override;
+
+  DISALLOW_COPY_AND_ASSIGN(NullTaskRunner);
+};
+
+}  // namespace base
+
+#endif  // BASE_TEST_NULL_TASK_RUNNER_H_
diff --git a/base/test/perf_log.cc b/base/test/perf_log.cc
new file mode 100644
index 0000000..9212f4b
--- /dev/null
+++ b/base/test/perf_log.cc
@@ -0,0 +1,45 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/perf_log.h"
+
+#include "base/files/file_util.h"
+#include "base/logging.h"
+
+namespace base {
+
+static FILE* perf_log_file = nullptr;
+
+bool InitPerfLog(const FilePath& log_file) {
+  if (perf_log_file) {
+    // Trying to initialize twice.
+    NOTREACHED();
+    return false;
+  }
+
+  perf_log_file = OpenFile(log_file, "w");
+  return perf_log_file != nullptr;
+}
+
+void FinalizePerfLog() {
+  if (!perf_log_file) {
+    // Trying to clean up without initializing.
+    NOTREACHED();
+    return;
+  }
+  base::CloseFile(perf_log_file);
+}
+
+void LogPerfResult(const char* test_name, double value, const char* units) {
+  if (!perf_log_file) {
+    NOTREACHED();
+    return;
+  }
+
+  fprintf(perf_log_file, "%s\t%g\t%s\n", test_name, value, units);
+  printf("%s\t%g\t%s\n", test_name, value, units);
+  fflush(stdout);
+}
+
+}  // namespace base
diff --git a/base/test/perf_log.h b/base/test/perf_log.h
new file mode 100644
index 0000000..5d6ed9f
--- /dev/null
+++ b/base/test/perf_log.h
@@ -0,0 +1,24 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_PERF_LOG_H_
+#define BASE_TEST_PERF_LOG_H_
+
+namespace base {
+
+class FilePath;
+
+// Initializes and finalizes the perf log. These functions should be
+// called at the beginning and end (respectively) of running all the
+// performance tests. The init function returns true on success.
+bool InitPerfLog(const FilePath& log_path);
+void FinalizePerfLog();
+
+// Writes to the perf result log the given 'value' resulting from the
+// named 'test'. The 'units' string is there to make the log easier for
+// people to read.
+void LogPerfResult(const char* test_name, double value, const char* units);
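+
+// Typical sequence (a sketch; "perf.log" and "my_test" are arbitrary examples):
+//   base::InitPerfLog(base::FilePath(FILE_PATH_LITERAL("perf.log")));
+//   base::LogPerfResult("my_test", 123.4, "ms");
+//   base::FinalizePerfLog();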
+
+}  // namespace base
+
+#endif  // BASE_TEST_PERF_LOG_H_
diff --git a/base/test/perf_test_suite.cc b/base/test/perf_test_suite.cc
new file mode 100644
index 0000000..2e2cdbb
--- /dev/null
+++ b/base/test/perf_test_suite.cc
@@ -0,0 +1,50 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/perf_test_suite.h"
+
+#include "base/command_line.h"
+#include "base/debug/debugger.h"
+#include "base/files/file_path.h"
+#include "base/path_service.h"
+#include "base/process/launch.h"
+#include "base/strings/string_util.h"
+#include "base/test/perf_log.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+PerfTestSuite::PerfTestSuite(int argc, char** argv) : TestSuite(argc, argv) {}
+
+void PerfTestSuite::Initialize() {
+  TestSuite::Initialize();
+
+  // Initialize the perf timer log
+  FilePath log_path =
+      CommandLine::ForCurrentProcess()->GetSwitchValuePath("log-file");
+  if (log_path.empty()) {
+    PathService::Get(FILE_EXE, &log_path);
+#if defined(OS_ANDROID) || defined(OS_FUCHSIA)
+    base::FilePath tmp_dir;
+    PathService::Get(base::DIR_CACHE, &tmp_dir);
+    log_path = tmp_dir.Append(log_path.BaseName());
+#endif
+    log_path = log_path.ReplaceExtension(FILE_PATH_LITERAL("log"));
+    log_path = log_path.InsertBeforeExtension(FILE_PATH_LITERAL("_perf"));
+  }
+  ASSERT_TRUE(InitPerfLog(log_path));
+
+  // Raise to high priority to have more precise measurements. Since we don't
+  // aim at 1% precision, it is not necessary to run at realtime level.
+  if (!debug::BeingDebugged())
+    RaiseProcessToHighPriority();
+}
+
+void PerfTestSuite::Shutdown() {
+  TestSuite::Shutdown();
+  FinalizePerfLog();
+}
+
+}  // namespace base
diff --git a/base/test/perf_test_suite.h b/base/test/perf_test_suite.h
new file mode 100644
index 0000000..52528f0
--- /dev/null
+++ b/base/test/perf_test_suite.h
@@ -0,0 +1,22 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_PERF_TEST_SUITE_H_
+#define BASE_TEST_PERF_TEST_SUITE_H_
+
+#include "base/test/test_suite.h"
+
+namespace base {
+
+class PerfTestSuite : public TestSuite {
+ public:
+  PerfTestSuite(int argc, char** argv);
+
+  void Initialize() override;
+  void Shutdown() override;
+};
+
+}  // namespace base
+
+#endif  // BASE_TEST_PERF_TEST_SUITE_H_
diff --git a/base/test/perf_time_logger.cc b/base/test/perf_time_logger.cc
new file mode 100644
index 0000000..c05ba51
--- /dev/null
+++ b/base/test/perf_time_logger.cc
@@ -0,0 +1,27 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/perf_time_logger.h"
+
+#include "base/test/perf_log.h"
+
+namespace base {
+
+PerfTimeLogger::PerfTimeLogger(const char* test_name)
+    : logged_(false), test_name_(test_name) {}
+
+PerfTimeLogger::~PerfTimeLogger() {
+  if (!logged_)
+    Done();
+}
+
+void PerfTimeLogger::Done() {
+  // We use a floating-point millisecond value because it is more intuitive
+  // than microseconds, and we want more precision than integer milliseconds.
+  LogPerfResult(test_name_.c_str(), timer_.Elapsed().InMillisecondsF(), "ms");
+  logged_ = true;
+}
+
+}  // namespace base
diff --git a/base/test/perf_time_logger.h b/base/test/perf_time_logger.h
new file mode 100644
index 0000000..a5f3e8a
--- /dev/null
+++ b/base/test/perf_time_logger.h
@@ -0,0 +1,37 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_PERF_TIME_LOGGER_H_
+#define BASE_TEST_PERF_TIME_LOGGER_H_
+
+#include <string>
+
+#include "base/macros.h"
+#include "base/timer/elapsed_timer.h"
+
+namespace base {
+
+// Automates calling LogPerfResult for the common case where you want to
+// measure the time that something took. Call Done() when the test is complete
+// if you do extra work after the test or there are stack objects with
+// potentially expensive destructors. Otherwise, this class will automatically
+// log on destruction.
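+//
+// Example (with an arbitrary test name):
+//   {
+//     base::PerfTimeLogger logger("my_test");
+//     // ... work being measured ...
+//   }  // The destructor calls LogPerfResult("my_test", elapsed_ms, "ms").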
+class PerfTimeLogger {
+ public:
+  explicit PerfTimeLogger(const char* test_name);
+  ~PerfTimeLogger();
+
+  void Done();
+
+ private:
+  bool logged_;
+  std::string test_name_;
+  ElapsedTimer timer_;
+
+  DISALLOW_COPY_AND_ASSIGN(PerfTimeLogger);
+};
+
+}  // namespace base
+
+#endif  // BASE_TEST_PERF_TIME_LOGGER_H_
diff --git a/base/test/power_monitor_test_base.cc b/base/test/power_monitor_test_base.cc
new file mode 100644
index 0000000..8c5ba86
--- /dev/null
+++ b/base/test/power_monitor_test_base.cc
@@ -0,0 +1,66 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/power_monitor_test_base.h"
+
+#include "base/message_loop/message_loop.h"
+#include "base/message_loop/message_loop_current.h"
+#include "base/power_monitor/power_monitor.h"
+#include "base/power_monitor/power_monitor_source.h"
+#include "base/run_loop.h"
+
+namespace base {
+
+PowerMonitorTestSource::PowerMonitorTestSource()
+    : test_on_battery_power_(false) {
+  DCHECK(MessageLoopCurrent::Get())
+      << "PowerMonitorTestSource requires a MessageLoop.";
+}
+
+PowerMonitorTestSource::~PowerMonitorTestSource() = default;
+
+void PowerMonitorTestSource::GeneratePowerStateEvent(bool on_battery_power) {
+  test_on_battery_power_ = on_battery_power;
+  ProcessPowerEvent(POWER_STATE_EVENT);
+  RunLoop().RunUntilIdle();
+}
+
+void PowerMonitorTestSource::GenerateSuspendEvent() {
+  ProcessPowerEvent(SUSPEND_EVENT);
+  RunLoop().RunUntilIdle();
+}
+
+void PowerMonitorTestSource::GenerateResumeEvent() {
+  ProcessPowerEvent(RESUME_EVENT);
+  RunLoop().RunUntilIdle();
+}
+
+bool PowerMonitorTestSource::IsOnBatteryPowerImpl() {
+  return test_on_battery_power_;
+}
+
+PowerMonitorTestObserver::PowerMonitorTestObserver()
+    : last_power_state_(false),
+      power_state_changes_(0),
+      suspends_(0),
+      resumes_(0) {
+}
+
+PowerMonitorTestObserver::~PowerMonitorTestObserver() = default;
+
+// PowerObserver callbacks.
+void PowerMonitorTestObserver::OnPowerStateChange(bool on_battery_power) {
+  last_power_state_ = on_battery_power;
+  power_state_changes_++;
+}
+
+void PowerMonitorTestObserver::OnSuspend() {
+  suspends_++;
+}
+
+void PowerMonitorTestObserver::OnResume() {
+  resumes_++;
+}
+
+}  // namespace base
diff --git a/base/test/power_monitor_test_base.h b/base/test/power_monitor_test_base.h
new file mode 100644
index 0000000..037670a
--- /dev/null
+++ b/base/test/power_monitor_test_base.h
@@ -0,0 +1,53 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_POWER_MONITOR_TEST_BASE_H_
+#define BASE_TEST_POWER_MONITOR_TEST_BASE_H_
+
+#include "base/power_monitor/power_monitor.h"
+#include "base/power_monitor/power_monitor_source.h"
+
+namespace base {
+
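+// Lets tests drive PowerMonitor notifications by hand. A minimal sketch
+// (assumes a MessageLoop exists and that PowerMonitor takes ownership of the
+// source):
+//   auto* source = new PowerMonitorTestSource;
+//   PowerMonitor monitor(std::unique_ptr<PowerMonitorSource>(source));
+//   source->GenerateSuspendEvent();  // Observers receive OnSuspend().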
+class PowerMonitorTestSource : public PowerMonitorSource {
+ public:
+  PowerMonitorTestSource();
+  ~PowerMonitorTestSource() override;
+
+  void GeneratePowerStateEvent(bool on_battery_power);
+  void GenerateSuspendEvent();
+  void GenerateResumeEvent();
+
+ protected:
+  bool IsOnBatteryPowerImpl() override;
+
+  bool test_on_battery_power_;
+};
+
+class PowerMonitorTestObserver : public PowerObserver {
+ public:
+  PowerMonitorTestObserver();
+  ~PowerMonitorTestObserver() override;
+
+  // PowerObserver callbacks.
+  void OnPowerStateChange(bool on_battery_power) override;
+  void OnSuspend() override;
+  void OnResume() override;
+
+  // Test status counts.
+  bool last_power_state() { return last_power_state_; }
+  int power_state_changes() { return power_state_changes_; }
+  int suspends() { return suspends_; }
+  int resumes() { return resumes_; }
+
+ private:
+  bool last_power_state_;  // Last power state we were notified of.
+  int power_state_changes_;  // Count of OnPowerStateChange notifications.
+  int suspends_;  // Count of OnSuspend notifications.
+  int resumes_;  // Count of OnResume notifications.
+};
+
+}  // namespace base
+
+#endif  // BASE_TEST_POWER_MONITOR_TEST_BASE_H_
diff --git a/base/test/run_all_base_unittests.cc b/base/test/run_all_base_unittests.cc
new file mode 100644
index 0000000..da52310
--- /dev/null
+++ b/base/test/run_all_base_unittests.cc
@@ -0,0 +1,15 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/bind.h"
+#include "base/test/launcher/unit_test_launcher.h"
+#include "base/test/test_suite.h"
+#include "build/build_config.h"
+
+int main(int argc, char** argv) {
+  base::TestSuite test_suite(argc, argv);
+  return base::LaunchUnitTests(
+      argc, argv,
+      base::Bind(&base::TestSuite::Run, base::Unretained(&test_suite)));
+}
diff --git a/base/test/run_all_perftests.cc b/base/test/run_all_perftests.cc
new file mode 100644
index 0000000..6e38109
--- /dev/null
+++ b/base/test/run_all_perftests.cc
@@ -0,0 +1,9 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/perf_test_suite.h"
+
+int main(int argc, char** argv) {
+  return base::PerfTestSuite(argc, argv).Run();
+}
diff --git a/base/test/run_all_unittests.cc b/base/test/run_all_unittests.cc
new file mode 100644
index 0000000..0ad84ed
--- /dev/null
+++ b/base/test/run_all_unittests.cc
@@ -0,0 +1,15 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/bind.h"
+#include "base/test/launcher/unit_test_launcher.h"
+#include "base/test/test_suite.h"
+#include "build/build_config.h"
+
+int main(int argc, char** argv) {
+  base::TestSuite test_suite(argc, argv);
+  return base::LaunchUnitTests(
+      argc, argv,
+      base::BindOnce(&base::TestSuite::Run, base::Unretained(&test_suite)));
+}
diff --git a/base/test/scoped_command_line.cc b/base/test/scoped_command_line.cc
new file mode 100644
index 0000000..c74d243
--- /dev/null
+++ b/base/test/scoped_command_line.cc
@@ -0,0 +1,22 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/scoped_command_line.h"
+
+namespace base {
+namespace test {
+
+ScopedCommandLine::ScopedCommandLine()
+    : original_command_line_(*base::CommandLine::ForCurrentProcess()) {}
+
+ScopedCommandLine::~ScopedCommandLine() {
+  *base::CommandLine::ForCurrentProcess() = original_command_line_;
+}
+
+CommandLine* ScopedCommandLine::GetProcessCommandLine() {
+  return base::CommandLine::ForCurrentProcess();
+}
+
+}  // namespace test
+}  // namespace base
diff --git a/base/test/scoped_command_line.h b/base/test/scoped_command_line.h
new file mode 100644
index 0000000..dea0c6a
--- /dev/null
+++ b/base/test/scoped_command_line.h
@@ -0,0 +1,34 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_SCOPED_COMMAND_LINE_H_
+#define BASE_TEST_SCOPED_COMMAND_LINE_H_
+
+#include "base/command_line.h"
+
+namespace base {
+namespace test {
+
+// Helper class to restore the original command line at the end of the scope.
+// NOTE: In most unit tests, the command line is automatically restored per
+//       test, so this class is not necessary if the command line applies to
+//       the entire single test.
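+//
+// A minimal sketch ("my-switch" is an arbitrary example):
+//   {
+//     ScopedCommandLine scoped_command_line;
+//     scoped_command_line.GetProcessCommandLine()->AppendSwitch("my-switch");
+//     // ... code under test sees "my-switch" ...
+//   }  // The original command line is restored here.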
+class ScopedCommandLine final {
+ public:
+  ScopedCommandLine();
+  ~ScopedCommandLine();
+
+  // Gets the command line for the current process.
+  // NOTE: Do not name this GetCommandLine as this will conflict with Windows's
+  //       GetCommandLine and get renamed to GetCommandLineW.
+  CommandLine* GetProcessCommandLine();
+
+ private:
+  const CommandLine original_command_line_;
+};
+
+}  // namespace test
+}  // namespace base
+
+#endif  // BASE_TEST_SCOPED_COMMAND_LINE_H_
diff --git a/base/test/scoped_environment_variable_override.cc b/base/test/scoped_environment_variable_override.cc
new file mode 100644
index 0000000..4b7b387
--- /dev/null
+++ b/base/test/scoped_environment_variable_override.cc
@@ -0,0 +1,33 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/scoped_environment_variable_override.h"
+
+#include "base/environment.h"
+
+namespace base {
+namespace test {
+
+ScopedEnvironmentVariableOverride::ScopedEnvironmentVariableOverride(
+    const std::string& variable_name,
+    const std::string& value)
+    : environment_(Environment::Create()),
+      variable_name_(variable_name),
+      overridden_(false),
+      was_set_(false) {
+  was_set_ = environment_->GetVar(variable_name, &old_value_);
+  overridden_ = environment_->SetVar(variable_name, value);
+}
+
+ScopedEnvironmentVariableOverride::~ScopedEnvironmentVariableOverride() {
+  if (overridden_) {
+    if (was_set_)
+      environment_->SetVar(variable_name_, old_value_);
+    else
+      environment_->UnSetVar(variable_name_);
+  }
+}
+
+}  // namespace test
+}  // namespace base
diff --git a/base/test/scoped_environment_variable_override.h b/base/test/scoped_environment_variable_override.h
new file mode 100644
index 0000000..b05b5f9
--- /dev/null
+++ b/base/test/scoped_environment_variable_override.h
@@ -0,0 +1,40 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_SCOPED_ENVIRONMENT_VARIABLE_OVERRIDE_H_
+#define BASE_TEST_SCOPED_ENVIRONMENT_VARIABLE_OVERRIDE_H_
+
+#include <memory>
+#include <string>
+
+namespace base {
+
+class Environment;
+
+namespace test {
+
+// Helper class that overrides the |variable_name| environment variable,
+// setting it to |value| for the lifetime of this object. Upon destruction, the
+// previous value (or its absence) is restored.
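+//
+// A minimal sketch (variable name and value are arbitrary examples):
+//   {
+//     ScopedEnvironmentVariableOverride scoped_env("HOME", "/tmp/test-home");
+//     // ... code under test sees HOME=/tmp/test-home ...
+//   }  // The previous value of HOME is restored here.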
+class ScopedEnvironmentVariableOverride final {
+ public:
+  ScopedEnvironmentVariableOverride(const std::string& variable_name,
+                                    const std::string& value);
+  ~ScopedEnvironmentVariableOverride();
+
+  base::Environment* GetEnv() { return environment_.get(); }
+  bool IsOverridden() { return overridden_; }
+  bool WasSet() { return was_set_; }
+
+ private:
+  std::unique_ptr<Environment> environment_;
+  std::string variable_name_;
+  bool overridden_;
+  bool was_set_;
+  std::string old_value_;
+};
+
+}  // namespace test
+}  // namespace base
+
+#endif  // BASE_TEST_SCOPED_ENVIRONMENT_VARIABLE_OVERRIDE_H_
diff --git a/base/test/scoped_feature_list.cc b/base/test/scoped_feature_list.cc
new file mode 100644
index 0000000..7106c44
--- /dev/null
+++ b/base/test/scoped_feature_list.cc
@@ -0,0 +1,226 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/scoped_feature_list.h"
+
+#include <algorithm>
+#include <utility>
+#include <vector>
+
+#include "base/memory/ptr_util.h"
+#include "base/metrics/field_trial_param_associator.h"
+#include "base/stl_util.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_util.h"
+
+namespace base {
+namespace test {
+
+namespace {
+
+std::vector<StringPiece> GetFeatureVector(
+    const std::vector<Feature>& features) {
+  std::vector<StringPiece> output;
+  for (const Feature& feature : features) {
+    output.push_back(feature.name);
+  }
+
+  return output;
+}
+
+// Extracts a feature name from a feature state string. For example, given
+// the input "*MyLovelyFeature<SomeFieldTrial", returns "MyLovelyFeature".
+StringPiece GetFeatureName(StringPiece feature) {
+  StringPiece feature_name = feature;
+
+  // Remove default info.
+  if (feature_name.starts_with("*"))
+    feature_name = feature_name.substr(1);
+
+  // Remove field_trial info.
+  std::size_t index = feature_name.find("<");
+  if (index != std::string::npos)
+    feature_name = feature_name.substr(0, index);
+
+  return feature_name;
+}
+
+struct Features {
+  std::vector<StringPiece> enabled_feature_list;
+  std::vector<StringPiece> disabled_feature_list;
+};
+
+// Merges previously-specified feature overrides with those passed into one of
+// the Init() methods. |features| should be a list of features previously
+// overridden to be in the |override_state|. |merged_features| should contain
+// the enabled and disabled features passed into the Init() method, plus any
+// overrides merged as a result of previous calls to this function.
+void OverrideFeatures(const std::string& features,
+                      FeatureList::OverrideState override_state,
+                      Features* merged_features) {
+  std::vector<StringPiece> features_list =
+      SplitStringPiece(features, ",", TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
+
+  for (StringPiece feature : features_list) {
+    StringPiece feature_name = GetFeatureName(feature);
+
+    if (ContainsValue(merged_features->enabled_feature_list, feature_name) ||
+        ContainsValue(merged_features->disabled_feature_list, feature_name))
+      continue;
+
+    if (override_state == FeatureList::OverrideState::OVERRIDE_ENABLE_FEATURE) {
+      merged_features->enabled_feature_list.push_back(feature);
+    } else {
+      DCHECK_EQ(override_state,
+                FeatureList::OverrideState::OVERRIDE_DISABLE_FEATURE);
+      merged_features->disabled_feature_list.push_back(feature);
+    }
+  }
+}
+
+}  // namespace
+
+ScopedFeatureList::ScopedFeatureList() = default;
+
+ScopedFeatureList::~ScopedFeatureList() {
+  if (field_trial_override_)
+    base::FieldTrialParamAssociator::GetInstance()->ClearParamsForTesting(
+        field_trial_override_->trial_name(),
+        field_trial_override_->group_name());
+
+  FeatureList::ClearInstanceForTesting();
+  if (original_feature_list_)
+    FeatureList::RestoreInstanceForTesting(std::move(original_feature_list_));
+}
+
+void ScopedFeatureList::Init() {
+  std::unique_ptr<FeatureList> feature_list(new FeatureList);
+  feature_list->InitializeFromCommandLine(std::string(), std::string());
+  InitWithFeatureList(std::move(feature_list));
+}
+
+void ScopedFeatureList::InitWithFeatureList(
+    std::unique_ptr<FeatureList> feature_list) {
+  DCHECK(!original_feature_list_);
+  original_feature_list_ = FeatureList::ClearInstanceForTesting();
+  FeatureList::SetInstance(std::move(feature_list));
+}
+
+void ScopedFeatureList::InitFromCommandLine(
+    const std::string& enable_features,
+    const std::string& disable_features) {
+  std::unique_ptr<FeatureList> feature_list(new FeatureList);
+  feature_list->InitializeFromCommandLine(enable_features, disable_features);
+  InitWithFeatureList(std::move(feature_list));
+}
+
+void ScopedFeatureList::InitWithFeatures(
+    const std::vector<Feature>& enabled_features,
+    const std::vector<Feature>& disabled_features) {
+  InitWithFeaturesAndFieldTrials(enabled_features, {}, disabled_features);
+}
+
+void ScopedFeatureList::InitAndEnableFeature(const Feature& feature) {
+  InitWithFeaturesAndFieldTrials({feature}, {}, {});
+}
+
+void ScopedFeatureList::InitAndEnableFeatureWithFieldTrialOverride(
+    const Feature& feature,
+    FieldTrial* trial) {
+  InitWithFeaturesAndFieldTrials({feature}, {trial}, {});
+}
+
+void ScopedFeatureList::InitAndDisableFeature(const Feature& feature) {
+  InitWithFeaturesAndFieldTrials({}, {}, {feature});
+}
+
+void ScopedFeatureList::InitWithFeatureState(const Feature& feature,
+                                             bool enabled) {
+  if (enabled) {
+    InitAndEnableFeature(feature);
+  } else {
+    InitAndDisableFeature(feature);
+  }
+}
+
+void ScopedFeatureList::InitWithFeaturesAndFieldTrials(
+    const std::vector<Feature>& enabled_features,
+    const std::vector<FieldTrial*>& trials_for_enabled_features,
+    const std::vector<Feature>& disabled_features) {
+  DCHECK_LE(trials_for_enabled_features.size(), enabled_features.size());
+
+  Features merged_features;
+  merged_features.enabled_feature_list = GetFeatureVector(enabled_features);
+  merged_features.disabled_feature_list = GetFeatureVector(disabled_features);
+
+  FeatureList* feature_list = FeatureList::GetInstance();
+
+  // |current_enabled_features| and |current_disabled_features| must be
+  // declared outside the |if| scope below so that they outlive the JoinString
+  // calls: |merged_features| may contain StringPieces whose pointers point
+  // into |current_enabled_features| and |current_disabled_features|.
+  std::string current_enabled_features;
+  std::string current_disabled_features;
+  if (feature_list) {
+    FeatureList::GetInstance()->GetFeatureOverrides(&current_enabled_features,
+                                                    &current_disabled_features);
+    OverrideFeatures(current_enabled_features,
+                     FeatureList::OverrideState::OVERRIDE_ENABLE_FEATURE,
+                     &merged_features);
+    OverrideFeatures(current_disabled_features,
+                     FeatureList::OverrideState::OVERRIDE_DISABLE_FEATURE,
+                     &merged_features);
+  }
+
+  // Add the field trial overrides. This assumes that |enabled_features| are at
+  // the beginning of |merged_features.enabled_feature_list|, in the same order.
+  std::vector<FieldTrial*>::const_iterator trial_it =
+      trials_for_enabled_features.begin();
+  auto feature_it = merged_features.enabled_feature_list.begin();
+  std::vector<std::unique_ptr<std::string>> features_with_trial;
+  features_with_trial.reserve(trials_for_enabled_features.size());
+  while (trial_it != trials_for_enabled_features.end()) {
+    features_with_trial.push_back(std::make_unique<std::string>(
+        feature_it->as_string() + "<" + (*trial_it)->trial_name()));
+    // |features_with_trial| owns the string, and feature_it points to it.
+    *feature_it = *(features_with_trial.back());
+    ++trial_it;
+    ++feature_it;
+  }
+
+  std::string enabled = JoinString(merged_features.enabled_feature_list, ",");
+  std::string disabled = JoinString(merged_features.disabled_feature_list, ",");
+  InitFromCommandLine(enabled, disabled);
+}
+
+void ScopedFeatureList::InitAndEnableFeatureWithParameters(
+    const Feature& feature,
+    const std::map<std::string, std::string>& feature_parameters) {
+  if (!FieldTrialList::IsGlobalSetForTesting()) {
+    field_trial_list_ = std::make_unique<base::FieldTrialList>(nullptr);
+  }
+
+  // TODO(crbug.com/794021) Remove this unique field trial name hack when there
+  // is a cleaner solution.
+  // Ensure that each call to this method uses a distinct field trial name.
+  // Otherwise, nested calls might fail due to the shared FieldTrialList
+  // already having the field trial registered.
+  static int num_calls = 0;
+  ++num_calls;
+  std::string kTrialName =
+      "scoped_feature_list_trial_name" + base::NumberToString(num_calls);
+  std::string kTrialGroup = "scoped_feature_list_trial_group";
+
+  field_trial_override_ =
+      base::FieldTrialList::CreateFieldTrial(kTrialName, kTrialGroup);
+  DCHECK(field_trial_override_);
+  FieldTrialParamAssociator::GetInstance()->AssociateFieldTrialParams(
+      kTrialName, kTrialGroup, feature_parameters);
+  InitAndEnableFeatureWithFieldTrialOverride(feature,
+                                             field_trial_override_.get());
+}
+
+}  // namespace test
+}  // namespace base
diff --git a/base/test/scoped_feature_list.h b/base/test/scoped_feature_list.h
new file mode 100644
index 0000000..d87b2d8
--- /dev/null
+++ b/base/test/scoped_feature_list.h
@@ -0,0 +1,123 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_SCOPED_FEATURE_LIST_H_
+#define BASE_TEST_SCOPED_FEATURE_LIST_H_
+
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/feature_list.h"
+#include "base/memory/ref_counted.h"
+#include "base/metrics/field_trial.h"
+
+namespace base {
+namespace test {
+
+// ScopedFeatureList resets the global FeatureList instance to a new empty
+// instance and restores the original instance upon destruction.
+// Note: Re-using the same object is not allowed. To reset the feature
+// list and initialize it anew, destroy an existing scoped list and init
+// a new one.
+//
+// ScopedFeatureList needs to be initialized (via one of the Init... methods)
+// before running code that inspects the state of features. In practice this
+// means:
+// - In browser tests, one of the Init... methods should be called from the
+//   overridden ::testing::Test::SetUp method. For example:
+//     void SetUp() override {
+//       scoped_feature_list_.InitAndEnableFeature(features::kMyFeatureHere);
+//       InProcessBrowserTest::SetUp();
+//     }
+class ScopedFeatureList final {
+ public:
+  ScopedFeatureList();
+  ~ScopedFeatureList();
+
+  // WARNING: This method will reset any globally configured features to their
+  // default values, which can hide feature interaction bugs. Please use
+  // sparingly.  https://crbug.com/713390
+  // Initializes and registers a FeatureList instance with no overrides.
+  void Init();
+
+  // WARNING: This method will reset any globally configured features to their
+  // default values, which can hide feature interaction bugs. Please use
+  // sparingly.  https://crbug.com/713390
+  // Initializes and registers the given FeatureList instance.
+  void InitWithFeatureList(std::unique_ptr<FeatureList> feature_list);
+
+  // WARNING: This method will reset any globally configured features to their
+  // default values, which can hide feature interaction bugs. Please use
+  // sparingly.  https://crbug.com/713390
+  // Initializes and registers a FeatureList instance with only the given
+  // enabled and disabled features (comma-separated names).
+  void InitFromCommandLine(const std::string& enable_features,
+                           const std::string& disable_features);
+
+  // Initializes and registers a FeatureList instance based on the current
+  // FeatureList, overridden with the given enabled and disabled features.
+  // Any feature overrides already present in the global FeatureList will
+  // continue to apply, unless they conflict with the overrides passed into this
+  // method. This is important for testing potentially unexpected feature
+  // interactions.
+  void InitWithFeatures(const std::vector<Feature>& enabled_features,
+                        const std::vector<Feature>& disabled_features);
+
+  // Initializes and registers a FeatureList instance based on the current
+  // FeatureList, overridden with a single enabled feature.
+  void InitAndEnableFeature(const Feature& feature);
+
+  // Initializes and registers a FeatureList instance based on the current
+  // FeatureList, overridden with a single enabled feature and its associated
+  // field trial parameters.
+  // Note: this creates a scoped global field trial list if there is not
+  // currently one.
+  void InitAndEnableFeatureWithParameters(
+      const Feature& feature,
+      const std::map<std::string, std::string>& feature_parameters);
+
+  // Initializes and registers a FeatureList instance based on the current
+  // FeatureList, overridden with a single disabled feature.
+  void InitAndDisableFeature(const Feature& feature);
+
+  // Initializes and registers a FeatureList instance based on the current
+  // FeatureList, overridden with a single feature either enabled or
+  // disabled depending on |enabled|.
+  void InitWithFeatureState(const Feature& feature, bool enabled);
+
+ private:
+  // Initializes and registers a FeatureList instance based on the current
+  // FeatureList, overridden with the given enabled and disabled features.
+  // Any feature overrides already present in the global FeatureList will
+  // continue to apply, unless they conflict with the overrides passed into this
+  // method.
+  // Field trials will apply to the enabled features, in the same order. The
+  // number of trials must be less than or equal to the number of enabled
+  // features.
+  // Trials are expected to outlive the ScopedFeatureList.
+  void InitWithFeaturesAndFieldTrials(
+      const std::vector<Feature>& enabled_features,
+      const std::vector<FieldTrial*>& trials_for_enabled_features,
+      const std::vector<Feature>& disabled_features);
+
+  // Initializes and registers a FeatureList instance based on the current
+  // FeatureList, overridden with a single enabled feature and an associated
+  // field trial override.
+  // |trial| is expected to outlive the ScopedFeatureList.
+  void InitAndEnableFeatureWithFieldTrialOverride(const Feature& feature,
+                                                  FieldTrial* trial);
+
+  std::unique_ptr<FeatureList> original_feature_list_;
+  scoped_refptr<FieldTrial> field_trial_override_;
+  std::unique_ptr<base::FieldTrialList> field_trial_list_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedFeatureList);
+};
+
+}  // namespace test
+}  // namespace base
+
+#endif  // BASE_TEST_SCOPED_FEATURE_LIST_H_
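A minimal usage sketch (kMyFeature is a hypothetical feature, defined only for
the example):

    #include "base/feature_list.h"
    #include "base/test/scoped_feature_list.h"
    #include "testing/gtest/include/gtest/gtest.h"

    namespace {
    // Hypothetical feature, defined only for this sketch.
    const base::Feature kMyFeature{"MyFeature",
                                   base::FEATURE_DISABLED_BY_DEFAULT};
    }  // namespace

    TEST(ScopedFeatureListSketch, EnablesFeatureWithinScope) {
      base::test::ScopedFeatureList scoped_feature_list;
      scoped_feature_list.InitAndEnableFeature(kMyFeature);
      EXPECT_TRUE(base::FeatureList::IsEnabled(kMyFeature));
      // The previous FeatureList instance is restored when
      // |scoped_feature_list| is destroyed.
    }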
diff --git a/base/test/scoped_feature_list_unittest.cc b/base/test/scoped_feature_list_unittest.cc
new file mode 100644
index 0000000..03d9897
--- /dev/null
+++ b/base/test/scoped_feature_list_unittest.cc
@@ -0,0 +1,297 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/scoped_feature_list.h"
+
+#include <map>
+#include <string>
+#include <utility>
+
+#include "base/metrics/field_trial.h"
+#include "base/metrics/field_trial_params.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace test {
+
+namespace {
+
+const Feature kTestFeature1{"TestFeature1", FEATURE_DISABLED_BY_DEFAULT};
+const Feature kTestFeature2{"TestFeature2", FEATURE_DISABLED_BY_DEFAULT};
+
+void ExpectFeatures(const std::string& enabled_features,
+                    const std::string& disabled_features) {
+  FeatureList* list = FeatureList::GetInstance();
+  std::string actual_enabled_features;
+  std::string actual_disabled_features;
+
+  list->GetFeatureOverrides(&actual_enabled_features,
+                            &actual_disabled_features);
+
+  EXPECT_EQ(enabled_features, actual_enabled_features);
+  EXPECT_EQ(disabled_features, actual_disabled_features);
+}
+
+}  // namespace
+
+class ScopedFeatureListTest : public testing::Test {
+ public:
+  ScopedFeatureListTest() {
+    // Clear default feature list.
+    std::unique_ptr<FeatureList> feature_list(new FeatureList);
+    feature_list->InitializeFromCommandLine(std::string(), std::string());
+    original_feature_list_ = FeatureList::ClearInstanceForTesting();
+    FeatureList::SetInstance(std::move(feature_list));
+  }
+
+  ~ScopedFeatureListTest() override {
+    // Restore feature list.
+    if (original_feature_list_) {
+      FeatureList::ClearInstanceForTesting();
+      FeatureList::RestoreInstanceForTesting(std::move(original_feature_list_));
+    }
+  }
+
+ private:
+  // Saves the original FeatureList and restores it after the test finishes.
+  std::unique_ptr<FeatureList> original_feature_list_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedFeatureListTest);
+};
+
+TEST_F(ScopedFeatureListTest, BasicScoped) {
+  ExpectFeatures(std::string(), std::string());
+  EXPECT_FALSE(FeatureList::IsEnabled(kTestFeature1));
+  {
+    test::ScopedFeatureList feature_list1;
+    feature_list1.InitFromCommandLine("TestFeature1", std::string());
+    ExpectFeatures("TestFeature1", std::string());
+    EXPECT_TRUE(FeatureList::IsEnabled(kTestFeature1));
+  }
+  ExpectFeatures(std::string(), std::string());
+  EXPECT_FALSE(FeatureList::IsEnabled(kTestFeature1));
+}
+
+TEST_F(ScopedFeatureListTest, EnableWithFeatureParameters) {
+  const char kParam1[] = "param_1";
+  const char kParam2[] = "param_2";
+  const char kValue1[] = "value_1";
+  const char kValue2[] = "value_2";
+  std::map<std::string, std::string> parameters;
+  parameters[kParam1] = kValue1;
+  parameters[kParam2] = kValue2;
+
+  ExpectFeatures(std::string(), std::string());
+  EXPECT_EQ(nullptr, FeatureList::GetFieldTrial(kTestFeature1));
+  EXPECT_EQ("", GetFieldTrialParamValueByFeature(kTestFeature1, kParam1));
+  EXPECT_EQ("", GetFieldTrialParamValueByFeature(kTestFeature1, kParam2));
+  FieldTrial::ActiveGroups active_groups;
+  FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
+  EXPECT_EQ(0u, active_groups.size());
+
+  {
+    test::ScopedFeatureList feature_list;
+
+    feature_list.InitAndEnableFeatureWithParameters(kTestFeature1, parameters);
+    EXPECT_TRUE(FeatureList::IsEnabled(kTestFeature1));
+    EXPECT_EQ(kValue1,
+              GetFieldTrialParamValueByFeature(kTestFeature1, kParam1));
+    EXPECT_EQ(kValue2,
+              GetFieldTrialParamValueByFeature(kTestFeature1, kParam2));
+    active_groups.clear();
+    FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
+    EXPECT_EQ(1u, active_groups.size());
+  }
+
+  ExpectFeatures(std::string(), std::string());
+  EXPECT_EQ(nullptr, FeatureList::GetFieldTrial(kTestFeature1));
+  EXPECT_EQ("", GetFieldTrialParamValueByFeature(kTestFeature1, kParam1));
+  EXPECT_EQ("", GetFieldTrialParamValueByFeature(kTestFeature1, kParam2));
+  active_groups.clear();
+  FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
+  EXPECT_EQ(0u, active_groups.size());
+}
+
+TEST_F(ScopedFeatureListTest, OverrideWithFeatureParameters) {
+  FieldTrialList field_trial_list(nullptr);
+  scoped_refptr<FieldTrial> trial =
+      FieldTrialList::CreateFieldTrial("foo", "bar");
+  const char kParam[] = "param_1";
+  const char kValue[] = "value_1";
+  std::map<std::string, std::string> parameters;
+  parameters[kParam] = kValue;
+
+  test::ScopedFeatureList feature_list1;
+  feature_list1.InitFromCommandLine("TestFeature1<foo,TestFeature2",
+                                    std::string());
+
+  // Check initial state.
+  ExpectFeatures("TestFeature1<foo,TestFeature2", std::string());
+  EXPECT_TRUE(FeatureList::IsEnabled(kTestFeature1));
+  EXPECT_TRUE(FeatureList::IsEnabled(kTestFeature2));
+  EXPECT_EQ(trial.get(), FeatureList::GetFieldTrial(kTestFeature1));
+  EXPECT_EQ(nullptr, FeatureList::GetFieldTrial(kTestFeature2));
+  EXPECT_EQ("", GetFieldTrialParamValueByFeature(kTestFeature1, kParam));
+  EXPECT_EQ("", GetFieldTrialParamValueByFeature(kTestFeature2, kParam));
+
+  {
+    // Override feature with existing field trial.
+    test::ScopedFeatureList feature_list2;
+
+    feature_list2.InitAndEnableFeatureWithParameters(kTestFeature1, parameters);
+    EXPECT_TRUE(FeatureList::IsEnabled(kTestFeature1));
+    EXPECT_TRUE(FeatureList::IsEnabled(kTestFeature2));
+    EXPECT_EQ(kValue, GetFieldTrialParamValueByFeature(kTestFeature1, kParam));
+    EXPECT_EQ("", GetFieldTrialParamValueByFeature(kTestFeature2, kParam));
+    EXPECT_NE(trial.get(), FeatureList::GetFieldTrial(kTestFeature1));
+    EXPECT_NE(nullptr, FeatureList::GetFieldTrial(kTestFeature1));
+    EXPECT_EQ(nullptr, FeatureList::GetFieldTrial(kTestFeature2));
+  }
+
+  // Check that initial state is restored.
+  ExpectFeatures("TestFeature1<foo,TestFeature2", std::string());
+  EXPECT_TRUE(FeatureList::IsEnabled(kTestFeature1));
+  EXPECT_TRUE(FeatureList::IsEnabled(kTestFeature2));
+  EXPECT_EQ(trial.get(), FeatureList::GetFieldTrial(kTestFeature1));
+  EXPECT_EQ(nullptr, FeatureList::GetFieldTrial(kTestFeature2));
+  EXPECT_EQ("", GetFieldTrialParamValueByFeature(kTestFeature1, kParam));
+  EXPECT_EQ("", GetFieldTrialParamValueByFeature(kTestFeature2, kParam));
+
+  {
+    // Override feature with no existing field trial.
+    test::ScopedFeatureList feature_list2;
+
+    feature_list2.InitAndEnableFeatureWithParameters(kTestFeature2, parameters);
+    EXPECT_TRUE(FeatureList::IsEnabled(kTestFeature1));
+    EXPECT_TRUE(FeatureList::IsEnabled(kTestFeature2));
+    EXPECT_EQ("", GetFieldTrialParamValueByFeature(kTestFeature1, kParam));
+    EXPECT_EQ(kValue, GetFieldTrialParamValueByFeature(kTestFeature2, kParam));
+    EXPECT_EQ(trial.get(), FeatureList::GetFieldTrial(kTestFeature1));
+    EXPECT_NE(nullptr, FeatureList::GetFieldTrial(kTestFeature2));
+  }
+
+  // Check that initial state is restored.
+  ExpectFeatures("TestFeature1<foo,TestFeature2", std::string());
+  EXPECT_TRUE(FeatureList::IsEnabled(kTestFeature1));
+  EXPECT_TRUE(FeatureList::IsEnabled(kTestFeature2));
+  EXPECT_EQ(trial.get(), FeatureList::GetFieldTrial(kTestFeature1));
+  EXPECT_EQ(nullptr, FeatureList::GetFieldTrial(kTestFeature2));
+  EXPECT_EQ("", GetFieldTrialParamValueByFeature(kTestFeature1, kParam));
+  EXPECT_EQ("", GetFieldTrialParamValueByFeature(kTestFeature2, kParam));
+}
+
+TEST_F(ScopedFeatureListTest, EnableFeatureOverrideDisable) {
+  test::ScopedFeatureList feature_list1;
+  feature_list1.InitWithFeatures({}, {kTestFeature1});
+
+  {
+    test::ScopedFeatureList feature_list2;
+    feature_list2.InitWithFeatures({kTestFeature1}, {});
+    ExpectFeatures("TestFeature1", std::string());
+  }
+}
+
+TEST_F(ScopedFeatureListTest, FeatureOverrideNotMakeDuplicate) {
+  test::ScopedFeatureList feature_list1;
+  feature_list1.InitWithFeatures({}, {kTestFeature1});
+
+  {
+    test::ScopedFeatureList feature_list2;
+    feature_list2.InitWithFeatures({}, {kTestFeature1});
+    ExpectFeatures(std::string(), "TestFeature1");
+  }
+}
+
+TEST_F(ScopedFeatureListTest, FeatureOverrideFeatureWithDefault) {
+  test::ScopedFeatureList feature_list1;
+  feature_list1.InitFromCommandLine("*TestFeature1", std::string());
+
+  {
+    test::ScopedFeatureList feature_list2;
+    feature_list2.InitWithFeatures({kTestFeature1}, {});
+    ExpectFeatures("TestFeature1", std::string());
+  }
+}
+
+TEST_F(ScopedFeatureListTest, FeatureOverrideFeatureWithDefault2) {
+  test::ScopedFeatureList feature_list1;
+  feature_list1.InitFromCommandLine("*TestFeature1", std::string());
+
+  {
+    test::ScopedFeatureList feature_list2;
+    feature_list2.InitWithFeatures({}, {kTestFeature1});
+    ExpectFeatures(std::string(), "TestFeature1");
+  }
+}
+
+TEST_F(ScopedFeatureListTest, FeatureOverrideFeatureWithEnabledFieldTrial) {
+  test::ScopedFeatureList feature_list1;
+
+  std::unique_ptr<FeatureList> feature_list(new FeatureList);
+  FieldTrialList field_trial_list(nullptr);
+  FieldTrial* trial = FieldTrialList::CreateFieldTrial("TrialExample", "A");
+  feature_list->RegisterFieldTrialOverride(
+      kTestFeature1.name, FeatureList::OVERRIDE_ENABLE_FEATURE, trial);
+  feature_list1.InitWithFeatureList(std::move(feature_list));
+
+  {
+    test::ScopedFeatureList feature_list2;
+    feature_list2.InitWithFeatures({kTestFeature1}, {});
+    ExpectFeatures("TestFeature1", std::string());
+  }
+}
+
+TEST_F(ScopedFeatureListTest, FeatureOverrideFeatureWithDisabledFieldTrial) {
+  test::ScopedFeatureList feature_list1;
+
+  std::unique_ptr<FeatureList> feature_list(new FeatureList);
+  FieldTrialList field_trial_list(nullptr);
+  FieldTrial* trial = FieldTrialList::CreateFieldTrial("TrialExample", "A");
+  feature_list->RegisterFieldTrialOverride(
+      kTestFeature1.name, FeatureList::OVERRIDE_DISABLE_FEATURE, trial);
+  feature_list1.InitWithFeatureList(std::move(feature_list));
+
+  {
+    test::ScopedFeatureList feature_list2;
+    feature_list2.InitWithFeatures({kTestFeature1}, {});
+    ExpectFeatures("TestFeature1", std::string());
+  }
+}
+
+TEST_F(ScopedFeatureListTest, FeatureOverrideKeepsOtherExistingFeature) {
+  test::ScopedFeatureList feature_list1;
+  feature_list1.InitWithFeatures({}, {kTestFeature1});
+
+  {
+    test::ScopedFeatureList feature_list2;
+    feature_list2.InitWithFeatures({}, {kTestFeature2});
+    EXPECT_FALSE(FeatureList::IsEnabled(kTestFeature1));
+    EXPECT_FALSE(FeatureList::IsEnabled(kTestFeature2));
+  }
+}
+
+TEST_F(ScopedFeatureListTest, FeatureOverrideKeepsOtherExistingFeature2) {
+  test::ScopedFeatureList feature_list1;
+  feature_list1.InitWithFeatures({}, {kTestFeature1});
+
+  {
+    test::ScopedFeatureList feature_list2;
+    feature_list2.InitWithFeatures({kTestFeature2}, {});
+    ExpectFeatures("TestFeature2", "TestFeature1");
+  }
+}
+
+TEST_F(ScopedFeatureListTest, FeatureOverrideKeepsOtherExistingDefaultFeature) {
+  test::ScopedFeatureList feature_list1;
+  feature_list1.InitFromCommandLine("*TestFeature1", std::string());
+
+  {
+    test::ScopedFeatureList feature_list2;
+    feature_list2.InitWithFeatures({}, {kTestFeature2});
+    ExpectFeatures("*TestFeature1", "TestFeature2");
+  }
+}
+
+}  // namespace test
+}  // namespace base
diff --git a/base/test/scoped_locale.cc b/base/test/scoped_locale.cc
new file mode 100644
index 0000000..c018284
--- /dev/null
+++ b/base/test/scoped_locale.cc
@@ -0,0 +1,23 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/scoped_locale.h"
+
+#include <locale.h>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+ScopedLocale::ScopedLocale(const std::string& locale) {
+  prev_locale_ = setlocale(LC_ALL, nullptr);
+  EXPECT_TRUE(setlocale(LC_ALL, locale.c_str()) != nullptr)
+      << "Failed to set locale: " << locale;
+}
+
+ScopedLocale::~ScopedLocale() {
+  EXPECT_STREQ(prev_locale_.c_str(), setlocale(LC_ALL, prev_locale_.c_str()));
+}
+
+}  // namespace base
diff --git a/base/test/scoped_locale.h b/base/test/scoped_locale.h
new file mode 100644
index 0000000..ef64e98
--- /dev/null
+++ b/base/test/scoped_locale.h
@@ -0,0 +1,29 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_SCOPED_LOCALE_H_
+#define BASE_TEST_SCOPED_LOCALE_H_
+
+#include <string>
+
+#include "base/macros.h"
+
+namespace base {
+
+// Sets the given |locale| on construction, and restores the previous locale
+// on destruction.
+class ScopedLocale {
+ public:
+  explicit ScopedLocale(const std::string& locale);
+  ~ScopedLocale();
+
+ private:
+  std::string prev_locale_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedLocale);
+};
+
+}  // namespace base
+
+#endif  // BASE_TEST_SCOPED_LOCALE_H_
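A minimal usage sketch (whether the "fr_FR.UTF-8" locale is available depends
on the host system):

    #include <locale.h>

    #include "base/test/scoped_locale.h"
    #include "testing/gtest/include/gtest/gtest.h"

    TEST(ScopedLocaleSketch, SetsAndRestoresLocale) {
      {
        base::ScopedLocale scoped_locale("fr_FR.UTF-8");
        // Locale-sensitive code under test runs here.
        EXPECT_STREQ("fr_FR.UTF-8", setlocale(LC_ALL, nullptr));
      }
      // On scope exit the previous locale has been restored (and verified by
      // the destructor's EXPECT_STREQ).
    }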
diff --git a/base/test/scoped_mock_time_message_loop_task_runner.cc b/base/test/scoped_mock_time_message_loop_task_runner.cc
new file mode 100644
index 0000000..8e855e5
--- /dev/null
+++ b/base/test/scoped_mock_time_message_loop_task_runner.cc
@@ -0,0 +1,38 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/scoped_mock_time_message_loop_task_runner.h"
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop_current.h"
+#include "base/run_loop.h"
+#include "base/test/test_pending_task.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/time/time.h"
+
+namespace base {
+
+ScopedMockTimeMessageLoopTaskRunner::ScopedMockTimeMessageLoopTaskRunner()
+    : task_runner_(new TestMockTimeTaskRunner),
+      previous_task_runner_(ThreadTaskRunnerHandle::Get()) {
+  DCHECK(MessageLoopCurrent::Get());
+  // Ensure that any initialization tasks posted to the MessageLoop by a test
+  // fixture are processed before replacing its TaskRunner.
+  RunLoop().RunUntilIdle();
+  MessageLoopCurrent::Get()->SetTaskRunner(task_runner_);
+}
+
+ScopedMockTimeMessageLoopTaskRunner::~ScopedMockTimeMessageLoopTaskRunner() {
+  DCHECK(previous_task_runner_->RunsTasksInCurrentSequence());
+  DCHECK_EQ(task_runner_, ThreadTaskRunnerHandle::Get());
+  for (auto& pending_task : task_runner_->TakePendingTasks()) {
+    previous_task_runner_->PostDelayedTask(
+        pending_task.location, std::move(pending_task.task),
+        pending_task.GetTimeToRun() - task_runner_->NowTicks());
+  }
+  MessageLoopCurrent::Get()->SetTaskRunner(std::move(previous_task_runner_));
+}
+
+}  // namespace base
diff --git a/base/test/scoped_mock_time_message_loop_task_runner.h b/base/test/scoped_mock_time_message_loop_task_runner.h
new file mode 100644
index 0000000..2a034ee
--- /dev/null
+++ b/base/test/scoped_mock_time_message_loop_task_runner.h
@@ -0,0 +1,45 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_SCOPED_MOCK_TIME_MESSAGE_LOOP_TASK_RUNNER_H_
+#define BASE_TEST_SCOPED_MOCK_TIME_MESSAGE_LOOP_TASK_RUNNER_H_
+
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/test/test_mock_time_task_runner.h"
+
+namespace base {
+
+class SingleThreadTaskRunner;
+
+// A scoped wrapper around TestMockTimeTaskRunner that replaces
+// MessageLoopCurrent::Get()'s task runner (and consequently
+// ThreadTaskRunnerHandle) with a TestMockTimeTaskRunner and resets it back at
+// the end of its scope.
+//
+// Note: RunLoop() will not work in the scope of a
+// ScopedMockTimeMessageLoopTaskRunner; the underlying TestMockTimeTaskRunner's
+// methods must be used instead to pump tasks.
+//
+// DEPRECATED: Use a TestMockTimeTaskRunner::Type::kBoundToThread instead of a
+// MessageLoop + ScopedMockTimeMessageLoopTaskRunner.
+// TODO(gab): Remove usage of this API and delete it.
+class ScopedMockTimeMessageLoopTaskRunner {
+ public:
+  ScopedMockTimeMessageLoopTaskRunner();
+  ~ScopedMockTimeMessageLoopTaskRunner();
+
+  TestMockTimeTaskRunner* task_runner() { return task_runner_.get(); }
+  TestMockTimeTaskRunner* operator->() { return task_runner_.get(); }
+
+ private:
+  const scoped_refptr<TestMockTimeTaskRunner> task_runner_;
+  scoped_refptr<SingleThreadTaskRunner> previous_task_runner_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedMockTimeMessageLoopTaskRunner);
+};
+
+}  // namespace base
+
+#endif  // BASE_TEST_SCOPED_MOCK_TIME_MESSAGE_LOOP_TASK_RUNNER_H_
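A minimal usage sketch, assuming a MessageLoop on the current thread (per the
DEPRECATED note above, new code should prefer a
TestMockTimeTaskRunner::Type::kBoundToThread instead):

    #include "base/bind.h"
    #include "base/location.h"
    #include "base/message_loop/message_loop.h"
    #include "base/test/scoped_mock_time_message_loop_task_runner.h"
    #include "base/threading/thread_task_runner_handle.h"
    #include "base/time/time.h"
    #include "testing/gtest/include/gtest/gtest.h"

    TEST(ScopedMockTimeSketch, FastForwardsDelayedTask) {
      base::MessageLoop message_loop;
      base::ScopedMockTimeMessageLoopTaskRunner mock_time;
      bool ran = false;
      base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
          FROM_HERE, base::BindOnce([](bool* flag) { *flag = true; }, &ran),
          base::TimeDelta::FromSeconds(10));
      // Advances virtual time; the task runs without a real ten-second wait.
      mock_time->FastForwardBy(base::TimeDelta::FromSeconds(10));
      EXPECT_TRUE(ran);
    }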
diff --git a/base/test/scoped_mock_time_message_loop_task_runner_unittest.cc b/base/test/scoped_mock_time_message_loop_task_runner_unittest.cc
new file mode 100644
index 0000000..b08323d
--- /dev/null
+++ b/base/test/scoped_mock_time_message_loop_task_runner_unittest.cc
@@ -0,0 +1,120 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/scoped_mock_time_message_loop_task_runner.h"
+
+#include <memory>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/callback_forward.h"
+#include "base/containers/circular_deque.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/memory/ref_counted.h"
+#include "base/message_loop/message_loop.h"
+#include "base/message_loop/message_loop_current.h"
+#include "base/test/test_mock_time_task_runner.h"
+#include "base/test/test_pending_task.h"
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+TaskRunner* GetCurrentTaskRunner() {
+  return MessageLoopCurrent::Get()->task_runner().get();
+}
+
+void AssignTrue(bool* out) {
+  *out = true;
+}
+
+// Pops a task from the front of |pending_tasks| and returns it.
+TestPendingTask PopFront(base::circular_deque<TestPendingTask>* pending_tasks) {
+  TestPendingTask task = std::move(pending_tasks->front());
+  pending_tasks->pop_front();
+  return task;
+}
+
+class ScopedMockTimeMessageLoopTaskRunnerTest : public testing::Test {
+ public:
+  ScopedMockTimeMessageLoopTaskRunnerTest()
+      : original_task_runner_(new TestMockTimeTaskRunner()) {
+    MessageLoopCurrent::Get()->SetTaskRunner(original_task_runner_);
+  }
+
+ protected:
+  TestMockTimeTaskRunner* original_task_runner() {
+    return original_task_runner_.get();
+  }
+
+ private:
+  scoped_refptr<TestMockTimeTaskRunner> original_task_runner_;
+
+  MessageLoop message_loop_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedMockTimeMessageLoopTaskRunnerTest);
+};
+
+// Verifies a new TaskRunner is installed while a
+// ScopedMockTimeMessageLoopTaskRunner exists and the previous one is installed
+// after destruction.
+TEST_F(ScopedMockTimeMessageLoopTaskRunnerTest, CurrentTaskRunners) {
+  auto scoped_task_runner_ =
+      std::make_unique<ScopedMockTimeMessageLoopTaskRunner>();
+  EXPECT_EQ(scoped_task_runner_->task_runner(), GetCurrentTaskRunner());
+  scoped_task_runner_.reset();
+  EXPECT_EQ(original_task_runner(), GetCurrentTaskRunner());
+}
+
+TEST_F(ScopedMockTimeMessageLoopTaskRunnerTest,
+       IncompleteTasksAreCopiedToPreviousTaskRunnerAfterDestruction) {
+  auto scoped_task_runner_ =
+      std::make_unique<ScopedMockTimeMessageLoopTaskRunner>();
+
+  bool task_10_has_run = false;
+  bool task_11_has_run = false;
+
+  Closure task_1 = DoNothing();
+  Closure task_2 = DoNothing();
+  Closure task_10 = Bind(&AssignTrue, &task_10_has_run);
+  Closure task_11 = Bind(&AssignTrue, &task_11_has_run);
+
+  constexpr TimeDelta task_1_delay = TimeDelta::FromSeconds(1);
+  constexpr TimeDelta task_2_delay = TimeDelta::FromSeconds(2);
+  constexpr TimeDelta task_10_delay = TimeDelta::FromSeconds(10);
+  constexpr TimeDelta task_11_delay = TimeDelta::FromSeconds(11);
+
+  constexpr TimeDelta step_time_by = TimeDelta::FromSeconds(5);
+
+  GetCurrentTaskRunner()->PostDelayedTask(FROM_HERE, task_1, task_1_delay);
+  GetCurrentTaskRunner()->PostDelayedTask(FROM_HERE, task_2, task_2_delay);
+  GetCurrentTaskRunner()->PostDelayedTask(FROM_HERE, task_10, task_10_delay);
+  GetCurrentTaskRunner()->PostDelayedTask(FROM_HERE, task_11, task_11_delay);
+
+  scoped_task_runner_->task_runner()->FastForwardBy(step_time_by);
+
+  scoped_task_runner_.reset();
+
+  base::circular_deque<TestPendingTask> pending_tasks =
+      original_task_runner()->TakePendingTasks();
+
+  EXPECT_EQ(2U, pending_tasks.size());
+
+  TestPendingTask pending_task = PopFront(&pending_tasks);
+  EXPECT_FALSE(task_10_has_run);
+  std::move(pending_task.task).Run();
+  EXPECT_TRUE(task_10_has_run);
+  EXPECT_EQ(task_10_delay - step_time_by, pending_task.delay);
+
+  pending_task = PopFront(&pending_tasks);
+  EXPECT_FALSE(task_11_has_run);
+  std::move(pending_task.task).Run();
+  EXPECT_TRUE(task_11_has_run);
+  EXPECT_EQ(task_11_delay - step_time_by, pending_task.delay);
+}
+
+}  // namespace
+}  // namespace base
diff --git a/base/test/scoped_path_override.cc b/base/test/scoped_path_override.cc
new file mode 100644
index 0000000..b8cfd4a
--- /dev/null
+++ b/base/test/scoped_path_override.cc
@@ -0,0 +1,40 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/scoped_path_override.h"
+
+#include "base/logging.h"
+#include "base/path_service.h"
+
+namespace base {
+
+ScopedPathOverride::ScopedPathOverride(int key) : key_(key) {
+  bool result = temp_dir_.CreateUniqueTempDir();
+  CHECK(result);
+  result = PathService::Override(key, temp_dir_.GetPath());
+  CHECK(result);
+}
+
+ScopedPathOverride::ScopedPathOverride(int key, const base::FilePath& dir)
+    : key_(key) {
+  bool result = PathService::Override(key, dir);
+  CHECK(result);
+}
+
+ScopedPathOverride::ScopedPathOverride(int key,
+                                       const FilePath& path,
+                                       bool is_absolute,
+                                       bool create)
+    : key_(key) {
+  bool result =
+      PathService::OverrideAndCreateIfNeeded(key, path, is_absolute, create);
+  CHECK(result);
+}
+
+ScopedPathOverride::~ScopedPathOverride() {
+  bool result = PathService::RemoveOverride(key_);
+  CHECK(result) << "The override seems to have been removed already!";
+}
+
+}  // namespace base
diff --git a/base/test/scoped_path_override.h b/base/test/scoped_path_override.h
new file mode 100644
index 0000000..f589149
--- /dev/null
+++ b/base/test/scoped_path_override.h
@@ -0,0 +1,43 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_SCOPED_PATH_OVERRIDE_H_
+#define BASE_TEST_SCOPED_PATH_OVERRIDE_H_
+
+#include "base/files/scoped_temp_dir.h"
+#include "base/macros.h"
+
+namespace base {
+
+class FilePath;
+
+// Sets a path override on construction, and removes it when the object goes
+// out of scope. This class is intended for tests that need to override paths,
+// ensuring the overrides are reverted when the test's scope is left.
+class ScopedPathOverride {
+ public:
+  // Constructor that initializes the override to a scoped temp directory.
+  explicit ScopedPathOverride(int key);
+
+  // Constructor that uses the path provided by the caller.
+  ScopedPathOverride(int key, const FilePath& dir);
+
+  // See PathService::OverrideAndCreateIfNeeded.
+  ScopedPathOverride(int key,
+                     const FilePath& path,
+                     bool is_absolute,
+                     bool create);
+  ~ScopedPathOverride();
+
+ private:
+  int key_;
+  ScopedTempDir temp_dir_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedPathOverride);
+};
+
+}  // namespace base
+
+#endif  // BASE_TEST_SCOPED_PATH_OVERRIDE_H_
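A minimal usage sketch, using base::DIR_HOME as an example key:

    #include "base/base_paths.h"
    #include "base/files/file_path.h"
    #include "base/path_service.h"
    #include "base/test/scoped_path_override.h"
    #include "testing/gtest/include/gtest/gtest.h"

    TEST(ScopedPathOverrideSketch, PointsKeyAtTempDir) {
      // Maps base::DIR_HOME to a fresh scoped temp directory.
      base::ScopedPathOverride home_override(base::DIR_HOME);
      base::FilePath home;
      EXPECT_TRUE(base::PathService::Get(base::DIR_HOME, &home));
      // |home| now refers to the temp directory; the original mapping is
      // restored when |home_override| is destroyed.
    }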
diff --git a/base/test/scoped_task_environment.cc b/base/test/scoped_task_environment.cc
new file mode 100644
index 0000000..3d580b0
--- /dev/null
+++ b/base/test/scoped_task_environment.cc
@@ -0,0 +1,346 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/scoped_task_environment.h"
+
+#include "base/bind_helpers.h"
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/synchronization/lock.h"
+#include "base/task_scheduler/post_task.h"
+#include "base/task_scheduler/task_scheduler.h"
+#include "base/task_scheduler/task_scheduler_impl.h"
+#include "base/test/test_mock_time_task_runner.h"
+#include "base/threading/sequence_local_storage_map.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/time/time.h"
+
+#if defined(OS_POSIX)
+#include "base/files/file_descriptor_watcher_posix.h"
+#endif
+
+namespace base {
+namespace test {
+
+namespace {
+
+std::unique_ptr<MessageLoop> CreateMessageLoopForMainThreadType(
+    ScopedTaskEnvironment::MainThreadType main_thread_type) {
+  switch (main_thread_type) {
+    case ScopedTaskEnvironment::MainThreadType::DEFAULT:
+      return std::make_unique<MessageLoop>(MessageLoop::TYPE_DEFAULT);
+    case ScopedTaskEnvironment::MainThreadType::MOCK_TIME:
+      return nullptr;
+    case ScopedTaskEnvironment::MainThreadType::UI:
+      return std::make_unique<MessageLoop>(MessageLoop::TYPE_UI);
+    case ScopedTaskEnvironment::MainThreadType::IO:
+      return std::make_unique<MessageLoop>(MessageLoop::TYPE_IO);
+  }
+  NOTREACHED();
+  return nullptr;
+}
+
+}  // namespace
+
+class ScopedTaskEnvironment::TestTaskTracker
+    : public internal::TaskSchedulerImpl::TaskTrackerImpl {
+ public:
+  TestTaskTracker();
+
+  // Allow running tasks.
+  void AllowRunTasks();
+
+  // Disallow running tasks. Returns true on success; success requires there to
+  // be no tasks currently running. Returns false if >0 tasks are currently
+  // running. Prior to returning false, it will attempt to block until at least
+  // one task has completed (in an attempt to avoid callers busy-looping
+  // DisallowRunTasks() calls with the same set of slowly ongoing tasks). This
+  // block attempt will also have a short timeout (in an attempt to prevent the
+  // fallout of blocking: if the only task remaining is blocked on the main
+  // thread, waiting for it to complete results in a deadlock...).
+  bool DisallowRunTasks();
+
+ private:
+  friend class ScopedTaskEnvironment;
+
+  // internal::TaskSchedulerImpl::TaskTrackerImpl:
+  void RunOrSkipTask(internal::Task task,
+                     internal::Sequence* sequence,
+                     bool can_run_task) override;
+
+  // Synchronizes accesses to members below.
+  Lock lock_;
+
+  // True if running tasks is allowed.
+  bool can_run_tasks_ = true;
+
+  // Signaled when |can_run_tasks_| becomes true.
+  ConditionVariable can_run_tasks_cv_;
+
+  // Signaled when a task is completed.
+  ConditionVariable task_completed_;
+
+  // Number of tasks that are currently running.
+  int num_tasks_running_ = 0;
+
+  DISALLOW_COPY_AND_ASSIGN(TestTaskTracker);
+};
+
+ScopedTaskEnvironment::ScopedTaskEnvironment(
+    MainThreadType main_thread_type,
+    ExecutionMode execution_control_mode)
+    : execution_control_mode_(execution_control_mode),
+      message_loop_(CreateMessageLoopForMainThreadType(main_thread_type)),
+      mock_time_task_runner_(
+          main_thread_type == MainThreadType::MOCK_TIME
+              ? MakeRefCounted<TestMockTimeTaskRunner>(
+                    TestMockTimeTaskRunner::Type::kBoundToThread)
+              : nullptr),
+      slsm_for_mock_time_(
+          main_thread_type == MainThreadType::MOCK_TIME
+              ? std::make_unique<internal::SequenceLocalStorageMap>()
+              : nullptr),
+      slsm_registration_for_mock_time_(
+          main_thread_type == MainThreadType::MOCK_TIME
+              ? std::make_unique<
+                    internal::ScopedSetSequenceLocalStorageMapForCurrentThread>(
+                    slsm_for_mock_time_.get())
+              : nullptr),
+#if defined(OS_POSIX)
+      file_descriptor_watcher_(
+          main_thread_type == MainThreadType::IO
+              ? std::make_unique<FileDescriptorWatcher>(
+                    static_cast<MessageLoopForIO*>(message_loop_.get()))
+              : nullptr),
+#endif  // defined(OS_POSIX)
+      task_tracker_(new TestTaskTracker()) {
+  CHECK(!TaskScheduler::GetInstance());
+
+  // Instantiate a TaskScheduler with 2 threads in each of its 4 pools. Threads
+  // stay alive even when they don't have work.
+  // Each pool uses two threads to prevent deadlocks in unit tests that have a
+  // sequence that uses WithBaseSyncPrimitives() to wait on the result of
+  // another sequence. This isn't perfect (doesn't solve wait chains) but solves
+  // the basic use case for now.
+  // TODO(fdoray/jeffreyhe): Make the TaskScheduler dynamically replace blocked
+  // threads and get rid of this limitation. http://crbug.com/738104
+  constexpr int kMaxThreads = 2;
+  const TimeDelta kSuggestedReclaimTime = TimeDelta::Max();
+  const SchedulerWorkerPoolParams worker_pool_params(kMaxThreads,
+                                                     kSuggestedReclaimTime);
+  TaskScheduler::SetInstance(std::make_unique<internal::TaskSchedulerImpl>(
+      "ScopedTaskEnvironment", WrapUnique(task_tracker_)));
+  task_scheduler_ = TaskScheduler::GetInstance();
+  TaskScheduler::GetInstance()->Start({worker_pool_params, worker_pool_params,
+                                       worker_pool_params, worker_pool_params});
+
+  if (execution_control_mode_ == ExecutionMode::QUEUED)
+    CHECK(task_tracker_->DisallowRunTasks());
+}
+
+ScopedTaskEnvironment::~ScopedTaskEnvironment() {
+  // Ideally this would call RunLoop().RunUntilIdle() here to catch any errors
+  // or infinite post loops in the remaining work, but that isn't possible
+  // right now because base::~MessageLoop() never did this and adding it here
+  // would make the migration away from MessageLoop that much harder.
+  CHECK_EQ(TaskScheduler::GetInstance(), task_scheduler_);
+  // Without FlushForTesting(), DeleteSoon() and ReleaseSoon() tasks could be
+  // skipped, resulting in memory leaks.
+  task_tracker_->AllowRunTasks();
+  TaskScheduler::GetInstance()->FlushForTesting();
+  TaskScheduler::GetInstance()->Shutdown();
+  TaskScheduler::GetInstance()->JoinForTesting();
+  // Destroying TaskScheduler state can result in waiting on worker threads.
+  // Make sure this is allowed to avoid flaking tests that have disallowed waits
+  // on their main thread.
+  ScopedAllowBaseSyncPrimitivesForTesting allow_waits_to_destroy_task_tracker;
+  TaskScheduler::SetInstance(nullptr);
+}
+
+scoped_refptr<base::SingleThreadTaskRunner>
+ScopedTaskEnvironment::GetMainThreadTaskRunner() {
+  if (message_loop_)
+    return message_loop_->task_runner();
+  DCHECK(mock_time_task_runner_);
+  return mock_time_task_runner_;
+}
+
+bool ScopedTaskEnvironment::MainThreadHasPendingTask() const {
+  if (message_loop_)
+    return !message_loop_->IsIdleForTesting();
+  DCHECK(mock_time_task_runner_);
+  return mock_time_task_runner_->HasPendingTask();
+}
+
+void ScopedTaskEnvironment::RunUntilIdle() {
+  // TODO(gab): This can be heavily simplified to essentially:
+  //     bool HasMainThreadTasks() {
+  //      if (message_loop_)
+  //        return !message_loop_->IsIdleForTesting();
+  //      return mock_time_task_runner_->NextPendingTaskDelay().is_zero();
+  //     }
+  //     while (task_tracker_->HasIncompleteTasks() || HasMainThreadTasks()) {
+  //       base::RunLoop().RunUntilIdle();
+  //       // Avoid busy-looping.
+  //       if (task_tracker_->HasIncompleteTasks())
+  //         PlatformThread::Sleep(TimeDelta::FromMilliseconds(1));
+  //     }
+  // Challenge: HasMainThreadTasks() requires support for proper
+  // IncomingTaskQueue::IsIdleForTesting() (check all queues).
+  //
+  // Other than that, it works because once
+  // |task_tracker_->HasIncompleteTasks()| is false, the only thing that can
+  // make it true again is a main thread task (ScopedTaskEnvironment owns all
+  // the threads). As such, the main thread can't racily see it as false and be
+  // wrong: if the main thread sees the atomic count at zero, it's the only one
+  // that can make it go up again, and the only things that can make it go up
+  // from the main thread are main thread tasks. Therefore, we're done if there
+  // aren't any main thread tasks left.
+  //
+  // This simplification further allows simplification of DisallowRunTasks().
+  //
+  // This can also be simplified even further once TaskTracker becomes directly
+  // aware of main thread tasks. https://crbug.com/660078.
+
+  for (;;) {
+    task_tracker_->AllowRunTasks();
+
+    // First run as many tasks as possible on the main thread in parallel with
+    // tasks in TaskScheduler. This increases likelihood of TSAN catching
+    // threading errors and eliminates possibility of hangs should a
+    // TaskScheduler task synchronously block on a main thread task
+    // (TaskScheduler::FlushForTesting() can't be used here for that reason).
+    RunLoop().RunUntilIdle();
+
+    // Then halt TaskScheduler. DisallowRunTasks() failing indicates that there
+    // were TaskScheduler tasks currently running. In that case, try again from
+    // top when DisallowRunTasks() yields control back to this thread as they
+    // may have posted main thread tasks.
+    if (!task_tracker_->DisallowRunTasks())
+      continue;
+
+    // Once TaskScheduler is halted, run any remaining main thread tasks (which
+    // may have been posted by TaskScheduler tasks that completed between the
+    // above main thread RunUntilIdle() and TaskScheduler DisallowRunTasks()).
+    // Note: this assumes that no main thread task synchronously blocks on a
+    // TaskScheduler task (it certainly shouldn't); this call could otherwise
+    // hang.
+    RunLoop().RunUntilIdle();
+
+    // The above RunUntilIdle() guarantees there are no remaining main thread
+    // tasks (the TaskScheduler being halted during the last RunUntilIdle() is
+    // key as it prevents a task being posted to it racily with it determining
+    // it had no work remaining). Therefore, we're done if there is no more work
+    // on TaskScheduler either (there can be TaskScheduler work remaining if
+    // DisallowRunTasks() preempted work and/or the last RunUntilIdle() posted
+    // more TaskScheduler tasks).
+    // Note: this last |if| couldn't be turned into a |do {} while();|. A
+    // conditional loop makes it such that |continue;| results in checking the
+    // condition (not unconditionally loop again) which would be incorrect for
+    // the above logic as it'd then be possible for a TaskScheduler task to be
+    // running during the DisallowRunTasks() test, causing it to fail, but then
+    // post to the main thread and complete before the loop's condition is
+    // verified which could result in HasIncompleteUndelayedTasksForTesting()
+    // returning false and the loop erroneously exiting with a pending task on
+    // the main thread.
+    if (!task_tracker_->HasIncompleteUndelayedTasksForTesting())
+      break;
+  }
+
+  // The above loop always ends with running tasks being disallowed. Re-enable
+  // parallel execution before returning unless in ExecutionMode::QUEUED.
+  if (execution_control_mode_ != ExecutionMode::QUEUED)
+    task_tracker_->AllowRunTasks();
+}
+
+void ScopedTaskEnvironment::FastForwardBy(TimeDelta delta) {
+  DCHECK(mock_time_task_runner_);
+  mock_time_task_runner_->FastForwardBy(delta);
+}
+
+void ScopedTaskEnvironment::FastForwardUntilNoTasksRemain() {
+  DCHECK(mock_time_task_runner_);
+  mock_time_task_runner_->FastForwardUntilNoTasksRemain();
+}
+
+const TickClock* ScopedTaskEnvironment::GetMockTickClock() {
+  DCHECK(mock_time_task_runner_);
+  return mock_time_task_runner_->GetMockTickClock();
+}
+
+std::unique_ptr<TickClock> ScopedTaskEnvironment::DeprecatedGetMockTickClock() {
+  DCHECK(mock_time_task_runner_);
+  return mock_time_task_runner_->DeprecatedGetMockTickClock();
+}
+
+size_t ScopedTaskEnvironment::GetPendingMainThreadTaskCount() const {
+  DCHECK(mock_time_task_runner_);
+  return mock_time_task_runner_->GetPendingTaskCount();
+}
+
+TimeDelta ScopedTaskEnvironment::NextMainThreadPendingTaskDelay() const {
+  DCHECK(mock_time_task_runner_);
+  return mock_time_task_runner_->NextPendingTaskDelay();
+}
+
+ScopedTaskEnvironment::TestTaskTracker::TestTaskTracker()
+    : internal::TaskSchedulerImpl::TaskTrackerImpl("ScopedTaskEnvironment"),
+      can_run_tasks_cv_(&lock_),
+      task_completed_(&lock_) {}
+
+void ScopedTaskEnvironment::TestTaskTracker::AllowRunTasks() {
+  AutoLock auto_lock(lock_);
+  can_run_tasks_ = true;
+  can_run_tasks_cv_.Broadcast();
+}
+
+bool ScopedTaskEnvironment::TestTaskTracker::DisallowRunTasks() {
+  AutoLock auto_lock(lock_);
+
+  // Can't disallow running tasks if there are tasks currently running.
+  if (num_tasks_running_ > 0) {
+    // Attempt to wait a bit so that the caller doesn't busy-loop with the same
+    // set of pending work. A short wait is required to avoid deadlock
+    // scenarios. See DisallowRunTasks()'s declaration for more details.
+    task_completed_.TimedWait(TimeDelta::FromMilliseconds(1));
+    return false;
+  }
+
+  can_run_tasks_ = false;
+  return true;
+}
+
+void ScopedTaskEnvironment::TestTaskTracker::RunOrSkipTask(
+    internal::Task task,
+    internal::Sequence* sequence,
+    bool can_run_task) {
+  {
+    AutoLock auto_lock(lock_);
+
+    while (!can_run_tasks_)
+      can_run_tasks_cv_.Wait();
+
+    ++num_tasks_running_;
+  }
+
+  internal::TaskSchedulerImpl::TaskTrackerImpl::RunOrSkipTask(
+      std::move(task), sequence, can_run_task);
+
+  {
+    AutoLock auto_lock(lock_);
+
+    CHECK_GT(num_tasks_running_, 0);
+    CHECK(can_run_tasks_);
+
+    --num_tasks_running_;
+
+    task_completed_.Broadcast();
+  }
+}
+
+}  // namespace test
+}  // namespace base
diff --git a/base/test/scoped_task_environment.h b/base/test/scoped_task_environment.h
new file mode 100644
index 0000000..f9523b3
--- /dev/null
+++ b/base/test/scoped_task_environment.h
@@ -0,0 +1,177 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_SCOPED_TASK_ENVIRONMENT_H_
+#define BASE_TEST_SCOPED_TASK_ENVIRONMENT_H_
+
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/single_thread_task_runner.h"
+#include "base/task_scheduler/lazy_task_runner.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace internal {
+class ScopedSetSequenceLocalStorageMapForCurrentThread;
+class SequenceLocalStorageMap;
+}  // namespace internal
+
+class FileDescriptorWatcher;
+class MessageLoop;
+class TaskScheduler;
+class TestMockTimeTaskRunner;
+class TickClock;
+
+namespace test {
+
+// ScopedTaskEnvironment allows usage of these APIs within its scope:
+// - (Thread|Sequenced)TaskRunnerHandle, on the thread where it lives
+// - base/task_scheduler/post_task.h, on any thread
+//
+// Tests that need either of these APIs should instantiate a
+// ScopedTaskEnvironment.
+//
+// Tasks posted to the (Thread|Sequenced)TaskRunnerHandle run synchronously when
+// RunLoop::Run(UntilIdle) or ScopedTaskEnvironment::RunUntilIdle is called on
+// the thread where the ScopedTaskEnvironment lives.
+//
+// Tasks posted through base/task_scheduler/post_task.h run on dedicated
+// threads. If ExecutionMode is QUEUED, they run when RunUntilIdle() or
+// ~ScopedTaskEnvironment is called. If ExecutionMode is ASYNC, they run
+// as they are posted.
+//
+// All methods of ScopedTaskEnvironment must be called from the same thread.
+//
+// Usage:
+//
+//   class MyTestFixture : public testing::Test {
+//    public:
+//     (...)
+//
+//    protected:
+//     // Must be the first member (or at least before any member that cares
+//     // about tasks) to be initialized first and destroyed last. protected
+//     // instead of private visibility will allow controlling the task
+//     // environment (e.g. clock) once such features are added (see design doc
+//     // below for details), until then it at least doesn't hurt :).
+//     base::test::ScopedTaskEnvironment scoped_task_environment_;
+//
+//     // Other members go here (or further below in private section.)
+//   };
+//
+// Design and future improvements documented in
+// https://docs.google.com/document/d/1QabRo8c7D9LsYY3cEcaPQbOCLo8Tu-6VLykYXyl3Pkk/edit
+class ScopedTaskEnvironment {
+ public:
+  enum class MainThreadType {
+    // The main thread doesn't pump system messages.
+    DEFAULT,
+    // The main thread doesn't pump system messages and uses a mock clock for
+    // delayed tasks (controllable via FastForward*() methods).
+    // TODO(gab): Make this the default |main_thread_type|.
+    // TODO(gab): Also mock the TaskScheduler's clock simultaneously (this
+    // currently only mocks the main thread's clock).
+    MOCK_TIME,
+    // The main thread pumps UI messages.
+    UI,
+    // The main thread pumps asynchronous IO messages and supports the
+    // FileDescriptorWatcher API on POSIX.
+    IO,
+  };
+
+  enum class ExecutionMode {
+    // Tasks are queued and only executed when RunUntilIdle() is explicitly
+    // called.
+    QUEUED,
+    // Tasks run as they are posted. RunUntilIdle() can still be used to block
+    // until done.
+    ASYNC,
+  };
+
+  ScopedTaskEnvironment(
+      MainThreadType main_thread_type = MainThreadType::DEFAULT,
+      ExecutionMode execution_control_mode = ExecutionMode::ASYNC);
+
+  // Waits until no undelayed TaskScheduler tasks remain. Then, unregisters the
+  // TaskScheduler and the (Thread|Sequenced)TaskRunnerHandle.
+  ~ScopedTaskEnvironment();
+
+  // Returns a TaskRunner that schedules tasks on the main thread.
+  scoped_refptr<base::SingleThreadTaskRunner> GetMainThreadTaskRunner();
+
+  // Returns whether the main thread's TaskRunner has pending tasks.
+  bool MainThreadHasPendingTask() const;
+
+  // Runs tasks until both the (Thread|Sequenced)TaskRunnerHandle and the
+  // TaskScheduler's non-delayed queues are empty.
+  void RunUntilIdle();
+
+  // Only valid for instances with a MOCK_TIME MainThreadType. Fast-forwards
+  // virtual time by |delta|, causing all tasks on the main thread with a
+  // remaining delay less than or equal to |delta| to be executed before this
+  // returns. |delta| must be non-negative.
+  // TODO(gab): Make this apply to TaskScheduler delayed tasks as well
+  // (currently only main thread time is mocked).
+  void FastForwardBy(TimeDelta delta);
+
+  // Only valid for instances with a MOCK_TIME MainThreadType.
+  // Short for FastForwardBy(TimeDelta::Max()).
+  void FastForwardUntilNoTasksRemain();
+
+  // Only valid for instances with a MOCK_TIME MainThreadType.  Returns a
+  // TickClock whose time is updated by FastForward(By|UntilNoTasksRemain).
+  const TickClock* GetMockTickClock();
+  std::unique_ptr<TickClock> DeprecatedGetMockTickClock();
+
+  // Only valid for instances with a MOCK_TIME MainThreadType.
+  // Returns the number of pending tasks of the main thread's TaskRunner.
+  size_t GetPendingMainThreadTaskCount() const;
+
+  // Only valid for instances with a MOCK_TIME MainThreadType.
+  // Returns the delay until the next delayed pending task of the main thread's
+  // TaskRunner.
+  TimeDelta NextMainThreadPendingTaskDelay() const;
+
+ private:
+  class TestTaskTracker;
+
+  const ExecutionMode execution_control_mode_;
+
+  // Exactly one of these will be non-null to provide the task environment on
+  // the main thread. Users of this class should NOT rely on the presence of a
+  // MessageLoop beyond (Thread|Sequenced)TaskRunnerHandle and RunLoop as
+  // the backing implementation of each MainThreadType may change over time.
+  const std::unique_ptr<MessageLoop> message_loop_;
+  const scoped_refptr<TestMockTimeTaskRunner> mock_time_task_runner_;
+
+  // Non-null in MOCK_TIME, where an explicit SequenceLocalStorageMap needs to
+  // be provided. TODO(gab): This can be removed once mock time support is added
+  // to MessageLoop directly.
+  const std::unique_ptr<internal::SequenceLocalStorageMap> slsm_for_mock_time_;
+  const std::unique_ptr<
+      internal::ScopedSetSequenceLocalStorageMapForCurrentThread>
+      slsm_registration_for_mock_time_;
+
+#if defined(OS_POSIX)
+  // Enables the FileDescriptorWatcher API iff |main_thread_type| is IO.
+  const std::unique_ptr<FileDescriptorWatcher> file_descriptor_watcher_;
+#endif
+
+  const TaskScheduler* task_scheduler_ = nullptr;
+
+  // Owned by |task_scheduler_|.
+  TestTaskTracker* const task_tracker_;
+
+  // Ensures destruction of lazy TaskRunners when this is destroyed.
+  internal::ScopedLazyTaskRunnerListForTesting
+      scoped_lazy_task_runner_list_for_testing_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedTaskEnvironment);
+};
+
+}  // namespace test
+}  // namespace base
+
+#endif  // BASE_TEST_SCOPED_TASK_ENVIRONMENT_H_
diff --git a/base/test/scoped_task_environment_unittest.cc b/base/test/scoped_task_environment_unittest.cc
new file mode 100644
index 0000000..478fa5e
--- /dev/null
+++ b/base/test/scoped_task_environment_unittest.cc
@@ -0,0 +1,324 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/scoped_task_environment.h"
+
+#include <memory>
+
+#include "base/atomicops.h"
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/synchronization/atomic_flag.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/task_scheduler/post_task.h"
+#include "base/test/test_timeouts.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/sequence_local_storage_slot.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/time/tick_clock.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_POSIX)
+#include <unistd.h>
+#include "base/files/file_descriptor_watcher_posix.h"
+#endif  // defined(OS_POSIX)
+
+namespace base {
+namespace test {
+
+namespace {
+
+class ScopedTaskEnvironmentTest
+    : public testing::TestWithParam<ScopedTaskEnvironment::MainThreadType> {};
+
+void VerifyRunUntilIdleDidNotReturnAndSetFlag(
+    AtomicFlag* run_until_idle_returned,
+    AtomicFlag* task_ran) {
+  EXPECT_FALSE(run_until_idle_returned->IsSet());
+  task_ran->Set();
+}
+
+void RunUntilIdleTest(
+    ScopedTaskEnvironment::MainThreadType main_thread_type,
+    ScopedTaskEnvironment::ExecutionMode execution_control_mode) {
+  AtomicFlag run_until_idle_returned;
+  ScopedTaskEnvironment scoped_task_environment(main_thread_type,
+                                                execution_control_mode);
+
+  AtomicFlag first_main_thread_task_ran;
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce(&VerifyRunUntilIdleDidNotReturnAndSetFlag,
+                          Unretained(&run_until_idle_returned),
+                          Unretained(&first_main_thread_task_ran)));
+
+  AtomicFlag first_task_scheduler_task_ran;
+  PostTask(FROM_HERE, BindOnce(&VerifyRunUntilIdleDidNotReturnAndSetFlag,
+                               Unretained(&run_until_idle_returned),
+                               Unretained(&first_task_scheduler_task_ran)));
+
+  AtomicFlag second_task_scheduler_task_ran;
+  AtomicFlag second_main_thread_task_ran;
+  PostTaskAndReply(FROM_HERE,
+                   BindOnce(&VerifyRunUntilIdleDidNotReturnAndSetFlag,
+                            Unretained(&run_until_idle_returned),
+                            Unretained(&second_task_scheduler_task_ran)),
+                   BindOnce(&VerifyRunUntilIdleDidNotReturnAndSetFlag,
+                            Unretained(&run_until_idle_returned),
+                            Unretained(&second_main_thread_task_ran)));
+
+  scoped_task_environment.RunUntilIdle();
+  run_until_idle_returned.Set();
+
+  EXPECT_TRUE(first_main_thread_task_ran.IsSet());
+  EXPECT_TRUE(first_task_scheduler_task_ran.IsSet());
+  EXPECT_TRUE(second_task_scheduler_task_ran.IsSet());
+  EXPECT_TRUE(second_main_thread_task_ran.IsSet());
+}
+
+}  // namespace
+
+TEST_P(ScopedTaskEnvironmentTest, QueuedRunUntilIdle) {
+  RunUntilIdleTest(GetParam(), ScopedTaskEnvironment::ExecutionMode::QUEUED);
+}
+
+TEST_P(ScopedTaskEnvironmentTest, AsyncRunUntilIdle) {
+  RunUntilIdleTest(GetParam(), ScopedTaskEnvironment::ExecutionMode::ASYNC);
+}
+
+// Verify that tasks posted to an ExecutionMode::QUEUED ScopedTaskEnvironment do
+// not run outside of RunUntilIdle().
+TEST_P(ScopedTaskEnvironmentTest, QueuedTasksDoNotRunOutsideOfRunUntilIdle) {
+  ScopedTaskEnvironment scoped_task_environment(
+      GetParam(), ScopedTaskEnvironment::ExecutionMode::QUEUED);
+
+  AtomicFlag run_until_idle_called;
+  PostTask(FROM_HERE, BindOnce(
+                          [](AtomicFlag* run_until_idle_called) {
+                            EXPECT_TRUE(run_until_idle_called->IsSet());
+                          },
+                          Unretained(&run_until_idle_called)));
+  PlatformThread::Sleep(TestTimeouts::tiny_timeout());
+  run_until_idle_called.Set();
+  scoped_task_environment.RunUntilIdle();
+
+  AtomicFlag other_run_until_idle_called;
+  PostTask(FROM_HERE, BindOnce(
+                          [](AtomicFlag* other_run_until_idle_called) {
+                            EXPECT_TRUE(other_run_until_idle_called->IsSet());
+                          },
+                          Unretained(&other_run_until_idle_called)));
+  PlatformThread::Sleep(TestTimeouts::tiny_timeout());
+  other_run_until_idle_called.Set();
+  scoped_task_environment.RunUntilIdle();
+}
+
+// Verify that a task posted to an ExecutionMode::ASYNC ScopedTaskEnvironment
+// can run without a call to RunUntilIdle().
+TEST_P(ScopedTaskEnvironmentTest, AsyncTasksRunAsTheyArePosted) {
+  ScopedTaskEnvironment scoped_task_environment(
+      GetParam(), ScopedTaskEnvironment::ExecutionMode::ASYNC);
+
+  WaitableEvent task_ran(WaitableEvent::ResetPolicy::MANUAL,
+                         WaitableEvent::InitialState::NOT_SIGNALED);
+  PostTask(FROM_HERE,
+           BindOnce([](WaitableEvent* task_ran) { task_ran->Signal(); },
+                    Unretained(&task_ran)));
+  task_ran.Wait();
+}
+
+// Verify that a task posted to an ExecutionMode::ASYNC ScopedTaskEnvironment
+// after a call to RunUntilIdle() can run without another call to
+// RunUntilIdle().
+TEST_P(ScopedTaskEnvironmentTest,
+       AsyncTasksRunAsTheyArePostedAfterRunUntilIdle) {
+  ScopedTaskEnvironment scoped_task_environment(
+      GetParam(), ScopedTaskEnvironment::ExecutionMode::ASYNC);
+
+  scoped_task_environment.RunUntilIdle();
+
+  WaitableEvent task_ran(WaitableEvent::ResetPolicy::MANUAL,
+                         WaitableEvent::InitialState::NOT_SIGNALED);
+  PostTask(FROM_HERE,
+           BindOnce([](WaitableEvent* task_ran) { task_ran->Signal(); },
+                    Unretained(&task_ran)));
+  task_ran.Wait();
+}
+
+TEST_P(ScopedTaskEnvironmentTest, DelayedTasks) {
+  // Use a QUEUED execution-mode environment, so that no tasks are actually
+  // executed until RunUntilIdle()/FastForwardBy() are invoked.
+  ScopedTaskEnvironment scoped_task_environment(
+      GetParam(), ScopedTaskEnvironment::ExecutionMode::QUEUED);
+
+  subtle::Atomic32 counter = 0;
+
+  constexpr base::TimeDelta kShortTaskDelay = TimeDelta::FromDays(1);
+  // Should run only in a MOCK_TIME environment, when time is fast-forwarded.
+  ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+      FROM_HERE,
+      Bind(
+          [](subtle::Atomic32* counter) {
+            subtle::NoBarrier_AtomicIncrement(counter, 4);
+          },
+          Unretained(&counter)),
+      kShortTaskDelay);
+  // TODO(gab): This currently doesn't run because the TaskScheduler's clock
+  // isn't mocked but it should be.
+  PostDelayedTask(FROM_HERE,
+                  Bind(
+                      [](subtle::Atomic32* counter) {
+                        subtle::NoBarrier_AtomicIncrement(counter, 128);
+                      },
+                      Unretained(&counter)),
+                  kShortTaskDelay);
+
+  constexpr base::TimeDelta kLongTaskDelay = TimeDelta::FromDays(7);
+  // Same as first task, longer delays to exercise
+  // FastForwardUntilNoTasksRemain().
+  ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+      FROM_HERE,
+      Bind(
+          [](subtle::Atomic32* counter) {
+            subtle::NoBarrier_AtomicIncrement(counter, 8);
+          },
+          Unretained(&counter)),
+      TimeDelta::FromDays(5));
+  ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+      FROM_HERE,
+      Bind(
+          [](subtle::Atomic32* counter) {
+            subtle::NoBarrier_AtomicIncrement(counter, 16);
+          },
+          Unretained(&counter)),
+      kLongTaskDelay);
+
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, Bind(
+                     [](subtle::Atomic32* counter) {
+                       subtle::NoBarrier_AtomicIncrement(counter, 1);
+                     },
+                     Unretained(&counter)));
+  PostTask(FROM_HERE, Bind(
+                          [](subtle::Atomic32* counter) {
+                            subtle::NoBarrier_AtomicIncrement(counter, 2);
+                          },
+                          Unretained(&counter)));
+
+  // This expectation will fail flakily if the preceding PostTask() is executed
+  // asynchronously, indicating a problem with the QUEUED execution mode.
+  int expected_value = 0;
+  EXPECT_EQ(expected_value, counter);
+
+  // RunUntilIdle() should process only non-delayed tasks, in all queues.
+  scoped_task_environment.RunUntilIdle();
+  expected_value += 1;
+  expected_value += 2;
+  EXPECT_EQ(expected_value, counter);
+
+  if (GetParam() == ScopedTaskEnvironment::MainThreadType::MOCK_TIME) {
+    // Delay shorter than the delay of the first posted task.
+    constexpr base::TimeDelta kInferiorTaskDelay = TimeDelta::FromSeconds(1);
+    static_assert(kInferiorTaskDelay < kShortTaskDelay,
+                  "|kInferiorTaskDelay| should be shorter than the first "
+                  "posted task's delay.");
+    scoped_task_environment.FastForwardBy(kInferiorTaskDelay);
+    EXPECT_EQ(expected_value, counter);
+
+    scoped_task_environment.FastForwardBy(kShortTaskDelay - kInferiorTaskDelay);
+    expected_value += 4;
+    EXPECT_EQ(expected_value, counter);
+
+    scoped_task_environment.FastForwardUntilNoTasksRemain();
+    expected_value += 8;
+    expected_value += 16;
+    EXPECT_EQ(expected_value, counter);
+  }
+}
+
+// Regression test for https://crbug.com/824770.
+TEST_P(ScopedTaskEnvironmentTest, SupportsSequenceLocalStorageOnMainThread) {
+  ScopedTaskEnvironment scoped_task_environment(
+      GetParam(), ScopedTaskEnvironment::ExecutionMode::ASYNC);
+
+  SequenceLocalStorageSlot<int> sls_slot;
+  sls_slot.Set(5);
+  EXPECT_EQ(5, sls_slot.Get());
+}
+
+#if defined(OS_POSIX)
+TEST_F(ScopedTaskEnvironmentTest, SupportsFileDescriptorWatcherOnIOMainThread) {
+  ScopedTaskEnvironment scoped_task_environment(
+      ScopedTaskEnvironment::MainThreadType::IO,
+      ScopedTaskEnvironment::ExecutionMode::ASYNC);
+
+  int pipe_fds[2];
+  ASSERT_EQ(0, pipe(pipe_fds));
+
+  RunLoop run_loop;
+
+  // The write end of a newly created pipe is immediately writable.
+  auto controller = FileDescriptorWatcher::WatchWritable(
+      pipe_fds[1], run_loop.QuitClosure());
+
+  // This will hang if the notification doesn't occur as expected.
+  run_loop.Run();
+}
+#endif  // defined(OS_POSIX)
+
+// Verify that the TickClock returned by
+// |ScopedTaskEnvironment::GetMockTickClock| gets updated when the
+// FastForward(By|UntilNoTasksRemain) functions are called.
+TEST_F(ScopedTaskEnvironmentTest, FastForwardAdvanceTickClock) {
+  // Use a QUEUED execution-mode environment, so that no tasks are actually
+  // executed until RunUntilIdle()/FastForwardBy() are invoked.
+  ScopedTaskEnvironment scoped_task_environment(
+      ScopedTaskEnvironment::MainThreadType::MOCK_TIME,
+      ScopedTaskEnvironment::ExecutionMode::QUEUED);
+
+  constexpr base::TimeDelta kShortTaskDelay = TimeDelta::FromDays(1);
+  ThreadTaskRunnerHandle::Get()->PostDelayedTask(FROM_HERE, base::DoNothing(),
+                                                 kShortTaskDelay);
+
+  constexpr base::TimeDelta kLongTaskDelay = TimeDelta::FromDays(7);
+  ThreadTaskRunnerHandle::Get()->PostDelayedTask(FROM_HERE, base::DoNothing(),
+                                                 kLongTaskDelay);
+
+  const base::TickClock* tick_clock =
+      scoped_task_environment.GetMockTickClock();
+  base::TimeTicks tick_clock_ref = tick_clock->NowTicks();
+
+  // Make sure that |FastForwardBy| advances the clock.
+  scoped_task_environment.FastForwardBy(kShortTaskDelay);
+  EXPECT_EQ(kShortTaskDelay, tick_clock->NowTicks() - tick_clock_ref);
+
+  // Make sure that |FastForwardUntilNoTasksRemain| advances the clock.
+  scoped_task_environment.FastForwardUntilNoTasksRemain();
+  EXPECT_EQ(kLongTaskDelay, tick_clock->NowTicks() - tick_clock_ref);
+
+  // Fast-forwarding to a time at which there are no tasks should also advance
+  // the clock.
+  scoped_task_environment.FastForwardBy(kLongTaskDelay);
+  EXPECT_EQ(kLongTaskDelay * 2, tick_clock->NowTicks() - tick_clock_ref);
+}
+
+INSTANTIATE_TEST_CASE_P(
+    MainThreadDefault,
+    ScopedTaskEnvironmentTest,
+    ::testing::Values(ScopedTaskEnvironment::MainThreadType::DEFAULT));
+INSTANTIATE_TEST_CASE_P(
+    MainThreadMockTime,
+    ScopedTaskEnvironmentTest,
+    ::testing::Values(ScopedTaskEnvironment::MainThreadType::MOCK_TIME));
+INSTANTIATE_TEST_CASE_P(
+    MainThreadUI,
+    ScopedTaskEnvironmentTest,
+    ::testing::Values(ScopedTaskEnvironment::MainThreadType::UI));
+INSTANTIATE_TEST_CASE_P(
+    MainThreadIO,
+    ScopedTaskEnvironmentTest,
+    ::testing::Values(ScopedTaskEnvironment::MainThreadType::IO));
+
+}  // namespace test
+}  // namespace base
diff --git a/base/test/sequenced_task_runner_test_template.cc b/base/test/sequenced_task_runner_test_template.cc
new file mode 100644
index 0000000..de68492
--- /dev/null
+++ b/base/test/sequenced_task_runner_test_template.cc
@@ -0,0 +1,269 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/sequenced_task_runner_test_template.h"
+
+#include <ostream>
+
+#include "base/location.h"
+
+namespace base {
+
+namespace internal {
+
+TaskEvent::TaskEvent(int i, Type type)
+  : i(i), type(type) {
+}
+
+SequencedTaskTracker::SequencedTaskTracker()
+    : next_post_i_(0),
+      task_end_count_(0),
+      task_end_cv_(&lock_) {
+}
+
+void SequencedTaskTracker::PostWrappedNonNestableTask(
+    SequencedTaskRunner* task_runner,
+    const Closure& task) {
+  AutoLock event_lock(lock_);
+  const int post_i = next_post_i_++;
+  Closure wrapped_task = Bind(&SequencedTaskTracker::RunTask, this,
+                              task, post_i);
+  task_runner->PostNonNestableTask(FROM_HERE, wrapped_task);
+  TaskPosted(post_i);
+}
+
+void SequencedTaskTracker::PostWrappedNestableTask(
+    SequencedTaskRunner* task_runner,
+    const Closure& task) {
+  AutoLock event_lock(lock_);
+  const int post_i = next_post_i_++;
+  Closure wrapped_task = Bind(&SequencedTaskTracker::RunTask, this,
+                              task, post_i);
+  task_runner->PostTask(FROM_HERE, wrapped_task);
+  TaskPosted(post_i);
+}
+
+void SequencedTaskTracker::PostWrappedDelayedNonNestableTask(
+    SequencedTaskRunner* task_runner,
+    const Closure& task,
+    TimeDelta delay) {
+  AutoLock event_lock(lock_);
+  const int post_i = next_post_i_++;
+  Closure wrapped_task = Bind(&SequencedTaskTracker::RunTask, this,
+                              task, post_i);
+  task_runner->PostNonNestableDelayedTask(FROM_HERE, wrapped_task, delay);
+  TaskPosted(post_i);
+}
+
+void SequencedTaskTracker::PostNonNestableTasks(
+    SequencedTaskRunner* task_runner,
+    int task_count) {
+  for (int i = 0; i < task_count; ++i) {
+    PostWrappedNonNestableTask(task_runner, Closure());
+  }
+}
+
+void SequencedTaskTracker::RunTask(const Closure& task, int task_i) {
+  TaskStarted(task_i);
+  if (!task.is_null())
+    task.Run();
+  TaskEnded(task_i);
+}
+
+void SequencedTaskTracker::TaskPosted(int i) {
+  // Caller must own |lock_|.
+  events_.push_back(TaskEvent(i, TaskEvent::POST));
+}
+
+void SequencedTaskTracker::TaskStarted(int i) {
+  AutoLock lock(lock_);
+  events_.push_back(TaskEvent(i, TaskEvent::START));
+}
+
+void SequencedTaskTracker::TaskEnded(int i) {
+  AutoLock lock(lock_);
+  events_.push_back(TaskEvent(i, TaskEvent::END));
+  ++task_end_count_;
+  task_end_cv_.Signal();
+}
+
+const std::vector<TaskEvent>&
+SequencedTaskTracker::GetTaskEvents() const {
+  return events_;
+}
+
+void SequencedTaskTracker::WaitForCompletedTasks(int count) {
+  AutoLock lock(lock_);
+  while (task_end_count_ < count)
+    task_end_cv_.Wait();
+}
+
+SequencedTaskTracker::~SequencedTaskTracker() = default;
+
+void PrintTo(const TaskEvent& event, std::ostream* os) {
+  *os << "(i=" << event.i << ", type=";
+  switch (event.type) {
+    case TaskEvent::POST: *os << "POST"; break;
+    case TaskEvent::START: *os << "START"; break;
+    case TaskEvent::END: *os << "END"; break;
+  }
+  *os << ")";
+}
+
+namespace {
+
+// Returns the task ordinals for the task event type |type| in the order that
+// they were recorded.
+std::vector<int> GetEventTypeOrder(const std::vector<TaskEvent>& events,
+                                   TaskEvent::Type type) {
+  std::vector<int> tasks;
+  std::vector<TaskEvent>::const_iterator event;
+  for (event = events.begin(); event != events.end(); ++event) {
+    if (event->type == type)
+      tasks.push_back(event->i);
+  }
+  return tasks;
+}
+
+// Returns all task events for task |task_i|.
+std::vector<TaskEvent::Type> GetEventsForTask(
+    const std::vector<TaskEvent>& events,
+    int task_i) {
+  std::vector<TaskEvent::Type> task_event_orders;
+  std::vector<TaskEvent>::const_iterator event;
+  for (event = events.begin(); event != events.end(); ++event) {
+    if (event->i == task_i)
+      task_event_orders.push_back(event->type);
+  }
+  return task_event_orders;
+}
+
+// Checks that the task events for each task in |events| occur in the order
+// {POST, START, END}, and that there is only one instance of each event type
+// per task.
+::testing::AssertionResult CheckEventOrdersForEachTask(
+    const std::vector<TaskEvent>& events,
+    int task_count) {
+  std::vector<TaskEvent::Type> expected_order;
+  expected_order.push_back(TaskEvent::POST);
+  expected_order.push_back(TaskEvent::START);
+  expected_order.push_back(TaskEvent::END);
+
+  // This is O(n^2), but it currently runs fast enough that it is not worth
+  // optimizing.
+  for (int i = 0; i < task_count; ++i) {
+    const std::vector<TaskEvent::Type> task_events =
+        GetEventsForTask(events, i);
+    if (task_events != expected_order) {
+      return ::testing::AssertionFailure()
+          << "Events for task " << i << " are out of order; expected: "
+          << ::testing::PrintToString(expected_order) << "; actual: "
+          << ::testing::PrintToString(task_events);
+    }
+  }
+  return ::testing::AssertionSuccess();
+}
+
+// Checks that no two tasks were running at the same time. I.e. the only
+// events allowed between the START and END of a task are the POSTs of other
+// tasks.
+::testing::AssertionResult CheckNoTaskRunsOverlap(
+    const std::vector<TaskEvent>& events) {
+  // If > -1, we're currently inside a START, END pair.
+  int current_task_i = -1;
+
+  std::vector<TaskEvent>::const_iterator event;
+  for (event = events.begin(); event != events.end(); ++event) {
+    bool spurious_event_found = false;
+
+    if (current_task_i == -1) {  // Not inside a START, END pair.
+      switch (event->type) {
+        case TaskEvent::POST:
+          break;
+        case TaskEvent::START:
+          current_task_i = event->i;
+          break;
+        case TaskEvent::END:
+          spurious_event_found = true;
+          break;
+      }
+
+    } else {  // Inside a START, END pair.
+      bool interleaved_task_detected = false;
+
+      switch (event->type) {
+        case TaskEvent::POST:
+          if (event->i == current_task_i)
+            spurious_event_found = true;
+          break;
+        case TaskEvent::START:
+          interleaved_task_detected = true;
+          break;
+        case TaskEvent::END:
+          if (event->i != current_task_i)
+            interleaved_task_detected = true;
+          else
+            current_task_i = -1;
+          break;
+      }
+
+      if (interleaved_task_detected) {
+        return ::testing::AssertionFailure()
+            << "Found event " << ::testing::PrintToString(*event)
+            << " between START and END events for task " << current_task_i
+            << "; event dump: " << ::testing::PrintToString(events);
+      }
+    }
+
+    if (spurious_event_found) {
+      const int event_i = event - events.begin();
+      return ::testing::AssertionFailure()
+          << "Spurious event " << ::testing::PrintToString(*event)
+          << " at position " << event_i << "; event dump: "
+          << ::testing::PrintToString(events);
+    }
+  }
+
+  return ::testing::AssertionSuccess();
+}
+
+}  // namespace
+
+::testing::AssertionResult CheckNonNestableInvariants(
+    const std::vector<TaskEvent>& events,
+    int task_count) {
+  const std::vector<int> post_order =
+      GetEventTypeOrder(events, TaskEvent::POST);
+  const std::vector<int> start_order =
+      GetEventTypeOrder(events, TaskEvent::START);
+  const std::vector<int> end_order =
+      GetEventTypeOrder(events, TaskEvent::END);
+
+  if (start_order != post_order) {
+    return ::testing::AssertionFailure()
+        << "Expected START order (which equals actual POST order): \n"
+        << ::testing::PrintToString(post_order)
+        << "\n Actual START order:\n"
+        << ::testing::PrintToString(start_order);
+  }
+
+  if (end_order != post_order) {
+    return ::testing::AssertionFailure()
+        << "Expected END order (which equals actual POST order): \n"
+        << ::testing::PrintToString(post_order)
+        << "\n Actual END order:\n"
+        << ::testing::PrintToString(end_order);
+  }
+
+  const ::testing::AssertionResult result =
+      CheckEventOrdersForEachTask(events, task_count);
+  if (!result)
+    return result;
+
+  return CheckNoTaskRunsOverlap(events);
+}
+
+}  // namespace internal
+
+}  // namespace base
diff --git a/base/test/sequenced_task_runner_test_template.h b/base/test/sequenced_task_runner_test_template.h
new file mode 100644
index 0000000..a510030
--- /dev/null
+++ b/base/test/sequenced_task_runner_test_template.h
@@ -0,0 +1,350 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// SequencedTaskRunnerTest defines tests that implementations of
+// SequencedTaskRunner should pass in order to be conformant.
+// See task_runner_test_template.h for a description of how to use the
+// constructs in this file; these work the same.
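+//
+// As a sketch, conformance tests for an implementation are instantiated with
+// (delegate name hypothetical):
+//
+//   INSTANTIATE_TYPED_TEST_CASE_P(MySequencedTaskRunner,
+//                                 SequencedTaskRunnerTest,
+//                                 MySequencedTaskRunnerTestDelegate);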
+
+#ifndef BASE_TEST_SEQUENCED_TASK_RUNNER_TEST_TEMPLATE_H_
+#define BASE_TEST_SEQUENCED_TASK_RUNNER_TEST_TEMPLATE_H_
+
+#include <cstddef>
+#include <iosfwd>
+#include <vector>
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/sequenced_task_runner.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/synchronization/lock.h"
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace internal {
+
+struct TaskEvent {
+  enum Type { POST, START, END };
+  TaskEvent(int i, Type type);
+  int i;
+  Type type;
+};
+
+// Utility class used in the tests below.
+class SequencedTaskTracker : public RefCountedThreadSafe<SequencedTaskTracker> {
+ public:
+  SequencedTaskTracker();
+
+  // Posts the non-nestable task |task|, and records its post event.
+  void PostWrappedNonNestableTask(SequencedTaskRunner* task_runner,
+                                  const Closure& task);
+
+  // Posts the nestable task |task|, and records its post event.
+  void PostWrappedNestableTask(SequencedTaskRunner* task_runner,
+                               const Closure& task);
+
+  // Posts the delayed non-nestable task |task|, and records its post event.
+  void PostWrappedDelayedNonNestableTask(SequencedTaskRunner* task_runner,
+                                         const Closure& task,
+                                         TimeDelta delay);
+
+  // Posts |task_count| non-nestable tasks.
+  void PostNonNestableTasks(SequencedTaskRunner* task_runner, int task_count);
+
+  const std::vector<TaskEvent>& GetTaskEvents() const;
+
+  // Returns after the tracker observes a total of |count| task completions.
+  void WaitForCompletedTasks(int count);
+
+ private:
+  friend class RefCountedThreadSafe<SequencedTaskTracker>;
+
+  ~SequencedTaskTracker();
+
+  // A task which runs |task|, recording the start and end events.
+  void RunTask(const Closure& task, int task_i);
+
+  // Records a post event for task |i|. The owner is expected to be holding
+  // |lock_| (unlike |TaskStarted| and |TaskEnded|).
+  void TaskPosted(int i);
+
+  // Records a start event for task |i|.
+  void TaskStarted(int i);
+
+  // Records an end event for task |i|.
+  void TaskEnded(int i);
+
+  // Protects events_, next_post_i_, task_end_count_ and task_end_cv_.
+  Lock lock_;
+
+  // The events as they occurred for each task (protected by lock_).
+  std::vector<TaskEvent> events_;
+
+  // The ordinal to be used for the next task-posting task (protected by
+  // lock_).
+  int next_post_i_;
+
+  // The number of task end events we've received.
+  int task_end_count_;
+  ConditionVariable task_end_cv_;
+
+  DISALLOW_COPY_AND_ASSIGN(SequencedTaskTracker);
+};
+
+void PrintTo(const TaskEvent& event, std::ostream* os);
+
+// Checks the non-nestable task invariants for all tasks in |events|.
+//
+// The invariants are:
+// 1) Events started and ended in the same order that they were posted.
+// 2) Events for an individual tasks occur in the order {POST, START, END},
+//    and there is only one instance of each event type for a task.
+// 3) The only events between a task's START and END events are the POSTs of
+//    other tasks. I.e. tasks were run sequentially, not interleaved.
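+//
+// E.g. for two tasks, the event sequence
+// {(0, POST), (1, POST), (0, START), (0, END), (1, START), (1, END)}
+// satisfies all three invariants.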
+::testing::AssertionResult CheckNonNestableInvariants(
+    const std::vector<TaskEvent>& events,
+    int task_count);
+
+}  // namespace internal
+
+template <typename TaskRunnerTestDelegate>
+class SequencedTaskRunnerTest : public testing::Test {
+ protected:
+  SequencedTaskRunnerTest()
+      : task_tracker_(new internal::SequencedTaskTracker()) {}
+
+  const scoped_refptr<internal::SequencedTaskTracker> task_tracker_;
+  TaskRunnerTestDelegate delegate_;
+};
+
+TYPED_TEST_CASE_P(SequencedTaskRunnerTest);
+
+// This test posts N non-nestable tasks in sequence, and expects them to run
+// in FIFO order, with no part of any two tasks' execution
+// overlapping. I.e. that each task starts only after the previously-posted
+// one has finished.
+TYPED_TEST_P(SequencedTaskRunnerTest, SequentialNonNestable) {
+  const int kTaskCount = 1000;
+
+  this->delegate_.StartTaskRunner();
+  const scoped_refptr<SequencedTaskRunner> task_runner =
+      this->delegate_.GetTaskRunner();
+
+  this->task_tracker_->PostWrappedNonNestableTask(
+      task_runner.get(),
+      Bind(&PlatformThread::Sleep, TimeDelta::FromSeconds(1)));
+  for (int i = 1; i < kTaskCount; ++i) {
+    this->task_tracker_->PostWrappedNonNestableTask(task_runner.get(),
+                                                    Closure());
+  }
+
+  this->delegate_.StopTaskRunner();
+
+  EXPECT_TRUE(CheckNonNestableInvariants(this->task_tracker_->GetTaskEvents(),
+                                         kTaskCount));
+}
+
+// This test posts N nestable tasks in sequence. It has the same expectations
+// as SequentialNonNestable because even though the tasks are nestable, they
+// are not actually run nested in this case.
+TYPED_TEST_P(SequencedTaskRunnerTest, SequentialNestable) {
+  const int kTaskCount = 1000;
+
+  this->delegate_.StartTaskRunner();
+  const scoped_refptr<SequencedTaskRunner> task_runner =
+      this->delegate_.GetTaskRunner();
+
+  this->task_tracker_->PostWrappedNestableTask(
+      task_runner.get(),
+      Bind(&PlatformThread::Sleep, TimeDelta::FromSeconds(1)));
+  for (int i = 1; i < kTaskCount; ++i) {
+    this->task_tracker_->PostWrappedNestableTask(task_runner.get(), Closure());
+  }
+
+  this->delegate_.StopTaskRunner();
+
+  EXPECT_TRUE(CheckNonNestableInvariants(this->task_tracker_->GetTaskEvents(),
+                                         kTaskCount));
+}
+
+// This test posts non-nestable tasks in order of increasing delay, and checks
+// that the tasks are run in FIFO order and that there is no execution
+// overlap whatsoever between any two tasks.
+TYPED_TEST_P(SequencedTaskRunnerTest, SequentialDelayedNonNestable) {
+  const int kTaskCount = 20;
+  const int kDelayIncrementMs = 50;
+
+  this->delegate_.StartTaskRunner();
+  const scoped_refptr<SequencedTaskRunner> task_runner =
+      this->delegate_.GetTaskRunner();
+
+  for (int i = 0; i < kTaskCount; ++i) {
+    this->task_tracker_->PostWrappedDelayedNonNestableTask(
+        task_runner.get(), Closure(),
+        TimeDelta::FromMilliseconds(kDelayIncrementMs * i));
+  }
+
+  this->task_tracker_->WaitForCompletedTasks(kTaskCount);
+  this->delegate_.StopTaskRunner();
+
+  EXPECT_TRUE(CheckNonNestableInvariants(this->task_tracker_->GetTaskEvents(),
+                                         kTaskCount));
+}
+
+// This test posts a fast, non-nestable task from within each of a number of
+// slow, non-nestable tasks and checks that they all run in the sequence they
+// were posted in and that there is no execution overlap whatsoever.
+TYPED_TEST_P(SequencedTaskRunnerTest, NonNestablePostFromNonNestableTask) {
+  const int kParentCount = 10;
+  const int kChildrenPerParent = 10;
+
+  this->delegate_.StartTaskRunner();
+  const scoped_refptr<SequencedTaskRunner> task_runner =
+      this->delegate_.GetTaskRunner();
+
+  for (int i = 0; i < kParentCount; ++i) {
+    Closure task = Bind(
+        &internal::SequencedTaskTracker::PostNonNestableTasks,
+        this->task_tracker_,
+        RetainedRef(task_runner),
+        kChildrenPerParent);
+    this->task_tracker_->PostWrappedNonNestableTask(task_runner.get(), task);
+  }
+
+  this->delegate_.StopTaskRunner();
+
+  EXPECT_TRUE(CheckNonNestableInvariants(
+      this->task_tracker_->GetTaskEvents(),
+      kParentCount * (kChildrenPerParent + 1)));
+}
+
+// This test posts two tasks with the same delay, and checks that the tasks are
+// run in the order in which they were posted.
+//
+// NOTE: This is actually an approximate test since the API only takes a
+// "delay" parameter, so we are not exactly simulating two tasks that get
+// posted at the exact same time. It would be nice if the API allowed us to
+// specify the desired run time.
+TYPED_TEST_P(SequencedTaskRunnerTest, DelayedTasksSameDelay) {
+  const int kTaskCount = 2;
+  const TimeDelta kDelay = TimeDelta::FromMilliseconds(100);
+
+  this->delegate_.StartTaskRunner();
+  const scoped_refptr<SequencedTaskRunner> task_runner =
+      this->delegate_.GetTaskRunner();
+
+  this->task_tracker_->PostWrappedDelayedNonNestableTask(task_runner.get(),
+                                                         Closure(), kDelay);
+  this->task_tracker_->PostWrappedDelayedNonNestableTask(task_runner.get(),
+                                                         Closure(), kDelay);
+  this->task_tracker_->WaitForCompletedTasks(kTaskCount);
+  this->delegate_.StopTaskRunner();
+
+  EXPECT_TRUE(CheckNonNestableInvariants(this->task_tracker_->GetTaskEvents(),
+                                         kTaskCount));
+}
+
+// This test posts a normal task and a delayed task, and checks that the
+// delayed task runs after the normal task even if the normal task takes
+// a long time to run.
+TYPED_TEST_P(SequencedTaskRunnerTest, DelayedTaskAfterLongTask) {
+  const int kTaskCount = 2;
+
+  this->delegate_.StartTaskRunner();
+  const scoped_refptr<SequencedTaskRunner> task_runner =
+      this->delegate_.GetTaskRunner();
+
+  this->task_tracker_->PostWrappedNonNestableTask(
+      task_runner.get(),
+      base::Bind(&PlatformThread::Sleep, TimeDelta::FromMilliseconds(50)));
+  this->task_tracker_->PostWrappedDelayedNonNestableTask(
+      task_runner.get(), Closure(), TimeDelta::FromMilliseconds(10));
+  this->task_tracker_->WaitForCompletedTasks(kTaskCount);
+  this->delegate_.StopTaskRunner();
+
+  EXPECT_TRUE(CheckNonNestableInvariants(this->task_tracker_->GetTaskEvents(),
+                                         kTaskCount));
+}
+
+// Test that a pile of normal tasks and a delayed task run in the
+// time-to-run order.
+TYPED_TEST_P(SequencedTaskRunnerTest, DelayedTaskAfterManyLongTasks) {
+  const int kTaskCount = 11;
+
+  this->delegate_.StartTaskRunner();
+  const scoped_refptr<SequencedTaskRunner> task_runner =
+      this->delegate_.GetTaskRunner();
+
+  for (int i = 0; i < kTaskCount - 1; i++) {
+    this->task_tracker_->PostWrappedNonNestableTask(
+        task_runner.get(),
+        base::Bind(&PlatformThread::Sleep, TimeDelta::FromMilliseconds(50)));
+  }
+  this->task_tracker_->PostWrappedDelayedNonNestableTask(
+      task_runner.get(), Closure(), TimeDelta::FromMilliseconds(10));
+  this->task_tracker_->WaitForCompletedTasks(kTaskCount);
+  this->delegate_.StopTaskRunner();
+
+  EXPECT_TRUE(CheckNonNestableInvariants(this->task_tracker_->GetTaskEvents(),
+                                         kTaskCount));
+}
+
+
+// TODO(francoisk777@gmail.com) Add a test, similar to the above, which runs
+// some tasks nested (which should be implemented in the test
+// delegate). Also add, to the test delegate, a predicate which checks
+// whether the implementation supports nested tasks.
+//
+
+// The SequencedTaskRunnerTest test case verifies behaviour that is expected
+// from a sequenced task runner in order to be conformant.
+REGISTER_TYPED_TEST_CASE_P(SequencedTaskRunnerTest,
+                           SequentialNonNestable,
+                           SequentialNestable,
+                           SequentialDelayedNonNestable,
+                           NonNestablePostFromNonNestableTask,
+                           DelayedTasksSameDelay,
+                           DelayedTaskAfterLongTask,
+                           DelayedTaskAfterManyLongTasks);
+
+template <typename TaskRunnerTestDelegate>
+class SequencedTaskRunnerDelayedTest
+    : public SequencedTaskRunnerTest<TaskRunnerTestDelegate> {};
+
+TYPED_TEST_CASE_P(SequencedTaskRunnerDelayedTest);
+
+// This test posts a delayed task, and checks that the task is run later than
+// the specified time.
+TYPED_TEST_P(SequencedTaskRunnerDelayedTest, DelayedTaskBasic) {
+  const int kTaskCount = 1;
+  const TimeDelta kDelay = TimeDelta::FromMilliseconds(100);
+
+  this->delegate_.StartTaskRunner();
+  const scoped_refptr<SequencedTaskRunner> task_runner =
+      this->delegate_.GetTaskRunner();
+
+  Time time_before_run = Time::Now();
+  this->task_tracker_->PostWrappedDelayedNonNestableTask(task_runner.get(),
+                                                         Closure(), kDelay);
+  this->task_tracker_->WaitForCompletedTasks(kTaskCount);
+  this->delegate_.StopTaskRunner();
+  Time time_after_run = Time::Now();
+
+  EXPECT_TRUE(CheckNonNestableInvariants(this->task_tracker_->GetTaskEvents(),
+                                         kTaskCount));
+  EXPECT_LE(kDelay, time_after_run - time_before_run);
+}
+
+// SequencedTaskRunnerDelayedTest tests that the |delay| parameter of delayed
+// task posting is used to actually wait at least |delay| before executing the
+// task.
+// This is not mandatory for a SequencedTaskRunner to be compliant.
+REGISTER_TYPED_TEST_CASE_P(SequencedTaskRunnerDelayedTest, DelayedTaskBasic);
+
+}  // namespace base
+
+#endif  // BASE_TEST_SEQUENCED_TASK_RUNNER_TEST_TEMPLATE_H_
diff --git a/base/test/simple_test_clock.cc b/base/test/simple_test_clock.cc
new file mode 100644
index 0000000..7486d79
--- /dev/null
+++ b/base/test/simple_test_clock.cc
@@ -0,0 +1,28 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/simple_test_clock.h"
+
+namespace base {
+
+SimpleTestClock::SimpleTestClock() = default;
+
+SimpleTestClock::~SimpleTestClock() = default;
+
+Time SimpleTestClock::Now() const {
+  AutoLock lock(lock_);
+  return now_;
+}
+
+void SimpleTestClock::Advance(TimeDelta delta) {
+  AutoLock lock(lock_);
+  now_ += delta;
+}
+
+void SimpleTestClock::SetNow(Time now) {
+  AutoLock lock(lock_);
+  now_ = now;
+}
+
+}  // namespace base
diff --git a/base/test/simple_test_clock.h b/base/test/simple_test_clock.h
new file mode 100644
index 0000000..0cbcf08
--- /dev/null
+++ b/base/test/simple_test_clock.h
@@ -0,0 +1,41 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_SIMPLE_TEST_CLOCK_H_
+#define BASE_TEST_SIMPLE_TEST_CLOCK_H_
+
+#include "base/compiler_specific.h"
+#include "base/synchronization/lock.h"
+#include "base/time/clock.h"
+#include "base/time/time.h"
+
+namespace base {
+
+// SimpleTestClock is a Clock implementation that gives control over
+// the returned Time objects.  All methods may be called from any
+// thread.
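+//
+// A usage sketch (|MyThing| is a hypothetical consumer that takes a
+// base::Clock*):
+//
+//   SimpleTestClock clock;
+//   clock.SetNow(Time::Now());
+//   MyThing thing(&clock);
+//   clock.Advance(TimeDelta::FromMinutes(5));  // thing now sees a later Now().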
+class SimpleTestClock : public Clock {
+ public:
+  // Starts off with a clock set to Time().
+  SimpleTestClock();
+  ~SimpleTestClock() override;
+
+  Time Now() const override;
+
+  // Advances the clock by |delta|.
+  void Advance(TimeDelta delta);
+
+  // Sets the clock to the given time.
+  void SetNow(Time now);
+
+ private:
+  // Protects |now_|.
+  mutable Lock lock_;
+
+  Time now_;
+};
+
+}  // namespace base
+
+#endif  // BASE_TEST_SIMPLE_TEST_CLOCK_H_
diff --git a/base/test/simple_test_tick_clock.cc b/base/test/simple_test_tick_clock.cc
new file mode 100644
index 0000000..7ee3401
--- /dev/null
+++ b/base/test/simple_test_tick_clock.cc
@@ -0,0 +1,31 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/simple_test_tick_clock.h"
+
+#include "base/logging.h"
+
+namespace base {
+
+SimpleTestTickClock::SimpleTestTickClock() = default;
+
+SimpleTestTickClock::~SimpleTestTickClock() = default;
+
+TimeTicks SimpleTestTickClock::NowTicks() const {
+  AutoLock lock(lock_);
+  return now_ticks_;
+}
+
+void SimpleTestTickClock::Advance(TimeDelta delta) {
+  AutoLock lock(lock_);
+  DCHECK(delta >= TimeDelta());
+  now_ticks_ += delta;
+}
+
+void SimpleTestTickClock::SetNowTicks(TimeTicks ticks) {
+  AutoLock lock(lock_);
+  now_ticks_ = ticks;
+}
+
+}  // namespace base
diff --git a/base/test/simple_test_tick_clock.h b/base/test/simple_test_tick_clock.h
new file mode 100644
index 0000000..923eba4
--- /dev/null
+++ b/base/test/simple_test_tick_clock.h
@@ -0,0 +1,41 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_SIMPLE_TEST_TICK_CLOCK_H_
+#define BASE_TEST_SIMPLE_TEST_TICK_CLOCK_H_
+
+#include "base/compiler_specific.h"
+#include "base/synchronization/lock.h"
+#include "base/time/tick_clock.h"
+#include "base/time/time.h"
+
+namespace base {
+
+// SimpleTestTickClock is a TickClock implementation that gives
+// control over the returned TimeTicks objects.  All methods may be
+// called from any thread.
+class SimpleTestTickClock : public TickClock {
+ public:
+  // Starts off with a clock set to TimeTicks().
+  SimpleTestTickClock();
+  ~SimpleTestTickClock() override;
+
+  TimeTicks NowTicks() const override;
+
+  // Advances the clock by |delta|, which must not be negative.
+  void Advance(TimeDelta delta);
+
+  // Sets the clock to the given time.
+  void SetNowTicks(TimeTicks ticks);
+
+ private:
+  // Protects |now_ticks_|.
+  mutable Lock lock_;
+
+  TimeTicks now_ticks_;
+};
+
+}  // namespace base
+
+#endif  // BASE_TEST_SIMPLE_TEST_TICK_CLOCK_H_
diff --git a/base/test/task_runner_test_template.cc b/base/test/task_runner_test_template.cc
new file mode 100644
index 0000000..fe70247
--- /dev/null
+++ b/base/test/task_runner_test_template.cc
@@ -0,0 +1,47 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/task_runner_test_template.h"
+
+namespace base {
+
+namespace test {
+
+TaskTracker::TaskTracker() : task_runs_(0), task_runs_cv_(&lock_) {}
+
+TaskTracker::~TaskTracker() = default;
+
+Closure TaskTracker::WrapTask(const Closure& task, int i) {
+  return Bind(&TaskTracker::RunTask, this, task, i);
+}
+
+void TaskTracker::RunTask(const Closure& task, int i) {
+  AutoLock lock(lock_);
+  if (!task.is_null()) {
+    task.Run();
+  }
+  ++task_run_counts_[i];
+  ++task_runs_;
+  task_runs_cv_.Signal();
+}
+
+std::map<int, int> TaskTracker::GetTaskRunCounts() const {
+  AutoLock lock(lock_);
+  return task_run_counts_;
+}
+
+void TaskTracker::WaitForCompletedTasks(int count) {
+  AutoLock lock(lock_);
+  while (task_runs_ < count)
+    task_runs_cv_.Wait();
+}
+
+void ExpectRunsTasksInCurrentSequence(bool expected_value,
+                                      TaskRunner* task_runner) {
+  EXPECT_EQ(expected_value, task_runner->RunsTasksInCurrentSequence());
+}
+
+}  // namespace test
+
+}  // namespace base
diff --git a/base/test/task_runner_test_template.h b/base/test/task_runner_test_template.h
new file mode 100644
index 0000000..4670522
--- /dev/null
+++ b/base/test/task_runner_test_template.h
@@ -0,0 +1,230 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file defines tests that implementations of TaskRunner should
+// pass in order to be conformant, as well as test cases for optional behavior.
+// Here's how you use it to test your implementation.
+//
+// Say your class is called MyTaskRunner.  Then you need to define a
+// class called MyTaskRunnerTestDelegate in my_task_runner_unittest.cc
+// like this:
+//
+//   class MyTaskRunnerTestDelegate {
+//    public:
+//     // Tasks posted to the task runner after this is called and before
+//     // StopTaskRunner() is called should run successfully.
+//     void StartTaskRunner() {
+//       ...
+//     }
+//
+//     // Should return the task runner implementation.  Only called
+//     // after StartTaskRunner and before StopTaskRunner.
+//     scoped_refptr<MyTaskRunner> GetTaskRunner() {
+//       ...
+//     }
+//
+//     // Stop the task runner and make sure all tasks posted before
+//     // this is called are run. Caveat: delayed tasks are not run,
+//     // they're simply deleted.
+//     void StopTaskRunner() {
+//       ...
+//     }
+//   };
+//
+// The TaskRunnerTest test harness will have a member variable of
+// this delegate type and will call its functions in the various
+// tests.
+//
+// Then you simply #include this file as well as gtest.h and add the
+// following statement to my_task_runner_unittest.cc:
+//
+//   INSTANTIATE_TYPED_TEST_CASE_P(
+//       MyTaskRunner, TaskRunnerTest, MyTaskRunnerTestDelegate);
+//
+// Easy!
+//
+// The optional test harness TaskRunnerAffinityTest can be
+// instantiated in the same way, using the same delegate:
+//
+//   INSTANTIATE_TYPED_TEST_CASE_P(
+//       MyTaskRunner, TaskRunnerAffinityTest, MyTaskRunnerTestDelegate);
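+//
+// A minimal delegate sketch backed by base::Thread (class and member names
+// hypothetical; shown only to make the contract above concrete):
+//
+//   class ThreadBackedTaskRunnerTestDelegate {
+//    public:
+//     void StartTaskRunner() {
+//       thread_ = std::make_unique<base::Thread>("TaskRunnerTest");
+//       CHECK(thread_->Start());
+//     }
+//
+//     scoped_refptr<base::TaskRunner> GetTaskRunner() {
+//       return thread_->task_runner();
+//     }
+//
+//     // base::Thread::Stop() runs already-posted tasks before joining.
+//     void StopTaskRunner() { thread_->Stop(); }
+//
+//    private:
+//     std::unique_ptr<base::Thread> thread_;
+//   };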
+
+
+#ifndef BASE_TEST_TASK_RUNNER_TEST_TEMPLATE_H_
+#define BASE_TEST_TASK_RUNNER_TEST_TEMPLATE_H_
+
+#include <cstddef>
+#include <map>
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/location.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/synchronization/lock.h"
+#include "base/task_runner.h"
+#include "base/threading/thread.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace test {
+
+// Utility class that keeps track of how many times particular tasks
+// are run.
+class TaskTracker : public RefCountedThreadSafe<TaskTracker> {
+ public:
+  TaskTracker();
+
+  // Returns a closure that runs the given task and increments the run
+  // count of |i| by one.  |task| may be null.  It is guaranteed that
+  // only one task wrapped by a given tracker will be run at a time.
+  Closure WrapTask(const Closure& task, int i);
+
+  std::map<int, int> GetTaskRunCounts() const;
+
+  // Returns after the tracker observes a total of |count| task completions.
+  void WaitForCompletedTasks(int count);
+
+ private:
+  friend class RefCountedThreadSafe<TaskTracker>;
+
+  ~TaskTracker();
+
+  void RunTask(const Closure& task, int i);
+
+  mutable Lock lock_;
+  std::map<int, int> task_run_counts_;
+  int task_runs_;
+  ConditionVariable task_runs_cv_;
+
+  DISALLOW_COPY_AND_ASSIGN(TaskTracker);
+};
+
+}  // namespace test
+
+template <typename TaskRunnerTestDelegate>
+class TaskRunnerTest : public testing::Test {
+ protected:
+  TaskRunnerTest() : task_tracker_(new test::TaskTracker()) {}
+
+  const scoped_refptr<test::TaskTracker> task_tracker_;
+  TaskRunnerTestDelegate delegate_;
+};
+
+TYPED_TEST_CASE_P(TaskRunnerTest);
+
+// We can't really test much, since TaskRunner provides very few
+// guarantees.
+
+// Post a bunch of tasks to the task runner.  They should all
+// complete.
+TYPED_TEST_P(TaskRunnerTest, Basic) {
+  std::map<int, int> expected_task_run_counts;
+
+  this->delegate_.StartTaskRunner();
+  scoped_refptr<TaskRunner> task_runner = this->delegate_.GetTaskRunner();
+  // Post each ith task i+1 times.
+  for (int i = 0; i < 20; ++i) {
+    const Closure& ith_task = this->task_tracker_->WrapTask(Closure(), i);
+    for (int j = 0; j < i + 1; ++j) {
+      task_runner->PostTask(FROM_HERE, ith_task);
+      ++expected_task_run_counts[i];
+    }
+  }
+  this->delegate_.StopTaskRunner();
+
+  EXPECT_EQ(expected_task_run_counts,
+            this->task_tracker_->GetTaskRunCounts());
+}
+
+// Post a bunch of delayed tasks to the task runner.  They should all
+// complete.
+TYPED_TEST_P(TaskRunnerTest, Delayed) {
+  std::map<int, int> expected_task_run_counts;
+  int expected_total_tasks = 0;
+
+  this->delegate_.StartTaskRunner();
+  scoped_refptr<TaskRunner> task_runner = this->delegate_.GetTaskRunner();
+  // Post each ith task i+1 times with delays from 0-i.
+  for (int i = 0; i < 20; ++i) {
+    const Closure& ith_task = this->task_tracker_->WrapTask(Closure(), i);
+    for (int j = 0; j < i + 1; ++j) {
+      task_runner->PostDelayedTask(
+          FROM_HERE, ith_task, base::TimeDelta::FromMilliseconds(j));
+      ++expected_task_run_counts[i];
+      ++expected_total_tasks;
+    }
+  }
+  this->task_tracker_->WaitForCompletedTasks(expected_total_tasks);
+  this->delegate_.StopTaskRunner();
+
+  EXPECT_EQ(expected_task_run_counts,
+            this->task_tracker_->GetTaskRunCounts());
+}
+
+// The TaskRunnerTest test case verifies behaviour that is expected from a
+// task runner in order to be conformant.
+REGISTER_TYPED_TEST_CASE_P(TaskRunnerTest, Basic, Delayed);
+
+namespace test {
+
+// Calls RunsTasksInCurrentSequence() on |task_runner| and expects it to
+// equal |expected_value|.
+void ExpectRunsTasksInCurrentSequence(bool expected_value,
+                                      TaskRunner* task_runner);
+
+}  // namespace test
+
+template <typename TaskRunnerTestDelegate>
+class TaskRunnerAffinityTest : public TaskRunnerTest<TaskRunnerTestDelegate> {};
+
+TYPED_TEST_CASE_P(TaskRunnerAffinityTest);
+
+// Post a bunch of tasks to the task runner as well as to a separate
+// thread, each checking the value of RunsTasksInCurrentSequence(),
+// which should return true for the tasks posted on the task runner
+// and false for the tasks posted on the separate thread.
+TYPED_TEST_P(TaskRunnerAffinityTest, RunsTasksInCurrentSequence) {
+  std::map<int, int> expected_task_run_counts;
+
+  Thread thread("Non-task-runner thread");
+  ASSERT_TRUE(thread.Start());
+  this->delegate_.StartTaskRunner();
+
+  scoped_refptr<TaskRunner> task_runner = this->delegate_.GetTaskRunner();
+  // Post each ith task i+1 times on the task runner and i+1 times on
+  // the non-task-runner thread.
+  for (int i = 0; i < 20; ++i) {
+    const Closure& ith_task_runner_task = this->task_tracker_->WrapTask(
+        Bind(&test::ExpectRunsTasksInCurrentSequence, true,
+             base::RetainedRef(task_runner)),
+        i);
+    const Closure& ith_non_task_runner_task = this->task_tracker_->WrapTask(
+        Bind(&test::ExpectRunsTasksInCurrentSequence, false,
+             base::RetainedRef(task_runner)),
+        i);
+    for (int j = 0; j < i + 1; ++j) {
+      task_runner->PostTask(FROM_HERE, ith_task_runner_task);
+      thread.task_runner()->PostTask(FROM_HERE, ith_non_task_runner_task);
+      expected_task_run_counts[i] += 2;
+    }
+  }
+
+  this->delegate_.StopTaskRunner();
+  thread.Stop();
+
+  EXPECT_EQ(expected_task_run_counts,
+            this->task_tracker_->GetTaskRunCounts());
+}
+
+// TaskRunnerAffinityTest tests that the TaskRunner implementation
+// can determine if tasks will never be run on a specific thread.
+REGISTER_TYPED_TEST_CASE_P(TaskRunnerAffinityTest, RunsTasksInCurrentSequence);
+
+}  // namespace base
+
+#endif  // BASE_TEST_TASK_RUNNER_TEST_TEMPLATE_H_
diff --git a/base/test/test_child_process.cc b/base/test/test_child_process.cc
new file mode 100644
index 0000000..ce15856
--- /dev/null
+++ b/base/test/test_child_process.cc
@@ -0,0 +1,43 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+// Simple testing command, used to exercise child process launcher calls.
+//
+// Usage:
+//        echo_test_helper [-x exit_code] arg0 arg1 arg2...
+//        Prints arg0..n to stdout with space delimiters between args,
+//        returning "exit_code" if -x is specified.
+//
+//        echo_test_helper -e env_var
+//        Prints the environment variable |env_var| to stdout.
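+//
+//        For example (hypothetical invocation):
+//        echo_test_helper -x 3 foo bar
+//        prints "foo bar" and exits with code 3.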
+int main(int argc, char** argv) {
+  if (argc < 2) {
+    return 1;
+  }
+
+  if (strcmp(argv[1], "-e") == 0) {
+    if (argc != 3) {
+      return 1;
+    }
+
+    const char* env = getenv(argv[2]);
+    if (env != NULL) {
+      printf("%s", env);
+    }
+  } else {
+    int return_code = 0;
+    int start_idx = 1;
+
+    if (strcmp(argv[1], "-x") == 0) {
+      return_code = atoi(argv[2]);
+      start_idx = 3;
+    }
+
+    for (int i = start_idx; i < argc; ++i) {
+      printf((i < argc - 1 ? "%s " : "%s"), argv[i]);
+    }
+
+    return return_code;
+  }
+}
diff --git a/base/test/test_discardable_memory_allocator.cc b/base/test/test_discardable_memory_allocator.cc
new file mode 100644
index 0000000..a9bd097
--- /dev/null
+++ b/base/test/test_discardable_memory_allocator.cc
@@ -0,0 +1,61 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/test_discardable_memory_allocator.h"
+
+#include <cstdint>
+#include <cstring>
+#include <memory>
+
+#include "base/logging.h"
+#include "base/memory/discardable_memory.h"
+#include "base/memory/ptr_util.h"
+
+namespace base {
+namespace {
+
+class DiscardableMemoryImpl : public DiscardableMemory {
+ public:
+  explicit DiscardableMemoryImpl(size_t size)
+      : data_(new uint8_t[size]), size_(size) {}
+
+  // Overridden from DiscardableMemory:
+  bool Lock() override {
+    DCHECK(!is_locked_);
+    is_locked_ = true;
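+    // Report that the memory was purged while unlocked, forcing clients to
+    // exercise their discard-handling path (see Unlock()).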
+    return false;
+  }
+
+  void Unlock() override {
+    DCHECK(is_locked_);
+    is_locked_ = false;
+    // Force eviction to catch clients not correctly checking the return value
+    // of Lock().
+    memset(data_.get(), 0, size_);
+  }
+
+  void* data() const override {
+    DCHECK(is_locked_);
+    return data_.get();
+  }
+
+  trace_event::MemoryAllocatorDump* CreateMemoryAllocatorDump(
+      const char* name,
+      trace_event::ProcessMemoryDump* pmd) const override {
+    return nullptr;
+  }
+
+ private:
+  bool is_locked_ = true;
+  std::unique_ptr<uint8_t[]> data_;
+  size_t size_;
+};
+
+}  // namespace
+
+std::unique_ptr<DiscardableMemory>
+TestDiscardableMemoryAllocator::AllocateLockedDiscardableMemory(size_t size) {
+  return std::make_unique<DiscardableMemoryImpl>(size);
+}
+
+}  // namespace base
diff --git a/base/test/test_discardable_memory_allocator.h b/base/test/test_discardable_memory_allocator.h
new file mode 100644
index 0000000..87436e3
--- /dev/null
+++ b/base/test/test_discardable_memory_allocator.h
@@ -0,0 +1,32 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_TEST_DISCARDABLE_MEMORY_ALLOCATOR_H_
+#define BASE_TEST_TEST_DISCARDABLE_MEMORY_ALLOCATOR_H_
+
+#include <stddef.h>
+
+#include <memory>
+
+#include "base/macros.h"
+#include "base/memory/discardable_memory_allocator.h"
+
+namespace base {
+
+// TestDiscardableMemoryAllocator is a simple DiscardableMemoryAllocator
+// implementation that can be used for testing. It allocates one-shot
+// DiscardableMemory instances backed by heap memory.
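+//
+// A usage sketch (SetInstance() is declared on DiscardableMemoryAllocator):
+//
+//   base::TestDiscardableMemoryAllocator test_allocator;
+//   base::DiscardableMemoryAllocator::SetInstance(&test_allocator);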
+class TestDiscardableMemoryAllocator : public DiscardableMemoryAllocator {
+ public:
+  constexpr TestDiscardableMemoryAllocator() = default;
+
+  // Overridden from DiscardableMemoryAllocator:
+  std::unique_ptr<DiscardableMemory> AllocateLockedDiscardableMemory(
+      size_t size) override;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(TestDiscardableMemoryAllocator);
+};
+
+}  // namespace base
+
+#endif  // BASE_TEST_TEST_DISCARDABLE_MEMORY_ALLOCATOR_H_
diff --git a/base/test/test_file_util.cc b/base/test/test_file_util.cc
new file mode 100644
index 0000000..8dafc58
--- /dev/null
+++ b/base/test/test_file_util.cc
@@ -0,0 +1,23 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/test_file_util.h"
+
+#include "base/test/test_timeouts.h"
+#include "base/threading/platform_thread.h"
+
+namespace base {
+
+bool EvictFileFromSystemCacheWithRetry(const FilePath& path) {
+  const int kCycles = 10;
+  const TimeDelta kDelay = TestTimeouts::action_timeout() / kCycles;
+  for (int i = 0; i < kCycles; i++) {
+    if (EvictFileFromSystemCache(path))
+      return true;
+    PlatformThread::Sleep(kDelay);
+  }
+  return false;
+}
+
+}  // namespace base
diff --git a/base/test/test_file_util.h b/base/test/test_file_util.h
new file mode 100644
index 0000000..d9172d7
--- /dev/null
+++ b/base/test/test_file_util.h
@@ -0,0 +1,82 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_TEST_FILE_UTIL_H_
+#define BASE_TEST_TEST_FILE_UTIL_H_
+
+// File utility functions used only by tests.
+
+#include <stddef.h>
+
+#include <string>
+
+#include "base/compiler_specific.h"
+#include "base/files/file_path.h"
+#include "base/macros.h"
+#include "build/build_config.h"
+
+#if defined(OS_ANDROID)
+#include <jni.h>
+#endif
+
+#if defined(OS_WIN)
+#include <windows.h>
+#endif
+
+namespace base {
+
+class FilePath;
+
+// Clears a specific file from the system cache, like EvictFileFromSystemCache,
+// but sleeps and retries on failure. On the Windows buildbots, eviction can
+// fail if the file is marked as in use, and this will throw off timings that
+// rely on uncached files.
+bool EvictFileFromSystemCacheWithRetry(const FilePath& file);
+
+// Wrapper over base::DeleteFile. On Windows, repeatedly invokes DeleteFile in
+// case of failure, to work around Windows file locking semantics. Returns true
+// on success.
+bool DieFileDie(const FilePath& file, bool recurse);
+
+// Clears a specific file from the system cache. After this call, trying
+// to access this file will result in a cold load from the hard drive.
+bool EvictFileFromSystemCache(const FilePath& file);
+
+#if defined(OS_WIN)
+// Denies |permission| on the file |path| for the current user. |permission| is
+// an ACCESS_MASK value, as defined in
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa374892.aspx
+// Refer to https://msdn.microsoft.com/en-us/library/aa822867.aspx for a list of
+// possible values.
+bool DenyFilePermission(const FilePath& path, DWORD permission);
+#endif  // defined(OS_WIN)
+
+// For testing, makes the file unreadable or unwritable.
+// On POSIX, this does not apply to the root user.
+bool MakeFileUnreadable(const FilePath& path) WARN_UNUSED_RESULT;
+bool MakeFileUnwritable(const FilePath& path) WARN_UNUSED_RESULT;
+
+// Saves the current permissions for a path, and restores them on destruction.
+class FilePermissionRestorer {
+ public:
+  explicit FilePermissionRestorer(const FilePath& path);
+  ~FilePermissionRestorer();
+
+ private:
+  const FilePath path_;
+  void* info_;  // The opaque stored permission information.
+  size_t length_;  // The length of the stored permission information.
+
+  DISALLOW_COPY_AND_ASSIGN(FilePermissionRestorer);
+};
+
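+// A typical usage sketch (illustrative):
+//
+//   {
+//     FilePermissionRestorer restorer(path);
+//     ASSERT_TRUE(MakeFileUnreadable(path));
+//     // ... exercise code that must cope with an unreadable file ...
+//   }  // The original permissions are restored when |restorer| is destroyed.
+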
+#if defined(OS_ANDROID)
+// Inserts an image file into the MediaStore, and retrieves the content URI,
+// for testing purposes.
+FilePath InsertImageIntoMediaStore(const FilePath& path);
+#endif  // defined(OS_ANDROID)
+
+}  // namespace base
+
+#endif  // BASE_TEST_TEST_FILE_UTIL_H_
diff --git a/base/test/test_file_util_android.cc b/base/test/test_file_util_android.cc
new file mode 100644
index 0000000..6e93e24
--- /dev/null
+++ b/base/test/test_file_util_android.cc
@@ -0,0 +1,26 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/test_file_util.h"
+
+#include "base/android/jni_android.h"
+#include "base/android/jni_string.h"
+#include "base/files/file_path.h"
+#include "jni/ContentUriTestUtils_jni.h"
+
+using base::android::ScopedJavaLocalRef;
+
+namespace base {
+
+FilePath InsertImageIntoMediaStore(const FilePath& path) {
+  JNIEnv* env = base::android::AttachCurrentThread();
+  ScopedJavaLocalRef<jstring> j_path =
+      base::android::ConvertUTF8ToJavaString(env, path.value());
+  ScopedJavaLocalRef<jstring> j_uri =
+      Java_ContentUriTestUtils_insertImageIntoMediaStore(env, j_path);
+  std::string uri = base::android::ConvertJavaStringToUTF8(j_uri);
+  return FilePath(uri);
+}
+
+}  // namespace base
diff --git a/base/test/test_file_util_linux.cc b/base/test/test_file_util_linux.cc
new file mode 100644
index 0000000..cf8b056
--- /dev/null
+++ b/base/test/test_file_util_linux.cc
@@ -0,0 +1,59 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/test_file_util.h"
+
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#if defined(OS_ANDROID)
+#include <asm/unistd.h>
+#include <errno.h>
+#include <linux/fadvise.h>
+#include <sys/syscall.h>
+#endif
+
+#include "base/files/file_path.h"
+#include "base/files/scoped_file.h"
+
+namespace base {
+
+// Inconveniently, the NDK doesn't provide posix_fadvise() until native API
+// level 21, which we don't target yet, so provide a wrapper, at least on
+// ARM32.
+#if defined(OS_ANDROID) && __ANDROID_API__ < 21
+
+namespace {
+int posix_fadvise(int fd, off_t offset, off_t len, int advice) {
+#if defined(ARCH_CPU_ARMEL)
+  // Note that the syscall argument order on ARM is different from the C
+  // function; this is helpfully documented in the Linux posix_fadvise manpage.
+  return syscall(__NR_arm_fadvise64_64, fd, advice,
+                 0,  // Upper 32-bits for offset
+                 offset,
+                 0,  // Upper 32-bits for length
+                 len);
+#else
+  NOTIMPLEMENTED();
+  return ENOSYS;
+#endif
+}
+
+}  // namespace
+
+#endif  // defined(OS_ANDROID) && __ANDROID_API__ < 21
+
+bool EvictFileFromSystemCache(const FilePath& file) {
+  ScopedFD fd(open(file.value().c_str(), O_RDONLY));
+  if (!fd.is_valid())
+    return false;
+  if (fdatasync(fd.get()) != 0)
+    return false;
+  if (posix_fadvise(fd.get(), 0, 0, POSIX_FADV_DONTNEED) != 0)
+    return false;
+  return true;
+}
+
+}  // namespace base
diff --git a/base/test/test_file_util_mac.cc b/base/test/test_file_util_mac.cc
new file mode 100644
index 0000000..174a31d
--- /dev/null
+++ b/base/test/test_file_util_mac.cc
@@ -0,0 +1,52 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/test_file_util.h"
+
+#include <sys/mman.h>
+#include <errno.h>
+#include <stdint.h>
+
+#include "base/files/file_util.h"
+#include "base/files/memory_mapped_file.h"
+#include "base/logging.h"
+
+namespace base {
+
+bool EvictFileFromSystemCache(const FilePath& file) {
+  // There aren't any really direct ways to purge a file from the UBC.  From
+  // talking with Amit Singh, the safest is to mmap the file with MAP_FILE (the
+  // default) + MAP_SHARED, then do an msync to invalidate the memory.  The next
+  // open should then have to load the file from disk.
+
+  int64_t length;
+  if (!GetFileSize(file, &length)) {
+    DLOG(ERROR) << "failed to get size of " << file.value();
+    return false;
+  }
+
+  // When a file is empty, we do not need to evict it from the cache.
+  // In fact, an attempt to map it to memory will result in an error.
+  if (length == 0) {
+    DLOG(WARNING) << "file size is zero, will not attempt to map to memory";
+    return true;
+  }
+
+  MemoryMappedFile mapped_file;
+  if (!mapped_file.Initialize(file)) {
+    DLOG(WARNING) << "failed to memory map " << file.value();
+    return false;
+  }
+
+  if (msync(const_cast<uint8_t*>(mapped_file.data()), mapped_file.length(),
+            MS_INVALIDATE) != 0) {
+    DLOG(WARNING) << "failed to invalidate memory map of " << file.value()
+                  << ", errno: " << errno;
+    return false;
+  }
+
+  return true;
+}
+
+}  // namespace base
diff --git a/base/test/test_file_util_posix.cc b/base/test/test_file_util_posix.cc
new file mode 100644
index 0000000..87290fb
--- /dev/null
+++ b/base/test/test_file_util_posix.cc
@@ -0,0 +1,110 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/test_file_util.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stddef.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include <string>
+
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/logging.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace {
+
+// Denies |permission| on the file |path|.
+bool DenyFilePermission(const FilePath& path, mode_t permission) {
+  struct stat stat_buf;
+  if (stat(path.value().c_str(), &stat_buf) != 0)
+    return false;
+  stat_buf.st_mode &= ~permission;
+
+  int rv = HANDLE_EINTR(chmod(path.value().c_str(), stat_buf.st_mode));
+  return rv == 0;
+}
+
+// Gets a blob indicating the permission information for |path|.
+// |length| is set to the length of the blob, or zero on failure.
+// Returns the blob pointer, or nullptr on failure.
+void* GetPermissionInfo(const FilePath& path, size_t* length) {
+  DCHECK(length);
+  *length = 0;
+
+  struct stat stat_buf;
+  if (stat(path.value().c_str(), &stat_buf) != 0)
+    return nullptr;
+
+  *length = sizeof(mode_t);
+  mode_t* mode = new mode_t;
+  *mode = stat_buf.st_mode & ~S_IFMT;  // Filter out file/path kind.
+
+  return mode;
+}
+
+// Restores the permission information for |path|, given the blob retrieved
+// using |GetPermissionInfo()|.
+// |info| is the pointer to the blob.
+// |length| is the length of the blob.
+// Either |info| or |length| may be NULL/0, in which case nothing happens.
+bool RestorePermissionInfo(const FilePath& path, void* info, size_t length) {
+  if (!info || (length == 0))
+    return false;
+
+  DCHECK_EQ(sizeof(mode_t), length);
+  mode_t* mode = reinterpret_cast<mode_t*>(info);
+
+  int rv = HANDLE_EINTR(chmod(path.value().c_str(), *mode));
+
+  delete mode;
+
+  return rv == 0;
+}
+
+}  // namespace
+
+bool DieFileDie(const FilePath& file, bool recurse) {
+  // There is no need to work around Windows problems on POSIX.
+  // Just pass through.
+  return DeleteFile(file, recurse);
+}
+
+#if !defined(OS_LINUX) && !defined(OS_MACOSX) && !defined(OS_ANDROID)
+bool EvictFileFromSystemCache(const FilePath& file) {
+  // There doesn't seem to be a POSIX way to cool the disk cache.
+  NOTIMPLEMENTED();
+  return false;
+}
+#endif
+
+bool MakeFileUnreadable(const FilePath& path) {
+  return DenyFilePermission(path, S_IRUSR | S_IRGRP | S_IROTH);
+}
+
+bool MakeFileUnwritable(const FilePath& path) {
+  return DenyFilePermission(path, S_IWUSR | S_IWGRP | S_IWOTH);
+}
+
+FilePermissionRestorer::FilePermissionRestorer(const FilePath& path)
+    : path_(path), info_(nullptr), length_(0) {
+  info_ = GetPermissionInfo(path_, &length_);
+  DCHECK(info_ != nullptr);
+  DCHECK_NE(0u, length_);
+}
+
+FilePermissionRestorer::~FilePermissionRestorer() {
+  if (!RestorePermissionInfo(path_, info_, length_))
+    NOTREACHED();
+}
+
+}  // namespace base
diff --git a/base/test/test_file_util_win.cc b/base/test/test_file_util_win.cc
new file mode 100644
index 0000000..da596bb
--- /dev/null
+++ b/base/test/test_file_util_win.cc
@@ -0,0 +1,182 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/test_file_util.h"
+
+#include <aclapi.h>
+#include <shlwapi.h>
+#include <stddef.h>
+#include <wchar.h>
+#include <windows.h>
+
+#include <memory>
+#include <vector>
+
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/strings/string_split.h"
+#include "base/threading/platform_thread.h"
+#include "base/win/scoped_handle.h"
+
+namespace base {
+
+namespace {
+
+struct PermissionInfo {
+  PSECURITY_DESCRIPTOR security_descriptor;
+  ACL dacl;
+};
+
+// Gets a blob indicating the permission information for |path|.
+// |length| is set to the length of the blob, or zero on failure.
+// Returns the blob pointer, or NULL on failure.
+void* GetPermissionInfo(const FilePath& path, size_t* length) {
+  DCHECK(length != NULL);
+  *length = 0;
+  PACL dacl = NULL;
+  PSECURITY_DESCRIPTOR security_descriptor;
+  if (GetNamedSecurityInfo(const_cast<wchar_t*>(path.value().c_str()),
+                           SE_FILE_OBJECT,
+                           DACL_SECURITY_INFORMATION, NULL, NULL, &dacl,
+                           NULL, &security_descriptor) != ERROR_SUCCESS) {
+    return NULL;
+  }
+  DCHECK(dacl != NULL);
+
+  *length = sizeof(PSECURITY_DESCRIPTOR) + dacl->AclSize;
+  PermissionInfo* info = reinterpret_cast<PermissionInfo*>(new char[*length]);
+  info->security_descriptor = security_descriptor;
+  memcpy(&info->dacl, dacl, dacl->AclSize);
+
+  return info;
+}
+
+// Restores the permission information for |path|, given the blob retrieved
+// using |GetPermissionInfo()|.
+// |info| is the pointer to the blob.
+// |length| is the length of the blob.
+// Either |info| or |length| may be NULL/0, in which case nothing happens.
+bool RestorePermissionInfo(const FilePath& path, void* info, size_t length) {
+  if (!info || !length)
+    return false;
+
+  PermissionInfo* perm = reinterpret_cast<PermissionInfo*>(info);
+
+  DWORD rc = SetNamedSecurityInfo(const_cast<wchar_t*>(path.value().c_str()),
+                                  SE_FILE_OBJECT, DACL_SECURITY_INFORMATION,
+                                  NULL, NULL, &perm->dacl, NULL);
+  LocalFree(perm->security_descriptor);
+
+  char* char_array = reinterpret_cast<char*>(info);
+  delete [] char_array;
+
+  return rc == ERROR_SUCCESS;
+}
+
+std::unique_ptr<wchar_t[]> ToCStr(const std::basic_string<wchar_t>& str) {
+  size_t size = str.size() + 1;
+  std::unique_ptr<wchar_t[]> ptr = std::make_unique<wchar_t[]>(size);
+  wcsncpy(ptr.get(), str.c_str(), size);
+  return ptr;
+}
+
+}  // namespace
+
+bool DieFileDie(const FilePath& file, bool recurse) {
+  // It turns out that a long timeout is needed to avoid flakiness.
+  const int kIterations = 25;
+  const TimeDelta kTimeout = TimeDelta::FromSeconds(10) / kIterations;
+
+  if (!PathExists(file))
+    return true;
+
+  // Sometimes Delete fails, so try a few more times. Divide the timeout
+  // into short chunks, so that if a try succeeds, we won't delay the test
+  // for too long.
+  for (int i = 0; i < kIterations; ++i) {
+    if (DeleteFile(file, recurse))
+      return true;
+    PlatformThread::Sleep(kTimeout);
+  }
+  return false;
+}
+
+bool EvictFileFromSystemCache(const FilePath& file) {
+  base::win::ScopedHandle file_handle(
+      CreateFile(file.value().c_str(), GENERIC_READ | GENERIC_WRITE, 0, NULL,
+                 OPEN_EXISTING, FILE_FLAG_NO_BUFFERING, NULL));
+  if (!file_handle.IsValid())
+    return false;
+
+  // Re-write the file time information to trigger cache eviction for the file.
+  // This function previously overwrote the entire file without buffering, but
+  // local experimentation validates this simplified and *much* faster approach:
+  // [1] Sysinternals RamMap no longer lists these files as cached afterwards.
+  // [2] Telemetry performance test startup.cold.blank_page reports sane values.
+  BY_HANDLE_FILE_INFORMATION bhi = {0};
+  CHECK(::GetFileInformationByHandle(file_handle.Get(), &bhi));
+  CHECK(::SetFileTime(file_handle.Get(), &bhi.ftCreationTime,
+                      &bhi.ftLastAccessTime, &bhi.ftLastWriteTime));
+  return true;
+}
+
+// Denies |permission| on the file |path| for the current user.
+bool DenyFilePermission(const FilePath& path, DWORD permission) {
+  PACL old_dacl;
+  PSECURITY_DESCRIPTOR security_descriptor;
+
+  std::unique_ptr<TCHAR[]> path_ptr = ToCStr(path.value());
+  if (GetNamedSecurityInfo(path_ptr.get(), SE_FILE_OBJECT,
+                           DACL_SECURITY_INFORMATION, nullptr, nullptr,
+                           &old_dacl, nullptr,
+                           &security_descriptor) != ERROR_SUCCESS) {
+    return false;
+  }
+
+  std::unique_ptr<TCHAR[]> current_user = ToCStr(std::wstring(L"CURRENT_USER"));
+  EXPLICIT_ACCESS new_access = {
+      permission,
+      DENY_ACCESS,
+      0,
+      {nullptr, NO_MULTIPLE_TRUSTEE, TRUSTEE_IS_NAME, TRUSTEE_IS_USER,
+       current_user.get()}};
+
+  PACL new_dacl;
+  if (SetEntriesInAcl(1, &new_access, old_dacl, &new_dacl) != ERROR_SUCCESS) {
+    LocalFree(security_descriptor);
+    return false;
+  }
+
+  DWORD rc = SetNamedSecurityInfo(path_ptr.get(), SE_FILE_OBJECT,
+                                  DACL_SECURITY_INFORMATION, nullptr, nullptr,
+                                  new_dacl, nullptr);
+  LocalFree(security_descriptor);
+  LocalFree(new_dacl);
+
+  return rc == ERROR_SUCCESS;
+}
+
+bool MakeFileUnreadable(const FilePath& path) {
+  return DenyFilePermission(path, GENERIC_READ);
+}
+
+bool MakeFileUnwritable(const FilePath& path) {
+  return DenyFilePermission(path, GENERIC_WRITE);
+}
+
+FilePermissionRestorer::FilePermissionRestorer(const FilePath& path)
+    : path_(path), info_(NULL), length_(0) {
+  info_ = GetPermissionInfo(path_, &length_);
+  DCHECK(info_ != NULL);
+  DCHECK_NE(0u, length_);
+}
+
+FilePermissionRestorer::~FilePermissionRestorer() {
+  if (!RestorePermissionInfo(path_, info_, length_))
+    NOTREACHED();
+}
+
+}  // namespace base
diff --git a/base/test/test_io_thread.cc b/base/test/test_io_thread.cc
new file mode 100644
index 0000000..1b20658
--- /dev/null
+++ b/base/test/test_io_thread.cc
@@ -0,0 +1,45 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/test_io_thread.h"
+
+#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+
+namespace base {
+
+TestIOThread::TestIOThread(Mode mode)
+    : io_thread_("test_io_thread"), io_thread_started_(false) {
+  switch (mode) {
+    case kAutoStart:
+      Start();
+      return;
+    case kManualStart:
+      return;
+  }
+  CHECK(false) << "Invalid mode";
+}
+
+TestIOThread::~TestIOThread() {
+  Stop();
+}
+
+void TestIOThread::Start() {
+  CHECK(!io_thread_started_);
+  io_thread_started_ = true;
+  CHECK(io_thread_.StartWithOptions(
+      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
+}
+
+void TestIOThread::Stop() {
+  // Note: It's okay to call |Stop()| even if the thread isn't running.
+  io_thread_.Stop();
+  io_thread_started_ = false;
+}
+
+void TestIOThread::PostTask(const Location& from_here, base::OnceClosure task) {
+  task_runner()->PostTask(from_here, std::move(task));
+}
+
+}  // namespace base
diff --git a/base/test/test_io_thread.h b/base/test/test_io_thread.h
new file mode 100644
index 0000000..a55a063
--- /dev/null
+++ b/base/test/test_io_thread.h
@@ -0,0 +1,60 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_TEST_IO_THREAD_H_
+#define BASE_TEST_TEST_IO_THREAD_H_
+
+#include "base/callback_forward.h"
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/task_runner.h"
+#include "base/threading/thread.h"
+#include "base/time/time.h"
+
+namespace base {
+
+// Creates and runs an IO thread with a MessageLoop, and makes the MessageLoop
+// accessible from its client.
+// It also provides some idiomatic API, such as PostTask().
+//
+// This API is not thread-safe:
+//   - Start()/Stop() should only be called from the main (creation) thread.
+//   - PostTask()/message_loop()/task_runner() are also safe to call from the
+//     underlying thread itself (to post tasks from other threads: get the
+//     task_runner() from the main thread first; it is then safe to pass it
+//     around).
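+//
+// Example (illustrative; DoIOWork is a hypothetical function):
+//
+//   base::TestIOThread io_thread(base::TestIOThread::kAutoStart);
+//   io_thread.PostTask(FROM_HERE, base::BindOnce(&DoIOWork));
+//   io_thread.Stop();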
+class TestIOThread {
+ public:
+  enum Mode { kAutoStart, kManualStart };
+  explicit TestIOThread(Mode mode);
+  // Stops the I/O thread if necessary.
+  ~TestIOThread();
+
+  // After Stop(), Start() may be called again to start a new I/O thread.
+  // Stop() may be called even when the I/O thread is not started.
+  void Start();
+  void Stop();
+
+  // Post |task| to the IO thread.
+  void PostTask(const Location& from_here, base::OnceClosure task);
+
+  base::MessageLoopForIO* message_loop() {
+    return static_cast<base::MessageLoopForIO*>(io_thread_.message_loop());
+  }
+
+  scoped_refptr<SingleThreadTaskRunner> task_runner() {
+    return message_loop()->task_runner();
+  }
+
+ private:
+  base::Thread io_thread_;
+  bool io_thread_started_;
+
+  DISALLOW_COPY_AND_ASSIGN(TestIOThread);
+};
+
+}  // namespace base
+
+#endif  // BASE_TEST_TEST_IO_THREAD_H_
diff --git a/base/test/test_listener_ios.h b/base/test/test_listener_ios.h
new file mode 100644
index 0000000..c312250
--- /dev/null
+++ b/base/test/test_listener_ios.h
@@ -0,0 +1,17 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_TEST_LISTENER_IOS_H_
+#define BASE_TEST_TEST_LISTENER_IOS_H_
+
+namespace base {
+namespace test_listener_ios {
+
+// Register an IOSRunLoopListener.
+void RegisterTestEndListener();
+
+}  // namespace test_listener_ios
+}  // namespace base
+
+#endif  // BASE_TEST_TEST_LISTENER_IOS_H_
diff --git a/base/test/test_listener_ios.mm b/base/test/test_listener_ios.mm
new file mode 100644
index 0000000..12cf5bb
--- /dev/null
+++ b/base/test/test_listener_ios.mm
@@ -0,0 +1,45 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/test_listener_ios.h"
+
+#import <Foundation/Foundation.h>
+
+#include "base/mac/scoped_nsautorelease_pool.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// The iOS watchdog timer will kill an app that doesn't spin the main event
+// loop often enough. This uses a gtest TestEventListener to spin the current
+// loop after each test finishes. However, if any individual test takes too
+// long, it is still possible that the app will get killed.
+
+namespace {
+
+class IOSRunLoopListener : public testing::EmptyTestEventListener {
+ public:
+  void OnTestEnd(const testing::TestInfo& test_info) override;
+};
+
+void IOSRunLoopListener::OnTestEnd(const testing::TestInfo& test_info) {
+  base::mac::ScopedNSAutoreleasePool scoped_pool;
+
+  // At the end of the test, spin the default loop for a moment.
+  NSDate* stop_date = [NSDate dateWithTimeIntervalSinceNow:0.001];
+  [[NSRunLoop currentRunLoop] runUntilDate:stop_date];
+}
+
+}  // namespace
+
+
+namespace base {
+namespace test_listener_ios {
+
+void RegisterTestEndListener() {
+  testing::TestEventListeners& listeners =
+      testing::UnitTest::GetInstance()->listeners();
+  listeners.Append(new IOSRunLoopListener);
+}
+
+}  // namespace test_listener_ios
+}  // namespace base
diff --git a/base/test/test_message_loop.cc b/base/test/test_message_loop.cc
new file mode 100644
index 0000000..bd3610f
--- /dev/null
+++ b/base/test/test_message_loop.cc
@@ -0,0 +1,18 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/run_loop.h"
+#include "base/test/test_message_loop.h"
+
+namespace base {
+
+TestMessageLoop::TestMessageLoop() = default;
+
+TestMessageLoop::TestMessageLoop(MessageLoop::Type type) : loop_(type) {}
+
+TestMessageLoop::~TestMessageLoop() {
+  RunLoop().RunUntilIdle();
+}
+
+}  // namespace base
diff --git a/base/test/test_message_loop.h b/base/test/test_message_loop.h
new file mode 100644
index 0000000..9c0aed8
--- /dev/null
+++ b/base/test/test_message_loop.h
@@ -0,0 +1,34 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_TEST_MESSAGE_LOOP_H_
+#define BASE_TEST_TEST_MESSAGE_LOOP_H_
+
+#include "base/message_loop/message_loop.h"
+
+namespace base {
+
+// TestMessageLoop is a convenience class for unittests that need to create a
+// message loop without a real thread backing it. For most tests,
+// it is sufficient to just instantiate TestMessageLoop as a member variable.
+//
+// TestMessageLoop will attempt to drain the underlying MessageLoop on
+// destruction for clean teardown of tests.
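+//
+// Example (illustrative; DoWork is a hypothetical function):
+//
+//   TEST(MyTest, RunsPostedTask) {
+//     base::TestMessageLoop loop;
+//     loop.task_runner()->PostTask(FROM_HERE, base::BindOnce(&DoWork));
+//     base::RunLoop().RunUntilIdle();  // DoWork runs on this thread.
+//   }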
+class TestMessageLoop {
+ public:
+  TestMessageLoop();
+  explicit TestMessageLoop(MessageLoop::Type type);
+  ~TestMessageLoop();
+
+  const scoped_refptr<SingleThreadTaskRunner>& task_runner() {
+    return loop_.task_runner();
+  }
+
+ private:
+  MessageLoop loop_;
+};
+
+}  // namespace base
+
+#endif  // BASE_TEST_TEST_MESSAGE_LOOP_H_
diff --git a/base/test/test_mock_time_task_runner.cc b/base/test/test_mock_time_task_runner.cc
new file mode 100644
index 0000000..c74d14e
--- /dev/null
+++ b/base/test/test_mock_time_task_runner.cc
@@ -0,0 +1,448 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/test_mock_time_task_runner.h"
+
+#include <utility>
+
+#include "base/containers/circular_deque.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/memory/ref_counted.h"
+#include "base/threading/thread_task_runner_handle.h"
+
+namespace base {
+namespace {
+
+// LegacyMockTickClock and LegacyMockClock are used by deprecated APIs of
+// TestMockTimeTaskRunner. They will be removed after updating callers of
+// DeprecatedGetMockClock() and DeprecatedGetMockTickClock() to the non-owning
+// GetMockClock() and GetMockTickClock().
+class LegacyMockTickClock : public TickClock {
+ public:
+  explicit LegacyMockTickClock(
+      scoped_refptr<const TestMockTimeTaskRunner> task_runner)
+      : task_runner_(std::move(task_runner)) {}
+
+  // TickClock:
+  TimeTicks NowTicks() const override { return task_runner_->NowTicks(); }
+
+ private:
+  scoped_refptr<const TestMockTimeTaskRunner> task_runner_;
+
+  DISALLOW_COPY_AND_ASSIGN(LegacyMockTickClock);
+};
+
+class LegacyMockClock : public Clock {
+ public:
+  explicit LegacyMockClock(
+      scoped_refptr<const TestMockTimeTaskRunner> task_runner)
+      : task_runner_(std::move(task_runner)) {}
+
+  // Clock:
+  Time Now() const override { return task_runner_->Now(); }
+
+ private:
+  scoped_refptr<const TestMockTimeTaskRunner> task_runner_;
+
+  DISALLOW_COPY_AND_ASSIGN(LegacyMockClock);
+};
+
+// A SingleThreadTaskRunner which forwards everything to its |target_|. This is
+// useful to break ownership chains when it is known that |target_| will outlive
+// the NonOwningProxyTaskRunner it's injected into. In particular,
+// TestMockTimeTaskRunner is forced to be ref-counted by virtue of being a
+// SingleThreadTaskRunner. As such, it is impossible for it to have a
+// ThreadTaskRunnerHandle member that points back to itself, as the
+// ThreadTaskRunnerHandle it owns would hold a ref back to it. To break this
+// dependency cycle, the ThreadTaskRunnerHandle is instead handed a
+// NonOwningProxyTaskRunner, which allows the TestMockTimeTaskRunner not to
+// hand a ref to its ThreadTaskRunnerHandle, while promising in return to
+// outlive that ThreadTaskRunnerHandle instance.
+class NonOwningProxyTaskRunner : public SingleThreadTaskRunner {
+ public:
+  explicit NonOwningProxyTaskRunner(SingleThreadTaskRunner* target)
+      : target_(target) {
+    DCHECK(target_);
+  }
+
+  // SingleThreadTaskRunner:
+  bool RunsTasksInCurrentSequence() const override {
+    return target_->RunsTasksInCurrentSequence();
+  }
+  bool PostDelayedTask(const Location& from_here,
+                       OnceClosure task,
+                       TimeDelta delay) override {
+    return target_->PostDelayedTask(from_here, std::move(task), delay);
+  }
+  bool PostNonNestableDelayedTask(const Location& from_here,
+                                  OnceClosure task,
+                                  TimeDelta delay) override {
+    return target_->PostNonNestableDelayedTask(from_here, std::move(task),
+                                               delay);
+  }
+
+ private:
+  friend class RefCountedThreadSafe<NonOwningProxyTaskRunner>;
+  ~NonOwningProxyTaskRunner() override = default;
+
+  SingleThreadTaskRunner* const target_;
+
+  DISALLOW_COPY_AND_ASSIGN(NonOwningProxyTaskRunner);
+};
+
+}  // namespace
+
+// TestMockTimeTaskRunner::TestOrderedPendingTask -----------------------------
+
+// Subclass of TestPendingTask which has a strictly monotonically increasing ID
+// for every task, so that tasks posted with the same 'time to run' can be run
+// in the order in which they were posted.
+struct TestMockTimeTaskRunner::TestOrderedPendingTask
+    : public base::TestPendingTask {
+  TestOrderedPendingTask();
+  TestOrderedPendingTask(const Location& location,
+                         OnceClosure task,
+                         TimeTicks post_time,
+                         TimeDelta delay,
+                         size_t ordinal,
+                         TestNestability nestability);
+  TestOrderedPendingTask(TestOrderedPendingTask&&);
+  ~TestOrderedPendingTask();
+
+  TestOrderedPendingTask& operator=(TestOrderedPendingTask&&);
+
+  size_t ordinal;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(TestOrderedPendingTask);
+};
+
+TestMockTimeTaskRunner::TestOrderedPendingTask::TestOrderedPendingTask()
+    : ordinal(0) {
+}
+
+TestMockTimeTaskRunner::TestOrderedPendingTask::TestOrderedPendingTask(
+    TestOrderedPendingTask&&) = default;
+
+TestMockTimeTaskRunner::TestOrderedPendingTask::TestOrderedPendingTask(
+    const Location& location,
+    OnceClosure task,
+    TimeTicks post_time,
+    TimeDelta delay,
+    size_t ordinal,
+    TestNestability nestability)
+    : base::TestPendingTask(location,
+                            std::move(task),
+                            post_time,
+                            delay,
+                            nestability),
+      ordinal(ordinal) {}
+
+TestMockTimeTaskRunner::TestOrderedPendingTask::~TestOrderedPendingTask() =
+    default;
+
+TestMockTimeTaskRunner::TestOrderedPendingTask&
+TestMockTimeTaskRunner::TestOrderedPendingTask::operator=(
+    TestOrderedPendingTask&&) = default;
+
+// TestMockTimeTaskRunner -----------------------------------------------------
+
+// TODO(gab): This should also set the SequenceToken for the current thread.
+// Ref. TestMockTimeTaskRunner::RunsTasksInCurrentSequence().
+TestMockTimeTaskRunner::ScopedContext::ScopedContext(
+    scoped_refptr<TestMockTimeTaskRunner> scope)
+    : on_destroy_(ThreadTaskRunnerHandle::OverrideForTesting(scope)) {
+  scope->RunUntilIdle();
+}
+
+TestMockTimeTaskRunner::ScopedContext::~ScopedContext() = default;
+
+bool TestMockTimeTaskRunner::TemporalOrder::operator()(
+    const TestOrderedPendingTask& first_task,
+    const TestOrderedPendingTask& second_task) const {
+  if (first_task.GetTimeToRun() == second_task.GetTimeToRun())
+    return first_task.ordinal > second_task.ordinal;
+  return first_task.GetTimeToRun() > second_task.GetTimeToRun();
+}
+
+TestMockTimeTaskRunner::TestMockTimeTaskRunner(Type type)
+    : TestMockTimeTaskRunner(Time::UnixEpoch(), TimeTicks(), type) {}
+
+TestMockTimeTaskRunner::TestMockTimeTaskRunner(Time start_time,
+                                               TimeTicks start_ticks,
+                                               Type type)
+    : now_(start_time),
+      now_ticks_(start_ticks),
+      tasks_lock_cv_(&tasks_lock_),
+      mock_clock_(this) {
+  if (type == Type::kBoundToThread) {
+    RunLoop::RegisterDelegateForCurrentThread(this);
+    thread_task_runner_handle_ = std::make_unique<ThreadTaskRunnerHandle>(
+        MakeRefCounted<NonOwningProxyTaskRunner>(this));
+  }
+}
+
+TestMockTimeTaskRunner::~TestMockTimeTaskRunner() = default;
+
+void TestMockTimeTaskRunner::FastForwardBy(TimeDelta delta) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  DCHECK_GE(delta, TimeDelta());
+
+  const TimeTicks original_now_ticks = NowTicks();
+  ProcessAllTasksNoLaterThan(delta);
+  ForwardClocksUntilTickTime(original_now_ticks + delta);
+}
+
+void TestMockTimeTaskRunner::AdvanceMockTickClock(TimeDelta delta) {
+  ForwardClocksUntilTickTime(NowTicks() + delta);
+}
+
+void TestMockTimeTaskRunner::RunUntilIdle() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  ProcessAllTasksNoLaterThan(TimeDelta());
+}
+
+void TestMockTimeTaskRunner::FastForwardUntilNoTasksRemain() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  ProcessAllTasksNoLaterThan(TimeDelta::Max());
+}
+
+void TestMockTimeTaskRunner::ClearPendingTasks() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  AutoLock scoped_lock(tasks_lock_);
+  while (!tasks_.empty())
+    tasks_.pop();
+}
+
+Time TestMockTimeTaskRunner::Now() const {
+  AutoLock scoped_lock(tasks_lock_);
+  return now_;
+}
+
+TimeTicks TestMockTimeTaskRunner::NowTicks() const {
+  AutoLock scoped_lock(tasks_lock_);
+  return now_ticks_;
+}
+
+std::unique_ptr<Clock> TestMockTimeTaskRunner::DeprecatedGetMockClock() const {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  return std::make_unique<LegacyMockClock>(this);
+}
+
+Clock* TestMockTimeTaskRunner::GetMockClock() const {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  return &mock_clock_;
+}
+
+std::unique_ptr<TickClock> TestMockTimeTaskRunner::DeprecatedGetMockTickClock()
+    const {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  return std::make_unique<LegacyMockTickClock>(this);
+}
+
+const TickClock* TestMockTimeTaskRunner::GetMockTickClock() const {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  return &mock_clock_;
+}
+
+base::circular_deque<TestPendingTask>
+TestMockTimeTaskRunner::TakePendingTasks() {
+  AutoLock scoped_lock(tasks_lock_);
+  base::circular_deque<TestPendingTask> tasks;
+  while (!tasks_.empty()) {
+    // It's safe to remove const and consume |task| here, since |task| is not
+    // used for ordering the item.
+    if (!tasks_.top().task.IsCancelled()) {
+      tasks.push_back(
+          std::move(const_cast<TestOrderedPendingTask&>(tasks_.top())));
+    }
+    tasks_.pop();
+  }
+  return tasks;
+}
+
+bool TestMockTimeTaskRunner::HasPendingTask() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  AutoLock scoped_lock(tasks_lock_);
+  while (!tasks_.empty() && tasks_.top().task.IsCancelled())
+    tasks_.pop();
+  return !tasks_.empty();
+}
+
+size_t TestMockTimeTaskRunner::GetPendingTaskCount() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  AutoLock scoped_lock(tasks_lock_);
+  TaskPriorityQueue preserved_tasks;
+  while (!tasks_.empty()) {
+    if (!tasks_.top().task.IsCancelled()) {
+      preserved_tasks.push(
+          std::move(const_cast<TestOrderedPendingTask&>(tasks_.top())));
+    }
+    tasks_.pop();
+  }
+  tasks_.swap(preserved_tasks);
+  return tasks_.size();
+}
+
+TimeDelta TestMockTimeTaskRunner::NextPendingTaskDelay() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  AutoLock scoped_lock(tasks_lock_);
+  while (!tasks_.empty() && tasks_.top().task.IsCancelled())
+    tasks_.pop();
+  return tasks_.empty() ? TimeDelta::Max()
+                        : tasks_.top().GetTimeToRun() - now_ticks_;
+}
+
+// TODO(gab): Combine |thread_checker_| with a SequenceToken to differentiate
+// between tasks running in the scope of this TestMockTimeTaskRunner and other
+// task runners sharing this thread. http://crbug.com/631186
+bool TestMockTimeTaskRunner::RunsTasksInCurrentSequence() const {
+  return thread_checker_.CalledOnValidThread();
+}
+
+bool TestMockTimeTaskRunner::PostDelayedTask(const Location& from_here,
+                                             OnceClosure task,
+                                             TimeDelta delay) {
+  AutoLock scoped_lock(tasks_lock_);
+  tasks_.push(TestOrderedPendingTask(from_here, std::move(task), now_ticks_,
+                                     delay, next_task_ordinal_++,
+                                     TestPendingTask::NESTABLE));
+  tasks_lock_cv_.Signal();
+  return true;
+}
+
+bool TestMockTimeTaskRunner::PostNonNestableDelayedTask(
+    const Location& from_here,
+    OnceClosure task,
+    TimeDelta delay) {
+  return PostDelayedTask(from_here, std::move(task), delay);
+}
+
+void TestMockTimeTaskRunner::OnBeforeSelectingTask() {
+  // Empty default implementation.
+}
+
+void TestMockTimeTaskRunner::OnAfterTimePassed() {
+  // Empty default implementation.
+}
+
+void TestMockTimeTaskRunner::OnAfterTaskRun() {
+  // Empty default implementation.
+}
+
+void TestMockTimeTaskRunner::ProcessAllTasksNoLaterThan(TimeDelta max_delta) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  DCHECK_GE(max_delta, TimeDelta());
+
+  // Multiple test task runners can share the same thread for determinism in
+  // unit tests. Make sure this TestMockTimeTaskRunner's tasks run in its scope.
+  ScopedClosureRunner undo_override;
+  if (!ThreadTaskRunnerHandle::IsSet() ||
+      ThreadTaskRunnerHandle::Get() != this) {
+    undo_override = ThreadTaskRunnerHandle::OverrideForTesting(this);
+  }
+
+  const TimeTicks original_now_ticks = NowTicks();
+  while (!quit_run_loop_) {
+    OnBeforeSelectingTask();
+    TestPendingTask task_info;
+    if (!DequeueNextTask(original_now_ticks, max_delta, &task_info))
+      break;
+    if (task_info.task.IsCancelled())
+      continue;
+    // If tasks were posted with a negative delay, task_info.GetTimeToRun() will
+    // be less than |now_ticks_|. ForwardClocksUntilTickTime() takes care of not
+    // moving the clock backwards in this case.
+    ForwardClocksUntilTickTime(task_info.GetTimeToRun());
+    std::move(task_info.task).Run();
+    OnAfterTaskRun();
+  }
+}
+
+void TestMockTimeTaskRunner::ForwardClocksUntilTickTime(TimeTicks later_ticks) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  {
+    AutoLock scoped_lock(tasks_lock_);
+    if (later_ticks <= now_ticks_)
+      return;
+
+    now_ += later_ticks - now_ticks_;
+    now_ticks_ = later_ticks;
+  }
+  OnAfterTimePassed();
+}
+
+bool TestMockTimeTaskRunner::DequeueNextTask(const TimeTicks& reference,
+                                             const TimeDelta& max_delta,
+                                             TestPendingTask* next_task) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  AutoLock scoped_lock(tasks_lock_);
+  if (!tasks_.empty() &&
+      (tasks_.top().GetTimeToRun() - reference) <= max_delta) {
+    // It's safe to remove const and consume |task| here, since |task| is not
+    // used for ordering the item.
+    *next_task = std::move(const_cast<TestOrderedPendingTask&>(tasks_.top()));
+    tasks_.pop();
+    return true;
+  }
+  return false;
+}
+
+void TestMockTimeTaskRunner::Run(bool application_tasks_allowed) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  // Since TestMockTimeTaskRunner doesn't process system messages, there's no
+  // hope for anything but an application task to call Quit(). If this RunLoop
+  // can't process application tasks (i.e. disallowed by default in nested
+  // RunLoops), it's guaranteed to hang...
+  DCHECK(application_tasks_allowed)
+      << "This is a nested RunLoop instance and needs to be of "
+         "Type::kNestableTasksAllowed.";
+
+  while (!quit_run_loop_) {
+    RunUntilIdle();
+    if (quit_run_loop_ || ShouldQuitWhenIdle())
+      break;
+
+    // Peek into |tasks_| to perform one of two things:
+    //   A) If there are no remaining tasks, wait until one is posted and
+    //      restart from the top.
+    //   B) If there is a remaining delayed task, fast-forward to reach the
+    //      next round of tasks.
+    TimeDelta auto_fast_forward_by;
+    {
+      AutoLock scoped_lock(tasks_lock_);
+      if (tasks_.empty()) {
+        while (tasks_.empty())
+          tasks_lock_cv_.Wait();
+        continue;
+      }
+      auto_fast_forward_by = tasks_.top().GetTimeToRun() - now_ticks_;
+    }
+    FastForwardBy(auto_fast_forward_by);
+  }
+  quit_run_loop_ = false;
+}
+
+void TestMockTimeTaskRunner::Quit() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  quit_run_loop_ = true;
+}
+
+void TestMockTimeTaskRunner::EnsureWorkScheduled() {
+  // Nothing to do: TestMockTimeTaskRunner::Run() will always process tasks and
+  // doesn't need an extra kick on nested runs.
+}
+
+TimeTicks TestMockTimeTaskRunner::MockClock::NowTicks() const {
+  return task_runner_->NowTicks();
+}
+
+Time TestMockTimeTaskRunner::MockClock::Now() const {
+  return task_runner_->Now();
+}
+
+}  // namespace base
diff --git a/base/test/test_mock_time_task_runner.h b/base/test/test_mock_time_task_runner.h
new file mode 100644
index 0000000..23dbb2f
--- /dev/null
+++ b/base/test/test_mock_time_task_runner.h
@@ -0,0 +1,293 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_TEST_MOCK_TIME_TASK_RUNNER_H_
+#define BASE_TEST_TEST_MOCK_TIME_TASK_RUNNER_H_
+
+#include <stddef.h>
+
+#include <memory>
+#include <queue>
+#include <vector>
+
+#include "base/callback.h"
+#include "base/callback_helpers.h"
+#include "base/containers/circular_deque.h"
+#include "base/macros.h"
+#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/lock.h"
+#include "base/test/test_pending_task.h"
+#include "base/threading/thread_checker_impl.h"
+#include "base/time/clock.h"
+#include "base/time/tick_clock.h"
+#include "base/time/time.h"
+
+namespace base {
+
+class ThreadTaskRunnerHandle;
+
+// Runs pending tasks in the order of the tasks' post time + delay, and keeps
+// track of a mock (virtual) tick clock time that can be fast-forwarded.
+//
+// TestMockTimeTaskRunner has the following properties:
+//
+//   - Methods RunsTasksInCurrentSequence() and Post[Delayed]Task() can be
+//     called from any thread, but the rest of the methods must be called on
+//     the same thread the TestMockTimeTaskRunner was created on.
+//   - It allows for reentrancy, in that it handles the running of tasks that in
+//     turn call back into it (e.g., to post more tasks).
+//   - Tasks are stored in a priority queue, and executed in the increasing
+//     order of post time + delay, but ignoring nestability.
+//   - It does not check for overflow when doing time arithmetic. A sufficient
+//     condition for preventing overflows is to make sure that the sum of all
+//     posted task delays and fast-forward increments is still representable by
+//     a TimeDelta, and that adding this delta to the starting values of Time
+//     and TickTime is still within their respective range.
+//
+// A TestMockTimeTaskRunner of Type::kBoundToThread has the following additional
+// properties:
+//   - Thread/SequencedTaskRunnerHandle refers to it on its thread.
+//   - It can be driven by a RunLoop on the thread it was created on.
+//     RunLoop::Run() will result in running non-delayed tasks until idle and
+//     then, if RunLoop::QuitWhenIdle() wasn't invoked, fast-forwarding time to
+//     the next delayed task and looping again. And so on, until either
+//     RunLoop::Quit() is invoked (quits immediately after the current task) or
+//     RunLoop::QuitWhenIdle() is invoked (quits before having to fast forward
+//     time once again). Should RunLoop::Run() process all tasks (including
+//     delayed ones), it will block until more are posted. As usual,
+//     RunLoop::RunUntilIdle() is equivalent to RunLoop::Run() followed by an
+//     immediate RunLoop::QuitWhenIdle().
+//
+// This is a slightly more sophisticated version of TestSimpleTaskRunner, in
+// that it supports running delayed tasks in the correct temporal order.
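+//
+// A minimal usage sketch (illustrative; OnTimeout is a hypothetical callback):
+//
+//   auto task_runner = base::MakeRefCounted<base::TestMockTimeTaskRunner>();
+//   task_runner->PostDelayedTask(FROM_HERE, base::BindOnce(&OnTimeout),
+//                                base::TimeDelta::FromSeconds(30));
+//   task_runner->FastForwardBy(base::TimeDelta::FromSeconds(30));
+//   // OnTimeout has now run, without waiting 30 seconds of real time.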
+class TestMockTimeTaskRunner : public SingleThreadTaskRunner,
+                               public RunLoop::Delegate {
+ public:
+  // Everything that is executed in the scope of a ScopedContext will behave as
+  // though it ran under |scope| (i.e. ThreadTaskRunnerHandle,
+  // RunsTasksInCurrentSequence, etc.). This allows the test body to be all in
+  // one block when multiple TestMockTimeTaskRunners share the main thread.
+  // Note: RunLoop isn't supported: will DCHECK if used inside a ScopedContext.
+  //
+  // For example:
+  //
+  //   class ExampleFixture {
+  //    protected:
+  //     DoBarOnFoo() {
+  //       DCHECK(foo_task_runner_->RunsOnCurrentThread());
+  //       EXPECT_EQ(foo_task_runner_, ThreadTaskRunnerHandle::Get());
+  //       DoBar();
+  //     }
+  //
+  //     // Mock main task runner.
+  //     base::MessageLoop message_loop_;
+  //     base::ScopedMockTimeMessageLoopTaskRunner main_task_runner_;
+  //
+  //     // Mock foo task runner.
+  //     scoped_refptr<TestMockTimeTaskRunner> foo_task_runner_ =
+  //         new TestMockTimeTaskRunner();
+  //   };
+  //
+  //   TEST_F(ExampleFixture, DoBarOnFoo) {
+  //     DoThingsOnMain();
+  //     {
+  //       TestMockTimeTaskRunner::ScopedContext scoped_context(
+  //           foo_task_runner_.get());
+  //       DoBarOnFoo();
+  //     }
+  //     DoMoreThingsOnMain();
+  //   }
+  //
+  class ScopedContext {
+   public:
+    // Note: |scope| is run until idle as part of this constructor to ensure
+    // that anything which runs in the underlying scope runs after any already
+    // pending tasks (the contrary would break the SequencedTaskRunner
+    // contract).
+    explicit ScopedContext(scoped_refptr<TestMockTimeTaskRunner> scope);
+    ~ScopedContext();
+
+   private:
+    ScopedClosureRunner on_destroy_;
+    DISALLOW_COPY_AND_ASSIGN(ScopedContext);
+  };
+
+  enum class Type {
+    // A TestMockTimeTaskRunner which can only be driven directly through its
+    // API. Thread/SequencedTaskRunnerHandle will refer to it only in the scope
+    // of its tasks.
+    kStandalone,
+    // A TestMockTimeTaskRunner which will associate with the thread it is
+    // created on, enabling RunLoop to drive it and making
+    // Thread/SequencedTaskRunnerHandle refer to it on that thread.
+    kBoundToThread,
+  };
+
+  // Constructs an instance whose virtual time will start at the Unix epoch,
+  // and whose time ticks will start at zero.
+  TestMockTimeTaskRunner(Type type = Type::kStandalone);
+
+  // Constructs an instance starting at the given virtual time and time ticks.
+  TestMockTimeTaskRunner(Time start_time,
+                         TimeTicks start_ticks,
+                         Type type = Type::kStandalone);
+
+  // Fast-forwards virtual time by |delta|, causing all tasks with a remaining
+  // delay less than or equal to |delta| to be executed. |delta| must be
+  // non-negative.
+  void FastForwardBy(TimeDelta delta);
+
+  // Fast-forwards virtual time by |delta| without causing any task execution.
+  void AdvanceMockTickClock(TimeDelta delta);
+
+  // Fast-forwards virtual time just until all tasks are executed.
+  void FastForwardUntilNoTasksRemain();
+
+  // Executes all tasks that have no remaining delay. Tasks with a remaining
+  // delay greater than zero will remain enqueued, and no virtual time will
+  // elapse.
+  void RunUntilIdle();
+
+  // Clears the queue of pending tasks without running them.
+  void ClearPendingTasks();
+
+  // Returns the current virtual time (initially starting at the Unix epoch).
+  Time Now() const;
+
+  // Returns the current virtual tick time (initially starting at 0).
+  TimeTicks NowTicks() const;
+
+  // Returns a Clock that uses the virtual time of |this| as its time source.
+  // The Clock returned by DeprecatedGetMockClock() holds a reference to
+  // |this|; the one returned by GetMockClock() does not.
+  // TODO(tzik): Remove DeprecatedGetMockClock() after updating all callers to
+  // use non-owning Clock.
+  std::unique_ptr<Clock> DeprecatedGetMockClock() const;
+  Clock* GetMockClock() const;
+
+  // Returns a TickClock that uses the virtual time ticks of |this| as its tick
+  // source. The TickClock returned by DeprecatedGetMockTickClock() holds a
+  // reference to |this|; the one returned by GetMockTickClock() does not.
+  // TODO(tzik): Remove DeprecatedGetMockTickClock() after updating all
+  // callers to use non-owning TickClock.
+  std::unique_ptr<TickClock> DeprecatedGetMockTickClock() const;
+  const TickClock* GetMockTickClock() const;
+
+  // Cancelled pending tasks get pruned automatically.
+  base::circular_deque<TestPendingTask> TakePendingTasks();
+  bool HasPendingTask();
+  size_t GetPendingTaskCount();
+  TimeDelta NextPendingTaskDelay();
+
+  // SingleThreadTaskRunner:
+  bool RunsTasksInCurrentSequence() const override;
+  bool PostDelayedTask(const Location& from_here,
+                       OnceClosure task,
+                       TimeDelta delay) override;
+  bool PostNonNestableDelayedTask(const Location& from_here,
+                                  OnceClosure task,
+                                  TimeDelta delay) override;
+
+ protected:
+  ~TestMockTimeTaskRunner() override;
+
+  // Called before the next task to run is selected, so that subclasses have a
+  // last chance to make sure all tasks are posted.
+  virtual void OnBeforeSelectingTask();
+
+  // Called after the current mock time has been incremented so that subclasses
+  // can react to the passing of time.
+  virtual void OnAfterTimePassed();
+
+  // Called after each task is run so that subclasses may perform additional
+  // activities, e.g., pump additional task runners.
+  virtual void OnAfterTaskRun();
+
+ private:
+  // MockClock implements TickClock and Clock. Always returns the then-current
+  // mock time of |task_runner| as the current time or time ticks.
+  class MockClock : public TickClock, public Clock {
+   public:
+    explicit MockClock(TestMockTimeTaskRunner* task_runner)
+        : task_runner_(task_runner) {}
+
+    // TickClock:
+    TimeTicks NowTicks() const override;
+
+    // Clock:
+    Time Now() const override;
+
+   private:
+    TestMockTimeTaskRunner* task_runner_;
+
+    DISALLOW_COPY_AND_ASSIGN(MockClock);
+  };
+
+  struct TestOrderedPendingTask;
+
+  // Predicate that defines a strict weak temporal ordering of tasks.
+  class TemporalOrder {
+   public:
+    bool operator()(const TestOrderedPendingTask& first_task,
+                    const TestOrderedPendingTask& second_task) const;
+  };
+
+  typedef std::priority_queue<TestOrderedPendingTask,
+                              std::vector<TestOrderedPendingTask>,
+                              TemporalOrder> TaskPriorityQueue;
+
+  // Core of the implementation for all flavors of fast-forward methods. Given a
+  // non-negative |max_delta|, runs all tasks with a remaining delay less than
+  // or equal to |max_delta|, and moves virtual time forward as needed for each
+  // processed task. Pass in TimeDelta::Max() as |max_delta| to run all tasks.
+  void ProcessAllTasksNoLaterThan(TimeDelta max_delta);
+
+  // Forwards |now_ticks_| until it equals |later_ticks|, and forwards |now_| by
+  // the same amount. Calls OnAfterTimePassed() if |later_ticks| > |now_ticks_|.
+  // Does nothing if |later_ticks| <= |now_ticks_|.
+  void ForwardClocksUntilTickTime(TimeTicks later_ticks);
+
+  // Returns the |next_task| to run if there is any with a running time that is
+  // at most |reference| + |max_delta|. This additional complexity is required
+  // so that |max_delta| == TimeDelta::Max() can be supported.
+  bool DequeueNextTask(const TimeTicks& reference,
+                       const TimeDelta& max_delta,
+                       TestPendingTask* next_task);
+
+  // RunLoop::Delegate:
+  void Run(bool application_tasks_allowed) override;
+  void Quit() override;
+  void EnsureWorkScheduled() override;
+
+  // Also used for non-dcheck logic (RunsTasksInCurrentSequence()) and as such
+  // needs to be a ThreadCheckerImpl.
+  ThreadCheckerImpl thread_checker_;
+
+  Time now_;
+  TimeTicks now_ticks_;
+
+  // Temporally ordered heap of pending tasks. Must only be accessed while the
+  // |tasks_lock_| is held.
+  TaskPriorityQueue tasks_;
+
+  // The ordinal to use for the next task. Must only be accessed while the
+  // |tasks_lock_| is held.
+  size_t next_task_ordinal_ = 0;
+
+  mutable Lock tasks_lock_;
+  ConditionVariable tasks_lock_cv_;
+  std::unique_ptr<ThreadTaskRunnerHandle> thread_task_runner_handle_;
+
+  // Set to true in RunLoop::Delegate::Quit() to signal the topmost
+  // RunLoop::Delegate::Run() instance to stop, reset to false when it does.
+  bool quit_run_loop_ = false;
+
+  mutable MockClock mock_clock_;
+
+  DISALLOW_COPY_AND_ASSIGN(TestMockTimeTaskRunner);
+};
+
+}  // namespace base
+
+#endif  // BASE_TEST_TEST_MOCK_TIME_TASK_RUNNER_H_
diff --git a/base/test/test_mock_time_task_runner_unittest.cc b/base/test/test_mock_time_task_runner_unittest.cc
new file mode 100644
index 0000000..04be466
--- /dev/null
+++ b/base/test/test_mock_time_task_runner_unittest.cc
@@ -0,0 +1,262 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/test_mock_time_task_runner.h"
+
+#include "base/cancelable_callback.h"
+#include "base/memory/ref_counted.h"
+#include "base/run_loop.h"
+#include "base/test/gtest_util.h"
+#include "base/test/test_timeouts.h"
+#include "base/threading/sequenced_task_runner_handle.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+// Basic usage should work the same from default and bound
+// TestMockTimeTaskRunners.
+TEST(TestMockTimeTaskRunnerTest, Basic) {
+  static constexpr TestMockTimeTaskRunner::Type kTestCases[] = {
+      TestMockTimeTaskRunner::Type::kStandalone,
+      TestMockTimeTaskRunner::Type::kBoundToThread};
+
+  for (auto type : kTestCases) {
+    SCOPED_TRACE(static_cast<int>(type));
+
+    auto mock_time_task_runner = MakeRefCounted<TestMockTimeTaskRunner>(type);
+    int counter = 0;
+
+    mock_time_task_runner->PostTask(
+        FROM_HERE,
+        base::Bind([](int* counter) { *counter += 1; }, Unretained(&counter)));
+    mock_time_task_runner->PostTask(
+        FROM_HERE,
+        base::Bind([](int* counter) { *counter += 32; }, Unretained(&counter)));
+    mock_time_task_runner->PostDelayedTask(
+        FROM_HERE,
+        base::Bind([](int* counter) { *counter += 256; }, Unretained(&counter)),
+        TimeDelta::FromSeconds(3));
+    mock_time_task_runner->PostDelayedTask(
+        FROM_HERE,
+        base::Bind([](int* counter) { *counter += 64; }, Unretained(&counter)),
+        TimeDelta::FromSeconds(1));
+    mock_time_task_runner->PostDelayedTask(
+        FROM_HERE,
+        base::Bind([](int* counter) { *counter += 1024; },
+                   Unretained(&counter)),
+        TimeDelta::FromMinutes(20));
+    mock_time_task_runner->PostDelayedTask(
+        FROM_HERE,
+        base::Bind([](int* counter) { *counter += 4096; },
+                   Unretained(&counter)),
+        TimeDelta::FromDays(20));
+
+    int expected_value = 0;
+    EXPECT_EQ(expected_value, counter);
+    mock_time_task_runner->RunUntilIdle();
+    expected_value += 1;
+    expected_value += 32;
+    EXPECT_EQ(expected_value, counter);
+
+    mock_time_task_runner->RunUntilIdle();
+    EXPECT_EQ(expected_value, counter);
+
+    mock_time_task_runner->FastForwardBy(TimeDelta::FromSeconds(1));
+    expected_value += 64;
+    EXPECT_EQ(expected_value, counter);
+
+    mock_time_task_runner->FastForwardBy(TimeDelta::FromSeconds(5));
+    expected_value += 256;
+    EXPECT_EQ(expected_value, counter);
+
+    mock_time_task_runner->FastForwardUntilNoTasksRemain();
+    expected_value += 1024;
+    expected_value += 4096;
+    EXPECT_EQ(expected_value, counter);
+  }
+}
+
+// A default TestMockTimeTaskRunner shouldn't result in a thread association.
+TEST(TestMockTimeTaskRunnerTest, DefaultUnbound) {
+  auto unbound_mock_time_task_runner = MakeRefCounted<TestMockTimeTaskRunner>();
+  EXPECT_FALSE(ThreadTaskRunnerHandle::IsSet());
+  EXPECT_FALSE(SequencedTaskRunnerHandle::IsSet());
+  EXPECT_DEATH_IF_SUPPORTED({ RunLoop().RunUntilIdle(); }, "");
+}
+
+TEST(TestMockTimeTaskRunnerTest, RunLoopDriveableWhenBound) {
+  auto bound_mock_time_task_runner = MakeRefCounted<TestMockTimeTaskRunner>(
+      TestMockTimeTaskRunner::Type::kBoundToThread);
+
+  int counter = 0;
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE,
+      base::Bind([](int* counter) { *counter += 1; }, Unretained(&counter)));
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE,
+      base::Bind([](int* counter) { *counter += 32; }, Unretained(&counter)));
+  ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+      FROM_HERE,
+      base::Bind([](int* counter) { *counter += 256; }, Unretained(&counter)),
+      TimeDelta::FromSeconds(3));
+  ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+      FROM_HERE,
+      base::Bind([](int* counter) { *counter += 64; }, Unretained(&counter)),
+      TimeDelta::FromSeconds(1));
+  ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+      FROM_HERE,
+      base::Bind([](int* counter) { *counter += 1024; }, Unretained(&counter)),
+      TimeDelta::FromMinutes(20));
+  ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+      FROM_HERE,
+      base::Bind([](int* counter) { *counter += 4096; }, Unretained(&counter)),
+      TimeDelta::FromDays(20));
+
+  int expected_value = 0;
+  EXPECT_EQ(expected_value, counter);
+  RunLoop().RunUntilIdle();
+  expected_value += 1;
+  expected_value += 32;
+  EXPECT_EQ(expected_value, counter);
+
+  RunLoop().RunUntilIdle();
+  EXPECT_EQ(expected_value, counter);
+
+  {
+    RunLoop run_loop;
+    ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+        FROM_HERE, run_loop.QuitClosure(), TimeDelta::FromSeconds(1));
+    ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+        FROM_HERE,
+        base::Bind([](int* counter) { *counter += 8192; },
+                   Unretained(&counter)),
+        TimeDelta::FromSeconds(1));
+
+    // The QuitClosure() should be ordered between the 64 and the 8192
+    // increments and should preempt the latter.
+    run_loop.Run();
+    expected_value += 64;
+    EXPECT_EQ(expected_value, counter);
+
+    // Running until idle should process the 8192 increment whose delay has
+    // expired in the previous Run().
+    RunLoop().RunUntilIdle();
+    expected_value += 8192;
+    EXPECT_EQ(expected_value, counter);
+  }
+
+  {
+    RunLoop run_loop;
+    ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+        FROM_HERE, run_loop.QuitWhenIdleClosure(), TimeDelta::FromSeconds(5));
+    ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+        FROM_HERE,
+        base::Bind([](int* counter) { *counter += 16384; },
+                   Unretained(&counter)),
+        TimeDelta::FromSeconds(5));
+
+    // The QuitWhenIdleClosure() shouldn't preempt equally delayed tasks and as
+    // such the 16384 increment should be processed before quitting.
+    run_loop.Run();
+    expected_value += 256;
+    expected_value += 16384;
+    EXPECT_EQ(expected_value, counter);
+  }
+
+  // Process the remaining tasks. (Note: do not mimic this elsewhere;
+  // TestMockTimeTaskRunner::FastForwardUntilNoTasksRemain() is a better API
+  // for this. It is only done via RunLoop here to exercise the RunLoop
+  // approach extensively.)
+  RunLoop run_loop;
+  ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+      FROM_HERE, run_loop.QuitWhenIdleClosure(), TimeDelta::FromDays(50));
+
+  run_loop.Run();
+  expected_value += 1024;
+  expected_value += 4096;
+  EXPECT_EQ(expected_value, counter);
+}
+
+// Regression test verifying that receiving the quit-when-idle signal while the
+// task queue is already empty works as intended (i.e. that
+// |TestMockTimeTaskRunner::tasks_lock_cv_| is properly signaled).
+TEST(TestMockTimeTaskRunnerTest, RunLoopQuitFromIdle) {
+  auto bound_mock_time_task_runner = MakeRefCounted<TestMockTimeTaskRunner>(
+      TestMockTimeTaskRunner::Type::kBoundToThread);
+
+  Thread quitting_thread("quitting thread");
+  quitting_thread.Start();
+
+  RunLoop run_loop;
+  quitting_thread.task_runner()->PostDelayedTask(
+      FROM_HERE, run_loop.QuitWhenIdleClosure(), TestTimeouts::tiny_timeout());
+  run_loop.Run();
+}
+
+TEST(TestMockTimeTaskRunnerTest, TakePendingTasks) {
+  auto task_runner = MakeRefCounted<TestMockTimeTaskRunner>();
+  task_runner->PostTask(FROM_HERE, Bind([]() {}));
+  EXPECT_TRUE(task_runner->HasPendingTask());
+  EXPECT_EQ(1u, task_runner->TakePendingTasks().size());
+  EXPECT_FALSE(task_runner->HasPendingTask());
+}
+
+TEST(TestMockTimeTaskRunnerTest, CancelPendingTask) {
+  auto task_runner = MakeRefCounted<TestMockTimeTaskRunner>();
+  CancelableClosure task1(Bind([]() {}));
+  task_runner->PostDelayedTask(FROM_HERE, task1.callback(),
+                               TimeDelta::FromSeconds(1));
+  EXPECT_TRUE(task_runner->HasPendingTask());
+  EXPECT_EQ(1u, task_runner->GetPendingTaskCount());
+  EXPECT_EQ(TimeDelta::FromSeconds(1), task_runner->NextPendingTaskDelay());
+  task1.Cancel();
+  EXPECT_FALSE(task_runner->HasPendingTask());
+
+  CancelableClosure task2(Bind([]() {}));
+  task_runner->PostDelayedTask(FROM_HERE, task2.callback(),
+                               TimeDelta::FromSeconds(1));
+  task2.Cancel();
+  EXPECT_EQ(0u, task_runner->GetPendingTaskCount());
+
+  CancelableClosure task3(Bind([]() {}));
+  task_runner->PostDelayedTask(FROM_HERE, task3.callback(),
+                               TimeDelta::FromSeconds(1));
+  task3.Cancel();
+  EXPECT_EQ(TimeDelta::Max(), task_runner->NextPendingTaskDelay());
+
+  CancelableClosure task4(Bind([]() {}));
+  task_runner->PostDelayedTask(FROM_HERE, task4.callback(),
+                               TimeDelta::FromSeconds(1));
+  task4.Cancel();
+  EXPECT_TRUE(task_runner->TakePendingTasks().empty());
+}
+
+TEST(TestMockTimeTaskRunnerTest, NoFastForwardToCancelledTask) {
+  auto task_runner = MakeRefCounted<TestMockTimeTaskRunner>();
+  TimeTicks start_time = task_runner->NowTicks();
+  CancelableClosure task(Bind([]() {}));
+  task_runner->PostDelayedTask(FROM_HERE, task.callback(),
+                               TimeDelta::FromSeconds(1));
+  EXPECT_EQ(TimeDelta::FromSeconds(1), task_runner->NextPendingTaskDelay());
+  task.Cancel();
+  task_runner->FastForwardUntilNoTasksRemain();
+  EXPECT_EQ(start_time, task_runner->NowTicks());
+}
+
+TEST(TestMockTimeTaskRunnerTest, AdvanceMockTickClockDoesNotRunTasks) {
+  auto task_runner = MakeRefCounted<TestMockTimeTaskRunner>();
+  TimeTicks start_time = task_runner->NowTicks();
+  task_runner->PostTask(FROM_HERE, BindOnce([]() { ADD_FAILURE(); }));
+  task_runner->PostDelayedTask(FROM_HERE, BindOnce([]() { ADD_FAILURE(); }),
+                               TimeDelta::FromSeconds(1));
+
+  task_runner->AdvanceMockTickClock(TimeDelta::FromSeconds(3));
+  EXPECT_EQ(start_time + TimeDelta::FromSeconds(3), task_runner->NowTicks());
+  EXPECT_EQ(2u, task_runner->GetPendingTaskCount());
+}
+
+}  // namespace base
diff --git a/base/test/test_pending_task.cc b/base/test/test_pending_task.cc
new file mode 100644
index 0000000..f9cfa8e
--- /dev/null
+++ b/base/test/test_pending_task.cc
@@ -0,0 +1,81 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/test_pending_task.h"
+
+#include <string>
+#include <utility>
+
+namespace base {
+
+TestPendingTask::TestPendingTask() : nestability(NESTABLE) {}
+
+TestPendingTask::TestPendingTask(const Location& location,
+                                 OnceClosure task,
+                                 TimeTicks post_time,
+                                 TimeDelta delay,
+                                 TestNestability nestability)
+    : location(location),
+      task(std::move(task)),
+      post_time(post_time),
+      delay(delay),
+      nestability(nestability) {}
+
+TestPendingTask::TestPendingTask(TestPendingTask&& other) = default;
+
+TestPendingTask& TestPendingTask::operator=(TestPendingTask&& other) = default;
+
+TimeTicks TestPendingTask::GetTimeToRun() const {
+  return post_time + delay;
+}
+
+bool TestPendingTask::ShouldRunBefore(const TestPendingTask& other) const {
+  if (nestability != other.nestability)
+    return (nestability == NESTABLE);
+  return GetTimeToRun() < other.GetTimeToRun();
+}
+
+TestPendingTask::~TestPendingTask() = default;
+
+void TestPendingTask::AsValueInto(base::trace_event::TracedValue* state) const {
+  state->SetInteger("run_at", GetTimeToRun().ToInternalValue());
+  state->SetString("posting_function", location.ToString());
+  state->SetInteger("post_time", post_time.ToInternalValue());
+  state->SetInteger("delay", delay.ToInternalValue());
+  switch (nestability) {
+    case NESTABLE:
+      state->SetString("nestability", "NESTABLE");
+      break;
+    case NON_NESTABLE:
+      state->SetString("nestability", "NON_NESTABLE");
+      break;
+  }
+}
+
+std::unique_ptr<base::trace_event::ConvertableToTraceFormat>
+TestPendingTask::AsValue() const {
+  std::unique_ptr<base::trace_event::TracedValue> state(
+      new base::trace_event::TracedValue());
+  AsValueInto(state.get());
+  return std::move(state);
+}
+
+std::string TestPendingTask::ToString() const {
+  std::string output("TestPendingTask(");
+  AsValue()->AppendAsTraceFormat(&output);
+  output += ")";
+  return output;
+}
+
+std::ostream& operator<<(std::ostream& os, const TestPendingTask& task) {
+  PrintTo(task, &os);
+  return os;
+}
+
+void PrintTo(const TestPendingTask& task, std::ostream* os) {
+  *os << task.ToString();
+}
+
+}  // namespace base
diff --git a/base/test/test_pending_task.h b/base/test/test_pending_task.h
new file mode 100644
index 0000000..460de0e
--- /dev/null
+++ b/base/test/test_pending_task.h
@@ -0,0 +1,78 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_TEST_PENDING_TASK_H_
+#define BASE_TEST_TEST_PENDING_TASK_H_
+
+#include <memory>
+#include <string>
+
+#include "base/callback.h"
+#include "base/location.h"
+#include "base/macros.h"
+#include "base/time/time.h"
+#include "base/trace_event/trace_event_argument.h"
+
+namespace base {
+
+// TestPendingTask is a helper class for test TaskRunner
+// implementations.  See test_simple_task_runner.h for example usage.
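+//
+// A minimal construction sketch (illustrative):
+//
+//   base::TestPendingTask immediate(FROM_HERE, base::BindOnce([] {}),
+//                                   base::TimeTicks(), base::TimeDelta(),
+//                                   base::TestPendingTask::NESTABLE);
+//   base::TestPendingTask delayed(FROM_HERE, base::BindOnce([] {}),
+//                                 base::TimeTicks(),
+//                                 base::TimeDelta::FromSeconds(1),
+//                                 base::TestPendingTask::NESTABLE);
+//   EXPECT_TRUE(immediate.ShouldRunBefore(delayed));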
+
+struct TestPendingTask {
+  enum TestNestability { NESTABLE, NON_NESTABLE };
+
+  TestPendingTask();
+  TestPendingTask(TestPendingTask&& other);
+  TestPendingTask(const Location& location,
+                  OnceClosure task,
+                  TimeTicks post_time,
+                  TimeDelta delay,
+                  TestNestability nestability);
+  ~TestPendingTask();
+
+  TestPendingTask& operator=(TestPendingTask&& other);
+
+  // Returns post_time + delay.
+  TimeTicks GetTimeToRun() const;
+
+  // Returns true if this task is nestable and |other| isn't, or if
+  // this task's time to run is strictly earlier than |other|'s time
+  // to run.
+  //
+  // Note that two tasks may both have the same nestability and delay.
+  // In that case, the caller must use some other criterion (probably
+  // the position in some queue) to break the tie.  Conveniently, the
+  // following STL functions already do so:
+  //
+  //   - std::min_element
+  //   - std::stable_sort
+  //
+  // but the following STL functions don't:
+  //
+  //   - std::max_element
+  //   - std::sort.
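+  //
+  // For instance (an illustrative sketch, assuming |tasks| is a
+  // std::vector<TestPendingTask>):
+  //
+  //   std::stable_sort(tasks.begin(), tasks.end(),
+  //                    [](const TestPendingTask& a, const TestPendingTask& b) {
+  //                      return a.ShouldRunBefore(b);
+  //                    });
+  //
+  // preserves FIFO order among tasks that tie on nestability and run time.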
+  bool ShouldRunBefore(const TestPendingTask& other) const;
+
+  Location location;
+  OnceClosure task;
+  TimeTicks post_time;
+  TimeDelta delay;
+  TestNestability nestability;
+
+  // Functions for using TestPendingTask with tracing; useful in unit tests.
+  void AsValueInto(base::trace_event::TracedValue* state) const;
+  std::unique_ptr<base::trace_event::ConvertableToTraceFormat> AsValue() const;
+  std::string ToString() const;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(TestPendingTask);
+};
+
+// gtest helpers that allow pretty-printing of the tasks; very useful in unit
+// tests.
+std::ostream& operator<<(std::ostream& os, const TestPendingTask& task);
+void PrintTo(const TestPendingTask& task, std::ostream* os);
+
+}  // namespace base
+
+#endif  // BASE_TEST_TEST_PENDING_TASK_H_
diff --git a/base/test/test_pending_task_unittest.cc b/base/test/test_pending_task_unittest.cc
new file mode 100644
index 0000000..6e01c8c
--- /dev/null
+++ b/base/test/test_pending_task_unittest.cc
@@ -0,0 +1,57 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/test_pending_task.h"
+
+#include "base/bind.h"
+#include "base/trace_event/trace_event.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest-spi.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(TestPendingTaskTest, TraceSupport) {
+  base::TestPendingTask task;
+
+  // Check that TestPendingTask can be sent to the trace subsystem.
+  TRACE_EVENT1("test", "TestPendingTask::TraceSupport", "task", task.AsValue());
+
+  // Just a basic check that the trace output has *something* in it.
+  std::unique_ptr<base::trace_event::ConvertableToTraceFormat> task_value(
+      task.AsValue());
+  EXPECT_THAT(task_value->ToString(), ::testing::HasSubstr("post_time"));
+}
+
+TEST(TestPendingTaskTest, ToString) {
+  base::TestPendingTask task;
+
+  // Just a basic check that ToString has *something* in it.
+  EXPECT_THAT(task.ToString(), ::testing::StartsWith("TestPendingTask("));
+}
+
+TEST(TestPendingTaskTest, GTestPrettyPrint) {
+  base::TestPendingTask task;
+
+  // Check that gtest is calling the TestPendingTask's PrintTo method.
+  EXPECT_THAT(::testing::PrintToString(task),
+              ::testing::StartsWith("TestPendingTask("));
+
+  // Check that pretty printing works with the gtest iostreams operator.
+  EXPECT_NONFATAL_FAILURE(EXPECT_TRUE(false) << task, "TestPendingTask(");
+}
+
+TEST(TestPendingTaskTest, ShouldRunBefore) {
+  base::TestPendingTask task_first;
+  task_first.delay = base::TimeDelta::FromMilliseconds(1);
+  base::TestPendingTask task_after;
+  task_after.delay = base::TimeDelta::FromMilliseconds(2);
+
+  EXPECT_FALSE(task_after.ShouldRunBefore(task_first))
+      << task_after << ".ShouldRunBefore(" << task_first << ")\n";
+  EXPECT_TRUE(task_first.ShouldRunBefore(task_after))
+      << task_first << ".ShouldRunBefore(" << task_after << ")\n";
+}
+
+}  // namespace base
diff --git a/base/test/test_reg_util_win.cc b/base/test/test_reg_util_win.cc
new file mode 100644
index 0000000..9ce4ad1
--- /dev/null
+++ b/base/test/test_reg_util_win.cc
@@ -0,0 +1,122 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/test_reg_util_win.h"
+
+#include <stdint.h>
+
+#include "base/guid.h"
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#include <windows.h>
+
+namespace registry_util {
+
+namespace {
+
+const wchar_t kTimestampDelimiter[] = L"$";
+const wchar_t kTempTestKeyPath[] = L"Software\\Chromium\\TempTestKeys";
+
+void DeleteStaleTestKeys(const base::Time& now,
+                         const base::string16& test_key_root) {
+  base::win::RegKey test_root_key;
+  if (test_root_key.Open(HKEY_CURRENT_USER,
+                         test_key_root.c_str(),
+                         KEY_ALL_ACCESS) != ERROR_SUCCESS) {
+    // This will occur on first-run, but is harmless.
+    return;
+  }
+
+  base::win::RegistryKeyIterator iterator_test_root_key(HKEY_CURRENT_USER,
+                                                        test_key_root.c_str());
+  for (; iterator_test_root_key.Valid(); ++iterator_test_root_key) {
+    base::string16 key_name = iterator_test_root_key.Name();
+    std::vector<base::string16> tokens = base::SplitString(
+        key_name, kTimestampDelimiter, base::KEEP_WHITESPACE,
+        base::SPLIT_WANT_NONEMPTY);
+    if (tokens.empty())
+      continue;
+    int64_t key_name_as_number = 0;
+
+    if (!base::StringToInt64(tokens[0], &key_name_as_number)) {
+      test_root_key.DeleteKey(key_name.c_str());
+      continue;
+    }
+
+    base::Time key_time = base::Time::FromInternalValue(key_name_as_number);
+    base::TimeDelta age = now - key_time;
+
+    if (age > base::TimeDelta::FromHours(24))
+      test_root_key.DeleteKey(key_name.c_str());
+  }
+}
+
+base::string16 GenerateTempKeyPath(const base::string16& test_key_root,
+                                   const base::Time& timestamp) {
+  base::string16 key_path = test_key_root;
+  key_path += L"\\" + base::Int64ToString16(timestamp.ToInternalValue());
+  key_path += kTimestampDelimiter + base::ASCIIToUTF16(base::GenerateGUID());
+
+  return key_path;
+}
+
+}  // namespace
+
+RegistryOverrideManager::ScopedRegistryKeyOverride::ScopedRegistryKeyOverride(
+    HKEY override,
+    const base::string16& key_path)
+    : override_(override), key_path_(key_path) {}
+
+RegistryOverrideManager::
+    ScopedRegistryKeyOverride::~ScopedRegistryKeyOverride() {
+  ::RegOverridePredefKey(override_, NULL);
+  base::win::RegKey(HKEY_CURRENT_USER, L"", KEY_QUERY_VALUE)
+      .DeleteKey(key_path_.c_str());
+}
+
+RegistryOverrideManager::RegistryOverrideManager()
+    : timestamp_(base::Time::Now()), test_key_root_(kTempTestKeyPath) {
+  DeleteStaleTestKeys(timestamp_, test_key_root_);
+}
+
+RegistryOverrideManager::RegistryOverrideManager(
+    const base::Time& timestamp,
+    const base::string16& test_key_root)
+    : timestamp_(timestamp), test_key_root_(test_key_root) {
+  DeleteStaleTestKeys(timestamp_, test_key_root_);
+}
+
+RegistryOverrideManager::~RegistryOverrideManager() {}
+
+void RegistryOverrideManager::OverrideRegistry(HKEY override) {
+  OverrideRegistry(override, nullptr);
+}
+
+void RegistryOverrideManager::OverrideRegistry(HKEY override,
+                                               base::string16* override_path) {
+  base::string16 key_path = GenerateTempKeyPath(test_key_root_, timestamp_);
+
+  base::win::RegKey temp_key;
+  ASSERT_EQ(ERROR_SUCCESS, temp_key.Create(HKEY_CURRENT_USER, key_path.c_str(),
+                                           KEY_ALL_ACCESS));
+  ASSERT_EQ(ERROR_SUCCESS, ::RegOverridePredefKey(override, temp_key.Handle()));
+
+  overrides_.push_back(
+      std::make_unique<ScopedRegistryKeyOverride>(override, key_path));
+  if (override_path)
+    override_path->assign(key_path);
+}
+
+base::string16 GenerateTempKeyPath() {
+  return GenerateTempKeyPath(base::string16(kTempTestKeyPath),
+                             base::Time::Now());
+}
+
+}  // namespace registry_util
diff --git a/base/test/test_reg_util_win.h b/base/test/test_reg_util_win.h
new file mode 100644
index 0000000..d74028a
--- /dev/null
+++ b/base/test/test_reg_util_win.h
@@ -0,0 +1,82 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_TEST_REG_UTIL_WIN_H_
+#define BASE_TEST_TEST_REG_UTIL_WIN_H_
+
+// Registry utility functions used only by tests.
+
+#include <memory>
+#include <vector>
+
+#include "base/macros.h"
+#include "base/strings/string16.h"
+#include "base/time/time.h"
+#include "base/win/registry.h"
+
+namespace registry_util {
+
+// Allows a test to easily override registry hives so that it can start from a
+// known good state, or to make sure it leaves no side effects once the test
+// completes. This supports parallel tests. All the overrides are scoped to the
+// lifetime of the override manager. Destroy the manager to undo the overrides.
+//
+// Overridden hives use keys stored at, for instance:
+//   HKCU\Software\Chromium\TempTestKeys\
+//       13028145911617809$02AB211C-CF73-478D-8D91-618E11998AED
+// The key path comprises:
+//   - The test key root, HKCU\Software\Chromium\TempTestKeys\
+//   - The base::Time::ToInternalValue of the creation time. This is used to
+//     delete stale keys left over from crashed tests.
+//   - A GUID used for preventing name collisions (although unlikely) between
+//     two RegistryOverrideManagers created with the same timestamp.
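+//
+// A minimal usage sketch (illustrative) from a test body:
+//
+//   registry_util::RegistryOverrideManager override_manager;
+//   ASSERT_NO_FATAL_FAILURE(
+//       override_manager.OverrideRegistry(HKEY_CURRENT_USER));
+//   // Reads and writes under HKCU now go to the temporary key until
+//   // |override_manager| is destroyed.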
+class RegistryOverrideManager {
+ public:
+  RegistryOverrideManager();
+  ~RegistryOverrideManager();
+
+  // Override the given registry hive using a randomly generated temporary key.
+  // Multiple overrides to the same hive are not supported and lead to undefined
+  // behavior.
+  // The second overload returns the generated override key path via
+  // |override_path|.
+  // Calls to these functions must be wrapped in ASSERT_NO_FATAL_FAILURE to
+  // ensure that tests do not proceed if the override fails.
+  void OverrideRegistry(HKEY override);
+  void OverrideRegistry(HKEY override, base::string16* override_path);
+
+ private:
+  friend class RegistryOverrideManagerTest;
+
+  // Keeps track of one override.
+  class ScopedRegistryKeyOverride {
+   public:
+    ScopedRegistryKeyOverride(HKEY override, const base::string16& key_path);
+    ~ScopedRegistryKeyOverride();
+
+   private:
+    HKEY override_;
+    base::string16 key_path_;
+
+    DISALLOW_COPY_AND_ASSIGN(ScopedRegistryKeyOverride);
+  };
+
+  // Used for testing only.
+  RegistryOverrideManager(const base::Time& timestamp,
+                          const base::string16& test_key_root);
+
+  base::Time timestamp_;
+  base::string16 guid_;
+
+  base::string16 test_key_root_;
+  std::vector<std::unique_ptr<ScopedRegistryKeyOverride>> overrides_;
+
+  DISALLOW_COPY_AND_ASSIGN(RegistryOverrideManager);
+};
+
+// Generates a temporary key path that will be eventually deleted
+// automatically if the process crashes.
+base::string16 GenerateTempKeyPath();
+
+}  // namespace registry_util
+
+#endif  // BASE_TEST_TEST_REG_UTIL_WIN_H_
diff --git a/base/test/test_reg_util_win_unittest.cc b/base/test/test_reg_util_win_unittest.cc
new file mode 100644
index 0000000..ca3bc99
--- /dev/null
+++ b/base/test/test_reg_util_win_unittest.cc
@@ -0,0 +1,133 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/test_reg_util_win.h"
+
+#include <memory>
+
+#include "base/compiler_specific.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace registry_util {
+
+namespace {
+const wchar_t kTestKeyPath[] = L"Software\\Chromium\\Foo\\Baz\\TestKey";
+const wchar_t kTestValueName[] = L"TestValue";
+}  // namespace
+
+class RegistryOverrideManagerTest : public testing::Test {
+ protected:
+  RegistryOverrideManagerTest() {
+    // We assign a fake test key path to our test RegistryOverrideManager
+    // so we don't interfere with any actual RegistryOverrideManagers running
+    // on the system. This fake path will be auto-deleted by other
+    // RegistryOverrideManagers in case we crash.
+    fake_test_key_root_ = registry_util::GenerateTempKeyPath();
+
+    // Ensure a clean test environment.
+    base::win::RegKey key(HKEY_CURRENT_USER);
+    key.DeleteKey(fake_test_key_root_.c_str());
+    key.DeleteKey(kTestKeyPath);
+  }
+
+  ~RegistryOverrideManagerTest() override {
+    base::win::RegKey key(HKEY_CURRENT_USER);
+    key.DeleteKey(fake_test_key_root_.c_str());
+  }
+
+  void AssertKeyExists(const base::string16& key_path) {
+    base::win::RegKey key;
+    ASSERT_EQ(ERROR_SUCCESS,
+              key.Open(HKEY_CURRENT_USER, key_path.c_str(), KEY_READ))
+        << key_path << " does not exist.";
+  }
+
+  void AssertKeyAbsent(const base::string16& key_path) {
+    base::win::RegKey key;
+    ASSERT_NE(ERROR_SUCCESS,
+              key.Open(HKEY_CURRENT_USER, key_path.c_str(), KEY_READ))
+        << key_path << " exists but it should not.";
+  }
+
+  void CreateKey(const base::string16& key_path) {
+    base::win::RegKey key;
+    ASSERT_EQ(ERROR_SUCCESS,
+              key.Create(HKEY_CURRENT_USER, key_path.c_str(), KEY_ALL_ACCESS));
+  }
+
+  base::string16 FakeOverrideManagerPath(const base::Time& time) {
+    return fake_test_key_root_ + L"\\" +
+           base::Int64ToString16(time.ToInternalValue());
+  }
+
+  void CreateManager(const base::Time& timestamp) {
+    manager_.reset(new RegistryOverrideManager(timestamp, fake_test_key_root_));
+    manager_->OverrideRegistry(HKEY_CURRENT_USER);
+  }
+
+  base::string16 fake_test_key_root_;
+  std::unique_ptr<RegistryOverrideManager> manager_;
+};
+
+TEST_F(RegistryOverrideManagerTest, Basic) {
+  ASSERT_NO_FATAL_FAILURE(CreateManager(base::Time::Now()));
+
+  base::win::RegKey create_key;
+  EXPECT_EQ(ERROR_SUCCESS,
+            create_key.Create(HKEY_CURRENT_USER, kTestKeyPath, KEY_ALL_ACCESS));
+  EXPECT_TRUE(create_key.Valid());
+  EXPECT_EQ(ERROR_SUCCESS, create_key.WriteValue(kTestValueName, 42));
+  create_key.Close();
+
+  ASSERT_NO_FATAL_FAILURE(AssertKeyExists(kTestKeyPath));
+
+  DWORD value;
+  base::win::RegKey read_key;
+  EXPECT_EQ(ERROR_SUCCESS,
+            read_key.Open(HKEY_CURRENT_USER, kTestKeyPath, KEY_READ));
+  EXPECT_TRUE(read_key.Valid());
+  EXPECT_EQ(ERROR_SUCCESS, read_key.ReadValueDW(kTestValueName, &value));
+  EXPECT_EQ(42u, value);
+  read_key.Close();
+
+  manager_.reset();
+
+  ASSERT_NO_FATAL_FAILURE(AssertKeyAbsent(kTestKeyPath));
+}
+
+TEST_F(RegistryOverrideManagerTest, DeleteStaleKeys) {
+  base::Time::Exploded kTestTimeExploded = {2013, 11, 1, 4, 0, 0, 0, 0};
+  base::Time kTestTime;
+  EXPECT_TRUE(base::Time::FromUTCExploded(kTestTimeExploded, &kTestTime));
+
+  base::string16 path_garbage = fake_test_key_root_ + L"\\Blah";
+  base::string16 path_very_stale =
+      FakeOverrideManagerPath(kTestTime - base::TimeDelta::FromDays(100));
+  base::string16 path_stale =
+      FakeOverrideManagerPath(kTestTime - base::TimeDelta::FromDays(5));
+  base::string16 path_current =
+      FakeOverrideManagerPath(kTestTime - base::TimeDelta::FromMinutes(1));
+  base::string16 path_future =
+      FakeOverrideManagerPath(kTestTime + base::TimeDelta::FromMinutes(1));
+
+  ASSERT_NO_FATAL_FAILURE(CreateKey(path_garbage));
+  ASSERT_NO_FATAL_FAILURE(CreateKey(path_very_stale));
+  ASSERT_NO_FATAL_FAILURE(CreateKey(path_stale));
+  ASSERT_NO_FATAL_FAILURE(CreateKey(path_current));
+  ASSERT_NO_FATAL_FAILURE(CreateKey(path_future));
+
+  ASSERT_NO_FATAL_FAILURE(CreateManager(kTestTime));
+  manager_.reset();
+
+  ASSERT_NO_FATAL_FAILURE(AssertKeyAbsent(path_garbage));
+  ASSERT_NO_FATAL_FAILURE(AssertKeyAbsent(path_very_stale));
+  ASSERT_NO_FATAL_FAILURE(AssertKeyAbsent(path_stale));
+  ASSERT_NO_FATAL_FAILURE(AssertKeyExists(path_current));
+  ASSERT_NO_FATAL_FAILURE(AssertKeyExists(path_future));
+}
+
+}  // namespace registry_util
diff --git a/base/test/test_shared_library.cc b/base/test/test_shared_library.cc
new file mode 100644
index 0000000..99c0467
--- /dev/null
+++ b/base/test/test_shared_library.cc
@@ -0,0 +1,30 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/native_library_test_utils.h"
+
+extern "C" {
+
+int NATIVE_LIBRARY_TEST_ALWAYS_EXPORT GetExportedValue() {
+  return g_native_library_exported_value;
+}
+
+void NATIVE_LIBRARY_TEST_ALWAYS_EXPORT SetExportedValue(int value) {
+  g_native_library_exported_value = value;
+}
+
+// A test function used only to verify basic dynamic symbol resolution.
+int NATIVE_LIBRARY_TEST_ALWAYS_EXPORT GetSimpleTestValue() {
+  return 5;
+}
+
+// When called by |NativeLibraryTest.LoadLibraryPreferOwnSymbols|, this should
+// forward to the local definition of NativeLibraryTestIncrement(), even though
+// the test module also links in the native_library_test_utils source library
+// which exports it.
+int NATIVE_LIBRARY_TEST_ALWAYS_EXPORT GetIncrementValue() {
+  return NativeLibraryTestIncrement();
+}
+
+}  // extern "C"
diff --git a/base/test/test_shared_memory_util.cc b/base/test/test_shared_memory_util.cc
new file mode 100644
index 0000000..cfc96a9
--- /dev/null
+++ b/base/test/test_shared_memory_util.cc
@@ -0,0 +1,187 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/test_shared_memory_util.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/logging.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_POSIX) && !defined(OS_NACL)
+#include <errno.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#endif
+
+#if defined(OS_FUCHSIA)
+#include <zircon/process.h>
+#include <zircon/rights.h>
+#include <zircon/syscalls.h>
+#endif
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+#include <mach/mach_vm.h>
+#endif
+
+#if defined(OS_WIN)
+#include <aclapi.h>
+#endif
+
+namespace base {
+
+#if !defined(OS_NACL)
+
+static const size_t kDataSize = 1024;
+
+// Common routine used with POSIX file descriptors. Checks that the shared
+// memory file descriptor |fd| does not allow writable mappings. Returns true
+// on success, false otherwise.
+#if defined(OS_POSIX) && !defined(OS_FUCHSIA)
+static bool CheckReadOnlySharedMemoryFdPosix(int fd) {
+// Note that the error on Android is EPERM, unlike other platforms where
+// it will be EACCES.
+#if defined(OS_ANDROID)
+  const int kExpectedErrno = EPERM;
+#else
+  const int kExpectedErrno = EACCES;
+#endif
+  errno = 0;
+  void* address =
+      mmap(nullptr, kDataSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+  const bool success = (address != nullptr) && (address != MAP_FAILED);
+  if (success) {
+    LOG(ERROR) << "mmap() should have failed!";
+    munmap(address, kDataSize);  // Cleanup.
+    return false;
+  }
+  if (errno != kExpectedErrno) {
+    LOG(ERROR) << "Expected mmap() to return " << kExpectedErrno
+               << " but returned " << errno << ": " << strerror(errno) << "\n";
+    return false;
+  }
+  return true;
+}
+#endif  // OS_POSIX && !OS_FUCHSIA
+
+#if defined(OS_FUCHSIA)
+// Fuchsia specific implementation.
+bool CheckReadOnlySharedMemoryFuchsiaHandle(zx_handle_t handle) {
+  const uint32_t flags = ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE;
+  uintptr_t addr;
+  const zx_handle_t root = zx_vmar_root_self();
+  const zx_status_t status =
+      zx_vmar_map(root, 0, handle, 0U, kDataSize, flags, &addr);
+  if (status == ZX_OK) {
+    LOG(ERROR) << "zx_vmar_map() should have failed!";
+    zx_vmar_unmap(root, addr, kDataSize);
+    return false;
+  }
+  if (status != ZX_ERR_ACCESS_DENIED) {
+    LOG(ERROR) << "Expected zx_vmar_map() to return " << ZX_ERR_ACCESS_DENIED
+               << " (ZX_ERR_ACCESS_DENIED) but returned " << status << "\n";
+    return false;
+  }
+  return true;
+}
+
+#elif defined(OS_MACOSX) && !defined(OS_IOS)
+bool CheckReadOnlySharedMemoryMachPort(mach_port_t memory_object) {
+  mach_vm_address_t memory;
+  const kern_return_t kr = mach_vm_map(
+      mach_task_self(), &memory, kDataSize, 0, VM_FLAGS_ANYWHERE, memory_object,
+      0, FALSE, VM_PROT_READ | VM_PROT_WRITE,
+      VM_PROT_READ | VM_PROT_WRITE | VM_PROT_IS_MASK, VM_INHERIT_NONE);
+  if (kr == KERN_SUCCESS) {
+    LOG(ERROR) << "mach_vm_map() should have failed!";
+    mach_vm_deallocate(mach_task_self(), memory, kDataSize);  // Cleanup.
+    return false;
+  }
+  return true;
+}
+
+#elif defined(OS_WIN)
+bool CheckReadOnlySharedMemoryWindowsHandle(HANDLE handle) {
+  void* memory =
+      MapViewOfFile(handle, FILE_MAP_READ | FILE_MAP_WRITE, 0, 0, kDataSize);
+  if (memory != nullptr) {
+    LOG(ERROR) << "MapViewOfFile() should have failed!";
+    UnmapViewOfFile(memory);
+    return false;
+  }
+  return true;
+}
+#endif
+
+bool CheckReadOnlySharedMemoryHandleForTesting(SharedMemoryHandle handle) {
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+  // For OSX, the code has to deal with both POSIX and MACH handles.
+  if (handle.type_ == SharedMemoryHandle::POSIX)
+    return CheckReadOnlySharedMemoryFdPosix(handle.file_descriptor_.fd);
+  else
+    return CheckReadOnlySharedMemoryMachPort(handle.memory_object_);
+#elif defined(OS_FUCHSIA)
+  return CheckReadOnlySharedMemoryFuchsiaHandle(handle.GetHandle());
+#elif defined(OS_WIN)
+  return CheckReadOnlySharedMemoryWindowsHandle(handle.GetHandle());
+#else
+  return CheckReadOnlySharedMemoryFdPosix(handle.GetHandle());
+#endif
+}
+
+bool CheckReadOnlyPlatformSharedMemoryRegionForTesting(
+    subtle::PlatformSharedMemoryRegion region) {
+  if (region.GetMode() != subtle::PlatformSharedMemoryRegion::Mode::kReadOnly) {
+    LOG(ERROR) << "Expected region mode is "
+               << static_cast<int>(
+                      subtle::PlatformSharedMemoryRegion::Mode::kReadOnly)
+               << " but actual is " << static_cast<int>(region.GetMode());
+    return false;
+  }
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+  return CheckReadOnlySharedMemoryMachPort(region.GetPlatformHandle());
+#elif defined(OS_FUCHSIA)
+  return CheckReadOnlySharedMemoryFuchsiaHandle(region.GetPlatformHandle());
+#elif defined(OS_WIN)
+  return CheckReadOnlySharedMemoryWindowsHandle(region.GetPlatformHandle());
+#elif defined(OS_ANDROID)
+  return CheckReadOnlySharedMemoryFdPosix(region.GetPlatformHandle());
+#else
+  return CheckReadOnlySharedMemoryFdPosix(region.GetPlatformHandle().fd);
+#endif
+}
+
+#endif  // !OS_NACL
+
+WritableSharedMemoryMapping MapForTesting(
+    subtle::PlatformSharedMemoryRegion* region) {
+  return MapAtForTesting(region, 0, region->GetSize());
+}
+
+WritableSharedMemoryMapping MapAtForTesting(
+    subtle::PlatformSharedMemoryRegion* region,
+    off_t offset,
+    size_t size) {
+  void* memory = nullptr;
+  size_t mapped_size = 0;
+  if (!region->MapAt(offset, size, &memory, &mapped_size))
+    return {};
+
+  return WritableSharedMemoryMapping(memory, size, mapped_size,
+                                     region->GetGUID());
+}
+
+template <>
+std::pair<ReadOnlySharedMemoryRegion, WritableSharedMemoryMapping>
+CreateMappedRegion(size_t size) {
+  MappedReadOnlyRegion mapped_region = ReadOnlySharedMemoryRegion::Create(size);
+  return {std::move(mapped_region.region), std::move(mapped_region.mapping)};
+}
+
+}  // namespace base
diff --git a/base/test/test_shared_memory_util.h b/base/test/test_shared_memory_util.h
new file mode 100644
index 0000000..d89f11d
--- /dev/null
+++ b/base/test/test_shared_memory_util.h
@@ -0,0 +1,56 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_TEST_SHARED_MEMORY_UTIL_H_
+#define BASE_TEST_TEST_SHARED_MEMORY_UTIL_H_
+
+#include "base/memory/platform_shared_memory_region.h"
+#include "base/memory/read_only_shared_memory_region.h"
+#include "base/memory/shared_memory_handle.h"
+#include "base/memory/shared_memory_mapping.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+// Checks that the shared memory |handle| cannot be used to perform a writable
+// mapping with low-level system APIs like mmap(). Returns true in case of
+// success (i.e. writable mappings are _not_ allowed), or false otherwise.
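+//
+// For example (illustrative sketch; assumes base::SharedMemory from
+// base/memory/shared_memory.h):
+//
+//   SharedMemory shared_memory;
+//   ASSERT_TRUE(shared_memory.CreateAndMapAnonymous(1024));
+//   EXPECT_TRUE(CheckReadOnlySharedMemoryHandleForTesting(
+//       shared_memory.GetReadOnlyHandle()));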
+bool CheckReadOnlySharedMemoryHandleForTesting(SharedMemoryHandle handle);
+
+bool CheckReadOnlyPlatformSharedMemoryRegionForTesting(
+    subtle::PlatformSharedMemoryRegion region);
+
+// Creates a scoped mapping from a PlatformSharedMemoryRegion. This is useful
+// in PlatformSharedMemoryRegion tests to avoid leaking mapped memory.
+// WritableSharedMemoryMapping is used as the wrapper because it grants the
+// widest capabilities, but the actual permissions depend on the |region|'s
+// mode.
+// This must not be used in production where PlatformSharedMemoryRegion should
+// be wrapped with {Writable,Unsafe,ReadOnly}SharedMemoryRegion.
+WritableSharedMemoryMapping MapAtForTesting(
+    subtle::PlatformSharedMemoryRegion* region,
+    off_t offset,
+    size_t size);
+
+WritableSharedMemoryMapping MapForTesting(
+    subtle::PlatformSharedMemoryRegion* region);
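+//
+// For example (illustrative sketch):
+//
+//   subtle::PlatformSharedMemoryRegion region =
+//       subtle::PlatformSharedMemoryRegion::CreateWritable(4096);
+//   WritableSharedMemoryMapping mapping = MapForTesting(&region);
+//   ASSERT_TRUE(mapping.IsValid());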
+
+template <typename SharedMemoryRegionType>
+std::pair<SharedMemoryRegionType, WritableSharedMemoryMapping>
+CreateMappedRegion(size_t size) {
+  SharedMemoryRegionType region = SharedMemoryRegionType::Create(size);
+  WritableSharedMemoryMapping mapping = region.Map();
+  return {std::move(region), std::move(mapping)};
+}
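+
+// For example, a test can obtain a region together with its mapping in one
+// call (illustrative sketch; assumes WritableSharedMemoryRegion from
+// base/memory/writable_shared_memory_region.h):
+//
+//   std::pair<WritableSharedMemoryRegion, WritableSharedMemoryMapping> pair =
+//       CreateMappedRegion<WritableSharedMemoryRegion>(4096);
+//   ASSERT_TRUE(pair.first.IsValid());
+//   ASSERT_TRUE(pair.second.IsValid());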
+
+// Template specialization of CreateMappedRegion<>() for
+// the ReadOnlySharedMemoryRegion. We need this because
+// ReadOnlySharedMemoryRegion::Create() has a different return type.
+template <>
+std::pair<ReadOnlySharedMemoryRegion, WritableSharedMemoryMapping>
+CreateMappedRegion(size_t size);
+
+}  // namespace base
+
+#endif  // BASE_TEST_TEST_SHARED_MEMORY_UTIL_H_
diff --git a/base/test/test_shortcut_win.cc b/base/test/test_shortcut_win.cc
new file mode 100644
index 0000000..70cb35b
--- /dev/null
+++ b/base/test/test_shortcut_win.cc
@@ -0,0 +1,156 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/test_shortcut_win.h"
+
+#include <windows.h>
+#include <objbase.h>
+#include <shlobj.h>
+#include <propkey.h>
+#include <wrl/client.h>
+
+#include "base/files/file_path.h"
+#include "base/strings/string16.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/win/scoped_propvariant.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace win {
+
+void ValidatePathsAreEqual(const base::FilePath& expected_path,
+                           const base::FilePath& actual_path) {
+  wchar_t long_expected_path_chars[MAX_PATH] = {0};
+  wchar_t long_actual_path_chars[MAX_PATH] = {0};
+
+  // If |expected_path| is empty confirm immediately that |actual_path| is also
+  // empty.
+  if (expected_path.empty()) {
+    EXPECT_TRUE(actual_path.empty());
+    return;
+  }
+
+  // Proceed with LongPathName matching which will also confirm the paths exist.
+  EXPECT_NE(0U, ::GetLongPathName(
+      expected_path.value().c_str(), long_expected_path_chars, MAX_PATH))
+          << "Failed to get LongPathName of " << expected_path.value();
+  EXPECT_NE(0U, ::GetLongPathName(
+      actual_path.value().c_str(), long_actual_path_chars, MAX_PATH))
+          << "Failed to get LongPathName of " << actual_path.value();
+
+  base::FilePath long_expected_path(long_expected_path_chars);
+  base::FilePath long_actual_path(long_actual_path_chars);
+  EXPECT_FALSE(long_expected_path.empty());
+  EXPECT_FALSE(long_actual_path.empty());
+
+  EXPECT_EQ(long_expected_path, long_actual_path);
+}
+
+void ValidateShortcut(const base::FilePath& shortcut_path,
+                      const ShortcutProperties& properties) {
+  Microsoft::WRL::ComPtr<IShellLink> i_shell_link;
+  Microsoft::WRL::ComPtr<IPersistFile> i_persist_file;
+
+  wchar_t read_target[MAX_PATH] = {0};
+  wchar_t read_working_dir[MAX_PATH] = {0};
+  wchar_t read_arguments[MAX_PATH] = {0};
+  wchar_t read_description[MAX_PATH] = {0};
+  wchar_t read_icon[MAX_PATH] = {0};
+  int read_icon_index = 0;
+
+  HRESULT hr;
+
+  // Initialize the shell interfaces.
+  EXPECT_TRUE(SUCCEEDED(hr = ::CoCreateInstance(CLSID_ShellLink, NULL,
+                                                CLSCTX_INPROC_SERVER,
+                                                IID_PPV_ARGS(&i_shell_link))));
+  if (FAILED(hr))
+    return;
+
+  EXPECT_TRUE(
+      SUCCEEDED(hr = i_shell_link.CopyTo(i_persist_file.GetAddressOf())));
+  if (FAILED(hr))
+    return;
+
+  // Load the shortcut.
+  EXPECT_TRUE(SUCCEEDED(hr = i_persist_file->Load(
+      shortcut_path.value().c_str(), 0))) << "Failed to load shortcut at "
+                                          << shortcut_path.value();
+  if (FAILED(hr))
+    return;
+
+  if (properties.options & ShortcutProperties::PROPERTIES_TARGET) {
+    EXPECT_TRUE(SUCCEEDED(
+        i_shell_link->GetPath(read_target, MAX_PATH, NULL, SLGP_SHORTPATH)));
+    ValidatePathsAreEqual(properties.target, base::FilePath(read_target));
+  }
+
+  if (properties.options & ShortcutProperties::PROPERTIES_WORKING_DIR) {
+    EXPECT_TRUE(SUCCEEDED(
+        i_shell_link->GetWorkingDirectory(read_working_dir, MAX_PATH)));
+    ValidatePathsAreEqual(properties.working_dir,
+                          base::FilePath(read_working_dir));
+  }
+
+  if (properties.options & ShortcutProperties::PROPERTIES_ARGUMENTS) {
+    EXPECT_TRUE(SUCCEEDED(
+        i_shell_link->GetArguments(read_arguments, MAX_PATH)));
+    EXPECT_EQ(properties.arguments, read_arguments);
+  }
+
+  if (properties.options & ShortcutProperties::PROPERTIES_DESCRIPTION) {
+    EXPECT_TRUE(SUCCEEDED(
+        i_shell_link->GetDescription(read_description, MAX_PATH)));
+    EXPECT_EQ(properties.description, read_description);
+  }
+
+  if (properties.options & ShortcutProperties::PROPERTIES_ICON) {
+    EXPECT_TRUE(SUCCEEDED(
+        i_shell_link->GetIconLocation(read_icon, MAX_PATH, &read_icon_index)));
+    ValidatePathsAreEqual(properties.icon, base::FilePath(read_icon));
+    EXPECT_EQ(properties.icon_index, read_icon_index);
+  }
+
+  Microsoft::WRL::ComPtr<IPropertyStore> property_store;
+  EXPECT_TRUE(
+      SUCCEEDED(hr = i_shell_link.CopyTo(property_store.GetAddressOf())));
+  if (FAILED(hr))
+    return;
+
+  if (properties.options & ShortcutProperties::PROPERTIES_APP_ID) {
+    ScopedPropVariant pv_app_id;
+    EXPECT_EQ(S_OK, property_store->GetValue(PKEY_AppUserModel_ID,
+                                             pv_app_id.Receive()));
+    switch (pv_app_id.get().vt) {
+      case VT_EMPTY:
+        EXPECT_TRUE(properties.app_id.empty());
+        break;
+      case VT_LPWSTR:
+        EXPECT_EQ(properties.app_id, pv_app_id.get().pwszVal);
+        break;
+      default:
+        ADD_FAILURE() << "Unexpected variant type: " << pv_app_id.get().vt;
+    }
+  }
+
+  if (properties.options & ShortcutProperties::PROPERTIES_DUAL_MODE) {
+    ScopedPropVariant pv_dual_mode;
+    EXPECT_EQ(S_OK, property_store->GetValue(PKEY_AppUserModel_IsDualMode,
+                                             pv_dual_mode.Receive()));
+    switch (pv_dual_mode.get().vt) {
+      case VT_EMPTY:
+        EXPECT_FALSE(properties.dual_mode);
+        break;
+      case VT_BOOL:
+        EXPECT_EQ(properties.dual_mode,
+                  static_cast<bool>(pv_dual_mode.get().boolVal));
+        break;
+      default:
+        ADD_FAILURE() << "Unexpected variant type: " << pv_dual_mode.get().vt;
+    }
+  }
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/test/test_shortcut_win.h b/base/test/test_shortcut_win.h
new file mode 100644
index 0000000..b828e8b
--- /dev/null
+++ b/base/test/test_shortcut_win.h
@@ -0,0 +1,30 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_TEST_SHORTCUT_WIN_H_
+#define BASE_TEST_TEST_SHORTCUT_WIN_H_
+
+#include "base/files/file_path.h"
+#include "base/win/shortcut.h"
+
+// Windows shortcut functions used only by tests.
+
+namespace base {
+namespace win {
+
+// Validates that |actual_path|'s LongPathName case-insensitively matches
+// |expected_path|'s LongPathName.
+void ValidatePathsAreEqual(const base::FilePath& expected_path,
+                           const base::FilePath& actual_path);
+
+// Validates that a shortcut exists at |shortcut_path| with the expected
+// |properties|.
+// Logs gtest failures on failed verifications.
+void ValidateShortcut(const base::FilePath& shortcut_path,
+                      const ShortcutProperties& properties);
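+//
+// A typical call (illustrative sketch; |link_path| and |target_path| are
+// hypothetical test fixture paths):
+//
+//   ShortcutProperties properties;
+//   properties.set_target(target_path);
+//   ValidateShortcut(link_path, properties);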
+
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_TEST_TEST_SHORTCUT_WIN_H_
diff --git a/base/test/test_simple_task_runner.cc b/base/test/test_simple_task_runner.cc
new file mode 100644
index 0000000..91c6861
--- /dev/null
+++ b/base/test/test_simple_task_runner.cc
@@ -0,0 +1,103 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/test_simple_task_runner.h"
+
+#include <utility>
+
+#include "base/callback_helpers.h"
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/threading/thread_task_runner_handle.h"
+
+namespace base {
+
+TestSimpleTaskRunner::TestSimpleTaskRunner() = default;
+
+TestSimpleTaskRunner::~TestSimpleTaskRunner() = default;
+
+bool TestSimpleTaskRunner::PostDelayedTask(const Location& from_here,
+                                           OnceClosure task,
+                                           TimeDelta delay) {
+  AutoLock auto_lock(lock_);
+  pending_tasks_.push_back(TestPendingTask(from_here, std::move(task),
+                                           TimeTicks(), delay,
+                                           TestPendingTask::NESTABLE));
+  return true;
+}
+
+bool TestSimpleTaskRunner::PostNonNestableDelayedTask(const Location& from_here,
+                                                      OnceClosure task,
+                                                      TimeDelta delay) {
+  AutoLock auto_lock(lock_);
+  pending_tasks_.push_back(TestPendingTask(from_here, std::move(task),
+                                           TimeTicks(), delay,
+                                           TestPendingTask::NON_NESTABLE));
+  return true;
+}
+
+// TODO(gab): Use SequenceToken here to differentiate between tasks running in
+// the scope of this TestSimpleTaskRunner and other task runners sharing this
+// thread. http://crbug.com/631186
+bool TestSimpleTaskRunner::RunsTasksInCurrentSequence() const {
+  return thread_ref_ == PlatformThread::CurrentRef();
+}
+
+base::circular_deque<TestPendingTask> TestSimpleTaskRunner::TakePendingTasks() {
+  AutoLock auto_lock(lock_);
+  return std::move(pending_tasks_);
+}
+
+size_t TestSimpleTaskRunner::NumPendingTasks() const {
+  AutoLock auto_lock(lock_);
+  return pending_tasks_.size();
+}
+
+bool TestSimpleTaskRunner::HasPendingTask() const {
+  AutoLock auto_lock(lock_);
+  return !pending_tasks_.empty();
+}
+
+base::TimeDelta TestSimpleTaskRunner::NextPendingTaskDelay() const {
+  AutoLock auto_lock(lock_);
+  return pending_tasks_.front().GetTimeToRun() - base::TimeTicks();
+}
+
+base::TimeDelta TestSimpleTaskRunner::FinalPendingTaskDelay() const {
+  AutoLock auto_lock(lock_);
+  return pending_tasks_.back().GetTimeToRun() - base::TimeTicks();
+}
+
+void TestSimpleTaskRunner::ClearPendingTasks() {
+  AutoLock auto_lock(lock_);
+  pending_tasks_.clear();
+}
+
+void TestSimpleTaskRunner::RunPendingTasks() {
+  DCHECK(RunsTasksInCurrentSequence());
+
+  // Swap with a local variable to avoid re-entrancy problems.
+  base::circular_deque<TestPendingTask> tasks_to_run;
+  {
+    AutoLock auto_lock(lock_);
+    tasks_to_run.swap(pending_tasks_);
+  }
+
+  // Multiple test task runners can share the same thread for determinism in
+  // unit tests. Make sure this TestSimpleTaskRunner's tasks run in its scope.
+  ScopedClosureRunner undo_override;
+  if (!ThreadTaskRunnerHandle::IsSet() ||
+      ThreadTaskRunnerHandle::Get() != this) {
+    undo_override = ThreadTaskRunnerHandle::OverrideForTesting(this);
+  }
+
+  for (auto& task : tasks_to_run)
+    std::move(task.task).Run();
+}
+
+void TestSimpleTaskRunner::RunUntilIdle() {
+  while (HasPendingTask())
+    RunPendingTasks();
+}
+
+}  // namespace base
diff --git a/base/test/test_simple_task_runner.h b/base/test/test_simple_task_runner.h
new file mode 100644
index 0000000..bff8ee5
--- /dev/null
+++ b/base/test/test_simple_task_runner.h
@@ -0,0 +1,89 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_TEST_SIMPLE_TASK_RUNNER_H_
+#define BASE_TEST_TEST_SIMPLE_TASK_RUNNER_H_
+
+#include "base/callback.h"
+#include "base/compiler_specific.h"
+#include "base/containers/circular_deque.h"
+#include "base/macros.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/lock.h"
+#include "base/test/test_pending_task.h"
+#include "base/threading/platform_thread.h"
+
+namespace base {
+
+class TimeDelta;
+
+// TestSimpleTaskRunner is a simple TaskRunner implementation that can
+// be used for testing.  It implements SingleThreadTaskRunner as that
+// interface implements SequencedTaskRunner, which in turn implements
+// TaskRunner, so TestSimpleTaskRunner can be passed in to a function
+// that accepts any *TaskRunner object.
+//
+// TestSimpleTaskRunner has the following properties which make it simple:
+//
+//   - Tasks are simply stored in a queue in FIFO order, ignoring delay
+//     and nestability.
+//   - Tasks aren't guaranteed to be destroyed immediately after
+//     they're run.
+//
+// However, TestSimpleTaskRunner allows for reentrancy, in that it
+// handles the running of tasks that in turn call back into itself
+// (e.g., to post more tasks).
+//
+// Note that, like any TaskRunner, TestSimpleTaskRunner is
+// ref-counted.
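+//
+// Example (an illustrative sketch):
+//
+//   auto task_runner = base::MakeRefCounted<base::TestSimpleTaskRunner>();
+//   task_runner->PostTask(FROM_HERE, base::BindOnce([] { /* work */ }));
+//   EXPECT_TRUE(task_runner->HasPendingTask());
+//   task_runner->RunUntilIdle();
+//   EXPECT_FALSE(task_runner->HasPendingTask());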
+class TestSimpleTaskRunner : public SingleThreadTaskRunner {
+ public:
+  TestSimpleTaskRunner();
+
+  // SingleThreadTaskRunner implementation.
+  bool PostDelayedTask(const Location& from_here,
+                       OnceClosure task,
+                       TimeDelta delay) override;
+  bool PostNonNestableDelayedTask(const Location& from_here,
+                                  OnceClosure task,
+                                  TimeDelta delay) override;
+
+  bool RunsTasksInCurrentSequence() const override;
+
+  base::circular_deque<TestPendingTask> TakePendingTasks();
+  size_t NumPendingTasks() const;
+  bool HasPendingTask() const;
+  base::TimeDelta NextPendingTaskDelay() const;
+  base::TimeDelta FinalPendingTaskDelay() const;
+
+  // Clears the queue of pending tasks without running them.
+  void ClearPendingTasks();
+
+  // Runs each current pending task in order and clears the queue. Tasks posted
+  // by the tasks that run within this call do not run within this call. Can
+  // only be called on the thread that created this TestSimpleTaskRunner.
+  void RunPendingTasks();
+
+  // Runs pending tasks until the queue is empty. Can only be called on the
+  // thread that created this TestSimpleTaskRunner.
+  void RunUntilIdle();
+
+ protected:
+  ~TestSimpleTaskRunner() override;
+
+ private:
+  // Thread on which this was instantiated.
+  const PlatformThreadRef thread_ref_ = PlatformThread::CurrentRef();
+
+  // Synchronizes access to |pending_tasks_|.
+  mutable Lock lock_;
+
+  base::circular_deque<TestPendingTask> pending_tasks_;
+
+  DISALLOW_COPY_AND_ASSIGN(TestSimpleTaskRunner);
+};
+
+}  // namespace base
+
+#endif  // BASE_TEST_TEST_SIMPLE_TASK_RUNNER_H_
diff --git a/base/test/test_suite.cc b/base/test/test_suite.cc
new file mode 100644
index 0000000..3d53097
--- /dev/null
+++ b/base/test/test_suite.cc
@@ -0,0 +1,484 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/test_suite.h"
+
+#include <signal.h>
+
+#include <memory>
+
+#include "base/at_exit.h"
+#include "base/base_paths.h"
+#include "base/base_switches.h"
+#include "base/bind.h"
+#include "base/command_line.h"
+#include "base/debug/debugger.h"
+#include "base/debug/profiler.h"
+#include "base/debug/stack_trace.h"
+#include "base/feature_list.h"
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/i18n/icu_util.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/path_service.h"
+#include "base/process/launch.h"
+#include "base/process/memory.h"
+#include "base/test/gtest_xml_unittest_result_printer.h"
+#include "base/test/gtest_xml_util.h"
+#include "base/test/icu_test_util.h"
+#include "base/test/launcher/unit_test_launcher.h"
+#include "base/test/multiprocess_test.h"
+#include "base/test/test_switches.h"
+#include "base/test/test_timeouts.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/multiprocess_func_list.h"
+
+#if defined(OS_MACOSX)
+#include "base/mac/scoped_nsautorelease_pool.h"
+#if defined(OS_IOS)
+#include "base/test/test_listener_ios.h"
+#endif  // OS_IOS
+#endif  // OS_MACOSX
+
+#if !defined(OS_WIN)
+#include "base/i18n/rtl.h"
+#if !defined(OS_IOS)
+#include "base/strings/string_util.h"
+#include "third_party/icu/source/common/unicode/uloc.h"
+#endif
+#endif
+
+#if defined(OS_ANDROID)
+#include "base/test/test_support_android.h"
+#endif
+
+#if defined(OS_IOS)
+#include "base/test/test_support_ios.h"
+#endif
+
+#if defined(OS_LINUX)
+#include "base/test/fontconfig_util_linux.h"
+#endif
+
+namespace base {
+
+namespace {
+
+class MaybeTestDisabler : public testing::EmptyTestEventListener {
+ public:
+  void OnTestStart(const testing::TestInfo& test_info) override {
+    ASSERT_FALSE(TestSuite::IsMarkedMaybe(test_info))
+        << "Probably the OS #ifdefs don't include all of the necessary "
+           "platforms.\nPlease ensure that no tests have the MAYBE_ prefix "
+           "after the code is preprocessed.";
+  }
+};
+
+class TestClientInitializer : public testing::EmptyTestEventListener {
+ public:
+  TestClientInitializer()
+      : old_command_line_(CommandLine::NO_PROGRAM) {
+  }
+
+  void OnTestStart(const testing::TestInfo& test_info) override {
+    old_command_line_ = *CommandLine::ForCurrentProcess();
+  }
+
+  void OnTestEnd(const testing::TestInfo& test_info) override {
+    *CommandLine::ForCurrentProcess() = old_command_line_;
+  }
+
+ private:
+  CommandLine old_command_line_;
+
+  DISALLOW_COPY_AND_ASSIGN(TestClientInitializer);
+};
+
+std::string GetProfileName() {
+  static const char kDefaultProfileName[] = "test-profile-{pid}";
+  CR_DEFINE_STATIC_LOCAL(std::string, profile_name, ());
+  if (profile_name.empty()) {
+    const base::CommandLine& command_line =
+        *base::CommandLine::ForCurrentProcess();
+    if (command_line.HasSwitch(switches::kProfilingFile))
+      profile_name = command_line.GetSwitchValueASCII(switches::kProfilingFile);
+    else
+      profile_name = std::string(kDefaultProfileName);
+  }
+  return profile_name;
+}
+
+void InitializeLogging() {
+#if defined(OS_ANDROID)
+  InitAndroidTestLogging();
+#else
+  FilePath exe;
+  PathService::Get(FILE_EXE, &exe);
+  FilePath log_filename = exe.ReplaceExtension(FILE_PATH_LITERAL("log"));
+  logging::LoggingSettings settings;
+  settings.logging_dest = logging::LOG_TO_ALL;
+  settings.log_file = log_filename.value().c_str();
+  settings.delete_old = logging::DELETE_OLD_LOG_FILE;
+  logging::InitLogging(settings);
+  // We want process and thread IDs because we may have multiple processes.
+  // Note: temporarily enabled timestamps in an effort to catch bug 6361.
+  logging::SetLogItems(true, true, true, true);
+#endif  // !defined(OS_ANDROID)
+}
+
+}  // namespace
+
+int RunUnitTestsUsingBaseTestSuite(int argc, char** argv) {
+  TestSuite test_suite(argc, argv);
+  return LaunchUnitTests(argc, argv,
+                         Bind(&TestSuite::Run, Unretained(&test_suite)));
+}
+
+TestSuite::TestSuite(int argc, char** argv) : initialized_command_line_(false) {
+  PreInitialize();
+  InitializeFromCommandLine(argc, argv);
+  // Logging must be initialized before any thread has a chance to call logging
+  // functions.
+  InitializeLogging();
+}
+
+#if defined(OS_WIN)
+TestSuite::TestSuite(int argc, wchar_t** argv)
+    : initialized_command_line_(false) {
+  PreInitialize();
+  InitializeFromCommandLine(argc, argv);
+  // Logging must be initialized before any thread has a chance to call logging
+  // functions.
+  InitializeLogging();
+}
+#endif  // defined(OS_WIN)
+
+TestSuite::~TestSuite() {
+  if (initialized_command_line_)
+    CommandLine::Reset();
+}
+
+void TestSuite::InitializeFromCommandLine(int argc, char** argv) {
+  initialized_command_line_ = CommandLine::Init(argc, argv);
+  testing::InitGoogleTest(&argc, argv);
+  testing::InitGoogleMock(&argc, argv);
+
+#if defined(OS_IOS)
+  InitIOSRunHook(this, argc, argv);
+#endif
+}
+
+#if defined(OS_WIN)
+void TestSuite::InitializeFromCommandLine(int argc, wchar_t** argv) {
+  // Windows CommandLine::Init ignores argv anyway.
+  initialized_command_line_ = CommandLine::Init(argc, NULL);
+  testing::InitGoogleTest(&argc, argv);
+  testing::InitGoogleMock(&argc, argv);
+}
+#endif  // defined(OS_WIN)
+
+void TestSuite::PreInitialize() {
+#if defined(OS_WIN)
+  testing::GTEST_FLAG(catch_exceptions) = false;
+#endif
+  EnableTerminationOnHeapCorruption();
+#if defined(OS_LINUX) && defined(USE_AURA)
+  // When calling native char conversion functions (e.g. wcrtomb) we need to
+  // have the locale set. In the absence of such a call the "C" locale is the
+  // default. In the gtk code (below) gtk_init() implicitly sets a locale.
+  setlocale(LC_ALL, "");
+#endif  // defined(OS_LINUX) && defined(USE_AURA)
+
+  // On Android, AtExitManager is created in
+  // testing/android/native_test_wrapper.cc before main() is called.
+#if !defined(OS_ANDROID)
+  at_exit_manager_.reset(new AtExitManager);
+#endif
+
+  // Don't add additional code to this function.  Instead add it to
+  // Initialize().  See bug 6436.
+}
+
+
+// static
+bool TestSuite::IsMarkedMaybe(const testing::TestInfo& test) {
+  return strncmp(test.name(), "MAYBE_", 6) == 0;
+}
+
+void TestSuite::CatchMaybeTests() {
+  testing::TestEventListeners& listeners =
+      testing::UnitTest::GetInstance()->listeners();
+  listeners.Append(new MaybeTestDisabler);
+}
+
+void TestSuite::ResetCommandLine() {
+  testing::TestEventListeners& listeners =
+      testing::UnitTest::GetInstance()->listeners();
+  listeners.Append(new TestClientInitializer);
+}
+
+void TestSuite::AddTestLauncherResultPrinter() {
+  // Only add the custom printer if requested.
+  if (!CommandLine::ForCurrentProcess()->HasSwitch(
+          switches::kTestLauncherOutput)) {
+    return;
+  }
+
+  FilePath output_path(CommandLine::ForCurrentProcess()->GetSwitchValuePath(
+      switches::kTestLauncherOutput));
+
+  // Do not add the result printer if output path already exists. It's an
+  // indicator there is a process printing to that file, and we're likely
+  // its child. Do not clobber the results in that case.
+  if (PathExists(output_path)) {
+    LOG(WARNING) << "Test launcher output path " << output_path.AsUTF8Unsafe()
+                 << " exists. Not adding test launcher result printer.";
+    return;
+  }
+
+  printer_ = new XmlUnitTestResultPrinter;
+  CHECK(printer_->Initialize(output_path));
+  testing::TestEventListeners& listeners =
+      testing::UnitTest::GetInstance()->listeners();
+  listeners.Append(printer_);
+}
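+
+// Illustrative invocation that triggers the printer above (the path is
+// hypothetical):
+//
+//   base_unittests --test-launcher-output=/tmp/results.xml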
+
+// Don't add additional code to this method.  Instead add it to
+// Initialize().  See bug 6436.
+int TestSuite::Run() {
+#if defined(OS_IOS)
+  RunTestsFromIOSApp();
+#endif
+
+#if defined(OS_MACOSX)
+  mac::ScopedNSAutoreleasePool scoped_pool;
+#endif
+
+  Initialize();
+  std::string client_func =
+      CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
+          switches::kTestChildProcess);
+
+  // Check to see if we are being run as a client process.
+  if (!client_func.empty())
+    return multi_process_function_list::InvokeChildProcessTest(client_func);
+#if defined(OS_IOS)
+  test_listener_ios::RegisterTestEndListener();
+#endif
+
+  int result = RUN_ALL_TESTS();
+
+#if defined(OS_MACOSX)
+  // This MUST happen before Shutdown() since Shutdown() tears down
+  // objects (such as NotificationService::current()) that Cocoa
+  // objects use to remove themselves as observers.
+  scoped_pool.Recycle();
+#endif
+
+  Shutdown();
+
+  return result;
+}
+
+void TestSuite::UnitTestAssertHandler(const char* file,
+                                      int line,
+                                      const base::StringPiece summary,
+                                      const base::StringPiece stack_trace) {
+#if defined(OS_ANDROID)
+  // Correlating test stdio with logcat can be difficult, so we emit this
+  // helpful little hint about what was running.  Only do this for Android
+  // because other platforms don't separate out the relevant logs in the same
+  // way.
+  const ::testing::TestInfo* const test_info =
+      ::testing::UnitTest::GetInstance()->current_test_info();
+  if (test_info) {
+    LOG(ERROR) << "Currently running: " << test_info->test_case_name() << "."
+               << test_info->name();
+    fflush(stderr);
+  }
+#endif  // defined(OS_ANDROID)
+
+  // XmlUnitTestResultPrinter inherits gtest format, where assert has summary
+  // and message. In GTest, summary is just a logged text, and message is a
+  // logged text, concatenated with stack trace of assert.
+  // Concatenate summary and stack_trace here, to pass it as a message.
+  if (printer_) {
+    const std::string summary_str = summary.as_string();
+    const std::string stack_trace_str = summary_str + stack_trace.as_string();
+    printer_->OnAssert(file, line, summary_str, stack_trace_str);
+  }
+
+  // The logging system actually prints the message before calling the assert
+  // handler. Just exit now to avoid printing too many stack traces.
+  _exit(1);
+}
+
+#if defined(OS_WIN)
+namespace {
+
+// Disable optimizations to prevent function folding or other transformations
+// that will make the call stacks on failures more confusing.
+#pragma optimize("", off)
+// Handlers for invalid parameter, pure call, and abort. They generate a
+// breakpoint to ensure that we get a call stack on these failures.
+void InvalidParameter(const wchar_t* expression,
+                      const wchar_t* function,
+                      const wchar_t* file,
+                      unsigned int line,
+                      uintptr_t reserved) {
+  // CRT printed message is sufficient.
+  __debugbreak();
+  _exit(1);
+}
+
+void PureCall() {
+  fprintf(stderr, "Pure-virtual function call. Terminating.\n");
+  __debugbreak();
+  _exit(1);
+}
+
+void AbortHandler(int signal) {
+  // Print EOL after the CRT abort message.
+  fprintf(stderr, "\n");
+  __debugbreak();
+}
+#pragma optimize("", on)
+
+}  // namespace
+#endif
+
+void TestSuite::SuppressErrorDialogs() {
+#if defined(OS_WIN)
+  UINT new_flags = SEM_FAILCRITICALERRORS |
+                   SEM_NOGPFAULTERRORBOX |
+                   SEM_NOOPENFILEERRORBOX;
+
+  // Preserve existing error mode, as discussed at
+  // http://blogs.msdn.com/oldnewthing/archive/2004/07/27/198410.aspx
+  UINT existing_flags = SetErrorMode(new_flags);
+  SetErrorMode(existing_flags | new_flags);
+
+#if defined(_DEBUG)
+  // Suppress the "Debug Assertion Failed" dialog.
+  // TODO(hbono): remove this code when gtest has it.
+  // http://groups.google.com/d/topic/googletestframework/OjuwNlXy5ac/discussion
+  _CrtSetReportFile(_CRT_ASSERT, _CRTDBG_FILE_STDERR);
+  _CrtSetReportMode(_CRT_ASSERT, _CRTDBG_MODE_FILE | _CRTDBG_MODE_DEBUG);
+  _CrtSetReportFile(_CRT_ERROR, _CRTDBG_FILE_STDERR);
+  _CrtSetReportMode(_CRT_ERROR, _CRTDBG_MODE_FILE | _CRTDBG_MODE_DEBUG);
+#endif  // defined(_DEBUG)
+
+  // See crbug.com/783040 for test code to trigger all of these failures.
+  _set_invalid_parameter_handler(InvalidParameter);
+  _set_purecall_handler(PureCall);
+  signal(SIGABRT, AbortHandler);
+#endif  // defined(OS_WIN)
+}
+
+void TestSuite::Initialize() {
+  const CommandLine* command_line = CommandLine::ForCurrentProcess();
+#if !defined(OS_IOS)
+  if (command_line->HasSwitch(switches::kWaitForDebugger)) {
+    debug::WaitForDebugger(60, true);
+  }
+#endif
+  // Set up a FeatureList instance, so that code using that API will not hit
+  // an error that it's not set. It will be cleared automatically.
+  // TestFeatureForBrowserTest1 and TestFeatureForBrowserTest2 are used in
+  // ContentBrowserTestScopedFeatureListTest to ensure ScopedFeatureList keeps
+  // features from the command line.
+  std::string enabled =
+      command_line->GetSwitchValueASCII(switches::kEnableFeatures);
+  std::string disabled =
+      command_line->GetSwitchValueASCII(switches::kDisableFeatures);
+  enabled += ",TestFeatureForBrowserTest1";
+  disabled += ",TestFeatureForBrowserTest2";
+  scoped_feature_list_.InitFromCommandLine(enabled, disabled);
+
+  // The enable-features and disable-features flags were just slurped into a
+  // FeatureList, so remove them from the command line. Tests should enable and
+  // disable features via the ScopedFeatureList API rather than command-line
+  // flags.
+  CommandLine new_command_line(command_line->GetProgram());
+  CommandLine::SwitchMap switches = command_line->GetSwitches();
+
+  switches.erase(switches::kEnableFeatures);
+  switches.erase(switches::kDisableFeatures);
+
+  for (const auto& iter : switches)
+    new_command_line.AppendSwitchNative(iter.first, iter.second);
+
+  *CommandLine::ForCurrentProcess() = new_command_line;
+
+#if defined(OS_IOS)
+  InitIOSTestMessageLoop();
+#endif  // OS_IOS
+
+#if defined(OS_ANDROID)
+  InitAndroidTestMessageLoop();
+#endif  // defined(OS_ANDROID)
+
+  CHECK(debug::EnableInProcessStackDumping());
+#if defined(OS_WIN)
+  RouteStdioToConsole(true);
+  // Make sure we run with high resolution timer to minimize differences
+  // between production code and test code.
+  Time::EnableHighResolutionTimer(true);
+#endif  // defined(OS_WIN)
+
+  // In some cases, we do not want to see standard error dialogs.
+  if (!debug::BeingDebugged() &&
+      !command_line->HasSwitch("show-error-dialogs")) {
+    SuppressErrorDialogs();
+    debug::SetSuppressDebugUI(true);
+    assert_handler_ = std::make_unique<logging::ScopedLogAssertHandler>(
+        base::Bind(&TestSuite::UnitTestAssertHandler, base::Unretained(this)));
+  }
+
+  base::test::InitializeICUForTesting();
+
+  // On the Mac OS X command line, the default locale is *_POSIX. In Chromium,
+  // the locale is set via an OS X locale API and is never *_POSIX.
+  // Some tests (such as those involving the word break iterator) will behave
+  // differently and fail if we use a *_POSIX locale. Setting it to en_US here
+  // does not affect tests that explicitly override the locale for testing.
+  // This can be an issue on all platforms other than Windows.
+  // TODO(jshin): Should we set the locale via an OS X locale API here?
+#if !defined(OS_WIN)
+#if defined(OS_IOS)
+  i18n::SetICUDefaultLocale("en_US");
+#else
+  std::string default_locale(uloc_getDefault());
+  if (EndsWith(default_locale, "POSIX", CompareCase::INSENSITIVE_ASCII))
+    i18n::SetICUDefaultLocale("en_US");
+#endif
+#endif
+
+#if defined(OS_LINUX)
+  // TODO(thomasanderson): Call TearDownFontconfig() in Shutdown().  It would
+  // currently crash because of leaked FcFontSet's in font_fallback_linux.cc.
+  SetUpFontconfig();
+#endif
+
+  CatchMaybeTests();
+  ResetCommandLine();
+  AddTestLauncherResultPrinter();
+
+  TestTimeouts::Initialize();
+
+  trace_to_file_.BeginTracingFromCommandLineOptions();
+
+  base::debug::StartProfiling(GetProfileName());
+}
+
+void TestSuite::Shutdown() {
+  base::debug::StopProfiling();
+}
+
+}  // namespace base
diff --git a/base/test/test_suite.h b/base/test/test_suite.h
new file mode 100644
index 0000000..6d852ba
--- /dev/null
+++ b/base/test/test_suite.h
@@ -0,0 +1,103 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_TEST_SUITE_H_
+#define BASE_TEST_TEST_SUITE_H_
+
+// Defines a basic test suite framework for running gtest based tests.  You can
+// instantiate this class in your main function and call its Run method to run
+// any gtest based tests that are linked into your executable.
+
+#include <memory>
+#include <string>
+
+#include "base/at_exit.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/test/scoped_feature_list.h"
+#include "base/test/trace_to_file.h"
+#include "build/build_config.h"
+
+namespace testing {
+class TestInfo;
+}
+
+namespace base {
+
+class XmlUnitTestResultPrinter;
+
+// Instantiates TestSuite, runs it and returns exit code.
+int RunUnitTestsUsingBaseTestSuite(int argc, char **argv);
+
+class TestSuite {
+ public:
+  // Match function used by the GetTestCount method.
+  typedef bool (*TestMatch)(const testing::TestInfo&);
+
+  TestSuite(int argc, char** argv);
+#if defined(OS_WIN)
+  TestSuite(int argc, wchar_t** argv);
+#endif  // defined(OS_WIN)
+  virtual ~TestSuite();
+
+  // Returns true if the test is marked as "MAYBE_".
+  // When using different prefixes depending on platform, we use MAYBE_ and
+  // preprocessor directives to replace MAYBE_ with the target prefix.
+  static bool IsMarkedMaybe(const testing::TestInfo& test);
+
+  void CatchMaybeTests();
+
+  void ResetCommandLine();
+
+  void AddTestLauncherResultPrinter();
+
+  int Run();
+
+ protected:
+  // By default fatal log messages (e.g. from DCHECKs) result in error dialogs
+  // which gum up buildbots. Use a minimalistic assert handler which just
+  // terminates the process.
+  void UnitTestAssertHandler(const char* file,
+                             int line,
+                             const base::StringPiece summary,
+                             const base::StringPiece stack_trace);
+
+  // Disable crash dialogs so that they don't gum up the buildbot.
+  virtual void SuppressErrorDialogs();
+
+  // Override these for custom initialization and shutdown handling.  Use these
+  // instead of putting complex code in your constructor/destructor.
+
+  virtual void Initialize();
+  virtual void Shutdown();
+
+  // Make sure that we set up an AtExitManager so Singleton objects will be
+  // destroyed.
+  std::unique_ptr<base::AtExitManager> at_exit_manager_;
+
+ private:
+  void InitializeFromCommandLine(int argc, char** argv);
+#if defined(OS_WIN)
+  void InitializeFromCommandLine(int argc, wchar_t** argv);
+#endif  // defined(OS_WIN)
+
+  // Basic initialization for the test suite happens here.
+  void PreInitialize();
+
+  test::TraceToFile trace_to_file_;
+
+  bool initialized_command_line_;
+
+  test::ScopedFeatureList scoped_feature_list_;
+
+  XmlUnitTestResultPrinter* printer_ = nullptr;
+
+  std::unique_ptr<logging::ScopedLogAssertHandler> assert_handler_;
+
+  DISALLOW_COPY_AND_ASSIGN(TestSuite);
+};
+
+}  // namespace base
+
+#endif  // BASE_TEST_TEST_SUITE_H_
diff --git a/base/test/test_support_android.cc b/base/test/test_support_android.cc
new file mode 100644
index 0000000..33a6628
--- /dev/null
+++ b/base/test/test_support_android.cc
@@ -0,0 +1,194 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdarg.h>
+#include <string.h>
+
+#include "base/android/path_utils.h"
+#include "base/files/file_path.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/singleton.h"
+#include "base/message_loop/message_loop.h"
+#include "base/message_loop/message_pump_android.h"
+#include "base/path_service.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/test/multiprocess_test.h"
+
+namespace {
+
+base::FilePath* g_test_data_dir = nullptr;
+
+struct RunState {
+  RunState(base::MessagePump::Delegate* delegate, int run_depth)
+      : delegate(delegate),
+        run_depth(run_depth),
+        should_quit(false) {
+  }
+
+  base::MessagePump::Delegate* delegate;
+
+  // Used to count how many Run() invocations are on the stack.
+  int run_depth;
+
+  // Used to flag that the current Run() invocation should return ASAP.
+  bool should_quit;
+};
+
+RunState* g_state = NULL;
+
+// A singleton WaitableEvent wrapper so we avoid a busy loop in
+// MessagePumpForUIStub. Other platforms use the native event loop which blocks
+// when there are no pending messages.
+class Waitable {
+ public:
+  static Waitable* GetInstance() {
+    return base::Singleton<Waitable,
+                           base::LeakySingletonTraits<Waitable>>::get();
+  }
+
+  // Signals that there is more work to do.
+  void Signal() { waitable_event_.Signal(); }
+
+  // Blocks until more work is scheduled.
+  void Block() { waitable_event_.Wait(); }
+
+  void Quit() {
+    g_state->should_quit = true;
+    Signal();
+  }
+
+ private:
+  friend struct base::DefaultSingletonTraits<Waitable>;
+
+  Waitable()
+      : waitable_event_(base::WaitableEvent::ResetPolicy::AUTOMATIC,
+                        base::WaitableEvent::InitialState::NOT_SIGNALED) {}
+
+  base::WaitableEvent waitable_event_;
+
+  DISALLOW_COPY_AND_ASSIGN(Waitable);
+};
+
+// The MessagePumpForUI implementation for test purpose.
+class MessagePumpForUIStub : public base::MessagePumpForUI {
+  ~MessagePumpForUIStub() override {}
+
+  void Start(base::MessagePump::Delegate* delegate) override {
+    NOTREACHED() << "The Start() method shouldn't be called in tests; "
+                    "use the Run() method instead.";
+  }
+
+  void Run(base::MessagePump::Delegate* delegate) override {
+    // The following was based on message_pump_glib.cc, except we're using a
+    // WaitableEvent since there is no native message loop to use.
+    RunState state(delegate, g_state ? g_state->run_depth + 1 : 1);
+
+    RunState* previous_state = g_state;
+    g_state = &state;
+
+    bool more_work_is_plausible = true;
+
+    for (;;) {
+      if (!more_work_is_plausible) {
+        Waitable::GetInstance()->Block();
+        if (g_state->should_quit)
+          break;
+      }
+
+      more_work_is_plausible = g_state->delegate->DoWork();
+      if (g_state->should_quit)
+        break;
+
+      base::TimeTicks delayed_work_time;
+      more_work_is_plausible |=
+          g_state->delegate->DoDelayedWork(&delayed_work_time);
+      if (g_state->should_quit)
+        break;
+
+      if (more_work_is_plausible)
+        continue;
+
+      more_work_is_plausible = g_state->delegate->DoIdleWork();
+      if (g_state->should_quit)
+        break;
+
+      more_work_is_plausible |= !delayed_work_time.is_null();
+    }
+
+    g_state = previous_state;
+  }
+
+  void Quit() override { Waitable::GetInstance()->Quit(); }
+
+  void ScheduleWork() override { Waitable::GetInstance()->Signal(); }
+
+  void ScheduleDelayedWork(const base::TimeTicks& delayed_work_time) override {
+    Waitable::GetInstance()->Signal();
+  }
+};
+
+std::unique_ptr<base::MessagePump> CreateMessagePumpForUIStub() {
+  return std::unique_ptr<base::MessagePump>(new MessagePumpForUIStub());
+}
+
+// Provides the test path for DIR_SOURCE_ROOT and DIR_ANDROID_APP_DATA.
+bool GetTestProviderPath(int key, base::FilePath* result) {
+  switch (key) {
+    // TODO(agrieve): Stop overriding DIR_ANDROID_APP_DATA.
+    // https://crbug.com/617734
+    // Instead DIR_ASSETS should be used to discover assets file location in
+    // tests.
+    case base::DIR_ANDROID_APP_DATA:
+    case base::DIR_ASSETS:
+    case base::DIR_SOURCE_ROOT:
+      CHECK(g_test_data_dir != nullptr);
+      *result = *g_test_data_dir;
+      return true;
+    default:
+      return false;
+  }
+}
+
+void InitPathProvider(int key) {
+  base::FilePath path;
+  // If overriding the key fails, the key has not been registered yet, so
+  // register a provider for it instead.
+  if (GetTestProviderPath(key, &path) &&
+      !base::PathService::Override(key, path)) {
+    base::PathService::RegisterProvider(&GetTestProviderPath, key, key + 1);
+  }
+}
+
+}  // namespace
+
+namespace base {
+
+void InitAndroidTestLogging() {
+  logging::LoggingSettings settings;
+  settings.logging_dest = logging::LOG_TO_SYSTEM_DEBUG_LOG;
+  logging::InitLogging(settings);
+  // To view log output with IDs and timestamps use "adb logcat -v threadtime".
+  logging::SetLogItems(false,    // Process ID
+                       false,    // Thread ID
+                       false,    // Timestamp
+                       false);   // Tick count
+}
+
+void InitAndroidTestPaths(const FilePath& test_data_dir) {
+  if (g_test_data_dir) {
+    CHECK(test_data_dir == *g_test_data_dir);
+    return;
+  }
+  g_test_data_dir = new FilePath(test_data_dir);
+  InitPathProvider(DIR_SOURCE_ROOT);
+  InitPathProvider(DIR_ANDROID_APP_DATA);
+  InitPathProvider(DIR_ASSETS);
+}
+
+void InitAndroidTestMessageLoop() {
+  if (!MessageLoop::InitMessagePumpForUIFactory(&CreateMessagePumpForUIStub))
+    LOG(INFO) << "MessagePumpForUIFactory already set, unable to override.";
+}
+
+}  // namespace base
diff --git a/base/test/test_support_android.h b/base/test/test_support_android.h
new file mode 100644
index 0000000..4942e54
--- /dev/null
+++ b/base/test/test_support_android.h
@@ -0,0 +1,25 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_TEST_SUPPORT_ANDROID_H_
+#define BASE_TEST_TEST_SUPPORT_ANDROID_H_
+
+#include "base/base_export.h"
+
+namespace base {
+
+class FilePath;
+
+// Init logging for tests on Android. Logs will be output into Android's logcat.
+BASE_EXPORT void InitAndroidTestLogging();
+
+// Init path providers for tests on Android.
+BASE_EXPORT void InitAndroidTestPaths(const FilePath& test_data_dir);
+
+// Init the message loop for tests on Android.
+BASE_EXPORT void InitAndroidTestMessageLoop();
+
+}  // namespace base
+
+#endif  // BASE_TEST_TEST_SUPPORT_ANDROID_H_
diff --git a/base/test/test_support_ios.h b/base/test/test_support_ios.h
new file mode 100644
index 0000000..c71cf0d
--- /dev/null
+++ b/base/test/test_support_ios.h
@@ -0,0 +1,24 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_TEST_SUPPORT_IOS_H_
+#define BASE_TEST_TEST_SUPPORT_IOS_H_
+
+#include "base/test/test_suite.h"
+
+namespace base {
+
+// Inits the message loop for tests on iOS.
+void InitIOSTestMessageLoop();
+
+// Inits the run hook for tests on iOS.
+void InitIOSRunHook(TestSuite* suite, int argc, char* argv[]);
+
+// Launches an iOS app that runs the tests in the suite passed to
+// InitIOSRunHook.
+void RunTestsFromIOSApp();
+
+}  // namespace base
+
+#endif  // BASE_TEST_TEST_SUPPORT_IOS_H_
diff --git a/base/test/test_support_ios.mm b/base/test/test_support_ios.mm
new file mode 100644
index 0000000..03c7631
--- /dev/null
+++ b/base/test/test_support_ios.mm
@@ -0,0 +1,219 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import <UIKit/UIKit.h>
+
+#include "base/debug/debugger.h"
+#include "base/logging.h"
+#include "base/mac/scoped_nsautorelease_pool.h"
+#include "base/mac/scoped_nsobject.h"
+#include "base/message_loop/message_loop.h"
+#include "base/message_loop/message_pump_default.h"
+#include "base/test/test_suite.h"
+#include "testing/coverage_util_ios.h"
+
+// Springboard will kill any iOS app that fails to check in after launch within
+// a given time. Starting a UIApplication before invoking TestSuite::Run
+// prevents this from happening.
+
+// InitIOSRunHook saves the TestSuite and argc/argv, then invoking
+// RunTestsFromIOSApp calls UIApplicationMain(), providing an application
+// delegate class: ChromeUnitTestDelegate. The delegate implements
+// application:didFinishLaunchingWithOptions: to invoke the TestSuite's Run
+// method.
+
+// Since the executable isn't likely to be a real iOS UI, the delegate puts up a
+// window displaying the app name. If a bunch of apps using MainHook are being
+// run in a row, this provides an indication of which one is currently running.
+
+static base::TestSuite* g_test_suite = NULL;
+static int g_argc;
+static char** g_argv;
+
+@interface UIApplication (Testing)
+- (void)_terminateWithStatus:(int)status;
+@end
+
+#if TARGET_IPHONE_SIMULATOR
+// Xcode 6 introduced behavior in the iOS Simulator where the software
+// keyboard does not appear if a hardware keyboard is connected. The following
+// declaration allows this behavior to be overridden when the app starts up.
+@interface UIKeyboardImpl
++ (instancetype)sharedInstance;
+- (void)setAutomaticMinimizationEnabled:(BOOL)enabled;
+- (void)setSoftwareKeyboardShownByTouch:(BOOL)enabled;
+@end
+#endif  // TARGET_IPHONE_SIMULATOR
+
+@interface ChromeUnitTestDelegate : NSObject {
+  base::scoped_nsobject<UIWindow> _window;
+}
+- (void)runTests;
+@end
+
+@implementation ChromeUnitTestDelegate
+
+- (BOOL)application:(UIApplication *)application
+    didFinishLaunchingWithOptions:(NSDictionary *)launchOptions {
+
+#if TARGET_IPHONE_SIMULATOR
+  // Xcode 6 introduced behavior in the iOS Simulator where the software
+  // keyboard does not appear if a hardware keyboard is connected. The following
+  // calls override this behavior by ensuring that the software keyboard is
+  // always shown.
+  [[UIKeyboardImpl sharedInstance] setAutomaticMinimizationEnabled:NO];
+  [[UIKeyboardImpl sharedInstance] setSoftwareKeyboardShownByTouch:YES];
+#endif  // TARGET_IPHONE_SIMULATOR
+
+  CGRect bounds = [[UIScreen mainScreen] bounds];
+
+  // Yes, this is leaked, it's just to make what's running visible.
+  _window.reset([[UIWindow alloc] initWithFrame:bounds]);
+  [_window setBackgroundColor:[UIColor whiteColor]];
+  [_window makeKeyAndVisible];
+
+  // Add a label with the app name.
+  UILabel* label = [[[UILabel alloc] initWithFrame:bounds] autorelease];
+  label.text = [[NSProcessInfo processInfo] processName];
+  label.textAlignment = NSTextAlignmentCenter;
+  [_window addSubview:label];
+
+  // An NSInternalInconsistencyException is thrown if the app doesn't have a
+  // root view controller. Set an empty one here.
+  [_window setRootViewController:[[[UIViewController alloc] init] autorelease]];
+
+  if ([self shouldRedirectOutputToFile])
+    [self redirectOutput];
+
+  // Queue up the test run.
+  [self performSelector:@selector(runTests)
+             withObject:nil
+             afterDelay:0.1];
+  return YES;
+}
+
+// Returns true if the gtest output should be redirected to a file, then sent
+// to NSLog when complete. This redirection is used because gtest only writes
+// output to stdout, but results must be written to NSLog in order to show up in
+// the device log that is retrieved from the device by the host.
+- (BOOL)shouldRedirectOutputToFile {
+#if !TARGET_IPHONE_SIMULATOR
+  return !base::debug::BeingDebugged();
+#endif  // TARGET_IPHONE_SIMULATOR
+  return NO;
+}
+
+// Returns the path to the directory to store gtest output files.
+- (NSString*)outputPath {
+  NSArray* searchPath =
+      NSSearchPathForDirectoriesInDomains(NSDocumentDirectory,
+                                          NSUserDomainMask,
+                                          YES);
+  CHECK([searchPath count] > 0) << "Failed to get the Documents folder";
+  return [searchPath objectAtIndex:0];
+}
+
+// Returns the path to file that stdout is redirected to.
+- (NSString*)stdoutPath {
+  return [[self outputPath] stringByAppendingPathComponent:@"stdout.log"];
+}
+
+// Returns the path to file that stderr is redirected to.
+- (NSString*)stderrPath {
+  return [[self outputPath] stringByAppendingPathComponent:@"stderr.log"];
+}
+
+// Redirects stdout and stderr to files in the Documents folder in the app's
+// sandbox.
+- (void)redirectOutput {
+  freopen([[self stdoutPath] UTF8String], "w+", stdout);
+  freopen([[self stderrPath] UTF8String], "w+", stderr);
+}
+
+// Reads the redirected gtest output from a file and writes it to NSLog.
+- (void)writeOutputToNSLog {
+  // Close the redirected stdout and stderr files so that the content written to
+  // NSLog doesn't end up in these files.
+  fclose(stdout);
+  fclose(stderr);
+  for (NSString* path in @[ [self stdoutPath], [self stderrPath]]) {
+    NSString* content = [NSString stringWithContentsOfFile:path
+                                                  encoding:NSUTF8StringEncoding
+                                                     error:NULL];
+    NSArray* lines = [content componentsSeparatedByCharactersInSet:
+        [NSCharacterSet newlineCharacterSet]];
+
+    NSLog(@"Writing contents of %@ to NSLog", path);
+    for (NSString* line in lines) {
+      NSLog(@"%@", line);
+    }
+  }
+}
+
+- (void)runTests {
+  coverage_util::ConfigureCoverageReportPath();
+
+  int exitStatus = g_test_suite->Run();
+
+  if ([self shouldRedirectOutputToFile])
+    [self writeOutputToNSLog];
+
+  // If a test app is too fast, it will exit before Instruments has a
+  // chance to initialize and no test results will be seen.
+  // TODO(crbug.com/137010): Figure out how much time is actually needed, and
+  // sleep only to make sure that much time has elapsed since launch.
+  [NSThread sleepUntilDate:[NSDate dateWithTimeIntervalSinceNow:2.0]];
+  _window.reset();
+
+  // Use the hidden selector to try and cleanly take down the app (otherwise
+  // things can think the app crashed even on a zero exit status).
+  UIApplication* application = [UIApplication sharedApplication];
+  [application _terminateWithStatus:exitStatus];
+
+  exit(exitStatus);
+}
+
+@end
+
+namespace {
+
+std::unique_ptr<base::MessagePump> CreateMessagePumpForUIForTests() {
+  // A default MessagePump will do quite nicely in tests.
+  return std::unique_ptr<base::MessagePump>(new base::MessagePumpDefault());
+}
+
+}  // namespace
+
+namespace base {
+
+void InitIOSTestMessageLoop() {
+  MessageLoop::InitMessagePumpForUIFactory(&CreateMessagePumpForUIForTests);
+}
+
+void InitIOSRunHook(TestSuite* suite, int argc, char* argv[]) {
+  g_test_suite = suite;
+  g_argc = argc;
+  g_argv = argv;
+}
+
+void RunTestsFromIOSApp() {
+  // When TestSuite::Run is invoked it calls RunTestsFromIOSApp(). On the first
+  // invocation, this method fires up an iOS app via UIApplicationMain. Since
+  // UIApplicationMain does not return until the app exits, control does not
+  // return to the initial TestSuite::Run invocation, so the app invokes
+  // TestSuite::Run a second time and since |ran_hook| is true at this point,
+  // this method is a no-op and control returns to TestSuite::Run so that tests
+  // are executed. Once the app exits, RunTestsFromIOSApp calls exit() so that
+  // control is not returned to the initial invocation of TestSuite::Run.
+  static bool ran_hook = false;
+  if (!ran_hook) {
+    ran_hook = true;
+    mac::ScopedNSAutoreleasePool pool;
+    int exit_status = UIApplicationMain(g_argc, g_argv, nil,
+                                        @"ChromeUnitTestDelegate");
+    exit(exit_status);
+  }
+}
+
+}  // namespace base
diff --git a/base/test/test_switches.cc b/base/test/test_switches.cc
new file mode 100644
index 0000000..a35bdd8
--- /dev/null
+++ b/base/test/test_switches.cc
@@ -0,0 +1,81 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/test_switches.h"
+
+// Maximum number of tests to run in a single batch.
+const char switches::kTestLauncherBatchLimit[] = "test-launcher-batch-limit";
+
+// Sets defaults desirable for the continuous integration bots, e.g. parallel
+// test execution and test retries.
+const char switches::kTestLauncherBotMode[] =
+    "test-launcher-bot-mode";
+
+// Makes it possible to debug the launcher itself. By default the launcher
+// automatically switches to single process mode when it detects the presence
+// of a debugger.
+const char switches::kTestLauncherDebugLauncher[] =
+    "test-launcher-debug-launcher";
+
+// Force running all requested tests and retries even if too many test errors
+// occur.
+const char switches::kTestLauncherForceRunBrokenTests[] =
+    "test-launcher-force-run-broken-tests";
+
+// Path to file containing test filter (one pattern per line).
+const char switches::kTestLauncherFilterFile[] = "test-launcher-filter-file";
+
+// Whether the test launcher should launch in "interactive mode", which disables
+// timeouts (and may have other effects for specific test types).
+const char switches::kTestLauncherInteractive[] = "test-launcher-interactive";
+
+// Number of parallel test launcher jobs.
+const char switches::kTestLauncherJobs[] = "test-launcher-jobs";
+
+// Path to list of compiled in tests.
+const char switches::kTestLauncherListTests[] = "test-launcher-list-tests";
+
+// Path to test results file in our custom test launcher format.
+const char switches::kTestLauncherOutput[] = "test-launcher-output";
+
+// Maximum number of times to retry a test after failure.
+const char switches::kTestLauncherRetryLimit[] = "test-launcher-retry-limit";
+
+// Path to test results file with all the info from the test launcher.
+const char switches::kTestLauncherSummaryOutput[] =
+    "test-launcher-summary-output";
+
+// Flag controlling when test stdio is displayed as part of the launcher's
+// standard output.
+const char switches::kTestLauncherPrintTestStdio[] =
+    "test-launcher-print-test-stdio";
+
+// Print a writable path and exit (for internal use).
+const char switches::kTestLauncherPrintWritablePath[] =
+    "test-launcher-print-writable-path";
+
+// Index of the test shard to run, ranging from 0 (first shard) to total shards
+// minus one (last shard).
+const char switches::kTestLauncherShardIndex[] =
+    "test-launcher-shard-index";
+
+// Limit of test part results in the output. The default limit is 10.
+// A negative value completely disables the limit.
+const char switches::kTestLauncherTestPartResultsLimit[] =
+    "test-launcher-test-part-results-limit";
+
+// Total number of shards. Must be the same for all shards.
+const char switches::kTestLauncherTotalShards[] =
+    "test-launcher-total-shards";
+
+// Time (in milliseconds) that the tests should wait before timing out.
+const char switches::kTestLauncherTimeout[] = "test-launcher-timeout";
+
+// Path to which a trace of the test launcher's execution is saved.
+const char switches::kTestLauncherTrace[] = "test-launcher-trace";
+
+// TODO(phajdan.jr): Clean up the switch names.
+const char switches::kTestTinyTimeout[] = "test-tiny-timeout";
+const char switches::kUiTestActionTimeout[] = "ui-test-action-timeout";
+const char switches::kUiTestActionMaxTimeout[] = "ui-test-action-max-timeout";
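+
+// Illustrative (hypothetical) invocation combining several of the switches
+// above:
+//
+//   base_unittests --test-launcher-bot-mode --test-launcher-jobs=4 \
+//       --test-launcher-retry-limit=2 \
+//       --test-launcher-summary-output=/tmp/summary.json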
diff --git a/base/test/test_switches.h b/base/test/test_switches.h
new file mode 100644
index 0000000..6baba30
--- /dev/null
+++ b/base/test/test_switches.h
@@ -0,0 +1,36 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_TEST_SWITCHES_H_
+#define BASE_TEST_TEST_SWITCHES_H_
+
+namespace switches {
+
+// All switches in alphabetical order. The switches should be documented
+// alongside the definition of their values in the .cc file.
+extern const char kTestLauncherBatchLimit[];
+extern const char kTestLauncherBotMode[];
+extern const char kTestLauncherDebugLauncher[];
+extern const char kTestLauncherForceRunBrokenTests[];
+extern const char kTestLauncherFilterFile[];
+extern const char kTestLauncherInteractive[];
+extern const char kTestLauncherJobs[];
+extern const char kTestLauncherListTests[];
+extern const char kTestLauncherOutput[];
+extern const char kTestLauncherRetryLimit[];
+extern const char kTestLauncherSummaryOutput[];
+extern const char kTestLauncherPrintTestStdio[];
+extern const char kTestLauncherPrintWritablePath[];
+extern const char kTestLauncherShardIndex[];
+extern const char kTestLauncherTestPartResultsLimit[];
+extern const char kTestLauncherTotalShards[];
+extern const char kTestLauncherTimeout[];
+extern const char kTestLauncherTrace[];
+extern const char kTestTinyTimeout[];
+extern const char kUiTestActionTimeout[];
+extern const char kUiTestActionMaxTimeout[];
+
+}  // namespace switches
+
+#endif  // BASE_TEST_TEST_SWITCHES_H_
diff --git a/base/test/test_timeouts.cc b/base/test/test_timeouts.cc
new file mode 100644
index 0000000..dd5acbc
--- /dev/null
+++ b/base/test/test_timeouts.cc
@@ -0,0 +1,117 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/test_timeouts.h"
+
+#include <algorithm>
+
+#include "base/command_line.h"
+#include "base/debug/debugger.h"
+#include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/test/test_switches.h"
+#include "build/build_config.h"
+
+namespace {
+
+// Sets value to the greatest of:
+// 1) value's current value multiplied by kTimeoutMultiplier (assuming
+// InitializeTimeout is called only once per value).
+// 2) min_value.
+// 3) the numerical value given by switch_name on the command line multiplied
+// by kTimeoutMultiplier.
+void InitializeTimeout(const char* switch_name, int min_value, int* value) {
+  DCHECK(value);
+  int command_line_timeout = 0;
+  if (base::CommandLine::ForCurrentProcess()->HasSwitch(switch_name)) {
+    std::string string_value(base::CommandLine::ForCurrentProcess()->
+         GetSwitchValueASCII(switch_name));
+    if (!base::StringToInt(string_value, &command_line_timeout)) {
+      LOG(FATAL) << "Timeout value \"" << string_value << "\" was parsed as "
+                 << command_line_timeout;
+    }
+  }
+
+#if defined(MEMORY_SANITIZER)
+  // ASan/TSan/MSan instrument each memory access. This may slow the execution
+  // down significantly.
+  // For MSan the slowdown depends heavily on the value of the
+  // msan_track_origins build flag. The multiplier below corresponds to
+  // msan_track_origins = 1.
+  constexpr int kTimeoutMultiplier = 6;
+#elif defined(ADDRESS_SANITIZER) && defined(OS_WIN)
+  // ASan/Win has not been optimized yet, give it a higher
+  // timeout multiplier. See http://crbug.com/412471
+  constexpr int kTimeoutMultiplier = 3;
+#elif defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER)
+  constexpr int kTimeoutMultiplier = 2;
+#else
+  constexpr int kTimeoutMultiplier = 1;
+#endif
+
+  *value = std::max(std::max(*value, command_line_timeout) * kTimeoutMultiplier,
+                    min_value);
+}
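+
+// Worked example with hypothetical numbers: for *value = 10000, a
+// command-line override of 30000, kTimeoutMultiplier = 2 and min_value = 100,
+// the result is std::max(std::max(10000, 30000) * 2, 100) = 60000 ms.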
+
+}  // namespace
+
+// static
+bool TestTimeouts::initialized_ = false;
+
+// The timeout values should increase in the order they appear in this block.
+// static
+int TestTimeouts::tiny_timeout_ms_ = 100;
+int TestTimeouts::action_timeout_ms_ = 10000;
+#ifndef NDEBUG
+int TestTimeouts::action_max_timeout_ms_ = 45000;
+#else
+int TestTimeouts::action_max_timeout_ms_ = 30000;
+#endif  // NDEBUG
+
+int TestTimeouts::test_launcher_timeout_ms_ = 45000;
+
+// static
+void TestTimeouts::Initialize() {
+  DCHECK(!initialized_);
+  initialized_ = true;
+
+  if (base::debug::BeingDebugged()) {
+    fprintf(stdout,
+        "Detected presence of a debugger, running without test timeouts.\n");
+  }
+
+  // Note that these timeouts MUST be initialized in the correct order as
+  // per the CHECKs below.
+
+  InitializeTimeout(switches::kTestTinyTimeout, 0, &tiny_timeout_ms_);
+
+  // All timeouts other than the "tiny" one should be set to very large values
+  // when in a debugger or when run interactively, so that tests will not get
+  // auto-terminated.  By setting the UI test action timeout to at least this
+  // value, we guarantee the subsequent timeouts will be this large also.
+  // Setting the "tiny" timeout to a large value as well would make some tests
+  // hang (because it's used as a task-posting delay).  In particular this
+  // causes problems for some iOS device tests, which are always run inside a
+  // debugger (thus BeingDebugged() is true even on the bots).
+  int min_ui_test_action_timeout = tiny_timeout_ms_;
+  if (base::debug::BeingDebugged() ||
+      base::CommandLine::ForCurrentProcess()->HasSwitch(
+          switches::kTestLauncherInteractive)) {
+    constexpr int kVeryLargeTimeoutMs = 100'000'000;
+    min_ui_test_action_timeout = kVeryLargeTimeoutMs;
+  }
+
+  InitializeTimeout(switches::kUiTestActionTimeout, min_ui_test_action_timeout,
+                    &action_timeout_ms_);
+  InitializeTimeout(switches::kUiTestActionMaxTimeout, action_timeout_ms_,
+                    &action_max_timeout_ms_);
+
+  // Test launcher timeout is independent from anything above action timeout.
+  InitializeTimeout(switches::kTestLauncherTimeout, action_timeout_ms_,
+                    &test_launcher_timeout_ms_);
+
+  // The timeout values should be increasing in the right order.
+  CHECK_LE(tiny_timeout_ms_, action_timeout_ms_);
+  CHECK_LE(action_timeout_ms_, action_max_timeout_ms_);
+  CHECK_LE(action_timeout_ms_, test_launcher_timeout_ms_);
+}
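+
+// A minimal sketch of typical use from a test body, assuming Initialize() has
+// already been called by the suite (the event is hypothetical):
+//
+//   base::WaitableEvent done(
+//       base::WaitableEvent::ResetPolicy::MANUAL,
+//       base::WaitableEvent::InitialState::NOT_SIGNALED);
+//   ...
+//   EXPECT_TRUE(done.TimedWait(TestTimeouts::action_timeout()));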
diff --git a/base/test/test_timeouts.h b/base/test/test_timeouts.h
new file mode 100644
index 0000000..71983ed
--- /dev/null
+++ b/base/test/test_timeouts.h
@@ -0,0 +1,61 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_TEST_TIMEOUTS_H_
+#define BASE_TEST_TEST_TIMEOUTS_H_
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/time/time.h"
+
+// Returns common timeouts to use in tests. Makes it possible to adjust
+// the timeouts for different environments (like TSan).
+class TestTimeouts {
+ public:
+  // Initializes the timeouts. Not thread-safe. Should be called exactly once
+  // by the test suite.
+  static void Initialize();
+
+  // Timeout for actions that are expected to finish "almost instantly".  This
+  // is used in various tests to post delayed tasks and usually functions more
+  // like a delay value than a timeout.
+  static base::TimeDelta tiny_timeout() {
+    DCHECK(initialized_);
+    return base::TimeDelta::FromMilliseconds(tiny_timeout_ms_);
+  }
+
+  // Timeout to wait for something to happen. If you are not sure
+  // which timeout to use, this is the one you want.
+  static base::TimeDelta action_timeout() {
+    DCHECK(initialized_);
+    return base::TimeDelta::FromMilliseconds(action_timeout_ms_);
+  }
+
+  // Timeout longer than the above, but still suitable to use
+  // multiple times in a single test. Use if the timeout above
+  // is not sufficient.
+  static base::TimeDelta action_max_timeout() {
+    DCHECK(initialized_);
+    return base::TimeDelta::FromMilliseconds(action_max_timeout_ms_);
+  }
+
+  // Timeout for a single test launched using the built-in test launcher.
+  // Do not use outside of the test launcher.
+  static base::TimeDelta test_launcher_timeout() {
+    DCHECK(initialized_);
+    return base::TimeDelta::FromMilliseconds(test_launcher_timeout_ms_);
+  }
+
+ private:
+  static bool initialized_;
+
+  static int tiny_timeout_ms_;
+  static int action_timeout_ms_;
+  static int action_max_timeout_ms_;
+  static int test_launcher_timeout_ms_;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(TestTimeouts);
+};
+
+#endif  // BASE_TEST_TEST_TIMEOUTS_H_
diff --git a/base/test/test_ui_thread_android.cc b/base/test/test_ui_thread_android.cc
new file mode 100644
index 0000000..d19fefa
--- /dev/null
+++ b/base/test/test_ui_thread_android.cc
@@ -0,0 +1,14 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "base/test/test_ui_thread_android.h"
+
+#include "jni/TestUiThread_jni.h"
+
+namespace base {
+
+void StartTestUiThreadLooper() {
+  Java_TestUiThread_loop(base::android::AttachCurrentThread());
+}
+
+}  // namespace base
diff --git a/base/test/test_ui_thread_android.h b/base/test/test_ui_thread_android.h
new file mode 100644
index 0000000..233911a
--- /dev/null
+++ b/base/test/test_ui_thread_android.h
@@ -0,0 +1,20 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_TEST_UI_THREAD_ANDROID_
+#define BASE_TEST_TEST_UI_THREAD_ANDROID_
+
+#include <jni.h>
+
+namespace base {
+
+// Set up a thread as the Chromium UI Thread, and run its looper. This is
+// intended for C++ unit tests (e.g. the net unit tests) that don't run with
+// the UI thread as their main looper, but that test code which, on Android,
+// uses UI thread events and therefore needs a running UI thread.
+void StartTestUiThreadLooper();
+
+}  // namespace base
+
+#endif  //  BASE_TEST_TEST_UI_THREAD_ANDROID_
diff --git a/base/test/thread_test_helper.cc b/base/test/thread_test_helper.cc
new file mode 100644
index 0000000..f3ca780
--- /dev/null
+++ b/base/test/thread_test_helper.cc
@@ -0,0 +1,41 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/thread_test_helper.h"
+
+#include <utility>
+
+#include "base/bind.h"
+#include "base/location.h"
+#include "base/threading/thread_restrictions.h"
+
+namespace base {
+
+ThreadTestHelper::ThreadTestHelper(
+    scoped_refptr<SequencedTaskRunner> target_sequence)
+    : test_result_(false),
+      target_sequence_(std::move(target_sequence)),
+      done_event_(WaitableEvent::ResetPolicy::AUTOMATIC,
+                  WaitableEvent::InitialState::NOT_SIGNALED) {}
+
+bool ThreadTestHelper::Run() {
+  if (!target_sequence_->PostTask(
+          FROM_HERE, base::BindOnce(&ThreadTestHelper::RunOnSequence, this))) {
+    return false;
+  }
+  base::ThreadRestrictions::ScopedAllowWait allow_wait;
+  done_event_.Wait();
+  return test_result_;
+}
+
+void ThreadTestHelper::RunTest() { set_test_result(true); }
+
+ThreadTestHelper::~ThreadTestHelper() = default;
+
+void ThreadTestHelper::RunOnSequence() {
+  RunTest();
+  done_event_.Signal();
+}
+
+}  // namespace base
diff --git a/base/test/thread_test_helper.h b/base/test/thread_test_helper.h
new file mode 100644
index 0000000..935e7ef
--- /dev/null
+++ b/base/test/thread_test_helper.h
@@ -0,0 +1,50 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_THREAD_TEST_HELPER_H_
+#define BASE_TEST_THREAD_TEST_HELPER_H_
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/sequenced_task_runner.h"
+#include "base/synchronization/waitable_event.h"
+
+namespace base {
+
+// Helper class that executes code on a given target sequence/thread while
+// blocking on the invoking sequence/thread. To use, derive from this class and
+// override RunTest. Alternatively, the class can be used directly, in which
+// case it blocks until all pending tasks on the given sequence/thread have
+// been executed.
+class ThreadTestHelper : public RefCountedThreadSafe<ThreadTestHelper> {
+ public:
+  explicit ThreadTestHelper(scoped_refptr<SequencedTaskRunner> target_sequence);
+
+  // True if RunTest() was successfully executed on the target sequence.
+  bool Run() WARN_UNUSED_RESULT;
+
+  virtual void RunTest();
+
+ protected:
+  friend class RefCountedThreadSafe<ThreadTestHelper>;
+
+  virtual ~ThreadTestHelper();
+
+  // Use this method to store the result of RunTest().
+  void set_test_result(bool test_result) { test_result_ = test_result; }
+
+ private:
+  void RunOnSequence();
+
+  bool test_result_;
+  scoped_refptr<SequencedTaskRunner> target_sequence_;
+  WaitableEvent done_event_;
+
+  DISALLOW_COPY_AND_ASSIGN(ThreadTestHelper);
+};
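+
+// A minimal sketch of the direct (non-derived) use mentioned above, assuming
+// |task_runner| targets the sequence whose queue should be drained:
+//
+//   scoped_refptr<base::ThreadTestHelper> helper(
+//       new base::ThreadTestHelper(std::move(task_runner)));
+//   EXPECT_TRUE(helper->Run());  // Blocks until the posted task runs.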
+
+}  // namespace base
+
+#endif  // BASE_TEST_THREAD_TEST_HELPER_H_
diff --git a/base/test/trace_event_analyzer.cc b/base/test/trace_event_analyzer.cc
new file mode 100644
index 0000000..cab2899
--- /dev/null
+++ b/base/test/trace_event_analyzer.cc
@@ -0,0 +1,1069 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/trace_event_analyzer.h"
+
+#include <math.h>
+
+#include <algorithm>
+#include <set>
+
+#include "base/json/json_reader.h"
+#include "base/memory/ptr_util.h"
+#include "base/memory/ref_counted_memory.h"
+#include "base/run_loop.h"
+#include "base/strings/pattern.h"
+#include "base/trace_event/trace_buffer.h"
+#include "base/trace_event/trace_config.h"
+#include "base/trace_event/trace_log.h"
+#include "base/values.h"
+
+namespace {
+void OnTraceDataCollected(base::OnceClosure quit_closure,
+                          base::trace_event::TraceResultBuffer* buffer,
+                          const scoped_refptr<base::RefCountedString>& json,
+                          bool has_more_events) {
+  buffer->AddFragment(json->data());
+  if (!has_more_events)
+    std::move(quit_closure).Run();
+}
+}  // namespace
+
+namespace trace_analyzer {
+
+// TraceEvent
+
+TraceEvent::TraceEvent()
+    : thread(0, 0),
+      timestamp(0),
+      duration(0),
+      phase(TRACE_EVENT_PHASE_BEGIN),
+      other_event(nullptr) {}
+
+TraceEvent::TraceEvent(TraceEvent&& other) = default;
+
+TraceEvent::~TraceEvent() = default;
+
+TraceEvent& TraceEvent::operator=(TraceEvent&& rhs) = default;
+
+bool TraceEvent::SetFromJSON(const base::Value* event_value) {
+  if (event_value->type() != base::Value::Type::DICTIONARY) {
+    LOG(ERROR) << "Value must be Type::DICTIONARY";
+    return false;
+  }
+  const base::DictionaryValue* dictionary =
+      static_cast<const base::DictionaryValue*>(event_value);
+
+  std::string phase_str;
+  const base::DictionaryValue* args = nullptr;
+
+  if (!dictionary->GetString("ph", &phase_str)) {
+    LOG(ERROR) << "ph is missing from TraceEvent JSON";
+    return false;
+  }
+
+  phase = *phase_str.data();
+
+  bool may_have_duration = (phase == TRACE_EVENT_PHASE_COMPLETE);
+  bool require_origin = (phase != TRACE_EVENT_PHASE_METADATA);
+  bool require_id = (phase == TRACE_EVENT_PHASE_ASYNC_BEGIN ||
+                     phase == TRACE_EVENT_PHASE_ASYNC_STEP_INTO ||
+                     phase == TRACE_EVENT_PHASE_ASYNC_STEP_PAST ||
+                     phase == TRACE_EVENT_PHASE_MEMORY_DUMP ||
+                     phase == TRACE_EVENT_PHASE_ENTER_CONTEXT ||
+                     phase == TRACE_EVENT_PHASE_LEAVE_CONTEXT ||
+                     phase == TRACE_EVENT_PHASE_CREATE_OBJECT ||
+                     phase == TRACE_EVENT_PHASE_DELETE_OBJECT ||
+                     phase == TRACE_EVENT_PHASE_SNAPSHOT_OBJECT ||
+                     phase == TRACE_EVENT_PHASE_ASYNC_END);
+
+  if (require_origin && !dictionary->GetInteger("pid", &thread.process_id)) {
+    LOG(ERROR) << "pid is missing from TraceEvent JSON";
+    return false;
+  }
+  if (require_origin && !dictionary->GetInteger("tid", &thread.thread_id)) {
+    LOG(ERROR) << "tid is missing from TraceEvent JSON";
+    return false;
+  }
+  if (require_origin && !dictionary->GetDouble("ts", &timestamp)) {
+    LOG(ERROR) << "ts is missing from TraceEvent JSON";
+    return false;
+  }
+  if (may_have_duration) {
+    dictionary->GetDouble("dur", &duration);
+  }
+  if (!dictionary->GetString("cat", &category)) {
+    LOG(ERROR) << "cat is missing from TraceEvent JSON";
+    return false;
+  }
+  if (!dictionary->GetString("name", &name)) {
+    LOG(ERROR) << "name is missing from TraceEvent JSON";
+    return false;
+  }
+  if (!dictionary->GetDictionary("args", &args)) {
+    LOG(ERROR) << "args is missing from TraceEvent JSON";
+    return false;
+  }
+  if (require_id && !dictionary->GetString("id", &id)) {
+    LOG(ERROR) << "id is missing from ASYNC_BEGIN/ASYNC_END TraceEvent JSON";
+    return false;
+  }
+
+  dictionary->GetDouble("tdur", &thread_duration);
+  dictionary->GetDouble("tts", &thread_timestamp);
+  dictionary->GetString("scope", &scope);
+  dictionary->GetString("bind_id", &bind_id);
+  dictionary->GetBoolean("flow_out", &flow_out);
+  dictionary->GetBoolean("flow_in", &flow_in);
+
+  const base::DictionaryValue* id2;
+  if (dictionary->GetDictionary("id2", &id2)) {
+    id2->GetString("global", &global_id2);
+    id2->GetString("local", &local_id2);
+  }
+
+  // For each argument, copy the type and create a trace_analyzer::TraceValue.
+  for (base::DictionaryValue::Iterator it(*args); !it.IsAtEnd();
+       it.Advance()) {
+    std::string str;
+    bool boolean = false;
+    int int_num = 0;
+    double double_num = 0.0;
+    if (it.value().GetAsString(&str)) {
+      arg_strings[it.key()] = str;
+    } else if (it.value().GetAsInteger(&int_num)) {
+      arg_numbers[it.key()] = static_cast<double>(int_num);
+    } else if (it.value().GetAsBoolean(&boolean)) {
+      arg_numbers[it.key()] = static_cast<double>(boolean ? 1 : 0);
+    } else if (it.value().GetAsDouble(&double_num)) {
+      arg_numbers[it.key()] = double_num;
+    }
+    // Record all arguments as values.
+    arg_values[it.key()] = it.value().CreateDeepCopy();
+  }
+
+  return true;
+}
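+
+// Example of the JSON shape SetFromJSON() accepts, in the trace-event format
+// parsed above (abridged; the values are hypothetical):
+//
+//   {"ph":"X","pid":1234,"tid":5678,"ts":1000.0,"dur":250.0,
+//    "cat":"test","name":"DoWork","args":{"count":3}}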
+
+double TraceEvent::GetAbsTimeToOtherEvent() const {
+  return fabs(other_event->timestamp - timestamp);
+}
+
+bool TraceEvent::GetArgAsString(const std::string& name,
+                                std::string* arg) const {
+  const auto it = arg_strings.find(name);
+  if (it != arg_strings.end()) {
+    *arg = it->second;
+    return true;
+  }
+  return false;
+}
+
+bool TraceEvent::GetArgAsNumber(const std::string& name,
+                                double* arg) const {
+  const auto it = arg_numbers.find(name);
+  if (it != arg_numbers.end()) {
+    *arg = it->second;
+    return true;
+  }
+  return false;
+}
+
+bool TraceEvent::GetArgAsValue(const std::string& name,
+                               std::unique_ptr<base::Value>* arg) const {
+  const auto it = arg_values.find(name);
+  if (it != arg_values.end()) {
+    *arg = it->second->CreateDeepCopy();
+    return true;
+  }
+  return false;
+}
+
+bool TraceEvent::HasStringArg(const std::string& name) const {
+  return (arg_strings.find(name) != arg_strings.end());
+}
+
+bool TraceEvent::HasNumberArg(const std::string& name) const {
+  return (arg_numbers.find(name) != arg_numbers.end());
+}
+
+bool TraceEvent::HasArg(const std::string& name) const {
+  return (arg_values.find(name) != arg_values.end());
+}
+
+std::string TraceEvent::GetKnownArgAsString(const std::string& name) const {
+  std::string arg_string;
+  bool result = GetArgAsString(name, &arg_string);
+  DCHECK(result);
+  return arg_string;
+}
+
+double TraceEvent::GetKnownArgAsDouble(const std::string& name) const {
+  double arg_double = 0;
+  bool result = GetArgAsNumber(name, &arg_double);
+  DCHECK(result);
+  return arg_double;
+}
+
+int TraceEvent::GetKnownArgAsInt(const std::string& name) const {
+  double arg_double = 0;
+  bool result = GetArgAsNumber(name, &arg_double);
+  DCHECK(result);
+  return static_cast<int>(arg_double);
+}
+
+bool TraceEvent::GetKnownArgAsBool(const std::string& name) const {
+  double arg_double = 0;
+  bool result = GetArgAsNumber(name, &arg_double);
+  DCHECK(result);
+  return (arg_double != 0.0);
+}
+
+std::unique_ptr<base::Value> TraceEvent::GetKnownArgAsValue(
+    const std::string& name) const {
+  std::unique_ptr<base::Value> arg_value;
+  bool result = GetArgAsValue(name, &arg_value);
+  DCHECK(result);
+  return arg_value;
+}
+
+// QueryNode
+
+QueryNode::QueryNode(const Query& query) : query_(query) {
+}
+
+QueryNode::~QueryNode() = default;
+
+// Query
+
+Query::Query(TraceEventMember member)
+    : type_(QUERY_EVENT_MEMBER),
+      operator_(OP_INVALID),
+      member_(member),
+      number_(0),
+      is_pattern_(false) {
+}
+
+Query::Query(TraceEventMember member, const std::string& arg_name)
+    : type_(QUERY_EVENT_MEMBER),
+      operator_(OP_INVALID),
+      member_(member),
+      number_(0),
+      string_(arg_name),
+      is_pattern_(false) {
+}
+
+Query::Query(const Query& query) = default;
+
+Query::~Query() = default;
+
+Query Query::String(const std::string& str) {
+  return Query(str);
+}
+
+Query Query::Double(double num) {
+  return Query(num);
+}
+
+Query Query::Int(int32_t num) {
+  return Query(static_cast<double>(num));
+}
+
+Query Query::Uint(uint32_t num) {
+  return Query(static_cast<double>(num));
+}
+
+Query Query::Bool(bool boolean) {
+  return Query(boolean ? 1.0 : 0.0);
+}
+
+Query Query::Phase(char phase) {
+  return Query(static_cast<double>(phase));
+}
+
+Query Query::Pattern(const std::string& pattern) {
+  Query query(pattern);
+  query.is_pattern_ = true;
+  return query;
+}
+
+bool Query::Evaluate(const TraceEvent& event) const {
+  // First check for values that can convert to bool.
+
+  // double is true if != 0:
+  double bool_value = 0.0;
+  bool is_bool = GetAsDouble(event, &bool_value);
+  if (is_bool)
+    return (bool_value != 0.0);
+
+  // string is true if it is non-empty:
+  std::string str_value;
+  bool is_str = GetAsString(event, &str_value);
+  if (is_str)
+    return !str_value.empty();
+
+  DCHECK_EQ(QUERY_BOOLEAN_OPERATOR, type_)
+      << "Invalid query: missing boolean expression";
+  DCHECK(left_.get());
+  DCHECK(right_.get() || is_unary_operator());
+
+  if (is_comparison_operator()) {
+    DCHECK(left().is_value() && right().is_value())
+        << "Invalid query: comparison operands must be values, not boolean "
+           "expressions.";
+    bool compare_result = false;
+    if (CompareAsDouble(event, &compare_result))
+      return compare_result;
+    if (CompareAsString(event, &compare_result))
+      return compare_result;
+    return false;
+  }
+  // It's a logical operator.
+  switch (operator_) {
+    case OP_AND:
+      return left().Evaluate(event) && right().Evaluate(event);
+    case OP_OR:
+      return left().Evaluate(event) || right().Evaluate(event);
+    case OP_NOT:
+      return !left().Evaluate(event);
+    default:
+      NOTREACHED();
+      return false;
+  }
+}
+
+bool Query::CompareAsDouble(const TraceEvent& event, bool* result) const {
+  double lhs, rhs;
+  if (!left().GetAsDouble(event, &lhs) || !right().GetAsDouble(event, &rhs))
+    return false;
+  switch (operator_) {
+    case OP_EQ:
+      *result = (lhs == rhs);
+      return true;
+    case OP_NE:
+      *result = (lhs != rhs);
+      return true;
+    case OP_LT:
+      *result = (lhs < rhs);
+      return true;
+    case OP_LE:
+      *result = (lhs <= rhs);
+      return true;
+    case OP_GT:
+      *result = (lhs > rhs);
+      return true;
+    case OP_GE:
+      *result = (lhs >= rhs);
+      return true;
+    default:
+      NOTREACHED();
+      return false;
+  }
+}
+
+bool Query::CompareAsString(const TraceEvent& event, bool* result) const {
+  std::string lhs, rhs;
+  if (!left().GetAsString(event, &lhs) || !right().GetAsString(event, &rhs))
+    return false;
+  switch (operator_) {
+    case OP_EQ:
+      if (right().is_pattern_)
+        *result = base::MatchPattern(lhs, rhs);
+      else if (left().is_pattern_)
+        *result = base::MatchPattern(rhs, lhs);
+      else
+        *result = (lhs == rhs);
+      return true;
+    case OP_NE:
+      if (right().is_pattern_)
+        *result = !base::MatchPattern(lhs, rhs);
+      else if (left().is_pattern_)
+        *result = !base::MatchPattern(rhs, lhs);
+      else
+        *result = (lhs != rhs);
+      return true;
+    case OP_LT:
+      *result = (lhs < rhs);
+      return true;
+    case OP_LE:
+      *result = (lhs <= rhs);
+      return true;
+    case OP_GT:
+      *result = (lhs > rhs);
+      return true;
+    case OP_GE:
+      *result = (lhs >= rhs);
+      return true;
+    default:
+      NOTREACHED();
+      return false;
+  }
+}
+
+bool Query::EvaluateArithmeticOperator(const TraceEvent& event,
+                                       double* num) const {
+  DCHECK_EQ(QUERY_ARITHMETIC_OPERATOR, type_);
+  DCHECK(left_.get());
+  DCHECK(right_.get() || is_unary_operator());
+
+  double lhs = 0, rhs = 0;
+  if (!left().GetAsDouble(event, &lhs))
+    return false;
+  if (!is_unary_operator() && !right().GetAsDouble(event, &rhs))
+    return false;
+
+  switch (operator_) {
+    case OP_ADD:
+      *num = lhs + rhs;
+      return true;
+    case OP_SUB:
+      *num = lhs - rhs;
+      return true;
+    case OP_MUL:
+      *num = lhs * rhs;
+      return true;
+    case OP_DIV:
+      *num = lhs / rhs;
+      return true;
+    case OP_MOD:
+      *num = static_cast<double>(static_cast<int64_t>(lhs) %
+                                 static_cast<int64_t>(rhs));
+      return true;
+    case OP_NEGATE:
+      *num = -lhs;
+      return true;
+    default:
+      NOTREACHED();
+      return false;
+  }
+}
+
+bool Query::GetAsDouble(const TraceEvent& event, double* num) const {
+  switch (type_) {
+    case QUERY_ARITHMETIC_OPERATOR:
+      return EvaluateArithmeticOperator(event, num);
+    case QUERY_EVENT_MEMBER:
+      return GetMemberValueAsDouble(event, num);
+    case QUERY_NUMBER:
+      *num = number_;
+      return true;
+    default:
+      return false;
+  }
+}
+
+bool Query::GetAsString(const TraceEvent& event, std::string* str) const {
+  switch (type_) {
+    case QUERY_EVENT_MEMBER:
+      return GetMemberValueAsString(event, str);
+    case QUERY_STRING:
+      *str = string_;
+      return true;
+    default:
+      return false;
+  }
+}
+
+const TraceEvent* Query::SelectTargetEvent(const TraceEvent* event,
+                                           TraceEventMember member) {
+  if (member >= OTHER_FIRST_MEMBER && member <= OTHER_LAST_MEMBER) {
+    return event->other_event;
+  } else if (member >= PREV_FIRST_MEMBER && member <= PREV_LAST_MEMBER) {
+    return event->prev_event;
+  } else {
+    return event;
+  }
+}
+
+bool Query::GetMemberValueAsDouble(const TraceEvent& event,
+                                   double* num) const {
+  DCHECK_EQ(QUERY_EVENT_MEMBER, type_);
+
+  // This could be a request for a member of |event| or a member of |event|'s
+  // associated previous or next event. Store the target event in the_event:
+  const TraceEvent* the_event = SelectTargetEvent(&event, member_);
+
+  // Request for member of associated event, but there is no associated event.
+  if (!the_event)
+    return false;
+
+  switch (member_) {
+    case EVENT_PID:
+    case OTHER_PID:
+    case PREV_PID:
+      *num = static_cast<double>(the_event->thread.process_id);
+      return true;
+    case EVENT_TID:
+    case OTHER_TID:
+    case PREV_TID:
+      *num = static_cast<double>(the_event->thread.thread_id);
+      return true;
+    case EVENT_TIME:
+    case OTHER_TIME:
+    case PREV_TIME:
+      *num = the_event->timestamp;
+      return true;
+    case EVENT_DURATION:
+      if (!the_event->has_other_event())
+        return false;
+      *num = the_event->GetAbsTimeToOtherEvent();
+      return true;
+    case EVENT_COMPLETE_DURATION:
+      if (the_event->phase != TRACE_EVENT_PHASE_COMPLETE)
+        return false;
+      *num = the_event->duration;
+      return true;
+    case EVENT_PHASE:
+    case OTHER_PHASE:
+    case PREV_PHASE:
+      *num = static_cast<double>(the_event->phase);
+      return true;
+    case EVENT_HAS_STRING_ARG:
+    case OTHER_HAS_STRING_ARG:
+    case PREV_HAS_STRING_ARG:
+      *num = (the_event->HasStringArg(string_) ? 1.0 : 0.0);
+      return true;
+    case EVENT_HAS_NUMBER_ARG:
+    case OTHER_HAS_NUMBER_ARG:
+    case PREV_HAS_NUMBER_ARG:
+      *num = (the_event->HasNumberArg(string_) ? 1.0 : 0.0);
+      return true;
+    case EVENT_ARG:
+    case OTHER_ARG:
+    case PREV_ARG: {
+      // Search for the argument name and return its value if found.
+      std::map<std::string, double>::const_iterator num_i =
+          the_event->arg_numbers.find(string_);
+      if (num_i == the_event->arg_numbers.end())
+        return false;
+      *num = num_i->second;
+      return true;
+    }
+    case EVENT_HAS_OTHER:
+      // Return 1.0 (true) if the other event exists.
+      *num = event.other_event ? 1.0 : 0.0;
+      return true;
+    case EVENT_HAS_PREV:
+      *num = event.prev_event ? 1.0 : 0.0;
+      return true;
+    default:
+      return false;
+  }
+}
+
+bool Query::GetMemberValueAsString(const TraceEvent& event,
+                                   std::string* str) const {
+  DCHECK_EQ(QUERY_EVENT_MEMBER, type_);
+
+  // This could be a request for a member of |event| or a member of |event|'s
+  // associated previous or next event. Store the target event in the_event:
+  const TraceEvent* the_event = SelectTargetEvent(&event, member_);
+
+  // Request for member of associated event, but there is no associated event.
+  if (!the_event)
+    return false;
+
+  switch (member_) {
+    case EVENT_CATEGORY:
+    case OTHER_CATEGORY:
+    case PREV_CATEGORY:
+      *str = the_event->category;
+      return true;
+    case EVENT_NAME:
+    case OTHER_NAME:
+    case PREV_NAME:
+      *str = the_event->name;
+      return true;
+    case EVENT_ID:
+    case OTHER_ID:
+    case PREV_ID:
+      *str = the_event->id;
+      return true;
+    case EVENT_ARG:
+    case OTHER_ARG:
+    case PREV_ARG: {
+      // Search for the argument name and return its value if found.
+      std::map<std::string, std::string>::const_iterator str_i =
+          the_event->arg_strings.find(string_);
+      if (str_i == the_event->arg_strings.end())
+        return false;
+      *str = str_i->second;
+      return true;
+    }
+    default:
+      return false;
+  }
+}
+
+Query::Query(const std::string& str)
+    : type_(QUERY_STRING),
+      operator_(OP_INVALID),
+      member_(EVENT_INVALID),
+      number_(0),
+      string_(str),
+      is_pattern_(false) {
+}
+
+Query::Query(double num)
+    : type_(QUERY_NUMBER),
+      operator_(OP_INVALID),
+      member_(EVENT_INVALID),
+      number_(num),
+      is_pattern_(false) {
+}
+
+const Query& Query::left() const {
+  return left_->query();
+}
+
+const Query& Query::right() const {
+  return right_->query();
+}
+
+Query Query::operator==(const Query& rhs) const {
+  return Query(*this, rhs, OP_EQ);
+}
+
+Query Query::operator!=(const Query& rhs) const {
+  return Query(*this, rhs, OP_NE);
+}
+
+Query Query::operator<(const Query& rhs) const {
+  return Query(*this, rhs, OP_LT);
+}
+
+Query Query::operator<=(const Query& rhs) const {
+  return Query(*this, rhs, OP_LE);
+}
+
+Query Query::operator>(const Query& rhs) const {
+  return Query(*this, rhs, OP_GT);
+}
+
+Query Query::operator>=(const Query& rhs) const {
+  return Query(*this, rhs, OP_GE);
+}
+
+Query Query::operator&&(const Query& rhs) const {
+  return Query(*this, rhs, OP_AND);
+}
+
+Query Query::operator||(const Query& rhs) const {
+  return Query(*this, rhs, OP_OR);
+}
+
+Query Query::operator!() const {
+  return Query(*this, OP_NOT);
+}
+
+Query Query::operator+(const Query& rhs) const {
+  return Query(*this, rhs, OP_ADD);
+}
+
+Query Query::operator-(const Query& rhs) const {
+  return Query(*this, rhs, OP_SUB);
+}
+
+Query Query::operator*(const Query& rhs) const {
+  return Query(*this, rhs, OP_MUL);
+}
+
+Query Query::operator/(const Query& rhs) const {
+  return Query(*this, rhs, OP_DIV);
+}
+
+Query Query::operator%(const Query& rhs) const {
+  return Query(*this, rhs, OP_MOD);
+}
+
+Query Query::operator-() const {
+  return Query(*this, OP_NEGATE);
+}
+
+Query::Query(const Query& left, const Query& right, Operator binary_op)
+    : operator_(binary_op),
+      left_(new QueryNode(left)),
+      right_(new QueryNode(right)),
+      member_(EVENT_INVALID),
+      number_(0),
+      is_pattern_(false) {
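+  // Boolean operators precede OP_ADD in the Operator enum; OP_ADD and above
+  // are arithmetic, so the enum ordering determines the query type here.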
+  type_ = (binary_op < OP_ADD ?
+           QUERY_BOOLEAN_OPERATOR : QUERY_ARITHMETIC_OPERATOR);
+}
+
+Query::Query(const Query& left, Operator unary_op)
+    : operator_(unary_op),
+      left_(new QueryNode(left)),
+      member_(EVENT_INVALID),
+      number_(0),
+      is_pattern_(false) {
+  type_ = (unary_op < OP_ADD ?
+           QUERY_BOOLEAN_OPERATOR : QUERY_ARITHMETIC_OPERATOR);
+}
+
+namespace {
+
+// Search |events| for |query| and add matches to |output|.
+size_t FindMatchingEvents(const std::vector<TraceEvent>& events,
+                          const Query& query,
+                          TraceEventVector* output,
+                          bool ignore_metadata_events) {
+  for (size_t i = 0; i < events.size(); ++i) {
+    if (ignore_metadata_events && events[i].phase == TRACE_EVENT_PHASE_METADATA)
+      continue;
+    if (query.Evaluate(events[i]))
+      output->push_back(&events[i]);
+  }
+  return output->size();
+}
+
+bool ParseEventsFromJson(const std::string& json,
+                         std::vector<TraceEvent>* output) {
+  std::unique_ptr<base::Value> root = base::JSONReader::Read(json);
+
+  base::ListValue* root_list = nullptr;
+  if (!root.get() || !root->GetAsList(&root_list))
+    return false;
+
+  for (size_t i = 0; i < root_list->GetSize(); ++i) {
+    base::Value* item = nullptr;
+    if (root_list->Get(i, &item)) {
+      TraceEvent event;
+      if (event.SetFromJSON(item))
+        output->push_back(std::move(event));
+      else
+        return false;
+    }
+  }
+
+  return true;
+}
+
+}  // namespace
+
+// TraceAnalyzer
+
+TraceAnalyzer::TraceAnalyzer()
+    : ignore_metadata_events_(false), allow_association_changes_(true) {}
+
+TraceAnalyzer::~TraceAnalyzer() = default;
+
+// static
+TraceAnalyzer* TraceAnalyzer::Create(const std::string& json_events) {
+  std::unique_ptr<TraceAnalyzer> analyzer(new TraceAnalyzer());
+  if (analyzer->SetEvents(json_events))
+    return analyzer.release();
+  return nullptr;
+}
+
+bool TraceAnalyzer::SetEvents(const std::string& json_events) {
+  raw_events_.clear();
+  if (!ParseEventsFromJson(json_events, &raw_events_))
+    return false;
+  std::stable_sort(raw_events_.begin(), raw_events_.end());
+  ParseMetadata();
+  return true;
+}
+
+void TraceAnalyzer::AssociateBeginEndEvents() {
+  using trace_analyzer::Query;
+
+  Query begin(Query::EventPhaseIs(TRACE_EVENT_PHASE_BEGIN));
+  Query end(Query::EventPhaseIs(TRACE_EVENT_PHASE_END));
+  Query match(Query::EventName() == Query::OtherName() &&
+              Query::EventCategory() == Query::OtherCategory() &&
+              Query::EventTid() == Query::OtherTid() &&
+              Query::EventPid() == Query::OtherPid());
+
+  AssociateEvents(begin, end, match);
+}
+
+void TraceAnalyzer::AssociateAsyncBeginEndEvents(bool match_pid) {
+  using trace_analyzer::Query;
+
+  Query begin(
+      Query::EventPhaseIs(TRACE_EVENT_PHASE_ASYNC_BEGIN) ||
+      Query::EventPhaseIs(TRACE_EVENT_PHASE_ASYNC_STEP_INTO) ||
+      Query::EventPhaseIs(TRACE_EVENT_PHASE_ASYNC_STEP_PAST));
+  Query end(Query::EventPhaseIs(TRACE_EVENT_PHASE_ASYNC_END) ||
+            Query::EventPhaseIs(TRACE_EVENT_PHASE_ASYNC_STEP_INTO) ||
+            Query::EventPhaseIs(TRACE_EVENT_PHASE_ASYNC_STEP_PAST));
+  Query match(Query::EventCategory() == Query::OtherCategory() &&
+              Query::EventId() == Query::OtherId());
+
+  if (match_pid) {
+    match = match && Query::EventPid() == Query::OtherPid();
+  }
+
+  AssociateEvents(begin, end, match);
+}
+
+void TraceAnalyzer::AssociateEvents(const Query& first,
+                                    const Query& second,
+                                    const Query& match) {
+  DCHECK(allow_association_changes_)
+      << "AssociateEvents not allowed after FindEvents";
+
+  // Search for matching begin/end event pairs. When a matching end is found,
+  // it is associated with the begin event.
+  std::vector<TraceEvent*> begin_stack;
+  for (size_t event_index = 0; event_index < raw_events_.size();
+       ++event_index) {
+
+    TraceEvent& this_event = raw_events_[event_index];
+
+    if (second.Evaluate(this_event)) {
+      // Search stack for matching begin, starting from end.
+      for (int stack_index = static_cast<int>(begin_stack.size()) - 1;
+           stack_index >= 0; --stack_index) {
+        TraceEvent& begin_event = *begin_stack[stack_index];
+
+        // Temporarily set other to test against the match query.
+        const TraceEvent* other_backup = begin_event.other_event;
+        begin_event.other_event = &this_event;
+        if (match.Evaluate(begin_event)) {
+          // Found a matching begin/end pair.
+          // Set the associated previous event
+          this_event.prev_event = &begin_event;
+          // Erase the matching begin event index from the stack.
+          begin_stack.erase(begin_stack.begin() + stack_index);
+          break;
+        }
+
+        // Not a match, restore original other and continue.
+        begin_event.other_event = other_backup;
+      }
+    }
+    // Even if this_event is a |second| event that has matched an earlier
+    // |first| event, it can still also be a |first| event and be associated
+    // with a later |second| event.
+    if (first.Evaluate(this_event)) {
+      begin_stack.push_back(&this_event);
+    }
+  }
+}
+
+void TraceAnalyzer::MergeAssociatedEventArgs() {
+  for (size_t i = 0; i < raw_events_.size(); ++i) {
+    // Merge all associated events with the first event.
+    const TraceEvent* other = raw_events_[i].other_event;
+    // Avoid looping by keeping set of encountered TraceEvents.
+    std::set<const TraceEvent*> encounters;
+    encounters.insert(&raw_events_[i]);
+    while (other && encounters.find(other) == encounters.end()) {
+      encounters.insert(other);
+      raw_events_[i].arg_numbers.insert(
+          other->arg_numbers.begin(),
+          other->arg_numbers.end());
+      raw_events_[i].arg_strings.insert(
+          other->arg_strings.begin(),
+          other->arg_strings.end());
+      other = other->other_event;
+    }
+  }
+}
+
+size_t TraceAnalyzer::FindEvents(const Query& query, TraceEventVector* output) {
+  allow_association_changes_ = false;
+  output->clear();
+  return FindMatchingEvents(
+      raw_events_, query, output, ignore_metadata_events_);
+}
+
+const TraceEvent* TraceAnalyzer::FindFirstOf(const Query& query) {
+  TraceEventVector output;
+  if (FindEvents(query, &output) > 0)
+    return output.front();
+  return nullptr;
+}
+
+const TraceEvent* TraceAnalyzer::FindLastOf(const Query& query) {
+  TraceEventVector output;
+  if (FindEvents(query, &output) > 0)
+    return output.back();
+  return nullptr;
+}
+
+const std::string& TraceAnalyzer::GetThreadName(
+    const TraceEvent::ProcessThreadID& thread) {
+  // If the thread is not found, this adds an empty entry and returns it.
+  return thread_names_[thread];
+}
+
+void TraceAnalyzer::ParseMetadata() {
+  for (size_t i = 0; i < raw_events_.size(); ++i) {
+    TraceEvent& this_event = raw_events_[i];
+    // Check for thread name metadata.
+    if (this_event.phase != TRACE_EVENT_PHASE_METADATA ||
+        this_event.name != "thread_name")
+      continue;
+    std::map<std::string, std::string>::const_iterator string_it =
+        this_event.arg_strings.find("name");
+    if (string_it != this_event.arg_strings.end())
+      thread_names_[this_event.thread] = string_it->second;
+  }
+}
+
+// Utility functions for collecting process-local traces and creating a
+// |TraceAnalyzer| from the result.
+
+void Start(const std::string& category_filter_string) {
+  DCHECK(!base::trace_event::TraceLog::GetInstance()->IsEnabled());
+  base::trace_event::TraceLog::GetInstance()->SetEnabled(
+      base::trace_event::TraceConfig(category_filter_string, ""),
+      base::trace_event::TraceLog::RECORDING_MODE);
+}
+
+std::unique_ptr<TraceAnalyzer> Stop() {
+  DCHECK(base::trace_event::TraceLog::GetInstance()->IsEnabled());
+  base::trace_event::TraceLog::GetInstance()->SetDisabled();
+
+  base::trace_event::TraceResultBuffer buffer;
+  base::trace_event::TraceResultBuffer::SimpleOutput trace_output;
+  buffer.SetOutputCallback(trace_output.GetCallback());
+  base::RunLoop run_loop;
+  buffer.Start();
+  base::trace_event::TraceLog::GetInstance()->Flush(
+      base::BindRepeating(&OnTraceDataCollected, run_loop.QuitClosure(),
+                          base::Unretained(&buffer)));
+  run_loop.Run();
+  buffer.Finish();
+
+  return base::WrapUnique(TraceAnalyzer::Create(trace_output.json_output));
+}
+
+// TraceEventVector utility functions.
+
+bool GetRateStats(const TraceEventVector& events,
+                  RateStats* stats,
+                  const RateStatsOptions* options) {
+  DCHECK(stats);
+  // Need at least 3 events (2 deltas) so the sample standard deviation below
+  // is well-defined.
+  const size_t kMinEvents = 3;
+  if (events.size() < kMinEvents) {
+    LOG(ERROR) << "Not enough events: " << events.size();
+    return false;
+  }
+
+  std::vector<double> deltas;
+  size_t num_deltas = events.size() - 1;
+  for (size_t i = 0; i < num_deltas; ++i) {
+    double delta = events.at(i + 1)->timestamp - events.at(i)->timestamp;
+    if (delta < 0.0) {
+      LOG(ERROR) << "Events are out of order";
+      return false;
+    }
+    deltas.push_back(delta);
+  }
+
+  std::sort(deltas.begin(), deltas.end());
+
+  if (options) {
+    if (options->trim_min + options->trim_max > events.size() - kMinEvents) {
+      LOG(ERROR) << "Attempt to trim too many events";
+      return false;
+    }
+    deltas.erase(deltas.begin(), deltas.begin() + options->trim_min);
+    deltas.erase(deltas.end() - options->trim_max, deltas.end());
+  }
+
+  num_deltas = deltas.size();
+  double delta_sum = 0.0;
+  for (size_t i = 0; i < num_deltas; ++i)
+    delta_sum += deltas[i];
+
+  stats->min_us = *std::min_element(deltas.begin(), deltas.end());
+  stats->max_us = *std::max_element(deltas.begin(), deltas.end());
+  stats->mean_us = delta_sum / static_cast<double>(num_deltas);
+
+  double sum_mean_offsets_squared = 0.0;
+  for (size_t i = 0; i < num_deltas; ++i) {
+    double offset = fabs(deltas[i] - stats->mean_us);
+    sum_mean_offsets_squared += offset * offset;
+  }
+  stats->standard_deviation_us =
+      sqrt(sum_mean_offsets_squared / static_cast<double>(num_deltas - 1));
+
+  return true;
+}
+
+bool FindFirstOf(const TraceEventVector& events,
+                 const Query& query,
+                 size_t position,
+                 size_t* return_index) {
+  DCHECK(return_index);
+  for (size_t i = position; i < events.size(); ++i) {
+    if (query.Evaluate(*events[i])) {
+      *return_index = i;
+      return true;
+    }
+  }
+  return false;
+}
+
+bool FindLastOf(const TraceEventVector& events,
+                const Query& query,
+                size_t position,
+                size_t* return_index) {
+  DCHECK(return_index);
+  for (size_t i = std::min(position + 1, events.size()); i != 0; --i) {
+    if (query.Evaluate(*events[i - 1])) {
+      *return_index = i - 1;
+      return true;
+    }
+  }
+  return false;
+}
+
+bool FindClosest(const TraceEventVector& events,
+                 const Query& query,
+                 size_t position,
+                 size_t* return_closest,
+                 size_t* return_second_closest) {
+  DCHECK(return_closest);
+  if (events.empty() || position >= events.size())
+    return false;
+  size_t closest = events.size();
+  size_t second_closest = events.size();
+  for (size_t i = 0; i < events.size(); ++i) {
+    if (!query.Evaluate(*events.at(i)))
+      continue;
+    if (closest == events.size()) {
+      closest = i;
+      continue;
+    }
+    if (fabs(events.at(i)->timestamp - events.at(position)->timestamp) <
+        fabs(events.at(closest)->timestamp - events.at(position)->timestamp)) {
+      second_closest = closest;
+      closest = i;
+    } else if (second_closest == events.size() ||
+               fabs(events.at(i)->timestamp - events.at(position)->timestamp) <
+                   fabs(events.at(second_closest)->timestamp -
+                        events.at(position)->timestamp)) {
+      second_closest = i;
+    }
+  }
+
+  if (closest < events.size() &&
+      (!return_second_closest || second_closest < events.size())) {
+    *return_closest = closest;
+    if (return_second_closest)
+      *return_second_closest = second_closest;
+    return true;
+  }
+
+  return false;
+}
+
+size_t CountMatches(const TraceEventVector& events,
+                    const Query& query,
+                    size_t begin_position,
+                    size_t end_position) {
+  if (begin_position >= events.size())
+    return 0u;
+  end_position = (end_position < events.size()) ? end_position : events.size();
+  size_t count = 0u;
+  for (size_t i = begin_position; i < end_position; ++i) {
+    if (query.Evaluate(*events.at(i)))
+      ++count;
+  }
+  return count;
+}
+
+}  // namespace trace_analyzer
diff --git a/base/test/trace_event_analyzer.h b/base/test/trace_event_analyzer.h
new file mode 100644
index 0000000..dcdd2e4
--- /dev/null
+++ b/base/test/trace_event_analyzer.h
@@ -0,0 +1,842 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Use trace_analyzer::Query and trace_analyzer::TraceAnalyzer to search for
+// specific trace events that were generated by the trace_event.h API.
+//
+// Basic procedure:
+// - Get trace events JSON string from base::trace_event::TraceLog.
+// - Create TraceAnalyzer with JSON string.
+// - Call TraceAnalyzer::AssociateBeginEndEvents (optional).
+// - Call TraceAnalyzer::AssociateEvents (zero or more times).
+// - Call TraceAnalyzer::FindEvents with queries to find specific events.
+//
+// A Query is a boolean expression tree that evaluates to true or false for a
+// given trace event. Queries can be combined into a tree using boolean,
+// arithmetic and comparison operators that refer to data of an individual trace
+// event.
+//
+// The events are returned as trace_analyzer::TraceEvent objects.
+// TraceEvent contains a single trace event's data, as well as a pointer to
+// a related trace event. The related trace event is typically the matching end
+// of a begin event or the matching begin of an end event.
+//
+// The following examples use this basic setup code to construct TraceAnalyzer
+// with the json trace string retrieved from TraceLog and construct an event
+// vector for retrieving events:
+//
+// TraceAnalyzer analyzer(json_events);
+// TraceEventVector events;
+//
+// EXAMPLE 1: Find events named "my_event".
+//
+// analyzer.FindEvents(Query(EVENT_NAME) == Query::String("my_event"),
+//                     &events);
+//
+// EXAMPLE 2: Find begin events named "my_event" with duration > 1 second.
+//
+// Query q = (Query(EVENT_NAME) == Query::String("my_event") &&
+//            Query(EVENT_PHASE) == Query::Phase(TRACE_EVENT_PHASE_BEGIN) &&
+//            Query(EVENT_DURATION) > Query::Double(1000000.0));
+// analyzer.FindEvents(q, &events);
+//
+// EXAMPLE 3: Associating event pairs across threads.
+//
+// If the test needs to analyze something that starts and ends on different
+// threads, the test needs to use INSTANT events. The typical procedure is to
+// specify the same unique ID as a TRACE_EVENT argument on both the start and
+// finish INSTANT events. Then use the following procedure to associate those
+// events.
+//
+// Step 1: instrument code with custom begin/end trace events.
+//   [Thread 1 tracing code]
+//   TRACE_EVENT_INSTANT1("test_latency", "timing1_begin", "id", 3);
+//   [Thread 2 tracing code]
+//   TRACE_EVENT_INSTANT1("test_latency", "timing1_end", "id", 3);
+//
+// Step 2: associate these custom begin/end pairs.
+//   Query begin(Query(EVENT_NAME) == Query::String("timing1_begin"));
+//   Query end(Query(EVENT_NAME) == Query::String("timing1_end"));
+//   Query match(Query(EVENT_ARG, "id") == Query(OTHER_ARG, "id"));
+//   analyzer.AssociateEvents(begin, end, match);
+//
+// Step 3: search for "timing1_begin" events with existing other event.
+//   Query q = (Query(EVENT_NAME) == Query::String("timing1_begin") &&
+//              Query(EVENT_HAS_OTHER));
+//   analyzer.FindEvents(q, &events);
+//
+// Step 4: analyze events, such as checking durations.
+//   for (size_t i = 0; i < events.size(); ++i) {
+//     double duration = events[i]->GetAbsTimeToOtherEvent();
+//     EXPECT_LT(duration, 1000000.0/60.0);  // Expect less than 1/60 second.
+//   }
+//
+// There are two helper functions, Start(category_filter_string) and Stop(), for
+// facilitating the collection of process-local traces and building a
+// TraceAnalyzer from them. A typical test, that uses the helper functions,
+// looks like the following:
+//
+// TEST_F(...) {
+//   Start("*");
+//   [Invoke the functions whose traces you want to verify]
+//   auto analyzer = Stop();
+//
+//   [Use the analyzer to verify produced traces, as explained above]
+// }
+//
+// Note: The Stop() function runs a RunLoop, so the calling thread must have a
+// SingleThreadTaskRunner.
+
+#ifndef BASE_TEST_TRACE_EVENT_ANALYZER_H_
+#define BASE_TEST_TRACE_EVENT_ANALYZER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/trace_event/trace_event.h"
+
+namespace base {
+class Value;
+}
+
+namespace trace_analyzer {
+class QueryNode;
+
+// trace_analyzer::TraceEvent is a more convenient form of the
+// base::trace_event::TraceEvent class to make tracing-based tests easier to
+// write.
+struct TraceEvent {
+  // ProcessThreadID contains a Process ID and Thread ID.
+  struct ProcessThreadID {
+    ProcessThreadID() : process_id(0), thread_id(0) {}
+    ProcessThreadID(int process_id, int thread_id)
+        : process_id(process_id), thread_id(thread_id) {}
+    bool operator< (const ProcessThreadID& rhs) const {
+      if (process_id != rhs.process_id)
+        return process_id < rhs.process_id;
+      return thread_id < rhs.thread_id;
+    }
+    int process_id;
+    int thread_id;
+  };
+
+  TraceEvent();
+  TraceEvent(TraceEvent&& other);
+  ~TraceEvent();
+
+  bool SetFromJSON(const base::Value* event_value) WARN_UNUSED_RESULT;
+
+  bool operator< (const TraceEvent& rhs) const {
+    return timestamp < rhs.timestamp;
+  }
+
+  TraceEvent& operator=(TraceEvent&& rhs);
+
+  bool has_other_event() const { return other_event; }
+
+  // Returns absolute duration in microseconds between this event and other
+  // event. Must have already verified that other_event exists by
+  // Query(EVENT_HAS_OTHER) or by calling has_other_event().
+  double GetAbsTimeToOtherEvent() const;
+
+  // Return the argument value if it exists and it is a string.
+  bool GetArgAsString(const std::string& name, std::string* arg) const;
+  // Return the argument value if it exists and it is a number.
+  bool GetArgAsNumber(const std::string& name, double* arg) const;
+  // Return the argument value if it exists.
+  bool GetArgAsValue(const std::string& name,
+                     std::unique_ptr<base::Value>* arg) const;
+
+  // Check if argument exists and is string.
+  bool HasStringArg(const std::string& name) const;
+  // Check if argument exists and is number (double, int or bool).
+  bool HasNumberArg(const std::string& name) const;
+  // Check if argument exists.
+  bool HasArg(const std::string& name) const;
+
+  // Get known existing arguments as specific types.
+  // Useful when you have already queried the argument with
+  // Query(HAS_NUMBER_ARG) or Query(HAS_STRING_ARG).
+  std::string GetKnownArgAsString(const std::string& name) const;
+  double GetKnownArgAsDouble(const std::string& name) const;
+  int GetKnownArgAsInt(const std::string& name) const;
+  bool GetKnownArgAsBool(const std::string& name) const;
+  std::unique_ptr<base::Value> GetKnownArgAsValue(
+      const std::string& name) const;
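+  //
+  // For example (an illustrative sketch; "frame_count" is a hypothetical
+  // argument name):
+  //   ASSERT_TRUE(event.HasNumberArg("frame_count"));
+  //   int frames = event.GetKnownArgAsInt("frame_count");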
+
+  // Process ID and Thread ID.
+  ProcessThreadID thread;
+
+  // Time since epoch in microseconds.
+  // Stored as double to match its JSON representation.
+  double timestamp;
+  double duration;
+  char phase;
+  std::string category;
+  std::string name;
+  std::string id;
+  double thread_duration = 0.0;
+  double thread_timestamp = 0.0;
+  std::string scope;
+  std::string bind_id;
+  bool flow_out = false;
+  bool flow_in = false;
+  std::string global_id2;
+  std::string local_id2;
+
+  // All numbers and bool values from TraceEvent args are cast to double.
+  // bool becomes 1.0 (true) or 0.0 (false).
+  std::map<std::string, double> arg_numbers;
+  std::map<std::string, std::string> arg_strings;
+  std::map<std::string, std::unique_ptr<base::Value>> arg_values;
+
+  // The other event associated with this event (or NULL).
+  const TraceEvent* other_event;
+
+  // A back-link for |other_event|. That is, if other_event is not null, then
+  // |event->other_event->prev_event == event| is always true.
+  const TraceEvent* prev_event;
+};
+
+typedef std::vector<const TraceEvent*> TraceEventVector;
+
+class Query {
+ public:
+  Query(const Query& query);
+
+  ~Query();
+
+  ////////////////////////////////////////////////////////////////
+  // Query literal values
+
+  // Compare with the given string.
+  static Query String(const std::string& str);
+
+  // Compare with the given number.
+  static Query Double(double num);
+  static Query Int(int32_t num);
+  static Query Uint(uint32_t num);
+
+  // Compare with the given bool.
+  static Query Bool(bool boolean);
+
+  // Compare with the given phase.
+  static Query Phase(char phase);
+
+  // Compare with the given string pattern. Only works with == and != operators.
+  // Example: Query(EVENT_NAME) == Query::Pattern("MyEvent*")
+  static Query Pattern(const std::string& pattern);
+
+  ////////////////////////////////////////////////////////////////
+  // Query event members
+
+  static Query EventPid() { return Query(EVENT_PID); }
+
+  static Query EventTid() { return Query(EVENT_TID); }
+
+  // Return the timestamp of the event in microseconds since epoch.
+  static Query EventTime() { return Query(EVENT_TIME); }
+
+  // Return the absolute time between event and other event in microseconds.
+  // Only works if Query::EventHasOther() == true.
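+  // For example (an illustrative sketch), events whose associated pair spans
+  // more than 1 ms:
+  //   Query::EventHasOther() &&
+  //       Query::EventDuration() > Query::Double(1000.0)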
+  static Query EventDuration() { return Query(EVENT_DURATION); }
+
+  // Return the duration of a COMPLETE event.
+  static Query EventCompleteDuration() {
+    return Query(EVENT_COMPLETE_DURATION);
+  }
+
+  static Query EventPhase() { return Query(EVENT_PHASE); }
+
+  static Query EventCategory() { return Query(EVENT_CATEGORY); }
+
+  static Query EventName() { return Query(EVENT_NAME); }
+
+  static Query EventId() { return Query(EVENT_ID); }
+
+  static Query EventPidIs(int process_id) {
+    return Query(EVENT_PID) == Query::Int(process_id);
+  }
+
+  static Query EventTidIs(int thread_id) {
+    return Query(EVENT_TID) == Query::Int(thread_id);
+  }
+
+  static Query EventThreadIs(const TraceEvent::ProcessThreadID& thread) {
+    return EventPidIs(thread.process_id) && EventTidIs(thread.thread_id);
+  }
+
+  static Query EventTimeIs(double timestamp) {
+    return Query(EVENT_TIME) == Query::Double(timestamp);
+  }
+
+  static Query EventDurationIs(double duration) {
+    return Query(EVENT_DURATION) == Query::Double(duration);
+  }
+
+  static Query EventPhaseIs(char phase) {
+    return Query(EVENT_PHASE) == Query::Phase(phase);
+  }
+
+  static Query EventCategoryIs(const std::string& category) {
+    return Query(EVENT_CATEGORY) == Query::String(category);
+  }
+
+  static Query EventNameIs(const std::string& name) {
+    return Query(EVENT_NAME) == Query::String(name);
+  }
+
+  static Query EventIdIs(const std::string& id) {
+    return Query(EVENT_ID) == Query::String(id);
+  }
+
+  // Evaluates to true if arg exists and is a string.
+  static Query EventHasStringArg(const std::string& arg_name) {
+    return Query(EVENT_HAS_STRING_ARG, arg_name);
+  }
+
+  // Evaluates to true if arg exists and is a number.
+  // Number arguments include types double, int and bool.
+  static Query EventHasNumberArg(const std::string& arg_name) {
+    return Query(EVENT_HAS_NUMBER_ARG, arg_name);
+  }
+
+  // Evaluates to arg value (string or number).
+  static Query EventArg(const std::string& arg_name) {
+    return Query(EVENT_ARG, arg_name);
+  }
+
+  // Return true if associated event exists.
+  static Query EventHasOther() { return Query(EVENT_HAS_OTHER); }
+
+  // Access the associated other_event's members:
+
+  static Query OtherPid() { return Query(OTHER_PID); }
+
+  static Query OtherTid() { return Query(OTHER_TID); }
+
+  static Query OtherTime() { return Query(OTHER_TIME); }
+
+  static Query OtherPhase() { return Query(OTHER_PHASE); }
+
+  static Query OtherCategory() { return Query(OTHER_CATEGORY); }
+
+  static Query OtherName() { return Query(OTHER_NAME); }
+
+  static Query OtherId() { return Query(OTHER_ID); }
+
+  static Query OtherPidIs(int process_id) {
+    return Query(OTHER_PID) == Query::Int(process_id);
+  }
+
+  static Query OtherTidIs(int thread_id) {
+    return Query(OTHER_TID) == Query::Int(thread_id);
+  }
+
+  static Query OtherThreadIs(const TraceEvent::ProcessThreadID& thread) {
+    return OtherPidIs(thread.process_id) && OtherTidIs(thread.thread_id);
+  }
+
+  static Query OtherTimeIs(double timestamp) {
+    return Query(OTHER_TIME) == Query::Double(timestamp);
+  }
+
+  static Query OtherPhaseIs(char phase) {
+    return Query(OTHER_PHASE) == Query::Phase(phase);
+  }
+
+  static Query OtherCategoryIs(const std::string& category) {
+    return Query(OTHER_CATEGORY) == Query::String(category);
+  }
+
+  static Query OtherNameIs(const std::string& name) {
+    return Query(OTHER_NAME) == Query::String(name);
+  }
+
+  static Query OtherIdIs(const std::string& id) {
+    return Query(OTHER_ID) == Query::String(id);
+  }
+
+  // Evaluates to true if arg exists and is a string.
+  static Query OtherHasStringArg(const std::string& arg_name) {
+    return Query(OTHER_HAS_STRING_ARG, arg_name);
+  }
+
+  // Evaluates to true if arg exists and is a number.
+  // Number arguments include types double, int and bool.
+  static Query OtherHasNumberArg(const std::string& arg_name) {
+    return Query(OTHER_HAS_NUMBER_ARG, arg_name);
+  }
+
+  // Evaluates to arg value (string or number).
+  static Query OtherArg(const std::string& arg_name) {
+    return Query(OTHER_ARG, arg_name);
+  }
+
+  // Access the associated prev_event's members:
+
+  static Query PrevPid() { return Query(PREV_PID); }
+
+  static Query PrevTid() { return Query(PREV_TID); }
+
+  static Query PrevTime() { return Query(PREV_TIME); }
+
+  static Query PrevPhase() { return Query(PREV_PHASE); }
+
+  static Query PrevCategory() { return Query(PREV_CATEGORY); }
+
+  static Query PrevName() { return Query(PREV_NAME); }
+
+  static Query PrevId() { return Query(PREV_ID); }
+
+  static Query PrevPidIs(int process_id) {
+    return Query(PREV_PID) == Query::Int(process_id);
+  }
+
+  static Query PrevTidIs(int thread_id) {
+    return Query(PREV_TID) == Query::Int(thread_id);
+  }
+
+  static Query PrevThreadIs(const TraceEvent::ProcessThreadID& thread) {
+    return PrevPidIs(thread.process_id) && PrevTidIs(thread.thread_id);
+  }
+
+  static Query PrevTimeIs(double timestamp) {
+    return Query(PREV_TIME) == Query::Double(timestamp);
+  }
+
+  static Query PrevPhaseIs(char phase) {
+    return Query(PREV_PHASE) == Query::Phase(phase);
+  }
+
+  static Query PrevCategoryIs(const std::string& category) {
+    return Query(PREV_CATEGORY) == Query::String(category);
+  }
+
+  static Query PrevNameIs(const std::string& name) {
+    return Query(PREV_NAME) == Query::String(name);
+  }
+
+  static Query PrevIdIs(const std::string& id) {
+    return Query(PREV_ID) == Query::String(id);
+  }
+
+  // Evaluates to true if arg exists and is a string.
+  static Query PrevHasStringArg(const std::string& arg_name) {
+    return Query(PREV_HAS_STRING_ARG, arg_name);
+  }
+
+  // Evaluates to true if arg exists and is a number.
+  // Number arguments include types double, int and bool.
+  static Query PrevHasNumberArg(const std::string& arg_name) {
+    return Query(PREV_HAS_NUMBER_ARG, arg_name);
+  }
+
+  // Evaluates to arg value (string or number).
+  static Query PrevArg(const std::string& arg_name) {
+    return Query(PREV_ARG, arg_name);
+  }
+
+  ////////////////////////////////////////////////////////////////
+  // Common queries:
+
+  // Find BEGIN events that have a corresponding END event.
+  static Query MatchBeginWithEnd() {
+    return (Query(EVENT_PHASE) == Query::Phase(TRACE_EVENT_PHASE_BEGIN)) &&
+           Query(EVENT_HAS_OTHER);
+  }
+
+  // Find COMPLETE events.
+  static Query MatchComplete() {
+    return (Query(EVENT_PHASE) == Query::Phase(TRACE_EVENT_PHASE_COMPLETE));
+  }
+
+  // Find ASYNC_BEGIN events that have a corresponding ASYNC_END event.
+  static Query MatchAsyncBeginWithNext() {
+    return (Query(EVENT_PHASE) ==
+            Query::Phase(TRACE_EVENT_PHASE_ASYNC_BEGIN)) &&
+           Query(EVENT_HAS_OTHER);
+  }
+
+  // Find BEGIN events of given |name| which also have associated END events.
+  static Query MatchBeginName(const std::string& name) {
+    return (Query(EVENT_NAME) == Query(name)) && MatchBeginWithEnd();
+  }
+
+  // Find COMPLETE events of given |name|.
+  static Query MatchCompleteName(const std::string& name) {
+    return (Query(EVENT_NAME) == Query(name)) && MatchComplete();
+  }
+
+  // Match given Process ID and Thread ID.
+  static Query MatchThread(const TraceEvent::ProcessThreadID& thread) {
+    return (Query(EVENT_PID) == Query::Int(thread.process_id)) &&
+           (Query(EVENT_TID) == Query::Int(thread.thread_id));
+  }
+
+  // Match event pair that spans multiple threads.
+  static Query MatchCrossThread() {
+    return (Query(EVENT_PID) != Query(OTHER_PID)) ||
+           (Query(EVENT_TID) != Query(OTHER_TID));
+  }
+
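+  // For example (an illustrative sketch; assumes a prior AssociateEvents()
+  // call linked event pairs that may span threads):
+  //   TraceEventVector events;
+  //   analyzer->FindEvents(Query::MatchCrossThread(), &events);
+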
+  ////////////////////////////////////////////////////////////////
+  // Operators:
+
+  // Boolean operators:
+  Query operator==(const Query& rhs) const;
+  Query operator!=(const Query& rhs) const;
+  Query operator< (const Query& rhs) const;
+  Query operator<=(const Query& rhs) const;
+  Query operator> (const Query& rhs) const;
+  Query operator>=(const Query& rhs) const;
+  Query operator&&(const Query& rhs) const;
+  Query operator||(const Query& rhs) const;
+  Query operator!() const;
+
+  // Arithmetic operators:
+  // Following operators are applied to double arguments:
+  Query operator+(const Query& rhs) const;
+  Query operator-(const Query& rhs) const;
+  Query operator*(const Query& rhs) const;
+  Query operator/(const Query& rhs) const;
+  Query operator-() const;
+  // Mod operates on int64_t args (doubles are cast to int64_t beforehand):
+  Query operator%(const Query& rhs) const;
+
+  // Return true if the given event matches this query tree.
+  // This is a recursive method that walks the query tree.
+  bool Evaluate(const TraceEvent& event) const;
+
+  enum TraceEventMember {
+    EVENT_INVALID,
+    EVENT_PID,
+    EVENT_TID,
+    EVENT_TIME,
+    EVENT_DURATION,
+    EVENT_COMPLETE_DURATION,
+    EVENT_PHASE,
+    EVENT_CATEGORY,
+    EVENT_NAME,
+    EVENT_ID,
+    EVENT_HAS_STRING_ARG,
+    EVENT_HAS_NUMBER_ARG,
+    EVENT_ARG,
+    EVENT_HAS_OTHER,
+    EVENT_HAS_PREV,
+
+    OTHER_PID,
+    OTHER_TID,
+    OTHER_TIME,
+    OTHER_PHASE,
+    OTHER_CATEGORY,
+    OTHER_NAME,
+    OTHER_ID,
+    OTHER_HAS_STRING_ARG,
+    OTHER_HAS_NUMBER_ARG,
+    OTHER_ARG,
+
+    PREV_PID,
+    PREV_TID,
+    PREV_TIME,
+    PREV_PHASE,
+    PREV_CATEGORY,
+    PREV_NAME,
+    PREV_ID,
+    PREV_HAS_STRING_ARG,
+    PREV_HAS_NUMBER_ARG,
+    PREV_ARG,
+
+    OTHER_FIRST_MEMBER = OTHER_PID,
+    OTHER_LAST_MEMBER = OTHER_ARG,
+
+    PREV_FIRST_MEMBER = PREV_PID,
+    PREV_LAST_MEMBER = PREV_ARG,
+  };
+
+  enum Operator {
+    OP_INVALID,
+    // Boolean operators:
+    OP_EQ,
+    OP_NE,
+    OP_LT,
+    OP_LE,
+    OP_GT,
+    OP_GE,
+    OP_AND,
+    OP_OR,
+    OP_NOT,
+    // Arithmetic operators:
+    OP_ADD,
+    OP_SUB,
+    OP_MUL,
+    OP_DIV,
+    OP_MOD,
+    OP_NEGATE
+  };
+
+  enum QueryType {
+    QUERY_BOOLEAN_OPERATOR,
+    QUERY_ARITHMETIC_OPERATOR,
+    QUERY_EVENT_MEMBER,
+    QUERY_NUMBER,
+    QUERY_STRING
+  };
+
+  // Compare with the given member.
+  explicit Query(TraceEventMember member);
+
+  // Compare with the given member argument value.
+  Query(TraceEventMember member, const std::string& arg_name);
+
+  // Compare with the given string.
+  explicit Query(const std::string& str);
+
+  // Compare with the given number.
+  explicit Query(double num);
+
+  // Construct a Query that evaluates to (left <binary_op> right).
+  Query(const Query& left, const Query& right, Operator binary_op);
+
+  // Construct a Query that evaluates to (<unary_op> left).
+  Query(const Query& left, Operator unary_op);
+
+  // Try to compare left_ against right_ based on operator_.
+  // If either left or right does not convert to double, false is returned.
+  // Otherwise, true is returned and |result| is set to the comparison result.
+  bool CompareAsDouble(const TraceEvent& event, bool* result) const;
+
+  // Try to compare left_ against right_ based on operator_.
+  // If either left or right does not convert to string, false is returned.
+  // Otherwise, true is returned and |result| is set to the comparison result.
+  bool CompareAsString(const TraceEvent& event, bool* result) const;
+
+  // Attempt to convert this Query to a double. On success, true is returned
+  // and the double value is stored in |num|.
+  bool GetAsDouble(const TraceEvent& event, double* num) const;
+
+  // Attempt to convert this Query to a string. On success, true is returned
+  // and the string value is stored in |str|.
+  bool GetAsString(const TraceEvent& event, std::string* str) const;
+
+  // Evaluate this Query as an arithmetic operator on left_ and right_.
+  bool EvaluateArithmeticOperator(const TraceEvent& event,
+                                  double* num) const;
+
+  // For QUERY_EVENT_MEMBER Query: attempt to get the double value of the Query.
+  bool GetMemberValueAsDouble(const TraceEvent& event, double* num) const;
+
+  // For QUERY_EVENT_MEMBER Query: attempt to get the string value of the Query.
+  bool GetMemberValueAsString(const TraceEvent& event, std::string* str) const;
+
+  // Does this Query represent a value?
+  bool is_value() const { return type_ != QUERY_BOOLEAN_OPERATOR; }
+
+  bool is_unary_operator() const {
+    return operator_ == OP_NOT || operator_ == OP_NEGATE;
+  }
+
+  bool is_comparison_operator() const {
+    return operator_ != OP_INVALID && operator_ < OP_AND;
+  }
+
+  static const TraceEvent* SelectTargetEvent(const TraceEvent* ev,
+                                             TraceEventMember member);
+
+  const Query& left() const;
+  const Query& right() const;
+
+ private:
+  QueryType type_;
+  Operator operator_;
+  scoped_refptr<QueryNode> left_;
+  scoped_refptr<QueryNode> right_;
+  TraceEventMember member_;
+  double number_;
+  std::string string_;
+  bool is_pattern_;
+};
+
+// Implementation detail:
+// QueryNode allows Query to store a ref-counted query tree.
+class QueryNode : public base::RefCounted<QueryNode> {
+ public:
+  explicit QueryNode(const Query& query);
+  const Query& query() const { return query_; }
+
+ private:
+  friend class base::RefCounted<QueryNode>;
+  ~QueryNode();
+
+  Query query_;
+};
+
+// TraceAnalyzer helps tests search for trace events.
+class TraceAnalyzer {
+ public:
+  ~TraceAnalyzer();
+
+  // Use trace events from a JSON string generated by the tracing API.
+  // Returns non-NULL if the JSON is successfully parsed.
+  static TraceAnalyzer* Create(const std::string& json_events)
+                               WARN_UNUSED_RESULT;
+
+  void SetIgnoreMetadataEvents(bool ignore) {
+    ignore_metadata_events_ = ignore;
+  }
+
+  // Associate BEGIN and END events with each other. This allows Query(OTHER_*)
+  // to access the associated event and enables Query(EVENT_DURATION).
+  // An end event will match the most recent begin event with the same name,
+  // category, process ID and thread ID. This matches what is shown in
+  // about:tracing. After association, the BEGIN event's other_event points to
+  // the matching END event, and the END event's prev_event points back to the
+  // BEGIN event (the END event's other_event is left unset).
+  void AssociateBeginEndEvents();
+
+  // Associate ASYNC_BEGIN, ASYNC_STEP and ASYNC_END events with each other.
+  // An ASYNC_END event will match the most recent ASYNC_BEGIN or ASYNC_STEP
+  // event with the same name, category, and ID. This creates a singly linked
+  // list of ASYNC_BEGIN->ASYNC_STEP...->ASYNC_END.
+  // |match_pid| - If true, will only match async events which are running
+  //               under the same process ID, otherwise will allow linking
+  //               async events from different processes.
+  void AssociateAsyncBeginEndEvents(bool match_pid = true);
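+
+  // For example (an illustrative sketch):
+  //   analyzer->AssociateAsyncBeginEndEvents();
+  //   TraceEventVector events;
+  //   analyzer->FindEvents(Query::MatchAsyncBeginWithNext(), &events);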
+
+  // AssociateEvents can be used to customize event associations by setting the
+  // other_event member of TraceEvent. This should be used to associate two
+  // INSTANT events.
+  //
+  // The assumptions are:
+  // - |first| events occur before |second| events.
+  // - the closest matching |second| event is the correct match.
+  //
+  // |first|  - Eligible |first| events match this query.
+  // |second| - Eligible |second| events match this query.
+  // |match|  - This query is run on the |first| event. The OTHER_* EventMember
+  //            queries will point to an eligible |second| event. The query
+  //            should evaluate to true if the |first|/|second| pair is a match.
+  //
+  // When a match is found, the pair will be associated by having the first
+  // event's other_event member point to the other. AssociateEvents does not
+  // clear previous associations, so it is possible to associate multiple pairs
+  // of events by calling AssociateEvents more than once with different queries.
+  //
+  // NOTE: AssociateEvents will overwrite existing other_event associations if
+  // the queries pass for events that already had a previous association.
+  //
+  // After calling any Find* method, it is not allowed to call AssociateEvents
+  // again.
+  void AssociateEvents(const Query& first,
+                       const Query& second,
+                       const Query& match);
+
+  // For each event, merge the numeric and string arguments from its chain of
+  // associated other_events into the event's own argument maps. If an
+  // argument name already exists, it will not be overwritten.
+  void MergeAssociatedEventArgs();
+
+  // Find all events that match query and replace output vector.
+  size_t FindEvents(const Query& query, TraceEventVector* output);
+
+  // Find first event that matches query or NULL if not found.
+  const TraceEvent* FindFirstOf(const Query& query);
+
+  // Find last event that matches query or NULL if not found.
+  const TraceEvent* FindLastOf(const Query& query);
+
+  const std::string& GetThreadName(const TraceEvent::ProcessThreadID& thread);
+
+ private:
+  TraceAnalyzer();
+
+  bool SetEvents(const std::string& json_events) WARN_UNUSED_RESULT;
+
+  // Read metadata (thread names, etc) from events.
+  void ParseMetadata();
+
+  std::map<TraceEvent::ProcessThreadID, std::string> thread_names_;
+  std::vector<TraceEvent> raw_events_;
+  bool ignore_metadata_events_;
+  bool allow_association_changes_;
+
+  DISALLOW_COPY_AND_ASSIGN(TraceAnalyzer);
+};
+
+// Utility functions for collecting process-local traces and creating a
+// |TraceAnalyzer| from the result. Please see comments in trace_config.h to
+// understand how the |category_filter_string| works. Use "*" to enable all
+// default categories.
+void Start(const std::string& category_filter_string);
+std::unique_ptr<TraceAnalyzer> Stop();
+
+// Utility functions for TraceEventVector.
+
+struct RateStats {
+  double min_us;
+  double max_us;
+  double mean_us;
+  double standard_deviation_us;
+};
+
+struct RateStatsOptions {
+  RateStatsOptions() : trim_min(0u), trim_max(0u) {}
+  // After the times between events are sorted, the specified numbers of
+  // smallest and largest deltas will be trimmed before calculating the
+  // RateStats. This is useful in cases where extreme outliers are tolerable
+  // and should not skew the overall average.
+  size_t trim_min;  // Trim this many minimum times.
+  size_t trim_max;  // Trim this many maximum times.
+};
+
+// Calculate min/max/mean and standard deviation from the times between
+// adjacent events.
+bool GetRateStats(const TraceEventVector& events,
+                  RateStats* stats,
+                  const RateStatsOptions* options);
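+
+// For example (an illustrative sketch; |events| is a TraceEventVector of the
+// events of interest):
+//   RateStats stats;
+//   RateStatsOptions options;
+//   options.trim_min = options.trim_max = 1;  // Drop one outlier at each end.
+//   if (GetRateStats(events, &stats, &options))
+//     LOG(INFO) << "Mean interval in microseconds: " << stats.mean_us;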
+
+// Starting from |position|, find the first event that matches |query|.
+// Returns true if found, false otherwise.
+bool FindFirstOf(const TraceEventVector& events,
+                 const Query& query,
+                 size_t position,
+                 size_t* return_index);
+
+// Starting from |position|, find the last event that matches |query|.
+// Returns true if found, false otherwise.
+bool FindLastOf(const TraceEventVector& events,
+                const Query& query,
+                size_t position,
+                size_t* return_index);
+
+// Find the closest events to |position| in time that match |query|.
+// return_second_closest may be NULL. Closeness is determined by comparing
+// with the event timestamp.
+// Returns true if found, false otherwise. If both return parameters are
+// requested, both must be found for a successful result.
+bool FindClosest(const TraceEventVector& events,
+                 const Query& query,
+                 size_t position,
+                 size_t* return_closest,
+                 size_t* return_second_closest);
+
+// Count matches, inclusive of |begin_position|, exclusive of |end_position|.
+size_t CountMatches(const TraceEventVector& events,
+                    const Query& query,
+                    size_t begin_position,
+                    size_t end_position);
+
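+// For example (an illustrative sketch): count COMPLETE events among the first
+// 100 events:
+//   size_t n = CountMatches(events, Query::MatchComplete(), 0u, 100u);
+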
+// Count all matches.
+static inline size_t CountMatches(const TraceEventVector& events,
+                                  const Query& query) {
+  return CountMatches(events, query, 0u, events.size());
+}
+
+}  // namespace trace_analyzer
+
+#endif  // BASE_TEST_TRACE_EVENT_ANALYZER_H_
diff --git a/base/test/trace_event_analyzer_unittest.cc b/base/test/trace_event_analyzer_unittest.cc
new file mode 100644
index 0000000..6461b0f
--- /dev/null
+++ b/base/test/trace_event_analyzer_unittest.cc
@@ -0,0 +1,961 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/trace_event_analyzer.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/bind.h"
+#include "base/memory/ptr_util.h"
+#include "base/memory/ref_counted_memory.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/platform_thread.h"
+#include "base/trace_event/trace_buffer.h"
+#include "base/trace_event/trace_event_argument.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace trace_analyzer {
+
+namespace {
+
+class TraceEventAnalyzerTest : public testing::Test {
+ public:
+  void ManualSetUp();
+  void OnTraceDataCollected(
+      base::WaitableEvent* flush_complete_event,
+      const scoped_refptr<base::RefCountedString>& json_events_str,
+      bool has_more_events);
+  void BeginTracing();
+  void EndTracing();
+
+  base::trace_event::TraceResultBuffer::SimpleOutput output_;
+  base::trace_event::TraceResultBuffer buffer_;
+};
+
+void TraceEventAnalyzerTest::ManualSetUp() {
+  ASSERT_TRUE(base::trace_event::TraceLog::GetInstance());
+  buffer_.SetOutputCallback(output_.GetCallback());
+  output_.json_output.clear();
+}
+
+void TraceEventAnalyzerTest::OnTraceDataCollected(
+    base::WaitableEvent* flush_complete_event,
+    const scoped_refptr<base::RefCountedString>& json_events_str,
+    bool has_more_events) {
+  buffer_.AddFragment(json_events_str->data());
+  if (!has_more_events)
+    flush_complete_event->Signal();
+}
+
+void TraceEventAnalyzerTest::BeginTracing() {
+  output_.json_output.clear();
+  buffer_.Start();
+  base::trace_event::TraceLog::GetInstance()->SetEnabled(
+      base::trace_event::TraceConfig("*", ""),
+      base::trace_event::TraceLog::RECORDING_MODE);
+}
+
+void TraceEventAnalyzerTest::EndTracing() {
+  base::trace_event::TraceLog::GetInstance()->SetDisabled();
+  base::WaitableEvent flush_complete_event(
+      base::WaitableEvent::ResetPolicy::AUTOMATIC,
+      base::WaitableEvent::InitialState::NOT_SIGNALED);
+  base::trace_event::TraceLog::GetInstance()->Flush(
+      base::Bind(&TraceEventAnalyzerTest::OnTraceDataCollected,
+                 base::Unretained(this),
+                 base::Unretained(&flush_complete_event)));
+  flush_complete_event.Wait();
+  buffer_.Finish();
+}
+
+}  // namespace
+
+TEST_F(TraceEventAnalyzerTest, NoEvents) {
+  ManualSetUp();
+
+  // Create an empty JSON event string:
+  buffer_.Start();
+  buffer_.Finish();
+
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
+  ASSERT_TRUE(analyzer.get());
+
+  // Search for all events and verify that nothing is returned.
+  TraceEventVector found;
+  analyzer->FindEvents(Query::Bool(true), &found);
+  EXPECT_EQ(0u, found.size());
+}
+
+TEST_F(TraceEventAnalyzerTest, TraceEvent) {
+  ManualSetUp();
+
+  int int_num = 2;
+  double double_num = 3.5;
+  const char str[] = "the string";
+
+  TraceEvent event;
+  event.arg_numbers["false"] = 0.0;
+  event.arg_numbers["true"] = 1.0;
+  event.arg_numbers["int"] = static_cast<double>(int_num);
+  event.arg_numbers["double"] = double_num;
+  event.arg_strings["string"] = str;
+  event.arg_values["dict"] = WrapUnique(new base::DictionaryValue());
+
+  ASSERT_TRUE(event.HasNumberArg("false"));
+  ASSERT_TRUE(event.HasNumberArg("true"));
+  ASSERT_TRUE(event.HasNumberArg("int"));
+  ASSERT_TRUE(event.HasNumberArg("double"));
+  ASSERT_TRUE(event.HasStringArg("string"));
+  ASSERT_FALSE(event.HasNumberArg("notfound"));
+  ASSERT_FALSE(event.HasStringArg("notfound"));
+  ASSERT_TRUE(event.HasArg("dict"));
+  ASSERT_FALSE(event.HasArg("notfound"));
+
+  EXPECT_FALSE(event.GetKnownArgAsBool("false"));
+  EXPECT_TRUE(event.GetKnownArgAsBool("true"));
+  EXPECT_EQ(int_num, event.GetKnownArgAsInt("int"));
+  EXPECT_EQ(double_num, event.GetKnownArgAsDouble("double"));
+  EXPECT_STREQ(str, event.GetKnownArgAsString("string").c_str());
+
+  std::unique_ptr<base::Value> arg;
+  EXPECT_TRUE(event.GetArgAsValue("dict", &arg));
+  EXPECT_EQ(base::Value::Type::DICTIONARY, arg->type());
+}
+
+TEST_F(TraceEventAnalyzerTest, QueryEventMember) {
+  ManualSetUp();
+
+  TraceEvent event;
+  event.thread.process_id = 3;
+  event.thread.thread_id = 4;
+  event.timestamp = 1.5;
+  event.phase = TRACE_EVENT_PHASE_BEGIN;
+  event.category = "category";
+  event.name = "name";
+  event.id = "1";
+  event.arg_numbers["num"] = 7.0;
+  event.arg_strings["str"] = "the string";
+
+  // Other event with all different members:
+  TraceEvent other;
+  other.thread.process_id = 5;
+  other.thread.thread_id = 6;
+  other.timestamp = 2.5;
+  other.phase = TRACE_EVENT_PHASE_END;
+  other.category = "category2";
+  other.name = "name2";
+  other.id = "2";
+  other.arg_numbers["num2"] = 8.0;
+  other.arg_strings["str2"] = "the string 2";
+
+  event.other_event = &other;
+  ASSERT_TRUE(event.has_other_event());
+  double duration = event.GetAbsTimeToOtherEvent();
+
+  Query event_pid = Query::EventPidIs(event.thread.process_id);
+  Query event_tid = Query::EventTidIs(event.thread.thread_id);
+  Query event_time = Query::EventTimeIs(event.timestamp);
+  Query event_duration = Query::EventDurationIs(duration);
+  Query event_phase = Query::EventPhaseIs(event.phase);
+  Query event_category = Query::EventCategoryIs(event.category);
+  Query event_name = Query::EventNameIs(event.name);
+  Query event_id = Query::EventIdIs(event.id);
+  Query event_has_arg1 = Query::EventHasNumberArg("num");
+  Query event_has_arg2 = Query::EventHasStringArg("str");
+  Query event_arg1 =
+      (Query::EventArg("num") == Query::Double(event.arg_numbers["num"]));
+  Query event_arg2 =
+      (Query::EventArg("str") == Query::String(event.arg_strings["str"]));
+  Query event_has_other = Query::EventHasOther();
+  Query other_pid = Query::OtherPidIs(other.thread.process_id);
+  Query other_tid = Query::OtherTidIs(other.thread.thread_id);
+  Query other_time = Query::OtherTimeIs(other.timestamp);
+  Query other_phase = Query::OtherPhaseIs(other.phase);
+  Query other_category = Query::OtherCategoryIs(other.category);
+  Query other_name = Query::OtherNameIs(other.name);
+  Query other_id = Query::OtherIdIs(other.id);
+  Query other_has_arg1 = Query::OtherHasNumberArg("num2");
+  Query other_has_arg2 = Query::OtherHasStringArg("str2");
+  Query other_arg1 =
+      (Query::OtherArg("num2") == Query::Double(other.arg_numbers["num2"]));
+  Query other_arg2 =
+      (Query::OtherArg("str2") == Query::String(other.arg_strings["str2"]));
+
+  EXPECT_TRUE(event_pid.Evaluate(event));
+  EXPECT_TRUE(event_tid.Evaluate(event));
+  EXPECT_TRUE(event_time.Evaluate(event));
+  EXPECT_TRUE(event_duration.Evaluate(event));
+  EXPECT_TRUE(event_phase.Evaluate(event));
+  EXPECT_TRUE(event_category.Evaluate(event));
+  EXPECT_TRUE(event_name.Evaluate(event));
+  EXPECT_TRUE(event_id.Evaluate(event));
+  EXPECT_TRUE(event_has_arg1.Evaluate(event));
+  EXPECT_TRUE(event_has_arg2.Evaluate(event));
+  EXPECT_TRUE(event_arg1.Evaluate(event));
+  EXPECT_TRUE(event_arg2.Evaluate(event));
+  EXPECT_TRUE(event_has_other.Evaluate(event));
+  EXPECT_TRUE(other_pid.Evaluate(event));
+  EXPECT_TRUE(other_tid.Evaluate(event));
+  EXPECT_TRUE(other_time.Evaluate(event));
+  EXPECT_TRUE(other_phase.Evaluate(event));
+  EXPECT_TRUE(other_category.Evaluate(event));
+  EXPECT_TRUE(other_name.Evaluate(event));
+  EXPECT_TRUE(other_id.Evaluate(event));
+  EXPECT_TRUE(other_has_arg1.Evaluate(event));
+  EXPECT_TRUE(other_has_arg2.Evaluate(event));
+  EXPECT_TRUE(other_arg1.Evaluate(event));
+  EXPECT_TRUE(other_arg2.Evaluate(event));
+
+  // Evaluate event queries against other to verify the queries fail when the
+  // event members are wrong.
+  EXPECT_FALSE(event_pid.Evaluate(other));
+  EXPECT_FALSE(event_tid.Evaluate(other));
+  EXPECT_FALSE(event_time.Evaluate(other));
+  EXPECT_FALSE(event_duration.Evaluate(other));
+  EXPECT_FALSE(event_phase.Evaluate(other));
+  EXPECT_FALSE(event_category.Evaluate(other));
+  EXPECT_FALSE(event_name.Evaluate(other));
+  EXPECT_FALSE(event_id.Evaluate(other));
+  EXPECT_FALSE(event_has_arg1.Evaluate(other));
+  EXPECT_FALSE(event_has_arg2.Evaluate(other));
+  EXPECT_FALSE(event_arg1.Evaluate(other));
+  EXPECT_FALSE(event_arg2.Evaluate(other));
+  EXPECT_FALSE(event_has_other.Evaluate(other));
+}
+
+TEST_F(TraceEventAnalyzerTest, BooleanOperators) {
+  ManualSetUp();
+
+  BeginTracing();
+  {
+    TRACE_EVENT_INSTANT1("cat1", "name1", TRACE_EVENT_SCOPE_THREAD, "num", 1);
+    TRACE_EVENT_INSTANT1("cat1", "name2", TRACE_EVENT_SCOPE_THREAD, "num", 2);
+    TRACE_EVENT_INSTANT1("cat2", "name3", TRACE_EVENT_SCOPE_THREAD, "num", 3);
+    TRACE_EVENT_INSTANT1("cat2", "name4", TRACE_EVENT_SCOPE_THREAD, "num", 4);
+  }
+  EndTracing();
+
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
+  ASSERT_TRUE(analyzer);
+  analyzer->SetIgnoreMetadataEvents(true);
+
+  TraceEventVector found;
+
+  // ==
+
+  analyzer->FindEvents(Query::EventCategory() == Query::String("cat1"), &found);
+  ASSERT_EQ(2u, found.size());
+  EXPECT_STREQ("name1", found[0]->name.c_str());
+  EXPECT_STREQ("name2", found[1]->name.c_str());
+
+  analyzer->FindEvents(Query::EventArg("num") == Query::Int(2), &found);
+  ASSERT_EQ(1u, found.size());
+  EXPECT_STREQ("name2", found[0]->name.c_str());
+
+  // !=
+
+  analyzer->FindEvents(Query::EventCategory() != Query::String("cat1"), &found);
+  ASSERT_EQ(2u, found.size());
+  EXPECT_STREQ("name3", found[0]->name.c_str());
+  EXPECT_STREQ("name4", found[1]->name.c_str());
+
+  analyzer->FindEvents(Query::EventArg("num") != Query::Int(2), &found);
+  ASSERT_EQ(3u, found.size());
+  EXPECT_STREQ("name1", found[0]->name.c_str());
+  EXPECT_STREQ("name3", found[1]->name.c_str());
+  EXPECT_STREQ("name4", found[2]->name.c_str());
+
+  // <
+  analyzer->FindEvents(Query::EventArg("num") < Query::Int(2), &found);
+  ASSERT_EQ(1u, found.size());
+  EXPECT_STREQ("name1", found[0]->name.c_str());
+
+  // <=
+  analyzer->FindEvents(Query::EventArg("num") <= Query::Int(2), &found);
+  ASSERT_EQ(2u, found.size());
+  EXPECT_STREQ("name1", found[0]->name.c_str());
+  EXPECT_STREQ("name2", found[1]->name.c_str());
+
+  // >
+  analyzer->FindEvents(Query::EventArg("num") > Query::Int(3), &found);
+  ASSERT_EQ(1u, found.size());
+  EXPECT_STREQ("name4", found[0]->name.c_str());
+
+  // >=
+  analyzer->FindEvents(Query::EventArg("num") >= Query::Int(4), &found);
+  ASSERT_EQ(1u, found.size());
+  EXPECT_STREQ("name4", found[0]->name.c_str());
+
+  // &&
+  analyzer->FindEvents(Query::EventName() != Query::String("name1") &&
+                       Query::EventArg("num") < Query::Int(3), &found);
+  ASSERT_EQ(1u, found.size());
+  EXPECT_STREQ("name2", found[0]->name.c_str());
+
+  // ||
+  analyzer->FindEvents(Query::EventName() == Query::String("name1") ||
+                       Query::EventArg("num") == Query::Int(3), &found);
+  ASSERT_EQ(2u, found.size());
+  EXPECT_STREQ("name1", found[0]->name.c_str());
+  EXPECT_STREQ("name3", found[1]->name.c_str());
+
+  // !
+  analyzer->FindEvents(!(Query::EventName() == Query::String("name1") ||
+                         Query::EventArg("num") == Query::Int(3)), &found);
+  ASSERT_EQ(2u, found.size());
+  EXPECT_STREQ("name2", found[0]->name.c_str());
+  EXPECT_STREQ("name4", found[1]->name.c_str());
+}
+
+TEST_F(TraceEventAnalyzerTest, ArithmeticOperators) {
+  ManualSetUp();
+
+  BeginTracing();
+  {
+    // These events are searched for:
+    TRACE_EVENT_INSTANT2("cat1", "math1", TRACE_EVENT_SCOPE_THREAD,
+                         "a", 10, "b", 5);
+    TRACE_EVENT_INSTANT2("cat1", "math2", TRACE_EVENT_SCOPE_THREAD,
+                         "a", 10, "b", 10);
+    // Extra events that never match, for noise:
+    TRACE_EVENT_INSTANT2("noise", "math3", TRACE_EVENT_SCOPE_THREAD,
+                         "a", 1,  "b", 3);
+    TRACE_EVENT_INSTANT2("noise", "math4", TRACE_EVENT_SCOPE_THREAD,
+                         "c", 10, "d", 5);
+  }
+  EndTracing();
+
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
+  ASSERT_TRUE(analyzer.get());
+
+  TraceEventVector found;
+
+  // Verify that arithmetic operators function:
+
+  // +
+  analyzer->FindEvents(Query::EventArg("a") + Query::EventArg("b") ==
+                       Query::Int(20), &found);
+  EXPECT_EQ(1u, found.size());
+  EXPECT_STREQ("math2", found.front()->name.c_str());
+
+  // -
+  analyzer->FindEvents(Query::EventArg("a") - Query::EventArg("b") ==
+                       Query::Int(5), &found);
+  EXPECT_EQ(1u, found.size());
+  EXPECT_STREQ("math1", found.front()->name.c_str());
+
+  // *
+  analyzer->FindEvents(Query::EventArg("a") * Query::EventArg("b") ==
+                       Query::Int(50), &found);
+  EXPECT_EQ(1u, found.size());
+  EXPECT_STREQ("math1", found.front()->name.c_str());
+
+  // /
+  analyzer->FindEvents(Query::EventArg("a") / Query::EventArg("b") ==
+                       Query::Int(2), &found);
+  EXPECT_EQ(1u, found.size());
+  EXPECT_STREQ("math1", found.front()->name.c_str());
+
+  // %
+  analyzer->FindEvents(Query::EventArg("a") % Query::EventArg("b") ==
+                       Query::Int(0), &found);
+  EXPECT_EQ(2u, found.size());
+
+  // - (negate)
+  analyzer->FindEvents(-Query::EventArg("b") == Query::Int(-10), &found);
+  EXPECT_EQ(1u, found.size());
+  EXPECT_STREQ("math2", found.front()->name.c_str());
+}
+
+TEST_F(TraceEventAnalyzerTest, StringPattern) {
+  ManualSetUp();
+
+  BeginTracing();
+  {
+    TRACE_EVENT_INSTANT0("cat1", "name1", TRACE_EVENT_SCOPE_THREAD);
+    TRACE_EVENT_INSTANT0("cat1", "name2", TRACE_EVENT_SCOPE_THREAD);
+    TRACE_EVENT_INSTANT0("cat1", "no match", TRACE_EVENT_SCOPE_THREAD);
+    TRACE_EVENT_INSTANT0("cat1", "name3x", TRACE_EVENT_SCOPE_THREAD);
+  }
+  EndTracing();
+
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
+  ASSERT_TRUE(analyzer.get());
+  analyzer->SetIgnoreMetadataEvents(true);
+
+  TraceEventVector found;
+
+  analyzer->FindEvents(Query::EventName() == Query::Pattern("name?"), &found);
+  ASSERT_EQ(2u, found.size());
+  EXPECT_STREQ("name1", found[0]->name.c_str());
+  EXPECT_STREQ("name2", found[1]->name.c_str());
+
+  analyzer->FindEvents(Query::EventName() == Query::Pattern("name*"), &found);
+  ASSERT_EQ(3u, found.size());
+  EXPECT_STREQ("name1", found[0]->name.c_str());
+  EXPECT_STREQ("name2", found[1]->name.c_str());
+  EXPECT_STREQ("name3x", found[2]->name.c_str());
+
+  analyzer->FindEvents(Query::EventName() != Query::Pattern("name*"), &found);
+  ASSERT_EQ(1u, found.size());
+  EXPECT_STREQ("no match", found[0]->name.c_str());
+}
+
+// Test that duration queries work for begin/end event pairs.
+TEST_F(TraceEventAnalyzerTest, BeginEndDuration) {
+  ManualSetUp();
+
+  const base::TimeDelta kSleepTime = base::TimeDelta::FromMilliseconds(200);
+  // We will search for events whose duration is greater than 90% of the
+  // sleep time, so that the test is not flaky.
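+  // For kSleepTime = 200 ms, the cutoff is (200000 us * 9) / 10 = 180000 us.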
+  int64_t duration_cutoff_us = (kSleepTime.InMicroseconds() * 9) / 10;
+
+  BeginTracing();
+  {
+    TRACE_EVENT_BEGIN0("cat1", "name1"); // found by duration query
+    TRACE_EVENT_BEGIN0("noise", "name2"); // not searched for, just noise
+    {
+      TRACE_EVENT_BEGIN0("cat2", "name3"); // found by duration query
+      // next event not searched for, just noise
+      TRACE_EVENT_INSTANT0("noise", "name4", TRACE_EVENT_SCOPE_THREAD);
+      base::PlatformThread::Sleep(kSleepTime);
+      TRACE_EVENT_BEGIN0("cat2", "name5"); // not found (duration too short)
+      TRACE_EVENT_END0("cat2", "name5"); // not found (duration too short)
+      TRACE_EVENT_END0("cat2", "name3"); // found by duration query
+    }
+    TRACE_EVENT_END0("noise", "name2"); // not searched for, just noise
+    TRACE_EVENT_END0("cat1", "name1"); // found by duration query
+  }
+  EndTracing();
+
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
+  ASSERT_TRUE(analyzer.get());
+  analyzer->AssociateBeginEndEvents();
+
+  TraceEventVector found;
+  analyzer->FindEvents(
+      Query::MatchBeginWithEnd() &&
+      Query::EventDuration() >
+          Query::Int(static_cast<int>(duration_cutoff_us)) &&
+      (Query::EventCategory() == Query::String("cat1") ||
+       Query::EventCategory() == Query::String("cat2") ||
+       Query::EventCategory() == Query::String("cat3")),
+      &found);
+  ASSERT_EQ(2u, found.size());
+  EXPECT_STREQ("name1", found[0]->name.c_str());
+  EXPECT_STREQ("name3", found[1]->name.c_str());
+}
+
+// Test that duration queries work for complete events.
+TEST_F(TraceEventAnalyzerTest, CompleteDuration) {
+  ManualSetUp();
+
+  const base::TimeDelta kSleepTime = base::TimeDelta::FromMilliseconds(200);
+  // We will search for events whose duration is greater than 90% of the
+  // sleep time, so that the test is not flaky.
+  int64_t duration_cutoff_us = (kSleepTime.InMicroseconds() * 9) / 10;
+
+  BeginTracing();
+  {
+    TRACE_EVENT0("cat1", "name1"); // found by duration query
+    TRACE_EVENT0("noise", "name2"); // not searched for, just noise
+    {
+      TRACE_EVENT0("cat2", "name3"); // found by duration query
+      // next event not searched for, just noise
+      TRACE_EVENT_INSTANT0("noise", "name4", TRACE_EVENT_SCOPE_THREAD);
+      base::PlatformThread::Sleep(kSleepTime);
+      TRACE_EVENT0("cat2", "name5"); // not found (duration too short)
+    }
+  }
+  EndTracing();
+
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
+  ASSERT_TRUE(analyzer.get());
+  analyzer->AssociateBeginEndEvents();
+
+  TraceEventVector found;
+  analyzer->FindEvents(
+      Query::EventCompleteDuration() >
+          Query::Int(static_cast<int>(duration_cutoff_us)) &&
+      (Query::EventCategory() == Query::String("cat1") ||
+       Query::EventCategory() == Query::String("cat2") ||
+       Query::EventCategory() == Query::String("cat3")),
+      &found);
+  ASSERT_EQ(2u, found.size());
+  EXPECT_STREQ("name1", found[0]->name.c_str());
+  EXPECT_STREQ("name3", found[1]->name.c_str());
+}
+
+// Test AssociateBeginEndEvents
+TEST_F(TraceEventAnalyzerTest, BeginEndAssociations) {
+  ManualSetUp();
+
+  BeginTracing();
+  {
+    TRACE_EVENT_END0("cat1", "name1"); // does not match out of order begin
+    TRACE_EVENT_BEGIN0("cat1", "name2");
+    TRACE_EVENT_INSTANT0("cat1", "name3", TRACE_EVENT_SCOPE_THREAD);
+    TRACE_EVENT_BEGIN0("cat1", "name1");
+    TRACE_EVENT_END0("cat1", "name2");
+  }
+  EndTracing();
+
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
+  ASSERT_TRUE(analyzer.get());
+  analyzer->AssociateBeginEndEvents();
+
+  TraceEventVector found;
+  analyzer->FindEvents(Query::MatchBeginWithEnd(), &found);
+  ASSERT_EQ(1u, found.size());
+  EXPECT_STREQ("name2", found[0]->name.c_str());
+}
+
+// Test MergeAssociatedEventArgs
+TEST_F(TraceEventAnalyzerTest, MergeAssociatedEventArgs) {
+  ManualSetUp();
+
+  const char arg_string[] = "arg_string";
+  BeginTracing();
+  {
+    TRACE_EVENT_BEGIN0("cat1", "name1");
+    TRACE_EVENT_END1("cat1", "name1", "arg", arg_string);
+  }
+  EndTracing();
+
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
+  ASSERT_TRUE(analyzer.get());
+  analyzer->AssociateBeginEndEvents();
+
+  TraceEventVector found;
+  analyzer->FindEvents(Query::MatchBeginName("name1"), &found);
+  ASSERT_EQ(1u, found.size());
+  std::string arg_actual;
+  EXPECT_FALSE(found[0]->GetArgAsString("arg", &arg_actual));
+
+  analyzer->MergeAssociatedEventArgs();
+  EXPECT_TRUE(found[0]->GetArgAsString("arg", &arg_actual));
+  EXPECT_STREQ(arg_string, arg_actual.c_str());
+}
+
+// Test AssociateAsyncBeginEndEvents
+TEST_F(TraceEventAnalyzerTest, AsyncBeginEndAssociations) {
+  ManualSetUp();
+
+  BeginTracing();
+  {
+    TRACE_EVENT_ASYNC_END0("cat1", "name1", 0xA); // no match / out of order
+    TRACE_EVENT_ASYNC_BEGIN0("cat1", "name1", 0xB);
+    TRACE_EVENT_ASYNC_BEGIN0("cat1", "name1", 0xC);
+    TRACE_EVENT_INSTANT0("cat1", "name1", TRACE_EVENT_SCOPE_THREAD); // noise
+    TRACE_EVENT0("cat1", "name1"); // noise
+    TRACE_EVENT_ASYNC_END0("cat1", "name1", 0xB);
+    TRACE_EVENT_ASYNC_END0("cat1", "name1", 0xC);
+    TRACE_EVENT_ASYNC_BEGIN0("cat1", "name1", 0xA); // no match / out of order
+  }
+  EndTracing();
+
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
+  ASSERT_TRUE(analyzer.get());
+  analyzer->AssociateAsyncBeginEndEvents();
+
+  TraceEventVector found;
+  analyzer->FindEvents(Query::MatchAsyncBeginWithNext(), &found);
+  ASSERT_EQ(2u, found.size());
+  EXPECT_STRCASEEQ("0xb", found[0]->id.c_str());
+  EXPECT_STRCASEEQ("0xc", found[1]->id.c_str());
+}
+
+// Test AssociateAsyncBeginEndEvents with step events.
+TEST_F(TraceEventAnalyzerTest, AsyncBeginEndAssociationsWithSteps) {
+  ManualSetUp();
+
+  BeginTracing();
+  {
+    TRACE_EVENT_ASYNC_STEP_INTO0("c", "n", 0xA, "s1");
+    TRACE_EVENT_ASYNC_END0("c", "n", 0xA);
+    TRACE_EVENT_ASYNC_BEGIN0("c", "n", 0xB);
+    TRACE_EVENT_ASYNC_BEGIN0("c", "n", 0xC);
+    TRACE_EVENT_ASYNC_STEP_PAST0("c", "n", 0xB, "s1");
+    TRACE_EVENT_ASYNC_STEP_INTO0("c", "n", 0xC, "s1");
+    TRACE_EVENT_ASYNC_STEP_INTO1("c", "n", 0xC, "s2", "a", 1);
+    TRACE_EVENT_ASYNC_END0("c", "n", 0xB);
+    TRACE_EVENT_ASYNC_END0("c", "n", 0xC);
+    TRACE_EVENT_ASYNC_BEGIN0("c", "n", 0xA);
+    TRACE_EVENT_ASYNC_STEP_INTO0("c", "n", 0xA, "s2");
+  }
+  EndTracing();
+
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
+  ASSERT_TRUE(analyzer.get());
+  analyzer->AssociateAsyncBeginEndEvents();
+
+  TraceEventVector found;
+  analyzer->FindEvents(Query::MatchAsyncBeginWithNext(), &found);
+  ASSERT_EQ(3u, found.size());
+
+  EXPECT_STRCASEEQ("0xb", found[0]->id.c_str());
+  EXPECT_EQ(TRACE_EVENT_PHASE_ASYNC_STEP_PAST, found[0]->other_event->phase);
+  EXPECT_EQ(found[0], found[0]->other_event->prev_event);
+  EXPECT_TRUE(found[0]->other_event->other_event);
+  EXPECT_EQ(TRACE_EVENT_PHASE_ASYNC_END,
+            found[0]->other_event->other_event->phase);
+  EXPECT_EQ(found[0]->other_event,
+            found[0]->other_event->other_event->prev_event);
+
+  EXPECT_STRCASEEQ("0xc", found[1]->id.c_str());
+  EXPECT_EQ(TRACE_EVENT_PHASE_ASYNC_STEP_INTO, found[1]->other_event->phase);
+  EXPECT_EQ(found[1], found[1]->other_event->prev_event);
+  EXPECT_TRUE(found[1]->other_event->other_event);
+  EXPECT_EQ(TRACE_EVENT_PHASE_ASYNC_STEP_INTO,
+            found[1]->other_event->other_event->phase);
+  EXPECT_EQ(found[1]->other_event,
+            found[1]->other_event->other_event->prev_event);
+  double arg_actual = 0;
+  EXPECT_TRUE(found[1]->other_event->other_event->GetArgAsNumber(
+                  "a", &arg_actual));
+  EXPECT_EQ(1.0, arg_actual);
+  EXPECT_TRUE(found[1]->other_event->other_event->other_event);
+  EXPECT_EQ(TRACE_EVENT_PHASE_ASYNC_END,
+            found[1]->other_event->other_event->other_event->phase);
+
+  EXPECT_STRCASEEQ("0xa", found[2]->id.c_str());
+  EXPECT_EQ(TRACE_EVENT_PHASE_ASYNC_STEP_INTO, found[2]->other_event->phase);
+}
+
+// Test that the TraceAnalyzer custom associations work.
+TEST_F(TraceEventAnalyzerTest, CustomAssociations) {
+  ManualSetUp();
+
+  // Add events that begin/end in pipelined ordering, with a unique "id"
+  // argument to match up the begin/end pairs.
+  BeginTracing();
+  {
+    // no begin match
+    TRACE_EVENT_INSTANT1("cat1", "end", TRACE_EVENT_SCOPE_THREAD, "id", 1);
+    // end is cat4
+    TRACE_EVENT_INSTANT1("cat2", "begin", TRACE_EVENT_SCOPE_THREAD, "id", 2);
+    // end is cat5
+    TRACE_EVENT_INSTANT1("cat3", "begin", TRACE_EVENT_SCOPE_THREAD, "id", 3);
+    TRACE_EVENT_INSTANT1("cat4", "end", TRACE_EVENT_SCOPE_THREAD, "id", 2);
+    TRACE_EVENT_INSTANT1("cat5", "end", TRACE_EVENT_SCOPE_THREAD, "id", 3);
+    // no end match
+    TRACE_EVENT_INSTANT1("cat6", "begin", TRACE_EVENT_SCOPE_THREAD, "id", 1);
+  }
+  EndTracing();
+
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
+  ASSERT_TRUE(analyzer.get());
+
+  // Build begin, end, and match queries to find the proper begin/end pairs.
+  Query begin(Query::EventName() == Query::String("begin"));
+  Query end(Query::EventName() == Query::String("end"));
+  Query match(Query::EventArg("id") == Query::OtherArg("id"));
+  analyzer->AssociateEvents(begin, end, match);
+
+  TraceEventVector found;
+
+  // cat1 has no other_event.
+  analyzer->FindEvents(Query::EventCategory() == Query::String("cat1") &&
+                       Query::EventHasOther(), &found);
+  EXPECT_EQ(0u, found.size());
+
+  // cat1 has no other_event.
+  analyzer->FindEvents(Query::EventCategory() == Query::String("cat1") &&
+                       !Query::EventHasOther(), &found);
+  EXPECT_EQ(1u, found.size());
+
+  // cat6 has no other_event.
+  analyzer->FindEvents(Query::EventCategory() == Query::String("cat6") &&
+                       !Query::EventHasOther(), &found);
+  EXPECT_EQ(1u, found.size());
+
+  // cat2 and cat4 are associated.
+  analyzer->FindEvents(Query::EventCategory() == Query::String("cat2") &&
+                       Query::OtherCategory() == Query::String("cat4"), &found);
+  EXPECT_EQ(1u, found.size());
+
+  // cat4 and cat2 are not associated.
+  analyzer->FindEvents(Query::EventCategory() == Query::String("cat4") &&
+                       Query::OtherCategory() == Query::String("cat2"), &found);
+  EXPECT_EQ(0u, found.size());
+
+  // cat3 and cat5 are associated.
+  analyzer->FindEvents(Query::EventCategory() == Query::String("cat3") &&
+                       Query::OtherCategory() == Query::String("cat5"), &found);
+  EXPECT_EQ(1u, found.size());
+
+  // cat5 and cat3 are not associated.
+  analyzer->FindEvents(Query::EventCategory() == Query::String("cat5") &&
+                       Query::OtherCategory() == Query::String("cat3"), &found);
+  EXPECT_EQ(0u, found.size());
+}
+
+// Verify that Query literals and types are properly cast.
+TEST_F(TraceEventAnalyzerTest, Literals) {
+  ManualSetUp();
+
+  // Since these queries don't refer to the event data, the dummy event below
+  // will never be accessed.
+  TraceEvent dummy;
+  char char_num = 5;
+  short short_num = -5;
+  EXPECT_TRUE((Query::Double(5.0) == Query::Int(char_num)).Evaluate(dummy));
+  EXPECT_TRUE((Query::Double(-5.0) == Query::Int(short_num)).Evaluate(dummy));
+  EXPECT_TRUE((Query::Double(1.0) == Query::Uint(1u)).Evaluate(dummy));
+  EXPECT_TRUE((Query::Double(1.0) == Query::Int(1)).Evaluate(dummy));
+  EXPECT_TRUE((Query::Double(-1.0) == Query::Int(-1)).Evaluate(dummy));
+  EXPECT_TRUE((Query::Double(1.0) == Query::Double(1.0f)).Evaluate(dummy));
+  EXPECT_TRUE((Query::Bool(true) == Query::Int(1)).Evaluate(dummy));
+  EXPECT_TRUE((Query::Bool(false) == Query::Int(0)).Evaluate(dummy));
+  EXPECT_TRUE((Query::Bool(true) == Query::Double(1.0f)).Evaluate(dummy));
+  EXPECT_TRUE((Query::Bool(false) == Query::Double(0.0f)).Evaluate(dummy));
+}
+
+// Test GetRateStats.
+TEST_F(TraceEventAnalyzerTest, RateStats) {
+  std::vector<TraceEvent> events;
+  events.reserve(100);
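+  // Reserving up front matters: event_ptrs stores pointers into |events|, and
+  // a reallocation during push_back would invalidate them.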
+  TraceEventVector event_ptrs;
+  double timestamp = 0.0;
+  double little_delta = 1.0;
+  double big_delta = 10.0;
+  double tiny_delta = 0.1;
+  RateStats stats;
+  RateStatsOptions options;
+
+  // Insert 10 events, each apart by little_delta.
+  for (int i = 0; i < 10; ++i) {
+    timestamp += little_delta;
+    TraceEvent event;
+    event.timestamp = timestamp;
+    events.push_back(std::move(event));
+    event_ptrs.push_back(&events.back());
+  }
+
+  ASSERT_TRUE(GetRateStats(event_ptrs, &stats, nullptr));
+  EXPECT_EQ(little_delta, stats.mean_us);
+  EXPECT_EQ(little_delta, stats.min_us);
+  EXPECT_EQ(little_delta, stats.max_us);
+  EXPECT_EQ(0.0, stats.standard_deviation_us);
+
+  // Add an event apart by big_delta.
+  {
+    timestamp += big_delta;
+    TraceEvent event;
+    event.timestamp = timestamp;
+    events.push_back(std::move(event));
+    event_ptrs.push_back(&events.back());
+  }
+
+  ASSERT_TRUE(GetRateStats(event_ptrs, &stats, nullptr));
+  EXPECT_LT(little_delta, stats.mean_us);
+  EXPECT_EQ(little_delta, stats.min_us);
+  EXPECT_EQ(big_delta, stats.max_us);
+  EXPECT_LT(0.0, stats.standard_deviation_us);
+
+  // Trim off the biggest delta and verify stats.
+  options.trim_min = 0;
+  options.trim_max = 1;
+  ASSERT_TRUE(GetRateStats(event_ptrs, &stats, &options));
+  EXPECT_EQ(little_delta, stats.mean_us);
+  EXPECT_EQ(little_delta, stats.min_us);
+  EXPECT_EQ(little_delta, stats.max_us);
+  EXPECT_EQ(0.0, stats.standard_deviation_us);
+
+  // Add an event apart by tiny_delta.
+  {
+    timestamp += tiny_delta;
+    TraceEvent event;
+    event.timestamp = timestamp;
+    events.push_back(std::move(event));
+    event_ptrs.push_back(&events.back());
+  }
+
+  // Trim off both the biggest and tiniest delta and verify stats.
+  options.trim_min = 1;
+  options.trim_max = 1;
+  ASSERT_TRUE(GetRateStats(event_ptrs, &stats, &options));
+  EXPECT_EQ(little_delta, stats.mean_us);
+  EXPECT_EQ(little_delta, stats.min_us);
+  EXPECT_EQ(little_delta, stats.max_us);
+  EXPECT_EQ(0.0, stats.standard_deviation_us);
+
+  // Verify smallest allowed number of events.
+  {
+    TraceEvent event;
+    TraceEventVector few_event_ptrs;
+    few_event_ptrs.push_back(&event);
+    few_event_ptrs.push_back(&event);
+    ASSERT_FALSE(GetRateStats(few_event_ptrs, &stats, nullptr));
+    few_event_ptrs.push_back(&event);
+    ASSERT_TRUE(GetRateStats(few_event_ptrs, &stats, nullptr));
+
+    // Trim off more than allowed and verify failure.
+    options.trim_min = 0;
+    options.trim_max = 1;
+    ASSERT_FALSE(GetRateStats(few_event_ptrs, &stats, &options));
+  }
+}
+
+// Test FindFirstOf and FindLastOf.
+TEST_F(TraceEventAnalyzerTest, FindOf) {
+  size_t num_events = 100;
+  size_t index = 0;
+  TraceEventVector event_ptrs;
+  EXPECT_FALSE(FindFirstOf(event_ptrs, Query::Bool(true), 0, &index));
+  EXPECT_FALSE(FindFirstOf(event_ptrs, Query::Bool(true), 10, &index));
+  EXPECT_FALSE(FindLastOf(event_ptrs, Query::Bool(true), 0, &index));
+  EXPECT_FALSE(FindLastOf(event_ptrs, Query::Bool(true), 10, &index));
+
+  std::vector<TraceEvent> events;
+  events.resize(num_events);
+  for (size_t i = 0; i < events.size(); ++i)
+    event_ptrs.push_back(&events[i]);
+  size_t bam_index = num_events/2;
+  events[bam_index].name = "bam";
+  Query query_bam = Query::EventName() == Query::String(events[bam_index].name);
+
+  // FindFirstOf
+  EXPECT_FALSE(FindFirstOf(event_ptrs, Query::Bool(false), 0, &index));
+  EXPECT_TRUE(FindFirstOf(event_ptrs, Query::Bool(true), 0, &index));
+  EXPECT_EQ(0u, index);
+  EXPECT_TRUE(FindFirstOf(event_ptrs, Query::Bool(true), 5, &index));
+  EXPECT_EQ(5u, index);
+
+  EXPECT_FALSE(FindFirstOf(event_ptrs, query_bam, bam_index + 1, &index));
+  EXPECT_TRUE(FindFirstOf(event_ptrs, query_bam, 0, &index));
+  EXPECT_EQ(bam_index, index);
+  EXPECT_TRUE(FindFirstOf(event_ptrs, query_bam, bam_index, &index));
+  EXPECT_EQ(bam_index, index);
+
+  // FindLastOf
+  EXPECT_FALSE(FindLastOf(event_ptrs, Query::Bool(false), 1000, &index));
+  EXPECT_TRUE(FindLastOf(event_ptrs, Query::Bool(true), 1000, &index));
+  EXPECT_EQ(num_events - 1, index);
+  EXPECT_TRUE(FindLastOf(event_ptrs, Query::Bool(true), num_events - 5,
+                         &index));
+  EXPECT_EQ(num_events - 5, index);
+
+  EXPECT_FALSE(FindLastOf(event_ptrs, query_bam, bam_index - 1, &index));
+  EXPECT_TRUE(FindLastOf(event_ptrs, query_bam, num_events, &index));
+  EXPECT_EQ(bam_index, index);
+  EXPECT_TRUE(FindLastOf(event_ptrs, query_bam, bam_index, &index));
+  EXPECT_EQ(bam_index, index);
+}
+
+// Test FindClosest.
+TEST_F(TraceEventAnalyzerTest, FindClosest) {
+  size_t index_1 = 0;
+  size_t index_2 = 0;
+  TraceEventVector event_ptrs;
+  EXPECT_FALSE(FindClosest(event_ptrs, Query::Bool(true), 0,
+                           &index_1, &index_2));
+
+  size_t num_events = 5;
+  std::vector<TraceEvent> events;
+  events.resize(num_events);
+  for (size_t i = 0; i < events.size(); ++i) {
+    // Timestamps grow quadratically (i * i), so the gaps widen and the lower
+    // index is always closer in time than the higher index.
+    events[i].timestamp = static_cast<double>(i) * static_cast<double>(i);
+    event_ptrs.push_back(&events[i]);
+  }
+  events[0].name = "one";
+  events[2].name = "two";
+  events[4].name = "three";
+  Query query_named = Query::EventName() != Query::String(std::string());
+  Query query_one = Query::EventName() == Query::String("one");
+
+  // Only one event matches query_one, so two closest can't be found.
+  EXPECT_FALSE(FindClosest(event_ptrs, query_one, 0, &index_1, &index_2));
+
+  EXPECT_TRUE(FindClosest(event_ptrs, query_one, 3, &index_1, nullptr));
+  EXPECT_EQ(0u, index_1);
+
+  EXPECT_TRUE(FindClosest(event_ptrs, query_named, 1, &index_1, &index_2));
+  EXPECT_EQ(0u, index_1);
+  EXPECT_EQ(2u, index_2);
+
+  EXPECT_TRUE(FindClosest(event_ptrs, query_named, 4, &index_1, &index_2));
+  EXPECT_EQ(4u, index_1);
+  EXPECT_EQ(2u, index_2);
+
+  EXPECT_TRUE(FindClosest(event_ptrs, query_named, 3, &index_1, &index_2));
+  EXPECT_EQ(2u, index_1);
+  EXPECT_EQ(0u, index_2);
+}
+
+// Test CountMatches.
+TEST_F(TraceEventAnalyzerTest, CountMatches) {
+  TraceEventVector event_ptrs;
+  EXPECT_EQ(0u, CountMatches(event_ptrs, Query::Bool(true), 0, 10));
+
+  size_t num_events = 5;
+  size_t num_named = 3;
+  std::vector<TraceEvent> events;
+  events.resize(num_events);
+  for (size_t i = 0; i < events.size(); ++i)
+    event_ptrs.push_back(&events[i]);
+  events[0].name = "one";
+  events[2].name = "two";
+  events[4].name = "three";
+  Query query_named = Query::EventName() != Query::String(std::string());
+  Query query_one = Query::EventName() == Query::String("one");
+
+  EXPECT_EQ(0u, CountMatches(event_ptrs, Query::Bool(false)));
+  EXPECT_EQ(num_events, CountMatches(event_ptrs, Query::Bool(true)));
+  EXPECT_EQ(num_events - 1, CountMatches(event_ptrs, Query::Bool(true),
+                                         1, num_events));
+  EXPECT_EQ(1u, CountMatches(event_ptrs, query_one));
+  EXPECT_EQ(num_events - 1, CountMatches(event_ptrs, !query_one));
+  EXPECT_EQ(num_named, CountMatches(event_ptrs, query_named));
+}
+
+TEST_F(TraceEventAnalyzerTest, ComplexArgument) {
+  ManualSetUp();
+
+  BeginTracing();
+  {
+    std::unique_ptr<base::trace_event::TracedValue> value(
+        new base::trace_event::TracedValue);
+    value->SetString("property", "value");
+    TRACE_EVENT1("cat", "name", "arg", std::move(value));
+  }
+  EndTracing();
+
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
+  ASSERT_TRUE(analyzer.get());
+
+  TraceEventVector events;
+  analyzer->FindEvents(Query::EventName() == Query::String("name"), &events);
+
+  EXPECT_EQ(1u, events.size());
+  EXPECT_EQ("cat", events[0]->category);
+  EXPECT_EQ("name", events[0]->name);
+  EXPECT_TRUE(events[0]->HasArg("arg"));
+
+  std::unique_ptr<base::Value> arg;
+  ASSERT_TRUE(events[0]->GetArgAsValue("arg", &arg));
+  base::DictionaryValue* arg_dict = nullptr;
+  EXPECT_TRUE(arg->GetAsDictionary(&arg_dict));
+  std::string property;
+  EXPECT_TRUE(arg_dict->GetString("property", &property));
+  EXPECT_EQ("value", property);
+}
+
+}  // namespace trace_analyzer
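Distilled from the tests above, the analyzer workflow is: build a TraceAnalyzer
from the collected JSON, associate paired events, then run queries. A minimal
sketch, where json_trace stands in for the collected JSON string and the
category and cutoff values are illustrative only:

    std::unique_ptr<trace_analyzer::TraceAnalyzer> analyzer(
        trace_analyzer::TraceAnalyzer::Create(json_trace));
    ASSERT_TRUE(analyzer.get());
    analyzer->AssociateBeginEndEvents();  // Pair BEGIN/END for duration queries.
    trace_analyzer::TraceEventVector found;
    analyzer->FindEvents(
        trace_analyzer::Query::MatchBeginWithEnd() &&
            trace_analyzer::Query::EventCategory() ==
                trace_analyzer::Query::String("cat1") &&  // Illustrative.
            trace_analyzer::Query::EventDuration() >
                trace_analyzer::Query::Int(180000),  // Illustrative cutoff (us).
        &found);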
diff --git a/base/test/trace_to_file.cc b/base/test/trace_to_file.cc
new file mode 100644
index 0000000..17aa80b
--- /dev/null
+++ b/base/test/trace_to_file.cc
@@ -0,0 +1,106 @@
+// Copyright (c) 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/trace_to_file.h"
+
+#include "base/base_switches.h"
+#include "base/bind.h"
+#include "base/command_line.h"
+#include "base/files/file_util.h"
+#include "base/memory/ref_counted_memory.h"
+#include "base/run_loop.h"
+#include "base/trace_event/trace_buffer.h"
+#include "base/trace_event/trace_log.h"
+
+namespace base {
+namespace test {
+
+TraceToFile::TraceToFile() : started_(false) {
+}
+
+TraceToFile::~TraceToFile() {
+  EndTracingIfNeeded();
+}
+
+void TraceToFile::BeginTracingFromCommandLineOptions() {
+  DCHECK(CommandLine::InitializedForCurrentProcess());
+  DCHECK(!started_);
+
+  if (!CommandLine::ForCurrentProcess()->HasSwitch(switches::kTraceToFile))
+    return;
+
+  // An empty filter (i.e. just --trace-to-file) turns into the default
+  // categories in TraceEventImpl.
+  std::string filter = CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
+      switches::kTraceToFile);
+
+  FilePath path;
+  if (CommandLine::ForCurrentProcess()->HasSwitch(switches::kTraceToFileName)) {
+    path = FilePath(CommandLine::ForCurrentProcess()
+                        ->GetSwitchValuePath(switches::kTraceToFileName));
+  } else {
+    path = FilePath(FILE_PATH_LITERAL("trace.json"));
+  }
+
+  BeginTracing(path, filter);
+}
+
+void TraceToFile::BeginTracing(const FilePath& path,
+                               const std::string& categories) {
+  DCHECK(!started_);
+  started_ = true;
+  path_ = path;
+  WriteFileHeader();
+
+  trace_event::TraceLog::GetInstance()->SetEnabled(
+      trace_event::TraceConfig(categories, trace_event::RECORD_UNTIL_FULL),
+      trace_event::TraceLog::RECORDING_MODE);
+}
+
+void TraceToFile::WriteFileHeader() {
+  const char str[] = "{\"traceEvents\": [";
+  WriteFile(path_, str, static_cast<int>(strlen(str)));
+}
+
+void TraceToFile::AppendFileFooter() {
+  const char str[] = "]}";
+  AppendToFile(path_, str, static_cast<int>(strlen(str)));
+}
+
+void TraceToFile::TraceOutputCallback(const std::string& data) {
+  bool ret = AppendToFile(path_, data.c_str(), static_cast<int>(data.size()));
+  DCHECK(ret);
+}
+
+static void OnTraceDataCollected(
+    Closure quit_closure,
+    trace_event::TraceResultBuffer* buffer,
+    const scoped_refptr<RefCountedString>& json_events_str,
+    bool has_more_events) {
+  buffer->AddFragment(json_events_str->data());
+  if (!has_more_events)
+    quit_closure.Run();
+}
+
+void TraceToFile::EndTracingIfNeeded() {
+  if (!started_)
+    return;
+  started_ = false;
+
+  trace_event::TraceLog::GetInstance()->SetDisabled();
+
+  trace_event::TraceResultBuffer buffer;
+  buffer.SetOutputCallback(
+      Bind(&TraceToFile::TraceOutputCallback, Unretained(this)));
+
+  RunLoop run_loop;
+  trace_event::TraceLog::GetInstance()->Flush(
+      Bind(&OnTraceDataCollected, run_loop.QuitClosure(), Unretained(&buffer)));
+  run_loop.Run();
+
+  AppendFileFooter();
+}
+
+}  // namespace test
+}  // namespace base
diff --git a/base/test/trace_to_file.h b/base/test/trace_to_file.h
new file mode 100644
index 0000000..4308736
--- /dev/null
+++ b/base/test/trace_to_file.h
@@ -0,0 +1,35 @@
+// Copyright (c) 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_TRACE_TO_FILE_H_
+#define BASE_TEST_TRACE_TO_FILE_H_
+
+#include "base/files/file_path.h"
+
+namespace base {
+namespace test {
+
+class TraceToFile {
+ public:
+  TraceToFile();
+  ~TraceToFile();
+
+  void BeginTracingFromCommandLineOptions();
+  void BeginTracing(const base::FilePath& path, const std::string& categories);
+  void EndTracingIfNeeded();
+
+ private:
+  void WriteFileHeader();
+  void AppendFileFooter();
+
+  void TraceOutputCallback(const std::string& data);
+
+  base::FilePath path_;
+  bool started_;
+};
+
+}  // namespace test
+}  // namespace base
+
+#endif  // BASE_TEST_TRACE_TO_FILE_H_
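A minimal usage sketch for this helper, assuming a test harness where a message
loop is available for the flush in EndTracingIfNeeded(); the path and category
list here are illustrative:

    base::test::TraceToFile trace_to_file;
    trace_to_file.BeginTracing(base::FilePath(FILE_PATH_LITERAL("trace.json")),
                               "cat1,cat2");  // Illustrative categories.
    // ... exercise the code under test ...
    trace_to_file.EndTracingIfNeeded();  // Also invoked by the destructor.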
diff --git a/base/test/user_action_tester.cc b/base/test/user_action_tester.cc
new file mode 100644
index 0000000..6c3de39
--- /dev/null
+++ b/base/test/user_action_tester.cc
@@ -0,0 +1,38 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/user_action_tester.h"
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/test/test_simple_task_runner.h"
+
+namespace base {
+
+UserActionTester::UserActionTester()
+    : task_runner_(new base::TestSimpleTaskRunner),
+      action_callback_(
+          base::Bind(&UserActionTester::OnUserAction, base::Unretained(this))) {
+  base::SetRecordActionTaskRunner(task_runner_);
+  base::AddActionCallback(action_callback_);
+}
+
+UserActionTester::~UserActionTester() {
+  base::RemoveActionCallback(action_callback_);
+}
+
+int UserActionTester::GetActionCount(const std::string& user_action) const {
+  UserActionCountMap::const_iterator iter = count_map_.find(user_action);
+  return iter == count_map_.end() ? 0 : iter->second;
+}
+
+void UserActionTester::ResetCounts() {
+  count_map_.clear();
+}
+
+void UserActionTester::OnUserAction(const std::string& user_action) {
+  ++(count_map_[user_action]);
+}
+
+}  // namespace base
diff --git a/base/test/user_action_tester.h b/base/test/user_action_tester.h
new file mode 100644
index 0000000..88bc632
--- /dev/null
+++ b/base/test/user_action_tester.h
@@ -0,0 +1,50 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_USER_ACTION_TESTER_H_
+#define BASE_TEST_USER_ACTION_TESTER_H_
+
+#include <map>
+#include <string>
+
+#include "base/macros.h"
+#include "base/metrics/user_metrics.h"
+
+namespace base {
+
+// This class observes and collects user action notifications that are sent
+// by the tests, so that they can be examined afterwards for correctness.
+// Note: This class is NOT thread-safe.
+class UserActionTester {
+ public:
+  UserActionTester();
+  ~UserActionTester();
+
+  // Returns the number of times the given |user_action| occurred.
+  int GetActionCount(const std::string& user_action) const;
+
+  // Resets all user action counts to 0.
+  void ResetCounts();
+
+ private:
+  typedef std::map<std::string, int> UserActionCountMap;
+
+  // The callback that is notified when a user action occurs.
+  void OnUserAction(const std::string& user_action);
+
+  // A map that tracks the number of times a user action has occurred.
+  UserActionCountMap count_map_;
+
+  // A test task runner used by user metrics.
+  scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+
+  // The callback that is added to the global action callback list.
+  base::ActionCallback action_callback_;
+
+  DISALLOW_COPY_AND_ASSIGN(UserActionTester);
+};
+
+}  // namespace base
+
+#endif  // BASE_TEST_USER_ACTION_TESTER_H_
diff --git a/base/test/user_action_tester_unittest.cc b/base/test/user_action_tester_unittest.cc
new file mode 100644
index 0000000..a51849f
--- /dev/null
+++ b/base/test/user_action_tester_unittest.cc
@@ -0,0 +1,86 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/user_action_tester.h"
+
+#include "base/metrics/user_metrics.h"
+#include "base/metrics/user_metrics_action.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+const char kUserAction1[] = "user.action.1";
+const char kUserAction2[] = "user.action.2";
+const char kUserAction3[] = "user.action.3";
+
+// Record an action and cause all ActionCallback observers to be notified.
+void RecordAction(const char user_action[]) {
+  base::RecordAction(base::UserMetricsAction(user_action));
+}
+
+}  // namespace
+
+// Verify user action counts are zero initially.
+TEST(UserActionTesterTest, GetActionCountWhenNoActionsHaveBeenRecorded) {
+  UserActionTester user_action_tester;
+  EXPECT_EQ(0, user_action_tester.GetActionCount(kUserAction1));
+}
+
+// Verify user action counts are tracked properly.
+TEST(UserActionTesterTest, GetActionCountWhenActionsHaveBeenRecorded) {
+  UserActionTester user_action_tester;
+
+  RecordAction(kUserAction1);
+  RecordAction(kUserAction2);
+  RecordAction(kUserAction2);
+
+  EXPECT_EQ(1, user_action_tester.GetActionCount(kUserAction1));
+  EXPECT_EQ(2, user_action_tester.GetActionCount(kUserAction2));
+  EXPECT_EQ(0, user_action_tester.GetActionCount(kUserAction3));
+}
+
+// Verify that resetting action counts when none have been recorded does not
+// crash.
+TEST(UserActionTesterTest, ResetCountsWhenNoActionsHaveBeenRecorded) {
+  UserActionTester user_action_tester;
+  user_action_tester.ResetCounts();
+}
+
+// Verify user action counts are set to zero on a ResetCounts.
+TEST(UserActionTesterTest, ResetCountsWhenActionsHaveBeenRecorded) {
+  UserActionTester user_action_tester;
+
+  RecordAction(kUserAction1);
+  RecordAction(kUserAction1);
+  RecordAction(kUserAction2);
+  user_action_tester.ResetCounts();
+
+  EXPECT_EQ(0, user_action_tester.GetActionCount(kUserAction1));
+  EXPECT_EQ(0, user_action_tester.GetActionCount(kUserAction2));
+  EXPECT_EQ(0, user_action_tester.GetActionCount(kUserAction3));
+}
+
+// Verify the UserActionTester is notified when base::RecordAction is called.
+TEST(UserActionTesterTest, VerifyUserActionTesterListensForUserActions) {
+  UserActionTester user_action_tester;
+
+  base::RecordAction(base::UserMetricsAction(kUserAction1));
+
+  EXPECT_EQ(1, user_action_tester.GetActionCount(kUserAction1));
+}
+
+// Verify the UserActionTester is notified when base::RecordComputedAction is
+// called.
+TEST(UserActionTesterTest,
+     VerifyUserActionTesterListensForComputedUserActions) {
+  UserActionTester user_action_tester;
+
+  base::RecordComputedAction(kUserAction1);
+
+  EXPECT_EQ(1, user_action_tester.GetActionCount(kUserAction1));
+}
+
+}  // namespace base
diff --git a/base/test/values_test_util.cc b/base/test/values_test_util.cc
new file mode 100644
index 0000000..a65c2c0
--- /dev/null
+++ b/base/test/values_test_util.cc
@@ -0,0 +1,76 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/values_test_util.h"
+
+#include <memory>
+
+#include "base/json/json_reader.h"
+#include "base/memory/ptr_util.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/values.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+void ExpectDictBooleanValue(bool expected_value,
+                            const DictionaryValue& value,
+                            const std::string& key) {
+  bool boolean_value = false;
+  EXPECT_TRUE(value.GetBoolean(key, &boolean_value)) << key;
+  EXPECT_EQ(expected_value, boolean_value) << key;
+}
+
+void ExpectDictDictionaryValue(const DictionaryValue& expected_value,
+                               const DictionaryValue& value,
+                               const std::string& key) {
+  const DictionaryValue* dict_value = nullptr;
+  EXPECT_TRUE(value.GetDictionary(key, &dict_value)) << key;
+  EXPECT_EQ(expected_value, *dict_value) << key;
+}
+
+void ExpectDictIntegerValue(int expected_value,
+                            const DictionaryValue& value,
+                            const std::string& key) {
+  int integer_value = 0;
+  EXPECT_TRUE(value.GetInteger(key, &integer_value)) << key;
+  EXPECT_EQ(expected_value, integer_value) << key;
+}
+
+void ExpectDictListValue(const ListValue& expected_value,
+                         const DictionaryValue& value,
+                         const std::string& key) {
+  const ListValue* list_value = nullptr;
+  EXPECT_TRUE(value.GetList(key, &list_value)) << key;
+  EXPECT_EQ(expected_value, *list_value) << key;
+}
+
+void ExpectDictStringValue(const std::string& expected_value,
+                           const DictionaryValue& value,
+                           const std::string& key) {
+  std::string string_value;
+  EXPECT_TRUE(value.GetString(key, &string_value)) << key;
+  EXPECT_EQ(expected_value, string_value) << key;
+}
+
+void ExpectStringValue(const std::string& expected_str, const Value& actual) {
+  EXPECT_EQ(Value::Type::STRING, actual.type());
+  EXPECT_EQ(expected_str, actual.GetString());
+}
+
+namespace test {
+
+std::unique_ptr<Value> ParseJson(base::StringPiece json) {
+  std::string error_msg;
+  std::unique_ptr<Value> result = base::JSONReader::ReadAndReturnError(
+      json, base::JSON_ALLOW_TRAILING_COMMAS, nullptr, &error_msg);
+  if (!result) {
+    ADD_FAILURE() << "Failed to parse \"" << json << "\": " << error_msg;
+    result = std::make_unique<Value>();
+  }
+  return result;
+}
+
+}  // namespace test
+}  // namespace base
diff --git a/base/test/values_test_util.h b/base/test/values_test_util.h
new file mode 100644
index 0000000..02ebca1
--- /dev/null
+++ b/base/test/values_test_util.h
@@ -0,0 +1,53 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_VALUES_TEST_UTIL_H_
+#define BASE_TEST_VALUES_TEST_UTIL_H_
+
+#include <memory>
+#include <string>
+
+#include "base/strings/string_piece.h"
+
+namespace base {
+class DictionaryValue;
+class ListValue;
+class Value;
+
+// All the functions below expect that the value for the given key in
+// the given dictionary equals the given expected value.
+
+void ExpectDictBooleanValue(bool expected_value,
+                            const DictionaryValue& value,
+                            const std::string& key);
+
+void ExpectDictDictionaryValue(const DictionaryValue& expected_value,
+                               const DictionaryValue& value,
+                               const std::string& key);
+
+void ExpectDictIntegerValue(int expected_value,
+                            const DictionaryValue& value,
+                            const std::string& key);
+
+void ExpectDictListValue(const ListValue& expected_value,
+                         const DictionaryValue& value,
+                         const std::string& key);
+
+void ExpectDictStringValue(const std::string& expected_value,
+                           const DictionaryValue& value,
+                           const std::string& key);
+
+void ExpectStringValue(const std::string& expected_str, const Value& actual);
+
+namespace test {
+
+// Parses |json| as JSON, allowing trailing commas, and returns the resulting
+// value.  If |json| fails to parse, adds an EXPECT failure and returns a null
+// Value (but never a NULL pointer).
+std::unique_ptr<Value> ParseJson(base::StringPiece json);
+
+}  // namespace test
+}  // namespace base
+
+#endif  // BASE_TEST_VALUES_TEST_UTIL_H_
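A hedged example of how these helpers compose in a test body; the JSON literal
and keys are illustrative:

    std::unique_ptr<base::Value> root = base::test::ParseJson(
        "{\"enabled\": true, \"count\": 3, \"name\": \"demo\",}");  // Trailing comma OK.
    base::DictionaryValue* dict = nullptr;
    ASSERT_TRUE(root->GetAsDictionary(&dict));
    base::ExpectDictBooleanValue(true, *dict, "enabled");
    base::ExpectDictIntegerValue(3, *dict, "count");
    base::ExpectDictStringValue("demo", *dict, "name");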
diff --git a/base/third_party/dmg_fp/LICENSE b/base/third_party/dmg_fp/LICENSE
new file mode 100644
index 0000000..716f1ef
--- /dev/null
+++ b/base/third_party/dmg_fp/LICENSE
@@ -0,0 +1,18 @@
+/****************************************************************
+ *
+ * The author of this software is David M. Gay.
+ *
+ * Copyright (c) 1991, 2000, 2001 by Lucent Technologies.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose without fee is hereby granted, provided that this entire notice
+ * is included in all copies of any software which is or includes a copy
+ * or modification of this software and in all copies of the supporting
+ * documentation for such software.
+ *
+ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED
+ * WARRANTY.  IN PARTICULAR, NEITHER THE AUTHOR NOR LUCENT MAKES ANY
+ * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY
+ * OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE.
+ *
+ ***************************************************************/
diff --git a/base/third_party/dmg_fp/README.chromium b/base/third_party/dmg_fp/README.chromium
new file mode 100644
index 0000000..13d5fb2
--- /dev/null
+++ b/base/third_party/dmg_fp/README.chromium
@@ -0,0 +1,22 @@
+Name: David M. Gay's floating point routines
+URL: http://www.netlib.org/fp/
+License: MIT-like
+
+Original dtoa.c file can be found at <http://www.netlib.org/fp/dtoa.c>.
+Original g_fmt.c file can be found at <http://www.netlib.org/fp/g_fmt.c>.
+
+List of changes made to original code:
+  - wrapped functions in dmg_fp namespace
+  - renamed .c files to .cc
+  - added dmg_fp.h header
+  - added #define IEEE_8087 to dtoa.cc
+  - added #define NO_HEX_FP to dtoa.cc
+  - made some minor changes to allow clean compilation under g++ -Wall, see
+    gcc_warnings.patch.
+  - made some minor changes to build on 64-bit, see gcc_64_bit.patch.
+  - made minor changes for -Wextra for Mac build, see mac_wextra.patch
+  - fixed warnings under msvc, see msvc_warnings.patch
+  - fixed parsing of long exponents, see exp_length.patch and crbug.com/542881
+  - made hexdig array const
+  - removed deprecated `register` keyword
+  - #undef Long so that it won't change Long in other files in jumbo builds
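For reference, the first two changes in the list above amount to lines near the
top of dtoa.cc of roughly this shape (a sketch; the file itself is
authoritative):

    #define IEEE_8087   /* IEEE doubles, least significant byte first */
    #define NO_HEX_FP   /* reject hexadecimal floating-point input */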
diff --git a/base/third_party/dmg_fp/const_hexdig.patch b/base/third_party/dmg_fp/const_hexdig.patch
new file mode 100644
index 0000000..1a3145a
--- /dev/null
+++ b/base/third_party/dmg_fp/const_hexdig.patch
@@ -0,0 +1,13 @@
+diff --git a/base/third_party/dmg_fp/dtoa.cc b/base/third_party/dmg_fp/dtoa.cc
+index d7e6826..be560bc 100644
+--- a/base/third_party/dmg_fp/dtoa.cc
++++ b/base/third_party/dmg_fp/dtoa.cc
+@@ -1533,7 +1533,7 @@ hexdig_init(void)	/* Use of hexdig_init omitted 20121220 to avoid a */
+ 	htinit(hexdig, USC "ABCDEF", 0x10 + 10);
+ 	}
+ #else
+-static unsigned char hexdig[256] = {
++static const unsigned char hexdig[256] = {
+ 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
diff --git a/base/third_party/dmg_fp/dmg_fp.h b/base/third_party/dmg_fp/dmg_fp.h
new file mode 100644
index 0000000..4795397
--- /dev/null
+++ b/base/third_party/dmg_fp/dmg_fp.h
@@ -0,0 +1,30 @@
+// Copyright (c) 2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_DMG_FP_H_
+#define THIRD_PARTY_DMG_FP_H_
+
+namespace dmg_fp {
+
+// Return a nearest machine number to the input decimal
+// string (or set errno to ERANGE). With IEEE arithmetic, ties are
+// broken by the IEEE round-even rule.  Otherwise ties are broken by
+// biased rounding (add half and chop).
+double strtod(const char* s00, char** se);
+
+// Convert double to ASCII string. For meaning of parameters
+// see dtoa.cc file.
+char* dtoa(double d, int mode, int ndigits,
+           int* decpt, int* sign, char** rve);
+
+// Must be used to free values returned by dtoa.
+void freedtoa(char* s);
+
+// Store the closest decimal approximation to x in b (null terminated).
+// Returns a pointer to b.  It is sufficient for |b| to be 32 characters.
+char* g_fmt(char* b, double x);
+
+}  // namespace dmg_fp
+
+#endif  // THIRD_PARTY_DMG_FP_H_
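A hedged round-trip sketch of this interface; mode 0 conventionally requests
the shortest digit string that round-trips (see the parameter notes in
dtoa.cc), and the concrete outputs noted below are illustrative:

    char* end;
    double d = dmg_fp::strtod("2.5e-3", &end);  // May set errno to ERANGE.

    char buf[32];            // 32 characters suffice, per the g_fmt contract.
    dmg_fp::g_fmt(buf, d);   // Closest decimal approximation of d.

    int decpt, sign;
    char* rve;
    char* digits = dmg_fp::dtoa(d, 0, 0, &decpt, &sign, &rve);
    // Expect digits "25" with decpt == -2 and sign == 0 for 2.5e-3.
    dmg_fp::freedtoa(digits);  // Required for strings returned by dtoa.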
diff --git a/base/third_party/dmg_fp/dtoa.cc b/base/third_party/dmg_fp/dtoa.cc
new file mode 100644
index 0000000..e846ee3
--- /dev/null
+++ b/base/third_party/dmg_fp/dtoa.cc
@@ -0,0 +1,4400 @@
+/****************************************************************
+ *
+ * The author of this software is David M. Gay.
+ *
+ * Copyright (c) 1991, 2000, 2001 by Lucent Technologies.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose without fee is hereby granted, provided that this entire notice
+ * is included in all copies of any software which is or includes a copy
+ * or modification of this software and in all copies of the supporting
+ * documentation for such software.
+ *
+ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED
+ * WARRANTY.  IN PARTICULAR, NEITHER THE AUTHOR NOR LUCENT MAKES ANY
+ * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY
+ * OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE.
+ *
+ ***************************************************************/
+
+/* Please send bug reports to David M. Gay (dmg at acm dot org,
+ * with " at " changed to "@" and " dot " changed to ".").	*/
+
+/* On a machine with IEEE extended-precision registers, it is
+ * necessary to specify double-precision (53-bit) rounding precision
+ * before invoking strtod or dtoa.  If the machine uses (the equivalent
+ * of) Intel 80x87 arithmetic, the call
+ *	_control87(PC_53, MCW_PC);
+ * does this with many compilers.  Whether this or another call is
+ * appropriate depends on the compiler; for this to work, it may be
+ * necessary to #include "float.h" or another system-dependent header
+ * file.
+ */
+
+/* strtod for IEEE-, VAX-, and IBM-arithmetic machines.
+ * (Note that IEEE arithmetic is disabled by gcc's -ffast-math flag.)
+ *
+ * This strtod returns a nearest machine number to the input decimal
+ * string (or sets errno to ERANGE).  With IEEE arithmetic, ties are
+ * broken by the IEEE round-even rule.  Otherwise ties are broken by
+ * biased rounding (add half and chop).
+ *
+ * Inspired loosely by William D. Clinger's paper "How to Read Floating
+ * Point Numbers Accurately" [Proc. ACM SIGPLAN '90, pp. 92-101].
+ *
+ * Modifications:
+ *
+ *	1. We only require IEEE, IBM, or VAX double-precision
+ *		arithmetic (not IEEE double-extended).
+ *	2. We get by with floating-point arithmetic in a case that
+ *		Clinger missed -- when we're computing d * 10^n
+ *		for a small integer d and the integer n is not too
+ *		much larger than 22 (the maximum integer k for which
+ *		we can represent 10^k exactly), we may be able to
+ *		compute (d*10^k) * 10^(e-k) with just one roundoff.
+ *	3. Rather than a bit-at-a-time adjustment of the binary
+ *		result in the hard case, we use floating-point
+ *		arithmetic to determine the adjustment to within
+ *		one bit; only in really hard cases do we need to
+ *		compute a second residual.
+ *	4. Because of 3., we don't need a large table of powers of 10
+ *		for ten-to-e (just some small tables, e.g. of 10^k
+ *		for 0 <= k <= 22).
+ */
+
+/*
+ * #define IEEE_8087 for IEEE-arithmetic machines where the least
+ *	significant byte has the lowest address.
+ * #define IEEE_MC68k for IEEE-arithmetic machines where the most
+ *	significant byte has the lowest address.
+ * #define Long int on machines with 32-bit ints and 64-bit longs.
+ * #define IBM for IBM mainframe-style floating-point arithmetic.
+ * #define VAX for VAX-style floating-point arithmetic (D_floating).
+ * #define No_leftright to omit left-right logic in fast floating-point
+ *	computation of dtoa.  This will cause dtoa modes 4 and 5 to be
+ *	treated the same as modes 2 and 3 for some inputs.
+ * #define Honor_FLT_ROUNDS if FLT_ROUNDS can assume the values 2 or 3
+ *	and strtod and dtoa should round accordingly.  Unless Trust_FLT_ROUNDS
+ *	is also #defined, fegetround() will be queried for the rounding mode.
+ *	Note that both FLT_ROUNDS and fegetround() are specified by the C99
+ *	standard (and are specified to be consistent, with fesetround()
+ *	affecting the value of FLT_ROUNDS), but that some (Linux) systems
+ *	do not work correctly in this regard, so using fegetround() is more
+ *	portable than using FLT_ROUNDS directly.
+ * #define Check_FLT_ROUNDS if FLT_ROUNDS can assume the values 2 or 3
+ *	and Honor_FLT_ROUNDS is not #defined.
+ * #define RND_PRODQUOT to use rnd_prod and rnd_quot (assembly routines
+ *	that use extended-precision instructions to compute rounded
+ *	products and quotients) with IBM.
+ * #define ROUND_BIASED for IEEE-format with biased rounding and arithmetic
+ *	that rounds toward +Infinity.
+ * #define ROUND_BIASED_without_Round_Up for IEEE-format with biased
+ *	rounding when the underlying floating-point arithmetic uses
+ *	unbiased rounding.  This prevents using ordinary floating-point
+ *	arithmetic when the result could be computed with one rounding error.
+ * #define Inaccurate_Divide for IEEE-format with correctly rounded
+ *	products but inaccurate quotients, e.g., for Intel i860.
+ * #define NO_LONG_LONG on machines that do not have a "long long"
+ *	integer type (of >= 64 bits).  On such machines, you can
+ *	#define Just_16 to store 16 bits per 32-bit Long when doing
+ *	high-precision integer arithmetic.  Whether this speeds things
+ *	up or slows things down depends on the machine and the number
+ *	being converted.  If long long is available and the name is
+ *	something other than "long long", #define Llong to be the name,
+ *	and if "unsigned Llong" does not work as an unsigned version of
+ *	Llong, #define ULLong to be the corresponding unsigned type.
+ * #define KR_headers for old-style C function headers.
+ * #define Bad_float_h if your system lacks a float.h or if it does not
+ *	define some or all of DBL_DIG, DBL_MAX_10_EXP, DBL_MAX_EXP,
+ *	FLT_RADIX, FLT_ROUNDS, and DBL_MAX.
+ * #define MALLOC your_malloc, where your_malloc(n) acts like malloc(n)
+ *	if memory is available and otherwise does something you deem
+ *	appropriate.  If MALLOC is undefined, malloc will be invoked
+ *	directly -- and assumed always to succeed.  Similarly, if you
+ *	want something other than the system's free() to be called to
+ *	recycle memory acquired from MALLOC, #define FREE to be the
+ *	name of the alternate routine.  (FREE or free is only called in
+ *	pathological cases, e.g., in a dtoa call after a dtoa return in
+ *	mode 3 with thousands of digits requested.)
+ * #define Omit_Private_Memory to omit logic (added Jan. 1998) for making
+ *	memory allocations from a private pool of memory when possible.
+ *	When used, the private pool is PRIVATE_MEM bytes long:  2304 bytes,
+ *	unless #defined to be a different length.  This default length
+ *	suffices to get rid of MALLOC calls except for unusual cases,
+ *	such as decimal-to-binary conversion of a very long string of
+ *	digits.  The longest string dtoa can return is about 751 bytes
+ *	long.  For conversions by strtod of strings of 800 digits and
+ *	all dtoa conversions in single-threaded executions with 8-byte
+ *	pointers, PRIVATE_MEM >= 7400 appears to suffice; with 4-byte
+ *	pointers, PRIVATE_MEM >= 7112 appears adequate.
+ * #define NO_INFNAN_CHECK if you do not wish to have INFNAN_CHECK
+ *	#defined automatically on IEEE systems.  On such systems,
+ *	when INFNAN_CHECK is #defined, strtod checks
+ *	for Infinity and NaN (case insensitively).  On some systems
+ *	(e.g., some HP systems), it may be necessary to #define NAN_WORD0
+ *	appropriately -- to the most significant word of a quiet NaN.
+ *	(On HP Series 700/800 machines, -DNAN_WORD0=0x7ff40000 works.)
+ *	When INFNAN_CHECK is #defined and No_Hex_NaN is not #defined,
+ *	strtod also accepts (case insensitively) strings of the form
+ *	NaN(x), where x is a string of hexadecimal digits and spaces;
+ *	if there is only one string of hexadecimal digits, it is taken
+ *	for the 52 fraction bits of the resulting NaN; if there are two
+ *	or more strings of hex digits, the first is for the high 20 bits,
+ *	the second and subsequent for the low 32 bits, with intervening
+ *	white space ignored; but if this results in none of the 52
+ *	fraction bits being on (an IEEE Infinity symbol), then NAN_WORD0
+ *	and NAN_WORD1 are used instead.
+ * #define MULTIPLE_THREADS if the system offers preemptively scheduled
+ *	multiple threads.  In this case, you must provide (or suitably
+ *	#define) two locks, acquired by ACQUIRE_DTOA_LOCK(n) and freed
+ *	by FREE_DTOA_LOCK(n) for n = 0 or 1.  (The second lock, accessed
+ *	in pow5mult, ensures lazy evaluation of only one copy of high
+ *	powers of 5; omitting this lock would introduce a small
+ *	probability of wasting memory, but would otherwise be harmless.)
+ *	You must also invoke freedtoa(s) to free the value s returned by
+ *	dtoa.  You may do so whether or not MULTIPLE_THREADS is #defined.
+ * #define NO_IEEE_Scale to disable new (Feb. 1997) logic in strtod that
+ *	avoids underflows on inputs whose result does not underflow.
+ *	If you #define NO_IEEE_Scale on a machine that uses IEEE-format
+ *	floating-point numbers and flushes underflows to zero rather
+ *	than implementing gradual underflow, then you must also #define
+ *	Sudden_Underflow.
+ * #define USE_LOCALE to use the current locale's decimal_point value.
+ * #define SET_INEXACT if IEEE arithmetic is being used and extra
+ *	computation should be done to set the inexact flag when the
+ *	result is inexact and avoid setting inexact when the result
+ *	is exact.  In this case, dtoa.c must be compiled in
+ *	an environment, perhaps provided by #include "dtoa.c" in a
+ *	suitable wrapper, that defines two functions,
+ *		int get_inexact(void);
+ *		void clear_inexact(void);
+ *	such that get_inexact() returns a nonzero value if the
+ *	inexact bit is already set, and clear_inexact() sets the
+ *	inexact bit to 0.  When SET_INEXACT is #defined, strtod
+ *	also does extra computations to set the underflow and overflow
+ *	flags when appropriate (i.e., when the result is tiny and
+ *	inexact or when it is a numeric value rounded to +-infinity).
+ * #define NO_ERRNO if strtod should not assign errno = ERANGE when
+ *	the result overflows to +-Infinity or underflows to 0.
+ * #define NO_HEX_FP to omit recognition of hexadecimal floating-point
+ *	values by strtod.
+ * #define NO_STRTOD_BIGCOMP (on IEEE-arithmetic systems only for now)
+ *	to disable logic for "fast" testing of very long input strings
+ *	to strtod.  This testing proceeds by initially truncating the
+ *	input string, then if necessary comparing the whole string with
+ *	a decimal expansion to decide close cases. This logic is only
+ *	used for input more than STRTOD_DIGLIM digits long (default 40).
+ */
+
+#define IEEE_8087
+#define NO_HEX_FP
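+
+/* The two definitions above configure this copy for IEEE-arithmetic
+ * machines where the least significant byte has the lowest address
+ * (little-endian) and omit strtod's recognition of hexadecimal
+ * floating-point input.
+ */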
+
+#ifndef Long
+#if __LP64__
+#define Long int
+#else
+#define Long long
+#endif
+#endif
+#ifndef ULong
+typedef unsigned Long ULong;
+#endif
+
+#ifdef DEBUG
+#include "stdio.h"
+#define Bug(x) {fprintf(stderr, "%s\n", x); exit(1);}
+#endif
+
+#include "stdlib.h"
+#include "string.h"
+
+#ifdef USE_LOCALE
+#include "locale.h"
+#endif
+
+#ifdef Honor_FLT_ROUNDS
+#ifndef Trust_FLT_ROUNDS
+#include <fenv.h>
+#endif
+#endif
+
+#ifdef MALLOC
+#ifdef KR_headers
+extern char *MALLOC();
+#else
+extern void *MALLOC(size_t);
+#endif
+#else
+#define MALLOC malloc
+#endif
+
+#ifndef Omit_Private_Memory
+#ifndef PRIVATE_MEM
+#define PRIVATE_MEM 2304
+#endif
+#define PRIVATE_mem ((unsigned)((PRIVATE_MEM+sizeof(double)-1)/sizeof(double)))
+static double private_mem[PRIVATE_mem], *pmem_next = private_mem;
+#endif
+
+#undef IEEE_Arith
+#undef Avoid_Underflow
+#ifdef IEEE_MC68k
+#define IEEE_Arith
+#endif
+#ifdef IEEE_8087
+#define IEEE_Arith
+#endif
+
+#ifdef IEEE_Arith
+#ifndef NO_INFNAN_CHECK
+#undef INFNAN_CHECK
+#define INFNAN_CHECK
+#endif
+#else
+#undef INFNAN_CHECK
+#define NO_STRTOD_BIGCOMP
+#endif
+
+#include "errno.h"
+
+#ifdef Bad_float_h
+
+#ifdef IEEE_Arith
+#define DBL_DIG 15
+#define DBL_MAX_10_EXP 308
+#define DBL_MAX_EXP 1024
+#define FLT_RADIX 2
+#endif /*IEEE_Arith*/
+
+#ifdef IBM
+#define DBL_DIG 16
+#define DBL_MAX_10_EXP 75
+#define DBL_MAX_EXP 63
+#define FLT_RADIX 16
+#define DBL_MAX 7.2370055773322621e+75
+#endif
+
+#ifdef VAX
+#define DBL_DIG 16
+#define DBL_MAX_10_EXP 38
+#define DBL_MAX_EXP 127
+#define FLT_RADIX 2
+#define DBL_MAX 1.7014118346046923e+38
+#endif
+
+#ifndef LONG_MAX
+#define LONG_MAX 2147483647
+#endif
+
+#else /* ifndef Bad_float_h */
+#include "float.h"
+#endif /* Bad_float_h */
+
+#ifndef __MATH_H__
+#include "math.h"
+#endif
+
+namespace dmg_fp {
+
+#ifndef CONST
+#ifdef KR_headers
+#define CONST /* blank */
+#else
+#define CONST const
+#endif
+#endif
+
+#if defined(IEEE_8087) + defined(IEEE_MC68k) + defined(VAX) + defined(IBM) != 1
+Exactly one of IEEE_8087, IEEE_MC68k, VAX, or IBM should be defined.
+#endif
+
+typedef union { double d; ULong L[2]; } U;
+
+#ifdef IEEE_8087
+#define word0(x) (x)->L[1]
+#define word1(x) (x)->L[0]
+#else
+#define word0(x) (x)->L[0]
+#define word1(x) (x)->L[1]
+#endif
+#define dval(x) (x)->d
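+
+/* The U union gives 32-bit access to the two halves of a double.
+ * word0 always names the half holding the sign, exponent, and high
+ * fraction bits; under IEEE_8087 (little-endian) that is L[1].
+ */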
+
+#ifndef STRTOD_DIGLIM
+#define STRTOD_DIGLIM 40
+#endif
+
+#ifdef DIGLIM_DEBUG
+extern int strtod_diglim;
+#else
+#define strtod_diglim STRTOD_DIGLIM
+#endif
+
+/* The following definition of Storeinc is appropriate for MIPS processors.
+ * An alternative that might be better on some machines is
+ * #define Storeinc(a,b,c) (*a++ = b << 16 | c & 0xffff)
+ */
+#if defined(IEEE_8087) + defined(VAX)
+#define Storeinc(a,b,c) (((unsigned short *)a)[1] = (unsigned short)b, \
+((unsigned short *)a)[0] = (unsigned short)c, a++)
+#else
+#define Storeinc(a,b,c) (((unsigned short *)a)[0] = (unsigned short)b, \
+((unsigned short *)a)[1] = (unsigned short)c, a++)
+#endif
+
+/* #define P DBL_MANT_DIG */
+/* Ten_pmax = floor(P*log(2)/log(5)) */
+/* Bletch = (highest power of 2 < DBL_MAX_10_EXP) / 16 */
+/* Quick_max = floor((P-1)*log(FLT_RADIX)/log(10) - 1) */
+/* Int_max = floor(P*log(FLT_RADIX)/log(10) - 1) */
+
+#ifdef IEEE_Arith
+#define Exp_shift  20
+#define Exp_shift1 20
+#define Exp_msk1    0x100000
+#define Exp_msk11   0x100000
+#define Exp_mask  0x7ff00000
+#define P 53
+#define Nbits 53
+#define Bias 1023
+#define Emax 1023
+#define Emin (-1022)
+#define Exp_1  0x3ff00000
+#define Exp_11 0x3ff00000
+#define Ebits 11
+#define Frac_mask  0xfffff
+#define Frac_mask1 0xfffff
+#define Ten_pmax 22
+#define Bletch 0x10
+#define Bndry_mask  0xfffff
+#define Bndry_mask1 0xfffff
+#define LSB 1
+#define Sign_bit 0x80000000
+#define Log2P 1
+#define Tiny0 0
+#define Tiny1 1
+#define Quick_max 14
+#define Int_max 14
+#ifndef NO_IEEE_Scale
+#define Avoid_Underflow
+#ifdef Flush_Denorm	/* debugging option */
+#undef Sudden_Underflow
+#endif
+#endif
+
+#ifndef Flt_Rounds
+#ifdef FLT_ROUNDS
+#define Flt_Rounds FLT_ROUNDS
+#else
+#define Flt_Rounds 1
+#endif
+#endif /*Flt_Rounds*/
+
+#ifdef Honor_FLT_ROUNDS
+#undef Check_FLT_ROUNDS
+#define Check_FLT_ROUNDS
+#else
+#define Rounding Flt_Rounds
+#endif
+
+#else /* ifndef IEEE_Arith */
+#undef Check_FLT_ROUNDS
+#undef Honor_FLT_ROUNDS
+#undef SET_INEXACT
+#undef  Sudden_Underflow
+#define Sudden_Underflow
+#ifdef IBM
+#undef Flt_Rounds
+#define Flt_Rounds 0
+#define Exp_shift  24
+#define Exp_shift1 24
+#define Exp_msk1   0x1000000
+#define Exp_msk11  0x1000000
+#define Exp_mask  0x7f000000
+#define P 14
+#define Nbits 56
+#define Bias 65
+#define Emax 248
+#define Emin (-260)
+#define Exp_1  0x41000000
+#define Exp_11 0x41000000
+#define Ebits 8	/* exponent has 7 bits, but 8 is the right value in b2d */
+#define Frac_mask  0xffffff
+#define Frac_mask1 0xffffff
+#define Bletch 4
+#define Ten_pmax 22
+#define Bndry_mask  0xefffff
+#define Bndry_mask1 0xffffff
+#define LSB 1
+#define Sign_bit 0x80000000
+#define Log2P 4
+#define Tiny0 0x100000
+#define Tiny1 0
+#define Quick_max 14
+#define Int_max 15
+#else /* VAX */
+#undef Flt_Rounds
+#define Flt_Rounds 1
+#define Exp_shift  23
+#define Exp_shift1 7
+#define Exp_msk1    0x80
+#define Exp_msk11   0x800000
+#define Exp_mask  0x7f80
+#define P 56
+#define Nbits 56
+#define Bias 129
+#define Emax 126
+#define Emin (-129)
+#define Exp_1  0x40800000
+#define Exp_11 0x4080
+#define Ebits 8
+#define Frac_mask  0x7fffff
+#define Frac_mask1 0xffff007f
+#define Ten_pmax 24
+#define Bletch 2
+#define Bndry_mask  0xffff007f
+#define Bndry_mask1 0xffff007f
+#define LSB 0x10000
+#define Sign_bit 0x8000
+#define Log2P 1
+#define Tiny0 0x80
+#define Tiny1 0
+#define Quick_max 15
+#define Int_max 15
+#endif /* IBM, VAX */
+#endif /* IEEE_Arith */
+
+#ifndef IEEE_Arith
+#define ROUND_BIASED
+#else
+#ifdef ROUND_BIASED_without_Round_Up
+#undef  ROUND_BIASED
+#define ROUND_BIASED
+#endif
+#endif
+
+#ifdef RND_PRODQUOT
+#define rounded_product(a,b) a = rnd_prod(a, b)
+#define rounded_quotient(a,b) a = rnd_quot(a, b)
+#ifdef KR_headers
+extern double rnd_prod(), rnd_quot();
+#else
+extern double rnd_prod(double, double), rnd_quot(double, double);
+#endif
+#else
+#define rounded_product(a,b) a *= b
+#define rounded_quotient(a,b) a /= b
+#endif
+
+#define Big0 (Frac_mask1 | Exp_msk1*(DBL_MAX_EXP+Bias-1))
+#define Big1 0xffffffff
+
+#ifndef Pack_32
+#define Pack_32
+#endif
+
+typedef struct BCinfo BCinfo;
+ struct
+BCinfo { int dp0, dp1, dplen, dsign, e0, inexact, nd, nd0, rounding, scale, uflchk; };
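+
+/* BCinfo carries bookkeeping between strtod and bigcomp: digit counts
+ * (nd, nd0), decimal-point offsets (dp0, dp1) and length (dplen), the
+ * adjusted decimal exponent (e0), the sign of the pending correction
+ * (dsign), the rounding mode, and the Avoid_Underflow scale factor.
+ */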
+
+#ifdef KR_headers
+#define FFFFFFFF ((((unsigned long)0xffff)<<16)|(unsigned long)0xffff)
+#else
+#define FFFFFFFF 0xffffffffUL
+#endif
+
+#ifdef NO_LONG_LONG
+#undef ULLong
+#ifdef Just_16
+#undef Pack_32
+/* When Pack_32 is not defined, we store 16 bits per 32-bit Long.
+ * This makes some inner loops simpler and sometimes saves work
+ * during multiplications, but it often seems to make things slightly
+ * slower.  Hence the default is now to store 32 bits per Long.
+ */
+#endif
+#else	/* long long available */
+#ifndef Llong
+#define Llong long long
+#endif
+#ifndef ULLong
+#define ULLong unsigned Llong
+#endif
+#endif /* NO_LONG_LONG */
+
+#ifndef MULTIPLE_THREADS
+#define ACQUIRE_DTOA_LOCK(n)	/*nothing*/
+#define FREE_DTOA_LOCK(n)	/*nothing*/
+#endif
+
+#define Kmax 7
+
+double strtod(const char *s00, char **se);
+char *dtoa(double d, int mode, int ndigits,
+			int *decpt, int *sign, char **rve);
+
+ struct
+Bigint {
+	struct Bigint *next;
+	int k, maxwds, sign, wds;
+	ULong x[1];
+	};
+
+ typedef struct Bigint Bigint;
+
+ static Bigint *freelist[Kmax+1];
+
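+/* Balloc returns a Bigint whose x[] array holds 1 << k ULongs.
+ * Blocks with k <= Kmax are recycled through freelist[] above;
+ * larger blocks are returned to free() (or FREE) by Bfree.
+ */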
+ static Bigint *
+Balloc
+#ifdef KR_headers
+	(k) int k;
+#else
+	(int k)
+#endif
+{
+	int x;
+	Bigint *rv;
+#ifndef Omit_Private_Memory
+	unsigned int len;
+#endif
+
+	ACQUIRE_DTOA_LOCK(0);
+	/* The k > Kmax case does not need ACQUIRE_DTOA_LOCK(0), */
+	/* but this case seems very unlikely. */
+	if (k <= Kmax && freelist[k]) {
+		rv = freelist[k];
+		freelist[k] = rv->next;
+		}
+	else {
+		x = 1 << k;
+#ifdef Omit_Private_Memory
+		rv = (Bigint *)MALLOC(sizeof(Bigint) + (x-1)*sizeof(ULong));
+#else
+		len = (sizeof(Bigint) + (x-1)*sizeof(ULong) + sizeof(double) - 1)
+			/sizeof(double);
+		if (k <= Kmax && pmem_next - private_mem + len <= PRIVATE_mem) {
+			rv = (Bigint*)pmem_next;
+			pmem_next += len;
+			}
+		else
+			rv = (Bigint*)MALLOC(len*sizeof(double));
+#endif
+		rv->k = k;
+		rv->maxwds = x;
+		}
+	FREE_DTOA_LOCK(0);
+	rv->sign = rv->wds = 0;
+	return rv;
+	}
+
+ static void
+Bfree
+#ifdef KR_headers
+	(v) Bigint *v;
+#else
+	(Bigint *v)
+#endif
+{
+	if (v) {
+		if (v->k > Kmax)
+#ifdef FREE
+			FREE((void*)v);
+#else
+			free((void*)v);
+#endif
+		else {
+			ACQUIRE_DTOA_LOCK(0);
+			v->next = freelist[v->k];
+			freelist[v->k] = v;
+			FREE_DTOA_LOCK(0);
+			}
+		}
+	}
+
+#define Bcopy(x,y) memcpy((char *)&x->sign, (char *)&y->sign, \
+y->wds*sizeof(Long) + 2*sizeof(int))
+
+ static Bigint *
+multadd
+#ifdef KR_headers
+	(b, m, a) Bigint *b; int m, a;
+#else
+	(Bigint *b, int m, int a)	/* multiply by m and add a */
+#endif
+{
+	int i, wds;
+#ifdef ULLong
+	ULong *x;
+	ULLong carry, y;
+#else
+	ULong carry, *x, y;
+#ifdef Pack_32
+	ULong xi, z;
+#endif
+#endif
+	Bigint *b1;
+
+	wds = b->wds;
+	x = b->x;
+	i = 0;
+	carry = a;
+	do {
+#ifdef ULLong
+		y = *x * (ULLong)m + carry;
+		carry = y >> 32;
+		*x++ = y & FFFFFFFF;
+#else
+#ifdef Pack_32
+		xi = *x;
+		y = (xi & 0xffff) * m + carry;
+		z = (xi >> 16) * m + (y >> 16);
+		carry = z >> 16;
+		*x++ = (z << 16) + (y & 0xffff);
+#else
+		y = *x * m + carry;
+		carry = y >> 16;
+		*x++ = y & 0xffff;
+#endif
+#endif
+		}
+		while(++i < wds);
+	if (carry) {
+		if (wds >= b->maxwds) {
+			b1 = Balloc(b->k+1);
+			Bcopy(b1, b);
+			Bfree(b);
+			b = b1;
+			}
+		b->x[wds++] = (ULong)carry;
+		b->wds = wds;
+		}
+	return b;
+	}
+
+ static Bigint *
+s2b
+#ifdef KR_headers
+	(s, nd0, nd, y9, dplen) CONST char *s; int nd0, nd, dplen; ULong y9;
+#else
+	(const char *s, int nd0, int nd, ULong y9, int dplen)
+#endif
+{
+	Bigint *b;
+	int i, k;
+	Long x, y;
+
+	x = (nd + 8) / 9;
+	for(k = 0, y = 1; x > y; y <<= 1, k++) ;
+#ifdef Pack_32
+	b = Balloc(k);
+	b->x[0] = y9;
+	b->wds = 1;
+#else
+	b = Balloc(k+1);
+	b->x[0] = y9 & 0xffff;
+	b->wds = (b->x[1] = y9 >> 16) ? 2 : 1;
+#endif
+
+	i = 9;
+	if (9 < nd0) {
+		s += 9;
+		do b = multadd(b, 10, *s++ - '0');
+			while(++i < nd0);
+		s += dplen;
+		}
+	else
+		s += dplen + 9;
+	for(; i < nd; i++)
+		b = multadd(b, 10, *s++ - '0');
+	return b;
+	}
+
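+/* hi0bits(x) returns the number of leading zero bits in x,
+ * e.g. hi0bits(1) == 31; it returns 32 when x == 0.
+ */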
+ static int
+hi0bits
+#ifdef KR_headers
+	(x) ULong x;
+#else
+	(ULong x)
+#endif
+{
+	int k = 0;
+
+	if (!(x & 0xffff0000)) {
+		k = 16;
+		x <<= 16;
+		}
+	if (!(x & 0xff000000)) {
+		k += 8;
+		x <<= 8;
+		}
+	if (!(x & 0xf0000000)) {
+		k += 4;
+		x <<= 4;
+		}
+	if (!(x & 0xc0000000)) {
+		k += 2;
+		x <<= 2;
+		}
+	if (!(x & 0x80000000)) {
+		k++;
+		if (!(x & 0x40000000))
+			return 32;
+		}
+	return k;
+	}
+
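+/* lo0bits counts the trailing zero bits of *y, shifts *y right by
+ * that amount, and returns the count (32 when *y == 0).
+ */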
+ static int
+lo0bits
+#ifdef KR_headers
+	(y) ULong *y;
+#else
+	(ULong *y)
+#endif
+{
+	int k;
+	ULong x = *y;
+
+	if (x & 7) {
+		if (x & 1)
+			return 0;
+		if (x & 2) {
+			*y = x >> 1;
+			return 1;
+			}
+		*y = x >> 2;
+		return 2;
+		}
+	k = 0;
+	if (!(x & 0xffff)) {
+		k = 16;
+		x >>= 16;
+		}
+	if (!(x & 0xff)) {
+		k += 8;
+		x >>= 8;
+		}
+	if (!(x & 0xf)) {
+		k += 4;
+		x >>= 4;
+		}
+	if (!(x & 0x3)) {
+		k += 2;
+		x >>= 2;
+		}
+	if (!(x & 1)) {
+		k++;
+		x >>= 1;
+		if (!x)
+			return 32;
+		}
+	*y = x;
+	return k;
+	}
+
+ static Bigint *
+i2b
+#ifdef KR_headers
+	(i) int i;
+#else
+	(int i)
+#endif
+{
+	Bigint *b;
+
+	b = Balloc(1);
+	b->x[0] = i;
+	b->wds = 1;
+	return b;
+	}
+
+ static Bigint *
+mult
+#ifdef KR_headers
+	(a, b) Bigint *a, *b;
+#else
+	(Bigint *a, Bigint *b)
+#endif
+{
+	Bigint *c;
+	int k, wa, wb, wc;
+	ULong *x, *xa, *xae, *xb, *xbe, *xc, *xc0;
+	ULong y;
+#ifdef ULLong
+	ULLong carry, z;
+#else
+	ULong carry, z;
+#ifdef Pack_32
+	ULong z2;
+#endif
+#endif
+
+	if (a->wds < b->wds) {
+		c = a;
+		a = b;
+		b = c;
+		}
+	k = a->k;
+	wa = a->wds;
+	wb = b->wds;
+	wc = wa + wb;
+	if (wc > a->maxwds)
+		k++;
+	c = Balloc(k);
+	for(x = c->x, xa = x + wc; x < xa; x++)
+		*x = 0;
+	xa = a->x;
+	xae = xa + wa;
+	xb = b->x;
+	xbe = xb + wb;
+	xc0 = c->x;
+#ifdef ULLong
+	for(; xb < xbe; xc0++) {
+		y = *xb++;
+		if (y) {
+			x = xa;
+			xc = xc0;
+			carry = 0;
+			do {
+				z = *x++ * (ULLong)y + *xc + carry;
+				carry = z >> 32;
+				*xc++ = z & FFFFFFFF;
+				}
+				while(x < xae);
+			*xc = (ULong)carry;
+			}
+		}
+#else
+#ifdef Pack_32
+	for(; xb < xbe; xb++, xc0++) {
+		if (y = *xb & 0xffff) {
+			x = xa;
+			xc = xc0;
+			carry = 0;
+			do {
+				z = (*x & 0xffff) * y + (*xc & 0xffff) + carry;
+				carry = z >> 16;
+				z2 = (*x++ >> 16) * y + (*xc >> 16) + carry;
+				carry = z2 >> 16;
+				Storeinc(xc, z2, z);
+				}
+				while(x < xae);
+			*xc = carry;
+			}
+		if (y = *xb >> 16) {
+			x = xa;
+			xc = xc0;
+			carry = 0;
+			z2 = *xc;
+			do {
+				z = (*x & 0xffff) * y + (*xc >> 16) + carry;
+				carry = z >> 16;
+				Storeinc(xc, z, z2);
+				z2 = (*x++ >> 16) * y + (*xc & 0xffff) + carry;
+				carry = z2 >> 16;
+				}
+				while(x < xae);
+			*xc = z2;
+			}
+		}
+#else
+	for(; xb < xbe; xc0++) {
+		if (y = *xb++) {
+			x = xa;
+			xc = xc0;
+			carry = 0;
+			do {
+				z = *x++ * y + *xc + carry;
+				carry = z >> 16;
+				*xc++ = z & 0xffff;
+				}
+				while(x < xae);
+			*xc = carry;
+			}
+		}
+#endif
+#endif
+	for(xc0 = c->x, xc = xc0 + wc; wc > 0 && !*--xc; --wc) ;
+	c->wds = wc;
+	return c;
+	}
+
+ static Bigint *p5s;
+
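+/* pow5mult(b, k) returns b * 5^k.  k mod 4 is handled by the small
+ * table p05; the remaining factor comes from p5s, a shared chain of
+ * 5^4, 5^8, 5^16, ... grown lazily by repeated squaring.
+ */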
+ static Bigint *
+pow5mult
+#ifdef KR_headers
+	(b, k) Bigint *b; int k;
+#else
+	(Bigint *b, int k)
+#endif
+{
+	Bigint *b1, *p5, *p51;
+	int i;
+	static int p05[3] = { 5, 25, 125 };
+
+	i = k & 3;
+	if (i)
+		b = multadd(b, p05[i-1], 0);
+
+	if (!(k >>= 2))
+		return b;
+	p5 = p5s;
+	if (!p5) {
+		/* first time */
+#ifdef MULTIPLE_THREADS
+		ACQUIRE_DTOA_LOCK(1);
+		p5 = p5s;
+		if (!p5) {
+			p5 = p5s = i2b(625);
+			p5->next = 0;
+			}
+		FREE_DTOA_LOCK(1);
+#else
+		p5 = p5s = i2b(625);
+		p5->next = 0;
+#endif
+		}
+	for(;;) {
+		if (k & 1) {
+			b1 = mult(b, p5);
+			Bfree(b);
+			b = b1;
+			}
+		if (!(k >>= 1))
+			break;
+		p51 = p5->next;
+		if (!p51) {
+#ifdef MULTIPLE_THREADS
+			ACQUIRE_DTOA_LOCK(1);
+			p51 = p5->next;
+			if (!p51) {
+				p51 = p5->next = mult(p5,p5);
+				p51->next = 0;
+				}
+			FREE_DTOA_LOCK(1);
+#else
+			p51 = p5->next = mult(p5,p5);
+			p51->next = 0;
+#endif
+			}
+		p5 = p51;
+		}
+	return b;
+	}
+
+ static Bigint *
+lshift
+#ifdef KR_headers
+	(b, k) Bigint *b; int k;
+#else
+	(Bigint *b, int k)
+#endif
+{
+	int i, k1, n, n1;
+	Bigint *b1;
+	ULong *x, *x1, *xe, z;
+
+#ifdef Pack_32
+	n = k >> 5;
+#else
+	n = k >> 4;
+#endif
+	k1 = b->k;
+	n1 = n + b->wds + 1;
+	for(i = b->maxwds; n1 > i; i <<= 1)
+		k1++;
+	b1 = Balloc(k1);
+	x1 = b1->x;
+	for(i = 0; i < n; i++)
+		*x1++ = 0;
+	x = b->x;
+	xe = x + b->wds;
+#ifdef Pack_32
+	if (k &= 0x1f) {
+		k1 = 32 - k;
+		z = 0;
+		do {
+			*x1++ = *x << k | z;
+			z = *x++ >> k1;
+			}
+			while(x < xe);
+		*x1 = z;
+		if (*x1)
+			++n1;
+		}
+#else
+	if (k &= 0xf) {
+		k1 = 16 - k;
+		z = 0;
+		do {
+			*x1++ = *x << k  & 0xffff | z;
+			z = *x++ >> k1;
+			}
+			while(x < xe);
+		if (*x1 = z)
+			++n1;
+		}
+#endif
+	else do
+		*x1++ = *x++;
+		while(x < xe);
+	b1->wds = n1 - 1;
+	Bfree(b);
+	return b1;
+	}
+
+ static int
+cmp
+#ifdef KR_headers
+	(a, b) Bigint *a, *b;
+#else
+	(Bigint *a, Bigint *b)
+#endif
+{
+	ULong *xa, *xa0, *xb, *xb0;
+	int i, j;
+
+	i = a->wds;
+	j = b->wds;
+#ifdef DEBUG
+	if (i > 1 && !a->x[i-1])
+		Bug("cmp called with a->x[a->wds-1] == 0");
+	if (j > 1 && !b->x[j-1])
+		Bug("cmp called with b->x[b->wds-1] == 0");
+#endif
+	if (i -= j)
+		return i;
+	xa0 = a->x;
+	xa = xa0 + j;
+	xb0 = b->x;
+	xb = xb0 + j;
+	for(;;) {
+		if (*--xa != *--xb)
+			return *xa < *xb ? -1 : 1;
+		if (xa <= xa0)
+			break;
+		}
+	return 0;
+	}
+
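+/* diff returns |a - b|; the result's sign field is 1 when a < b. */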
+ static Bigint *
+diff
+#ifdef KR_headers
+	(a, b) Bigint *a, *b;
+#else
+	(Bigint *a, Bigint *b)
+#endif
+{
+	Bigint *c;
+	int i, wa, wb;
+	ULong *xa, *xae, *xb, *xbe, *xc;
+#ifdef ULLong
+	ULLong borrow, y;
+#else
+	ULong borrow, y;
+#ifdef Pack_32
+	ULong z;
+#endif
+#endif
+
+	i = cmp(a,b);
+	if (!i) {
+		c = Balloc(0);
+		c->wds = 1;
+		c->x[0] = 0;
+		return c;
+		}
+	if (i < 0) {
+		c = a;
+		a = b;
+		b = c;
+		i = 1;
+		}
+	else
+		i = 0;
+	c = Balloc(a->k);
+	c->sign = i;
+	wa = a->wds;
+	xa = a->x;
+	xae = xa + wa;
+	wb = b->wds;
+	xb = b->x;
+	xbe = xb + wb;
+	xc = c->x;
+	borrow = 0;
+#ifdef ULLong
+	do {
+		y = (ULLong)*xa++ - *xb++ - borrow;
+		borrow = y >> 32 & (ULong)1;
+		*xc++ = y & FFFFFFFF;
+		}
+		while(xb < xbe);
+	while(xa < xae) {
+		y = *xa++ - borrow;
+		borrow = y >> 32 & (ULong)1;
+		*xc++ = y & FFFFFFFF;
+		}
+#else
+#ifdef Pack_32
+	do {
+		y = (*xa & 0xffff) - (*xb & 0xffff) - borrow;
+		borrow = (y & 0x10000) >> 16;
+		z = (*xa++ >> 16) - (*xb++ >> 16) - borrow;
+		borrow = (z & 0x10000) >> 16;
+		Storeinc(xc, z, y);
+		}
+		while(xb < xbe);
+	while(xa < xae) {
+		y = (*xa & 0xffff) - borrow;
+		borrow = (y & 0x10000) >> 16;
+		z = (*xa++ >> 16) - borrow;
+		borrow = (z & 0x10000) >> 16;
+		Storeinc(xc, z, y);
+		}
+#else
+	do {
+		y = *xa++ - *xb++ - borrow;
+		borrow = (y & 0x10000) >> 16;
+		*xc++ = y & 0xffff;
+		}
+		while(xb < xbe);
+	while(xa < xae) {
+		y = *xa++ - borrow;
+		borrow = (y & 0x10000) >> 16;
+		*xc++ = y & 0xffff;
+		}
+#endif
+#endif
+	while(!*--xc)
+		wa--;
+	c->wds = wa;
+	return c;
+	}
+
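+/* ulp(x) returns the value of one unit in the last place of |x|,
+ * i.e. the weight of the least significant bit of its fraction.
+ */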
+ static double
+ulp
+#ifdef KR_headers
+	(x) U *x;
+#else
+	(U *x)
+#endif
+{
+	Long L;
+	U u;
+
+	L = (word0(x) & Exp_mask) - (P-1)*Exp_msk1;
+#ifndef Avoid_Underflow
+#ifndef Sudden_Underflow
+	if (L > 0) {
+#endif
+#endif
+#ifdef IBM
+		L |= Exp_msk1 >> 4;
+#endif
+		word0(&u) = L;
+		word1(&u) = 0;
+#ifndef Avoid_Underflow
+#ifndef Sudden_Underflow
+		}
+	else {
+		L = -L >> Exp_shift;
+		if (L < Exp_shift) {
+			word0(&u) = 0x80000 >> L;
+			word1(&u) = 0;
+			}
+		else {
+			word0(&u) = 0;
+			L -= Exp_shift;
+			word1(&u) = L >= 31 ? 1 : 1 << 31 - L;
+			}
+		}
+#endif
+#endif
+	return dval(&u);
+	}
+
+ static double
+b2d
+#ifdef KR_headers
+	(a, e) Bigint *a; int *e;
+#else
+	(Bigint *a, int *e)
+#endif
+{
+	ULong *xa, *xa0, w, y, z;
+	int k;
+	U d;
+#ifdef VAX
+	ULong d0, d1;
+#else
+#define d0 word0(&d)
+#define d1 word1(&d)
+#endif
+
+	xa0 = a->x;
+	xa = xa0 + a->wds;
+	y = *--xa;
+#ifdef DEBUG
+	if (!y) Bug("zero y in b2d");
+#endif
+	k = hi0bits(y);
+	*e = 32 - k;
+#ifdef Pack_32
+	if (k < Ebits) {
+		d0 = Exp_1 | y >> (Ebits - k);
+		w = xa > xa0 ? *--xa : 0;
+		d1 = y << ((32-Ebits) + k) | w >> (Ebits - k);
+		goto ret_d;
+		}
+	z = xa > xa0 ? *--xa : 0;
+	if (k -= Ebits) {
+		d0 = Exp_1 | y << k | z >> (32 - k);
+		y = xa > xa0 ? *--xa : 0;
+		d1 = z << k | y >> (32 - k);
+		}
+	else {
+		d0 = Exp_1 | y;
+		d1 = z;
+		}
+#else
+	if (k < Ebits + 16) {
+		z = xa > xa0 ? *--xa : 0;
+		d0 = Exp_1 | y << k - Ebits | z >> Ebits + 16 - k;
+		w = xa > xa0 ? *--xa : 0;
+		y = xa > xa0 ? *--xa : 0;
+		d1 = z << k + 16 - Ebits | w << k - Ebits | y >> 16 + Ebits - k;
+		goto ret_d;
+		}
+	z = xa > xa0 ? *--xa : 0;
+	w = xa > xa0 ? *--xa : 0;
+	k -= Ebits + 16;
+	d0 = Exp_1 | y << k + 16 | z << k | w >> 16 - k;
+	y = xa > xa0 ? *--xa : 0;
+	d1 = w << k + 16 | y << k;
+#endif
+ ret_d:
+#ifdef VAX
+	word0(&d) = d0 >> 16 | d0 << 16;
+	word1(&d) = d1 >> 16 | d1 << 16;
+#else
+#undef d0
+#undef d1
+#endif
+	return dval(&d);
+	}
+
+ static Bigint *
+d2b
+#ifdef KR_headers
+	(d, e, bits) U *d; int *e, *bits;
+#else
+	(U *d, int *e, int *bits)
+#endif
+{
+	Bigint *b;
+	int de, k;
+	ULong *x, y, z;
+#ifndef Sudden_Underflow
+	int i;
+#endif
+#ifdef VAX
+	ULong d0, d1;
+	d0 = word0(d) >> 16 | word0(d) << 16;
+	d1 = word1(d) >> 16 | word1(d) << 16;
+#else
+#define d0 word0(d)
+#define d1 word1(d)
+#endif
+
+#ifdef Pack_32
+	b = Balloc(1);
+#else
+	b = Balloc(2);
+#endif
+	x = b->x;
+
+	z = d0 & Frac_mask;
+	d0 &= 0x7fffffff;	/* clear sign bit, which we ignore */
+#ifdef Sudden_Underflow
+	de = (int)(d0 >> Exp_shift);
+#ifndef IBM
+	z |= Exp_msk11;
+#endif
+#else
+	de = (int)(d0 >> Exp_shift);
+	if (de)
+		z |= Exp_msk1;
+#endif
+#ifdef Pack_32
+	y = d1;
+	if (y) {
+		k = lo0bits(&y);
+		if (k) {
+			x[0] = y | z << (32 - k);
+			z >>= k;
+			}
+		else
+			x[0] = y;
+		x[1] = z;
+		b->wds = x[1] ? 2 : 1;
+#ifndef Sudden_Underflow
+		i = b->wds;
+#endif
+		}
+	else {
+		k = lo0bits(&z);
+		x[0] = z;
+#ifndef Sudden_Underflow
+		i =
+#endif
+		    b->wds = 1;
+		k += 32;
+		}
+#else
+	if (y = d1) {
+		if (k = lo0bits(&y))
+			if (k >= 16) {
+				x[0] = y | z << 32 - k & 0xffff;
+				x[1] = z >> k - 16 & 0xffff;
+				x[2] = z >> k;
+				i = 2;
+				}
+			else {
+				x[0] = y & 0xffff;
+				x[1] = y >> 16 | z << 16 - k & 0xffff;
+				x[2] = z >> k & 0xffff;
+				x[3] = z >> k+16;
+				i = 3;
+				}
+		else {
+			x[0] = y & 0xffff;
+			x[1] = y >> 16;
+			x[2] = z & 0xffff;
+			x[3] = z >> 16;
+			i = 3;
+			}
+		}
+	else {
+#ifdef DEBUG
+		if (!z)
+			Bug("Zero passed to d2b");
+#endif
+		k = lo0bits(&z);
+		if (k >= 16) {
+			x[0] = z;
+			i = 0;
+			}
+		else {
+			x[0] = z & 0xffff;
+			x[1] = z >> 16;
+			i = 1;
+			}
+		k += 32;
+		}
+	while(!x[i])
+		--i;
+	b->wds = i + 1;
+#endif
+#ifndef Sudden_Underflow
+	if (de) {
+#endif
+#ifdef IBM
+		*e = (de - Bias - (P-1) << 2) + k;
+		*bits = 4*P + 8 - k - hi0bits(word0(d) & Frac_mask);
+#else
+		*e = de - Bias - (P-1) + k;
+		*bits = P - k;
+#endif
+#ifndef Sudden_Underflow
+		}
+	else {
+		*e = de - Bias - (P-1) + 1 + k;
+#ifdef Pack_32
+		*bits = 32*i - hi0bits(x[i-1]);
+#else
+		*bits = (i+2)*16 - hi0bits(x[i]);
+#endif
+		}
+#endif
+	return b;
+	}
+#undef d0
+#undef d1
+
+ static double
+ratio
+#ifdef KR_headers
+	(a, b) Bigint *a, *b;
+#else
+	(Bigint *a, Bigint *b)
+#endif
+{
+	U da, db;
+	int k, ka, kb;
+
+	dval(&da) = b2d(a, &ka);
+	dval(&db) = b2d(b, &kb);
+#ifdef Pack_32
+	k = ka - kb + 32*(a->wds - b->wds);
+#else
+	k = ka - kb + 16*(a->wds - b->wds);
+#endif
+#ifdef IBM
+	if (k > 0) {
+		word0(&da) += (k >> 2)*Exp_msk1;
+		if (k &= 3)
+			dval(&da) *= 1 << k;
+		}
+	else {
+		k = -k;
+		word0(&db) += (k >> 2)*Exp_msk1;
+		if (k &= 3)
+			dval(&db) *= 1 << k;
+		}
+#else
+	if (k > 0)
+		word0(&da) += k*Exp_msk1;
+	else {
+		k = -k;
+		word0(&db) += k*Exp_msk1;
+		}
+#endif
+	return dval(&da) / dval(&db);
+	}
+
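+/* tens[] lists the powers of ten (10^0 through 10^Ten_pmax) that are
+ * exactly representable, so multiplying or dividing by an entry costs
+ * at most one rounding error.
+ */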
+ static CONST double
+tens[] = {
+		1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
+		1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
+		1e20, 1e21, 1e22
+#ifdef VAX
+		, 1e23, 1e24
+#endif
+		};
+
+ static CONST double
+#ifdef IEEE_Arith
+bigtens[] = { 1e16, 1e32, 1e64, 1e128, 1e256 };
+static CONST double tinytens[] = { 1e-16, 1e-32, 1e-64, 1e-128,
+#ifdef Avoid_Underflow
+		9007199254740992.*9007199254740992.e-256
+		/* = 2^106 * 1e-256 */
+#else
+		1e-256
+#endif
+		};
+/* The factor of 2^53 in tinytens[4] helps us avoid setting the underflow */
+/* flag unnecessarily.  It leads to a song and dance at the end of strtod. */
+#define Scale_Bit 0x10
+#define n_bigtens 5
+#else
+#ifdef IBM
+bigtens[] = { 1e16, 1e32, 1e64 };
+static CONST double tinytens[] = { 1e-16, 1e-32, 1e-64 };
+#define n_bigtens 3
+#else
+bigtens[] = { 1e16, 1e32 };
+static CONST double tinytens[] = { 1e-16, 1e-32 };
+#define n_bigtens 2
+#endif
+#endif
+
+#undef Need_Hexdig
+#ifdef INFNAN_CHECK
+#ifndef No_Hex_NaN
+#define Need_Hexdig
+#endif
+#endif
+
+#ifndef Need_Hexdig
+#ifndef NO_HEX_FP
+#define Need_Hexdig
+#endif
+#endif
+
+#ifdef Need_Hexdig /*{*/
+#if 0
+static unsigned char hexdig[256];
+
+ static void
+htinit(unsigned char *h, unsigned char *s, int inc)
+{
+	int i, j;
+	for(i = 0; (j = s[i]) !=0; i++)
+		h[j] = (unsigned char)(i + inc);
+	}
+
+ static void
+hexdig_init(void)	/* Use of hexdig_init omitted 20121220 to avoid a */
+			/* race condition when multiple threads are used. */
+{
+#define USC (unsigned char *)
+	htinit(hexdig, USC "0123456789", 0x10);
+	htinit(hexdig, USC "abcdef", 0x10 + 10);
+	htinit(hexdig, USC "ABCDEF", 0x10 + 10);
+	}
+#else
+static const unsigned char hexdig[256] = {
+	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+	16,17,18,19,20,21,22,23,24,25,0,0,0,0,0,0,
+	0,26,27,28,29,30,31,0,0,0,0,0,0,0,0,0,
+	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+	0,26,27,28,29,30,31,0,0,0,0,0,0,0,0,0,
+	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+	};
+#endif
+#endif /* } Need_Hexdig */
+
+#ifdef INFNAN_CHECK
+
+#ifndef NAN_WORD0
+#define NAN_WORD0 0x7ff80000
+#endif
+
+#ifndef NAN_WORD1
+#define NAN_WORD1 0
+#endif
+
+ static int
+match
+#ifdef KR_headers
+	(sp, t) char **sp, *t;
+#else
+	(const char **sp, const char *t)
+#endif
+{
+	int c, d;
+	CONST char *s = *sp;
+
+	for(d = *t++; d; d = *t++) {
+		if ((c = *++s) >= 'A' && c <= 'Z')
+			c += 'a' - 'A';
+		if (c != d)
+			return 0;
+		}
+	*sp = s + 1;
+	return 1;
+	}
+
+#ifndef No_Hex_NaN
+ static void
+hexnan
+#ifdef KR_headers
+	(rvp, sp) U *rvp; CONST char **sp;
+#else
+	(U *rvp, const char **sp)
+#endif
+{
+	ULong c, x[2];
+	CONST char *s;
+	int c1, havedig, udx0, xshift;
+
+	/**** if (!hexdig['0']) hexdig_init(); ****/
+	x[0] = x[1] = 0;
+	havedig = xshift = 0;
+	udx0 = 1;
+	s = *sp;
+	/* allow optional initial 0x or 0X */
+	for(c = *(CONST unsigned char*)(s+1); c && c <= ' '; c = *(CONST unsigned char*)(s+1))
+		++s;
+	if (s[1] == '0' && (s[2] == 'x' || s[2] == 'X'))
+		s += 2;
+	for(c = *(CONST unsigned char*)++s; c; c = *(CONST unsigned char*)++s) {
+		c1 = hexdig[c];
+		if (c1)
+			c  = c1 & 0xf;
+		else if (c <= ' ') {
+			if (udx0 && havedig) {
+				udx0 = 0;
+				xshift = 1;
+				}
+			continue;
+			}
+#ifdef GDTOA_NON_PEDANTIC_NANCHECK
+		else if (/*(*/ c == ')' && havedig) {
+			*sp = s + 1;
+			break;
+			}
+		else
+			return;	/* invalid form: don't change *sp */
+#else
+		else {
+			do {
+				if (/*(*/ c == ')') {
+					*sp = s + 1;
+					break;
+					}
+				c = *++s;
+				} while(c);
+			break;
+			}
+#endif
+		havedig = 1;
+		if (xshift) {
+			xshift = 0;
+			x[0] = x[1];
+			x[1] = 0;
+			}
+		if (udx0)
+			x[0] = (x[0] << 4) | (x[1] >> 28);
+		x[1] = (x[1] << 4) | c;
+		}
+	if ((x[0] &= 0xfffff) || x[1]) {
+		word0(rvp) = Exp_mask | x[0];
+		word1(rvp) = x[1];
+		}
+	}
+#endif /*No_Hex_NaN*/
+#endif /* INFNAN_CHECK */
+
+#ifdef Pack_32
+#define ULbits 32
+#define kshift 5
+#define kmask 31
+#else
+#define ULbits 16
+#define kshift 4
+#define kmask 15
+#endif
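+
+/* ULbits is the number of payload bits per Long; a bit index i maps
+ * to word i >> kshift and bit i & kmask within that word.
+ */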
+
+#if !defined(NO_HEX_FP) || defined(Honor_FLT_ROUNDS) /*{*/
+ static Bigint *
+#ifdef KR_headers
+increment(b) Bigint *b;
+#else
+increment(Bigint *b)
+#endif
+{
+	ULong *x, *xe;
+	Bigint *b1;
+
+	x = b->x;
+	xe = x + b->wds;
+	do {
+		if (*x < (ULong)0xffffffffL) {
+			++*x;
+			return b;
+			}
+		*x++ = 0;
+		} while(x < xe);
+	{
+		if (b->wds >= b->maxwds) {
+			b1 = Balloc(b->k+1);
+			Bcopy(b1,b);
+			Bfree(b);
+			b = b1;
+			}
+		b->x[b->wds++] = 1;
+		}
+	return b;
+	}
+
+#endif /*}*/
+
+#ifndef NO_HEX_FP /*{*/
+
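+/* rshift(b, k) shifts b right by k bits in place, discarding the bits
+ * shifted out; b->wds drops to 0 (with x[0] = 0) if nothing remains.
+ */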
+ static void
+#ifdef KR_headers
+rshift(b, k) Bigint *b; int k;
+#else
+rshift(Bigint *b, int k)
+#endif
+{
+	ULong *x, *x1, *xe, y;
+	int n;
+
+	x = x1 = b->x;
+	n = k >> kshift;
+	if (n < b->wds) {
+		xe = x + b->wds;
+		x += n;
+		if (k &= kmask) {
+			n = 32 - k;
+			y = *x++ >> k;
+			while(x < xe) {
+				*x1++ = (y | (*x << n)) & 0xffffffff;
+				y = *x++ >> k;
+				}
+			if ((*x1 = y) !=0)
+				x1++;
+			}
+		else
+			while(x < xe)
+				*x1++ = *x++;
+		}
+	if ((b->wds = x1 - b->x) == 0)
+		b->x[0] = 0;
+	}
+
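+/* any_on(b, k) returns 1 if any of the k low-order bits of b are set,
+ * and 0 otherwise; callers use it to detect bits lost to shifting.
+ */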
+ static ULong
+#ifdef KR_headers
+any_on(b, k) Bigint *b; int k;
+#else
+any_on(Bigint *b, int k)
+#endif
+{
+	int n, nwds;
+	ULong *x, *x0, x1, x2;
+
+	x = b->x;
+	nwds = b->wds;
+	n = k >> kshift;
+	if (n > nwds)
+		n = nwds;
+	else if (n < nwds && (k &= kmask)) {
+		x1 = x2 = x[n];
+		x1 >>= k;
+		x1 <<= k;
+		if (x1 != x2)
+			return 1;
+		}
+	x0 = x;
+	x += n;
+	while(x > x0)
+		if (*--x)
+			return 1;
+	return 0;
+	}
+
+enum {	/* rounding values: same as FLT_ROUNDS */
+	Round_zero = 0,
+	Round_near = 1,
+	Round_up = 2,
+	Round_down = 3
+	};
+
+ void
+#ifdef KR_headers
+gethex(sp, rvp, rounding, sign)
+	CONST char **sp; U *rvp; int rounding, sign;
+#else
+gethex( CONST char **sp, U *rvp, int rounding, int sign)
+#endif
+{
+	Bigint *b;
+	CONST unsigned char *decpt, *s0, *s, *s1;
+	Long e, e1;
+	ULong L, lostbits, *x;
+	int big, denorm, esign, havedig, k, n, nbits, up, zret;
+#ifdef IBM
+	int j;
+#endif
+	enum {
+#ifdef IEEE_Arith /*{{*/
+		emax = 0x7fe - Bias - P + 1,
+		emin = Emin - P + 1
+#else /*}{*/
+		emin = Emin - P,
+#ifdef VAX
+		emax = 0x7ff - Bias - P + 1
+#endif
+#ifdef IBM
+		emax = 0x7f - Bias - P
+#endif
+#endif /*}}*/
+		};
+#ifdef USE_LOCALE
+	int i;
+#ifdef NO_LOCALE_CACHE
+	const unsigned char *decimalpoint = (unsigned char*)
+		localeconv()->decimal_point;
+#else
+	const unsigned char *decimalpoint;
+	static unsigned char *decimalpoint_cache;
+	if (!(s0 = decimalpoint_cache)) {
+		s0 = (unsigned char*)localeconv()->decimal_point;
+		if ((decimalpoint_cache = (unsigned char*)
+				MALLOC(strlen((CONST char*)s0) + 1))) {
+			strcpy((char*)decimalpoint_cache, (CONST char*)s0);
+			s0 = decimalpoint_cache;
+			}
+		}
+	decimalpoint = s0;
+#endif
+#endif
+
+	/**** if (!hexdig['0']) hexdig_init(); ****/
+	havedig = 0;
+	s0 = *(CONST unsigned char **)sp + 2;
+	while(s0[havedig] == '0')
+		havedig++;
+	s0 += havedig;
+	s = s0;
+	decpt = 0;
+	zret = 0;
+	e = 0;
+	if (hexdig[*s])
+		havedig++;
+	else {
+		zret = 1;
+#ifdef USE_LOCALE
+		for(i = 0; decimalpoint[i]; ++i) {
+			if (s[i] != decimalpoint[i])
+				goto pcheck;
+			}
+		decpt = s += i;
+#else
+		if (*s != '.')
+			goto pcheck;
+		decpt = ++s;
+#endif
+		if (!hexdig[*s])
+			goto pcheck;
+		while(*s == '0')
+			s++;
+		if (hexdig[*s])
+			zret = 0;
+		havedig = 1;
+		s0 = s;
+		}
+	while(hexdig[*s])
+		s++;
+#ifdef USE_LOCALE
+	if (*s == *decimalpoint && !decpt) {
+		for(i = 1; decimalpoint[i]; ++i) {
+			if (s[i] != decimalpoint[i])
+				goto pcheck;
+			}
+		decpt = s += i;
+#else
+	if (*s == '.' && !decpt) {
+		decpt = ++s;
+#endif
+		while(hexdig[*s])
+			s++;
+		}/*}*/
+	if (decpt)
+		e = -(((Long)(s-decpt)) << 2);
+ pcheck:
+	s1 = s;
+	big = esign = 0;
+	switch(*s) {
+	  case 'p':
+	  case 'P':
+		switch(*++s) {
+		  case '-':
+			esign = 1;
+			FALLTHROUGH;
+		  case '+':
+			s++;
+		  }
+		if ((n = hexdig[*s]) == 0 || n > 0x19) {
+			s = s1;
+			break;
+			}
+		e1 = n - 0x10;
+		while((n = hexdig[*++s]) !=0 && n <= 0x19) {
+			if (e1 & 0xf8000000)
+				big = 1;
+			e1 = 10*e1 + n - 0x10;
+			}
+		if (esign)
+			e1 = -e1;
+		e += e1;
+	  }
+	*sp = (char*)s;
+	if (!havedig)
+		*sp = (char*)s0 - 1;
+	if (zret)
+		goto retz1;
+	if (big) {
+		if (esign) {
+#ifdef IEEE_Arith
+			switch(rounding) {
+			  case Round_up:
+				if (sign)
+					break;
+				goto ret_tiny;
+			  case Round_down:
+				if (!sign)
+					break;
+				goto ret_tiny;
+			  }
+#endif
+			goto retz;
+#ifdef IEEE_Arith
+ ret_tinyf:
+			Bfree(b);
+ ret_tiny:
+#ifndef NO_ERRNO
+			errno = ERANGE;
+#endif
+			word0(rvp) = 0;
+			word1(rvp) = 1;
+			return;
+#endif /* IEEE_Arith */
+			}
+		switch(rounding) {
+		  case Round_near:
+			goto ovfl1;
+		  case Round_up:
+			if (!sign)
+				goto ovfl1;
+			goto ret_big;
+		  case Round_down:
+			if (sign)
+				goto ovfl1;
+			goto ret_big;
+		  }
+ ret_big:
+		word0(rvp) = Big0;
+		word1(rvp) = Big1;
+		return;
+		}
+	n = s1 - s0 - 1;
+	for(k = 0; n > (1 << (kshift-2)) - 1; n >>= 1)
+		k++;
+	b = Balloc(k);
+	x = b->x;
+	n = 0;
+	L = 0;
+#ifdef USE_LOCALE
+	for(i = 0; decimalpoint[i+1]; ++i);
+#endif
+	while(s1 > s0) {
+#ifdef USE_LOCALE
+		if (*--s1 == decimalpoint[i]) {
+			s1 -= i;
+			continue;
+			}
+#else
+		if (*--s1 == '.')
+			continue;
+#endif
+		if (n == ULbits) {
+			*x++ = L;
+			L = 0;
+			n = 0;
+			}
+		L |= (hexdig[*s1] & 0x0f) << n;
+		n += 4;
+		}
+	*x++ = L;
+	b->wds = n = x - b->x;
+	n = ULbits*n - hi0bits(L);
+	nbits = Nbits;
+	lostbits = 0;
+	x = b->x;
+	if (n > nbits) {
+		n -= nbits;
+		if (any_on(b,n)) {
+			lostbits = 1;
+			k = n - 1;
+			if (x[k>>kshift] & 1 << (k & kmask)) {
+				lostbits = 2;
+				if (k > 0 && any_on(b,k))
+					lostbits = 3;
+				}
+			}
+		rshift(b, n);
+		e += n;
+		}
+	else if (n < nbits) {
+		n = nbits - n;
+		b = lshift(b, n);
+		e -= n;
+		x = b->x;
+		}
+	if (e > Emax) {
+ ovfl:
+		Bfree(b);
+ ovfl1:
+#ifndef NO_ERRNO
+		errno = ERANGE;
+#endif
+		word0(rvp) = Exp_mask;
+		word1(rvp) = 0;
+		return;
+		}
+	denorm = 0;
+	if (e < emin) {
+		denorm = 1;
+		n = emin - e;
+		if (n >= nbits) {
+#ifdef IEEE_Arith /*{*/
+			switch (rounding) {
+			  case Round_near:
+				if (n == nbits && (n < 2 || any_on(b,n-1)))
+					goto ret_tinyf;
+				break;
+			  case Round_up:
+				if (!sign)
+					goto ret_tinyf;
+				break;
+			  case Round_down:
+				if (sign)
+					goto ret_tinyf;
+			  }
+#endif /* } IEEE_Arith */
+			Bfree(b);
+ retz:
+#ifndef NO_ERRNO
+			errno = ERANGE;
+#endif
+ retz1:
+			rvp->d = 0.;
+			return;
+			}
+		k = n - 1;
+		if (lostbits)
+			lostbits = 1;
+		else if (k > 0)
+			lostbits = any_on(b,k);
+		if (x[k>>kshift] & 1 << (k & kmask))
+			lostbits |= 2;
+		nbits -= n;
+		rshift(b,n);
+		e = emin;
+		}
+	if (lostbits) {
+		up = 0;
+		switch(rounding) {
+		  case Round_zero:
+			break;
+		  case Round_near:
+			if (lostbits & 2
+			 && (lostbits & 1) | (x[0] & 1))
+				up = 1;
+			break;
+		  case Round_up:
+			up = 1 - sign;
+			break;
+		  case Round_down:
+			up = sign;
+		  }
+		if (up) {
+			k = b->wds;
+			b = increment(b);
+			x = b->x;
+			if (denorm) {
+#if 0
+				if (nbits == Nbits - 1
+				 && x[nbits >> kshift] & 1 << (nbits & kmask))
+					denorm = 0; /* not currently used */
+#endif
+				}
+			else if (b->wds > k
+			 || ((n = nbits & kmask) !=0
+			     && hi0bits(x[k-1]) < 32-n)) {
+				rshift(b,1);
+				if (++e > Emax)
+					goto ovfl;
+				}
+			}
+		}
+#ifdef IEEE_Arith
+	if (denorm)
+		word0(rvp) = b->wds > 1 ? b->x[1] & ~0x100000 : 0;
+	else
+		word0(rvp) = (b->x[1] & ~0x100000) | ((e + 0x3ff + 52) << 20);
+	word1(rvp) = b->x[0];
+#endif
+#ifdef IBM
+	if ((j = e & 3)) {
+		k = b->x[0] & ((1 << j) - 1);
+		rshift(b,j);
+		if (k) {
+			switch(rounding) {
+			  case Round_up:
+				if (!sign)
+					increment(b);
+				break;
+			  case Round_down:
+				if (sign)
+					increment(b);
+				break;
+			  case Round_near:
+				j = 1 << (j-1);
+				if (k & j && ((k & (j-1)) | lostbits))
+					increment(b);
+			  }
+			}
+		}
+	e >>= 2;
+	word0(rvp) = b->x[1] | ((e + 65 + 13) << 24);
+	word1(rvp) = b->x[0];
+#endif
+#ifdef VAX
+	/* The next two lines ignore swap of low- and high-order 2 bytes. */
+	/* word0(rvp) = (b->x[1] & ~0x800000) | ((e + 129 + 55) << 23); */
+	/* word1(rvp) = b->x[0]; */
+	word0(rvp) = ((b->x[1] & ~0x800000) >> 16) | ((e + 129 + 55) << 7) | (b->x[1] << 16);
+	word1(rvp) = (b->x[0] >> 16) | (b->x[0] << 16);
+#endif
+	Bfree(b);
+	}
+#endif /*!NO_HEX_FP}*/
+
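+/* dshift returns the extra left shift that leaves the divisor with
+ * four leading zero bits in its top word, which keeps the trial
+ * quotients computed by quorem to a single digit.
+ */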
+ static int
+#ifdef KR_headers
+dshift(b, p2) Bigint *b; int p2;
+#else
+dshift(Bigint *b, int p2)
+#endif
+{
+	int rv = hi0bits(b->x[b->wds-1]) - 4;
+	if (p2 > 0)
+		rv -= p2;
+	return rv & kmask;
+	}
+
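+/* quorem returns floor(b/S) -- a single digit, given how callers
+ * scale b and S -- and replaces b with the remainder b - q*S.
+ */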
+ static int
+quorem
+#ifdef KR_headers
+	(b, S) Bigint *b, *S;
+#else
+	(Bigint *b, Bigint *S)
+#endif
+{
+	int n;
+	ULong *bx, *bxe, q, *sx, *sxe;
+#ifdef ULLong
+	ULLong borrow, carry, y, ys;
+#else
+	ULong borrow, carry, y, ys;
+#ifdef Pack_32
+	ULong si, z, zs;
+#endif
+#endif
+
+	n = S->wds;
+#ifdef DEBUG
+	/*debug*/ if (b->wds > n)
+	/*debug*/	Bug("oversize b in quorem");
+#endif
+	if (b->wds < n)
+		return 0;
+	sx = S->x;
+	sxe = sx + --n;
+	bx = b->x;
+	bxe = bx + n;
+	q = *bxe / (*sxe + 1);	/* ensure q <= true quotient */
+#ifdef DEBUG
+#ifdef NO_STRTOD_BIGCOMP
+	/*debug*/ if (q > 9)
+#else
+	/* An oversized q is possible when quorem is called from bigcomp and */
+	/* the input is near, e.g., twice the smallest denormalized number. */
+	/*debug*/ if (q > 15)
+#endif
+	/*debug*/	Bug("oversized quotient in quorem");
+#endif
+	if (q) {
+		borrow = 0;
+		carry = 0;
+		do {
+#ifdef ULLong
+			ys = *sx++ * (ULLong)q + carry;
+			carry = ys >> 32;
+			y = *bx - (ys & FFFFFFFF) - borrow;
+			borrow = y >> 32 & (ULong)1;
+			*bx++ = y & FFFFFFFF;
+#else
+#ifdef Pack_32
+			si = *sx++;
+			ys = (si & 0xffff) * q + carry;
+			zs = (si >> 16) * q + (ys >> 16);
+			carry = zs >> 16;
+			y = (*bx & 0xffff) - (ys & 0xffff) - borrow;
+			borrow = (y & 0x10000) >> 16;
+			z = (*bx >> 16) - (zs & 0xffff) - borrow;
+			borrow = (z & 0x10000) >> 16;
+			Storeinc(bx, z, y);
+#else
+			ys = *sx++ * q + carry;
+			carry = ys >> 16;
+			y = *bx - (ys & 0xffff) - borrow;
+			borrow = (y & 0x10000) >> 16;
+			*bx++ = y & 0xffff;
+#endif
+#endif
+			}
+			while(sx <= sxe);
+		if (!*bxe) {
+			bx = b->x;
+			while(--bxe > bx && !*bxe)
+				--n;
+			b->wds = n;
+			}
+		}
+	if (cmp(b, S) >= 0) {
+		q++;
+		borrow = 0;
+		carry = 0;
+		bx = b->x;
+		sx = S->x;
+		do {
+#ifdef ULLong
+			ys = *sx++ + carry;
+			carry = ys >> 32;
+			y = *bx - (ys & FFFFFFFF) - borrow;
+			borrow = y >> 32 & (ULong)1;
+			*bx++ = y & FFFFFFFF;
+#else
+#ifdef Pack_32
+			si = *sx++;
+			ys = (si & 0xffff) + carry;
+			zs = (si >> 16) + (ys >> 16);
+			carry = zs >> 16;
+			y = (*bx & 0xffff) - (ys & 0xffff) - borrow;
+			borrow = (y & 0x10000) >> 16;
+			z = (*bx >> 16) - (zs & 0xffff) - borrow;
+			borrow = (z & 0x10000) >> 16;
+			Storeinc(bx, z, y);
+#else
+			ys = *sx++ + carry;
+			carry = ys >> 16;
+			y = *bx - (ys & 0xffff) - borrow;
+			borrow = (y & 0x10000) >> 16;
+			*bx++ = y & 0xffff;
+#endif
+#endif
+			}
+			while(sx <= sxe);
+		bx = b->x;
+		bxe = bx + n;
+		if (!*bxe) {
+			while(--bxe > bx && !*bxe)
+				--n;
+			b->wds = n;
+			}
+		}
+	return q;
+	}
+
+#if defined(Avoid_Underflow) || !defined(NO_STRTOD_BIGCOMP) /*{*/
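+/* sulp returns the ulp of x adjusted for bc->scale: when strtod has
+ * scaled rv up to dodge underflow and the true result is denormal,
+ * the raw ulp is magnified by the corresponding power of two.
+ */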
+ static double
+sulp
+#ifdef KR_headers
+	(x, bc) U *x; BCinfo *bc;
+#else
+	(U *x, BCinfo *bc)
+#endif
+{
+	U u;
+	double rv;
+	int i;
+
+	rv = ulp(x);
+	if (!bc->scale || (i = 2*P + 1 - ((word0(x) & Exp_mask) >> Exp_shift)) <= 0)
+		return rv; /* Is there an example where i <= 0 ? */
+	word0(&u) = Exp_1 + (i << Exp_shift);
+	word1(&u) = 0;
+	return rv * u.d;
+	}
+#endif /*}*/
+
+#ifndef NO_STRTOD_BIGCOMP
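+/* bigcomp refines rv for inputs longer than strtod_diglim digits: it
+ * forms the exact decimal expansion of the value midway between rv
+ * and its neighbor, compares it digit by digit with the input, and
+ * adjusts rv by one sulp when the comparison calls for it.
+ */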
+ static void
+bigcomp
+#ifdef KR_headers
+	(rv, s0, bc)
+	U *rv; CONST char *s0; BCinfo *bc;
+#else
+	(U *rv, const char *s0, BCinfo *bc)
+#endif
+{
+	Bigint *b, *d;
+	int b2, bbits, d2, dd, dig, dsign, i, j, nd, nd0, p2, p5, speccase;
+
+	dsign = bc->dsign;
+	nd = bc->nd;
+	nd0 = bc->nd0;
+	p5 = nd + bc->e0 - 1;
+	dd = speccase = 0;
+#ifndef Sudden_Underflow
+	if (rv->d == 0.) {	/* special case: value near underflow-to-zero */
+				/* threshold was rounded to zero */
+		b = i2b(1);
+		p2 = Emin - P + 1;
+		bbits = 1;
+#ifdef Avoid_Underflow
+		word0(rv) = (P+2) << Exp_shift;
+#else
+		word1(rv) = 1;
+#endif
+		i = 0;
+#ifdef Honor_FLT_ROUNDS
+		if (bc->rounding == 1)
+#endif
+			{
+			speccase = 1;
+			--p2;
+			dsign = 0;
+			goto have_i;
+			}
+		}
+	else
+#endif
+		b = d2b(rv, &p2, &bbits);
+#ifdef Avoid_Underflow
+	p2 -= bc->scale;
+#endif
+	/* floor(log2(rv)) == bbits - 1 + p2 */
+	/* Check for denormal case. */
+	i = P - bbits;
+	if (i > (j = P - Emin - 1 + p2)) {
+#ifdef Sudden_Underflow
+		Bfree(b);
+		b = i2b(1);
+		p2 = Emin;
+		i = P - 1;
+#ifdef Avoid_Underflow
+		word0(rv) = (1 + bc->scale) << Exp_shift;
+#else
+		word0(rv) = Exp_msk1;
+#endif
+		word1(rv) = 0;
+#else
+		i = j;
+#endif
+		}
+#ifdef Honor_FLT_ROUNDS
+	if (bc->rounding != 1) {
+		if (i > 0)
+			b = lshift(b, i);
+		if (dsign)
+			b = increment(b);
+		}
+	else
+#endif
+		{
+		b = lshift(b, ++i);
+		b->x[0] |= 1;
+		}
+#ifndef Sudden_Underflow
+ have_i:
+#endif
+	p2 -= p5 + i;
+	d = i2b(1);
+	/* Arrange for convenient computation of quotients:
+	 * shift left if necessary so divisor has 4 leading 0 bits.
+	 */
+	if (p5 > 0)
+		d = pow5mult(d, p5);
+	else if (p5 < 0)
+		b = pow5mult(b, -p5);
+	if (p2 > 0) {
+		b2 = p2;
+		d2 = 0;
+		}
+	else {
+		b2 = 0;
+		d2 = -p2;
+		}
+	i = dshift(d, d2);
+	if ((b2 += i) > 0)
+		b = lshift(b, b2);
+	if ((d2 += i) > 0)
+		d = lshift(d, d2);
+
+	/* Now b/d = exactly half-way between the two floating-point values */
+	/* on either side of the input string.  Compute first digit of b/d. */
+
+	dig = quorem(b,d);
+	if (!dig) {
+		b = multadd(b, 10, 0);	/* very unlikely */
+		dig = quorem(b,d);
+		}
+
+	/* Compare b/d with s0 */
+
+	for(i = 0; i < nd0; ) {
+		dd = s0[i++] - '0' - dig;
+		if (dd)
+			goto ret;
+		if (!b->x[0] && b->wds == 1) {
+			if (i < nd)
+				dd = 1;
+			goto ret;
+			}
+		b = multadd(b, 10, 0);
+		dig = quorem(b,d);
+		}
+	for(j = bc->dp1; i++ < nd;) {
+		dd = s0[j++] - '0' - dig;
+		if (dd)
+			goto ret;
+		if (!b->x[0] && b->wds == 1) {
+			if (i < nd)
+				dd = 1;
+			goto ret;
+			}
+		b = multadd(b, 10, 0);
+		dig = quorem(b,d);
+		}
+	if (dig > 0 || b->x[0] || b->wds > 1)
+		dd = -1;
+ ret:
+	Bfree(b);
+	Bfree(d);
+#ifdef Honor_FLT_ROUNDS
+	if (bc->rounding != 1) {
+		if (dd < 0) {
+			if (bc->rounding == 0) {
+				if (!dsign)
+					goto retlow1;
+				}
+			else if (dsign)
+				goto rethi1;
+			}
+		else if (dd > 0) {
+			if (bc->rounding == 0) {
+				if (dsign)
+					goto rethi1;
+				goto ret1;
+				}
+			if (!dsign)
+				goto rethi1;
+			dval(rv) += 2.*sulp(rv,bc);
+			}
+		else {
+			bc->inexact = 0;
+			if (dsign)
+				goto rethi1;
+			}
+		}
+	else
+#endif
+	if (speccase) {
+		if (dd <= 0)
+			rv->d = 0.;
+		}
+	else if (dd < 0) {
+		if (!dsign)	/* does not happen for round-near */
+retlow1:
+			dval(rv) -= sulp(rv,bc);
+		}
+	else if (dd > 0) {
+		if (dsign) {
+ rethi1:
+			dval(rv) += sulp(rv,bc);
+			}
+		}
+	else {
+		/* Exact half-way case:  apply round-even rule. */
+		if ((j = ((word0(rv) & Exp_mask) >> Exp_shift) - bc->scale) <= 0) {
+			i = 1 - j;
+			if (i <= 31) {
+				if (word1(rv) & (0x1 << i))
+					goto odd;
+				}
+			else if (word0(rv) & (0x1 << (i-32)))
+				goto odd;
+			}
+		else if (word1(rv) & 1) {
+ odd:
+			if (dsign)
+				goto rethi1;
+			goto retlow1;
+			}
+		}
+
+#ifdef Honor_FLT_ROUNDS
+ ret1:
+#endif
+	return;
+	}
+#endif /* NO_STRTOD_BIGCOMP */
+
+ double
+strtod
+#ifdef KR_headers
+	(s00, se) CONST char *s00; char **se;
+#else
+	(const char *s00, char **se)
+#endif
+{
+	int bb2, bb5, bbe, bd2, bd5, bbbits, bs2, c, e, e1;
+	int esign, i, j, k, nd, nd0, nf, nz, nz0, nz1, sign;
+	CONST char *s, *s0, *s1;
+	double aadj, aadj1;
+	Long L;
+	U aadj2, adj, rv, rv0;
+	ULong y, z;
+	BCinfo bc;
+	Bigint *bb = nullptr, *bb1, *bd = nullptr, *bd0, *bs = nullptr, *delta = nullptr;
+#ifdef Avoid_Underflow
+	ULong Lsb, Lsb1;
+#endif
+#ifdef SET_INEXACT
+	int oldinexact;
+#endif
+#ifndef NO_STRTOD_BIGCOMP
+	int req_bigcomp = 0;
+#endif
+#ifdef Honor_FLT_ROUNDS /*{*/
+#ifdef Trust_FLT_ROUNDS /*{{ only define this if FLT_ROUNDS really works! */
+	bc.rounding = Flt_Rounds;
+#else /*}{*/
+	bc.rounding = 1;
+	switch(fegetround()) {
+	  case FE_TOWARDZERO:	bc.rounding = 0; break;
+	  case FE_UPWARD:	bc.rounding = 2; break;
+	  case FE_DOWNWARD:	bc.rounding = 3;
+	  }
+#endif /*}}*/
+#endif /*}*/
+#ifdef USE_LOCALE
+	CONST char *s2;
+#endif
+
+	sign = nz0 = nz1 = nz = bc.dplen = bc.uflchk = 0;
+	dval(&rv) = 0.;
+	for(s = s00;;s++) switch(*s) {
+		case '-':
+			sign = 1;
+			FALLTHROUGH;
+		case '+':
+			if (*++s)
+				goto break2;
+			FALLTHROUGH;
+		case 0:
+			goto ret0;
+		case '\t':
+		case '\n':
+		case '\v':
+		case '\f':
+		case '\r':
+		case ' ':
+			continue;
+		default:
+			goto break2;
+		}
+ break2:
+	if (*s == '0') {
+#ifndef NO_HEX_FP /*{*/
+		switch(s[1]) {
+		  case 'x':
+		  case 'X':
+#ifdef Honor_FLT_ROUNDS
+			gethex(&s, &rv, bc.rounding, sign);
+#else
+			gethex(&s, &rv, 1, sign);
+#endif
+			goto ret;
+		  }
+#endif /*}*/
+		nz0 = 1;
+		while(*++s == '0') ;
+		if (!*s)
+			goto ret;
+		}
+	s0 = s;
+	y = z = 0;
+	for(nd = nf = 0; (c = *s) >= '0' && c <= '9'; nd++, s++)
+		if (nd < 9)
+			y = 10*y + c - '0';
+		else if (nd < DBL_DIG + 2)
+			z = 10*z + c - '0';
+	nd0 = nd;
+	bc.dp0 = bc.dp1 = s - s0;
+	for(s1 = s; s1 > s0 && *--s1 == '0'; )
+		++nz1;
+#ifdef USE_LOCALE
+	s1 = localeconv()->decimal_point;
+	if (c == *s1) {
+		c = '.';
+		if (*++s1) {
+			s2 = s;
+			for(;;) {
+				if (*++s2 != *s1) {
+					c = 0;
+					break;
+					}
+				if (!*++s1) {
+					s = s2;
+					break;
+					}
+				}
+			}
+		}
+#endif
+	if (c == '.') {
+		c = *++s;
+		bc.dp1 = s - s0;
+		bc.dplen = bc.dp1 - bc.dp0;
+		if (!nd) {
+			for(; c == '0'; c = *++s)
+				nz++;
+			if (c > '0' && c <= '9') {
+				bc.dp0 = s0 - s;
+				bc.dp1 = bc.dp0 + bc.dplen;
+				s0 = s;
+				nf += nz;
+				nz = 0;
+				goto have_dig;
+				}
+			goto dig_done;
+			}
+		for(; c >= '0' && c <= '9'; c = *++s) {
+ have_dig:
+			nz++;
+			if (c -= '0') {
+				nf += nz;
+				for(i = 1; i < nz; i++)
+					if (nd++ < 9)
+						y *= 10;
+					else if (nd <= DBL_DIG + 2)
+						z *= 10;
+				if (nd++ < 9)
+					y = 10*y + c;
+				else if (nd <= DBL_DIG + 2)
+					z = 10*z + c;
+				nz = nz1 = 0;
+				}
+			}
+		}
+ dig_done:
+	e = 0;
+	if (c == 'e' || c == 'E') {
+		if (!nd && !nz && !nz0) {
+			goto ret0;
+			}
+		s00 = s;
+		esign = 0;
+		switch(c = *++s) {
+			case '-':
+				esign = 1;
+				FALLTHROUGH;
+			case '+':
+				c = *++s;
+			}
+		if (c >= '0' && c <= '9') {
+			while(c == '0')
+				c = *++s;
+			if (c > '0' && c <= '9') {
+				L = c - '0';
+				s1 = s;
+				while((c = *++s) >= '0' && c <= '9') {
+					if (L < (INT_MAX - 10) / 10) {
+						L = 10*L + (c - '0');
+					}
+				}
+				if (s - s1 > 8 || L > 19999)
+					/* Avoid confusion from exponents
+					 * so large that e might overflow.
+					 */
+					e = 19999; /* safe for 16 bit ints */
+				else
+					e = (int)L;
+				if (esign)
+					e = -e;
+				}
+			else
+				e = 0;
+			}
+		else
+			s = s00;
+		}
+	if (!nd) {
+		if (!nz && !nz0) {
+#ifdef INFNAN_CHECK
+			/* Check for Nan and Infinity */
+			if (!bc.dplen)
+			 switch(c) {
+			  case 'i':
+			  case 'I':
+				if (match(&s,"nf")) {
+					--s;
+					if (!match(&s,"inity"))
+						++s;
+					word0(&rv) = 0x7ff00000;
+					word1(&rv) = 0;
+					goto ret;
+					}
+				break;
+			  case 'n':
+			  case 'N':
+				if (match(&s, "an")) {
+					word0(&rv) = NAN_WORD0;
+					word1(&rv) = NAN_WORD1;
+#ifndef No_Hex_NaN
+					if (*s == '(') /*)*/
+						hexnan(&rv, &s);
+#endif
+					goto ret;
+					}
+			  }
+#endif /* INFNAN_CHECK */
+ ret0:
+			s = s00;
+			sign = 0;
+			}
+		goto ret;
+		}
+	bc.e0 = e1 = e -= nf;
+
+	/* Now we have nd0 digits, starting at s0, followed by a
+	 * decimal point, followed by nd-nd0 digits.  The number we're
+	 * after is the integer represented by those digits times
+	 * 10**e */
+
+	if (!nd0)
+		nd0 = nd;
+	k = nd < DBL_DIG + 2 ? nd : DBL_DIG + 2;
+	dval(&rv) = y;
+	if (k > 9) {
+#ifdef SET_INEXACT
+		if (k > DBL_DIG)
+			oldinexact = get_inexact();
+#endif
+		dval(&rv) = tens[k - 9] * dval(&rv) + z;
+		}
+	bd0 = 0;
+	if (nd <= DBL_DIG
+#ifndef RND_PRODQUOT
+#ifndef Honor_FLT_ROUNDS
+		&& Flt_Rounds == 1
+#endif
+#endif
+			) {
+		if (!e)
+			goto ret;
+#ifndef ROUND_BIASED_without_Round_Up
+		if (e > 0) {
+			if (e <= Ten_pmax) {
+#ifdef VAX
+				goto vax_ovfl_check;
+#else
+#ifdef Honor_FLT_ROUNDS
+				/* round correctly FLT_ROUNDS = 2 or 3 */
+				if (sign) {
+					rv.d = -rv.d;
+					sign = 0;
+					}
+#endif
+				/* rv = */ rounded_product(dval(&rv), tens[e]);
+				goto ret;
+#endif
+				}
+			i = DBL_DIG - nd;
+			if (e <= Ten_pmax + i) {
+				/* A fancier test would sometimes let us do
+				 * this for larger i values.
+				 */
+#ifdef Honor_FLT_ROUNDS
+				/* round correctly FLT_ROUNDS = 2 or 3 */
+				if (sign) {
+					rv.d = -rv.d;
+					sign = 0;
+					}
+#endif
+				e -= i;
+				dval(&rv) *= tens[i];
+#ifdef VAX
+				/* VAX exponent range is so narrow we must
+				 * worry about overflow here...
+				 */
+ vax_ovfl_check:
+				word0(&rv) -= P*Exp_msk1;
+				/* rv = */ rounded_product(dval(&rv), tens[e]);
+				if ((word0(&rv) & Exp_mask)
+				 > Exp_msk1*(DBL_MAX_EXP+Bias-1-P))
+					goto ovfl;
+				word0(&rv) += P*Exp_msk1;
+#else
+				/* rv = */ rounded_product(dval(&rv), tens[e]);
+#endif
+				goto ret;
+				}
+			}
+#ifndef Inaccurate_Divide
+		else if (e >= -Ten_pmax) {
+#ifdef Honor_FLT_ROUNDS
+			/* round correctly FLT_ROUNDS = 2 or 3 */
+			if (sign) {
+				rv.d = -rv.d;
+				sign = 0;
+				}
+#endif
+			/* rv = */ rounded_quotient(dval(&rv), tens[-e]);
+			goto ret;
+			}
+#endif
+#endif /* ROUND_BIASED_without_Round_Up */
+		}
+	e1 += nd - k;
+
+#ifdef IEEE_Arith
+#ifdef SET_INEXACT
+	bc.inexact = 1;
+	if (k <= DBL_DIG)
+		oldinexact = get_inexact();
+#endif
+#ifdef Avoid_Underflow
+	bc.scale = 0;
+#endif
+#ifdef Honor_FLT_ROUNDS
+	if (bc.rounding >= 2) {
+		if (sign)
+			bc.rounding = bc.rounding == 2 ? 0 : 2;
+		else
+			if (bc.rounding != 2)
+				bc.rounding = 0;
+		}
+#endif
+#endif /*IEEE_Arith*/
+
+	/* Get starting approximation = rv * 10**e1 */
+
+	if (e1 > 0) {
+		i = e1 & 15;
+		if (i)
+			dval(&rv) *= tens[i];
+		if (e1 &= ~15) {
+			if (e1 > DBL_MAX_10_EXP) {
+ ovfl:
+				/* Can't trust HUGE_VAL */
+#ifdef IEEE_Arith
+#ifdef Honor_FLT_ROUNDS
+				switch(bc.rounding) {
+				  case 0: /* toward 0 */
+				  case 3: /* toward -infinity */
+					word0(&rv) = Big0;
+					word1(&rv) = Big1;
+					break;
+				  default:
+					word0(&rv) = Exp_mask;
+					word1(&rv) = 0;
+				  }
+#else /*Honor_FLT_ROUNDS*/
+				word0(&rv) = Exp_mask;
+				word1(&rv) = 0;
+#endif /*Honor_FLT_ROUNDS*/
+#ifdef SET_INEXACT
+				/* set overflow bit */
+				dval(&rv0) = 1e300;
+				dval(&rv0) *= dval(&rv0);
+#endif
+#else /*IEEE_Arith*/
+				word0(&rv) = Big0;
+				word1(&rv) = Big1;
+#endif /*IEEE_Arith*/
+ range_err:
+				if (bd0) {
+					Bfree(bb);
+					Bfree(bd);
+					Bfree(bs);
+					Bfree(bd0);
+					Bfree(delta);
+					}
+#ifndef NO_ERRNO
+				errno = ERANGE;
+#endif
+				goto ret;
+				}
+			e1 >>= 4;
+			for(j = 0; e1 > 1; j++, e1 >>= 1)
+				if (e1 & 1)
+					dval(&rv) *= bigtens[j];
+		/* The last multiplication could overflow. */
+			word0(&rv) -= P*Exp_msk1;
+			dval(&rv) *= bigtens[j];
+			if ((z = word0(&rv) & Exp_mask)
+			 > Exp_msk1*(DBL_MAX_EXP+Bias-P))
+				goto ovfl;
+			if (z > Exp_msk1*(DBL_MAX_EXP+Bias-1-P)) {
+				/* set to largest number */
+				/* (Can't trust DBL_MAX) */
+				word0(&rv) = Big0;
+				word1(&rv) = Big1;
+				}
+			else
+				word0(&rv) += P*Exp_msk1;
+			}
+		}
+	else if (e1 < 0) {
+		e1 = -e1;
+		i = e1 & 15;
+		if (i)
+			dval(&rv) /= tens[i];
+		if (e1 >>= 4) {
+			if (e1 >= 1 << n_bigtens)
+				goto undfl;
+#ifdef Avoid_Underflow
+			if (e1 & Scale_Bit)
+				bc.scale = 2*P;
+			for(j = 0; e1 > 0; j++, e1 >>= 1)
+				if (e1 & 1)
+					dval(&rv) *= tinytens[j];
+			if (bc.scale && (j = 2*P + 1 - ((word0(&rv) & Exp_mask)
+						>> Exp_shift)) > 0) {
+				/* scaled rv is denormal; clear j low bits */
+				if (j >= 32) {
+					if (j > 54)
+						goto undfl;
+					word1(&rv) = 0;
+					if (j >= 53)
+					 word0(&rv) = (P+2)*Exp_msk1;
+					else
+					 word0(&rv) &= 0xffffffff << (j-32);
+					}
+				else
+					word1(&rv) &= 0xffffffff << j;
+				}
+#else
+			for(j = 0; e1 > 1; j++, e1 >>= 1)
+				if (e1 & 1)
+					dval(&rv) *= tinytens[j];
+			/* The last multiplication could underflow. */
+			dval(&rv0) = dval(&rv);
+			dval(&rv) *= tinytens[j];
+			if (!dval(&rv)) {
+				dval(&rv) = 2.*dval(&rv0);
+				dval(&rv) *= tinytens[j];
+#endif
+				if (!dval(&rv)) {
+ undfl:
+					dval(&rv) = 0.;
+					goto range_err;
+					}
+#ifndef Avoid_Underflow
+				word0(&rv) = Tiny0;
+				word1(&rv) = Tiny1;
+				/* The refinement below will clean
+				 * this approximation up.
+				 */
+				}
+#endif
+			}
+		}
+
+	/* Now the hard part -- adjusting rv to the correct value.*/
+
+	/* Put digits into bd: true value = bd * 10^e */
+
+	bc.nd = nd - nz1;
+#ifndef NO_STRTOD_BIGCOMP
+	bc.nd0 = nd0;	/* Only needed if nd > strtod_diglim, but done here */
+			/* to silence an erroneous warning about bc.nd0 */
+			/* possibly not being initialized. */
+	if (nd > strtod_diglim) {
+		/* ASSERT(strtod_diglim >= 18); 18 == one more than the */
+		/* minimum number of decimal digits to distinguish double values */
+		/* in IEEE arithmetic. */
+		i = j = 18;
+		if (i > nd0)
+			j += bc.dplen;
+		for(;;) {
+			if (--j < bc.dp1 && j >= bc.dp0)
+				j = bc.dp0 - 1;
+			if (s0[j] != '0')
+				break;
+			--i;
+			}
+		e += nd - i;
+		nd = i;
+		if (nd0 > nd)
+			nd0 = nd;
+		if (nd < 9) { /* must recompute y */
+			y = 0;
+			for(i = 0; i < nd0; ++i)
+				y = 10*y + s0[i] - '0';
+			for(j = bc.dp1; i < nd; ++i)
+				y = 10*y + s0[j++] - '0';
+			}
+		}
+#endif
+	bd0 = s2b(s0, nd0, nd, y, bc.dplen);
+
+	for(;;) {
+		bd = Balloc(bd0->k);
+		Bcopy(bd, bd0);
+		bb = d2b(&rv, &bbe, &bbbits);	/* rv = bb * 2^bbe */
+		bs = i2b(1);
+
+		if (e >= 0) {
+			bb2 = bb5 = 0;
+			bd2 = bd5 = e;
+			}
+		else {
+			bb2 = bb5 = -e;
+			bd2 = bd5 = 0;
+			}
+		if (bbe >= 0)
+			bb2 += bbe;
+		else
+			bd2 -= bbe;
+		bs2 = bb2;
+#ifdef Honor_FLT_ROUNDS
+		if (bc.rounding != 1)
+			bs2++;
+#endif
+#ifdef Avoid_Underflow
+		Lsb = LSB;
+		Lsb1 = 0;
+		j = bbe - bc.scale;
+		i = j + bbbits - 1;	/* logb(rv) */
+		j = P + 1 - bbbits;
+		if (i < Emin) {	/* denormal */
+			i = Emin - i;
+			j -= i;
+			if (i < 32)
+				Lsb <<= i;
+			else if (i < 52)
+				Lsb1 = Lsb << (i-32);
+			else
+				Lsb1 = Exp_mask;
+			}
+#else /*Avoid_Underflow*/
+#ifdef Sudden_Underflow
+#ifdef IBM
+		j = 1 + 4*P - 3 - bbbits + ((bbe + bbbits - 1) & 3);
+#else
+		j = P + 1 - bbbits;
+#endif
+#else /*Sudden_Underflow*/
+		j = bbe;
+		i = j + bbbits - 1;	/* logb(rv) */
+		if (i < Emin)	/* denormal */
+			j += P - Emin;
+		else
+			j = P + 1 - bbbits;
+#endif /*Sudden_Underflow*/
+#endif /*Avoid_Underflow*/
+		bb2 += j;
+		bd2 += j;
+#ifdef Avoid_Underflow
+		bd2 += bc.scale;
+#endif
+		i = bb2 < bd2 ? bb2 : bd2;
+		if (i > bs2)
+			i = bs2;
+		if (i > 0) {
+			bb2 -= i;
+			bd2 -= i;
+			bs2 -= i;
+			}
+		if (bb5 > 0) {
+			bs = pow5mult(bs, bb5);
+			bb1 = mult(bs, bb);
+			Bfree(bb);
+			bb = bb1;
+			}
+		if (bb2 > 0)
+			bb = lshift(bb, bb2);
+		if (bd5 > 0)
+			bd = pow5mult(bd, bd5);
+		if (bd2 > 0)
+			bd = lshift(bd, bd2);
+		if (bs2 > 0)
+			bs = lshift(bs, bs2);
+		delta = diff(bb, bd);
+		bc.dsign = delta->sign;
+		delta->sign = 0;
+		i = cmp(delta, bs);
+#ifndef NO_STRTOD_BIGCOMP /*{*/
+		if (bc.nd > nd && i <= 0) {
+			if (bc.dsign) {
+				/* Must use bigcomp(). */
+				req_bigcomp = 1;
+				break;
+				}
+#ifdef Honor_FLT_ROUNDS
+			if (bc.rounding != 1) {
+				if (i < 0) {
+					req_bigcomp = 1;
+					break;
+					}
+				}
+			else
+#endif
+				i = -1;	/* Discarded digits make delta smaller. */
+			}
+#endif /*}*/
+#ifdef Honor_FLT_ROUNDS /*{*/
+		if (bc.rounding != 1) {
+			if (i < 0) {
+				/* Error is less than an ulp */
+				if (!delta->x[0] && delta->wds <= 1) {
+					/* exact */
+#ifdef SET_INEXACT
+					bc.inexact = 0;
+#endif
+					break;
+					}
+				if (bc.rounding) {
+					if (bc.dsign) {
+						adj.d = 1.;
+						goto apply_adj;
+						}
+					}
+				else if (!bc.dsign) {
+					adj.d = -1.;
+					if (!word1(&rv)
+					 && !(word0(&rv) & Frac_mask)) {
+						y = word0(&rv) & Exp_mask;
+#ifdef Avoid_Underflow
+						if (!bc.scale || y > 2*P*Exp_msk1)
+#else
+						if (y)
+#endif
+						  {
+						  delta = lshift(delta,Log2P);
+						  if (cmp(delta, bs) <= 0)
+							adj.d = -0.5;
+						  }
+						}
+ apply_adj:
+#ifdef Avoid_Underflow /*{*/
+					if (bc.scale && (y = word0(&rv) & Exp_mask)
+						<= 2*P*Exp_msk1)
+					  word0(&adj) += (2*P+1)*Exp_msk1 - y;
+#else
+#ifdef Sudden_Underflow
+					if ((word0(&rv) & Exp_mask) <=
+							P*Exp_msk1) {
+						word0(&rv) += P*Exp_msk1;
+						dval(&rv) += adj.d*ulp(dval(&rv));
+						word0(&rv) -= P*Exp_msk1;
+						}
+					else
+#endif /*Sudden_Underflow*/
+#endif /*Avoid_Underflow}*/
+					dval(&rv) += adj.d*ulp(&rv);
+					}
+				break;
+				}
+			adj.d = ratio(delta, bs);
+			if (adj.d < 1.)
+				adj.d = 1.;
+			if (adj.d <= 0x7ffffffe) {
+				/* adj = rounding ? ceil(adj) : floor(adj); */
+				y = adj.d;
+				if (y != adj.d) {
+					if (!((bc.rounding>>1) ^ bc.dsign))
+						y++;
+					adj.d = y;
+					}
+				}
+#ifdef Avoid_Underflow /*{*/
+			if (bc.scale && (y = word0(&rv) & Exp_mask) <= 2*P*Exp_msk1)
+				word0(&adj) += (2*P+1)*Exp_msk1 - y;
+#else
+#ifdef Sudden_Underflow
+			if ((word0(&rv) & Exp_mask) <= P*Exp_msk1) {
+				word0(&rv) += P*Exp_msk1;
+				adj.d *= ulp(dval(&rv));
+				if (bc.dsign)
+					dval(&rv) += adj.d;
+				else
+					dval(&rv) -= adj.d;
+				word0(&rv) -= P*Exp_msk1;
+				goto cont;
+				}
+#endif /*Sudden_Underflow*/
+#endif /*Avoid_Underflow}*/
+			adj.d *= ulp(&rv);
+			if (bc.dsign) {
+				if (word0(&rv) == Big0 && word1(&rv) == Big1)
+					goto ovfl;
+				dval(&rv) += adj.d;
+				}
+			else
+				dval(&rv) -= adj.d;
+			goto cont;
+			}
+#endif /*}Honor_FLT_ROUNDS*/
+
+		if (i < 0) {
+			/* Error is less than half an ulp -- check for
+			 * special case of mantissa a power of two.
+			 */
+			if (bc.dsign || word1(&rv) || word0(&rv) & Bndry_mask
+#ifdef IEEE_Arith /*{*/
+#ifdef Avoid_Underflow
+			 || (word0(&rv) & Exp_mask) <= (2*P+1)*Exp_msk1
+#else
+			 || (word0(&rv) & Exp_mask) <= Exp_msk1
+#endif
+#endif /*}*/
+				) {
+#ifdef SET_INEXACT
+				if (!delta->x[0] && delta->wds <= 1)
+					bc.inexact = 0;
+#endif
+				break;
+				}
+			if (!delta->x[0] && delta->wds <= 1) {
+				/* exact result */
+#ifdef SET_INEXACT
+				bc.inexact = 0;
+#endif
+				break;
+				}
+			delta = lshift(delta,Log2P);
+			if (cmp(delta, bs) > 0)
+				goto drop_down;
+			break;
+			}
+		if (i == 0) {
+			/* exactly half-way between */
+			if (bc.dsign) {
+				if ((word0(&rv) & Bndry_mask1) == Bndry_mask1
+				 &&  word1(&rv) == (
+#ifdef Avoid_Underflow
+			(bc.scale && (y = word0(&rv) & Exp_mask) <= 2*P*Exp_msk1)
+		? (0xffffffff & (0xffffffff << (2*P+1-(y>>Exp_shift)))) :
+#endif
+						   0xffffffff)) {
+					/*boundary case -- increment exponent*/
+					if (word0(&rv) == Big0 && word1(&rv) == Big1)
+						goto ovfl;
+					word0(&rv) = (word0(&rv) & Exp_mask)
+						+ Exp_msk1
+#ifdef IBM
+						| Exp_msk1 >> 4
+#endif
+						;
+					word1(&rv) = 0;
+#ifdef Avoid_Underflow
+					bc.dsign = 0;
+#endif
+					break;
+					}
+				}
+			else if (!(word0(&rv) & Bndry_mask) && !word1(&rv)) {
+ drop_down:
+				/* boundary case -- decrement exponent */
+#ifdef Sudden_Underflow /*{{*/
+				L = word0(&rv) & Exp_mask;
+#ifdef IBM
+				if (L <  Exp_msk1)
+#else
+#ifdef Avoid_Underflow
+				if (L <= (bc.scale ? (2*P+1)*Exp_msk1 : Exp_msk1))
+#else
+				if (L <= Exp_msk1)
+#endif /*Avoid_Underflow*/
+#endif /*IBM*/
+					{
+					if (bc.nd >nd) {
+						bc.uflchk = 1;
+						break;
+						}
+					goto undfl;
+					}
+				L -= Exp_msk1;
+#else /*Sudden_Underflow}{*/
+#ifdef Avoid_Underflow
+				if (bc.scale) {
+					L = word0(&rv) & Exp_mask;
+					if (L <= (2*P+1)*Exp_msk1) {
+						if (L > (P+2)*Exp_msk1)
+							/* round even ==> */
+							/* accept rv */
+							break;
+						/* rv = smallest denormal */
+						if (bc.nd >nd) {
+							bc.uflchk = 1;
+							break;
+							}
+						goto undfl;
+						}
+					}
+#endif /*Avoid_Underflow*/
+				L = (word0(&rv) & Exp_mask) - Exp_msk1;
+#endif /*Sudden_Underflow}}*/
+				word0(&rv) = L | Bndry_mask1;
+				word1(&rv) = 0xffffffff;
+#ifdef IBM
+				goto cont;
+#else
+#ifndef NO_STRTOD_BIGCOMP
+				if (bc.nd > nd)
+					goto cont;
+#endif
+				break;
+#endif
+				}
+#ifndef ROUND_BIASED
+#ifdef Avoid_Underflow
+			if (Lsb1) {
+				if (!(word0(&rv) & Lsb1))
+					break;
+				}
+			else if (!(word1(&rv) & Lsb))
+				break;
+#else
+			if (!(word1(&rv) & LSB))
+				break;
+#endif
+#endif
+			if (bc.dsign)
+#ifdef Avoid_Underflow
+				dval(&rv) += sulp(&rv, &bc);
+#else
+				dval(&rv) += ulp(&rv);
+#endif
+#ifndef ROUND_BIASED
+			else {
+#ifdef Avoid_Underflow
+				dval(&rv) -= sulp(&rv, &bc);
+#else
+				dval(&rv) -= ulp(&rv);
+#endif
+#ifndef Sudden_Underflow
+				if (!dval(&rv)) {
+					if (bc.nd >nd) {
+						bc.uflchk = 1;
+						break;
+						}
+					goto undfl;
+					}
+#endif
+				}
+#ifdef Avoid_Underflow
+			bc.dsign = 1 - bc.dsign;
+#endif
+#endif
+			break;
+			}
+		if ((aadj = ratio(delta, bs)) <= 2.) {
+			if (bc.dsign)
+				aadj = aadj1 = 1.;
+			else if (word1(&rv) || word0(&rv) & Bndry_mask) {
+#ifndef Sudden_Underflow
+				if (word1(&rv) == Tiny1 && !word0(&rv)) {
+					if (bc.nd >nd) {
+						bc.uflchk = 1;
+						break;
+						}
+					goto undfl;
+					}
+#endif
+				aadj = 1.;
+				aadj1 = -1.;
+				}
+			else {
+				/* special case -- power of FLT_RADIX to be */
+				/* rounded down... */
+
+				if (aadj < 2./FLT_RADIX)
+					aadj = 1./FLT_RADIX;
+				else
+					aadj *= 0.5;
+				aadj1 = -aadj;
+				}
+			}
+		else {
+			aadj *= 0.5;
+			aadj1 = bc.dsign ? aadj : -aadj;
+#ifdef Check_FLT_ROUNDS
+			switch(bc.rounding) {
+				case 2: /* towards +infinity */
+					aadj1 -= 0.5;
+					break;
+				case 0: /* towards 0 */
+				case 3: /* towards -infinity */
+					aadj1 += 0.5;
+				}
+#else
+			if (Flt_Rounds == 0)
+				aadj1 += 0.5;
+#endif /*Check_FLT_ROUNDS*/
+			}
+		y = word0(&rv) & Exp_mask;
+
+		/* Check for overflow */
+
+		if (y == Exp_msk1*(DBL_MAX_EXP+Bias-1)) {
+			dval(&rv0) = dval(&rv);
+			word0(&rv) -= P*Exp_msk1;
+			adj.d = aadj1 * ulp(&rv);
+			dval(&rv) += adj.d;
+			if ((word0(&rv) & Exp_mask) >=
+					Exp_msk1*(DBL_MAX_EXP+Bias-P)) {
+				if (word0(&rv0) == Big0 && word1(&rv0) == Big1)
+					goto ovfl;
+				word0(&rv) = Big0;
+				word1(&rv) = Big1;
+				goto cont;
+				}
+			else
+				word0(&rv) += P*Exp_msk1;
+			}
+		else {
+#ifdef Avoid_Underflow
+			if (bc.scale && y <= 2*P*Exp_msk1) {
+				if (aadj <= 0x7fffffff) {
+					if ((z = (ULong)aadj) <= 0)
+						z = 1;
+					aadj = z;
+					aadj1 = bc.dsign ? aadj : -aadj;
+					}
+				dval(&aadj2) = aadj1;
+				word0(&aadj2) += (2*P+1)*Exp_msk1 - y;
+				aadj1 = dval(&aadj2);
+				adj.d = aadj1 * ulp(&rv);
+				dval(&rv) += adj.d;
+				if (rv.d == 0.)
+#ifdef NO_STRTOD_BIGCOMP
+					goto undfl;
+#else
+					{
+					req_bigcomp = 1;
+					break;
+					}
+#endif
+				}
+			else {
+				adj.d = aadj1 * ulp(&rv);
+				dval(&rv) += adj.d;
+				}
+#else
+#ifdef Sudden_Underflow
+			if ((word0(&rv) & Exp_mask) <= P*Exp_msk1) {
+				dval(&rv0) = dval(&rv);
+				word0(&rv) += P*Exp_msk1;
+				adj.d = aadj1 * ulp(&rv);
+				dval(&rv) += adj.d;
+#ifdef IBM
+				if ((word0(&rv) & Exp_mask) <  P*Exp_msk1)
+#else
+				if ((word0(&rv) & Exp_mask) <= P*Exp_msk1)
+#endif
+					{
+					if (word0(&rv0) == Tiny0
+					 && word1(&rv0) == Tiny1) {
+						if (bc.nd >nd) {
+							bc.uflchk = 1;
+							break;
+							}
+						goto undfl;
+						}
+					word0(&rv) = Tiny0;
+					word1(&rv) = Tiny1;
+					goto cont;
+					}
+				else
+					word0(&rv) -= P*Exp_msk1;
+				}
+			else {
+				adj.d = aadj1 * ulp(&rv);
+				dval(&rv) += adj.d;
+				}
+#else /*Sudden_Underflow*/
+			/* Compute adj so that the IEEE rounding rules will
+			 * correctly round rv + adj in some half-way cases.
+			 * If rv * ulp(rv) is denormalized (i.e.,
+			 * y <= (P-1)*Exp_msk1), we must adjust aadj to avoid
+			 * trouble from bits lost to denormalization;
+			 * example: 1.2e-307 .
+			 */
+			if (y <= (P-1)*Exp_msk1 && aadj > 1.) {
+				aadj1 = (double)(int)(aadj + 0.5);
+				if (!bc.dsign)
+					aadj1 = -aadj1;
+				}
+			adj.d = aadj1 * ulp(&rv);
+			dval(&rv) += adj.d;
+#endif /*Sudden_Underflow*/
+#endif /*Avoid_Underflow*/
+			}
+		z = word0(&rv) & Exp_mask;
+#ifndef SET_INEXACT
+		if (bc.nd == nd) {
+#ifdef Avoid_Underflow
+		if (!bc.scale)
+#endif
+		if (y == z) {
+			/* Can we stop now? */
+			L = (Long)aadj;
+			aadj -= L;
+			/* The tolerances below are conservative. */
+			if (bc.dsign || word1(&rv) || word0(&rv) & Bndry_mask) {
+				if (aadj < .4999999 || aadj > .5000001)
+					break;
+				}
+			else if (aadj < .4999999/FLT_RADIX)
+				break;
+			}
+		}
+#endif
+ cont:
+		Bfree(bb);
+		Bfree(bd);
+		Bfree(bs);
+		Bfree(delta);
+		}
+	Bfree(bb);
+	Bfree(bd);
+	Bfree(bs);
+	Bfree(bd0);
+	Bfree(delta);
+#ifndef NO_STRTOD_BIGCOMP
+	if (req_bigcomp) {
+		bd0 = 0;
+		bc.e0 += nz1;
+		bigcomp(&rv, s0, &bc);
+		y = word0(&rv) & Exp_mask;
+		if (y == Exp_mask)
+			goto ovfl;
+		if (y == 0 && rv.d == 0.)
+			goto undfl;
+		}
+#endif
+#ifdef SET_INEXACT
+	if (bc.inexact) {
+		if (!oldinexact) {
+			word0(&rv0) = Exp_1 + (70 << Exp_shift);
+			word1(&rv0) = 0;
+			dval(&rv0) += 1.;
+			}
+		}
+	else if (!oldinexact)
+		clear_inexact();
+#endif
+#ifdef Avoid_Underflow
+	if (bc.scale) {
+		word0(&rv0) = Exp_1 - 2*P*Exp_msk1;
+		word1(&rv0) = 0;
+		dval(&rv) *= dval(&rv0);
+#ifndef NO_ERRNO
+		/* try to avoid the bug of testing an 8087 register value */
+#ifdef IEEE_Arith
+		if (!(word0(&rv) & Exp_mask))
+#else
+		if (word0(&rv) == 0 && word1(&rv) == 0)
+#endif
+			errno = ERANGE;
+#endif
+		}
+#endif /* Avoid_Underflow */
+#ifdef SET_INEXACT
+	if (bc.inexact && !(word0(&rv) & Exp_mask)) {
+		/* set underflow bit */
+		dval(&rv0) = 1e-300;
+		dval(&rv0) *= dval(&rv0);
+		}
+#endif
+ ret:
+	if (se)
+		*se = (char *)s;
+	return sign ? -dval(&rv) : dval(&rv);
+	}
+
+#ifndef MULTIPLE_THREADS
+ static char *dtoa_result;
+#endif
+
+ static char *
+#ifdef KR_headers
+rv_alloc(i) int i;
+#else
+rv_alloc(int i)
+#endif
+{
+	int j, k, *r;
+
+	j = sizeof(ULong);
+	for(k = 0;
+		sizeof(Bigint) - sizeof(ULong) - sizeof(int) + j <= (size_t)i;
+		j <<= 1)
+			k++;
+	r = (int*)Balloc(k);
+	*r = k;
+	return
+#ifndef MULTIPLE_THREADS
+	dtoa_result =
+#endif
+		(char *)(r+1);
+	}
+
+ static char *
+#ifdef KR_headers
+nrv_alloc(s, rve, n) char *s, **rve; int n;
+#else
+nrv_alloc(const char *s, char **rve, int n)
+#endif
+{
+	char *rv, *t;
+
+	t = rv = rv_alloc(n);
+	for(*t = *s++; *t; *t = *s++) t++;
+	if (rve)
+		*rve = t;
+	return rv;
+	}
+
+/* freedtoa(s) must be used to free values s returned by dtoa
+ * when MULTIPLE_THREADS is #defined.  It should be used in all cases,
+ * but for consistency with earlier versions of dtoa, it is optional
+ * when MULTIPLE_THREADS is not defined.
+ */
+
+ void
+#ifdef KR_headers
+freedtoa(s) char *s;
+#else
+freedtoa(char *s)
+#endif
+{
+	Bigint *b = (Bigint *)((int *)s - 1);
+	b->maxwds = 1 << (b->k = *(int*)b);
+	Bfree(b);
+#ifndef MULTIPLE_THREADS
+	if (s == dtoa_result)
+		dtoa_result = 0;
+#endif
+	}
+
+/* dtoa for IEEE arithmetic (dmg): convert double to ASCII string.
+ *
+ * Inspired by "How to Print Floating-Point Numbers Accurately" by
+ * Guy L. Steele, Jr. and Jon L. White [Proc. ACM SIGPLAN '90, pp. 112-126].
+ *
+ * Modifications:
+ *	1. Rather than iterating, we use a simple numeric overestimate
+ *	   to determine k = floor(log10(d)).  We scale relevant
+ *	   quantities using O(log2(k)) rather than O(k) multiplications.
+ *	2. For some modes > 2 (corresponding to ecvt and fcvt), we don't
+ *	   try to generate digits strictly left to right.  Instead, we
+ *	   compute with fewer bits and propagate the carry if necessary
+ *	   when rounding the final digit up.  This is often faster.
+ *	3. Under the assumption that input will be rounded nearest,
+ *	   mode 0 renders 1e23 as 1e23 rather than 9.999999999999999e22.
+ *	   That is, we allow equality in stopping tests when the
+ *	   round-nearest rule will give the same floating-point value
+ *	   as would satisfaction of the stopping test with strict
+ *	   inequality.
+ *	4. We remove common factors of powers of 2 from relevant
+ *	   quantities.
+ *	5. When converting floating-point integers less than 1e16,
+ *	   we use floating-point arithmetic rather than resorting
+ *	   to multiple-precision integers.
+ *	6. When asked to produce fewer than 15 digits, we first try
+ *	   to get by with floating-point arithmetic; we resort to
+ *	   multiple-precision integer arithmetic only if we cannot
+ *	   guarantee that the floating-point calculation has given
+ *	   the correctly rounded result.  For k requested digits and
+ *	   "uniformly" distributed input, the probability is
+ *	   something like 10^(k-15) that we must resort to the Long
+ *	   calculation.
+ */
+
+ char *
+dtoa
+#ifdef KR_headers
+	(dd, mode, ndigits, decpt, sign, rve)
+	double dd; int mode, ndigits, *decpt, *sign; char **rve;
+#else
+	(double dd, int mode, int ndigits, int *decpt, int *sign, char **rve)
+#endif
+{
+ /*	Arguments ndigits, decpt, sign are similar to those
+	of ecvt and fcvt; trailing zeros are suppressed from
+	the returned string.  If not null, *rve is set to point
+	to the end of the return value.  If d is +-Infinity or NaN,
+	then *decpt is set to 9999.
+
+	mode:
+		0 ==> shortest string that yields d when read in
+			and rounded to nearest.
+		1 ==> like 0, but with Steele & White stopping rule;
+			e.g. with IEEE P754 arithmetic, mode 0 gives
+			1e23 whereas mode 1 gives 9.999999999999999e22.
+		2 ==> max(1,ndigits) significant digits.  This gives a
+			return value similar to that of ecvt, except
+			that trailing zeros are suppressed.
+		3 ==> through ndigits past the decimal point.  This
+			gives a return value similar to that from fcvt,
+			except that trailing zeros are suppressed, and
+			ndigits can be negative.
+		4,5 ==> similar to 2 and 3, respectively, but (in
+			round-nearest mode) with the tests of mode 0 to
+			possibly return a shorter string that rounds to d.
+			With IEEE arithmetic and compilation with
+			-DHonor_FLT_ROUNDS, modes 4 and 5 behave the same
+			as modes 2 and 3 when FLT_ROUNDS != 1.
+		6-9 ==> Debugging modes similar to mode - 4:  don't try
+			fast floating-point estimate (if applicable).
+
+		Values of mode other than 0-9 are treated as mode 0.
+
+		Sufficient space is allocated to the return value
+		to hold the suppressed trailing zeros.
+	*/
+
+	int bbits, b2, b5, be, dig, i, ieps, ilim, ilim0, ilim1,
+		j, j1 = 0, k, k0, k_check, leftright, m2, m5, s2, s5,
+		spec_case, try_quick;
+	Long L;
+#ifndef Sudden_Underflow
+	int denorm;
+	ULong x;
+#endif
+	Bigint *b, *b1, *delta, *mlo = NULL, *mhi, *S;
+	U d2, eps, u;
+	double ds;
+	char *s, *s0;
+#ifndef No_leftright
+#ifdef IEEE_Arith
+	U eps1;
+#endif
+#endif
+#ifdef SET_INEXACT
+	int inexact, oldinexact;
+#endif
+#ifdef Honor_FLT_ROUNDS /*{*/
+	int Rounding;
+#ifdef Trust_FLT_ROUNDS /*{{ only define this if FLT_ROUNDS really works! */
+	Rounding = Flt_Rounds;
+#else /*}{*/
+	Rounding = 1;
+	switch(fegetround()) {
+	  case FE_TOWARDZERO:	Rounding = 0; break;
+	  case FE_UPWARD:	Rounding = 2; break;
+	  case FE_DOWNWARD:	Rounding = 3;
+	  }
+#endif /*}}*/
+#endif /*}*/
+
+#ifndef MULTIPLE_THREADS
+	if (dtoa_result) {
+		freedtoa(dtoa_result);
+		dtoa_result = 0;
+		}
+#endif
+
+	u.d = dd;
+	if (word0(&u) & Sign_bit) {
+		/* set sign for everything, including 0's and NaNs */
+		*sign = 1;
+		word0(&u) &= ~Sign_bit;	/* clear sign bit */
+		}
+	else
+		*sign = 0;
+
+#if defined(IEEE_Arith) + defined(VAX)
+#ifdef IEEE_Arith
+	if ((word0(&u) & Exp_mask) == Exp_mask)
+#else
+	if (word0(&u)  == 0x8000)
+#endif
+		{
+		/* Infinity or NaN */
+		*decpt = 9999;
+#ifdef IEEE_Arith
+		if (!word1(&u) && !(word0(&u) & 0xfffff))
+			return nrv_alloc("Infinity", rve, 8);
+#endif
+		return nrv_alloc("NaN", rve, 3);
+		}
+#endif
+#ifdef IBM
+	dval(&u) += 0; /* normalize */
+#endif
+	if (!dval(&u)) {
+		*decpt = 1;
+		return nrv_alloc("0", rve, 1);
+		}
+
+#ifdef SET_INEXACT
+	try_quick = oldinexact = get_inexact();
+	inexact = 1;
+#endif
+#ifdef Honor_FLT_ROUNDS
+	if (Rounding >= 2) {
+		if (*sign)
+			Rounding = Rounding == 2 ? 0 : 2;
+		else
+			if (Rounding != 2)
+				Rounding = 0;
+		}
+#endif
+
+	b = d2b(&u, &be, &bbits);
+	i = (int)(word0(&u) >> Exp_shift1 & (Exp_mask>>Exp_shift1));
+#ifndef Sudden_Underflow
+	if (i) {
+#endif
+		dval(&d2) = dval(&u);
+		word0(&d2) &= Frac_mask1;
+		word0(&d2) |= Exp_11;
+#ifdef IBM
+		if (j = 11 - hi0bits(word0(&d2) & Frac_mask))
+			dval(&d2) /= 1 << j;
+#endif
+
+		/* log(x)	~=~ log(1.5) + (x-1.5)/1.5
+		 * log10(x)	 =  log(x) / log(10)
+		 *		~=~ log(1.5)/log(10) + (x-1.5)/(1.5*log(10))
+		 * log10(d) = (i-Bias)*log(2)/log(10) + log10(d2)
+		 *
+		 * This suggests computing an approximation k to log10(d) by
+		 *
+		 * k = (i - Bias)*0.301029995663981
+		 *	+ ( (d2-1.5)*0.289529654602168 + 0.176091259055681 );
+		 *
+		 * We want k to be too large rather than too small.
+		 * The error in the first-order Taylor series approximation
+		 * is in our favor, so we just round up the constant enough
+		 * to compensate for any error in the multiplication of
+		 * (i - Bias) by 0.301029995663981; since |i - Bias| <= 1077,
+		 * and 1077 * 0.30103 * 2^-52 ~=~ 7.2e-14,
+		 * adding 1e-13 to the constant term more than suffices.
+		 * Hence we adjust the constant term to 0.1760912590558.
+		 * (We could get a more accurate k by invoking log10,
+		 *  but this is probably not worthwhile.)
+		 */
+
+		i -= Bias;
+#ifdef IBM
+		i <<= 2;
+		i += j;
+#endif
+#ifndef Sudden_Underflow
+		denorm = 0;
+		}
+	else {
+		/* d is denormalized */
+
+		i = bbits + be + (Bias + (P-1) - 1);
+		x = i > 32  ? word0(&u) << (64 - i) | word1(&u) >> (i - 32)
+			    : word1(&u) << (32 - i);
+		dval(&d2) = x;
+		word0(&d2) -= 31*Exp_msk1; /* adjust exponent */
+		i -= (Bias + (P-1) - 1) + 1;
+		denorm = 1;
+		}
+#endif
+	ds = (dval(&d2)-1.5)*0.289529654602168 + 0.1760912590558 + i*0.301029995663981;
+	k = (int)ds;
+	if (ds < 0. && ds != k)
+		k--;	/* want k = floor(ds) */
+	k_check = 1;
+	if (k >= 0 && k <= Ten_pmax) {
+		if (dval(&u) < tens[k])
+			k--;
+		k_check = 0;
+		}
+	j = bbits - i - 1;
+	if (j >= 0) {
+		b2 = 0;
+		s2 = j;
+		}
+	else {
+		b2 = -j;
+		s2 = 0;
+		}
+	if (k >= 0) {
+		b5 = 0;
+		s5 = k;
+		s2 += k;
+		}
+	else {
+		b2 -= k;
+		b5 = -k;
+		s5 = 0;
+		}
+	if (mode < 0 || mode > 9)
+		mode = 0;
+
+#ifndef SET_INEXACT
+#ifdef Check_FLT_ROUNDS
+	try_quick = Rounding == 1;
+#else
+	try_quick = 1;
+#endif
+#endif /*SET_INEXACT*/
+
+	if (mode > 5) {
+		mode -= 4;
+		try_quick = 0;
+		}
+	leftright = 1;
+	ilim = ilim1 = -1;	/* Values for cases 0 and 1; done here to */
+				/* silence erroneous "gcc -Wall" warning. */
+	switch(mode) {
+		case 0:
+		case 1:
+			i = 18;
+			ndigits = 0;
+			break;
+		case 2:
+			leftright = 0;
+			FALLTHROUGH;
+		case 4:
+			if (ndigits <= 0)
+				ndigits = 1;
+			ilim = ilim1 = i = ndigits;
+			break;
+		case 3:
+			leftright = 0;
+			FALLTHROUGH;
+		case 5:
+			i = ndigits + k + 1;
+			ilim = i;
+			ilim1 = i - 1;
+			if (i <= 0)
+				i = 1;
+		}
+	s = s0 = rv_alloc(i);
+
+#ifdef Honor_FLT_ROUNDS
+	if (mode > 1 && Rounding != 1)
+		leftright = 0;
+#endif
+
+	if (ilim >= 0 && ilim <= Quick_max && try_quick) {
+
+		/* Try to get by with floating-point arithmetic. */
+
+		i = 0;
+		dval(&d2) = dval(&u);
+		k0 = k;
+		ilim0 = ilim;
+		ieps = 2; /* conservative */
+		if (k > 0) {
+			ds = tens[k&0xf];
+			j = k >> 4;
+			if (j & Bletch) {
+				/* prevent overflows */
+				j &= Bletch - 1;
+				dval(&u) /= bigtens[n_bigtens-1];
+				ieps++;
+				}
+			for(; j; j >>= 1, i++)
+				if (j & 1) {
+					ieps++;
+					ds *= bigtens[i];
+					}
+			dval(&u) /= ds;
+			}
+		else {
+			j1 = -k;
+			if (j1) {
+				dval(&u) *= tens[j1 & 0xf];
+				for(j = j1 >> 4; j; j >>= 1, i++)
+					if (j & 1) {
+						ieps++;
+						dval(&u) *= bigtens[i];
+						}
+				}
+			}
+		if (k_check && dval(&u) < 1. && ilim > 0) {
+			if (ilim1 <= 0)
+				goto fast_failed;
+			ilim = ilim1;
+			k--;
+			dval(&u) *= 10.;
+			ieps++;
+			}
+		dval(&eps) = ieps*dval(&u) + 7.;
+		word0(&eps) -= (P-1)*Exp_msk1;
+		if (ilim == 0) {
+			S = mhi = 0;
+			dval(&u) -= 5.;
+			if (dval(&u) > dval(&eps))
+				goto one_digit;
+			if (dval(&u) < -dval(&eps))
+				goto no_digits;
+			goto fast_failed;
+			}
+#ifndef No_leftright
+		if (leftright) {
+			/* Use Steele & White method of only
+			 * generating digits needed.
+			 */
+			dval(&eps) = 0.5/tens[ilim-1] - dval(&eps);
+#ifdef IEEE_Arith
+			if (k0 < 0 && j1 >= 307) {
+				eps1.d = 1.01e256; /* 1.01 allows roundoff in the next few lines */
+				word0(&eps1) -= Exp_msk1 * (Bias+P-1);
+				dval(&eps1) *= tens[j1 & 0xf];
+				for(i = 0, j = (j1-256) >> 4; j; j >>= 1, i++)
+					if (j & 1)
+						dval(&eps1) *= bigtens[i];
+				if (eps.d < eps1.d)
+					eps.d = eps1.d;
+				}
+#endif
+			for(i = 0;;) {
+				L = dval(&u);
+				dval(&u) -= L;
+				*s++ = '0' + (int)L;
+				if (1. - dval(&u) < dval(&eps))
+					goto bump_up;
+				if (dval(&u) < dval(&eps))
+					goto ret1;
+				if (++i >= ilim)
+					break;
+				dval(&eps) *= 10.;
+				dval(&u) *= 10.;
+				}
+			}
+		else {
+#endif
+			/* Generate ilim digits, then fix them up. */
+			dval(&eps) *= tens[ilim-1];
+			for(i = 1;; i++, dval(&u) *= 10.) {
+				L = (Long)(dval(&u));
+				if (!(dval(&u) -= L))
+					ilim = i;
+				*s++ = '0' + (char)L;
+				if (i == ilim) {
+					if (dval(&u) > 0.5 + dval(&eps))
+						goto bump_up;
+					else if (dval(&u) < 0.5 - dval(&eps)) {
+						while(*--s == '0') {}
+						s++;
+						goto ret1;
+						}
+					break;
+					}
+				}
+#ifndef No_leftright
+			}
+#endif
+ fast_failed:
+		s = s0;
+		dval(&u) = dval(&d2);
+		k = k0;
+		ilim = ilim0;
+		}
+
+	/* Do we have a "small" integer? */
+
+	if (be >= 0 && k <= Int_max) {
+		/* Yes. */
+		ds = tens[k];
+		if (ndigits < 0 && ilim <= 0) {
+			S = mhi = 0;
+			if (ilim < 0 || dval(&u) <= 5*ds)
+				goto no_digits;
+			goto one_digit;
+			}
+		for(i = 1;; i++, dval(&u) *= 10.) {
+			L = (Long)(dval(&u) / ds);
+			dval(&u) -= L*ds;
+#ifdef Check_FLT_ROUNDS
+			/* If FLT_ROUNDS == 2, L will usually be high by 1 */
+			if (dval(&u) < 0) {
+				L--;
+				dval(&u) += ds;
+				}
+#endif
+			*s++ = '0' + (char)L;
+			if (!dval(&u)) {
+#ifdef SET_INEXACT
+				inexact = 0;
+#endif
+				break;
+				}
+			if (i == ilim) {
+#ifdef Honor_FLT_ROUNDS
+				if (mode > 1)
+				switch(Rounding) {
+				  case 0: goto ret1;
+				  case 2: goto bump_up;
+				  }
+#endif
+				dval(&u) += dval(&u);
+#ifdef ROUND_BIASED
+				if (dval(&u) >= ds)
+#else
+				if (dval(&u) > ds || (dval(&u) == ds && L & 1))
+#endif
+					{
+ bump_up:
+					while(*--s == '9')
+						if (s == s0) {
+							k++;
+							*s = '0';
+							break;
+							}
+					++*s++;
+					}
+				break;
+				}
+			}
+		goto ret1;
+		}
+
+	m2 = b2;
+	m5 = b5;
+	mhi = mlo = 0;
+	if (leftright) {
+		i =
+#ifndef Sudden_Underflow
+			denorm ? be + (Bias + (P-1) - 1 + 1) :
+#endif
+#ifdef IBM
+			1 + 4*P - 3 - bbits + ((bbits + be - 1) & 3);
+#else
+			1 + P - bbits;
+#endif
+		b2 += i;
+		s2 += i;
+		mhi = i2b(1);
+		}
+	if (m2 > 0 && s2 > 0) {
+		i = m2 < s2 ? m2 : s2;
+		b2 -= i;
+		m2 -= i;
+		s2 -= i;
+		}
+	if (b5 > 0) {
+		if (leftright) {
+			if (m5 > 0) {
+				mhi = pow5mult(mhi, m5);
+				b1 = mult(mhi, b);
+				Bfree(b);
+				b = b1;
+				}
+			j = b5 - m5;
+			if (j)
+				b = pow5mult(b, j);
+			}
+		else
+			b = pow5mult(b, b5);
+		}
+	S = i2b(1);
+	if (s5 > 0)
+		S = pow5mult(S, s5);
+
+	/* Check for special case that d is a normalized power of 2. */
+
+	spec_case = 0;
+	if ((mode < 2 || leftright)
+#ifdef Honor_FLT_ROUNDS
+			&& Rounding == 1
+#endif
+				) {
+		if (!word1(&u) && !(word0(&u) & Bndry_mask)
+#ifndef Sudden_Underflow
+		 && word0(&u) & (Exp_mask & ~Exp_msk1)
+#endif
+				) {
+			/* The special case */
+			b2 += Log2P;
+			s2 += Log2P;
+			spec_case = 1;
+			}
+		}
+
+	/* Arrange for convenient computation of quotients:
+	 * shift left if necessary so divisor has 4 leading 0 bits.
+	 *
+	 * Perhaps we should just compute leading 28 bits of S once
+	 * and for all and pass them and a shift to quorem, so it
+	 * can do shifts and ors to compute the numerator for q.
+	 */
+	i = dshift(S, s2);
+	b2 += i;
+	m2 += i;
+	s2 += i;
+	if (b2 > 0)
+		b = lshift(b, b2);
+	if (s2 > 0)
+		S = lshift(S, s2);
+	if (k_check) {
+		if (cmp(b,S) < 0) {
+			k--;
+			b = multadd(b, 10, 0);	/* we botched the k estimate */
+			if (leftright)
+				mhi = multadd(mhi, 10, 0);
+			ilim = ilim1;
+			}
+		}
+	if (ilim <= 0 && (mode == 3 || mode == 5)) {
+		if (ilim < 0 || cmp(b,S = multadd(S,5,0)) <= 0) {
+			/* no digits, fcvt style */
+ no_digits:
+			k = -1 - ndigits;
+			goto ret;
+			}
+ one_digit:
+		*s++ = '1';
+		k++;
+		goto ret;
+		}
+	if (leftright) {
+		if (m2 > 0)
+			mhi = lshift(mhi, m2);
+
+		/* Compute mlo -- check for special case
+		 * that d is a normalized power of 2.
+		 */
+
+		mlo = mhi;
+		if (spec_case) {
+			mhi = Balloc(mhi->k);
+			Bcopy(mhi, mlo);
+			mhi = lshift(mhi, Log2P);
+			}
+
+		for(i = 1;;i++) {
+			dig = quorem(b,S) + '0';
+			/* Do we yet have the shortest decimal string
+			 * that will round to d?
+			 */
+			j = cmp(b, mlo);
+			delta = diff(S, mhi);
+			j1 = delta->sign ? 1 : cmp(b, delta);
+			Bfree(delta);
+#ifndef ROUND_BIASED
+			if (j1 == 0 && mode != 1 && !(word1(&u) & 1)
+#ifdef Honor_FLT_ROUNDS
+				&& Rounding >= 1
+#endif
+								   ) {
+				if (dig == '9')
+					goto round_9_up;
+				if (j > 0)
+					dig++;
+#ifdef SET_INEXACT
+				else if (!b->x[0] && b->wds <= 1)
+					inexact = 0;
+#endif
+				*s++ = (char)dig;
+				goto ret;
+				}
+#endif
+			if (j < 0 || (j == 0 && mode != 1
+#ifndef ROUND_BIASED
+							&& !(word1(&u) & 1)
+#endif
+					)) {
+				if (!b->x[0] && b->wds <= 1) {
+#ifdef SET_INEXACT
+					inexact = 0;
+#endif
+					goto accept_dig;
+					}
+#ifdef Honor_FLT_ROUNDS
+				if (mode > 1)
+				 switch(Rounding) {
+				  case 0: goto accept_dig;
+				  case 2: goto keep_dig;
+				  }
+#endif /*Honor_FLT_ROUNDS*/
+				if (j1 > 0) {
+					b = lshift(b, 1);
+					j1 = cmp(b, S);
+#ifdef ROUND_BIASED
+					if (j1 >= 0 /*)*/
+#else
+					if ((j1 > 0 || (j1 == 0 && dig & 1))
+#endif
+					&& dig++ == '9')
+						goto round_9_up;
+					}
+ accept_dig:
+				*s++ = (char)dig;
+				goto ret;
+				}
+			if (j1 > 0) {
+#ifdef Honor_FLT_ROUNDS
+				if (!Rounding)
+					goto accept_dig;
+#endif
+				if (dig == '9') { /* possible if i == 1 */
+ round_9_up:
+					*s++ = '9';
+					goto roundoff;
+					}
+				*s++ = (char)dig + 1;
+				goto ret;
+				}
+#ifdef Honor_FLT_ROUNDS
+ keep_dig:
+#endif
+			*s++ = (char)dig;
+			if (i == ilim)
+				break;
+			b = multadd(b, 10, 0);
+			if (mlo == mhi)
+				mlo = mhi = multadd(mhi, 10, 0);
+			else {
+				mlo = multadd(mlo, 10, 0);
+				mhi = multadd(mhi, 10, 0);
+				}
+			}
+		}
+	else
+		for(i = 1;; i++) {
+			dig = quorem(b,S) + '0';
+			*s++ = (char)dig;
+			if (!b->x[0] && b->wds <= 1) {
+#ifdef SET_INEXACT
+				inexact = 0;
+#endif
+				goto ret;
+				}
+			if (i >= ilim)
+				break;
+			b = multadd(b, 10, 0);
+			}
+
+	/* Round off last digit */
+
+#ifdef Honor_FLT_ROUNDS
+	switch(Rounding) {
+	  case 0: goto trimzeros;
+	  case 2: goto roundoff;
+	  }
+#endif
+	b = lshift(b, 1);
+	j = cmp(b, S);
+#ifdef ROUND_BIASED
+	if (j >= 0)
+#else
+	if (j > 0 || (j == 0 && dig & 1))
+#endif
+		{
+ roundoff:
+		while(*--s == '9')
+			if (s == s0) {
+				k++;
+				*s++ = '1';
+				goto ret;
+				}
+		++*s++;
+		}
+	else {
+#ifdef Honor_FLT_ROUNDS
+ trimzeros:
+#endif
+		while(*--s == '0') {}
+		s++;
+		}
+ ret:
+	Bfree(S);
+	if (mhi) {
+		if (mlo && mlo != mhi)
+			Bfree(mlo);
+		Bfree(mhi);
+		}
+ ret1:
+#ifdef SET_INEXACT
+	if (inexact) {
+		if (!oldinexact) {
+			word0(&u) = Exp_1 + (70 << Exp_shift);
+			word1(&u) = 0;
+			dval(&u) += 1.;
+			}
+		}
+	else if (!oldinexact)
+		clear_inexact();
+#endif
+	Bfree(b);
+	*s = 0;
+	*decpt = k + 1;
+	if (rve)
+		*rve = s;
+	return s0;
+	}
+
+}  // namespace dmg_fp
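
The public surface of the file above is strtod(), dtoa(), and freedtoa(). A minimal usage sketch — not code from this CL; the include path is an assumption, while the signatures and mode semantics come from the definitions and comments above:

#include <cstdio>

#include "base/third_party/dmg_fp/dmg_fp.h"  // assumed include path

int main() {
  // String -> double: a correctly rounded strtod replacement.
  char* parse_end;
  double d = dmg_fp::strtod("1e23", &parse_end);

  // Double -> string: mode 0 requests the shortest digit string that reads
  // back as d, so 1e23 yields digits "1" with decpt == 24 (the value is
  // 0.1 * 10^24); mode 1 would yield "9999999999999999" with decpt == 23,
  // per modification 3 in the comment above dtoa.
  int decpt, sign;
  char* digits_end;
  char* digits = dmg_fp::dtoa(d, 0, 0, &decpt, &sign, &digits_end);
  std::printf("0.%s * 10^%d, sign=%d\n", digits, decpt, sign);

  // Per the freedtoa() comment above: freeing the result this way is
  // mandatory once MULTIPLE_THREADS is defined (the wrapper below defines
  // it), and harmless otherwise.
  dmg_fp::freedtoa(digits);
  return 0;
}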
diff --git a/base/third_party/dmg_fp/dtoa_wrapper.cc b/base/third_party/dmg_fp/dtoa_wrapper.cc
new file mode 100644
index 0000000..fb1ac8f
--- /dev/null
+++ b/base/third_party/dmg_fp/dtoa_wrapper.cc
@@ -0,0 +1,49 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// The purpose of this file is to supply the macro definitions necessary
+// to make third_party/dmg_fp/dtoa.cc thread-safe.
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/synchronization/lock.h"
+
+// We need two locks because they're sometimes grabbed at the same time.
+// A single lock would lead to an attempted recursive grab.
+static base::LazyInstance<base::Lock>::Leaky
+    dtoa_lock_0 = LAZY_INSTANCE_INITIALIZER;
+static base::LazyInstance<base::Lock>::Leaky
+    dtoa_lock_1 = LAZY_INSTANCE_INITIALIZER;
+
+/*
+ * This define and the code below is to trigger thread-safe behavior
+ * in dtoa.cc, per this comment from the file:
+ *
+ * #define MULTIPLE_THREADS if the system offers preemptively scheduled
+ *	multiple threads.  In this case, you must provide (or suitably
+ *	#define) two locks, acquired by ACQUIRE_DTOA_LOCK(n) and freed
+ *	by FREE_DTOA_LOCK(n) for n = 0 or 1.  (The second lock, accessed
+ *	in pow5mult, ensures lazy evaluation of only one copy of high
+ *	powers of 5; omitting this lock would introduce a small
+ *	probability of wasting memory, but would otherwise be harmless.)
+ *	You must also invoke freedtoa(s) to free the value s returned by
+ *	dtoa.  You may do so whether or not MULTIPLE_THREADS is #defined.
+ */
+#define MULTIPLE_THREADS
+
+inline static void ACQUIRE_DTOA_LOCK(size_t n) {
+  DCHECK(n < 2);
+  base::Lock* lock = n == 0 ? dtoa_lock_0.Pointer() : dtoa_lock_1.Pointer();
+  lock->Acquire();
+}
+
+inline static void FREE_DTOA_LOCK(size_t n) {
+  DCHECK(n < 2);
+  base::Lock* lock = n == 0 ? dtoa_lock_0.Pointer() : dtoa_lock_1.Pointer();
+  lock->Release();
+}
+
+#include "base/third_party/dmg_fp/dtoa.cc"
+
+#undef Bias  // Avoid windows jumbo build breakage.
+#undef Long  // To avoid breaking jni code in jumbo builds
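
Why two locks rather than one: base::Lock is not reentrant, and dtoa.cc nests its two critical sections on a single thread. pow5mult() holds lock 1 around its lazily built powers-of-5 list and, while still holding it, calls i2b()/mult(), which allocate through Balloc(), which takes lock 0 (both acquisitions are visible in the msvc_warnings.patch hunks below). A rough sketch of the nesting:

  dtoa(...)
    pow5mult(...)         // ACQUIRE_DTOA_LOCK(1)
      i2b(625)
        Balloc(k)         // ACQUIRE_DTOA_LOCK(0) while lock 1 is still held

If both macros mapped to the same base::Lock, the inner Acquire() would self-deadlock — exactly the "attempted recursive grab" the comment above warns about.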
diff --git a/base/third_party/dmg_fp/exp_length.patch b/base/third_party/dmg_fp/exp_length.patch
new file mode 100644
index 0000000..278ec17
--- /dev/null
+++ b/base/third_party/dmg_fp/exp_length.patch
@@ -0,0 +1,18 @@
+diff --git a/base/third_party/dmg_fp/dtoa.cc b/base/third_party/dmg_fp/dtoa.cc
+index c0a51c2..ab4e056 100644
+--- a/base/third_party/dmg_fp/dtoa.cc
++++ b/base/third_party/dmg_fp/dtoa.cc
+@@ -2674,8 +2674,11 @@ strtod
+ 			if (c > '0' && c <= '9') {
+ 				L = c - '0';
+ 				s1 = s;
+-				while((c = *++s) >= '0' && c <= '9')
+-					L = 10*L + c - '0';
++				while((c = *++s) >= '0' && c <= '9') {
++					if (L < (INT_MAX - 10) / 10) {
++						L = 10*L + (c - '0');
++					}
++				}
+ 				if (s - s1 > 8 || L > 19999)
+ 					/* Avoid confusion from exponents
+ 					 * so large that e might overflow.
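
The clamp exists because the unpatched loop can overflow L: an exponent of nine or more digits (e.g. "1e9999999999") pushes 10*L + c - '0' past INT_MAX before the post-loop s - s1 > 8 check can reject it, and signed overflow is undefined behavior. With the clamp, L saturates well above 19999, so the existing s - s1 > 8 || L > 19999 test still routes such inputs to the huge-exponent path.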
diff --git a/base/third_party/dmg_fp/g_fmt.cc b/base/third_party/dmg_fp/g_fmt.cc
new file mode 100644
index 0000000..67c9f57
--- /dev/null
+++ b/base/third_party/dmg_fp/g_fmt.cc
@@ -0,0 +1,102 @@
+/****************************************************************
+ *
+ * The author of this software is David M. Gay.
+ *
+ * Copyright (c) 1991, 1996 by Lucent Technologies.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose without fee is hereby granted, provided that this entire notice
+ * is included in all copies of any software which is or includes a copy
+ * or modification of this software and in all copies of the supporting
+ * documentation for such software.
+ *
+ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED
+ * WARRANTY.  IN PARTICULAR, NEITHER THE AUTHOR NOR LUCENT MAKES ANY
+ * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY
+ * OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE.
+ *
+ ***************************************************************/
+
+/* g_fmt(buf,x) stores the closest decimal approximation to x in buf;
+ * it suffices to declare buf
+ *	char buf[32];
+ */
+
+#include "dmg_fp.h"
+
+namespace dmg_fp {
+
+ char *
+g_fmt(char *b, double x)
+{
+	int i, k;
+	char *s;
+	int decpt, j, sign;
+	char *b0, *s0, *se;
+
+	b0 = b;
+#ifdef IGNORE_ZERO_SIGN
+	if (!x) {
+		*b++ = '0';
+		*b = 0;
+		goto done;
+		}
+#endif
+	s = s0 = dtoa(x, 0, 0, &decpt, &sign, &se);
+	if (sign)
+		*b++ = '-';
+	if (decpt == 9999) /* Infinity or Nan */ {
+		for(*b = *s++; *b++; *b = *s++) {}
+		goto done0;
+		}
+	if (decpt <= -4 || decpt > se - s + 5) {
+		*b++ = *s++;
+		if (*s) {
+			*b++ = '.';
+			for(*b = *s++; *b; *b = *s++)
+				b++;
+			}
+		*b++ = 'e';
+		/* sprintf(b, "%+.2d", decpt - 1); */
+		if (--decpt < 0) {
+			*b++ = '-';
+			decpt = -decpt;
+			}
+		else
+			*b++ = '+';
+		for(j = 2, k = 10; 10*k <= decpt; j++, k *= 10) {}
+		for(;;) {
+			i = decpt / k;
+			*b++ = (char)i + '0';
+			if (--j <= 0)
+				break;
+			decpt -= i*k;
+			decpt *= 10;
+			}
+		*b = 0;
+		}
+	else if (decpt <= 0) {
+		*b++ = '.';
+		for(; decpt < 0; decpt++)
+			*b++ = '0';
+		for(*b = *s++; *b++; *b = *s++) {}
+		}
+	else {
+		for(*b = *s++; *b; *b = *s++) {
+			b++;
+			if (--decpt == 0 && *s)
+				*b++ = '.';
+			}
+		for(; decpt > 0; decpt--)
+			*b++ = '0';
+		*b = 0;
+		}
+ done0:
+	freedtoa(s0);
+#ifdef IGNORE_ZERO_SIGN
+ done:
+#endif
+	return b0;
+	}
+
+}  // namespace dmg_fp
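
As the header comment of g_fmt.cc says, a 32-byte buffer always suffices. A minimal usage sketch (include path assumed, as in the sketch after dtoa.cc above):

#include <cstdio>

#include "base/third_party/dmg_fp/dmg_fp.h"  // assumed include path

int main() {
  char buf[32];  // per the g_fmt comment, char buf[32] is always enough
  dmg_fp::g_fmt(buf, 1234.5);
  std::printf("%s\n", buf);  // "1234.5" -- fixed notation for mid-range decpt
  dmg_fp::g_fmt(buf, 0.00001);
  std::printf("%s\n", buf);  // "1e-05" -- decpt <= -4 switches to e-notation
  return 0;
}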
diff --git a/base/third_party/dmg_fp/gcc_64_bit.patch b/base/third_party/dmg_fp/gcc_64_bit.patch
new file mode 100644
index 0000000..ab943c0
--- /dev/null
+++ b/base/third_party/dmg_fp/gcc_64_bit.patch
@@ -0,0 +1,25 @@
+Index: dtoa.cc
+--- dtoa.cc    (old copy)
++++ dtoa.cc    (working copy)
+@@ -183,8 +183,12 @@
+ #define NO_HEX_FP
+ 
+ #ifndef Long
++#if __LP64__
++#define Long int
++#else
+ #define Long long
+ #endif
++#endif
+ #ifndef ULong
+ typedef unsigned Long ULong;
+ #endif
+@@ -221,7 +225,7 @@ extern void *MALLOC(size_t);
+ #ifndef PRIVATE_MEM
+ #define PRIVATE_MEM 2304
+ #endif
+-#define PRIVATE_mem ((PRIVATE_MEM+sizeof(double)-1)/sizeof(double))
++#define PRIVATE_mem ((unsigned)((PRIVATE_MEM+sizeof(double)-1)/sizeof(double)))
+ static double private_mem[PRIVATE_mem], *pmem_next = private_mem;
+ #endif
+ 
diff --git a/base/third_party/dmg_fp/gcc_warnings.patch b/base/third_party/dmg_fp/gcc_warnings.patch
new file mode 100644
index 0000000..4262237
--- /dev/null
+++ b/base/third_party/dmg_fp/gcc_warnings.patch
@@ -0,0 +1,126 @@
+Index: dtoa.cc
+--- dtoa.cc    (old copy)
++++ dtoa.cc    (working copy)
+@@ -179,6 +179,9 @@
+  *	used for input more than STRTOD_DIGLIM digits long (default 40).
+  */
+ 
++#define IEEE_8087
++#define NO_HEX_FP
++
+ #ifndef Long
+ #define Long long
+ #endif
+@@ -280,9 +283,7 @@
+ #include "math.h"
+ #endif
+ 
+-#ifdef __cplusplus
+-extern "C" {
+-#endif
++namespace dmg_fp {
+ 
+ #ifndef CONST
+ #ifdef KR_headers
+@@ -511,11 +512,9 @@
+ 
+ #define Kmax 7
+ 
+-#ifdef __cplusplus
+-extern "C" double strtod(const char *s00, char **se);
+-extern "C" char *dtoa(double d, int mode, int ndigits,
++double strtod(const char *s00, char **se);
++char *dtoa(double d, int mode, int ndigits,
+ 			int *decpt, int *sign, char **rve);
+-#endif
+ 
+  struct
+ Bigint {
+@@ -1527,7 +1526,7 @@
+ #ifdef KR_headers
+ 	(sp, t) char **sp, *t;
+ #else
+-	(CONST char **sp, char *t)
++	(CONST char **sp, CONST char *t)
+ #endif
+ {
+ 	int c, d;
+@@ -2234,7 +2234,7 @@ bigcomp
+ 	nd = bc->nd;
+ 	nd0 = bc->nd0;
+ 	p5 = nd + bc->e0 - 1;
+-	speccase = 0;
++	dd = speccase = 0;
+ #ifndef Sudden_Underflow
+ 	if (rv->d == 0.) {	/* special case: value near underflow-to-zero */
+ 				/* threshold was rounded to zero */
+@@ -3431,7 +3430,7 @@
+ 
+ 	j = sizeof(ULong);
+ 	for(k = 0;
+-		sizeof(Bigint) - sizeof(ULong) - sizeof(int) + j <= i;
++		sizeof(Bigint) - sizeof(ULong) - sizeof(int) + j <= (size_t)i;
+ 		j <<= 1)
+ 			k++;
+ 	r = (int*)Balloc(k);
+@@ -3447,7 +3446,7 @@
+ #ifdef KR_headers
+ nrv_alloc(s, rve, n) char *s, **rve; int n;
+ #else
+-nrv_alloc(char *s, char **rve, int n)
++nrv_alloc(CONST char *s, char **rve, int n)
+ #endif
+ {
+ 	char *rv, *t;
+@@ -4202,6 +4201,5 @@
+ 		*rve = s;
+ 	return s0;
+ 	}
+-#ifdef __cplusplus
+-}
+-#endif
++
++}  // namespace dmg_fp
+Index: g_fmt.cc
+--- g_fmt.cc   (old copy)
++++ g_fmt.cc   (new copy)
+@@ -46,14 +46,14 @@ g_fmt(register char *b, double x)
+ 	if (sign)
+ 		*b++ = '-';
+ 	if (decpt == 9999) /* Infinity or Nan */ {
+-		while(*b++ = *s++);
++		while((*b++ = *s++));
+ 		goto done0;
+ 		}
+ 	if (decpt <= -4 || decpt > se - s + 5) {
+ 		*b++ = *s++;
+ 		if (*s) {
+ 			*b++ = '.';
+-			while(*b = *s++)
++			while((*b = *s++))
+ 				b++;
+ 			}
+ 		*b++ = 'e';
+@@ -79,10 +79,10 @@ g_fmt(register char *b, double x)
+ 		*b++ = '.';
+ 		for(; decpt < 0; decpt++)
+ 			*b++ = '0';
+-		while(*b++ = *s++);
++		while((*b++ = *s++));
+ 		}
+ 	else {
+-		while(*b = *s++) {
++		while((*b = *s++)) {
+ 			b++;
+ 			if (--decpt == 0 && *s)
+ 				*b++ = '.';
+@@ -93,7 +93,9 @@ g_fmt(register char *b, double x)
+ 		}
+  done0:
+ 	freedtoa(s0);
++#ifdef IGNORE_ZERO_SIGN
+  done:
++#endif
+ 	return b0;
+ 	}
+ 
diff --git a/base/third_party/dmg_fp/mac_wextra.patch b/base/third_party/dmg_fp/mac_wextra.patch
new file mode 100644
index 0000000..15340f2
--- /dev/null
+++ b/base/third_party/dmg_fp/mac_wextra.patch
@@ -0,0 +1,53 @@
+Index: g_fmt.cc
+===================================================================
+--- g_fmt.cc	(revision 49784)
++++ g_fmt.cc	(working copy)
+@@ -46,7 +46,7 @@
+ 	if (sign)
+ 		*b++ = '-';
+ 	if (decpt == 9999) /* Infinity or Nan */ {
+-		while((*b++ = *s++));
++		while((*b++ = *s++)) {}
+ 		goto done0;
+ 		}
+ 	if (decpt <= -4 || decpt > se - s + 5) {
+@@ -64,7 +64,7 @@
+ 			}
+ 		else
+ 			*b++ = '+';
+-		for(j = 2, k = 10; 10*k <= decpt; j++, k *= 10);
++		for(j = 2, k = 10; 10*k <= decpt; j++, k *= 10) {}
+ 		for(;;) {
+ 			i = decpt / k;
+ 			*b++ = i + '0';
+@@ -79,7 +79,7 @@
+ 		*b++ = '.';
+ 		for(; decpt < 0; decpt++)
+ 			*b++ = '0';
+-		while((*b++ = *s++));
++		while((*b++ = *s++)) {}
+ 		}
+ 	else {
+ 		while((*b = *s++)) {
+Index: dtoa.cc
+===================================================================
+--- dtoa.cc	(revision 49784)
++++ dtoa.cc	(working copy)
+@@ -3863,7 +3863,7 @@
+ 					if (dval(&u) > 0.5 + dval(&eps))
+ 						goto bump_up;
+ 					else if (dval(&u) < 0.5 - dval(&eps)) {
+-						while(*--s == '0');
++						while(*--s == '0') {}
+ 						s++;
+ 						goto ret1;
+ 						}
+@@ -4176,7 +4176,7 @@
+ #ifdef Honor_FLT_ROUNDS
+  trimzeros:
+ #endif
+-		while(*--s == '0');
++		while(*--s == '0') {}
+ 		s++;
+ 		}
+  ret:
diff --git a/base/third_party/dmg_fp/msvc_warnings.patch b/base/third_party/dmg_fp/msvc_warnings.patch
new file mode 100644
index 0000000..22e89cd
--- /dev/null
+++ b/base/third_party/dmg_fp/msvc_warnings.patch
@@ -0,0 +1,419 @@
+diff --git a/base/third_party/dmg_fp/dtoa.cc b/base/third_party/dmg_fp/dtoa.cc
+index 3312fa4..502c16c 100644
+--- a/base/third_party/dmg_fp/dtoa.cc
++++ b/base/third_party/dmg_fp/dtoa.cc
+@@ -548,8 +548,10 @@ Balloc
+ 	ACQUIRE_DTOA_LOCK(0);
+ 	/* The k > Kmax case does not need ACQUIRE_DTOA_LOCK(0), */
+ 	/* but this case seems very unlikely. */
+-	if (k <= Kmax && (rv = freelist[k]))
++	if (k <= Kmax && freelist[k]) {
++		rv = freelist[k];
+ 		freelist[k] = rv->next;
++		}
+ 	else {
+ 		x = 1 << k;
+ #ifdef Omit_Private_Memory
+@@ -650,7 +652,7 @@ multadd
+ 			Bfree(b);
+ 			b = b1;
+ 			}
+-		b->x[wds++] = carry;
++		b->x[wds++] = (ULong)carry;
+ 		b->wds = wds;
+ 		}
+ 	return b;
+@@ -834,7 +836,8 @@ mult
+ 	xc0 = c->x;
+ #ifdef ULLong
+ 	for(; xb < xbe; xc0++) {
+-		if ((y = *xb++)) {
++		y = *xb++;
++		if (y) {
+ 			x = xa;
+ 			xc = xc0;
+ 			carry = 0;
+@@ -844,7 +847,7 @@ mult
+ 				*xc++ = z & FFFFFFFF;
+ 				}
+ 				while(x < xae);
+-			*xc = carry;
++			*xc = (ULong)carry;
+ 			}
+ 		}
+ #else
+@@ -916,16 +919,19 @@ pow5mult
+ 	int i;
+ 	static int p05[3] = { 5, 25, 125 };
+ 
+-	if ((i = k & 3))
++	i = k & 3;
++	if (i)
+ 		b = multadd(b, p05[i-1], 0);
+ 
+ 	if (!(k >>= 2))
+ 		return b;
+-	if (!(p5 = p5s)) {
++	p5 = p5s;
++	if (!p5) {
+ 		/* first time */
+ #ifdef MULTIPLE_THREADS
+ 		ACQUIRE_DTOA_LOCK(1);
+-		if (!(p5 = p5s)) {
++		p5 = p5s;
++		if (!p5) {
+ 			p5 = p5s = i2b(625);
+ 			p5->next = 0;
+ 			}
+@@ -943,10 +949,12 @@ pow5mult
+ 			}
+ 		if (!(k >>= 1))
+ 			break;
+-		if (!(p51 = p5->next)) {
++		p51 = p5->next;
++		if (!p51) {
+ #ifdef MULTIPLE_THREADS
+ 			ACQUIRE_DTOA_LOCK(1);
+-			if (!(p51 = p5->next)) {
++			p51 = p5->next;
++			if (!p51) {
+ 				p51 = p5->next = mult(p5,p5);
+ 				p51->next = 0;
+ 				}
+@@ -997,7 +1005,8 @@ lshift
+ 			z = *x++ >> k1;
+ 			}
+ 			while(x < xe);
+-		if ((*x1 = z))
++		*x1 = z;
++		if (*x1)
+ 			++n1;
+ 		}
+ #else
+@@ -1299,21 +1308,25 @@ d2b
+ 	z |= Exp_msk11;
+ #endif
+ #else
+-	if ((de = (int)(d0 >> Exp_shift)))
++	de = (int)(d0 >> Exp_shift);
++	if (de)
+ 		z |= Exp_msk1;
+ #endif
+ #ifdef Pack_32
+-	if ((y = d1)) {
+-		if ((k = lo0bits(&y))) {
++	y = d1;
++	if (y) {
++		k = lo0bits(&y);
++		if (k) {
+ 			x[0] = y | z << (32 - k);
+ 			z >>= k;
+ 			}
+ 		else
+ 			x[0] = y;
++		x[1] = z;
++		b->wds = x[1] ? 2 : 1;
+ #ifndef Sudden_Underflow
+-		i =
++		i = b->wds;
+ #endif
+-		    b->wds = (x[1] = z) ? 2 : 1;
+ 		}
+ 	else {
+ 		k = lo0bits(&z);
+@@ -1498,7 +1511,7 @@ htinit(unsigned char *h, unsigned char *s, int inc)
+ {
+ 	int i, j;
+ 	for(i = 0; (j = s[i]) !=0; i++)
+-		h[j] = i + inc;
++		h[j] = (unsigned char)(i + inc);
+ 	}
+ 
+  static void
+@@ -1536,7 +1549,7 @@ match
+ 	int c, d;
+ 	CONST char *s = *sp;
+ 
+-	while((d = *t++)) {
++	for(d = *t++; d; d = *t++) {
+ 		if ((c = *++s) >= 'A' && c <= 'Z')
+ 			c += 'a' - 'A';
+ 		if (c != d)
+@@ -1566,12 +1579,13 @@ hexnan
+ 	udx0 = 1;
+ 	s = *sp;
+ 	/* allow optional initial 0x or 0X */
+-	while((c = *(CONST unsigned char*)(s+1)) && c <= ' ')
++	for(c = *(CONST unsigned char*)(s+1); c && c <= ' '; c = *(CONST unsigned char*)(s+1))
+ 		++s;
+ 	if (s[1] == '0' && (s[2] == 'x' || s[2] == 'X'))
+ 		s += 2;
+-	while((c = *(CONST unsigned char*)++s)) {
+-		if ((c1 = hexdig[c]))
++	for(c = *(CONST unsigned char*)++s; c; c = *(CONST unsigned char*)++s) {
++		c1 = hexdig[c];
++		if (c1)
+ 			c  = c1 & 0xf;
+ 		else if (c <= ' ') {
+ 			if (udx0 && havedig) {
+@@ -1594,7 +1608,8 @@ hexnan
+ 					*sp = s + 1;
+ 					break;
+ 					}
+-				} while((c = *++s));
++				c = *++s;
++				} while(c);
+ 			break;
+ 			}
+ #endif
+@@ -2328,7 +2343,8 @@ bigcomp
+ 	/* Now b/d = exactly half-way between the two floating-point values */
+ 	/* on either side of the input string.  Compute first digit of b/d. */
+ 
+-	if (!(dig = quorem(b,d))) {
++	dig = quorem(b,d);
++	if (!dig) {
+ 		b = multadd(b, 10, 0);	/* very unlikely */
+ 		dig = quorem(b,d);
+ 		}
+@@ -2336,7 +2352,8 @@ bigcomp
+ 	/* Compare b/d with s0 */
+ 
+ 	for(i = 0; i < nd0; ) {
+-		if ((dd = s0[i++] - '0' - dig))
++		dd = s0[i++] - '0' - dig;
++		if (dd)
+ 			goto ret;
+ 		if (!b->x[0] && b->wds == 1) {
+ 			if (i < nd)
+@@ -2347,7 +2364,8 @@ bigcomp
+ 		dig = quorem(b,d);
+ 		}
+ 	for(j = bc->dp1; i++ < nd;) {
+-		if ((dd = s0[j++] - '0' - dig))
++		dd = s0[j++] - '0' - dig;
++		if (dd)
+ 			goto ret;
+ 		if (!b->x[0] && b->wds == 1) {
+ 			if (i < nd)
+@@ -2747,7 +2765,8 @@ strtod
+ 	/* Get starting approximation = rv * 10**e1 */
+ 
+ 	if (e1 > 0) {
+-		if ((i = e1 & 15))
++		i = e1 & 15;
++		if (i)
+ 			dval(&rv) *= tens[i];
+ 		if (e1 &= ~15) {
+ 			if (e1 > DBL_MAX_10_EXP) {
+@@ -2805,7 +2824,8 @@ strtod
+ 		}
+ 	else if (e1 < 0) {
+ 		e1 = -e1;
+-		if ((i = e1 & 15))
++		i = e1 & 15;
++		if (i)
+ 			dval(&rv) /= tens[i];
+ 		if (e1 >>= 4) {
+ 			if (e1 >= 1 << n_bigtens)
+@@ -3283,7 +3303,7 @@ strtod
+ #ifdef Avoid_Underflow
+ 			if (bc.scale && y <= 2*P*Exp_msk1) {
+ 				if (aadj <= 0x7fffffff) {
+-					if ((z = aadj) <= 0)
++					if ((z = (ULong)aadj) <= 0)
+ 						z = 1;
+ 					aadj = z;
+ 					aadj1 = bc.dsign ? aadj : -aadj;
+@@ -3456,7 +3476,7 @@ nrv_alloc(CONST char *s, char **rve, int n)
+ 	char *rv, *t;
+ 
+ 	t = rv = rv_alloc(n);
+-	while((*t = *s++)) t++;
++	for(*t = *s++; *t; *t = *s++) t++;
+ 	if (rve)
+ 		*rve = t;
+ 	return rv;
+@@ -3569,7 +3589,7 @@ dtoa
+ 	int denorm;
+ 	ULong x;
+ #endif
+-	Bigint *b, *b1, *delta, *mlo, *mhi, *S;
++	Bigint *b, *b1, *delta, *mlo = NULL, *mhi, *S;
+ 	U d2, eps, u;
+ 	double ds;
+ 	char *s, *s0;
+@@ -3645,10 +3665,9 @@ dtoa
+ #endif
+ 
+ 	b = d2b(&u, &be, &bbits);
+-#ifdef Sudden_Underflow
+ 	i = (int)(word0(&u) >> Exp_shift1 & (Exp_mask>>Exp_shift1));
+-#else
+-	if ((i = (int)(word0(&u) >> Exp_shift1 & (Exp_mask>>Exp_shift1)))) {
++#ifndef Sudden_Underflow
++	if (i) {
+ #endif
+ 		dval(&d2) = dval(&u);
+ 		word0(&d2) &= Frac_mask1;
+@@ -3803,13 +3822,16 @@ dtoa
+ 					}
+ 			dval(&u) /= ds;
+ 			}
+-		else if ((j1 = -k)) {
+-			dval(&u) *= tens[j1 & 0xf];
+-			for(j = j1 >> 4; j; j >>= 1, i++)
+-				if (j & 1) {
+-					ieps++;
+-					dval(&u) *= bigtens[i];
+-					}
++		else {
++			j1 = -k;
++			if (j1) {
++				dval(&u) *= tens[j1 & 0xf];
++				for(j = j1 >> 4; j; j >>= 1, i++)
++					if (j & 1) {
++						ieps++;
++						dval(&u) *= bigtens[i];
++						}
++				}
+ 			}
+ 		if (k_check && dval(&u) < 1. && ilim > 0) {
+ 			if (ilim1 <= 0)
+@@ -3837,9 +3859,9 @@ dtoa
+ 			 */
+ 			dval(&eps) = 0.5/tens[ilim-1] - dval(&eps);
+ 			for(i = 0;;) {
+-				L = dval(&u);
++				L = (long)dval(&u);
+ 				dval(&u) -= L;
+-				*s++ = '0' + (int)L;
++				*s++ = '0' + (char)L;
+ 				if (dval(&u) < dval(&eps))
+ 					goto ret1;
+ 				if (1. - dval(&u) < dval(&eps))
+@@ -3858,7 +3880,7 @@ dtoa
+ 				L = (Long)(dval(&u));
+ 				if (!(dval(&u) -= L))
+ 					ilim = i;
+-				*s++ = '0' + (int)L;
++				*s++ = '0' + (char)L;
+ 				if (i == ilim) {
+ 					if (dval(&u) > 0.5 + dval(&eps))
+ 						goto bump_up;
+@@ -3901,7 +3923,7 @@ dtoa
+ 				dval(&u) += ds;
+ 				}
+ #endif
+-			*s++ = '0' + (int)L;
++			*s++ = '0' + (char)L;
+ 			if (!dval(&u)) {
+ #ifdef SET_INEXACT
+ 				inexact = 0;
+@@ -3964,7 +3986,8 @@ dtoa
+ 				Bfree(b);
+ 				b = b1;
+ 				}
+-			if ((j = b5 - m5))
++			j = b5 - m5;
++			if (j)
+ 				b = pow5mult(b, j);
+ 			}
+ 		else
+@@ -4002,7 +4025,8 @@ dtoa
+ 	 * can do shifts and ors to compute the numerator for q.
+ 	 */
+ #ifdef Pack_32
+-	if ((i = ((s5 ? 32 - hi0bits(S->x[S->wds-1]) : 1) + s2) & 0x1f))
++	i = ((s5 ? 32 - hi0bits(S->x[S->wds-1]) : 1) + s2) & 0x1f;
++	if (i)
+ 		i = 32 - i;
+ #define iInc 28
+ #else
+@@ -4077,7 +4101,7 @@ dtoa
+ 				else if (!b->x[0] && b->wds <= 1)
+ 					inexact = 0;
+ #endif
+-				*s++ = dig;
++				*s++ = (char)dig;
+ 				goto ret;
+ 				}
+ #endif
+@@ -4107,7 +4131,7 @@ dtoa
+ 						goto round_9_up;
+ 					}
+  accept_dig:
+-				*s++ = dig;
++				*s++ = (char)dig;
+ 				goto ret;
+ 				}
+ 			if (j1 > 0) {
+@@ -4120,13 +4144,13 @@ dtoa
+ 					*s++ = '9';
+ 					goto roundoff;
+ 					}
+-				*s++ = dig + 1;
++				*s++ = (char)dig + 1;
+ 				goto ret;
+ 				}
+ #ifdef Honor_FLT_ROUNDS
+  keep_dig:
+ #endif
+-			*s++ = dig;
++			*s++ = (char)dig;
+ 			if (i == ilim)
+ 				break;
+ 			b = multadd(b, 10, 0);
+@@ -4140,7 +4164,8 @@ dtoa
+ 		}
+ 	else
+ 		for(i = 1;; i++) {
+-			*s++ = dig = quorem(b,S) + '0';
++			dig = quorem(b,S) + '0';
++			*s++ = (char)dig;
+ 			if (!b->x[0] && b->wds <= 1) {
+ #ifdef SET_INEXACT
+ 				inexact = 0;
+diff --git a/base/third_party/dmg_fp/g_fmt.cc b/base/third_party/dmg_fp/g_fmt.cc
+index d864eb7..bfa358d 100644
+--- a/base/third_party/dmg_fp/g_fmt.cc
++++ b/base/third_party/dmg_fp/g_fmt.cc
+@@ -46,14 +46,14 @@ g_fmt(register char *b, double x)
+ 	if (sign)
+ 		*b++ = '-';
+ 	if (decpt == 9999) /* Infinity or Nan */ {
+-		while((*b++ = *s++)) {}
++		for(*b = *s++; *b++; *b = *s++) {}
+ 		goto done0;
+ 		}
+ 	if (decpt <= -4 || decpt > se - s + 5) {
+ 		*b++ = *s++;
+ 		if (*s) {
+ 			*b++ = '.';
+-			while((*b = *s++))
++			for(*b = *s++; *b; *b = *s++)
+ 				b++;
+ 			}
+ 		*b++ = 'e';
+@@ -67,7 +67,7 @@ g_fmt(register char *b, double x)
+ 		for(j = 2, k = 10; 10*k <= decpt; j++, k *= 10) {}
+ 		for(;;) {
+ 			i = decpt / k;
+-			*b++ = i + '0';
++			*b++ = (char)i + '0';
+ 			if (--j <= 0)
+ 				break;
+ 			decpt -= i*k;
+@@ -79,10 +79,10 @@ g_fmt(register char *b, double x)
+ 		*b++ = '.';
+ 		for(; decpt < 0; decpt++)
+ 			*b++ = '0';
+-		while((*b++ = *s++)) {}
++		for(*b = *s++; *b++; *b = *s++) {}
+ 		}
+ 	else {
+-		while((*b = *s++)) {
++		for(*b = *s++; *b; *b = *s++) {
+ 			b++;
+ 			if (--decpt == 0 && *s)
+ 				*b++ = '.';
diff --git a/base/third_party/dynamic_annotations/BUILD.gn b/base/third_party/dynamic_annotations/BUILD.gn
new file mode 100644
index 0000000..0fc4bf7
--- /dev/null
+++ b/base/third_party/dynamic_annotations/BUILD.gn
@@ -0,0 +1,27 @@
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+if (is_nacl) {
+  # Native client doesn't need dynamic annotations, so we provide a
+  # dummy target in order for clients to not have to special-case the
+  # dependency.
+  source_set("dynamic_annotations") {
+    sources = [
+      "dynamic_annotations.h",
+    ]
+  }
+} else {
+  # Should be static library, see documentation on //base:base for discussion.
+  static_library("dynamic_annotations") {
+    sources = [
+      "../valgrind/valgrind.h",
+      "dynamic_annotations.c",
+      "dynamic_annotations.h",
+    ]
+    if (is_android && !is_debug) {
+      configs -= [ "//build/config/compiler:default_optimization" ]
+      configs += [ "//build/config/compiler:optimize_max" ]
+    }
+  }
+}
diff --git a/base/third_party/dynamic_annotations/LICENSE b/base/third_party/dynamic_annotations/LICENSE
new file mode 100644
index 0000000..5c581a9
--- /dev/null
+++ b/base/third_party/dynamic_annotations/LICENSE
@@ -0,0 +1,28 @@
+/* Copyright (c) 2008-2009, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * ---
+ * Author: Kostya Serebryany
+ */
diff --git a/base/third_party/dynamic_annotations/README.chromium b/base/third_party/dynamic_annotations/README.chromium
new file mode 100644
index 0000000..c029f8e
--- /dev/null
+++ b/base/third_party/dynamic_annotations/README.chromium
@@ -0,0 +1,23 @@
+Name: dynamic annotations
+URL: http://code.google.com/p/data-race-test/wiki/DynamicAnnotations
+Version: 4384
+License: BSD
+
+ATTENTION: please avoid using these annotations in Chromium code.
+They were mainly intended to instruct the Valgrind-based version of
+ThreadSanitizer to handle atomic operations. The new version of ThreadSanitizer
+based on compiler instrumentation understands atomic operations out of the box,
+so normally you don't need the annotations.
+If you still think you do, please consider writing a comment at http://crbug.com/349861
+
+One header and one source file (dynamic_annotations.h and dynamic_annotations.c)
+in this directory define runtime macros useful for annotating synchronization
+utilities and benign data races so data race detectors can handle Chromium code
+with better precision.
+
+These files were taken from
+http://code.google.com/p/data-race-test/source/browse/?#svn/trunk/dynamic_annotations
+The files are covered under BSD license as described within the files.
+
+Local modifications:
+* made lineno an unsigned short (for -Wconstant-conversion warning fixes)
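
A minimal usage sketch for orientation (illustrative only, not part of this
change; the names are hypothetical): a benign-race annotation is registered
once, near the point where the racy location is created, and all of these
macros compile to nothing when DYNAMIC_ANNOTATIONS_ENABLED is 0.

  #include "base/third_party/dynamic_annotations/dynamic_annotations.h"

  // Hypothetical: an approximate counter that tolerates lost updates.
  static int g_approximate_hits = 0;

  void InitStats() {
    // Registered once, close to where the racy location is created; race
    // detectors will then suppress reports on this address.
    ANNOTATE_BENIGN_RACE(&g_approximate_hits, "approximate hit counter");
  }

  void RecordHit() {
    ++g_approximate_hits;  // intentionally unsynchronized
  }
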
diff --git a/base/third_party/dynamic_annotations/dynamic_annotations.c b/base/third_party/dynamic_annotations/dynamic_annotations.c
new file mode 100644
index 0000000..4313ecc
--- /dev/null
+++ b/base/third_party/dynamic_annotations/dynamic_annotations.c
@@ -0,0 +1,269 @@
+/* Copyright (c) 2011, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef _MSC_VER
+# include <windows.h>
+#endif
+
+#ifdef __cplusplus
+# error "This file should be built as pure C to avoid name mangling"
+#endif
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
+
+#ifdef __GNUC__
+/* valgrind.h uses gcc extensions so it won't build with other compilers */
+# include "base/third_party/valgrind/valgrind.h"
+#endif
+
+/* Compiler-based ThreadSanitizer defines
+   DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL = 1
+   and provides its own definitions of the functions. */
+
+#ifndef DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL
+# define DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL 0
+#endif
+
+/* Each function is empty and called (via a macro) only in debug mode.
+   The arguments are captured by dynamic tools at runtime. */
+
+#if DYNAMIC_ANNOTATIONS_ENABLED == 1 \
+    && DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0
+
+/* Identical code folding (-Wl,--icf=all) countermeasures.
+   This makes all Annotate* functions different, which prevents the linker from
+   folding them. */
+#ifdef __COUNTER__
+#define DYNAMIC_ANNOTATIONS_IMPL \
+  volatile unsigned short lineno = (__LINE__ << 8) + __COUNTER__; (void)lineno;
+#else
+#define DYNAMIC_ANNOTATIONS_IMPL \
+  volatile unsigned short lineno = (__LINE__ << 8); (void)lineno;
+#endif
+
+/* WARNING: always add new annotations to the end of the list.
+   Otherwise, lineno (see above) numbers for different Annotate* functions may
+   conflict. */
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockCreate)(
+    const char *file, int line, const volatile void *lock)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockDestroy)(
+    const char *file, int line, const volatile void *lock)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockAcquired)(
+    const char *file, int line, const volatile void *lock, long is_w)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockReleased)(
+    const char *file, int line, const volatile void *lock, long is_w)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierInit)(
+    const char *file, int line, const volatile void *barrier, long count,
+    long reinitialization_allowed)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierWaitBefore)(
+    const char *file, int line, const volatile void *barrier)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierWaitAfter)(
+    const char *file, int line, const volatile void *barrier)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierDestroy)(
+    const char *file, int line, const volatile void *barrier)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarWait)(
+    const char *file, int line, const volatile void *cv,
+    const volatile void *lock)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarSignal)(
+    const char *file, int line, const volatile void *cv)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarSignalAll)(
+    const char *file, int line, const volatile void *cv)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateHappensBefore)(
+    const char *file, int line, const volatile void *obj)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateHappensAfter)(
+    const char *file, int line, const volatile void *obj)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotatePublishMemoryRange)(
+    const char *file, int line, const volatile void *address, long size)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateUnpublishMemoryRange)(
+    const char *file, int line, const volatile void *address, long size)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQCreate)(
+    const char *file, int line, const volatile void *pcq)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQDestroy)(
+    const char *file, int line, const volatile void *pcq)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQPut)(
+    const char *file, int line, const volatile void *pcq)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQGet)(
+    const char *file, int line, const volatile void *pcq)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateNewMemory)(
+    const char *file, int line, const volatile void *mem, long size)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateExpectRace)(
+    const char *file, int line, const volatile void *mem,
+    const char *description)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateFlushExpectedRaces)(
+    const char *file, int line)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateBenignRace)(
+    const char *file, int line, const volatile void *mem,
+    const char *description)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateBenignRaceSized)(
+    const char *file, int line, const volatile void *mem, long size,
+    const char *description)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateMutexIsUsedAsCondVar)(
+    const char *file, int line, const volatile void *mu)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateMutexIsNotPHB)(
+    const char *file, int line, const volatile void *mu)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateTraceMemory)(
+    const char *file, int line, const volatile void *arg)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateThreadName)(
+    const char *file, int line, const char *name)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreReadsBegin)(
+    const char *file, int line)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreReadsEnd)(
+    const char *file, int line)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreWritesBegin)(
+    const char *file, int line)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreWritesEnd)(
+    const char *file, int line)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreSyncBegin)(
+    const char *file, int line)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreSyncEnd)(
+    const char *file, int line)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateEnableRaceDetection)(
+    const char *file, int line, int enable)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateNoOp)(
+    const char *file, int line, const volatile void *arg)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateFlushState)(
+    const char *file, int line)
+{DYNAMIC_ANNOTATIONS_IMPL}
+
+#endif  /* DYNAMIC_ANNOTATIONS_ENABLED == 1
+    && DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0 */
+
+#if DYNAMIC_ANNOTATIONS_PROVIDE_RUNNING_ON_VALGRIND == 1 \
+    && DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0
+static int GetRunningOnValgrind(void) {
+#ifdef RUNNING_ON_VALGRIND
+  if (RUNNING_ON_VALGRIND) return 1;
+#endif
+
+#ifndef _MSC_VER
+  char *running_on_valgrind_str = getenv("RUNNING_ON_VALGRIND");
+  if (running_on_valgrind_str) {
+    return strcmp(running_on_valgrind_str, "0") != 0;
+  }
+#else
+  /* Visual Studio issues warnings if we use getenv,
+   * so we use GetEnvironmentVariableA instead.
+   */
+  char value[100] = "1";
+  int res = GetEnvironmentVariableA("RUNNING_ON_VALGRIND",
+                                    value, sizeof(value));
+  /* value will remain "1" if res == 0 or res >= sizeof(value). The latter
+   * can happen only if the given value is long, in this case it can't be "0".
+   */
+  if (res > 0 && strcmp(value, "0") != 0)
+    return 1;
+#endif
+  return 0;
+}
+
+/* See the comments in dynamic_annotations.h */
+int RunningOnValgrind(void) {
+  static volatile int running_on_valgrind = -1;
+  /* C doesn't have thread-safe initialization of statics, and we
+     don't want to depend on pthread_once here, so hack it. */
+  int local_running_on_valgrind = running_on_valgrind;
+  if (local_running_on_valgrind == -1)
+    running_on_valgrind = local_running_on_valgrind = GetRunningOnValgrind();
+  return local_running_on_valgrind;
+}
+
+#endif /* DYNAMIC_ANNOTATIONS_PROVIDE_RUNNING_ON_VALGRIND == 1
+    && DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0 */
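
One way a caller might use the RunningOnValgrind() helper above (a sketch,
not code from this change; ScaledTimeoutMs is a hypothetical name): scale
test timeouts under Valgrind, relying on the function's cached result to
keep repeated calls cheap.

  #include "base/third_party/dynamic_annotations/dynamic_annotations.h"

  // Hypothetical helper: instrumented runs are much slower, so multiply the
  // caller's timeout. RunningOnValgrind() caches its answer in a static.
  int ScaledTimeoutMs(int base_ms) {
    return RunningOnValgrind() ? base_ms * 20 : base_ms;
  }
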
diff --git a/base/third_party/dynamic_annotations/dynamic_annotations.h b/base/third_party/dynamic_annotations/dynamic_annotations.h
new file mode 100644
index 0000000..8d7f052
--- /dev/null
+++ b/base/third_party/dynamic_annotations/dynamic_annotations.h
@@ -0,0 +1,595 @@
+/* Copyright (c) 2011, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* This file defines dynamic annotations for use with dynamic analysis
+   tools such as valgrind, PIN, etc.
+
+   Dynamic annotation is a source code annotation that affects
+   the generated code (that is, the annotation is not a comment).
+   Each such annotation is attached to a particular
+   instruction and/or to a particular object (address) in the program.
+
+   The annotations that should be used by users are macros in all upper-case
+   (e.g., ANNOTATE_NEW_MEMORY).
+
+   Actual implementation of these macros may differ depending on the
+   dynamic analysis tool being used.
+
+   See http://code.google.com/p/data-race-test/  for more information.
+
+   This file supports the following dynamic analysis tools:
+   - None (DYNAMIC_ANNOTATIONS_ENABLED is not defined or zero).
+      Macros are defined empty.
+   - ThreadSanitizer, Helgrind, DRD (DYNAMIC_ANNOTATIONS_ENABLED is 1).
+      Macros are defined as calls to non-inlinable empty functions
+      that are intercepted by Valgrind. */
+
+#ifndef __DYNAMIC_ANNOTATIONS_H__
+#define __DYNAMIC_ANNOTATIONS_H__
+
+#ifndef DYNAMIC_ANNOTATIONS_PREFIX
+# define DYNAMIC_ANNOTATIONS_PREFIX
+#endif
+
+#ifndef DYNAMIC_ANNOTATIONS_PROVIDE_RUNNING_ON_VALGRIND
+# define DYNAMIC_ANNOTATIONS_PROVIDE_RUNNING_ON_VALGRIND 1
+#endif
+
+#ifdef DYNAMIC_ANNOTATIONS_WANT_ATTRIBUTE_WEAK
+# ifdef __GNUC__
+#  define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK __attribute__((weak))
+# else
+/* TODO(glider): for Windows support we may want to change this macro in order
+   to prepend __declspec(selectany) to the annotations' declarations. */
+#  error weak annotations are not supported for your compiler
+# endif
+#else
+# define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK
+#endif
+
+/* The following preprocessor magic prepends the value of
+   DYNAMIC_ANNOTATIONS_PREFIX to annotation function names. */
+#define DYNAMIC_ANNOTATIONS_GLUE0(A, B) A##B
+#define DYNAMIC_ANNOTATIONS_GLUE(A, B) DYNAMIC_ANNOTATIONS_GLUE0(A, B)
+#define DYNAMIC_ANNOTATIONS_NAME(name) \
+  DYNAMIC_ANNOTATIONS_GLUE(DYNAMIC_ANNOTATIONS_PREFIX, name)
+
+#ifndef DYNAMIC_ANNOTATIONS_ENABLED
+# define DYNAMIC_ANNOTATIONS_ENABLED 0
+#endif
+
+#if DYNAMIC_ANNOTATIONS_ENABLED != 0
+
+  /* -------------------------------------------------------------
+     Annotations useful when implementing condition variables such as CondVar,
+     using conditional critical sections (Await/LockWhen) and when constructing
+     user-defined synchronization mechanisms.
+
+     The annotations ANNOTATE_HAPPENS_BEFORE() and ANNOTATE_HAPPENS_AFTER() can
+     be used to define happens-before arcs in user-defined synchronization
+     mechanisms:  the race detector will infer an arc from the former to the
+     latter when they share the same argument pointer.
+
+     Example 1 (reference counting):
+
+     void Unref() {
+       ANNOTATE_HAPPENS_BEFORE(&refcount_);
+       if (AtomicDecrementByOne(&refcount_) == 0) {
+         ANNOTATE_HAPPENS_AFTER(&refcount_);
+         delete this;
+       }
+     }
+
+     Example 2 (message queue):
+
+     void MyQueue::Put(Type *e) {
+       MutexLock lock(&mu_);
+       ANNOTATE_HAPPENS_BEFORE(e);
+       PutElementIntoMyQueue(e);
+     }
+
+     Type *MyQueue::Get() {
+       MutexLock lock(&mu_);
+       Type *e = GetElementFromMyQueue();
+       ANNOTATE_HAPPENS_AFTER(e);
+       return e;
+     }
+
+     Note: when possible, please use the existing reference counting and message
+     queue implementations instead of inventing new ones. */
+
+  /* Report that wait on the condition variable at address "cv" has succeeded
+     and the lock at address "lock" is held. */
+  #define ANNOTATE_CONDVAR_LOCK_WAIT(cv, lock) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarWait)(__FILE__, __LINE__, cv, lock)
+
+  /* Report that wait on the condition variable at "cv" has succeeded.  Variant
+     w/o lock. */
+  #define ANNOTATE_CONDVAR_WAIT(cv) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarWait)(__FILE__, __LINE__, cv, NULL)
+
+  /* Report that we are about to signal on the condition variable at address
+     "cv". */
+  #define ANNOTATE_CONDVAR_SIGNAL(cv) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarSignal)(__FILE__, __LINE__, cv)
+
+  /* Report that we are about to signal_all on the condition variable at address
+     "cv". */
+  #define ANNOTATE_CONDVAR_SIGNAL_ALL(cv) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarSignalAll)(__FILE__, __LINE__, cv)
+
+  /* Annotations for user-defined synchronization mechanisms. */
+  #define ANNOTATE_HAPPENS_BEFORE(obj) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateHappensBefore)(__FILE__, __LINE__, obj)
+  #define ANNOTATE_HAPPENS_AFTER(obj) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateHappensAfter)(__FILE__, __LINE__, obj)
+
+  /* DEPRECATED. Don't use it. */
+  #define ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotatePublishMemoryRange)(__FILE__, __LINE__, \
+        pointer, size)
+
+  /* DEPRECATED. Don't use it. */
+  #define ANNOTATE_UNPUBLISH_MEMORY_RANGE(pointer, size) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateUnpublishMemoryRange)(__FILE__, __LINE__, \
+        pointer, size)
+
+  /* DEPRECATED. Don't use it. */
+  #define ANNOTATE_SWAP_MEMORY_RANGE(pointer, size)   \
+    do {                                              \
+      ANNOTATE_UNPUBLISH_MEMORY_RANGE(pointer, size); \
+      ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size);   \
+    } while (0)
+
+  /* Instruct the tool to create a happens-before arc between mu->Unlock() and
+     mu->Lock(). This annotation may slow down the race detector and hide real
+     races. Normally it is used only when it would be difficult to annotate each
+     of the mutex's critical sections individually using the annotations above.
+     This annotation makes sense only for hybrid race detectors. For pure
+     happens-before detectors this is a no-op. For more details see
+     http://code.google.com/p/data-race-test/wiki/PureHappensBeforeVsHybrid . */
+  #define ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(mu) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateMutexIsUsedAsCondVar)(__FILE__, __LINE__, \
+        mu)
+
+  /* Opposite to ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX.
+     Instruct the tool to NOT create h-b arcs between Unlock and Lock, even in
+     pure happens-before mode. For a hybrid mode this is a no-op. */
+  #define ANNOTATE_NOT_HAPPENS_BEFORE_MUTEX(mu) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateMutexIsNotPHB)(__FILE__, __LINE__, mu)
+
+  /* Deprecated. Use ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX. */
+  #define ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(mu) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateMutexIsUsedAsCondVar)(__FILE__, __LINE__, \
+        mu)
+
+  /* -------------------------------------------------------------
+     Annotations useful when defining memory allocators, or when memory that
+     was protected in one way starts to be protected in another. */
+
+  /* Report that new memory at "address" of size "size" has been allocated.
+     This might be used when the memory has been retrieved from a free list and
+     is about to be reused, or when the locking discipline for a variable
+     changes. */
+  #define ANNOTATE_NEW_MEMORY(address, size) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateNewMemory)(__FILE__, __LINE__, address, \
+        size)
+
+  /* -------------------------------------------------------------
+     Annotations useful when defining FIFO queues that transfer data between
+     threads. */
+
+  /* Report that the producer-consumer queue (such as ProducerConsumerQueue) at
+     address "pcq" has been created.  The ANNOTATE_PCQ_* annotations
+     should be used only for FIFO queues.  For non-FIFO queues use
+     ANNOTATE_HAPPENS_BEFORE (for put) and ANNOTATE_HAPPENS_AFTER (for get). */
+  #define ANNOTATE_PCQ_CREATE(pcq) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQCreate)(__FILE__, __LINE__, pcq)
+
+  /* Report that the queue at address "pcq" is about to be destroyed. */
+  #define ANNOTATE_PCQ_DESTROY(pcq) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQDestroy)(__FILE__, __LINE__, pcq)
+
+  /* Report that we are about to put an element into a FIFO queue at address
+     "pcq". */
+  #define ANNOTATE_PCQ_PUT(pcq) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQPut)(__FILE__, __LINE__, pcq)
+
+  /* Report that we've just got an element from a FIFO queue at address
+     "pcq". */
+  #define ANNOTATE_PCQ_GET(pcq) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQGet)(__FILE__, __LINE__, pcq)
+
+  /* -------------------------------------------------------------
+     Annotations that suppress errors.  It is usually better to express the
+     program's synchronization using the other annotations, but these can
+     be used when all else fails. */
+
+  /* Report that we may have a benign race at "pointer", with size
+     "sizeof(*(pointer))". "pointer" must be a non-void* pointer.  Insert at the
+     point where "pointer" has been allocated, preferably close to the point
+     where the race happens.  See also ANNOTATE_BENIGN_RACE_STATIC. */
+  #define ANNOTATE_BENIGN_RACE(pointer, description) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateBenignRaceSized)(__FILE__, __LINE__, \
+        pointer, sizeof(*(pointer)), description)
+
+  /* Same as ANNOTATE_BENIGN_RACE(address, description), but applies to
+     the memory range [address, address+size). */
+  #define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateBenignRaceSized)(__FILE__, __LINE__, \
+        address, size, description)
+
+  /* Request the analysis tool to ignore all reads in the current thread
+     until ANNOTATE_IGNORE_READS_END is called.
+     Useful to ignore intentional racey reads, while still checking
+     other reads and all writes.
+     See also ANNOTATE_UNPROTECTED_READ. */
+  #define ANNOTATE_IGNORE_READS_BEGIN() \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreReadsBegin)(__FILE__, __LINE__)
+
+  /* Stop ignoring reads. */
+  #define ANNOTATE_IGNORE_READS_END() \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreReadsEnd)(__FILE__, __LINE__)
+
+  /* Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore writes. */
+  #define ANNOTATE_IGNORE_WRITES_BEGIN() \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreWritesBegin)(__FILE__, __LINE__)
+
+  /* Stop ignoring writes. */
+  #define ANNOTATE_IGNORE_WRITES_END() \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreWritesEnd)(__FILE__, __LINE__)
+
+  /* Start ignoring all memory accesses (reads and writes). */
+  #define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \
+    do { \
+      ANNOTATE_IGNORE_READS_BEGIN(); \
+      ANNOTATE_IGNORE_WRITES_BEGIN(); \
+    } while (0)
+
+  /* Stop ignoring all memory accesses. */
+  #define ANNOTATE_IGNORE_READS_AND_WRITES_END() \
+    do { \
+      ANNOTATE_IGNORE_WRITES_END(); \
+      ANNOTATE_IGNORE_READS_END(); \
+    } while (0)
+
+  /* Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore synchronization events:
+     RWLOCK* and CONDVAR*. */
+  #define ANNOTATE_IGNORE_SYNC_BEGIN() \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreSyncBegin)(__FILE__, __LINE__)
+
+  /* Stop ignoring sync events. */
+  #define ANNOTATE_IGNORE_SYNC_END() \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreSyncEnd)(__FILE__, __LINE__)
+
+
+  /* Enable (enable!=0) or disable (enable==0) race detection for all threads.
+     This annotation could be useful if you want to skip expensive race analysis
+     during some period of program execution, e.g. during initialization. */
+  #define ANNOTATE_ENABLE_RACE_DETECTION(enable) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateEnableRaceDetection)(__FILE__, __LINE__, \
+        enable)
+
+  /* -------------------------------------------------------------
+     Annotations useful for debugging. */
+
+  /* Request to trace every access to "address". */
+  #define ANNOTATE_TRACE_MEMORY(address) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateTraceMemory)(__FILE__, __LINE__, address)
+
+  /* Report the current thread name to a race detector. */
+  #define ANNOTATE_THREAD_NAME(name) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateThreadName)(__FILE__, __LINE__, name)
+
+  /* -------------------------------------------------------------
+     Annotations useful when implementing locks.  They are not
+     normally needed by modules that merely use locks.
+     The "lock" argument is a pointer to the lock object. */
+
+  /* Report that a lock has been created at address "lock". */
+  #define ANNOTATE_RWLOCK_CREATE(lock) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockCreate)(__FILE__, __LINE__, lock)
+
+  /* Report that the lock at address "lock" is about to be destroyed. */
+  #define ANNOTATE_RWLOCK_DESTROY(lock) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockDestroy)(__FILE__, __LINE__, lock)
+
+  /* Report that the lock at address "lock" has been acquired.
+     is_w=1 for writer lock, is_w=0 for reader lock. */
+  #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockAcquired)(__FILE__, __LINE__, lock, \
+        is_w)
+
+  /* Report that the lock at address "lock" is about to be released. */
+  #define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockReleased)(__FILE__, __LINE__, lock, \
+        is_w)
+
+  /* -------------------------------------------------------------
+     Annotations useful when implementing barriers.  They are not
+     normally needed by modules that merely use barriers.
+     The "barrier" argument is a pointer to the barrier object. */
+
+  /* Report that the "barrier" has been initialized with initial "count".
+   If 'reinitialization_allowed' is true, initialization is allowed to happen
+   multiple times w/o calling barrier_destroy() */
+  #define ANNOTATE_BARRIER_INIT(barrier, count, reinitialization_allowed) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierInit)(__FILE__, __LINE__, barrier, \
+        count, reinitialization_allowed)
+
+  /* Report that we are about to enter barrier_wait("barrier"). */
+  #define ANNOTATE_BARRIER_WAIT_BEFORE(barrier) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierWaitBefore)(__FILE__, __LINE__, \
+        barrier)
+
+  /* Report that we just exited barrier_wait("barrier"). */
+  #define ANNOTATE_BARRIER_WAIT_AFTER(barrier) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierWaitAfter)(__FILE__, __LINE__, \
+        barrier)
+
+  /* Report that the "barrier" has been destroyed. */
+  #define ANNOTATE_BARRIER_DESTROY(barrier) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierDestroy)(__FILE__, __LINE__, \
+        barrier)
+
+  /* -------------------------------------------------------------
+     Annotations useful for testing race detectors. */
+
+  /* Report that we expect a race on the variable at "address".
+     Use only in unit tests for a race detector. */
+  #define ANNOTATE_EXPECT_RACE(address, description) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateExpectRace)(__FILE__, __LINE__, address, \
+        description)
+
+  #define ANNOTATE_FLUSH_EXPECTED_RACES() \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateFlushExpectedRaces)(__FILE__, __LINE__)
+
+  /* A no-op. Insert where you like to test the interceptors. */
+  #define ANNOTATE_NO_OP(arg) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateNoOp)(__FILE__, __LINE__, arg)
+
+  /* Force the race detector to flush its state. The actual effect depends on
+   * the implementation of the detector. */
+  #define ANNOTATE_FLUSH_STATE() \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateFlushState)(__FILE__, __LINE__)
+
+
+#else  /* DYNAMIC_ANNOTATIONS_ENABLED == 0 */
+
+  #define ANNOTATE_RWLOCK_CREATE(lock) /* empty */
+  #define ANNOTATE_RWLOCK_DESTROY(lock) /* empty */
+  #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) /* empty */
+  #define ANNOTATE_RWLOCK_RELEASED(lock, is_w) /* empty */
+  #define ANNOTATE_BARRIER_INIT(barrier, count, reinitialization_allowed) /* */
+  #define ANNOTATE_BARRIER_WAIT_BEFORE(barrier) /* empty */
+  #define ANNOTATE_BARRIER_WAIT_AFTER(barrier) /* empty */
+  #define ANNOTATE_BARRIER_DESTROY(barrier) /* empty */
+  #define ANNOTATE_CONDVAR_LOCK_WAIT(cv, lock) /* empty */
+  #define ANNOTATE_CONDVAR_WAIT(cv) /* empty */
+  #define ANNOTATE_CONDVAR_SIGNAL(cv) /* empty */
+  #define ANNOTATE_CONDVAR_SIGNAL_ALL(cv) /* empty */
+  #define ANNOTATE_HAPPENS_BEFORE(obj) /* empty */
+  #define ANNOTATE_HAPPENS_AFTER(obj) /* empty */
+  #define ANNOTATE_PUBLISH_MEMORY_RANGE(address, size) /* empty */
+  #define ANNOTATE_UNPUBLISH_MEMORY_RANGE(address, size)  /* empty */
+  #define ANNOTATE_SWAP_MEMORY_RANGE(address, size)  /* empty */
+  #define ANNOTATE_PCQ_CREATE(pcq) /* empty */
+  #define ANNOTATE_PCQ_DESTROY(pcq) /* empty */
+  #define ANNOTATE_PCQ_PUT(pcq) /* empty */
+  #define ANNOTATE_PCQ_GET(pcq) /* empty */
+  #define ANNOTATE_NEW_MEMORY(address, size) /* empty */
+  #define ANNOTATE_EXPECT_RACE(address, description) /* empty */
+  #define ANNOTATE_FLUSH_EXPECTED_RACES() /* empty */
+  #define ANNOTATE_BENIGN_RACE(address, description) /* empty */
+  #define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) /* empty */
+  #define ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(mu) /* empty */
+  #define ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(mu) /* empty */
+  #define ANNOTATE_TRACE_MEMORY(arg) /* empty */
+  #define ANNOTATE_THREAD_NAME(name) /* empty */
+  #define ANNOTATE_IGNORE_READS_BEGIN() /* empty */
+  #define ANNOTATE_IGNORE_READS_END() /* empty */
+  #define ANNOTATE_IGNORE_WRITES_BEGIN() /* empty */
+  #define ANNOTATE_IGNORE_WRITES_END() /* empty */
+  #define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() /* empty */
+  #define ANNOTATE_IGNORE_READS_AND_WRITES_END() /* empty */
+  #define ANNOTATE_IGNORE_SYNC_BEGIN() /* empty */
+  #define ANNOTATE_IGNORE_SYNC_END() /* empty */
+  #define ANNOTATE_ENABLE_RACE_DETECTION(enable) /* empty */
+  #define ANNOTATE_NO_OP(arg) /* empty */
+  #define ANNOTATE_FLUSH_STATE() /* empty */
+
+#endif  /* DYNAMIC_ANNOTATIONS_ENABLED */
+
+/* Use the macros above rather than using these functions directly. */
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockCreate)(
+    const char *file, int line,
+    const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockDestroy)(
+    const char *file, int line,
+    const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockAcquired)(
+    const char *file, int line,
+    const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockReleased)(
+    const char *file, int line,
+    const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierInit)(
+    const char *file, int line, const volatile void *barrier, long count,
+    long reinitialization_allowed) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierWaitBefore)(
+    const char *file, int line,
+    const volatile void *barrier) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierWaitAfter)(
+    const char *file, int line,
+    const volatile void *barrier) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierDestroy)(
+    const char *file, int line,
+    const volatile void *barrier) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarWait)(
+    const char *file, int line, const volatile void *cv,
+    const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarSignal)(
+    const char *file, int line,
+    const volatile void *cv) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarSignalAll)(
+    const char *file, int line,
+    const volatile void *cv) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateHappensBefore)(
+    const char *file, int line,
+    const volatile void *obj) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateHappensAfter)(
+    const char *file, int line,
+    const volatile void *obj) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotatePublishMemoryRange)(
+    const char *file, int line,
+    const volatile void *address, long size) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateUnpublishMemoryRange)(
+    const char *file, int line,
+    const volatile void *address, long size) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQCreate)(
+    const char *file, int line,
+    const volatile void *pcq) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQDestroy)(
+    const char *file, int line,
+    const volatile void *pcq) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQPut)(
+    const char *file, int line,
+    const volatile void *pcq) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQGet)(
+    const char *file, int line,
+    const volatile void *pcq) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateNewMemory)(
+    const char *file, int line,
+    const volatile void *mem, long size) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateExpectRace)(
+    const char *file, int line, const volatile void *mem,
+    const char *description) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateFlushExpectedRaces)(
+    const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateBenignRace)(
+    const char *file, int line, const volatile void *mem,
+    const char *description) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateBenignRaceSized)(
+    const char *file, int line, const volatile void *mem, long size,
+    const char *description) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateMutexIsUsedAsCondVar)(
+    const char *file, int line,
+    const volatile void *mu) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateMutexIsNotPHB)(
+    const char *file, int line,
+    const volatile void *mu) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateTraceMemory)(
+    const char *file, int line,
+    const volatile void *arg) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateThreadName)(
+    const char *file, int line,
+    const char *name) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreReadsBegin)(
+    const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreReadsEnd)(
+    const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreWritesBegin)(
+    const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreWritesEnd)(
+    const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreSyncBegin)(
+    const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreSyncEnd)(
+    const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateEnableRaceDetection)(
+    const char *file, int line, int enable) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateNoOp)(
+    const char *file, int line,
+    const volatile void *arg) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateFlushState)(
+    const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+
+#if DYNAMIC_ANNOTATIONS_PROVIDE_RUNNING_ON_VALGRIND == 1
+/* Return non-zero value if running under valgrind.
+
+  If "valgrind.h" is included into dynamic_annotations.c,
+  the regular valgrind mechanism will be used.
+  See http://valgrind.org/docs/manual/manual-core-adv.html about
+  RUNNING_ON_VALGRIND and other valgrind "client requests".
+  The file "valgrind.h" may be obtained by doing
+     svn co svn://svn.valgrind.org/valgrind/trunk/include
+
+  If for some reason you can't use "valgrind.h" or want to fake valgrind,
+  there are two ways to make this function return non-zero:
+    - Use environment variable: export RUNNING_ON_VALGRIND=1
+    - Make your tool intercept the function RunningOnValgrind() and
+      change its return value.
+ */
+int RunningOnValgrind(void) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+#endif /* DYNAMIC_ANNOTATIONS_PROVIDE_RUNNING_ON_VALGRIND == 1 */
+
+#ifdef __cplusplus
+}
+#endif
+
+#if DYNAMIC_ANNOTATIONS_ENABLED != 0 && defined(__cplusplus)
+
+  /* ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racey reads.
+
+     Instead of doing
+        ANNOTATE_IGNORE_READS_BEGIN();
+        ... = x;
+        ANNOTATE_IGNORE_READS_END();
+     one can use
+        ... = ANNOTATE_UNPROTECTED_READ(x); */
+  template <class T>
+  inline T ANNOTATE_UNPROTECTED_READ(const volatile T &x) {
+    ANNOTATE_IGNORE_READS_BEGIN();
+    T res = x;
+    ANNOTATE_IGNORE_READS_END();
+    return res;
+  }
+  /* Apply ANNOTATE_BENIGN_RACE_SIZED to a static variable. */
+  #define ANNOTATE_BENIGN_RACE_STATIC(static_var, description)        \
+    namespace {                                                       \
+      class static_var ## _annotator {                                \
+       public:                                                        \
+        static_var ## _annotator() {                                  \
+          ANNOTATE_BENIGN_RACE_SIZED(&static_var,                     \
+                                      sizeof(static_var),             \
+            # static_var ": " description);                           \
+        }                                                             \
+      };                                                              \
+      static static_var ## _annotator the ## static_var ## _annotator;\
+    }
+#else /* DYNAMIC_ANNOTATIONS_ENABLED == 0 */
+
+  #define ANNOTATE_UNPROTECTED_READ(x) (x)
+  #define ANNOTATE_BENIGN_RACE_STATIC(static_var, description)  /* empty */
+
+#endif /* DYNAMIC_ANNOTATIONS_ENABLED */
+
+#endif  /* __DYNAMIC_ANNOTATIONS_H__ */
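
A sketch of the racey-read idiom this header recommends (illustrative,
hypothetical names, not part of the change): ANNOTATE_UNPROTECTED_READ
wraps a single read in an ignore-reads region, and
ANNOTATE_BENIGN_RACE_STATIC declares a file-scope variable's races benign
once.

  #include "base/third_party/dynamic_annotations/dynamic_annotations.h"

  // Hypothetical: a flag one thread sets and another polls without a lock.
  static volatile bool g_shutdown_requested = false;

  bool ShutdownRequested() {
    // Expands to IGNORE_READS_BEGIN / read / IGNORE_READS_END when
    // annotations are enabled, and to a plain read otherwise.
    return ANNOTATE_UNPROTECTED_READ(g_shutdown_requested);
  }

  // Declare, at file scope, that races on this static are benign.
  static int g_stats_counter = 0;
  ANNOTATE_BENIGN_RACE_STATIC(g_stats_counter, "approximate counter")
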
diff --git a/base/third_party/icu/LICENSE b/base/third_party/icu/LICENSE
new file mode 100644
index 0000000..2882e4e
--- /dev/null
+++ b/base/third_party/icu/LICENSE
@@ -0,0 +1,76 @@
+COPYRIGHT AND PERMISSION NOTICE (ICU 58 and later)
+
+Copyright © 1991-2017 Unicode, Inc. All rights reserved.
+Distributed under the Terms of Use in http://www.unicode.org/copyright.html
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of the Unicode data files and any associated documentation
+(the "Data Files") or Unicode software and any associated documentation
+(the "Software") to deal in the Data Files or Software
+without restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, and/or sell copies of
+the Data Files or Software, and to permit persons to whom the Data Files
+or Software are furnished to do so, provided that either
+(a) this copyright and permission notice appear with all copies
+of the Data Files or Software, or
+(b) this copyright and permission notice appear in associated
+Documentation.
+
+THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS
+NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL
+DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+
+Except as contained in this notice, the name of a copyright holder
+shall not be used in advertising or otherwise to promote the sale,
+use or other dealings in these Data Files or Software without prior
+written authorization of the copyright holder.
+
+---------------------
+
+Third-Party Software Licenses
+
+This section contains third-party software notices and/or additional
+terms for licensed third-party software components included within ICU
+libraries.
+
+1. ICU License - ICU 1.8.1 to ICU 57.1
+
+COPYRIGHT AND PERMISSION NOTICE
+
+Copyright (c) 1995-2016 International Business Machines Corporation and others
+All rights reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, and/or sell copies of the Software, and to permit persons
+to whom the Software is furnished to do so, provided that the above
+copyright notice(s) and this permission notice appear in all copies of
+the Software and that both the above copyright notice(s) and this
+permission notice appear in supporting documentation.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
+OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+HOLDERS INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY
+SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER
+RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
+CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+Except as contained in this notice, the name of a copyright holder
+shall not be used in advertising or otherwise to promote the sale, use
+or other dealings in this Software without prior written authorization
+of the copyright holder.
+
+All trademarks and registered trademarks mentioned herein are the
+property of their respective owners.
diff --git a/base/third_party/icu/README.chromium b/base/third_party/icu/README.chromium
new file mode 100644
index 0000000..297e89a
--- /dev/null
+++ b/base/third_party/icu/README.chromium
@@ -0,0 +1,17 @@
+Name: ICU
+URL: http://site.icu-project.org/
+Version: 60
+License: Unicode
+License File: NOT_SHIPPED
+
+This directory has the relevant components from ICU copied to handle basic
+UTF8/16/32 conversions. Components are copied from umachine.h, utf.h, utf8.h,
+and utf16.h into icu_utf.h, and from utf_impl.cpp into icu_utf.cc.
+
+The main change is that U_/U8_/U16_ prefixes have been replaced with
+CBU_/CBU8_/CBU16_ (for "Chrome Base") to avoid confusion with the "real" ICU
+macros should ICU be in use on the system. For the same reason, the functions
+and types have been put in the "base_icu" namespace.
+
+Note that this license file is marked as NOT_SHIPPED, since a more complete
+ICU license is included from //third_party/icu/README.chromium.
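
For orientation, a sketch of how the renamed macros are meant to be used
(illustrative code, not part of this change; CountCodePoints is a
hypothetical name): iterating the code points of a UTF-8 buffer with
CBU8_NEXT, which sets a negative value on an illegal sequence.

  #include <stdint.h>
  #include <string>

  #include "base/third_party/icu/icu_utf.h"

  // Hypothetical: count the code points in a UTF-8 string, stopping at the
  // first illegal byte sequence.
  size_t CountCodePoints(const std::string& utf8) {
    const uint8_t* data = reinterpret_cast<const uint8_t*>(utf8.data());
    int32_t i = 0;
    const int32_t length = static_cast<int32_t>(utf8.size());
    size_t count = 0;
    while (i < length) {
      base_icu::UChar32 c;
      CBU8_NEXT(data, i, length, c);  // advances i past one code point
      if (c < 0)
        break;  // illegal sequence; CBU8_NEXT produced CBU_SENTINEL
      ++count;
    }
    return count;
  }
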
diff --git a/base/third_party/icu/icu_utf.cc b/base/third_party/icu/icu_utf.cc
new file mode 100644
index 0000000..a3262b0
--- /dev/null
+++ b/base/third_party/icu/icu_utf.cc
@@ -0,0 +1,131 @@
+// © 2016 and later: Unicode, Inc. and others.
+// License & terms of use: http://www.unicode.org/copyright.html
+/*
+******************************************************************************
+*
+*   Copyright (C) 1999-2012, International Business Machines
+*   Corporation and others.  All Rights Reserved.
+*
+******************************************************************************
+*   file name:  utf_impl.cpp
+*   encoding:   UTF-8
+*   tab size:   8 (not used)
+*   indentation:4
+*
+*   created on: 1999sep13
+*   created by: Markus W. Scherer
+*
+*   This file provides implementation functions for macros in the utfXX.h
+*   headers that would otherwise be too long as macros.
+*/
+
+#include "base/third_party/icu/icu_utf.h"
+
+namespace base_icu {
+
+// source/common/utf_impl.cpp
+
+static const UChar32
+utf8_errorValue[6]={
+    // Same values as UTF8_ERROR_VALUE_1, UTF8_ERROR_VALUE_2, UTF_ERROR_VALUE,
+    // but without relying on the obsolete unicode/utf_old.h.
+    0x15, 0x9f, 0xffff,
+    0x10ffff
+};
+
+static UChar32
+errorValue(int32_t count, int8_t strict) {
+    if(strict>=0) {
+        return utf8_errorValue[count];
+    } else if(strict==-3) {
+        return 0xfffd;
+    } else {
+        return CBU_SENTINEL;
+    }
+}
+
+/*
+ * Handle the non-inline part of the U8_NEXT() and U8_NEXT_FFFD() macros
+ * and their obsolete sibling UTF8_NEXT_CHAR_SAFE().
+ *
+ * U8_NEXT() supports NUL-terminated strings indicated via length<0.
+ *
+ * The "strict" parameter controls the error behavior:
+ * <0  "Safe" behavior of U8_NEXT():
+ *     -1: All illegal byte sequences yield U_SENTINEL=-1.
+ *     -2: Same as -1, except for lenient treatment of surrogate code points as legal.
+ *         Some implementations use this for roundtripping of
+ *         Unicode 16-bit strings that are not well-formed UTF-16, that is, they
+ *         contain unpaired surrogates.
+ *     -3: All illegal byte sequences yield U+FFFD.
+ *  0  Obsolete "safe" behavior of UTF8_NEXT_CHAR_SAFE(..., FALSE):
+ *     All illegal byte sequences yield a positive code point such that this
+ *     result code point would be encoded with the same number of bytes as
+ *     the illegal sequence.
+ * >0  Obsolete "strict" behavior of UTF8_NEXT_CHAR_SAFE(..., TRUE):
+ *     Same as the obsolete "safe" behavior, but non-characters are also treated
+ *     like illegal sequences.
+ *
+ * Note that a UBool is the same as an int8_t.
+ */
+UChar32
+utf8_nextCharSafeBody(const uint8_t *s, int32_t *pi, int32_t length, UChar32 c, UBool strict) {
+    // *pi is one after byte c.
+    int32_t i=*pi;
+    // length can be negative for NUL-terminated strings: Read and validate one byte at a time.
+    if(i==length || c>0xf4) {
+        // end of string, or not a lead byte
+    } else if(c>=0xf0) {
+        // Test for 4-byte sequences first because
+        // U8_NEXT() handles shorter valid sequences inline.
+        uint8_t t1=s[i], t2, t3;
+        c&=7;
+        if(CBU8_IS_VALID_LEAD4_AND_T1(c, t1) &&
+                ++i!=length && (t2=s[i]-0x80)<=0x3f &&
+                ++i!=length && (t3=s[i]-0x80)<=0x3f) {
+            ++i;
+            c=(c<<18)|((t1&0x3f)<<12)|(t2<<6)|t3;
+            // strict: forbid non-characters like U+fffe
+            if(strict<=0 || !CBU_IS_UNICODE_NONCHAR(c)) {
+                *pi=i;
+                return c;
+            }
+        }
+    } else if(c>=0xe0) {
+        c&=0xf;
+        if(strict!=-2) {
+            uint8_t t1=s[i], t2;
+            if(CBU8_IS_VALID_LEAD3_AND_T1(c, t1) &&
+                    ++i!=length && (t2=s[i]-0x80)<=0x3f) {
+                ++i;
+                c=(c<<12)|((t1&0x3f)<<6)|t2;
+                // strict: forbid non-characters like U+fffe
+                if(strict<=0 || !CBU_IS_UNICODE_NONCHAR(c)) {
+                    *pi=i;
+                    return c;
+                }
+            }
+        } else {
+            // strict=-2 -> lenient: allow surrogates
+            uint8_t t1=s[i]-0x80, t2;
+            if(t1<=0x3f && (c>0 || t1>=0x20) &&
+                    ++i!=length && (t2=s[i]-0x80)<=0x3f) {
+                *pi=i+1;
+                return (c<<12)|(t1<<6)|t2;
+            }
+        }
+    } else if(c>=0xc2) {
+        uint8_t t1=s[i]-0x80;
+        if(t1<=0x3f) {
+            *pi=i+1;
+            return ((c-0xc0)<<6)|t1;
+        }
+    }  // else 0x80<=c<0xc2 is not a lead byte
+
+    /* error handling */
+    c=errorValue(i-*pi, strict);
+    *pi=i;
+    return c;
+}
+
+}  // namespace base_icu
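
A round-trip sketch tying the slow path above to the macros in icu_utf.h
(illustrative only; RoundTrips is a hypothetical name): encode one code
point with CBU8_APPEND_UNSAFE, then decode it back with CBU8_NEXT, whose
error cases funnel into utf8_nextCharSafeBody.

  #include <stdint.h>

  #include "base/third_party/icu/icu_utf.h"

  // Hypothetical: encode-then-decode a single code point. The caller must
  // pass a valid code point; CBU8_APPEND_UNSAFE does not validate.
  bool RoundTrips(base_icu::UChar32 cp) {
    uint8_t buf[CBU8_MAX_LENGTH];
    int32_t len = 0;
    CBU8_APPEND_UNSAFE(buf, len, cp);

    int32_t i = 0;
    base_icu::UChar32 decoded;
    CBU8_NEXT(buf, i, len, decoded);  // decoded < 0 on an illegal sequence
    return i == len && decoded == cp;
  }
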
diff --git a/base/third_party/icu/icu_utf.h b/base/third_party/icu/icu_utf.h
new file mode 100644
index 0000000..2ba8231
--- /dev/null
+++ b/base/third_party/icu/icu_utf.h
@@ -0,0 +1,442 @@
+// © 2016 and later: Unicode, Inc. and others.
+// License & terms of use: http://www.unicode.org/copyright.html
+/*
+******************************************************************************
+*
+*   Copyright (C) 1999-2015, International Business Machines
+*   Corporation and others.  All Rights Reserved.
+*
+******************************************************************************
+*/
+
+#ifndef BASE_THIRD_PARTY_ICU_ICU_UTF_H_
+#define BASE_THIRD_PARTY_ICU_ICU_UTF_H_
+
+#include <stdint.h>
+
+namespace base_icu {
+
+// source/common/unicode/umachine.h
+
+/** The ICU boolean type @stable ICU 2.0 */
+typedef int8_t UBool;
+
+/**
+ * Define UChar32 as a type for single Unicode code points.
+ * UChar32 is a signed 32-bit integer (same as int32_t).
+ *
+ * The Unicode code point range is 0..0x10ffff.
+ * All other values (negative or >=0x110000) are illegal as Unicode code points.
+ * They may be used as sentinel values to indicate "done", "error"
+ * or similar non-code point conditions.
+ *
+ * Before ICU 2.4 (Jitterbug 2146), UChar32 was defined
+ * to be wchar_t if that is 32 bits wide (wchar_t may be signed or unsigned)
+ * or else to be uint32_t.
+ * That is, the definition of UChar32 was platform-dependent.
+ *
+ * @see U_SENTINEL
+ * @stable ICU 2.4
+ */
+typedef int32_t UChar32;
+
+/**
+ * This value is intended for sentinel values for APIs that
+ * (take or) return single code points (UChar32).
+ * It is outside of the Unicode code point range 0..0x10ffff.
+ *
+ * For example, a "done" or "error" value in a new API
+ * could be indicated with U_SENTINEL.
+ *
+ * ICU APIs designed before ICU 2.4 usually define service-specific "done"
+ * values, mostly 0xffff.
+ * Those may need to be distinguished from
+ * actual U+ffff text contents by calling functions like
+ * CharacterIterator::hasNext() or UnicodeString::length().
+ *
+ * @return -1
+ * @see UChar32
+ * @stable ICU 2.4
+ */
+#define CBU_SENTINEL (-1)
+
+// source/common/unicode/utf.h
+
+/**
+ * Is this code point a Unicode noncharacter?
+ * @param c 32-bit code point
+ * @return TRUE or FALSE
+ * @stable ICU 2.4
+ */
+#define CBU_IS_UNICODE_NONCHAR(c) \
+    ((c)>=0xfdd0 && \
+     ((c)<=0xfdef || ((c)&0xfffe)==0xfffe) && (c)<=0x10ffff)
+
+/**
+ * Is c a Unicode code point value (0..U+10ffff)
+ * that can be assigned a character?
+ *
+ * Code points that are not characters include:
+ * - single surrogate code points (U+d800..U+dfff, 2048 code points)
+ * - the last two code points on each plane (U+__fffe and U+__ffff, 34 code points)
+ * - U+fdd0..U+fdef (new with Unicode 3.1, 32 code points)
+ * - the highest Unicode code point value is U+10ffff
+ *
+ * This means that all code points below U+d800 are character code points,
+ * and that boundary is tested first for performance.
+ *
+ * @param c 32-bit code point
+ * @return TRUE or FALSE
+ * @stable ICU 2.4
+ */
+#define CBU_IS_UNICODE_CHAR(c) \
+    ((uint32_t)(c)<0xd800 || \
+        (0xdfff<(c) && (c)<=0x10ffff && !CBU_IS_UNICODE_NONCHAR(c)))
+
+/**
+ * Is this code point a surrogate (U+d800..U+dfff)?
+ * @param c 32-bit code point
+ * @return TRUE or FALSE
+ * @stable ICU 2.4
+ */
+#define CBU_IS_SURROGATE(c) (((c)&0xfffff800)==0xd800)
+
+/**
+ * Assuming c is a surrogate code point (U_IS_SURROGATE(c)),
+ * is it a lead surrogate?
+ * @param c 32-bit code point
+ * @return TRUE or FALSE
+ * @stable ICU 2.4
+ */
+#define CBU_IS_SURROGATE_LEAD(c) (((c)&0x400)==0)
+
+// source/common/unicode/utf8.h
+
+/**
+ * Internal bit vector for 3-byte UTF-8 validity check, for use in U8_IS_VALID_LEAD3_AND_T1.
+ * Each bit indicates whether one lead byte + first trail byte pair starts a valid sequence.
+ * Lead byte E0..EF bits 3..0 are used as byte index,
+ * first trail byte bits 7..5 are used as bit index into that byte.
+ * @see U8_IS_VALID_LEAD3_AND_T1
+ * @internal
+ */
+#define CBU8_LEAD3_T1_BITS "\x20\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x30\x10\x30\x30"
+
+/**
+ * Internal 3-byte UTF-8 validity check.
+ * Non-zero if lead byte E0..EF and first trail byte 00..FF start a valid sequence.
+ * @internal
+ */
+#define CBU8_IS_VALID_LEAD3_AND_T1(lead, t1) (CBU8_LEAD3_T1_BITS[(lead)&0xf]&(1<<((uint8_t)(t1)>>5)))
+
+/**
+ * Internal bit vector for 4-byte UTF-8 validity check, for use in U8_IS_VALID_LEAD4_AND_T1.
+ * Each bit indicates whether one lead byte + first trail byte pair starts a valid sequence.
+ * First trail byte bits 7..4 are used as byte index,
+ * lead byte F0..F4 bits 2..0 are used as bit index into that byte.
+ * @see U8_IS_VALID_LEAD4_AND_T1
+ * @internal
+ */
+#define CBU8_LEAD4_T1_BITS "\x00\x00\x00\x00\x00\x00\x00\x00\x1E\x0F\x0F\x0F\x00\x00\x00\x00"
+
+/**
+ * Internal 4-byte UTF-8 validity check.
+ * Non-zero if lead byte F0..F4 and first trail byte 00..FF start a valid sequence.
+ * @internal
+ */
+#define CBU8_IS_VALID_LEAD4_AND_T1(lead, t1) (CBU8_LEAD4_T1_BITS[(uint8_t)(t1)>>4]&(1<<((lead)&7)))
+
+/**
+ * Function for handling "next code point" with error-checking.
+ *
+ * This is internal since it is not meant to be called directly by external clients;
+ * however it is U_STABLE (not U_INTERNAL) since it is called by public macros in this
+ * file and thus must remain stable, and should not be hidden when other internal
+ * functions are hidden (otherwise public macros would fail to compile).
+ * @internal
+ */
+UChar32
+utf8_nextCharSafeBody(const uint8_t *s, int32_t *pi, int32_t length, ::base_icu::UChar32 c, ::base_icu::UBool strict);
+
+/**
+ * Does this code unit (byte) encode a code point by itself (US-ASCII 0..0x7f)?
+ * @param c 8-bit code unit (byte)
+ * @return TRUE or FALSE
+ * @stable ICU 2.4
+ */
+#define CBU8_IS_SINGLE(c) (((c)&0x80)==0)
+
+/**
+ * Is this code unit (byte) a UTF-8 lead byte? (0xC2..0xF4)
+ * @param c 8-bit code unit (byte)
+ * @return TRUE or FALSE
+ * @stable ICU 2.4
+ */
+#define CBU8_IS_LEAD(c) ((uint8_t)((c)-0xc2)<=0x32)
+
+/**
+ * Is this code unit (byte) a UTF-8 trail byte? (0x80..0xBF)
+ * @param c 8-bit code unit (byte)
+ * @return TRUE or FALSE
+ * @stable ICU 2.4
+ */
+#define CBU8_IS_TRAIL(c) ((int8_t)(c)<-0x40)
+
+/**
+ * How many code units (bytes) are used for the UTF-8 encoding
+ * of this Unicode code point?
+ * @param c 32-bit code point
+ * @return 1..4, or 0 if c is a surrogate or not a Unicode code point
+ * @stable ICU 2.4
+ */
+#define CBU8_LENGTH(c) \
+    ((uint32_t)(c)<=0x7f ? 1 : \
+        ((uint32_t)(c)<=0x7ff ? 2 : \
+            ((uint32_t)(c)<=0xd7ff ? 3 : \
+                ((uint32_t)(c)<=0xdfff || (uint32_t)(c)>0x10ffff ? 0 : \
+                    ((uint32_t)(c)<=0xffff ? 3 : 4)\
+                ) \
+            ) \
+        ) \
+    )
+
+/**
+ * The maximum number of UTF-8 code units (bytes) per Unicode code point (U+0000..U+10ffff).
+ * @return 4
+ * @stable ICU 2.4
+ */
+#define CBU8_MAX_LENGTH 4
+
+/**
+ * Get a code point from a string at a code point boundary offset,
+ * and advance the offset to the next code point boundary.
+ * (Post-incrementing forward iteration.)
+ * "Safe" macro, checks for illegal sequences and for string boundaries.
+ *
+ * The length can be negative for a NUL-terminated string.
+ *
+ * The offset may point to the lead byte of a multi-byte sequence,
+ * in which case the macro will read the whole sequence.
+ * If the offset points to a trail byte or an illegal UTF-8 sequence, then
+ * c is set to a negative value.
+ *
+ * @param s const uint8_t * string
+ * @param i int32_t string offset, must be i<length
+ * @param length int32_t string length
+ * @param c output UChar32 variable, set to <0 in case of an error
+ * @see U8_NEXT_UNSAFE
+ * @stable ICU 2.4
+ */
+#define CBU8_NEXT(s, i, length, c) { \
+    (c)=(uint8_t)(s)[(i)++]; \
+    if(!CBU8_IS_SINGLE(c)) { \
+        uint8_t __t1, __t2; \
+        if( /* handle U+0800..U+FFFF inline */ \
+                (0xe0<=(c) && (c)<0xf0) && \
+                (((i)+1)<(length) || (length)<0) && \
+                CBU8_IS_VALID_LEAD3_AND_T1((c), __t1=(s)[i]) && \
+                (__t2=(s)[(i)+1]-0x80)<=0x3f) { \
+            (c)=(((c)&0xf)<<12)|((__t1&0x3f)<<6)|__t2; \
+            (i)+=2; \
+        } else if( /* handle U+0080..U+07FF inline */ \
+                ((c)<0xe0 && (c)>=0xc2) && \
+                ((i)!=(length)) && \
+                (__t1=(s)[i]-0x80)<=0x3f) { \
+            (c)=(((c)&0x1f)<<6)|__t1; \
+            ++(i); \
+        } else { \
+            /* function call for "complicated" and error cases */ \
+            (c)=::base_icu::utf8_nextCharSafeBody((const uint8_t *)s, &(i), (length), c, -1); \
+        } \
+    } \
+}
+
+/**
+ * Append a code point to a string, overwriting 1 to 4 bytes.
+ * The offset points to the current end of the string contents
+ * and is advanced (post-increment).
+ * "Unsafe" macro, assumes a valid code point and sufficient space in the string.
+ * Otherwise, the result is undefined.
+ *
+ * @param s const uint8_t * string buffer
+ * @param i string offset
+ * @param c code point to append
+ * @see U8_APPEND
+ * @stable ICU 2.4
+ */
+#define CBU8_APPEND_UNSAFE(s, i, c) { \
+    if((uint32_t)(c)<=0x7f) { \
+        (s)[(i)++]=(uint8_t)(c); \
+    } else { \
+        if((uint32_t)(c)<=0x7ff) { \
+            (s)[(i)++]=(uint8_t)(((c)>>6)|0xc0); \
+        } else { \
+            if((uint32_t)(c)<=0xffff) { \
+                (s)[(i)++]=(uint8_t)(((c)>>12)|0xe0); \
+            } else { \
+                (s)[(i)++]=(uint8_t)(((c)>>18)|0xf0); \
+                (s)[(i)++]=(uint8_t)((((c)>>12)&0x3f)|0x80); \
+            } \
+            (s)[(i)++]=(uint8_t)((((c)>>6)&0x3f)|0x80); \
+        } \
+        (s)[(i)++]=(uint8_t)(((c)&0x3f)|0x80); \
+    } \
+}
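+
+/* A minimal usage sketch (illustrative; not part of the upstream ICU header).
+ * It decodes a UTF-8 buffer one code point at a time with CBU8_NEXT and
+ * re-encodes it with CBU8_APPEND_UNSAFE; the buffer names are hypothetical.
+ *
+ *   const uint8_t src[] = {0xE2, 0x82, 0xAC};  // U+20AC (euro sign)
+ *   uint8_t dst[CBU8_MAX_LENGTH];
+ *   int32_t i = 0, j = 0;
+ *   ::base_icu::UChar32 c;
+ *   while (i < (int32_t)sizeof(src)) {
+ *     CBU8_NEXT(src, i, (int32_t)sizeof(src), c);
+ *     if (c < 0) break;  // illegal sequence; c was set to a negative value
+ *     CBU8_APPEND_UNSAFE(dst, j, c);  // writes the same three bytes back
+ *   }
+ */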
+
+// source/common/unicode/utf16.h
+
+/**
+ * Does this code unit alone encode a code point (BMP, not a surrogate)?
+ * @param c 16-bit code unit
+ * @return TRUE or FALSE
+ * @stable ICU 2.4
+ */
+#define CBU16_IS_SINGLE(c) !CBU_IS_SURROGATE(c)
+
+/**
+ * Is this code unit a lead surrogate (U+d800..U+dbff)?
+ * @param c 16-bit code unit
+ * @return TRUE or FALSE
+ * @stable ICU 2.4
+ */
+#define CBU16_IS_LEAD(c) (((c)&0xfffffc00)==0xd800)
+
+/**
+ * Is this code unit a trail surrogate (U+dc00..U+dfff)?
+ * @param c 16-bit code unit
+ * @return TRUE or FALSE
+ * @stable ICU 2.4
+ */
+#define CBU16_IS_TRAIL(c) (((c)&0xfffffc00)==0xdc00)
+
+/**
+ * Is this code unit a surrogate (U+d800..U+dfff)?
+ * @param c 16-bit code unit
+ * @return TRUE or FALSE
+ * @stable ICU 2.4
+ */
+#define CBU16_IS_SURROGATE(c) CBU_IS_SURROGATE(c)
+
+/**
+ * Assuming c is a surrogate code point (U16_IS_SURROGATE(c)),
+ * is it a lead surrogate?
+ * @param c 16-bit code unit
+ * @return TRUE or FALSE
+ * @stable ICU 2.4
+ */
+#define CBU16_IS_SURROGATE_LEAD(c) (((c)&0x400)==0)
+
+/**
+ * Helper constant for U16_GET_SUPPLEMENTARY.
+ * @internal
+ */
+#define CBU16_SURROGATE_OFFSET ((0xd800<<10UL)+0xdc00-0x10000)
+
+/**
+ * Get a supplementary code point value (U+10000..U+10ffff)
+ * from its lead and trail surrogates.
+ * The result is undefined if the input values are not
+ * lead and trail surrogates.
+ *
+ * @param lead lead surrogate (U+d800..U+dbff)
+ * @param trail trail surrogate (U+dc00..U+dfff)
+ * @return supplementary code point (U+10000..U+10ffff)
+ * @stable ICU 2.4
+ */
+#define CBU16_GET_SUPPLEMENTARY(lead, trail) \
+    (((::base_icu::UChar32)(lead)<<10UL)+(::base_icu::UChar32)(trail)-CBU16_SURROGATE_OFFSET)
+
+/**
+ * Get the lead surrogate (0xd800..0xdbff) for a
+ * supplementary code point (0x10000..0x10ffff).
+ * @param supplementary 32-bit code point (U+10000..U+10ffff)
+ * @return lead surrogate (U+d800..U+dbff) for supplementary
+ * @stable ICU 2.4
+ */
+#define CBU16_LEAD(supplementary) (::base_icu::UChar)(((supplementary)>>10)+0xd7c0)
+
+/**
+ * Get the trail surrogate (0xdc00..0xdfff) for a
+ * supplementary code point (0x10000..0x10ffff).
+ * @param supplementary 32-bit code point (U+10000..U+10ffff)
+ * @return trail surrogate (U+dc00..U+dfff) for supplementary
+ * @stable ICU 2.4
+ */
+#define CBU16_TRAIL(supplementary) (::base_icu::UChar)(((supplementary)&0x3ff)|0xdc00)
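+
+/* Worked example (illustrative; not part of the upstream ICU header): for
+ * U+1F600, CBU16_LEAD(0x1F600) == 0xD83D and CBU16_TRAIL(0x1F600) == 0xDE00,
+ * and CBU16_GET_SUPPLEMENTARY(0xD83D, 0xDE00) recovers 0x1F600. */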
+
+/**
+ * How many 16-bit code units are used to encode this Unicode code point? (1 or 2)
+ * The result is not defined if c is not a Unicode code point (U+0000..U+10ffff).
+ * @param c 32-bit code point
+ * @return 1 or 2
+ * @stable ICU 2.4
+ */
+#define CBU16_LENGTH(c) ((uint32_t)(c)<=0xffff ? 1 : 2)
+
+/**
+ * The maximum number of 16-bit code units per Unicode code point (U+0000..U+10ffff).
+ * @return 2
+ * @stable ICU 2.4
+ */
+#define CBU16_MAX_LENGTH 2
+
+/**
+ * Get a code point from a string at a code point boundary offset,
+ * and advance the offset to the next code point boundary.
+ * (Post-incrementing forward iteration.)
+ * "Safe" macro, handles unpaired surrogates and checks for string boundaries.
+ *
+ * The length can be negative for a NUL-terminated string.
+ *
+ * The offset may point to the lead surrogate unit
+ * for a supplementary code point, in which case the macro will read
+ * the following trail surrogate as well.
+ * If the offset points to a trail surrogate or
+ * to a single, unpaired lead surrogate, then c is set to that unpaired surrogate.
+ *
+ * @param s const UChar * string
+ * @param i string offset, must be i<length
+ * @param length string length
+ * @param c output UChar32 variable
+ * @see U16_NEXT_UNSAFE
+ * @stable ICU 2.4
+ */
+#define CBU16_NEXT(s, i, length, c) { \
+    (c)=(s)[(i)++]; \
+    if(CBU16_IS_LEAD(c)) { \
+        uint16_t __c2; \
+        if((i)!=(length) && CBU16_IS_TRAIL(__c2=(s)[(i)])) { \
+            ++(i); \
+            (c)=CBU16_GET_SUPPLEMENTARY((c), __c2); \
+        } \
+    } \
+}
+
+/**
+ * Append a code point to a string, overwriting 1 or 2 code units.
+ * The offset points to the current end of the string contents
+ * and is advanced (post-increment).
+ * "Unsafe" macro, assumes a valid code point and sufficient space in the string.
+ * Otherwise, the result is undefined.
+ *
+ * @param s const UChar * string buffer
+ * @param i string offset
+ * @param c code point to append
+ * @see U16_APPEND
+ * @stable ICU 2.4
+ */
+#define CBU16_APPEND_UNSAFE(s, i, c) { \
+    if((uint32_t)(c)<=0xffff) { \
+        (s)[(i)++]=(uint16_t)(c); \
+    } else { \
+        (s)[(i)++]=(uint16_t)(((c)>>10)+0xd7c0); \
+        (s)[(i)++]=(uint16_t)(((c)&0x3ff)|0xdc00); \
+    } \
+}
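+
+/* A minimal usage sketch (illustrative; not part of the upstream ICU header):
+ * forward iteration over a UTF-16 buffer; the names are hypothetical.
+ *
+ *   const uint16_t s[] = {0xD83D, 0xDE00, 0x0041};  // U+1F600, then U+0041
+ *   int32_t i = 0;
+ *   ::base_icu::UChar32 c;
+ *   while (i < 3) {
+ *     CBU16_NEXT(s, i, 3, c);  // yields 0x1F600 on the first pass, then 0x41
+ *   }
+ */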
+
+}  // namespace base_icu
+
+#endif  // BASE_THIRD_PARTY_ICU_ICU_UTF_H_
diff --git a/base/third_party/libevent/BUILD.gn b/base/third_party/libevent/BUILD.gn
new file mode 100644
index 0000000..e934454
--- /dev/null
+++ b/base/third_party/libevent/BUILD.gn
@@ -0,0 +1,80 @@
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/config/nacl/config.gni")
+
+static_library("libevent") {
+  sources = [
+    "buffer.c",
+    "evbuffer.c",
+    "evdns.c",
+    "evdns.h",
+    "event-config.h",
+    "event-internal.h",
+    "event.c",
+    "event.h",
+    "event_tagging.c",
+    "evhttp.h",
+    "evrpc-internal.h",
+    "evrpc.c",
+    "evrpc.h",
+    "evsignal.h",
+    "evutil.c",
+    "evutil.h",
+    "http-internal.h",
+    "http.c",
+    "log.c",
+    "log.h",
+    "min_heap.h",
+    "poll.c",
+    "select.c",
+    "signal.c",
+    "strlcpy-internal.h",
+    "strlcpy.c",
+  ]
+
+  defines = [ "HAVE_CONFIG_H" ]
+
+  if (is_mac || is_ios) {
+    sources += [
+      "kqueue.c",
+      "mac/config.h",
+      "mac/event-config.h",
+    ]
+    include_dirs = [ "mac" ]
+  } else if (is_linux) {
+    sources += [
+      "epoll.c",
+      "linux/config.h",
+      "linux/event-config.h",
+    ]
+    include_dirs = [ "linux" ]
+  } else if (is_android) {
+    sources += [
+      "android/config.h",
+      "android/event-config.h",
+      "epoll.c",
+    ]
+    include_dirs = [ "android" ]
+  } else if (is_nacl_nonsfi) {
+    sources -= [
+      "evdns.c",
+      "event_tagging.c",
+      "evrpc.c",
+      "http.c",
+      "select.c",
+      "signal.c",
+    ]
+    sources += [
+      "nacl_nonsfi/config.h",
+      "nacl_nonsfi/event-config.h",
+      "nacl_nonsfi/random.c",
+      "nacl_nonsfi/signal_stub.c",
+    ]
+    include_dirs = [ "nacl_nonsfi" ]
+  }
+
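+  # libevent is third-party code, so drop Chromium's strict warning
+  # configuration in favor of the relaxed no_chromium_code config.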
+  configs -= [ "//build/config/compiler:chromium_code" ]
+  configs += [ "//build/config/compiler:no_chromium_code" ]
+}
diff --git a/base/third_party/libevent/ChangeLog b/base/third_party/libevent/ChangeLog
new file mode 100644
index 0000000..893b087
--- /dev/null
+++ b/base/third_party/libevent/ChangeLog
@@ -0,0 +1,253 @@
+Changes in 1.4.15-stable (5 January 2015)
+
+ o Avoid integer overflow bugs in evbuffer_add() and related functions.  See CVE-2014-6272 advisory for more information. (d49bc0e88b81a5812116074dc007f1db0ca1eecd)
+
+ o Pass flags to fcntl(F_SETFL) as int, not long (b3d0382)
+ o Backport and tweak the LICENSE file for 1.4 (8a5ebd3)
+ o set close-on-exec bit for file descriptors created by dns subsystem (9985231 Ralf Schmitt)
+ o Replace unused case of FD_CLOSEONEXEC with a proper null statement. (44f04a2)
+ o Fix kqueue correctness test on x86_64 (1c25b07)
+ o Avoid deadlock when activating signals. (e0e6958)
+ o Backport doc fix for evhttp_bind_socket. (95b71d0 Marco)
+ o Fix an issue with forking and signal socketpairs in select/poll backends (f0ff765)
+ o Fix compilation on Visual Studio 2010 (53c47c2 VDm)
+ o Defensive programming to prevent (hopefully impossible) stack-stomping (2d8cf0b)
+ o Check for POLLERR, POLLHUP and POLLNVAL for Solaris event ports (353b4ac Trond Norbye)
+ o Fix a bug that could allow dns requests with duplicate tx ids (e50ba5b)
+ o Avoid truncating huge values for content-length (1d6e30e)
+ o Take generated files out of git; add correct m4 magic for libtool to auto* files (7cf794b)
+ o Prefer autoreconf -ivf to manual autogen.sh (823d9be)
+
+
+Changes in 1.4.14b-stable
+ o Set the VERSION_INFO correctly for 1.4.14
+
+
+Changes in 1.4.14-stable
+ o Add a .gitignore file for the 1.4 branch. (d014edb)
+ o Backport evbuffer_readln(). (b04cc60 Nicholas Marriott)
+ o Make the evbuffer_readln backport follow the current API (c545485)
+ o Valgrind fix: Clear struct kevent before checking for OSX bug. (5713d5d William Ahern)
+ o Fix a crash when reading a badly formatted resolv.conf (5b10d00 Yasuoka Masahiko)
+ o Fix memory-leak of signal handler array with kqueue. [backport] (01f3775)
+ o Update sample/signal-test.c to use newer APIs and not leak. (891765c Evan Jones)
+ o Correct all versions in 1.4 branch (ac0d213)
+ o Make evutil_make_socket_nonblocking() leave any other flags alone. (81c26ba Jardel Weyrich)
+ o Adjusted fcntl() retval comparison on evutil_make_socket_nonblocking(). (5f2e250 Jardel Weyrich)
+ o Correct a debug message in evhttp_parse_request_line (35df59e)
+ o Merge branch 'readln-backport' into patches-1.4 (8771d5b)
+ o Do not send an HTTP error when we've already closed or responded. (4fd2dd9 Pavel Plesov)
+ o Re-add event_sigcb; some old code _was_ still using it. :( (bd03d06)
+ o Make Libevent 1.4 build on win32 with Unicode enabled. (bce58d6 Brodie Thiesfield)
+ o Distribute nmake makefile for 1.4 (20d706d)
+ o do not fail while sending on http connections the client closed. (5c8b446)
+ o make evhttp_send() safe against terminated connections, too (01ea0c5)
+ o Fix a free(NULL) in min_heap.h (2458934)
+ o Fix memory leak when setting up priorities; reported by Alexander Drozdov (cb1a722)
+ o Clean up properly when adding a signal handler fails. (ae6ece0 Gilad Benjamini)
+ o Do not abort HTTP requests missing a reason string. (29d7b32 Pierre Phaneuf)
+ o Fix compile warning in http.c (906d573)
+ o Define _REENTRANT as needed on Solaris, elsewhere (6cbea13)
+
+
+Changes in 1.4.13-stable:
+ o If the kernel tells us that there are a negative number of bytes to read from a socket, do not believe it.  Fixes bug 2841177; found by Alexander Pronchenkov.
+ o Do not allocate the maximum event queue and fd array for the epoll backend at startup.  Instead, start out accepting 32 events at a time, and double the queue's size when it seems that the OS is generating events faster than we're requesting them.  Saves up to 512K per epoll-based event_base.  Resolves bug 2839240.
+ o Fix compilation on Android, which forgot to define fd_mask in its sys/select.h
+ o Do not drop data from evbuffer when out of memory; reported by Jacek Masiulaniec
+ o Rename our replacement compat/sys/_time.h header to avoid a build conflict on HPUX; reported by Kathryn Hogg.
+ o Build kqueue.c correctly on GNU/kFreeBSD platforms. Patch pulled upstream from Debian.
+ o Fix a problem with excessive memory allocation when using multiple event priorities.
+ o When running set[ug]id, don't check the environment. Based on a patch from OpenBSD.
+
+
+Changes in 1.4.12-stable:
+ o Try to contain the degree of failure when running on a win32 version so heavily firewalled that we can't fake a socketpair.
+ o Fix an obscure timing-dependent, allocator-dependent crash in the evdns code.
+ o Use __VA_ARGS__ syntax for varargs macros in event_rpcgen when compiler is not GCC.
+ o Activate fd events in a pseudorandom order with O(N) backends, so that we don't systematically favor low fds (select) or earlier-added fds (poll, win32).
+ o Fix another pair of fencepost bugs in epoll.c.  [Patch from Adam Langley.]
+ o Do not break evdns connections to nameservers when our IP changes.
+ o Set truncated flag correctly in evdns server replies.
+ o Disable strict aliasing with GCC: our code is not compliant with it.
+
+Changes in 1.4.11-stable:
+ o Fix a bug when removing a timeout from the heap. [Patch from Marko Kreen]
+ o Remove the limit on size of HTTP headers by removing static buffers.
+ o Fix a nasty dangling pointer bug in epoll.c that could occur after epoll_recalc(). [Patch from Kevin Springborn]
+ o Distribute Win32-Code/event-config.h, not ./event-config.h
+
+Changes in 1.4.10-stable:
+ o clean up buffered http connection data on reset; reported by Brian O'Kelley
+ o bug fix and potential race condition in signal handling; from Alexander Drozdov
+ o rename the Solaris event ports backend to evport
+ o support compilation on Haiku
+ o fix signal processing when a signal callback delivers a signal; from Alexander Drozdov
+ o const-ify some arguments to evdns functions.
+ o off-by-one error in epoll_recalc; reported by Victor Goya
+ o include Doxyfile in tar ball; from Jeff Garzik
+ o correctly parse queries with encoded \r, \n or + characters
+
+Changes in 1.4.9-stable:
+ o event_add would not return error for some backends; from Dean McNamee
+ o Clear the timer cache on entering the event loop; reported by Victor Chang
+ o Only bind the socket on connect when a local address has been provided; reported by Alejo Sanchez
+ o Allow setting of local port for evhttp connections to support millions of connections from a single system; from Richard Jones.
+ o Clear the timer cache when leaving the event loop; reported by Robin Haberkorn
+ o Fix a typo in setting the global event base; reported by lance.
+ o Fix a memory leak when reading multi-line headers
+ o Fix a memory leak by not running explicit close detection for server connections
+
+Changes in 1.4.8-stable:
+ o Match the query in DNS replies to the query in the request; from Vsevolod Stakhov.
+ o Fix a merge problem in which name_from_addr returned pointers to the stack; found by Jiang Hong.
+ o Do not remove Accept-Encoding header
+	
+Changes in 1.4.7-stable:
+ o Fix a bug where headers arriving in multiple packets were not parsed; fix from Jiang Hong; test by me.
+	
+Changes in 1.4.6-stable:
+ o evutil.h now includes <stdarg.h> directly
+ o switch all uses of [v]snprintf over to evutil
+ o Correct handling of trailing headers in chunked replies; from Scott Lamb.
+ o Support multi-line HTTP headers; based on a patch from Moshe Litvin
+ o Reject negative Content-Length headers; anonymous bug report
+ o Detect CLOCK_MONOTONIC at runtime for evdns; anonymous bug report	
+ o Fix a bug where deleting signals with the kqueue backend would cause subsequent adds to fail
+ o Support multiple events listening on the same signal; make signals regular events that go on the same event queue; problem report by Alexander Drozdov.
+ o Deal with evbuffer_read() returning -1 on EINTR|EAGAIN; from Adam Langley.
+ o Fix a bug in which the DNS server would incorrectly set the type of a cname reply to A.
+ o Fix a bug where setting the timeout on a bufferevent would not take effect if the event was already pending.
+ o Fix a memory leak when using signals for some event bases; reported by Alexander Drozdov.
+ o Add libevent.vcproj file to distribution to help with Windows build.
+ o Fix a problem with epoll() and reinit; problem report by Alexander Drozdov.	
+ o Fix off-by-one errors in devpoll; from Ian Bell
+ o Make event_add not change any state if it fails; reported by Ian Bell.
+ o Do not warn on accept when errno is either EAGAIN or EINTR
+
+Changes in 1.4.5-stable:
+ o Fix connection keep-alive behavior for HTTP/1.0
+ o Fix use of freed memory in event_reinit; pointed out by Peter Postma
+ o Constify struct timeval * where possible; pointed out by Forest Wilkinson
+ o allow min_heap_erase to be called on removed members; from liusifan.
+ o Rename INPUT and OUTPUT to EVRPC_INPUT and EVRPC_OUTPUT.  Retain INPUT/OUTPUT aliases on non-win32 platforms for backwards compatibility.
+ o Do not use SO_REUSEADDR when connecting
+ o Fix Windows build
+ o Fix a bug in event_rpcgen when generating fixed-sized entries
+
+Changes in 1.4.4-stable:
+ o Correct the documentation on buffer printf functions.
+ o Don't warn on unimplemented epoll_create(): this isn't a problem, just a reason to fall back to poll or select.
+ o Correctly handle timeouts larger than 35 minutes on Linux with epoll.c.  This is probably a kernel defect, but we'll have to support old kernels anyway even if it gets fixed.
+ o Fix a potential stack corruption bug in tagging on 64-bit CPUs.
+ o expose bufferevent_setwatermark via header files and fix high watermark on read
+ o fix a bug in bufferevent read water marks and add a test for them
+ o introduce bufferevent_setcb and bufferevent_setfd to allow better manipulation of bufferevents
+ o use libevent's internal timercmp on all platforms, to avoid bugs on old platforms where timercmp(a,b,<=) is buggy.
+ o reduce system calls for getting current time by caching it.
+ o fix evhttp_bind_socket() so that multiple sockets can be bound by the same http server.
+ o Build test directory correctly with CPPFLAGS set.
+ o Fix build under Visual C++ 2005.
+ o Expose evhttp_accept_socket() API.
+ o Merge windows gettimeofday() replacement into a new evutil_gettimeofday() function.
+ o Fix autoconf script behavior on IRIX.
+ o Make sure winsock2.h include always comes before windows.h include.
+
+Changes in 1.4.3-stable:
+ o include Content-Length in reply for HTTP/1.0 requests with keep-alive
+ o Patch from Tani Hosokawa: make some functions in http.c threadsafe.
+ o Do not free the kqop file descriptor in other processes, also allow it to be 0; from Andrei Nigmatulin
+ o make event_rpcgen.py generate code include event-config.h; reported by Sam Banks.
+ o make event methods static so that they are not exported; from Andrei Nigmatulin
+ o make RPC replies use application/octet-stream as mime type
+ o do not delete uninitialized timeout event in evdns
+
+Changes in 1.4.2-rc:
+ o remove pending timeouts on event_base_free()
+ o also check EAGAIN for Solaris' event ports; from W.C.A. Wijngaards
+ o devpoll and evport need reinit; tested by W.C.A Wijngaards
+ o event_base_get_method; from Springande Ulv
+ o Send CRLF after each chunk in HTTP output, for compliance with RFC2616.  Patch from "propanbutan".  Fixes bug 1894184.
+ o Add an int64_t parsing function, with unit tests, so we can apply Scott Lamb's fix to allow large HTTP values.
+ o Use a 64-bit field to hold HTTP content-lengths.  Patch from Scott Lamb.
+ o Allow regression code to build even without Python installed
+ o remove NDEBUG ifdefs from evdns.c
+ o update documentation of event_loop and event_base_loop; from Tani Hosokawa.
+ o detect integer types properly on platforms without stdint.h
+ o Remove "AM_MAINTAINER_MODE" declaration in configure.in: now makefiles and configure should get re-generated automatically when Makefile.am or configure.in chanes.
+ o do not insert event into list when evsel->add fails
+
+Changes in 1.4.1-beta:
+ o free minheap on event_base_free(); from Christopher Layne
+ o debug cleanups in signal.c; from Christopher Layne
+ o provide event_base_new() that does not set the current_base global
+ o bufferevent_write now uses a const source argument; report from Charles Kerr
+ o better documentation for event_base_loopexit; from Scott Lamb.
+ o Make kqueue have the same behavior as other backends when a signal is caught between event_add() and event_loop().  Previously, it would catch and ignore such signals.
+ o Make kqueue restore signal handlers correctly when event_del() is called.
+ o provide event_reinit() to reinitialize an event_base after fork
+ o small improvements to evhttp documentation
+ o always generate Date and Content-Length headers for HTTP/1.1 replies
+ o set the correct event base for HTTP close events
+ o New function, event_{base_}loopbreak.  Like event_loopexit, it makes an event loop stop executing and return.  Unlike event_loopexit, it keeps subsequent pending events from getting executed.  Patch from Scott Lamb
+ o Removed obsoleted recalc code
+ o pull setters/getters out of RPC structures into a base class to which we just need to store a pointer; this reduces the memory footprint of these structures.
+ o fix a bug with event_rpcgen for integers
+ o move EV_PERSIST handling out of the event backends
+ o support for 32-bit tag numbers in rpc structures; this is wire compatible, but changes the API slightly.
+ o prefix {encode,decode}_tag functions with evtag to avoid collisions
+ o Correctly handle DNS replies with no answers set (Fixes bug 1846282)
+ o The configure script now takes an --enable-gcc-warnings option that turns on many optional gcc warnings.  (Nick has been building with these for a while, but they might be useful to other developers.)
+ o When building with GCC, use the "format" attribute to verify type correctness of calls to printf-like functions.
+ o removed linger from http server socket; reported by Ilya Martynov
+ o allow \r or \n individually to separate HTTP headers instead of the standard "\r\n"; from Charles Kerr.
+ o demote most http warnings to debug messages
+ o Fix Solaris compilation; from Magne Mahre
+ o Add a "Date" header to HTTP responses, as required by HTTP 1.1.
+ o Support specifying the local address of an evhttp_connection using set_local_address
+ o Fix a memory leak in which failed HTTP connections would not free the request object
+ o Make adding of array members in event_rpcgen more efficient by doubling memory allocation
+ o Fix a memory leak in the DNS server
+ o Fix compilation when DNS_USE_OPENSSL_FOR_ID is enabled
+ o Fix buffer size and string generation in evdns_resolve_reverse_ipv6().
+ o Respond to nonstandard DNS queries with "NOTIMPL" rather than by ignoring them.
+ o In DNS responses, the CD flag should be preserved, not the TC flag.
+ o Fix http.c to compile properly with USE_DEBUG; from Christopher Layne
+ o Handle NULL timeouts correctly on Solaris; from Trond Norbye
+ o Recalculate pending events properly when reallocating event array on Solaris; from Trond Norbye
+ o Add Doxygen documentation to header files; from Mark Heily
+ o Add a evdns_set_transaction_id_fn() function to override the default
+   transaction ID generation code.
+ o Add an evutil module (with header evutil.h) to implement our standard cross-platform hacks, on the theory that somebody else would like to use them too.
+ o Fix signals implementation on windows.
+ o Fix http module on windows to close sockets properly.
+ o Make autogen.sh script run correctly on systems where /bin/sh isn't bash. (Patch from Trond Norbye, rewritten by Hagne Mahre and then Hannah Schroeter.)
+ o Skip calling gettime() in timeout_process if we are not in fact waiting for any events. (Patch from Trond Norbye)
+ o Make test subdirectory compile under mingw.
+ o Fix win32 buffer.c behavior so that it is correct for sockets (which do not like ReadFile and WriteFile).
+ o Make the test.sh script run unit tests for the evpoll method.
+ o Make the entire evdns.h header enclosed in "extern C" as appropriate.
+ o Fix implementation of strsep on platforms that lack it
+ o Fix implementation of getaddrinfo on platforms that lack it; mainly, this will make Windows http.c work better.  Original patch by Lubomir Marinov.
+ o Fix evport implementation: port_disassociate called on unassociated events resulting in bogus errors; more efficient memory management; from Trond Norbye and Prakash Sangappa
+ o support for hooks on rpc input and output; can be used to implement rpc independent processing such as compression or authentication.
+ o use a min heap instead of a red-black tree for timeouts; as a result finding the min is an O(1) operation now; from Maxim Yegorushkin
+ o associate an event base with an rpc pool
+ o added two additional libraries: libevent_core and libevent_extra in addition to the regular libevent.  libevent_core contains only the event core whereas libevent_extra contains dns, http and rpc support
+ o Begin using libtool's library versioning support correctly.  If we don't mess up, this will more or less guarantee binaries linked against old versions of libevent continue working when we make changes to libevent that do not break backward compatibility.
+ o Fix evhttp.h compilation when TAILQ_ENTRY is not defined.
+ o Small code cleanups in epoll_dispatch().
+ o Increase the maximum number of addresses read from a packet in evdns to 32.
+ o Remove support for the rtsig method: it hasn't compiled for a while, and nobody seems to miss it very much.  Let us know if there's a good reason to put it back in.
+ o Rename the "class" field in evdns_server_request to dns_question_class, so that it won't break compilation under C++.  Use a macro so that old code won't break.  Mark the macro as deprecated.
+ o Fix DNS unit tests so that having a DNS server with broken IPv6 support is no longer cause for aborting the unit tests.
+ o Make event_base_free() succeed even if there are pending non-internal events on a base.  This may still leak memory and fds, but at least it no longer crashes.
+ o Post-process the config.h file into a new, installed event-config.h file that we can install, and whose macros will be safe to include in header files.
+ o Remove the long-deprecated acconfig.h file.
+ o Do not require #include <sys/types.h> before #include <event.h>.
+ o Add new evutil_timer* functions to wrap (or replace) the regular timeval manipulation functions.
+ o Fix many build issues when using the Microsoft C compiler.
+ o Remove a bash-ism in autogen.sh
+ o When calling event_del on a signal, restore the signal handler's previous value rather than setting it to SIG_DFL. Patch from Christopher Layne.
+ o Make the logic for active events work better with internal events; patch from Christopher Layne.
+ o We do not need to specially remove a timeout before calling event_del; patch from Christopher Layne.
diff --git a/base/third_party/libevent/Doxyfile b/base/third_party/libevent/Doxyfile
new file mode 100644
index 0000000..77f6de8
--- /dev/null
+++ b/base/third_party/libevent/Doxyfile
@@ -0,0 +1,230 @@
+# Doxyfile 1.5.1
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project
+#
+# All text after a hash (#) is considered a comment and will be ignored
+# The format is:
+#       TAG = value [value, ...]
+# For lists items can also be appended using:
+#       TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (" ")
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded 
+# by quotes) that should identify the project.
+
+PROJECT_NAME           = libevent
+
+# Place all output under 'doxygen/'
+
+OUTPUT_DIRECTORY        = doxygen/
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen 
+# will interpret the first line (until the first dot) of a JavaDoc-style 
+# comment as the brief description. If set to NO, the JavaDoc 
+# comments will behave just like the Qt-style comments (thus requiring an 
+# explicit @brief command for a brief description).
+
+JAVADOC_AUTOBRIEF      = YES
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C 
+# sources only. Doxygen will then generate output that is more tailored for C. 
+# For instance, some of the names that are used will be different. The list 
+# of all members will be omitted, etc.
+
+OPTIMIZE_OUTPUT_FOR_C  = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the 
+# brief documentation of file, namespace and class members alphabetically 
+# by member name. If set to NO (the default) the members will appear in 
+# declaration order.
+
+SORT_BRIEF_DOCS        = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag can be used to specify the files and/or directories that contain 
+# documented source files. You may enter file names like "myfile.cpp" or 
+# directories like "/usr/src/myproject". Separate the files or directories 
+# with spaces.
+
+INPUT                  = event.h evdns.h evhttp.h evrpc.h
+
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES (the default) Doxygen will 
+# generate HTML output.
+
+GENERATE_HTML          = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will 
+# generate Latex output.
+
+GENERATE_LATEX         = YES
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. 
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be 
+# put in front of it. If left blank `latex' will be used as the default path.
+
+LATEX_OUTPUT           = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be 
+# invoked. If left blank `latex' will be used as the default command name.
+
+LATEX_CMD_NAME         = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to 
+# generate index for LaTeX. If left blank `makeindex' will be used as the 
+# default command name.
+
+MAKEINDEX_CMD_NAME     = makeindex
+
+# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact 
+# LaTeX documents. This may be useful for small projects and may help to 
+# save some trees in general.
+
+COMPACT_LATEX          = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used 
+# by the printer. Possible values are: a4, a4wide, letter, legal and 
+# executive. If left blank a4wide will be used.
+
+PAPER_TYPE             = a4wide
+
+# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX 
+# packages that should be included in the LaTeX output.
+
+EXTRA_PACKAGES         = 
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for 
+# the generated latex document. The header should contain everything until 
+# the first chapter. If it is left blank doxygen will generate a 
+# standard header. Notice: only use this tag if you know what you are doing!
+
+LATEX_HEADER           = 
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated 
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will 
+# contain links (just like the HTML output) instead of page references 
+# This makes the output suitable for online browsing using a pdf viewer.
+
+PDF_HYPERLINKS         = NO
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of 
+# plain latex in the generated Makefile. Set this option to YES to get a 
+# higher quality PDF documentation.
+
+USE_PDFLATEX           = NO
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode 
+# command to the generated LaTeX files. This will instruct LaTeX to keep 
+# running if errors occur, instead of asking the user for help. 
+# This option is also used when generating formulas in HTML.
+
+LATEX_BATCHMODE        = NO
+
+# If LATEX_HIDE_INDICES is set to YES then doxygen will not 
+# include the index chapters (such as File Index, Compound Index, etc.) 
+# in the output.
+
+LATEX_HIDE_INDICES     = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES (the default) Doxygen will 
+# generate man pages
+
+GENERATE_MAN           = YES
+
+# The MAN_EXTENSION tag determines the extension that is added to 
+# the generated man pages (default is the subroutine's section .3)
+
+MAN_EXTENSION          = .3
+
+# If the MAN_LINKS tag is set to YES and Doxygen generates man output, 
+# then it will generate one additional man file for each entity 
+# documented in the real man page(s). These additional files 
+# only source the real man page, but without them the man command 
+# would be unable to find the correct page. The default is NO.
+
+MAN_LINKS              = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor   
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will 
+# evaluate all C-preprocessor directives found in the sources and include 
+# files.
+
+ENABLE_PREPROCESSING   = YES
+
+# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro 
+# names in the source code. If set to NO (the default) only conditional 
+# compilation will be performed. Macro expansion can be done in a controlled 
+# way by setting EXPAND_ONLY_PREDEF to YES.
+
+MACRO_EXPANSION        = NO
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES 
+# then the macro expansion is limited to the macros specified with the 
+# PREDEFINED and EXPAND_AS_DEFINED tags.
+
+EXPAND_ONLY_PREDEF     = NO
+
+# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files 
+# in the INCLUDE_PATH (see below) will be search if a #include is found.
+
+SEARCH_INCLUDES        = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that 
+# contain include files that are not input files but should be processed by 
+# the preprocessor.
+
+INCLUDE_PATH           = 
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard 
+# patterns (like *.h and *.hpp) to filter out the header-files in the 
+# directories. If left blank, the patterns specified with FILE_PATTERNS will 
+# be used.
+
+INCLUDE_FILE_PATTERNS  = 
+
+# The PREDEFINED tag can be used to specify one or more macro names that 
+# are defined before the preprocessor is started (similar to the -D option of 
+# gcc). The argument of the tag is a list of macros of the form: name 
+# or name=definition (no spaces). If the definition and the = are 
+# omitted =1 is assumed. To prevent a macro definition from being 
+# undefined via #undef or recursively expanded use the := operator 
+# instead of the = operator.
+
+PREDEFINED             = TAILQ_ENTRY RB_ENTRY _EVENT_DEFINED_TQENTRY
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then 
+# this tag can be used to specify a list of macro names that should be expanded. 
+# The macro definition that is found in the sources will be used. 
+# Use the PREDEFINED tag if you want to use a different macro definition.
+
+EXPAND_AS_DEFINED      = 
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then 
+# doxygen's preprocessor will remove all function-like macros that are alone 
+# on a line, have an all uppercase name, and do not end with a semicolon. Such 
+# function macros are typically used for boiler-plate code, and will confuse 
+# the parser if not removed.
+
+SKIP_FUNCTION_MACROS   = YES
diff --git a/base/third_party/libevent/LICENSE b/base/third_party/libevent/LICENSE
new file mode 100644
index 0000000..cabd9fc
--- /dev/null
+++ b/base/third_party/libevent/LICENSE
@@ -0,0 +1,53 @@
+Libevent is available for use under the following license, commonly known
+as the 3-clause (or "modified") BSD license:
+
+==============================
+Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
+Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in the
+   documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+   derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+==============================
+
+Portions of Libevent are based on works by others, also made available by
+them under the three-clause BSD license above.  The copyright notices are
+available in the corresponding source files; the license is as above.  Here's
+a list:
+
+log.c:
+   Copyright (c) 2000 Dug Song <dugsong@monkey.org>
+   Copyright (c) 1993 The Regents of the University of California.
+
+strlcpy.c:
+   Copyright (c) 1998 Todd C. Miller <Todd.Miller@courtesan.com>
+
+win32.c:
+   Copyright (c) 2003 Michael A. Davis <mike@datanerds.net>
+
+evport.c:
+   Copyright (c) 2007 Sun Microsystems
+
+min_heap.h:
+   Copyright (c) 2006 Maxim Yegorushkin <maxim.yegorushkin@gmail.com>
+
+tree.h:
+   Copyright 2002 Niels Provos <provos@citi.umich.edu>
diff --git a/base/third_party/libevent/Makefile.am b/base/third_party/libevent/Makefile.am
new file mode 100644
index 0000000..c1ed62a
--- /dev/null
+++ b/base/third_party/libevent/Makefile.am
@@ -0,0 +1,152 @@
+AUTOMAKE_OPTIONS = foreign no-dependencies
+
+ACLOCAL_AMFLAGS = -I m4
+
+# This is the point release for libevent.  It shouldn't include any
+# a/b/c/d/e notations.
+RELEASE = 1.4
+
+# This is the version info for the libevent binary API.  It has three
+# numbers:
+#   Current  -- the number of the binary API that we're implementing
+#   Revision -- which iteration of the implementation of the binary
+#               API are we supplying?
+#   Age      -- How many previous binary API versions do we also
+#               support?
+#
+# If we release a new version that does not change the binary API,
+# increment Revision.
+#
+# If we release a new version that changes the binary API, but does
+# not break programs compiled against the old binary API, increment
+# Current and Age.  Set Revision to 0, since this is the first
+# implementation of the new API.
+#
+# Otherwise, we're changing the binary API and breaking backward
+# compatibility with old binaries.  Increment Current.  Set Age to 0,
+# since we're backward compatible with no previous APIs.  Set Revision
+# to 0 too.
+VERSION_INFO = 4:1:2
+
+###
+# History:
+#   We started using Libtool around version 1.0d.  For all versions from
+#   1.0d through 1.3e, we set RELEASE to the version name, and
+#   VERSION_INFO to something haphazard.  They didn't matter, since
+#   setting RELEASE meant that no version of Libevent was treated as
+#   binary-compatible with any other version.
+#
+#   As of 1.4.0-beta, we set RELEASE to "1.4", so that releases in the
+#   1.4.x series could be potentially binary-compatible with one another,
+#   but not with any other series.  (They aren't.)  We didn't necessarily
+#   set VERSION_INFO correctly, or update it as often as we should have.
+#   The VERSION_INFO values were:
+#    1.4.0-beta .. 1.4.4-stable     : 2:0:0   [See note 1]
+#    1.4.5-stable                   : 3:0:1   (compatible ABI change)
+#    1.4.6-stable                   : 3:1:1   (no ABI change)
+#    1.4.7-stable                   : 3:1:1   [see note 1]
+#    1.4.8-stable                   : 3:2:1   (no ABI change)
+#    1.4.9-stable                   : 3:2:1   [see note 1]
+#    1.4.10-stable                  : 3:3:1   (no ABI change)
+#    1.4.11-stable .. 1.4.13-stable : 3:3:1   [see note 1]
+#    1.4.14a-stable:                : 3:3:2   [see note 2]
+#    1.4.14b-stable:                : 4:0:2   (compatible ABI change)
+#    1.4.15-stable:                 : 4:1:2   (no ABI change)
+#
+# [1]: Using the same VERSION_INFO value was wrong; we should have been
+#      updating the Revision field.
+# [2]: We set the VERSION_INFO completely wrong on 1.4.14b-stable
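+#
+# Worked example (illustrative): starting from VERSION_INFO = 4:1:2, a
+# release that adds API without breaking old binaries becomes 5:0:3
+# (Current+1, Revision=0, Age+1), while a release that breaks the binary
+# API becomes 5:0:0 (Current+1, Revision=0, Age=0).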
+
+bin_SCRIPTS = event_rpcgen.py
+
+EXTRA_DIST = autogen.sh event.h event-internal.h log.h evsignal.h evdns.3 \
+	evrpc.h evrpc-internal.h min_heap.h \
+	event.3 \
+	Doxyfile \
+	kqueue.c epoll_sub.c epoll.c select.c poll.c signal.c \
+	evport.c devpoll.c event_rpcgen.py \
+	sample/Makefile.am sample/Makefile.in sample/event-test.c \
+	sample/signal-test.c sample/time-test.c \
+	test/Makefile.am test/Makefile.in test/bench.c test/regress.c \
+	test/test-eof.c test/test-weof.c test/test-time.c \
+	test/test-init.c test/test.sh \
+	compat/sys/queue.h compat/sys/_libevent_time.h \
+	WIN32-Code/config.h \
+	WIN32-Code/event-config.h \
+	WIN32-Code/win32.c \
+	WIN32-Code/tree.h \
+	WIN32-Prj/event_test/event_test.dsp \
+	WIN32-Prj/event_test/test.txt WIN32-Prj/libevent.dsp \
+	WIN32-Prj/libevent.dsw WIN32-Prj/signal_test/signal_test.dsp \
+	WIN32-Prj/time_test/time_test.dsp WIN32-Prj/regress/regress.vcproj \
+	WIN32-Prj/libevent.sln WIN32-Prj/libevent.vcproj \
+	Makefile.nmake test/Makefile.nmake \
+	LICENSE
+
+lib_LTLIBRARIES = libevent.la libevent_core.la libevent_extra.la
+
+if BUILD_WIN32
+
+SUBDIRS = . sample
+SYS_LIBS = -lws2_32
+SYS_SRC = WIN32-Code/win32.c
+SYS_INCLUDES = -IWIN32-Code
+
+else
+
+SUBDIRS = . sample test
+SYS_LIBS =
+SYS_SRC =
+SYS_INCLUDES =
+
+endif
+
+BUILT_SOURCES = event-config.h
+
+event-config.h: config.h
+	echo '/* event-config.h' > $@
+	echo ' * Generated by autoconf; post-processed by libevent.' >> $@
+	echo ' * Do not edit this file.' >> $@
+	echo ' * Do not rely on macros in this file existing in later versions.'>> $@
+	echo ' */' >> $@
+	echo '#ifndef _EVENT_CONFIG_H_' >> $@
+	echo '#define _EVENT_CONFIG_H_' >> $@
+
+	sed -e 's/#define /#define _EVENT_/' \
+	    -e 's/#undef /#undef _EVENT_/' \
+	    -e 's/#ifndef /#ifndef _EVENT_/' < config.h >> $@
+	echo "#endif" >> $@
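+
+# For example (illustrative), the sed pass above turns a config.h line such as
+#   #define HAVE_SELECT 1
+# into
+#   #define _EVENT_HAVE_SELECT 1
+# so the installed header cannot collide with an application's own autoconf
+# macros.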
+
+CORE_SRC = event.c buffer.c evbuffer.c log.c evutil.c $(SYS_SRC)
+EXTRA_SRC = event_tagging.c http.c evhttp.h http-internal.h evdns.c \
+	evdns.h evrpc.c evrpc.h evrpc-internal.h \
+	strlcpy.c strlcpy-internal.h
+
+libevent_la_SOURCES = $(CORE_SRC) $(EXTRA_SRC)
+libevent_la_LIBADD = @LTLIBOBJS@ $(SYS_LIBS)
+libevent_la_LDFLAGS = -release $(RELEASE) -version-info $(VERSION_INFO)
+
+libevent_core_la_SOURCES = $(CORE_SRC)
+libevent_core_la_LIBADD = @LTLIBOBJS@ $(SYS_LIBS)
+libevent_core_la_LDFLAGS = -release $(RELEASE) -version-info $(VERSION_INFO)
+
+libevent_extra_la_SOURCES = $(EXTRA_SRC)
+libevent_extra_la_LIBADD = @LTLIBOBJS@ $(SYS_LIBS)
+libevent_extra_la_LDFLAGS = -release $(RELEASE) -version-info $(VERSION_INFO)
+
+include_HEADERS = event.h evhttp.h evdns.h evrpc.h evutil.h
+
+nodist_include_HEADERS = event-config.h
+
+INCLUDES = -I$(srcdir)/compat $(SYS_INCLUDES)
+
+man_MANS = event.3 evdns.3
+
+verify: libevent.la
+	cd test && make verify
+
+doxygen: FORCE
+	doxygen $(srcdir)/Doxyfile
+FORCE:
+
+DISTCLEANFILES = *~ event-config.h
diff --git a/base/third_party/libevent/Makefile.nmake b/base/third_party/libevent/Makefile.nmake
new file mode 100644
index 0000000..f8d5722
--- /dev/null
+++ b/base/third_party/libevent/Makefile.nmake
@@ -0,0 +1,48 @@
+# WATCH OUT!  This makefile is a work in progress.  It is probably missing
+# tons of important things.  DO NOT RELY ON IT TO BUILD A GOOD LIBEVENT.
+
+# Needed for correctness
+CFLAGS=/Iinclude /Icompat /IWIN32-Code /DWIN32 /DHAVE_CONFIG_H /I.
+
+# For optimization and warnings
+CFLAGS=$(CFLAGS) /Ox /W3 /wd4996 /nologo
+
+# XXXX have a debug mode
+
+LIBFLAGS=/nologo
+
+
+CORE_OBJS=event.obj buffer.obj evbuffer.obj \
+	log.obj evutil.obj \
+	strlcpy.obj signal.obj win32.obj
+EXTRA_OBJS=event_tagging.obj http.obj evdns.obj evrpc.obj
+
+ALL_OBJS=$(CORE_OBJS) $(WIN_OBJS) $(EXTRA_OBJS)
+STATIC_LIBS=libevent_core.lib libevent_extras.lib libevent.lib
+
+
+all: static_libs tests
+
+static_libs: $(STATIC_LIBS)
+
+win32.obj: WIN32-Code\win32.c
+	$(CC) $(CFLAGS) /c WIN32-Code\win32.c
+
+libevent_core.lib: $(CORE_OBJS)
+	lib $(LIBFLAGS) $(CORE_OBJS) /out:libevent_core.lib 
+
+libevent_extras.lib: $(EXTRA_OBJS)
+	lib $(LIBFLAGS) $(EXTRA_OBJS) /out:libevent_extras.lib
+
+libevent.lib: $(CORE_OBJS) $(EXTRA_OBJS)
+	lib $(LIBFLAGS) $(CORE_OBJS) $(EXTRA_OBJS) /out:libevent.lib
+
+clean:
+	del $(ALL_OBJS)
+	del $(STATIC_LIBS)
+	cd test
+	$(MAKE) /F Makefile.nmake clean
+
+tests:
+	cd test
+	$(MAKE) /F Makefile.nmake
diff --git a/base/third_party/libevent/README b/base/third_party/libevent/README
new file mode 100644
index 0000000..b065039
--- /dev/null
+++ b/base/third_party/libevent/README
@@ -0,0 +1,57 @@
+To build libevent, type
+
+$ ./configure && make
+
+     (If you got libevent from the subversion repository, you will
+      first need to run the included "autogen.sh" script in order to
+      generate the configure script.)
+
+Install as root via
+
+# make install
+
+You can run the regression tests by
+
+$ make verify
+
+Before reporting any problems, please run the regression tests.
+
+To enable low-level tracing, build the library as:
+
+CFLAGS=-DUSE_DEBUG ./configure [...]
+
+Acknowledgements:
+-----------------
+
+The following people have helped with suggestions, ideas, code or
+fixing bugs:
+
+  Alejo
+  Weston Andros Adamson
+  William Ahern
+  Stas Bekman
+  Andrew Danforth
+  Mike Davis
+  Shie Erlich
+  Alexander von Gernler
+  Artur Grabowski
+  Aaron Hopkins
+  Claudio Jeker
+  Scott Lamb
+  Adam Langley
+  Philip Lewis
+  David Libenzi
+  Nick Mathewson
+  Andrey Matveev
+  Richard Nyberg
+  Jon Oberheide
+  Phil Oleson
+  Dave Pacheco
+  Tassilo von Parseval
+  Pierre Phaneuf
+  Jon Poland
+  Bert JW Regeer
+  Dug Song
+  Taral
+
+If I have forgotten your name, please contact me.
diff --git a/base/third_party/libevent/README.chromium b/base/third_party/libevent/README.chromium
new file mode 100644
index 0000000..1462e88
--- /dev/null
+++ b/base/third_party/libevent/README.chromium
@@ -0,0 +1,39 @@
+Name: libevent
+URL: http://libevent.org/
+Version: 1.4.15
+License: BSD
+Security Critical: yes
+
+Local Modifications:
+Rather than use libevent's own build system, we just build a Chrome
+static library using GYP.
+
+1) Run configure and "make event-config.h" on Linux, FreeBSD, Solaris,
+   and Mac and copy config.h and event-config.h to linux/, freebsd/,
+   solaris/, and mac/ respectively.
+2) Add libevent.gyp.
+3) chromium.patch is applied to make the following changes:
+   - Allow libevent to be used without being installed by changing <...>
+     #includes to "...".
+   - Fix a race condition in event_del.
+   - Optimistically assume CLOCK_MONOTONIC is available and fallback if it
+     fails, rather than explicitly testing for it.
+   - Remove an unneeded variable that causes a -Werror build failure.
+   - Add an #ifndef to fix a preprocessor redefined -Werror build failure.
+   - Revert the patch from http://sourceforge.net/p/levent/bugs/223/ that
+     introduces use-after-free memory corruption when an event callback frees
+     the struct event memory.
+   - Remove deprecated global variables, event_sigcb and event_gotsig
+     (essentially unused) that trigger tsan errors. (crbug/605894)
+4) The directories WIN32-Code and WIN32-Prj are not included.
+5) The configs for Android were copied from Linux's, which were very close to
+   the Android ones with the exception of HAVE_FD_MASK and HAVE_STRLCPY.
+6) Add files to support building with the PNaCl toolchain. Added
+   libevent_nacl_nonsfi.gyp for build rule. nacl_nonsfi/config.h and
+   nacl_nonsfi/event-config.h are derived from linux/ counterparts.
+   nacl_nonsfi/random.c is also added to provide the random() function,
+   which is missing in the newlib-based PNaCl toolchain.
+7) Stub out signal.c for nacl_helper_nonsfi. socketpair() will be prohibited
+   by sandbox in nacl_helper_nonsfi.
+8) Remove an unnecessary workaround for OS X 10.4 from kqueue.c. It was causing
+   problems on macOS Sierra.
diff --git a/base/third_party/libevent/aix/config.h b/base/third_party/libevent/aix/config.h
new file mode 100644
index 0000000..89e1f11
--- /dev/null
+++ b/base/third_party/libevent/aix/config.h
@@ -0,0 +1,276 @@
+/* config.h.  Generated from config.h.in by configure.  */
+/* config.h.in.  Generated from configure.in by autoheader.  */
+
+/* Define if clock_gettime is available in libc */
+#define DNS_USE_CPU_CLOCK_FOR_ID 1
+
+/* Define if no secure id variant is available */
+/* #undef DNS_USE_GETTIMEOFDAY_FOR_ID */
+
+/* Define to 1 if you have the `clock_gettime' function. */
+#define HAVE_CLOCK_GETTIME 1
+
+/* Define if /dev/poll is available */
+/* #undef HAVE_DEVPOLL */
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+#define HAVE_DLFCN_H 1
+
+/* Define if your system supports the epoll system calls */
+/* #undef HAVE_EPOLL */
+
+/* Define to 1 if you have the `epoll_ctl' function. */
+/* #undef HAVE_EPOLL_CTL */
+
+/* Define if your system supports event ports */
+/* #undef HAVE_EVENT_PORTS */
+
+/* Define to 1 if you have the `fcntl' function. */
+#define HAVE_FCNTL 1
+
+/* Define to 1 if you have the <fcntl.h> header file. */
+#define HAVE_FCNTL_H 1
+
+/* Define to 1 if the system has the type `fd_mask'. */
+/* #undef HAVE_FD_MASK */
+
+/* Define to 1 if you have the `getaddrinfo' function. */
+#define HAVE_GETADDRINFO 1
+
+/* Define to 1 if you have the `getegid' function. */
+#define HAVE_GETEGID 1
+
+/* Define to 1 if you have the `geteuid' function. */
+#define HAVE_GETEUID 1
+
+/* Define to 1 if you have the `getnameinfo' function. */
+#define HAVE_GETNAMEINFO 1
+
+/* Define to 1 if you have the `gettimeofday' function. */
+#define HAVE_GETTIMEOFDAY 1
+
+/* Define to 1 if you have the `inet_ntop' function. */
+#define HAVE_INET_NTOP 1
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#define HAVE_INTTYPES_H 1
+
+/* Define to 1 if you have the `issetugid' function. */
+/* #undef HAVE_ISSETUGID */
+
+/* Define to 1 if you have the `kqueue' function. */
+/* #undef HAVE_KQUEUE */
+
+/* Define to 1 if you have the `nsl' library (-lnsl). */
+#define HAVE_LIBNSL 1
+
+/* Define to 1 if you have the `resolv' library (-lresolv). */
+/* #undef HAVE_LIBRESOLV */
+
+/* Define to 1 if you have the `rt' library (-lrt). */
+#define HAVE_LIBRT 1
+
+/* Define to 1 if you have the `socket' library (-lsocket). */
+/* #undef HAVE_LIBSOCKET */
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define HAVE_MEMORY_H 1
+
+/* Define to 1 if you have the <netinet/in6.h> header file. */
+/* #undef HAVE_NETINET_IN6_H */
+
+/* Define to 1 if you have the `poll' function. */
+#define HAVE_POLL 1
+
+/* Define to 1 if you have the <poll.h> header file. */
+#define HAVE_POLL_H 1
+
+/* Define to 1 if you have the `port_create' function. */
+/* #undef HAVE_PORT_CREATE */
+
+/* Define to 1 if you have the <port.h> header file. */
+/* #undef HAVE_PORT_H */
+
+/* Define to 1 if you have the `select' function. */
+#define HAVE_SELECT 1
+
+/* Define if F_SETFD is defined in <fcntl.h> */
+#define HAVE_SETFD 1
+
+/* Define to 1 if you have the `sigaction' function. */
+#define HAVE_SIGACTION 1
+
+/* Define to 1 if you have the `signal' function. */
+#define HAVE_SIGNAL 1
+
+/* Define to 1 if you have the <signal.h> header file. */
+#define HAVE_SIGNAL_H 1
+
+/* Define to 1 if you have the <stdarg.h> header file. */
+#define HAVE_STDARG_H 1
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#define HAVE_STDINT_H 1
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define HAVE_STRING_H 1
+
+/* Define to 1 if you have the `strlcpy' function. */
+#define HAVE_STRLCPY 1
+
+/* Define to 1 if you have the `strsep' function. */
+#define HAVE_STRSEP 1
+
+/* Define to 1 if you have the `strtok_r' function. */
+#define HAVE_STRTOK_R 1
+
+/* Define to 1 if you have the `strtoll' function. */
+#define HAVE_STRTOLL 1
+
+/* Define to 1 if the system has the type `struct in6_addr'. */
+#define HAVE_STRUCT_IN6_ADDR 1
+
+/* Define to 1 if you have the <sys/devpoll.h> header file. */
+/* #undef HAVE_SYS_DEVPOLL_H */
+
+/* Define to 1 if you have the <sys/epoll.h> header file. */
+/* #undef HAVE_SYS_EPOLL_H */
+
+/* Define to 1 if you have the <sys/event.h> header file. */
+/* #undef HAVE_SYS_EVENT_H */
+
+/* Define to 1 if you have the <sys/ioctl.h> header file. */
+#define HAVE_SYS_IOCTL_H 1
+
+/* Define to 1 if you have the <sys/param.h> header file. */
+#define HAVE_SYS_PARAM_H 1
+
+/* Define to 1 if you have the <sys/queue.h> header file. */
+#define HAVE_SYS_QUEUE_H 1
+
+/* Define to 1 if you have the <sys/select.h> header file. */
+#define HAVE_SYS_SELECT_H 1
+
+/* Define to 1 if you have the <sys/socket.h> header file. */
+#define HAVE_SYS_SOCKET_H 1
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#define HAVE_SYS_STAT_H 1
+
+/* Define to 1 if you have the <sys/time.h> header file. */
+#define HAVE_SYS_TIME_H 1
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#define HAVE_SYS_TYPES_H 1
+
+/* Define if TAILQ_FOREACH is defined in <sys/queue.h> */
+#define HAVE_TAILQFOREACH 1
+
+/* Define if timeradd is defined in <sys/time.h> */
+/* #undef HAVE_TIMERADD */
+
+/* Define if timerclear is defined in <sys/time.h> */
+#define HAVE_TIMERCLEAR 1
+
+/* Define if timercmp is defined in <sys/time.h> */
+#define HAVE_TIMERCMP 1
+
+/* Define if timerisset is defined in <sys/time.h> */
+#define HAVE_TIMERISSET 1
+
+/* Define to 1 if the system has the type `uint16_t'. */
+#define HAVE_UINT16_T 1
+
+/* Define to 1 if the system has the type `uint32_t'. */
+#define HAVE_UINT32_T 1
+
+/* Define to 1 if the system has the type `uint64_t'. */
+#define HAVE_UINT64_T 1
+
+/* Define to 1 if the system has the type `uint8_t'. */
+#define HAVE_UINT8_T 1
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#define HAVE_UNISTD_H 1
+
+/* Define to 1 if you have the `vasprintf' function. */
+/* #undef HAVE_VASPRINTF */
+
+/* Define if kqueue works correctly with pipes */
+/* #undef HAVE_WORKING_KQUEUE */
+
+/* Define to the sub-directory in which libtool stores uninstalled libraries.
+ */
+#define LT_OBJDIR ".libs/"
+
+/* Numeric representation of the version */
+#define NUMERIC_VERSION 0x01040f00
+
+/* Name of package */
+#define PACKAGE "libevent"
+
+/* Define to the address where bug reports for this package should be sent. */
+#define PACKAGE_BUGREPORT ""
+
+/* Define to the full name of this package. */
+#define PACKAGE_NAME ""
+
+/* Define to the full name and version of this package. */
+#define PACKAGE_STRING ""
+
+/* Define to the one symbol short name of this package. */
+#define PACKAGE_TARNAME ""
+
+/* Define to the home page for this package. */
+#define PACKAGE_URL ""
+
+/* Define to the version of this package. */
+#define PACKAGE_VERSION ""
+
+/* The size of `int', as computed by sizeof. */
+#define SIZEOF_INT 4
+
+/* The size of `long', as computed by sizeof. */
+#define SIZEOF_LONG 4
+
+/* The size of `long long', as computed by sizeof. */
+#define SIZEOF_LONG_LONG 8
+
+/* The size of `short', as computed by sizeof. */
+#define SIZEOF_SHORT 2
+
+/* Define to 1 if you have the ANSI C header files. */
+#define STDC_HEADERS 1
+
+/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
+#define TIME_WITH_SYS_TIME 1
+
+/* Version number of package */
+#define VERSION "1.4.15"
+
+/* Define to appropriate substitute if compiler doesn't have __func__ */
+/* #undef __func__ */
+
+/* Define to empty if `const' does not conform to ANSI C. */
+/* #undef const */
+
+/* Define to `__inline__' or `__inline' if that's what the C compiler
+   calls it, or to nothing if 'inline' is not supported under any name.  */
+#ifndef __cplusplus
+/* #undef inline */
+#endif
+
+/* Define to `int' if <sys/types.h> does not define. */
+/* #undef pid_t */
+
+/* Define to `unsigned int' if <sys/types.h> does not define. */
+/* #undef size_t */
+
+/* Define to unsigned int if you don't have it */
+/* #undef socklen_t */
diff --git a/base/third_party/libevent/aix/event-config.h b/base/third_party/libevent/aix/event-config.h
new file mode 100644
index 0000000..2679490
--- /dev/null
+++ b/base/third_party/libevent/aix/event-config.h
@@ -0,0 +1,284 @@
+/* event-config.h
+ * Generated by autoconf; post-processed by libevent.
+ * Do not edit this file.
+ * Do not rely on macros in this file existing in later versions.
+ */
+#ifndef _EVENT_CONFIG_H_
+#define _EVENT_CONFIG_H_
+/* config.h.  Generated from config.h.in by configure.  */
+/* config.h.in.  Generated from configure.in by autoheader.  */
+
+/* Define if clock_gettime is available in libc */
+#define _EVENT_DNS_USE_CPU_CLOCK_FOR_ID 1
+
+/* Define if no secure id variant is available */
+/* #undef _EVENT_DNS_USE_GETTIMEOFDAY_FOR_ID */
+
+/* Define to 1 if you have the `clock_gettime' function. */
+#define _EVENT_HAVE_CLOCK_GETTIME 1
+
+/* Define if /dev/poll is available */
+/* #undef _EVENT_HAVE_DEVPOLL */
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+#define _EVENT_HAVE_DLFCN_H 1
+
+/* Define if your system supports the epoll system calls */
+/* #undef _EVENT_HAVE_EPOLL */
+
+/* Define to 1 if you have the `epoll_ctl' function. */
+/* #undef _EVENT_HAVE_EPOLL_CTL */
+
+/* Define if your system supports event ports */
+/* #undef _EVENT_HAVE_EVENT_PORTS */
+
+/* Define to 1 if you have the `fcntl' function. */
+#define _EVENT_HAVE_FCNTL 1
+
+/* Define to 1 if you have the <fcntl.h> header file. */
+#define _EVENT_HAVE_FCNTL_H 1
+
+/* Define to 1 if the system has the type `fd_mask'. */
+#define _EVENT_HAVE_FD_MASK 1
+
+/* Define to 1 if you have the `getaddrinfo' function. */
+#define _EVENT_HAVE_GETADDRINFO 1
+
+/* Define to 1 if you have the `getegid' function. */
+#define _EVENT_HAVE_GETEGID 1
+
+/* Define to 1 if you have the `geteuid' function. */
+#define _EVENT_HAVE_GETEUID 1
+
+/* Define to 1 if you have the `getnameinfo' function. */
+#define _EVENT_HAVE_GETNAMEINFO 1
+
+/* Define to 1 if you have the `gettimeofday' function. */
+#define _EVENT_HAVE_GETTIMEOFDAY 1
+
+/* Define to 1 if you have the `inet_ntop' function. */
+#define _EVENT_HAVE_INET_NTOP 1
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#define _EVENT_HAVE_INTTYPES_H 1
+
+/* Define to 1 if you have the `issetugid' function. */
+/* #undef _EVENT_HAVE_ISSETUGID */
+
+/* Define to 1 if you have the `kqueue' function. */
+/* #undef _EVENT_HAVE_KQUEUE */
+
+/* Define to 1 if you have the `nsl' library (-lnsl). */
+#define _EVENT_HAVE_LIBNSL 1
+
+/* Define to 1 if you have the `resolv' library (-lresolv). */
+/* #undef _EVENT_HAVE_LIBRESOLV */
+
+/* Define to 1 if you have the `rt' library (-lrt). */
+#define _EVENT_HAVE_LIBRT 1
+
+/* Define to 1 if you have the `socket' library (-lsocket). */
+/* #undef _EVENT_HAVE_LIBSOCKET */
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define _EVENT_HAVE_MEMORY_H 1
+
+/* Define to 1 if you have the <netinet/in6.h> header file. */
+/* #undef _EVENT_HAVE_NETINET_IN6_H */
+
+/* Define to 1 if you have the `poll' function. */
+#define _EVENT_HAVE_POLL 1
+
+/* Define to 1 if you have the <poll.h> header file. */
+#define _EVENT_HAVE_POLL_H 1
+
+/* Define to 1 if you have the `port_create' function. */
+/* #undef _EVENT_HAVE_PORT_CREATE */
+
+/* Define to 1 if you have the <port.h> header file. */
+/* #undef _EVENT_HAVE_PORT_H */
+
+/* Define to 1 if you have the `select' function. */
+#define _EVENT_HAVE_SELECT 1
+
+/* Define if F_SETFD is defined in <fcntl.h> */
+#define _EVENT_HAVE_SETFD 1
+
+/* Define to 1 if you have the `sigaction' function. */
+#define _EVENT_HAVE_SIGACTION 1
+
+/* Define to 1 if you have the `signal' function. */
+#define _EVENT_HAVE_SIGNAL 1
+
+/* Define to 1 if you have the <signal.h> header file. */
+#define _EVENT_HAVE_SIGNAL_H 1
+
+/* Define to 1 if you have the <stdarg.h> header file. */
+#define _EVENT_HAVE_STDARG_H 1
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#define _EVENT_HAVE_STDINT_H 1
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define _EVENT_HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define _EVENT_HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define _EVENT_HAVE_STRING_H 1
+
+/* Define to 1 if you have the `strlcpy' function. */
+#define _EVENT_HAVE_STRLCPY 1
+
+/* Define to 1 if you have the `strsep' function. */
+#define _EVENT_HAVE_STRSEP 1
+
+/* Define to 1 if you have the `strtok_r' function. */
+#define _EVENT_HAVE_STRTOK_R 1
+
+/* Define to 1 if you have the `strtoll' function. */
+#define _EVENT_HAVE_STRTOLL 1
+
+/* Define to 1 if the system has the type `struct in6_addr'. */
+#define _EVENT_HAVE_STRUCT_IN6_ADDR 1
+
+/* Define to 1 if you have the <sys/devpoll.h> header file. */
+/* #undef _EVENT_HAVE_SYS_DEVPOLL_H */
+
+/* Define to 1 if you have the <sys/epoll.h> header file. */
+/* #undef _EVENT_HAVE_SYS_EPOLL_H */
+
+/* Define to 1 if you have the <sys/event.h> header file. */
+/* #undef _EVENT_HAVE_SYS_EVENT_H */
+
+/* Define to 1 if you have the <sys/ioctl.h> header file. */
+#define _EVENT_HAVE_SYS_IOCTL_H 1
+
+/* Define to 1 if you have the <sys/param.h> header file. */
+#define _EVENT_HAVE_SYS_PARAM_H 1
+
+/* Define to 1 if you have the <sys/queue.h> header file. */
+#define _EVENT_HAVE_SYS_QUEUE_H 1
+
+/* Define to 1 if you have the <sys/select.h> header file. */
+#define _EVENT_HAVE_SYS_SELECT_H 1
+
+/* Define to 1 if you have the <sys/socket.h> header file. */
+#define _EVENT_HAVE_SYS_SOCKET_H 1
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#define _EVENT_HAVE_SYS_STAT_H 1
+
+/* Define to 1 if you have the <sys/time.h> header file. */
+#define _EVENT_HAVE_SYS_TIME_H 1
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#define _EVENT_HAVE_SYS_TYPES_H 1
+
+/* Define if TAILQ_FOREACH is defined in <sys/queue.h> */
+#define _EVENT_HAVE_TAILQFOREACH 1
+
+/* Define if timeradd is defined in <sys/time.h> */
+/* #undef _EVENT_HAVE_TIMERADD */
+
+/* Define if timerclear is defined in <sys/time.h> */
+#define _EVENT_HAVE_TIMERCLEAR 1
+
+/* Define if timercmp is defined in <sys/time.h> */
+#define _EVENT_HAVE_TIMERCMP 1
+
+/* Define if timerisset is defined in <sys/time.h> */
+#define _EVENT_HAVE_TIMERISSET 1
+
+/* Define to 1 if the system has the type `uint16_t'. */
+#define _EVENT_HAVE_UINT16_T 1
+
+/* Define to 1 if the system has the type `uint32_t'. */
+#define _EVENT_HAVE_UINT32_T 1
+
+/* Define to 1 if the system has the type `uint64_t'. */
+#define _EVENT_HAVE_UINT64_T 1
+
+/* Define to 1 if the system has the type `uint8_t'. */
+#define _EVENT_HAVE_UINT8_T 1
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#define _EVENT_HAVE_UNISTD_H 1
+
+/* Define to 1 if you have the `vasprintf' function. */
+/* #undef _EVENT_HAVE_VASPRINTF */
+
+/* Define if kqueue works correctly with pipes */
+/* #undef _EVENT_HAVE_WORKING_KQUEUE */
+
+/* Define to the sub-directory in which libtool stores uninstalled libraries.
+ */
+#define _EVENT_LT_OBJDIR ".libs/"
+
+/* Numeric representation of the version */
+#define _EVENT_NUMERIC_VERSION 0x01040f00
+
+/* Name of package */
+#define _EVENT_PACKAGE "libevent"
+
+/* Define to the address where bug reports for this package should be sent. */
+#define _EVENT_PACKAGE_BUGREPORT ""
+
+/* Define to the full name of this package. */
+#define _EVENT_PACKAGE_NAME ""
+
+/* Define to the full name and version of this package. */
+#define _EVENT_PACKAGE_STRING ""
+
+/* Define to the one symbol short name of this package. */
+#define _EVENT_PACKAGE_TARNAME ""
+
+/* Define to the home page for this package. */
+#define _EVENT_PACKAGE_URL ""
+
+/* Define to the version of this package. */
+#define _EVENT_PACKAGE_VERSION ""
+
+/* The size of `int', as computed by sizeof. */
+#define _EVENT_SIZEOF_INT 4
+
+/* The size of `long', as computed by sizeof. */
+#define _EVENT_SIZEOF_LONG 4
+
+/* The size of `long long', as computed by sizeof. */
+#define _EVENT_SIZEOF_LONG_LONG 8
+
+/* The size of `short', as computed by sizeof. */
+#define _EVENT_SIZEOF_SHORT 2
+
+/* Define to 1 if you have the ANSI C header files. */
+#define _EVENT_STDC_HEADERS 1
+
+/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
+#define _EVENT_TIME_WITH_SYS_TIME 1
+
+/* Version number of package */
+#define _EVENT_VERSION "1.4.15"
+
+/* Define to appropriate substitute if compiler doesn't have __func__ */
+/* #undef _EVENT___func__ */
+
+/* Define to empty if `const' does not conform to ANSI C. */
+/* #undef _EVENT_const */
+
+/* Define to `__inline__' or `__inline' if that's what the C compiler
+   calls it, or to nothing if 'inline' is not supported under any name.  */
+#ifndef _EVENT___cplusplus
+/* #undef _EVENT_inline */
+#endif
+
+/* Define to `int' if <sys/types.h> does not define. */
+/* #undef _EVENT_pid_t */
+
+/* Define to `unsigned int' if <sys/types.h> does not define. */
+/* #undef _EVENT_size_t */
+
+/* Define to unsigned int if you dont have it */
+/* #undef _EVENT_socklen_t */
+#endif
diff --git a/base/third_party/libevent/android/config.h b/base/third_party/libevent/android/config.h
new file mode 100644
index 0000000..91f4dda
--- /dev/null
+++ b/base/third_party/libevent/android/config.h
@@ -0,0 +1,266 @@
+/* Copied from the Linux version, with the features changed to match Android,
+ * which is close to Linux */
+
+/* Define if clock_gettime is available in libc */
+#define DNS_USE_CPU_CLOCK_FOR_ID 1
+
+/* Define if no secure id variant is available */
+/* #undef DNS_USE_GETTIMEOFDAY_FOR_ID */
+
+/* Define to 1 if you have the `clock_gettime' function. */
+#define HAVE_CLOCK_GETTIME 1
+
+/* Define if /dev/poll is available */
+/* #undef HAVE_DEVPOLL */
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+#define HAVE_DLFCN_H 1
+
+/* Define if your system supports the epoll system calls */
+#define HAVE_EPOLL 1
+
+/* Define to 1 if you have the `epoll_ctl' function. */
+#define HAVE_EPOLL_CTL 1
+
+/* Define if your system supports event ports */
+/* #undef HAVE_EVENT_PORTS */
+
+/* Define to 1 if you have the `fcntl' function. */
+#define HAVE_FCNTL 1
+
+/* Define to 1 if you have the <fcntl.h> header file. */
+#define HAVE_FCNTL_H 1
+
+/* Define to 1 if the system has the type `fd_mask'. */
+/* #undef HAVE_FD_MASK */
+
+/* Define to 1 if you have the `getaddrinfo' function. */
+#define HAVE_GETADDRINFO 1
+
+/* Define to 1 if you have the `getegid' function. */
+#define HAVE_GETEGID 1
+
+/* Define to 1 if you have the `geteuid' function. */
+#define HAVE_GETEUID 1
+
+/* Define to 1 if you have the `getnameinfo' function. */
+#define HAVE_GETNAMEINFO 1
+
+/* Define to 1 if you have the `gettimeofday' function. */
+#define HAVE_GETTIMEOFDAY 1
+
+/* Define to 1 if you have the `inet_ntop' function. */
+#define HAVE_INET_NTOP 1
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#define HAVE_INTTYPES_H 1
+
+/* Define to 1 if you have the `issetugid' function. */
+/* #undef HAVE_ISSETUGID */
+
+/* Define to 1 if you have the `kqueue' function. */
+/* #undef HAVE_KQUEUE */
+
+/* Define to 1 if you have the `nsl' library (-lnsl). */
+#define HAVE_LIBNSL 1
+
+/* Define to 1 if you have the `resolv' library (-lresolv). */
+#define HAVE_LIBRESOLV 1
+
+/* Define to 1 if you have the `rt' library (-lrt). */
+#define HAVE_LIBRT 1
+
+/* Define to 1 if you have the `socket' library (-lsocket). */
+/* #undef HAVE_LIBSOCKET */
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define HAVE_MEMORY_H 1
+
+/* Define to 1 if you have the <netinet/in6.h> header file. */
+/* #undef HAVE_NETINET_IN6_H */
+
+/* Define to 1 if you have the `poll' function. */
+#define HAVE_POLL 1
+
+/* Define to 1 if you have the <poll.h> header file. */
+#define HAVE_POLL_H 1
+
+/* Define to 1 if you have the `port_create' function. */
+/* #undef HAVE_PORT_CREATE */
+
+/* Define to 1 if you have the <port.h> header file. */
+/* #undef HAVE_PORT_H */
+
+/* Define to 1 if you have the `select' function. */
+#define HAVE_SELECT 1
+
+/* Define if F_SETFD is defined in <fcntl.h> */
+#define HAVE_SETFD 1
+
+/* Define to 1 if you have the `sigaction' function. */
+#define HAVE_SIGACTION 1
+
+/* Define to 1 if you have the `signal' function. */
+#define HAVE_SIGNAL 1
+
+/* Define to 1 if you have the <signal.h> header file. */
+#define HAVE_SIGNAL_H 1
+
+/* Define to 1 if you have the <stdarg.h> header file. */
+#define HAVE_STDARG_H 1
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#define HAVE_STDINT_H 1
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define HAVE_STRING_H 1
+
+/* Define to 1 if you have the `strlcpy' function. */
+#define HAVE_STRLCPY 1
+
+/* Define to 1 if you have the `strsep' function. */
+#define HAVE_STRSEP 1
+
+/* Define to 1 if you have the `strtok_r' function. */
+#define HAVE_STRTOK_R 1
+
+/* Define to 1 if you have the `strtoll' function. */
+#define HAVE_STRTOLL 1
+
+/* Define to 1 if the system has the type `struct in6_addr'. */
+#define HAVE_STRUCT_IN6_ADDR 1
+
+/* Define to 1 if you have the <sys/devpoll.h> header file. */
+/* #undef HAVE_SYS_DEVPOLL_H */
+
+/* Define to 1 if you have the <sys/epoll.h> header file. */
+#define HAVE_SYS_EPOLL_H 1
+
+/* Define to 1 if you have the <sys/event.h> header file. */
+/* #undef HAVE_SYS_EVENT_H */
+
+/* Define to 1 if you have the <sys/ioctl.h> header file. */
+#define HAVE_SYS_IOCTL_H 1
+
+/* Define to 1 if you have the <sys/param.h> header file. */
+#define HAVE_SYS_PARAM_H 1
+
+/* Define to 1 if you have the <sys/queue.h> header file. */
+#define HAVE_SYS_QUEUE_H 1
+
+/* Define to 1 if you have the <sys/select.h> header file. */
+#define HAVE_SYS_SELECT_H 1
+
+/* Define to 1 if you have the <sys/socket.h> header file. */
+#define HAVE_SYS_SOCKET_H 1
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#define HAVE_SYS_STAT_H 1
+
+/* Define to 1 if you have the <sys/time.h> header file. */
+#define HAVE_SYS_TIME_H 1
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#define HAVE_SYS_TYPES_H 1
+
+/* Define if TAILQ_FOREACH is defined in <sys/queue.h> */
+#define HAVE_TAILQFOREACH 1
+
+/* Define if timeradd is defined in <sys/time.h> */
+#define HAVE_TIMERADD 1
+
+/* Define if timerclear is defined in <sys/time.h> */
+#define HAVE_TIMERCLEAR 1
+
+/* Define if timercmp is defined in <sys/time.h> */
+#define HAVE_TIMERCMP 1
+
+/* Define if timerisset is defined in <sys/time.h> */
+#define HAVE_TIMERISSET 1
+
+/* Define to 1 if the system has the type `uint16_t'. */
+#define HAVE_UINT16_T 1
+
+/* Define to 1 if the system has the type `uint32_t'. */
+#define HAVE_UINT32_T 1
+
+/* Define to 1 if the system has the type `uint64_t'. */
+#define HAVE_UINT64_T 1
+
+/* Define to 1 if the system has the type `uint8_t'. */
+#define HAVE_UINT8_T 1
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#define HAVE_UNISTD_H 1
+
+/* Define to 1 if you have the `vasprintf' function. */
+#define HAVE_VASPRINTF 1
+
+/* Define if kqueue works correctly with pipes */
+/* #undef HAVE_WORKING_KQUEUE */
+
+/* Name of package */
+#define PACKAGE "libevent"
+
+/* Define to the address where bug reports for this package should be sent. */
+#define PACKAGE_BUGREPORT ""
+
+/* Define to the full name of this package. */
+#define PACKAGE_NAME ""
+
+/* Define to the full name and version of this package. */
+#define PACKAGE_STRING ""
+
+/* Define to the one symbol short name of this package. */
+#define PACKAGE_TARNAME ""
+
+/* Define to the version of this package. */
+#define PACKAGE_VERSION ""
+
+/* The size of `int', as computed by sizeof. */
+#define SIZEOF_INT 4
+
+/* The size of `long', as computed by sizeof. */
+#define SIZEOF_LONG 8
+
+/* The size of `long long', as computed by sizeof. */
+#define SIZEOF_LONG_LONG 8
+
+/* The size of `short', as computed by sizeof. */
+#define SIZEOF_SHORT 2
+
+/* Define to 1 if you have the ANSI C header files. */
+#define STDC_HEADERS 1
+
+/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
+#define TIME_WITH_SYS_TIME 1
+
+/* Version number of package */
+#define VERSION "1.4.13-stable"
+
+/* Define to appropriate substitute if compiler doesn't have __func__ */
+/* #undef __func__ */
+
+/* Define to empty if `const' does not conform to ANSI C. */
+/* #undef const */
+
+/* Define to `__inline__' or `__inline' if that's what the C compiler
+   calls it, or to nothing if 'inline' is not supported under any name.  */
+#ifndef __cplusplus
+/* #undef inline */
+#endif
+
+/* Define to `int' if <sys/types.h> does not define. */
+/* #undef pid_t */
+
+/* Define to `unsigned int' if <sys/types.h> does not define. */
+/* #undef size_t */
+
+/* Define to unsigned int if you don't have it */
+/* #undef socklen_t */
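+
+/*
+ * Illustrative note (not part of the generated file): libevent sources
+ * consume these feature macros through the preprocessor, e.g. buffer.c does
+ *
+ *   #ifdef HAVE_SYS_IOCTL_H
+ *   #include <sys/ioctl.h>
+ *   #endif
+ *
+ * and defining HAVE_EPOLL / HAVE_EPOLL_CTL above is what lets event.c
+ * select the epoll backend when it chooses among its eventops on Android.
+ */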
diff --git a/base/third_party/libevent/android/event-config.h b/base/third_party/libevent/android/event-config.h
new file mode 100644
index 0000000..6563cb7
--- /dev/null
+++ b/base/third_party/libevent/android/event-config.h
@@ -0,0 +1,281 @@
+/* Copied from the Linux version, with the features changed to match Android,
+ * which is close to Linux */
+#ifndef _EVENT_CONFIG_H_
+#define _EVENT_CONFIG_H_
+/* config.h.  Generated from config.h.in by configure.  */
+/* config.h.in.  Generated from configure.in by autoheader.  */
+
+/* Define if clock_gettime is available in libc */
+#define _EVENT_DNS_USE_CPU_CLOCK_FOR_ID 1
+
+/* Define if no secure id variant is available */
+/* #undef _EVENT_DNS_USE_GETTIMEOFDAY_FOR_ID */
+
+/* Define to 1 if you have the `clock_gettime' function. */
+#define _EVENT_HAVE_CLOCK_GETTIME 1
+
+/* Define if /dev/poll is available */
+/* #undef _EVENT_HAVE_DEVPOLL */
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+#define _EVENT_HAVE_DLFCN_H 1
+
+/* Define if your system supports the epoll system calls */
+#define _EVENT_HAVE_EPOLL 1
+
+/* Define to 1 if you have the `epoll_ctl' function. */
+#define _EVENT_HAVE_EPOLL_CTL 1
+
+/* Define if your system supports event ports */
+/* #undef _EVENT_HAVE_EVENT_PORTS */
+
+/* Define to 1 if you have the `fcntl' function. */
+#define _EVENT_HAVE_FCNTL 1
+
+/* Define to 1 if you have the <fcntl.h> header file. */
+#define _EVENT_HAVE_FCNTL_H 1
+
+/* Define to 1 if the system has the type `fd_mask'. */
+/* #undef _EVENT_HAVE_FD_MASK */
+
+/* Define to 1 if you have the `getaddrinfo' function. */
+#define _EVENT_HAVE_GETADDRINFO 1
+
+/* Define to 1 if you have the `getegid' function. */
+#define _EVENT_HAVE_GETEGID 1
+
+/* Define to 1 if you have the `geteuid' function. */
+#define _EVENT_HAVE_GETEUID 1
+
+/* Define to 1 if you have the `getnameinfo' function. */
+#define _EVENT_HAVE_GETNAMEINFO 1
+
+/* Define to 1 if you have the `gettimeofday' function. */
+#define _EVENT_HAVE_GETTIMEOFDAY 1
+
+/* Define to 1 if you have the `inet_ntop' function. */
+#define _EVENT_HAVE_INET_NTOP 1
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#define _EVENT_HAVE_INTTYPES_H 1
+
+/* Define to 1 if you have the `issetugid' function. */
+/* #undef _EVENT_HAVE_ISSETUGID */
+
+/* Define to 1 if you have the `kqueue' function. */
+/* #undef _EVENT_HAVE_KQUEUE */
+
+/* Define to 1 if you have the `nsl' library (-lnsl). */
+#define _EVENT_HAVE_LIBNSL 1
+
+/* Define to 1 if you have the `resolv' library (-lresolv). */
+#define _EVENT_HAVE_LIBRESOLV 1
+
+/* Define to 1 if you have the `rt' library (-lrt). */
+#define _EVENT_HAVE_LIBRT 1
+
+/* Define to 1 if you have the `socket' library (-lsocket). */
+/* #undef _EVENT_HAVE_LIBSOCKET */
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define _EVENT_HAVE_MEMORY_H 1
+
+/* Define to 1 if you have the <netinet/in6.h> header file. */
+/* #undef _EVENT_HAVE_NETINET_IN6_H */
+
+/* Define to 1 if you have the `poll' function. */
+#define _EVENT_HAVE_POLL 1
+
+/* Define to 1 if you have the <poll.h> header file. */
+#define _EVENT_HAVE_POLL_H 1
+
+/* Define to 1 if you have the `port_create' function. */
+/* #undef _EVENT_HAVE_PORT_CREATE */
+
+/* Define to 1 if you have the <port.h> header file. */
+/* #undef _EVENT_HAVE_PORT_H */
+
+/* Define to 1 if you have the `select' function. */
+#define _EVENT_HAVE_SELECT 1
+
+/* Define if F_SETFD is defined in <fcntl.h> */
+#define _EVENT_HAVE_SETFD 1
+
+/* Define to 1 if you have the `sigaction' function. */
+#define _EVENT_HAVE_SIGACTION 1
+
+/* Define to 1 if you have the `signal' function. */
+#define _EVENT_HAVE_SIGNAL 1
+
+/* Define to 1 if you have the <signal.h> header file. */
+#define _EVENT_HAVE_SIGNAL_H 1
+
+/* Define to 1 if you have the <stdarg.h> header file. */
+#define _EVENT_HAVE_STDARG_H 1
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#define _EVENT_HAVE_STDINT_H 1
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define _EVENT_HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define _EVENT_HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define _EVENT_HAVE_STRING_H 1
+
+/* Define to 1 if you have the `strlcpy' function. */
+#define _EVENT_HAVE_STRLCPY 1
+
+/* Define to 1 if you have the `strsep' function. */
+#define _EVENT_HAVE_STRSEP 1
+
+/* Define to 1 if you have the `strtok_r' function. */
+#define _EVENT_HAVE_STRTOK_R 1
+
+/* Define to 1 if you have the `strtoll' function. */
+#define _EVENT_HAVE_STRTOLL 1
+
+/* Define to 1 if the system has the type `struct in6_addr'. */
+#define _EVENT_HAVE_STRUCT_IN6_ADDR 1
+
+/* Define to 1 if you have the <sys/devpoll.h> header file. */
+/* #undef _EVENT_HAVE_SYS_DEVPOLL_H */
+
+/* Define to 1 if you have the <sys/epoll.h> header file. */
+#define _EVENT_HAVE_SYS_EPOLL_H 1
+
+/* Define to 1 if you have the <sys/event.h> header file. */
+/* #undef _EVENT_HAVE_SYS_EVENT_H */
+
+/* Define to 1 if you have the <sys/ioctl.h> header file. */
+#define _EVENT_HAVE_SYS_IOCTL_H 1
+
+/* Define to 1 if you have the <sys/param.h> header file. */
+#define _EVENT_HAVE_SYS_PARAM_H 1
+
+/* Define to 1 if you have the <sys/queue.h> header file. */
+#define _EVENT_HAVE_SYS_QUEUE_H 1
+
+/* Define to 1 if you have the <sys/select.h> header file. */
+#define _EVENT_HAVE_SYS_SELECT_H 1
+
+/* Define to 1 if you have the <sys/socket.h> header file. */
+#define _EVENT_HAVE_SYS_SOCKET_H 1
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#define _EVENT_HAVE_SYS_STAT_H 1
+
+/* Define to 1 if you have the <sys/time.h> header file. */
+#define _EVENT_HAVE_SYS_TIME_H 1
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#define _EVENT_HAVE_SYS_TYPES_H 1
+
+/* Define if TAILQ_FOREACH is defined in <sys/queue.h> */
+#define _EVENT_HAVE_TAILQFOREACH 1
+
+/* Define if timeradd is defined in <sys/time.h> */
+#define _EVENT_HAVE_TIMERADD 1
+
+/* Define if timerclear is defined in <sys/time.h> */
+#define _EVENT_HAVE_TIMERCLEAR 1
+
+/* Define if timercmp is defined in <sys/time.h> */
+#define _EVENT_HAVE_TIMERCMP 1
+
+/* Define if timerisset is defined in <sys/time.h> */
+#define _EVENT_HAVE_TIMERISSET 1
+
+/* Define to 1 if the system has the type `uint16_t'. */
+#define _EVENT_HAVE_UINT16_T 1
+
+/* Define to 1 if the system has the type `uint32_t'. */
+#define _EVENT_HAVE_UINT32_T 1
+
+/* Define to 1 if the system has the type `uint64_t'. */
+#define _EVENT_HAVE_UINT64_T 1
+
+/* Define to 1 if the system has the type `uint8_t'. */
+#define _EVENT_HAVE_UINT8_T 1
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#define _EVENT_HAVE_UNISTD_H 1
+
+/* Define to 1 if you have the `vasprintf' function. */
+#define _EVENT_HAVE_VASPRINTF 1
+
+/* Define if kqueue works correctly with pipes */
+/* #undef _EVENT_HAVE_WORKING_KQUEUE */
+
+/* Define to the sub-directory in which libtool stores uninstalled libraries.
+   */
+#define _EVENT_LT_OBJDIR ".libs/"
+
+/* Numeric representation of the version */
+#define _EVENT_NUMERIC_VERSION 0x01040f00
+
+/* Name of package */
+#define _EVENT_PACKAGE "libevent"
+
+/* Define to the address where bug reports for this package should be sent. */
+#define _EVENT_PACKAGE_BUGREPORT ""
+
+/* Define to the full name of this package. */
+#define _EVENT_PACKAGE_NAME ""
+
+/* Define to the full name and version of this package. */
+#define _EVENT_PACKAGE_STRING ""
+
+/* Define to the one symbol short name of this package. */
+#define _EVENT_PACKAGE_TARNAME ""
+
+/* Define to the home page for this package. */
+#define _EVENT_PACKAGE_URL ""
+
+/* Define to the version of this package. */
+#define _EVENT_PACKAGE_VERSION ""
+
+/* The size of `int', as computed by sizeof. */
+#define _EVENT_SIZEOF_INT 4
+
+/* The size of `long', as computed by sizeof. */
+#define _EVENT_SIZEOF_LONG 8
+
+/* The size of `long long', as computed by sizeof. */
+#define _EVENT_SIZEOF_LONG_LONG 8
+
+/* The size of `short', as computed by sizeof. */
+#define _EVENT_SIZEOF_SHORT 2
+
+/* Define to 1 if you have the ANSI C header files. */
+#define _EVENT_STDC_HEADERS 1
+
+/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
+#define _EVENT_TIME_WITH_SYS_TIME 1
+
+/* Version number of package */
+#define _EVENT_VERSION "1.4.15"
+
+/* Define to appropriate substitute if compiler doesn't have __func__ */
+/* #undef _EVENT___func__ */
+
+/* Define to empty if `const' does not conform to ANSI C. */
+/* #undef _EVENT_const */
+
+/* Define to `__inline__' or `__inline' if that's what the C compiler
+   calls it, or to nothing if 'inline' is not supported under any name.  */
+#ifndef _EVENT___cplusplus
+/* #undef _EVENT_inline */
+#endif
+
+/* Define to `int' if <sys/types.h> does not define. */
+/* #undef _EVENT_pid_t */
+
+/* Define to `unsigned int' if <sys/types.h> does not define. */
+/* #undef _EVENT_size_t */
+
+/* Define to unsigned int if you don't have it */
+/* #undef _EVENT_socklen_t */
+#endif
diff --git a/base/third_party/libevent/autogen.sh b/base/third_party/libevent/autogen.sh
new file mode 100755
index 0000000..099cb30
--- /dev/null
+++ b/base/third_party/libevent/autogen.sh
@@ -0,0 +1,15 @@
+#!/bin/sh
+if [ -x "`which autoreconf 2>/dev/null`" ] ; then
+   exec autoreconf -ivf
+fi
+
+LIBTOOLIZE=libtoolize
+SYSNAME=`uname`
+if [ "x$SYSNAME" = "xDarwin" ] ; then
+  LIBTOOLIZE=glibtoolize
+fi
+aclocal && \
+	autoheader && \
+	$LIBTOOLIZE && \
+	autoconf && \
+	automake --add-missing --copy
diff --git a/base/third_party/libevent/buffer.c b/base/third_party/libevent/buffer.c
new file mode 100644
index 0000000..ebf35c9
--- /dev/null
+++ b/base/third_party/libevent/buffer.c
@@ -0,0 +1,554 @@
+/*
+ * Copyright (c) 2002, 2003 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#ifdef WIN32
+#include <winsock2.h>
+#include <windows.h>
+#endif
+
+#ifdef HAVE_VASPRINTF
+/* If we have vasprintf, we need to define this before we include stdio.h. */
+#define _GNU_SOURCE
+#endif
+
+#include <sys/types.h>
+
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+
+#ifdef HAVE_SYS_IOCTL_H
+#include <sys/ioctl.h>
+#endif
+
+#include <assert.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifdef HAVE_STDARG_H
+#include <stdarg.h>
+#endif
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+#include "event.h"
+#include "config.h"
+#include "evutil.h"
+#include "./log.h"
+
+struct evbuffer *
+evbuffer_new(void)
+{
+	struct evbuffer *buffer;
+	
+	buffer = calloc(1, sizeof(struct evbuffer));
+
+	return (buffer);
+}
+
+void
+evbuffer_free(struct evbuffer *buffer)
+{
+	if (buffer->orig_buffer != NULL)
+		free(buffer->orig_buffer);
+	free(buffer);
+}
+
+/* 
+ * This is a destructive add.  The data from one buffer moves into
+ * the other buffer.
+ */
+
+#define SWAP(x,y) do { \
+	(x)->buffer = (y)->buffer; \
+	(x)->orig_buffer = (y)->orig_buffer; \
+	(x)->misalign = (y)->misalign; \
+	(x)->totallen = (y)->totallen; \
+	(x)->off = (y)->off; \
+} while (0)
+
+int
+evbuffer_add_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
+{
+	int res;
+
+	/* Short cut for better performance */
+	if (outbuf->off == 0) {
+		struct evbuffer tmp;
+		size_t oldoff = inbuf->off;
+
+		/* Swap them directly */
+		SWAP(&tmp, outbuf);
+		SWAP(outbuf, inbuf);
+		SWAP(inbuf, &tmp);
+
+		/*
+		 * The optimization comes with a price: we need to notify the
+		 * buffer's callback, if any, of the changes. oldoff is the
+		 * amount of data that we transferred from inbuf to outbuf.
+		 */
+		if (inbuf->off != oldoff && inbuf->cb != NULL)
+			(*inbuf->cb)(inbuf, oldoff, inbuf->off, inbuf->cbarg);
+		if (oldoff && outbuf->cb != NULL)
+			(*outbuf->cb)(outbuf, 0, oldoff, outbuf->cbarg);
+		
+		return (0);
+	}
+
+	res = evbuffer_add(outbuf, inbuf->buffer, inbuf->off);
+	if (res == 0) {
+		/* We drain the input buffer on success */
+		evbuffer_drain(inbuf, inbuf->off);
+	}
+
+	return (res);
+}
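+
+/*
+ * Illustrative sketch (not part of upstream libevent): since the add is
+ * destructive, a successful call leaves inbuf empty whether the swap
+ * shortcut or the copy-and-drain path was taken.
+ */
+#if 0
+static void
+example_move_all(struct evbuffer *out, struct evbuffer *in)
+{
+	if (evbuffer_add_buffer(out, in) == -1)
+		event_err(1, "%s: evbuffer_add_buffer", __func__);
+	assert(EVBUFFER_LENGTH(in) == 0);
+}
+#endif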
+
+int
+evbuffer_add_vprintf(struct evbuffer *buf, const char *fmt, va_list ap)
+{
+	char *buffer;
+	size_t space;
+	size_t oldoff = buf->off;
+	int sz;
+	va_list aq;
+
+	/* make sure that at least some space is available */
+	if (evbuffer_expand(buf, 64) < 0)
+		return (-1);
+	for (;;) {
+		size_t used = buf->misalign + buf->off;
+		buffer = (char *)buf->buffer + buf->off;
+		assert(buf->totallen >= used);
+		space = buf->totallen - used;
+
+#ifndef va_copy
+#define	va_copy(dst, src)	memcpy(&(dst), &(src), sizeof(va_list))
+#endif
+		va_copy(aq, ap);
+
+		sz = evutil_vsnprintf(buffer, space, fmt, aq);
+
+		va_end(aq);
+
+		if (sz < 0)
+			return (-1);
+		if ((size_t)sz < space) {
+			buf->off += sz;
+			if (buf->cb != NULL)
+				(*buf->cb)(buf, oldoff, buf->off, buf->cbarg);
+			return (sz);
+		}
+		if (evbuffer_expand(buf, sz + 1) == -1)
+			return (-1);
+
+	}
+	/* NOTREACHED */
+}
+
+int
+evbuffer_add_printf(struct evbuffer *buf, const char *fmt, ...)
+{
+	int res = -1;
+	va_list ap;
+
+	va_start(ap, fmt);
+	res = evbuffer_add_vprintf(buf, fmt, ap);
+	va_end(ap);
+
+	return (res);
+}
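+
+/*
+ * Illustrative sketch (not part of upstream libevent): on success
+ * evbuffer_add_printf() returns the number of bytes appended, expanding
+ * the buffer as needed; on failure it returns -1.
+ */
+#if 0
+static int
+example_format(struct evbuffer *buf, const char *path)
+{
+	/* "path" is a hypothetical placeholder for the resource to request. */
+	return evbuffer_add_printf(buf, "GET %s HTTP/1.0\r\n\r\n", path);
+}
+#endif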
+
+/* Reads data from an event buffer and drains the bytes read */
+
+int
+evbuffer_remove(struct evbuffer *buf, void *data, size_t datlen)
+{
+	size_t nread = datlen;
+	if (nread >= buf->off)
+		nread = buf->off;
+
+	memcpy(data, buf->buffer, nread);
+	evbuffer_drain(buf, nread);
+	
+	return (nread);
+}
+
+/*
+ * Reads a line terminated by '\r\n', '\n\r', '\r', or '\n'.
+ * The returned buffer needs to be freed by the caller.
+ */
+
+char *
+evbuffer_readline(struct evbuffer *buffer)
+{
+	u_char *data = EVBUFFER_DATA(buffer);
+	size_t len = EVBUFFER_LENGTH(buffer);
+	char *line;
+	unsigned int i;
+
+	for (i = 0; i < len; i++) {
+		if (data[i] == '\r' || data[i] == '\n')
+			break;
+	}
+
+	if (i == len)
+		return (NULL);
+
+	if ((line = malloc(i + 1)) == NULL) {
+		fprintf(stderr, "%s: out of memory\n", __func__);
+		return (NULL);
+	}
+
+	memcpy(line, data, i);
+	line[i] = '\0';
+
+	/*
+	 * Some protocols terminate a line with '\r\n', so check for
+	 * that, too.
+	 */
+	if ( i < len - 1 ) {
+		char fch = data[i], sch = data[i+1];
+
+		/* Drain one more character if needed */
+		if ( (sch == '\r' || sch == '\n') && sch != fch )
+			i += 1;
+	}
+
+	evbuffer_drain(buffer, i + 1);
+
+	return (line);
+}
+
+
+char *
+evbuffer_readln(struct evbuffer *buffer, size_t *n_read_out,
+		enum evbuffer_eol_style eol_style)
+{
+	u_char *data = EVBUFFER_DATA(buffer);
+	u_char *start_of_eol, *end_of_eol;
+	size_t len = EVBUFFER_LENGTH(buffer);
+	char *line;
+	unsigned int i, n_to_copy, n_to_drain;
+
+	if (n_read_out)
+		*n_read_out = 0;
+
+	/* depending on eol_style, set start_of_eol to the first character
+	 * in the newline, and end_of_eol to one after the last character. */
+	switch (eol_style) {
+	case EVBUFFER_EOL_ANY:
+		for (i = 0; i < len; i++) {
+			if (data[i] == '\r' || data[i] == '\n')
+				break;
+		}
+		if (i == len)
+			return (NULL);
+		start_of_eol = data+i;
+		++i;
+		for ( ; i < len; i++) {
+			if (data[i] != '\r' && data[i] != '\n')
+				break;
+		}
+		end_of_eol = data+i;
+		break;
+	case EVBUFFER_EOL_CRLF:
+		end_of_eol = memchr(data, '\n', len);
+		if (!end_of_eol)
+			return (NULL);
+		if (end_of_eol > data && *(end_of_eol-1) == '\r')
+			start_of_eol = end_of_eol - 1;
+		else
+			start_of_eol = end_of_eol;
+		end_of_eol++; /* point to one after the LF. */
+		break;
+	case EVBUFFER_EOL_CRLF_STRICT: {
+		u_char *cp = data;
+		while ((cp = memchr(cp, '\r', len-(cp-data)))) {
+			if (cp < data+len-1 && *(cp+1) == '\n')
+				break;
+			if (++cp >= data+len) {
+				cp = NULL;
+				break;
+			}
+		}
+		if (!cp)
+			return (NULL);
+		start_of_eol = cp;
+		end_of_eol = cp+2;
+		break;
+	}
+	case EVBUFFER_EOL_LF:
+		start_of_eol = memchr(data, '\n', len);
+		if (!start_of_eol)
+			return (NULL);
+		end_of_eol = start_of_eol + 1;
+		break;
+	default:
+		return (NULL);
+	}
+
+	n_to_copy = start_of_eol - data;
+	n_to_drain = end_of_eol - data;
+
+	if ((line = malloc(n_to_copy+1)) == NULL) {
+		event_warn("%s: out of memory\n", __func__);
+		return (NULL);
+	}
+
+	memcpy(line, data, n_to_copy);
+	line[n_to_copy] = '\0';
+
+	evbuffer_drain(buffer, n_to_drain);
+	if (n_read_out)
+		*n_read_out = (size_t)n_to_copy;
+
+	return (line);
+}
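+
+/*
+ * Illustrative sketch (not part of upstream libevent): draining complete
+ * lines with evbuffer_readln().  Each returned line is heap-allocated and
+ * must be free()d by the caller; NULL means no full line is buffered yet.
+ */
+#if 0
+static void
+example_drain_lines(struct evbuffer *buf)
+{
+	char *line;
+	size_t len;
+
+	while ((line = evbuffer_readln(buf, &len, EVBUFFER_EOL_CRLF)) != NULL) {
+		printf("%lu byte line: %s\n", (unsigned long)len, line);
+		free(line);
+	}
+}
+#endif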
+
+/* Adds data to an event buffer */
+
+static void
+evbuffer_align(struct evbuffer *buf)
+{
+	memmove(buf->orig_buffer, buf->buffer, buf->off);
+	buf->buffer = buf->orig_buffer;
+	buf->misalign = 0;
+}
+
+#ifndef SIZE_MAX
+#define SIZE_MAX ((size_t)-1)
+#endif
+
+/* Expands the available space in the event buffer to at least datlen */
+
+int
+evbuffer_expand(struct evbuffer *buf, size_t datlen)
+{
+	size_t used = buf->misalign + buf->off;
+
+	assert(buf->totallen >= used);
+
+	/* If we can fit all the data, then we don't have to do anything */
+	if (buf->totallen - used >= datlen)
+		return (0);
+	/* If we would need to overflow to fit this much data, we can't
+	 * do anything. */
+	if (datlen > SIZE_MAX - buf->off)
+		return (-1);
+
+	/*
+	 * If the misalignment fulfills our data needs, we just force an
+	 * alignment to happen.  Afterwards, we have enough space.
+	 */
+	if (buf->totallen - buf->off >= datlen) {
+		evbuffer_align(buf);
+	} else {
+		void *newbuf;
+		size_t length = buf->totallen;
+		size_t need = buf->off + datlen;
+
+		if (length < 256)
+			length = 256;
+		if (need < SIZE_MAX / 2) {
+			while (length < need) {
+				length <<= 1;
+			}
+		} else {
+			length = need;
+		}
+
+		if (buf->orig_buffer != buf->buffer)
+			evbuffer_align(buf);
+		if ((newbuf = realloc(buf->buffer, length)) == NULL)
+			return (-1);
+
+		buf->orig_buffer = buf->buffer = newbuf;
+		buf->totallen = length;
+	}
+
+	return (0);
+}
+
+int
+evbuffer_add(struct evbuffer *buf, const void *data, size_t datlen)
+{
+	size_t used = buf->misalign + buf->off;
+	size_t oldoff = buf->off;
+
+	if (buf->totallen - used < datlen) {
+		if (evbuffer_expand(buf, datlen) == -1)
+			return (-1);
+	}
+
+	memcpy(buf->buffer + buf->off, data, datlen);
+	buf->off += datlen;
+
+	if (datlen && buf->cb != NULL)
+		(*buf->cb)(buf, oldoff, buf->off, buf->cbarg);
+
+	return (0);
+}
+
+void
+evbuffer_drain(struct evbuffer *buf, size_t len)
+{
+	size_t oldoff = buf->off;
+
+	if (len >= buf->off) {
+		buf->off = 0;
+		buf->buffer = buf->orig_buffer;
+		buf->misalign = 0;
+		goto done;
+	}
+
+	buf->buffer += len;
+	buf->misalign += len;
+
+	buf->off -= len;
+
+ done:
+	/* Tell someone about changes in this buffer */
+	if (buf->off != oldoff && buf->cb != NULL)
+		(*buf->cb)(buf, oldoff, buf->off, buf->cbarg);
+
+}
+
+/*
+ * Reads data from a file descriptor into a buffer.
+ */
+
+#define EVBUFFER_MAX_READ	4096
+
+int
+evbuffer_read(struct evbuffer *buf, int fd, int howmuch)
+{
+	u_char *p;
+	size_t oldoff = buf->off;
+	int n = EVBUFFER_MAX_READ;
+
+#if defined(FIONREAD)
+#ifdef WIN32
+	long lng = n;
+	if (ioctlsocket(fd, FIONREAD, &lng) == -1 || (n=lng) <= 0) {
+#else
+	if (ioctl(fd, FIONREAD, &n) == -1 || n <= 0) {
+#endif
+		n = EVBUFFER_MAX_READ;
+	} else if (n > EVBUFFER_MAX_READ && n > howmuch) {
+		/*
+		 * It's possible that a lot of data is available for
+		 * reading.  We do not want to exhaust resources
+		 * before the reader has a chance to do something
+		 * about it.  If the reader does not tell us how much
+		 * data we should read, we artificially limit it.
+		 */
+		if ((size_t)n > buf->totallen << 2)
+			n = buf->totallen << 2;
+		if (n < EVBUFFER_MAX_READ)
+			n = EVBUFFER_MAX_READ;
+	}
+#endif	
+	if (howmuch < 0 || howmuch > n)
+		howmuch = n;
+
+	/* If we don't have FIONREAD, we might waste some space here */
+	if (evbuffer_expand(buf, howmuch) == -1)
+		return (-1);
+
+	/* We can append new data at this point */
+	p = buf->buffer + buf->off;
+
+#ifndef WIN32
+	n = read(fd, p, howmuch);
+#else
+	n = recv(fd, p, howmuch, 0);
+#endif
+	if (n == -1)
+		return (-1);
+	if (n == 0)
+		return (0);
+
+	buf->off += n;
+
+	/* Tell someone about changes in this buffer */
+	if (buf->off != oldoff && buf->cb != NULL)
+		(*buf->cb)(buf, oldoff, buf->off, buf->cbarg);
+
+	return (n);
+}
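+
+/*
+ * Illustrative sketch (not part of upstream libevent): a read handler can
+ * pass howmuch = -1 to let the FIONREAD logic above size the read; -1 is
+ * an error and 0 means the peer closed the descriptor.
+ */
+#if 0
+static void
+example_on_readable(struct evbuffer *buf, int fd)
+{
+	int n = evbuffer_read(buf, fd, -1);
+
+	if (n == -1)
+		event_warn("%s: evbuffer_read", __func__);
+	else if (n == 0)
+		event_debug(("%s: EOF on fd %d", __func__, fd));
+}
+#endif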
+
+int
+evbuffer_write(struct evbuffer *buffer, int fd)
+{
+	int n;
+
+#ifndef WIN32
+	n = write(fd, buffer->buffer, buffer->off);
+#else
+	n = send(fd, buffer->buffer, buffer->off, 0);
+#endif
+	if (n == -1)
+		return (-1);
+	if (n == 0)
+		return (0);
+	evbuffer_drain(buffer, n);
+
+	return (n);
+}
+
+u_char *
+evbuffer_find(struct evbuffer *buffer, const u_char *what, size_t len)
+{
+	u_char *search = buffer->buffer, *end = search + buffer->off;
+	u_char *p;
+
+	while (search < end &&
+	    (p = memchr(search, *what, end - search)) != NULL) {
+		if (p + len > end)
+			break;
+		if (memcmp(p, what, len) == 0)
+			return (p);
+		search = p + 1;
+	}
+
+	return (NULL);
+}
+
+void evbuffer_setcb(struct evbuffer *buffer,
+    void (*cb)(struct evbuffer *, size_t, size_t, void *),
+    void *cbarg)
+{
+	buffer->cb = cb;
+	buffer->cbarg = cbarg;
+}
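+
+/*
+ * Illustrative sketch (not part of upstream libevent): a callback installed
+ * via evbuffer_setcb(buf, example_size_cb, NULL) is invoked with the old
+ * and new amounts of buffered data whenever the length changes.
+ */
+#if 0
+static void
+example_size_cb(struct evbuffer *buffer, size_t old_len, size_t new_len,
+    void *cbarg)
+{
+	if (new_len > old_len)
+		printf("grew by %lu bytes\n", (unsigned long)(new_len - old_len));
+	else
+		printf("shrank by %lu bytes\n", (unsigned long)(old_len - new_len));
+}
+#endif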
diff --git a/base/third_party/libevent/chromium.patch b/base/third_party/libevent/chromium.patch
new file mode 100644
index 0000000..5cbdfba
--- /dev/null
+++ b/base/third_party/libevent/chromium.patch
@@ -0,0 +1,226 @@
+diff --git a/third_party/libevent/buffer.c b/third_party/libevent/buffer.c
+index 64324bb..ebf35c9 100644
+--- a/third_party/libevent/buffer.c
++++ b/third_party/libevent/buffer.c
+@@ -356,7 +356,6 @@ int
+ evbuffer_expand(struct evbuffer *buf, size_t datlen)
+ {
+ 	size_t used = buf->misalign + buf->off;
+-	size_t need;
+ 
+ 	assert(buf->totallen >= used);
+ 
+diff --git a/third_party/libevent/evdns.c b/third_party/libevent/evdns.c
+index fa23163..f1c70d0 100644
+--- a/third_party/libevent/evdns.c
++++ b/third_party/libevent/evdns.c
+@@ -55,7 +55,9 @@
+ #endif
+ 
+ /* #define _POSIX_C_SOURCE 200507 */
++#ifndef _GNU_SOURCE
+ #define _GNU_SOURCE
++#endif
+ 
+ #ifdef DNS_USE_CPU_CLOCK_FOR_ID
+ #ifdef DNS_USE_OPENSSL_FOR_ID
+@@ -134,7 +136,7 @@
+ typedef ev_uint8_t u_char;
+ typedef unsigned int uint;
+ #endif
+-#include <event.h>
++#include "event.h"
+ 
+ #define u64 ev_uint64_t
+ #define u32 ev_uint32_t
+diff --git a/third_party/libevent/evdns.h b/third_party/libevent/evdns.h
+index 1eb5c38..fca4ac3 100644
+--- a/third_party/libevent/evdns.h
++++ b/third_party/libevent/evdns.h
+@@ -165,7 +165,7 @@ extern "C" {
+ #endif
+ 
+ /* For integer types. */
+-#include <evutil.h>
++#include "evutil.h"
+ 
+ /** Error codes 0-5 are as described in RFC 1035. */
+ #define DNS_ERR_NONE 0
+diff --git a/third_party/libevent/event.c b/third_party/libevent/event.c
+index da6cd42..36b1c51 100644
+--- a/third_party/libevent/event.c
++++ b/third_party/libevent/event.c
+@@ -107,11 +107,7 @@ static const struct eventop *eventops[] = {
+ /* Global state */
+ struct event_base *current_base = NULL;
+ extern struct event_base *evsignal_base;
+-static int use_monotonic;
+-
+-/* Handle signals - This is a deprecated interface */
+-int (*event_sigcb)(void);		/* Signal callback when gotsig is set */
+-volatile sig_atomic_t event_gotsig;	/* Set in signal handler */
++static int use_monotonic = 1;
+ 
+ /* Prototypes */
+ static void	event_queue_insert(struct event_base *, struct event *, int);
+@@ -124,17 +120,6 @@ static int	timeout_next(struct event_base *, struct timeval **);
+ static void	timeout_process(struct event_base *);
+ static void	timeout_correct(struct event_base *, struct timeval *);
+ 
+-static void
+-detect_monotonic(void)
+-{
+-#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
+-	struct timespec	ts;
+-
+-	if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
+-		use_monotonic = 1;
+-#endif
+-}
+-
+ static int
+ gettime(struct event_base *base, struct timeval *tp)
+ {
+@@ -144,18 +129,18 @@ gettime(struct event_base *base, struct timeval *tp)
+ 	}
+ 
+ #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
+-	if (use_monotonic) {
+-		struct timespec	ts;
+-
+-		if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
+-			return (-1);
++	struct timespec	ts;
+ 
++	if (use_monotonic &&
++	    clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
+ 		tp->tv_sec = ts.tv_sec;
+ 		tp->tv_usec = ts.tv_nsec / 1000;
+ 		return (0);
+ 	}
+ #endif
+ 
++	use_monotonic = 0;
++
+ 	return (evutil_gettimeofday(tp, NULL));
+ }
+ 
+@@ -179,10 +164,6 @@ event_base_new(void)
+ 	if ((base = calloc(1, sizeof(struct event_base))) == NULL)
+ 		event_err(1, "%s: calloc", __func__);
+ 
+-	event_sigcb = NULL;
+-	event_gotsig = 0;
+-
+-	detect_monotonic();
+ 	gettime(base, &base->event_tv);
+ 	
+ 	min_heap_ctor(&base->timeheap);
+@@ -398,12 +379,9 @@ event_process_active(struct event_base *base)
+ 			ncalls--;
+ 			ev->ev_ncalls = ncalls;
+ 			(*ev->ev_callback)((int)ev->ev_fd, ev->ev_res, ev->ev_arg);
+-			if (event_gotsig || base->event_break) {
+-			  	ev->ev_pncalls = NULL;
++			if (base->event_break)
+ 				return;
+-			}
+ 		}
+-		ev->ev_pncalls = NULL;
+ 	}
+ }
+ 
+@@ -506,18 +484,6 @@ event_base_loop(struct event_base *base, int flags)
+ 			break;
+ 		}
+ 
+-		/* You cannot use this interface for multi-threaded apps */
+-		while (event_gotsig) {
+-			event_gotsig = 0;
+-			if (event_sigcb) {
+-				res = (*event_sigcb)();
+-				if (res == -1) {
+-					errno = EINTR;
+-					return (-1);
+-				}
+-			}
+-		}
+-
+ 		timeout_correct(base, &tv);
+ 
+ 		tv_p = &tv;
+@@ -808,8 +774,6 @@ int
+ event_del(struct event *ev)
+ {
+ 	struct event_base *base;
+-	const struct eventop *evsel;
+-	void *evbase;
+ 
+ 	event_debug(("event_del: %p, callback %p",
+ 		 ev, ev->ev_callback));
+@@ -819,8 +783,6 @@ event_del(struct event *ev)
+ 		return (-1);
+ 
+ 	base = ev->ev_base;
+-	evsel = base->evsel;
+-	evbase = base->evbase;
+ 
+ 	assert(!(ev->ev_flags & ~EVLIST_ALL));
+ 
+@@ -838,7 +800,7 @@ event_del(struct event *ev)
+ 
+ 	if (ev->ev_flags & EVLIST_INSERTED) {
+ 		event_queue_remove(base, ev, EVLIST_INSERTED);
+-		return (evsel->del(evbase, ev));
++		return (base->evsel->del(base->evbase, ev));
+ 	}
+ 
+ 	return (0);
+diff --git a/third_party/libevent/event.h b/third_party/libevent/event.h
+index d1f5d9e..f0887b9 100644
+--- a/third_party/libevent/event.h
++++ b/third_party/libevent/event.h
+@@ -159,7 +159,7 @@
+ extern "C" {
+ #endif
+ 
+-#include <event-config.h>
++#include "event-config.h"
+ #ifdef _EVENT_HAVE_SYS_TYPES_H
+ #include <sys/types.h>
+ #endif
+@@ -172,7 +172,7 @@ extern "C" {
+ #include <stdarg.h>
+ 
+ /* For int types. */
+-#include <evutil.h>
++#include "evutil.h"
+ 
+ #ifdef WIN32
+ #define WIN32_LEAN_AND_MEAN
+diff --git a/third_party/libevent/evhttp.h b/third_party/libevent/evhttp.h
+index cba8be1..48c1d91 100644
+--- a/third_party/libevent/evhttp.h
++++ b/third_party/libevent/evhttp.h
+@@ -27,7 +27,7 @@
+ #ifndef _EVHTTP_H_
+ #define _EVHTTP_H_
+ 
+-#include <event.h>
++#include "event.h"
+ 
+ #ifdef __cplusplus
+ extern "C" {
+diff --git a/third_party/libevent/evutil.h b/third_party/libevent/evutil.h
+index dcb0013..8b664b9 100644
+--- a/third_party/libevent/evutil.h
++++ b/third_party/libevent/evutil.h
+@@ -38,7 +38,7 @@
+ extern "C" {
+ #endif
+ 
+-#include <event-config.h>
++#include "event-config.h"
+ #ifdef _EVENT_HAVE_SYS_TIME_H
+ #include <sys/time.h>
+ #endif
diff --git a/base/third_party/libevent/compat/sys/_libevent_time.h b/base/third_party/libevent/compat/sys/_libevent_time.h
new file mode 100644
index 0000000..8cabb0d
--- /dev/null
+++ b/base/third_party/libevent/compat/sys/_libevent_time.h
@@ -0,0 +1,163 @@
+/*	$OpenBSD: time.h,v 1.11 2000/10/10 13:36:48 itojun Exp $	*/
+/*	$NetBSD: time.h,v 1.18 1996/04/23 10:29:33 mycroft Exp $	*/
+
+/*
+ * Copyright (c) 1982, 1986, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)time.h	8.2 (Berkeley) 7/10/94
+ */
+
+#ifndef _SYS_TIME_H_
+#define _SYS_TIME_H_
+
+#include <sys/types.h>
+
+/*
+ * Structure returned by gettimeofday(2) system call,
+ * and used in other calls.
+ */
+struct timeval {
+	long	tv_sec;		/* seconds */
+	long	tv_usec;	/* and microseconds */
+};
+
+/*
+ * Structure defined by POSIX.1b to be like a timeval.
+ */
+struct timespec {
+	time_t	tv_sec;		/* seconds */
+	long	tv_nsec;	/* and nanoseconds */
+};
+
+#define	TIMEVAL_TO_TIMESPEC(tv, ts) {					\
+	(ts)->tv_sec = (tv)->tv_sec;					\
+	(ts)->tv_nsec = (tv)->tv_usec * 1000;				\
+}
+#define	TIMESPEC_TO_TIMEVAL(tv, ts) {					\
+	(tv)->tv_sec = (ts)->tv_sec;					\
+	(tv)->tv_usec = (ts)->tv_nsec / 1000;				\
+}
+
+struct timezone {
+	int	tz_minuteswest;	/* minutes west of Greenwich */
+	int	tz_dsttime;	/* type of dst correction */
+};
+#define	DST_NONE	0	/* not on dst */
+#define	DST_USA		1	/* USA style dst */
+#define	DST_AUST	2	/* Australian style dst */
+#define	DST_WET		3	/* Western European dst */
+#define	DST_MET		4	/* Middle European dst */
+#define	DST_EET		5	/* Eastern European dst */
+#define	DST_CAN		6	/* Canada */
+
+/* Operations on timevals. */
+#define	timerclear(tvp)		(tvp)->tv_sec = (tvp)->tv_usec = 0
+#define	timerisset(tvp)		((tvp)->tv_sec || (tvp)->tv_usec)
+#define	timercmp(tvp, uvp, cmp)						\
+	(((tvp)->tv_sec == (uvp)->tv_sec) ?				\
+	    ((tvp)->tv_usec cmp (uvp)->tv_usec) :			\
+	    ((tvp)->tv_sec cmp (uvp)->tv_sec))
+#define	timeradd(tvp, uvp, vvp)						\
+	do {								\
+		(vvp)->tv_sec = (tvp)->tv_sec + (uvp)->tv_sec;		\
+		(vvp)->tv_usec = (tvp)->tv_usec + (uvp)->tv_usec;	\
+		if ((vvp)->tv_usec >= 1000000) {			\
+			(vvp)->tv_sec++;				\
+			(vvp)->tv_usec -= 1000000;			\
+		}							\
+	} while (0)
+#define	timersub(tvp, uvp, vvp)						\
+	do {								\
+		(vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec;		\
+		(vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec;	\
+		if ((vvp)->tv_usec < 0) {				\
+			(vvp)->tv_sec--;				\
+			(vvp)->tv_usec += 1000000;			\
+		}							\
+	} while (0)
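+
+/*
+ * Illustrative sketch (not part of the original header): timing a section
+ * of code with these macros.  do_work(), handle_overrun() and limit are
+ * hypothetical placeholders.
+ *
+ *	struct timeval start, end, elapsed;
+ *
+ *	gettimeofday(&start, NULL);
+ *	do_work();
+ *	gettimeofday(&end, NULL);
+ *	timersub(&end, &start, &elapsed);
+ *	if (timercmp(&elapsed, &limit, >))
+ *		handle_overrun();
+ */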
+
+/* Operations on timespecs. */
+#define	timespecclear(tsp)		(tsp)->tv_sec = (tsp)->tv_nsec = 0
+#define	timespecisset(tsp)		((tsp)->tv_sec || (tsp)->tv_nsec)
+#define	timespeccmp(tsp, usp, cmp)					\
+	(((tsp)->tv_sec == (usp)->tv_sec) ?				\
+	    ((tsp)->tv_nsec cmp (usp)->tv_nsec) :			\
+	    ((tsp)->tv_sec cmp (usp)->tv_sec))
+#define	timespecadd(tsp, usp, vsp)					\
+	do {								\
+		(vsp)->tv_sec = (tsp)->tv_sec + (usp)->tv_sec;		\
+		(vsp)->tv_nsec = (tsp)->tv_nsec + (usp)->tv_nsec;	\
+		if ((vsp)->tv_nsec >= 1000000000L) {			\
+			(vsp)->tv_sec++;				\
+			(vsp)->tv_nsec -= 1000000000L;			\
+		}							\
+	} while (0)
+#define	timespecsub(tsp, usp, vsp)					\
+	do {								\
+		(vsp)->tv_sec = (tsp)->tv_sec - (usp)->tv_sec;		\
+		(vsp)->tv_nsec = (tsp)->tv_nsec - (usp)->tv_nsec;	\
+		if ((vsp)->tv_nsec < 0) {				\
+			(vsp)->tv_sec--;				\
+			(vsp)->tv_nsec += 1000000000L;			\
+		}							\
+	} while (0)
+
+/*
+ * Names of the interval timers, and structure
+ * defining a timer setting.
+ */
+#define	ITIMER_REAL	0
+#define	ITIMER_VIRTUAL	1
+#define	ITIMER_PROF	2
+
+struct	itimerval {
+	struct	timeval it_interval;	/* timer interval */
+	struct	timeval it_value;	/* current value */
+};
+
+/*
+ * Getkerninfo clock information structure
+ */
+struct clockinfo {
+	int	hz;		/* clock frequency */
+	int	tick;		/* micro-seconds per hz tick */
+	int	tickadj;	/* clock skew rate for adjtime() */
+	int	stathz;		/* statistics clock frequency */
+	int	profhz;		/* profiling clock frequency */
+};
+
+#define CLOCK_REALTIME	0
+#define CLOCK_VIRTUAL	1
+#define CLOCK_PROF	2
+
+#define TIMER_RELTIME	0x0	/* relative timer */
+#define TIMER_ABSTIME	0x1	/* absolute timer */
+
+/* --- stuff got cut here - niels --- */
+
+#endif /* !_SYS_TIME_H_ */
diff --git a/base/third_party/libevent/compat/sys/queue.h b/base/third_party/libevent/compat/sys/queue.h
new file mode 100644
index 0000000..c0956dd
--- /dev/null
+++ b/base/third_party/libevent/compat/sys/queue.h
@@ -0,0 +1,488 @@
+/*	$OpenBSD: queue.h,v 1.16 2000/09/07 19:47:59 art Exp $	*/
+/*	$NetBSD: queue.h,v 1.11 1996/05/16 05:17:14 mycroft Exp $	*/
+
+/*
+ * Copyright (c) 1991, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)queue.h	8.5 (Berkeley) 8/20/94
+ */
+
+#ifndef	_SYS_QUEUE_H_
+#define	_SYS_QUEUE_H_
+
+/*
+ * This file defines five types of data structures: singly-linked lists, 
+ * lists, simple queues, tail queues, and circular queues.
+ *
+ *
+ * A singly-linked list is headed by a single forward pointer. The elements
+ * are singly linked for minimum space and pointer manipulation overhead at
+ * the expense of O(n) removal for arbitrary elements. New elements can be
+ * added to the list after an existing element or at the head of the list.
+ * Elements being removed from the head of the list should use the explicit
+ * macro for this purpose for optimum efficiency. A singly-linked list may
+ * only be traversed in the forward direction.  Singly-linked lists are ideal
+ * for applications with large datasets and few or no removals or for
+ * implementing a LIFO queue.
+ *
+ * A list is headed by a single forward pointer (or an array of forward
+ * pointers for a hash table header). The elements are doubly linked
+ * so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before
+ * or after an existing element or at the head of the list. A list
+ * may only be traversed in the forward direction.
+ *
+ * A simple queue is headed by a pair of pointers, one the head of the
+ * list and the other to the tail of the list. The elements are singly
+ * linked to save space, so elements can only be removed from the
+ * head of the list. New elements can be added to the list before or after
+ * an existing element, at the head of the list, or at the end of the
+ * list. A simple queue may only be traversed in the forward direction.
+ *
+ * A tail queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or
+ * after an existing element, at the head of the list, or at the end of
+ * the list. A tail queue may be traversed in either direction.
+ *
+ * A circle queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or after
+ * an existing element, at the head of the list, or at the end of the list.
+ * A circle queue may be traversed in either direction, but has a more
+ * complex end of list detection.
+ *
+ * For details on the use of these macros, see the queue(3) manual page.
+ */
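+
+/*
+ * Illustrative sketch (not part of the original header): declaring and
+ * walking a doubly-linked list with the LIST_* macros below.  The struct
+ * and field names are hypothetical placeholders.
+ *
+ *	struct entry {
+ *		int value;
+ *		LIST_ENTRY(entry) link;
+ *	};
+ *	LIST_HEAD(entryhead, entry) head = LIST_HEAD_INITIALIZER(head);
+ *
+ *	struct entry *e = malloc(sizeof(*e));
+ *	e->value = 1;
+ *	LIST_INSERT_HEAD(&head, e, link);
+ *	LIST_FOREACH(e, &head, link)
+ *		printf("%d\n", e->value);
+ */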
+
+/*
+ * Singly-linked List definitions.
+ */
+#define SLIST_HEAD(name, type)						\
+struct name {								\
+	struct type *slh_first;	/* first element */			\
+}
+
+#define	SLIST_HEAD_INITIALIZER(head)					\
+	{ NULL }
+
+#ifndef WIN32
+#define SLIST_ENTRY(type)						\
+struct {								\
+	struct type *sle_next;	/* next element */			\
+}
+#endif
+
+/*
+ * Singly-linked List access methods.
+ */
+#define	SLIST_FIRST(head)	((head)->slh_first)
+#define	SLIST_END(head)		NULL
+#define	SLIST_EMPTY(head)	(SLIST_FIRST(head) == SLIST_END(head))
+#define	SLIST_NEXT(elm, field)	((elm)->field.sle_next)
+
+#define	SLIST_FOREACH(var, head, field)					\
+	for((var) = SLIST_FIRST(head);					\
+	    (var) != SLIST_END(head);					\
+	    (var) = SLIST_NEXT(var, field))
+
+/*
+ * Singly-linked List functions.
+ */
+#define	SLIST_INIT(head) do {					\
+	SLIST_FIRST(head) = SLIST_END(head);				\
+} while (0)
+
+#define	SLIST_INSERT_AFTER(slistelm, elm, field) do {			\
+	(elm)->field.sle_next = (slistelm)->field.sle_next;		\
+	(slistelm)->field.sle_next = (elm);				\
+} while (0)
+
+#define	SLIST_INSERT_HEAD(head, elm, field) do {			\
+	(elm)->field.sle_next = (head)->slh_first;			\
+	(head)->slh_first = (elm);					\
+} while (0)
+
+#define	SLIST_REMOVE_HEAD(head, field) do {				\
+	(head)->slh_first = (head)->slh_first->field.sle_next;		\
+} while (0)
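+
+/*
+ * Usage sketch (illustrative only; "struct entry" is a hypothetical
+ * element type):
+ *
+ *	struct entry {
+ *		int data;
+ *		SLIST_ENTRY(entry) next;
+ *	} *n;
+ *	SLIST_HEAD(slisthead, entry) head = SLIST_HEAD_INITIALIZER(head);
+ *
+ *	n = malloc(sizeof(*n));
+ *	n->data = 1;
+ *	SLIST_INSERT_HEAD(&head, n, next);
+ *	SLIST_FOREACH(n, &head, next)
+ *		printf("%d\n", n->data);
+ */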
+
+/*
+ * List definitions.
+ */
+#define LIST_HEAD(name, type)						\
+struct name {								\
+	struct type *lh_first;	/* first element */			\
+}
+
+#define LIST_HEAD_INITIALIZER(head)					\
+	{ NULL }
+
+#define LIST_ENTRY(type)						\
+struct {								\
+	struct type *le_next;	/* next element */			\
+	struct type **le_prev;	/* address of previous next element */	\
+}
+
+/*
+ * List access methods
+ */
+#define	LIST_FIRST(head)		((head)->lh_first)
+#define	LIST_END(head)			NULL
+#define	LIST_EMPTY(head)		(LIST_FIRST(head) == LIST_END(head))
+#define	LIST_NEXT(elm, field)		((elm)->field.le_next)
+
+#define LIST_FOREACH(var, head, field)					\
+	for((var) = LIST_FIRST(head);					\
+	    (var) != LIST_END(head);					\
+	    (var) = LIST_NEXT(var, field))
+
+/*
+ * List functions.
+ */
+#define	LIST_INIT(head) do {						\
+	LIST_FIRST(head) = LIST_END(head);				\
+} while (0)
+
+#define LIST_INSERT_AFTER(listelm, elm, field) do {			\
+	if (((elm)->field.le_next = (listelm)->field.le_next) != NULL)	\
+		(listelm)->field.le_next->field.le_prev =		\
+		    &(elm)->field.le_next;				\
+	(listelm)->field.le_next = (elm);				\
+	(elm)->field.le_prev = &(listelm)->field.le_next;		\
+} while (0)
+
+#define	LIST_INSERT_BEFORE(listelm, elm, field) do {			\
+	(elm)->field.le_prev = (listelm)->field.le_prev;		\
+	(elm)->field.le_next = (listelm);				\
+	*(listelm)->field.le_prev = (elm);				\
+	(listelm)->field.le_prev = &(elm)->field.le_next;		\
+} while (0)
+
+#define LIST_INSERT_HEAD(head, elm, field) do {				\
+	if (((elm)->field.le_next = (head)->lh_first) != NULL)		\
+		(head)->lh_first->field.le_prev = &(elm)->field.le_next;\
+	(head)->lh_first = (elm);					\
+	(elm)->field.le_prev = &(head)->lh_first;			\
+} while (0)
+
+#define LIST_REMOVE(elm, field) do {					\
+	if ((elm)->field.le_next != NULL)				\
+		(elm)->field.le_next->field.le_prev =			\
+		    (elm)->field.le_prev;				\
+	*(elm)->field.le_prev = (elm)->field.le_next;			\
+} while (0)
+
+#define LIST_REPLACE(elm, elm2, field) do {				\
+	if (((elm2)->field.le_next = (elm)->field.le_next) != NULL)	\
+		(elm2)->field.le_next->field.le_prev =			\
+		    &(elm2)->field.le_next;				\
+	(elm2)->field.le_prev = (elm)->field.le_prev;			\
+	*(elm2)->field.le_prev = (elm2);				\
+} while (0)
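+
+/*
+ * Usage sketch (illustrative only; "struct entry" is hypothetical).
+ * Because entries are doubly linked, LIST_REMOVE needs no head pointer:
+ *
+ *	struct entry {
+ *		int data;
+ *		LIST_ENTRY(entry) entries;
+ *	} *n;
+ *	LIST_HEAD(listhead, entry) head;
+ *
+ *	LIST_INIT(&head);
+ *	n = malloc(sizeof(*n));
+ *	LIST_INSERT_HEAD(&head, n, entries);
+ *	LIST_REMOVE(n, entries);
+ *	free(n);
+ */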
+
+/*
+ * Simple queue definitions.
+ */
+#define SIMPLEQ_HEAD(name, type)					\
+struct name {								\
+	struct type *sqh_first;	/* first element */			\
+	struct type **sqh_last;	/* addr of last next element */		\
+}
+
+#define SIMPLEQ_HEAD_INITIALIZER(head)					\
+	{ NULL, &(head).sqh_first }
+
+#define SIMPLEQ_ENTRY(type)						\
+struct {								\
+	struct type *sqe_next;	/* next element */			\
+}
+
+/*
+ * Simple queue access methods.
+ */
+#define	SIMPLEQ_FIRST(head)	    ((head)->sqh_first)
+#define	SIMPLEQ_END(head)	    NULL
+#define	SIMPLEQ_EMPTY(head)	    (SIMPLEQ_FIRST(head) == SIMPLEQ_END(head))
+#define	SIMPLEQ_NEXT(elm, field)    ((elm)->field.sqe_next)
+
+#define SIMPLEQ_FOREACH(var, head, field)				\
+	for((var) = SIMPLEQ_FIRST(head);				\
+	    (var) != SIMPLEQ_END(head);					\
+	    (var) = SIMPLEQ_NEXT(var, field))
+
+/*
+ * Simple queue functions.
+ */
+#define	SIMPLEQ_INIT(head) do {						\
+	(head)->sqh_first = NULL;					\
+	(head)->sqh_last = &(head)->sqh_first;				\
+} while (0)
+
+#define SIMPLEQ_INSERT_HEAD(head, elm, field) do {			\
+	if (((elm)->field.sqe_next = (head)->sqh_first) == NULL)	\
+		(head)->sqh_last = &(elm)->field.sqe_next;		\
+	(head)->sqh_first = (elm);					\
+} while (0)
+
+#define SIMPLEQ_INSERT_TAIL(head, elm, field) do {			\
+	(elm)->field.sqe_next = NULL;					\
+	*(head)->sqh_last = (elm);					\
+	(head)->sqh_last = &(elm)->field.sqe_next;			\
+} while (0)
+
+#define SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do {		\
+	if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\
+		(head)->sqh_last = &(elm)->field.sqe_next;		\
+	(listelm)->field.sqe_next = (elm);				\
+} while (0)
+
+#define SIMPLEQ_REMOVE_HEAD(head, elm, field) do {			\
+	if (((head)->sqh_first = (elm)->field.sqe_next) == NULL)	\
+		(head)->sqh_last = &(head)->sqh_first;			\
+} while (0)
+
+/*
+ * Tail queue definitions.
+ */
+#define TAILQ_HEAD(name, type)						\
+struct name {								\
+	struct type *tqh_first;	/* first element */			\
+	struct type **tqh_last;	/* addr of last next element */		\
+}
+
+#define TAILQ_HEAD_INITIALIZER(head)					\
+	{ NULL, &(head).tqh_first }
+
+#define TAILQ_ENTRY(type)						\
+struct {								\
+	struct type *tqe_next;	/* next element */			\
+	struct type **tqe_prev;	/* address of previous next element */	\
+}
+
+/*
+ * Tail queue access methods.
+ */
+#define	TAILQ_FIRST(head)		((head)->tqh_first)
+#define	TAILQ_END(head)			NULL
+#define	TAILQ_NEXT(elm, field)		((elm)->field.tqe_next)
+#define TAILQ_LAST(head, headname)					\
+	(*(((struct headname *)((head)->tqh_last))->tqh_last))
+/*
+ * TAILQ_LAST and TAILQ_PREV rely on TAILQ_ENTRY and TAILQ_HEAD sharing
+ * one layout: tqh_last (or tqe_prev) points at a tqe_next field, and
+ * casting that pointer to a struct headname * lets the adjacent
+ * tqe_prev field be read as tqh_last.
+ */
+#define TAILQ_PREV(elm, headname, field)				\
+	(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
+#define	TAILQ_EMPTY(head)						\
+	(TAILQ_FIRST(head) == TAILQ_END(head))
+
+#define TAILQ_FOREACH(var, head, field)					\
+	for((var) = TAILQ_FIRST(head);					\
+	    (var) != TAILQ_END(head);					\
+	    (var) = TAILQ_NEXT(var, field))
+
+#define TAILQ_FOREACH_REVERSE(var, head, field, headname)		\
+	for((var) = TAILQ_LAST(head, headname);				\
+	    (var) != TAILQ_END(head);					\
+	    (var) = TAILQ_PREV(var, headname, field))
+
+/*
+ * Tail queue functions.
+ */
+#define	TAILQ_INIT(head) do {						\
+	(head)->tqh_first = NULL;					\
+	(head)->tqh_last = &(head)->tqh_first;				\
+} while (0)
+
+#define TAILQ_INSERT_HEAD(head, elm, field) do {			\
+	if (((elm)->field.tqe_next = (head)->tqh_first) != NULL)	\
+		(head)->tqh_first->field.tqe_prev =			\
+		    &(elm)->field.tqe_next;				\
+	else								\
+		(head)->tqh_last = &(elm)->field.tqe_next;		\
+	(head)->tqh_first = (elm);					\
+	(elm)->field.tqe_prev = &(head)->tqh_first;			\
+} while (0)
+
+#define TAILQ_INSERT_TAIL(head, elm, field) do {			\
+	(elm)->field.tqe_next = NULL;					\
+	(elm)->field.tqe_prev = (head)->tqh_last;			\
+	*(head)->tqh_last = (elm);					\
+	(head)->tqh_last = &(elm)->field.tqe_next;			\
+} while (0)
+
+#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do {		\
+	if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\
+		(elm)->field.tqe_next->field.tqe_prev =			\
+		    &(elm)->field.tqe_next;				\
+	else								\
+		(head)->tqh_last = &(elm)->field.tqe_next;		\
+	(listelm)->field.tqe_next = (elm);				\
+	(elm)->field.tqe_prev = &(listelm)->field.tqe_next;		\
+} while (0)
+
+#define	TAILQ_INSERT_BEFORE(listelm, elm, field) do {			\
+	(elm)->field.tqe_prev = (listelm)->field.tqe_prev;		\
+	(elm)->field.tqe_next = (listelm);				\
+	*(listelm)->field.tqe_prev = (elm);				\
+	(listelm)->field.tqe_prev = &(elm)->field.tqe_next;		\
+} while (0)
+
+#define TAILQ_REMOVE(head, elm, field) do {				\
+	if (((elm)->field.tqe_next) != NULL)				\
+		(elm)->field.tqe_next->field.tqe_prev =			\
+		    (elm)->field.tqe_prev;				\
+	else								\
+		(head)->tqh_last = (elm)->field.tqe_prev;		\
+	*(elm)->field.tqe_prev = (elm)->field.tqe_next;			\
+} while (0)
+
+#define TAILQ_REPLACE(head, elm, elm2, field) do {			\
+	if (((elm2)->field.tqe_next = (elm)->field.tqe_next) != NULL)	\
+		(elm2)->field.tqe_next->field.tqe_prev =		\
+		    &(elm2)->field.tqe_next;				\
+	else								\
+		(head)->tqh_last = &(elm2)->field.tqe_next;		\
+	(elm2)->field.tqe_prev = (elm)->field.tqe_prev;			\
+	*(elm2)->field.tqe_prev = (elm2);				\
+} while (0)
+
+/*
+ * Circular queue definitions.
+ */
+#define CIRCLEQ_HEAD(name, type)					\
+struct name {								\
+	struct type *cqh_first;		/* first element */		\
+	struct type *cqh_last;		/* last element */		\
+}
+
+#define CIRCLEQ_HEAD_INITIALIZER(head)					\
+	{ CIRCLEQ_END(&head), CIRCLEQ_END(&head) }
+
+#define CIRCLEQ_ENTRY(type)						\
+struct {								\
+	struct type *cqe_next;		/* next element */		\
+	struct type *cqe_prev;		/* previous element */		\
+}
+
+/*
+ * Circular queue access methods.
+ */
+#define	CIRCLEQ_FIRST(head)		((head)->cqh_first)
+#define	CIRCLEQ_LAST(head)		((head)->cqh_last)
+#define	CIRCLEQ_END(head)		((void *)(head))
+#define	CIRCLEQ_NEXT(elm, field)	((elm)->field.cqe_next)
+#define	CIRCLEQ_PREV(elm, field)	((elm)->field.cqe_prev)
+#define	CIRCLEQ_EMPTY(head)						\
+	(CIRCLEQ_FIRST(head) == CIRCLEQ_END(head))
+
+#define CIRCLEQ_FOREACH(var, head, field)				\
+	for((var) = CIRCLEQ_FIRST(head);				\
+	    (var) != CIRCLEQ_END(head);					\
+	    (var) = CIRCLEQ_NEXT(var, field))
+
+#define CIRCLEQ_FOREACH_REVERSE(var, head, field)			\
+	for((var) = CIRCLEQ_LAST(head);					\
+	    (var) != CIRCLEQ_END(head);					\
+	    (var) = CIRCLEQ_PREV(var, field))
+
+/*
+ * Circular queue functions.
+ */
+#define	CIRCLEQ_INIT(head) do {						\
+	(head)->cqh_first = CIRCLEQ_END(head);				\
+	(head)->cqh_last = CIRCLEQ_END(head);				\
+} while (0)
+
+#define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do {		\
+	(elm)->field.cqe_next = (listelm)->field.cqe_next;		\
+	(elm)->field.cqe_prev = (listelm);				\
+	if ((listelm)->field.cqe_next == CIRCLEQ_END(head))		\
+		(head)->cqh_last = (elm);				\
+	else								\
+		(listelm)->field.cqe_next->field.cqe_prev = (elm);	\
+	(listelm)->field.cqe_next = (elm);				\
+} while (0)
+
+#define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do {		\
+	(elm)->field.cqe_next = (listelm);				\
+	(elm)->field.cqe_prev = (listelm)->field.cqe_prev;		\
+	if ((listelm)->field.cqe_prev == CIRCLEQ_END(head))		\
+		(head)->cqh_first = (elm);				\
+	else								\
+		(listelm)->field.cqe_prev->field.cqe_next = (elm);	\
+	(listelm)->field.cqe_prev = (elm);				\
+} while (0)
+
+#define CIRCLEQ_INSERT_HEAD(head, elm, field) do {			\
+	(elm)->field.cqe_next = (head)->cqh_first;			\
+	(elm)->field.cqe_prev = CIRCLEQ_END(head);			\
+	if ((head)->cqh_last == CIRCLEQ_END(head))			\
+		(head)->cqh_last = (elm);				\
+	else								\
+		(head)->cqh_first->field.cqe_prev = (elm);		\
+	(head)->cqh_first = (elm);					\
+} while (0)
+
+#define CIRCLEQ_INSERT_TAIL(head, elm, field) do {			\
+	(elm)->field.cqe_next = CIRCLEQ_END(head);			\
+	(elm)->field.cqe_prev = (head)->cqh_last;			\
+	if ((head)->cqh_first == CIRCLEQ_END(head))			\
+		(head)->cqh_first = (elm);				\
+	else								\
+		(head)->cqh_last->field.cqe_next = (elm);		\
+	(head)->cqh_last = (elm);					\
+} while (0)
+
+#define	CIRCLEQ_REMOVE(head, elm, field) do {				\
+	if ((elm)->field.cqe_next == CIRCLEQ_END(head))			\
+		(head)->cqh_last = (elm)->field.cqe_prev;		\
+	else								\
+		(elm)->field.cqe_next->field.cqe_prev =			\
+		    (elm)->field.cqe_prev;				\
+	if ((elm)->field.cqe_prev == CIRCLEQ_END(head))			\
+		(head)->cqh_first = (elm)->field.cqe_next;		\
+	else								\
+		(elm)->field.cqe_prev->field.cqe_next =			\
+		    (elm)->field.cqe_next;				\
+} while (0)
+
+#define CIRCLEQ_REPLACE(head, elm, elm2, field) do {			\
+	if (((elm2)->field.cqe_next = (elm)->field.cqe_next) ==		\
+	    CIRCLEQ_END(head))						\
+		(head)->cqh_last = (elm2);				\
+	else								\
+		(elm2)->field.cqe_next->field.cqe_prev = (elm2);	\
+	if (((elm2)->field.cqe_prev = (elm)->field.cqe_prev) ==		\
+	    CIRCLEQ_END(head))						\
+		(head)->cqh_first = (elm2);				\
+	else								\
+		(elm2)->field.cqe_prev->field.cqe_next = (elm2);	\
+} while (0)
+
+#endif	/* !_SYS_QUEUE_H_ */
diff --git a/base/third_party/libevent/configure.in b/base/third_party/libevent/configure.in
new file mode 100644
index 0000000..468d774
--- /dev/null
+++ b/base/third_party/libevent/configure.in
@@ -0,0 +1,421 @@
+dnl configure.in for libevent
+dnl Dug Song <dugsong@monkey.org>
+AC_INIT(event.c)
+
+AM_INIT_AUTOMAKE(libevent,1.4.15)
+AM_CONFIG_HEADER(config.h)
+dnl AM_MAINTAINER_MODE
+
+AC_CONFIG_MACRO_DIR([m4])
+
+AC_CANONICAL_HOST
+
+AC_DEFINE(NUMERIC_VERSION, 0x01040f00, [Numeric representation of the version])
+
+dnl Initialize prefix.
+if test "$prefix" = "NONE"; then
+   prefix="/usr/local"
+fi
+
+dnl Checks for programs.
+AC_PROG_CC
+AC_PROG_INSTALL
+AC_PROG_LN_S
+
+AC_PROG_GCC_TRADITIONAL
+if test "$GCC" = yes ; then
+        CFLAGS="$CFLAGS -Wall"
+        # And disable the strict-aliasing optimization, since it breaks
+        # our sockaddr-handling code in strange ways.
+        CFLAGS="$CFLAGS -fno-strict-aliasing"
+fi
+
+dnl Libevent 1.4 isn't multithreaded, but some of its functions are
+dnl documented to be reentrant.  If you don't define the right macros
+dnl on some platforms, you get non-reentrant versions of the libc
+dnl functions (like an errno that's shared by all threads).
+AC_MSG_CHECKING([whether we need extra flags to make libc reentrant])
+case $host in
+   *solaris* | *-osf* | *-hpux* )
+     AC_MSG_RESULT([-D_REENTRANT])
+     CFLAGS="$CFLAGS -D_REENTRANT"
+     ;;
+   *-aix* | *-freebsd* | *-darwin* )
+     AC_MSG_RESULT([-D_THREAD_SAFE])
+     CFLAGS="$CFLAGS -D_THREAD_SAFE"
+     ;;
+   *)
+     AC_MSG_RESULT(no)
+     ;;
+esac
+
+AC_ARG_ENABLE(gcc-warnings,
+     AS_HELP_STRING(--enable-gcc-warnings, enable verbose warnings with GCC))
+
+AC_PROG_LIBTOOL
+
+dnl   Uncomment "AC_DISABLE_SHARED" to make shared libraries not get
+dnl   built by default.  You can also turn shared libs on and off from
+dnl   the command line with --enable-shared and --disable-shared.
+dnl AC_DISABLE_SHARED
+AC_SUBST(LIBTOOL_DEPS)
+
+dnl Checks for libraries.
+AC_CHECK_LIB(socket, socket)
+AC_CHECK_LIB(resolv, inet_aton)
+AC_CHECK_LIB(rt, clock_gettime)
+AC_CHECK_LIB(nsl, inet_ntoa)
+
+dnl Checks for header files.
+AC_HEADER_STDC
+AC_CHECK_HEADERS(fcntl.h stdarg.h inttypes.h stdint.h poll.h signal.h unistd.h sys/epoll.h sys/time.h sys/queue.h sys/event.h sys/param.h sys/ioctl.h sys/select.h sys/devpoll.h port.h netinet/in6.h sys/socket.h)
+if test "x$ac_cv_header_sys_queue_h" = "xyes"; then
+	AC_MSG_CHECKING(for TAILQ_FOREACH in sys/queue.h)
+	AC_EGREP_CPP(yes,
+[
+#include <sys/queue.h>
+#ifdef TAILQ_FOREACH
+ yes
+#endif
+],	[AC_MSG_RESULT(yes)
+	 AC_DEFINE(HAVE_TAILQFOREACH, 1,
+		[Define if TAILQ_FOREACH is defined in <sys/queue.h>])],
+	AC_MSG_RESULT(no)
+	)
+fi
+
+if test "x$ac_cv_header_sys_time_h" = "xyes"; then
+	AC_MSG_CHECKING(for timeradd in sys/time.h)
+	AC_EGREP_CPP(yes,
+[
+#include <sys/time.h>
+#ifdef timeradd
+ yes
+#endif
+],	[ AC_DEFINE(HAVE_TIMERADD, 1,
+		[Define if timeradd is defined in <sys/time.h>])
+	  AC_MSG_RESULT(yes)] ,AC_MSG_RESULT(no)
+)
+fi
+
+if test "x$ac_cv_header_sys_time_h" = "xyes"; then
+	AC_MSG_CHECKING(for timercmp in sys/time.h)
+	AC_EGREP_CPP(yes,
+[
+#include <sys/time.h>
+#ifdef timercmp
+ yes
+#endif
+],	[ AC_DEFINE(HAVE_TIMERCMP, 1,
+		[Define if timercmp is defined in <sys/time.h>])
+	  AC_MSG_RESULT(yes)] ,AC_MSG_RESULT(no)
+)
+fi
+
+if test "x$ac_cv_header_sys_time_h" = "xyes"; then
+	AC_MSG_CHECKING(for timerclear in sys/time.h)
+	AC_EGREP_CPP(yes,
+[
+#include <sys/time.h>
+#ifdef timerclear
+ yes
+#endif
+],	[ AC_DEFINE(HAVE_TIMERCLEAR, 1,
+		[Define if timerclear is defined in <sys/time.h>])
+	  AC_MSG_RESULT(yes)] ,AC_MSG_RESULT(no)
+)
+fi
+
+if test "x$ac_cv_header_sys_time_h" = "xyes"; then
+	AC_MSG_CHECKING(for timerisset in sys/time.h)
+	AC_EGREP_CPP(yes,
+[
+#include <sys/time.h>
+#ifdef timerisset
+ yes
+#endif
+],	[ AC_DEFINE(HAVE_TIMERISSET, 1,
+		[Define if timerisset is defined in <sys/time.h>])
+	  AC_MSG_RESULT(yes)] ,AC_MSG_RESULT(no)
+)
+fi
+
+dnl - check if the macro WIN32 is defined on this compiler.
+dnl - (this is how we check for a windows version of GCC)
+AC_MSG_CHECKING(for WIN32)
+AC_TRY_COMPILE(,
+	[
+#ifndef WIN32
+die horribly
+#endif
+	],
+	bwin32=true; AC_MSG_RESULT(yes),
+	bwin32=false; AC_MSG_RESULT(no),
+)
+
+AM_CONDITIONAL(BUILD_WIN32, test x$bwin32 = xtrue)
+
+dnl Checks for typedefs, structures, and compiler characteristics.
+AC_C_CONST
+AC_C_INLINE
+AC_HEADER_TIME
+
+dnl Checks for library functions.
+AC_CHECK_FUNCS(gettimeofday vasprintf fcntl clock_gettime strtok_r strsep getaddrinfo getnameinfo strlcpy inet_ntop signal sigaction strtoll issetugid geteuid getegid)
+
+AC_CHECK_SIZEOF(long)
+
+if test "x$ac_cv_func_clock_gettime" = "xyes"; then
+   AC_DEFINE(DNS_USE_CPU_CLOCK_FOR_ID, 1, [Define if clock_gettime is available in libc])
+else
+   AC_DEFINE(DNS_USE_GETTIMEOFDAY_FOR_ID, 1, [Define if no secure id variant is available])
+fi
+
+AC_MSG_CHECKING(for F_SETFD in fcntl.h)
+AC_EGREP_CPP(yes,
+[
+#define _GNU_SOURCE
+#include <fcntl.h>
+#ifdef F_SETFD
+yes
+#endif
+],	[ AC_DEFINE(HAVE_SETFD, 1,
+	      [Define if F_SETFD is defined in <fcntl.h>])
+	  AC_MSG_RESULT(yes) ], AC_MSG_RESULT(no))
+
+needsignal=no
+haveselect=no
+AC_CHECK_FUNCS(select, [haveselect=yes], )
+if test "x$haveselect" = "xyes" ; then
+	AC_LIBOBJ(select)
+	needsignal=yes
+fi
+
+havepoll=no
+AC_CHECK_FUNCS(poll, [havepoll=yes], )
+if test "x$havepoll" = "xyes" ; then
+	AC_LIBOBJ(poll)
+	needsignal=yes
+fi
+
+haveepoll=no
+AC_CHECK_FUNCS(epoll_ctl, [haveepoll=yes], )
+if test "x$haveepoll" = "xyes" ; then
+	AC_DEFINE(HAVE_EPOLL, 1,
+		[Define if your system supports the epoll system calls])
+	AC_LIBOBJ(epoll)
+	needsignal=yes
+fi
+
+havedevpoll=no
+if test "x$ac_cv_header_sys_devpoll_h" = "xyes"; then
+	AC_DEFINE(HAVE_DEVPOLL, 1,
+		    [Define if /dev/poll is available])
+        AC_LIBOBJ(devpoll)
+fi
+
+havekqueue=no
+if test "x$ac_cv_header_sys_event_h" = "xyes"; then
+	AC_CHECK_FUNCS(kqueue, [havekqueue=yes], )
+	if test "x$havekqueue" = "xyes" ; then
+		AC_MSG_CHECKING(for working kqueue)
+		AC_TRY_RUN(
+#include <sys/types.h>
+#include <sys/time.h>
+#include <sys/event.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <string.h>
+
+int
+main(int argc, char **argv)
+{
+	int kq;
+	int n;
+	int fd[[2]];
+	struct kevent ev;
+	struct timespec ts;
+	char buf[[8000]];
+
+	if (pipe(fd) == -1)
+		exit(1);
+	if (fcntl(fd[[1]], F_SETFL, O_NONBLOCK) == -1)
+		exit(1);
+
+	while ((n = write(fd[[1]], buf, sizeof(buf))) == sizeof(buf))
+		;
+
+        if ((kq = kqueue()) == -1)
+		exit(1);
+
+	memset(&ev, 0, sizeof(ev));
+	ev.ident = fd[[1]];
+	ev.filter = EVFILT_WRITE;
+	ev.flags = EV_ADD | EV_ENABLE;
+	n = kevent(kq, &ev, 1, NULL, 0, NULL);
+	if (n == -1)
+		exit(1);
+
+	read(fd[[0]], buf, sizeof(buf));
+
+	ts.tv_sec = 0;
+	ts.tv_nsec = 0;
+	n = kevent(kq, NULL, 0, &ev, 1, &ts);
+	if (n == -1 || n == 0)
+		exit(1);
+
+	exit(0);
+}, [AC_MSG_RESULT(yes)
+    AC_DEFINE(HAVE_WORKING_KQUEUE, 1,
+		[Define if kqueue works correctly with pipes])
+    AC_LIBOBJ(kqueue)], AC_MSG_RESULT(no), AC_MSG_RESULT(no))
+	fi
+fi
+
+haveepollsyscall=no
+if test "x$ac_cv_header_sys_epoll_h" = "xyes"; then
+	if test "x$haveepoll" = "xno" ; then
+		AC_MSG_CHECKING(for epoll system call)
+		AC_TRY_RUN(
+#include <stdint.h>
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/syscall.h>
+#include <sys/epoll.h>
+#include <unistd.h>
+
+int
+epoll_create(int size)
+{
+	return (syscall(__NR_epoll_create, size));
+}
+
+int
+main(int argc, char **argv)
+{
+	int epfd;
+
+	epfd = epoll_create(256);
+	exit (epfd == -1 ? 1 : 0);
+}, [AC_MSG_RESULT(yes)
+    AC_DEFINE(HAVE_EPOLL, 1,
+	[Define if your system supports the epoll system calls])
+    needsignal=yes
+    AC_LIBOBJ(epoll_sub)
+    AC_LIBOBJ(epoll)], AC_MSG_RESULT(no), AC_MSG_RESULT(no))
+	fi
+fi
+
+haveeventports=no
+AC_CHECK_FUNCS(port_create, [haveeventports=yes], )
+if test "x$haveeventports" = "xyes" ; then
+	AC_DEFINE(HAVE_EVENT_PORTS, 1,
+		[Define if your system supports event ports])
+	AC_LIBOBJ(evport)
+	needsignal=yes
+fi
+if test "x$bwin32" = "xtrue"; then
+	needsignal=yes
+fi
+if test "x$needsignal" = "xyes" ; then
+	AC_LIBOBJ(signal)
+fi
+
+AC_TYPE_PID_T
+AC_TYPE_SIZE_T
+AC_CHECK_TYPES([uint64_t, uint32_t, uint16_t, uint8_t], , ,
+[#ifdef HAVE_STDINT_H
+#include <stdint.h>
+#elif defined(HAVE_INTTYPES_H)
+#include <inttypes.h>
+#endif
+#ifdef HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif])
+AC_CHECK_TYPES([fd_mask], , ,
+[#ifdef HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+#ifdef HAVE_SELECT_H
+#include <select.h>
+#endif])
+
+AC_CHECK_SIZEOF(long long)
+AC_CHECK_SIZEOF(int)
+AC_CHECK_SIZEOF(short)
+AC_CHECK_TYPES([struct in6_addr], , ,
+[#ifdef WIN32
+#include <winsock2.h>
+#else
+#include <sys/types.h>
+#include <netinet/in.h>
+#include <sys/socket.h>
+#endif
+#ifdef HAVE_NETINET_IN6_H
+#include <netinet/in6.h>
+#endif])
+
+AC_MSG_CHECKING([for socklen_t])
+AC_TRY_COMPILE([
+ #include <sys/types.h>
+ #include <sys/socket.h>],
+  [socklen_t x;],
+  AC_MSG_RESULT([yes]),
+  [AC_MSG_RESULT([no])
+  AC_DEFINE(socklen_t, unsigned int,
+	[Define to unsigned int if you don't have it])]
+)
+
+AC_MSG_CHECKING([whether our compiler supports __func__])
+AC_TRY_COMPILE([],
+ [ const char *cp = __func__; ],
+ AC_MSG_RESULT([yes]),
+ AC_MSG_RESULT([no])
+ AC_MSG_CHECKING([whether our compiler supports __FUNCTION__])
+ AC_TRY_COMPILE([],
+   [ const char *cp = __FUNCTION__; ],
+   AC_MSG_RESULT([yes])
+   AC_DEFINE(__func__, __FUNCTION__,
+         [Define to appropriate substitute if compiler doesn't have __func__]),
+   AC_MSG_RESULT([no])
+   AC_DEFINE(__func__, __FILE__,
+         [Define to appropriate substitute if compiler doesn't have __func__])))
+
+
+# Add some more warnings which we use in development but not in the
+# released versions.  (Some relevant gcc versions can't handle these.)
+if test x$enable_gcc_warnings = xyes; then
+
+  AC_COMPILE_IFELSE(AC_LANG_PROGRAM([], [
+#if !defined(__GNUC__) || (__GNUC__ < 4)
+#error
+#endif]), have_gcc4=yes, have_gcc4=no)
+
+  AC_COMPILE_IFELSE(AC_LANG_PROGRAM([], [
+#if !defined(__GNUC__) || (__GNUC__ < 4) || (__GNUC__ == 4 && __GNUC_MINOR__ < 2)
+#error
+#endif]), have_gcc42=yes, have_gcc42=no)
+
+  CFLAGS="$CFLAGS -W -Wfloat-equal -Wundef -Wpointer-arith -Wstrict-prototypes -Wmissing-prototypes -Wwrite-strings -Wredundant-decls -Wchar-subscripts -Wcomment -Wformat=2 -Wwrite-strings -Wmissing-declarations -Wredundant-decls -Wnested-externs -Wbad-function-cast -Wswitch-enum -Werror"
+  CFLAGS="$CFLAGS -Wno-unused-parameter -Wno-sign-compare -Wstrict-aliasing"
+
+  if test x$have_gcc4 = xyes ; then 
+    # These warnings break gcc 3.3.5 and work on gcc 4.0.2
+    CFLAGS="$CFLAGS -Winit-self -Wmissing-field-initializers -Wdeclaration-after-statement"
+    #CFLAGS="$CFLAGS -Wold-style-definition"
+  fi
+
+  if test x$have_gcc42 = xyes ; then 
+    # These warnings break gcc 4.0.2 and work on gcc 4.2
+    CFLAGS="$CFLAGS -Waddress -Wnormalized=id -Woverride-init"
+  fi
+
+##This will break the world on some 64-bit architectures
+# CFLAGS="$CFLAGS -Winline"
+
+fi
+
+AC_OUTPUT(Makefile test/Makefile sample/Makefile)
diff --git a/base/third_party/libevent/devpoll.c b/base/third_party/libevent/devpoll.c
new file mode 100644
index 0000000..2d34ae3
--- /dev/null
+++ b/base/third_party/libevent/devpoll.c
@@ -0,0 +1,417 @@
+/*
+ * Copyright 2000-2004 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <sys/types.h>
+#include <sys/resource.h>
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <sys/_libevent_time.h>
+#endif
+#include <sys/queue.h>
+#include <sys/devpoll.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <assert.h>
+
+#include "event.h"
+#include "event-internal.h"
+#include "evsignal.h"
+#include "log.h"
+
+/* Due to limitations in the devpoll interface, we need to keep track of
+ * all file descriptors ourselves.
+ */
+struct evdevpoll {
+	struct event *evread;
+	struct event *evwrite;
+};
+
+struct devpollop {
+	struct evdevpoll *fds;
+	int nfds;
+	struct pollfd *events;
+	int nevents;
+	int dpfd;
+	struct pollfd *changes;
+	int nchanges;
+};
+
+static void *devpoll_init	(struct event_base *);
+static int devpoll_add	(void *, struct event *);
+static int devpoll_del	(void *, struct event *);
+static int devpoll_dispatch	(struct event_base *, void *, struct timeval *);
+static void devpoll_dealloc	(struct event_base *, void *);
+
+const struct eventop devpollops = {
+	"devpoll",
+	devpoll_init,
+	devpoll_add,
+	devpoll_del,
+	devpoll_dispatch,
+	devpoll_dealloc,
+	1 /* need reinit */
+};
+
+#define NEVENT	32000
+
+static int
+devpoll_commit(struct devpollop *devpollop)
+{
+	/*
+	 * Due to a bug in Solaris, we have to use pwrite with an offset of 0.
+	 * Writes are limited to 2GB of data; anything larger will fail.
+	 */
+	if (pwrite(devpollop->dpfd, devpollop->changes,
+		sizeof(struct pollfd) * devpollop->nchanges, 0) == -1)
+		return(-1);
+
+	devpollop->nchanges = 0;
+	return(0);
+}
+
+static int
+devpoll_queue(struct devpollop *devpollop, int fd, int events) {
+	struct pollfd *pfd;
+
+	if (devpollop->nchanges >= devpollop->nevents) {
+		/*
+		 * Change buffer is full, must commit it to /dev/poll before
+		 * adding more.
+		 */
+		if (devpoll_commit(devpollop) != 0)
+			return(-1);
+	}
+
+	pfd = &devpollop->changes[devpollop->nchanges++];
+	pfd->fd = fd;
+	pfd->events = events;
+	pfd->revents = 0;
+
+	return(0);
+}
+
+static void *
+devpoll_init(struct event_base *base)
+{
+	int dpfd, nfiles = NEVENT;
+	struct rlimit rl;
+	struct devpollop *devpollop;
+
+	/* Disable devpoll when this environment variable is set */
+	if (evutil_getenv("EVENT_NODEVPOLL"))
+		return (NULL);
+
+	if (!(devpollop = calloc(1, sizeof(struct devpollop))))
+		return (NULL);
+
+	if (getrlimit(RLIMIT_NOFILE, &rl) == 0 &&
+	    rl.rlim_cur != RLIM_INFINITY)
+		nfiles = rl.rlim_cur;
+
+	/* Initialize the kernel queue */
+	if ((dpfd = open("/dev/poll", O_RDWR)) == -1) {
+		event_warn("open: /dev/poll");
+		free(devpollop);
+		return (NULL);
+	}
+
+	devpollop->dpfd = dpfd;
+
+	/* Initialize fields */
+	devpollop->events = calloc(nfiles, sizeof(struct pollfd));
+	if (devpollop->events == NULL) {
+		free(devpollop);
+		close(dpfd);
+		return (NULL);
+	}
+	devpollop->nevents = nfiles;
+
+	devpollop->fds = calloc(nfiles, sizeof(struct evdevpoll));
+	if (devpollop->fds == NULL) {
+		free(devpollop->events);
+		free(devpollop);
+		close(dpfd);
+		return (NULL);
+	}
+	devpollop->nfds = nfiles;
+
+	devpollop->changes = calloc(nfiles, sizeof(struct pollfd));
+	if (devpollop->changes == NULL) {
+		free(devpollop->fds);
+		free(devpollop->events);
+		free(devpollop);
+		close(dpfd);
+		return (NULL);
+	}
+
+	evsignal_init(base);
+
+	return (devpollop);
+}
+
+static int
+devpoll_recalc(struct event_base *base, void *arg, int max)
+{
+	struct devpollop *devpollop = arg;
+
+	if (max >= devpollop->nfds) {
+		struct evdevpoll *fds;
+		int nfds;
+
+		nfds = devpollop->nfds;
+		while (nfds <= max)
+			nfds <<= 1;
+
+		fds = realloc(devpollop->fds, nfds * sizeof(struct evdevpoll));
+		if (fds == NULL) {
+			event_warn("realloc");
+			return (-1);
+		}
+		devpollop->fds = fds;
+		memset(fds + devpollop->nfds, 0,
+		    (nfds - devpollop->nfds) * sizeof(struct evdevpoll));
+		devpollop->nfds = nfds;
+	}
+
+	return (0);
+}
+
+static int
+devpoll_dispatch(struct event_base *base, void *arg, struct timeval *tv)
+{
+	struct devpollop *devpollop = arg;
+	struct pollfd *events = devpollop->events;
+	struct dvpoll dvp;
+	struct evdevpoll *evdp;
+	int i, res, timeout = -1;
+
+	if (devpollop->nchanges)
+		devpoll_commit(devpollop);
+
+	if (tv != NULL)
+		timeout = tv->tv_sec * 1000 + (tv->tv_usec + 999) / 1000;
+
+	dvp.dp_fds = devpollop->events;
+	dvp.dp_nfds = devpollop->nevents;
+	dvp.dp_timeout = timeout;
+
+	res = ioctl(devpollop->dpfd, DP_POLL, &dvp);
+
+	if (res == -1) {
+		if (errno != EINTR) {
+			event_warn("ioctl: DP_POLL");
+			return (-1);
+		}
+
+		evsignal_process(base);
+		return (0);
+	} else if (base->sig.evsignal_caught) {
+		evsignal_process(base);
+	}
+
+	event_debug(("%s: devpoll_wait reports %d", __func__, res));
+
+	for (i = 0; i < res; i++) {
+		int which = 0;
+		int what = events[i].revents;
+		struct event *evread = NULL, *evwrite = NULL;
+
+		assert(events[i].fd < devpollop->nfds);
+		evdp = &devpollop->fds[events[i].fd];
+
+		if (what & POLLHUP)
+			what |= POLLIN | POLLOUT;
+		else if (what & POLLERR)
+			what |= POLLIN | POLLOUT;
+
+		if (what & POLLIN) {
+			evread = evdp->evread;
+			which |= EV_READ;
+		}
+
+		if (what & POLLOUT) {
+			evwrite = evdp->evwrite;
+			which |= EV_WRITE;
+		}
+
+		if (!which)
+			continue;
+
+		if (evread != NULL && !(evread->ev_events & EV_PERSIST))
+			event_del(evread);
+		if (evwrite != NULL && evwrite != evread &&
+		    !(evwrite->ev_events & EV_PERSIST))
+			event_del(evwrite);
+
+		if (evread != NULL)
+			event_active(evread, EV_READ, 1);
+		if (evwrite != NULL)
+			event_active(evwrite, EV_WRITE, 1);
+	}
+
+	return (0);
+}
+
+
+static int
+devpoll_add(void *arg, struct event *ev)
+{
+	struct devpollop *devpollop = arg;
+	struct evdevpoll *evdp;
+	int fd, events;
+
+	if (ev->ev_events & EV_SIGNAL)
+		return (evsignal_add(ev));
+
+	fd = ev->ev_fd;
+	if (fd >= devpollop->nfds) {
+		/* Extend the file descriptor array as necessary */
+		if (devpoll_recalc(ev->ev_base, devpollop, fd) == -1)
+			return (-1);
+	}
+	evdp = &devpollop->fds[fd];
+
+	/* 
+	 * It's not necessary to OR the existing read/write events that we
+	 * are currently interested in with the new event we are adding.
+	 * The /dev/poll driver ORs any new events with the existing events
+	 * that it has cached for the fd.
+	 */
+
+	events = 0;
+	if (ev->ev_events & EV_READ) {
+		if (evdp->evread && evdp->evread != ev) {
+		   /* There is already a different read event registered */
+		   return(-1);
+		}
+		events |= POLLIN;
+	}
+
+	if (ev->ev_events & EV_WRITE) {
+		if (evdp->evwrite && evdp->evwrite != ev) {
+		   /* There is already a different write event registered */
+		   return(-1);
+		}
+		events |= POLLOUT;
+	}
+
+	if (devpoll_queue(devpollop, fd, events) != 0)
+		return(-1);
+
+	/* Update events responsible */
+	if (ev->ev_events & EV_READ)
+		evdp->evread = ev;
+	if (ev->ev_events & EV_WRITE)
+		evdp->evwrite = ev;
+
+	return (0);
+}
+
+static int
+devpoll_del(void *arg, struct event *ev)
+{
+	struct devpollop *devpollop = arg;
+	struct evdevpoll *evdp;
+	int fd, events;
+	int needwritedelete = 1, needreaddelete = 1;
+
+	if (ev->ev_events & EV_SIGNAL)
+		return (evsignal_del(ev));
+
+	fd = ev->ev_fd;
+	if (fd >= devpollop->nfds)
+		return (0);
+	evdp = &devpollop->fds[fd];
+
+	events = 0;
+	if (ev->ev_events & EV_READ)
+		events |= POLLIN;
+	if (ev->ev_events & EV_WRITE)
+		events |= POLLOUT;
+
+	/*
+	 * The only way to remove an fd from the /dev/poll monitored set is
+	 * to use POLLREMOVE by itself.  This removes ALL events for the fd 
+	 * provided so if we care about two events and are only removing one 
+	 * we must re-add the other event after POLLREMOVE.
+	 */
+
+	if (devpoll_queue(devpollop, fd, POLLREMOVE) != 0)
+		return(-1);
+
+	if ((events & (POLLIN|POLLOUT)) != (POLLIN|POLLOUT)) {
+		/*
+		 * We're not deleting all events, so we must resubmit the
+		 * event that we are still interested in if one exists.
+		 */
+
+		if ((events & POLLIN) && evdp->evwrite != NULL) {
+			/* Deleting read, still care about write */
+			devpoll_queue(devpollop, fd, POLLOUT);
+			needwritedelete = 0;
+		} else if ((events & POLLOUT) && evdp->evread != NULL) {
+			/* Deleting write, still care about read */
+			devpoll_queue(devpollop, fd, POLLIN);
+			needreaddelete = 0;
+		}
+	}
+
+	if (needreaddelete)
+		evdp->evread = NULL;
+	if (needwritedelete)
+		evdp->evwrite = NULL;
+
+	return (0);
+}
+
+static void
+devpoll_dealloc(struct event_base *base, void *arg)
+{
+	struct devpollop *devpollop = arg;
+
+	evsignal_dealloc(base);
+	if (devpollop->fds)
+		free(devpollop->fds);
+	if (devpollop->events)
+		free(devpollop->events);
+	if (devpollop->changes)
+		free(devpollop->changes);
+	if (devpollop->dpfd >= 0)
+		close(devpollop->dpfd);
+
+	memset(devpollop, 0, sizeof(struct devpollop));
+	free(devpollop);
+}
diff --git a/base/third_party/libevent/epoll.c b/base/third_party/libevent/epoll.c
new file mode 100644
index 0000000..4387ef8
--- /dev/null
+++ b/base/third_party/libevent/epoll.c
@@ -0,0 +1,377 @@
+/*
+ * Copyright 2000-2003 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <stdint.h>
+#include <sys/types.h>
+#include <sys/resource.h>
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <sys/_libevent_time.h>
+#endif
+#include <sys/queue.h>
+#include <sys/epoll.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+#ifdef HAVE_FCNTL_H
+#include <fcntl.h>
+#endif
+
+#include "event.h"
+#include "event-internal.h"
+#include "evsignal.h"
+#include "log.h"
+
+/* Due to limitations in the epoll interface, we need to keep track of
+ * all file descriptors ourselves.
+ */
+struct evepoll {
+	struct event *evread;
+	struct event *evwrite;
+};
+
+struct epollop {
+	struct evepoll *fds;
+	int nfds;
+	struct epoll_event *events;
+	int nevents;
+	int epfd;
+};
+
+static void *epoll_init	(struct event_base *);
+static int epoll_add	(void *, struct event *);
+static int epoll_del	(void *, struct event *);
+static int epoll_dispatch	(struct event_base *, void *, struct timeval *);
+static void epoll_dealloc	(struct event_base *, void *);
+
+const struct eventop epollops = {
+	"epoll",
+	epoll_init,
+	epoll_add,
+	epoll_del,
+	epoll_dispatch,
+	epoll_dealloc,
+	1 /* need reinit */
+};
+
+#ifdef HAVE_SETFD
+#define FD_CLOSEONEXEC(x) do { \
+        if (fcntl(x, F_SETFD, 1) == -1) \
+                event_warn("fcntl(%d, F_SETFD)", x); \
+} while (0)
+#else
+#define FD_CLOSEONEXEC(x)
+#endif
+
+/* On Linux kernels at least up to 2.6.24.4, epoll can't handle timeout
+ * values bigger than (LONG_MAX - 999ULL)/HZ.  HZ in the wild can be
+ * as big as 1000, and LONG_MAX can be as small as (1<<31)-1, so the
+ * largest number of msec we can support here is 2147482.  Let's
+ * round that down by 47 seconds: 2147482 ms is just over 2147 s, and
+ * 2147 - 47 = 2100 s = 35 minutes.
+ */
+#define MAX_EPOLL_TIMEOUT_MSEC (35*60*1000)
+
+#define INITIAL_NFILES 32
+#define INITIAL_NEVENTS 32
+#define MAX_NEVENTS 4096
+
+static void *
+epoll_init(struct event_base *base)
+{
+	int epfd;
+	struct epollop *epollop;
+
+	/* Disable epoll when this environment variable is set */
+	if (evutil_getenv("EVENT_NOEPOLL"))
+		return (NULL);
+
+	/* Initialize the kernel queue */
+	if ((epfd = epoll_create(32000)) == -1) {
+		if (errno != ENOSYS)
+			event_warn("epoll_create");
+		return (NULL);
+	}
+
+	FD_CLOSEONEXEC(epfd);
+
+	if (!(epollop = calloc(1, sizeof(struct epollop))))
+		return (NULL);
+
+	epollop->epfd = epfd;
+
+	/* Initialize fields */
+	epollop->events = malloc(INITIAL_NEVENTS * sizeof(struct epoll_event));
+	if (epollop->events == NULL) {
+		free(epollop);
+		return (NULL);
+	}
+	epollop->nevents = INITIAL_NEVENTS;
+
+	epollop->fds = calloc(INITIAL_NFILES, sizeof(struct evepoll));
+	if (epollop->fds == NULL) {
+		free(epollop->events);
+		free(epollop);
+		return (NULL);
+	}
+	epollop->nfds = INITIAL_NFILES;
+
+	evsignal_init(base);
+
+	return (epollop);
+}
+
+static int
+epoll_recalc(struct event_base *base, void *arg, int max)
+{
+	struct epollop *epollop = arg;
+
+	if (max >= epollop->nfds) {
+		struct evepoll *fds;
+		int nfds;
+
+		nfds = epollop->nfds;
+		while (nfds <= max)
+			nfds <<= 1;
+
+		fds = realloc(epollop->fds, nfds * sizeof(struct evepoll));
+		if (fds == NULL) {
+			event_warn("realloc");
+			return (-1);
+		}
+		epollop->fds = fds;
+		memset(fds + epollop->nfds, 0,
+		    (nfds - epollop->nfds) * sizeof(struct evepoll));
+		epollop->nfds = nfds;
+	}
+
+	return (0);
+}
+
+static int
+epoll_dispatch(struct event_base *base, void *arg, struct timeval *tv)
+{
+	struct epollop *epollop = arg;
+	struct epoll_event *events = epollop->events;
+	struct evepoll *evep;
+	int i, res, timeout = -1;
+
+	if (tv != NULL)
+		timeout = tv->tv_sec * 1000 + (tv->tv_usec + 999) / 1000;
+
+	if (timeout > MAX_EPOLL_TIMEOUT_MSEC) {
+		/* Linux kernels can wait forever if the timeout is too big;
+		 * see comment on MAX_EPOLL_TIMEOUT_MSEC. */
+		timeout = MAX_EPOLL_TIMEOUT_MSEC;
+	}
+
+	res = epoll_wait(epollop->epfd, events, epollop->nevents, timeout);
+
+	if (res == -1) {
+		if (errno != EINTR) {
+			event_warn("epoll_wait");
+			return (-1);
+		}
+
+		evsignal_process(base);
+		return (0);
+	} else if (base->sig.evsignal_caught) {
+		evsignal_process(base);
+	}
+
+	event_debug(("%s: epoll_wait reports %d", __func__, res));
+
+	for (i = 0; i < res; i++) {
+		int what = events[i].events;
+		struct event *evread = NULL, *evwrite = NULL;
+		int fd = events[i].data.fd;
+
+		if (fd < 0 || fd >= epollop->nfds)
+			continue;
+		evep = &epollop->fds[fd];
+
+		if (what & (EPOLLHUP|EPOLLERR)) {
+			evread = evep->evread;
+			evwrite = evep->evwrite;
+		} else {
+			if (what & EPOLLIN) {
+				evread = evep->evread;
+			}
+
+			if (what & EPOLLOUT) {
+				evwrite = evep->evwrite;
+			}
+		}
+
+		if (!(evread||evwrite))
+			continue;
+
+		if (evread != NULL)
+			event_active(evread, EV_READ, 1);
+		if (evwrite != NULL)
+			event_active(evwrite, EV_WRITE, 1);
+	}
+
+	if (res == epollop->nevents && epollop->nevents < MAX_NEVENTS) {
+		/* We used all of the event space this time.  We should
+		   be ready for more events next time. */
+		int new_nevents = epollop->nevents * 2;
+		struct epoll_event *new_events;
+
+		new_events = realloc(epollop->events,
+		    new_nevents * sizeof(struct epoll_event));
+		if (new_events) {
+			epollop->events = new_events;
+			epollop->nevents = new_nevents;
+		}
+	}
+
+	return (0);
+}
+
+
+static int
+epoll_add(void *arg, struct event *ev)
+{
+	struct epollop *epollop = arg;
+	struct epoll_event epev = {0, {0}};
+	struct evepoll *evep;
+	int fd, op, events;
+
+	if (ev->ev_events & EV_SIGNAL)
+		return (evsignal_add(ev));
+
+	fd = ev->ev_fd;
+	if (fd >= epollop->nfds) {
+		/* Extend the file descriptor array as necessary */
+		if (epoll_recalc(ev->ev_base, epollop, fd) == -1)
+			return (-1);
+	}
+	evep = &epollop->fds[fd];
+	op = EPOLL_CTL_ADD;
+	events = 0;
+	if (evep->evread != NULL) {
+		events |= EPOLLIN;
+		op = EPOLL_CTL_MOD;
+	}
+	if (evep->evwrite != NULL) {
+		events |= EPOLLOUT;
+		op = EPOLL_CTL_MOD;
+	}
+
+	if (ev->ev_events & EV_READ)
+		events |= EPOLLIN;
+	if (ev->ev_events & EV_WRITE)
+		events |= EPOLLOUT;
+
+	epev.data.fd = fd;
+	epev.events = events;
+	if (epoll_ctl(epollop->epfd, op, ev->ev_fd, &epev) == -1)
+		return (-1);
+
+	/* Update events responsible */
+	if (ev->ev_events & EV_READ)
+		evep->evread = ev;
+	if (ev->ev_events & EV_WRITE)
+		evep->evwrite = ev;
+
+	return (0);
+}
+
+static int
+epoll_del(void *arg, struct event *ev)
+{
+	struct epollop *epollop = arg;
+	struct epoll_event epev = {0, {0}};
+	struct evepoll *evep;
+	int fd, events, op;
+	int needwritedelete = 1, needreaddelete = 1;
+
+	if (ev->ev_events & EV_SIGNAL)
+		return (evsignal_del(ev));
+
+	fd = ev->ev_fd;
+	if (fd >= epollop->nfds)
+		return (0);
+	evep = &epollop->fds[fd];
+
+	op = EPOLL_CTL_DEL;
+	events = 0;
+
+	if (ev->ev_events & EV_READ)
+		events |= EPOLLIN;
+	if (ev->ev_events & EV_WRITE)
+		events |= EPOLLOUT;
+
+	if ((events & (EPOLLIN|EPOLLOUT)) != (EPOLLIN|EPOLLOUT)) {
+		if ((events & EPOLLIN) && evep->evwrite != NULL) {
+			needwritedelete = 0;
+			events = EPOLLOUT;
+			op = EPOLL_CTL_MOD;
+		} else if ((events & EPOLLOUT) && evep->evread != NULL) {
+			needreaddelete = 0;
+			events = EPOLLIN;
+			op = EPOLL_CTL_MOD;
+		}
+	}
+
+	epev.events = events;
+	epev.data.fd = fd;
+
+	if (needreaddelete)
+		evep->evread = NULL;
+	if (needwritedelete)
+		evep->evwrite = NULL;
+
+	if (epoll_ctl(epollop->epfd, op, fd, &epev) == -1)
+		return (-1);
+
+	return (0);
+}
+
+static void
+epoll_dealloc(struct event_base *base, void *arg)
+{
+	struct epollop *epollop = arg;
+
+	evsignal_dealloc(base);
+	if (epollop->fds)
+		free(epollop->fds);
+	if (epollop->events)
+		free(epollop->events);
+	if (epollop->epfd >= 0)
+		close(epollop->epfd);
+
+	memset(epollop, 0, sizeof(struct epollop));
+	free(epollop);
+}
diff --git a/base/third_party/libevent/epoll_sub.c b/base/third_party/libevent/epoll_sub.c
new file mode 100644
index 0000000..431970c
--- /dev/null
+++ b/base/third_party/libevent/epoll_sub.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2003 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <stdint.h>
+
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/syscall.h>
+#include <sys/epoll.h>
+#include <unistd.h>
+
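+/*
+ * These thin wrappers invoke the epoll system calls directly via
+ * syscall(2).  Configure links them in (AC_LIBOBJ(epoll_sub)) only when
+ * the kernel supports epoll but the C library does not yet provide
+ * wrapper functions for it.
+ */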
+int
+epoll_create(int size)
+{
+	return (syscall(__NR_epoll_create, size));
+}
+
+int
+epoll_ctl(int epfd, int op, int fd, struct epoll_event *event)
+{
+	return (syscall(__NR_epoll_ctl, epfd, op, fd, event));
+}
+
+int
+epoll_wait(int epfd, struct epoll_event *events, int maxevents, int timeout)
+{
+	return (syscall(__NR_epoll_wait, epfd, events, maxevents, timeout));
+}
diff --git a/base/third_party/libevent/evbuffer.c b/base/third_party/libevent/evbuffer.c
new file mode 100644
index 0000000..f2179a5
--- /dev/null
+++ b/base/third_party/libevent/evbuffer.c
@@ -0,0 +1,455 @@
+/*
+ * Copyright (c) 2002-2004 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/types.h>
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifdef HAVE_STDARG_H
+#include <stdarg.h>
+#endif
+
+#ifdef WIN32
+#include <winsock2.h>
+#endif
+
+#include "evutil.h"
+#include "event.h"
+
+/* prototypes */
+
+void bufferevent_read_pressure_cb(struct evbuffer *, size_t, size_t, void *);
+
+static int
+bufferevent_add(struct event *ev, int timeout)
+{
+	struct timeval tv, *ptv = NULL;
+
+	if (timeout) {
+		evutil_timerclear(&tv);
+		tv.tv_sec = timeout;
+		ptv = &tv;
+	}
+
+	return (event_add(ev, ptv));
+}
+
+/* 
+ * This callback is executed when the size of the input buffer changes.
+ * We use it to apply back pressure on the reading side.
+ */
+
+void
+bufferevent_read_pressure_cb(struct evbuffer *buf, size_t old, size_t now,
+    void *arg) {
+	struct bufferevent *bufev = arg;
+	/* 
+	 * If we are below the watermark then reschedule reading if it's
+	 * still enabled.
+	 */
+	if (bufev->wm_read.high == 0 || now < bufev->wm_read.high) {
+		evbuffer_setcb(buf, NULL, NULL);
+
+		if (bufev->enabled & EV_READ)
+			bufferevent_add(&bufev->ev_read, bufev->timeout_read);
+	}
+}
+
+static void
+bufferevent_readcb(int fd, short event, void *arg)
+{
+	struct bufferevent *bufev = arg;
+	int res = 0;
+	short what = EVBUFFER_READ;
+	size_t len;
+	int howmuch = -1;
+
+	if (event == EV_TIMEOUT) {
+		what |= EVBUFFER_TIMEOUT;
+		goto error;
+	}
+
+	/*
+	 * If we have a high watermark configured then we don't want to
+	 * read more data than would make us reach the watermark.
+	 */
+	if (bufev->wm_read.high != 0) {
+		howmuch = bufev->wm_read.high - EVBUFFER_LENGTH(bufev->input);
+		/* we might have lowered the watermark, stop reading */
+		if (howmuch <= 0) {
+			struct evbuffer *buf = bufev->input;
+			event_del(&bufev->ev_read);
+			evbuffer_setcb(buf,
+			    bufferevent_read_pressure_cb, bufev);
+			return;
+		}
+	}
+
+	res = evbuffer_read(bufev->input, fd, howmuch);
+	if (res == -1) {
+		if (errno == EAGAIN || errno == EINTR)
+			goto reschedule;
+		/* error case */
+		what |= EVBUFFER_ERROR;
+	} else if (res == 0) {
+		/* eof case */
+		what |= EVBUFFER_EOF;
+	}
+
+	if (res <= 0)
+		goto error;
+
+	bufferevent_add(&bufev->ev_read, bufev->timeout_read);
+
+	/* See if this callback meets the watermarks */
+	len = EVBUFFER_LENGTH(bufev->input);
+	if (bufev->wm_read.low != 0 && len < bufev->wm_read.low)
+		return;
+	if (bufev->wm_read.high != 0 && len >= bufev->wm_read.high) {
+		struct evbuffer *buf = bufev->input;
+		event_del(&bufev->ev_read);
+
+		/* Now schedule a callback for us when the buffer changes */
+		evbuffer_setcb(buf, bufferevent_read_pressure_cb, bufev);
+	}
+
+	/* Invoke the user callback - must always be called last */
+	if (bufev->readcb != NULL)
+		(*bufev->readcb)(bufev, bufev->cbarg);
+	return;
+
+ reschedule:
+	bufferevent_add(&bufev->ev_read, bufev->timeout_read);
+	return;
+
+ error:
+	(*bufev->errorcb)(bufev, what, bufev->cbarg);
+}
+
+static void
+bufferevent_writecb(int fd, short event, void *arg)
+{
+	struct bufferevent *bufev = arg;
+	int res = 0;
+	short what = EVBUFFER_WRITE;
+
+	if (event == EV_TIMEOUT) {
+		what |= EVBUFFER_TIMEOUT;
+		goto error;
+	}
+
+	if (EVBUFFER_LENGTH(bufev->output)) {
+	    res = evbuffer_write(bufev->output, fd);
+	    if (res == -1) {
+#ifndef WIN32
+/* TODO: evbuffer uses WriteFile when WIN32 is set.  WIN32 system calls
+ * do not set errno, so this error checking is not portable. */
+		    if (errno == EAGAIN ||
+			errno == EINTR ||
+			errno == EINPROGRESS)
+			    goto reschedule;
+		    /* error case */
+		    what |= EVBUFFER_ERROR;
+
+#else
+				goto reschedule;
+#endif
+
+	    } else if (res == 0) {
+		    /* eof case */
+		    what |= EVBUFFER_EOF;
+	    }
+	    if (res <= 0)
+		    goto error;
+	}
+
+	if (EVBUFFER_LENGTH(bufev->output) != 0)
+		bufferevent_add(&bufev->ev_write, bufev->timeout_write);
+
+	/*
+	 * Invoke the user callback if our buffer is drained or below the
+	 * low watermark.
+	 */
+	if (bufev->writecb != NULL &&
+	    EVBUFFER_LENGTH(bufev->output) <= bufev->wm_write.low)
+		(*bufev->writecb)(bufev, bufev->cbarg);
+
+	return;
+
+ reschedule:
+	if (EVBUFFER_LENGTH(bufev->output) != 0)
+		bufferevent_add(&bufev->ev_write, bufev->timeout_write);
+	return;
+
+ error:
+	(*bufev->errorcb)(bufev, what, bufev->cbarg);
+}
+
+/*
+ * Create a new buffered event object.
+ *
+ * The read callback is invoked whenever we read new data.
+ * The write callback is invoked whenever the output buffer is drained.
+ * The error callback is invoked on a write/read error or on EOF.
+ *
+ * Both the read and write callbacks may be NULL.  The error callback is
+ * not allowed to be NULL and always has to be provided.
+ */
+
+struct bufferevent *
+bufferevent_new(int fd, evbuffercb readcb, evbuffercb writecb,
+    everrorcb errorcb, void *cbarg)
+{
+	struct bufferevent *bufev;
+
+	if ((bufev = calloc(1, sizeof(struct bufferevent))) == NULL)
+		return (NULL);
+
+	if ((bufev->input = evbuffer_new()) == NULL) {
+		free(bufev);
+		return (NULL);
+	}
+
+	if ((bufev->output = evbuffer_new()) == NULL) {
+		evbuffer_free(bufev->input);
+		free(bufev);
+		return (NULL);
+	}
+
+	event_set(&bufev->ev_read, fd, EV_READ, bufferevent_readcb, bufev);
+	event_set(&bufev->ev_write, fd, EV_WRITE, bufferevent_writecb, bufev);
+
+	bufferevent_setcb(bufev, readcb, writecb, errorcb, cbarg);
+
+	/*
+	 * Set to EV_WRITE so that using bufferevent_write is going to
+	 * trigger a callback.  Reading needs to be explicitly enabled
+	 * because otherwise no data will be available.
+	 */
+	bufev->enabled = EV_WRITE;
+
+	return (bufev);
+}
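+
+/*
+ * Usage sketch (illustrative only; "readcb" and "errorcb" stand for
+ * application-defined callbacks and "fd" for a connected socket):
+ *
+ *	struct bufferevent *bev;
+ *
+ *	bev = bufferevent_new(fd, readcb, NULL, errorcb, NULL);
+ *	bufferevent_enable(bev, EV_READ);
+ *	bufferevent_write(bev, "hello", 5);
+ */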
+
+void
+bufferevent_setcb(struct bufferevent *bufev,
+    evbuffercb readcb, evbuffercb writecb, everrorcb errorcb, void *cbarg)
+{
+	bufev->readcb = readcb;
+	bufev->writecb = writecb;
+	bufev->errorcb = errorcb;
+
+	bufev->cbarg = cbarg;
+}
+
+void
+bufferevent_setfd(struct bufferevent *bufev, int fd)
+{
+	event_del(&bufev->ev_read);
+	event_del(&bufev->ev_write);
+
+	event_set(&bufev->ev_read, fd, EV_READ, bufferevent_readcb, bufev);
+	event_set(&bufev->ev_write, fd, EV_WRITE, bufferevent_writecb, bufev);
+	if (bufev->ev_base != NULL) {
+		event_base_set(bufev->ev_base, &bufev->ev_read);
+		event_base_set(bufev->ev_base, &bufev->ev_write);
+	}
+
+	/* might have to manually trigger event registration */
+}
+
+int
+bufferevent_priority_set(struct bufferevent *bufev, int priority)
+{
+	if (event_priority_set(&bufev->ev_read, priority) == -1)
+		return (-1);
+	if (event_priority_set(&bufev->ev_write, priority) == -1)
+		return (-1);
+
+	return (0);
+}
+
+/* Closing the file descriptor is the responsibility of the caller */
+
+void
+bufferevent_free(struct bufferevent *bufev)
+{
+	event_del(&bufev->ev_read);
+	event_del(&bufev->ev_write);
+
+	evbuffer_free(bufev->input);
+	evbuffer_free(bufev->output);
+
+	free(bufev);
+}
+
+/*
+ * Returns 0 on success;
+ *        -1 on failure.
+ */
+
+int
+bufferevent_write(struct bufferevent *bufev, const void *data, size_t size)
+{
+	int res;
+
+	res = evbuffer_add(bufev->output, data, size);
+
+	if (res == -1)
+		return (res);
+
+	/* If everything is okay, we need to schedule a write */
+	if (size > 0 && (bufev->enabled & EV_WRITE))
+		bufferevent_add(&bufev->ev_write, bufev->timeout_write);
+
+	return (res);
+}
+
+int
+bufferevent_write_buffer(struct bufferevent *bufev, struct evbuffer *buf)
+{
+	int res;
+
+	res = bufferevent_write(bufev, buf->buffer, buf->off);
+	if (res != -1)
+		evbuffer_drain(buf, buf->off);
+
+	return (res);
+}
+
+size_t
+bufferevent_read(struct bufferevent *bufev, void *data, size_t size)
+{
+	struct evbuffer *buf = bufev->input;
+
+	if (buf->off < size)
+		size = buf->off;
+
+	/* Copy the available data to the user buffer */
+	memcpy(data, buf->buffer, size);
+
+	if (size)
+		evbuffer_drain(buf, size);
+
+	return (size);
+}
+
+int
+bufferevent_enable(struct bufferevent *bufev, short event)
+{
+	if (event & EV_READ) {
+		if (bufferevent_add(&bufev->ev_read, bufev->timeout_read) == -1)
+			return (-1);
+	}
+	if (event & EV_WRITE) {
+		if (bufferevent_add(&bufev->ev_write, bufev->timeout_write) == -1)
+			return (-1);
+	}
+
+	bufev->enabled |= event;
+	return (0);
+}
+
+int
+bufferevent_disable(struct bufferevent *bufev, short event)
+{
+	if (event & EV_READ) {
+		if (event_del(&bufev->ev_read) == -1)
+			return (-1);
+	}
+	if (event & EV_WRITE) {
+		if (event_del(&bufev->ev_write) == -1)
+			return (-1);
+	}
+
+	bufev->enabled &= ~event;
+	return (0);
+}
+
+/*
+ * Sets the read and write timeout for a buffered event.
+ */
+
+void
+bufferevent_settimeout(struct bufferevent *bufev,
+    int timeout_read, int timeout_write) {
+	bufev->timeout_read = timeout_read;
+	bufev->timeout_write = timeout_write;
+
+	if (event_pending(&bufev->ev_read, EV_READ, NULL))
+		bufferevent_add(&bufev->ev_read, timeout_read);
+	if (event_pending(&bufev->ev_write, EV_WRITE, NULL))
+		bufferevent_add(&bufev->ev_write, timeout_write);
+}
+
+/*
+ * Sets the watermarks.
+ */
+
+void
+bufferevent_setwatermark(struct bufferevent *bufev, short events,
+    size_t lowmark, size_t highmark)
+{
+	if (events & EV_READ) {
+		bufev->wm_read.low = lowmark;
+		bufev->wm_read.high = highmark;
+	}
+
+	if (events & EV_WRITE) {
+		bufev->wm_write.low = lowmark;
+		bufev->wm_write.high = highmark;
+	}
+
+	/* If the watermarks changed then see if we should call read again */
+	bufferevent_read_pressure_cb(bufev->input,
+	    0, EVBUFFER_LENGTH(bufev->input), bufev);
+}
+
+int
+bufferevent_base_set(struct event_base *base, struct bufferevent *bufev)
+{
+	int res;
+
+	bufev->ev_base = base;
+
+	res = event_base_set(base, &bufev->ev_read);
+	if (res == -1)
+		return (res);
+
+	res = event_base_set(base, &bufev->ev_write);
+	return (res);
+}
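+
+/*
+ * Usage sketch (illustrative only; example_readcb and example_setup are
+ * hypothetical names, and error handling is omitted): a typical client
+ * wraps an already-connected non-blocking fd, enables reading, and
+ * writes data.
+ *
+ *	static void
+ *	example_readcb(struct bufferevent *bev, void *arg)
+ *	{
+ *		char buf[256];
+ *		size_t n = bufferevent_read(bev, buf, sizeof(buf));
+ *		// consume up to n bytes from buf
+ *	}
+ *
+ *	static void
+ *	example_setup(int fd)
+ *	{
+ *		struct bufferevent *bev =
+ *		    bufferevent_new(fd, example_readcb, NULL, NULL, NULL);
+ *		bufferevent_enable(bev, EV_READ);    // reading is off by default
+ *		bufferevent_write(bev, "hello", 5);  // writing is on by default
+ *	}
+ */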
diff --git a/base/third_party/libevent/evdns.3 b/base/third_party/libevent/evdns.3
new file mode 100644
index 0000000..10414fa
--- /dev/null
+++ b/base/third_party/libevent/evdns.3
@@ -0,0 +1,322 @@
+.\"
+.\" Copyright (c) 2006 Niels Provos <provos@citi.umich.edu>
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\"
+.\" 1. Redistributions of source code must retain the above copyright
+.\"    notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\"    notice, this list of conditions and the following disclaimer in the
+.\"    documentation and/or other materials provided with the distribution.
+.\" 3. The name of the author may not be used to endorse or promote products
+.\"    derived from this software without specific prior written permission.
+.\"
+.\" THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
+.\" INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+.\" AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+.\" THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+.\" EXEMPLARY, OR CONSEQUENTIAL  DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+.\" PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+.\" OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+.\" WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+.\" OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+.\" ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+.\"
+.Dd October 7, 2006
+.Dt EVDNS 3
+.Os
+.Sh NAME
+.Nm evdns_init
+.Nm evdns_shutdown
+.Nm evdns_err_to_string
+.Nm evdns_nameserver_add
+.Nm evdns_count_nameservers
+.Nm evdns_clear_nameservers_and_suspend
+.Nm evdns_resume
+.Nm evdns_nameserver_ip_add
+.Nm evdns_resolve_ipv4
+.Nm evdns_resolve_reverse
+.Nm evdns_resolv_conf_parse
+.Nm evdns_config_windows_nameservers
+.Nm evdns_search_clear
+.Nm evdns_search_add
+.Nm evdns_search_ndots_set
+.Nm evdns_set_log_fn
+.Nd asynchronous functions for DNS resolution.
+.Sh SYNOPSIS
+.Fd #include <sys/time.h>
+.Fd #include <event.h>
+.Fd #include <evdns.h>
+.Ft int
+.Fn evdns_init
+.Ft void
+.Fn evdns_shutdown "int fail_requests"
+.Ft "const char *"
+.Fn evdns_err_to_string "int err"
+.Ft int
+.Fn evdns_nameserver_add "unsigned long int address"
+.Ft int
+.Fn evdns_count_nameservers
+.Ft int
+.Fn evdns_clear_nameservers_and_suspend
+.Ft int
+.Fn evdns_resume
+.Ft int
+.Fn evdns_nameserver_ip_add "const char *ip_as_string"
+.Ft int
+.Fn evdns_resolve_ipv4 "const char *name" "int flags" "evdns_callback_type callback" "void *ptr"
+.Ft int
+.Fn evdns_resolve_reverse "struct in_addr *in" "int flags" "evdns_callback_type callback" "void *ptr"
+.Ft int
+.Fn evdns_resolv_conf_parse "int flags" "const char *"
+.Ft void
+.Fn evdns_search_clear
+.Ft void
+.Fn evdns_search_add "const char *domain"
+.Ft void
+.Fn evdns_search_ndots_set "const int ndots"
+.Ft void
+.Fn evdns_set_log_fn "evdns_debug_log_fn_type fn"
+.Ft int
+.Fn evdns_config_windows_nameservers
+.Sh DESCRIPTION
+Welcome, gentle reader.
+.Pp
+Async DNS lookups are really a whole lot harder than they should be,
+mostly stemming from the fact that the libc resolver has never been
+very good at them. Before you use this library you should see if libc
+can do the job for you with the modern async call getaddrinfo_a
+(see http://www.imperialviolet.org/page25.html#e498). Otherwise,
+please continue.
+.Pp
+This code is based on libevent and you must call event_init before
+any of the APIs in this file. You must also seed the OpenSSL random
+source if you are using OpenSSL for ids (see below).
+.Pp
+This library is designed to be included and shipped with your source
+code. You statically link with it. You should also test for the
+existence of strtok_r and define HAVE_STRTOK_R if you have it.
+.Pp
+The DNS protocol requires a good source of id numbers and these
+numbers should be unpredictable for spoofing reasons. There are
+three methods for generating them here and you must define exactly
+one of them. In increasing order of preference:
+.Pp
+.Bl -tag -width "DNS_USE_GETTIMEOFDAY_FOR_ID" -compact -offset indent
+.It DNS_USE_GETTIMEOFDAY_FOR_ID
+Using the bottom 16 bits of the usec result from gettimeofday. This
+is a pretty poor solution but should work anywhere.
+.It DNS_USE_CPU_CLOCK_FOR_ID
+Using the bottom 16 bits of the nsec result from the CPU's time
+counter. This is better, but may not work everywhere. Requires
+POSIX realtime support and you'll need to link against -lrt on
+glibc systems at least.
+.It DNS_USE_OPENSSL_FOR_ID
+Uses the OpenSSL RAND_bytes call to generate the data. You must
+have seeded the pool before making any calls to this library.
+.El
+.Pp
+The library keeps track of the state of nameservers and will avoid
+them when they go down. Otherwise it will round robin between them.
+.Pp
+Quick start guide:
+  #include "evdns.h"
+  void callback(int result, char type, int count, int ttl,
+	 void *addresses, void *arg);
+  evdns_resolv_conf_parse(DNS_OPTIONS_ALL, "/etc/resolv.conf");
+  evdns_resolve("www.hostname.com", 0, callback, NULL);
+.Pp
+When the lookup is complete the callback function is called. The
+first argument will be one of the DNS_ERR_* defines in evdns.h.
+Hopefully it will be DNS_ERR_NONE, in which case type will be
+DNS_IPv4_A, count will be the number of IP addresses, ttl is the time
+(in seconds) for which the data can be cached, addresses will point
+to an array of uint32_t's and arg will be whatever you passed to
+evdns_resolve.
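+.Pp
+For example, a minimal callback (a sketch only; error handling omitted)
+that prints each returned IPv4 address might look like:
+  void callback(int result, char type, int count, int ttl,
+	 void *addresses, void *arg) {
+	  struct in_addr *addrs = addresses;
+	  int i;
+	  if (result != DNS_ERR_NONE || type != DNS_IPv4_A)
+		  return;
+	  for (i = 0; i < count; ++i)
+		  printf("%s (ttl %d)\n", inet_ntoa(addrs[i]), ttl);
+  }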
+.Pp
+Searching:
+.Pp
+In order for this library to be a good replacement for glibc's resolver it
+supports searching. This involves setting a list of default domains in
+which names will be queried. The number of dots in the query name
+determines the order in which this list is used.
+.Pp
+Searching appears to be a single lookup from the point of view of the API,
+although many DNS queries may be generated from a single call to
+evdns_resolve. Searching can also drastically slow down the resolution
+of names.
+.Pp
+To disable searching:
+.Bl -enum -compact -offset indent
+.It
+Never set it up. If you never call
+.Fn evdns_resolv_conf_parse ,
+.Fn evdns_init ,
+or
+.Fn evdns_search_add
+then no searching will occur.
+.It
+If you do call
+.Fn evdns_resolv_conf_parse
+then don't pass
+.Va DNS_OPTION_SEARCH
+(or
+.Va DNS_OPTIONS_ALL ,
+which implies it).
+.It
+When calling
+.Fn evdns_resolve ,
+pass the
+.Va DNS_QUERY_NO_SEARCH
+flag.
+.El
+.Pp
+The order of searches depends on the number of dots in the name. If the
+number is greater than the ndots setting then the name is first tried
+globally. Otherwise each search domain is appended in turn.
+.Pp
+The ndots setting can either be set from a resolv.conf, or by calling
+evdns_search_ndots_set.
+.Pp
+For example, with ndots set to 1 (the default) and a search domain list of
+["myhome.net"]:
+ Query: www
+ Order: www.myhome.net, www.
+.Pp
+ Query: www.abc
+ Order: www.abc., www.abc.myhome.net
+.Pp
+.Sh API reference
+.Pp
+.Bl -tag -width 0123456
+.It Ft int Fn evdns_init
+Initializes support for non-blocking name resolution by calling
+.Fn evdns_resolv_conf_parse
+on UNIX and
+.Fn evdns_config_windows_nameservers
+on Windows.
+.It Ft int Fn evdns_nameserver_add "unsigned long int address"
+Add a nameserver. The address should be an IP address in
+network byte order. The type of address is chosen so that
+it matches in_addr.s_addr.
+Returns non-zero on error.
+.It Ft int Fn evdns_nameserver_ip_add "const char *ip_as_string"
+This wraps the above function: it parses a string as an IP
+address and adds it as a nameserver.
+Returns non-zero on error.
+.It Ft int Fn evdns_resolve "const char *name" "int flags" "evdns_callback_type callback" "void *ptr"
+Resolve a name. The name parameter should be a DNS name.
+The flags parameter should be 0, or DNS_QUERY_NO_SEARCH
+which disables searching for this query (see the description of
+searching above).
+.Pp
+The callback argument is a function which is called when
+this query completes and ptr is an argument which is passed
+to that callback function.
+.Pp
+Returns non-zero on error.
+.It Ft void Fn evdns_search_clear
+Clears the list of search domains.
+.It Ft void Fn evdns_search_add "const char *domain"
+Add a domain to the list of search domains.
+.It Ft void Fn evdns_search_ndots_set "int ndots"
+Set the number of dots which, when found in a name, causes
+the first query to be without any search domain.
+.It Ft int Fn evdns_count_nameservers "void"
+Return the number of configured nameservers (not necessarily the
+number of running nameservers).  This is useful for double-checking
+whether our calls to the various nameserver configuration functions
+have been successful.
+.It Ft int Fn evdns_clear_nameservers_and_suspend "void"
+Remove all currently configured nameservers, and suspend all pending
+resolves.  Resolves will not necessarily be re-attempted until
+evdns_resume() is called.
+.It Ft int Fn evdns_resume "void"
+Re-attempt resolves left in limbo after an earlier call to
+evdns_clear_nameservers_and_suspend().
+.It Ft int Fn evdns_config_windows_nameservers "void"
+Attempt to configure a set of nameservers based on platform settings on
+a win32 host.  Preferentially tries to use GetNetworkParams; if that fails,
+looks in the registry.  Returns 0 on success, nonzero on failure.
+.It Ft int Fn evdns_resolv_conf_parse "int flags" "const char *filename"
+Parse a resolv.conf like file from the given filename.
+.Pp
+See the man page for resolv.conf for the format of this file.
+The flags argument determines what information is parsed from
+this file:
+.Bl -tag -width "DNS_OPTION_NAMESERVERS" -offset indent -compact -nested
+.It DNS_OPTION_SEARCH
+domain, search and ndots options
+.It DNS_OPTION_NAMESERVERS
+nameserver lines
+.It DNS_OPTION_MISC
+timeout and attempts options
+.It DNS_OPTIONS_ALL
+all of the above
+.El
+.Pp
+The following directives are not parsed from the file:
+  sortlist, rotate, no-check-names, inet6, debug
+.Pp
+Returns non-zero on error:
+.Bl -tag -width "0" -offset indent -compact -nested
+.It 0
+no errors
+.It 1
+failed to open file
+.It 2
+failed to stat file
+.It 3
+file too large
+.It 4
+out of memory
+.It 5
+short read from file
+.El
+.El
+.Sh Internals
+Requests are kept in two queues. The first is the inflight queue. In
+this queue requests have an allocated transaction id and nameserver.
+They will soon be transmitted if they haven't already been.
+.Pp
+The second is the waiting queue. The size of the inflight ring is
+limited and all other requests wait in the waiting queue for space. This
+bounds the number of concurrent requests so that we don't flood the
+nameserver. Several algorithms require a full walk of the inflight
+queue and so bounding its size keeps things going nicely under huge
+(many thousands of requests) loads.
+.Pp
+If a nameserver loses too many requests it is considered down and we
+try not to use it. After a while we send a probe to that nameserver
+(a lookup for google.com) and, if it replies, we consider it working
+again. If the nameserver fails a probe we wait longer to try again
+with the next probe.
+.Sh SEE ALSO
+.Xr event 3 ,
+.Xr gethostbyname 3 ,
+.Xr resolv.conf 5
+.Sh HISTORY
+The
+.Nm evdns
+API was developed by Adam Langley on top of the
+.Nm libevent
+API.
+The code was integrated into
+.Nm Tor
+by Nick Mathewson and finally put into
+.Nm libevent
+itself by Niels Provos.
+.Sh AUTHORS
+The
+.Nm evdns
+API and code were written by Adam Langley with significant
+contributions by Nick Mathewson.
+.Sh BUGS
+This documentation is neither complete nor authoritative.
+If you are in doubt about the usage of this API then
+check the source code to find out how it works, write
+up the missing piece of documentation and send it to
+me for inclusion in this man page.
diff --git a/base/third_party/libevent/evdns.c b/base/third_party/libevent/evdns.c
new file mode 100644
index 0000000..05fe594
--- /dev/null
+++ b/base/third_party/libevent/evdns.c
@@ -0,0 +1,3192 @@
+/* $Id: evdns.c 6979 2006-08-04 18:31:13Z nickm $ */
+
+/* The original version of this module was written by Adam Langley; for
+ * a history of modifications, check out the subversion logs.
+ *
+ * When editing this module, try to keep it re-mergeable by Adam.  Don't
+ * reformat the whitespace, add Tor dependencies, or so on.
+ *
+ * TODO:
+ *   - Support IPv6 and PTR records.
+ *   - Replace all externally visible magic numbers with #defined constants.
+ *   - Write documentation for the APIs of all external functions.
+ */
+
+/* Async DNS Library
+ * Adam Langley <agl@imperialviolet.org>
+ * http://www.imperialviolet.org/eventdns.html
+ * Public Domain code
+ *
+ * This software is Public Domain. To view a copy of the public domain dedication,
+ * visit http://creativecommons.org/licenses/publicdomain/ or send a letter to
+ * Creative Commons, 559 Nathan Abbott Way, Stanford, California 94305, USA.
+ *
+ * I ask and expect, but do not require, that all derivative works contain an
+ * attribution similar to:
+ * 	Parts developed by Adam Langley <agl@imperialviolet.org>
+ *
+ * You may wish to replace the word "Parts" with something else depending on
+ * the amount of original code.
+ *
+ * (Derivative works does not include programs which link against, run or include
+ * the source verbatim in their source distributions)
+ *
+ * Version: 0.1b
+ */
+
+#include <sys/types.h>
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#ifdef DNS_USE_FTIME_FOR_ID
+#include <sys/timeb.h>
+#endif
+
+#ifndef DNS_USE_CPU_CLOCK_FOR_ID
+#ifndef DNS_USE_GETTIMEOFDAY_FOR_ID
+#ifndef DNS_USE_OPENSSL_FOR_ID
+#ifndef DNS_USE_FTIME_FOR_ID
+#error Must configure at least one id generation method.
+#error Please see the documentation.
+#endif
+#endif
+#endif
+#endif
+
+/* #define _POSIX_C_SOURCE 200507 */
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+
+#ifdef DNS_USE_CPU_CLOCK_FOR_ID
+#ifdef DNS_USE_OPENSSL_FOR_ID
+#error Multiple id options selected
+#endif
+#ifdef DNS_USE_GETTIMEOFDAY_FOR_ID
+#error Multiple id options selected
+#endif
+#include <time.h>
+#endif
+
+#ifdef DNS_USE_OPENSSL_FOR_ID
+#ifdef DNS_USE_GETTIMEOFDAY_FOR_ID
+#error Multiple id options selected
+#endif
+#include <openssl/rand.h>
+#endif
+
+#ifndef _FORTIFY_SOURCE
+#define _FORTIFY_SOURCE 3
+#endif
+
+#include <string.h>
+#include <fcntl.h>
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#ifdef HAVE_STDINT_H
+#include <stdint.h>
+#endif
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <assert.h>
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#include <limits.h>
+#include <sys/stat.h>
+#include <ctype.h>
+#include <stdio.h>
+#include <stdarg.h>
+
+#include "evdns.h"
+#include "evutil.h"
+#include "log.h"
+#ifdef WIN32
+#include <winsock2.h>
+#include <windows.h>
+#include <iphlpapi.h>
+#include <io.h>
+#else
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#endif
+
+#ifdef HAVE_NETINET_IN6_H
+#include <netinet/in6.h>
+#endif
+
+#define EVDNS_LOG_DEBUG 0
+#define EVDNS_LOG_WARN 1
+
+#ifndef HOST_NAME_MAX
+#define HOST_NAME_MAX 255
+#endif
+
+#include <stdio.h>
+
+#undef MIN
+#define MIN(a,b) ((a)<(b)?(a):(b))
+
+#ifdef __USE_ISOC99B
+/* libevent doesn't work without this */
+typedef ev_uint8_t u_char;
+typedef unsigned int uint;
+#endif
+#include "event.h"
+
+#define u64 ev_uint64_t
+#define u32 ev_uint32_t
+#define u16 ev_uint16_t
+#define u8  ev_uint8_t
+
+#ifdef WIN32
+#define open _open
+#define read _read
+#define close _close
+#define strdup _strdup
+#endif
+
+#define MAX_ADDRS 32  /* maximum number of addresses from a single packet */
+/* which we bother recording */
+
+#define TYPE_A         EVDNS_TYPE_A
+#define TYPE_CNAME     5
+#define TYPE_PTR       EVDNS_TYPE_PTR
+#define TYPE_AAAA      EVDNS_TYPE_AAAA
+
+#define CLASS_INET     EVDNS_CLASS_INET
+
+#ifdef HAVE_SETFD
+#define FD_CLOSEONEXEC(x) do { \
+	if (fcntl(x, F_SETFD, 1) == -1) \
+		event_warn("fcntl(%d, F_SETFD)", x); \
+	} while (0)
+#else
+#define FD_CLOSEONEXEC(x) (void)0
+#endif
+
+struct request {
+	u8 *request;  /* the dns packet data */
+	unsigned int request_len;
+	int reissue_count;
+	int tx_count;  /* the number of times that this packet has been sent */
+	unsigned int request_type; /* TYPE_PTR or TYPE_A */
+	void *user_pointer;  /* the pointer given to us for this request */
+	evdns_callback_type user_callback;
+	struct nameserver *ns;  /* the server which we last sent it */
+
+	/* elements used by the searching code */
+	int search_index;
+	struct search_state *search_state;
+	char *search_origname;  /* needs to be free()ed */
+	int search_flags;
+
+	/* these objects are kept in a circular list */
+	struct request *next, *prev;
+
+	struct event timeout_event;
+
+	u16 trans_id;  /* the transaction id */
+	char request_appended;  /* true if the request pointer is data which follows this struct */
+	char transmit_me;  /* needs to be transmitted */
+};
+
+#ifndef HAVE_STRUCT_IN6_ADDR
+struct in6_addr {
+	u8 s6_addr[16];
+};
+#endif
+
+struct reply {
+	unsigned int type;
+	unsigned int have_answer;
+	union {
+		struct {
+			u32 addrcount;
+			u32 addresses[MAX_ADDRS];
+		} a;
+		struct {
+			u32 addrcount;
+			struct in6_addr addresses[MAX_ADDRS];
+		} aaaa;
+		struct {
+			char name[HOST_NAME_MAX];
+		} ptr;
+	} data;
+};
+
+struct nameserver {
+	int socket;  /* a connected UDP socket */
+	u32 address;
+	u16 port;
+	int failed_times;  /* number of times which we have given this server a chance */
+	int timedout;  /* number of times in a row a request has timed out */
+	struct event event;
+	/* these objects are kept in a circular list */
+	struct nameserver *next, *prev;
+	struct event timeout_event;  /* used to keep the timeout for */
+				     /* when we next probe this server. */
+				     /* Valid if state == 0 */
+	char state;  /* zero if we think that this server is down */
+	char choked;  /* true if we have an EAGAIN from this server's socket */
+	char write_waiting;  /* true if we are waiting for EV_WRITE events */
+};
+
+static struct request *req_head = NULL, *req_waiting_head = NULL;
+static struct nameserver *server_head = NULL;
+
+/* Represents a local port where we're listening for DNS requests. Right now, */
+/* only UDP is supported. */
+struct evdns_server_port {
+	int socket; /* socket we use to read queries and write replies. */
+	int refcnt; /* reference count. */
+	char choked; /* Are we currently blocked from writing? */
+	char closing; /* Are we trying to close this port, pending writes? */
+	evdns_request_callback_fn_type user_callback; /* Fn to handle requests */
+	void *user_data; /* Opaque pointer passed to user_callback */
+	struct event event; /* Read/write event */
+	/* circular list of replies that we want to write. */
+	struct server_request *pending_replies;
+};
+
+/* Represents part of a reply being built.	(That is, a single RR.) */
+struct server_reply_item {
+	struct server_reply_item *next; /* next item in sequence. */
+	char *name; /* name part of the RR */
+	u16 type : 16; /* The RR type */
+	u16 dns_question_class : 16; /* The RR class (usually CLASS_INET) */
+	u32 ttl; /* The RR TTL */
+	char is_name; /* True iff data is a label */
+	u16 datalen; /* Length of data; -1 if data is a label */
+	void *data; /* The contents of the RR */
+};
+
+/* Represents a request that we've received as a DNS server, and holds */
+/* the components of the reply as we're constructing it. */
+struct server_request {
+	/* Pointers to the next and previous entries on the list of replies */
+	/* that we're waiting to write.	 Only set if we have tried to respond */
+	/* and gotten EAGAIN. */
+	struct server_request *next_pending;
+	struct server_request *prev_pending;
+
+	u16 trans_id; /* Transaction id. */
+	struct evdns_server_port *port; /* Which port received this request on? */
+	struct sockaddr_storage addr; /* Where to send the response */
+	socklen_t addrlen; /* length of addr */
+
+	int n_answer; /* how many answer RRs have been set? */
+	int n_authority; /* how many authority RRs have been set? */
+	int n_additional; /* how many additional RRs have been set? */
+
+	struct server_reply_item *answer; /* linked list of answer RRs */
+	struct server_reply_item *authority; /* linked list of authority RRs */
+	struct server_reply_item *additional; /* linked list of additional RRs */
+
+	/* Constructed response.  Only set once we're ready to send a reply. */
+	/* Once this is set, the RR fields are cleared, and no more should be set. */
+	char *response;
+	size_t response_len;
+
+	/* Caller-visible fields: flags, questions. */
+	struct evdns_server_request base;
+};
+
+/* helper macro */
+#define OFFSET_OF(st, member) ((off_t) (((char*)&((st*)0)->member)-(char*)0))
+
+/* Given a pointer to an evdns_server_request, get the corresponding */
+/* server_request. */
+#define TO_SERVER_REQUEST(base_ptr)										\
+	((struct server_request*)											\
+	 (((char*)(base_ptr) - OFFSET_OF(struct server_request, base))))
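+
+/* (This is the usual "container_of" idiom: given the pointer to the */
+/* embedded base field that is handed to user callbacks, it recovers */
+/* the enclosing server_request itself.) */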
+
+/* The number of good nameservers that we have */
+static int global_good_nameservers = 0;
+
+/* inflight requests are contained in the req_head list */
+/* and are actually going out across the network */
+static int global_requests_inflight = 0;
+/* requests which aren't inflight are in the waiting list */
+/* and are counted here */
+static int global_requests_waiting = 0;
+
+static int global_max_requests_inflight = 64;
+
+static struct timeval global_timeout = {5, 0};  /* 5 seconds */
+static int global_max_reissues = 1;  /* a reissue occurs when we get some errors from the server */
+static int global_max_retransmits = 3;  /* number of times we'll retransmit a request which timed out */
+/* number of timeouts in a row before we consider this server to be down */
+static int global_max_nameserver_timeout = 3;
+
+/* These are the timeout values for nameservers. If we find a nameserver is down */
+/* we try to probe it at intervals as given below. Values are in seconds. */
+static const struct timeval global_nameserver_timeouts[] = {{10, 0}, {60, 0}, {300, 0}, {900, 0}, {3600, 0}};
+static const int global_nameserver_timeouts_length = sizeof(global_nameserver_timeouts)/sizeof(struct timeval);
+
+static struct nameserver *nameserver_pick(void);
+static void evdns_request_insert(struct request *req, struct request **head);
+static void nameserver_ready_callback(int fd, short events, void *arg);
+static int evdns_transmit(void);
+static int evdns_request_transmit(struct request *req);
+static void nameserver_send_probe(struct nameserver *const ns);
+static void search_request_finished(struct request *const);
+static int search_try_next(struct request *const req);
+static int search_request_new(int type, const char *const name, int flags, evdns_callback_type user_callback, void *user_arg);
+static void evdns_requests_pump_waiting_queue(void);
+static u16 transaction_id_pick(void);
+static struct request *request_new(int type, const char *name, int flags, evdns_callback_type callback, void *ptr);
+static void request_submit(struct request *const req);
+
+static int server_request_free(struct server_request *req);
+static void server_request_free_answers(struct server_request *req);
+static void server_port_free(struct evdns_server_port *port);
+static void server_port_ready_callback(int fd, short events, void *arg);
+
+static int strtoint(const char *const str);
+
+#ifdef WIN32
+static int
+last_error(int sock)
+{
+	int optval, optvallen=sizeof(optval);
+	int err = WSAGetLastError();
+	if (err == WSAEWOULDBLOCK && sock >= 0) {
+		if (getsockopt(sock, SOL_SOCKET, SO_ERROR, (void*)&optval,
+			       &optvallen))
+			return err;
+		if (optval)
+			return optval;
+	}
+	return err;
+
+}
+static int
+error_is_eagain(int err)
+{
+	return err == EAGAIN || err == WSAEWOULDBLOCK;
+}
+static int
+inet_aton(const char *c, struct in_addr *addr)
+{
+	ev_uint32_t r;
+	if (strcmp(c, "255.255.255.255") == 0) {
+		addr->s_addr = 0xffffffffu;
+	} else {
+		r = inet_addr(c);
+		if (r == INADDR_NONE)
+			return 0;
+		addr->s_addr = r;
+	}
+	return 1;
+}
+#else
+#define last_error(sock) (errno)
+#define error_is_eagain(err) ((err) == EAGAIN)
+#endif
+#define CLOSE_SOCKET(s) EVUTIL_CLOSESOCKET(s)
+
+#define ISSPACE(c) isspace((int)(unsigned char)(c))
+#define ISDIGIT(c) isdigit((int)(unsigned char)(c))
+
+static const char *
+debug_ntoa(u32 address)
+{
+	static char buf[32];
+	u32 a = ntohl(address);
+	evutil_snprintf(buf, sizeof(buf), "%d.%d.%d.%d",
+	    (int)(u8)((a>>24)&0xff),
+	    (int)(u8)((a>>16)&0xff),
+	    (int)(u8)((a>>8 )&0xff),
+	    (int)(u8)((a    )&0xff));
+	return buf;
+}
+
+static evdns_debug_log_fn_type evdns_log_fn = NULL;
+
+void
+evdns_set_log_fn(evdns_debug_log_fn_type fn)
+{
+  evdns_log_fn = fn;
+}
+
+#ifdef __GNUC__
+#define EVDNS_LOG_CHECK  __attribute__ ((format(printf, 2, 3)))
+#else
+#define EVDNS_LOG_CHECK
+#endif
+
+static void _evdns_log(int warn, const char *fmt, ...) EVDNS_LOG_CHECK;
+static void
+_evdns_log(int warn, const char *fmt, ...)
+{
+  va_list args;
+  static char buf[512];
+  if (!evdns_log_fn)
+    return;
+  va_start(args,fmt);
+  evutil_vsnprintf(buf, sizeof(buf), fmt, args);
+  buf[sizeof(buf)-1] = '\0';
+  evdns_log_fn(warn, buf);
+  va_end(args);
+}
+
+#define log _evdns_log
+
+/* This walks the list of inflight requests to find the */
+/* one with a matching transaction id. Returns NULL on */
+/* failure */
+static struct request *
+request_find_from_trans_id(u16 trans_id) {
+	struct request *req = req_head, *const started_at = req_head;
+
+	if (req) {
+		do {
+			if (req->trans_id == trans_id) return req;
+			req = req->next;
+		} while (req != started_at);
+	}
+
+	return NULL;
+}
+
+/* a libevent callback function which is called when a nameserver */
+/* has gone down and we want to test if it has come back to life yet */
+static void
+nameserver_prod_callback(int fd, short events, void *arg) {
+	struct nameserver *const ns = (struct nameserver *) arg;
+        (void)fd;
+        (void)events;
+
+	nameserver_send_probe(ns);
+}
+
+/* a libevent callback which is called when a nameserver probe (to see if */
+/* it has come back to life) times out. We increment the count of failed_times */
+/* and wait longer to send the next probe packet. */
+static void
+nameserver_probe_failed(struct nameserver *const ns) {
+	const struct timeval * timeout;
+	(void) evtimer_del(&ns->timeout_event);
+	if (ns->state == 1) {
+		/* This can happen if the nameserver acts in a way which makes us mark */
+		/* it as bad and then starts sending good replies. */
+		return;
+	}
+
+	timeout =
+	  &global_nameserver_timeouts[MIN(ns->failed_times,
+					  global_nameserver_timeouts_length - 1)];
+	ns->failed_times++;
+
+	if (evtimer_add(&ns->timeout_event, (struct timeval *) timeout) < 0) {
+          log(EVDNS_LOG_WARN,
+              "Error from libevent when adding timer event for %s",
+              debug_ntoa(ns->address));
+          /* ???? Do more? */
+        }
+}
+
+/* called when a nameserver has been deemed to have failed. For example, too */
+/* many packets have timed out etc */
+static void
+nameserver_failed(struct nameserver *const ns, const char *msg) {
+	struct request *req, *started_at;
+	/* if this nameserver has already been marked as failed */
+	/* then don't do anything */
+	if (!ns->state) return;
+
+	log(EVDNS_LOG_WARN, "Nameserver %s has failed: %s",
+            debug_ntoa(ns->address), msg);
+	global_good_nameservers--;
+	assert(global_good_nameservers >= 0);
+	if (global_good_nameservers == 0) {
+		log(EVDNS_LOG_WARN, "All nameservers have failed");
+	}
+
+	ns->state = 0;
+	ns->failed_times = 1;
+
+	if (evtimer_add(&ns->timeout_event, (struct timeval *) &global_nameserver_timeouts[0]) < 0) {
+		log(EVDNS_LOG_WARN,
+		    "Error from libevent when adding timer event for %s",
+		    debug_ntoa(ns->address));
+		/* ???? Do more? */
+        }
+
+	/* walk the list of inflight requests to see if any can be reassigned to */
+	/* a different server. Requests in the waiting queue don't have a */
+	/* nameserver assigned yet */
+
+	/* if we don't have *any* good nameservers then there's no point */
+	/* trying to reassign requests to one */
+	if (!global_good_nameservers) return;
+
+	req = req_head;
+	started_at = req_head;
+	if (req) {
+		do {
+			if (req->tx_count == 0 && req->ns == ns) {
+				/* still waiting to go out, can be moved */
+				/* to another server */
+				req->ns = nameserver_pick();
+			}
+			req = req->next;
+		} while (req != started_at);
+	}
+}
+
+static void
+nameserver_up(struct nameserver *const ns) {
+	if (ns->state) return;
+	log(EVDNS_LOG_WARN, "Nameserver %s is back up",
+	    debug_ntoa(ns->address));
+	evtimer_del(&ns->timeout_event);
+	ns->state = 1;
+	ns->failed_times = 0;
+	ns->timedout = 0;
+	global_good_nameservers++;
+}
+
+static void
+request_trans_id_set(struct request *const req, const u16 trans_id) {
+	req->trans_id = trans_id;
+	*((u16 *) req->request) = htons(trans_id);
+}
+
+/* Called to remove a request from a list and dealloc it. */
+/* head is a pointer to the head of the list it should be */
+/* removed from or NULL if the request isn't in a list. */
+static void
+request_finished(struct request *const req, struct request **head) {
+	if (head) {
+		if (req->next == req) {
+			/* only item in the list */
+			*head = NULL;
+		} else {
+			req->next->prev = req->prev;
+			req->prev->next = req->next;
+			if (*head == req) *head = req->next;
+		}
+	}
+
+	log(EVDNS_LOG_DEBUG, "Removing timeout for request %lx",
+	    (unsigned long) req);
+	evtimer_del(&req->timeout_event);
+
+	search_request_finished(req);
+	global_requests_inflight--;
+
+	if (!req->request_appended) {
+		/* need to free the request data on its own */
+		free(req->request);
+	} else {
+		/* the request data is appended onto the header */
+		/* so everything gets free()ed when we free the request below */
+	}
+
+	free(req);
+
+	evdns_requests_pump_waiting_queue();
+}
+
+/* This is called when a server returns a funny error code. */
+/* We try the request again with another server. */
+/* */
+/* return: */
+/*   0 ok */
+/*   1 failed/reissue is pointless */
+static int
+request_reissue(struct request *req) {
+	const struct nameserver *const last_ns = req->ns;
+	/* the last nameserver should have been marked as failing */
+	/* by the caller of this function, therefore pick will try */
+	/* not to return it */
+	req->ns = nameserver_pick();
+	if (req->ns == last_ns) {
+		/* ... but pick did return it */
+		/* not a lot of point in trying again with the */
+		/* same server */
+		return 1;
+	}
+
+	req->reissue_count++;
+	req->tx_count = 0;
+	req->transmit_me = 1;
+
+	return 0;
+}
+
+/* this function looks for space on the inflight queue and promotes */
+/* requests from the waiting queue if it can. */
+static void
+evdns_requests_pump_waiting_queue(void) {
+	while (global_requests_inflight < global_max_requests_inflight &&
+	    global_requests_waiting) {
+		struct request *req;
+		/* move a request from the waiting queue to the inflight queue */
+		assert(req_waiting_head);
+		if (req_waiting_head->next == req_waiting_head) {
+			/* only one item in the queue */
+			req = req_waiting_head;
+			req_waiting_head = NULL;
+		} else {
+			req = req_waiting_head;
+			req->next->prev = req->prev;
+			req->prev->next = req->next;
+			req_waiting_head = req->next;
+		}
+
+		global_requests_waiting--;
+		global_requests_inflight++;
+
+		req->ns = nameserver_pick();
+		request_trans_id_set(req, transaction_id_pick());
+
+		evdns_request_insert(req, &req_head);
+		evdns_request_transmit(req);
+		evdns_transmit();
+	}
+}
+
+static void
+reply_callback(struct request *const req, u32 ttl, u32 err, struct reply *reply) {
+	switch (req->request_type) {
+	case TYPE_A:
+		if (reply)
+			req->user_callback(DNS_ERR_NONE, DNS_IPv4_A,
+							   reply->data.a.addrcount, ttl,
+						 reply->data.a.addresses,
+							   req->user_pointer);
+		else
+			req->user_callback(err, 0, 0, 0, NULL, req->user_pointer);
+		return;
+	case TYPE_PTR:
+		if (reply) {
+			char *name = reply->data.ptr.name;
+			req->user_callback(DNS_ERR_NONE, DNS_PTR, 1, ttl,
+							   &name, req->user_pointer);
+		} else {
+			req->user_callback(err, 0, 0, 0, NULL,
+							   req->user_pointer);
+		}
+		return;
+	case TYPE_AAAA:
+		if (reply)
+			req->user_callback(DNS_ERR_NONE, DNS_IPv6_AAAA,
+							   reply->data.aaaa.addrcount, ttl,
+							   reply->data.aaaa.addresses,
+							   req->user_pointer);
+		else
+			req->user_callback(err, 0, 0, 0, NULL, req->user_pointer);
+                return;
+	}
+	assert(0);
+}
+
+/* this processes a parsed reply packet */
+static void
+reply_handle(struct request *const req, u16 flags, u32 ttl, struct reply *reply) {
+	int error;
+	static const int error_codes[] = {
+		DNS_ERR_FORMAT, DNS_ERR_SERVERFAILED, DNS_ERR_NOTEXIST,
+		DNS_ERR_NOTIMPL, DNS_ERR_REFUSED
+	};
+
+	if (flags & 0x020f || !reply || !reply->have_answer) {
+		/* there was an error */
+		if (flags & 0x0200) {
+			error = DNS_ERR_TRUNCATED;
+		} else {
+			u16 error_code = (flags & 0x000f) - 1;
+			if (error_code > 4) {
+				error = DNS_ERR_UNKNOWN;
+			} else {
+				error = error_codes[error_code];
+			}
+		}
+
+		switch(error) {
+		case DNS_ERR_NOTIMPL:
+		case DNS_ERR_REFUSED:
+			/* we regard these errors as marking a bad nameserver */
+			if (req->reissue_count < global_max_reissues) {
+				char msg[64];
+				evutil_snprintf(msg, sizeof(msg),
+				    "Bad response %d (%s)",
+					 error, evdns_err_to_string(error));
+				nameserver_failed(req->ns, msg);
+				if (!request_reissue(req)) return;
+			}
+			break;
+		case DNS_ERR_SERVERFAILED:
+			/* rcode 2 (servfailed) sometimes means "we
+			 * are broken" and sometimes (with some binds)
+			 * means "that request was very confusing."
+			 * Treat this as a timeout, not a failure.
+			 */
+			log(EVDNS_LOG_DEBUG, "Got a SERVERFAILED from nameserver %s; "
+				"will allow the request to time out.",
+				debug_ntoa(req->ns->address));
+			break;
+		default:
+			/* we got a good reply from the nameserver */
+			nameserver_up(req->ns);
+		}
+
+		if (req->search_state && req->request_type != TYPE_PTR) {
+			/* if we have a list of domains to search in,
+			 * try the next one */
+			if (!search_try_next(req)) {
+				/* a new request was issued so this
+				 * request is finished and */
+				/* the user callback will be made when
+				 * that request (or a */
+				/* child of it) finishes. */
+				request_finished(req, &req_head);
+				return;
+			}
+		}
+
+		/* all else failed. Pass the failure up */
+		reply_callback(req, 0, error, NULL);
+		request_finished(req, &req_head);
+	} else {
+		/* all ok, tell the user */
+		reply_callback(req, ttl, 0, reply);
+		nameserver_up(req->ns);
+		request_finished(req, &req_head);
+	}
+}
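+
+/* For reference, the flags word tested above follows the standard DNS
+ * header layout QR|Opcode(4)|AA|TC|RD|RA|Z(3)|RCODE(4): 0x8000 is QR
+ * (set on answers), 0x0200 is TC (truncated) and 0x000f masks the RCODE,
+ * so 0x020f covers the error-relevant bits. */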
+
+static int
+name_parse(u8 *packet, int length, int *idx, char *name_out, int name_out_len) {
+	int name_end = -1;
+	int j = *idx;
+	int ptr_count = 0;
+#define GET32(x) do { if (j + 4 > length) goto err; memcpy(&_t32, packet + j, 4); j += 4; x = ntohl(_t32); } while(0)
+#define GET16(x) do { if (j + 2 > length) goto err; memcpy(&_t, packet + j, 2); j += 2; x = ntohs(_t); } while(0)
+#define GET8(x) do { if (j >= length) goto err; x = packet[j++]; } while(0)
+
+	char *cp = name_out;
+	const char *const end = name_out + name_out_len;
+
+	/* Normally, names are a series of length prefixed strings terminated */
+	/* with a length of 0 (the lengths are u8's < 63). */
+	/* However, the length can start with a pair of 1 bits and that */
+	/* means that the next 14 bits are a pointer within the current */
+	/* packet. */
+
+	for(;;) {
+		u8 label_len;
+		if (j >= length) return -1;
+		GET8(label_len);
+		if (!label_len) break;
+		if (label_len & 0xc0) {
+			u8 ptr_low;
+			GET8(ptr_low);
+			if (name_end < 0) name_end = j;
+			j = (((int)label_len & 0x3f) << 8) + ptr_low;
+			/* Make sure that the target offset is in-bounds. */
+			if (j < 0 || j >= length) return -1;
+			/* If we've jumped more times than there are characters in the
+			 * message, we must have a loop. */
+			if (++ptr_count > length) return -1;
+			continue;
+		}
+		if (label_len > 63) return -1;
+		if (cp != name_out) {
+			if (cp + 1 >= end) return -1;
+			*cp++ = '.';
+		}
+		if (cp + label_len >= end) return -1;
+		memcpy(cp, packet + j, label_len);
+		cp += label_len;
+		j += label_len;
+	}
+	if (cp >= end) return -1;
+	*cp = '\0';
+	if (name_end < 0)
+		*idx = j;
+	else
+		*idx = name_end;
+	return 0;
+ err:
+	return -1;
+}
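+
+/* Worked example (illustrative): the name "abc.de" is encoded on the wire
+ * as the bytes 3 'a' 'b' 'c' 2 'd' 'e' 0 -- each label is length-prefixed
+ * and a zero length terminates the name.  A pair such as 0xc0 0x0c is
+ * instead a compression pointer to offset 12 in the packet, which is what
+ * the (label_len & 0xc0) branch above follows. */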
+
+/* parses a raw request from a nameserver */
+static int
+reply_parse(u8 *packet, int length) {
+	int j = 0, k = 0;  /* index into packet */
+	u16 _t;  /* used by the macros */
+	u32 _t32;  /* used by the macros */
+	char tmp_name[256], cmp_name[256]; /* used by the macros */
+
+	u16 trans_id, questions, answers, authority, additional, datalength;
+        u16 flags = 0;
+	u32 ttl, ttl_r = 0xffffffff;
+	struct reply reply;
+	struct request *req = NULL;
+	unsigned int i;
+
+	GET16(trans_id);
+	GET16(flags);
+	GET16(questions);
+	GET16(answers);
+	GET16(authority);
+	GET16(additional);
+	(void) authority; /* suppress "unused variable" warnings. */
+	(void) additional; /* suppress "unused variable" warnings. */
+
+	req = request_find_from_trans_id(trans_id);
+	if (!req) return -1;
+
+	memset(&reply, 0, sizeof(reply));
+
+	/* If it's not an answer, it doesn't correspond to any request. */
+	if (!(flags & 0x8000)) return -1;  /* must be an answer */
+	if (flags & 0x020f) {
+		/* there was an error */
+		goto err;
+	}
+	/* if (!answers) return; */  /* must have an answer of some form */
+
+	/* This macro skips a name in the DNS reply. */
+#define SKIP_NAME \
+	do { tmp_name[0] = '\0';				\
+		if (name_parse(packet, length, &j, tmp_name, sizeof(tmp_name))<0)\
+			goto err;				\
+	} while(0)
+#define TEST_NAME \
+	do { tmp_name[0] = '\0';				\
+		cmp_name[0] = '\0';				\
+		k = j;						\
+		if (name_parse(packet, length, &j, tmp_name, sizeof(tmp_name))<0)\
+			goto err;					\
+		if (name_parse(req->request, req->request_len, &k, cmp_name, sizeof(cmp_name))<0)	\
+			goto err;				\
+		if (memcmp(tmp_name, cmp_name, strlen (tmp_name)) != 0)	\
+			return (-1); /* we ignore mismatching names */	\
+	} while(0)
+
+	reply.type = req->request_type;
+
+	/* skip over each question in the reply */
+	for (i = 0; i < questions; ++i) {
+		/* the question looks like
+		 *   <label:name><u16:type><u16:class>
+		 */
+		TEST_NAME;
+		j += 4;
+		if (j > length) goto err;
+	}
+
+	/* now we have the answer section which looks like
+	 * <label:name><u16:type><u16:class><u32:ttl><u16:len><data...>
+	 */
+
+	for (i = 0; i < answers; ++i) {
+		u16 type, class;
+
+		SKIP_NAME;
+		GET16(type);
+		GET16(class);
+		GET32(ttl);
+		GET16(datalength);
+
+		if (type == TYPE_A && class == CLASS_INET) {
+			int addrcount, addrtocopy;
+			if (req->request_type != TYPE_A) {
+				j += datalength; continue;
+			}
+			if ((datalength & 3) != 0) /* not a whole number of A records. */
+			    goto err;
+			addrcount = datalength >> 2;
+			addrtocopy = MIN(MAX_ADDRS - reply.data.a.addrcount, (unsigned)addrcount);
+
+			ttl_r = MIN(ttl_r, ttl);
+			/* we only bother with the first MAX_ADDRS addresses. */
+			if (j + 4*addrtocopy > length) goto err;
+			memcpy(&reply.data.a.addresses[reply.data.a.addrcount],
+				   packet + j, 4*addrtocopy);
+			j += 4*addrtocopy;
+			reply.data.a.addrcount += addrtocopy;
+			reply.have_answer = 1;
+			if (reply.data.a.addrcount == MAX_ADDRS) break;
+		} else if (type == TYPE_PTR && class == CLASS_INET) {
+			if (req->request_type != TYPE_PTR) {
+				j += datalength; continue;
+			}
+			if (name_parse(packet, length, &j, reply.data.ptr.name,
+						   sizeof(reply.data.ptr.name))<0)
+				goto err;
+			ttl_r = MIN(ttl_r, ttl);
+			reply.have_answer = 1;
+			break;
+		} else if (type == TYPE_AAAA && class == CLASS_INET) {
+			int addrcount, addrtocopy;
+			if (req->request_type != TYPE_AAAA) {
+				j += datalength; continue;
+			}
+			if ((datalength & 15) != 0) /* not a whole number of AAAA records. */
+				goto err;
+			addrcount = datalength >> 4;  /* each address is 16 bytes long */
+			addrtocopy = MIN(MAX_ADDRS - reply.data.aaaa.addrcount, (unsigned)addrcount);
+			ttl_r = MIN(ttl_r, ttl);
+
+			/* we only bother with the first MAX_ADDRS addresses. */
+			if (j + 16*addrtocopy > length) goto err;
+			memcpy(&reply.data.aaaa.addresses[reply.data.aaaa.addrcount],
+				   packet + j, 16*addrtocopy);
+			reply.data.aaaa.addrcount += addrtocopy;
+			j += 16*addrtocopy;
+			reply.have_answer = 1;
+			if (reply.data.aaaa.addrcount == MAX_ADDRS) break;
+		} else {
+			/* skip over any other type of resource */
+			j += datalength;
+		}
+	}
+
+	reply_handle(req, flags, ttl_r, &reply);
+	return 0;
+ err:
+	if (req)
+		reply_handle(req, flags, 0, NULL);
+	return -1;
+}
+
+/* Parse a raw request (packet,length) sent to a nameserver port (port) from */
+/* a DNS client (addr,addrlen), and if it's well-formed, call the corresponding */
+/* callback. */
+static int
+request_parse(u8 *packet, int length, struct evdns_server_port *port, struct sockaddr *addr, socklen_t addrlen)
+{
+	int j = 0;	/* index into packet */
+	u16 _t;	 /* used by the macros */
+	char tmp_name[256]; /* used by the macros */
+
+	int i;
+	u16 trans_id, flags, questions, answers, authority, additional;
+	struct server_request *server_req = NULL;
+
+	/* Get the header fields */
+	GET16(trans_id);
+	GET16(flags);
+	GET16(questions);
+	GET16(answers);
+	GET16(authority);
+	GET16(additional);
+
+	if (flags & 0x8000) return -1; /* Must not be an answer. */
+	flags &= 0x0110; /* Only RD and CD get preserved. */
+
+	server_req = malloc(sizeof(struct server_request));
+	if (server_req == NULL) return -1;
+	memset(server_req, 0, sizeof(struct server_request));
+
+	server_req->trans_id = trans_id;
+	memcpy(&server_req->addr, addr, addrlen);
+	server_req->addrlen = addrlen;
+
+	server_req->base.flags = flags;
+	server_req->base.nquestions = 0;
+	server_req->base.questions = malloc(sizeof(struct evdns_server_question *) * questions);
+	if (server_req->base.questions == NULL)
+		goto err;
+
+	for (i = 0; i < questions; ++i) {
+		u16 type, class;
+		struct evdns_server_question *q;
+		int namelen;
+		if (name_parse(packet, length, &j, tmp_name, sizeof(tmp_name))<0)
+			goto err;
+		GET16(type);
+		GET16(class);
+		namelen = strlen(tmp_name);
+		q = malloc(sizeof(struct evdns_server_question) + namelen);
+		if (!q)
+			goto err;
+		q->type = type;
+		q->dns_question_class = class;
+		memcpy(q->name, tmp_name, namelen+1);
+		server_req->base.questions[server_req->base.nquestions++] = q;
+	}
+
+	/* Ignore answers, authority, and additional. */
+
+	server_req->port = port;
+	port->refcnt++;
+
+	/* Only standard queries are supported. */
+	if (flags & 0x7800) {
+		evdns_server_request_respond(&(server_req->base), DNS_ERR_NOTIMPL);
+		return -1;
+	}
+
+	port->user_callback(&(server_req->base), port->user_data);
+
+	return 0;
+err:
+	if (server_req) {
+		if (server_req->base.questions) {
+			for (i = 0; i < server_req->base.nquestions; ++i)
+				free(server_req->base.questions[i]);
+			free(server_req->base.questions);
+		}
+		free(server_req);
+	}
+	return -1;
+
+#undef SKIP_NAME
+#undef GET32
+#undef GET16
+#undef GET8
+}
+
+static u16
+default_transaction_id_fn(void)
+{
+	u16 trans_id;
+#ifdef DNS_USE_CPU_CLOCK_FOR_ID
+	struct timespec ts;
+	static int clkid = -1;
+	if (clkid == -1) {
+		clkid = CLOCK_REALTIME;
+#ifdef CLOCK_MONOTONIC
+		if (clock_gettime(CLOCK_MONOTONIC, &ts) != -1)
+			clkid = CLOCK_MONOTONIC;
+#endif
+	}
+	if (clock_gettime(clkid, &ts) == -1)
+		event_err(1, "clock_gettime");
+	trans_id = ts.tv_nsec & 0xffff;
+#endif
+
+#ifdef DNS_USE_FTIME_FOR_ID
+	struct _timeb tb;
+	_ftime(&tb);
+	trans_id = tb.millitm & 0xffff;
+#endif
+
+#ifdef DNS_USE_GETTIMEOFDAY_FOR_ID
+	struct timeval tv;
+	evutil_gettimeofday(&tv, NULL);
+	trans_id = tv.tv_usec & 0xffff;
+#endif
+
+#ifdef DNS_USE_OPENSSL_FOR_ID
+	if (RAND_pseudo_bytes((u8 *) &trans_id, 2) == -1) {
+		/* in the case that the RAND call fails we back */
+		/* down to using gettimeofday. */
+		/*
+		  struct timeval tv;
+		  evutil_gettimeofday(&tv, NULL);
+		  trans_id = tv.tv_usec & 0xffff;
+		*/
+		abort();
+	}
+#endif
+	return trans_id;
+}
+
+static ev_uint16_t (*trans_id_function)(void) = default_transaction_id_fn;
+
+void
+evdns_set_transaction_id_fn(ev_uint16_t (*fn)(void))
+{
+	if (fn)
+		trans_id_function = fn;
+	else
+		trans_id_function = default_transaction_id_fn;
+}
+
+/* Try to choose a strong transaction id which isn't already in flight */
+static u16
+transaction_id_pick(void) {
+	for (;;) {
+		u16 trans_id = trans_id_function();
+
+		if (trans_id == 0xffff) continue;
+
+		if (request_find_from_trans_id(trans_id) == NULL)
+			return trans_id;
+	}
+}
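+
+/* Sketch: an application with its own entropy source can override the id
+ * generator via evdns_set_transaction_id_fn(), e.g. (example_rand16 and
+ * example_entropy_u32 are hypothetical application functions):
+ *
+ *	static ev_uint16_t
+ *	example_rand16(void)
+ *	{
+ *		return (ev_uint16_t)(example_entropy_u32() & 0xffff);
+ *	}
+ *
+ *	evdns_set_transaction_id_fn(example_rand16);
+ */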
+
+/* choose a nameserver to use. This function will try to ignore */
+/* nameservers which we think are down and load balance across the rest */
+/* by updating the server_head global each time. */
+static struct nameserver *
+nameserver_pick(void) {
+	struct nameserver *started_at = server_head, *picked;
+	if (!server_head) return NULL;
+
+	/* if we don't have any good nameservers then there's no */
+	/* point in trying to find one. */
+	if (!global_good_nameservers) {
+		server_head = server_head->next;
+		return server_head;
+	}
+
+	/* remember that nameservers are in a circular list */
+	for (;;) {
+		if (server_head->state) {
+			/* we think this server is currently good */
+			picked = server_head;
+			server_head = server_head->next;
+			return picked;
+		}
+
+		server_head = server_head->next;
+		if (server_head == started_at) {
+			/* all the nameservers seem to be down */
+			/* so we just return this one and hope for the */
+			/* best */
+			assert(global_good_nameservers == 0);
+			picked = server_head;
+			server_head = server_head->next;
+			return picked;
+		}
+	}
+}
+
+static int
+address_is_correct(struct nameserver *ns, struct sockaddr *sa, socklen_t slen)
+{
+	struct sockaddr_in *sin = (struct sockaddr_in*) sa;
+	if (sa->sa_family != AF_INET || slen != sizeof(struct sockaddr_in))
+		return 0;
+	if (sin->sin_addr.s_addr != ns->address)
+		return 0;
+	return 1;
+}
+
+/* this is called when a nameserver socket is ready for reading */
+static void
+nameserver_read(struct nameserver *ns) {
+	u8 packet[1500];
+	struct sockaddr_storage ss;
+	socklen_t addrlen = sizeof(ss);
+
+	for (;;) {
+          	const int r = recvfrom(ns->socket, packet, sizeof(packet), 0,
+		    (struct sockaddr*)&ss, &addrlen);
+		if (r < 0) {
+			int err = last_error(ns->socket);
+			if (error_is_eagain(err)) return;
+			nameserver_failed(ns, strerror(err));
+			return;
+		}
+		if (!address_is_correct(ns, (struct sockaddr*)&ss, addrlen)) {
+			log(EVDNS_LOG_WARN, "Address mismatch on received "
+			    "DNS packet.");
+			return;
+		}
+		ns->timedout = 0;
+		reply_parse(packet, r);
+	}
+}
+
+/* Read a packet from a DNS client on a server port s, parse it, and */
+/* act accordingly. */
+static void
+server_port_read(struct evdns_server_port *s) {
+	u8 packet[1500];
+	struct sockaddr_storage addr;
+	socklen_t addrlen;
+	int r;
+
+	for (;;) {
+		addrlen = sizeof(struct sockaddr_storage);
+		r = recvfrom(s->socket, packet, sizeof(packet), 0,
+					 (struct sockaddr*) &addr, &addrlen);
+		if (r < 0) {
+			int err = last_error(s->socket);
+			if (error_is_eagain(err)) return;
+			log(EVDNS_LOG_WARN, "Error %s (%d) while reading request.",
+				strerror(err), err);
+			return;
+		}
+		request_parse(packet, r, s, (struct sockaddr*) &addr, addrlen);
+	}
+}
+
+/* Try to write all pending replies on a given DNS server port. */
+static void
+server_port_flush(struct evdns_server_port *port)
+{
+	while (port->pending_replies) {
+		struct server_request *req = port->pending_replies;
+		int r = sendto(port->socket, req->response, req->response_len, 0,
+			   (struct sockaddr*) &req->addr, req->addrlen);
+		if (r < 0) {
+			int err = last_error(port->socket);
+			if (error_is_eagain(err))
+				return;
+			log(EVDNS_LOG_WARN, "Error %s (%d) while writing response to port; dropping", strerror(err), err);
+		}
+		if (server_request_free(req)) {
+			/* we released the last reference to req->port. */
+			return;
+		}
+	}
+
+	/* We have no more pending requests; stop listening for 'writeable' events. */
+	(void) event_del(&port->event);
+	event_set(&port->event, port->socket, EV_READ | EV_PERSIST,
+			  server_port_ready_callback, port);
+	if (event_add(&port->event, NULL) < 0) {
+		log(EVDNS_LOG_WARN, "Error from libevent when adding event for DNS server.");
+		/* ???? Do more? */
+	}
+}
+
+/* set if we are waiting for the ability to write to this server. */
+/* if waiting is true then we ask libevent for EV_WRITE events, otherwise */
+/* we stop these events. */
+static void
+nameserver_write_waiting(struct nameserver *ns, char waiting) {
+	if (ns->write_waiting == waiting) return;
+
+	ns->write_waiting = waiting;
+	(void) event_del(&ns->event);
+	event_set(&ns->event, ns->socket, EV_READ | (waiting ? EV_WRITE : 0) | EV_PERSIST,
+			nameserver_ready_callback, ns);
+	if (event_add(&ns->event, NULL) < 0) {
+          log(EVDNS_LOG_WARN, "Error from libevent when adding event for %s",
+              debug_ntoa(ns->address));
+          /* ???? Do more? */
+        }
+}
+
+/* a callback function. Called by libevent when the kernel says that */
+/* a nameserver socket is ready for writing or reading */
+static void
+nameserver_ready_callback(int fd, short events, void *arg) {
+	struct nameserver *ns = (struct nameserver *) arg;
+        (void)fd;
+
+	if (events & EV_WRITE) {
+		ns->choked = 0;
+		if (!evdns_transmit()) {
+			nameserver_write_waiting(ns, 0);
+		}
+	}
+	if (events & EV_READ) {
+		nameserver_read(ns);
+	}
+}
+
+/* a callback function. Called by libevent when the kernel says that */
+/* a server socket is ready for writing or reading. */
+static void
+server_port_ready_callback(int fd, short events, void *arg) {
+	struct evdns_server_port *port = (struct evdns_server_port *) arg;
+	(void) fd;
+
+	if (events & EV_WRITE) {
+		port->choked = 0;
+		server_port_flush(port);
+	}
+	if (events & EV_READ) {
+		server_port_read(port);
+	}
+}
+
+/* This is an inefficient representation; only use it via the dnslabel_table_*
+ * functions, so that it can be safely replaced with something smarter later. */
+#define MAX_LABELS 128
+/* Structures used to implement name compression */
+struct dnslabel_entry { char *v; off_t pos; };
+struct dnslabel_table {
+	int n_labels; /* number of current entries */
+	/* map from name to position in message */
+	struct dnslabel_entry labels[MAX_LABELS];
+};
+
+/* Initialize dnslabel_table. */
+static void
+dnslabel_table_init(struct dnslabel_table *table)
+{
+	table->n_labels = 0;
+}
+
+/* Free all storage held by table, but not the table itself. */
+static void
+dnslabel_clear(struct dnslabel_table *table)
+{
+	int i;
+	for (i = 0; i < table->n_labels; ++i)
+		free(table->labels[i].v);
+	table->n_labels = 0;
+}
+
+/* return the position of the label in the current message, or -1 if the label */
+/* hasn't been used yet. */
+static int
+dnslabel_table_get_pos(const struct dnslabel_table *table, const char *label)
+{
+	int i;
+	for (i = 0; i < table->n_labels; ++i) {
+		if (!strcmp(label, table->labels[i].v))
+			return table->labels[i].pos;
+	}
+	return -1;
+}
+
+/* remember that we've used the label at position pos */
+static int
+dnslabel_table_add(struct dnslabel_table *table, const char *label, off_t pos)
+{
+	char *v;
+	int p;
+	if (table->n_labels == MAX_LABELS)
+		return (-1);
+	v = strdup(label);
+	if (v == NULL)
+		return (-1);
+	p = table->n_labels++;
+	table->labels[p].v = v;
+	table->labels[p].pos = pos;
+
+	return (0);
+}
+
+/* Converts a string to a length-prefixed set of DNS labels, starting */
+/* at buf[j]. name and buf must not overlap. name_len should be the length */
+/* of name.	 table is optional, and is used for compression. */
+/* */
+/* Input: abc.def */
+/* Output: <3>abc<3>def<0> */
+/* */
+/* Returns the first index after the encoded name, or negative on error. */
+/*	 -1	 label was > 63 bytes */
+/*	 -2	 name too long to fit in buffer. */
+/* */
+static off_t
+dnsname_to_labels(u8 *const buf, size_t buf_len, off_t j,
+				  const char *name, const int name_len,
+				  struct dnslabel_table *table) {
+	const char *end = name + name_len;
+	int ref = 0;
+	u16 _t;
+
+#define APPEND16(x) do {						   \
+		if (j + 2 > (off_t)buf_len)				   \
+			goto overflow;						   \
+		_t = htons(x);							   \
+		memcpy(buf + j, &_t, 2);				   \
+		j += 2;									   \
+	} while (0)
+#define APPEND32(x) do {						   \
+		if (j + 4 > (off_t)buf_len)				   \
+			goto overflow;						   \
+		_t32 = htonl(x);						   \
+		memcpy(buf + j, &_t32, 4);				   \
+		j += 4;									   \
+	} while (0)
+
+	if (name_len > 255) return -2;
+
+	for (;;) {
+		const char *const start = name;
+		if (table && (ref = dnslabel_table_get_pos(table, name)) >= 0) {
+			APPEND16(ref | 0xc000);
+			return j;
+		}
+		name = strchr(name, '.');
+		if (!name) {
+			const unsigned int label_len = end - start;
+			if (label_len > 63) return -1;
+			if ((size_t)(j+label_len+1) > buf_len) return -2;
+			if (table) dnslabel_table_add(table, start, j);
+			buf[j++] = label_len;
+
+			memcpy(buf + j, start, end - start);
+			j += end - start;
+			break;
+		} else {
+			/* append length of the label. */
+			const unsigned int label_len = name - start;
+			if (label_len > 63) return -1;
+			if ((size_t)(j+label_len+1) > buf_len) return -2;
+			if (table) dnslabel_table_add(table, start, j);
+			buf[j++] = label_len;
+
+			memcpy(buf + j, start, name - start);
+			j += name - start;
+			/* hop over the '.' */
+			name++;
+		}
+	}
+
+	/* the labels must be terminated by a 0. */
+	/* It's possible that the name ended in a . */
+	/* in which case the zero is already there */
+	if (!j || buf[j-1]) buf[j++] = 0;
+	return j;
+ overflow:
+	return (-2);
+}
+
+/* Finds the length of a dns request for a DNS name of the given */
+/* length. The actual request may be smaller than the value returned */
+/* here */
+static int
+evdns_request_len(const int name_len) {
+	return 96 + /* length of the DNS standard header (12 bytes) plus headroom */
+		name_len + 2 +
+		4;  /* space for the resource type */
+}
+
+/* build a dns request packet into buf. buf should be at least as long */
+/* as evdns_request_len told you it should be. */
+/* */
+/* Returns the amount of space used. Negative on error. */
+static int
+evdns_request_data_build(const char *const name, const int name_len,
+    const u16 trans_id, const u16 type, const u16 class,
+    u8 *const buf, size_t buf_len) {
+	off_t j = 0;  /* current offset into buf */
+	u16 _t;  /* used by the macros */
+
+	APPEND16(trans_id);
+	APPEND16(0x0100);  /* standard query, recursion desired */
+	APPEND16(1);  /* one question */
+	APPEND16(0);  /* no answers */
+	APPEND16(0);  /* no authority */
+	APPEND16(0);  /* no additional */
+
+	j = dnsname_to_labels(buf, buf_len, j, name, name_len, NULL);
+	if (j < 0) {
+		return (int)j;
+	}
+
+	APPEND16(type);
+	APPEND16(class);
+
+	return (int)j;
+ overflow:
+	return (-1);
+}
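+
+/* Sketch of what the builder above produces (illustrative values only):
+ *
+ *	u8 buf[128];
+ *	int len = evdns_request_data_build("www.example.com", 15, 0x1234,
+ *	    TYPE_A, CLASS_INET, buf, sizeof(buf));
+ *
+ * On success buf[0..len) is a complete question packet: the 12-byte
+ * header, then <3>www<7>example<3>com<0>, then the type and class. */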
+
+/* exported function */
+struct evdns_server_port *
+evdns_add_server_port(int socket, int is_tcp, evdns_request_callback_fn_type cb, void *user_data)
+{
+	struct evdns_server_port *port;
+	if (!(port = malloc(sizeof(struct evdns_server_port))))
+		return NULL;
+	memset(port, 0, sizeof(struct evdns_server_port));
+
+	assert(!is_tcp); /* TCP sockets not yet implemented */
+	port->socket = socket;
+	port->refcnt = 1;
+	port->choked = 0;
+	port->closing = 0;
+	port->user_callback = cb;
+	port->user_data = user_data;
+	port->pending_replies = NULL;
+
+	event_set(&port->event, port->socket, EV_READ | EV_PERSIST,
+			  server_port_ready_callback, port);
+	event_add(&port->event, NULL); /* check return. */
+	return port;
+}
+
+/* exported function */
+void
+evdns_close_server_port(struct evdns_server_port *port)
+{
+	if (--port->refcnt == 0) {
+		/* last reference is gone; don't touch the port afterwards */
+		server_port_free(port);
+		return;
+	}
+	port->closing = 1;
+}
+
+/* exported function */
+int
+evdns_server_request_add_reply(struct evdns_server_request *_req, int section, const char *name, int type, int class, int ttl, int datalen, int is_name, const char *data)
+{
+	struct server_request *req = TO_SERVER_REQUEST(_req);
+	struct server_reply_item **itemp, *item;
+	int *countp;
+
+	if (req->response) /* have we already answered? */
+		return (-1);
+
+	switch (section) {
+	case EVDNS_ANSWER_SECTION:
+		itemp = &req->answer;
+		countp = &req->n_answer;
+		break;
+	case EVDNS_AUTHORITY_SECTION:
+		itemp = &req->authority;
+		countp = &req->n_authority;
+		break;
+	case EVDNS_ADDITIONAL_SECTION:
+		itemp = &req->additional;
+		countp = &req->n_additional;
+		break;
+	default:
+		return (-1);
+	}
+	while (*itemp) {
+		itemp = &((*itemp)->next);
+	}
+	item = malloc(sizeof(struct server_reply_item));
+	if (!item)
+		return -1;
+	item->next = NULL;
+	if (!(item->name = strdup(name))) {
+		free(item);
+		return -1;
+	}
+	item->type = type;
+	item->dns_question_class = class;
+	item->ttl = ttl;
+	item->is_name = is_name != 0;
+	item->datalen = 0;
+	item->data = NULL;
+	if (data) {
+		if (item->is_name) {
+			if (!(item->data = strdup(data))) {
+				free(item->name);
+				free(item);
+				return -1;
+			}
+			item->datalen = (u16)-1;
+		} else {
+			if (!(item->data = malloc(datalen))) {
+				free(item->name);
+				free(item);
+				return -1;
+			}
+			item->datalen = datalen;
+			memcpy(item->data, data, datalen);
+		}
+	}
+
+	*itemp = item;
+	++(*countp);
+	return 0;
+}
+
+/* exported function */
+int
+evdns_server_request_add_a_reply(struct evdns_server_request *req, const char *name, int n, void *addrs, int ttl)
+{
+	return evdns_server_request_add_reply(
+		  req, EVDNS_ANSWER_SECTION, name, TYPE_A, CLASS_INET,
+		  ttl, n*4, 0, addrs);
+}
+
+/* exported function */
+int
+evdns_server_request_add_aaaa_reply(struct evdns_server_request *req, const char *name, int n, void *addrs, int ttl)
+{
+	return evdns_server_request_add_reply(
+		  req, EVDNS_ANSWER_SECTION, name, TYPE_AAAA, CLASS_INET,
+		  ttl, n*16, 0, addrs);
+}
+
+/* exported function */
+int
+evdns_server_request_add_ptr_reply(struct evdns_server_request *req, struct in_addr *in, const char *inaddr_name, const char *hostname, int ttl)
+{
+	u32 a;
+	char buf[32];
+	assert(in || inaddr_name);
+	assert(!(in && inaddr_name));
+	if (in) {
+		a = ntohl(in->s_addr);
+		evutil_snprintf(buf, sizeof(buf), "%d.%d.%d.%d.in-addr.arpa",
+				(int)(u8)((a	)&0xff),
+				(int)(u8)((a>>8 )&0xff),
+				(int)(u8)((a>>16)&0xff),
+				(int)(u8)((a>>24)&0xff));
+		inaddr_name = buf;
+	}
+	return evdns_server_request_add_reply(
+		  req, EVDNS_ANSWER_SECTION, inaddr_name, TYPE_PTR, CLASS_INET,
+		  ttl, -1, 1, hostname);
+}
+
+/* exported function */
+int
+evdns_server_request_add_cname_reply(struct evdns_server_request *req, const char *name, const char *cname, int ttl)
+{
+	return evdns_server_request_add_reply(
+		  req, EVDNS_ANSWER_SECTION, name, TYPE_CNAME, CLASS_INET,
+		  ttl, -1, 1, cname);
+}
+
+
+static int
+evdns_server_request_format_response(struct server_request *req, int err)
+{
+	unsigned char buf[1500];
+	size_t buf_len = sizeof(buf);
+	off_t j = 0, r;
+	u16 _t;
+	u32 _t32;
+	int i;
+	u16 flags;
+	struct dnslabel_table table;
+
+	if (err < 0 || err > 15) return -1;
+
+	/* Set response bit and error code; copy OPCODE and RD fields from
+	 * question; copy RA and AA if set by caller. */
+	flags = req->base.flags;
+	flags |= (0x8000 | err);
+
+	dnslabel_table_init(&table);
+	APPEND16(req->trans_id);
+	APPEND16(flags);
+	APPEND16(req->base.nquestions);
+	APPEND16(req->n_answer);
+	APPEND16(req->n_authority);
+	APPEND16(req->n_additional);
+
+	/* Add questions. */
+	for (i=0; i < req->base.nquestions; ++i) {
+		const char *s = req->base.questions[i]->name;
+		j = dnsname_to_labels(buf, buf_len, j, s, strlen(s), &table);
+		if (j < 0) {
+			dnslabel_clear(&table);
+			return (int) j;
+		}
+		APPEND16(req->base.questions[i]->type);
+		APPEND16(req->base.questions[i]->dns_question_class);
+	}
+
+	/* Add answer, authority, and additional sections. */
+	for (i=0; i<3; ++i) {
+		struct server_reply_item *item;
+		if (i==0)
+			item = req->answer;
+		else if (i==1)
+			item = req->authority;
+		else
+			item = req->additional;
+		while (item) {
+			r = dnsname_to_labels(buf, buf_len, j, item->name, strlen(item->name), &table);
+			if (r < 0)
+				goto overflow;
+			j = r;
+
+			APPEND16(item->type);
+			APPEND16(item->dns_question_class);
+			APPEND32(item->ttl);
+			if (item->is_name) {
+				off_t len_idx = j, name_start;
+				j += 2;
+				name_start = j;
+				r = dnsname_to_labels(buf, buf_len, j, item->data, strlen(item->data), &table);
+				if (r < 0)
+					goto overflow;
+				j = r;
+				_t = htons( (short) (j-name_start) );
+				memcpy(buf+len_idx, &_t, 2);
+			} else {
+				APPEND16(item->datalen);
+				if (j+item->datalen > (off_t)buf_len)
+					goto overflow;
+				memcpy(buf+j, item->data, item->datalen);
+				j += item->datalen;
+			}
+			item = item->next;
+		}
+	}
+
+	if (j > 512) {
+overflow:
+		j = 512;
+		buf[2] |= 0x02; /* set the truncated bit. */
+	}
+
+	req->response_len = j;
+
+	if (!(req->response = malloc(req->response_len))) {
+		server_request_free_answers(req);
+		dnslabel_clear(&table);
+		return (-1);
+	}
+	memcpy(req->response, buf, req->response_len);
+	server_request_free_answers(req);
+	dnslabel_clear(&table);
+	return (0);
+}
+
+/* exported function */
+int
+evdns_server_request_respond(struct evdns_server_request *_req, int err)
+{
+	struct server_request *req = TO_SERVER_REQUEST(_req);
+	struct evdns_server_port *port = req->port;
+	int r;
+	if (!req->response) {
+		if ((r = evdns_server_request_format_response(req, err))<0)
+			return r;
+	}
+
+	r = sendto(port->socket, req->response, req->response_len, 0,
+			   (struct sockaddr*) &req->addr, req->addrlen);
+	if (r<0) {
+		int sock_err = last_error(port->socket);
+		if (! error_is_eagain(sock_err))
+			return -1;
+
+		if (port->pending_replies) {
+			req->prev_pending = port->pending_replies->prev_pending;
+			req->next_pending = port->pending_replies;
+			req->prev_pending->next_pending =
+				req->next_pending->prev_pending = req;
+		} else {
+			req->prev_pending = req->next_pending = req;
+			port->pending_replies = req;
+			port->choked = 1;
+
+			(void) event_del(&port->event);
+			event_set(&port->event, port->socket, (port->closing?0:EV_READ) | EV_WRITE | EV_PERSIST, server_port_ready_callback, port);
+
+			if (event_add(&port->event, NULL) < 0) {
+				log(EVDNS_LOG_WARN, "Error from libevent when adding event for DNS server");
+			}
+
+		}
+
+		return 1;
+	}
+	if (server_request_free(req))
+		return 0;
+
+	if (port->pending_replies)
+		server_port_flush(port);
+
+	return 0;
+}
+
+/* Free all storage held by RRs in req. */
+static void
+server_request_free_answers(struct server_request *req)
+{
+	struct server_reply_item *victim, *next, **list;
+	int i;
+	for (i = 0; i < 3; ++i) {
+		if (i==0)
+			list = &req->answer;
+		else if (i==1)
+			list = &req->authority;
+		else
+			list = &req->additional;
+
+		victim = *list;
+		while (victim) {
+			next = victim->next;
+			free(victim->name);
+			if (victim->data)
+				free(victim->data);
+			free(victim);
+			victim = next;
+		}
+		*list = NULL;
+	}
+}
+
+/* Free all storage held by req, and remove links to it. */
+/* return true iff we just wound up freeing the server_port. */
+static int
+server_request_free(struct server_request *req)
+{
+	int i, rc=1;
+	if (req->base.questions) {
+		for (i = 0; i < req->base.nquestions; ++i)
+			free(req->base.questions[i]);
+		free(req->base.questions);
+	}
+
+	if (req->port) {
+		if (req->port->pending_replies == req) {
+			if (req->next_pending)
+				req->port->pending_replies = req->next_pending;
+			else
+				req->port->pending_replies = NULL;
+		}
+		rc = --req->port->refcnt;
+	}
+
+	if (req->response) {
+		free(req->response);
+	}
+
+	server_request_free_answers(req);
+
+	if (req->next_pending && req->next_pending != req) {
+		req->next_pending->prev_pending = req->prev_pending;
+		req->prev_pending->next_pending = req->next_pending;
+	}
+
+	if (rc == 0) {
+		server_port_free(req->port);
+		free(req);
+		return (1);
+	}
+	free(req);
+	return (0);
+}
+
+/* Free all storage held by an evdns_server_port.  Only called when the */
+/* port's refcount reaches zero. */
+static void
+server_port_free(struct evdns_server_port *port)
+{
+	assert(port);
+	assert(!port->refcnt);
+	assert(!port->pending_replies);
+	if (port->socket >= 0) {
+		CLOSE_SOCKET(port->socket);
+		port->socket = -1;
+	}
+	(void) event_del(&port->event);
+	/* XXXX actually free the port? -NM */
+}
+
+/* exported function */
+int
+evdns_server_request_drop(struct evdns_server_request *_req)
+{
+	struct server_request *req = TO_SERVER_REQUEST(_req);
+	server_request_free(req);
+	return 0;
+}
+
+/* exported function */
+int
+evdns_server_request_get_requesting_addr(struct evdns_server_request *_req, struct sockaddr *sa, int addr_len)
+{
+	struct server_request *req = TO_SERVER_REQUEST(_req);
+	if (addr_len < (int)req->addrlen)
+		return -1;
+	memcpy(sa, &(req->addr), req->addrlen);
+	return req->addrlen;
+}
+
+#undef APPEND16
+#undef APPEND32
+
+/* this is a libevent callback function which is called when a request */
+/* has timed out. */
+static void
+evdns_request_timeout_callback(int fd, short events, void *arg) {
+	struct request *const req = (struct request *) arg;
+        (void) fd;
+        (void) events;
+
+	log(EVDNS_LOG_DEBUG, "Request %lx timed out", (unsigned long) arg);
+
+	req->ns->timedout++;
+	if (req->ns->timedout > global_max_nameserver_timeout) {
+		req->ns->timedout = 0;
+		nameserver_failed(req->ns, "request timed out.");
+	}
+
+	(void) evtimer_del(&req->timeout_event);
+	if (req->tx_count >= global_max_retransmits) {
+		/* this request has failed */
+		reply_callback(req, 0, DNS_ERR_TIMEOUT, NULL);
+		request_finished(req, &req_head);
+	} else {
+		/* retransmit it */
+		evdns_request_transmit(req);
+	}
+}
+
+/* try to send a request to a given server. */
+/* */
+/* return: */
+/*   0 ok */
+/*   1 temporary failure */
+/*   2 other failure */
+static int
+evdns_request_transmit_to(struct request *req, struct nameserver *server) {
+	struct sockaddr_in sin;
+	int r;
+	memset(&sin, 0, sizeof(sin));
+	sin.sin_addr.s_addr = req->ns->address;
+	sin.sin_port = req->ns->port;
+	sin.sin_family = AF_INET;
+
+	r = sendto(server->socket, req->request, req->request_len, 0,
+	    (struct sockaddr*)&sin, sizeof(sin));
+	if (r < 0) {
+		int err = last_error(server->socket);
+		if (error_is_eagain(err)) return 1;
+		nameserver_failed(req->ns, strerror(err));
+		return 2;
+	} else if (r != (int)req->request_len) {
+		return 1;  /* short write */
+	} else {
+		return 0;
+	}
+}
+
+/* try to send a request, updating the fields of the request */
+/* as needed */
+/* */
+/* return: */
+/*   0 ok */
+/*   1 failed */
+static int
+evdns_request_transmit(struct request *req) {
+	int retcode = 0, r;
+
+	/* if we fail to send this packet then this flag marks it */
+	/* for evdns_transmit */
+	req->transmit_me = 1;
+	if (req->trans_id == 0xffff) abort();
+
+	if (req->ns->choked) {
+		/* don't bother trying to write to a socket */
+		/* which we have had EAGAIN from */
+		return 1;
+	}
+
+	r = evdns_request_transmit_to(req, req->ns);
+	switch (r) {
+	case 1:
+		/* temp failure */
+		req->ns->choked = 1;
+		nameserver_write_waiting(req->ns, 1);
+		return 1;
+	case 2:
+		/* failed in some other way */
+		retcode = 1;
+		/* fall through */
+	default:
+		/* all ok */
+		log(EVDNS_LOG_DEBUG,
+		    "Setting timeout for request %lx", (unsigned long) req);
+		if (evtimer_add(&req->timeout_event, &global_timeout) < 0) {
+                  log(EVDNS_LOG_WARN,
+		      "Error from libevent when adding timer for request %lx",
+                      (unsigned long) req);
+                  /* ???? Do more? */
+                }
+		req->tx_count++;
+		req->transmit_me = 0;
+		return retcode;
+	}
+}
+
+static void
+nameserver_probe_callback(int result, char type, int count, int ttl, void *addresses, void *arg) {
+	struct nameserver *const ns = (struct nameserver *) arg;
+        (void) type;
+        (void) count;
+        (void) ttl;
+        (void) addresses;
+
+	if (result == DNS_ERR_NONE || result == DNS_ERR_NOTEXIST) {
+		/* this is a good reply */
+		nameserver_up(ns);
+	} else nameserver_probe_failed(ns);
+}
+
+static void
+nameserver_send_probe(struct nameserver *const ns) {
+	struct request *req;
+	/* here we need to send a probe to a given nameserver */
+	/* in the hope that it is up now. */
+
+	log(EVDNS_LOG_DEBUG, "Sending probe to %s", debug_ntoa(ns->address));
+
+	req = request_new(TYPE_A, "www.google.com", DNS_QUERY_NO_SEARCH, nameserver_probe_callback, ns);
+        if (!req) return;
+	/* we force this into the inflight queue no matter what */
+	request_trans_id_set(req, transaction_id_pick());
+	req->ns = ns;
+	request_submit(req);
+}
+
+/* returns: */
+/*   0 didn't try to transmit anything */
+/*   1 tried to transmit something */
+static int
+evdns_transmit(void) {
+	char did_try_to_transmit = 0;
+
+	if (req_head) {
+		struct request *const started_at = req_head, *req = req_head;
+		/* first transmit all the requests which are currently waiting */
+		do {
+			if (req->transmit_me) {
+				did_try_to_transmit = 1;
+				evdns_request_transmit(req);
+			}
+
+			req = req->next;
+		} while (req != started_at);
+	}
+
+	return did_try_to_transmit;
+}
+
+/* exported function */
+int
+evdns_count_nameservers(void)
+{
+	const struct nameserver *server = server_head;
+	int n = 0;
+	if (!server)
+		return 0;
+	do {
+		++n;
+		server = server->next;
+	} while (server != server_head);
+	return n;
+}
+
+/* exported function */
+int
+evdns_clear_nameservers_and_suspend(void)
+{
+	struct nameserver *server = server_head, *started_at = server_head;
+	struct request *req = req_head, *req_started_at = req_head;
+
+	if (!server)
+		return 0;
+	while (1) {
+		struct nameserver *next = server->next;
+		(void) event_del(&server->event);
+		if (evtimer_initialized(&server->timeout_event))
+			(void) evtimer_del(&server->timeout_event);
+		if (server->socket >= 0)
+			CLOSE_SOCKET(server->socket);
+		free(server);
+		if (next == started_at)
+			break;
+		server = next;
+	}
+	server_head = NULL;
+	global_good_nameservers = 0;
+
+	while (req) {
+		struct request *next = req->next;
+		req->tx_count = req->reissue_count = 0;
+		req->ns = NULL;
+		/* ???? What to do about searches? */
+		(void) evtimer_del(&req->timeout_event);
+		req->trans_id = 0;
+		req->transmit_me = 0;
+
+		global_requests_waiting++;
+		evdns_request_insert(req, &req_waiting_head);
+		/* We want to insert these suspended elements at the front of
+		 * the waiting queue, since they were pending before any of
+		 * the waiting entries were added.  This is a circular list,
+		 * so we can just shift the start back by one.*/
+		req_waiting_head = req_waiting_head->prev;
+
+		if (next == req_started_at)
+			break;
+		req = next;
+	}
+	req_head = NULL;
+	global_requests_inflight = 0;
+
+	return 0;
+}
+
+
+/* exported function */
+int
+evdns_resume(void)
+{
+	evdns_requests_pump_waiting_queue();
+	return 0;
+}
+
+static int
+_evdns_nameserver_add_impl(unsigned long int address, int port) {
+	/* first check to see if we already have this nameserver */
+
+	const struct nameserver *server = server_head, *const started_at = server_head;
+	struct nameserver *ns;
+	int err = 0;
+	if (server) {
+		do {
+			if (server->address == address) return 3;
+			server = server->next;
+		} while (server != started_at);
+	}
+
+	ns = (struct nameserver *) malloc(sizeof(struct nameserver));
+        if (!ns) return -1;
+
+	memset(ns, 0, sizeof(struct nameserver));
+
+	evtimer_set(&ns->timeout_event, nameserver_prod_callback, ns);
+
+	ns->socket = socket(PF_INET, SOCK_DGRAM, 0);
+	if (ns->socket < 0) { err = 1; goto out1; }
+	FD_CLOSEONEXEC(ns->socket);
+	evutil_make_socket_nonblocking(ns->socket);
+
+	ns->address = address;
+	ns->port = htons(port);
+	ns->state = 1;
+	event_set(&ns->event, ns->socket, EV_READ | EV_PERSIST, nameserver_ready_callback, ns);
+	if (event_add(&ns->event, NULL) < 0) {
+          err = 2;
+          goto out2;
+        }
+
+	log(EVDNS_LOG_DEBUG, "Added nameserver %s", debug_ntoa(address));
+
+	/* insert this nameserver into the list of them */
+	if (!server_head) {
+		ns->next = ns->prev = ns;
+		server_head = ns;
+	} else {
+		ns->next = server_head->next;
+		ns->prev = server_head;
+		server_head->next = ns;
+		ns->next->prev = ns; /* also correct when the list had one element */
+	}
+
+	global_good_nameservers++;
+
+	return 0;
+
+out2:
+	CLOSE_SOCKET(ns->socket);
+out1:
+	free(ns);
+	log(EVDNS_LOG_WARN, "Unable to add nameserver %s: error %d", debug_ntoa(address), err);
+	return err;
+}
+
+/* exported function */
+int
+evdns_nameserver_add(unsigned long int address) {
+	return _evdns_nameserver_add_impl(address, 53);
+}
+
+/* exported function */
+int
+evdns_nameserver_ip_add(const char *ip_as_string) {
+	struct in_addr ina;
+	int port;
+	char buf[20];
+	const char *cp;
+	cp = strchr(ip_as_string, ':');
+	if (! cp) {
+		cp = ip_as_string;
+		port = 53;
+	} else {
+		port = strtoint(cp+1);
+		if (port < 0 || port > 65535) {
+			return 4;
+		}
+		if ((cp-ip_as_string) >= (int)sizeof(buf)) {
+			return 4;
+		}
+		memcpy(buf, ip_as_string, cp-ip_as_string);
+		buf[cp-ip_as_string] = '\0';
+		cp = buf;
+	}
+	if (!inet_aton(cp, &ina)) {
+		return 4;
+	}
+	return _evdns_nameserver_add_impl(ina.s_addr, port);
+}
+
+/* insert into the tail of the queue */
+static void
+evdns_request_insert(struct request *req, struct request **head) {
+	if (!*head) {
+		*head = req;
+		req->next = req->prev = req;
+		return;
+	}
+
+	req->prev = (*head)->prev;
+	req->prev->next = req;
+	req->next = *head;
+	(*head)->prev = req;
+}
+
+static int
+string_num_dots(const char *s) {
+	int count = 0;
+	while ((s = strchr(s, '.'))) {
+		s++;
+		count++;
+	}
+	return count;
+}
+
+static struct request *
+request_new(int type, const char *name, int flags,
+    evdns_callback_type callback, void *user_ptr) {
+	const char issuing_now =
+	    (global_requests_inflight < global_max_requests_inflight) ? 1 : 0;
+
+	const int name_len = strlen(name);
+	const int request_max_len = evdns_request_len(name_len);
+	const u16 trans_id = issuing_now ? transaction_id_pick() : 0xffff;
+	/* the request data is alloced in a single block with the header */
+	struct request *const req =
+	    (struct request *) malloc(sizeof(struct request) + request_max_len);
+	int rlen;
+        (void) flags;
+
+        if (!req) return NULL;
+	memset(req, 0, sizeof(struct request));
+
+	evtimer_set(&req->timeout_event, evdns_request_timeout_callback, req);
+
+	/* request data lives just after the header */
+	req->request = ((u8 *) req) + sizeof(struct request);
+	/* denotes that the request data shouldn't be free()ed */
+	req->request_appended = 1;
+	rlen = evdns_request_data_build(name, name_len, trans_id,
+	    type, CLASS_INET, req->request, request_max_len);
+	if (rlen < 0)
+		goto err1;
+	req->request_len = rlen;
+	req->trans_id = trans_id;
+	req->tx_count = 0;
+	req->request_type = type;
+	req->user_pointer = user_ptr;
+	req->user_callback = callback;
+	req->ns = issuing_now ? nameserver_pick() : NULL;
+	req->next = req->prev = NULL;
+
+	return req;
+err1:
+	free(req);
+	return NULL;
+}
+
+static void
+request_submit(struct request *const req) {
+	if (req->ns) {
+		/* if it has a nameserver assigned then this is going */
+		/* straight into the inflight queue */
+		evdns_request_insert(req, &req_head);
+		global_requests_inflight++;
+		evdns_request_transmit(req);
+	} else {
+		evdns_request_insert(req, &req_waiting_head);
+		global_requests_waiting++;
+	}
+}
+
+/* exported function */
+int evdns_resolve_ipv4(const char *name, int flags,
+    evdns_callback_type callback, void *ptr) {
+	log(EVDNS_LOG_DEBUG, "Resolve requested for %s", name);
+	if (flags & DNS_QUERY_NO_SEARCH) {
+		struct request *const req =
+			request_new(TYPE_A, name, flags, callback, ptr);
+		if (req == NULL)
+			return (1);
+		request_submit(req);
+		return (0);
+	} else {
+		return (search_request_new(TYPE_A, name, flags, callback, ptr));
+	}
+}
+
+/* exported function */
+int evdns_resolve_ipv6(const char *name, int flags,
+					   evdns_callback_type callback, void *ptr) {
+	log(EVDNS_LOG_DEBUG, "Resolve requested for %s", name);
+	if (flags & DNS_QUERY_NO_SEARCH) {
+		struct request *const req =
+			request_new(TYPE_AAAA, name, flags, callback, ptr);
+		if (req == NULL)
+			return (1);
+		request_submit(req);
+		return (0);
+	} else {
+		return (search_request_new(TYPE_AAAA, name, flags, callback, ptr));
+	}
+}
+
+int evdns_resolve_reverse(const struct in_addr *in, int flags, evdns_callback_type callback, void *ptr) {
+	char buf[32];
+	struct request *req;
+	u32 a;
+	assert(in);
+	a = ntohl(in->s_addr);
+	evutil_snprintf(buf, sizeof(buf), "%d.%d.%d.%d.in-addr.arpa",
+			(int)(u8)((a	)&0xff),
+			(int)(u8)((a>>8 )&0xff),
+			(int)(u8)((a>>16)&0xff),
+			(int)(u8)((a>>24)&0xff));
+	log(EVDNS_LOG_DEBUG, "Resolve requested for %s (reverse)", buf);
+	req = request_new(TYPE_PTR, buf, flags, callback, ptr);
+	if (!req) return 1;
+	request_submit(req);
+	return 0;
+}
+
+int evdns_resolve_reverse_ipv6(const struct in6_addr *in, int flags, evdns_callback_type callback, void *ptr) {
+	/* 32 nybbles, 32 periods, "ip6.arpa", NUL. */
+	char buf[73];
+	char *cp;
+	struct request *req;
+	int i;
+	assert(in);
+	cp = buf;
+	for (i=15; i >= 0; --i) {
+		u8 byte = in->s6_addr[i];
+		*cp++ = "0123456789abcdef"[byte & 0x0f];
+		*cp++ = '.';
+		*cp++ = "0123456789abcdef"[byte >> 4];
+		*cp++ = '.';
+	}
+	assert(cp + strlen("ip6.arpa") < buf+sizeof(buf));
+	memcpy(cp, "ip6.arpa", strlen("ip6.arpa")+1);
+	log(EVDNS_LOG_DEBUG, "Resolve requested for %s (reverse)", buf);
+	req = request_new(TYPE_PTR, buf, flags, callback, ptr);
+	if (!req) return 1;
+	request_submit(req);
+	return 0;
+}
+
+/*/////////////////////////////////////////////////////////////////// */
+/* Search support */
+/* */
+/* the libc resolver has support for searching a number of domains */
+/* to find a name. If nothing else then it takes the single domain */
+/* from the gethostname() call. */
+/* */
+/* It can also be configured via the domain and search options in a */
+/* resolv.conf. */
+/* */
+/* The ndots option controls how many dots it takes for the resolver */
+/* to decide that a name is non-local and so try a raw lookup first. */
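+/* */
+/* Example: with ndots = 1 and a search list of ["corp.example.com"], */
+/* a lookup for "db" (zero dots) tries "db.corp.example.com" before */
+/* the raw name, while "db.internal" (one dot, >= ndots) is tried raw */
+/* first and falls back to the search list on failure. */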
+
+struct search_domain {
+	int len;
+	struct search_domain *next;
+	/* the text string is appended to this structure */
+};
+
+struct search_state {
+	int refcount;
+	int ndots;
+	int num_domains;
+	struct search_domain *head;
+};
+
+static struct search_state *global_search_state = NULL;
+
+static void
+search_state_decref(struct search_state *const state) {
+	if (!state) return;
+	state->refcount--;
+	if (!state->refcount) {
+		struct search_domain *next, *dom;
+		for (dom = state->head; dom; dom = next) {
+			next = dom->next;
+			free(dom);
+		}
+		free(state);
+	}
+}
+
+static struct search_state *
+search_state_new(void) {
+	struct search_state *state = (struct search_state *) malloc(sizeof(struct search_state));
+        if (!state) return NULL;
+	memset(state, 0, sizeof(struct search_state));
+	state->refcount = 1;
+	state->ndots = 1;
+
+	return state;
+}
+
+static void
+search_postfix_clear(void) {
+	search_state_decref(global_search_state);
+
+	global_search_state = search_state_new();
+}
+
+/* exported function */
+void
+evdns_search_clear(void) {
+	search_postfix_clear();
+}
+
+static void
+search_postfix_add(const char *domain) {
+	int domain_len;
+	struct search_domain *sdomain;
+	while (domain[0] == '.') domain++;
+	domain_len = strlen(domain);
+
+	if (!global_search_state) global_search_state = search_state_new();
+        if (!global_search_state) return;
+	global_search_state->num_domains++;
+
+	sdomain = (struct search_domain *) malloc(sizeof(struct search_domain) + domain_len);
+        if (!sdomain) return;
+	memcpy( ((u8 *) sdomain) + sizeof(struct search_domain), domain, domain_len);
+	sdomain->next = global_search_state->head;
+	sdomain->len = domain_len;
+
+	global_search_state->head = sdomain;
+}
+
+/* reverse the order of members in the postfix list. This is needed because, */
+/* when parsing resolv.conf we push elements in the wrong order */
+static void
+search_reverse(void) {
+	struct search_domain *cur, *prev = NULL, *next;
+	cur = global_search_state->head;
+	while (cur) {
+		next = cur->next;
+		cur->next = prev;
+		prev = cur;
+		cur = next;
+	}
+
+	global_search_state->head = prev;
+}
+
+/* exported function */
+void
+evdns_search_add(const char *domain) {
+	search_postfix_add(domain);
+}
+
+/* exported function */
+void
+evdns_search_ndots_set(const int ndots) {
+	if (!global_search_state) global_search_state = search_state_new();
+        if (!global_search_state) return;
+	global_search_state->ndots = ndots;
+}
+
+static void
+search_set_from_hostname(void) {
+	char hostname[HOST_NAME_MAX + 1], *domainname;
+
+	search_postfix_clear();
+	if (gethostname(hostname, sizeof(hostname))) return;
+	domainname = strchr(hostname, '.');
+	if (!domainname) return;
+	search_postfix_add(domainname);
+}
+
+/* warning: returns malloced string */
+static char *
+search_make_new(const struct search_state *const state, int n, const char *const base_name) {
+	const int base_len = strlen(base_name);
+	const char need_to_append_dot = base_name[base_len - 1] == '.' ? 0 : 1;
+	struct search_domain *dom;
+
+	for (dom = state->head; dom; dom = dom->next) {
+		if (!n--) {
+			/* this is the postfix we want */
+			/* the actual postfix string is kept at the end of the structure */
+			const u8 *const postfix = ((u8 *) dom) + sizeof(struct search_domain);
+			const int postfix_len = dom->len;
+			char *const newname = (char *) malloc(base_len + need_to_append_dot + postfix_len + 1);
+                        if (!newname) return NULL;
+			memcpy(newname, base_name, base_len);
+			if (need_to_append_dot) newname[base_len] = '.';
+			memcpy(newname + base_len + need_to_append_dot, postfix, postfix_len);
+			newname[base_len + need_to_append_dot + postfix_len] = 0;
+			return newname;
+		}
+	}
+
+	/* we ran off the end of the list and still didn't find the requested string */
+	abort();
+	return NULL; /* unreachable; stops warnings in some compilers. */
+}
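+/* For example, with a search list of ["corp.example.com", "example.com"], */
+/* search_make_new(state, 1, "www") returns a fresh "www.example.com"; */
+/* the dot is inserted only when base_name does not already end in one. */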
+
+static int
+search_request_new(int type, const char *const name, int flags, evdns_callback_type user_callback, void *user_arg) {
+	assert(type == TYPE_A || type == TYPE_AAAA);
+	if ( ((flags & DNS_QUERY_NO_SEARCH) == 0) &&
+	     global_search_state &&
+		 global_search_state->num_domains) {
+		/* we have some domains to search */
+		struct request *req;
+		if (string_num_dots(name) >= global_search_state->ndots) {
+			req = request_new(type, name, flags, user_callback, user_arg);
+			if (!req) return 1;
+			req->search_index = -1;
+		} else {
+			char *const new_name = search_make_new(global_search_state, 0, name);
+                        if (!new_name) return 1;
+			req = request_new(type, new_name, flags, user_callback, user_arg);
+			free(new_name);
+			if (!req) return 1;
+			req->search_index = 0;
+		}
+		req->search_origname = strdup(name);
+		req->search_state = global_search_state;
+		req->search_flags = flags;
+		global_search_state->refcount++;
+		request_submit(req);
+		return 0;
+	} else {
+		struct request *const req = request_new(type, name, flags, user_callback, user_arg);
+		if (!req) return 1;
+		request_submit(req);
+		return 0;
+	}
+}
+
+/* this is called when a request has failed to find a name. We need to check */
+/* if it is part of a search and, if so, try the next name in the list */
+/* returns: */
+/*   0 another request has been submitted */
+/*   1 no more requests needed */
+static int
+search_try_next(struct request *const req) {
+	if (req->search_state) {
+		/* it is part of a search */
+		char *new_name;
+		struct request *newreq;
+		req->search_index++;
+		if (req->search_index >= req->search_state->num_domains) {
+			/* no more postfixes to try, however we may need to try */
+			/* this name without a postfix */
+			if (string_num_dots(req->search_origname) < req->search_state->ndots) {
+				/* yep, we need to try it raw */
+				newreq = request_new(req->request_type, req->search_origname, req->search_flags, req->user_callback, req->user_pointer);
+				log(EVDNS_LOG_DEBUG, "Search: trying raw query %s", req->search_origname);
+				if (newreq) {
+					request_submit(newreq);
+					return 0;
+				}
+			}
+			return 1;
+		}
+
+		new_name = search_make_new(req->search_state, req->search_index, req->search_origname);
+                if (!new_name) return 1;
+		log(EVDNS_LOG_DEBUG, "Search: now trying %s (%d)", new_name, req->search_index);
+		newreq = request_new(req->request_type, new_name, req->search_flags, req->user_callback, req->user_pointer);
+		free(new_name);
+		if (!newreq) return 1;
+		newreq->search_origname = req->search_origname;
+		req->search_origname = NULL;
+		newreq->search_state = req->search_state;
+		newreq->search_flags = req->search_flags;
+		newreq->search_index = req->search_index;
+		newreq->search_state->refcount++;
+		request_submit(newreq);
+		return 0;
+	}
+	return 1;
+}
+
+static void
+search_request_finished(struct request *const req) {
+	if (req->search_state) {
+		search_state_decref(req->search_state);
+		req->search_state = NULL;
+	}
+	if (req->search_origname) {
+		free(req->search_origname);
+		req->search_origname = NULL;
+	}
+}
+
+/*/////////////////////////////////////////////////////////////////// */
+/* Parsing resolv.conf files */
+
+static void
+evdns_resolv_set_defaults(int flags) {
+	/* if the file isn't found then we assume a local resolver */
+	if (flags & DNS_OPTION_SEARCH) search_set_from_hostname();
+	if (flags & DNS_OPTION_NAMESERVERS) evdns_nameserver_ip_add("127.0.0.1");
+}
+
+#ifndef HAVE_STRTOK_R
+static char *
+strtok_r(char *s, const char *delim, char **state) {
+	(void) state; /* unused: this fallback wraps strtok and is not reentrant */
+	return strtok(s, delim);
+}
+#endif
+
+/* helper version of atoi which returns -1 on error */
+static int
+strtoint(const char *const str) {
+	char *endptr;
+	const int r = strtol(str, &endptr, 10);
+	if (*endptr) return -1;
+	return r;
+}
+
+/* helper version of atoi that returns -1 on error and clips to bounds. */
+static int
+strtoint_clipped(const char *const str, int min, int max)
+{
+	int r = strtoint(str);
+	if (r == -1)
+		return r;
+	else if (r<min)
+		return min;
+	else if (r>max)
+		return max;
+	else
+		return r;
+}
+
+/* exported function */
+int
+evdns_set_option(const char *option, const char *val, int flags)
+{
+	if (!strncmp(option, "ndots:", 6)) {
+		const int ndots = strtoint(val);
+		if (ndots == -1) return -1;
+		if (!(flags & DNS_OPTION_SEARCH)) return 0;
+		log(EVDNS_LOG_DEBUG, "Setting ndots to %d", ndots);
+		if (!global_search_state) global_search_state = search_state_new();
+		if (!global_search_state) return -1;
+		global_search_state->ndots = ndots;
+	} else if (!strncmp(option, "timeout:", 8)) {
+		const int timeout = strtoint(val);
+		if (timeout == -1) return -1;
+		if (!(flags & DNS_OPTION_MISC)) return 0;
+		log(EVDNS_LOG_DEBUG, "Setting timeout to %d", timeout);
+		global_timeout.tv_sec = timeout;
+	} else if (!strncmp(option, "max-timeouts:", 12)) {
+		const int maxtimeout = strtoint_clipped(val, 1, 255);
+		if (maxtimeout == -1) return -1;
+		if (!(flags & DNS_OPTION_MISC)) return 0;
+		log(EVDNS_LOG_DEBUG, "Setting maximum allowed timeouts to %d",
+			maxtimeout);
+		global_max_nameserver_timeout = maxtimeout;
+	} else if (!strncmp(option, "max-inflight:", 13)) {
+		const int maxinflight = strtoint_clipped(val, 1, 65000);
+		if (maxinflight == -1) return -1;
+		if (!(flags & DNS_OPTION_MISC)) return 0;
+		log(EVDNS_LOG_DEBUG, "Setting maximum inflight requests to %d",
+			maxinflight);
+		global_max_requests_inflight = maxinflight;
+	} else if (!strncmp(option, "attempts:", 9)) {
+		int retries = strtoint(val);
+		if (retries == -1) return -1;
+		if (retries > 255) retries = 255;
+		if (!(flags & DNS_OPTION_MISC)) return 0;
+		log(EVDNS_LOG_DEBUG, "Setting retries to %d", retries);
+		global_max_retransmits = retries;
+	}
+	return 0;
+}
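+/* Usage sketch: option names include the trailing colon, and the flags */
+/* argument gates which options take effect, e.g. */
+/*   evdns_set_option("ndots:", "2", DNS_OPTION_SEARCH); */
+/*   evdns_set_option("timeout:", "3", DNS_OPTION_MISC); */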
+
+static void
+resolv_conf_parse_line(char *const start, int flags) {
+	char *strtok_state;
+	static const char *const delims = " \t";
+#define NEXT_TOKEN strtok_r(NULL, delims, &strtok_state)
+
+	char *const first_token = strtok_r(start, delims, &strtok_state);
+	if (!first_token) return;
+
+	if (!strcmp(first_token, "nameserver") && (flags & DNS_OPTION_NAMESERVERS)) {
+		const char *const nameserver = NEXT_TOKEN;
+		struct in_addr ina;
+
+		if (nameserver && inet_aton(nameserver, &ina)) {
+			/* address is valid */
+			evdns_nameserver_add(ina.s_addr);
+		}
+	} else if (!strcmp(first_token, "domain") && (flags & DNS_OPTION_SEARCH)) {
+		const char *const domain = NEXT_TOKEN;
+		if (domain) {
+			search_postfix_clear();
+			search_postfix_add(domain);
+		}
+	} else if (!strcmp(first_token, "search") && (flags & DNS_OPTION_SEARCH)) {
+		const char *domain;
+		search_postfix_clear();
+
+		while ((domain = NEXT_TOKEN)) {
+			search_postfix_add(domain);
+		}
+		search_reverse();
+	} else if (!strcmp(first_token, "options")) {
+		const char *option;
+		while ((option = NEXT_TOKEN)) {
+			const char *val = strchr(option, ':');
+			evdns_set_option(option, val ? val+1 : "", flags);
+		}
+	}
+#undef NEXT_TOKEN
+}
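+/* A resolv.conf the parser above accepts might look like (sketch): */
+/* */
+/*   nameserver 10.0.0.1 */
+/*   nameserver 10.0.0.2 */
+/*   search corp.example.com example.com */
+/*   options ndots:2 timeout:3 attempts:2 */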
+
+/* exported function */
+/* returns: */
+/*   0 no errors */
+/*   1 failed to open file */
+/*   2 failed to stat file */
+/*   3 file too large */
+/*   4 out of memory */
+/*   5 short read from file */
+/*   6 no nameservers listed in the file */
+int
+evdns_resolv_conf_parse(int flags, const char *const filename) {
+	struct stat st;
+	int fd, n, r;
+	u8 *resolv;
+	char *start;
+	int err = 0;
+
+	log(EVDNS_LOG_DEBUG, "Parsing resolv.conf file %s", filename);
+
+	fd = open(filename, O_RDONLY);
+	if (fd < 0) {
+		evdns_resolv_set_defaults(flags);
+		return 1;
+	}
+
+	if (fstat(fd, &st)) { err = 2; goto out1; }
+	if (!st.st_size) {
+		evdns_resolv_set_defaults(flags);
+		err = (flags & DNS_OPTION_NAMESERVERS) ? 6 : 0;
+		goto out1;
+	}
+	if (st.st_size > 65535) { err = 3; goto out1; }  /* no resolv.conf should be any bigger */
+
+	resolv = (u8 *) malloc((size_t)st.st_size + 1);
+	if (!resolv) { err = 4; goto out1; }
+
+	n = 0;
+	while ((r = read(fd, resolv+n, (size_t)st.st_size-n)) > 0) {
+		n += r;
+		if (n == st.st_size)
+			break;
+		assert(n < st.st_size);
+	}
+	if (r < 0) { err = 5; goto out2; }
+	resolv[n] = 0;	 /* we malloced an extra byte; this should be fine. */
+
+	start = (char *) resolv;
+	for (;;) {
+		char *const newline = strchr(start, '\n');
+		if (!newline) {
+			resolv_conf_parse_line(start, flags);
+			break;
+		} else {
+			*newline = 0;
+			resolv_conf_parse_line(start, flags);
+			start = newline + 1;
+		}
+	}
+
+	if (!server_head && (flags & DNS_OPTION_NAMESERVERS)) {
+		/* no nameservers were configured. */
+		evdns_nameserver_ip_add("127.0.0.1");
+		err = 6;
+	}
+	if (flags & DNS_OPTION_SEARCH && (!global_search_state || global_search_state->num_domains == 0)) {
+		search_set_from_hostname();
+	}
+
+out2:
+	free(resolv);
+out1:
+	close(fd);
+	return err;
+}
+
+#ifdef WIN32
+/* Add multiple nameservers from a space-or-comma-separated list. */
+static int
+evdns_nameserver_ip_add_line(const char *ips) {
+	const char *addr;
+	char *buf;
+	int r;
+	while (*ips) {
+		while (ISSPACE(*ips) || *ips == ',' || *ips == '\t')
+			++ips;
+		addr = ips;
+		while (ISDIGIT(*ips) || *ips == '.' || *ips == ':')
+			++ips;
+		buf = malloc(ips-addr+1);
+		if (!buf) return 4;
+		memcpy(buf, addr, ips-addr);
+		buf[ips-addr] = '\0';
+		r = evdns_nameserver_ip_add(buf);
+		free(buf);
+		if (r) return r;
+	}
+	return 0;
+}
+
+typedef DWORD(WINAPI *GetNetworkParams_fn_t)(FIXED_INFO *, DWORD*);
+
+/* Use the windows GetNetworkParams interface in iphlpapi.dll to */
+/* figure out what our nameservers are. */
+static int
+load_nameservers_with_getnetworkparams(void)
+{
+	/* Based on MSDN examples and inspection of  c-ares code. */
+	FIXED_INFO *fixed;
+	HMODULE handle = 0;
+	ULONG size = sizeof(FIXED_INFO);
+	void *buf = NULL;
+	int status = 0, r, added_any;
+	IP_ADDR_STRING *ns;
+	GetNetworkParams_fn_t fn;
+
+	if (!(handle = LoadLibraryA("iphlpapi.dll"))) {
+		log(EVDNS_LOG_WARN, "Could not open iphlpapi.dll");
+		status = -1;
+		goto done;
+	}
+	if (!(fn = (GetNetworkParams_fn_t) GetProcAddress(handle, "GetNetworkParams"))) {
+		log(EVDNS_LOG_WARN, "Could not get address of function.");
+		status = -1;
+		goto done;
+	}
+
+	buf = malloc(size);
+	if (!buf) { status = 4; goto done; }
+	fixed = buf;
+	r = fn(fixed, &size);
+	if (r != ERROR_SUCCESS && r != ERROR_BUFFER_OVERFLOW) {
+		status = -1;
+		goto done;
+	}
+	if (r != ERROR_SUCCESS) {
+		free(buf);
+		buf = malloc(size);
+		if (!buf) { status = 4; goto done; }
+		fixed = buf;
+		r = fn(fixed, &size);
+		if (r != ERROR_SUCCESS) {
+			log(EVDNS_LOG_DEBUG, "fn() failed.");
+			status = -1;
+			goto done;
+		}
+	}
+
+	assert(fixed);
+	added_any = 0;
+	ns = &(fixed->DnsServerList);
+	while (ns) {
+		r = evdns_nameserver_ip_add_line(ns->IpAddress.String);
+		if (r) {
+			log(EVDNS_LOG_DEBUG,"Could not add nameserver %s to list,error: %d",
+				(ns->IpAddress.String),(int)GetLastError());
+			status = r;
+			goto done;
+		} else {
+			log(EVDNS_LOG_DEBUG,"Successfully added %s as nameserver",ns->IpAddress.String);
+		}
+
+		added_any++;
+		ns = ns->Next;
+	}
+
+	if (!added_any) {
+		log(EVDNS_LOG_DEBUG, "No nameservers added.");
+		status = -1;
+	}
+
+ done:
+	if (buf)
+		free(buf);
+	if (handle)
+		FreeLibrary(handle);
+	return status;
+}
+
+static int
+config_nameserver_from_reg_key(HKEY key, const char *subkey)
+{
+	char *buf;
+	DWORD bufsz = 0, type = 0;
+	int status = 0;
+
+	if (RegQueryValueExA(key, subkey, 0, &type, NULL, &bufsz)
+	    != ERROR_MORE_DATA)
+		return -1;
+	if (!(buf = malloc(bufsz)))
+		return -1;
+
+	if (RegQueryValueExA(key, subkey, 0, &type, (LPBYTE)buf, &bufsz)
+	    == ERROR_SUCCESS && bufsz > 1) {
+		status = evdns_nameserver_ip_add_line(buf);
+	}
+
+	free(buf);
+	return status;
+}
+
+#define SERVICES_KEY "System\\CurrentControlSet\\Services\\"
+#define WIN_NS_9X_KEY  SERVICES_KEY "VxD\\MSTCP"
+#define WIN_NS_NT_KEY  SERVICES_KEY "Tcpip\\Parameters"
+
+static int
+load_nameservers_from_registry(void)
+{
+	int found = 0;
+	int r;
+#define TRY(k, name) \
+	if (!found && config_nameserver_from_reg_key(k,name) == 0) {	\
+		log(EVDNS_LOG_DEBUG,"Found nameservers in %s/%s",#k,name); \
+		found = 1;						\
+	} else if (!found) {						\
+		log(EVDNS_LOG_DEBUG,"Didn't find nameservers in %s/%s", \
+		    #k,name);						\
+	}
+
+	if (((int)GetVersion()) > 0) { /* NT */
+		HKEY nt_key = 0, interfaces_key = 0;
+
+		if (RegOpenKeyExA(HKEY_LOCAL_MACHINE, WIN_NS_NT_KEY, 0,
+				 KEY_READ, &nt_key) != ERROR_SUCCESS) {
+			log(EVDNS_LOG_DEBUG,"Couldn't open nt key, %d",(int)GetLastError());
+			return -1;
+		}
+		r = RegOpenKeyExA(nt_key, "Interfaces", 0,
+			     KEY_QUERY_VALUE|KEY_ENUMERATE_SUB_KEYS,
+			     &interfaces_key);
+		if (r != ERROR_SUCCESS) {
+			log(EVDNS_LOG_DEBUG,"Couldn't open interfaces key, %d",(int)GetLastError());
+			RegCloseKey(nt_key);
+			return -1;
+		}
+		TRY(nt_key, "NameServer");
+		TRY(nt_key, "DhcpNameServer");
+		TRY(interfaces_key, "NameServer");
+		TRY(interfaces_key, "DhcpNameServer");
+		RegCloseKey(interfaces_key);
+		RegCloseKey(nt_key);
+	} else {
+		HKEY win_key = 0;
+		if (RegOpenKeyExA(HKEY_LOCAL_MACHINE, WIN_NS_9X_KEY, 0,
+				 KEY_READ, &win_key) != ERROR_SUCCESS) {
+			log(EVDNS_LOG_DEBUG, "Couldn't open registry key, %d", (int)GetLastError());
+			return -1;
+		}
+		TRY(win_key, "NameServer");
+		RegCloseKey(win_key);
+	}
+
+	if (found == 0) {
+		log(EVDNS_LOG_WARN,"Didn't find any nameservers.");
+	}
+
+	return found ? 0 : -1;
+#undef TRY
+}
+
+int
+evdns_config_windows_nameservers(void)
+{
+	if (load_nameservers_with_getnetworkparams() == 0)
+		return 0;
+	return load_nameservers_from_registry();
+}
+#endif
+
+int
+evdns_init(void)
+{
+	int res = 0;
+#ifdef WIN32
+	res = evdns_config_windows_nameservers();
+#else
+	res = evdns_resolv_conf_parse(DNS_OPTIONS_ALL, "/etc/resolv.conf");
+#endif
+
+	return (res);
+}
+
+const char *
+evdns_err_to_string(int err)
+{
+    switch (err) {
+	case DNS_ERR_NONE: return "no error";
+	case DNS_ERR_FORMAT: return "misformatted query";
+	case DNS_ERR_SERVERFAILED: return "server failed";
+	case DNS_ERR_NOTEXIST: return "name does not exist";
+	case DNS_ERR_NOTIMPL: return "query not implemented";
+	case DNS_ERR_REFUSED: return "refused";
+
+	case DNS_ERR_TRUNCATED: return "reply truncated or ill-formed";
+	case DNS_ERR_UNKNOWN: return "unknown";
+	case DNS_ERR_TIMEOUT: return "request timed out";
+	case DNS_ERR_SHUTDOWN: return "dns subsystem shut down";
+	default: return "[Unknown error code]";
+    }
+}
+
+void
+evdns_shutdown(int fail_requests)
+{
+	struct nameserver *server, *server_next;
+	struct search_domain *dom, *dom_next;
+
+	while (req_head) {
+		if (fail_requests)
+			reply_callback(req_head, 0, DNS_ERR_SHUTDOWN, NULL);
+		request_finished(req_head, &req_head);
+	}
+	while (req_waiting_head) {
+		if (fail_requests)
+			reply_callback(req_waiting_head, 0, DNS_ERR_SHUTDOWN, NULL);
+		request_finished(req_waiting_head, &req_waiting_head);
+	}
+	global_requests_inflight = global_requests_waiting = 0;
+
+	for (server = server_head; server; server = server_next) {
+		server_next = server->next;
+		if (server->socket >= 0)
+			CLOSE_SOCKET(server->socket);
+		(void) event_del(&server->event);
+		if (server->state == 0)
+                        (void) event_del(&server->timeout_event);
+		free(server);
+		if (server_next == server_head)
+			break;
+	}
+	server_head = NULL;
+	global_good_nameservers = 0;
+
+	if (global_search_state) {
+		for (dom = global_search_state->head; dom; dom = dom_next) {
+			dom_next = dom->next;
+			free(dom);
+		}
+		free(global_search_state);
+		global_search_state = NULL;
+	}
+	evdns_log_fn = NULL;
+}
+
+#ifdef EVDNS_MAIN
+void
+main_callback(int result, char type, int count, int ttl,
+			  void *addrs, void *orig) {
+	char *n = (char*)orig;
+	int i;
+	for (i = 0; i < count; ++i) {
+		if (type == DNS_IPv4_A) {
+			printf("%s: %s\n", n, debug_ntoa(((u32*)addrs)[i]));
+		} else if (type == DNS_PTR) {
+			printf("%s: %s\n", n, ((char**)addrs)[i]);
+		}
+	}
+	if (!count) {
+		printf("%s: No answer (%d)\n", n, result);
+	}
+	fflush(stdout);
+}
+void
+evdns_server_callback(struct evdns_server_request *req, void *data)
+{
+	int i, r;
+	(void)data;
+	/* dummy; give 192.168.11.11 as an answer for all A questions,
+	 *	give foo.bar.example.com as an answer for all PTR questions. */
+	for (i = 0; i < req->nquestions; ++i) {
+		u32 ans = htonl(0xc0a80b0bUL);
+		if (req->questions[i]->type == EVDNS_TYPE_A &&
+			req->questions[i]->dns_question_class == EVDNS_CLASS_INET) {
+			printf(" -- replying for %s (A)\n", req->questions[i]->name);
+			r = evdns_server_request_add_a_reply(req, req->questions[i]->name,
+										  1, &ans, 10);
+			if (r<0)
+				printf("eeep, didn't work.\n");
+		} else if (req->questions[i]->type == EVDNS_TYPE_PTR &&
+				   req->questions[i]->dns_question_class == EVDNS_CLASS_INET) {
+			printf(" -- replying for %s (PTR)\n", req->questions[i]->name);
+			r = evdns_server_request_add_ptr_reply(req, NULL, req->questions[i]->name,
+											"foo.bar.example.com", 10);
+		} else {
+			printf(" -- skipping %s [%d %d]\n", req->questions[i]->name,
+				   req->questions[i]->type, req->questions[i]->dns_question_class);
+		}
+	}
+
+	r = evdns_server_request_respond(req, 0);
+	if (r<0)
+		printf("eeek, couldn't send reply.\n");
+}
+
+void
+logfn(int is_warn, const char *msg) {
+  (void) is_warn;
+  fprintf(stderr, "%s\n", msg);
+}
+int
+main(int c, char **v) {
+	int idx;
+	int reverse = 0, verbose = 0, servertest = 0;
+	if (c<2) {
+		fprintf(stderr, "syntax: %s [-x] [-v] hostname\n", v[0]);
+		fprintf(stderr, "syntax: %s [-servertest]\n", v[0]);
+		return 1;
+	}
+	idx = 1;
+	while (idx < c && v[idx][0] == '-') {
+		if (!strcmp(v[idx], "-x"))
+			reverse = 1;
+		else if (!strcmp(v[idx], "-v"))
+			verbose = 1;
+		else if (!strcmp(v[idx], "-servertest"))
+			servertest = 1;
+		else
+			fprintf(stderr, "Unknown option %s\n", v[idx]);
+		++idx;
+	}
+	event_init();
+	if (verbose)
+		evdns_set_log_fn(logfn);
+	evdns_resolv_conf_parse(DNS_OPTION_NAMESERVERS, "/etc/resolv.conf");
+	if (servertest) {
+		int sock;
+		struct sockaddr_in my_addr;
+		sock = socket(PF_INET, SOCK_DGRAM, 0);
+                evutil_make_socket_nonblocking(sock);
+		my_addr.sin_family = AF_INET;
+		my_addr.sin_port = htons(10053);
+		my_addr.sin_addr.s_addr = INADDR_ANY;
+		if (bind(sock, (struct sockaddr*)&my_addr, sizeof(my_addr))<0) {
+			perror("bind");
+			exit(1);
+		}
+		evdns_add_server_port(sock, 0, evdns_server_callback, NULL);
+	}
+	for (; idx < c; ++idx) {
+		if (reverse) {
+			struct in_addr addr;
+			if (!inet_aton(v[idx], &addr)) {
+				fprintf(stderr, "Skipping non-IP %s\n", v[idx]);
+				continue;
+			}
+			fprintf(stderr, "resolving %s...\n",v[idx]);
+			evdns_resolve_reverse(&addr, 0, main_callback, v[idx]);
+		} else {
+			fprintf(stderr, "resolving (fwd) %s...\n",v[idx]);
+			evdns_resolve_ipv4(v[idx], 0, main_callback, v[idx]);
+		}
+	}
+	fflush(stdout);
+	event_dispatch();
+	return 0;
+}
+#endif
diff --git a/base/third_party/libevent/evdns.h b/base/third_party/libevent/evdns.h
new file mode 100644
index 0000000..fca4ac3
--- /dev/null
+++ b/base/third_party/libevent/evdns.h
@@ -0,0 +1,528 @@
+/*
+ * Copyright (c) 2006 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * The original DNS code is due to Adam Langley with heavy
+ * modifications by Nick Mathewson.  Adam put his DNS software in the
+ * public domain.  You can find his original copyright below.  Please be
+ * aware that the code as part of libevent is governed by the 3-clause
+ * BSD license above.
+ *
+ * This software is Public Domain. To view a copy of the public domain dedication,
+ * visit http://creativecommons.org/licenses/publicdomain/ or send a letter to
+ * Creative Commons, 559 Nathan Abbott Way, Stanford, California 94305, USA.
+ *
+ * I ask and expect, but do not require, that all derivative works contain an
+ * attribution similar to:
+ * 	Parts developed by Adam Langley <agl@imperialviolet.org>
+ *
+ * You may wish to replace the word "Parts" with something else depending on
+ * the amount of original code.
+ *
+ * (Derivative works does not include programs which link against, run or include
+ * the source verbatim in their source distributions)
+ */
+
+/** @file evdns.h
+ *
+ * Welcome, gentle reader
+ *
+ * Async DNS lookups are really a whole lot harder than they should be,
+ * mostly stemming from the fact that the libc resolver has never been
+ * very good at them. Before you use this library you should see if libc
+ * can do the job for you with the modern async call getaddrinfo_a
+ * (see http://www.imperialviolet.org/page25.html#e498). Otherwise,
+ * please continue.
+ *
+ * This code is based on libevent and you must call event_init before
+ * any of the APIs in this file. You must also seed the OpenSSL random
+ * source if you are using OpenSSL for ids (see below).
+ *
+ * This library is designed to be included and shipped with your source
+ * code. You statically link with it. You should also test for the
+ * existence of strtok_r and define HAVE_STRTOK_R if you have it.
+ *
+ * The DNS protocol requires a good source of id numbers and these
+ * numbers should be unpredictable for spoofing reasons. There are
+ * three methods for generating them here and you must define exactly
+ * one of them. In increasing order of preference:
+ *
+ * DNS_USE_GETTIMEOFDAY_FOR_ID:
+ *   Using the bottom 16 bits of the usec result from gettimeofday. This
+ *   is a pretty poor solution but should work anywhere.
+ * DNS_USE_CPU_CLOCK_FOR_ID:
+ *   Using the bottom 16 bits of the nsec result from the CPU's time
+ *   counter. This is better, but may not work everywhere. Requires
+ *   POSIX realtime support and you'll need to link against -lrt on
+ *   glibc systems at least.
+ * DNS_USE_OPENSSL_FOR_ID:
+ *   Uses the OpenSSL RAND_bytes call to generate the data. You must
+ *   have seeded the pool before making any calls to this library.
+ *
+ * The library keeps track of the state of nameservers and will avoid
+ * them when they go down. Otherwise it will round robin between them.
+ *
+ * Quick start guide:
+ *   #include "evdns.h"
+ *   void callback(int result, char type, int count, int ttl,
+ *		 void *addresses, void *arg);
+ *   evdns_resolv_conf_parse(DNS_OPTIONS_ALL, "/etc/resolv.conf");
+ *   evdns_resolve("www.hostname.com", 0, callback, NULL);
+ *
+ * When the lookup is complete the callback function is called. The
+ * first argument will be one of the DNS_ERR_* defines in evdns.h.
+ * Hopefully it will be DNS_ERR_NONE, in which case type will be
+ * DNS_IPv4_A, count will be the number of IP addresses, ttl is the time
+ * for which the data can be cached (in seconds), addresses will point
+ * to an array of uint32_t's and arg will be whatever you passed to
+ * evdns_resolve.
+ *
+ * Searching:
+ *
+ * In order for this library to be a good replacement for glibc's resolver it
+ * supports searching. This involves setting a list of default domains, in
+ * which names will be queried for. The number of dots in the query name
+ * determines the order in which this list is used.
+ *
+ * Searching appears to be a single lookup from the point of view of the API,
+ * although many DNS queries may be generated from a single call to
+ * evdns_resolve. Searching can also drastically slow down the resolution
+ * of names.
+ *
+ * To disable searching:
+ *   1. Never set it up. If you never call evdns_resolv_conf_parse or
+ *   evdns_search_add then no searching will occur.
+ *
+ *   2. If you do call evdns_resolv_conf_parse then don't pass
+ *   DNS_OPTION_SEARCH (or DNS_OPTIONS_ALL, which implies it).
+ *
+ *   3. When calling evdns_resolve, pass the DNS_QUERY_NO_SEARCH flag.
+ *
+ * The order of searches depends on the number of dots in the name. If the
+ * number is greater than the ndots setting then the names is first tried
+ * globally. Otherwise each search domain is appended in turn.
+ *
+ * The ndots setting can either be set from a resolv.conf, or by calling
+ * evdns_search_ndots_set.
+ *
+ * For example, with ndots set to 1 (the default) and a search domain list of
+ * ["myhome.net"]:
+ *  Query: www
+ *  Order: www.myhome.net, www.
+ *
+ *  Query: www.abc
+ *  Order: www.abc., www.abc.myhome.net
+ *
+ * Internals:
+ *
+ * Requests are kept in two queues. The first is the inflight queue. In
+ * this queue requests have an allocated transaction id and nameserver.
+ * They will soon be transmitted if they haven't already been.
+ *
+ * The second is the waiting queue. The size of the inflight ring is
+ * limited and all other requests wait in the waiting queue for space. This
+ * bounds the number of concurrent requests so that we don't flood the
+ * nameserver. Several algorithms require a full walk of the inflight
+ * queue and so bounding its size keeps things going nicely under huge
+ * (many thousands of requests) loads.
+ *
+ * If a nameserver loses too many requests it is considered down and we
+ * try not to use it. After a while we send a probe to that nameserver
+ * (a lookup for www.google.com) and, if it replies, we consider it working
+ * again. If the nameserver fails a probe we wait longer to try again
+ * with the next probe.
+ */
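+/*
+ * Build sketch (file names assumed): on a glibc system,
+ *   cc -DHAVE_STRTOK_R -DDNS_USE_CPU_CLOCK_FOR_ID main.c evdns.c -levent -lrt
+ * selects the CPU-clock id source described above.
+ */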
+
+#ifndef EVENTDNS_H
+#define EVENTDNS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* For integer types. */
+#include "evutil.h"
+
+/** Error codes 0-5 are as described in RFC 1035. */
+#define DNS_ERR_NONE 0
+/** The name server was unable to interpret the query */
+#define DNS_ERR_FORMAT 1
+/** The name server was unable to process this query due to a problem with the
+ * name server */
+#define DNS_ERR_SERVERFAILED 2
+/** The domain name does not exist */
+#define DNS_ERR_NOTEXIST 3
+/** The name server does not support the requested kind of query */
+#define DNS_ERR_NOTIMPL 4
+/** The name server refuses to perform the specified operation for policy
+ * reasons */
+#define DNS_ERR_REFUSED 5
+/** The reply was truncated or ill-formatted */
+#define DNS_ERR_TRUNCATED 65
+/** An unknown error occurred */
+#define DNS_ERR_UNKNOWN 66
+/** Communication with the server timed out */
+#define DNS_ERR_TIMEOUT 67
+/** The request was canceled because the DNS subsystem was shut down. */
+#define DNS_ERR_SHUTDOWN 68
+
+#define DNS_IPv4_A 1
+#define DNS_PTR 2
+#define DNS_IPv6_AAAA 3
+
+#define DNS_QUERY_NO_SEARCH 1
+
+#define DNS_OPTION_SEARCH 1
+#define DNS_OPTION_NAMESERVERS 2
+#define DNS_OPTION_MISC 4
+#define DNS_OPTIONS_ALL 7
+
+/**
+ * The callback that contains the results from a lookup.
+ * - type is either DNS_IPv4_A or DNS_PTR or DNS_IPv6_AAAA
+ * - count contains the number of addresses of form type
+ * - ttl is the number of seconds the resolution may be cached for.
+ * - addresses needs to be cast according to type
+ */
+typedef void (*evdns_callback_type) (int result, char type, int count, int ttl, void *addresses, void *arg);
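+
+/* A conforming callback might look like this (sketch; the casts follow
+ * the description above):
+ *
+ *   void on_resolve(int result, char type, int count, int ttl,
+ *                   void *addresses, void *arg) {
+ *       if (result != DNS_ERR_NONE) return;
+ *       if (type == DNS_IPv4_A) {
+ *           uint32_t *a = (uint32_t *) addresses;   // count entries, network order
+ *       } else if (type == DNS_PTR) {
+ *           char **names = (char **) addresses;     // count hostname strings
+ *       }
+ *   }
+ */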
+
+/**
+  Initialize the asynchronous DNS library.
+
+  This function initializes support for non-blocking name resolution by
+  calling evdns_resolv_conf_parse() on UNIX and
+  evdns_config_windows_nameservers() on Windows.
+
+  @return 0 if successful, or -1 if an error occurred
+  @see evdns_shutdown()
+ */
+int evdns_init(void);
+
+
+/**
+  Shut down the asynchronous DNS resolver and terminate all active requests.
+
+  If the 'fail_requests' option is enabled, all active requests will return
+  an empty result with the error flag set to DNS_ERR_SHUTDOWN. Otherwise,
+  the requests will be silently discarded.
+
+  @param fail_requests if zero, active requests will be aborted; if non-zero,
+		active requests will return DNS_ERR_SHUTDOWN.
+  @see evdns_init()
+ */
+void evdns_shutdown(int fail_requests);
+
+
+/**
+  Convert a DNS error code to a string.
+
+  @param err the DNS error code
+  @return a string containing an explanation of the error code
+*/
+const char *evdns_err_to_string(int err);
+
+
+/**
+  Add a nameserver.
+
+  The address should be an IPv4 address in network byte order.
+  The type of address is chosen so that it matches in_addr.s_addr.
+
+  @param address an IP address in network byte order
+  @return 0 if successful, or -1 if an error occurred
+  @see evdns_nameserver_ip_add()
+ */
+int evdns_nameserver_add(unsigned long int address);
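+/* Usage sketch (the address is already in network byte order): */
+/*   evdns_nameserver_add(inet_addr("10.0.0.1")); */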
+
+
+/**
+  Get the number of configured nameservers.
+
+  This returns the number of configured nameservers (not necessarily the
+  number of running nameservers).  This is useful for double-checking
+  whether our calls to the various nameserver configuration functions
+  have been successful.
+
+  @return the number of configured nameservers
+  @see evdns_nameserver_add()
+ */
+int evdns_count_nameservers(void);
+
+
+/**
+  Remove all configured nameservers, and suspend all pending resolves.
+
+  Resolves will not necessarily be re-attempted until evdns_resume() is called.
+
+  @return 0 if successful, or -1 if an error occurred
+  @see evdns_resume()
+ */
+int evdns_clear_nameservers_and_suspend(void);
+
+
+/**
+  Resume normal operation and continue any suspended resolve requests.
+
+  Re-attempt resolves left in limbo after an earlier call to
+  evdns_clear_nameservers_and_suspend().
+
+  @return 0 if successful, or -1 if an error occurred
+  @see evdns_clear_nameservers_and_suspend()
+ */
+int evdns_resume(void);
+
+
+/**
+  Add a nameserver.
+
+  This wraps the evdns_nameserver_add() function by parsing a string as an IP
+  address and adding it as a nameserver.
+
+  @return 0 if successful, or -1 if an error occurred
+  @see evdns_nameserver_add()
+ */
+int evdns_nameserver_ip_add(const char *ip_as_string);
+
+
+/**
+  Lookup an A record for a given name.
+
+  @param name a DNS hostname
+  @param flags either 0, or DNS_QUERY_NO_SEARCH to disable searching for this query.
+  @param callback a callback function to invoke when the request is completed
+  @param ptr an argument to pass to the callback function
+  @return 0 if successful, or -1 if an error occurred
+  @see evdns_resolve_ipv6(), evdns_resolve_reverse(), evdns_resolve_reverse_ipv6()
+ */
+int evdns_resolve_ipv4(const char *name, int flags, evdns_callback_type callback, void *ptr);
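+
+/* A minimal usage sketch (an editor's illustration, not part of the
+ * original header); the resolver runs inside the normal libevent loop,
+ * and example_callback is the hypothetical callback shown earlier:
+ *
+ *   if (evdns_init() == 0)
+ *       evdns_resolve_ipv4("www.example.com", 0, example_callback, NULL);
+ *   event_dispatch();
+ */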
+
+
+/**
+  Lookup an AAAA record for a given name.
+
+  @param name a DNS hostname
+  @param flags either 0, or DNS_QUERY_NO_SEARCH to disable searching for this query.
+  @param callback a callback function to invoke when the request is completed
+  @param ptr an argument to pass to the callback function
+  @return 0 if successful, or -1 if an error occurred
+  @see evdns_resolve_ipv4(), evdns_resolve_reverse(), evdns_resolve_reverse_ipv6()
+ */
+int evdns_resolve_ipv6(const char *name, int flags, evdns_callback_type callback, void *ptr);
+
+struct in_addr;
+struct in6_addr;
+
+/**
+  Lookup a PTR record for a given IP address.
+
+  @param in an IPv4 address
+  @param flags either 0, or DNS_QUERY_NO_SEARCH to disable searching for this query.
+  @param callback a callback function to invoke when the request is completed
+  @param ptr an argument to pass to the callback function
+  @return 0 if successful, or -1 if an error occurred
+  @see evdns_resolve_reverse_ipv6()
+ */
+int evdns_resolve_reverse(const struct in_addr *in, int flags, evdns_callback_type callback, void *ptr);
+
+
+/**
+  Lookup a PTR record for a given IPv6 address.
+
+  @param in an IPv6 address
+  @param flags either 0, or DNS_QUERY_NO_SEARCH to disable searching for this query.
+  @param callback a callback function to invoke when the request is completed
+  @param ptr an argument to pass to the callback function
+  @return 0 if successful, or -1 if an error occurred
+  @see evdns_resolve_reverse_ipv6()
+ */
+int evdns_resolve_reverse_ipv6(const struct in6_addr *in, int flags, evdns_callback_type callback, void *ptr);
+
+
+/**
+  Set the value of a configuration option.
+
+  The currently available configuration options are:
+
+    ndots, timeout, max-timeouts, max-inflight, and attempts
+
+  @param option the name of the configuration option to be modified
+  @param val the value to be set
+  @param flags either 0, or a bitwise-OR of DNS_OPTION_SEARCH and DNS_OPTION_MISC
+  @return 0 if successful, or -1 if an error occurred
+ */
+int evdns_set_option(const char *option, const char *val, int flags);
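+
+/* For example (an illustrative sketch; in this version of evdns the
+ * option name includes a trailing colon):
+ *
+ *   evdns_set_option("ndots:", "2", DNS_OPTIONS_ALL);
+ *   evdns_set_option("attempts:", "3", DNS_OPTIONS_ALL);
+ */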
+
+
+/**
+  Parse a resolv.conf file.
+
+  The 'flags' parameter determines what information is parsed from the
+  resolv.conf file. See the man page for resolv.conf for the format of this
+  file.
+
+  The following directives are not parsed from the file: sortlist, rotate,
+  no-check-names, inet6, debug.
+
+  If this function encounters an error, the possible return values are: 1 =
+  failed to open file, 2 = failed to stat file, 3 = file too large, 4 = out of
+  memory, 5 = short read from file, 6 = no nameservers listed in the file
+
+  @param flags any of DNS_OPTION_NAMESERVERS|DNS_OPTION_SEARCH|DNS_OPTION_MISC|
+         DNS_OPTIONS_ALL
+  @param filename the path to the resolv.conf file
+  @return 0 if successful, or various positive error codes if an error
+          occurred (see above)
+  @see resolv.conf(3), evdns_config_windows_nameservers()
+ */
+int evdns_resolv_conf_parse(int flags, const char *const filename);
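+
+/* For example (an illustrative sketch), parsing everything from the
+ * conventional location, much as evdns_init() does on UNIX:
+ *
+ *   int err = evdns_resolv_conf_parse(DNS_OPTIONS_ALL, "/etc/resolv.conf");
+ */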
+
+
+/**
+  Obtain nameserver information using the Windows API.
+
+  Attempt to configure a set of nameservers based on platform settings on
+  a win32 host.  Preferentially tries to use GetNetworkParams; if that fails,
+  looks in the registry.
+
+  @return 0 if successful, or -1 if an error occurred
+  @see evdns_resolv_conf_parse()
+ */
+#ifdef WIN32
+int evdns_config_windows_nameservers(void);
+#endif
+
+
+/**
+  Clear the list of search domains.
+ */
+void evdns_search_clear(void);
+
+
+/**
+  Add a domain to the list of search domains
+
+  @param domain the domain to be added to the search list
+ */
+void evdns_search_add(const char *domain);
+
+
+/**
+  Set the 'ndots' parameter for searches.
+
+  Sets the number of dots which, when found in a name, cause
+  the first query to be made without any search domain.
+
+  @param ndots the new ndots parameter
+ */
+void evdns_search_ndots_set(const int ndots);
+
+/**
+  A callback that is invoked when a log message is generated
+
+  @param is_warning indicates if the log message is a 'warning'
+  @param msg the content of the log message
+ */
+typedef void (*evdns_debug_log_fn_type)(int is_warning, const char *msg);
+
+
+/**
+  Set the callback function to handle log messages.
+
+  @param fn the callback to be invoked when a log message is generated
+ */
+void evdns_set_log_fn(evdns_debug_log_fn_type fn);
+
+/**
+   Set a callback that will be invoked to generate transaction IDs.  By
+   default, we pick transaction IDs based on the current clock time.
+
+   @param fn the new callback, or NULL to use the default.
+ */
+void evdns_set_transaction_id_fn(ev_uint16_t (*fn)(void));
+
+#define DNS_NO_SEARCH 1
+
+/*
+ * Structures and functions used to implement a DNS server.
+ */
+
+struct evdns_server_request {
+	int flags;
+	int nquestions;
+	struct evdns_server_question **questions;
+};
+struct evdns_server_question {
+	int type;
+#ifdef __cplusplus
+	int dns_question_class;
+#else
+	/* You should refer to this field as "dns_question_class".  The
+	 * name "class" works in C for backward compatibility, and will be
+	 * removed in a future version. (1.5 or later). */
+	int class;
+#define dns_question_class class
+#endif
+	char name[1];
+};
+typedef void (*evdns_request_callback_fn_type)(struct evdns_server_request *, void *);
+#define EVDNS_ANSWER_SECTION 0
+#define EVDNS_AUTHORITY_SECTION 1
+#define EVDNS_ADDITIONAL_SECTION 2
+
+#define EVDNS_TYPE_A	   1
+#define EVDNS_TYPE_NS	   2
+#define EVDNS_TYPE_CNAME   5
+#define EVDNS_TYPE_SOA	   6
+#define EVDNS_TYPE_PTR	  12
+#define EVDNS_TYPE_MX	  15
+#define EVDNS_TYPE_TXT	  16
+#define EVDNS_TYPE_AAAA	  28
+
+#define EVDNS_QTYPE_AXFR 252
+#define EVDNS_QTYPE_ALL	 255
+
+#define EVDNS_CLASS_INET   1
+
+struct evdns_server_port *evdns_add_server_port(int socket, int is_tcp, evdns_request_callback_fn_type callback, void *user_data);
+void evdns_close_server_port(struct evdns_server_port *port);
+
+int evdns_server_request_add_reply(struct evdns_server_request *req, int section, const char *name, int type, int dns_class, int ttl, int datalen, int is_name, const char *data);
+int evdns_server_request_add_a_reply(struct evdns_server_request *req, const char *name, int n, void *addrs, int ttl);
+int evdns_server_request_add_aaaa_reply(struct evdns_server_request *req, const char *name, int n, void *addrs, int ttl);
+int evdns_server_request_add_ptr_reply(struct evdns_server_request *req, struct in_addr *in, const char *inaddr_name, const char *hostname, int ttl);
+int evdns_server_request_add_cname_reply(struct evdns_server_request *req, const char *name, const char *cname, int ttl);
+
+int evdns_server_request_respond(struct evdns_server_request *req, int err);
+int evdns_server_request_drop(struct evdns_server_request *req);
+struct sockaddr;
+int evdns_server_request_get_requesting_addr(struct evdns_server_request *_req, struct sockaddr *sa, int addr_len);
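+
+/* An illustrative server sketch (an editor's example, not part of the
+ * original header): answer every A question with 127.0.0.1. 'sock' is
+ * assumed to be a bound UDP socket.
+ *
+ *   static void server_callback(struct evdns_server_request *req, void *data) {
+ *       int i;
+ *       struct in_addr ans;
+ *       ans.s_addr = htonl(INADDR_LOOPBACK);
+ *       for (i = 0; i < req->nquestions; ++i) {
+ *           struct evdns_server_question *q = req->questions[i];
+ *           if (q->type == EVDNS_TYPE_A)
+ *               evdns_server_request_add_a_reply(req, q->name, 1, &ans, 60);
+ *       }
+ *       evdns_server_request_respond(req, DNS_ERR_NONE);
+ *   }
+ *
+ *   struct evdns_server_port *port =
+ *       evdns_add_server_port(sock, 0, server_callback, NULL);
+ */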
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  /* !EVENTDNS_H */
diff --git a/base/third_party/libevent/event-config.h b/base/third_party/libevent/event-config.h
new file mode 100644
index 0000000..bbd23f1
--- /dev/null
+++ b/base/third_party/libevent/event-config.h
@@ -0,0 +1,24 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is Chromium-specific, and brings in the appropriate
+// event-config.h depending on your platform.
+
+#if defined(__native_client_nonsfi__)
+#include "base/third_party/libevent/nacl_nonsfi/event-config.h"
+#elif defined(__APPLE__)
+#include "base/third_party/libevent/mac/event-config.h"
+#elif defined(ANDROID)
+#include "base/third_party/libevent/android/event-config.h"
+#elif defined(__linux__)
+#include "base/third_party/libevent/linux/event-config.h"
+#elif defined(__FreeBSD__)
+#include "base/third_party/libevent/freebsd/event-config.h"
+#elif defined(__sun)
+#include "base/third_party/libevent/solaris/event-config.h"
+#elif defined(_AIX)
+#include "base/third_party/libevent/aix/event-config.h"
+#else
+#error generate event-config.h for your platform
+#endif
diff --git a/base/third_party/libevent/event-internal.h b/base/third_party/libevent/event-internal.h
new file mode 100644
index 0000000..b7f0040
--- /dev/null
+++ b/base/third_party/libevent/event-internal.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _EVENT_INTERNAL_H_
+#define _EVENT_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "config.h"
+#include "min_heap.h"
+#include "evsignal.h"
+
+struct eventop {
+	const char *name;
+	void *(*init)(struct event_base *);
+	int (*add)(void *, struct event *);
+	int (*del)(void *, struct event *);
+	int (*dispatch)(struct event_base *, void *, struct timeval *);
+	void (*dealloc)(struct event_base *, void *);
+	/* set if we need to reinitialize the event base */
+	int need_reinit;
+};
+
+struct event_base {
+	const struct eventop *evsel;
+	void *evbase;
+	int event_count;		/* counts number of total events */
+	int event_count_active;	/* counts number of active events */
+
+	int event_gotterm;		/* Set to terminate loop */
+	int event_break;		/* Set to terminate loop immediately */
+
+	/* active event management */
+	struct event_list **activequeues;
+	int nactivequeues;
+
+	/* signal handling info */
+	struct evsignal_info sig;
+
+	struct event_list eventqueue;
+	struct timeval event_tv;
+
+	struct min_heap timeheap;
+
+	struct timeval tv_cache;
+};
+
+/* Internal use only: Functions that might be missing from <sys/queue.h> */
+#ifndef HAVE_TAILQFOREACH
+#define	TAILQ_FIRST(head)		((head)->tqh_first)
+#define	TAILQ_END(head)			NULL
+#define	TAILQ_NEXT(elm, field)		((elm)->field.tqe_next)
+#define TAILQ_FOREACH(var, head, field)					\
+	for((var) = TAILQ_FIRST(head);					\
+	    (var) != TAILQ_END(head);					\
+	    (var) = TAILQ_NEXT(var, field))
+#define	TAILQ_INSERT_BEFORE(listelm, elm, field) do {			\
+	(elm)->field.tqe_prev = (listelm)->field.tqe_prev;		\
+	(elm)->field.tqe_next = (listelm);				\
+	*(listelm)->field.tqe_prev = (elm);				\
+	(listelm)->field.tqe_prev = &(elm)->field.tqe_next;		\
+} while (0)
+#endif /* TAILQ_FOREACH */
+
+int _evsignal_set_handler(struct event_base *base, int evsignal,
+			  void (*fn)(int));
+int _evsignal_restore_handler(struct event_base *base, int evsignal);
+
+/* defined in evutil.c */
+const char *evutil_getenv(const char *varname);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _EVENT_INTERNAL_H_ */
diff --git a/base/third_party/libevent/event.3 b/base/third_party/libevent/event.3
new file mode 100644
index 0000000..5b33ec6
--- /dev/null
+++ b/base/third_party/libevent/event.3
@@ -0,0 +1,624 @@
+.\"	$OpenBSD: event.3,v 1.4 2002/07/12 18:50:48 provos Exp $
+.\"
+.\" Copyright (c) 2000 Artur Grabowski <art@openbsd.org>
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\"
+.\" 1. Redistributions of source code must retain the above copyright
+.\"    notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\"    notice, this list of conditions and the following disclaimer in the
+.\"    documentation and/or other materials provided with the distribution.
+.\" 3. The name of the author may not be used to endorse or promote products
+.\"    derived from this software without specific prior written permission.
+.\"
+.\" THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
+.\" INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+.\" AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+.\" THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+.\" EXEMPLARY, OR CONSEQUENTIAL  DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+.\" PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+.\" OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+.\" WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+.\" OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+.\" ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+.\"
+.Dd August 8, 2000
+.Dt EVENT 3
+.Os
+.Sh NAME
+.Nm event_init ,
+.Nm event_dispatch ,
+.Nm event_loop ,
+.Nm event_loopexit ,
+.Nm event_loopbreak ,
+.Nm event_set ,
+.Nm event_base_dispatch ,
+.Nm event_base_loop ,
+.Nm event_base_loopexit ,
+.Nm event_base_loopbreak ,
+.Nm event_base_set ,
+.Nm event_base_free ,
+.Nm event_add ,
+.Nm event_del ,
+.Nm event_once ,
+.Nm event_base_once ,
+.Nm event_pending ,
+.Nm event_initialized ,
+.Nm event_priority_init ,
+.Nm event_priority_set ,
+.Nm evtimer_set ,
+.Nm evtimer_add ,
+.Nm evtimer_del ,
+.Nm evtimer_pending ,
+.Nm evtimer_initialized ,
+.Nm signal_set ,
+.Nm signal_add ,
+.Nm signal_del ,
+.Nm signal_pending ,
+.Nm signal_initialized ,
+.Nm bufferevent_new ,
+.Nm bufferevent_free ,
+.Nm bufferevent_write ,
+.Nm bufferevent_write_buffer ,
+.Nm bufferevent_read ,
+.Nm bufferevent_enable ,
+.Nm bufferevent_disable ,
+.Nm bufferevent_settimeout ,
+.Nm bufferevent_base_set ,
+.Nm evbuffer_new ,
+.Nm evbuffer_free ,
+.Nm evbuffer_add ,
+.Nm evbuffer_add_buffer ,
+.Nm evbuffer_add_printf ,
+.Nm evbuffer_add_vprintf ,
+.Nm evbuffer_drain ,
+.Nm evbuffer_write ,
+.Nm evbuffer_read ,
+.Nm evbuffer_find ,
+.Nm evbuffer_readline ,
+.Nm evhttp_new ,
+.Nm evhttp_bind_socket ,
+.Nm evhttp_free
+.Nd execute a function when a specific event occurs
+.Sh SYNOPSIS
+.Fd #include <sys/time.h>
+.Fd #include <event.h>
+.Ft "struct event_base *"
+.Fn "event_init" "void"
+.Ft int
+.Fn "event_dispatch" "void"
+.Ft int
+.Fn "event_loop" "int flags"
+.Ft int
+.Fn "event_loopexit" "struct timeval *tv"
+.Ft int
+.Fn "event_loopbreak" "void"
+.Ft void
+.Fn "event_set" "struct event *ev" "int fd" "short event" "void (*fn)(int, short, void *)" "void *arg"
+.Ft int
+.Fn "event_base_dispatch" "struct event_base *base"
+.Ft int
+.Fn "event_base_loop" "struct event_base *base" "int flags"
+.Ft int
+.Fn "event_base_loopexit" "struct event_base *base" "struct timeval *tv"
+.Ft int
+.Fn "event_base_loopbreak" "struct event_base *base"
+.Ft int
+.Fn "event_base_set" "struct event_base *base" "struct event *"
+.Ft void
+.Fn "event_base_free" "struct event_base *base"
+.Ft int
+.Fn "event_add" "struct event *ev" "struct timeval *tv"
+.Ft int
+.Fn "event_del" "struct event *ev"
+.Ft int
+.Fn "event_once" "int fd" "short event" "void (*fn)(int, short, void *)" "void *arg" "struct timeval *tv"
+.Ft int
+.Fn "event_base_once" "struct event_base *base" "int fd" "short event" "void (*fn)(int, short, void *)" "void *arg" "struct timeval *tv"
+.Ft int
+.Fn "event_pending" "struct event *ev" "short event" "struct timeval *tv"
+.Ft int
+.Fn "event_initialized" "struct event *ev"
+.Ft int
+.Fn "event_priority_init" "int npriorities"
+.Ft int
+.Fn "event_priority_set" "struct event *ev" "int priority"
+.Ft void
+.Fn "evtimer_set" "struct event *ev" "void (*fn)(int, short, void *)" "void *arg"
+.Ft void
+.Fn "evtimer_add" "struct event *ev" "struct timeval *"
+.Ft void
+.Fn "evtimer_del" "struct event *ev"
+.Ft int
+.Fn "evtimer_pending" "struct event *ev" "struct timeval *tv"
+.Ft int
+.Fn "evtimer_initialized" "struct event *ev"
+.Ft void
+.Fn "signal_set" "struct event *ev" "int signal" "void (*fn)(int, short, void *)" "void *arg"
+.Ft void
+.Fn "signal_add" "struct event *ev" "struct timeval *"
+.Ft void
+.Fn "signal_del" "struct event *ev"
+.Ft int
+.Fn "signal_pending" "struct event *ev" "struct timeval *tv"
+.Ft int
+.Fn "signal_initialized" "struct event *ev"
+.Ft "struct bufferevent *"
+.Fn "bufferevent_new" "int fd" "evbuffercb readcb" "evbuffercb writecb" "everrorcb" "void *cbarg"
+.Ft void
+.Fn "bufferevent_free" "struct bufferevent *bufev"
+.Ft int
+.Fn "bufferevent_write" "struct bufferevent *bufev" "void *data" "size_t size"
+.Ft int
+.Fn "bufferevent_write_buffer" "struct bufferevent *bufev" "struct evbuffer *buf"
+.Ft size_t
+.Fn "bufferevent_read" "struct bufferevent *bufev" "void *data" "size_t size"
+.Ft int
+.Fn "bufferevent_enable" "struct bufferevent *bufev" "short event"
+.Ft int
+.Fn "bufferevent_disable" "struct bufferevent *bufev" "short event"
+.Ft void
+.Fn "bufferevent_settimeout" "struct bufferevent *bufev" "int timeout_read" "int timeout_write"
+.Ft int
+.Fn "bufferevent_base_set" "struct event_base *base" "struct bufferevent *bufev"
+.Ft "struct evbuffer *"
+.Fn "evbuffer_new" "void"
+.Ft void
+.Fn "evbuffer_free" "struct evbuffer *buf"
+.Ft int
+.Fn "evbuffer_add" "struct evbuffer *buf" "const void *data" "size_t size"
+.Ft int
+.Fn "evbuffer_add_buffer" "struct evbuffer *dst" "struct evbuffer *src"
+.Ft int
+.Fn "evbuffer_add_printf" "struct evbuffer *buf" "const char *fmt" "..."
+.Ft int
+.Fn "evbuffer_add_vprintf" "struct evbuffer *buf" "const char *fmt" "va_list ap"
+.Ft void
+.Fn "evbuffer_drain" "struct evbuffer *buf" "size_t size"
+.Ft int
+.Fn "evbuffer_write" "struct evbuffer *buf" "int fd"
+.Ft int
+.Fn "evbuffer_read" "struct evbuffer *buf" "int fd" "int size"
+.Ft "u_char *"
+.Fn "evbuffer_find" "struct evbuffer *buf" "const u_char *data" "size_t size"
+.Ft "char *"
+.Fn "evbuffer_readline" "struct evbuffer *buf"
+.Ft "struct evhttp *"
+.Fn "evhttp_new" "struct event_base *base"
+.Ft int
+.Fn "evhttp_bind_socket" "struct evhttp *http" "const char *address" "u_short port"
+.Ft "void"
+.Fn "evhttp_free" "struct evhttp *http"
+.Ft int
+.Fa (*event_sigcb)(void) ;
+.Ft volatile sig_atomic_t
+.Fa event_gotsig ;
+.Sh DESCRIPTION
+The
+.Nm event
+API provides a mechanism to execute a function when a specific event
+on a file descriptor occurs or after a given time has passed.
+.Pp
+The
+.Nm event
+API needs to be initialized with
+.Fn event_init
+before it can be used.
+.Pp
+In order to process events, an application needs to call
+.Fn event_dispatch .
+This function only returns on error, and should replace the event core
+of the application program.
+.Pp
+The function
+.Fn event_set
+prepares the event structure
+.Fa ev
+to be used in future calls to
+.Fn event_add
+and
+.Fn event_del .
+The event will be prepared to call the function specified by the
+.Fa fn
+argument with an
+.Fa int
+argument indicating the file descriptor, a
+.Fa short
+argument indicating the type of event, and a
+.Fa void *
+argument given in the
+.Fa arg
+argument.
+The
+.Fa fd
+indicates the file descriptor that should be monitored for events.
+The events can be either
+.Va EV_READ ,
+.Va EV_WRITE ,
+or both,
+indicating that an application can read or write from the file descriptor
+respectively without blocking.
+.Pp
+The function
+.Fa fn
+will be called with the file descriptor that triggered the event and
+the type of event which will be either
+.Va EV_TIMEOUT ,
+.Va EV_SIGNAL ,
+.Va EV_READ ,
+or
+.Va EV_WRITE .
+Additionally, an event which has registered interest in more than one of the
+preceding events, via bitwise-OR to
+.Fn event_set ,
+can provide its callback function with a bitwise-OR of more than one triggered
+event.
+The additional flag
+.Va EV_PERSIST
+makes an
+.Fn event_add
+persistent until
+.Fn event_del
+has been called.
+.Pp
+Once initialized, the
+.Fa ev
+structure can be used repeatedly with
+.Fn event_add
+and
+.Fn event_del
+and does not need to be reinitialized unless the function called and/or
+the argument to it are to be changed.
+However, when an
+.Fa ev
+structure has been added to libevent using
+.Fn event_add
+the structure must persist until the event occurs (assuming
+.Fa EV_PERSIST
+is not set) or is removed
+using
+.Fn event_del .
+You may not reuse the same
+.Fa ev
+structure for multiple monitored descriptors; each descriptor
+needs its own
+.Fa ev .
+.Pp
+The function
+.Fn event_add
+schedules the execution of the
+.Fa ev
+event when the event specified in
+.Fn event_set
+occurs, or after at least the time specified in
+.Fa tv
+has elapsed.
+If
+.Fa tv
+is
+.Dv NULL ,
+no timeout occurs and the function will only be called
+if a matching event occurs on the file descriptor.
+The event in the
+.Fa ev
+argument must be already initialized by
+.Fn event_set
+and may not be used in calls to
+.Fn event_set
+until it has timed out or been removed with
+.Fn event_del .
+If the event in the
+.Fa ev
+argument already has a scheduled timeout, the old timeout will be
+replaced by the new one.
+.Pp
+The function
+.Fn event_del
+will cancel the event in the argument
+.Fa ev .
+If the event has already executed or has never been added
+the call will have no effect.
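+.Pp
+The following sketch (an editor's illustration, not taken from the
+original manual) shows the standard pattern for watching a descriptor:
+.Bd -literal -offset indent
+#include <event.h>
+
+static void
+read_cb(int fd, short what, void *arg)
+{
+        /* called whenever fd is ready for reading */
+}
+
+int
+main(void)
+{
+        struct event ev;
+
+        event_init();
+        event_set(&ev, 0, EV_READ | EV_PERSIST, read_cb, NULL);
+        event_add(&ev, NULL);           /* NULL: no timeout */
+        return (event_dispatch());
+}
+.Ed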
+.Pp
+The functions
+.Fn evtimer_set ,
+.Fn evtimer_add ,
+.Fn evtimer_del ,
+.Fn evtimer_initialized ,
+and
+.Fn evtimer_pending
+are abbreviations for common situations where only a timeout is required.
+The file descriptor passed will be \-1, and the event type will be
+.Va EV_TIMEOUT .
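+.Pp
+For example (an illustrative sketch, where timeout_cb is a hypothetical
+callback of the usual signature), to run a callback five seconds from now:
+.Bd -literal -offset indent
+struct timeval tv = { 5, 0 };
+struct event timer_ev;
+
+evtimer_set(&timer_ev, timeout_cb, NULL);
+evtimer_add(&timer_ev, &tv);
+.Ed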
+.Pp
+The functions
+.Fn signal_set ,
+.Fn signal_add ,
+.Fn signal_del ,
+.Fn signal_initialized ,
+and
+.Fn signal_pending
+are abbreviations.
+The event type will be a persistent
+.Va EV_SIGNAL .
+That means
+.Fn signal_set
+adds
+.Va EV_PERSIST .
+.Pp
+In order to avoid races in signal handlers, the
+.Nm event
+API provides two variables:
+.Va event_sigcb
+and
+.Va event_gotsig .
+A signal handler
+sets
+.Va event_gotsig
+to indicate that a signal has been received.
+The application sets
+.Va event_sigcb
+to a callback function.
+After the signal handler sets
+.Va event_gotsig ,
+.Nm event_dispatch
+will execute the callback function to process received signals.
+The callback returns 1 when no events are registered any more.
+It can return \-1 to indicate an error to the
+.Nm event
+library, causing
+.Fn event_dispatch
+to terminate with
+.Va errno
+set to
+.Er EINTR .
+.Pp
+The function
+.Fn event_once
+is similar to
+.Fn event_set .
+However, it schedules a callback to be called exactly once and does not
+require the caller to prepare an
+.Fa event
+structure.
+This function supports
+.Fa EV_TIMEOUT ,
+.Fa EV_READ ,
+and
+.Fa EV_WRITE .
+.Pp
+The
+.Fn event_pending
+function can be used to check if the event specified by
+.Fa event
+is pending to run.
+If
+.Va EV_TIMEOUT
+was specified and
+.Fa tv
+is not
+.Dv NULL ,
+the expiration time of the event will be returned in
+.Fa tv .
+.Pp
+The
+.Fn event_initialized
+macro can be used to check if an event has been initialized.
+.Pp
+The
+.Nm event_loop
+function provides an interface for single pass execution of pending
+events.
+The flags
+.Va EVLOOP_ONCE
+and
+.Va EVLOOP_NONBLOCK
+are recognized.
+The
+.Nm event_loopexit
+function exits from the event loop. The next
+.Fn event_loop
+iteration after the
+given timer expires will complete normally (handling all queued events) and
+exit without blocking for events again. Subsequent invocations of
+.Fn event_loop
+will proceed normally.
+The
+.Nm event_loopbreak
+function exits from the event loop immediately.
+.Fn event_loop
+will abort after the next event is completed;
+.Fn event_loopbreak
+is typically invoked from this event's callback. This behavior is analogous
+to the "break;" statement. Subsequent invocations of
+.Fn event_loop
+will proceed normally.
+.Pp
+It is the responsibility of the caller to provide these functions with
+pre-allocated event structures.
+.Pp
+.Sh EVENT PRIORITIES
+By default,
+.Nm libevent
+schedules all active events with the same priority.
+However, sometimes it is desirable to process some events with a higher
+priority than others.
+For that reason,
+.Nm libevent
+supports strict priority queues.
+Active events with a lower priority are always processed before events
+with a higher priority.
+.Pp
+The number of different priorities can be set initially with the
+.Fn event_priority_init
+function.
+This function should be called before the first call to
+.Fn event_dispatch .
+The
+.Fn event_priority_set
+function can be used to assign a priority to an event.
+By default,
+.Nm libevent
+assigns the middle priority to all events unless their priority
+is explicitly set.
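+.Pp
+For example (an illustrative sketch; ev, fd, and read_cb are assumed):
+.Bd -literal -offset indent
+event_priority_init(3);                 /* three priorities: 0, 1, 2 */
+event_set(&ev, fd, EV_READ, read_cb, NULL);
+event_priority_set(&ev, 0);             /* most urgent queue */
+.Ed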
+.Sh THREAD SAFE EVENTS
+.Nm Libevent
+has experimental support for thread-safe events.
+When initializing the library via
+.Fn event_init ,
+an event base is returned.
+This event base can be used in conjunction with calls to
+.Fn event_base_set ,
+.Fn event_base_dispatch ,
+.Fn event_base_loop ,
+.Fn event_base_loopexit ,
+.Fn bufferevent_base_set
+and
+.Fn event_base_free .
+.Fn event_base_set
+should be called after preparing an event with
+.Fn event_set ,
+as
+.Fn event_set
+assigns the provided event to the most recently created event base.
+.Fn bufferevent_base_set
+should be called after preparing a bufferevent with
+.Fn bufferevent_new .
+.Fn event_base_free
+should be used to free memory associated with the event base
+when it is no longer needed.
+.Sh BUFFERED EVENTS
+.Nm libevent
+provides an abstraction on top of the regular event callbacks.
+This abstraction is called a
+.Va "buffered event" .
+A buffered event provides input and output buffers that get filled
+and drained automatically.
+The user of a buffered event no longer deals directly with the I/O,
+but instead is reading from input and writing to output buffers.
+.Pp
+A new bufferevent is created by
+.Fn bufferevent_new .
+The parameter
+.Fa fd
+specifies the file descriptor from which data is read and to which it is written.
+This file descriptor is not allowed to be a
+.Xr pipe 2 .
+The next three parameters are callbacks.
+The read and write callbacks have the following form:
+.Ft void
+.Fn "(*cb)" "struct bufferevent *bufev" "void *arg" .
+The error callback has the following form:
+.Ft void
+.Fn "(*cb)" "struct bufferevent *bufev" "short what" "void *arg" .
+The argument is specified by the fourth parameter
+.Fa "cbarg" .
+A
+.Fa bufferevent struct
+pointer is returned on success, NULL on error.
+Both the read and the write callbacks may be NULL.
+The error callback must always be provided.
+.Pp
+Once initialized, the bufferevent structure can be used repeatedly with
+bufferevent_enable() and bufferevent_disable().
+The flags parameter can be a combination of
+.Va EV_READ
+and
+.Va EV_WRITE .
+When reading is enabled, the bufferevent will try to read from the file
+descriptor and call the read callback.
+The write callback is executed
+whenever the output buffer is drained below the write low watermark,
+which is
+.Va 0
+by default.
+.Pp
+The
+.Fn bufferevent_write
+function can be used to write data to the file descriptor.
+The data is appended to the output buffer and written to the descriptor
+automatically as it becomes available for writing.
+.Fn bufferevent_write
+returns 0 on success or \-1 on failure.
+The
+.Fn bufferevent_read
+function is used to read data from the input buffer,
+returning the amount of data read.
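+.Pp
+A minimal echo sketch (an editor's illustration;
+.Fa fd
+is assumed to be a connected socket):
+.Bd -literal -offset indent
+static void
+read_cb(struct bufferevent *bufev, void *arg)
+{
+        char buf[256];
+        size_t n = bufferevent_read(bufev, buf, sizeof(buf));
+
+        bufferevent_write(bufev, buf, n);       /* echo the data back */
+}
+
+static void
+error_cb(struct bufferevent *bufev, short what, void *arg)
+{
+        bufferevent_free(bufev);
+}
+
+struct bufferevent *bev = bufferevent_new(fd, read_cb, NULL, error_cb, NULL);
+bufferevent_enable(bev, EV_READ);
+.Ed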
+.Pp
+If multiple bases are in use, bufferevent_base_set() must be called before
+enabling the bufferevent for the first time.
+.Sh NON-BLOCKING HTTP SUPPORT
+.Nm libevent
+provides a very thin HTTP layer that can be used both to host an HTTP
+server and also to make HTTP requests.
+An HTTP server can be created by calling
+.Fn evhttp_new .
+It can be bound to any port and address with the
+.Fn evhttp_bind_socket
+function.
+When the HTTP server is no longer used, it can be freed via
+.Fn evhttp_free .
+.Pp
+To be notified of HTTP requests, a user needs to register callbacks with the
+HTTP server.
+This can be done by calling
+.Fn evhttp_set_cb .
+The second argument is the URI for which a callback is being registered.
+The corresponding callback will receive an
+.Va struct evhttp_request
+object that contains all information about the request.
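+.Pp
+For instance (an illustrative sketch; the handler name is hypothetical,
+and evhttp_send_reply() is assumed from the public interfaces in event.h):
+.Bd -literal -offset indent
+static void
+root_handler(struct evhttp_request *req, void *arg)
+{
+        struct evbuffer *buf = evbuffer_new();
+
+        evbuffer_add_printf(buf, "Hello, world");
+        evhttp_send_reply(req, 200, "OK", buf);
+        evbuffer_free(buf);
+}
+
+struct event_base *base = event_init();
+struct evhttp *http = evhttp_new(base);
+evhttp_bind_socket(http, "0.0.0.0", 8080);
+evhttp_set_cb(http, "/", root_handler, NULL);
+.Ed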
+.Pp
+This section does not document all the possible function calls; please
+check
+.Va event.h
+for the public interfaces.
+.Sh ADDITIONAL NOTES
+It is possible to disable support for
+.Va epoll , kqueue , devpoll , poll
+or
+.Va select
+by setting the environment variable
+.Va EVENT_NOEPOLL , EVENT_NOKQUEUE , EVENT_NODEVPOLL , EVENT_NOPOLL
+or
+.Va EVENT_NOSELECT ,
+respectively.
+By setting the environment variable
+.Va EVENT_SHOW_METHOD ,
+.Nm libevent
+displays the kernel notification method that it uses.
+.Sh RETURN VALUES
+Upon successful completion
+.Fn event_add
+and
+.Fn event_del
+return 0.
+Otherwise, \-1 is returned and the global variable errno is
+set to indicate the error.
+.Sh SEE ALSO
+.Xr kqueue 2 ,
+.Xr poll 2 ,
+.Xr select 2 ,
+.Xr evdns 3 ,
+.Xr timeout 9
+.Sh HISTORY
+The
+.Nm event
+API manpage is based on the
+.Xr timeout 9
+manpage by Artur Grabowski.
+The port of
+.Nm libevent
+to Windows is due to Michael A. Davis.
+Support for real-time signals is due to Taral.
+.Sh AUTHORS
+The
+.Nm event
+library was written by Niels Provos.
+.Sh BUGS
+This documentation is neither complete nor authoritative.
+If you are in doubt about the usage of this API then
+check the source code to find out how it works, write
+up the missing piece of documentation and send it to
+me for inclusion in this man page.
diff --git a/base/third_party/libevent/event.c b/base/third_party/libevent/event.c
new file mode 100644
index 0000000..4aa326e
--- /dev/null
+++ b/base/third_party/libevent/event.c
@@ -0,0 +1,998 @@
+/*
+ * Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#ifdef WIN32
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#undef WIN32_LEAN_AND_MEAN
+#endif
+#include <sys/types.h>
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else 
+#include <sys/_libevent_time.h>
+#endif
+#include <sys/queue.h>
+#include <stdio.h>
+#include <stdlib.h>
+#ifndef WIN32
+#include <unistd.h>
+#endif
+#include <errno.h>
+#include <signal.h>
+#include <string.h>
+#include <assert.h>
+#include <time.h>
+
+#include "event.h"
+#include "event-internal.h"
+#include "evutil.h"
+#include "log.h"
+
+#ifdef HAVE_EVENT_PORTS
+extern const struct eventop evportops;
+#endif
+#ifdef HAVE_SELECT
+extern const struct eventop selectops;
+#endif
+#ifdef HAVE_POLL
+extern const struct eventop pollops;
+#endif
+#ifdef HAVE_EPOLL
+extern const struct eventop epollops;
+#endif
+#ifdef HAVE_WORKING_KQUEUE
+extern const struct eventop kqops;
+#endif
+#ifdef HAVE_DEVPOLL
+extern const struct eventop devpollops;
+#endif
+#ifdef WIN32
+extern const struct eventop win32ops;
+#endif
+
+/* In order of preference */
+static const struct eventop *eventops[] = {
+#ifdef HAVE_EVENT_PORTS
+	&evportops,
+#endif
+#ifdef HAVE_WORKING_KQUEUE
+	&kqops,
+#endif
+#ifdef HAVE_EPOLL
+	&epollops,
+#endif
+#ifdef HAVE_DEVPOLL
+	&devpollops,
+#endif
+#ifdef HAVE_POLL
+	&pollops,
+#endif
+#ifdef HAVE_SELECT
+	&selectops,
+#endif
+#ifdef WIN32
+	&win32ops,
+#endif
+	NULL
+};
+
+/* Global state */
+struct event_base *current_base = NULL;
+extern struct event_base *evsignal_base;
+static int use_monotonic = 1;
+
+/* Prototypes */
+static void	event_queue_insert(struct event_base *, struct event *, int);
+static void	event_queue_remove(struct event_base *, struct event *, int);
+static int	event_haveevents(struct event_base *);
+
+static void	event_process_active(struct event_base *);
+
+static int	timeout_next(struct event_base *, struct timeval **);
+static void	timeout_process(struct event_base *);
+static void	timeout_correct(struct event_base *, struct timeval *);
+
+static int
+gettime(struct event_base *base, struct timeval *tp)
+{
+	if (base->tv_cache.tv_sec) {
+		*tp = base->tv_cache;
+		return (0);
+	}
+
+#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
+	struct timespec	ts;
+
+	if (use_monotonic &&
+	    clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
+		tp->tv_sec = ts.tv_sec;
+		tp->tv_usec = ts.tv_nsec / 1000;
+		return (0);
+	}
+#endif
+
+	use_monotonic = 0;
+
+	return (evutil_gettimeofday(tp, NULL));
+}
+
+struct event_base *
+event_init(void)
+{
+	struct event_base *base = event_base_new();
+
+	if (base != NULL)
+		current_base = base;
+
+	return (base);
+}
+
+struct event_base *
+event_base_new(void)
+{
+	int i;
+	struct event_base *base;
+
+	if ((base = calloc(1, sizeof(struct event_base))) == NULL)
+		event_err(1, "%s: calloc", __func__);
+
+	gettime(base, &base->event_tv);
+	
+	min_heap_ctor(&base->timeheap);
+	TAILQ_INIT(&base->eventqueue);
+	base->sig.ev_signal_pair[0] = -1;
+	base->sig.ev_signal_pair[1] = -1;
+	
+	base->evbase = NULL;
+	for (i = 0; eventops[i] && !base->evbase; i++) {
+		base->evsel = eventops[i];
+
+		base->evbase = base->evsel->init(base);
+	}
+
+	if (base->evbase == NULL)
+		event_errx(1, "%s: no event mechanism available", __func__);
+
+	if (evutil_getenv("EVENT_SHOW_METHOD")) 
+		event_msgx("libevent using: %s\n",
+			   base->evsel->name);
+
+	/* allocate a single active event queue */
+	event_base_priority_init(base, 1);
+
+	return (base);
+}
+
+void
+event_base_free(struct event_base *base)
+{
+	int i, n_deleted=0;
+	struct event *ev;
+
+	if (base == NULL && current_base)
+		base = current_base;
+	if (base == current_base)
+		current_base = NULL;
+
+	/* XXX(niels) - check for internal events first */
+	assert(base);
+	/* Delete all non-internal events. */
+	for (ev = TAILQ_FIRST(&base->eventqueue); ev; ) {
+		struct event *next = TAILQ_NEXT(ev, ev_next);
+		if (!(ev->ev_flags & EVLIST_INTERNAL)) {
+			event_del(ev);
+			++n_deleted;
+		}
+		ev = next;
+	}
+	while ((ev = min_heap_top(&base->timeheap)) != NULL) {
+		event_del(ev);
+		++n_deleted;
+	}
+
+	for (i = 0; i < base->nactivequeues; ++i) {
+		for (ev = TAILQ_FIRST(base->activequeues[i]); ev; ) {
+			struct event *next = TAILQ_NEXT(ev, ev_active_next);
+			if (!(ev->ev_flags & EVLIST_INTERNAL)) {
+				event_del(ev);
+				++n_deleted;
+			}
+			ev = next;
+		}
+	}
+
+	if (n_deleted)
+		event_debug(("%s: %d events were still set in base",
+			__func__, n_deleted));
+
+	if (base->evsel->dealloc != NULL)
+		base->evsel->dealloc(base, base->evbase);
+
+	for (i = 0; i < base->nactivequeues; ++i)
+		assert(TAILQ_EMPTY(base->activequeues[i]));
+
+	assert(min_heap_empty(&base->timeheap));
+	min_heap_dtor(&base->timeheap);
+
+	for (i = 0; i < base->nactivequeues; ++i)
+		free(base->activequeues[i]);
+	free(base->activequeues);
+
+	assert(TAILQ_EMPTY(&base->eventqueue));
+
+	free(base);
+}
+
+/* reinitialize the event base after a fork */
+int
+event_reinit(struct event_base *base)
+{
+	const struct eventop *evsel = base->evsel;
+	void *evbase = base->evbase;
+	int res = 0;
+	struct event *ev;
+
+#if 0
+	/* Right now, reinit always takes effect, since even if the
+	   backend doesn't require it, the signal socketpair code does.
+	 */
+	/* check if this event mechanism requires reinit */
+	if (!evsel->need_reinit)
+		return (0);
+#endif
+
+	/* prevent internal delete */
+	if (base->sig.ev_signal_added) {
+		/* we cannot call event_del here because the base has
+		 * not been reinitialized yet. */
+		event_queue_remove(base, &base->sig.ev_signal,
+		    EVLIST_INSERTED);
+		if (base->sig.ev_signal.ev_flags & EVLIST_ACTIVE)
+			event_queue_remove(base, &base->sig.ev_signal,
+			    EVLIST_ACTIVE);
+		base->sig.ev_signal_added = 0;
+	}
+
+	if (base->evsel->dealloc != NULL)
+		base->evsel->dealloc(base, base->evbase);
+	evbase = base->evbase = evsel->init(base);
+	if (base->evbase == NULL)
+		event_errx(1, "%s: could not reinitialize event mechanism",
+		    __func__);
+
+	TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
+		if (evsel->add(evbase, ev) == -1)
+			res = -1;
+	}
+
+	return (res);
+}
+
+int
+event_priority_init(int npriorities)
+{
+  return event_base_priority_init(current_base, npriorities);
+}
+
+int
+event_base_priority_init(struct event_base *base, int npriorities)
+{
+	int i;
+
+	if (base->event_count_active)
+		return (-1);
+
+	if (npriorities == base->nactivequeues)
+		return (0);
+
+	if (base->nactivequeues) {
+		for (i = 0; i < base->nactivequeues; ++i) {
+			free(base->activequeues[i]);
+		}
+		free(base->activequeues);
+	}
+
+	/* Allocate our priority queues */
+	base->nactivequeues = npriorities;
+	base->activequeues = (struct event_list **)
+	    calloc(base->nactivequeues, sizeof(struct event_list *));
+	if (base->activequeues == NULL)
+		event_err(1, "%s: calloc", __func__);
+
+	for (i = 0; i < base->nactivequeues; ++i) {
+		base->activequeues[i] = malloc(sizeof(struct event_list));
+		if (base->activequeues[i] == NULL)
+			event_err(1, "%s: malloc", __func__);
+		TAILQ_INIT(base->activequeues[i]);
+	}
+
+	return (0);
+}
+
+int
+event_haveevents(struct event_base *base)
+{
+	return (base->event_count > 0);
+}
+
+/*
+ * Active events are stored in priority queues.  Lower priorities are always
+ * processed before higher priorities.  Low priority events can starve high
+ * priority ones.
+ */
+
+static void
+event_process_active(struct event_base *base)
+{
+	struct event *ev;
+	struct event_list *activeq = NULL;
+	int i;
+	short ncalls;
+
+	for (i = 0; i < base->nactivequeues; ++i) {
+		if (TAILQ_FIRST(base->activequeues[i]) != NULL) {
+			activeq = base->activequeues[i];
+			break;
+		}
+	}
+
+	assert(activeq != NULL);
+
+	for (ev = TAILQ_FIRST(activeq); ev; ev = TAILQ_FIRST(activeq)) {
+		if (ev->ev_events & EV_PERSIST)
+			event_queue_remove(base, ev, EVLIST_ACTIVE);
+		else
+			event_del(ev);
+		
+		/* Allows deletes to work */
+		ncalls = ev->ev_ncalls;
+		ev->ev_pncalls = &ncalls;
+		while (ncalls) {
+			ncalls--;
+			ev->ev_ncalls = ncalls;
+			(*ev->ev_callback)((int)ev->ev_fd, ev->ev_res, ev->ev_arg);
+			if (base->event_break)
+				return;
+		}
+	}
+}
+
+/*
+ * Wait continuously for events.  We exit only if no events are left.
+ */
+
+int
+event_dispatch(void)
+{
+	return (event_loop(0));
+}
+
+int
+event_base_dispatch(struct event_base *event_base)
+{
+  return (event_base_loop(event_base, 0));
+}
+
+const char *
+event_base_get_method(struct event_base *base)
+{
+	assert(base);
+	return (base->evsel->name);
+}
+
+static void
+event_loopexit_cb(int fd, short what, void *arg)
+{
+	struct event_base *base = arg;
+	base->event_gotterm = 1;
+}
+
+/* not thread safe */
+int
+event_loopexit(const struct timeval *tv)
+{
+	return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
+		    current_base, tv));
+}
+
+int
+event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
+{
+	return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
+		    event_base, tv));
+}
+
+/* not thread safe */
+int
+event_loopbreak(void)
+{
+	return (event_base_loopbreak(current_base));
+}
+
+int
+event_base_loopbreak(struct event_base *event_base)
+{
+	if (event_base == NULL)
+		return (-1);
+
+	event_base->event_break = 1;
+	return (0);
+}
+
+
+
+/* not thread safe */
+
+int
+event_loop(int flags)
+{
+	return event_base_loop(current_base, flags);
+}
+
+int
+event_base_loop(struct event_base *base, int flags)
+{
+	const struct eventop *evsel = base->evsel;
+	void *evbase = base->evbase;
+	struct timeval tv;
+	struct timeval *tv_p;
+	int res, done;
+
+	/* clear time cache */
+	base->tv_cache.tv_sec = 0;
+
+	if (base->sig.ev_signal_added)
+		evsignal_base = base;
+	done = 0;
+	while (!done) {
+		/* Terminate the loop if we have been asked to */
+		if (base->event_gotterm) {
+			base->event_gotterm = 0;
+			break;
+		}
+
+		if (base->event_break) {
+			base->event_break = 0;
+			break;
+		}
+
+		timeout_correct(base, &tv);
+
+		tv_p = &tv;
+		if (!base->event_count_active && !(flags & EVLOOP_NONBLOCK)) {
+			timeout_next(base, &tv_p);
+		} else {
+			/* 
+			 * if we have active events, we just poll new events
+			 * without waiting.
+			 */
+			evutil_timerclear(&tv);
+		}
+		
+		/* If we have no events, we just exit */
+		if (!event_haveevents(base)) {
+			event_debug(("%s: no events registered.", __func__));
+			return (1);
+		}
+
+		/* update last old time */
+		gettime(base, &base->event_tv);
+
+		/* clear time cache */
+		base->tv_cache.tv_sec = 0;
+
+		res = evsel->dispatch(base, evbase, tv_p);
+
+		if (res == -1)
+			return (-1);
+		gettime(base, &base->tv_cache);
+
+		timeout_process(base);
+
+		if (base->event_count_active) {
+			event_process_active(base);
+			if (!base->event_count_active && (flags & EVLOOP_ONCE))
+				done = 1;
+		} else if (flags & EVLOOP_NONBLOCK)
+			done = 1;
+	}
+
+	/* clear time cache */
+	base->tv_cache.tv_sec = 0;
+
+	event_debug(("%s: asked to terminate loop.", __func__));
+	return (0);
+}
+
+/* Sets up an event for processing once */
+
+struct event_once {
+	struct event ev;
+
+	void (*cb)(int, short, void *);
+	void *arg;
+};
+
+/* One-time callback, it deletes itself */
+
+static void
+event_once_cb(int fd, short events, void *arg)
+{
+	struct event_once *eonce = arg;
+
+	(*eonce->cb)(fd, events, eonce->arg);
+	free(eonce);
+}
+
+/* not threadsafe, event scheduled once. */
+int
+event_once(int fd, short events,
+    void (*callback)(int, short, void *), void *arg, const struct timeval *tv)
+{
+	return event_base_once(current_base, fd, events, callback, arg, tv);
+}
+
+/* Schedules an event once */
+int
+event_base_once(struct event_base *base, int fd, short events,
+    void (*callback)(int, short, void *), void *arg, const struct timeval *tv)
+{
+	struct event_once *eonce;
+	struct timeval etv;
+	int res;
+
+	/* We cannot support signals that just fire once */
+	if (events & EV_SIGNAL)
+		return (-1);
+
+	if ((eonce = calloc(1, sizeof(struct event_once))) == NULL)
+		return (-1);
+
+	eonce->cb = callback;
+	eonce->arg = arg;
+
+	if (events == EV_TIMEOUT) {
+		if (tv == NULL) {
+			evutil_timerclear(&etv);
+			tv = &etv;
+		}
+
+		evtimer_set(&eonce->ev, event_once_cb, eonce);
+	} else if (events & (EV_READ|EV_WRITE)) {
+		events &= EV_READ|EV_WRITE;
+
+		event_set(&eonce->ev, fd, events, event_once_cb, eonce);
+	} else {
+		/* Bad event combination */
+		free(eonce);
+		return (-1);
+	}
+
+	res = event_base_set(base, &eonce->ev);
+	if (res == 0)
+		res = event_add(&eonce->ev, tv);
+	if (res != 0) {
+		free(eonce);
+		return (res);
+	}
+
+	return (0);
+}
+
+void
+event_set(struct event *ev, int fd, short events,
+	  void (*callback)(int, short, void *), void *arg)
+{
+	/* Take the current base - caller needs to set the real base later */
+	ev->ev_base = current_base;
+
+	ev->ev_callback = callback;
+	ev->ev_arg = arg;
+	ev->ev_fd = fd;
+	ev->ev_events = events;
+	ev->ev_res = 0;
+	ev->ev_flags = EVLIST_INIT;
+	ev->ev_ncalls = 0;
+	ev->ev_pncalls = NULL;
+
+	min_heap_elem_init(ev);
+
+	/* by default, we put new events into the middle priority */
+	if(current_base)
+		ev->ev_pri = current_base->nactivequeues/2;
+}
+
+int
+event_base_set(struct event_base *base, struct event *ev)
+{
+	/* Only innocent events may be assigned to a different base */
+	if (ev->ev_flags != EVLIST_INIT)
+		return (-1);
+
+	ev->ev_base = base;
+	ev->ev_pri = base->nactivequeues/2;
+
+	return (0);
+}
+
+/*
+ * Sets the priority of an event; if the event is already active,
+ * changing its priority will fail.
+ */
+
+int
+event_priority_set(struct event *ev, int pri)
+{
+	if (ev->ev_flags & EVLIST_ACTIVE)
+		return (-1);
+	if (pri < 0 || pri >= ev->ev_base->nactivequeues)
+		return (-1);
+
+	ev->ev_pri = pri;
+
+	return (0);
+}
+
+/*
+ * Checks if a specific event is pending or scheduled.
+ */
+
+int
+event_pending(struct event *ev, short event, struct timeval *tv)
+{
+	struct timeval	now, res;
+	int flags = 0;
+
+	if (ev->ev_flags & EVLIST_INSERTED)
+		flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL));
+	if (ev->ev_flags & EVLIST_ACTIVE)
+		flags |= ev->ev_res;
+	if (ev->ev_flags & EVLIST_TIMEOUT)
+		flags |= EV_TIMEOUT;
+
+	event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_SIGNAL);
+
+	/* See if there is a timeout that we should report */
+	if (tv != NULL && (flags & event & EV_TIMEOUT)) {
+		gettime(ev->ev_base, &now);
+		evutil_timersub(&ev->ev_timeout, &now, &res);
+		/* correctly remap to real time */
+		evutil_gettimeofday(&now, NULL);
+		evutil_timeradd(&now, &res, tv);
+	}
+
+	return (flags & event);
+}
+
+int
+event_add(struct event *ev, const struct timeval *tv)
+{
+	struct event_base *base = ev->ev_base;
+	const struct eventop *evsel = base->evsel;
+	void *evbase = base->evbase;
+	int res = 0;
+
+	event_debug((
+		 "event_add: event: %p, %s%s%scall %p",
+		 ev,
+		 ev->ev_events & EV_READ ? "EV_READ " : " ",
+		 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
+		 tv ? "EV_TIMEOUT " : " ",
+		 ev->ev_callback));
+
+	assert(!(ev->ev_flags & ~EVLIST_ALL));
+
+	/*
+	 * prepare for timeout insertion further below, if we get a
+	 * failure on any step, we should not change any state.
+	 */
+	if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
+		if (min_heap_reserve(&base->timeheap,
+			1 + min_heap_size(&base->timeheap)) == -1)
+			return (-1);  /* ENOMEM == errno */
+	}
+
+	if ((ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL)) &&
+	    !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE))) {
+		res = evsel->add(evbase, ev);
+		if (res != -1)
+			event_queue_insert(base, ev, EVLIST_INSERTED);
+	}
+
+	/* 
+	 * we should change the timeout state only if the previous event
+	 * addition succeeded.
+	 */
+	if (res != -1 && tv != NULL) {
+		struct timeval now;
+
+		/* 
+		 * we already reserved memory above for the case where we
+		 * are not replacing an existing timeout.
+		 */
+		if (ev->ev_flags & EVLIST_TIMEOUT)
+			event_queue_remove(base, ev, EVLIST_TIMEOUT);
+
+		/* Check if it is active due to a timeout.  Rescheduling
+		 * this timeout before the callback can be executed
+		 * removes it from the active list. */
+		if ((ev->ev_flags & EVLIST_ACTIVE) &&
+		    (ev->ev_res & EV_TIMEOUT)) {
+			/* See if we are just active executing this
+			 * event in a loop
+			 */
+			if (ev->ev_ncalls && ev->ev_pncalls) {
+				/* Abort loop */
+				*ev->ev_pncalls = 0;
+			}
+			
+			event_queue_remove(base, ev, EVLIST_ACTIVE);
+		}
+
+		gettime(base, &now);
+		evutil_timeradd(&now, tv, &ev->ev_timeout);
+
+		event_debug((
+			 "event_add: timeout in %ld seconds, call %p",
+			 tv->tv_sec, ev->ev_callback));
+
+		event_queue_insert(base, ev, EVLIST_TIMEOUT);
+	}
+
+	return (res);
+}
+
+int
+event_del(struct event *ev)
+{
+	struct event_base *base;
+
+	event_debug(("event_del: %p, callback %p",
+		 ev, ev->ev_callback));
+
+	/* An event without a base has not been added */
+	if (ev->ev_base == NULL)
+		return (-1);
+
+	base = ev->ev_base;
+
+	assert(!(ev->ev_flags & ~EVLIST_ALL));
+
+	/* See if we are just active executing this event in a loop */
+	if (ev->ev_ncalls && ev->ev_pncalls) {
+		/* Abort loop */
+		*ev->ev_pncalls = 0;
+	}
+
+	if (ev->ev_flags & EVLIST_TIMEOUT)
+		event_queue_remove(base, ev, EVLIST_TIMEOUT);
+
+	if (ev->ev_flags & EVLIST_ACTIVE)
+		event_queue_remove(base, ev, EVLIST_ACTIVE);
+
+	if (ev->ev_flags & EVLIST_INSERTED) {
+		event_queue_remove(base, ev, EVLIST_INSERTED);
+		return (base->evsel->del(base->evbase, ev));
+	}
+
+	return (0);
+}
+
+void
+event_active(struct event *ev, int res, short ncalls)
+{
+	/* We get different kinds of events, add them together */
+	if (ev->ev_flags & EVLIST_ACTIVE) {
+		ev->ev_res |= res;
+		return;
+	}
+
+	ev->ev_res = res;
+	ev->ev_ncalls = ncalls;
+	ev->ev_pncalls = NULL;
+	event_queue_insert(ev->ev_base, ev, EVLIST_ACTIVE);
+}
+
+static int
+timeout_next(struct event_base *base, struct timeval **tv_p)
+{
+	struct timeval now;
+	struct event *ev;
+	struct timeval *tv = *tv_p;
+
+	if ((ev = min_heap_top(&base->timeheap)) == NULL) {
+		/* if no time-based events are active wait for I/O */
+		*tv_p = NULL;
+		return (0);
+	}
+
+	if (gettime(base, &now) == -1)
+		return (-1);
+
+	if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
+		evutil_timerclear(tv);
+		return (0);
+	}
+
+	evutil_timersub(&ev->ev_timeout, &now, tv);
+
+	assert(tv->tv_sec >= 0);
+	assert(tv->tv_usec >= 0);
+
+	event_debug(("timeout_next: in %ld seconds", tv->tv_sec));
+	return (0);
+}
+
+/*
+ * Determines if the time is running backwards by comparing the current
+ * time against the last time we checked.  Not needed when using clock
+ * monotonic.
+ */
+
+static void
+timeout_correct(struct event_base *base, struct timeval *tv)
+{
+	struct event **pev;
+	unsigned int size;
+	struct timeval off;
+
+	if (use_monotonic)
+		return;
+
+	/* Check if time is running backwards */
+	gettime(base, tv);
+	if (evutil_timercmp(tv, &base->event_tv, >=)) {
+		base->event_tv = *tv;
+		return;
+	}
+
+	event_debug(("%s: time is running backwards, corrected",
+		    __func__));
+	evutil_timersub(&base->event_tv, tv, &off);
+
+	/*
+	 * We can modify the key element of the node without destroying
+	 * the key, because we apply it to all in the right order.
+	 */
+	pev = base->timeheap.p;
+	size = base->timeheap.n;
+	for (; size-- > 0; ++pev) {
+		struct timeval *ev_tv = &(**pev).ev_timeout;
+		evutil_timersub(ev_tv, &off, ev_tv);
+	}
+	/* Now remember what the new time turned out to be. */
+	base->event_tv = *tv;
+}
+
+void
+timeout_process(struct event_base *base)
+{
+	struct timeval now;
+	struct event *ev;
+
+	if (min_heap_empty(&base->timeheap))
+		return;
+
+	gettime(base, &now);
+
+	while ((ev = min_heap_top(&base->timeheap))) {
+		if (evutil_timercmp(&ev->ev_timeout, &now, >))
+			break;
+
+		/* delete this event from the I/O queues */
+		event_del(ev);
+
+		event_debug(("timeout_process: call %p",
+			 ev->ev_callback));
+		event_active(ev, EV_TIMEOUT, 1);
+	}
+}
+
+void
+event_queue_remove(struct event_base *base, struct event *ev, int queue)
+{
+	if (!(ev->ev_flags & queue))
+		event_errx(1, "%s: %p(fd %d) not on queue %x", __func__,
+			   ev, ev->ev_fd, queue);
+
+	if (~ev->ev_flags & EVLIST_INTERNAL)
+		base->event_count--;
+
+	ev->ev_flags &= ~queue;
+	switch (queue) {
+	case EVLIST_INSERTED:
+		TAILQ_REMOVE(&base->eventqueue, ev, ev_next);
+		break;
+	case EVLIST_ACTIVE:
+		base->event_count_active--;
+		TAILQ_REMOVE(base->activequeues[ev->ev_pri],
+		    ev, ev_active_next);
+		break;
+	case EVLIST_TIMEOUT:
+		min_heap_erase(&base->timeheap, ev);
+		break;
+	default:
+		event_errx(1, "%s: unknown queue %x", __func__, queue);
+	}
+}
+
+void
+event_queue_insert(struct event_base *base, struct event *ev, int queue)
+{
+	if (ev->ev_flags & queue) {
+		/* Double insertion is possible for active events */
+		if (queue & EVLIST_ACTIVE)
+			return;
+
+		event_errx(1, "%s: %p(fd %d) already on queue %x", __func__,
+			   ev, ev->ev_fd, queue);
+	}
+
+	if (~ev->ev_flags & EVLIST_INTERNAL)
+		base->event_count++;
+
+	ev->ev_flags |= queue;
+	switch (queue) {
+	case EVLIST_INSERTED:
+		TAILQ_INSERT_TAIL(&base->eventqueue, ev, ev_next);
+		break;
+	case EVLIST_ACTIVE:
+		base->event_count_active++;
+		TAILQ_INSERT_TAIL(base->activequeues[ev->ev_pri],
+		    ev,ev_active_next);
+		break;
+	case EVLIST_TIMEOUT: {
+		min_heap_push(&base->timeheap, ev);
+		break;
+	}
+	default:
+		event_errx(1, "%s: unknown queue %x", __func__, queue);
+	}
+}
+
+/* Functions for debugging */
+
+const char *
+event_get_version(void)
+{
+	return (VERSION);
+}
+
+/* 
+ * No thread-safe interface needed - the information should be the same
+ * for all threads.
+ */
+
+const char *
+event_get_method(void)
+{
+	return (current_base->evsel->name);
+}
diff --git a/base/third_party/libevent/event.h b/base/third_party/libevent/event.h
new file mode 100644
index 0000000..f0887b9
--- /dev/null
+++ b/base/third_party/libevent/event.h
@@ -0,0 +1,1212 @@
+/*
+ * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _EVENT_H_
+#define _EVENT_H_
+
+/** @mainpage
+
+  @section intro Introduction
+
+  libevent is an event notification library for developing scalable network
+  servers.  The libevent API provides a mechanism to execute a callback
+  function when a specific event occurs on a file descriptor or after a
+  timeout has been reached. Furthermore, libevent also supports callbacks
+  triggered by signals and regular timeouts.
+
+  libevent is meant to replace the event loop found in event driven network
+  servers. An application just needs to call event_dispatch() and then add or
+  remove events dynamically without having to change the event loop.
+
+  Currently, libevent supports /dev/poll, kqueue(2), select(2), poll(2) and
+  epoll(4). It also has experimental support for real-time signals. The
+  internal event mechanism is completely independent of the exposed event API,
+  and a simple update of libevent can provide new functionality without having
+  to redesign the applications. As a result, Libevent allows for portable
+  application development and provides the most scalable event notification
+  mechanism available on an operating system. Libevent can also be used for
+  multi-threaded applications; see Steven Grimm's explanation. Libevent should
+  compile on Linux, *BSD, Mac OS X, Solaris and Windows.
+
+  @section usage Standard usage
+
+  Every program that uses libevent must include the <event.h> header, and pass
+  the -levent flag to the linker.  Before using any of the functions in the
+  library, you must call event_init() or event_base_new() to perform one-time
+  initialization of the libevent library.
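+
+  For illustration, a minimal sketch of that one-time setup:
+
+  @code
+  #include <event.h>
+
+  struct event_base *base = event_init();  // also sets the current_base global
+  @endcode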
+
+  @section event Event notification
+
+  For each file descriptor that you wish to monitor, you must declare an event
+  structure and call event_set() to initialize the members of the structure.
+  To enable notification, you add the structure to the list of monitored
+  events by calling event_add().  The event structure must remain allocated as
+  long as it is active, so it should be allocated on the heap. Finally, you
+  call event_dispatch() to loop and dispatch events.
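+
+  A minimal sketch of that sequence (fd is assumed to be an already-open
+  descriptor; error checks omitted):
+
+  @code
+  static void on_read(int fd, short event, void *arg)
+  {
+      // fd is now readable; event is EV_READ
+  }
+
+  struct event *ev = malloc(sizeof(struct event));
+  event_init();
+  event_set(ev, fd, EV_READ | EV_PERSIST, on_read, NULL);
+  event_add(ev, NULL);      // no timeout
+  event_dispatch();
+  @endcode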
+
+  @section bufferevent I/O Buffers
+
+  libevent provides an abstraction on top of the regular event callbacks. This
+  abstraction is called a buffered event. A buffered event provides input and
+  output buffers that get filled and drained automatically. The user of a
+  buffered event no longer deals directly with the I/O, but instead is reading
+  from input and writing to output buffers.
+
+  Once initialized via bufferevent_new(), the bufferevent structure can be
+  used repeatedly with bufferevent_enable() and bufferevent_disable().
+  Instead of reading and writing directly to a socket, you would call
+  bufferevent_read() and bufferevent_write().
+
+  When read-enabled, the bufferevent will try to read from the file descriptor
+  and call the read callback. The write callback is executed whenever the
+  output buffer is drained below the write low watermark, which is 0 by
+  default.
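+
+  A sketch of the bufferevent flow; fd is assumed to be a connected,
+  non-blocking socket:
+
+  @code
+  static void on_read(struct bufferevent *bev, void *arg)
+  {
+      char buf[256];
+      size_t n = bufferevent_read(bev, buf, sizeof(buf));
+      // consume the n bytes in buf here
+  }
+
+  static void on_error(struct bufferevent *bev, short what, void *arg)
+  {
+      bufferevent_free(bev);
+  }
+
+  struct bufferevent *bev = bufferevent_new(fd, on_read, NULL, on_error, NULL);
+  bufferevent_enable(bev, EV_READ);
+  @endcode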
+
+  @section timers Timers
+
+  libevent can also be used to create timers that invoke a callback after a
+  certain amount of time has expired. The evtimer_set() function prepares an
+  event struct to be used as a timer. To activate the timer, call
+  evtimer_add(). Timers can be deactivated by calling evtimer_del().
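+
+  A sketch of a one-shot timer that fires after one second (timer callbacks
+  have the usual (int, short, void *) signature; the fd argument is -1):
+
+  @code
+  static void on_timer(int fd, short event, void *arg)
+  {
+      // runs once, about one second after evtimer_add()
+  }
+
+  struct event timer;
+  struct timeval one_sec = { 1, 0 };
+  evtimer_set(&timer, on_timer, NULL);
+  evtimer_add(&timer, &one_sec);
+  @endcode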
+
+  @section timeouts Timeouts
+
+  In addition to simple timers, libevent can assign timeout events to file
+  descriptors that are triggered whenever a certain amount of time has passed
+  with no activity on a file descriptor.  The timeout_set() function
+  initializes an event struct for use as a timeout. Once initialized, the
+  event must be activated by using timeout_add().  To cancel the timeout, call
+  timeout_del().
+
+  @section evdns Asynchronous DNS resolution
+
+  libevent provides an asynchronous DNS resolver that should be used instead
+  of the standard DNS resolver functions.  These functions can be imported by
+  including the <evdns.h> header in your program. Before using any of the
+  resolver functions, you must call evdns_init() to initialize the library. To
+  convert a hostname to an IP address, you call the evdns_resolve_ipv4()
+  function.  To perform a reverse lookup, you would call the
+  evdns_resolve_reverse() function.  All of these functions use callbacks to
+  avoid blocking while the lookup is performed.
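+
+  As an illustration, a sketch of a forward lookup; the callback type is
+  declared in <evdns.h>, which is not reproduced here, so treat the exact
+  signature below as an assumption:
+
+  @code
+  static void on_resolved(int result, char type, int count, int ttl,
+      void *addresses, void *arg)
+  {
+      // on success, addresses points to count IPv4 addresses
+  }
+
+  evdns_init();
+  evdns_resolve_ipv4("www.example.com", 0, on_resolved, NULL);
+  @endcode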
+
+  @section evhttp Event-driven HTTP servers
+
+  libevent provides a very simple event-driven HTTP server that can be
+  embedded in your program and used to service HTTP requests.
+
+  To use this capability, you need to include the <evhttp.h> header in your
+  program.  You create the server by calling evhttp_new(). Add addresses and
+  ports to listen on with evhttp_bind_socket(). You then register one or more
+  callbacks to handle incoming requests.  Each URI can be assigned a callback
+  via the evhttp_set_cb() function.  A generic callback function can also be
+  registered via evhttp_set_gencb(); this callback will be invoked if no other
+  callbacks have been registered for a given URI.
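+
+  A sketch of an embedded server, assuming the <evhttp.h> declarations of
+  evhttp_new(), evhttp_bind_socket(), evhttp_set_gencb() and
+  evhttp_send_reply():
+
+  @code
+  static void on_request(struct evhttp_request *req, void *arg)
+  {
+      struct evbuffer *reply = evbuffer_new();
+      evbuffer_add_printf(reply, "hello");
+      evhttp_send_reply(req, HTTP_OK, "OK", reply);
+      evbuffer_free(reply);
+  }
+
+  struct event_base *base = event_init();
+  struct evhttp *httpd = evhttp_new(base);
+  evhttp_bind_socket(httpd, "0.0.0.0", 8080);
+  evhttp_set_gencb(httpd, on_request, NULL);
+  event_base_dispatch(base);
+  @endcode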
+
+  @section evrpc A framework for RPC servers and clients
+ 
+  libevent provides a framework for creating RPC servers and clients.  It
+  takes care of marshaling and unmarshaling all data structures.
+
+  @section api API Reference
+
+  To browse the complete documentation of the libevent API, click on any of
+  the following links.
+
+  event.h
+  The primary libevent header
+
+  evdns.h
+  Asynchronous DNS resolution
+
+  evhttp.h
+  An embedded libevent-based HTTP server
+
+  evrpc.h
+  A framework for creating RPC servers and clients
+
+ */
+
+/** @file event.h
+
+  A library for writing event-driven network servers
+
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "event-config.h"
+#ifdef _EVENT_HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+#ifdef _EVENT_HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#ifdef _EVENT_HAVE_STDINT_H
+#include <stdint.h>
+#endif
+#include <stdarg.h>
+
+/* For int types. */
+#include "evutil.h"
+
+#ifdef WIN32
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#undef WIN32_LEAN_AND_MEAN
+typedef unsigned char u_char;
+typedef unsigned short u_short;
+#endif
+
+#define EVLIST_TIMEOUT	0x01
+#define EVLIST_INSERTED	0x02
+#define EVLIST_SIGNAL	0x04
+#define EVLIST_ACTIVE	0x08
+#define EVLIST_INTERNAL	0x10
+#define EVLIST_INIT	0x80
+
+/* EVLIST_X_ Private space: 0x1000-0xf000 */
+#define EVLIST_ALL	(0xf000 | 0x9f)
+
+#define EV_TIMEOUT	0x01
+#define EV_READ		0x02
+#define EV_WRITE	0x04
+#define EV_SIGNAL	0x08
+#define EV_PERSIST	0x10	/* Persistent event */
+
+/* Fix so that people don't have to include <sys/queue.h> */
+#ifndef TAILQ_ENTRY
+#define _EVENT_DEFINED_TQENTRY
+#define TAILQ_ENTRY(type)						\
+struct {								\
+	struct type *tqe_next;	/* next element */			\
+	struct type **tqe_prev;	/* address of previous next element */	\
+}
+#endif /* !TAILQ_ENTRY */
+
+struct event_base;
+#ifndef EVENT_NO_STRUCT
+struct event {
+	TAILQ_ENTRY (event) ev_next;
+	TAILQ_ENTRY (event) ev_active_next;
+	TAILQ_ENTRY (event) ev_signal_next;
+	unsigned int min_heap_idx;	/* for managing timeouts */
+
+	struct event_base *ev_base;
+
+	int ev_fd;
+	short ev_events;
+	short ev_ncalls;
+	short *ev_pncalls;	/* Allows deletes in callback */
+
+	struct timeval ev_timeout;
+
+	int ev_pri;		/* smaller numbers are higher priority */
+
+	void (*ev_callback)(int, short, void *arg);
+	void *ev_arg;
+
+	int ev_res;		/* result passed to event callback */
+	int ev_flags;
+};
+#else
+struct event;
+#endif
+
+#define EVENT_SIGNAL(ev)	(int)(ev)->ev_fd
+#define EVENT_FD(ev)		(int)(ev)->ev_fd
+
+/*
+ * Key-Value pairs.  Can be used for HTTP headers but also for
+ * query argument parsing.
+ */
+struct evkeyval {
+	TAILQ_ENTRY(evkeyval) next;
+
+	char *key;
+	char *value;
+};
+
+#ifdef _EVENT_DEFINED_TQENTRY
+#undef TAILQ_ENTRY
+struct event_list;
+struct evkeyvalq;
+#undef _EVENT_DEFINED_TQENTRY
+#else
+TAILQ_HEAD (event_list, event);
+TAILQ_HEAD (evkeyvalq, evkeyval);
+#endif /* _EVENT_DEFINED_TQENTRY */
+
+/**
+  Initialize the event API.
+
+  Use event_base_new() to initialize a new event base; it does not set
+  the current_base global.  If using only event_base_new(), each event
+  added must have an event base set with event_base_set().
+
+  @see event_base_set(), event_base_free(), event_init()
+ */
+struct event_base *event_base_new(void);
+
+/**
+  Initialize the event API.
+
+  The event API needs to be initialized with event_init() before it can be
+  used.  Sets the current_base global representing the default base for
+  events that have no base associated with them.
+
+  @see event_base_set(), event_base_new()
+ */
+struct event_base *event_init(void);
+
+/**
+  Reinitialize the event base after a fork.
+
+  Some event mechanisms do not survive across fork.   The event base needs
+  to be reinitialized with the event_reinit() function.
+
+  @param base the event base that needs to be re-initialized
+  @return 0 if successful, or -1 if some events could not be re-added.
+  @see event_base_new(), event_init()
+*/
+int event_reinit(struct event_base *base);
+
+/**
+  Loop to process events.
+
+  In order to process events, an application needs to call
+  event_dispatch().  This function only returns on error, and should
+  replace the event core of the application program.
+
+  @see event_base_dispatch()
+ */
+int event_dispatch(void);
+
+
+/**
+  Threadsafe event dispatching loop.
+
+  @param eb the event_base structure returned by event_init()
+  @see event_init(), event_dispatch()
+ */
+int event_base_dispatch(struct event_base *);
+
+
+/**
+ Get the kernel event notification mechanism used by libevent.
+ 
+ @param eb the event_base structure returned by event_base_new()
+ @return a string identifying the kernel event mechanism (kqueue, epoll, etc.)
+ */
+const char *event_base_get_method(struct event_base *);
+        
+        
+/**
+  Deallocate all memory associated with an event_base, and free the base.
+
+  Note that this function will not close any fds or free any memory passed
+  to event_set as the argument to callback.
+
+  @param eb an event_base to be freed
+ */
+void event_base_free(struct event_base *);
+
+
+#define _EVENT_LOG_DEBUG 0
+#define _EVENT_LOG_MSG   1
+#define _EVENT_LOG_WARN  2
+#define _EVENT_LOG_ERR   3
+typedef void (*event_log_cb)(int severity, const char *msg);
+/**
+  Redirect libevent's log messages.
+
+  @param cb a function taking two arguments: an integer severity between
+     _EVENT_LOG_DEBUG and _EVENT_LOG_ERR, and a string.  If cb is NULL,
+	 then the default log is used.
+  */
+void event_set_log_callback(event_log_cb cb);
+
+/**
+  Associate a different event base with an event.
+
+  @param eb the event base
+  @param ev the event
+ */
+int event_base_set(struct event_base *, struct event *);
+
+/**
+ event_loop() flags
+ */
+/*@{*/
+#define EVLOOP_ONCE	0x01	/**< Block at most once. */
+#define EVLOOP_NONBLOCK	0x02	/**< Do not block. */
+/*@}*/
+
+/**
+  Handle events.
+
+  This is a more flexible version of event_dispatch().
+
+  @param flags any combination of EVLOOP_ONCE | EVLOOP_NONBLOCK
+  @return 0 if successful, -1 if an error occurred, or 1 if no events were
+    registered.
+  @see event_loopexit(), event_base_loop()
+*/
+int event_loop(int);
+
+/**
+  Handle events (threadsafe version).
+
+  This is a more flexible version of event_base_dispatch().
+
+  @param eb the event_base structure returned by event_init()
+  @param flags any combination of EVLOOP_ONCE | EVLOOP_NONBLOCK
+  @return 0 if successful, -1 if an error occurred, or 1 if no events were
+    registered.
+  @see event_loopexit(), event_base_loop()
+  */
+int event_base_loop(struct event_base *, int);
+
+/**
+  Exit the event loop after the specified time.
+
+  The next event_loop() iteration after the given timer expires will
+  complete normally (handling all queued events) then exit without
+  blocking for events again.
+
+  Subsequent invocations of event_loop() will proceed normally.
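+
+  For example, a sketch that lets the loop run for at most five seconds:
+
+  @code
+  struct timeval five_sec = { 5, 0 };
+  event_loopexit(&five_sec);
+  event_loop(0);
+  @endcode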
+
+  @param tv the amount of time after which the loop should terminate.
+  @return 0 if successful, or -1 if an error occurred
+  @see event_loop(), event_base_loop(), event_base_loopexit()
+  */
+int event_loopexit(const struct timeval *);
+
+
+/**
+  Exit the event loop after the specified time (threadsafe variant).
+
+  The next event_base_loop() iteration after the given timer expires will
+  complete normally (handling all queued events) then exit without
+  blocking for events again.
+
+  Subsequent invocations of event_base_loop() will proceed normally.
+
+  @param eb the event_base structure returned by event_init()
+  @param tv the amount of time after which the loop should terminate.
+  @return 0 if successful, or -1 if an error occurred
+  @see event_loopexit()
+ */
+int event_base_loopexit(struct event_base *, const struct timeval *);
+
+/**
+  Abort the active event_loop() immediately.
+
+  event_loop() will abort the loop after the next event is completed;
+  event_loopbreak() is typically invoked from this event's callback.
+  This behavior is analogous to the "break;" statement.
+
+  Subsequent invocations of event_loop() will proceed normally.
+
+  @return 0 if successful, or -1 if an error occurred
+  @see event_base_loopbreak(), event_loopexit()
+ */
+int event_loopbreak(void);
+
+/**
+  Abort the active event_base_loop() immediately.
+
+  event_base_loop() will abort the loop after the next event is completed;
+  event_base_loopbreak() is typically invoked from this event's callback.
+  This behavior is analogous to the "break;" statement.
+
+  Subsequent invocations of event_base_loop() will proceed normally.
+
+  @param eb the event_base structure returned by event_init()
+  @return 0 if successful, or -1 if an error occurred
+  @see event_base_loopexit
+ */
+int event_base_loopbreak(struct event_base *);
+
+
+/**
+  Add a timer event.
+
+  @param ev the event struct
+  @param tv the amount of time after which the timer expires
+ */
+#define evtimer_add(ev, tv)		event_add(ev, tv)
+
+
+/**
+  Define a timer event.
+
+  @param ev event struct to be modified
+  @param cb callback function
+  @param arg argument that will be passed to the callback function
+ */
+#define evtimer_set(ev, cb, arg)	event_set(ev, -1, 0, cb, arg)
+
+
+/**
+ * Delete a timer event.
+ *
+ * @param ev the event struct to be disabled
+ */
+#define evtimer_del(ev)			event_del(ev)
+#define evtimer_pending(ev, tv)		event_pending(ev, EV_TIMEOUT, tv)
+#define evtimer_initialized(ev)		((ev)->ev_flags & EVLIST_INIT)
+
+/**
+ * Add a timeout event.
+ *
+ * @param ev the timeout event to be added
+ * @param tv the timeout value (a struct timeval)
+ */
+#define timeout_add(ev, tv)		event_add(ev, tv)
+
+
+/**
+ * Define a timeout event.
+ *
+ * @param ev the event struct to be defined
+ * @param cb the callback to be invoked when the timeout expires
+ * @param arg the argument to be passed to the callback
+ */
+#define timeout_set(ev, cb, arg)	event_set(ev, -1, 0, cb, arg)
+
+
+/**
+ * Disable a timeout event.
+ *
+ * @param ev the timeout event to be disabled
+ */
+#define timeout_del(ev)			event_del(ev)
+
+#define timeout_pending(ev, tv)		event_pending(ev, EV_TIMEOUT, tv)
+#define timeout_initialized(ev)		((ev)->ev_flags & EVLIST_INIT)
+
+#define signal_add(ev, tv)		event_add(ev, tv)
+#define signal_set(ev, x, cb, arg)	\
+	event_set(ev, x, EV_SIGNAL|EV_PERSIST, cb, arg)
+#define signal_del(ev)			event_del(ev)
+#define signal_pending(ev, tv)		event_pending(ev, EV_SIGNAL, tv)
+#define signal_initialized(ev)		((ev)->ev_flags & EVLIST_INIT)
+
+/**
+  Prepare an event structure to be added.
+
+  The function event_set() prepares the event structure ev to be used in
+  future calls to event_add() and event_del().  The event will be prepared to
+  call the function specified by the fn argument with an int argument
+  indicating the file descriptor, a short argument indicating the type of
+  event, and a void * argument given in the arg argument.  The fd indicates
+  the file descriptor that should be monitored for events.  The events can be
+  either EV_READ, EV_WRITE, or both, indicating that an application can read
+  from or write to the file descriptor, respectively, without blocking.
+
+  The function fn will be called with the file descriptor that triggered the
+  event and the type of event which will be either EV_TIMEOUT, EV_SIGNAL,
+  EV_READ, or EV_WRITE.  The additional flag EV_PERSIST makes an event_add()
+  persistent until event_del() has been called.
+
+  @param ev an event struct to be modified
+  @param fd the file descriptor to be monitored
+  @param event desired events to monitor; can be EV_READ and/or EV_WRITE
+  @param fn callback function to be invoked when the event occurs
+  @param arg an argument to be passed to the callback function
+
+  @see event_add(), event_del(), event_once()
+
+ */
+void event_set(struct event *, int, short, void (*)(int, short, void *), void *);
+
+/**
+  Schedule a one-time event to occur.
+
+  The function event_once() is similar to event_set().  However, it schedules
+  a callback to be called exactly once and does not require the caller to
+  prepare an event structure.
+
+  @param fd a file descriptor to monitor
+  @param events event(s) to monitor; can be any of EV_TIMEOUT | EV_READ |
+         EV_WRITE
+  @param callback callback function to be invoked when the event occurs
+  @param arg an argument to be passed to the callback function
+  @param timeout the maximum amount of time to wait for the event, or NULL
+         to wait forever
+  @return 0 if successful, or -1 if an error occurred
+  @see event_set()
+
+ */
+int event_once(int, short, void (*)(int, short, void *), void *,
+    const struct timeval *);
+
+
+/**
+  Schedule a one-time event (threadsafe variant)
+
+  The function event_base_once() is similar to event_set().  However, it
+  schedules a callback to be called exactly once and does not require the
+  caller to prepare an event structure.
+
+  @param base an event_base returned by event_init()
+  @param fd a file descriptor to monitor
+  @param events event(s) to monitor; can be any of EV_TIMEOUT | EV_READ |
+         EV_WRITE
+  @param callback callback function to be invoked when the event occurs
+  @param arg an argument to be passed to the callback function
+  @param timeout the maximum amount of time to wait for the event, or NULL
+         to wait forever
+  @return 0 if successful, or -1 if an error occurred
+  @see event_once()
+ */
+int event_base_once(struct event_base *base, int fd, short events,
+    void (*callback)(int, short, void *), void *arg,
+    const struct timeval *timeout);
+
+
+/**
+  Add an event to the set of monitored events.
+
+  The function event_add() schedules the execution of the ev event when the
+  event specified in event_set() occurs, or after at least the time specified
+  in tv.  If tv is NULL, no timeout occurs and the function will only be
+  called if a matching event occurs on the file descriptor.  The event in the
+  ev argument must be already initialized by event_set() and may not be used
+  in calls to event_set() until it has timed out or been removed with
+  event_del().  If the event in the ev argument already has a scheduled
+  timeout, the old timeout will be replaced by the new one.
+
+  @param ev an event struct initialized via event_set()
+  @param timeout the maximum amount of time to wait for the event, or NULL
+         to wait forever
+  @return 0 if successful, or -1 if an error occurred
+  @see event_del(), event_set()
+  */
+int event_add(struct event *ev, const struct timeval *timeout);
+
+
+/**
+  Remove an event from the set of monitored events.
+
+  The function event_del() will cancel the event in the argument ev.  If the
+  event has already executed or has never been added, the call will have no
+  effect.
+
+  @param ev an event struct to be removed from the working set
+  @return 0 if successful, or -1 if an error occurred
+  @see event_add()
+ */
+int event_del(struct event *);
+
+void event_active(struct event *, int, short);
+
+
+/**
+  Checks if a specific event is pending or scheduled.
+
+  @param ev an event struct previously passed to event_add()
+  @param event the requested event type; any of EV_TIMEOUT|EV_READ|
+         EV_WRITE|EV_SIGNAL
+  @param tv if non-NULL and a timeout event is pending, filled in with the
+         time at which the timeout will expire
+
+  @return 1 if the event is pending, or 0 if it is not
+
+ */
+int event_pending(struct event *ev, short event, struct timeval *tv);
+
+
+/**
+  Test if an event structure has been initialized.
+
+  The event_initialized() macro can be used to check if an event has been
+  initialized.
+
+  @param ev an event structure to be tested
+  @return 1 if the structure has been initialized, or 0 if it has not been
+          initialized
+ */
+#ifdef WIN32
+#define event_initialized(ev)		((ev)->ev_flags & EVLIST_INIT && (ev)->ev_fd != (int)INVALID_HANDLE_VALUE)
+#else
+#define event_initialized(ev)		((ev)->ev_flags & EVLIST_INIT)
+#endif
+
+
+/**
+  Get the libevent version number.
+
+  @return a string containing the version number of libevent
+ */
+const char *event_get_version(void);
+
+
+/**
+  Get the kernel event notification mechanism used by libevent.
+
+  @return a string identifying the kernel event mechanism (kqueue, epoll, etc.)
+ */
+const char *event_get_method(void);
+
+
+/**
+  Set the number of different event priorities.
+
+  By default libevent schedules all active events with the same priority.
+  However, it is sometimes desirable to process some events with a higher
+  priority than others.  For that reason, libevent supports strict priority
+  queues.  Active events with a lower priority are always processed before
+  events with a higher priority.
+
+  The number of different priorities can be set initially with the
+  event_priority_init() function.  This function should be called before the
+  first call to event_dispatch().  The event_priority_set() function can be
+  used to assign a priority to an event.  By default, libevent assigns the
+  middle priority to all events unless their priority is explicitly set.
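+
+  A sketch, assuming an event ev that has been prepared with event_set()
+  but not yet added:
+
+  @code
+  event_priority_init(3);      // priorities 0 (highest) through 2
+  event_priority_set(&ev, 0);
+  event_add(&ev, NULL);
+  @endcode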
+
+  @param npriorities the maximum number of priorities
+  @return 0 if successful, or -1 if an error occurred
+  @see event_base_priority_init(), event_priority_set()
+
+ */
+int	event_priority_init(int);
+
+
+/**
+  Set the number of different event priorities (threadsafe variant).
+
+  See the description of event_priority_init() for more information.
+
+  @param eb the event_base structure returned by event_init()
+  @param npriorities the maximum number of priorities
+  @return 0 if successful, or -1 if an error occurred
+  @see event_priority_init(), event_priority_set()
+ */
+int	event_base_priority_init(struct event_base *, int);
+
+
+/**
+  Assign a priority to an event.
+
+  @param ev an event struct
+  @param priority the new priority to be assigned
+  @return 0 if successful, or -1 if an error occurred
+  @see event_priority_init()
+  */
+int	event_priority_set(struct event *, int);
+
+
+/* These functions deal with buffering input and output */
+
+struct evbuffer {
+	u_char *buffer;
+	u_char *orig_buffer;
+
+	size_t misalign;
+	size_t totallen;
+	size_t off;
+
+	void (*cb)(struct evbuffer *, size_t, size_t, void *);
+	void *cbarg;
+};
+
+/* Just for error reporting - use other constants otherwise */
+#define EVBUFFER_READ		0x01
+#define EVBUFFER_WRITE		0x02
+#define EVBUFFER_EOF		0x10
+#define EVBUFFER_ERROR		0x20
+#define EVBUFFER_TIMEOUT	0x40
+
+struct bufferevent;
+typedef void (*evbuffercb)(struct bufferevent *, void *);
+typedef void (*everrorcb)(struct bufferevent *, short what, void *);
+
+struct event_watermark {
+	size_t low;
+	size_t high;
+};
+
+#ifndef EVENT_NO_STRUCT
+struct bufferevent {
+	struct event_base *ev_base;
+
+	struct event ev_read;
+	struct event ev_write;
+
+	struct evbuffer *input;
+	struct evbuffer *output;
+
+	struct event_watermark wm_read;
+	struct event_watermark wm_write;
+
+	evbuffercb readcb;
+	evbuffercb writecb;
+	everrorcb errorcb;
+	void *cbarg;
+
+	int timeout_read;	/* in seconds */
+	int timeout_write;	/* in seconds */
+
+	short enabled;	/* events that are currently enabled */
+};
+#endif
+
+/**
+  Create a new bufferevent.
+
+  libevent provides an abstraction on top of the regular event callbacks.
+  This abstraction is called a buffered event.  A buffered event provides
+  input and output buffers that get filled and drained automatically.  The
+  user of a buffered event no longer deals directly with the I/O, but
+  instead is reading from input and writing to output buffers.
+
+  Once initialized, the bufferevent structure can be used repeatedly with
+  bufferevent_enable() and bufferevent_disable().
+
+  When read-enabled, the bufferevent will try to read from the file descriptor
+  and call the read callback.  The write callback is executed whenever the
+  output buffer is drained below the write low watermark, which is 0 by
+  default.
+
+  If multiple bases are in use, bufferevent_base_set() must be called before
+  enabling the bufferevent for the first time.
+
+  @param fd the file descriptor from which data is read and to which it is
+  		written. This file descriptor is not allowed to be a pipe(2).
+  @param readcb callback to invoke when there is data to be read, or NULL if
+         no callback is desired
+  @param writecb callback to invoke when the file descriptor is ready for
+         writing, or NULL if no callback is desired
+  @param errorcb callback to invoke when there is an error on the file
+         descriptor
+  @param cbarg an argument that will be supplied to each of the callbacks
+         (readcb, writecb, and errorcb)
+  @return a pointer to a newly allocated bufferevent struct, or NULL if an
+          error occurred
+  @see bufferevent_base_set(), bufferevent_free()
+  */
+struct bufferevent *bufferevent_new(int fd,
+    evbuffercb readcb, evbuffercb writecb, everrorcb errorcb, void *cbarg);
+
+
+/**
+  Assign a bufferevent to a specific event_base.
+
+  @param base an event_base returned by event_init()
+  @param bufev a bufferevent struct returned by bufferevent_new()
+  @return 0 if successful, or -1 if an error occurred
+  @see bufferevent_new()
+ */
+int bufferevent_base_set(struct event_base *base, struct bufferevent *bufev);
+
+
+/**
+  Assign a priority to a bufferevent.
+
+  @param bufev a bufferevent struct
+  @param pri the priority to be assigned
+  @return 0 if successful, or -1 if an error occurred
+  */
+int bufferevent_priority_set(struct bufferevent *bufev, int pri);
+
+
+/**
+  Deallocate the storage associated with a bufferevent structure.
+
+  @param bufev the bufferevent structure to be freed.
+  */
+void bufferevent_free(struct bufferevent *bufev);
+
+
+/**
+  Changes the callbacks for a bufferevent.
+
+  @param bufev the bufferevent object for which to change callbacks
+  @param readcb callback to invoke when there is data to be read, or NULL if
+         no callback is desired
+  @param writecb callback to invoke when the file descriptor is ready for
+         writing, or NULL if no callback is desired
+  @param errorcb callback to invoke when there is an error on the file
+         descriptor
+  @param cbarg an argument that will be supplied to each of the callbacks
+         (readcb, writecb, and errorcb)
+  @see bufferevent_new()
+  */
+void bufferevent_setcb(struct bufferevent *bufev,
+    evbuffercb readcb, evbuffercb writecb, everrorcb errorcb, void *cbarg);
+
+/**
+  Changes the file descriptor on which the bufferevent operates.
+
+  @param bufev the bufferevent object for which to change the file descriptor
+  @param fd the file descriptor to operate on
+*/
+void bufferevent_setfd(struct bufferevent *bufev, int fd);
+
+/**
+  Write data to a bufferevent buffer.
+
+  The bufferevent_write() function can be used to write data to the file
+  descriptor.  The data is appended to the output buffer and written to the
+  descriptor automatically as it becomes available for writing.
+
+  @param bufev the bufferevent to be written to
+  @param data a pointer to the data to be written
+  @param size the length of the data, in bytes
+  @return 0 if successful, or -1 if an error occurred
+  @see bufferevent_write_buffer()
+  */
+int bufferevent_write(struct bufferevent *bufev,
+    const void *data, size_t size);
+
+
+/**
+  Write data from an evbuffer to a bufferevent buffer.  The evbuffer is
+  being drained as a result.
+
+  @param bufev the bufferevent to be written to
+  @param buf the evbuffer to be written
+  @return 0 if successful, or -1 if an error occurred
+  @see bufferevent_write()
+ */
+int bufferevent_write_buffer(struct bufferevent *bufev, struct evbuffer *buf);
+
+
+/**
+  Read data from a bufferevent buffer.
+
+  The bufferevent_read() function is used to read data from the input buffer.
+
+  @param bufev the bufferevent to be read from
+  @param data pointer to a buffer that will store the data
+  @param size the size of the data buffer, in bytes
+  @return the amount of data read, in bytes.
+ */
+size_t bufferevent_read(struct bufferevent *bufev, void *data, size_t size);
+
+/**
+  Enable a bufferevent.
+
+  @param bufev the bufferevent to be enabled
+  @param event any combination of EV_READ | EV_WRITE.
+  @return 0 if successful, or -1 if an error occurred
+  @see bufferevent_disable()
+ */
+int bufferevent_enable(struct bufferevent *bufev, short event);
+
+
+/**
+  Disable a bufferevent.
+
+  @param bufev the bufferevent to be disabled
+  @param event any combination of EV_READ | EV_WRITE.
+  @return 0 if successful, or -1 if an error occurred
+  @see bufferevent_enable()
+ */
+int bufferevent_disable(struct bufferevent *bufev, short event);
+
+
+/**
+  Set the read and write timeout for a buffered event.
+
+  @param bufev the bufferevent to be modified
+  @param timeout_read the read timeout
+  @param timeout_write the write timeout
+ */
+void bufferevent_settimeout(struct bufferevent *bufev,
+    int timeout_read, int timeout_write);
+
+
+/**
+  Sets the watermarks for read and write events.
+
+  On input, a bufferevent does not invoke the user read callback unless
+  there is at least the low-watermark amount of data in the buffer.  If the
+  read buffer is beyond the high watermark, the bufferevent stops reading
+  from the network.
+
+  On output, the user write callback is invoked whenever the buffered data
+  falls below the low watermark.
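+
+  For example, a sketch that defers the read callback until at least 128
+  bytes are buffered and stops reading once 4096 bytes are pending:
+
+  @code
+  bufferevent_setwatermark(bufev, EV_READ, 128, 4096);
+  @endcode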
+
+  @param bufev the bufferevent to be modified
+  @param events EV_READ, EV_WRITE or both
+  @param lowmark the low watermark to set
+  @param highmark the high watermark to set
+*/
+
+void bufferevent_setwatermark(struct bufferevent *bufev, short events,
+    size_t lowmark, size_t highmark);
+
+#define EVBUFFER_LENGTH(x)	(x)->off
+#define EVBUFFER_DATA(x)	(x)->buffer
+#define EVBUFFER_INPUT(x)	(x)->input
+#define EVBUFFER_OUTPUT(x)	(x)->output
+
+
+/**
+  Allocate storage for a new evbuffer.
+
+  @return a pointer to a newly allocated evbuffer struct, or NULL if an error
+          occurred
+ */
+struct evbuffer *evbuffer_new(void);
+
+
+/**
+  Deallocate storage for an evbuffer.
+
+  @param buf the evbuffer to be freed
+ */
+void evbuffer_free(struct evbuffer *);
+
+
+/**
+  Expands the available space in an event buffer.
+
+  Expands the available space in the event buffer to at least datlen bytes.
+
+  @param buf the event buffer to be expanded
+  @param datlen the new minimum length requirement
+  @return 0 if successful, or -1 if an error occurred
+*/
+int evbuffer_expand(struct evbuffer *, size_t);
+
+
+/**
+  Append data to the end of an evbuffer.
+
+  @param buf the event buffer to be appended to
+  @param data pointer to the beginning of the data buffer
+  @param datlen the number of bytes to be copied from the data buffer
+  @return 0 if successful, or -1 if an error occurred
+ */
+int evbuffer_add(struct evbuffer *, const void *, size_t);
+
+
+
+/**
+  Read data from an event buffer and drain the bytes read.
+
+  @param buf the event buffer to be read from
+  @param data the destination buffer to store the result
+  @param datlen the maximum size of the destination buffer
+  @return the number of bytes read
+ */
+int evbuffer_remove(struct evbuffer *, void *, size_t);
+
+
+/**
+ * Read a single line from an event buffer.
+ *
+ * Reads a line terminated by '\r\n', '\n\r', '\r', or '\n'.
+ * The returned buffer needs to be freed by the caller.
+ *
+ * @param buffer the evbuffer to read from
+ * @return pointer to a single line, or NULL if an error occurred
+ */
+char *evbuffer_readline(struct evbuffer *);
+
+
+/** Used to tell evbuffer_readln what kind of line-ending to look for.
+ */
+enum evbuffer_eol_style {
+	/** Any sequence of CR and LF characters is acceptable as an EOL. */
+	EVBUFFER_EOL_ANY,
+	/** An EOL is an LF, optionally preceded by a CR.  This style is
+	 * most useful for implementing text-based internet protocols. */
+	EVBUFFER_EOL_CRLF,
+	/** An EOL is a CR followed by an LF. */
+	EVBUFFER_EOL_CRLF_STRICT,
+	/** An EOL is a LF. */
+	EVBUFFER_EOL_LF
+};
+
+/**
+ * Read a single line from an event buffer.
+ *
+ * Reads a line terminated by an EOL as determined by the evbuffer_eol_style
+ * argument.  Returns a newly allocated nul-terminated string; the caller must
+ * free the returned value.  The EOL is not included in the returned string.
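+ *
+ * A sketch that drains an evbuffer line by line (buf is assumed to be a
+ * struct evbuffer *):
+ *
+ * @code
+ * char *line;
+ * size_t len;
+ * while ((line = evbuffer_readln(buf, &len, EVBUFFER_EOL_CRLF)) != NULL) {
+ *     // process len bytes at line
+ *     free(line);
+ * }
+ * @endcode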
+ *
+ * @param buffer the evbuffer to read from
+ * @param n_read_out if non-NULL, points to a size_t that is set to the
+ *       number of characters in the returned string.  This is useful for
+ *       strings that can contain NUL characters.
+ * @param eol_style the style of line-ending to use.
+ * @return pointer to a single line, or NULL if an error occurred
+ */
+char *evbuffer_readln(struct evbuffer *buffer, size_t *n_read_out,
+    enum evbuffer_eol_style eol_style);
+
+
+/**
+  Move data from one evbuffer into another evbuffer.
+
+  This is a destructive add.  The data from one buffer moves into
+  the other buffer. The destination buffer is expanded as needed.
+
+  @param outbuf the output buffer
+  @param inbuf the input buffer
+  @return 0 if successful, or -1 if an error occurred
+ */
+int evbuffer_add_buffer(struct evbuffer *, struct evbuffer *);
+
+
+/**
+  Append a formatted string to the end of an evbuffer.
+
+  @param buf the evbuffer that will be appended to
+  @param fmt a format string
+  @param ... arguments that will be passed to printf(3)
+  @return The number of bytes added if successful, or -1 if an error occurred.
+ */
+int evbuffer_add_printf(struct evbuffer *, const char *fmt, ...)
+#ifdef __GNUC__
+  __attribute__((format(printf, 2, 3)))
+#endif
+;
+
+
+/**
+  Append a va_list formatted string to the end of an evbuffer.
+
+  @param buf the evbuffer that will be appended to
+  @param fmt a format string
+  @param ap a varargs va_list argument array that will be passed to vprintf(3)
+  @return The number of bytes added if successful, or -1 if an error occurred.
+ */
+int evbuffer_add_vprintf(struct evbuffer *, const char *fmt, va_list ap);
+
+
+/**
+  Remove a specified number of bytes of data from the beginning of an evbuffer.
+
+  @param buf the evbuffer to be drained
+  @param len the number of bytes to drain from the beginning of the buffer
+ */
+void evbuffer_drain(struct evbuffer *, size_t);
+
+
+/**
+  Write the contents of an evbuffer to a file descriptor.
+
+  The evbuffer will be drained after the bytes have been successfully written.
+
+  @param buffer the evbuffer to be written and drained
+  @param fd the file descriptor to be written to
+  @return the number of bytes written, or -1 if an error occurred
+  @see evbuffer_read()
+ */
+int evbuffer_write(struct evbuffer *, int);
+
+
+/**
+  Read from a file descriptor and store the result in an evbuffer.
+
+  @param buf the evbuffer to store the result
+  @param fd the file descriptor to read from
+  @param howmuch the number of bytes to be read
+  @return the number of bytes read, or -1 if an error occurred
+  @see evbuffer_write()
+ */
+int evbuffer_read(struct evbuffer *, int, int);
+
+
+/**
+  Find a string within an evbuffer.
+
+  @param buffer the evbuffer to be searched
+  @param what the string to be searched for
+  @param len the length of the search string
+  @return a pointer to the beginning of the search string, or NULL if the search failed.
+ */
+u_char *evbuffer_find(struct evbuffer *, const u_char *, size_t);
+
+/**
+  Set a callback to invoke when the evbuffer is modified.
+
+  @param buffer the evbuffer to be monitored
+  @param cb the callback function to invoke when the evbuffer is modified
+  @param cbarg an argument to be provided to the callback function
+ */
+void evbuffer_setcb(struct evbuffer *, void (*)(struct evbuffer *, size_t, size_t, void *), void *);
+
+/*
+ * Marshaling tagged data.  We assume that all tags are inserted in
+ * numeric order, so that unknown tags will always be higher than the
+ * known ones, and we can just ignore the end of an event buffer.
+ */
+
+void evtag_init(void);
+
+void evtag_marshal(struct evbuffer *evbuf, ev_uint32_t tag, const void *data,
+    ev_uint32_t len);
+
+/**
+  Encode an integer and store it in an evbuffer.
+
+  We encode integers by nibbles; the first nibble contains the number
+  of significant nibbles - 1;  this allows us to encode up to 64-bit
+  integers.  This function is byte-order independent.
+
+  @param evbuf evbuffer to store the encoded number
+  @param number a 32-bit integer
+ */
+void encode_int(struct evbuffer *evbuf, ev_uint32_t number);
+
+void evtag_marshal_int(struct evbuffer *evbuf, ev_uint32_t tag,
+    ev_uint32_t integer);
+
+void evtag_marshal_string(struct evbuffer *buf, ev_uint32_t tag,
+    const char *string);
+
+void evtag_marshal_timeval(struct evbuffer *evbuf, ev_uint32_t tag,
+    struct timeval *tv);
+
+int evtag_unmarshal(struct evbuffer *src, ev_uint32_t *ptag,
+    struct evbuffer *dst);
+int evtag_peek(struct evbuffer *evbuf, ev_uint32_t *ptag);
+int evtag_peek_length(struct evbuffer *evbuf, ev_uint32_t *plength);
+int evtag_payload_length(struct evbuffer *evbuf, ev_uint32_t *plength);
+int evtag_consume(struct evbuffer *evbuf);
+
+int evtag_unmarshal_int(struct evbuffer *evbuf, ev_uint32_t need_tag,
+    ev_uint32_t *pinteger);
+
+int evtag_unmarshal_fixed(struct evbuffer *src, ev_uint32_t need_tag,
+    void *data, size_t len);
+
+int evtag_unmarshal_string(struct evbuffer *evbuf, ev_uint32_t need_tag,
+    char **pstring);
+
+int evtag_unmarshal_timeval(struct evbuffer *evbuf, ev_uint32_t need_tag,
+    struct timeval *ptv);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _EVENT_H_ */
diff --git a/base/third_party/libevent/event_rpcgen.py b/base/third_party/libevent/event_rpcgen.py
new file mode 100755
index 0000000..4ec77a6
--- /dev/null
+++ b/base/third_party/libevent/event_rpcgen.py
@@ -0,0 +1,1423 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2005 Niels Provos <provos@citi.umich.edu>
+# All rights reserved.
+#
+# Generates marshaling code based on libevent.
+
+import sys
+import re
+
+#
+_NAME = "event_rpcgen.py"
+_VERSION = "0.1"
+_STRUCT_RE = '[a-z][a-z_0-9]*'
+
+# Globals
+line_count = 0
+
+white = re.compile(r'^\s+')
+cppcomment = re.compile(r'\/\/.*$')
+headerdirect = []
+cppdirect = []
+
+# Holds everything that makes a struct
+class Struct:
+    def __init__(self, name):
+        self._name = name
+        self._entries = []
+        self._tags = {}
+        print >>sys.stderr, '  Created struct: %s' % name
+
+    def AddEntry(self, entry):
+        if self._tags.has_key(entry.Tag()):
+            print >>sys.stderr, ( 'Entry "%s" duplicates tag number '
+                                  '%d from "%s" around line %d' ) % (
+                entry.Name(), entry.Tag(),
+                self._tags[entry.Tag()], line_count)
+            sys.exit(1)
+        self._entries.append(entry)
+        self._tags[entry.Tag()] = entry.Name()
+        print >>sys.stderr, '    Added entry: %s' % entry.Name()
+
+    def Name(self):
+        return self._name
+
+    def EntryTagName(self, entry):
+        """Creates the name inside an enumeration for distinguishing data
+        types."""
+        name = "%s_%s" % (self._name, entry.Name())
+        return name.upper()
+
+    def PrintIdented(self, file, ident, code):
+        """Takes an array, add indentation to each entry and prints it."""
+        for entry in code:
+            print >>file, '%s%s' % (ident, entry)
+
+    def PrintTags(self, file):
+        """Prints the tag definitions for a structure."""
+        print >>file, '/* Tag definition for %s */' % self._name
+        print >>file, 'enum %s_ {' % self._name.lower()
+        for entry in self._entries:
+            print >>file, '  %s=%d,' % (self.EntryTagName(entry),
+                                        entry.Tag())
+        print >>file, '  %s_MAX_TAGS' % (self._name.upper())
+        print >>file, '};\n'
+
+    def PrintForwardDeclaration(self, file):
+        print >>file, 'struct %s;' % self._name
+
+    def PrintDeclaration(self, file):
+        print >>file, '/* Structure declaration for %s */' % self._name
+        print >>file, 'struct %s_access_ {' % self._name
+        for entry in self._entries:
+            dcl = entry.AssignDeclaration('(*%s_assign)' % entry.Name())
+            dcl.extend(
+                entry.GetDeclaration('(*%s_get)' % entry.Name()))
+            if entry.Array():
+                dcl.extend(
+                    entry.AddDeclaration('(*%s_add)' % entry.Name()))
+            self.PrintIdented(file, '  ', dcl)
+        print >>file, '};\n'
+
+        print >>file, 'struct %s {' % self._name
+        print >>file, '  struct %s_access_ *base;\n' % self._name
+        for entry in self._entries:
+            dcl = entry.Declaration()
+            self.PrintIdented(file, '  ', dcl)
+        print >>file, ''
+        for entry in self._entries:
+            print >>file, '  ev_uint8_t %s_set;' % entry.Name()
+        print >>file, '};\n'
+
+        print >>file, \
+"""struct %(name)s *%(name)s_new(void);
+void %(name)s_free(struct %(name)s *);
+void %(name)s_clear(struct %(name)s *);
+void %(name)s_marshal(struct evbuffer *, const struct %(name)s *);
+int %(name)s_unmarshal(struct %(name)s *, struct evbuffer *);
+int %(name)s_complete(struct %(name)s *);
+void evtag_marshal_%(name)s(struct evbuffer *, ev_uint32_t, 
+    const struct %(name)s *);
+int evtag_unmarshal_%(name)s(struct evbuffer *, ev_uint32_t,
+    struct %(name)s *);""" % { 'name' : self._name }
+
+
+        # Write a setting function of every variable
+        for entry in self._entries:
+            self.PrintIdented(file, '', entry.AssignDeclaration(
+                entry.AssignFuncName()))
+            self.PrintIdented(file, '', entry.GetDeclaration(
+                entry.GetFuncName()))
+            if entry.Array():
+                self.PrintIdented(file, '', entry.AddDeclaration(
+                    entry.AddFuncName()))
+
+        print >>file, '/* --- %s done --- */\n' % self._name
+
+    def PrintCode(self, file):
+        print >>file, ('/*\n'
+                       ' * Implementation of %s\n'
+                       ' */\n') % self._name
+
+        print >>file, \
+              'static struct %(name)s_access_ __%(name)s_base = {' % \
+              { 'name' : self._name }
+        for entry in self._entries:
+            self.PrintIdented(file, '  ', entry.CodeBase())
+        print >>file, '};\n'
+
+        # Creation
+        print >>file, (
+            'struct %(name)s *\n'
+            '%(name)s_new(void)\n'
+            '{\n'
+            '  struct %(name)s *tmp;\n'
+            '  if ((tmp = malloc(sizeof(struct %(name)s))) == NULL) {\n'
+            '    event_warn("%%s: malloc", __func__);\n'
+            '    return (NULL);\n'
+            '  }\n'
+            '  tmp->base = &__%(name)s_base;\n') % { 'name' : self._name }
+
+        for entry in self._entries:
+            self.PrintIdented(file, '  ', entry.CodeNew('tmp'))
+            print >>file, '  tmp->%s_set = 0;\n' % entry.Name()
+
+        print >>file, (
+            '  return (tmp);\n'
+            '}\n')
+
+        # Adding
+        for entry in self._entries:
+            if entry.Array():
+                self.PrintIdented(file, '', entry.CodeAdd())
+            print >>file, ''
+            
+        # Assigning
+        for entry in self._entries:
+            self.PrintIdented(file, '', entry.CodeAssign())
+            print >>file, ''
+
+        # Getting
+        for entry in self._entries:
+            self.PrintIdented(file, '', entry.CodeGet())
+            print >>file, ''
+            
+        # Clearing
+        print >>file, ( 'void\n'
+                        '%(name)s_clear(struct %(name)s *tmp)\n'
+                        '{'
+                        ) % { 'name' : self._name }
+        for entry in self._entries:
+            self.PrintIdented(file, '  ', entry.CodeClear('tmp'))
+
+        print >>file, '}\n'
+
+        # Freeing
+        print >>file, ( 'void\n'
+                        '%(name)s_free(struct %(name)s *tmp)\n'
+                        '{'
+                        ) % { 'name' : self._name }
+        
+        for entry in self._entries:
+            self.PrintIdented(file, '  ', entry.CodeFree('tmp'))
+
+        print >>file, ('  free(tmp);\n'
+                       '}\n')
+
+        # Marshaling
+        print >>file, ('void\n'
+                       '%(name)s_marshal(struct evbuffer *evbuf, '
+                       'const struct %(name)s *tmp)'
+                       '{') % { 'name' : self._name }
+        for entry in self._entries:
+            indent = '  '
+            # Optional entries do not have to be set
+            if entry.Optional():
+                indent += '  '
+                print >>file, '  if (tmp->%s_set) {' % entry.Name()
+            self.PrintIdented(
+                file, indent,
+                entry.CodeMarshal('evbuf', self.EntryTagName(entry), 'tmp'))
+            if entry.Optional():
+                print >>file, '  }'
+
+        print >>file, '}\n'
+                       
+        # Unmarshaling
+        print >>file, ('int\n'
+                       '%(name)s_unmarshal(struct %(name)s *tmp, '
+                       ' struct evbuffer *evbuf)\n'
+                       '{\n'
+                       '  ev_uint32_t tag;\n'
+                       '  while (EVBUFFER_LENGTH(evbuf) > 0) {\n'
+                       '    if (evtag_peek(evbuf, &tag) == -1)\n'
+                       '      return (-1);\n'
+                       '    switch (tag) {\n'
+                       ) % { 'name' : self._name }
+        for entry in self._entries:
+            print >>file, '      case %s:\n' % self.EntryTagName(entry)
+            if not entry.Array():
+                print >>file, (
+                    '        if (tmp->%s_set)\n'
+                    '          return (-1);'
+                    ) % (entry.Name())
+
+            self.PrintIdented(
+                file, '        ',
+                entry.CodeUnmarshal('evbuf',
+                                    self.EntryTagName(entry), 'tmp'))
+
+            print >>file, ( '        tmp->%s_set = 1;\n' % entry.Name() +
+                            '        break;\n' )
+        print >>file, ( '      default:\n'
+                        '        return -1;\n'
+                        '    }\n'
+                        '  }\n' )
+        # Check if it was decoded completely
+        print >>file, ( '  if (%(name)s_complete(tmp) == -1)\n'
+                        '    return (-1);'
+                        ) % { 'name' : self._name }
+
+        # Successfully decoded
+        print >>file, ( '  return (0);\n'
+                        '}\n')
+
+        # Checking if a structure has all the required data
+        print >>file, (
+            'int\n'
+            '%(name)s_complete(struct %(name)s *msg)\n'
+            '{' ) % { 'name' : self._name }
+        for entry in self._entries:
+            self.PrintIdented(
+                file, '  ',
+                entry.CodeComplete('msg'))
+        print >>file, (
+            '  return (0);\n'
+            '}\n' )
+
+        # Complete message unmarshaling
+        print >>file, (
+            'int\n'
+            'evtag_unmarshal_%(name)s(struct evbuffer *evbuf, '
+            'ev_uint32_t need_tag, struct %(name)s *msg)\n'
+            '{\n'
+            '  ev_uint32_t tag;\n'
+            '  int res = -1;\n'
+            '\n'
+            '  struct evbuffer *tmp = evbuffer_new();\n'
+            '\n'
+            '  if (evtag_unmarshal(evbuf, &tag, tmp) == -1'
+            ' || tag != need_tag)\n'
+            '    goto error;\n'
+            '\n'
+            '  if (%(name)s_unmarshal(msg, tmp) == -1)\n'
+            '    goto error;\n'
+            '\n'
+            '  res = 0;\n'
+            '\n'
+            ' error:\n'
+            '  evbuffer_free(tmp);\n'
+            '  return (res);\n'
+            '}\n' ) % { 'name' : self._name }
+
+        # Complete message marshaling
+        print >>file, (
+            'void\n'
+            'evtag_marshal_%(name)s(struct evbuffer *evbuf, ev_uint32_t tag, '
+            'const struct %(name)s *msg)\n'
+            '{\n'
+            '  struct evbuffer *_buf = evbuffer_new();\n'
+            '  assert(_buf != NULL);\n'
+            '  evbuffer_drain(_buf, -1);\n'
+            '  %(name)s_marshal(_buf, msg);\n'
+            '  evtag_marshal(evbuf, tag, EVBUFFER_DATA(_buf), '
+            'EVBUFFER_LENGTH(_buf));\n'
+            '  evbuffer_free(_buf);\n'
+            '}\n' ) % { 'name' : self._name }
+
+class Entry:
+    def __init__(self, type, name, tag):
+        self._type = type
+        self._name = name
+        self._tag = int(tag)
+        self._ctype = type
+        self._optional = 0
+        self._can_be_array = 0
+        self._array = 0
+        self._line_count = -1
+        self._struct = None
+        self._refname = None
+
+    def GetTranslation(self):
+        return { "parent_name" : self._struct.Name(),
+                 "name" : self._name,
+                 "ctype" : self._ctype,
+                 "refname" : self._refname
+                 }
+    
+    def SetStruct(self, struct):
+        self._struct = struct
+
+    def LineCount(self):
+        assert self._line_count != -1
+        return self._line_count
+
+    def SetLineCount(self, number):
+        self._line_count = number
+
+    def Array(self):
+        return self._array
+
+    def Optional(self):
+        return self._optional
+
+    def Tag(self):
+        return self._tag
+
+    def Name(self):
+        return self._name
+
+    def Type(self):
+        return self._type
+
+    def MakeArray(self, yes=1):
+        self._array = yes
+        
+    def MakeOptional(self):
+        self._optional = 1
+
+    def GetFuncName(self):
+        return '%s_%s_get' % (self._struct.Name(), self._name)
+    
+    def GetDeclaration(self, funcname):
+        code = [ 'int %s(struct %s *, %s *);' % (
+            funcname, self._struct.Name(), self._ctype ) ]
+        return code
+
+    def CodeGet(self):
+        code = (
+            'int',
+            '%(parent_name)s_%(name)s_get(struct %(parent_name)s *msg, '
+            '%(ctype)s *value)',
+            '{',
+            '  if (msg->%(name)s_set != 1)',
+            '    return (-1);',
+            '  *value = msg->%(name)s_data;',
+            '  return (0);',
+            '}' )
+        code = '\n'.join(code)
+        code = code % self.GetTranslation()
+        return code.split('\n')
+        
+    def AssignFuncName(self):
+        return '%s_%s_assign' % (self._struct.Name(), self._name)
+    
+    def AddFuncName(self):
+        return '%s_%s_add' % (self._struct.Name(), self._name)
+    
+    def AssignDeclaration(self, funcname):
+        code = [ 'int %s(struct %s *, const %s);' % (
+            funcname, self._struct.Name(), self._ctype ) ]
+        return code
+
+    def CodeAssign(self):
+        code = [ 'int',
+                 '%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg,'
+                 ' const %(ctype)s value)',
+                 '{',
+                 '  msg->%(name)s_set = 1;',
+                 '  msg->%(name)s_data = value;',
+                 '  return (0);',
+                 '}' ]
+        code = '\n'.join(code)
+        code = code % self.GetTranslation()
+        return code.split('\n')
+
+    def CodeClear(self, structname):
+        code = [ '%s->%s_set = 0;' % (structname, self.Name()) ]
+
+        return code
+        
+    def CodeComplete(self, structname):
+        if self.Optional():
+            return []
+        
+        code = [ 'if (!%s->%s_set)' % (structname, self.Name()),
+                 '  return (-1);' ]
+
+        return code
+
+    def CodeFree(self, name):
+        return []
+
+    def CodeBase(self):
+        code = [
+            '%(parent_name)s_%(name)s_assign,',
+            '%(parent_name)s_%(name)s_get,'
+            ]
+        if self.Array():
+            code.append('%(parent_name)s_%(name)s_add,')
+
+        code = '\n'.join(code)
+        code = code % self.GetTranslation()
+        return code.split('\n')
+
+    def Verify(self):
+        if self.Array() and not self._can_be_array:
+            print >>sys.stderr, (
+                'Entry "%s" cannot be created as an array '
+                'around line %d' ) % (self._name, self.LineCount())
+            sys.exit(1)
+        if not self._struct:
+            print >>sys.stderr, (
+                'Entry "%s" does not know which struct it belongs to '
+                'around line %d' ) % (self._name, self.LineCount())
+            sys.exit(1)
+        if self._optional and self._array:
+            print >>sys.stderr,  ( 'Entry "%s" has illegal combination of '
+                                   'optional and array around line %d' ) % (
+                self._name, self.LineCount() )
+            sys.exit(1)
+
+class EntryBytes(Entry):
+    def __init__(self, type, name, tag, length):
+        # Init base class
+        Entry.__init__(self, type, name, tag)
+
+        self._length = length
+        self._ctype = 'ev_uint8_t'
+
+    def GetDeclaration(self, funcname):
+        code = [ 'int %s(struct %s *, %s **);' % (
+            funcname, self._struct.Name(), self._ctype ) ]
+        return code
+        
+    def AssignDeclaration(self, funcname):
+        code = [ 'int %s(struct %s *, const %s *);' % (
+            funcname, self._struct.Name(), self._ctype ) ]
+        return code
+        
+    def Declaration(self):
+        dcl  = ['ev_uint8_t %s_data[%s];' % (self._name, self._length)]
+        
+        return dcl
+
+    def CodeGet(self):
+        name = self._name
+        code = [ 'int',
+                 '%s_%s_get(struct %s *msg, %s **value)' % (
+            self._struct.Name(), name,
+            self._struct.Name(), self._ctype),
+                 '{',
+                 '  if (msg->%s_set != 1)' % name,
+                 '    return (-1);',
+                 '  *value = msg->%s_data;' % name,
+                 '  return (0);',
+                 '}' ]
+        return code
+        
+    def CodeAssign(self):
+        name = self._name
+        code = [ 'int',
+                 '%s_%s_assign(struct %s *msg, const %s *value)' % (
+            self._struct.Name(), name,
+            self._struct.Name(), self._ctype),
+                 '{',
+                 '  msg->%s_set = 1;' % name,
+                 '  memcpy(msg->%s_data, value, %s);' % (
+            name, self._length),
+                 '  return (0);',
+                 '}' ]
+        return code
+        
+    def CodeUnmarshal(self, buf, tag_name, var_name):
+        code = [  'if (evtag_unmarshal_fixed(%s, %s, ' % (buf, tag_name) +
+                  '%s->%s_data, ' % (var_name, self._name) +
+                  'sizeof(%s->%s_data)) == -1) {' % (
+            var_name, self._name),
+                  '  event_warnx("%%s: failed to unmarshal %s", __func__);' % (
+            self._name ),
+                  '  return (-1);',
+                  '}'
+                  ]
+        return code
+
+    def CodeMarshal(self, buf, tag_name, var_name):
+        code = ['evtag_marshal(%s, %s, %s->%s_data, sizeof(%s->%s_data));' % (
+            buf, tag_name, var_name, self._name, var_name, self._name )]
+        return code
+
+    def CodeClear(self, structname):
+        code = [ '%s->%s_set = 0;' % (structname, self.Name()),
+                 'memset(%s->%s_data, 0, sizeof(%s->%s_data));' % (
+            structname, self._name, structname, self._name)]
+
+        return code
+        
+    def CodeNew(self, name):
+        code  = ['memset(%s->%s_data, 0, sizeof(%s->%s_data));' % (
+            name, self._name, name, self._name)]
+        return code
+
+    def Verify(self):
+        if not self._length:
+            print >>sys.stderr, 'Entry "%s" needs a length around line %d' % (
+                self._name, self.LineCount() )
+            sys.exit(1)
+
+        Entry.Verify(self)
+
+class EntryInt(Entry):
+    def __init__(self, type, name, tag):
+        # Init base class
+        Entry.__init__(self, type, name, tag)
+
+        self._ctype = 'ev_uint32_t'
+
+    def CodeUnmarshal(self, buf, tag_name, var_name):
+        code = ['if (evtag_unmarshal_int(%s, %s, &%s->%s_data) == -1) {' % (
+            buf, tag_name, var_name, self._name),
+                  '  event_warnx("%%s: failed to unmarshal %s", __func__);' % (
+            self._name ),
+                '  return (-1);',
+                '}' ] 
+        return code
+
+    def CodeMarshal(self, buf, tag_name, var_name):
+        code = ['evtag_marshal_int(%s, %s, %s->%s_data);' % (
+            buf, tag_name, var_name, self._name)]
+        return code
+
+    def Declaration(self):
+        dcl  = ['ev_uint32_t %s_data;' % self._name]
+
+        return dcl
+
+    def CodeNew(self, name):
+        code = ['%s->%s_data = 0;' % (name, self._name)]
+        return code
+
+class EntryString(Entry):
+    def __init__(self, type, name, tag):
+        # Init base class
+        Entry.__init__(self, type, name, tag)
+
+        self._ctype = 'char *'
+
+    def CodeAssign(self):
+        name = self._name
+        code = """int
+%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg,
+    const %(ctype)s value)
+{
+  if (msg->%(name)s_data != NULL)
+    free(msg->%(name)s_data);
+  if ((msg->%(name)s_data = strdup(value)) == NULL)
+    return (-1);
+  msg->%(name)s_set = 1;
+  return (0);
+}""" % self.GetTranslation()
+
+        return code.split('\n')
+        
+    def CodeUnmarshal(self, buf, tag_name, var_name):
+        code = ['if (evtag_unmarshal_string(%s, %s, &%s->%s_data) == -1) {' % (
+            buf, tag_name, var_name, self._name),
+                '  event_warnx("%%s: failed to unmarshal %s", __func__);' % (
+            self._name ),
+                '  return (-1);',
+                '}'
+                ]
+        return code
+
+    def CodeMarshal(self, buf, tag_name, var_name):
+        code = ['evtag_marshal_string(%s, %s, %s->%s_data);' % (
+            buf, tag_name, var_name, self._name)]
+        return code
+
+    def CodeClear(self, structname):
+        code = [ 'if (%s->%s_set == 1) {' % (structname, self.Name()),
+                 '  free (%s->%s_data);' % (structname, self.Name()),
+                 '  %s->%s_data = NULL;' % (structname, self.Name()),
+                 '  %s->%s_set = 0;' % (structname, self.Name()),
+                 '}'
+                 ]
+
+        return code
+        
+    def CodeNew(self, name):
+        code  = ['%s->%s_data = NULL;' % (name, self._name)]
+        return code
+
+    def CodeFree(self, name):
+        code  = ['if (%s->%s_data != NULL)' % (name, self._name),
+                 '    free (%s->%s_data); ' % (name, self._name)]
+
+        return code
+
+    def Declaration(self):
+        dcl  = ['char *%s_data;' % self._name]
+
+        return dcl
+
+class EntryStruct(Entry):
+    def __init__(self, type, name, tag, refname):
+        # Init base class
+        Entry.__init__(self, type, name, tag)
+
+        self._can_be_array = 1
+        self._refname = refname
+        self._ctype = 'struct %s*' % refname
+
+    def CodeGet(self):
+        name = self._name
+        code = [ 'int',
+                 '%s_%s_get(struct %s *msg, %s *value)' % (
+            self._struct.Name(), name,
+            self._struct.Name(), self._ctype),
+                 '{',
+                 '  if (msg->%s_set != 1) {' % name,
+                 '    msg->%s_data = %s_new();' % (name, self._refname),
+                 '    if (msg->%s_data == NULL)' % name,
+                 '      return (-1);',
+                 '    msg->%s_set = 1;' % name,
+                 '  }',
+                 '  *value = msg->%s_data;' % name,
+                 '  return (0);',
+                 '}' ]
+        return code
+        
+    def CodeAssign(self):
+        name = self._name
+        code = """int
+%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg,
+    const %(ctype)s value)
+{
+   struct evbuffer *tmp = NULL;
+   if (msg->%(name)s_set) {
+     %(refname)s_clear(msg->%(name)s_data);
+     msg->%(name)s_set = 0;
+   } else {
+     msg->%(name)s_data = %(refname)s_new();
+     if (msg->%(name)s_data == NULL) {
+       event_warn("%%s: %(refname)s_new()", __func__);
+       goto error;
+     }
+   }
+   if ((tmp = evbuffer_new()) == NULL) {
+     event_warn("%%s: evbuffer_new()", __func__);
+     goto error;
+   }
+   %(refname)s_marshal(tmp, value);
+   if (%(refname)s_unmarshal(msg->%(name)s_data, tmp) == -1) {
+     event_warnx("%%s: %(refname)s_unmarshal", __func__);
+     goto error;
+   }
+   msg->%(name)s_set = 1;
+   evbuffer_free(tmp);
+   return (0);
+ error:
+   if (tmp != NULL)
+     evbuffer_free(tmp);
+   if (msg->%(name)s_data != NULL) {
+     %(refname)s_free(msg->%(name)s_data);
+     msg->%(name)s_data = NULL;
+   }
+   return (-1);
+}""" % self.GetTranslation()
+        return code.split('\n')
+        
+    def CodeComplete(self, structname):
+        if self.Optional():
+            code = [ 'if (%s->%s_set && %s_complete(%s->%s_data) == -1)' % (
+                structname, self.Name(),
+                self._refname, structname, self.Name()),
+                     '  return (-1);' ]
+        else:
+            code = [ 'if (%s_complete(%s->%s_data) == -1)' % (
+                self._refname, structname, self.Name()),
+                     '  return (-1);' ]
+
+        return code
+    
+    def CodeUnmarshal(self, buf, tag_name, var_name):
+        code = ['%s->%s_data = %s_new();' % (
+            var_name, self._name, self._refname),
+                'if (%s->%s_data == NULL)' % (var_name, self._name),
+                '  return (-1);',
+                'if (evtag_unmarshal_%s(%s, %s, %s->%s_data) == -1) {' % (
+            self._refname, buf, tag_name, var_name, self._name),
+                  '  event_warnx("%%s: failed to unmarshal %s", __func__);' % (
+            self._name ),
+                '  return (-1);',
+                '}'
+                ]
+        return code
+
+    def CodeMarshal(self, buf, tag_name, var_name):
+        code = ['evtag_marshal_%s(%s, %s, %s->%s_data);' % (
+            self._refname, buf, tag_name, var_name, self._name)]
+        return code
+
+    def CodeClear(self, structname):
+        code = [ 'if (%s->%s_set == 1) {' % (structname, self.Name()),
+                 '  %s_free(%s->%s_data);' % (
+            self._refname, structname, self.Name()),
+                 '  %s->%s_data = NULL;' % (structname, self.Name()),
+                 '  %s->%s_set = 0;' % (structname, self.Name()),
+                 '}'
+                 ]
+
+        return code
+        
+    def CodeNew(self, name):
+        code  = ['%s->%s_data = NULL;' % (name, self._name)]
+        return code
+
+    def CodeFree(self, name):
+        code  = ['if (%s->%s_data != NULL)' % (name, self._name),
+                 '    %s_free(%s->%s_data); ' % (
+            self._refname, name, self._name)]
+
+        return code
+
+    def Declaration(self):
+        dcl  = ['%s %s_data;' % (self._ctype, self._name)]
+
+        return dcl
+
+class EntryVarBytes(Entry):
+    def __init__(self, type, name, tag):
+        # Init base class
+        Entry.__init__(self, type, name, tag)
+
+        self._ctype = 'ev_uint8_t *'
+
+    def GetDeclaration(self, funcname):
+        code = [ 'int %s(struct %s *, %s *, ev_uint32_t *);' % (
+            funcname, self._struct.Name(), self._ctype ) ]
+        return code
+        
+    def AssignDeclaration(self, funcname):
+        code = [ 'int %s(struct %s *, const %s, ev_uint32_t);' % (
+            funcname, self._struct.Name(), self._ctype ) ]
+        return code
+        
+    def CodeAssign(self):
+        name = self._name
+        code = [ 'int',
+                 '%s_%s_assign(struct %s *msg, '
+                 'const %s value, ev_uint32_t len)' % (
+            self._struct.Name(), name,
+            self._struct.Name(), self._ctype),
+                 '{',
+                 '  if (msg->%s_data != NULL)' % name,
+                 '    free (msg->%s_data);' % name,
+                 '  msg->%s_data = malloc(len);' % name,
+                 '  if (msg->%s_data == NULL)' % name,
+                 '    return (-1);',
+                 '  msg->%s_set = 1;' % name,
+                 '  msg->%s_length = len;' % name,
+                 '  memcpy(msg->%s_data, value, len);' % name,
+                 '  return (0);',
+                 '}' ]
+        return code
+        
+    def CodeGet(self):
+        name = self._name
+        code = [ 'int',
+                 '%s_%s_get(struct %s *msg, %s *value, ev_uint32_t *plen)' % (
+            self._struct.Name(), name,
+            self._struct.Name(), self._ctype),
+                 '{',
+                 '  if (msg->%s_set != 1)' % name,
+                 '    return (-1);',
+                 '  *value = msg->%s_data;' % name,
+                 '  *plen = msg->%s_length;' % name,
+                 '  return (0);',
+                 '}' ]
+        return code
+
+    def CodeUnmarshal(self, buf, tag_name, var_name):
+        code = ['if (evtag_payload_length(%s, &%s->%s_length) == -1)' % (
+            buf, var_name, self._name),
+                '  return (-1);',
+                # We do not want DoS opportunities
+                'if (%s->%s_length > EVBUFFER_LENGTH(%s))' % (
+            var_name, self._name, buf),
+                '  return (-1);',
+                'if ((%s->%s_data = malloc(%s->%s_length)) == NULL)' % (
+            var_name, self._name, var_name, self._name),
+                '  return (-1);',
+                'if (evtag_unmarshal_fixed(%s, %s, %s->%s_data, '
+                '%s->%s_length) == -1) {' % (
+            buf, tag_name, var_name, self._name, var_name, self._name),
+                '  event_warnx("%%s: failed to unmarshal %s", __func__);' % (
+            self._name ),
+                '  return (-1);',
+                '}'
+                ]
+        return code
+
+    def CodeMarshal(self, buf, tag_name, var_name):
+        code = ['evtag_marshal(%s, %s, %s->%s_data, %s->%s_length);' % (
+            buf, tag_name, var_name, self._name, var_name, self._name)]
+        return code
+
+    def CodeClear(self, structname):
+        code = [ 'if (%s->%s_set == 1) {' % (structname, self.Name()),
+                 '  free (%s->%s_data);' % (structname, self.Name()),
+                 '  %s->%s_data = NULL;' % (structname, self.Name()),
+                 '  %s->%s_length = 0;' % (structname, self.Name()),
+                 '  %s->%s_set = 0;' % (structname, self.Name()),
+                 '}'
+                 ]
+
+        return code
+        
+    def CodeNew(self, name):
+        code  = ['%s->%s_data = NULL;' % (name, self._name),
+                 '%s->%s_length = 0;' % (name, self._name) ]
+        return code
+
+    def CodeFree(self, name):
+        code  = ['if (%s->%s_data != NULL)' % (name, self._name),
+                 '    free (%s->%s_data); ' % (name, self._name)]
+
+        return code
+
+    def Declaration(self):
+        dcl  = ['ev_uint8_t *%s_data;' % self._name,
+                'ev_uint32_t %s_length;' % self._name]
+
+        return dcl
+
+class EntryArray(Entry):
+    def __init__(self, entry):
+        # Init base class
+        Entry.__init__(self, entry._type, entry._name, entry._tag)
+
+        self._entry = entry
+        self._refname = entry._refname
+        self._ctype = 'struct %s *' % self._refname
+
+    def GetDeclaration(self, funcname):
+        """Allows direct access to elements of the array."""
+        translate = self.GetTranslation()
+        translate["funcname"] = funcname
+        code = [
+            'int %(funcname)s(struct %(parent_name)s *, int, %(ctype)s *);' %
+            translate ]
+        return code
+        
+    def AssignDeclaration(self, funcname):
+        code = [ 'int %s(struct %s *, int, const %s);' % (
+            funcname, self._struct.Name(), self._ctype ) ]
+        return code
+        
+    def AddDeclaration(self, funcname):
+        code = [ '%s %s(struct %s *);' % (
+            self._ctype, funcname, self._struct.Name() ) ]
+        return code
+        
+    def CodeGet(self):
+        code = """int
+%(parent_name)s_%(name)s_get(struct %(parent_name)s *msg, int offset,
+    %(ctype)s *value)
+{
+  if (!msg->%(name)s_set || offset < 0 || offset >= msg->%(name)s_length)
+    return (-1);
+  *value = msg->%(name)s_data[offset];
+  return (0);
+}""" % self.GetTranslation()
+
+        return code.split('\n')
+        
+    def CodeAssign(self):
+        code = """int
+%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg, int off,
+    const %(ctype)s value)
+{
+  struct evbuffer *tmp = NULL;
+  if (!msg->%(name)s_set || off < 0 || off >= msg->%(name)s_length)
+    return (-1);
+  %(refname)s_clear(msg->%(name)s_data[off]);
+  if ((tmp = evbuffer_new()) == NULL) {
+    event_warn("%%s: evbuffer_new()", __func__);
+    goto error;
+  }
+  %(refname)s_marshal(tmp, value);
+  if (%(refname)s_unmarshal(msg->%(name)s_data[off], tmp) == -1) {
+    event_warnx("%%s: %(refname)s_unmarshal", __func__);
+    goto error;
+  }
+  evbuffer_free(tmp);
+  return (0);
+error:
+  if (tmp != NULL)
+    evbuffer_free(tmp);
+  %(refname)s_clear(msg->%(name)s_data[off]);
+  return (-1);
+}""" % self.GetTranslation()
+
+        return code.split('\n')
+        
+    def CodeAdd(self):
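+        # Note (added for clarity): the generated _add() grows the backing
+        # array geometrically (1, 2, 4, ...), so repeated adds stay
+        # amortized O(1).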
+        code = \
+"""%(ctype)s
+%(parent_name)s_%(name)s_add(struct %(parent_name)s *msg)
+{
+  if (++msg->%(name)s_length >= msg->%(name)s_num_allocated) {
+    int tobe_allocated = msg->%(name)s_num_allocated;
+    %(ctype)s* new_data = NULL;
+    tobe_allocated = !tobe_allocated ? 1 : tobe_allocated << 1;
+    new_data = (%(ctype)s*) realloc(msg->%(name)s_data,
+        tobe_allocated * sizeof(%(ctype)s));
+    if (new_data == NULL)
+      goto error;
+    msg->%(name)s_data = new_data;
+    msg->%(name)s_num_allocated = tobe_allocated;
+  }
+  msg->%(name)s_data[msg->%(name)s_length - 1] = %(refname)s_new();
+  if (msg->%(name)s_data[msg->%(name)s_length - 1] == NULL)
+    goto error;
+  msg->%(name)s_set = 1;
+  return (msg->%(name)s_data[msg->%(name)s_length - 1]);
+error:
+  --msg->%(name)s_length;
+  return (NULL);
+}
+        """ % self.GetTranslation()
+
+        return code.split('\n')
+
+    def CodeComplete(self, structname):
+        code = []
+        translate = self.GetTranslation()
+
+        if self.Optional():
+            code.append( 'if (%(structname)s->%(name)s_set)'  % translate)
+
+        translate["structname"] = structname
+        tmp = """{
+  int i;
+  for (i = 0; i < %(structname)s->%(name)s_length; ++i) {
+    if (%(refname)s_complete(%(structname)s->%(name)s_data[i]) == -1)
+      return (-1);
+  }
+}""" % translate
+        code.extend(tmp.split('\n'))
+
+        return code
+    
+    def CodeUnmarshal(self, buf, tag_name, var_name):
+        translate = self.GetTranslation()
+        translate["var_name"] = var_name
+        translate["buf"] = buf
+        translate["tag_name"] = tag_name
+        code = """if (%(parent_name)s_%(name)s_add(%(var_name)s) == NULL)
+  return (-1);
+if (evtag_unmarshal_%(refname)s(%(buf)s, %(tag_name)s,
+  %(var_name)s->%(name)s_data[%(var_name)s->%(name)s_length - 1]) == -1) {
+  --%(var_name)s->%(name)s_length;
+  event_warnx("%%s: failed to unmarshal %(name)s", __func__);
+  return (-1);
+}""" % translate
+
+        return code.split('\n')
+
+    def CodeMarshal(self, buf, tag_name, var_name):
+        code = ['{',
+                '  int i;',
+                '  for (i = 0; i < %s->%s_length; ++i) {' % (
+            var_name, self._name),
+                '    evtag_marshal_%s(%s, %s, %s->%s_data[i]);' % (
+            self._refname, buf, tag_name, var_name, self._name),
+                '  }',
+                '}'
+                ]
+        return code
+
+    def CodeClear(self, structname):
+        code = [ 'if (%s->%s_set == 1) {' % (structname, self.Name()),
+                 '  int i;',
+                 '  for (i = 0; i < %s->%s_length; ++i) {' % (
+            structname, self.Name()),
+                 '    %s_free(%s->%s_data[i]);' % (
+            self._refname, structname, self.Name()),
+                 '  }',
+                 '  free(%s->%s_data);' % (structname, self.Name()),
+                 '  %s->%s_data = NULL;' % (structname, self.Name()),
+                 '  %s->%s_set = 0;' % (structname, self.Name()),
+                 '  %s->%s_length = 0;' % (structname, self.Name()),
+                 '  %s->%s_num_allocated = 0;' % (structname, self.Name()),
+                 '}'
+                 ]
+
+        return code
+        
+    def CodeNew(self, name):
+        code  = ['%s->%s_data = NULL;' % (name, self._name),
+                 '%s->%s_length = 0;' % (name, self._name),
+                 '%s->%s_num_allocated = 0;' % (name, self._name)]
+        return code
+
+    def CodeFree(self, name):
+        code  = ['if (%s->%s_data != NULL) {' % (name, self._name),
+                 '  int i;',
+                 '  for (i = 0; i < %s->%s_length; ++i) {' % (
+            name, self._name),
+                 '    %s_free(%s->%s_data[i]); ' % (
+            self._refname, name, self._name),
+                 '    %s->%s_data[i] = NULL;' % (name, self._name),
+                 '  }',
+                 '  free(%s->%s_data);' % (name, self._name),
+                 '  %s->%s_data = NULL;' % (name, self._name),
+                 '  %s->%s_length = 0;' % (name, self._name),
+                 '  %s->%s_num_allocated = 0;' % (name, self._name),
+                 '}'
+                 ]
+
+        return code
+
+    def Declaration(self):
+        dcl  = ['struct %s **%s_data;' % (self._refname, self._name),
+                'int %s_length;' % self._name,
+                'int %s_num_allocated;' % self._name ]
+
+        return dcl
+
+def NormalizeLine(line):
+    global white
+    global cppcomment
+    
+    line = cppcomment.sub('', line)
+    line = line.strip()
+    line = white.sub(' ', line)
+
+    return line
+
+def ProcessOneEntry(newstruct, entry):
+    optional = 0
+    array = 0
+    entry_type = ''
+    name = ''
+    tag = ''
+    tag_set = None
+    separator = ''
+    fixed_length = ''
+
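+    # Illustrative entry lines this parser accepts (examples, not from the
+    # original source):
+    #   'int count = 1', 'optional string name = 2',
+    #   'bytes digest[16] = 3', 'array struct[msg] sub = 4'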
+    tokens = entry.split(' ')
+    while tokens:
+        token = tokens[0]
+        tokens = tokens[1:]
+
+        if not entry_type:
+            if not optional and token == 'optional':
+                optional = 1
+                continue
+
+            if not array and token == 'array':
+                array = 1
+                continue
+
+        if not entry_type:
+            entry_type = token
+            continue
+
+        if not name:
+            res = re.match(r'^([^\[\]]+)(\[.*\])?$', token)
+            if not res:
+                print >>sys.stderr, 'Cannot parse name: \"%s\" around %d' % (
+                    entry, line_count)
+                sys.exit(1)
+            name = res.group(1)
+            fixed_length = res.group(2)
+            if fixed_length:
+                fixed_length = fixed_length[1:-1]
+            continue
+
+        if not separator:
+            separator = token
+            if separator != '=':
+                print >>sys.stderr, 'Expected "=" after name \"%s\" got %s' % (
+                    name, token)
+                sys.exit(1)
+            continue
+
+        if not tag_set:
+            tag_set = 1
+            if not re.match(r'^(0x)?[0-9]+$', token):
+                print >>sys.stderr, 'Expected tag number: \"%s\"' % entry
+                sys.exit(1)
+            tag = int(token, 0)
+            continue
+
+        print >>sys.stderr, 'Cannot parse \"%s\"' % entry
+        sys.exit(1)
+
+    if not tag_set:
+        print >>sys.stderr, 'Need tag number: \"%s\"' % entry
+        sys.exit(1)
+
+    # Create the right entry
+    if entry_type == 'bytes':
+        if fixed_length:
+            newentry = EntryBytes(entry_type, name, tag, fixed_length)
+        else:
+            newentry = EntryVarBytes(entry_type, name, tag)
+    elif entry_type == 'int' and not fixed_length:
+        newentry = EntryInt(entry_type, name, tag)
+    elif entry_type == 'string' and not fixed_length:
+        newentry = EntryString(entry_type, name, tag)
+    else:
+        res = re.match(r'^struct\[(%s)\]$' % _STRUCT_RE,
+                       entry_type, re.IGNORECASE)
+        if res:
+            # References another struct defined in our file
+            newentry = EntryStruct(entry_type, name, tag, res.group(1))
+        else:
+            print >>sys.stderr, 'Bad type: "%s" in "%s"' % (entry_type, entry)
+            sys.exit(1)
+
+    structs = []
+        
+    if optional:
+        newentry.MakeOptional()
+    if array:
+        newentry.MakeArray()
+
+    newentry.SetStruct(newstruct)
+    newentry.SetLineCount(line_count)
+    newentry.Verify()
+
+    if array:
+        # We need to encapsulate this entry into a struct
+        newname = newentry.Name()+ '_array'
+
+        # Now borgify the new entry.
+        newentry = EntryArray(newentry)
+        newentry.SetStruct(newstruct)
+        newentry.SetLineCount(line_count)
+        newentry.MakeArray()
+
+    newstruct.AddEntry(newentry)
+
+    return structs
+
+def ProcessStruct(data):
+    tokens = data.split(' ')
+
+    # First three tokens are: 'struct' 'name' '{'
+    newstruct = Struct(tokens[1])
+
+    inside = ' '.join(tokens[3:-1])
+
+    tokens = inside.split(';')
+
+    structs = []
+
+    for entry in tokens:
+        entry = NormalizeLine(entry)
+        if not entry:
+            continue
+
+        # It's possible that new structs get defined in here
+        structs.extend(ProcessOneEntry(newstruct, entry))
+
+    structs.append(newstruct)
+    return structs
+
+def GetNextStruct(file):
+    global line_count
+    global cppdirect
+
+    got_struct = 0
+
+    processed_lines = []
+
+    have_c_comment = 0
+    data = ''
+    while 1:
+        line = file.readline()
+        if not line:
+            break
+        
+        line_count += 1
+        line = line[:-1]
+
+        if not have_c_comment and re.search(r'/\*', line):
+            if re.search(r'/\*.*\*/', line):
+                line = re.sub(r'/\*.*\*/', '', line)
+            else:
+                line = re.sub(r'/\*.*$', '', line)
+                have_c_comment = 1
+
+        if have_c_comment:
+            if not re.search(r'\*/', line):
+                continue
+            have_c_comment = 0
+            line = re.sub(r'^.*\*/', '', line)
+
+        line = NormalizeLine(line)
+
+        if not line:
+            continue
+
+        if not got_struct:
+            if re.match(r'#include ["<].*[>"]', line):
+                cppdirect.append(line)
+                continue
+            
+            if re.match(r'^#(if( |def)|endif)', line):
+                cppdirect.append(line)
+                continue
+
+            if re.match(r'^#define', line):
+                headerdirect.append(line)
+                continue
+
+            if not re.match(r'^struct %s {$' % _STRUCT_RE,
+                            line, re.IGNORECASE):
+                print >>sys.stderr, 'Missing struct on line %d: %s' % (
+                    line_count, line)
+                sys.exit(1)
+            else:
+                got_struct = 1
+                data += line
+            continue
+
+        # We are inside the struct
+        tokens = line.split('}')
+        if len(tokens) == 1:
+            data += ' ' + line
+            continue
+
+        if len(tokens[1]):
+            print >>sys.stderr, 'Trailing garbage after struct on line %d' % (
+                line_count )
+            sys.exit(1)
+
+        # We found the end of the struct
+        data += ' %s}' % tokens[0]
+        break
+
+    # Remove any comments that might be in there
+    data = re.sub(r'/\*.*\*/', '', data)
+    
+    return data
+        
+
+def Parse(file):
+    """
+    Parses the input file and returns the list of structs defined in it.
+    """
+
+    entities = []
+
+    while 1:
+        # Just gets the whole struct nicely formatted
+        data = GetNextStruct(file)
+
+        if not data:
+            break
+
+        entities.extend(ProcessStruct(data))
+
+    return entities
+
+def GuardName(name):
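+    # e.g. (illustration): GuardName('tags.rpc') yields '_TAGS_RPC_'.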
+    name = '_'.join(name.split('.'))
+    name = '_'.join(name.split('/'))
+    guard = '_'+name.upper()+'_'
+
+    return guard
+
+def HeaderPreamble(name):
+    guard = GuardName(name)
+    pre = (
+        '/*\n'
+        ' * Automatically generated from %s\n'
+        ' */\n\n'
+        '#ifndef %s\n'
+        '#define %s\n\n' ) % (
+        name, guard, guard)
+
+    # insert stdint.h - let's hope everyone has it
+    pre += (
+        '#include <event-config.h>\n'
+        '#ifdef _EVENT_HAVE_STDINT_H\n'
+        '#include <stdint.h>\n'
+        '#endif\n' )
+
+    for statement in headerdirect:
+        pre += '%s\n' % statement
+    if headerdirect:
+        pre += '\n'
+
+    pre += (
+        '#define EVTAG_HAS(msg, member) ((msg)->member##_set == 1)\n'
+        '#ifdef __GNUC__\n'
+        '#define EVTAG_ASSIGN(msg, member, args...) '
+        '(*(msg)->base->member##_assign)(msg, ## args)\n'
+        '#define EVTAG_GET(msg, member, args...) '
+        '(*(msg)->base->member##_get)(msg, ## args)\n'
+        '#else\n'
+        '#define EVTAG_ASSIGN(msg, member, ...) '
+        '(*(msg)->base->member##_assign)(msg, ## __VA_ARGS__)\n'
+        '#define EVTAG_GET(msg, member, ...) '
+        '(*(msg)->base->member##_get)(msg, ## __VA_ARGS__)\n'
+        '#endif\n'
+        '#define EVTAG_ADD(msg, member) (*(msg)->base->member##_add)(msg)\n'
+        '#define EVTAG_LEN(msg, member) ((msg)->member##_length)\n'
+        )
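+    # Generated code invokes these as, e.g., EVTAG_HAS(msg, count) or
+    # EVTAG_ASSIGN(msg, count, 5) (illustrative names).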
+
+    return pre
+     
+
+def HeaderPostamble(name):
+    guard = GuardName(name)
+    return '#endif  /* %s */' % guard
+
+def BodyPreamble(name):
+    global _NAME
+    global _VERSION
+    
+    header_file = '.'.join(name.split('.')[:-1]) + '.gen.h'
+
+    pre = ( '/*\n'
+            ' * Automatically generated from %s\n'
+            ' * by %s/%s.  DO NOT EDIT THIS FILE.\n'
+            ' */\n\n' ) % (name, _NAME, _VERSION)
+    pre += ( '#include <sys/types.h>\n'
+             '#ifdef _EVENT_HAVE_SYS_TIME_H\n'
+             '#include <sys/time.h>\n'
+             '#endif\n'
+             '#include <stdlib.h>\n'
+             '#include <string.h>\n'
+             '#include <assert.h>\n'
+             '#define EVENT_NO_STRUCT\n'
+             '#include <event.h>\n\n'
+             '#ifdef _EVENT___func__\n'
+             '#define __func__ _EVENT___func__\n'
+             '#endif\n' )
+
+    for statement in cppdirect:
+        pre += '%s\n' % statement
+    
+    pre += '\n#include "%s"\n\n' % header_file
+
+    pre += 'void event_err(int eval, const char *fmt, ...);\n'
+    pre += 'void event_warn(const char *fmt, ...);\n'
+    pre += 'void event_errx(int eval, const char *fmt, ...);\n'
+    pre += 'void event_warnx(const char *fmt, ...);\n\n'
+
+    return pre
+
+def main(argv):
+    if len(argv) < 2 or not argv[1]:
+        print >>sys.stderr, 'Need RPC description file as first argument.'
+        sys.exit(1)
+
+    filename = argv[1]
+
+    ext = filename.split('.')[-1]
+    if ext != 'rpc':
+        print >>sys.stderr, 'Unrecognized file extension: %s' % ext
+        sys.exit(1)
+
+    print >>sys.stderr, 'Reading \"%s\"' % filename
+
+    fp = open(filename, 'r')
+    entities = Parse(fp)
+    fp.close()
+
+    header_file = '.'.join(filename.split('.')[:-1]) + '.gen.h'
+    impl_file = '.'.join(filename.split('.')[:-1]) + '.gen.c'
+
+    print >>sys.stderr, '... creating "%s"' % header_file
+    header_fp = open(header_file, 'w')
+    print >>header_fp, HeaderPreamble(filename)
+
+    # Create forward declarations: allows other structs to reference
+    # each other
+    for entry in entities:
+        entry.PrintForwardDeclaration(header_fp)
+    print >>header_fp, ''
+
+    for entry in entities:
+        entry.PrintTags(header_fp)
+        entry.PrintDeclaration(header_fp)
+    print >>header_fp, HeaderPostamble(filename)
+    header_fp.close()
+
+    print >>sys.stderr, '... creating "%s"' % impl_file
+    impl_fp = open(impl_file, 'w')
+    print >>impl_fp, BodyPreamble(filename)
+    for entry in entities:
+        entry.PrintCode(impl_fp)
+    impl_fp.close()
+
+if __name__ == '__main__':
+    main(sys.argv)
diff --git a/base/third_party/libevent/event_tagging.c b/base/third_party/libevent/event_tagging.c
new file mode 100644
index 0000000..d436e3f
--- /dev/null
+++ b/base/third_party/libevent/event_tagging.c
@@ -0,0 +1,443 @@
+/*
+ * Copyright (c) 2003, 2004 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#ifdef HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+#ifdef HAVE_SYS_PARAM_H
+#include <sys/param.h>
+#endif
+
+#ifdef WIN32
+#define WIN32_LEAN_AND_MEAN
+#include <winsock2.h>
+#include <windows.h>
+#undef WIN32_LEAN_AND_MEAN
+#else
+#include <sys/ioctl.h>
+#endif
+
+#include <sys/queue.h>
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifndef WIN32
+#include <syslog.h>
+#endif
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+#include "event.h"
+#include "evutil.h"
+#include "log.h"
+
+int evtag_decode_int(ev_uint32_t *pnumber, struct evbuffer *evbuf);
+int evtag_encode_tag(struct evbuffer *evbuf, ev_uint32_t tag);
+int evtag_decode_tag(ev_uint32_t *ptag, struct evbuffer *evbuf);
+
+static struct evbuffer *_buf;	/* not thread safe */
+
+void
+evtag_init(void)
+{
+	if (_buf != NULL)
+		return;
+
+	if ((_buf = evbuffer_new()) == NULL)
+		event_err(1, "%s: malloc", __func__);
+}
+
+/* 
+ * We encode integers by nibbles; the first nibble contains the number
+ * of significant nibbles - 1;  this allows us to encode up to 64-bit
+ * integers.  This function is byte-order independent.
+ */
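+/*
+ * Worked example (added for illustration): encode_int(evbuf, 0x12) emits
+ * the two bytes 0x12 0x10.  The first byte's high nibble (1) records that
+ * two significant nibbles follow, stored least-significant first: the
+ * first byte's low nibble (2), then the second byte's high nibble (1).
+ */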
+
+void
+encode_int(struct evbuffer *evbuf, ev_uint32_t number)
+{
+	int off = 1, nibbles = 0;
+	ev_uint8_t data[5];
+
+	memset(data, 0, sizeof(ev_uint32_t)+1);
+	while (number) {
+		if (off & 0x1)
+			data[off/2] = (data[off/2] & 0xf0) | (number & 0x0f);
+		else
+			data[off/2] = (data[off/2] & 0x0f) |
+			    ((number & 0x0f) << 4);
+		number >>= 4;
+		off++;
+	}
+
+	if (off > 2)
+		nibbles = off - 2;
+
+	/* off - 1 is the number of encoded nibbles */
+	data[0] = (data[0] & 0x0f) | ((nibbles & 0x0f) << 4);
+
+	evbuffer_add(evbuf, data, (off + 1) / 2);
+}
+
+/*
+ * Support variable length encoding of tags; we use the high bit in each
+ * octet as a continuation signal.
+ */
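+/*
+ * Example (added for illustration): tag 300 (0x12c) encodes as 0xac 0x02:
+ * the low seven bits (0x2c) with the continuation bit set, then the
+ * remaining bits (300 >> 7 == 2) in a final octet with the high bit clear.
+ */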
+
+int
+evtag_encode_tag(struct evbuffer *evbuf, ev_uint32_t tag)
+{
+	int bytes = 0;
+	ev_uint8_t data[5];
+
+	memset(data, 0, sizeof(data));
+	do {
+		ev_uint8_t lower = tag & 0x7f;
+		tag >>= 7;
+
+		if (tag)
+			lower |= 0x80;
+
+		data[bytes++] = lower;
+	} while (tag);
+
+	if (evbuf != NULL)
+		evbuffer_add(evbuf, data, bytes);
+
+	return (bytes);
+}
+
+static int
+decode_tag_internal(ev_uint32_t *ptag, struct evbuffer *evbuf, int dodrain)
+{
+	ev_uint32_t number = 0;
+	ev_uint8_t *data = EVBUFFER_DATA(evbuf);
+	int len = EVBUFFER_LENGTH(evbuf);
+	int count = 0, shift = 0, done = 0;
+
+	while (count++ < len) {
+		ev_uint8_t lower = *data++;
+		number |= (lower & 0x7f) << shift;
+		shift += 7;
+
+		if (!(lower & 0x80)) {
+			done = 1;
+			break;
+		}
+	}
+
+	if (!done)
+		return (-1);
+
+	if (dodrain)
+		evbuffer_drain(evbuf, count);
+
+	if (ptag != NULL)
+		*ptag = number;
+
+	return (count);
+}
+
+int
+evtag_decode_tag(ev_uint32_t *ptag, struct evbuffer *evbuf)
+{
+	return (decode_tag_internal(ptag, evbuf, 1 /* dodrain */));
+}
+
+/*
+ * Marshal a data type, the general format is as follows:
+ *
+ * tag number: var bytes; length: var bytes; payload: var bytes
+ */
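+/*
+ * Example (added for illustration): evtag_marshal(evbuf, 1, "hi", 2)
+ * appends 0x01 (tag 1), 0x02 (encoded length 2), then the payload
+ * bytes 'h' and 'i'.
+ */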
+
+void
+evtag_marshal(struct evbuffer *evbuf, ev_uint32_t tag,
+    const void *data, ev_uint32_t len)
+{
+	evtag_encode_tag(evbuf, tag);
+	encode_int(evbuf, len);
+	evbuffer_add(evbuf, (void *)data, len);
+}
+
+/* Marshaling for integers */
+void
+evtag_marshal_int(struct evbuffer *evbuf, ev_uint32_t tag, ev_uint32_t integer)
+{
+	evbuffer_drain(_buf, EVBUFFER_LENGTH(_buf));
+	encode_int(_buf, integer);
+
+	evtag_encode_tag(evbuf, tag);
+	encode_int(evbuf, EVBUFFER_LENGTH(_buf));
+	evbuffer_add_buffer(evbuf, _buf);
+}
+
+void
+evtag_marshal_string(struct evbuffer *buf, ev_uint32_t tag, const char *string)
+{
+	evtag_marshal(buf, tag, string, strlen(string));
+}
+
+void
+evtag_marshal_timeval(struct evbuffer *evbuf, ev_uint32_t tag, struct timeval *tv)
+{
+	evbuffer_drain(_buf, EVBUFFER_LENGTH(_buf));
+
+	encode_int(_buf, tv->tv_sec);
+	encode_int(_buf, tv->tv_usec);
+
+	evtag_marshal(evbuf, tag, EVBUFFER_DATA(_buf),
+	    EVBUFFER_LENGTH(_buf));
+}
+
+static int
+decode_int_internal(ev_uint32_t *pnumber, struct evbuffer *evbuf, int dodrain)
+{
+	ev_uint32_t number = 0;
+	ev_uint8_t *data = EVBUFFER_DATA(evbuf);
+	int len = EVBUFFER_LENGTH(evbuf);
+	int nibbles = 0;
+
+	if (!len)
+		return (-1);
+
+	nibbles = ((data[0] & 0xf0) >> 4) + 1;
+	if (nibbles > 8 || (nibbles >> 1) + 1 > len)
+		return (-1);
+	len = (nibbles >> 1) + 1;
+
+	while (nibbles > 0) {
+		number <<= 4;
+		if (nibbles & 0x1)
+			number |= data[nibbles >> 1] & 0x0f;
+		else
+			number |= (data[nibbles >> 1] & 0xf0) >> 4;
+		nibbles--;
+	}
+
+	if (dodrain)
+		evbuffer_drain(evbuf, len);
+
+	*pnumber = number;
+
+	return (len);
+}
+
+int
+evtag_decode_int(ev_uint32_t *pnumber, struct evbuffer *evbuf)
+{
+	return (decode_int_internal(pnumber, evbuf, 1) == -1 ? -1 : 0);
+}
+
+int
+evtag_peek(struct evbuffer *evbuf, ev_uint32_t *ptag)
+{
+	return (decode_tag_internal(ptag, evbuf, 0 /* dodrain */));
+}
+
+int
+evtag_peek_length(struct evbuffer *evbuf, ev_uint32_t *plength)
+{
+	struct evbuffer tmp;
+	int res, len;
+
+	len = decode_tag_internal(NULL, evbuf, 0 /* dodrain */);
+	if (len == -1)
+		return (-1);
+
+	tmp = *evbuf;
+	tmp.buffer += len;
+	tmp.off -= len;
+
+	res = decode_int_internal(plength, &tmp, 0);
+	if (res == -1)
+		return (-1);
+
+	*plength += res + len;
+
+	return (0);
+}
+
+int
+evtag_payload_length(struct evbuffer *evbuf, ev_uint32_t *plength)
+{
+	struct evbuffer tmp;
+	int res, len;
+
+	len = decode_tag_internal(NULL, evbuf, 0 /* dodrain */);
+	if (len == -1)
+		return (-1);
+
+	tmp = *evbuf;
+	tmp.buffer += len;
+	tmp.off -= len;
+
+	res = decode_int_internal(plength, &tmp, 0);
+	if (res == -1)
+		return (-1);
+
+	return (0);
+}
+
+int
+evtag_consume(struct evbuffer *evbuf)
+{
+	ev_uint32_t len;
+	if (decode_tag_internal(NULL, evbuf, 1 /* dodrain */) == -1)
+		return (-1);
+	if (evtag_decode_int(&len, evbuf) == -1)
+		return (-1);
+	evbuffer_drain(evbuf, len);
+
+	return (0);
+}
+
+/* Reads the data type from an event buffer */
+
+int
+evtag_unmarshal(struct evbuffer *src, ev_uint32_t *ptag, struct evbuffer *dst)
+{
+	ev_uint32_t len;
+	ev_uint32_t integer;
+
+	if (decode_tag_internal(ptag, src, 1 /* dodrain */) == -1)
+		return (-1);
+	if (evtag_decode_int(&integer, src) == -1)
+		return (-1);
+	len = integer;
+
+	if (EVBUFFER_LENGTH(src) < len)
+		return (-1);
+
+	if (evbuffer_add(dst, EVBUFFER_DATA(src), len) == -1)
+		return (-1);
+
+	evbuffer_drain(src, len);
+
+	return (len);
+}
+
+/* Unmarshaling for integers */
+
+int
+evtag_unmarshal_int(struct evbuffer *evbuf, ev_uint32_t need_tag,
+    ev_uint32_t *pinteger)
+{
+	ev_uint32_t tag;
+	ev_uint32_t len;
+	ev_uint32_t integer;
+
+	if (decode_tag_internal(&tag, evbuf, 1 /* dodrain */) == -1)
+		return (-1);
+	if (need_tag != tag)
+		return (-1);
+	if (evtag_decode_int(&integer, evbuf) == -1)
+		return (-1);
+	len = integer;
+
+	if (EVBUFFER_LENGTH(evbuf) < len)
+		return (-1);
+	
+	evbuffer_drain(_buf, EVBUFFER_LENGTH(_buf));
+	if (evbuffer_add(_buf, EVBUFFER_DATA(evbuf), len) == -1)
+		return (-1);
+
+	evbuffer_drain(evbuf, len);
+
+	return (evtag_decode_int(pinteger, _buf));
+}
+
+/* Unmarshal a fixed length tag */
+
+int
+evtag_unmarshal_fixed(struct evbuffer *src, ev_uint32_t need_tag, void *data,
+    size_t len)
+{
+	ev_uint32_t tag;
+
+	/* Initialize this event buffer so that we can read into it */
+	evbuffer_drain(_buf, EVBUFFER_LENGTH(_buf));
+
+	/* Now unmarshal a tag and check that it matches the tag we want */
+	if (evtag_unmarshal(src, &tag, _buf) == -1 || tag != need_tag)
+		return (-1);
+
+	if (EVBUFFER_LENGTH(_buf) != len)
+		return (-1);
+
+	memcpy(data, EVBUFFER_DATA(_buf), len);
+	return (0);
+}
+
+int
+evtag_unmarshal_string(struct evbuffer *evbuf, ev_uint32_t need_tag,
+    char **pstring)
+{
+	ev_uint32_t tag;
+
+	evbuffer_drain(_buf, EVBUFFER_LENGTH(_buf));
+
+	if (evtag_unmarshal(evbuf, &tag, _buf) == -1 || tag != need_tag)
+		return (-1);
+
+	*pstring = calloc(EVBUFFER_LENGTH(_buf) + 1, 1);
+	if (*pstring == NULL)
+		event_err(1, "%s: calloc", __func__);
+	evbuffer_remove(_buf, *pstring, EVBUFFER_LENGTH(_buf));
+
+	return (0);
+}
+
+int
+evtag_unmarshal_timeval(struct evbuffer *evbuf, ev_uint32_t need_tag,
+    struct timeval *ptv)
+{
+	ev_uint32_t tag;
+	ev_uint32_t integer;
+
+	evbuffer_drain(_buf, EVBUFFER_LENGTH(_buf));
+	if (evtag_unmarshal(evbuf, &tag, _buf) == -1 || tag != need_tag)
+		return (-1);
+
+	if (evtag_decode_int(&integer, _buf) == -1)
+		return (-1);
+	ptv->tv_sec = integer;
+	if (evtag_decode_int(&integer, _buf) == -1)
+		return (-1);
+	ptv->tv_usec = integer;
+
+	return (0);
+}
diff --git a/base/third_party/libevent/evhttp.h b/base/third_party/libevent/evhttp.h
new file mode 100644
index 0000000..48c1d91
--- /dev/null
+++ b/base/third_party/libevent/evhttp.h
@@ -0,0 +1,375 @@
+/*
+ * Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _EVHTTP_H_
+#define _EVHTTP_H_
+
+#include "event.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef WIN32
+#define WIN32_LEAN_AND_MEAN
+#include <winsock2.h>
+#include <windows.h>
+#undef WIN32_LEAN_AND_MEAN
+#endif
+
+/** @file evhttp.h
+ *
+ * Basic support for HTTP serving.
+ *
+ * As libevent is a library for dealing with event notification and most
+ * interesting applications are networked today, I have often found the
+ * need to write HTTP code.  The following prototypes and definitions provide
+ * an application with a minimal interface for making HTTP requests and for
+ * creating a very simple HTTP server.
+ */
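+/*
+ * Minimal server sketch (illustrative only; handle_request is a
+ * hypothetical user callback, and event_init()/event_dispatch() come
+ * from event.h):
+ *
+ *   event_init();
+ *   struct evhttp *httpd = evhttp_start("127.0.0.1", 8080);
+ *   evhttp_set_gencb(httpd, handle_request, NULL);
+ *   event_dispatch();
+ */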
+
+/* Response codes */
+#define HTTP_OK			200
+#define HTTP_NOCONTENT		204
+#define HTTP_MOVEPERM		301
+#define HTTP_MOVETEMP		302
+#define HTTP_NOTMODIFIED	304
+#define HTTP_BADREQUEST		400
+#define HTTP_NOTFOUND		404
+#define HTTP_SERVUNAVAIL	503
+
+struct evhttp;
+struct evhttp_request;
+struct evkeyvalq;
+
+/** Create a new HTTP server
+ *
+ * @param base (optional) the event base to receive the HTTP events
+ * @return a pointer to a newly initialized evhttp server structure
+ */
+struct evhttp *evhttp_new(struct event_base *base);
+
+/**
+ * Binds an HTTP server on the specified address and port.
+ *
+ * Can be called multiple times to bind the same http server
+ * to multiple different ports.
+ *
+ * @param http a pointer to an evhttp object
+ * @param address a string containing the IP address to listen(2) on
+ * @param port the port number to listen on
+ * @return 0 on success, -1 on failure
+ * @see evhttp_free()
+ */
+int evhttp_bind_socket(struct evhttp *http, const char *address, u_short port);
+
+/**
+ * Makes an HTTP server accept connections on the specified socket
+ *
+ * This may be useful to create a socket and then fork multiple instances
+ * of an http server, or when a socket has been communicated via file
+ * descriptor passing in situations where an http server does not have
+ * permissions to bind to a low-numbered port.
+ *
+ * Can be called multiple times to have the http server listen to
+ * multiple different sockets.
+ *
+ * @param http a pointer to an evhttp object
+ * @param fd a socket fd that is ready for accepting connections
+ * @return 0 on success, -1 on failure.
+ * @see evhttp_free(), evhttp_bind_socket()
+ */
+int evhttp_accept_socket(struct evhttp *http, int fd);
+
+/**
+ * Free the previously created HTTP server.
+ *
+ * Works only if no requests are currently being served.
+ *
+ * @param http the evhttp server object to be freed
+ * @see evhttp_start()
+ */
+void evhttp_free(struct evhttp* http);
+
+/** Set a callback for a specified URI */
+void evhttp_set_cb(struct evhttp *, const char *,
+    void (*)(struct evhttp_request *, void *), void *);
+
+/** Removes the callback for a specified URI */
+int evhttp_del_cb(struct evhttp *, const char *);
+
+/** Set a callback for all requests that are not caught by specific callbacks
+ */
+void evhttp_set_gencb(struct evhttp *,
+    void (*)(struct evhttp_request *, void *), void *);
+
+/**
+ * Set the timeout for an HTTP request.
+ *
+ * @param http an evhttp object
+ * @param timeout_in_secs the timeout, in seconds
+ */
+void evhttp_set_timeout(struct evhttp *, int timeout_in_secs);
+
+/* Request/Response functionality */
+
+/**
+ * Send an HTML error message to the client.
+ *
+ * @param req a request object
+ * @param error the HTTP error code
+ * @param reason a brief explanation of the error
+ */
+void evhttp_send_error(struct evhttp_request *req, int error,
+    const char *reason);
+
+/**
+ * Send an HTML reply to the client.
+ *
+ * @param req a request object
+ * @param code the HTTP response code to send
+ * @param reason a brief message to send with the response code
+ * @param databuf the body of the response
+ */
+void evhttp_send_reply(struct evhttp_request *req, int code,
+    const char *reason, struct evbuffer *databuf);
+
+/* Low-level response interface, for streaming/chunked replies */
+void evhttp_send_reply_start(struct evhttp_request *, int, const char *);
+void evhttp_send_reply_chunk(struct evhttp_request *, struct evbuffer *);
+void evhttp_send_reply_end(struct evhttp_request *);
+
+/**
+ * Start an HTTP server on the specified address and port
+ *
+ * DEPRECATED: it does not allow an event base to be specified
+ *
+ * @param address the address to which the HTTP server should be bound
+ * @param port the port number on which the HTTP server should listen
+ * @return a struct evhttp object
+ */
+struct evhttp *evhttp_start(const char *address, u_short port);
+
+/*
+ * Interfaces for making requests
+ */
+enum evhttp_cmd_type { EVHTTP_REQ_GET, EVHTTP_REQ_POST, EVHTTP_REQ_HEAD };
+
+enum evhttp_request_kind { EVHTTP_REQUEST, EVHTTP_RESPONSE };
+
+/**
+ * the request structure that a server receives.
+ * WARNING: expect this structure to change.  I will try to provide
+ * reasonable accessors.
+ */
+struct evhttp_request {
+#if defined(TAILQ_ENTRY)
+	TAILQ_ENTRY(evhttp_request) next;
+#else
+struct {
+	struct evhttp_request *tqe_next;
+	struct evhttp_request **tqe_prev;
+}       next;
+#endif
+
+	/* the connection object that this request belongs to */
+	struct evhttp_connection *evcon;
+	int flags;
+#define EVHTTP_REQ_OWN_CONNECTION	0x0001
+#define EVHTTP_PROXY_REQUEST		0x0002
+
+	struct evkeyvalq *input_headers;
+	struct evkeyvalq *output_headers;
+
+	/* address of the remote host and the port connection came from */
+	char *remote_host;
+	u_short remote_port;
+
+	enum evhttp_request_kind kind;
+	enum evhttp_cmd_type type;
+
+	char *uri;			/* uri after HTTP request was parsed */
+
+	char major;			/* HTTP Major number */
+	char minor;			/* HTTP Minor number */
+
+	int response_code;		/* HTTP Response code */
+	char *response_code_line;	/* Readable response */
+
+	struct evbuffer *input_buffer;	/* read data */
+	ev_int64_t ntoread;
+	int chunked:1,                  /* a chunked request */
+	    userdone:1;                 /* the user has sent all data */
+
+	struct evbuffer *output_buffer;	/* outgoing post or data */
+
+	/* Callback */
+	void (*cb)(struct evhttp_request *, void *);
+	void *cb_arg;
+
+	/*
+	 * Chunked data callback - call for each completed chunk if
+	 * specified.  If not specified, all the data is delivered via
+	 * the regular callback.
+	 */
+	void (*chunk_cb)(struct evhttp_request *, void *);
+};
+
+/**
+ * Creates a new request object that needs to be filled in with the request
+ * parameters.  The callback is executed when the request has completed or
+ * an error has occurred.
+ */
+struct evhttp_request *evhttp_request_new(
+	void (*cb)(struct evhttp_request *, void *), void *arg);
+
+/** enable delivery of chunks to requestor */
+void evhttp_request_set_chunked_cb(struct evhttp_request *,
+    void (*cb)(struct evhttp_request *, void *));
+
+/** Frees the request object and removes associated events. */
+void evhttp_request_free(struct evhttp_request *req);
+
+/** Returns the connection object associated with the request or NULL */
+struct evhttp_connection *evhttp_request_get_connection(struct evhttp_request *req);
+
+/**
+ * A connection object that can be used for making HTTP requests.  The
+ * connection object tries to establish the connection when it is given an
+ * http request object.
+ */
+struct evhttp_connection *evhttp_connection_new(
+	const char *address, unsigned short port);
+
+/** Frees an http connection */
+void evhttp_connection_free(struct evhttp_connection *evcon);
+
+/** sets the ip address from which http connections are made */
+void evhttp_connection_set_local_address(struct evhttp_connection *evcon,
+    const char *address);
+
+/** sets the local port from which http connections are made */
+void evhttp_connection_set_local_port(struct evhttp_connection *evcon,
+    unsigned short port);
+
+/** Sets the timeout for events related to this connection */
+void evhttp_connection_set_timeout(struct evhttp_connection *evcon,
+    int timeout_in_secs);
+
+/** Sets the retry limit for this connection - -1 repeats indefinitely */
+void evhttp_connection_set_retries(struct evhttp_connection *evcon,
+    int retry_max);
+
+/** Set a callback for connection close. */
+void evhttp_connection_set_closecb(struct evhttp_connection *evcon,
+    void (*)(struct evhttp_connection *, void *), void *);
+
+/**
+ * Associates an event base with the connection - can only be called
+ * on a freshly created connection object that has not been used yet.
+ */
+void evhttp_connection_set_base(struct evhttp_connection *evcon,
+    struct event_base *base);
+
+/** Get the remote address and port associated with this connection. */
+void evhttp_connection_get_peer(struct evhttp_connection *evcon,
+    char **address, u_short *port);
+
+/** The connection gets ownership of the request */
+int evhttp_make_request(struct evhttp_connection *evcon,
+    struct evhttp_request *req,
+    enum evhttp_cmd_type type, const char *uri);
+
+const char *evhttp_request_uri(struct evhttp_request *req);
+
+/* Interfaces for dealing with HTTP headers */
+
+const char *evhttp_find_header(const struct evkeyvalq *, const char *);
+int evhttp_remove_header(struct evkeyvalq *, const char *);
+int evhttp_add_header(struct evkeyvalq *, const char *, const char *);
+void evhttp_clear_headers(struct evkeyvalq *);
+
+/* Miscellaneous utility functions */
+
+
+/**
+  Helper function to encode a URI.
+
+  The returned string must be freed by the caller.
+
+  @param uri an unencoded URI
+  @return a newly allocated URI-encoded string
+ */
+char *evhttp_encode_uri(const char *uri);
+
+
+/**
+  Helper function to decode a URI.
+
+  The returned string must be freed by the caller.
+
+  @param uri an encoded URI
+  @return a newly allocated unencoded URI
+ */
+char *evhttp_decode_uri(const char *uri);
+
+
+/**
+ * Helper function to parse out arguments in a query.
+ *
+ * Parsing a uri like
+ *
+ *    http://foo.com/?q=test&s=some+thing
+ *
+ * will result in two entries in the key value queue.
+ *
+ * The first entry is: key="q", value="test"
+ * The second entry is: key="s", value="some thing"
+ *
+ * @param uri the request URI
+ * @param headers the head of the evkeyval queue
+ */
+void evhttp_parse_query(const char *uri, struct evkeyvalq *headers);
+
+
+/**
+ * Escape HTML character entities in a string.
+ *
+ * Replaces <, >, ", ' and & with &lt;, &gt;, &quot;,
+ * &#039; and &amp;, respectively.
+ *
+ * The returned string needs to be freed by the caller.
+ *
+ * @param html an unescaped HTML string
+ * @return an escaped HTML string
+ */
+char *evhttp_htmlescape(const char *html);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _EVHTTP_H_ */
diff --git a/base/third_party/libevent/evport.c b/base/third_party/libevent/evport.c
new file mode 100644
index 0000000..1f5ebc4
--- /dev/null
+++ b/base/third_party/libevent/evport.c
@@ -0,0 +1,519 @@
+/*
+ * Submitted by David Pacheco (dp.spambait@gmail.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY SUN MICROSYSTEMS, INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL SUN MICROSYSTEMS, INC. BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+/*
+ * Copyright (c) 2007 Sun Microsystems. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * evport.c: event backend using Solaris 10 event ports. See port_create(3C).
+ * This implementation is loosely modeled after the one used for select(2) (in
+ * select.c).
+ *
+ * The outstanding events are tracked in a data structure called evport_data.
+ * Each entry in the ed_fds array corresponds to a file descriptor, and contains
+ * pointers to the read and write events that correspond to that fd. (That is,
+ * when the file is readable, the "read" event should handle it, etc.)
+ *
+ * evport_add and evport_del update this data structure. evport_dispatch uses it
+ * to determine where to call back when an event occurs (which it gets from
+ * port_getn). 
+ *
+ * Helper functions are used: grow() grows the file descriptor array as
+ * necessary when large fd's come in. reassociate() takes care of maintaining
+ * the proper file-descriptor/event-port associations.
+ *
+ * As in the select(2) implementation, signals are handled by evsignal.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <sys/time.h>
+#include <assert.h>
+#include <sys/queue.h>
+#include <errno.h>
+#include <poll.h>
+#include <port.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+#ifdef CHECK_INVARIANTS
+#include <assert.h>
+#endif
+
+#include "event.h"
+#include "event-internal.h"
+#include "log.h"
+#include "evsignal.h"
+
+
+/*
+ * Default value for ed_nevents, which is the maximum file descriptor number we
+ * can handle. If an event comes in for a file descriptor F > nevents, we will
+ * grow the array of file descriptors, doubling its size.
+ */
+#define DEFAULT_NFDS	16
+
+
+/*
+ * EVENTS_PER_GETN is the maximum number of events to retrieve from port_getn on
+ * any particular call. You can speed things up by increasing this, but it will
+ * (obviously) require more memory.
+ */
+#define EVENTS_PER_GETN 8
+
+/*
+ * Per-file-descriptor information about what events we're subscribed to. These
+ * fields are NULL if no event is subscribed to either of them.
+ */
+
+struct fd_info {
+	struct event* fdi_revt; /* the event responsible for the "read"  */
+	struct event* fdi_wevt; /* the event responsible for the "write" */
+};
+
+#define FDI_HAS_READ(fdi)  ((fdi)->fdi_revt != NULL)
+#define FDI_HAS_WRITE(fdi) ((fdi)->fdi_wevt != NULL)
+#define FDI_HAS_EVENTS(fdi) (FDI_HAS_READ(fdi) || FDI_HAS_WRITE(fdi))
+#define FDI_TO_SYSEVENTS(fdi) (FDI_HAS_READ(fdi) ? POLLIN : 0) | \
+    (FDI_HAS_WRITE(fdi) ? POLLOUT : 0)
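+/* e.g. (illustration): an fd_info with both a read and a write event
+ * registered maps to POLLIN|POLLOUT via FDI_TO_SYSEVENTS above. */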
+
+struct evport_data {
+	int 		ed_port;	/* event port for system events  */
+	int		ed_nevents;	/* number of allocated fdi's 	 */
+	struct fd_info *ed_fds;		/* allocated fdi table 		 */
+	/* fdi's that we need to reassoc */
+	int ed_pending[EVENTS_PER_GETN]; /* fd's with pending events */
+};
+
+static void*	evport_init	(struct event_base *);
+static int 	evport_add	(void *, struct event *);
+static int 	evport_del	(void *, struct event *);
+static int 	evport_dispatch	(struct event_base *, void *, struct timeval *);
+static void	evport_dealloc	(struct event_base *, void *);
+
+const struct eventop evportops = {
+	"evport",
+	evport_init,
+	evport_add,
+	evport_del,
+	evport_dispatch,
+	evport_dealloc,
+	1 /* need reinit */
+};
+
+/*
+ * Initialize the event port implementation.
+ */
+
+static void*
+evport_init(struct event_base *base)
+{
+	struct evport_data *evpd;
+	int i;
+	/*
+	 * Disable event ports when this environment variable is set 
+	 */
+	if (evutil_getenv("EVENT_NOEVPORT"))
+		return (NULL);
+
+	if (!(evpd = calloc(1, sizeof(struct evport_data))))
+		return (NULL);
+
+	if ((evpd->ed_port = port_create()) == -1) {
+		free(evpd);
+		return (NULL);
+	}
+
+	/*
+	 * Initialize file descriptor structure
+	 */
+	evpd->ed_fds = calloc(DEFAULT_NFDS, sizeof(struct fd_info));
+	if (evpd->ed_fds == NULL) {
+		close(evpd->ed_port);
+		free(evpd);
+		return (NULL);
+	}
+	evpd->ed_nevents = DEFAULT_NFDS;
+	for (i = 0; i < EVENTS_PER_GETN; i++)
+		evpd->ed_pending[i] = -1;
+
+	evsignal_init(base);
+
+	return (evpd);
+}
+
+#ifdef CHECK_INVARIANTS
+/*
+ * Checks some basic properties about the evport_data structure. Because it
+ * checks all file descriptors, this function can be expensive when the maximum
+ * file descriptor ever used is rather large.
+ */
+
+static void
+check_evportop(struct evport_data *evpd)
+{
+	assert(evpd);
+	assert(evpd->ed_nevents > 0);
+	assert(evpd->ed_port > 0);
+	assert(evpd->ed_fds != NULL);
+
+	/*
+	 * Verify the integrity of the fd_info struct as well as the events to
+	 * which it points (at least, that they're valid references and correct
+	 * for their position in the structure).
+	 */
+	int i;
+	for (i = 0; i < evpd->ed_nevents; ++i) {
+		struct event 	*ev;
+		struct fd_info 	*fdi;
+
+		fdi = &evpd->ed_fds[i];
+		if ((ev = fdi->fdi_revt) != NULL) {
+			assert(ev->ev_fd == i);
+		}
+		if ((ev = fdi->fdi_wevt) != NULL) {
+			assert(ev->ev_fd == i);
+		}
+	}
+}
+
+/*
+ * Verifies very basic integrity of a given port_event.
+ */
+static void
+check_event(port_event_t* pevt)
+{
+	/*
+	 * We've only registered for PORT_SOURCE_FD events. The only
+	 * other thing we can legitimately receive is PORT_SOURCE_ALERT,
+	 * but since we're not using port_alert either, we can assume
+	 * PORT_SOURCE_FD.
+	 */
+	assert(pevt->portev_source == PORT_SOURCE_FD);
+	assert(pevt->portev_user == NULL);
+}
+
+#else
+#define check_evportop(epop)
+#define check_event(pevt)
+#endif /* CHECK_INVARIANTS */
+
+/*
+ * Grows the allocated file descriptor array to factor times its current size.
+ */
+static int
+grow(struct evport_data *epdp, int factor)
+{
+	struct fd_info *tmp;
+	int oldsize = epdp->ed_nevents;
+	int newsize = factor * oldsize;
+	assert(factor > 1);
+
+	check_evportop(epdp);
+
+	tmp = realloc(epdp->ed_fds, sizeof(struct fd_info) * newsize);
+	if (NULL == tmp)
+		return -1;
+	epdp->ed_fds = tmp;
+	memset((char*) (epdp->ed_fds + oldsize), 0, 
+	    (newsize - oldsize)*sizeof(struct fd_info));
+	epdp->ed_nevents = newsize;
+
+	check_evportop(epdp);
+
+	return 0;
+}
+
+
+/*
+ * (Re)associates the given file descriptor with the event port. The OS events
+ * are derived (implicitly) from the fd_info struct.
+ */
+static int
+reassociate(struct evport_data *epdp, struct fd_info *fdip, int fd)
+{
+	int sysevents = FDI_TO_SYSEVENTS(fdip);
+
+	if (sysevents != 0) {
+		if (port_associate(epdp->ed_port, PORT_SOURCE_FD,
+				   fd, sysevents, NULL) == -1) {
+			event_warn("port_associate");
+			return (-1);
+		}
+	}
+
+	check_evportop(epdp);
+
+	return (0);
+}
+
+/*
+ * Main event loop - polls port_getn for some number of events, and processes
+ * them.
+ */
+
+static int
+evport_dispatch(struct event_base *base, void *arg, struct timeval *tv)
+{
+	int i, res;
+	struct evport_data *epdp = arg;
+	port_event_t pevtlist[EVENTS_PER_GETN];
+
+	/*
+	 * port_getn will block until it has at least nevents events. On
+	 * return it stores in nevents how many events it actually gave us,
+	 * which may be more than we asked for but never more than our
+	 * maximum, EVENTS_PER_GETN.
+	 */
+	int nevents = 1;
+
+	/*
+	 * We have to convert a struct timeval to a struct timespec
+	 * (only difference is nanoseconds vs. microseconds). If no time-based
+	 * events are active, we should wait for I/O (and tv == NULL).
+	 */
+	struct timespec ts;
+	struct timespec *ts_p = NULL;
+	if (tv != NULL) {
+		ts.tv_sec = tv->tv_sec;
+		ts.tv_nsec = tv->tv_usec * 1000;
+		ts_p = &ts;
+	}
+
+	/*
+	 * Before doing anything else, we need to reassociate the events we hit
+	 * last time which need reassociation. See comment at the end of the
+	 * loop below.
+	 */
+	for (i = 0; i < EVENTS_PER_GETN; ++i) {
+		struct fd_info *fdi = NULL;
+		if (epdp->ed_pending[i] != -1) {
+			fdi = &(epdp->ed_fds[epdp->ed_pending[i]]);
+		}
+
+		if (fdi != NULL && FDI_HAS_EVENTS(fdi)) {
+			int fd = FDI_HAS_READ(fdi) ? fdi->fdi_revt->ev_fd : 
+			    fdi->fdi_wevt->ev_fd;
+			reassociate(epdp, fdi, fd);
+			epdp->ed_pending[i] = -1;
+		}
+	}
+
+	if ((res = port_getn(epdp->ed_port, pevtlist, EVENTS_PER_GETN, 
+		    (unsigned int *) &nevents, ts_p)) == -1) {
+		if (errno == EINTR || errno == EAGAIN) {
+			evsignal_process(base);
+			return (0);
+		} else if (errno == ETIME) {
+			if (nevents == 0)
+				return (0);
+		} else {
+			event_warn("port_getn");
+			return (-1);
+		}
+	} else if (base->sig.evsignal_caught) {
+		evsignal_process(base);
+	}
+	
+	event_debug(("%s: port_getn reports %d events", __func__, nevents));
+
+	for (i = 0; i < nevents; ++i) {
+		struct event *ev;
+		struct fd_info *fdi;
+		port_event_t *pevt = &pevtlist[i];
+		int fd = (int) pevt->portev_object;
+
+		check_evportop(epdp);
+		check_event(pevt);
+		epdp->ed_pending[i] = fd;
+
+		/*
+		 * Figure out what kind of event it was 
+		 * (because we have to pass this to the callback)
+		 */
+		res = 0;
+		if (pevt->portev_events & POLLIN)
+			res |= EV_READ;
+		if (pevt->portev_events & POLLOUT)
+			res |= EV_WRITE;
+
+		/*
+		 * Check for error or hangup conditions
+		 */
+		if (pevt->portev_events & (POLLERR|POLLHUP|POLLNVAL))
+			res |= EV_READ|EV_WRITE;
+
+		assert(epdp->ed_nevents > fd);
+		fdi = &(epdp->ed_fds[fd]);
+
+		/*
+		 * We now check for each of the possible events (READ
+		 * or WRITE).  Then, we activate the event (which will
+		 * cause its callback to be executed).
+		 */
+
+		if ((res & EV_READ) && ((ev = fdi->fdi_revt) != NULL)) {
+			event_active(ev, res, 1);
+		}
+
+		if ((res & EV_WRITE) && ((ev = fdi->fdi_wevt) != NULL)) {
+			event_active(ev, res, 1);
+		}
+	} /* end of all events gotten */
+
+	check_evportop(epdp);
+
+	return (0);
+}
+
+
+/*
+ * Adds the given event (so that you will be notified when it happens via
+ * the callback function).
+ */
+
+static int
+evport_add(void *arg, struct event *ev)
+{
+	struct evport_data *evpd = arg;
+	struct fd_info *fdi;
+	int factor;
+
+	check_evportop(evpd);
+
+	/*
+	 * Delegate, if it's not ours to handle.
+	 */
+	if (ev->ev_events & EV_SIGNAL)
+		return (evsignal_add(ev));
+
+	/*
+	 * If necessary, grow the file descriptor info table
+	 */
+
+	factor = 1;
+	while (ev->ev_fd >= factor * evpd->ed_nevents)
+		factor *= 2;
+
+	if (factor > 1) {
+		if (-1 == grow(evpd, factor)) {
+			return (-1);
+		}
+	}
+
+	fdi = &evpd->ed_fds[ev->ev_fd];
+	if (ev->ev_events & EV_READ)
+		fdi->fdi_revt = ev;
+	if (ev->ev_events & EV_WRITE)
+		fdi->fdi_wevt = ev;
+
+	return reassociate(evpd, fdi, ev->ev_fd);
+}
+
+/*
+ * Removes the given event from the list of events to wait for.
+ */
+
+static int
+evport_del(void *arg, struct event *ev)
+{
+	struct evport_data *evpd = arg;
+	struct fd_info *fdi;
+	int i;
+	int associated = 1;
+
+	check_evportop(evpd);
+
+	/*
+	 * Delegate, if it's not ours to handle
+	 */
+	if (ev->ev_events & EV_SIGNAL) {
+		return (evsignal_del(ev));
+	}
+
+	if (ev->ev_fd >= evpd->ed_nevents) {
+		return (-1);
+	}
+
+	for (i = 0; i < EVENTS_PER_GETN; ++i) {
+		if (evpd->ed_pending[i] == ev->ev_fd) {
+			associated = 0;
+			break;
+		}
+	}
+
+	fdi = &evpd->ed_fds[ev->ev_fd];
+	if (ev->ev_events & EV_READ)
+		fdi->fdi_revt = NULL;
+	if (ev->ev_events & EV_WRITE)
+		fdi->fdi_wevt = NULL;
+
+	if (associated) {
+		if (!FDI_HAS_EVENTS(fdi) &&
+		    port_dissociate(evpd->ed_port, PORT_SOURCE_FD,
+		    ev->ev_fd) == -1) {	 
+			/*
+			 * Ignore EBADFD; the fd could have been closed
+			 * before event_del() was called.
+			 */
+			if (errno != EBADFD) {
+				event_warn("port_dissociate");
+				return (-1);
+			}
+		} else {
+			if (FDI_HAS_EVENTS(fdi)) {
+				return (reassociate(evpd, fdi, ev->ev_fd));
+			}
+		}
+	} else {
+		if (fdi->fdi_revt == NULL && fdi->fdi_wevt == NULL) {
+			evpd->ed_pending[i] = -1;
+		}
+	}
+	return 0;
+}
+
+
+static void
+evport_dealloc(struct event_base *base, void *arg)
+{
+	struct evport_data *evpd = arg;
+
+	evsignal_dealloc(base);
+
+	close(evpd->ed_port);
+
+	if (evpd->ed_fds)
+		free(evpd->ed_fds);
+	free(evpd);
+}
diff --git a/base/third_party/libevent/evrpc-internal.h b/base/third_party/libevent/evrpc-internal.h
new file mode 100644
index 0000000..c900f95
--- /dev/null
+++ b/base/third_party/libevent/evrpc-internal.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2006 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _EVRPC_INTERNAL_H_
+#define _EVRPC_INTERNAL_H_
+
+#include "http-internal.h"
+
+struct evrpc;
+
+#define EVRPC_URI_PREFIX "/.rpc."
+
+struct evrpc_hook {
+	TAILQ_ENTRY(evrpc_hook) (next);
+
+	/* returns -1 if the rpc should be aborted; may rewrite the buffer */
+	int (*process)(struct evhttp_request *, struct evbuffer *, void *);
+	void *process_arg;
+};
+
+TAILQ_HEAD(evrpc_hook_list, evrpc_hook);
+
+/*
+ * this is shared between the base and the pool, so that we can reuse
+ * the hook adding functions; we alias both evrpc_pool and evrpc_base
+ * to this common structure.
+ */
+struct _evrpc_hooks {
+	/* hooks for processing outbound and inbound rpcs */
+	struct evrpc_hook_list in_hooks;
+	struct evrpc_hook_list out_hooks;
+};
+
+#define input_hooks common.in_hooks
+#define output_hooks common.out_hooks
+
+struct evrpc_base {
+	struct _evrpc_hooks common;
+
+	/* the HTTP server under which we register our RPC calls */
+	struct evhttp* http_server;
+
+	/* a list of all RPCs registered with us */
+	TAILQ_HEAD(evrpc_list, evrpc) registered_rpcs;
+};
+
+struct evrpc_req_generic;
+void evrpc_reqstate_free(struct evrpc_req_generic* rpc_state);
+
+/* A pool for holding evhttp_connection objects */
+struct evrpc_pool {
+	struct _evrpc_hooks common;
+
+	struct event_base *base;
+
+	struct evconq connections;
+
+	int timeout;
+
+	TAILQ_HEAD(evrpc_requestq, evrpc_request_wrapper) requests;
+};
+
+
+#endif /* _EVRPC_INTERNAL_H_ */
diff --git a/base/third_party/libevent/evrpc.c b/base/third_party/libevent/evrpc.c
new file mode 100644
index 0000000..070fd9e
--- /dev/null
+++ b/base/third_party/libevent/evrpc.c
@@ -0,0 +1,657 @@
+/*
+ * Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#ifdef WIN32
+#define WIN32_LEAN_AND_MEAN
+#include <winsock2.h>
+#include <windows.h>
+#undef WIN32_LEAN_AND_MEAN
+#endif
+
+#include <sys/types.h>
+#ifndef WIN32
+#include <sys/socket.h>
+#endif
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#include <sys/queue.h>
+#include <stdio.h>
+#include <stdlib.h>
+#ifndef WIN32
+#include <unistd.h>
+#endif
+#include <errno.h>
+#include <signal.h>
+#include <string.h>
+#include <assert.h>
+
+#include "event.h"
+#include "evrpc.h"
+#include "evrpc-internal.h"
+#include "evhttp.h"
+#include "evutil.h"
+#include "log.h"
+
+struct evrpc_base *
+evrpc_init(struct evhttp *http_server)
+{
+	struct evrpc_base* base = calloc(1, sizeof(struct evrpc_base));
+	if (base == NULL)
+		return (NULL);
+
+	/* we rely on the tagging subsystem */
+	evtag_init();
+
+	TAILQ_INIT(&base->registered_rpcs);
+	TAILQ_INIT(&base->input_hooks);
+	TAILQ_INIT(&base->output_hooks);
+	base->http_server = http_server;
+
+	return (base);
+}
+
+void
+evrpc_free(struct evrpc_base *base)
+{
+	struct evrpc *rpc;
+	struct evrpc_hook *hook;
+
+	while ((rpc = TAILQ_FIRST(&base->registered_rpcs)) != NULL) {
+		assert(evrpc_unregister_rpc(base, rpc->uri));
+	}
+	while ((hook = TAILQ_FIRST(&base->input_hooks)) != NULL) {
+		assert(evrpc_remove_hook(base, EVRPC_INPUT, hook));
+	}
+	while ((hook = TAILQ_FIRST(&base->output_hooks)) != NULL) {
+		assert(evrpc_remove_hook(base, EVRPC_OUTPUT, hook));
+	}
+	free(base);
+}
+
+void *
+evrpc_add_hook(void *vbase,
+    enum EVRPC_HOOK_TYPE hook_type,
+    int (*cb)(struct evhttp_request *, struct evbuffer *, void *),
+    void *cb_arg)
+{
+	struct _evrpc_hooks *base = vbase;
+	struct evrpc_hook_list *head = NULL;
+	struct evrpc_hook *hook = NULL;
+	switch (hook_type) {
+	case EVRPC_INPUT:
+		head = &base->in_hooks;
+		break;
+	case EVRPC_OUTPUT:
+		head = &base->out_hooks;
+		break;
+	default:
+		assert(hook_type == EVRPC_INPUT || hook_type == EVRPC_OUTPUT);
+	}
+
+	hook = calloc(1, sizeof(struct evrpc_hook));
+	assert(hook != NULL);
+	
+	hook->process = cb;
+	hook->process_arg = cb_arg;
+	TAILQ_INSERT_TAIL(head, hook, next);
+
+	return (hook);
+}
+
+static int
+evrpc_remove_hook_internal(struct evrpc_hook_list *head, void *handle)
+{
+	struct evrpc_hook *hook = NULL;
+	TAILQ_FOREACH(hook, head, next) {
+		if (hook == handle) {
+			TAILQ_REMOVE(head, hook, next);
+			free(hook);
+			return (1);
+		}
+	}
+
+	return (0);
+}
+
+/*
+ * remove the hook specified by the handle
+ */
+
+int
+evrpc_remove_hook(void *vbase, enum EVRPC_HOOK_TYPE hook_type, void *handle)
+{
+	struct _evrpc_hooks *base = vbase;
+	struct evrpc_hook_list *head = NULL;
+	switch (hook_type) {
+	case EVRPC_INPUT:
+		head = &base->in_hooks;
+		break;
+	case EVRPC_OUTPUT:
+		head = &base->out_hooks;
+		break;
+	default:
+		assert(hook_type == EVRPC_INPUT || hook_type == EVRPC_OUTPUT);
+	}
+
+	return (evrpc_remove_hook_internal(head, handle));
+}
+
+static int
+evrpc_process_hooks(struct evrpc_hook_list *head,
+    struct evhttp_request *req, struct evbuffer *evbuf)
+{
+	struct evrpc_hook *hook;
+	TAILQ_FOREACH(hook, head, next) {
+		if (hook->process(req, evbuf, hook->process_arg) == -1)
+			return (-1);
+	}
+
+	return (0);
+}
+
+static void evrpc_pool_schedule(struct evrpc_pool *pool);
+static void evrpc_request_cb(struct evhttp_request *, void *);
+void evrpc_request_done(struct evrpc_req_generic*);
+
+/*
+ * Registers a new RPC with the HTTP server.   The evrpc object is expected
+ * to have been filled in via the EVRPC_REGISTER_OBJECT macro; the
+ * EVRPC_REGISTER macro then calls this function.
+ */
+
+static char *
+evrpc_construct_uri(const char *uri)
+{
+	char *constructed_uri;
+	int constructed_uri_len;
+
+	constructed_uri_len = strlen(EVRPC_URI_PREFIX) + strlen(uri) + 1;
+	if ((constructed_uri = malloc(constructed_uri_len)) == NULL)
+		event_err(1, "%s: failed to register rpc at %s",
+		    __func__, uri);
+	memcpy(constructed_uri, EVRPC_URI_PREFIX, strlen(EVRPC_URI_PREFIX));
+	memcpy(constructed_uri + strlen(EVRPC_URI_PREFIX), uri, strlen(uri));
+	constructed_uri[constructed_uri_len - 1] = '\0';
+
+	return (constructed_uri);
+}
+
+int
+evrpc_register_rpc(struct evrpc_base *base, struct evrpc *rpc,
+    void (*cb)(struct evrpc_req_generic *, void *), void *cb_arg)
+{
+	char *constructed_uri = evrpc_construct_uri(rpc->uri);
+
+	rpc->base = base;
+	rpc->cb = cb;
+	rpc->cb_arg = cb_arg;
+
+	TAILQ_INSERT_TAIL(&base->registered_rpcs, rpc, next);
+
+	evhttp_set_cb(base->http_server,
+	    constructed_uri,
+	    evrpc_request_cb,
+	    rpc);
+	
+	free(constructed_uri);
+
+	return (0);
+}
+
+int
+evrpc_unregister_rpc(struct evrpc_base *base, const char *name)
+{
+	char *registered_uri = NULL;
+	struct evrpc *rpc;
+
+	/* find the right rpc; linear search might be slow */
+	TAILQ_FOREACH(rpc, &base->registered_rpcs, next) {
+		if (strcmp(rpc->uri, name) == 0)
+			break;
+	}
+	if (rpc == NULL) {
+		/* We did not find an RPC with this name */
+		return (-1);
+	}
+	TAILQ_REMOVE(&base->registered_rpcs, rpc, next);
+	
+	free((char *)rpc->uri);
+	free(rpc);
+
+	registered_uri = evrpc_construct_uri(name);
+
+	/* remove the http server callback */
+	assert(evhttp_del_cb(base->http_server, registered_uri) == 0);
+
+	free(registered_uri);
+	return (0);
+}
+
+static void
+evrpc_request_cb(struct evhttp_request *req, void *arg)
+{
+	struct evrpc *rpc = arg;
+	struct evrpc_req_generic *rpc_state = NULL;
+
+	/* let's verify the outside parameters */
+	if (req->type != EVHTTP_REQ_POST ||
+	    EVBUFFER_LENGTH(req->input_buffer) <= 0)
+		goto error;
+
+	/*
+	 * we might want to allow hooks to suspend the processing,
+	 * but at the moment, we assume that they just act as simple
+	 * filters.
+	 */
+	if (evrpc_process_hooks(&rpc->base->input_hooks,
+		req, req->input_buffer) == -1)
+		goto error;
+
+	rpc_state = calloc(1, sizeof(struct evrpc_req_generic));
+	if (rpc_state == NULL)
+		goto error;
+
+	/* let's check that we can parse the request */
+	rpc_state->request = rpc->request_new();
+	if (rpc_state->request == NULL)
+		goto error;
+
+	rpc_state->rpc = rpc;
+
+	if (rpc->request_unmarshal(
+		    rpc_state->request, req->input_buffer) == -1) {
+		/* we failed to parse the request; that's a bummer */
+		goto error;
+	}
+
+	/* at this point, we have a well-formed request; prepare the reply */
+
+	rpc_state->reply = rpc->reply_new();
+	if (rpc_state->reply == NULL)
+		goto error;
+
+	rpc_state->http_req = req;
+	rpc_state->done = evrpc_request_done;
+
+	/* give the rpc to the user; they can deal with it */
+	rpc->cb(rpc_state, rpc->cb_arg);
+
+	return;
+
+error:
+	evrpc_reqstate_free(rpc_state);
+	evhttp_send_error(req, HTTP_SERVUNAVAIL, "Service Error");
+	return;
+}
+
+void
+evrpc_reqstate_free(struct evrpc_req_generic* rpc_state)
+{
+	/* clean up all memory */
+	if (rpc_state != NULL) {
+		struct evrpc *rpc = rpc_state->rpc;
+
+		if (rpc_state->request != NULL)
+			rpc->request_free(rpc_state->request);
+		if (rpc_state->reply != NULL)
+			rpc->reply_free(rpc_state->reply);
+		free(rpc_state);
+	}
+}
+
+void
+evrpc_request_done(struct evrpc_req_generic* rpc_state)
+{
+	struct evhttp_request *req = rpc_state->http_req;
+	struct evrpc *rpc = rpc_state->rpc;
+	struct evbuffer* data = NULL;
+
+	if (rpc->reply_complete(rpc_state->reply) == -1) {
+		/* the reply was not completely filled in.  error out */
+		goto error;
+	}
+
+	if ((data = evbuffer_new()) == NULL) {
+		/* out of memory */
+		goto error;
+	}
+
+	/* serialize the reply */
+	rpc->reply_marshal(data, rpc_state->reply);
+
+	/* do hook based tweaks to the request */
+	if (evrpc_process_hooks(&rpc->base->output_hooks,
+		req, data) == -1)
+		goto error;
+
+	/* on success, we are going to transmit marshaled binary data */
+	if (evhttp_find_header(req->output_headers, "Content-Type") == NULL) {
+		evhttp_add_header(req->output_headers,
+		    "Content-Type", "application/octet-stream");
+	}
+
+	evhttp_send_reply(req, HTTP_OK, "OK", data);
+
+	evbuffer_free(data);
+
+	evrpc_reqstate_free(rpc_state);
+
+	return;
+
+error:
+	if (data != NULL)
+		evbuffer_free(data);
+	evrpc_reqstate_free(rpc_state);
+	evhttp_send_error(req, HTTP_SERVUNAVAIL, "Service Error");
+	return;
+}
+
+/* Client implementation of RPC site */
+
+static int evrpc_schedule_request(struct evhttp_connection *connection,
+    struct evrpc_request_wrapper *ctx);
+
+struct evrpc_pool *
+evrpc_pool_new(struct event_base *base)
+{
+	struct evrpc_pool *pool = calloc(1, sizeof(struct evrpc_pool));
+	if (pool == NULL)
+		return (NULL);
+
+	TAILQ_INIT(&pool->connections);
+	TAILQ_INIT(&pool->requests);
+
+	TAILQ_INIT(&pool->input_hooks);
+	TAILQ_INIT(&pool->output_hooks);
+
+	pool->base = base;
+	pool->timeout = -1;
+
+	return (pool);
+}
+
+static void
+evrpc_request_wrapper_free(struct evrpc_request_wrapper *request)
+{
+	free(request->name);
+	free(request);
+}
+
+void
+evrpc_pool_free(struct evrpc_pool *pool)
+{
+	struct evhttp_connection *connection;
+	struct evrpc_request_wrapper *request;
+	struct evrpc_hook *hook;
+
+	while ((request = TAILQ_FIRST(&pool->requests)) != NULL) {
+		TAILQ_REMOVE(&pool->requests, request, next);
+		/* if this gets more complicated we need our own function */
+		evrpc_request_wrapper_free(request);
+	}
+
+	while ((connection = TAILQ_FIRST(&pool->connections)) != NULL) {
+		TAILQ_REMOVE(&pool->connections, connection, next);
+		evhttp_connection_free(connection);
+	}
+
+	while ((hook = TAILQ_FIRST(&pool->input_hooks)) != NULL) {
+		assert(evrpc_remove_hook(pool, EVRPC_INPUT, hook));
+	}
+
+	while ((hook = TAILQ_FIRST(&pool->output_hooks)) != NULL) {
+		assert(evrpc_remove_hook(pool, EVRPC_OUTPUT, hook));
+	}
+
+	free(pool);
+}
+
+/*
+ * Add a connection to the RPC pool.   A request scheduled on the pool
+ * may use any available connection.
+ */
+
+void
+evrpc_pool_add_connection(struct evrpc_pool *pool,
+    struct evhttp_connection *connection) {
+	assert(connection->http_server == NULL);
+	TAILQ_INSERT_TAIL(&pool->connections, connection, next);
+
+	/*
+	 * associate an event base with this connection
+	 */
+	if (pool->base != NULL)
+		evhttp_connection_set_base(connection, pool->base);
+
+	/* 
+	 * unless a timeout was specifically set for a connection,
+	 * the connection inherits the timeout from the pool.
+	 */
+	if (connection->timeout == -1)
+		connection->timeout = pool->timeout;
+
+	/* 
+	 * if we have any requests pending, schedule one on the new
+	 * connection.
+	 */
+
+	if (TAILQ_FIRST(&pool->requests) != NULL) {
+		struct evrpc_request_wrapper *request = 
+		    TAILQ_FIRST(&pool->requests);
+		TAILQ_REMOVE(&pool->requests, request, next);
+		evrpc_schedule_request(connection, request);
+	}
+}
+
+void
+evrpc_pool_set_timeout(struct evrpc_pool *pool, int timeout_in_secs)
+{
+	struct evhttp_connection *evcon;
+	TAILQ_FOREACH(evcon, &pool->connections, next) {
+		evcon->timeout = timeout_in_secs;
+	}
+	pool->timeout = timeout_in_secs;
+}
+
+
+static void evrpc_reply_done(struct evhttp_request *, void *);
+static void evrpc_request_timeout(int, short, void *);
+
+/*
+ * Finds a connection object associated with the pool that is currently
+ * idle and can be used to make a request.
+ */
+static struct evhttp_connection *
+evrpc_pool_find_connection(struct evrpc_pool *pool)
+{
+	struct evhttp_connection *connection;
+	TAILQ_FOREACH(connection, &pool->connections, next) {
+		if (TAILQ_FIRST(&connection->requests) == NULL)
+			return (connection);
+	}
+
+	return (NULL);
+}
+
+/*
+ * We assume that the ctx is no longer queued on the pool.
+ */
+static int
+evrpc_schedule_request(struct evhttp_connection *connection,
+    struct evrpc_request_wrapper *ctx)
+{
+	struct evhttp_request *req = NULL;
+	struct evrpc_pool *pool = ctx->pool;
+	struct evrpc_status status;
+	char *uri = NULL;
+	int res = 0;
+
+	if ((req = evhttp_request_new(evrpc_reply_done, ctx)) == NULL)
+		goto error;
+
+	/* serialize the request data into the output buffer */
+	ctx->request_marshal(req->output_buffer, ctx->request);
+
+	uri = evrpc_construct_uri(ctx->name);
+	if (uri == NULL)
+		goto error;
+
+	/* we need to know the connection that we might have to abort */
+	ctx->evcon = connection;
+
+	/* apply hooks to the outgoing request */
+	if (evrpc_process_hooks(&pool->output_hooks,
+		req, req->output_buffer) == -1)
+		goto error;
+
+	if (pool->timeout > 0) {
+		/* 
+		 * a timeout after which the whole rpc is going to be aborted.
+		 */
+		struct timeval tv;
+		evutil_timerclear(&tv);
+		tv.tv_sec = pool->timeout;
+		evtimer_add(&ctx->ev_timeout, &tv);
+	}
+
+	/* start the request over the connection */
+	res = evhttp_make_request(connection, req, EVHTTP_REQ_POST, uri);
+	free(uri);
+
+	if (res == -1)
+		goto error;
+
+	return (0);
+
+error:
+	memset(&status, 0, sizeof(status));
+	status.error = EVRPC_STATUS_ERR_UNSTARTED;
+	(*ctx->cb)(&status, ctx->request, ctx->reply, ctx->cb_arg);
+	evrpc_request_wrapper_free(ctx);
+	return (-1);
+}
+
+int
+evrpc_make_request(struct evrpc_request_wrapper *ctx)
+{
+	struct evrpc_pool *pool = ctx->pool;
+
+	/* initialize the event structure for this rpc */
+	evtimer_set(&ctx->ev_timeout, evrpc_request_timeout, ctx);
+	if (pool->base != NULL)
+		event_base_set(pool->base, &ctx->ev_timeout);
+
+	/* we better have some available connections on the pool */
+	assert(TAILQ_FIRST(&pool->connections) != NULL);
+
+	/* 
+	 * if no connection is available, we queue the request on the pool;
+	 * the next time a connection becomes idle, the rpc will be sent on it.
+	 */
+	TAILQ_INSERT_TAIL(&pool->requests, ctx, next);
+
+	evrpc_pool_schedule(pool);
+
+	return (0);
+}
+
+static void
+evrpc_reply_done(struct evhttp_request *req, void *arg)
+{
+	struct evrpc_request_wrapper *ctx = arg;
+	struct evrpc_pool *pool = ctx->pool;
+	struct evrpc_status status;
+	int res = -1;
+	
+	/* cancel any timeout we might have scheduled */
+	event_del(&ctx->ev_timeout);
+
+	memset(&status, 0, sizeof(status));
+	status.http_req = req;
+
+	/* we need to get the reply now */
+	if (req != NULL) {
+		/* apply hooks to the incoming request */
+		if (evrpc_process_hooks(&pool->input_hooks,
+			req, req->input_buffer) == -1) {
+			status.error = EVRPC_STATUS_ERR_HOOKABORTED;
+			res = -1;
+		} else {
+			res = ctx->reply_unmarshal(ctx->reply,
+			    req->input_buffer);
+			if (res == -1) {
+				status.error = EVRPC_STATUS_ERR_BADPAYLOAD;
+			}
+		}
+	} else {
+		status.error = EVRPC_STATUS_ERR_TIMEOUT;
+	}
+
+	if (res == -1) {
+		/* clear everything that we might have written previously */
+		ctx->reply_clear(ctx->reply);
+	}
+
+	(*ctx->cb)(&status, ctx->request, ctx->reply, ctx->cb_arg);
+	
+	evrpc_request_wrapper_free(ctx);
+
+	/* the http layer owns the request structure */
+
+	/* see if we can schedule another request */
+	evrpc_pool_schedule(pool);
+}
+
+static void
+evrpc_pool_schedule(struct evrpc_pool *pool)
+{
+	struct evrpc_request_wrapper *ctx = TAILQ_FIRST(&pool->requests);
+	struct evhttp_connection *evcon;
+
+	/* if no requests are pending, we have no work */
+	if (ctx == NULL)
+		return;
+
+	if ((evcon = evrpc_pool_find_connection(pool)) != NULL) {
+		TAILQ_REMOVE(&pool->requests, ctx, next);
+		evrpc_schedule_request(evcon, ctx);
+	}
+}
+
+static void
+evrpc_request_timeout(int fd, short what, void *arg)
+{
+	struct evrpc_request_wrapper *ctx = arg;
+	struct evhttp_connection *evcon = ctx->evcon;
+	assert(evcon != NULL);
+
+	evhttp_connection_fail(evcon, EVCON_HTTP_TIMEOUT);
+}
diff --git a/base/third_party/libevent/evrpc.h b/base/third_party/libevent/evrpc.h
new file mode 100644
index 0000000..7c16b95
--- /dev/null
+++ b/base/third_party/libevent/evrpc.h
@@ -0,0 +1,486 @@
+/*
+ * Copyright (c) 2006 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _EVRPC_H_
+#define _EVRPC_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** @file evrpc.h
+ *
+ * This header file provides basic support for an RPC server and client.
+ *
+ * To support RPCs in a server, every supported RPC command needs to be
+ * defined and registered.
+ *
+ * EVRPC_HEADER(SendCommand, Request, Reply);
+ *
+ *  SendCommand is the name of the RPC command.
+ *  Request is the name of a structure generated by event_rpcgen.py.
+ *    It contains all parameters relating to the SendCommand RPC.  The
+ *    server needs to fill in the Reply structure.
+ *  Reply is the name of a structure generated by event_rpcgen.py.  It
+ *    contains the answer to the RPC.
+ *
+ * To register an RPC with an HTTP server, you need to first create an RPC
+ * base with:
+ *
+ *   struct evrpc_base *base = evrpc_init(http);
+ *
+ * A specific RPC can then be registered with
+ *
+ * EVRPC_REGISTER(base, SendCommand, Request, Reply, FunctionCB, arg);
+ *
+ * When the server receives an appropriately formatted RPC, the user callback
+ * is invoked.   The callback needs to fill in the reply structure.
+ *
+ * void FunctionCB(EVRPC_STRUCT(SendCommand)* rpc, void *arg);
+ *
+ * To send the reply, call EVRPC_REQUEST_DONE(rpc);
+ *
+ * See the regression test for an example.
+ */
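+
+/*
+ * A minimal server-side sketch assembled from the steps above (an editorial
+ * illustration; SendCommand, Request, Reply and send_command_cb are
+ * hypothetical names, with Request and Reply generated by event_rpcgen.py):
+ *
+ *   EVRPC_HEADER(SendCommand, Request, Reply);
+ *   EVRPC_GENERATE(SendCommand, Request, Reply);
+ *
+ *   static void
+ *   send_command_cb(EVRPC_STRUCT(SendCommand)* rpc, void *arg)
+ *   {
+ *           (fill in rpc->reply here, then hand it back)
+ *           EVRPC_REQUEST_DONE(rpc);
+ *   }
+ *
+ *   struct evhttp *http = evhttp_start("0.0.0.0", 8080);
+ *   struct evrpc_base *base = evrpc_init(http);
+ *   EVRPC_REGISTER(base, SendCommand, Request, Reply, send_command_cb, NULL);
+ */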
+
+struct evbuffer;
+struct event_base;
+struct evrpc_req_generic;
+
+/* Encapsulates a request */
+struct evrpc {
+	TAILQ_ENTRY(evrpc) next;
+
+	/* the URI at which the request handler lives */
+	const char* uri;
+
+	/* creates a new request structure */
+	void *(*request_new)(void);
+
+	/* frees the request structure */
+	void (*request_free)(void *);
+
+	/* unmarshals the buffer into the proper request structure */
+	int (*request_unmarshal)(void *, struct evbuffer *);
+
+	/* creates a new reply structure */
+	void *(*reply_new)(void);
+
+	/* frees the reply structure */
+	void (*reply_free)(void *);
+
+	/* verifies that the reply is valid */
+	int (*reply_complete)(void *);
+	
+	/* marshals the reply into a buffer */
+	void (*reply_marshal)(struct evbuffer*, void *);
+
+	/* the callback invoked for each received rpc */
+	void (*cb)(struct evrpc_req_generic *, void *);
+	void *cb_arg;
+
+	/* reference for further configuration */
+	struct evrpc_base *base;
+};
+
+/** The type of a specific RPC Message
+ *
+ * @param rpcname the name of the RPC message
+ */
+#define EVRPC_STRUCT(rpcname) struct evrpc_req__##rpcname
+
+struct evhttp_request;
+struct evrpc_status;
+
+/* We alias the RPC specific structs to this voided one */
+struct evrpc_req_generic {
+	/* the unmarshaled request object */
+	void *request;
+
+	/* the empty reply object that needs to be filled in */
+	void *reply;
+
+	/* 
+	 * the static structure for this rpc, which can be used to
+	 * automatically unmarshal and marshal the http buffers.
+	 */
+	struct evrpc *rpc;
+
+	/*
+	 * the http request structure on which we need to answer.
+	 */
+	struct evhttp_request* http_req;
+
+	/*
+	 * callback to reply and finish answering this rpc
+	 */
+	void (*done)(struct evrpc_req_generic* rpc); 
+};
+
+/** Creates the definitions and prototypes for an RPC
+ *
+ * You need to use EVRPC_HEADER to create structures and function prototypes
+ * needed by the server and client implementation.  The structures have to be
+ * defined in an .rpc file and converted to source code via event_rpcgen.py
+ *
+ * @param rpcname the name of the RPC
+ * @param reqstruct the name of the RPC request structure
+ * @param rplystruct the name of the RPC reply structure
+ * @see EVRPC_GENERATE()
+ */
+#define EVRPC_HEADER(rpcname, reqstruct, rplystruct) \
+EVRPC_STRUCT(rpcname) {	\
+	struct reqstruct* request; \
+	struct rplystruct* reply; \
+	struct evrpc* rpc; \
+	struct evhttp_request* http_req; \
+	void (*done)(struct evrpc_status *, \
+	    struct evrpc* rpc, void *request, void *reply);	     \
+};								     \
+int evrpc_send_request_##rpcname(struct evrpc_pool *, \
+    struct reqstruct *, struct rplystruct *, \
+    void (*)(struct evrpc_status *, \
+	struct reqstruct *, struct rplystruct *, void *cbarg),	\
+    void *);
+
+/** Generates the code for receiving and sending an RPC message
+ *
+ * EVRPC_GENERATE is used to create the code corresponding to sending
+ * and receiving a particular RPC message
+ *
+ * @param rpcname the name of the RPC
+ * @param reqstruct the name of the RPC request structure
+ * @param rplystruct the name of the RPC reply structure
+ * @see EVRPC_HEADER()
+ */
+#define EVRPC_GENERATE(rpcname, reqstruct, rplystruct) \
+int evrpc_send_request_##rpcname(struct evrpc_pool *pool, \
+    struct reqstruct *request, struct rplystruct *reply, \
+    void (*cb)(struct evrpc_status *, \
+	struct reqstruct *, struct rplystruct *, void *cbarg),	\
+    void *cbarg) { \
+	struct evrpc_status status;				    \
+	struct evrpc_request_wrapper *ctx;			    \
+	ctx = (struct evrpc_request_wrapper *) \
+	    malloc(sizeof(struct evrpc_request_wrapper));	    \
+	if (ctx == NULL)					    \
+		goto error;					    \
+	ctx->pool = pool;					    \
+	ctx->evcon = NULL;					    \
+	ctx->name = strdup(#rpcname);				    \
+	if (ctx->name == NULL) {				    \
+		free(ctx);					    \
+		goto error;					    \
+	}							    \
+	ctx->cb = (void (*)(struct evrpc_status *, \
+		void *, void *, void *))cb;			    \
+	ctx->cb_arg = cbarg;					    \
+	ctx->request = (void *)request;				    \
+	ctx->reply = (void *)reply;				    \
+	ctx->request_marshal = (void (*)(struct evbuffer *, void *))reqstruct##_marshal; \
+	ctx->reply_clear = (void (*)(void *))rplystruct##_clear;    \
+	ctx->reply_unmarshal = (int (*)(void *, struct evbuffer *))rplystruct##_unmarshal; \
+	return (evrpc_make_request(ctx));			    \
+error:								    \
+	memset(&status, 0, sizeof(status));			    \
+	status.error = EVRPC_STATUS_ERR_UNSTARTED;		    \
+	(*(cb))(&status, request, reply, cbarg);		    \
+	return (-1);						    \
+}
+
+/** Provides access to the HTTP request object underlying an RPC
+ *
+ * Access to the underlying http object; can be used to look at headers or
+ * for getting the remote ip address
+ *
+ * @param rpc_req the rpc request structure provided to the server callback
+ * @return a struct evhttp_request object that can be inspected for
+ * HTTP headers or sender information.
+ */
+#define EVRPC_REQUEST_HTTP(rpc_req) (rpc_req)->http_req
+
+/** Creates the reply to an RPC request
+ * 
+ * EVRPC_REQUEST_DONE is used to answer a request; the reply is expected
+ * to have been filled in.  The request and reply pointers become invalid
+ * after this call has finished.
+ * 
+ * @param rpc_req the rpc request structure provided to the server callback
+ */
+#define EVRPC_REQUEST_DONE(rpc_req) do { \
+  struct evrpc_req_generic *_req = (struct evrpc_req_generic *)(rpc_req); \
+  _req->done(_req); \
+} while (0)
+  
+
+/* Takes a request object and fills it in with the right magic */
+#define EVRPC_REGISTER_OBJECT(rpc, name, request, reply) \
+  do { \
+    (rpc)->uri = strdup(#name); \
+    if ((rpc)->uri == NULL) {			 \
+      fprintf(stderr, "failed to register object\n");	\
+      exit(1);						\
+    } \
+    (rpc)->request_new = (void *(*)(void))request##_new; \
+    (rpc)->request_free = (void (*)(void *))request##_free; \
+    (rpc)->request_unmarshal = (int (*)(void *, struct evbuffer *))request##_unmarshal; \
+    (rpc)->reply_new = (void *(*)(void))reply##_new; \
+    (rpc)->reply_free = (void (*)(void *))reply##_free; \
+    (rpc)->reply_complete = (int (*)(void *))reply##_complete; \
+    (rpc)->reply_marshal = (void (*)(struct evbuffer*, void *))reply##_marshal; \
+  } while (0)
+
+struct evrpc_base;
+struct evhttp;
+
+/* functions to start up the rpc system */
+
+/** Creates a new rpc base from which RPC requests can be received
+ *
+ * @param server a pointer to an existing HTTP server
+ * @return a newly allocated evrpc_base struct
+ * @see evrpc_free()
+ */
+struct evrpc_base *evrpc_init(struct evhttp *server);
+
+/** 
+ * Frees the evrpc base
+ *
+ * For now, you are responsible for making sure that no rpcs are ongoing.
+ *
+ * @param base the evrpc_base object to be freed
+ * @see evrpc_init
+ */
+void evrpc_free(struct evrpc_base *base);
+
+/** register RPCs with the HTTP Server
+ *
+ * registers a new RPC with the HTTP server; each RPC needs to have
+ * a unique name under which it can be identified.
+ *
+ * @param base the evrpc_base structure in which the RPC should be
+ *   registered.
+ * @param name the name of the RPC
+ * @param request the name of the RPC request structure
+ * @param reply the name of the RPC reply structure
+ * @param callback the callback that should be invoked when the RPC
+ * is received.  The callback has the following prototype
+ *   void (*callback)(EVRPC_STRUCT(Message)* rpc, void *arg)
+ * @param cbarg an additional parameter that can be passed to the callback.
+ *   The parameter can be used to carry around state.
+ */
+#define EVRPC_REGISTER(base, name, request, reply, callback, cbarg) \
+  do { \
+    struct evrpc* rpc = (struct evrpc *)calloc(1, sizeof(struct evrpc)); \
+    EVRPC_REGISTER_OBJECT(rpc, name, request, reply); \
+    evrpc_register_rpc(base, rpc, \
+	(void (*)(struct evrpc_req_generic*, void *))callback, cbarg);	\
+  } while (0)
+
+int evrpc_register_rpc(struct evrpc_base *, struct evrpc *,
+    void (*)(struct evrpc_req_generic*, void *), void *);
+
+/**
+ * Unregisters an already registered RPC
+ *
+ * @param base the evrpc_base object from which to unregister an RPC
+ * @param name the name of the rpc to unregister
+ * @return -1 on error or 0 when successful.
+ * @see EVRPC_REGISTER()
+ */
+#define EVRPC_UNREGISTER(base, name) evrpc_unregister_rpc(base, #name)
+
+int evrpc_unregister_rpc(struct evrpc_base *base, const char *name);
+
+/*
+ * Client-side RPC support
+ */
+
+struct evrpc_pool;
+struct evhttp_connection;
+
+/** 
+ * provides information about the completed RPC request.
+ */
+struct evrpc_status {
+#define EVRPC_STATUS_ERR_NONE		0
+#define EVRPC_STATUS_ERR_TIMEOUT	1
+#define EVRPC_STATUS_ERR_BADPAYLOAD	2
+#define EVRPC_STATUS_ERR_UNSTARTED	3
+#define EVRPC_STATUS_ERR_HOOKABORTED	4
+	int error;
+
+	/* for looking at headers or other information */
+	struct evhttp_request *http_req;
+};
+
+struct evrpc_request_wrapper {
+	TAILQ_ENTRY(evrpc_request_wrapper) next;
+
+	/* pool on which this rpc request is being made */
+	struct evrpc_pool *pool;
+
+	/* connection on which the request is being sent */
+	struct evhttp_connection *evcon;
+
+	/* event for implementing request timeouts */
+	struct event ev_timeout;
+
+	/* the name of the rpc */
+	char *name;
+
+	/* callback */
+	void (*cb)(struct evrpc_status*, void *request, void *reply, void *arg);
+	void *cb_arg;
+
+	void *request;
+	void *reply;
+
+	/* marshals the request structure into a buffer */
+	void (*request_marshal)(struct evbuffer *, void *);
+
+	/* removes all stored state in the reply */
+	void (*reply_clear)(void *);
+
+	/* unmarshals the buffer into the reply structure */
+	int (*reply_unmarshal)(void *, struct evbuffer*);
+};
+
+/** launches an RPC and sends it to the server
+ *
+ * EVRPC_MAKE_REQUEST() is used by the client to send an RPC to the server.
+ *
+ * @param name the name of the RPC
+ * @param pool the evrpc_pool that contains the connection objects over which
+ *   the request should be sent.
+ * @param request a pointer to the RPC request structure - it contains the
+ *   data to be sent to the server.
+ * @param reply a pointer to the RPC reply structure.  It is going to be filled
+ *   if the request was answered successfully
+ * @param cb the callback to invoke when the RPC request has been answered
+ * @param cbarg an additional argument to be passed to the callback
+ * @return 0 on success, -1 on failure
+ */
+#define EVRPC_MAKE_REQUEST(name, pool, request, reply, cb, cbarg)	\
+	evrpc_send_request_##name(pool, request, reply, cb, cbarg)
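+
+/*
+ * Client-side sketch using the macro above (an editorial illustration;
+ * SendCommand, Request, Reply and done_cb are hypothetical names matching
+ * the server example earlier in this header):
+ *
+ *   static void
+ *   done_cb(struct evrpc_status *status, struct Request *request,
+ *       struct Reply *reply, void *arg)
+ *   {
+ *           if (status->error != EVRPC_STATUS_ERR_NONE)
+ *                   return;   (timed out, bad payload, or hook aborted)
+ *           (consume the reply here)
+ *   }
+ *
+ *   struct evrpc_pool *pool = evrpc_pool_new(NULL);
+ *   evrpc_pool_add_connection(pool,
+ *       evhttp_connection_new("127.0.0.1", 8080));
+ *   struct Request *request = Request_new();
+ *   struct Reply *reply = Reply_new();
+ *   EVRPC_MAKE_REQUEST(SendCommand, pool, request, reply, done_cb, NULL);
+ */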
+
+int evrpc_make_request(struct evrpc_request_wrapper *);
+
+/** creates an rpc connection pool
+ * 
+ * a pool has a number of connections associated with it.
+ * rpc requests are always made via a pool.
+ *
+ * @param base a pointer to a struct event_base object; can be left NULL
+ *   in single-threaded applications
+ * @return a newly allocated struct evrpc_pool object
+ * @see evrpc_pool_free()
+ */
+struct evrpc_pool *evrpc_pool_new(struct event_base *base);
+/** frees an rpc connection pool
+ *
+ * @param pool a pointer to an evrpc_pool allocated via evrpc_pool_new()
+ * @see evrpc_pool_new()
+ */
+void evrpc_pool_free(struct evrpc_pool *pool);
+/*
+ * adds a connection over which rpc can be dispatched.  the connection
+ * object must have been newly created.
+ */
+void evrpc_pool_add_connection(struct evrpc_pool *, 
+    struct evhttp_connection *);
+
+/**
+ * Sets the timeout in secs after which a request has to complete.  The
+ * RPC is completely aborted if it does not complete by then.  Setting
+ * the timeout to 0 means that it never times out and can be used to
+ * implement callback type RPCs.
+ *
+ * Any connection already in the pool will be updated with the new
+ * timeout.  Connections added to the pool after set_timeout has been
+ * called receive the pool timeout only if no timeout has been set
+ * for the connection itself.
+ *
+ * @param pool a pointer to a struct evrpc_pool object
+ * @param timeout_in_secs the number of seconds after which a request should
+ *   timeout and a failure be returned to the callback.
+ */
+void evrpc_pool_set_timeout(struct evrpc_pool *pool, int timeout_in_secs);
+
+/**
+ * Hooks for changing the input and output of RPCs; this can be used to
+ * implement compression, authentication, encryption, ...
+ */
+
+enum EVRPC_HOOK_TYPE {
+	EVRPC_INPUT,		/**< apply the function to an input hook */
+	EVRPC_OUTPUT		/**< apply the function to an output hook */
+};
+
+#ifndef WIN32
+/** Deprecated alias for EVRPC_INPUT.  Not available on windows, where it
+ * conflicts with platform headers. */
+#define INPUT EVRPC_INPUT
+/** Deprecated alias for EVRPC_OUTPUT.  Not available on windows, where it
+ * conflicts with platform headers. */
+#define OUTPUT EVRPC_OUTPUT
+#endif
+
+/** adds a processing hook to either an rpc base or rpc pool
+ *
+ * If a hook returns -1, the processing is aborted.
+ *
+ * The add functions return handles that can be used for removing hooks.
+ *
+ * @param vbase a pointer to either struct evrpc_base or struct evrpc_pool
+ * @param hook_type either EVRPC_INPUT or EVRPC_OUTPUT
+ * @param cb the callback to call when the hook is activated
+ * @param cb_arg an additional argument for the callback
+ * @return a handle to the hook so it can be removed later
+ * @see evrpc_remove_hook()
+ */
+void *evrpc_add_hook(void *vbase,
+    enum EVRPC_HOOK_TYPE hook_type,
+    int (*cb)(struct evhttp_request *, struct evbuffer *, void *),
+    void *cb_arg);
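+
+/*
+ * Sketch of an input hook (an editorial illustration; reject_empty is a
+ * hypothetical name): a filter that aborts any request with an empty
+ * payload by returning -1.
+ *
+ *   static int
+ *   reject_empty(struct evhttp_request *req, struct evbuffer *buf, void *arg)
+ *   {
+ *           return EVBUFFER_LENGTH(buf) == 0 ? -1 : 0;
+ *   }
+ *
+ *   void *handle = evrpc_add_hook(base, EVRPC_INPUT, reject_empty, NULL);
+ *   ...
+ *   evrpc_remove_hook(base, EVRPC_INPUT, handle);
+ */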
+
+/** removes a previously added hook
+ *
+ * @param vbase a pointer to either struct evrpc_base or struct evrpc_pool
+ * @param hook_type either EVRPC_INPUT or EVRPC_OUTPUT
+ * @param handle a handle returned by evrpc_add_hook()
+ * @return 1 on success or 0 on failure
+ * @see evrpc_add_hook()
+ */
+int evrpc_remove_hook(void *vbase,
+    enum EVRPC_HOOK_TYPE hook_type,
+    void *handle);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _EVRPC_H_ */
diff --git a/base/third_party/libevent/evsignal.h b/base/third_party/libevent/evsignal.h
new file mode 100644
index 0000000..076cd8d
--- /dev/null
+++ b/base/third_party/libevent/evsignal.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2000-2002 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _EVSIGNAL_H_
+#define _EVSIGNAL_H_
+
+typedef void (*ev_sighandler_t)(int);
+
+struct evsignal_info {
+	struct event ev_signal;
+	int ev_signal_pair[2];
+	int ev_signal_added;
+	volatile sig_atomic_t evsignal_caught;
+	struct event_list evsigevents[NSIG];
+	sig_atomic_t evsigcaught[NSIG];
+#ifdef HAVE_SIGACTION
+	struct sigaction **sh_old;
+#else
+	ev_sighandler_t **sh_old;
+#endif
+	int sh_old_max;
+};
+int evsignal_init(struct event_base *);
+void evsignal_process(struct event_base *);
+int evsignal_add(struct event *);
+int evsignal_del(struct event *);
+void evsignal_dealloc(struct event_base *);
+
+#endif /* _EVSIGNAL_H_ */
diff --git a/base/third_party/libevent/evutil.c b/base/third_party/libevent/evutil.c
new file mode 100644
index 0000000..cc6d0f4
--- /dev/null
+++ b/base/third_party/libevent/evutil.c
@@ -0,0 +1,284 @@
+/*
+ * Copyright (c) 2007 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#ifdef WIN32
+#include <winsock2.h>
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#undef WIN32_LEAN_AND_MEAN
+#endif
+
+#include <sys/types.h>
+#ifdef HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#ifdef HAVE_FCNTL_H
+#include <fcntl.h>
+#endif
+#ifdef HAVE_STDLIB_H
+#include <stdlib.h>
+#endif
+#include <errno.h>
+#if defined WIN32 && !defined(HAVE_GETTIMEOFDAY_H)
+#include <sys/timeb.h>
+#endif
+#include <stdio.h>
+#include <signal.h>
+
+#include <sys/queue.h>
+#include "event.h"
+#include "event-internal.h"
+#include "evutil.h"
+#include "log.h"
+
+int
+evutil_socketpair(int family, int type, int protocol, int fd[2])
+{
+#ifndef WIN32
+	return socketpair(family, type, protocol, fd);
+#else
+	/* This code is originally from Tor.  Used with permission. */
+
+	/* This socketpair does not work when localhost is down. So
+	 * it's really not the same thing at all. But it's close enough
+	 * for now, and really, when localhost is down sometimes, we
+	 * have other problems too.
+	 */
+	int listener = -1;
+	int connector = -1;
+	int acceptor = -1;
+	struct sockaddr_in listen_addr;
+	struct sockaddr_in connect_addr;
+	int size;
+	int saved_errno = -1;
+
+	if (protocol
+#ifdef AF_UNIX
+		|| family != AF_UNIX
+#endif
+		) {
+		EVUTIL_SET_SOCKET_ERROR(WSAEAFNOSUPPORT);
+		return -1;
+	}
+	if (!fd) {
+		EVUTIL_SET_SOCKET_ERROR(WSAEINVAL);
+		return -1;
+	}
+
+	listener = socket(AF_INET, type, 0);
+	if (listener < 0)
+		return -1;
+	memset(&listen_addr, 0, sizeof(listen_addr));
+	listen_addr.sin_family = AF_INET;
+	listen_addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+	listen_addr.sin_port = 0;	/* kernel chooses port.	 */
+	if (bind(listener, (struct sockaddr *) &listen_addr, sizeof (listen_addr))
+		== -1)
+		goto tidy_up_and_fail;
+	if (listen(listener, 1) == -1)
+		goto tidy_up_and_fail;
+
+	connector = socket(AF_INET, type, 0);
+	if (connector < 0)
+		goto tidy_up_and_fail;
+	/* We want to find out the port number to connect to.  */
+	size = sizeof(connect_addr);
+	if (getsockname(listener, (struct sockaddr *) &connect_addr, &size) == -1)
+		goto tidy_up_and_fail;
+	if (size != sizeof (connect_addr))
+		goto abort_tidy_up_and_fail;
+	if (connect(connector, (struct sockaddr *) &connect_addr,
+				sizeof(connect_addr)) == -1)
+		goto tidy_up_and_fail;
+
+	size = sizeof(listen_addr);
+	acceptor = accept(listener, (struct sockaddr *) &listen_addr, &size);
+	if (acceptor < 0)
+		goto tidy_up_and_fail;
+	if (size != sizeof(listen_addr))
+		goto abort_tidy_up_and_fail;
+	EVUTIL_CLOSESOCKET(listener);
+	/* Now check we are talking to ourselves by matching port and host on the
+	   two sockets.	 */
+	if (getsockname(connector, (struct sockaddr *) &connect_addr, &size) == -1)
+		goto tidy_up_and_fail;
+	if (size != sizeof (connect_addr)
+		|| listen_addr.sin_family != connect_addr.sin_family
+		|| listen_addr.sin_addr.s_addr != connect_addr.sin_addr.s_addr
+		|| listen_addr.sin_port != connect_addr.sin_port)
+		goto abort_tidy_up_and_fail;
+	fd[0] = connector;
+	fd[1] = acceptor;
+
+	return 0;
+
+ abort_tidy_up_and_fail:
+	saved_errno = WSAECONNABORTED;
+ tidy_up_and_fail:
+	if (saved_errno < 0)
+		saved_errno = WSAGetLastError();
+	if (listener != -1)
+		EVUTIL_CLOSESOCKET(listener);
+	if (connector != -1)
+		EVUTIL_CLOSESOCKET(connector);
+	if (acceptor != -1)
+		EVUTIL_CLOSESOCKET(acceptor);
+
+	EVUTIL_SET_SOCKET_ERROR(saved_errno);
+	return -1;
+#endif
+}
+
+int
+evutil_make_socket_nonblocking(int fd)
+{
+#ifdef WIN32
+	{
+		unsigned long nonblocking = 1;
+		ioctlsocket(fd, FIONBIO, (unsigned long*) &nonblocking);
+	}
+#else
+	{
+		int flags;
+		if ((flags = fcntl(fd, F_GETFL, NULL)) < 0) {
+			event_warn("fcntl(%d, F_GETFL)", fd);
+			return -1;
+		}
+		if (fcntl(fd, F_SETFL, flags | O_NONBLOCK) == -1) {
+			event_warn("fcntl(%d, F_SETFL)", fd);
+			return -1;
+		}
+	}
+#endif
+	return 0;
+}
+
+ev_int64_t
+evutil_strtoll(const char *s, char **endptr, int base)
+{
+#ifdef HAVE_STRTOLL
+	return (ev_int64_t)strtoll(s, endptr, base);
+#elif SIZEOF_LONG == 8
+	return (ev_int64_t)strtol(s, endptr, base);
+#elif defined(WIN32) && defined(_MSC_VER) && _MSC_VER < 1300
+	/* XXXX on old versions of MS APIs, we only support base
+	 * 10. */
+	ev_int64_t r;
+	if (base != 10)
+		return 0;
+	r = (ev_int64_t) _atoi64(s);
+	while (isspace(*s))
+		++s;
+	while (isdigit(*s))
+		++s;
+	if (endptr)
+		*endptr = (char*) s;
+	return r;
+#elif defined(WIN32)
+	return (ev_int64_t) _strtoi64(s, endptr, base);
+#else
+#error "I don't know how to parse 64-bit integers."
+#endif
+}
+
+#ifndef _EVENT_HAVE_GETTIMEOFDAY
+int
+evutil_gettimeofday(struct timeval *tv, struct timezone *tz)
+{
+	struct _timeb tb;
+
+	if(tv == NULL)
+		return -1;
+
+	_ftime(&tb);
+	tv->tv_sec = (long) tb.time;
+	tv->tv_usec = ((int) tb.millitm) * 1000;
+	return 0;
+}
+#endif
+
+int
+evutil_snprintf(char *buf, size_t buflen, const char *format, ...)
+{
+	int r;
+	va_list ap;
+	va_start(ap, format);
+	r = evutil_vsnprintf(buf, buflen, format, ap);
+	va_end(ap);
+	return r;
+}
+
+int
+evutil_vsnprintf(char *buf, size_t buflen, const char *format, va_list ap)
+{
+#ifdef _MSC_VER
+	int r = _vsnprintf(buf, buflen, format, ap);
+	buf[buflen-1] = '\0';
+	if (r >= 0)
+		return r;
+	else
+		return _vscprintf(format, ap);
+#else
+	int r = vsnprintf(buf, buflen, format, ap);
+	buf[buflen-1] = '\0';
+	return r;
+#endif
+}
+
+static int
+evutil_issetugid(void)
+{
+#ifdef _EVENT_HAVE_ISSETUGID
+	return issetugid();
+#else
+
+#ifdef _EVENT_HAVE_GETEUID
+	if (getuid() != geteuid())
+		return 1;
+#endif
+#ifdef _EVENT_HAVE_GETEGID
+	if (getgid() != getegid())
+		return 1;
+#endif
+	return 0;
+#endif
+}
+
+const char *
+evutil_getenv(const char *varname)
+{
+	if (evutil_issetugid())
+		return NULL;
+
+	return getenv(varname);
+}
diff --git a/base/third_party/libevent/evutil.h b/base/third_party/libevent/evutil.h
new file mode 100644
index 0000000..8b664b9
--- /dev/null
+++ b/base/third_party/libevent/evutil.h
@@ -0,0 +1,186 @@
+/*
+ * Copyright (c) 2007 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _EVUTIL_H_
+#define _EVUTIL_H_
+
+/** @file evutil.h
+
+  Common convenience functions for cross-platform portability and
+  related socket manipulations.
+
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "event-config.h"
+#ifdef _EVENT_HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#ifdef _EVENT_HAVE_STDINT_H
+#include <stdint.h>
+#elif defined(_EVENT_HAVE_INTTYPES_H)
+#include <inttypes.h>
+#endif
+#ifdef _EVENT_HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+#include <stdarg.h>
+
+#ifdef _EVENT_HAVE_UINT64_T
+#define ev_uint64_t uint64_t
+#define ev_int64_t int64_t
+#elif defined(WIN32)
+#define ev_uint64_t unsigned __int64
+#define ev_int64_t signed __int64
+#elif _EVENT_SIZEOF_LONG_LONG == 8
+#define ev_uint64_t unsigned long long
+#define ev_int64_t long long
+#elif _EVENT_SIZEOF_LONG == 8
+#define ev_uint64_t unsigned long
+#define ev_int64_t long
+#else
+#error "No way to define ev_uint64_t"
+#endif
+
+#ifdef _EVENT_HAVE_UINT32_T
+#define ev_uint32_t uint32_t
+#elif defined(WIN32)
+#define ev_uint32_t unsigned int
+#elif _EVENT_SIZEOF_LONG == 4
+#define ev_uint32_t unsigned long
+#elif _EVENT_SIZEOF_INT == 4
+#define ev_uint32_t unsigned int
+#else
+#error "No way to define ev_uint32_t"
+#endif
+
+#ifdef _EVENT_HAVE_UINT16_T
+#define ev_uint16_t uint16_t
+#elif defined(WIN32)
+#define ev_uint16_t unsigned short
+#elif _EVENT_SIZEOF_INT == 2
+#define ev_uint16_t unsigned int
+#elif _EVENT_SIZEOF_SHORT == 2
+#define ev_uint16_t unsigned short
+#else
+#error "No way to define ev_uint16_t"
+#endif
+
+#ifdef _EVENT_HAVE_UINT8_T
+#define ev_uint8_t uint8_t
+#else
+#define ev_uint8_t unsigned char
+#endif
+
+int evutil_socketpair(int d, int type, int protocol, int sv[2]);
+int evutil_make_socket_nonblocking(int sock);
+#ifdef WIN32
+#define EVUTIL_CLOSESOCKET(s) closesocket(s)
+#else
+#define EVUTIL_CLOSESOCKET(s) close(s)
+#endif
+
+#ifdef WIN32
+#define EVUTIL_SOCKET_ERROR() WSAGetLastError()
+#define EVUTIL_SET_SOCKET_ERROR(errcode)		\
+	do { WSASetLastError(errcode); } while (0)
+#else
+#define EVUTIL_SOCKET_ERROR() (errno)
+#define EVUTIL_SET_SOCKET_ERROR(errcode)		\
+		do { errno = (errcode); } while (0)
+#endif
+
+/*
+ * Manipulation functions for struct timeval
+ */
+#ifdef _EVENT_HAVE_TIMERADD
+#define evutil_timeradd(tvp, uvp, vvp) timeradd((tvp), (uvp), (vvp))
+#define evutil_timersub(tvp, uvp, vvp) timersub((tvp), (uvp), (vvp))
+#else
+#define evutil_timeradd(tvp, uvp, vvp)							\
+	do {														\
+		(vvp)->tv_sec = (tvp)->tv_sec + (uvp)->tv_sec;			\
+		(vvp)->tv_usec = (tvp)->tv_usec + (uvp)->tv_usec;       \
+		if ((vvp)->tv_usec >= 1000000) {						\
+			(vvp)->tv_sec++;									\
+			(vvp)->tv_usec -= 1000000;							\
+		}														\
+	} while (0)
+#define	evutil_timersub(tvp, uvp, vvp)						\
+	do {													\
+		(vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec;		\
+		(vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec;	\
+		if ((vvp)->tv_usec < 0) {							\
+			(vvp)->tv_sec--;								\
+			(vvp)->tv_usec += 1000000;						\
+		}													\
+	} while (0)
+#endif /* !_EVENT_HAVE_TIMERADD */
+
+#ifdef _EVENT_HAVE_TIMERCLEAR
+#define evutil_timerclear(tvp) timerclear(tvp)
+#else
+#define	evutil_timerclear(tvp)	(tvp)->tv_sec = (tvp)->tv_usec = 0
+#endif
+
+#define	evutil_timercmp(tvp, uvp, cmp)							\
+	(((tvp)->tv_sec == (uvp)->tv_sec) ?							\
+	 ((tvp)->tv_usec cmp (uvp)->tv_usec) :						\
+	 ((tvp)->tv_sec cmp (uvp)->tv_sec))
+
+#ifdef _EVENT_HAVE_TIMERISSET
+#define evutil_timerisset(tvp) timerisset(tvp)
+#else
+#define	evutil_timerisset(tvp)	((tvp)->tv_sec || (tvp)->tv_usec)
+#endif
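+
+/*
+ * Illustrative usage of the wrappers above, which fall back to portable
+ * definitions when the native BSD macros are unavailable:
+ *
+ *	struct timeval start, end, diff, limit = {1, 0};
+ *	evutil_gettimeofday(&start, NULL);
+ *	... do work ...
+ *	evutil_gettimeofday(&end, NULL);
+ *	evutil_timersub(&end, &start, &diff);
+ *	if (evutil_timercmp(&diff, &limit, >))
+ *		;	/* took longer than one second */
+ */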
+
+
+/* big-int related functions */
+ev_int64_t evutil_strtoll(const char *s, char **endptr, int base);
+
+
+#ifdef _EVENT_HAVE_GETTIMEOFDAY
+#define evutil_gettimeofday(tv, tz) gettimeofday((tv), (tz))
+#else
+struct timezone;
+int evutil_gettimeofday(struct timeval *tv, struct timezone *tz);
+#endif
+
+int evutil_snprintf(char *buf, size_t buflen, const char *format, ...)
+#ifdef __GNUC__
+	__attribute__((format(printf, 3, 4)))
+#endif
+	;
+int evutil_vsnprintf(char *buf, size_t buflen, const char *format, va_list ap);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _EVUTIL_H_ */
diff --git a/base/third_party/libevent/freebsd/config.h b/base/third_party/libevent/freebsd/config.h
new file mode 100644
index 0000000..4fe3d6b
--- /dev/null
+++ b/base/third_party/libevent/freebsd/config.h
@@ -0,0 +1,266 @@
+/* config.h.  Generated from config.h.in by configure.  */
+/* config.h.in.  Generated from configure.in by autoheader.  */
+
+/* Define if clock_gettime is available in libc */
+#define DNS_USE_CPU_CLOCK_FOR_ID 1
+
+/* Define if no secure id variant is available */
+/* #undef DNS_USE_GETTIMEOFDAY_FOR_ID */
+
+/* Define to 1 if you have the `clock_gettime' function. */
+#define HAVE_CLOCK_GETTIME 1
+
+/* Define if /dev/poll is available */
+/* #undef HAVE_DEVPOLL */
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+#define HAVE_DLFCN_H 1
+
+/* Define if your system supports the epoll system calls */
+/* #undef HAVE_EPOLL */
+
+/* Define to 1 if you have the `epoll_ctl' function. */
+/* #undef HAVE_EPOLL_CTL */
+
+/* Define if your system supports event ports */
+/* #undef HAVE_EVENT_PORTS */
+
+/* Define to 1 if you have the `fcntl' function. */
+#define HAVE_FCNTL 1
+
+/* Define to 1 if you have the <fcntl.h> header file. */
+#define HAVE_FCNTL_H 1
+
+/* Define to 1 if the system has the type `fd_mask'. */
+#define HAVE_FD_MASK 1
+
+/* Define to 1 if you have the `getaddrinfo' function. */
+#define HAVE_GETADDRINFO 1
+
+/* Define to 1 if you have the `getegid' function. */
+#define HAVE_GETEGID 1
+
+/* Define to 1 if you have the `geteuid' function. */
+#define HAVE_GETEUID 1
+
+/* Define to 1 if you have the `getnameinfo' function. */
+#define HAVE_GETNAMEINFO 1
+
+/* Define to 1 if you have the `gettimeofday' function. */
+#define HAVE_GETTIMEOFDAY 1
+
+/* Define to 1 if you have the `inet_ntop' function. */
+#define HAVE_INET_NTOP 1
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#define HAVE_INTTYPES_H 1
+
+/* Define to 1 if you have the `issetugid' function. */
+#define HAVE_ISSETUGID 1
+
+/* Define to 1 if you have the `kqueue' function. */
+#define HAVE_KQUEUE 1
+
+/* Define to 1 if you have the `nsl' library (-lnsl). */
+/* #undef HAVE_LIBNSL */
+
+/* Define to 1 if you have the `resolv' library (-lresolv). */
+/* #undef HAVE_LIBRESOLV */
+
+/* Define to 1 if you have the `rt' library (-lrt). */
+#define HAVE_LIBRT 1
+
+/* Define to 1 if you have the `socket' library (-lsocket). */
+/* #undef HAVE_LIBSOCKET */
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define HAVE_MEMORY_H 1
+
+/* Define to 1 if you have the <netinet/in6.h> header file. */
+/* #undef HAVE_NETINET_IN6_H */
+
+/* Define to 1 if you have the `poll' function. */
+#define HAVE_POLL 1
+
+/* Define to 1 if you have the <poll.h> header file. */
+#define HAVE_POLL_H 1
+
+/* Define to 1 if you have the `port_create' function. */
+/* #undef HAVE_PORT_CREATE */
+
+/* Define to 1 if you have the <port.h> header file. */
+/* #undef HAVE_PORT_H */
+
+/* Define to 1 if you have the `select' function. */
+#define HAVE_SELECT 1
+
+/* Define if F_SETFD is defined in <fcntl.h> */
+#define HAVE_SETFD 1
+
+/* Define to 1 if you have the `sigaction' function. */
+#define HAVE_SIGACTION 1
+
+/* Define to 1 if you have the `signal' function. */
+#define HAVE_SIGNAL 1
+
+/* Define to 1 if you have the <signal.h> header file. */
+#define HAVE_SIGNAL_H 1
+
+/* Define to 1 if you have the <stdarg.h> header file. */
+#define HAVE_STDARG_H 1
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#define HAVE_STDINT_H 1
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define HAVE_STRING_H 1
+
+/* Define to 1 if you have the `strlcpy' function. */
+#define HAVE_STRLCPY 1
+
+/* Define to 1 if you have the `strsep' function. */
+#define HAVE_STRSEP 1
+
+/* Define to 1 if you have the `strtok_r' function. */
+#define HAVE_STRTOK_R 1
+
+/* Define to 1 if you have the `strtoll' function. */
+#define HAVE_STRTOLL 1
+
+/* Define to 1 if the system has the type `struct in6_addr'. */
+#define HAVE_STRUCT_IN6_ADDR 1
+
+/* Define to 1 if you have the <sys/devpoll.h> header file. */
+/* #undef HAVE_SYS_DEVPOLL_H */
+
+/* Define to 1 if you have the <sys/epoll.h> header file. */
+/* #undef HAVE_SYS_EPOLL_H */
+
+/* Define to 1 if you have the <sys/event.h> header file. */
+#define HAVE_SYS_EVENT_H 1
+
+/* Define to 1 if you have the <sys/ioctl.h> header file. */
+#define HAVE_SYS_IOCTL_H 1
+
+/* Define to 1 if you have the <sys/param.h> header file. */
+#define HAVE_SYS_PARAM_H 1
+
+/* Define to 1 if you have the <sys/queue.h> header file. */
+#define HAVE_SYS_QUEUE_H 1
+
+/* Define to 1 if you have the <sys/select.h> header file. */
+#define HAVE_SYS_SELECT_H 1
+
+/* Define to 1 if you have the <sys/socket.h> header file. */
+#define HAVE_SYS_SOCKET_H 1
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#define HAVE_SYS_STAT_H 1
+
+/* Define to 1 if you have the <sys/time.h> header file. */
+#define HAVE_SYS_TIME_H 1
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#define HAVE_SYS_TYPES_H 1
+
+/* Define if TAILQ_FOREACH is defined in <sys/queue.h> */
+#define HAVE_TAILQFOREACH 1
+
+/* Define if timeradd is defined in <sys/time.h> */
+#define HAVE_TIMERADD 1
+
+/* Define if timerclear is defined in <sys/time.h> */
+#define HAVE_TIMERCLEAR 1
+
+/* Define if timercmp is defined in <sys/time.h> */
+#define HAVE_TIMERCMP 1
+
+/* Define if timerisset is defined in <sys/time.h> */
+#define HAVE_TIMERISSET 1
+
+/* Define to 1 if the system has the type `uint16_t'. */
+#define HAVE_UINT16_T 1
+
+/* Define to 1 if the system has the type `uint32_t'. */
+#define HAVE_UINT32_T 1
+
+/* Define to 1 if the system has the type `uint64_t'. */
+#define HAVE_UINT64_T 1
+
+/* Define to 1 if the system has the type `uint8_t'. */
+#define HAVE_UINT8_T 1
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#define HAVE_UNISTD_H 1
+
+/* Define to 1 if you have the `vasprintf' function. */
+#define HAVE_VASPRINTF 1
+
+/* Define if kqueue works correctly with pipes */
+#define HAVE_WORKING_KQUEUE 1
+
+/* Name of package */
+#define PACKAGE "libevent"
+
+/* Define to the address where bug reports for this package should be sent. */
+#define PACKAGE_BUGREPORT ""
+
+/* Define to the full name of this package. */
+#define PACKAGE_NAME ""
+
+/* Define to the full name and version of this package. */
+#define PACKAGE_STRING ""
+
+/* Define to the one symbol short name of this package. */
+#define PACKAGE_TARNAME ""
+
+/* Define to the version of this package. */
+#define PACKAGE_VERSION ""
+
+/* The size of `int', as computed by sizeof. */
+#define SIZEOF_INT 4
+
+/* The size of `long', as computed by sizeof. */
+#define SIZEOF_LONG 8
+
+/* The size of `long long', as computed by sizeof. */
+#define SIZEOF_LONG_LONG 8
+
+/* The size of `short', as computed by sizeof. */
+#define SIZEOF_SHORT 2
+
+/* Define to 1 if you have the ANSI C header files. */
+#define STDC_HEADERS 1
+
+/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
+#define TIME_WITH_SYS_TIME 1
+
+/* Version number of package */
+#define VERSION "1.4.13-stable"
+
+/* Define to appropriate substitute if compiler doesn't have __func__ */
+/* #undef __func__ */
+
+/* Define to empty if `const' does not conform to ANSI C. */
+/* #undef const */
+
+/* Define to `__inline__' or `__inline' if that's what the C compiler
+   calls it, or to nothing if 'inline' is not supported under any name.  */
+#ifndef __cplusplus
+/* #undef inline */
+#endif
+
+/* Define to `int' if <sys/types.h> does not define. */
+/* #undef pid_t */
+
+/* Define to `unsigned int' if <sys/types.h> does not define. */
+/* #undef size_t */
+
+/* Define to unsigned int if you don't have it */
+/* #undef socklen_t */
diff --git a/base/third_party/libevent/freebsd/event-config.h b/base/third_party/libevent/freebsd/event-config.h
new file mode 100644
index 0000000..be1eae4
--- /dev/null
+++ b/base/third_party/libevent/freebsd/event-config.h
@@ -0,0 +1,284 @@
+/* event-config.h
+ * Generated by autoconf; post-processed by libevent.
+ * Do not edit this file.
+ * Do not rely on macros in this file existing in later versions.
+ */
+#ifndef _EVENT_CONFIG_H_
+#define _EVENT_CONFIG_H_
+/* config.h.  Generated from config.h.in by configure.  */
+/* config.h.in.  Generated from configure.in by autoheader.  */
+
+/* Define if clock_gettime is available in libc */
+#define _EVENT_DNS_USE_CPU_CLOCK_FOR_ID 1
+
+/* Define if no secure id variant is available */
+/* #undef _EVENT_DNS_USE_GETTIMEOFDAY_FOR_ID */
+
+/* Define to 1 if you have the `clock_gettime' function. */
+#define _EVENT_HAVE_CLOCK_GETTIME 1
+
+/* Define if /dev/poll is available */
+/* #undef _EVENT_HAVE_DEVPOLL */
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+#define _EVENT_HAVE_DLFCN_H 1
+
+/* Define if your system supports the epoll system calls */
+/* #undef _EVENT_HAVE_EPOLL */
+
+/* Define to 1 if you have the `epoll_ctl' function. */
+/* #undef _EVENT_HAVE_EPOLL_CTL */
+
+/* Define if your system supports event ports */
+/* #undef _EVENT_HAVE_EVENT_PORTS */
+
+/* Define to 1 if you have the `fcntl' function. */
+#define _EVENT_HAVE_FCNTL 1
+
+/* Define to 1 if you have the <fcntl.h> header file. */
+#define _EVENT_HAVE_FCNTL_H 1
+
+/* Define to 1 if the system has the type `fd_mask'. */
+#define _EVENT_HAVE_FD_MASK 1
+
+/* Define to 1 if you have the `getaddrinfo' function. */
+#define _EVENT_HAVE_GETADDRINFO 1
+
+/* Define to 1 if you have the `getegid' function. */
+#define _EVENT_HAVE_GETEGID 1
+
+/* Define to 1 if you have the `geteuid' function. */
+#define _EVENT_HAVE_GETEUID 1
+
+/* Define to 1 if you have the `getnameinfo' function. */
+#define _EVENT_HAVE_GETNAMEINFO 1
+
+/* Define to 1 if you have the `gettimeofday' function. */
+#define _EVENT_HAVE_GETTIMEOFDAY 1
+
+/* Define to 1 if you have the `inet_ntop' function. */
+#define _EVENT_HAVE_INET_NTOP 1
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#define _EVENT_HAVE_INTTYPES_H 1
+
+/* Define to 1 if you have the `issetugid' function. */
+#define _EVENT_HAVE_ISSETUGID 1
+
+/* Define to 1 if you have the `kqueue' function. */
+#define _EVENT_HAVE_KQUEUE 1
+
+/* Define to 1 if you have the `nsl' library (-lnsl). */
+/* #undef _EVENT_HAVE_LIBNSL */
+
+/* Define to 1 if you have the `resolv' library (-lresolv). */
+/* #undef _EVENT_HAVE_LIBRESOLV */
+
+/* Define to 1 if you have the `rt' library (-lrt). */
+#define _EVENT_HAVE_LIBRT 1
+
+/* Define to 1 if you have the `socket' library (-lsocket). */
+/* #undef _EVENT_HAVE_LIBSOCKET */
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define _EVENT_HAVE_MEMORY_H 1
+
+/* Define to 1 if you have the <netinet/in6.h> header file. */
+/* #undef _EVENT_HAVE_NETINET_IN6_H */
+
+/* Define to 1 if you have the `poll' function. */
+#define _EVENT_HAVE_POLL 1
+
+/* Define to 1 if you have the <poll.h> header file. */
+#define _EVENT_HAVE_POLL_H 1
+
+/* Define to 1 if you have the `port_create' function. */
+/* #undef _EVENT_HAVE_PORT_CREATE */
+
+/* Define to 1 if you have the <port.h> header file. */
+/* #undef _EVENT_HAVE_PORT_H */
+
+/* Define to 1 if you have the `select' function. */
+#define _EVENT_HAVE_SELECT 1
+
+/* Define if F_SETFD is defined in <fcntl.h> */
+#define _EVENT_HAVE_SETFD 1
+
+/* Define to 1 if you have the `sigaction' function. */
+#define _EVENT_HAVE_SIGACTION 1
+
+/* Define to 1 if you have the `signal' function. */
+#define _EVENT_HAVE_SIGNAL 1
+
+/* Define to 1 if you have the <signal.h> header file. */
+#define _EVENT_HAVE_SIGNAL_H 1
+
+/* Define to 1 if you have the <stdarg.h> header file. */
+#define _EVENT_HAVE_STDARG_H 1
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#define _EVENT_HAVE_STDINT_H 1
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define _EVENT_HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define _EVENT_HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define _EVENT_HAVE_STRING_H 1
+
+/* Define to 1 if you have the `strlcpy' function. */
+#define _EVENT_HAVE_STRLCPY 1
+
+/* Define to 1 if you have the `strsep' function. */
+#define _EVENT_HAVE_STRSEP 1
+
+/* Define to 1 if you have the `strtok_r' function. */
+#define _EVENT_HAVE_STRTOK_R 1
+
+/* Define to 1 if you have the `strtoll' function. */
+#define _EVENT_HAVE_STRTOLL 1
+
+/* Define to 1 if the system has the type `struct in6_addr'. */
+#define _EVENT_HAVE_STRUCT_IN6_ADDR 1
+
+/* Define to 1 if you have the <sys/devpoll.h> header file. */
+/* #undef _EVENT_HAVE_SYS_DEVPOLL_H */
+
+/* Define to 1 if you have the <sys/epoll.h> header file. */
+/* #undef _EVENT_HAVE_SYS_EPOLL_H */
+
+/* Define to 1 if you have the <sys/event.h> header file. */
+#define _EVENT_HAVE_SYS_EVENT_H 1
+
+/* Define to 1 if you have the <sys/ioctl.h> header file. */
+#define _EVENT_HAVE_SYS_IOCTL_H 1
+
+/* Define to 1 if you have the <sys/param.h> header file. */
+#define _EVENT_HAVE_SYS_PARAM_H 1
+
+/* Define to 1 if you have the <sys/queue.h> header file. */
+#define _EVENT_HAVE_SYS_QUEUE_H 1
+
+/* Define to 1 if you have the <sys/select.h> header file. */
+#define _EVENT_HAVE_SYS_SELECT_H 1
+
+/* Define to 1 if you have the <sys/socket.h> header file. */
+#define _EVENT_HAVE_SYS_SOCKET_H 1
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#define _EVENT_HAVE_SYS_STAT_H 1
+
+/* Define to 1 if you have the <sys/time.h> header file. */
+#define _EVENT_HAVE_SYS_TIME_H 1
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#define _EVENT_HAVE_SYS_TYPES_H 1
+
+/* Define if TAILQ_FOREACH is defined in <sys/queue.h> */
+#define _EVENT_HAVE_TAILQFOREACH 1
+
+/* Define if timeradd is defined in <sys/time.h> */
+#define _EVENT_HAVE_TIMERADD 1
+
+/* Define if timerclear is defined in <sys/time.h> */
+#define _EVENT_HAVE_TIMERCLEAR 1
+
+/* Define if timercmp is defined in <sys/time.h> */
+#define _EVENT_HAVE_TIMERCMP 1
+
+/* Define if timerisset is defined in <sys/time.h> */
+#define _EVENT_HAVE_TIMERISSET 1
+
+/* Define to 1 if the system has the type `uint16_t'. */
+#define _EVENT_HAVE_UINT16_T 1
+
+/* Define to 1 if the system has the type `uint32_t'. */
+#define _EVENT_HAVE_UINT32_T 1
+
+/* Define to 1 if the system has the type `uint64_t'. */
+#define _EVENT_HAVE_UINT64_T 1
+
+/* Define to 1 if the system has the type `uint8_t'. */
+#define _EVENT_HAVE_UINT8_T 1
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#define _EVENT_HAVE_UNISTD_H 1
+
+/* Define to 1 if you have the `vasprintf' function. */
+#define _EVENT_HAVE_VASPRINTF 1
+
+/* Define if kqueue works correctly with pipes */
+#define _EVENT_HAVE_WORKING_KQUEUE 1
+
+/* Define to the sub-directory in which libtool stores uninstalled libraries.
+   */
+#define _EVENT_LT_OBJDIR ".libs/"
+
+/* Numeric representation of the version */
+#define _EVENT_NUMERIC_VERSION 0x01040f00
+
+/* Name of package */
+#define _EVENT_PACKAGE "libevent"
+
+/* Define to the address where bug reports for this package should be sent. */
+#define _EVENT_PACKAGE_BUGREPORT ""
+
+/* Define to the full name of this package. */
+#define _EVENT_PACKAGE_NAME ""
+
+/* Define to the full name and version of this package. */
+#define _EVENT_PACKAGE_STRING ""
+
+/* Define to the one symbol short name of this package. */
+#define _EVENT_PACKAGE_TARNAME ""
+
+/* Define to the home page for this package. */
+#define _EVENT_PACKAGE_URL ""
+
+/* Define to the version of this package. */
+#define _EVENT_PACKAGE_VERSION ""
+
+/* The size of `int', as computed by sizeof. */
+#define _EVENT_SIZEOF_INT 4
+
+/* The size of `long', as computed by sizeof. */
+#define _EVENT_SIZEOF_LONG 8
+
+/* The size of `long long', as computed by sizeof. */
+#define _EVENT_SIZEOF_LONG_LONG 8
+
+/* The size of `short', as computed by sizeof. */
+#define _EVENT_SIZEOF_SHORT 2
+
+/* Define to 1 if you have the ANSI C header files. */
+#define _EVENT_STDC_HEADERS 1
+
+/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
+#define _EVENT_TIME_WITH_SYS_TIME 1
+
+/* Version number of package */
+#define _EVENT_VERSION "1.4.15"
+
+/* Define to appropriate substitute if compiler doesn't have __func__ */
+/* #undef _EVENT___func__ */
+
+/* Define to empty if `const' does not conform to ANSI C. */
+/* #undef _EVENT_const */
+
+/* Define to `__inline__' or `__inline' if that's what the C compiler
+   calls it, or to nothing if 'inline' is not supported under any name.  */
+#ifndef _EVENT___cplusplus
+/* #undef _EVENT_inline */
+#endif
+
+/* Define to `int' if <sys/types.h> does not define. */
+/* #undef _EVENT_pid_t */
+
+/* Define to `unsigned int' if <sys/types.h> does not define. */
+/* #undef _EVENT_size_t */
+
+/* Define to unsigned int if you don't have it */
+/* #undef _EVENT_socklen_t */
+#endif
diff --git a/base/third_party/libevent/http-internal.h b/base/third_party/libevent/http-internal.h
new file mode 100644
index 0000000..1c4c3db
--- /dev/null
+++ b/base/third_party/libevent/http-internal.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright 2001 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * This header file contains definitions for dealing with HTTP requests
+ * that are internal to libevent.  As a user of the library, you should
+ * not need to know about these.
+ */
+
+#ifndef _HTTP_H_
+#define _HTTP_H_
+
+#define HTTP_CONNECT_TIMEOUT	45
+#define HTTP_WRITE_TIMEOUT	50
+#define HTTP_READ_TIMEOUT	50
+
+#define HTTP_PREFIX		"http://"
+#define HTTP_DEFAULTPORT	80
+
+enum message_read_status {
+	ALL_DATA_READ = 1,
+	MORE_DATA_EXPECTED = 0,
+	DATA_CORRUPTED = -1,
+	REQUEST_CANCELED = -2
+};
+
+enum evhttp_connection_error {
+	EVCON_HTTP_TIMEOUT,
+	EVCON_HTTP_EOF,
+	EVCON_HTTP_INVALID_HEADER
+};
+
+struct evbuffer;
+struct evhttp_request;
+
+/* A stupid connection object - maybe make this a bufferevent later */
+
+enum evhttp_connection_state {
+	EVCON_DISCONNECTED,	/**< not currently connected, not trying either */
+	EVCON_CONNECTING,	/**< currently trying to connect */
+	EVCON_IDLE,		/**< connection is established */
+	EVCON_READING_FIRSTLINE,/**< reading Request-Line (incoming conn) or
+				 **< Status-Line (outgoing conn) */
+	EVCON_READING_HEADERS,	/**< reading request/response headers */
+	EVCON_READING_BODY,	/**< reading request/response body */
+	EVCON_READING_TRAILER,	/**< reading request/response chunked trailer */
+	EVCON_WRITING		/**< writing request/response headers/body */
+};
+
+struct event_base;
+
+struct evhttp_connection {
+	/* we use tailq only if they were created for an http server */
+	TAILQ_ENTRY(evhttp_connection) (next);
+
+	int fd;
+	struct event ev;
+	struct event close_ev;
+	struct evbuffer *input_buffer;
+	struct evbuffer *output_buffer;
+	
+	char *bind_address;		/* address to use for binding the src */
+	u_short bind_port;		/* local port for binding the src */
+
+	char *address;			/* address to connect to */
+	u_short port;
+
+	int flags;
+#define EVHTTP_CON_INCOMING	0x0001	/* only one request on it ever */
+#define EVHTTP_CON_OUTGOING	0x0002  /* multiple requests possible */
+#define EVHTTP_CON_CLOSEDETECT  0x0004  /* detecting if persistent close */
+
+	int timeout;			/* timeout in seconds for events */
+	int retry_cnt;			/* retry count */
+	int retry_max;			/* maximum number of retries */
+	
+	enum evhttp_connection_state state;
+
+	/* for server connections, the http server they are connected with */
+	struct evhttp *http_server;
+
+	TAILQ_HEAD(evcon_requestq, evhttp_request) requests;
+
+	void (*cb)(struct evhttp_connection *, void *);
+	void *cb_arg;
+	
+	void (*closecb)(struct evhttp_connection *, void *);
+	void *closecb_arg;
+
+	struct event_base *base;
+};
+
+struct evhttp_cb {
+	TAILQ_ENTRY(evhttp_cb) next;
+
+	char *what;
+
+	void (*cb)(struct evhttp_request *req, void *);
+	void *cbarg;
+};
+
+/* both the http server and the rpc system need to queue connections */
+TAILQ_HEAD(evconq, evhttp_connection);
+
+/* each bound socket is stored in one of these */
+struct evhttp_bound_socket {
+	TAILQ_ENTRY(evhttp_bound_socket) (next);
+
+	struct event  bind_ev;
+};
+
+struct evhttp {
+	TAILQ_HEAD(boundq, evhttp_bound_socket) sockets;
+
+	TAILQ_HEAD(httpcbq, evhttp_cb) callbacks;
+	struct evconq connections;
+
+	int timeout;
+
+	void (*gencb)(struct evhttp_request *req, void *);
+	void *gencbarg;
+
+	struct event_base *base;
+};
+
+/* resets the connection; can be reused for more requests */
+void evhttp_connection_reset(struct evhttp_connection *);
+
+/* connects if necessary */
+int evhttp_connection_connect(struct evhttp_connection *);
+
+/* notifies the current request that it failed; resets connection */
+void evhttp_connection_fail(struct evhttp_connection *,
+    enum evhttp_connection_error error);
+
+void evhttp_get_request(struct evhttp *, int, struct sockaddr *, socklen_t);
+
+int evhttp_hostportfile(char *, char **, u_short *, char **);
+
+int evhttp_parse_firstline(struct evhttp_request *, struct evbuffer*);
+int evhttp_parse_headers(struct evhttp_request *, struct evbuffer*);
+
+void evhttp_start_read(struct evhttp_connection *);
+void evhttp_make_header(struct evhttp_connection *, struct evhttp_request *);
+
+void evhttp_write_buffer(struct evhttp_connection *,
+    void (*)(struct evhttp_connection *, void *), void *);
+
+/* sends an HTML response with the data in the buffer */
+void evhttp_response_code(struct evhttp_request *, int, const char *);
+void evhttp_send_page(struct evhttp_request *, struct evbuffer *);
+
+#endif /* _HTTP_H_ */
diff --git a/base/third_party/libevent/http.c b/base/third_party/libevent/http.c
new file mode 100644
index 0000000..4abce23
--- /dev/null
+++ b/base/third_party/libevent/http.c
@@ -0,0 +1,2885 @@
+/*
+ * Copyright (c) 2002-2006 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#ifdef HAVE_SYS_PARAM_H
+#include <sys/param.h>
+#endif
+#ifdef HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#ifdef HAVE_SYS_IOCCOM_H
+#include <sys/ioccom.h>
+#endif
+
+#ifndef WIN32
+#include <sys/resource.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/wait.h>
+#endif
+
+#include <sys/queue.h>
+
+#ifndef WIN32
+#include <netinet/in.h>
+#include <netdb.h>
+#endif
+
+#ifdef WIN32
+#include <winsock2.h>
+#endif
+
+#include <assert.h>
+#include <ctype.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifndef WIN32
+#include <syslog.h>
+#endif
+#include <signal.h>
+#include <time.h>
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#ifdef HAVE_FCNTL_H
+#include <fcntl.h>
+#endif
+
+#undef timeout_pending
+#undef timeout_initialized
+
+#include "strlcpy-internal.h"
+#include "event.h"
+#include "evhttp.h"
+#include "evutil.h"
+#include "log.h"
+#include "http-internal.h"
+
+#ifdef WIN32
+#define strcasecmp _stricmp
+#define strncasecmp _strnicmp
+#define strdup _strdup
+#endif
+
+#ifndef HAVE_GETNAMEINFO
+#define NI_MAXSERV 32
+#define NI_MAXHOST 1025
+
+#ifndef NI_NUMERICHOST
+#define NI_NUMERICHOST 1
+#endif
+
+#ifndef NI_NUMERICSERV
+#define NI_NUMERICSERV 2
+#endif
+
+static int
+fake_getnameinfo(const struct sockaddr *sa, size_t salen, char *host,
+	size_t hostlen, char *serv, size_t servlen, int flags)
+{
+	struct sockaddr_in *sin = (struct sockaddr_in *)sa;
+
+	if (serv != NULL) {
+		char tmpserv[16];
+		evutil_snprintf(tmpserv, sizeof(tmpserv),
+		    "%d", ntohs(sin->sin_port));
+		if (strlcpy(serv, tmpserv, servlen) >= servlen)
+			return (-1);
+	}
+
+	if (host != NULL) {
+		if (flags & NI_NUMERICHOST) {
+			if (strlcpy(host, inet_ntoa(sin->sin_addr),
+			    hostlen) >= hostlen)
+				return (-1);
+			else
+				return (0);
+		} else {
+			struct hostent *hp;
+			hp = gethostbyaddr((char *)&sin->sin_addr,
+			    sizeof(struct in_addr), AF_INET);
+			if (hp == NULL)
+				return (-2);
+
+			if (strlcpy(host, hp->h_name, hostlen) >= hostlen)
+				return (-1);
+			else
+				return (0);
+		}
+	}
+	return (0);
+}
+
+#endif
+
+#ifndef HAVE_GETADDRINFO
+/* Apparently MSVC 2010 already has an addrinfo definition visible here */
+#if !defined(WIN32) || !defined(_MSC_VER) || (_MSC_VER < 1600)
+struct addrinfo {
+	int ai_family;
+	int ai_socktype;
+	int ai_protocol;
+	size_t ai_addrlen;
+	struct sockaddr *ai_addr;
+	struct addrinfo *ai_next;
+};
+#endif
+static int
+fake_getaddrinfo(const char *hostname, struct addrinfo *ai)
+{
+	struct hostent *he = NULL;
+	struct sockaddr_in *sa;
+	if (hostname) {
+		he = gethostbyname(hostname);
+		if (!he)
+			return (-1);
+	}
+	ai->ai_family = he ? he->h_addrtype : AF_INET;
+	ai->ai_socktype = SOCK_STREAM;
+	ai->ai_protocol = 0;
+	ai->ai_addrlen = sizeof(struct sockaddr_in);
+	if (NULL == (ai->ai_addr = malloc(ai->ai_addrlen)))
+		return (-1);
+	sa = (struct sockaddr_in*)ai->ai_addr;
+	memset(sa, 0, ai->ai_addrlen);
+	if (he) {
+		sa->sin_family = he->h_addrtype;
+		memcpy(&sa->sin_addr, he->h_addr_list[0], he->h_length);
+	} else {
+		sa->sin_family = AF_INET;
+		sa->sin_addr.s_addr = INADDR_ANY;
+	}
+	ai->ai_next = NULL;
+	return (0);
+}
+static void
+fake_freeaddrinfo(struct addrinfo *ai)
+{
+	free(ai->ai_addr);
+}
+#endif
+
+#ifndef MIN
+#define MIN(a,b) (((a)<(b))?(a):(b))
+#endif
+
+/* wrapper for setting the base from the http server */
+#define EVHTTP_BASE_SET(x, y) do { \
+	if ((x)->base != NULL) event_base_set((x)->base, y);	\
+} while (0) 
+
+extern int debug;
+
+static int socket_connect(int fd, const char *address, unsigned short port);
+static int bind_socket_ai(struct addrinfo *, int reuse);
+static int bind_socket(const char *, u_short, int reuse);
+static void name_from_addr(struct sockaddr *, socklen_t, char **, char **);
+static int evhttp_associate_new_request_with_connection(
+	struct evhttp_connection *evcon);
+static void evhttp_connection_start_detectclose(
+	struct evhttp_connection *evcon);
+static void evhttp_connection_stop_detectclose(
+	struct evhttp_connection *evcon);
+static void evhttp_request_dispatch(struct evhttp_connection* evcon);
+static void evhttp_read_firstline(struct evhttp_connection *evcon,
+				  struct evhttp_request *req);
+static void evhttp_read_header(struct evhttp_connection *evcon,
+    struct evhttp_request *req);
+static int evhttp_add_header_internal(struct evkeyvalq *headers,
+    const char *key, const char *value);
+static int evhttp_decode_uri_internal(const char *uri, size_t length,
+    char *ret, int always_decode_plus);
+
+void evhttp_read(int, short, void *);
+void evhttp_write(int, short, void *);
+
+#ifndef HAVE_STRSEP
+/* strsep replacement for platforms that lack it.  Only works if
+ * del is one character long. */
+static char *
+strsep(char **s, const char *del)
+{
+	char *d, *tok;
+	assert(strlen(del) == 1);
+	if (!s || !*s)
+		return NULL;
+	tok = *s;
+	d = strstr(tok, del);
+	if (d) {
+		*d = '\0';
+		*s = d + 1;
+	} else
+		*s = NULL;
+	return tok;
+}
+#endif
+
+static const char *
+html_replace(char ch, char *buf)
+{
+	switch (ch) {
+	case '<':
+		return "&lt;";
+	case '>':
+		return "&gt;";
+	case '"':
+		return "&quot;";
+	case '\'':
+		return "&#039;";
+	case '&':
+		return "&amp;";
+	default:
+		break;
+	}
+
+	/* Echo the character back */
+	buf[0] = ch;
+	buf[1] = '\0';
+	
+	return buf;
+}
+
+/*
+ * Replaces <, >, ", ' and & with &lt;, &gt;, &quot;,
+ * &#039; and &amp;, respectively.
+ *
+ * The returned string needs to be freed by the caller.
+ */
+
+char *
+evhttp_htmlescape(const char *html)
+{
+	int i, new_size = 0, old_size = strlen(html);
+	char *escaped_html, *p;
+	char scratch_space[2];
+	
+	for (i = 0; i < old_size; ++i)
+          new_size += strlen(html_replace(html[i], scratch_space));
+
+	p = escaped_html = malloc(new_size + 1);
+	if (escaped_html == NULL)
+		event_err(1, "%s: malloc(%d)", __func__, new_size + 1);
+	for (i = 0; i < old_size; ++i) {
+		const char *replaced = html_replace(html[i], scratch_space);
+		/* this is length checked */
+		strcpy(p, replaced);
+		p += strlen(replaced);
+	}
+
+	*p = '\0';
+
+	return (escaped_html);
+}
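+
+/*
+ * Illustrative usage (the caller owns and must free the result):
+ *
+ *	char *safe = evhttp_htmlescape("<a href=\"x\">&</a>");
+ *	/* safe == "&lt;a href=&quot;x&quot;&gt;&amp;&lt;/a&gt;" */
+ *	free(safe);
+ */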
+
+static const char *
+evhttp_method(enum evhttp_cmd_type type)
+{
+	const char *method;
+
+	switch (type) {
+	case EVHTTP_REQ_GET:
+		method = "GET";
+		break;
+	case EVHTTP_REQ_POST:
+		method = "POST";
+		break;
+	case EVHTTP_REQ_HEAD:
+		method = "HEAD";
+		break;
+	default:
+		method = NULL;
+		break;
+	}
+
+	return (method);
+}
+
+static void
+evhttp_add_event(struct event *ev, int timeout, int default_timeout)
+{
+	if (timeout != 0) {
+		struct timeval tv;
+		
+		evutil_timerclear(&tv);
+		tv.tv_sec = timeout != -1 ? timeout : default_timeout;
+		event_add(ev, &tv);
+	} else {
+		event_add(ev, NULL);
+	}
+}
+
+void
+evhttp_write_buffer(struct evhttp_connection *evcon,
+    void (*cb)(struct evhttp_connection *, void *), void *arg)
+{
+	event_debug(("%s: preparing to write buffer\n", __func__));
+
+	/* Set callback */
+	evcon->cb = cb;
+	evcon->cb_arg = arg;
+
+	/* check if the event is already pending */
+	if (event_pending(&evcon->ev, EV_WRITE|EV_TIMEOUT, NULL))
+		event_del(&evcon->ev);
+
+	event_set(&evcon->ev, evcon->fd, EV_WRITE, evhttp_write, evcon);
+	EVHTTP_BASE_SET(evcon, &evcon->ev);
+	evhttp_add_event(&evcon->ev, evcon->timeout, HTTP_WRITE_TIMEOUT);
+}
+
+static int
+evhttp_connected(struct evhttp_connection *evcon)
+{
+	switch (evcon->state) {
+	case EVCON_DISCONNECTED:
+	case EVCON_CONNECTING:
+		return (0);
+	case EVCON_IDLE:
+	case EVCON_READING_FIRSTLINE:
+	case EVCON_READING_HEADERS:
+	case EVCON_READING_BODY:
+	case EVCON_READING_TRAILER:
+	case EVCON_WRITING:
+	default:
+		return (1);
+	}
+}
+
+/*
+ * Create the headers needed for an HTTP request
+ */
+static void
+evhttp_make_header_request(struct evhttp_connection *evcon,
+    struct evhttp_request *req)
+{
+	const char *method;
+	
+	evhttp_remove_header(req->output_headers, "Proxy-Connection");
+
+	/* Generate request line */
+	method = evhttp_method(req->type);
+	evbuffer_add_printf(evcon->output_buffer, "%s %s HTTP/%d.%d\r\n",
+	    method, req->uri, req->major, req->minor);
+
+	/* Add the content length on a post request if missing */
+	if (req->type == EVHTTP_REQ_POST &&
+	    evhttp_find_header(req->output_headers, "Content-Length") == NULL){
+		char size[22];
+		evutil_snprintf(size, sizeof(size), "%ld",
+		    (long)EVBUFFER_LENGTH(req->output_buffer));
+		evhttp_add_header(req->output_headers, "Content-Length", size);
+	}
+}
+
+static int
+evhttp_is_connection_close(int flags, struct evkeyvalq* headers)
+{
+	if (flags & EVHTTP_PROXY_REQUEST) {
+		/* proxy connection */
+		const char *connection = evhttp_find_header(headers, "Proxy-Connection");
+		return (connection == NULL || strcasecmp(connection, "keep-alive") != 0);
+	} else {
+		const char *connection = evhttp_find_header(headers, "Connection");
+		return (connection != NULL && strcasecmp(connection, "close") == 0);
+	}
+}
+
+static int
+evhttp_is_connection_keepalive(struct evkeyvalq* headers)
+{
+	const char *connection = evhttp_find_header(headers, "Connection");
+	return (connection != NULL 
+	    && strncasecmp(connection, "keep-alive", 10) == 0);
+}
+
+static void
+evhttp_maybe_add_date_header(struct evkeyvalq *headers)
+{
+	if (evhttp_find_header(headers, "Date") == NULL) {
+		char date[50];
+#ifndef WIN32
+		struct tm cur;
+#endif
+		struct tm *cur_p;
+		time_t t = time(NULL);
+#ifdef WIN32
+		cur_p = gmtime(&t);
+#else
+		gmtime_r(&t, &cur);
+		cur_p = &cur;
+#endif
+		if (strftime(date, sizeof(date),
+			"%a, %d %b %Y %H:%M:%S GMT", cur_p) != 0) {
+			evhttp_add_header(headers, "Date", date);
+		}
+	}
+}
+
+static void
+evhttp_maybe_add_content_length_header(struct evkeyvalq *headers,
+    long content_length)
+{
+	if (evhttp_find_header(headers, "Transfer-Encoding") == NULL &&
+	    evhttp_find_header(headers,	"Content-Length") == NULL) {
+		char len[22];
+		evutil_snprintf(len, sizeof(len), "%ld", content_length);
+		evhttp_add_header(headers, "Content-Length", len);
+	}
+}
+
+/*
+ * Create the headers needed for an HTTP reply
+ */
+
+static void
+evhttp_make_header_response(struct evhttp_connection *evcon,
+    struct evhttp_request *req)
+{
+	int is_keepalive = evhttp_is_connection_keepalive(req->input_headers);
+	evbuffer_add_printf(evcon->output_buffer, "HTTP/%d.%d %d %s\r\n",
+	    req->major, req->minor, req->response_code,
+	    req->response_code_line);
+
+	if (req->major == 1) {
+		if (req->minor == 1)
+			evhttp_maybe_add_date_header(req->output_headers);
+
+		/*
+		 * if the protocol is 1.0 and the connection was keep-alive,
+		 * we need to add a keep-alive header, too.
+		 */
+		if (req->minor == 0 && is_keepalive)
+			evhttp_add_header(req->output_headers,
+			    "Connection", "keep-alive");
+
+		if (req->minor == 1 || is_keepalive) {
+			/* 
+			 * we need to add the content length if the
+			 * user did not give it; this is required for
+			 * persistent connections to work.
+			 */
+			evhttp_maybe_add_content_length_header(
+				req->output_headers,
+				(long)EVBUFFER_LENGTH(req->output_buffer));
+		}
+	}
+
+	/* Potentially add headers for unidentified content. */
+	if (EVBUFFER_LENGTH(req->output_buffer)) {
+		if (evhttp_find_header(req->output_headers,
+			"Content-Type") == NULL) {
+			evhttp_add_header(req->output_headers,
+			    "Content-Type", "text/html; charset=ISO-8859-1");
+		}
+	}
+
+	/* if the request asked for a close, we send a close, too */
+	if (evhttp_is_connection_close(req->flags, req->input_headers)) {
+		evhttp_remove_header(req->output_headers, "Connection");
+		if (!(req->flags & EVHTTP_PROXY_REQUEST))
+		    evhttp_add_header(req->output_headers, "Connection", "close");
+		evhttp_remove_header(req->output_headers, "Proxy-Connection");
+	}
+}
+
+void
+evhttp_make_header(struct evhttp_connection *evcon, struct evhttp_request *req)
+{
+	struct evkeyval *header;
+
+	/*
+	 * Depending on whether this is an HTTP request or response, we might
+	 * need to add some new headers or remove existing ones.
+	 */
+	if (req->kind == EVHTTP_REQUEST) {
+		evhttp_make_header_request(evcon, req);
+	} else {
+		evhttp_make_header_response(evcon, req);
+	}
+
+	TAILQ_FOREACH(header, req->output_headers, next) {
+		evbuffer_add_printf(evcon->output_buffer, "%s: %s\r\n",
+		    header->key, header->value);
+	}
+	evbuffer_add(evcon->output_buffer, "\r\n", 2);
+
+	if (EVBUFFER_LENGTH(req->output_buffer) > 0) {
+		/*
+		 * For a request, we add the POST data, for a reply, this
+		 * is the regular data.
+		 */
+		evbuffer_add_buffer(evcon->output_buffer, req->output_buffer);
+	}
+}
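+
+/*
+ * Sketch of the bytes the function above produces for an outgoing GET
+ * (the header names depend entirely on what is in output_headers):
+ *
+ *	GET /index.html HTTP/1.1\r\n
+ *	Host: example.com\r\n
+ *	\r\n
+ *	<any output_buffer data, e.g. a POST body, follows the blank line>
+ */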
+
+/* Separates host, port and file from the URI */
+
+int
+evhttp_hostportfile(char *url, char **phost, u_short *pport, char **pfile)
+{
+	/* XXX not threadsafe. */
+	static char host[1024];
+	static char file[1024];
+	char *p;
+	const char *p2;
+	int len;
+	u_short port;
+
+	len = strlen(HTTP_PREFIX);
+	if (strncasecmp(url, HTTP_PREFIX, len))
+		return (-1);
+
+	url += len;
+
+	/* We might overrun */
+	if (strlcpy(host, url, sizeof (host)) >= sizeof(host))
+		return (-1);
+
+	p = strchr(host, '/');
+	if (p != NULL) {
+		*p = '\0';
+		p2 = p + 1;
+	} else
+		p2 = NULL;
+
+	if (pfile != NULL) {
+		/* Generate request file */
+		if (p2 == NULL)
+			p2 = "";
+		evutil_snprintf(file, sizeof(file), "/%s", p2);
+	}
+
+	p = strchr(host, ':');
+	if (p != NULL) {
+		*p = '\0';
+		port = atoi(p + 1);
+
+		if (port == 0)
+			return (-1);
+	} else
+		port = HTTP_DEFAULTPORT;
+
+	if (phost != NULL)
+		*phost = host;
+	if (pport != NULL)
+		*pport = port;
+	if (pfile != NULL)
+		*pfile = file;
+
+	return (0);
+}
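+
+/*
+ * Illustrative usage (the static buffers above make this non-reentrant,
+ * as the XXX comment warns):
+ *
+ *	char *host, *file;
+ *	u_short port;
+ *	if (evhttp_hostportfile("http://example.com:8080/index.html",
+ *		&host, &port, &file) == 0) {
+ *		/* host == "example.com", port == 8080,
+ *		 * file == "/index.html" */
+ *	}
+ */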
+
+static int
+evhttp_connection_incoming_fail(struct evhttp_request *req,
+    enum evhttp_connection_error error)
+{
+	switch (error) {
+	case EVCON_HTTP_TIMEOUT:
+	case EVCON_HTTP_EOF:
+		/* 
+		 * these are cases in which we probably should just
+		 * close the connection and not send a reply.  this
+		 * case may happen when a browser keeps a persistent
+		 * connection open and we time out on the read.  when
+		 * the request is still being used for sending, we
+		 * need to disassociate it from the connection here.
+		 */
+		if (!req->userdone) {
+			/* remove it so that it will not be freed */
+			TAILQ_REMOVE(&req->evcon->requests, req, next);
+			/* indicate that this request no longer has a
+			 * connection object
+			 */
+			req->evcon = NULL;
+		}
+		return (-1);
+	case EVCON_HTTP_INVALID_HEADER:
+	default:	/* xxx: probably should just error on default */
+		/* the callback looks at the uri to determine errors */
+		if (req->uri) {
+			free(req->uri);
+			req->uri = NULL;
+		}
+
+		/* 
+		 * the callback needs to send a reply, once the reply has
+	 * been sent, the connection should get freed.
+		 */
+		(*req->cb)(req, req->cb_arg);
+	}
+	
+	return (0);
+}
+
+void
+evhttp_connection_fail(struct evhttp_connection *evcon,
+    enum evhttp_connection_error error)
+{
+	struct evhttp_request* req = TAILQ_FIRST(&evcon->requests);
+	void (*cb)(struct evhttp_request *, void *);
+	void *cb_arg;
+	assert(req != NULL);
+	
+	if (evcon->flags & EVHTTP_CON_INCOMING) {
+		/* 
+		 * for incoming requests, there are two different
+		 * failure cases.  it's either a network level error
+		 * or an http layer error. for problems on the network
+		 * layer like timeouts we just drop the connections.
+		 * For HTTP problems, we might have to send back a
+		 * reply before the connection can be freed.
+		 */
+		if (evhttp_connection_incoming_fail(req, error) == -1)
+			evhttp_connection_free(evcon);
+		return;
+	}
+
+	/* save the callback for later; the cb might free our object */
+	cb = req->cb;
+	cb_arg = req->cb_arg;
+
+	/* do not fail all requests; the next request is going to get
+	 * sent over a new connection.  when a user cancels a request,
+	 * all other pending requests should be processed as normal
+	 */
+	TAILQ_REMOVE(&evcon->requests, req, next);
+	evhttp_request_free(req);
+
+	/* reset the connection */
+	evhttp_connection_reset(evcon);
+	
+	/* We are trying the next request that was queued on us */
+	if (TAILQ_FIRST(&evcon->requests) != NULL)
+		evhttp_connection_connect(evcon);
+
+	/* inform the user */
+	if (cb != NULL)
+		(*cb)(NULL, cb_arg);
+}
+
+void
+evhttp_write(int fd, short what, void *arg)
+{
+	struct evhttp_connection *evcon = arg;
+	int n;
+
+	if (what == EV_TIMEOUT) {
+		evhttp_connection_fail(evcon, EVCON_HTTP_TIMEOUT);
+		return;
+	}
+
+	n = evbuffer_write(evcon->output_buffer, fd);
+	if (n == -1) {
+		event_debug(("%s: evbuffer_write", __func__));
+		evhttp_connection_fail(evcon, EVCON_HTTP_EOF);
+		return;
+	}
+
+	if (n == 0) {
+		event_debug(("%s: write nothing", __func__));
+		evhttp_connection_fail(evcon, EVCON_HTTP_EOF);
+		return;
+	}
+
+	if (EVBUFFER_LENGTH(evcon->output_buffer) != 0) {
+		evhttp_add_event(&evcon->ev, 
+		    evcon->timeout, HTTP_WRITE_TIMEOUT);
+		return;
+	}
+
+	/* Activate our callback */
+	if (evcon->cb != NULL)
+		(*evcon->cb)(evcon, evcon->cb_arg);
+}
+
+/**
+ * Advance the connection state.
+ * - If this is an outgoing connection, we've just processed the response;
+ *   idle or close the connection.
+ * - If this is an incoming connection, we've just processed the request;
+ *   respond.
+ */
+static void
+evhttp_connection_done(struct evhttp_connection *evcon)
+{
+	struct evhttp_request *req = TAILQ_FIRST(&evcon->requests);
+	int con_outgoing = evcon->flags & EVHTTP_CON_OUTGOING;
+
+	if (con_outgoing) {
+		/* idle or close the connection */
+		int need_close;
+		TAILQ_REMOVE(&evcon->requests, req, next);
+		req->evcon = NULL;
+
+		evcon->state = EVCON_IDLE;
+
+		need_close = 
+		    evhttp_is_connection_close(req->flags, req->input_headers)||
+		    evhttp_is_connection_close(req->flags, req->output_headers);
+
+		/* check if we got asked to close the connection */
+		if (need_close)
+			evhttp_connection_reset(evcon);
+
+		if (TAILQ_FIRST(&evcon->requests) != NULL) {
+			/*
+			 * We have more requests; reset the connection
+			 * and deal with the next request.
+			 */
+			if (!evhttp_connected(evcon))
+				evhttp_connection_connect(evcon);
+			else
+				evhttp_request_dispatch(evcon);
+		} else if (!need_close) {
+			/*
+			 * The connection is going to be persistent, but we
+			 * need to detect if the other side closes it.
+			 */
+			evhttp_connection_start_detectclose(evcon);
+		}
+	} else if (evcon->state != EVCON_DISCONNECTED) {
+		/*
+		 * incoming connection - we need to leave the request on the
+		 * connection so that we can reply to it.
+		 */
+		evcon->state = EVCON_WRITING;
+	}
+
+	/* notify the user of the request */
+	(*req->cb)(req, req->cb_arg);
+
+	/* if this was an outgoing request, we own it and it's done, so free it */
+	if (con_outgoing) {
+		evhttp_request_free(req);
+	}
+}
+
+/*
+ * Handles reading from a chunked request.
+ *   return ALL_DATA_READ:
+ *     all data has been read
+ *   return MORE_DATA_EXPECTED:
+ *     more data is expected
+ *   return DATA_CORRUPTED:
+ *     data is corrupted
+ *   return REQUEST_CANCELED:
+ *     request was canceled by the user calling evhttp_cancel_request
+ */
+
+static enum message_read_status
+evhttp_handle_chunked_read(struct evhttp_request *req, struct evbuffer *buf)
+{
+	int len;
+
+	while ((len = EVBUFFER_LENGTH(buf)) > 0) {
+		if (req->ntoread < 0) {
+			/* Read chunk size */
+			ev_int64_t ntoread;
+			char *p = evbuffer_readline(buf);
+			char *endp;
+			int error;
+			if (p == NULL)
+				break;
+			/* the last chunk is on a new line? */
+			if (strlen(p) == 0) {
+				free(p);
+				continue;
+			}
+			ntoread = evutil_strtoll(p, &endp, 16);
+			error = (*p == '\0' ||
+			    (*endp != '\0' && *endp != ' ') ||
+			    ntoread < 0);
+			free(p);
+			if (error) {
+				/* could not get chunk size */
+				return (DATA_CORRUPTED);
+			}
+			req->ntoread = ntoread;
+			if (req->ntoread == 0) {
+				/* Last chunk */
+				return (ALL_DATA_READ);
+			}
+			continue;
+		}
+
+		/* don't have enough to complete a chunk; wait for more */
+		if (len < req->ntoread)
+			return (MORE_DATA_EXPECTED);
+
+		/* Completed chunk */
+		evbuffer_add(req->input_buffer,
+		    EVBUFFER_DATA(buf), (size_t)req->ntoread);
+		evbuffer_drain(buf, (size_t)req->ntoread);
+		req->ntoread = -1;
+		if (req->chunk_cb != NULL) {
+			(*req->chunk_cb)(req, req->cb_arg);
+			evbuffer_drain(req->input_buffer,
+			    EVBUFFER_LENGTH(req->input_buffer));
+		}
+	}
+
+	return (MORE_DATA_EXPECTED);
+}
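+
+/*
+ * For reference, the wire format parsed above (HTTP/1.1 chunked
+ * transfer encoding):
+ *
+ *	4\r\n		<- chunk size in hex
+ *	Wiki\r\n	<- exactly that many bytes of data
+ *	0\r\n		<- a size of zero marks the last chunk
+ *
+ * A zero size yields ALL_DATA_READ; a malformed size line yields
+ * DATA_CORRUPTED.
+ */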
+
+static void
+evhttp_read_trailer(struct evhttp_connection *evcon, struct evhttp_request *req)
+{
+	struct evbuffer *buf = evcon->input_buffer;
+
+	switch (evhttp_parse_headers(req, buf)) {
+	case DATA_CORRUPTED:
+		evhttp_connection_fail(evcon, EVCON_HTTP_INVALID_HEADER);
+		break;
+	case ALL_DATA_READ:
+		event_del(&evcon->ev);
+		evhttp_connection_done(evcon);
+		break;
+	case MORE_DATA_EXPECTED:
+	default:
+		evhttp_add_event(&evcon->ev, evcon->timeout,
+		    HTTP_READ_TIMEOUT);
+		break;
+	}
+}
+
+static void
+evhttp_read_body(struct evhttp_connection *evcon, struct evhttp_request *req)
+{
+	struct evbuffer *buf = evcon->input_buffer;
+	
+	if (req->chunked) {
+		switch (evhttp_handle_chunked_read(req, buf)) {
+		case ALL_DATA_READ:
+			/* finished last chunk */
+			evcon->state = EVCON_READING_TRAILER;
+			evhttp_read_trailer(evcon, req);
+			return;
+		case DATA_CORRUPTED:
+			/* corrupted data */
+			evhttp_connection_fail(evcon,
+			    EVCON_HTTP_INVALID_HEADER);
+			return;
+		case REQUEST_CANCELED:
+			/* request canceled */
+			evhttp_request_free(req);
+			return;
+		case MORE_DATA_EXPECTED:
+		default:
+			break;
+		}
+	} else if (req->ntoread < 0) {
+		/* Read until connection close. */
+		evbuffer_add_buffer(req->input_buffer, buf);
+	} else if (EVBUFFER_LENGTH(buf) >= req->ntoread) {
+		/* Completed content length */
+		evbuffer_add(req->input_buffer, EVBUFFER_DATA(buf),
+		    (size_t)req->ntoread);
+		evbuffer_drain(buf, (size_t)req->ntoread);
+		req->ntoread = 0;
+		evhttp_connection_done(evcon);
+		return;
+	}
+	/* Read more! */
+	event_set(&evcon->ev, evcon->fd, EV_READ, evhttp_read, evcon);
+	EVHTTP_BASE_SET(evcon, &evcon->ev);
+	evhttp_add_event(&evcon->ev, evcon->timeout, HTTP_READ_TIMEOUT);
+}
+
+/*
+ * Reads data into a buffer structure until no more data
+ * can be read on the file descriptor or we have read all
+ * the data that we wanted to read.
+ * Execute callback when done.
+ */
+
+void
+evhttp_read(int fd, short what, void *arg)
+{
+	struct evhttp_connection *evcon = arg;
+	struct evhttp_request *req = TAILQ_FIRST(&evcon->requests);
+	struct evbuffer *buf = evcon->input_buffer;
+	int n, len;
+
+	if (what == EV_TIMEOUT) {
+		evhttp_connection_fail(evcon, EVCON_HTTP_TIMEOUT);
+		return;
+	}
+	n = evbuffer_read(buf, fd, -1);
+	len = EVBUFFER_LENGTH(buf);
+	event_debug(("%s: got %d on %d\n", __func__, n, fd));
+	
+	if (n == -1) {
+		if (errno != EINTR && errno != EAGAIN) {
+			event_debug(("%s: evbuffer_read", __func__));
+			evhttp_connection_fail(evcon, EVCON_HTTP_EOF);
+		} else {
+			evhttp_add_event(&evcon->ev, evcon->timeout,
+			    HTTP_READ_TIMEOUT);	       
+		}
+		return;
+	} else if (n == 0) {
+		/* Connection closed */
+		evcon->state = EVCON_DISCONNECTED;
+		evhttp_connection_done(evcon);
+		return;
+	}
+
+	switch (evcon->state) {
+	case EVCON_READING_FIRSTLINE:
+		evhttp_read_firstline(evcon, req);
+		break;
+	case EVCON_READING_HEADERS:
+		evhttp_read_header(evcon, req);
+		break;
+	case EVCON_READING_BODY:
+		evhttp_read_body(evcon, req);
+		break;
+	case EVCON_READING_TRAILER:
+		evhttp_read_trailer(evcon, req);
+		break;
+	case EVCON_DISCONNECTED:
+	case EVCON_CONNECTING:
+	case EVCON_IDLE:
+	case EVCON_WRITING:
+	default:
+		event_errx(1, "%s: illegal connection state %d",
+			   __func__, evcon->state);
+	}
+}
+
+static void
+evhttp_write_connectioncb(struct evhttp_connection *evcon, void *arg)
+{
+	/* This is after writing the request to the server */
+	struct evhttp_request *req = TAILQ_FIRST(&evcon->requests);
+	assert(req != NULL);
+
+	assert(evcon->state == EVCON_WRITING);
+
+	/* We are done writing our header and are now expecting the response */
+	req->kind = EVHTTP_RESPONSE;
+
+	evhttp_start_read(evcon);
+}
+
+/*
+ * Clean up a connection object
+ */
+
+void
+evhttp_connection_free(struct evhttp_connection *evcon)
+{
+	struct evhttp_request *req;
+
+	/* notify interested parties that this connection is going down */
+	if (evcon->fd != -1) {
+		if (evhttp_connected(evcon) && evcon->closecb != NULL)
+			(*evcon->closecb)(evcon, evcon->closecb_arg);
+	}
+
+	/* remove all requests that might be queued on this
+	 * connection.  for server connections, this should be empty,
+	 * because it gets dequeued either in evhttp_connection_done or
+	 * evhttp_connection_fail.
+	 */
+	while ((req = TAILQ_FIRST(&evcon->requests)) != NULL) {
+		TAILQ_REMOVE(&evcon->requests, req, next);
+		evhttp_request_free(req);
+	}
+
+	if (evcon->http_server != NULL) {
+		struct evhttp *http = evcon->http_server;
+		TAILQ_REMOVE(&http->connections, evcon, next);
+	}
+
+	if (event_initialized(&evcon->close_ev))
+		event_del(&evcon->close_ev);
+
+	if (event_initialized(&evcon->ev))
+		event_del(&evcon->ev);
+	
+	if (evcon->fd != -1)
+		EVUTIL_CLOSESOCKET(evcon->fd);
+
+	if (evcon->bind_address != NULL)
+		free(evcon->bind_address);
+
+	if (evcon->address != NULL)
+		free(evcon->address);
+
+	if (evcon->input_buffer != NULL)
+		evbuffer_free(evcon->input_buffer);
+
+	if (evcon->output_buffer != NULL)
+		evbuffer_free(evcon->output_buffer);
+
+	free(evcon);
+}
+
+void
+evhttp_connection_set_local_address(struct evhttp_connection *evcon,
+    const char *address)
+{
+	assert(evcon->state == EVCON_DISCONNECTED);
+	if (evcon->bind_address)
+		free(evcon->bind_address);
+	if ((evcon->bind_address = strdup(address)) == NULL)
+		event_err(1, "%s: strdup", __func__);
+}
+
+void
+evhttp_connection_set_local_port(struct evhttp_connection *evcon,
+    unsigned short port)
+{
+	assert(evcon->state == EVCON_DISCONNECTED);
+	evcon->bind_port = port;
+}
+
+static void
+evhttp_request_dispatch(struct evhttp_connection* evcon)
+{
+	struct evhttp_request *req = TAILQ_FIRST(&evcon->requests);
+	
+	/* this should not usually happen, but it's possible */
+	if (req == NULL)
+		return;
+
+	/* delete possible close detection events */
+	evhttp_connection_stop_detectclose(evcon);
+	
+	/* we assume that the connection is connected already */
+	assert(evcon->state == EVCON_IDLE);
+
+	evcon->state = EVCON_WRITING;
+
+	/* Create the header from the stored arguments */
+	evhttp_make_header(evcon, req);
+
+	evhttp_write_buffer(evcon, evhttp_write_connectioncb, NULL);
+}
+
+/* Reset our connection state */
+void
+evhttp_connection_reset(struct evhttp_connection *evcon)
+{
+	if (event_initialized(&evcon->ev))
+		event_del(&evcon->ev);
+
+	if (evcon->fd != -1) {
+		/* inform interested parties about connection close */
+		if (evhttp_connected(evcon) && evcon->closecb != NULL)
+			(*evcon->closecb)(evcon, evcon->closecb_arg);
+
+		EVUTIL_CLOSESOCKET(evcon->fd);
+		evcon->fd = -1;
+	}
+	evcon->state = EVCON_DISCONNECTED;
+
+	evbuffer_drain(evcon->input_buffer,
+	    EVBUFFER_LENGTH(evcon->input_buffer));
+	evbuffer_drain(evcon->output_buffer,
+	    EVBUFFER_LENGTH(evcon->output_buffer));
+}
+
+static void
+evhttp_detect_close_cb(int fd, short what, void *arg)
+{
+	struct evhttp_connection *evcon = arg;
+	evhttp_connection_reset(evcon);
+}
+
+static void
+evhttp_connection_start_detectclose(struct evhttp_connection *evcon)
+{
+	evcon->flags |= EVHTTP_CON_CLOSEDETECT;
+
+	if (event_initialized(&evcon->close_ev))
+		event_del(&evcon->close_ev);
+	event_set(&evcon->close_ev, evcon->fd, EV_READ,
+	    evhttp_detect_close_cb, evcon);
+	EVHTTP_BASE_SET(evcon, &evcon->close_ev);
+	event_add(&evcon->close_ev, NULL);
+}
+
+static void
+evhttp_connection_stop_detectclose(struct evhttp_connection *evcon)
+{
+	evcon->flags &= ~EVHTTP_CON_CLOSEDETECT;
+	event_del(&evcon->close_ev);
+}
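+
+/*
+ * Editor's note: close detection keeps an EV_READ event armed while a
+ * persistent connection sits idle; when the peer closes (or sends
+ * unexpected data) the event fires and evhttp_detect_close_cb resets
+ * the connection rather than leaving a dead socket around.
+ */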
+
+static void
+evhttp_connection_retry(int fd, short what, void *arg)
+{
+	struct evhttp_connection *evcon = arg;
+
+	evcon->state = EVCON_DISCONNECTED;
+	evhttp_connection_connect(evcon);
+}
+
+/*
+ * Call back for asynchronous connection attempt.
+ */
+
+static void
+evhttp_connectioncb(int fd, short what, void *arg)
+{
+	struct evhttp_connection *evcon = arg;
+	int error;
+	socklen_t errsz = sizeof(error);
+		
+	if (what == EV_TIMEOUT) {
+		event_debug(("%s: connection timeout for \"%s:%d\" on %d",
+			__func__, evcon->address, evcon->port, evcon->fd));
+		goto cleanup;
+	}
+
+	/* Check if the connection completed */
+	if (getsockopt(evcon->fd, SOL_SOCKET, SO_ERROR, (void*)&error,
+		       &errsz) == -1) {
+		event_debug(("%s: getsockopt for \"%s:%d\" on %d",
+			__func__, evcon->address, evcon->port, evcon->fd));
+		goto cleanup;
+	}
+
+	if (error) {
+		event_debug(("%s: connect failed for \"%s:%d\" on %d: %s",
+		    __func__, evcon->address, evcon->port, evcon->fd,
+			strerror(error)));
+		goto cleanup;
+	}
+
+	/* We are connected to the server now */
+	event_debug(("%s: connected to \"%s:%d\" on %d\n",
+			__func__, evcon->address, evcon->port, evcon->fd));
+
+	/* Reset the retry count as we were successful in connecting */
+	evcon->retry_cnt = 0;
+	evcon->state = EVCON_IDLE;
+
+	/* try to start requests that have queued up on this connection */
+	evhttp_request_dispatch(evcon);
+	return;
+
+ cleanup:
+	if (evcon->retry_max < 0 || evcon->retry_cnt < evcon->retry_max) {
+		evtimer_set(&evcon->ev, evhttp_connection_retry, evcon);
+		EVHTTP_BASE_SET(evcon, &evcon->ev);
+		evhttp_add_event(&evcon->ev, MIN(3600, 2 << evcon->retry_cnt),
+		    HTTP_CONNECT_TIMEOUT);
+		evcon->retry_cnt++;
+		return;
+	}
+	evhttp_connection_reset(evcon);
+
+	/* for now, we just signal all requests by executing their callbacks */
+	while (TAILQ_FIRST(&evcon->requests) != NULL) {
+		struct evhttp_request *request = TAILQ_FIRST(&evcon->requests);
+		TAILQ_REMOVE(&evcon->requests, request, next);
+		request->evcon = NULL;
+
+		/* we might want to set an error here */
+		request->cb(request, request->cb_arg);
+		evhttp_request_free(request);
+	}
+}
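+
+/*
+ * Editor's note on the retry path above: each failed connect doubles
+ * the delay before the next attempt (2 << retry_cnt seconds: 2, 4, 8,
+ * ...), capped at one hour.  A minimal sketch of the same computation:
+ */
+#if 0	/* illustrative only, not compiled */
+static int
+retry_delay_secs(int retry_cnt)
+{
+	/* exponential backoff, capped at 3600 seconds (one hour) */
+	return (retry_cnt < 11 ? 2 << retry_cnt : 3600);
+}
+#endif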
+
+/*
+ * Check if we got a valid response code.
+ */
+
+static int
+evhttp_valid_response_code(int code)
+{
+	if (code == 0)
+		return (0);
+
+	return (1);
+}
+
+/* Parses the status line of a web server */
+
+static int
+evhttp_parse_response_line(struct evhttp_request *req, char *line)
+{
+	char *protocol;
+	char *number;
+	const char *readable = "";
+
+	protocol = strsep(&line, " ");
+	if (line == NULL)
+		return (-1);
+	number = strsep(&line, " ");
+	if (line != NULL)
+		readable = line;
+
+	if (strcmp(protocol, "HTTP/1.0") == 0) {
+		req->major = 1;
+		req->minor = 0;
+	} else if (strcmp(protocol, "HTTP/1.1") == 0) {
+		req->major = 1;
+		req->minor = 1;
+	} else {
+		event_debug(("%s: bad protocol \"%s\"",
+			__func__, protocol));
+		return (-1);
+	}
+
+	req->response_code = atoi(number);
+	if (!evhttp_valid_response_code(req->response_code)) {
+		event_debug(("%s: bad response code \"%s\"",
+			__func__, number));
+		return (-1);
+	}
+
+	if ((req->response_code_line = strdup(readable)) == NULL)
+		event_err(1, "%s: strdup", __func__);
+
+	return (0);
+}
+
+/* Parse the first line of an HTTP request */
+
+static int
+evhttp_parse_request_line(struct evhttp_request *req, char *line)
+{
+	char *method;
+	char *uri;
+	char *version;
+
+	/* Parse the request line */
+	method = strsep(&line, " ");
+	if (line == NULL)
+		return (-1);
+	uri = strsep(&line, " ");
+	if (line == NULL)
+		return (-1);
+	version = strsep(&line, " ");
+	if (line != NULL)
+		return (-1);
+
+	/* First line */
+	if (strcmp(method, "GET") == 0) {
+		req->type = EVHTTP_REQ_GET;
+	} else if (strcmp(method, "POST") == 0) {
+		req->type = EVHTTP_REQ_POST;
+	} else if (strcmp(method, "HEAD") == 0) {
+		req->type = EVHTTP_REQ_HEAD;
+	} else {
+		event_debug(("%s: bad method %s on request %p from %s",
+			__func__, method, req, req->remote_host));
+		return (-1);
+	}
+
+	if (strcmp(version, "HTTP/1.0") == 0) {
+		req->major = 1;
+		req->minor = 0;
+	} else if (strcmp(version, "HTTP/1.1") == 0) {
+		req->major = 1;
+		req->minor = 1;
+	} else {
+		event_debug(("%s: bad version %s on request %p from %s",
+			__func__, version, req, req->remote_host));
+		return (-1);
+	}
+
+	if ((req->uri = strdup(uri)) == NULL) {
+		event_debug(("%s: strdup", __func__));
+		return (-1);
+	}
+
+	/* determine if it's a proxy request */
+	if (strlen(req->uri) > 0 && req->uri[0] != '/')
+		req->flags |= EVHTTP_PROXY_REQUEST;
+
+	return (0);
+}
+
+const char *
+evhttp_find_header(const struct evkeyvalq *headers, const char *key)
+{
+	struct evkeyval *header;
+
+	TAILQ_FOREACH(header, headers, next) {
+		if (strcasecmp(header->key, key) == 0)
+			return (header->value);
+	}
+
+	return (NULL);
+}
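+
+/*
+ * Usage sketch (editor's illustration): lookups are case-insensitive,
+ * so "content-type" matches a header that arrived as "Content-Type".
+ * The helper name below is hypothetical.
+ */
+#if 0	/* illustrative only, not compiled */
+static int
+is_html_request(struct evhttp_request *req)
+{
+	const char *ctype =
+	    evhttp_find_header(req->input_headers, "content-type");
+	return (ctype != NULL && strncasecmp(ctype, "text/html", 9) == 0);
+}
+#endif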
+
+void
+evhttp_clear_headers(struct evkeyvalq *headers)
+{
+	struct evkeyval *header;
+
+	for (header = TAILQ_FIRST(headers);
+	    header != NULL;
+	    header = TAILQ_FIRST(headers)) {
+		TAILQ_REMOVE(headers, header, next);
+		free(header->key);
+		free(header->value);
+		free(header);
+	}
+}
+
+/*
+ * Returns 0 if the header was successfully removed.
+ * Returns -1 if the header could not be found.
+ */
+
+int
+evhttp_remove_header(struct evkeyvalq *headers, const char *key)
+{
+	struct evkeyval *header;
+
+	TAILQ_FOREACH(header, headers, next) {
+		if (strcasecmp(header->key, key) == 0)
+			break;
+	}
+
+	if (header == NULL)
+		return (-1);
+
+	/* Free and remove the header that we found */
+	TAILQ_REMOVE(headers, header, next);
+	free(header->key);
+	free(header->value);
+	free(header);
+
+	return (0);
+}
+
+static int
+evhttp_header_is_valid_value(const char *value)
+{
+	const char *p = value;
+
+	while ((p = strpbrk(p, "\r\n")) != NULL) {
+		/* we really expect only one new line */
+		p += strspn(p, "\r\n");
+		/* we expect a space or tab for continuation */
+		if (*p != ' ' && *p != '\t')
+			return (0);
+	}
+	return (1);
+}
+
+int
+evhttp_add_header(struct evkeyvalq *headers,
+    const char *key, const char *value)
+{
+	event_debug(("%s: key: %s val: %s\n", __func__, key, value));
+
+	if (strchr(key, '\r') != NULL || strchr(key, '\n') != NULL) {
+		/* drop illegal headers */
+		event_debug(("%s: dropping illegal header key\n", __func__));
+		return (-1);
+	}
+	
+	if (!evhttp_header_is_valid_value(value)) {
+		event_debug(("%s: dropping illegal header value\n", __func__));
+		return (-1);
+	}
+
+	return (evhttp_add_header_internal(headers, key, value));
+}
+
+static int
+evhttp_add_header_internal(struct evkeyvalq *headers,
+    const char *key, const char *value)
+{
+	struct evkeyval *header = calloc(1, sizeof(struct evkeyval));
+	if (header == NULL) {
+		event_warn("%s: calloc", __func__);
+		return (-1);
+	}
+	if ((header->key = strdup(key)) == NULL) {
+		free(header);
+		event_warn("%s: strdup", __func__);
+		return (-1);
+	}
+	if ((header->value = strdup(value)) == NULL) {
+		free(header->key);
+		free(header);
+		event_warn("%s: strdup", __func__);
+		return (-1);
+	}
+
+	TAILQ_INSERT_TAIL(headers, header, next);
+
+	return (0);
+}
+
+/*
+ * Parses the first line and the header lines of a request or a response
+ * from the given event buffer into the specified request object.
+ *
+ * Returns
+ *   DATA_CORRUPTED      on error
+ *   MORE_DATA_EXPECTED  when we need to read more headers
+ *   ALL_DATA_READ       when all headers have been read.
+ */
+
+enum message_read_status
+evhttp_parse_firstline(struct evhttp_request *req, struct evbuffer *buffer)
+{
+	char *line;
+	enum message_read_status status = ALL_DATA_READ;
+
+	line = evbuffer_readline(buffer);
+	if (line == NULL)
+		return (MORE_DATA_EXPECTED);
+
+	switch (req->kind) {
+	case EVHTTP_REQUEST:
+		if (evhttp_parse_request_line(req, line) == -1)
+			status = DATA_CORRUPTED;
+		break;
+	case EVHTTP_RESPONSE:
+		if (evhttp_parse_response_line(req, line) == -1)
+			status = DATA_CORRUPTED;
+		break;
+	default:
+		status = DATA_CORRUPTED;
+	}
+
+	free(line);
+	return (status);
+}
+
+static int
+evhttp_append_to_last_header(struct evkeyvalq *headers, const char *line)
+{
+	struct evkeyval *header = TAILQ_LAST(headers, evkeyvalq);
+	char *newval;
+	size_t old_len, line_len;
+
+	if (header == NULL)
+		return (-1);
+
+	old_len = strlen(header->value);
+	line_len = strlen(line);
+
+	newval = realloc(header->value, old_len + line_len + 1);
+	if (newval == NULL)
+		return (-1);
+
+	memcpy(newval + old_len, line, line_len + 1);
+	header->value = newval;
+
+	return (0);
+}
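+
+/*
+ * Editor's note: this implements header folding; e.g. the two wire
+ * lines
+ *     X-Subject: part one
+ *      part two
+ * leave the stored value as "part one part two", since the
+ * continuation line is appended verbatim, leading whitespace included.
+ */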
+
+enum message_read_status
+evhttp_parse_headers(struct evhttp_request *req, struct evbuffer* buffer)
+{
+	char *line;
+	enum message_read_status status = MORE_DATA_EXPECTED;
+
+	struct evkeyvalq* headers = req->input_headers;
+	while ((line = evbuffer_readline(buffer)) != NULL) {
+		char *skey, *svalue;
+
+		if (*line == '\0') { /* Last header - Done */
+			status = ALL_DATA_READ;
+			free(line);
+			break;
+		}
+
+		/* Check if this is a continuation line */
+		if (*line == ' ' || *line == '\t') {
+			if (evhttp_append_to_last_header(headers, line) == -1)
+				goto error;
+			free(line);
+			continue;
+		}
+
+		/* Processing of header lines */
+		svalue = line;
+		skey = strsep(&svalue, ":");
+		if (svalue == NULL)
+			goto error;
+
+		svalue += strspn(svalue, " ");
+
+		if (evhttp_add_header(headers, skey, svalue) == -1)
+			goto error;
+
+		free(line);
+	}
+
+	return (status);
+
+ error:
+	free(line);
+	return (DATA_CORRUPTED);
+}
+
+static int
+evhttp_get_body_length(struct evhttp_request *req)
+{
+	struct evkeyvalq *headers = req->input_headers;
+	const char *content_length;
+	const char *connection;
+
+	content_length = evhttp_find_header(headers, "Content-Length");
+	connection = evhttp_find_header(headers, "Connection");
+		
+	if (content_length == NULL && connection == NULL)
+		req->ntoread = -1;
+	else if (content_length == NULL &&
+	    strcasecmp(connection, "Close") != 0) {
+		/* Bad combination, we don't know when it will end */
+		event_warnx("%s: we got no content length, but the "
+		    "server wants to keep the connection open: %s.",
+		    __func__, connection);
+		return (-1);
+	} else if (content_length == NULL) {
+		req->ntoread = -1;
+	} else {
+		char *endp;
+		ev_int64_t ntoread = evutil_strtoll(content_length, &endp, 10);
+		if (*content_length == '\0' || *endp != '\0' || ntoread < 0) {
+			event_debug(("%s: illegal content length: %s",
+				__func__, content_length));
+			return (-1);
+		}
+		req->ntoread = ntoread;
+	}
+		
+	event_debug(("%s: bytes to read: %lld (in buffer %ld)\n",
+		__func__, req->ntoread,
+		EVBUFFER_LENGTH(req->evcon->input_buffer)));
+
+	return (0);
+}
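+
+/*
+ * Summary of the cases above (editor's note):
+ *   Content-Length present                  -> read exactly that many bytes
+ *   neither Content-Length nor Connection   -> read until EOF (ntoread = -1)
+ *   no Content-Length, "Connection: close"  -> read until EOF
+ *   no Content-Length, keep-alive           -> error; the end of the body
+ *                                              would be impossible to detect
+ */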
+
+static void
+evhttp_get_body(struct evhttp_connection *evcon, struct evhttp_request *req)
+{
+	const char *xfer_enc;
+	
+	/* If this is a request without a body, then we are done */
+	if (req->kind == EVHTTP_REQUEST && req->type != EVHTTP_REQ_POST) {
+		evhttp_connection_done(evcon);
+		return;
+	}
+	evcon->state = EVCON_READING_BODY;
+	xfer_enc = evhttp_find_header(req->input_headers, "Transfer-Encoding");
+	if (xfer_enc != NULL && strcasecmp(xfer_enc, "chunked") == 0) {
+		req->chunked = 1;
+		req->ntoread = -1;
+	} else {
+		if (evhttp_get_body_length(req) == -1) {
+			evhttp_connection_fail(evcon,
+			    EVCON_HTTP_INVALID_HEADER);
+			return;
+		}
+	}
+	evhttp_read_body(evcon, req);
+}
+
+static void
+evhttp_read_firstline(struct evhttp_connection *evcon,
+		      struct evhttp_request *req)
+{
+	enum message_read_status res;
+
+	res = evhttp_parse_firstline(req, evcon->input_buffer);
+	if (res == DATA_CORRUPTED) {
+		/* Error while reading, terminate */
+		event_debug(("%s: bad header lines on %d\n",
+			__func__, evcon->fd));
+		evhttp_connection_fail(evcon, EVCON_HTTP_INVALID_HEADER);
+		return;
+	} else if (res == MORE_DATA_EXPECTED) {
+		/* Need more header lines */
+		evhttp_add_event(&evcon->ev,
+		    evcon->timeout, HTTP_READ_TIMEOUT);
+		return;
+	}
+
+	evcon->state = EVCON_READING_HEADERS;
+	evhttp_read_header(evcon, req);
+}
+
+static void
+evhttp_read_header(struct evhttp_connection *evcon, struct evhttp_request *req)
+{
+	enum message_read_status res;
+	int fd = evcon->fd;
+
+	res = evhttp_parse_headers(req, evcon->input_buffer);
+	if (res == DATA_CORRUPTED) {
+		/* Error while reading, terminate */
+		event_debug(("%s: bad header lines on %d\n", __func__, fd));
+		evhttp_connection_fail(evcon, EVCON_HTTP_INVALID_HEADER);
+		return;
+	} else if (res == MORE_DATA_EXPECTED) {
+		/* Need more header lines */
+		evhttp_add_event(&evcon->ev, 
+		    evcon->timeout, HTTP_READ_TIMEOUT);
+		return;
+	}
+
+	/* Done reading headers, do the real work */
+	switch (req->kind) {
+	case EVHTTP_REQUEST:
+		event_debug(("%s: checking for post data on %d\n",
+				__func__, fd));
+		evhttp_get_body(evcon, req);
+		break;
+
+	case EVHTTP_RESPONSE:
+		if (req->response_code == HTTP_NOCONTENT ||
+		    req->response_code == HTTP_NOTMODIFIED ||
+		    (req->response_code >= 100 && req->response_code < 200)) {
+			event_debug(("%s: skipping body for code %d\n",
+					__func__, req->response_code));
+			evhttp_connection_done(evcon);
+		} else {
+			event_debug(("%s: start of read body for %s on %d\n",
+				__func__, req->remote_host, fd));
+			evhttp_get_body(evcon, req);
+		}
+		break;
+
+	default:
+		event_warnx("%s: bad header on %d", __func__, fd);
+		evhttp_connection_fail(evcon, EVCON_HTTP_INVALID_HEADER);
+		break;
+	}
+}
+
+/*
+ * Creates a TCP connection to the specified port and executes a callback
+ * when finished.  Failure or success is indicated by the passed connection
+ * object.
+ *
+ * Although this interface accepts a hostname, it is intended to take
+ * only numeric hostnames so that non-blocking DNS resolution can
+ * happen elsewhere.
+ */
+
+struct evhttp_connection *
+evhttp_connection_new(const char *address, unsigned short port)
+{
+	struct evhttp_connection *evcon = NULL;
+	
+	event_debug(("Attempting connection to %s:%d\n", address, port));
+
+	if ((evcon = calloc(1, sizeof(struct evhttp_connection))) == NULL) {
+		event_warn("%s: calloc failed", __func__);
+		goto error;
+	}
+
+	evcon->fd = -1;
+	evcon->port = port;
+
+	evcon->timeout = -1;
+	evcon->retry_cnt = evcon->retry_max = 0;
+
+	if ((evcon->address = strdup(address)) == NULL) {
+		event_warn("%s: strdup failed", __func__);
+		goto error;
+	}
+
+	if ((evcon->input_buffer = evbuffer_new()) == NULL) {
+		event_warn("%s: evbuffer_new failed", __func__);
+		goto error;
+	}
+
+	if ((evcon->output_buffer = evbuffer_new()) == NULL) {
+		event_warn("%s: evbuffer_new failed", __func__);
+		goto error;
+	}
+	
+	evcon->state = EVCON_DISCONNECTED;
+	TAILQ_INIT(&evcon->requests);
+
+	return (evcon);
+	
+ error:
+	if (evcon != NULL)
+		evhttp_connection_free(evcon);
+	return (NULL);
+}
+
+void evhttp_connection_set_base(struct evhttp_connection *evcon,
+    struct event_base *base)
+{
+	assert(evcon->base == NULL);
+	assert(evcon->state == EVCON_DISCONNECTED);
+	evcon->base = base;
+}
+
+void
+evhttp_connection_set_timeout(struct evhttp_connection *evcon,
+    int timeout_in_secs)
+{
+	evcon->timeout = timeout_in_secs;
+}
+
+void
+evhttp_connection_set_retries(struct evhttp_connection *evcon,
+    int retry_max)
+{
+	evcon->retry_max = retry_max;
+}
+
+void
+evhttp_connection_set_closecb(struct evhttp_connection *evcon,
+    void (*cb)(struct evhttp_connection *, void *), void *cbarg)
+{
+	evcon->closecb = cb;
+	evcon->closecb_arg = cbarg;
+}
+
+void
+evhttp_connection_get_peer(struct evhttp_connection *evcon,
+    char **address, u_short *port)
+{
+	*address = evcon->address;
+	*port = evcon->port;
+}
+
+int
+evhttp_connection_connect(struct evhttp_connection *evcon)
+{
+	if (evcon->state == EVCON_CONNECTING)
+		return (0);
+	
+	evhttp_connection_reset(evcon);
+
+	assert(!(evcon->flags & EVHTTP_CON_INCOMING));
+	evcon->flags |= EVHTTP_CON_OUTGOING;
+	
+	evcon->fd = bind_socket(
+		evcon->bind_address, evcon->bind_port, 0 /*reuse*/);
+	if (evcon->fd == -1) {
+		event_debug(("%s: failed to bind to \"%s\"",
+			__func__, evcon->bind_address));
+		return (-1);
+	}
+
+	if (socket_connect(evcon->fd, evcon->address, evcon->port) == -1) {
+		EVUTIL_CLOSESOCKET(evcon->fd); evcon->fd = -1;
+		return (-1);
+	}
+
+	/* Set up a callback for successful connection setup */
+	event_set(&evcon->ev, evcon->fd, EV_WRITE, evhttp_connectioncb, evcon);
+	EVHTTP_BASE_SET(evcon, &evcon->ev);
+	evhttp_add_event(&evcon->ev, evcon->timeout, HTTP_CONNECT_TIMEOUT);
+
+	evcon->state = EVCON_CONNECTING;
+	
+	return (0);
+}
+
+/*
+ * Starts an HTTP request on the provided evhttp_connection object.
+ * If the connection object is not connected to the web server already,
+ * this will start the connection.
+ */
+
+int
+evhttp_make_request(struct evhttp_connection *evcon,
+    struct evhttp_request *req,
+    enum evhttp_cmd_type type, const char *uri)
+{
+	/* We are making a request */
+	req->kind = EVHTTP_REQUEST;
+	req->type = type;
+	if (req->uri != NULL)
+		free(req->uri);
+	if ((req->uri = strdup(uri)) == NULL)
+		event_err(1, "%s: strdup", __func__);
+
+	/* Set the protocol version if it is not supplied */
+	if (!req->major && !req->minor) {
+		req->major = 1;
+		req->minor = 1;
+	}
+	
+	assert(req->evcon == NULL);
+	req->evcon = evcon;
+	assert(!(req->flags & EVHTTP_REQ_OWN_CONNECTION));
+	
+	TAILQ_INSERT_TAIL(&evcon->requests, req, next);
+
+	/* If the connection object is not connected; make it so */
+	if (!evhttp_connected(evcon))
+		return (evhttp_connection_connect(evcon));
+
+	/*
+	 * If it's connected already and we are the first in the queue,
+	 * then we can dispatch this request immediately.  Otherwise, it
+	 * will be dispatched once the pending requests are completed.
+	 */
+	if (TAILQ_FIRST(&evcon->requests) == req)
+		evhttp_request_dispatch(evcon);
+
+	return (0);
+}
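+
+/*
+ * A minimal client sketch tying evhttp_connection_new,
+ * evhttp_request_new and evhttp_make_request together (editor's
+ * illustration; the callback name, host and port are hypothetical):
+ */
+#if 0	/* illustrative only, not compiled */
+static void
+on_response(struct evhttp_request *req, void *arg)
+{
+	fprintf(stderr, "HTTP %d %s\n",
+	    req->response_code, req->response_code_line);
+	event_loopexit(NULL);
+}
+
+static void
+fetch_index(void)
+{
+	struct evhttp_connection *evcon;
+	struct evhttp_request *req;
+
+	event_init();
+	evcon = evhttp_connection_new("127.0.0.1", 8080);
+	req = evhttp_request_new(on_response, NULL);
+	evhttp_add_header(req->output_headers, "Host", "127.0.0.1");
+	evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/index.html");
+	event_dispatch();
+}
+#endif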
+
+/*
+ * Reads data from file descriptor into request structure
+ * Request structure needs to be set up correctly.
+ */
+
+void
+evhttp_start_read(struct evhttp_connection *evcon)
+{
+	/* Set up an event to read the headers */
+	if (event_initialized(&evcon->ev))
+		event_del(&evcon->ev);
+	event_set(&evcon->ev, evcon->fd, EV_READ, evhttp_read, evcon);
+	EVHTTP_BASE_SET(evcon, &evcon->ev);
+	
+	evhttp_add_event(&evcon->ev, evcon->timeout, HTTP_READ_TIMEOUT);
+	evcon->state = EVCON_READING_FIRSTLINE;
+}
+
+static void
+evhttp_send_done(struct evhttp_connection *evcon, void *arg)
+{
+	int need_close;
+	struct evhttp_request *req = TAILQ_FIRST(&evcon->requests);
+	TAILQ_REMOVE(&evcon->requests, req, next);
+
+	/* delete possible close detection events */
+	evhttp_connection_stop_detectclose(evcon);
+	
+	need_close =
+	    (req->minor == 0 &&
+		!evhttp_is_connection_keepalive(req->input_headers)) ||
+	    evhttp_is_connection_close(req->flags, req->input_headers) ||
+	    evhttp_is_connection_close(req->flags, req->output_headers);
+
+	assert(req->flags & EVHTTP_REQ_OWN_CONNECTION);
+	evhttp_request_free(req);
+
+	if (need_close) {
+		evhttp_connection_free(evcon);
+		return;
+	} 
+
+	/* we have a persistent connection; try to accept another request. */
+	if (evhttp_associate_new_request_with_connection(evcon) == -1)
+		evhttp_connection_free(evcon);
+}
+
+/*
+ * Returns an error page.
+ */
+
+void
+evhttp_send_error(struct evhttp_request *req, int error, const char *reason)
+{
+#define ERR_FORMAT "<HTML><HEAD>\n" \
+	    "<TITLE>%d %s</TITLE>\n" \
+	    "</HEAD><BODY>\n" \
+	    "<H1>Method Not Implemented</H1>\n" \
+	    "Invalid method in request<P>\n" \
+	    "</BODY></HTML>\n"
+
+	struct evbuffer *buf = evbuffer_new();
+
+	/* close the connection on error */
+	evhttp_add_header(req->output_headers, "Connection", "close");
+
+	evhttp_response_code(req, error, reason);
+
+	evbuffer_add_printf(buf, ERR_FORMAT, error, reason);
+
+	evhttp_send_page(req, buf);
+
+	evbuffer_free(buf);
+#undef ERR_FORMAT
+}
+
+/* Requires that headers and response code are already set up */
+
+static inline void
+evhttp_send(struct evhttp_request *req, struct evbuffer *databuf)
+{
+	struct evhttp_connection *evcon = req->evcon;
+
+	if (evcon == NULL) {
+		evhttp_request_free(req);
+		return;
+	}
+
+	assert(TAILQ_FIRST(&evcon->requests) == req);
+
+	/* we expect no more calls from the user on this request */
+	req->userdone = 1;
+
+	/* xxx: not sure if we really should expose the data buffer this way */
+	if (databuf != NULL)
+		evbuffer_add_buffer(req->output_buffer, databuf);
+	
+	/* Adds headers to the response */
+	evhttp_make_header(evcon, req);
+
+	evhttp_write_buffer(evcon, evhttp_send_done, NULL);
+}
+
+void
+evhttp_send_reply(struct evhttp_request *req, int code, const char *reason,
+    struct evbuffer *databuf)
+{
+	evhttp_response_code(req, code, reason);
+	
+	evhttp_send(req, databuf);
+}
+
+void
+evhttp_send_reply_start(struct evhttp_request *req, int code,
+    const char *reason)
+{
+	evhttp_response_code(req, code, reason);
+	if (req->major == 1 && req->minor == 1) {
+		/* use chunked encoding for HTTP/1.1 */
+		evhttp_add_header(req->output_headers, "Transfer-Encoding",
+		    "chunked");
+		req->chunked = 1;
+	}
+	evhttp_make_header(req->evcon, req);
+	evhttp_write_buffer(req->evcon, NULL, NULL);
+}
+
+void
+evhttp_send_reply_chunk(struct evhttp_request *req, struct evbuffer *databuf)
+{
+	struct evhttp_connection *evcon = req->evcon;
+
+	if (evcon == NULL)
+		return;
+
+	if (req->chunked) {
+		evbuffer_add_printf(evcon->output_buffer, "%x\r\n",
+				    (unsigned)EVBUFFER_LENGTH(databuf));
+	}
+	evbuffer_add_buffer(evcon->output_buffer, databuf);
+	if (req->chunked) {
+		evbuffer_add(evcon->output_buffer, "\r\n", 2);
+	}
+	evhttp_write_buffer(evcon, NULL, NULL);
+}
+
+void
+evhttp_send_reply_end(struct evhttp_request *req)
+{
+	struct evhttp_connection *evcon = req->evcon;
+
+	if (evcon == NULL) {
+		evhttp_request_free(req);
+		return;
+	}
+
+	/* we expect no more calls from the user on this request */
+	req->userdone = 1;
+
+	if (req->chunked) {
+		evbuffer_add(req->evcon->output_buffer, "0\r\n\r\n", 5);
+		evhttp_write_buffer(req->evcon, evhttp_send_done, NULL);
+		req->chunked = 0;
+	} else if (!event_pending(&evcon->ev, EV_WRITE|EV_TIMEOUT, NULL)) {
+		/* let the connection know that we are done with the request */
+		evhttp_send_done(evcon, NULL);
+	} else {
+		/* make the callback execute after all data has been written */
+		evcon->cb = evhttp_send_done;
+		evcon->cb_arg = NULL;
+	}
+}
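+
+/*
+ * Sketch of the start/chunk/end sequence above in a request handler
+ * (editor's illustration; the handler name is hypothetical):
+ */
+#if 0	/* illustrative only, not compiled */
+static void
+chunked_handler(struct evhttp_request *req, void *arg)
+{
+	struct evbuffer *buf = evbuffer_new();
+
+	evhttp_send_reply_start(req, HTTP_OK, "OK");
+	evbuffer_add_printf(buf, "chunk one\n");
+	evhttp_send_reply_chunk(req, buf);	/* moves buf's contents out */
+	evbuffer_add_printf(buf, "chunk two\n");
+	evhttp_send_reply_chunk(req, buf);
+	evhttp_send_reply_end(req);
+	evbuffer_free(buf);
+}
+#endif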
+
+void
+evhttp_response_code(struct evhttp_request *req, int code, const char *reason)
+{
+	req->kind = EVHTTP_RESPONSE;
+	req->response_code = code;
+	if (req->response_code_line != NULL)
+		free(req->response_code_line);
+	req->response_code_line = strdup(reason);
+}
+
+void
+evhttp_send_page(struct evhttp_request *req, struct evbuffer *databuf)
+{
+	if (!req->major || !req->minor) {
+		req->major = 1;
+		req->minor = 1;
+	}
+	
+	if (req->kind != EVHTTP_RESPONSE)
+		evhttp_response_code(req, 200, "OK");
+
+	evhttp_clear_headers(req->output_headers);
+	evhttp_add_header(req->output_headers, "Content-Type", "text/html");
+	evhttp_add_header(req->output_headers, "Connection", "close");
+
+	evhttp_send(req, databuf);
+}
+
+static const char uri_chars[256] = {
+	0, 0, 0, 0, 0, 0, 0, 0,   0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,   0, 0, 0, 0, 0, 0, 0, 0,
+	0, 1, 0, 0, 1, 0, 0, 1,   1, 1, 1, 1, 1, 1, 1, 1,
+	1, 1, 1, 1, 1, 1, 1, 1,   1, 1, 1, 0, 0, 1, 0, 0,
+	/* 64 */
+	1, 1, 1, 1, 1, 1, 1, 1,   1, 1, 1, 1, 1, 1, 1, 1,
+	1, 1, 1, 1, 1, 1, 1, 1,   1, 1, 1, 0, 0, 0, 0, 1,
+	0, 1, 1, 1, 1, 1, 1, 1,   1, 1, 1, 1, 1, 1, 1, 1,
+	1, 1, 1, 1, 1, 1, 1, 1,   1, 1, 1, 0, 0, 0, 1, 0,
+	/* 128 */
+	0, 0, 0, 0, 0, 0, 0, 0,   0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,   0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,   0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,   0, 0, 0, 0, 0, 0, 0, 0,
+	/* 192 */
+	0, 0, 0, 0, 0, 0, 0, 0,   0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,   0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,   0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,   0, 0, 0, 0, 0, 0, 0, 0,
+};
+
+/*
+ * Helper functions to encode/decode a URI.
+ * The returned string must be freed by the caller.
+ */
+char *
+evhttp_encode_uri(const char *uri)
+{
+	struct evbuffer *buf = evbuffer_new();
+	char *p;
+
+	for (p = (char *)uri; *p != '\0'; p++) {
+		if (uri_chars[(u_char)(*p)]) {
+			evbuffer_add(buf, p, 1);
+		} else {
+			evbuffer_add_printf(buf, "%%%02X", (u_char)(*p));
+		}
+	}
+	evbuffer_add(buf, "", 1);
+	p = strdup((char *)EVBUFFER_DATA(buf));
+	evbuffer_free(buf);
+	
+	return (p);
+}
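+
+/*
+ * Round-trip example (editor's illustration): every byte whose entry in
+ * uri_chars is 0 gets escaped, so "a b&c" encodes to "a%20b%26c".  Both
+ * returned strings must be freed by the caller.
+ */
+#if 0	/* illustrative only, not compiled */
+static void
+uri_roundtrip_example(void)
+{
+	char *enc = evhttp_encode_uri("a b&c");	/* "a%20b%26c" */
+	char *dec = evhttp_decode_uri(enc);	/* back to "a b&c" */
+	free(enc);
+	free(dec);
+}
+#endif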
+
+/*
+ * @param always_decode_plus: when true we transform plus to space even
+ *     if we have not seen a ?.
+ */
+static int
+evhttp_decode_uri_internal(
+	const char *uri, size_t length, char *ret, int always_decode_plus)
+{
+	char c;
+	int i, j, in_query = always_decode_plus;
+	
+	for (i = j = 0; uri[i] != '\0'; i++) {
+		c = uri[i];
+		if (c == '?') {
+			in_query = 1;
+		} else if (c == '+' && in_query) {
+			c = ' ';
+		} else if (c == '%' && isxdigit((unsigned char)uri[i+1]) &&
+		    isxdigit((unsigned char)uri[i+2])) {
+			char tmp[] = { uri[i+1], uri[i+2], '\0' };
+			c = (char)strtol(tmp, NULL, 16);
+			i += 2;
+		}
+		ret[j++] = c;
+	}
+	ret[j] = '\0';
+
+	return (j);
+}
+
+char *
+evhttp_decode_uri(const char *uri)
+{
+	char *ret;
+
+	if ((ret = malloc(strlen(uri) + 1)) == NULL)
+		event_err(1, "%s: malloc(%lu)", __func__,
+			  (unsigned long)(strlen(uri) + 1));
+
+	evhttp_decode_uri_internal(uri, strlen(uri),
+	    ret, 0 /*always_decode_plus*/);
+
+	return (ret);
+}
+
+/*
+ * Helper function to parse out arguments in a query.
+ * Each argument is split into a key and a value.
+ */
+
+void
+evhttp_parse_query(const char *uri, struct evkeyvalq *headers)
+{
+	char *line;
+	char *argument;
+	char *p;
+
+	TAILQ_INIT(headers);
+
+	/* No arguments - we are done */
+	if (strchr(uri, '?') == NULL)
+		return;
+
+	if ((line = strdup(uri)) == NULL)
+		event_err(1, "%s: strdup", __func__);
+
+	argument = line;
+
+	/* We already know that there has to be a ? */
+	strsep(&argument, "?");
+
+	p = argument;
+	while (p != NULL && *p != '\0') {
+		char *key, *value, *decoded_value;
+		argument = strsep(&p, "&");
+
+		value = argument;
+		key = strsep(&value, "=");
+		if (value == NULL)
+			goto error;
+
+		if ((decoded_value = malloc(strlen(value) + 1)) == NULL)
+			event_err(1, "%s: malloc", __func__);
+
+		evhttp_decode_uri_internal(value, strlen(value),
+		    decoded_value, 1 /*always_decode_plus*/);
+		event_debug(("Query Param: %s -> %s\n", key, decoded_value));
+		evhttp_add_header_internal(headers, key, decoded_value);
+		free(decoded_value);
+	}
+
+ error:
+	free(line);
+}
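+
+/*
+ * Usage sketch (editor's illustration): for a request URI such as
+ * "/search?q=hello+world&n=10" the table ends up mapping q to
+ * "hello world" (plus decodes to space here) and n to "10".
+ */
+#if 0	/* illustrative only, not compiled */
+static void
+query_example(struct evhttp_request *req)
+{
+	struct evkeyvalq params;
+
+	evhttp_parse_query(evhttp_request_uri(req), &params);
+	fprintf(stderr, "q=%s\n", evhttp_find_header(&params, "q"));
+	evhttp_clear_headers(&params);
+}
+#endif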
+
+static struct evhttp_cb *
+evhttp_dispatch_callback(struct httpcbq *callbacks, struct evhttp_request *req)
+{
+	struct evhttp_cb *cb;
+	size_t offset = 0;
+
+	/* Test for different URLs */
+	char *p = strchr(req->uri, '?');
+	if (p != NULL)
+		offset = (size_t)(p - req->uri);
+
+	TAILQ_FOREACH(cb, callbacks, next) {
+		int res = 0;
+		if (p == NULL) {
+			res = strcmp(cb->what, req->uri) == 0;
+		} else {
+			res = ((strncmp(cb->what, req->uri, offset) == 0) &&
+					(cb->what[offset] == '\0'));
+		}
+
+		if (res)
+			return (cb);
+	}
+
+	return (NULL);
+}
+
+static void
+evhttp_handle_request(struct evhttp_request *req, void *arg)
+{
+	struct evhttp *http = arg;
+	struct evhttp_cb *cb = NULL;
+
+	event_debug(("%s: req->uri=%s", __func__, req->uri));
+	if (req->uri == NULL) {
+		event_debug(("%s: bad request", __func__));
+		if (req->evcon->state == EVCON_DISCONNECTED) {
+			evhttp_connection_fail(req->evcon, EVCON_HTTP_EOF);
+		} else {
+			event_debug(("%s: sending error", __func__));
+			evhttp_send_error(req, HTTP_BADREQUEST, "Bad Request");
+		}
+		return;
+	}
+
+	if ((cb = evhttp_dispatch_callback(&http->callbacks, req)) != NULL) {
+		(*cb->cb)(req, cb->cbarg);
+		return;
+	}
+
+	/* Generic call back */
+	if (http->gencb) {
+		(*http->gencb)(req, http->gencbarg);
+		return;
+	} else {
+		/* We need to send a 404 here */
+#define ERR_FORMAT "<html><head>" \
+		    "<title>404 Not Found</title>" \
+		    "</head><body>" \
+		    "<h1>Not Found</h1>" \
+		    "<p>The requested URL %s was not found on this server.</p>"\
+		    "</body></html>\n"
+
+		char *escaped_html = evhttp_htmlescape(req->uri);
+		struct evbuffer *buf = evbuffer_new();
+
+		evhttp_response_code(req, HTTP_NOTFOUND, "Not Found");
+
+		evbuffer_add_printf(buf, ERR_FORMAT, escaped_html);
+
+		free(escaped_html);
+
+		evhttp_send_page(req, buf);
+
+		evbuffer_free(buf);
+#undef ERR_FORMAT
+	}
+}
+
+static void
+accept_socket(int fd, short what, void *arg)
+{
+	struct evhttp *http = arg;
+	struct sockaddr_storage ss;
+	socklen_t addrlen = sizeof(ss);
+	int nfd;
+
+	if ((nfd = accept(fd, (struct sockaddr *)&ss, &addrlen)) == -1) {
+		if (errno != EAGAIN && errno != EINTR)
+			event_warn("%s: bad accept", __func__);
+		return;
+	}
+	if (evutil_make_socket_nonblocking(nfd) < 0)
+		return;
+
+	evhttp_get_request(http, nfd, (struct sockaddr *)&ss, addrlen);
+}
+
+int
+evhttp_bind_socket(struct evhttp *http, const char *address, u_short port)
+{
+	int fd;
+	int res;
+
+	if ((fd = bind_socket(address, port, 1 /*reuse*/)) == -1)
+		return (-1);
+
+	if (listen(fd, 128) == -1) {
+		event_warn("%s: listen", __func__);
+		EVUTIL_CLOSESOCKET(fd);
+		return (-1);
+	}
+
+	res = evhttp_accept_socket(http, fd);
+	
+	if (res != -1)
+		event_debug(("Bound to port %d - Awaiting connections ... ",
+			port));
+
+	return (res);
+}
+
+int
+evhttp_accept_socket(struct evhttp *http, int fd)
+{
+	struct evhttp_bound_socket *bound;
+	struct event *ev;
+	int res;
+
+	bound = malloc(sizeof(struct evhttp_bound_socket));
+	if (bound == NULL)
+		return (-1);
+
+	ev = &bound->bind_ev;
+
+	/* Schedule the socket for accepting */
+	event_set(ev, fd, EV_READ | EV_PERSIST, accept_socket, http);
+	EVHTTP_BASE_SET(http, ev);
+
+	res = event_add(ev, NULL);
+
+	if (res == -1) {
+		free(bound);
+		return (-1);
+	}
+
+	TAILQ_INSERT_TAIL(&http->sockets, bound, next);
+
+	return (0);
+}
+
+static struct evhttp*
+evhttp_new_object(void)
+{
+	struct evhttp *http = NULL;
+
+	if ((http = calloc(1, sizeof(struct evhttp))) == NULL) {
+		event_warn("%s: calloc", __func__);
+		return (NULL);
+	}
+
+	http->timeout = -1;
+
+	TAILQ_INIT(&http->sockets);
+	TAILQ_INIT(&http->callbacks);
+	TAILQ_INIT(&http->connections);
+
+	return (http);
+}
+
+struct evhttp *
+evhttp_new(struct event_base *base)
+{
+	struct evhttp *http = evhttp_new_object();
+
+	http->base = base;
+
+	return (http);
+}
+
+/*
+ * Start a web server on the specified address and port.
+ */
+
+struct evhttp *
+evhttp_start(const char *address, u_short port)
+{
+	struct evhttp *http = evhttp_new_object();
+
+	if (evhttp_bind_socket(http, address, port) == -1) {
+		free(http);
+		return (NULL);
+	}
+
+	return (http);
+}
+
+void
+evhttp_free(struct evhttp* http)
+{
+	struct evhttp_cb *http_cb;
+	struct evhttp_connection *evcon;
+	struct evhttp_bound_socket *bound;
+	int fd;
+
+	/* Remove the accepting part */
+	while ((bound = TAILQ_FIRST(&http->sockets)) != NULL) {
+		TAILQ_REMOVE(&http->sockets, bound, next);
+
+		fd = bound->bind_ev.ev_fd;
+		event_del(&bound->bind_ev);
+		EVUTIL_CLOSESOCKET(fd);
+
+		free(bound);
+	}
+
+	while ((evcon = TAILQ_FIRST(&http->connections)) != NULL) {
+		/* evhttp_connection_free removes the connection */
+		evhttp_connection_free(evcon);
+	}
+
+	while ((http_cb = TAILQ_FIRST(&http->callbacks)) != NULL) {
+		TAILQ_REMOVE(&http->callbacks, http_cb, next);
+		free(http_cb->what);
+		free(http_cb);
+	}
+	
+	free(http);
+}
+
+void
+evhttp_set_timeout(struct evhttp* http, int timeout_in_secs)
+{
+	http->timeout = timeout_in_secs;
+}
+
+void
+evhttp_set_cb(struct evhttp *http, const char *uri,
+    void (*cb)(struct evhttp_request *, void *), void *cbarg)
+{
+	struct evhttp_cb *http_cb;
+
+	if ((http_cb = calloc(1, sizeof(struct evhttp_cb))) == NULL)
+		event_err(1, "%s: calloc", __func__);
+
+	http_cb->what = strdup(uri);
+	http_cb->cb = cb;
+	http_cb->cbarg = cbarg;
+
+	TAILQ_INSERT_TAIL(&http->callbacks, http_cb, next);
+}
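+
+/*
+ * A minimal server sketch tying evhttp_start and evhttp_set_cb together
+ * (editor's illustration; handler name, address and port are
+ * hypothetical):
+ */
+#if 0	/* illustrative only, not compiled */
+static void
+hello_handler(struct evhttp_request *req, void *arg)
+{
+	struct evbuffer *buf = evbuffer_new();
+
+	evbuffer_add_printf(buf, "hello\n");
+	evhttp_send_reply(req, HTTP_OK, "OK", buf);
+	evbuffer_free(buf);
+}
+
+static void
+serve(void)
+{
+	struct evhttp *http;
+
+	event_init();
+	http = evhttp_start("0.0.0.0", 8080);
+	evhttp_set_cb(http, "/hello", hello_handler, NULL);
+	event_dispatch();
+}
+#endif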
+
+int
+evhttp_del_cb(struct evhttp *http, const char *uri)
+{
+	struct evhttp_cb *http_cb;
+
+	TAILQ_FOREACH(http_cb, &http->callbacks, next) {
+		if (strcmp(http_cb->what, uri) == 0)
+			break;
+	}
+	if (http_cb == NULL)
+		return (-1);
+
+	TAILQ_REMOVE(&http->callbacks, http_cb, next);
+	free(http_cb->what);
+	free(http_cb);
+
+	return (0);
+}
+
+void
+evhttp_set_gencb(struct evhttp *http,
+    void (*cb)(struct evhttp_request *, void *), void *cbarg)
+{
+	http->gencb = cb;
+	http->gencbarg = cbarg;
+}
+
+/*
+ * Request related functions
+ */
+
+struct evhttp_request *
+evhttp_request_new(void (*cb)(struct evhttp_request *, void *), void *arg)
+{
+	struct evhttp_request *req = NULL;
+
+	/* Allocate request structure */
+	if ((req = calloc(1, sizeof(struct evhttp_request))) == NULL) {
+		event_warn("%s: calloc", __func__);
+		goto error;
+	}
+
+	req->kind = EVHTTP_RESPONSE;
+	req->input_headers = calloc(1, sizeof(struct evkeyvalq));
+	if (req->input_headers == NULL) {
+		event_warn("%s: calloc", __func__);
+		goto error;
+	}
+	TAILQ_INIT(req->input_headers);
+
+	req->output_headers = calloc(1, sizeof(struct evkeyvalq));
+	if (req->output_headers == NULL) {
+		event_warn("%s: calloc", __func__);
+		goto error;
+	}
+	TAILQ_INIT(req->output_headers);
+
+	if ((req->input_buffer = evbuffer_new()) == NULL) {
+		event_warn("%s: evbuffer_new", __func__);
+		goto error;
+	}
+
+	if ((req->output_buffer = evbuffer_new()) == NULL) {
+		event_warn("%s: evbuffer_new", __func__);
+		goto error;
+	}
+
+	req->cb = cb;
+	req->cb_arg = arg;
+
+	return (req);
+
+ error:
+	if (req != NULL)
+		evhttp_request_free(req);
+	return (NULL);
+}
+
+void
+evhttp_request_free(struct evhttp_request *req)
+{
+	if (req->remote_host != NULL)
+		free(req->remote_host);
+	if (req->uri != NULL)
+		free(req->uri);
+	if (req->response_code_line != NULL)
+		free(req->response_code_line);
+
+	evhttp_clear_headers(req->input_headers);
+	free(req->input_headers);
+
+	evhttp_clear_headers(req->output_headers);
+	free(req->output_headers);
+
+	if (req->input_buffer != NULL)
+		evbuffer_free(req->input_buffer);
+
+	if (req->output_buffer != NULL)
+		evbuffer_free(req->output_buffer);
+
+	free(req);
+}
+
+struct evhttp_connection *
+evhttp_request_get_connection(struct evhttp_request *req)
+{
+	return req->evcon;
+}
+
+
+void
+evhttp_request_set_chunked_cb(struct evhttp_request *req,
+    void (*cb)(struct evhttp_request *, void *))
+{
+	req->chunk_cb = cb;
+}
+
+/*
+ * Allows for inspection of the request URI
+ */
+
+const char *
+evhttp_request_uri(struct evhttp_request *req) {
+	if (req->uri == NULL)
+		event_debug(("%s: request %p has no uri\n", __func__, req));
+	return (req->uri);
+}
+
+/*
+ * Takes a file descriptor to read a request from.
+ * The callback is executed once the whole request has been read.
+ */
+
+static struct evhttp_connection*
+evhttp_get_request_connection(
+	struct evhttp* http,
+	int fd, struct sockaddr *sa, socklen_t salen)
+{
+	struct evhttp_connection *evcon;
+	char *hostname = NULL, *portname = NULL;
+
+	name_from_addr(sa, salen, &hostname, &portname);
+	if (hostname == NULL || portname == NULL) {
+		if (hostname) free(hostname);
+		if (portname) free(portname);
+		return (NULL);
+	}
+
+	event_debug(("%s: new request from %s:%s on %d\n",
+			__func__, hostname, portname, fd));
+
+	/* we need a connection object to put the http request on */
+	evcon = evhttp_connection_new(hostname, atoi(portname));
+	free(hostname);
+	free(portname);
+	if (evcon == NULL)
+		return (NULL);
+
+	/* associate the base if we have one*/
+	evhttp_connection_set_base(evcon, http->base);
+
+	evcon->flags |= EVHTTP_CON_INCOMING;
+	evcon->state = EVCON_READING_FIRSTLINE;
+	
+	evcon->fd = fd;
+
+	return (evcon);
+}
+
+static int
+evhttp_associate_new_request_with_connection(struct evhttp_connection *evcon)
+{
+	struct evhttp *http = evcon->http_server;
+	struct evhttp_request *req;
+	if ((req = evhttp_request_new(evhttp_handle_request, http)) == NULL)
+		return (-1);
+
+	req->evcon = evcon;	/* the request ends up owning the connection */
+	req->flags |= EVHTTP_REQ_OWN_CONNECTION;
+	
+	TAILQ_INSERT_TAIL(&evcon->requests, req, next);
+	
+	req->kind = EVHTTP_REQUEST;
+	
+	if ((req->remote_host = strdup(evcon->address)) == NULL)
+		event_err(1, "%s: strdup", __func__);
+	req->remote_port = evcon->port;
+
+	evhttp_start_read(evcon);
+	
+	return (0);
+}
+
+void
+evhttp_get_request(struct evhttp *http, int fd,
+    struct sockaddr *sa, socklen_t salen)
+{
+	struct evhttp_connection *evcon;
+
+	evcon = evhttp_get_request_connection(http, fd, sa, salen);
+	if (evcon == NULL)
+		return;
+
+	/* the timeout can be used by the server to close idle connections */
+	if (http->timeout != -1)
+		evhttp_connection_set_timeout(evcon, http->timeout);
+
+	/* 
+	 * if we want to accept more than one request on a connection,
+	 * we need to know which http server it belongs to.
+	 */
+	evcon->http_server = http;
+	TAILQ_INSERT_TAIL(&http->connections, evcon, next);
+	
+	if (evhttp_associate_new_request_with_connection(evcon) == -1)
+		evhttp_connection_free(evcon);
+}
+
+
+/*
+ * Network helper functions that we do not want to export to the rest of
+ * the world.
+ */
+#if 0 /* Unused */
+static struct addrinfo *
+addr_from_name(char *address)
+{
+#ifdef HAVE_GETADDRINFO
+        struct addrinfo ai, *aitop;
+        int ai_result;
+
+        memset(&ai, 0, sizeof(ai));
+        ai.ai_family = AF_INET;
+        ai.ai_socktype = SOCK_RAW;
+        ai.ai_flags = 0;
+        if ((ai_result = getaddrinfo(address, NULL, &ai, &aitop)) != 0) {
+                if ( ai_result == EAI_SYSTEM )
+                        event_warn("getaddrinfo");
+                else
+                        event_warnx("getaddrinfo: %s", gai_strerror(ai_result));
+        }
+
+	return (aitop);
+#else
+	assert(0);
+	return NULL; /* XXXXX Use gethostbyname, if this function is ever used. */
+#endif
+}
+#endif
+
+static void
+name_from_addr(struct sockaddr *sa, socklen_t salen,
+    char **phost, char **pport)
+{
+	char ntop[NI_MAXHOST];
+	char strport[NI_MAXSERV];
+	int ni_result;
+
+#ifdef HAVE_GETNAMEINFO
+	ni_result = getnameinfo(sa, salen,
+		ntop, sizeof(ntop), strport, sizeof(strport),
+		NI_NUMERICHOST|NI_NUMERICSERV);
+	
+	if (ni_result != 0) {
+		if (ni_result == EAI_SYSTEM)
+			event_err(1, "getnameinfo failed");
+		else
+			event_errx(1, "getnameinfo failed: %s", gai_strerror(ni_result));
+		return;
+	}
+#else
+	ni_result = fake_getnameinfo(sa, salen,
+		ntop, sizeof(ntop), strport, sizeof(strport),
+		NI_NUMERICHOST|NI_NUMERICSERV);
+	if (ni_result != 0)
+			return;
+#endif
+	*phost = strdup(ntop);
+	*pport = strdup(strport);
+}
+
+/* Create a non-blocking socket and bind it */
+/* todo: rename this function */
+static int
+bind_socket_ai(struct addrinfo *ai, int reuse)
+{
+        int fd, on = 1, r;
+	int serrno;
+
+        /* Create listen socket */
+        fd = socket(AF_INET, SOCK_STREAM, 0);
+        if (fd == -1) {
+                event_warn("socket");
+                return (-1);
+        }
+
+        if (evutil_make_socket_nonblocking(fd) < 0)
+                goto out;
+
+#ifndef WIN32
+        if (fcntl(fd, F_SETFD, 1) == -1) {
+                event_warn("fcntl(F_SETFD)");
+                goto out;
+        }
+#endif
+
+        setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (void *)&on, sizeof(on));
+	if (reuse) {
+		setsockopt(fd, SOL_SOCKET, SO_REUSEADDR,
+		    (void *)&on, sizeof(on));
+	}
+
+	if (ai != NULL) {
+		r = bind(fd, ai->ai_addr, ai->ai_addrlen);
+		if (r == -1)
+			goto out;
+	}
+
+	return (fd);
+
+ out:
+	serrno = EVUTIL_SOCKET_ERROR();
+	EVUTIL_CLOSESOCKET(fd);
+	EVUTIL_SET_SOCKET_ERROR(serrno);
+	return (-1);
+}
+
+static struct addrinfo *
+make_addrinfo(const char *address, u_short port)
+{
+        struct addrinfo *aitop = NULL;
+
+#ifdef HAVE_GETADDRINFO
+        struct addrinfo ai;
+        char strport[NI_MAXSERV];
+        int ai_result;
+
+        memset(&ai, 0, sizeof(ai));
+        ai.ai_family = AF_INET;
+        ai.ai_socktype = SOCK_STREAM;
+        ai.ai_flags = AI_PASSIVE;  /* turn NULL host name into INADDR_ANY */
+        evutil_snprintf(strport, sizeof(strport), "%d", port);
+        if ((ai_result = getaddrinfo(address, strport, &ai, &aitop)) != 0) {
+                if ( ai_result == EAI_SYSTEM )
+                        event_warn("getaddrinfo");
+                else
+                        event_warnx("getaddrinfo: %s", gai_strerror(ai_result));
+		return (NULL);
+        }
+#else
+	static int cur;
+	static struct addrinfo ai[2]; /* We will be returning the address of some of this memory so it has to last even after this call. */
+	if (++cur == 2) cur = 0;   /* allow calling this function twice */
+
+	if (fake_getaddrinfo(address, &ai[cur]) < 0) {
+		event_warn("fake_getaddrinfo");
+		return (NULL);
+	}
+	aitop = &ai[cur];
+	((struct sockaddr_in *) aitop->ai_addr)->sin_port = htons(port);
+#endif
+
+	return (aitop);
+}
+
+static int
+bind_socket(const char *address, u_short port, int reuse)
+{
+	int fd;
+	struct addrinfo *aitop = NULL;
+
+	/* just create an unbound socket */
+	if (address == NULL && port == 0)
+		return bind_socket_ai(NULL, 0);
+		
+	aitop = make_addrinfo(address, port);
+
+	if (aitop == NULL)
+		return (-1);
+
+	fd = bind_socket_ai(aitop, reuse);
+
+#ifdef HAVE_GETADDRINFO
+	freeaddrinfo(aitop);
+#else
+	fake_freeaddrinfo(aitop);
+#endif
+
+	return (fd);
+}
+
+static int
+socket_connect(int fd, const char *address, unsigned short port)
+{
+	struct addrinfo *ai = make_addrinfo(address, port);
+	int res = -1;
+
+	if (ai == NULL) {
+		event_debug(("%s: make_addrinfo: \"%s:%d\"",
+			__func__, address, port));
+		return (-1);
+	}
+
+	if (connect(fd, ai->ai_addr, ai->ai_addrlen) == -1) {
+#ifdef WIN32
+		int tmp_error = WSAGetLastError();
+		if (tmp_error != WSAEWOULDBLOCK && tmp_error != WSAEINVAL &&
+		    tmp_error != WSAEINPROGRESS) {
+			goto out;
+		}
+#else
+		if (errno != EINPROGRESS) {
+			goto out;
+		}
+#endif
+	}
+
+	/* everything is fine */
+	res = 0;
+
+out:
+#ifdef HAVE_GETADDRINFO
+	freeaddrinfo(ai);
+#else
+	fake_freeaddrinfo(ai);
+#endif
+
+	return (res);
+}
diff --git a/base/third_party/libevent/kqueue.c b/base/third_party/libevent/kqueue.c
new file mode 100644
index 0000000..3c2ffd5
--- /dev/null
+++ b/base/third_party/libevent/kqueue.c
@@ -0,0 +1,433 @@
+/*	$OpenBSD: kqueue.c,v 1.5 2002/07/10 14:41:31 art Exp $	*/
+
+/*
+ * Copyright 2000-2002 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#define _GNU_SOURCE 1
+
+#include <sys/types.h>
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <sys/_libevent_time.h>
+#endif
+#include <sys/queue.h>
+#include <sys/event.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+#include <assert.h>
+#ifdef HAVE_INTTYPES_H
+#include <inttypes.h>
+#endif
+
+/* Some platforms apparently define the udata field of struct kevent as
+ * intptr_t, whereas others define it as void*.  There doesn't seem to be an
+ * easy way to tell them apart via autoconf, so we need to use OS macros. */
+#if defined(HAVE_INTTYPES_H) && !defined(__OpenBSD__) && !defined(__FreeBSD__) && !defined(__darwin__) && !defined(__APPLE__)
+#define PTR_TO_UDATA(x)	((intptr_t)(x))
+#else
+#define PTR_TO_UDATA(x)	(x)
+#endif
+
+#include "event.h"
+#include "event-internal.h"
+#include "log.h"
+#include "evsignal.h"
+
+#define EVLIST_X_KQINKERNEL	0x1000
+
+#define NEVENT		64
+
+struct kqop {
+	struct kevent *changes;
+	int nchanges;
+	struct kevent *events;
+	struct event_list evsigevents[NSIG];
+	int nevents;
+	int kq;
+	pid_t pid;
+};
+
+static void *kq_init	(struct event_base *);
+static int kq_add	(void *, struct event *);
+static int kq_del	(void *, struct event *);
+static int kq_dispatch	(struct event_base *, void *, struct timeval *);
+static int kq_insert	(struct kqop *, struct kevent *);
+static void kq_dealloc (struct event_base *, void *);
+
+const struct eventop kqops = {
+	"kqueue",
+	kq_init,
+	kq_add,
+	kq_del,
+	kq_dispatch,
+	kq_dealloc,
+	1 /* need reinit */
+};
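+
+/*
+ * Editor's note: kqops is this backend's entry in the table of event
+ * mechanisms (select, poll, epoll, kqueue, ...); event_base_new() walks
+ * that table and uses the first backend whose init function succeeds.
+ */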
+
+static void *
+kq_init(struct event_base *base)
+{
+	int i, kq;
+	struct kqop *kqueueop;
+
+	/* Disable kqueue when this environment variable is set */
+	if (evutil_getenv("EVENT_NOKQUEUE"))
+		return (NULL);
+
+	if (!(kqueueop = calloc(1, sizeof(struct kqop))))
+		return (NULL);
+
+	/* Initialize the kernel queue */
+	
+	if ((kq = kqueue()) == -1) {
+		event_warn("kqueue");
+		free (kqueueop);
+		return (NULL);
+	}
+
+	kqueueop->kq = kq;
+
+	kqueueop->pid = getpid();
+
+	/* Initialize fields */
+	kqueueop->changes = malloc(NEVENT * sizeof(struct kevent));
+	if (kqueueop->changes == NULL) {
+		free (kqueueop);
+		return (NULL);
+	}
+	kqueueop->events = malloc(NEVENT * sizeof(struct kevent));
+	if (kqueueop->events == NULL) {
+		free (kqueueop->changes);
+		free (kqueueop);
+		return (NULL);
+	}
+	kqueueop->nevents = NEVENT;
+
+	/* we need to keep track of multiple events per signal */
+	for (i = 0; i < NSIG; ++i) {
+		TAILQ_INIT(&kqueueop->evsigevents[i]);
+	}
+
+	return (kqueueop);
+}
+
+static int
+kq_insert(struct kqop *kqop, struct kevent *kev)
+{
+	int nevents = kqop->nevents;
+
+	if (kqop->nchanges == nevents) {
+		struct kevent *newchange;
+		struct kevent *newresult;
+
+		nevents *= 2;
+
+		newchange = realloc(kqop->changes,
+				    nevents * sizeof(struct kevent));
+		if (newchange == NULL) {
+			event_warn("%s: malloc", __func__);
+			return (-1);
+		}
+		kqop->changes = newchange;
+
+		newresult = realloc(kqop->events,
+				    nevents * sizeof(struct kevent));
+
+		/*
+		 * If we fail, we don't have to worry about freeing;
+		 * the next realloc will pick it up.
+		 */
+		if (newresult == NULL) {
+			event_warn("%s: realloc", __func__);
+			return (-1);
+		}
+		kqop->events = newresult;
+
+		kqop->nevents = nevents;
+	}
+
+	memcpy(&kqop->changes[kqop->nchanges++], kev, sizeof(struct kevent));
+
+	event_debug(("%s: fd %d %s%s",
+		__func__, (int)kev->ident, 
+		kev->filter == EVFILT_READ ? "EVFILT_READ" : "EVFILT_WRITE",
+		kev->flags == EV_DELETE ? " (del)" : ""));
+
+	return (0);
+}
+
+static void
+kq_sighandler(int sig)
+{
+	/* Do nothing here */
+}
+
+static int
+kq_dispatch(struct event_base *base, void *arg, struct timeval *tv)
+{
+	struct kqop *kqop = arg;
+	struct kevent *changes = kqop->changes;
+	struct kevent *events = kqop->events;
+	struct event *ev;
+	struct timespec ts, *ts_p = NULL;
+	int i, res;
+
+	if (tv != NULL) {
+		TIMEVAL_TO_TIMESPEC(tv, &ts);
+		ts_p = &ts;
+	}
+
+	res = kevent(kqop->kq, changes, kqop->nchanges,
+	    events, kqop->nevents, ts_p);
+	kqop->nchanges = 0;
+	if (res == -1) {
+		if (errno != EINTR) {
+                        event_warn("kevent");
+			return (-1);
+		}
+
+		return (0);
+	}
+
+	event_debug(("%s: kevent reports %d", __func__, res));
+
+	for (i = 0; i < res; i++) {
+		int which = 0;
+
+		if (events[i].flags & EV_ERROR) {
+			/*
+			 * Error codes that can happen when a delete fails.
+			 *   EBADF happens when the file descriptor has been
+			 *   closed,
+			 *   ENOENT when the file descriptor was closed and
+			 *   then reopened.
+			 *   EINVAL for reasons not understood; EINVAL
+			 *   should never be returned, but FreeBSD does :-\
+			 * An error is also indicated when a callback deletes
+			 * an event we are still processing.  In that case
+			 * the data field is set to ENOENT.
+			 */
+			if (events[i].data == EBADF ||
+			    events[i].data == EINVAL ||
+			    events[i].data == ENOENT)
+				continue;
+			errno = events[i].data;
+			return (-1);
+		}
+
+		if (events[i].filter == EVFILT_READ) {
+			which |= EV_READ;
+		} else if (events[i].filter == EVFILT_WRITE) {
+			which |= EV_WRITE;
+		} else if (events[i].filter == EVFILT_SIGNAL) {
+			which |= EV_SIGNAL;
+		}
+
+		if (!which)
+			continue;
+
+		if (events[i].filter == EVFILT_SIGNAL) {
+			struct event_list *head =
+			    (struct event_list *)events[i].udata;
+			TAILQ_FOREACH(ev, head, ev_signal_next) {
+				event_active(ev, which, events[i].data);
+			}
+		} else {
+			ev = (struct event *)events[i].udata;
+
+			if (!(ev->ev_events & EV_PERSIST))
+				ev->ev_flags &= ~EVLIST_X_KQINKERNEL;
+
+			event_active(ev, which, 1);
+		}
+	}
+
+	return (0);
+}
+
+
+static int
+kq_add(void *arg, struct event *ev)
+{
+	struct kqop *kqop = arg;
+	struct kevent kev;
+
+	if (ev->ev_events & EV_SIGNAL) {
+		int nsignal = EVENT_SIGNAL(ev);
+
+		assert(nsignal >= 0 && nsignal < NSIG);
+		if (TAILQ_EMPTY(&kqop->evsigevents[nsignal])) {
+			struct timespec timeout = { 0, 0 };
+			
+			memset(&kev, 0, sizeof(kev));
+			kev.ident = nsignal;
+			kev.filter = EVFILT_SIGNAL;
+			kev.flags = EV_ADD;
+			kev.udata = PTR_TO_UDATA(&kqop->evsigevents[nsignal]);
+			
+			/* Be ready for the signal if it is sent any
+			 * time between now and the next call to
+			 * kq_dispatch. */
+			if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1)
+				return (-1);
+			
+			if (_evsignal_set_handler(ev->ev_base, nsignal,
+				kq_sighandler) == -1)
+				return (-1);
+		}
+
+		TAILQ_INSERT_TAIL(&kqop->evsigevents[nsignal], ev,
+		    ev_signal_next);
+		ev->ev_flags |= EVLIST_X_KQINKERNEL;
+		return (0);
+	}
+
+	if (ev->ev_events & EV_READ) {
+ 		memset(&kev, 0, sizeof(kev));
+		kev.ident = ev->ev_fd;
+		kev.filter = EVFILT_READ;
+#ifdef NOTE_EOF
+		/* Make it behave like select() and poll() */
+		kev.fflags = NOTE_EOF;
+#endif
+		kev.flags = EV_ADD;
+		if (!(ev->ev_events & EV_PERSIST))
+			kev.flags |= EV_ONESHOT;
+		kev.udata = PTR_TO_UDATA(ev);
+		
+		if (kq_insert(kqop, &kev) == -1)
+			return (-1);
+
+		ev->ev_flags |= EVLIST_X_KQINKERNEL;
+	}
+
+	if (ev->ev_events & EV_WRITE) {
+ 		memset(&kev, 0, sizeof(kev));
+		kev.ident = ev->ev_fd;
+		kev.filter = EVFILT_WRITE;
+		kev.flags = EV_ADD;
+		if (!(ev->ev_events & EV_PERSIST))
+			kev.flags |= EV_ONESHOT;
+		kev.udata = PTR_TO_UDATA(ev);
+		
+		if (kq_insert(kqop, &kev) == -1)
+			return (-1);
+
+		ev->ev_flags |= EVLIST_X_KQINKERNEL;
+	}
+
+	return (0);
+}
+
+static int
+kq_del(void *arg, struct event *ev)
+{
+	struct kqop *kqop = arg;
+	struct kevent kev;
+
+	if (!(ev->ev_flags & EVLIST_X_KQINKERNEL))
+		return (0);
+
+	if (ev->ev_events & EV_SIGNAL) {
+		int nsignal = EVENT_SIGNAL(ev);
+		struct timespec timeout = { 0, 0 };
+
+		assert(nsignal >= 0 && nsignal < NSIG);
+		TAILQ_REMOVE(&kqop->evsigevents[nsignal], ev, ev_signal_next);
+		if (TAILQ_EMPTY(&kqop->evsigevents[nsignal])) {
+			memset(&kev, 0, sizeof(kev));
+			kev.ident = nsignal;
+			kev.filter = EVFILT_SIGNAL;
+			kev.flags = EV_DELETE;
+		
+			/* Because we insert signal events
+			 * immediately, we need to delete them
+			 * immediately, too */
+			if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1)
+				return (-1);
+
+			if (_evsignal_restore_handler(ev->ev_base,
+				nsignal) == -1)
+				return (-1);
+		}
+
+		ev->ev_flags &= ~EVLIST_X_KQINKERNEL;
+		return (0);
+	}
+
+	if (ev->ev_events & EV_READ) {
+ 		memset(&kev, 0, sizeof(kev));
+		kev.ident = ev->ev_fd;
+		kev.filter = EVFILT_READ;
+		kev.flags = EV_DELETE;
+		
+		if (kq_insert(kqop, &kev) == -1)
+			return (-1);
+
+		ev->ev_flags &= ~EVLIST_X_KQINKERNEL;
+	}
+
+	if (ev->ev_events & EV_WRITE) {
+ 		memset(&kev, 0, sizeof(kev));
+		kev.ident = ev->ev_fd;
+		kev.filter = EVFILT_WRITE;
+		kev.flags = EV_DELETE;
+		
+		if (kq_insert(kqop, &kev) == -1)
+			return (-1);
+
+		ev->ev_flags &= ~EVLIST_X_KQINKERNEL;
+	}
+
+	return (0);
+}
+
+static void
+kq_dealloc(struct event_base *base, void *arg)
+{
+	struct kqop *kqop = arg;
+
+	evsignal_dealloc(base);
+
+	if (kqop->changes)
+		free(kqop->changes);
+	if (kqop->events)
+		free(kqop->events);
+	if (kqop->kq >= 0 && kqop->pid == getpid())
+		close(kqop->kq);
+
+	memset(kqop, 0, sizeof(struct kqop));
+	free(kqop);
+}
diff --git a/base/third_party/libevent/linux/config.h b/base/third_party/libevent/linux/config.h
new file mode 100644
index 0000000..c01ceb5
--- /dev/null
+++ b/base/third_party/libevent/linux/config.h
@@ -0,0 +1,266 @@
+/* config.h.  Generated from config.h.in by configure.  */
+/* config.h.in.  Generated from configure.in by autoheader.  */
+
+/* Define if clock_gettime is available in libc */
+#define DNS_USE_CPU_CLOCK_FOR_ID 1
+
+/* Define if no secure id variant is available */
+/* #undef DNS_USE_GETTIMEOFDAY_FOR_ID */
+
+/* Define to 1 if you have the `clock_gettime' function. */
+#define HAVE_CLOCK_GETTIME 1
+
+/* Define if /dev/poll is available */
+/* #undef HAVE_DEVPOLL */
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+#define HAVE_DLFCN_H 1
+
+/* Define if your system supports the epoll system calls */
+#define HAVE_EPOLL 1
+
+/* Define to 1 if you have the `epoll_ctl' function. */
+#define HAVE_EPOLL_CTL 1
+
+/* Define if your system supports event ports */
+/* #undef HAVE_EVENT_PORTS */
+
+/* Define to 1 if you have the `fcntl' function. */
+#define HAVE_FCNTL 1
+
+/* Define to 1 if you have the <fcntl.h> header file. */
+#define HAVE_FCNTL_H 1
+
+/* Define to 1 if the system has the type `fd_mask'. */
+#define HAVE_FD_MASK 1
+
+/* Define to 1 if you have the `getaddrinfo' function. */
+#define HAVE_GETADDRINFO 1
+
+/* Define to 1 if you have the `getegid' function. */
+#define HAVE_GETEGID 1
+
+/* Define to 1 if you have the `geteuid' function. */
+#define HAVE_GETEUID 1
+
+/* Define to 1 if you have the `getnameinfo' function. */
+#define HAVE_GETNAMEINFO 1
+
+/* Define to 1 if you have the `gettimeofday' function. */
+#define HAVE_GETTIMEOFDAY 1
+
+/* Define to 1 if you have the `inet_ntop' function. */
+#define HAVE_INET_NTOP 1
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#define HAVE_INTTYPES_H 1
+
+/* Define to 1 if you have the `issetugid' function. */
+/* #undef HAVE_ISSETUGID */
+
+/* Define to 1 if you have the `kqueue' function. */
+/* #undef HAVE_KQUEUE */
+
+/* Define to 1 if you have the `nsl' library (-lnsl). */
+#define HAVE_LIBNSL 1
+
+/* Define to 1 if you have the `resolv' library (-lresolv). */
+#define HAVE_LIBRESOLV 1
+
+/* Define to 1 if you have the `rt' library (-lrt). */
+#define HAVE_LIBRT 1
+
+/* Define to 1 if you have the `socket' library (-lsocket). */
+/* #undef HAVE_LIBSOCKET */
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define HAVE_MEMORY_H 1
+
+/* Define to 1 if you have the <netinet/in6.h> header file. */
+/* #undef HAVE_NETINET_IN6_H */
+
+/* Define to 1 if you have the `poll' function. */
+#define HAVE_POLL 1
+
+/* Define to 1 if you have the <poll.h> header file. */
+#define HAVE_POLL_H 1
+
+/* Define to 1 if you have the `port_create' function. */
+/* #undef HAVE_PORT_CREATE */
+
+/* Define to 1 if you have the <port.h> header file. */
+/* #undef HAVE_PORT_H */
+
+/* Define to 1 if you have the `select' function. */
+#define HAVE_SELECT 1
+
+/* Define if F_SETFD is defined in <fcntl.h> */
+#define HAVE_SETFD 1
+
+/* Define to 1 if you have the `sigaction' function. */
+#define HAVE_SIGACTION 1
+
+/* Define to 1 if you have the `signal' function. */
+#define HAVE_SIGNAL 1
+
+/* Define to 1 if you have the <signal.h> header file. */
+#define HAVE_SIGNAL_H 1
+
+/* Define to 1 if you have the <stdarg.h> header file. */
+#define HAVE_STDARG_H 1
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#define HAVE_STDINT_H 1
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define HAVE_STRING_H 1
+
+/* Define to 1 if you have the `strlcpy' function. */
+/* #undef HAVE_STRLCPY */
+
+/* Define to 1 if you have the `strsep' function. */
+#define HAVE_STRSEP 1
+
+/* Define to 1 if you have the `strtok_r' function. */
+#define HAVE_STRTOK_R 1
+
+/* Define to 1 if you have the `strtoll' function. */
+#define HAVE_STRTOLL 1
+
+/* Define to 1 if the system has the type `struct in6_addr'. */
+#define HAVE_STRUCT_IN6_ADDR 1
+
+/* Define to 1 if you have the <sys/devpoll.h> header file. */
+/* #undef HAVE_SYS_DEVPOLL_H */
+
+/* Define to 1 if you have the <sys/epoll.h> header file. */
+#define HAVE_SYS_EPOLL_H 1
+
+/* Define to 1 if you have the <sys/event.h> header file. */
+/* #undef HAVE_SYS_EVENT_H */
+
+/* Define to 1 if you have the <sys/ioctl.h> header file. */
+#define HAVE_SYS_IOCTL_H 1
+
+/* Define to 1 if you have the <sys/param.h> header file. */
+#define HAVE_SYS_PARAM_H 1
+
+/* Define to 1 if you have the <sys/queue.h> header file. */
+#define HAVE_SYS_QUEUE_H 1
+
+/* Define to 1 if you have the <sys/select.h> header file. */
+#define HAVE_SYS_SELECT_H 1
+
+/* Define to 1 if you have the <sys/socket.h> header file. */
+#define HAVE_SYS_SOCKET_H 1
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#define HAVE_SYS_STAT_H 1
+
+/* Define to 1 if you have the <sys/time.h> header file. */
+#define HAVE_SYS_TIME_H 1
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#define HAVE_SYS_TYPES_H 1
+
+/* Define if TAILQ_FOREACH is defined in <sys/queue.h> */
+#define HAVE_TAILQFOREACH 1
+
+/* Define if timeradd is defined in <sys/time.h> */
+#define HAVE_TIMERADD 1
+
+/* Define if timerclear is defined in <sys/time.h> */
+#define HAVE_TIMERCLEAR 1
+
+/* Define if timercmp is defined in <sys/time.h> */
+#define HAVE_TIMERCMP 1
+
+/* Define if timerisset is defined in <sys/time.h> */
+#define HAVE_TIMERISSET 1
+
+/* Define to 1 if the system has the type `uint16_t'. */
+#define HAVE_UINT16_T 1
+
+/* Define to 1 if the system has the type `uint32_t'. */
+#define HAVE_UINT32_T 1
+
+/* Define to 1 if the system has the type `uint64_t'. */
+#define HAVE_UINT64_T 1
+
+/* Define to 1 if the system has the type `uint8_t'. */
+#define HAVE_UINT8_T 1
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#define HAVE_UNISTD_H 1
+
+/* Define to 1 if you have the `vasprintf' function. */
+#define HAVE_VASPRINTF 1
+
+/* Define if kqueue works correctly with pipes */
+/* #undef HAVE_WORKING_KQUEUE */
+
+/* Name of package */
+#define PACKAGE "libevent"
+
+/* Define to the address where bug reports for this package should be sent. */
+#define PACKAGE_BUGREPORT ""
+
+/* Define to the full name of this package. */
+#define PACKAGE_NAME ""
+
+/* Define to the full name and version of this package. */
+#define PACKAGE_STRING ""
+
+/* Define to the one symbol short name of this package. */
+#define PACKAGE_TARNAME ""
+
+/* Define to the version of this package. */
+#define PACKAGE_VERSION ""
+
+/* The size of `int', as computed by sizeof. */
+#define SIZEOF_INT 4
+
+/* The size of `long', as computed by sizeof. */
+#define SIZEOF_LONG 8
+
+/* The size of `long long', as computed by sizeof. */
+#define SIZEOF_LONG_LONG 8
+
+/* The size of `short', as computed by sizeof. */
+#define SIZEOF_SHORT 2
+
+/* Define to 1 if you have the ANSI C header files. */
+#define STDC_HEADERS 1
+
+/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
+#define TIME_WITH_SYS_TIME 1
+
+/* Version number of package */
+#define VERSION "1.4.13-stable"
+
+/* Define to an appropriate substitute if the compiler doesn't have __func__ */
+/* #undef __func__ */
+
+/* Define to empty if `const' does not conform to ANSI C. */
+/* #undef const */
+
+/* Define to `__inline__' or `__inline' if that's what the C compiler
+   calls it, or to nothing if 'inline' is not supported under any name.  */
+#ifndef __cplusplus
+/* #undef inline */
+#endif
+
+/* Define to `int' if <sys/types.h> does not define. */
+/* #undef pid_t */
+
+/* Define to `unsigned int' if <sys/types.h> does not define. */
+/* #undef size_t */
+
+/* Define to unsigned int if you don't have it */
+/* #undef socklen_t */
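
These HAVE_* macros are consumed with the standard autoconf guard idiom; log.c later in this change uses the same HAVE_CONFIG_H pattern. A representative sketch (the epoll/poll fallback chain is illustrative, not copied from event.c):

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#ifdef HAVE_SYS_EPOLL_H
#include <sys/epoll.h>   /* the preferred backend under this Linux config */
#elif defined(HAVE_POLL_H)
#include <poll.h>        /* portable fallback */
#endif
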
diff --git a/base/third_party/libevent/linux/event-config.h b/base/third_party/libevent/linux/event-config.h
new file mode 100644
index 0000000..2203253
--- /dev/null
+++ b/base/third_party/libevent/linux/event-config.h
@@ -0,0 +1,284 @@
+/* event-config.h
+ * Generated by autoconf; post-processed by libevent.
+ * Do not edit this file.
+ * Do not rely on macros in this file existing in later versions.
+ */
+#ifndef _EVENT_CONFIG_H_
+#define _EVENT_CONFIG_H_
+/* config.h.  Generated from config.h.in by configure.  */
+/* config.h.in.  Generated from configure.in by autoheader.  */
+
+/* Define if clock_gettime is available in libc */
+#define _EVENT_DNS_USE_CPU_CLOCK_FOR_ID 1
+
+/* Define if no secure id variant is available */
+/* #undef _EVENT_DNS_USE_GETTIMEOFDAY_FOR_ID */
+
+/* Define to 1 if you have the `clock_gettime' function. */
+#define _EVENT_HAVE_CLOCK_GETTIME 1
+
+/* Define if /dev/poll is available */
+/* #undef _EVENT_HAVE_DEVPOLL */
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+#define _EVENT_HAVE_DLFCN_H 1
+
+/* Define if your system supports the epoll system calls */
+#define _EVENT_HAVE_EPOLL 1
+
+/* Define to 1 if you have the `epoll_ctl' function. */
+#define _EVENT_HAVE_EPOLL_CTL 1
+
+/* Define if your system supports event ports */
+/* #undef _EVENT_HAVE_EVENT_PORTS */
+
+/* Define to 1 if you have the `fcntl' function. */
+#define _EVENT_HAVE_FCNTL 1
+
+/* Define to 1 if you have the <fcntl.h> header file. */
+#define _EVENT_HAVE_FCNTL_H 1
+
+/* Define to 1 if the system has the type `fd_mask'. */
+#define _EVENT_HAVE_FD_MASK 1
+
+/* Define to 1 if you have the `getaddrinfo' function. */
+#define _EVENT_HAVE_GETADDRINFO 1
+
+/* Define to 1 if you have the `getegid' function. */
+#define _EVENT_HAVE_GETEGID 1
+
+/* Define to 1 if you have the `geteuid' function. */
+#define _EVENT_HAVE_GETEUID 1
+
+/* Define to 1 if you have the `getnameinfo' function. */
+#define _EVENT_HAVE_GETNAMEINFO 1
+
+/* Define to 1 if you have the `gettimeofday' function. */
+#define _EVENT_HAVE_GETTIMEOFDAY 1
+
+/* Define to 1 if you have the `inet_ntop' function. */
+#define _EVENT_HAVE_INET_NTOP 1
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#define _EVENT_HAVE_INTTYPES_H 1
+
+/* Define to 1 if you have the `issetugid' function. */
+/* #undef _EVENT_HAVE_ISSETUGID */
+
+/* Define to 1 if you have the `kqueue' function. */
+/* #undef _EVENT_HAVE_KQUEUE */
+
+/* Define to 1 if you have the `nsl' library (-lnsl). */
+#define _EVENT_HAVE_LIBNSL 1
+
+/* Define to 1 if you have the `resolv' library (-lresolv). */
+#define _EVENT_HAVE_LIBRESOLV 1
+
+/* Define to 1 if you have the `rt' library (-lrt). */
+#define _EVENT_HAVE_LIBRT 1
+
+/* Define to 1 if you have the `socket' library (-lsocket). */
+/* #undef _EVENT_HAVE_LIBSOCKET */
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define _EVENT_HAVE_MEMORY_H 1
+
+/* Define to 1 if you have the <netinet/in6.h> header file. */
+/* #undef _EVENT_HAVE_NETINET_IN6_H */
+
+/* Define to 1 if you have the `poll' function. */
+#define _EVENT_HAVE_POLL 1
+
+/* Define to 1 if you have the <poll.h> header file. */
+#define _EVENT_HAVE_POLL_H 1
+
+/* Define to 1 if you have the `port_create' function. */
+/* #undef _EVENT_HAVE_PORT_CREATE */
+
+/* Define to 1 if you have the <port.h> header file. */
+/* #undef _EVENT_HAVE_PORT_H */
+
+/* Define to 1 if you have the `select' function. */
+#define _EVENT_HAVE_SELECT 1
+
+/* Define if F_SETFD is defined in <fcntl.h> */
+#define _EVENT_HAVE_SETFD 1
+
+/* Define to 1 if you have the `sigaction' function. */
+#define _EVENT_HAVE_SIGACTION 1
+
+/* Define to 1 if you have the `signal' function. */
+#define _EVENT_HAVE_SIGNAL 1
+
+/* Define to 1 if you have the <signal.h> header file. */
+#define _EVENT_HAVE_SIGNAL_H 1
+
+/* Define to 1 if you have the <stdarg.h> header file. */
+#define _EVENT_HAVE_STDARG_H 1
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#define _EVENT_HAVE_STDINT_H 1
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define _EVENT_HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define _EVENT_HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define _EVENT_HAVE_STRING_H 1
+
+/* Define to 1 if you have the `strlcpy' function. */
+/* #undef _EVENT_HAVE_STRLCPY */
+
+/* Define to 1 if you have the `strsep' function. */
+#define _EVENT_HAVE_STRSEP 1
+
+/* Define to 1 if you have the `strtok_r' function. */
+#define _EVENT_HAVE_STRTOK_R 1
+
+/* Define to 1 if you have the `strtoll' function. */
+#define _EVENT_HAVE_STRTOLL 1
+
+/* Define to 1 if the system has the type `struct in6_addr'. */
+#define _EVENT_HAVE_STRUCT_IN6_ADDR 1
+
+/* Define to 1 if you have the <sys/devpoll.h> header file. */
+/* #undef _EVENT_HAVE_SYS_DEVPOLL_H */
+
+/* Define to 1 if you have the <sys/epoll.h> header file. */
+#define _EVENT_HAVE_SYS_EPOLL_H 1
+
+/* Define to 1 if you have the <sys/event.h> header file. */
+/* #undef _EVENT_HAVE_SYS_EVENT_H */
+
+/* Define to 1 if you have the <sys/ioctl.h> header file. */
+#define _EVENT_HAVE_SYS_IOCTL_H 1
+
+/* Define to 1 if you have the <sys/param.h> header file. */
+#define _EVENT_HAVE_SYS_PARAM_H 1
+
+/* Define to 1 if you have the <sys/queue.h> header file. */
+#define _EVENT_HAVE_SYS_QUEUE_H 1
+
+/* Define to 1 if you have the <sys/select.h> header file. */
+#define _EVENT_HAVE_SYS_SELECT_H 1
+
+/* Define to 1 if you have the <sys/socket.h> header file. */
+#define _EVENT_HAVE_SYS_SOCKET_H 1
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#define _EVENT_HAVE_SYS_STAT_H 1
+
+/* Define to 1 if you have the <sys/time.h> header file. */
+#define _EVENT_HAVE_SYS_TIME_H 1
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#define _EVENT_HAVE_SYS_TYPES_H 1
+
+/* Define if TAILQ_FOREACH is defined in <sys/queue.h> */
+#define _EVENT_HAVE_TAILQFOREACH 1
+
+/* Define if timeradd is defined in <sys/time.h> */
+#define _EVENT_HAVE_TIMERADD 1
+
+/* Define if timerclear is defined in <sys/time.h> */
+#define _EVENT_HAVE_TIMERCLEAR 1
+
+/* Define if timercmp is defined in <sys/time.h> */
+#define _EVENT_HAVE_TIMERCMP 1
+
+/* Define if timerisset is defined in <sys/time.h> */
+#define _EVENT_HAVE_TIMERISSET 1
+
+/* Define to 1 if the system has the type `uint16_t'. */
+#define _EVENT_HAVE_UINT16_T 1
+
+/* Define to 1 if the system has the type `uint32_t'. */
+#define _EVENT_HAVE_UINT32_T 1
+
+/* Define to 1 if the system has the type `uint64_t'. */
+#define _EVENT_HAVE_UINT64_T 1
+
+/* Define to 1 if the system has the type `uint8_t'. */
+#define _EVENT_HAVE_UINT8_T 1
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#define _EVENT_HAVE_UNISTD_H 1
+
+/* Define to 1 if you have the `vasprintf' function. */
+#define _EVENT_HAVE_VASPRINTF 1
+
+/* Define if kqueue works correctly with pipes */
+/* #undef _EVENT_HAVE_WORKING_KQUEUE */
+
+/* Define to the sub-directory in which libtool stores uninstalled libraries.
+   */
+#define _EVENT_LT_OBJDIR ".libs/"
+
+/* Numeric representation of the version */
+#define _EVENT_NUMERIC_VERSION 0x01040f00
+
+/* Name of package */
+#define _EVENT_PACKAGE "libevent"
+
+/* Define to the address where bug reports for this package should be sent. */
+#define _EVENT_PACKAGE_BUGREPORT ""
+
+/* Define to the full name of this package. */
+#define _EVENT_PACKAGE_NAME ""
+
+/* Define to the full name and version of this package. */
+#define _EVENT_PACKAGE_STRING ""
+
+/* Define to the one symbol short name of this package. */
+#define _EVENT_PACKAGE_TARNAME ""
+
+/* Define to the home page for this package. */
+#define _EVENT_PACKAGE_URL ""
+
+/* Define to the version of this package. */
+#define _EVENT_PACKAGE_VERSION ""
+
+/* The size of `int', as computed by sizeof. */
+#define _EVENT_SIZEOF_INT 4
+
+/* The size of `long', as computed by sizeof. */
+#define _EVENT_SIZEOF_LONG 8
+
+/* The size of `long long', as computed by sizeof. */
+#define _EVENT_SIZEOF_LONG_LONG 8
+
+/* The size of `short', as computed by sizeof. */
+#define _EVENT_SIZEOF_SHORT 2
+
+/* Define to 1 if you have the ANSI C header files. */
+#define _EVENT_STDC_HEADERS 1
+
+/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
+#define _EVENT_TIME_WITH_SYS_TIME 1
+
+/* Version number of package */
+#define _EVENT_VERSION "1.4.15"
+
+/* Define to an appropriate substitute if the compiler doesn't have __func__ */
+/* #undef _EVENT___func__ */
+
+/* Define to empty if `const' does not conform to ANSI C. */
+/* #undef _EVENT_const */
+
+/* Define to `__inline__' or `__inline' if that's what the C compiler
+   calls it, or to nothing if 'inline' is not supported under any name.  */
+#ifndef _EVENT___cplusplus
+/* #undef _EVENT_inline */
+#endif
+
+/* Define to `int' if <sys/types.h> does not define. */
+/* #undef _EVENT_pid_t */
+
+/* Define to `unsigned int' if <sys/types.h> does not define. */
+/* #undef _EVENT_size_t */
+
+/* Define to unsigned int if you dont have it */
+/* #undef _EVENT_socklen_t */
+#endif
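
event-config.h is the installable twin of config.h: the post-processing step prefixes every macro with _EVENT_ so the header can ship alongside the public API without colliding with an embedding application's own autoconf output. _EVENT_NUMERIC_VERSION packs one byte each of major, minor, and patch (0x01040f00 is 1.4.15), enabling compile-time version checks such as this sketch:

#include "event-config.h"

#if _EVENT_NUMERIC_VERSION >= 0x01040f00
/* can rely on 1.4.15+ behavior here */
#endif

#ifdef _EVENT_HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
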
diff --git a/base/third_party/libevent/log.c b/base/third_party/libevent/log.c
new file mode 100644
index 0000000..48ebb26
--- /dev/null
+++ b/base/third_party/libevent/log.c
@@ -0,0 +1,187 @@
+/*	$OpenBSD: err.c,v 1.2 2002/06/25 15:50:15 mickey Exp $	*/
+
+/*
+ * log.c
+ *
+ * Based on err.c, which was adapted from OpenBSD libc *err* *warn* code.
+ *
+ * Copyright (c) 2005 Nick Mathewson <nickm@freehaven.net>
+ *
+ * Copyright (c) 2000 Dug Song <dugsong@monkey.org>
+ *
+ * Copyright (c) 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#ifdef WIN32
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#undef WIN32_LEAN_AND_MEAN
+#endif
+#include <sys/types.h>
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <sys/_libevent_time.h>
+#endif
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <errno.h>
+#include "event.h"
+
+#include "log.h"
+#include "evutil.h"
+
+static void _warn_helper(int severity, int log_errno, const char *fmt,
+                         va_list ap);
+static void event_log(int severity, const char *msg);
+
+void
+event_err(int eval, const char *fmt, ...)
+{
+	va_list ap;
+	
+	va_start(ap, fmt);
+	_warn_helper(_EVENT_LOG_ERR, errno, fmt, ap);
+	va_end(ap);
+	exit(eval);
+}
+
+void
+event_warn(const char *fmt, ...)
+{
+	va_list ap;
+	
+	va_start(ap, fmt);
+	_warn_helper(_EVENT_LOG_WARN, errno, fmt, ap);
+	va_end(ap);
+}
+
+void
+event_errx(int eval, const char *fmt, ...)
+{
+	va_list ap;
+	
+	va_start(ap, fmt);
+	_warn_helper(_EVENT_LOG_ERR, -1, fmt, ap);
+	va_end(ap);
+	exit(eval);
+}
+
+void
+event_warnx(const char *fmt, ...)
+{
+	va_list ap;
+	
+	va_start(ap, fmt);
+	_warn_helper(_EVENT_LOG_WARN, -1, fmt, ap);
+	va_end(ap);
+}
+
+void
+event_msgx(const char *fmt, ...)
+{
+	va_list ap;
+	
+	va_start(ap, fmt);
+	_warn_helper(_EVENT_LOG_MSG, -1, fmt, ap);
+	va_end(ap);
+}
+
+void
+_event_debugx(const char *fmt, ...)
+{
+	va_list ap;
+	
+	va_start(ap, fmt);
+	_warn_helper(_EVENT_LOG_DEBUG, -1, fmt, ap);
+	va_end(ap);
+}
+
+static void
+_warn_helper(int severity, int log_errno, const char *fmt, va_list ap)
+{
+	char buf[1024];
+	size_t len;
+
+	if (fmt != NULL)
+		evutil_vsnprintf(buf, sizeof(buf), fmt, ap);
+	else
+		buf[0] = '\0';
+
+	if (log_errno >= 0) {
+		len = strlen(buf);
+		if (len < sizeof(buf) - 3) {
+			evutil_snprintf(buf + len, sizeof(buf) - len, ": %s",
+			    strerror(log_errno));
+		}
+	}
+
+	event_log(severity, buf);
+}
+
+static event_log_cb log_fn = NULL;
+
+void
+event_set_log_callback(event_log_cb cb)
+{
+	log_fn = cb;
+}
+
+static void
+event_log(int severity, const char *msg)
+{
+	if (log_fn)
+		log_fn(severity, msg);
+	else {
+		const char *severity_str;
+		switch (severity) {
+		case _EVENT_LOG_DEBUG:
+			severity_str = "debug";
+			break;
+		case _EVENT_LOG_MSG:
+			severity_str = "msg";
+			break;
+		case _EVENT_LOG_WARN:
+			severity_str = "warn";
+			break;
+		case _EVENT_LOG_ERR:
+			severity_str = "err";
+			break;
+		default:
+			severity_str = "???";
+			break;
+		}
+		(void)fprintf(stderr, "[%s] %s\n", severity_str, msg);
+	}
+}
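
By default event_log() prints "[severity] message" lines to stderr, but event_set_log_callback() lets an embedder redirect all diagnostics. A minimal sketch of a custom sink, assuming the 1.4 public API (event_log_cb and the _EVENT_LOG_* severities are declared in event.h); note that event_err()/event_errx() still exit() after the callback returns:

#include <stdio.h>
#include "event.h"

static FILE *log_file;

/* Forward libevent diagnostics to our own log file instead of stderr. */
static void file_logger(int severity, const char *msg)
{
	const char *tag = (severity == _EVENT_LOG_ERR)  ? "ERROR" :
	                  (severity == _EVENT_LOG_WARN) ? "WARN" : "INFO";
	fprintf(log_file, "libevent %s: %s\n", tag, msg);
}

int main(void)
{
	log_file = fopen("libevent.log", "a");
	if (log_file != NULL)
		event_set_log_callback(file_logger);
	/* ... event_init(), event loop ... */
	return 0;
}
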
diff --git a/base/third_party/libevent/log.h b/base/third_party/libevent/log.h
new file mode 100644
index 0000000..7bc6632
--- /dev/null
+++ b/base/third_party/libevent/log.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LOG_H_
+#define _LOG_H_
+
+#ifdef __GNUC__
+#define EV_CHECK_FMT(a,b) __attribute__((format(printf, a, b)))
+#else
+#define EV_CHECK_FMT(a,b)
+#endif
+
+void event_err(int eval, const char *fmt, ...) EV_CHECK_FMT(2,3);
+void event_warn(const char *fmt, ...) EV_CHECK_FMT(1,2);
+void event_errx(int eval, const char *fmt, ...) EV_CHECK_FMT(2,3);
+void event_warnx(const char *fmt, ...) EV_CHECK_FMT(1,2);
+void event_msgx(const char *fmt, ...) EV_CHECK_FMT(1,2);
+void _event_debugx(const char *fmt, ...) EV_CHECK_FMT(1,2);
+
+#ifdef USE_DEBUG
+#define event_debug(x) _event_debugx x
+#else
+#define event_debug(x) do {;} while (0)
+#endif
+
+#undef EV_CHECK_FMT
+
+#endif
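
The event_debug(x) macro uses the classic pre-C99 trick for variadic debug logging: the caller wraps the entire argument list in an extra pair of parentheses so it binds to the single macro parameter, and when USE_DEBUG is off the arguments are never evaluated at all. Usage looks like:

/* x expands to ("read %d bytes on fd %d", n, fd), so `_event_debugx x`
 * becomes the call _event_debugx("read %d bytes on fd %d", n, fd). */
event_debug(("read %d bytes on fd %d", n, fd));
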
diff --git a/base/third_party/libevent/m4/.dummy b/base/third_party/libevent/m4/.dummy
new file mode 100644
index 0000000..a0a72d6
--- /dev/null
+++ b/base/third_party/libevent/m4/.dummy
@@ -0,0 +1 @@
+(This dummy file exists so that git will create the m4 directory)
diff --git a/base/third_party/libevent/mac/config.h b/base/third_party/libevent/mac/config.h
new file mode 100644
index 0000000..f73f0c6
--- /dev/null
+++ b/base/third_party/libevent/mac/config.h
@@ -0,0 +1,266 @@
+/* config.h.  Generated from config.h.in by configure.  */
+/* config.h.in.  Generated from configure.in by autoheader.  */
+
+/* Define if clock_gettime is available in libc */
+/* #undef DNS_USE_CPU_CLOCK_FOR_ID */
+
+/* Define if no secure id variant is available */
+#define DNS_USE_GETTIMEOFDAY_FOR_ID 1
+
+/* Define to 1 if you have the `clock_gettime' function. */
+/* #undef HAVE_CLOCK_GETTIME */
+
+/* Define if /dev/poll is available */
+/* #undef HAVE_DEVPOLL */
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+#define HAVE_DLFCN_H 1
+
+/* Define if your system supports the epoll system calls */
+/* #undef HAVE_EPOLL */
+
+/* Define to 1 if you have the `epoll_ctl' function. */
+/* #undef HAVE_EPOLL_CTL */
+
+/* Define if your system supports event ports */
+/* #undef HAVE_EVENT_PORTS */
+
+/* Define to 1 if you have the `fcntl' function. */
+#define HAVE_FCNTL 1
+
+/* Define to 1 if you have the <fcntl.h> header file. */
+#define HAVE_FCNTL_H 1
+
+/* Define to 1 if the system has the type `fd_mask'. */
+#define HAVE_FD_MASK 1
+
+/* Define to 1 if you have the `getaddrinfo' function. */
+#define HAVE_GETADDRINFO 1
+
+/* Define to 1 if you have the `getegid' function. */
+#define HAVE_GETEGID 1
+
+/* Define to 1 if you have the `geteuid' function. */
+#define HAVE_GETEUID 1
+
+/* Define to 1 if you have the `getnameinfo' function. */
+#define HAVE_GETNAMEINFO 1
+
+/* Define to 1 if you have the `gettimeofday' function. */
+#define HAVE_GETTIMEOFDAY 1
+
+/* Define to 1 if you have the `inet_ntop' function. */
+#define HAVE_INET_NTOP 1
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#define HAVE_INTTYPES_H 1
+
+/* Define to 1 if you have the `issetugid' function. */
+#define HAVE_ISSETUGID 1
+
+/* Define to 1 if you have the `kqueue' function. */
+#define HAVE_KQUEUE 1
+
+/* Define to 1 if you have the `nsl' library (-lnsl). */
+/* #undef HAVE_LIBNSL */
+
+/* Define to 1 if you have the `resolv' library (-lresolv). */
+#define HAVE_LIBRESOLV 1
+
+/* Define to 1 if you have the `rt' library (-lrt). */
+/* #undef HAVE_LIBRT */
+
+/* Define to 1 if you have the `socket' library (-lsocket). */
+/* #undef HAVE_LIBSOCKET */
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define HAVE_MEMORY_H 1
+
+/* Define to 1 if you have the <netinet/in6.h> header file. */
+/* #undef HAVE_NETINET_IN6_H */
+
+/* Define to 1 if you have the `poll' function. */
+#define HAVE_POLL 1
+
+/* Define to 1 if you have the <poll.h> header file. */
+#define HAVE_POLL_H 1
+
+/* Define to 1 if you have the `port_create' function. */
+/* #undef HAVE_PORT_CREATE */
+
+/* Define to 1 if you have the <port.h> header file. */
+/* #undef HAVE_PORT_H */
+
+/* Define to 1 if you have the `select' function. */
+#define HAVE_SELECT 1
+
+/* Define if F_SETFD is defined in <fcntl.h> */
+#define HAVE_SETFD 1
+
+/* Define to 1 if you have the `sigaction' function. */
+#define HAVE_SIGACTION 1
+
+/* Define to 1 if you have the `signal' function. */
+#define HAVE_SIGNAL 1
+
+/* Define to 1 if you have the <signal.h> header file. */
+#define HAVE_SIGNAL_H 1
+
+/* Define to 1 if you have the <stdarg.h> header file. */
+#define HAVE_STDARG_H 1
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#define HAVE_STDINT_H 1
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define HAVE_STRING_H 1
+
+/* Define to 1 if you have the `strlcpy' function. */
+#define HAVE_STRLCPY 1
+
+/* Define to 1 if you have the `strsep' function. */
+#define HAVE_STRSEP 1
+
+/* Define to 1 if you have the `strtok_r' function. */
+#define HAVE_STRTOK_R 1
+
+/* Define to 1 if you have the `strtoll' function. */
+#define HAVE_STRTOLL 1
+
+/* Define to 1 if the system has the type `struct in6_addr'. */
+#define HAVE_STRUCT_IN6_ADDR 1
+
+/* Define to 1 if you have the <sys/devpoll.h> header file. */
+/* #undef HAVE_SYS_DEVPOLL_H */
+
+/* Define to 1 if you have the <sys/epoll.h> header file. */
+/* #undef HAVE_SYS_EPOLL_H */
+
+/* Define to 1 if you have the <sys/event.h> header file. */
+#define HAVE_SYS_EVENT_H 1
+
+/* Define to 1 if you have the <sys/ioctl.h> header file. */
+#define HAVE_SYS_IOCTL_H 1
+
+/* Define to 1 if you have the <sys/param.h> header file. */
+#define HAVE_SYS_PARAM_H 1
+
+/* Define to 1 if you have the <sys/queue.h> header file. */
+#define HAVE_SYS_QUEUE_H 1
+
+/* Define to 1 if you have the <sys/select.h> header file. */
+#define HAVE_SYS_SELECT_H 1
+
+/* Define to 1 if you have the <sys/socket.h> header file. */
+#define HAVE_SYS_SOCKET_H 1
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#define HAVE_SYS_STAT_H 1
+
+/* Define to 1 if you have the <sys/time.h> header file. */
+#define HAVE_SYS_TIME_H 1
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#define HAVE_SYS_TYPES_H 1
+
+/* Define if TAILQ_FOREACH is defined in <sys/queue.h> */
+#define HAVE_TAILQFOREACH 1
+
+/* Define if timeradd is defined in <sys/time.h> */
+#define HAVE_TIMERADD 1
+
+/* Define if timerclear is defined in <sys/time.h> */
+#define HAVE_TIMERCLEAR 1
+
+/* Define if timercmp is defined in <sys/time.h> */
+#define HAVE_TIMERCMP 1
+
+/* Define if timerisset is defined in <sys/time.h> */
+#define HAVE_TIMERISSET 1
+
+/* Define to 1 if the system has the type `uint16_t'. */
+#define HAVE_UINT16_T 1
+
+/* Define to 1 if the system has the type `uint32_t'. */
+#define HAVE_UINT32_T 1
+
+/* Define to 1 if the system has the type `uint64_t'. */
+#define HAVE_UINT64_T 1
+
+/* Define to 1 if the system has the type `uint8_t'. */
+#define HAVE_UINT8_T 1
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#define HAVE_UNISTD_H 1
+
+/* Define to 1 if you have the `vasprintf' function. */
+#define HAVE_VASPRINTF 1
+
+/* Define if kqueue works correctly with pipes */
+#define HAVE_WORKING_KQUEUE 1
+
+/* Name of package */
+#define PACKAGE "libevent"
+
+/* Define to the address where bug reports for this package should be sent. */
+#define PACKAGE_BUGREPORT ""
+
+/* Define to the full name of this package. */
+#define PACKAGE_NAME ""
+
+/* Define to the full name and version of this package. */
+#define PACKAGE_STRING ""
+
+/* Define to the one symbol short name of this package. */
+#define PACKAGE_TARNAME ""
+
+/* Define to the version of this package. */
+#define PACKAGE_VERSION ""
+
+/* The size of `int', as computed by sizeof. */
+#define SIZEOF_INT 4
+
+/* The size of `long', as computed by sizeof. */
+#define SIZEOF_LONG 4
+
+/* The size of `long long', as computed by sizeof. */
+#define SIZEOF_LONG_LONG 8
+
+/* The size of `short', as computed by sizeof. */
+#define SIZEOF_SHORT 2
+
+/* Define to 1 if you have the ANSI C header files. */
+#define STDC_HEADERS 1
+
+/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
+#define TIME_WITH_SYS_TIME 1
+
+/* Version number of package */
+#define VERSION "1.4.13-stable"
+
+/* Define to an appropriate substitute if the compiler doesn't have __func__ */
+/* #undef __func__ */
+
+/* Define to empty if `const' does not conform to ANSI C. */
+/* #undef const */
+
+/* Define to `__inline__' or `__inline' if that's what the C compiler
+   calls it, or to nothing if 'inline' is not supported under any name.  */
+#ifndef __cplusplus
+/* #undef inline */
+#endif
+
+/* Define to `int' if <sys/types.h> does not define. */
+/* #undef pid_t */
+
+/* Define to `unsigned int' if <sys/types.h> does not define. */
+/* #undef size_t */
+
+/* Define to unsigned int if you don't have it */
+/* #undef socklen_t */
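
Note that SIZEOF_LONG is 4 here: this config was generated for a 32-bit Mac build, unlike the LP64 Linux config above. Since these values are frozen at configure time rather than probed per compile, an ABI mismatch is easy to catch with a compile-time assertion; a pre-C11 sketch:

#include "config.h"

/* Fails to compile (negative array size) if config.h was generated
 * for a different ABI than the one being built. */
typedef char assert_sizeof_long[(sizeof(long) == SIZEOF_LONG) ? 1 : -1];
typedef char assert_sizeof_int[(sizeof(int) == SIZEOF_INT) ? 1 : -1];
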
diff --git a/base/third_party/libevent/mac/event-config.h b/base/third_party/libevent/mac/event-config.h
new file mode 100644
index 0000000..92e212d
--- /dev/null
+++ b/base/third_party/libevent/mac/event-config.h
@@ -0,0 +1,284 @@
+/* event-config.h
+ * Generated by autoconf; post-processed by libevent.
+ * Do not edit this file.
+ * Do not rely on macros in this file existing in later versions.
+ */
+#ifndef _EVENT_CONFIG_H_
+#define _EVENT_CONFIG_H_
+/* config.h.  Generated from config.h.in by configure.  */
+/* config.h.in.  Generated from configure.in by autoheader.  */
+
+/* Define if clock_gettime is available in libc */
+/* #undef _EVENT_DNS_USE_CPU_CLOCK_FOR_ID */
+
+/* Define if no secure id variant is available */
+#define _EVENT_DNS_USE_GETTIMEOFDAY_FOR_ID 1
+
+/* Define to 1 if you have the `clock_gettime' function. */
+/* #undef _EVENT_HAVE_CLOCK_GETTIME */
+
+/* Define if /dev/poll is available */
+/* #undef _EVENT_HAVE_DEVPOLL */
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+#define _EVENT_HAVE_DLFCN_H 1
+
+/* Define if your system supports the epoll system calls */
+/* #undef _EVENT_HAVE_EPOLL */
+
+/* Define to 1 if you have the `epoll_ctl' function. */
+/* #undef _EVENT_HAVE_EPOLL_CTL */
+
+/* Define if your system supports event ports */
+/* #undef _EVENT_HAVE_EVENT_PORTS */
+
+/* Define to 1 if you have the `fcntl' function. */
+#define _EVENT_HAVE_FCNTL 1
+
+/* Define to 1 if you have the <fcntl.h> header file. */
+#define _EVENT_HAVE_FCNTL_H 1
+
+/* Define to 1 if the system has the type `fd_mask'. */
+#define _EVENT_HAVE_FD_MASK 1
+
+/* Define to 1 if you have the `getaddrinfo' function. */
+#define _EVENT_HAVE_GETADDRINFO 1
+
+/* Define to 1 if you have the `getegid' function. */
+#define _EVENT_HAVE_GETEGID 1
+
+/* Define to 1 if you have the `geteuid' function. */
+#define _EVENT_HAVE_GETEUID 1
+
+/* Define to 1 if you have the `getnameinfo' function. */
+#define _EVENT_HAVE_GETNAMEINFO 1
+
+/* Define to 1 if you have the `gettimeofday' function. */
+#define _EVENT_HAVE_GETTIMEOFDAY 1
+
+/* Define to 1 if you have the `inet_ntop' function. */
+#define _EVENT_HAVE_INET_NTOP 1
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#define _EVENT_HAVE_INTTYPES_H 1
+
+/* Define to 1 if you have the `issetugid' function. */
+#define _EVENT_HAVE_ISSETUGID 1
+
+/* Define to 1 if you have the `kqueue' function. */
+#define _EVENT_HAVE_KQUEUE 1
+
+/* Define to 1 if you have the `nsl' library (-lnsl). */
+/* #undef _EVENT_HAVE_LIBNSL */
+
+/* Define to 1 if you have the `resolv' library (-lresolv). */
+#define _EVENT_HAVE_LIBRESOLV 1
+
+/* Define to 1 if you have the `rt' library (-lrt). */
+/* #undef _EVENT_HAVE_LIBRT */
+
+/* Define to 1 if you have the `socket' library (-lsocket). */
+/* #undef _EVENT_HAVE_LIBSOCKET */
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define _EVENT_HAVE_MEMORY_H 1
+
+/* Define to 1 if you have the <netinet/in6.h> header file. */
+/* #undef _EVENT_HAVE_NETINET_IN6_H */
+
+/* Define to 1 if you have the `poll' function. */
+#define _EVENT_HAVE_POLL 1
+
+/* Define to 1 if you have the <poll.h> header file. */
+#define _EVENT_HAVE_POLL_H 1
+
+/* Define to 1 if you have the `port_create' function. */
+/* #undef _EVENT_HAVE_PORT_CREATE */
+
+/* Define to 1 if you have the <port.h> header file. */
+/* #undef _EVENT_HAVE_PORT_H */
+
+/* Define to 1 if you have the `select' function. */
+#define _EVENT_HAVE_SELECT 1
+
+/* Define if F_SETFD is defined in <fcntl.h> */
+#define _EVENT_HAVE_SETFD 1
+
+/* Define to 1 if you have the `sigaction' function. */
+#define _EVENT_HAVE_SIGACTION 1
+
+/* Define to 1 if you have the `signal' function. */
+#define _EVENT_HAVE_SIGNAL 1
+
+/* Define to 1 if you have the <signal.h> header file. */
+#define _EVENT_HAVE_SIGNAL_H 1
+
+/* Define to 1 if you have the <stdarg.h> header file. */
+#define _EVENT_HAVE_STDARG_H 1
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#define _EVENT_HAVE_STDINT_H 1
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define _EVENT_HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define _EVENT_HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define _EVENT_HAVE_STRING_H 1
+
+/* Define to 1 if you have the `strlcpy' function. */
+#define _EVENT_HAVE_STRLCPY 1
+
+/* Define to 1 if you have the `strsep' function. */
+#define _EVENT_HAVE_STRSEP 1
+
+/* Define to 1 if you have the `strtok_r' function. */
+#define _EVENT_HAVE_STRTOK_R 1
+
+/* Define to 1 if you have the `strtoll' function. */
+#define _EVENT_HAVE_STRTOLL 1
+
+/* Define to 1 if the system has the type `struct in6_addr'. */
+#define _EVENT_HAVE_STRUCT_IN6_ADDR 1
+
+/* Define to 1 if you have the <sys/devpoll.h> header file. */
+/* #undef _EVENT_HAVE_SYS_DEVPOLL_H */
+
+/* Define to 1 if you have the <sys/epoll.h> header file. */
+/* #undef _EVENT_HAVE_SYS_EPOLL_H */
+
+/* Define to 1 if you have the <sys/event.h> header file. */
+#define _EVENT_HAVE_SYS_EVENT_H 1
+
+/* Define to 1 if you have the <sys/ioctl.h> header file. */
+#define _EVENT_HAVE_SYS_IOCTL_H 1
+
+/* Define to 1 if you have the <sys/param.h> header file. */
+#define _EVENT_HAVE_SYS_PARAM_H 1
+
+/* Define to 1 if you have the <sys/queue.h> header file. */
+#define _EVENT_HAVE_SYS_QUEUE_H 1
+
+/* Define to 1 if you have the <sys/select.h> header file. */
+#define _EVENT_HAVE_SYS_SELECT_H 1
+
+/* Define to 1 if you have the <sys/socket.h> header file. */
+#define _EVENT_HAVE_SYS_SOCKET_H 1
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#define _EVENT_HAVE_SYS_STAT_H 1
+
+/* Define to 1 if you have the <sys/time.h> header file. */
+#define _EVENT_HAVE_SYS_TIME_H 1
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#define _EVENT_HAVE_SYS_TYPES_H 1
+
+/* Define if TAILQ_FOREACH is defined in <sys/queue.h> */
+#define _EVENT_HAVE_TAILQFOREACH 1
+
+/* Define if timeradd is defined in <sys/time.h> */
+#define _EVENT_HAVE_TIMERADD 1
+
+/* Define if timerclear is defined in <sys/time.h> */
+#define _EVENT_HAVE_TIMERCLEAR 1
+
+/* Define if timercmp is defined in <sys/time.h> */
+#define _EVENT_HAVE_TIMERCMP 1
+
+/* Define if timerisset is defined in <sys/time.h> */
+#define _EVENT_HAVE_TIMERISSET 1
+
+/* Define to 1 if the system has the type `uint16_t'. */
+#define _EVENT_HAVE_UINT16_T 1
+
+/* Define to 1 if the system has the type `uint32_t'. */
+#define _EVENT_HAVE_UINT32_T 1
+
+/* Define to 1 if the system has the type `uint64_t'. */
+#define _EVENT_HAVE_UINT64_T 1
+
+/* Define to 1 if the system has the type `uint8_t'. */
+#define _EVENT_HAVE_UINT8_T 1
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#define _EVENT_HAVE_UNISTD_H 1
+
+/* Define to 1 if you have the `vasprintf' function. */
+#define _EVENT_HAVE_VASPRINTF 1
+
+/* Define if kqueue works correctly with pipes */
+#define _EVENT_HAVE_WORKING_KQUEUE 1
+
+/* Define to the sub-directory in which libtool stores uninstalled libraries.
+   */
+#define _EVENT_LT_OBJDIR ".libs/"
+
+/* Numeric representation of the version */
+#define _EVENT_NUMERIC_VERSION 0x01040f00
+
+/* Name of package */
+#define _EVENT_PACKAGE "libevent"
+
+/* Define to the address where bug reports for this package should be sent. */
+#define _EVENT_PACKAGE_BUGREPORT ""
+
+/* Define to the full name of this package. */
+#define _EVENT_PACKAGE_NAME ""
+
+/* Define to the full name and version of this package. */
+#define _EVENT_PACKAGE_STRING ""
+
+/* Define to the one symbol short name of this package. */
+#define _EVENT_PACKAGE_TARNAME ""
+
+/* Define to the home page for this package. */
+#define _EVENT_PACKAGE_URL ""
+
+/* Define to the version of this package. */
+#define _EVENT_PACKAGE_VERSION ""
+
+/* The size of `int', as computed by sizeof. */
+#define _EVENT_SIZEOF_INT 4
+
+/* The size of `long', as computed by sizeof. */
+#define _EVENT_SIZEOF_LONG 4
+
+/* The size of `long long', as computed by sizeof. */
+#define _EVENT_SIZEOF_LONG_LONG 8
+
+/* The size of `short', as computed by sizeof. */
+#define _EVENT_SIZEOF_SHORT 2
+
+/* Define to 1 if you have the ANSI C header files. */
+#define _EVENT_STDC_HEADERS 1
+
+/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
+#define _EVENT_TIME_WITH_SYS_TIME 1
+
+/* Version number of package */
+#define _EVENT_VERSION "1.4.15"
+
+/* Define to an appropriate substitute if the compiler doesn't have __func__ */
+/* #undef _EVENT___func__ */
+
+/* Define to empty if `const' does not conform to ANSI C. */
+/* #undef _EVENT_const */
+
+/* Define to `__inline__' or `__inline' if that's what the C compiler
+   calls it, or to nothing if 'inline' is not supported under any name.  */
+#ifndef _EVENT___cplusplus
+/* #undef _EVENT_inline */
+#endif
+
+/* Define to `int' if <sys/types.h> does not define. */
+/* #undef _EVENT_pid_t */
+
+/* Define to `unsigned int' if <sys/types.h> does not define. */
+/* #undef _EVENT_size_t */
+
+/* Define to unsigned int if you dont have it */
+/* #undef _EVENT_socklen_t */
+#endif
diff --git a/base/third_party/libevent/min_heap.h b/base/third_party/libevent/min_heap.h
new file mode 100644
index 0000000..14d8e37
--- /dev/null
+++ b/base/third_party/libevent/min_heap.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2006 Maxim Yegorushkin <maxim.yegorushkin@gmail.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _MIN_HEAP_H_
+#define _MIN_HEAP_H_
+
+#include "event.h"
+#include "evutil.h"
+
+typedef struct min_heap
+{
+    struct event** p;
+    unsigned n, a;
+} min_heap_t;
+
+static inline void           min_heap_ctor(min_heap_t* s);
+static inline void           min_heap_dtor(min_heap_t* s);
+static inline void           min_heap_elem_init(struct event* e);
+static inline int            min_heap_elem_greater(struct event *a, struct event *b);
+static inline int            min_heap_empty(min_heap_t* s);
+static inline unsigned       min_heap_size(min_heap_t* s);
+static inline struct event*  min_heap_top(min_heap_t* s);
+static inline int            min_heap_reserve(min_heap_t* s, unsigned n);
+static inline int            min_heap_push(min_heap_t* s, struct event* e);
+static inline struct event*  min_heap_pop(min_heap_t* s);
+static inline int            min_heap_erase(min_heap_t* s, struct event* e);
+static inline void           min_heap_shift_up_(min_heap_t* s, unsigned hole_index, struct event* e);
+static inline void           min_heap_shift_down_(min_heap_t* s, unsigned hole_index, struct event* e);
+
+int min_heap_elem_greater(struct event *a, struct event *b)
+{
+    return evutil_timercmp(&a->ev_timeout, &b->ev_timeout, >);
+}
+
+void min_heap_ctor(min_heap_t* s) { s->p = 0; s->n = 0; s->a = 0; }
+void min_heap_dtor(min_heap_t* s) { if(s->p) free(s->p); }
+void min_heap_elem_init(struct event* e) { e->min_heap_idx = -1; }
+int min_heap_empty(min_heap_t* s) { return 0u == s->n; }
+unsigned min_heap_size(min_heap_t* s) { return s->n; }
+struct event* min_heap_top(min_heap_t* s) { return s->n ? *s->p : 0; }
+
+int min_heap_push(min_heap_t* s, struct event* e)
+{
+    if(min_heap_reserve(s, s->n + 1))
+        return -1;
+    min_heap_shift_up_(s, s->n++, e);
+    return 0;
+}
+
+struct event* min_heap_pop(min_heap_t* s)
+{
+    if(s->n)
+    {
+        struct event* e = *s->p;
+        min_heap_shift_down_(s, 0u, s->p[--s->n]);
+        e->min_heap_idx = -1;
+        return e;
+    }
+    return 0;
+}
+
+int min_heap_erase(min_heap_t* s, struct event* e)
+{
+    if(((unsigned int)-1) != e->min_heap_idx)
+    {
+        struct event *last = s->p[--s->n];
+        unsigned parent = (e->min_heap_idx - 1) / 2;
+        /* We replace e with the last element in the heap.  We might need to
+           shift it upward if it is less than its parent, or downward if it is
+           greater than one or both its children.  Since the children are known
+           to be less than the parent, it can't need to shift both up and
+           down. */
+        if (e->min_heap_idx > 0 && min_heap_elem_greater(s->p[parent], last))
+             min_heap_shift_up_(s, e->min_heap_idx, last);
+        else
+             min_heap_shift_down_(s, e->min_heap_idx, last);
+        e->min_heap_idx = -1;
+        return 0;
+    }
+    return -1;
+}
+
+int min_heap_reserve(min_heap_t* s, unsigned n)
+{
+    if(s->a < n)
+    {
+        struct event** p;
+        unsigned a = s->a ? s->a * 2 : 8;
+        if(a < n)
+            a = n;
+        if(!(p = (struct event**)realloc(s->p, a * sizeof *p)))
+            return -1;
+        s->p = p;
+        s->a = a;
+    }
+    return 0;
+}
+
+void min_heap_shift_up_(min_heap_t* s, unsigned hole_index, struct event* e)
+{
+    unsigned parent = (hole_index - 1) / 2;
+    while(hole_index && min_heap_elem_greater(s->p[parent], e))
+    {
+        (s->p[hole_index] = s->p[parent])->min_heap_idx = hole_index;
+        hole_index = parent;
+        parent = (hole_index - 1) / 2;
+    }
+    (s->p[hole_index] = e)->min_heap_idx = hole_index;
+}
+
+void min_heap_shift_down_(min_heap_t* s, unsigned hole_index, struct event* e)
+{
+    unsigned min_child = 2 * (hole_index + 1);
+    while(min_child <= s->n)
+    {
+        min_child -= min_child == s->n || min_heap_elem_greater(s->p[min_child], s->p[min_child - 1]);
+        if(!(min_heap_elem_greater(e, s->p[min_child])))
+            break;
+        (s->p[hole_index] = s->p[min_child])->min_heap_idx = hole_index;
+        hole_index = min_child;
+        min_child = 2 * (hole_index + 1);
+    }
+    min_heap_shift_up_(s, hole_index, e);
+}
+
+#endif /* _MIN_HEAP_H_ */
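
min_heap.h lays the timer heap out as the usual implicit binary heap: element i's parent is at (i - 1) / 2 and its children at 2i + 1 and 2i + 2, which is why min_heap_shift_down_() starts from 2 * (hole_index + 1) (the right child) and the min_child -= ... line steps back to the left child whenever the right one is out of range or larger. A self-contained sketch of the same pop logic specialized to ints (illustrative only; the real heap orders struct event pointers by ev_timeout via evutil_timercmp):

#include <stdio.h>

/* Same index arithmetic as min_heap_shift_down_(), with ints and `>`
 * standing in for struct event and min_heap_elem_greater(). */
static void shift_down(int *p, unsigned n, unsigned hole, int v)
{
	unsigned min_child = 2 * (hole + 1);            /* right child */
	while (min_child <= n) {
		/* Step back to the left child if the right one is missing or larger. */
		min_child -= (min_child == n || p[min_child] > p[min_child - 1]);
		if (!(v > p[min_child]))
			break;
		p[hole] = p[min_child];                 /* pull the child up */
		hole = min_child;
		min_child = 2 * (hole + 1);
	}
	p[hole] = v;    /* a plain pop never needs the final shift-up; erase can */
}

int main(void)
{
	int h[] = { 1, 3, 2, 7, 4 };    /* a valid min-heap */
	unsigned n = 5;
	int top = h[0];                 /* pop the root... */
	shift_down(h, --n, 0, h[n]);    /* ...refill the hole with the last element */
	printf("popped %d, new top %d\n", top, h[0]);   /* popped 1, new top 2 */
	return 0;
}
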
diff --git a/base/third_party/libevent/nacl_nonsfi/config.h b/base/third_party/libevent/nacl_nonsfi/config.h
new file mode 100644
index 0000000..60c9dfe
--- /dev/null
+++ b/base/third_party/libevent/nacl_nonsfi/config.h
@@ -0,0 +1,273 @@
+/* Copied from the Linux version, with the features adjusted for the PNaCl
+ * toolchain's Non-SFI binary build, which is close to the one under the
+ * linux/ directory. The built binary actually runs directly under Linux.
+ */
+
+/* Define if clock_gettime is available in libc */
+#define DNS_USE_CPU_CLOCK_FOR_ID 1
+
+/* Define if no secure id variant is available */
+/* #undef DNS_USE_GETTIMEOFDAY_FOR_ID */
+
+/* Define to 1 if you have the `clock_gettime' function. */
+#define HAVE_CLOCK_GETTIME 1
+
+/* Define if /dev/poll is available */
+/* #undef HAVE_DEVPOLL */
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+/* #undef HAVE_DLFCN_H */
+
+/* Define if your system supports the epoll system calls */
+/* #undef HAVE_EPOLL */
+
+/* Define to 1 if you have the `epoll_ctl' function. */
+/* #undef HAVE_EPOLL_CTL */
+
+/* Define if your system supports event ports */
+/* #undef HAVE_EVENT_PORTS */
+
+/* Define to 1 if you have the `fcntl' function. */
+#define HAVE_FCNTL 1
+
+/* Define to 1 if you have the <fcntl.h> header file. */
+#define HAVE_FCNTL_H 1
+
+/* Define to 1 if the system has the type `fd_mask'. */
+#define HAVE_FD_MASK 1
+
+/* Define to 1 if you have the `getaddrinfo' function. */
+#define HAVE_GETADDRINFO 1
+
+/* Define to 1 if you have the `getegid' function. */
+#define HAVE_GETEGID 1
+
+/* Define to 1 if you have the `geteuid' function. */
+#define HAVE_GETEUID 1
+
+/* Define to 1 if you have the `getnameinfo' function. */
+#define HAVE_GETNAMEINFO 1
+
+/* Define to 1 if you have the `gettimeofday' function. */
+#define HAVE_GETTIMEOFDAY 1
+
+/* Define to 1 if you have the `inet_ntop' function. */
+#define HAVE_INET_NTOP 1
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#define HAVE_INTTYPES_H 1
+
+/* Define to 1 if you have the `issetugid' function. */
+/* #undef HAVE_ISSETUGID */
+
+/* Define to 1 if you have the `kqueue' function. */
+/* #undef HAVE_KQUEUE */
+
+/* Define to 1 if you have the `nsl' library (-lnsl). */
+#define HAVE_LIBNSL 1
+
+/* Define to 1 if you have the `resolv' library (-lresolv). */
+#define HAVE_LIBRESOLV 1
+
+/* Define to 1 if you have the `rt' library (-lrt). */
+#define HAVE_LIBRT 1
+
+/* Define to 1 if you have the `socket' library (-lsocket). */
+/* #undef HAVE_LIBSOCKET */
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define HAVE_MEMORY_H 1
+
+/* Define to 1 if you have the <netinet/in6.h> header file. */
+/* #undef HAVE_NETINET_IN6_H */
+
+/* Define to 1 if you have the `poll' function. */
+#define HAVE_POLL 1
+
+/* Define to 1 if you have the <poll.h> header file. */
+#define HAVE_POLL_H 1
+
+/* Define to 1 if you have the `port_create' function. */
+/* #undef HAVE_PORT_CREATE */
+
+/* Define to 1 if you have the <port.h> header file. */
+/* #undef HAVE_PORT_H */
+
+/* Define to 1 if you have the `select' function. */
+/* #undef HAVE_SELECT */
+
+/* Define if F_SETFD is defined in <fcntl.h> */
+#define HAVE_SETFD 1
+
+/* Note: The PNaCl toolchain provides the Linux ABI's sigaction, named
+ * linux_sigaction() in native_client/src/nonsfi/linux/linux_sys_private.c,
+ * but the newlib ABI sigaction() is not provided.
+ */
+/* Define to 1 if you have the `sigaction' function. */
+/* #undef HAVE_SIGACTION */
+
+/* Define to 1 if you have the `signal' function. */
+/* #undef HAVE_SIGNAL */
+
+/* Define to 1 if you have the <signal.h> header file. */
+#define HAVE_SIGNAL_H 1
+
+/* Define to 1 if you have the <stdarg.h> header file. */
+#define HAVE_STDARG_H 1
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#define HAVE_STDINT_H 1
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define HAVE_STRING_H 1
+
+/* Define to 1 if you have the `strlcpy' function. */
+/* #undef HAVE_STRLCPY */
+
+/* Define to 1 if you have the `strsep' function. */
+#define HAVE_STRSEP 1
+
+/* Define to 1 if you have the `strtok_r' function. */
+#define HAVE_STRTOK_R 1
+
+/* Define to 1 if you have the `strtoll' function. */
+#define HAVE_STRTOLL 1
+
+/* Define to 1 if the system has the type `struct in6_addr'. */
+#define HAVE_STRUCT_IN6_ADDR 1
+
+/* Define to 1 if you have the <sys/devpoll.h> header file. */
+/* #undef HAVE_SYS_DEVPOLL_H */
+
+/* Define to 1 if you have the <sys/epoll.h> header file. */
+#define HAVE_SYS_EPOLL_H 1
+
+/* Define to 1 if you have the <sys/event.h> header file. */
+/* #undef HAVE_SYS_EVENT_H */
+
+/* Define to 1 if you have the <sys/ioctl.h> header file. */
+/* #undef HAVE_SYS_IOCTL_H */
+
+/* Define to 1 if you have the <sys/param.h> header file. */
+#define HAVE_SYS_PARAM_H 1
+
+/* Define to 1 if you have the <sys/queue.h> header file. */
+#define HAVE_SYS_QUEUE_H 1
+
+/* Define to 1 if you have the <sys/select.h> header file. */
+/* #undef HAVE_SYS_SELECT_H */
+
+/* Define to 1 if you have the <sys/socket.h> header file. */
+#define HAVE_SYS_SOCKET_H 1
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#define HAVE_SYS_STAT_H 1
+
+/* Define to 1 if you have the <sys/time.h> header file. */
+#define HAVE_SYS_TIME_H 1
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#define HAVE_SYS_TYPES_H 1
+
+/* Define if TAILQ_FOREACH is defined in <sys/queue.h> */
+#define HAVE_TAILQFOREACH 1
+
+/* Define if timeradd is defined in <sys/time.h> */
+/* #undef HAVE_TIMERADD */
+
+/* Define if timerclear is defined in <sys/time.h> */
+/* #undef HAVE_TIMERCLEAR */
+
+/* Define if timercmp is defined in <sys/time.h> */
+/* #undef HAVE_TIMERCMP */
+
+/* Define if timerisset is defined in <sys/time.h> */
+/* #undef HAVE_TIMERISSET */
+
+/* Define to 1 if the system has the type `uint16_t'. */
+#define HAVE_UINT16_T 1
+
+/* Define to 1 if the system has the type `uint32_t'. */
+#define HAVE_UINT32_T 1
+
+/* Define to 1 if the system has the type `uint64_t'. */
+#define HAVE_UINT64_T 1
+
+/* Define to 1 if the system has the type `uint8_t'. */
+#define HAVE_UINT8_T 1
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#define HAVE_UNISTD_H 1
+
+/* Define to 1 if you have the `vasprintf' function. */
+/* #undef HAVE_VASPRINTF */
+
+/* Define if kqueue works correctly with pipes */
+/* #undef HAVE_WORKING_KQUEUE */
+
+/* Name of package */
+#define PACKAGE "libevent_nacl"
+
+/* Define to the address where bug reports for this package should be sent. */
+#define PACKAGE_BUGREPORT ""
+
+/* Define to the full name of this package. */
+#define PACKAGE_NAME ""
+
+/* Define to the full name and version of this package. */
+#define PACKAGE_STRING ""
+
+/* Define to the one symbol short name of this package. */
+#define PACKAGE_TARNAME ""
+
+/* Define to the version of this package. */
+#define PACKAGE_VERSION ""
+
+/* The size of `int', as computed by sizeof. */
+#define SIZEOF_INT 4
+
+/* The size of `long', as computed by sizeof. */
+#define SIZEOF_LONG 8
+
+/* The size of `long long', as computed by sizeof. */
+#define SIZEOF_LONG_LONG 8
+
+/* The size of `short', as computed by sizeof. */
+#define SIZEOF_SHORT 2
+
+/* Define to 1 if you have the ANSI C header files. */
+#define STDC_HEADERS 1
+
+/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
+#define TIME_WITH_SYS_TIME 1
+
+/* Version number of package */
+#define VERSION "1.4.13-stable"
+
+/* Define to an appropriate substitute if the compiler doesn't have __func__ */
+/* #undef __func__ */
+
+/* Define to empty if `const' does not conform to ANSI C. */
+/* #undef const */
+
+/* Define to `__inline__' or `__inline' if that's what the C compiler
+   calls it, or to nothing if 'inline' is not supported under any name.  */
+#ifndef __cplusplus
+/* #undef inline */
+#endif
+
+/* Define to `int' if <sys/types.h> does not define. */
+/* #undef pid_t */
+
+/* Define to `unsigned int' if <sys/types.h> does not define. */
+/* #undef size_t */
+
+/* Define to unsigned int if you dont have it */
+/* #undef socklen_t */
diff --git a/base/third_party/libevent/nacl_nonsfi/event-config.h b/base/third_party/libevent/nacl_nonsfi/event-config.h
new file mode 100644
index 0000000..fe28043
--- /dev/null
+++ b/base/third_party/libevent/nacl_nonsfi/event-config.h
@@ -0,0 +1,290 @@
+/* Copied from the Linux version, with the features adjusted for the PNaCl
+ * toolchain's Non-SFI binary build, which is close to the one under the
+ * linux/ directory. The built binary actually runs directly under Linux.
+ */
+
+#ifndef _EVENT_CONFIG_H_
+#define _EVENT_CONFIG_H_
+
+/* Define if clock_gettime is available in libc */
+#define _EVENT_DNS_USE_CPU_CLOCK_FOR_ID 1
+
+/* Define if no secure id variant is available */
+/* #undef _EVENT_DNS_USE_GETTIMEOFDAY_FOR_ID */
+
+/* Define to 1 if you have the `clock_gettime' function. */
+#define _EVENT_HAVE_CLOCK_GETTIME 1
+
+/* Define if /dev/poll is available */
+/* #undef _EVENT_HAVE_DEVPOLL */
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+#define _EVENT_HAVE_DLFCN_H 1
+
+/* Define if your system supports the epoll system calls */
+/* #undef _EVENT_HAVE_EPOLL */
+
+/* Define to 1 if you have the `epoll_ctl' function. */
+/* #undef _EVENT_HAVE_EPOLL_CTL */
+
+/* Define if your system supports event ports */
+/* #undef _EVENT_HAVE_EVENT_PORTS */
+
+/* Define to 1 if you have the `fcntl' function. */
+#define _EVENT_HAVE_FCNTL 1
+
+/* Define to 1 if you have the <fcntl.h> header file. */
+#define _EVENT_HAVE_FCNTL_H 1
+
+/* Define to 1 if the system has the type `fd_mask'. */
+#define _EVENT_HAVE_FD_MASK 1
+
+/* Define to 1 if you have the `getaddrinfo' function. */
+#define _EVENT_HAVE_GETADDRINFO 1
+
+/* Define to 1 if you have the `getegid' function. */
+#define _EVENT_HAVE_GETEGID 1
+
+/* Define to 1 if you have the `geteuid' function. */
+#define _EVENT_HAVE_GETEUID 1
+
+/* Define to 1 if you have the `getnameinfo' function. */
+#define _EVENT_HAVE_GETNAMEINFO 1
+
+/* Define to 1 if you have the `gettimeofday' function. */
+#define _EVENT_HAVE_GETTIMEOFDAY 1
+
+/* Define to 1 if you have the `inet_ntop' function. */
+#define _EVENT_HAVE_INET_NTOP 1
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#define _EVENT_HAVE_INTTYPES_H 1
+
+/* Define to 1 if you have the `issetugid' function. */
+/* #undef _EVENT_HAVE_ISSETUGID */
+
+/* Define to 1 if you have the `kqueue' function. */
+/* #undef _EVENT_HAVE_KQUEUE */
+
+/* Define to 1 if you have the `nsl' library (-lnsl). */
+#define _EVENT_HAVE_LIBNSL 1
+
+/* Define to 1 if you have the `resolv' library (-lresolv). */
+#define _EVENT_HAVE_LIBRESOLV 1
+
+/* Define to 1 if you have the `rt' library (-lrt). */
+#define _EVENT_HAVE_LIBRT 1
+
+/* Define to 1 if you have the `socket' library (-lsocket). */
+/* #undef _EVENT_HAVE_LIBSOCKET */
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define _EVENT_HAVE_MEMORY_H 1
+
+/* Define to 1 if you have the <netinet/in6.h> header file. */
+/* #undef _EVENT_HAVE_NETINET_IN6_H */
+
+/* Define to 1 if you have the `poll' function. */
+#define _EVENT_HAVE_POLL 1
+
+/* Define to 1 if you have the <poll.h> header file. */
+#define _EVENT_HAVE_POLL_H 1
+
+/* Define to 1 if you have the `port_create' function. */
+/* #undef _EVENT_HAVE_PORT_CREATE */
+
+/* Define to 1 if you have the <port.h> header file. */
+/* #undef _EVENT_HAVE_PORT_H */
+
+/* Define to 1 if you have the `select' function. */
+/* #undef _EVENT_HAVE_SELECT */
+
+/* Define if F_SETFD is defined in <fcntl.h> */
+#define _EVENT_HAVE_SETFD 1
+
+/* Define to 1 if you have the `sigaction' function. */
+/* #undef _EVENT_HAVE_SIGACTION */
+
+/* Define to 1 if you have the `signal' function. */
+#define _EVENT_HAVE_SIGNAL 1
+
+/* Define to 1 if you have the <signal.h> header file. */
+#define _EVENT_HAVE_SIGNAL_H 1
+
+/* Define to 1 if you have the <stdarg.h> header file. */
+#define _EVENT_HAVE_STDARG_H 1
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#define _EVENT_HAVE_STDINT_H 1
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define _EVENT_HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define _EVENT_HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define _EVENT_HAVE_STRING_H 1
+
+/* Define to 1 if you have the `strlcpy' function. */
+/* #undef _EVENT_HAVE_STRLCPY */
+
+/* Define to 1 if you have the `strsep' function. */
+#define _EVENT_HAVE_STRSEP 1
+
+/* Define to 1 if you have the `strtok_r' function. */
+#define _EVENT_HAVE_STRTOK_R 1
+
+/* Define to 1 if you have the `strtoll' function. */
+#define _EVENT_HAVE_STRTOLL 1
+
+/* Define to 1 if the system has the type `struct in6_addr'. */
+#define _EVENT_HAVE_STRUCT_IN6_ADDR 1
+
+/* Define to 1 if you have the <sys/devpoll.h> header file. */
+/* #undef _EVENT_HAVE_SYS_DEVPOLL_H */
+
+/* Define to 1 if you have the <sys/epoll.h> header file. */
+/* #undef _EVENT_HAVE_SYS_EPOLL_H */
+
+/* Define to 1 if you have the <sys/event.h> header file. */
+/* #undef _EVENT_HAVE_SYS_EVENT_H */
+
+/* Define to 1 if you have the <sys/ioctl.h> header file. */
+/* #undef _EVENT_HAVE_SYS_IOCTL_H */
+
+/* Define to 1 if you have the <sys/param.h> header file. */
+#define _EVENT_HAVE_SYS_PARAM_H 1
+
+/* Define to 1 if you have the <sys/queue.h> header file. */
+#define _EVENT_HAVE_SYS_QUEUE_H 1
+
+/* Define to 1 if you have the <sys/select.h> header file. */
+#define _EVENT_HAVE_SYS_SELECT_H 1
+
+/* Define to 1 if you have the <sys/socket.h> header file. */
+#define _EVENT_HAVE_SYS_SOCKET_H 1
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#define _EVENT_HAVE_SYS_STAT_H 1
+
+/* Define to 1 if you have the <sys/time.h> header file. */
+#define _EVENT_HAVE_SYS_TIME_H 1
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#define _EVENT_HAVE_SYS_TYPES_H 1
+
+/* Define if TAILQ_FOREACH is defined in <sys/queue.h> */
+#define _EVENT_HAVE_TAILQFOREACH 1
+
+/* Define if timeradd is defined in <sys/time.h> */
+/* #undef _EVENT_HAVE_TIMERADD */
+
+/* Define if timerclear is defined in <sys/time.h> */
+/* #undef _EVENT_HAVE_TIMERCLEAR */
+
+/* Define if timercmp is defined in <sys/time.h> */
+/* #undef _EVENT_HAVE_TIMERCMP */
+
+/* Define if timerisset is defined in <sys/time.h> */
+/* #undef _EVENT_HAVE_TIMERISSET */
+
+/* Define to 1 if the system has the type `uint16_t'. */
+#define _EVENT_HAVE_UINT16_T 1
+
+/* Define to 1 if the system has the type `uint32_t'. */
+#define _EVENT_HAVE_UINT32_T 1
+
+/* Define to 1 if the system has the type `uint64_t'. */
+#define _EVENT_HAVE_UINT64_T 1
+
+/* Define to 1 if the system has the type `uint8_t'. */
+#define _EVENT_HAVE_UINT8_T 1
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#define _EVENT_HAVE_UNISTD_H 1
+
+/* Define to 1 if you have the `vasprintf' function. */
+#define _EVENT_HAVE_VASPRINTF 1
+
+/* Define if kqueue works correctly with pipes */
+/* #undef _EVENT_HAVE_WORKING_KQUEUE */
+
+/* Define to the sub-directory in which libtool stores uninstalled libraries.
+   */
+#define _EVENT_LT_OBJDIR ".libs/"
+
+/* Numeric representation of the version */
+#define _EVENT_NUMERIC_VERSION 0x01040f00
+
+/* Name of package */
+#define _EVENT_PACKAGE "libevent_nacl"
+
+/* Define to the address where bug reports for this package should be sent. */
+#define _EVENT_PACKAGE_BUGREPORT ""
+
+/* Define to the full name of this package. */
+#define _EVENT_PACKAGE_NAME ""
+
+/* Define to the full name and version of this package. */
+#define _EVENT_PACKAGE_STRING ""
+
+/* Define to the one symbol short name of this package. */
+#define _EVENT_PACKAGE_TARNAME ""
+
+/* Define to the home page for this package. */
+#define _EVENT_PACKAGE_URL ""
+
+/* Define to the version of this package. */
+#define _EVENT_PACKAGE_VERSION ""
+
+/* The size of `int', as computed by sizeof. */
+#define _EVENT_SIZEOF_INT 4
+
+/* The size of `long', as computed by sizeof. */
+#define _EVENT_SIZEOF_LONG 8
+
+/* The size of `long long', as computed by sizeof. */
+#define _EVENT_SIZEOF_LONG_LONG 8
+
+/* The size of `short', as computed by sizeof. */
+#define _EVENT_SIZEOF_SHORT 2
+
+/* Define to 1 if you have the ANSI C header files. */
+#define _EVENT_STDC_HEADERS 1
+
+/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
+#define _EVENT_TIME_WITH_SYS_TIME 1
+
+/* Version number of package */
+#define _EVENT_VERSION "1.4.15"
+
+/* Define to an appropriate substitute if the compiler doesn't have __func__ */
+/* #undef _EVENT___func__ */
+
+/* Define to empty if `const' does not conform to ANSI C. */
+/* #undef _EVENT_const */
+
+/* Define to `__inline__' or `__inline' if that's what the C compiler
+   calls it, or to nothing if 'inline' is not supported under any name.  */
+#ifndef _EVENT___cplusplus
+/* #undef _EVENT_inline */
+#endif
+
+/* Define to `int' if <sys/types.h> does not define. */
+/* #undef _EVENT_pid_t */
+
+/* Define to `unsigned int' if <sys/types.h> does not define. */
+/* #undef _EVENT_size_t */
+
+/* Define to unsigned int if you don't have it */
+/* #undef _EVENT_socklen_t */
+
+/* Workaround for the __native_client_nonsfi__ build: random() is not
+ * provided by the newlib-based PNaCl toolchain, so we declare it here. See
+ * nacl_nonsfi/random.c for details.
+ */
+long int random();
+
+#endif
diff --git a/base/third_party/libevent/nacl_nonsfi/random.c b/base/third_party/libevent/nacl_nonsfi/random.c
new file mode 100644
index 0000000..3577dd5
--- /dev/null
+++ b/base/third_party/libevent/nacl_nonsfi/random.c
@@ -0,0 +1,13 @@
+/* Copyright 2014 The Chromium Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <stdlib.h>
+
+/* The newlib-based PNaCl toolchain does not provide random(), so we define
+ * it here. It simply forwards to rand(), which the toolchain does
+ * provide. */
+long int random() {
+  return rand();
+}
diff --git a/base/third_party/libevent/nacl_nonsfi/signal_stub.c b/base/third_party/libevent/nacl_nonsfi/signal_stub.c
new file mode 100644
index 0000000..0399e8c
--- /dev/null
+++ b/base/third_party/libevent/nacl_nonsfi/signal_stub.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2015 The Chromium Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/*
+ * In nacl_helper_nonsfi, socketpair() is unavailable. libevent uses it to
+ * notify the event loop that a signal handler has run, a feature
+ * nacl_helper_nonsfi does not use. Since there is no macro to disable just
+ * that feature, we stub out the signal module entirely.
+ */
+
+
+#include <signal.h>
+#include <stdlib.h>
+#include <sys/queue.h>
+
+/* config.h must be included before any other libevent header is included. */
+#include "config.h"
+
+#include "base/third_party/libevent/event-internal.h"
+#include "base/third_party/libevent/event.h"
+#include "base/third_party/libevent/evsignal.h"
+
+
+struct event_base *evsignal_base = 0;
+
+int evsignal_init(struct event_base *base) {
+  /* Do nothing, and return success. */
+  return 0;
+}
+
+void evsignal_process(struct event_base *base) {
+}
+
+int evsignal_add(struct event *event) {
+  /* Do nothing, and return an error. */
+  return -1;
+}
+
+int evsignal_del(struct event *event) {
+  /* Do nothing, and return an error. */
+  return -1;
+}
+
+void evsignal_dealloc(struct event_base *base) {
+}
diff --git a/base/third_party/libevent/poll.c b/base/third_party/libevent/poll.c
new file mode 100644
index 0000000..2aa245b
--- /dev/null
+++ b/base/third_party/libevent/poll.c
@@ -0,0 +1,389 @@
+/*	$OpenBSD: poll.c,v 1.2 2002/06/25 15:50:15 mickey Exp $	*/
+
+/*
+ * Copyright 2000-2003 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <sys/types.h>
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <sys/_libevent_time.h>
+#endif
+#include <sys/queue.h>
+#include <poll.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+#ifdef CHECK_INVARIANTS
+#include <assert.h>
+#endif
+
+#include "event.h"
+#include "event-internal.h"
+#include "evsignal.h"
+#include "log.h"
+
+struct pollop {
+	int event_count;		/* Number of pollfd slots allocated */
+	int nfds;			/* Number of pollfd slots in use */
+	int fd_count;			/* Size of idxplus1_by_fd */
+	struct pollfd *event_set;
+	struct event **event_r_back;
+	struct event **event_w_back;
+	int *idxplus1_by_fd; /* Index into event_set by fd; we add 1 so
+			      * that 0 (which is easy to memset) can mean
+			      * "no entry." */
+};
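+
+/* For illustration: if fd 5 is stored at event_set[2], then
+ * idxplus1_by_fd[5] == 3, and lookups throughout this file take the form
+ *
+ *   int i = pop->idxplus1_by_fd[fd] - 1;   (i < 0 means "no entry")
+ */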
+
+static void *poll_init	(struct event_base *);
+static int poll_add		(void *, struct event *);
+static int poll_del		(void *, struct event *);
+static int poll_dispatch	(struct event_base *, void *, struct timeval *);
+static void poll_dealloc	(struct event_base *, void *);
+
+const struct eventop pollops = {
+	"poll",
+	poll_init,
+	poll_add,
+	poll_del,
+	poll_dispatch,
+	poll_dealloc,
+	0
+};
+
+static void *
+poll_init(struct event_base *base)
+{
+	struct pollop *pollop;
+
+	/* Disable poll when this environment variable is set */
+	if (evutil_getenv("EVENT_NOPOLL"))
+		return (NULL);
+
+	if (!(pollop = calloc(1, sizeof(struct pollop))))
+		return (NULL);
+
+	evsignal_init(base);
+
+	return (pollop);
+}
+
+#ifdef CHECK_INVARIANTS
+static void
+poll_check_ok(struct pollop *pop)
+{
+	int i, idx;
+	struct event *ev;
+
+	for (i = 0; i < pop->fd_count; ++i) {
+		idx = pop->idxplus1_by_fd[i]-1;
+		if (idx < 0)
+			continue;
+		assert(pop->event_set[idx].fd == i);
+		if (pop->event_set[idx].events & POLLIN) {
+			ev = pop->event_r_back[idx];
+			assert(ev);
+			assert(ev->ev_events & EV_READ);
+			assert(ev->ev_fd == i);
+		}
+		if (pop->event_set[idx].events & POLLOUT) {
+			ev = pop->event_w_back[idx];
+			assert(ev);
+			assert(ev->ev_events & EV_WRITE);
+			assert(ev->ev_fd == i);
+		}
+	}
+	for (i = 0; i < pop->nfds; ++i) {
+		struct pollfd *pfd = &pop->event_set[i];
+		assert(pop->idxplus1_by_fd[pfd->fd] == i+1);
+	}
+}
+#else
+#define poll_check_ok(pop)
+#endif
+
+static int
+poll_dispatch(struct event_base *base, void *arg, struct timeval *tv)
+{
+	int res, i, j, msec = -1, nfds;
+	struct pollop *pop = arg;
+
+	poll_check_ok(pop);
+
+	if (tv != NULL)
+		msec = tv->tv_sec * 1000 + (tv->tv_usec + 999) / 1000;
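+	/* The +999 rounds microseconds up to the next millisecond, so a
+	 * short timeout (e.g. 1us) still waits ~1ms instead of returning
+	 * immediately. */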
+
+	nfds = pop->nfds;
+	res = poll(pop->event_set, nfds, msec);
+
+	if (res == -1) {
+		if (errno != EINTR) {
+			event_warn("poll");
+			return (-1);
+		}
+
+		evsignal_process(base);
+		return (0);
+	} else if (base->sig.evsignal_caught) {
+		evsignal_process(base);
+	}
+
+	event_debug(("%s: poll reports %d", __func__, res));
+
+	if (res == 0 || nfds == 0)
+		return (0);
+
+	i = random() % nfds;
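+	/* Starting the scan at a random offset keeps one ready fd from
+	 * always being serviced first and starving the others. */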
+	for (j = 0; j < nfds; j++) {
+		struct event *r_ev = NULL, *w_ev = NULL;
+		int what;
+		if (++i == nfds)
+			i = 0;
+		what = pop->event_set[i].revents;
+
+		if (!what)
+			continue;
+
+		res = 0;
+
+		/* If the fd was closed or errored out, notify both readers and writers */
+		if (what & (POLLHUP|POLLERR))
+			what |= POLLIN|POLLOUT;
+		if (what & POLLIN) {
+			res |= EV_READ;
+			r_ev = pop->event_r_back[i];
+		}
+		if (what & POLLOUT) {
+			res |= EV_WRITE;
+			w_ev = pop->event_w_back[i];
+		}
+		if (res == 0)
+			continue;
+
+		if (r_ev && (res & r_ev->ev_events)) {
+			event_active(r_ev, res & r_ev->ev_events, 1);
+		}
+		if (w_ev && w_ev != r_ev && (res & w_ev->ev_events)) {
+			event_active(w_ev, res & w_ev->ev_events, 1);
+		}
+	}
+
+	return (0);
+}
+
+static int
+poll_add(void *arg, struct event *ev)
+{
+	struct pollop *pop = arg;
+	struct pollfd *pfd = NULL;
+	int i;
+
+	if (ev->ev_events & EV_SIGNAL)
+		return (evsignal_add(ev));
+	if (!(ev->ev_events & (EV_READ|EV_WRITE)))
+		return (0);
+
+	poll_check_ok(pop);
+	if (pop->nfds + 1 >= pop->event_count) {
+		struct pollfd *tmp_event_set;
+		struct event **tmp_event_r_back;
+		struct event **tmp_event_w_back;
+		int tmp_event_count;
+
+		if (pop->event_count < 32)
+			tmp_event_count = 32;
+		else
+			tmp_event_count = pop->event_count * 2;
+
+		/* We need more file descriptors */
+		tmp_event_set = realloc(pop->event_set,
+				 tmp_event_count * sizeof(struct pollfd));
+		if (tmp_event_set == NULL) {
+			event_warn("realloc");
+			return (-1);
+		}
+		pop->event_set = tmp_event_set;
+
+		tmp_event_r_back = realloc(pop->event_r_back,
+			    tmp_event_count * sizeof(struct event *));
+		if (tmp_event_r_back == NULL) {
+			/* event_set overallocated; that's okay. */
+			event_warn("realloc");
+			return (-1);
+		}
+		pop->event_r_back = tmp_event_r_back;
+
+		tmp_event_w_back = realloc(pop->event_w_back,
+			    tmp_event_count * sizeof(struct event *));
+		if (tmp_event_w_back == NULL) {
+			/* event_set and event_r_back overallocated; that's
+			 * okay. */
+			event_warn("realloc");
+			return (-1);
+		}
+		pop->event_w_back = tmp_event_w_back;
+
+		pop->event_count = tmp_event_count;
+	}
+	if (ev->ev_fd >= pop->fd_count) {
+		int *tmp_idxplus1_by_fd;
+		int new_count;
+		if (pop->fd_count < 32)
+			new_count = 32;
+		else
+			new_count = pop->fd_count * 2;
+		while (new_count <= ev->ev_fd)
+			new_count *= 2;
+		tmp_idxplus1_by_fd =
+			realloc(pop->idxplus1_by_fd, new_count * sizeof(int));
+		if (tmp_idxplus1_by_fd == NULL) {
+			event_warn("realloc");
+			return (-1);
+		}
+		pop->idxplus1_by_fd = tmp_idxplus1_by_fd;
+		memset(pop->idxplus1_by_fd + pop->fd_count,
+		       0, sizeof(int)*(new_count - pop->fd_count));
+		pop->fd_count = new_count;
+	}
+
+	i = pop->idxplus1_by_fd[ev->ev_fd] - 1;
+	if (i >= 0) {
+		pfd = &pop->event_set[i];
+	} else {
+		i = pop->nfds++;
+		pfd = &pop->event_set[i];
+		pfd->events = 0;
+		pfd->fd = ev->ev_fd;
+		pop->event_w_back[i] = pop->event_r_back[i] = NULL;
+		pop->idxplus1_by_fd[ev->ev_fd] = i + 1;
+	}
+
+	pfd->revents = 0;
+	if (ev->ev_events & EV_WRITE) {
+		pfd->events |= POLLOUT;
+		pop->event_w_back[i] = ev;
+	}
+	if (ev->ev_events & EV_READ) {
+		pfd->events |= POLLIN;
+		pop->event_r_back[i] = ev;
+	}
+	poll_check_ok(pop);
+
+	return (0);
+}
+
+/*
+ * Nothing to be done here.
+ */
+
+static int
+poll_del(void *arg, struct event *ev)
+{
+	struct pollop *pop = arg;
+	struct pollfd *pfd = NULL;
+	int i;
+
+	if (ev->ev_events & EV_SIGNAL)
+		return (evsignal_del(ev));
+
+	if (!(ev->ev_events & (EV_READ|EV_WRITE)))
+		return (0);
+
+	poll_check_ok(pop);
+	i = pop->idxplus1_by_fd[ev->ev_fd] - 1;
+	if (i < 0)
+		return (-1);
+
+	/* Do we still want to read or write? */
+	pfd = &pop->event_set[i];
+	if (ev->ev_events & EV_READ) {
+		pfd->events &= ~POLLIN;
+		pop->event_r_back[i] = NULL;
+	}
+	if (ev->ev_events & EV_WRITE) {
+		pfd->events &= ~POLLOUT;
+		pop->event_w_back[i] = NULL;
+	}
+	poll_check_ok(pop);
+	if (pfd->events)
+		/* Another event cares about that fd. */
+		return (0);
+
+	/* Okay, so we aren't interested in that fd anymore. */
+	pop->idxplus1_by_fd[ev->ev_fd] = 0;
+
+	--pop->nfds;
+	if (i != pop->nfds) {
+		/* 
+		 * Shift the last pollfd down into the now-unoccupied
+		 * position.
+		 */
+		memcpy(&pop->event_set[i], &pop->event_set[pop->nfds],
+		       sizeof(struct pollfd));
+		pop->event_r_back[i] = pop->event_r_back[pop->nfds];
+		pop->event_w_back[i] = pop->event_w_back[pop->nfds];
+		pop->idxplus1_by_fd[pop->event_set[i].fd] = i + 1;
+	}
+
+	poll_check_ok(pop);
+	return (0);
+}
+
+static void
+poll_dealloc(struct event_base *base, void *arg)
+{
+	struct pollop *pop = arg;
+
+	evsignal_dealloc(base);
+	if (pop->event_set)
+		free(pop->event_set);
+	if (pop->event_r_back)
+		free(pop->event_r_back);
+	if (pop->event_w_back)
+		free(pop->event_w_back);
+	if (pop->idxplus1_by_fd)
+		free(pop->idxplus1_by_fd);
+
+	memset(pop, 0, sizeof(struct pollop));
+	free(pop);
+}
diff --git a/base/third_party/libevent/sample/Makefile.am b/base/third_party/libevent/sample/Makefile.am
new file mode 100644
index 0000000..2f4e26e
--- /dev/null
+++ b/base/third_party/libevent/sample/Makefile.am
@@ -0,0 +1,14 @@
+AUTOMAKE_OPTIONS = foreign no-dependencies
+
+LDADD = ../libevent.la
+AM_CFLAGS = -I$(top_srcdir) -I$(top_srcdir)/compat
+
+noinst_PROGRAMS = event-test time-test signal-test
+
+event_test_SOURCES = event-test.c
+time_test_SOURCES = time-test.c
+signal_test_SOURCES = signal-test.c
+
+verify:
+
+DISTCLEANFILES = *~
diff --git a/base/third_party/libevent/sample/event-test.c b/base/third_party/libevent/sample/event-test.c
new file mode 100644
index 0000000..0a439ce
--- /dev/null
+++ b/base/third_party/libevent/sample/event-test.c
@@ -0,0 +1,142 @@
+/*
+ * Compile with:
+ * cc -I/usr/local/include -o event-test event-test.c -L/usr/local/lib -levent
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#ifndef WIN32
+#include <sys/queue.h>
+#include <unistd.h>
+#include <sys/time.h>
+#else
+#include <windows.h>
+#endif
+#include <fcntl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+
+#include <event.h>
+
+static void
+fifo_read(int fd, short event, void *arg)
+{
+	char buf[255];
+	int len;
+	struct event *ev = arg;
+#ifdef WIN32
+	DWORD dwBytesRead;
+#endif
+
+	/* Reschedule this event */
+	event_add(ev, NULL);
+
+	fprintf(stderr, "fifo_read called with fd: %d, event: %d, arg: %p\n",
+		fd, event, arg);
+#ifdef WIN32
+	len = ReadFile((HANDLE)fd, buf, sizeof(buf) - 1, &dwBytesRead, NULL);
+
+	// Check for end of file. 
+	if(len && dwBytesRead == 0) {
+		fprintf(stderr, "End Of File");
+		event_del(ev);
+		return;
+	}
+
+	buf[dwBytesRead] = '\0';
+#else
+	len = read(fd, buf, sizeof(buf) - 1);
+
+	if (len == -1) {
+		perror("read");
+		return;
+	} else if (len == 0) {
+		fprintf(stderr, "Connection closed\n");
+		return;
+	}
+
+	buf[len] = '\0';
+#endif
+	fprintf(stdout, "Read: %s\n", buf);
+}
+
+int
+main (int argc, char **argv)
+{
+	struct event evfifo;
+#ifdef WIN32
+	HANDLE socket;
+	// Open a file. 
+	socket = CreateFileA("test.txt",     // open File 
+			GENERIC_READ,                 // open for reading 
+			0,                            // do not share 
+			NULL,                         // no security 
+			OPEN_EXISTING,                // existing file only 
+			FILE_ATTRIBUTE_NORMAL,        // normal file 
+			NULL);                        // no attr. template 
+
+	if(socket == INVALID_HANDLE_VALUE)
+		return 1;
+
+#else
+	struct stat st;
+	const char *fifo = "event.fifo";
+	int socket;
+ 
+	if (lstat (fifo, &st) == 0) {
+		if ((st.st_mode & S_IFMT) == S_IFREG) {
+			errno = EEXIST;
+			perror("lstat");
+			exit (1);
+		}
+	}
+
+	unlink (fifo);
+	if (mkfifo (fifo, 0600) == -1) {
+		perror("mkfifo");
+		exit (1);
+	}
+
+	/* Linux pipes are broken, we need O_RDWR instead of O_RDONLY */
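+	/* (With O_RDONLY, the FIFO hits end-of-file as soon as its last
+	 * writer closes and the read event fires in a busy loop; O_RDWR
+	 * keeps one writer open and avoids that.) */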
+#ifdef __linux
+	socket = open (fifo, O_RDWR | O_NONBLOCK, 0);
+#else
+	socket = open (fifo, O_RDONLY | O_NONBLOCK, 0);
+#endif
+
+	if (socket == -1) {
+		perror("open");
+		exit (1);
+	}
+
+	fprintf(stderr, "Write data to %s\n", fifo);
+#endif
+	/* Initialize the event library */
+	event_init();
+
+	/* Initialize one event */
+#ifdef WIN32
+	event_set(&evfifo, (int)socket, EV_READ, fifo_read, &evfifo);
+#else
+	event_set(&evfifo, socket, EV_READ, fifo_read, &evfifo);
+#endif
+
+	/* Add it to the active events, without a timeout */
+	event_add(&evfifo, NULL);
+	
+	event_dispatch();
+#ifdef WIN32
+	CloseHandle(socket);
+#endif
+	return (0);
+}
+
diff --git a/base/third_party/libevent/sample/signal-test.c b/base/third_party/libevent/sample/signal-test.c
new file mode 100644
index 0000000..5a5a303
--- /dev/null
+++ b/base/third_party/libevent/sample/signal-test.c
@@ -0,0 +1,65 @@
+/*
+ * Compile with:
+ * cc -I/usr/local/include -o signal-test \
+ *   signal-test.c -L/usr/local/lib -levent
+ */
+
+#include <sys/types.h>
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <sys/stat.h>
+#ifndef WIN32
+#include <sys/queue.h>
+#include <unistd.h>
+#include <sys/time.h>
+#else
+#include <windows.h>
+#endif
+#include <signal.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+
+#include <event.h>
+
+int called = 0;
+
+static void
+signal_cb(int fd, short event, void *arg)
+{
+	struct event *signal = arg;
+
+	printf("%s: got signal %d\n", __func__, EVENT_SIGNAL(signal));
+
+	if (called >= 2)
+		event_del(signal);
+
+	called++;
+}
+
+int
+main (int argc, char **argv)
+{
+	struct event signal_int;
+
+	/* Initialize the event library */
+	struct event_base* base = event_base_new();
+
+	/* Initialize one event */
+	event_set(&signal_int, SIGINT, EV_SIGNAL|EV_PERSIST, signal_cb,
+	    &signal_int);
+	event_base_set(base, &signal_int);
+
+	event_add(&signal_int, NULL);
+
+	event_base_dispatch(base);
+	event_base_free(base);
+
+	return (0);
+}
+
diff --git a/base/third_party/libevent/sample/time-test.c b/base/third_party/libevent/sample/time-test.c
new file mode 100644
index 0000000..069d4f8
--- /dev/null
+++ b/base/third_party/libevent/sample/time-test.c
@@ -0,0 +1,70 @@
+/*
+ * Compile with:
+ * cc -I/usr/local/include -o time-test time-test.c -L/usr/local/lib -levent
+ */
+
+#include <sys/types.h>
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <sys/stat.h>
+#ifndef WIN32
+#include <sys/queue.h>
+#include <unistd.h>
+#endif
+#include <time.h>
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#include <fcntl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+
+#include <event.h>
+#include <evutil.h>
+
+int lasttime;
+
+static void
+timeout_cb(int fd, short event, void *arg)
+{
+	struct timeval tv;
+	struct event *timeout = arg;
+	int newtime = time(NULL);
+
+	printf("%s: called at %d: %d\n", __func__, newtime,
+	    newtime - lasttime);
+	lasttime = newtime;
+
+	evutil_timerclear(&tv);
+	tv.tv_sec = 2;
+	event_add(timeout, &tv);
+}
+
+int
+main (int argc, char **argv)
+{
+	struct event timeout;
+	struct timeval tv;
+ 
+	/* Initialize the event library */
+	event_init();
+
+	/* Initialize one event */
+	evtimer_set(&timeout, timeout_cb, &timeout);
+
+	evutil_timerclear(&tv);
+	tv.tv_sec = 2;
+	event_add(&timeout, &tv);
+
+	lasttime = time(NULL);
+	
+	event_dispatch();
+
+	return (0);
+}
+
diff --git a/base/third_party/libevent/select.c b/base/third_party/libevent/select.c
new file mode 100644
index 0000000..3f73331
--- /dev/null
+++ b/base/third_party/libevent/select.c
@@ -0,0 +1,368 @@
+/*	$OpenBSD: select.c,v 1.2 2002/06/25 15:50:15 mickey Exp $	*/
+
+/*
+ * Copyright 2000-2002 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <sys/types.h>
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <sys/_libevent_time.h>
+#endif
+#ifdef HAVE_SYS_SELECT_H
+#include <sys/select.h>
+#endif
+#include <sys/queue.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+#ifdef CHECK_INVARIANTS
+#include <assert.h>
+#endif
+
+#include "event.h"
+#include "evutil.h"
+#include "event-internal.h"
+#include "evsignal.h"
+#include "log.h"
+
+#ifndef howmany
+#define howmany(x, y)	(((x)+((y)-1))/(y))
+#endif
+
+#ifndef _EVENT_HAVE_FD_MASK
+/* This type is mandatory, but Android doesn't define it. */
+#undef NFDBITS
+#define NFDBITS (sizeof(long)*8)
+typedef unsigned long fd_mask;
+#endif
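+
+/* fd_sets are sized in whole fd_mask words: to cover fds 0..n, the code
+ * below allocates howmany(n + 1, NFDBITS) words, i.e. the ceiling of
+ * (n + 1) / NFDBITS. For example, with NFDBITS == 64, fds 0..64 need
+ * howmany(65, 64) == 2 words. */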
+
+struct selectop {
+	int event_fds;		/* Highest fd in fd set */
+	int event_fdsz;
+	fd_set *event_readset_in;
+	fd_set *event_writeset_in;
+	fd_set *event_readset_out;
+	fd_set *event_writeset_out;
+	struct event **event_r_by_fd;
+	struct event **event_w_by_fd;
+};
+
+static void *select_init	(struct event_base *);
+static int select_add		(void *, struct event *);
+static int select_del		(void *, struct event *);
+static int select_dispatch	(struct event_base *, void *, struct timeval *);
+static void select_dealloc     (struct event_base *, void *);
+
+const struct eventop selectops = {
+	"select",
+	select_init,
+	select_add,
+	select_del,
+	select_dispatch,
+	select_dealloc,
+	0
+};
+
+static int select_resize(struct selectop *sop, int fdsz);
+
+static void *
+select_init(struct event_base *base)
+{
+	struct selectop *sop;
+
+	/* Disable select when this environment variable is set */
+	if (evutil_getenv("EVENT_NOSELECT"))
+		return (NULL);
+
+	if (!(sop = calloc(1, sizeof(struct selectop))))
+		return (NULL);
+
+	select_resize(sop, howmany(32 + 1, NFDBITS)*sizeof(fd_mask));
+
+	evsignal_init(base);
+
+	return (sop);
+}
+
+#ifdef CHECK_INVARIANTS
+static void
+check_selectop(struct selectop *sop)
+{
+	int i;
+	for (i = 0; i <= sop->event_fds; ++i) {
+		if (FD_ISSET(i, sop->event_readset_in)) {
+			assert(sop->event_r_by_fd[i]);
+			assert(sop->event_r_by_fd[i]->ev_events & EV_READ);
+			assert(sop->event_r_by_fd[i]->ev_fd == i);
+		} else {
+			assert(! sop->event_r_by_fd[i]);
+		}
+		if (FD_ISSET(i, sop->event_writeset_in)) {
+			assert(sop->event_w_by_fd[i]);
+			assert(sop->event_w_by_fd[i]->ev_events & EV_WRITE);
+			assert(sop->event_w_by_fd[i]->ev_fd == i);
+		} else {
+			assert(! sop->event_w_by_fd[i]);
+		}
+	}
+
+}
+#else
+#define check_selectop(sop) do { (void) sop; } while (0)
+#endif
+
+static int
+select_dispatch(struct event_base *base, void *arg, struct timeval *tv)
+{
+	int res, i, j;
+	struct selectop *sop = arg;
+
+	check_selectop(sop);
+
+	memcpy(sop->event_readset_out, sop->event_readset_in,
+	       sop->event_fdsz);
+	memcpy(sop->event_writeset_out, sop->event_writeset_in,
+	       sop->event_fdsz);
+
+	res = select(sop->event_fds + 1, sop->event_readset_out,
+	    sop->event_writeset_out, NULL, tv);
+
+	check_selectop(sop);
+
+	if (res == -1) {
+		if (errno != EINTR) {
+			event_warn("select");
+			return (-1);
+		}
+
+		evsignal_process(base);
+		return (0);
+	} else if (base->sig.evsignal_caught) {
+		evsignal_process(base);
+	}
+
+	event_debug(("%s: select reports %d", __func__, res));
+
+	check_selectop(sop);
+	i = random() % (sop->event_fds+1);
+	for (j = 0; j <= sop->event_fds; ++j) {
+		struct event *r_ev = NULL, *w_ev = NULL;
+		if (++i >= sop->event_fds+1)
+			i = 0;
+
+		res = 0;
+		if (FD_ISSET(i, sop->event_readset_out)) {
+			r_ev = sop->event_r_by_fd[i];
+			res |= EV_READ;
+		}
+		if (FD_ISSET(i, sop->event_writeset_out)) {
+			w_ev = sop->event_w_by_fd[i];
+			res |= EV_WRITE;
+		}
+		if (r_ev && (res & r_ev->ev_events)) {
+			event_active(r_ev, res & r_ev->ev_events, 1);
+		}
+		if (w_ev && w_ev != r_ev && (res & w_ev->ev_events)) {
+			event_active(w_ev, res & w_ev->ev_events, 1);
+		}
+	}
+	check_selectop(sop);
+
+	return (0);
+}
+
+
+static int
+select_resize(struct selectop *sop, int fdsz)
+{
+	int n_events, n_events_old;
+
+	fd_set *readset_in = NULL;
+	fd_set *writeset_in = NULL;
+	fd_set *readset_out = NULL;
+	fd_set *writeset_out = NULL;
+	struct event **r_by_fd = NULL;
+	struct event **w_by_fd = NULL;
+
+	n_events = (fdsz/sizeof(fd_mask)) * NFDBITS;
+	n_events_old = (sop->event_fdsz/sizeof(fd_mask)) * NFDBITS;
+
+	if (sop->event_readset_in)
+		check_selectop(sop);
+
+	if ((readset_in = realloc(sop->event_readset_in, fdsz)) == NULL)
+		goto error;
+	sop->event_readset_in = readset_in;
+	if ((readset_out = realloc(sop->event_readset_out, fdsz)) == NULL)
+		goto error;
+	sop->event_readset_out = readset_out;
+	if ((writeset_in = realloc(sop->event_writeset_in, fdsz)) == NULL)
+		goto error;
+	sop->event_writeset_in = writeset_in;
+	if ((writeset_out = realloc(sop->event_writeset_out, fdsz)) == NULL)
+		goto error;
+	sop->event_writeset_out = writeset_out;
+	if ((r_by_fd = realloc(sop->event_r_by_fd,
+		 n_events*sizeof(struct event*))) == NULL)
+		goto error;
+	sop->event_r_by_fd = r_by_fd;
+	if ((w_by_fd = realloc(sop->event_w_by_fd,
+		 n_events * sizeof(struct event*))) == NULL)
+		goto error;
+	sop->event_w_by_fd = w_by_fd;
+
+	memset((char *)sop->event_readset_in + sop->event_fdsz, 0,
+	    fdsz - sop->event_fdsz);
+	memset((char *)sop->event_writeset_in + sop->event_fdsz, 0,
+	    fdsz - sop->event_fdsz);
+	memset(sop->event_r_by_fd + n_events_old, 0,
+	    (n_events-n_events_old) * sizeof(struct event*));
+	memset(sop->event_w_by_fd + n_events_old, 0,
+	    (n_events-n_events_old) * sizeof(struct event*));
+
+	sop->event_fdsz = fdsz;
+	check_selectop(sop);
+
+	return (0);
+
+ error:
+	event_warn("malloc");
+	return (-1);
+}
+
+
+static int
+select_add(void *arg, struct event *ev)
+{
+	struct selectop *sop = arg;
+
+	if (ev->ev_events & EV_SIGNAL)
+		return (evsignal_add(ev));
+
+	check_selectop(sop);
+	/*
+	 * Keep track of the highest fd, so that we can calculate the size
+	 * of the fd_sets for select(2)
+	 */
+	if (sop->event_fds < ev->ev_fd) {
+		int fdsz = sop->event_fdsz;
+
+		if (fdsz < sizeof(fd_mask))
+			fdsz = sizeof(fd_mask);
+
+		while (fdsz <
+		    (howmany(ev->ev_fd + 1, NFDBITS) * sizeof(fd_mask)))
+			fdsz *= 2;
+
+		if (fdsz != sop->event_fdsz) {
+			if (select_resize(sop, fdsz)) {
+				check_selectop(sop);
+				return (-1);
+			}
+		}
+
+		sop->event_fds = ev->ev_fd;
+	}
+
+	if (ev->ev_events & EV_READ) {
+		FD_SET(ev->ev_fd, sop->event_readset_in);
+		sop->event_r_by_fd[ev->ev_fd] = ev;
+	}
+	if (ev->ev_events & EV_WRITE) {
+		FD_SET(ev->ev_fd, sop->event_writeset_in);
+		sop->event_w_by_fd[ev->ev_fd] = ev;
+	}
+	check_selectop(sop);
+
+	return (0);
+}
+
+/*
+ * Nothing to be done here.
+ */
+
+static int
+select_del(void *arg, struct event *ev)
+{
+	struct selectop *sop = arg;
+
+	check_selectop(sop);
+	if (ev->ev_events & EV_SIGNAL)
+		return (evsignal_del(ev));
+
+	if (sop->event_fds < ev->ev_fd) {
+		check_selectop(sop);
+		return (0);
+	}
+
+	if (ev->ev_events & EV_READ) {
+		FD_CLR(ev->ev_fd, sop->event_readset_in);
+		sop->event_r_by_fd[ev->ev_fd] = NULL;
+	}
+
+	if (ev->ev_events & EV_WRITE) {
+		FD_CLR(ev->ev_fd, sop->event_writeset_in);
+		sop->event_w_by_fd[ev->ev_fd] = NULL;
+	}
+
+	check_selectop(sop);
+	return (0);
+}
+
+static void
+select_dealloc(struct event_base *base, void *arg)
+{
+	struct selectop *sop = arg;
+
+	evsignal_dealloc(base);
+	if (sop->event_readset_in)
+		free(sop->event_readset_in);
+	if (sop->event_writeset_in)
+		free(sop->event_writeset_in);
+	if (sop->event_readset_out)
+		free(sop->event_readset_out);
+	if (sop->event_writeset_out)
+		free(sop->event_writeset_out);
+	if (sop->event_r_by_fd)
+		free(sop->event_r_by_fd);
+	if (sop->event_w_by_fd)
+		free(sop->event_w_by_fd);
+
+	memset(sop, 0, sizeof(struct selectop));
+	free(sop);
+}
diff --git a/base/third_party/libevent/signal.c b/base/third_party/libevent/signal.c
new file mode 100644
index 0000000..b8d51ab
--- /dev/null
+++ b/base/third_party/libevent/signal.c
@@ -0,0 +1,386 @@
+/*	$OpenBSD: select.c,v 1.2 2002/06/25 15:50:15 mickey Exp $	*/
+
+/*
+ * Copyright 2000-2002 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#ifdef WIN32
+#define WIN32_LEAN_AND_MEAN
+#include <winsock2.h>
+#include <windows.h>
+#undef WIN32_LEAN_AND_MEAN
+#endif
+#include <sys/types.h>
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#include <sys/queue.h>
+#ifdef HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#include <errno.h>
+#ifdef HAVE_FCNTL_H
+#include <fcntl.h>
+#endif
+#include <assert.h>
+
+#include "event.h"
+#include "event-internal.h"
+#include "evsignal.h"
+#include "evutil.h"
+#include "log.h"
+
+struct event_base *evsignal_base = NULL;
+
+static void evsignal_handler(int sig);
+
+#ifdef WIN32
+#define error_is_eagain(err)			\
+	((err) == EAGAIN || (err) == WSAEWOULDBLOCK)
+#else
+#define error_is_eagain(err) ((err) == EAGAIN)
+#endif
+
+/* Callback for when the signal handler writes a byte to our signaling socket */
+static void
+evsignal_cb(int fd, short what, void *arg)
+{
+	static char signals[1];
+#ifdef WIN32
+	SSIZE_T n;
+#else
+	ssize_t n;
+#endif
+
+	n = recv(fd, signals, sizeof(signals), 0);
+	if (n == -1) {
+		int err = EVUTIL_SOCKET_ERROR();
+		if (! error_is_eagain(err))
+			event_err(1, "%s: read", __func__);
+	}
+}
+
+#ifdef HAVE_SETFD
+#define FD_CLOSEONEXEC(x) do { \
+        if (fcntl(x, F_SETFD, 1) == -1) \
+                event_warn("fcntl(%d, F_SETFD)", x); \
+} while (0)
+#else
+#define FD_CLOSEONEXEC(x)
+#endif
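+
+/* Marking both ends of the signal socketpair close-on-exec keeps them from
+ * leaking into any child processes the application spawns. */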
+
+int
+evsignal_init(struct event_base *base)
+{
+	int i;
+
+	/* 
+	 * Our signal handler is going to write to one end of the socket
+	 * pair to wake up our event loop.  The event loop then scans for
+	 * signals that got delivered.
+	 */
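+	/* (Roughly: evsignal_handler() send()s one byte on pair[0]; the
+	 * internal ev_signal event on pair[1] fires, evsignal_cb() drains
+	 * the byte, and the dispatch loop calls evsignal_process().) */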
+	if (evutil_socketpair(
+		    AF_UNIX, SOCK_STREAM, 0, base->sig.ev_signal_pair) == -1) {
+#ifdef WIN32
+		/* Make this nonfatal on win32, where sometimes people
+		   have localhost firewalled. */
+		event_warn("%s: socketpair", __func__);
+#else
+		event_err(1, "%s: socketpair", __func__);
+#endif
+		return -1;
+	}
+
+	FD_CLOSEONEXEC(base->sig.ev_signal_pair[0]);
+	FD_CLOSEONEXEC(base->sig.ev_signal_pair[1]);
+	base->sig.sh_old = NULL;
+	base->sig.sh_old_max = 0;
+	base->sig.evsignal_caught = 0;
+	memset(&base->sig.evsigcaught, 0, sizeof(sig_atomic_t)*NSIG);
+	/* initialize the queues for all events */
+	for (i = 0; i < NSIG; ++i)
+		TAILQ_INIT(&base->sig.evsigevents[i]);
+
+	evutil_make_socket_nonblocking(base->sig.ev_signal_pair[0]);
+	evutil_make_socket_nonblocking(base->sig.ev_signal_pair[1]);
+
+	event_set(&base->sig.ev_signal, base->sig.ev_signal_pair[1],
+		EV_READ | EV_PERSIST, evsignal_cb, &base->sig.ev_signal);
+	base->sig.ev_signal.ev_base = base;
+	base->sig.ev_signal.ev_flags |= EVLIST_INTERNAL;
+
+	return 0;
+}
+
+/* Helper: set the signal handler for evsignal to handler in base, so that
+ * we can restore the original handler when we clear the current one. */
+int
+_evsignal_set_handler(struct event_base *base,
+		      int evsignal, void (*handler)(int))
+{
+#ifdef HAVE_SIGACTION
+	struct sigaction sa;
+#else
+	ev_sighandler_t sh;
+#endif
+	struct evsignal_info *sig = &base->sig;
+	void *p;
+
+	/*
+	 * Resize the saved signal handler array up to the highest signal
+	 * number; a dynamic array is used to keep the memory footprint low.
+	 */
+	if (evsignal >= sig->sh_old_max) {
+		int new_max = evsignal + 1;
+		event_debug(("%s: evsignal (%d) >= sh_old_max (%d), resizing",
+			    __func__, evsignal, sig->sh_old_max));
+		p = realloc(sig->sh_old, new_max * sizeof(*sig->sh_old));
+		if (p == NULL) {
+			event_warn("realloc");
+			return (-1);
+		}
+
+		memset((char *)p + sig->sh_old_max * sizeof(*sig->sh_old),
+		    0, (new_max - sig->sh_old_max) * sizeof(*sig->sh_old));
+
+		sig->sh_old_max = new_max;
+		sig->sh_old = p;
+	}
+
+	/* allocate space to save the previous handler */
+	sig->sh_old[evsignal] = malloc(sizeof *sig->sh_old[evsignal]);
+	if (sig->sh_old[evsignal] == NULL) {
+		event_warn("malloc");
+		return (-1);
+	}
+
+	/* save the previous handler and set up the new one */
+#ifdef HAVE_SIGACTION
+	memset(&sa, 0, sizeof(sa));
+	sa.sa_handler = handler;
+	sa.sa_flags |= SA_RESTART;
+	sigfillset(&sa.sa_mask);
+
+	if (sigaction(evsignal, &sa, sig->sh_old[evsignal]) == -1) {
+		event_warn("sigaction");
+		free(sig->sh_old[evsignal]);
+		sig->sh_old[evsignal] = NULL;
+		return (-1);
+	}
+#else
+	if ((sh = signal(evsignal, handler)) == SIG_ERR) {
+		event_warn("signal");
+		free(sig->sh_old[evsignal]);
+		sig->sh_old[evsignal] = NULL;
+		return (-1);
+	}
+	*sig->sh_old[evsignal] = sh;
+#endif
+
+	return (0);
+}
+
+int
+evsignal_add(struct event *ev)
+{
+	int evsignal;
+	struct event_base *base = ev->ev_base;
+	struct evsignal_info *sig = &ev->ev_base->sig;
+
+	if (ev->ev_events & (EV_READ|EV_WRITE))
+		event_errx(1, "%s: EV_SIGNAL incompatible use", __func__);
+	evsignal = EVENT_SIGNAL(ev);
+	assert(evsignal >= 0 && evsignal < NSIG);
+	if (TAILQ_EMPTY(&sig->evsigevents[evsignal])) {
+		event_debug(("%s: %p: changing signal handler", __func__, ev));
+		if (_evsignal_set_handler(
+			    base, evsignal, evsignal_handler) == -1)
+			return (-1);
+
+		/* record the base now so signals that arrive immediately are caught */
+		evsignal_base = base;
+
+		if (!sig->ev_signal_added) {
+			if (event_add(&sig->ev_signal, NULL))
+				return (-1);
+			sig->ev_signal_added = 1;
+		}
+	}
+
+	/* multiple events may listen to the same signal */
+	TAILQ_INSERT_TAIL(&sig->evsigevents[evsignal], ev, ev_signal_next);
+
+	return (0);
+}
+
+int
+_evsignal_restore_handler(struct event_base *base, int evsignal)
+{
+	int ret = 0;
+	struct evsignal_info *sig = &base->sig;
+#ifdef HAVE_SIGACTION
+	struct sigaction *sh;
+#else
+	ev_sighandler_t *sh;
+#endif
+
+	/* restore previous handler */
+	sh = sig->sh_old[evsignal];
+	sig->sh_old[evsignal] = NULL;
+#ifdef HAVE_SIGACTION
+	if (sigaction(evsignal, sh, NULL) == -1) {
+		event_warn("sigaction");
+		ret = -1;
+	}
+#else
+	if (signal(evsignal, *sh) == SIG_ERR) {
+		event_warn("signal");
+		ret = -1;
+	}
+#endif
+	free(sh);
+
+	return ret;
+}
+
+int
+evsignal_del(struct event *ev)
+{
+	struct event_base *base = ev->ev_base;
+	struct evsignal_info *sig = &base->sig;
+	int evsignal = EVENT_SIGNAL(ev);
+
+	assert(evsignal >= 0 && evsignal < NSIG);
+
+	/* multiple events may listen to the same signal */
+	TAILQ_REMOVE(&sig->evsigevents[evsignal], ev, ev_signal_next);
+
+	if (!TAILQ_EMPTY(&sig->evsigevents[evsignal]))
+		return (0);
+
+	event_debug(("%s: %p: restoring signal handler", __func__, ev));
+
+	return (_evsignal_restore_handler(ev->ev_base, EVENT_SIGNAL(ev)));
+}
+
+static void
+evsignal_handler(int sig)
+{
+	int save_errno = errno;
+
+	if (evsignal_base == NULL) {
+		event_warn(
+			"%s: received signal %d, but have no base configured",
+			__func__, sig);
+		return;
+	}
+
+	evsignal_base->sig.evsigcaught[sig]++;
+	evsignal_base->sig.evsignal_caught = 1;
+
+#ifndef HAVE_SIGACTION
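+	/* Plain signal() may reset the disposition to SIG_DFL once the
+	 * handler runs (System V semantics), so reinstall it each time. */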
+	signal(sig, evsignal_handler);
+#endif
+
+	/* Wake up our notification mechanism */
+	send(evsignal_base->sig.ev_signal_pair[0], "a", 1, 0);
+	errno = save_errno;
+}
+
+void
+evsignal_process(struct event_base *base)
+{
+	struct evsignal_info *sig = &base->sig;
+	struct event *ev, *next_ev;
+	sig_atomic_t ncalls;
+	int i;
+	
+	base->sig.evsignal_caught = 0;
+	for (i = 1; i < NSIG; ++i) {
+		ncalls = sig->evsigcaught[i];
+		if (ncalls == 0)
+			continue;
+		sig->evsigcaught[i] -= ncalls;
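+		/* Subtract rather than zero: any signals delivered while
+		 * this loop runs stay counted for the next pass. */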
+
+		for (ev = TAILQ_FIRST(&sig->evsigevents[i]);
+		    ev != NULL; ev = next_ev) {
+			next_ev = TAILQ_NEXT(ev, ev_signal_next);
+			if (!(ev->ev_events & EV_PERSIST))
+				event_del(ev);
+			event_active(ev, EV_SIGNAL, ncalls);
+		}
+
+	}
+}
+
+void
+evsignal_dealloc(struct event_base *base)
+{
+	int i = 0;
+	if (base->sig.ev_signal_added) {
+		event_del(&base->sig.ev_signal);
+		base->sig.ev_signal_added = 0;
+	}
+	for (i = 0; i < NSIG; ++i) {
+		if (i < base->sig.sh_old_max && base->sig.sh_old[i] != NULL)
+			_evsignal_restore_handler(base, i);
+	}
+
+	if (base->sig.ev_signal_pair[0] != -1) {
+		EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]);
+		base->sig.ev_signal_pair[0] = -1;
+	}
+	if (base->sig.ev_signal_pair[1] != -1) {
+		EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]);
+		base->sig.ev_signal_pair[1] = -1;
+	}
+	base->sig.sh_old_max = 0;
+
+	/* per-index frees are handled in evsignal_del() */
+	if (base->sig.sh_old) {
+		free(base->sig.sh_old);
+		base->sig.sh_old = NULL;
+	}
+}
diff --git a/base/third_party/libevent/solaris/config.h b/base/third_party/libevent/solaris/config.h
new file mode 100644
index 0000000..4dd40eb
--- /dev/null
+++ b/base/third_party/libevent/solaris/config.h
@@ -0,0 +1,266 @@
+/* config.h.  Generated from config.h.in by configure.  */
+/* config.h.in.  Generated from configure.in by autoheader.  */
+
+/* Define if clock_gettime is available in libc */
+#define DNS_USE_CPU_CLOCK_FOR_ID 1
+
+/* Define if no secure id variant is available */
+/* #undef DNS_USE_GETTIMEOFDAY_FOR_ID */
+
+/* Define to 1 if you have the `clock_gettime' function. */
+#define HAVE_CLOCK_GETTIME 1
+
+/* Define if /dev/poll is available */
+#define HAVE_DEVPOLL 1
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+#define HAVE_DLFCN_H 1
+
+/* Define if your system supports the epoll system calls */
+/* #undef HAVE_EPOLL */
+
+/* Define to 1 if you have the `epoll_ctl' function. */
+/* #undef HAVE_EPOLL_CTL */
+
+/* Define if your system supports event ports */
+#define HAVE_EVENT_PORTS 1
+
+/* Define to 1 if you have the `fcntl' function. */
+#define HAVE_FCNTL 1
+
+/* Define to 1 if you have the <fcntl.h> header file. */
+#define HAVE_FCNTL_H 1
+
+/* Define to 1 if the system has the type `fd_mask'. */
+#define HAVE_FD_MASK 1
+
+/* Define to 1 if you have the `getaddrinfo' function. */
+#define HAVE_GETADDRINFO 1
+
+/* Define to 1 if you have the `getegid' function. */
+#define HAVE_GETEGID 1
+
+/* Define to 1 if you have the `geteuid' function. */
+#define HAVE_GETEUID 1
+
+/* Define to 1 if you have the `getnameinfo' function. */
+#define HAVE_GETNAMEINFO 1
+
+/* Define to 1 if you have the `gettimeofday' function. */
+#define HAVE_GETTIMEOFDAY 1
+
+/* Define to 1 if you have the `inet_ntop' function. */
+#define HAVE_INET_NTOP 1
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#define HAVE_INTTYPES_H 1
+
+/* Define to 1 if you have the `issetugid' function. */
+#define HAVE_ISSETUGID 1
+
+/* Define to 1 if you have the `kqueue' function. */
+/* #undef HAVE_KQUEUE */
+
+/* Define to 1 if you have the `nsl' library (-lnsl). */
+#define HAVE_LIBNSL 1
+
+/* Define to 1 if you have the `resolv' library (-lresolv). */
+#define HAVE_LIBRESOLV 1
+
+/* Define to 1 if you have the `rt' library (-lrt). */
+#define HAVE_LIBRT 1
+
+/* Define to 1 if you have the `socket' library (-lsocket). */
+#define HAVE_LIBSOCKET 1
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define HAVE_MEMORY_H 1
+
+/* Define to 1 if you have the <netinet/in6.h> header file. */
+/* #undef HAVE_NETINET_IN6_H */
+
+/* Define to 1 if you have the `poll' function. */
+#define HAVE_POLL 1
+
+/* Define to 1 if you have the <poll.h> header file. */
+#define HAVE_POLL_H 1
+
+/* Define to 1 if you have the `port_create' function. */
+#define HAVE_PORT_CREATE 1
+
+/* Define to 1 if you have the <port.h> header file. */
+#define HAVE_PORT_H 1
+
+/* Define to 1 if you have the `select' function. */
+#define HAVE_SELECT 1
+
+/* Define if F_SETFD is defined in <fcntl.h> */
+#define HAVE_SETFD 1
+
+/* Define to 1 if you have the `sigaction' function. */
+#define HAVE_SIGACTION 1
+
+/* Define to 1 if you have the `signal' function. */
+#define HAVE_SIGNAL 1
+
+/* Define to 1 if you have the <signal.h> header file. */
+#define HAVE_SIGNAL_H 1
+
+/* Define to 1 if you have the <stdarg.h> header file. */
+#define HAVE_STDARG_H 1
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#define HAVE_STDINT_H 1
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define HAVE_STRING_H 1
+
+/* Define to 1 if you have the `strlcpy' function. */
+#define HAVE_STRLCPY 1
+
+/* Define to 1 if you have the `strsep' function. */
+#define HAVE_STRSEP 1
+
+/* Define to 1 if you have the `strtok_r' function. */
+#define HAVE_STRTOK_R 1
+
+/* Define to 1 if you have the `strtoll' function. */
+#define HAVE_STRTOLL 1
+
+/* Define to 1 if the system has the type `struct in6_addr'. */
+#define HAVE_STRUCT_IN6_ADDR 1
+
+/* Define to 1 if you have the <sys/devpoll.h> header file. */
+#define HAVE_SYS_DEVPOLL_H 1
+
+/* Define to 1 if you have the <sys/epoll.h> header file. */
+/* #undef HAVE_SYS_EPOLL_H */
+
+/* Define to 1 if you have the <sys/event.h> header file. */
+/* #undef HAVE_SYS_EVENT_H */
+
+/* Define to 1 if you have the <sys/ioctl.h> header file. */
+#define HAVE_SYS_IOCTL_H 1
+
+/* Define to 1 if you have the <sys/param.h> header file. */
+#define HAVE_SYS_PARAM_H 1
+
+/* Define to 1 if you have the <sys/queue.h> header file. */
+#define HAVE_SYS_QUEUE_H 1
+
+/* Define to 1 if you have the <sys/select.h> header file. */
+#define HAVE_SYS_SELECT_H 1
+
+/* Define to 1 if you have the <sys/socket.h> header file. */
+#define HAVE_SYS_SOCKET_H 1
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#define HAVE_SYS_STAT_H 1
+
+/* Define to 1 if you have the <sys/time.h> header file. */
+#define HAVE_SYS_TIME_H 1
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#define HAVE_SYS_TYPES_H 1
+
+/* Define if TAILQ_FOREACH is defined in <sys/queue.h> */
+#define HAVE_TAILQFOREACH 1
+
+/* Define if timeradd is defined in <sys/time.h> */
+#define HAVE_TIMERADD 1
+
+/* Define if timerclear is defined in <sys/time.h> */
+#define HAVE_TIMERCLEAR 1
+
+/* Define if timercmp is defined in <sys/time.h> */
+#define HAVE_TIMERCMP 1
+
+/* Define if timerisset is defined in <sys/time.h> */
+#define HAVE_TIMERISSET 1
+
+/* Define to 1 if the system has the type `uint16_t'. */
+#define HAVE_UINT16_T 1
+
+/* Define to 1 if the system has the type `uint32_t'. */
+#define HAVE_UINT32_T 1
+
+/* Define to 1 if the system has the type `uint64_t'. */
+#define HAVE_UINT64_T 1
+
+/* Define to 1 if the system has the type `uint8_t'. */
+#define HAVE_UINT8_T 1
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#define HAVE_UNISTD_H 1
+
+/* Define to 1 if you have the `vasprintf' function. */
+#define HAVE_VASPRINTF 1
+
+/* Define if kqueue works correctly with pipes */
+/* #undef HAVE_WORKING_KQUEUE */
+
+/* Name of package */
+#define PACKAGE "libevent"
+
+/* Define to the address where bug reports for this package should be sent. */
+#define PACKAGE_BUGREPORT ""
+
+/* Define to the full name of this package. */
+#define PACKAGE_NAME ""
+
+/* Define to the full name and version of this package. */
+#define PACKAGE_STRING ""
+
+/* Define to the one symbol short name of this package. */
+#define PACKAGE_TARNAME ""
+
+/* Define to the version of this package. */
+#define PACKAGE_VERSION ""
+
+/* The size of `int', as computed by sizeof. */
+#define SIZEOF_INT 4
+
+/* The size of `long', as computed by sizeof. */
+#define SIZEOF_LONG 4
+
+/* The size of `long long', as computed by sizeof. */
+#define SIZEOF_LONG_LONG 8
+
+/* The size of `short', as computed by sizeof. */
+#define SIZEOF_SHORT 2
+
+/* Define to 1 if you have the ANSI C header files. */
+#define STDC_HEADERS 1
+
+/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
+#define TIME_WITH_SYS_TIME 1
+
+/* Version number of package */
+#define VERSION "1.4.13-stable"
+
+/* Define to an appropriate substitute if the compiler doesn't have __func__ */
+/* #undef __func__ */
+
+/* Define to empty if `const' does not conform to ANSI C. */
+/* #undef const */
+
+/* Define to `__inline__' or `__inline' if that's what the C compiler
+   calls it, or to nothing if 'inline' is not supported under any name.  */
+#ifndef __cplusplus
+/* #undef inline */
+#endif
+
+/* Define to `int' if <sys/types.h> does not define. */
+/* #undef pid_t */
+
+/* Define to `unsigned int' if <sys/types.h> does not define. */
+/* #undef size_t */
+
+/* Define to unsigned int if you don't have it */
+/* #undef socklen_t */
diff --git a/base/third_party/libevent/solaris/event-config.h b/base/third_party/libevent/solaris/event-config.h
new file mode 100644
index 0000000..afabe2f
--- /dev/null
+++ b/base/third_party/libevent/solaris/event-config.h
@@ -0,0 +1,284 @@
+/* event-config.h
+ * Generated by autoconf; post-processed by libevent.
+ * Do not edit this file.
+ * Do not rely on macros in this file existing in later versions.
+ */
+#ifndef _EVENT_CONFIG_H_
+#define _EVENT_CONFIG_H_
+/* config.h.  Generated from config.h.in by configure.  */
+/* config.h.in.  Generated from configure.in by autoheader.  */
+
+/* Define if clock_gettime is available in libc */
+#define _EVENT_DNS_USE_CPU_CLOCK_FOR_ID 1
+
+/* Define if no secure id variant is available */
+/* #undef _EVENT_DNS_USE_GETTIMEOFDAY_FOR_ID */
+
+/* Define to 1 if you have the `clock_gettime' function. */
+#define _EVENT_HAVE_CLOCK_GETTIME 1
+
+/* Define if /dev/poll is available */
+#define _EVENT_HAVE_DEVPOLL 1
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+#define _EVENT_HAVE_DLFCN_H 1
+
+/* Define if your system supports the epoll system calls */
+/* #undef _EVENT_HAVE_EPOLL */
+
+/* Define to 1 if you have the `epoll_ctl' function. */
+/* #undef _EVENT_HAVE_EPOLL_CTL */
+
+/* Define if your system supports event ports */
+#define _EVENT_HAVE_EVENT_PORTS 1
+
+/* Define to 1 if you have the `fcntl' function. */
+#define _EVENT_HAVE_FCNTL 1
+
+/* Define to 1 if you have the <fcntl.h> header file. */
+#define _EVENT_HAVE_FCNTL_H 1
+
+/* Define to 1 if the system has the type `fd_mask'. */
+#define _EVENT_HAVE_FD_MASK 1
+
+/* Define to 1 if you have the `getaddrinfo' function. */
+#define _EVENT_HAVE_GETADDRINFO 1
+
+/* Define to 1 if you have the `getegid' function. */
+#define _EVENT_HAVE_GETEGID 1
+
+/* Define to 1 if you have the `geteuid' function. */
+#define _EVENT_HAVE_GETEUID 1
+
+/* Define to 1 if you have the `getnameinfo' function. */
+#define _EVENT_HAVE_GETNAMEINFO 1
+
+/* Define to 1 if you have the `gettimeofday' function. */
+#define _EVENT_HAVE_GETTIMEOFDAY 1
+
+/* Define to 1 if you have the `inet_ntop' function. */
+#define _EVENT_HAVE_INET_NTOP 1
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#define _EVENT_HAVE_INTTYPES_H 1
+
+/* Define to 1 if you have the `issetugid' function. */
+#define _EVENT_HAVE_ISSETUGID 1
+
+/* Define to 1 if you have the `kqueue' function. */
+/* #undef _EVENT_HAVE_KQUEUE */
+
+/* Define to 1 if you have the `nsl' library (-lnsl). */
+#define _EVENT_HAVE_LIBNSL 1
+
+/* Define to 1 if you have the `resolv' library (-lresolv). */
+#define _EVENT_HAVE_LIBRESOLV 1
+
+/* Define to 1 if you have the `rt' library (-lrt). */
+#define _EVENT_HAVE_LIBRT 1
+
+/* Define to 1 if you have the `socket' library (-lsocket). */
+#define _EVENT_HAVE_LIBSOCKET 1
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define _EVENT_HAVE_MEMORY_H 1
+
+/* Define to 1 if you have the <netinet/in6.h> header file. */
+/* #undef _EVENT_HAVE_NETINET_IN6_H */
+
+/* Define to 1 if you have the `poll' function. */
+#define _EVENT_HAVE_POLL 1
+
+/* Define to 1 if you have the <poll.h> header file. */
+#define _EVENT_HAVE_POLL_H 1
+
+/* Define to 1 if you have the `port_create' function. */
+#define _EVENT_HAVE_PORT_CREATE 1
+
+/* Define to 1 if you have the <port.h> header file. */
+#define _EVENT_HAVE_PORT_H 1
+
+/* Define to 1 if you have the `select' function. */
+#define _EVENT_HAVE_SELECT 1
+
+/* Define if F_SETFD is defined in <fcntl.h> */
+#define _EVENT_HAVE_SETFD 1
+
+/* Define to 1 if you have the `sigaction' function. */
+#define _EVENT_HAVE_SIGACTION 1
+
+/* Define to 1 if you have the `signal' function. */
+#define _EVENT_HAVE_SIGNAL 1
+
+/* Define to 1 if you have the <signal.h> header file. */
+#define _EVENT_HAVE_SIGNAL_H 1
+
+/* Define to 1 if you have the <stdarg.h> header file. */
+#define _EVENT_HAVE_STDARG_H 1
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#define _EVENT_HAVE_STDINT_H 1
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define _EVENT_HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define _EVENT_HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define _EVENT_HAVE_STRING_H 1
+
+/* Define to 1 if you have the `strlcpy' function. */
+#define _EVENT_HAVE_STRLCPY 1
+
+/* Define to 1 if you have the `strsep' function. */
+#define _EVENT_HAVE_STRSEP 1
+
+/* Define to 1 if you have the `strtok_r' function. */
+#define _EVENT_HAVE_STRTOK_R 1
+
+/* Define to 1 if you have the `strtoll' function. */
+#define _EVENT_HAVE_STRTOLL 1
+
+/* Define to 1 if the system has the type `struct in6_addr'. */
+#define _EVENT_HAVE_STRUCT_IN6_ADDR 1
+
+/* Define to 1 if you have the <sys/devpoll.h> header file. */
+#define _EVENT_HAVE_SYS_DEVPOLL_H 1
+
+/* Define to 1 if you have the <sys/epoll.h> header file. */
+/* #undef _EVENT_HAVE_SYS_EPOLL_H */
+
+/* Define to 1 if you have the <sys/event.h> header file. */
+/* #undef _EVENT_HAVE_SYS_EVENT_H */
+
+/* Define to 1 if you have the <sys/ioctl.h> header file. */
+#define _EVENT_HAVE_SYS_IOCTL_H 1
+
+/* Define to 1 if you have the <sys/param.h> header file. */
+#define _EVENT_HAVE_SYS_PARAM_H 1
+
+/* Define to 1 if you have the <sys/queue.h> header file. */
+#define _EVENT_HAVE_SYS_QUEUE_H 1
+
+/* Define to 1 if you have the <sys/select.h> header file. */
+#define _EVENT_HAVE_SYS_SELECT_H 1
+
+/* Define to 1 if you have the <sys/socket.h> header file. */
+#define _EVENT_HAVE_SYS_SOCKET_H 1
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#define _EVENT_HAVE_SYS_STAT_H 1
+
+/* Define to 1 if you have the <sys/time.h> header file. */
+#define _EVENT_HAVE_SYS_TIME_H 1
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#define _EVENT_HAVE_SYS_TYPES_H 1
+
+/* Define if TAILQ_FOREACH is defined in <sys/queue.h> */
+#define _EVENT_HAVE_TAILQFOREACH 1
+
+/* Define if timeradd is defined in <sys/time.h> */
+#define _EVENT_HAVE_TIMERADD 1
+
+/* Define if timerclear is defined in <sys/time.h> */
+#define _EVENT_HAVE_TIMERCLEAR 1
+
+/* Define if timercmp is defined in <sys/time.h> */
+#define _EVENT_HAVE_TIMERCMP 1
+
+/* Define if timerisset is defined in <sys/time.h> */
+#define _EVENT_HAVE_TIMERISSET 1
+
+/* Define to 1 if the system has the type `uint16_t'. */
+#define _EVENT_HAVE_UINT16_T 1
+
+/* Define to 1 if the system has the type `uint32_t'. */
+#define _EVENT_HAVE_UINT32_T 1
+
+/* Define to 1 if the system has the type `uint64_t'. */
+#define _EVENT_HAVE_UINT64_T 1
+
+/* Define to 1 if the system has the type `uint8_t'. */
+#define _EVENT_HAVE_UINT8_T 1
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#define _EVENT_HAVE_UNISTD_H 1
+
+/* Define to 1 if you have the `vasprintf' function. */
+#define _EVENT_HAVE_VASPRINTF 1
+
+/* Define if kqueue works correctly with pipes */
+/* #undef _EVENT_HAVE_WORKING_KQUEUE */
+
+/* Define to the sub-directory in which libtool stores uninstalled libraries.
+   */
+#define _EVENT_LT_OBJDIR ".libs/"
+
+/* Numeric representation of the version */
+#define _EVENT_NUMERIC_VERSION 0x01040f00
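+/* (Packed as 0xMMmmpp00: major 0x01, minor 0x04, patch 0x0f, i.e. 1.4.15,
+   matching _EVENT_VERSION below.) */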
+
+/* Name of package */
+#define _EVENT_PACKAGE "libevent"
+
+/* Define to the address where bug reports for this package should be sent. */
+#define _EVENT_PACKAGE_BUGREPORT ""
+
+/* Define to the full name of this package. */
+#define _EVENT_PACKAGE_NAME ""
+
+/* Define to the full name and version of this package. */
+#define _EVENT_PACKAGE_STRING ""
+
+/* Define to the one symbol short name of this package. */
+#define _EVENT_PACKAGE_TARNAME ""
+
+/* Define to the home page for this package. */
+#define _EVENT_PACKAGE_URL ""
+
+/* Define to the version of this package. */
+#define _EVENT_PACKAGE_VERSION ""
+
+/* The size of `int', as computed by sizeof. */
+#define _EVENT_SIZEOF_INT 4
+
+/* The size of `long', as computed by sizeof. */
+#define _EVENT_SIZEOF_LONG 4
+
+/* The size of `long long', as computed by sizeof. */
+#define _EVENT_SIZEOF_LONG_LONG 8
+
+/* The size of `short', as computed by sizeof. */
+#define _EVENT_SIZEOF_SHORT 2
+
+/* Define to 1 if you have the ANSI C header files. */
+#define _EVENT_STDC_HEADERS 1
+
+/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
+#define _EVENT_TIME_WITH_SYS_TIME 1
+
+/* Version number of package */
+#define _EVENT_VERSION "1.4.15"
+
+/* Define to an appropriate substitute if the compiler doesn't have __func__ */
+/* #undef _EVENT___func__ */
+
+/* Define to empty if `const' does not conform to ANSI C. */
+/* #undef _EVENT_const */
+
+/* Define to `__inline__' or `__inline' if that's what the C compiler
+   calls it, or to nothing if 'inline' is not supported under any name.  */
+#ifndef _EVENT___cplusplus
+/* #undef _EVENT_inline */
+#endif
+
+/* Define to `int' if <sys/types.h> does not define. */
+/* #undef _EVENT_pid_t */
+
+/* Define to `unsigned int' if <sys/types.h> does not define. */
+/* #undef _EVENT_size_t */
+
+/* Define to unsigned int if you don't have it */
+/* #undef _EVENT_socklen_t */
+#endif
diff --git a/base/third_party/libevent/stamp-h.in b/base/third_party/libevent/stamp-h.in
new file mode 100644
index 0000000..9788f70
--- /dev/null
+++ b/base/third_party/libevent/stamp-h.in
@@ -0,0 +1 @@
+timestamp
diff --git a/base/third_party/libevent/strlcpy-internal.h b/base/third_party/libevent/strlcpy-internal.h
new file mode 100644
index 0000000..22b5f61
--- /dev/null
+++ b/base/third_party/libevent/strlcpy-internal.h
@@ -0,0 +1,23 @@
+#ifndef _STRLCPY_INTERNAL_H_
+#define _STRLCPY_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif /* HAVE_CONFIG_H */
+
+#ifndef HAVE_STRLCPY
+#include <string.h>
+size_t _event_strlcpy(char *dst, const char *src, size_t siz);
+#define strlcpy _event_strlcpy
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
diff --git a/base/third_party/libevent/strlcpy.c b/base/third_party/libevent/strlcpy.c
new file mode 100644
index 0000000..5d19452
--- /dev/null
+++ b/base/third_party/libevent/strlcpy.c
@@ -0,0 +1,76 @@
+/*	$OpenBSD: strlcpy.c,v 1.5 2001/05/13 15:40:16 deraadt Exp $	*/
+
+/*
+ * Copyright (c) 1998 Todd C. Miller <Todd.Miller@courtesan.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char *rcsid = "$OpenBSD: strlcpy.c,v 1.5 2001/05/13 15:40:16 deraadt Exp $";
+#endif /* LIBC_SCCS and not lint */
+
+#include <sys/types.h>
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif /* HAVE_CONFIG_H */
+
+#ifndef HAVE_STRLCPY
+#include "strlcpy-internal.h"
+
+/*
+ * Copy src to string dst of size siz.  At most siz-1 characters
+ * will be copied.  Always NUL terminates (unless siz == 0).
+ * Returns strlen(src); if retval >= siz, truncation occurred.
+ */
+size_t
+_event_strlcpy(char *dst, const char *src, size_t siz)
+{
+	register char *d = dst;
+	register const char *s = src;
+	register size_t n = siz;
+
+	/* Copy as many bytes as will fit */
+	if (n != 0 && --n != 0) {
+		do {
+			if ((*d++ = *s++) == 0)
+				break;
+		} while (--n != 0);
+	}
+
+	/* Not enough room in dst, add NUL and traverse rest of src */
+	if (n == 0) {
+		if (siz != 0)
+			*d = '\0';		/* NUL-terminate dst */
+		while (*s++)
+			;
+	}
+
+	return(s - src - 1);	/* count does not include NUL */
+}
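+
+/*
+ * Usage sketch (an addition for illustration, not part of the upstream
+ * file): the return value is the length the full copy would have needed,
+ * so comparing it against the destination size detects truncation.  The
+ * STRLCPY_EXAMPLE_MAIN guard is a hypothetical name; compile with
+ * -DSTRLCPY_EXAMPLE_MAIN to build this standalone.
+ */
+#ifdef STRLCPY_EXAMPLE_MAIN
+#include <stdio.h>
+int
+main(void)
+{
+	char small[8];
+	/* "hello, world" needs 13 bytes; only 7 chars plus NUL fit. */
+	size_t need = _event_strlcpy(small, "hello, world", sizeof(small));
+	if (need >= sizeof(small))
+		printf("truncated: needed %u bytes, kept \"%s\"\n",
+		    (unsigned)(need + 1), small);
+	return (0);
+}
+#endif /* STRLCPY_EXAMPLE_MAIN */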
+#endif
diff --git a/base/third_party/libevent/test/Makefile.am b/base/third_party/libevent/test/Makefile.am
new file mode 100644
index 0000000..3558d02
--- /dev/null
+++ b/base/third_party/libevent/test/Makefile.am
@@ -0,0 +1,35 @@
+AUTOMAKE_OPTIONS = foreign no-dependencies
+
+AM_CFLAGS = -I$(top_srcdir) -I$(top_srcdir)/compat
+
+EXTRA_DIST = regress.rpc regress.gen.h regress.gen.c
+
+noinst_PROGRAMS = test-init test-eof test-weof test-time regress bench
+
+BUILT_SOURCES = regress.gen.c regress.gen.h
+test_init_SOURCES = test-init.c
+test_init_LDADD = ../libevent_core.la
+test_eof_SOURCES = test-eof.c
+test_eof_LDADD = ../libevent_core.la
+test_weof_SOURCES = test-weof.c
+test_weof_LDADD = ../libevent_core.la
+test_time_SOURCES = test-time.c
+test_time_LDADD = ../libevent_core.la
+regress_SOURCES = regress.c regress.h regress_http.c regress_dns.c \
+	regress_rpc.c \
+	regress.gen.c regress.gen.h
+regress_LDADD = ../libevent.la
+bench_SOURCES = bench.c
+bench_LDADD = ../libevent.la
+
+regress.gen.c regress.gen.h: regress.rpc $(top_srcdir)/event_rpcgen.py
+	$(top_srcdir)/event_rpcgen.py $(srcdir)/regress.rpc || echo "No Python installed"
+
+DISTCLEANFILES = *~
+
+test: test-init test-eof test-weof test-time regress
+
+verify: test
+	@$(srcdir)/test.sh
+
+bench test-init test-eof test-weof test-time: ../libevent.la
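+
+# "make verify" builds the test programs above and then runs $(srcdir)/test.sh.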
diff --git a/base/third_party/libevent/test/Makefile.nmake b/base/third_party/libevent/test/Makefile.nmake
new file mode 100644
index 0000000..320abe7
--- /dev/null
+++ b/base/third_party/libevent/test/Makefile.nmake
@@ -0,0 +1,47 @@
+
+CFLAGS=/I.. /I../include /I../WIN32-Code /I../compat /DWIN32 /DHAVE_CONFIG_H
+
+CFLAGS=$(CFLAGS) /Ox /W3 /wd4996 /nologo
+
+REGRESS_OBJS=regress.obj regress_http.obj regress_dns.obj \
+        regress_rpc.obj regress.gen.obj \
+
+OTHER_OBJS=test-init.obj test-eof.obj test-weof.obj test-time.obj \
+	bench.obj bench_cascade.obj bench_http.obj bench_httpclient.obj
+
+PROGRAMS=regress.exe \
+	test-init.exe test-eof.exe test-weof.exe test-time.exe
+
+# Disabled for now:
+#	bench.exe bench_cascade.exe bench_http.exe bench_httpclient.exe
+
+
+LIBS=..\libevent.lib ws2_32.lib advapi32.lib
+
+all: $(PROGRAMS)
+
+regress.exe: $(REGRESS_OBJS)
+	$(CC) $(CFLAGS) $(LIBS) $(REGRESS_OBJS)
+
+test-init.exe: test-init.obj
+	$(CC) $(CFLAGS) $(LIBS) test-init.obj
+test-eof.exe: test-eof.obj
+	$(CC) $(CFLAGS) $(LIBS) test-eof.obj
+test-weof.exe: test-weof.obj
+	$(CC) $(CFLAGS) $(LIBS) test-weof.obj
+test-time.exe: test-time.obj
+	$(CC) $(CFLAGS) $(LIBS) test-time.obj
+
+bench.exe: bench.obj
+	$(CC) $(CFLAGS) $(LIBS) bench.obj
+bench_cascade.exe: bench_cascade.obj
+	$(CC) $(CFLAGS) $(LIBS) bench_cascade.obj
+bench_http.exe: bench_http.obj
+	$(CC) $(CFLAGS) $(LIBS) bench_http.obj
+bench_httpclient.exe: bench_httpclient.obj
+	$(CC) $(CFLAGS) $(LIBS) bench_httpclient.obj
+
+clean:
+	-del $(REGRESS_OBJS)
+	-del $(OTHER_OBJS)
+	-del regress.exe
diff --git a/base/third_party/libevent/test/bench.c b/base/third_party/libevent/test/bench.c
new file mode 100644
index 0000000..c976932
--- /dev/null
+++ b/base/third_party/libevent/test/bench.c
@@ -0,0 +1,188 @@
+/*
+ * Copyright 2003 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Mon 03/10/2003 - Modified by Davide Libenzi <davidel@xmailserver.org>
+ *
+ *     Added chain event propagation to improve the sensitivity of
+ *     the measurement with respect to the event loop efficiency.
+ *
+ *
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#ifdef WIN32
+#include <windows.h>
+#else
+#include <sys/socket.h>
+#include <signal.h>
+#include <sys/resource.h>
+#endif
+#include <fcntl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+
+#include <event.h>
+#include <evutil.h>
+
+
+static int count, writes, fired;
+static int *pipes;
+static int num_pipes, num_active, num_writes;
+static struct event *events;
+
+static void
+read_cb(int fd, short which, void *arg)
+{
+	long idx = (long) arg, widx = idx + 1;
+	u_char ch;
+
+	count += read(fd, &ch, sizeof(ch));
+	if (writes) {
+		if (widx >= num_pipes)
+			widx -= num_pipes;
+		write(pipes[2 * widx + 1], "e", 1);
+		writes--;
+		fired++;
+	}
+}
+
+static struct timeval *
+run_once(void)
+{
+	int *cp, space;
+	long i;
+	static struct timeval ts, te;
+
+	for (cp = pipes, i = 0; i < num_pipes; i++, cp += 2) {
+		event_del(&events[i]);
+		event_set(&events[i], cp[0], EV_READ | EV_PERSIST, read_cb, (void *) i);
+		event_add(&events[i], NULL);
+	}
+
+	event_loop(EVLOOP_ONCE | EVLOOP_NONBLOCK);
+
+	fired = 0;
+	space = num_pipes / num_active;
+	space = space * 2;
+	for (i = 0; i < num_active; i++, fired++)
+		write(pipes[i * space + 1], "e", 1);
+
+	count = 0;
+	writes = num_writes;
+	{ int xcount = 0;
+	gettimeofday(&ts, NULL);
+	do {
+		event_loop(EVLOOP_ONCE | EVLOOP_NONBLOCK);
+		xcount++;
+	} while (count != fired);
+	gettimeofday(&te, NULL);
+
+	if (xcount != count) fprintf(stderr, "Xcount: %d, Rcount: %d\n", xcount, count);
+	}
+
+	evutil_timersub(&te, &ts, &te);
+
+	return (&te);
+}
+
+int
+main (int argc, char **argv)
+{
+#ifndef WIN32
+	struct rlimit rl;
+#endif
+	int i, c;
+	struct timeval *tv;
+	int *cp;
+
+	num_pipes = 100;
+	num_active = 1;
+	num_writes = num_pipes;
+	while ((c = getopt(argc, argv, "n:a:w:")) != -1) {
+		switch (c) {
+		case 'n':
+			num_pipes = atoi(optarg);
+			break;
+		case 'a':
+			num_active = atoi(optarg);
+			break;
+		case 'w':
+			num_writes = atoi(optarg);
+			break;
+		default:
+			fprintf(stderr, "Illegal argument \"%c\"\n", c);
+			exit(1);
+		}
+	}
+
+#ifndef WIN32
+	rl.rlim_cur = rl.rlim_max = num_pipes * 2 + 50;
+	if (setrlimit(RLIMIT_NOFILE, &rl) == -1) {
+		perror("setrlimit");
+		exit(1);
+	}
+#endif
+
+	events = calloc(num_pipes, sizeof(struct event));
+	pipes = calloc(num_pipes * 2, sizeof(int));
+	if (events == NULL || pipes == NULL) {
+		perror("malloc");
+		exit(1);
+	}
+
+	event_init();
+
+	for (cp = pipes, i = 0; i < num_pipes; i++, cp += 2) {
+#ifdef USE_PIPES
+		if (pipe(cp) == -1) {
+#else
+		if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, cp) == -1) {
+#endif
+			perror("pipe");
+			exit(1);
+		}
+	}
+
+	for (i = 0; i < 25; i++) {
+		tv = run_once();
+		if (tv == NULL)
+			exit(1);
+		fprintf(stdout, "%ld\n",
+			tv->tv_sec * 1000000L + tv->tv_usec);
+	}
+
+	exit(0);
+}
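+
+/*
+ * Invocation sketch (flag values are illustrative):
+ *
+ *   ./bench -n 1000 -a 100 -w 1000
+ *
+ * creates 1000 socketpairs (-n), makes 100 of them active per run (-a),
+ * and allows 1000 chained writes per run (-w); each of the 25 iterations
+ * prints the elapsed dispatch time in microseconds.
+ */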
diff --git a/base/third_party/libevent/test/regress.c b/base/third_party/libevent/test/regress.c
new file mode 100644
index 0000000..cce7d7d
--- /dev/null
+++ b/base/third_party/libevent/test/regress.c
@@ -0,0 +1,1903 @@
+/*
+ * Copyright (c) 2003, 2004 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef WIN32
+#include <winsock2.h>
+#include <windows.h>
+#endif
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#include <sys/queue.h>
+#ifndef WIN32
+#include <sys/socket.h>
+#include <sys/wait.h>
+#include <signal.h>
+#include <unistd.h>
+#include <netdb.h>
+#endif
+#include <assert.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+
+#include "event.h"
+#include "evutil.h"
+#include "event-internal.h"
+#include "log.h"
+
+#include "regress.h"
+#ifndef WIN32
+#include "regress.gen.h"
+#endif
+
+int pair[2];
+int test_ok;
+static int called;
+static char wbuf[4096];
+static char rbuf[4096];
+static int woff;
+static int roff;
+static int usepersist;
+static struct timeval tset;
+static struct timeval tcalled;
+static struct event_base *global_base;
+
+#define TEST1	"this is a test"
+#define SECONDS	1
+
+#ifndef SHUT_WR
+#define SHUT_WR 1
+#endif
+
+#ifdef WIN32
+#define write(fd,buf,len) send((fd),(buf),(len),0)
+#define read(fd,buf,len) recv((fd),(buf),(len),0)
+#endif
+
+static void
+simple_read_cb(int fd, short event, void *arg)
+{
+	char buf[256];
+	int len;
+
+	if (arg == NULL)
+		return;
+
+	len = read(fd, buf, sizeof(buf));
+
+	if (len) {
+		if (!called) {
+			if (event_add(arg, NULL) == -1)
+				exit(1);
+		}
+	} else if (called == 1)
+		test_ok = 1;
+
+	called++;
+}
+
+static void
+simple_write_cb(int fd, short event, void *arg)
+{
+	int len;
+
+	if (arg == NULL)
+		return;
+
+	len = write(fd, TEST1, strlen(TEST1) + 1);
+	if (len == -1)
+		test_ok = 0;
+	else
+		test_ok = 1;
+}
+
+static void
+multiple_write_cb(int fd, short event, void *arg)
+{
+	struct event *ev = arg;
+	int len;
+
+	len = 128;
+	if (woff + len >= sizeof(wbuf))
+		len = sizeof(wbuf) - woff;
+
+	len = write(fd, wbuf + woff, len);
+	if (len == -1) {
+		fprintf(stderr, "%s: write\n", __func__);
+		if (usepersist)
+			event_del(ev);
+		return;
+	}
+
+	woff += len;
+
+	if (woff >= sizeof(wbuf)) {
+		shutdown(fd, SHUT_WR);
+		if (usepersist)
+			event_del(ev);
+		return;
+	}
+
+	if (!usepersist) {
+		if (event_add(ev, NULL) == -1)
+			exit(1);
+	}
+}
+
+static void
+multiple_read_cb(int fd, short event, void *arg)
+{
+	struct event *ev = arg;
+	int len;
+
+	len = read(fd, rbuf + roff, sizeof(rbuf) - roff);
+	if (len == -1)
+		fprintf(stderr, "%s: read\n", __func__);
+	if (len <= 0) {
+		if (usepersist)
+			event_del(ev);
+		return;
+	}
+
+	roff += len;
+	if (!usepersist) {
+		if (event_add(ev, NULL) == -1) 
+			exit(1);
+	}
+}
+
+static void
+timeout_cb(int fd, short event, void *arg)
+{
+	struct timeval tv;
+	int diff;
+
+	evutil_gettimeofday(&tcalled, NULL);
+	if (evutil_timercmp(&tcalled, &tset, >))
+		evutil_timersub(&tcalled, &tset, &tv);
+	else
+		evutil_timersub(&tset, &tcalled, &tv);
+
+	diff = tv.tv_sec*1000 + tv.tv_usec/1000 - SECONDS * 1000;
+	if (diff < 0)
+		diff = -diff;
+
+	if (diff < 100)
+		test_ok = 1;
+}
+
+#ifndef WIN32
+static void
+signal_cb_sa(int sig)
+{
+	test_ok = 2;
+}
+
+static void
+signal_cb(int fd, short event, void *arg)
+{
+	struct event *ev = arg;
+
+	signal_del(ev);
+	test_ok = 1;
+}
+#endif
+
+struct both {
+	struct event ev;
+	int nread;
+};
+
+static void
+combined_read_cb(int fd, short event, void *arg)
+{
+	struct both *both = arg;
+	char buf[128];
+	int len;
+
+	len = read(fd, buf, sizeof(buf));
+	if (len == -1)
+		fprintf(stderr, "%s: read\n", __func__);
+	if (len <= 0)
+		return;
+
+	both->nread += len;
+	if (event_add(&both->ev, NULL) == -1)
+		exit(1);
+}
+
+static void
+combined_write_cb(int fd, short event, void *arg)
+{
+	struct both *both = arg;
+	char buf[128];
+	int len;
+
+	len = sizeof(buf);
+	if (len > both->nread)
+		len = both->nread;
+
+	len = write(fd, buf, len);
+	if (len == -1)
+		fprintf(stderr, "%s: write\n", __func__);
+	if (len <= 0) {
+		shutdown(fd, SHUT_WR);
+		return;
+	}
+
+	both->nread -= len;
+	if (event_add(&both->ev, NULL) == -1)
+		exit(1);
+}
+
+/* Test infrastructure */
+
+static int
+setup_test(const char *name)
+{
+
+	fprintf(stdout, "%s", name);
+
+	if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, pair) == -1) {
+		fprintf(stderr, "%s: socketpair\n", __func__);
+		exit(1);
+	}
+
+#ifdef HAVE_FCNTL
+        if (fcntl(pair[0], F_SETFL, O_NONBLOCK) == -1)
+		fprintf(stderr, "fcntl(O_NONBLOCK)");
+
+        if (fcntl(pair[1], F_SETFL, O_NONBLOCK) == -1)
+		fprintf(stderr, "fcntl(O_NONBLOCK)");
+#endif
+
+	test_ok = 0;
+	called = 0;
+	return (0);
+}
+
+static int
+cleanup_test(void)
+{
+#ifndef WIN32
+	close(pair[0]);
+	close(pair[1]);
+#else
+	CloseHandle((HANDLE)pair[0]);
+	CloseHandle((HANDLE)pair[1]);
+#endif
+	if (test_ok)
+		fprintf(stdout, "OK\n");
+	else {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+        test_ok = 0;
+	return (0);
+}
+
+static void
+test_registerfds(void)
+{
+	int i, j;
+	int pair[2];
+	struct event read_evs[512];
+	struct event write_evs[512];
+
+	struct event_base *base = event_base_new();
+
+	fprintf(stdout, "Testing register fds: ");
+
+	for (i = 0; i < 512; ++i) {
+		if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, pair) == -1) {
+			/* run up to the limit of file descriptors */
+			break;
+		}
+		event_set(&read_evs[i], pair[0],
+		    EV_READ|EV_PERSIST, simple_read_cb, NULL);
+		event_base_set(base, &read_evs[i]);
+		event_add(&read_evs[i], NULL);
+		event_set(&write_evs[i], pair[1],
+		    EV_WRITE|EV_PERSIST, simple_write_cb, NULL);
+		event_base_set(base, &write_evs[i]);
+		event_add(&write_evs[i], NULL);
+
+		/* just loop once */
+		event_base_loop(base, EVLOOP_ONCE);
+	}
+
+	/* now delete everything */
+	for (j = 0; j < i; ++j) {
+		event_del(&read_evs[j]);
+		event_del(&write_evs[j]);
+#ifndef WIN32
+		close(read_evs[j].ev_fd);
+		close(write_evs[j].ev_fd);
+#else
+		CloseHandle((HANDLE)read_evs[j].ev_fd);
+		CloseHandle((HANDLE)write_evs[j].ev_fd);
+#endif
+
+		/* just loop once */
+		event_base_loop(base, EVLOOP_ONCE);
+	}
+
+	event_base_free(base);
+
+	fprintf(stdout, "OK\n");
+}
+
+static void
+test_simpleread(void)
+{
+	struct event ev;
+
+	/* Very simple read test */
+	setup_test("Simple read: ");
+	
+	write(pair[0], TEST1, strlen(TEST1)+1);
+	shutdown(pair[0], SHUT_WR);
+
+	event_set(&ev, pair[1], EV_READ, simple_read_cb, &ev);
+	if (event_add(&ev, NULL) == -1)
+		exit(1);
+	event_dispatch();
+
+	cleanup_test();
+}
+
+static void
+test_simplewrite(void)
+{
+	struct event ev;
+
+	/* Very simple write test */
+	setup_test("Simple write: ");
+	
+	event_set(&ev, pair[0], EV_WRITE, simple_write_cb, &ev);
+	if (event_add(&ev, NULL) == -1)
+		exit(1);
+	event_dispatch();
+
+	cleanup_test();
+}
+
+static void
+test_multiple(void)
+{
+	struct event ev, ev2;
+	int i;
+
+	/* Multiple read and write test */
+	setup_test("Multiple read/write: ");
+	memset(rbuf, 0, sizeof(rbuf));
+	for (i = 0; i < sizeof(wbuf); i++)
+		wbuf[i] = i;
+
+	roff = woff = 0;
+	usepersist = 0;
+
+	event_set(&ev, pair[0], EV_WRITE, multiple_write_cb, &ev);
+	if (event_add(&ev, NULL) == -1)
+		exit(1);
+	event_set(&ev2, pair[1], EV_READ, multiple_read_cb, &ev2);
+	if (event_add(&ev2, NULL) == -1)
+		exit(1);
+	event_dispatch();
+
+	if (roff == woff)
+		test_ok = memcmp(rbuf, wbuf, sizeof(wbuf)) == 0;
+
+	cleanup_test();
+}
+
+static void
+test_persistent(void)
+{
+	struct event ev, ev2;
+	int i;
+
+	/* Multiple read and write test with persist */
+	setup_test("Persist read/write: ");
+	memset(rbuf, 0, sizeof(rbuf));
+	for (i = 0; i < sizeof(wbuf); i++)
+		wbuf[i] = i;
+
+	roff = woff = 0;
+	usepersist = 1;
+
+	event_set(&ev, pair[0], EV_WRITE|EV_PERSIST, multiple_write_cb, &ev);
+	if (event_add(&ev, NULL) == -1)
+		exit(1);
+	event_set(&ev2, pair[1], EV_READ|EV_PERSIST, multiple_read_cb, &ev2);
+	if (event_add(&ev2, NULL) == -1)
+		exit(1);
+	event_dispatch();
+
+	if (roff == woff)
+		test_ok = memcmp(rbuf, wbuf, sizeof(wbuf)) == 0;
+
+	cleanup_test();
+}
+
+static void
+test_combined(void)
+{
+	struct both r1, r2, w1, w2;
+
+	setup_test("Combined read/write: ");
+	memset(&r1, 0, sizeof(r1));
+	memset(&r2, 0, sizeof(r2));
+	memset(&w1, 0, sizeof(w1));
+	memset(&w2, 0, sizeof(w2));
+
+	w1.nread = 4096;
+	w2.nread = 8192;
+
+	event_set(&r1.ev, pair[0], EV_READ, combined_read_cb, &r1);
+	event_set(&w1.ev, pair[0], EV_WRITE, combined_write_cb, &w1);
+	event_set(&r2.ev, pair[1], EV_READ, combined_read_cb, &r2);
+	event_set(&w2.ev, pair[1], EV_WRITE, combined_write_cb, &w2);
+	if (event_add(&r1.ev, NULL) == -1)
+		exit(1);
+	if (event_add(&w1.ev, NULL))
+		exit(1);
+	if (event_add(&r2.ev, NULL))
+		exit(1);
+	if (event_add(&w2.ev, NULL))
+		exit(1);
+
+	event_dispatch();
+
+	if (r1.nread == 8192 && r2.nread == 4096)
+		test_ok = 1;
+
+	cleanup_test();
+}
+
+static void
+test_simpletimeout(void)
+{
+	struct timeval tv;
+	struct event ev;
+
+	setup_test("Simple timeout: ");
+
+	tv.tv_usec = 0;
+	tv.tv_sec = SECONDS;
+	evtimer_set(&ev, timeout_cb, NULL);
+	evtimer_add(&ev, &tv);
+
+	evutil_gettimeofday(&tset, NULL);
+	event_dispatch();
+
+	cleanup_test();
+}
+
+#ifndef WIN32
+extern struct event_base *current_base;
+
+static void
+child_signal_cb(int fd, short event, void *arg)
+{
+	struct timeval tv;
+	int *pint = arg;
+
+	*pint = 1;
+
+	tv.tv_usec = 500000;
+	tv.tv_sec = 0;
+	event_loopexit(&tv);
+}
+
+static void
+test_fork(void)
+{
+	int status, got_sigchld = 0;
+	struct event ev, sig_ev;
+	pid_t pid;
+
+	setup_test("After fork: ");
+
+	write(pair[0], TEST1, strlen(TEST1)+1);
+
+	event_set(&ev, pair[1], EV_READ, simple_read_cb, &ev);
+	if (event_add(&ev, NULL) == -1)
+		exit(1);
+
+	signal_set(&sig_ev, SIGCHLD, child_signal_cb, &got_sigchld);
+	signal_add(&sig_ev, NULL);
+
+	if ((pid = fork()) == 0) {
+		/* in the child */
+		if (event_reinit(current_base) == -1) {
+			fprintf(stderr, "FAILED (reinit)\n");
+			exit(1);
+		}
+
+		signal_del(&sig_ev);
+
+		called = 0;
+
+		event_dispatch();
+
+		/* we do not send an EOF; simple_read_cb requires an EOF 
+		 * to set test_ok.  we just verify that the callback was
+		 * called. */
+		exit(test_ok != 0 || called != 2 ? -2 : 76);
+	}
+
+	/* wait for the child to read the data */
+	sleep(1);
+
+	write(pair[0], TEST1, strlen(TEST1)+1);
+
+	if (waitpid(pid, &status, 0) == -1) {
+		fprintf(stderr, "FAILED (fork)\n");
+		exit(1);
+	}
+	
+	if (WEXITSTATUS(status) != 76) {
+		fprintf(stderr, "FAILED (exit): %d\n", WEXITSTATUS(status));
+		exit(1);
+	}
+
+	/* test that the current event loop still works */
+	write(pair[0], TEST1, strlen(TEST1)+1);
+	shutdown(pair[0], SHUT_WR);
+
+	event_dispatch();
+
+	if (!got_sigchld) {
+		fprintf(stdout, "FAILED (sigchld)\n");
+		exit(1);
+	}
+
+	signal_del(&sig_ev);
+
+	cleanup_test();
+}
+
+static void
+test_simplesignal(void)
+{
+	struct event ev;
+	struct itimerval itv;
+
+	setup_test("Simple signal: ");
+	signal_set(&ev, SIGALRM, signal_cb, &ev);
+	signal_add(&ev, NULL);
+	/* find bugs in which operations are re-ordered */
+	signal_del(&ev);
+	signal_add(&ev, NULL);
+
+	memset(&itv, 0, sizeof(itv));
+	itv.it_value.tv_sec = 1;
+	if (setitimer(ITIMER_REAL, &itv, NULL) == -1)
+		goto skip_simplesignal;
+
+	event_dispatch();
+ skip_simplesignal:
+	if (signal_del(&ev) == -1)
+		test_ok = 0;
+
+	cleanup_test();
+}
+
+static void
+test_multiplesignal(void)
+{
+	struct event ev_one, ev_two;
+	struct itimerval itv;
+
+	setup_test("Multiple signal: ");
+
+	signal_set(&ev_one, SIGALRM, signal_cb, &ev_one);
+	signal_add(&ev_one, NULL);
+
+	signal_set(&ev_two, SIGALRM, signal_cb, &ev_two);
+	signal_add(&ev_two, NULL);
+
+	memset(&itv, 0, sizeof(itv));
+	itv.it_value.tv_sec = 1;
+	if (setitimer(ITIMER_REAL, &itv, NULL) == -1)
+		goto skip_simplesignal;
+
+	event_dispatch();
+
+ skip_simplesignal:
+	if (signal_del(&ev_one) == -1)
+		test_ok = 0;
+	if (signal_del(&ev_two) == -1)
+		test_ok = 0;
+
+	cleanup_test();
+}
+
+static void
+test_immediatesignal(void)
+{
+	struct event ev;
+
+	test_ok = 0;
+	printf("Immediate signal: ");
+	signal_set(&ev, SIGUSR1, signal_cb, &ev);
+	signal_add(&ev, NULL);
+	raise(SIGUSR1);
+	event_loop(EVLOOP_NONBLOCK);
+	signal_del(&ev);
+	cleanup_test();
+}
+
+static void
+test_signal_dealloc(void)
+{
+	/* make sure that signal_event is event_del'ed and pipe closed */
+	struct event ev;
+	struct event_base *base = event_init();
+	printf("Signal dealloc: ");
+	signal_set(&ev, SIGUSR1, signal_cb, &ev);
+	signal_add(&ev, NULL);
+	signal_del(&ev);
+	event_base_free(base);
+        /* If we got here without asserting, we're fine. */
+        test_ok = 1;
+	cleanup_test();
+}
+
+static void
+test_signal_pipeloss(void)
+{
+	/* make sure that the base1 pipe is closed correctly. */
+	struct event_base *base1, *base2;
+	int pipe1;
+	test_ok = 0;
+	printf("Signal pipeloss: ");
+	base1 = event_init();
+	pipe1 = base1->sig.ev_signal_pair[0];
+	base2 = event_init();
+	event_base_free(base2);
+	event_base_free(base1);
+	if (close(pipe1) != -1 || errno!=EBADF) {
+		/* fd must be closed, so second close gives -1, EBADF */
+		printf("signal pipe not closed. ");
+		test_ok = 0;
+	} else {
+		test_ok = 1;
+	}
+	cleanup_test();
+}
+
+/*
+ * make two bases to catch signals, use both of them.  this only works
+ * for event mechanisms that use our signal pipe trick.  kqueue handles
+ * signals internally, and all interested kqueues get all the signals.
+ */
+static void
+test_signal_switchbase(void)
+{
+	struct event ev1, ev2;
+	struct event_base *base1, *base2;
+        int is_kqueue;
+	test_ok = 0;
+	printf("Signal switchbase: ");
+	base1 = event_init();
+	base2 = event_init();
+        is_kqueue = !strcmp(event_get_method(),"kqueue");
+	signal_set(&ev1, SIGUSR1, signal_cb, &ev1);
+	signal_set(&ev2, SIGUSR1, signal_cb, &ev2);
+	if (event_base_set(base1, &ev1) ||
+	    event_base_set(base2, &ev2) ||
+	    event_add(&ev1, NULL) ||
+	    event_add(&ev2, NULL)) {
+		fprintf(stderr, "%s: cannot set base, add\n", __func__);
+		exit(1);
+	}
+
+	test_ok = 0;
+	/* can handle signal before loop is called */
+	raise(SIGUSR1);
+	event_base_loop(base2, EVLOOP_NONBLOCK);
+        if (is_kqueue) {
+                if (!test_ok)
+                        goto done;
+                test_ok = 0;
+        }
+	event_base_loop(base1, EVLOOP_NONBLOCK);
+	if (test_ok && !is_kqueue) {
+		test_ok = 0;
+
+		/* set base1 to handle signals */
+		event_base_loop(base1, EVLOOP_NONBLOCK);
+		raise(SIGUSR1);
+		event_base_loop(base1, EVLOOP_NONBLOCK);
+		event_base_loop(base2, EVLOOP_NONBLOCK);
+	}
+ done:
+	event_base_free(base1);
+	event_base_free(base2);
+	cleanup_test();
+}
+
+/*
+ * assert that a signal event removed from the event queue really is
+ * removed, with no possibility of its parent handler being fired.
+ */
+static void
+test_signal_assert(void)
+{
+	struct event ev;
+	struct event_base *base = event_init();
+	test_ok = 0;
+	printf("Signal handler assert: ");
+	/* use SIGCONT so we don't kill ourselves when we signal to nowhere */
+	signal_set(&ev, SIGCONT, signal_cb, &ev);
+	signal_add(&ev, NULL);
+	/*
+	 * if signal_del() fails to reset the handler, its current handler
+	 * will still point to evsignal_handler().
+	 */
+	signal_del(&ev);
+
+	raise(SIGCONT);
+	/* only way to verify we were in evsignal_handler() */
+	if (base->sig.evsignal_caught)
+		test_ok = 0;
+	else
+		test_ok = 1;
+
+	event_base_free(base);
+	cleanup_test();
+	return;
+}
+
+/*
+ * assert that we restore our previous signal handler properly.
+ */
+static void
+test_signal_restore(void)
+{
+	struct event ev;
+	struct event_base *base = event_init();
+#ifdef HAVE_SIGACTION
+	struct sigaction sa;
+#endif
+
+	test_ok = 0;
+	printf("Signal handler restore: ");
+#ifdef HAVE_SIGACTION
+	sa.sa_handler = signal_cb_sa;
+	sa.sa_flags = 0x0;
+	sigemptyset(&sa.sa_mask);
+	if (sigaction(SIGUSR1, &sa, NULL) == -1)
+		goto out;
+#else
+	if (signal(SIGUSR1, signal_cb_sa) == SIG_ERR)
+		goto out;
+#endif
+	signal_set(&ev, SIGUSR1, signal_cb, &ev);
+	signal_add(&ev, NULL);
+	signal_del(&ev);
+
+	raise(SIGUSR1);
+	/* 1 == signal_cb, 2 == signal_cb_sa, we want our previous handler */
+	if (test_ok != 2)
+		test_ok = 0;
+out:
+	event_base_free(base);
+	cleanup_test();
+	return;
+}
+
+static void
+signal_cb_swp(int sig, short event, void *arg)
+{
+	called++;
+	if (called < 5)
+		raise(sig);
+	else
+		event_loopexit(NULL);
+}
+static void
+timeout_cb_swp(int fd, short event, void *arg)
+{
+	if (called == -1) {
+		struct timeval tv = {5, 0};
+
+		called = 0;
+		evtimer_add((struct event *)arg, &tv);
+		raise(SIGUSR1);
+		return;
+	}
+	test_ok = 0;
+	event_loopexit(NULL);
+}
+
+static void
+test_signal_while_processing(void)
+{
+	struct event_base *base = event_init();
+	struct event ev, ev_timer;
+	struct timeval tv = {0, 0};
+
+	setup_test("Receiving a signal while processing other signal: ");
+
+	called = -1;
+	test_ok = 1;
+	signal_set(&ev, SIGUSR1, signal_cb_swp, NULL);
+	signal_add(&ev, NULL);
+	evtimer_set(&ev_timer, timeout_cb_swp, &ev_timer);
+	evtimer_add(&ev_timer, &tv);
+	event_dispatch();
+
+	event_base_free(base);
+	cleanup_test();
+	return;
+}
+#endif
+
+static void
+test_free_active_base(void)
+{
+	struct event_base *base1;
+	struct event ev1;
+	setup_test("Free active base: ");
+	base1 = event_init();
+	event_set(&ev1, pair[1], EV_READ, simple_read_cb, &ev1);
+	event_base_set(base1, &ev1);
+	event_add(&ev1, NULL);
+	/* event_del(&ev1); */
+	event_base_free(base1);
+	test_ok = 1;
+	cleanup_test();
+}
+
+static void
+test_event_base_new(void)
+{
+	struct event_base *base;
+	struct event ev1;
+	setup_test("Event base new: ");
+
+	write(pair[0], TEST1, strlen(TEST1)+1);
+	shutdown(pair[0], SHUT_WR);
+
+	base = event_base_new();
+	event_set(&ev1, pair[1], EV_READ, simple_read_cb, &ev1);
+	event_base_set(base, &ev1);
+	event_add(&ev1, NULL);
+
+	event_base_dispatch(base);
+
+	event_base_free(base);
+	test_ok = 1;
+	cleanup_test();
+}
+
+static void
+test_loopexit(void)
+{
+	struct timeval tv, tv_start, tv_end;
+	struct event ev;
+
+	setup_test("Loop exit: ");
+
+	tv.tv_usec = 0;
+	tv.tv_sec = 60*60*24;
+	evtimer_set(&ev, timeout_cb, NULL);
+	evtimer_add(&ev, &tv);
+
+	tv.tv_usec = 0;
+	tv.tv_sec = 1;
+	event_loopexit(&tv);
+
+	evutil_gettimeofday(&tv_start, NULL);
+	event_dispatch();
+	evutil_gettimeofday(&tv_end, NULL);
+	evutil_timersub(&tv_end, &tv_start, &tv_end);
+
+	evtimer_del(&ev);
+
+	if (tv.tv_sec < 2)
+		test_ok = 1;
+
+	cleanup_test();
+}
+
+static void
+test_loopexit_multiple(void)
+{
+	struct timeval tv;
+	struct event_base *base;
+
+	setup_test("Loop Multiple exit: ");
+
+	base = event_base_new();
+	
+	tv.tv_usec = 0;
+	tv.tv_sec = 1;
+	event_base_loopexit(base, &tv);
+
+	tv.tv_usec = 0;
+	tv.tv_sec = 2;
+	event_base_loopexit(base, &tv);
+
+	event_base_dispatch(base);
+
+	event_base_free(base);
+	
+	test_ok = 1;
+
+	cleanup_test();
+}
+
+static void
+break_cb(int fd, short events, void *arg)
+{
+	test_ok = 1;
+	event_loopbreak();
+}
+
+static void
+fail_cb(int fd, short events, void *arg)
+{
+	test_ok = 0;
+}
+
+static void
+test_loopbreak(void)
+{
+	struct event ev1, ev2;
+	struct timeval tv;
+
+	setup_test("Loop break: ");
+
+	tv.tv_sec = 0;
+	tv.tv_usec = 0;
+	evtimer_set(&ev1, break_cb, NULL);
+	evtimer_add(&ev1, &tv);
+	evtimer_set(&ev2, fail_cb, NULL);
+	evtimer_add(&ev2, &tv);
+
+	event_dispatch();
+
+	evtimer_del(&ev1);
+	evtimer_del(&ev2);
+
+	cleanup_test();
+}
+
+static void
+test_evbuffer(void)
+{
+	struct evbuffer *evb = evbuffer_new();
+	setup_test("Testing Evbuffer: ");
+
+	evbuffer_add_printf(evb, "%s/%d", "hello", 1);
+
+	if (EVBUFFER_LENGTH(evb) == 7 &&
+	    strcmp((char*)EVBUFFER_DATA(evb), "hello/1") == 0)
+	    test_ok = 1;
+	
+	evbuffer_free(evb);
+
+	cleanup_test();
+}
+
+static void
+test_evbuffer_readln(void)
+{
+	struct evbuffer *evb = evbuffer_new();
+	struct evbuffer *evb_tmp = evbuffer_new();
+	const char *s;
+	char *cp = NULL;
+	size_t sz;
+
+#define tt_line_eq(content)						\
+	if (!cp || sz != strlen(content) || strcmp(cp, content)) {	\
+		fprintf(stdout, "FAILED\n");				\
+		exit(1);						\
+	}
+#define tt_assert(expression)						\
+	if (!(expression)) {						\
+		fprintf(stdout, "FAILED\n");				\
+		exit(1);						\
+	}								\
+
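+	/*
+	 * Summary of the line-ending modes exercised below (added for the
+	 * reader; semantics per evbuffer_readln):
+	 *   EVBUFFER_EOL_ANY         - any run of CR and LF ends a line
+	 *   EVBUFFER_EOL_CRLF        - an LF, optionally preceded by a CR
+	 *   EVBUFFER_EOL_CRLF_STRICT - exactly CR followed by LF
+	 *   EVBUFFER_EOL_LF          - exactly LF
+	 */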
+	/* Test EOL_ANY. */
+	fprintf(stdout, "Testing evbuffer_readln EOL_ANY: ");
+
+	s = "complex silly newline\r\n\n\r\n\n\rmore\0\n";
+	evbuffer_add(evb, s, strlen(s)+2);
+	cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_ANY);
+	tt_line_eq("complex silly newline");
+	free(cp);
+	cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_ANY);
+	if (!cp || sz != 5 || memcmp(cp, "more\0\0", 6)) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+	if (evb->totallen == 0) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+	s = "\nno newline";
+	evbuffer_add(evb, s, strlen(s));
+	free(cp);
+	cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_ANY);
+	tt_line_eq("");
+	free(cp);
+	cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_ANY);
+        tt_assert(!cp);
+	evbuffer_drain(evb, EVBUFFER_LENGTH(evb));
+        tt_assert(EVBUFFER_LENGTH(evb) == 0);
+
+	fprintf(stdout, "OK\n");
+
+	/* Test EOL_CRLF */
+	fprintf(stdout, "Testing evbuffer_readln EOL_CRLF: ");
+
+	s = "Line with\rin the middle\nLine with good crlf\r\n\nfinal\n";
+	evbuffer_add(evb, s, strlen(s));
+	cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF);
+	tt_line_eq("Line with\rin the middle");
+	free(cp);
+
+	cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF);
+	tt_line_eq("Line with good crlf");
+	free(cp);
+
+	cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF);
+	tt_line_eq("");
+	free(cp);
+
+	cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF);
+	tt_line_eq("final");
+	s = "x";
+	evbuffer_add(evb, s, 1);
+	free(cp);
+	cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF);
+        tt_assert(!cp);
+
+	fprintf(stdout, "OK\n");
+
+	/* Test CRLF_STRICT */
+	fprintf(stdout, "Testing evbuffer_readln CRLF_STRICT: ");
+
+	s = " and a bad crlf\nand a good one\r\n\r\nMore\r";
+	evbuffer_add(evb, s, strlen(s));
+	cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF_STRICT);
+	tt_line_eq("x and a bad crlf\nand a good one");
+	free(cp);
+
+	cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF_STRICT);
+	tt_line_eq("");
+	free(cp);
+
+	cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF_STRICT);
+        tt_assert(!cp);
+	evbuffer_add(evb, "\n", 1);
+
+	cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF_STRICT);
+	tt_line_eq("More");
+	free(cp);
+	tt_assert(EVBUFFER_LENGTH(evb) == 0);
+
+	s = "An internal CR\r is not an eol\r\nNor is a lack of one";
+	evbuffer_add(evb, s, strlen(s));
+	cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF_STRICT);
+	tt_line_eq("An internal CR\r is not an eol");
+	free(cp);
+
+	cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF_STRICT);
+	tt_assert(!cp);
+
+	evbuffer_add(evb, "\r\n", 2);
+	cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF_STRICT);
+	tt_line_eq("Nor is a lack of one");
+	free(cp);
+	tt_assert(EVBUFFER_LENGTH(evb) == 0);
+
+	fprintf(stdout, "OK\n");
+
+	/* Test LF */
+	fprintf(stdout, "Testing evbuffer_readln LF: ");
+
+	s = "An\rand a nl\n\nText";
+	evbuffer_add(evb, s, strlen(s));
+
+	cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_LF);
+	tt_line_eq("An\rand a nl");
+	free(cp);
+
+	cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_LF);
+	tt_line_eq("");
+	free(cp);
+
+	cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_LF);
+	tt_assert(!cp);
+	free(cp);
+	evbuffer_add(evb, "\n", 1);
+	cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_LF);
+	tt_line_eq("Text");
+	free(cp);
+
+	fprintf(stdout, "OK\n");
+
+	/* Test CRLF_STRICT - across boundaries */
+	fprintf(stdout,
+	    "Testing evbuffer_readln CRLF_STRICT across boundaries: ");
+
+	s = " and a bad crlf\nand a good one\r";
+	evbuffer_add(evb_tmp, s, strlen(s));
+	evbuffer_add_buffer(evb, evb_tmp);
+	s = "\n\r";
+	evbuffer_add(evb_tmp, s, strlen(s));
+	evbuffer_add_buffer(evb, evb_tmp);
+	s = "\nMore\r";
+	evbuffer_add(evb_tmp, s, strlen(s));
+	evbuffer_add_buffer(evb, evb_tmp);
+
+	cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF_STRICT);
+	tt_line_eq(" and a bad crlf\nand a good one");
+	free(cp);
+
+	cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF_STRICT);
+	tt_line_eq("");
+	free(cp);
+
+	cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF_STRICT);
+	tt_assert(!cp);
+	free(cp);
+	evbuffer_add(evb, "\n", 1);
+	cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_CRLF_STRICT);
+	tt_line_eq("More");
+	free(cp); cp = NULL;
+	if (EVBUFFER_LENGTH(evb) != 0) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	fprintf(stdout, "OK\n");
+
+	/* Test memory problem */
+	fprintf(stdout, "Testing evbuffer_readln memory problem: ");
+
+	s = "one line\ntwo line\nblue line";
+	evbuffer_add(evb_tmp, s, strlen(s));
+	evbuffer_add_buffer(evb, evb_tmp);
+
+	cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_LF);
+	tt_line_eq("one line");
+	free(cp); cp = NULL;
+
+	cp = evbuffer_readln(evb, &sz, EVBUFFER_EOL_LF);
+	tt_line_eq("two line");
+	free(cp); cp = NULL;
+
+	fprintf(stdout, "OK\n");
+
+	test_ok = 1;
+	evbuffer_free(evb);
+	evbuffer_free(evb_tmp);
+	if (cp) free(cp);
+}
+
+static void
+test_evbuffer_find(void)
+{
+	u_char* p;
+	const char* test1 = "1234567890\r\n";
+	const char* test2 = "1234567890\r";
+#define EVBUFFER_INITIAL_LENGTH 256
+	char test3[EVBUFFER_INITIAL_LENGTH];
+	unsigned int i;
+	struct evbuffer * buf = evbuffer_new();
+
+	/* make sure evbuffer_find doesn't match past the end of the buffer */
+	fprintf(stdout, "Testing evbuffer_find 1: ");
+	evbuffer_add(buf, (u_char*)test1, strlen(test1));
+	evbuffer_drain(buf, strlen(test1));	  
+	evbuffer_add(buf, (u_char*)test2, strlen(test2));
+	p = evbuffer_find(buf, (u_char*)"\r\n", 2);
+	if (p == NULL) {
+		fprintf(stdout, "OK\n");
+	} else {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	/*
+	 * drain the buffer and do another find; in r309 this would
+	 * read past the allocated buffer causing a valgrind error.
+	 */
+	fprintf(stdout, "Testing evbuffer_find 2: ");
+	evbuffer_drain(buf, strlen(test2));
+	for (i = 0; i < EVBUFFER_INITIAL_LENGTH; ++i)
+		test3[i] = 'a';
+	test3[EVBUFFER_INITIAL_LENGTH - 1] = 'x';
+	evbuffer_add(buf, (u_char *)test3, EVBUFFER_INITIAL_LENGTH);
+	p = evbuffer_find(buf, (u_char *)"xy", 2);
+	if (p == NULL) {
+		printf("OK\n");
+	} else {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	/* simple test for match at end of allocated buffer */
+	fprintf(stdout, "Testing evbuffer_find 3: ");
+	p = evbuffer_find(buf, (u_char *)"ax", 2);
+	if (p != NULL && strncmp((char*)p, "ax", 2) == 0) {
+		printf("OK\n");
+	} else {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	evbuffer_free(buf);
+}
+
+/*
+ * simple bufferevent test
+ */
+
+static void
+readcb(struct bufferevent *bev, void *arg)
+{
+	if (EVBUFFER_LENGTH(bev->input) == 8333) {
+		bufferevent_disable(bev, EV_READ);
+		test_ok++;
+	}
+}
+
+static void
+writecb(struct bufferevent *bev, void *arg)
+{
+	if (EVBUFFER_LENGTH(bev->output) == 0)
+		test_ok++;
+}
+
+static void
+errorcb(struct bufferevent *bev, short what, void *arg)
+{
+	test_ok = -2;
+}
+
+static void
+test_bufferevent(void)
+{
+	struct bufferevent *bev1, *bev2;
+	char buffer[8333];
+	int i;
+
+	setup_test("Bufferevent: ");
+
+	bev1 = bufferevent_new(pair[0], readcb, writecb, errorcb, NULL);
+	bev2 = bufferevent_new(pair[1], readcb, writecb, errorcb, NULL);
+
+	bufferevent_disable(bev1, EV_READ);
+	bufferevent_enable(bev2, EV_READ);
+
+	for (i = 0; i < sizeof(buffer); i++)
+		buffer[i] = i;
+
+	bufferevent_write(bev1, buffer, sizeof(buffer));
+
+	event_dispatch();
+
+	bufferevent_free(bev1);
+	bufferevent_free(bev2);
+
+	if (test_ok != 2)
+		test_ok = 0;
+
+	cleanup_test();
+}
+
+/*
+ * test watermarks and bufferevent
+ */
+
+static void
+wm_readcb(struct bufferevent *bev, void *arg)
+{
+	int len = EVBUFFER_LENGTH(bev->input);
+	static int nread;
+
+	assert(len >= 10 && len <= 20);
+
+	evbuffer_drain(bev->input, len);
+
+	nread += len;
+	if (nread == 65000) {
+		bufferevent_disable(bev, EV_READ);
+		test_ok++;
+	}
+}
+
+static void
+wm_writecb(struct bufferevent *bev, void *arg)
+{
+	if (EVBUFFER_LENGTH(bev->output) == 0)
+		test_ok++;
+}
+
+static void
+wm_errorcb(struct bufferevent *bev, short what, void *arg)
+{
+	test_ok = -2;
+}
+
+static void
+test_bufferevent_watermarks(void)
+{
+	struct bufferevent *bev1, *bev2;
+	char buffer[65000];
+	int i;
+
+	setup_test("Bufferevent Watermarks: ");
+
+	bev1 = bufferevent_new(pair[0], NULL, wm_writecb, wm_errorcb, NULL);
+	bev2 = bufferevent_new(pair[1], wm_readcb, NULL, wm_errorcb, NULL);
+
+	bufferevent_disable(bev1, EV_READ);
+	bufferevent_enable(bev2, EV_READ);
+
+	for (i = 0; i < sizeof(buffer); i++)
+		buffer[i] = i;
+
+	bufferevent_write(bev1, buffer, sizeof(buffer));
+
+	/* limit the reading on the receiving bufferevent */
+	bufferevent_setwatermark(bev2, EV_READ, 10, 20);
+
+	event_dispatch();
+
+	bufferevent_free(bev1);
+	bufferevent_free(bev2);
+
+	if (test_ok != 2)
+		test_ok = 0;
+
+	cleanup_test();
+}
+
+struct test_pri_event {
+	struct event ev;
+	int count;
+};
+
+static void
+test_priorities_cb(int fd, short what, void *arg)
+{
+	struct test_pri_event *pri = arg;
+	struct timeval tv;
+
+	if (pri->count == 3) {
+		event_loopexit(NULL);
+		return;
+	}
+
+	pri->count++;
+
+	evutil_timerclear(&tv);
+	event_add(&pri->ev, &tv);
+}
+
+static void
+test_priorities(int npriorities)
+{
+	char buf[32];
+	struct test_pri_event one, two;
+	struct timeval tv;
+
+	evutil_snprintf(buf, sizeof(buf), "Testing Priorities %d: ", npriorities);
+	setup_test(buf);
+
+	event_base_priority_init(global_base, npriorities);
+
+	memset(&one, 0, sizeof(one));
+	memset(&two, 0, sizeof(two));
+
+	timeout_set(&one.ev, test_priorities_cb, &one);
+	if (event_priority_set(&one.ev, 0) == -1) {
+		fprintf(stderr, "%s: failed to set priority", __func__);
+		exit(1);
+	}
+
+	timeout_set(&two.ev, test_priorities_cb, &two);
+	if (event_priority_set(&two.ev, npriorities - 1) == -1) {
+		fprintf(stderr, "%s: failed to set priority", __func__);
+		exit(1);
+	}
+
+	evutil_timerclear(&tv);
+
+	if (event_add(&one.ev, &tv) == -1)
+		exit(1);
+	if (event_add(&two.ev, &tv) == -1)
+		exit(1);
+
+	event_dispatch();
+
+	event_del(&one.ev);
+	event_del(&two.ev);
+
+	if (npriorities == 1) {
+		if (one.count == 3 && two.count == 3)
+			test_ok = 1;
+	} else if (npriorities == 2) {
+		/* Two is called once because event_loopexit is priority 1 */
+		if (one.count == 3 && two.count == 1)
+			test_ok = 1;
+	} else {
+		if (one.count == 3 && two.count == 0)
+			test_ok = 1;
+	}
+
+	cleanup_test();
+}
+
+static void
+test_multiple_cb(int fd, short event, void *arg)
+{
+	if (event & EV_READ)
+		test_ok |= 1;
+	else if (event & EV_WRITE)
+		test_ok |= 2;
+}
+
+static void
+test_multiple_events_for_same_fd(void)
+{
+   struct event e1, e2;
+
+   setup_test("Multiple events for same fd: ");
+
+   event_set(&e1, pair[0], EV_READ, test_multiple_cb, NULL);
+   event_add(&e1, NULL);
+   event_set(&e2, pair[0], EV_WRITE, test_multiple_cb, NULL);
+   event_add(&e2, NULL);
+   event_loop(EVLOOP_ONCE);
+   event_del(&e2);
+   write(pair[1], TEST1, strlen(TEST1)+1);
+   event_loop(EVLOOP_ONCE);
+   event_del(&e1);
+   
+   if (test_ok != 3)
+	   test_ok = 0;
+
+   cleanup_test();
+}
+
+int evtag_decode_int(uint32_t *pnumber, struct evbuffer *evbuf);
+int evtag_encode_tag(struct evbuffer *evbuf, uint32_t number);
+int evtag_decode_tag(uint32_t *pnumber, struct evbuffer *evbuf);
+
+static void
+read_once_cb(int fd, short event, void *arg)
+{
+	char buf[256];
+	int len;
+
+	len = read(fd, buf, sizeof(buf));
+
+	if (called) {
+		test_ok = 0;
+	} else if (len) {
+		/* Assumes global pair[0] can be used for writing */
+		write(pair[0], TEST1, strlen(TEST1)+1);
+		test_ok = 1;
+	}
+
+	called++;
+}
+
+static void
+test_want_only_once(void)
+{
+	struct event ev;
+	struct timeval tv;
+
+	/* Very simple read test */
+	setup_test("Want read only once: ");
+	
+	write(pair[0], TEST1, strlen(TEST1)+1);
+
+	/* Setup the loop termination */
+	evutil_timerclear(&tv);
+	tv.tv_sec = 1;
+	event_loopexit(&tv);
+	
+	event_set(&ev, pair[1], EV_READ, read_once_cb, &ev);
+	if (event_add(&ev, NULL) == -1)
+		exit(1);
+	event_dispatch();
+
+	cleanup_test();
+}
+
+#define TEST_MAX_INT	6
+
+static void
+evtag_int_test(void)
+{
+	struct evbuffer *tmp = evbuffer_new();
+	uint32_t integers[TEST_MAX_INT] = {
+		0xaf0, 0x1000, 0x1, 0xdeadbeef, 0x00, 0xbef000
+	};
+	uint32_t integer;
+	int i;
+
+	for (i = 0; i < TEST_MAX_INT; i++) {
+		int oldlen, newlen;
+		oldlen = EVBUFFER_LENGTH(tmp);
+		encode_int(tmp, integers[i]);
+		newlen = EVBUFFER_LENGTH(tmp);
+		fprintf(stdout, "\t\tencoded 0x%08x with %d bytes\n",
+		    integers[i], newlen - oldlen);
+	}
+
+	for (i = 0; i < TEST_MAX_INT; i++) {
+		if (evtag_decode_int(&integer, tmp) == -1) {
+			fprintf(stderr, "decode %d failed", i);
+			exit(1);
+		}
+		if (integer != integers[i]) {
+			fprintf(stderr, "got %x, wanted %x",
+			    integer, integers[i]);
+			exit(1);
+		}
+	}
+
+	if (EVBUFFER_LENGTH(tmp) != 0) {
+		fprintf(stderr, "trailing data");
+		exit(1);
+	}
+	evbuffer_free(tmp);
+
+	fprintf(stdout, "\t%s: OK\n", __func__);
+}
+
+static void
+evtag_fuzz(void)
+{
+	u_char buffer[4096];
+	struct evbuffer *tmp = evbuffer_new();
+	struct timeval tv;
+	int i, j;
+
+	int not_failed = 0;
+	for (j = 0; j < 100; j++) {
+		for (i = 0; i < sizeof(buffer); i++)
+			buffer[i] = rand();
+		evbuffer_drain(tmp, -1);
+		evbuffer_add(tmp, buffer, sizeof(buffer));
+
+		if (evtag_unmarshal_timeval(tmp, 0, &tv) != -1)
+			not_failed++;
+	}
+
+	/* The majority of decodes should fail */
+	if (not_failed >= 10) {
+		fprintf(stderr, "evtag_unmarshal should have failed");
+		exit(1);
+	}
+
+	/* Now insert some corruption into the tag length field */
+	evbuffer_drain(tmp, -1);
+	evutil_timerclear(&tv);
+	tv.tv_sec = 1;
+	evtag_marshal_timeval(tmp, 0, &tv);
+	evbuffer_add(tmp, buffer, sizeof(buffer));
+
+	EVBUFFER_DATA(tmp)[1] = 0xff;
+	if (evtag_unmarshal_timeval(tmp, 0, &tv) != -1) {
+		fprintf(stderr, "evtag_unmarshal_timeval should have failed");
+		exit(1);
+	}
+
+	evbuffer_free(tmp);
+
+	fprintf(stdout, "\t%s: OK\n", __func__);
+}
+
+static void
+evtag_tag_encoding(void)
+{
+	struct evbuffer *tmp = evbuffer_new();
+	uint32_t integers[TEST_MAX_INT] = {
+		0xaf0, 0x1000, 0x1, 0xdeadbeef, 0x00, 0xbef000
+	};
+	uint32_t integer;
+	int i;
+
+	for (i = 0; i < TEST_MAX_INT; i++) {
+		int oldlen, newlen;
+		oldlen = EVBUFFER_LENGTH(tmp);
+		evtag_encode_tag(tmp, integers[i]);
+		newlen = EVBUFFER_LENGTH(tmp);
+		fprintf(stdout, "\t\tencoded 0x%08x with %d bytes\n",
+		    integers[i], newlen - oldlen);
+	}
+
+	for (i = 0; i < TEST_MAX_INT; i++) {
+		if (evtag_decode_tag(&integer, tmp) == -1) {
+			fprintf(stderr, "decode %d failed", i);
+			exit(1);
+		}
+		if (integer != integers[i]) {
+			fprintf(stderr, "got %x, wanted %x",
+			    integer, integers[i]);
+			exit(1);
+		}
+	}
+
+	if (EVBUFFER_LENGTH(tmp) != 0) {
+		fprintf(stderr, "trailing data");
+		exit(1);
+	}
+	evbuffer_free(tmp);
+
+	fprintf(stdout, "\t%s: OK\n", __func__);
+}
+
+static void
+evtag_test(void)
+{
+	fprintf(stdout, "Testing Tagging:\n");
+
+	evtag_init();
+	evtag_int_test();
+	evtag_fuzz();
+
+	evtag_tag_encoding();
+
+	fprintf(stdout, "OK\n");
+}
+
+#ifndef WIN32
+static void
+rpc_test(void)
+{
+	struct msg *msg, *msg2;
+	struct kill *attack;
+	struct run *run;
+	struct evbuffer *tmp = evbuffer_new();
+	struct timeval tv_start, tv_end;
+	uint32_t tag;
+	int i;
+
+	fprintf(stdout, "Testing RPC: ");
+
+	msg = msg_new();
+	EVTAG_ASSIGN(msg, from_name, "niels");
+	EVTAG_ASSIGN(msg, to_name, "phoenix");
+
+	if (EVTAG_GET(msg, attack, &attack) == -1) {
+		fprintf(stderr, "Failed to set kill message.\n");
+		exit(1);
+	}
+
+	EVTAG_ASSIGN(attack, weapon, "feather");
+	EVTAG_ASSIGN(attack, action, "tickle");
+
+	evutil_gettimeofday(&tv_start, NULL);
+	for (i = 0; i < 1000; ++i) {
+		run = EVTAG_ADD(msg, run);
+		if (run == NULL) {
+			fprintf(stderr, "Failed to add run message.\n");
+			exit(1);
+		}
+		EVTAG_ASSIGN(run, how, "very fast but with some data in it");
+		EVTAG_ASSIGN(run, fixed_bytes,
+		    (unsigned char*)"012345678901234567890123");
+	}
+
+	if (msg_complete(msg) == -1) {
+		fprintf(stderr, "Failed to make complete message.\n");
+		exit(1);
+	}
+
+	evtag_marshal_msg(tmp, 0xdeaf, msg);
+
+	if (evtag_peek(tmp, &tag) == -1) {
+		fprintf(stderr, "Failed to peak tag.\n");
+		exit (1);
+	}
+
+	if (tag != 0xdeaf) {
+		fprintf(stderr, "Got incorrect tag: %0x.\n", tag);
+		exit (1);
+	}
+
+	msg2 = msg_new();
+	if (evtag_unmarshal_msg(tmp, 0xdeaf, msg2) == -1) {
+		fprintf(stderr, "Failed to unmarshal message.\n");
+		exit(1);
+	}
+
+	evutil_gettimeofday(&tv_end, NULL);
+	evutil_timersub(&tv_end, &tv_start, &tv_end);
+	fprintf(stderr, "(%.1f us/add) ",
+	    (float)tv_end.tv_sec/(float)i * 1000000.0 +
+	    tv_end.tv_usec / (float)i);
+
+	if (!EVTAG_HAS(msg2, from_name) ||
+	    !EVTAG_HAS(msg2, to_name) ||
+	    !EVTAG_HAS(msg2, attack)) {
+		fprintf(stderr, "Missing data structures.\n");
+		exit(1);
+	}
+
+	if (EVTAG_LEN(msg2, run) != i) {
+		fprintf(stderr, "Wrong number of run messages.\n");
+		exit(1);
+	}
+
+	msg_free(msg);
+	msg_free(msg2);
+
+	evbuffer_free(tmp);
+
+	fprintf(stdout, "OK\n");
+}
+#endif
+
+static void
+test_evutil_strtoll(void)
+{
+        const char *s;
+        char *endptr;
+        setup_test("evutil_stroll: ");
+        test_ok = 0;
+
+        if (evutil_strtoll("5000000000", NULL, 10) != ((ev_int64_t)5000000)*1000)
+                goto err;
+        if (evutil_strtoll("-5000000000", NULL, 10) != ((ev_int64_t)5000000)*-1000)
+                goto err;
+        s = " 99999stuff";
+        if (evutil_strtoll(s, &endptr, 10) != (ev_int64_t)99999)
+                goto err;
+        if (endptr != s+6)
+                goto err;
+        if (evutil_strtoll("foo", NULL, 10) != 0)
+                goto err;
+
+        test_ok = 1;
+ err:
+        cleanup_test();
+}
+
+
+int
+main (int argc, char **argv)
+{
+#ifdef WIN32
+	WORD wVersionRequested;
+	WSADATA wsaData;
+	int	err;
+
+	wVersionRequested = MAKEWORD( 2, 2 );
+
+	err = WSAStartup( wVersionRequested, &wsaData );
+#endif
+
+#ifndef WIN32
+	if (signal(SIGPIPE, SIG_IGN) == SIG_ERR)
+		return (1);
+#endif
+	setvbuf(stdout, NULL, _IONBF, 0);
+
+	/* Initialize the event library */
+	global_base = event_init();
+
+	test_registerfds();
+
+        test_evutil_strtoll();
+
+	/* these use the global event base and need to be called first */
+	test_priorities(1);
+	test_priorities(2);
+	test_priorities(3);
+
+	test_evbuffer();
+	test_evbuffer_find();
+	test_evbuffer_readln();
+	
+	test_bufferevent();
+	test_bufferevent_watermarks();
+
+	test_free_active_base();
+
+	test_event_base_new();
+
+	http_suite();
+
+#ifndef WIN32
+	rpc_suite();
+#endif
+
+	dns_suite();
+	
+#ifndef WIN32
+	test_fork();
+#endif
+
+	test_simpleread();
+
+	test_simplewrite();
+
+	test_multiple();
+
+	test_persistent();
+
+	test_combined();
+
+	test_simpletimeout();
+#ifndef WIN32
+	test_simplesignal();
+	test_multiplesignal();
+	test_immediatesignal();
+#endif
+	test_loopexit();
+	test_loopbreak();
+
+	test_loopexit_multiple();
+	
+	test_multiple_events_for_same_fd();
+
+	test_want_only_once();
+
+	evtag_test();
+
+#ifndef WIN32
+	rpc_test();
+
+	test_signal_dealloc();
+	test_signal_pipeloss();
+	test_signal_switchbase();
+	test_signal_restore();
+	test_signal_assert();
+	test_signal_while_processing();
+#endif
+	
+	return (0);
+}
+
diff --git a/base/third_party/libevent/test/regress.h b/base/third_party/libevent/test/regress.h
new file mode 100644
index 0000000..4060ff5
--- /dev/null
+++ b/base/third_party/libevent/test/regress.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _REGRESS_H_
+#define _REGRESS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void http_suite(void);
+void http_basic_test(void);
+
+void rpc_suite(void);
+
+void dns_suite(void);
+	
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _REGRESS_H_ */
diff --git a/base/third_party/libevent/test/regress.rpc b/base/third_party/libevent/test/regress.rpc
new file mode 100644
index 0000000..65ca95d
--- /dev/null
+++ b/base/third_party/libevent/test/regress.rpc
@@ -0,0 +1,20 @@
+/* tests data packing and unpacking */
+
+struct msg {
+	string from_name = 1;
+	string to_name = 2;
+	optional struct[kill] attack = 3;
+	array struct[run] run = 4;
+}
+
+struct kill {
+	string weapon = 0x10121;
+	string action = 2;
+	optional int how_often = 3;
+}
+
+struct run {
+	string how = 1;
+	optional bytes some_bytes = 2;
+	bytes fixed_bytes[24] = 3;
+}
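+
+/*
+ * event_rpcgen.py turns each struct above into C accessors.  A sketch of
+ * how the generated "msg" API is driven (regress_rpc.c does just this),
+ * where "buf" stands for an evbuffer the caller owns:
+ *
+ *	struct msg *m = msg_new();
+ *	EVTAG_ASSIGN(m, from_name, "niels");
+ *	EVTAG_ASSIGN(m, to_name, "tester");
+ *	msg_marshal(buf, m);
+ *	msg_free(m);
+ */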
diff --git a/base/third_party/libevent/test/regress_dns.c b/base/third_party/libevent/test/regress_dns.c
new file mode 100644
index 0000000..129cdad
--- /dev/null
+++ b/base/third_party/libevent/test/regress_dns.c
@@ -0,0 +1,376 @@
+/*
+ * Copyright (c) 2003-2006 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef WIN32
+#include <winsock2.h>
+#include <windows.h>
+#endif
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#include <sys/queue.h>
+#ifndef WIN32
+#include <sys/socket.h>
+#include <signal.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <unistd.h>
+#endif
+#ifdef HAVE_NETINET_IN6_H
+#include <netinet/in6.h>
+#endif
+#ifdef HAVE_NETDB_H
+#include <netdb.h>
+#endif
+#include <fcntl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+
+#include "event.h"
+#include "evdns.h"
+#include "log.h"
+
+static int dns_ok = 0;
+static int dns_err = 0;
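+
+/* dns_ok carries the answer type we saw (or a simple pass/fail flag) and
+ * dns_err the evdns error code; the callbacks below fill them in and
+ * each test checks them once event_dispatch() returns. */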
+
+void dns_suite(void);
+
+static void
+dns_gethostbyname_cb(int result, char type, int count, int ttl,
+    void *addresses, void *arg)
+{
+	dns_ok = dns_err = 0;
+
+	if (result == DNS_ERR_TIMEOUT) {
+		fprintf(stdout, "[Timed out] ");
+		dns_err = result;
+		goto out;
+	}
+
+	if (result != DNS_ERR_NONE) {
+		fprintf(stdout, "[Error code %d] ", result);
+		goto out;
+	}
+
+	fprintf(stderr, "type: %d, count: %d, ttl: %d: ", type, count, ttl);
+
+	switch (type) {
+	case DNS_IPv6_AAAA: {
+#if defined(HAVE_STRUCT_IN6_ADDR) && defined(HAVE_INET_NTOP) && defined(INET6_ADDRSTRLEN)
+		struct in6_addr *in6_addrs = addresses;
+		char buf[INET6_ADDRSTRLEN+1];
+		int i;
+		/* a resolution that's not valid does not help */
+		if (ttl < 0)
+			goto out;
+		for (i = 0; i < count; ++i) {
+			const char *b = inet_ntop(AF_INET6, &in6_addrs[i], buf,sizeof(buf));
+			if (b)
+				fprintf(stderr, "%s ", b);
+			else
+				fprintf(stderr, "%s ", strerror(errno));
+		}
+#endif
+		break;
+	}
+	case DNS_IPv4_A: {
+		struct in_addr *in_addrs = addresses;
+		int i;
+		/* a resolution that's not valid does not help */
+		if (ttl < 0)
+			goto out;
+		for (i = 0; i < count; ++i)
+			fprintf(stderr, "%s ", inet_ntoa(in_addrs[i]));
+		break;
+	}
+	case DNS_PTR:
+		/* may get at most one PTR */
+		if (count != 1)
+			goto out;
+
+		fprintf(stderr, "%s ", *(char **)addresses);
+		break;
+	default:
+		goto out;
+	}
+
+	dns_ok = type;
+
+out:
+	event_loopexit(NULL);
+}
+
+static void
+dns_gethostbyname(void)
+{
+	fprintf(stdout, "Simple DNS resolve: ");
+	dns_ok = 0;
+	evdns_resolve_ipv4("www.monkey.org", 0, dns_gethostbyname_cb, NULL);
+	event_dispatch();
+
+	if (dns_ok == DNS_IPv4_A) {
+		fprintf(stdout, "OK\n");
+	} else {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+}
+
+static void
+dns_gethostbyname6(void)
+{
+	fprintf(stdout, "IPv6 DNS resolve: ");
+	dns_ok = 0;
+	evdns_resolve_ipv6("www.ietf.org", 0, dns_gethostbyname_cb, NULL);
+	event_dispatch();
+
+	if (dns_ok == DNS_IPv6_AAAA) {
+		fprintf(stdout, "OK\n");
+	} else if (!dns_ok && dns_err == DNS_ERR_TIMEOUT) {
+		fprintf(stdout, "SKIPPED\n");
+	} else {
+		fprintf(stdout, "FAILED (%d)\n", dns_ok);
+		exit(1);
+	}
+}
+
+static void
+dns_gethostbyaddr(void)
+{
+	struct in_addr in;
+	in.s_addr = htonl(0x7f000001ul); /* 127.0.0.1 */
+	fprintf(stdout, "Simple reverse DNS resolve: ");
+	dns_ok = 0;
+	evdns_resolve_reverse(&in, 0, dns_gethostbyname_cb, NULL);
+	event_dispatch();
+
+	if (dns_ok == DNS_PTR) {
+		fprintf(stdout, "OK\n");
+	} else {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+}
+
+static int n_server_responses = 0;
+
+static void
+dns_server_request_cb(struct evdns_server_request *req, void *data)
+{
+	int i, r;
+	const char TEST_ARPA[] = "11.11.168.192.in-addr.arpa";
+	for (i = 0; i < req->nquestions; ++i) {
+		struct in_addr ans;
+		ans.s_addr = htonl(0xc0a80b0bUL); /* 192.168.11.11 */
+		if (req->questions[i]->type == EVDNS_TYPE_A &&
+			req->questions[i]->dns_question_class == EVDNS_CLASS_INET &&
+			!strcmp(req->questions[i]->name, "zz.example.com")) {
+			r = evdns_server_request_add_a_reply(req, "zz.example.com",
+												 1, &ans.s_addr, 12345);
+			if (r<0)
+				dns_ok = 0;
+		} else if (req->questions[i]->type == EVDNS_TYPE_AAAA &&
+				   req->questions[i]->dns_question_class == EVDNS_CLASS_INET &&
+				   !strcmp(req->questions[i]->name, "zz.example.com")) {
+			char addr6[17] = "abcdefghijklmnop";
+			r = evdns_server_request_add_aaaa_reply(req, "zz.example.com",
+												 1, addr6, 123);
+			if (r<0)
+				dns_ok = 0;
+		} else if (req->questions[i]->type == EVDNS_TYPE_PTR &&
+				   req->questions[i]->dns_question_class == EVDNS_CLASS_INET &&
+				   !strcmp(req->questions[i]->name, TEST_ARPA)) {
+			r = evdns_server_request_add_ptr_reply(req, NULL, TEST_ARPA,
+					   "ZZ.EXAMPLE.COM", 54321);
+			if (r<0)
+				dns_ok = 0;
+		} else {
+			fprintf(stdout, "Unexpected question %d %d \"%s\" ",
+					req->questions[i]->type,
+					req->questions[i]->dns_question_class,
+					req->questions[i]->name);
+			dns_ok = 0;
+		}
+	}
+	r = evdns_server_request_respond(req, 0);
+	if (r<0) {
+		fprintf(stdout, "Couldn't send reply. ");
+		dns_ok = 0;
+	}
+}
+
+static void
+dns_server_gethostbyname_cb(int result, char type, int count, int ttl,
+							void *addresses, void *arg)
+{
+	if (result != DNS_ERR_NONE) {
+		fprintf(stdout, "Unexpected result %d. ", result);
+		dns_ok = 0;
+		goto out;
+	}
+	if (count != 1) {
+		fprintf(stdout, "Unexpected answer count %d. ", count);
+		dns_ok = 0;
+		goto out;
+	}
+	switch (type) {
+	case DNS_IPv4_A: {
+		struct in_addr *in_addrs = addresses;
+		if (in_addrs[0].s_addr != htonl(0xc0a80b0bUL) || ttl != 12345) {
+			fprintf(stdout, "Bad IPv4 response \"%s\" %d. ",
+					inet_ntoa(in_addrs[0]), ttl);
+			dns_ok = 0;
+			goto out;
+		}
+		break;
+	}
+	case DNS_IPv6_AAAA: {
+#if defined (HAVE_STRUCT_IN6_ADDR) && defined(HAVE_INET_NTOP) && defined(INET6_ADDRSTRLEN)
+		struct in6_addr *in6_addrs = addresses;
+		char buf[INET6_ADDRSTRLEN+1];
+		if (memcmp(&in6_addrs[0].s6_addr, "abcdefghijklmnop", 16)
+			|| ttl != 123) {
+			const char *b = inet_ntop(AF_INET6, &in6_addrs[0],buf,sizeof(buf));
+			fprintf(stdout, "Bad IPv6 response \"%s\" %d. ", b, ttl);
+			dns_ok = 0;
+			goto out;
+		}
+#endif
+		break;
+	}
+	case DNS_PTR: {
+		char **addrs = addresses;
+		if (strcmp(addrs[0], "ZZ.EXAMPLE.COM") || ttl != 54321) {
+			fprintf(stdout, "Bad PTR response \"%s\" %d. ",
+					addrs[0], ttl);
+			dns_ok = 0;
+			goto out;
+		}
+		break;
+	}
+	default:
+		fprintf(stdout, "Bad response type %d. ", type);
+		dns_ok = 0;
+	}
+
+ out:
+	if (++n_server_responses == 3) {
+		event_loopexit(NULL);
+	}
+}
+
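+/* A self-contained server test: bind a UDP socket on 127.0.0.1:35353,
+ * hand it to evdns_add_server_port(), point the resolver at the same
+ * address, then answer our own A, AAAA and PTR queries. */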
+static void
+dns_server(void)
+{
+	int sock;
+	struct sockaddr_in my_addr;
+	struct evdns_server_port *port;
+	struct in_addr resolve_addr;
+
+	dns_ok = 1;
+	fprintf(stdout, "DNS server support: ");
+
+	/* Add ourselves as the only nameserver, and make sure we really are
+	 * the only nameserver. */
+	evdns_nameserver_ip_add("127.0.0.1:35353");
+	if (evdns_count_nameservers() != 1) {
+		fprintf(stdout, "Couldn't set up.\n");
+		exit(1);
+	}
+
+	/* Now configure a nameserver port. */
+	sock = socket(AF_INET, SOCK_DGRAM, 0);
+	if (sock == -1) {
+		perror("socket");
+		exit(1);
+	}
+#ifdef WIN32
+	{
+		u_long nonblocking = 1;
+		ioctlsocket(sock, FIONBIO, &nonblocking);
+	}
+#else
+	fcntl(sock, F_SETFL, O_NONBLOCK);
+#endif
+	memset(&my_addr, 0, sizeof(my_addr));
+	my_addr.sin_family = AF_INET;
+	my_addr.sin_port = htons(35353);
+	my_addr.sin_addr.s_addr = htonl(0x7f000001UL);
+	if (bind(sock, (struct sockaddr*)&my_addr, sizeof(my_addr)) < 0) {
+		perror("bind");
+		exit (1);
+	}
+	port = evdns_add_server_port(sock, 0, dns_server_request_cb, NULL);
+
+	/* Send two queries. */
+	evdns_resolve_ipv4("zz.example.com", DNS_QUERY_NO_SEARCH,
+					   dns_server_gethostbyname_cb, NULL);
+	evdns_resolve_ipv6("zz.example.com", DNS_QUERY_NO_SEARCH,
+					   dns_server_gethostbyname_cb, NULL);
+	resolve_addr.s_addr = htonl(0xc0a80b0bUL); /* 192.168.11.11 */
+	evdns_resolve_reverse(&resolve_addr, 0,
+						  dns_server_gethostbyname_cb, NULL);
+
+	event_dispatch();
+
+	if (dns_ok) {
+		fprintf(stdout, "OK\n");
+	} else {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	evdns_close_server_port(port);
+	evdns_shutdown(0); /* remove ourselves as a nameserver. */
+#ifdef WIN32
+	closesocket(sock);
+#else
+	close(sock);
+#endif
+}
+
+void
+dns_suite(void)
+{
+	dns_server(); /* Do this before we call evdns_init. */
+
+	evdns_init();
+	dns_gethostbyname();
+	dns_gethostbyname6();
+	dns_gethostbyaddr();
+
+	evdns_shutdown(0);
+}
diff --git a/base/third_party/libevent/test/regress_http.c b/base/third_party/libevent/test/regress_http.c
new file mode 100644
index 0000000..943b29d
--- /dev/null
+++ b/base/third_party/libevent/test/regress_http.c
@@ -0,0 +1,1744 @@
+/*
+ * Copyright (c) 2003-2006 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef WIN32
+#include <winsock2.h>
+#include <windows.h>
+#endif
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#include <sys/queue.h>
+#ifndef WIN32
+#include <sys/socket.h>
+#include <signal.h>
+#include <unistd.h>
+#include <netdb.h>
+#endif
+#include <fcntl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+
+#include "event.h"
+#include "evhttp.h"
+#include "log.h"
+#include "http-internal.h"
+
+extern int pair[];
+extern int test_ok;
+
+static struct evhttp *http;
+/* set if a test needs to call loopexit on a base */
+static struct event_base *base;
+
+void http_suite(void);
+
+void http_basic_cb(struct evhttp_request *req, void *arg);
+static void http_chunked_cb(struct evhttp_request *req, void *arg);
+void http_post_cb(struct evhttp_request *req, void *arg);
+void http_dispatcher_cb(struct evhttp_request *req, void *arg);
+static void http_large_delay_cb(struct evhttp_request *req, void *arg);
+static void http_badreq_cb(struct evhttp_request *req, void *arg);
+
+static struct evhttp *
+http_setup(short *pport, struct event_base *base)
+{
+	int i;
+	struct evhttp *myhttp;
+	short port = -1;
+
+	/* Try a few different ports */
+	myhttp = evhttp_new(base);
+	for (i = 0; i < 50; ++i) {
+		if (evhttp_bind_socket(myhttp, "127.0.0.1", 8080 + i) != -1) {
+			port = 8080 + i;
+			break;
+		}
+	}
+
+	if (port == -1)
+		event_errx(1, "Could not start web server");
+
+	/* Register a callback for certain types of requests */
+	evhttp_set_cb(myhttp, "/test", http_basic_cb, NULL);
+	evhttp_set_cb(myhttp, "/chunked", http_chunked_cb, NULL);
+	evhttp_set_cb(myhttp, "/postit", http_post_cb, NULL);
+	evhttp_set_cb(myhttp, "/largedelay", http_large_delay_cb, NULL);
+	evhttp_set_cb(myhttp, "/badrequest", http_badreq_cb, NULL);
+	evhttp_set_cb(myhttp, "/", http_dispatcher_cb, NULL);
+
+	*pport = port;
+	return (myhttp);
+}
+
+#ifndef NI_MAXSERV
+#define NI_MAXSERV 1024
+#endif
+
+static int
+http_connect(const char *address, u_short port)
+{
+	/* Stupid code for connecting */
+#ifdef WIN32
+	struct hostent *he;
+	struct sockaddr_in sin;
+#else
+	struct addrinfo ai, *aitop;
+	char strport[NI_MAXSERV];
+#endif
+	struct sockaddr *sa;
+	int slen;
+	int fd;
+	
+#ifdef WIN32
+	if (!(he = gethostbyname(address))) {
+		event_warn("gethostbyname");
+	}
+	memcpy(&sin.sin_addr, he->h_addr_list[0], he->h_length);
+	sin.sin_family = AF_INET;
+	sin.sin_port = htons(port);
+	slen = sizeof(struct sockaddr_in);
+	sa = (struct sockaddr*)&sin;
+#else
+	memset(&ai, 0, sizeof (ai));
+	ai.ai_family = AF_INET;
+	ai.ai_socktype = SOCK_STREAM;
+	snprintf(strport, sizeof (strport), "%d", port);
+	if (getaddrinfo(address, strport, &ai, &aitop) != 0) {
+		event_warn("getaddrinfo");
+		return (-1);
+	}
+	sa = aitop->ai_addr;
+	slen = aitop->ai_addrlen;
+#endif
+        
+	fd = socket(AF_INET, SOCK_STREAM, 0);
+	if (fd == -1)
+		event_err(1, "socket failed");
+
+	if (connect(fd, sa, slen) == -1)
+		event_err(1, "connect failed");
+
+#ifndef WIN32
+	freeaddrinfo(aitop);
+#endif
+
+	return (fd);
+}
+
+static void
+http_readcb(struct bufferevent *bev, void *arg)
+{
+	const char *what = "This is funny";
+
+ 	event_debug(("%s: %s\n", __func__, EVBUFFER_DATA(bev->input)));
+	
+	if (evbuffer_find(bev->input,
+		(const unsigned char*) what, strlen(what)) != NULL) {
+		struct evhttp_request *req = evhttp_request_new(NULL, NULL);
+		enum message_read_status done;
+
+		req->kind = EVHTTP_RESPONSE;
+		done = evhttp_parse_firstline(req, bev->input);
+		if (done != ALL_DATA_READ)
+			goto out;
+
+		done = evhttp_parse_headers(req, bev->input);
+		if (done != ALL_DATA_READ)
+			goto out;
+
+		if (done == 1 &&
+		    evhttp_find_header(req->input_headers,
+			"Content-Type") != NULL)
+			test_ok++;
+
+	out:
+		evhttp_request_free(req);
+		bufferevent_disable(bev, EV_READ);
+		if (base)
+			event_base_loopexit(base, NULL);
+		else
+			event_loopexit(NULL);
+	}
+}
+
+static void
+http_writecb(struct bufferevent *bev, void *arg)
+{
+	if (EVBUFFER_LENGTH(bev->output) == 0) {
+		/* enable reading of the reply */
+		bufferevent_enable(bev, EV_READ);
+		test_ok++;
+	}
+}
+
+static void
+http_errorcb(struct bufferevent *bev, short what, void *arg)
+{
+	test_ok = -2;
+	event_loopexit(NULL);
+}
+
+void
+http_basic_cb(struct evhttp_request *req, void *arg)
+{
+	struct evbuffer *evb = evbuffer_new();
+	int empty = evhttp_find_header(req->input_headers, "Empty") != NULL;
+	event_debug(("%s: called\n", __func__));
+	evbuffer_add_printf(evb, "This is funny");
+	
+	/* For multi-line headers test */
+	{
+		const char *multi =
+		    evhttp_find_header(req->input_headers,"X-multi");
+		if (multi) {
+			if (strcmp("END", multi + strlen(multi) - 3) == 0)
+				test_ok++;
+			if (evhttp_find_header(req->input_headers, "X-Last"))
+				test_ok++;
+		}
+	}
+
+	/* injecting a bad content-length */
+	if (evhttp_find_header(req->input_headers, "X-Negative"))
+		evhttp_add_header(req->output_headers,
+		    "Content-Length", "-100");
+
+	/* allow sending of an empty reply */
+	evhttp_send_reply(req, HTTP_OK, "Everything is fine",
+	    !empty ? evb : NULL);
+
+	evbuffer_free(evb);
+}
+
+static char const* const CHUNKS[] = {
+	"This is funny",
+	"but not hilarious.",
+	"bwv 1052"
+};
+
+struct chunk_req_state {
+	struct evhttp_request *req;
+	int i;
+};
+
+static void
+http_chunked_trickle_cb(int fd, short events, void *arg)
+{
+	struct evbuffer *evb = evbuffer_new();
+	struct chunk_req_state *state = arg;
+	struct timeval when = { 0, 0 };
+
+	evbuffer_add_printf(evb, "%s", CHUNKS[state->i]);
+	evhttp_send_reply_chunk(state->req, evb);
+	evbuffer_free(evb);
+
+	if (++state->i < sizeof(CHUNKS)/sizeof(CHUNKS[0])) {
+		event_once(-1, EV_TIMEOUT,
+		    http_chunked_trickle_cb, state, &when);
+	} else {
+		evhttp_send_reply_end(state->req);
+		free(state);
+	}
+}
+
+static void
+http_chunked_cb(struct evhttp_request *req, void *arg)
+{
+	struct timeval when = { 0, 0 };
+	struct chunk_req_state *state = malloc(sizeof(struct chunk_req_state));
+	event_debug(("%s: called\n", __func__));
+
+	memset(state, 0, sizeof(struct chunk_req_state));
+	state->req = req;
+
+	/* generate a chunked reply */
+	evhttp_send_reply_start(req, HTTP_OK, "Everything is fine");
+
+	/* but trickle it across several iterations to ensure we're not
+	 * assuming it comes all at once */
+	event_once(-1, EV_TIMEOUT, http_chunked_trickle_cb, state, &when);
+}
+
+static void
+http_complete_write(int fd, short what, void *arg)
+{
+	struct bufferevent *bev = arg;
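+	/* second half of the request: http_basic_test() has already sent
+	 * "GET /test HTTP/1.1\r\nHost: some", so this completes the Host
+	 * header and finishes the request. */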
+	const char *http_request = "host\r\n"
+	    "Connection: close\r\n"
+	    "\r\n";
+	bufferevent_write(bev, http_request, strlen(http_request));
+}
+
+static void
+http_basic_test(void)
+{
+	struct timeval tv;
+	struct bufferevent *bev;
+	int fd;
+	const char *http_request;
+	short port = -1;
+
+	test_ok = 0;
+	fprintf(stdout, "Testing Basic HTTP Server: ");
+
+	http = http_setup(&port, NULL);
+
+	/* bind to a second socket */
+	if (evhttp_bind_socket(http, "127.0.0.1", port + 1) == -1) {
+		fprintf(stdout, "FAILED (bind)\n");
+		exit(1);
+	}
+	
+	fd = http_connect("127.0.0.1", port);
+
+	/* Stupid thing to send a request */
+	bev = bufferevent_new(fd, http_readcb, http_writecb,
+	    http_errorcb, NULL);
+
+	/* first half of the http request */
+	http_request =
+	    "GET /test HTTP/1.1\r\n"
+	    "Host: some";
+
+	bufferevent_write(bev, http_request, strlen(http_request));
+	timerclear(&tv);
+	tv.tv_usec = 10000;
+	event_once(-1, EV_TIMEOUT, http_complete_write, bev, &tv);
+	
+	event_dispatch();
+
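+	/* expect 3: the write callback fires once per flushed half of the
+	 * split request and the read callback adds one when it finds the
+	 * Content-Type header in the reply. */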
+	if (test_ok != 3) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	/* connect to the second port */
+	bufferevent_free(bev);
+	EVUTIL_CLOSESOCKET(fd);
+
+	fd = http_connect("127.0.0.1", port + 1);
+
+	/* Stupid thing to send a request */
+	bev = bufferevent_new(fd, http_readcb, http_writecb,
+	    http_errorcb, NULL);
+
+	http_request =
+	    "GET /test HTTP/1.1\r\n"
+	    "Host: somehost\r\n"
+	    "Connection: close\r\n"
+	    "\r\n";
+
+	bufferevent_write(bev, http_request, strlen(http_request));
+	
+	event_dispatch();
+
+	bufferevent_free(bev);
+	EVUTIL_CLOSESOCKET(fd);
+
+	evhttp_free(http);
+	
+	if (test_ok != 5) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	fprintf(stdout, "OK\n");
+}
+
+static void
+http_badreq_cb(struct evhttp_request *req, void *arg)
+{
+	struct evbuffer *buf = evbuffer_new();
+
+	evhttp_add_header(req->output_headers, "Content-Type", "text/xml; charset=UTF-8");
+	evbuffer_add_printf(buf, "Hello, %s!", "127.0.0.1");
+
+	evhttp_send_reply(req, HTTP_OK, "OK", buf);
+	evbuffer_free(buf);
+}
+
+static void
+http_badreq_errorcb(struct bufferevent *bev, short what, void *arg)
+{
+	event_debug(("%s: called (what=%04x, arg=%p)", __func__, what, arg));
+	/* ignore */
+}
+
+static void
+http_badreq_readcb(struct bufferevent *bev, void *arg)
+{
+	const char *what = "Hello, 127.0.0.1";
+	const char *bad_request = "400 Bad Request";
+
+	event_debug(("%s: %s\n", __func__, EVBUFFER_DATA(bev->input)));
+
+	if (evbuffer_find(bev->input,
+		(const unsigned char *) bad_request, strlen(bad_request)) != NULL) {
+		event_debug(("%s: bad request detected", __func__));
+		test_ok = -10;
+		bufferevent_disable(bev, EV_READ);
+		event_loopexit(NULL);
+		return;
+	}
+
+	if (evbuffer_find(bev->input,
+		(const unsigned char*) what, strlen(what)) != NULL) {
+		struct evhttp_request *req = evhttp_request_new(NULL, NULL);
+		enum message_read_status done;
+
+		req->kind = EVHTTP_RESPONSE;
+		done = evhttp_parse_firstline(req, bev->input);
+		if (done != ALL_DATA_READ)
+			goto out;
+
+		done = evhttp_parse_headers(req, bev->input);
+		if (done != ALL_DATA_READ)
+			goto out;
+
+		if (done == 1 &&
+		    evhttp_find_header(req->input_headers,
+			"Content-Type") != NULL)
+			test_ok++;
+
+	out:
+		evhttp_request_free(req);
+		evbuffer_drain(bev->input, EVBUFFER_LENGTH(bev->input));
+	}
+
+	shutdown(bev->ev_read.ev_fd, SHUT_WR);
+}
+
+static void
+http_badreq_successcb(int fd, short what, void *arg)
+{
+	event_debug(("%s: called (what=%04x, arg=%p)", __func__, what, arg));
+	event_loopexit(NULL);
+}
+
+static void
+http_bad_request(void)
+{
+	struct timeval tv;
+	struct bufferevent *bev;
+	int fd;
+	const char *http_request;
+	short port = -1;
+
+	test_ok = 0;
+	fprintf(stdout, "Testing \"Bad Request\" on connection close: ");
+
+	http = http_setup(&port, NULL);
+
+	/* bind to a second socket */
+	if (evhttp_bind_socket(http, "127.0.0.1", port + 1) == -1) {
+		fprintf(stdout, "FAILED (bind)\n");
+		exit(1);
+	}
+
+	/* NULL request test */
+	fd = http_connect("127.0.0.1", port);
+
+	/* Stupid thing to send a request */
+	bev = bufferevent_new(fd, http_badreq_readcb, http_writecb,
+	    http_badreq_errorcb, NULL);
+	bufferevent_enable(bev, EV_READ);
+
+	/* real NULL request */
+	http_request = "";
+
+	shutdown(fd, SHUT_WR);
+	timerclear(&tv);
+	tv.tv_usec = 10000;
+	event_once(-1, EV_TIMEOUT, http_badreq_successcb, bev, &tv);
+
+	event_dispatch();
+
+	bufferevent_free(bev);
+	EVUTIL_CLOSESOCKET(fd);
+
+	if (test_ok != 0) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	/* Second answer (BAD REQUEST) on connection close */
+
+	/* connect to the second port */
+	fd = http_connect("127.0.0.1", port + 1);
+
+	/* Stupid thing to send a request */
+	bev = bufferevent_new(fd, http_badreq_readcb, http_writecb,
+	    http_badreq_errorcb, NULL);
+	bufferevent_enable(bev, EV_READ);
+
+	/* first half of the http request */
+	http_request =
+		"GET /badrequest HTTP/1.0\r\n"	\
+		"Connection: Keep-Alive\r\n"	\
+		"\r\n";
+
+	bufferevent_write(bev, http_request, strlen(http_request));
+
+	timerclear(&tv);
+	tv.tv_usec = 10000;
+	event_once(-1, EV_TIMEOUT, http_badreq_successcb, bev, &tv);
+
+	event_dispatch();
+
+	evhttp_free(http);
+
+	if (test_ok != 2) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	fprintf(stdout, "OK\n");
+}
+static struct evhttp_connection *delayed_client;
+
+static void
+http_delay_reply(int fd, short what, void *arg)
+{
+	struct evhttp_request *req = arg;
+
+	evhttp_send_reply(req, HTTP_OK, "Everything is fine", NULL);
+
+	++test_ok;
+}
+
+static void
+http_large_delay_cb(struct evhttp_request *req, void *arg)
+{
+	struct timeval tv;
+	timerclear(&tv);
+	tv.tv_sec = 3;
+
+	event_once(-1, EV_TIMEOUT, http_delay_reply, req, &tv);
+
+	/* here we close the client connection which will cause an EOF */
+	evhttp_connection_fail(delayed_client, EVCON_HTTP_EOF);
+}
+
+void http_request_done(struct evhttp_request *, void *);
+void http_request_empty_done(struct evhttp_request *, void *);
+
+static void
+http_connection_test(int persistent)
+{
+	short port = -1;
+	struct evhttp_connection *evcon = NULL;
+	struct evhttp_request *req = NULL;
+	
+	test_ok = 0;
+	fprintf(stdout, "Testing Request Connection Pipeline %s: ",
+	    persistent ? "(persistent)" : "");
+
+	http = http_setup(&port, NULL);
+
+	evcon = evhttp_connection_new("127.0.0.1", port);
+	if (evcon == NULL) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	/*
+	 * At this point, we want to schedule a request to the HTTP
+	 * server using our make request method.
+	 */
+
+	req = evhttp_request_new(http_request_done, NULL);
+
+	/* Add the information that we care about */
+	evhttp_add_header(req->output_headers, "Host", "somehost");
+
+	/* We give ownership of the request to the connection */
+	if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/test") == -1) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	event_dispatch();
+
+	if (test_ok != 1) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	/* try to make another request over the same connection */
+	test_ok = 0;
+	
+	req = evhttp_request_new(http_request_done, NULL);
+
+	/* Add the information that we care about */
+	evhttp_add_header(req->output_headers, "Host", "somehost");
+
+	/* 
+	 * if our connections are not supposed to be persistent; request
+	 * a close from the server.
+	 */
+	if (!persistent)
+		evhttp_add_header(req->output_headers, "Connection", "close");
+
+	/* We give ownership of the request to the connection */
+	if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/test") == -1) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	event_dispatch();
+
+	/* make another request: request empty reply */
+	test_ok = 0;
+	
+	req = evhttp_request_new(http_request_empty_done, NULL);
+
+	/* Add the information that we care about */
+	evhttp_add_header(req->output_headers, "Empty", "itis");
+
+	/* We give ownership of the request to the connection */
+	if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/test") == -1) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	event_dispatch();
+
+	if (test_ok != 1) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	evhttp_connection_free(evcon);
+	evhttp_free(http);
+	
+	fprintf(stdout, "OK\n");
+}
+
+void
+http_request_done(struct evhttp_request *req, void *arg)
+{
+	const char *what = "This is funny";
+
+	if (req->response_code != HTTP_OK) {
+		fprintf(stderr, "FAILED\n");
+		exit(1);
+	}
+
+	if (evhttp_find_header(req->input_headers, "Content-Type") == NULL) {
+		fprintf(stderr, "FAILED\n");
+		exit(1);
+	}
+
+	if (EVBUFFER_LENGTH(req->input_buffer) != strlen(what)) {
+		fprintf(stderr, "FAILED\n");
+		exit(1);
+	}
+	
+	if (memcmp(EVBUFFER_DATA(req->input_buffer), what, strlen(what)) != 0) {
+		fprintf(stderr, "FAILED\n");
+		exit(1);
+	}
+
+	test_ok = 1;
+	event_loopexit(NULL);
+}
+
+/* test date header and content length */
+
+void
+http_request_empty_done(struct evhttp_request *req, void *arg)
+{
+	if (req->response_code != HTTP_OK) {
+		fprintf(stderr, "FAILED\n");
+		exit(1);
+	}
+
+	if (evhttp_find_header(req->input_headers, "Date") == NULL) {
+		fprintf(stderr, "FAILED\n");
+		exit(1);
+	}
+
+	
+	if (evhttp_find_header(req->input_headers, "Content-Length") == NULL) {
+		fprintf(stderr, "FAILED\n");
+		exit(1);
+	}
+
+	if (strcmp(evhttp_find_header(req->input_headers, "Content-Length"),
+		"0")) {
+		fprintf(stderr, "FAILED\n");
+		exit(1);
+	}
+
+	if (EVBUFFER_LENGTH(req->input_buffer) != 0) {
+		fprintf(stderr, "FAILED\n");
+		exit(1);
+	}
+
+	test_ok = 1;
+	event_loopexit(NULL);
+}
+
+/*
+ * HTTP DISPATCHER test
+ */
+
+void
+http_dispatcher_cb(struct evhttp_request *req, void *arg)
+{
+
+	struct evbuffer *evb = evbuffer_new();
+	event_debug(("%s: called\n", __func__));
+	evbuffer_add_printf(evb, "DISPATCHER_TEST");
+
+	evhttp_send_reply(req, HTTP_OK, "Everything is fine", evb);
+
+	evbuffer_free(evb);
+}
+
+static void
+http_dispatcher_test_done(struct evhttp_request *req, void *arg)
+{
+	const char *what = "DISPATCHER_TEST";
+
+	if (req->response_code != HTTP_OK) {
+		fprintf(stderr, "FAILED\n");
+		exit(1);
+	}
+
+	if (evhttp_find_header(req->input_headers, "Content-Type") == NULL) {
+		fprintf(stderr, "FAILED (content type)\n");
+		exit(1);
+	}
+
+	if (EVBUFFER_LENGTH(req->input_buffer) != strlen(what)) {
+		fprintf(stderr, "FAILED (length %zu vs %zu)\n",
+		    EVBUFFER_LENGTH(req->input_buffer), strlen(what));
+		exit(1);
+	}
+	
+	if (memcmp(EVBUFFER_DATA(req->input_buffer), what, strlen(what)) != 0) {
+		fprintf(stderr, "FAILED (data)\n");
+		exit(1);
+	}
+
+	test_ok = 1;
+	event_loopexit(NULL);
+}
+
+static void
+http_dispatcher_test(void)
+{
+	short port = -1;
+	struct evhttp_connection *evcon = NULL;
+	struct evhttp_request *req = NULL;
+
+	test_ok = 0;
+	fprintf(stdout, "Testing HTTP Dispatcher: ");
+
+	http = http_setup(&port, NULL);
+
+	evcon = evhttp_connection_new("127.0.0.1", port);
+	if (evcon == NULL) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	/* also bind to local host */
+	evhttp_connection_set_local_address(evcon, "127.0.0.1");
+
+	/*
+	 * At this point, we want to schedule an HTTP GET request to the
+	 * server using our make request method.
+	 */
+
+	req = evhttp_request_new(http_dispatcher_test_done, NULL);
+	if (req == NULL) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	/* Add the information that we care about */
+	evhttp_add_header(req->output_headers, "Host", "somehost");
+	
+	if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/?arg=val") == -1) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	event_dispatch();
+
+	evhttp_connection_free(evcon);
+	evhttp_free(http);
+	
+	if (test_ok != 1) {
+		fprintf(stdout, "FAILED: %d\n", test_ok);
+		exit(1);
+	}
+	
+	fprintf(stdout, "OK\n");
+}
+
+/*
+ * HTTP POST test.
+ */
+
+void http_postrequest_done(struct evhttp_request *, void *);
+
+#define POST_DATA "Okay.  Not really printf"
+
+static void
+http_post_test(void)
+{
+	short port = -1;
+	struct evhttp_connection *evcon = NULL;
+	struct evhttp_request *req = NULL;
+
+	test_ok = 0;
+	fprintf(stdout, "Testing HTTP POST Request: ");
+
+	http = http_setup(&port, NULL);
+
+	evcon = evhttp_connection_new("127.0.0.1", port);
+	if (evcon == NULL) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	/*
+	 * At this point, we want to schedule an HTTP POST request to the
+	 * server using our make request method.
+	 */
+
+	req = evhttp_request_new(http_postrequest_done, NULL);
+	if (req == NULL) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	/* Add the information that we care about */
+	evhttp_add_header(req->output_headers, "Host", "somehost");
+	evbuffer_add_printf(req->output_buffer, POST_DATA);
+	
+	if (evhttp_make_request(evcon, req, EVHTTP_REQ_POST, "/postit") == -1) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	event_dispatch();
+
+	evhttp_connection_free(evcon);
+	evhttp_free(http);
+	
+	if (test_ok != 1) {
+		fprintf(stdout, "FAILED: %d\n", test_ok);
+		exit(1);
+	}
+	
+	fprintf(stdout, "OK\n");
+}
+
+void
+http_post_cb(struct evhttp_request *req, void *arg)
+{
+	struct evbuffer *evb;
+	event_debug(("%s: called\n", __func__));
+
+	/* Yes, we are expecting a post request */
+	if (req->type != EVHTTP_REQ_POST) {
+		fprintf(stdout, "FAILED (post type)\n");
+		exit(1);
+	}
+
+	if (EVBUFFER_LENGTH(req->input_buffer) != strlen(POST_DATA)) {
+		fprintf(stdout, "FAILED (length: %zu vs %zu)\n",
+		    EVBUFFER_LENGTH(req->input_buffer), strlen(POST_DATA));
+		exit(1);
+	}
+
+	if (memcmp(EVBUFFER_DATA(req->input_buffer), POST_DATA,
+		strlen(POST_DATA))) {
+		fprintf(stdout, "FAILED (data)\n");
+		fprintf(stdout, "Got :%s\n", EVBUFFER_DATA(req->input_buffer));
+		fprintf(stdout, "Want:%s\n", POST_DATA);
+		exit(1);
+	}
+	
+	evb = evbuffer_new();
+	evbuffer_add_printf(evb, "This is funny");
+
+	evhttp_send_reply(req, HTTP_OK, "Everything is fine", evb);
+
+	evbuffer_free(evb);
+}
+
+void
+http_postrequest_done(struct evhttp_request *req, void *arg)
+{
+	const char *what = "This is funny";
+
+	if (req == NULL) {
+		fprintf(stderr, "FAILED (timeout)\n");
+		exit(1);
+	}
+
+	if (req->response_code != HTTP_OK) {
+	
+		fprintf(stderr, "FAILED (response code)\n");
+		exit(1);
+	}
+
+	if (evhttp_find_header(req->input_headers, "Content-Type") == NULL) {
+		fprintf(stderr, "FAILED (content type)\n");
+		exit(1);
+	}
+
+	if (EVBUFFER_LENGTH(req->input_buffer) != strlen(what)) {
+		fprintf(stderr, "FAILED (length %zu vs %zu)\n",
+		    EVBUFFER_LENGTH(req->input_buffer), strlen(what));
+		exit(1);
+	}
+	
+	if (memcmp(EVBUFFER_DATA(req->input_buffer), what, strlen(what)) != 0) {
+		fprintf(stderr, "FAILED (data)\n");
+		exit(1);
+	}
+
+	test_ok = 1;
+	event_loopexit(NULL);
+}
+
+static void
+http_failure_readcb(struct bufferevent *bev, void *arg)
+{
+	const char *what = "400 Bad Request";
+	if (evbuffer_find(bev->input, (const unsigned char*) what, strlen(what)) != NULL) {
+		test_ok = 2;
+		bufferevent_disable(bev, EV_READ);
+		event_loopexit(NULL);
+	}
+}
+
+/*
+ * Testing that the HTTP server can deal with a malformed request.
+ */
+static void
+http_failure_test(void)
+{
+	struct bufferevent *bev;
+	int fd;
+	const char *http_request;
+	short port = -1;
+
+	test_ok = 0;
+	fprintf(stdout, "Testing Bad HTTP Request: ");
+
+	http = http_setup(&port, NULL);
+	
+	fd = http_connect("127.0.0.1", port);
+
+	/* Stupid thing to send a request */
+	bev = bufferevent_new(fd, http_failure_readcb, http_writecb,
+	    http_errorcb, NULL);
+
+	http_request = "illegal request\r\n";
+
+	bufferevent_write(bev, http_request, strlen(http_request));
+	
+	event_dispatch();
+
+	bufferevent_free(bev);
+	EVUTIL_CLOSESOCKET(fd);
+
+	evhttp_free(http);
+	
+	if (test_ok != 2) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+	
+	fprintf(stdout, "OK\n");
+}
+
+static void
+close_detect_done(struct evhttp_request *req, void *arg)
+{
+	struct timeval tv;
+	if (req == NULL || req->response_code != HTTP_OK) {
+	
+		fprintf(stderr, "FAILED\n");
+		exit(1);
+	}
+
+	test_ok = 1;
+
+	timerclear(&tv);
+	tv.tv_sec = 3;   /* longer than the http time out */
+
+	event_loopexit(&tv);
+}
+
+static void
+close_detect_launch(int fd, short what, void *arg)
+{
+	struct evhttp_connection *evcon = arg;
+	struct evhttp_request *req;
+
+	req = evhttp_request_new(close_detect_done, NULL);
+
+	/* Add the information that we care about */
+	evhttp_add_header(req->output_headers, "Host", "somehost");
+
+	/* We give ownership of the request to the connection */
+	if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/test") == -1) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+}
+
+static void
+close_detect_cb(struct evhttp_request *req, void *arg)
+{
+	struct evhttp_connection *evcon = arg;
+	struct timeval tv;
+
+	if (req != NULL && req->response_code != HTTP_OK) {
+	
+		fprintf(stderr, "FAILED\n");
+		exit(1);
+	}
+
+	timerclear(&tv);
+	tv.tv_sec = 3;   /* longer than the http time out */
+
+	/* launch a new request on the persistent connection in 3 seconds */
+	event_once(-1, EV_TIMEOUT, close_detect_launch, evcon, &tv);
+}
+
+
+static void
+http_close_detection(int with_delay)
+{
+	short port = -1;
+	struct evhttp_connection *evcon = NULL;
+	struct evhttp_request *req = NULL;
+	
+	test_ok = 0;
+	fprintf(stdout, "Testing Connection Close Detection%s: ",
+		with_delay ? " (with delay)" : "");
+
+	http = http_setup(&port, NULL);
+
+	/* 2 second timeout */
+	evhttp_set_timeout(http, 2);
+
+	evcon = evhttp_connection_new("127.0.0.1", port);
+	if (evcon == NULL) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	delayed_client = evcon;
+
+	/*
+	 * At this point, we want to schedule a request to the HTTP
+	 * server using our make request method.
+	 */
+
+	req = evhttp_request_new(close_detect_cb, evcon);
+
+	/* Add the information that we care about */
+	evhttp_add_header(req->output_headers, "Host", "somehost");
+
+	/* We give ownership of the request to the connection */
+	if (evhttp_make_request(evcon,
+	    req, EVHTTP_REQ_GET, with_delay ? "/largedelay" : "/test") == -1) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	event_dispatch();
+
+	if (test_ok != 1) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	/* at this point, the http server should have no connection */
+	if (TAILQ_FIRST(&http->connections) != NULL) {
+		fprintf(stdout, "FAILED (left connections)\n");
+		exit(1);
+	}
+
+	evhttp_connection_free(evcon);
+	evhttp_free(http);
+	
+	fprintf(stdout, "OK\n");
+}
+
+static void
+http_highport_test(void)
+{
+	int i = -1;
+	struct evhttp *myhttp = NULL;
+ 
+	fprintf(stdout, "Testing HTTP Server with high port: ");
+
+	/* Try a few different ports */
+	for (i = 0; i < 50; ++i) {
+		myhttp = evhttp_start("127.0.0.1", 65535 - i);
+		if (myhttp != NULL) {
+			fprintf(stdout, "OK\n");
+			evhttp_free(myhttp);
+			return;
+		}
+	}
+
+	fprintf(stdout, "FAILED\n");
+	exit(1);
+}
+
+static void
+http_bad_header_test(void)
+{
+	struct evkeyvalq headers;
+
+	fprintf(stdout, "Testing HTTP Header filtering: ");
+
+	TAILQ_INIT(&headers);
+
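+	/* evhttp_add_header() must reject keys or values that contain a
+	 * bare CR or LF (header injection) while still accepting a folded
+	 * continuation ("\r\n Three") inside a value. */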
+	if (evhttp_add_header(&headers, "One", "Two") != 0)
+		goto fail;
+	
+	if (evhttp_add_header(&headers, "One\r", "Two") != -1)
+		goto fail;
+	if (evhttp_add_header(&headers, "One", "Two") != 0)
+		goto fail;
+	if (evhttp_add_header(&headers, "One", "Two\r\n Three") != 0)
+		goto fail;
+	if (evhttp_add_header(&headers, "One\r", "Two") != -1)
+		goto fail;
+	if (evhttp_add_header(&headers, "One\n", "Two") != -1)
+		goto fail;
+	if (evhttp_add_header(&headers, "One", "Two\r") != -1)
+		goto fail;
+	if (evhttp_add_header(&headers, "One", "Two\n") != -1)
+		goto fail;
+
+	evhttp_clear_headers(&headers);
+
+	fprintf(stdout, "OK\n");
+	return;
+fail:
+	fprintf(stdout, "FAILED\n");
+	exit(1);
+}
+
+static int validate_header(
+	const struct evkeyvalq* headers,
+	const char *key, const char *value) 
+{
+	const char *real_val = evhttp_find_header(headers, key);
+	if (real_val == NULL)
+		return (-1);
+	if (strcmp(real_val, value) != 0)
+		return (-1);
+	return (0);
+}
+
+static void
+http_parse_query_test(void)
+{
+	struct evkeyvalq headers;
+
+	fprintf(stdout, "Testing HTTP query parsing: ");
+
+	TAILQ_INIT(&headers);
+	
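+	/* evhttp_parse_query() splits the query string into key/value
+	 * pairs, decoding '+' as a space and %XX escapes as raw bytes. */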
+	evhttp_parse_query("http://www.test.com/?q=test", &headers);
+	if (validate_header(&headers, "q", "test") != 0)
+		goto fail;
+	evhttp_clear_headers(&headers);
+
+	evhttp_parse_query("http://www.test.com/?q=test&foo=bar", &headers);
+	if (validate_header(&headers, "q", "test") != 0)
+		goto fail;
+	if (validate_header(&headers, "foo", "bar") != 0)
+		goto fail;
+	evhttp_clear_headers(&headers);
+
+	evhttp_parse_query("http://www.test.com/?q=test+foo", &headers);
+	if (validate_header(&headers, "q", "test foo") != 0)
+		goto fail;
+	evhttp_clear_headers(&headers);
+
+	evhttp_parse_query("http://www.test.com/?q=test%0Afoo", &headers);
+	if (validate_header(&headers, "q", "test\nfoo") != 0)
+		goto fail;
+	evhttp_clear_headers(&headers);
+
+	evhttp_parse_query("http://www.test.com/?q=test%0Dfoo", &headers);
+	if (validate_header(&headers, "q", "test\rfoo") != 0)
+		goto fail;
+	evhttp_clear_headers(&headers);
+
+	fprintf(stdout, "OK\n");
+	return;
+fail:
+	fprintf(stdout, "FAILED\n");
+	exit(1);
+}
+
+static void
+http_base_test(void)
+{
+	struct bufferevent *bev;
+	int fd;
+	const char *http_request;
+	short port = -1;
+
+	test_ok = 0;
+	fprintf(stdout, "Testing HTTP Server Event Base: ");
+
+	base = event_init();
+
+	/* 
+	 * create another bogus base - which is being used by all subsequent
+	 * tests - yuck!
+	 */
+	event_init();
+
+	http = http_setup(&port, base);
+	
+	fd = http_connect("127.0.0.1", port);
+
+	/* Stupid thing to send a request */
+	bev = bufferevent_new(fd, http_readcb, http_writecb,
+	    http_errorcb, NULL);
+	bufferevent_base_set(base, bev);
+
+	http_request =
+	    "GET /test HTTP/1.1\r\n"
+	    "Host: somehost\r\n"
+	    "Connection: close\r\n"
+	    "\r\n";
+
+	bufferevent_write(bev, http_request, strlen(http_request));
+	
+	event_base_dispatch(base);
+
+	bufferevent_free(bev);
+	EVUTIL_CLOSESOCKET(fd);
+
+	evhttp_free(http);
+
+	event_base_free(base);
+	base = NULL;
+	
+	if (test_ok != 2) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+	
+	fprintf(stdout, "OK\n");
+}
+
+/*
+ * the server is going to reply with chunked data.
+ */
+
+static void
+http_chunked_readcb(struct bufferevent *bev, void *arg)
+{
+	/* nothing here */
+}
+
+static void
+http_chunked_errorcb(struct bufferevent *bev, short what, void *arg)
+{
+	if (!test_ok)
+		goto out;
+
+	test_ok = -1;
+
+	if ((what & EVBUFFER_EOF) != 0) {
+		struct evhttp_request *req = evhttp_request_new(NULL, NULL);
+		const char *header;
+		enum message_read_status done;
+		
+		req->kind = EVHTTP_RESPONSE;
+		done = evhttp_parse_firstline(req, EVBUFFER_INPUT(bev));
+		if (done != ALL_DATA_READ)
+			goto out;
+
+		done = evhttp_parse_headers(req, EVBUFFER_INPUT(bev));
+		if (done != ALL_DATA_READ)
+			goto out;
+
+		header = evhttp_find_header(req->input_headers, "Transfer-Encoding");
+		if (header == NULL || strcmp(header, "chunked"))
+			goto out;
+
+		header = evhttp_find_header(req->input_headers, "Connection");
+		if (header == NULL || strcmp(header, "close"))
+			goto out;
+
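+		/* chunk sizes arrive as hex ASCII on their own line:
+		 * "d" = 13, "12" = 18, "8" = 8, and "0" ends the body. */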
+		header = evbuffer_readline(EVBUFFER_INPUT(bev));
+		if (header == NULL)
+			goto out;
+		/* 13 chars */
+		if (strcmp(header, "d"))
+			goto out;
+		free((char*)header);
+
+		if (strncmp((char *)EVBUFFER_DATA(EVBUFFER_INPUT(bev)),
+			"This is funny", 13))
+			goto out;
+
+		evbuffer_drain(EVBUFFER_INPUT(bev), 13 + 2);
+
+		header = evbuffer_readline(EVBUFFER_INPUT(bev));
+		if (header == NULL)
+			goto out;
+		/* 18 chars */
+		if (strcmp(header, "12"))
+			goto out;
+		free((char *)header);
+
+		if (strncmp((char *)EVBUFFER_DATA(EVBUFFER_INPUT(bev)),
+			"but not hilarious.", 18))
+			goto out;
+
+		evbuffer_drain(EVBUFFER_INPUT(bev), 18 + 2);
+
+		header = evbuffer_readline(EVBUFFER_INPUT(bev));
+		if (header == NULL)
+			goto out;
+		/* 8 chars */
+		if (strcmp(header, "8"))
+			goto out;
+		free((char *)header);
+
+		if (strncmp((char *)EVBUFFER_DATA(EVBUFFER_INPUT(bev)),
+			"bwv 1052.", 8))
+			goto out;
+
+		evbuffer_drain(EVBUFFER_INPUT(bev), 8 + 2);
+
+		header = evbuffer_readline(EVBUFFER_INPUT(bev));
+		if (header == NULL)
+			goto out;
+		/* 0 chars */
+		if (strcmp(header, "0"))
+			goto out;
+		free((char *)header);
+
+		test_ok = 2;
+	}
+
+out:
+	event_loopexit(NULL);
+}
+
+static void
+http_chunked_writecb(struct bufferevent *bev, void *arg)
+{
+	if (EVBUFFER_LENGTH(EVBUFFER_OUTPUT(bev)) == 0) {
+		/* enable reading of the reply */
+		bufferevent_enable(bev, EV_READ);
+		test_ok++;
+	}
+}
+
+static void
+http_chunked_request_done(struct evhttp_request *req, void *arg)
+{
+	if (req->response_code != HTTP_OK) {
+		fprintf(stderr, "FAILED\n");
+		exit(1);
+	}
+
+	if (evhttp_find_header(req->input_headers,
+		"Transfer-Encoding") == NULL) {
+		fprintf(stderr, "FAILED\n");
+		exit(1);
+	}
+
+	if (EVBUFFER_LENGTH(req->input_buffer) != 13 + 18 + 8) {
+		fprintf(stderr, "FAILED\n");
+		exit(1);
+	}
+
+	if (strncmp((char *)EVBUFFER_DATA(req->input_buffer),
+		"This is funnybut not hilarious.bwv 1052",
+		13 + 18 + 8)) {
+		fprintf(stderr, "FAILED\n");
+		exit(1);
+	}
+	
+	test_ok = 1;
+	event_loopexit(NULL);
+}
+
+static void
+http_chunked_test(void)
+{
+	struct bufferevent *bev;
+	int fd;
+	const char *http_request;
+	short port = -1;
+	struct timeval tv_start, tv_end;
+	struct evhttp_connection *evcon = NULL;
+	struct evhttp_request *req = NULL;
+	int i;
+
+	test_ok = 0;
+	fprintf(stdout, "Testing Chunked HTTP Reply: ");
+
+	http = http_setup(&port, NULL);
+
+	fd = http_connect("127.0.0.1", port);
+
+	/* Stupid thing to send a request */
+	bev = bufferevent_new(fd, 
+	    http_chunked_readcb, http_chunked_writecb,
+	    http_chunked_errorcb, NULL);
+
+	http_request =
+	    "GET /chunked HTTP/1.1\r\n"
+	    "Host: somehost\r\n"
+	    "Connection: close\r\n"
+	    "\r\n";
+
+	bufferevent_write(bev, http_request, strlen(http_request));
+
+	evutil_gettimeofday(&tv_start, NULL);
+	
+	event_dispatch();
+
+	evutil_gettimeofday(&tv_end, NULL);
+	evutil_timersub(&tv_end, &tv_start, &tv_end);
+
+	if (tv_end.tv_sec >= 1) {
+		fprintf(stdout, "FAILED (time)\n");
+		exit (1);
+	}
+
+
+	if (test_ok != 2) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	/* now try again with the regular connection object */
+	evcon = evhttp_connection_new("127.0.0.1", port);
+	if (evcon == NULL) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	/* make two requests to check the keepalive behavior */
+	for (i = 0; i < 2; i++) {
+		test_ok = 0;
+		req = evhttp_request_new(http_chunked_request_done, NULL);
+
+		/* Add the information that we care about */
+		evhttp_add_header(req->output_headers, "Host", "somehost");
+
+		/* We give ownership of the request to the connection */
+		if (evhttp_make_request(evcon, req,
+			EVHTTP_REQ_GET, "/chunked") == -1) {
+			fprintf(stdout, "FAILED\n");
+			exit(1);
+		}
+
+		event_dispatch();
+
+		if (test_ok != 1) {
+			fprintf(stdout, "FAILED\n");
+			exit(1);
+		}
+	}
+
+	evhttp_connection_free(evcon);
+	evhttp_free(http);
+	
+	fprintf(stdout, "OK\n");
+}
+
+static void
+http_multi_line_header_test(void)
+{
+	struct bufferevent *bev;
+	int fd;
+	const char *http_start_request;
+	short port = -1;
+	
+	test_ok = 0;
+	fprintf(stdout, "Testing HTTP Server with multi line: ");
+
+	http = http_setup(&port, NULL);
+	
+	fd = http_connect("127.0.0.1", port);
+
+	/* Stupid thing to send a request */
+	bev = bufferevent_new(fd, http_readcb, http_writecb,
+	    http_errorcb, NULL);
+
+	http_start_request =
+	    "GET /test HTTP/1.1\r\n"
+	    "Host: somehost\r\n"
+	    "Connection: close\r\n"
+	    "X-Multi:  aaaaaaaa\r\n"
+	    " a\r\n"
+	    "\tEND\r\n"
+	    "X-Last: last\r\n"
+	    "\r\n";
+		
+	bufferevent_write(bev, http_start_request, strlen(http_start_request));
+
+	event_dispatch();
+	
+	bufferevent_free(bev);
+	EVUTIL_CLOSESOCKET(fd);
+
+	evhttp_free(http);
+
+	if (test_ok != 4) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+	
+	fprintf(stdout, "OK\n");
+}
+
+static void
+http_request_bad(struct evhttp_request *req, void *arg)
+{
+	if (req != NULL) {
+		fprintf(stderr, "FAILED\n");
+		exit(1);
+	}
+
+	test_ok = 1;
+	event_loopexit(NULL);
+}
+
+static void
+http_negative_content_length_test(void)
+{
+	short port = -1;
+	struct evhttp_connection *evcon = NULL;
+	struct evhttp_request *req = NULL;
+	
+	test_ok = 0;
+	fprintf(stdout, "Testing HTTP Negative Content Length: ");
+
+	http = http_setup(&port, NULL);
+
+	evcon = evhttp_connection_new("127.0.0.1", port);
+	if (evcon == NULL) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	/*
+	 * At this point, we want to schedule a request to the HTTP
+	 * server using our make request method.
+	 */
+
+	req = evhttp_request_new(http_request_bad, NULL);
+
+	/* Cause the response to have a negative content-length */
+	evhttp_add_header(req->output_headers, "X-Negative", "makeitso");
+
+	/* We give ownership of the request to the connection */
+	if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/test") == -1) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	event_dispatch();
+
+	evhttp_free(http);
+
+	if (test_ok != 1) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	fprintf(stdout, "OK\n");
+}
+
+/*
+ * Testing client reset of server chunked connections
+ */
+
+struct terminate_state {
+	struct evhttp_request *req;
+	struct bufferevent *bev;
+	int fd;
+} terminate_state;
+
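+/* The server trickles a chunk every 3ms; once the client has torn down
+ * its side, evhttp_request_get_connection() returns NULL, which is the
+ * condition this test is looking for. */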
+static void
+terminate_chunked_trickle_cb(int fd, short events, void *arg)
+{
+	struct terminate_state *state = arg;
+	struct evbuffer *evb = evbuffer_new();
+	struct timeval tv;
+
+	if (evhttp_request_get_connection(state->req) == NULL) {
+		test_ok = 1;
+		evhttp_request_free(state->req);
+		event_loopexit(NULL);
+		return;
+	}
+
+	evbuffer_add_printf(evb, "%p", evb);
+	evhttp_send_reply_chunk(state->req, evb);
+	evbuffer_free(evb);
+
+	tv.tv_sec = 0;
+	tv.tv_usec = 3000;
+	event_once(-1, EV_TIMEOUT, terminate_chunked_trickle_cb, arg, &tv);
+}
+
+static void
+terminate_chunked_cb(struct evhttp_request *req, void *arg)
+{
+	struct terminate_state *state = arg;
+	struct timeval tv;
+
+	state->req = req;
+
+	evhttp_send_reply_start(req, HTTP_OK, "OK");
+
+	tv.tv_sec = 0;
+	tv.tv_usec = 3000;
+	event_once(-1, EV_TIMEOUT, terminate_chunked_trickle_cb, arg, &tv);
+}
+
+static void
+terminate_chunked_client(int fd, short event, void *arg)
+{
+	struct terminate_state *state = arg;
+	bufferevent_free(state->bev);
+	EVUTIL_CLOSESOCKET(state->fd);
+}
+
+static void
+terminate_readcb(struct bufferevent *bev, void *arg)
+{
+	/* just drop the incoming data */
+	evbuffer_drain(bev->input, -1);
+}
+
+
+static void
+http_terminate_chunked_test(void)
+{
+	struct bufferevent *bev = NULL;
+	struct timeval tv;
+	const char *http_request;
+	short port = -1;
+	int fd = -1;
+
+	test_ok = 0;
+	fprintf(stdout, "Testing Terminated Chunked Connection: ");
+
+	http = http_setup(&port, NULL);
+	evhttp_del_cb(http, "/test");
+	evhttp_set_cb(http, "/test", terminate_chunked_cb, &terminate_state);
+
+	fd = http_connect("127.0.0.1", port);
+
+	/* Stupid thing to send a request */
+	bev = bufferevent_new(fd, terminate_readcb, http_writecb,
+	    http_errorcb, NULL);
+
+	terminate_state.fd = fd;
+	terminate_state.bev = bev;
+
+	/* first half of the http request */
+	http_request =
+	    "GET /test HTTP/1.1\r\n"
+	    "Host: some\r\n\r\n";
+
+	bufferevent_write(bev, http_request, strlen(http_request));
+	evutil_timerclear(&tv);
+	tv.tv_usec = 10000;
+	event_once(-1, EV_TIMEOUT, terminate_chunked_client, &terminate_state,
+	    &tv);
+
+	event_dispatch();
+
+	if (test_ok != 1) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	fprintf(stdout, "OK\n");
+
+	if (fd >= 0)
+		EVUTIL_CLOSESOCKET(fd);
+	if (http)
+		evhttp_free(http);
+}
+
+void
+http_suite(void)
+{
+	http_base_test();
+	http_bad_header_test();
+	http_parse_query_test();
+	http_basic_test();
+	http_connection_test(0 /* not-persistent */);
+	http_connection_test(1 /* persistent */);
+	http_close_detection(0 /* without delay */);
+	http_close_detection(1 /* with delay */);
+	http_bad_request();
+	http_post_test();
+	http_failure_test();
+	http_highport_test();
+	http_dispatcher_test();
+
+	http_multi_line_header_test();
+	http_negative_content_length_test();
+
+	http_chunked_test();
+	http_terminate_chunked_test();
+}
diff --git a/base/third_party/libevent/test/regress_rpc.c b/base/third_party/libevent/test/regress_rpc.c
new file mode 100644
index 0000000..7609347
--- /dev/null
+++ b/base/third_party/libevent/test/regress_rpc.c
@@ -0,0 +1,631 @@
+/*
+ * Copyright (c) 2003-2006 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef WIN32
+#include <winsock2.h>
+#include <windows.h>
+#endif
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#include <sys/queue.h>
+#ifndef WIN32
+#include <sys/socket.h>
+#include <signal.h>
+#include <unistd.h>
+#include <netdb.h>
+#endif
+#include <fcntl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <assert.h>
+
+#include "event.h"
+#include "evhttp.h"
+#include "log.h"
+#include "evrpc.h"
+
+#include "regress.gen.h"
+
+void rpc_suite(void);
+
+extern int test_ok;
+
+static struct evhttp *
+http_setup(short *pport)
+{
+	int i;
+	struct evhttp *myhttp;
+	short port = -1;
+
+	/* Try a few different ports */
+	for (i = 0; i < 50; ++i) {
+		myhttp = evhttp_start("127.0.0.1", 8080 + i);
+		if (myhttp != NULL) {
+			port = 8080 + i;
+			break;
+		}
+	}
+
+	if (port == -1)
+		event_errx(1, "Could not start web server");
+
+	*pport = port;
+	return (myhttp);
+}
+
+EVRPC_HEADER(Message, msg, kill);
+EVRPC_HEADER(NeverReply, msg, kill);
+
+EVRPC_GENERATE(Message, msg, kill);
+EVRPC_GENERATE(NeverReply, msg, kill);
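+
+/* EVRPC_HEADER() declares and EVRPC_GENERATE() instantiates the client
+ * and server glue for an RPC that takes a "msg" request and fills in a
+ * "kill" reply; the types come from regress.gen.h, generated from
+ * regress.rpc. */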
+
+static int need_input_hook = 0;
+static int need_output_hook = 0;
+
+static void
+MessageCb(EVRPC_STRUCT(Message)* rpc, void *arg)
+{
+	struct kill* kill_reply = rpc->reply;
+
+	if (need_input_hook) {
+		struct evhttp_request* req = EVRPC_REQUEST_HTTP(rpc);
+		const char *header = evhttp_find_header(
+			req->input_headers, "X-Hook");
+		assert(strcmp(header, "input") == 0);
+	}
+
+	/* we just want to fill in some nonsense */
+	EVTAG_ASSIGN(kill_reply, weapon, "dagger");
+	EVTAG_ASSIGN(kill_reply, action, "wave around like an idiot");
+
+	/* complete the RPC and send the reply back */
+	EVRPC_REQUEST_DONE(rpc);
+}
+
+static EVRPC_STRUCT(NeverReply) *saved_rpc;
+
+static void
+NeverReplyCb(EVRPC_STRUCT(NeverReply)* rpc, void *arg)
+{
+	test_ok += 1;
+	saved_rpc = rpc;
+}
+
+static void
+rpc_setup(struct evhttp **phttp, short *pport, struct evrpc_base **pbase)
+{
+	short port;
+	struct evhttp *http = NULL;
+	struct evrpc_base *base = NULL;
+
+	http = http_setup(&port);
+	base = evrpc_init(http);
+	
+	EVRPC_REGISTER(base, Message, msg, kill, MessageCb, NULL);
+	EVRPC_REGISTER(base, NeverReply, msg, kill, NeverReplyCb, NULL);
+
+	*phttp = http;
+	*pport = port;
+	*pbase = base;
+
+	need_input_hook = 0;
+	need_output_hook = 0;
+}
+
+static void
+rpc_teardown(struct evrpc_base *base)
+{
+	assert(EVRPC_UNREGISTER(base, Message) == 0);
+	assert(EVRPC_UNREGISTER(base, NeverReply) == 0);
+
+	evrpc_free(base);
+}
+
+static void
+rpc_postrequest_failure(struct evhttp_request *req, void *arg)
+{
+	if (req->response_code != HTTP_SERVUNAVAIL) {
+	
+		fprintf(stderr, "FAILED (response code)\n");
+		exit(1);
+	}
+
+	test_ok = 1;
+	event_loopexit(NULL);
+}
+
+/*
+ * Test a malformed payload submitted as an RPC
+ */
+
+static void
+rpc_basic_test(void)
+{
+	short port;
+	struct evhttp *http = NULL;
+	struct evrpc_base *base = NULL;
+	struct evhttp_connection *evcon = NULL;
+	struct evhttp_request *req = NULL;
+
+	fprintf(stdout, "Testing Basic RPC Support: ");
+
+	rpc_setup(&http, &port, &base);
+
+	evcon = evhttp_connection_new("127.0.0.1", port);
+	if (evcon == NULL) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	/*
+	 * At this point, we want to schedule an HTTP POST request
+	 * to the server using evhttp_make_request().
+	 */
+
+	req = evhttp_request_new(rpc_postrequest_failure, NULL);
+	if (req == NULL) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	/* Add the information that we care about */
+	evhttp_add_header(req->output_headers, "Host", "somehost");
+	evbuffer_add_printf(req->output_buffer, "Some Nonsense");
+	
+	if (evhttp_make_request(evcon, req,
+		EVHTTP_REQ_POST,
+		"/.rpc.Message") == -1) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	test_ok = 0;
+
+	event_dispatch();
+
+	evhttp_connection_free(evcon);
+
+	rpc_teardown(base);
+	
+	if (test_ok != 1) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	fprintf(stdout, "OK\n");
+
+	evhttp_free(http);
+}
+
+static void
+rpc_postrequest_done(struct evhttp_request *req, void *arg)
+{
+	struct kill* kill_reply = NULL;
+
+	if (req->response_code != HTTP_OK) {
+		fprintf(stderr, "FAILED (response code)\n");
+		exit(1);
+	}
+
+	kill_reply = kill_new();
+
+	if ((kill_unmarshal(kill_reply, req->input_buffer)) == -1) {
+		fprintf(stderr, "FAILED (unmarshal)\n");
+		exit(1);
+	}
+	
+	kill_free(kill_reply);
+
+	test_ok = 1;
+	event_loopexit(NULL);
+}
+
+static void
+rpc_basic_message(void)
+{
+	short port;
+	struct evhttp *http = NULL;
+	struct evrpc_base *base = NULL;
+	struct evhttp_connection *evcon = NULL;
+	struct evhttp_request *req = NULL;
+	struct msg *msg;
+
+	fprintf(stdout, "Testing Good RPC Post: ");
+
+	rpc_setup(&http, &port, &base);
+
+	evcon = evhttp_connection_new("127.0.0.1", port);
+	if (evcon == NULL) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	/*
+	 * At this point, we want to schedule an HTTP POST request
+	 * to the server using evhttp_make_request().
+	 */
+
+	req = evhttp_request_new(rpc_postrequest_done, NULL);
+	if (req == NULL) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	/* Add the information that we care about */
+	evhttp_add_header(req->output_headers, "Host", "somehost");
+
+	/* set up the basic message */
+	msg = msg_new();
+	EVTAG_ASSIGN(msg, from_name, "niels");
+	EVTAG_ASSIGN(msg, to_name, "tester");
+	msg_marshal(req->output_buffer, msg);
+	msg_free(msg);
+
+	if (evhttp_make_request(evcon, req,
+		EVHTTP_REQ_POST,
+		"/.rpc.Message") == -1) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	test_ok = 0;
+
+	event_dispatch();
+
+	evhttp_connection_free(evcon);
+	
+	rpc_teardown(base);
+	
+	if (test_ok != 1) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	fprintf(stdout, "OK\n");
+
+	evhttp_free(http);
+}
+
+static struct evrpc_pool *
+rpc_pool_with_connection(short port)
+{
+	struct evhttp_connection *evcon;
+	struct evrpc_pool *pool;
+
+	pool = evrpc_pool_new(NULL);
+	assert(pool != NULL);
+
+	evcon = evhttp_connection_new("127.0.0.1", port);
+	assert(evcon != NULL);
+
+	evrpc_pool_add_connection(pool, evcon);
+	
+	return (pool);
+}
+
+static void
+GotKillCb(struct evrpc_status *status,
+    struct msg *msg, struct kill *kill, void *arg)
+{
+	char *weapon;
+	char *action;
+
+	if (need_output_hook) {
+		struct evhttp_request *req = status->http_req;
+		const char *header = evhttp_find_header(
+			req->input_headers, "X-Pool-Hook");
+		assert(strcmp(header, "ran") == 0);
+	}
+
+	if (status->error != EVRPC_STATUS_ERR_NONE)
+		goto done;
+
+	if (EVTAG_GET(kill, weapon, &weapon) == -1) {
+		fprintf(stderr, "get weapon\n");
+		goto done;
+	}
+	if (EVTAG_GET(kill, action, &action) == -1) {
+		fprintf(stderr, "get action\n");
+		goto done;
+	}
+
+	if (strcmp(weapon, "dagger"))
+		goto done;
+
+	if (strcmp(action, "wave around like an idiot"))
+		goto done;
+
+	test_ok += 1;
+
+done:
+	event_loopexit(NULL);
+}
+
+static void
+GotKillCbTwo(struct evrpc_status *status,
+    struct msg *msg, struct kill *kill, void *arg)
+{
+	char *weapon;
+	char *action;
+
+	if (status->error != EVRPC_STATUS_ERR_NONE)
+		goto done;
+
+	if (EVTAG_GET(kill, weapon, &weapon) == -1) {
+		fprintf(stderr, "get weapon\n");
+		goto done;
+	}
+	if (EVTAG_GET(kill, action, &action) == -1) {
+		fprintf(stderr, "get action\n");
+		goto done;
+	}
+
+	if (strcmp(weapon, "dagger"))
+		goto done;
+
+	if (strcmp(action, "wave around like an idiot"))
+		goto done;
+
+	test_ok += 1;
+
+done:
+	if (test_ok == 2)
+		event_loopexit(NULL);
+}
+
+static int
+rpc_hook_add_header(struct evhttp_request *req,
+    struct evbuffer *evbuf, void *arg)
+{
+	const char *hook_type = arg;
+	if (strcmp("input", hook_type) == 0)
+		evhttp_add_header(req->input_headers, "X-Hook", hook_type);
+	else 
+		evhttp_add_header(req->output_headers, "X-Hook", hook_type);
+	return (0);
+}
+
+static int
+rpc_hook_remove_header(struct evhttp_request *req,
+    struct evbuffer *evbuf, void *arg)
+{
+	const char *header = evhttp_find_header(req->input_headers, "X-Hook");
+	assert(header != NULL);
+	assert(strcmp(header, arg) == 0);
+	evhttp_remove_header(req->input_headers, "X-Hook");
+	evhttp_add_header(req->input_headers, "X-Pool-Hook", "ran");
+
+	return (0);
+}
+
+static void
+rpc_basic_client(void)
+{
+	short port;
+	struct evhttp *http = NULL;
+	struct evrpc_base *base = NULL;
+	struct evrpc_pool *pool = NULL;
+	struct msg *msg;
+	struct kill *kill;
+
+	fprintf(stdout, "Testing RPC Client: ");
+
+	rpc_setup(&http, &port, &base);
+
+	need_input_hook = 1;
+	need_output_hook = 1;
+
+	assert(evrpc_add_hook(base, EVRPC_INPUT, rpc_hook_add_header, (void*)"input")
+	    != NULL);
+	assert(evrpc_add_hook(base, EVRPC_OUTPUT, rpc_hook_add_header, (void*)"output")
+	    != NULL);
+
+	pool = rpc_pool_with_connection(port);
+
+	assert(evrpc_add_hook(pool, EVRPC_INPUT, rpc_hook_remove_header, (void*)"output"));
+
+	/* set up the basic message */
+	msg = msg_new();
+	EVTAG_ASSIGN(msg, from_name, "niels");
+	EVTAG_ASSIGN(msg, to_name, "tester");
+
+	kill = kill_new();
+
+	EVRPC_MAKE_REQUEST(Message, pool, msg, kill,  GotKillCb, NULL);
+
+	test_ok = 0;
+
+	event_dispatch();
+	
+	if (test_ok != 1) {
+		fprintf(stdout, "FAILED (1)\n");
+		exit(1);
+	}
+
+	/* we do it twice to make sure that reuse works correctly */
+	kill_clear(kill);
+
+	EVRPC_MAKE_REQUEST(Message, pool, msg, kill,  GotKillCb, NULL);
+
+	event_dispatch();
+	
+	rpc_teardown(base);
+	
+	if (test_ok != 2) {
+		fprintf(stdout, "FAILED (2)\n");
+		exit(1);
+	}
+
+	fprintf(stdout, "OK\n");
+
+	msg_free(msg);
+	kill_free(kill);
+
+	evrpc_pool_free(pool);
+	evhttp_free(http);
+}
+
+/*
+ * We are testing that the second request gets sent over the same
+ * connection after the first RPC completes.
+ */
+static void
+rpc_basic_queued_client(void)
+{
+	short port;
+	struct evhttp *http = NULL;
+	struct evrpc_base *base = NULL;
+	struct evrpc_pool *pool = NULL;
+	struct msg *msg;
+	struct kill *kill_one, *kill_two;
+
+	fprintf(stdout, "Testing RPC (Queued) Client: ");
+
+	rpc_setup(&http, &port, &base);
+
+	pool = rpc_pool_with_connection(port);
+
+	/* set up the basic message */
+	msg = msg_new();
+	EVTAG_ASSIGN(msg, from_name, "niels");
+	EVTAG_ASSIGN(msg, to_name, "tester");
+
+	kill_one = kill_new();
+	kill_two = kill_new();
+
+	EVRPC_MAKE_REQUEST(Message, pool, msg, kill_one,  GotKillCbTwo, NULL);
+	EVRPC_MAKE_REQUEST(Message, pool, msg, kill_two,  GotKillCb, NULL);
+
+	test_ok = 0;
+
+	event_dispatch();
+	
+	rpc_teardown(base);
+	
+	if (test_ok != 2) {
+		fprintf(stdout, "FAILED (1)\n");
+		exit(1);
+	}
+
+	fprintf(stdout, "OK\n");
+
+	msg_free(msg);
+	kill_free(kill_one);
+	kill_free(kill_two);
+
+	evrpc_pool_free(pool);
+	evhttp_free(http);
+}
+
+static void
+GotErrorCb(struct evrpc_status *status,
+    struct msg *msg, struct kill *kill, void *arg)
+{
+	if (status->error != EVRPC_STATUS_ERR_TIMEOUT)
+		goto done;
+
+	/* it should never be complete, but check just in case */
+	if (kill_complete(kill) == 0)
+		goto done;
+
+	test_ok += 1;
+
+done:
+	event_loopexit(NULL);
+}
+
+static void
+rpc_client_timeout(void)
+{
+	short port;
+	struct evhttp *http = NULL;
+	struct evrpc_base *base = NULL;
+	struct evrpc_pool *pool = NULL;
+	struct msg *msg;
+	struct kill *kill;
+
+	fprintf(stdout, "Testing RPC Client Timeout: ");
+
+	rpc_setup(&http, &port, &base);
+
+	pool = rpc_pool_with_connection(port);
+
+	/* set the timeout to 5 seconds */
+	evrpc_pool_set_timeout(pool, 5);
+
+	/* set up the basic message */
+	msg = msg_new();
+	EVTAG_ASSIGN(msg, from_name, "niels");
+	EVTAG_ASSIGN(msg, to_name, "tester");
+
+	kill = kill_new();
+
+	EVRPC_MAKE_REQUEST(NeverReply, pool, msg, kill, GotErrorCb, NULL);
+
+	test_ok = 0;
+
+	event_dispatch();
+	
+	/* free the saved RPC structure up */
+	EVRPC_REQUEST_DONE(saved_rpc);
+
+	rpc_teardown(base);
+	
+	if (test_ok != 2) {
+		fprintf(stdout, "FAILED (1)\n");
+		exit(1);
+	}
+
+	fprintf(stdout, "OK\n");
+
+	msg_free(msg);
+	kill_free(kill);
+
+	evrpc_pool_free(pool);
+	evhttp_free(http);
+}
+
+void
+rpc_suite(void)
+{
+	rpc_basic_test();
+	rpc_basic_message();
+	rpc_basic_client();
+	rpc_basic_queued_client();
+	rpc_client_timeout();
+}
diff --git a/base/third_party/libevent/test/test-eof.c b/base/third_party/libevent/test/test-eof.c
new file mode 100644
index 0000000..3264a7b
--- /dev/null
+++ b/base/third_party/libevent/test/test-eof.c
@@ -0,0 +1,86 @@
+/*
+ * Compile with:
+ * cc -I/usr/local/include -o test-eof test-eof.c -L/usr/local/lib -levent
+ */
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+
+#ifdef WIN32
+#include <winsock2.h>
+#endif
+#include <sys/types.h>
+#include <sys/stat.h>
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#ifdef HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+#include <fcntl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#include <errno.h>
+
+#include <event.h>
+#include <evutil.h>
+
+int test_okay = 1;
+int called = 0;
+
+static void
+read_cb(int fd, short event, void *arg)
+{
+	char buf[256];
+	int len;
+
+	len = recv(fd, buf, sizeof(buf), 0);
+
+	printf("%s: read %d%s\n", __func__,
+	    len, len ? "" : " - means EOF");
+
+	if (len) {
+		if (!called)
+			event_add(arg, NULL);
+	} else if (called == 1)
+		test_okay = 0;
+
+	called++;
+}
+
+#ifndef SHUT_WR
+#define SHUT_WR 1
+#endif
+
+int
+main (int argc, char **argv)
+{
+	struct event ev;
+	const char *test = "test string";
+	int pair[2];
+
+	if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, pair) == -1)
+		return (1);
+
+	
+	send(pair[0], test, strlen(test)+1, 0);
+	shutdown(pair[0], SHUT_WR);
+
+	/* Initialize the event library */
+	event_init();
+
+	/* Initialize one event */
+	event_set(&ev, pair[1], EV_READ, read_cb, &ev);
+
+	event_add(&ev, NULL);
+
+	event_dispatch();
+
+	return (test_okay);
+}
+
diff --git a/base/third_party/libevent/test/test-init.c b/base/third_party/libevent/test/test-init.c
new file mode 100644
index 0000000..d60aa36
--- /dev/null
+++ b/base/third_party/libevent/test/test-init.c
@@ -0,0 +1,40 @@
+/*
+ * Compile with:
+ * cc -I/usr/local/include -o test-init test-init.c -L/usr/local/lib -levent
+ */
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#ifdef WIN32
+#include <winsock2.h>
+#endif
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#ifdef HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+#include <fcntl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#include <errno.h>
+
+#include <event.h>
+
+int
+main(int argc, char **argv)
+{
+	/* Initialize the event library */
+	event_init();
+
+	return (0);
+}
+
diff --git a/base/third_party/libevent/test/test-time.c b/base/third_party/libevent/test/test-time.c
new file mode 100644
index 0000000..703bc32
--- /dev/null
+++ b/base/third_party/libevent/test/test-time.c
@@ -0,0 +1,89 @@
+/*
+ * Compile with:
+ * cc -I/usr/local/include -o test-time test-time.c -L/usr/local/lib -levent
+ */
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#ifdef WIN32
+#include <winsock2.h>
+#endif
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#include <fcntl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#include <errno.h>
+
+#include <event.h>
+
+int called = 0;
+
+#define NEVENT	20000
+
+struct event *ev[NEVENT];
+
+static int
+rand_int(int n)
+{
+#ifdef WIN32
+	return (int)(rand() % n);
+#else
+	return (int)(random() % n);
+#endif
+}
+
+static void
+time_cb(int fd, short event, void *arg)
+{
+	struct timeval tv;
+	int i, j;
+
+	called++;
+
+	if (called < 10*NEVENT) {
+		for (i = 0; i < 10; i++) {
+			j = rand_int(NEVENT);
+			tv.tv_sec = 0;
+			tv.tv_usec = rand_int(50000);
+			if (tv.tv_usec % 2)
+				evtimer_add(ev[j], &tv);
+			else
+				evtimer_del(ev[j]);
+		}
+	}
+}
+
+int
+main (int argc, char **argv)
+{
+	struct timeval tv;
+	int i;
+
+	/* Initialize the event library */
+	event_init();
+
+	for (i = 0; i < NEVENT; i++) {
+		ev[i] = malloc(sizeof(struct event));
+
+		/* Initialize one event */
+		evtimer_set(ev[i], time_cb, ev[i]);
+		tv.tv_sec = 0;
+		tv.tv_usec = rand_int(50000);
+		evtimer_add(ev[i], &tv);
+	}
+
+	event_dispatch();
+
+	return (called < NEVENT);
+}
+
diff --git a/base/third_party/libevent/test/test-weof.c b/base/third_party/libevent/test/test-weof.c
new file mode 100644
index 0000000..7fd6c8b
--- /dev/null
+++ b/base/third_party/libevent/test/test-weof.c
@@ -0,0 +1,84 @@
+/*
+ * Compile with:
+ * cc -I/usr/local/include -o test-weof test-weof.c -L/usr/local/lib -levent
+ */
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+
+#ifdef WIN32
+#include <winsock2.h>
+#endif
+#include <sys/types.h>
+#include <sys/stat.h>
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#ifdef HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+#include <fcntl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <signal.h>
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#include <errno.h>
+
+#include <event.h>
+#include <evutil.h>
+
+int pair[2];
+int test_okay = 1;
+int called = 0;
+
+static void
+write_cb(int fd, short event, void *arg)
+{
+	const char *test = "test string";
+	int len;
+
+	len = send(fd, test, strlen(test) + 1, 0);
+
+	printf("%s: write %d%s\n", __func__,
+	    len, len ? "" : " - means EOF");
+
+	if (len > 0) {
+		if (!called)
+			event_add(arg, NULL);
+		EVUTIL_CLOSESOCKET(pair[0]);
+	} else if (called == 1)
+		test_okay = 0;
+
+	called++;
+}
+
+int
+main (int argc, char **argv)
+{
+	struct event ev;
+
+#ifndef WIN32
+	if (signal(SIGPIPE, SIG_IGN) == SIG_ERR)
+		return (1);
+#endif
+
+	if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, pair) == -1)
+		return (1);
+
+	/* Initialize the event library */
+	event_init();
+
+	/* Initialize one event */
+	event_set(&ev, pair[1], EV_WRITE, write_cb, &ev);
+
+	event_add(&ev, NULL);
+
+	event_dispatch();
+
+	return (test_okay);
+}
+
diff --git a/base/third_party/libevent/test/test.sh b/base/third_party/libevent/test/test.sh
new file mode 100755
index 0000000..506a198
--- /dev/null
+++ b/base/third_party/libevent/test/test.sh
@@ -0,0 +1,91 @@
+#!/bin/sh
+
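+# libevent consults the EVENT_NO* environment variables when picking a
+# backend, so setup disables every backend; each block below re-enables
+# exactly one, making the test binaries exercise that backend alone.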
+setup () {
+	 EVENT_NOKQUEUE=yes; export EVENT_NOKQUEUE
+	 EVENT_NODEVPOLL=yes; export EVENT_NODEVPOLL
+	 EVENT_NOPOLL=yes; export EVENT_NOPOLL
+	 EVENT_NOSELECT=yes; export EVENT_NOSELECT
+	 EVENT_NOEPOLL=yes; export EVENT_NOEPOLL
+	 EVENT_NOEVPORT=yes; export EVENT_NOEVPORT
+}
+
+test () {
+	if ./test-init 2>/dev/null ;
+	then
+	        true
+	else
+		echo Skipping test
+		return
+	fi	
+
+echo -n " test-eof: "
+if ./test-eof >/dev/null ; 
+then 
+	echo OKAY ; 
+else 
+	echo FAILED ; 
+fi
+echo -n " test-weof: "
+if ./test-weof >/dev/null ; 
+then 
+	echo OKAY ; 
+else 
+	echo FAILED ; 
+fi
+echo -n " test-time: "
+if ./test-time >/dev/null ; 
+then 
+	echo OKAY ; 
+else 
+	echo FAILED ; 
+fi
+echo -n " regress: "
+if ./regress >/dev/null ; 
+then 
+	echo OKAY ; 
+else 
+	echo FAILED ; 
+fi
+}
+
+echo "Running tests:"
+
+# Need to do this by hand?
+setup
+unset EVENT_NOKQUEUE
+export EVENT_NOKQUEUE
+echo "KQUEUE"
+test
+
+setup
+unset EVENT_NODEVPOLL
+export EVENT_NODEVPOLL
+echo "DEVPOLL"
+test
+
+setup
+unset EVENT_NOPOLL
+export EVENT_NOPOLL
+echo "POLL"
+test
+
+setup
+unset EVENT_NOSELECT
+export EVENT_NOSELECT
+echo "SELECT"
+test
+
+setup
+unset EVENT_NOEPOLL
+export EVENT_NOEPOLL
+echo "EPOLL"
+test
+
+setup
+unset EVENT_NOEVPORT
+export EVENT_NOEVPORT
+echo "EVPORT"
+test
+
+
+
diff --git a/base/third_party/libevent/whatsnew-14.txt b/base/third_party/libevent/whatsnew-14.txt
new file mode 100644
index 0000000..769dda7
--- /dev/null
+++ b/base/third_party/libevent/whatsnew-14.txt
@@ -0,0 +1,167 @@
+What's New In Libevent 1.4:
+
+0. About this document
+
+  This document describes the key differences between Libevent 1.3 and
+  Libevent 1.4, from a user's point of view.  It was most recently
+  updated based on features from libevent 1.4.2-rc.
+
+1. Packaging Issues.
+
+1.1. The great library division.
+
+  The libevent source now builds two libraries: libevent_core and
+  libevent_extra.  The libevent_core library includes event loops,
+  timers, buffer code, and various small compatibility functions.  The
+  libevent_extra library includes code for HTTP, DNS, RPC, and so on.
+  Thus, if you're writing software that only uses libevent's event
+  loop, you should link against only the libevent_core library,
+  whereas if you're writing software that uses libevent's protocol
+  support, you also need to link against libevent_extra.
+
+  For backward compatibility, libevent also builds a library called
+  "libevent" that includes everything.
+
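+  For example, a program that only drives the event loop could be built
+  roughly like the bundled test programs (myloop.c is a hypothetical
+  file name):
+
+      cc -o myloop myloop.c -levent_core
+
+  and would add -levent_extra (or simply link -levent) once it starts
+  calling the HTTP, DNS, or RPC APIs.
+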
+1.2. The event-config.h header
+
+  Libevent's configure script now builds two headers: config.h (which
+  libevent uses internally) and event-config.h (which it installs as
+  a header file).  All of the macros in
+  event-config.h are modified so that they're safe to include in other
+  projects.  This allows libevent's header files (like event.h and
+  evutil.h) to learn about the platform configuration.
+
+  What does this mean for you?  As of 1.4.x, it should never be
+  necessary to include extra files or define extra types before you
+  include event.h (or any other libevent header); event.h can now look
+  at the information in event-config.h and figure out what it needs to
+  include.
+
+1.3. Documentation
+
+  Libevent now includes better doxygen documentation.  It's not
+  perfect or complete, though; if you find a mistake, please let us
+  know.
+
+1.4. Libtool usage
+
+  We now use libtool's library versioning support correctly.  If we
+  don't mess this up, it means that binaries linked against old
+  versions of libevent should continue working when we make changes
+  to libevent that don't break backward compatibility.
+
+1.5. Portability
+
+  Libevent now builds with MSVC again.  We've only tested it with MSVC
+  2005, and the project files might not be right.  Please let us know
+  if you run into any issues.
+
+  Libevent now builds on platforms where /bin/sh is not bash.
+
+  Libevent's regression test no longer requires Python to be
+  installed.
+
+2. New and Improved APIs:
+
+  (This list includes functions that are new, functions whose behavior
+  has changed, and functions that were included in previous releases
+  but which never actually worked before.)
+
+2.1. Utility functions are defined in evutil.h
+
+  Libevent now exposes a small set of functions for cross-platform
+  network programming in evutil.h, on the theory that they've been
+  useful enough to us that other people may want to use them
+  too.  These are mainly workarounds for Windows issues for now: they
+  include evutil_socketpair (to fake socketpair on platforms that
+  don't have it) and evutil_make_socket_nonblocking (to make a socket
+  nonblocking in a cross-platform way).  See the header for more
+  information.
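+
+  A minimal sketch of the intended use (error handling elided; the
+  variable names are illustrative):
+
+      #include <event.h>
+      #include <evutil.h>
+
+      int pair[2];
+      if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, pair) == -1)
+          return 1;
+      evutil_make_socket_nonblocking(pair[0]);
+      evutil_make_socket_nonblocking(pair[1]);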
+
+2.2. In the libevent core.
+
+  The event_base_free() function now works.  Previously, it would
+  crash with an assertion failure if there were events pending on a
+  base.  Now, it simply deletes all the pending events and frees the
+  base.  Be careful -- this might leak fds and memory associated with
+  the old events.  To avoid leaks, you should still remove all the
+  events and free their resources before you delete the base.
+
+  Libevent should now work properly with fork().  Just call
+  event_reinit() on your event base after the fork call, and it should
+  work okay.  Please let us know about any bugs you find.
+
+  There's a new event_base_new() function that acts just like
+  event_init(), but does not replace the default base.  If you are
+  using multiple event bases in your code, you should just use
+  event_base_new() instead of event_init(), to avoid accidental bugs.
+
+  There's a new event_loopbreak() function to make the current event
+  loop stop executing and return.  Unlike event_loopexit, it stops
+  subsequent pending events from getting executed.  This behavior is
+  useful for scripting languages to implement exceptions from inside
+  callbacks.
+
+  There's a new event_base_get_method() function, for use in place of
+  event_get_method() in multi-base applications.
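+
+  A sketch of driving a second base with these APIs (fd and cb are
+  assumed to exist):
+
+      struct event_base *base = event_base_new(); /* leaves default base alone */
+      struct event ev;
+      event_set(&ev, fd, EV_READ, cb, NULL);
+      event_base_set(base, &ev);                  /* attach ev to our base */
+      event_add(&ev, NULL);
+      event_base_dispatch(base);
+      event_base_free(base);                      /* works as of 1.4 */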
+
+2.3. New in HTTP.
+
+  There's an evhttp_connection_set_local_address() function you can
+  use to set the local address of an HTTP connection.
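+
+  For example, evhttp_connection_set_local_address(evcon, "10.0.0.2")
+  makes requests on evcon bind to that source address (the address
+  here is illustrative).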
+
+  HTTP/1.1 chunking now correctly ends chunks with '\r\n'.
+
+2.4. New in DNS
+
+  Instead of picking your method for generating DNS transaction IDs at
+  startup, you can use evdns_set_transaction_id() to provide a
+  transaction ID function at runtime.
+
+  The "class" field in evdns_server_request is now renamed to
+  dns_question_class, so that it won't break compilation under C++.
+  This uses some preprocessor hacks so that C code using the old name
+  won't break.  Eventually, the old name will be deprecated entirely;
+  please don't use it.
+
+2.5. New in RPC
+
+  There are now hooks on RPC input and output; they can be used to
+  implement RPC-independent processing such as compression or
+  authentication.
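+
+  A hook is registered roughly as in the bundled regression test:
+
+      evrpc_add_hook(base, EVRPC_INPUT, my_hook, my_arg);
+
+  where my_hook (a name assumed here) has the signature
+  int my_hook(struct evhttp_request *, struct evbuffer *, void *).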
+
+  RPC tags can now be up to 32 bits.  This is wire-compatible, but
+  changes some of the types in the APIs.  Please let us know if this
+  is problematic for you.
+
+3. Big bugfixes
+
+  We've done a lot, with help from users on different platforms, to
+  make the different backends behave more similarly with respect to
+  signals and timeouts.  The kqueue and solaris backends were the big
+  offenders previously, but they should be better now.  Windows should
+  be better too, though it's likely that problems remain there.
+
+  The libevent headers (though not the source files!) should now
+  compile cleanly under C++.
+
+  (For more bugfixes, see the ChangeLog file.  These are only the
+  biggies.)
+
+4. Big performance improvements
+
+  Libevent now uses a min-heap rather than a red-black tree to track
+  timeouts.  This means that finding the next timeout to fire is now
+  O(1) instead of O(lg n).
+
+  The win32 select-based backend now uses a red-black tree to map
+  SOCKET handles to event structures.  This changes the performance
+  characteristics of the event loop on win32 from O(n^2) to O(n lg n).
+  Not perfect, but better.
+
+5. Removed code and features
+
+  The rtsig backend is now removed.  It hasn't even compiled for a
+  while, and nobody seemed to miss it very much.  All the platforms
+  that have rtsig seem to have a better option instead these days.
+  Please let us know if rtsig was crucial for you.
+
diff --git a/base/third_party/nspr/LICENSE b/base/third_party/nspr/LICENSE
new file mode 100644
index 0000000..eba7b77
--- /dev/null
+++ b/base/third_party/nspr/LICENSE
@@ -0,0 +1,35 @@
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is the Netscape Portable Runtime (NSPR).
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998-2000
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
diff --git a/base/third_party/nspr/OWNERS b/base/third_party/nspr/OWNERS
new file mode 100644
index 0000000..20ba660
--- /dev/null
+++ b/base/third_party/nspr/OWNERS
@@ -0,0 +1,2 @@
+rsleevi@chromium.org
+wtc@chromium.org
diff --git a/base/third_party/nspr/README.chromium b/base/third_party/nspr/README.chromium
new file mode 100644
index 0000000..3659a2c
--- /dev/null
+++ b/base/third_party/nspr/README.chromium
@@ -0,0 +1,3 @@
+Name: Netscape Portable Runtime (NSPR)
+URL: http://www.mozilla.org/projects/nspr/
+License: MPL 1.1/GPL 2.0/LGPL 2.1
diff --git a/base/third_party/nspr/prtime.cc b/base/third_party/nspr/prtime.cc
new file mode 100644
index 0000000..c125160
--- /dev/null
+++ b/base/third_party/nspr/prtime.cc
@@ -0,0 +1,1186 @@
+/* Portions are Copyright (C) 2011 Google Inc */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is the Netscape Portable Runtime (NSPR).
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998-2000
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * prtime.cc --
+ * NOTE: The original nspr file name is prtime.c
+ *
+ *     NSPR date and time functions
+ *
+ * CVS revision 3.37
+ */
+
+/*
+ * The following functions were copied from the NSPR prtime.c file.
+ * PR_ParseTimeString
+ *   We inlined the new PR_ParseTimeStringToExplodedTime function to avoid
+ *   copying PR_ExplodeTime and PR_LocalTimeParameters.  (The PR_ExplodeTime
+ *   and PR_ImplodeTime calls cancel each other out.)
+ * PR_NormalizeTime
+ * PR_GMTParameters
+ * PR_ImplodeTime
+ *   Upstream implementation from
+ *   http://lxr.mozilla.org/nspr/source/pr/src/misc/prtime.c#221
+ * All types and macros are defined in the base/third_party/prtime.h file.
+ * These have been copied from the following nspr files. We have only copied
+ * over the types we need.
+ * 1. prtime.h
+ * 2. prtypes.h
+ * 3. prlong.h
+ *
+ * Unit tests are in base/time/pr_time_unittest.cc.
+ */
+
+#include <limits.h>
+
+#include "base/logging.h"
+#include "base/third_party/nspr/prtime.h"
+#include "build/build_config.h"
+
+#include <errno.h>  /* for EINVAL */
+#include <time.h>
+
+/*
+ * The COUNT_LEAPS macro counts the number of leap years that have
+ * passed by the start of the given year Y.  At the start of the year 4
+ * A.D. the number of leap years passed by is 0, while at the start of
+ * the year 5 A.D. this count is 1. The number of years divisible by
+ * 100 but not divisible by 400 (the non-leap years) is deducted from
+ * the count to get the correct number of leap years.
+ *
+ * The COUNT_DAYS macro counts the number of days from 01/01/01 up to the
+ * start of the given year Y. The number of days at the start of the year
+ * 1 is 0 while the number of days at the start of the year 2 is 365
+ * (which is ((2)-1) * 365) and so on. The reference point is 01/01/01
+ * midnight 00:00:00.
+ */
+
+#define COUNT_LEAPS(Y) (((Y)-1) / 4 - ((Y)-1) / 100 + ((Y)-1) / 400)
+#define COUNT_DAYS(Y) (((Y)-1) * 365 + COUNT_LEAPS(Y))
+#define DAYS_BETWEEN_YEARS(A, B) (COUNT_DAYS(B) - COUNT_DAYS(A))
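+
+/*
+ * Worked example: COUNT_LEAPS(2001) = 2000/4 - 2000/100 + 2000/400
+ * = 500 - 20 + 5 = 485, so COUNT_DAYS(2001) = 2000 * 365 + 485 = 730485,
+ * the number of days from 01/01/01 through the end of the year 2000.
+ */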
+
+/* Implements the Unix localtime_r() function for Windows */
+#if defined(OS_WIN)
+static void localtime_r(const time_t* secs, struct tm* time) {
+  (void) localtime_s(time, secs);
+}
+#endif
+
+/*
+ * Static variables used by functions in this file
+ */
+
+/*
+ * The following array contains the day of year for the last day of
+ * each month, where index 1 is January, and day 0 is January 1.
+ */
+
+static const int lastDayOfMonth[2][13] = {
+    {-1, 30, 58, 89, 119, 150, 180, 211, 242, 272, 303, 333, 364},
+    {-1, 30, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365}
+};
+
+/*
+ * The number of days in a month
+ */
+
+static const PRInt8 nDays[2][12] = {
+    {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31},
+    {31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}
+};
+
+/*
+ *------------------------------------------------------------------------
+ *
+ * PR_ImplodeTime --
+ *
+ *     Cf. time_t mktime(struct tm *tp)
+ *     Note that 1 year has < 2^25 seconds.  So a PRInt32 is large enough.
+ *
+ *------------------------------------------------------------------------
+ */
+PRTime
+PR_ImplodeTime(const PRExplodedTime *exploded)
+{
+  PRExplodedTime copy;
+  PRTime retVal;
+  PRInt64 secPerDay, usecPerSec;
+  PRInt64 temp;
+  PRInt64 numSecs64;
+  PRInt32 numDays;
+  PRInt32 numSecs;
+
+  /* Normalize first.  Do this on our copy */
+  copy = *exploded;
+  PR_NormalizeTime(&copy, PR_GMTParameters);
+
+  numDays = DAYS_BETWEEN_YEARS(1970, copy.tm_year);
+
+  numSecs = copy.tm_yday * 86400 + copy.tm_hour * 3600 + copy.tm_min * 60 +
+            copy.tm_sec;
+
+  LL_I2L(temp, numDays);
+  LL_I2L(secPerDay, 86400);
+  LL_MUL(temp, temp, secPerDay);
+  LL_I2L(numSecs64, numSecs);
+  LL_ADD(numSecs64, numSecs64, temp);
+
+  /* apply the GMT and DST offsets */
+  LL_I2L(temp, copy.tm_params.tp_gmt_offset);
+  LL_SUB(numSecs64, numSecs64, temp);
+  LL_I2L(temp, copy.tm_params.tp_dst_offset);
+  LL_SUB(numSecs64, numSecs64, temp);
+
+  LL_I2L(usecPerSec, 1000000L);
+  LL_MUL(temp, numSecs64, usecPerSec);
+  LL_I2L(retVal, copy.tm_usec);
+  LL_ADD(retVal, retVal, temp);
+
+  return retVal;
+}
+
+/*
+ *-------------------------------------------------------------------------
+ *
+ * IsLeapYear --
+ *
+ *     Returns 1 if the year is a leap year, 0 otherwise.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static int IsLeapYear(PRInt16 year)
+{
+    if ((year % 4 == 0 && year % 100 != 0) || year % 400 == 0)
+        return 1;
+    else
+        return 0;
+}
+
+/*
+ * 'secOffset' should be less than 86400 (i.e., a day).
+ * 'time' should point to a normalized PRExplodedTime.
+ */
+
+static void
+ApplySecOffset(PRExplodedTime *time, PRInt32 secOffset)
+{
+    time->tm_sec += secOffset;
+
+    /* Note that in this implementation we do not count leap seconds */
+    if (time->tm_sec < 0 || time->tm_sec >= 60) {
+        time->tm_min += time->tm_sec / 60;
+        time->tm_sec %= 60;
+        if (time->tm_sec < 0) {
+            time->tm_sec += 60;
+            time->tm_min--;
+        }
+    }
+
+    if (time->tm_min < 0 || time->tm_min >= 60) {
+        time->tm_hour += time->tm_min / 60;
+        time->tm_min %= 60;
+        if (time->tm_min < 0) {
+            time->tm_min += 60;
+            time->tm_hour--;
+        }
+    }
+
+    if (time->tm_hour < 0) {
+        /* Decrement mday, yday, and wday */
+        time->tm_hour += 24;
+        time->tm_mday--;
+        time->tm_yday--;
+        if (time->tm_mday < 1) {
+            time->tm_month--;
+            if (time->tm_month < 0) {
+                time->tm_month = 11;
+                time->tm_year--;
+                if (IsLeapYear(time->tm_year))
+                    time->tm_yday = 365;
+                else
+                    time->tm_yday = 364;
+            }
+            time->tm_mday = nDays[IsLeapYear(time->tm_year)][time->tm_month];
+        }
+        time->tm_wday--;
+        if (time->tm_wday < 0)
+            time->tm_wday = 6;
+    } else if (time->tm_hour > 23) {
+        /* Increment mday, yday, and wday */
+        time->tm_hour -= 24;
+        time->tm_mday++;
+        time->tm_yday++;
+        if (time->tm_mday >
+                nDays[IsLeapYear(time->tm_year)][time->tm_month]) {
+            time->tm_mday = 1;
+            time->tm_month++;
+            if (time->tm_month > 11) {
+                time->tm_month = 0;
+                time->tm_year++;
+                time->tm_yday = 0;
+            }
+        }
+        time->tm_wday++;
+        if (time->tm_wday > 6)
+            time->tm_wday = 0;
+    }
+}
+
+void
+PR_NormalizeTime(PRExplodedTime *time, PRTimeParamFn params)
+{
+    int daysInMonth;
+    PRInt32 numDays;
+
+    /* Get back to GMT */
+    time->tm_sec -= time->tm_params.tp_gmt_offset
+            + time->tm_params.tp_dst_offset;
+    time->tm_params.tp_gmt_offset = 0;
+    time->tm_params.tp_dst_offset = 0;
+
+    /* Now normalize GMT */
+
+    if (time->tm_usec < 0 || time->tm_usec >= 1000000) {
+        time->tm_sec +=  time->tm_usec / 1000000;
+        time->tm_usec %= 1000000;
+        if (time->tm_usec < 0) {
+            time->tm_usec += 1000000;
+            time->tm_sec--;
+        }
+    }
+
+    /* Note that we do not count leap seconds in this implementation */
+    if (time->tm_sec < 0 || time->tm_sec >= 60) {
+        time->tm_min += time->tm_sec / 60;
+        time->tm_sec %= 60;
+        if (time->tm_sec < 0) {
+            time->tm_sec += 60;
+            time->tm_min--;
+        }
+    }
+
+    if (time->tm_min < 0 || time->tm_min >= 60) {
+        time->tm_hour += time->tm_min / 60;
+        time->tm_min %= 60;
+        if (time->tm_min < 0) {
+            time->tm_min += 60;
+            time->tm_hour--;
+        }
+    }
+
+    if (time->tm_hour < 0 || time->tm_hour >= 24) {
+        time->tm_mday += time->tm_hour / 24;
+        time->tm_hour %= 24;
+        if (time->tm_hour < 0) {
+            time->tm_hour += 24;
+            time->tm_mday--;
+        }
+    }
+
+    /* Normalize month and year before mday */
+    if (time->tm_month < 0 || time->tm_month >= 12) {
+        time->tm_year += static_cast<PRInt16>(time->tm_month / 12);
+        time->tm_month %= 12;
+        if (time->tm_month < 0) {
+            time->tm_month += 12;
+            time->tm_year--;
+        }
+    }
+
+    /* Now that month and year are in proper range, normalize mday */
+
+    if (time->tm_mday < 1) {
+        /* mday too small */
+        do {
+            /* the previous month */
+            time->tm_month--;
+            if (time->tm_month < 0) {
+                time->tm_month = 11;
+                time->tm_year--;
+            }
+            time->tm_mday += nDays[IsLeapYear(time->tm_year)][time->tm_month];
+        } while (time->tm_mday < 1);
+    } else {
+        daysInMonth = nDays[IsLeapYear(time->tm_year)][time->tm_month];
+        while (time->tm_mday > daysInMonth) {
+            /* mday too large */
+            time->tm_mday -= daysInMonth;
+            time->tm_month++;
+            if (time->tm_month > 11) {
+                time->tm_month = 0;
+                time->tm_year++;
+            }
+            daysInMonth = nDays[IsLeapYear(time->tm_year)][time->tm_month];
+        }
+    }
+
+    /* Recompute yday and wday */
+    time->tm_yday = static_cast<PRInt16>(time->tm_mday +
+            lastDayOfMonth[IsLeapYear(time->tm_year)][time->tm_month]);
+
+    numDays = DAYS_BETWEEN_YEARS(1970, time->tm_year) + time->tm_yday;
+    time->tm_wday = (numDays + 4) % 7;
+    if (time->tm_wday < 0) {
+        time->tm_wday += 7;
+    }
+
+    /* Recompute time parameters */
+
+    time->tm_params = params(time);
+
+    ApplySecOffset(time, time->tm_params.tp_gmt_offset
+            + time->tm_params.tp_dst_offset);
+}
+
+/*
+ *------------------------------------------------------------------------
+ *
+ * PR_GMTParameters --
+ *
+ *     Returns the PRTimeParameters for Greenwich Mean Time.
+ *     Trivially, both the tp_gmt_offset and tp_dst_offset fields are 0.
+ *
+ *------------------------------------------------------------------------
+ */
+
+PRTimeParameters
+PR_GMTParameters(const PRExplodedTime *gmt)
+{
+    PRTimeParameters retVal = { 0, 0 };
+    return retVal;
+}
+
+/*
+ * The following code implements PR_ParseTimeString().  It is based on
+ * ns/lib/xp/xp_time.c, revision 1.25, by Jamie Zawinski <jwz@netscape.com>.
+ */
+
+/*
+ * We only recognize the abbreviations of a small subset of time zones
+ * in North America, Europe, and Japan.
+ *
+ * PST/PDT: Pacific Standard/Daylight Time
+ * MST/MDT: Mountain Standard/Daylight Time
+ * CST/CDT: Central Standard/Daylight Time
+ * EST/EDT: Eastern Standard/Daylight Time
+ * AST: Atlantic Standard Time
+ * NST: Newfoundland Standard Time
+ * GMT: Greenwich Mean Time
+ * BST: British Summer Time
+ * MET: Middle Europe Time
+ * EET: Eastern Europe Time
+ * JST: Japan Standard Time
+ */
+
+typedef enum
+{
+  TT_UNKNOWN,
+
+  TT_SUN, TT_MON, TT_TUE, TT_WED, TT_THU, TT_FRI, TT_SAT,
+
+  TT_JAN, TT_FEB, TT_MAR, TT_APR, TT_MAY, TT_JUN,
+  TT_JUL, TT_AUG, TT_SEP, TT_OCT, TT_NOV, TT_DEC,
+
+  TT_PST, TT_PDT, TT_MST, TT_MDT, TT_CST, TT_CDT, TT_EST, TT_EDT,
+  TT_AST, TT_NST, TT_GMT, TT_BST, TT_MET, TT_EET, TT_JST
+} TIME_TOKEN;
+
+/*
+ * This parses a time/date string into a PRTime
+ * (microseconds after "1-Jan-1970 00:00:00 GMT").
+ * It returns PR_SUCCESS on success, and PR_FAILURE
+ * if the time/date string can't be parsed.
+ *
+ * Many formats are handled, including:
+ *
+ *   14 Apr 89 03:20:12
+ *   14 Apr 89 03:20 GMT
+ *   Fri, 17 Mar 89 4:01:33
+ *   Fri, 17 Mar 89 4:01 GMT
+ *   Mon Jan 16 16:12 PDT 1989
+ *   Mon Jan 16 16:12 +0130 1989
+ *   6 May 1992 16:41-JST (Wednesday)
+ *   22-AUG-1993 10:59:12.82
+ *   22-AUG-1993 10:59pm
+ *   22-AUG-1993 12:59am
+ *   22-AUG-1993 12:59 PM
+ *   Friday, August 04, 1995 3:54 PM
+ *   06/21/95 04:24:34 PM
+ *   20/06/95 21:07
+ *   95-06-08 19:32:48 EDT
+ *   1995-06-17T23:11:25.342156Z
+ *
+ * If the input string doesn't contain a description of the time zone,
+ * we consult the `default_to_gmt' argument to decide whether the string
+ * should be interpreted relative to the local time zone (PR_FALSE) or
+ * GMT (PR_TRUE).  The correct value for this argument depends on which
+ * standard specified the time string that you are parsing.
+ */
+
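+/*
+ * A minimal usage sketch (the literal string is one of the formats
+ * listed above; the variable names are illustrative):
+ *
+ *   PRTime t;
+ *   if (PR_ParseTimeString("22-AUG-1993 10:59:12.82", PR_TRUE, &t) ==
+ *           PR_SUCCESS)
+ *       ... t now holds microseconds since the 1970 GMT epoch ...
+ */
+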
+PRStatus
+PR_ParseTimeString(
+        const char *string,
+        PRBool default_to_gmt,
+        PRTime *result_imploded)
+{
+  PRExplodedTime tm;
+  PRExplodedTime *result = &tm;
+  TIME_TOKEN dotw = TT_UNKNOWN;
+  TIME_TOKEN month = TT_UNKNOWN;
+  TIME_TOKEN zone = TT_UNKNOWN;
+  int zone_offset = -1;
+  int dst_offset = 0;
+  int date = -1;
+  PRInt32 year = -1;
+  int hour = -1;
+  int min = -1;
+  int sec = -1;
+  int usec = -1;
+
+  const char *rest = string;
+
+  int iterations = 0;
+
+  PR_ASSERT(string && result);
+  if (!string || !result) return PR_FAILURE;
+
+  while (*rest)
+        {
+
+          if (iterations++ > 1000)
+                {
+                  return PR_FAILURE;
+                }
+
+          switch (*rest)
+                {
+                case 'a': case 'A':
+                  if (month == TT_UNKNOWN &&
+                          (rest[1] == 'p' || rest[1] == 'P') &&
+                          (rest[2] == 'r' || rest[2] == 'R'))
+                        month = TT_APR;
+                  else if (zone == TT_UNKNOWN &&
+                                   (rest[1] == 's' || rest[1] == 'S') &&
+                                   (rest[2] == 't' || rest[2] == 'T'))
+                        zone = TT_AST;
+                  else if (month == TT_UNKNOWN &&
+                                   (rest[1] == 'u' || rest[1] == 'U') &&
+                                   (rest[2] == 'g' || rest[2] == 'G'))
+                        month = TT_AUG;
+                  break;
+                case 'b': case 'B':
+                  if (zone == TT_UNKNOWN &&
+                          (rest[1] == 's' || rest[1] == 'S') &&
+                          (rest[2] == 't' || rest[2] == 'T'))
+                        zone = TT_BST;
+                  break;
+                case 'c': case 'C':
+                  if (zone == TT_UNKNOWN &&
+                          (rest[1] == 'd' || rest[1] == 'D') &&
+                          (rest[2] == 't' || rest[2] == 'T'))
+                        zone = TT_CDT;
+                  else if (zone == TT_UNKNOWN &&
+                                   (rest[1] == 's' || rest[1] == 'S') &&
+                                   (rest[2] == 't' || rest[2] == 'T'))
+                        zone = TT_CST;
+                  break;
+                case 'd': case 'D':
+                  if (month == TT_UNKNOWN &&
+                          (rest[1] == 'e' || rest[1] == 'E') &&
+                          (rest[2] == 'c' || rest[2] == 'C'))
+                        month = TT_DEC;
+                  break;
+                case 'e': case 'E':
+                  if (zone == TT_UNKNOWN &&
+                          (rest[1] == 'd' || rest[1] == 'D') &&
+                          (rest[2] == 't' || rest[2] == 'T'))
+                        zone = TT_EDT;
+                  else if (zone == TT_UNKNOWN &&
+                                   (rest[1] == 'e' || rest[1] == 'E') &&
+                                   (rest[2] == 't' || rest[2] == 'T'))
+                        zone = TT_EET;
+                  else if (zone == TT_UNKNOWN &&
+                                   (rest[1] == 's' || rest[1] == 'S') &&
+                                   (rest[2] == 't' || rest[2] == 'T'))
+                        zone = TT_EST;
+                  break;
+                case 'f': case 'F':
+                  if (month == TT_UNKNOWN &&
+                          (rest[1] == 'e' || rest[1] == 'E') &&
+                          (rest[2] == 'b' || rest[2] == 'B'))
+                        month = TT_FEB;
+                  else if (dotw == TT_UNKNOWN &&
+                                   (rest[1] == 'r' || rest[1] == 'R') &&
+                                   (rest[2] == 'i' || rest[2] == 'I'))
+                        dotw = TT_FRI;
+                  break;
+                case 'g': case 'G':
+                  if (zone == TT_UNKNOWN &&
+                          (rest[1] == 'm' || rest[1] == 'M') &&
+                          (rest[2] == 't' || rest[2] == 'T'))
+                        zone = TT_GMT;
+                  break;
+                case 'j': case 'J':
+                  if (month == TT_UNKNOWN &&
+                          (rest[1] == 'a' || rest[1] == 'A') &&
+                          (rest[2] == 'n' || rest[2] == 'N'))
+                        month = TT_JAN;
+                  else if (zone == TT_UNKNOWN &&
+                                   (rest[1] == 's' || rest[1] == 'S') &&
+                                   (rest[2] == 't' || rest[2] == 'T'))
+                        zone = TT_JST;
+                  else if (month == TT_UNKNOWN &&
+                                   (rest[1] == 'u' || rest[1] == 'U') &&
+                                   (rest[2] == 'l' || rest[2] == 'L'))
+                        month = TT_JUL;
+                  else if (month == TT_UNKNOWN &&
+                                   (rest[1] == 'u' || rest[1] == 'U') &&
+                                   (rest[2] == 'n' || rest[2] == 'N'))
+                        month = TT_JUN;
+                  break;
+                case 'm': case 'M':
+                  if (month == TT_UNKNOWN &&
+                          (rest[1] == 'a' || rest[1] == 'A') &&
+                          (rest[2] == 'r' || rest[2] == 'R'))
+                        month = TT_MAR;
+                  else if (month == TT_UNKNOWN &&
+                                   (rest[1] == 'a' || rest[1] == 'A') &&
+                                   (rest[2] == 'y' || rest[2] == 'Y'))
+                        month = TT_MAY;
+                  else if (zone == TT_UNKNOWN &&
+                                   (rest[1] == 'd' || rest[1] == 'D') &&
+                                   (rest[2] == 't' || rest[2] == 'T'))
+                        zone = TT_MDT;
+                  else if (zone == TT_UNKNOWN &&
+                                   (rest[1] == 'e' || rest[1] == 'E') &&
+                                   (rest[2] == 't' || rest[2] == 'T'))
+                        zone = TT_MET;
+                  else if (dotw == TT_UNKNOWN &&
+                                   (rest[1] == 'o' || rest[1] == 'O') &&
+                                   (rest[2] == 'n' || rest[2] == 'N'))
+                        dotw = TT_MON;
+                  else if (zone == TT_UNKNOWN &&
+                                   (rest[1] == 's' || rest[1] == 'S') &&
+                                   (rest[2] == 't' || rest[2] == 'T'))
+                        zone = TT_MST;
+                  break;
+                case 'n': case 'N':
+                  if (month == TT_UNKNOWN &&
+                          (rest[1] == 'o' || rest[1] == 'O') &&
+                          (rest[2] == 'v' || rest[2] == 'V'))
+                        month = TT_NOV;
+                  else if (zone == TT_UNKNOWN &&
+                                   (rest[1] == 's' || rest[1] == 'S') &&
+                                   (rest[2] == 't' || rest[2] == 'T'))
+                        zone = TT_NST;
+                  break;
+                case 'o': case 'O':
+                  if (month == TT_UNKNOWN &&
+                          (rest[1] == 'c' || rest[1] == 'C') &&
+                          (rest[2] == 't' || rest[2] == 'T'))
+                        month = TT_OCT;
+                  break;
+                case 'p': case 'P':
+                  if (zone == TT_UNKNOWN &&
+                          (rest[1] == 'd' || rest[1] == 'D') &&
+                          (rest[2] == 't' || rest[2] == 'T'))
+                        zone = TT_PDT;
+                  else if (zone == TT_UNKNOWN &&
+                                   (rest[1] == 's' || rest[1] == 'S') &&
+                                   (rest[2] == 't' || rest[2] == 'T'))
+                        zone = TT_PST;
+                  break;
+                case 's': case 'S':
+                  if (dotw == TT_UNKNOWN &&
+                          (rest[1] == 'a' || rest[1] == 'A') &&
+                          (rest[2] == 't' || rest[2] == 'T'))
+                        dotw = TT_SAT;
+                  else if (month == TT_UNKNOWN &&
+                                   (rest[1] == 'e' || rest[1] == 'E') &&
+                                   (rest[2] == 'p' || rest[2] == 'P'))
+                        month = TT_SEP;
+                  else if (dotw == TT_UNKNOWN &&
+                                   (rest[1] == 'u' || rest[1] == 'U') &&
+                                   (rest[2] == 'n' || rest[2] == 'N'))
+                        dotw = TT_SUN;
+                  break;
+                case 't': case 'T':
+                  if (dotw == TT_UNKNOWN &&
+                          (rest[1] == 'h' || rest[1] == 'H') &&
+                          (rest[2] == 'u' || rest[2] == 'U'))
+                        dotw = TT_THU;
+                  else if (dotw == TT_UNKNOWN &&
+                                   (rest[1] == 'u' || rest[1] == 'U') &&
+                                   (rest[2] == 'e' || rest[2] == 'E'))
+                        dotw = TT_TUE;
+                  break;
+                case 'u': case 'U':
+                  if (zone == TT_UNKNOWN &&
+                          (rest[1] == 't' || rest[1] == 'T') &&
+                          !(rest[2] >= 'A' && rest[2] <= 'Z') &&
+                          !(rest[2] >= 'a' && rest[2] <= 'z'))
+                        /* UT is the same as GMT but UTx is not. */
+                        zone = TT_GMT;
+                  break;
+                case 'w': case 'W':
+                  if (dotw == TT_UNKNOWN &&
+                          (rest[1] == 'e' || rest[1] == 'E') &&
+                          (rest[2] == 'd' || rest[2] == 'D'))
+                        dotw = TT_WED;
+                  break;
+
+                case '+': case '-':
+                  {
+                        const char *end;
+                        int sign;
+                        if (zone_offset != -1)
+                          {
+                                /* already got one... */
+                                rest++;
+                                break;
+                          }
+                        if (zone != TT_UNKNOWN && zone != TT_GMT)
+                          {
+                                /* GMT+0300 is legal, but PST+0300 is not. */
+                                rest++;
+                                break;
+                          }
+
+                        sign = ((*rest == '+') ? 1 : -1);
+                        rest++; /* move over sign */
+                        end = rest;
+                        while (*end >= '0' && *end <= '9')
+                          end++;
+                        if (rest == end) /* no digits here */
+                          break;
+
+                        if ((end - rest) == 4)
+                          /* offset in HHMM */
+                          zone_offset = (((((rest[0]-'0')*10) + (rest[1]-'0')) * 60) +
+                                                         (((rest[2]-'0')*10) + (rest[3]-'0')));
+                        else if ((end - rest) == 2)
+                          /* offset in hours */
+                          zone_offset = (((rest[0]-'0')*10) + (rest[1]-'0')) * 60;
+                        else if ((end - rest) == 1)
+                          /* offset in hours */
+                          zone_offset = (rest[0]-'0') * 60;
+                        else
+                          /* 3 or >4 */
+                          break;
+
+                        zone_offset *= sign;
+                        zone = TT_GMT;
+                        break;
+                  }
+
+                case '0': case '1': case '2': case '3': case '4':
+                case '5': case '6': case '7': case '8': case '9':
+                  {
+                        int tmp_hour = -1;
+                        int tmp_min = -1;
+                        int tmp_sec = -1;
+                        int tmp_usec = -1;
+                        const char *end = rest + 1;
+                        while (*end >= '0' && *end <= '9')
+                          end++;
+
+                        /* end is now the first character after a range of digits. */
+
+                        if (*end == ':')
+                          {
+                                if (hour >= 0 && min >= 0) /* already got it */
+                                  break;
+
+                                /* We have seen "[0-9]+:", so this is probably HH:MM[:SS] */
+                                if ((end - rest) > 2)
+                                  /* it is [0-9][0-9][0-9]+: */
+                                  break;
+                                else if ((end - rest) == 2)
+                                  tmp_hour = ((rest[0]-'0')*10 +
+                                                          (rest[1]-'0'));
+                                else
+                                  tmp_hour = (rest[0]-'0');
+
+                                /* move over the colon, and parse minutes */
+
+                                rest = ++end;
+                                while (*end >= '0' && *end <= '9')
+                                  end++;
+
+                                if (end == rest)
+                                  /* no digits after first colon? */
+                                  break;
+                                else if ((end - rest) > 2)
+                                  /* it is [0-9][0-9][0-9]+: */
+                                  break;
+                                else if ((end - rest) == 2)
+                                  tmp_min = ((rest[0]-'0')*10 +
+                                                         (rest[1]-'0'));
+                                else
+                                  tmp_min = (rest[0]-'0');
+
+                                /* now go for seconds */
+                                rest = end;
+                                if (*rest == ':')
+                                  rest++;
+                                end = rest;
+                                while (*end >= '0' && *end <= '9')
+                                  end++;
+
+                                if (end == rest)
+                                  /* no digits after second colon - that's ok. */
+                                  ;
+                                else if ((end - rest) > 2)
+                                  /* it is [0-9][0-9][0-9]+: */
+                                  break;
+                                else if ((end - rest) == 2)
+                                  tmp_sec = ((rest[0]-'0')*10 +
+                                                         (rest[1]-'0'));
+                                else
+                                  tmp_sec = (rest[0]-'0');
+
+                                /* fractional second */
+                                rest = end;
+                                if (*rest == '.')
+                                  {
+                                    rest++;
+                                    end++;
+                                    tmp_usec = 0;
+                                    /* use up to 6 digits, skip over the rest */
+                                    while (*end >= '0' && *end <= '9')
+                                      {
+                                        if (end - rest < 6)
+                                          tmp_usec = tmp_usec * 10 + *end - '0';
+                                        end++;
+                                      }
+                                    int ndigits = end - rest;
+                                    while (ndigits++ < 6)
+                                      tmp_usec *= 10;
+                                    rest = end;
+                                  }
+
+                                if (*rest == 'Z')
+                                  {
+                                    zone = TT_GMT;
+                                    rest++;
+                                  }
+                                else if (tmp_hour <= 12)
+                                  {
+                                    /* If we made it here, we've parsed hour and min,
+                                       and possibly sec, so the current token is a time.
+                                       Now skip over whitespace and see if there's an AM
+                                       or PM directly following the time.
+                                    */
+                                        const char *s = end;
+                                        while (*s && (*s == ' ' || *s == '\t'))
+                                          s++;
+                                        if ((s[0] == 'p' || s[0] == 'P') &&
+                                                (s[1] == 'm' || s[1] == 'M'))
+                                          /* 10:05pm == 22:05, and 12:05pm == 12:05 */
+                                          tmp_hour = (tmp_hour == 12 ? 12 : tmp_hour + 12);
+                                        else if (tmp_hour == 12 &&
+                                                         (s[0] == 'a' || s[0] == 'A') &&
+                                                         (s[1] == 'm' || s[1] == 'M'))
+                                          /* 12:05am == 00:05 */
+                                          tmp_hour = 0;
+                                  }
+
+                                hour = tmp_hour;
+                                min = tmp_min;
+                                sec = tmp_sec;
+                                usec = tmp_usec;
+                                rest = end;
+                                break;
+                          }
+                        else if ((*end == '/' || *end == '-') &&
+                                         end[1] >= '0' && end[1] <= '9')
+                          {
+                                /* Perhaps this is 6/16/95, 16/6/95, 6-16-95, or 16-6-95
+                                   or even 95-06-05 or 1995-06-22.
+                                 */
+                                int n1, n2, n3;
+                                const char *s;
+
+                                if (month != TT_UNKNOWN)
+                                  /* if we saw a month name, this can't be. */
+                                  break;
+
+                                s = rest;
+
+                                n1 = (*s++ - '0');                                /* first 1, 2 or 4 digits */
+                                if (*s >= '0' && *s <= '9')
+                                  {
+                                    n1 = n1*10 + (*s++ - '0');
+
+                                    if (*s >= '0' && *s <= '9')            /* optional digits 3 and 4 */
+                                      {
+                                        n1 = n1*10 + (*s++ - '0');
+                                        if (*s < '0' || *s > '9')
+                                          break;
+                                        n1 = n1*10 + (*s++ - '0');
+                                      }
+                                  }
+
+                                if (*s != '/' && *s != '-')                /* slash or dash */
+                                  break;
+                                s++;
+
+                                if (*s < '0' || *s > '9')                /* second 1 or 2 digits */
+                                  break;
+                                n2 = (*s++ - '0');
+                                if (*s >= '0' && *s <= '9')
+                                  n2 = n2*10 + (*s++ - '0');
+
+                                if (*s != '/' && *s != '-')                /* slash or dash */
+                                  break;
+                                s++;
+
+                                if (*s < '0' || *s > '9')                /* third 1, 2, 4, or 5 digits */
+                                  break;
+                                n3 = (*s++ - '0');
+                                if (*s >= '0' && *s <= '9')
+                                  n3 = n3*10 + (*s++ - '0');
+
+                                if (*s >= '0' && *s <= '9')            /* optional digits 3, 4, and 5 */
+                                  {
+                                        n3 = n3*10 + (*s++ - '0');
+                                        if (*s < '0' || *s > '9')
+                                          break;
+                                        n3 = n3*10 + (*s++ - '0');
+                                        if (*s >= '0' && *s <= '9')
+                                          n3 = n3*10 + (*s++ - '0');
+                                  }
+
+                                if (*s == 'T' && s[1] >= '0' && s[1] <= '9')
+                                  /* followed by ISO 8601 T delimiter and number is ok */
+                                  ;
+                                else if ((*s >= '0' && *s <= '9') ||
+                                         (*s >= 'A' && *s <= 'Z') ||
+                                         (*s >= 'a' && *s <= 'z'))
+                                  /* but other alphanumerics are not ok */
+                                  break;
+
+                                /* Ok, we parsed three multi-digit numbers, with / or -
+                                   between them.  Now decide what the hell they are
+                                   (DD/MM/YY or MM/DD/YY or [YY]YY/MM/DD.)
+                                 */
+
+                                if (n1 > 31 || n1 == 0)  /* must be [YY]YY/MM/DD */
+                                  {
+                                        if (n2 > 12) break;
+                                        if (n3 > 31) break;
+                                        year = n1;
+                                        if (year < 70)
+                                            year += 2000;
+                                        else if (year < 100)
+                                            year += 1900;
+                                        month = (TIME_TOKEN)(n2 + ((int)TT_JAN) - 1);
+                                        date = n3;
+                                        rest = s;
+                                        break;
+                                  }
+
+                                if (n1 > 12 && n2 > 12)  /* illegal */
+                                  {
+                                        rest = s;
+                                        break;
+                                  }
+
+                                if (n3 < 70)
+                                    n3 += 2000;
+                                else if (n3 < 100)
+                                    n3 += 1900;
+
+                                if (n1 > 12)  /* must be DD/MM/YY */
+                                  {
+                                        date = n1;
+                                        month = (TIME_TOKEN)(n2 + ((int)TT_JAN) - 1);
+                                        year = n3;
+                                  }
+                                else                  /* assume MM/DD/YY */
+                                  {
+                                        /* #### In the ambiguous case, should we consult the
+                                           locale to find out the local default? */
+                                        month = (TIME_TOKEN)(n1 + ((int)TT_JAN) - 1);
+                                        date = n2;
+                                        year = n3;
+                                  }
+                                rest = s;
+                          }
+                        else if ((*end >= 'A' && *end <= 'Z') ||
+                                         (*end >= 'a' && *end <= 'z'))
+                          /* Digits followed by non-punctuation - what's that? */
+                          ;
+                        else if ((end - rest) == 5)                /* five digits is a year */
+                          year = (year < 0
+                                          ? ((rest[0]-'0')*10000L +
+                                                 (rest[1]-'0')*1000L +
+                                                 (rest[2]-'0')*100L +
+                                                 (rest[3]-'0')*10L +
+                                                 (rest[4]-'0'))
+                                          : year);
+                        else if ((end - rest) == 4)                /* four digits is a year */
+                          year = (year < 0
+                                          ? ((rest[0]-'0')*1000L +
+                                                 (rest[1]-'0')*100L +
+                                                 (rest[2]-'0')*10L +
+                                                 (rest[3]-'0'))
+                                          : year);
+                        else if ((end - rest) == 2)                /* two digits - date or year */
+                          {
+                                int n = ((rest[0]-'0')*10 +
+                                                 (rest[1]-'0'));
+                                /* If we don't have a date (day of the month) and we see a number
+                                   less than 32, then assume that is the date.
+
+                                   Otherwise, if we have a date and not a year, assume this is the
+                                   year.  If it is less than 70, then assume it refers to the 21st
+                                   century.  If it is >= 70 and < 100, assume it refers to the
+                                   20th century.  Otherwise, assume it refers to an unambiguous
+                                   year.
+
+                                   The world will surely end soon.
+                                   */
+                                if (date < 0 && n < 32)
+                                  date = n;
+                                else if (year < 0)
+                                  {
+                                        if (n < 70)
+                                          year = 2000 + n;
+                                        else if (n < 100)
+                                          year = 1900 + n;
+                                        else
+                                          year = n;
+                                  }
+                                /* else what the hell is this. */
+                          }
+                        else if ((end - rest) == 1)                /* one digit - date */
+                          date = (date < 0 ? (rest[0]-'0') : date);
+                        /* else, three or more than five digits - what's that? */
+
+                        break;
+                  }   /* case '0' .. '9' */
+                }   /* switch */
+
+          /* Skip to the end of this token, whether we parsed it or not.
+             Tokens are delimited by whitespace or by ,;-+/()[] (but explicitly
+             not by . or :).  'T' is also treated as a delimiter when followed
+             by a digit (ISO 8601).
+           */
+          while (*rest &&
+                         *rest != ' ' && *rest != '\t' &&
+                         *rest != ',' && *rest != ';' &&
+                         *rest != '-' && *rest != '+' &&
+                         *rest != '/' &&
+                         *rest != '(' && *rest != ')' && *rest != '[' && *rest != ']' &&
+                         !(*rest == 'T' && rest[1] >= '0' && rest[1] <= '9')
+                )
+                rest++;
+          /* skip over uninteresting chars. */
+        SKIP_MORE:
+          while (*rest == ' ' || *rest == '\t' ||
+                 *rest == ',' || *rest == ';' || *rest == '/' ||
+                 *rest == '(' || *rest == ')' || *rest == '[' || *rest == ']')
+                rest++;
+
+          /* "-" is ignored at the beginning of a token if we have not yet
+                 parsed a year (e.g., the second "-" in "30-AUG-1966"), or if
+                 the character after the dash is not a digit. */         
+          if (*rest == '-' && ((rest > string &&
+              isalpha((unsigned char)rest[-1]) && year < 0) ||
+              rest[1] < '0' || rest[1] > '9'))
+                {
+                  rest++;
+                  goto SKIP_MORE;
+                }
+
+          /* Skip T that may precede ISO 8601 time. */
+          if (*rest == 'T' && rest[1] >= '0' && rest[1] <= '9')
+            rest++;
+        }   /* while */
+
+  if (zone != TT_UNKNOWN && zone_offset == -1)
+        {
+          switch (zone)
+                {
+                case TT_PST: zone_offset = -8 * 60; break;
+                case TT_PDT: zone_offset = -8 * 60; dst_offset = 1 * 60; break;
+                case TT_MST: zone_offset = -7 * 60; break;
+                case TT_MDT: zone_offset = -7 * 60; dst_offset = 1 * 60; break;
+                case TT_CST: zone_offset = -6 * 60; break;
+                case TT_CDT: zone_offset = -6 * 60; dst_offset = 1 * 60; break;
+                case TT_EST: zone_offset = -5 * 60; break;
+                case TT_EDT: zone_offset = -5 * 60; dst_offset = 1 * 60; break;
+                case TT_AST: zone_offset = -4 * 60; break;
+                case TT_NST: zone_offset = -3 * 60 - 30; break;
+                case TT_GMT: zone_offset =  0 * 60; break;
+                case TT_BST: zone_offset =  0 * 60; dst_offset = 1 * 60; break;
+                case TT_MET: zone_offset =  1 * 60; break;
+                case TT_EET: zone_offset =  2 * 60; break;
+                case TT_JST: zone_offset =  9 * 60; break;
+                default:
+                  PR_ASSERT (0);
+                  break;
+                }
+        }
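+
+  /* Illustrative note (not in the original NSPR sources): zone_offset and
+     dst_offset are expressed in minutes relative to GMT; e.g. PDT is
+     (-8 * 60) + (1 * 60) = -420 minutes, i.e. GMT-7.  Both are converted
+     to seconds when stored into tm_params near the end of this function. */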
+
+  /* If we didn't find a year, month, or day-of-the-month, we can't
+         possibly parse this, and in fact, mktime() will do something random
+         (I'm seeing it return "Tue Feb  5 06:28:16 2036", which is no doubt
+         a numerologically significant date...). */
+  if (month == TT_UNKNOWN || date == -1 || year == -1 || year > PR_INT16_MAX)
+      return PR_FAILURE;
+
+  memset(result, 0, sizeof(*result));
+  if (usec != -1)
+        result->tm_usec = usec;
+  if (sec != -1)
+        result->tm_sec = sec;
+  if (min != -1)
+        result->tm_min = min;
+  if (hour != -1)
+        result->tm_hour = hour;
+  if (date != -1)
+        result->tm_mday = date;
+  if (month != TT_UNKNOWN)
+        result->tm_month = (((int)month) - ((int)TT_JAN));
+  if (year != -1)
+        result->tm_year = static_cast<PRInt16>(year);
+  if (dotw != TT_UNKNOWN)
+        result->tm_wday = static_cast<PRInt8>(((int)dotw) - ((int)TT_SUN));
+  /*
+   * Mainly to compute wday and yday, but normalized time is also required
+   * by the check below that works around a Visual C++ 2005 mktime problem.
+   */
+  PR_NormalizeTime(result, PR_GMTParameters);
+  /* The remaining work is to set the gmt and dst offsets in tm_params. */
+
+  if (zone == TT_UNKNOWN && default_to_gmt)
+        {
+          /* No zone was specified, so pretend the zone was GMT. */
+          zone = TT_GMT;
+          zone_offset = 0;
+        }
+
+  if (zone_offset == -1)
+        {
+          /* no zone was specified, and we're to assume that everything
+             is local. */
+          struct tm localTime;
+          time_t secs;
+
+          PR_ASSERT(result->tm_month > -1 &&
+                    result->tm_mday > 0 &&
+                    result->tm_hour > -1 &&
+                    result->tm_min > -1 &&
+                    result->tm_sec > -1);
+
+            /*
+             * To obtain time_t from a tm structure representing the local
+             * time, we call mktime().  However, we need to see if we are
+             * on 1-Jan-1970 or before.  If we are, we can't call mktime()
+             * because mktime() will crash on win16. In that case, we
+             * calculate zone_offset based on the zone offset at
+             * 00:00:00, 2 Jan 1970 GMT, and subtract zone_offset from the
+             * date we are parsing to transform the date to GMT.  We also
+             * do so if mktime() returns (time_t) -1 (time out of range).
+           */
+
+          /* month, day, hours, mins and secs are always non-negative
+             so we don't need to worry about them. */
+          if (result->tm_year >= 1970)
+                {
+                  localTime.tm_sec = result->tm_sec;
+                  localTime.tm_min = result->tm_min;
+                  localTime.tm_hour = result->tm_hour;
+                  localTime.tm_mday = result->tm_mday;
+                  localTime.tm_mon = result->tm_month;
+                  localTime.tm_year = result->tm_year - 1900;
+                  /* Set this to -1 to tell mktime "I don't care".  If you set
+                     it to 0 or 1, you are making assertions about whether the
+                     date you are handing it is in daylight savings mode or not;
+                     and if you're wrong, it will "fix" it for you. */
+                  localTime.tm_isdst = -1;
+
+#if _MSC_VER == 1400  /* 1400 = Visual C++ 2005 (8.0) */
+                  /*
+                   * mktime will return (time_t) -1 if the input is a date
+                   * after 23:59:59, December 31, 3000, US Pacific Time (not
+                   * UTC as documented): 
+                   * http://msdn.microsoft.com/en-us/library/d1y53h2a(VS.80).aspx
+                   * But if the year is 3001, mktime also invokes the invalid
+                   * parameter handler, causing the application to crash.  This
+                   * problem has been reported in
+                   * http://connect.microsoft.com/VisualStudio/feedback/ViewFeedback.aspx?FeedbackID=266036.
+                   * We avoid this crash by not calling mktime if the date is
+                   * out of range.  To use a simple test that works in any time
+                   * zone, we consider year 3000 out of range as well.  (See
+                   * bug 480740.)
+                   */
+                  if (result->tm_year >= 3000) {
+                      /* Emulate what mktime would have done. */
+                      errno = EINVAL;
+                      secs = (time_t) -1;
+                  } else {
+                      secs = mktime(&localTime);
+                  }
+#else
+                  secs = mktime(&localTime);
+#endif
+                  if (secs != (time_t) -1)
+                    {
+                      *result_imploded = (PRInt64)secs * PR_USEC_PER_SEC;
+                      *result_imploded += result->tm_usec;
+                      return PR_SUCCESS;
+                    }
+                }
+                
+                /* So mktime() can't handle this case.  We assume the
+                   zone_offset for the date we are parsing is the same as
+                   the zone offset on 00:00:00 2 Jan 1970 GMT. */
+                secs = 86400;
+                localtime_r(&secs, &localTime);
+                zone_offset = localTime.tm_min
+                              + 60 * localTime.tm_hour
+                              + 1440 * (localTime.tm_mday - 2);
+        }
+
+  result->tm_params.tp_gmt_offset = zone_offset * 60;
+  result->tm_params.tp_dst_offset = dst_offset * 60;
+
+  *result_imploded = PR_ImplodeTime(result);
+  return PR_SUCCESS;
+}
diff --git a/base/third_party/nspr/prtime.h b/base/third_party/nspr/prtime.h
new file mode 100644
index 0000000..20bae38
--- /dev/null
+++ b/base/third_party/nspr/prtime.h
@@ -0,0 +1,263 @@
+/* Portions are Copyright (C) 2011 Google Inc */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is the Netscape Portable Runtime (NSPR).
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998-2000
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ *---------------------------------------------------------------------------
+ *
+ * prtime.h --
+ *
+ *     NSPR date and time functions
+ * CVS revision 3.10
+ * This file contains definitions of NSPR's basic types required by
+ * prtime.cc. These types have been copied over from the following NSPR
+ * files: prtime.h, prtypes.h (CVS revision 3.35), and prlong.h (CVS
+ * revision 3.13).
+ *
+ *---------------------------------------------------------------------------
+ */
+
+#ifndef BASE_PRTIME_H__
+#define BASE_PRTIME_H__
+
+#include <stdint.h>
+
+#include "base/base_export.h"
+
+typedef int8_t PRInt8;
+typedef int16_t PRInt16;
+typedef int32_t PRInt32;
+typedef int64_t PRInt64;
+typedef int PRIntn;
+
+typedef PRIntn PRBool;
+#define PR_TRUE 1
+#define PR_FALSE 0
+
+typedef enum { PR_FAILURE = -1, PR_SUCCESS = 0 } PRStatus;
+
+#define PR_ASSERT DCHECK
+#define PR_CALLBACK
+#define PR_INT16_MAX 32767
+#define NSPR_API(__type) extern __type
+
+/*
+ * Long-long (64-bit signed integer type) support macros used by
+ * PR_ImplodeTime().
+ * See http://lxr.mozilla.org/nspr/source/pr/include/prlong.h
+ */
+
+#define LL_I2L(l, i) ((l) = (PRInt64)(i))
+#define LL_MUL(r, a, b) ((r) = (a) * (b))
+#define LL_ADD(r, a, b) ((r) = (a) + (b))
+#define LL_SUB(r, a, b) ((r) = (a) - (b))
+
+/**********************************************************************/
+/************************* TYPES AND CONSTANTS ************************/
+/**********************************************************************/
+
+#define PR_MSEC_PER_SEC		1000UL
+#define PR_USEC_PER_SEC		1000000UL
+#define PR_NSEC_PER_SEC		1000000000UL
+#define PR_USEC_PER_MSEC	1000UL
+#define PR_NSEC_PER_MSEC	1000000UL
+
+/*
+ * PRTime --
+ *
+ *     NSPR represents basic time as 64-bit signed integers relative
+ *     to midnight (00:00:00), January 1, 1970 Greenwich Mean Time (GMT).
+ *     (GMT is also known as Coordinated Universal Time, UTC.)
+ *     The units of time are in microseconds. Negative times are allowed
+ *     to represent times prior to the January 1970 epoch. Such values are
+ *     intended to be exported to other systems or converted to human
+ *     readable form.
+ *
+ *     Notes on porting: PRTime corresponds to time_t in ANSI C.  NSPR 1.0
+ *     simply uses PRInt64.
+ */
+
+typedef PRInt64 PRTime;
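+
+/* Illustrative only (not part of the original header): one second after
+ * the epoch, expressed as a PRTime, is
+ *
+ *   PRTime one_second = (PRTime)PR_USEC_PER_SEC;  // 1,000,000 microseconds
+ */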
+
+/*
+ * Time zone and daylight saving time corrections applied to GMT to
+ * obtain the local time of some geographic location
+ */
+
+typedef struct PRTimeParameters {
+    PRInt32 tp_gmt_offset;     /* the offset from GMT in seconds */
+    PRInt32 tp_dst_offset;     /* contribution of DST in seconds */
+} PRTimeParameters;
+
+/*
+ * PRExplodedTime --
+ *
+ *     Time broken down into human-readable components such as year, month,
+ *     day, hour, minute, second, and microsecond.  Time zone and daylight
+ *     saving time corrections may be applied.  If they are applied, the
+ *     offsets from the GMT must be saved in the 'tm_params' field so that
+ *     all the information is available to reconstruct GMT.
+ *
+ *     Notes on porting: PRExplodedTime corresponds to struct tm in
+ *     ANSI C, with the following differences:
+ *       - an additional field tm_usec;
+ *       - replacing tm_isdst by tm_params;
+ *       - the month field is spelled tm_month, not tm_mon;
+ *       - we use absolute year, AD, not the year since 1900.
+ *     The corresponding type in NSPR 1.0 is called PRTime.  Below is
+ *     a table of date/time type correspondence in the three APIs:
+ *         API          time since epoch          time in components
+ *       ANSI C             time_t                  struct tm
+ *       NSPR 1.0           PRInt64                   PRTime
+ *       NSPR 2.0           PRTime                  PRExplodedTime
+ */
+
+typedef struct PRExplodedTime {
+    PRInt32 tm_usec;		    /* microseconds past tm_sec (0-999999) */
+    PRInt32 tm_sec;             /* seconds past tm_min (0-61, accommodating
+                                   up to two leap seconds) */
+    PRInt32 tm_min;             /* minutes past tm_hour (0-59) */
+    PRInt32 tm_hour;            /* hours past tm_day (0-23) */
+    PRInt32 tm_mday;            /* days past tm_mon (1-31, note that it
+				                starts from 1) */
+    PRInt32 tm_month;           /* months past tm_year (0-11, Jan = 0) */
+    PRInt16 tm_year;            /* absolute year, AD (note that we do not
+				                count from 1900) */
+
+    PRInt8 tm_wday;		        /* calculated day of the week
+				                (0-6, Sun = 0) */
+    PRInt16 tm_yday;            /* calculated day of the year
+				                (0-365, Jan 1 = 0) */
+
+    PRTimeParameters tm_params;  /* time parameters used by conversion */
+} PRExplodedTime;
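+
+/* Illustrative only (not part of the original header): 17-Jun-1995
+ * 23:11:25 GMT would be represented as
+ *   tm_year  = 1995   (absolute year, not since 1900)
+ *   tm_month = 5      (Jun; Jan = 0)
+ *   tm_mday  = 17, tm_hour = 23, tm_min = 11, tm_sec = 25, tm_usec = 0
+ *   tm_params = {0, 0}   (GMT: no zone or DST correction)
+ */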
+
+/*
+ * PRTimeParamFn --
+ *
+ *     A function of PRTimeParamFn type returns the time zone and
+ *     daylight saving time corrections for some geographic location,
+ *     given the current time in GMT.  The input argument gmt should
+ *     point to a PRExplodedTime that is in GMT, i.e., whose
+ *     tm_params contains all 0's.
+ *
+ *     For any time zone other than GMT, the computation is intended to
+ *     consist of two steps:
+ *       - Figure out the time zone correction, tp_gmt_offset.  This number
+ *         usually depends on the geographic location only.  But it may
+ *         also depend on the current time.  For example, all of China
+ *         is one time zone right now.  But this situation may change
+ *         in the future.
+ *       - Figure out the daylight saving time correction, tp_dst_offset.
+ *         This number depends on both the geographic location and the
+ *         current time.  Most of the DST rules are expressed in local
+ *         current time.  If so, one should apply the time zone correction
+ *         to GMT before applying the DST rules.
+ */
+
+typedef PRTimeParameters (PR_CALLBACK *PRTimeParamFn)(const PRExplodedTime *gmt);
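+
+/* Illustrative only (hypothetical helper, not part of NSPR): the simplest
+ * conforming PRTimeParamFn reports no correction at all, which is what a
+ * GMT parameter function should do:
+ *
+ *   static PRTimeParameters NoCorrection(const PRExplodedTime *gmt) {
+ *     PRTimeParameters p = {0, 0};  // tp_gmt_offset = 0, tp_dst_offset = 0
+ *     return p;
+ *   }
+ */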
+
+/**********************************************************************/
+/****************************** FUNCTIONS *****************************/
+/**********************************************************************/
+
+NSPR_API(PRTime)
+PR_ImplodeTime(const PRExplodedTime *exploded);
+
+/*
+ * Adjust exploded time to normalize field overflows after manipulation.
+ * Note that the following fields of PRExplodedTime should not be
+ * manipulated:
+ *   - tm_month and tm_year: because the number of days in a month and
+ *     number of days in a year are not constant, it is ambiguous to
+ *     manipulate the month and year fields, although one may be tempted
+ *     to.  For example, what does "a month from January 31st" mean?
+ *   - tm_wday and tm_yday: these fields are calculated by NSPR.  Users
+ *     should treat them as "read-only".
+ */
+
+NSPR_API(void) PR_NormalizeTime(
+    PRExplodedTime *exploded, PRTimeParamFn params);
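+
+/* Illustrative only: after manipulating one of the allowed fields, a
+ * caller re-normalizes so that overflows are folded into the larger
+ * fields and tm_wday/tm_yday are recomputed:
+ *
+ *   exploded.tm_sec += 120;                         // two minutes later
+ *   PR_NormalizeTime(&exploded, PR_GMTParameters);  // tm_min absorbs it
+ */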
+
+/**********************************************************************/
+/*********************** TIME PARAMETER FUNCTIONS *********************/
+/**********************************************************************/
+
+/* Time parameters that represent Greenwich Mean Time */
+NSPR_API(PRTimeParameters) PR_GMTParameters(const PRExplodedTime *gmt);
+
+/*
+ * This parses a time/date string into a PRTime
+ * (microseconds after "1-Jan-1970 00:00:00 GMT").
+ * It returns PR_SUCCESS on success, and PR_FAILURE
+ * if the time/date string can't be parsed.
+ *
+ * Many formats are handled, including:
+ *
+ *   14 Apr 89 03:20:12
+ *   14 Apr 89 03:20 GMT
+ *   Fri, 17 Mar 89 4:01:33
+ *   Fri, 17 Mar 89 4:01 GMT
+ *   Mon Jan 16 16:12 PDT 1989
+ *   Mon Jan 16 16:12 +0130 1989
+ *   6 May 1992 16:41-JST (Wednesday)
+ *   22-AUG-1993 10:59:12.82
+ *   22-AUG-1993 10:59pm
+ *   22-AUG-1993 12:59am
+ *   22-AUG-1993 12:59 PM
+ *   Friday, August 04, 1995 3:54 PM
+ *   06/21/95 04:24:34 PM
+ *   20/06/95 21:07
+ *   95-06-08 19:32:48 EDT
+ *   1995-06-17T23:11:25.342156Z
+ *
+ * If the input string doesn't contain a description of the timezone,
+ * we consult the `default_to_gmt' to decide whether the string should
+ * be interpreted relative to the local time zone (PR_FALSE) or GMT (PR_TRUE).
+ * The correct value for this argument depends on what standard specified
+ * the time string which you are parsing.
+ */
+
+/*
+ * This is the only function that should be called from outside base, and only
+ * from the unit test.
+ */
+
+BASE_EXPORT PRStatus PR_ParseTimeString (
+	const char *string,
+	PRBool default_to_gmt,
+	PRTime *result);
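+
+/*
+ * Illustrative usage (not part of the original header), defaulting to GMT
+ * when the string carries no zone:
+ *
+ *   PRTime t;
+ *   if (PR_ParseTimeString("22-AUG-1993 10:59:12.82", PR_TRUE, &t) ==
+ *       PR_SUCCESS) {
+ *     // t holds microseconds since 1-Jan-1970 00:00:00 GMT.
+ *   }
+ */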
+
+#endif  // BASE_PRTIME_H__
diff --git a/base/third_party/superfasthash/LICENSE b/base/third_party/superfasthash/LICENSE
new file mode 100644
index 0000000..3c40a3e
--- /dev/null
+++ b/base/third_party/superfasthash/LICENSE
@@ -0,0 +1,27 @@
+Paul Hsieh OLD BSD license
+
+Copyright (c) 2010, Paul Hsieh
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright notice, this
+  list of conditions and the following disclaimer in the documentation and/or
+  other materials provided with the distribution.
+* Neither my name, Paul Hsieh, nor the names of any other contributors to the
+  code use may not be used to endorse or promote products derived from this
+  software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/base/third_party/superfasthash/OWNERS b/base/third_party/superfasthash/OWNERS
new file mode 100644
index 0000000..633cc35
--- /dev/null
+++ b/base/third_party/superfasthash/OWNERS
@@ -0,0 +1 @@
+mgiuca@chromium.org
diff --git a/base/third_party/superfasthash/README.chromium b/base/third_party/superfasthash/README.chromium
new file mode 100644
index 0000000..d41ed77
--- /dev/null
+++ b/base/third_party/superfasthash/README.chromium
@@ -0,0 +1,29 @@
+Name: Paul Hsieh's SuperFastHash
+Short Name: SuperFastHash
+URL: http://www.azillionmonkeys.com/qed/hash.html
+Version: 0
+Date: 2012-02-21
+License: BSD
+License File: LICENSE
+Security Critical: yes
+
+Description:
+A fast string hashing algorithm.
+
+Local Modifications:
+- Added LICENSE.
+- Added license text as a comment to the top of superfasthash.c.
+- #include <stdint.h> instead of "pstdint.h".
+- #include <stdlib.h>.
+
+The license is a standard 3-clause BSD license with the following minor changes:
+
+"nor the names of its contributors may be used"
+is replaced with:
+"nor the names of any other contributors to the code use may not be used"
+
+and
+
+"IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE"
+is replaced with:
+"IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE"
diff --git a/base/third_party/superfasthash/superfasthash.c b/base/third_party/superfasthash/superfasthash.c
new file mode 100644
index 0000000..6e7687e
--- /dev/null
+++ b/base/third_party/superfasthash/superfasthash.c
@@ -0,0 +1,84 @@
+// Copyright (c) 2010, Paul Hsieh
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice, this
+//   list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither my name, Paul Hsieh, nor the names of any other contributors to the
+//   code use may not be used to endorse or promote products derived from this
+//   software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdint.h>
+#include <stdlib.h>
+#undef get16bits
+#if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) \
+  || defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__)
+#define get16bits(d) (*((const uint16_t *) (d)))
+#endif
+
+#if !defined (get16bits)
+#define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8)\
+                       +(uint32_t)(((const uint8_t *)(d))[0]) )
+#endif
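+
+/* Illustrative note (not upstream): the first definition reads both bytes
+ * with a single 16-bit load on targets known to tolerate unaligned access;
+ * the portable fallback assembles the same little-endian value byte by
+ * byte, e.g. get16bits("\x34\x12") == 0x1234. */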
+
+uint32_t SuperFastHash (const char * data, int len) {
+uint32_t hash = len, tmp;
+int rem;
+
+    if (len <= 0 || data == NULL) return 0;
+
+    rem = len & 3;
+    len >>= 2;
+
+    /* Main loop */
+    for (;len > 0; len--) {
+        hash  += get16bits (data);
+        tmp    = (get16bits (data+2) << 11) ^ hash;
+        hash   = (hash << 16) ^ tmp;
+        data  += 2*sizeof (uint16_t);
+        hash  += hash >> 11;
+    }
+
+    /* Handle end cases */
+    switch (rem) {
+        case 3: hash += get16bits (data);
+                hash ^= hash << 16;
+                hash ^= ((signed char)data[sizeof (uint16_t)]) << 18;
+                hash += hash >> 11;
+                break;
+        case 2: hash += get16bits (data);
+                hash ^= hash << 11;
+                hash += hash >> 17;
+                break;
+        case 1: hash += (signed char)*data;
+                hash ^= hash << 10;
+                hash += hash >> 1;
+    }
+
+    /* Force "avalanching" of final 127 bits */
+    hash ^= hash << 3;
+    hash += hash >> 5;
+    hash ^= hash << 4;
+    hash += hash >> 17;
+    hash ^= hash << 25;
+    hash += hash >> 6;
+
+    return hash;
+}
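+
+/* Illustrative only (not part of the upstream sources): hashing a string
+ * means passing its bytes and length, e.g.
+ *
+ *   const char kData[] = "example";
+ *   uint32_t h = SuperFastHash(kData, (int)(sizeof(kData) - 1));
+ */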
diff --git a/base/third_party/symbolize/BUILD.gn b/base/third_party/symbolize/BUILD.gn
new file mode 100644
index 0000000..0dc7c2f
--- /dev/null
+++ b/base/third_party/symbolize/BUILD.gn
@@ -0,0 +1,36 @@
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/config/compiler/compiler.gni")
+
+declare_args() {
+  # Stack traces will not include function names. Instead they will contain
+  # file and offset information that can be used with
+  # tools/valgrind/asan/asan_symbolize.py. By piping stderr through this script,
+  # and also enabling symbol_level = 2, you can get much more detailed stack
+  # traces with file names and line numbers, even in non-ASAN builds.
+  print_unsymbolized_stack_traces = is_asan || is_lsan || is_msan || is_tsan
+}
+
+static_library("symbolize") {
+  visibility = [ "//base/*" ]
+  sources = [
+    "config.h",
+    "demangle.cc",
+    "demangle.h",
+    "glog/logging.h",
+    "glog/raw_logging.h",
+    "symbolize.cc",
+    "symbolize.h",
+    "utilities.h",
+  ]
+
+  defines = []
+  if (print_unsymbolized_stack_traces) {
+    defines += [ "PRINT_UNSYMBOLIZED_STACK_TRACES" ]
+  }
+
+  configs -= [ "//build/config/compiler:chromium_code" ]
+  configs += [ "//build/config/compiler:no_chromium_code" ]
+}
diff --git a/base/third_party/symbolize/DEPS b/base/third_party/symbolize/DEPS
new file mode 100644
index 0000000..73eab50
--- /dev/null
+++ b/base/third_party/symbolize/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+  "+glog",
+]
diff --git a/base/third_party/symbolize/LICENSE b/base/third_party/symbolize/LICENSE
new file mode 100644
index 0000000..433a3d1
--- /dev/null
+++ b/base/third_party/symbolize/LICENSE
@@ -0,0 +1,28 @@
+// Copyright (c) 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/base/third_party/symbolize/README.chromium b/base/third_party/symbolize/README.chromium
new file mode 100644
index 0000000..ff78e0e
--- /dev/null
+++ b/base/third_party/symbolize/README.chromium
@@ -0,0 +1,24 @@
+Name: google-glog's symbolization library
+URL: https://github.com/google/glog
+License: BSD
+
+The following files are copied AS-IS from:
+http://code.google.com/p/google-glog/source/browse/#svn/trunk/src (r141)
+https://github.com/google/glog/tree/a5ffa884137f7687d0393ccba22557d583654a25
+
+- demangle.cc
+- demangle.h
+- symbolize.cc
+- symbolize.h
+
+Cherry picked upstream changes:
+https://github.com/google/glog/pull/115
+https://github.com/google/glog/pull/261
+to fix symbolization issues when using lld.
+
+The following files are minimal stubs created for use in Chromium:
+
+- config.h
+- glog/logging.h
+- glog/raw_logging.h
+- utilities.h
diff --git a/base/third_party/symbolize/config.h b/base/third_party/symbolize/config.h
new file mode 100644
index 0000000..945f5a6
--- /dev/null
+++ b/base/third_party/symbolize/config.h
@@ -0,0 +1,7 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#define GOOGLE_NAMESPACE google
+#define _END_GOOGLE_NAMESPACE_ }
+#define _START_GOOGLE_NAMESPACE_ namespace google {
diff --git a/base/third_party/symbolize/demangle.cc b/base/third_party/symbolize/demangle.cc
new file mode 100644
index 0000000..e858181
--- /dev/null
+++ b/base/third_party/symbolize/demangle.cc
@@ -0,0 +1,1304 @@
+// Copyright (c) 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Satoru Takabayashi
+//
+// For reference check out:
+// http://www.codesourcery.com/public/cxx-abi/abi.html#mangling
+//
+// Note that we have only partial C++0x support so far.
+
+#include <stdio.h>  // for NULL
+#include "demangle.h"
+
+_START_GOOGLE_NAMESPACE_
+
+typedef struct {
+  const char *abbrev;
+  const char *real_name;
+} AbbrevPair;
+
+// List of operators from Itanium C++ ABI.
+static const AbbrevPair kOperatorList[] = {
+  { "nw", "new" },
+  { "na", "new[]" },
+  { "dl", "delete" },
+  { "da", "delete[]" },
+  { "ps", "+" },
+  { "ng", "-" },
+  { "ad", "&" },
+  { "de", "*" },
+  { "co", "~" },
+  { "pl", "+" },
+  { "mi", "-" },
+  { "ml", "*" },
+  { "dv", "/" },
+  { "rm", "%" },
+  { "an", "&" },
+  { "or", "|" },
+  { "eo", "^" },
+  { "aS", "=" },
+  { "pL", "+=" },
+  { "mI", "-=" },
+  { "mL", "*=" },
+  { "dV", "/=" },
+  { "rM", "%=" },
+  { "aN", "&=" },
+  { "oR", "|=" },
+  { "eO", "^=" },
+  { "ls", "<<" },
+  { "rs", ">>" },
+  { "lS", "<<=" },
+  { "rS", ">>=" },
+  { "eq", "==" },
+  { "ne", "!=" },
+  { "lt", "<" },
+  { "gt", ">" },
+  { "le", "<=" },
+  { "ge", ">=" },
+  { "nt", "!" },
+  { "aa", "&&" },
+  { "oo", "||" },
+  { "pp", "++" },
+  { "mm", "--" },
+  { "cm", "," },
+  { "pm", "->*" },
+  { "pt", "->" },
+  { "cl", "()" },
+  { "ix", "[]" },
+  { "qu", "?" },
+  { "st", "sizeof" },
+  { "sz", "sizeof" },
+  { NULL, NULL },
+};
+
+// List of builtin types from Itanium C++ ABI.
+static const AbbrevPair kBuiltinTypeList[] = {
+  { "v", "void" },
+  { "w", "wchar_t" },
+  { "b", "bool" },
+  { "c", "char" },
+  { "a", "signed char" },
+  { "h", "unsigned char" },
+  { "s", "short" },
+  { "t", "unsigned short" },
+  { "i", "int" },
+  { "j", "unsigned int" },
+  { "l", "long" },
+  { "m", "unsigned long" },
+  { "x", "long long" },
+  { "y", "unsigned long long" },
+  { "n", "__int128" },
+  { "o", "unsigned __int128" },
+  { "f", "float" },
+  { "d", "double" },
+  { "e", "long double" },
+  { "g", "__float128" },
+  { "z", "ellipsis" },
+  { NULL, NULL }
+};
+
+// List of substitutions from the Itanium C++ ABI.
+static const AbbrevPair kSubstitutionList[] = {
+  { "St", "" },
+  { "Sa", "allocator" },
+  { "Sb", "basic_string" },
+  // std::basic_string<char, std::char_traits<char>,std::allocator<char> >
+  { "Ss", "string"},
+  // std::basic_istream<char, std::char_traits<char> >
+  { "Si", "istream" },
+  // std::basic_ostream<char, std::char_traits<char> >
+  { "So", "ostream" },
+  // std::basic_iostream<char, std::char_traits<char> >
+  { "Sd", "iostream" },
+  { NULL, NULL }
+};
+
+// State needed for demangling.
+typedef struct {
+  const char *mangled_cur;  // Cursor of mangled name.
+  char *out_cur;            // Cursor of output string.
+  const char *out_begin;    // Beginning of output string.
+  const char *out_end;      // End of output string.
+  const char *prev_name;    // For constructors/destructors.
+  int prev_name_length;     // For constructors/destructors.
+  short nest_level;         // For nested names.
+  bool append;              // Append flag.
+  bool overflowed;          // True if output gets overflowed.
+} State;
+
+// We don't use strlen() in libc since it's not guaranteed to be async
+// signal safe.
+static size_t StrLen(const char *str) {
+  size_t len = 0;
+  while (*str != '\0') {
+    ++str;
+    ++len;
+  }
+  return len;
+}
+
+// Returns true if "str" has at least "n" characters remaining.
+static bool AtLeastNumCharsRemaining(const char *str, int n) {
+  for (int i = 0; i < n; ++i) {
+    if (str[i] == '\0') {
+      return false;
+    }
+  }
+  return true;
+}
+
+// Returns true if "str" has "prefix" as a prefix.
+static bool StrPrefix(const char *str, const char *prefix) {
+  size_t i = 0;
+  while (str[i] != '\0' && prefix[i] != '\0' &&
+         str[i] == prefix[i]) {
+    ++i;
+  }
+  return prefix[i] == '\0';  // Consumed everything in "prefix".
+}
+
+static void InitState(State *state, const char *mangled,
+                      char *out, int out_size) {
+  state->mangled_cur = mangled;
+  state->out_cur = out;
+  state->out_begin = out;
+  state->out_end = out + out_size;
+  state->prev_name  = NULL;
+  state->prev_name_length = -1;
+  state->nest_level = -1;
+  state->append = true;
+  state->overflowed = false;
+}
+
+// Returns true and advances "mangled_cur" if we find "one_char_token"
+// at "mangled_cur" position.  It is assumed that "one_char_token" does
+// not contain '\0'.
+static bool ParseOneCharToken(State *state, const char one_char_token) {
+  if (state->mangled_cur[0] == one_char_token) {
+    ++state->mangled_cur;
+    return true;
+  }
+  return false;
+}
+
+// Returns true and advances "mangled_cur" if we find "two_char_token"
+// at "mangled_cur" position.  It is assumed that "two_char_token" does
+// not contain '\0'.
+static bool ParseTwoCharToken(State *state, const char *two_char_token) {
+  if (state->mangled_cur[0] == two_char_token[0] &&
+      state->mangled_cur[1] == two_char_token[1]) {
+    state->mangled_cur += 2;
+    return true;
+  }
+  return false;
+}
+
+// Returns true and advances "mangled_cur" if we find any character in
+// "char_class" at "mangled_cur" position.
+static bool ParseCharClass(State *state, const char *char_class) {
+  const char *p = char_class;
+  for (; *p != '\0'; ++p) {
+    if (state->mangled_cur[0] == *p) {
+      ++state->mangled_cur;
+      return true;
+    }
+  }
+  return false;
+}
+
+// This function is used for handling an optional non-terminal.
+static bool Optional(bool) {
+  return true;
+}
+
+// This function is used for handling <non-terminal>+ syntax.
+typedef bool (*ParseFunc)(State *);
+static bool OneOrMore(ParseFunc parse_func, State *state) {
+  if (parse_func(state)) {
+    while (parse_func(state)) {
+    }
+    return true;
+  }
+  return false;
+}
+
+// This function is used for handling <non-terminal>* syntax. The function
+// always returns true and must be followed by a termination token or a
+// terminating sequence not handled by parse_func (e.g.
+// ParseOneCharToken(state, 'E')).
+static bool ZeroOrMore(ParseFunc parse_func, State *state) {
+  while (parse_func(state)) {
+  }
+  return true;
+}
+
+// Append "str" at "out_cur".  If there is an overflow, "overflowed"
+// is set to true for later use.  The output string is ensured to
+// always terminate with '\0' as long as there is no overflow.
+static void Append(State *state, const char * const str, const int length) {
+  int i;
+  for (i = 0; i < length; ++i) {
+    if (state->out_cur + 1 < state->out_end) {  // +1 for '\0'
+      *state->out_cur = str[i];
+      ++state->out_cur;
+    } else {
+      state->overflowed = true;
+      break;
+    }
+  }
+  if (!state->overflowed) {
+    *state->out_cur = '\0';  // Terminate it with '\0'
+  }
+}
+
+// We don't use equivalents in libc to avoid locale issues.
+static bool IsLower(char c) {
+  return c >= 'a' && c <= 'z';
+}
+
+static bool IsAlpha(char c) {
+  return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z');
+}
+
+static bool IsDigit(char c) {
+  return c >= '0' && c <= '9';
+}
+
+// Returns true if "str" is a function clone suffix.  These suffixes are used
+// by GCC 4.5.x and later versions to indicate functions which have been
+// cloned during optimization.  We treat any sequence (.<alpha>+.<digit>+)+ as
+// a function clone suffix.
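+// (Illustrative, not from upstream: suffixes such as ".clone.3", or a
+// combination like ".part.9.isra.2", match this pattern.)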
+static bool IsFunctionCloneSuffix(const char *str) {
+  size_t i = 0;
+  while (str[i] != '\0') {
+    // Consume a single .<alpha>+.<digit>+ sequence.
+    if (str[i] != '.' || !IsAlpha(str[i + 1])) {
+      return false;
+    }
+    i += 2;
+    while (IsAlpha(str[i])) {
+      ++i;
+    }
+    if (str[i] != '.' || !IsDigit(str[i + 1])) {
+      return false;
+    }
+    i += 2;
+    while (IsDigit(str[i])) {
+      ++i;
+    }
+  }
+  return true;  // Consumed everything in "str".
+}
+
+// Append "str" with some tweaks, iff "append" state is true.
+// Returns true so that it can be placed in "if" conditions.
+static void MaybeAppendWithLength(State *state, const char * const str,
+                                  const int length) {
+  if (state->append && length > 0) {
+    // Append a space if the output buffer ends with '<' and "str"
+    // starts with '<' to avoid <<<.
+    if (str[0] == '<' && state->out_begin < state->out_cur  &&
+        state->out_cur[-1] == '<') {
+      Append(state, " ", 1);
+    }
+    // Remember the last identifier name for ctors/dtors.
+    if (IsAlpha(str[0]) || str[0] == '_') {
+      state->prev_name = state->out_cur;
+      state->prev_name_length = length;
+    }
+    Append(state, str, length);
+  }
+}
+
+// A convenient wrapper around MaybeAppendWithLength().  Returns true so
+// that it can be placed in "if" conditions.
+static bool MaybeAppend(State *state, const char * const str) {
+  if (state->append) {
+    int length = StrLen(str);
+    MaybeAppendWithLength(state, str, length);
+  }
+  return true;
+}
+
+// This function is used for handling nested names.
+static bool EnterNestedName(State *state) {
+  state->nest_level = 0;
+  return true;
+}
+
+// This function is used for handling nested names.
+static bool LeaveNestedName(State *state, short prev_value) {
+  state->nest_level = prev_value;
+  return true;
+}
+
+// Disable the append mode so that function parameters, etc. are not printed.
+static bool DisableAppend(State *state) {
+  state->append = false;
+  return true;
+}
+
+// Restore the append mode to the previous state.
+static bool RestoreAppend(State *state, bool prev_value) {
+  state->append = prev_value;
+  return true;
+}
+
+// Increase the nest level for nested names.
+static void MaybeIncreaseNestLevel(State *state) {
+  if (state->nest_level > -1) {
+    ++state->nest_level;
+  }
+}
+
+// Appends :: for nested names if necessary.
+static void MaybeAppendSeparator(State *state) {
+  if (state->nest_level >= 1) {
+    MaybeAppend(state, "::");
+  }
+}
+
+// Cancel the last separator if necessary.
+static void MaybeCancelLastSeparator(State *state) {
+  if (state->nest_level >= 1 && state->append &&
+      state->out_begin <= state->out_cur - 2) {
+    state->out_cur -= 2;
+    *state->out_cur = '\0';
+  }
+}
+
+// Returns true if the identifier of the given length pointed to by
+// "mangled_cur" is an anonymous namespace.
+static bool IdentifierIsAnonymousNamespace(State *state, int length) {
+  static const char anon_prefix[] = "_GLOBAL__N_";
+  return (length > (int)sizeof(anon_prefix) - 1 &&  // Should be longer.
+          StrPrefix(state->mangled_cur, anon_prefix));
+}
+
+// Forward declarations of our parsing functions.
+static bool ParseMangledName(State *state);
+static bool ParseEncoding(State *state);
+static bool ParseName(State *state);
+static bool ParseUnscopedName(State *state);
+static bool ParseUnscopedTemplateName(State *state);
+static bool ParseNestedName(State *state);
+static bool ParsePrefix(State *state);
+static bool ParseUnqualifiedName(State *state);
+static bool ParseSourceName(State *state);
+static bool ParseLocalSourceName(State *state);
+static bool ParseNumber(State *state, int *number_out);
+static bool ParseFloatNumber(State *state);
+static bool ParseSeqId(State *state);
+static bool ParseIdentifier(State *state, int length);
+static bool ParseOperatorName(State *state);
+static bool ParseSpecialName(State *state);
+static bool ParseCallOffset(State *state);
+static bool ParseNVOffset(State *state);
+static bool ParseVOffset(State *state);
+static bool ParseCtorDtorName(State *state);
+static bool ParseType(State *state);
+static bool ParseCVQualifiers(State *state);
+static bool ParseBuiltinType(State *state);
+static bool ParseFunctionType(State *state);
+static bool ParseBareFunctionType(State *state);
+static bool ParseClassEnumType(State *state);
+static bool ParseArrayType(State *state);
+static bool ParsePointerToMemberType(State *state);
+static bool ParseTemplateParam(State *state);
+static bool ParseTemplateTemplateParam(State *state);
+static bool ParseTemplateArgs(State *state);
+static bool ParseTemplateArg(State *state);
+static bool ParseExpression(State *state);
+static bool ParseExprPrimary(State *state);
+static bool ParseLocalName(State *state);
+static bool ParseDiscriminator(State *state);
+static bool ParseSubstitution(State *state);
+
+// Implementation note: the following code is a straightforward
+// translation of the Itanium C++ ABI defined in BNF with a couple of
+// exceptions.
+//
+// - Support GNU extensions not defined in the Itanium C++ ABI
+// - <prefix> and <template-prefix> are combined to avoid infinite loop
+// - Reorder patterns to shorten the code
+// - Reorder patterns to give greedier functions precedence
+//   We'll mark "Less greedy than" for these cases in the code
+//
+// Each parsing function changes the state and returns true on
+// success.  Otherwise, it leaves the state unchanged and returns false.
+// To ensure that the state isn't changed in the latter case, we save the
+// original state before we call more than one parsing function
+// consecutively with &&, and restore the state if unsuccessful.  See
+// ParseEncoding() as an example of this convention.  We follow the
+// convention throughout the code.
+//
+// Originally we tried to do demangling without following the full ABI
+// syntax but it turned out we needed to follow the full syntax to
+// parse complicated cases like nested template arguments.  Note that
+// implementing a full-fledged demangler isn't trivial (libiberty's
+// cp-demangle.c has +4300 lines).
+//
+// Note that (foo) in <(foo) ...> is a modifier to be ignored.
+//
+// Reference:
+// - Itanium C++ ABI
+//   <http://www.codesourcery.com/cxx-abi/abi.html#mangling>
+
+// <mangled-name> ::= _Z <encoding>
+static bool ParseMangledName(State *state) {
+  return ParseTwoCharToken(state, "_Z") && ParseEncoding(state);
+}
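+// (Illustrative: "_Z3foov" is "_Z" followed by the encoding -- a length-3
+// <source-name> "foo" plus the bare function type "v" (void) -- and
+// demangles here to "foo()".)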
+
+// <encoding> ::= <(function) name> <bare-function-type>
+//            ::= <(data) name>
+//            ::= <special-name>
+static bool ParseEncoding(State *state) {
+  State copy = *state;
+  if (ParseName(state) && ParseBareFunctionType(state)) {
+    return true;
+  }
+  *state = copy;
+
+  if (ParseName(state) || ParseSpecialName(state)) {
+    return true;
+  }
+  return false;
+}
+
+// <name> ::= <nested-name>
+//        ::= <unscoped-template-name> <template-args>
+//        ::= <unscoped-name>
+//        ::= <local-name>
+static bool ParseName(State *state) {
+  if (ParseNestedName(state) || ParseLocalName(state)) {
+    return true;
+  }
+
+  State copy = *state;
+  if (ParseUnscopedTemplateName(state) &&
+      ParseTemplateArgs(state)) {
+    return true;
+  }
+  *state = copy;
+
+  // Less greedy than <unscoped-template-name> <template-args>.
+  if (ParseUnscopedName(state)) {
+    return true;
+  }
+  return false;
+}
+
+// <unscoped-name> ::= <unqualified-name>
+//                 ::= St <unqualified-name>
+static bool ParseUnscopedName(State *state) {
+  if (ParseUnqualifiedName(state)) {
+    return true;
+  }
+
+  State copy = *state;
+  if (ParseTwoCharToken(state, "St") &&
+      MaybeAppend(state, "std::") &&
+      ParseUnqualifiedName(state)) {
+    return true;
+  }
+  *state = copy;
+  return false;
+}
+
+// <unscoped-template-name> ::= <unscoped-name>
+//                          ::= <substitution>
+static bool ParseUnscopedTemplateName(State *state) {
+  return ParseUnscopedName(state) || ParseSubstitution(state);
+}
+
+// <nested-name> ::= N [<CV-qualifiers>] <prefix> <unqualified-name> E
+//               ::= N [<CV-qualifiers>] <template-prefix> <template-args> E
+static bool ParseNestedName(State *state) {
+  State copy = *state;
+  if (ParseOneCharToken(state, 'N') &&
+      EnterNestedName(state) &&
+      Optional(ParseCVQualifiers(state)) &&
+      ParsePrefix(state) &&
+      LeaveNestedName(state, copy.nest_level) &&
+      ParseOneCharToken(state, 'E')) {
+    return true;
+  }
+  *state = copy;
+  return false;
+}
+
+// This part is tricky.  If we translated these productions to code
+// literally, we would end up in an infinite loop.  Hence we merge
+// them to avoid that case.
+//
+// <prefix> ::= <prefix> <unqualified-name>
+//          ::= <template-prefix> <template-args>
+//          ::= <template-param>
+//          ::= <substitution>
+//          ::= # empty
+// <template-prefix> ::= <prefix> <(template) unqualified-name>
+//                   ::= <template-param>
+//                   ::= <substitution>
+static bool ParsePrefix(State *state) {
+  bool has_something = false;
+  while (true) {
+    MaybeAppendSeparator(state);
+    if (ParseTemplateParam(state) ||
+        ParseSubstitution(state) ||
+        ParseUnscopedName(state)) {
+      has_something = true;
+      MaybeIncreaseNestLevel(state);
+      continue;
+    }
+    MaybeCancelLastSeparator(state);
+    if (has_something && ParseTemplateArgs(state)) {
+      return ParsePrefix(state);
+    } else {
+      break;
+    }
+  }
+  return true;
+}
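+
+// Illustrative example (not from the original comments): for the
+// nested name in "_ZN3Foo3BarEv" (see the table in demangle.h), this
+// loop consumes "3Foo" and then "3Bar"; MaybeAppendSeparator() inserts
+// the "::" between them, so the output so far reads "Foo::Bar".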
+
+// <unqualified-name> ::= <operator-name>
+//                    ::= <ctor-dtor-name>
+//                    ::= <source-name>
+//                    ::= <local-source-name>
+static bool ParseUnqualifiedName(State *state) {
+  return (ParseOperatorName(state) ||
+          ParseCtorDtorName(state) ||
+          ParseSourceName(state) ||
+          ParseLocalSourceName(state));
+}
+
+// <source-name> ::= <positive length number> <identifier>
+static bool ParseSourceName(State *state) {
+  State copy = *state;
+  int length = -1;
+  if (ParseNumber(state, &length) && ParseIdentifier(state, length)) {
+    return true;
+  }
+  *state = copy;
+  return false;
+}
+
+// <local-source-name> ::= L <source-name> [<discriminator>]
+//
+// References:
+//   http://gcc.gnu.org/bugzilla/show_bug.cgi?id=31775
+//   http://gcc.gnu.org/viewcvs?view=rev&revision=124467
+static bool ParseLocalSourceName(State *state) {
+  State copy = *state;
+  if (ParseOneCharToken(state, 'L') && ParseSourceName(state) &&
+      Optional(ParseDiscriminator(state))) {
+    return true;
+  }
+  *state = copy;
+  return false;
+}
+
+// <number> ::= [n] <non-negative decimal integer>
+// If "number_out" is non-null, then *number_out is set to the value of the
+// parsed number on success.
+static bool ParseNumber(State *state, int *number_out) {
+  int sign = 1;
+  if (ParseOneCharToken(state, 'n')) {
+    sign = -1;
+  }
+  const char *p = state->mangled_cur;
+  int number = 0;
+  for (;*p != '\0'; ++p) {
+    if (IsDigit(*p)) {
+      number = number * 10 + (*p - '0');
+    } else {
+      break;
+    }
+  }
+  if (p != state->mangled_cur) {  // Conversion succeeded.
+    state->mangled_cur = p;
+    if (number_out != NULL) {
+      *number_out = number * sign;
+    }
+    return true;
+  }
+  return false;
+}
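+
+// Illustrative examples (not from the original comments): on input
+// "5", ParseNumber() consumes the digit and sets *number_out to 5; on
+// input "n5", the leading 'n' flips the sign and *number_out becomes
+// -5.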
+
+// Floating-point literals are encoded using a fixed-length lowercase
+// hexadecimal string.
+static bool ParseFloatNumber(State *state) {
+  const char *p = state->mangled_cur;
+  for (;*p != '\0'; ++p) {
+    if (!IsDigit(*p) && !(*p >= 'a' && *p <= 'f')) {
+      break;
+    }
+  }
+  if (p != state->mangled_cur) {  // Conversion succeeded.
+    state->mangled_cur = p;
+    return true;
+  }
+  return false;
+}
+
+// The <seq-id> is a sequence number in base 36,
+// using digits and uppercase letters.
+static bool ParseSeqId(State *state) {
+  const char *p = state->mangled_cur;
+  for (;*p != '\0'; ++p) {
+    if (!IsDigit(*p) && !(*p >= 'A' && *p <= 'Z')) {
+      break;
+    }
+  }
+  if (p != state->mangled_cur) {  // Conversion succeeded.
+    state->mangled_cur = p;
+    return true;
+  }
+  return false;
+}
+
+// <identifier> ::= <unqualified source code identifier> (of given length)
+static bool ParseIdentifier(State *state, int length) {
+  if (length == -1 ||
+      !AtLeastNumCharsRemaining(state->mangled_cur, length)) {
+    return false;
+  }
+  if (IdentifierIsAnonymousNamespace(state, length)) {
+    MaybeAppend(state, "(anonymous namespace)");
+  } else {
+    MaybeAppendWithLength(state, state->mangled_cur, length);
+  }
+  state->mangled_cur += length;
+  return true;
+}
+
+// <operator-name> ::= nw, and other two-letter cases
+//                 ::= cv <type>  # (cast)
+//                 ::= v  <digit> <source-name> # vendor extended operator
+static bool ParseOperatorName(State *state) {
+  if (!AtLeastNumCharsRemaining(state->mangled_cur, 2)) {
+    return false;
+  }
+  // First check with "cv" (cast) case.
+  State copy = *state;
+  if (ParseTwoCharToken(state, "cv") &&
+      MaybeAppend(state, "operator ") &&
+      EnterNestedName(state) &&
+      ParseType(state) &&
+      LeaveNestedName(state, copy.nest_level)) {
+    return true;
+  }
+  *state = copy;
+
+  // Then vendor extended operators.
+  if (ParseOneCharToken(state, 'v') && ParseCharClass(state, "0123456789") &&
+      ParseSourceName(state)) {
+    return true;
+  }
+  *state = copy;
+
+  // Other operator names should start with a lowercase letter followed
+  // by a lowercase or uppercase letter.
+  if (!(IsLower(state->mangled_cur[0]) &&
+        IsAlpha(state->mangled_cur[1]))) {
+    return false;
+  }
+  // We may want to perform a binary search if we really need speed.
+  const AbbrevPair *p;
+  for (p = kOperatorList; p->abbrev != NULL; ++p) {
+    if (state->mangled_cur[0] == p->abbrev[0] &&
+        state->mangled_cur[1] == p->abbrev[1]) {
+      MaybeAppend(state, "operator");
+      if (IsLower(*p->real_name)) {  // new, delete, etc.
+        MaybeAppend(state, " ");
+      }
+      MaybeAppend(state, p->real_name);
+      state->mangled_cur += 2;
+      return true;
+    }
+  }
+  return false;
+}
+
+// <special-name> ::= TV <type>
+//                ::= TT <type>
+//                ::= TI <type>
+//                ::= TS <type>
+//                ::= Tc <call-offset> <call-offset> <(base) encoding>
+//                ::= GV <(object) name>
+//                ::= T <call-offset> <(base) encoding>
+// G++ extensions:
+//                ::= TC <type> <(offset) number> _ <(base) type>
+//                ::= TF <type>
+//                ::= TJ <type>
+//                ::= GR <name>
+//                ::= GA <encoding>
+//                ::= Th <call-offset> <(base) encoding>
+//                ::= Tv <call-offset> <(base) encoding>
+//
+// Note: we don't care much about them since they don't appear in
+// stack traces.  They are special data.
+static bool ParseSpecialName(State *state) {
+  State copy = *state;
+  if (ParseOneCharToken(state, 'T') &&
+      ParseCharClass(state, "VTIS") &&
+      ParseType(state)) {
+    return true;
+  }
+  *state = copy;
+
+  if (ParseTwoCharToken(state, "Tc") && ParseCallOffset(state) &&
+      ParseCallOffset(state) && ParseEncoding(state)) {
+    return true;
+  }
+  *state = copy;
+
+  if (ParseTwoCharToken(state, "GV") &&
+      ParseName(state)) {
+    return true;
+  }
+  *state = copy;
+
+  if (ParseOneCharToken(state, 'T') && ParseCallOffset(state) &&
+      ParseEncoding(state)) {
+    return true;
+  }
+  *state = copy;
+
+  // G++ extensions
+  if (ParseTwoCharToken(state, "TC") && ParseType(state) &&
+      ParseNumber(state, NULL) && ParseOneCharToken(state, '_') &&
+      DisableAppend(state) &&
+      ParseType(state)) {
+    RestoreAppend(state, copy.append);
+    return true;
+  }
+  *state = copy;
+
+  if (ParseOneCharToken(state, 'T') && ParseCharClass(state, "FJ") &&
+      ParseType(state)) {
+    return true;
+  }
+  *state = copy;
+
+  if (ParseTwoCharToken(state, "GR") && ParseName(state)) {
+    return true;
+  }
+  *state = copy;
+
+  if (ParseTwoCharToken(state, "GA") && ParseEncoding(state)) {
+    return true;
+  }
+  *state = copy;
+
+  if (ParseOneCharToken(state, 'T') && ParseCharClass(state, "hv") &&
+      ParseCallOffset(state) && ParseEncoding(state)) {
+    return true;
+  }
+  *state = copy;
+  return false;
+}
+
+// <call-offset> ::= h <nv-offset> _
+//               ::= v <v-offset> _
+static bool ParseCallOffset(State *state) {
+  State copy = *state;
+  if (ParseOneCharToken(state, 'h') &&
+      ParseNVOffset(state) && ParseOneCharToken(state, '_')) {
+    return true;
+  }
+  *state = copy;
+
+  if (ParseOneCharToken(state, 'v') &&
+      ParseVOffset(state) && ParseOneCharToken(state, '_')) {
+    return true;
+  }
+  *state = copy;
+
+  return false;
+}
+
+// <nv-offset> ::= <(offset) number>
+static bool ParseNVOffset(State *state) {
+  return ParseNumber(state, NULL);
+}
+
+// <v-offset>  ::= <(offset) number> _ <(virtual offset) number>
+static bool ParseVOffset(State *state) {
+  State copy = *state;
+  if (ParseNumber(state, NULL) && ParseOneCharToken(state, '_') &&
+      ParseNumber(state, NULL)) {
+    return true;
+  }
+  *state = copy;
+  return false;
+}
+
+// <ctor-dtor-name> ::= C1 | C2 | C3
+//                  ::= D0 | D1 | D2
+static bool ParseCtorDtorName(State *state) {
+  State copy = *state;
+  if (ParseOneCharToken(state, 'C') &&
+      ParseCharClass(state, "123")) {
+    const char * const prev_name = state->prev_name;
+    const int prev_name_length = state->prev_name_length;
+    MaybeAppendWithLength(state, prev_name, prev_name_length);
+    return true;
+  }
+  *state = copy;
+
+  if (ParseOneCharToken(state, 'D') &&
+      ParseCharClass(state, "012")) {
+    const char * const prev_name = state->prev_name;
+    const int prev_name_length = state->prev_name_length;
+    MaybeAppend(state, "~");
+    MaybeAppendWithLength(state, prev_name, prev_name_length);
+    return true;
+  }
+  *state = copy;
+  return false;
+}
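+
+// Illustrative example (taken from the table in demangle.h): in
+// "_ZN3FooC1Ev", the "C1" token causes the previously parsed name
+// "Foo" to be appended again, producing "Foo::Foo()".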
+
+// <type> ::= <CV-qualifiers> <type>
+//        ::= P <type>   # pointer-to
+//        ::= R <type>   # reference-to
+//        ::= O <type>   # rvalue reference-to (C++0x)
+//        ::= C <type>   # complex pair (C 2000)
+//        ::= G <type>   # imaginary (C 2000)
+//        ::= U <source-name> <type>  # vendor extended type qualifier
+//        ::= <builtin-type>
+//        ::= <function-type>
+//        ::= <class-enum-type>
+//        ::= <array-type>
+//        ::= <pointer-to-member-type>
+//        ::= <template-template-param> <template-args>
+//        ::= <template-param>
+//        ::= <substitution>
+//        ::= Dp <type>          # pack expansion of (C++0x)
+//        ::= Dt <expression> E  # decltype of an id-expression or class
+//                               # member access (C++0x)
+//        ::= DT <expression> E  # decltype of an expression (C++0x)
+//
+static bool ParseType(State *state) {
+  // We should check <CV-qualifiers> and the OPRCG prefixes first.
+  State copy = *state;
+  if (ParseCVQualifiers(state) && ParseType(state)) {
+    return true;
+  }
+  *state = copy;
+
+  if (ParseCharClass(state, "OPRCG") && ParseType(state)) {
+    return true;
+  }
+  *state = copy;
+
+  if (ParseTwoCharToken(state, "Dp") && ParseType(state)) {
+    return true;
+  }
+  *state = copy;
+
+  if (ParseOneCharToken(state, 'D') && ParseCharClass(state, "tT") &&
+      ParseExpression(state) && ParseOneCharToken(state, 'E')) {
+    return true;
+  }
+  *state = copy;
+
+  if (ParseOneCharToken(state, 'U') && ParseSourceName(state) &&
+      ParseType(state)) {
+    return true;
+  }
+  *state = copy;
+
+  if (ParseBuiltinType(state) ||
+      ParseFunctionType(state) ||
+      ParseClassEnumType(state) ||
+      ParseArrayType(state) ||
+      ParsePointerToMemberType(state) ||
+      ParseSubstitution(state)) {
+    return true;
+  }
+
+  if (ParseTemplateTemplateParam(state) &&
+      ParseTemplateArgs(state)) {
+    return true;
+  }
+  *state = copy;
+
+  // Less greedy than <template-template-param> <template-args>.
+  if (ParseTemplateParam(state)) {
+    return true;
+  }
+
+  return false;
+}
+
+// <CV-qualifiers> ::= [r] [V] [K]
+// We don't allow an empty <CV-qualifiers> here, to avoid an infinite
+// loop in ParseType().
+static bool ParseCVQualifiers(State *state) {
+  int num_cv_qualifiers = 0;
+  num_cv_qualifiers += ParseOneCharToken(state, 'r');
+  num_cv_qualifiers += ParseOneCharToken(state, 'V');
+  num_cv_qualifiers += ParseOneCharToken(state, 'K');
+  return num_cv_qualifiers > 0;
+}
+
+// <builtin-type> ::= v, etc.
+//                ::= u <source-name>
+static bool ParseBuiltinType(State *state) {
+  const AbbrevPair *p;
+  for (p = kBuiltinTypeList; p->abbrev != NULL; ++p) {
+    if (state->mangled_cur[0] == p->abbrev[0]) {
+      MaybeAppend(state, p->real_name);
+      ++state->mangled_cur;
+      return true;
+    }
+  }
+
+  State copy = *state;
+  if (ParseOneCharToken(state, 'u') && ParseSourceName(state)) {
+    return true;
+  }
+  *state = copy;
+  return false;
+}
+
+// <function-type> ::= F [Y] <bare-function-type> E
+static bool ParseFunctionType(State *state) {
+  State copy = *state;
+  if (ParseOneCharToken(state, 'F') &&
+      Optional(ParseOneCharToken(state, 'Y')) &&
+      ParseBareFunctionType(state) && ParseOneCharToken(state, 'E')) {
+    return true;
+  }
+  *state = copy;
+  return false;
+}
+
+// <bare-function-type> ::= <(signature) type>+
+static bool ParseBareFunctionType(State *state) {
+  State copy = *state;
+  DisableAppend(state);
+  if (OneOrMore(ParseType, state)) {
+    RestoreAppend(state, copy.append);
+    MaybeAppend(state, "()");
+    return true;
+  }
+  *state = copy;
+  return false;
+}
+
+// <class-enum-type> ::= <name>
+static bool ParseClassEnumType(State *state) {
+  return ParseName(state);
+}
+
+// <array-type> ::= A <(positive dimension) number> _ <(element) type>
+//              ::= A [<(dimension) expression>] _ <(element) type>
+static bool ParseArrayType(State *state) {
+  State copy = *state;
+  if (ParseOneCharToken(state, 'A') && ParseNumber(state, NULL) &&
+      ParseOneCharToken(state, '_') && ParseType(state)) {
+    return true;
+  }
+  *state = copy;
+
+  if (ParseOneCharToken(state, 'A') && Optional(ParseExpression(state)) &&
+      ParseOneCharToken(state, '_') && ParseType(state)) {
+    return true;
+  }
+  *state = copy;
+  return false;
+}
+
+// <pointer-to-member-type> ::= M <(class) type> <(member) type>
+static bool ParsePointerToMemberType(State *state) {
+  State copy = *state;
+  if (ParseOneCharToken(state, 'M') && ParseType(state) &&
+      ParseType(state)) {
+    return true;
+  }
+  *state = copy;
+  return false;
+}
+
+// <template-param> ::= T_
+//                  ::= T <parameter-2 non-negative number> _
+static bool ParseTemplateParam(State *state) {
+  if (ParseTwoCharToken(state, "T_")) {
+    MaybeAppend(state, "?");  // We don't support template substitutions.
+    return true;
+  }
+
+  State copy = *state;
+  if (ParseOneCharToken(state, 'T') && ParseNumber(state, NULL) &&
+      ParseOneCharToken(state, '_')) {
+    MaybeAppend(state, "?");  // We don't support template substitutions.
+    return true;
+  }
+  *state = copy;
+  return false;
+}
+
+// <template-template-param> ::= <template-param>
+//                           ::= <substitution>
+static bool ParseTemplateTemplateParam(State *state) {
+  return (ParseTemplateParam(state) ||
+          ParseSubstitution(state));
+}
+
+// <template-args> ::= I <template-arg>+ E
+static bool ParseTemplateArgs(State *state) {
+  State copy = *state;
+  DisableAppend(state);
+  if (ParseOneCharToken(state, 'I') &&
+      OneOrMore(ParseTemplateArg, state) &&
+      ParseOneCharToken(state, 'E')) {
+    RestoreAppend(state, copy.append);
+    MaybeAppend(state, "<>");
+    return true;
+  }
+  *state = copy;
+  return false;
+}
+
+// <template-arg>  ::= <type>
+//                 ::= <expr-primary>
+//                 ::= I <template-arg>* E        # argument pack
+//                 ::= X <expression> E
+static bool ParseTemplateArg(State *state) {
+  State copy = *state;
+  if (ParseOneCharToken(state, 'I') &&
+      ZeroOrMore(ParseTemplateArg, state) &&
+      ParseOneCharToken(state, 'E')) {
+    return true;
+  }
+  *state = copy;
+
+  if (ParseType(state) ||
+      ParseExprPrimary(state)) {
+    return true;
+  }
+  *state = copy;
+
+  if (ParseOneCharToken(state, 'X') && ParseExpression(state) &&
+      ParseOneCharToken(state, 'E')) {
+    return true;
+  }
+  *state = copy;
+  return false;
+}
+
+// <expression> ::= <template-param>
+//              ::= <expr-primary>
+//              ::= <unary operator-name> <expression>
+//              ::= <binary operator-name> <expression> <expression>
+//              ::= <trinary operator-name> <expression> <expression>
+//                  <expression>
+//              ::= st <type>
+//              ::= sr <type> <unqualified-name> <template-args>
+//              ::= sr <type> <unqualified-name>
+static bool ParseExpression(State *state) {
+  if (ParseTemplateParam(state) || ParseExprPrimary(state)) {
+    return true;
+  }
+
+  State copy = *state;
+  if (ParseOperatorName(state) &&
+      ParseExpression(state) &&
+      ParseExpression(state) &&
+      ParseExpression(state)) {
+    return true;
+  }
+  *state = copy;
+
+  if (ParseOperatorName(state) &&
+      ParseExpression(state) &&
+      ParseExpression(state)) {
+    return true;
+  }
+  *state = copy;
+
+  if (ParseOperatorName(state) &&
+      ParseExpression(state)) {
+    return true;
+  }
+  *state = copy;
+
+  if (ParseTwoCharToken(state, "st") && ParseType(state)) {
+    return true;
+  }
+  *state = copy;
+
+  if (ParseTwoCharToken(state, "sr") && ParseType(state) &&
+      ParseUnqualifiedName(state) &&
+      ParseTemplateArgs(state)) {
+    return true;
+  }
+  *state = copy;
+
+  if (ParseTwoCharToken(state, "sr") && ParseType(state) &&
+      ParseUnqualifiedName(state)) {
+    return true;
+  }
+  *state = copy;
+  return false;
+}
+
+// <expr-primary> ::= L <type> <(value) number> E
+//                ::= L <type> <(value) float> E
+//                ::= L <mangled-name> E
+//                // A bug in g++'s C++ ABI version 2 (-fabi-version=2).
+//                ::= LZ <encoding> E
+static bool ParseExprPrimary(State *state) {
+  State copy = *state;
+  if (ParseOneCharToken(state, 'L') && ParseType(state) &&
+      ParseNumber(state, NULL) &&
+      ParseOneCharToken(state, 'E')) {
+    return true;
+  }
+  *state = copy;
+
+  if (ParseOneCharToken(state, 'L') && ParseType(state) &&
+      ParseFloatNumber(state) &&
+      ParseOneCharToken(state, 'E')) {
+    return true;
+  }
+  *state = copy;
+
+  if (ParseOneCharToken(state, 'L') && ParseMangledName(state) &&
+      ParseOneCharToken(state, 'E')) {
+    return true;
+  }
+  *state = copy;
+
+  if (ParseTwoCharToken(state, "LZ") && ParseEncoding(state) &&
+      ParseOneCharToken(state, 'E')) {
+    return true;
+  }
+  *state = copy;
+
+  return false;
+}
+
+// <local-name> ::= Z <(function) encoding> E <(entity) name>
+//                  [<discriminator>]
+//              ::= Z <(function) encoding> E s [<discriminator>]
+static bool ParseLocalName(State *state) {
+  State copy = *state;
+  if (ParseOneCharToken(state, 'Z') && ParseEncoding(state) &&
+      ParseOneCharToken(state, 'E') && MaybeAppend(state, "::") &&
+      ParseName(state) && Optional(ParseDiscriminator(state))) {
+    return true;
+  }
+  *state = copy;
+
+  if (ParseOneCharToken(state, 'Z') && ParseEncoding(state) &&
+      ParseTwoCharToken(state, "Es") && Optional(ParseDiscriminator(state))) {
+    return true;
+  }
+  *state = copy;
+  return false;
+}
+
+// <discriminator> ::= _ <(non-negative) number>
+static bool ParseDiscriminator(State *state) {
+  State copy = *state;
+  if (ParseOneCharToken(state, '_') && ParseNumber(state, NULL)) {
+    return true;
+  }
+  *state = copy;
+  return false;
+}
+
+// <substitution> ::= S_
+//                ::= S <seq-id> _
+//                ::= St, etc.
+static bool ParseSubstitution(State *state) {
+  if (ParseTwoCharToken(state, "S_")) {
+    MaybeAppend(state, "?");  // We don't support substitutions.
+    return true;
+  }
+
+  State copy = *state;
+  if (ParseOneCharToken(state, 'S') && ParseSeqId(state) &&
+      ParseOneCharToken(state, '_')) {
+    MaybeAppend(state, "?");  // We don't support substitutions.
+    return true;
+  }
+  *state = copy;
+
+  // Expand abbreviations like "St" => "std".
+  if (ParseOneCharToken(state, 'S')) {
+    const AbbrevPair *p;
+    for (p = kSubstitutionList; p->abbrev != NULL; ++p) {
+      if (state->mangled_cur[0] == p->abbrev[1]) {
+        MaybeAppend(state, "std");
+        if (p->real_name[0] != '\0') {
+          MaybeAppend(state, "::");
+          MaybeAppend(state, p->real_name);
+        }
+        ++state->mangled_cur;
+        return true;
+      }
+    }
+  }
+  *state = copy;
+  return false;
+}
+
+// Parse <mangled-name>, optionally followed by either a function-clone suffix
+// or version suffix.  Returns true only if all of "mangled_cur" was consumed.
+static bool ParseTopLevelMangledName(State *state) {
+  if (ParseMangledName(state)) {
+    if (state->mangled_cur[0] != '\0') {
+      // Drop trailing function clone suffix, if any.
+      if (IsFunctionCloneSuffix(state->mangled_cur)) {
+        return true;
+      }
+      // Append trailing version suffix if any.
+      // ex. _Z3foo@@GLIBCXX_3.4
+      if (state->mangled_cur[0] == '@') {
+        MaybeAppend(state, state->mangled_cur);
+        return true;
+      }
+      return false;  // Unconsumed suffix.
+    }
+    return true;
+  }
+  return false;
+}
+
+// The demangler entry point.
+bool Demangle(const char *mangled, char *out, int out_size) {
+  State state;
+  InitState(&state, mangled, out, out_size);
+  return ParseTopLevelMangledName(&state) && !state.overflowed;
+}
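+
+// Illustrative use (not part of the original file; the buffer size is
+// an arbitrary choice):
+//
+//   char demangled[256];
+//   if (Demangle("_ZN3Foo3BarEv", demangled, sizeof(demangled))) {
+//     // demangled now holds "Foo::Bar()" (see the table in demangle.h).
+//   }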
+
+_END_GOOGLE_NAMESPACE_
diff --git a/base/third_party/symbolize/demangle.h b/base/third_party/symbolize/demangle.h
new file mode 100644
index 0000000..9c75915
--- /dev/null
+++ b/base/third_party/symbolize/demangle.h
@@ -0,0 +1,84 @@
+// Copyright (c) 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Satoru Takabayashi
+//
+// An async-signal-safe and thread-safe demangler for Itanium C++ ABI
+// (aka G++ V3 ABI).
+
+// The demangler is implemented to be used in async signal handlers to
+// symbolize stack traces.  We cannot use libstdc++'s
+// abi::__cxa_demangle() in such signal handlers since it's not
+// async-signal-safe (it uses malloc() internally).
+//
+// Note that this demangler doesn't support full demangling.  More
+// specifically, it doesn't print types of function parameters and
+// types of template arguments.  It just skips them.  However, it's
+// still very useful to extract basic information such as class,
+// function, constructor, destructor, and operator names.
+//
+// See the implementation note in demangle.cc if you are interested.
+//
+// Example:
+//
+// | Mangled Name  | The Demangler | abi::__cxa_demangle()
+// |---------------|---------------|-----------------------
+// | _Z1fv         | f()           | f()
+// | _Z1fi         | f()           | f(int)
+// | _Z3foo3bar    | foo()         | foo(bar)
+// | _Z1fIiEvi     | f<>()         | void f<int>(int)
+// | _ZN1N1fE      | N::f          | N::f
+// | _ZN3Foo3BarEv | Foo::Bar()    | Foo::Bar()
+// | _Zrm1XS_"     | operator%()   | operator%(X, X)
+// | _ZN3FooC1Ev   | Foo::Foo()    | Foo::Foo()
+// | _Z1fSs        | f()           | f(std::basic_string<char,
+// |               |               |   std::char_traits<char>,
+// |               |               |   std::allocator<char> >)
+//
+// See the unit test for more examples.
+//
+// Note: we might want to write demanglers for ABIs other than Itanium
+// C++ ABI in the future.
+//
+
+#ifndef BASE_DEMANGLE_H_
+#define BASE_DEMANGLE_H_
+
+#include "config.h"
+
+_START_GOOGLE_NAMESPACE_
+
+// Demangle "mangled".  On success, return true and write the
+// demangled symbol name to "out".  Otherwise, return false.
+// "out" is modified even if demangling is unsuccessful.
+bool Demangle(const char *mangled, char *out, int out_size);
+
+_END_GOOGLE_NAMESPACE_
+
+#endif  // BASE_DEMANGLE_H_
diff --git a/base/third_party/symbolize/glog/logging.h b/base/third_party/symbolize/glog/logging.h
new file mode 100644
index 0000000..a42c306
--- /dev/null
+++ b/base/third_party/symbolize/glog/logging.h
@@ -0,0 +1,5 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Empty.
diff --git a/base/third_party/symbolize/glog/raw_logging.h b/base/third_party/symbolize/glog/raw_logging.h
new file mode 100644
index 0000000..f5515c4
--- /dev/null
+++ b/base/third_party/symbolize/glog/raw_logging.h
@@ -0,0 +1,6 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#define WARNING 1;
+#define RAW_LOG(severity, ...);  // Do nothing.
diff --git a/base/third_party/symbolize/symbolize.cc b/base/third_party/symbolize/symbolize.cc
new file mode 100644
index 0000000..e6fbb84
--- /dev/null
+++ b/base/third_party/symbolize/symbolize.cc
@@ -0,0 +1,883 @@
+// Copyright (c) 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Satoru Takabayashi
+// Stack-footprint reduction work done by Raksit Ashok
+//
+// Implementation note:
+//
+// We don't use heaps but only use stacks.  We want to reduce the
+// stack consumption so that the symbolizer can run on small stacks.
+//
+// Here are some numbers collected with GCC 4.1.0 on x86:
+// - sizeof(Elf32_Sym)  = 16
+// - sizeof(Elf32_Shdr) = 40
+// - sizeof(Elf64_Sym)  = 24
+// - sizeof(Elf64_Shdr) = 64
+//
+// This implementation is intended to be async-signal-safe but uses
+// some functions which are not guaranteed to be so, such as memchr()
+// and memmove().  We assume they are async-signal-safe.
+//
+// An additional header can be specified via the GLOG_BUILD_CONFIG_INCLUDE
+// macro to add platform-specific defines (e.g. OS_OPENBSD).
+
+#ifdef GLOG_BUILD_CONFIG_INCLUDE
+#include GLOG_BUILD_CONFIG_INCLUDE
+#endif  // GLOG_BUILD_CONFIG_INCLUDE
+
+#include "utilities.h"
+
+#if defined(HAVE_SYMBOLIZE)
+
+#include <string.h>
+
+#include <algorithm>
+#include <limits>
+
+#include "symbolize.h"
+#include "demangle.h"
+
+_START_GOOGLE_NAMESPACE_
+
+// We don't use assert() since it's not guaranteed to be
+// async-signal-safe.  Instead we define a minimal assertion
+// macro. So far, we don't need pretty printing for __FILE__, etc.
+
+// A wrapper for abort() to make it callable in ? :.
+static int AssertFail() {
+  abort();
+  return 0;  // Should not reach.
+}
+
+#define SAFE_ASSERT(expr) ((expr) ? 0 : AssertFail())
+
+static SymbolizeCallback g_symbolize_callback = NULL;
+void InstallSymbolizeCallback(SymbolizeCallback callback) {
+  g_symbolize_callback = callback;
+}
+
+static SymbolizeOpenObjectFileCallback g_symbolize_open_object_file_callback =
+    NULL;
+void InstallSymbolizeOpenObjectFileCallback(
+    SymbolizeOpenObjectFileCallback callback) {
+  g_symbolize_open_object_file_callback = callback;
+}
+
+// This function wraps the Demangle function to provide an interface
+// where the input symbol is demangled in-place.
+// To keep stack consumption low, we would like this function to not
+// get inlined.
+static ATTRIBUTE_NOINLINE void DemangleInplace(char *out, int out_size) {
+  char demangled[256];  // Big enough for sane demangled symbols.
+  if (Demangle(out, demangled, sizeof(demangled))) {
+    // Demangling succeeded. Copy to out if the space allows.
+    size_t len = strlen(demangled);
+    if (len + 1 <= (size_t)out_size) {  // +1 for '\0'.
+      SAFE_ASSERT(len < sizeof(demangled));
+      memmove(out, demangled, len + 1);
+    }
+  }
+}
+
+_END_GOOGLE_NAMESPACE_
+
+#if defined(__ELF__)
+
+#include <dlfcn.h>
+#if defined(OS_OPENBSD)
+#include <sys/exec_elf.h>
+#else
+#include <elf.h>
+#endif
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stddef.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "symbolize.h"
+#include "config.h"
+#include "glog/raw_logging.h"
+
+// Re-runs fn until it doesn't cause EINTR.
+#define NO_INTR(fn)   do {} while ((fn) < 0 && errno == EINTR)
+
+_START_GOOGLE_NAMESPACE_
+
+// Read up to "count" bytes from file descriptor "fd" into the buffer
+// starting at "buf" while handling short reads and EINTR.  On
+// success, return the number of bytes read.  Otherwise, return -1.
+static ssize_t ReadPersistent(const int fd, void *buf, const size_t count) {
+  SAFE_ASSERT(fd >= 0);
+  SAFE_ASSERT(count <= std::numeric_limits<ssize_t>::max());
+  char *buf0 = reinterpret_cast<char *>(buf);
+  ssize_t num_bytes = 0;
+  while (num_bytes < count) {
+    ssize_t len;
+    NO_INTR(len = read(fd, buf0 + num_bytes, count - num_bytes));
+    if (len < 0) {  // There was an error other than EINTR.
+      return -1;
+    }
+    if (len == 0) {  // Reached EOF.
+      break;
+    }
+    num_bytes += len;
+  }
+  SAFE_ASSERT(num_bytes <= count);
+  return num_bytes;
+}
+
+// Read up to "count" bytes from "offset" in the file pointed by file
+// descriptor "fd" into the buffer starting at "buf".  On success,
+// return the number of bytes read.  Otherwise, return -1.
+static ssize_t ReadFromOffset(const int fd, void *buf,
+                              const size_t count, const off_t offset) {
+  off_t off = lseek(fd, offset, SEEK_SET);
+  if (off == (off_t)-1) {
+    return -1;
+  }
+  return ReadPersistent(fd, buf, count);
+}
+
+// Try reading exactly "count" bytes at offset "offset" from the file
+// pointed to by "fd" into the buffer starting at "buf" while handling
+// short reads and EINTR.  On success, return true. Otherwise, return
+// false.
+static bool ReadFromOffsetExact(const int fd, void *buf,
+                                const size_t count, const off_t offset) {
+  ssize_t len = ReadFromOffset(fd, buf, count, offset);
+  return len == count;
+}
+
+// Returns elf_header.e_type if the file pointed to by "fd" is an ELF
+// binary, or -1 otherwise.
+static int FileGetElfType(const int fd) {
+  ElfW(Ehdr) elf_header;
+  if (!ReadFromOffsetExact(fd, &elf_header, sizeof(elf_header), 0)) {
+    return -1;
+  }
+  if (memcmp(elf_header.e_ident, ELFMAG, SELFMAG) != 0) {
+    return -1;
+  }
+  return elf_header.e_type;
+}
+
+// Read the section headers in the given ELF binary, and if a section
+// of the specified type is found, set the output to this section header
+// and return true.  Otherwise, return false.
+// To keep stack consumption low, we would like this function to not get
+// inlined.
+static ATTRIBUTE_NOINLINE bool
+GetSectionHeaderByType(const int fd, ElfW(Half) sh_num, const off_t sh_offset,
+                       ElfW(Word) type, ElfW(Shdr) *out) {
+  // Read at most 16 section headers at a time to save read calls.
+  ElfW(Shdr) buf[16];
+  for (int i = 0; i < sh_num;) {
+    const ssize_t num_bytes_left = (sh_num - i) * sizeof(buf[0]);
+    const ssize_t num_bytes_to_read =
+        (sizeof(buf) > num_bytes_left) ? num_bytes_left : sizeof(buf);
+    const ssize_t len = ReadFromOffset(fd, buf, num_bytes_to_read,
+                                       sh_offset + i * sizeof(buf[0]));
+    SAFE_ASSERT(len % sizeof(buf[0]) == 0);
+    const ssize_t num_headers_in_buf = len / sizeof(buf[0]);
+    SAFE_ASSERT(num_headers_in_buf <= sizeof(buf) / sizeof(buf[0]));
+    for (int j = 0; j < num_headers_in_buf; ++j) {
+      if (buf[j].sh_type == type) {
+        *out = buf[j];
+        return true;
+      }
+    }
+    i += num_headers_in_buf;
+  }
+  return false;
+}
+
+// There is no particular reason to limit section name to 63 characters,
+// but there has (as yet) been no need for anything longer either.
+const int kMaxSectionNameLen = 64;
+
+// name_len should include terminating '\0'.
+bool GetSectionHeaderByName(int fd, const char *name, size_t name_len,
+                            ElfW(Shdr) *out) {
+  ElfW(Ehdr) elf_header;
+  if (!ReadFromOffsetExact(fd, &elf_header, sizeof(elf_header), 0)) {
+    return false;
+  }
+
+  ElfW(Shdr) shstrtab;
+  off_t shstrtab_offset = (elf_header.e_shoff +
+                           elf_header.e_shentsize * elf_header.e_shstrndx);
+  if (!ReadFromOffsetExact(fd, &shstrtab, sizeof(shstrtab), shstrtab_offset)) {
+    return false;
+  }
+
+  for (int i = 0; i < elf_header.e_shnum; ++i) {
+    off_t section_header_offset = (elf_header.e_shoff +
+                                   elf_header.e_shentsize * i);
+    if (!ReadFromOffsetExact(fd, out, sizeof(*out), section_header_offset)) {
+      return false;
+    }
+    char header_name[kMaxSectionNameLen];
+    if (sizeof(header_name) < name_len) {
+      RAW_LOG(WARNING, "Section name '%s' is too long (%" PRIuS "); "
+              "section will not be found (even if present).", name, name_len);
+      // No point in even trying.
+      return false;
+    }
+    off_t name_offset = shstrtab.sh_offset + out->sh_name;
+    ssize_t n_read = ReadFromOffset(fd, &header_name, name_len, name_offset);
+    if (n_read == -1) {
+      return false;
+    } else if (n_read != name_len) {
+      // Short read -- name could be at end of file.
+      continue;
+    }
+    if (memcmp(header_name, name, name_len) == 0) {
+      return true;
+    }
+  }
+  return false;
+}
+
+// Iterate over the symbols in the given symbol table and look for the
+// symbol containing "pc".  On success, return true and write the
+// symbol name to "out".  Otherwise, return false.
+// To keep stack consumption low, we would like this function to not get
+// inlined.
+static ATTRIBUTE_NOINLINE bool
+FindSymbol(uint64_t pc, const int fd, char *out, int out_size,
+           uint64_t symbol_offset, const ElfW(Shdr) *strtab,
+           const ElfW(Shdr) *symtab) {
+  if (symtab == NULL) {
+    return false;
+  }
+  const int num_symbols = symtab->sh_size / symtab->sh_entsize;
+  for (int i = 0; i < num_symbols;) {
+    off_t offset = symtab->sh_offset + i * symtab->sh_entsize;
+
+    // If we are reading Elf64_Sym's, we want to limit this array to
+    // 32 elements (to keep stack consumption low), otherwise we can
+    // have a 64 element Elf32_Sym array.
+#if __WORDSIZE == 64
+#define NUM_SYMBOLS 32
+#else
+#define NUM_SYMBOLS 64
+#endif
+
+    // Read at most NUM_SYMBOLS symbols at once to save read() calls.
+    ElfW(Sym) buf[NUM_SYMBOLS];
+    int num_symbols_to_read = std::min(NUM_SYMBOLS, num_symbols - i);
+    const ssize_t len =
+        ReadFromOffset(fd, &buf, sizeof(buf[0]) * num_symbols_to_read, offset);
+    SAFE_ASSERT(len % sizeof(buf[0]) == 0);
+    const ssize_t num_symbols_in_buf = len / sizeof(buf[0]);
+    SAFE_ASSERT(num_symbols_in_buf <= num_symbols_to_read);
+    for (int j = 0; j < num_symbols_in_buf; ++j) {
+      const ElfW(Sym)& symbol = buf[j];
+      uint64_t start_address = symbol.st_value;
+      start_address += symbol_offset;
+      uint64_t end_address = start_address + symbol.st_size;
+      if (symbol.st_value != 0 &&  // Skip null value symbols.
+          symbol.st_shndx != 0 &&  // Skip undefined symbols.
+          start_address <= pc && pc < end_address) {
+        ssize_t len1 = ReadFromOffset(fd, out, out_size,
+                                      strtab->sh_offset + symbol.st_name);
+        if (len1 <= 0 || memchr(out, '\0', out_size) == NULL) {
+          return false;
+        }
+        return true;  // Obtained the symbol name.
+      }
+    }
+    i += num_symbols_in_buf;
+  }
+  return false;
+}
+
+// Get the symbol name of "pc" from the file pointed by "fd".  Process
+// both regular and dynamic symbol tables if necessary.  On success,
+// write the symbol name to "out" and return true.  Otherwise, return
+// false.
+static bool GetSymbolFromObjectFile(const int fd,
+                                    uint64_t pc,
+                                    char* out,
+                                    int out_size,
+                                    uint64_t base_address) {
+  // Read the ELF header.
+  ElfW(Ehdr) elf_header;
+  if (!ReadFromOffsetExact(fd, &elf_header, sizeof(elf_header), 0)) {
+    return false;
+  }
+
+  ElfW(Shdr) symtab, strtab;
+
+  // Consult a regular symbol table first.
+  if (GetSectionHeaderByType(fd, elf_header.e_shnum, elf_header.e_shoff,
+                             SHT_SYMTAB, &symtab)) {
+    if (!ReadFromOffsetExact(fd, &strtab, sizeof(strtab), elf_header.e_shoff +
+                             symtab.sh_link * sizeof(symtab))) {
+      return false;
+    }
+    if (FindSymbol(pc, fd, out, out_size, base_address, &strtab, &symtab)) {
+      return true;  // Found the symbol in a regular symbol table.
+    }
+  }
+
+  // If the symbol is not found, then consult a dynamic symbol table.
+  if (GetSectionHeaderByType(fd, elf_header.e_shnum, elf_header.e_shoff,
+                             SHT_DYNSYM, &symtab)) {
+    if (!ReadFromOffsetExact(fd, &strtab, sizeof(strtab), elf_header.e_shoff +
+                             symtab.sh_link * sizeof(symtab))) {
+      return false;
+    }
+    if (FindSymbol(pc, fd, out, out_size, base_address, &strtab, &symtab)) {
+      return true;  // Found the symbol in a dynamic symbol table.
+    }
+  }
+
+  return false;
+}
+
+namespace {
+// Thin wrapper around a file descriptor so that the file descriptor
+// gets closed for sure.
+struct FileDescriptor {
+  const int fd_;
+  explicit FileDescriptor(int fd) : fd_(fd) {}
+  ~FileDescriptor() {
+    if (fd_ >= 0) {
+      NO_INTR(close(fd_));
+    }
+  }
+  int get() { return fd_; }
+
+ private:
+  explicit FileDescriptor(const FileDescriptor&);
+  void operator=(const FileDescriptor&);
+};
+
+// Helper class for reading lines from file.
+//
+// Note: we don't use ProcMapsIterator since the object is big (it has
+// a 5k array member) and uses async-unsafe functions such as sscanf()
+// and snprintf().
+class LineReader {
+ public:
+  explicit LineReader(int fd, char *buf, int buf_len) : fd_(fd),
+    buf_(buf), buf_len_(buf_len), bol_(buf), eol_(buf), eod_(buf) {
+  }
+
+  // Read a '\n'-terminated line from the file.  On success, modify
+  // "bol" and "eol", then return true.  Otherwise, return false.
+  //
+  // Note: if the last line doesn't end with '\n', the line will be
+  // dropped.  This is intentional, to keep the code simple.
+  bool ReadLine(const char **bol, const char **eol) {
+    if (BufferIsEmpty()) {  // First time.
+      const ssize_t num_bytes = ReadPersistent(fd_, buf_, buf_len_);
+      if (num_bytes <= 0) {  // EOF or error.
+        return false;
+      }
+      eod_ = buf_ + num_bytes;
+      bol_ = buf_;
+    } else {
+      bol_ = eol_ + 1;  // Advance to the next line in the buffer.
+      SAFE_ASSERT(bol_ <= eod_);  // "bol_" can point to "eod_".
+      if (!HasCompleteLine()) {
+        const int incomplete_line_length = eod_ - bol_;
+        // Move the trailing incomplete line to the beginning.
+        memmove(buf_, bol_, incomplete_line_length);
+        // Read text from file and append it.
+        char * const append_pos = buf_ + incomplete_line_length;
+        const int capacity_left = buf_len_ - incomplete_line_length;
+        const ssize_t num_bytes = ReadPersistent(fd_, append_pos,
+                                                 capacity_left);
+        if (num_bytes <= 0) {  // EOF or error.
+          return false;
+        }
+        eod_ = append_pos + num_bytes;
+        bol_ = buf_;
+      }
+    }
+    eol_ = FindLineFeed();
+    if (eol_ == NULL) {  // '\n' not found.  Malformed line.
+      return false;
+    }
+    *eol_ = '\0';  // Replace '\n' with '\0'.
+
+    *bol = bol_;
+    *eol = eol_;
+    return true;
+  }
+
+  // Beginning of line.
+  const char *bol() {
+    return bol_;
+  }
+
+  // End of line.
+  const char *eol() {
+    return eol_;
+  }
+
+ private:
+  explicit LineReader(const LineReader&);
+  void operator=(const LineReader&);
+
+  char *FindLineFeed() {
+    return reinterpret_cast<char *>(memchr(bol_, '\n', eod_ - bol_));
+  }
+
+  bool BufferIsEmpty() {
+    return buf_ == eod_;
+  }
+
+  bool HasCompleteLine() {
+    return !BufferIsEmpty() && FindLineFeed() != NULL;
+  }
+
+  const int fd_;
+  char * const buf_;
+  const int buf_len_;
+  char *bol_;
+  char *eol_;
+  const char *eod_;  // End of data in "buf_".
+};
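+
+// Sketch of typical use (illustrative only; "fd" is assumed to be an
+// already-open file descriptor):
+//
+//   char buf[1024];
+//   LineReader reader(fd, buf, sizeof(buf));
+//   const char *bol, *eol;
+//   while (reader.ReadLine(&bol, &eol)) {
+//     // [bol, eol) now holds one line, '\0'-terminated in place of '\n'.
+//   }
+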
+}  // namespace
+
+// Place the hex number read from "start" into "*hex".  The pointer to
+// the first non-hex character or "end" is returned.
+static char *GetHex(const char *start, const char *end, uint64_t *hex) {
+  *hex = 0;
+  const char *p;
+  for (p = start; p < end; ++p) {
+    int ch = *p;
+    if ((ch >= '0' && ch <= '9') ||
+        (ch >= 'A' && ch <= 'F') || (ch >= 'a' && ch <= 'f')) {
+      *hex = (*hex << 4) | (ch < 'A' ? ch - '0' : (ch & 0xF) + 9);
+    } else {  // Encountered the first non-hex character.
+      break;
+    }
+  }
+  SAFE_ASSERT(p <= end);
+  return const_cast<char *>(p);
+}
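+
+// Illustrative example (not from the original comments), using the
+// /proc/self/maps line format shown further below:
+//
+//   uint64_t hex;
+//   const char *line = "08048000-0804c000";
+//   const char *p = GetHex(line, line + strlen(line), &hex);
+//   // Now hex == 0x08048000 and p points at the '-'.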
+
+// Searches for the object file (from /proc/self/maps) that contains
+// the specified pc.  If found, sets |start_address| to the start address
+// of where this object file is mapped in memory, sets the module base
+// address into |base_address|, copies the object file name into
+// |out_file_name|, and attempts to open the object file.  If the object
+// file is opened successfully, returns the file descriptor.  Otherwise,
+// returns -1.  |out_file_name_size| is the size of the file name buffer
+// (including the null-terminator).
+static ATTRIBUTE_NOINLINE int
+OpenObjectFileContainingPcAndGetStartAddress(uint64_t pc,
+                                             uint64_t &start_address,
+                                             uint64_t &base_address,
+                                             char *out_file_name,
+                                             int out_file_name_size) {
+  int object_fd;
+
+  int maps_fd;
+  NO_INTR(maps_fd = open("/proc/self/maps", O_RDONLY));
+  FileDescriptor wrapped_maps_fd(maps_fd);
+  if (wrapped_maps_fd.get() < 0) {
+    return -1;
+  }
+
+  int mem_fd;
+  NO_INTR(mem_fd = open("/proc/self/mem", O_RDONLY));
+  FileDescriptor wrapped_mem_fd(mem_fd);
+  if (wrapped_mem_fd.get() < 0) {
+    return -1;
+  }
+
+  // Iterate over maps and look for the map containing the pc.  Then
+  // look into the symbol tables inside.
+  char buf[1024];  // Big enough for a line of a sane /proc/self/maps.
+  int num_maps = 0;
+  LineReader reader(wrapped_maps_fd.get(), buf, sizeof(buf));
+  while (true) {
+    num_maps++;
+    const char *cursor;
+    const char *eol;
+    if (!reader.ReadLine(&cursor, &eol)) {  // EOF or malformed line.
+      return -1;
+    }
+
+    // Start parsing line in /proc/self/maps.  Here is an example:
+    //
+    // 08048000-0804c000 r-xp 00000000 08:01 2142121    /bin/cat
+    //
+    // We want start address (08048000), end address (0804c000), flags
+    // (r-xp) and file name (/bin/cat).
+
+    // Read start address.
+    cursor = GetHex(cursor, eol, &start_address);
+    if (cursor == eol || *cursor != '-') {
+      return -1;  // Malformed line.
+    }
+    ++cursor;  // Skip '-'.
+
+    // Read end address.
+    uint64_t end_address;
+    cursor = GetHex(cursor, eol, &end_address);
+    if (cursor == eol || *cursor != ' ') {
+      return -1;  // Malformed line.
+    }
+    ++cursor;  // Skip ' '.
+
+    // Read flags.  Skip flags until we encounter a space or eol.
+    const char * const flags_start = cursor;
+    while (cursor < eol && *cursor != ' ') {
+      ++cursor;
+    }
+    // We expect at least four letters for flags (ex. "r-xp").
+    if (cursor == eol || cursor < flags_start + 4) {
+      return -1;  // Malformed line.
+    }
+
+    // Determine the base address by reading ELF headers in process memory.
+    ElfW(Ehdr) ehdr;
+    if (flags_start[0] == 'r' &&
+        ReadFromOffsetExact(mem_fd, &ehdr, sizeof(ElfW(Ehdr)), start_address) &&
+        memcmp(ehdr.e_ident, ELFMAG, SELFMAG) == 0) {
+      switch (ehdr.e_type) {
+        case ET_EXEC:
+          base_address = 0;
+          break;
+        case ET_DYN:
+          // Find the segment containing file offset 0. This will correspond
+          // to the ELF header that we just read. Normally this will have
+          // virtual address 0, but this is not guaranteed. We must subtract
+          // the virtual address from the address where the ELF header was
+          // mapped to get the base address.
+          //
+          // If we fail to find a segment for file offset 0, use the address
+          // of the ELF header as the base address.
+          base_address = start_address;
+          for (unsigned i = 0; i != ehdr.e_phnum; ++i) {
+            ElfW(Phdr) phdr;
+            if (ReadFromOffsetExact(
+                    mem_fd, &phdr, sizeof(phdr),
+                    start_address + ehdr.e_phoff + i * sizeof(phdr)) &&
+                phdr.p_type == PT_LOAD && phdr.p_offset == 0) {
+              base_address = start_address - phdr.p_vaddr;
+              break;
+            }
+          }
+          break;
+        default:
+          // ET_REL or ET_CORE. These aren't directly executable, so they don't
+          // affect the base address.
+          break;
+      }
+    }
+
+    // Check start and end addresses.
+    if (!(start_address <= pc && pc < end_address)) {
+      continue;  // We skip this map.  PC isn't in this map.
+    }
+
+    // Check flags.  We are only interested in "r-x" maps.
+    if (memcmp(flags_start, "r-x", 3) != 0) {  // Not a "r-x" map.
+      continue;  // We skip this map.
+    }
+    ++cursor;  // Skip ' '.
+
+    // Read file offset.
+    uint64_t file_offset;
+    cursor = GetHex(cursor, eol, &file_offset);
+    if (cursor == eol || *cursor != ' ') {
+      return -1;  // Malformed line.
+    }
+    ++cursor;  // Skip ' '.
+
+    // Skip to file name.  "cursor" now points to dev.  We need to
+    // skip at least two spaces for dev and inode.
+    int num_spaces = 0;
+    while (cursor < eol) {
+      if (*cursor == ' ') {
+        ++num_spaces;
+      } else if (num_spaces >= 2) {
+        // The first non-space character after skipping two spaces
+        // is the beginning of the file name.
+        break;
+      }
+      ++cursor;
+    }
+    if (cursor == eol) {
+      return -1;  // Malformed line.
+    }
+
+    // Finally, "cursor" now points to file name of our interest.
+    NO_INTR(object_fd = open(cursor, O_RDONLY));
+    if (object_fd < 0) {
+      // Failed to open object file.  Copy the object file name to
+      // |out_file_name|.
+      strncpy(out_file_name, cursor, out_file_name_size);
+      // Making sure |out_file_name| is always null-terminated.
+      out_file_name[out_file_name_size - 1] = '\0';
+      return -1;
+    }
+    return object_fd;
+  }
+}
+
+// POSIX doesn't define any async-signal-safe function for converting
+// an integer to ASCII.  We'll have to define our own version.
+// itoa_r() converts a (signed) integer to ASCII. It returns "buf", if the
+// conversion was successful or NULL otherwise. It never writes more than "sz"
+// bytes. Output will be truncated as needed, and a NUL character is always
+// appended.
+// NOTE: code from sandbox/linux/seccomp-bpf/demo.cc.
+char *itoa_r(intptr_t i, char *buf, size_t sz, int base, size_t padding) {
+  // Make sure we can write at least one NUL byte.
+  size_t n = 1;
+  if (n > sz)
+    return NULL;
+
+  if (base < 2 || base > 16) {
+    buf[0] = '\000';
+    return NULL;
+  }
+
+  char *start = buf;
+
+  uintptr_t j = i;
+
+  // Handle negative numbers (only for base 10).
+  if (i < 0 && base == 10) {
+    // This does "j = -i" while avoiding integer overflow.
+    j = static_cast<uintptr_t>(-(i + 1)) + 1;
+
+    // Make sure we can write the '-' character.
+    if (++n > sz) {
+      buf[0] = '\000';
+      return NULL;
+    }
+    *start++ = '-';
+  }
+
+  // Loop until we have converted the entire number. Output at least one
+  // character (i.e. '0').
+  char *ptr = start;
+  do {
+    // Make sure there is still enough space left in our output buffer.
+    if (++n > sz) {
+      buf[0] = '\000';
+      return NULL;
+    }
+
+    // Output the next digit.
+    *ptr++ = "0123456789abcdef"[j % base];
+    j /= base;
+
+    if (padding > 0)
+      padding--;
+  } while (j > 0 || padding > 0);
+
+  // Terminate the output with a NUL character.
+  *ptr = '\000';
+
+  // Conversion to ASCII actually resulted in the digits being in reverse
+  // order. We can't easily generate them in forward order, as we can't tell
+  // the number of characters needed until we are done converting.
+  // So, now, we reverse the string (except for the possible "-" sign).
+  while (--ptr > start) {
+    char ch = *ptr;
+    *ptr = *start;
+    *start++ = ch;
+  }
+  return buf;
+}
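+
+// Illustrative uses (not part of the original file):
+//
+//   char buf[16];
+//   itoa_r(-1234, buf, sizeof(buf), 10, 0);   // buf becomes "-1234".
+//   itoa_r(0xbeef, buf, sizeof(buf), 16, 8);  // buf becomes "0000beef".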
+
+// Safely appends string |source| to string |dest|.  Never writes past the
+// buffer size |dest_size| and guarantees that |dest| is null-terminated.
+void SafeAppendString(const char* source, char* dest, int dest_size) {
+  int dest_string_length = strlen(dest);
+  SAFE_ASSERT(dest_string_length < dest_size);
+  dest += dest_string_length;
+  dest_size -= dest_string_length;
+  strncpy(dest, source, dest_size);
+  // Making sure |dest| is always null-terminated.
+  dest[dest_size - 1] = '\0';
+}
+
+// Converts a 64-bit value into a hex string, and safely appends it to |dest|.
+// Never writes past the buffer size |dest_size| and guarantees that |dest| is
+// null-terminated.
+void SafeAppendHexNumber(uint64_t value, char* dest, int dest_size) {
+  // 64-bit numbers in hex can have up to 16 digits.
+  char buf[17] = {'\0'};
+  SafeAppendString(itoa_r(value, buf, sizeof(buf), 16, 0), dest, dest_size);
+}
+
+// The implementation of our symbolization routine.  If it
+// successfully finds the symbol containing "pc" and obtains the
+// symbol name, returns true and writes the symbol name to "out".
+// Otherwise, returns false.  If a callback was installed via
+// InstallSymbolizeCallback(), it is also invoked here, and "out" is
+// used as its output.
+// To keep stack consumption low, we would like this function to not
+// get inlined.
+static ATTRIBUTE_NOINLINE bool SymbolizeAndDemangle(void *pc, char *out,
+                                                    int out_size) {
+  uint64_t pc0 = reinterpret_cast<uintptr_t>(pc);
+  uint64_t start_address = 0;
+  uint64_t base_address = 0;
+  int object_fd = -1;
+
+  if (out_size < 1) {
+    return false;
+  }
+  out[0] = '\0';
+  SafeAppendString("(", out, out_size);
+
+  if (g_symbolize_open_object_file_callback) {
+    object_fd = g_symbolize_open_object_file_callback(pc0, start_address,
+                                                      base_address, out + 1,
+                                                      out_size - 1);
+  } else {
+    object_fd = OpenObjectFileContainingPcAndGetStartAddress(pc0, start_address,
+                                                             base_address,
+                                                             out + 1,
+                                                             out_size - 1);
+  }
+
+  // Check whether a file name was returned.
+#if !defined(PRINT_UNSYMBOLIZED_STACK_TRACES)
+  if (object_fd < 0) {
+#endif
+    if (out[1]) {
+      // The object file containing PC was determined successfully; however,
+      // the object file was not opened successfully.  This is still
+      // considered a success because the object file name and offset are
+      // known and tools like asan_symbolize.py can use them for the
+      // symbolization.
+      out[out_size - 1] = '\0';  // Making sure |out| is always null-terminated.
+      SafeAppendString("+0x", out, out_size);
+      SafeAppendHexNumber(pc0 - base_address, out, out_size);
+      SafeAppendString(")", out, out_size);
+      return true;
+    }
+    // Failed to determine the object file containing PC.  Bail out.
+    return false;
+#if !defined(PRINT_UNSYMBOLIZED_STACK_TRACES)
+  }
+#endif
+  FileDescriptor wrapped_object_fd(object_fd);
+  int elf_type = FileGetElfType(wrapped_object_fd.get());
+  if (elf_type == -1) {
+    return false;
+  }
+  if (g_symbolize_callback) {
+    // Run the callback if it's installed.
+    // Note: relocation (and much of the rest of this code) will be
+    // wrong for prelinked shared libraries and PIE executables.
+    uint64_t relocation = (elf_type == ET_DYN) ? start_address : 0;
+    int num_bytes_written = g_symbolize_callback(wrapped_object_fd.get(),
+                                                 pc, out, out_size,
+                                                 relocation);
+    if (num_bytes_written > 0) {
+      out += num_bytes_written;
+      out_size -= num_bytes_written;
+    }
+  }
+  if (!GetSymbolFromObjectFile(wrapped_object_fd.get(), pc0,
+                               out, out_size, base_address)) {
+    return false;
+  }
+
+  // Symbolization succeeded.  Now we try to demangle the symbol.
+  DemangleInplace(out, out_size);
+  return true;
+}
+
+_END_GOOGLE_NAMESPACE_
+
+#elif defined(OS_MACOSX) && defined(HAVE_DLADDR)
+
+#include <dlfcn.h>
+#include <string.h>
+
+_START_GOOGLE_NAMESPACE_
+
+static ATTRIBUTE_NOINLINE bool SymbolizeAndDemangle(void *pc, char *out,
+                                                    int out_size) {
+  Dl_info info;
+  if (dladdr(pc, &info)) {
+    // dladdr() can succeed yet leave dli_sname NULL when no symbol matches.
+    if (info.dli_sname != NULL && (int)strlen(info.dli_sname) < out_size) {
+      strcpy(out, info.dli_sname);
+      // Symbolization succeeded.  Now we try to demangle the symbol.
+      DemangleInplace(out, out_size);
+      return true;
+    }
+  }
+  return false;
+}
+
+_END_GOOGLE_NAMESPACE_
+
+#else
+# error BUG: HAVE_SYMBOLIZE was wrongly set
+#endif
+
+_START_GOOGLE_NAMESPACE_
+
+bool Symbolize(void *pc, char *out, int out_size) {
+  SAFE_ASSERT(out_size >= 0);
+  return SymbolizeAndDemangle(pc, out, out_size);
+}
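
A hedged example of driving Symbolize() from client code. backtrace() from <execinfo.h> is the standard glibc call; the frame count and buffer size are arbitrary choices, fprintf() is for illustration only (it is not async-signal-safe, so a real crash handler would write output differently), and this assumes the _GOOGLE_NAMESPACE_ macros expand to namespace google as in stock glog.

    #include <execinfo.h>
    #include <stdio.h>

    void DumpStack() {
      void* frames[32];
      int depth = backtrace(frames, 32);
      for (int i = 0; i < depth; ++i) {
        char symbol[1024];
        if (google::Symbolize(frames[i], symbol, sizeof(symbol)))
          fprintf(stderr, "#%d %p %s\n", i, frames[i], symbol);
        else
          fprintf(stderr, "#%d %p <unknown>\n", i, frames[i]);
      }
    }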
+
+_END_GOOGLE_NAMESPACE_
+
+#else  /* HAVE_SYMBOLIZE */
+
+#include <assert.h>
+
+#include "config.h"
+
+_START_GOOGLE_NAMESPACE_
+
+// TODO: Support other environments.
+bool Symbolize(void *pc, char *out, int out_size) {
+  assert(0);
+  return false;
+}
+
+_END_GOOGLE_NAMESPACE_
+
+#endif
diff --git a/base/third_party/symbolize/symbolize.h b/base/third_party/symbolize/symbolize.h
new file mode 100644
index 0000000..aeb2fe3
--- /dev/null
+++ b/base/third_party/symbolize/symbolize.h
@@ -0,0 +1,155 @@
+// Copyright (c) 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Satoru Takabayashi
+//
+// This library provides a Symbolize() function that symbolizes program
+// counters to their corresponding symbol names on Linux platforms.
+// This library has a minimal implementation of an ELF symbol table
+// reader (i.e. it doesn't depend on libelf, etc.).
+//
+// The algorithm used in Symbolize() is as follows.
+//
+//   1. Go through a list of maps in /proc/self/maps and find the map
+//   containing the program counter.
+//
+//   2. Open the mapped file and find a regular symbol table inside.
+//   Iterate over symbols in the symbol table and look for the symbol
+//   containing the program counter.  If such a symbol is found,
+//   obtain the symbol name, and demangle the symbol if possible.
+//   If the symbol isn't found in the regular symbol table (binary is
+//   stripped), try the same thing with a dynamic symbol table.
+//
+// Note that Symbolize() was originally implemented to be used in
+// FailureSignalHandler() in base/google.cc.  Hence it doesn't use
+// malloc() and other unsafe operations.  It should be both
+// thread-safe and async-signal-safe.
+
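As an illustration of step 1 in the comment above, a simplified scan of /proc/self/maps might look like the sketch below. It is deliberately not async-signal-safe (stdio, arbitrary stack buffers); the real implementation uses raw read() and no heap, but the parsing idea is the same. FindMappingForPc and its signature are hypothetical.

    #include <inttypes.h>
    #include <stdio.h>
    #include <string.h>

    // Returns true and copies the mapped file's name into |path| if an
    // executable mapping covers |pc|.  Illustrative sketch only.
    static bool FindMappingForPc(uint64_t pc, char* path, size_t path_size) {
      FILE* fp = fopen("/proc/self/maps", "r");
      if (fp == NULL) return false;
      char line[512];
      bool found = false;
      while (!found && fgets(line, sizeof(line), fp) != NULL) {
        uint64_t start, end;
        char perms[5], file[256];
        file[0] = '\0';
        // Typical line: "00400000-0040b000 r-xp 00000000 08:01 1234  /bin/cat"
        if (sscanf(line, "%" SCNx64 "-%" SCNx64 " %4s %*s %*s %*s %255s",
                   &start, &end, perms, file) >= 3 &&
            pc >= start && pc < end && perms[2] == 'x' && file[0] != '\0') {
          strncpy(path, file, path_size - 1);
          path[path_size - 1] = '\0';
          found = true;
        }
      }
      fclose(fp);
      return found;
    }
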
+#ifndef BASE_SYMBOLIZE_H_
+#define BASE_SYMBOLIZE_H_
+
+#include "utilities.h"
+#include "config.h"
+#include "glog/logging.h"
+
+#ifdef HAVE_SYMBOLIZE
+
+#if defined(__ELF__)  // defined by gcc
+#if defined(__OpenBSD__)
+#include <sys/exec_elf.h>
+#else
+#include <elf.h>
+#endif
+
+#if !defined(ANDROID)
+#include <link.h>  // For ElfW() macro.
+#endif
+
+// For systems where SIZEOF_VOID_P is not defined, determine it
+// based on __LP64__ (defined by gcc on 64-bit systems)
+#if !defined(SIZEOF_VOID_P)
+# if defined(__LP64__)
+#  define SIZEOF_VOID_P 8
+# else
+#  define SIZEOF_VOID_P 4
+# endif
+#endif
+
+// If there is no ElfW macro, let's define it ourselves.
+#ifndef ElfW
+# if SIZEOF_VOID_P == 4
+#  define ElfW(type) Elf32_##type
+# elif SIZEOF_VOID_P == 8
+#  define ElfW(type) Elf64_##type
+# else
+#  error "Unknown sizeof(void *)"
+# endif
+#endif
+
+_START_GOOGLE_NAMESPACE_
+
+// Gets the section header for the given name, if it exists. Returns true on
+// success. Otherwise, returns false.
+bool GetSectionHeaderByName(int fd, const char *name, size_t name_len,
+                            ElfW(Shdr) *out);
+
+_END_GOOGLE_NAMESPACE_
+
+#endif  /* __ELF__ */
+
+_START_GOOGLE_NAMESPACE_
+
+// Restrictions on the callbacks that follow:
+//  - The callbacks must not allocate on the heap; they may use only the stack.
+//  - The callbacks must be async-signal-safe.
+
+// Installs a callback function, which will be called right before a symbol name
+// is printed. The callback is intended to be used for showing a file name and a
+// line number preceding a symbol name.
+// "fd" is a file descriptor of the object file containing the program
+// counter "pc". The callback function should write output to "out"
+// and return the size of the output written. On error, the callback
+// function should return -1.
+typedef int (*SymbolizeCallback)(int fd, void *pc, char *out, size_t out_size,
+                                 uint64_t relocation);
+void InstallSymbolizeCallback(SymbolizeCallback callback);
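
A hedged sketch of a conforming callback; the prefix format is invented, and snprintf() is used only for brevity (a production callback would restrict itself to async-signal-safe formatting, per the restrictions above).

    #include <stdint.h>
    #include <stdio.h>

    // Prepends "[pc <address>] " before the symbol name.  |fd| and
    // |relocation| are available for callbacks that read debug info from
    // the object file; this sketch ignores them.
    static int PrefixSymbolizeCallback(int fd, void* pc, char* out,
                                       size_t out_size, uint64_t relocation) {
      int n = snprintf(out, out_size, "[pc %p] ", pc);
      if (n < 0 || (size_t)n >= out_size)
        return -1;  // Error or truncation: report failure per the contract.
      return n;
    }
    // Installed once at startup:
    //   InstallSymbolizeCallback(&PrefixSymbolizeCallback);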
+
+// Installs a callback function, which will be called instead of
+// OpenObjectFileContainingPcAndGetStartAddress.  The callback is expected
+// to search for the object file (from /proc/self/maps) that contains
+// the specified pc.  If found, sets |start_address| to the start address
+// of where this object file is mapped in memory, sets the module base
+// address into |base_address|, copies the object file name into
+// |out_file_name|, and attempts to open the object file.  If the object
+// file is opened successfully, returns the file descriptor.  Otherwise,
+// returns -1.  |out_file_name_size| is the size of the file name buffer
+// (including the null-terminator).
+typedef int (*SymbolizeOpenObjectFileCallback)(uint64_t pc,
+                                               uint64_t &start_address,
+                                               uint64_t &base_address,
+                                               char *out_file_name,
+                                               int out_file_name_size);
+void InstallSymbolizeOpenObjectFileCallback(
+    SymbolizeOpenObjectFileCallback callback);
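
And a matching sketch for the open-object-file callback, assuming the hypothetical FindMappingForPc() helper sketched after the file-level comment above; open() is the ordinary POSIX call. The zeroed addresses are placeholders, not what a real callback would report.

    #include <fcntl.h>

    static int OpenObjectFileCallback(uint64_t pc,
                                      uint64_t& start_address,
                                      uint64_t& base_address,
                                      char* out_file_name,
                                      int out_file_name_size) {
      // A real callback would also derive |start_address| and |base_address|
      // from the matching /proc/self/maps entry; this sketch only recovers
      // the file name via the hypothetical helper.
      if (!FindMappingForPc(pc, out_file_name, out_file_name_size))
        return -1;
      start_address = 0;  // Placeholder: mapping start from the maps entry.
      base_address = 0;   // Placeholder: module load base, likewise.
      return open(out_file_name, O_RDONLY);  // -1 on failure, per the contract.
    }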
+
+_END_GOOGLE_NAMESPACE_
+
+#endif
+
+_START_GOOGLE_NAMESPACE_
+
+// Symbolizes a program counter.  On success, returns true and writes the
+// symbol name to "out".  The symbol name is demangled if possible
+// (supports symbols generated by GCC 3.x or newer).  Otherwise,
+// returns false.
+bool Symbolize(void *pc, char *out, int out_size);
+
+_END_GOOGLE_NAMESPACE_
+
+#endif  // BASE_SYMBOLIZE_H_
diff --git a/base/third_party/symbolize/utilities.h b/base/third_party/symbolize/utilities.h
new file mode 100644
index 0000000..65c5ba0
--- /dev/null
+++ b/base/third_party/symbolize/utilities.h
@@ -0,0 +1,11 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define HAVE_SYMBOLIZE 1
+#define ATTRIBUTE_NOINLINE __attribute__ ((noinline))
diff --git a/base/third_party/valgrind/LICENSE b/base/third_party/valgrind/LICENSE
new file mode 100644
index 0000000..41f677b
--- /dev/null
+++ b/base/third_party/valgrind/LICENSE
@@ -0,0 +1,39 @@
+   Notice that the following BSD-style license applies to the Valgrind header
+   files used by Chromium (valgrind.h and memcheck.h). However, the rest of
+   Valgrind is licensed under the terms of the GNU General Public License,
+   version 2, unless otherwise indicated.
+
+   ----------------------------------------------------------------
+
+   Copyright (C) 2000-2008 Julian Seward.  All rights reserved.
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions
+   are met:
+
+   1. Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+
+   2. The origin of this software must not be misrepresented; you must 
+      not claim that you wrote the original software.  If you use this 
+      software in a product, an acknowledgment in the product 
+      documentation would be appreciated but is not required.
+
+   3. Altered source versions must be plainly marked as such, and must
+      not be misrepresented as being the original software.
+
+   4. The name of the author may not be used to endorse or promote 
+      products derived from this software without specific prior written 
+      permission.
+
+   THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+   OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+   WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+   ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+   DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+   DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+   GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+   WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/base/third_party/valgrind/README.chromium b/base/third_party/valgrind/README.chromium
new file mode 100644
index 0000000..56a1cbb
--- /dev/null
+++ b/base/third_party/valgrind/README.chromium
@@ -0,0 +1,11 @@
+Name: valgrind
+URL: http://valgrind.org
+License: BSD
+
+Header files in this directory define runtime macros that determine whether
+the current process is running under Valgrind and tell the Memcheck tool
+about custom memory allocators.
+
+These header files were taken from Valgrind source code
+(svn://svn.valgrind.org/valgrind/trunk@11504, dated 21 Jan 2011). The files are
+covered under the BSD license as described within.
diff --git a/base/third_party/valgrind/memcheck.h b/base/third_party/valgrind/memcheck.h
new file mode 100644
index 0000000..f59c212
--- /dev/null
+++ b/base/third_party/valgrind/memcheck.h
@@ -0,0 +1,279 @@
+
+/*
+   ----------------------------------------------------------------
+
+   Notice that the following BSD-style license applies to this one
+   file (memcheck.h) only.  The rest of Valgrind is licensed under the
+   terms of the GNU General Public License, version 2, unless
+   otherwise indicated.  See the COPYING file in the source
+   distribution for details.
+
+   ----------------------------------------------------------------
+
+   This file is part of MemCheck, a heavyweight Valgrind tool for
+   detecting memory errors.
+
+   Copyright (C) 2000-2010 Julian Seward.  All rights reserved.
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions
+   are met:
+
+   1. Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+
+   2. The origin of this software must not be misrepresented; you must 
+      not claim that you wrote the original software.  If you use this 
+      software in a product, an acknowledgment in the product 
+      documentation would be appreciated but is not required.
+
+   3. Altered source versions must be plainly marked as such, and must
+      not be misrepresented as being the original software.
+
+   4. The name of the author may not be used to endorse or promote 
+      products derived from this software without specific prior written 
+      permission.
+
+   THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+   OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+   WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+   ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+   DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+   DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+   GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+   WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   ----------------------------------------------------------------
+
+   Notice that the above BSD-style license applies to this one file
+   (memcheck.h) only.  The entire rest of Valgrind is licensed under
+   the terms of the GNU General Public License, version 2.  See the
+   COPYING file in the source distribution for details.
+
+   ---------------------------------------------------------------- 
+*/
+
+
+#ifndef __MEMCHECK_H
+#define __MEMCHECK_H
+
+
+/* This file is for inclusion into client (your!) code.
+
+   You can use these macros to manipulate and query memory permissions
+   inside your own programs.
+
+   See comment near the top of valgrind.h on how to use them.
+*/
+
+#include "valgrind.h"
+
+/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !! 
+   This enum comprises an ABI exported by Valgrind to programs
+   which use client requests.  DO NOT CHANGE THE ORDER OF THESE
+   ENTRIES, NOR DELETE ANY -- add new ones at the end. */
+typedef
+   enum { 
+      VG_USERREQ__MAKE_MEM_NOACCESS = VG_USERREQ_TOOL_BASE('M','C'),
+      VG_USERREQ__MAKE_MEM_UNDEFINED,
+      VG_USERREQ__MAKE_MEM_DEFINED,
+      VG_USERREQ__DISCARD,
+      VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE,
+      VG_USERREQ__CHECK_MEM_IS_DEFINED,
+      VG_USERREQ__DO_LEAK_CHECK,
+      VG_USERREQ__COUNT_LEAKS,
+
+      VG_USERREQ__GET_VBITS,
+      VG_USERREQ__SET_VBITS,
+
+      VG_USERREQ__CREATE_BLOCK,
+
+      VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE,
+
+      /* Not next to VG_USERREQ__COUNT_LEAKS because it was added later. */
+      VG_USERREQ__COUNT_LEAK_BLOCKS,
+
+      /* This is just for memcheck's internal use - don't use it */
+      _VG_USERREQ__MEMCHECK_RECORD_OVERLAP_ERROR 
+         = VG_USERREQ_TOOL_BASE('M','C') + 256
+   } Vg_MemCheckClientRequest;
+
+
+
+/* Client-code macros to manipulate the state of memory. */
+
+/* Mark memory at _qzz_addr as unaddressable for _qzz_len bytes. */
+#define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr,_qzz_len)           \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */,      \
+                            VG_USERREQ__MAKE_MEM_NOACCESS,       \
+                            (_qzz_addr), (_qzz_len), 0, 0, 0)
+      
+/* Similarly, mark memory at _qzz_addr as addressable but undefined
+   for _qzz_len bytes. */
+#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr,_qzz_len)          \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */,      \
+                            VG_USERREQ__MAKE_MEM_UNDEFINED,      \
+                            (_qzz_addr), (_qzz_len), 0, 0, 0)
+
+/* Similarly, mark memory at _qzz_addr as addressable and defined
+   for _qzz_len bytes. */
+#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr,_qzz_len)            \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */,      \
+                            VG_USERREQ__MAKE_MEM_DEFINED,        \
+                            (_qzz_addr), (_qzz_len), 0, 0, 0)
+
+/* Similar to VALGRIND_MAKE_MEM_DEFINED except that addressability is
+   not altered: bytes which are addressable are marked as defined,
+   but those which are not addressable are left unchanged. */
+#define VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(_qzz_addr,_qzz_len)     \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */,              \
+                            VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE, \
+                            (_qzz_addr), (_qzz_len), 0, 0, 0)
+
+/* Create a block-description handle.  The description is an ASCII
+   string which is included in any messages pertaining to addresses
+   within the specified memory range.  Has no other effect on the
+   properties of the memory range. */
+#define VALGRIND_CREATE_BLOCK(_qzz_addr,_qzz_len, _qzz_desc)	   \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */,        \
+                            VG_USERREQ__CREATE_BLOCK,              \
+                            (_qzz_addr), (_qzz_len), (_qzz_desc),  \
+                            0, 0)
+
+/* Discard a block-description-handle. Returns 1 for an
+   invalid handle, 0 for a valid handle. */
+#define VALGRIND_DISCARD(_qzz_blkindex)                          \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */,      \
+                            VG_USERREQ__DISCARD,                 \
+                            0, (_qzz_blkindex), 0, 0, 0)
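
A hedged sketch of a tiny pool allocator using the state-manipulation macros above, so Memcheck treats bytes not yet handed out as off-limits. The pool layout, sizes, and names are hypothetical.

    #include <stddef.h>

    static char pool[4096];
    static size_t pool_used = 0;

    void pool_init(void) {
      /* Until handed out, pool bytes must not be read or written. */
      VALGRIND_MAKE_MEM_NOACCESS(pool, sizeof(pool));
      pool_used = 0;
    }

    void* pool_alloc(size_t n) {
      if (pool_used + n > sizeof(pool)) return 0;
      void* p = pool + pool_used;
      pool_used += n;
      /* Fresh allocations are addressable but uninitialized. */
      VALGRIND_MAKE_MEM_UNDEFINED(p, n);
      return p;
    }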
+
+
+/* Client-code macros to check the state of memory. */
+
+/* Check that memory at _qzz_addr is addressable for _qzz_len bytes.
+   If suitable addressability is not established, Valgrind prints an
+   error message and returns the address of the first offending byte.
+   Otherwise it returns zero. */
+#define VALGRIND_CHECK_MEM_IS_ADDRESSABLE(_qzz_addr,_qzz_len)      \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                             \
+                            VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE,  \
+                            (_qzz_addr), (_qzz_len), 0, 0, 0)
+
+/* Check that memory at _qzz_addr is addressable and defined for
+   _qzz_len bytes.  If suitable addressability and definedness are not
+   established, Valgrind prints an error message and returns the
+   address of the first offending byte.  Otherwise it returns zero. */
+#define VALGRIND_CHECK_MEM_IS_DEFINED(_qzz_addr,_qzz_len)        \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                           \
+                            VG_USERREQ__CHECK_MEM_IS_DEFINED,    \
+                            (_qzz_addr), (_qzz_len), 0, 0, 0)
+
+/* Use this macro to force the definedness and addressability of an
+   lvalue to be checked.  If suitable addressability and definedness
+   are not established, Valgrind prints an error message and returns
+   the address of the first offending byte.  Otherwise it returns
+   zero. */
+#define VALGRIND_CHECK_VALUE_IS_DEFINED(__lvalue)                \
+   VALGRIND_CHECK_MEM_IS_DEFINED(                                \
+      (volatile unsigned char *)&(__lvalue),                     \
+                      (unsigned long)(sizeof (__lvalue)))
+
+
+/* Do a full memory leak check (like --leak-check=full) mid-execution. */
+#define VALGRIND_DO_LEAK_CHECK                                   \
+   {unsigned long _qzz_res;                                      \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                      \
+                            VG_USERREQ__DO_LEAK_CHECK,           \
+                            0, 0, 0, 0, 0);                      \
+   }
+
+/* Do a summary memory leak check (like --leak-check=summary) mid-execution. */
+#define VALGRIND_DO_QUICK_LEAK_CHECK				 \
+   {unsigned long _qzz_res;                                      \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                      \
+                            VG_USERREQ__DO_LEAK_CHECK,           \
+                            1, 0, 0, 0, 0);                      \
+   }
+
+/* Return number of leaked, dubious, reachable and suppressed bytes found by
+   all previous leak checks.  They must be lvalues.  */
+#define VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed)     \
+   /* For safety on 64-bit platforms we assign the results to private
+      unsigned long variables, then assign these to the lvalues the user
+      specified, which works no matter what type 'leaked', 'dubious', etc
+      are.  We also initialise '_qzz_leaked', etc because
+      VG_USERREQ__COUNT_LEAKS doesn't mark the values returned as
+      defined. */                                                        \
+   {unsigned long _qzz_res;                                              \
+    unsigned long _qzz_leaked    = 0, _qzz_dubious    = 0;               \
+    unsigned long _qzz_reachable = 0, _qzz_suppressed = 0;               \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                              \
+                               VG_USERREQ__COUNT_LEAKS,                  \
+                               &_qzz_leaked, &_qzz_dubious,              \
+                               &_qzz_reachable, &_qzz_suppressed, 0);    \
+    leaked     = _qzz_leaked;                                            \
+    dubious    = _qzz_dubious;                                           \
+    reachable  = _qzz_reachable;                                         \
+    suppressed = _qzz_suppressed;                                        \
+   }
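
For example, a test harness might pair the two requests like this (the fprintf() reporting is illustrative only):

    unsigned long leaked, dubious, reachable, suppressed;
    VALGRIND_DO_LEAK_CHECK;
    VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed);
    if (leaked != 0)
      fprintf(stderr, "leak check: %lu bytes definitely lost\n", leaked);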
+
+/* Return number of leaked, dubious, reachable and suppressed bytes found by
+   all previous leak checks.  They must be lvalues.  */
+#define VALGRIND_COUNT_LEAK_BLOCKS(leaked, dubious, reachable, suppressed) \
+   /* For safety on 64-bit platforms we assign the results to private
+      unsigned long variables, then assign these to the lvalues the user
+      specified, which works no matter what type 'leaked', 'dubious', etc
+      are.  We also initialise '_qzz_leaked', etc because
+      VG_USERREQ__COUNT_LEAKS doesn't mark the values returned as
+      defined. */                                                        \
+   {unsigned long _qzz_res;                                              \
+    unsigned long _qzz_leaked    = 0, _qzz_dubious    = 0;               \
+    unsigned long _qzz_reachable = 0, _qzz_suppressed = 0;               \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                              \
+                               VG_USERREQ__COUNT_LEAK_BLOCKS,            \
+                               &_qzz_leaked, &_qzz_dubious,              \
+                               &_qzz_reachable, &_qzz_suppressed, 0);    \
+    leaked     = _qzz_leaked;                                            \
+    dubious    = _qzz_dubious;                                           \
+    reachable  = _qzz_reachable;                                         \
+    suppressed = _qzz_suppressed;                                        \
+   }
+
+
+/* Get the validity data for addresses [zza..zza+zznbytes-1] and copy it
+   into the provided zzvbits array.  Return values:
+      0   if not running on valgrind
+      1   success
+      2   [previously indicated unaligned arrays;  these are now allowed]
+      3   if any parts of zzsrc/zzvbits are not addressable.
+   The metadata is not copied in cases 0, 2 or 3 so it should be
+   impossible to segfault your system by using this call.
+*/
+#define VALGRIND_GET_VBITS(zza,zzvbits,zznbytes)                \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                          \
+                                    VG_USERREQ__GET_VBITS,      \
+                                    (const char*)(zza),         \
+                                    (char*)(zzvbits),           \
+                                    (zznbytes), 0, 0)
+
+/* Set the validity data for addresses [zza..zza+zznbytes-1], copying it
+   from the provided zzvbits array.  Return values:
+      0   if not running on valgrind
+      1   success
+      2   [previously indicated unaligned arrays;  these are now allowed]
+      3   if any parts of zza/zzvbits are not addressable.
+   The metadata is not copied in cases 0, 2 or 3 so it should be
+   impossible to segfault your system by using this call.
+*/
+#define VALGRIND_SET_VBITS(zza,zzvbits,zznbytes)                \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                          \
+                                    VG_USERREQ__SET_VBITS,      \
+                                    (const char*)(zza),         \
+                                    (const char*)(zzvbits),     \
+                                    (zznbytes), 0, 0 )
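
A round-trip sketch using the two requests above: save a buffer's definedness bits, temporarily mark the buffer defined so it can be inspected without errors, then restore the original state. The buffer size is arbitrary; one vbits byte covers one data byte, so the arrays match in size.

    char buf[64];
    char vbits[64];  /* one vbits byte per data byte */
    if (VALGRIND_GET_VBITS(buf, vbits, sizeof(buf)) == 1) {
      VALGRIND_MAKE_MEM_DEFINED(buf, sizeof(buf));  /* silence checks */
      /* ... hash or log buf here without definedness errors ... */
      VALGRIND_SET_VBITS(buf, vbits, sizeof(buf));  /* restore state */
    }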
+
+#endif
+
diff --git a/base/third_party/valgrind/valgrind.h b/base/third_party/valgrind/valgrind.h
new file mode 100644
index 0000000..0bae0aa
--- /dev/null
+++ b/base/third_party/valgrind/valgrind.h
@@ -0,0 +1,4792 @@
+/* -*- c -*-
+   ----------------------------------------------------------------
+
+   Notice that the following BSD-style license applies to this one
+   file (valgrind.h) only.  The rest of Valgrind is licensed under the
+   terms of the GNU General Public License, version 2, unless
+   otherwise indicated.  See the COPYING file in the source
+   distribution for details.
+
+   ----------------------------------------------------------------
+
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2000-2010 Julian Seward.  All rights reserved.
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions
+   are met:
+
+   1. Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+
+   2. The origin of this software must not be misrepresented; you must 
+      not claim that you wrote the original software.  If you use this 
+      software in a product, an acknowledgment in the product 
+      documentation would be appreciated but is not required.
+
+   3. Altered source versions must be plainly marked as such, and must
+      not be misrepresented as being the original software.
+
+   4. The name of the author may not be used to endorse or promote 
+      products derived from this software without specific prior written 
+      permission.
+
+   THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+   OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+   WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+   ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+   DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+   DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+   GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+   WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   ----------------------------------------------------------------
+
+   Notice that the above BSD-style license applies to this one file
+   (valgrind.h) only.  The entire rest of Valgrind is licensed under
+   the terms of the GNU General Public License, version 2.  See the
+   COPYING file in the source distribution for details.
+
+   ---------------------------------------------------------------- 
+*/
+
+
+/* This file is for inclusion into client (your!) code.
+
+   You can use these macros to manipulate and query Valgrind's 
+   execution inside your own programs.
+
+   The resulting executables will still run without Valgrind, just a
+   little bit more slowly than they otherwise would, but otherwise
+   unchanged.  When not running on valgrind, each client request
+   consumes very few (e.g. 7) instructions, so the resulting performance
+   loss is negligible unless you plan to execute client requests
+   millions of times per second.  Nevertheless, if that is still a
+   problem, you can compile with the NVALGRIND symbol defined (gcc
+   -DNVALGRIND) so that client requests are not even compiled in.  */
+
+#ifndef __VALGRIND_H
+#define __VALGRIND_H
+
+
+/* ------------------------------------------------------------------ */
+/* VERSION NUMBER OF VALGRIND                                         */
+/* ------------------------------------------------------------------ */
+
+/* Specify Valgrind's version number, so that user code can
+   conditionally compile based on our version number.  Note that these
+   were introduced at version 3.6 and so do not exist in version 3.5
+   or earlier.  The recommended way to use them to check for "version
+   X.Y or later" is (eg)
+
+#if defined(__VALGRIND_MAJOR__) && defined(__VALGRIND_MINOR__)   \
+    && (__VALGRIND_MAJOR__ > 3                                   \
+        || (__VALGRIND_MAJOR__ == 3 && __VALGRIND_MINOR__ >= 6))
+*/
+#define __VALGRIND_MAJOR__    3
+#define __VALGRIND_MINOR__    6
+
+
+#include <stdarg.h>
+
+/* Nb: this file might be included in a file compiled with -ansi.  So
+   we can't use C++ style "//" comments nor the "asm" keyword (instead
+   use "__asm__"). */
+
+/* Derive some tags indicating what the target platform is.  Note
+   that in this file we're using the compiler's CPP symbols for
+   identifying architectures, which are different to the ones we use
+   within the rest of Valgrind.  Note, __powerpc__ is active for both
+   32 and 64-bit PPC, whereas __powerpc64__ is only active for the
+   latter (on Linux, that is).
+
+   Misc note: how to find out what's predefined in gcc by default:
+   gcc -Wp,-dM somefile.c
+*/
+#undef PLAT_ppc64_aix5
+#undef PLAT_ppc32_aix5
+#undef PLAT_x86_darwin
+#undef PLAT_amd64_darwin
+#undef PLAT_x86_win32
+#undef PLAT_x86_linux
+#undef PLAT_amd64_linux
+#undef PLAT_ppc32_linux
+#undef PLAT_ppc64_linux
+#undef PLAT_arm_linux
+
+#if defined(_AIX) && defined(__64BIT__)
+#  define PLAT_ppc64_aix5 1
+#elif defined(_AIX) && !defined(__64BIT__)
+#  define PLAT_ppc32_aix5 1
+#elif defined(__APPLE__) && defined(__i386__)
+#  define PLAT_x86_darwin 1
+#elif defined(__APPLE__) && defined(__x86_64__)
+#  define PLAT_amd64_darwin 1
+#elif defined(__MINGW32__) || defined(__CYGWIN32__) || (defined(_WIN32) && defined(_M_IX86))
+#  define PLAT_x86_win32 1
+#elif defined(__linux__) && defined(__i386__)
+#  define PLAT_x86_linux 1
+#elif defined(__linux__) && defined(__x86_64__)
+#  define PLAT_amd64_linux 1
+#elif defined(__linux__) && defined(__powerpc__) && !defined(__powerpc64__)
+#  define PLAT_ppc32_linux 1
+#elif defined(__linux__) && defined(__powerpc__) && defined(__powerpc64__)
+#  define PLAT_ppc64_linux 1
+#elif defined(__linux__) && defined(__arm__)
+#  define PLAT_arm_linux 1
+#else
+/* If we're not compiling for our target platform, don't generate
+   any inline asms.  */
+#  if !defined(NVALGRIND)
+#    define NVALGRIND 1
+#  endif
+#endif
+
+
+/* ------------------------------------------------------------------ */
+/* ARCHITECTURE SPECIFICS for SPECIAL INSTRUCTIONS.  There is nothing */
+/* in here of use to end-users -- skip to the next section.           */
+/* ------------------------------------------------------------------ */
+
+#if defined(NVALGRIND)
+
+/* Define NVALGRIND to completely remove the Valgrind magic sequence
+   from the compiled code (analogous to NDEBUG's effects on
+   assert()) */
+#define VALGRIND_DO_CLIENT_REQUEST(                               \
+        _zzq_rlval, _zzq_default, _zzq_request,                   \
+        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
+   {                                                              \
+      (_zzq_rlval) = (_zzq_default);                              \
+   }
+
+#else  /* ! NVALGRIND */
+
+/* The following defines the magic code sequences which the JITter
+   spots and handles magically.  Don't look too closely at them as
+   they will rot your brain.
+
+   The assembly code sequences for all architectures is in this one
+   file.  This is because this file must be stand-alone, and we don't
+   want to have multiple files.
+
+   For VALGRIND_DO_CLIENT_REQUEST, we must ensure that the default
+   value gets put in the return slot, so that everything works when
+   this is executed not under Valgrind.  Args are passed in a memory
+   block, and so there's no intrinsic limit to the number that could
+   be passed, but it's currently five.
+   
+   The macro args are: 
+      _zzq_rlval    result lvalue
+      _zzq_default  default value (result returned when running on real CPU)
+      _zzq_request  request code
+      _zzq_arg1..5  request params
+
+   The other two macros are used to support function wrapping, and are
+   a lot simpler.  VALGRIND_GET_NR_CONTEXT returns the value of the
+   guest's NRADDR pseudo-register and whatever other information is
+   needed to safely run the original call from the wrapper: on
+   ppc64-linux, the R2 value at the divert point is also needed.  This
+   information is abstracted into a user-visible type, OrigFn.
+
+   VALGRIND_CALL_NOREDIR_* behaves the same as the following on the
+   guest, but guarantees that the branch instruction will not be
+   redirected: x86: call *%eax, amd64: call *%rax, ppc32/ppc64:
+   branch-and-link-to-r11.  VALGRIND_CALL_NOREDIR is just text, not a
+   complete inline asm, since it needs to be combined with more magic
+   inline asm stuff to be useful.
+*/
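
As a concrete, hedged example of the request mechanism: the RUNNING_ON_VALGRIND macro defined later in this header boils down to the pattern below, where 0x1001 is VG_USERREQ__RUNNING_ON_VALGRIND in the client-request enum that follows.

    unsigned int on_valgrind;
    /* Yields the default (0) when running natively; nonzero (the number of
       Valgrinds stacked above the program) when running under Valgrind. */
    VALGRIND_DO_CLIENT_REQUEST(on_valgrind, 0 /* default */,
                               0x1001 /* RUNNING_ON_VALGRIND */, 0, 0, 0, 0, 0);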
+
+/* ------------------------- x86-{linux,darwin} ---------------- */
+
+#if defined(PLAT_x86_linux)  ||  defined(PLAT_x86_darwin)  \
+    ||  (defined(PLAT_x86_win32) && defined(__GNUC__))
+
+typedef
+   struct { 
+      unsigned int nraddr; /* where's the code? */
+   }
+   OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
+                     "roll $3,  %%edi ; roll $13, %%edi\n\t"      \
+                     "roll $29, %%edi ; roll $19, %%edi\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST(                               \
+        _zzq_rlval, _zzq_default, _zzq_request,                   \
+        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
+  { volatile unsigned int _zzq_args[6];                           \
+    volatile unsigned int _zzq_result;                            \
+    _zzq_args[0] = (unsigned int)(_zzq_request);                  \
+    _zzq_args[1] = (unsigned int)(_zzq_arg1);                     \
+    _zzq_args[2] = (unsigned int)(_zzq_arg2);                     \
+    _zzq_args[3] = (unsigned int)(_zzq_arg3);                     \
+    _zzq_args[4] = (unsigned int)(_zzq_arg4);                     \
+    _zzq_args[5] = (unsigned int)(_zzq_arg5);                     \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %EDX = client_request ( %EAX ) */         \
+                     "xchgl %%ebx,%%ebx"                          \
+                     : "=d" (_zzq_result)                         \
+                     : "a" (&_zzq_args[0]), "0" (_zzq_default)    \
+                     : "cc", "memory"                             \
+                    );                                            \
+    _zzq_rlval = _zzq_result;                                     \
+  }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
+  { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
+    volatile unsigned int __addr;                                 \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %EAX = guest_NRADDR */                    \
+                     "xchgl %%ecx,%%ecx"                          \
+                     : "=a" (__addr)                              \
+                     :                                            \
+                     : "cc", "memory"                             \
+                    );                                            \
+    _zzq_orig->nraddr = __addr;                                   \
+  }
+
+#define VALGRIND_CALL_NOREDIR_EAX                                 \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* call-noredir *%EAX */                     \
+                     "xchgl %%edx,%%edx\n\t"
+#endif /* PLAT_x86_linux || PLAT_x86_darwin || (PLAT_x86_win32 && __GNUC__) */
+
+/* ------------------------- x86-Win32 ------------------------- */
+
+#if defined(PLAT_x86_win32) && !defined(__GNUC__)
+
+typedef
+   struct { 
+      unsigned int nraddr; /* where's the code? */
+   }
+   OrigFn;
+
+#if defined(_MSC_VER)
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
+                     __asm rol edi, 3  __asm rol edi, 13          \
+                     __asm rol edi, 29 __asm rol edi, 19
+
+#define VALGRIND_DO_CLIENT_REQUEST(                               \
+        _zzq_rlval, _zzq_default, _zzq_request,                   \
+        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
+  { volatile uintptr_t _zzq_args[6];                              \
+    volatile unsigned int _zzq_result;                            \
+    _zzq_args[0] = (uintptr_t)(_zzq_request);                     \
+    _zzq_args[1] = (uintptr_t)(_zzq_arg1);                        \
+    _zzq_args[2] = (uintptr_t)(_zzq_arg2);                        \
+    _zzq_args[3] = (uintptr_t)(_zzq_arg3);                        \
+    _zzq_args[4] = (uintptr_t)(_zzq_arg4);                        \
+    _zzq_args[5] = (uintptr_t)(_zzq_arg5);                        \
+    __asm { __asm lea eax, _zzq_args __asm mov edx, _zzq_default  \
+            __SPECIAL_INSTRUCTION_PREAMBLE                        \
+            /* %EDX = client_request ( %EAX ) */                  \
+            __asm xchg ebx,ebx                                    \
+            __asm mov _zzq_result, edx                            \
+    }                                                             \
+    _zzq_rlval = _zzq_result;                                     \
+  }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
+  { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
+    volatile unsigned int __addr;                                 \
+    __asm { __SPECIAL_INSTRUCTION_PREAMBLE                        \
+            /* %EAX = guest_NRADDR */                             \
+            __asm xchg ecx,ecx                                    \
+            __asm mov __addr, eax                                 \
+    }                                                             \
+    _zzq_orig->nraddr = __addr;                                   \
+  }
+
+#define VALGRIND_CALL_NOREDIR_EAX ERROR
+
+#else
+#error Unsupported compiler.
+#endif
+
+#endif /* PLAT_x86_win32 */
+
+/* ------------------------ amd64-{linux,darwin} --------------- */
+
+#if defined(PLAT_amd64_linux)  ||  defined(PLAT_amd64_darwin)
+
+typedef
+   struct { 
+      unsigned long long int nraddr; /* where's the code? */
+   }
+   OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
+                     "rolq $3,  %%rdi ; rolq $13, %%rdi\n\t"      \
+                     "rolq $61, %%rdi ; rolq $51, %%rdi\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST(                               \
+        _zzq_rlval, _zzq_default, _zzq_request,                   \
+        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
+  { volatile unsigned long long int _zzq_args[6];                 \
+    volatile unsigned long long int _zzq_result;                  \
+    _zzq_args[0] = (unsigned long long int)(_zzq_request);        \
+    _zzq_args[1] = (unsigned long long int)(_zzq_arg1);           \
+    _zzq_args[2] = (unsigned long long int)(_zzq_arg2);           \
+    _zzq_args[3] = (unsigned long long int)(_zzq_arg3);           \
+    _zzq_args[4] = (unsigned long long int)(_zzq_arg4);           \
+    _zzq_args[5] = (unsigned long long int)(_zzq_arg5);           \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %RDX = client_request ( %RAX ) */         \
+                     "xchgq %%rbx,%%rbx"                          \
+                     : "=d" (_zzq_result)                         \
+                     : "a" (&_zzq_args[0]), "0" (_zzq_default)    \
+                     : "cc", "memory"                             \
+                    );                                            \
+    _zzq_rlval = _zzq_result;                                     \
+  }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
+  { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
+    volatile unsigned long long int __addr;                       \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %RAX = guest_NRADDR */                    \
+                     "xchgq %%rcx,%%rcx"                          \
+                     : "=a" (__addr)                              \
+                     :                                            \
+                     : "cc", "memory"                             \
+                    );                                            \
+    _zzq_orig->nraddr = __addr;                                   \
+  }
+
+#define VALGRIND_CALL_NOREDIR_RAX                                 \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* call-noredir *%RAX */                     \
+                     "xchgq %%rdx,%%rdx\n\t"
+#endif /* PLAT_amd64_linux || PLAT_amd64_darwin */
+
+/* ------------------------ ppc32-linux ------------------------ */
+
+#if defined(PLAT_ppc32_linux)
+
+typedef
+   struct { 
+      unsigned int nraddr; /* where's the code? */
+   }
+   OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
+                     "rlwinm 0,0,3,0,0  ; rlwinm 0,0,13,0,0\n\t"  \
+                     "rlwinm 0,0,29,0,0 ; rlwinm 0,0,19,0,0\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST(                               \
+        _zzq_rlval, _zzq_default, _zzq_request,                   \
+        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
+                                                                  \
+  {          unsigned int  _zzq_args[6];                          \
+             unsigned int  _zzq_result;                           \
+             unsigned int* _zzq_ptr;                              \
+    _zzq_args[0] = (unsigned int)(_zzq_request);                  \
+    _zzq_args[1] = (unsigned int)(_zzq_arg1);                     \
+    _zzq_args[2] = (unsigned int)(_zzq_arg2);                     \
+    _zzq_args[3] = (unsigned int)(_zzq_arg3);                     \
+    _zzq_args[4] = (unsigned int)(_zzq_arg4);                     \
+    _zzq_args[5] = (unsigned int)(_zzq_arg5);                     \
+    _zzq_ptr = _zzq_args;                                         \
+    __asm__ volatile("mr 3,%1\n\t" /*default*/                    \
+                     "mr 4,%2\n\t" /*ptr*/                        \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = client_request ( %R4 ) */           \
+                     "or 1,1,1\n\t"                               \
+                     "mr %0,3"     /*result*/                     \
+                     : "=b" (_zzq_result)                         \
+                     : "b" (_zzq_default), "b" (_zzq_ptr)         \
+                     : "cc", "memory", "r3", "r4");               \
+    _zzq_rlval = _zzq_result;                                     \
+  }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
+  { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
+    unsigned int __addr;                                          \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = guest_NRADDR */                     \
+                     "or 2,2,2\n\t"                               \
+                     "mr %0,3"                                    \
+                     : "=b" (__addr)                              \
+                     :                                            \
+                     : "cc", "memory", "r3"                       \
+                    );                                            \
+    _zzq_orig->nraddr = __addr;                                   \
+  }
+
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                   \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* branch-and-link-to-noredir *%R11 */       \
+                     "or 3,3,3\n\t"
+#endif /* PLAT_ppc32_linux */
+
+/* ------------------------ ppc64-linux ------------------------ */
+
+#if defined(PLAT_ppc64_linux)
+
+typedef
+   struct { 
+      unsigned long long int nraddr; /* where's the code? */
+      unsigned long long int r2;  /* what tocptr do we need? */
+   }
+   OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
+                     "rotldi 0,0,3  ; rotldi 0,0,13\n\t"          \
+                     "rotldi 0,0,61 ; rotldi 0,0,51\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST(                               \
+        _zzq_rlval, _zzq_default, _zzq_request,                   \
+        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
+                                                                  \
+  {          unsigned long long int  _zzq_args[6];                \
+    register unsigned long long int  _zzq_result __asm__("r3");   \
+    register unsigned long long int* _zzq_ptr __asm__("r4");      \
+    _zzq_args[0] = (unsigned long long int)(_zzq_request);        \
+    _zzq_args[1] = (unsigned long long int)(_zzq_arg1);           \
+    _zzq_args[2] = (unsigned long long int)(_zzq_arg2);           \
+    _zzq_args[3] = (unsigned long long int)(_zzq_arg3);           \
+    _zzq_args[4] = (unsigned long long int)(_zzq_arg4);           \
+    _zzq_args[5] = (unsigned long long int)(_zzq_arg5);           \
+    _zzq_ptr = _zzq_args;                                         \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = client_request ( %R4 ) */           \
+                     "or 1,1,1"                                   \
+                     : "=r" (_zzq_result)                         \
+                     : "0" (_zzq_default), "r" (_zzq_ptr)         \
+                     : "cc", "memory");                           \
+    _zzq_rlval = _zzq_result;                                     \
+  }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
+  { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
+    register unsigned long long int __addr __asm__("r3");         \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = guest_NRADDR */                     \
+                     "or 2,2,2"                                   \
+                     : "=r" (__addr)                              \
+                     :                                            \
+                     : "cc", "memory"                             \
+                    );                                            \
+    _zzq_orig->nraddr = __addr;                                   \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = guest_NRADDR_GPR2 */                \
+                     "or 4,4,4"                                   \
+                     : "=r" (__addr)                              \
+                     :                                            \
+                     : "cc", "memory"                             \
+                    );                                            \
+    _zzq_orig->r2 = __addr;                                       \
+  }
+
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                   \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* branch-and-link-to-noredir *%R11 */       \
+                     "or 3,3,3\n\t"
+
+#endif /* PLAT_ppc64_linux */
+
+/* ------------------------- arm-linux ------------------------- */
+
+#if defined(PLAT_arm_linux)
+
+typedef
+   struct { 
+      unsigned int nraddr; /* where's the code? */
+   }
+   OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
+            "mov r12, r12, ror #3  ; mov r12, r12, ror #13 \n\t"  \
+            "mov r12, r12, ror #29 ; mov r12, r12, ror #19 \n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST(                               \
+        _zzq_rlval, _zzq_default, _zzq_request,                   \
+        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
+                                                                  \
+  { volatile unsigned int  _zzq_args[6];                          \
+    volatile unsigned int  _zzq_result;                           \
+    _zzq_args[0] = (unsigned int)(_zzq_request);                  \
+    _zzq_args[1] = (unsigned int)(_zzq_arg1);                     \
+    _zzq_args[2] = (unsigned int)(_zzq_arg2);                     \
+    _zzq_args[3] = (unsigned int)(_zzq_arg3);                     \
+    _zzq_args[4] = (unsigned int)(_zzq_arg4);                     \
+    _zzq_args[5] = (unsigned int)(_zzq_arg5);                     \
+    __asm__ volatile("mov r3, %1\n\t" /*default*/                 \
+                     "mov r4, %2\n\t" /*ptr*/                     \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* R3 = client_request ( R4 ) */             \
+                     "orr r10, r10, r10\n\t"                      \
+                     "mov %0, r3"     /*result*/                  \
+                     : "=r" (_zzq_result)                         \
+                     : "r" (_zzq_default), "r" (&_zzq_args[0])    \
+                     : "cc","memory", "r3", "r4");                \
+    _zzq_rlval = _zzq_result;                                     \
+  }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
+  { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
+    unsigned int __addr;                                          \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* R3 = guest_NRADDR */                      \
+                     "orr r11, r11, r11\n\t"                      \
+                     "mov %0, r3"                                 \
+                     : "=r" (__addr)                              \
+                     :                                            \
+                     : "cc", "memory", "r3"                       \
+                    );                                            \
+    _zzq_orig->nraddr = __addr;                                   \
+  }
+
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                    \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* branch-and-link-to-noredir *%R4 */        \
+                     "orr r12, r12, r12\n\t"
+
+#endif /* PLAT_arm_linux */
+
+/* ------------------------ ppc32-aix5 ------------------------- */
+
+#if defined(PLAT_ppc32_aix5)
+
+typedef
+   struct { 
+      unsigned int nraddr; /* where's the code? */
+      unsigned int r2;  /* what tocptr do we need? */
+   }
+   OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
+                     "rlwinm 0,0,3,0,0  ; rlwinm 0,0,13,0,0\n\t"  \
+                     "rlwinm 0,0,29,0,0 ; rlwinm 0,0,19,0,0\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST(                               \
+        _zzq_rlval, _zzq_default, _zzq_request,                   \
+        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
+                                                                  \
+  {          unsigned int  _zzq_args[7];                          \
+    register unsigned int  _zzq_result;                           \
+    register unsigned int* _zzq_ptr;                              \
+    _zzq_args[0] = (unsigned int)(_zzq_request);                  \
+    _zzq_args[1] = (unsigned int)(_zzq_arg1);                     \
+    _zzq_args[2] = (unsigned int)(_zzq_arg2);                     \
+    _zzq_args[3] = (unsigned int)(_zzq_arg3);                     \
+    _zzq_args[4] = (unsigned int)(_zzq_arg4);                     \
+    _zzq_args[5] = (unsigned int)(_zzq_arg5);                     \
+    _zzq_args[6] = (unsigned int)(_zzq_default);                  \
+    _zzq_ptr = _zzq_args;                                         \
+    __asm__ volatile("mr 4,%1\n\t"                                \
+                     "lwz 3, 24(4)\n\t"                           \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = client_request ( %R4 ) */           \
+                     "or 1,1,1\n\t"                               \
+                     "mr %0,3"                                    \
+                     : "=b" (_zzq_result)                         \
+                     : "b" (_zzq_ptr)                             \
+                     : "r3", "r4", "cc", "memory");               \
+    _zzq_rlval = _zzq_result;                                     \
+  }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
+  { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
+    register unsigned int __addr;                                 \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = guest_NRADDR */                     \
+                     "or 2,2,2\n\t"                               \
+                     "mr %0,3"                                    \
+                     : "=b" (__addr)                              \
+                     :                                            \
+                     : "r3", "cc", "memory"                       \
+                    );                                            \
+    _zzq_orig->nraddr = __addr;                                   \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = guest_NRADDR_GPR2 */                \
+                     "or 4,4,4\n\t"                               \
+                     "mr %0,3"                                    \
+                     : "=b" (__addr)                              \
+                     :                                            \
+                     : "r3", "cc", "memory"                       \
+                    );                                            \
+    _zzq_orig->r2 = __addr;                                       \
+  }
+
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                   \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* branch-and-link-to-noredir *%R11 */       \
+                     "or 3,3,3\n\t"
+
+#endif /* PLAT_ppc32_aix5 */
+
+/* ------------------------ ppc64-aix5 ------------------------- */
+
+#if defined(PLAT_ppc64_aix5)
+
+typedef
+   struct { 
+      unsigned long long int nraddr; /* where's the code? */
+      unsigned long long int r2;  /* what tocptr do we need? */
+   }
+   OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
+                     "rotldi 0,0,3  ; rotldi 0,0,13\n\t"          \
+                     "rotldi 0,0,61 ; rotldi 0,0,51\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST(                               \
+        _zzq_rlval, _zzq_default, _zzq_request,                   \
+        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
+                                                                  \
+  {          unsigned long long int  _zzq_args[7];                \
+    register unsigned long long int  _zzq_result;                 \
+    register unsigned long long int* _zzq_ptr;                    \
+    _zzq_args[0] = (unsigned long long int)(_zzq_request);        \
+    _zzq_args[1] = (unsigned long long int)(_zzq_arg1);           \
+    _zzq_args[2] = (unsigned long long int)(_zzq_arg2);           \
+    _zzq_args[3] = (unsigned long long int)(_zzq_arg3);           \
+    _zzq_args[4] = (unsigned long long int)(_zzq_arg4);           \
+    _zzq_args[5] = (unsigned long long int)(_zzq_arg5);           \
+    _zzq_args[6] = (unsigned long long int)(_zzq_default);        \
+    _zzq_ptr = _zzq_args;                                         \
+    __asm__ volatile("mr 4,%1\n\t"                                \
+                     "ld 3, 48(4)\n\t"                            \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = client_request ( %R4 ) */           \
+                     "or 1,1,1\n\t"                               \
+                     "mr %0,3"                                    \
+                     : "=b" (_zzq_result)                         \
+                     : "b" (_zzq_ptr)                             \
+                     : "r3", "r4", "cc", "memory");               \
+    _zzq_rlval = _zzq_result;                                     \
+  }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
+  { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
+    register unsigned long long int __addr;                       \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = guest_NRADDR */                     \
+                     "or 2,2,2\n\t"                               \
+                     "mr %0,3"                                    \
+                     : "=b" (__addr)                              \
+                     :                                            \
+                     : "r3", "cc", "memory"                       \
+                    );                                            \
+    _zzq_orig->nraddr = __addr;                                   \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = guest_NRADDR_GPR2 */                \
+                     "or 4,4,4\n\t"                               \
+                     "mr %0,3"                                    \
+                     : "=b" (__addr)                              \
+                     :                                            \
+                     : "r3", "cc", "memory"                       \
+                    );                                            \
+    _zzq_orig->r2 = __addr;                                       \
+  }
+
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                   \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* branch-and-link-to-noredir *%R11 */       \
+                     "or 3,3,3\n\t"
+
+#endif /* PLAT_ppc64_aix5 */
+
+/* Insert assembly code for other platforms here... */
+
+#endif /* NVALGRIND */
+
+
+/* ------------------------------------------------------------------ */
+/* PLATFORM SPECIFICS for FUNCTION WRAPPING.  This is all very        */
+/* ugly.  It's the least-worst tradeoff I can think of.               */
+/* ------------------------------------------------------------------ */
+
+/* This section defines magic (a.k.a. appalling-hack) macros for
+   making guaranteed-no-redirection calls, so as to get from function
+   wrappers to the functions they are wrapping.  The whole point is to
+   construct standard call sequences, but to do the call itself with a
+   special no-redirect call pseudo-instruction that the JIT
+   understands and handles specially.  This section is long and
+   repetitious, and I can't see a way to make it shorter.
+
+   The naming scheme is as follows:
+
+      CALL_FN_{W,v}_{v,W,WW,WWW,WWWW,5W,6W,7W,etc}
+
+   'W' stands for "word" and 'v' for "void".  Hence there are
+   different macros for calling functions of arity 0, 1, 2, 3, 4,
+   etc., and, for each, the possibility of returning a word-typed
+   result or no result.
+*/
+
+/* Use these to write the name of your wrapper.  NOTE: duplicates
+   VG_WRAP_FUNCTION_Z{U,Z} in pub_tool_redir.h. */
+
+/* Use an extra level of macroisation so as to ensure the soname/fnname
+   args are fully macro-expanded before pasting them together. */
+#define VG_CONCAT4(_aa,_bb,_cc,_dd) _aa##_bb##_cc##_dd
+
+#define I_WRAP_SONAME_FNNAME_ZU(soname,fnname)                    \
+   VG_CONCAT4(_vgwZU_,soname,_,fnname)
+
+#define I_WRAP_SONAME_FNNAME_ZZ(soname,fnname)                    \
+   VG_CONCAT4(_vgwZZ_,soname,_,fnname)
+
+/* Use this macro from within a wrapper function to collect the
+   context (address and possibly other info) of the original function.
+   Once you have that you can then use it in one of the CALL_FN_
+   macros.  The type of the argument _lval is OrigFn. */
+#define VALGRIND_GET_ORIG_FN(_lval)  VALGRIND_GET_NR_CONTEXT(_lval)
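+
+/* A minimal sketch of a complete wrapper, in the style of the example
+   in the Valgrind manual (the function 'foo' is hypothetical).
+   Suppose the program being run contains
+
+      int foo ( int x, int y ) { return x + y; }
+
+   in an object with an empty soname (matched by "NONE").  Then the
+   following, compiled and linked into the same program, intercepts
+   all calls to foo when running under Valgrind:
+
+      #include <stdio.h>
+      #include "valgrind.h"
+
+      int I_WRAP_SONAME_FNNAME_ZU(NONE, foo) ( int x, int y )
+      {
+         int    r;
+         OrigFn fn;
+         VALGRIND_GET_ORIG_FN(fn);    // grab the context of real foo
+         printf("wrapper: args %d %d\n", x, y);
+         CALL_FN_W_WW(r, fn, x, y);   // arity 2, word-typed result
+         printf("wrapper: result %d\n", r);
+         return r;
+      }
+
+   CALL_FN_W_WW invokes the original foo via the no-redirect call
+   pseudo-instruction, so the wrapper is not re-entered. */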
+
+/* Derivatives of the main macros below, for calling functions
+   returning void. */
+
+#define CALL_FN_v_v(fnptr)                                        \
+   do { volatile unsigned long _junk;                             \
+        CALL_FN_W_v(_junk,fnptr); } while (0)
+
+#define CALL_FN_v_W(fnptr, arg1)                                  \
+   do { volatile unsigned long _junk;                             \
+        CALL_FN_W_W(_junk,fnptr,arg1); } while (0)
+
+#define CALL_FN_v_WW(fnptr, arg1,arg2)                            \
+   do { volatile unsigned long _junk;                             \
+        CALL_FN_W_WW(_junk,fnptr,arg1,arg2); } while (0)
+
+#define CALL_FN_v_WWW(fnptr, arg1,arg2,arg3)                      \
+   do { volatile unsigned long _junk;                             \
+        CALL_FN_W_WWW(_junk,fnptr,arg1,arg2,arg3); } while (0)
+
+#define CALL_FN_v_WWWW(fnptr, arg1,arg2,arg3,arg4)                \
+   do { volatile unsigned long _junk;                             \
+        CALL_FN_W_WWWW(_junk,fnptr,arg1,arg2,arg3,arg4); } while (0)
+
+#define CALL_FN_v_5W(fnptr, arg1,arg2,arg3,arg4,arg5)             \
+   do { volatile unsigned long _junk;                             \
+        CALL_FN_W_5W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5); } while (0)
+
+#define CALL_FN_v_6W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6)        \
+   do { volatile unsigned long _junk;                             \
+        CALL_FN_W_6W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6); } while (0)
+
+#define CALL_FN_v_7W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6,arg7)   \
+   do { volatile unsigned long _junk;                             \
+        CALL_FN_W_7W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6,arg7); } while (0)
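+
+/* A sketch of using one of these void-returning derivatives (the
+   wrapper below is hypothetical; "libcZdsoZa" is the Z-encoded form
+   of "libc.so*", with Zd = '.' and Za = '*'):
+
+      void I_WRAP_SONAME_FNNAME_ZU(libcZdsoZa, free) ( void* p )
+      {
+         OrigFn fn;
+         VALGRIND_GET_ORIG_FN(fn);
+         CALL_FN_v_W(fn, p);   // call the real free; no result
+      }
+*/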
+
+/* ------------------------- x86-{linux,darwin} ---------------- */
+
+#if defined(PLAT_x86_linux)  ||  defined(PLAT_x86_darwin)
+
+/* These regs are trashed by the hidden call.  No need to mention eax,
+   as gcc can already see that; mentioning it also causes gcc to bomb. */
+#define __CALLER_SAVED_REGS /*"eax"*/ "ecx", "edx"
+
+/* These CALL_FN_ macros assume that on x86-linux and x86-darwin,
+   sizeof(unsigned long) == 4. */
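+
+/* Note: in the macros below, the initial "subl $N, %%esp" padding
+   appears to be chosen so that the subsequent pushes round the total
+   %esp adjustment up to a multiple of 16 bytes, preserving the
+   16-byte stack alignment that x86-darwin (and modern gcc on
+   x86-linux) expects at call boundaries. */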
+
+#define CALL_FN_W_v(lval, orig)                                   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[1];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      __asm__ volatile(                                           \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1)                             \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[2];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      __asm__ volatile(                                           \
+         "subl $12, %%esp\n\t"                                    \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $16, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      __asm__ volatile(                                           \
+         "subl $8, %%esp\n\t"                                     \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $16, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3)                 \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[4];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      __asm__ volatile(                                           \
+         "subl $4, %%esp\n\t"                                     \
+         "pushl 12(%%eax)\n\t"                                    \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $16, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[5];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      __asm__ volatile(                                           \
+         "pushl 16(%%eax)\n\t"                                    \
+         "pushl 12(%%eax)\n\t"                                    \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $16, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5)        \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[6];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      __asm__ volatile(                                           \
+         "subl $12, %%esp\n\t"                                    \
+         "pushl 20(%%eax)\n\t"                                    \
+         "pushl 16(%%eax)\n\t"                                    \
+         "pushl 12(%%eax)\n\t"                                    \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $32, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6)   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[7];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      __asm__ volatile(                                           \
+         "subl $8, %%esp\n\t"                                     \
+         "pushl 24(%%eax)\n\t"                                    \
+         "pushl 20(%%eax)\n\t"                                    \
+         "pushl 16(%%eax)\n\t"                                    \
+         "pushl 12(%%eax)\n\t"                                    \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $32, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7)                            \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[8];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      __asm__ volatile(                                           \
+         "subl $4, %%esp\n\t"                                     \
+         "pushl 28(%%eax)\n\t"                                    \
+         "pushl 24(%%eax)\n\t"                                    \
+         "pushl 20(%%eax)\n\t"                                    \
+         "pushl 16(%%eax)\n\t"                                    \
+         "pushl 12(%%eax)\n\t"                                    \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $32, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[9];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      __asm__ volatile(                                           \
+         "pushl 32(%%eax)\n\t"                                    \
+         "pushl 28(%%eax)\n\t"                                    \
+         "pushl 24(%%eax)\n\t"                                    \
+         "pushl 20(%%eax)\n\t"                                    \
+         "pushl 16(%%eax)\n\t"                                    \
+         "pushl 12(%%eax)\n\t"                                    \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $32, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8,arg9)                  \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[10];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      __asm__ volatile(                                           \
+         "subl $12, %%esp\n\t"                                    \
+         "pushl 36(%%eax)\n\t"                                    \
+         "pushl 32(%%eax)\n\t"                                    \
+         "pushl 28(%%eax)\n\t"                                    \
+         "pushl 24(%%eax)\n\t"                                    \
+         "pushl 20(%%eax)\n\t"                                    \
+         "pushl 16(%%eax)\n\t"                                    \
+         "pushl 12(%%eax)\n\t"                                    \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $48, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[11];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      _argvec[10] = (unsigned long)(arg10);                       \
+      __asm__ volatile(                                           \
+         "subl $8, %%esp\n\t"                                     \
+         "pushl 40(%%eax)\n\t"                                    \
+         "pushl 36(%%eax)\n\t"                                    \
+         "pushl 32(%%eax)\n\t"                                    \
+         "pushl 28(%%eax)\n\t"                                    \
+         "pushl 24(%%eax)\n\t"                                    \
+         "pushl 20(%%eax)\n\t"                                    \
+         "pushl 16(%%eax)\n\t"                                    \
+         "pushl 12(%%eax)\n\t"                                    \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $48, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,       \
+                                  arg6,arg7,arg8,arg9,arg10,      \
+                                  arg11)                          \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[12];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      _argvec[10] = (unsigned long)(arg10);                       \
+      _argvec[11] = (unsigned long)(arg11);                       \
+      __asm__ volatile(                                           \
+         "subl $4, %%esp\n\t"                                     \
+         "pushl 44(%%eax)\n\t"                                    \
+         "pushl 40(%%eax)\n\t"                                    \
+         "pushl 36(%%eax)\n\t"                                    \
+         "pushl 32(%%eax)\n\t"                                    \
+         "pushl 28(%%eax)\n\t"                                    \
+         "pushl 24(%%eax)\n\t"                                    \
+         "pushl 20(%%eax)\n\t"                                    \
+         "pushl 16(%%eax)\n\t"                                    \
+         "pushl 12(%%eax)\n\t"                                    \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $48, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,       \
+                                  arg6,arg7,arg8,arg9,arg10,      \
+                                  arg11,arg12)                    \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[13];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      _argvec[10] = (unsigned long)(arg10);                       \
+      _argvec[11] = (unsigned long)(arg11);                       \
+      _argvec[12] = (unsigned long)(arg12);                       \
+      __asm__ volatile(                                           \
+         "pushl 48(%%eax)\n\t"                                    \
+         "pushl 44(%%eax)\n\t"                                    \
+         "pushl 40(%%eax)\n\t"                                    \
+         "pushl 36(%%eax)\n\t"                                    \
+         "pushl 32(%%eax)\n\t"                                    \
+         "pushl 28(%%eax)\n\t"                                    \
+         "pushl 24(%%eax)\n\t"                                    \
+         "pushl 20(%%eax)\n\t"                                    \
+         "pushl 16(%%eax)\n\t"                                    \
+         "pushl 12(%%eax)\n\t"                                    \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $48, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#endif /* PLAT_x86_linux || PLAT_x86_darwin */
+
+/* ------------------------ amd64-{linux,darwin} --------------- */
+
+#if defined(PLAT_amd64_linux)  ||  defined(PLAT_amd64_darwin)
+
+/* ARGREGS: rdi rsi rdx rcx r8 r9 (the rest on stack in R-to-L order) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS /*"rax",*/ "rcx", "rdx", "rsi",       \
+                            "rdi", "r8", "r9", "r10", "r11"
+
+/* This is all pretty complex; it's needed to make stack unwinding
+   work reliably.  See bug 243270.  The basic problem is the sub and
+   add of 128 to %rsp in all of the following macros.  If gcc believes
+   the CFA is in %rsp, then unwinding may fail, because what's at the
+   CFA is not what gcc "expected" when it constructs the CFIs for the
+   places where the macros are instantiated.
+
+   But we can't just add a CFI annotation to increase the CFA offset
+   by 128, to match the sub of 128 from %rsp, because we don't know
+   whether gcc has chosen %rsp as the CFA at that point, or whether it
+   has chosen some other register (e.g., %rbp).  In the latter case,
+   adding a CFI annotation to change the CFA offset is simply wrong.
+
+   So the solution is to get hold of the CFA using
+   __builtin_dwarf_cfa(), put it in a known register, and add a
+   CFI annotation to say what the register is.  We choose %rbp for
+   this (perhaps perversely), because:
+
+   (1) %rbp is already subject to unwinding.  If a new register was
+       chosen then the unwinder would have to unwind it in all stack
+       traces, which is expensive, and
+
+   (2) %rbp is already subject to precise exception updates in the
+       JIT.  If a new register was chosen, we'd have to have precise
+       exceptions for it too, which reduces performance of the
+       generated code.
+
+   However, one extra complication: we can't just whack the result
+   of __builtin_dwarf_cfa() into %rbp and then add %rbp to the
+   list of trashed registers at the end of the inline assembly
+   fragments; gcc won't allow %rbp to appear in that list.  Hence
+   instead we need to stash %rbp in %r15 for the duration of the asm,
+   and say that %r15 is trashed instead.  gcc seems happy to go with
+   that.
+
+   Oh, and this all needs to be conditionalised so that it is
+   unchanged from before this commit, when compiled with older gccs
+   that don't support __builtin_dwarf_cfa.  Furthermore, since
+   this header file is freestanding, it has to be independent of
+   config.h, and so the following conditionalisation cannot depend on
+   configure-time checks.
+
+   Although it's not obvious from
+   'defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)',
+   this expression excludes Darwin:
+   .cfi directives in Darwin assembly appear to be completely
+   different, and I haven't investigated how they work.
+
+   For even more entertainment value, note we have to use the
+   completely undocumented __builtin_dwarf_cfa(), which appears to
+   really compute the CFA, whereas __builtin_frame_address(0) claims
+   to but actually doesn't.  See
+   https://bugs.kde.org/show_bug.cgi?id=243270#c47
+*/
+#if defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)
+#  define __FRAME_POINTER                                         \
+      ,"r"(__builtin_dwarf_cfa())
+#  define VALGRIND_CFI_PROLOGUE                                   \
+      "movq %%rbp, %%r15\n\t"   /* stash %rbp in %r15 */          \
+      "movq %2, %%rbp\n\t"      /* %rbp := CFA (input %2) */      \
+      ".cfi_remember_state\n\t"                                   \
+      ".cfi_def_cfa rbp, 0\n\t" /* tell unwinder: CFA is %rbp */
+#  define VALGRIND_CFI_EPILOGUE                                   \
+      "movq %%r15, %%rbp\n\t"   /* restore %rbp from %r15 */      \
+      ".cfi_restore_state\n\t"
+#else
+#  define __FRAME_POINTER
+#  define VALGRIND_CFI_PROLOGUE
+#  define VALGRIND_CFI_EPILOGUE
+#endif
+
+
+/* These CALL_FN_ macros assume that on amd64-linux and amd64-darwin,
+   sizeof(unsigned long) == 8. */
+
+/* NB 9 Sept 07.  There is a nasty kludge here in all these CALL_FN_
+   macros.  In order not to trash the stack redzone, we need to drop
+   %rsp by 128 before the hidden call, and restore afterwards.  The
+   nastiness is that it is only by luck that the stack still appears
+   to be unwindable during the hidden call - since then the behaviour
+   of any routine using this macro does not match what the CFI data
+   says.  Sigh.
+
+   Why is this important?  Imagine that a wrapper has a
+   stack-allocated local and passes a pointer to it to the hidden
+   call.  Because gcc does not know about the hidden call, it may
+   allocate that local in the redzone.  Unfortunately the hidden call
+   may then trash it before the wrapper comes to use it.  So we must
+   step clear of the redzone, for the duration of the hidden call, to
+   make it safe.
+
+   Probably the same problem afflicts the other redzone-style ABIs too
+   (ppc64-linux, ppc32-aix5, ppc64-aix5); but for those, the stack is
+   self-describing (none of this CFI nonsense), so at least messing
+   with the stack pointer doesn't create a danger of a non-unwindable
+   stack. */
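+
+/* A sketch of the hazard just described (the wrapped function 'fill'
+   is hypothetical):
+
+      int I_WRAP_SONAME_FNNAME_ZU(NONE, fill) ( int* p )
+      {
+         int    local;    // gcc may place 'local' in the redzone
+         int    r;
+         OrigFn fn;
+         VALGRIND_GET_ORIG_FN(fn);
+         // the hidden call receives &local; without the 128-byte
+         // step over the redzone, the callee could trash 'local'
+         CALL_FN_W_W(r, fn, &local);
+         *p = local;
+         return r;
+      }
+*/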
+
+#define CALL_FN_W_v(lval, orig)                                   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[1];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
+         "subq $128,%%rsp\n\t"                                    \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $128,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1)                             \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[2];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
+         "subq $128,%%rsp\n\t"                                    \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $128,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
+         "subq $128,%%rsp\n\t"                                    \
+         "movq 16(%%rax), %%rsi\n\t"                              \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $128,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3)                 \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[4];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
+         "subq $128,%%rsp\n\t"                                    \
+         "movq 24(%%rax), %%rdx\n\t"                              \
+         "movq 16(%%rax), %%rsi\n\t"                              \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $128,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[5];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
+         "subq $128,%%rsp\n\t"                                    \
+         "movq 32(%%rax), %%rcx\n\t"                              \
+         "movq 24(%%rax), %%rdx\n\t"                              \
+         "movq 16(%%rax), %%rsi\n\t"                              \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $128,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5)        \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[6];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
+         "subq $128,%%rsp\n\t"                                    \
+         "movq 40(%%rax), %%r8\n\t"                               \
+         "movq 32(%%rax), %%rcx\n\t"                              \
+         "movq 24(%%rax), %%rdx\n\t"                              \
+         "movq 16(%%rax), %%rsi\n\t"                              \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $128,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6)   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[7];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
+         "subq $128,%%rsp\n\t"                                    \
+         "movq 48(%%rax), %%r9\n\t"                               \
+         "movq 40(%%rax), %%r8\n\t"                               \
+         "movq 32(%%rax), %%rcx\n\t"                              \
+         "movq 24(%%rax), %%rdx\n\t"                              \
+         "movq 16(%%rax), %%rsi\n\t"                              \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $128,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
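+
+/* Note: from CALL_FN_W_7W onwards, macros that push an odd number of
+   stack-passed arguments drop %rsp by 136 rather than 128, so that
+   the total adjustment remains a multiple of 16 and the call site's
+   16-byte stack alignment is preserved. */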
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7)                            \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[8];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
+         "subq $136,%%rsp\n\t"                                    \
+         "pushq 56(%%rax)\n\t"                                    \
+         "movq 48(%%rax), %%r9\n\t"                               \
+         "movq 40(%%rax), %%r8\n\t"                               \
+         "movq 32(%%rax), %%rcx\n\t"                              \
+         "movq 24(%%rax), %%rdx\n\t"                              \
+         "movq 16(%%rax), %%rsi\n\t"                              \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $8, %%rsp\n"                                       \
+         "addq $136,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[9];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
+         "subq $128,%%rsp\n\t"                                    \
+         "pushq 64(%%rax)\n\t"                                    \
+         "pushq 56(%%rax)\n\t"                                    \
+         "movq 48(%%rax), %%r9\n\t"                               \
+         "movq 40(%%rax), %%r8\n\t"                               \
+         "movq 32(%%rax), %%rcx\n\t"                              \
+         "movq 24(%%rax), %%rdx\n\t"                              \
+         "movq 16(%%rax), %%rsi\n\t"                              \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $16, %%rsp\n"                                      \
+         "addq $128,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8,arg9)                  \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[10];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
+         "subq $136,%%rsp\n\t"                                    \
+         "pushq 72(%%rax)\n\t"                                    \
+         "pushq 64(%%rax)\n\t"                                    \
+         "pushq 56(%%rax)\n\t"                                    \
+         "movq 48(%%rax), %%r9\n\t"                               \
+         "movq 40(%%rax), %%r8\n\t"                               \
+         "movq 32(%%rax), %%rcx\n\t"                              \
+         "movq 24(%%rax), %%rdx\n\t"                              \
+         "movq 16(%%rax), %%rsi\n\t"                              \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $24, %%rsp\n"                                      \
+         "addq $136,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[11];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      _argvec[10] = (unsigned long)(arg10);                       \
+      __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
+         "subq $128,%%rsp\n\t"                                    \
+         "pushq 80(%%rax)\n\t"                                    \
+         "pushq 72(%%rax)\n\t"                                    \
+         "pushq 64(%%rax)\n\t"                                    \
+         "pushq 56(%%rax)\n\t"                                    \
+         "movq 48(%%rax), %%r9\n\t"                               \
+         "movq 40(%%rax), %%r8\n\t"                               \
+         "movq 32(%%rax), %%rcx\n\t"                              \
+         "movq 24(%%rax), %%rdx\n\t"                              \
+         "movq 16(%%rax), %%rsi\n\t"                              \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $32, %%rsp\n"                                      \
+         "addq $128,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10,arg11)     \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[12];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      _argvec[10] = (unsigned long)(arg10);                       \
+      _argvec[11] = (unsigned long)(arg11);                       \
+      __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
+         "subq $136,%%rsp\n\t"                                    \
+         "pushq 88(%%rax)\n\t"                                    \
+         "pushq 80(%%rax)\n\t"                                    \
+         "pushq 72(%%rax)\n\t"                                    \
+         "pushq 64(%%rax)\n\t"                                    \
+         "pushq 56(%%rax)\n\t"                                    \
+         "movq 48(%%rax), %%r9\n\t"                               \
+         "movq 40(%%rax), %%r8\n\t"                               \
+         "movq 32(%%rax), %%rcx\n\t"                              \
+         "movq 24(%%rax), %%rdx\n\t"                              \
+         "movq 16(%%rax), %%rsi\n\t"                              \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $40, %%rsp\n"                                      \
+         "addq $136,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                arg7,arg8,arg9,arg10,arg11,arg12) \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[13];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      _argvec[10] = (unsigned long)(arg10);                       \
+      _argvec[11] = (unsigned long)(arg11);                       \
+      _argvec[12] = (unsigned long)(arg12);                       \
+      __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
+         "subq $128,%%rsp\n\t"                                    \
+         "pushq 96(%%rax)\n\t"                                    \
+         "pushq 88(%%rax)\n\t"                                    \
+         "pushq 80(%%rax)\n\t"                                    \
+         "pushq 72(%%rax)\n\t"                                    \
+         "pushq 64(%%rax)\n\t"                                    \
+         "pushq 56(%%rax)\n\t"                                    \
+         "movq 48(%%rax), %%r9\n\t"                               \
+         "movq 40(%%rax), %%r8\n\t"                               \
+         "movq 32(%%rax), %%rcx\n\t"                              \
+         "movq 24(%%rax), %%rdx\n\t"                              \
+         "movq 16(%%rax), %%rsi\n\t"                              \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $48, %%rsp\n"                                      \
+         "addq $128,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#endif /* PLAT_amd64_linux || PLAT_amd64_darwin */
+
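+/* Usage sketch, per the Valgrind function-wrapping documentation
+   ("foo" and its argument types are illustrative only): a wrapper
+   fetches the original function with VALGRIND_GET_ORIG_FN and calls
+   it through the matching CALL_FN_* macro, which bypasses Valgrind's
+   function redirection:
+
+      int I_WRAP_SONAME_FN_ZU(NONE, foo)(int x, int y)
+      {
+         int    result;
+         OrigFn fn;
+         VALGRIND_GET_ORIG_FN(fn);
+         CALL_FN_W_WW(result, fn, x, y);
+         return result;
+      }
+*/
+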
+/* ------------------------ ppc32-linux ------------------------ */
+
+#if defined(PLAT_ppc32_linux)
+
+/* This is useful for finding out how args beyond the eighth get
+   passed on the stack (compile it and inspect the generated code):
+
+   extern int f9  ( int,int,int,int,int,int,int,int,int );
+   extern int f10 ( int,int,int,int,int,int,int,int,int,int );
+   extern int f11 ( int,int,int,int,int,int,int,int,int,int,int );
+   extern int f12 ( int,int,int,int,int,int,int,int,int,int,int,int );
+
+   int g9 ( void ) {
+      return f9(11,22,33,44,55,66,77,88,99);
+   }
+   int g10 ( void ) {
+      return f10(11,22,33,44,55,66,77,88,99,110);
+   }
+   int g11 ( void ) {
+      return f11(11,22,33,44,55,66,77,88,99,110,121);
+   }
+   int g12 ( void ) {
+      return f12(11,22,33,44,55,66,77,88,99,110,121,132);
+   }
+*/
+
+/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS                                       \
+   "lr", "ctr", "xer",                                            \
+   "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7",        \
+   "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",   \
+   "r11", "r12", "r13"
+
+/* These CALL_FN_ macros assume that on ppc32-linux,
+   sizeof(unsigned long) == 4. */
+
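+/* Since each _argvec slot is 4 bytes here, arg N lives at byte offset
+   4*N from r11 (which the asm points at &_argvec[0]): "lwz 3,4(11)"
+   loads arg1, "lwz 4,8(11)" loads arg2, and so on. */
+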
+#define CALL_FN_W_v(lval, orig)                                   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[1];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr %0,3"                                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1)                             \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[2];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr %0,3"                                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      _argvec[2] = (unsigned long)arg2;                           \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 4,8(11)\n\t"                                        \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr %0,3"                                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3)                 \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[4];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      _argvec[2] = (unsigned long)arg2;                           \
+      _argvec[3] = (unsigned long)arg3;                           \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 4,8(11)\n\t"                                        \
+         "lwz 5,12(11)\n\t"                                       \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr %0,3"                                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[5];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      _argvec[2] = (unsigned long)arg2;                           \
+      _argvec[3] = (unsigned long)arg3;                           \
+      _argvec[4] = (unsigned long)arg4;                           \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 4,8(11)\n\t"                                        \
+         "lwz 5,12(11)\n\t"                                       \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr %0,3"                                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5)        \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[6];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      _argvec[2] = (unsigned long)arg2;                           \
+      _argvec[3] = (unsigned long)arg3;                           \
+      _argvec[4] = (unsigned long)arg4;                           \
+      _argvec[5] = (unsigned long)arg5;                           \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 4,8(11)\n\t"                                        \
+         "lwz 5,12(11)\n\t"                                       \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
+         "lwz 7,20(11)\n\t"                                       \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr %0,3"                                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6)   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[7];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      _argvec[2] = (unsigned long)arg2;                           \
+      _argvec[3] = (unsigned long)arg3;                           \
+      _argvec[4] = (unsigned long)arg4;                           \
+      _argvec[5] = (unsigned long)arg5;                           \
+      _argvec[6] = (unsigned long)arg6;                           \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 4,8(11)\n\t"                                        \
+         "lwz 5,12(11)\n\t"                                       \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
+         "lwz 7,20(11)\n\t"                                       \
+         "lwz 8,24(11)\n\t"                                       \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr %0,3"                                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7)                            \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[8];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      _argvec[2] = (unsigned long)arg2;                           \
+      _argvec[3] = (unsigned long)arg3;                           \
+      _argvec[4] = (unsigned long)arg4;                           \
+      _argvec[5] = (unsigned long)arg5;                           \
+      _argvec[6] = (unsigned long)arg6;                           \
+      _argvec[7] = (unsigned long)arg7;                           \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 4,8(11)\n\t"                                        \
+         "lwz 5,12(11)\n\t"                                       \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
+         "lwz 7,20(11)\n\t"                                       \
+         "lwz 8,24(11)\n\t"                                       \
+         "lwz 9,28(11)\n\t"                                       \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr %0,3"                                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[9];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      _argvec[2] = (unsigned long)arg2;                           \
+      _argvec[3] = (unsigned long)arg3;                           \
+      _argvec[4] = (unsigned long)arg4;                           \
+      _argvec[5] = (unsigned long)arg5;                           \
+      _argvec[6] = (unsigned long)arg6;                           \
+      _argvec[7] = (unsigned long)arg7;                           \
+      _argvec[8] = (unsigned long)arg8;                           \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 4,8(11)\n\t"                                        \
+         "lwz 5,12(11)\n\t"                                       \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
+         "lwz 7,20(11)\n\t"                                       \
+         "lwz 8,24(11)\n\t"                                       \
+         "lwz 9,28(11)\n\t"                                       \
+         "lwz 10,32(11)\n\t" /* arg8->r10 */                      \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr %0,3"                                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8,arg9)                  \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[10];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      _argvec[2] = (unsigned long)arg2;                           \
+      _argvec[3] = (unsigned long)arg3;                           \
+      _argvec[4] = (unsigned long)arg4;                           \
+      _argvec[5] = (unsigned long)arg5;                           \
+      _argvec[6] = (unsigned long)arg6;                           \
+      _argvec[7] = (unsigned long)arg7;                           \
+      _argvec[8] = (unsigned long)arg8;                           \
+      _argvec[9] = (unsigned long)arg9;                           \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "addi 1,1,-16\n\t"                                       \
+         /* arg9 */                                               \
+         "lwz 3,36(11)\n\t"                                       \
+         "stw 3,8(1)\n\t"                                         \
+         /* args1-8 */                                            \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 4,8(11)\n\t"                                        \
+         "lwz 5,12(11)\n\t"                                       \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
+         "lwz 7,20(11)\n\t"                                       \
+         "lwz 8,24(11)\n\t"                                       \
+         "lwz 9,28(11)\n\t"                                       \
+         "lwz 10,32(11)\n\t" /* arg8->r10 */                      \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "addi 1,1,16\n\t"                                        \
+         "mr %0,3"                                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
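+/* From nine args upwards, the excess args go on the stack.  The asm
+   opens a temporary frame ("addi 1,1,-16", kept a multiple of 16 for
+   ABI stack alignment) and stores them from 8(r1) upwards: in the
+   SVR4 ppc32 frame layout, 0(r1) is the back-chain word and 4(r1) the
+   LR save word, so the outgoing parameter area begins at 8(r1). */
+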
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[11];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      _argvec[2] = (unsigned long)arg2;                           \
+      _argvec[3] = (unsigned long)arg3;                           \
+      _argvec[4] = (unsigned long)arg4;                           \
+      _argvec[5] = (unsigned long)arg5;                           \
+      _argvec[6] = (unsigned long)arg6;                           \
+      _argvec[7] = (unsigned long)arg7;                           \
+      _argvec[8] = (unsigned long)arg8;                           \
+      _argvec[9] = (unsigned long)arg9;                           \
+      _argvec[10] = (unsigned long)arg10;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "addi 1,1,-16\n\t"                                       \
+         /* arg10 */                                              \
+         "lwz 3,40(11)\n\t"                                       \
+         "stw 3,12(1)\n\t"                                        \
+         /* arg9 */                                               \
+         "lwz 3,36(11)\n\t"                                       \
+         "stw 3,8(1)\n\t"                                         \
+         /* args1-8 */                                            \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 4,8(11)\n\t"                                        \
+         "lwz 5,12(11)\n\t"                                       \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
+         "lwz 7,20(11)\n\t"                                       \
+         "lwz 8,24(11)\n\t"                                       \
+         "lwz 9,28(11)\n\t"                                       \
+         "lwz 10,32(11)\n\t" /* arg8->r10 */                      \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "addi 1,1,16\n\t"                                        \
+         "mr %0,3"                                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10,arg11)     \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[12];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      _argvec[2] = (unsigned long)arg2;                           \
+      _argvec[3] = (unsigned long)arg3;                           \
+      _argvec[4] = (unsigned long)arg4;                           \
+      _argvec[5] = (unsigned long)arg5;                           \
+      _argvec[6] = (unsigned long)arg6;                           \
+      _argvec[7] = (unsigned long)arg7;                           \
+      _argvec[8] = (unsigned long)arg8;                           \
+      _argvec[9] = (unsigned long)arg9;                           \
+      _argvec[10] = (unsigned long)arg10;                         \
+      _argvec[11] = (unsigned long)arg11;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "addi 1,1,-32\n\t"                                       \
+         /* arg11 */                                              \
+         "lwz 3,44(11)\n\t"                                       \
+         "stw 3,16(1)\n\t"                                        \
+         /* arg10 */                                              \
+         "lwz 3,40(11)\n\t"                                       \
+         "stw 3,12(1)\n\t"                                        \
+         /* arg9 */                                               \
+         "lwz 3,36(11)\n\t"                                       \
+         "stw 3,8(1)\n\t"                                         \
+         /* args1-8 */                                            \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 4,8(11)\n\t"                                        \
+         "lwz 5,12(11)\n\t"                                       \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
+         "lwz 7,20(11)\n\t"                                       \
+         "lwz 8,24(11)\n\t"                                       \
+         "lwz 9,28(11)\n\t"                                       \
+         "lwz 10,32(11)\n\t" /* arg8->r10 */                      \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "addi 1,1,32\n\t"                                        \
+         "mr %0,3"                                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                arg7,arg8,arg9,arg10,arg11,arg12) \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[13];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      _argvec[2] = (unsigned long)arg2;                           \
+      _argvec[3] = (unsigned long)arg3;                           \
+      _argvec[4] = (unsigned long)arg4;                           \
+      _argvec[5] = (unsigned long)arg5;                           \
+      _argvec[6] = (unsigned long)arg6;                           \
+      _argvec[7] = (unsigned long)arg7;                           \
+      _argvec[8] = (unsigned long)arg8;                           \
+      _argvec[9] = (unsigned long)arg9;                           \
+      _argvec[10] = (unsigned long)arg10;                         \
+      _argvec[11] = (unsigned long)arg11;                         \
+      _argvec[12] = (unsigned long)arg12;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "addi 1,1,-32\n\t"                                       \
+         /* arg12 */                                              \
+         "lwz 3,48(11)\n\t"                                       \
+         "stw 3,20(1)\n\t"                                        \
+         /* arg11 */                                              \
+         "lwz 3,44(11)\n\t"                                       \
+         "stw 3,16(1)\n\t"                                        \
+         /* arg10 */                                              \
+         "lwz 3,40(11)\n\t"                                       \
+         "stw 3,12(1)\n\t"                                        \
+         /* arg9 */                                               \
+         "lwz 3,36(11)\n\t"                                       \
+         "stw 3,8(1)\n\t"                                         \
+         /* args1-8 */                                            \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 4,8(11)\n\t"                                        \
+         "lwz 5,12(11)\n\t"                                       \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
+         "lwz 7,20(11)\n\t"                                       \
+         "lwz 8,24(11)\n\t"                                       \
+         "lwz 9,28(11)\n\t"                                       \
+         "lwz 10,32(11)\n\t" /* arg8->r10 */                      \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "addi 1,1,32\n\t"                                        \
+         "mr %0,3"                                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#endif /* PLAT_ppc32_linux */
+
+/* ------------------------ ppc64-linux ------------------------ */
+
+#if defined(PLAT_ppc64_linux)
+
+/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS                                       \
+   "lr", "ctr", "xer",                                            \
+   "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7",        \
+   "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",   \
+   "r11", "r12", "r13"
+
+/* These CALL_FN_ macros assume that on ppc64-linux, sizeof(unsigned
+   long) == 8. */
+
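+/* The _argvec layout differs from the 32-bit case: slot [0] is
+   scratch space used to preserve the caller's TOC pointer (r2) across
+   the call, [1] holds the callee's TOC pointer, [2] the no-redirect
+   address, and the args follow from [2+1].  The asm is handed
+   &_argvec[2], so the TOC slots sit at negative offsets ("std
+   2,-16(11)" saves into [0]; "ld 2,-8(11)" loads [1]). */
+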
+#define CALL_FN_W_v(lval, orig)                                   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+0];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1] = (unsigned long)_orig.r2;                       \
+      _argvec[2] = (unsigned long)_orig.nraddr;                   \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)" /* restore tocptr */                      \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1)                             \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+1];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)" /* restore tocptr */                      \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+2];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)" /* restore tocptr */                      \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3)                 \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+3];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)" /* restore tocptr */                      \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+4];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)" /* restore tocptr */                      \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5)        \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+5];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)" /* restore tocptr */                      \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6)   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+6];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)" /* restore tocptr */                      \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7)                            \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+7];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)" /* restore tocptr */                      \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+8];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)" /* restore tocptr */                      \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8,arg9)                  \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+9];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "addi 1,1,-128\n\t"  /* expand stack frame */            \
+         /* arg9 */                                               \
+         "ld  3,72(11)\n\t"                                       \
+         "std 3,112(1)\n\t"                                       \
+         /* args1-8 */                                            \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)\n\t" /* restore tocptr */                  \
+         "addi 1,1,128"     /* restore frame */                   \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
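+/* Note on the offsets used above once the frame is expanded: in the
+   ppc64 ELF ABI the parameter save area starts 48 bytes above the
+   stack pointer, and args 1-8 each occupy one doubleword slot, so the
+   ninth argument belongs at 48 + 8*8 = 112(1), the tenth at 120(1),
+   and so on. */
+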
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+10];                       \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      _argvec[2+10] = (unsigned long)arg10;                       \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "addi 1,1,-128\n\t"  /* expand stack frame */            \
+         /* arg10 */                                              \
+         "ld  3,80(11)\n\t"                                       \
+         "std 3,120(1)\n\t"                                       \
+         /* arg9 */                                               \
+         "ld  3,72(11)\n\t"                                       \
+         "std 3,112(1)\n\t"                                       \
+         /* args1-8 */                                            \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)\n\t" /* restore tocptr */                  \
+         "addi 1,1,128"     /* restore frame */                   \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10,arg11)     \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+11];                       \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      _argvec[2+10] = (unsigned long)arg10;                       \
+      _argvec[2+11] = (unsigned long)arg11;                       \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "addi 1,1,-144\n\t"  /* expand stack frame */            \
+         /* arg11 */                                              \
+         "ld  3,88(11)\n\t"                                       \
+         "std 3,128(1)\n\t"                                       \
+         /* arg10 */                                              \
+         "ld  3,80(11)\n\t"                                       \
+         "std 3,120(1)\n\t"                                       \
+         /* arg9 */                                               \
+         "ld  3,72(11)\n\t"                                       \
+         "std 3,112(1)\n\t"                                       \
+         /* args1-8 */                                            \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)\n\t" /* restore tocptr */                  \
+         "addi 1,1,144"     /* restore frame */                   \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                arg7,arg8,arg9,arg10,arg11,arg12) \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+12];                       \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      _argvec[2+10] = (unsigned long)arg10;                       \
+      _argvec[2+11] = (unsigned long)arg11;                       \
+      _argvec[2+12] = (unsigned long)arg12;                       \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "addi 1,1,-144\n\t"  /* expand stack frame */            \
+         /* arg12 */                                              \
+         "ld  3,96(11)\n\t"                                       \
+         "std 3,136(1)\n\t"                                       \
+         /* arg11 */                                              \
+         "ld  3,88(11)\n\t"                                       \
+         "std 3,128(1)\n\t"                                       \
+         /* arg10 */                                              \
+         "ld  3,80(11)\n\t"                                       \
+         "std 3,120(1)\n\t"                                       \
+         /* arg9 */                                               \
+         "ld  3,72(11)\n\t"                                       \
+         "std 3,112(1)\n\t"                                       \
+         /* args1-8 */                                            \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)\n\t" /* restore tocptr */                  \
+         "addi 1,1,144"     /* restore frame */                   \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#endif /* PLAT_ppc64_linux */
+
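+/* As a quick orientation for the per-platform macros above and below:
+   a minimal sketch, in the style of the Valgrind manual's function-
+   wrapping example, of how CALL_FN_W_WW and friends are typically
+   used.  The function 'foo' and its wrapper are hypothetical; only
+   OrigFn, VALGRIND_GET_ORIG_FN and I_WRAP_SONAME_FN_ZU come from this
+   header.
+
+      int I_WRAP_SONAME_FN_ZU(NONE, foo)(int x, int y)
+      {
+         int    r;
+         OrigFn fn;
+         VALGRIND_GET_ORIG_FN(fn);     // handle on the real foo
+         CALL_FN_W_WW(r, fn, x, y);    // forward: r = foo(x, y)
+         return r;
+      }
+
+   The W_WW suffix encodes the signature: one word returned, two word
+   arguments. */
+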
+/* ------------------------- arm-linux ------------------------- */
+
+#if defined(PLAT_arm_linux)
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS "r0", "r1", "r2", "r3", "r4", "r14"
+
+/* These CALL_FN_ macros assume that on arm-linux, sizeof(unsigned
+   long) == 4. */
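+/* A minimal sketch of how that assumption could be checked at compile
+   time (not something this header itself does):
+
+      typedef char vg_arm_ulong_is_4_bytes
+                      [sizeof(unsigned long) == 4 ? 1 : -1];
+
+   The array size goes negative, and compilation fails, if the
+   assumption is violated. */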
+
+#define CALL_FN_W_v(lval, orig)                                   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[1];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      __asm__ volatile(                                           \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "mov %0, r0\n"                                           \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "0" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1)                             \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[2];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      __asm__ volatile(                                           \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "mov %0, r0\n"                                           \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "0" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory",  __CALLER_SAVED_REGS         \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      __asm__ volatile(                                           \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r1, [%1, #8] \n\t"                                  \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "mov %0, r0\n"                                           \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "0" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3)                 \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[4];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      __asm__ volatile(                                           \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r1, [%1, #8] \n\t"                                  \
+         "ldr r2, [%1, #12] \n\t"                                 \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "mov %0, r0\n"                                           \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "0" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[5];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      __asm__ volatile(                                           \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r1, [%1, #8] \n\t"                                  \
+         "ldr r2, [%1, #12] \n\t"                                 \
+         "ldr r3, [%1, #16] \n\t"                                 \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "mov %0, r0"                                             \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "0" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5)        \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[6];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      __asm__ volatile(                                           \
+         "ldr r0, [%1, #20] \n\t"                                 \
+         "push {r0} \n\t"                                         \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r1, [%1, #8] \n\t"                                  \
+         "ldr r2, [%1, #12] \n\t"                                 \
+         "ldr r3, [%1, #16] \n\t"                                 \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "add sp, sp, #4 \n\t"                                    \
+         "mov %0, r0"                                             \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "0" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6)   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[7];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      __asm__ volatile(                                           \
+         "ldr r0, [%1, #20] \n\t"                                 \
+         "ldr r1, [%1, #24] \n\t"                                 \
+         "push {r0, r1} \n\t"                                     \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r1, [%1, #8] \n\t"                                  \
+         "ldr r2, [%1, #12] \n\t"                                 \
+         "ldr r3, [%1, #16] \n\t"                                 \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "add sp, sp, #8 \n\t"                                    \
+         "mov %0, r0"                                             \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "0" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7)                            \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[8];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      __asm__ volatile(                                           \
+         "ldr r0, [%1, #20] \n\t"                                 \
+         "ldr r1, [%1, #24] \n\t"                                 \
+         "ldr r2, [%1, #28] \n\t"                                 \
+         "push {r0, r1, r2} \n\t"                                 \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r1, [%1, #8] \n\t"                                  \
+         "ldr r2, [%1, #12] \n\t"                                 \
+         "ldr r3, [%1, #16] \n\t"                                 \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "add sp, sp, #12 \n\t"                                   \
+         "mov %0, r0"                                             \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "0" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[9];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      __asm__ volatile(                                           \
+         "ldr r0, [%1, #20] \n\t"                                 \
+         "ldr r1, [%1, #24] \n\t"                                 \
+         "ldr r2, [%1, #28] \n\t"                                 \
+         "ldr r3, [%1, #32] \n\t"                                 \
+         "push {r0, r1, r2, r3} \n\t"                             \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r1, [%1, #8] \n\t"                                  \
+         "ldr r2, [%1, #12] \n\t"                                 \
+         "ldr r3, [%1, #16] \n\t"                                 \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "add sp, sp, #16 \n\t"                                   \
+         "mov %0, r0"                                             \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "0" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8,arg9)                  \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[10];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      __asm__ volatile(                                           \
+         "ldr r0, [%1, #20] \n\t"                                 \
+         "ldr r1, [%1, #24] \n\t"                                 \
+         "ldr r2, [%1, #28] \n\t"                                 \
+         "ldr r3, [%1, #32] \n\t"                                 \
+         "ldr r4, [%1, #36] \n\t"                                 \
+         "push {r0, r1, r2, r3, r4} \n\t"                         \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r1, [%1, #8] \n\t"                                  \
+         "ldr r2, [%1, #12] \n\t"                                 \
+         "ldr r3, [%1, #16] \n\t"                                 \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "add sp, sp, #20 \n\t"                                   \
+         "mov %0, r0"                                             \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "0" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[11];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      _argvec[10] = (unsigned long)(arg10);                       \
+      __asm__ volatile(                                           \
+         "ldr r0, [%1, #40] \n\t"                                 \
+         "push {r0} \n\t"                                         \
+         "ldr r0, [%1, #20] \n\t"                                 \
+         "ldr r1, [%1, #24] \n\t"                                 \
+         "ldr r2, [%1, #28] \n\t"                                 \
+         "ldr r3, [%1, #32] \n\t"                                 \
+         "ldr r4, [%1, #36] \n\t"                                 \
+         "push {r0, r1, r2, r3, r4} \n\t"                         \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r1, [%1, #8] \n\t"                                  \
+         "ldr r2, [%1, #12] \n\t"                                 \
+         "ldr r3, [%1, #16] \n\t"                                 \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "add sp, sp, #24 \n\t"                                   \
+         "mov %0, r0"                                             \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "0" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,       \
+                                  arg6,arg7,arg8,arg9,arg10,      \
+                                  arg11)                          \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[12];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      _argvec[10] = (unsigned long)(arg10);                       \
+      _argvec[11] = (unsigned long)(arg11);                       \
+      __asm__ volatile(                                           \
+         "ldr r0, [%1, #40] \n\t"                                 \
+         "ldr r1, [%1, #44] \n\t"                                 \
+         "push {r0, r1} \n\t"                                     \
+         "ldr r0, [%1, #20] \n\t"                                 \
+         "ldr r1, [%1, #24] \n\t"                                 \
+         "ldr r2, [%1, #28] \n\t"                                 \
+         "ldr r3, [%1, #32] \n\t"                                 \
+         "ldr r4, [%1, #36] \n\t"                                 \
+         "push {r0, r1, r2, r3, r4} \n\t"                         \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r1, [%1, #8] \n\t"                                  \
+         "ldr r2, [%1, #12] \n\t"                                 \
+         "ldr r3, [%1, #16] \n\t"                                 \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "add sp, sp, #28 \n\t"                                   \
+         "mov %0, r0"                                             \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "0" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory",__CALLER_SAVED_REGS           \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,       \
+                                  arg6,arg7,arg8,arg9,arg10,      \
+                                  arg11,arg12)                    \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[13];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      _argvec[10] = (unsigned long)(arg10);                       \
+      _argvec[11] = (unsigned long)(arg11);                       \
+      _argvec[12] = (unsigned long)(arg12);                       \
+      __asm__ volatile(                                           \
+         "ldr r0, [%1, #40] \n\t"                                 \
+         "ldr r1, [%1, #44] \n\t"                                 \
+         "ldr r2, [%1, #48] \n\t"                                 \
+         "push {r0, r1, r2} \n\t"                                 \
+         "ldr r0, [%1, #20] \n\t"                                 \
+         "ldr r1, [%1, #24] \n\t"                                 \
+         "ldr r2, [%1, #28] \n\t"                                 \
+         "ldr r3, [%1, #32] \n\t"                                 \
+         "ldr r4, [%1, #36] \n\t"                                 \
+         "push {r0, r1, r2, r3, r4} \n\t"                         \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r1, [%1, #8] \n\t"                                  \
+         "ldr r2, [%1, #12] \n\t"                                 \
+         "ldr r3, [%1, #16] \n\t"                                 \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "add sp, sp, #32 \n\t"                                   \
+         "mov %0, r0"                                             \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "0" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
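+/* Why the stack arguments above are pushed in two steps: a multi-
+   register "push {r0, r1, r2, r3, r4}" stores the lowest-numbered
+   register at the lowest address, so arg5 (in r0) ends up at the new
+   sp, exactly where the AAPCS expects the first stack argument.  Any
+   arguments beyond the ninth are pushed beforehand so they sit above
+   that five-word block. */
+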
+#endif /* PLAT_arm_linux */
+
+/* ------------------------ ppc32-aix5 ------------------------- */
+
+#if defined(PLAT_ppc32_aix5)
+
+/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS                                       \
+   "lr", "ctr", "xer",                                            \
+   "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7",        \
+   "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",   \
+   "r11", "r12", "r13"
+
+/* Expand the stack frame, copying enough info that unwinding
+   still works.  Trashes r3. */
+
+#define VG_EXPAND_FRAME_BY_trashes_r3(_n_fr)                      \
+         "addi 1,1,-" #_n_fr "\n\t"                               \
+         "lwz  3," #_n_fr "(1)\n\t"                               \
+         "stw  3,0(1)\n\t"
+
+#define VG_CONTRACT_FRAME_BY(_n_fr)                               \
+         "addi 1,1," #_n_fr "\n\t"
+
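+/* For instance, VG_EXPAND_FRAME_BY_trashes_r3(512) expands to
+
+      addi 1,1,-512
+      lwz  3,512(1)
+      stw  3,0(1)
+
+   i.e. it drops the stack pointer by 512 bytes and copies the old
+   back-chain word to the bottom of the new frame, which is what keeps
+   the frame chain walkable. */
+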
+/* These CALL_FN_ macros assume that on ppc32-aix5, sizeof(unsigned
+   long) == 4. */
+
+#define CALL_FN_W_v(lval, orig)                                   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+0];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1] = (unsigned long)_orig.r2;                       \
+      _argvec[2] = (unsigned long)_orig.nraddr;                   \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1)                             \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+1];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+2];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3)                 \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+3];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
+         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+4];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
+         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
+         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5)        \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+5];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz  4, 8(11)\n\t" /* arg2->r4 */                       \
+         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
+         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
+         "lwz  7, 20(11)\n\t" /* arg5->r7 */                      \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6)   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+6];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
+         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
+         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
+         "lwz  7, 20(11)\n\t" /* arg5->r7 */                      \
+         "lwz  8, 24(11)\n\t" /* arg6->r8 */                      \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7)                            \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+7];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
+         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
+         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
+         "lwz  7, 20(11)\n\t" /* arg5->r7 */                      \
+         "lwz  8, 24(11)\n\t" /* arg6->r8 */                      \
+         "lwz  9, 28(11)\n\t" /* arg7->r9 */                      \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+8];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
+         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
+         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
+         "lwz  7, 20(11)\n\t" /* arg5->r7 */                      \
+         "lwz  8, 24(11)\n\t" /* arg6->r8 */                      \
+         "lwz  9, 28(11)\n\t" /* arg7->r9 */                      \
+         "lwz 10, 32(11)\n\t" /* arg8->r10 */                     \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
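+/* Note: in the 32-bit PowerOpen (AIX) ABI the first 8 integer args
+   travel in r3..r10; anything beyond that goes in the caller's
+   parameter save area, which starts at 24(1), just past the 24-byte
+   linkage area.  Hence the variants below stage arg9 at
+   24 + 8*4 == 56(1), arg10 at 60(1), and so on, bouncing each value
+   through r3. */
+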
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8,arg9)                  \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+9];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         VG_EXPAND_FRAME_BY_trashes_r3(64)                        \
+         /* arg9 */                                               \
+         "lwz 3,36(11)\n\t"                                       \
+         "stw 3,56(1)\n\t"                                        \
+         /* args1-8 */                                            \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
+         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
+         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
+         "lwz  7, 20(11)\n\t" /* arg5->r7 */                      \
+         "lwz  8, 24(11)\n\t" /* arg6->r8 */                      \
+         "lwz  9, 28(11)\n\t" /* arg7->r9 */                      \
+         "lwz 10, 32(11)\n\t" /* arg8->r10 */                     \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(64)                                 \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+10];                       \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      _argvec[2+10] = (unsigned long)arg10;                       \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         VG_EXPAND_FRAME_BY_trashes_r3(64)                        \
+         /* arg10 */                                              \
+         "lwz 3,40(11)\n\t"                                       \
+         "stw 3,60(1)\n\t"                                        \
+         /* arg9 */                                               \
+         "lwz 3,36(11)\n\t"                                       \
+         "stw 3,56(1)\n\t"                                        \
+         /* args1-8 */                                            \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
+         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
+         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
+         "lwz  7, 20(11)\n\t" /* arg5->r7 */                      \
+         "lwz  8, 24(11)\n\t" /* arg6->r8 */                      \
+         "lwz  9, 28(11)\n\t" /* arg7->r9 */                      \
+         "lwz 10, 32(11)\n\t" /* arg8->r10 */                     \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(64)                                 \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10,arg11)     \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+11];                       \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      _argvec[2+10] = (unsigned long)arg10;                       \
+      _argvec[2+11] = (unsigned long)arg11;                       \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         VG_EXPAND_FRAME_BY_trashes_r3(72)                        \
+         /* arg11 */                                              \
+         "lwz 3,44(11)\n\t"                                       \
+         "stw 3,64(1)\n\t"                                        \
+         /* arg10 */                                              \
+         "lwz 3,40(11)\n\t"                                       \
+         "stw 3,60(1)\n\t"                                        \
+         /* arg9 */                                               \
+         "lwz 3,36(11)\n\t"                                       \
+         "stw 3,56(1)\n\t"                                        \
+         /* args1-8 */                                            \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
+         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
+         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
+         "lwz  7, 20(11)\n\t" /* arg5->r7 */                      \
+         "lwz  8, 24(11)\n\t" /* arg6->r8 */                      \
+         "lwz  9, 28(11)\n\t" /* arg7->r9 */                      \
+         "lwz 10, 32(11)\n\t" /* arg8->r10 */                     \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(72)                                 \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                arg7,arg8,arg9,arg10,arg11,arg12) \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+12];                       \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      _argvec[2+10] = (unsigned long)arg10;                       \
+      _argvec[2+11] = (unsigned long)arg11;                       \
+      _argvec[2+12] = (unsigned long)arg12;                       \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         VG_EXPAND_FRAME_BY_trashes_r3(72)                        \
+         /* arg12 */                                              \
+         "lwz 3,48(11)\n\t"                                       \
+         "stw 3,68(1)\n\t"                                        \
+         /* arg11 */                                              \
+         "lwz 3,44(11)\n\t"                                       \
+         "stw 3,64(1)\n\t"                                        \
+         /* arg10 */                                              \
+         "lwz 3,40(11)\n\t"                                       \
+         "stw 3,60(1)\n\t"                                        \
+         /* arg9 */                                               \
+         "lwz 3,36(11)\n\t"                                       \
+         "stw 3,56(1)\n\t"                                        \
+         /* args1-8 */                                            \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
+         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
+         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
+         "lwz  7, 20(11)\n\t" /* arg5->r7 */                      \
+         "lwz  8, 24(11)\n\t" /* arg6->r8 */                      \
+         "lwz  9, 28(11)\n\t" /* arg7->r9 */                      \
+         "lwz 10, 32(11)\n\t" /* arg8->r10 */                     \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(72)                                 \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#endif /* PLAT_ppc32_aix5 */
+
+/* ------------------------ ppc64-aix5 ------------------------- */
+
+#if defined(PLAT_ppc64_aix5)
+
+/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS                                       \
+   "lr", "ctr", "xer",                                            \
+   "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7",        \
+   "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",   \
+   "r11", "r12", "r13"
+
+/* Expand the stack frame, copying enough info that unwinding
+   still works.  Trashes r3. */
+
+#define VG_EXPAND_FRAME_BY_trashes_r3(_n_fr)                      \
+         "addi 1,1,-" #_n_fr "\n\t"                               \
+         "ld   3," #_n_fr "(1)\n\t"                               \
+         "std  3,0(1)\n\t"
+
+#define VG_CONTRACT_FRAME_BY(_n_fr)                               \
+         "addi 1,1," #_n_fr "\n\t"
+
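+/* Note: the expansion drops the stack pointer by _n_fr bytes and then
+   copies the old back-chain word (fetched from what is now _n_fr(1),
+   i.e. 0(old r1)) to 0(1), so the frame chain stays walkable for
+   unwinders.  VG_CONTRACT_FRAME_BY simply undoes the adjustment. */
+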
+/* These CALL_FN_ macros assume that on ppc64-aix5, sizeof(unsigned
+   long) == 8. */
+
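+/* Layout note for the blocks below: the asm is handed &_argvec[2] in
+   r11, so nraddr sits at 0(11) and arg1..argN at 8(11), 16(11), ...;
+   _orig.r2 is at -8(11) and _argvec[0] at -16(11), which is the slot
+   the "std 2,-16(11)" / "ld 2,-16(11)" pair uses to preserve the
+   caller's tocptr across the call.  The "mr 11,%1" after the call
+   re-establishes r11, which the callee may have trashed. */
+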
+#define CALL_FN_W_v(lval, orig)                                   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+0];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1] = (unsigned long)_orig.r2;                       \
+      _argvec[2] = (unsigned long)_orig.nraddr;                   \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1)                             \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+1];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+2];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3)                 \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+3];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+4];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5)        \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+5];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6)   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+6];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7)                            \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+7];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+8];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
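+/* Note: as in the 32-bit case, args beyond the first 8 go in the
+   caller's parameter save area; in the 64-bit ABI that area starts at
+   48(1), past the 48-byte linkage area, so arg9 lands at
+   48 + 8*8 == 112(1), arg10 at 120(1), and so on. */
+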
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8,arg9)                  \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+9];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         VG_EXPAND_FRAME_BY_trashes_r3(128)                       \
+         /* arg9 */                                               \
+         "ld  3,72(11)\n\t"                                       \
+         "std 3,112(1)\n\t"                                       \
+         /* args1-8 */                                            \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
+         VG_CONTRACT_FRAME_BY(128)                                \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+10];                       \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      _argvec[2+10] = (unsigned long)arg10;                       \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         VG_EXPAND_FRAME_BY_trashes_r3(128)                       \
+         /* arg10 */                                              \
+         "ld  3,80(11)\n\t"                                       \
+         "std 3,120(1)\n\t"                                       \
+         /* arg9 */                                               \
+         "ld  3,72(11)\n\t"                                       \
+         "std 3,112(1)\n\t"                                       \
+         /* args1-8 */                                            \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
+         VG_CONTRACT_FRAME_BY(128)                                \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10,arg11)     \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+11];                       \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      _argvec[2+10] = (unsigned long)arg10;                       \
+      _argvec[2+11] = (unsigned long)arg11;                       \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         VG_EXPAND_FRAME_BY_trashes_r3(144)                       \
+         /* arg11 */                                              \
+         "ld  3,88(11)\n\t"                                       \
+         "std 3,128(1)\n\t"                                       \
+         /* arg10 */                                              \
+         "ld  3,80(11)\n\t"                                       \
+         "std 3,120(1)\n\t"                                       \
+         /* arg9 */                                               \
+         "ld  3,72(11)\n\t"                                       \
+         "std 3,112(1)\n\t"                                       \
+         /* args1-8 */                                            \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
+         VG_CONTRACT_FRAME_BY(144)                                \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                arg7,arg8,arg9,arg10,arg11,arg12) \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+12];                       \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      _argvec[2+10] = (unsigned long)arg10;                       \
+      _argvec[2+11] = (unsigned long)arg11;                       \
+      _argvec[2+12] = (unsigned long)arg12;                       \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         VG_EXPAND_FRAME_BY_trashes_r3(144)                       \
+         /* arg12 */                                              \
+         "ld  3,96(11)\n\t"                                       \
+         "std 3,136(1)\n\t"                                       \
+         /* arg11 */                                              \
+         "ld  3,88(11)\n\t"                                       \
+         "std 3,128(1)\n\t"                                       \
+         /* arg10 */                                              \
+         "ld  3,80(11)\n\t"                                       \
+         "std 3,120(1)\n\t"                                       \
+         /* arg9 */                                               \
+         "ld  3,72(11)\n\t"                                       \
+         "std 3,112(1)\n\t"                                       \
+         /* args1-8 */                                            \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
+         VG_CONTRACT_FRAME_BY(144)                                \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#endif /* PLAT_ppc64_aix5 */
+
+
+/* ------------------------------------------------------------------ */
+/* ARCHITECTURE INDEPENDENT MACROS for CLIENT REQUESTS.               */
+/*                                                                    */
+/* ------------------------------------------------------------------ */
+
+/* Some request codes.  There are many more of these, but most are not
+   exposed to end-user view.  These are the public ones, all of the
+   form 0x1000 + small_number.
+
+   Core ones are in the range 0x00000000--0x0000ffff.  The non-public
+   ones start at 0x2000.
+*/
+
+/* These macros are used by tools -- they must be public, but don't
+   embed them into other programs. */
+#define VG_USERREQ_TOOL_BASE(a,b) \
+   ((unsigned int)(((a)&0xff) << 24 | ((b)&0xff) << 16))
+#define VG_IS_TOOL_USERREQ(a, b, v) \
+   (VG_USERREQ_TOOL_BASE(a,b) == ((v) & 0xffff0000))
+
+/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !! 
+   This enum comprises an ABI exported by Valgrind to programs
+   which use client requests.  DO NOT CHANGE THE ORDER OF THESE
+   ENTRIES, NOR DELETE ANY -- add new ones at the end. */
+typedef
+   enum { VG_USERREQ__RUNNING_ON_VALGRIND  = 0x1001,
+          VG_USERREQ__DISCARD_TRANSLATIONS = 0x1002,
+
+          /* These allow any function to be called from the simulated
+             CPU but run on the real CPU.  Nb: the first arg passed to
+             the function is always the ThreadId of the running
+             thread!  So CLIENT_CALL0 actually requires a 1 arg
+             function, etc. */
+          VG_USERREQ__CLIENT_CALL0 = 0x1101,
+          VG_USERREQ__CLIENT_CALL1 = 0x1102,
+          VG_USERREQ__CLIENT_CALL2 = 0x1103,
+          VG_USERREQ__CLIENT_CALL3 = 0x1104,
+
+          /* Can be useful in regression testing suites -- eg. can
+             send Valgrind's output to /dev/null and still count
+             errors. */
+          VG_USERREQ__COUNT_ERRORS = 0x1201,
+
+          /* These are useful and can be interpreted by any tool that
+             tracks malloc() et al, by using vg_replace_malloc.c. */
+          VG_USERREQ__MALLOCLIKE_BLOCK = 0x1301,
+          VG_USERREQ__FREELIKE_BLOCK   = 0x1302,
+          /* Memory pool support. */
+          VG_USERREQ__CREATE_MEMPOOL   = 0x1303,
+          VG_USERREQ__DESTROY_MEMPOOL  = 0x1304,
+          VG_USERREQ__MEMPOOL_ALLOC    = 0x1305,
+          VG_USERREQ__MEMPOOL_FREE     = 0x1306,
+          VG_USERREQ__MEMPOOL_TRIM     = 0x1307,
+          VG_USERREQ__MOVE_MEMPOOL     = 0x1308,
+          VG_USERREQ__MEMPOOL_CHANGE   = 0x1309,
+          VG_USERREQ__MEMPOOL_EXISTS   = 0x130a,
+
+          /* Allow printfs to valgrind log. */
+          /* The first two pass the va_list argument by value, which
+             assumes it is the same size as or smaller than a UWord,
+             which generally isn't the case.  Hence are deprecated.
+             The second two pass the vargs by reference and so are
+             immune to this problem. */
+          /* both :: char* fmt, va_list vargs (DEPRECATED) */
+          VG_USERREQ__PRINTF           = 0x1401,
+          VG_USERREQ__PRINTF_BACKTRACE = 0x1402,
+          /* both :: char* fmt, va_list* vargs */
+          VG_USERREQ__PRINTF_VALIST_BY_REF = 0x1403,
+          VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF = 0x1404,
+
+          /* Stack support. */
+          VG_USERREQ__STACK_REGISTER   = 0x1501,
+          VG_USERREQ__STACK_DEREGISTER = 0x1502,
+          VG_USERREQ__STACK_CHANGE     = 0x1503,
+
+          /* Wine support */
+          VG_USERREQ__LOAD_PDB_DEBUGINFO = 0x1601,
+
+          /* Querying of debug info. */
+          VG_USERREQ__MAP_IP_TO_SRCLOC = 0x1701
+   } Vg_ClientRequest;
+
+#if !defined(__GNUC__)
+#  define __extension__ /* */
+#endif
+
+
+/*
+ * VALGRIND_DO_CLIENT_REQUEST_EXPR(): a C expression that invokes a Valgrind
+ * client request and whose value equals the client request result.
+ */
+
+#if defined(NVALGRIND)
+
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR(                               \
+        _zzq_default, _zzq_request,                                    \
+        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)         \
+   (_zzq_default)
+
+#else /*defined(NVALGRIND)*/
+
+#if defined(_MSC_VER)
+
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR(                                \
+        _zzq_default, _zzq_request,                                     \
+        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)          \
+   (vg_VALGRIND_DO_CLIENT_REQUEST_EXPR((uintptr_t)(_zzq_default),       \
+        (_zzq_request), (uintptr_t)(_zzq_arg1), (uintptr_t)(_zzq_arg2), \
+        (uintptr_t)(_zzq_arg3), (uintptr_t)(_zzq_arg4),                 \
+        (uintptr_t)(_zzq_arg5)))
+
+static __inline unsigned
+vg_VALGRIND_DO_CLIENT_REQUEST_EXPR(uintptr_t _zzq_default,
+                                   unsigned _zzq_request, uintptr_t _zzq_arg1,
+                                   uintptr_t _zzq_arg2, uintptr_t _zzq_arg3,
+                                   uintptr_t _zzq_arg4, uintptr_t _zzq_arg5)
+{
+    unsigned _zzq_rlval;
+    VALGRIND_DO_CLIENT_REQUEST(_zzq_rlval, _zzq_default, _zzq_request,
+                      _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5);
+    return _zzq_rlval;
+}
+
+#else /*defined(_MSC_VER)*/
+
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR(                               \
+        _zzq_default, _zzq_request,                                    \
+        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)         \
+   (__extension__({unsigned int _zzq_rlval;                            \
+    VALGRIND_DO_CLIENT_REQUEST(_zzq_rlval, _zzq_default, _zzq_request, \
+                _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+    _zzq_rlval;                                                        \
+   }))
+
+#endif /*defined(_MSC_VER)*/
+
+#endif /*defined(NVALGRIND)*/
+
+
+/* Returns the number of Valgrinds this code is running under.  That
+   is, 0 if running natively, 1 if running under Valgrind, 2 if
+   running under Valgrind which is running under another Valgrind,
+   etc. */
+#define RUNNING_ON_VALGRIND                                           \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* if not */,                   \
+                                    VG_USERREQ__RUNNING_ON_VALGRIND,  \
+                                    0, 0, 0, 0, 0)                    \
+
+
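+/* For example (an illustrative sketch; do_extra_checks() is a
+   hypothetical function):
+
+     if (RUNNING_ON_VALGRIND > 0)
+       do_extra_checks();    // running under at least one Valgrind
+*/
+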
+/* Discard translation of code in the range [_qzz_addr .. _qzz_addr +
+   _qzz_len - 1].  Useful if you are debugging a JITter or some such,
+   since it provides a way to make sure valgrind will retranslate the
+   invalidated area.  Returns no value. */
+#define VALGRIND_DISCARD_TRANSLATIONS(_qzz_addr,_qzz_len)         \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__DISCARD_TRANSLATIONS,  \
+                               _qzz_addr, _qzz_len, 0, 0, 0);     \
+   }
+
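+/* For example, a JIT that rewrites a code buffer in place might do
+   the following (an illustrative sketch; emit_code() is hypothetical):
+
+     emit_code(code_buf, code_len);
+     VALGRIND_DISCARD_TRANSLATIONS(code_buf, code_len);
+*/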
+
+/* These requests are for getting Valgrind itself to print something.
+   Possibly with a backtrace.  This is a really ugly hack.  The return value
+   is the number of characters printed, excluding the "**<pid>** " part at the
+   start and the backtrace (if present). */
+
+#if defined(NVALGRIND)
+
+#  define VALGRIND_PRINTF(...)
+#  define VALGRIND_PRINTF_BACKTRACE(...)
+
+#else /* NVALGRIND */
+
+#if !defined(_MSC_VER)
+/* Modern GCC will optimize the static routine out if unused, and the
+   unused attribute will suppress warnings about it.  */
+static int VALGRIND_PRINTF(const char *format, ...)
+   __attribute__((format(__printf__, 1, 2), __unused__));
+#endif
+static int
+#if defined(_MSC_VER)
+__inline
+#endif
+VALGRIND_PRINTF(const char *format, ...)
+{
+   unsigned long _qzz_res;
+   va_list vargs;
+   va_start(vargs, format);
+#if defined(_MSC_VER)
+   VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,
+                              VG_USERREQ__PRINTF_VALIST_BY_REF,
+                              (uintptr_t)format,
+                              (uintptr_t)&vargs,
+                              0, 0, 0);
+#else
+   VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,
+                              VG_USERREQ__PRINTF_VALIST_BY_REF,
+                              (unsigned long)format,
+                              (unsigned long)&vargs, 
+                              0, 0, 0);
+#endif
+   va_end(vargs);
+   return (int)_qzz_res;
+}
+
+#if !defined(_MSC_VER)
+static int VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
+   __attribute__((format(__printf__, 1, 2), __unused__));
+#endif
+static int
+#if defined(_MSC_VER)
+__inline
+#endif
+VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
+{
+   unsigned long _qzz_res;
+   va_list vargs;
+   va_start(vargs, format);
+#if defined(_MSC_VER)
+   VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,
+                              VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF,
+                              (uintptr_t)format,
+                              (uintptr_t)&vargs,
+                              0, 0, 0);
+#else
+   VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,
+                              VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF,
+                              (unsigned long)format,
+                              (unsigned long)&vargs, 
+                              0, 0, 0);
+#endif
+   va_end(vargs);
+   return (int)_qzz_res;
+}
+
+#endif /* NVALGRIND */
+
+
+/* These requests allow control to move from the simulated CPU to the
+   real CPU, calling an arbitrary function.
+   
+   Note that the current ThreadId is inserted as the first argument.
+   So this call:
+
+     VALGRIND_NON_SIMD_CALL2(f, arg1, arg2)
+
+   requires f to have this signature:
+
+     Word f(Word tid, Word arg1, Word arg2)
+
+   where "Word" is a word-sized type.
+
+   Note that these client requests are not entirely reliable.  For example,
+   if you call a function with them that subsequently calls printf(),
+   there's a high chance Valgrind will crash.  Generally, your prospects of
+   these working are made higher if the called function does not refer to
+   any global variables, and does not refer to any libc or other functions
+   (printf et al).  Any kind of entanglement with libc or dynamic linking is
+   likely to have a bad outcome, for tricky reasons which we've grappled
+   with a lot in the past.
+*/
+#define VALGRIND_NON_SIMD_CALL0(_qyy_fn)                          \
+   __extension__                                                  \
+   ({unsigned long _qyy_res;                                      \
+    VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */,  \
+                               VG_USERREQ__CLIENT_CALL0,          \
+                               _qyy_fn,                           \
+                               0, 0, 0, 0);                       \
+    _qyy_res;                                                     \
+   })
+
+#define VALGRIND_NON_SIMD_CALL1(_qyy_fn, _qyy_arg1)               \
+   __extension__                                                  \
+   ({unsigned long _qyy_res;                                      \
+    VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */,  \
+                               VG_USERREQ__CLIENT_CALL1,          \
+                               _qyy_fn,                           \
+                               _qyy_arg1, 0, 0, 0);               \
+    _qyy_res;                                                     \
+   })
+
+#define VALGRIND_NON_SIMD_CALL2(_qyy_fn, _qyy_arg1, _qyy_arg2)    \
+   __extension__                                                  \
+   ({unsigned long _qyy_res;                                      \
+    VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */,  \
+                               VG_USERREQ__CLIENT_CALL2,          \
+                               _qyy_fn,                           \
+                               _qyy_arg1, _qyy_arg2, 0, 0);       \
+    _qyy_res;                                                     \
+   })
+
+#define VALGRIND_NON_SIMD_CALL3(_qyy_fn, _qyy_arg1, _qyy_arg2, _qyy_arg3) \
+   __extension__                                                  \
+   ({unsigned long _qyy_res;                                      \
+    VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */,  \
+                               VG_USERREQ__CLIENT_CALL3,          \
+                               _qyy_fn,                           \
+                               _qyy_arg1, _qyy_arg2,              \
+                               _qyy_arg3, 0);                     \
+    _qyy_res;                                                     \
+   })
+
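+/* For example (an illustrative sketch, spelling Word as long):
+
+     static long add_on_real_cpu(long tid, long a, long b)
+     { return a + b; }    // tid is inserted by Valgrind
+
+     long sum = VALGRIND_NON_SIMD_CALL2(add_on_real_cpu, 40, 2);
+*/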
+
+/* Counts the number of errors that have been recorded by a tool.  Nb:
+   the tool must record the errors with VG_(maybe_record_error)() or
+   VG_(unique_error)() for them to be counted. */
+#define VALGRIND_COUNT_ERRORS                                     \
+   __extension__                                                  \
+   ({unsigned int _qyy_res;                                       \
+    VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */,  \
+                               VG_USERREQ__COUNT_ERRORS,          \
+                               0, 0, 0, 0, 0);                    \
+    _qyy_res;                                                     \
+   })
+
+/* Several Valgrind tools (Memcheck, Massif, Helgrind, DRD) rely on knowing
+   when heap blocks are allocated in order to give accurate results.  This
+   happens automatically for the standard allocator functions such as
+   malloc(), calloc(), realloc(), memalign(), new, new[], free(), delete,
+   delete[], etc.
+
+   But if your program uses a custom allocator, this doesn't automatically
+   happen, and Valgrind will not do as well.  For example, if you allocate
+   superblocks with mmap() and then allocate chunks of the superblocks, all
+   Valgrind's observations will be at the mmap() level and it won't know that
+   the chunks should be considered separate entities.  In Memcheck's case,
+   that means you probably won't get heap block overrun detection (because
+   there won't be redzones marked as unaddressable) and you definitely won't
+   get any leak detection.
+
+   The following client requests allow a custom allocator to be annotated so
+   that it can be handled accurately by Valgrind.
+
+   VALGRIND_MALLOCLIKE_BLOCK marks a region of memory as having been allocated
+   by a malloc()-like function.  For Memcheck (an illustrative case), this
+   does two things:
+
+   - It records that the block has been allocated.  This means any addresses
+     within the block mentioned in error messages will be
+     identified as belonging to the block.  It also means that if the block
+     isn't freed it will be detected by the leak checker.
+
+   - It marks the block as being addressable and undefined (if 'is_zeroed' is
+     not set), or addressable and defined (if 'is_zeroed' is set).  This
+     controls how accesses to the block by the program are handled.
+   
+   'addr' is the start of the usable block (ie. after any
+   redzone), 'sizeB' is its size.  'rzB' is the redzone size if the allocator
+   can apply redzones -- these are blocks of padding at the start and end of
+   each block.  Adding redzones is recommended as it makes it much more likely
+   Valgrind will spot block overruns.  `is_zeroed' indicates if the memory is
+   zeroed (or filled with another predictable value), as is the case for
+   calloc().
+   
+   VALGRIND_MALLOCLIKE_BLOCK should be put immediately after the point where a
+   heap block -- that will be used by the client program -- is allocated.
+   It's best to put it at the outermost level of the allocator if possible;
+   for example, if you have a function my_alloc() which calls
+   internal_alloc(), and the client request is put inside internal_alloc(),
+   stack traces relating to the heap block will contain entries for both
+   my_alloc() and internal_alloc(), which is probably not what you want.
+
+   For Memcheck users: if you use VALGRIND_MALLOCLIKE_BLOCK to carve out
+   custom blocks from within a heap block, B, that has been allocated with
+   malloc/calloc/new/etc, then block B will be *ignored* during leak-checking
+   -- the custom blocks will take precedence.
+
+   VALGRIND_FREELIKE_BLOCK is the partner to VALGRIND_MALLOCLIKE_BLOCK.  For
+   Memcheck, it does two things:
+
+   - It records that the block has been deallocated.  This assumes that the
+     block was annotated as having been allocated via
+     VALGRIND_MALLOCLIKE_BLOCK.  Otherwise, an error will be issued.
+
+   - It marks the block as being unaddressable.
+
+   VALGRIND_FREELIKE_BLOCK should be put immediately after the point where a
+   heap block is deallocated.
+
+   In many cases, these two client requests will not be enough to get your
+   allocator working well with Memcheck.  More specifically, if your allocator
+   writes to freed blocks in any way then a VALGRIND_MAKE_MEM_UNDEFINED call
+   will be necessary to mark the memory as addressable just before the zeroing
+   occurs, otherwise you'll get a lot of invalid write errors.  For example,
+   you'll need to do this if your allocator recycles freed blocks, but it
+   zeroes them before handing them back out (via VALGRIND_MALLOCLIKE_BLOCK).
+   Alternatively, if your allocator reuses freed blocks for allocator-internal
+   data structures, VALGRIND_MAKE_MEM_UNDEFINED calls will also be necessary.
+
+   Really, what's happening is a blurring of the lines between the client
+   program and the allocator... after VALGRIND_FREELIKE_BLOCK is called, the
+   memory should be considered unaddressable to the client program, but the
+   allocator knows more than the rest of the client program and so may be able
+   to safely access it.  Extra client requests are necessary for Valgrind to
+   understand the distinction between the allocator and the rest of the
+   program.
+
+   Note: there is currently no VALGRIND_REALLOCLIKE_BLOCK client request;  it
+   has to be emulated with MALLOCLIKE/FREELIKE and memory copying.
+   
+   Ignored if addr == 0.
+*/
+#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)    \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__MALLOCLIKE_BLOCK,      \
+                               addr, sizeB, rzB, is_zeroed, 0);   \
+   }
+
+/* See the comment for VALGRIND_MALLOCLIKE_BLOCK for details.
+   Ignored if addr == 0.
+*/
+#define VALGRIND_FREELIKE_BLOCK(addr, rzB)                        \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__FREELIKE_BLOCK,        \
+                               addr, rzB, 0, 0, 0);               \
+   }
+
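+/* For example, an allocator that carves chunks out of an mmap'd
+   superblock might be annotated like this (an illustrative sketch;
+   carve_chunk() and recycle_chunk() are hypothetical):
+
+     void* my_alloc(size_t n)
+     {
+       void* p = carve_chunk(n);
+       VALGRIND_MALLOCLIKE_BLOCK(p, n, 0, 0);  // rzB = 0, is_zeroed = 0
+       return p;
+     }
+
+     void my_free(void* p)
+     {
+       VALGRIND_FREELIKE_BLOCK(p, 0);          // rzB = 0
+       recycle_chunk(p);
+     }
+*/
+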
+/* Create a memory pool. */
+#define VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed)             \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__CREATE_MEMPOOL,        \
+                               pool, rzB, is_zeroed, 0, 0);       \
+   }
+
+/* Destroy a memory pool. */
+#define VALGRIND_DESTROY_MEMPOOL(pool)                            \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__DESTROY_MEMPOOL,       \
+                               pool, 0, 0, 0, 0);                 \
+   }
+
+/* Associate a piece of memory with a memory pool. */
+#define VALGRIND_MEMPOOL_ALLOC(pool, addr, size)                  \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__MEMPOOL_ALLOC,         \
+                               pool, addr, size, 0, 0);           \
+   }
+
+/* Disassociate a piece of memory from a memory pool. */
+#define VALGRIND_MEMPOOL_FREE(pool, addr)                         \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__MEMPOOL_FREE,          \
+                               pool, addr, 0, 0, 0);              \
+   }
+
+/* Disassociate any pieces outside a particular range. */
+#define VALGRIND_MEMPOOL_TRIM(pool, addr, size)                   \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__MEMPOOL_TRIM,          \
+                               pool, addr, size, 0, 0);           \
+   }
+
+/* Resize and/or move a piece associated with a memory pool. */
+#define VALGRIND_MOVE_MEMPOOL(poolA, poolB)                       \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__MOVE_MEMPOOL,          \
+                               poolA, poolB, 0, 0, 0);            \
+   }
+
+/* Resize and/or move a piece associated with a memory pool. */
+#define VALGRIND_MEMPOOL_CHANGE(pool, addrA, addrB, size)         \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__MEMPOOL_CHANGE,        \
+                               pool, addrA, addrB, size, 0);      \
+   }
+
+/* Return 1 if a mempool exists, else 0. */
+#define VALGRIND_MEMPOOL_EXISTS(pool)                             \
+   __extension__                                                  \
+   ({unsigned int _qzz_res;                                       \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__MEMPOOL_EXISTS,        \
+                               pool, 0, 0, 0, 0);                 \
+    _qzz_res;                                                     \
+   })
+
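+/* The typical lifecycle of the mempool requests above, for example
+   (an illustrative sketch; 'pool' is any address that uniquely
+   identifies the pool):
+
+     VALGRIND_CREATE_MEMPOOL(pool, 0, 0);        // rzB = 0, is_zeroed = 0
+     VALGRIND_MEMPOOL_ALLOC(pool, chunk, size);  // hand out a chunk
+     VALGRIND_MEMPOOL_FREE(pool, chunk);         // take it back
+     VALGRIND_DESTROY_MEMPOOL(pool);
+*/
+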
+/* Mark a piece of memory as being a stack. Returns a stack id. */
+#define VALGRIND_STACK_REGISTER(start, end)                       \
+   __extension__                                                  \
+   ({unsigned int _qzz_res;                                       \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__STACK_REGISTER,        \
+                               start, end, 0, 0, 0);              \
+    _qzz_res;                                                     \
+   })
+
+/* Unmark the piece of memory associated with a stack id as being a
+   stack. */
+#define VALGRIND_STACK_DEREGISTER(id)                             \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__STACK_DEREGISTER,      \
+                               id, 0, 0, 0, 0);                   \
+   }
+
+/* Change the start and end address of the stack id. */
+#define VALGRIND_STACK_CHANGE(id, start, end)                     \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__STACK_CHANGE,          \
+                               id, start, end, 0, 0);             \
+   }
+
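+/* For example, a coroutine library might register each fiber stack
+   (an illustrative sketch; stack_base and stack_size are hypothetical):
+
+     unsigned int sid =
+       VALGRIND_STACK_REGISTER(stack_base, stack_base + stack_size);
+     // ... switch onto and run on the alternate stack ...
+     VALGRIND_STACK_DEREGISTER(sid);
+*/
+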
+/* Load PDB debug info for Wine PE image_map. */
+#define VALGRIND_LOAD_PDB_DEBUGINFO(fd, ptr, total_size, delta)   \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__LOAD_PDB_DEBUGINFO,    \
+                               fd, ptr, total_size, delta, 0);    \
+   }
+
+/* Map a code address to a source file name and line number.  buf64
+   must point to a 64-byte buffer in the caller's address space.  The
+   result will be dumped in there and is guaranteed to be zero
+   terminated.  If no info is found, the first byte is set to zero. */
+#define VALGRIND_MAP_IP_TO_SRCLOC(addr, buf64)                    \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__MAP_IP_TO_SRCLOC,      \
+                               addr, buf64, 0, 0, 0);             \
+   }
+
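+/* For example (an illustrative sketch):
+
+     char buf64[64];
+     VALGRIND_MAP_IP_TO_SRCLOC((unsigned long)&some_function, buf64);
+     if (buf64[0] != 0)
+       printf("at %s\n", buf64);    // e.g. "foo.c:123"
+*/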
+
+#undef PLAT_x86_linux
+#undef PLAT_amd64_linux
+#undef PLAT_ppc32_linux
+#undef PLAT_ppc64_linux
+#undef PLAT_arm_linux
+#undef PLAT_ppc32_aix5
+#undef PLAT_ppc64_aix5
+
+#endif   /* __VALGRIND_H */
diff --git a/base/third_party/xdg_mime/BUILD.gn b/base/third_party/xdg_mime/BUILD.gn
new file mode 100644
index 0000000..ac9e2c9
--- /dev/null
+++ b/base/third_party/xdg_mime/BUILD.gn
@@ -0,0 +1,28 @@
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+static_library("xdg_mime") {
+  visibility = [ "//base/*" ]
+  sources = [
+    "xdgmime.c",
+    "xdgmime.h",
+    "xdgmimealias.c",
+    "xdgmimealias.h",
+    "xdgmimecache.c",
+    "xdgmimecache.h",
+    "xdgmimeglob.c",
+    "xdgmimeglob.h",
+    "xdgmimeicon.c",
+    "xdgmimeicon.h",
+    "xdgmimeint.c",
+    "xdgmimeint.h",
+    "xdgmimemagic.c",
+    "xdgmimemagic.h",
+    "xdgmimeparent.c",
+    "xdgmimeparent.h",
+  ]
+
+  configs -= [ "//build/config/compiler:chromium_code" ]
+  configs += [ "//build/config/compiler:no_chromium_code" ]
+}
diff --git a/base/third_party/xdg_mime/LICENSE b/base/third_party/xdg_mime/LICENSE
new file mode 100644
index 0000000..55fedcf
--- /dev/null
+++ b/base/third_party/xdg_mime/LICENSE
@@ -0,0 +1,168 @@
+Licensed under the Academic Free License version 2.0 (below)
+Or under the following terms:
+
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation; either
+version 2 of the License, or (at your option) any later version.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+Lesser General Public License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with this library; if not, write to the
+Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA.
+
+
+--------------------------------------------------------------------------------
+Academic Free License v. 2.0
+--------------------------------------------------------------------------------
+
+This Academic Free License (the "License") applies to any original work of
+authorship (the "Original Work") whose owner (the "Licensor") has placed the
+following notice immediately following the copyright notice for the Original
+Work:
+
+Licensed under the Academic Free License version 2.0
+1) Grant of Copyright License. Licensor hereby grants You a world-wide,
+royalty-free, non-exclusive, perpetual, sublicenseable license to do the
+following:
+
+a) to reproduce the Original Work in copies;
+b) to prepare derivative works ("Derivative Works") based upon the Original
+   Work;
+c) to distribute copies of the Original Work and Derivative Works to the
+   public;
+d) to perform the Original Work publicly; and
+e) to display the Original Work publicly.
+
+2) Grant of Patent License. Licensor hereby grants You a world-wide,
+royalty-free, non-exclusive, perpetual, sublicenseable license, under patent
+claims owned or controlled by the Licensor that are embodied in the Original
+Work as furnished by the Licensor, to make, use, sell and offer for sale the
+Original Work and Derivative Works.
+
+3) Grant of Source Code License. The term "Source Code" means the preferred
+form of the Original Work for making modifications to it and all available
+documentation describing how to modify the Original Work. Licensor hereby
+agrees to provide a machine-readable copy of the Source Code of the Original
+Work along with each copy of the Original Work that Licensor distributes.
+Licensor reserves the right to satisfy this obligation by placing a
+machine-readable copy of the Source Code in an information repository
+reasonably calculated to permit inexpensive and convenient access by You for as
+long as Licensor continues to distribute the Original Work, and by publishing
+the address of that information repository in a notice immediately following
+the copyright notice that applies to the Original Work.
+
+4) Exclusions From License Grant. Neither the names of Licensor, nor the names
+of any contributors to the Original Work, nor any of their trademarks or
+service marks, may be used to endorse or promote products derived from this
+Original Work without express prior written permission of the Licensor. Nothing
+in this License shall be deemed to grant any rights to trademarks, copyrights,
+patents, trade secrets or any other intellectual property of Licensor except as
+expressly stated herein. No patent license is granted to make, use, sell or
+offer to sell embodiments of any patent claims other than the licensed claims
+defined in Section 2. No right is granted to the trademarks of Licensor even if
+such marks are included in the Original Work. Nothing in this License shall be
+interpreted to prohibit Licensor from licensing under different terms from this
+License any Original Work that Licensor otherwise would have a right to
+license.
+
+5) This section intentionally omitted.
+
+6) Attribution Rights. You must retain, in the Source Code of any Derivative
+Works that You create, all copyright, patent or trademark notices from the
+Source Code of the Original Work, as well as any notices of licensing and any
+descriptive text identified therein as an "Attribution Notice." You must cause
+the Source Code for any Derivative Works that You create to carry a prominent
+Attribution Notice reasonably calculated to inform recipients that You have
+modified the Original Work.
+
+7) Warranty of Provenance and Disclaimer of Warranty. Licensor warrants that
+the copyright in and to the Original Work and the patent rights granted herein
+by Licensor are owned by the Licensor or are sublicensed to You under the terms
+of this License with the permission of the contributor(s) of those copyrights
+and patent rights. Except as expressly stated in the immediately proceeding
+sentence, the Original Work is provided under this License on an "AS IS" BASIS
+and WITHOUT WARRANTY, either express or implied, including, without limitation,
+the warranties of NON-INFRINGEMENT, MERCHANTABILITY or FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY OF THE ORIGINAL WORK IS WITH YOU.
+This DISCLAIMER OF WARRANTY constitutes an essential part of this License. No
+license to Original Work is granted hereunder except under this disclaimer.
+
+8) Limitation of Liability. Under no circumstances and under no legal theory,
+whether in tort (including negligence), contract, or otherwise, shall the
+Licensor be liable to any person for any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License
+or the use of the Original Work including, without limitation, damages for loss
+of goodwill, work stoppage, computer failure or malfunction, or any and all
+other commercial damages or losses. This limitation of liability shall not
+apply to liability for death or personal injury resulting from Licensor's
+negligence to the extent applicable law prohibits such limitation. Some
+jurisdictions do not allow the exclusion or limitation of incidental or
+consequential damages, so this exclusion and limitation may not apply to You.
+
+9) Acceptance and Termination. If You distribute copies of the Original Work or
+a Derivative Work, You must make a reasonable effort under the circumstances to
+obtain the express assent of recipients to the terms of this License. Nothing
+else but this License (or another written agreement between Licensor and You)
+grants You permission to create Derivative Works based upon the Original Work
+or to exercise any of the rights granted in Section 1 herein, and any attempt
+to do so except under the terms of this License (or another written agreement
+between Licensor and You) is expressly prohibited by U.S. copyright law, the
+equivalent laws of other countries, and by international treaty. Therefore, by
+exercising any of the rights granted to You in Section 1 herein, You indicate
+Your acceptance of this License and all of its terms and conditions.
+
+10) Termination for Patent Action. This License shall terminate automatically
+and You may no longer exercise any of the rights granted to You by this License
+as of the date You commence an action, including a cross-claim or counterclaim,
+for patent infringement (i) against Licensor with respect to a patent
+applicable to software or (ii) against any entity with respect to a patent
+applicable to the Original Work (but excluding combinations of the Original
+Work with other software or hardware).
+
+11) Jurisdiction, Venue and Governing Law. Any action or suit relating to this
+License may be brought only in the courts of a jurisdiction wherein the
+Licensor resides or in which Licensor conducts its primary business, and under
+the laws of that jurisdiction excluding its conflict-of-law provisions. The
+application of the United Nations Convention on Contracts for the International
+Sale of Goods is expressly excluded. Any use of the Original Work outside the
+scope of this License or after its termination shall be subject to the
+requirements and penalties of the U.S. Copyright Act, 17 U.S.C. 101 et seq.,
+the equivalent laws of other countries, and international treaty. This section
+shall survive the termination of this License.
+
+12) Attorneys Fees. In any action to enforce the terms of this License or
+seeking damages relating thereto, the prevailing party shall be entitled to
+recover its costs and expenses, including, without limitation, reasonable
+attorneys' fees and costs incurred in connection with such action, including
+any appeal of such action. This section shall survive the termination of this
+License.
+
+13) Miscellaneous. This License represents the complete agreement concerning
+the subject matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent necessary to
+make it enforceable.
+
+14) Definition of "You" in This License. "You" throughout this License, whether
+in upper or lower case, means an individual or a legal entity exercising rights
+under, and complying with all of the terms of, this License. For legal
+entities, "You" includes any entity that controls, is controlled by, or is
+under common control with you. For purposes of this definition, "control" means
+(i) the power, direct or indirect, to cause the direction or management of such
+entity, whether by contract or otherwise, or (ii) ownership of fifty percent
+(50%) or more of the outstanding shares, or (iii) beneficial ownership of such
+entity.
+
+15) Right to Use. You may use the Original Work in all ways not otherwise
+restricted or conditioned by this License or by law, and Licensor promises not
+to interfere with or be responsible for such uses by You.
+
+This license is Copyright (C) 2003 Lawrence E. Rosen. All rights reserved.
+Permission is hereby granted to copy and distribute this license without
+modification. This license may not be modified without the express written
+permission of its copyright owner.
diff --git a/base/third_party/xdg_mime/README b/base/third_party/xdg_mime/README
new file mode 100644
index 0000000..e7f3f68
--- /dev/null
+++ b/base/third_party/xdg_mime/README
@@ -0,0 +1,8 @@
+This is a simple module that parses the proposed MIME spec listed
+at http://freedesktop.org/.  It is currently targeted at version 0.12.
+There are no formal releases planned for this module, and it is not
+intended to be installed at this time.  Rather, it is meant to be used
+by other libraries or applications to add support for the MIME system.
+
+It is dual-licensed under the terms of the GNU Lesser General Public
+License, and the Academic Free License, version 2.0.
diff --git a/base/third_party/xdg_mime/README.chromium b/base/third_party/xdg_mime/README.chromium
new file mode 100644
index 0000000..8212752
--- /dev/null
+++ b/base/third_party/xdg_mime/README.chromium
@@ -0,0 +1,14 @@
+Name: xdg-mime
+URL: http://freedesktop.org
+License: Academic Free License version 2.0 or LGPL v2
+
+The code in this directory is synced from:
+git://anongit.freedesktop.org/xdg/xdgmime
+@ 2cdd8d36d7930d5a594587286cb1949ff62f7027 on 2012/08/06.
+
+In addition, we have the following patch(es):
+  - compile.patch: small tweaks to make the code compile.
+  - free_pointer_later.patch: small patch that fixes a potential crash in
+      xdg_mime_get_mime_type_for_file() - a use-after-free of the data buffer.
+  - function_casts.patch: fix bad function casts.
+  - Added a LICENSE file.
diff --git a/base/third_party/xdg_mime/compile.patch b/base/third_party/xdg_mime/compile.patch
new file mode 100644
index 0000000..cd055ed
--- /dev/null
+++ b/base/third_party/xdg_mime/compile.patch
@@ -0,0 +1,17 @@
+--- a/xdgmimecache.c
++++ b/xdgmimecache.c
+@@ -40,6 +40,8 @@
+ 
+ #include <netinet/in.h> /* for ntohl/ntohs */
+ 
++#define HAVE_MMAP 1
++
+ #ifdef HAVE_MMAP
+ #include <sys/mman.h>
+ #else
+@@ -1000,5 +1002,3 @@
+ 	    dump_glob_node (cache, offset + 20 * j, 0);
+   }
+ }
+-
+-
diff --git a/base/third_party/xdg_mime/free_pointer_later.patch b/base/third_party/xdg_mime/free_pointer_later.patch
new file mode 100644
index 0000000..7668761
--- /dev/null
+++ b/base/third_party/xdg_mime/free_pointer_later.patch
@@ -0,0 +1,22 @@
+diff --git a/base/third_party/xdg_mime/xdgmime.c b/base/third_party/xdg_mime/xdgmime.c
+index c7b16bb..6dc58c2 100644
+--- a/base/third_party/xdg_mime/xdgmime.c
++++ b/base/third_party/xdg_mime/xdgmime.c
+@@ -558,13 +558,13 @@ xdg_mime_get_mime_type_for_file (const char  *file_name,
+   mime_type = _xdg_mime_magic_lookup_data (global_magic, data, bytes_read, NULL,
+ 					   mime_types, n);
+ 
+-  free (data);
+   fclose (file);
+ 
+-  if (mime_type)
+-    return mime_type;
++  if (!mime_type)
++    mime_type = _xdg_binary_or_text_fallback(data, bytes_read);
+ 
+-  return _xdg_binary_or_text_fallback(data, bytes_read);
++  free (data);
++  return mime_type;
+ }
+ 
+ const char *
diff --git a/base/third_party/xdg_mime/function_casts.patch b/base/third_party/xdg_mime/function_casts.patch
new file mode 100644
index 0000000..37d38a7
--- /dev/null
+++ b/base/third_party/xdg_mime/function_casts.patch
@@ -0,0 +1,44 @@
+diff --git a/base/third_party/xdg_mime/xdgmime.c b/base/third_party/xdg_mime/xdgmime.c
+index 6dc58c253fa2..f340fcefabb4 100644
+--- a/base/third_party/xdg_mime/xdgmime.c
++++ b/base/third_party/xdg_mime/xdgmime.c
+@@ -136,7 +136,7 @@ xdg_dir_time_list_free (XdgDirTimeList *list)
+ }
+ 
+ static int
+-xdg_mime_init_from_directory (const char *directory)
++xdg_mime_init_from_directory (const char *directory, void *user_data)
+ {
+   char *file_name;
+   struct stat st;
+@@ -340,8 +340,9 @@ xdg_check_file (const char *file_path,
+ 
+ static int
+ xdg_check_dir (const char *directory,
+-	       int        *invalid_dir_list)
++	       void       *user_data)
+ {
++  int *invalid_dir_list = user_data;
+   int invalid, exists;
+   char *file_name;
+ 
+@@ -398,8 +399,7 @@ xdg_check_dirs (void)
+   for (list = dir_time_list; list; list = list->next)
+     list->checked = XDG_CHECKED_UNCHECKED;
+ 
+-  xdg_run_command_on_dirs ((XdgDirectoryFunc) xdg_check_dir,
+-			   &invalid_dir_list);
++  xdg_run_command_on_dirs (xdg_check_dir, &invalid_dir_list);
+ 
+   if (invalid_dir_list)
+     return TRUE;
+@@ -455,8 +455,7 @@ xdg_mime_init (void)
+       icon_list = _xdg_mime_icon_list_new ();
+       generic_icon_list = _xdg_mime_icon_list_new ();
+ 
+-      xdg_run_command_on_dirs ((XdgDirectoryFunc) xdg_mime_init_from_directory,
+-			       NULL);
++      xdg_run_command_on_dirs (xdg_mime_init_from_directory, NULL);
+ 
+       need_reread = FALSE;
+     }
diff --git a/base/third_party/xdg_mime/xdgmime.c b/base/third_party/xdg_mime/xdgmime.c
new file mode 100644
index 0000000..f340fce
--- /dev/null
+++ b/base/third_party/xdg_mime/xdgmime.c
@@ -0,0 +1,933 @@
+/* -*- mode: C; c-file-style: "gnu" -*- */
+/* xdgmime.c: XDG Mime Spec mime resolver.  Based on version 0.11 of the spec.
+ *
+ * More info can be found at http://www.freedesktop.org/standards/
+ * 
+ * Copyright (C) 2003,2004  Red Hat, Inc.
+ * Copyright (C) 2003,2004  Jonathan Blandford <jrb@alum.mit.edu>
+ *
+ * Licensed under the Academic Free License version 2.0
+ * Or under the following terms:
+ * 
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include "xdgmime.h"
+#include "xdgmimeint.h"
+#include "xdgmimeglob.h"
+#include "xdgmimemagic.h"
+#include "xdgmimealias.h"
+#include "xdgmimeicon.h"
+#include "xdgmimeparent.h"
+#include "xdgmimecache.h"
+#include <stdio.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#include <unistd.h>
+#include <assert.h>
+
+typedef struct XdgDirTimeList XdgDirTimeList;
+typedef struct XdgCallbackList XdgCallbackList;
+
+static int need_reread = TRUE;
+static time_t last_stat_time = 0;
+
+static XdgGlobHash *global_hash = NULL;
+static XdgMimeMagic *global_magic = NULL;
+static XdgAliasList *alias_list = NULL;
+static XdgParentList *parent_list = NULL;
+static XdgDirTimeList *dir_time_list = NULL;
+static XdgCallbackList *callback_list = NULL;
+static XdgIconList *icon_list = NULL;
+static XdgIconList *generic_icon_list = NULL;
+
+XdgMimeCache **_caches = NULL;
+static int n_caches = 0;
+
+const char xdg_mime_type_unknown[] = "application/octet-stream";
+const char xdg_mime_type_empty[] = "application/x-zerosize";
+const char xdg_mime_type_textplain[] = "text/plain";
+
+
+enum
+{
+  XDG_CHECKED_UNCHECKED,
+  XDG_CHECKED_VALID,
+  XDG_CHECKED_INVALID
+};
+
+struct XdgDirTimeList
+{
+  time_t mtime;
+  char *directory_name;
+  int checked;
+  XdgDirTimeList *next;
+};
+
+struct XdgCallbackList
+{
+  XdgCallbackList *next;
+  XdgCallbackList *prev;
+  int              callback_id;
+  XdgMimeCallback  callback;
+  void            *data;
+  XdgMimeDestroy   destroy;
+};
+
+/* Function called by xdg_run_command_on_dirs.  If it returns TRUE, further
+ * directories aren't looked at */
+typedef int (*XdgDirectoryFunc) (const char *directory,
+				 void       *user_data);
+
+static void
+xdg_dir_time_list_add (char   *file_name, 
+		       time_t  mtime)
+{
+  XdgDirTimeList *list;
+
+  for (list = dir_time_list; list; list = list->next) 
+    {
+      if (strcmp (list->directory_name, file_name) == 0)
+        {
+          free (file_name);
+          return;
+        }
+    }
+  
+  list = calloc (1, sizeof (XdgDirTimeList));
+  list->checked = XDG_CHECKED_UNCHECKED;
+  list->directory_name = file_name;
+  list->mtime = mtime;
+  list->next = dir_time_list;
+  dir_time_list = list;
+}
+ 
+static void
+xdg_dir_time_list_free (XdgDirTimeList *list)
+{
+  XdgDirTimeList *next;
+
+  while (list)
+    {
+      next = list->next;
+      free (list->directory_name);
+      free (list);
+      list = next;
+    }
+}
+
+static int
+xdg_mime_init_from_directory (const char *directory, void *user_data)
+{
+  char *file_name;
+  struct stat st;
+
+  assert (directory != NULL);
+
+  file_name = malloc (strlen (directory) + strlen ("/mime/mime.cache") + 1);
+  strcpy (file_name, directory); strcat (file_name, "/mime/mime.cache");
+  if (stat (file_name, &st) == 0)
+    {
+      XdgMimeCache *cache = _xdg_mime_cache_new_from_file (file_name);
+
+      if (cache != NULL)
+	{
+	  xdg_dir_time_list_add (file_name, st.st_mtime);
+
+	  _caches = realloc (_caches, sizeof (XdgMimeCache *) * (n_caches + 2));
+	  _caches[n_caches] = cache;
+          _caches[n_caches + 1] = NULL;
+	  n_caches++;
+
+	  return FALSE;
+	}
+    }
+  free (file_name);
+
+  file_name = malloc (strlen (directory) + strlen ("/mime/globs2") + 1);
+  strcpy (file_name, directory); strcat (file_name, "/mime/globs2");
+  if (stat (file_name, &st) == 0)
+    {
+      _xdg_mime_glob_read_from_file (global_hash, file_name, TRUE);
+      xdg_dir_time_list_add (file_name, st.st_mtime);
+    }
+  else
+    {
+      free (file_name);
+      file_name = malloc (strlen (directory) + strlen ("/mime/globs") + 1);
+      strcpy (file_name, directory); strcat (file_name, "/mime/globs");
+      if (stat (file_name, &st) == 0)
+        {
+          _xdg_mime_glob_read_from_file (global_hash, file_name, FALSE);
+          xdg_dir_time_list_add (file_name, st.st_mtime);
+        }
+      else
+        {
+          free (file_name);
+        }
+    }
+
+  file_name = malloc (strlen (directory) + strlen ("/mime/magic") + 1);
+  strcpy (file_name, directory); strcat (file_name, "/mime/magic");
+  if (stat (file_name, &st) == 0)
+    {
+      _xdg_mime_magic_read_from_file (global_magic, file_name);
+      xdg_dir_time_list_add (file_name, st.st_mtime);
+    }
+  else
+    {
+      free (file_name);
+    }
+
+  file_name = malloc (strlen (directory) + strlen ("/mime/aliases") + 1);
+  strcpy (file_name, directory); strcat (file_name, "/mime/aliases");
+  _xdg_mime_alias_read_from_file (alias_list, file_name);
+  free (file_name);
+
+  file_name = malloc (strlen (directory) + strlen ("/mime/subclasses") + 1);
+  strcpy (file_name, directory); strcat (file_name, "/mime/subclasses");
+  _xdg_mime_parent_read_from_file (parent_list, file_name);
+  free (file_name);
+
+  file_name = malloc (strlen (directory) + strlen ("/mime/icons") + 1);
+  strcpy (file_name, directory); strcat (file_name, "/mime/icons");
+  _xdg_mime_icon_read_from_file (icon_list, file_name);
+  free (file_name);
+
+  file_name = malloc (strlen (directory) + strlen ("/mime/generic-icons") + 1);
+  strcpy (file_name, directory); strcat (file_name, "/mime/generic-icons");
+  _xdg_mime_icon_read_from_file (generic_icon_list, file_name);
+  free (file_name);
+
+  return FALSE; /* Keep processing */
+}
+
+/* Runs a command on all the directories in the search path */
+static void
+xdg_run_command_on_dirs (XdgDirectoryFunc  func,
+			 void             *user_data)
+{
+  const char *xdg_data_home;
+  const char *xdg_data_dirs;
+  const char *ptr;
+
+  xdg_data_home = getenv ("XDG_DATA_HOME");
+  if (xdg_data_home)
+    {
+      if ((func) (xdg_data_home, user_data))
+	return;
+    }
+  else
+    {
+      const char *home;
+
+      home = getenv ("HOME");
+      if (home != NULL)
+	{
+	  char *guessed_xdg_home;
+	  int stop_processing;
+
+	  guessed_xdg_home = malloc (strlen (home) + strlen ("/.local/share/") + 1);
+	  strcpy (guessed_xdg_home, home);
+	  strcat (guessed_xdg_home, "/.local/share/");
+	  stop_processing = (func) (guessed_xdg_home, user_data);
+	  free (guessed_xdg_home);
+
+	  if (stop_processing)
+	    return;
+	}
+    }
+
+  xdg_data_dirs = getenv ("XDG_DATA_DIRS");
+  if (xdg_data_dirs == NULL)
+    xdg_data_dirs = "/usr/local/share/:/usr/share/";
+
+  ptr = xdg_data_dirs;
+
+  while (*ptr != '\000')
+    {
+      const char *end_ptr;
+      char *dir;
+      int len;
+      int stop_processing;
+
+      end_ptr = ptr;
+      while (*end_ptr != ':' && *end_ptr != '\000')
+	end_ptr ++;
+
+      if (end_ptr == ptr)
+	{
+	  ptr++;
+	  continue;
+	}
+
+      if (*end_ptr == ':')
+	len = end_ptr - ptr;
+      else
+	len = end_ptr - ptr + 1;
+      dir = malloc (len + 1);
+      strncpy (dir, ptr, len);
+      dir[len] = '\0';
+      stop_processing = (func) (dir, user_data);
+      free (dir);
+
+      if (stop_processing)
+	return;
+
+      ptr = end_ptr;
+    }
+}
+
+/* Checks file_path to make sure it has the same mtime as last time it was
+ * checked.  If it has a different mtime, or if the file doesn't exist, it
+ * returns FALSE.
+ *
+ * FIXME: This doesn't protect against permission changes.
+ */
+static int
+xdg_check_file (const char *file_path,
+                int        *exists)
+{
+  struct stat st;
+
+  /* If the file exists */
+  if (stat (file_path, &st) == 0)
+    {
+      XdgDirTimeList *list;
+
+      if (exists)
+        *exists = TRUE;
+
+      for (list = dir_time_list; list; list = list->next)
+	{
+	  if (! strcmp (list->directory_name, file_path))
+	    {
+	      if (st.st_mtime == list->mtime)
+		list->checked = XDG_CHECKED_VALID;
+	      else 
+		list->checked = XDG_CHECKED_INVALID;
+
+	      return (list->checked != XDG_CHECKED_VALID);
+	    }
+	}
+      return TRUE;
+    }
+
+  if (exists)
+    *exists = FALSE;
+
+  return FALSE;
+}
+
+static int
+xdg_check_dir (const char *directory,
+	       void       *user_data)
+{
+  int *invalid_dir_list = user_data;
+  int invalid, exists;
+  char *file_name;
+
+  assert (directory != NULL);
+
+  /* Check the mime.cache file */
+  file_name = malloc (strlen (directory) + strlen ("/mime/mime.cache") + 1);
+  strcpy (file_name, directory); strcat (file_name, "/mime/mime.cache");
+  invalid = xdg_check_file (file_name, &exists);
+  free (file_name);
+  if (invalid)
+    {
+      *invalid_dir_list = TRUE;
+      return TRUE;
+    }
+  else if (exists)
+    {
+      return FALSE;
+    }
+
+  /* Check the globs file */
+  file_name = malloc (strlen (directory) + strlen ("/mime/globs") + 1);
+  strcpy (file_name, directory); strcat (file_name, "/mime/globs");
+  invalid = xdg_check_file (file_name, NULL);
+  free (file_name);
+  if (invalid)
+    {
+      *invalid_dir_list = TRUE;
+      return TRUE;
+    }
+
+  /* Check the magic file */
+  file_name = malloc (strlen (directory) + strlen ("/mime/magic") + 1);
+  strcpy (file_name, directory); strcat (file_name, "/mime/magic");
+  invalid = xdg_check_file (file_name, NULL);
+  free (file_name);
+  if (invalid)
+    {
+      *invalid_dir_list = TRUE;
+      return TRUE;
+    }
+
+  return FALSE; /* Keep processing */
+}
+
+/* Walks through all the mime files stat()ing them to see if they've changed.
+ * Returns TRUE if they have. */
+static int
+xdg_check_dirs (void)
+{
+  XdgDirTimeList *list;
+  int invalid_dir_list = FALSE;
+
+  for (list = dir_time_list; list; list = list->next)
+    list->checked = XDG_CHECKED_UNCHECKED;
+
+  xdg_run_command_on_dirs (xdg_check_dir, &invalid_dir_list);
+
+  if (invalid_dir_list)
+    return TRUE;
+
+  for (list = dir_time_list; list; list = list->next)
+    {
+      if (list->checked != XDG_CHECKED_VALID)
+	return TRUE;
+    }
+
+  return FALSE;
+}
+
+/* We want to avoid stat()ing on every single mime call, so we only look for
+ * newer files every 5 seconds.  This will return TRUE if we need to reread the
+ * mime data from disk.
+ */
+static int
+xdg_check_time_and_dirs (void)
+{
+  struct timeval tv;
+  time_t current_time;
+  int retval = FALSE;
+
+  gettimeofday (&tv, NULL);
+  current_time = tv.tv_sec;
+
+  if (current_time >= last_stat_time + 5)
+    {
+      retval = xdg_check_dirs ();
+      last_stat_time = current_time;
+    }
+
+  return retval;
+}
+
+/* Called in every public function.  It reloads the mime data if need be.
+ */
+static void
+xdg_mime_init (void)
+{
+  if (xdg_check_time_and_dirs ())
+    {
+      xdg_mime_shutdown ();
+    }
+
+  if (need_reread)
+    {
+      global_hash = _xdg_glob_hash_new ();
+      global_magic = _xdg_mime_magic_new ();
+      alias_list = _xdg_mime_alias_list_new ();
+      parent_list = _xdg_mime_parent_list_new ();
+      icon_list = _xdg_mime_icon_list_new ();
+      generic_icon_list = _xdg_mime_icon_list_new ();
+
+      xdg_run_command_on_dirs (xdg_mime_init_from_directory, NULL);
+
+      need_reread = FALSE;
+    }
+}
+
+const char *
+xdg_mime_get_mime_type_for_data (const void *data,
+				 size_t      len,
+				 int        *result_prio)
+{
+  const char *mime_type;
+
+  if (len == 0)
+    {
+      *result_prio = 100;
+      return XDG_MIME_TYPE_EMPTY;
+    }
+
+  xdg_mime_init ();
+
+  if (_caches)
+    mime_type = _xdg_mime_cache_get_mime_type_for_data (data, len, result_prio);
+  else
+    mime_type = _xdg_mime_magic_lookup_data (global_magic, data, len, result_prio, NULL, 0);
+
+  if (mime_type)
+    return mime_type;
+
+  return _xdg_binary_or_text_fallback(data, len);
+}
+
+const char *
+xdg_mime_get_mime_type_for_file (const char  *file_name,
+                                 struct stat *statbuf)
+{
+  const char *mime_type;
+  /* currently, only a few globs occur twice, and none
+   * more often, so 5 seems plenty.
+   */
+  const char *mime_types[5];
+  FILE *file;
+  unsigned char *data;
+  int max_extent;
+  int bytes_read;
+  struct stat buf;
+  const char *base_name;
+  int n;
+
+  if (file_name == NULL)
+    return NULL;
+  if (! _xdg_utf8_validate (file_name))
+    return NULL;
+
+  xdg_mime_init ();
+
+  if (_caches)
+    return _xdg_mime_cache_get_mime_type_for_file (file_name, statbuf);
+
+  base_name = _xdg_get_base_name (file_name);
+  n = _xdg_glob_hash_lookup_file_name (global_hash, base_name, mime_types, 5);
+
+  if (n == 1)
+    return mime_types[0];
+
+  if (!statbuf)
+    {
+      if (stat (file_name, &buf) != 0)
+	return XDG_MIME_TYPE_UNKNOWN;
+
+      statbuf = &buf;
+    }
+
+  if (!S_ISREG (statbuf->st_mode))
+    return XDG_MIME_TYPE_UNKNOWN;
+
+  /* FIXME: Need to make sure that max_extent isn't totally broken.  This could
+   * be large and need getting from a stream instead of just reading it all
+   * in. */
+  max_extent = _xdg_mime_magic_get_buffer_extents (global_magic);
+  data = malloc (max_extent);
+  if (data == NULL)
+    return XDG_MIME_TYPE_UNKNOWN;
+        
+  file = fopen (file_name, "r");
+  if (file == NULL)
+    {
+      free (data);
+      return XDG_MIME_TYPE_UNKNOWN;
+    }
+
+  bytes_read = fread (data, 1, max_extent, file);
+  if (ferror (file))
+    {
+      free (data);
+      fclose (file);
+      return XDG_MIME_TYPE_UNKNOWN;
+    }
+
+  mime_type = _xdg_mime_magic_lookup_data (global_magic, data, bytes_read, NULL,
+					   mime_types, n);
+
+  fclose (file);
+
+  if (!mime_type)
+    mime_type = _xdg_binary_or_text_fallback(data, bytes_read);
+
+  free (data);
+  return mime_type;
+}
+
+const char *
+xdg_mime_get_mime_type_from_file_name (const char *file_name)
+{
+  const char *mime_type;
+
+  xdg_mime_init ();
+
+  if (_caches)
+    return _xdg_mime_cache_get_mime_type_from_file_name (file_name);
+
+  if (_xdg_glob_hash_lookup_file_name (global_hash, file_name, &mime_type, 1))
+    return mime_type;
+  else
+    return XDG_MIME_TYPE_UNKNOWN;
+}
+
+int
+xdg_mime_get_mime_types_from_file_name (const char *file_name,
+					const char  *mime_types[],
+					int          n_mime_types)
+{
+  xdg_mime_init ();
+  
+  if (_caches)
+    return _xdg_mime_cache_get_mime_types_from_file_name (file_name, mime_types, n_mime_types);
+  
+  return _xdg_glob_hash_lookup_file_name (global_hash, file_name, mime_types, n_mime_types);
+}
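+
+/* Example (illustrative): collecting every glob match for one name.  How
+ * many entries come back depends on the installed mime database.
+ *
+ *   const char *types[5];
+ *   int n = xdg_mime_get_mime_types_from_file_name ("configure.ac", types, 5);
+ *
+ * types[0] .. types[n - 1] then hold the matches, highest-weight glob first.
+ */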
+
+int
+xdg_mime_is_valid_mime_type (const char *mime_type)
+{
+  /* FIXME: We should make this a better test
+   */
+  return _xdg_utf8_validate (mime_type);
+}
+
+void
+xdg_mime_shutdown (void)
+{
+  XdgCallbackList *list;
+
+  /* FIXME: Need to make this (and the whole library) thread safe */
+  if (dir_time_list)
+    {
+      xdg_dir_time_list_free (dir_time_list);
+      dir_time_list = NULL;
+    }
+	
+  if (global_hash)
+    {
+      _xdg_glob_hash_free (global_hash);
+      global_hash = NULL;
+    }
+  if (global_magic)
+    {
+      _xdg_mime_magic_free (global_magic);
+      global_magic = NULL;
+    }
+
+  if (alias_list)
+    {
+      _xdg_mime_alias_list_free (alias_list);
+      alias_list = NULL;
+    }
+
+  if (parent_list)
+    {
+      _xdg_mime_parent_list_free (parent_list);
+      parent_list = NULL;
+    }
+
+  if (icon_list)
+    {
+      _xdg_mime_icon_list_free (icon_list);
+      icon_list = NULL;
+    }
+
+  if (generic_icon_list)
+    {
+      _xdg_mime_icon_list_free (generic_icon_list);
+      generic_icon_list = NULL;
+    }
+  
+  if (_caches)
+    {
+      int i;
+
+      for (i = 0; i < n_caches; i++)
+        _xdg_mime_cache_unref (_caches[i]);
+      free (_caches);
+      _caches = NULL;
+      n_caches = 0;
+    }
+
+  for (list = callback_list; list; list = list->next)
+    (list->callback) (list->data);
+
+  need_reread = TRUE;
+}
+
+int
+xdg_mime_get_max_buffer_extents (void)
+{
+  xdg_mime_init ();
+  
+  if (_caches)
+    return _xdg_mime_cache_get_max_buffer_extents ();
+
+  return _xdg_mime_magic_get_buffer_extents (global_magic);
+}
+
+const char *
+_xdg_mime_unalias_mime_type (const char *mime_type)
+{
+  const char *lookup;
+
+  if (_caches)
+    return _xdg_mime_cache_unalias_mime_type (mime_type);
+
+  if ((lookup = _xdg_mime_alias_list_lookup (alias_list, mime_type)) != NULL)
+    return lookup;
+
+  return mime_type;
+}
+
+const char *
+xdg_mime_unalias_mime_type (const char *mime_type)
+{
+  xdg_mime_init ();
+
+  return _xdg_mime_unalias_mime_type (mime_type);
+}
+
+int
+_xdg_mime_mime_type_equal (const char *mime_a,
+			   const char *mime_b)
+{
+  const char *unalias_a, *unalias_b;
+
+  unalias_a = _xdg_mime_unalias_mime_type (mime_a);
+  unalias_b = _xdg_mime_unalias_mime_type (mime_b);
+
+  if (strcmp (unalias_a, unalias_b) == 0)
+    return 1;
+
+  return 0;
+}
+
+int
+xdg_mime_mime_type_equal (const char *mime_a,
+			  const char *mime_b)
+{
+  xdg_mime_init ();
+
+  return _xdg_mime_mime_type_equal (mime_a, mime_b);
+}
+
+int
+xdg_mime_media_type_equal (const char *mime_a,
+			   const char *mime_b)
+{
+  char *sep;
+
+  sep = strchr (mime_a, '/');
+  
+  if (sep && strncmp (mime_a, mime_b, sep - mime_a + 1) == 0)
+    return 1;
+
+  return 0;
+}
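+
+/* Examples (illustrative): only the media part, up to and including the '/',
+ * is compared:
+ *
+ *   xdg_mime_media_type_equal ("text/plain", "text/html")    returns 1
+ *   xdg_mime_media_type_equal ("text/plain", "image/png")    returns 0
+ */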
+
+#if 1
+static int
+xdg_mime_is_super_type (const char *mime)
+{
+  int length;
+  const char *type;
+
+  length = strlen (mime);
+  /* Avoid reading before the start of very short strings. */
+  if (length < 2)
+    return 0;
+
+  type = &(mime[length - 2]);
+
+  if (strcmp (type, "/*") == 0)
+    return 1;
+
+  return 0;
+}
+#endif
+
+int
+_xdg_mime_mime_type_subclass (const char *mime,
+			      const char *base)
+{
+  const char *umime, *ubase;
+  const char **parents;
+
+  if (_caches)
+    return _xdg_mime_cache_mime_type_subclass (mime, base);
+
+  umime = _xdg_mime_unalias_mime_type (mime);
+  ubase = _xdg_mime_unalias_mime_type (base);
+
+  if (strcmp (umime, ubase) == 0)
+    return 1;
+
+#if 1  
+  /* Handle supertypes */
+  if (xdg_mime_is_super_type (ubase) &&
+      xdg_mime_media_type_equal (umime, ubase))
+    return 1;
+#endif
+
+  /*  Handle special cases text/plain and application/octet-stream */
+  if (strcmp (ubase, "text/plain") == 0 && 
+      strncmp (umime, "text/", 5) == 0)
+    return 1;
+
+  if (strcmp (ubase, "application/octet-stream") == 0)
+    return 1;
+  
+  parents = _xdg_mime_parent_list_lookup (parent_list, umime);
+  for (; parents && *parents; parents++)
+    {
+      if (_xdg_mime_mime_type_subclass (*parents, ubase))
+	return 1;
+    }
+
+  return 0;
+}
+
+int
+xdg_mime_mime_type_subclass (const char *mime,
+			     const char *base)
+{
+  xdg_mime_init ();
+
+  return _xdg_mime_mime_type_subclass (mime, base);
+}
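+
+/* Examples that follow directly from the special cases above, independently
+ * of the installed database:
+ *
+ *   xdg_mime_mime_type_subclass ("text/x-diff", "text/plain")              returns 1
+ *   xdg_mime_mime_type_subclass ("image/png", "application/octet-stream")  returns 1
+ *   xdg_mime_mime_type_subclass ("image/png", "image/*")                   returns 1
+ *
+ * All other parent relationships come from the parent data loaded from the
+ * mime database.
+ */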
+
+char **
+xdg_mime_list_mime_parents (const char *mime)
+{
+  const char **parents;
+  char **result;
+  int i, n;
+
+  if (_caches)
+    return _xdg_mime_cache_list_mime_parents (mime);
+
+  parents = xdg_mime_get_mime_parents (mime);
+
+  if (!parents)
+    return NULL;
+
+  for (i = 0; parents[i]; i++) ;
+  
+  n = (i + 1) * sizeof (char *);
+  result = (char **) malloc (n);
+  if (result)
+    memcpy (result, parents, n);
+
+  return result;
+}
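+
+/* Example (illustrative): the returned vector is NULL-terminated; the caller
+ * frees the vector itself, but not the strings, which point into xdgmime's
+ * internal data.
+ *
+ *   char **parents = xdg_mime_list_mime_parents ("text/x-python");
+ *   int i;
+ *   for (i = 0; parents && parents[i]; i++)
+ *     printf ("%s\n", parents[i]);
+ *   free (parents);
+ */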
+
+const char **
+xdg_mime_get_mime_parents (const char *mime)
+{
+  const char *umime;
+
+  xdg_mime_init ();
+
+  umime = _xdg_mime_unalias_mime_type (mime);
+
+  return _xdg_mime_parent_list_lookup (parent_list, umime);
+}
+
+void 
+xdg_mime_dump (void)
+{
+  xdg_mime_init();
+
+  printf ("*** ALIASES ***\n\n");
+  _xdg_mime_alias_list_dump (alias_list);
+  printf ("\n*** PARENTS ***\n\n");
+  _xdg_mime_parent_list_dump (parent_list);
+  printf ("\n*** GLOBS ***\n\n");
+  _xdg_glob_hash_dump (global_hash);
+  printf ("\n*** GLOBS REVERSE TREE ***\n\n");
+  _xdg_mime_cache_glob_dump ();
+}
+
+
+/* Registers a function to be called every time the mime database reloads its files
+ */
+int
+xdg_mime_register_reload_callback (XdgMimeCallback  callback,
+				   void            *data,
+				   XdgMimeDestroy   destroy)
+{
+  XdgCallbackList *list_el;
+  static int callback_id = 1;
+
+  /* Make a new list element */
+  list_el = calloc (1, sizeof (XdgCallbackList));
+  list_el->callback_id = callback_id;
+  list_el->callback = callback;
+  list_el->data = data;
+  list_el->destroy = destroy;
+  list_el->next = callback_list;
+  if (list_el->next)
+    list_el->next->prev = list_el;
+
+  callback_list = list_el;
+  callback_id ++;
+
+  return callback_id - 1;
+}
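+
+/* Example (illustrative; the names are hypothetical): keeping an
+ * application-side cache in sync with the on-disk mime database.
+ *
+ *   static void on_reload (void *data)  { drop_app_cache (); }
+ *   static void on_destroy (void *data) { }
+ *
+ *   int id = xdg_mime_register_reload_callback (on_reload, NULL, on_destroy);
+ *   ...
+ *   xdg_mime_remove_callback (id);   (this invokes on_destroy)
+ */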
+
+void
+xdg_mime_remove_callback (int callback_id)
+{
+  XdgCallbackList *list;
+
+  for (list = callback_list; list; list = list->next)
+    {
+      if (list->callback_id == callback_id)
+	{
+	  if (list->next)
+	    list->next->prev = list->prev;
+
+	  if (list->prev)
+	    list->prev->next = list->next;
+	  else
+	    callback_list = list->next;
+
+	  /* invoke the destroy handler, if any */
+	  if (list->destroy)
+	    (list->destroy) (list->data);
+	  free (list);
+	  return;
+	}
+    }
+}
+
+const char *
+xdg_mime_get_icon (const char *mime)
+{
+  xdg_mime_init ();
+  
+  if (_caches)
+    return _xdg_mime_cache_get_icon (mime);
+
+  return _xdg_mime_icon_list_lookup (icon_list, mime);
+}
+
+const char *
+xdg_mime_get_generic_icon (const char *mime)
+{
+  xdg_mime_init ();
+  
+  if (_caches)
+    return _xdg_mime_cache_get_generic_icon (mime);
+
+  return _xdg_mime_icon_list_lookup (generic_icon_list, mime);
+}
diff --git a/base/third_party/xdg_mime/xdgmime.h b/base/third_party/xdg_mime/xdgmime.h
new file mode 100644
index 0000000..6a34edf
--- /dev/null
+++ b/base/third_party/xdg_mime/xdgmime.h
@@ -0,0 +1,133 @@
+/* -*- mode: C; c-file-style: "gnu" -*- */
+/* xdgmime.h: XDG Mime Spec mime resolver.  Based on version 0.11 of the spec.
+ *
+ * More info can be found at http://www.freedesktop.org/standards/
+ * 
+ * Copyright (C) 2003  Red Hat, Inc.
+ * Copyright (C) 2003  Jonathan Blandford <jrb@alum.mit.edu>
+ *
+ * Licensed under the Academic Free License version 2.0
+ * Or under the following terms:
+ * 
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+
+#ifndef __XDG_MIME_H__
+#define __XDG_MIME_H__
+
+#include <stdlib.h>
+#include <sys/stat.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#ifdef XDG_PREFIX
+#define XDG_ENTRY(func) _XDG_ENTRY2(XDG_PREFIX,func)
+#define _XDG_ENTRY2(prefix,func) _XDG_ENTRY3(prefix,func)
+#define _XDG_ENTRY3(prefix,func) prefix##_##func
+
+#define XDG_RESERVED_ENTRY(func) _XDG_RESERVED_ENTRY2(XDG_PREFIX,func)
+#define _XDG_RESERVED_ENTRY2(prefix,func) _XDG_RESERVED_ENTRY3(prefix,func)
+#define _XDG_RESERVED_ENTRY3(prefix,func) _##prefix##_##func
+#endif
+
+typedef void (*XdgMimeCallback) (void *user_data);
+typedef void (*XdgMimeDestroy)  (void *user_data);
+
+  
+#ifdef XDG_PREFIX
+#define xdg_mime_get_mime_type_for_data       XDG_ENTRY(get_mime_type_for_data)
+#define xdg_mime_get_mime_type_for_file       XDG_ENTRY(get_mime_type_for_file)
+#define xdg_mime_get_mime_type_from_file_name XDG_ENTRY(get_mime_type_from_file_name)
+#define xdg_mime_get_mime_types_from_file_name XDG_ENTRY(get_mime_types_from_file_name)
+#define xdg_mime_is_valid_mime_type           XDG_ENTRY(is_valid_mime_type)
+#define xdg_mime_mime_type_equal              XDG_ENTRY(mime_type_equal)
+#define xdg_mime_media_type_equal             XDG_ENTRY(media_type_equal)
+#define xdg_mime_mime_type_subclass           XDG_ENTRY(mime_type_subclass)
+#define xdg_mime_get_mime_parents             XDG_ENTRY(get_mime_parents)
+#define xdg_mime_list_mime_parents            XDG_ENTRY(list_mime_parents)
+#define xdg_mime_unalias_mime_type            XDG_ENTRY(unalias_mime_type)
+#define xdg_mime_get_max_buffer_extents       XDG_ENTRY(get_max_buffer_extents)
+#define xdg_mime_shutdown                     XDG_ENTRY(shutdown)
+#define xdg_mime_dump                         XDG_ENTRY(dump)
+#define xdg_mime_register_reload_callback     XDG_ENTRY(register_reload_callback)
+#define xdg_mime_remove_callback              XDG_ENTRY(remove_callback)
+#define xdg_mime_type_unknown                 XDG_ENTRY(type_unknown)
+#define xdg_mime_type_empty                   XDG_ENTRY(type_empty)
+#define xdg_mime_type_textplain               XDG_ENTRY(type_textplain)
+#define xdg_mime_get_icon                     XDG_ENTRY(get_icon)
+#define xdg_mime_get_generic_icon             XDG_ENTRY(get_generic_icon)
+
+#define _xdg_mime_mime_type_equal             XDG_RESERVED_ENTRY(mime_type_equal)
+#define _xdg_mime_mime_type_subclass          XDG_RESERVED_ENTRY(mime_type_subclass)
+#define _xdg_mime_unalias_mime_type           XDG_RESERVED_ENTRY(unalias_mime_type)  
+#endif
+
+extern const char xdg_mime_type_unknown[];
+extern const char xdg_mime_type_empty[];
+extern const char xdg_mime_type_textplain[];
+#define XDG_MIME_TYPE_UNKNOWN xdg_mime_type_unknown
+#define XDG_MIME_TYPE_EMPTY xdg_mime_type_empty
+#define XDG_MIME_TYPE_TEXTPLAIN xdg_mime_type_textplain
+
+const char  *xdg_mime_get_mime_type_for_data       (const void *data,
+						    size_t      len,
+						    int        *result_prio);
+const char  *xdg_mime_get_mime_type_for_file       (const char *file_name,
+                                                    struct stat *statbuf);
+const char  *xdg_mime_get_mime_type_from_file_name (const char *file_name);
+int          xdg_mime_get_mime_types_from_file_name(const char *file_name,
+						    const char *mime_types[],
+						    int         n_mime_types);
+int          xdg_mime_is_valid_mime_type           (const char *mime_type);
+int          xdg_mime_mime_type_equal              (const char *mime_a,
+						    const char *mime_b);
+int          xdg_mime_media_type_equal             (const char *mime_a,
+						    const char *mime_b);
+int          xdg_mime_mime_type_subclass           (const char *mime_a,
+						    const char *mime_b);
+  /* xdg_mime_get_mime_parents() is deprecated since it does
+   * not work correctly with caches. Use xdg_mime_list_mime_parents()
+   * instead, but note that that function expects you to free
+   * the array it returns.
+   */
+const char **xdg_mime_get_mime_parents		   (const char *mime);
+char **      xdg_mime_list_mime_parents		   (const char *mime);
+const char  *xdg_mime_unalias_mime_type		   (const char *mime);
+const char  *xdg_mime_get_icon                     (const char *mime);
+const char  *xdg_mime_get_generic_icon             (const char *mime);
+int          xdg_mime_get_max_buffer_extents       (void);
+void         xdg_mime_shutdown                     (void);
+void         xdg_mime_dump                         (void);
+int          xdg_mime_register_reload_callback     (XdgMimeCallback  callback,
+						    void            *data,
+						    XdgMimeDestroy   destroy);
+void         xdg_mime_remove_callback              (int              callback_id);
+
+   /* Private versions of functions that don't call xdg_mime_init () */
+int          _xdg_mime_mime_type_equal             (const char *mime_a,
+						    const char *mime_b);
+int          _xdg_mime_mime_type_subclass          (const char *mime,
+						    const char *base);
+const char  *_xdg_mime_unalias_mime_type           (const char *mime);
+
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+#endif /* __XDG_MIME_H__ */
diff --git a/base/third_party/xdg_mime/xdgmimealias.c b/base/third_party/xdg_mime/xdgmimealias.c
new file mode 100644
index 0000000..07d89eb
--- /dev/null
+++ b/base/third_party/xdg_mime/xdgmimealias.c
@@ -0,0 +1,184 @@
+/* -*- mode: C; c-file-style: "gnu" -*- */
+/* xdgmimealias.c: Private file.  Datastructure for storing the aliases.
+ *
+ * More info can be found at http://www.freedesktop.org/standards/
+ *
+ * Copyright (C) 2004  Red Hat, Inc.
+ * Copyright (C) 2004  Matthias Clasen <mclasen@redhat.com>
+ *
+ * Licensed under the Academic Free License version 2.0
+ * Or under the following terms:
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include "xdgmimealias.h"
+#include "xdgmimeint.h"
+#include <stdlib.h>
+#include <stdio.h>
+#include <assert.h>
+#include <string.h>
+#include <fnmatch.h>
+
+#ifndef	FALSE
+#define	FALSE	(0)
+#endif
+
+#ifndef	TRUE
+#define	TRUE	(!FALSE)
+#endif
+
+typedef struct XdgAlias XdgAlias;
+
+struct XdgAlias 
+{
+  char *alias;
+  char *mime_type;
+};
+
+struct XdgAliasList
+{
+  struct XdgAlias *aliases;
+  int n_aliases;
+};
+
+XdgAliasList *
+_xdg_mime_alias_list_new (void)
+{
+  XdgAliasList *list;
+
+  list = malloc (sizeof (XdgAliasList));
+
+  list->aliases = NULL;
+  list->n_aliases = 0;
+
+  return list;
+}
+
+void         
+_xdg_mime_alias_list_free (XdgAliasList *list)
+{
+  int i;
+
+  if (list->aliases)
+    {
+      for (i = 0; i < list->n_aliases; i++)
+	{
+	  free (list->aliases[i].alias);
+	  free (list->aliases[i].mime_type);
+	}
+      free (list->aliases);
+    }
+  free (list);
+}
+
+static int
+alias_entry_cmp (const void *v1, const void *v2)
+{
+  return strcmp (((XdgAlias *)v1)->alias, ((XdgAlias *)v2)->alias);
+}
+
+const char  *
+_xdg_mime_alias_list_lookup (XdgAliasList *list,
+			     const char   *alias)
+{
+  XdgAlias *entry;
+  XdgAlias key;
+
+  if (list->n_aliases > 0)
+    {
+      key.alias = (char *)alias;
+      key.mime_type = NULL;
+
+      entry = bsearch (&key, list->aliases, list->n_aliases,
+		       sizeof (XdgAlias), alias_entry_cmp);
+      if (entry)
+        return entry->mime_type;
+    }
+
+  return NULL;
+}
+
+void
+_xdg_mime_alias_read_from_file (XdgAliasList *list,
+				const char   *file_name)
+{
+  FILE *file;
+  char line[255];
+  int alloc;
+
+  file = fopen (file_name, "r");
+
+  if (file == NULL)
+    return;
+
+  /* FIXME: Not UTF-8 safe.  Doesn't work if lines are greater than 255 chars.
+   * Blah */
+  alloc = list->n_aliases + 16;
+  list->aliases = realloc (list->aliases, alloc * sizeof (XdgAlias));
+  while (fgets (line, 255, file) != NULL)
+    {
+      char *sep;
+      if (line[0] == '#')
+	continue;
+
+      sep = strchr (line, ' ');
+      if (sep == NULL)
+	continue;
+      *(sep++) = '\000';
+      sep[strlen (sep) -1] = '\000';
+      if (list->n_aliases == alloc)
+	{
+	  alloc <<= 1;
+	  list->aliases = realloc (list->aliases, 
+				   alloc * sizeof (XdgAlias));
+	}
+      list->aliases[list->n_aliases].alias = strdup (line);
+      list->aliases[list->n_aliases].mime_type = strdup (sep);
+      list->n_aliases++;
+    }
+  list->aliases = realloc (list->aliases, 
+			   list->n_aliases * sizeof (XdgAlias));
+
+  fclose (file);  
+  
+  if (list->n_aliases > 1)
+    qsort (list->aliases, list->n_aliases, 
+           sizeof (XdgAlias), alias_entry_cmp);
+}
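+
+/* The file parsed above holds one "alias mimetype" pair per line, separated
+ * by a single space, e.g. "application/x-pdf application/pdf"; lines
+ * starting with '#' are comments.  The list is sorted afterwards so lookups
+ * can use bsearch().
+ */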
+
+
+void
+_xdg_mime_alias_list_dump (XdgAliasList *list)
+{
+  int i;
+
+  if (list->aliases)
+    {
+      for (i = 0; i < list->n_aliases; i++)
+	{
+	  printf ("%s %s\n", 
+		  list->aliases[i].alias,
+		  list->aliases[i].mime_type);
+	}
+    }
+}
+
+
diff --git a/base/third_party/xdg_mime/xdgmimealias.h b/base/third_party/xdg_mime/xdgmimealias.h
new file mode 100644
index 0000000..3c28012
--- /dev/null
+++ b/base/third_party/xdg_mime/xdgmimealias.h
@@ -0,0 +1,51 @@
+/* -*- mode: C; c-file-style: "gnu" -*- */
+/* xdgmimealias.h: Private file.  Datastructure for storing the aliases.
+ *
+ * More info can be found at http://www.freedesktop.org/standards/
+ *
+ * Copyright (C) 2004  Red Hat, Inc.
+ * Copyright (C) 2004  Matthias Clasen <mclasen@redhat.com>
+ *
+ * Licensed under the Academic Free License version 2.0
+ * Or under the following terms:
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __XDG_MIME_ALIAS_H__
+#define __XDG_MIME_ALIAS_H__
+
+#include "xdgmime.h"
+
+typedef struct XdgAliasList XdgAliasList;
+
+#ifdef XDG_PREFIX
+#define _xdg_mime_alias_read_from_file        XDG_RESERVED_ENTRY(alias_read_from_file)
+#define _xdg_mime_alias_list_new              XDG_RESERVED_ENTRY(alias_list_new)
+#define _xdg_mime_alias_list_free             XDG_RESERVED_ENTRY(alias_list_free)
+#define _xdg_mime_alias_list_lookup           XDG_RESERVED_ENTRY(alias_list_lookup)
+#define _xdg_mime_alias_list_dump             XDG_RESERVED_ENTRY(alias_list_dump)
+#endif
+
+void          _xdg_mime_alias_read_from_file (XdgAliasList *list,
+					      const char   *file_name);
+XdgAliasList *_xdg_mime_alias_list_new       (void);
+void          _xdg_mime_alias_list_free      (XdgAliasList *list);
+const char   *_xdg_mime_alias_list_lookup    (XdgAliasList *list,
+					      const char  *alias);
+void          _xdg_mime_alias_list_dump      (XdgAliasList *list);
+
+#endif /* __XDG_MIME_ALIAS_H__ */
diff --git a/base/third_party/xdg_mime/xdgmimecache.c b/base/third_party/xdg_mime/xdgmimecache.c
new file mode 100644
index 0000000..ddb8754
--- /dev/null
+++ b/base/third_party/xdg_mime/xdgmimecache.c
@@ -0,0 +1,1069 @@
+/* -*- mode: C; c-file-style: "gnu" -*- */
+/* xdgmimecache.c: Private file.  mmappable caches for mime data
+ *
+ * More info can be found at http://www.freedesktop.org/standards/
+ *
+ * Copyright (C) 2005  Matthias Clasen <mclasen@redhat.com>
+ *
+ * Licensed under the Academic Free License version 2.0
+ * Or under the following terms:
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <fcntl.h>
+#include <unistd.h>
+#include <fnmatch.h>
+#include <assert.h>
+
+#include <netinet/in.h> /* for ntohl/ntohs */
+
+#define HAVE_MMAP 1
+
+#ifdef HAVE_MMAP
+#include <sys/mman.h>
+#else
+#warning Building xdgmime without MMAP support. Binary "mime.cache" files will not be used.
+#endif
+
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include "xdgmimecache.h"
+#include "xdgmimeint.h"
+
+#ifndef MAX
+#define MAX(a,b) ((a) > (b) ? (a) : (b))
+#endif
+
+#ifndef	FALSE
+#define	FALSE	(0)
+#endif
+
+#ifndef	TRUE
+#define	TRUE	(!FALSE)
+#endif
+
+#ifndef _O_BINARY
+#define _O_BINARY 0
+#endif
+
+#ifndef MAP_FAILED
+#define MAP_FAILED ((void *) -1)
+#endif
+
+#define MAJOR_VERSION 1
+#define MINOR_VERSION_MIN 1
+#define MINOR_VERSION_MAX 2
+
+struct _XdgMimeCache
+{
+  int ref_count;
+  int minor;
+
+  size_t  size;
+  char   *buffer;
+};
+
+#define GET_UINT16(cache,offset) (ntohs(*(xdg_uint16_t*)((cache) + (offset))))
+#define GET_UINT32(cache,offset) (ntohl(*(xdg_uint32_t*)((cache) + (offset))))
+
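+/* Rough layout of the mime.cache header, as read by this file (all values
+ * big-endian, offsets in bytes):
+ *
+ *    0  major version (16-bit)     2  minor version (16-bit)
+ *    4  alias list offset          8  parent list offset
+ *   12  literal glob list offset  16  suffix tree list offset
+ *   20  full glob list offset     24  magic list offset
+ *   32  icon list offset          36  generic icon list offset
+ */
+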
+XdgMimeCache *
+_xdg_mime_cache_ref (XdgMimeCache *cache)
+{
+  cache->ref_count++;
+  return cache;
+}
+
+void
+_xdg_mime_cache_unref (XdgMimeCache *cache)
+{
+  cache->ref_count--;
+
+  if (cache->ref_count == 0)
+    {
+#ifdef HAVE_MMAP
+      munmap (cache->buffer, cache->size);
+#endif
+      free (cache);
+    }
+}
+
+XdgMimeCache *
+_xdg_mime_cache_new_from_file (const char *file_name)
+{
+  XdgMimeCache *cache = NULL;
+
+#ifdef HAVE_MMAP
+  int fd = -1;
+  struct stat st;
+  char *buffer = NULL;
+  int minor;
+
+  /* Open the file and map it into memory */
+  fd = open (file_name, O_RDONLY|_O_BINARY, 0);
+
+  if (fd < 0)
+    return NULL;
+  
+  if (fstat (fd, &st) < 0 || st.st_size < 4)
+    goto done;
+
+  buffer = (char *) mmap (NULL, st.st_size, PROT_READ, MAP_SHARED, fd, 0);
+
+  if (buffer == MAP_FAILED)
+    goto done;
+
+  minor = GET_UINT16 (buffer, 2);
+  /* Verify version */
+  if (GET_UINT16 (buffer, 0) != MAJOR_VERSION ||
+      (minor < MINOR_VERSION_MIN ||
+       minor > MINOR_VERSION_MAX))
+    {
+      munmap (buffer, st.st_size);
+
+      goto done;
+    }
+  
+  cache = (XdgMimeCache *) malloc (sizeof (XdgMimeCache));
+  cache->minor = minor;
+  cache->ref_count = 1;
+  cache->buffer = buffer;
+  cache->size = st.st_size;
+
+ done:
+  if (fd != -1)
+    close (fd);
+
+#endif  /* HAVE_MMAP */
+
+  return cache;
+}
+
+static int
+cache_magic_matchlet_compare_to_data (XdgMimeCache *cache, 
+				      xdg_uint32_t  offset,
+				      const void   *data,
+				      size_t        len)
+{
+  xdg_uint32_t range_start = GET_UINT32 (cache->buffer, offset);
+  xdg_uint32_t range_length = GET_UINT32 (cache->buffer, offset + 4);
+  xdg_uint32_t data_length = GET_UINT32 (cache->buffer, offset + 12);
+  xdg_uint32_t data_offset = GET_UINT32 (cache->buffer, offset + 16);
+  xdg_uint32_t mask_offset = GET_UINT32 (cache->buffer, offset + 20);
+  
+  int i, j;
+
+  for (i = range_start; i < range_start + range_length; i++)
+    {
+      int valid_matchlet = TRUE;
+      
+      if (i + data_length > len)
+	return FALSE;
+
+      if (mask_offset)
+	{
+	  for (j = 0; j < data_length; j++)
+	    {
+	      if ((((unsigned char *)cache->buffer)[data_offset + j] & ((unsigned char *)cache->buffer)[mask_offset + j]) !=
+		  ((((unsigned char *) data)[j + i]) & ((unsigned char *)cache->buffer)[mask_offset + j]))
+		{
+		  valid_matchlet = FALSE;
+		  break;
+		}
+	    }
+	}
+      else
+	{
+	  valid_matchlet = memcmp (cache->buffer + data_offset, (const unsigned char *) data + i, data_length) == 0;
+	}
+
+      if (valid_matchlet)
+	return TRUE;
+    }
+  
+  return FALSE;  
+}
+
+static int
+cache_magic_matchlet_compare (XdgMimeCache *cache, 
+			      xdg_uint32_t  offset,
+			      const void   *data,
+			      size_t        len)
+{
+  xdg_uint32_t n_children = GET_UINT32 (cache->buffer, offset + 24);
+  xdg_uint32_t child_offset = GET_UINT32 (cache->buffer, offset + 28);
+
+  int i;
+  
+  if (cache_magic_matchlet_compare_to_data (cache, offset, data, len))
+    {
+      if (n_children == 0)
+	return TRUE;
+      
+      for (i = 0; i < n_children; i++)
+	{
+	  if (cache_magic_matchlet_compare (cache, child_offset + 32 * i,
+					    data, len))
+	    return TRUE;
+	}
+    }
+  
+  return FALSE;  
+}
+
+static const char *
+cache_magic_compare_to_data (XdgMimeCache *cache, 
+			     xdg_uint32_t  offset,
+			     const void   *data, 
+			     size_t        len, 
+			     int          *prio)
+{
+  xdg_uint32_t priority = GET_UINT32 (cache->buffer, offset);
+  xdg_uint32_t mimetype_offset = GET_UINT32 (cache->buffer, offset + 4);
+  xdg_uint32_t n_matchlets = GET_UINT32 (cache->buffer, offset + 8);
+  xdg_uint32_t matchlet_offset = GET_UINT32 (cache->buffer, offset + 12);
+
+  int i;
+
+  for (i = 0; i < n_matchlets; i++)
+    {
+      if (cache_magic_matchlet_compare (cache, matchlet_offset + i * 32, 
+					data, len))
+	{
+	  *prio = priority;
+	  
+	  return cache->buffer + mimetype_offset;
+	}
+    }
+
+  return NULL;
+}
+
+static const char *
+cache_magic_lookup_data (XdgMimeCache *cache, 
+			 const void   *data, 
+			 size_t        len, 
+			 int          *prio,
+			 const char   *mime_types[],
+			 int           n_mime_types)
+{
+  xdg_uint32_t list_offset;
+  xdg_uint32_t n_entries;
+  xdg_uint32_t offset;
+
+  int j, n;
+
+  *prio = 0;
+
+  list_offset = GET_UINT32 (cache->buffer, 24);
+  n_entries = GET_UINT32 (cache->buffer, list_offset);
+  offset = GET_UINT32 (cache->buffer, list_offset + 8);
+  
+  for (j = 0; j < n_entries; j++)
+    {
+      const char *match;
+
+      match = cache_magic_compare_to_data (cache, offset + 16 * j, 
+					   data, len, prio);
+      if (match)
+	return match;
+      else
+	{
+	  xdg_uint32_t mimetype_offset;
+	  const char *non_match;
+	  
+	  mimetype_offset = GET_UINT32 (cache->buffer, offset + 16 * j + 4);
+	  non_match = cache->buffer + mimetype_offset;
+
+	  for (n = 0; n < n_mime_types; n++)
+	    {
+	      if (mime_types[n] && 
+		  _xdg_mime_mime_type_equal (mime_types[n], non_match))
+		mime_types[n] = NULL;
+	    }
+	}
+    }
+
+  return NULL;
+}
+
+static const char *
+cache_alias_lookup (const char *alias)
+{
+  const char *ptr;
+  int i, min, max, mid, cmp;
+
+  for (i = 0; _caches[i]; i++)
+    {
+      XdgMimeCache *cache = _caches[i];
+      xdg_uint32_t list_offset = GET_UINT32 (cache->buffer, 4);
+      xdg_uint32_t n_entries = GET_UINT32 (cache->buffer, list_offset);
+      xdg_uint32_t offset;
+
+      min = 0; 
+      max = n_entries - 1;
+      while (max >= min) 
+	{
+	  mid = (min + max) / 2;
+
+	  offset = GET_UINT32 (cache->buffer, list_offset + 4 + 8 * mid);
+	  ptr = cache->buffer + offset;
+	  cmp = strcmp (ptr, alias);
+	  
+	  if (cmp < 0)
+	    min = mid + 1;
+	  else if (cmp > 0)
+	    max = mid - 1;
+	  else
+	    {
+	      offset = GET_UINT32 (cache->buffer, list_offset + 4 + 8 * mid + 4);
+	      return cache->buffer + offset;
+	    }
+	}
+    }
+
+  return NULL;
+}
+
+typedef struct {
+  const char *mime;
+  int weight;
+} MimeWeight;
+
+static int
+cache_glob_lookup_literal (const char *file_name,
+			   const char *mime_types[],
+			   int         n_mime_types,
+			   int         case_sensitive_check)
+{
+  const char *ptr;
+  int i, min, max, mid, cmp;
+
+  for (i = 0; _caches[i]; i++)
+    {
+      XdgMimeCache *cache = _caches[i];
+      xdg_uint32_t list_offset = GET_UINT32 (cache->buffer, 12);
+      xdg_uint32_t n_entries = GET_UINT32 (cache->buffer, list_offset);
+      xdg_uint32_t offset;
+
+      min = 0; 
+      max = n_entries - 1;
+      while (max >= min) 
+	{
+	  mid = (min + max) / 2;
+
+	  offset = GET_UINT32 (cache->buffer, list_offset + 4 + 12 * mid);
+	  ptr = cache->buffer + offset;
+	  cmp = strcmp (ptr, file_name);
+
+	  if (cmp < 0)
+	    min = mid + 1;
+	  else if (cmp > 0)
+	    max = mid - 1;
+	  else
+	    {
+	      int weight = GET_UINT32 (cache->buffer, list_offset + 4 + 12 * mid + 8);
+	      int case_sensitive = weight & 0x100;
+	      weight = weight & 0xff;
+
+	      if (case_sensitive_check || !case_sensitive)
+		{
+		  offset = GET_UINT32 (cache->buffer, list_offset + 4 + 12 * mid + 4);
+		  mime_types[0] = (const char *)(cache->buffer + offset);
+
+		  return 1;
+		}
+	      return 0;
+	    }
+	}
+    }
+
+  return 0;
+}
+
+static int
+cache_glob_lookup_fnmatch (const char *file_name,
+			   MimeWeight  mime_types[],
+			   int         n_mime_types,
+			   int         case_sensitive_check)
+{
+  const char *mime_type;
+  const char *ptr;
+
+  int i, j, n;
+
+  n = 0;
+  for (i = 0; _caches[i]; i++)
+    {
+      XdgMimeCache *cache = _caches[i];
+
+      xdg_uint32_t list_offset = GET_UINT32 (cache->buffer, 20);
+      xdg_uint32_t n_entries = GET_UINT32 (cache->buffer, list_offset);
+
+      for (j = 0; j < n_entries && n < n_mime_types; j++)
+	{
+	  xdg_uint32_t offset = GET_UINT32 (cache->buffer, list_offset + 4 + 12 * j);
+	  xdg_uint32_t mimetype_offset = GET_UINT32 (cache->buffer, list_offset + 4 + 12 * j + 4);
+	  int weight = GET_UINT32 (cache->buffer, list_offset + 4 + 12 * j + 8);
+	  int case_sensitive = weight & 0x100;
+	  weight = weight & 0xff;
+	  ptr = cache->buffer + offset;
+	  mime_type = cache->buffer + mimetype_offset;
+	  if (case_sensitive_check || !case_sensitive)
+	    {
+	      /* FIXME: Not UTF-8 safe */
+	      if (fnmatch (ptr, file_name, 0) == 0)
+	        {
+	          mime_types[n].mime = mime_type;
+	          mime_types[n].weight = weight;
+	          n++;
+	        }
+	    }
+	}
+
+      if (n > 0)
+	return n;
+    }
+  
+  return 0;
+}
+
+static int
+cache_glob_node_lookup_suffix (XdgMimeCache  *cache,
+			       xdg_uint32_t   n_entries,
+			       xdg_uint32_t   offset,
+			       const char    *file_name,
+			       int            len,
+			       int            case_sensitive_check,
+			       MimeWeight     mime_types[],
+			       int            n_mime_types)
+{
+  xdg_unichar_t character;
+  xdg_unichar_t match_char;
+  xdg_uint32_t mimetype_offset;
+  xdg_uint32_t n_children;
+  xdg_uint32_t child_offset; 
+  int weight;
+  int case_sensitive;
+
+  int min, max, mid, n, i;
+
+  character = file_name[len - 1];
+
+  assert (character != 0);
+
+  min = 0;
+  max = n_entries - 1;
+  while (max >= min)
+    {
+      mid = (min + max) /  2;
+      match_char = GET_UINT32 (cache->buffer, offset + 12 * mid);
+      if (match_char < character)
+	min = mid + 1;
+      else if (match_char > character)
+	max = mid - 1;
+      else 
+	{
+          len--;
+          n = 0;
+          n_children = GET_UINT32 (cache->buffer, offset + 12 * mid + 4);
+          child_offset = GET_UINT32 (cache->buffer, offset + 12 * mid + 8);
+      
+          if (len > 0)
+            {
+              n = cache_glob_node_lookup_suffix (cache, 
+                                                 n_children, child_offset,
+                                                 file_name, len, 
+                                                 case_sensitive_check,
+                                                 mime_types,
+                                                 n_mime_types);
+            }
+          if (n == 0)
+            {
+	      i = 0;
+	      while (n < n_mime_types && i < n_children)
+		{
+		  match_char = GET_UINT32 (cache->buffer, child_offset + 12 * i);
+		  if (match_char != 0)
+		    break;
+
+		  mimetype_offset = GET_UINT32 (cache->buffer, child_offset + 12 * i + 4);
+		  weight = GET_UINT32 (cache->buffer, child_offset + 12 * i + 8);
+		  case_sensitive = weight & 0x100;
+		  weight = weight & 0xff;
+
+		  if (case_sensitive_check || !case_sensitive)
+		    {
+		      mime_types[n].mime = cache->buffer + mimetype_offset;
+		      mime_types[n].weight = weight;
+		      n++;
+		    }
+		  i++;
+		}
+	    }
+	  return n;
+	}
+    }
+  return 0;
+}
+
+static int
+cache_glob_lookup_suffix (const char *file_name,
+			  int         len,
+			  int         ignore_case,
+			  MimeWeight  mime_types[],
+			  int         n_mime_types)
+{
+  int i, n;
+
+  for (i = 0; _caches[i]; i++)
+    {
+      XdgMimeCache *cache = _caches[i];
+
+      xdg_uint32_t list_offset = GET_UINT32 (cache->buffer, 16);
+      xdg_uint32_t n_entries = GET_UINT32 (cache->buffer, list_offset);
+      xdg_uint32_t offset = GET_UINT32 (cache->buffer, list_offset + 4);
+
+      n = cache_glob_node_lookup_suffix (cache, 
+					 n_entries, offset, 
+					 file_name, len,
+					 ignore_case,
+					 mime_types,
+					 n_mime_types);
+      if (n > 0)
+	return n;
+    }
+
+  return 0;
+}
+
+static int compare_mime_weight (const void *a, const void *b)
+{
+  const MimeWeight *aa = (const MimeWeight *)a;
+  const MimeWeight *bb = (const MimeWeight *)b;
+
+  return bb->weight - aa->weight;
+}
+
+#define ISUPPER(c)		((c) >= 'A' && (c) <= 'Z')
+static char *
+ascii_tolower (const char *str)
+{
+  char *p, *lower;
+
+  lower = strdup (str);
+  p = lower;
+  while (*p != 0)
+    {
+      char c = *p;
+      *p++ = ISUPPER (c) ? c - 'A' + 'a' : c;
+    }
+  return lower;
+}
+
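+/* Resolve a file name against the cached globs, trying in order: literal
+ * globs on the lowercased name (case-insensitive entries only), literal
+ * globs on the name as given, the reversed suffix tree (again lowercased
+ * first), and finally full fnmatch() patterns.  Matches are sorted by
+ * descending glob weight before being returned. */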
+static int
+cache_glob_lookup_file_name (const char *file_name, 
+			     const char *mime_types[],
+			     int         n_mime_types)
+{
+  int n;
+  MimeWeight mimes[10];
+  int n_mimes = 10;
+  int i;
+  int len;
+  char *lower_case;
+
+  assert (file_name != NULL && n_mime_types > 0);
+
+  /* First, check the literals */
+
+  lower_case = ascii_tolower (file_name);
+
+  n = cache_glob_lookup_literal (lower_case, mime_types, n_mime_types, FALSE);
+  if (n > 0)
+    {
+      free (lower_case);
+      return n;
+    }
+
+  n = cache_glob_lookup_literal (file_name, mime_types, n_mime_types, TRUE);
+  if (n > 0)
+    {
+      free (lower_case);
+      return n;
+    }
+
+  len = strlen (file_name);
+  n = cache_glob_lookup_suffix (lower_case, len, FALSE, mimes, n_mimes);
+  if (n == 0)
+    n = cache_glob_lookup_suffix (file_name, len, TRUE, mimes, n_mimes);
+
+  /* Last, try fnmatch */
+  if (n == 0)
+    n = cache_glob_lookup_fnmatch (lower_case, mimes, n_mimes, FALSE);
+  if (n == 0)
+    n = cache_glob_lookup_fnmatch (file_name, mimes, n_mimes, TRUE);
+
+  free (lower_case);
+
+  qsort (mimes, n, sizeof (MimeWeight), compare_mime_weight);
+
+  if (n_mime_types < n)
+    n = n_mime_types;
+
+  for (i = 0; i < n; i++)
+    mime_types[i] = mimes[i].mime;
+
+  return n;
+}
+
+int
+_xdg_mime_cache_get_max_buffer_extents (void)
+{
+  xdg_uint32_t offset;
+  xdg_uint32_t max_extent;
+  int i;
+
+  max_extent = 0;
+  for (i = 0; _caches[i]; i++)
+    {
+      XdgMimeCache *cache = _caches[i];
+
+      offset = GET_UINT32 (cache->buffer, 24);
+      max_extent = MAX (max_extent, GET_UINT32 (cache->buffer, offset + 4));
+    }
+
+  return max_extent;
+}
+
+static const char *
+cache_get_mime_type_for_data (const void *data,
+			      size_t      len,
+			      int        *result_prio,
+			      const char *mime_types[],
+			      int         n_mime_types)
+{
+  const char *mime_type;
+  int i, n, priority;
+
+  priority = 0;
+  mime_type = NULL;
+  for (i = 0; _caches[i]; i++)
+    {
+      XdgMimeCache *cache = _caches[i];
+
+      int prio;
+      const char *match;
+
+      match = cache_magic_lookup_data (cache, data, len, &prio, 
+				       mime_types, n_mime_types);
+      if (prio > priority)
+	{
+	  priority = prio;
+	  mime_type = match;
+	}
+    }
+
+  if (result_prio)
+    *result_prio = priority;
+
+  if (priority > 0)
+    {
+      /* Pick glob-result R where mime_type inherits from R */
+      for (n = 0; n < n_mime_types; n++)
+        {
+          if (mime_types[n] && _xdg_mime_cache_mime_type_subclass(mime_types[n], mime_type))
+              return mime_types[n];
+        }
+
+      /* Return magic match */
+      return mime_type;
+    }
+
+  /* Pick first glob result, as fallback */
+  for (n = 0; n < n_mime_types; n++)
+    {
+      if (mime_types[n])
+        return mime_types[n];
+    }
+
+  return NULL;
+}
+
+const char *
+_xdg_mime_cache_get_mime_type_for_data (const void *data,
+					size_t      len,
+					int        *result_prio)
+{
+  return cache_get_mime_type_for_data (data, len, result_prio, NULL, 0);
+}
+
+const char *
+_xdg_mime_cache_get_mime_type_for_file (const char  *file_name,
+					struct stat *statbuf)
+{
+  const char *mime_type;
+  const char *mime_types[10];
+  FILE *file;
+  unsigned char *data;
+  int max_extent;
+  int bytes_read;
+  struct stat buf;
+  const char *base_name;
+  int n;
+
+  if (file_name == NULL)
+    return NULL;
+
+  if (! _xdg_utf8_validate (file_name))
+    return NULL;
+
+  base_name = _xdg_get_base_name (file_name);
+  n = cache_glob_lookup_file_name (base_name, mime_types, 10);
+
+  if (n == 1)
+    return mime_types[0];
+
+  if (!statbuf)
+    {
+      if (stat (file_name, &buf) != 0)
+	return XDG_MIME_TYPE_UNKNOWN;
+
+      statbuf = &buf;
+    }
+
+  if (statbuf->st_size == 0)
+    return XDG_MIME_TYPE_EMPTY;
+
+  if (!S_ISREG (statbuf->st_mode))
+    return XDG_MIME_TYPE_UNKNOWN;
+
+  /* FIXME: Need to make sure that max_extent isn't totally broken.  This could
+   * be large and need getting from a stream instead of just reading it all
+   * in. */
+  max_extent = _xdg_mime_cache_get_max_buffer_extents ();
+  data = malloc (max_extent);
+  if (data == NULL)
+    return XDG_MIME_TYPE_UNKNOWN;
+        
+  file = fopen (file_name, "r");
+  if (file == NULL)
+    {
+      free (data);
+      return XDG_MIME_TYPE_UNKNOWN;
+    }
+
+  bytes_read = fread (data, 1, max_extent, file);
+  if (ferror (file))
+    {
+      free (data);
+      fclose (file);
+      return XDG_MIME_TYPE_UNKNOWN;
+    }
+
+  mime_type = cache_get_mime_type_for_data (data, bytes_read, NULL,
+					    mime_types, n);
+
+  if (!mime_type)
+    mime_type = _xdg_binary_or_text_fallback(data, bytes_read);
+
+  free (data);
+  fclose (file);
+
+  return mime_type;
+}
+
+const char *
+_xdg_mime_cache_get_mime_type_from_file_name (const char *file_name)
+{
+  const char *mime_type;
+
+  if (cache_glob_lookup_file_name (file_name, &mime_type, 1))
+    return mime_type;
+  else
+    return XDG_MIME_TYPE_UNKNOWN;
+}
+
+int
+_xdg_mime_cache_get_mime_types_from_file_name (const char *file_name,
+					       const char  *mime_types[],
+					       int          n_mime_types)
+{
+  return cache_glob_lookup_file_name (file_name, mime_types, n_mime_types);
+}
+
+#if 1
+static int
+is_super_type (const char *mime)
+{
+  int length;
+  const char *type;
+
+  length = strlen (mime);
+  /* Avoid reading before the start of very short strings. */
+  if (length < 2)
+    return 0;
+
+  type = &(mime[length - 2]);
+
+  if (strcmp (type, "/*") == 0)
+    return 1;
+
+  return 0;
+}
+#endif
+
+int
+_xdg_mime_cache_mime_type_subclass (const char *mime,
+				    const char *base)
+{
+  const char *umime, *ubase;
+
+  int i, j, min, max, med, cmp;
+  
+  umime = _xdg_mime_cache_unalias_mime_type (mime);
+  ubase = _xdg_mime_cache_unalias_mime_type (base);
+
+  if (strcmp (umime, ubase) == 0)
+    return 1;
+
+  /* We really want to handle text/ * in GtkFileFilter, so we just
+   * turn on the supertype matching
+   */
+#if 1
+  /* Handle supertypes */
+  if (is_super_type (ubase) &&
+      xdg_mime_media_type_equal (umime, ubase))
+    return 1;
+#endif
+
+  /*  Handle special cases text/plain and application/octet-stream */
+  if (strcmp (ubase, "text/plain") == 0 && 
+      strncmp (umime, "text/", 5) == 0)
+    return 1;
+
+  if (strcmp (ubase, "application/octet-stream") == 0)
+    return 1;
+ 
+  for (i = 0; _caches[i]; i++)
+    {
+      XdgMimeCache *cache = _caches[i];
+      
+      xdg_uint32_t list_offset = GET_UINT32 (cache->buffer, 8);
+      xdg_uint32_t n_entries = GET_UINT32 (cache->buffer, list_offset);
+      xdg_uint32_t offset, n_parents, parent_offset;
+
+      min = 0; 
+      max = n_entries - 1;
+      while (max >= min)
+	{
+	  med = (min + max)/2;
+	  
+	  offset = GET_UINT32 (cache->buffer, list_offset + 4 + 8 * med);
+	  cmp = strcmp (cache->buffer + offset, umime);
+	  if (cmp < 0)
+	    min = med + 1;
+	  else if (cmp > 0)
+	    max = med - 1;
+	  else
+	    {
+	      offset = GET_UINT32 (cache->buffer, list_offset + 4 + 8 * med + 4);
+	      n_parents = GET_UINT32 (cache->buffer, offset);
+	      
+	      for (j = 0; j < n_parents; j++)
+		{
+		  parent_offset = GET_UINT32 (cache->buffer, offset + 4 + 4 * j);
+		  if (_xdg_mime_cache_mime_type_subclass (cache->buffer + parent_offset, ubase))
+		    return 1;
+		}
+
+	      break;
+	    }
+	}
+    }
+
+  return 0;
+}
+
+const char *
+_xdg_mime_cache_unalias_mime_type (const char *mime)
+{
+  const char *lookup;
+  
+  lookup = cache_alias_lookup (mime);
+  
+  if (lookup)
+    return lookup;
+  
+  return mime;  
+}
+
+char **
+_xdg_mime_cache_list_mime_parents (const char *mime)
+{
+  int i, j, k, l, p;
+  char *all_parents[128]; /* we'll stop at 128 */ 
+  char **result;
+
+  mime = xdg_mime_unalias_mime_type (mime);
+
+  p = 0;
+  for (i = 0; _caches[i]; i++)
+    {
+      XdgMimeCache *cache = _caches[i];
+  
+      xdg_uint32_t list_offset = GET_UINT32 (cache->buffer, 8);
+      xdg_uint32_t n_entries = GET_UINT32 (cache->buffer, list_offset);
+
+      for (j = 0; j < n_entries; j++)
+	{
+	  xdg_uint32_t mimetype_offset = GET_UINT32 (cache->buffer, list_offset + 4 + 8 * j);
+	  xdg_uint32_t parents_offset = GET_UINT32 (cache->buffer, list_offset + 4 + 8 * j + 4);
+
+	  if (strcmp (cache->buffer + mimetype_offset, mime) == 0)
+	    {
+	      xdg_uint32_t parent_mime_offset;
+	      xdg_uint32_t n_parents = GET_UINT32 (cache->buffer, parents_offset);
+
+	      for (k = 0; k < n_parents && p < 127; k++)
+		{
+		  parent_mime_offset = GET_UINT32 (cache->buffer, parents_offset + 4 + 4 * k);
+
+		  /* Don't add same parent multiple times.
+		   * This can happen for instance if the same type is listed in multiple directories
+		   */
+		  for (l = 0; l < p; l++)
+		    {
+		      if (strcmp (all_parents[l], cache->buffer + parent_mime_offset) == 0)
+			break;
+		    }
+
+		  if (l == p)
+		    all_parents[p++] = cache->buffer + parent_mime_offset;
+		}
+
+	      break;
+	    }
+	}
+    }
+  all_parents[p++] = NULL;
+  
+  result = (char **) malloc (p * sizeof (char *));
+  if (result)
+    memcpy (result, all_parents, p * sizeof (char *));
+
+  return result;
+}
+
+static const char *
+cache_lookup_icon (const char *mime, int header)
+{
+  const char *ptr;
+  int i, min, max, mid, cmp;
+
+  for (i = 0; _caches[i]; i++)
+    {
+      XdgMimeCache *cache = _caches[i];
+      xdg_uint32_t list_offset = GET_UINT32 (cache->buffer, header);
+      xdg_uint32_t n_entries = GET_UINT32 (cache->buffer, list_offset);
+      xdg_uint32_t offset;
+
+      min = 0; 
+      max = n_entries - 1;
+      while (max >= min) 
+        {
+          mid = (min + max) / 2;
+
+          offset = GET_UINT32 (cache->buffer, list_offset + 4 + 8 * mid);
+          ptr = cache->buffer + offset;
+          cmp = strcmp (ptr, mime);
+         
+          if (cmp < 0)
+            min = mid + 1;
+          else if (cmp > 0)
+            max = mid - 1;
+          else
+            {
+              offset = GET_UINT32 (cache->buffer, list_offset + 4 + 8 * mid + 4);
+              return cache->buffer + offset;
+            }
+        }
+    }
+
+  return NULL;
+}
+
+const char *
+_xdg_mime_cache_get_generic_icon (const char *mime)
+{
+  return cache_lookup_icon (mime, 36);
+}
+
+const char *
+_xdg_mime_cache_get_icon (const char *mime)
+{
+  return cache_lookup_icon (mime, 32);
+}
+
+static void
+dump_glob_node (XdgMimeCache *cache,
+		xdg_uint32_t  offset,
+		int           depth)
+{
+  xdg_unichar_t character;
+  xdg_uint32_t mime_offset;
+  xdg_uint32_t n_children;
+  xdg_uint32_t child_offset;
+  int i;
+
+  character = GET_UINT32 (cache->buffer, offset);
+  mime_offset = GET_UINT32 (cache->buffer, offset + 4);
+  n_children = GET_UINT32 (cache->buffer, offset + 8);
+  child_offset = GET_UINT32 (cache->buffer, offset + 12);
+  for (i = 0; i < depth; i++)
+    printf (" ");
+  printf ("%c", character);
+  if (mime_offset)
+    printf (" - %s", cache->buffer + mime_offset);
+  printf ("\n");
+  if (child_offset)
+  {
+    for (i = 0; i < n_children; i++)
+      dump_glob_node (cache, child_offset + 20 * i, depth + 1);
+  }
+}
+
+void
+_xdg_mime_cache_glob_dump (void)
+{
+  int i, j;
+  for (i = 0; _caches[i]; i++)
+  {
+    XdgMimeCache *cache = _caches[i];
+    xdg_uint32_t list_offset;
+    xdg_uint32_t n_entries;
+    xdg_uint32_t offset;
+    list_offset = GET_UINT32 (cache->buffer, 16);
+    n_entries = GET_UINT32 (cache->buffer, list_offset);
+    offset = GET_UINT32 (cache->buffer, list_offset + 4);
+    for (j = 0; j < n_entries; j++)
+	    dump_glob_node (cache, offset + 20 * j, 0);
+  }
+}
diff --git a/base/third_party/xdg_mime/xdgmimecache.h b/base/third_party/xdg_mime/xdgmimecache.h
new file mode 100644
index 0000000..27f42d0
--- /dev/null
+++ b/base/third_party/xdg_mime/xdgmimecache.h
@@ -0,0 +1,81 @@
+/* -*- mode: C; c-file-style: "gnu" -*- */
+/* xdgmimecache.h: Private file.  Datastructure for mmapped caches.
+ *
+ * More info can be found at http://www.freedesktop.org/standards/
+ *
+ * Copyright (C) 2005  Matthias Clasen <mclasen@redhat.com>
+ *
+ * Licensed under the Academic Free License version 2.0
+ * Or under the following terms:
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __XDG_MIME_CACHE_H__
+#define __XDG_MIME_CACHE_H__
+
+#include "xdgmime.h"
+
+typedef struct _XdgMimeCache XdgMimeCache;
+
+#ifdef XDG_PREFIX
+#define _xdg_mime_cache_new_from_file                 XDG_RESERVED_ENTRY(cache_new_from_file)
+#define _xdg_mime_cache_ref                           XDG_RESERVED_ENTRY(cache_ref)
+#define _xdg_mime_cache_unref                         XDG_RESERVED_ENTRY(cache_unref)
+#define _xdg_mime_cache_get_max_buffer_extents        XDG_RESERVED_ENTRY(cache_get_max_buffer_extents)
+#define _xdg_mime_cache_get_mime_type_for_data        XDG_RESERVED_ENTRY(cache_get_mime_type_for_data)
+#define _xdg_mime_cache_get_mime_type_for_file        XDG_RESERVED_ENTRY(cache_get_mime_type_for_file)
+#define _xdg_mime_cache_get_mime_type_from_file_name  XDG_RESERVED_ENTRY(cache_get_mime_type_from_file_name)
+#define _xdg_mime_cache_get_mime_types_from_file_name XDG_RESERVED_ENTRY(cache_get_mime_types_from_file_name)
+#define _xdg_mime_cache_list_mime_parents             XDG_RESERVED_ENTRY(cache_list_mime_parents)
+#define _xdg_mime_cache_mime_type_subclass            XDG_RESERVED_ENTRY(cache_mime_type_subclass)
+#define _xdg_mime_cache_unalias_mime_type             XDG_RESERVED_ENTRY(cache_unalias_mime_type)
+#define _xdg_mime_cache_get_icon                      XDG_RESERVED_ENTRY(cache_get_icon)
+#define _xdg_mime_cache_get_generic_icon              XDG_RESERVED_ENTRY(cache_get_generic_icon)
+#define _xdg_mime_cache_glob_dump                     XDG_RESERVED_ENTRY(cache_glob_dump)
+#endif
+
+extern XdgMimeCache **_caches;
+
+XdgMimeCache *_xdg_mime_cache_new_from_file (const char   *file_name);
+XdgMimeCache *_xdg_mime_cache_ref           (XdgMimeCache *cache);
+void          _xdg_mime_cache_unref         (XdgMimeCache *cache);
+
+
+const char  *_xdg_mime_cache_get_mime_type_for_data       (const void *data,
+		 				           size_t      len,
+							   int        *result_prio);
+const char  *_xdg_mime_cache_get_mime_type_for_file       (const char  *file_name,
+							   struct stat *statbuf);
+int          _xdg_mime_cache_get_mime_types_from_file_name (const char *file_name,
+							    const char  *mime_types[],
+							    int          n_mime_types);
+const char  *_xdg_mime_cache_get_mime_type_from_file_name (const char *file_name);
+int          _xdg_mime_cache_is_valid_mime_type           (const char *mime_type);
+int          _xdg_mime_cache_mime_type_equal              (const char *mime_a,
+						           const char *mime_b);
+int          _xdg_mime_cache_media_type_equal             (const char *mime_a,
+							   const char *mime_b);
+int          _xdg_mime_cache_mime_type_subclass           (const char *mime_a,
+							   const char *mime_b);
+char       **_xdg_mime_cache_list_mime_parents		  (const char *mime);
+const char  *_xdg_mime_cache_unalias_mime_type            (const char *mime);
+int          _xdg_mime_cache_get_max_buffer_extents       (void);
+const char  *_xdg_mime_cache_get_icon                     (const char *mime);
+const char  *_xdg_mime_cache_get_generic_icon             (const char *mime);
+void         _xdg_mime_cache_glob_dump                    (void);
+
+#endif /* __XDG_MIME_CACHE_H__ */
diff --git a/base/third_party/xdg_mime/xdgmimeglob.c b/base/third_party/xdg_mime/xdgmimeglob.c
new file mode 100644
index 0000000..f8434bc
--- /dev/null
+++ b/base/third_party/xdg_mime/xdgmimeglob.c
@@ -0,0 +1,691 @@
+/* -*- mode: C; c-file-style: "gnu" -*- */
+/* xdgmimeglob.c: Private file.  Datastructure for storing the globs.
+ *
+ * More info can be found at http://www.freedesktop.org/standards/
+ *
+ * Copyright (C) 2003  Red Hat, Inc.
+ * Copyright (C) 2003  Jonathan Blandford <jrb@alum.mit.edu>
+ *
+ * Licensed under the Academic Free License version 2.0
+ * Or under the following terms:
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include "xdgmimeglob.h"
+#include "xdgmimeint.h"
+#include <stdlib.h>
+#include <stdio.h>
+#include <assert.h>
+#include <string.h>
+#include <fnmatch.h>
+
+#ifndef	FALSE
+#define	FALSE	(0)
+#endif
+
+#ifndef	TRUE
+#define	TRUE	(!FALSE)
+#endif
+
+typedef struct XdgGlobHashNode XdgGlobHashNode;
+typedef struct XdgGlobList XdgGlobList;
+
+struct XdgGlobHashNode
+{
+  xdg_unichar_t character;
+  const char *mime_type;
+  int weight;
+  int case_sensitive;
+  XdgGlobHashNode *next;
+  XdgGlobHashNode *child;
+};
+struct XdgGlobList
+{
+  const char *data;
+  const char *mime_type;
+  int weight;
+  int case_sensitive;
+  XdgGlobList *next;
+};
+
+struct XdgGlobHash
+{
+  XdgGlobList *literal_list;
+  XdgGlobHashNode *simple_node;
+  XdgGlobList *full_list;
+};
+
+
+/* XdgGlobList
+ */
+static XdgGlobList *
+_xdg_glob_list_new (void)
+{
+  XdgGlobList *new_element;
+
+  new_element = calloc (1, sizeof (XdgGlobList));
+
+  return new_element;
+}
+
+/* Frees glob_list and all of its children */
+static void
+_xdg_glob_list_free (XdgGlobList *glob_list)
+{
+  XdgGlobList *ptr, *next;
+
+  ptr = glob_list;
+
+  while (ptr != NULL)
+    {
+      next = ptr->next;
+
+      if (ptr->data)
+	free ((void *) ptr->data);
+      if (ptr->mime_type)
+	free ((void *) ptr->mime_type);
+      free (ptr);
+
+      ptr = next;
+    }
+}
+
+static XdgGlobList *
+_xdg_glob_list_append (XdgGlobList *glob_list,
+		       void        *data,
+		       const char  *mime_type,
+		       int          weight,
+		       int          case_sensitive)
+{
+  XdgGlobList *new_element;
+  XdgGlobList *tmp_element;
+
+  tmp_element = glob_list;
+  while (tmp_element != NULL)
+    {
+      if (strcmp (tmp_element->data, data) == 0 &&
+	  strcmp (tmp_element->mime_type, mime_type) == 0)
+	return glob_list;
+
+      tmp_element = tmp_element->next;
+    }
+
+  new_element = _xdg_glob_list_new ();
+  new_element->data = data;
+  new_element->mime_type = mime_type;
+  new_element->weight = weight;
+  new_element->case_sensitive = case_sensitive;
+  if (glob_list == NULL)
+    return new_element;
+
+  tmp_element = glob_list;
+  while (tmp_element->next != NULL)
+    tmp_element = tmp_element->next;
+
+  tmp_element->next = new_element;
+
+  return glob_list;
+}
+
+/* XdgGlobHashNode
+ */
+
+static XdgGlobHashNode *
+_xdg_glob_hash_node_new (void)
+{
+  XdgGlobHashNode *glob_hash_node;
+
+  glob_hash_node = calloc (1, sizeof (XdgGlobHashNode));
+
+  return glob_hash_node;
+}
+
+static void
+_xdg_glob_hash_node_dump (XdgGlobHashNode *glob_hash_node,
+			  int depth)
+{
+  int i;
+  for (i = 0; i < depth; i++)
+    printf (" ");
+
+  printf ("%c", (char)glob_hash_node->character);
+  if (glob_hash_node->mime_type)
+    printf (" - %s %d\n", glob_hash_node->mime_type, glob_hash_node->weight);
+  else
+    printf ("\n");
+  if (glob_hash_node->child)
+    _xdg_glob_hash_node_dump (glob_hash_node->child, depth + 1);
+  if (glob_hash_node->next)
+    _xdg_glob_hash_node_dump (glob_hash_node->next, depth);
+}
+
+static XdgGlobHashNode *
+_xdg_glob_hash_insert_ucs4 (XdgGlobHashNode *glob_hash_node,
+			    xdg_unichar_t   *text,
+			    const char      *mime_type,
+			    int              weight,
+			    int              case_sensitive)
+{
+  XdgGlobHashNode *node;
+  xdg_unichar_t character;
+
+  character = text[0];
+
+  if ((glob_hash_node == NULL) ||
+      (character < glob_hash_node->character))
+    {
+      node = _xdg_glob_hash_node_new ();
+      node->character = character;
+      node->next = glob_hash_node;
+      glob_hash_node = node;
+    }
+  else if (character == glob_hash_node->character)
+    {
+      node = glob_hash_node;
+    }
+  else
+    {
+      XdgGlobHashNode *prev_node;
+      int found_node = FALSE;
+
+      /* Look for the first character of text in glob_hash_node, and insert it if we
+       * have to.*/
+      prev_node = glob_hash_node;
+      node = prev_node->next;
+
+      while (node != NULL)
+	{
+	  if (character < node->character)
+	    {
+	      node = _xdg_glob_hash_node_new ();
+	      node->character = character;
+	      node->next = prev_node->next;
+	      prev_node->next = node;
+
+	      found_node = TRUE;
+	      break;
+	    }
+	  else if (character == node->character)
+	    {
+	      found_node = TRUE;
+	      break;
+	    }
+	  prev_node = node;
+	  node = node->next;
+	}
+
+      if (! found_node)
+	{
+	  node = _xdg_glob_hash_node_new ();
+	  node->character = character;
+	  node->next = prev_node->next;
+	  prev_node->next = node;
+	}
+    }
+
+  text++;
+  if (*text == 0)
+    {
+      if (node->mime_type)
+	{
+	  if (strcmp (node->mime_type, mime_type) != 0)
+	    {
+	      XdgGlobHashNode *child;
+	      int found_node = FALSE;
+
+	      child = node->child;
+	      while (child && child->character == 0)
+		{
+		  if (strcmp (child->mime_type, mime_type) == 0)
+		    {
+		      found_node = TRUE;
+		      break;
+		    }
+		  child = child->next;
+		}
+
+	      if (!found_node)
+		{
+		  child = _xdg_glob_hash_node_new ();
+		  child->character = 0;
+		  child->mime_type = strdup (mime_type);
+		  child->weight = weight;
+		  child->case_sensitive = case_sensitive;
+		  child->child = NULL;
+		  child->next = node->child;
+		  node->child = child;
+		}
+	    }
+	}
+      else
+	{
+	  node->mime_type = strdup (mime_type);
+	  node->weight = weight;
+	  node->case_sensitive = case_sensitive;
+	}
+    }
+  else
+    {
+      node->child = _xdg_glob_hash_insert_ucs4 (node->child, text, mime_type, weight, case_sensitive);
+    }
+  return glob_hash_node;
+}
+
+/* text (the glob pattern) must be valid UTF-8 */
+static XdgGlobHashNode *
+_xdg_glob_hash_insert_text (XdgGlobHashNode *glob_hash_node,
+			    const char      *text,
+			    const char      *mime_type,
+			    int              weight,
+			    int              case_sensitive)
+{
+  XdgGlobHashNode *node;
+  xdg_unichar_t *unitext;
+  int len;
+
+  unitext = _xdg_convert_to_ucs4 (text, &len);
+  _xdg_reverse_ucs4 (unitext, len);
+  node = _xdg_glob_hash_insert_ucs4 (glob_hash_node, unitext, mime_type, weight, case_sensitive);
+  free (unitext);
+  return node;
+}
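+
+/* Illustrative note (ours, not from the upstream xdgmime sources): simple
+ * globs live in a reversed-suffix trie.  Inserting "*.gif" for image/gif
+ * sends ".gif" (the glob minus its leading '*') through the reverse and
+ * insert steps above, i.e. the UCS-4 string "fig.", producing roughly
+ *
+ *   'f' -> 'i' -> 'g' -> '.'   (the '.' node carries "image/gif" + weight)
+ *
+ * so lookups can walk a file name from its last character backwards.
+ */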
+
+typedef struct {
+  const char *mime;
+  int weight;
+} MimeWeight;
+
+static int
+_xdg_glob_hash_node_lookup_file_name (XdgGlobHashNode *glob_hash_node,
+				      const char      *file_name,
+				      int              len,
+				      int              case_sensitive_check,
+				      MimeWeight       mime_types[],
+				      int              n_mime_types)
+{
+  int n;
+  XdgGlobHashNode *node;
+  xdg_unichar_t character;
+
+  if (glob_hash_node == NULL)
+    return 0;
+
+  character = file_name[len - 1];
+
+  for (node = glob_hash_node; node && character >= node->character; node = node->next)
+    {
+      if (character == node->character)
+        {
+          len--;
+          n = 0;
+          if (len > 0) 
+	    {
+	      n = _xdg_glob_hash_node_lookup_file_name (node->child,
+							file_name,
+							len,
+							case_sensitive_check,
+							mime_types,
+							n_mime_types);
+	    }
+	  if (n == 0)
+	    {
+              if (node->mime_type &&
+		  (case_sensitive_check ||
+		   !node->case_sensitive))
+                {
+	          mime_types[n].mime = node->mime_type;
+		  mime_types[n].weight = node->weight;
+		  n++; 
+                }
+	      node = node->child;
+	      while (n < n_mime_types && node && node->character == 0)
+		{
+                  if (node->mime_type &&
+		      (case_sensitive_check ||
+		       !node->case_sensitive))
+		    {
+		      mime_types[n].mime = node->mime_type;
+		      mime_types[n].weight = node->weight;
+		      n++;
+		    }
+		  node = node->next;
+		}
+	    }
+	  return n;
+	}
+    }
+
+  return 0;
+}
+
+static int compare_mime_weight (const void *a, const void *b)
+{
+  const MimeWeight *aa = (const MimeWeight *)a;
+  const MimeWeight *bb = (const MimeWeight *)b;
+
+  return bb->weight - aa->weight;
+}
+
+#define ISUPPER(c)		((c) >= 'A' && (c) <= 'Z')
+static char *
+ascii_tolower (const char *str)
+{
+  char *p, *lower;
+
+  lower = strdup (str);
+  p = lower;
+  while (*p != 0)
+    {
+      char c = *p;
+      *p++ = ISUPPER (c) ? c - 'A' + 'a' : c;
+    }
+  return lower;
+}
+
+int
+_xdg_glob_hash_lookup_file_name (XdgGlobHash *glob_hash,
+				 const char  *file_name,
+				 const char  *mime_types[],
+				 int          n_mime_types)
+{
+  XdgGlobList *list;
+  int i, n;
+  MimeWeight mimes[10];
+  int n_mimes = 10;
+  int len;
+  char *lower_case;
+
+  /* First, check the literals */
+
+  assert (file_name != NULL && n_mime_types > 0);
+
+  n = 0;
+
+  lower_case = ascii_tolower (file_name);
+
+  for (list = glob_hash->literal_list; list; list = list->next)
+    {
+      if (strcmp ((const char *)list->data, file_name) == 0)
+	{
+	  mime_types[0] = list->mime_type;
+	  free (lower_case);
+	  return 1;
+	}
+    }
+
+  for (list = glob_hash->literal_list; list; list = list->next)
+    {
+      if (!list->case_sensitive &&
+	  strcmp ((const char *)list->data, lower_case) == 0)
+	{
+	  mime_types[0] = list->mime_type;
+	  free (lower_case);
+	  return 1;
+	}
+    }
+
+
+  len = strlen (file_name);
+  n = _xdg_glob_hash_node_lookup_file_name (glob_hash->simple_node, lower_case, len, FALSE,
+					    mimes, n_mimes);
+  if (n == 0)
+    n = _xdg_glob_hash_node_lookup_file_name (glob_hash->simple_node, file_name, len, TRUE,
+					      mimes, n_mimes);
+
+  if (n == 0)
+    {
+      for (list = glob_hash->full_list; list && n < n_mime_types; list = list->next)
+        {
+          if (fnmatch ((const char *)list->data, file_name, 0) == 0)
+	    {
+	      mimes[n].mime = list->mime_type;
+	      mimes[n].weight = list->weight;
+	      n++;
+	    }
+        }
+    }
+  free (lower_case);
+
+  qsort (mimes, n, sizeof (MimeWeight), compare_mime_weight);
+
+  if (n_mime_types < n)
+    n = n_mime_types;
+
+  for (i = 0; i < n; i++)
+    mime_types[i] = mimes[i].mime;
+
+  return n;
+}
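+
+/* Usage sketch (ours; `hash` is a hypothetical, already-populated table):
+ *
+ *   const char *types[5];
+ *   int n = _xdg_glob_hash_lookup_file_name (hash, "photo.GIF", types, 5);
+ *
+ * n > 0 on a hit, with types[0] the best (highest-weight) match; here the
+ * case-insensitive pass over the suffix trie finds the "*.gif" pattern.
+ */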
+
+
+
+/* XdgGlobHash
+ */
+
+XdgGlobHash *
+_xdg_glob_hash_new (void)
+{
+  XdgGlobHash *glob_hash;
+
+  glob_hash = calloc (1, sizeof (XdgGlobHash));
+
+  return glob_hash;
+}
+
+
+static void
+_xdg_glob_hash_free_nodes (XdgGlobHashNode *node)
+{
+  if (node)
+    {
+      if (node->child)
+       _xdg_glob_hash_free_nodes (node->child);
+      if (node->next)
+       _xdg_glob_hash_free_nodes (node->next);
+      if (node->mime_type)
+	free ((void *) node->mime_type);
+      free (node);
+    }
+}
+
+void
+_xdg_glob_hash_free (XdgGlobHash *glob_hash)
+{
+  _xdg_glob_list_free (glob_hash->literal_list);
+  _xdg_glob_list_free (glob_hash->full_list);
+  _xdg_glob_hash_free_nodes (glob_hash->simple_node);
+  free (glob_hash);
+}
+
+XdgGlobType
+_xdg_glob_determine_type (const char *glob)
+{
+  const char *ptr;
+  int maybe_in_simple_glob = FALSE;
+  int first_char = TRUE;
+
+  ptr = glob;
+
+  while (*ptr != '\0')
+    {
+      if (*ptr == '*' && first_char)
+	maybe_in_simple_glob = TRUE;
+      else if (*ptr == '\\' || *ptr == '[' || *ptr == '?' || *ptr == '*')
+	  return XDG_GLOB_FULL;
+
+      first_char = FALSE;
+      ptr = _xdg_utf8_next_char (ptr);
+    }
+  if (maybe_in_simple_glob)
+    return XDG_GLOB_SIMPLE;
+  else
+    return XDG_GLOB_LITERAL;
+}
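+
+/* For example (our annotation, mirroring the XdgGlobType comments):
+ *
+ *   _xdg_glob_determine_type ("Makefile") == XDG_GLOB_LITERAL
+ *   _xdg_glob_determine_type ("*.gif")    == XDG_GLOB_SIMPLE
+ *   _xdg_glob_determine_type ("x*.[ch]")  == XDG_GLOB_FULL
+ *
+ * A '*' counts as "simple" only in the first position; any other
+ * metacharacter (or a later '*') forces the fnmatch-based full path.
+ */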
+
+/* glob must be valid UTF-8 */
+void
+_xdg_glob_hash_append_glob (XdgGlobHash *glob_hash,
+			    const char  *glob,
+			    const char  *mime_type,
+			    int          weight,
+			    int          case_sensitive)
+{
+  XdgGlobType type;
+
+  assert (glob_hash != NULL);
+  assert (glob != NULL);
+
+  type = _xdg_glob_determine_type (glob);
+
+  switch (type)
+    {
+    case XDG_GLOB_LITERAL:
+      glob_hash->literal_list = _xdg_glob_list_append (glob_hash->literal_list, strdup (glob), strdup (mime_type), weight, case_sensitive);
+      break;
+    case XDG_GLOB_SIMPLE:
+      glob_hash->simple_node = _xdg_glob_hash_insert_text (glob_hash->simple_node, glob + 1, mime_type, weight, case_sensitive);
+      break;
+    case XDG_GLOB_FULL:
+      glob_hash->full_list = _xdg_glob_list_append (glob_hash->full_list, strdup (glob), strdup (mime_type), weight, case_sensitive);
+      break;
+    }
+}
+
+void
+_xdg_glob_hash_dump (XdgGlobHash *glob_hash)
+{
+  XdgGlobList *list;
+  printf ("LITERAL STRINGS\n");
+  if (!glob_hash || glob_hash->literal_list == NULL)
+    {
+      printf ("    None\n");
+    }
+  else
+    {
+      for (list = glob_hash->literal_list; list; list = list->next)
+	printf ("    %s - %s %d\n", (char *)list->data, list->mime_type, list->weight);
+    }
+  printf ("\nSIMPLE GLOBS\n");
+  if (!glob_hash || glob_hash->simple_node == NULL)
+    {
+      printf ("    None\n");
+    }
+  else
+    {
+      _xdg_glob_hash_node_dump (glob_hash->simple_node, 4);
+    }
+
+  printf ("\nFULL GLOBS\n");
+  if (!glob_hash || glob_hash->full_list == NULL)
+    {
+      printf ("    None\n");
+    }
+  else
+    {
+      for (list = glob_hash->full_list; list; list = list->next)
+	printf ("    %s - %s %d\n", (char *)list->data, list->mime_type, list->weight);
+    }
+}
+
+
+void
+_xdg_mime_glob_read_from_file (XdgGlobHash *glob_hash,
+			       const char  *file_name,
+			       int          version_two)
+{
+  FILE *glob_file;
+  char line[255];
+  char *p;
+
+  glob_file = fopen (file_name, "r");
+
+  if (glob_file == NULL)
+    return;
+
+  /* FIXME: Not UTF-8 safe.  Doesn't work if lines are greater than 255 chars.
+   * Blah */
+  while (fgets (line, 255, glob_file) != NULL)
+    {
+      char *colon;
+      char *mimetype, *glob, *end;
+      int weight;
+      int case_sensitive;
+
+      if (line[0] == '#' || line[0] == 0)
+	continue;
+
+      end = line + strlen(line) - 1;
+      if (*end == '\n')
+	*end = 0;
+
+      p = line;
+      if (version_two)
+	{
+	  colon = strchr (p, ':');
+	  if (colon == NULL)
+	    continue;
+	  *colon = 0;
+          weight = atoi (p);
+	  p = colon + 1;
+	}
+      else
+	weight = 50;
+
+      colon = strchr (p, ':');
+      if (colon == NULL)
+	continue;
+      *colon = 0;
+
+      mimetype = p;
+      p = colon + 1;
+      glob = p;
+      case_sensitive = FALSE;
+
+      colon = strchr (p, ':');
+      if (version_two && colon != NULL)
+	{
+	  char *flag;
+
+	  /* We got flags */
+	  *colon = 0;
+	  p = colon + 1;
+
+	  /* Flags end at next colon */
+	  colon = strchr (p, ':');
+	  if (colon != NULL)
+	    *colon = 0;
+
+	  flag = strstr (p, "cs");
+	  if (flag != NULL &&
+	      /* Start or after comma */
+	      (flag == p ||
+	       flag[-1] == ',') &&
+	      /* ends with comma or end of string */
+	      (flag[2] == 0 ||
+	       flag[2] == ','))
+	    case_sensitive = TRUE;
+	}
+
+      _xdg_glob_hash_append_glob (glob_hash, glob, mimetype, weight, case_sensitive);
+    }
+
+  fclose (glob_file);
+}
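+
+/* Sample input (ours), matching the parser above.  A version-one "globs"
+ * line is "mimetype:glob"; a version-two "globs2" line prepends a weight
+ * and may append a flags field after the glob:
+ *
+ *   globs:   text/x-makefile:Makefile
+ *   globs2:  50:image/gif:*.gif
+ *   globs2:  50:text/x-c++src:*.C:cs
+ *
+ * The "cs" flag is what sets case_sensitive above.
+ */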
diff --git a/base/third_party/xdg_mime/xdgmimeglob.h b/base/third_party/xdg_mime/xdgmimeglob.h
new file mode 100644
index 0000000..0018292
--- /dev/null
+++ b/base/third_party/xdg_mime/xdgmimeglob.h
@@ -0,0 +1,70 @@
+/* -*- mode: C; c-file-style: "gnu" -*- */
+/* xdgmimeglob.h: Private file.  Datastructure for storing the globs.
+ *
+ * More info can be found at http://www.freedesktop.org/standards/
+ *
+ * Copyright (C) 2003  Red Hat, Inc.
+ * Copyright (C) 2003  Jonathan Blandford <jrb@alum.mit.edu>
+ *
+ * Licensed under the Academic Free License version 2.0
+ * Or under the following terms:
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __XDG_MIME_GLOB_H__
+#define __XDG_MIME_GLOB_H__
+
+#include "xdgmime.h"
+
+typedef struct XdgGlobHash XdgGlobHash;
+
+typedef enum
+{
+  XDG_GLOB_LITERAL, /* Makefile */
+  XDG_GLOB_SIMPLE,  /* *.gif */
+  XDG_GLOB_FULL     /* x*.[ch] */
+} XdgGlobType;
+
+  
+#ifdef XDG_PREFIX
+#define _xdg_mime_glob_read_from_file         XDG_RESERVED_ENTRY(glob_read_from_file)
+#define _xdg_glob_hash_new                    XDG_RESERVED_ENTRY(hash_new)
+#define _xdg_glob_hash_free                   XDG_RESERVED_ENTRY(hash_free)
+#define _xdg_glob_hash_lookup_file_name       XDG_RESERVED_ENTRY(hash_lookup_file_name)
+#define _xdg_glob_hash_append_glob            XDG_RESERVED_ENTRY(hash_append_glob)
+#define _xdg_glob_determine_type              XDG_RESERVED_ENTRY(determine_type)
+#define _xdg_glob_hash_dump                   XDG_RESERVED_ENTRY(hash_dump)
+#endif
+
+void         _xdg_mime_glob_read_from_file   (XdgGlobHash *glob_hash,
+					      const char  *file_name,
+					      int          version_two);
+XdgGlobHash *_xdg_glob_hash_new              (void);
+void         _xdg_glob_hash_free             (XdgGlobHash *glob_hash);
+int          _xdg_glob_hash_lookup_file_name (XdgGlobHash *glob_hash,
+					      const char  *text,
+					      const char  *mime_types[],
+					      int          n_mime_types);
+void         _xdg_glob_hash_append_glob      (XdgGlobHash *glob_hash,
+					      const char  *glob,
+					      const char  *mime_type,
+					      int          weight,
+					      int          case_sensitive);
+XdgGlobType  _xdg_glob_determine_type        (const char  *glob);
+void         _xdg_glob_hash_dump             (XdgGlobHash *glob_hash);
+
+#endif /* __XDG_MIME_GLOB_H__ */
diff --git a/base/third_party/xdg_mime/xdgmimeicon.c b/base/third_party/xdg_mime/xdgmimeicon.c
new file mode 100644
index 0000000..05c9473
--- /dev/null
+++ b/base/third_party/xdg_mime/xdgmimeicon.c
@@ -0,0 +1,183 @@
+/* -*- mode: C; c-file-style: "gnu" -*- */
+/* xdgmimeicon.c: Private file.  Datastructure for storing the icons.
+ *
+ * More info can be found at http://www.freedesktop.org/standards/
+ *
+ * Copyright (C) 2008  Red Hat, Inc.
+ *
+ * Licensed under the Academic Free License version 2.0
+ * Or under the following terms:
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include "xdgmimeicon.h"
+#include "xdgmimeint.h"
+#include <stdlib.h>
+#include <stdio.h>
+#include <assert.h>
+#include <string.h>
+#include <fnmatch.h>
+
+#ifndef	FALSE
+#define	FALSE	(0)
+#endif
+
+#ifndef	TRUE
+#define	TRUE	(!FALSE)
+#endif
+
+typedef struct XdgIcon XdgIcon;
+
+struct XdgIcon 
+{
+  char *mime_type;
+  char *icon_name;
+};
+
+struct XdgIconList
+{
+  struct XdgIcon *icons;
+  int n_icons;
+};
+
+XdgIconList *
+_xdg_mime_icon_list_new (void)
+{
+  XdgIconList *list;
+
+  list = malloc (sizeof (XdgIconList));
+
+  list->icons = NULL;
+  list->n_icons = 0;
+
+  return list;
+}
+
+void         
+_xdg_mime_icon_list_free (XdgIconList *list)
+{
+  int i;
+
+  if (list->icons)
+    {
+      for (i = 0; i < list->n_icons; i++)
+	{
+	  free (list->icons[i].mime_type);
+	  free (list->icons[i].icon_name);
+	}
+      free (list->icons);
+    }
+  free (list);
+}
+
+static int
+icon_entry_cmp (const void *v1, const void *v2)
+{
+  return strcmp (((XdgIcon *)v1)->mime_type, ((XdgIcon *)v2)->mime_type);
+}
+
+const char  *
+_xdg_mime_icon_list_lookup (XdgIconList *list,
+			    const char  *mime_type)
+{
+  XdgIcon *entry;
+  XdgIcon key;
+
+  if (list->n_icons > 0)
+    {
+      key.mime_type = (char *)mime_type;
+      key.icon_name = NULL;
+
+      entry = bsearch (&key, list->icons, list->n_icons,
+		       sizeof (XdgIcon), icon_entry_cmp);
+      if (entry)
+        return entry->icon_name;
+    }
+
+  return NULL;
+}
+
+void
+_xdg_mime_icon_read_from_file (XdgIconList *list,
+			       const char   *file_name)
+{
+  FILE *file;
+  char line[255];
+  int alloc;
+
+  file = fopen (file_name, "r");
+
+  if (file == NULL)
+    return;
+
+  /* FIXME: Not UTF-8 safe.  Doesn't work if lines are greater than 255 chars.
+   * Blah */
+  alloc = list->n_icons + 16;
+  list->icons = realloc (list->icons, alloc * sizeof (XdgIcon));
+  while (fgets (line, 255, file) != NULL)
+    {
+      char *sep;
+      if (line[0] == '#')
+	continue;
+
+      sep = strchr (line, ':');
+      if (sep == NULL)
+	continue;
+      *(sep++) = '\000';
+      sep[strlen (sep) -1] = '\000';
+      if (list->n_icons == alloc)
+	{
+	  alloc <<= 1;
+	  list->icons = realloc (list->icons, 
+				   alloc * sizeof (XdgIcon));
+	}
+      list->icons[list->n_icons].mime_type = strdup (line);
+      list->icons[list->n_icons].icon_name = strdup (sep);
+      list->n_icons++;
+    }
+  list->icons = realloc (list->icons, 
+			   list->n_icons * sizeof (XdgIcon));
+
+  fclose (file);  
+  
+  if (list->n_icons > 1)
+    qsort (list->icons, list->n_icons, 
+           sizeof (XdgIcon), icon_entry_cmp);
+}
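+
+/* Sample input (ours): each non-comment line of an icons file is
+ * "mimetype:iconname", e.g.
+ *
+ *   application/x-cd-image:media-optical
+ *
+ * _xdg_mime_icon_list_lookup then resolves a MIME type via bsearch over
+ * the array qsort'ed above.
+ */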
+
+
+void
+_xdg_mime_icon_list_dump (XdgIconList *list)
+{
+  int i;
+
+  if (list->icons)
+    {
+      for (i = 0; i < list->n_icons; i++)
+	{
+	  printf ("%s %s\n", 
+		  list->icons[i].mime_type,
+		  list->icons[i].icon_name);
+	}
+    }
+}
+
+
diff --git a/base/third_party/xdg_mime/xdgmimeicon.h b/base/third_party/xdg_mime/xdgmimeicon.h
new file mode 100644
index 0000000..b5f2583
--- /dev/null
+++ b/base/third_party/xdg_mime/xdgmimeicon.h
@@ -0,0 +1,50 @@
+/* -*- mode: C; c-file-style: "gnu" -*- */
+/* xdgmimeicon.h: Private file.  Datastructure for storing the icons.
+ *
+ * More info can be found at http://www.freedesktop.org/standards/
+ *
+ * Copyright (C) 2008  Red Hat, Inc.
+ *
+ * Licensed under the Academic Free License version 2.0
+ * Or under the following terms:
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __XDG_MIME_ICON_H__
+#define __XDG_MIME_ICON_H__
+
+#include "xdgmime.h"
+
+typedef struct XdgIconList XdgIconList;
+
+#ifdef XDG_PREFIX
+#define _xdg_mime_icon_read_from_file        XDG_ENTRY(icon_read_from_file)
+#define _xdg_mime_icon_list_new              XDG_ENTRY(icon_list_new)
+#define _xdg_mime_icon_list_free             XDG_ENTRY(icon_list_free)
+#define _xdg_mime_icon_list_lookup           XDG_ENTRY(icon_list_lookup)
+#define _xdg_mime_icon_list_dump             XDG_ENTRY(icon_list_dump)
+#endif
+
+void          _xdg_mime_icon_read_from_file (XdgIconList *list,
+					    const char   *file_name);
+XdgIconList  *_xdg_mime_icon_list_new       (void);
+void          _xdg_mime_icon_list_free      (XdgIconList *list);
+const char   *_xdg_mime_icon_list_lookup    (XdgIconList *list,
+					     const char  *mime);
+void          _xdg_mime_icon_list_dump      (XdgIconList *list);
+
+#endif /* __XDG_MIME_ICON_H__ */
diff --git a/base/third_party/xdg_mime/xdgmimeint.c b/base/third_party/xdg_mime/xdgmimeint.c
new file mode 100644
index 0000000..cf789d9
--- /dev/null
+++ b/base/third_party/xdg_mime/xdgmimeint.c
@@ -0,0 +1,206 @@
+/* -*- mode: C; c-file-style: "gnu" -*- */
+/* xdgmimeint.c: Internal defines and functions.
+ *
+ * More info can be found at http://www.freedesktop.org/standards/
+ *
+ * Copyright (C) 2003  Red Hat, Inc.
+ * Copyright (C) 2003  Jonathan Blandford <jrb@alum.mit.edu>
+ *
+ * Licensed under the Academic Free License version 2.0
+ * Or under the following terms:
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include "xdgmimeint.h"
+#include <ctype.h>
+#include <string.h>
+
+#ifndef	FALSE
+#define	FALSE	(0)
+#endif
+
+#ifndef	TRUE
+#define	TRUE	(!FALSE)
+#endif
+
+static const char _xdg_utf8_skip_data[256] = {
+  1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+  1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+  1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+  1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+  1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+  1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+  2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
+  3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,6,6,1,1
+};
+
+const char * const _xdg_utf8_skip = _xdg_utf8_skip_data;
+
+
+
+/* Decodes the first UTF-8 character of source and returns it as UCS-4. */
+xdg_unichar_t
+_xdg_utf8_to_ucs4(const char *source)
+{
+  xdg_unichar_t ucs32;
+  if( ! ( *source & 0x80 ) )
+    {
+      ucs32 = *source;
+    }
+  else
+    {
+      int bytelength = 0;
+      xdg_unichar_t result;
+      if ( ! (*source & 0x40) )
+	{
+	  ucs32 = *source;
+	}
+      else
+	{
+	  if ( ! (*source & 0x20) )
+	    {
+	      result = *source++ & 0x1F;
+	      bytelength = 2;
+	    }
+	  else if ( ! (*source & 0x10) )
+	    {
+	      result = *source++ & 0x0F;
+	      bytelength = 3;
+	    }
+	  else if ( ! (*source & 0x08) )
+	    {
+	      result = *source++ & 0x07;
+	      bytelength = 4;
+	    }
+	  else if ( ! (*source & 0x04) )
+	    {
+	      result = *source++ & 0x03;
+	      bytelength = 5;
+	    }
+	  else if ( ! (*source & 0x02) )
+	    {
+	      result = *source++ & 0x01;
+	      bytelength = 6;
+	    }
+	  else
+	    {
+	      result = *source++;
+	      bytelength = 1;
+	    }
+
+	  for ( bytelength --; bytelength > 0; bytelength -- )
+	    {
+	      result <<= 6;
+	      result |= *source++ & 0x3F;
+	    }
+	  ucs32 = result;
+	}
+    }
+  return ucs32;
+}
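+
+/* Worked example (ours): U+00E9 (e-acute) is encoded in UTF-8 as
+ * 0xC3 0xA9.  0xC3 has the 0x20 bit clear, so bytelength = 2 and the
+ * accumulator starts as 0xC3 & 0x1F = 0x03; folding in 0xA9 & 0x3F = 0x29
+ * gives (0x03 << 6) | 0x29 = 0xE9.  Only the first character is decoded;
+ * callers advance with _xdg_utf8_next_char.
+ */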
+
+
+/* hullo.  this is great code.  don't rewrite it */
+
+xdg_unichar_t
+_xdg_ucs4_to_lower (xdg_unichar_t source)
+{
+  /* FIXME: Do a real to_lower sometime */
+  /* CaseFolding-3.2.0.txt has a table of rules. */
+  if ((source & 0xFF) == source)
+    return (xdg_unichar_t) tolower ((unsigned char) source);
+  return source;
+}
+
+int
+_xdg_utf8_validate (const char *source)
+{
+  /* FIXME: actually write */
+  return TRUE;
+}
+
+const char *
+_xdg_get_base_name (const char *file_name)
+{
+  const char *base_name;
+
+  if (file_name == NULL)
+    return NULL;
+
+  base_name = strrchr (file_name, '/');
+
+  if (base_name == NULL)
+    return file_name;
+  else
+    return base_name + 1;
+}
+
+xdg_unichar_t *
+_xdg_convert_to_ucs4 (const char *source, int *len)
+{
+  xdg_unichar_t *out;
+  int i;
+  const char *p;
+
+  out = malloc (sizeof (xdg_unichar_t) * (strlen (source) + 1));
+
+  p = source;
+  i = 0;
+  while (*p) 
+    {
+      out[i++] = _xdg_utf8_to_ucs4 (p);
+      p = _xdg_utf8_next_char (p); 
+    }
+  out[i] = 0;
+  *len = i;
+ 
+  return out;
+}
+
+void
+_xdg_reverse_ucs4 (xdg_unichar_t *source, int len)
+{
+  xdg_unichar_t c;
+  int i;
+
+  for (i = 0; i < len - i - 1; i++) 
+    {
+      c = source[i]; 
+      source[i] = source[len - i - 1];
+      source[len - i - 1] = c;
+    }
+}
+
+const char *
+_xdg_binary_or_text_fallback(const void *data, size_t len)
+{
+  unsigned char *chardata;
+  int i;
+
+  chardata = (unsigned char *) data;
+  for (i = 0; i < 32 && i < len; ++i)
+    {
+       if (chardata[i] < 32 && chardata[i] != 9 && chardata[i] != 10 && chardata[i] != 13)
+         return XDG_MIME_TYPE_UNKNOWN; /* binary data */
+    }
+
+  return XDG_MIME_TYPE_TEXTPLAIN;
+}
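+
+/* Usage sketch (ours): this is the fallback used when no magic rule
+ * matches.
+ *
+ *   const char buf[] = "plain text\n";
+ *   const char *t = _xdg_binary_or_text_fallback (buf, sizeof buf - 1);
+ *
+ * t is XDG_MIME_TYPE_TEXTPLAIN here: the first 32 bytes contain no
+ * control bytes other than tab (9), newline (10) or return (13).
+ */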
diff --git a/base/third_party/xdg_mime/xdgmimeint.h b/base/third_party/xdg_mime/xdgmimeint.h
new file mode 100644
index 0000000..9e8b2cb
--- /dev/null
+++ b/base/third_party/xdg_mime/xdgmimeint.h
@@ -0,0 +1,78 @@
+/* -*- mode: C; c-file-style: "gnu" -*- */
+/* xdgmimeint.h: Internal defines and functions.
+ *
+ * More info can be found at http://www.freedesktop.org/standards/
+ *
+ * Copyright (C) 2003  Red Hat, Inc.
+ * Copyright (C) 2003  Jonathan Blandford <jrb@alum.mit.edu>
+ *
+ * Licensed under the Academic Free License version 2.0
+ * Or under the following terms:
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __XDG_MIME_INT_H__
+#define __XDG_MIME_INT_H__
+
+#include "xdgmime.h"
+
+
+#ifndef	FALSE
+#define	FALSE (0)
+#endif
+
+#ifndef	TRUE
+#define	TRUE (!FALSE)
+#endif
+
+/* FIXME: Needs to be configure check */
+typedef unsigned int   xdg_unichar_t;
+typedef unsigned char  xdg_uchar8_t;
+typedef unsigned short xdg_uint16_t;
+typedef unsigned int   xdg_uint32_t;
+
+#ifdef XDG_PREFIX
+#define _xdg_utf8_skip       XDG_RESERVED_ENTRY(utf8_skip)
+#define _xdg_utf8_to_ucs4    XDG_RESERVED_ENTRY(utf8_to_ucs4)
+#define _xdg_ucs4_to_lower   XDG_RESERVED_ENTRY(ucs4_to_lower)
+#define _xdg_utf8_validate   XDG_RESERVED_ENTRY(utf8_validate)
+#define _xdg_get_base_name   XDG_RESERVED_ENTRY(get_base_name)
+#define _xdg_convert_to_ucs4 XDG_RESERVED_ENTRY(convert_to_ucs4)
+#define _xdg_reverse_ucs4    XDG_RESERVED_ENTRY(reverse_ucs4)
+#endif
+
+#define SWAP_BE16_TO_LE16(val) (xdg_uint16_t)(((xdg_uint16_t)(val) << 8)|((xdg_uint16_t)(val) >> 8))
+
+#define SWAP_BE32_TO_LE32(val) (xdg_uint32_t)((((xdg_uint32_t)(val) & 0xFF000000U) >> 24) |	\
+					      (((xdg_uint32_t)(val) & 0x00FF0000U) >> 8) |	\
+					      (((xdg_uint32_t)(val) & 0x0000FF00U) << 8) |	\
+					      (((xdg_uint32_t)(val) & 0x000000FFU) << 24))
+/* UTF-8 utils
+ */
+extern const char *const _xdg_utf8_skip;
+#define _xdg_utf8_next_char(p) (char *)((p) + _xdg_utf8_skip[*(unsigned char *)(p)])
+#define _xdg_utf8_char_size(p) (int) (_xdg_utf8_skip[*(unsigned char *)(p)])
+
+xdg_unichar_t  _xdg_utf8_to_ucs4  (const char    *source);
+xdg_unichar_t  _xdg_ucs4_to_lower (xdg_unichar_t  source);
+int            _xdg_utf8_validate (const char    *source);
+xdg_unichar_t *_xdg_convert_to_ucs4 (const char *source, int *len);
+void           _xdg_reverse_ucs4 (xdg_unichar_t *source, int len);
+const char    *_xdg_get_base_name (const char    *file_name);
+const char    *_xdg_binary_or_text_fallback(const void *data, size_t len);
+
+#endif /* __XDG_MIME_INT_H__ */
diff --git a/base/third_party/xdg_mime/xdgmimemagic.c b/base/third_party/xdg_mime/xdgmimemagic.c
new file mode 100644
index 0000000..a2320f5
--- /dev/null
+++ b/base/third_party/xdg_mime/xdgmimemagic.c
@@ -0,0 +1,813 @@
+/* -*- mode: C; c-file-style: "gnu" -*- */
+/* xdgmimemagic.c: Private file.  Datastructure for storing magic files.
+ *
+ * More info can be found at http://www.freedesktop.org/standards/
+ *
+ * Copyright (C) 2003  Red Hat, Inc.
+ * Copyright (C) 2003  Jonathan Blandford <jrb@alum.mit.edu>
+ *
+ * Licensed under the Academic Free License version 2.0
+ * Or under the following terms:
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <assert.h>
+#include "xdgmimemagic.h"
+#include "xdgmimeint.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+#include <errno.h>
+#include <limits.h>
+
+#ifndef	FALSE
+#define	FALSE	(0)
+#endif
+
+#ifndef	TRUE
+#define	TRUE	(!FALSE)
+#endif
+
+#if !defined getc_unlocked && !defined HAVE_GETC_UNLOCKED
+# define getc_unlocked(fp) getc (fp)
+#endif
+
+typedef struct XdgMimeMagicMatch XdgMimeMagicMatch;
+typedef struct XdgMimeMagicMatchlet XdgMimeMagicMatchlet;
+
+typedef enum
+{
+  XDG_MIME_MAGIC_SECTION,
+  XDG_MIME_MAGIC_MAGIC,
+  XDG_MIME_MAGIC_ERROR,
+  XDG_MIME_MAGIC_EOF
+} XdgMimeMagicState;
+
+struct XdgMimeMagicMatch
+{
+  const char *mime_type;
+  int priority;
+  XdgMimeMagicMatchlet *matchlet;
+  XdgMimeMagicMatch *next;
+};
+
+
+struct XdgMimeMagicMatchlet
+{
+  int indent;
+  int offset;
+  unsigned int value_length;
+  unsigned char *value;
+  unsigned char *mask;
+  unsigned int range_length;
+  unsigned int word_size;
+  XdgMimeMagicMatchlet *next;
+};
+
+
+struct XdgMimeMagic
+{
+  XdgMimeMagicMatch *match_list;
+  int max_extent;
+};
+
+static XdgMimeMagicMatch *
+_xdg_mime_magic_match_new (void)
+{
+  return calloc (1, sizeof (XdgMimeMagicMatch));
+}
+
+
+static XdgMimeMagicMatchlet *
+_xdg_mime_magic_matchlet_new (void)
+{
+  XdgMimeMagicMatchlet *matchlet;
+
+  matchlet = malloc (sizeof (XdgMimeMagicMatchlet));
+
+  matchlet->indent = 0;
+  matchlet->offset = 0;
+  matchlet->value_length = 0;
+  matchlet->value = NULL;
+  matchlet->mask = NULL;
+  matchlet->range_length = 1;
+  matchlet->word_size = 1;
+  matchlet->next = NULL;
+
+  return matchlet;
+}
+
+
+static void
+_xdg_mime_magic_matchlet_free (XdgMimeMagicMatchlet *mime_magic_matchlet)
+{
+  if (mime_magic_matchlet)
+    {
+      if (mime_magic_matchlet->next)
+	_xdg_mime_magic_matchlet_free (mime_magic_matchlet->next);
+      if (mime_magic_matchlet->value)
+	free (mime_magic_matchlet->value);
+      if (mime_magic_matchlet->mask)
+	free (mime_magic_matchlet->mask);
+      free (mime_magic_matchlet);
+    }
+}
+
+
+/* Frees mime_magic_match and the remainder of its list
+ */
+static void
+_xdg_mime_magic_match_free (XdgMimeMagicMatch *mime_magic_match)
+{
+  XdgMimeMagicMatch *ptr, *next;
+
+  ptr = mime_magic_match;
+  while (ptr)
+    {
+      next = ptr->next;
+
+      if (ptr->mime_type)
+	free ((void *) ptr->mime_type);
+      if (ptr->matchlet)
+	_xdg_mime_magic_matchlet_free (ptr->matchlet);
+      free (ptr);
+
+      ptr = next;
+    }
+}
+
+/* Reads in a hunk of data until a newline character or a '\000' is hit.  The
+ * returned string is null terminated, and doesn't include the newline.
+ */
+static unsigned char *
+_xdg_mime_magic_read_to_newline (FILE *magic_file,
+				 int  *end_of_file)
+{
+  unsigned char *retval;
+  int c;
+  int len, pos;
+
+  len = 128;
+  pos = 0;
+  retval = malloc (len);
+  *end_of_file = FALSE;
+
+  while (TRUE)
+    {
+      c = getc_unlocked (magic_file);
+      if (c == EOF)
+	{
+	  *end_of_file = TRUE;
+	  break;
+	}
+      if (c == '\n' || c == '\000')
+	break;
+      retval[pos++] = (unsigned char) c;
+      if (pos % 128 == 127)
+	{
+	  len = len + 128;
+	  retval = realloc (retval, len);
+	}
+    }
+
+  retval[pos] = '\000';
+  return retval;
+}
+
+/* Returns the number read from the file, or -1 if no number could be read.
+ */
+static int
+_xdg_mime_magic_read_a_number (FILE *magic_file,
+			       int  *end_of_file)
+{
+  /* LONG_MAX is about 20 characters on my system */
+#define MAX_NUMBER_SIZE 30
+  char number_string[MAX_NUMBER_SIZE + 1];
+  int pos = 0;
+  int c;
+  long retval = -1;
+
+  while (TRUE)
+    {
+      c = getc_unlocked (magic_file);
+
+      if (c == EOF)
+	{
+	  *end_of_file = TRUE;
+	  break;
+	}
+      if (! isdigit (c))
+	{
+	  ungetc (c, magic_file);
+	  break;
+	}
+      number_string[pos] = (char) c;
+      pos++;
+      if (pos == MAX_NUMBER_SIZE)
+	break;
+    }
+  if (pos > 0)
+    {
+      number_string[pos] = '\000';
+      errno = 0;
+      retval = strtol (number_string, NULL, 10);
+
+      if ((retval < INT_MIN) || (retval > INT_MAX) || (errno != 0))
+	return -1;
+    }
+
+  return retval;
+}
+
+/* Headers are of the format:
+ * [<priority>:<mime-type>]
+ */
+static XdgMimeMagicState
+_xdg_mime_magic_parse_header (FILE *magic_file, XdgMimeMagicMatch *match)
+{
+  int c;
+  char *buffer;
+  char *end_ptr;
+  int end_of_file = 0;
+
+  assert (magic_file != NULL);
+  assert (match != NULL);
+
+  c = getc_unlocked (magic_file);
+  if (c == EOF)
+    return XDG_MIME_MAGIC_EOF;
+  if (c != '[')
+    return XDG_MIME_MAGIC_ERROR;
+
+  match->priority = _xdg_mime_magic_read_a_number (magic_file, &end_of_file);
+  if (end_of_file)
+    return XDG_MIME_MAGIC_EOF;
+  if (match->priority == -1)
+    return XDG_MIME_MAGIC_ERROR;
+
+  c = getc_unlocked (magic_file);
+  if (c == EOF)
+    return XDG_MIME_MAGIC_EOF;
+  if (c != ':')
+    return XDG_MIME_MAGIC_ERROR;
+
+  buffer = (char *)_xdg_mime_magic_read_to_newline (magic_file, &end_of_file);
+  if (end_of_file)
+    return XDG_MIME_MAGIC_EOF;
+
+  end_ptr = buffer;
+  while (*end_ptr != ']' && *end_ptr != '\000' && *end_ptr != '\n')
+    end_ptr++;
+  if (*end_ptr != ']')
+    {
+      free (buffer);
+      return XDG_MIME_MAGIC_ERROR;
+    }
+  *end_ptr = '\000';
+
+  match->mime_type = strdup (buffer);
+  free (buffer);
+
+  return XDG_MIME_MAGIC_MAGIC;
+}
+
+static XdgMimeMagicState
+_xdg_mime_magic_parse_error (FILE *magic_file)
+{
+  int c;
+
+  while (1)
+    {
+      c = getc_unlocked (magic_file);
+      if (c == EOF)
+	return XDG_MIME_MAGIC_EOF;
+      if (c == '\n')
+	return XDG_MIME_MAGIC_SECTION;
+    }
+}
+
+/* Magic lines are of the format:
+ * [ indent ] ">" start-offset "=" value
+ * [ "&" mask ] [ "~" word-size ] [ "+" range-length ] "\n"
+ */
+static XdgMimeMagicState
+_xdg_mime_magic_parse_magic_line (FILE              *magic_file,
+				  XdgMimeMagicMatch *match)
+{
+  XdgMimeMagicMatchlet *matchlet;
+  int c;
+  int end_of_file;
+  int indent = 0;
+  int bytes_read;
+
+  assert (magic_file != NULL);
+
+  /* Sniff the buffer to make sure it's a valid line */
+  c = getc_unlocked (magic_file);
+  if (c == EOF)
+    return XDG_MIME_MAGIC_EOF;
+  else if (c == '[')
+    {
+      ungetc (c, magic_file);
+      return XDG_MIME_MAGIC_SECTION;
+    }
+  else if (c == '\n')
+    return XDG_MIME_MAGIC_MAGIC;
+
+  /* At this point, it must be a digit or a '>' */
+  end_of_file = FALSE;
+  if (isdigit (c))
+    {
+      ungetc (c, magic_file);
+      indent = _xdg_mime_magic_read_a_number (magic_file, &end_of_file);
+      if (end_of_file)
+	return XDG_MIME_MAGIC_EOF;
+      if (indent == -1)
+	return XDG_MIME_MAGIC_ERROR;
+      c = getc_unlocked (magic_file);
+      if (c == EOF)
+	return XDG_MIME_MAGIC_EOF;
+    }
+
+  if (c != '>')
+    return XDG_MIME_MAGIC_ERROR;
+
+  matchlet = _xdg_mime_magic_matchlet_new ();
+  matchlet->indent = indent;
+  matchlet->offset = _xdg_mime_magic_read_a_number (magic_file, &end_of_file);
+  if (end_of_file)
+    {
+      _xdg_mime_magic_matchlet_free (matchlet);
+      return XDG_MIME_MAGIC_EOF;
+    }
+  if (matchlet->offset == -1)
+    {
+      _xdg_mime_magic_matchlet_free (matchlet);
+      return XDG_MIME_MAGIC_ERROR;
+    }
+  c = getc_unlocked (magic_file);
+  if (c == EOF)
+    {
+      _xdg_mime_magic_matchlet_free (matchlet);
+      return XDG_MIME_MAGIC_EOF;
+    }
+  else if (c != '=')
+    {
+      _xdg_mime_magic_matchlet_free (matchlet);
+      return XDG_MIME_MAGIC_ERROR;
+    }
+
+  /* Next two bytes determine how long the value is */
+  matchlet->value_length = 0;
+  c = getc_unlocked (magic_file);
+  if (c == EOF)
+    {
+      _xdg_mime_magic_matchlet_free (matchlet);
+      return XDG_MIME_MAGIC_EOF;
+    }
+  matchlet->value_length = c & 0xFF;
+  matchlet->value_length = matchlet->value_length << 8;
+
+  c = getc_unlocked (magic_file);
+  if (c == EOF)
+    {
+      _xdg_mime_magic_matchlet_free (matchlet);
+      return XDG_MIME_MAGIC_EOF;
+    }
+  matchlet->value_length = matchlet->value_length + (c & 0xFF);
+
+  matchlet->value = malloc (matchlet->value_length);
+
+  /* OOM */
+  if (matchlet->value == NULL)
+    {
+      _xdg_mime_magic_matchlet_free (matchlet);
+      return XDG_MIME_MAGIC_ERROR;
+    }
+  bytes_read = fread (matchlet->value, 1, matchlet->value_length, magic_file);
+  if (bytes_read != matchlet->value_length)
+    {
+      _xdg_mime_magic_matchlet_free (matchlet);
+      if (feof (magic_file))
+	return XDG_MIME_MAGIC_EOF;
+      else
+	return XDG_MIME_MAGIC_ERROR;
+    }
+
+  c = getc_unlocked (magic_file);
+  if (c == '&')
+    {
+      matchlet->mask = malloc (matchlet->value_length);
+      /* OOM */
+      if (matchlet->mask == NULL)
+	{
+	  _xdg_mime_magic_matchlet_free (matchlet);
+	  return XDG_MIME_MAGIC_ERROR;
+	}
+      bytes_read = fread (matchlet->mask, 1, matchlet->value_length, magic_file);
+      if (bytes_read != matchlet->value_length)
+	{
+	  _xdg_mime_magic_matchlet_free (matchlet);
+	  if (feof (magic_file))
+	    return XDG_MIME_MAGIC_EOF;
+	  else
+	    return XDG_MIME_MAGIC_ERROR;
+	}
+      c = getc_unlocked (magic_file);
+    }
+
+  if (c == '~')
+    {
+      matchlet->word_size = _xdg_mime_magic_read_a_number (magic_file, &end_of_file);
+      if (end_of_file)
+	{
+	  _xdg_mime_magic_matchlet_free (matchlet);
+	  return XDG_MIME_MAGIC_EOF;
+	}
+      if (matchlet->word_size != 0 &&
+	  matchlet->word_size != 1 &&
+	  matchlet->word_size != 2 &&
+	  matchlet->word_size != 4)
+	{
+	  _xdg_mime_magic_matchlet_free (matchlet);
+	  return XDG_MIME_MAGIC_ERROR;
+	}
+      c = getc_unlocked (magic_file);
+    }
+
+  if (c == '+')
+    {
+      matchlet->range_length = _xdg_mime_magic_read_a_number (magic_file, &end_of_file);
+      if (end_of_file)
+	{
+	  _xdg_mime_magic_matchlet_free (matchlet);
+	  return XDG_MIME_MAGIC_EOF;
+	}
+      if (matchlet->range_length == -1)
+	{
+	  _xdg_mime_magic_matchlet_free (matchlet);
+	  return XDG_MIME_MAGIC_ERROR;
+	}
+      c = getc_unlocked (magic_file);
+    }
+
+
+  if (c == '\n')
+    {
+      /* We clean up the matchlet, byte swapping if needed */
+      if (matchlet->word_size > 1)
+	{
+	  int i;
+	  if (matchlet->value_length % matchlet->word_size != 0)
+	    {
+	      _xdg_mime_magic_matchlet_free (matchlet);
+	      return XDG_MIME_MAGIC_ERROR;
+	    }
+	  /* FIXME: need to get this defined in a <config.h> style file */
+#if LITTLE_ENDIAN
+	  for (i = 0; i < matchlet->value_length; i = i + matchlet->word_size)
+	    {
+	      if (matchlet->word_size == 2)
+		*((xdg_uint16_t *) (matchlet->value + i)) = SWAP_BE16_TO_LE16 (*((xdg_uint16_t *) (matchlet->value + i)));
+	      else if (matchlet->word_size == 4)
+		*((xdg_uint32_t *) (matchlet->value + i)) = SWAP_BE32_TO_LE32 (*((xdg_uint32_t *) (matchlet->value + i)));
+	      if (matchlet->mask)
+		{
+		  if (matchlet->word_size == 2)
+		    *((xdg_uint16_t *) (matchlet->mask + i)) = SWAP_BE16_TO_LE16 (*((xdg_uint16_t *) (matchlet->mask + i)));
+		  else if (matchlet->word_size == 4)
+		    *((xdg_uint32_t *) (matchlet->mask + i)) = SWAP_BE32_TO_LE32 (*((xdg_uint32_t *) (matchlet->mask + i)));
+
+		}
+	    }
+#endif
+	}
+
+      matchlet->next = match->matchlet;
+      match->matchlet = matchlet;
+
+
+      return XDG_MIME_MAGIC_MAGIC;
+    }
+
+  _xdg_mime_magic_matchlet_free (matchlet);
+  if (c == EOF)
+    return XDG_MIME_MAGIC_EOF;
+
+  return XDG_MIME_MAGIC_ERROR;
+}
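+
+/* Illustrative layout (ours) of one section of a MIME-Magic file, as the
+ * two parsers above consume it.  \xNN stands for a raw byte; the two
+ * bytes after '=' are the big-endian length of the value:
+ *
+ *   [50:image/png]
+ *   >0=\x00\x08\x89PNG\x0d\x0a\x1a\x0a
+ *
+ * i.e. priority 50, one matchlet comparing 8 bytes at offset 0.
+ */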
+
+static int
+_xdg_mime_magic_matchlet_compare_to_data (XdgMimeMagicMatchlet *matchlet,
+					  const void           *data,
+					  size_t                len)
+{
+  int i, j;
+  for (i = matchlet->offset; i < matchlet->offset + matchlet->range_length; i++)
+    {
+      int valid_matchlet = TRUE;
+
+      if (i + matchlet->value_length > len)
+	return FALSE;
+
+      if (matchlet->mask)
+	{
+	  for (j = 0; j < matchlet->value_length; j++)
+	    {
+	      if ((matchlet->value[j] & matchlet->mask[j]) !=
+		  ((((unsigned char *) data)[j + i]) & matchlet->mask[j]))
+		{
+		  valid_matchlet = FALSE;
+		  break;
+		}
+	    }
+	}
+      else
+	{
+	  for (j = 0; j <  matchlet->value_length; j++)
+	    {
+	      if (matchlet->value[j] != ((unsigned char *) data)[j + i])
+		{
+		  valid_matchlet = FALSE;
+		  break;
+		}
+	    }
+	}
+      if (valid_matchlet)
+	return TRUE;
+    }
+  return FALSE;
+}
+
+static int
+_xdg_mime_magic_matchlet_compare_level (XdgMimeMagicMatchlet *matchlet,
+					const void           *data,
+					size_t                len,
+					int                   indent)
+{
+  while ((matchlet != NULL) && (matchlet->indent == indent))
+    {
+      if (_xdg_mime_magic_matchlet_compare_to_data (matchlet, data, len))
+	{
+	  if ((matchlet->next == NULL) ||
+	      (matchlet->next->indent <= indent))
+	    return TRUE;
+
+	  if (_xdg_mime_magic_matchlet_compare_level (matchlet->next,
+						      data,
+						      len,
+						      indent + 1))
+	    return TRUE;
+	}
+
+      do
+	{
+	  matchlet = matchlet->next;
+	}
+      while (matchlet && matchlet->indent > indent);
+    }
+
+  return FALSE;
+}
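+
+/* Illustrative note (ours): matchlets form a tree encoded by `indent`.
+ * For a section parsed (and mirrored back to file order) as
+ *
+ *   indent 0: A
+ *   indent 1: B    (child of A)
+ *   indent 0: C
+ *
+ * the data matches if (A matches and B matches) or if C matches:
+ * siblings OR together, deeper indents AND with their parent.
+ */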
+
+static int
+_xdg_mime_magic_match_compare_to_data (XdgMimeMagicMatch *match,
+				       const void        *data,
+				       size_t             len)
+{
+  return _xdg_mime_magic_matchlet_compare_level (match->matchlet, data, len, 0);
+}
+
+static void
+_xdg_mime_magic_insert_match (XdgMimeMagic      *mime_magic,
+			      XdgMimeMagicMatch *match)
+{
+  XdgMimeMagicMatch *list;
+
+  if (mime_magic->match_list == NULL)
+    {
+      mime_magic->match_list = match;
+      return;
+    }
+
+  if (match->priority > mime_magic->match_list->priority)
+    {
+      match->next = mime_magic->match_list;
+      mime_magic->match_list = match;
+      return;
+    }
+
+  list = mime_magic->match_list;
+  while (list->next != NULL)
+    {
+      if (list->next->priority < match->priority)
+	{
+	  match->next = list->next;
+	  list->next = match;
+	  return;
+	}
+      list = list->next;
+    }
+  list->next = match;
+  match->next = NULL;
+}
+
+XdgMimeMagic *
+_xdg_mime_magic_new (void)
+{
+  return calloc (1, sizeof (XdgMimeMagic));
+}
+
+void
+_xdg_mime_magic_free (XdgMimeMagic *mime_magic)
+{
+  if (mime_magic) {
+    _xdg_mime_magic_match_free (mime_magic->match_list);
+    free (mime_magic);
+  }
+}
+
+int
+_xdg_mime_magic_get_buffer_extents (XdgMimeMagic *mime_magic)
+{
+  return mime_magic->max_extent;
+}
+
+const char *
+_xdg_mime_magic_lookup_data (XdgMimeMagic *mime_magic,
+			     const void   *data,
+			     size_t        len,
+			     int           *result_prio,
+                             const char   *mime_types[],
+                             int           n_mime_types)
+{
+  XdgMimeMagicMatch *match;
+  const char *mime_type;
+  int n;
+  int prio;
+
+  prio = 0;
+  mime_type = NULL;
+  for (match = mime_magic->match_list; match; match = match->next)
+    {
+      if (_xdg_mime_magic_match_compare_to_data (match, data, len))
+	{
+	  prio = match->priority;
+	  mime_type = match->mime_type;
+	  break;
+	}
+      else 
+	{
+	  for (n = 0; n < n_mime_types; n++)
+	    {
+	      if (mime_types[n] && 
+		  _xdg_mime_mime_type_equal (mime_types[n], match->mime_type))
+		mime_types[n] = NULL;
+	    }
+	}
+    }
+
+  if (mime_type == NULL)
+    {
+      for (n = 0; n < n_mime_types; n++)
+	{
+	  if (mime_types[n])
+	    mime_type = mime_types[n];
+	}
+    }
+  
+  if (result_prio)
+    *result_prio = prio;
+
+  return mime_type;
+}
+
+static void
+_xdg_mime_update_mime_magic_extents (XdgMimeMagic *mime_magic)
+{
+  XdgMimeMagicMatch *match;
+  int max_extent = 0;
+
+  for (match = mime_magic->match_list; match; match = match->next)
+    {
+      XdgMimeMagicMatchlet *matchlet;
+
+      for (matchlet = match->matchlet; matchlet; matchlet = matchlet->next)
+	{
+	  int extent;
+
+	  extent = matchlet->value_length + matchlet->offset + matchlet->range_length;
+	  if (max_extent < extent)
+	    max_extent = extent;
+	}
+    }
+
+  mime_magic->max_extent = max_extent;
+}
+
+static XdgMimeMagicMatchlet *
+_xdg_mime_magic_matchlet_mirror (XdgMimeMagicMatchlet *matchlets)
+{
+  XdgMimeMagicMatchlet *new_list;
+  XdgMimeMagicMatchlet *tmp;
+
+  if ((matchlets == NULL) || (matchlets->next == NULL))
+    return matchlets;
+
+  new_list = NULL;
+  tmp = matchlets;
+  while (tmp != NULL)
+    {
+      XdgMimeMagicMatchlet *matchlet;
+
+      matchlet = tmp;
+      tmp = tmp->next;
+      matchlet->next = new_list;
+      new_list = matchlet;
+    }
+
+  return new_list;
+
+}
+
+static void
+_xdg_mime_magic_read_magic_file (XdgMimeMagic *mime_magic,
+				 FILE         *magic_file)
+{
+  XdgMimeMagicState state;
+  XdgMimeMagicMatch *match = NULL; /* Quiet compiler */
+
+  state = XDG_MIME_MAGIC_SECTION;
+
+  while (state != XDG_MIME_MAGIC_EOF)
+    {
+      switch (state)
+	{
+	case XDG_MIME_MAGIC_SECTION:
+	  match = _xdg_mime_magic_match_new ();
+	  state = _xdg_mime_magic_parse_header (magic_file, match);
+	  if (state == XDG_MIME_MAGIC_EOF || state == XDG_MIME_MAGIC_ERROR)
+	    _xdg_mime_magic_match_free (match);
+	  break;
+	case XDG_MIME_MAGIC_MAGIC:
+	  state = _xdg_mime_magic_parse_magic_line (magic_file, match);
+	  if (state == XDG_MIME_MAGIC_SECTION ||
+	      (state == XDG_MIME_MAGIC_EOF && match->mime_type))
+	    {
+	      match->matchlet = _xdg_mime_magic_matchlet_mirror (match->matchlet);
+	      _xdg_mime_magic_insert_match (mime_magic, match);
+	    }
+	  else if (state == XDG_MIME_MAGIC_EOF || state == XDG_MIME_MAGIC_ERROR)
+	    _xdg_mime_magic_match_free (match);
+	  break;
+	case XDG_MIME_MAGIC_ERROR:
+	  state = _xdg_mime_magic_parse_error (magic_file);
+	  break;
+	case XDG_MIME_MAGIC_EOF:
+	default:
+	  /* Make the compiler happy */
+	  assert (0);
+	}
+    }
+  _xdg_mime_update_mime_magic_extents (mime_magic);
+}
+
+void
+_xdg_mime_magic_read_from_file (XdgMimeMagic *mime_magic,
+				const char   *file_name)
+{
+  FILE *magic_file;
+  char header[12];
+
+  magic_file = fopen (file_name, "r");
+
+  if (magic_file == NULL)
+    return;
+
+  if (fread (header, 1, 12, magic_file) == 12)
+    {
+      if (memcmp ("MIME-Magic\0\n", header, 12) == 0)
+        _xdg_mime_magic_read_magic_file (mime_magic, magic_file);
+    }
+
+  fclose (magic_file);
+}
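+
+/* Usage sketch (ours, with a hypothetical path): magic files must start
+ * with the literal 12-byte header "MIME-Magic\0\n" checked above.
+ *
+ *   XdgMimeMagic *magic = _xdg_mime_magic_new ();
+ *   _xdg_mime_magic_read_from_file (magic, "/usr/share/mime/magic");
+ *   int extent = _xdg_mime_magic_get_buffer_extents (magic);
+ *
+ * A caller reads `extent` bytes of a file into a buffer and hands it to
+ * _xdg_mime_magic_lookup_data.
+ */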
diff --git a/base/third_party/xdg_mime/xdgmimemagic.h b/base/third_party/xdg_mime/xdgmimemagic.h
new file mode 100644
index 0000000..35c8039
--- /dev/null
+++ b/base/third_party/xdg_mime/xdgmimemagic.h
@@ -0,0 +1,57 @@
+/* -*- mode: C; c-file-style: "gnu" -*- */
+/* xdgmimemagic.h: Private file.  Datastructure for storing the magic files.
+ *
+ * More info can be found at http://www.freedesktop.org/standards/
+ *
+ * Copyright (C) 2003  Red Hat, Inc.
+ * Copyright (C) 2003  Jonathan Blandford <jrb@alum.mit.edu>
+ *
+ * Licensed under the Academic Free License version 2.0
+ * Or under the following terms:
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __XDG_MIME_MAGIC_H__
+#define __XDG_MIME_MAGIC_H__
+
+#include <unistd.h>
+#include "xdgmime.h"
+typedef struct XdgMimeMagic XdgMimeMagic;
+
+#ifdef XDG_PREFIX
+#define _xdg_mime_glob_read_from_file             XDG_RESERVED_ENTRY(glob_read_from_file)
+#define _xdg_mime_magic_new                       XDG_RESERVED_ENTRY(magic_new)
+#define _xdg_mime_magic_read_from_file            XDG_RESERVED_ENTRY(magic_read_from_file)
+#define _xdg_mime_magic_free                      XDG_RESERVED_ENTRY(magic_free)
+#define _xdg_mime_magic_get_buffer_extents        XDG_RESERVED_ENTRY(magic_get_buffer_extents)
+#define _xdg_mime_magic_lookup_data               XDG_RESERVED_ENTRY(magic_lookup_data)
+#endif
+
+
+XdgMimeMagic *_xdg_mime_magic_new                (void);
+void          _xdg_mime_magic_read_from_file     (XdgMimeMagic *mime_magic,
+						  const char   *file_name);
+void          _xdg_mime_magic_free               (XdgMimeMagic *mime_magic);
+int           _xdg_mime_magic_get_buffer_extents (XdgMimeMagic *mime_magic);
+const char   *_xdg_mime_magic_lookup_data        (XdgMimeMagic *mime_magic,
+						  const void   *data,
+						  size_t        len,
+						  int          *result_prio,
+						  const char   *mime_types[],
+						  int           n_mime_types);
+
+#endif /* __XDG_MIME_MAGIC_H__ */
diff --git a/base/third_party/xdg_mime/xdgmimeparent.c b/base/third_party/xdg_mime/xdgmimeparent.c
new file mode 100644
index 0000000..511bbac
--- /dev/null
+++ b/base/third_party/xdg_mime/xdgmimeparent.c
@@ -0,0 +1,219 @@
+/* -*- mode: C; c-file-style: "gnu" -*- */
+/* xdgmimeparent.c: Private file.  Datastructure for storing the hierarchy.
+ *
+ * More info can be found at http://www.freedesktop.org/standards/
+ *
+ * Copyright (C) 2004  Red Hat, Inc.
+ * Copyright (C) 2004  Matthias Clasen <mclasen@redhat.com>
+ *
+ * Licensed under the Academic Free License version 2.0
+ * Or under the following terms:
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include "xdgmimeparent.h"
+#include "xdgmimeint.h"
+#include <stdlib.h>
+#include <stdio.h>
+#include <assert.h>
+#include <string.h>
+#include <fnmatch.h>
+
+#ifndef	FALSE
+#define	FALSE	(0)
+#endif
+
+#ifndef	TRUE
+#define	TRUE	(!FALSE)
+#endif
+
+typedef struct XdgMimeParents XdgMimeParents;
+
+struct XdgMimeParents
+{
+  char *mime;
+  char **parents;
+  int n_parents;
+};
+
+struct XdgParentList
+{
+  struct XdgMimeParents *parents;
+  int n_mimes;
+};
+
+XdgParentList *
+_xdg_mime_parent_list_new (void)
+{
+  XdgParentList *list;
+
+  list = malloc (sizeof (XdgParentList));
+
+  list->parents = NULL;
+  list->n_mimes = 0;
+
+  return list;
+}
+
+void         
+_xdg_mime_parent_list_free (XdgParentList *list)
+{
+  int i;
+  char **p;
+
+  if (list->parents)
+    {
+      for (i = 0; i < list->n_mimes; i++)
+	{
+	  for (p = list->parents[i].parents; *p; p++)
+	    free (*p);
+
+	  free (list->parents[i].parents);
+	  free (list->parents[i].mime);
+	}
+      free (list->parents);
+    }
+  free (list);
+}
+
+static int
+parent_entry_cmp (const void *v1, const void *v2)
+{
+  return strcmp (((XdgMimeParents *)v1)->mime, ((XdgMimeParents *)v2)->mime);
+}
+
+const char **
+_xdg_mime_parent_list_lookup (XdgParentList *list,
+			      const char    *mime)
+{
+  XdgMimeParents *entry;
+  XdgMimeParents key;
+
+  if (list->n_mimes > 0)
+    {
+      key.mime = (char *)mime;
+      key.parents = NULL;
+
+      entry = bsearch (&key, list->parents, list->n_mimes,
+		       sizeof (XdgMimeParents), &parent_entry_cmp);
+      if (entry)
+        return (const char **)entry->parents;
+    }
+
+  return NULL;
+}
+
+void
+_xdg_mime_parent_read_from_file (XdgParentList *list,
+				 const char    *file_name)
+{
+  FILE *file;
+  char line[255];
+  int i, alloc;
+  XdgMimeParents *entry;
+
+  file = fopen (file_name, "r");
+
+  if (file == NULL)
+    return;
+
+  /* FIXME: Not UTF-8 safe.  Doesn't work if lines are greater than 255 chars.
+   * Blah */
+  alloc = list->n_mimes + 16;
+  list->parents = realloc (list->parents, alloc * sizeof (XdgMimeParents));
+  while (fgets (line, 255, file) != NULL)
+    {
+      char *sep;
+      if (line[0] == '#')
+	continue;
+
+      sep = strchr (line, ' ');
+      if (sep == NULL)
+	continue;
+      *(sep++) = '\000';
+      sep[strlen (sep) -1] = '\000';
+      entry = NULL;
+      for (i = 0; i < list->n_mimes; i++)
+	{
+	  if (strcmp (list->parents[i].mime, line) == 0)
+	    {
+	      entry = &(list->parents[i]);
+	      break;
+	    }
+	}
+      
+      if (!entry)
+	{
+	  if (list->n_mimes == alloc)
+	    {
+	      alloc <<= 1;
+	      list->parents = realloc (list->parents, 
+				       alloc * sizeof (XdgMimeParents));
+	    }
+	  list->parents[list->n_mimes].mime = strdup (line);
+	  list->parents[list->n_mimes].parents = NULL;
+	  entry = &(list->parents[list->n_mimes]);
+	  list->n_mimes++;
+	}
+
+      if (!entry->parents)
+	{
+	  entry->n_parents = 1;
+	  entry->parents = malloc ((entry->n_parents + 1) * sizeof (char *));
+	}
+      else
+	{
+	  entry->n_parents += 1;
+	  entry->parents = realloc (entry->parents, 
+				    (entry->n_parents + 2) * sizeof (char *));
+	}
+      entry->parents[entry->n_parents - 1] = strdup (sep);
+      entry->parents[entry->n_parents] = NULL;
+    }
+
+  list->parents = realloc (list->parents, 
+			   list->n_mimes * sizeof (XdgMimeParents));
+
+  fclose (file);  
+  
+  if (list->n_mimes > 1)
+    qsort (list->parents, list->n_mimes, 
+           sizeof (XdgMimeParents), &parent_entry_cmp);
+}
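+
+/* Sample input (ours): a subclasses file holds space-separated
+ * "mimetype parent" pairs, one parent per line, e.g.
+ *
+ *   text/x-c text/plain
+ *   image/svg+xml application/xml
+ *
+ * Repeated child types accumulate into a single NULL-terminated
+ * parents array per entry.
+ */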
+
+
+void         
+_xdg_mime_parent_list_dump (XdgParentList *list)
+{
+  int i;
+  char **p;
+
+  if (list->parents)
+    {
+      for (i = 0; i < list->n_mimes; i++)
+	{
+	  for (p = list->parents[i].parents; *p; p++)
+	    printf ("%s %s\n", list->parents[i].mime, *p);
+	}
+    }
+}
+
+
diff --git a/base/third_party/xdg_mime/xdgmimeparent.h b/base/third_party/xdg_mime/xdgmimeparent.h
new file mode 100644
index 0000000..b564f41
--- /dev/null
+++ b/base/third_party/xdg_mime/xdgmimeparent.h
@@ -0,0 +1,51 @@
+/* -*- mode: C; c-file-style: "gnu" -*- */
+/* xdgmimeparent.h: Private file.  Datastructure for storing the hierarchy.
+ *
+ * More info can be found at http://www.freedesktop.org/standards/
+ *
+ * Copyright (C) 2004  Red Hat, Inc.
+ * Copyright (C) 2004  Matthias Clasen <mclasen@redhat.com>
+ *
+ * Licensed under the Academic Free License version 2.0
+ * Or under the following terms:
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __XDG_MIME_PARENT_H__
+#define __XDG_MIME_PARENT_H__
+
+#include "xdgmime.h"
+
+typedef struct XdgParentList XdgParentList;
+
+#ifdef XDG_PREFIX
+#define _xdg_mime_parent_read_from_file        XDG_RESERVED_ENTRY(parent_read_from_file)
+#define _xdg_mime_parent_list_new              XDG_RESERVED_ENTRY(parent_list_new)
+#define _xdg_mime_parent_list_free             XDG_RESERVED_ENTRY(parent_list_free)
+#define _xdg_mime_parent_list_lookup           XDG_RESERVED_ENTRY(parent_list_lookup)
+#define _xdg_mime_parent_list_dump             XDG_RESERVED_ENTRY(parent_list_dump)
+#endif
+
+void          _xdg_mime_parent_read_from_file (XdgParentList *list,
+					       const char    *file_name);
+XdgParentList *_xdg_mime_parent_list_new       (void);
+void           _xdg_mime_parent_list_free      (XdgParentList *list);
+const char   **_xdg_mime_parent_list_lookup    (XdgParentList *list,
+						const char    *mime);
+void           _xdg_mime_parent_list_dump      (XdgParentList *list);
+
+#endif /* __XDG_MIME_PARENT_H__ */
diff --git a/base/third_party/xdg_user_dirs/BUILD.gn b/base/third_party/xdg_user_dirs/BUILD.gn
new file mode 100644
index 0000000..a5626e9
--- /dev/null
+++ b/base/third_party/xdg_user_dirs/BUILD.gn
@@ -0,0 +1,11 @@
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+static_library("xdg_user_dirs") {
+  visibility = [ "//base/*" ]
+  sources = [
+    "xdg_user_dir_lookup.cc",
+    "xdg_user_dir_lookup.h",
+  ]
+}
diff --git a/base/third_party/xdg_user_dirs/LICENSE b/base/third_party/xdg_user_dirs/LICENSE
new file mode 100644
index 0000000..540e803
--- /dev/null
+++ b/base/third_party/xdg_user_dirs/LICENSE
@@ -0,0 +1,21 @@
+  Copyright (c) 2007 Red Hat, inc
+
+  Permission is hereby granted, free of charge, to any person
+  obtaining a copy of this software and associated documentation files
+  (the "Software"), to deal in the Software without restriction,
+  including without limitation the rights to use, copy, modify, merge,
+  publish, distribute, sublicense, and/or sell copies of the Software,
+  and to permit persons to whom the Software is furnished to do so,
+  subject to the following conditions: 
+
+  The above copyright notice and this permission notice shall be
+  included in all copies or substantial portions of the Software. 
+
+  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+  BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+  ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+  CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+  SOFTWARE.
diff --git a/base/third_party/xdg_user_dirs/README.chromium b/base/third_party/xdg_user_dirs/README.chromium
new file mode 100644
index 0000000..ff2f977
--- /dev/null
+++ b/base/third_party/xdg_user_dirs/README.chromium
@@ -0,0 +1,9 @@
+Name: xdg-user-dirs
+URL: http://www.freedesktop.org/wiki/Software/xdg-user-dirs
+License: MIT
+
+This directory includes xdg-user-dir-lookup.c, renamed to xdg_user_dir_lookup.cc,
+from xdg-user-dirs 0.10. We made xdg_user_dir_lookup() non-static and added
+xdg_user_dir_lookup.h.
+
+- Added an include of xdg_user_dir_lookup.h in xdg_user_dir_lookup.cc
diff --git a/base/third_party/xdg_user_dirs/xdg_user_dir_lookup.cc b/base/third_party/xdg_user_dirs/xdg_user_dir_lookup.cc
new file mode 100644
index 0000000..4e28569
--- /dev/null
+++ b/base/third_party/xdg_user_dirs/xdg_user_dir_lookup.cc
@@ -0,0 +1,234 @@
+/*
+  This file is not licensed under the GPL like the rest of the code.
+  It is under the MIT license, to encourage reuse by cut-and-paste.
+
+  Copyright (c) 2007 Red Hat, inc
+
+  Permission is hereby granted, free of charge, to any person
+  obtaining a copy of this software and associated documentation files
+  (the "Software"), to deal in the Software without restriction,
+  including without limitation the rights to use, copy, modify, merge,
+  publish, distribute, sublicense, and/or sell copies of the Software,
+  and to permit persons to whom the Software is furnished to do so,
+  subject to the following conditions: 
+
+  The above copyright notice and this permission notice shall be
+  included in all copies or substantial portions of the Software. 
+
+  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+  BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+  ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+  CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+  SOFTWARE.
+*/
+
+#include "base/third_party/xdg_user_dirs/xdg_user_dir_lookup.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+/**
+ * xdg_user_dir_lookup_with_fallback:
+ * @type: a string specifying the type of directory
+ * @fallback: value to use if the directory isn't specified by the user
+ * @returns: a newly allocated absolute pathname
+ *
+ * Looks up an XDG user directory of the specified type.
+ * Examples of types are "DESKTOP" and "DOWNLOAD".
+ *
+ * In case the user hasn't specified any directory for the specified
+ * type, the value returned is @fallback.
+ *
+ * The return value is newly allocated and must be freed with
+ * free(). The return value is never NULL if @fallback != NULL, unless
+ * out of memory.
+ **/
+static char *
+xdg_user_dir_lookup_with_fallback (const char *type, const char *fallback)
+{
+  FILE *file;
+  char *home_dir, *config_home, *config_file;
+  char buffer[512];
+  char *user_dir;
+  char *p, *d;
+  int len;
+  int relative;
+  
+  home_dir = getenv ("HOME");
+
+  if (home_dir == NULL)
+    goto error;
+
+  config_home = getenv ("XDG_CONFIG_HOME");
+  if (config_home == NULL || config_home[0] == 0)
+    {
+      config_file = (char*) malloc (strlen (home_dir) + strlen ("/.config/user-dirs.dirs") + 1);
+      if (config_file == NULL)
+        goto error;
+
+      strcpy (config_file, home_dir);
+      strcat (config_file, "/.config/user-dirs.dirs");
+    }
+  else
+    {
+      config_file = (char*) malloc (strlen (config_home) + strlen ("/user-dirs.dirs") + 1);
+      if (config_file == NULL)
+        goto error;
+
+      strcpy (config_file, config_home);
+      strcat (config_file, "/user-dirs.dirs");
+    }
+
+  file = fopen (config_file, "r");
+  free (config_file);
+  if (file == NULL)
+    goto error;
+
+  user_dir = NULL;
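+  /* Each line this loop accepts looks like (hypothetical example):
+       XDG_DOWNLOAD_DIR="$HOME/Downloads"  */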
+  while (fgets (buffer, sizeof (buffer), file))
+    {
+      /* Remove newline at end */
+      len = strlen (buffer);
+      if (len > 0 && buffer[len-1] == '\n')
+	buffer[len-1] = 0;
+      
+      p = buffer;
+      while (*p == ' ' || *p == '\t')
+	p++;
+      
+      if (strncmp (p, "XDG_", 4) != 0)
+	continue;
+      p += 4;
+      if (strncmp (p, type, strlen (type)) != 0)
+	continue;
+      p += strlen (type);
+      if (strncmp (p, "_DIR", 4) != 0)
+	continue;
+      p += 4;
+
+      while (*p == ' ' || *p == '\t')
+	p++;
+
+      if (*p != '=')
+	continue;
+      p++;
+      
+      while (*p == ' ' || *p == '\t')
+	p++;
+
+      if (*p != '"')
+	continue;
+      p++;
+      
+      relative = 0;
+      if (strncmp (p, "$HOME/", 6) == 0)
+	{
+	  p += 6;
+	  relative = 1;
+	}
+      else if (*p != '/')
+	continue;
+      
+      if (relative)
+	{
+	  user_dir = (char*) malloc (strlen (home_dir) + 1 + strlen (p) + 1);
+          if (user_dir == NULL)
+            goto error2;
+
+	  strcpy (user_dir, home_dir);
+	  strcat (user_dir, "/");
+	}
+      else
+	{
+	  user_dir = (char*) malloc (strlen (p) + 1);
+          if (user_dir == NULL)
+            goto error2;
+
+	  *user_dir = 0;
+	}
+      
+      d = user_dir + strlen (user_dir);
+      while (*p && *p != '"')
+	{
+	  if ((*p == '\\') && (*(p+1) != 0))
+	    p++;
+	  *d++ = *p++;
+	}
+      *d = 0;
+    }
+error2:
+  fclose (file);
+
+  if (user_dir)
+    return user_dir;
+
+ error:
+  if (fallback)
+    return strdup (fallback);
+  return NULL;
+}
+
+/**
+ * xdg_user_dir_lookup:
+ * @type: a string specifying the type of directory
+ * @returns: a newly allocated absolute pathname
+ *
+ * Looks up an XDG user directory of the specified type.
+ * Examples of types are "DESKTOP" and "DOWNLOAD".
+ *
+ * The return value is always != NULL (unless out of memory). If a
+ * directory for the type is not specified by the user, the default is
+ * the home directory, except for DESKTOP, which defaults to ~/Desktop.
+ *
+ * The return value is newly allocated and must be freed with
+ * free().
+ **/
+char *
+xdg_user_dir_lookup (const char *type)
+{
+  char *dir, *home_dir, *user_dir;
+	  
+  dir = xdg_user_dir_lookup_with_fallback (type, NULL);
+  if (dir != NULL)
+    return dir;
+  
+  home_dir = getenv ("HOME");
+  
+  if (home_dir == NULL)
+    return strdup ("/tmp");
+  
+  /* Special case desktop for historical compatibility */
+  if (strcmp (type, "DESKTOP") == 0)
+    {
+      user_dir = (char*) malloc (strlen (home_dir) + strlen ("/Desktop") + 1);
+      if (user_dir == NULL)
+        return NULL;
+
+      strcpy (user_dir, home_dir);
+      strcat (user_dir, "/Desktop");
+      return user_dir;
+    }
+  
+  return strdup (home_dir);
+}
+
+#ifdef STANDALONE_XDG_USER_DIR_LOOKUP
+int
+main (int argc, char *argv[])
+{
+  if (argc != 2)
+    {
+      fprintf (stderr, "Usage %s <dir-type>\n", argv[0]);
+      exit (1);
+    }
+  
+  printf ("%s\n", xdg_user_dir_lookup (argv[1]));
+  return 0;
+}
+#endif
diff --git a/base/third_party/xdg_user_dirs/xdg_user_dir_lookup.h b/base/third_party/xdg_user_dirs/xdg_user_dir_lookup.h
new file mode 100644
index 0000000..9e81e1b
--- /dev/null
+++ b/base/third_party/xdg_user_dirs/xdg_user_dir_lookup.h
@@ -0,0 +1,33 @@
+/*
+  This file is not licensed under the GPL like the rest of the code.
+  It is under the MIT license, to encourage reuse by cut-and-paste.
+
+  Copyright (c) 2007 Red Hat, inc
+
+  Permission is hereby granted, free of charge, to any person
+  obtaining a copy of this software and associated documentation files
+  (the "Software"), to deal in the Software without restriction,
+  including without limitation the rights to use, copy, modify, merge,
+  publish, distribute, sublicense, and/or sell copies of the Software,
+  and to permit persons to whom the Software is furnished to do so,
+  subject to the following conditions: 
+
+  The above copyright notice and this permission notice shall be
+  included in all copies or substantial portions of the Software. 
+
+  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+  BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+  ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+  CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+  SOFTWARE.
+*/
+
+#ifndef CHROME_THIRD_PARTY_XDG_USER_DIRS_XDG_USER_DIR_LOOKUP_H_
+#define CHROME_THIRD_PARTY_XDG_USER_DIRS_XDG_USER_DIR_LOOKUP_H_
+
+char* xdg_user_dir_lookup(const char *type);
+
+#endif  // CHROME_THIRD_PARTY_XDG_USER_DIRS_XDG_USER_DIR_LOOKUP_H_
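A minimal caller sketch for this header (illustrative only; the key point is
that the result is heap-allocated and owned by the caller):

    #include <stdio.h>
    #include <stdlib.h>
    #include "base/third_party/xdg_user_dirs/xdg_user_dir_lookup.h"

    int main (void)
    {
      /* Never NULL unless out of memory; must be released with free(). */
      char *dir = xdg_user_dir_lookup ("DOWNLOAD");
      if (dir)
        {
          printf ("downloads: %s\n", dir);
          free (dir);
        }
      return 0;
    }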
diff --git a/base/thread_annotations.h b/base/thread_annotations.h
new file mode 100644
index 0000000..ba7168b
--- /dev/null
+++ b/base/thread_annotations.h
@@ -0,0 +1,238 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This header file contains macro definitions for thread safety annotations
+// that allow developers to document the locking policies of multi-threaded
+// code. The annotations can also help program analysis tools to identify
+// potential thread safety issues.
+//
+// Note that the annotations we use are described as deprecated in the Clang
+// documentation, linked below. E.g. we use EXCLUSIVE_LOCKS_REQUIRED where the
+// Clang docs use REQUIRES.
+//
+// http://clang.llvm.org/docs/ThreadSafetyAnalysis.html
+//
+// We use the deprecated Clang annotations to match Abseil (relevant header
+// linked below) and its ecosystem of libraries. We will follow Abseil with
+// respect to upgrading to more modern annotations.
+//
+// https://github.com/abseil/abseil-cpp/blob/master/absl/base/thread_annotations.h
+//
+// These annotations are implemented using compiler attributes. Using the macros
+// defined here instead of raw attributes allows for portability and future
+// compatibility.
+//
+// When referring to mutexes in the arguments of the attributes, you should
+// use variable names or more complex expressions (e.g. my_object->mutex_)
+// that evaluate to a concrete mutex object whenever possible. If the mutex
+// you want to refer to is not in scope, you may use a member pointer
+// (e.g. &MyClass::mutex_) to refer to a mutex in some (unknown) object.
+
+#ifndef THREAD_ANNOTATIONS_H_
+#define THREAD_ANNOTATIONS_H_
+
+#if defined(__clang__)
+#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
+#else
+#define THREAD_ANNOTATION_ATTRIBUTE__(x)  // no-op
+#endif
+
+// GUARDED_BY()
+//
+// Documents if a shared field or global variable needs to be protected by a
+// mutex. GUARDED_BY() allows the user to specify a particular mutex that
+// should be held when accessing the annotated variable.
+//
+// Example:
+//
+//   Mutex mu;
+//   int p1 GUARDED_BY(mu);
+#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x))
+
+// PT_GUARDED_BY()
+//
+// Documents if the memory location pointed to by a pointer should be guarded
+// by a mutex when dereferencing the pointer.
+//
+// Example:
+//   Mutex mu;
+//   int *p1 PT_GUARDED_BY(mu);
+//
+// Note that a pointer variable to a shared memory location could itself be a
+// shared variable.
+//
+// Example:
+//
+//     // `q`, guarded by `mu1`, points to a shared memory location that is
+//     // guarded by `mu2`:
+//     int *q GUARDED_BY(mu1) PT_GUARDED_BY(mu2);
+#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x))
+
+// ACQUIRED_AFTER() / ACQUIRED_BEFORE()
+//
+// Documents the acquisition order between locks that can be held
+// simultaneously by a thread. For any two locks that need to be annotated
+// to establish an acquisition order, only one of them needs the annotation.
+// (i.e. You don't have to annotate both locks with both ACQUIRED_AFTER
+// and ACQUIRED_BEFORE.)
+//
+// Example:
+//
+//   Mutex m1;
+//   Mutex m2 ACQUIRED_AFTER(m1);
+#define ACQUIRED_AFTER(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))
+
+#define ACQUIRED_BEFORE(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))
+
+// EXCLUSIVE_LOCKS_REQUIRED() / SHARED_LOCKS_REQUIRED()
+//
+// Documents a function that expects a mutex to be held prior to entry.
+// The mutex is expected to be held both on entry to, and exit from, the
+// function.
+//
+// Example:
+//
+//   Mutex mu1, mu2;
+//   int a GUARDED_BY(mu1);
+//   int b GUARDED_BY(mu2);
+//
+//   void foo() EXCLUSIVE_LOCKS_REQUIRED(mu1, mu2) { ... };
+#define EXCLUSIVE_LOCKS_REQUIRED(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(__VA_ARGS__))
+
+#define SHARED_LOCKS_REQUIRED(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(__VA_ARGS__))
+
+// LOCKS_EXCLUDED()
+//
+// Documents the locks acquired in the body of the function. These locks
+// cannot be held when calling this function (as Abseil's `Mutex` locks are
+// non-reentrant).
+#define LOCKS_EXCLUDED(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__))
+
+// LOCK_RETURNED()
+//
+// Documents a function that returns a mutex without acquiring it.  For example,
+// a public getter method that returns a pointer to a private mutex should
+// be annotated with LOCK_RETURNED.
+#define LOCK_RETURNED(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
+
+// LOCKABLE
+//
+// Documents if a class/type is a lockable type (such as the `Mutex` class).
+#define LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(lockable)
+
+// SCOPED_LOCKABLE
+//
+// Documents if a class does RAII locking (such as the `MutexLock` class).
+// The constructor should use `LOCK_FUNCTION()` to specify the mutex that is
+// acquired, and the destructor should use `UNLOCK_FUNCTION()` with no
+// arguments; the analysis will assume that the destructor unlocks whatever the
+// constructor locked.
+#define SCOPED_LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
+
+// EXCLUSIVE_LOCK_FUNCTION()
+//
+// Documents functions that acquire a lock in the body of a function, and do
+// not release it.
+#define EXCLUSIVE_LOCK_FUNCTION(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock_function(__VA_ARGS__))
+
+// SHARED_LOCK_FUNCTION()
+//
+// Documents functions that acquire a shared (reader) lock in the body of a
+// function, and do not release it.
+#define SHARED_LOCK_FUNCTION(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(shared_lock_function(__VA_ARGS__))
+
+// UNLOCK_FUNCTION()
+//
+// Documents functions that expect a lock to be held on entry to the function,
+// and release it in the body of the function.
+#define UNLOCK_FUNCTION(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(unlock_function(__VA_ARGS__))
+
+// EXCLUSIVE_TRYLOCK_FUNCTION() / SHARED_TRYLOCK_FUNCTION()
+//
+// Documents functions that try to acquire a lock, and return success or failure
+// (or a non-boolean value that can be interpreted as a boolean).
+// The first argument should be `true` for functions that return `true` on
+// success, or `false` for functions that return `false` on success. The second
+// argument specifies the mutex that is locked on success. If unspecified, this
+// mutex is assumed to be `this`.
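+//
+// Example (an illustrative sketch, assuming a LOCKABLE `Mutex` class):
+//
+//   Mutex mu;
+//   bool TryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true, mu);
+//   bool ReaderTryLock() SHARED_TRYLOCK_FUNCTION(true, mu);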
+#define EXCLUSIVE_TRYLOCK_FUNCTION(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock_function(__VA_ARGS__))
+
+#define SHARED_TRYLOCK_FUNCTION(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock_function(__VA_ARGS__))
+
+// ASSERT_EXCLUSIVE_LOCK() / ASSERT_SHARED_LOCK()
+//
+// Documents functions that dynamically check to see if a lock is held, and fail
+// if it is not held.
+#define ASSERT_EXCLUSIVE_LOCK(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(assert_exclusive_lock(__VA_ARGS__))
+
+#define ASSERT_SHARED_LOCK(...) \
+  THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_lock(__VA_ARGS__))
+
+// NO_THREAD_SAFETY_ANALYSIS
+//
+// Turns off thread safety checking within the body of a particular function.
+// This annotation is used to mark functions that are known to be correct, but
+// whose locking behavior is more complicated than the analyzer can handle.
+#define NO_THREAD_SAFETY_ANALYSIS \
+  THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)
+
+//------------------------------------------------------------------------------
+// Tool-Supplied Annotations
+//------------------------------------------------------------------------------
+
+// TS_UNCHECKED should be placed around lock expressions that are not valid
+// C++ syntax, but which are present for documentation purposes.  These
+// annotations will be ignored by the analysis.
+#define TS_UNCHECKED(x) ""
+
+// TS_FIXME is used to mark lock expressions that are not valid C++ syntax.
+// It is used by automated tools to mark and disable invalid expressions.
+// The annotation should either be fixed, or changed to TS_UNCHECKED.
+#define TS_FIXME(x) ""
+
+// Like NO_THREAD_SAFETY_ANALYSIS, this turns off checking within the body of
+// a particular function.  However, this attribute is used to mark functions
+// that are incorrect and need to be fixed.  It is used by automated tools to
+// avoid breaking the build when the analysis is updated.
+// Code owners are expected to eventually fix the routine.
+#define NO_THREAD_SAFETY_ANALYSIS_FIXME NO_THREAD_SAFETY_ANALYSIS
+
+// Similar to NO_THREAD_SAFETY_ANALYSIS_FIXME, this macro marks a GUARDED_BY
+// annotation that needs to be fixed, because it is producing thread safety
+// warnings.  It disables the GUARDED_BY.
+#define GUARDED_BY_FIXME(x)
+
+// Disables warnings for a single read operation.  This can be used to avoid
+// warnings when it is known that the read is not actually involved in a race,
+// but the compiler cannot confirm that.
+#define TS_UNCHECKED_READ(x) thread_safety_analysis::ts_unchecked_read(x)
+
+namespace thread_safety_analysis {
+
+// Takes a reference to a guarded data member, and returns an unguarded
+// reference.
+template <typename T>
+inline const T& ts_unchecked_read(const T& v) NO_THREAD_SAFETY_ANALYSIS {
+  return v;
+}
+
+template <typename T>
+inline T& ts_unchecked_read(T& v) NO_THREAD_SAFETY_ANALYSIS {
+  return v;
+}
+
+}  // namespace thread_safety_analysis
+
+#endif  // THREAD_ANNOTATIONS_H_
diff --git a/base/thread_annotations_unittest.cc b/base/thread_annotations_unittest.cc
new file mode 100644
index 0000000..b4aafef
--- /dev/null
+++ b/base/thread_annotations_unittest.cc
@@ -0,0 +1,58 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "thread_annotations.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+class LOCKABLE Lock {
+ public:
+  void Acquire() EXCLUSIVE_LOCK_FUNCTION() {}
+  void Release() UNLOCK_FUNCTION() {}
+};
+
+class SCOPED_LOCKABLE AutoLock {
+ public:
+  AutoLock(Lock& lock) EXCLUSIVE_LOCK_FUNCTION(lock) : lock_(lock) {
+    lock.Acquire();
+  }
+  ~AutoLock() UNLOCK_FUNCTION() { lock_.Release(); }
+
+ private:
+  Lock& lock_;
+};
+
+class ThreadSafe {
+ public:
+  void ExplicitIncrement();
+  void ImplicitIncrement();
+
+ private:
+  Lock lock_;
+  int counter_ GUARDED_BY(lock_);
+};
+
+void ThreadSafe::ExplicitIncrement() {
+  lock_.Acquire();
+  ++counter_;
+  lock_.Release();
+}
+
+void ThreadSafe::ImplicitIncrement() {
+  AutoLock auto_lock(lock_);
+  counter_++;
+}
+
+TEST(ThreadAnnotationsTest, ExplicitIncrement) {
+  ThreadSafe thread_safe;
+  thread_safe.ExplicitIncrement();
+}
+TEST(ThreadAnnotationsTest, ImplicitIncrement) {
+  ThreadSafe thread_safe;
+  thread_safe.ImplicitIncrement();
+}
+
+}  // anonymous namespace
diff --git a/base/thread_annotations_unittest.nc b/base/thread_annotations_unittest.nc
new file mode 100644
index 0000000..ea64a7e
--- /dev/null
+++ b/base/thread_annotations_unittest.nc
@@ -0,0 +1,71 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is a "No Compile Test" suite.
+// https://dev.chromium.org/developers/testing/no-compile-tests
+
+#include "base/thread_annotations.h"
+
+namespace {
+
+class LOCKABLE Lock {
+ public:
+  void Acquire() EXCLUSIVE_LOCK_FUNCTION() {}
+  void Release() UNLOCK_FUNCTION() {}
+};
+
+class SCOPED_LOCKABLE AutoLock {
+ public:
+  AutoLock(Lock& lock) EXCLUSIVE_LOCK_FUNCTION(lock) : lock_(lock) {
+    lock.Acquire();
+  }
+  ~AutoLock() UNLOCK_FUNCTION() { lock_.Release(); }
+
+ private:
+  Lock& lock_;
+};
+class ThreadSafe {
+ public:
+  void BuggyIncrement();
+ private:
+  Lock lock_;
+  int counter_ GUARDED_BY(lock_);
+};
+
+#if defined(NCTEST_LOCK_WITHOUT_UNLOCK)  // [r"fatal error: mutex 'lock_' is still held at the end of function"]
+
+void ThreadSafe::BuggyIncrement() {
+  lock_.Acquire();
+  ++counter_;
+  // Forgot to release the lock.
+}
+
+#elif defined(NCTEST_ACCESS_WITHOUT_LOCK)  // [r"fatal error: writing variable 'counter_' requires holding mutex 'lock_' exclusively"]
+
+void ThreadSafe::BuggyIncrement() {
+  // Member access without holding the lock guarding it.
+  ++counter_;
+}
+
+#elif defined(NCTEST_ACCESS_WITHOUT_SCOPED_LOCK)  // [r"fatal error: writing variable 'counter_' requires holding mutex 'lock_' exclusively"]
+
+void ThreadSafe::BuggyIncrement() {
+  {
+    AutoLock auto_lock(lock_);
+    // The AutoLock will go out of scope before the guarded member access.
+  }
+  ++counter_;
+}
+
+#elif defined(NCTEST_GUARDED_BY_WRONG_TYPE)  // [r"fatal error: 'guarded_by' attribute requires arguments whose type is annotated"]
+
+int not_lockable;
+int global_counter GUARDED_BY(not_lockable);
+
+// Defined to avoid link error.
+void ThreadSafe::BuggyIncrement() { }
+
+#endif
+
+}  // anonymous namespace
diff --git a/base/threading/OWNERS b/base/threading/OWNERS
new file mode 100644
index 0000000..4198e99
--- /dev/null
+++ b/base/threading/OWNERS
@@ -0,0 +1,2 @@
+# For thread_restrictions.*
+jam@chromium.org
diff --git a/base/threading/platform_thread.h b/base/threading/platform_thread.h
new file mode 100644
index 0000000..faeb858
--- /dev/null
+++ b/base/threading/platform_thread.h
@@ -0,0 +1,240 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// WARNING: You should *NOT* be using this class directly.  PlatformThread is
+// the low-level platform-specific abstraction to the OS's threading interface.
+// You should instead be using a message-loop driven Thread, see thread.h.
+
+#ifndef BASE_THREADING_PLATFORM_THREAD_H_
+#define BASE_THREADING_PLATFORM_THREAD_H_
+
+#include <stddef.h>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include "base/win/windows_types.h"
+#elif defined(OS_FUCHSIA)
+#include <zircon/types.h>
+#elif defined(OS_MACOSX)
+#include <mach/mach_types.h>
+#elif defined(OS_POSIX)
+#include <pthread.h>
+#include <unistd.h>
+#endif
+
+namespace base {
+
+// Used for logging. Always an integer value.
+#if defined(OS_WIN)
+typedef DWORD PlatformThreadId;
+#elif defined(OS_FUCHSIA)
+typedef zx_handle_t PlatformThreadId;
+#elif defined(OS_MACOSX)
+typedef mach_port_t PlatformThreadId;
+#elif defined(OS_POSIX)
+typedef pid_t PlatformThreadId;
+#endif
+
+// Used for thread checking and debugging.
+// Meant to be as fast as possible.
+// These are produced by PlatformThread::CurrentRef(), and used to later
+// check if we are on the same thread or not by using ==. These are safe
+// to copy between threads, but can't be copied to another process as they
+// have no meaning there. Also, the internal identifier can be re-used
+// after a thread dies, so a PlatformThreadRef cannot be reliably used
+// to distinguish a new thread from an old, dead thread.
+class PlatformThreadRef {
+ public:
+#if defined(OS_WIN)
+  typedef DWORD RefType;
+#else  //  OS_POSIX
+  typedef pthread_t RefType;
+#endif
+  constexpr PlatformThreadRef() : id_(0) {}
+
+  explicit constexpr PlatformThreadRef(RefType id) : id_(id) {}
+
+  bool operator==(PlatformThreadRef other) const {
+    return id_ == other.id_;
+  }
+
+  bool operator!=(PlatformThreadRef other) const { return id_ != other.id_; }
+
+  bool is_null() const {
+    return id_ == 0;
+  }
+ private:
+  RefType id_;
+};
+
+// Used to operate on threads.
+class PlatformThreadHandle {
+ public:
+#if defined(OS_WIN)
+  typedef void* Handle;
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  typedef pthread_t Handle;
+#endif
+
+  constexpr PlatformThreadHandle() : handle_(0) {}
+
+  explicit constexpr PlatformThreadHandle(Handle handle) : handle_(handle) {}
+
+  bool is_equal(const PlatformThreadHandle& other) const {
+    return handle_ == other.handle_;
+  }
+
+  bool is_null() const {
+    return !handle_;
+  }
+
+  Handle platform_handle() const {
+    return handle_;
+  }
+
+ private:
+  Handle handle_;
+};
+
+const PlatformThreadId kInvalidThreadId(0);
+
+// Valid values for priority of Thread::Options and SimpleThread::Options, and
+// SetCurrentThreadPriority(), listed in increasing order of importance.
+enum class ThreadPriority : int {
+  // Suitable for threads that shouldn't disrupt high priority work.
+  BACKGROUND,
+  // Default priority level.
+  NORMAL,
+  // Suitable for threads which generate data for the display (at ~60Hz).
+  DISPLAY,
+  // Suitable for low-latency, glitch-resistant audio.
+  REALTIME_AUDIO,
+};
+
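+// For example (illustrative), a low-latency audio thread would typically
+// switch itself to the highest level:
+//
+//   PlatformThread::SetCurrentThreadPriority(ThreadPriority::REALTIME_AUDIO);
+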
+// A namespace for low-level thread functions.
+class BASE_EXPORT PlatformThread {
+ public:
+  // Implement this interface to run code on a background thread.  Your
+  // ThreadMain method will be called on the newly created thread.
+  class BASE_EXPORT Delegate {
+   public:
+    virtual void ThreadMain() = 0;
+
+   protected:
+    virtual ~Delegate() = default;
+  };
+
+  // Gets the current thread id, which may be useful for logging purposes.
+  static PlatformThreadId CurrentId();
+
+  // Gets the current thread reference, which can be used to check if
+  // we're on the right thread quickly.
+  static PlatformThreadRef CurrentRef();
+
+  // Get the handle representing the current thread. On Windows, this is a
+  // pseudo handle constant which will always represent the thread using it and
+  // hence should not be shared with other threads nor be used to differentiate
+  // the current thread from another.
+  static PlatformThreadHandle CurrentHandle();
+
+  // Yield the current thread so another thread can be scheduled.
+  static void YieldCurrentThread();
+
+  // Sleeps for the specified duration.
+  static void Sleep(base::TimeDelta duration);
+
+  // Sets the thread name visible to debuggers/tools. This will try to
+  // initialize the context for the current thread unless it's a WorkerThread.
+  static void SetName(const std::string& name);
+
+  // Gets the thread name, if previously set by SetName.
+  static const char* GetName();
+
+  // Creates a new thread.  The |stack_size| parameter can be 0 to indicate
+  // that the default stack size should be used.  Upon success,
+  // |*thread_handle| will be assigned a handle to the newly created thread,
+  // and |delegate|'s ThreadMain method will be executed on the newly created
+  // thread.
+  // NOTE: When you are done with the thread handle, you must call Join to
+  // release system resources associated with the thread.  You must ensure that
+  // the Delegate object outlives the thread.
+  static bool Create(size_t stack_size,
+                     Delegate* delegate,
+                     PlatformThreadHandle* thread_handle) {
+    return CreateWithPriority(stack_size, delegate, thread_handle,
+                              ThreadPriority::NORMAL);
+  }
+
+  // CreateWithPriority() does the same thing as Create() except the priority of
+  // the thread is set based on |priority|.
+  static bool CreateWithPriority(size_t stack_size, Delegate* delegate,
+                                 PlatformThreadHandle* thread_handle,
+                                 ThreadPriority priority);
+
+  // CreateNonJoinable() does the same thing as Create() except the thread
+  // cannot be Join()'d.  Therefore, it also does not output a
+  // PlatformThreadHandle.
+  static bool CreateNonJoinable(size_t stack_size, Delegate* delegate);
+
+  // CreateNonJoinableWithPriority() does the same thing as CreateNonJoinable()
+  // except the priority of the thread is set based on |priority|.
+  static bool CreateNonJoinableWithPriority(size_t stack_size,
+                                            Delegate* delegate,
+                                            ThreadPriority priority);
+
+  // Joins with a thread created via the Create function.  This function blocks
+  // the caller until the designated thread exits.  This will invalidate
+  // |thread_handle|.
+  static void Join(PlatformThreadHandle thread_handle);
+
+  // Detaches and releases the thread handle. The thread is no longer joinable
+  // and |thread_handle| is invalidated after this call.
+  static void Detach(PlatformThreadHandle thread_handle);
+
+  // Returns true if SetCurrentThreadPriority() can be used to increase the
+  // priority of the current thread.
+  static bool CanIncreaseCurrentThreadPriority();
+
+  // Toggles the current thread's priority at runtime.
+  //
+  // A thread may not be able to raise its priority back up after lowering it
+  // if the process does not have the proper permission, e.g. CAP_SYS_NICE on
+  // Linux.
+  // A thread may not be able to lower its priority back down after raising it
+  // to REALTIME_AUDIO.
+  //
+  // This function must not be called from the main thread on Mac. This is to
+  // avoid performance regressions (https://crbug.com/601270).
+  //
+  // Since changing other threads' priority is not permitted for security
+  // reasons, this interface is restricted to changing only the current
+  // thread's priority (https://crbug.com/399473).
+  static void SetCurrentThreadPriority(ThreadPriority priority);
+
+  static ThreadPriority GetCurrentThreadPriority();
+
+#if defined(OS_LINUX)
+  // Toggles a specific thread's priority at runtime. This can be used to
+  // change the priority of a thread in a different process and will fail
+  // if the calling process does not have proper permissions. The
+  // SetCurrentThreadPriority() function above is preferred for security
+  // reasons, but on platforms where sandboxed processes are not allowed to
+  // change their own priority, this function allows a non-sandboxed process
+  // to change the priority of sandboxed threads for improved performance.
+  // Warning: Don't use this for a main thread because that will change the
+  // whole thread group's (i.e. process) priority.
+  static void SetThreadPriority(PlatformThreadId thread_id,
+                                ThreadPriority priority);
+#endif
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(PlatformThread);
+};
+
+}  // namespace base
+
+#endif  // BASE_THREADING_PLATFORM_THREAD_H_
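The Delegate/Create()/Join() contract above is easiest to see in a short
caller sketch (illustrative only; `Worker` and `RunWorkerOnce` are
hypothetical names, and error handling is elided):

    #include "base/threading/platform_thread.h"

    class Worker : public base::PlatformThread::Delegate {
     public:
      // Runs on the newly created thread.
      void ThreadMain() override {
        base::PlatformThread::SetName("ExampleWorker");
        // ... do background work ...
      }
    };

    void RunWorkerOnce() {
      Worker worker;  // Must outlive the thread (see NOTE above).
      base::PlatformThreadHandle handle;
      if (base::PlatformThread::Create(0 /* default stack size */, &worker,
                                       &handle)) {
        base::PlatformThread::Join(handle);  // Releases thread resources.
      }
    }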
diff --git a/base/threading/platform_thread_android.cc b/base/threading/platform_thread_android.cc
new file mode 100644
index 0000000..fd90d35
--- /dev/null
+++ b/base/threading/platform_thread_android.cc
@@ -0,0 +1,96 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/platform_thread.h"
+
+#include <errno.h>
+#include <stddef.h>
+#include <sys/prctl.h>
+#include <sys/resource.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "base/android/jni_android.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/threading/platform_thread_internal_posix.h"
+#include "base/threading/thread_id_name_manager.h"
+#include "jni/ThreadUtils_jni.h"
+
+namespace base {
+
+namespace internal {
+
+// - BACKGROUND corresponds to Android's PRIORITY_BACKGROUND = 10 value and can
+// result in heavy throttling and force the thread onto a little core on
+// big.LITTLE devices.
+// - DISPLAY corresponds to Android's PRIORITY_DISPLAY = -4 value.
+// - REALTIME_AUDIO corresponds to Android's PRIORITY_AUDIO = -16 value.
+const ThreadPriorityToNiceValuePair kThreadPriorityToNiceValueMap[4] = {
+    {ThreadPriority::BACKGROUND, 10},
+    {ThreadPriority::NORMAL, 0},
+    {ThreadPriority::DISPLAY, -4},
+    {ThreadPriority::REALTIME_AUDIO, -16},
+};
+
+bool SetCurrentThreadPriorityForPlatform(ThreadPriority priority) {
+  // On Android, we set the Audio priority through JNI as Audio priority
+  // will also allow the process to run while it is backgrounded.
+  if (priority == ThreadPriority::REALTIME_AUDIO) {
+    JNIEnv* env = base::android::AttachCurrentThread();
+    Java_ThreadUtils_setThreadPriorityAudio(env, PlatformThread::CurrentId());
+    return true;
+  }
+  return false;
+}
+
+bool GetCurrentThreadPriorityForPlatform(ThreadPriority* priority) {
+  DCHECK(priority);
+  *priority = ThreadPriority::NORMAL;
+  JNIEnv* env = base::android::AttachCurrentThread();
+  if (Java_ThreadUtils_isThreadPriorityAudio(
+      env, PlatformThread::CurrentId())) {
+    *priority = ThreadPriority::REALTIME_AUDIO;
+    return true;
+  }
+  return false;
+}
+
+}  // namespace internal
+
+void PlatformThread::SetName(const std::string& name) {
+  ThreadIdNameManager::GetInstance()->SetName(name);
+
+  // Like Linux, on Android we can get the thread names to show up in the
+  // debugger by setting the process name for the LWP.
+  // We don't want to do this for the main thread because that would rename
+  // the process, causing tools like killall to stop working.
+  if (PlatformThread::CurrentId() == getpid())
+    return;
+
+  // Set the name for the LWP (which gets truncated to 15 characters).
+  int err = prctl(PR_SET_NAME, name.c_str());
+  if (err < 0 && errno != EPERM)
+    DPLOG(ERROR) << "prctl(PR_SET_NAME)";
+}
+
+void InitThreading() {
+}
+
+void TerminateOnThread() {
+  base::android::DetachFromVM();
+}
+
+size_t GetDefaultThreadStackSize(const pthread_attr_t& attributes) {
+#if !defined(ADDRESS_SANITIZER)
+  return 0;
+#else
+  // AddressSanitizer bloats the stack approximately 2x. Default stack size of
+  // 1Mb is not enough for some tests (see http://crbug.com/263749 for example).
+  return 2 * (1 << 20);  // 2Mb
+#endif
+}
+
+}  // namespace base
diff --git a/base/threading/platform_thread_fuchsia.cc b/base/threading/platform_thread_fuchsia.cc
new file mode 100644
index 0000000..eb06795
--- /dev/null
+++ b/base/threading/platform_thread_fuchsia.cc
@@ -0,0 +1,50 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/platform_thread.h"
+
+#include <pthread.h>
+#include <sched.h>
+#include <zircon/syscalls.h>
+
+#include "base/threading/platform_thread_internal_posix.h"
+#include "base/threading/thread_id_name_manager.h"
+
+namespace base {
+
+void InitThreading() {}
+
+void TerminateOnThread() {}
+
+size_t GetDefaultThreadStackSize(const pthread_attr_t& attributes) {
+  return 0;
+}
+
+// static
+void PlatformThread::SetName(const std::string& name) {
+  zx_status_t status = zx_object_set_property(CurrentId(), ZX_PROP_NAME,
+                                              name.data(), name.size());
+  DCHECK_EQ(status, ZX_OK);
+
+  ThreadIdNameManager::GetInstance()->SetName(name);
+}
+
+// static
+bool PlatformThread::CanIncreaseCurrentThreadPriority() {
+  return false;
+}
+
+// static
+void PlatformThread::SetCurrentThreadPriority(ThreadPriority priority) {
+  if (priority != ThreadPriority::NORMAL) {
+    NOTIMPLEMENTED() << "setting ThreadPriority " << static_cast<int>(priority);
+  }
+}
+
+// static
+ThreadPriority PlatformThread::GetCurrentThreadPriority() {
+  return ThreadPriority::NORMAL;
+}
+
+}  // namespace base
diff --git a/base/threading/platform_thread_internal_posix.cc b/base/threading/platform_thread_internal_posix.cc
new file mode 100644
index 0000000..378a24d
--- /dev/null
+++ b/base/threading/platform_thread_internal_posix.cc
@@ -0,0 +1,39 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/platform_thread_internal_posix.h"
+
+#include "base/containers/adapters.h"
+#include "base/logging.h"
+
+namespace base {
+
+namespace internal {
+
+int ThreadPriorityToNiceValue(ThreadPriority priority) {
+  for (const auto& pair : kThreadPriorityToNiceValueMap) {
+    if (pair.priority == priority)
+      return pair.nice_value;
+  }
+  NOTREACHED() << "Unknown ThreadPriority";
+  return 0;
+}
+
+ThreadPriority NiceValueToThreadPriority(int nice_value) {
+  // Try to find a priority that best describes |nice_value|. If there isn't
+  // an exact match, this method returns the closest priority whose nice value
+  // is higher (lower priority) than |nice_value|.
+  for (const auto& pair : Reversed(kThreadPriorityToNiceValueMap)) {
+    if (pair.nice_value >= nice_value)
+      return pair.priority;
+  }
+
+  // Reaching here means |nice_value| is more than any of the defined
+  // priorities. The lowest priority is suitable in this case.
+  return ThreadPriority::BACKGROUND;
+}
+
+}  // namespace internal
+
+}  // namespace base
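As a concrete check of the closest-match rule, using the Android table shown
earlier ({BACKGROUND, 10}, {NORMAL, 0}, {DISPLAY, -4}, {REALTIME_AUDIO, -16}):

    // NiceValueToThreadPriority(-16) -> REALTIME_AUDIO  (exact match)
    // NiceValueToThreadPriority(-2)  -> NORMAL          (0 is the smallest
    //                                   nice value that is >= -2)
    // NiceValueToThreadPriority(20)  -> BACKGROUND      (past the end of the
    //                                   table; lowest priority wins)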
diff --git a/base/threading/platform_thread_internal_posix.h b/base/threading/platform_thread_internal_posix.h
new file mode 100644
index 0000000..5f4a215
--- /dev/null
+++ b/base/threading/platform_thread_internal_posix.h
@@ -0,0 +1,48 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_THREADING_PLATFORM_THREAD_INTERNAL_POSIX_H_
+#define BASE_THREADING_PLATFORM_THREAD_INTERNAL_POSIX_H_
+
+#include "base/base_export.h"
+#include "base/threading/platform_thread.h"
+
+namespace base {
+
+namespace internal {
+
+struct ThreadPriorityToNiceValuePair {
+  ThreadPriority priority;
+  int nice_value;
+};
+// The elements must be listed in the order of increasing priority (lowest
+// priority first), that is, in the order of decreasing nice values (highest
+// nice value first).
+BASE_EXPORT extern
+const ThreadPriorityToNiceValuePair kThreadPriorityToNiceValueMap[4];
+
+// Returns the nice value matching |priority| based on the platform-specific
+// implementation of kThreadPriorityToNiceValueMap.
+int ThreadPriorityToNiceValue(ThreadPriority priority);
+
+// Returns the ThreadPriority matching |nice_value| based on the platform-
+// specific implementation of kThreadPriorityToNiceValueMap.
+BASE_EXPORT ThreadPriority NiceValueToThreadPriority(int nice_value);
+
+// Allows platform specific tweaks to the generic POSIX solution for
+// SetCurrentThreadPriority. Returns true if the platform-specific
+// implementation handled this |priority| change, false if the generic
+// implementation should instead proceed.
+bool SetCurrentThreadPriorityForPlatform(ThreadPriority priority);
+
+// Returns true if there is a platform-specific ThreadPriority set on the
+// current thread (and returns the actual ThreadPriority via |priority|).
+// Returns false otherwise, leaving |priority| untouched.
+bool GetCurrentThreadPriorityForPlatform(ThreadPriority* priority);
+
+}  // namespace internal
+
+}  // namespace base
+
+#endif  // BASE_THREADING_PLATFORM_THREAD_INTERNAL_POSIX_H_
diff --git a/base/threading/platform_thread_linux.cc b/base/threading/platform_thread_linux.cc
new file mode 100644
index 0000000..190aced
--- /dev/null
+++ b/base/threading/platform_thread_linux.cc
@@ -0,0 +1,184 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/platform_thread.h"
+
+#include <errno.h>
+#include <sched.h>
+#include <stddef.h>
+
+#include "base/files/file_util.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/threading/platform_thread_internal_posix.h"
+#include "base/threading/thread_id_name_manager.h"
+#include "build/build_config.h"
+
+#if !defined(OS_NACL) && !defined(OS_AIX)
+#include <pthread.h>
+#include <sys/prctl.h>
+#include <sys/resource.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <unistd.h>
+#endif
+
+namespace base {
+namespace {
+#if !defined(OS_NACL)
+const FilePath::CharType kCgroupDirectory[] =
+    FILE_PATH_LITERAL("/sys/fs/cgroup");
+
+FilePath ThreadPriorityToCgroupDirectory(const FilePath& cgroup_filepath,
+                                         ThreadPriority priority) {
+  switch (priority) {
+    case ThreadPriority::NORMAL:
+      return cgroup_filepath;
+    case ThreadPriority::BACKGROUND:
+      return cgroup_filepath.Append(FILE_PATH_LITERAL("non-urgent"));
+    case ThreadPriority::DISPLAY:
+    case ThreadPriority::REALTIME_AUDIO:
+      return cgroup_filepath.Append(FILE_PATH_LITERAL("urgent"));
+  }
+  NOTREACHED();
+  return FilePath();
+}
+
+void SetThreadCgroup(PlatformThreadId thread_id,
+                     const FilePath& cgroup_directory) {
+  FilePath tasks_filepath = cgroup_directory.Append(FILE_PATH_LITERAL("tasks"));
+  std::string tid = IntToString(thread_id);
+  int bytes_written = WriteFile(tasks_filepath, tid.c_str(), tid.size());
+  if (bytes_written != static_cast<int>(tid.size())) {
+    DVLOG(1) << "Failed to add " << tid << " to " << tasks_filepath.value();
+  }
+}
+
+void SetThreadCgroupForThreadPriority(PlatformThreadId thread_id,
+                                      const FilePath& cgroup_filepath,
+                                      ThreadPriority priority) {
+  // Append "chrome" suffix.
+  FilePath cgroup_directory = ThreadPriorityToCgroupDirectory(
+      cgroup_filepath.Append(FILE_PATH_LITERAL("chrome")), priority);
+
+  // Silently ignore request if cgroup directory doesn't exist.
+  if (!DirectoryExists(cgroup_directory))
+    return;
+
+  SetThreadCgroup(thread_id, cgroup_directory);
+}
+
+void SetThreadCgroupsForThreadPriority(PlatformThreadId thread_id,
+                                       ThreadPriority priority) {
+  FilePath cgroup_filepath(kCgroupDirectory);
+  SetThreadCgroupForThreadPriority(
+      thread_id, cgroup_filepath.Append(FILE_PATH_LITERAL("cpuset")), priority);
+  SetThreadCgroupForThreadPriority(
+      thread_id, cgroup_filepath.Append(FILE_PATH_LITERAL("schedtune")),
+      priority);
+}
+#endif
+}  // namespace
+
+namespace internal {
+
+namespace {
+#if !defined(OS_NACL)
+const struct sched_param kRealTimePrio = {8};
+#endif
+}  // namespace
+
+const ThreadPriorityToNiceValuePair kThreadPriorityToNiceValueMap[4] = {
+    {ThreadPriority::BACKGROUND, 10},
+    {ThreadPriority::NORMAL, 0},
+    {ThreadPriority::DISPLAY, -8},
+    {ThreadPriority::REALTIME_AUDIO, -10},
+};
+
+bool SetCurrentThreadPriorityForPlatform(ThreadPriority priority) {
+#if !defined(OS_NACL)
+  SetThreadCgroupsForThreadPriority(PlatformThread::CurrentId(), priority);
+  return priority == ThreadPriority::REALTIME_AUDIO &&
+         pthread_setschedparam(pthread_self(), SCHED_RR, &kRealTimePrio) == 0;
+#else
+  return false;
+#endif
+}
+
+bool GetCurrentThreadPriorityForPlatform(ThreadPriority* priority) {
+#if !defined(OS_NACL)
+  int maybe_sched_rr = 0;
+  struct sched_param maybe_realtime_prio = {0};
+  if (pthread_getschedparam(pthread_self(), &maybe_sched_rr,
+                            &maybe_realtime_prio) == 0 &&
+      maybe_sched_rr == SCHED_RR &&
+      maybe_realtime_prio.sched_priority == kRealTimePrio.sched_priority) {
+    *priority = ThreadPriority::REALTIME_AUDIO;
+    return true;
+  }
+#endif
+  return false;
+}
+
+}  // namespace internal
+
+// static
+void PlatformThread::SetName(const std::string& name) {
+  ThreadIdNameManager::GetInstance()->SetName(name);
+
+#if !defined(OS_NACL) && !defined(OS_AIX)
+  // On Linux we can get the thread names to show up in the debugger by setting
+  // the process name for the LWP.  We don't want to do this for the main
+  // thread because that would rename the process, causing tools like killall
+  // to stop working.
+  if (PlatformThread::CurrentId() == getpid())
+    return;
+
+  // http://0pointer.de/blog/projects/name-your-threads.html
+  // Set the name for the LWP (which gets truncated to 15 characters).
+  // Note that glibc also has a 'pthread_setname_np' api, but it may not be
+  // available everywhere and its only benefit over using prctl directly is
+  // that it can set the name of threads other than the current thread.
+  int err = prctl(PR_SET_NAME, name.c_str());
+  // We expect EPERM failures in sandboxed processes, just ignore those.
+  if (err < 0 && errno != EPERM)
+    DPLOG(ERROR) << "prctl(PR_SET_NAME)";
+#endif  //  !defined(OS_NACL) && !defined(OS_AIX)
+}
+
+#if !defined(OS_NACL) && !defined(OS_AIX)
+// static
+void PlatformThread::SetThreadPriority(PlatformThreadId thread_id,
+                                       ThreadPriority priority) {
+  // Changing the main thread's priority is not permitted for security
+  // reasons; this interface is restricted to changing only non-main thread
+  // priorities.
+  CHECK_NE(thread_id, getpid());
+
+  SetThreadCgroupsForThreadPriority(thread_id, priority);
+
+  const int nice_setting = internal::ThreadPriorityToNiceValue(priority);
+  if (setpriority(PRIO_PROCESS, thread_id, nice_setting)) {
+    DVPLOG(1) << "Failed to set nice value of thread (" << thread_id << ") to "
+              << nice_setting;
+  }
+}
+#endif  //  !defined(OS_NACL) && !defined(OS_AIX)
+
+void InitThreading() {}
+
+void TerminateOnThread() {}
+
+size_t GetDefaultThreadStackSize(const pthread_attr_t& attributes) {
+#if !defined(THREAD_SANITIZER)
+  return 0;
+#else
+  // ThreadSanitizer bloats the stack heavily; the default stack size has
+  // proven insufficient for some browser tests.
+  return 2 * (1 << 23);  // 2 times 8192K (the default stack size on Linux).
+#endif
+}
+
+}  // namespace base
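An illustrative sketch of the Linux-only cross-process path above
(|worker_tid| is a hypothetical thread id received over IPC from a sandboxed
process):

    // Runs in a non-sandboxed (e.g. browser) process.
    void BackgroundSandboxedWorker(base::PlatformThreadId worker_tid) {
      base::PlatformThread::SetThreadPriority(worker_tid,
                                              base::ThreadPriority::BACKGROUND);
    }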
diff --git a/base/threading/platform_thread_mac.mm b/base/threading/platform_thread_mac.mm
new file mode 100644
index 0000000..39d979d
--- /dev/null
+++ b/base/threading/platform_thread_mac.mm
@@ -0,0 +1,240 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/platform_thread.h"
+
+#import <Foundation/Foundation.h>
+#include <mach/mach.h>
+#include <mach/mach_time.h>
+#include <mach/thread_policy.h>
+#include <stddef.h>
+#include <sys/resource.h>
+
+#include <algorithm>
+
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/mac/foundation_util.h"
+#include "base/mac/mach_logging.h"
+#include "base/threading/thread_id_name_manager.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace {
+NSString* const kThreadPriorityKey = @"CrThreadPriorityKey";
+}  // namespace
+
+// If Cocoa is to be used on more than one thread, it must know that the
+// application is multithreaded.  Since it's possible to enter Cocoa code
+// from threads created by pthread_create, Cocoa won't necessarily
+// be aware that the application is multithreaded.  Spawning an NSThread is
+// enough to get Cocoa to set up for multithreaded operation, so this is done
+// if necessary before pthread_create spawns any threads.
+//
+// http://developer.apple.com/documentation/Cocoa/Conceptual/Multithreading/CreatingThreads/chapter_4_section_4.html
+void InitThreading() {
+  static BOOL multithreaded = [NSThread isMultiThreaded];
+  if (!multithreaded) {
+    // +[NSObject class] is idempotent.
+    [NSThread detachNewThreadSelector:@selector(class)
+                             toTarget:[NSObject class]
+                           withObject:nil];
+    multithreaded = YES;
+
+    DCHECK([NSThread isMultiThreaded]);
+  }
+}
+
+// static
+void PlatformThread::SetName(const std::string& name) {
+  ThreadIdNameManager::GetInstance()->SetName(name);
+
+  // Mac OS X does not expose the length limit of the name, so
+  // hardcode it.
+  const int kMaxNameLength = 63;
+  std::string shortened_name = name.substr(0, kMaxNameLength);
+  // pthread_setname_np() fails (harmlessly) in the sandbox; ignore when it does.
+  // See http://crbug.com/47058
+  pthread_setname_np(shortened_name.c_str());
+}
+
+namespace {
+
+// Enables time-constraint policy and priority suitable for low-latency,
+// glitch-resistant audio.
+void SetPriorityRealtimeAudio() {
+  // Increase thread priority to real-time.
+
+  // Please note that the thread_policy_set() calls may fail in
+  // rare cases if the kernel decides the system is under heavy load
+  // and is unable to handle boosting the thread priority.
+  // In these cases we just return early and go on with life.
+
+  mach_port_t mach_thread_id =
+      pthread_mach_thread_np(PlatformThread::CurrentHandle().platform_handle());
+
+  // Make thread fixed priority.
+  thread_extended_policy_data_t policy;
+  policy.timeshare = 0;  // Set to 1 for a non-fixed thread.
+  kern_return_t result =
+      thread_policy_set(mach_thread_id,
+                        THREAD_EXTENDED_POLICY,
+                        reinterpret_cast<thread_policy_t>(&policy),
+                        THREAD_EXTENDED_POLICY_COUNT);
+  if (result != KERN_SUCCESS) {
+    MACH_DVLOG(1, result) << "thread_policy_set";
+    return;
+  }
+
+  // Set to relatively high priority.
+  thread_precedence_policy_data_t precedence;
+  precedence.importance = 63;
+  result = thread_policy_set(mach_thread_id,
+                             THREAD_PRECEDENCE_POLICY,
+                             reinterpret_cast<thread_policy_t>(&precedence),
+                             THREAD_PRECEDENCE_POLICY_COUNT);
+  if (result != KERN_SUCCESS) {
+    MACH_DVLOG(1, result) << "thread_policy_set";
+    return;
+  }
+
+  // Most important, set real-time constraints.
+
+  // Define the guaranteed and max fraction of time for the audio thread.
+  // These "duty cycle" values can range from 0 to 1.  A value of 0.5
+  // means the scheduler would give half the time to the thread.
+  // These values have empirically been found to yield good behavior.
+  // Good means that audio performance is high and other threads won't starve.
+  const double kGuaranteedAudioDutyCycle = 0.75;
+  const double kMaxAudioDutyCycle = 0.85;
+
+  // Define constants determining how much time the audio thread can
+  // use in a given time quantum.  All times are in milliseconds.
+
+  // About 128 frames @44.1KHz
+  const double kTimeQuantum = 2.9;
+
+  // Time guaranteed each quantum.
+  const double kAudioTimeNeeded = kGuaranteedAudioDutyCycle * kTimeQuantum;
+
+  // Maximum time each quantum.
+  const double kMaxTimeAllowed = kMaxAudioDutyCycle * kTimeQuantum;
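+  // With the constants above: kAudioTimeNeeded = 0.75 * 2.9 = 2.175 ms and
+  // kMaxTimeAllowed = 0.85 * 2.9 = 2.465 ms out of each 2.9 ms period.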
+
+  // Get the conversion factor from milliseconds to absolute time
+  // which is what the time-constraints call needs.
+  mach_timebase_info_data_t tb_info;
+  mach_timebase_info(&tb_info);
+  double ms_to_abs_time =
+      (static_cast<double>(tb_info.denom) / tb_info.numer) * 1000000;
+
+  thread_time_constraint_policy_data_t time_constraints;
+  time_constraints.period = kTimeQuantum * ms_to_abs_time;
+  time_constraints.computation = kAudioTimeNeeded * ms_to_abs_time;
+  time_constraints.constraint = kMaxTimeAllowed * ms_to_abs_time;
+  time_constraints.preemptible = 0;
+
+  result =
+      thread_policy_set(mach_thread_id,
+                        THREAD_TIME_CONSTRAINT_POLICY,
+                        reinterpret_cast<thread_policy_t>(&time_constraints),
+                        THREAD_TIME_CONSTRAINT_POLICY_COUNT);
+  MACH_DVLOG_IF(1, result != KERN_SUCCESS, result) << "thread_policy_set";
+
+  return;
+}
+
+}  // anonymous namespace
+
+// static
+bool PlatformThread::CanIncreaseCurrentThreadPriority() {
+  return true;
+}
+
+// static
+void PlatformThread::SetCurrentThreadPriority(ThreadPriority priority) {
+  // Changing the priority of the main thread causes performance regressions.
+  // https://crbug.com/601270
+  DCHECK(![[NSThread currentThread] isMainThread]);
+
+  switch (priority) {
+    case ThreadPriority::BACKGROUND:
+      [[NSThread currentThread] setThreadPriority:0];
+      break;
+    case ThreadPriority::NORMAL:
+    case ThreadPriority::DISPLAY:
+      [[NSThread currentThread] setThreadPriority:0.5];
+      break;
+    case ThreadPriority::REALTIME_AUDIO:
+      SetPriorityRealtimeAudio();
+      DCHECK_EQ([[NSThread currentThread] threadPriority], 1.0);
+      break;
+  }
+
+  [[[NSThread currentThread] threadDictionary]
+      setObject:@(static_cast<int>(priority))
+         forKey:kThreadPriorityKey];
+}
+
+// static
+ThreadPriority PlatformThread::GetCurrentThreadPriority() {
+  NSNumber* priority = base::mac::ObjCCast<NSNumber>([[[NSThread currentThread]
+      threadDictionary] objectForKey:kThreadPriorityKey]);
+
+  if (!priority)
+    return ThreadPriority::NORMAL;
+
+  ThreadPriority thread_priority =
+      static_cast<ThreadPriority>(priority.intValue);
+  switch (thread_priority) {
+    case ThreadPriority::BACKGROUND:
+    case ThreadPriority::NORMAL:
+    case ThreadPriority::DISPLAY:
+    case ThreadPriority::REALTIME_AUDIO:
+      return thread_priority;
+    default:
+      NOTREACHED() << "Unknown priority.";
+      return ThreadPriority::NORMAL;
+  }
+}
+
+size_t GetDefaultThreadStackSize(const pthread_attr_t& attributes) {
+#if defined(OS_IOS)
+  return 0;
+#else
+  // The Mac OS X default for a pthread stack size is 512kB.
+  // Libc-594.1.4/pthreads/pthread.c's pthread_attr_init uses
+  // DEFAULT_STACK_SIZE for this purpose.
+  //
+  // 512kB isn't quite generous enough for some deeply recursive threads that
+  // otherwise request the default stack size by specifying 0. Here, adopt
+  // glibc's behavior as on Linux, which is to use the current stack size
+  // limit (ulimit -s) as the default stack size. See
+  // glibc-2.11.1/nptl/nptl-init.c's __pthread_initialize_minimal_internal. To
+  // avoid setting the limit below the Mac OS X default or the minimum usable
+  // stack size, these values are also considered. If any of these values
+  // can't be determined, or if stack size is unlimited (ulimit -s unlimited),
+  // stack_size is left at 0 to get the system default.
+  //
+  // Mac OS X normally only applies ulimit -s to the main thread stack. On
+  // contemporary OS X and Linux systems alike, this value is generally 8MB
+  // or in that neighborhood.
+  size_t default_stack_size = 0;
+  struct rlimit stack_rlimit;
+  if (pthread_attr_getstacksize(&attributes, &default_stack_size) == 0 &&
+      getrlimit(RLIMIT_STACK, &stack_rlimit) == 0 &&
+      stack_rlimit.rlim_cur != RLIM_INFINITY) {
+    default_stack_size =
+        std::max(std::max(default_stack_size,
+                          static_cast<size_t>(PTHREAD_STACK_MIN)),
+                 static_cast<size_t>(stack_rlimit.rlim_cur));
+  }
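+  // e.g. with the common 8MB "ulimit -s", the 512kB default above is raised
+  // to 8MB here; if a call fails or the limit is unlimited, the value written
+  // by pthread_attr_getstacksize() (or 0 on failure) is returned as-is.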
+  return default_stack_size;
+#endif
+}
+
+void TerminateOnThread() {
+}
+
+}  // namespace base
diff --git a/base/threading/platform_thread_posix.cc b/base/threading/platform_thread_posix.cc
new file mode 100644
index 0000000..2466b78
--- /dev/null
+++ b/base/threading/platform_thread_posix.cc
@@ -0,0 +1,305 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/platform_thread.h"
+
+#include <errno.h>
+#include <pthread.h>
+#include <sched.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <memory>
+
+#include "base/debug/activity_tracker.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/threading/platform_thread_internal_posix.h"
+#include "base/threading/thread_id_name_manager.h"
+#include "base/threading/thread_restrictions.h"
+#include "build/build_config.h"
+
+#if defined(OS_LINUX)
+#include <sys/syscall.h>
+#endif
+
+#if defined(OS_FUCHSIA)
+#include <zircon/process.h>
+#else
+#include <sys/resource.h>
+#endif
+
+namespace base {
+
+void InitThreading();
+void TerminateOnThread();
+size_t GetDefaultThreadStackSize(const pthread_attr_t& attributes);
+
+namespace {
+
+struct ThreadParams {
+  ThreadParams()
+      : delegate(nullptr), joinable(false), priority(ThreadPriority::NORMAL) {}
+
+  PlatformThread::Delegate* delegate;
+  bool joinable;
+  ThreadPriority priority;
+};
+
+void* ThreadFunc(void* params) {
+  PlatformThread::Delegate* delegate = nullptr;
+
+  {
+    std::unique_ptr<ThreadParams> thread_params(
+        static_cast<ThreadParams*>(params));
+
+    delegate = thread_params->delegate;
+    if (!thread_params->joinable)
+      base::ThreadRestrictions::SetSingletonAllowed(false);
+
+#if !defined(OS_NACL)
+    // Threads on linux/android may inherit their priority from the thread
+    // where they were created. This explicitly sets the priority of all new
+    // threads.
+    PlatformThread::SetCurrentThreadPriority(thread_params->priority);
+#endif
+  }
+
+  ThreadIdNameManager::GetInstance()->RegisterThread(
+      PlatformThread::CurrentHandle().platform_handle(),
+      PlatformThread::CurrentId());
+
+  delegate->ThreadMain();
+
+  ThreadIdNameManager::GetInstance()->RemoveName(
+      PlatformThread::CurrentHandle().platform_handle(),
+      PlatformThread::CurrentId());
+
+  base::TerminateOnThread();
+  return nullptr;
+}
+
+bool CreateThread(size_t stack_size,
+                  bool joinable,
+                  PlatformThread::Delegate* delegate,
+                  PlatformThreadHandle* thread_handle,
+                  ThreadPriority priority) {
+  DCHECK(thread_handle);
+  base::InitThreading();
+
+  pthread_attr_t attributes;
+  pthread_attr_init(&attributes);
+
+  // Pthreads are joinable by default, so only specify the detached
+  // attribute if the thread should be non-joinable.
+  if (!joinable)
+    pthread_attr_setdetachstate(&attributes, PTHREAD_CREATE_DETACHED);
+
+  // Get a better default if available.
+  if (stack_size == 0)
+    stack_size = base::GetDefaultThreadStackSize(attributes);
+
+  if (stack_size > 0)
+    pthread_attr_setstacksize(&attributes, stack_size);
+
+  std::unique_ptr<ThreadParams> params(new ThreadParams);
+  params->delegate = delegate;
+  params->joinable = joinable;
+  params->priority = priority;
+
+  pthread_t handle;
+  int err = pthread_create(&handle, &attributes, ThreadFunc, params.get());
+  bool success = !err;
+  if (success) {
+    // ThreadParams should be deleted on the created thread after use.
+    ignore_result(params.release());
+  } else {
+    // Value of |handle| is undefined if pthread_create fails.
+    handle = 0;
+    errno = err;
+    PLOG(ERROR) << "pthread_create";
+  }
+  *thread_handle = PlatformThreadHandle(handle);
+
+  pthread_attr_destroy(&attributes);
+
+  return success;
+}
+
+}  // namespace
+
+// static
+PlatformThreadId PlatformThread::CurrentId() {
+  // Pthreads doesn't have the concept of a thread ID, so we have to reach down
+  // into the kernel.
+#if defined(OS_MACOSX)
+  return pthread_mach_thread_np(pthread_self());
+#elif defined(OS_LINUX)
+  return syscall(__NR_gettid);
+#elif defined(OS_ANDROID)
+  return gettid();
+#elif defined(OS_FUCHSIA)
+  return zx_thread_self();
+#elif defined(OS_SOLARIS) || defined(OS_QNX)
+  return pthread_self();
+#elif defined(OS_NACL) && defined(__GLIBC__)
+  return pthread_self();
+#elif defined(OS_NACL) && !defined(__GLIBC__)
+  // Pointers are 32-bits in NaCl.
+  return reinterpret_cast<int32_t>(pthread_self());
+#elif defined(OS_POSIX) && defined(OS_AIX)
+  return pthread_self();
+#elif defined(OS_POSIX) && !defined(OS_AIX)
+  return reinterpret_cast<int64_t>(pthread_self());
+#endif
+}
+
+// static
+PlatformThreadRef PlatformThread::CurrentRef() {
+  return PlatformThreadRef(pthread_self());
+}
+
+// static
+PlatformThreadHandle PlatformThread::CurrentHandle() {
+  return PlatformThreadHandle(pthread_self());
+}
+
+// static
+void PlatformThread::YieldCurrentThread() {
+  sched_yield();
+}
+
+// static
+void PlatformThread::Sleep(TimeDelta duration) {
+  struct timespec sleep_time, remaining;
+
+  // Break the duration into seconds and nanoseconds.
+  // NOTE: TimeDelta's microseconds are int64s while timespec's
+  // nanoseconds are longs, so this unpacking must prevent overflow.
+  sleep_time.tv_sec = duration.InSeconds();
+  duration -= TimeDelta::FromSeconds(sleep_time.tv_sec);
+  sleep_time.tv_nsec = duration.InMicroseconds() * 1000;  // nanoseconds
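+  // e.g. a 2.5 second TimeDelta unpacks to {tv_sec: 2, tv_nsec: 500000000}.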
+
+  while (nanosleep(&sleep_time, &remaining) == -1 && errno == EINTR)
+    sleep_time = remaining;
+}
+
+// static
+const char* PlatformThread::GetName() {
+  return ThreadIdNameManager::GetInstance()->GetName(CurrentId());
+}
+
+// static
+bool PlatformThread::CreateWithPriority(size_t stack_size, Delegate* delegate,
+                                        PlatformThreadHandle* thread_handle,
+                                        ThreadPriority priority) {
+  return CreateThread(stack_size, true /* joinable thread */, delegate,
+                      thread_handle, priority);
+}
+
+// static
+bool PlatformThread::CreateNonJoinable(size_t stack_size, Delegate* delegate) {
+  return CreateNonJoinableWithPriority(stack_size, delegate,
+                                       ThreadPriority::NORMAL);
+}
+
+// static
+bool PlatformThread::CreateNonJoinableWithPriority(size_t stack_size,
+                                                   Delegate* delegate,
+                                                   ThreadPriority priority) {
+  PlatformThreadHandle unused;
+
+  bool result = CreateThread(stack_size, false /* non-joinable thread */,
+                             delegate, &unused, priority);
+  return result;
+}
+
+// static
+void PlatformThread::Join(PlatformThreadHandle thread_handle) {
+  // Record the event that this thread is blocking upon (for hang diagnosis).
+  base::debug::ScopedThreadJoinActivity thread_activity(&thread_handle);
+
+  // Joining another thread may block the current thread for a long time, since
+  // the thread referred to by |thread_handle| may still be running long-lived /
+  // blocking tasks.
+  AssertBlockingAllowed();
+  CHECK_EQ(0, pthread_join(thread_handle.platform_handle(), nullptr));
+}
+
+// static
+void PlatformThread::Detach(PlatformThreadHandle thread_handle) {
+  CHECK_EQ(0, pthread_detach(thread_handle.platform_handle()));
+}
+
+// Mac and Fuchsia have their own Set/GetCurrentThreadPriority()
+// implementations.
+#if !defined(OS_MACOSX) && !defined(OS_FUCHSIA)
+
+// static
+bool PlatformThread::CanIncreaseCurrentThreadPriority() {
+#if defined(OS_NACL)
+  return false;
+#else
+  // Only root can raise thread priority in a POSIX environment. On Linux,
+  // users with the CAP_SYS_NICE capability can also raise thread priority,
+  // but libcap.so would be needed to check for it.
+  return geteuid() == 0;
+#endif  // defined(OS_NACL)
+}
+
+// static
+void PlatformThread::SetCurrentThreadPriority(ThreadPriority priority) {
+#if defined(OS_NACL)
+  NOTIMPLEMENTED();
+#else
+  if (internal::SetCurrentThreadPriorityForPlatform(priority))
+    return;
+
+  // setpriority(2) should change the whole thread group's (i.e. process)
+  // priority. However, as stated in the bugs section of
+  // http://man7.org/linux/man-pages/man2/getpriority.2.html: "under the current
+  // Linux/NPTL implementation of POSIX threads, the nice value is a per-thread
+  // attribute". Also, 0 is prefered to the current thread id since it is
+  // equivalent but makes sandboxing easier (https://crbug.com/399473).
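+  // e.g. per internal::kThreadPriorityToNiceValueMap (see
+  // platform_thread_internal_posix.h), BACKGROUND maps to a higher (nicer)
+  // value than NORMAL, and DISPLAY and REALTIME_AUDIO to progressively lower
+  // ones; the exact values are platform-specific.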
+  const int nice_setting = internal::ThreadPriorityToNiceValue(priority);
+  if (setpriority(PRIO_PROCESS, 0, nice_setting)) {
+    DVPLOG(1) << "Failed to set nice value of thread ("
+              << PlatformThread::CurrentId() << ") to " << nice_setting;
+  }
+#endif  // defined(OS_NACL)
+}
+
+// static
+ThreadPriority PlatformThread::GetCurrentThreadPriority() {
+#if defined(OS_NACL)
+  NOTIMPLEMENTED();
+  return ThreadPriority::NORMAL;
+#else
+  // Mirrors SetCurrentThreadPriority()'s implementation.
+  ThreadPriority platform_specific_priority;
+  if (internal::GetCurrentThreadPriorityForPlatform(
+          &platform_specific_priority)) {
+    return platform_specific_priority;
+  }
+
+  // Need to clear errno before calling getpriority():
+  // http://man7.org/linux/man-pages/man2/getpriority.2.html
+  errno = 0;
+  int nice_value = getpriority(PRIO_PROCESS, 0);
+  if (errno != 0) {
+    DVPLOG(1) << "Failed to get nice value of thread ("
+              << PlatformThread::CurrentId() << ")";
+    return ThreadPriority::NORMAL;
+  }
+
+  return internal::NiceValueToThreadPriority(nice_value);
+#endif  // defined(OS_NACL)
+}
+
+#endif  // !defined(OS_MACOSX) && !defined(OS_FUCHSIA)
+
+}  // namespace base
diff --git a/base/threading/platform_thread_unittest.cc b/base/threading/platform_thread_unittest.cc
new file mode 100644
index 0000000..7eea22e
--- /dev/null
+++ b/base/threading/platform_thread_unittest.cc
@@ -0,0 +1,367 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/platform_thread.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_POSIX)
+#include "base/threading/platform_thread_internal_posix.h"
+#elif defined(OS_WIN)
+#include <windows.h>
+#endif
+
+namespace base {
+
+// Trivial tests that thread runs and doesn't crash on create, join, or detach -
+
+namespace {
+
+class TrivialThread : public PlatformThread::Delegate {
+ public:
+  TrivialThread() : run_event_(WaitableEvent::ResetPolicy::MANUAL,
+                               WaitableEvent::InitialState::NOT_SIGNALED) {}
+
+  void ThreadMain() override { run_event_.Signal(); }
+
+  WaitableEvent& run_event() { return run_event_; }
+
+ private:
+  WaitableEvent run_event_;
+
+  DISALLOW_COPY_AND_ASSIGN(TrivialThread);
+};
+
+}  // namespace
+
+TEST(PlatformThreadTest, TrivialJoin) {
+  TrivialThread thread;
+  PlatformThreadHandle handle;
+
+  ASSERT_FALSE(thread.run_event().IsSignaled());
+  ASSERT_TRUE(PlatformThread::Create(0, &thread, &handle));
+  PlatformThread::Join(handle);
+  ASSERT_TRUE(thread.run_event().IsSignaled());
+}
+
+TEST(PlatformThreadTest, TrivialJoinTimesTen) {
+  TrivialThread thread[10];
+  PlatformThreadHandle handle[arraysize(thread)];
+
+  for (size_t n = 0; n < arraysize(thread); n++)
+    ASSERT_FALSE(thread[n].run_event().IsSignaled());
+  for (size_t n = 0; n < arraysize(thread); n++)
+    ASSERT_TRUE(PlatformThread::Create(0, &thread[n], &handle[n]));
+  for (size_t n = 0; n < arraysize(thread); n++)
+    PlatformThread::Join(handle[n]);
+  for (size_t n = 0; n < arraysize(thread); n++)
+    ASSERT_TRUE(thread[n].run_event().IsSignaled());
+}
+
+// The following detach tests are by nature racy. The run_event approximates
+// the thread's termination, but the thread could persist shortly after the
+// test completes.
+TEST(PlatformThreadTest, TrivialDetach) {
+  TrivialThread thread;
+  PlatformThreadHandle handle;
+
+  ASSERT_FALSE(thread.run_event().IsSignaled());
+  ASSERT_TRUE(PlatformThread::Create(0, &thread, &handle));
+  PlatformThread::Detach(handle);
+  thread.run_event().Wait();
+}
+
+TEST(PlatformThreadTest, TrivialDetachTimesTen) {
+  TrivialThread thread[10];
+  PlatformThreadHandle handle[arraysize(thread)];
+
+  for (size_t n = 0; n < arraysize(thread); n++)
+    ASSERT_FALSE(thread[n].run_event().IsSignaled());
+  for (size_t n = 0; n < arraysize(thread); n++) {
+    ASSERT_TRUE(PlatformThread::Create(0, &thread[n], &handle[n]));
+    PlatformThread::Detach(handle[n]);
+  }
+  for (size_t n = 0; n < arraysize(thread); n++)
+    thread[n].run_event().Wait();
+}
+
+// Tests of basic thread functions ---------------------------------------------
+
+namespace {
+
+class FunctionTestThread : public PlatformThread::Delegate {
+ public:
+  FunctionTestThread()
+      : thread_id_(kInvalidThreadId),
+        termination_ready_(WaitableEvent::ResetPolicy::MANUAL,
+                           WaitableEvent::InitialState::NOT_SIGNALED),
+        terminate_thread_(WaitableEvent::ResetPolicy::MANUAL,
+                          WaitableEvent::InitialState::NOT_SIGNALED),
+        done_(false) {}
+  ~FunctionTestThread() override {
+    EXPECT_TRUE(terminate_thread_.IsSignaled())
+        << "Need to mark thread for termination and join the underlying thread "
+        << "before destroying a FunctionTestThread as it owns the "
+        << "WaitableEvent blocking the underlying thread's main.";
+  }
+
+  // Grabs |thread_id_|, runs an optional test on that thread, signals
+  // |termination_ready_|, and then waits for |terminate_thread_| to be
+  // signaled before exiting.
+  void ThreadMain() override {
+    thread_id_ = PlatformThread::CurrentId();
+    EXPECT_NE(thread_id_, kInvalidThreadId);
+
+    // Make sure that the thread ID is the same across calls.
+    EXPECT_EQ(thread_id_, PlatformThread::CurrentId());
+
+    // Run extra tests.
+    RunTest();
+
+    termination_ready_.Signal();
+    terminate_thread_.Wait();
+
+    done_ = true;
+  }
+
+  PlatformThreadId thread_id() const {
+    EXPECT_TRUE(termination_ready_.IsSignaled()) << "Thread ID still unknown";
+    return thread_id_;
+  }
+
+  bool IsRunning() const {
+    return termination_ready_.IsSignaled() && !done_;
+  }
+
+  // Blocks until this thread is started and ready to be terminated.
+  void WaitForTerminationReady() { termination_ready_.Wait(); }
+
+  // Marks this thread for termination (callers must then join this thread to
+  // be guaranteed that it has terminated).
+  void MarkForTermination() { terminate_thread_.Signal(); }
+
+ private:
+  // Runs an optional test on the newly created thread.
+  virtual void RunTest() {}
+
+  PlatformThreadId thread_id_;
+
+  mutable WaitableEvent termination_ready_;
+  WaitableEvent terminate_thread_;
+  bool done_;
+
+  DISALLOW_COPY_AND_ASSIGN(FunctionTestThread);
+};
+
+}  // namespace
+
+TEST(PlatformThreadTest, Function) {
+  PlatformThreadId main_thread_id = PlatformThread::CurrentId();
+
+  FunctionTestThread thread;
+  PlatformThreadHandle handle;
+
+  ASSERT_FALSE(thread.IsRunning());
+  ASSERT_TRUE(PlatformThread::Create(0, &thread, &handle));
+  thread.WaitForTerminationReady();
+  ASSERT_TRUE(thread.IsRunning());
+  EXPECT_NE(thread.thread_id(), main_thread_id);
+
+  thread.MarkForTermination();
+  PlatformThread::Join(handle);
+  ASSERT_FALSE(thread.IsRunning());
+
+  // Make sure that the thread ID is the same across calls.
+  EXPECT_EQ(main_thread_id, PlatformThread::CurrentId());
+}
+
+TEST(PlatformThreadTest, FunctionTimesTen) {
+  PlatformThreadId main_thread_id = PlatformThread::CurrentId();
+
+  FunctionTestThread thread[10];
+  PlatformThreadHandle handle[arraysize(thread)];
+
+  for (size_t n = 0; n < arraysize(thread); n++)
+    ASSERT_FALSE(thread[n].IsRunning());
+
+  for (size_t n = 0; n < arraysize(thread); n++)
+    ASSERT_TRUE(PlatformThread::Create(0, &thread[n], &handle[n]));
+  for (size_t n = 0; n < arraysize(thread); n++)
+    thread[n].WaitForTerminationReady();
+
+  for (size_t n = 0; n < arraysize(thread); n++) {
+    ASSERT_TRUE(thread[n].IsRunning());
+    EXPECT_NE(thread[n].thread_id(), main_thread_id);
+
+    // Make sure no two threads get the same ID.
+    for (size_t i = 0; i < n; ++i) {
+      EXPECT_NE(thread[i].thread_id(), thread[n].thread_id());
+    }
+  }
+
+  for (size_t n = 0; n < arraysize(thread); n++)
+    thread[n].MarkForTermination();
+  for (size_t n = 0; n < arraysize(thread); n++)
+    PlatformThread::Join(handle[n]);
+  for (size_t n = 0; n < arraysize(thread); n++)
+    ASSERT_FALSE(thread[n].IsRunning());
+
+  // Make sure that the thread ID is the same across calls.
+  EXPECT_EQ(main_thread_id, PlatformThread::CurrentId());
+}
+
+namespace {
+
+const ThreadPriority kThreadPriorityTestValues[] = {
+// The order should be from higher to lower priority to cover as many cases as
+// possible on Linux trybots running without CAP_SYS_NICE permission.
+#if !defined(OS_ANDROID)
+    // PlatformThread::GetCurrentThreadPriority() on Android does not support
+    // REALTIME_AUDIO case. See http://crbug.com/505474.
+    ThreadPriority::REALTIME_AUDIO,
+#endif
+    ThreadPriority::DISPLAY,
+    // This redundant BACKGROUND priority is to test backgrounding from other
+    // priorities, and unbackgrounding.
+    ThreadPriority::BACKGROUND,
+    ThreadPriority::NORMAL,
+    ThreadPriority::BACKGROUND};
+
+class ThreadPriorityTestThread : public FunctionTestThread {
+ public:
+  explicit ThreadPriorityTestThread(ThreadPriority priority)
+      : priority_(priority) {}
+  ~ThreadPriorityTestThread() override = default;
+
+ private:
+  void RunTest() override {
+    // Confirm that the current thread's priority is as expected.
+    EXPECT_EQ(ThreadPriority::NORMAL,
+              PlatformThread::GetCurrentThreadPriority());
+
+    // Alter and verify the current thread's priority.
+    PlatformThread::SetCurrentThreadPriority(priority_);
+    EXPECT_EQ(priority_, PlatformThread::GetCurrentThreadPriority());
+  }
+
+  const ThreadPriority priority_;
+
+  DISALLOW_COPY_AND_ASSIGN(ThreadPriorityTestThread);
+};
+
+}  // namespace
+
+// Test changing a created thread's priority (which has different semantics on
+// some platforms).
+TEST(PlatformThreadTest, ThreadPriorityCurrentThread) {
+  const bool increase_priority_allowed =
+      PlatformThread::CanIncreaseCurrentThreadPriority();
+
+// Bump the priority in order to verify that new threads are started with normal
+// priority. Skip this on Mac since this platform doesn't allow changing the
+// priority of the main thread. Also skip this on platforms that don't allow
+// increasing the priority of a thread.
+#if !defined(OS_MACOSX)
+  if (increase_priority_allowed)
+    PlatformThread::SetCurrentThreadPriority(ThreadPriority::DISPLAY);
+#endif
+
+  // Toggle each supported priority on the thread and confirm it takes effect.
+  for (size_t i = 0; i < arraysize(kThreadPriorityTestValues); ++i) {
+    if (!increase_priority_allowed &&
+        kThreadPriorityTestValues[i] >
+            PlatformThread::GetCurrentThreadPriority()) {
+      continue;
+    }
+
+    ThreadPriorityTestThread thread(kThreadPriorityTestValues[i]);
+    PlatformThreadHandle handle;
+
+    ASSERT_FALSE(thread.IsRunning());
+    ASSERT_TRUE(PlatformThread::Create(0, &thread, &handle));
+    thread.WaitForTerminationReady();
+    ASSERT_TRUE(thread.IsRunning());
+
+    thread.MarkForTermination();
+    PlatformThread::Join(handle);
+    ASSERT_FALSE(thread.IsRunning());
+  }
+}
+
+// This tests internal PlatformThread APIs used under some POSIX platforms,
+// with the exception of Mac OS X, iOS and Fuchsia.
+#if defined(OS_POSIX) && !defined(OS_MACOSX) && !defined(OS_IOS) && \
+    !defined(OS_FUCHSIA)
+TEST(PlatformThreadTest, GetNiceValueToThreadPriority) {
+  using internal::NiceValueToThreadPriority;
+  using internal::kThreadPriorityToNiceValueMap;
+
+  EXPECT_EQ(ThreadPriority::BACKGROUND,
+            kThreadPriorityToNiceValueMap[0].priority);
+  EXPECT_EQ(ThreadPriority::NORMAL,
+            kThreadPriorityToNiceValueMap[1].priority);
+  EXPECT_EQ(ThreadPriority::DISPLAY,
+            kThreadPriorityToNiceValueMap[2].priority);
+  EXPECT_EQ(ThreadPriority::REALTIME_AUDIO,
+            kThreadPriorityToNiceValueMap[3].priority);
+
+  static const int kBackgroundNiceValue =
+      kThreadPriorityToNiceValueMap[0].nice_value;
+  static const int kNormalNiceValue =
+      kThreadPriorityToNiceValueMap[1].nice_value;
+  static const int kDisplayNiceValue =
+      kThreadPriorityToNiceValueMap[2].nice_value;
+  static const int kRealtimeAudioNiceValue =
+      kThreadPriorityToNiceValueMap[3].nice_value;
+
+  // The tests below assume the nice values specified in the map are within
+  // the range below (both ends exclusive).
+  static const int kHighestNiceValue = 19;
+  static const int kLowestNiceValue = -20;
+
+  EXPECT_GT(kHighestNiceValue, kBackgroundNiceValue);
+  EXPECT_GT(kBackgroundNiceValue, kNormalNiceValue);
+  EXPECT_GT(kNormalNiceValue, kDisplayNiceValue);
+  EXPECT_GT(kDisplayNiceValue, kRealtimeAudioNiceValue);
+  EXPECT_GT(kRealtimeAudioNiceValue, kLowestNiceValue);
+
+  EXPECT_EQ(ThreadPriority::BACKGROUND,
+            NiceValueToThreadPriority(kHighestNiceValue));
+  EXPECT_EQ(ThreadPriority::BACKGROUND,
+            NiceValueToThreadPriority(kBackgroundNiceValue + 1));
+  EXPECT_EQ(ThreadPriority::BACKGROUND,
+            NiceValueToThreadPriority(kBackgroundNiceValue));
+  EXPECT_EQ(ThreadPriority::BACKGROUND,
+            NiceValueToThreadPriority(kNormalNiceValue + 1));
+  EXPECT_EQ(ThreadPriority::NORMAL,
+            NiceValueToThreadPriority(kNormalNiceValue));
+  EXPECT_EQ(ThreadPriority::NORMAL,
+            NiceValueToThreadPriority(kDisplayNiceValue + 1));
+  EXPECT_EQ(ThreadPriority::DISPLAY,
+            NiceValueToThreadPriority(kDisplayNiceValue));
+  EXPECT_EQ(ThreadPriority::DISPLAY,
+            NiceValueToThreadPriority(kRealtimeAudioNiceValue + 1));
+  EXPECT_EQ(ThreadPriority::REALTIME_AUDIO,
+            NiceValueToThreadPriority(kRealtimeAudioNiceValue));
+  EXPECT_EQ(ThreadPriority::REALTIME_AUDIO,
+            NiceValueToThreadPriority(kLowestNiceValue));
+}
+#endif  // defined(OS_POSIX) && !defined(OS_MACOSX) && !defined(OS_IOS) &&
+        // !defined(OS_FUCHSIA)
+
+TEST(PlatformThreadTest, SetHugeThreadName) {
+  // Construct an excessively long thread name.
+  std::string long_name(1024, 'a');
+
+  // SetName has no return code, so just verify that implementations
+  // don't [D]CHECK().
+  PlatformThread::SetName(long_name);
+}
+
+}  // namespace base
diff --git a/base/threading/platform_thread_win.cc b/base/threading/platform_thread_win.cc
new file mode 100644
index 0000000..daccc0e
--- /dev/null
+++ b/base/threading/platform_thread_win.cc
@@ -0,0 +1,317 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/platform_thread.h"
+
+#include <stddef.h>
+
+#include "base/debug/activity_tracker.h"
+#include "base/debug/alias.h"
+#include "base/debug/profiler.h"
+#include "base/logging.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/threading/thread_id_name_manager.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/win/scoped_handle.h"
+
+#include <windows.h>
+
+namespace base {
+
+namespace {
+
+// The information on how to set the thread name comes from
+// an MSDN article: http://msdn2.microsoft.com/en-us/library/xcb2z8hs.aspx
+const DWORD kVCThreadNameException = 0x406D1388;
+
+typedef struct tagTHREADNAME_INFO {
+  DWORD dwType;  // Must be 0x1000.
+  LPCSTR szName;  // Pointer to name (in user addr space).
+  DWORD dwThreadID;  // Thread ID (-1=caller thread).
+  DWORD dwFlags;  // Reserved for future use, must be zero.
+} THREADNAME_INFO;
+
+// The SetThreadDescription API was introduced in Windows 10, version 1607.
+typedef HRESULT(WINAPI* SetThreadDescription)(HANDLE hThread,
+                                              PCWSTR lpThreadDescription);
+
+// This function uses SEH (__try), which cannot be used in a function that
+// also requires C++ object unwinding, so it is separated out of its caller.
+void SetNameInternal(PlatformThreadId thread_id, const char* name) {
+  THREADNAME_INFO info;
+  info.dwType = 0x1000;
+  info.szName = name;
+  info.dwThreadID = thread_id;
+  info.dwFlags = 0;
+
+  __try {
+    RaiseException(kVCThreadNameException, 0, sizeof(info)/sizeof(DWORD),
+                   reinterpret_cast<DWORD_PTR*>(&info));
+  } __except(EXCEPTION_CONTINUE_EXECUTION) {
+  }
+}
+
+struct ThreadParams {
+  PlatformThread::Delegate* delegate;
+  bool joinable;
+  ThreadPriority priority;
+};
+
+DWORD __stdcall ThreadFunc(void* params) {
+  ThreadParams* thread_params = static_cast<ThreadParams*>(params);
+  PlatformThread::Delegate* delegate = thread_params->delegate;
+  if (!thread_params->joinable)
+    base::ThreadRestrictions::SetSingletonAllowed(false);
+
+  if (thread_params->priority != ThreadPriority::NORMAL)
+    PlatformThread::SetCurrentThreadPriority(thread_params->priority);
+
+  // Retrieve a copy of the thread handle to use as the key in the
+  // thread name mapping.
+  PlatformThreadHandle::Handle platform_handle;
+  BOOL did_dup = DuplicateHandle(GetCurrentProcess(),
+                                 GetCurrentThread(),
+                                 GetCurrentProcess(),
+                                 &platform_handle,
+                                 0,
+                                 FALSE,
+                                 DUPLICATE_SAME_ACCESS);
+
+  win::ScopedHandle scoped_platform_handle;
+
+  if (did_dup) {
+    scoped_platform_handle.Set(platform_handle);
+    ThreadIdNameManager::GetInstance()->RegisterThread(
+        scoped_platform_handle.Get(),
+        PlatformThread::CurrentId());
+  }
+
+  delete thread_params;
+  delegate->ThreadMain();
+
+  if (did_dup) {
+    ThreadIdNameManager::GetInstance()->RemoveName(
+        scoped_platform_handle.Get(),
+        PlatformThread::CurrentId());
+  }
+
+  return 0;
+}
+
+// CreateThreadInternal() matches PlatformThread::CreateWithPriority(), except
+// that |out_thread_handle| may be nullptr, in which case a non-joinable thread
+// is created.
+bool CreateThreadInternal(size_t stack_size,
+                          PlatformThread::Delegate* delegate,
+                          PlatformThreadHandle* out_thread_handle,
+                          ThreadPriority priority) {
+  unsigned int flags = 0;
+  if (stack_size > 0) {
+    flags = STACK_SIZE_PARAM_IS_A_RESERVATION;
+  }
+
+  ThreadParams* params = new ThreadParams;
+  params->delegate = delegate;
+  params->joinable = out_thread_handle != nullptr;
+  params->priority = priority;
+
+  void* thread_handle;
+  {
+    SCOPED_UMA_HISTOGRAM_TIMER("Windows.CreateThreadTime");
+
+    // Using CreateThread here vs _beginthreadex makes thread creation a bit
+    // faster and doesn't require the loader lock to be available.  Our code
+    // will have to work running on CreateThread() threads anyway, since we run
+    // code on the Windows thread pool, etc.  For some background on the
+    // difference:
+    //   http://www.microsoft.com/msj/1099/win32/win321099.aspx
+    thread_handle =
+        ::CreateThread(nullptr, stack_size, ThreadFunc, params, flags, nullptr);
+  }
+
+  if (!thread_handle) {
+    delete params;
+    return false;
+  }
+
+  if (out_thread_handle)
+    *out_thread_handle = PlatformThreadHandle(thread_handle);
+  else
+    CloseHandle(thread_handle);
+  return true;
+}
+
+}  // namespace
+
+// static
+PlatformThreadId PlatformThread::CurrentId() {
+  return ::GetCurrentThreadId();
+}
+
+// static
+PlatformThreadRef PlatformThread::CurrentRef() {
+  return PlatformThreadRef(::GetCurrentThreadId());
+}
+
+// static
+PlatformThreadHandle PlatformThread::CurrentHandle() {
+  return PlatformThreadHandle(::GetCurrentThread());
+}
+
+// static
+void PlatformThread::YieldCurrentThread() {
+  ::Sleep(0);
+}
+
+// static
+void PlatformThread::Sleep(TimeDelta duration) {
+  // When measured with a high resolution clock, Sleep() sometimes returns much
+  // too early. We may need to call it repeatedly to get the desired duration.
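+  // e.g. a 10ms sleep that wakes 0.4ms early loops once more, sleeping the
+  // 1ms remainder (rounded up) before returning.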
+  TimeTicks end = TimeTicks::Now() + duration;
+  for (TimeTicks now = TimeTicks::Now(); now < end; now = TimeTicks::Now())
+    ::Sleep(static_cast<DWORD>((end - now).InMillisecondsRoundedUp()));
+}
+
+// static
+void PlatformThread::SetName(const std::string& name) {
+  ThreadIdNameManager::GetInstance()->SetName(name);
+
+  // The SetThreadDescription API works even if no debugger is attached.
+  auto set_thread_description_func =
+      reinterpret_cast<SetThreadDescription>(::GetProcAddress(
+          ::GetModuleHandle(L"Kernel32.dll"), "SetThreadDescription"));
+  if (set_thread_description_func) {
+    set_thread_description_func(::GetCurrentThread(),
+                                base::UTF8ToWide(name).c_str());
+  }
+
+  // The debugger needs to be around to catch the name in the exception.  If
+  // there isn't a debugger, we are just needlessly throwing an exception.
+  if (!::IsDebuggerPresent())
+    return;
+
+  SetNameInternal(CurrentId(), name.c_str());
+}
+
+// static
+const char* PlatformThread::GetName() {
+  return ThreadIdNameManager::GetInstance()->GetName(CurrentId());
+}
+
+// static
+bool PlatformThread::CreateWithPriority(size_t stack_size, Delegate* delegate,
+                                        PlatformThreadHandle* thread_handle,
+                                        ThreadPriority priority) {
+  DCHECK(thread_handle);
+  return CreateThreadInternal(stack_size, delegate, thread_handle, priority);
+}
+
+// static
+bool PlatformThread::CreateNonJoinable(size_t stack_size, Delegate* delegate) {
+  return CreateNonJoinableWithPriority(stack_size, delegate,
+                                       ThreadPriority::NORMAL);
+}
+
+// static
+bool PlatformThread::CreateNonJoinableWithPriority(size_t stack_size,
+                                                   Delegate* delegate,
+                                                   ThreadPriority priority) {
+  return CreateThreadInternal(stack_size, delegate, nullptr /* non-joinable */,
+                              priority);
+}
+
+// static
+void PlatformThread::Join(PlatformThreadHandle thread_handle) {
+  DCHECK(thread_handle.platform_handle());
+  // TODO(willchan): Enable this check once I can get it to work for Windows
+  // shutdown.
+  // Joining another thread may block the current thread for a long time, since
+  // the thread referred to by |thread_handle| may still be running long-lived /
+  // blocking tasks.
+  // AssertBlockingAllowed();
+
+  DWORD thread_id = ::GetThreadId(thread_handle.platform_handle());
+  DWORD last_error = 0;
+  if (!thread_id)
+    last_error = ::GetLastError();
+
+  // Record information about the exiting thread in case joining hangs.
+  base::debug::Alias(&thread_id);
+  base::debug::Alias(&last_error);
+
+  // Record the event that this thread is blocking upon (for hang diagnosis).
+  base::debug::ScopedThreadJoinActivity thread_activity(&thread_handle);
+
+  // Wait for the thread to exit.  It should already have terminated but make
+  // sure this assumption is valid.
+  CHECK_EQ(WAIT_OBJECT_0,
+           WaitForSingleObject(thread_handle.platform_handle(), INFINITE));
+  CloseHandle(thread_handle.platform_handle());
+}
+
+// static
+void PlatformThread::Detach(PlatformThreadHandle thread_handle) {
+  CloseHandle(thread_handle.platform_handle());
+}
+
+// static
+bool PlatformThread::CanIncreaseCurrentThreadPriority() {
+  return true;
+}
+
+// static
+void PlatformThread::SetCurrentThreadPriority(ThreadPriority priority) {
+  int desired_priority = THREAD_PRIORITY_ERROR_RETURN;
+  switch (priority) {
+    case ThreadPriority::BACKGROUND:
+      desired_priority = THREAD_PRIORITY_LOWEST;
+      break;
+    case ThreadPriority::NORMAL:
+      desired_priority = THREAD_PRIORITY_NORMAL;
+      break;
+    case ThreadPriority::DISPLAY:
+      desired_priority = THREAD_PRIORITY_ABOVE_NORMAL;
+      break;
+    case ThreadPriority::REALTIME_AUDIO:
+      desired_priority = THREAD_PRIORITY_TIME_CRITICAL;
+      break;
+    default:
+      NOTREACHED() << "Unknown priority.";
+      break;
+  }
+  DCHECK_NE(desired_priority, THREAD_PRIORITY_ERROR_RETURN);
+
+#if DCHECK_IS_ON()
+  const BOOL success =
+#endif
+      ::SetThreadPriority(PlatformThread::CurrentHandle().platform_handle(),
+                          desired_priority);
+  DPLOG_IF(ERROR, !success) << "Failed to set thread priority to "
+                            << desired_priority;
+}
+
+// static
+ThreadPriority PlatformThread::GetCurrentThreadPriority() {
+  int priority =
+      ::GetThreadPriority(PlatformThread::CurrentHandle().platform_handle());
+  switch (priority) {
+    case THREAD_PRIORITY_LOWEST:
+      return ThreadPriority::BACKGROUND;
+    case THREAD_PRIORITY_NORMAL:
+      return ThreadPriority::NORMAL;
+    case THREAD_PRIORITY_ABOVE_NORMAL:
+      return ThreadPriority::DISPLAY;
+    case THREAD_PRIORITY_TIME_CRITICAL:
+      return ThreadPriority::REALTIME_AUDIO;
+    case THREAD_PRIORITY_ERROR_RETURN:
+      DPCHECK(false) << "GetThreadPriority error";
+      FALLTHROUGH;
+    default:
+      NOTREACHED() << "Unexpected priority: " << priority;
+      return ThreadPriority::NORMAL;
+  }
+}
+
+}  // namespace base
diff --git a/base/threading/post_task_and_reply_impl.cc b/base/threading/post_task_and_reply_impl.cc
new file mode 100644
index 0000000..5aacdad
--- /dev/null
+++ b/base/threading/post_task_and_reply_impl.cc
@@ -0,0 +1,127 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/post_task_and_reply_impl.h"
+
+#include <utility>
+
+#include "base/bind.h"
+#include "base/debug/leak_annotations.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/sequenced_task_runner.h"
+#include "base/threading/sequenced_task_runner_handle.h"
+
+namespace base {
+
+namespace {
+
+class PostTaskAndReplyRelay {
+ public:
+  PostTaskAndReplyRelay(const Location& from_here,
+                        OnceClosure task,
+                        OnceClosure reply)
+      : from_here_(from_here),
+        task_(std::move(task)),
+        reply_(std::move(reply)) {}
+  PostTaskAndReplyRelay(PostTaskAndReplyRelay&&) = default;
+
+  ~PostTaskAndReplyRelay() {
+    if (reply_) {
+      // This can run:
+      // 1) On origin sequence, when:
+      //    1a) Posting |task_| fails.
+      //    1b) |reply_| is cancelled before running.
+      //    1c) The DeleteSoon() below is scheduled.
+      // 2) On destination sequence, when:
+      //    2a) |task_| is cancelled before running.
+      //    2b) Posting |reply_| fails.
+
+      if (!reply_task_runner_->RunsTasksInCurrentSequence()) {
+        // Case 2a) or 2b).
+        //
+        // Destroy callbacks asynchronously on |reply_task_runner| since their
+        // destructors can rightfully be affine to it. As always, DeleteSoon()
+        // might leak its argument if the target execution environment is
+        // shutdown (e.g. MessageLoop deleted, TaskScheduler shutdown).
+        //
+        // Note: while it's obvious why |reply_| can be affine to
+        // |reply_task_runner|, the reason that |task_| can also be affine to
+        // it is that if neither task ran, |task_| may still hold an object
+        // which was intended to be moved to |reply_| when |task_| ran (such an
+        // object's destruction can be affine to |reply_task_runner_| -- e.g.
+        // https://crbug.com/829122).
+        auto relay_to_delete =
+            std::make_unique<PostTaskAndReplyRelay>(std::move(*this));
+        ANNOTATE_LEAKING_OBJECT_PTR(relay_to_delete.get());
+        reply_task_runner_->DeleteSoon(from_here_, std::move(relay_to_delete));
+      }
+
+      // Case 1a), 1b), 1c).
+      //
+      // Callbacks will be destroyed synchronously at the end of this scope.
+    } else {
+      // This can run when both callbacks have run or have been moved to another
+      // PostTaskAndReplyRelay instance. If |reply_| is null, |task_| must be
+      // null too.
+      DCHECK(!task_);
+    }
+  }
+
+  // No assignment operator because of const members.
+  PostTaskAndReplyRelay& operator=(PostTaskAndReplyRelay&&) = delete;
+
+  // Static function is used because it is not possible to bind a method call to
+  // a non-pointer type.
+  static void RunTaskAndPostReply(PostTaskAndReplyRelay relay) {
+    DCHECK(relay.task_);
+    std::move(relay.task_).Run();
+
+    // Keep a reference to the reply TaskRunner for the PostTask() call before
+    // |relay| is moved into a callback.
+    scoped_refptr<SequencedTaskRunner> reply_task_runner =
+        relay.reply_task_runner_;
+
+    reply_task_runner->PostTask(
+        relay.from_here_,
+        BindOnce(&PostTaskAndReplyRelay::RunReply, std::move(relay)));
+  }
+
+ private:
+  // Static function is used because it is not possible to bind a method call to
+  // a non-pointer type.
+  static void RunReply(PostTaskAndReplyRelay relay) {
+    DCHECK(!relay.task_);
+    DCHECK(relay.reply_);
+    std::move(relay.reply_).Run();
+  }
+
+  const Location from_here_;
+  OnceClosure task_;
+  OnceClosure reply_;
+  const scoped_refptr<SequencedTaskRunner> reply_task_runner_ =
+      SequencedTaskRunnerHandle::Get();
+
+  DISALLOW_COPY_AND_ASSIGN(PostTaskAndReplyRelay);
+};
+
+}  // namespace
+
+namespace internal {
+
+bool PostTaskAndReplyImpl::PostTaskAndReply(const Location& from_here,
+                                            OnceClosure task,
+                                            OnceClosure reply) {
+  DCHECK(task) << from_here.ToString();
+  DCHECK(reply) << from_here.ToString();
+
+  return PostTask(from_here,
+                  BindOnce(&PostTaskAndReplyRelay::RunTaskAndPostReply,
+                           PostTaskAndReplyRelay(from_here, std::move(task),
+                                                 std::move(reply))));
+}
+
+}  // namespace internal
+
+}  // namespace base
diff --git a/base/threading/post_task_and_reply_impl.h b/base/threading/post_task_and_reply_impl.h
new file mode 100644
index 0000000..54038ce
--- /dev/null
+++ b/base/threading/post_task_and_reply_impl.h
@@ -0,0 +1,46 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the implementation for TaskRunner::PostTaskAndReply.
+
+#ifndef BASE_THREADING_POST_TASK_AND_REPLY_IMPL_H_
+#define BASE_THREADING_POST_TASK_AND_REPLY_IMPL_H_
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/location.h"
+
+namespace base {
+namespace internal {
+
+// Inherit from this in a class that implements PostTask to send a task to a
+// custom execution context.
+//
+// If you're looking for a concrete implementation of PostTaskAndReply, you
+// probably want base::TaskRunner or base/task_scheduler/post_task.h.
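+//
+// A minimal sketch of a subclass (|my_task_runner_| is hypothetical, for
+// illustration only; PostTaskAndReplyTaskRunner in the unittest is a concrete
+// example):
+//
+//   class MyPostTaskAndReplyImpl : public PostTaskAndReplyImpl {
+//    private:
+//     bool PostTask(const Location& from_here, OnceClosure task) override {
+//       return my_task_runner_->PostTask(from_here, std::move(task));
+//     }
+//     scoped_refptr<TaskRunner> my_task_runner_;
+//   };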
+class BASE_EXPORT PostTaskAndReplyImpl {
+ public:
+  virtual ~PostTaskAndReplyImpl() = default;
+
+  // Posts |task| by calling PostTask(). On completion, posts |reply| to the
+  // origin sequence. Can only be called when
+  // SequencedTaskRunnerHandle::IsSet(). Each callback is deleted synchronously
+  // after running, or scheduled for asynchronous deletion on the origin
+  // sequence if it can't run (e.g. if a TaskRunner skips it on shutdown). See
+  // SequencedTaskRunner::DeleteSoon() for when objects scheduled for
+  // asynchronous deletion can be leaked. Note: All //base task posting APIs
+  // require callbacks to support deletion on the posting sequence if they can't
+  // be scheduled.
+  bool PostTaskAndReply(const Location& from_here,
+                        OnceClosure task,
+                        OnceClosure reply);
+
+ private:
+  virtual bool PostTask(const Location& from_here, OnceClosure task) = 0;
+};
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_THREADING_POST_TASK_AND_REPLY_IMPL_H_
diff --git a/base/threading/post_task_and_reply_impl_unittest.cc b/base/threading/post_task_and_reply_impl_unittest.cc
new file mode 100644
index 0000000..319327d
--- /dev/null
+++ b/base/threading/post_task_and_reply_impl_unittest.cc
@@ -0,0 +1,198 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/post_task_and_reply_impl.h"
+
+#include <utility>
+
+#include "base/auto_reset.h"
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/test/test_mock_time_task_runner.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::_;
+
+namespace base {
+namespace internal {
+
+namespace {
+
+class PostTaskAndReplyTaskRunner : public internal::PostTaskAndReplyImpl {
+ public:
+  explicit PostTaskAndReplyTaskRunner(TaskRunner* destination)
+      : destination_(destination) {}
+
+ private:
+  bool PostTask(const Location& from_here, OnceClosure task) override {
+    return destination_->PostTask(from_here, std::move(task));
+  }
+
+  // Non-owning.
+  TaskRunner* const destination_;
+};
+
+class ObjectToDelete : public RefCounted<ObjectToDelete> {
+ public:
+  // |delete_flag| is set to true when this object is deleted.
+  ObjectToDelete(bool* delete_flag) : delete_flag_(delete_flag) {
+    EXPECT_FALSE(*delete_flag_);
+  }
+
+ private:
+  friend class RefCounted<ObjectToDelete>;
+  ~ObjectToDelete() { *delete_flag_ = true; }
+
+  bool* const delete_flag_;
+
+  DISALLOW_COPY_AND_ASSIGN(ObjectToDelete);
+};
+
+class MockObject {
+ public:
+  MockObject() = default;
+
+  MOCK_METHOD1(Task, void(scoped_refptr<ObjectToDelete>));
+  MOCK_METHOD1(Reply, void(scoped_refptr<ObjectToDelete>));
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MockObject);
+};
+
+class MockRunsTasksInCurrentSequenceTaskRunner : public TestMockTimeTaskRunner {
+ public:
+  MockRunsTasksInCurrentSequenceTaskRunner(
+      TestMockTimeTaskRunner::Type type =
+          TestMockTimeTaskRunner::Type::kStandalone)
+      : TestMockTimeTaskRunner(type) {}
+
+  void RunUntilIdleWithRunsTasksInCurrentSequence() {
+    AutoReset<bool> reset(&runs_tasks_in_current_sequence_, true);
+    RunUntilIdle();
+  }
+
+  void ClearPendingTasksWithRunsTasksInCurrentSequence() {
+    AutoReset<bool> reset(&runs_tasks_in_current_sequence_, true);
+    ClearPendingTasks();
+  }
+
+  // TestMockTimeTaskRunner:
+  bool RunsTasksInCurrentSequence() const override {
+    return runs_tasks_in_current_sequence_;
+  }
+
+ private:
+  ~MockRunsTasksInCurrentSequenceTaskRunner() override = default;
+
+  bool runs_tasks_in_current_sequence_ = false;
+
+  DISALLOW_COPY_AND_ASSIGN(MockRunsTasksInCurrentSequenceTaskRunner);
+};
+
+class PostTaskAndReplyImplTest : public testing::Test {
+ protected:
+  PostTaskAndReplyImplTest() = default;
+
+  void PostTaskAndReplyToMockObject() {
+    // Expect the post to succeed.
+    EXPECT_TRUE(
+        PostTaskAndReplyTaskRunner(post_runner_.get())
+            .PostTaskAndReply(
+                FROM_HERE,
+                BindOnce(&MockObject::Task, Unretained(&mock_object_),
+                         MakeRefCounted<ObjectToDelete>(&delete_task_flag_)),
+                BindOnce(&MockObject::Reply, Unretained(&mock_object_),
+                         MakeRefCounted<ObjectToDelete>(&delete_reply_flag_))));
+
+    // Expect the first task to be posted to |post_runner_|.
+    EXPECT_TRUE(post_runner_->HasPendingTask());
+    EXPECT_FALSE(reply_runner_->HasPendingTask());
+    EXPECT_FALSE(delete_task_flag_);
+    EXPECT_FALSE(delete_reply_flag_);
+  }
+
+  scoped_refptr<MockRunsTasksInCurrentSequenceTaskRunner> post_runner_ =
+      MakeRefCounted<MockRunsTasksInCurrentSequenceTaskRunner>();
+  scoped_refptr<MockRunsTasksInCurrentSequenceTaskRunner> reply_runner_ =
+      MakeRefCounted<MockRunsTasksInCurrentSequenceTaskRunner>(
+          TestMockTimeTaskRunner::Type::kBoundToThread);
+  testing::StrictMock<MockObject> mock_object_;
+  bool delete_task_flag_ = false;
+  bool delete_reply_flag_ = false;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(PostTaskAndReplyImplTest);
+};
+
+}  // namespace
+
+TEST_F(PostTaskAndReplyImplTest, PostTaskAndReply) {
+  PostTaskAndReplyToMockObject();
+
+  EXPECT_CALL(mock_object_, Task(_));
+  post_runner_->RunUntilIdleWithRunsTasksInCurrentSequence();
+  testing::Mock::VerifyAndClear(&mock_object_);
+  // The task should have been deleted right after being run.
+  EXPECT_TRUE(delete_task_flag_);
+  EXPECT_FALSE(delete_reply_flag_);
+
+  // Expect the reply to be posted to |reply_runner_|.
+  EXPECT_FALSE(post_runner_->HasPendingTask());
+  EXPECT_TRUE(reply_runner_->HasPendingTask());
+
+  EXPECT_CALL(mock_object_, Reply(_));
+  reply_runner_->RunUntilIdleWithRunsTasksInCurrentSequence();
+  testing::Mock::VerifyAndClear(&mock_object_);
+  EXPECT_TRUE(delete_task_flag_);
+  // The reply should have been deleted right after being run.
+  EXPECT_TRUE(delete_reply_flag_);
+
+  // Expect no pending task in |post_runner_| and |reply_runner_|.
+  EXPECT_FALSE(post_runner_->HasPendingTask());
+  EXPECT_FALSE(reply_runner_->HasPendingTask());
+}
+
+TEST_F(PostTaskAndReplyImplTest, TaskDoesNotRun) {
+  PostTaskAndReplyToMockObject();
+
+  // Clear the |post_runner_|. Both callbacks should be scheduled for deletion
+  // on the |reply_runner_|.
+  post_runner_->ClearPendingTasksWithRunsTasksInCurrentSequence();
+  EXPECT_FALSE(post_runner_->HasPendingTask());
+  EXPECT_TRUE(reply_runner_->HasPendingTask());
+  EXPECT_FALSE(delete_task_flag_);
+  EXPECT_FALSE(delete_reply_flag_);
+
+  // Run the |reply_runner_|. Both callbacks should be deleted.
+  reply_runner_->RunUntilIdleWithRunsTasksInCurrentSequence();
+  EXPECT_TRUE(delete_task_flag_);
+  EXPECT_TRUE(delete_reply_flag_);
+}
+
+TEST_F(PostTaskAndReplyImplTest, ReplyDoesNotRun) {
+  PostTaskAndReplyToMockObject();
+
+  EXPECT_CALL(mock_object_, Task(_));
+  post_runner_->RunUntilIdleWithRunsTasksInCurrentSequence();
+  testing::Mock::VerifyAndClear(&mock_object_);
+  // The task should have been deleted right after being run.
+  EXPECT_TRUE(delete_task_flag_);
+  EXPECT_FALSE(delete_reply_flag_);
+
+  // Expect the reply to be posted to |reply_runner_|.
+  EXPECT_FALSE(post_runner_->HasPendingTask());
+  EXPECT_TRUE(reply_runner_->HasPendingTask());
+
+  // Clear the |reply_runner_| queue without running tasks. The reply callback
+  // should be deleted.
+  reply_runner_->ClearPendingTasksWithRunsTasksInCurrentSequence();
+  EXPECT_TRUE(delete_task_flag_);
+  EXPECT_TRUE(delete_reply_flag_);
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/threading/scoped_blocking_call.cc b/base/threading/scoped_blocking_call.cc
new file mode 100644
index 0000000..1d2931c
--- /dev/null
+++ b/base/threading/scoped_blocking_call.cc
@@ -0,0 +1,72 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/scoped_blocking_call.h"
+
+#include "base/lazy_instance.h"
+#include "base/threading/thread_local.h"
+
+namespace base {
+
+namespace {
+
+LazyInstance<ThreadLocalPointer<internal::BlockingObserver>>::Leaky
+    tls_blocking_observer = LAZY_INSTANCE_INITIALIZER;
+
+// Last ScopedBlockingCall instantiated on this thread.
+LazyInstance<ThreadLocalPointer<ScopedBlockingCall>>::Leaky
+    tls_last_scoped_blocking_call = LAZY_INSTANCE_INITIALIZER;
+
+}  // namespace
+
+ScopedBlockingCall::ScopedBlockingCall(BlockingType blocking_type)
+    : blocking_observer_(tls_blocking_observer.Get().Get()),
+      previous_scoped_blocking_call_(tls_last_scoped_blocking_call.Get().Get()),
+      is_will_block_(blocking_type == BlockingType::WILL_BLOCK ||
+                     (previous_scoped_blocking_call_ &&
+                      previous_scoped_blocking_call_->is_will_block_)) {
+  tls_last_scoped_blocking_call.Get().Set(this);
+
+  if (blocking_observer_) {
+    if (!previous_scoped_blocking_call_) {
+      blocking_observer_->BlockingStarted(blocking_type);
+    } else if (blocking_type == BlockingType::WILL_BLOCK &&
+               !previous_scoped_blocking_call_->is_will_block_) {
+      blocking_observer_->BlockingTypeUpgraded();
+    }
+  }
+}
+
+ScopedBlockingCall::~ScopedBlockingCall() {
+  DCHECK_EQ(this, tls_last_scoped_blocking_call.Get().Get());
+  tls_last_scoped_blocking_call.Get().Set(previous_scoped_blocking_call_);
+  if (blocking_observer_ && !previous_scoped_blocking_call_)
+    blocking_observer_->BlockingEnded();
+}
+
+namespace internal {
+
+void SetBlockingObserverForCurrentThread(BlockingObserver* blocking_observer) {
+  DCHECK(!tls_blocking_observer.Get().Get());
+  tls_blocking_observer.Get().Set(blocking_observer);
+}
+
+void ClearBlockingObserverForTesting() {
+  tls_blocking_observer.Get().Set(nullptr);
+}
+
+ScopedClearBlockingObserverForTesting::ScopedClearBlockingObserverForTesting()
+    : blocking_observer_(tls_blocking_observer.Get().Get()) {
+  tls_blocking_observer.Get().Set(nullptr);
+}
+
+ScopedClearBlockingObserverForTesting::
+    ~ScopedClearBlockingObserverForTesting() {
+  DCHECK(!tls_blocking_observer.Get().Get());
+  tls_blocking_observer.Get().Set(blocking_observer_);
+}
+
+}  // namespace internal
+
+}  // namespace base
diff --git a/base/threading/scoped_blocking_call.h b/base/threading/scoped_blocking_call.h
new file mode 100644
index 0000000..e376c30
--- /dev/null
+++ b/base/threading/scoped_blocking_call.h
@@ -0,0 +1,140 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_THREADING_SCOPED_BLOCKING_CALL_H_
+#define BASE_THREADING_SCOPED_BLOCKING_CALL_H_
+
+#include "base/base_export.h"
+#include "base/logging.h"
+
+namespace base {
+
+// BlockingType indicates the likelihood that a blocking call will actually
+// block.
+enum class BlockingType {
+  // The call might block (e.g. file I/O that might hit in memory cache).
+  MAY_BLOCK,
+  // The call will definitely block (e.g. cache already checked and now pinging
+  // server synchronously).
+  WILL_BLOCK
+};
+
+namespace internal {
+class BlockingObserver;
+}
+
+// This class must be instantiated in every scope where a blocking call is made.
+// CPU usage should be minimal within that scope. //base APIs that block
+// instantiate their own ScopedBlockingCall; it is not necessary to instantiate
+// another ScopedBlockingCall in the scope where these APIs are used.
+//
+// Good:
+//   Data data;
+//   {
+//     ScopedBlockingCall scoped_blocking_call(BlockingType::WILL_BLOCK);
+//     data = GetDataFromNetwork();
+//   }
+//   CPUIntensiveProcessing(data);
+//
+// Bad:
+//   ScopedBlockingCall scoped_blocking_call(BlockingType::WILL_BLOCK);
+//   Data data = GetDataFromNetwork();
+//   CPUIntensiveProcessing(data);  // CPU usage within a ScopedBlockingCall.
+//
+// Good:
+//   Data a;
+//   Data b;
+//   {
+//     ScopedBlockingCall scoped_blocking_call(BlockingType::MAY_BLOCK);
+//     a = GetDataFromMemoryCacheOrNetwork();
+//     b = GetDataFromMemoryCacheOrNetwork();
+//   }
+//   CPUIntensiveProcessing(a);
+//   CPUIntensiveProcessing(b);
+//
+// Bad:
+//   ScopedBlockingCall scoped_blocking_call(BlockingType::MAY_BLOCK);
+//   Data a = GetDataFromMemoryCacheOrNetwork();
+//   Data b = GetDataFromMemoryCacheOrNetwork();
+//   CPUIntensiveProcessing(a);  // CPU usage within a ScopedBlockingCall.
+//   CPUIntensiveProcessing(b);  // CPU usage within a ScopedBlockingCall.
+//
+// Good:
+//   base::WaitableEvent waitable_event(...);
+//   waitable_event.Wait();
+//
+// Bad:
+//  base::WaitableEvent waitable_event(...);
+//  ScopedBlockingCall scoped_blocking_call(BlockingType::WILL_BLOCK);
+//  waitable_event.Wait();  // Wait() instantiates its own ScopedBlockingCall.
+//
+// When a ScopedBlockingCall is instantiated from a TaskScheduler parallel or
+// sequenced task, the thread pool size is incremented to compensate for the
+// blocked thread (more or less aggressively depending on BlockingType).
+class BASE_EXPORT ScopedBlockingCall {
+ public:
+  ScopedBlockingCall(BlockingType blocking_type);
+  ~ScopedBlockingCall();
+
+ private:
+  internal::BlockingObserver* const blocking_observer_;
+
+  // Previous ScopedBlockingCall instantiated on this thread.
+  ScopedBlockingCall* const previous_scoped_blocking_call_;
+
+  // Whether the BlockingType of the current thread was WILL_BLOCK after this
+  // ScopedBlockingCall was instantiated.
+  const bool is_will_block_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedBlockingCall);
+};
+
+namespace internal {
+
+// Interface for an observer to be informed when a thread enters or exits
+// the scope of ScopedBlockingCall objects.
+class BASE_EXPORT BlockingObserver {
+ public:
+  virtual ~BlockingObserver() = default;
+
+  // Invoked when a ScopedBlockingCall is instantiated on the observed thread
+  // where there wasn't an existing ScopedBlockingCall.
+  virtual void BlockingStarted(BlockingType blocking_type) = 0;
+
+  // Invoked when a WILL_BLOCK ScopedBlockingCall is instantiated on the
+  // observed thread where there was a MAY_BLOCK ScopedBlockingCall but not a
+  // WILL_BLOCK ScopedBlockingCall.
+  virtual void BlockingTypeUpgraded() = 0;
+
+  // Invoked when the last ScopedBlockingCall on the observed thread is
+  // destroyed.
+  virtual void BlockingEnded() = 0;
+};
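+
+// An illustrative observer sequence for nested ScopedBlockingCalls on one
+// thread (mirroring the logic in scoped_blocking_call.cc):
+//   ScopedBlockingCall a(BlockingType::MAY_BLOCK);   // BlockingStarted()
+//   ScopedBlockingCall b(BlockingType::WILL_BLOCK);  // BlockingTypeUpgraded()
+//   // ~b: no notification; ~a: BlockingEnded()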
+
+// Registers |blocking_observer| on the current thread. It is invalid to call
+// this on a thread where there is an active ScopedBlockingCall.
+BASE_EXPORT void SetBlockingObserverForCurrentThread(
+    BlockingObserver* blocking_observer);
+
+BASE_EXPORT void ClearBlockingObserverForTesting();
+
+// Within its scope, unregisters the BlockingObserver of the current thread.
+// Used in TaskScheduler tests to prevent calls to //base sync primitives from
+// affecting the thread pool capacity.
+class BASE_EXPORT ScopedClearBlockingObserverForTesting {
+ public:
+  ScopedClearBlockingObserverForTesting();
+  ~ScopedClearBlockingObserverForTesting();
+
+ private:
+  BlockingObserver* const blocking_observer_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedClearBlockingObserverForTesting);
+};
+
+}  // namespace internal
+
+}  // namespace base
+
+#endif  // BASE_THREADING_SCOPED_BLOCKING_CALL_H
diff --git a/base/threading/scoped_blocking_call_unittest.cc b/base/threading/scoped_blocking_call_unittest.cc
new file mode 100644
index 0000000..5e030f3
--- /dev/null
+++ b/base/threading/scoped_blocking_call_unittest.cc
@@ -0,0 +1,134 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/scoped_blocking_call.h"
+
+#include <memory>
+
+#include "base/macros.h"
+#include "base/test/gtest_util.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+class MockBlockingObserver : public internal::BlockingObserver {
+ public:
+  MockBlockingObserver() = default;
+
+  MOCK_METHOD1(BlockingStarted, void(BlockingType));
+  MOCK_METHOD0(BlockingTypeUpgraded, void());
+  MOCK_METHOD0(BlockingEnded, void());
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MockBlockingObserver);
+};
+
+class ScopedBlockingCallTest : public testing::Test {
+ protected:
+  ScopedBlockingCallTest() {
+    internal::SetBlockingObserverForCurrentThread(&observer_);
+  }
+
+  ~ScopedBlockingCallTest() override {
+    internal::ClearBlockingObserverForTesting();
+  }
+
+  testing::StrictMock<MockBlockingObserver> observer_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ScopedBlockingCallTest);
+};
+
+}  // namespace
+
+TEST_F(ScopedBlockingCallTest, MayBlock) {
+  EXPECT_CALL(observer_, BlockingStarted(BlockingType::MAY_BLOCK));
+  ScopedBlockingCall scoped_blocking_call(BlockingType::MAY_BLOCK);
+  testing::Mock::VerifyAndClear(&observer_);
+  EXPECT_CALL(observer_, BlockingEnded());
+}
+
+TEST_F(ScopedBlockingCallTest, WillBlock) {
+  EXPECT_CALL(observer_, BlockingStarted(BlockingType::WILL_BLOCK));
+  ScopedBlockingCall scoped_blocking_call(BlockingType::WILL_BLOCK);
+  testing::Mock::VerifyAndClear(&observer_);
+  EXPECT_CALL(observer_, BlockingEnded());
+}
+
+TEST_F(ScopedBlockingCallTest, MayBlockWillBlock) {
+  EXPECT_CALL(observer_, BlockingStarted(BlockingType::MAY_BLOCK));
+  ScopedBlockingCall scoped_blocking_call_a(BlockingType::MAY_BLOCK);
+  testing::Mock::VerifyAndClear(&observer_);
+
+  {
+    EXPECT_CALL(observer_, BlockingTypeUpgraded());
+    ScopedBlockingCall scoped_blocking_call_b(BlockingType::WILL_BLOCK);
+    testing::Mock::VerifyAndClear(&observer_);
+  }
+
+  EXPECT_CALL(observer_, BlockingEnded());
+}
+
+TEST_F(ScopedBlockingCallTest, WillBlockMayBlock) {
+  EXPECT_CALL(observer_, BlockingStarted(BlockingType::WILL_BLOCK));
+  ScopedBlockingCall scoped_blocking_call_a(BlockingType::WILL_BLOCK);
+  testing::Mock::VerifyAndClear(&observer_);
+
+  { ScopedBlockingCall scoped_blocking_call_b(BlockingType::MAY_BLOCK); }
+
+  EXPECT_CALL(observer_, BlockingEnded());
+}
+
+TEST_F(ScopedBlockingCallTest, MayBlockMayBlock) {
+  EXPECT_CALL(observer_, BlockingStarted(BlockingType::MAY_BLOCK));
+  ScopedBlockingCall scoped_blocking_call_a(BlockingType::MAY_BLOCK);
+  testing::Mock::VerifyAndClear(&observer_);
+
+  { ScopedBlockingCall scoped_blocking_call_b(BlockingType::MAY_BLOCK); }
+
+  EXPECT_CALL(observer_, BlockingEnded());
+}
+
+TEST_F(ScopedBlockingCallTest, WillBlockWillBlock) {
+  EXPECT_CALL(observer_, BlockingStarted(BlockingType::WILL_BLOCK));
+  ScopedBlockingCall scoped_blocking_call_a(BlockingType::WILL_BLOCK);
+  testing::Mock::VerifyAndClear(&observer_);
+
+  { ScopedBlockingCall scoped_blocking_call_b(BlockingType::WILL_BLOCK); }
+
+  EXPECT_CALL(observer_, BlockingEnded());
+}
+
+TEST_F(ScopedBlockingCallTest, MayBlockWillBlockTwice) {
+  EXPECT_CALL(observer_, BlockingStarted(BlockingType::MAY_BLOCK));
+  ScopedBlockingCall scoped_blocking_call_a(BlockingType::MAY_BLOCK);
+  testing::Mock::VerifyAndClear(&observer_);
+
+  {
+    EXPECT_CALL(observer_, BlockingTypeUpgraded());
+    ScopedBlockingCall scoped_blocking_call_b(BlockingType::WILL_BLOCK);
+    testing::Mock::VerifyAndClear(&observer_);
+
+    {
+      ScopedBlockingCall scoped_blocking_call_c(BlockingType::MAY_BLOCK);
+      ScopedBlockingCall scoped_blocking_call_d(BlockingType::WILL_BLOCK);
+    }
+  }
+
+  EXPECT_CALL(observer_, BlockingEnded());
+}
+
+TEST(ScopedBlockingCallDestructionOrderTest, InvalidDestructionOrder) {
+  auto scoped_blocking_call_a =
+      std::make_unique<ScopedBlockingCall>(BlockingType::WILL_BLOCK);
+  auto scoped_blocking_call_b =
+      std::make_unique<ScopedBlockingCall>(BlockingType::WILL_BLOCK);
+
+  EXPECT_DCHECK_DEATH({ scoped_blocking_call_a.reset(); });
+}
+
+}  // namespace base
diff --git a/base/threading/sequence_local_storage_map.cc b/base/threading/sequence_local_storage_map.cc
new file mode 100644
index 0000000..2837aa0
--- /dev/null
+++ b/base/threading/sequence_local_storage_map.cc
@@ -0,0 +1,105 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/sequence_local_storage_map.h"
+
+#include <utility>
+
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/threading/thread_local.h"
+
+namespace base {
+namespace internal {
+
+namespace {
+LazyInstance<ThreadLocalPointer<SequenceLocalStorageMap>>::Leaky
+    tls_current_sequence_local_storage = LAZY_INSTANCE_INITIALIZER;
+}  // namespace
+
+SequenceLocalStorageMap::SequenceLocalStorageMap() = default;
+
+SequenceLocalStorageMap::~SequenceLocalStorageMap() = default;
+
+ScopedSetSequenceLocalStorageMapForCurrentThread::
+    ScopedSetSequenceLocalStorageMapForCurrentThread(
+        SequenceLocalStorageMap* sequence_local_storage) {
+  DCHECK(!tls_current_sequence_local_storage.Get().Get());
+  tls_current_sequence_local_storage.Get().Set(sequence_local_storage);
+}
+
+ScopedSetSequenceLocalStorageMapForCurrentThread::
+    ~ScopedSetSequenceLocalStorageMapForCurrentThread() {
+  tls_current_sequence_local_storage.Get().Set(nullptr);
+}
+
+SequenceLocalStorageMap& SequenceLocalStorageMap::GetForCurrentThread() {
+  SequenceLocalStorageMap* current_sequence_local_storage =
+      tls_current_sequence_local_storage.Get().Get();
+
+  DCHECK(current_sequence_local_storage)
+      << "SequenceLocalStorageSlot cannot be used because no "
+         "SequenceLocalStorageMap was stored in TLS. Use "
+         "ScopedSetSequenceLocalStorageMapForCurrentThread to store a "
+         "SequenceLocalStorageMap object in TLS.";
+
+  return *current_sequence_local_storage;
+}
+
+void* SequenceLocalStorageMap::Get(int slot_id) {
+  const auto it = sls_map_.find(slot_id);
+  if (it == sls_map_.end())
+    return nullptr;
+  return it->second.value();
+}
+
+void SequenceLocalStorageMap::Set(
+    int slot_id,
+    SequenceLocalStorageMap::ValueDestructorPair value_destructor_pair) {
+  auto it = sls_map_.find(slot_id);
+
+  if (it == sls_map_.end())
+    sls_map_.emplace(slot_id, std::move(value_destructor_pair));
+  else
+    it->second = std::move(value_destructor_pair);
+
+  // The maximum number of entries in the map is 256. This can be adjusted, but
+  // will require reviewing the choice of data structure for the map.
+  DCHECK_LE(sls_map_.size(), 256U);
+}
+
+SequenceLocalStorageMap::ValueDestructorPair::ValueDestructorPair(
+    void* value,
+    DestructorFunc* destructor)
+    : value_(value), destructor_(destructor) {}
+
+SequenceLocalStorageMap::ValueDestructorPair::~ValueDestructorPair() {
+  if (value_)
+    destructor_(value_);
+}
+
+SequenceLocalStorageMap::ValueDestructorPair::ValueDestructorPair(
+    ValueDestructorPair&& value_destructor_pair)
+    : value_(value_destructor_pair.value_),
+      destructor_(value_destructor_pair.destructor_) {
+  value_destructor_pair.value_ = nullptr;
+}
+
+SequenceLocalStorageMap::ValueDestructorPair&
+SequenceLocalStorageMap::ValueDestructorPair::operator=(
+    ValueDestructorPair&& value_destructor_pair) {
+  // Destroy |value_| before overwriting it with a new value.
+  if (value_)
+    destructor_(value_);
+
+  value_ = value_destructor_pair.value_;
+  destructor_ = value_destructor_pair.destructor_;
+
+  value_destructor_pair.value_ = nullptr;
+
+  return *this;
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/threading/sequence_local_storage_map.h b/base/threading/sequence_local_storage_map.h
new file mode 100644
index 0000000..8b9155c
--- /dev/null
+++ b/base/threading/sequence_local_storage_map.h
@@ -0,0 +1,90 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_THREADING_SEQUENCE_LOCAL_STORAGE_MAP_H_
+#define BASE_THREADING_SEQUENCE_LOCAL_STORAGE_MAP_H_
+
+#include "base/base_export.h"
+#include "base/containers/flat_map.h"
+#include "base/macros.h"
+
+namespace base {
+namespace internal {
+
+// A SequenceLocalStorageMap holds (slot_id) -> (value, destructor) items for a
+// sequence. When a task runs, it is expected that a pointer to its sequence's
+// SequenceLocalStorageMap is set in TLS using
+// ScopedSetSequenceLocalStorageMapForCurrentThread. When a
+// SequenceLocalStorageMap is destroyed, it invokes the destructors associated
+// with values stored within it.
+// The Get() and Set() methods should not be accessed directly.
+// Use SequenceLocalStorageSlot to Get() and Set() values in the current
+// sequence's SequenceLocalStorageMap.
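+//
+// Illustrative sketch of the raw API (hypothetical slot id; as noted above,
+// real callers should go through SequenceLocalStorageSlot instead):
+//
+//   SequenceLocalStorageMap::ValueDestructorPair pair(
+//       new int(42), [](void* ptr) { delete static_cast<int*>(ptr); });
+//   SequenceLocalStorageMap::GetForCurrentThread().Set(/*slot_id=*/7,
+//                                                      std::move(pair));
+//   int value = *static_cast<int*>(
+//       SequenceLocalStorageMap::GetForCurrentThread().Get(/*slot_id=*/7));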
+class BASE_EXPORT SequenceLocalStorageMap {
+ public:
+  SequenceLocalStorageMap();
+  ~SequenceLocalStorageMap();
+
+  // Returns the SequenceLocalStorageMap bound to the current thread. It is
+  // invalid to call this outside the scope of a
+  // ScopedSetSequenceLocalStorageMapForCurrentThread.
+  static SequenceLocalStorageMap& GetForCurrentThread();
+
+  // Holds a pointer to a value alongside a destructor for this pointer.
+  // Calls the destructor on the value upon destruction.
+  class BASE_EXPORT ValueDestructorPair {
+   public:
+    using DestructorFunc = void(void*);
+
+    ValueDestructorPair(void* value, DestructorFunc* destructor);
+    ~ValueDestructorPair();
+
+    ValueDestructorPair(ValueDestructorPair&& value_destructor_pair);
+
+    ValueDestructorPair& operator=(ValueDestructorPair&& value_destructor_pair);
+
+    void* value() const { return value_; }
+
+   private:
+    void* value_;
+    DestructorFunc* destructor_;
+
+    DISALLOW_COPY_AND_ASSIGN(ValueDestructorPair);
+  };
+
+  // Returns the value stored in |slot_id| or nullptr if no value was stored.
+  void* Get(int slot_id);
+
+  // Stores |value_destructor_pair| in |slot_id|. Overwrites and destroys any
+  // previously stored value.
+  void Set(int slot_id, ValueDestructorPair value_destructor_pair);
+
+ private:
+  // Map from slot id to ValueDestructorPair.
+  // flat_map was chosen because there are expected to be relatively few
+  // entries in the map. For a low number of entries, flat_map is known to
+  // perform better than other map implementations.
+  base::flat_map<int, ValueDestructorPair> sls_map_;
+
+  DISALLOW_COPY_AND_ASSIGN(SequenceLocalStorageMap);
+};
+
+// Within the scope of this object,
+// SequenceLocalStorageMap::GetForCurrentThread() will return a reference to the
+// SequenceLocalStorageMap object passed to the constructor. There can be only
+// one ScopedSetSequenceLocalStorageMapForCurrentThread instance per thread at
+// a time.
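+//
+// Illustrative sketch of how a sequence-bound context might bracket task
+// execution (RunTasksOfSequence() is a hypothetical placeholder; TaskScheduler
+// workers do the equivalent internally):
+//
+//   SequenceLocalStorageMap sequence_local_storage;
+//   {
+//     ScopedSetSequenceLocalStorageMapForCurrentThread scoped_map(
+//         &sequence_local_storage);
+//     RunTasksOfSequence();  // SequenceLocalStorageSlot is usable in here.
+//   }
+//   // GetForCurrentThread() must no longer be called on this thread.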
+class BASE_EXPORT ScopedSetSequenceLocalStorageMapForCurrentThread {
+ public:
+  ScopedSetSequenceLocalStorageMapForCurrentThread(
+      SequenceLocalStorageMap* sequence_local_storage);
+
+  ~ScopedSetSequenceLocalStorageMapForCurrentThread();
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ScopedSetSequenceLocalStorageMapForCurrentThread);
+};
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_THREADING_SEQUENCE_LOCAL_STORAGE_MAP_H_
diff --git a/base/threading/sequence_local_storage_map_unittest.cc b/base/threading/sequence_local_storage_map_unittest.cc
new file mode 100644
index 0000000..a45bbc3
--- /dev/null
+++ b/base/threading/sequence_local_storage_map_unittest.cc
@@ -0,0 +1,117 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/sequence_local_storage_map.h"
+
+#include <memory>
+#include <utility>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace internal {
+
+namespace {
+
+constexpr int kSlotId = 1;
+
+class SetOnDestroy {
+ public:
+  explicit SetOnDestroy(bool* was_destroyed_ptr)
+      : was_destroyed_ptr_(was_destroyed_ptr) {
+    DCHECK(was_destroyed_ptr_);
+    DCHECK(!(*was_destroyed_ptr_));
+  }
+  ~SetOnDestroy() {
+    DCHECK(!(*was_destroyed_ptr_));
+    *was_destroyed_ptr_ = true;
+  }
+
+ private:
+  bool* const was_destroyed_ptr_;
+
+  DISALLOW_COPY_AND_ASSIGN(SetOnDestroy);
+};
+
+template <typename T, typename... Args>
+SequenceLocalStorageMap::ValueDestructorPair CreateValueDestructorPair(
+    Args&&... args) {
+  T* value = new T(std::forward<Args>(args)...);
+  SequenceLocalStorageMap::ValueDestructorPair::DestructorFunc* destructor =
+      [](void* ptr) { std::default_delete<T>()(static_cast<T*>(ptr)); };
+
+  SequenceLocalStorageMap::ValueDestructorPair value_destructor_pair{
+      value, destructor};
+
+  return value_destructor_pair;
+}
+
+}  // namespace
+
+// Verify that setting a value in the SequenceLocalStorageMap, then getting
+// it will yield the same value.
+TEST(SequenceLocalStorageMapTest, SetGet) {
+  SequenceLocalStorageMap sequence_local_storage_map;
+  ScopedSetSequenceLocalStorageMapForCurrentThread
+      scoped_sequence_local_storage_map(&sequence_local_storage_map);
+
+  SequenceLocalStorageMap::ValueDestructorPair value_destructor_pair =
+      CreateValueDestructorPair<int>(5);
+
+  sequence_local_storage_map.Set(kSlotId, std::move(value_destructor_pair));
+
+  EXPECT_EQ(*static_cast<int*>(sequence_local_storage_map.Get(kSlotId)), 5);
+}
+
+// Verify that the destructor is called on a value stored in the
+// SequenceLocalStorageMap when SequenceLocalStorageMap is destroyed.
+TEST(SequenceLocalStorageMapTest, Destructor) {
+  bool set_on_destruction = false;
+
+  {
+    SequenceLocalStorageMap sequence_local_storage_map;
+    ScopedSetSequenceLocalStorageMapForCurrentThread
+        scoped_sequence_local_storage_map(&sequence_local_storage_map);
+
+    SequenceLocalStorageMap::ValueDestructorPair value_destructor_pair =
+        CreateValueDestructorPair<SetOnDestroy>(&set_on_destruction);
+
+    sequence_local_storage_map.Set(kSlotId, std::move(value_destructor_pair));
+  }
+
+  EXPECT_TRUE(set_on_destruction);
+}
+
+// Verify that overwriting a value already in the SequenceLocalStorageMap
+// calls value's destructor.
+TEST(SequenceLocalStorageMapTest, DestructorCalledOnSetOverwrite) {
+  bool set_on_destruction = false;
+  bool set_on_destruction2 = false;
+  {
+    SequenceLocalStorageMap sequence_local_storage_map;
+    ScopedSetSequenceLocalStorageMapForCurrentThread
+        scoped_sequence_local_storage_map(&sequence_local_storage_map);
+
+    SequenceLocalStorageMap::ValueDestructorPair value_destructor_pair =
+        CreateValueDestructorPair<SetOnDestroy>(&set_on_destruction);
+    SequenceLocalStorageMap::ValueDestructorPair value_destructor_pair2 =
+        CreateValueDestructorPair<SetOnDestroy>(&set_on_destruction2);
+
+    sequence_local_storage_map.Set(kSlotId, std::move(value_destructor_pair));
+
+    ASSERT_FALSE(set_on_destruction);
+
+    // Overwrites the old value in the slot.
+    sequence_local_storage_map.Set(kSlotId, std::move(value_destructor_pair2));
+
+    // Destructor should've been called for the old value in the slot, and not
+    // yet called for the new value.
+    EXPECT_TRUE(set_on_destruction);
+    EXPECT_FALSE(set_on_destruction2);
+  }
+  EXPECT_TRUE(set_on_destruction2);
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/threading/sequence_local_storage_slot.cc b/base/threading/sequence_local_storage_slot.cc
new file mode 100644
index 0000000..b7db40b
--- /dev/null
+++ b/base/threading/sequence_local_storage_slot.cc
@@ -0,0 +1,26 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/sequence_local_storage_slot.h"
+
+#include <limits>
+
+#include "base/atomic_sequence_num.h"
+#include "base/logging.h"
+
+namespace base {
+namespace internal {
+
+namespace {
+AtomicSequenceNumber g_sequence_local_storage_slot_generator;
+}  // namespace
+
+int GetNextSequenceLocalStorageSlotNumber() {
+  int slot_id = g_sequence_local_storage_slot_generator.GetNext();
+  DCHECK_LT(slot_id, std::numeric_limits<int>::max());
+  return slot_id;
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/threading/sequence_local_storage_slot.h b/base/threading/sequence_local_storage_slot.h
new file mode 100644
index 0000000..315df7d
--- /dev/null
+++ b/base/threading/sequence_local_storage_slot.h
@@ -0,0 +1,105 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_THREADING_SEQUENCE_LOCAL_STORAGE_SLOT_H_
+#define BASE_THREADING_SEQUENCE_LOCAL_STORAGE_SLOT_H_
+
+#include <memory>
+#include <utility>
+
+#include "base/base_export.h"
+#include "base/threading/sequence_local_storage_map.h"
+
+namespace base {
+
+namespace internal {
+BASE_EXPORT int GetNextSequenceLocalStorageSlotNumber();
+}
+
+// SequenceLocalStorageSlot allows arbitrary values to be stored and retrieved
+// from a sequence. Values are deleted when the sequence is deleted.
+//
+// Example usage:
+//
+// namespace {
+// base::LazyInstance<SequenceLocalStorageSlot<int>> sls_value;
+// }
+//
+// void Read() {
+//   int value = sls_value.Get().Get();
+//   ...
+// }
+//
+// void Write() {
+//   sls_value.Get().Set(42);
+// }
+//
+// void PostTasks() {
+//   // Since Read() runs on the same sequence as Write(), it
+//   // will read the value "42". A Read() running on a different
+//   // sequence would not see that value.
+//   scoped_refptr<base::SequencedTaskRunner> task_runner = ...;
+//   task_runner->PostTask(FROM_HERE, base::BindOnce(&Write));
+//   task_runner->PostTask(FROM_HERE, base::BindOnce(&Read));
+// }
+//
+// SequenceLocalStorageSlot must be used within the scope of a
+// ScopedSetSequenceLocalStorageMapForCurrentThread object.
+// Note: this is true on all TaskScheduler workers and on threads bound to a
+// MessageLoop.
+template <typename T, typename Deleter = std::default_delete<T>>
+class SequenceLocalStorageSlot {
+ public:
+  SequenceLocalStorageSlot()
+      : slot_id_(internal::GetNextSequenceLocalStorageSlotNumber()) {}
+  ~SequenceLocalStorageSlot() = default;
+
+  // Get the sequence-local value stored in this slot. Returns a
+  // default-constructed value if no value was previously set.
+  T& Get() {
+    void* value =
+        internal::SequenceLocalStorageMap::GetForCurrentThread().Get(slot_id_);
+
+    // Sets and returns a default-constructed value if no value was previously
+    // set.
+    if (!value) {
+      Set(T());
+      return Get();
+    }
+    return *(static_cast<T*>(value));
+  }
+
+  // Set this slot's sequence-local value to |value|.
+  // Note that if T is expensive to copy, it may be more appropriate to instead
+  // store a std::unique_ptr<T>. This is not enforced directly by this class;
+  // types that use the DISALLOW_COPY_AND_ASSIGN style simply won't compile
+  // here, which nudges callers toward std::unique_ptr<T>.
+  void Set(T value) {
+    // Allocates the |value| with new rather than std::make_unique.
+    // Since SequenceLocalStorageMap needs to store values of various types
+    // within the same map, the type of value_destructor_pair.value is void*
+    // (std::unique_ptr<void> is invalid). Memory is freed by calling
+    // |value_destructor_pair.destructor| in the destructor of
+    // ValueDestructorPair which is invoked when the value is overwritten by
+    // another call to SequenceLocalStorageMap::Set or when the
+    // SequenceLocalStorageMap is deleted.
+    T* value_ptr = new T(std::move(value));
+
+    internal::SequenceLocalStorageMap::ValueDestructorPair::DestructorFunc*
+        destructor = [](void* ptr) { Deleter()(static_cast<T*>(ptr)); };
+
+    internal::SequenceLocalStorageMap::ValueDestructorPair
+        value_destructor_pair(value_ptr, destructor);
+
+    internal::SequenceLocalStorageMap::GetForCurrentThread().Set(
+        slot_id_, std::move(value_destructor_pair));
+  }
+
+ private:
+  // |slot_id_| is used as a key in SequenceLocalStorageMap.
+  const int slot_id_;
+
+  DISALLOW_COPY_AND_ASSIGN(SequenceLocalStorageSlot);
+};
+
+}  // namespace base
+
+#endif  // BASE_THREADING_SEQUENCE_LOCAL_STORAGE_SLOT_H_
diff --git a/base/threading/sequence_local_storage_slot_unittest.cc b/base/threading/sequence_local_storage_slot_unittest.cc
new file mode 100644
index 0000000..4a9f6a9
--- /dev/null
+++ b/base/threading/sequence_local_storage_slot_unittest.cc
@@ -0,0 +1,143 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/sequence_local_storage_slot.h"
+
+#include <utility>
+
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/threading/sequence_local_storage_map.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+class SequenceLocalStorageSlotTest : public testing::Test {
+ protected:
+  SequenceLocalStorageSlotTest()
+      : scoped_sequence_local_storage_(&sequence_local_storage_) {}
+
+  internal::SequenceLocalStorageMap sequence_local_storage_;
+  internal::ScopedSetSequenceLocalStorageMapForCurrentThread
+      scoped_sequence_local_storage_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(SequenceLocalStorageSlotTest);
+};
+
+}  // namespace
+
+// Verify that a value stored with Set() can be retrieved with Get().
+TEST_F(SequenceLocalStorageSlotTest, GetSet) {
+  SequenceLocalStorageSlot<int> slot;
+  slot.Set(5);
+  EXPECT_EQ(slot.Get(), 5);
+}
+
+// Verify that setting an object in a SequenceLocalStorageSlot creates a copy
+// of that object independent of the original one.
+TEST_F(SequenceLocalStorageSlotTest, SetObjectIsIndependent) {
+  bool should_be_false = false;
+
+  SequenceLocalStorageSlot<bool> slot;
+
+  slot.Set(should_be_false);
+
+  EXPECT_FALSE(slot.Get());
+  slot.Get() = true;
+  EXPECT_TRUE(slot.Get());
+
+  EXPECT_NE(should_be_false, slot.Get());
+}
+
+// Verify that multiple slots work and that calling Get after overwriting
+// a value in a slot yields the new value.
+TEST_F(SequenceLocalStorageSlotTest, GetSetMultipleSlots) {
+  SequenceLocalStorageSlot<int> slot1;
+  SequenceLocalStorageSlot<int> slot2;
+  SequenceLocalStorageSlot<int> slot3;
+
+  slot1.Set(1);
+  slot2.Set(2);
+  slot3.Set(3);
+
+  EXPECT_EQ(slot1.Get(), 1);
+  EXPECT_EQ(slot2.Get(), 2);
+  EXPECT_EQ(slot3.Get(), 3);
+
+  slot3.Set(4);
+  slot2.Set(5);
+  slot1.Set(6);
+
+  EXPECT_EQ(slot3.Get(), 4);
+  EXPECT_EQ(slot2.Get(), 5);
+  EXPECT_EQ(slot1.Get(), 6);
+}
+
+// Verify that changing the value returned by Get() changes the value
+// in sequence local storage.
+TEST_F(SequenceLocalStorageSlotTest, GetReferenceModifiable) {
+  SequenceLocalStorageSlot<bool> slot;
+  slot.Set(false);
+  slot.Get() = true;
+  EXPECT_TRUE(slot.Get());
+}
+
+// Verify that a move-only type can be stored in sequence local storage.
+TEST_F(SequenceLocalStorageSlotTest, SetGetWithMoveOnlyType) {
+  std::unique_ptr<int> int_unique_ptr = std::make_unique<int>(5);
+
+  SequenceLocalStorageSlot<std::unique_ptr<int>> slot;
+  slot.Set(std::move(int_unique_ptr));
+
+  EXPECT_EQ(*slot.Get(), 5);
+}
+
+// Verify that a Get() without a previous Set() on a slot returns a
+// default-constructed value.
+TEST_F(SequenceLocalStorageSlotTest, GetWithoutSetDefaultConstructs) {
+  struct DefaultConstructable {
+    int x = 0x12345678;
+  };
+
+  SequenceLocalStorageSlot<DefaultConstructable> slot;
+
+  EXPECT_EQ(slot.Get().x, 0x12345678);
+}
+
+// Verify that a Get() without a previous Set() on a slot with a POD-type
+// returns a default-constructed value.
+// Note: this test could be flaky and give a false pass. If it's flaky, the test
+// might've "passed" because the memory for the slot happened to be zeroed.
+TEST_F(SequenceLocalStorageSlotTest, GetWithoutSetDefaultConstructsPOD) {
+  SequenceLocalStorageSlot<void*> slot;
+
+  EXPECT_EQ(slot.Get(), nullptr);
+}
+
+// Verify that the value of a slot is specific to a SequenceLocalStorageMap.
+TEST(SequenceLocalStorageSlotMultipleMapTest, SetGetMultipleMapsOneSlot) {
+  SequenceLocalStorageSlot<unsigned int> slot;
+  internal::SequenceLocalStorageMap sequence_local_storage_maps[5];
+
+  // Set the value of the slot to be the index of the current
+  // SequenceLocalStorageMap in the array.
+  for (unsigned int i = 0; i < arraysize(sequence_local_storage_maps); ++i) {
+    internal::ScopedSetSequenceLocalStorageMapForCurrentThread
+        scoped_sequence_local_storage(&sequence_local_storage_maps[i]);
+
+    slot.Set(i);
+  }
+
+  for (unsigned int i = 0; i < arraysize(sequence_local_storage_maps); ++i) {
+    internal::ScopedSetSequenceLocalStorageMapForCurrentThread
+        scoped_sequence_local_storage(&sequence_local_storage_maps[i]);
+
+    EXPECT_EQ(slot.Get(), i);
+  }
+}
+
+}  // namespace base
diff --git a/base/threading/sequenced_task_runner_handle.cc b/base/threading/sequenced_task_runner_handle.cc
new file mode 100644
index 0000000..e6920f5
--- /dev/null
+++ b/base/threading/sequenced_task_runner_handle.cc
@@ -0,0 +1,59 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/sequenced_task_runner_handle.h"
+
+#include <utility>
+
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/threading/thread_local.h"
+#include "base/threading/thread_task_runner_handle.h"
+
+namespace base {
+
+namespace {
+
+LazyInstance<ThreadLocalPointer<SequencedTaskRunnerHandle>>::Leaky
+    sequenced_task_runner_tls = LAZY_INSTANCE_INITIALIZER;
+
+}  // namespace
+
+// static
+scoped_refptr<SequencedTaskRunner> SequencedTaskRunnerHandle::Get() {
+  // Return the registered SequencedTaskRunner, if any.
+  const SequencedTaskRunnerHandle* handle =
+      sequenced_task_runner_tls.Pointer()->Get();
+  if (handle)
+    return handle->task_runner_;
+
+  // Note if you hit this: the problem is the lack of a sequenced context. The
+  // ThreadTaskRunnerHandle is just the last attempt at finding such a context.
+  CHECK(ThreadTaskRunnerHandle::IsSet())
+      << "Error: This caller requires a sequenced context (i.e. the "
+         "current task needs to run from a SequencedTaskRunner).";
+  return ThreadTaskRunnerHandle::Get();
+}
+
+// static
+bool SequencedTaskRunnerHandle::IsSet() {
+  return sequenced_task_runner_tls.Pointer()->Get() ||
+         ThreadTaskRunnerHandle::IsSet();
+}
+
+SequencedTaskRunnerHandle::SequencedTaskRunnerHandle(
+    scoped_refptr<SequencedTaskRunner> task_runner)
+    : task_runner_(std::move(task_runner)) {
+  DCHECK(task_runner_->RunsTasksInCurrentSequence());
+  DCHECK(!SequencedTaskRunnerHandle::IsSet());
+  sequenced_task_runner_tls.Pointer()->Set(this);
+}
+
+SequencedTaskRunnerHandle::~SequencedTaskRunnerHandle() {
+  DCHECK(task_runner_->RunsTasksInCurrentSequence());
+  DCHECK_EQ(sequenced_task_runner_tls.Pointer()->Get(), this);
+  sequenced_task_runner_tls.Pointer()->Set(nullptr);
+}
+
+}  // namespace base
diff --git a/base/threading/sequenced_task_runner_handle.h b/base/threading/sequenced_task_runner_handle.h
new file mode 100644
index 0000000..f55cee5
--- /dev/null
+++ b/base/threading/sequenced_task_runner_handle.h
@@ -0,0 +1,43 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_THREADING_SEQUENCED_TASK_RUNNER_HANDLE_H_
+#define BASE_THREADING_SEQUENCED_TASK_RUNNER_HANDLE_H_
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/sequenced_task_runner.h"
+
+namespace base {
+
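+// Usage sketch (illustrative, assuming a sequenced context such as a thread
+// with a MessageLoop or a TaskScheduler sequenced task; PostFollowUp() and
+// DoMore() are hypothetical):
+//
+//   void PostFollowUp() {
+//     DCHECK(SequencedTaskRunnerHandle::IsSet());
+//     SequencedTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+//                                                base::BindOnce(&DoMore));
+//   }
+//
+// DoMore() then runs on the same sequence as PostFollowUp().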
+class BASE_EXPORT SequencedTaskRunnerHandle {
+ public:
+  // Returns a SequencedTaskRunner which guarantees that posted tasks will only
+  // run after the current task is finished and will satisfy a SequenceChecker.
+  // It should only be called if IsSet() returns true (see the comment there for
+  // the requirements).
+  static scoped_refptr<SequencedTaskRunner> Get();
+
+  // Returns true if one of the following conditions is fulfilled:
+  // a) A SequencedTaskRunner has been assigned to the current thread by
+  //    instantiating a SequencedTaskRunnerHandle.
+  // b) The current thread has a ThreadTaskRunnerHandle (which includes any
+  //    thread that has a MessageLoop associated with it).
+  static bool IsSet();
+
+  // Binds |task_runner| to the current thread.
+  explicit SequencedTaskRunnerHandle(
+      scoped_refptr<SequencedTaskRunner> task_runner);
+  ~SequencedTaskRunnerHandle();
+
+ private:
+  scoped_refptr<SequencedTaskRunner> task_runner_;
+
+  DISALLOW_COPY_AND_ASSIGN(SequencedTaskRunnerHandle);
+};
+
+}  // namespace base
+
+#endif  // BASE_THREADING_SEQUENCED_TASK_RUNNER_HANDLE_H_
diff --git a/base/threading/sequenced_task_runner_handle_unittest.cc b/base/threading/sequenced_task_runner_handle_unittest.cc
new file mode 100644
index 0000000..48394da
--- /dev/null
+++ b/base/threading/sequenced_task_runner_handle_unittest.cc
@@ -0,0 +1,90 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/sequenced_task_runner_handle.h"
+
+#include <memory>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/location.h"
+#include "base/memory/ref_counted.h"
+#include "base/run_loop.h"
+#include "base/sequence_checker_impl.h"
+#include "base/sequenced_task_runner.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/task_scheduler/post_task.h"
+#include "base/test/scoped_task_environment.h"
+#include "base/test/test_simple_task_runner.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+class SequencedTaskRunnerHandleTest : public ::testing::Test {
+ protected:
+  // Verifies that the context it runs on has a SequencedTaskRunnerHandle
+  // and that posting to it results in the posted task running in that same
+  // context (sequence).
+  static void VerifyCurrentSequencedTaskRunner() {
+    ASSERT_TRUE(SequencedTaskRunnerHandle::IsSet());
+    scoped_refptr<SequencedTaskRunner> task_runner =
+        SequencedTaskRunnerHandle::Get();
+    ASSERT_TRUE(task_runner);
+
+    // Use SequenceCheckerImpl to make sure it's not a no-op in Release builds.
+    std::unique_ptr<SequenceCheckerImpl> sequence_checker(
+        new SequenceCheckerImpl);
+    task_runner->PostTask(
+        FROM_HERE,
+        base::BindOnce(&SequencedTaskRunnerHandleTest::CheckValidSequence,
+                       std::move(sequence_checker)));
+  }
+
+  static void CheckValidSequence(
+      std::unique_ptr<SequenceCheckerImpl> sequence_checker) {
+    EXPECT_TRUE(sequence_checker->CalledOnValidSequence());
+  }
+
+  base::test::ScopedTaskEnvironment scoped_task_environment_;
+};
+
+TEST_F(SequencedTaskRunnerHandleTest, FromMessageLoop) {
+  VerifyCurrentSequencedTaskRunner();
+  RunLoop().RunUntilIdle();
+}
+
+TEST_F(SequencedTaskRunnerHandleTest, FromTaskSchedulerSequencedTask) {
+  base::CreateSequencedTaskRunnerWithTraits({})->PostTask(
+      FROM_HERE,
+      base::BindOnce(
+          &SequencedTaskRunnerHandleTest::VerifyCurrentSequencedTaskRunner));
+  scoped_task_environment_.RunUntilIdle();
+}
+
+TEST_F(SequencedTaskRunnerHandleTest, NoHandleFromUnsequencedTask) {
+  base::PostTask(FROM_HERE, base::BindOnce([]() {
+                   EXPECT_FALSE(SequencedTaskRunnerHandle::IsSet());
+                 }));
+  scoped_task_environment_.RunUntilIdle();
+}
+
+TEST(SequencedTaskRunnerHandleTestWithoutMessageLoop, FromHandleInScope) {
+  scoped_refptr<SequencedTaskRunner> test_task_runner(new TestSimpleTaskRunner);
+  EXPECT_FALSE(SequencedTaskRunnerHandle::IsSet());
+  EXPECT_FALSE(ThreadTaskRunnerHandle::IsSet());
+  {
+    SequencedTaskRunnerHandle handle(test_task_runner);
+    EXPECT_TRUE(SequencedTaskRunnerHandle::IsSet());
+    EXPECT_FALSE(ThreadTaskRunnerHandle::IsSet());
+    EXPECT_EQ(test_task_runner, SequencedTaskRunnerHandle::Get());
+  }
+  EXPECT_FALSE(SequencedTaskRunnerHandle::IsSet());
+  EXPECT_FALSE(ThreadTaskRunnerHandle::IsSet());
+}
+
+}  // namespace
+}  // namespace base
diff --git a/base/threading/simple_thread.cc b/base/threading/simple_thread.cc
new file mode 100644
index 0000000..04a5285
--- /dev/null
+++ b/base/threading/simple_thread.cc
@@ -0,0 +1,182 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/simple_thread.h"
+
+#include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread_restrictions.h"
+
+namespace base {
+
+SimpleThread::SimpleThread(const std::string& name_prefix)
+    : SimpleThread(name_prefix, Options()) {}
+
+SimpleThread::SimpleThread(const std::string& name_prefix,
+                           const Options& options)
+    : name_prefix_(name_prefix),
+      options_(options),
+      event_(WaitableEvent::ResetPolicy::MANUAL,
+             WaitableEvent::InitialState::NOT_SIGNALED) {}
+
+SimpleThread::~SimpleThread() {
+  DCHECK(HasBeenStarted()) << "SimpleThread was never started.";
+  DCHECK(!options_.joinable || HasBeenJoined())
+      << "Joinable SimpleThread destroyed without being Join()ed.";
+}
+
+void SimpleThread::Start() {
+  StartAsync();
+  ThreadRestrictions::ScopedAllowWait allow_wait;
+  event_.Wait();  // Wait for the thread to complete initialization.
+}
+
+void SimpleThread::Join() {
+  DCHECK(options_.joinable) << "A non-joinable thread can't be joined.";
+  DCHECK(HasStartBeenAttempted()) << "Tried to Join a never-started thread.";
+  DCHECK(!HasBeenJoined()) << "Tried to Join a thread multiple times.";
+  BeforeJoin();
+  PlatformThread::Join(thread_);
+  thread_ = PlatformThreadHandle();
+  joined_ = true;
+}
+
+void SimpleThread::StartAsync() {
+  DCHECK(!HasStartBeenAttempted()) << "Tried to Start a thread multiple times.";
+  start_called_ = true;
+  BeforeStart();
+  bool success =
+      options_.joinable
+          ? PlatformThread::CreateWithPriority(options_.stack_size, this,
+                                               &thread_, options_.priority)
+          : PlatformThread::CreateNonJoinableWithPriority(
+                options_.stack_size, this, options_.priority);
+  DCHECK(success);
+}
+
+PlatformThreadId SimpleThread::tid() {
+  DCHECK(HasBeenStarted());
+  return tid_;
+}
+
+bool SimpleThread::HasBeenStarted() {
+  ThreadRestrictions::ScopedAllowWait allow_wait;
+  return event_.IsSignaled();
+}
+
+void SimpleThread::ThreadMain() {
+  tid_ = PlatformThread::CurrentId();
+  // Construct our full name of the form "name_prefix_/TID".
+  std::string name(name_prefix_);
+  name.push_back('/');
+  name.append(IntToString(tid_));
+  PlatformThread::SetName(name);
+
+  // We've initialized our new thread, signal that we're done to Start().
+  event_.Signal();
+
+  BeforeRun();
+  Run();
+}
+
+DelegateSimpleThread::DelegateSimpleThread(Delegate* delegate,
+                                           const std::string& name_prefix)
+    : DelegateSimpleThread(delegate, name_prefix, Options()) {}
+
+DelegateSimpleThread::DelegateSimpleThread(Delegate* delegate,
+                                           const std::string& name_prefix,
+                                           const Options& options)
+    : SimpleThread(name_prefix, options),
+      delegate_(delegate) {
+  DCHECK(delegate_);
+}
+
+DelegateSimpleThread::~DelegateSimpleThread() = default;
+
+void DelegateSimpleThread::Run() {
+  DCHECK(delegate_) << "Tried to call Run without a delegate (called twice?)";
+
+  // Non-joinable DelegateSimpleThreads are allowed to be deleted during Run().
+  // Member state must not be accessed after invoking Run().
+  Delegate* delegate = delegate_;
+  delegate_ = nullptr;
+  delegate->Run();
+}
+
+DelegateSimpleThreadPool::DelegateSimpleThreadPool(
+    const std::string& name_prefix,
+    int num_threads)
+    : name_prefix_(name_prefix),
+      num_threads_(num_threads),
+      dry_(WaitableEvent::ResetPolicy::MANUAL,
+           WaitableEvent::InitialState::NOT_SIGNALED) {}
+
+DelegateSimpleThreadPool::~DelegateSimpleThreadPool() {
+  DCHECK(threads_.empty());
+  DCHECK(delegates_.empty());
+  DCHECK(!dry_.IsSignaled());
+}
+
+void DelegateSimpleThreadPool::Start() {
+  DCHECK(threads_.empty()) << "Start() called with outstanding threads.";
+  for (int i = 0; i < num_threads_; ++i) {
+    DelegateSimpleThread* thread = new DelegateSimpleThread(this, name_prefix_);
+    thread->Start();
+    threads_.push_back(thread);
+  }
+}
+
+void DelegateSimpleThreadPool::JoinAll() {
+  DCHECK(!threads_.empty()) << "JoinAll() called with no outstanding threads.";
+
+  // Tell all our threads to quit their worker loop.
+  AddWork(nullptr, num_threads_);
+
+  // Join and destroy all the worker threads.
+  for (int i = 0; i < num_threads_; ++i) {
+    threads_[i]->Join();
+    delete threads_[i];
+  }
+  threads_.clear();
+  DCHECK(delegates_.empty());
+}
+
+void DelegateSimpleThreadPool::AddWork(Delegate* delegate, int repeat_count) {
+  AutoLock locked(lock_);
+  for (int i = 0; i < repeat_count; ++i)
+    delegates_.push(delegate);
+  // If we were empty, signal that we have work now.
+  if (!dry_.IsSignaled())
+    dry_.Signal();
+}
+
+void DelegateSimpleThreadPool::Run() {
+  Delegate* work = nullptr;
+
+  while (true) {
+    dry_.Wait();
+    {
+      AutoLock locked(lock_);
+      if (!dry_.IsSignaled())
+        continue;
+
+      DCHECK(!delegates_.empty());
+      work = delegates_.front();
+      delegates_.pop();
+
+      // Signal to any other threads that we're currently out of work.
+      if (delegates_.empty())
+        dry_.Reset();
+    }
+
+    // A NULL delegate pointer signals us to quit.
+    if (!work)
+      break;
+
+    work->Run();
+  }
+}
+
+}  // namespace base
diff --git a/base/threading/simple_thread.h b/base/threading/simple_thread.h
new file mode 100644
index 0000000..976f557
--- /dev/null
+++ b/base/threading/simple_thread.h
@@ -0,0 +1,232 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// WARNING: You should probably be using Thread (thread.h) instead.  Thread is
+//          Chrome's message-loop based Thread abstraction, and if you are a
+//          thread running in the browser, there will likely be assumptions
+//          that your thread will have an associated message loop.
+//
+// This is a simple thread interface that is backed by a native operating
+// system thread.  You should use this only when you want a thread that does
+// not have an associated MessageLoop.  Unittesting is the best example of
+// this.
+//
+// The simplest interface to use is DelegateSimpleThread, which will create
+// a new thread, and execute the Delegate's virtual Run() in this new thread
+// until it has completed, exiting the thread.
+//
+// NOTE: You *MUST* call Join on the thread to clean up the underlying thread
+// resources.  You are also responsible for destructing the SimpleThread object.
+// It is invalid to destroy a SimpleThread while it is running, or to destroy
+// it without Start() ever having been called (i.e. with no thread created).
+// The Delegate object should live as long as a DelegateSimpleThread.
+//
+// Thread Safety: A SimpleThread is not completely thread safe.  It is safe to
+// access it from the creating thread or from the newly created thread.  This
+// implies that the creator thread should be the thread that calls Join.
+//
+// Example:
+//   class MyThreadRunner : public DelegateSimpleThread::Delegate { ... };
+//   MyThreadRunner runner;
+//   DelegateSimpleThread thread(&runner, "good_name_here");
+//   thread.Start();
+//   // Start will return after the Thread has been successfully started and
+//   // initialized.  The newly created thread will invoke runner->Run(), and
+//   // run until it returns.
+//   thread.Join();  // Wait until the thread has exited.  You *MUST* Join!
+//   // The SimpleThread object is still valid, however you may not call Join
+//   // or Start again.
+
+#ifndef BASE_THREADING_SIMPLE_THREAD_H_
+#define BASE_THREADING_SIMPLE_THREAD_H_
+
+#include <stddef.h>
+
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/containers/queue.h"
+#include "base/macros.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/platform_thread.h"
+
+namespace base {
+
+// This is the base SimpleThread.  You can derive from it and implement the
+// virtual Run method, or you can use the DelegateSimpleThread interface.
+class BASE_EXPORT SimpleThread : public PlatformThread::Delegate {
+ public:
+  struct BASE_EXPORT Options {
+   public:
+    Options() = default;
+    explicit Options(ThreadPriority priority_in) : priority(priority_in) {}
+    ~Options() = default;
+
+    // Allow copies.
+    Options(const Options& other) = default;
+    Options& operator=(const Options& other) = default;
+
+    // A custom stack size, or 0 for the system default.
+    size_t stack_size = 0;
+
+    ThreadPriority priority = ThreadPriority::NORMAL;
+
+    // If false, the underlying thread's PlatformThreadHandle will not be kept
+    // around and as such the SimpleThread instance will not be Join()able and
+    // must not be deleted before Run() is invoked. After that, it's up to
+    // the subclass to determine when it is safe to delete itself.
+    bool joinable = true;
+  };
+
+  // Create a SimpleThread.  |options| should be used to manage any specific
+  // configuration involving the thread creation and management.
+  // Every thread has a name, in the form of |name_prefix|/TID, for example
+  // "my_thread/321".  The thread will not be created until Start() is called.
+  explicit SimpleThread(const std::string& name_prefix);
+  SimpleThread(const std::string& name_prefix, const Options& options);
+
+  ~SimpleThread() override;
+
+  // Starts the thread and returns only after the thread has started and
+  // initialized (i.e. ThreadMain() has been called).
+  void Start();
+
+  // Joins the thread. If StartAsync() was used to start the thread, then this
+  // first waits for the thread to start cleanly, then it joins.
+  void Join();
+
+  // Starts the thread, but returns immediately, without waiting for the thread
+  // to have initialized first (i.e. this does not wait for ThreadMain() to have
+  // been run first).
+  void StartAsync();
+
+  // Subclasses should override the Run method.
+  virtual void Run() = 0;
+
+  // Returns the thread id, only valid after the thread has started. If the
+  // thread was started using Start(), then this will be valid after the call to
+  // Start(). If StartAsync() was used to start the thread, then this must not
+  // be called before HasBeenStarted() returns true.
+  PlatformThreadId tid();
+
+  // Returns true if the thread has been started and initialized (i.e. if
+  // ThreadMain() has run). If the thread was started with StartAsync(), but it
+  // hasn't been initialized yet (i.e. ThreadMain() has not run), then this will
+  // return false.
+  bool HasBeenStarted();
+
+  // Returns true if Join() has ever been called.
+  bool HasBeenJoined() { return joined_; }
+
+  // Returns true if Start() or StartAsync() has been called.
+  bool HasStartBeenAttempted() { return start_called_; }
+
+  // Overridden from PlatformThread::Delegate:
+  void ThreadMain() override;
+
+ private:
+  // This is called just before the thread is started, regardless of whether
+  // Start() or StartAsync() is used to start the thread.
+  virtual void BeforeStart() {}
+
+  // This is called just after the thread has been initialized and just before
+  // Run() is called. This is called on the newly started thread.
+  virtual void BeforeRun() {}
+
+  // This is called just before the thread is joined. The thread is started and
+  // has been initialized before this is called.
+  virtual void BeforeJoin() {}
+
+  const std::string name_prefix_;
+  std::string name_;
+  const Options options_;
+  PlatformThreadHandle thread_;  // PlatformThread handle, reset after Join.
+  WaitableEvent event_;          // Signaled when the thread is initialized.
+  PlatformThreadId tid_ = kInvalidThreadId;  // The backing thread's id.
+  bool joined_ = false;                      // True if Join has been called.
+  // Set to true when the platform-thread creation has started.
+  bool start_called_ = false;
+
+  DISALLOW_COPY_AND_ASSIGN(SimpleThread);
+};
+
+// A SimpleThread which delegates Run() to its Delegate. Non-joinable
+// DelegateSimpleThreads are safe to delete after Run() was invoked; their
+// Delegates are also safe to delete after that point from this class' point of
+// view (although implementations must of course make sure that Run() will not
+// use their Delegate's member state after its deletion).
+class BASE_EXPORT DelegateSimpleThread : public SimpleThread {
+ public:
+  class BASE_EXPORT Delegate {
+   public:
+    virtual ~Delegate() = default;
+    virtual void Run() = 0;
+  };
+
+  DelegateSimpleThread(Delegate* delegate,
+                       const std::string& name_prefix);
+  DelegateSimpleThread(Delegate* delegate,
+                       const std::string& name_prefix,
+                       const Options& options);
+
+  ~DelegateSimpleThread() override;
+  void Run() override;
+
+ private:
+  Delegate* delegate_;
+
+  DISALLOW_COPY_AND_ASSIGN(DelegateSimpleThread);
+};
+
+// DelegateSimpleThreadPool allows you to start up a fixed number of threads,
+// and then add jobs which will be dispatched to the threads.  This is
+// convenient when you have a lot of small work that you want done
+// multi-threaded, but don't want to spawn a thread for each small bit of work.
+//
+// You just call AddWork() to add a delegate to the list of work to be done.
+// JoinAll() will make sure that all outstanding work is processed, and wait
+// for everything to finish.  You can reuse a pool, so you can call Start()
+// again after you've called JoinAll().
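+//
+// Illustrative sketch (WorkItem is a hypothetical Delegate implementation):
+//
+//   DelegateSimpleThreadPool pool("worker", 4);
+//   WorkItem work;             // Must outlive the pool's use of it.
+//   pool.Start();
+//   pool.AddWork(&work, 100);  // Queues 100 runs of WorkItem::Run().
+//   pool.JoinAll();            // Drains the queue, then joins all threads.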
+class BASE_EXPORT DelegateSimpleThreadPool
+    : public DelegateSimpleThread::Delegate {
+ public:
+  typedef DelegateSimpleThread::Delegate Delegate;
+
+  DelegateSimpleThreadPool(const std::string& name_prefix, int num_threads);
+  ~DelegateSimpleThreadPool() override;
+
+  // Start up all of the underlying threads, and start processing work if we
+  // have any.
+  void Start();
+
+  // Make sure all outstanding work is finished, and wait for and destroy all
+  // of the underlying threads in the pool.
+  void JoinAll();
+
+  // It is safe to AddWork() at any time, before or after Start().
+  // Delegate* should always be a valid pointer; NULL is reserved internally.
+  void AddWork(Delegate* work, int repeat_count);
+  void AddWork(Delegate* work) {
+    AddWork(work, 1);
+  }
+
+  // We implement the Delegate interface, for running our internal threads.
+  void Run() override;
+
+ private:
+  const std::string name_prefix_;
+  int num_threads_;
+  std::vector<DelegateSimpleThread*> threads_;
+  base::queue<Delegate*> delegates_;
+  base::Lock lock_;    // Protects |delegates_|.
+  WaitableEvent dry_;  // Not signaled when there is no work to do.
+
+  DISALLOW_COPY_AND_ASSIGN(DelegateSimpleThreadPool);
+};
+
+}  // namespace base
+
+#endif  // BASE_THREADING_SIMPLE_THREAD_H_
diff --git a/base/threading/simple_thread_unittest.cc b/base/threading/simple_thread_unittest.cc
new file mode 100644
index 0000000..4e618f9
--- /dev/null
+++ b/base/threading/simple_thread_unittest.cc
@@ -0,0 +1,234 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <memory>
+
+#include "base/atomic_sequence_num.h"
+#include "base/memory/ptr_util.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/test/gtest_util.h"
+#include "base/threading/simple_thread.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+class SetIntRunner : public DelegateSimpleThread::Delegate {
+ public:
+  SetIntRunner(int* ptr, int val) : ptr_(ptr), val_(val) { }
+  ~SetIntRunner() override = default;
+
+ private:
+  void Run() override { *ptr_ = val_; }
+
+  int* ptr_;
+  int val_;
+
+  DISALLOW_COPY_AND_ASSIGN(SetIntRunner);
+};
+
+// Signals |started_| when Run() is invoked and waits until |released_| is
+// signaled to return, signaling |done_| before doing so. Useful for tests that
+// care to control Run()'s flow.
+class ControlledRunner : public DelegateSimpleThread::Delegate {
+ public:
+  ControlledRunner()
+      : started_(WaitableEvent::ResetPolicy::MANUAL,
+                 WaitableEvent::InitialState::NOT_SIGNALED),
+        released_(WaitableEvent::ResetPolicy::MANUAL,
+                  WaitableEvent::InitialState::NOT_SIGNALED),
+        done_(WaitableEvent::ResetPolicy::MANUAL,
+              WaitableEvent::InitialState::NOT_SIGNALED) {}
+
+  ~ControlledRunner() override { ReleaseAndWaitUntilDone(); }
+
+  void WaitUntilStarted() { started_.Wait(); }
+
+  void ReleaseAndWaitUntilDone() {
+    released_.Signal();
+    done_.Wait();
+  }
+
+ private:
+  void Run() override {
+    started_.Signal();
+    released_.Wait();
+    done_.Signal();
+  }
+
+  WaitableEvent started_;
+  WaitableEvent released_;
+  WaitableEvent done_;
+
+  DISALLOW_COPY_AND_ASSIGN(ControlledRunner);
+};
+
+class WaitEventRunner : public DelegateSimpleThread::Delegate {
+ public:
+  explicit WaitEventRunner(WaitableEvent* event) : event_(event) { }
+  ~WaitEventRunner() override = default;
+
+ private:
+  void Run() override {
+    EXPECT_FALSE(event_->IsSignaled());
+    event_->Signal();
+    EXPECT_TRUE(event_->IsSignaled());
+  }
+
+  WaitableEvent* event_;
+
+  DISALLOW_COPY_AND_ASSIGN(WaitEventRunner);
+};
+
+class SeqRunner : public DelegateSimpleThread::Delegate {
+ public:
+  explicit SeqRunner(AtomicSequenceNumber* seq) : seq_(seq) { }
+
+ private:
+  void Run() override { seq_->GetNext(); }
+
+  AtomicSequenceNumber* seq_;
+
+  DISALLOW_COPY_AND_ASSIGN(SeqRunner);
+};
+
+// We count up on a sequence number, firing on the event when we've hit our
+// expected amount, otherwise we wait on the event.  This will ensure that we
+// have all threads outstanding until we hit our expected thread pool size.
+class VerifyPoolRunner : public DelegateSimpleThread::Delegate {
+ public:
+  VerifyPoolRunner(AtomicSequenceNumber* seq,
+                   int total, WaitableEvent* event)
+      : seq_(seq), total_(total), event_(event) { }
+
+ private:
+  void Run() override {
+    if (seq_->GetNext() == total_) {
+      event_->Signal();
+    } else {
+      event_->Wait();
+    }
+  }
+
+  AtomicSequenceNumber* seq_;
+  int total_;
+  WaitableEvent* event_;
+
+  DISALLOW_COPY_AND_ASSIGN(VerifyPoolRunner);
+};
+
+}  // namespace
+
+TEST(SimpleThreadTest, CreateAndJoin) {
+  int stack_int = 0;
+
+  SetIntRunner runner(&stack_int, 7);
+  EXPECT_EQ(0, stack_int);
+
+  DelegateSimpleThread thread(&runner, "int_setter");
+  EXPECT_FALSE(thread.HasBeenStarted());
+  EXPECT_FALSE(thread.HasBeenJoined());
+  EXPECT_EQ(0, stack_int);
+
+  thread.Start();
+  EXPECT_TRUE(thread.HasBeenStarted());
+  EXPECT_FALSE(thread.HasBeenJoined());
+
+  thread.Join();
+  EXPECT_TRUE(thread.HasBeenStarted());
+  EXPECT_TRUE(thread.HasBeenJoined());
+  EXPECT_EQ(7, stack_int);
+}
+
+TEST(SimpleThreadTest, WaitForEvent) {
+  // Create a thread, and wait for it to signal us.
+  WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+                      WaitableEvent::InitialState::NOT_SIGNALED);
+
+  WaitEventRunner runner(&event);
+  DelegateSimpleThread thread(&runner, "event_waiter");
+
+  EXPECT_FALSE(event.IsSignaled());
+  thread.Start();
+  event.Wait();
+  EXPECT_TRUE(event.IsSignaled());
+  thread.Join();
+}
+
+TEST(SimpleThreadTest, NonJoinableStartAndDieOnJoin) {
+  ControlledRunner runner;
+
+  SimpleThread::Options options;
+  options.joinable = false;
+  DelegateSimpleThread thread(&runner, "non_joinable", options);
+
+  EXPECT_FALSE(thread.HasBeenStarted());
+  thread.Start();
+  EXPECT_TRUE(thread.HasBeenStarted());
+
+  // Note: this is not quite the same as |thread.HasBeenStarted()| which
+  // represents ThreadMain() getting ready to invoke Run() whereas
+  // |runner.WaitUntilStarted()| ensures Run() was actually invoked.
+  runner.WaitUntilStarted();
+
+  EXPECT_FALSE(thread.HasBeenJoined());
+  EXPECT_DCHECK_DEATH({ thread.Join(); });
+}
+
+TEST(SimpleThreadTest, NonJoinableInactiveDelegateDestructionIsOkay) {
+  std::unique_ptr<ControlledRunner> runner(new ControlledRunner);
+
+  SimpleThread::Options options;
+  options.joinable = false;
+  std::unique_ptr<DelegateSimpleThread> thread(
+      new DelegateSimpleThread(runner.get(), "non_joinable", options));
+
+  thread->Start();
+  runner->WaitUntilStarted();
+
+  // Deleting a non-joinable SimpleThread after Run() was invoked is okay.
+  thread.reset();
+
+  runner->WaitUntilStarted();
+  runner->ReleaseAndWaitUntilDone();
+  // It should be safe to destroy a Delegate after its Run() method completed.
+  runner.reset();
+}
+
+TEST(SimpleThreadTest, ThreadPool) {
+  AtomicSequenceNumber seq;
+  SeqRunner runner(&seq);
+  DelegateSimpleThreadPool pool("seq_runner", 10);
+
+  // Add work before we're running.
+  pool.AddWork(&runner, 300);
+
+  EXPECT_EQ(seq.GetNext(), 0);
+  pool.Start();
+
+  // Add work while we're running.
+  pool.AddWork(&runner, 300);
+
+  pool.JoinAll();
+
+  EXPECT_EQ(seq.GetNext(), 601);
+
+  // We can reuse our pool.  Verify that all 10 threads can actually run in
+  // parallel, so this test will only pass if there are actually 10 threads.
+  AtomicSequenceNumber seq2;
+  WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+                      WaitableEvent::InitialState::NOT_SIGNALED);
+  // Changing 9 to 10, for example, would cause JoinAll() to never return.
+  VerifyPoolRunner verifier(&seq2, 9, &event);
+  pool.Start();
+
+  pool.AddWork(&verifier, 10);
+
+  pool.JoinAll();
+  EXPECT_EQ(seq2.GetNext(), 10);
+}
+
+}  // namespace base
diff --git a/base/threading/thread.cc b/base/threading/thread.cc
new file mode 100644
index 0000000..97e160f
--- /dev/null
+++ b/base/threading/thread.cc
@@ -0,0 +1,370 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/thread.h"
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/lazy_instance.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/run_loop.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
+#include "base/threading/thread_id_name_manager.h"
+#include "base/threading/thread_local.h"
+#include "base/threading/thread_restrictions.h"
+#include "build/build_config.h"
+
+#if defined(OS_POSIX) && !defined(OS_NACL)
+#include "base/files/file_descriptor_watcher_posix.h"
+#endif
+
+#if defined(OS_WIN)
+#include "base/win/scoped_com_initializer.h"
+#endif
+
+namespace base {
+
+namespace {
+
+// We use this thread-local variable to record whether or not a thread exited
+// because its Stop method was called.  This allows us to catch cases where
+// MessageLoop::QuitWhenIdle() is called directly, which is unexpected when
+// using a Thread to set up and run a MessageLoop.
+base::LazyInstance<base::ThreadLocalBoolean>::Leaky lazy_tls_bool =
+    LAZY_INSTANCE_INITIALIZER;
+
+}  // namespace
+
+Thread::Options::Options() = default;
+
+Thread::Options::Options(MessageLoop::Type type, size_t size)
+    : message_loop_type(type), stack_size(size) {}
+
+Thread::Options::Options(const Options& other) = default;
+
+Thread::Options::~Options() = default;
+
+Thread::Thread(const std::string& name)
+    : id_event_(WaitableEvent::ResetPolicy::MANUAL,
+                WaitableEvent::InitialState::NOT_SIGNALED),
+      name_(name),
+      start_event_(WaitableEvent::ResetPolicy::MANUAL,
+                   WaitableEvent::InitialState::NOT_SIGNALED) {
+  // Only bind the sequence on Start(): the state is constant between
+  // construction and Start() and it's thus valid for Start() to be called on
+  // another sequence as long as every other operation is then performed on that
+  // sequence.
+  owning_sequence_checker_.DetachFromSequence();
+}
+
+Thread::~Thread() {
+  Stop();
+}
+
+bool Thread::Start() {
+  DCHECK(owning_sequence_checker_.CalledOnValidSequence());
+
+  Options options;
+#if defined(OS_WIN)
+  if (com_status_ == STA)
+    options.message_loop_type = MessageLoop::TYPE_UI;
+#endif
+  return StartWithOptions(options);
+}
+
+bool Thread::StartWithOptions(const Options& options) {
+  DCHECK(owning_sequence_checker_.CalledOnValidSequence());
+  DCHECK(!message_loop_);
+  DCHECK(!IsRunning());
+  DCHECK(!stopping_) << "Starting a non-joinable thread a second time? That's "
+                     << "not allowed!";
+#if defined(OS_WIN)
+  DCHECK((com_status_ != STA) ||
+      (options.message_loop_type == MessageLoop::TYPE_UI));
+#endif
+
+  // Reset |id_| here to support restarting the thread.
+  id_event_.Reset();
+  id_ = kInvalidThreadId;
+
+  SetThreadWasQuitProperly(false);
+
+  MessageLoop::Type type = options.message_loop_type;
+  if (!options.message_pump_factory.is_null())
+    type = MessageLoop::TYPE_CUSTOM;
+
+  message_loop_timer_slack_ = options.timer_slack;
+  std::unique_ptr<MessageLoop> message_loop_owned =
+      MessageLoop::CreateUnbound(type, options.message_pump_factory);
+  message_loop_ = message_loop_owned.get();
+  start_event_.Reset();
+
+  // Hold |thread_lock_| while starting the new thread to synchronize with
+  // Stop() while it's not guaranteed to be sequenced (until crbug/629139 is
+  // fixed).
+  {
+    AutoLock lock(thread_lock_);
+    bool success =
+        options.joinable
+            ? PlatformThread::CreateWithPriority(options.stack_size, this,
+                                                 &thread_, options.priority)
+            : PlatformThread::CreateNonJoinableWithPriority(
+                  options.stack_size, this, options.priority);
+    if (!success) {
+      DLOG(ERROR) << "failed to create thread";
+      message_loop_ = nullptr;
+      return false;
+    }
+  }
+
+  joinable_ = options.joinable;
+
+  // The ownership of |message_loop_| is managed by the newly created thread
+  // within the ThreadMain.
+  ignore_result(message_loop_owned.release());
+
+  DCHECK(message_loop_);
+  return true;
+}
+
+bool Thread::StartAndWaitForTesting() {
+  DCHECK(owning_sequence_checker_.CalledOnValidSequence());
+  bool result = Start();
+  if (!result)
+    return false;
+  WaitUntilThreadStarted();
+  return true;
+}
+
+bool Thread::WaitUntilThreadStarted() const {
+  DCHECK(owning_sequence_checker_.CalledOnValidSequence());
+  if (!message_loop_)
+    return false;
+  base::ThreadRestrictions::ScopedAllowWait allow_wait;
+  start_event_.Wait();
+  return true;
+}
+
+void Thread::FlushForTesting() {
+  DCHECK(owning_sequence_checker_.CalledOnValidSequence());
+  if (!message_loop_)
+    return;
+
+  WaitableEvent done(WaitableEvent::ResetPolicy::AUTOMATIC,
+                     WaitableEvent::InitialState::NOT_SIGNALED);
+  task_runner()->PostTask(FROM_HERE,
+                          BindOnce(&WaitableEvent::Signal, Unretained(&done)));
+  done.Wait();
+}
+
+void Thread::Stop() {
+  DCHECK(joinable_);
+
+  // TODO(gab): Fix improper usage of this API (http://crbug.com/629139) and
+  // enable this check, until then synchronization with Start() via
+  // |thread_lock_| is required...
+  // DCHECK(owning_sequence_checker_.CalledOnValidSequence());
+  AutoLock lock(thread_lock_);
+
+  StopSoon();
+
+  // Can't join if |thread_| is either already gone or non-joinable.
+  if (thread_.is_null())
+    return;
+
+  // Wait for the thread to exit.
+  //
+  // TODO(darin): Unfortunately, we need to keep |message_loop_| around until
+  // the thread exits.  Some consumers are abusing the API.  Make them stop.
+  //
+  PlatformThread::Join(thread_);
+  thread_ = base::PlatformThreadHandle();
+
+  // The thread should nullify |message_loop_| on exit (note: Join() adds an
+  // implicit memory barrier and no lock is thus required for this check).
+  DCHECK(!message_loop_);
+
+  stopping_ = false;
+}
+
+void Thread::StopSoon() {
+  // TODO(gab): Fix improper usage of this API (http://crbug.com/629139) and
+  // enable this check.
+  // DCHECK(owning_sequence_checker_.CalledOnValidSequence());
+
+  if (stopping_ || !message_loop_)
+    return;
+
+  stopping_ = true;
+
+  if (using_external_message_loop_) {
+    // Setting |stopping_| to true above should be sufficient for this thread
+    // to be considered "stopped": it never set its |running_| bit because it
+    // has no ThreadMain of its own.
+    DCHECK(!IsRunning());
+    message_loop_ = nullptr;
+    return;
+  }
+
+  task_runner()->PostTask(
+      FROM_HERE, base::BindOnce(&Thread::ThreadQuitHelper, Unretained(this)));
+}
+
+void Thread::DetachFromSequence() {
+  DCHECK(owning_sequence_checker_.CalledOnValidSequence());
+  owning_sequence_checker_.DetachFromSequence();
+}
+
+PlatformThreadId Thread::GetThreadId() const {
+  // If the thread is created but not started yet, wait for |id_| to be ready.
+  base::ThreadRestrictions::ScopedAllowWait allow_wait;
+  id_event_.Wait();
+  return id_;
+}
+
+PlatformThreadHandle Thread::GetThreadHandle() const {
+  AutoLock lock(thread_lock_);
+  return thread_;
+}
+
+bool Thread::IsRunning() const {
+  // TODO(gab): Fix improper usage of this API (http://crbug.com/629139) and
+  // enable this check.
+  // DCHECK(owning_sequence_checker_.CalledOnValidSequence());
+
+  // If the thread's already started (i.e. |message_loop_| is non-null) and not
+  // yet requested to stop (i.e. |stopping_| is false) we can just return true.
+  // (Note that |stopping_| is touched only on the same sequence that starts /
+  // started the new thread so we need no locking here.)
+  if (message_loop_ && !stopping_)
+    return true;
+  // Otherwise check the |running_| flag, which is set to true by the new thread
+  // only while it is inside Run().
+  AutoLock lock(running_lock_);
+  return running_;
+}
+
+void Thread::Run(RunLoop* run_loop) {
+  // Overridable protected method to be called from our |thread_| only.
+  DCHECK(id_event_.IsSignaled());
+  DCHECK_EQ(id_, PlatformThread::CurrentId());
+
+  run_loop->Run();
+}
+
+// static
+void Thread::SetThreadWasQuitProperly(bool flag) {
+  lazy_tls_bool.Pointer()->Set(flag);
+}
+
+// static
+bool Thread::GetThreadWasQuitProperly() {
+  bool quit_properly = true;
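+  // In debug builds, read back the TLS flag set by SetThreadWasQuitProperly();
+  // release builds skip the check and assume a proper quit.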
+#ifndef NDEBUG
+  quit_properly = lazy_tls_bool.Pointer()->Get();
+#endif
+  return quit_properly;
+}
+
+void Thread::SetMessageLoop(MessageLoop* message_loop) {
+  DCHECK(owning_sequence_checker_.CalledOnValidSequence());
+  DCHECK(message_loop);
+
+  // Setting |message_loop_| should suffice for this thread to be considered
+  // as "running", until Stop() is invoked.
+  DCHECK(!IsRunning());
+  message_loop_ = message_loop;
+  DCHECK(IsRunning());
+
+  using_external_message_loop_ = true;
+}
+
+void Thread::ThreadMain() {
+  // First, make GetThreadId() available to avoid deadlocks. It could be called
+  // any place in the following thread initialization code.
+  DCHECK(!id_event_.IsSignaled());
+  // Note: this read of |id_| while |id_event_| isn't signaled is exceptionally
+  // okay because ThreadMain has a happens-after relationship with the other
+  // write in StartWithOptions().
+  DCHECK_EQ(kInvalidThreadId, id_);
+  id_ = PlatformThread::CurrentId();
+  DCHECK_NE(kInvalidThreadId, id_);
+  id_event_.Signal();
+
+  // Complete the initialization of our Thread object.
+  PlatformThread::SetName(name_.c_str());
+  ANNOTATE_THREAD_NAME(name_.c_str());  // Tell the name to race detector.
+
+  // Lazily initialize the |message_loop| so that it can run on this thread.
+  DCHECK(message_loop_);
+  std::unique_ptr<MessageLoop> message_loop(message_loop_);
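+  // |message_loop| now owns the loop; it is destroyed at the end of
+  // ThreadMain (see the comment near the bottom of this function).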
+  message_loop_->BindToCurrentThread();
+  message_loop_->SetTimerSlack(message_loop_timer_slack_);
+
+#if defined(OS_POSIX) && !defined(OS_NACL)
+  // Allow threads running a MessageLoopForIO to use FileDescriptorWatcher API.
+  std::unique_ptr<FileDescriptorWatcher> file_descriptor_watcher;
+  if (MessageLoopForIO::IsCurrent()) {
+    file_descriptor_watcher.reset(new FileDescriptorWatcher(
+        static_cast<MessageLoopForIO*>(message_loop_)));
+  }
+#endif
+
+#if defined(OS_WIN)
+  std::unique_ptr<win::ScopedCOMInitializer> com_initializer;
+  if (com_status_ != NONE) {
+    com_initializer.reset((com_status_ == STA) ?
+        new win::ScopedCOMInitializer() :
+        new win::ScopedCOMInitializer(win::ScopedCOMInitializer::kMTA));
+  }
+#endif
+
+  // Let the thread do extra initialization.
+  Init();
+
+  {
+    AutoLock lock(running_lock_);
+    running_ = true;
+  }
+
+  start_event_.Signal();
+
+  RunLoop run_loop;
+  run_loop_ = &run_loop;
+  Run(run_loop_);
+
+  {
+    AutoLock lock(running_lock_);
+    running_ = false;
+  }
+
+  // Let the thread do extra cleanup.
+  CleanUp();
+
+#if defined(OS_WIN)
+  com_initializer.reset();
+#endif
+
+  if (message_loop->type() != MessageLoop::TYPE_CUSTOM) {
+    // Assert that RunLoop::QuitWhenIdle was called by ThreadQuitHelper. Don't
+    // check for custom message pumps, because their shutdown might not allow
+    // this.
+    DCHECK(GetThreadWasQuitProperly());
+  }
+
+  // We can't receive messages anymore.
+  // (The message loop is destructed at the end of this block)
+  message_loop_ = nullptr;
+  run_loop_ = nullptr;
+}
+
+void Thread::ThreadQuitHelper() {
+  DCHECK(run_loop_);
+  run_loop_->QuitWhenIdle();
+  SetThreadWasQuitProperly(true);
+}
+
+}  // namespace base
diff --git a/base/threading/thread.h b/base/threading/thread.h
new file mode 100644
index 0000000..9fbdcb8
--- /dev/null
+++ b/base/threading/thread.h
@@ -0,0 +1,356 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_THREADING_THREAD_H_
+#define BASE_THREADING_THREAD_H_
+
+#include <stddef.h>
+
+#include <memory>
+#include <string>
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/message_loop/message_loop.h"
+#include "base/message_loop/timer_slack.h"
+#include "base/sequence_checker.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/atomic_flag.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/platform_thread.h"
+#include "build/build_config.h"
+
+namespace base {
+
+class MessagePump;
+class RunLoop;
+
+// IMPORTANT: Instead of creating a base::Thread, consider using
+// base::Create(Sequenced|SingleThread)TaskRunnerWithTraits().
+//
+// A simple thread abstraction that establishes a MessageLoop on a new thread.
+// The consumer uses the MessageLoop of the thread to cause code to execute on
+// the thread.  When this object is destroyed the thread is terminated.  All
+// pending tasks queued on the thread's message loop will run to completion
+// before the thread is terminated.
+//
+// WARNING! SUBCLASSES MUST CALL Stop() IN THEIR DESTRUCTORS!  See ~Thread().
+//
+// After the thread is stopped, the destruction sequence is:
+//
+//  (1) Thread::CleanUp()
+//  (2) MessageLoop::~MessageLoop
+//  (3) MessageLoopCurrent::DestructionObserver::WillDestroyCurrentMessageLoop
+//
+// This API is not thread-safe: unless indicated otherwise its methods are only
+// valid from the owning sequence (which is the one from which Start() is
+// invoked -- should it differ from the one on which it was constructed).
+//
+// Sometimes it's useful to kick things off on the initial sequence (e.g.
+// construction, Start(), task_runner()), but to then hand the Thread over to a
+// pool of users for the last one of them to destroy it when done. For that use
+// case, Thread::DetachFromSequence() allows the owning sequence to give up
+// ownership. The caller is then responsible for ensuring a happens-after
+// relationship between the DetachFromSequence() call and the next use of that
+// Thread object (including ~Thread()).
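+//
+// Typical usage (a minimal sketch; DoWork is a placeholder for the caller's
+// own function, and error handling is elided):
+//
+//   base::Thread worker("worker");
+//   if (worker.StartWithOptions(
+//           base::Thread::Options(base::MessageLoop::TYPE_IO, 0))) {
+//     worker.task_runner()->PostTask(FROM_HERE, base::BindOnce(&DoWork));
+//   }
+//   worker.Stop();  // Optional: ~Thread() also stops a joinable thread.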
+class BASE_EXPORT Thread : PlatformThread::Delegate {
+ public:
+  struct BASE_EXPORT Options {
+    typedef Callback<std::unique_ptr<MessagePump>()> MessagePumpFactory;
+
+    Options();
+    Options(MessageLoop::Type type, size_t size);
+    Options(const Options& other);
+    ~Options();
+
+    // Specifies the type of message loop that will be allocated on the thread.
+    // This is ignored when a non-null message_pump_factory is set.
+    MessageLoop::Type message_loop_type = MessageLoop::TYPE_DEFAULT;
+
+    // Specifies timer slack for thread message loop.
+    TimerSlack timer_slack = TIMER_SLACK_NONE;
+
+    // Used to create the MessagePump for the MessageLoop. The callback is Run()
+    // on the thread. If message_pump_factory.is_null(), then a MessagePump
+    // appropriate for |message_loop_type| is created. Setting this forces the
+    // MessageLoop::Type to TYPE_CUSTOM.
+    MessagePumpFactory message_pump_factory;
+
+    // Specifies the maximum stack size that the thread is allowed to use.
+    // This does not necessarily correspond to the thread's initial stack size.
+    // A value of 0 indicates that the default maximum should be used.
+    size_t stack_size = 0;
+
+    // Specifies the initial thread priority.
+    ThreadPriority priority = ThreadPriority::NORMAL;
+
+    // If false, the thread will not be joined on destruction. This is intended
+    // for threads that want TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN
+    // semantics. Non-joinable threads can't be joined (must be leaked and
+    // can't be destroyed or Stop()'ed).
+    // TODO(gab): allow non-joinable instances to be deleted without causing
+    // use-after-frees (proposal @ https://crbug.com/629139#c14)
+    bool joinable = true;
+  };
+
+  // Constructor.
+  // name is a display string to identify the thread.
+  explicit Thread(const std::string& name);
+
+  // Destroys the thread, stopping it if necessary.
+  //
+  // NOTE: ALL SUBCLASSES OF Thread MUST CALL Stop() IN THEIR DESTRUCTORS (or
+  // guarantee Stop() is explicitly called before the subclass is destroyed).
+  // This is required to avoid a data race between the destructor modifying the
+  // vtable, and the thread's ThreadMain calling the virtual method Run().  It
+  // also ensures that the CleanUp() virtual method is called on the subclass
+  // before it is destructed.
+  ~Thread() override;
+
+#if defined(OS_WIN)
+  // Causes the thread to initialize COM.  This must be called before calling
+  // Start() or StartWithOptions().  If |use_mta| is false, the thread is also
+  // started with a TYPE_UI message loop.  It is an error to call
+  // init_com_with_mta(false) and then StartWithOptions() with any message loop
+  // type other than TYPE_UI.
+  void init_com_with_mta(bool use_mta) {
+    DCHECK(!message_loop_);
+    com_status_ = use_mta ? MTA : STA;
+  }
+#endif
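+  // For instance (a sketch): calling |init_com_with_mta(false)| and then
+  // Start() yields an STA thread running a TYPE_UI message loop.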
+
+  // Starts the thread.  Returns true if the thread was successfully started;
+  // otherwise, returns false.  Upon successful return, the message_loop()
+  // getter will return non-null.
+  //
+  // Note: This function can't be called on Windows with the loader lock held;
+  // i.e. during a DllMain, global object construction or destruction, atexit()
+  // callback.
+  bool Start();
+
+  // Starts the thread. Behaves exactly like Start(), but additionally allows
+  // the caller to override the default options.
+  //
+  // Note: This function can't be called on Windows with the loader lock held;
+  // i.e. during a DllMain, global object construction or destruction, atexit()
+  // callback.
+  bool StartWithOptions(const Options& options);
+
+  // Starts the thread and waits for it to start and run initialization before
+  // returning. It's the same as calling Start() and then
+  // WaitUntilThreadStarted().
+  // Note that using this (instead of Start() or StartWithOptions()) causes
+  // jank on the calling thread, so it should only be used in testing code.
+  bool StartAndWaitForTesting();
+
+  // Blocks until the thread starts running. Called within
+  // StartAndWaitForTesting().
+  // Note that calling this causes jank on the calling thread, so it must be
+  // used carefully in production code.
+  bool WaitUntilThreadStarted() const;
+
+  // Blocks until all tasks previously posted to this thread have been executed.
+  void FlushForTesting();
+
+  // Signals the thread to exit and returns once the thread has exited. The
+  // Thread object is completely reset and may be used as if it were newly
+  // constructed (i.e., Start may be called again). Can only be called if
+  // |joinable_|.
+  //
+  // Stop may be called multiple times and is simply ignored if the thread is
+  // already stopped or currently stopping.
+  //
+  // Start/Stop are not thread-safe and callers that desire to invoke them from
+  // different threads must ensure mutual exclusion.
+  //
+  // NOTE: If you are a consumer of Thread, it is not necessary to call this
+  // before deleting your Thread objects, as the destructor will do it.
+  // IF YOU ARE A SUBCLASS OF Thread, YOU MUST CALL THIS IN YOUR DESTRUCTOR.
+  void Stop();
+
+  // Signals the thread to exit in the near future.
+  //
+  // WARNING: This function is not meant to be commonly used. Use at your own
+  // risk. Calling this function will cause message_loop() to become invalid in
+  // the near future. This function was created to work around a specific
+  // deadlock on the Windows printer worker thread. In any other case, Stop()
+  // should be used.
+  //
+  // Call Stop() to reset the thread object once it is known that the thread has
+  // quit.
+  void StopSoon();
+
+  // Detaches the owning sequence, indicating that the next call to this API
+  // (including ~Thread()) can happen from a different sequence (to which it
+  // will be rebound). This call itself must happen on the current owning
+  // sequence and the caller must ensure the next API call has a happens-after
+  // relationship with this one.
+  void DetachFromSequence();
+
+  // Returns the message loop for this thread.  Use the MessageLoop's
+  // PostTask methods to execute code on the thread.  This only returns
+  // non-null after a successful call to Start.  After Stop has been called,
+  // this will return nullptr.
+  //
+  // NOTE: You must not call this MessageLoop's Quit method directly.  Use
+  // the Thread's Stop method instead.
+  //
+  // In addition to this Thread's owning sequence, this can also safely be
+  // called from the underlying thread itself.
+  MessageLoop* message_loop() const {
+    // This class doesn't provide synchronization around |message_loop_| and as
+    // such only the owner should access it (and the underlying thread which
+    // never sees it before it's set). In practice, many callers are coming from
+    // unrelated threads but provide their own implicit (e.g. memory barriers
+    // from task posting) or explicit (e.g. locks) synchronization making the
+    // access of |message_loop_| safe... Changing all of those callers is
+    // unfeasible; instead verify that they can reliably see
+    // |message_loop_ != nullptr| without synchronization as a proof that their
+    // external synchronization catches the unsynchronized effects of Start().
+    // TODO(gab): Despite all of the above this test has to be disabled for now
+    // per crbug.com/629139#c6.
+    // DCHECK(owning_sequence_checker_.CalledOnValidSequence() ||
+    //        (id_event_.IsSignaled() && id_ == PlatformThread::CurrentId()) ||
+    //        message_loop_);
+    return message_loop_;
+  }
+
+  // Returns a TaskRunner for this thread. Use the TaskRunner's PostTask
+  // methods to execute code on the thread. Returns nullptr if the thread is not
+  // running (e.g. before Start or after Stop have been called). Callers can
+  // hold on to this even after the thread is gone; in this situation, attempts
+  // to PostTask() will fail.
+  //
+  // In addition to this Thread's owning sequence, this can also safely be
+  // called from the underlying thread itself.
+  scoped_refptr<SingleThreadTaskRunner> task_runner() const {
+    // Refer to the DCHECK and comment inside |message_loop()|.
+    DCHECK(owning_sequence_checker_.CalledOnValidSequence() ||
+           (id_event_.IsSignaled() && id_ == PlatformThread::CurrentId()) ||
+           message_loop_);
+    return message_loop_ ? message_loop_->task_runner() : nullptr;
+  }
+
+  // Returns the name of this thread (for display in debugger too).
+  const std::string& thread_name() const { return name_; }
+
+  // Returns the thread ID.  Should not be called before the first Start*()
+  // call.  Keeps on returning the same ID even after a Stop() call. The next
+  // Start*() call renews the ID.
+  //
+  // WARNING: This function will block if the thread hasn't started yet.
+  //
+  // This method is thread-safe.
+  PlatformThreadId GetThreadId() const;
+
+  // Returns the current thread handle. If called before Start*() returns or
+  // after Stop() returns, an empty thread handle will be returned.
+  //
+  // This method is thread-safe.
+  //
+  // TODO(robliao): Remove this when it no longer needs to be temporarily
+  // exposed for http://crbug.com/717380.
+  PlatformThreadHandle GetThreadHandle() const;
+
+  // Returns true if the thread has been started, and not yet stopped.
+  bool IsRunning() const;
+
+ protected:
+  // Called just prior to starting the message loop
+  virtual void Init() {}
+
+  // Called to start the run loop
+  virtual void Run(RunLoop* run_loop);
+
+  // Called just after the message loop ends
+  virtual void CleanUp() {}
+
+  static void SetThreadWasQuitProperly(bool flag);
+  static bool GetThreadWasQuitProperly();
+
+  // Bind this Thread to an existing MessageLoop instead of starting a new one.
+  // TODO(gab): Remove this after ios/ has undergone the same surgery as
+  // BrowserThreadImpl (ref.
+  // https://chromium-review.googlesource.com/c/chromium/src/+/969104).
+  void SetMessageLoop(MessageLoop* message_loop);
+
+  bool using_external_message_loop() const {
+    return using_external_message_loop_;
+  }
+
+ private:
+#if defined(OS_WIN)
+  enum ComStatus {
+    NONE,
+    STA,
+    MTA,
+  };
+#endif
+
+  // PlatformThread::Delegate methods:
+  void ThreadMain() override;
+
+  void ThreadQuitHelper();
+
+#if defined(OS_WIN)
+  // Whether this thread needs to initialize COM, and if so, in what mode.
+  ComStatus com_status_ = NONE;
+#endif
+
+  // Mirrors the Options::joinable field used to start this thread. Verified
+  // on Stop() -- non-joinable threads can't be joined (must be leaked).
+  bool joinable_ = true;
+
+  // If true, we're in the middle of stopping, and shouldn't access
+  // |message_loop_|. It may be non-null and invalid.
+  // Should only be written on the thread that created this thread; reads from
+  // other threads may observe a stale value.
+  bool stopping_ = false;
+
+  // True while inside of Run().
+  bool running_ = false;
+  mutable base::Lock running_lock_;  // Protects |running_|.
+
+  // The thread's handle.
+  PlatformThreadHandle thread_;
+  mutable base::Lock thread_lock_;  // Protects |thread_|.
+
+  // The thread's id once it has started.
+  PlatformThreadId id_ = kInvalidThreadId;
+  // Protects |id_|, which must only be read while |id_event_| is signaled.
+  mutable WaitableEvent id_event_;
+
+  // The thread's MessageLoop and RunLoop. Valid only while the thread is alive.
+  // Set by the created thread.
+  MessageLoop* message_loop_ = nullptr;
+  RunLoop* run_loop_ = nullptr;
+
+  // True only if |message_loop_| was externally provided by |SetMessageLoop()|
+  // in which case this Thread has no underlying |thread_| and should merely
+  // drop |message_loop_| on Stop(). In that event, this remains true after
+  // Stop() was invoked so that subclasses can use this state to build their own
+  // cleanup logic as required.
+  bool using_external_message_loop_ = false;
+
+  // Stores Options::timer_slack until the message loop has been bound to
+  // a thread.
+  TimerSlack message_loop_timer_slack_ = TIMER_SLACK_NONE;
+
+  // The name of the thread.  Used for debugging purposes.
+  const std::string name_;
+
+  // Signaled when the created thread gets ready to use the message loop.
+  mutable WaitableEvent start_event_;
+
+  // This class is not thread-safe, use this to verify access from the owning
+  // sequence of the Thread.
+  SequenceChecker owning_sequence_checker_;
+
+  DISALLOW_COPY_AND_ASSIGN(Thread);
+};
+
+}  // namespace base
+
+#endif  // BASE_THREADING_THREAD_H_
diff --git a/base/threading/thread_checker.h b/base/threading/thread_checker.h
new file mode 100644
index 0000000..6799e25
--- /dev/null
+++ b/base/threading/thread_checker.h
@@ -0,0 +1,103 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_THREADING_THREAD_CHECKER_H_
+#define BASE_THREADING_THREAD_CHECKER_H_
+
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/threading/thread_checker_impl.h"
+
+// ThreadChecker is a helper class used to help verify that some methods of a
+// class are called from the same thread (for thread-affinity).
+//
+// Use the macros below instead of the ThreadChecker directly so that the unused
+// member doesn't result in an extra byte (four when padded) per instance in
+// production.
+//
+// Usage of this class should be *rare* as most classes require thread-safety
+// but not thread-affinity. Prefer base::SequenceChecker to verify thread-safe
+// access.
+//
+// Thread-affinity checks should only be required in classes that use thread-
+// local-storage or a third-party API that does.
+//
+// Prefer to encode the minimum requirements of each class instead of the
+// environment it happens to run in today. e.g. if a class requires thread-
+// safety but not thread-affinity, use a SequenceChecker even if it happens to
+// run on a SingleThreadTaskRunner today. That makes it easier to understand
+// what would need to change to turn that SingleThreadTaskRunner into a
+// SequencedTaskRunner for ease of scheduling as well as minimizes side-effects
+// if that change is made.
+//
+// Usage:
+//   class MyClass {
+//    public:
+//     MyClass() {
+//       // It's sometimes useful to detach on construction for objects that are
+//       // constructed in one place and forever after used from another
+//       // thread.
+//       DETACH_FROM_THREAD(my_thread_checker_);
+//     }
+//
+//     ~MyClass() {
+//       // ThreadChecker doesn't automatically check that it's destroyed on
+//       // its origin thread, for the same reason it's sometimes detached in
+//       // the constructor. It's okay to destroy off thread if the owner
+//       // otherwise knows usage on the associated thread is done. If you're
+//       // not detaching in the constructor, you probably want to check
+//       // explicitly in the destructor.
+//       DCHECK_CALLED_ON_VALID_THREAD(my_thread_checker_);
+//     }
+//
+//     void MyMethod() {
+//       DCHECK_CALLED_ON_VALID_THREAD(my_thread_checker_);
+//       ... (do stuff) ...
+//     }
+//
+//    private:
+//     THREAD_CHECKER(my_thread_checker_);
+//   };
+
+#if DCHECK_IS_ON()
+#define THREAD_CHECKER(name) base::ThreadChecker name
+#define DCHECK_CALLED_ON_VALID_THREAD(name) DCHECK((name).CalledOnValidThread())
+#define DETACH_FROM_THREAD(name) (name).DetachFromThread()
+#else  // DCHECK_IS_ON()
+#define THREAD_CHECKER(name)
+#define DCHECK_CALLED_ON_VALID_THREAD(name) EAT_STREAM_PARAMETERS
+#define DETACH_FROM_THREAD(name)
+#endif  // DCHECK_IS_ON()
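+
+// For example (illustrative): with DCHECK_IS_ON(), THREAD_CHECKER(checker_)
+// declares |base::ThreadChecker checker_;|; in other builds the macros
+// expand to nothing, so the member costs nothing in production.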
+
+namespace base {
+
+// Do nothing implementation, for use in release mode.
+//
+// Note: You should almost always use the ThreadChecker class (through the above
+// macros) to get the right version for your build configuration.
+class ThreadCheckerDoNothing {
+ public:
+  ThreadCheckerDoNothing() = default;
+  bool CalledOnValidThread() const WARN_UNUSED_RESULT { return true; }
+  void DetachFromThread() {}
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ThreadCheckerDoNothing);
+};
+
+// Note that ThreadCheckerImpl::CalledOnValidThread() returns false when called
+// from tasks posted to SingleThreadTaskRunners bound to different sequences,
+// even if the tasks happen to run on the same thread (e.g. two independent
+// SingleThreadTaskRunners on the TaskScheduler that happen to share a thread).
+#if DCHECK_IS_ON()
+class ThreadChecker : public ThreadCheckerImpl {
+};
+#else
+class ThreadChecker : public ThreadCheckerDoNothing {
+};
+#endif  // DCHECK_IS_ON()
+
+}  // namespace base
+
+#endif  // BASE_THREADING_THREAD_CHECKER_H_
diff --git a/base/threading/thread_checker_impl.cc b/base/threading/thread_checker_impl.cc
new file mode 100644
index 0000000..d5ccbdb
--- /dev/null
+++ b/base/threading/thread_checker_impl.cc
@@ -0,0 +1,57 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/thread_checker_impl.h"
+
+#include "base/threading/thread_task_runner_handle.h"
+
+namespace base {
+
+ThreadCheckerImpl::ThreadCheckerImpl() {
+  AutoLock auto_lock(lock_);
+  EnsureAssigned();
+}
+
+ThreadCheckerImpl::~ThreadCheckerImpl() = default;
+
+bool ThreadCheckerImpl::CalledOnValidThread() const {
+  AutoLock auto_lock(lock_);
+  EnsureAssigned();
+
+  // Always return true when called from the task from which this
+  // ThreadCheckerImpl was assigned to a thread.
+  if (task_token_ == TaskToken::GetForCurrentThread())
+    return true;
+
+  // If this ThreadCheckerImpl is bound to a valid SequenceToken, it must be
+  // equal to the current SequenceToken and there must be a registered
+  // ThreadTaskRunnerHandle. Otherwise, the fact that the current task runs on
+  // the thread to which this ThreadCheckerImpl is bound is fortuitous.
+  if (sequence_token_.IsValid() &&
+      (sequence_token_ != SequenceToken::GetForCurrentThread() ||
+       !ThreadTaskRunnerHandle::IsSet())) {
+    return false;
+  }
+
+  return thread_id_ == PlatformThread::CurrentRef();
+}
+
+void ThreadCheckerImpl::DetachFromThread() {
+  AutoLock auto_lock(lock_);
+  thread_id_ = PlatformThreadRef();
+  task_token_ = TaskToken();
+  sequence_token_ = SequenceToken();
+}
+
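+// EnsureAssigned() lazily binds this checker to the current thread, task and
+// sequence on first use; DetachFromThread() clears the binding so that the
+// next CalledOnValidThread() rebinds it.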
+void ThreadCheckerImpl::EnsureAssigned() const {
+  lock_.AssertAcquired();
+  if (!thread_id_.is_null())
+    return;
+
+  thread_id_ = PlatformThread::CurrentRef();
+  task_token_ = TaskToken::GetForCurrentThread();
+  sequence_token_ = SequenceToken::GetForCurrentThread();
+}
+
+}  // namespace base
diff --git a/base/threading/thread_checker_impl.h b/base/threading/thread_checker_impl.h
new file mode 100644
index 0000000..103dfe7
--- /dev/null
+++ b/base/threading/thread_checker_impl.h
@@ -0,0 +1,62 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_THREADING_THREAD_CHECKER_IMPL_H_
+#define BASE_THREADING_THREAD_CHECKER_IMPL_H_
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/sequence_token.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/platform_thread.h"
+
+namespace base {
+
+// Real implementation of ThreadChecker, for use in debug mode, or for temporary
+// use in release mode (e.g. to CHECK on a threading issue seen only in the
+// wild).
+//
+// Note: You should almost always use the ThreadChecker class to get the right
+// version for your build configuration.
+class BASE_EXPORT ThreadCheckerImpl {
+ public:
+  ThreadCheckerImpl();
+  ~ThreadCheckerImpl();
+
+  bool CalledOnValidThread() const WARN_UNUSED_RESULT;
+
+  // Changes the thread that is checked for in CalledOnValidThread.  This may
+  // be useful when an object may be created on one thread and then used
+  // exclusively on another thread.
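+  // For instance (a sketch): an object constructed on thread A but used
+  // exclusively on thread B may call DetachFromThread() right after
+  // construction; the first CalledOnValidThread() call on B then rebinds it.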
+  void DetachFromThread();
+
+ private:
+  void EnsureAssigned() const;
+
+  // Members are mutable so that CalledOnValidThread() can set them.
+
+  // Synchronizes access to all members.
+  mutable base::Lock lock_;
+
+  // Thread on which CalledOnValidThread() may return true.
+  mutable PlatformThreadRef thread_id_;
+
+  // TaskToken for which CalledOnValidThread() always returns true. This allows
+  // CalledOnValidThread() to return true when called multiple times from the
+  // same task, even if it's not running in a single-threaded context itself
+  // (allowing usage of ThreadChecker objects on the stack in the scope of one-
+  // off tasks). Note: CalledOnValidThread() may return true even if the current
+  // TaskToken is not equal to this.
+  mutable TaskToken task_token_;
+
+  // SequenceToken for which CalledOnValidThread() may return true. Used to
+  // ensure that CalledOnValidThread() doesn't return true for TaskScheduler
+  // tasks that happen to run on the same thread but weren't posted to the same
+  // SingleThreadTaskRunner.
+  mutable SequenceToken sequence_token_;
+};
+
+}  // namespace base
+
+#endif  // BASE_THREADING_THREAD_CHECKER_IMPL_H_
diff --git a/base/threading/thread_checker_unittest.cc b/base/threading/thread_checker_unittest.cc
new file mode 100644
index 0000000..5fbbc52
--- /dev/null
+++ b/base/threading/thread_checker_unittest.cc
@@ -0,0 +1,245 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/thread_checker.h"
+
+#include <memory>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/sequence_token.h"
+#include "base/test/gtest_util.h"
+#include "base/test/test_simple_task_runner.h"
+#include "base/threading/simple_thread.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+// A thread that runs a callback.
+class RunCallbackThread : public SimpleThread {
+ public:
+  explicit RunCallbackThread(const Closure& callback)
+      : SimpleThread("RunCallbackThread"), callback_(callback) {}
+
+ private:
+  // SimpleThread:
+  void Run() override { callback_.Run(); }
+
+  const Closure callback_;
+
+  DISALLOW_COPY_AND_ASSIGN(RunCallbackThread);
+};
+
+// Runs a callback on a new thread synchronously.
+void RunCallbackOnNewThreadSynchronously(const Closure& callback) {
+  RunCallbackThread run_callback_thread(callback);
+  run_callback_thread.Start();
+  run_callback_thread.Join();
+}
+
+void ExpectCalledOnValidThread(ThreadCheckerImpl* thread_checker) {
+  ASSERT_TRUE(thread_checker);
+
+  // This should bind |thread_checker| to the current thread if it wasn't
+  // already bound to a thread.
+  EXPECT_TRUE(thread_checker->CalledOnValidThread());
+
+  // Since |thread_checker| is now bound to the current thread, another call to
+  // CalledOnValidThread() should return true.
+  EXPECT_TRUE(thread_checker->CalledOnValidThread());
+}
+
+void ExpectNotCalledOnValidThread(ThreadCheckerImpl* thread_checker) {
+  ASSERT_TRUE(thread_checker);
+  EXPECT_FALSE(thread_checker->CalledOnValidThread());
+}
+
+void ExpectNotCalledOnValidThreadWithSequenceTokenAndThreadTaskRunnerHandle(
+    ThreadCheckerImpl* thread_checker,
+    SequenceToken sequence_token) {
+  ThreadTaskRunnerHandle thread_task_runner_handle(
+      MakeRefCounted<TestSimpleTaskRunner>());
+  ScopedSetSequenceTokenForCurrentThread
+      scoped_set_sequence_token_for_current_thread(sequence_token);
+  ExpectNotCalledOnValidThread(thread_checker);
+}
+
+}  // namespace
+
+TEST(ThreadCheckerTest, AllowedSameThreadNoSequenceToken) {
+  ThreadCheckerImpl thread_checker;
+  EXPECT_TRUE(thread_checker.CalledOnValidThread());
+}
+
+TEST(ThreadCheckerTest,
+     AllowedSameThreadAndSequenceDifferentTasksWithThreadTaskRunnerHandle) {
+  ThreadTaskRunnerHandle thread_task_runner_handle(
+      MakeRefCounted<TestSimpleTaskRunner>());
+
+  std::unique_ptr<ThreadCheckerImpl> thread_checker;
+  const SequenceToken sequence_token = SequenceToken::Create();
+
+  {
+    ScopedSetSequenceTokenForCurrentThread
+        scoped_set_sequence_token_for_current_thread(sequence_token);
+    thread_checker.reset(new ThreadCheckerImpl);
+  }
+
+  {
+    ScopedSetSequenceTokenForCurrentThread
+        scoped_set_sequence_token_for_current_thread(sequence_token);
+    EXPECT_TRUE(thread_checker->CalledOnValidThread());
+  }
+}
+
+TEST(ThreadCheckerTest,
+     AllowedSameThreadSequenceAndTaskNoThreadTaskRunnerHandle) {
+  ScopedSetSequenceTokenForCurrentThread
+      scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
+  ThreadCheckerImpl thread_checker;
+  EXPECT_TRUE(thread_checker.CalledOnValidThread());
+}
+
+TEST(ThreadCheckerTest,
+     DisallowedSameThreadAndSequenceDifferentTasksNoThreadTaskRunnerHandle) {
+  std::unique_ptr<ThreadCheckerImpl> thread_checker;
+
+  {
+    ScopedSetSequenceTokenForCurrentThread
+        scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
+    thread_checker.reset(new ThreadCheckerImpl);
+  }
+
+  {
+    ScopedSetSequenceTokenForCurrentThread
+        scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
+    EXPECT_FALSE(thread_checker->CalledOnValidThread());
+  }
+}
+
+TEST(ThreadCheckerTest, DisallowedDifferentThreadsNoSequenceToken) {
+  ThreadCheckerImpl thread_checker;
+  RunCallbackOnNewThreadSynchronously(
+      Bind(&ExpectNotCalledOnValidThread, Unretained(&thread_checker)));
+}
+
+TEST(ThreadCheckerTest, DisallowedDifferentThreadsSameSequence) {
+  ThreadTaskRunnerHandle thread_task_runner_handle(
+      MakeRefCounted<TestSimpleTaskRunner>());
+  const SequenceToken sequence_token(SequenceToken::Create());
+
+  ScopedSetSequenceTokenForCurrentThread
+      scoped_set_sequence_token_for_current_thread(sequence_token);
+  ThreadCheckerImpl thread_checker;
+  EXPECT_TRUE(thread_checker.CalledOnValidThread());
+
+  RunCallbackOnNewThreadSynchronously(Bind(
+      &ExpectNotCalledOnValidThreadWithSequenceTokenAndThreadTaskRunnerHandle,
+      Unretained(&thread_checker), sequence_token));
+}
+
+TEST(ThreadCheckerTest, DisallowedSameThreadDifferentSequence) {
+  std::unique_ptr<ThreadCheckerImpl> thread_checker;
+
+  ThreadTaskRunnerHandle thread_task_runner_handle(
+      MakeRefCounted<TestSimpleTaskRunner>());
+
+  {
+    ScopedSetSequenceTokenForCurrentThread
+        scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
+    thread_checker.reset(new ThreadCheckerImpl);
+  }
+
+  {
+    // Different SequenceToken.
+    ScopedSetSequenceTokenForCurrentThread
+        scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
+    EXPECT_FALSE(thread_checker->CalledOnValidThread());
+  }
+
+  // No SequenceToken.
+  EXPECT_FALSE(thread_checker->CalledOnValidThread());
+}
+
+TEST(ThreadCheckerTest, DetachFromThread) {
+  ThreadCheckerImpl thread_checker;
+  thread_checker.DetachFromThread();
+
+  // Verify that CalledOnValidThread() returns true when called on a different
+  // thread after a call to DetachFromThread().
+  RunCallbackOnNewThreadSynchronously(
+      Bind(&ExpectCalledOnValidThread, Unretained(&thread_checker)));
+
+  EXPECT_FALSE(thread_checker.CalledOnValidThread());
+}
+
+TEST(ThreadCheckerTest, DetachFromThreadWithSequenceToken) {
+  ThreadTaskRunnerHandle thread_task_runner_handle(
+      MakeRefCounted<TestSimpleTaskRunner>());
+  ScopedSetSequenceTokenForCurrentThread
+      scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
+  ThreadCheckerImpl thread_checker;
+  thread_checker.DetachFromThread();
+
+  // Verify that CalledOnValidThread() returns true when called on a different
+  // thread after a call to DetachFromThread().
+  RunCallbackOnNewThreadSynchronously(
+      Bind(&ExpectCalledOnValidThread, Unretained(&thread_checker)));
+
+  EXPECT_FALSE(thread_checker.CalledOnValidThread());
+}
+
+namespace {
+
+// This fixture is a helper for unit testing the thread checker macros as it is
+// not possible to inline ExpectDeathOnOtherThread() and
+// ExpectNoDeathOnOtherThreadAfterDetach() as lambdas since binding
+// |Unretained(&my_thread_checker_)| wouldn't compile on non-dcheck builds
+// where it won't be defined.
+class ThreadCheckerMacroTest : public testing::Test {
+ public:
+  ThreadCheckerMacroTest() = default;
+
+  void ExpectDeathOnOtherThread() {
+#if DCHECK_IS_ON()
+    EXPECT_DCHECK_DEATH({ DCHECK_CALLED_ON_VALID_THREAD(my_thread_checker_); });
+#else
+    // Happily no-ops on non-dcheck builds.
+    DCHECK_CALLED_ON_VALID_THREAD(my_thread_checker_);
+#endif
+  }
+
+  void ExpectNoDeathOnOtherThreadAfterDetach() {
+    DCHECK_CALLED_ON_VALID_THREAD(my_thread_checker_);
+    DCHECK_CALLED_ON_VALID_THREAD(my_thread_checker_)
+        << "Make sure it compiles when DCHECK is off";
+  }
+
+ protected:
+  THREAD_CHECKER(my_thread_checker_);
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ThreadCheckerMacroTest);
+};
+
+}  // namespace
+
+TEST_F(ThreadCheckerMacroTest, Macros) {
+  THREAD_CHECKER(my_thread_checker);
+
+  RunCallbackOnNewThreadSynchronously(Bind(
+      &ThreadCheckerMacroTest::ExpectDeathOnOtherThread, Unretained(this)));
+
+  DETACH_FROM_THREAD(my_thread_checker_);
+
+  RunCallbackOnNewThreadSynchronously(
+      Bind(&ThreadCheckerMacroTest::ExpectNoDeathOnOtherThreadAfterDetach,
+           Unretained(this)));
+}
+
+}  // namespace base
diff --git a/base/threading/thread_collision_warner.cc b/base/threading/thread_collision_warner.cc
new file mode 100644
index 0000000..547e11c
--- /dev/null
+++ b/base/threading/thread_collision_warner.cc
@@ -0,0 +1,64 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/thread_collision_warner.h"
+
+#include "base/logging.h"
+#include "base/threading/platform_thread.h"
+
+namespace base {
+
+void DCheckAsserter::warn() {
+  NOTREACHED() << "Thread Collision";
+}
+
+static subtle::Atomic32 CurrentThread() {
+  const PlatformThreadId current_thread_id = PlatformThread::CurrentId();
+  // We need to get the thread id into an atomic data type. This might be a
+  // truncating conversion, but any loss-of-information just increases the
+  // chance of a false negative, not a false positive.
+  const subtle::Atomic32 atomic_thread_id =
+      static_cast<subtle::Atomic32>(current_thread_id);
+
+  return atomic_thread_id;
+}
+
+void ThreadCollisionWarner::EnterSelf() {
+  // If the stored thread ID is 0, write the current thread ID. If two or
+  // more threads arrive here at once, only one will succeed in writing its
+  // ID to |valid_thread_id_|.
+  subtle::Atomic32 current_thread_id = CurrentThread();
+
+  int previous_value = subtle::NoBarrier_CompareAndSwap(&valid_thread_id_,
+                                                        0,
+                                                        current_thread_id);
+  if (previous_value != 0 && previous_value != current_thread_id) {
+    // Gotcha! A thread other than the owning thread is trying to use this
+    // object.
+    asserter_->warn();
+  }
+
+  subtle::NoBarrier_AtomicIncrement(&counter_, 1);
+}
+
+void ThreadCollisionWarner::Enter() {
+  subtle::Atomic32 current_thread_id = CurrentThread();
+
+  if (subtle::NoBarrier_CompareAndSwap(&valid_thread_id_,
+                                       0,
+                                       current_thread_id) != 0) {
+    // Gotcha! Another thread is trying to use the same object.
+    asserter_->warn();
+  }
+
+  subtle::NoBarrier_AtomicIncrement(&counter_, 1);
+}
+
+void ThreadCollisionWarner::Leave() {
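+  // Decrement with a barrier so the critical section's memory effects are
+  // visible before the owner slot is released; the last leaver resets it.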
+  if (subtle::Barrier_AtomicIncrement(&counter_, -1) == 0) {
+    subtle::NoBarrier_Store(&valid_thread_id_, 0);
+  }
+}
+
+}  // namespace base
diff --git a/base/threading/thread_collision_warner.h b/base/threading/thread_collision_warner.h
new file mode 100644
index 0000000..b6993f6
--- /dev/null
+++ b/base/threading/thread_collision_warner.h
@@ -0,0 +1,245 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_THREADING_THREAD_COLLISION_WARNER_H_
+#define BASE_THREADING_THREAD_COLLISION_WARNER_H_
+
+#include <memory>
+
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+
+// A helper class alongside macros to be used to verify assumptions about thread
+// safety of a class.
+//
+// Example: A queue implementation that is not thread-safe but still usable
+//          if clients are synchronized somehow.
+//
+//          In this case the macro DFAKE_SCOPED_LOCK has to be used: it
+//          checks that if a thread is inside push()/pop() then no one else
+//          is inside pop()/push() at the same time.
+//
+// class NonThreadSafeQueue {
+//  public:
+//   ...
+//   void push(int) { DFAKE_SCOPED_LOCK(push_pop_); ... }
+//   int pop() { DFAKE_SCOPED_LOCK(push_pop_); ... }
+//   ...
+//  private:
+//   DFAKE_MUTEX(push_pop_);
+// };
+//
+//
+// Example: A queue implementation that is not thread-safe but still usable
+//          if clients are synchronized somehow; one "protected" method calls
+//          another "protected" method.
+//
+//          In this case the macro DFAKE_SCOPED_RECURSIVE_LOCK has to be
+//          used: it checks that if a thread is inside push()/pop() then no
+//          one else is inside pop()/push() at the same time.
+//
+// class NonThreadSafeQueue {
+//  public:
+//   void push(int) {
+//     DFAKE_SCOPED_LOCK(push_pop_);
+//     ...
+//   }
+//   int pop() {
+//     DFAKE_SCOPED_RECURSIVE_LOCK(push_pop_);
+//     bar();
+//     ...
+//   }
+//   void bar() { DFAKE_SCOPED_RECURSIVE_LOCK(push_pop_); ... }
+//   ...
+//  private:
+//   DFAKE_MUTEX(push_pop_);
+// };
+//
+//
+// Example: A queue implementation that is not usable even if clients are
+//          synchronized: only one thread over the object's life cycle may
+//          use the two members push()/pop().
+//
+//          In this case the macro DFAKE_SCOPED_LOCK_THREAD_LOCKED pins the
+//          specified critical section the first time a thread enters push()
+//          or pop(); from that time on, only that thread is allowed to
+//          execute push() or pop().
+//
+// class NonThreadSafeQueue {
+//  public:
+//   ...
+//   void push(int) { DFAKE_SCOPED_LOCK_THREAD_LOCKED(push_pop_); ... }
+//   int pop() { DFAKE_SCOPED_LOCK_THREAD_LOCKED(push_pop_); ... }
+//   ...
+//  private:
+//   DFAKE_MUTEX(push_pop_);
+// };
+//
+//
+// Example: A class that has to be constructed/destroyed on the same thread.
+//          It has a "shareable" method (with external synchronization) and a
+//          non-shareable method (even with external synchronization).
+//
+//          In this case two critical sections have to be defined:
+//
+// class ExoticClass {
+//  public:
+//   ExoticClass() { DFAKE_SCOPED_LOCK_THREAD_LOCKED(ctor_dtor_); ... }
+//   ~ExoticClass() { DFAKE_SCOPED_LOCK_THREAD_LOCKED(ctor_dtor_); ... }
+//
+//   void Shareable() { DFAKE_SCOPED_LOCK(shareable_section_); ... }
+//   void NotShareable() { DFAKE_SCOPED_LOCK_THREAD_LOCKED(ctor_dtor_); ... }
+//   ...
+//  private:
+//   DFAKE_MUTEX(ctor_dtor_);
+//   DFAKE_MUTEX(shareable_section_);
+// };
+
+
+#if !defined(NDEBUG)
+
+// Defines a class member that acts like a mutex. It is used only as a
+// verification tool.
+#define DFAKE_MUTEX(obj) \
+     mutable base::ThreadCollisionWarner obj
+// Asserts the call is never called simultaneously in two threads. Used at
+// member function scope.
+#define DFAKE_SCOPED_LOCK(obj) \
+     base::ThreadCollisionWarner::ScopedCheck s_check_##obj(&obj)
+// Asserts the call is never called simultaneously in two threads. Used at
+// member function scope. Same as DFAKE_SCOPED_LOCK but allows recursive locks.
+#define DFAKE_SCOPED_RECURSIVE_LOCK(obj) \
+     base::ThreadCollisionWarner::ScopedRecursiveCheck sr_check_##obj(&obj)
+// Asserts the code is always executed in the same thread.
+#define DFAKE_SCOPED_LOCK_THREAD_LOCKED(obj) \
+     base::ThreadCollisionWarner::Check check_##obj(&obj)
+
+#else
+
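+// In release builds the macros compile away entirely; DFAKE_MUTEX leaves a
+// harmless typedef behind so a trailing semicolon still parses.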
+#define DFAKE_MUTEX(obj) typedef void InternalFakeMutexType##obj
+#define DFAKE_SCOPED_LOCK(obj) ((void)0)
+#define DFAKE_SCOPED_RECURSIVE_LOCK(obj) ((void)0)
+#define DFAKE_SCOPED_LOCK_THREAD_LOCKED(obj) ((void)0)
+
+#endif
+
+namespace base {
+
+// ThreadCollisionWarner uses an asserter to report collisions. AsserterBase
+// is the interface and DCheckAsserter is the default implementation. The
+// unit tests use another asserter that doesn't DCHECK on collision (see
+// thread_collision_warner_unittest.cc).
+struct BASE_EXPORT AsserterBase {
+  virtual ~AsserterBase() = default;
+  virtual void warn() = 0;
+};
+
+struct BASE_EXPORT DCheckAsserter : public AsserterBase {
+  ~DCheckAsserter() override = default;
+  void warn() override;
+};
+
+class BASE_EXPORT ThreadCollisionWarner {
+ public:
+  // The |asserter| parameter is there only for testing purposes.
+  explicit ThreadCollisionWarner(AsserterBase* asserter = new DCheckAsserter())
+      : valid_thread_id_(0),
+        counter_(0),
+        asserter_(asserter) {}
+
+  ~ThreadCollisionWarner() {
+    delete asserter_;
+  }
+
+  // This class is meant to be used through the macro
+  // DFAKE_SCOPED_LOCK_THREAD_LOCKED. Unlike ScopedCheck, it doesn't leave
+  // the critical section on destruction, because the pinned critical
+  // section is allowed to be used only from one thread.
+  class BASE_EXPORT Check {
+   public:
+    explicit Check(ThreadCollisionWarner* warner)
+        : warner_(warner) {
+      warner_->EnterSelf();
+    }
+
+    ~Check() = default;
+
+   private:
+    ThreadCollisionWarner* warner_;
+
+    DISALLOW_COPY_AND_ASSIGN(Check);
+  };
+
+  // This class is meant to be used through the macro
+  // DFAKE_SCOPED_LOCK
+  class BASE_EXPORT ScopedCheck {
+   public:
+    explicit ScopedCheck(ThreadCollisionWarner* warner)
+        : warner_(warner) {
+      warner_->Enter();
+    }
+
+    ~ScopedCheck() {
+      warner_->Leave();
+    }
+
+   private:
+    ThreadCollisionWarner* warner_;
+
+    DISALLOW_COPY_AND_ASSIGN(ScopedCheck);
+  };
+
+  // This class is meant to be used through the macro
+  // DFAKE_SCOPED_RECURSIVE_LOCK
+  class BASE_EXPORT ScopedRecursiveCheck {
+   public:
+    explicit ScopedRecursiveCheck(ThreadCollisionWarner* warner)
+        : warner_(warner) {
+      warner_->EnterSelf();
+    }
+
+    ~ScopedRecursiveCheck() {
+      warner_->Leave();
+    }
+
+   private:
+    ThreadCollisionWarner* warner_;
+
+    DISALLOW_COPY_AND_ASSIGN(ScopedRecursiveCheck);
+  };
+
+ private:
+  // Stores the current thread identifier and DCHECKs if another thread has
+  // already done so. It is safe for the same thread to call this multiple
+  // times (recursion allowed).
+  void EnterSelf();
+
+  // Same as EnterSelf but recursion is not allowed.
+  void Enter();
+
+  // Clears the stored thread id to allow other threads to call EnterSelf or
+  // Enter.
+  void Leave();
+
+  // This stores the thread id that is inside the critical section, if the
+  // value is 0 then no thread is inside.
+  volatile subtle::Atomic32 valid_thread_id_;
+
+  // Counter to track how many times a critical section was "pinned"
+  // (when allowed) in order to unpin it when counter_ reaches 0.
+  volatile subtle::Atomic32 counter_;
+
+  // Here only for unit-testing purposes: during tests, collisions must be
+  // reported without DCHECKing.
+  AsserterBase* asserter_;
+
+  DISALLOW_COPY_AND_ASSIGN(ThreadCollisionWarner);
+};
+
+}  // namespace base
+
+#endif  // BASE_THREADING_THREAD_COLLISION_WARNER_H_
diff --git a/base/threading/thread_collision_warner_unittest.cc b/base/threading/thread_collision_warner_unittest.cc
new file mode 100644
index 0000000..cd56768
--- /dev/null
+++ b/base/threading/thread_collision_warner_unittest.cc
@@ -0,0 +1,382 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/thread_collision_warner.h"
+
+#include <memory>
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/simple_thread.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// warning C4822: local class member function does not have a body.
+MSVC_PUSH_DISABLE_WARNING(4822)
+
+
+#if defined(NDEBUG)
+
+// Would cause a memory leak otherwise.
+#undef DFAKE_MUTEX
+#define DFAKE_MUTEX(obj) std::unique_ptr<base::AsserterBase> obj
+
+// In Release, we expect AsserterBase::warn() not to be called.
+#define EXPECT_NDEBUG_FALSE_DEBUG_TRUE EXPECT_FALSE
+
+#else
+
+// In Debug, we expect AsserterBase::warn() to be called.
+#define EXPECT_NDEBUG_FALSE_DEBUG_TRUE EXPECT_TRUE
+
+#endif
+
+
+namespace {
+
+// This is the asserter used with ThreadCollisionWarner instead of the default
+// DCheckAsserter. The method fail_state() reports whether a collision took
+// place.
+class AssertReporter : public base::AsserterBase {
+ public:
+  AssertReporter()
+      : failed_(false) {}
+
+  void warn() override { failed_ = true; }
+
+  ~AssertReporter() override = default;
+
+  bool fail_state() const { return failed_; }
+  void reset() { failed_ = false; }
+
+ private:
+  bool failed_;
+};
+
+}  // namespace
+
+TEST(ThreadCollisionTest, BookCriticalSection) {
+  AssertReporter* local_reporter = new AssertReporter();
+
+  base::ThreadCollisionWarner warner(local_reporter);
+  EXPECT_FALSE(local_reporter->fail_state());
+
+  {  // Pin section.
+    DFAKE_SCOPED_LOCK_THREAD_LOCKED(warner);
+    EXPECT_FALSE(local_reporter->fail_state());
+    {  // Pin section.
+      DFAKE_SCOPED_LOCK_THREAD_LOCKED(warner);
+      EXPECT_FALSE(local_reporter->fail_state());
+    }
+  }
+}
+
+TEST(ThreadCollisionTest, ScopedRecursiveBookCriticalSection) {
+  AssertReporter* local_reporter = new AssertReporter();
+
+  base::ThreadCollisionWarner warner(local_reporter);
+  EXPECT_FALSE(local_reporter->fail_state());
+
+  {  // Pin section.
+    DFAKE_SCOPED_RECURSIVE_LOCK(warner);
+    EXPECT_FALSE(local_reporter->fail_state());
+    {  // Pin section again (allowed by DFAKE_SCOPED_RECURSIVE_LOCK)
+      DFAKE_SCOPED_RECURSIVE_LOCK(warner);
+      EXPECT_FALSE(local_reporter->fail_state());
+    }  // Unpin section.
+  }  // Unpin section.
+
+  // Check that section is not pinned
+  {  // Pin section.
+    DFAKE_SCOPED_LOCK(warner);
+    EXPECT_FALSE(local_reporter->fail_state());
+  }  // Unpin section.
+}
+
+TEST(ThreadCollisionTest, ScopedBookCriticalSection) {
+  AssertReporter* local_reporter = new AssertReporter();
+
+  base::ThreadCollisionWarner warner(local_reporter);
+  EXPECT_FALSE(local_reporter->fail_state());
+
+  {  // Pin section.
+    DFAKE_SCOPED_LOCK(warner);
+    EXPECT_FALSE(local_reporter->fail_state());
+  }  // Unpin section.
+
+  {  // Pin section.
+    DFAKE_SCOPED_LOCK(warner);
+    EXPECT_FALSE(local_reporter->fail_state());
+    {
+      // Pin section again (not allowed by DFAKE_SCOPED_LOCK)
+      DFAKE_SCOPED_LOCK(warner);
+      EXPECT_NDEBUG_FALSE_DEBUG_TRUE(local_reporter->fail_state());
+      // Reset the status of warner for further tests.
+      local_reporter->reset();
+    }  // Unpin section.
+  }  // Unpin section.
+
+  {
+    // Pin section.
+    DFAKE_SCOPED_LOCK(warner);
+    EXPECT_FALSE(local_reporter->fail_state());
+  }  // Unpin section.
+}
+
+TEST(ThreadCollisionTest, MTBookCriticalSectionTest) {
+  class NonThreadSafeQueue {
+   public:
+    explicit NonThreadSafeQueue(base::AsserterBase* asserter)
+        : push_pop_(asserter) {
+    }
+
+    void push(int value) {
+      DFAKE_SCOPED_LOCK_THREAD_LOCKED(push_pop_);
+    }
+
+    int pop() {
+      DFAKE_SCOPED_LOCK_THREAD_LOCKED(push_pop_);
+      return 0;
+    }
+
+   private:
+    DFAKE_MUTEX(push_pop_);
+
+    DISALLOW_COPY_AND_ASSIGN(NonThreadSafeQueue);
+  };
+
+  class QueueUser : public base::DelegateSimpleThread::Delegate {
+   public:
+    explicit QueueUser(NonThreadSafeQueue* queue) : queue_(queue) {}
+
+    void Run() override {
+      queue_->push(0);
+      queue_->pop();
+    }
+
+   private:
+    NonThreadSafeQueue* queue_;
+  };
+
+  AssertReporter* local_reporter = new AssertReporter();
+
+  NonThreadSafeQueue queue(local_reporter);
+
+  QueueUser queue_user_a(&queue);
+  QueueUser queue_user_b(&queue);
+
+  base::DelegateSimpleThread thread_a(&queue_user_a, "queue_user_thread_a");
+  base::DelegateSimpleThread thread_b(&queue_user_b, "queue_user_thread_b");
+
+  thread_a.Start();
+  thread_b.Start();
+
+  thread_a.Join();
+  thread_b.Join();
+
+  EXPECT_NDEBUG_FALSE_DEBUG_TRUE(local_reporter->fail_state());
+}
+
+TEST(ThreadCollisionTest, MTScopedBookCriticalSectionTest) {
+  // Queue with a 5 second push execution time; hopefully the two threads used
+  // in the test will enter push() at the same time.
+  class NonThreadSafeQueue {
+   public:
+    explicit NonThreadSafeQueue(base::AsserterBase* asserter)
+        : push_pop_(asserter) {
+    }
+
+    void push(int value) {
+      DFAKE_SCOPED_LOCK(push_pop_);
+      base::PlatformThread::Sleep(base::TimeDelta::FromSeconds(5));
+    }
+
+    int pop() {
+      DFAKE_SCOPED_LOCK(push_pop_);
+      return 0;
+    }
+
+   private:
+    DFAKE_MUTEX(push_pop_);
+
+    DISALLOW_COPY_AND_ASSIGN(NonThreadSafeQueue);
+  };
+
+  class QueueUser : public base::DelegateSimpleThread::Delegate {
+   public:
+    explicit QueueUser(NonThreadSafeQueue* queue) : queue_(queue) {}
+
+    void Run() override {
+      queue_->push(0);
+      queue_->pop();
+    }
+
+   private:
+    NonThreadSafeQueue* queue_;
+  };
+
+  AssertReporter* local_reporter = new AssertReporter();
+
+  NonThreadSafeQueue queue(local_reporter);
+
+  QueueUser queue_user_a(&queue);
+  QueueUser queue_user_b(&queue);
+
+  base::DelegateSimpleThread thread_a(&queue_user_a, "queue_user_thread_a");
+  base::DelegateSimpleThread thread_b(&queue_user_b, "queue_user_thread_b");
+
+  thread_a.Start();
+  thread_b.Start();
+
+  thread_a.Join();
+  thread_b.Join();
+
+  EXPECT_NDEBUG_FALSE_DEBUG_TRUE(local_reporter->fail_state());
+}
+
+TEST(ThreadCollisionTest, MTSynchedScopedBookCriticalSectionTest) {
+  // Queue with a 2 second push execution time; hopefully the two threads used
+  // in the test will enter push() at the same time.
+  class NonThreadSafeQueue {
+   public:
+    explicit NonThreadSafeQueue(base::AsserterBase* asserter)
+        : push_pop_(asserter) {
+    }
+
+    void push(int value) {
+      DFAKE_SCOPED_LOCK(push_pop_);
+      base::PlatformThread::Sleep(base::TimeDelta::FromSeconds(2));
+    }
+
+    int pop() {
+      DFAKE_SCOPED_LOCK(push_pop_);
+      return 0;
+    }
+
+   private:
+    DFAKE_MUTEX(push_pop_);
+
+    DISALLOW_COPY_AND_ASSIGN(NonThreadSafeQueue);
+  };
+
+  // This time the QueueUser class protects the non-thread-safe queue with
+  // a lock.
+  class QueueUser : public base::DelegateSimpleThread::Delegate {
+   public:
+    QueueUser(NonThreadSafeQueue* queue, base::Lock* lock)
+        : queue_(queue), lock_(lock) {}
+
+    void Run() override {
+      {
+        base::AutoLock auto_lock(*lock_);
+        queue_->push(0);
+      }
+      {
+        base::AutoLock auto_lock(*lock_);
+        queue_->pop();
+      }
+    }
+   private:
+    NonThreadSafeQueue* queue_;
+    base::Lock* lock_;
+  };
+
+  AssertReporter* local_reporter = new AssertReporter();
+
+  NonThreadSafeQueue queue(local_reporter);
+
+  base::Lock lock;
+
+  QueueUser queue_user_a(&queue, &lock);
+  QueueUser queue_user_b(&queue, &lock);
+
+  base::DelegateSimpleThread thread_a(&queue_user_a, "queue_user_thread_a");
+  base::DelegateSimpleThread thread_b(&queue_user_b, "queue_user_thread_b");
+
+  thread_a.Start();
+  thread_b.Start();
+
+  thread_a.Join();
+  thread_b.Join();
+
+  EXPECT_FALSE(local_reporter->fail_state());
+}
+
+TEST(ThreadCollisionTest, MTSynchedScopedRecursiveBookCriticalSectionTest) {
+  // Queue with a 2 second push execution time; hopefully the two threads used
+  // in the test will enter push() at the same time.
+  class NonThreadSafeQueue {
+   public:
+    explicit NonThreadSafeQueue(base::AsserterBase* asserter)
+        : push_pop_(asserter) {
+    }
+
+    void push(int) {
+      DFAKE_SCOPED_RECURSIVE_LOCK(push_pop_);
+      bar();
+      base::PlatformThread::Sleep(base::TimeDelta::FromSeconds(2));
+    }
+
+    int pop() {
+      DFAKE_SCOPED_RECURSIVE_LOCK(push_pop_);
+      return 0;
+    }
+
+    void bar() {
+      DFAKE_SCOPED_RECURSIVE_LOCK(push_pop_);
+    }
+
+   private:
+    DFAKE_MUTEX(push_pop_);
+
+    DISALLOW_COPY_AND_ASSIGN(NonThreadSafeQueue);
+  };
+
+  // This time the QueueUser class protects the non-thread-safe queue with
+  // a lock.
+  class QueueUser : public base::DelegateSimpleThread::Delegate {
+   public:
+    QueueUser(NonThreadSafeQueue* queue, base::Lock* lock)
+        : queue_(queue), lock_(lock) {}
+
+    void Run() override {
+      {
+        base::AutoLock auto_lock(*lock_);
+        queue_->push(0);
+      }
+      {
+        base::AutoLock auto_lock(*lock_);
+        queue_->bar();
+      }
+      {
+        base::AutoLock auto_lock(*lock_);
+        queue_->pop();
+      }
+    }
+   private:
+    NonThreadSafeQueue* queue_;
+    base::Lock* lock_;
+  };
+
+  AssertReporter* local_reporter = new AssertReporter();
+
+  NonThreadSafeQueue queue(local_reporter);
+
+  base::Lock lock;
+
+  QueueUser queue_user_a(&queue, &lock);
+  QueueUser queue_user_b(&queue, &lock);
+
+  base::DelegateSimpleThread thread_a(&queue_user_a, "queue_user_thread_a");
+  base::DelegateSimpleThread thread_b(&queue_user_b, "queue_user_thread_b");
+
+  thread_a.Start();
+  thread_b.Start();
+
+  thread_a.Join();
+  thread_b.Join();
+
+  EXPECT_FALSE(local_reporter->fail_state());
+}
diff --git a/base/threading/thread_id_name_manager.cc b/base/threading/thread_id_name_manager.cc
new file mode 100644
index 0000000..ca1979d
--- /dev/null
+++ b/base/threading/thread_id_name_manager.cc
@@ -0,0 +1,142 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/thread_id_name_manager.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "base/logging.h"
+#include "base/memory/singleton.h"
+#include "base/no_destructor.h"
+#include "base/strings/string_util.h"
+#include "base/threading/thread_local.h"
+#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
+
+namespace base {
+namespace {
+
+static const char kDefaultName[] = "";
+static std::string* g_default_name;
+
+ThreadLocalStorage::Slot& GetThreadNameTLS() {
+  static base::NoDestructor<base::ThreadLocalStorage::Slot> thread_name_tls;
+  return *thread_name_tls;
+}
+}  // namespace
+
+ThreadIdNameManager::ThreadIdNameManager()
+    : main_process_name_(nullptr), main_process_id_(kInvalidThreadId) {
+  g_default_name = new std::string(kDefaultName);
+
+  AutoLock locked(lock_);
+  name_to_interned_name_[kDefaultName] = g_default_name;
+}
+
+ThreadIdNameManager::~ThreadIdNameManager() = default;
+
+ThreadIdNameManager* ThreadIdNameManager::GetInstance() {
+  return Singleton<ThreadIdNameManager,
+      LeakySingletonTraits<ThreadIdNameManager> >::get();
+}
+
+const char* ThreadIdNameManager::GetDefaultInternedString() {
+  return g_default_name->c_str();
+}
+
+void ThreadIdNameManager::RegisterThread(PlatformThreadHandle::Handle handle,
+                                         PlatformThreadId id) {
+  AutoLock locked(lock_);
+  thread_id_to_handle_[id] = handle;
+  thread_handle_to_interned_name_[handle] =
+      name_to_interned_name_[kDefaultName];
+}
+
+void ThreadIdNameManager::InstallSetNameCallback(SetNameCallback callback) {
+  AutoLock locked(lock_);
+  set_name_callback_ = std::move(callback);
+}
+
+void ThreadIdNameManager::SetName(const std::string& name) {
+  PlatformThreadId id = PlatformThread::CurrentId();
+  std::string* leaked_str = nullptr;
+  {
+    AutoLock locked(lock_);
+    NameToInternedNameMap::iterator iter = name_to_interned_name_.find(name);
+    if (iter != name_to_interned_name_.end()) {
+      leaked_str = iter->second;
+    } else {
+      leaked_str = new std::string(name);
+      name_to_interned_name_[name] = leaked_str;
+    }
+
+    ThreadIdToHandleMap::iterator id_to_handle_iter =
+        thread_id_to_handle_.find(id);
+
+    GetThreadNameTLS().Set(const_cast<char*>(leaked_str->c_str()));
+    if (set_name_callback_) {
+      set_name_callback_.Run(leaked_str->c_str());
+    }
+
+    // The main thread of a process is not created as a Thread object, which
+    // means there is no PlatformThreadHandle registered.
+    if (id_to_handle_iter == thread_id_to_handle_.end()) {
+      main_process_name_ = leaked_str;
+      main_process_id_ = id;
+      return;
+    }
+    thread_handle_to_interned_name_[id_to_handle_iter->second] = leaked_str;
+  }
+
+  // Add the leaked thread name to the heap profiler context tracker. The name
+  // added is valid for the lifetime of the process. AllocationContextTracker
+  // cannot call GetName() (which acquires |lock_|) during the first allocation
+  // because doing so can deadlock when the first allocation happens inside
+  // ThreadIdNameManager itself while the lock is held.
+  trace_event::AllocationContextTracker::SetCurrentThreadName(
+      leaked_str->c_str());
+}
+
+const char* ThreadIdNameManager::GetName(PlatformThreadId id) {
+  AutoLock locked(lock_);
+
+  if (id == main_process_id_)
+    return main_process_name_->c_str();
+
+  ThreadIdToHandleMap::iterator id_to_handle_iter =
+      thread_id_to_handle_.find(id);
+  if (id_to_handle_iter == thread_id_to_handle_.end())
+    return name_to_interned_name_[kDefaultName]->c_str();
+
+  ThreadHandleToInternedNameMap::iterator handle_to_name_iter =
+      thread_handle_to_interned_name_.find(id_to_handle_iter->second);
+  return handle_to_name_iter->second->c_str();
+}
+
+const char* ThreadIdNameManager::GetNameForCurrentThread() {
+  const char* name = reinterpret_cast<const char*>(GetThreadNameTLS().Get());
+  return name ? name : kDefaultName;
+}
+
+void ThreadIdNameManager::RemoveName(PlatformThreadHandle::Handle handle,
+                                     PlatformThreadId id) {
+  AutoLock locked(lock_);
+  ThreadHandleToInternedNameMap::iterator handle_to_name_iter =
+      thread_handle_to_interned_name_.find(handle);
+
+  DCHECK(handle_to_name_iter != thread_handle_to_interned_name_.end());
+  thread_handle_to_interned_name_.erase(handle_to_name_iter);
+
+  ThreadIdToHandleMap::iterator id_to_handle_iter =
+      thread_id_to_handle_.find(id);
+  DCHECK(id_to_handle_iter != thread_id_to_handle_.end());
+  // The given |id| may have been re-used by the system. Make sure the
+  // mapping points to the provided |handle| before removal.
+  if (id_to_handle_iter->second != handle)
+    return;
+
+  thread_id_to_handle_.erase(id_to_handle_iter);
+}
+
+}  // namespace base
diff --git a/base/threading/thread_id_name_manager.h b/base/threading/thread_id_name_manager.h
new file mode 100644
index 0000000..f17dc1a
--- /dev/null
+++ b/base/threading/thread_id_name_manager.h
@@ -0,0 +1,80 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_THREADING_THREAD_ID_NAME_MANAGER_H_
+#define BASE_THREADING_THREAD_ID_NAME_MANAGER_H_
+
+#include <map>
+#include <string>
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/platform_thread.h"
+
+namespace base {
+
+template <typename T>
+struct DefaultSingletonTraits;
+
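+// Maps thread ids to names and interns the name strings for the lifetime of
+// the process. A usage sketch (illustrative only; registration is normally
+// done by the thread machinery itself):
+//
+//   base::ThreadIdNameManager* manager =
+//       base::ThreadIdNameManager::GetInstance();
+//   manager->SetName("worker");  // Names the calling thread.
+//   const char* name = manager->GetName(base::PlatformThread::CurrentId());
+//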
+class BASE_EXPORT ThreadIdNameManager {
+ public:
+  static ThreadIdNameManager* GetInstance();
+
+  static const char* GetDefaultInternedString();
+
+  // Register the mapping between a thread |id| and |handle|.
+  void RegisterThread(PlatformThreadHandle::Handle handle, PlatformThreadId id);
+
+  // The callback is called on the thread, immediately after the name is set.
+  // |name| is a pointer to a C string that is guaranteed to remain valid for
+  // the duration of the process.
+  using SetNameCallback = base::RepeatingCallback<void(const char* name)>;
+  void InstallSetNameCallback(SetNameCallback callback);
+
+  // Set the name for the current thread.
+  void SetName(const std::string& name);
+
+  // Get the name for the given id.
+  const char* GetName(PlatformThreadId id);
+
+  // Unlike |GetName|, this method uses TLS and avoids touching |lock_|.
+  const char* GetNameForCurrentThread();
+
+  // Remove the name for the given id.
+  void RemoveName(PlatformThreadHandle::Handle handle, PlatformThreadId id);
+
+ private:
+  friend struct DefaultSingletonTraits<ThreadIdNameManager>;
+
+  typedef std::map<PlatformThreadId, PlatformThreadHandle::Handle>
+      ThreadIdToHandleMap;
+  typedef std::map<PlatformThreadHandle::Handle, std::string*>
+      ThreadHandleToInternedNameMap;
+  typedef std::map<std::string, std::string*> NameToInternedNameMap;
+
+  ThreadIdNameManager();
+  ~ThreadIdNameManager();
+
+  // lock_ protects the name_to_interned_name_, thread_id_to_handle_ and
+  // thread_handle_to_interned_name_ maps.
+  Lock lock_;
+
+  NameToInternedNameMap name_to_interned_name_;
+  ThreadIdToHandleMap thread_id_to_handle_;
+  ThreadHandleToInternedNameMap thread_handle_to_interned_name_;
+
+  // Treat the main process specially as there is no PlatformThreadHandle.
+  std::string* main_process_name_;
+  PlatformThreadId main_process_id_;
+
+  SetNameCallback set_name_callback_;
+
+  DISALLOW_COPY_AND_ASSIGN(ThreadIdNameManager);
+};
+
+}  // namespace base
+
+#endif  // BASE_THREADING_THREAD_ID_NAME_MANAGER_H_
diff --git a/base/threading/thread_id_name_manager_unittest.cc b/base/threading/thread_id_name_manager_unittest.cc
new file mode 100644
index 0000000..350dc0f
--- /dev/null
+++ b/base/threading/thread_id_name_manager_unittest.cc
@@ -0,0 +1,93 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/thread_id_name_manager.h"
+
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/platform_test.h"
+
+typedef PlatformTest ThreadIdNameManagerTest;
+
+namespace {
+
+const char kAThread[] = "a thread";
+const char kBThread[] = "b thread";
+
+TEST_F(ThreadIdNameManagerTest, AddThreads) {
+  base::ThreadIdNameManager* manager = base::ThreadIdNameManager::GetInstance();
+  base::Thread thread_a(kAThread);
+  base::Thread thread_b(kBThread);
+
+  thread_a.StartAndWaitForTesting();
+  thread_b.StartAndWaitForTesting();
+
+  EXPECT_STREQ(kAThread, manager->GetName(thread_a.GetThreadId()));
+  EXPECT_STREQ(kBThread, manager->GetName(thread_b.GetThreadId()));
+
+  thread_b.Stop();
+  thread_a.Stop();
+}
+
+TEST_F(ThreadIdNameManagerTest, RemoveThreads) {
+  base::ThreadIdNameManager* manager = base::ThreadIdNameManager::GetInstance();
+  base::Thread thread_a(kAThread);
+
+  thread_a.StartAndWaitForTesting();
+  {
+    base::Thread thread_b(kBThread);
+    thread_b.StartAndWaitForTesting();
+    thread_b.Stop();
+  }
+  EXPECT_STREQ(kAThread, manager->GetName(thread_a.GetThreadId()));
+
+  thread_a.Stop();
+  EXPECT_STREQ("", manager->GetName(thread_a.GetThreadId()));
+}
+
+TEST_F(ThreadIdNameManagerTest, RestartThread) {
+  base::ThreadIdNameManager* manager = base::ThreadIdNameManager::GetInstance();
+  base::Thread thread_a(kAThread);
+
+  thread_a.StartAndWaitForTesting();
+  base::PlatformThreadId a_id = thread_a.GetThreadId();
+  EXPECT_STREQ(kAThread, manager->GetName(a_id));
+  thread_a.Stop();
+
+  thread_a.StartAndWaitForTesting();
+  EXPECT_STREQ("", manager->GetName(a_id));
+  EXPECT_STREQ(kAThread, manager->GetName(thread_a.GetThreadId()));
+  thread_a.Stop();
+}
+
+TEST_F(ThreadIdNameManagerTest, ThreadNameInterning) {
+  base::ThreadIdNameManager* manager = base::ThreadIdNameManager::GetInstance();
+
+  base::PlatformThreadId a_id = base::PlatformThread::CurrentId();
+  base::PlatformThread::SetName("First Name");
+  std::string version = manager->GetName(a_id);
+
+  base::PlatformThread::SetName("New name");
+  EXPECT_NE(version, manager->GetName(a_id));
+  base::PlatformThread::SetName("");
+}
+
+TEST_F(ThreadIdNameManagerTest, ResettingNameKeepsCorrectInternedValue) {
+  base::ThreadIdNameManager* manager = base::ThreadIdNameManager::GetInstance();
+
+  base::PlatformThreadId a_id = base::PlatformThread::CurrentId();
+  base::PlatformThread::SetName("Test Name");
+  std::string version = manager->GetName(a_id);
+
+  base::PlatformThread::SetName("New name");
+  EXPECT_NE(version, manager->GetName(a_id));
+
+  base::PlatformThread::SetName("Test Name");
+  EXPECT_EQ(version, manager->GetName(a_id));
+
+  base::PlatformThread::SetName("");
+}
+
+}  // namespace
diff --git a/base/threading/thread_local.h b/base/threading/thread_local.h
new file mode 100644
index 0000000..cad9add
--- /dev/null
+++ b/base/threading/thread_local.h
@@ -0,0 +1,99 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// WARNING: Thread local storage is a bit tricky to get right. Please make sure
+// that this is really the proper solution for what you're trying to achieve.
+// Don't prematurely optimize, most likely you can just use a Lock.
+//
+// These classes implement a wrapper around ThreadLocalStorage::Slot. On
+// construction, they will allocate a TLS slot, and free the TLS slot on
+// destruction. No memory management (creation or destruction) is handled. This
+// means that for uses of ThreadLocalPointer you must manage the memory
+// yourself; these classes will not destroy the pointer for you. There are no
+// at-thread-exit actions taken by these classes.
+//
+// ThreadLocalPointer<Type> wraps a Type*. It performs no creation or
+// destruction, so memory management must be handled elsewhere. The first call
+// to Get() on a thread will return NULL. You can update the pointer with a call
+// to Set().
+//
+// ThreadLocalBoolean wraps a bool. It will default to false if it has never
+// been set otherwise with Set().
+//
+// Thread Safety: An instance of ThreadLocalStorage is completely thread safe
+// once it has been created. If you want to dynamically create an instance, you
+// must of course properly deal with safety and race conditions. This means a
+// function-level static initializer is generally inappropriate.
+//
+// On Android, the system TLS is limited.
+//
+// Example usage:
+//   // My class is logically attached to a single thread. We cache a pointer
+//   // on the thread it was created on, so we can implement current().
+//   MyClass::MyClass() {
+//     DCHECK(Singleton<ThreadLocalPointer<MyClass> >::get()->Get() == NULL);
+//     Singleton<ThreadLocalPointer<MyClass> >::get()->Set(this);
+//   }
+//
+//   MyClass::~MyClass() {
+//     DCHECK(Singleton<ThreadLocalPointer<MyClass> >::get()->Get() != NULL);
+//     Singleton<ThreadLocalPointer<MyClass> >::get()->Set(NULL);
+//   }
+//
+//   // Return the current MyClass associated with the calling thread, can be
+//   // NULL if there isn't a MyClass associated.
+//   MyClass* MyClass::current() {
+//     return Singleton<ThreadLocalPointer<MyClass> >::get()->Get();
+//   }
+
+#ifndef BASE_THREADING_THREAD_LOCAL_H_
+#define BASE_THREADING_THREAD_LOCAL_H_
+
+#include "base/macros.h"
+#include "base/threading/thread_local_storage.h"
+
+namespace base {
+
+template <typename Type>
+class ThreadLocalPointer {
+ public:
+  ThreadLocalPointer() = default;
+  ~ThreadLocalPointer() = default;
+
+  Type* Get() {
+    return static_cast<Type*>(slot_.Get());
+  }
+
+  void Set(Type* ptr) {
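+    // The double cast lets Type be const-qualified; constness is stripped
+    // only to store the pointer in the untyped slot.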
+    slot_.Set(const_cast<void*>(static_cast<const void*>(ptr)));
+  }
+
+ private:
+  ThreadLocalStorage::Slot slot_;
+
+  DISALLOW_COPY_AND_ASSIGN(ThreadLocalPointer<Type>);
+};
+
+class ThreadLocalBoolean {
+ public:
+  ThreadLocalBoolean() = default;
+  ~ThreadLocalBoolean() = default;
+
+  bool Get() {
+    return tlp_.Get() != nullptr;
+  }
+
+  void Set(bool val) {
+    tlp_.Set(val ? this : nullptr);
+  }
+
+ private:
+  ThreadLocalPointer<void> tlp_;
+
+  DISALLOW_COPY_AND_ASSIGN(ThreadLocalBoolean);
+};
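+
+// Usage sketch for ThreadLocalBoolean (illustrative only; the guard name is
+// made up):
+//
+//   base::ThreadLocalBoolean* GetReentrancyGuard() {
+//     static auto* guard = new base::ThreadLocalBoolean();
+//     return guard;
+//   }
+//   ...
+//   GetReentrancyGuard()->Set(true);  // Visible only to the calling thread.
+//   if (GetReentrancyGuard()->Get()) { ... }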
+
+}  // namespace base
+
+#endif  // BASE_THREADING_THREAD_LOCAL_H_
diff --git a/base/threading/thread_local_storage.cc b/base/threading/thread_local_storage.cc
new file mode 100644
index 0000000..21fd323
--- /dev/null
+++ b/base/threading/thread_local_storage.cc
@@ -0,0 +1,397 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/thread_local_storage.h"
+
+#include "base/atomicops.h"
+#include "base/logging.h"
+#include "base/synchronization/lock.h"
+#include "build/build_config.h"
+
+using base::internal::PlatformThreadLocalStorage;
+
+// Chrome Thread Local Storage (TLS)
+//
+// This TLS system allows Chrome to use a single OS level TLS slot process-wide,
+// and allows us to control the slot limits instead of being at the mercy of the
+// platform. To do this, Chrome TLS replicates an array commonly found in the OS
+// thread metadata.
+//
+// Overview:
+//
+// OS TLS Slots       Per-Thread                 Per-Process Global
+//     ...
+//     []             Chrome TLS Array           Chrome TLS Metadata
+//     [] ----------> [][][][][ ][][][][]        [][][][][ ][][][][]
+//     []                      |                          |
+//     ...                     V                          V
+//                      Metadata Version           Slot Information
+//                         Your Data!
+//
+// Using a single OS TLS slot, Chrome TLS allocates an array on demand for the
+// lifetime of each thread that requests Chrome TLS data. Each per-thread TLS
+// array matches the length of the per-process global metadata array.
+//
+// A per-process global TLS metadata array tracks information about each item in
+// the per-thread array:
+//   * Status: Tracks if the slot is allocated or free to assign.
+//   * Destructor: An optional destructor to call on thread destruction for that
+//                 specific slot.
+//   * Version: Tracks the current version of the TLS slot. Each TLS slot
+//              allocation is associated with a unique version number.
+//
+//              Most OS TLS APIs guarantee that a newly allocated TLS slot is
+//              initialized to 0 for all threads. The Chrome TLS system provides
+//              this guarantee by tracking the version for each TLS slot here
+//              on each per-thread Chrome TLS array entry. Threads that access
+//              a slot with a mismatched version will receive 0 as their value.
+//              The metadata version is incremented when the client frees a
+//              slot. The per-thread metadata version is updated when a client
+//              writes to the slot. This scheme allows for constant time
+//              invalidation and avoids the need to iterate through each Chrome
+//              TLS array to mark the slot as zero.
+//
+// Just like an OS TLS API, clients of the Chrome TLS are responsible for
+// managing any necessary lifetime of the data in their slots. The only
+// convenience provided is automatic destruction when a thread ends. If a client
+// frees a slot, that client is responsible for destroying the data in the slot.
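+//
+// A sketch of the version check (mirroring Slot::Get() and Slot::Free()
+// below): freeing a slot increments g_tls_metadata[slot].version, so a thread
+// whose per-thread entry still carries the old version sees
+//
+//   tls_data[slot].version != version_
+//
+// in Get() and receives nullptr instead of the stale value.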
+
+namespace {
+// In order to make TLS destructors work, we need to keep around a function
+// pointer to the destructor for each slot. We keep this array of pointers in a
+// global (static) array.
+// We use the single OS-level TLS slot (giving us one pointer per thread) to
+// hold a pointer to a per-thread array (table) of slots that we allocate to
+// Chromium consumers.
+
+// g_native_tls_key is the one native TLS that we use. It stores our table.
+base::subtle::Atomic32 g_native_tls_key =
+    PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES;
+
+// The OS TLS slot has three states:
+//   * kUninitialized: Any call to Slot::Get()/Set() will create the base
+//     per-thread TLS state. On POSIX, kUninitialized must be 0.
+//   * [Memory Address]: Raw pointer to the base per-thread TLS state.
+//   * kDestroyed: The base per-thread TLS state has been freed.
+//
+// Final States:
+//   * Windows: kDestroyed. Windows does not iterate through the OS TLS to clean
+//     up the values.
+//   * POSIX: kUninitialized. POSIX iterates through TLS until all slots contain
+//     nullptr.
+//
+// More details on this design:
+//   We need some type of thread-local state to indicate that the TLS system has
+//   been destroyed. To do so, we leverage the multi-pass nature of destruction
+//   of pthread_key.
+//
+//    a) After destruction of TLS system, we set the pthread_key to a sentinel
+//       kDestroyed.
+//    b) All calls to Slot::Get() DCHECK that the state is not kDestroyed, and
+//       any system which might potentially invoke Slot::Get() after destruction
+//       of TLS must check ThreadLocalStorage::ThreadIsBeingDestroyed().
+//    c) After a full pass of the pthread_keys, on the next invocation of
+//       ConstructTlsVector(), we'll then set the key to nullptr.
+//    d) At this stage, the TLS system is back in its uninitialized state.
+//    e) If in the second pass of destruction of pthread_keys something were to
+//       re-initialize TLS [this should never happen! Since the only code which
+//       uses Chrome TLS is Chrome controlled, we should really be striving for
+//       single-pass destruction], then TLS will be re-initialized and then go
+//       through the 2-pass destruction system again. Everything should just
+//       work (TM).
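+//
+//   Schematically:
+//     kUninitialized --Get()/Set()--> [per-thread array]
+//         --first destruction pass--> kDestroyed
+//         --second pass (POSIX)-----> kUninitialized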
+
+// The consumers of kUninitialized and kDestroyed expect void*, since that's
+// what the API exposes on both POSIX and Windows.
+void* const kUninitialized = nullptr;
+
+// A sentinel value to indicate that the TLS system has been destroyed.
+void* const kDestroyed = reinterpret_cast<void*>(1);
+
+// The maximum number of slots in our thread local storage vector.
+constexpr int kThreadLocalStorageSize = 256;
+
+enum TlsStatus {
+  FREE,
+  IN_USE,
+};
+
+struct TlsMetadata {
+  TlsStatus status;
+  base::ThreadLocalStorage::TLSDestructorFunc destructor;
+  uint32_t version;
+};
+
+struct TlsVectorEntry {
+  void* data;
+  uint32_t version;
+};
+
+// This lock isn't needed until after the per-thread TLS vector has been
+// constructed, so it is safe to lazily allocate it here.
+base::Lock* GetTLSMetadataLock() {
+  static auto* lock = new base::Lock();
+  return lock;
+}
+TlsMetadata g_tls_metadata[kThreadLocalStorageSize];
+size_t g_last_assigned_slot = 0;
+
+// The maximum number of times to try to clear slots by calling destructors.
+// Use pthread naming convention for clarity.
+constexpr int kMaxDestructorIterations = kThreadLocalStorageSize;
+
+// This function is called to initialize our entire Chromium TLS system.
+// It may be called very early, and we need to complete most all of the setup
+// (initialization) before calling *any* memory allocator functions, which may
+// recursively depend on this initialization.
+// As a result, we use Atomics, and avoid anything (like a singleton) that might
+// require memory allocations.
+TlsVectorEntry* ConstructTlsVector() {
+  PlatformThreadLocalStorage::TLSKey key =
+      base::subtle::NoBarrier_Load(&g_native_tls_key);
+  if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES) {
+    CHECK(PlatformThreadLocalStorage::AllocTLS(&key));
+
+    // TLS_KEY_OUT_OF_INDEXES is used in NoBarrier_CompareAndSwap below to
+    // tell whether the key has been set. POSIX has no invalid key, so we
+    // reserve an almost-impossible value for this purpose. If the OS really
+    // does hand us TLS_KEY_OUT_OF_INDEXES as the key, just allocate another
+    // TLS slot.
+    if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES) {
+      PlatformThreadLocalStorage::TLSKey tmp = key;
+      CHECK(PlatformThreadLocalStorage::AllocTLS(&key) &&
+            key != PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES);
+      PlatformThreadLocalStorage::FreeTLS(tmp);
+    }
+    // Atomically test-and-set the tls_key. If the key is
+    // TLS_KEY_OUT_OF_INDEXES, go ahead and set it. Otherwise, do nothing, as
+    // another thread already did our dirty work.
+    if (PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES !=
+        static_cast<PlatformThreadLocalStorage::TLSKey>(
+            base::subtle::NoBarrier_CompareAndSwap(
+                &g_native_tls_key,
+                PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES, key))) {
+      // We've been shortcut. Another thread replaced g_native_tls_key first so
+      // we need to destroy our index and use the one the other thread got
+      // first.
+      PlatformThreadLocalStorage::FreeTLS(key);
+      key = base::subtle::NoBarrier_Load(&g_native_tls_key);
+    }
+  }
+  CHECK_EQ(PlatformThreadLocalStorage::GetTLSValue(key), kUninitialized);
+
+  // Some allocators, such as TCMalloc, make use of thread local storage. As a
+  // result, any attempt to call new (or malloc) will lazily cause such a system
+  // to initialize, which will include registering for a TLS key. If we are not
+  // careful here, then that request to create a key will call new back, and
+  // we'll have an infinite loop. We avoid that as follows: Use a stack
+  // allocated vector, so that we don't have dependence on our allocator until
+  // our service is in place. (i.e., don't even call new until after we're
+  // set up.)
+  TlsVectorEntry stack_allocated_tls_data[kThreadLocalStorageSize];
+  memset(stack_allocated_tls_data, 0, sizeof(stack_allocated_tls_data));
+  // Ensure that any re-entrant calls change the temp version.
+  PlatformThreadLocalStorage::SetTLSValue(key, stack_allocated_tls_data);
+
+  // Allocate an array to store our data.
+  TlsVectorEntry* tls_data = new TlsVectorEntry[kThreadLocalStorageSize];
+  memcpy(tls_data, stack_allocated_tls_data, sizeof(stack_allocated_tls_data));
+  PlatformThreadLocalStorage::SetTLSValue(key, tls_data);
+  return tls_data;
+}
+
+void OnThreadExitInternal(TlsVectorEntry* tls_data) {
+  // This branch is for POSIX, where this function is called twice. The first
+  // pass calls dtors and sets state to kDestroyed. The second pass sets
+  // kDestroyed to kUninitialized.
+  if (tls_data == kDestroyed) {
+    PlatformThreadLocalStorage::TLSKey key =
+        base::subtle::NoBarrier_Load(&g_native_tls_key);
+    PlatformThreadLocalStorage::SetTLSValue(key, kUninitialized);
+    return;
+  }
+
+  DCHECK(tls_data);
+  // Some allocators, such as TCMalloc, use TLS. As a result, when a thread
+  // terminates, one of the destructor calls we make may be to shut down an
+  // allocator. We have to be careful that after we've run all of the known
+  // destructors (perchance including an allocator's), we don't call the
+  // allocator and cause it to resurrect itself (with no possible destructor
+  // call to follow). We handle this problem as follows: Switch to using a stack
+  // allocated vector, so that we don't have dependence on our allocator after
+  // we have called all g_tls_metadata destructors. (i.e., don't even call
+  // delete[] after we're done with destructors.)
+  TlsVectorEntry stack_allocated_tls_data[kThreadLocalStorageSize];
+  memcpy(stack_allocated_tls_data, tls_data, sizeof(stack_allocated_tls_data));
+  // Ensure that any re-entrant calls change the temp version.
+  PlatformThreadLocalStorage::TLSKey key =
+      base::subtle::NoBarrier_Load(&g_native_tls_key);
+  PlatformThreadLocalStorage::SetTLSValue(key, stack_allocated_tls_data);
+  delete[] tls_data;  // Our last dependence on an allocator.
+
+  // Snapshot the TLS Metadata so we don't have to lock on every access.
+  TlsMetadata tls_metadata[kThreadLocalStorageSize];
+  {
+    base::AutoLock auto_lock(*GetTLSMetadataLock());
+    memcpy(tls_metadata, g_tls_metadata, sizeof(g_tls_metadata));
+  }
+
+  int remaining_attempts = kMaxDestructorIterations;
+  bool need_to_scan_destructors = true;
+  while (need_to_scan_destructors) {
+    need_to_scan_destructors = false;
+    // Try to destroy the first-created-slot (which is slot 1) in our last
+    // destructor call. That user was able to function, and define a slot with
+    // no other services running, so perhaps it is a basic service (like an
+    // allocator) and should also be destroyed last. If we get the order wrong,
+    // then we'll iterate several more times, so it is really not that critical
+    // (but it might help).
+    for (int slot = 0; slot < kThreadLocalStorageSize; ++slot) {
+      void* tls_value = stack_allocated_tls_data[slot].data;
+      if (!tls_value || tls_metadata[slot].status == TlsStatus::FREE ||
+          stack_allocated_tls_data[slot].version != tls_metadata[slot].version)
+        continue;
+
+      base::ThreadLocalStorage::TLSDestructorFunc destructor =
+          tls_metadata[slot].destructor;
+      if (!destructor)
+        continue;
+      stack_allocated_tls_data[slot].data = nullptr;  // pre-clear the slot.
+      destructor(tls_value);
+      // Any destructor might have called a different service, which then set a
+      // different slot to a non-null value. Hence we need to check the whole
+      // vector again. This is a pthread standard.
+      need_to_scan_destructors = true;
+    }
+    if (--remaining_attempts <= 0) {
+      NOTREACHED();  // Destructors might not have been called.
+      break;
+    }
+  }
+
+  // Remove our stack allocated vector.
+  PlatformThreadLocalStorage::SetTLSValue(key, kDestroyed);
+}
+
+}  // namespace
+
+namespace base {
+
+namespace internal {
+
+#if defined(OS_WIN)
+void PlatformThreadLocalStorage::OnThreadExit() {
+  PlatformThreadLocalStorage::TLSKey key =
+      base::subtle::NoBarrier_Load(&g_native_tls_key);
+  if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES)
+    return;
+  void *tls_data = GetTLSValue(key);
+
+  // On Windows, thread destruction callbacks are only invoked once per module,
+  // so there should be no way that this could be invoked twice.
+  DCHECK_NE(tls_data, kDestroyed);
+
+  // Maybe we have never initialized TLS for this thread.
+  if (tls_data == kUninitialized)
+    return;
+  OnThreadExitInternal(static_cast<TlsVectorEntry*>(tls_data));
+}
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+void PlatformThreadLocalStorage::OnThreadExit(void* value) {
+  OnThreadExitInternal(static_cast<TlsVectorEntry*>(value));
+}
+#endif  // defined(OS_WIN)
+
+}  // namespace internal
+
+bool ThreadLocalStorage::HasBeenDestroyed() {
+  PlatformThreadLocalStorage::TLSKey key =
+      base::subtle::NoBarrier_Load(&g_native_tls_key);
+  if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES)
+    return false;
+  return PlatformThreadLocalStorage::GetTLSValue(key) == kDestroyed;
+}
+
+void ThreadLocalStorage::Slot::Initialize(TLSDestructorFunc destructor) {
+  PlatformThreadLocalStorage::TLSKey key =
+      base::subtle::NoBarrier_Load(&g_native_tls_key);
+  if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES ||
+      PlatformThreadLocalStorage::GetTLSValue(key) == kUninitialized) {
+    ConstructTlsVector();
+  }
+
+  // Grab a new slot.
+  {
+    base::AutoLock auto_lock(*GetTLSMetadataLock());
+    for (int i = 0; i < kThreadLocalStorageSize; ++i) {
+      // Tracking the last assigned slot is an attempt to find the next
+      // available slot within one iteration. Under normal usage, slots remain
+      // in use for the lifetime of the process (otherwise before we reclaimed
+      // slots, we would have run out of slots). This makes it highly likely the
+      // next slot is going to be a free slot.
+      size_t slot_candidate =
+          (g_last_assigned_slot + 1 + i) % kThreadLocalStorageSize;
+      if (g_tls_metadata[slot_candidate].status == TlsStatus::FREE) {
+        g_tls_metadata[slot_candidate].status = TlsStatus::IN_USE;
+        g_tls_metadata[slot_candidate].destructor = destructor;
+        g_last_assigned_slot = slot_candidate;
+        DCHECK_EQ(kInvalidSlotValue, slot_);
+        slot_ = slot_candidate;
+        version_ = g_tls_metadata[slot_candidate].version;
+        break;
+      }
+    }
+  }
+  CHECK_NE(slot_, kInvalidSlotValue);
+  CHECK_LT(slot_, kThreadLocalStorageSize);
+}
+
+void ThreadLocalStorage::Slot::Free() {
+  DCHECK_NE(slot_, kInvalidSlotValue);
+  DCHECK_LT(slot_, kThreadLocalStorageSize);
+  {
+    base::AutoLock auto_lock(*GetTLSMetadataLock());
+    g_tls_metadata[slot_].status = TlsStatus::FREE;
+    g_tls_metadata[slot_].destructor = nullptr;
+    ++(g_tls_metadata[slot_].version);
+  }
+  slot_ = kInvalidSlotValue;
+}
+
+void* ThreadLocalStorage::Slot::Get() const {
+  TlsVectorEntry* tls_data = static_cast<TlsVectorEntry*>(
+      PlatformThreadLocalStorage::GetTLSValue(
+          base::subtle::NoBarrier_Load(&g_native_tls_key)));
+  DCHECK_NE(tls_data, kDestroyed);
+  if (!tls_data)
+    return nullptr;
+  DCHECK_NE(slot_, kInvalidSlotValue);
+  DCHECK_LT(slot_, kThreadLocalStorageSize);
+  // A version mismatch means this slot was previously freed.
+  if (tls_data[slot_].version != version_)
+    return nullptr;
+  return tls_data[slot_].data;
+}
+
+void ThreadLocalStorage::Slot::Set(void* value) {
+  TlsVectorEntry* tls_data = static_cast<TlsVectorEntry*>(
+      PlatformThreadLocalStorage::GetTLSValue(
+          base::subtle::NoBarrier_Load(&g_native_tls_key)));
+  DCHECK_NE(tls_data, kDestroyed);
+  if (!tls_data)
+    tls_data = ConstructTlsVector();
+  DCHECK_NE(slot_, kInvalidSlotValue);
+  DCHECK_LT(slot_, kThreadLocalStorageSize);
+  tls_data[slot_].data = value;
+  tls_data[slot_].version = version_;
+}
+
+ThreadLocalStorage::Slot::Slot(TLSDestructorFunc destructor) {
+  Initialize(destructor);
+}
+
+ThreadLocalStorage::Slot::~Slot() {
+  Free();
+}
+
+}  // namespace base
diff --git a/base/threading/thread_local_storage.h b/base/threading/thread_local_storage.h
new file mode 100644
index 0000000..f84ac33
--- /dev/null
+++ b/base/threading/thread_local_storage.h
@@ -0,0 +1,167 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_THREADING_THREAD_LOCAL_STORAGE_H_
+#define BASE_THREADING_THREAD_LOCAL_STORAGE_H_
+
+#include <stdint.h>
+
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include "base/win/windows_types.h"
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include <pthread.h>
+#endif
+
+namespace heap_profiling {
+class ScopedAllowLogging;
+}  // namespace heap_profiling
+
+namespace base {
+
+class SamplingHeapProfiler;
+
+namespace trace_event {
+class MallocDumpProvider;
+}  // namespace trace_event
+
+namespace internal {
+
+class ThreadLocalStorageTestInternal;
+
+// WARNING: You should *NOT* use this class directly.
+// PlatformThreadLocalStorage is a low-level abstraction of the OS's TLS
+// interface. Instead, you should use one of the following:
+// * ThreadLocalBoolean (from thread_local.h) for booleans.
+// * ThreadLocalPointer (from thread_local.h) for pointers.
+// * ThreadLocalStorage::StaticSlot/Slot for more direct control of the slot.
+class BASE_EXPORT PlatformThreadLocalStorage {
+ public:
+
+#if defined(OS_WIN)
+  typedef unsigned long TLSKey;
+  enum : unsigned { TLS_KEY_OUT_OF_INDEXES = TLS_OUT_OF_INDEXES };
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  typedef pthread_key_t TLSKey;
+  // The following is a "reserved key" which is used in our generic Chromium
+  // ThreadLocalStorage implementation.  We expect that an OS will not return
+  // such a key, but if it is returned (i.e., the OS tries to allocate it) we
+  // will just request another key.
+  enum { TLS_KEY_OUT_OF_INDEXES = 0x7FFFFFFF };
+#endif
+
+  // The following methods need to be supported on each OS platform, so that
+  // the Chromium ThreadLocalStore functionality can be constructed.
+  // Chromium will use these methods to acquire a single OS slot, and then use
+  // that to support a much larger number of Chromium slots (independent of the
+  // OS restrictions).
+  // The following returns true if it successfully is able to return an OS
+  // key in |key|.
+  static bool AllocTLS(TLSKey* key);
+  // Note: FreeTLS() does not have to be called; leaking the key is fine since
+  // the OS might not reuse a released slot anyway. You can instead just reset
+  // the TLS value with SetTLSValue().
+  static void FreeTLS(TLSKey key);
+  static void SetTLSValue(TLSKey key, void* value);
+  static void* GetTLSValue(TLSKey key) {
+#if defined(OS_WIN)
+    return TlsGetValue(key);
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+    return pthread_getspecific(key);
+#endif
+  }
+
+  // Each platform (OS implementation) is required to call this method on each
+  // terminating thread when the thread is about to terminate.  This method
+  // will then call all registered destructors for slots in Chromium
+  // ThreadLocalStorage, until there are no slot values remaining as having
+  // been set on this thread.
+  // Destructors may end up being called multiple times on a terminating
+  // thread, as other destructors may re-set slots that were previously
+  // destroyed.
+#if defined(OS_WIN)
+  // Since Windows doesn't support TLS destructors, the implementation
+  // should use GetTLSValue() to retrieve the value of the TLS slot.
+  static void OnThreadExit();
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  // |value| is the data stored in the TLS slot. The implementation can't use
+  // GetTLSValue() to retrieve the value of the slot, as on POSIX it has
+  // already been reset.
+  static void OnThreadExit(void* value);
+#endif
+};
+
+}  // namespace internal
+
+// Wrapper for thread local storage.  This class doesn't do much except provide
+// an API for portability.
+class BASE_EXPORT ThreadLocalStorage {
+ public:
+  // Prototype for the TLS destructor function, which can be optionally used to
+  // cleanup thread local storage on thread exit.  'value' is the data that is
+  // stored in thread local storage.
+  typedef void (*TLSDestructorFunc)(void* value);
+
+  // A key representing one value stored in TLS. Use as a class member or a
+  // local variable. If you need a static storage duration variable, use the
+  // following pattern with a NoDestructor<Slot>:
+  // void MyDestructorFunc(void* value);
+  // ThreadLocalStorage::Slot& ImportantContentTLS() {
+  //   static NoDestructor<ThreadLocalStorage::Slot> important_content_tls(
+  //       &MyDestructorFunc);
+  //   return *important_content_tls;
+  // }
+  class BASE_EXPORT Slot final {
+   public:
+    // |destructor| is a pointer to a function to perform per-thread cleanup of
+    // this object.  If set to nullptr, no cleanup is done for this TLS slot.
+    explicit Slot(TLSDestructorFunc destructor = nullptr);
+    // If a destructor was set for this slot, removes the destructor so that
+    // remaining threads exiting will not free data.
+    ~Slot();
+
+    // Get the thread-local value stored in slot 'slot'.
+    // Values are guaranteed to initially be zero.
+    void* Get() const;
+
+    // Set the thread-local value stored in slot 'slot' to
+    // value 'value'.
+    void Set(void* value);
+
+   private:
+    void Initialize(TLSDestructorFunc destructor);
+    void Free();
+
+    static constexpr int kInvalidSlotValue = -1;
+    int slot_ = kInvalidSlotValue;
+    uint32_t version_ = 0;
+
+    DISALLOW_COPY_AND_ASSIGN(Slot);
+  };
+
+ private:
+  // Most callers should not need access to HasBeenDestroyed().
+  // If you are working in code that runs during thread destruction, contact the
+  // base OWNERs for advice and then make a friend request.
+  //
+  // Returns |true| if Chrome's implementation of TLS has been destroyed during
+  // thread destruction. Attempting to call Slot::Get() during destruction is
+  // disallowed and will hit a DCHECK. Any code that relies on TLS during thread
+  // destruction must first check this method before calling Slot::Get().
+  friend class base::SamplingHeapProfiler;
+  friend class base::internal::ThreadLocalStorageTestInternal;
+  friend class base::trace_event::MallocDumpProvider;
+  friend class heap_profiling::ScopedAllowLogging;
+  static bool HasBeenDestroyed();
+
+  DISALLOW_COPY_AND_ASSIGN(ThreadLocalStorage);
+};
+
+}  // namespace base
+
+#endif  // BASE_THREADING_THREAD_LOCAL_STORAGE_H_
diff --git a/base/threading/thread_local_storage_posix.cc b/base/threading/thread_local_storage_posix.cc
new file mode 100644
index 0000000..89edeee
--- /dev/null
+++ b/base/threading/thread_local_storage_posix.cc
@@ -0,0 +1,30 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/thread_local_storage.h"
+
+#include "base/logging.h"
+
+namespace base {
+
+namespace internal {
+
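+// pthread_key_create() registers OnThreadExit() as the key's destructor, so
+// the per-thread slot vector is torn down when each thread exits.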
+bool PlatformThreadLocalStorage::AllocTLS(TLSKey* key) {
+  return !pthread_key_create(key,
+      base::internal::PlatformThreadLocalStorage::OnThreadExit);
+}
+
+void PlatformThreadLocalStorage::FreeTLS(TLSKey key) {
+  int ret = pthread_key_delete(key);
+  DCHECK_EQ(ret, 0);
+}
+
+void PlatformThreadLocalStorage::SetTLSValue(TLSKey key, void* value) {
+  int ret = pthread_setspecific(key, value);
+  DCHECK_EQ(ret, 0);
+}
+
+}  // namespace internal
+
+}  // namespace base
diff --git a/base/threading/thread_local_storage_unittest.cc b/base/threading/thread_local_storage_unittest.cc
new file mode 100644
index 0000000..9062ff0
--- /dev/null
+++ b/base/threading/thread_local_storage_unittest.cc
@@ -0,0 +1,278 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/thread_local_storage.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#include <process.h>
+#endif
+
+#include "base/macros.h"
+#include "base/no_destructor.h"
+#include "base/threading/simple_thread.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_WIN)
+// Ignore warnings about ptr->int conversions that we use when
+// storing ints into ThreadLocalStorage.
+#pragma warning(disable : 4311 4312)
+#endif
+
+namespace base {
+
+#if defined(OS_POSIX)
+
+namespace internal {
+
+// This class is friended by ThreadLocalStorage.
+class ThreadLocalStorageTestInternal {
+ public:
+  static bool HasBeenDestroyed() {
+    return ThreadLocalStorage::HasBeenDestroyed();
+  }
+};
+
+}  // namespace internal
+
+#endif  // defined(OS_POSIX)
+
+namespace {
+
+const int kInitialTlsValue = 0x5555;
+const int kFinalTlsValue = 0x7777;
+// How many times must a destructor be called before we really are done.
+const int kNumberDestructorCallRepetitions = 3;
+
+void ThreadLocalStorageCleanup(void* value);
+
+ThreadLocalStorage::Slot& TLSSlot() {
+  static NoDestructor<ThreadLocalStorage::Slot> slot(
+      &ThreadLocalStorageCleanup);
+  return *slot;
+}
+
+class ThreadLocalStorageRunner : public DelegateSimpleThread::Delegate {
+ public:
+  explicit ThreadLocalStorageRunner(int* tls_value_ptr)
+      : tls_value_ptr_(tls_value_ptr) {}
+
+  ~ThreadLocalStorageRunner() override = default;
+
+  void Run() override {
+    *tls_value_ptr_ = kInitialTlsValue;
+    TLSSlot().Set(tls_value_ptr_);
+
+    int* ptr = static_cast<int*>(TLSSlot().Get());
+    EXPECT_EQ(ptr, tls_value_ptr_);
+    EXPECT_EQ(*ptr, kInitialTlsValue);
+    *tls_value_ptr_ = 0;
+
+    ptr = static_cast<int*>(TLSSlot().Get());
+    EXPECT_EQ(ptr, tls_value_ptr_);
+    EXPECT_EQ(*ptr, 0);
+
+    *ptr = kFinalTlsValue + kNumberDestructorCallRepetitions;
+  }
+
+ private:
+  int* tls_value_ptr_;
+  DISALLOW_COPY_AND_ASSIGN(ThreadLocalStorageRunner);
+};
+
+
+void ThreadLocalStorageCleanup(void *value) {
+  int *ptr = reinterpret_cast<int*>(value);
+  // Destructors should never be called with a NULL.
+  ASSERT_NE(reinterpret_cast<int*>(NULL), ptr);
+  if (*ptr == kFinalTlsValue)
+    return;  // We've been called enough times.
+  ASSERT_LT(kFinalTlsValue, *ptr);
+  ASSERT_GE(kFinalTlsValue + kNumberDestructorCallRepetitions, *ptr);
+  --*ptr;  // Move closer to our target.
+  // Tell tls that we're not done with this thread, and still need destruction.
+  TLSSlot().Set(value);
+}
+
+#if defined(OS_POSIX)
+constexpr intptr_t kDummyValue = 0xABCD;
+constexpr size_t kKeyCount = 20;
+
+// The order in which pthread keys are destroyed is not specified by POSIX.
+// Hopefully, of the 20 keys we create, some will be destroyed after the TLS
+// key is destroyed.
+class UseTLSDuringDestructionRunner {
+ public:
+  UseTLSDuringDestructionRunner() = default;
+
+  // The order in which pthread_key destructors are called is not well defined.
+  // Hopefully, by creating 10 keys both before and after initializing TLS on
+  // the thread, at least one destructor will run after TLS destruction.
+  void Run() {
+    ASSERT_FALSE(internal::ThreadLocalStorageTestInternal::HasBeenDestroyed());
+
+    // Create 10 pthread keys before initializing TLS on the thread.
+    size_t slot_index = 0;
+    for (; slot_index < 10; ++slot_index) {
+      CreateTlsKeyWithDestructor(slot_index);
+    }
+
+    // Initialize the Chrome TLS system. It's possible that base::Thread has
+    // already initialized Chrome TLS, but we don't rely on that.
+    slot_.Set(reinterpret_cast<void*>(kDummyValue));
+
+    // Create 10 pthread keys after initializing TLS on the thread.
+    for (; slot_index < kKeyCount; ++slot_index) {
+      CreateTlsKeyWithDestructor(slot_index);
+    }
+  }
+
+  bool teardown_works_correctly() { return teardown_works_correctly_; }
+
+ private:
+  struct TLSState {
+    pthread_key_t key;
+    bool* teardown_works_correctly;
+  };
+
+  // The POSIX TLS destruction API takes as input a single C-function, which is
+  // called with the current |value| of a (key, value) pair. We need this
+  // function to do two things: set the |value| to nullptr, which requires
+  // knowing the associated |key|, and update the |teardown_works_correctly_|
+  // state.
+  //
+  // To accomplish this, we set the value to an instance of TLSState, which
+  // contains |key| as well as a pointer to |teardown_works_correctly|.
+  static void ThreadLocalDestructor(void* value) {
+    TLSState* state = static_cast<TLSState*>(value);
+    int result = pthread_setspecific(state->key, nullptr);
+    ASSERT_EQ(result, 0);
+
+    // If this path is hit, then the thread local destructor was called after
+    // the Chrome-TLS destructor and the internal state was updated correctly.
+    // No further checks are necessary.
+    if (internal::ThreadLocalStorageTestInternal::HasBeenDestroyed()) {
+      *(state->teardown_works_correctly) = true;
+      return;
+    }
+
+    // If this path is hit, then the thread local destructor was called before
+    // the Chrome-TLS destructor is hit. The ThreadLocalStorage::Slot should
+    // still function correctly.
+    ASSERT_EQ(reinterpret_cast<intptr_t>(slot_.Get()), kDummyValue);
+  }
+
+  void CreateTlsKeyWithDestructor(size_t index) {
+    ASSERT_LT(index, kKeyCount);
+
+    tls_states_[index].teardown_works_correctly = &teardown_works_correctly_;
+    int result = pthread_key_create(
+        &(tls_states_[index].key),
+        UseTLSDuringDestructionRunner::ThreadLocalDestructor);
+    ASSERT_EQ(result, 0);
+
+    result = pthread_setspecific(tls_states_[index].key, &tls_states_[index]);
+    ASSERT_EQ(result, 0);
+  }
+
+  static base::ThreadLocalStorage::Slot slot_;
+  bool teardown_works_correctly_ = false;
+  TLSState tls_states_[kKeyCount];
+
+  DISALLOW_COPY_AND_ASSIGN(UseTLSDuringDestructionRunner);
+};
+
+base::ThreadLocalStorage::Slot UseTLSDuringDestructionRunner::slot_;
+
+void* UseTLSTestThreadRun(void* input) {
+  UseTLSDuringDestructionRunner* runner =
+      static_cast<UseTLSDuringDestructionRunner*>(input);
+  runner->Run();
+  return nullptr;
+}
+
+#endif  // defined(OS_POSIX)
+
+}  // namespace
+
+TEST(ThreadLocalStorageTest, Basics) {
+  ThreadLocalStorage::Slot slot;
+  slot.Set(reinterpret_cast<void*>(123));
+  int value = reinterpret_cast<intptr_t>(slot.Get());
+  EXPECT_EQ(value, 123);
+}
+
+#if defined(THREAD_SANITIZER) || \
+    (defined(OS_WIN) && defined(ARCH_CPU_X86_64) && !defined(NDEBUG))
+// Do not run the test under ThreadSanitizer. Because this test iterates its
+// own TSD destructor for the maximum possible number of times, TSan can't jump
+// in after the last destructor invocation, therefore the destructor remains
+// unsynchronized with the following users of the same TSD slot. This results
+// in race reports between the destructor and functions in other tests.
+//
+// It is disabled on Win x64 with incremental linking (i.e. "Debug") pending
+// resolution of http://crbug.com/251251.
+#define MAYBE_TLSDestructors DISABLED_TLSDestructors
+#else
+#define MAYBE_TLSDestructors TLSDestructors
+#endif
+TEST(ThreadLocalStorageTest, MAYBE_TLSDestructors) {
+  // Create a TLS index with a destructor.  Create a set of
+  // threads that set the TLS, while the destructor cleans it up.
+  // After the threads finish, verify that the value is cleaned up.
+  const int kNumThreads = 5;
+  int values[kNumThreads];
+  ThreadLocalStorageRunner* thread_delegates[kNumThreads];
+  DelegateSimpleThread* threads[kNumThreads];
+
+  // Spawn the threads.
+  for (int index = 0; index < kNumThreads; index++) {
+    values[index] = kInitialTlsValue;
+    thread_delegates[index] = new ThreadLocalStorageRunner(&values[index]);
+    threads[index] = new DelegateSimpleThread(thread_delegates[index],
+                                              "tls thread");
+    threads[index]->Start();
+  }
+
+  // Wait for the threads to finish.
+  for (int index = 0; index < kNumThreads; index++) {
+    threads[index]->Join();
+    delete threads[index];
+    delete thread_delegates[index];
+
+    // Verify that the destructor was called and that we reset.
+    EXPECT_EQ(values[index], kFinalTlsValue);
+  }
+}
+
+TEST(ThreadLocalStorageTest, TLSReclaim) {
+  // Creates and destroys many TLS slots and ensures they are all
+  // zero-initialized.
+  for (int i = 0; i < 1000; ++i) {
+    ThreadLocalStorage::Slot slot(nullptr);
+    EXPECT_EQ(nullptr, slot.Get());
+    slot.Set(reinterpret_cast<void*>(0xBAADF00D));
+    EXPECT_EQ(reinterpret_cast<void*>(0xBAADF00D), slot.Get());
+  }
+}
+
+#if defined(OS_POSIX)
+// Unlike POSIX, Windows does not iterate through the OS TLS to cleanup any
+// values there. Instead a per-module thread destruction function is called.
+// However, it is not possible to perform a check after this point (as the code
+// is detached from the thread), so this check remains POSIX only.
+TEST(ThreadLocalStorageTest, UseTLSDuringDestruction) {
+  UseTLSDuringDestructionRunner runner;
+  pthread_t thread;
+  int result = pthread_create(&thread, nullptr, UseTLSTestThreadRun, &runner);
+  ASSERT_EQ(result, 0);
+
+  result = pthread_join(thread, nullptr);
+  ASSERT_EQ(result, 0);
+
+  EXPECT_TRUE(runner.teardown_works_correctly());
+}
+#endif  // defined(OS_POSIX)
+
+}  // namespace base
diff --git a/base/threading/thread_local_storage_win.cc b/base/threading/thread_local_storage_win.cc
new file mode 100644
index 0000000..a9aec31
--- /dev/null
+++ b/base/threading/thread_local_storage_win.cc
@@ -0,0 +1,107 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/thread_local_storage.h"
+
+#include <windows.h>
+
+#include "base/logging.h"
+
+namespace base {
+
+namespace internal {
+
+bool PlatformThreadLocalStorage::AllocTLS(TLSKey* key) {
+  TLSKey value = TlsAlloc();
+  if (value != TLS_OUT_OF_INDEXES) {
+    *key = value;
+    return true;
+  }
+  return false;
+}
+
+void PlatformThreadLocalStorage::FreeTLS(TLSKey key) {
+  BOOL ret = TlsFree(key);
+  DCHECK(ret);
+}
+
+void PlatformThreadLocalStorage::SetTLSValue(TLSKey key, void* value) {
+  BOOL ret = TlsSetValue(key, value);
+  DCHECK(ret);
+}
+
+}  // namespace internal
+
+}  // namespace base
+
+// Thread Termination Callbacks.
+// Windows doesn't support a per-thread destructor with its
+// TLS primitives.  So, we build it manually by inserting a
+// function to be called on each thread's exit.
+// This magic is from http://www.codeproject.com/threads/tls.asp
+// and it works for VC++ 7.0 and later.
+
+// Force a reference to _tls_used to make the linker create the TLS directory
+// if it's not already there.  (e.g. if __declspec(thread) is not used).
+// Force a reference to p_thread_callback_base to prevent whole program
+// optimization from discarding the variable.
+#ifdef _WIN64
+
+#pragma comment(linker, "/INCLUDE:_tls_used")
+#pragma comment(linker, "/INCLUDE:p_thread_callback_base")
+
+#else  // _WIN64
+
+#pragma comment(linker, "/INCLUDE:__tls_used")
+#pragma comment(linker, "/INCLUDE:_p_thread_callback_base")
+
+#endif  // _WIN64
+
+// Static callback function to call with each thread termination.
+void NTAPI OnThreadExit(PVOID module, DWORD reason, PVOID reserved) {
+  // On XP SP0 & SP1, the DLL_PROCESS_ATTACH is never seen. It is sent on SP2+
+  // and on W2K and W2K3. So don't assume it is sent.
+  if (DLL_THREAD_DETACH == reason || DLL_PROCESS_DETACH == reason)
+    base::internal::PlatformThreadLocalStorage::OnThreadExit();
+}
+
+// .CRT$XLA to .CRT$XLZ is an array of PIMAGE_TLS_CALLBACK pointers that are
+// called automatically by the OS loader code (not the CRT) when the module is
+// loaded and on thread creation. They are NOT called if the module has been
+// loaded by a LoadLibrary() call. It must have implicitly been loaded at
+// process startup.
+// By implicitly loaded, we mean that it is directly referenced by the main
+// EXE or by one of its dependent DLLs. A delay-loaded DLL does not count as
+// being implicitly loaded.
+//
+// See VC\crt\src\tlssup.c for reference.
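+//
+// As a usage sketch (hypothetical names, not from this file), a destructor
+// registered through base::ThreadLocalStorage::Slot is what ultimately runs
+// through the OnThreadExit() callback wired up below:
+//
+//   void CleanupFn(void* value) { delete static_cast<MyState*>(value); }
+//
+//   base::ThreadLocalStorage::Slot slot(&CleanupFn);
+//   slot.Set(new MyState);
+//   // CleanupFn runs at each thread's exit, dispatched through the
+//   // PIMAGE_TLS_CALLBACK registered in .CRT$XLB below.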
+
+// extern "C" suppresses C++ name mangling so we know the symbol name for the
+// linker /INCLUDE:symbol pragma above.
+extern "C" {
+// The linker must not discard p_thread_callback_base.  (We force a reference
+// to this variable with a linker /INCLUDE:symbol pragma to ensure that.) If
+// this variable is discarded, the OnThreadExit function will never be called.
+#ifdef _WIN64
+
+// .CRT section is merged with .rdata on x64 so it must be constant data.
+#pragma const_seg(".CRT$XLB")
+// When defining a const variable, it must have external linkage to be sure the
+// linker doesn't discard it.
+extern const PIMAGE_TLS_CALLBACK p_thread_callback_base;
+const PIMAGE_TLS_CALLBACK p_thread_callback_base = OnThreadExit;
+
+// Reset the default section.
+#pragma const_seg()
+
+#else  // _WIN64
+
+#pragma data_seg(".CRT$XLB")
+PIMAGE_TLS_CALLBACK p_thread_callback_base = OnThreadExit;
+
+// Reset the default section.
+#pragma data_seg()
+
+#endif  // _WIN64
+}  // extern "C"
diff --git a/base/threading/thread_local_unittest.cc b/base/threading/thread_local_unittest.cc
new file mode 100644
index 0000000..54f2ad2
--- /dev/null
+++ b/base/threading/thread_local_unittest.cc
@@ -0,0 +1,164 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/logging.h"
+#include "base/threading/simple_thread.h"
+#include "base/threading/thread_local.h"
+#include "base/synchronization/waitable_event.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+class ThreadLocalTesterBase : public base::DelegateSimpleThreadPool::Delegate {
+ public:
+  typedef base::ThreadLocalPointer<char> TLPType;
+
+  ThreadLocalTesterBase(TLPType* tlp, base::WaitableEvent* done)
+      : tlp_(tlp),
+        done_(done) {
+  }
+  ~ThreadLocalTesterBase() override = default;
+
+ protected:
+  TLPType* tlp_;
+  base::WaitableEvent* done_;
+};
+
+class SetThreadLocal : public ThreadLocalTesterBase {
+ public:
+  SetThreadLocal(TLPType* tlp, base::WaitableEvent* done)
+      : ThreadLocalTesterBase(tlp, done), val_(nullptr) {}
+  ~SetThreadLocal() override = default;
+
+  void set_value(char* val) { val_ = val; }
+
+  void Run() override {
+    DCHECK(!done_->IsSignaled());
+    tlp_->Set(val_);
+    done_->Signal();
+  }
+
+ private:
+  char* val_;
+};
+
+class GetThreadLocal : public ThreadLocalTesterBase {
+ public:
+  GetThreadLocal(TLPType* tlp, base::WaitableEvent* done)
+      : ThreadLocalTesterBase(tlp, done), ptr_(nullptr) {}
+  ~GetThreadLocal() override = default;
+
+  void set_ptr(char** ptr) { ptr_ = ptr; }
+
+  void Run() override {
+    DCHECK(!done_->IsSignaled());
+    *ptr_ = tlp_->Get();
+    done_->Signal();
+  }
+
+ private:
+  char** ptr_;
+};
+
+}  // namespace
+
+// In this test, we start 2 threads which will access a ThreadLocalPointer.  We
+// make sure the default is NULL, and the pointers are unique to the threads.
+TEST(ThreadLocalTest, Pointer) {
+  base::DelegateSimpleThreadPool tp1("ThreadLocalTest tp1", 1);
+  base::DelegateSimpleThreadPool tp2("ThreadLocalTest tp2", 1);
+  tp1.Start();
+  tp2.Start();
+
+  base::ThreadLocalPointer<char> tlp;
+
+  static char* const kBogusPointer = reinterpret_cast<char*>(0x1234);
+
+  char* tls_val;
+  base::WaitableEvent done(WaitableEvent::ResetPolicy::MANUAL,
+                           WaitableEvent::InitialState::NOT_SIGNALED);
+
+  GetThreadLocal getter(&tlp, &done);
+  getter.set_ptr(&tls_val);
+
+  // Check that both threads defaulted to NULL.
+  tls_val = kBogusPointer;
+  done.Reset();
+  tp1.AddWork(&getter);
+  done.Wait();
+  EXPECT_EQ(static_cast<char*>(nullptr), tls_val);
+
+  tls_val = kBogusPointer;
+  done.Reset();
+  tp2.AddWork(&getter);
+  done.Wait();
+  EXPECT_EQ(static_cast<char*>(nullptr), tls_val);
+
+  SetThreadLocal setter(&tlp, &done);
+  setter.set_value(kBogusPointer);
+
+  // Have thread 1 set its pointer value to kBogusPointer.
+  done.Reset();
+  tp1.AddWork(&setter);
+  done.Wait();
+
+  tls_val = nullptr;
+  done.Reset();
+  tp1.AddWork(&getter);
+  done.Wait();
+  EXPECT_EQ(kBogusPointer, tls_val);
+
+  // Make sure thread 2 is still NULL.
+  tls_val = kBogusPointer;
+  done.Reset();
+  tp2.AddWork(&getter);
+  done.Wait();
+  EXPECT_EQ(static_cast<char*>(nullptr), tls_val);
+
+  // Set thread 2 to kBogusPointer + 1.
+  setter.set_value(kBogusPointer + 1);
+
+  done.Reset();
+  tp2.AddWork(&setter);
+  done.Wait();
+
+  tls_val = nullptr;
+  done.Reset();
+  tp2.AddWork(&getter);
+  done.Wait();
+  EXPECT_EQ(kBogusPointer + 1, tls_val);
+
+  // Make sure thread 1 is still kBogusPointer.
+  tls_val = nullptr;
+  done.Reset();
+  tp1.AddWork(&getter);
+  done.Wait();
+  EXPECT_EQ(kBogusPointer, tls_val);
+
+  tp1.JoinAll();
+  tp2.JoinAll();
+}
+
+TEST(ThreadLocalTest, Boolean) {
+  {
+    base::ThreadLocalBoolean tlb;
+    EXPECT_FALSE(tlb.Get());
+
+    tlb.Set(false);
+    EXPECT_FALSE(tlb.Get());
+
+    tlb.Set(true);
+    EXPECT_TRUE(tlb.Get());
+  }
+
+  // Our slot should have been freed; the new slot starts out reset.
+  {
+    base::ThreadLocalBoolean tlb;
+    EXPECT_FALSE(tlb.Get());
+  }
+}
+
+}  // namespace base
diff --git a/base/threading/thread_perftest.cc b/base/threading/thread_perftest.cc
new file mode 100644
index 0000000..bf89049
--- /dev/null
+++ b/base/threading/thread_perftest.cc
@@ -0,0 +1,318 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+
+#include <memory>
+#include <vector>
+
+#include "base/base_switches.h"
+#include "base/bind.h"
+#include "base/command_line.h"
+#include "base/location.h"
+#include "base/memory/ptr_util.h"
+#include "base/message_loop/message_loop.h"
+#include "base/single_thread_task_runner.h"
+#include "base/strings/stringprintf.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/thread.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/perf/perf_test.h"
+
+#if defined(OS_POSIX)
+#include <pthread.h>
+#endif
+
+namespace base {
+
+namespace {
+
+const int kNumRuns = 100000;
+
+// Base class for a threading perf-test. This sets up some threads for the
+// test and measures the wall-clock time in addition to the CPU time spent on
+// each thread.
+class ThreadPerfTest : public testing::Test {
+ public:
+  ThreadPerfTest()
+      : done_(WaitableEvent::ResetPolicy::AUTOMATIC,
+              WaitableEvent::InitialState::NOT_SIGNALED) {}
+
+  // To be implemented by each test. Subclasses must use threads_ such that
+  // their cpu-time can be measured. Test must return from PingPong() _and_
+  // call FinishMeasurement from any thread to complete the test.
+  virtual void Init() {
+    if (ThreadTicks::IsSupported())
+      ThreadTicks::WaitUntilInitialized();
+  }
+  virtual void PingPong(int hops) = 0;
+  virtual void Reset() {}
+
+  void TimeOnThread(base::ThreadTicks* ticks, base::WaitableEvent* done) {
+    *ticks = base::ThreadTicks::Now();
+    done->Signal();
+  }
+
+  base::ThreadTicks ThreadNow(const base::Thread& thread) {
+    base::WaitableEvent done(WaitableEvent::ResetPolicy::AUTOMATIC,
+                             WaitableEvent::InitialState::NOT_SIGNALED);
+    base::ThreadTicks ticks;
+    thread.task_runner()->PostTask(
+        FROM_HERE, base::BindOnce(&ThreadPerfTest::TimeOnThread,
+                                  base::Unretained(this), &ticks, &done));
+    done.Wait();
+    return ticks;
+  }
+
+  void RunPingPongTest(const std::string& name, unsigned num_threads) {
+    // Create threads and collect starting cpu-time for each thread.
+    std::vector<base::ThreadTicks> thread_starts;
+    while (threads_.size() < num_threads) {
+      threads_.push_back(std::make_unique<base::Thread>("PingPonger"));
+      threads_.back()->Start();
+      if (base::ThreadTicks::IsSupported())
+        thread_starts.push_back(ThreadNow(*threads_.back()));
+    }
+
+    Init();
+
+    base::TimeTicks start = base::TimeTicks::Now();
+    PingPong(kNumRuns);
+    done_.Wait();
+    base::TimeTicks end = base::TimeTicks::Now();
+
+    // Gather the cpu-time spent on each thread. This does one extra task,
+    // but that should be in the noise given enough runs.
+    base::TimeDelta thread_time;
+    while (threads_.size()) {
+      if (base::ThreadTicks::IsSupported()) {
+        thread_time += ThreadNow(*threads_.back()) - thread_starts.back();
+        thread_starts.pop_back();
+      }
+      threads_.pop_back();
+    }
+
+    Reset();
+
+    double num_runs = static_cast<double>(kNumRuns);
+    double us_per_task_clock = (end - start).InMicroseconds() / num_runs;
+    double us_per_task_cpu = thread_time.InMicroseconds() / num_runs;
+
+    // Clock time per task.
+    perf_test::PrintResult(
+        "task", "", name + "_time ", us_per_task_clock, "us/hop", true);
+
+    // Total utilization across threads if available (likely higher).
+    if (base::ThreadTicks::IsSupported()) {
+      perf_test::PrintResult(
+          "task", "", name + "_cpu ", us_per_task_cpu, "us/hop", true);
+    }
+  }
+
+ protected:
+  void FinishMeasurement() { done_.Signal(); }
+  std::vector<std::unique_ptr<base::Thread>> threads_;
+
+ private:
+  base::WaitableEvent done_;
+};
+
+// Class to test task performance by posting empty tasks back and forth.
+class TaskPerfTest : public ThreadPerfTest {
+  base::Thread* NextThread(int count) {
+    return threads_[count % threads_.size()].get();
+  }
+
+  void PingPong(int hops) override {
+    if (!hops) {
+      FinishMeasurement();
+      return;
+    }
+    NextThread(hops)->task_runner()->PostTask(
+        FROM_HERE, base::BindOnce(&ThreadPerfTest::PingPong,
+                                  base::Unretained(this), hops - 1));
+  }
+};
+
+// This tries to test the 'best-case' as well as the 'worst-case' task posting
+// performance. The best-case keeps one thread alive such that it never yields,
+// while the worst-case forces a context switch for every task. Four threads
+// are used to ensure the threads do yield (with just two it might be possible
+// for both threads to stay awake if they can signal each other fast enough).
+TEST_F(TaskPerfTest, TaskPingPong) {
+  RunPingPongTest("1_Task_Threads", 1);
+  RunPingPongTest("4_Task_Threads", 4);
+}
+
+// Same as above, but add observers to test their perf impact.
+class MessageLoopObserver : public base::MessageLoop::TaskObserver {
+ public:
+  void WillProcessTask(const base::PendingTask& pending_task) override {}
+  void DidProcessTask(const base::PendingTask& pending_task) override {}
+};
+MessageLoopObserver message_loop_observer;
+
+class TaskObserverPerfTest : public TaskPerfTest {
+ public:
+  void Init() override {
+    TaskPerfTest::Init();
+    for (size_t i = 0; i < threads_.size(); i++) {
+      threads_[i]->message_loop()->AddTaskObserver(&message_loop_observer);
+    }
+  }
+};
+
+TEST_F(TaskObserverPerfTest, TaskPingPong) {
+  RunPingPongTest("1_Task_Threads_With_Observer", 1);
+  RunPingPongTest("4_Task_Threads_With_Observer", 4);
+}
+
+// Class to test our WaitableEvent performance by signaling back and forth.
+// The event type is a template parameter so we can also compare against other
+// implementations.
+template <typename WaitableEventType>
+class EventPerfTest : public ThreadPerfTest {
+ public:
+  void Init() override {
+    for (size_t i = 0; i < threads_.size(); i++) {
+      events_.push_back(std::make_unique<WaitableEventType>(
+          WaitableEvent::ResetPolicy::AUTOMATIC,
+          WaitableEvent::InitialState::NOT_SIGNALED));
+    }
+  }
+
+  void Reset() override { events_.clear(); }
+
+  void WaitAndSignalOnThread(size_t event) {
+    size_t next_event = (event + 1) % events_.size();
+    int my_hops = 0;
+    do {
+      events_[event]->Wait();
+      my_hops = --remaining_hops_;  // We own 'hops' between Wait and Signal.
+      events_[next_event]->Signal();
+    } while (my_hops > 0);
+    // Once we are done, all threads will signal as hops passes zero.
+    // We only signal completion once, on the thread that reaches zero.
+    if (!my_hops)
+      FinishMeasurement();
+  }
+
+  void PingPong(int hops) override {
+    remaining_hops_ = hops;
+    for (size_t i = 0; i < threads_.size(); i++) {
+      threads_[i]->task_runner()->PostTask(
+          FROM_HERE, base::BindOnce(&EventPerfTest::WaitAndSignalOnThread,
+                                    base::Unretained(this), i));
+    }
+
+    // Kick off the Signal ping-ponging.
+    events_.front()->Signal();
+  }
+
+  int remaining_hops_;
+  std::vector<std::unique_ptr<WaitableEventType>> events_;
+};
+
+// Like the task posting test, this exercises the same ping-pong pattern using
+// WaitableEvents. We only test four threads (worst-case), but we might want
+// to craft a way to test the best-case (where the thread doesn't end up
+// blocking because the event is already signaled).
+typedef EventPerfTest<base::WaitableEvent> WaitableEventThreadPerfTest;
+TEST_F(WaitableEventThreadPerfTest, EventPingPong) {
+  RunPingPongTest("4_WaitableEvent_Threads", 4);
+}
+
+// Build a minimal event using ConditionVariable.
+class ConditionVariableEvent {
+ public:
+  ConditionVariableEvent(WaitableEvent::ResetPolicy reset_policy,
+                         WaitableEvent::InitialState initial_state)
+      : cond_(&lock_), signaled_(false) {
+    DCHECK_EQ(WaitableEvent::ResetPolicy::AUTOMATIC, reset_policy);
+    DCHECK_EQ(WaitableEvent::InitialState::NOT_SIGNALED, initial_state);
+  }
+
+  void Signal() {
+    {
+      base::AutoLock scoped_lock(lock_);
+      signaled_ = true;
+    }
+    cond_.Signal();
+  }
+
+  void Wait() {
+    base::AutoLock scoped_lock(lock_);
+    while (!signaled_)
+      cond_.Wait();
+    signaled_ = false;
+  }
+
+ private:
+  base::Lock lock_;
+  base::ConditionVariable cond_;
+  bool signaled_;
+};
+
+// This is meant to test the absolute minimal context switching time
+// using our own base synchronization code.
+typedef EventPerfTest<ConditionVariableEvent> ConditionVariablePerfTest;
+TEST_F(ConditionVariablePerfTest, EventPingPong) {
+  RunPingPongTest("4_ConditionVariable_Threads", 4);
+}
+
+#if defined(OS_POSIX)
+// Absolutely 100% minimal posix waitable event. If there is a better/faster
+// way to force a context switch, we should use that instead.
+class PthreadEvent {
+ public:
+  PthreadEvent(WaitableEvent::ResetPolicy reset_policy,
+               WaitableEvent::InitialState initial_state) {
+    DCHECK_EQ(WaitableEvent::ResetPolicy::AUTOMATIC, reset_policy);
+    DCHECK_EQ(WaitableEvent::InitialState::NOT_SIGNALED, initial_state);
+    pthread_mutex_init(&mutex_, nullptr);
+    pthread_cond_init(&cond_, nullptr);
+    signaled_ = false;
+  }
+
+  ~PthreadEvent() {
+    pthread_cond_destroy(&cond_);
+    pthread_mutex_destroy(&mutex_);
+  }
+
+  void Signal() {
+    pthread_mutex_lock(&mutex_);
+    signaled_ = true;
+    pthread_mutex_unlock(&mutex_);
+    pthread_cond_signal(&cond_);
+  }
+
+  void Wait() {
+    pthread_mutex_lock(&mutex_);
+    while (!signaled_)
+      pthread_cond_wait(&cond_, &mutex_);
+    signaled_ = false;
+    pthread_mutex_unlock(&mutex_);
+  }
+
+ private:
+  bool signaled_;
+  pthread_mutex_t mutex_;
+  pthread_cond_t cond_;
+};
+
+// This is meant to test the absolute minimal context switching time.
+// If there is any faster way to do this we should substitute it in.
+typedef EventPerfTest<PthreadEvent> PthreadEventPerfTest;
+TEST_F(PthreadEventPerfTest, EventPingPong) {
+  RunPingPongTest("4_PthreadCondVar_Threads", 4);
+}
+
+#endif  // defined(OS_POSIX)
+
+}  // namespace
+
+}  // namespace base
diff --git a/base/threading/thread_restrictions.cc b/base/threading/thread_restrictions.cc
new file mode 100644
index 0000000..633bcb2
--- /dev/null
+++ b/base/threading/thread_restrictions.cc
@@ -0,0 +1,176 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/thread_restrictions.h"
+
+#if DCHECK_IS_ON()
+
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/threading/thread_local.h"
+
+namespace base {
+
+namespace {
+
+LazyInstance<ThreadLocalBoolean>::Leaky g_blocking_disallowed =
+    LAZY_INSTANCE_INITIALIZER;
+
+LazyInstance<ThreadLocalBoolean>::Leaky
+    g_singleton_disallowed = LAZY_INSTANCE_INITIALIZER;
+
+LazyInstance<ThreadLocalBoolean>::Leaky g_base_sync_primitives_disallowed =
+    LAZY_INSTANCE_INITIALIZER;
+
+}  // namespace
+
+void AssertBlockingAllowed() {
+  DCHECK(!g_blocking_disallowed.Get().Get())
+      << "Function marked as blocking was called from a scope that disallows "
+         "blocking! If this task is running inside the TaskScheduler, it needs "
+         "to have MayBlock() in its TaskTraits. Otherwise, consider making "
+         "this blocking work asynchronous or, as a last resort, you may use "
+         "ScopedAllowBlocking in a narrow scope.";
+}
+
+void DisallowBlocking() {
+  g_blocking_disallowed.Get().Set(true);
+}
+
+ScopedDisallowBlocking::ScopedDisallowBlocking()
+    : was_disallowed_(g_blocking_disallowed.Get().Get()) {
+  g_blocking_disallowed.Get().Set(true);
+}
+
+ScopedDisallowBlocking::~ScopedDisallowBlocking() {
+  DCHECK(g_blocking_disallowed.Get().Get());
+  g_blocking_disallowed.Get().Set(was_disallowed_);
+}
+
+ScopedAllowBlocking::ScopedAllowBlocking()
+    : was_disallowed_(g_blocking_disallowed.Get().Get()) {
+  g_blocking_disallowed.Get().Set(false);
+}
+
+ScopedAllowBlocking::~ScopedAllowBlocking() {
+  DCHECK(!g_blocking_disallowed.Get().Get());
+  g_blocking_disallowed.Get().Set(was_disallowed_);
+}
+
+void DisallowBaseSyncPrimitives() {
+  g_base_sync_primitives_disallowed.Get().Set(true);
+}
+
+ScopedAllowBaseSyncPrimitives::ScopedAllowBaseSyncPrimitives()
+    : was_disallowed_(g_base_sync_primitives_disallowed.Get().Get()) {
+  DCHECK(!g_blocking_disallowed.Get().Get())
+      << "To allow //base sync primitives in a scope where blocking is "
+         "disallowed use ScopedAllowBaseSyncPrimitivesOutsideBlockingScope.";
+  g_base_sync_primitives_disallowed.Get().Set(false);
+}
+
+ScopedAllowBaseSyncPrimitives::~ScopedAllowBaseSyncPrimitives() {
+  DCHECK(!g_base_sync_primitives_disallowed.Get().Get());
+  g_base_sync_primitives_disallowed.Get().Set(was_disallowed_);
+}
+
+ScopedAllowBaseSyncPrimitivesOutsideBlockingScope::
+    ScopedAllowBaseSyncPrimitivesOutsideBlockingScope()
+    : was_disallowed_(g_base_sync_primitives_disallowed.Get().Get()) {
+  g_base_sync_primitives_disallowed.Get().Set(false);
+}
+
+ScopedAllowBaseSyncPrimitivesOutsideBlockingScope::
+    ~ScopedAllowBaseSyncPrimitivesOutsideBlockingScope() {
+  DCHECK(!g_base_sync_primitives_disallowed.Get().Get());
+  g_base_sync_primitives_disallowed.Get().Set(was_disallowed_);
+}
+
+ScopedAllowBaseSyncPrimitivesForTesting::
+    ScopedAllowBaseSyncPrimitivesForTesting()
+    : was_disallowed_(g_base_sync_primitives_disallowed.Get().Get()) {
+  g_base_sync_primitives_disallowed.Get().Set(false);
+}
+
+ScopedAllowBaseSyncPrimitivesForTesting::
+    ~ScopedAllowBaseSyncPrimitivesForTesting() {
+  DCHECK(!g_base_sync_primitives_disallowed.Get().Get());
+  g_base_sync_primitives_disallowed.Get().Set(was_disallowed_);
+}
+
+namespace internal {
+
+void AssertBaseSyncPrimitivesAllowed() {
+  DCHECK(!g_base_sync_primitives_disallowed.Get().Get())
+      << "Waiting on a //base sync primitive is not allowed on this thread to "
+         "prevent jank and deadlock. If waiting on a //base sync primitive is "
+         "unavoidable, do it within the scope of a "
+         "ScopedAllowBaseSyncPrimitives. If in a test, "
+         "use ScopedAllowBaseSyncPrimitivesForTesting.";
+}
+
+void ResetThreadRestrictionsForTesting() {
+  g_blocking_disallowed.Get().Set(false);
+  g_singleton_disallowed.Get().Set(false);
+  g_base_sync_primitives_disallowed.Get().Set(false);
+}
+
+}  // namespace internal
+
+ThreadRestrictions::ScopedAllowIO::ScopedAllowIO()
+    : was_allowed_(SetIOAllowed(true)) {}
+
+ThreadRestrictions::ScopedAllowIO::~ScopedAllowIO() {
+  SetIOAllowed(was_allowed_);
+}
+
+// static
+bool ThreadRestrictions::SetIOAllowed(bool allowed) {
+  bool previous_disallowed = g_blocking_disallowed.Get().Get();
+  g_blocking_disallowed.Get().Set(!allowed);
+  return !previous_disallowed;
+}
+
+// static
+bool ThreadRestrictions::SetSingletonAllowed(bool allowed) {
+  bool previous_disallowed = g_singleton_disallowed.Get().Get();
+  g_singleton_disallowed.Get().Set(!allowed);
+  return !previous_disallowed;
+}
+
+// static
+void ThreadRestrictions::AssertSingletonAllowed() {
+  if (g_singleton_disallowed.Get().Get()) {
+    NOTREACHED() << "LazyInstance/Singleton is not allowed to be used on this "
+                 << "thread.  Most likely it's because this thread is not "
+                 << "joinable (or the current task is running with "
+                 << "TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN semantics), so "
+                 << "AtExitManager may have deleted the object on shutdown, "
+                 << "leading to a potential shutdown crash. If you need to use "
+                 << "the object from this context, it'll have to be updated to "
+                 << "use Leaky traits.";
+  }
+}
+
+// static
+void ThreadRestrictions::DisallowWaiting() {
+  DisallowBaseSyncPrimitives();
+}
+
+bool ThreadRestrictions::SetWaitAllowed(bool allowed) {
+  bool previous_disallowed = g_base_sync_primitives_disallowed.Get().Get();
+  g_base_sync_primitives_disallowed.Get().Set(!allowed);
+  return !previous_disallowed;
+}
+
+ThreadRestrictions::ScopedAllowWait::ScopedAllowWait()
+    : was_allowed_(SetWaitAllowed(true)) {}
+
+ThreadRestrictions::ScopedAllowWait::~ScopedAllowWait() {
+  SetWaitAllowed(was_allowed_);
+}
+
+}  // namespace base
+
+#endif  // DCHECK_IS_ON()
diff --git a/base/threading/thread_restrictions.h b/base/threading/thread_restrictions.h
new file mode 100644
index 0000000..57f2f21
--- /dev/null
+++ b/base/threading/thread_restrictions.h
@@ -0,0 +1,506 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_THREADING_THREAD_RESTRICTIONS_H_
+#define BASE_THREADING_THREAD_RESTRICTIONS_H_
+
+#include "base/base_export.h"
+#include "base/gtest_prod_util.h"
+#include "base/logging.h"
+#include "base/macros.h"
+
+class BrowserProcessImpl;
+class HistogramSynchronizer;
+class NativeBackendKWallet;
+class KeyStorageLinux;
+
+namespace android_webview {
+class AwFormDatabaseService;
+class CookieManager;
+class ScopedAllowInitGLBindings;
+}
+
+namespace cc {
+class CompletionEvent;
+class SingleThreadTaskGraphRunner;
+}
+namespace chromeos {
+class BlockingMethodCaller;
+namespace system {
+class StatisticsProviderImpl;
+}
+}
+namespace chrome_browser_net {
+class Predictor;
+}
+namespace content {
+class BrowserGpuChannelHostFactory;
+class BrowserGpuMemoryBufferManager;
+class BrowserMainLoop;
+class BrowserProcessSubThread;
+class BrowserShutdownProfileDumper;
+class BrowserSurfaceViewManager;
+class BrowserTestBase;
+class CategorizedWorkerPool;
+class NestedMessagePumpAndroid;
+class ScopedAllowWaitForAndroidLayoutTests;
+class ScopedAllowWaitForDebugURL;
+class SessionStorageDatabase;
+class SoftwareOutputDeviceMus;
+class SynchronousCompositor;
+class SynchronousCompositorHost;
+class SynchronousCompositorSyncCallBridge;
+class TextInputClientMac;
+}  // namespace content
+namespace cronet {
+class CronetPrefsManager;
+class CronetURLRequestContext;
+}  // namespace cronet
+namespace dbus {
+class Bus;
+}
+namespace disk_cache {
+class BackendImpl;
+class InFlightIO;
+}
+namespace functions {
+class ExecScriptScopedAllowBaseSyncPrimitives;
+}
+namespace gpu {
+class GpuChannelHost;
+}
+namespace leveldb {
+class LevelDBMojoProxy;
+}
+namespace media {
+class AudioInputDevice;
+class BlockingUrlProtocol;
+}
+namespace midi {
+class TaskService;  // https://crbug.com/796830
+}
+namespace mojo {
+class CoreLibraryInitializer;
+class SyncCallRestrictions;
+namespace edk {
+class ScopedIPCSupport;
+}
+}
+namespace rlz_lib {
+class FinancialPing;
+}
+namespace ui {
+class CommandBufferClientImpl;
+class CommandBufferLocal;
+class GpuState;
+class MaterialDesignController;
+}
+namespace net {
+class MultiThreadedCertVerifierScopedAllowBaseSyncPrimitives;
+class NetworkChangeNotifierMac;
+namespace internal {
+class AddressTrackerLinux;
+}
+}
+
+namespace remoting {
+class AutoThread;
+}
+
+namespace resource_coordinator {
+class TabManagerDelegate;
+}
+
+namespace service_manager {
+class ServiceProcessLauncher;
+}
+
+namespace shell_integration {
+class LaunchXdgUtilityScopedAllowBaseSyncPrimitives;
+}
+
+namespace ui {
+class WindowResizeHelperMac;
+}
+
+namespace views {
+class ScreenMus;
+}
+
+namespace viz {
+class ServerGpuMemoryBufferManager;
+}
+
+namespace webrtc {
+class DesktopConfigurationMonitor;
+}
+
+namespace base {
+
+namespace android {
+class JavaHandlerThread;
+}
+
+namespace internal {
+class TaskTracker;
+}
+
+class GetAppOutputScopedAllowBaseSyncPrimitives;
+class SimpleThread;
+class StackSamplingProfiler;
+class Thread;
+class ThreadTestHelper;
+
+#if DCHECK_IS_ON()
+#define INLINE_IF_DCHECK_IS_OFF BASE_EXPORT
+#define EMPTY_BODY_IF_DCHECK_IS_OFF
+#else
+#define INLINE_IF_DCHECK_IS_OFF inline
+#define EMPTY_BODY_IF_DCHECK_IS_OFF \
+  {}
+#endif
+
+// A "blocking call" refers to any call that causes the calling thread to wait
+// off-CPU. It includes but is not limited to calls that wait on synchronous
+// file I/O operations: read or write a file from disk, interact with a pipe or
+// a socket, rename or delete a file, enumerate files in a directory, etc.
+// Acquiring a low contention lock is not considered a blocking call.
+
+// Asserts that blocking calls are allowed in the current scope.
+//
+// Style tip: It's best if you put AssertBlockingAllowed() checks as close to
+// the blocking call as possible. For example:
+//
+// void ReadFile() {
+//   PreWork();
+//
+//   base::AssertBlockingAllowed();
+//   fopen(...);
+//
+//   PostWork();
+// }
+//
+// void Bar() {
+//   ReadFile();
+// }
+//
+// void Foo() {
+//   Bar();
+// }
+INLINE_IF_DCHECK_IS_OFF void AssertBlockingAllowed()
+    EMPTY_BODY_IF_DCHECK_IS_OFF;
+
+// Disallows blocking on the current thread.
+INLINE_IF_DCHECK_IS_OFF void DisallowBlocking() EMPTY_BODY_IF_DCHECK_IS_OFF;
+
+// Disallows blocking calls within its scope.
+class BASE_EXPORT ScopedDisallowBlocking {
+ public:
+  ScopedDisallowBlocking() EMPTY_BODY_IF_DCHECK_IS_OFF;
+  ~ScopedDisallowBlocking() EMPTY_BODY_IF_DCHECK_IS_OFF;
+
+ private:
+#if DCHECK_IS_ON()
+  const bool was_disallowed_;
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedDisallowBlocking);
+};
+
+// ScopedAllowBlocking(ForTesting) allow blocking calls within a scope where
+// they are normally disallowed.
+//
+// Avoid using this. Prefer making blocking calls from tasks posted to
+// base::TaskScheduler with base::MayBlock().
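+//
+// For example (a sketch; DoBlockingWork is a hypothetical function):
+//
+//   base::PostTaskWithTraits(FROM_HERE, {base::MayBlock()},
+//                            base::BindOnce(&DoBlockingWork));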
+class BASE_EXPORT ScopedAllowBlocking {
+ private:
+  // This can only be instantiated by friends. Use ScopedAllowBlockingForTesting
+  // in unit tests to avoid the friend requirement.
+  FRIEND_TEST_ALL_PREFIXES(ThreadRestrictionsTest, ScopedAllowBlocking);
+  friend class android_webview::ScopedAllowInitGLBindings;
+  friend class content::BrowserProcessSubThread;
+  friend class cronet::CronetPrefsManager;
+  friend class cronet::CronetURLRequestContext;
+  friend class media::AudioInputDevice;
+  friend class mojo::CoreLibraryInitializer;
+  friend class resource_coordinator::TabManagerDelegate;  // crbug.com/778703
+  friend class ui::MaterialDesignController;
+  friend class ScopedAllowBlockingForTesting;
+  friend class StackSamplingProfiler;
+
+  ScopedAllowBlocking() EMPTY_BODY_IF_DCHECK_IS_OFF;
+  ~ScopedAllowBlocking() EMPTY_BODY_IF_DCHECK_IS_OFF;
+
+#if DCHECK_IS_ON()
+  const bool was_disallowed_;
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedAllowBlocking);
+};
+
+class ScopedAllowBlockingForTesting {
+ public:
+  ScopedAllowBlockingForTesting() {}
+  ~ScopedAllowBlockingForTesting() {}
+
+ private:
+#if DCHECK_IS_ON()
+  ScopedAllowBlocking scoped_allow_blocking_;
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedAllowBlockingForTesting);
+};
+
+// "Waiting on a //base sync primitive" refers to calling one of these methods:
+// - base::WaitableEvent::*Wait*
+// - base::ConditionVariable::*Wait*
+// - base::Process::WaitForExit*
+
+// Disallows waiting on a //base sync primitive on the current thread.
+INLINE_IF_DCHECK_IS_OFF void DisallowBaseSyncPrimitives()
+    EMPTY_BODY_IF_DCHECK_IS_OFF;
+
+// ScopedAllowBaseSyncPrimitives(ForTesting)(OutsideBlockingScope) allow waiting
+// on a //base sync primitive within a scope where this is normally disallowed.
+//
+// Avoid using this.
+//
+// Instead of waiting on a WaitableEvent or a ConditionVariable, put the work
+// that should happen after the wait in a callback and post that callback from
+// where the WaitableEvent or ConditionVariable would have been signaled. If
+// something needs to be scheduled after many tasks have executed, use
+// base::BarrierClosure.
+//
+// On Windows, join processes asynchronously using base::win::ObjectWatcher.
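+//
+// For example, instead of blocking on an event (a sketch; |task_runner|,
+// Consumer and ProcessData() are hypothetical names):
+//
+//   // Instead of:
+//   //   data_ready_event_.Wait();
+//   //   consumer->ProcessData();
+//   // have the producer post the continuation once the data is ready:
+//   task_runner->PostTask(FROM_HERE,
+//                         base::BindOnce(&Consumer::ProcessData, consumer));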
+
+// This can only be used in a scope where blocking is allowed.
+class BASE_EXPORT ScopedAllowBaseSyncPrimitives {
+ private:
+  // This can only be instantiated by friends. Use
+  // ScopedAllowBaseSyncPrimitivesForTesting in unit tests to avoid the friend
+  // requirement.
+  FRIEND_TEST_ALL_PREFIXES(ThreadRestrictionsTest,
+                           ScopedAllowBaseSyncPrimitives);
+  FRIEND_TEST_ALL_PREFIXES(ThreadRestrictionsTest,
+                           ScopedAllowBaseSyncPrimitivesResetsState);
+  FRIEND_TEST_ALL_PREFIXES(ThreadRestrictionsTest,
+                           ScopedAllowBaseSyncPrimitivesWithBlockingDisallowed);
+  friend class base::GetAppOutputScopedAllowBaseSyncPrimitives;
+  friend class content::BrowserProcessSubThread;
+  friend class content::SessionStorageDatabase;
+  friend class functions::ExecScriptScopedAllowBaseSyncPrimitives;
+  friend class leveldb::LevelDBMojoProxy;
+  friend class media::BlockingUrlProtocol;
+  friend class net::MultiThreadedCertVerifierScopedAllowBaseSyncPrimitives;
+  friend class rlz_lib::FinancialPing;
+  friend class shell_integration::LaunchXdgUtilityScopedAllowBaseSyncPrimitives;
+  friend class webrtc::DesktopConfigurationMonitor;
+
+  ScopedAllowBaseSyncPrimitives() EMPTY_BODY_IF_DCHECK_IS_OFF;
+  ~ScopedAllowBaseSyncPrimitives() EMPTY_BODY_IF_DCHECK_IS_OFF;
+
+#if DCHECK_IS_ON()
+  const bool was_disallowed_;
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedAllowBaseSyncPrimitives);
+};
+
+// This can be used in a scope where blocking is disallowed.
+class BASE_EXPORT ScopedAllowBaseSyncPrimitivesOutsideBlockingScope {
+ private:
+  // This can only be instantiated by friends. Use
+  // ScopedAllowBaseSyncPrimitivesForTesting in unit tests to avoid the friend
+  // requirement.
+  FRIEND_TEST_ALL_PREFIXES(ThreadRestrictionsTest,
+                           ScopedAllowBaseSyncPrimitivesOutsideBlockingScope);
+  FRIEND_TEST_ALL_PREFIXES(
+      ThreadRestrictionsTest,
+      ScopedAllowBaseSyncPrimitivesOutsideBlockingScopeResetsState);
+  friend class ::KeyStorageLinux;
+  friend class content::SynchronousCompositor;
+  friend class content::SynchronousCompositorHost;
+  friend class content::SynchronousCompositorSyncCallBridge;
+  friend class midi::TaskService;  // https://crbug.com/796830
+  // Not used in production yet, https://crbug.com/844078.
+  friend class service_manager::ServiceProcessLauncher;
+
+  ScopedAllowBaseSyncPrimitivesOutsideBlockingScope()
+      EMPTY_BODY_IF_DCHECK_IS_OFF;
+  ~ScopedAllowBaseSyncPrimitivesOutsideBlockingScope()
+      EMPTY_BODY_IF_DCHECK_IS_OFF;
+
+#if DCHECK_IS_ON()
+  const bool was_disallowed_;
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedAllowBaseSyncPrimitivesOutsideBlockingScope);
+};
+
+// This can be used in tests without being a friend of
+// ScopedAllowBaseSyncPrimitives(OutsideBlockingScope).
+class BASE_EXPORT ScopedAllowBaseSyncPrimitivesForTesting {
+ public:
+  ScopedAllowBaseSyncPrimitivesForTesting() EMPTY_BODY_IF_DCHECK_IS_OFF;
+  ~ScopedAllowBaseSyncPrimitivesForTesting() EMPTY_BODY_IF_DCHECK_IS_OFF;
+
+ private:
+#if DCHECK_IS_ON()
+  const bool was_disallowed_;
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedAllowBaseSyncPrimitivesForTesting);
+};
+
+namespace internal {
+
+// Asserts that waiting on a //base sync primitive is allowed in the current
+// scope.
+INLINE_IF_DCHECK_IS_OFF void AssertBaseSyncPrimitivesAllowed()
+    EMPTY_BODY_IF_DCHECK_IS_OFF;
+
+// Resets all thread restrictions on the current thread.
+INLINE_IF_DCHECK_IS_OFF void ResetThreadRestrictionsForTesting()
+    EMPTY_BODY_IF_DCHECK_IS_OFF;
+
+}  // namespace internal
+
+class BASE_EXPORT ThreadRestrictions {
+ public:
+  // Constructing a ScopedAllowIO temporarily allows IO for the current
+  // thread.  Doing this is almost always incorrect.
+  //
+  // DEPRECATED. Use ScopedAllowBlocking(ForTesting).
+  class BASE_EXPORT ScopedAllowIO {
+   public:
+    ScopedAllowIO() EMPTY_BODY_IF_DCHECK_IS_OFF;
+    ~ScopedAllowIO() EMPTY_BODY_IF_DCHECK_IS_OFF;
+
+   private:
+#if DCHECK_IS_ON()
+    const bool was_allowed_;
+#endif
+
+    DISALLOW_COPY_AND_ASSIGN(ScopedAllowIO);
+  };
+
+#if DCHECK_IS_ON()
+  // Set whether the current thread is allowed to make IO calls.
+  // Threads start out in the *allowed* state.
+  // Returns the previous value.
+  //
+  // DEPRECATED. Use ScopedAllowBlocking(ForTesting) or ScopedDisallowBlocking.
+  static bool SetIOAllowed(bool allowed);
+
+  // Set whether the current thread can use singletons.  Returns the previous
+  // value.
+  static bool SetSingletonAllowed(bool allowed);
+
+  // Check whether the current thread is allowed to use singletons (Singleton /
+  // LazyInstance).  DCHECKs if not.
+  static void AssertSingletonAllowed();
+
+  // Disable waiting on the current thread. Threads start out in the *allowed*
+  // state. Returns the previous value.
+  //
+  // DEPRECATED. Use DisallowBaseSyncPrimitives.
+  static void DisallowWaiting();
+#else
+  // Inline the empty definitions of these functions so that they can be
+  // compiled out.
+  static bool SetIOAllowed(bool allowed) { return true; }
+  static bool SetSingletonAllowed(bool allowed) { return true; }
+  static void AssertSingletonAllowed() {}
+  static void DisallowWaiting() {}
+#endif
+
+ private:
+  // DO NOT ADD ANY OTHER FRIEND STATEMENTS, talk to jam or brettw first.
+  // BEGIN ALLOWED USAGE.
+  friend class android_webview::AwFormDatabaseService;
+  friend class android_webview::CookieManager;
+  friend class base::StackSamplingProfiler;
+  friend class content::BrowserMainLoop;
+  friend class content::BrowserShutdownProfileDumper;
+  friend class content::BrowserSurfaceViewManager;
+  friend class content::BrowserTestBase;
+  friend class content::NestedMessagePumpAndroid;
+  friend class content::ScopedAllowWaitForAndroidLayoutTests;
+  friend class content::ScopedAllowWaitForDebugURL;
+  friend class ::HistogramSynchronizer;
+  friend class internal::TaskTracker;
+  friend class cc::CompletionEvent;
+  friend class cc::SingleThreadTaskGraphRunner;
+  friend class content::CategorizedWorkerPool;
+  friend class remoting::AutoThread;
+  friend class ui::WindowResizeHelperMac;
+  friend class MessagePumpDefault;
+  friend class SimpleThread;
+  friend class Thread;
+  friend class ThreadTestHelper;
+  friend class PlatformThread;
+  friend class android::JavaHandlerThread;
+  friend class mojo::SyncCallRestrictions;
+  friend class mojo::edk::ScopedIPCSupport;
+  friend class ui::CommandBufferClientImpl;
+  friend class ui::CommandBufferLocal;
+  friend class ui::GpuState;
+
+  // END ALLOWED USAGE.
+  // BEGIN USAGE THAT NEEDS TO BE FIXED.
+  friend class ::chromeos::BlockingMethodCaller;  // http://crbug.com/125360
+  friend class ::chromeos::system::StatisticsProviderImpl;  // http://crbug.com/125385
+  friend class chrome_browser_net::Predictor;     // http://crbug.com/78451
+  friend class
+      content::BrowserGpuChannelHostFactory;      // http://crbug.com/125248
+  friend class
+      content::BrowserGpuMemoryBufferManager;     // http://crbug.com/420368
+  friend class content::TextInputClientMac;       // http://crbug.com/121917
+  friend class dbus::Bus;                         // http://crbug.com/125222
+  friend class disk_cache::BackendImpl;           // http://crbug.com/74623
+  friend class disk_cache::InFlightIO;            // http://crbug.com/74623
+  friend class gpu::GpuChannelHost;               // http://crbug.com/125264
+  friend class net::internal::AddressTrackerLinux;  // http://crbug.com/125097
+  friend class net::NetworkChangeNotifierMac;     // http://crbug.com/125097
+  friend class ::BrowserProcessImpl;              // http://crbug.com/125207
+  friend class ::NativeBackendKWallet;            // http://crbug.com/125331
+#if !defined(OFFICIAL_BUILD)
+  friend class content::SoftwareOutputDeviceMus;  // Interim non-production code
+#endif
+  friend class views::ScreenMus;
+  friend class viz::ServerGpuMemoryBufferManager;
+// END USAGE THAT NEEDS TO BE FIXED.
+
+#if DCHECK_IS_ON()
+  // DEPRECATED. Use ScopedAllowBaseSyncPrimitives.
+  static bool SetWaitAllowed(bool allowed);
+#else
+  static bool SetWaitAllowed(bool allowed) { return true; }
+#endif
+
+  // Constructing a ScopedAllowWait temporarily allows waiting on the current
+  // thread.  Doing this is almost always incorrect, which is why we limit who
+  // can use this through friend declarations. If you find yourself needing to
+  // use this, find another way. Talk to jam or brettw.
+  //
+  // DEPRECATED. Use ScopedAllowBaseSyncPrimitives.
+  class BASE_EXPORT ScopedAllowWait {
+   public:
+    ScopedAllowWait() EMPTY_BODY_IF_DCHECK_IS_OFF;
+    ~ScopedAllowWait() EMPTY_BODY_IF_DCHECK_IS_OFF;
+
+   private:
+#if DCHECK_IS_ON()
+    const bool was_allowed_;
+#endif
+
+    DISALLOW_COPY_AND_ASSIGN(ScopedAllowWait);
+  };
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ThreadRestrictions);
+};
+
+}  // namespace base
+
+#endif  // BASE_THREADING_THREAD_RESTRICTIONS_H_
diff --git a/base/threading/thread_restrictions_unittest.cc b/base/threading/thread_restrictions_unittest.cc
new file mode 100644
index 0000000..a957a9a
--- /dev/null
+++ b/base/threading/thread_restrictions_unittest.cc
@@ -0,0 +1,137 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/thread_restrictions.h"
+
+#include <utility>
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/test/gtest_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+class ThreadRestrictionsTest : public testing::Test {
+ public:
+  ThreadRestrictionsTest() = default;
+  ~ThreadRestrictionsTest() override {
+    internal::ResetThreadRestrictionsForTesting();
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ThreadRestrictionsTest);
+};
+
+}  // namespace
+
+TEST_F(ThreadRestrictionsTest, BlockingAllowedByDefault) {
+  AssertBlockingAllowed();
+}
+
+TEST_F(ThreadRestrictionsTest, ScopedDisallowBlocking) {
+  {
+    ScopedDisallowBlocking scoped_disallow_blocking;
+    EXPECT_DCHECK_DEATH({ AssertBlockingAllowed(); });
+  }
+  AssertBlockingAllowed();
+}
+
+TEST_F(ThreadRestrictionsTest, ScopedAllowBlocking) {
+  ScopedDisallowBlocking scoped_disallow_blocking;
+  {
+    ScopedAllowBlocking scoped_allow_blocking;
+    AssertBlockingAllowed();
+  }
+  EXPECT_DCHECK_DEATH({ AssertBlockingAllowed(); });
+}
+
+TEST_F(ThreadRestrictionsTest, ScopedAllowBlockingForTesting) {
+  ScopedDisallowBlocking scoped_disallow_blocking;
+  {
+    ScopedAllowBlockingForTesting scoped_allow_blocking_for_testing;
+    AssertBlockingAllowed();
+  }
+  EXPECT_DCHECK_DEATH({ AssertBlockingAllowed(); });
+}
+
+TEST_F(ThreadRestrictionsTest, BaseSyncPrimitivesAllowedByDefault) {}
+
+TEST_F(ThreadRestrictionsTest, DisallowBaseSyncPrimitives) {
+  DisallowBaseSyncPrimitives();
+  EXPECT_DCHECK_DEATH({ internal::AssertBaseSyncPrimitivesAllowed(); });
+}
+
+TEST_F(ThreadRestrictionsTest, ScopedAllowBaseSyncPrimitives) {
+  DisallowBaseSyncPrimitives();
+  ScopedAllowBaseSyncPrimitives scoped_allow_base_sync_primitives;
+  internal::AssertBaseSyncPrimitivesAllowed();
+}
+
+TEST_F(ThreadRestrictionsTest, ScopedAllowBaseSyncPrimitivesResetsState) {
+  DisallowBaseSyncPrimitives();
+  { ScopedAllowBaseSyncPrimitives scoped_allow_base_sync_primitives; }
+  EXPECT_DCHECK_DEATH({ internal::AssertBaseSyncPrimitivesAllowed(); });
+}
+
+TEST_F(ThreadRestrictionsTest,
+       ScopedAllowBaseSyncPrimitivesWithBlockingDisallowed) {
+  ScopedDisallowBlocking scoped_disallow_blocking;
+  DisallowBaseSyncPrimitives();
+
+  // This should DCHECK because blocking is not allowed in this scope
+  // and OutsideBlockingScope is not passed to the constructor.
+  EXPECT_DCHECK_DEATH(
+      { ScopedAllowBaseSyncPrimitives scoped_allow_base_sync_primitives; });
+}
+
+TEST_F(ThreadRestrictionsTest,
+       ScopedAllowBaseSyncPrimitivesOutsideBlockingScope) {
+  ScopedDisallowBlocking scoped_disallow_blocking;
+  DisallowBaseSyncPrimitives();
+  ScopedAllowBaseSyncPrimitivesOutsideBlockingScope
+      scoped_allow_base_sync_primitives;
+  internal::AssertBaseSyncPrimitivesAllowed();
+}
+
+TEST_F(ThreadRestrictionsTest,
+       ScopedAllowBaseSyncPrimitivesOutsideBlockingScopeResetsState) {
+  DisallowBaseSyncPrimitives();
+  {
+    ScopedAllowBaseSyncPrimitivesOutsideBlockingScope
+        scoped_allow_base_sync_primitives;
+  }
+  EXPECT_DCHECK_DEATH({ internal::AssertBaseSyncPrimitivesAllowed(); });
+}
+
+TEST_F(ThreadRestrictionsTest, ScopedAllowBaseSyncPrimitivesForTesting) {
+  DisallowBaseSyncPrimitives();
+  ScopedAllowBaseSyncPrimitivesForTesting
+      scoped_allow_base_sync_primitives_for_testing;
+  internal::AssertBaseSyncPrimitivesAllowed();
+}
+
+TEST_F(ThreadRestrictionsTest,
+       ScopedAllowBaseSyncPrimitivesForTestingResetsState) {
+  DisallowBaseSyncPrimitives();
+  {
+    ScopedAllowBaseSyncPrimitivesForTesting
+        scoped_allow_base_sync_primitives_for_testing;
+  }
+  EXPECT_DCHECK_DEATH({ internal::AssertBaseSyncPrimitivesAllowed(); });
+}
+
+TEST_F(ThreadRestrictionsTest,
+       ScopedAllowBaseSyncPrimitivesForTestingWithBlockingDisallowed) {
+  ScopedDisallowBlocking scoped_disallow_blocking;
+  DisallowBaseSyncPrimitives();
+  // This should not DCHECK.
+  ScopedAllowBaseSyncPrimitivesForTesting
+      scoped_allow_base_sync_primitives_for_testing;
+}
+
+}  // namespace base
diff --git a/base/threading/thread_task_runner_handle.cc b/base/threading/thread_task_runner_handle.cc
new file mode 100644
index 0000000..314b303
--- /dev/null
+++ b/base/threading/thread_task_runner_handle.cc
@@ -0,0 +1,106 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/thread_task_runner_handle.h"
+
+#include <utility>
+
+#include "base/bind.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/run_loop.h"
+#include "base/threading/sequenced_task_runner_handle.h"
+#include "base/threading/thread_local.h"
+
+namespace base {
+
+namespace {
+
+base::LazyInstance<base::ThreadLocalPointer<ThreadTaskRunnerHandle>>::Leaky
+    thread_task_runner_tls = LAZY_INSTANCE_INITIALIZER;
+
+}  // namespace
+
+// static
+scoped_refptr<SingleThreadTaskRunner> ThreadTaskRunnerHandle::Get() {
+  ThreadTaskRunnerHandle* current = thread_task_runner_tls.Pointer()->Get();
+  CHECK(current) << "Error: This caller requires a single-threaded context "
+                    "(i.e. the current task needs to run from a "
+                    "SingleThreadTaskRunner).";
+  return current->task_runner_;
+}
+
+// static
+bool ThreadTaskRunnerHandle::IsSet() {
+  return !!thread_task_runner_tls.Pointer()->Get();
+}
+
+// static
+ScopedClosureRunner ThreadTaskRunnerHandle::OverrideForTesting(
+    scoped_refptr<SingleThreadTaskRunner> overriding_task_runner) {
+  // OverrideForTesting() is not compatible with a SequencedTaskRunnerHandle
+  // being set (but SequencedTaskRunnerHandle::IsSet() includes
+  // ThreadTaskRunnerHandle::IsSet() so that's discounted as the only valid
+  // excuse for it to be true). Sadly this means that tests that merely need a
+  // SequencedTaskRunnerHandle on their main thread can be forced to use a
+  // ThreadTaskRunnerHandle if they're also using test task runners (that
+  // OverrideForTesting() when running their tasks from said main thread). To
+  // solve this: sequence_task_runner_handle.cc and thread_task_runner_handle.cc
+  // would have to be merged into a single impl file and share TLS state. This
+  // was deemed unnecessary for now as most tests should use higher-level
+  // constructs and not have to instantiate task runner handles on their own.
+  DCHECK(!SequencedTaskRunnerHandle::IsSet() || IsSet());
+
+  if (!IsSet()) {
+    auto top_level_ttrh = std::make_unique<ThreadTaskRunnerHandle>(
+        std::move(overriding_task_runner));
+    return ScopedClosureRunner(base::BindOnce(
+        [](std::unique_ptr<ThreadTaskRunnerHandle> ttrh_to_release) {},
+        std::move(top_level_ttrh)));
+  }
+
+  ThreadTaskRunnerHandle* ttrh = thread_task_runner_tls.Pointer()->Get();
+  // Swap the two (the closure below then binds |overriding_task_runner|,
+  // which now holds the previous task runner, as |task_runner_to_restore|).
+  ttrh->task_runner_.swap(overriding_task_runner);
+
+  auto no_running_during_override =
+      std::make_unique<RunLoop::ScopedDisallowRunningForTesting>();
+
+  return ScopedClosureRunner(base::BindOnce(
+      [](scoped_refptr<SingleThreadTaskRunner> task_runner_to_restore,
+         SingleThreadTaskRunner* expected_task_runner_before_restore,
+         std::unique_ptr<RunLoop::ScopedDisallowRunningForTesting>
+             no_running_during_override) {
+        ThreadTaskRunnerHandle* ttrh = thread_task_runner_tls.Pointer()->Get();
+
+        DCHECK_EQ(expected_task_runner_before_restore, ttrh->task_runner_.get())
+            << "Nested overrides must expire their ScopedClosureRunners "
+               "in LIFO order.";
+
+        ttrh->task_runner_.swap(task_runner_to_restore);
+      },
+      std::move(overriding_task_runner),
+      base::Unretained(ttrh->task_runner_.get()),
+      std::move(no_running_during_override)));
+}
+
+ThreadTaskRunnerHandle::ThreadTaskRunnerHandle(
+    scoped_refptr<SingleThreadTaskRunner> task_runner)
+    : task_runner_(std::move(task_runner)) {
+  DCHECK(task_runner_->BelongsToCurrentThread());
+  // No SequencedTaskRunnerHandle (which includes ThreadTaskRunnerHandles)
+  // should already be set for this thread.
+  DCHECK(!SequencedTaskRunnerHandle::IsSet());
+  thread_task_runner_tls.Pointer()->Set(this);
+}
+
+ThreadTaskRunnerHandle::~ThreadTaskRunnerHandle() {
+  DCHECK(task_runner_->BelongsToCurrentThread());
+  DCHECK_EQ(thread_task_runner_tls.Pointer()->Get(), this);
+  thread_task_runner_tls.Pointer()->Set(nullptr);
+}
+
+}  // namespace base
diff --git a/base/threading/thread_task_runner_handle.h b/base/threading/thread_task_runner_handle.h
new file mode 100644
index 0000000..f6b71d7
--- /dev/null
+++ b/base/threading/thread_task_runner_handle.h
@@ -0,0 +1,57 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_THREADING_THREAD_TASK_RUNNER_HANDLE_H_
+#define BASE_THREADING_THREAD_TASK_RUNNER_HANDLE_H_
+
+#include "base/base_export.h"
+#include "base/callback_helpers.h"
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/single_thread_task_runner.h"
+
+namespace base {
+
+// ThreadTaskRunnerHandle stores a reference to a thread's TaskRunner
+// in thread-local storage.  Callers can then retrieve the TaskRunner
+// for the current thread by calling ThreadTaskRunnerHandle::Get().
+// At most one TaskRunner may be bound to each thread at a time.
+// Prefer SequencedTaskRunnerHandle to this unless thread affinity is required.
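+//
+// A typical posting site only needs the getter (a sketch; DoSomething is a
+// hypothetical function; the handle itself is usually bound by MessageLoop):
+//
+//   ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+//                                           base::BindOnce(&DoSomething));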
+class BASE_EXPORT ThreadTaskRunnerHandle {
+ public:
+  // Gets the SingleThreadTaskRunner for the current thread.
+  static scoped_refptr<SingleThreadTaskRunner> Get();
+
+  // Returns true if a SingleThreadTaskRunner is already bound to the current
+  // thread.
+  static bool IsSet();
+
+  // Overrides ThreadTaskRunnerHandle::Get()'s |task_runner_| to point at
+  // |overriding_task_runner| until the returned ScopedClosureRunner goes out of
+  // scope (instantiates a ThreadTaskRunnerHandle for that scope if |!IsSet()|).
+  // Nested overrides are allowed but callers must ensure the
+  // ScopedClosureRunners expire in LIFO (stack) order. Note: nesting
+  // ThreadTaskRunnerHandles isn't generally desired but it's useful in unit
+  // tests where multiple task runners can share the main thread for simplicity
+  // and determinism.
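+  //
+  // E.g. (a sketch; |test_task_runner| stands in for any
+  // SingleThreadTaskRunner):
+  //
+  //   ScopedClosureRunner undo_override =
+  //       ThreadTaskRunnerHandle::OverrideForTesting(test_task_runner);
+  //   // ThreadTaskRunnerHandle::Get() now returns |test_task_runner|;
+  //   // destroying |undo_override| restores the previous state.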
+  static ScopedClosureRunner OverrideForTesting(
+      scoped_refptr<SingleThreadTaskRunner> overriding_task_runner)
+      WARN_UNUSED_RESULT;
+
+  // Binds |task_runner| to the current thread. |task_runner| must belong
+  // to the current thread for this to succeed.
+  explicit ThreadTaskRunnerHandle(
+      scoped_refptr<SingleThreadTaskRunner> task_runner);
+  ~ThreadTaskRunnerHandle();
+
+ private:
+  scoped_refptr<SingleThreadTaskRunner> task_runner_;
+
+  DISALLOW_COPY_AND_ASSIGN(ThreadTaskRunnerHandle);
+};
+
+}  // namespace base
+
+#endif  // BASE_THREADING_THREAD_TASK_RUNNER_HANDLE_H_
diff --git a/base/threading/thread_task_runner_handle_unittest.cc b/base/threading/thread_task_runner_handle_unittest.cc
new file mode 100644
index 0000000..1aa02d1
--- /dev/null
+++ b/base/threading/thread_task_runner_handle_unittest.cc
@@ -0,0 +1,122 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/thread_task_runner_handle.h"
+
+#include "base/memory/ref_counted.h"
+#include "base/test/gtest_util.h"
+#include "base/test/test_simple_task_runner.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(ThreadTaskRunnerHandleTest, Basic) {
+  scoped_refptr<SingleThreadTaskRunner> task_runner(new TestSimpleTaskRunner);
+
+  EXPECT_FALSE(ThreadTaskRunnerHandle::IsSet());
+  {
+    ThreadTaskRunnerHandle ttrh1(task_runner);
+    EXPECT_TRUE(ThreadTaskRunnerHandle::IsSet());
+    EXPECT_EQ(task_runner, ThreadTaskRunnerHandle::Get());
+  }
+  EXPECT_FALSE(ThreadTaskRunnerHandle::IsSet());
+}
+
+TEST(ThreadTaskRunnerHandleTest, DeathOnImplicitOverride) {
+  scoped_refptr<SingleThreadTaskRunner> task_runner(new TestSimpleTaskRunner);
+  scoped_refptr<SingleThreadTaskRunner> overriding_task_runner(
+      new TestSimpleTaskRunner);
+
+  ThreadTaskRunnerHandle ttrh(task_runner);
+  EXPECT_DCHECK_DEATH(
+      { ThreadTaskRunnerHandle overriding_ttrh(overriding_task_runner); });
+}
+
+TEST(ThreadTaskRunnerHandleTest, OverrideForTestingExistingTTRH) {
+  scoped_refptr<SingleThreadTaskRunner> task_runner_1(new TestSimpleTaskRunner);
+  scoped_refptr<SingleThreadTaskRunner> task_runner_2(new TestSimpleTaskRunner);
+  scoped_refptr<SingleThreadTaskRunner> task_runner_3(new TestSimpleTaskRunner);
+  scoped_refptr<SingleThreadTaskRunner> task_runner_4(new TestSimpleTaskRunner);
+
+  EXPECT_FALSE(ThreadTaskRunnerHandle::IsSet());
+  {
+    // TTRH in place prior to override.
+    ThreadTaskRunnerHandle ttrh1(task_runner_1);
+    EXPECT_TRUE(ThreadTaskRunnerHandle::IsSet());
+    EXPECT_EQ(task_runner_1, ThreadTaskRunnerHandle::Get());
+
+    {
+      // Override.
+      ScopedClosureRunner undo_override_2 =
+          ThreadTaskRunnerHandle::OverrideForTesting(task_runner_2);
+      EXPECT_TRUE(ThreadTaskRunnerHandle::IsSet());
+      EXPECT_EQ(task_runner_2, ThreadTaskRunnerHandle::Get());
+
+      {
+        // Nested override.
+        ScopedClosureRunner undo_override_3 =
+            ThreadTaskRunnerHandle::OverrideForTesting(task_runner_3);
+        EXPECT_TRUE(ThreadTaskRunnerHandle::IsSet());
+        EXPECT_EQ(task_runner_3, ThreadTaskRunnerHandle::Get());
+      }
+
+      // Back to single override.
+      EXPECT_TRUE(ThreadTaskRunnerHandle::IsSet());
+      EXPECT_EQ(task_runner_2, ThreadTaskRunnerHandle::Get());
+
+      {
+        // Back up to a double override with another TTRH.
+        ScopedClosureRunner undo_override_4 =
+            ThreadTaskRunnerHandle::OverrideForTesting(task_runner_4);
+        EXPECT_TRUE(ThreadTaskRunnerHandle::IsSet());
+        EXPECT_EQ(task_runner_4, ThreadTaskRunnerHandle::Get());
+      }
+    }
+
+    // Back to simple TTRH.
+    EXPECT_TRUE(ThreadTaskRunnerHandle::IsSet());
+    EXPECT_EQ(task_runner_1, ThreadTaskRunnerHandle::Get());
+  }
+  EXPECT_FALSE(ThreadTaskRunnerHandle::IsSet());
+}
+
+TEST(ThreadTaskRunnerHandleTest, OverrideForTestingNoExistingTTRH) {
+  scoped_refptr<SingleThreadTaskRunner> task_runner_1(new TestSimpleTaskRunner);
+  scoped_refptr<SingleThreadTaskRunner> task_runner_2(new TestSimpleTaskRunner);
+
+  EXPECT_FALSE(ThreadTaskRunnerHandle::IsSet());
+  {
+    // Override with no TTRH in place.
+    ScopedClosureRunner undo_override_1 =
+        ThreadTaskRunnerHandle::OverrideForTesting(task_runner_1);
+    EXPECT_TRUE(ThreadTaskRunnerHandle::IsSet());
+    EXPECT_EQ(task_runner_1, ThreadTaskRunnerHandle::Get());
+
+    {
+      // Nested override works the same.
+      ScopedClosureRunner undo_override_2 =
+          ThreadTaskRunnerHandle::OverrideForTesting(task_runner_2);
+      EXPECT_TRUE(ThreadTaskRunnerHandle::IsSet());
+      EXPECT_EQ(task_runner_2, ThreadTaskRunnerHandle::Get());
+    }
+
+    // Back to single override.
+    EXPECT_TRUE(ThreadTaskRunnerHandle::IsSet());
+    EXPECT_EQ(task_runner_1, ThreadTaskRunnerHandle::Get());
+  }
+  EXPECT_FALSE(ThreadTaskRunnerHandle::IsSet());
+}
+
+TEST(ThreadTaskRunnerHandleTest, DeathOnTTRHOverOverride) {
+  scoped_refptr<SingleThreadTaskRunner> task_runner(new TestSimpleTaskRunner);
+  scoped_refptr<SingleThreadTaskRunner> overriding_task_runner(
+      new TestSimpleTaskRunner);
+
+  ScopedClosureRunner undo_override =
+      ThreadTaskRunnerHandle::OverrideForTesting(task_runner);
+  EXPECT_DCHECK_DEATH(
+      { ThreadTaskRunnerHandle overriding_ttrh(overriding_task_runner); });
+}
+
+}  // namespace base
diff --git a/base/threading/thread_unittest.cc b/base/threading/thread_unittest.cc
new file mode 100644
index 0000000..d90b1f9
--- /dev/null
+++ b/base/threading/thread_unittest.cc
@@ -0,0 +1,579 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/thread.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <utility>
+#include <vector>
+
+#include "base/bind.h"
+#include "base/debug/leak_annotations.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/message_loop/message_loop.h"
+#include "base/message_loop/message_loop_current.h"
+#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/test/gtest_util.h"
+#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/platform_test.h"
+
+using base::Thread;
+
+typedef PlatformTest ThreadTest;
+
+namespace {
+
+void ToggleValue(bool* value) {
+  ANNOTATE_BENIGN_RACE(value, "Test-only data race on boolean "
+                       "in base/thread_unittest");
+  *value = !*value;
+}
+
+class SleepInsideInitThread : public Thread {
+ public:
+  SleepInsideInitThread() : Thread("none") {
+    init_called_ = false;
+    ANNOTATE_BENIGN_RACE(
+        this, "Benign test-only data race on vptr - http://crbug.com/98219");
+  }
+  ~SleepInsideInitThread() override { Stop(); }
+
+  void Init() override {
+    base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(500));
+    init_called_ = true;
+  }
+  bool InitCalled() { return init_called_; }
+
+ private:
+  bool init_called_;
+
+  DISALLOW_COPY_AND_ASSIGN(SleepInsideInitThread);
+};
+
+enum ThreadEvent {
+  // Thread::Init() was called.
+  THREAD_EVENT_INIT = 0,
+
+  // The MessageLoop for the thread was deleted.
+  THREAD_EVENT_MESSAGE_LOOP_DESTROYED,
+
+  // Thread::CleanUp() was called.
+  THREAD_EVENT_CLEANUP,
+
+  // Keep at end of list.
+  THREAD_NUM_EVENTS
+};
+
+typedef std::vector<ThreadEvent> EventList;
+
+class CaptureToEventList : public Thread {
+ public:
+  // This Thread pushes events into the vector |event_list| to show
+  // the order they occurred in. |event_list| must remain valid for the
+  // lifetime of this thread.
+  explicit CaptureToEventList(EventList* event_list)
+      : Thread("none"),
+        event_list_(event_list) {
+  }
+
+  ~CaptureToEventList() override { Stop(); }
+
+  void Init() override { event_list_->push_back(THREAD_EVENT_INIT); }
+
+  void CleanUp() override { event_list_->push_back(THREAD_EVENT_CLEANUP); }
+
+ private:
+  EventList* event_list_;
+
+  DISALLOW_COPY_AND_ASSIGN(CaptureToEventList);
+};
+
+// Observer that writes a value into |event_list| when a message loop has been
+// destroyed.
+class CapturingDestructionObserver
+    : public base::MessageLoopCurrent::DestructionObserver {
+ public:
+  // |event_list| must remain valid throughout the observer's lifetime.
+  explicit CapturingDestructionObserver(EventList* event_list)
+      : event_list_(event_list) {
+  }
+
+  // DestructionObserver implementation:
+  void WillDestroyCurrentMessageLoop() override {
+    event_list_->push_back(THREAD_EVENT_MESSAGE_LOOP_DESTROYED);
+    event_list_ = nullptr;
+  }
+
+ private:
+  EventList* event_list_;
+
+  DISALLOW_COPY_AND_ASSIGN(CapturingDestructionObserver);
+};
+
+// Task that adds a destruction observer to the current message loop.
+void RegisterDestructionObserver(
+    base::MessageLoopCurrent::DestructionObserver* observer) {
+  base::MessageLoopCurrent::Get()->AddDestructionObserver(observer);
+}
+
+// Task that calls GetThreadId() on |thread|, stores the result into |id|, and
+// then signals |event|.
+void ReturnThreadId(base::Thread* thread,
+                    base::PlatformThreadId* id,
+                    base::WaitableEvent* event) {
+  *id = thread->GetThreadId();
+  event->Signal();
+}
+
+}  // namespace
+
+TEST_F(ThreadTest, StartWithOptions_StackSize) {
+  Thread a("StartWithStackSize");
+  // Ensure that the thread can work with only 12 KiB of stack and still
+  // process a message. At the same time, scale with the bitness of the
+  // system, since 12 KiB is definitely not enough on 64-bit platforms.
+  // 12 KiB = 3072 pointer-sized slots on a 32-bit system (12288 / 4), so
+  // scale the stack size based on that slot count.
+  Thread::Options options;
+#if defined(ADDRESS_SANITIZER) || !defined(NDEBUG)
+  // ASan bloats the stack variables and overflows the 3072 slot stack. Some
+  // debug builds also grow the stack too much.
+  options.stack_size = 2 * 3072 * sizeof(uintptr_t);
+#else
+  options.stack_size = 3072 * sizeof(uintptr_t);
+#endif
+  EXPECT_TRUE(a.StartWithOptions(options));
+  EXPECT_TRUE(a.message_loop());
+  EXPECT_TRUE(a.IsRunning());
+
+  base::WaitableEvent event(base::WaitableEvent::ResetPolicy::AUTOMATIC,
+                            base::WaitableEvent::InitialState::NOT_SIGNALED);
+  a.task_runner()->PostTask(
+      FROM_HERE,
+      base::BindOnce(&base::WaitableEvent::Signal, base::Unretained(&event)));
+  event.Wait();
+}
+
+// Intentional test-only race for otherwise untestable code, won't fix.
+// https://crbug.com/634383
+#if !defined(THREAD_SANITIZER)
+TEST_F(ThreadTest, StartWithOptions_NonJoinable) {
+  Thread* a = new Thread("StartNonJoinable");
+  // Non-joinable threads have to be leaked for now (see
+  // Thread::Options::joinable for details).
+  ANNOTATE_LEAKING_OBJECT_PTR(a);
+
+  Thread::Options options;
+  options.joinable = false;
+  EXPECT_TRUE(a->StartWithOptions(options));
+  EXPECT_TRUE(a->message_loop());
+  EXPECT_TRUE(a->IsRunning());
+
+  // Without this call this test is racy. The above IsRunning() succeeds only
+  // because of an early-return condition that holds between Start() and
+  // StopSoon(). After invoking StopSoon() below, that early-return condition
+  // no longer holds and the real |is_running_| bit has to be checked; it
+  // could still be false if the message loop hasn't actually started yet.
+  // This is only a requirement for this test because the non-joinable
+  // property forces it to use StopSoon() rather than wait for a complete
+  // Stop().
+  EXPECT_TRUE(a->WaitUntilThreadStarted());
+
+  // Make the thread block until |block_event| is signaled.
+  base::WaitableEvent block_event(
+      base::WaitableEvent::ResetPolicy::AUTOMATIC,
+      base::WaitableEvent::InitialState::NOT_SIGNALED);
+  a->task_runner()->PostTask(FROM_HERE,
+                             base::BindOnce(&base::WaitableEvent::Wait,
+                                            base::Unretained(&block_event)));
+
+  a->StopSoon();
+  EXPECT_TRUE(a->IsRunning());
+
+  // Unblock the task and give a bit of extra time to unwind QuitWhenIdle().
+  block_event.Signal();
+  base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(20));
+
+  // The thread should now have stopped on its own.
+  EXPECT_FALSE(a->IsRunning());
+}
+#endif
+
+TEST_F(ThreadTest, TwoTasksOnJoinableThread) {
+  bool was_invoked = false;
+  {
+    Thread a("TwoTasksOnJoinableThread");
+    EXPECT_TRUE(a.Start());
+    EXPECT_TRUE(a.message_loop());
+
+    // Test that all events are dispatched before the Thread object is
+    // destroyed.  We do this by dispatching a sleep event before the
+    // event that will toggle our sentinel value.
+    a.task_runner()->PostTask(
+        FROM_HERE, base::BindOnce(static_cast<void (*)(base::TimeDelta)>(
+                                      &base::PlatformThread::Sleep),
+                                  base::TimeDelta::FromMilliseconds(20)));
+    a.task_runner()->PostTask(FROM_HERE,
+                              base::BindOnce(&ToggleValue, &was_invoked));
+  }
+  EXPECT_TRUE(was_invoked);
+}
+
+TEST_F(ThreadTest, DestroyWhileRunningIsSafe) {
+  Thread a("DestroyWhileRunningIsSafe");
+  EXPECT_TRUE(a.Start());
+  EXPECT_TRUE(a.WaitUntilThreadStarted());
+}
+
+// TODO(gab): Enable this test when destroying a non-joinable Thread instance
+// is supported (proposal @ https://crbug.com/629139#c14).
+TEST_F(ThreadTest, DISABLED_DestroyWhileRunningNonJoinableIsSafe) {
+  {
+    Thread a("DestroyWhileRunningNonJoinableIsSafe");
+    Thread::Options options;
+    options.joinable = false;
+    EXPECT_TRUE(a.StartWithOptions(options));
+    EXPECT_TRUE(a.WaitUntilThreadStarted());
+  }
+
+  // Attempt to catch use-after-frees from the non-joinable thread in the
+  // scope of this test if any.
+  base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(20));
+}
+
+TEST_F(ThreadTest, StopSoon) {
+  Thread a("StopSoon");
+  EXPECT_TRUE(a.Start());
+  EXPECT_TRUE(a.message_loop());
+  EXPECT_TRUE(a.IsRunning());
+  a.StopSoon();
+  a.Stop();
+  EXPECT_FALSE(a.message_loop());
+  EXPECT_FALSE(a.IsRunning());
+}
+
+TEST_F(ThreadTest, StopTwiceNop) {
+  Thread a("StopTwiceNop");
+  EXPECT_TRUE(a.Start());
+  EXPECT_TRUE(a.message_loop());
+  EXPECT_TRUE(a.IsRunning());
+  a.StopSoon();
+  // Calling StopSoon() a second time should be a nop.
+  a.StopSoon();
+  a.Stop();
+  // Same with Stop().
+  a.Stop();
+  EXPECT_FALSE(a.message_loop());
+  EXPECT_FALSE(a.IsRunning());
+  // Calling them when not running should also nop.
+  a.StopSoon();
+  a.Stop();
+}
+
+// TODO(gab): Enable this test in conjunction with re-enabling the sequence
+// check in Thread::Stop() as part of http://crbug.com/629139.
+TEST_F(ThreadTest, DISABLED_StopOnNonOwningThreadIsDeath) {
+  Thread a("StopOnNonOwningThreadDeath");
+  EXPECT_TRUE(a.StartAndWaitForTesting());
+
+  Thread b("NonOwningThread");
+  b.Start();
+  EXPECT_DCHECK_DEATH({
+    // Stopping |a| on |b| isn't allowed.
+    b.task_runner()->PostTask(
+        FROM_HERE, base::BindOnce(&Thread::Stop, base::Unretained(&a)));
+    // Block here so the DCHECK on |b| always happens in this scope.
+    base::PlatformThread::Sleep(base::TimeDelta::Max());
+  });
+}
+
+TEST_F(ThreadTest, TransferOwnershipAndStop) {
+  std::unique_ptr<Thread> a =
+      std::make_unique<Thread>("TransferOwnershipAndStop");
+  EXPECT_TRUE(a->StartAndWaitForTesting());
+  EXPECT_TRUE(a->IsRunning());
+
+  Thread b("TakingOwnershipThread");
+  b.Start();
+
+  base::WaitableEvent event(base::WaitableEvent::ResetPolicy::MANUAL,
+                            base::WaitableEvent::InitialState::NOT_SIGNALED);
+
+  // a->DetachFromSequence() should allow |b| to use |a|'s Thread API.
+  a->DetachFromSequence();
+  b.task_runner()->PostTask(
+      FROM_HERE, base::BindOnce(
+                     [](std::unique_ptr<Thread> thread_to_stop,
+                        base::WaitableEvent* event_to_signal) -> void {
+                       thread_to_stop->Stop();
+                       event_to_signal->Signal();
+                     },
+                     std::move(a), base::Unretained(&event)));
+
+  event.Wait();
+}
+
+TEST_F(ThreadTest, StartTwice) {
+  Thread a("StartTwice");
+
+  EXPECT_FALSE(a.message_loop());
+  EXPECT_FALSE(a.IsRunning());
+
+  EXPECT_TRUE(a.Start());
+  EXPECT_TRUE(a.message_loop());
+  EXPECT_TRUE(a.IsRunning());
+
+  a.Stop();
+  EXPECT_FALSE(a.message_loop());
+  EXPECT_FALSE(a.IsRunning());
+
+  EXPECT_TRUE(a.Start());
+  EXPECT_TRUE(a.message_loop());
+  EXPECT_TRUE(a.IsRunning());
+
+  a.Stop();
+  EXPECT_FALSE(a.message_loop());
+  EXPECT_FALSE(a.IsRunning());
+}
+
+// Intentional test-only race for otherwise untestable code, won't fix.
+// https://crbug.com/634383
+#if !defined(THREAD_SANITIZER)
+TEST_F(ThreadTest, StartTwiceNonJoinableNotAllowed) {
+  LOG(ERROR) << __FUNCTION__;
+  Thread* a = new Thread("StartTwiceNonJoinable");
+  // Non-joinable threads have to be leaked for now (see
+  // Thread::Options::joinable for details).
+  ANNOTATE_LEAKING_OBJECT_PTR(a);
+
+  Thread::Options options;
+  options.joinable = false;
+  EXPECT_TRUE(a->StartWithOptions(options));
+  EXPECT_TRUE(a->message_loop());
+  EXPECT_TRUE(a->IsRunning());
+
+  // Signaled when last task on |a| is processed.
+  base::WaitableEvent last_task_event(
+      base::WaitableEvent::ResetPolicy::AUTOMATIC,
+      base::WaitableEvent::InitialState::NOT_SIGNALED);
+  a->task_runner()->PostTask(
+      FROM_HERE, base::BindOnce(&base::WaitableEvent::Signal,
+                                base::Unretained(&last_task_event)));
+
+  // StopSoon() is non-blocking, Yield() to |a|, wait for last task to be
+  // processed and a little more for QuitWhenIdle() to unwind before considering
+  // the thread "stopped".
+  a->StopSoon();
+  base::PlatformThread::YieldCurrentThread();
+  last_task_event.Wait();
+  base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(20));
+
+  // This test assumes that the above was sufficient to let the thread fully
+  // stop.
+  ASSERT_FALSE(a->IsRunning());
+
+  // Restarting it should not be allowed.
+  EXPECT_DCHECK_DEATH(a->Start());
+}
+#endif
+
+TEST_F(ThreadTest, ThreadName) {
+  Thread a("ThreadName");
+  EXPECT_TRUE(a.Start());
+  EXPECT_EQ("ThreadName", a.thread_name());
+}
+
+TEST_F(ThreadTest, ThreadId) {
+  Thread a("ThreadId0");
+  Thread b("ThreadId1");
+  a.Start();
+  b.Start();
+
+  // Post a task that calls GetThreadId() on the created thread.
+  base::WaitableEvent event(base::WaitableEvent::ResetPolicy::AUTOMATIC,
+                            base::WaitableEvent::InitialState::NOT_SIGNALED);
+  base::PlatformThreadId id_from_new_thread;
+  a.task_runner()->PostTask(
+      FROM_HERE,
+      base::BindOnce(ReturnThreadId, &a, &id_from_new_thread, &event));
+
+  // Call GetThreadId() on the current thread before calling event.Wait() so
+  // that this test can find a race issue with TSAN.
+  base::PlatformThreadId id_from_current_thread = a.GetThreadId();
+
+  // Check if GetThreadId() returns consistent value in both threads.
+  event.Wait();
+  EXPECT_EQ(id_from_current_thread, id_from_new_thread);
+
+  // A started thread should have a valid ID.
+  EXPECT_NE(base::kInvalidThreadId, a.GetThreadId());
+  EXPECT_NE(base::kInvalidThreadId, b.GetThreadId());
+
+  // Each thread should have a different thread ID.
+  EXPECT_NE(a.GetThreadId(), b.GetThreadId());
+}
+
+TEST_F(ThreadTest, ThreadIdWithRestart) {
+  Thread a("ThreadIdWithRestart");
+  base::PlatformThreadId previous_id = base::kInvalidThreadId;
+
+  for (size_t i = 0; i < 16; ++i) {
+    EXPECT_TRUE(a.Start());
+    base::PlatformThreadId current_id = a.GetThreadId();
+    EXPECT_NE(previous_id, current_id);
+    previous_id = current_id;
+    a.Stop();
+  }
+}
+
+// Make sure Init() is called after Start() and before
+// StartAndWaitForTesting() returns.
+TEST_F(ThreadTest, SleepInsideInit) {
+  SleepInsideInitThread t;
+  EXPECT_FALSE(t.InitCalled());
+  t.StartAndWaitForTesting();
+  EXPECT_TRUE(t.InitCalled());
+}
+
+// Make sure that the destruction sequence is:
+//
+//  (1) Thread::CleanUp()
+//  (2) MessageLoop::~MessageLoop()
+//      MessageLoopCurrent::DestructionObservers called.
+TEST_F(ThreadTest, CleanUp) {
+  EventList captured_events;
+  CapturingDestructionObserver loop_destruction_observer(&captured_events);
+
+  {
+    // Start a thread which writes its event into |captured_events|.
+    CaptureToEventList t(&captured_events);
+    EXPECT_TRUE(t.Start());
+    EXPECT_TRUE(t.message_loop());
+    EXPECT_TRUE(t.IsRunning());
+
+    // Register an observer that writes into |captured_events| once the
+    // thread's message loop is destroyed.
+    t.task_runner()->PostTask(
+        FROM_HERE,
+        base::BindOnce(&RegisterDestructionObserver,
+                       base::Unretained(&loop_destruction_observer)));
+
+    // Upon leaving this scope, the thread is deleted.
+  }
+
+  // Check the order of events during shutdown.
+  ASSERT_EQ(static_cast<size_t>(THREAD_NUM_EVENTS), captured_events.size());
+  EXPECT_EQ(THREAD_EVENT_INIT, captured_events[0]);
+  EXPECT_EQ(THREAD_EVENT_CLEANUP, captured_events[1]);
+  EXPECT_EQ(THREAD_EVENT_MESSAGE_LOOP_DESTROYED, captured_events[2]);
+}
+
+TEST_F(ThreadTest, ThreadNotStarted) {
+  Thread a("Inert");
+  EXPECT_FALSE(a.task_runner());
+}
+
+TEST_F(ThreadTest, MultipleWaitUntilThreadStarted) {
+  Thread a("MultipleWaitUntilThreadStarted");
+  EXPECT_TRUE(a.Start());
+  // It's OK to call WaitUntilThreadStarted() multiple times.
+  EXPECT_TRUE(a.WaitUntilThreadStarted());
+  EXPECT_TRUE(a.WaitUntilThreadStarted());
+}
+
+TEST_F(ThreadTest, FlushForTesting) {
+  Thread a("FlushForTesting");
+
+  // Flushing a non-running thread should be a no-op.
+  a.FlushForTesting();
+
+  ASSERT_TRUE(a.Start());
+
+  // Flushing a thread with no tasks shouldn't block.
+  a.FlushForTesting();
+
+  constexpr base::TimeDelta kSleepPerTestTask =
+      base::TimeDelta::FromMilliseconds(50);
+  constexpr size_t kNumSleepTasks = 5;
+
+  const base::TimeTicks ticks_before_post = base::TimeTicks::Now();
+
+  for (size_t i = 0; i < kNumSleepTasks; ++i) {
+    a.task_runner()->PostTask(
+        FROM_HERE,
+        base::BindOnce(&base::PlatformThread::Sleep, kSleepPerTestTask));
+  }
+
+  // All tasks should have executed, as reflected by the elapsed time.
+  a.FlushForTesting();
+  EXPECT_GE(base::TimeTicks::Now() - ticks_before_post,
+            kNumSleepTasks * kSleepPerTestTask);
+
+  a.Stop();
+
+  // Flushing a stopped thread should be a no-op.
+  a.FlushForTesting();
+}
+
+namespace {
+
+// A Thread which uses a MessageLoop on the stack. It won't start a real
+// underlying thread (instead its messages can be processed by a RunLoop on the
+// stack).
+class ExternalMessageLoopThread : public Thread {
+ public:
+  ExternalMessageLoopThread() : Thread("ExternalMessageLoopThread") {}
+
+  ~ExternalMessageLoopThread() override { Stop(); }
+
+  void InstallMessageLoop() { SetMessageLoop(&external_message_loop_); }
+
+  void VerifyUsingExternalMessageLoop(
+      bool expected_using_external_message_loop) {
+    EXPECT_EQ(expected_using_external_message_loop,
+              using_external_message_loop());
+  }
+
+ private:
+  base::MessageLoop external_message_loop_;
+
+  DISALLOW_COPY_AND_ASSIGN(ExternalMessageLoopThread);
+};
+
+}  // namespace
+
+TEST_F(ThreadTest, ExternalMessageLoop) {
+  ExternalMessageLoopThread a;
+  EXPECT_FALSE(a.message_loop());
+  EXPECT_FALSE(a.IsRunning());
+  a.VerifyUsingExternalMessageLoop(false);
+
+  a.InstallMessageLoop();
+  EXPECT_TRUE(a.message_loop());
+  EXPECT_TRUE(a.IsRunning());
+  a.VerifyUsingExternalMessageLoop(true);
+
+  bool ran = false;
+  a.task_runner()->PostTask(
+      FROM_HERE, base::BindOnce([](bool* toggled) { *toggled = true; }, &ran));
+  base::RunLoop().RunUntilIdle();
+  EXPECT_TRUE(ran);
+
+  a.Stop();
+  EXPECT_FALSE(a.message_loop());
+  EXPECT_FALSE(a.IsRunning());
+  a.VerifyUsingExternalMessageLoop(true);
+
+  // Confirm that running any remaining tasks posted from Stop() goes smoothly
+  // (e.g. https://codereview.chromium.org/2135413003/#ps300001 crashed if
+  // StopSoon() posted Thread::ThreadQuitHelper() while |run_loop_| was null).
+  base::RunLoop().RunUntilIdle();
+}
diff --git a/base/threading/watchdog.cc b/base/threading/watchdog.cc
new file mode 100644
index 0000000..6c384b1
--- /dev/null
+++ b/base/threading/watchdog.cc
@@ -0,0 +1,187 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/watchdog.h"
+
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/threading/platform_thread.h"
+
+namespace base {
+
+namespace {
+
+// When the debugger breaks (when we alarm), all the other alarms that are
+// armed will expire (also alarm).  To diminish this effect, we track any
+// delay due to debugger breaks, and we *try* to adjust the effective start
+// time of other alarms to step past the debugging break.
+// Without this safety net, any alarm will typically trigger a host of
+// follow-on alarms from callers that specify old times.
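+//
+// For example (an illustrative scenario, not taken from the code base):
+// suppose watchdogs A and B are both armed and A alarms, and the developer
+// sits at a breakpoint in A's Alarm() for 30 seconds. Without compensation,
+// B would then alarm immediately, purely because of that pause. With the
+// bookkeeping below, B's effective start time is pushed forward by the
+// recorded 30-second debugger delay, so B only alarms if its watched code
+// is genuinely late.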
+
+struct StaticData {
+  // Lock protecting access to the static data below.
+  Lock lock;
+
+  // When did we last alarm and get stuck (for a while) in a debugger?
+  TimeTicks last_debugged_alarm_time;
+
+  // How long did we sit on a break in the debugger?
+  TimeDelta last_debugged_alarm_delay;
+};
+
+StaticData* GetStaticData() {
+  static auto* static_data = new StaticData();
+  return static_data;
+}
+
+}  // namespace
+
+// Start the watchdog thread running in a DISARMED state.
+Watchdog::Watchdog(const TimeDelta& duration,
+                   const std::string& thread_watched_name,
+                   bool enabled)
+  : enabled_(enabled),
+    lock_(),
+    condition_variable_(&lock_),
+    state_(DISARMED),
+    duration_(duration),
+    thread_watched_name_(thread_watched_name),
+    delegate_(this) {
+  if (!enabled_)
+    return;  // Don't start the thread, or do anything else, really.
+  enabled_ = PlatformThread::Create(0,  // Default stack size.
+                                    &delegate_,
+                                    &handle_);
+  DCHECK(enabled_);
+}
+
+// Notify watchdog thread, and wait for it to finish up.
+Watchdog::~Watchdog() {
+  if (!enabled_)
+    return;
+  if (!IsJoinable())
+    Cleanup();
+  condition_variable_.Signal();
+  PlatformThread::Join(handle_);
+}
+
+void Watchdog::Cleanup() {
+  if (!enabled_)
+    return;
+  {
+    AutoLock lock(lock_);
+    state_ = SHUTDOWN;
+  }
+  condition_variable_.Signal();
+}
+
+bool Watchdog::IsJoinable() {
+  if (!enabled_)
+    return true;
+  AutoLock lock(lock_);
+  return (state_ == JOINABLE);
+}
+
+void Watchdog::Arm() {
+  ArmAtStartTime(TimeTicks::Now());
+}
+
+void Watchdog::ArmSomeTimeDeltaAgo(const TimeDelta& time_delta) {
+  ArmAtStartTime(TimeTicks::Now() - time_delta);
+}
+
+// Start clock for watchdog.
+void Watchdog::ArmAtStartTime(const TimeTicks start_time) {
+  {
+    AutoLock lock(lock_);
+    start_time_ = start_time;
+    state_ = ARMED;
+  }
+  // Force the watchdog thread to wake up, so that it goes back to sleep
+  // with the proper remaining duration.
+  condition_variable_.Signal();
+}
+
+// Disable watchdog so that it won't do anything when time expires.
+void Watchdog::Disarm() {
+  AutoLock lock(lock_);
+  state_ = DISARMED;
+  // We don't need to signal, as the watchdog will eventually wake up, and it
+  // will check its state and time, and act accordingly.
+}
+
+void Watchdog::Alarm() {
+  DVLOG(1) << "Watchdog alarmed for " << thread_watched_name_;
+}
+
+//------------------------------------------------------------------------------
+// Internal private methods that the watchdog thread uses.
+
+void Watchdog::ThreadDelegate::ThreadMain() {
+  SetThreadName();
+  TimeDelta remaining_duration;
+  StaticData* static_data = GetStaticData();
+  while (1) {
+    AutoLock lock(watchdog_->lock_);
+    while (DISARMED == watchdog_->state_)
+      watchdog_->condition_variable_.Wait();
+    if (SHUTDOWN == watchdog_->state_) {
+      watchdog_->state_ = JOINABLE;
+      return;
+    }
+    DCHECK(ARMED == watchdog_->state_);
+    remaining_duration = watchdog_->duration_ -
+        (TimeTicks::Now() - watchdog_->start_time_);
+    if (remaining_duration.InMilliseconds() > 0) {
+      // Spurious wake?  Timer drift?  Go back to sleep for the remaining time.
+      watchdog_->condition_variable_.TimedWait(remaining_duration);
+      continue;
+    }
+    // We overslept, so this seems like a real alarm.
+    // Watch out for a user that stopped the debugger on a different alarm!
+    {
+      AutoLock static_lock(static_data->lock);
+      if (static_data->last_debugged_alarm_time > watchdog_->start_time_) {
+        // False alarm: we started our clock before the debugger break (last
+        // alarm time).
+        watchdog_->start_time_ += static_data->last_debugged_alarm_delay;
+        if (static_data->last_debugged_alarm_time > watchdog_->start_time_)
+          // Too many alarms must have taken place.
+          watchdog_->state_ = DISARMED;
+        continue;
+      }
+    }
+    watchdog_->state_ = DISARMED;  // Only alarm at most once.
+    TimeTicks last_alarm_time = TimeTicks::Now();
+    {
+      AutoUnlock unlock(watchdog_->lock_);
+      watchdog_->Alarm();  // Set a break point here to debug on alarms.
+    }
+    TimeDelta last_alarm_delay = TimeTicks::Now() - last_alarm_time;
+    if (last_alarm_delay <= TimeDelta::FromMilliseconds(2))
+      continue;
+    // Ignore race of two alarms/breaks going off at roughly the same time.
+    AutoLock static_lock(static_data->lock);
+    // This was a real debugger break.
+    static_data->last_debugged_alarm_time = last_alarm_time;
+    static_data->last_debugged_alarm_delay = last_alarm_delay;
+  }
+}
+
+void Watchdog::ThreadDelegate::SetThreadName() const {
+  std::string name = watchdog_->thread_watched_name_ + " Watchdog";
+  PlatformThread::SetName(name);
+  DVLOG(1) << "Watchdog active: " << name;
+}
+
+// static
+void Watchdog::ResetStaticData() {
+  StaticData* static_data = GetStaticData();
+  AutoLock lock(static_data->lock);
+  // See https://crbug.com/734232 for why this cannot be zero-initialized.
+  static_data->last_debugged_alarm_time = TimeTicks::Min();
+  static_data->last_debugged_alarm_delay = TimeDelta();
+}
+
+}  // namespace base
diff --git a/base/threading/watchdog.h b/base/threading/watchdog.h
new file mode 100644
index 0000000..f806984
--- /dev/null
+++ b/base/threading/watchdog.h
@@ -0,0 +1,96 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// The Watchdog class creates a second thread that can Alarm if a specific
+// duration of time passes without proper attention.  The duration of time is
+// specified at construction time.  The Watchdog may be used many times by
+// simply calling Arm() (to start timing) and Disarm() (to reset the timer).
+// The Watchdog is typically used under a debugger, where the stack traces on
+// other threads can be examined if/when the Watchdog alarms.
+
+// Some watchdogs will be enabled or disabled via command line switches. To
+// facilitate such code, an "enabled" argument for the constructor can be used
+// to permanently disable the watchdog.  Disabled watchdogs don't even spawn
+// a second thread, and their method calls (Arm() and Disarm()) return very
+// quickly.
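+
+// A minimal usage sketch (illustrative only; DoExpensiveWork() stands in for
+// whatever operation the caller wants to time):
+//
+//   base::Watchdog watchdog(base::TimeDelta::FromSeconds(10), "WatchedThread",
+//                           true /* enabled */);
+//   watchdog.Arm();      // Start timing now.
+//   DoExpensiveWork();   // Alarm() fires if this takes over 10 seconds.
+//   watchdog.Disarm();   // Done in time; reset so no alarm fires.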
+
+#ifndef BASE_THREADING_WATCHDOG_H_
+#define BASE_THREADING_WATCHDOG_H_
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
+
+namespace base {
+
+class BASE_EXPORT Watchdog {
+ public:
+  // Constructor specifies how long the Watchdog will wait before alarming.
+  Watchdog(const TimeDelta& duration,
+           const std::string& thread_watched_name,
+           bool enabled);
+  virtual ~Watchdog();
+
+  // Notify watchdog thread to finish up. Sets the state_ to SHUTDOWN.
+  void Cleanup();
+
+  // Returns true if state_ is JOINABLE (which indicates that the watchdog
+  // thread has exited).
+  bool IsJoinable();
+
+  // Start timing, and alarm when time expires (unless we're Disarm()ed).
+  void Arm();  // Arm  starting now.
+  void ArmSomeTimeDeltaAgo(const TimeDelta& time_delta);
+  void ArmAtStartTime(const TimeTicks start_time);
+
+  // Reset time, and do not set off the alarm.
+  void Disarm();
+
+  // Alarm is called if the time expires after an Arm() without someone calling
+  // Disarm().  This method can be overridden to create testable classes.
+  virtual void Alarm();
+
+  // Reset static data to initial state. Useful for tests, to ensure
+  // they are independent.
+  static void ResetStaticData();
+
+ private:
+  class ThreadDelegate : public PlatformThread::Delegate {
+   public:
+    explicit ThreadDelegate(Watchdog* watchdog) : watchdog_(watchdog) {
+    }
+    void ThreadMain() override;
+
+   private:
+    void SetThreadName() const;
+
+    Watchdog* watchdog_;
+  };
+
+  enum State { ARMED, DISARMED, SHUTDOWN, JOINABLE };
+
+  bool enabled_;
+
+  Lock lock_;  // Mutex for state_.
+  ConditionVariable condition_variable_;
+  State state_;
+  const TimeDelta duration_;  // How long after start_time_ do we alarm?
+  const std::string thread_watched_name_;
+  PlatformThreadHandle handle_;
+  ThreadDelegate delegate_;  // Store it, because it must outlive the thread.
+
+  TimeTicks start_time_;  // Start of epoch, and alarm after duration_.
+
+  DISALLOW_COPY_AND_ASSIGN(Watchdog);
+};
+
+}  // namespace base
+
+#endif  // BASE_THREADING_WATCHDOG_H_
diff --git a/base/threading/watchdog_unittest.cc b/base/threading/watchdog_unittest.cc
new file mode 100644
index 0000000..f534a86
--- /dev/null
+++ b/base/threading/watchdog_unittest.cc
@@ -0,0 +1,141 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/watchdog.h"
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/synchronization/spin_wait.h"
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+//------------------------------------------------------------------------------
+// Provide a derived class to facilitate testing.
+
+class WatchdogCounter : public Watchdog {
+ public:
+  WatchdogCounter(const TimeDelta& duration,
+                  const std::string& thread_watched_name,
+                  bool enabled)
+      : Watchdog(duration, thread_watched_name, enabled),
+        alarm_counter_(0) {
+  }
+
+  ~WatchdogCounter() override = default;
+
+  void Alarm() override {
+    alarm_counter_++;
+    Watchdog::Alarm();
+  }
+
+  int alarm_counter() { return alarm_counter_; }
+
+ private:
+  int alarm_counter_;
+
+  DISALLOW_COPY_AND_ASSIGN(WatchdogCounter);
+};
+
+class WatchdogTest : public testing::Test {
+ public:
+  void SetUp() override { Watchdog::ResetStaticData(); }
+};
+
+}  // namespace
+
+//------------------------------------------------------------------------------
+// Actual tests
+
+// Minimal constructor/destructor test.
+TEST_F(WatchdogTest, StartupShutdownTest) {
+  Watchdog watchdog1(TimeDelta::FromMilliseconds(300), "Disabled", false);
+  Watchdog watchdog2(TimeDelta::FromMilliseconds(300), "Enabled", true);
+}
+
+// Test ability to call Arm and Disarm repeatedly.
+TEST_F(WatchdogTest, ArmDisarmTest) {
+  Watchdog watchdog1(TimeDelta::FromMilliseconds(300), "Disabled", false);
+  watchdog1.Arm();
+  watchdog1.Disarm();
+  watchdog1.Arm();
+  watchdog1.Disarm();
+
+  Watchdog watchdog2(TimeDelta::FromMilliseconds(300), "Enabled", true);
+  watchdog2.Arm();
+  watchdog2.Disarm();
+  watchdog2.Arm();
+  watchdog2.Disarm();
+}
+
+// Make sure a basic alarm fires when the time has expired.
+TEST_F(WatchdogTest, AlarmTest) {
+  WatchdogCounter watchdog(TimeDelta::FromMilliseconds(10), "Enabled", true);
+  watchdog.Arm();
+  SPIN_FOR_TIMEDELTA_OR_UNTIL_TRUE(TimeDelta::FromMinutes(5),
+                                   watchdog.alarm_counter() > 0);
+  EXPECT_EQ(1, watchdog.alarm_counter());
+}
+
+// Make sure an alarm fires immediately when armed with a start time that is
+// already in the past.
+TEST_F(WatchdogTest, AlarmPriorTimeTest) {
+  WatchdogCounter watchdog(TimeDelta(), "Enabled2", true);
+  // Set a time in the past.
+  watchdog.ArmSomeTimeDeltaAgo(TimeDelta::FromSeconds(2));
+  // It should instantly go off, but certainly in less than 5 minutes.
+  SPIN_FOR_TIMEDELTA_OR_UNTIL_TRUE(TimeDelta::FromMinutes(5),
+                                   watchdog.alarm_counter() > 0);
+
+  EXPECT_EQ(1, watchdog.alarm_counter());
+}
+
+// Make sure a disabled watchdog does nothing, even if we arm it.
+TEST_F(WatchdogTest, ConstructorDisabledTest) {
+  WatchdogCounter watchdog(TimeDelta::FromMilliseconds(10), "Disabled", false);
+  watchdog.Arm();
+  // Alarm should not fire, as it was disabled.
+  PlatformThread::Sleep(TimeDelta::FromMilliseconds(500));
+  EXPECT_EQ(0, watchdog.alarm_counter());
+}
+
+// Make sure Disarming will prevent firing, even after Arming.
+TEST_F(WatchdogTest, DisarmTest) {
+  WatchdogCounter watchdog(TimeDelta::FromSeconds(1), "Enabled3", true);
+
+  TimeTicks start = TimeTicks::Now();
+  watchdog.Arm();
+  // Sleep a bit, but not past the alarm point.
+  PlatformThread::Sleep(TimeDelta::FromMilliseconds(100));
+  watchdog.Disarm();
+  TimeTicks end = TimeTicks::Now();
+
+  if (end - start > TimeDelta::FromMilliseconds(500)) {
+    LOG(WARNING) << "100ms sleep took over 500ms, making the results of this "
+                 << "timing-sensitive test suspicious.  Aborting now.";
+    return;
+  }
+
+  // Alarm should not have fired before it was disarmed.
+  EXPECT_EQ(0, watchdog.alarm_counter());
+
+  // Sleep past the point where it would have fired if it wasn't disarmed,
+  // and verify that it didn't fire.
+  PlatformThread::Sleep(TimeDelta::FromSeconds(1));
+  EXPECT_EQ(0, watchdog.alarm_counter());
+
+  // ...but even after disarming, we can still use the alarm...
+  // Set a time greater than the timeout into the past.
+  watchdog.ArmSomeTimeDeltaAgo(TimeDelta::FromSeconds(10));
+  // It should almost instantly go off, but certainly in less than 5 minutes.
+  SPIN_FOR_TIMEDELTA_OR_UNTIL_TRUE(TimeDelta::FromMinutes(5),
+                                   watchdog.alarm_counter() > 0);
+
+  EXPECT_EQ(1, watchdog.alarm_counter());
+}
+
+}  // namespace base
diff --git a/base/time/OWNERS b/base/time/OWNERS
new file mode 100644
index 0000000..ff0520a
--- /dev/null
+++ b/base/time/OWNERS
@@ -0,0 +1,3 @@
+miu@chromium.org
+
+# COMPONENT: Internals>Core
diff --git a/base/time/clock.cc b/base/time/clock.cc
new file mode 100644
index 0000000..9e3f271
--- /dev/null
+++ b/base/time/clock.cc
@@ -0,0 +1,11 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/time/clock.h"
+
+namespace base {
+
+Clock::~Clock() = default;
+
+}  // namespace base
diff --git a/base/time/clock.h b/base/time/clock.h
new file mode 100644
index 0000000..166cb2e
--- /dev/null
+++ b/base/time/clock.h
@@ -0,0 +1,40 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TIME_CLOCK_H_
+#define BASE_TIME_CLOCK_H_
+
+#include "base/base_export.h"
+#include "base/time/time.h"
+
+namespace base {
+
+// A Clock is an interface for objects that vend Times.  It is intended to
+// make it possible to test the time-dependent behavior of classes.
+//
+// See DefaultClock (base/time/default_clock.h) for the default
+// implementation that simply uses Time::Now().
+//
+// (An implementation that uses Time::NowFromSystemTime() should be added
+// as needed.)
+//
+// See SimpleTestClock (base/test/simple_test_clock.h) for a simple
+// test implementation.
+//
+// See TickClock (base/time/tick_clock.h) for the equivalent interface for
+// TimeTicks.
+class BASE_EXPORT Clock {
+ public:
+  virtual ~Clock();
+
+  // Now() must be safe to call from any thread.  The caller cannot
+  // make any ordering assumptions about the returned Time.  For
+  // example, the system clock may change to an earlier time.
+  virtual Time Now() const = 0;
+};
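+
+// A minimal sketch (illustrative only; FileExpirer and its members are
+// hypothetical) of injecting a Clock so time-dependent logic can be tested
+// with a fake clock:
+//
+//   class FileExpirer {
+//    public:
+//     explicit FileExpirer(Clock* clock) : clock_(clock) {}
+//     bool IsExpired(Time deadline) const { return clock_->Now() > deadline; }
+//    private:
+//     Clock* clock_;  // DefaultClock::GetInstance() in production.
+//   };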
+
+}  // namespace base
+
+#endif  // BASE_TIME_CLOCK_H_
diff --git a/base/time/default_clock.cc b/base/time/default_clock.cc
new file mode 100644
index 0000000..aa08f52
--- /dev/null
+++ b/base/time/default_clock.cc
@@ -0,0 +1,23 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/time/default_clock.h"
+
+#include "base/lazy_instance.h"
+
+namespace base {
+
+DefaultClock::~DefaultClock() = default;
+
+Time DefaultClock::Now() const {
+  return Time::Now();
+}
+
+// static
+DefaultClock* DefaultClock::GetInstance() {
+  static LazyInstance<DefaultClock>::Leaky instance = LAZY_INSTANCE_INITIALIZER;
+  return instance.Pointer();
+}
+
+}  // namespace base
diff --git a/base/time/default_clock.h b/base/time/default_clock.h
new file mode 100644
index 0000000..a0e175b
--- /dev/null
+++ b/base/time/default_clock.h
@@ -0,0 +1,28 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TIME_DEFAULT_CLOCK_H_
+#define BASE_TIME_DEFAULT_CLOCK_H_
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/time/clock.h"
+
+namespace base {
+
+// DefaultClock is a Clock implementation that uses Time::Now().
+class BASE_EXPORT DefaultClock : public Clock {
+ public:
+  ~DefaultClock() override;
+
+  // Simply returns Time::Now().
+  Time Now() const override;
+
+  // Returns a shared instance of DefaultClock. This is thread-safe.
+  static DefaultClock* GetInstance();
+};
+
+}  // namespace base
+
+#endif  // BASE_TIME_DEFAULT_CLOCK_H_
diff --git a/base/time/default_tick_clock.cc b/base/time/default_tick_clock.cc
new file mode 100644
index 0000000..188c3cf
--- /dev/null
+++ b/base/time/default_tick_clock.cc
@@ -0,0 +1,23 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/time/default_tick_clock.h"
+
+#include "base/no_destructor.h"
+
+namespace base {
+
+DefaultTickClock::~DefaultTickClock() = default;
+
+TimeTicks DefaultTickClock::NowTicks() const {
+  return TimeTicks::Now();
+}
+
+// static
+const DefaultTickClock* DefaultTickClock::GetInstance() {
+  static const base::NoDestructor<DefaultTickClock> default_tick_clock;
+  return default_tick_clock.get();
+}
+
+}  // namespace base
diff --git a/base/time/default_tick_clock.h b/base/time/default_tick_clock.h
new file mode 100644
index 0000000..78f8a99
--- /dev/null
+++ b/base/time/default_tick_clock.h
@@ -0,0 +1,27 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TIME_DEFAULT_TICK_CLOCK_H_
+#define BASE_TIME_DEFAULT_TICK_CLOCK_H_
+
+#include "base/base_export.h"
+#include "base/time/tick_clock.h"
+
+namespace base {
+
+// DefaultTickClock is a TickClock implementation that uses TimeTicks::Now().
+class BASE_EXPORT DefaultTickClock : public TickClock {
+ public:
+  ~DefaultTickClock() override;
+
+  // Simply returns TimeTicks::Now().
+  TimeTicks NowTicks() const override;
+
+  // Returns a shared instance of DefaultTickClock. This is thread-safe.
+  static const DefaultTickClock* GetInstance();
+};
+
+}  // namespace base
+
+#endif  // BASE_TIME_DEFAULT_TICK_CLOCK_H_
diff --git a/base/time/pr_time_unittest.cc b/base/time/pr_time_unittest.cc
new file mode 100644
index 0000000..6fce4ab
--- /dev/null
+++ b/base/time/pr_time_unittest.cc
@@ -0,0 +1,289 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdint.h>
+#include <time.h>
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/third_party/nspr/prtime.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using base::Time;
+
+namespace {
+
+// PRTime (microseconds since the epoch) for 15th Oct 2007 12:45:00 PDT.
+PRTime comparison_time_pdt = 1192477500 * Time::kMicrosecondsPerSecond;
+
+// Time with positive tz offset and fractional seconds:
+// 2013-07-08T11:28:12.441381+02:00
+PRTime comparison_time_2 = INT64_C(1373275692441381);   // represented as GMT
+
+// Specialized test fixture allowing time strings without timezones to be
+// tested by comparing them to a known time in the local zone.
+class PRTimeTest : public testing::Test {
+ protected:
+  void SetUp() override {
+    // Use mktime to get a time_t, and turn it into a PRTime by converting
+    // seconds to microseconds.  Use 15th Oct 2007 12:45:00 local.  This
+    // must be a time guaranteed to be outside of a DST fallback hour in
+    // any timezone.
+    struct tm local_comparison_tm = {
+      0,            // second
+      45,           // minute
+      12,           // hour
+      15,           // day of month
+      10 - 1,       // month
+      2007 - 1900,  // year
+      0,            // day of week (ignored, output only)
+      0,            // day of year (ignored, output only)
+      -1            // DST in effect, -1 tells mktime to figure it out
+    };
+    comparison_time_local_ =
+        mktime(&local_comparison_tm) * Time::kMicrosecondsPerSecond;
+    ASSERT_GT(comparison_time_local_, 0);
+
+    const int microseconds = 441381;
+    struct tm local_comparison_tm_2 = {
+      12,           // second
+      28,           // minute
+      11,           // hour
+      8,            // day of month
+      7 - 1,        // month
+      2013 - 1900,  // year
+      0,            // day of week (ignored, output only)
+      0,            // day of year (ignored, output only)
+      -1            // DST in effect, -1 tells mktime to figure it out
+    };
+    comparison_time_local_2_ =
+        mktime(&local_comparison_tm_2) * Time::kMicrosecondsPerSecond;
+    ASSERT_GT(comparison_time_local_2_, 0);
+    comparison_time_local_2_ += microseconds;
+  }
+
+  PRTime comparison_time_local_;
+  PRTime comparison_time_local_2_;
+};
+
+// Tests the PR_ParseTimeString nspr helper function for
+// a variety of time strings.
+TEST_F(PRTimeTest, ParseTimeTest1) {
+  time_t current_time = 0;
+  time(&current_time);
+
+  struct tm local_time = {};
+  char time_buf[64] = {};
+#if defined(OS_WIN)
+  localtime_s(&local_time, &current_time);
+  asctime_s(time_buf, arraysize(time_buf), &local_time);
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  localtime_r(&current_time, &local_time);
+  asctime_r(&local_time, time_buf);
+#endif
+
+  PRTime current_time64 = static_cast<PRTime>(current_time) * PR_USEC_PER_SEC;
+
+  PRTime parsed_time = 0;
+  PRStatus result = PR_ParseTimeString(time_buf, PR_FALSE, &parsed_time);
+  EXPECT_EQ(PR_SUCCESS, result);
+  EXPECT_EQ(current_time64, parsed_time);
+}
+
+TEST_F(PRTimeTest, ParseTimeTest2) {
+  PRTime parsed_time = 0;
+  PRStatus result = PR_ParseTimeString("Mon, 15 Oct 2007 19:45:00 GMT",
+                                       PR_FALSE, &parsed_time);
+  EXPECT_EQ(PR_SUCCESS, result);
+  EXPECT_EQ(comparison_time_pdt, parsed_time);
+}
+
+TEST_F(PRTimeTest, ParseTimeTest3) {
+  PRTime parsed_time = 0;
+  PRStatus result = PR_ParseTimeString("15 Oct 07 12:45:00", PR_FALSE,
+                                       &parsed_time);
+  EXPECT_EQ(PR_SUCCESS, result);
+  EXPECT_EQ(comparison_time_local_, parsed_time);
+}
+
+TEST_F(PRTimeTest, ParseTimeTest4) {
+  PRTime parsed_time = 0;
+  PRStatus result = PR_ParseTimeString("15 Oct 07 19:45 GMT", PR_FALSE,
+                                       &parsed_time);
+  EXPECT_EQ(PR_SUCCESS, result);
+  EXPECT_EQ(comparison_time_pdt, parsed_time);
+}
+
+TEST_F(PRTimeTest, ParseTimeTest5) {
+  PRTime parsed_time = 0;
+  PRStatus result = PR_ParseTimeString("Mon Oct 15 12:45 PDT 2007",
+                                       PR_FALSE, &parsed_time);
+  EXPECT_EQ(PR_SUCCESS, result);
+  EXPECT_EQ(comparison_time_pdt, parsed_time);
+}
+
+TEST_F(PRTimeTest, ParseTimeTest6) {
+  PRTime parsed_time = 0;
+  PRStatus result = PR_ParseTimeString("Monday, Oct 15, 2007 12:45 PM",
+                                       PR_FALSE, &parsed_time);
+  EXPECT_EQ(PR_SUCCESS, result);
+  EXPECT_EQ(comparison_time_local_, parsed_time);
+}
+
+TEST_F(PRTimeTest, ParseTimeTest7) {
+  PRTime parsed_time = 0;
+  PRStatus result = PR_ParseTimeString("10/15/07 12:45:00 PM", PR_FALSE,
+                                       &parsed_time);
+  EXPECT_EQ(PR_SUCCESS, result);
+  EXPECT_EQ(comparison_time_local_, parsed_time);
+}
+
+TEST_F(PRTimeTest, ParseTimeTest8) {
+  PRTime parsed_time = 0;
+  PRStatus result = PR_ParseTimeString("10/15/07 12:45:00. PM", PR_FALSE,
+                                       &parsed_time);
+  EXPECT_EQ(PR_SUCCESS, result);
+  EXPECT_EQ(comparison_time_local_, parsed_time);
+}
+
+TEST_F(PRTimeTest, ParseTimeTest9) {
+  PRTime parsed_time = 0;
+  PRStatus result = PR_ParseTimeString("10/15/07 12:45:00.0 PM", PR_FALSE,
+                                       &parsed_time);
+  EXPECT_EQ(PR_SUCCESS, result);
+  EXPECT_EQ(comparison_time_local_, parsed_time);
+}
+
+TEST_F(PRTimeTest, ParseTimeTest10) {
+  PRTime parsed_time = 0;
+  PRStatus result = PR_ParseTimeString("15-OCT-2007 12:45pm", PR_FALSE,
+                                       &parsed_time);
+  EXPECT_EQ(PR_SUCCESS, result);
+  EXPECT_EQ(comparison_time_local_, parsed_time);
+}
+
+TEST_F(PRTimeTest, ParseTimeTest11) {
+  PRTime parsed_time = 0;
+  PRStatus result = PR_ParseTimeString("16 Oct 2007 4:45-JST (Tuesday)",
+                                       PR_FALSE, &parsed_time);
+  EXPECT_EQ(PR_SUCCESS, result);
+  EXPECT_EQ(comparison_time_pdt, parsed_time);
+}
+
+// hh:mm timezone offset.
+TEST_F(PRTimeTest, ParseTimeTest12) {
+  PRTime parsed_time = 0;
+  PRStatus result = PR_ParseTimeString("2013-07-08T11:28:12.441381+02:00",
+                                       PR_FALSE, &parsed_time);
+  EXPECT_EQ(PR_SUCCESS, result);
+  EXPECT_EQ(comparison_time_2, parsed_time);
+}
+
+// hhmm timezone offset.
+TEST_F(PRTimeTest, ParseTimeTest13) {
+  PRTime parsed_time = 0;
+  PRStatus result = PR_ParseTimeString("2013-07-08T11:28:12.441381+0200",
+                                       PR_FALSE, &parsed_time);
+  EXPECT_EQ(PR_SUCCESS, result);
+  EXPECT_EQ(comparison_time_2, parsed_time);
+}
+
+// hh timezone offset.
+TEST_F(PRTimeTest, ParseTimeTest14) {
+  PRTime parsed_time = 0;
+  PRStatus result = PR_ParseTimeString("2013-07-08T11:28:12.4413819+02",
+                                       PR_FALSE, &parsed_time);
+  EXPECT_EQ(PR_SUCCESS, result);
+  EXPECT_EQ(comparison_time_2, parsed_time);
+}
+
+// Five-digit fractional seconds (one microsecond less than comparison_time_2).
+TEST_F(PRTimeTest, ParseTimeTest15) {
+  PRTime parsed_time = 0;
+  PRStatus result = PR_ParseTimeString("2013-07-08T09:28:12.44138Z",
+                                       PR_FALSE, &parsed_time);
+  EXPECT_EQ(PR_SUCCESS, result);
+  EXPECT_EQ(comparison_time_2 - 1, parsed_time);
+}
+
+// Fractional seconds, local timezone.
+TEST_F(PRTimeTest, ParseTimeTest16) {
+  PRTime parsed_time = 0;
+  PRStatus result = PR_ParseTimeString("2013-07-08T11:28:12.441381",
+                                       PR_FALSE, &parsed_time);
+  EXPECT_EQ(PR_SUCCESS, result);
+  EXPECT_EQ(comparison_time_local_2_, parsed_time);
+}
+
+// "Z" (=GMT) timezone.
+TEST_F(PRTimeTest, ParseTimeTest17) {
+  PRTime parsed_time = 0;
+  PRStatus result = PR_ParseTimeString("2013-07-08T09:28:12.441381Z",
+                                       PR_FALSE, &parsed_time);
+  EXPECT_EQ(PR_SUCCESS, result);
+  EXPECT_EQ(comparison_time_2, parsed_time);
+}
+
+// "T" delimiter replaced by space.
+TEST_F(PRTimeTest, ParseTimeTest18) {
+  PRTime parsed_time = 0;
+  PRStatus result = PR_ParseTimeString("2013-07-08 09:28:12.441381Z",
+                                       PR_FALSE, &parsed_time);
+  EXPECT_EQ(PR_SUCCESS, result);
+  EXPECT_EQ(comparison_time_2, parsed_time);
+}
+
+TEST_F(PRTimeTest, ParseTimeTestInvalid1) {
+  PRTime parsed_time = 0;
+  PRStatus result = PR_ParseTimeString("201-07-08T09:28:12.441381Z",
+                                       PR_FALSE, &parsed_time);
+  EXPECT_EQ(PR_FAILURE, result);
+}
+
+TEST_F(PRTimeTest, ParseTimeTestInvalid2) {
+  PRTime parsed_time = 0;
+  PRStatus result = PR_ParseTimeString("2013-007-08T09:28:12.441381Z",
+                                       PR_FALSE, &parsed_time);
+  EXPECT_EQ(PR_FAILURE, result);
+}
+
+TEST_F(PRTimeTest, ParseTimeTestInvalid3) {
+  PRTime parsed_time = 0;
+  PRStatus result = PR_ParseTimeString("2013-07-008T09:28:12.441381Z",
+                                       PR_FALSE, &parsed_time);
+  EXPECT_EQ(PR_FAILURE, result);
+}
+
+// This test should not crash when compiled with Visual C++ 2005 (see
+// http://crbug.com/4387).
+TEST_F(PRTimeTest, ParseTimeTestOutOfRange) {
+  PRTime parsed_time = 0;
+  // Note the lack of timezone in the time string.  The year has to be 3001.
+  // The date has to be after 23:59:59, December 31, 3000, US Pacific Time, so
+  // we use January 2, 3001 to make sure it's after the magic maximum in any
+  // timezone.
+  PRStatus result = PR_ParseTimeString("Sun Jan  2 00:00:00 3001",
+                                       PR_FALSE, &parsed_time);
+  EXPECT_EQ(PR_SUCCESS, result);
+}
+
+TEST_F(PRTimeTest, ParseTimeTestNotNormalized1) {
+  PRTime parsed_time = 0;
+  PRStatus result = PR_ParseTimeString("Mon Oct 15 12:44:60 PDT 2007",
+                                       PR_FALSE, &parsed_time);
+  EXPECT_EQ(PR_SUCCESS, result);
+  EXPECT_EQ(comparison_time_pdt, parsed_time);
+}
+
+TEST_F(PRTimeTest, ParseTimeTestNotNormalized2) {
+  PRTime parsed_time = 0;
+  PRStatus result = PR_ParseTimeString("Sun Oct 14 36:45 PDT 2007",
+                                       PR_FALSE, &parsed_time);
+  EXPECT_EQ(PR_SUCCESS, result);
+  EXPECT_EQ(comparison_time_pdt, parsed_time);
+}
+
+}  // namespace
diff --git a/base/time/tick_clock.cc b/base/time/tick_clock.cc
new file mode 100644
index 0000000..79e396d
--- /dev/null
+++ b/base/time/tick_clock.cc
@@ -0,0 +1,11 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/time/tick_clock.h"
+
+namespace base {
+
+TickClock::~TickClock() = default;
+
+}  // namespace base
diff --git a/base/time/tick_clock.h b/base/time/tick_clock.h
new file mode 100644
index 0000000..dc57354
--- /dev/null
+++ b/base/time/tick_clock.h
@@ -0,0 +1,40 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TIME_TICK_CLOCK_H_
+#define BASE_TIME_TICK_CLOCK_H_
+
+#include "base/base_export.h"
+#include "base/time/time.h"
+
+namespace base {
+
+// A TickClock is an interface for objects that vend TimeTicks.  It is
+// intended to make it possible to test the behavior of classes with
+// respect to non-decreasing time.
+//
+// See DefaultTickClock (base/time/default_tick_clock.h) for the default
+// implementation that simply uses TimeTicks::Now().
+//
+// (Other implementations that use TimeTicks::NowFromSystemTime() should
+// be added as needed.)
+//
+// See SimpleTestTickClock (base/test/simple_test_tick_clock.h) for a
+// simple test implementation.
+//
+// See Clock (base/time/clock.h) for the equivalent interface for Times.
+class BASE_EXPORT TickClock {
+ public:
+  virtual ~TickClock();
+
+  // NowTicks() must be safe to call from any thread.  The caller may
+  // assume that NowTicks() is monotonic (but not strictly monotonic).
+  // In other words, the returned TimeTicks will never decrease with
+  // time, although they might "stand still".
+  virtual TimeTicks NowTicks() const = 0;
+};
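+
+// A minimal sketch (illustrative only; Throttler is hypothetical) of injecting
+// a TickClock so elapsed-time logic can be tested without real waiting:
+//
+//   class Throttler {
+//    public:
+//     explicit Throttler(const TickClock* clock)
+//         : clock_(clock), last_(clock->NowTicks()) {}
+//     bool AllowedNow() {
+//       TimeTicks now = clock_->NowTicks();
+//       if (now - last_ < TimeDelta::FromSeconds(1))
+//         return false;
+//       last_ = now;
+//       return true;
+//     }
+//    private:
+//     const TickClock* clock_;  // e.g. DefaultTickClock::GetInstance().
+//     TimeTicks last_;
+//   };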
+
+}  // namespace base
+
+#endif  // BASE_TIME_TICK_CLOCK_H_
diff --git a/base/time/time.cc b/base/time/time.cc
new file mode 100644
index 0000000..9c541a4
--- /dev/null
+++ b/base/time/time.cc
@@ -0,0 +1,403 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/time/time.h"
+
+#include <cmath>
+#include <ios>
+#include <limits>
+#include <ostream>
+#include <sstream>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/no_destructor.h"
+#include "base/strings/stringprintf.h"
+#include "base/third_party/nspr/prtime.h"
+#include "base/time/time_override.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace internal {
+
+TimeNowFunction g_time_now_function = &subtle::TimeNowIgnoringOverride;
+
+TimeNowFunction g_time_now_from_system_time_function =
+    &subtle::TimeNowFromSystemTimeIgnoringOverride;
+
+TimeTicksNowFunction g_time_ticks_now_function =
+    &subtle::TimeTicksNowIgnoringOverride;
+
+ThreadTicksNowFunction g_thread_ticks_now_function =
+    &subtle::ThreadTicksNowIgnoringOverride;
+
+}  // namespace internal
+
+// TimeDelta ------------------------------------------------------------------
+
+int TimeDelta::InDays() const {
+  if (is_max()) {
+    // Preserve max to prevent overflow.
+    return std::numeric_limits<int>::max();
+  }
+  return static_cast<int>(delta_ / Time::kMicrosecondsPerDay);
+}
+
+int TimeDelta::InHours() const {
+  if (is_max()) {
+    // Preserve max to prevent overflow.
+    return std::numeric_limits<int>::max();
+  }
+  return static_cast<int>(delta_ / Time::kMicrosecondsPerHour);
+}
+
+int TimeDelta::InMinutes() const {
+  if (is_max()) {
+    // Preserve max to prevent overflow.
+    return std::numeric_limits<int>::max();
+  }
+  return static_cast<int>(delta_ / Time::kMicrosecondsPerMinute);
+}
+
+double TimeDelta::InSecondsF() const {
+  if (is_max()) {
+    // Preserve max to prevent overflow.
+    return std::numeric_limits<double>::infinity();
+  }
+  return static_cast<double>(delta_) / Time::kMicrosecondsPerSecond;
+}
+
+int64_t TimeDelta::InSeconds() const {
+  if (is_max()) {
+    // Preserve max to prevent overflow.
+    return std::numeric_limits<int64_t>::max();
+  }
+  return delta_ / Time::kMicrosecondsPerSecond;
+}
+
+double TimeDelta::InMillisecondsF() const {
+  if (is_max()) {
+    // Preserve max to prevent overflow.
+    return std::numeric_limits<double>::infinity();
+  }
+  return static_cast<double>(delta_) / Time::kMicrosecondsPerMillisecond;
+}
+
+int64_t TimeDelta::InMilliseconds() const {
+  if (is_max()) {
+    // Preserve max to prevent overflow.
+    return std::numeric_limits<int64_t>::max();
+  }
+  return delta_ / Time::kMicrosecondsPerMillisecond;
+}
+
+int64_t TimeDelta::InMillisecondsRoundedUp() const {
+  if (is_max()) {
+    // Preserve max to prevent overflow.
+    return std::numeric_limits<int64_t>::max();
+  }
+  return (delta_ + Time::kMicrosecondsPerMillisecond - 1) /
+      Time::kMicrosecondsPerMillisecond;
+}
+
+int64_t TimeDelta::InMicroseconds() const {
+  if (is_max()) {
+    // Preserve max to prevent overflow.
+    return std::numeric_limits<int64_t>::max();
+  }
+  return delta_;
+}
+
+double TimeDelta::InMicrosecondsF() const {
+  if (is_max()) {
+    // Preserve max to prevent overflow.
+    return std::numeric_limits<double>::infinity();
+  }
+  return static_cast<double>(delta_);
+}
+
+int64_t TimeDelta::InNanoseconds() const {
+  if (is_max()) {
+    // Preserve max to prevent overflow.
+    return std::numeric_limits<int64_t>::max();
+  }
+  return delta_ * Time::kNanosecondsPerMicrosecond;
+}
+
+namespace time_internal {
+
+int64_t SaturatedAdd(TimeDelta delta, int64_t value) {
+  CheckedNumeric<int64_t> rv(delta.delta_);
+  rv += value;
+  if (rv.IsValid())
+    return rv.ValueOrDie();
+  // Positive RHS overflows. Negative RHS underflows.
+  if (value < 0)
+    return std::numeric_limits<int64_t>::min();
+  return std::numeric_limits<int64_t>::max();
+}
+
+int64_t SaturatedSub(TimeDelta delta, int64_t value) {
+  CheckedNumeric<int64_t> rv(delta.delta_);
+  rv -= value;
+  if (rv.IsValid())
+    return rv.ValueOrDie();
+  // Negative RHS overflows. Positive RHS underflows.
+  if (value < 0)
+    return std::numeric_limits<int64_t>::max();
+  return std::numeric_limits<int64_t>::min();
+}
+
+}  // namespace time_internal
+
+std::ostream& operator<<(std::ostream& os, TimeDelta time_delta) {
+  return os << time_delta.InSecondsF() << " s";
+}
+
+// Time -----------------------------------------------------------------------
+
+// static
+Time Time::Now() {
+  return internal::g_time_now_function();
+}
+
+// static
+Time Time::NowFromSystemTime() {
+  // Just use g_time_now_from_system_time_function because it returns the
+  // system time.
+  return internal::g_time_now_from_system_time_function();
+}
+
+// static
+Time Time::FromDeltaSinceWindowsEpoch(TimeDelta delta) {
+  return Time(delta.InMicroseconds());
+}
+
+TimeDelta Time::ToDeltaSinceWindowsEpoch() const {
+  return TimeDelta::FromMicroseconds(us_);
+}
+
+// static
+Time Time::FromTimeT(time_t tt) {
+  if (tt == 0)
+    return Time();  // Preserve 0 so we can tell it doesn't exist.
+  if (tt == std::numeric_limits<time_t>::max())
+    return Max();
+  return Time(kTimeTToMicrosecondsOffset) + TimeDelta::FromSeconds(tt);
+}
+
+time_t Time::ToTimeT() const {
+  if (is_null())
+    return 0;  // Preserve 0 so we can tell it doesn't exist.
+  if (is_max()) {
+    // Preserve max without offset to prevent overflow.
+    return std::numeric_limits<time_t>::max();
+  }
+  if (std::numeric_limits<int64_t>::max() - kTimeTToMicrosecondsOffset <= us_) {
+    DLOG(WARNING) << "Overflow when converting base::Time with internal " <<
+                     "value " << us_ << " to time_t.";
+    return std::numeric_limits<time_t>::max();
+  }
+  return (us_ - kTimeTToMicrosecondsOffset) / kMicrosecondsPerSecond;
+}
+
+// static
+Time Time::FromDoubleT(double dt) {
+  if (dt == 0 || std::isnan(dt))
+    return Time();  // Preserve 0 so we can tell it doesn't exist.
+  return Time(kTimeTToMicrosecondsOffset) + TimeDelta::FromSecondsD(dt);
+}
+
+double Time::ToDoubleT() const {
+  if (is_null())
+    return 0;  // Preserve 0 so we can tell it doesn't exist.
+  if (is_max()) {
+    // Preserve max without offset to prevent overflow.
+    return std::numeric_limits<double>::infinity();
+  }
+  return (static_cast<double>(us_ - kTimeTToMicrosecondsOffset) /
+          static_cast<double>(kMicrosecondsPerSecond));
+}
+
+#if defined(OS_POSIX)
+// static
+Time Time::FromTimeSpec(const timespec& ts) {
+  return FromDoubleT(ts.tv_sec +
+                     static_cast<double>(ts.tv_nsec) /
+                         base::Time::kNanosecondsPerSecond);
+}
+#endif
+
+// static
+Time Time::FromJsTime(double ms_since_epoch) {
+  // The epoch is a valid time, so this function doesn't interpret 0 as
+  // the null time.
+  return Time(kTimeTToMicrosecondsOffset) +
+         TimeDelta::FromMillisecondsD(ms_since_epoch);
+}
+
+double Time::ToJsTime() const {
+  if (is_null()) {
+    // Preserve 0 so the invalid result doesn't depend on the platform.
+    return 0;
+  }
+  if (is_max()) {
+    // Preserve max without offset to prevent overflow.
+    return std::numeric_limits<double>::infinity();
+  }
+  return (static_cast<double>(us_ - kTimeTToMicrosecondsOffset) /
+          kMicrosecondsPerMillisecond);
+}
+
+Time Time::FromJavaTime(int64_t ms_since_epoch) {
+  return base::Time::UnixEpoch() +
+         base::TimeDelta::FromMilliseconds(ms_since_epoch);
+}
+
+int64_t Time::ToJavaTime() const {
+  if (is_null()) {
+    // Preserve 0 so the invalid result doesn't depend on the platform.
+    return 0;
+  }
+  if (is_max()) {
+    // Preserve max without offset to prevent overflow.
+    return std::numeric_limits<int64_t>::max();
+  }
+  return ((us_ - kTimeTToMicrosecondsOffset) /
+          kMicrosecondsPerMillisecond);
+}
+
+// static
+Time Time::UnixEpoch() {
+  Time time;
+  time.us_ = kTimeTToMicrosecondsOffset;
+  return time;
+}
+
+Time Time::LocalMidnight() const {
+  Exploded exploded;
+  LocalExplode(&exploded);
+  exploded.hour = 0;
+  exploded.minute = 0;
+  exploded.second = 0;
+  exploded.millisecond = 0;
+  Time out_time;
+  if (FromLocalExploded(exploded, &out_time))
+    return out_time;
+  // This function must not fail.
+  NOTREACHED();
+  return Time();
+}
+
+// static
+bool Time::FromStringInternal(const char* time_string,
+                              bool is_local,
+                              Time* parsed_time) {
+  DCHECK((time_string != nullptr) && (parsed_time != nullptr));
+
+  if (time_string[0] == '\0')
+    return false;
+
+  PRTime result_time = 0;
+  PRStatus result = PR_ParseTimeString(time_string,
+                                       is_local ? PR_FALSE : PR_TRUE,
+                                       &result_time);
+  if (PR_SUCCESS != result)
+    return false;
+
+  result_time += kTimeTToMicrosecondsOffset;
+  *parsed_time = Time(result_time);
+  return true;
+}
+
+// static
+bool Time::ExplodedMostlyEquals(const Exploded& lhs, const Exploded& rhs) {
+  return lhs.year == rhs.year && lhs.month == rhs.month &&
+         lhs.day_of_month == rhs.day_of_month && lhs.hour == rhs.hour &&
+         lhs.minute == rhs.minute && lhs.second == rhs.second &&
+         lhs.millisecond == rhs.millisecond;
+}
+
+std::ostream& operator<<(std::ostream& os, Time time) {
+  Time::Exploded exploded;
+  time.UTCExplode(&exploded);
+  // Use StringPrintf because iostreams formatting is painful.
+  return os << StringPrintf("%04d-%02d-%02d %02d:%02d:%02d.%03d UTC",
+                            exploded.year,
+                            exploded.month,
+                            exploded.day_of_month,
+                            exploded.hour,
+                            exploded.minute,
+                            exploded.second,
+                            exploded.millisecond);
+}
+
+// TimeTicks ------------------------------------------------------------------
+
+// static
+TimeTicks TimeTicks::Now() {
+  return internal::g_time_ticks_now_function();
+}
+
+// static
+TimeTicks TimeTicks::UnixEpoch() {
+  static const base::NoDestructor<base::TimeTicks> epoch([]() {
+    return subtle::TimeTicksNowIgnoringOverride() -
+           (subtle::TimeNowIgnoringOverride() - Time::UnixEpoch());
+  }());
+  return *epoch;
+}
+
+TimeTicks TimeTicks::SnappedToNextTick(TimeTicks tick_phase,
+                                       TimeDelta tick_interval) const {
+  // |interval_offset| is the offset from |this| to the next multiple of
+  // |tick_interval| after |tick_phase|, possibly negative if in the past.
+  TimeDelta interval_offset = (tick_phase - *this) % tick_interval;
+  // If |this| is exactly on the interval (i.e. offset==0), don't adjust.
+  // Otherwise, if |tick_phase| was in the past, adjust forward to the next
+  // tick after |this|.
+  if (!interval_offset.is_zero() && tick_phase < *this)
+    interval_offset += tick_interval;
+  return *this + interval_offset;
+}
+
+std::ostream& operator<<(std::ostream& os, TimeTicks time_ticks) {
+  // This function formats a TimeTicks object as "bogo-microseconds".
+  // The origin and granularity of the count are platform-specific, and may
+  // vary from run to run. Although bogo-microseconds usually roughly
+  // correspond to real microseconds, the only real guarantee is that the
+  // number never goes down during a single run.
+  const TimeDelta as_time_delta = time_ticks - TimeTicks();
+  return os << as_time_delta.InMicroseconds() << " bogo-microseconds";
+}
+
+// ThreadTicks ----------------------------------------------------------------
+
+// static
+ThreadTicks ThreadTicks::Now() {
+  return internal::g_thread_ticks_now_function();
+}
+
+std::ostream& operator<<(std::ostream& os, ThreadTicks thread_ticks) {
+  const TimeDelta as_time_delta = thread_ticks - ThreadTicks();
+  return os << as_time_delta.InMicroseconds() << " bogo-thread-microseconds";
+}
+
+// Time::Exploded -------------------------------------------------------------
+
+inline bool is_in_range(int value, int lo, int hi) {
+  return lo <= value && value <= hi;
+}
+
+bool Time::Exploded::HasValidValues() const {
+  return is_in_range(month, 1, 12) &&
+         is_in_range(day_of_week, 0, 6) &&
+         is_in_range(day_of_month, 1, 31) &&
+         is_in_range(hour, 0, 23) &&
+         is_in_range(minute, 0, 59) &&
+         is_in_range(second, 0, 60) &&
+         is_in_range(millisecond, 0, 999);
+}
+
+}  // namespace base
diff --git a/base/time/time.h b/base/time/time.h
new file mode 100644
index 0000000..329dbd3
--- /dev/null
+++ b/base/time/time.h
@@ -0,0 +1,1000 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Time represents an absolute point in coordinated universal time (UTC),
+// internally represented as microseconds (s/1,000,000) since the Windows epoch
+// (1601-01-01 00:00:00 UTC). System-dependent clock interface routines are
+// defined in time_PLATFORM.cc. Note that values for Time may skew and jump
+// around as the operating system makes adjustments to synchronize (e.g., with
+// NTP servers). Thus, client code that uses the Time class must account for
+// this.
+//
+// TimeDelta represents a duration of time, internally represented in
+// microseconds.
+//
+// TimeTicks and ThreadTicks represent an abstract time that is usually
+// incrementing, for use in measuring time durations. Internally, they are
+// represented in microseconds. They cannot be converted to a human-readable
+// time, but are guaranteed not to decrease (unlike the Time class). Note that
+// TimeTicks may "stand still" (e.g., if the computer is suspended), and
+// ThreadTicks will "stand still" whenever the thread has been de-scheduled by
+// the operating system.
+//
+// All time classes are copyable, assignable, and occupy 64 bits per instance.
+// As a result, prefer passing them by value:
+//   void MyFunction(TimeDelta arg);
+// If circumstances require, you may also pass by const reference:
+//   void MyFunction(const TimeDelta& arg);  // Not preferred.
+//
+// Definitions of operator<< are provided to make these types work with
+// DCHECK_EQ() and other log macros. For human-readable formatting, see
+// "base/i18n/time_formatting.h".
+//
+// So many choices!  Which time class should you use?  Examples:
+//
+//   Time:        Interpreting the wall-clock time provided by a remote system.
+//                Detecting whether cached resources have expired. Providing the
+//                user with a display of the current date and time. Determining
+//                the amount of time between events across re-boots of the
+//                machine.
+//
+//   TimeTicks:   Tracking the amount of time a task runs. Executing delayed
+//                tasks at the right time. Computing presentation timestamps.
+//                Synchronizing audio and video using TimeTicks as a common
+//                reference clock (lip-sync). Measuring network round-trip
+//                latency.
+//
+//   ThreadTicks: Benchmarking how long the current thread has been doing actual
+//                work.
+
+#ifndef BASE_TIME_TIME_H_
+#define BASE_TIME_TIME_H_
+
+#include <stdint.h>
+#include <time.h>
+
+#include <iosfwd>
+#include <limits>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/numerics/safe_math.h"
+#include "build/build_config.h"
+
+#if defined(OS_FUCHSIA)
+#include <zircon/types.h>
+#endif
+
+#if defined(OS_MACOSX)
+#include <CoreFoundation/CoreFoundation.h>
+// Avoid Mac system header macro leak.
+#undef TYPE_BOOL
+#endif
+
+#if defined(OS_ANDROID)
+#include <jni.h>
+#endif
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include <unistd.h>
+#include <sys/time.h>
+#endif
+
+#if defined(OS_WIN)
+#include "base/gtest_prod_util.h"
+#include "base/win/windows_types.h"
+#endif
+
+namespace base {
+
+class PlatformThreadHandle;
+class TimeDelta;
+
+// The functions in the time_internal namespace are meant to be used only by the
+// time classes and functions.  Please use the math operators defined in the
+// time classes instead.
+namespace time_internal {
+
+// Add or subtract |value| from a TimeDelta. The int64_t argument and return
+// value are in terms of a microsecond timebase.
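+// For example, SaturatedAdd(TimeDelta::Max(), 1) returns
+// std::numeric_limits<int64_t>::max() instead of wrapping around.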
+BASE_EXPORT int64_t SaturatedAdd(TimeDelta delta, int64_t value);
+BASE_EXPORT int64_t SaturatedSub(TimeDelta delta, int64_t value);
+
+}  // namespace time_internal
+
+// TimeDelta ------------------------------------------------------------------
+
+class BASE_EXPORT TimeDelta {
+ public:
+  constexpr TimeDelta() : delta_(0) {}
+
+  // Converts units of time to TimeDeltas.
+  static constexpr TimeDelta FromDays(int days);
+  static constexpr TimeDelta FromHours(int hours);
+  static constexpr TimeDelta FromMinutes(int minutes);
+  static constexpr TimeDelta FromSeconds(int64_t secs);
+  static constexpr TimeDelta FromMilliseconds(int64_t ms);
+  static constexpr TimeDelta FromMicroseconds(int64_t us);
+  static constexpr TimeDelta FromNanoseconds(int64_t ns);
+  static constexpr TimeDelta FromSecondsD(double secs);
+  static constexpr TimeDelta FromMillisecondsD(double ms);
+  static constexpr TimeDelta FromMicrosecondsD(double us);
+  static constexpr TimeDelta FromNanosecondsD(double ns);
+#if defined(OS_WIN)
+  static TimeDelta FromQPCValue(LONGLONG qpc_value);
+  static TimeDelta FromFileTime(FILETIME ft);
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  static TimeDelta FromTimeSpec(const timespec& ts);
+#endif
+
+  // Converts an integer value representing TimeDelta to a class. This is used
+  // when deserializing a |TimeDelta| structure, using a value known to be
+  // compatible. It is not provided as a constructor because the integer type
+  // may be unclear from the perspective of a caller.
+  //
+  // DEPRECATED - Do not use in new code. http://crbug.com/634507
+  static constexpr TimeDelta FromInternalValue(int64_t delta) {
+    return TimeDelta(delta);
+  }
+
+  // Returns the maximum time delta, which should be greater than any reasonable
+  // time delta we might compare it to. Adding or subtracting the maximum time
+  // delta to a time or another time delta has an undefined result.
+  static constexpr TimeDelta Max();
+
+  // Returns the minimum time delta, which should be less than any reasonable
+  // time delta we might compare it to. Adding or subtracting the minimum time
+  // delta to a time or another time delta has an undefined result.
+  static constexpr TimeDelta Min();
+
+  // Returns the internal numeric value of the TimeDelta object. Please don't
+  // use this and do arithmetic on it, as it is more error prone than using the
+  // provided operators.
+  // For serializing, use FromInternalValue to reconstitute.
+  //
+  // DEPRECATED - Do not use in new code. http://crbug.com/634507
+  constexpr int64_t ToInternalValue() const { return delta_; }
+
+  // Returns the magnitude (absolute value) of this TimeDelta.
+  constexpr TimeDelta magnitude() const {
+    // Some toolchains provide an incomplete C++11 implementation and lack an
+    // int64_t overload for std::abs().  The following is a simple branchless
+    // implementation:
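+    // (For example, for delta_ == -5: mask == -1, and (-5 + -1) ^ -1 == 5.)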
+    const int64_t mask = delta_ >> (sizeof(delta_) * 8 - 1);
+    return TimeDelta((delta_ + mask) ^ mask);
+  }
+
+  // Returns true if the time delta is zero.
+  constexpr bool is_zero() const { return delta_ == 0; }
+
+  // Returns true if the time delta is the maximum/minimum time delta.
+  constexpr bool is_max() const {
+    return delta_ == std::numeric_limits<int64_t>::max();
+  }
+  constexpr bool is_min() const {
+    return delta_ == std::numeric_limits<int64_t>::min();
+  }
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+  struct timespec ToTimeSpec() const;
+#endif
+
+  // Returns the time delta in some unit. The F versions return a floating
+  // point value, the "regular" versions return a rounded-down value.
+  //
+  // InMillisecondsRoundedUp() instead returns an integer that is rounded up
+  // to the next full millisecond.
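+  //
+  // For example, TimeDelta::FromMilliseconds(1500).InSeconds() is 1 while
+  // InSecondsF() is 1.5, and
+  // TimeDelta::FromMicroseconds(1001).InMillisecondsRoundedUp() is 2.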
+  int InDays() const;
+  int InHours() const;
+  int InMinutes() const;
+  double InSecondsF() const;
+  int64_t InSeconds() const;
+  double InMillisecondsF() const;
+  int64_t InMilliseconds() const;
+  int64_t InMillisecondsRoundedUp() const;
+  int64_t InMicroseconds() const;
+  double InMicrosecondsF() const;
+  int64_t InNanoseconds() const;
+
+  constexpr TimeDelta& operator=(TimeDelta other) {
+    delta_ = other.delta_;
+    return *this;
+  }
+
+  // Computations with other deltas. Can easily be made constexpr with C++17 but
+  // hard to do until then per limitations around
+  // __builtin_(add|sub)_overflow in safe_math_clang_gcc_impl.h :
+  // https://chromium-review.googlesource.com/c/chromium/src/+/873352#message-59594ab70827795a67e0780404adf37b4b6c2f14
+  TimeDelta operator+(TimeDelta other) const {
+    return TimeDelta(time_internal::SaturatedAdd(*this, other.delta_));
+  }
+  TimeDelta operator-(TimeDelta other) const {
+    return TimeDelta(time_internal::SaturatedSub(*this, other.delta_));
+  }
+
+  TimeDelta& operator+=(TimeDelta other) {
+    return *this = (*this + other);
+  }
+  TimeDelta& operator-=(TimeDelta other) {
+    return *this = (*this - other);
+  }
+  constexpr TimeDelta operator-() const { return TimeDelta(-delta_); }
+
+  // Computations with numeric types. operator*() isn't constexpr because of a
+  // limitation around __builtin_mul_overflow (but operator/(1.0/a) works for
+  // |a|'s of "reasonable" size -- i.e. that don't risk overflow).
+  template <typename T>
+  TimeDelta operator*(T a) const {
+    CheckedNumeric<int64_t> rv(delta_);
+    rv *= a;
+    if (rv.IsValid())
+      return TimeDelta(rv.ValueOrDie());
+    // Matched sign overflows. Mismatched sign underflows.
+    if ((delta_ < 0) ^ (a < 0))
+      return TimeDelta(std::numeric_limits<int64_t>::min());
+    return TimeDelta(std::numeric_limits<int64_t>::max());
+  }
+  template <typename T>
+  constexpr TimeDelta operator/(T a) const {
+    CheckedNumeric<int64_t> rv(delta_);
+    rv /= a;
+    if (rv.IsValid())
+      return TimeDelta(rv.ValueOrDie());
+    // Matched sign overflows. Mismatched sign underflows.
+    // Special case to catch divide by zero.
+    if ((delta_ < 0) ^ (a <= 0))
+      return TimeDelta(std::numeric_limits<int64_t>::min());
+    return TimeDelta(std::numeric_limits<int64_t>::max());
+  }
+  template <typename T>
+  TimeDelta& operator*=(T a) {
+    return *this = (*this * a);
+  }
+  template <typename T>
+  constexpr TimeDelta& operator/=(T a) {
+    return *this = (*this / a);
+  }
+
+  constexpr int64_t operator/(TimeDelta a) const { return delta_ / a.delta_; }
+  constexpr TimeDelta operator%(TimeDelta a) const {
+    return TimeDelta(delta_ % a.delta_);
+  }
+
+  // Comparison operators.
+  constexpr bool operator==(TimeDelta other) const {
+    return delta_ == other.delta_;
+  }
+  constexpr bool operator!=(TimeDelta other) const {
+    return delta_ != other.delta_;
+  }
+  constexpr bool operator<(TimeDelta other) const {
+    return delta_ < other.delta_;
+  }
+  constexpr bool operator<=(TimeDelta other) const {
+    return delta_ <= other.delta_;
+  }
+  constexpr bool operator>(TimeDelta other) const {
+    return delta_ > other.delta_;
+  }
+  constexpr bool operator>=(TimeDelta other) const {
+    return delta_ >= other.delta_;
+  }
+
+#if defined(OS_WIN)
+  // This works around crbug.com/635974
+  constexpr TimeDelta(const TimeDelta& other) : delta_(other.delta_) {}
+#endif
+
+ private:
+  friend int64_t time_internal::SaturatedAdd(TimeDelta delta, int64_t value);
+  friend int64_t time_internal::SaturatedSub(TimeDelta delta, int64_t value);
+
+  // Constructs a delta given the duration in microseconds. This is private
+  // to avoid confusion by callers with an integer constructor. Use
+  // FromSeconds, FromMilliseconds, etc. instead.
+  constexpr explicit TimeDelta(int64_t delta_us) : delta_(delta_us) {}
+
+  // Private method to build a delta from a double.
+  static constexpr TimeDelta FromDouble(double value);
+
+  // Private method to build a delta from the product of a user-provided value
+  // and a known-positive value.
+  static constexpr TimeDelta FromProduct(int64_t value, int64_t positive_value);
+
+  // Delta in microseconds.
+  int64_t delta_;
+};
+
+template <typename T>
+TimeDelta operator*(T a, TimeDelta td) {
+  return td * a;
+}
+
+// For logging use only.
+BASE_EXPORT std::ostream& operator<<(std::ostream& os, TimeDelta time_delta);
+
+// Do not reference the time_internal::TimeBase template class directly.  Please
+// use one of the time subclasses instead, and only reference the public
+// TimeBase members via those classes.
+namespace time_internal {
+
+// TimeBase -------------------------------------------------------------------
+
+// Provides value storage and comparison/math operations common to all time
+// classes. Each subclass provides for strong type-checking to ensure
+// semantically meaningful comparison/math of time values from the same clock
+// source or timeline.
+template<class TimeClass>
+class TimeBase {
+ public:
+  static const int64_t kHoursPerDay = 24;
+  static const int64_t kMillisecondsPerSecond = 1000;
+  static const int64_t kMillisecondsPerDay =
+      kMillisecondsPerSecond * 60 * 60 * kHoursPerDay;
+  static const int64_t kMicrosecondsPerMillisecond = 1000;
+  static const int64_t kMicrosecondsPerSecond =
+      kMicrosecondsPerMillisecond * kMillisecondsPerSecond;
+  static const int64_t kMicrosecondsPerMinute = kMicrosecondsPerSecond * 60;
+  static const int64_t kMicrosecondsPerHour = kMicrosecondsPerMinute * 60;
+  static const int64_t kMicrosecondsPerDay =
+      kMicrosecondsPerHour * kHoursPerDay;
+  static const int64_t kMicrosecondsPerWeek = kMicrosecondsPerDay * 7;
+  static const int64_t kNanosecondsPerMicrosecond = 1000;
+  static const int64_t kNanosecondsPerSecond =
+      kNanosecondsPerMicrosecond * kMicrosecondsPerSecond;
+
+  // Returns true if this object has not been initialized.
+  //
+  // Warning: Be careful when writing code that performs math on time values,
+  // since it's possible to produce a valid "zero" result that should not be
+  // interpreted as a "null" value.
+  bool is_null() const {
+    return us_ == 0;
+  }
+
+  // Returns true if this object represents the maximum/minimum time.
+  bool is_max() const { return us_ == std::numeric_limits<int64_t>::max(); }
+  bool is_min() const { return us_ == std::numeric_limits<int64_t>::min(); }
+
+  // Returns the maximum/minimum times, which should be greater/less than any
+  // reasonable time with which we might compare it.
+  static TimeClass Max() {
+    return TimeClass(std::numeric_limits<int64_t>::max());
+  }
+
+  static TimeClass Min() {
+    return TimeClass(std::numeric_limits<int64_t>::min());
+  }
+
+  // For serializing only. Use FromInternalValue() to reconstitute. Please don't
+  // use this and do arithmetic on it, as it is more error prone than using the
+  // provided operators.
+  //
+  // DEPRECATED - Do not use in new code. For serializing Time values, prefer
+  // Time::ToDeltaSinceWindowsEpoch().InMicroseconds(). http://crbug.com/634507
+  int64_t ToInternalValue() const { return us_; }
+
+  // The amount of time since the origin (or "zero") point. This is a syntactic
+  // convenience to aid in code readability, mainly for debugging/testing use
+  // cases.
+  //
+  // Warning: While the Time subclass has a fixed origin point, the origin for
+  // the other subclasses can vary each time the application is restarted.
+  TimeDelta since_origin() const { return TimeDelta::FromMicroseconds(us_); }
+
+  TimeClass& operator=(TimeClass other) {
+    us_ = other.us_;
+    return *(static_cast<TimeClass*>(this));
+  }
+
+  // Compute the difference between two times.
+  TimeDelta operator-(TimeClass other) const {
+    return TimeDelta::FromMicroseconds(us_ - other.us_);
+  }
+
+  // Return a new time modified by some delta.
+  TimeClass operator+(TimeDelta delta) const {
+    return TimeClass(time_internal::SaturatedAdd(delta, us_));
+  }
+  TimeClass operator-(TimeDelta delta) const {
+    return TimeClass(-time_internal::SaturatedSub(delta, us_));
+  }
+
+  // Modify by some time delta.
+  TimeClass& operator+=(TimeDelta delta) {
+    return static_cast<TimeClass&>(*this = (*this + delta));
+  }
+  TimeClass& operator-=(TimeDelta delta) {
+    return static_cast<TimeClass&>(*this = (*this - delta));
+  }
+
+  // Comparison operators
+  bool operator==(TimeClass other) const {
+    return us_ == other.us_;
+  }
+  bool operator!=(TimeClass other) const {
+    return us_ != other.us_;
+  }
+  bool operator<(TimeClass other) const {
+    return us_ < other.us_;
+  }
+  bool operator<=(TimeClass other) const {
+    return us_ <= other.us_;
+  }
+  bool operator>(TimeClass other) const {
+    return us_ > other.us_;
+  }
+  bool operator>=(TimeClass other) const {
+    return us_ >= other.us_;
+  }
+
+ protected:
+  constexpr explicit TimeBase(int64_t us) : us_(us) {}
+
+  // Time value in a microsecond timebase.
+  int64_t us_;
+};
+
+}  // namespace time_internal
+
+template<class TimeClass>
+inline TimeClass operator+(TimeDelta delta, TimeClass t) {
+  return t + delta;
+}
+
+// Time -----------------------------------------------------------------------
+
+// Represents a wall clock time in UTC. Values are not guaranteed to be
+// monotonically non-decreasing and are subject to large amounts of skew.
+class BASE_EXPORT Time : public time_internal::TimeBase<Time> {
+ public:
+  // Offset of UNIX epoch (1970-01-01 00:00:00 UTC) from Windows FILETIME epoch
+  // (1601-01-01 00:00:00 UTC), in microseconds. This value is derived from the
+  // following: ((1970-1601)*365+89)*24*60*60*1000*1000, where 89 is the number
+  // of leap year days between 1601 and 1970: (1970-1601)/4 excluding 1700,
+  // 1800, and 1900.
+  static constexpr int64_t kTimeTToMicrosecondsOffset =
+      INT64_C(11644473600000000);
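+  // Sanity check: ((1970 - 1601) * 365 + 89) = 134,774 days,
+  // * 86,400 s/day = 11,644,473,600 s, * 1,000,000 = 11644473600000000 us.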
+
+#if defined(OS_WIN)
+  // To avoid overflow in QPC-to-microseconds calculations: since we multiply
+  // by kMicrosecondsPerSecond, the QPC value should not exceed
+  // (2^63 - 1) / 1E6. If it exceeds that threshold, we divide then multiply.
+  static constexpr int64_t kQPCOverflowThreshold = INT64_C(0x8637BD05AF7);
+#endif
+
+// kExplodedMinYear and kExplodedMaxYear define the platform-specific limits
+// for values passed to FromUTCExploded() and FromLocalExploded(). Those
+// functions will return false if passed values outside these limits. The limits
+// are inclusive, meaning that the API should support all dates within a given
+// limit year.
+#if defined(OS_WIN)
+  static constexpr int kExplodedMinYear = 1601;
+  static constexpr int kExplodedMaxYear = 30827;
+#elif defined(OS_IOS)
+  static constexpr int kExplodedMinYear = std::numeric_limits<int>::min();
+  static constexpr int kExplodedMaxYear = std::numeric_limits<int>::max();
+#elif defined(OS_MACOSX)
+  static constexpr int kExplodedMinYear = 1902;
+  static constexpr int kExplodedMaxYear = std::numeric_limits<int>::max();
+#elif defined(OS_ANDROID)
+  // Though we use 64-bit time APIs on both 32 and 64 bit Android, some OS
+  // versions like KitKat (ARM but not x86 emulator) can't handle some early
+  // dates (e.g. before 1170). So we set min conservatively here.
+  static constexpr int kExplodedMinYear = 1902;
+  static constexpr int kExplodedMaxYear = std::numeric_limits<int>::max();
+#else
+  static constexpr int kExplodedMinYear =
+      (sizeof(time_t) == 4 ? 1902 : std::numeric_limits<int>::min());
+  static constexpr int kExplodedMaxYear =
+      (sizeof(time_t) == 4 ? 2037 : std::numeric_limits<int>::max());
+#endif
+
+  // Represents an exploded time that can be formatted nicely. This is kind of
+  // like the Win32 SYSTEMTIME structure or the Unix "struct tm" with a few
+  // additions and changes to prevent errors.
+  struct BASE_EXPORT Exploded {
+    int year;          // Four digit year "2007"
+    int month;         // 1-based month (values 1 = January, etc.)
+    int day_of_week;   // 0-based day of week (0 = Sunday, etc.)
+    int day_of_month;  // 1-based day of month (1-31)
+    int hour;          // Hour within the current day (0-23)
+    int minute;        // Minute within the current hour (0-59)
+    int second;        // Second within the current minute (0-59 plus leap
+                       //   seconds which may take it up to 60).
+    int millisecond;   // Milliseconds within the current second (0-999)
+
+    // A cursory test for whether the data members are within their
+    // respective ranges. A 'true' return value does not guarantee the
+    // Exploded value can be successfully converted to a Time value.
+    bool HasValidValues() const;
+  };
+
+  // Contains the NULL time. Use Time::Now() to get the current time.
+  constexpr Time() : TimeBase(0) {}
+
+  // Returns the time for epoch in Unix-like system (Jan 1, 1970).
+  static Time UnixEpoch();
+
+  // Returns the current time. Watch out: the system might adjust its clock,
+  // in which case time will actually go backwards. We don't guarantee that
+  // times are increasing, or that two calls to Now() won't be the same.
+  static Time Now();
+
+  // Returns the current time. Same as Now() except that this function always
+  // uses system time so that there are no discrepancies between the returned
+  // time and system time even on virtual environments including our test bot.
+  // For timing-sensitive unittests, this function should be used.
+  static Time NowFromSystemTime();
+
+  // Converts to/from TimeDeltas relative to the Windows epoch (1601-01-01
+  // 00:00:00 UTC). Prefer these methods for opaque serialization and
+  // deserialization of time values, e.g.
+  //
+  //   // Serialization:
+  //   base::Time last_updated = ...;
+  //   SaveToDatabase(last_updated.ToDeltaSinceWindowsEpoch().InMicroseconds());
+  //
+  //   // Deserialization:
+  //   base::Time last_updated = base::Time::FromDeltaSinceWindowsEpoch(
+  //       base::TimeDelta::FromMicroseconds(LoadFromDatabase()));
+  static Time FromDeltaSinceWindowsEpoch(TimeDelta delta);
+  TimeDelta ToDeltaSinceWindowsEpoch() const;
+
+  // Converts to/from time_t in UTC and a Time class.
+  static Time FromTimeT(time_t tt);
+  time_t ToTimeT() const;
+
+  // Converts time to/from a double which is the number of seconds since epoch
+  // (Jan 1, 1970).  WebKit uses this format to represent time.  Because
+  // WebKit initializes double time values to 0 to indicate "not initialized",
+  // we map it to an empty Time object that also means "not initialized".
+  static Time FromDoubleT(double dt);
+  double ToDoubleT() const;
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+  // Converts the timespec structure to time. MacOS X 10.8.3 (and tentatively,
+  // earlier versions) will have the |ts|'s tv_nsec component zeroed out,
+  // having a 1 second resolution, which agrees with
+  // https://developer.apple.com/legacy/library/#technotes/tn/tn1150.html#HFSPlusDates.
+  static Time FromTimeSpec(const timespec& ts);
+#endif
+
+  // Converts to/from the Javascript convention for times, a number of
+  // milliseconds since the epoch:
+  // https://developer.mozilla.org/en/JavaScript/Reference/Global_Objects/Date/getTime.
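+  // For example, FromJsTime(0) yields Time::UnixEpoch().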
+  static Time FromJsTime(double ms_since_epoch);
+  double ToJsTime() const;
+
+  // Converts to/from Java convention for times, a number of milliseconds since
+  // the epoch. Because the Java format has less resolution, converting to Java
+  // time is a lossy operation.
+  static Time FromJavaTime(int64_t ms_since_epoch);
+  int64_t ToJavaTime() const;
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+  static Time FromTimeVal(struct timeval t);
+  struct timeval ToTimeVal() const;
+#endif
+
+#if defined(OS_MACOSX)
+  static Time FromCFAbsoluteTime(CFAbsoluteTime t);
+  CFAbsoluteTime ToCFAbsoluteTime() const;
+#endif
+
+#if defined(OS_WIN)
+  static Time FromFileTime(FILETIME ft);
+  FILETIME ToFileTime() const;
+
+  // The minimum time of a low-resolution timer.  This is basically a Windows
+  // constant of ~15.6ms.  While it does vary on some older OS versions, we'll
+  // treat it as static across all Windows versions.
+  static const int kMinLowResolutionThresholdMs = 16;
+
+  // Enable or disable Windows high resolution timer.
+  static void EnableHighResolutionTimer(bool enable);
+
+  // Activates or deactivates the high resolution timer based on the |activate|
+  // flag.  If the HighResolutionTimer is not Enabled (see
+  // EnableHighResolutionTimer), this function will return false.  Otherwise
+  // returns true.  Each successful activate call must be paired with a
+  // subsequent deactivate call.
+  // All callers to activate the high resolution timer must eventually call
+  // this function to deactivate the high resolution timer.
+  static bool ActivateHighResolutionTimer(bool activate);
+
+  // Returns true if the high resolution timer is both enabled and activated.
+  // This is provided for testing only, and is not tracked in a thread-safe
+  // way.
+  static bool IsHighResolutionTimerInUse();
+
+  // The following two functions are used to report the fraction of elapsed time
+  // that the high resolution timer is activated.
+  // ResetHighResolutionTimerUsage() resets the cumulative usage and starts the
+  // measurement interval and GetHighResolutionTimerUsage() returns the
+  // percentage of time since the reset that the high resolution timer was
+  // activated.
+  // ResetHighResolutionTimerUsage() must be called at least once before calling
+  // GetHighResolutionTimerUsage(); otherwise the usage result would be
+  // undefined.
+  static void ResetHighResolutionTimerUsage();
+  static double GetHighResolutionTimerUsage();
+#endif  // defined(OS_WIN)
+
+  // Converts an exploded structure representing either the local time or UTC
+  // into a Time class. Returns false on a failure when, for example, a day of
+  // month is set to 31 on a 28-30 day month. Returns Time(0) on overflow.
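+  // For example (a sketch; |day_of_week| is ignored on conversion):
+  //   Time::Exploded midnight = {2007, 4, 0, 15, 0, 0, 0, 0};  // 2007-04-15.
+  //   Time time;
+  //   if (Time::FromUTCExploded(midnight, &time)) { /* use |time| */ }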
+  static bool FromUTCExploded(const Exploded& exploded,
+                              Time* time) WARN_UNUSED_RESULT {
+    return FromExploded(false, exploded, time);
+  }
+  static bool FromLocalExploded(const Exploded& exploded,
+                                Time* time) WARN_UNUSED_RESULT {
+    return FromExploded(true, exploded, time);
+  }
+
+  // Converts a string representation of time to a Time object. An example of
+  // a convertible time string is "Tue, 15 Nov 1994 12:45:26 GMT". If the
+  // timezone is not specified in the input string, FromString assumes local
+  // time and FromUTCString assumes UTC. A timezone that cannot be parsed
+  // (e.g. "UTC", which is not specified in RFC822) is treated as if the
+  // timezone is not specified.
+  // TODO(iyengar) Move the FromString/FromTimeT/ToTimeT/FromFileTime to
+  // a new time converter class.
+  static bool FromString(const char* time_string,
+                         Time* parsed_time) WARN_UNUSED_RESULT {
+    return FromStringInternal(time_string, true, parsed_time);
+  }
+  static bool FromUTCString(const char* time_string,
+                            Time* parsed_time) WARN_UNUSED_RESULT {
+    return FromStringInternal(time_string, false, parsed_time);
+  }
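+  // For example (illustrative):
+  //   Time parsed;
+  //   if (Time::FromUTCString("Tue, 15 Nov 1994 12:45:26 GMT", &parsed)) {
+  //     // |parsed| now holds 1994-11-15 12:45:26.000 UTC.
+  //   }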
+
+  // Fills the given exploded structure with either the local time or UTC from
+  // this time structure (containing UTC).
+  void UTCExplode(Exploded* exploded) const {
+    return Explode(false, exploded);
+  }
+  void LocalExplode(Exploded* exploded) const {
+    return Explode(true, exploded);
+  }
+
+  // Rounds this time down to the nearest day in local time. It will represent
+  // midnight on that day.
+  Time LocalMidnight() const;
+
+  // Converts an integer value representing Time to a class. This may be used
+  // when deserializing a |Time| structure, using a value known to be
+  // compatible. It is not provided as a constructor because the integer type
+  // may be unclear from the perspective of a caller.
+  //
+  // DEPRECATED - Do not use in new code. For deserializing Time values, prefer
+  // Time::FromDeltaSinceWindowsEpoch(). http://crbug.com/634507
+  static constexpr Time FromInternalValue(int64_t us) { return Time(us); }
+
+ private:
+  friend class time_internal::TimeBase<Time>;
+
+  constexpr explicit Time(int64_t us) : TimeBase(us) {}
+
+  // Explodes the given time to either local time |is_local = true| or UTC
+  // |is_local = false|.
+  void Explode(bool is_local, Exploded* exploded) const;
+
+  // Unexplodes a given time assuming the source is either local time
+  // |is_local = true| or UTC |is_local = false|. Function returns false on
+  // failure and sets |time| to Time(0). Otherwise returns true and sets |time|
+  // to non-exploded time.
+  static bool FromExploded(bool is_local,
+                           const Exploded& exploded,
+                           Time* time) WARN_UNUSED_RESULT;
+
+  // Converts a string representation of time to a Time object. An example of
+  // a convertible time string is "Tue, 15 Nov 1994 12:45:26 GMT". If the
+  // timezone is not specified in the input string, local time
+  // |is_local = true| or UTC |is_local = false| is assumed. A timezone that
+  // cannot be parsed (e.g. "UTC", which is not specified in RFC822) is
+  // treated as if the timezone is not specified.
+  static bool FromStringInternal(const char* time_string,
+                                 bool is_local,
+                                 Time* parsed_time) WARN_UNUSED_RESULT;
+
+  // Comparison does not consider |day_of_week| when doing the operation.
+  static bool ExplodedMostlyEquals(const Exploded& lhs,
+                                   const Exploded& rhs) WARN_UNUSED_RESULT;
+};
+
+// static
+constexpr TimeDelta TimeDelta::FromDays(int days) {
+  return days == std::numeric_limits<int>::max()
+             ? Max()
+             : TimeDelta(days * Time::kMicrosecondsPerDay);
+}
+
+// static
+constexpr TimeDelta TimeDelta::FromHours(int hours) {
+  return hours == std::numeric_limits<int>::max()
+             ? Max()
+             : TimeDelta(hours * Time::kMicrosecondsPerHour);
+}
+
+// static
+constexpr TimeDelta TimeDelta::FromMinutes(int minutes) {
+  return minutes == std::numeric_limits<int>::max()
+             ? Max()
+             : TimeDelta(minutes * Time::kMicrosecondsPerMinute);
+}
+
+// static
+constexpr TimeDelta TimeDelta::FromSeconds(int64_t secs) {
+  return FromProduct(secs, Time::kMicrosecondsPerSecond);
+}
+
+// static
+constexpr TimeDelta TimeDelta::FromMilliseconds(int64_t ms) {
+  return FromProduct(ms, Time::kMicrosecondsPerMillisecond);
+}
+
+// static
+constexpr TimeDelta TimeDelta::FromMicroseconds(int64_t us) {
+  return TimeDelta(us);
+}
+
+// static
+constexpr TimeDelta TimeDelta::FromNanoseconds(int64_t ns) {
+  return TimeDelta(ns / Time::kNanosecondsPerMicrosecond);
+}
+
+// static
+constexpr TimeDelta TimeDelta::FromSecondsD(double secs) {
+  return FromDouble(secs * Time::kMicrosecondsPerSecond);
+}
+
+// static
+constexpr TimeDelta TimeDelta::FromMillisecondsD(double ms) {
+  return FromDouble(ms * Time::kMicrosecondsPerMillisecond);
+}
+
+// static
+constexpr TimeDelta TimeDelta::FromMicrosecondsD(double us) {
+  return FromDouble(us);
+}
+
+// static
+constexpr TimeDelta TimeDelta::FromNanosecondsD(double ns) {
+  return FromDouble(ns / Time::kNanosecondsPerMicrosecond);
+}
+
+// static
+constexpr TimeDelta TimeDelta::Max() {
+  return TimeDelta(std::numeric_limits<int64_t>::max());
+}
+
+// static
+constexpr TimeDelta TimeDelta::Min() {
+  return TimeDelta(std::numeric_limits<int64_t>::min());
+}
+
+// static
+constexpr TimeDelta TimeDelta::FromDouble(double value) {
+  // TODO(crbug.com/612601): Use saturated_cast<int64_t>(value) once we sort out
+  // the Min() behavior.
+  return value > std::numeric_limits<int64_t>::max()
+             ? Max()
+             : value < std::numeric_limits<int64_t>::min()
+                   ? Min()
+                   : TimeDelta(static_cast<int64_t>(value));
+}
+
+// static
+constexpr TimeDelta TimeDelta::FromProduct(int64_t value,
+                                           int64_t positive_value) {
+  DCHECK(positive_value > 0);
+  return value > std::numeric_limits<int64_t>::max() / positive_value
+             ? Max()
+             : value < std::numeric_limits<int64_t>::min() / positive_value
+                   ? Min()
+                   : TimeDelta(value * positive_value);
+}
+
+// For logging use only.
+BASE_EXPORT std::ostream& operator<<(std::ostream& os, Time time);
+
+// TimeTicks ------------------------------------------------------------------
+
+// Represents monotonically non-decreasing clock time.
+class BASE_EXPORT TimeTicks : public time_internal::TimeBase<TimeTicks> {
+ public:
+  // The underlying clock used to generate new TimeTicks.
+  enum class Clock {
+    FUCHSIA_ZX_CLOCK_MONOTONIC,
+    LINUX_CLOCK_MONOTONIC,
+    IOS_CF_ABSOLUTE_TIME_MINUS_KERN_BOOTTIME,
+    MAC_MACH_ABSOLUTE_TIME,
+    WIN_QPC,
+    WIN_ROLLOVER_PROTECTED_TIME_GET_TIME
+  };
+
+  constexpr TimeTicks() : TimeBase(0) {}
+
+  // Platform-dependent tick count representing "right now." When
+  // IsHighResolution() returns false, the resolution of the clock could be
+  // as coarse as ~15.6ms. Otherwise, the resolution should be no worse than one
+  // microsecond.
+  static TimeTicks Now();
+
+  // Returns true if the high resolution clock is working on this system and
+  // Now() will return high resolution values. Note that, on systems where the
+  // high resolution clock works but is deemed inefficient, the low resolution
+  // clock will be used instead.
+  static bool IsHighResolution() WARN_UNUSED_RESULT;
+
+  // Returns true if TimeTicks is consistent across processes, meaning that
+  // timestamps taken on different processes can be safely compared with one
+  // another. (Note that, even on platforms where this returns true, time values
+  // from different threads that are within one tick of each other must be
+  // considered to have an ambiguous ordering.)
+  static bool IsConsistentAcrossProcesses() WARN_UNUSED_RESULT;
+
+#if defined(OS_FUCHSIA)
+  // Converts between TimeTicks and a ZX_CLOCK_MONOTONIC zx_time_t value.
+  static TimeTicks FromZxTime(zx_time_t nanos_since_boot);
+  zx_time_t ToZxTime() const;
+#endif
+
+#if defined(OS_WIN)
+  // Translates an absolute QPC timestamp into a TimeTicks value. The returned
+  // value has the same origin as Now(). Do NOT attempt to use this if
+  // IsHighResolution() returns false.
+  static TimeTicks FromQPCValue(LONGLONG qpc_value);
+#endif
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+  static TimeTicks FromMachAbsoluteTime(uint64_t mach_absolute_time);
+#endif  // defined(OS_MACOSX) && !defined(OS_IOS)
+
+#if defined(OS_ANDROID)
+  // Converts to TimeTicks the value obtained from SystemClock.uptimeMillis().
+  // Note: this conversion may be non-monotonic in relation to previously
+  // obtained TimeTicks::Now() values because of the truncation (to
+  // milliseconds) performed by uptimeMillis().
+  static TimeTicks FromUptimeMillis(jlong uptime_millis_value);
+#endif
+
+  // Get an estimate of the TimeTick value at the time of the UnixEpoch. Because
+  // Time and TimeTicks respond differently to user-set time and NTP
+  // adjustments, this number is only an estimate. Nevertheless, this can be
+  // useful when you need to relate the value of TimeTicks to a real time and
+  // date. Note: Upon first invocation, this function takes a snapshot of the
+  // realtime clock to establish a reference point.  This function will return
+  // the same value for the duration of the application, but will be different
+  // in future application runs.
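+  //
+  // For example (an approximation only):
+  //   Time now_approx =
+  //       Time::UnixEpoch() + (TimeTicks::Now() - TimeTicks::UnixEpoch());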
+  static TimeTicks UnixEpoch();
+
+  // Returns |this| snapped to the next tick, given a |tick_phase| and
+  // repeating |tick_interval| in both directions. |this| may be before,
+  // after, or equal to the |tick_phase|.
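+  // For example, with |tick_phase| at 0 and a 10 ms |tick_interval|, a |this|
+  // of 12 ms snaps to 20 ms, while a |this| of exactly 10 ms is unchanged.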
+  TimeTicks SnappedToNextTick(TimeTicks tick_phase,
+                              TimeDelta tick_interval) const;
+
+  // Returns an enum indicating the underlying clock being used to generate
+  // TimeTicks timestamps. This function should only be used for debugging and
+  // logging purposes.
+  static Clock GetClock();
+
+  // Converts an integer value representing TimeTicks to a class. This may be
+  // used when deserializing a |TimeTicks| structure, using a value known to be
+  // compatible. It is not provided as a constructor because the integer type
+  // may be unclear from the perspective of a caller.
+  //
+  // DEPRECATED - Do not use in new code. For deserializing TimeTicks values,
+  // prefer TimeTicks + TimeDelta(). http://crbug.com/634507
+  static constexpr TimeTicks FromInternalValue(int64_t us) {
+    return TimeTicks(us);
+  }
+
+#if defined(OS_WIN)
+ protected:
+  typedef DWORD (*TickFunctionType)(void);
+  static TickFunctionType SetMockTickFunction(TickFunctionType ticker);
+#endif
+
+ private:
+  friend class time_internal::TimeBase<TimeTicks>;
+
+  // Please use Now() to create a new object. This is for internal use
+  // and testing.
+  constexpr explicit TimeTicks(int64_t us) : TimeBase(us) {}
+};
+
+// For logging use only.
+BASE_EXPORT std::ostream& operator<<(std::ostream& os, TimeTicks time_ticks);
+
+// ThreadTicks ----------------------------------------------------------------
+
+// Represents a clock, specific to a particular thread, that runs only while
+// the thread is running.
+class BASE_EXPORT ThreadTicks : public time_internal::TimeBase<ThreadTicks> {
+ public:
+  ThreadTicks() : TimeBase(0) {
+  }
+
+  // Returns true if ThreadTicks::Now() is supported on this system.
+  static bool IsSupported() WARN_UNUSED_RESULT {
+#if (defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
+    (defined(OS_MACOSX) && !defined(OS_IOS)) || defined(OS_ANDROID) ||  \
+    defined(OS_FUCHSIA)
+    return true;
+#elif defined(OS_WIN)
+    return IsSupportedWin();
+#else
+    return false;
+#endif
+  }
+
+  // Waits until the initialization is completed. Needs to be guarded with a
+  // call to IsSupported().
+  static void WaitUntilInitialized() {
+#if defined(OS_WIN)
+    WaitUntilInitializedWin();
+#endif
+  }
+
+  // Returns thread-specific CPU-time on systems that support this feature.
+  // Needs to be guarded with a call to IsSupported(). Use this timer
+  // to (approximately) measure how much time the calling thread spent doing
+  // actual work vs. being de-scheduled. May return bogus results if the thread
+  // migrates to another CPU between two calls. Returns an empty ThreadTicks
+  // object until the initialization is completed. If a clock reading is
+  // absolutely needed, call WaitUntilInitialized() before this method.
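+  //
+  // A typical usage sketch (DoWork() is a placeholder):
+  //   if (ThreadTicks::IsSupported()) {
+  //     ThreadTicks::WaitUntilInitialized();
+  //     ThreadTicks start = ThreadTicks::Now();
+  //     DoWork();
+  //     TimeDelta thread_cpu_time = ThreadTicks::Now() - start;
+  //   }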
+  static ThreadTicks Now();
+
+#if defined(OS_WIN)
+  // Similar to Now() above except this returns thread-specific CPU time for an
+  // arbitrary thread. All comments for the Now() method above apply to this
+  // method as well.
+  static ThreadTicks GetForThread(const PlatformThreadHandle& thread_handle);
+#endif
+
+  // Converts an integer value representing ThreadTicks to a class. This may be
+  // used when deserializing a |ThreadTicks| structure, using a value known to
+  // be compatible. It is not provided as a constructor because the integer type
+  // may be unclear from the perspective of a caller.
+  //
+  // DEPRECATED - Do not use in new code. For deserializing ThreadTicks values,
+  // prefer ThreadTicks + TimeDelta(). http://crbug.com/634507
+  static constexpr ThreadTicks FromInternalValue(int64_t us) {
+    return ThreadTicks(us);
+  }
+
+ private:
+  friend class time_internal::TimeBase<ThreadTicks>;
+
+  // Please use Now() or GetForThread() to create a new object. This is for
+  // internal use and testing.
+  constexpr explicit ThreadTicks(int64_t us) : TimeBase(us) {}
+
+#if defined(OS_WIN)
+  FRIEND_TEST_ALL_PREFIXES(TimeTicks, TSCTicksPerSecond);
+
+  // Returns the frequency of the TSC in ticks per second, or 0 if it hasn't
+  // been measured yet. Needs to be guarded with a call to IsSupported().
+  // This method is declared here rather than in the anonymous namespace to
+  // allow testing.
+  static double TSCTicksPerSecond();
+
+  static bool IsSupportedWin() WARN_UNUSED_RESULT;
+  static void WaitUntilInitializedWin();
+#endif
+};
+
+// For logging use only.
+BASE_EXPORT std::ostream& operator<<(std::ostream& os, ThreadTicks thread_ticks);
+
+}  // namespace base
+
+#endif  // BASE_TIME_TIME_H_
diff --git a/base/time/time_android.cc b/base/time/time_android.cc
new file mode 100644
index 0000000..e0c4914
--- /dev/null
+++ b/base/time/time_android.cc
@@ -0,0 +1,26 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/time/time.h"
+
+namespace base {
+
+// static
+TimeTicks TimeTicks::FromUptimeMillis(jlong uptime_millis_value) {
+  // The implementation of the SystemClock.uptimeMillis() in AOSP uses the same
+  // clock as base::TimeTicks::Now(): clock_gettime(CLOCK_MONOTONIC), see in
+  // platform/system/core:
+  // 1. libutils/SystemClock.cpp
+  // 2. libutils/Timers.cpp
+  //
+  // We are not aware of any motivations for Android OEMs to modify the AOSP
+  // implementation of either uptimeMillis() or clock_gettime(CLOCK_MONOTONIC),
+  // so we assume that there are no such customizations.
+  //
+  // Under these assumptions the conversion is as safe as copying the value of
+  // base::TimeTicks::Now() with a loss of sub-millisecond precision.
+  return TimeTicks(uptime_millis_value * Time::kMicrosecondsPerMillisecond);
+}
+
+}  // namespace base
diff --git a/base/time/time_conversion_posix.cc b/base/time/time_conversion_posix.cc
new file mode 100644
index 0000000..ba0a2b2
--- /dev/null
+++ b/base/time/time_conversion_posix.cc
@@ -0,0 +1,67 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/time/time.h"
+
+#include <stdint.h>
+#include <sys/time.h>
+#include <time.h>
+
+#include <limits>
+
+#include "base/logging.h"
+
+namespace base {
+
+// static
+TimeDelta TimeDelta::FromTimeSpec(const timespec& ts) {
+  return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
+                   ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
+}
+
+struct timespec TimeDelta::ToTimeSpec() const {
+  int64_t microseconds = InMicroseconds();
+  time_t seconds = 0;
+  if (microseconds >= Time::kMicrosecondsPerSecond) {
+    seconds = InSeconds();
+    microseconds -= seconds * Time::kMicrosecondsPerSecond;
+  }
+  struct timespec result = {
+      seconds,
+      static_cast<long>(microseconds * Time::kNanosecondsPerMicrosecond)};
+  return result;
+}
+
+// static
+Time Time::FromTimeVal(struct timeval t) {
+  DCHECK_LT(t.tv_usec, static_cast<int>(Time::kMicrosecondsPerSecond));
+  DCHECK_GE(t.tv_usec, 0);
+  if (t.tv_usec == 0 && t.tv_sec == 0)
+    return Time();
+  if (t.tv_usec == static_cast<suseconds_t>(Time::kMicrosecondsPerSecond) - 1 &&
+      t.tv_sec == std::numeric_limits<time_t>::max())
+    return Max();
+  return Time((static_cast<int64_t>(t.tv_sec) * Time::kMicrosecondsPerSecond) +
+              t.tv_usec + kTimeTToMicrosecondsOffset);
+}
+
+struct timeval Time::ToTimeVal() const {
+  struct timeval result;
+  if (is_null()) {
+    result.tv_sec = 0;
+    result.tv_usec = 0;
+    return result;
+  }
+  if (is_max()) {
+    result.tv_sec = std::numeric_limits<time_t>::max();
+    result.tv_usec = static_cast<suseconds_t>(Time::kMicrosecondsPerSecond) - 1;
+    return result;
+  }
+  int64_t us = us_ - kTimeTToMicrosecondsOffset;
+  result.tv_sec = us / Time::kMicrosecondsPerSecond;
+  result.tv_usec = us % Time::kMicrosecondsPerSecond;
+  return result;
+}
+
+}  // namespace base
diff --git a/base/time/time_exploded_posix.cc b/base/time/time_exploded_posix.cc
new file mode 100644
index 0000000..627c6b4
--- /dev/null
+++ b/base/time/time_exploded_posix.cc
@@ -0,0 +1,300 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/time/time.h"
+
+#include <stdint.h>
+#include <sys/time.h>
+#include <time.h>
+#if defined(OS_ANDROID) && !defined(__LP64__)
+#include <time64.h>
+#endif
+#include <unistd.h>
+
+#include <limits>
+
+#include "base/numerics/safe_math.h"
+#include "base/synchronization/lock.h"
+#include "build/build_config.h"
+
+#if defined(OS_ANDROID)
+#include "base/os_compat_android.h"
+#elif defined(OS_NACL)
+#include "base/os_compat_nacl.h"
+#endif
+
+#if defined(OS_MACOSX)
+static_assert(sizeof(time_t) >= 8, "Y2038 problem!");
+#endif
+
+namespace {
+
+// This prevents a crash on traversing the environment global and looking up
+// the 'TZ' variable in libc. See: crbug.com/390567.
+base::Lock* GetSysTimeToTimeStructLock() {
+  static auto* lock = new base::Lock();
+  return lock;
+}
+
+// Define a system-specific SysTime that wraps either time_t or time64_t,
+// depending on the host system, and associated conversion functions.
+// See crbug.com/162007
+#if defined(OS_ANDROID) && !defined(__LP64__)
+typedef time64_t SysTime;
+
+SysTime SysTimeFromTimeStruct(struct tm* timestruct, bool is_local) {
+  base::AutoLock locked(*GetSysTimeToTimeStructLock());
+  if (is_local)
+    return mktime64(timestruct);
+  else
+    return timegm64(timestruct);
+}
+
+void SysTimeToTimeStruct(SysTime t, struct tm* timestruct, bool is_local) {
+  base::AutoLock locked(*GetSysTimeToTimeStructLock());
+  if (is_local)
+    localtime64_r(&t, timestruct);
+  else
+    gmtime64_r(&t, timestruct);
+}
+
+#elif defined(OS_AIX)
+
+// The function timegm is not available on AIX.
+time_t aix_timegm(struct tm* tm) {
+  time_t ret;
+  char* tz;
+
+  tz = getenv("TZ");
+  if (tz) {
+    tz = strdup(tz);
+  }
+  setenv("TZ", "GMT0", 1);
+  tzset();
+  ret = mktime(tm);
+  if (tz) {
+    setenv("TZ", tz, 1);
+    free(tz);
+  } else {
+    unsetenv("TZ");
+  }
+  tzset();
+  return ret;
+}
+
+typedef time_t SysTime;
+
+SysTime SysTimeFromTimeStruct(struct tm* timestruct, bool is_local) {
+  base::AutoLock locked(*GetSysTimeToTimeStructLock());
+  if (is_local)
+    return mktime(timestruct);
+  else
+    return aix_timegm(timestruct);
+}
+
+void SysTimeToTimeStruct(SysTime t, struct tm* timestruct, bool is_local) {
+  base::AutoLock locked(*GetSysTimeToTimeStructLock());
+  if (is_local)
+    localtime_r(&t, timestruct);
+  else
+    gmtime_r(&t, timestruct);
+}
+
+#else   // OS_ANDROID && !__LP64__
+typedef time_t SysTime;
+
+SysTime SysTimeFromTimeStruct(struct tm* timestruct, bool is_local) {
+  base::AutoLock locked(*GetSysTimeToTimeStructLock());
+  if (is_local)
+    return mktime(timestruct);
+  else
+    return timegm(timestruct);
+}
+
+void SysTimeToTimeStruct(SysTime t, struct tm* timestruct, bool is_local) {
+  base::AutoLock locked(*GetSysTimeToTimeStructLock());
+  if (is_local)
+    localtime_r(&t, timestruct);
+  else
+    gmtime_r(&t, timestruct);
+}
+#endif  // OS_ANDROID
+
+}  // namespace
+
+namespace base {
+
+void Time::Explode(bool is_local, Exploded* exploded) const {
+  // Time stores times with microsecond resolution, but Exploded only carries
+  // millisecond resolution, so begin by being lossy.  Adjust from Windows
+  // epoch (1601) to Unix epoch (1970).
+  int64_t microseconds = us_ - kTimeTToMicrosecondsOffset;
+  // The following values are all rounded towards -infinity.
+  int64_t milliseconds;  // Milliseconds since epoch.
+  SysTime seconds;       // Seconds since epoch.
+  int millisecond;       // Exploded millisecond value (0-999).
+  if (microseconds >= 0) {
+    // Rounding towards -infinity <=> rounding towards 0, in this case.
+    milliseconds = microseconds / kMicrosecondsPerMillisecond;
+    seconds = milliseconds / kMillisecondsPerSecond;
+    millisecond = milliseconds % kMillisecondsPerSecond;
+  } else {
+    // Round these *down* (towards -infinity).
+    milliseconds = (microseconds - kMicrosecondsPerMillisecond + 1) /
+                   kMicrosecondsPerMillisecond;
+    seconds =
+        (milliseconds - kMillisecondsPerSecond + 1) / kMillisecondsPerSecond;
+    // Make this nonnegative (and between 0 and 999 inclusive).
+    millisecond = milliseconds % kMillisecondsPerSecond;
+    if (millisecond < 0)
+      millisecond += kMillisecondsPerSecond;
+  }
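+  // Worked example for the negative branch: microseconds == -1 (one
+  // microsecond before the epoch) gives milliseconds == -1 and seconds == -1,
+  // while millisecond == (-1 % 1000) + 1000 == 999, i.e. 23:59:59.999 on
+  // 1969-12-31, matching floor division.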
+
+  struct tm timestruct;
+  SysTimeToTimeStruct(seconds, &timestruct, is_local);
+
+  exploded->year = timestruct.tm_year + 1900;
+  exploded->month = timestruct.tm_mon + 1;
+  exploded->day_of_week = timestruct.tm_wday;
+  exploded->day_of_month = timestruct.tm_mday;
+  exploded->hour = timestruct.tm_hour;
+  exploded->minute = timestruct.tm_min;
+  exploded->second = timestruct.tm_sec;
+  exploded->millisecond = millisecond;
+}
+
+// static
+bool Time::FromExploded(bool is_local, const Exploded& exploded, Time* time) {
+  CheckedNumeric<int> month = exploded.month;
+  month--;
+  CheckedNumeric<int> year = exploded.year;
+  year -= 1900;
+  if (!month.IsValid() || !year.IsValid()) {
+    *time = Time(0);
+    return false;
+  }
+
+  struct tm timestruct;
+  timestruct.tm_sec = exploded.second;
+  timestruct.tm_min = exploded.minute;
+  timestruct.tm_hour = exploded.hour;
+  timestruct.tm_mday = exploded.day_of_month;
+  timestruct.tm_mon = month.ValueOrDie();
+  timestruct.tm_year = year.ValueOrDie();
+  timestruct.tm_wday = exploded.day_of_week;  // mktime/timegm ignore this
+  timestruct.tm_yday = 0;                     // mktime/timegm ignore this
+  timestruct.tm_isdst = -1;                   // attempt to figure it out
+#if !defined(OS_NACL) && !defined(OS_SOLARIS) && !defined(OS_AIX)
+  timestruct.tm_gmtoff = 0;   // not a POSIX field, so mktime/timegm ignore
+  timestruct.tm_zone = nullptr;  // not a POSIX field, so mktime/timegm ignore
+#endif
+
+  SysTime seconds;
+
+  // Certain exploded dates do not really exist due to daylight saving time
+  // transitions, and this causes mktime() to return implementation-defined
+  // values when tm_isdst is set to -1. On Android, the function will return
+  // -1, while the C libraries of other platforms typically return a
+  // liberally-chosen value. Handling this requires the special code below.
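+  // (For example, in a timezone that springs forward from 02:00 to 03:00,
+  // a wall-clock time of 02:30 on the transition day does not exist.)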
+
+  // SysTimeFromTimeStruct() modifies the input structure, so save the
+  // current value first.
+  struct tm timestruct0 = timestruct;
+
+  seconds = SysTimeFromTimeStruct(&timestruct, is_local);
+  if (seconds == -1) {
+    // Get the time values with tm_isdst == 0 and 1, then select the closest one
+    // to UTC 00:00:00 that isn't -1.
+    timestruct = timestruct0;
+    timestruct.tm_isdst = 0;
+    int64_t seconds_isdst0 = SysTimeFromTimeStruct(&timestruct, is_local);
+
+    timestruct = timestruct0;
+    timestruct.tm_isdst = 1;
+    int64_t seconds_isdst1 = SysTimeFromTimeStruct(&timestruct, is_local);
+
+    // seconds_isdst0 or seconds_isdst1 can be -1 for some timezones.
+    // E.g. "CLST" (Chile Summer Time) returns -1 for 'tm_isdt == 1'.
+    if (seconds_isdst0 < 0)
+      seconds = seconds_isdst1;
+    else if (seconds_isdst1 < 0)
+      seconds = seconds_isdst0;
+    else
+      seconds = std::min(seconds_isdst0, seconds_isdst1);
+  }
+
+  // Handle overflow.  Clamping the range to what mktime and timegm might
+  // return is the best that can be done here.  It's not ideal, but it's better
+  // than failing here or ignoring the overflow case and treating each time
+  // overflow as one second prior to the epoch.
+  int64_t milliseconds = 0;
+  if (seconds == -1 && (exploded.year < 1969 || exploded.year > 1970)) {
+    // If exploded.year is 1969 or 1970, take -1 as correct, with the
+    // time indicating 1 second prior to the epoch.  (1970 is allowed to handle
+    // time zone and DST offsets.)  Otherwise, return the most future or past
+    // time representable.  Assumes the time_t epoch is 1970-01-01 00:00:00 UTC.
+    //
+    // The minimum and maximum representable times that mktime and timegm could
+    // return are used here instead of values outside that range to allow for
+    // proper round-tripping between exploded and counter-type time
+    // representations in the presence of possible truncation to time_t by
+    // division and use with other functions that accept time_t.
+    //
+    // When representing the most distant time in the future, add in an extra
+    // 999ms to avoid the time being less than any other possible value that
+    // this function can return.
+
+    // On Android, SysTime is int64_t, so special care must be taken to avoid
+    // overflows.
+    const int64_t min_seconds = (sizeof(SysTime) < sizeof(int64_t))
+                                    ? std::numeric_limits<SysTime>::min()
+                                    : std::numeric_limits<int32_t>::min();
+    const int64_t max_seconds = (sizeof(SysTime) < sizeof(int64_t))
+                                    ? std::numeric_limits<SysTime>::max()
+                                    : std::numeric_limits<int32_t>::max();
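+    // For instance, with a 32-bit time_t the future clamp becomes
+    // 2147483647 * 1000 + 999 milliseconds, which still fits comfortably in
+    // an int64_t.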
+    if (exploded.year < 1969) {
+      milliseconds = min_seconds * kMillisecondsPerSecond;
+    } else {
+      milliseconds = max_seconds * kMillisecondsPerSecond;
+      milliseconds += (kMillisecondsPerSecond - 1);
+    }
+  } else {
+    base::CheckedNumeric<int64_t> checked_millis = seconds;
+    checked_millis *= kMillisecondsPerSecond;
+    checked_millis += exploded.millisecond;
+    if (!checked_millis.IsValid()) {
+      *time = base::Time(0);
+      return false;
+    }
+    milliseconds = checked_millis.ValueOrDie();
+  }
+
+  // Adjust from Unix (1970) to Windows (1601) epoch avoiding overflows.
+  base::CheckedNumeric<int64_t> checked_microseconds_win_epoch = milliseconds;
+  checked_microseconds_win_epoch *= kMicrosecondsPerMillisecond;
+  checked_microseconds_win_epoch += kTimeTToMicrosecondsOffset;
+  if (!checked_microseconds_win_epoch.IsValid()) {
+    *time = base::Time(0);
+    return false;
+  }
+  base::Time converted_time(checked_microseconds_win_epoch.ValueOrDie());
+
+  // If |exploded.day_of_month| is set to 31 on a 28-30 day month, it will
+  // return the first day of the next month. Thus round-trip the time and
+  // compare the initial |exploded| with |utc_to_exploded| time.
+  base::Time::Exploded to_exploded;
+  if (!is_local)
+    converted_time.UTCExplode(&to_exploded);
+  else
+    converted_time.LocalExplode(&to_exploded);
+
+  if (ExplodedMostlyEquals(to_exploded, exploded)) {
+    *time = converted_time;
+    return true;
+  }
+
+  *time = Time(0);
+  return false;
+}
+
+}  // namespace base
diff --git a/base/time/time_fuchsia.cc b/base/time/time_fuchsia.cc
new file mode 100644
index 0000000..8b658b4
--- /dev/null
+++ b/base/time/time_fuchsia.cc
@@ -0,0 +1,93 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/time/time.h"
+
+#include <zircon/syscalls.h>
+
+#include "base/compiler_specific.h"
+#include "base/numerics/checked_math.h"
+#include "base/time/time_override.h"
+
+namespace base {
+
+namespace {
+
+// Helper function to map an unsigned integer with nanosecond timebase to a
+// signed integer with microsecond timebase.
+ALWAYS_INLINE int64_t ZxTimeToMicroseconds(zx_time_t nanos) {
+  const zx_time_t micros =
+      nanos / static_cast<zx_time_t>(base::Time::kNanosecondsPerMicrosecond);
+  return static_cast<int64_t>(micros);
+}
+
+}  // namespace
+
+// Time -----------------------------------------------------------------------
+
+namespace subtle {
+Time TimeNowIgnoringOverride() {
+  const zx_time_t nanos_since_unix_epoch = zx_clock_get(ZX_CLOCK_UTC);
+  CHECK(nanos_since_unix_epoch != 0);
+  // The following expression will overflow in the year 289938 A.D.:
+  return Time() + TimeDelta::FromMicroseconds(
+                      ZxTimeToMicroseconds(nanos_since_unix_epoch) +
+                      Time::kTimeTToMicrosecondsOffset);
+}
+
+Time TimeNowFromSystemTimeIgnoringOverride() {
+  // Just use TimeNowIgnoringOverride() because it returns the system time.
+  return TimeNowIgnoringOverride();
+}
+}  // namespace subtle
+
+// TimeTicks ------------------------------------------------------------------
+
+namespace subtle {
+TimeTicks TimeTicksNowIgnoringOverride() {
+  const zx_time_t nanos_since_boot = zx_clock_get(ZX_CLOCK_MONOTONIC);
+  CHECK(nanos_since_boot != 0);
+  return TimeTicks() +
+         TimeDelta::FromMicroseconds(ZxTimeToMicroseconds(nanos_since_boot));
+}
+}  // namespace subtle
+
+// static
+TimeTicks::Clock TimeTicks::GetClock() {
+  return Clock::FUCHSIA_ZX_CLOCK_MONOTONIC;
+}
+
+// static
+bool TimeTicks::IsHighResolution() {
+  return true;
+}
+
+// static
+bool TimeTicks::IsConsistentAcrossProcesses() {
+  return true;
+}
+
+// static
+TimeTicks TimeTicks::FromZxTime(zx_time_t nanos_since_boot) {
+  return TimeTicks(ZxTimeToMicroseconds(nanos_since_boot));
+}
+
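+// Converts this tick count (in microseconds) to a zx_time_t tick count in
+// nanoseconds; ValueOrDie() CHECK-fails if the multiplication overflows.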
+zx_time_t TimeTicks::ToZxTime() const {
+  CheckedNumeric<zx_time_t> result(base::Time::kNanosecondsPerMicrosecond);
+  result *= us_;
+  return result.ValueOrDie();
+}
+
+// ThreadTicks ----------------------------------------------------------------
+
+namespace subtle {
+ThreadTicks ThreadTicksNowIgnoringOverride() {
+  const zx_time_t nanos_since_thread_started = zx_clock_get(ZX_CLOCK_THREAD);
+  CHECK(nanos_since_thread_started != 0);
+  return ThreadTicks() + TimeDelta::FromMicroseconds(
+                             ZxTimeToMicroseconds(nanos_since_thread_started));
+}
+}  // namespace subtle
+
+}  // namespace base
diff --git a/base/time/time_mac.cc b/base/time/time_mac.cc
new file mode 100644
index 0000000..7ae7459
--- /dev/null
+++ b/base/time/time_mac.cc
@@ -0,0 +1,314 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/time/time.h"
+
+#include <CoreFoundation/CFDate.h>
+#include <CoreFoundation/CFTimeZone.h>
+#include <mach/mach.h>
+#include <mach/mach_time.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/sysctl.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <time.h>
+
+#include "base/logging.h"
+#include "base/mac/mach_logging.h"
+#include "base/mac/scoped_cftyperef.h"
+#include "base/mac/scoped_mach_port.h"
+#include "base/macros.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/time/time_override.h"
+#include "build/build_config.h"
+
+#if defined(OS_IOS)
+#include <time.h>
+#include "base/ios/ios_util.h"
+#endif
+
+namespace {
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+int64_t MachAbsoluteTimeToTicks(uint64_t mach_absolute_time) {
+  static mach_timebase_info_data_t timebase_info;
+  if (timebase_info.denom == 0) {
+    // Zero-initialization of statics guarantees that denom will be 0 before
+    // calling mach_timebase_info.  mach_timebase_info will never set denom to
+    // 0 as that would be invalid, so the zero-check can be used to determine
+    // whether mach_timebase_info has already been called.  This is
+    // recommended by Apple's QA1398.
+    kern_return_t kr = mach_timebase_info(&timebase_info);
+    MACH_DCHECK(kr == KERN_SUCCESS, kr) << "mach_timebase_info";
+  }
+
+  // timebase_info converts absolute time tick units into nanoseconds.  Convert
+  // to microseconds up front to stave off overflows.
+  base::CheckedNumeric<uint64_t> result(mach_absolute_time /
+                                        base::Time::kNanosecondsPerMicrosecond);
+  result *= timebase_info.numer;
+  result /= timebase_info.denom;
+
+  // Don't bother with the rollover handling that the Windows version does.
+  // With numer and denom = 1 (the expected case), the 64-bit absolute time
+  // reported in nanoseconds is enough to last nearly 585 years.
+  return base::checked_cast<int64_t>(result.ValueOrDie());
+}
+#endif  // defined(OS_MACOSX) && !defined(OS_IOS)
+
+// Returns a monotonically increasing number of ticks in microseconds since
+// some unspecified starting point.
+int64_t ComputeCurrentTicks() {
+#if defined(OS_IOS)
+  // iOS 10 supports clock_gettime(CLOCK_MONOTONIC, ...), which is
+  // around 15 times faster than sysctl() call. Use it if possible;
+  // otherwise, fall back to sysctl().
+  if (__builtin_available(iOS 10, *)) {
+    struct timespec tp;
+    if (clock_gettime(CLOCK_MONOTONIC, &tp) == 0) {
+      return static_cast<int64_t>(tp.tv_sec) * 1000000 + tp.tv_nsec / 1000;
+    }
+  }
+
+  // On iOS mach_absolute_time stops while the device is sleeping. Instead use
+  // now - KERN_BOOTTIME to get a time difference that is not impacted by clock
+  // changes. KERN_BOOTTIME will be updated by the system whenever the system
+  // clock changes.
+  struct timeval boottime;
+  int mib[2] = {CTL_KERN, KERN_BOOTTIME};
+  size_t size = sizeof(boottime);
+  int kr = sysctl(mib, arraysize(mib), &boottime, &size, nullptr, 0);
+  DCHECK_EQ(KERN_SUCCESS, kr);
+  base::TimeDelta time_difference =
+      base::subtle::TimeNowIgnoringOverride() -
+      (base::Time::FromTimeT(boottime.tv_sec) +
+       base::TimeDelta::FromMicroseconds(boottime.tv_usec));
+  return time_difference.InMicroseconds();
+#else
+  // mach_absolute_time is the authoritative tick source on the Mac.  Other
+  // calls with less precision (such as TickCount) just call through to
+  // mach_absolute_time.
+  return MachAbsoluteTimeToTicks(mach_absolute_time());
+#endif  // defined(OS_IOS)
+}
+
+int64_t ComputeThreadTicks() {
+#if defined(OS_IOS)
+  NOTREACHED();
+  return 0;
+#else
+  base::mac::ScopedMachSendRight thread(mach_thread_self());
+  mach_msg_type_number_t thread_info_count = THREAD_BASIC_INFO_COUNT;
+  thread_basic_info_data_t thread_info_data;
+
+  if (thread.get() == MACH_PORT_NULL) {
+    DLOG(ERROR) << "Failed to get mach_thread_self()";
+    return 0;
+  }
+
+  kern_return_t kr = thread_info(
+      thread.get(),
+      THREAD_BASIC_INFO,
+      reinterpret_cast<thread_info_t>(&thread_info_data),
+      &thread_info_count);
+  MACH_DCHECK(kr == KERN_SUCCESS, kr) << "thread_info";
+
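+  // Thread CPU time is the sum of the thread's user and system time; it is
+  // accumulated with overflow checks and expressed in microseconds.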
+  base::CheckedNumeric<int64_t> absolute_micros(
+      thread_info_data.user_time.seconds +
+      thread_info_data.system_time.seconds);
+  absolute_micros *= base::Time::kMicrosecondsPerSecond;
+  absolute_micros += (thread_info_data.user_time.microseconds +
+                      thread_info_data.system_time.microseconds);
+  return absolute_micros.ValueOrDie();
+#endif  // defined(OS_IOS)
+}
+
+}  // namespace
+
+namespace base {
+
+// The Time routines in this file use Mach and CoreFoundation APIs, since the
+// POSIX definition of time_t in Mac OS X wraps around after 2038--and
+// there are already cookie expiration dates, etc., past that time out in
+// the field.  Using CFDate prevents that problem, and using mach_absolute_time
+// for TimeTicks gives us nice high-resolution interval timing.
+
+// Time -----------------------------------------------------------------------
+
+namespace subtle {
+Time TimeNowIgnoringOverride() {
+  return Time::FromCFAbsoluteTime(CFAbsoluteTimeGetCurrent());
+}
+
+Time TimeNowFromSystemTimeIgnoringOverride() {
+  // Just use TimeNowIgnoringOverride() because it returns the system time.
+  return TimeNowIgnoringOverride();
+}
+}  // namespace subtle
+
+// static
+Time Time::FromCFAbsoluteTime(CFAbsoluteTime t) {
+  static_assert(std::numeric_limits<CFAbsoluteTime>::has_infinity,
+                "CFAbsoluteTime must have an infinity value");
+  if (t == 0)
+    return Time();  // Consider 0 as a null Time.
+  if (t == std::numeric_limits<CFAbsoluteTime>::infinity())
+    return Max();
+  return Time(static_cast<int64_t>((t + kCFAbsoluteTimeIntervalSince1970) *
+                                   kMicrosecondsPerSecond) +
+              kTimeTToMicrosecondsOffset);
+}
+
+CFAbsoluteTime Time::ToCFAbsoluteTime() const {
+  static_assert(std::numeric_limits<CFAbsoluteTime>::has_infinity,
+                "CFAbsoluteTime must have an infinity value");
+  if (is_null())
+    return 0;  // Consider 0 as a null Time.
+  if (is_max())
+    return std::numeric_limits<CFAbsoluteTime>::infinity();
+  return (static_cast<CFAbsoluteTime>(us_ - kTimeTToMicrosecondsOffset) /
+          kMicrosecondsPerSecond) -
+         kCFAbsoluteTimeIntervalSince1970;
+}
+
+// Note: These implementations of Time::FromExploded() and Time::Explode() are
+// only used on iOS now. Since Mac is now always 64-bit, we can use the POSIX
+// versions of these functions as time_t is not capped at year 2038 on 64-bit
+// builds. The POSIX functions are preferred since they don't suffer from some
+// performance problems that are present in these implementations.
+// See crbug.com/781601 for more details.
+#if defined(OS_IOS)
+// static
+bool Time::FromExploded(bool is_local, const Exploded& exploded, Time* time) {
+  base::ScopedCFTypeRef<CFTimeZoneRef> time_zone(
+      is_local
+          ? CFTimeZoneCopySystem()
+          : CFTimeZoneCreateWithTimeIntervalFromGMT(kCFAllocatorDefault, 0));
+  base::ScopedCFTypeRef<CFCalendarRef> gregorian(CFCalendarCreateWithIdentifier(
+      kCFAllocatorDefault, kCFGregorianCalendar));
+  CFCalendarSetTimeZone(gregorian, time_zone);
+  CFAbsoluteTime absolute_time;
+  // 'S' is not defined in componentDesc in Apple documentation, but can be
+  // found at http://www.opensource.apple.com/source/CF/CF-855.17/CFCalendar.c
+  CFCalendarComposeAbsoluteTime(
+      gregorian, &absolute_time, "yMdHmsS", exploded.year, exploded.month,
+      exploded.day_of_month, exploded.hour, exploded.minute, exploded.second,
+      exploded.millisecond);
+  CFAbsoluteTime seconds = absolute_time + kCFAbsoluteTimeIntervalSince1970;
+
+  // CFAbsoluteTime is a typedef of double. Convert seconds to microseconds
+  // and then cast to int64_t. If the result cannot be represented as an
+  // int64_t, fail to avoid overflow.
+  double microseconds =
+      (seconds * kMicrosecondsPerSecond) + kTimeTToMicrosecondsOffset;
+  if (microseconds > std::numeric_limits<int64_t>::max() ||
+      microseconds < std::numeric_limits<int64_t>::min()) {
+    *time = Time(0);
+    return false;
+  }
+
+  base::Time converted_time = Time(static_cast<int64_t>(microseconds));
+
+  // If |exploded.day_of_month| is set to 31 on a 28-30 day month, it will
+  // return the first day of the next month. Thus round-trip the time and
+  // compare the initial |exploded| with |utc_to_exploded| time.
+  base::Time::Exploded to_exploded;
+  if (!is_local)
+    converted_time.UTCExplode(&to_exploded);
+  else
+    converted_time.LocalExplode(&to_exploded);
+
+  if (ExplodedMostlyEquals(to_exploded, exploded)) {
+    *time = converted_time;
+    return true;
+  }
+
+  *time = Time(0);
+  return false;
+}
+
+void Time::Explode(bool is_local, Exploded* exploded) const {
+  // Avoid rounding issues, by only putting the integral number of seconds
+  // (rounded towards -infinity) into a |CFAbsoluteTime| (which is a |double|).
+  int64_t microsecond = us_ % kMicrosecondsPerSecond;
+  if (microsecond < 0)
+    microsecond += kMicrosecondsPerSecond;
+  CFAbsoluteTime seconds = ((us_ - microsecond - kTimeTToMicrosecondsOffset) /
+                            kMicrosecondsPerSecond) -
+                           kCFAbsoluteTimeIntervalSince1970;
+
+  base::ScopedCFTypeRef<CFTimeZoneRef> time_zone(
+      is_local
+          ? CFTimeZoneCopySystem()
+          : CFTimeZoneCreateWithTimeIntervalFromGMT(kCFAllocatorDefault, 0));
+  base::ScopedCFTypeRef<CFCalendarRef> gregorian(CFCalendarCreateWithIdentifier(
+      kCFAllocatorDefault, kCFGregorianCalendar));
+  CFCalendarSetTimeZone(gregorian, time_zone);
+  int second, day_of_week;
+  // 'E' sets the day of week, but is not defined in componentDesc in Apple
+  // documentation. It can be found in open source code here:
+  // http://www.opensource.apple.com/source/CF/CF-855.17/CFCalendar.c
+  CFCalendarDecomposeAbsoluteTime(gregorian, seconds, "yMdHmsE",
+                                  &exploded->year, &exploded->month,
+                                  &exploded->day_of_month, &exploded->hour,
+                                  &exploded->minute, &second, &day_of_week);
+  // Make sure seconds are rounded down towards -infinity.
+  exploded->second = floor(second);
+  // |Exploded|'s convention for day of week is 0 = Sunday, i.e. different
+  // from CF's 1 = Sunday.
+  exploded->day_of_week = (day_of_week - 1) % 7;
+  // Calculate milliseconds ourselves, since we rounded the |seconds|, making
+  // sure to round towards -infinity.
+  exploded->millisecond =
+      (microsecond >= 0) ? microsecond / kMicrosecondsPerMillisecond :
+                           (microsecond - kMicrosecondsPerMillisecond + 1) /
+                               kMicrosecondsPerMillisecond;
+}
+#endif  // OS_IOS
+
+// TimeTicks ------------------------------------------------------------------
+
+namespace subtle {
+TimeTicks TimeTicksNowIgnoringOverride() {
+  return TimeTicks() + TimeDelta::FromMicroseconds(ComputeCurrentTicks());
+}
+}  // namespace subtle
+
+// static
+bool TimeTicks::IsHighResolution() {
+  return true;
+}
+
+// static
+bool TimeTicks::IsConsistentAcrossProcesses() {
+  return true;
+}
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+// static
+TimeTicks TimeTicks::FromMachAbsoluteTime(uint64_t mach_absolute_time) {
+  return TimeTicks(MachAbsoluteTimeToTicks(mach_absolute_time));
+}
+#endif  // defined(OS_MACOSX) && !defined(OS_IOS)
+
+// static
+TimeTicks::Clock TimeTicks::GetClock() {
+#if defined(OS_IOS)
+  return Clock::IOS_CF_ABSOLUTE_TIME_MINUS_KERN_BOOTTIME;
+#else
+  return Clock::MAC_MACH_ABSOLUTE_TIME;
+#endif  // defined(OS_IOS)
+}
+
+// ThreadTicks ----------------------------------------------------------------
+
+namespace subtle {
+ThreadTicks ThreadTicksNowIgnoringOverride() {
+  return ThreadTicks() + TimeDelta::FromMicroseconds(ComputeThreadTicks());
+}
+}  // namespace subtle
+
+}  // namespace base
diff --git a/base/time/time_now_posix.cc b/base/time/time_now_posix.cc
new file mode 100644
index 0000000..5427836
--- /dev/null
+++ b/base/time/time_now_posix.cc
@@ -0,0 +1,123 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/time/time.h"
+
+#include <stdint.h>
+#include <sys/time.h>
+#include <time.h>
+#if defined(OS_ANDROID) && !defined(__LP64__)
+#include <time64.h>
+#endif
+#include <unistd.h>
+
+#include "base/logging.h"
+#include "base/numerics/safe_math.h"
+#include "base/time/time_override.h"
+#include "build/build_config.h"
+
+// Ensure the Fuchsia and Mac builds do not include this module. Instead, a
+// non-POSIX implementation is used for sampling the system clocks.
+#if defined(OS_FUCHSIA) || defined(OS_MACOSX)
+#error "This implementation is for POSIX platforms other than Fuchsia or Mac."
+#endif
+
+namespace {
+
+int64_t ConvertTimespecToMicros(const struct timespec& ts) {
+  // On 32-bit systems, the calculation cannot overflow int64_t.
+  // 2**32 * 1000000 + 2**64 / 1000 < 2**63
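+  // (Roughly: 2**32 * 10**6 ~= 4.3e15 and 2**64 / 1000 ~= 1.8e16, whose sum
+  // ~= 2.3e16 is far below 2**63 ~= 9.2e18.)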
+  if (sizeof(ts.tv_sec) <= 4 && sizeof(ts.tv_nsec) <= 8) {
+    int64_t result = ts.tv_sec;
+    result *= base::Time::kMicrosecondsPerSecond;
+    result += (ts.tv_nsec / base::Time::kNanosecondsPerMicrosecond);
+    return result;
+  } else {
+    base::CheckedNumeric<int64_t> result(ts.tv_sec);
+    result *= base::Time::kMicrosecondsPerSecond;
+    result += (ts.tv_nsec / base::Time::kNanosecondsPerMicrosecond);
+    return result.ValueOrDie();
+  }
+}
+
+// Helper function to get results from clock_gettime() and convert to a
+// microsecond timebase. The minimum requirement is that CLOCK_MONOTONIC be
+// supported on the system. FreeBSD 6 has CLOCK_MONOTONIC but defines
+// _POSIX_MONOTONIC_CLOCK to -1.
+#if (defined(OS_POSIX) && defined(_POSIX_MONOTONIC_CLOCK) && \
+     _POSIX_MONOTONIC_CLOCK >= 0) ||                         \
+    defined(OS_BSD) || defined(OS_ANDROID)
+int64_t ClockNow(clockid_t clk_id) {
+  struct timespec ts;
+  CHECK(clock_gettime(clk_id, &ts) == 0);
+  return ConvertTimespecToMicros(ts);
+}
+#else  // _POSIX_MONOTONIC_CLOCK
+#error No usable tick clock function on this platform.
+#endif  // _POSIX_MONOTONIC_CLOCK
+
+}  // namespace
+
+namespace base {
+
+// Time -----------------------------------------------------------------------
+
+namespace subtle {
+Time TimeNowIgnoringOverride() {
+  struct timeval tv;
+  struct timezone tz = {0, 0};  // UTC
+  CHECK(gettimeofday(&tv, &tz) == 0);
+  // Combine seconds and microseconds in a 64-bit field containing microseconds
+  // since the epoch.  That's enough for roughly 292,000 years.  Adjust from
+  // Unix (1970) to Windows (1601) epoch.
+  return Time() + TimeDelta::FromMicroseconds(
+                      (tv.tv_sec * Time::kMicrosecondsPerSecond + tv.tv_usec) +
+                      Time::kTimeTToMicrosecondsOffset);
+}
+
+Time TimeNowFromSystemTimeIgnoringOverride() {
+  // Just use TimeNowIgnoringOverride() because it returns the system time.
+  return TimeNowIgnoringOverride();
+}
+}  // namespace subtle
+
+// TimeTicks ------------------------------------------------------------------
+
+namespace subtle {
+TimeTicks TimeTicksNowIgnoringOverride() {
+  return TimeTicks() + TimeDelta::FromMicroseconds(ClockNow(CLOCK_MONOTONIC));
+}
+}  // namespace subtle
+
+// static
+TimeTicks::Clock TimeTicks::GetClock() {
+  return Clock::LINUX_CLOCK_MONOTONIC;
+}
+
+// static
+bool TimeTicks::IsHighResolution() {
+  return true;
+}
+
+// static
+bool TimeTicks::IsConsistentAcrossProcesses() {
+  return true;
+}
+
+// ThreadTicks ----------------------------------------------------------------
+
+namespace subtle {
+ThreadTicks ThreadTicksNowIgnoringOverride() {
+#if (defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
+    defined(OS_ANDROID)
+  return ThreadTicks() +
+         TimeDelta::FromMicroseconds(ClockNow(CLOCK_THREAD_CPUTIME_ID));
+#else
+  NOTREACHED();
+  return ThreadTicks();
+#endif
+}
+}  // namespace subtle
+
+}  // namespace base
diff --git a/base/time/time_override.cc b/base/time/time_override.cc
new file mode 100644
index 0000000..09692b5
--- /dev/null
+++ b/base/time/time_override.cc
@@ -0,0 +1,45 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/time/time_override.h"
+
+namespace base {
+namespace subtle {
+
+#if DCHECK_IS_ON()
+// static
+bool ScopedTimeClockOverrides::overrides_active_ = false;
+#endif
+
+ScopedTimeClockOverrides::ScopedTimeClockOverrides(
+    TimeNowFunction time_override,
+    TimeTicksNowFunction time_ticks_override,
+    ThreadTicksNowFunction thread_ticks_override) {
+#if DCHECK_IS_ON()
+  DCHECK(!overrides_active_);
+  overrides_active_ = true;
+#endif
+  if (time_override) {
+    internal::g_time_now_function = time_override;
+    internal::g_time_now_from_system_time_function = time_override;
+  }
+  if (time_ticks_override)
+    internal::g_time_ticks_now_function = time_ticks_override;
+  if (thread_ticks_override)
+    internal::g_thread_ticks_now_function = thread_ticks_override;
+}
+
+ScopedTimeClockOverrides::~ScopedTimeClockOverrides() {
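+  // Note that this unconditionally restores the default implementations for
+  // all four clocks, including any that were not overridden by this instance.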
+  internal::g_time_now_function = &TimeNowIgnoringOverride;
+  internal::g_time_now_from_system_time_function =
+      &TimeNowFromSystemTimeIgnoringOverride;
+  internal::g_time_ticks_now_function = &TimeTicksNowIgnoringOverride;
+  internal::g_thread_ticks_now_function = &ThreadTicksNowIgnoringOverride;
+#if DCHECK_IS_ON()
+  overrides_active_ = false;
+#endif
+}
+
+}  // namespace subtle
+}  // namespace base
diff --git a/base/time/time_override.h b/base/time/time_override.h
new file mode 100644
index 0000000..1586a87
--- /dev/null
+++ b/base/time/time_override.h
@@ -0,0 +1,74 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TIME_TIME_OVERRIDE_H_
+#define BASE_TIME_TIME_OVERRIDE_H_
+
+#include "base/base_export.h"
+#include "base/time/time.h"
+
+namespace base {
+
+using TimeNowFunction = decltype(&Time::Now);
+using TimeTicksNowFunction = decltype(&TimeTicks::Now);
+using ThreadTicksNowFunction = decltype(&ThreadTicks::Now);
+
+// Time overrides should be used with extreme caution. Discuss with //base/time
+// OWNERS before adding a new one.
+namespace subtle {
+
+// Override the return value of Time::Now and Time::NowFromSystemTime /
+// TimeTicks::Now / ThreadTicks::Now to emulate time, e.g. for tests or to
+// modify progression of time. Note that the override should be set while
+// single-threaded and before the first call to Now() to avoid threading issues
+// and inconsistencies in returned values. Nested overrides are not allowed.
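+//
+// A minimal usage sketch (FakeNow below is a hypothetical test clock):
+//
+//   Time FakeNow() { return Time::UnixEpoch(); }
+//   ...
+//   ScopedTimeClockOverrides overrides(&FakeNow, nullptr, nullptr);
+//   // Time::Now() now returns Time::UnixEpoch() until |overrides| is
+//   // destroyed.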
+class BASE_EXPORT ScopedTimeClockOverrides {
+ public:
+  // Pass |nullptr| for any override if it shouldn't be overridden.
+  ScopedTimeClockOverrides(TimeNowFunction time_override,
+                           TimeTicksNowFunction time_ticks_override,
+                           ThreadTicksNowFunction thread_ticks_override);
+
+  // Restores the platform default Now() functions.
+  ~ScopedTimeClockOverrides();
+
+ private:
+#if DCHECK_IS_ON()
+  static bool overrides_active_;
+#endif
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ScopedTimeClockOverrides);
+};
+
+// These methods return the platform default Time::Now / TimeTicks::Now /
+// ThreadTicks::Now values even while an override is in place. These methods
+// should only be used in places where emulated time should be disregarded. For
+// example, they can be used to implement test timeouts for tests that may
+// override time.
+BASE_EXPORT Time TimeNowIgnoringOverride();
+BASE_EXPORT Time TimeNowFromSystemTimeIgnoringOverride();
+BASE_EXPORT TimeTicks TimeTicksNowIgnoringOverride();
+BASE_EXPORT ThreadTicks ThreadTicksNowIgnoringOverride();
+
+}  // namespace subtle
+
+namespace internal {
+
+// These function pointers are used by platform-independent implementations of
+// the Now() methods and ScopedTimeClockOverrides. They are set to point to the
+// respective NowIgnoringOverride functions by default, but can also be set by
+// platform-specific code to select a default implementation at runtime, thereby
+// avoiding the indirection via the NowIgnoringOverride functions. Note that the
+// pointers can be overridden and later reset to the NowIgnoringOverride
+// functions by ScopedTimeClockOverrides.
+extern TimeNowFunction g_time_now_function;
+extern TimeNowFunction g_time_now_from_system_time_function;
+extern TimeTicksNowFunction g_time_ticks_now_function;
+extern ThreadTicksNowFunction g_thread_ticks_now_function;
+
+}  // namespace internal
+
+}  // namespace base
+
+#endif  // BASE_TIME_TIME_OVERRIDE_H_
diff --git a/base/time/time_to_iso8601.cc b/base/time/time_to_iso8601.cc
new file mode 100644
index 0000000..27e7bfc
--- /dev/null
+++ b/base/time/time_to_iso8601.cc
@@ -0,0 +1,20 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/time/time_to_iso8601.h"
+
+#include "base/strings/stringprintf.h"
+#include "base/time/time.h"
+
+namespace base {
+
+std::string TimeToISO8601(const Time& t) {
+  Time::Exploded exploded;
+  t.UTCExplode(&exploded);
+  return StringPrintf("%04d-%02d-%02dT%02d:%02d:%02d.%03dZ", exploded.year,
+                      exploded.month, exploded.day_of_month, exploded.hour,
+                      exploded.minute, exploded.second, exploded.millisecond);
+}
+
+}  // namespace base
diff --git a/base/time/time_to_iso8601.h b/base/time/time_to_iso8601.h
new file mode 100644
index 0000000..2643484
--- /dev/null
+++ b/base/time/time_to_iso8601.h
@@ -0,0 +1,20 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TIME_TIME_TO_ISO8601_H_
+#define BASE_TIME_TIME_TO_ISO8601_H_
+
+#include <string>
+
+#include "base/base_export.h"
+
+namespace base {
+
+class Time;
+
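+// Formats |t| as an ISO 8601 UTC timestamp with millisecond precision, e.g.
+// TimeToISO8601(Time::UnixEpoch()) == "1970-01-01T00:00:00.000Z".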
+BASE_EXPORT std::string TimeToISO8601(const base::Time& t);
+
+}  // namespace base
+
+#endif  // BASE_TIME_TIME_TO_ISO8601_H_
diff --git a/base/time/time_unittest.cc b/base/time/time_unittest.cc
new file mode 100644
index 0000000..cde5cf5
--- /dev/null
+++ b/base/time/time_unittest.cc
@@ -0,0 +1,1588 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/time/time.h"
+
+#include <stdint.h>
+#include <time.h>
+#include <limits>
+#include <string>
+
+#include "base/build_time.h"
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/strings/stringprintf.h"
+#include "base/threading/platform_thread.h"
+#include "base/time/time_override.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_ANDROID)
+#include "base/android/jni_android.h"
+#elif defined(OS_IOS)
+#include "base/ios/ios_util.h"
+#elif defined(OS_WIN)
+#include <windows.h>
+#endif
+
+namespace base {
+
+namespace {
+
+TEST(TimeTestOutOfBounds, FromExplodedOutOfBoundsTime) {
+  // FromUTCExploded must set |time| to Time(0) and return false if the day
+  // is set to 31 on a 28-30 day month. Test that |exploded| yields Time(0)
+  // for the 31st of February and the 31st of April. The new implementation
+  // handles this.
+
+  const struct DateTestData {
+    Time::Exploded explode;
+    bool is_valid;
+  } kDateTestData[] = {
+      // 31st of February
+      {{2016, 2, 0, 31, 12, 30, 0, 0}, true},
+      // 31st of April
+      {{2016, 4, 0, 31, 8, 43, 0, 0}, true},
+      // Negative month
+      {{2016, -5, 0, 2, 4, 10, 0, 0}, false},
+      // Negative date of month
+      {{2016, 6, 0, -15, 2, 50, 0, 0}, false},
+      // Negative hours
+      {{2016, 7, 0, 10, -11, 29, 0, 0}, false},
+      // Negative minutes
+      {{2016, 3, 0, 14, 10, -29, 0, 0}, false},
+      // Negative seconds
+      {{2016, 10, 0, 25, 7, 47, -30, 0}, false},
+      // Negative milliseconds
+      {{2016, 10, 0, 25, 7, 47, 20, -500}, false},
+      // Hours are too large
+      {{2016, 7, 0, 10, 26, 29, 0, 0}, false},
+      // Minutes are too large
+      {{2016, 3, 0, 14, 10, 78, 0, 0}, false},
+      // Seconds are too large
+      {{2016, 10, 0, 25, 7, 47, 234, 0}, false},
+      // Milliseconds are too large
+      {{2016, 10, 0, 25, 6, 31, 23, 1643}, false},
+      // Test overflow. Time is valid, but overflow case
+      // results in Time(0).
+      {{9840633, 1, 0, 1, 1, 1, 0, 0}, true},
+      // Underflow will fail as well.
+      {{-9840633, 1, 0, 1, 1, 1, 0, 0}, true},
+      // Test integer overflow and underflow cases for the values themselves.
+      {{std::numeric_limits<int>::min(), 1, 0, 1, 1, 1, 0, 0}, true},
+      {{std::numeric_limits<int>::max(), 1, 0, 1, 1, 1, 0, 0}, true},
+      {{2016, std::numeric_limits<int>::min(), 0, 1, 1, 1, 0, 0}, false},
+      {{2016, std::numeric_limits<int>::max(), 0, 1, 1, 1, 0, 0}, false},
+  };
+
+  for (const auto& test : kDateTestData) {
+    EXPECT_EQ(test.explode.HasValidValues(), test.is_valid);
+
+    base::Time result;
+    EXPECT_FALSE(base::Time::FromUTCExploded(test.explode, &result));
+    EXPECT_TRUE(result.is_null());
+    EXPECT_FALSE(base::Time::FromLocalExploded(test.explode, &result));
+    EXPECT_TRUE(result.is_null());
+  }
+}
+
+// Specialized test fixture allowing time strings without timezones to be
+// tested by comparing them to a known time in the local zone.
+// See also pr_time_unittests.cc
+class TimeTest : public testing::Test {
+ protected:
+  void SetUp() override {
+    // Use mktime to get a time_t, and turn it into a PRTime by converting
+    // seconds to microseconds.  Use 15th Oct 2007 12:45:00 local.  This
+    // must be a time guaranteed to be outside of a DST fallback hour in
+    // any timezone.
+    struct tm local_comparison_tm = {
+      0,            // second
+      45,           // minute
+      12,           // hour
+      15,           // day of month
+      10 - 1,       // month
+      2007 - 1900,  // year
+      0,            // day of week (ignored, output only)
+      0,            // day of year (ignored, output only)
+      -1            // DST in effect, -1 tells mktime to figure it out
+    };
+
+    time_t converted_time = mktime(&local_comparison_tm);
+    ASSERT_GT(converted_time, 0);
+    comparison_time_local_ = Time::FromTimeT(converted_time);
+
+    // time_t representation of 15th Oct 2007 12:45:00 PDT
+    comparison_time_pdt_ = Time::FromTimeT(1192477500);
+  }
+
+  Time comparison_time_local_;
+  Time comparison_time_pdt_;
+};
+
+// Test conversion to/from TimeDeltas elapsed since the Windows epoch.
+// Conversions should be idempotent and non-lossy.
+TEST_F(TimeTest, DeltaSinceWindowsEpoch) {
+  const TimeDelta delta = TimeDelta::FromMicroseconds(123);
+  EXPECT_EQ(delta,
+            Time::FromDeltaSinceWindowsEpoch(delta).ToDeltaSinceWindowsEpoch());
+
+  const Time now = Time::Now();
+  const Time actual =
+      Time::FromDeltaSinceWindowsEpoch(now.ToDeltaSinceWindowsEpoch());
+  EXPECT_EQ(now, actual);
+
+  // Null times should remain null after a round-trip conversion. This is an
+  // important invariant for the common use case of serialization +
+  // deserialization.
+  const Time should_be_null =
+      Time::FromDeltaSinceWindowsEpoch(Time().ToDeltaSinceWindowsEpoch());
+  EXPECT_TRUE(should_be_null.is_null());
+}
+
+// Test conversion to/from time_t.
+TEST_F(TimeTest, TimeT) {
+  EXPECT_EQ(10, Time().FromTimeT(10).ToTimeT());
+  EXPECT_EQ(10.0, Time().FromTimeT(10).ToDoubleT());
+
+  // Conversions of 0 should stay 0.
+  EXPECT_EQ(0, Time().ToTimeT());
+  EXPECT_EQ(0, Time::FromTimeT(0).ToInternalValue());
+}
+
+// Test conversions to/from time_t and exploding/unexploding (utc time).
+TEST_F(TimeTest, UTCTimeT) {
+  // C library time and exploded time.
+  time_t now_t_1 = time(nullptr);
+  struct tm tms;
+#if defined(OS_WIN)
+  gmtime_s(&tms, &now_t_1);
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  gmtime_r(&now_t_1, &tms);
+#endif
+
+  // Convert to ours.
+  Time our_time_1 = Time::FromTimeT(now_t_1);
+  Time::Exploded exploded;
+  our_time_1.UTCExplode(&exploded);
+
+  // This will test both our exploding and our time_t -> Time conversion.
+  EXPECT_EQ(tms.tm_year + 1900, exploded.year);
+  EXPECT_EQ(tms.tm_mon + 1, exploded.month);
+  EXPECT_EQ(tms.tm_mday, exploded.day_of_month);
+  EXPECT_EQ(tms.tm_hour, exploded.hour);
+  EXPECT_EQ(tms.tm_min, exploded.minute);
+  EXPECT_EQ(tms.tm_sec, exploded.second);
+
+  // Convert exploded back to the time struct.
+  Time our_time_2;
+  EXPECT_TRUE(Time::FromUTCExploded(exploded, &our_time_2));
+  EXPECT_TRUE(our_time_1 == our_time_2);
+
+  time_t now_t_2 = our_time_2.ToTimeT();
+  EXPECT_EQ(now_t_1, now_t_2);
+}
+
+// Test conversions to/from time_t and exploding/unexploding (local time).
+TEST_F(TimeTest, LocalTimeT) {
+#if defined(OS_IOS) && TARGET_OS_SIMULATOR
+  // The function CFTimeZoneCopySystem() fails to determine the system timezone
+  // when running the iOS 11.0 simulator on a host running High Sierra, and
+  // returns the "GMT" timezone. This causes Time::LocalExplode and
+  // localtime_r values to differ by the local timezone offset. Disable the
+  // test if simulating iOS 10.0, as it is not possible to check the version
+  // of the host Mac.
+  // TODO(crbug.com/782033): remove this once support for iOS pre-11.0 is
+  // dropped or when the bug in CFTimeZoneCopySystem() is fixed.
+  if (ios::IsRunningOnIOS10OrLater() && !ios::IsRunningOnIOS11OrLater()) {
+    return;
+  }
+#endif
+
+  // C library time and exploded time.
+  time_t now_t_1 = time(nullptr);
+  struct tm tms;
+#if defined(OS_WIN)
+  localtime_s(&tms, &now_t_1);
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  localtime_r(&now_t_1, &tms);
+#endif
+
+  // Convert to ours.
+  Time our_time_1 = Time::FromTimeT(now_t_1);
+  Time::Exploded exploded;
+  our_time_1.LocalExplode(&exploded);
+
+  // This will test both our exploding and our time_t -> Time conversion.
+  EXPECT_EQ(tms.tm_year + 1900, exploded.year);
+  EXPECT_EQ(tms.tm_mon + 1, exploded.month);
+  EXPECT_EQ(tms.tm_mday, exploded.day_of_month);
+  EXPECT_EQ(tms.tm_hour, exploded.hour);
+  EXPECT_EQ(tms.tm_min, exploded.minute);
+  EXPECT_EQ(tms.tm_sec, exploded.second);
+
+  // Convert exploded back to the time struct.
+  Time our_time_2;
+  EXPECT_TRUE(Time::FromLocalExploded(exploded, &our_time_2));
+  EXPECT_TRUE(our_time_1 == our_time_2);
+
+  time_t now_t_2 = our_time_2.ToTimeT();
+  EXPECT_EQ(now_t_1, now_t_2);
+}
+
+// Test conversions to/from javascript time.
+TEST_F(TimeTest, JsTime) {
+  Time epoch = Time::FromJsTime(0.0);
+  EXPECT_EQ(epoch, Time::UnixEpoch());
+  Time t = Time::FromJsTime(700000.3);
+  EXPECT_EQ(700.0003, t.ToDoubleT());
+  t = Time::FromDoubleT(800.73);
+  EXPECT_EQ(800730.0, t.ToJsTime());
+}
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+TEST_F(TimeTest, FromTimeVal) {
+  Time now = Time::Now();
+  Time also_now = Time::FromTimeVal(now.ToTimeVal());
+  EXPECT_EQ(now, also_now);
+}
+#endif  // defined(OS_POSIX) || defined(OS_FUCHSIA)
+
+TEST_F(TimeTest, FromExplodedWithMilliseconds) {
+  // Some platform implementations of FromExploded are liable to drop
+  // milliseconds if we aren't careful.
+  Time now = Time::NowFromSystemTime();
+  Time::Exploded exploded1 = {0};
+  now.UTCExplode(&exploded1);
+  exploded1.millisecond = 500;
+  Time time;
+  EXPECT_TRUE(Time::FromUTCExploded(exploded1, &time));
+  Time::Exploded exploded2 = {0};
+  time.UTCExplode(&exploded2);
+  EXPECT_EQ(exploded1.millisecond, exploded2.millisecond);
+}
+
+TEST_F(TimeTest, ZeroIsSymmetric) {
+  Time zero_time(Time::FromTimeT(0));
+  EXPECT_EQ(0, zero_time.ToTimeT());
+
+  EXPECT_EQ(0.0, zero_time.ToDoubleT());
+}
+
+TEST_F(TimeTest, LocalExplode) {
+  Time a = Time::Now();
+  Time::Exploded exploded;
+  a.LocalExplode(&exploded);
+
+  Time b;
+  EXPECT_TRUE(Time::FromLocalExploded(exploded, &b));
+
+  // The exploded structure doesn't have microseconds, and on Mac & Linux, the
+  // internal OS conversion uses seconds, which will cause truncation. So we
+  // can only make sure that the delta is within one second.
+  EXPECT_TRUE((a - b) < TimeDelta::FromSeconds(1));
+}
+
+TEST_F(TimeTest, UTCExplode) {
+  Time a = Time::Now();
+  Time::Exploded exploded;
+  a.UTCExplode(&exploded);
+
+  Time b;
+  EXPECT_TRUE(Time::FromUTCExploded(exploded, &b));
+  EXPECT_TRUE((a - b) < TimeDelta::FromSeconds(1));
+}
+
+TEST_F(TimeTest, LocalMidnight) {
+  Time::Exploded exploded;
+  Time::Now().LocalMidnight().LocalExplode(&exploded);
+  EXPECT_EQ(0, exploded.hour);
+  EXPECT_EQ(0, exploded.minute);
+  EXPECT_EQ(0, exploded.second);
+  EXPECT_EQ(0, exploded.millisecond);
+}
+
+TEST_F(TimeTest, ParseTimeTest1) {
+  time_t current_time = 0;
+  time(&current_time);
+
+  struct tm local_time = {};
+  char time_buf[64] = {};
+#if defined(OS_WIN)
+  localtime_s(&local_time, &current_time);
+  asctime_s(time_buf, arraysize(time_buf), &local_time);
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  localtime_r(&current_time, &local_time);
+  asctime_r(&local_time, time_buf);
+#endif
+
+  Time parsed_time;
+  EXPECT_TRUE(Time::FromString(time_buf, &parsed_time));
+  EXPECT_EQ(current_time, parsed_time.ToTimeT());
+}
+
+TEST_F(TimeTest, DayOfWeekSunday) {
+  Time time;
+  EXPECT_TRUE(Time::FromString("Sun, 06 May 2012 12:00:00 GMT", &time));
+  Time::Exploded exploded;
+  time.UTCExplode(&exploded);
+  EXPECT_EQ(0, exploded.day_of_week);
+}
+
+TEST_F(TimeTest, DayOfWeekWednesday) {
+  Time time;
+  EXPECT_TRUE(Time::FromString("Wed, 09 May 2012 12:00:00 GMT", &time));
+  Time::Exploded exploded;
+  time.UTCExplode(&exploded);
+  EXPECT_EQ(3, exploded.day_of_week);
+}
+
+TEST_F(TimeTest, DayOfWeekSaturday) {
+  Time time;
+  EXPECT_TRUE(Time::FromString("Sat, 12 May 2012 12:00:00 GMT", &time));
+  Time::Exploded exploded;
+  time.UTCExplode(&exploded);
+  EXPECT_EQ(6, exploded.day_of_week);
+}
+
+TEST_F(TimeTest, ParseTimeTest2) {
+  Time parsed_time;
+  EXPECT_TRUE(Time::FromString("Mon, 15 Oct 2007 19:45:00 GMT", &parsed_time));
+  EXPECT_EQ(comparison_time_pdt_, parsed_time);
+}
+
+TEST_F(TimeTest, ParseTimeTest3) {
+  Time parsed_time;
+  EXPECT_TRUE(Time::FromString("15 Oct 07 12:45:00", &parsed_time));
+  EXPECT_EQ(comparison_time_local_, parsed_time);
+}
+
+TEST_F(TimeTest, ParseTimeTest4) {
+  Time parsed_time;
+  EXPECT_TRUE(Time::FromString("15 Oct 07 19:45 GMT", &parsed_time));
+  EXPECT_EQ(comparison_time_pdt_, parsed_time);
+}
+
+TEST_F(TimeTest, ParseTimeTest5) {
+  Time parsed_time;
+  EXPECT_TRUE(Time::FromString("Mon Oct 15 12:45 PDT 2007", &parsed_time));
+  EXPECT_EQ(comparison_time_pdt_, parsed_time);
+}
+
+TEST_F(TimeTest, ParseTimeTest6) {
+  Time parsed_time;
+  EXPECT_TRUE(Time::FromString("Monday, Oct 15, 2007 12:45 PM", &parsed_time));
+  EXPECT_EQ(comparison_time_local_, parsed_time);
+}
+
+TEST_F(TimeTest, ParseTimeTest7) {
+  Time parsed_time;
+  EXPECT_TRUE(Time::FromString("10/15/07 12:45:00 PM", &parsed_time));
+  EXPECT_EQ(comparison_time_local_, parsed_time);
+}
+
+TEST_F(TimeTest, ParseTimeTest8) {
+  Time parsed_time;
+  EXPECT_TRUE(Time::FromString("15-OCT-2007 12:45pm", &parsed_time));
+  EXPECT_EQ(comparison_time_local_, parsed_time);
+}
+
+TEST_F(TimeTest, ParseTimeTest9) {
+  Time parsed_time;
+  EXPECT_TRUE(Time::FromString("16 Oct 2007 4:45-JST (Tuesday)", &parsed_time));
+  EXPECT_EQ(comparison_time_pdt_, parsed_time);
+}
+
+TEST_F(TimeTest, ParseTimeTest10) {
+  Time parsed_time;
+  EXPECT_TRUE(Time::FromString("15/10/07 12:45", &parsed_time));
+  EXPECT_EQ(parsed_time, comparison_time_local_);
+}
+
+// Test some of edge cases around epoch, etc.
+TEST_F(TimeTest, ParseTimeTestEpoch0) {
+  Time parsed_time;
+
+  // time_t == epoch == 0
+  EXPECT_TRUE(Time::FromString("Thu Jan 01 01:00:00 +0100 1970",
+                               &parsed_time));
+  EXPECT_EQ(0, parsed_time.ToTimeT());
+  EXPECT_TRUE(Time::FromString("Thu Jan 01 00:00:00 GMT 1970",
+                               &parsed_time));
+  EXPECT_EQ(0, parsed_time.ToTimeT());
+}
+
+TEST_F(TimeTest, ParseTimeTestEpoch1) {
+  Time parsed_time;
+
+  // time_t == 1 second after epoch == 1
+  EXPECT_TRUE(Time::FromString("Thu Jan 01 01:00:01 +0100 1970",
+                               &parsed_time));
+  EXPECT_EQ(1, parsed_time.ToTimeT());
+  EXPECT_TRUE(Time::FromString("Thu Jan 01 00:00:01 GMT 1970",
+                               &parsed_time));
+  EXPECT_EQ(1, parsed_time.ToTimeT());
+}
+
+TEST_F(TimeTest, ParseTimeTestEpoch2) {
+  Time parsed_time;
+
+  // time_t == 2 seconds after epoch == 2
+  EXPECT_TRUE(Time::FromString("Thu Jan 01 01:00:02 +0100 1970",
+                               &parsed_time));
+  EXPECT_EQ(2, parsed_time.ToTimeT());
+  EXPECT_TRUE(Time::FromString("Thu Jan 01 00:00:02 GMT 1970",
+                               &parsed_time));
+  EXPECT_EQ(2, parsed_time.ToTimeT());
+}
+
+TEST_F(TimeTest, ParseTimeTestEpochNeg1) {
+  Time parsed_time;
+
+  // time_t == 1 second before epoch == -1
+  EXPECT_TRUE(Time::FromString("Thu Jan 01 00:59:59 +0100 1970",
+                               &parsed_time));
+  EXPECT_EQ(-1, parsed_time.ToTimeT());
+  EXPECT_TRUE(Time::FromString("Wed Dec 31 23:59:59 GMT 1969",
+                               &parsed_time));
+  EXPECT_EQ(-1, parsed_time.ToTimeT());
+}
+
+// If time_t is 32 bits, a date after year 2038 will overflow time_t and
+// cause timegm() to return -1.  The parsed time should not be 1 second
+// before epoch.
+TEST_F(TimeTest, ParseTimeTestEpochNotNeg1) {
+  Time parsed_time;
+
+  EXPECT_TRUE(Time::FromString("Wed Dec 31 23:59:59 GMT 2100",
+                               &parsed_time));
+  EXPECT_NE(-1, parsed_time.ToTimeT());
+}
+
+TEST_F(TimeTest, ParseTimeTestEpochNeg2) {
+  Time parsed_time;
+
+  // time_t == 2 seconds before epoch == -2
+  EXPECT_TRUE(Time::FromString("Thu Jan 01 00:59:58 +0100 1970",
+                               &parsed_time));
+  EXPECT_EQ(-2, parsed_time.ToTimeT());
+  EXPECT_TRUE(Time::FromString("Wed Dec 31 23:59:58 GMT 1969",
+                               &parsed_time));
+  EXPECT_EQ(-2, parsed_time.ToTimeT());
+}
+
+TEST_F(TimeTest, ParseTimeTestEpoch1960) {
+  Time parsed_time;
+
+  // time_t before Epoch, in 1960
+  EXPECT_TRUE(Time::FromString("Wed Jun 29 19:40:01 +0100 1960",
+                               &parsed_time));
+  EXPECT_EQ(-299999999, parsed_time.ToTimeT());
+  EXPECT_TRUE(Time::FromString("Wed Jun 29 18:40:01 GMT 1960",
+                               &parsed_time));
+  EXPECT_EQ(-299999999, parsed_time.ToTimeT());
+  EXPECT_TRUE(Time::FromString("Wed Jun 29 17:40:01 GMT 1960",
+                               &parsed_time));
+  EXPECT_EQ(-300003599, parsed_time.ToTimeT());
+}
+
+TEST_F(TimeTest, ParseTimeTestEmpty) {
+  Time parsed_time;
+  EXPECT_FALSE(Time::FromString("", &parsed_time));
+}
+
+TEST_F(TimeTest, ParseTimeTestInvalidString) {
+  Time parsed_time;
+  EXPECT_FALSE(Time::FromString("Monday morning 2000", &parsed_time));
+}
+
+TEST_F(TimeTest, ExplodeBeforeUnixEpoch) {
+  static const int kUnixEpochYear = 1970;  // In case this changes (ha!).
+  Time t;
+  Time::Exploded exploded;
+
+  t = Time::UnixEpoch() - TimeDelta::FromMicroseconds(1);
+  t.UTCExplode(&exploded);
+  EXPECT_TRUE(exploded.HasValidValues());
+  // Should be 1969-12-31 23:59:59 999 milliseconds (and 999 microseconds).
+  EXPECT_EQ(kUnixEpochYear - 1, exploded.year);
+  EXPECT_EQ(12, exploded.month);
+  EXPECT_EQ(31, exploded.day_of_month);
+  EXPECT_EQ(23, exploded.hour);
+  EXPECT_EQ(59, exploded.minute);
+  EXPECT_EQ(59, exploded.second);
+  EXPECT_EQ(999, exploded.millisecond);
+
+  t = Time::UnixEpoch() - TimeDelta::FromMicroseconds(1000);
+  t.UTCExplode(&exploded);
+  EXPECT_TRUE(exploded.HasValidValues());
+  // Should be 1969-12-31 23:59:59 999 milliseconds.
+  EXPECT_EQ(kUnixEpochYear - 1, exploded.year);
+  EXPECT_EQ(12, exploded.month);
+  EXPECT_EQ(31, exploded.day_of_month);
+  EXPECT_EQ(23, exploded.hour);
+  EXPECT_EQ(59, exploded.minute);
+  EXPECT_EQ(59, exploded.second);
+  EXPECT_EQ(999, exploded.millisecond);
+
+  t = Time::UnixEpoch() - TimeDelta::FromMicroseconds(1001);
+  t.UTCExplode(&exploded);
+  EXPECT_TRUE(exploded.HasValidValues());
+  // Should be 1969-12-31 23:59:59 998 milliseconds (and 999 microseconds).
+  EXPECT_EQ(kUnixEpochYear - 1, exploded.year);
+  EXPECT_EQ(12, exploded.month);
+  EXPECT_EQ(31, exploded.day_of_month);
+  EXPECT_EQ(23, exploded.hour);
+  EXPECT_EQ(59, exploded.minute);
+  EXPECT_EQ(59, exploded.second);
+  EXPECT_EQ(998, exploded.millisecond);
+
+  t = Time::UnixEpoch() - TimeDelta::FromMilliseconds(1000);
+  t.UTCExplode(&exploded);
+  EXPECT_TRUE(exploded.HasValidValues());
+  // Should be 1969-12-31 23:59:59.
+  EXPECT_EQ(kUnixEpochYear - 1, exploded.year);
+  EXPECT_EQ(12, exploded.month);
+  EXPECT_EQ(31, exploded.day_of_month);
+  EXPECT_EQ(23, exploded.hour);
+  EXPECT_EQ(59, exploded.minute);
+  EXPECT_EQ(59, exploded.second);
+  EXPECT_EQ(0, exploded.millisecond);
+
+  t = Time::UnixEpoch() - TimeDelta::FromMilliseconds(1001);
+  t.UTCExplode(&exploded);
+  EXPECT_TRUE(exploded.HasValidValues());
+  // Should be 1969-12-31 23:59:58 999 milliseconds.
+  EXPECT_EQ(kUnixEpochYear - 1, exploded.year);
+  EXPECT_EQ(12, exploded.month);
+  EXPECT_EQ(31, exploded.day_of_month);
+  EXPECT_EQ(23, exploded.hour);
+  EXPECT_EQ(59, exploded.minute);
+  EXPECT_EQ(58, exploded.second);
+  EXPECT_EQ(999, exploded.millisecond);
+
+  // Make sure we still handle at/after Unix epoch correctly.
+  t = Time::UnixEpoch();
+  t.UTCExplode(&exploded);
+  EXPECT_TRUE(exploded.HasValidValues());
+  // Should be 1970-01-01 00:00:00 0 milliseconds.
+  EXPECT_EQ(kUnixEpochYear, exploded.year);
+  EXPECT_EQ(1, exploded.month);
+  EXPECT_EQ(1, exploded.day_of_month);
+  EXPECT_EQ(0, exploded.hour);
+  EXPECT_EQ(0, exploded.minute);
+  EXPECT_EQ(0, exploded.second);
+  EXPECT_EQ(0, exploded.millisecond);
+
+  t = Time::UnixEpoch() + TimeDelta::FromMicroseconds(1);
+  t.UTCExplode(&exploded);
+  EXPECT_TRUE(exploded.HasValidValues());
+  // Should be 1970-01-01 00:00:00 0 milliseconds (and 1 microsecond).
+  EXPECT_EQ(kUnixEpochYear, exploded.year);
+  EXPECT_EQ(1, exploded.month);
+  EXPECT_EQ(1, exploded.day_of_month);
+  EXPECT_EQ(0, exploded.hour);
+  EXPECT_EQ(0, exploded.minute);
+  EXPECT_EQ(0, exploded.second);
+  EXPECT_EQ(0, exploded.millisecond);
+
+  t = Time::UnixEpoch() + TimeDelta::FromMicroseconds(1000);
+  t.UTCExplode(&exploded);
+  EXPECT_TRUE(exploded.HasValidValues());
+  // Should be 1970-01-01 00:00:00 1 millisecond.
+  EXPECT_EQ(kUnixEpochYear, exploded.year);
+  EXPECT_EQ(1, exploded.month);
+  EXPECT_EQ(1, exploded.day_of_month);
+  EXPECT_EQ(0, exploded.hour);
+  EXPECT_EQ(0, exploded.minute);
+  EXPECT_EQ(0, exploded.second);
+  EXPECT_EQ(1, exploded.millisecond);
+
+  t = Time::UnixEpoch() + TimeDelta::FromMilliseconds(1000);
+  t.UTCExplode(&exploded);
+  EXPECT_TRUE(exploded.HasValidValues());
+  // Should be 1970-01-01 00:00:01.
+  EXPECT_EQ(kUnixEpochYear, exploded.year);
+  EXPECT_EQ(1, exploded.month);
+  EXPECT_EQ(1, exploded.day_of_month);
+  EXPECT_EQ(0, exploded.hour);
+  EXPECT_EQ(0, exploded.minute);
+  EXPECT_EQ(1, exploded.second);
+  EXPECT_EQ(0, exploded.millisecond);
+
+  t = Time::UnixEpoch() + TimeDelta::FromMilliseconds(1001);
+  t.UTCExplode(&exploded);
+  EXPECT_TRUE(exploded.HasValidValues());
+  // Should be 1970-01-01 00:00:01 1 millisecond.
+  EXPECT_EQ(kUnixEpochYear, exploded.year);
+  EXPECT_EQ(1, exploded.month);
+  EXPECT_EQ(1, exploded.day_of_month);
+  EXPECT_EQ(0, exploded.hour);
+  EXPECT_EQ(0, exploded.minute);
+  EXPECT_EQ(1, exploded.second);
+  EXPECT_EQ(1, exploded.millisecond);
+}
+
+TEST_F(TimeTest, Max) {
+  Time max = Time::Max();
+  EXPECT_TRUE(max.is_max());
+  EXPECT_EQ(max, Time::Max());
+  EXPECT_GT(max, Time::Now());
+  EXPECT_GT(max, Time());
+}
+
+TEST_F(TimeTest, MaxConversions) {
+  Time t = Time::Max();
+  EXPECT_EQ(std::numeric_limits<int64_t>::max(), t.ToInternalValue());
+
+  t = Time::FromDoubleT(std::numeric_limits<double>::infinity());
+  EXPECT_TRUE(t.is_max());
+  EXPECT_EQ(std::numeric_limits<double>::infinity(), t.ToDoubleT());
+
+  t = Time::FromJsTime(std::numeric_limits<double>::infinity());
+  EXPECT_TRUE(t.is_max());
+  EXPECT_EQ(std::numeric_limits<double>::infinity(), t.ToJsTime());
+
+  t = Time::FromTimeT(std::numeric_limits<time_t>::max());
+  EXPECT_TRUE(t.is_max());
+  EXPECT_EQ(std::numeric_limits<time_t>::max(), t.ToTimeT());
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+  struct timeval tval;
+  tval.tv_sec = std::numeric_limits<time_t>::max();
+  tval.tv_usec = static_cast<suseconds_t>(Time::kMicrosecondsPerSecond) - 1;
+  t = Time::FromTimeVal(tval);
+  EXPECT_TRUE(t.is_max());
+  tval = t.ToTimeVal();
+  EXPECT_EQ(std::numeric_limits<time_t>::max(), tval.tv_sec);
+  EXPECT_EQ(static_cast<suseconds_t>(Time::kMicrosecondsPerSecond) - 1,
+      tval.tv_usec);
+#endif
+
+#if defined(OS_MACOSX)
+  t = Time::FromCFAbsoluteTime(std::numeric_limits<CFAbsoluteTime>::infinity());
+  EXPECT_TRUE(t.is_max());
+  EXPECT_EQ(std::numeric_limits<CFAbsoluteTime>::infinity(),
+            t.ToCFAbsoluteTime());
+#endif
+
+#if defined(OS_WIN)
+  FILETIME ftime;
+  ftime.dwHighDateTime = std::numeric_limits<DWORD>::max();
+  ftime.dwLowDateTime = std::numeric_limits<DWORD>::max();
+  t = Time::FromFileTime(ftime);
+  EXPECT_TRUE(t.is_max());
+  ftime = t.ToFileTime();
+  EXPECT_EQ(std::numeric_limits<DWORD>::max(), ftime.dwHighDateTime);
+  EXPECT_EQ(std::numeric_limits<DWORD>::max(), ftime.dwLowDateTime);
+#endif
+}
+
+#if defined(OS_MACOSX)
+TEST_F(TimeTest, TimeTOverflow) {
+  Time t = Time::FromInternalValue(std::numeric_limits<int64_t>::max() - 1);
+  EXPECT_FALSE(t.is_max());
+  EXPECT_EQ(std::numeric_limits<time_t>::max(), t.ToTimeT());
+}
+#endif
+
+#if defined(OS_ANDROID)
+TEST_F(TimeTest, FromLocalExplodedCrashOnAndroid) {
+  // This crashed inside Time::FromLocalExploded() on Android 4.1.2.
+  // See http://crbug.com/287821
+  Time::Exploded midnight = {2013,  // year
+                             10,    // month
+                             0,     // day_of_week
+                             13,    // day_of_month
+                             0,     // hour
+                             0,     // minute
+                             0,     // second
+  };
+  // The string passed to putenv() must be a char* and the documentation states
+  // that it 'becomes part of the environment', so use a static buffer.
+  static char buffer[] = "TZ=America/Santiago";
+  putenv(buffer);
+  tzset();
+  Time t;
+  EXPECT_TRUE(Time::FromLocalExploded(midnight, &t));
+  EXPECT_EQ(1381633200, t.ToTimeT());
+}
+#endif  // OS_ANDROID
+
+TEST_F(TimeTest, FromExploded_MinMax) {
+  Time::Exploded exploded = {0};
+  exploded.month = 1;
+  exploded.day_of_month = 1;
+
+  Time parsed_time;
+
+  if (Time::kExplodedMinYear != std::numeric_limits<int>::min()) {
+    exploded.year = Time::kExplodedMinYear;
+    EXPECT_TRUE(Time::FromUTCExploded(exploded, &parsed_time));
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+    // On Windows, January 1, 1601 00:00:00 is actually the null time.
+    EXPECT_FALSE(parsed_time.is_null());
+#endif
+
+#if !defined(OS_ANDROID) && !defined(OS_MACOSX)
+    // Exactly which dates earlier than |kExplodedMinYear| fail is OS-version
+    // dependent on Android and Mac (for example, macOS 10.13 seems to support
+    // dates before 1902).
+    exploded.year--;
+    EXPECT_FALSE(Time::FromUTCExploded(exploded, &parsed_time));
+    EXPECT_TRUE(parsed_time.is_null());
+#endif
+  }
+
+  if (Time::kExplodedMaxYear != std::numeric_limits<int>::max()) {
+    exploded.year = Time::kExplodedMaxYear;
+    exploded.month = 12;
+    exploded.day_of_month = 31;
+    exploded.hour = 23;
+    exploded.minute = 59;
+    exploded.second = 59;
+    exploded.millisecond = 999;
+    EXPECT_TRUE(Time::FromUTCExploded(exploded, &parsed_time));
+    EXPECT_FALSE(parsed_time.is_null());
+
+    exploded.year++;
+    EXPECT_FALSE(Time::FromUTCExploded(exploded, &parsed_time));
+    EXPECT_TRUE(parsed_time.is_null());
+  }
+}
+
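+// Mock clock for testing Time::Now() overrides: each Now() call advances the
+// stored time by one second, so the test below can assert exactly how many
+// times the override was consulted.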
+class TimeOverride {
+ public:
+  static Time Now() {
+    now_time_ += TimeDelta::FromSeconds(1);
+    return now_time_;
+  }
+
+  static Time now_time_;
+};
+
+// static
+Time TimeOverride::now_time_;
+
+TEST_F(TimeTest, NowOverride) {
+  TimeOverride::now_time_ = Time::UnixEpoch();
+
+  // Choose a reference time that we know to be in the past but close to now.
+  Time build_time = GetBuildTime();
+
+  // Override is not active. All Now() methods should return a time greater than
+  // the build time.
+  EXPECT_LT(build_time, Time::Now());
+  EXPECT_GT(Time::Max(), Time::Now());
+  EXPECT_LT(build_time, subtle::TimeNowIgnoringOverride());
+  EXPECT_GT(Time::Max(), subtle::TimeNowIgnoringOverride());
+  EXPECT_LT(build_time, Time::NowFromSystemTime());
+  EXPECT_GT(Time::Max(), Time::NowFromSystemTime());
+  EXPECT_LT(build_time, subtle::TimeNowFromSystemTimeIgnoringOverride());
+  EXPECT_GT(Time::Max(), subtle::TimeNowFromSystemTimeIgnoringOverride());
+
+  {
+    // Set override.
+    subtle::ScopedTimeClockOverrides overrides(&TimeOverride::Now, nullptr,
+                                               nullptr);
+
+    // Overridden value is returned and incremented when Now() or
+    // NowFromSystemTime() is called.
+    EXPECT_EQ(Time::UnixEpoch() + TimeDelta::FromSeconds(1), Time::Now());
+    EXPECT_EQ(Time::UnixEpoch() + TimeDelta::FromSeconds(2), Time::Now());
+    EXPECT_EQ(Time::UnixEpoch() + TimeDelta::FromSeconds(3),
+              Time::NowFromSystemTime());
+    EXPECT_EQ(Time::UnixEpoch() + TimeDelta::FromSeconds(4),
+              Time::NowFromSystemTime());
+
+    // IgnoringOverride methods still return real time.
+    EXPECT_LT(build_time, subtle::TimeNowIgnoringOverride());
+    EXPECT_GT(Time::Max(), subtle::TimeNowIgnoringOverride());
+    EXPECT_LT(build_time, subtle::TimeNowFromSystemTimeIgnoringOverride());
+    EXPECT_GT(Time::Max(), subtle::TimeNowFromSystemTimeIgnoringOverride());
+
+    // IgnoringOverride methods didn't call NowOverrideClock::Now().
+    EXPECT_EQ(Time::UnixEpoch() + TimeDelta::FromSeconds(5), Time::Now());
+    EXPECT_EQ(Time::UnixEpoch() + TimeDelta::FromSeconds(6),
+              Time::NowFromSystemTime());
+  }
+
+  // All methods return real time again.
+  EXPECT_LT(build_time, Time::Now());
+  EXPECT_GT(Time::Max(), Time::Now());
+  EXPECT_LT(build_time, subtle::TimeNowIgnoringOverride());
+  EXPECT_GT(Time::Max(), subtle::TimeNowIgnoringOverride());
+  EXPECT_LT(build_time, Time::NowFromSystemTime());
+  EXPECT_GT(Time::Max(), Time::NowFromSystemTime());
+  EXPECT_LT(build_time, subtle::TimeNowFromSystemTimeIgnoringOverride());
+  EXPECT_GT(Time::Max(), subtle::TimeNowFromSystemTimeIgnoringOverride());
+}
+
+TEST(TimeTicks, Deltas) {
+  for (int index = 0; index < 50; index++) {
+    TimeTicks ticks_start = TimeTicks::Now();
+    base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(10));
+    TimeTicks ticks_stop = TimeTicks::Now();
+    TimeDelta delta = ticks_stop - ticks_start;
+    // Note:  Although we asked for a 10ms sleep, if the
+    // time clock has a finer granularity than the Sleep()
+    // clock, it is quite possible to wake up early.  Here
+    // is how that works:
+    //      Time(ms timer)      Time(us timer)
+    //          5                   5010
+    //          6                   6010
+    //          7                   7010
+    //          8                   8010
+    //          9                   9000
+    // Elapsed  4ms                 3990us
+    //
+    // Unfortunately, our InMilliseconds() function truncates
+    // rather than rounds.  We should consider fixing this
+    // so that our averages come out better.
+    EXPECT_GE(delta.InMilliseconds(), 9);
+    EXPECT_GE(delta.InMicroseconds(), 9000);
+    EXPECT_EQ(delta.InSeconds(), 0);
+  }
+}
+
+static void HighResClockTest(TimeTicks (*GetTicks)()) {
+  // IsHighResolution() is false on some systems.  Since the product still works
+  // even if it's false, it makes this entire test questionable.
+  if (!TimeTicks::IsHighResolution())
+    return;
+
+  // Why do we loop here?
+  // We're trying to measure that intervals increment in a VERY small amount
+  // of time --  less than 15ms.  Unfortunately, if we happen to have a
+  // context switch in the middle of our test, the context switch could easily
+  // exceed our limit.  So, we iterate on this several times.  As long as we're
+  // able to detect the fine-granularity timers at least once, then the test
+  // has succeeded.
+
+  const int kTargetGranularityUs = 15000;  // 15ms
+
+  bool success = false;
+  int retries = 100;  // Arbitrary.
+  TimeDelta delta;
+  while (!success && retries--) {
+    TimeTicks ticks_start = GetTicks();
+    // Loop until we can detect that the clock has changed.  Non-HighRes timers
+    // will increment in chunks, e.g. 15ms.  By spinning until we see a clock
+    // change, we detect the minimum time between measurements.
+    do {
+      delta = GetTicks() - ticks_start;
+    } while (delta.InMilliseconds() == 0);
+
+    if (delta.InMicroseconds() <= kTargetGranularityUs)
+      success = true;
+  }
+
+  // In high resolution mode, we expect to see the clock increment
+  // in intervals less than 15ms.
+  EXPECT_TRUE(success);
+}
+
+TEST(TimeTicks, HighRes) {
+  HighResClockTest(&TimeTicks::Now);
+}
+
+class TimeTicksOverride {
+ public:
+  static TimeTicks Now() {
+    now_ticks_ += TimeDelta::FromSeconds(1);
+    return now_ticks_;
+  }
+
+  static TimeTicks now_ticks_;
+};
+
+// static
+TimeTicks TimeTicksOverride::now_ticks_;
+
+TEST(TimeTicks, NowOverride) {
+  TimeTicksOverride::now_ticks_ = TimeTicks::Min();
+
+  // Override is not active. All Now() methods should return a sensible value.
+  EXPECT_LT(TimeTicks::Min(), TimeTicks::UnixEpoch());
+  EXPECT_LT(TimeTicks::UnixEpoch(), TimeTicks::Now());
+  EXPECT_GT(TimeTicks::Max(), TimeTicks::Now());
+  EXPECT_LT(TimeTicks::UnixEpoch(), subtle::TimeTicksNowIgnoringOverride());
+  EXPECT_GT(TimeTicks::Max(), subtle::TimeTicksNowIgnoringOverride());
+
+  {
+    // Set override.
+    subtle::ScopedTimeClockOverrides overrides(nullptr, &TimeTicksOverride::Now,
+                                               nullptr);
+
+    // Overridden value is returned and incremented when Now() is called.
+    EXPECT_EQ(TimeTicks::Min() + TimeDelta::FromSeconds(1), TimeTicks::Now());
+    EXPECT_EQ(TimeTicks::Min() + TimeDelta::FromSeconds(2), TimeTicks::Now());
+
+    // NowIgnoringOverride() still returns real ticks.
+    EXPECT_LT(TimeTicks::UnixEpoch(), subtle::TimeTicksNowIgnoringOverride());
+    EXPECT_GT(TimeTicks::Max(), subtle::TimeTicksNowIgnoringOverride());
+
+    // IgnoringOverride methods didn't call NowOverrideTickClock::NowTicks().
+    EXPECT_EQ(TimeTicks::Min() + TimeDelta::FromSeconds(3), TimeTicks::Now());
+  }
+
+  // All methods return real ticks again.
+  EXPECT_LT(TimeTicks::UnixEpoch(), TimeTicks::Now());
+  EXPECT_GT(TimeTicks::Max(), TimeTicks::Now());
+  EXPECT_LT(TimeTicks::UnixEpoch(), subtle::TimeTicksNowIgnoringOverride());
+  EXPECT_GT(TimeTicks::Max(), subtle::TimeTicksNowIgnoringOverride());
+}
+
+class ThreadTicksOverride {
+ public:
+  static ThreadTicks Now() {
+    now_ticks_ += TimeDelta::FromSeconds(1);
+    return now_ticks_;
+  }
+
+  static ThreadTicks now_ticks_;
+};
+
+// static
+ThreadTicks ThreadTicksOverride::now_ticks_;
+
+// IOS doesn't support ThreadTicks::Now().
+#if defined(OS_IOS)
+#define MAYBE_NowOverride DISABLED_NowOverride
+#else
+#define MAYBE_NowOverride NowOverride
+#endif
+TEST(ThreadTicks, MAYBE_NowOverride) {
+  ThreadTicksOverride::now_ticks_ = ThreadTicks::Min();
+
+  // Override is not active. All Now() methods should return a sensible value.
+  ThreadTicks initial_thread_ticks = ThreadTicks::Now();
+  EXPECT_LE(initial_thread_ticks, ThreadTicks::Now());
+  EXPECT_GT(ThreadTicks::Max(), ThreadTicks::Now());
+  EXPECT_LE(initial_thread_ticks, subtle::ThreadTicksNowIgnoringOverride());
+  EXPECT_GT(ThreadTicks::Max(), subtle::ThreadTicksNowIgnoringOverride());
+
+  {
+    // Set override.
+    subtle::ScopedTimeClockOverrides overrides(nullptr, nullptr,
+                                               &ThreadTicksOverride::Now);
+
+    // Overridden value is returned and incremented when Now() is called.
+    EXPECT_EQ(ThreadTicks::Min() + TimeDelta::FromSeconds(1),
+              ThreadTicks::Now());
+    EXPECT_EQ(ThreadTicks::Min() + TimeDelta::FromSeconds(2),
+              ThreadTicks::Now());
+
+    // NowIgnoringOverride() still returns real ticks.
+    EXPECT_LE(initial_thread_ticks, subtle::ThreadTicksNowIgnoringOverride());
+    EXPECT_GT(ThreadTicks::Max(), subtle::ThreadTicksNowIgnoringOverride());
+
+    // IgnoringOverride methods didn't call NowOverrideTickClock::NowTicks().
+    EXPECT_EQ(ThreadTicks::Min() + TimeDelta::FromSeconds(3),
+              ThreadTicks::Now());
+  }
+
+  // All methods return real ticks again.
+  EXPECT_LE(initial_thread_ticks, ThreadTicks::Now());
+  EXPECT_GT(ThreadTicks::Max(), ThreadTicks::Now());
+  EXPECT_LE(initial_thread_ticks, subtle::ThreadTicksNowIgnoringOverride());
+  EXPECT_GT(ThreadTicks::Max(), subtle::ThreadTicksNowIgnoringOverride());
+}
+
+// Fails frequently on Android http://crbug.com/352633 with:
+// Expected: (delta_thread.InMicroseconds()) > (0), actual: 0 vs 0
+#if defined(OS_ANDROID)
+#define MAYBE_ThreadNow DISABLED_ThreadNow
+#else
+#define MAYBE_ThreadNow ThreadNow
+#endif
+TEST(ThreadTicks, MAYBE_ThreadNow) {
+  if (ThreadTicks::IsSupported()) {
+    ThreadTicks::WaitUntilInitialized();
+    TimeTicks begin = TimeTicks::Now();
+    ThreadTicks begin_thread = ThreadTicks::Now();
+    // Make sure that ThreadNow value is non-zero.
+    EXPECT_GT(begin_thread, ThreadTicks());
+    // Sleep for 10 milliseconds to get the thread de-scheduled.
+    base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(10));
+    ThreadTicks end_thread = ThreadTicks::Now();
+    TimeTicks end = TimeTicks::Now();
+    TimeDelta delta = end - begin;
+    TimeDelta delta_thread = end_thread - begin_thread;
+    // Make sure that some thread time has elapsed.
+    EXPECT_GT(delta_thread.InMicroseconds(), 0);
+    // But the thread time should be at least 9ms less than the clock time.
+    TimeDelta difference = delta - delta_thread;
+    EXPECT_GE(difference.InMicroseconds(), 9000);
+  }
+}
+
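+// SnappedToNextTick(phase, interval) rounds a timestamp up to the next tick
+// in the series {phase + n * interval}; a timestamp already on a tick is
+// returned unchanged. E.g. with phase 4000 and interval 1000, 3500 snaps to
+// 4000 and 4500 snaps to 5000, as the cases below verify.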
+TEST(TimeTicks, SnappedToNextTickBasic) {
+  base::TimeTicks phase = base::TimeTicks::FromInternalValue(4000);
+  base::TimeDelta interval = base::TimeDelta::FromMicroseconds(1000);
+  base::TimeTicks timestamp;
+
+  // Timestamp in previous interval.
+  timestamp = base::TimeTicks::FromInternalValue(3500);
+  EXPECT_EQ(4000,
+            timestamp.SnappedToNextTick(phase, interval).ToInternalValue());
+
+  // Timestamp in next interval.
+  timestamp = base::TimeTicks::FromInternalValue(4500);
+  EXPECT_EQ(5000,
+            timestamp.SnappedToNextTick(phase, interval).ToInternalValue());
+
+  // Timestamp multiple intervals before.
+  timestamp = base::TimeTicks::FromInternalValue(2500);
+  EXPECT_EQ(3000,
+            timestamp.SnappedToNextTick(phase, interval).ToInternalValue());
+
+  // Timestamp multiple intervals after.
+  timestamp = base::TimeTicks::FromInternalValue(6500);
+  EXPECT_EQ(7000,
+            timestamp.SnappedToNextTick(phase, interval).ToInternalValue());
+
+  // Timestamp on previous interval.
+  timestamp = base::TimeTicks::FromInternalValue(3000);
+  EXPECT_EQ(3000,
+            timestamp.SnappedToNextTick(phase, interval).ToInternalValue());
+
+  // Timestamp on next interval.
+  timestamp = base::TimeTicks::FromInternalValue(5000);
+  EXPECT_EQ(5000,
+            timestamp.SnappedToNextTick(phase, interval).ToInternalValue());
+
+  // Timestamp equal to phase.
+  timestamp = base::TimeTicks::FromInternalValue(4000);
+  EXPECT_EQ(4000,
+            timestamp.SnappedToNextTick(phase, interval).ToInternalValue());
+}
+
+TEST(TimeTicks, SnappedToNextTickOverflow) {
+  // int(big_timestamp / interval) < 0, so this would crash if the number of
+  // elapsed intervals were stored in an int.
+  base::TimeTicks phase = base::TimeTicks::FromInternalValue(0);
+  base::TimeDelta interval = base::TimeDelta::FromMicroseconds(4000);
+  base::TimeTicks big_timestamp =
+      base::TimeTicks::FromInternalValue(8635916564000);
+
+  EXPECT_EQ(8635916564000,
+            big_timestamp.SnappedToNextTick(phase, interval).ToInternalValue());
+  EXPECT_EQ(8635916564000,
+            big_timestamp.SnappedToNextTick(big_timestamp, interval)
+                .ToInternalValue());
+}
+
+#if defined(OS_ANDROID)
+TEST(TimeTicks, Android_FromUptimeMillis_ClocksMatch) {
+  JNIEnv* const env = android::AttachCurrentThread();
+  android::ScopedJavaLocalRef<jclass> clazz(
+      android::GetClass(env, "android/os/SystemClock"));
+  ASSERT_TRUE(clazz.obj());
+  const jmethodID method_id =
+      android::MethodID::Get<android::MethodID::TYPE_STATIC>(
+          env, clazz.obj(), "uptimeMillis", "()J");
+  ASSERT_FALSE(!method_id);
+  // Subtract 1ms from the expected lower bound to allow for the
+  // millisecond-level truncation performed in uptimeMillis().
+  const TimeTicks lower_bound_ticks =
+      TimeTicks::Now() - TimeDelta::FromMilliseconds(1);
+  const TimeTicks converted_ticks = TimeTicks::FromUptimeMillis(
+      env->CallStaticLongMethod(clazz.obj(), method_id));
+  const TimeTicks upper_bound_ticks = TimeTicks::Now();
+  EXPECT_LE(lower_bound_ticks, converted_ticks);
+  EXPECT_GE(upper_bound_ticks, converted_ticks);
+}
+#endif  // OS_ANDROID
+
+TEST(TimeDelta, FromAndIn) {
+  // static_assert also checks that the contained expression is a constant
+  // expression, meaning all its components are suitable for initializing global
+  // variables.
+  static_assert(TimeDelta::FromDays(2) == TimeDelta::FromHours(48), "");
+  static_assert(TimeDelta::FromHours(3) == TimeDelta::FromMinutes(180), "");
+  static_assert(TimeDelta::FromMinutes(2) == TimeDelta::FromSeconds(120), "");
+  static_assert(TimeDelta::FromSeconds(2) == TimeDelta::FromMilliseconds(2000),
+                "");
+  static_assert(
+      TimeDelta::FromMilliseconds(2) == TimeDelta::FromMicroseconds(2000), "");
+  static_assert(
+      TimeDelta::FromSecondsD(2.3) == TimeDelta::FromMilliseconds(2300), "");
+  static_assert(
+      TimeDelta::FromMillisecondsD(2.5) == TimeDelta::FromMicroseconds(2500),
+      "");
+  EXPECT_EQ(TimeDelta::FromDays(13).InDays(), 13);
+  EXPECT_EQ(TimeDelta::FromHours(13).InHours(), 13);
+  EXPECT_EQ(TimeDelta::FromMinutes(13).InMinutes(), 13);
+  EXPECT_EQ(TimeDelta::FromSeconds(13).InSeconds(), 13);
+  EXPECT_EQ(TimeDelta::FromSeconds(13).InSecondsF(), 13.0);
+  EXPECT_EQ(TimeDelta::FromMilliseconds(13).InMilliseconds(), 13);
+  EXPECT_EQ(TimeDelta::FromMilliseconds(13).InMillisecondsF(), 13.0);
+  EXPECT_EQ(TimeDelta::FromSecondsD(13.1).InSeconds(), 13);
+  EXPECT_EQ(TimeDelta::FromSecondsD(13.1).InSecondsF(), 13.1);
+  EXPECT_EQ(TimeDelta::FromMillisecondsD(13.3).InMilliseconds(), 13);
+  EXPECT_EQ(TimeDelta::FromMillisecondsD(13.3).InMillisecondsF(), 13.3);
+  EXPECT_EQ(TimeDelta::FromMicroseconds(13).InMicroseconds(), 13);
+  EXPECT_EQ(TimeDelta::FromMicrosecondsD(13.3).InMicroseconds(), 13);
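+  // TimeDelta stores an integral number of microseconds, so sub-microsecond
+  // precision is truncated rather than rounded: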
+  EXPECT_EQ(TimeDelta::FromMillisecondsD(3.45678).InMillisecondsF(), 3.456);
+  EXPECT_EQ(TimeDelta::FromNanoseconds(12345).InNanoseconds(), 12000);
+  EXPECT_EQ(TimeDelta::FromNanosecondsD(12345.678).InNanoseconds(), 12000);
+}
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+TEST(TimeDelta, TimeSpecConversion) {
+  TimeDelta delta = TimeDelta::FromSeconds(0);
+  struct timespec result = delta.ToTimeSpec();
+  EXPECT_EQ(result.tv_sec, 0);
+  EXPECT_EQ(result.tv_nsec, 0);
+  EXPECT_EQ(delta, TimeDelta::FromTimeSpec(result));
+
+  delta = TimeDelta::FromSeconds(1);
+  result = delta.ToTimeSpec();
+  EXPECT_EQ(result.tv_sec, 1);
+  EXPECT_EQ(result.tv_nsec, 0);
+  EXPECT_EQ(delta, TimeDelta::FromTimeSpec(result));
+
+  delta = TimeDelta::FromMicroseconds(1);
+  result = delta.ToTimeSpec();
+  EXPECT_EQ(result.tv_sec, 0);
+  EXPECT_EQ(result.tv_nsec, 1000);
+  EXPECT_EQ(delta, TimeDelta::FromTimeSpec(result));
+
+  delta = TimeDelta::FromMicroseconds(Time::kMicrosecondsPerSecond + 1);
+  result = delta.ToTimeSpec();
+  EXPECT_EQ(result.tv_sec, 1);
+  EXPECT_EQ(result.tv_nsec, 1000);
+  EXPECT_EQ(delta, TimeDelta::FromTimeSpec(result));
+}
+#endif  // defined(OS_POSIX) || defined(OS_FUCHSIA)
+
+// Our internal time format is serialized in things like databases, so it's
+// important that it's consistent across all our platforms.  We use the 1601
+// Windows epoch as the internal format everywhere.
+TEST(TimeDelta, WindowsEpoch) {
+  Time::Exploded exploded;
+  exploded.year = 1970;
+  exploded.month = 1;
+  exploded.day_of_week = 0;  // Should be unused.
+  exploded.day_of_month = 1;
+  exploded.hour = 0;
+  exploded.minute = 0;
+  exploded.second = 0;
+  exploded.millisecond = 0;
+  Time t;
+  EXPECT_TRUE(Time::FromUTCExploded(exploded, &t));
+  // Unix 1970 epoch.
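+  // (11644473600 seconds: the 369 years from 1601 to 1970 contain
+  // 369 * 365 + 89 leap days = 134774 days, and 134774 * 86400 = 11644473600.)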
+  EXPECT_EQ(INT64_C(11644473600000000), t.ToInternalValue());
+
+  // We can't test 1601 epoch, since the system time functions on Linux
+  // only compute years starting from 1900.
+}
+
+// We could define this separately for Time, TimeTicks and TimeDelta but the
+// definitions would be identical anyway.
+template <class Any>
+std::string AnyToString(Any any) {
+  std::ostringstream oss;
+  oss << any;
+  return oss.str();
+}
+
+TEST(TimeDelta, Magnitude) {
+  constexpr int64_t zero = 0;
+  static_assert(TimeDelta::FromMicroseconds(zero) ==
+                    TimeDelta::FromMicroseconds(zero).magnitude(),
+                "");
+
+  constexpr int64_t one = 1;
+  constexpr int64_t negative_one = -1;
+  static_assert(TimeDelta::FromMicroseconds(one) ==
+                    TimeDelta::FromMicroseconds(one).magnitude(),
+                "");
+  static_assert(TimeDelta::FromMicroseconds(one) ==
+                    TimeDelta::FromMicroseconds(negative_one).magnitude(),
+                "");
+
+  constexpr int64_t max_int64_minus_one =
+      std::numeric_limits<int64_t>::max() - 1;
+  constexpr int64_t min_int64_plus_two =
+      std::numeric_limits<int64_t>::min() + 2;
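+  // Note: the magnitude of int64_t min itself is not representable in an
+  // int64_t, so the test stays clear of that limit;
+  // |min_int64_plus_two| == max_int64_minus_one.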
+  static_assert(
+      TimeDelta::FromMicroseconds(max_int64_minus_one) ==
+          TimeDelta::FromMicroseconds(max_int64_minus_one).magnitude(),
+      "");
+  static_assert(TimeDelta::FromMicroseconds(max_int64_minus_one) ==
+                    TimeDelta::FromMicroseconds(min_int64_plus_two).magnitude(),
+                "");
+}
+
+TEST(TimeDelta, ZeroMinMax) {
+  constexpr TimeDelta kZero;
+  static_assert(kZero.is_zero(), "");
+
+  constexpr TimeDelta kMax = TimeDelta::Max();
+  static_assert(kMax.is_max(), "");
+  static_assert(kMax == TimeDelta::Max(), "");
+  static_assert(kMax > TimeDelta::FromDays(100 * 365), "");
+  static_assert(kMax > kZero, "");
+
+  constexpr TimeDelta kMin = TimeDelta::Min();
+  static_assert(kMin.is_min(), "");
+  static_assert(kMin == TimeDelta::Min(), "");
+  static_assert(kMin < TimeDelta::FromDays(-100 * 365), "");
+  static_assert(kMin < kZero, "");
+}
+
+TEST(TimeDelta, MaxConversions) {
+  // static_assert also confirms constexpr works as intended.
+  constexpr TimeDelta kMax = TimeDelta::Max();
+  static_assert(kMax.ToInternalValue() == std::numeric_limits<int64_t>::max(),
+                "");
+  EXPECT_EQ(kMax.InDays(), std::numeric_limits<int>::max());
+  EXPECT_EQ(kMax.InHours(), std::numeric_limits<int>::max());
+  EXPECT_EQ(kMax.InMinutes(), std::numeric_limits<int>::max());
+  EXPECT_EQ(kMax.InSecondsF(), std::numeric_limits<double>::infinity());
+  EXPECT_EQ(kMax.InSeconds(), std::numeric_limits<int64_t>::max());
+  EXPECT_EQ(kMax.InMillisecondsF(), std::numeric_limits<double>::infinity());
+  EXPECT_EQ(kMax.InMilliseconds(), std::numeric_limits<int64_t>::max());
+  EXPECT_EQ(kMax.InMillisecondsRoundedUp(),
+            std::numeric_limits<int64_t>::max());
+
+  static_assert(TimeDelta::FromDays(std::numeric_limits<int>::max()).is_max(),
+                "");
+
+  static_assert(TimeDelta::FromHours(std::numeric_limits<int>::max()).is_max(),
+                "");
+
+  static_assert(
+      TimeDelta::FromMinutes(std::numeric_limits<int>::max()).is_max(), "");
+
+  constexpr int64_t max_int = std::numeric_limits<int64_t>::max();
+  constexpr int64_t min_int = std::numeric_limits<int64_t>::min();
+
+  static_assert(
+      TimeDelta::FromSeconds(max_int / Time::kMicrosecondsPerSecond + 1)
+          .is_max(),
+      "");
+
+  static_assert(
+      TimeDelta::FromMilliseconds(max_int / Time::kMillisecondsPerSecond + 1)
+          .is_max(),
+      "");
+
+  static_assert(TimeDelta::FromMicroseconds(max_int).is_max(), "");
+
+  static_assert(
+      TimeDelta::FromSeconds(min_int / Time::kMicrosecondsPerSecond - 1)
+          .is_min(),
+      "");
+
+  static_assert(
+      TimeDelta::FromMilliseconds(min_int / Time::kMillisecondsPerSecond - 1)
+          .is_min(),
+      "");
+
+  static_assert(TimeDelta::FromMicroseconds(min_int).is_min(), "");
+
+  static_assert(
+      TimeDelta::FromMicroseconds(std::numeric_limits<int64_t>::min()).is_min(),
+      "");
+
+  // Floating point arithmetic resulting in infinity isn't constexpr in C++14.
+  EXPECT_TRUE(TimeDelta::FromSecondsD(std::numeric_limits<double>::infinity())
+                  .is_max());
+
+  // Note that max_int/min_int will be rounded when converted to doubles - they
+  // can't be exactly represented.
+  constexpr double max_d = static_cast<double>(max_int);
+  constexpr double min_d = static_cast<double>(min_int);
+
+  static_assert(
+      TimeDelta::FromSecondsD(max_d / Time::kMicrosecondsPerSecond + 1)
+          .is_max(),
+      "");
+
+  // Floating point arithmetic resulting in infinity isn't constexpr in C++14.
+  EXPECT_TRUE(
+      TimeDelta::FromMillisecondsD(std::numeric_limits<double>::infinity())
+          .is_max());
+
+  static_assert(
+      TimeDelta::FromMillisecondsD(max_d / Time::kMillisecondsPerSecond * 2)
+          .is_max(),
+      "");
+
+  static_assert(
+      TimeDelta::FromSecondsD(min_d / Time::kMicrosecondsPerSecond - 1)
+          .is_min(),
+      "");
+
+  static_assert(
+      TimeDelta::FromMillisecondsD(min_d / Time::kMillisecondsPerSecond * 2)
+          .is_min(),
+      "");
+}
+
+TEST(TimeDelta, NumericOperators) {
+  constexpr double d = 0.5;
+  EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+            (TimeDelta::FromMilliseconds(1000) * d));
+  static_assert(TimeDelta::FromMilliseconds(2000) ==
+                    (TimeDelta::FromMilliseconds(1000) / d),
+                "");
+  EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+            (TimeDelta::FromMilliseconds(1000) *= d));
+  static_assert(TimeDelta::FromMilliseconds(2000) ==
+                    (TimeDelta::FromMilliseconds(1000) /= d),
+                "");
+  EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+            (d * TimeDelta::FromMilliseconds(1000)));
+
+  constexpr float f = 0.5;
+  EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+            (TimeDelta::FromMilliseconds(1000) * f));
+  static_assert(TimeDelta::FromMilliseconds(2000) ==
+                    (TimeDelta::FromMilliseconds(1000) / f),
+                "");
+  EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+            (TimeDelta::FromMilliseconds(1000) *= f));
+  static_assert(TimeDelta::FromMilliseconds(2000) ==
+                    (TimeDelta::FromMilliseconds(1000) /= f),
+                "");
+  EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+            (f * TimeDelta::FromMilliseconds(1000)));
+
+  constexpr int i = 2;
+  EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+            (TimeDelta::FromMilliseconds(1000) * i));
+  static_assert(TimeDelta::FromMilliseconds(500) ==
+                    (TimeDelta::FromMilliseconds(1000) / i),
+                "");
+  EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+            (TimeDelta::FromMilliseconds(1000) *= i));
+  static_assert(TimeDelta::FromMilliseconds(500) ==
+                    (TimeDelta::FromMilliseconds(1000) /= i),
+                "");
+  EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+            (i * TimeDelta::FromMilliseconds(1000)));
+
+  constexpr int64_t i64 = 2;
+  EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+            (TimeDelta::FromMilliseconds(1000) * i64));
+  static_assert(TimeDelta::FromMilliseconds(500) ==
+                    (TimeDelta::FromMilliseconds(1000) / i64),
+                "");
+  EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+            (TimeDelta::FromMilliseconds(1000) *= i64));
+  static_assert(TimeDelta::FromMilliseconds(500) ==
+                    (TimeDelta::FromMilliseconds(1000) /= i64),
+                "");
+  EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+            (i64 * TimeDelta::FromMilliseconds(1000)));
+
+  EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+            (TimeDelta::FromMilliseconds(1000) * 0.5));
+  static_assert(TimeDelta::FromMilliseconds(2000) ==
+                    (TimeDelta::FromMilliseconds(1000) / 0.5),
+                "");
+  EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+            (TimeDelta::FromMilliseconds(1000) *= 0.5));
+  static_assert(TimeDelta::FromMilliseconds(2000) ==
+                    (TimeDelta::FromMilliseconds(1000) /= 0.5),
+                "");
+  EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+            (0.5 * TimeDelta::FromMilliseconds(1000)));
+
+  EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+            (TimeDelta::FromMilliseconds(1000) * 2));
+  static_assert(TimeDelta::FromMilliseconds(500) ==
+                    (TimeDelta::FromMilliseconds(1000) / 2),
+                "");
+  EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+            (TimeDelta::FromMilliseconds(1000) *= 2));
+  static_assert(TimeDelta::FromMilliseconds(500) ==
+                    (TimeDelta::FromMilliseconds(1000) /= 2),
+                "");
+  EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+            (2 * TimeDelta::FromMilliseconds(1000)));
+}
+
+// Basic test of operators between TimeDeltas (without overflow -- next test
+// handles overflow).
+TEST(TimeDelta, TimeDeltaOperators) {
+  constexpr TimeDelta kElevenSeconds = TimeDelta::FromSeconds(11);
+  constexpr TimeDelta kThreeSeconds = TimeDelta::FromSeconds(3);
+
+  EXPECT_EQ(TimeDelta::FromSeconds(14), kElevenSeconds + kThreeSeconds);
+  EXPECT_EQ(TimeDelta::FromSeconds(14), kThreeSeconds + kElevenSeconds);
+  EXPECT_EQ(TimeDelta::FromSeconds(8), kElevenSeconds - kThreeSeconds);
+  EXPECT_EQ(TimeDelta::FromSeconds(-8), kThreeSeconds - kElevenSeconds);
+  static_assert(3 == kElevenSeconds / kThreeSeconds, "");
+  static_assert(0 == kThreeSeconds / kElevenSeconds, "");
+  static_assert(TimeDelta::FromSeconds(2) == kElevenSeconds % kThreeSeconds,
+                "");
+}
+
+TEST(TimeDelta, Overflows) {
+  // Some sanity checks. static_asserts are used where possible, to verify
+  // constexpr evaluation at the same time.
+  static_assert(TimeDelta::Max().is_max(), "");
+  static_assert(-TimeDelta::Max() < TimeDelta(), "");
+  static_assert(-TimeDelta::Max() > TimeDelta::Min(), "");
+  static_assert(TimeDelta() > -TimeDelta::Max(), "");
+
+  TimeDelta large_delta = TimeDelta::Max() - TimeDelta::FromMilliseconds(1);
+  TimeDelta large_negative = -large_delta;
+  EXPECT_GT(TimeDelta(), large_negative);
+  EXPECT_FALSE(large_delta.is_max());
+  EXPECT_FALSE((-large_negative).is_min());
+  constexpr TimeDelta kOneSecond = TimeDelta::FromSeconds(1);
+
+  // Test +, -, * and / operators.
+  EXPECT_TRUE((large_delta + kOneSecond).is_max());
+  EXPECT_TRUE((large_negative + (-kOneSecond)).is_min());
+  EXPECT_TRUE((large_negative - kOneSecond).is_min());
+  EXPECT_TRUE((large_delta - (-kOneSecond)).is_max());
+  EXPECT_TRUE((large_delta * 2).is_max());
+  EXPECT_TRUE((large_delta * -2).is_min());
+  EXPECT_TRUE((large_delta / 0.5).is_max());
+  EXPECT_TRUE((large_delta / -0.5).is_min());
+
+  // Test that double conversions overflow to infinity.
+  EXPECT_EQ((large_delta + kOneSecond).InSecondsF(),
+            std::numeric_limits<double>::infinity());
+  EXPECT_EQ((large_delta + kOneSecond).InMillisecondsF(),
+            std::numeric_limits<double>::infinity());
+  EXPECT_EQ((large_delta + kOneSecond).InMicrosecondsF(),
+            std::numeric_limits<double>::infinity());
+
+  // Test +=, -=, *= and /= operators.
+  TimeDelta delta = large_delta;
+  delta += kOneSecond;
+  EXPECT_TRUE(delta.is_max());
+  delta = large_negative;
+  delta += -kOneSecond;
+  EXPECT_TRUE((delta).is_min());
+
+  delta = large_negative;
+  delta -= kOneSecond;
+  EXPECT_TRUE((delta).is_min());
+  delta = large_delta;
+  delta -= -kOneSecond;
+  EXPECT_TRUE(delta.is_max());
+
+  delta = large_delta;
+  delta *= 2;
+  EXPECT_TRUE(delta.is_max());
+  delta = large_negative;
+  delta *= 1.5;
+  EXPECT_TRUE((delta).is_min());
+
+  delta = large_delta;
+  delta /= 0.5;
+  EXPECT_TRUE(delta.is_max());
+  delta = large_negative;
+  delta /= 0.5;
+  EXPECT_TRUE((delta).is_min());
+
+  // Test operations with Time and TimeTicks.
+  EXPECT_TRUE((large_delta + Time::Now()).is_max());
+  EXPECT_TRUE((large_delta + TimeTicks::Now()).is_max());
+  EXPECT_TRUE((Time::Now() + large_delta).is_max());
+  EXPECT_TRUE((TimeTicks::Now() + large_delta).is_max());
+
+  Time time_now = Time::Now();
+  EXPECT_EQ(kOneSecond, (time_now + kOneSecond) - time_now);
+  EXPECT_EQ(-kOneSecond, (time_now - kOneSecond) - time_now);
+
+  TimeTicks ticks_now = TimeTicks::Now();
+  EXPECT_EQ(-kOneSecond, (ticks_now - kOneSecond) - ticks_now);
+  EXPECT_EQ(kOneSecond, (ticks_now + kOneSecond) - ticks_now);
+}
+
+TEST(TimeDeltaLogging, DCheckEqCompiles) {
+  DCHECK_EQ(TimeDelta(), TimeDelta());
+}
+
+TEST(TimeDeltaLogging, EmptyIsZero) {
+  constexpr TimeDelta kZero;
+  EXPECT_EQ("0 s", AnyToString(kZero));
+}
+
+TEST(TimeDeltaLogging, FiveHundredMs) {
+  constexpr TimeDelta kFiveHundredMs = TimeDelta::FromMilliseconds(500);
+  EXPECT_EQ("0.5 s", AnyToString(kFiveHundredMs));
+}
+
+TEST(TimeDeltaLogging, MinusTenSeconds) {
+  constexpr TimeDelta kMinusTenSeconds = TimeDelta::FromSeconds(-10);
+  EXPECT_EQ("-10 s", AnyToString(kMinusTenSeconds));
+}
+
+TEST(TimeDeltaLogging, DoesNotMessUpFormattingFlags) {
+  std::ostringstream oss;
+  std::ios_base::fmtflags flags_before = oss.flags();
+  oss << TimeDelta();
+  EXPECT_EQ(flags_before, oss.flags());
+}
+
+TEST(TimeDeltaLogging, DoesNotMakeStreamBad) {
+  std::ostringstream oss;
+  oss << TimeDelta();
+  EXPECT_TRUE(oss.good());
+}
+
+TEST(TimeLogging, DCheckEqCompiles) {
+  DCHECK_EQ(Time(), Time());
+}
+
+TEST(TimeLogging, ChromeBirthdate) {
+  Time birthdate;
+  ASSERT_TRUE(Time::FromString("Tue, 02 Sep 2008 09:42:18 GMT", &birthdate));
+  EXPECT_EQ("2008-09-02 09:42:18.000 UTC", AnyToString(birthdate));
+}
+
+TEST(TimeLogging, DoesNotMessUpFormattingFlags) {
+  std::ostringstream oss;
+  std::ios_base::fmtflags flags_before = oss.flags();
+  oss << Time();
+  EXPECT_EQ(flags_before, oss.flags());
+}
+
+TEST(TimeLogging, DoesNotMakeStreamBad) {
+  std::ostringstream oss;
+  oss << Time();
+  EXPECT_TRUE(oss.good());
+}
+
+TEST(TimeTicksLogging, DCheckEqCompiles) {
+  DCHECK_EQ(TimeTicks(), TimeTicks());
+}
+
+TEST(TimeTicksLogging, ZeroTime) {
+  TimeTicks zero;
+  EXPECT_EQ("0 bogo-microseconds", AnyToString(zero));
+}
+
+TEST(TimeTicksLogging, FortyYearsLater) {
+  TimeTicks forty_years_later =
+      TimeTicks() + TimeDelta::FromDays(365.25 * 40);
+  EXPECT_EQ("1262304000000000 bogo-microseconds",
+            AnyToString(forty_years_later));
+}
+
+TEST(TimeTicksLogging, DoesNotMessUpFormattingFlags) {
+  std::ostringstream oss;
+  std::ios_base::fmtflags flags_before = oss.flags();
+  oss << TimeTicks();
+  EXPECT_EQ(flags_before, oss.flags());
+}
+
+TEST(TimeTicksLogging, DoesNotMakeStreamBad) {
+  std::ostringstream oss;
+  oss << TimeTicks();
+  EXPECT_TRUE(oss.good());
+}
+
+}  // namespace
+
+}  // namespace base
diff --git a/base/time/time_win.cc b/base/time/time_win.cc
new file mode 100644
index 0000000..9c6eba0
--- /dev/null
+++ b/base/time/time_win.cc
@@ -0,0 +1,737 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Windows Timer Primer
+//
+// A good article:  http://www.ddj.com/windows/184416651
+// A good mozilla bug:  http://bugzilla.mozilla.org/show_bug.cgi?id=363258
+//
+// The default Windows timer, GetSystemTimeAsFileTime, is not very precise.
+// It is only good to ~15.5ms.
+//
+// QueryPerformanceCounter is the logical choice for a high-precision timer.
+// However, it is known to be buggy on some hardware.  Specifically, it can
+// sometimes "jump".  On laptops, QPC can also be very expensive to call.
+// It's 3-4x slower than timeGetTime() on desktops, but can be 10x slower
+// on laptops.  A unittest exists which will show the relative cost of various
+// timers on any system.
+//
+// The next logical choice is timeGetTime().  timeGetTime has a precision of
+// 1ms, but only if you call APIs (timeBeginPeriod()) which affect all other
+// applications on the system.  By default, precision is only 15.5ms.
+// Unfortunately, we don't want to call timeBeginPeriod because we don't
+// want to affect other applications.  Further, on mobile platforms, use of
+// faster multimedia timers can hurt battery life.  See the intel
+// article about this here:
+// http://softwarecommunity.intel.com/articles/eng/1086.htm
+//
+// To work around all this, we're going to generally use timeGetTime().  We
+// will only increase the system-wide timer frequency if we're not running on
+// battery power.
+
+#include "base/time/time.h"
+
+#include <windows.h>
+#include <mmsystem.h>
+#include <stdint.h>
+
+#include "base/atomicops.h"
+#include "base/bit_cast.h"
+#include "base/cpu.h"
+#include "base/logging.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/platform_thread.h"
+#include "base/time/time_override.h"
+
+namespace base {
+
+namespace {
+
+// From MSDN, FILETIME "Contains a 64-bit value representing the number of
+// 100-nanosecond intervals since January 1, 1601 (UTC)."
+int64_t FileTimeToMicroseconds(const FILETIME& ft) {
+  // Need to bit_cast to fix alignment, then divide by 10 to convert
+  // 100-nanoseconds to microseconds. This only works on little-endian
+  // machines.
+  return bit_cast<int64_t, FILETIME>(ft) / 10;
+}
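+// For example, a FILETIME value of 10,000,000 (i.e. 10^7 hundred-nanosecond
+// intervals, one second) converts to 1,000,000 microseconds.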
+
+void MicrosecondsToFileTime(int64_t us, FILETIME* ft) {
+  DCHECK_GE(us, 0LL) << "Time is less than 0, negative values are not "
+      "representable in FILETIME";
+
+  // Multiply by 10 to convert microseconds to 100-nanoseconds. Bit_cast will
+  // handle alignment problems. This only works on little-endian machines.
+  *ft = bit_cast<FILETIME, int64_t>(us * 10);
+}
+
+int64_t CurrentWallclockMicroseconds() {
+  FILETIME ft;
+  ::GetSystemTimeAsFileTime(&ft);
+  return FileTimeToMicroseconds(ft);
+}
+
+// Time between resamplings of the coarse system clock for this API.
+constexpr TimeDelta kMaxTimeToAvoidDrift = TimeDelta::FromSeconds(60);
+
+int64_t g_initial_time = 0;
+TimeTicks g_initial_ticks;
+
+void InitializeClock() {
+  g_initial_ticks = subtle::TimeTicksNowIgnoringOverride();
+  g_initial_time = CurrentWallclockMicroseconds();
+}
+
+// The two values that ActivateHighResolutionTimer uses to set the systemwide
+// timer interrupt frequency on Windows. This frequency controls how precise
+// timers are, but also has a big impact on battery life.
+const int kMinTimerIntervalHighResMs = 1;
+const int kMinTimerIntervalLowResMs = 4;
+// Track if kMinTimerIntervalHighResMs or kMinTimerIntervalLowResMs is active.
+bool g_high_res_timer_enabled = false;
+// How many times the high resolution timer has been called.
+uint32_t g_high_res_timer_count = 0;
+// Start time of the high resolution timer usage monitoring. This is needed
+// to calculate the usage as percentage of the total elapsed time.
+TimeTicks g_high_res_timer_usage_start;
+// The cumulative time the high resolution timer has been in use since
+// |g_high_res_timer_usage_start| moment.
+TimeDelta g_high_res_timer_usage;
+// Timestamp of the last activation change of the high resolution timer. This
+// is used to calculate the cumulative usage.
+TimeTicks g_high_res_timer_last_activation;
+// The lock to control access to the above variables.
+Lock* GetHighResLock() {
+  static auto* lock = new Lock();
+  return lock;
+}
+
+// Returns the current value of the performance counter.
+uint64_t QPCNowRaw() {
+  LARGE_INTEGER perf_counter_now = {};
+  // According to the MSDN documentation for QueryPerformanceCounter(), this
+  // will never fail on systems that run XP or later.
+  // https://msdn.microsoft.com/library/windows/desktop/ms644904.aspx
+  ::QueryPerformanceCounter(&perf_counter_now);
+  return perf_counter_now.QuadPart;
+}
+
+bool SafeConvertToWord(int in, WORD* out) {
+  CheckedNumeric<WORD> result = in;
+  *out = result.ValueOrDefault(std::numeric_limits<WORD>::max());
+  return result.IsValid();
+}
+
+}  // namespace
+
+// Time -----------------------------------------------------------------------
+
+namespace subtle {
+Time TimeNowIgnoringOverride() {
+  if (g_initial_time == 0)
+    InitializeClock();
+
+  // We implement time using the high-resolution timers so that we can get
+  // timeouts which are smaller than 10-15ms.  If we just used
+  // CurrentWallclockMicroseconds(), we'd have the less-granular timer.
+  //
+  // To make this work, we initialize the clock (g_initial_time) and the
+  // tick counter (g_initial_ticks).  To compute the current time, we take
+  // the number of ticks that have elapsed since then and add that delta to
+  // the initial time.
+  //
+  // To avoid any drift, we periodically resync the counters to the system
+  // clock.
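+  //
+  // For example (illustrative numbers): if g_initial_time was sampled when
+  // the wall clock read 12:00:00.000000 and 2.5 s of ticks have elapsed
+  // since g_initial_ticks, this returns 12:00:02.500000 without re-reading
+  // the coarse system clock.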
+  while (true) {
+    TimeTicks ticks = TimeTicksNowIgnoringOverride();
+
+    // Calculate the time elapsed since we started our timer.
+    TimeDelta elapsed = ticks - g_initial_ticks;
+
+    // Check if enough time has elapsed that we need to resync the clock.
+    if (elapsed > kMaxTimeToAvoidDrift) {
+      InitializeClock();
+      continue;
+    }
+
+    return Time() + elapsed + TimeDelta::FromMicroseconds(g_initial_time);
+  }
+}
+
+Time TimeNowFromSystemTimeIgnoringOverride() {
+  // Force resync.
+  InitializeClock();
+  return Time() + TimeDelta::FromMicroseconds(g_initial_time);
+}
+}  // namespace subtle
+
+// static
+Time Time::FromFileTime(FILETIME ft) {
+  if (bit_cast<int64_t, FILETIME>(ft) == 0)
+    return Time();
+  if (ft.dwHighDateTime == std::numeric_limits<DWORD>::max() &&
+      ft.dwLowDateTime == std::numeric_limits<DWORD>::max())
+    return Max();
+  return Time(FileTimeToMicroseconds(ft));
+}
+
+FILETIME Time::ToFileTime() const {
+  if (is_null())
+    return bit_cast<FILETIME, int64_t>(0);
+  if (is_max()) {
+    FILETIME result;
+    result.dwHighDateTime = std::numeric_limits<DWORD>::max();
+    result.dwLowDateTime = std::numeric_limits<DWORD>::max();
+    return result;
+  }
+  FILETIME utc_ft;
+  MicrosecondsToFileTime(us_, &utc_ft);
+  return utc_ft;
+}
+
+// static
+void Time::EnableHighResolutionTimer(bool enable) {
+  AutoLock lock(*GetHighResLock());
+  if (g_high_res_timer_enabled == enable)
+    return;
+  g_high_res_timer_enabled = enable;
+  if (!g_high_res_timer_count)
+    return;
+  // Since g_high_res_timer_count != 0, ActivateHighResolutionTimer(true) was
+  // called while g_high_res_timer_enabled held the opposite of |enable|, so
+  // timeBeginPeriod was called with the other interval. Call timeEndPeriod
+  // with that same interval to undo its effect, then timeBeginPeriod with the
+  // new one.
+  if (enable) {
+    timeEndPeriod(kMinTimerIntervalLowResMs);
+    timeBeginPeriod(kMinTimerIntervalHighResMs);
+  } else {
+    timeEndPeriod(kMinTimerIntervalHighResMs);
+    timeBeginPeriod(kMinTimerIntervalLowResMs);
+  }
+}
+
+// static
+bool Time::ActivateHighResolutionTimer(bool activating) {
+  // We only do work on the transition from zero to one or one to zero so we
+  // can easily undo the effect (if necessary) when EnableHighResolutionTimer is
+  // called.
+  const uint32_t max = std::numeric_limits<uint32_t>::max();
+
+  AutoLock lock(*GetHighResLock());
+  UINT period = g_high_res_timer_enabled ? kMinTimerIntervalHighResMs
+                                         : kMinTimerIntervalLowResMs;
+  if (activating) {
+    DCHECK_NE(g_high_res_timer_count, max);
+    ++g_high_res_timer_count;
+    if (g_high_res_timer_count == 1) {
+      g_high_res_timer_last_activation = subtle::TimeTicksNowIgnoringOverride();
+      timeBeginPeriod(period);
+    }
+  } else {
+    DCHECK_NE(g_high_res_timer_count, 0u);
+    --g_high_res_timer_count;
+    if (g_high_res_timer_count == 0) {
+      g_high_res_timer_usage += subtle::TimeTicksNowIgnoringOverride() -
+                                g_high_res_timer_last_activation;
+      timeEndPeriod(period);
+    }
+  }
+  return (period == kMinTimerIntervalHighResMs);
+}
+
+// static
+bool Time::IsHighResolutionTimerInUse() {
+  AutoLock lock(*GetHighResLock());
+  return g_high_res_timer_enabled && g_high_res_timer_count > 0;
+}
+
+// static
+void Time::ResetHighResolutionTimerUsage() {
+  AutoLock lock(*GetHighResLock());
+  g_high_res_timer_usage = TimeDelta();
+  g_high_res_timer_usage_start = subtle::TimeTicksNowIgnoringOverride();
+  if (g_high_res_timer_count > 0)
+    g_high_res_timer_last_activation = g_high_res_timer_usage_start;
+}
+
+// static
+double Time::GetHighResolutionTimerUsage() {
+  AutoLock lock(*GetHighResLock());
+  TimeTicks now = subtle::TimeTicksNowIgnoringOverride();
+  TimeDelta elapsed_time = now - g_high_res_timer_usage_start;
+  if (elapsed_time.is_zero()) {
+    // This is unexpected but possible if TimeTicks resolution is low and
+    // GetHighResolutionTimerUsage() is called promptly after
+    // ResetHighResolutionTimerUsage().
+    return 0.0;
+  }
+  TimeDelta used_time = g_high_res_timer_usage;
+  if (g_high_res_timer_count > 0) {
+    // If currently activated add the remainder of time since the last
+    // activation.
+    used_time += now - g_high_res_timer_last_activation;
+  }
+  return used_time.InMillisecondsF() / elapsed_time.InMillisecondsF() * 100;
+}
+
+// static
+bool Time::FromExploded(bool is_local, const Exploded& exploded, Time* time) {
+  // Create the system struct representing our exploded time. It will either be
+  // in local time or UTC. If casting from int to WORD results in overflow,
+  // fail and return Time(0).
+  SYSTEMTIME st;
+  if (!SafeConvertToWord(exploded.year, &st.wYear) ||
+      !SafeConvertToWord(exploded.month, &st.wMonth) ||
+      !SafeConvertToWord(exploded.day_of_week, &st.wDayOfWeek) ||
+      !SafeConvertToWord(exploded.day_of_month, &st.wDay) ||
+      !SafeConvertToWord(exploded.hour, &st.wHour) ||
+      !SafeConvertToWord(exploded.minute, &st.wMinute) ||
+      !SafeConvertToWord(exploded.second, &st.wSecond) ||
+      !SafeConvertToWord(exploded.millisecond, &st.wMilliseconds)) {
+    *time = Time(0);
+    return false;
+  }
+
+  FILETIME ft;
+  bool success = true;
+  // Ensure that it's in UTC.
+  if (is_local) {
+    SYSTEMTIME utc_st;
+    success = TzSpecificLocalTimeToSystemTime(nullptr, &st, &utc_st) &&
+              SystemTimeToFileTime(&utc_st, &ft);
+  } else {
+    success = !!SystemTimeToFileTime(&st, &ft);
+  }
+
+  if (!success) {
+    *time = Time(0);
+    return false;
+  }
+
+  *time = Time(FileTimeToMicroseconds(ft));
+  return true;
+}
+
+void Time::Explode(bool is_local, Exploded* exploded) const {
+  if (us_ < 0LL) {
+    // We are not able to convert it to FILETIME.
+    ZeroMemory(exploded, sizeof(*exploded));
+    return;
+  }
+
+  // FILETIME in UTC.
+  FILETIME utc_ft;
+  MicrosecondsToFileTime(us_, &utc_ft);
+
+  // FILETIME in local time if necessary.
+  bool success = true;
+  // FILETIME in SYSTEMTIME (exploded).
+  SYSTEMTIME st = {0};
+  if (is_local) {
+    SYSTEMTIME utc_st;
+    // We don't use FileTimeToLocalFileTime here, since it uses the current
+    // settings for the time zone and daylight saving time. Therefore, if it is
+    // daylight saving time, it will take daylight saving time into account,
+    // even if the time you are converting is in standard time.
+    success = FileTimeToSystemTime(&utc_ft, &utc_st) &&
+              SystemTimeToTzSpecificLocalTime(nullptr, &utc_st, &st);
+  } else {
+    success = !!FileTimeToSystemTime(&utc_ft, &st);
+  }
+
+  if (!success) {
+    NOTREACHED() << "Unable to convert time, don't know why";
+    ZeroMemory(exploded, sizeof(*exploded));
+    return;
+  }
+
+  exploded->year = st.wYear;
+  exploded->month = st.wMonth;
+  exploded->day_of_week = st.wDayOfWeek;
+  exploded->day_of_month = st.wDay;
+  exploded->hour = st.wHour;
+  exploded->minute = st.wMinute;
+  exploded->second = st.wSecond;
+  exploded->millisecond = st.wMilliseconds;
+}
+
+// TimeTicks ------------------------------------------------------------------
+
+namespace {
+
+// We define a wrapper to adapt between the __stdcall and __cdecl call of the
+// mock function, and to avoid a static constructor.  Assigning an import to a
+// function pointer directly would require setup code to fetch from the IAT.
+DWORD timeGetTimeWrapper() {
+  return timeGetTime();
+}
+
+DWORD (*g_tick_function)(void) = &timeGetTimeWrapper;
+
+// A structure holding the most significant bits of the "last seen" time and
+// a "rollover" counter.
+union LastTimeAndRolloversState {
+  // The state as a single 32-bit opaque value.
+  subtle::Atomic32 as_opaque_32;
+
+  // The state as usable values.
+  struct {
+    // The top 8-bits of the "last" time. This is enough to check for rollovers
+    // and the small bit-size means fewer CompareAndSwap operations to store
+    // changes in state, which in turn makes for fewer retries.
+    uint8_t last_8;
+    // A count of the number of detected rollovers. Using this as bits 47-32
+    // of the upper half of a 64-bit value results in a 48-bit tick counter.
+    // This extends the total rollover period from about 49 days to about 8800
+    // years while still allowing it to be stored with last_8 in a single
+    // 32-bit value.
+    uint16_t rollovers;
+  } as_values;
+};
+subtle::Atomic32 g_last_time_and_rollovers = 0;
+static_assert(
+    sizeof(LastTimeAndRolloversState) <= sizeof(g_last_time_and_rollovers),
+    "LastTimeAndRolloversState does not fit in a single atomic word");
+
+// We use timeGetTime() to implement TimeTicks::Now().  This can be problematic
+// because it returns the number of milliseconds since Windows has started,
+// which will roll over the 32-bit value every ~49 days.  We try to track
+// rollover ourselves, which works if TimeTicks::Now() is called at least every
+// 48.8 days (not 49 days because only changes in the top 8 bits get noticed).
+TimeTicks RolloverProtectedNow() {
+  LastTimeAndRolloversState state;
+  DWORD now;  // DWORD is always unsigned 32 bits.
+
+  while (true) {
+    // Fetch the "now" and "last" tick values, updating "last" with "now" and
+    // incrementing the "rollovers" counter if the tick-value has wrapped back
+    // around. Atomic operations ensure that both "last" and "rollovers" are
+    // always updated together.
+    int32_t original = subtle::Acquire_Load(&g_last_time_and_rollovers);
+    state.as_opaque_32 = original;
+    now = g_tick_function();
+    uint8_t now_8 = static_cast<uint8_t>(now >> 24);
+    if (now_8 < state.as_values.last_8)
+      ++state.as_values.rollovers;
+    state.as_values.last_8 = now_8;
+
+    // If the state hasn't changed, exit the loop.
+    if (state.as_opaque_32 == original)
+      break;
+
+    // Save the changed state. If the existing value is unchanged from the
+    // original, exit the loop.
+    int32_t check = subtle::Release_CompareAndSwap(
+        &g_last_time_and_rollovers, original, state.as_opaque_32);
+    if (check == original)
+      break;
+
+    // Another thread has done something in between so retry from the top.
+  }
+
+  return TimeTicks() +
+         TimeDelta::FromMilliseconds(
+             now + (static_cast<uint64_t>(state.as_values.rollovers) << 32));
+}
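+// Worked example of the reconstruction above: after one 32-bit wrap of
+// timeGetTime(), |rollovers| is 1, so the result is the raw reading plus
+// (1ull << 32) ms, i.e. one full ~49.7-day wrap period.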
+
+// Discussion of tick counter options on Windows:
+//
+// (1) CPU cycle counter (retrieved via RDTSC).
+// The CPU counter provides the highest-resolution time stamp and is the least
+// expensive to retrieve. However, on older CPUs, two issues can affect its
+// reliability: first, it is maintained per processor and not synchronized
+// between processors; second, the counters change frequency with thermal and
+// power state changes, and stop entirely in some CPU states.
+//
+// (2) QueryPerformanceCounter (QPC). The QPC counter provides a high-
+// resolution (<1 microsecond) time stamp. On most hardware running today, it
+// auto-detects and uses the constant-rate RDTSC counter to provide extremely
+// efficient and reliable time stamps.
+//
+// On older CPUs where RDTSC is unreliable, it falls back to using more
+// expensive (20X to 40X more costly) alternate clocks, such as HPET or the ACPI
+// PM timer, and can involve system calls; and all this is up to the HAL (with
+// some help from ACPI). According to
+// http://blogs.msdn.com/oldnewthing/archive/2005/09/02/459952.aspx, in the
+// worst case, it gets the counter from the rollover interrupt on the
+// programmable interrupt timer. In best cases, the HAL may conclude that the
+// RDTSC counter runs at a constant frequency, then it uses that instead. On
+// multiprocessor machines, it will try to verify the values returned from
+// RDTSC on each processor are consistent with each other, and apply a handful
+// of workarounds for known buggy hardware. In other words, QPC is supposed to
+// give consistent results on a multiprocessor computer, but for older CPUs it
+// can be unreliable due to bugs in BIOS or HAL.
+//
+// (3) System time. The system time provides a low-resolution (from ~1 to ~15.6
+// milliseconds) time stamp but is comparatively less expensive to retrieve and
+// more reliable. Time::EnableHighResolutionTimer() and
+// Time::ActivateHighResolutionTimer() can be called to alter the resolution of
+// this timer; and also other Windows applications can alter it, affecting this
+// one.
+
+TimeTicks InitialNowFunction();
+
+// See "threading notes" in InitializeNowFunctionPointer() for details on how
+// concurrent reads/writes to these globals have been made safe.
+TimeTicksNowFunction g_time_ticks_now_ignoring_override_function =
+    &InitialNowFunction;
+int64_t g_qpc_ticks_per_second = 0;
+
+// As of January 2015, use of <atomic> is forbidden in Chromium code. This is
+// what std::atomic_thread_fence does on Windows on all Intel architectures when
+// the memory_order argument is anything but std::memory_order_seq_cst:
+#define ATOMIC_THREAD_FENCE(memory_order) _ReadWriteBarrier();
+
+TimeDelta QPCValueToTimeDelta(LONGLONG qpc_value) {
+  // Ensure that the assignment to |g_qpc_ticks_per_second|, made in
+  // InitializeNowFunctionPointer(), has happened by this point.
+  ATOMIC_THREAD_FENCE(memory_order_acquire);
+
+  DCHECK_GT(g_qpc_ticks_per_second, 0);
+
+  // If the QPC Value is below the overflow threshold, we proceed with
+  // simple multiply and divide.
+  if (qpc_value < Time::kQPCOverflowThreshold) {
+    return TimeDelta::FromMicroseconds(
+        qpc_value * Time::kMicrosecondsPerSecond / g_qpc_ticks_per_second);
+  }
+  // Otherwise, calculate microseconds in a roundabout manner to avoid
+  // overflow and precision issues.
+  int64_t whole_seconds = qpc_value / g_qpc_ticks_per_second;
+  int64_t leftover_ticks = qpc_value - (whole_seconds * g_qpc_ticks_per_second);
+  return TimeDelta::FromMicroseconds(
+      (whole_seconds * Time::kMicrosecondsPerSecond) +
+      ((leftover_ticks * Time::kMicrosecondsPerSecond) /
+       g_qpc_ticks_per_second));
+}
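+// Illustrating the overflow-safe arithmetic with small (hypothetical)
+// numbers: with g_qpc_ticks_per_second == 10,000,000 and
+// qpc_value == 25,000,000, whole_seconds == 2 and leftover_ticks == 5,000,000,
+// for a total of 2,000,000 + 500,000 == 2,500,000 microseconds (2.5 s).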
+
+TimeTicks QPCNow() {
+  return TimeTicks() + QPCValueToTimeDelta(QPCNowRaw());
+}
+
+bool IsBuggyAthlon(const CPU& cpu) {
+  // On Athlon X2 CPUs (e.g. model 15) QueryPerformanceCounter is unreliable.
+  return cpu.vendor_name() == "AuthenticAMD" && cpu.family() == 15;
+}
+
+void InitializeNowFunctionPointer() {
+  LARGE_INTEGER ticks_per_sec = {};
+  if (!QueryPerformanceFrequency(&ticks_per_sec))
+    ticks_per_sec.QuadPart = 0;
+
+  // If Windows cannot provide a QPC implementation, TimeTicks::Now() must use
+  // the low-resolution clock.
+  //
+  // If the QPC implementation is expensive and/or unreliable, TimeTicks::Now()
+  // will still use the low-resolution clock. A CPU lacking a non-stop time
+  // counter will cause Windows to provide an alternate QPC implementation that
+  // works, but is expensive to use. Certain Athlon CPUs are known to make the
+  // QPC implementation unreliable.
+  //
+  // Otherwise, Now uses the high-resolution QPC clock. As of 21 August 2015,
+  // ~72% of users fall within this category.
+  TimeTicksNowFunction now_function;
+  CPU cpu;
+  if (ticks_per_sec.QuadPart <= 0 ||
+      !cpu.has_non_stop_time_stamp_counter() || IsBuggyAthlon(cpu)) {
+    now_function = &RolloverProtectedNow;
+  } else {
+    now_function = &QPCNow;
+  }
+
+  // Threading note 1: In an unlikely race condition, it's possible for two or
+  // more threads to enter InitializeNowFunctionPointer() in parallel. This is
+  // not a problem since all threads should end up writing out the same values
+  // to the global variables.
+  //
+  // Threading note 2: A release fence is placed here to ensure, from the
+  // perspective of other threads using the function pointers, that the
+  // assignment to |g_qpc_ticks_per_second| happens before the function pointers
+  // are changed.
+  g_qpc_ticks_per_second = ticks_per_sec.QuadPart;
+  ATOMIC_THREAD_FENCE(memory_order_release);
+  // Also set g_time_ticks_now_function to avoid the additional indirection via
+  // TimeTicksNowIgnoringOverride() for future calls to TimeTicks::Now(). But
+  // g_time_ticks_now_function may already have been overridden.
+  if (internal::g_time_ticks_now_function ==
+      &subtle::TimeTicksNowIgnoringOverride) {
+    internal::g_time_ticks_now_function = now_function;
+  }
+  g_time_ticks_now_ignoring_override_function = now_function;
+}
+
+TimeTicks InitialNowFunction() {
+  InitializeNowFunctionPointer();
+  return g_time_ticks_now_ignoring_override_function();
+}
+
+}  // namespace
+
+// static
+TimeTicks::TickFunctionType TimeTicks::SetMockTickFunction(
+    TickFunctionType ticker) {
+  TickFunctionType old = g_tick_function;
+  g_tick_function = ticker;
+  subtle::NoBarrier_Store(&g_last_time_and_rollovers, 0);
+  return old;
+}
+
+namespace subtle {
+TimeTicks TimeTicksNowIgnoringOverride() {
+  return g_time_ticks_now_ignoring_override_function();
+}
+}  // namespace subtle
+
+// static
+bool TimeTicks::IsHighResolution() {
+  if (g_time_ticks_now_ignoring_override_function == &InitialNowFunction)
+    InitializeNowFunctionPointer();
+  return g_time_ticks_now_ignoring_override_function == &QPCNow;
+}
+
+// static
+bool TimeTicks::IsConsistentAcrossProcesses() {
+  // According to Windows documentation [1] QPC is consistent post-Windows
+  // Vista. So if we are using QPC then we are consistent, which is the same as
+  // being high resolution.
+  //
+  // [1] https://msdn.microsoft.com/en-us/library/windows/desktop/dn553408(v=vs.85).aspx
+  //
+  // "In general, the performance counter results are consistent across all
+  // processors in multi-core and multi-processor systems, even when measured on
+  // different threads or processes. Here are some exceptions to this rule:
+  // - Pre-Windows Vista operating systems that run on certain processors might
+  // violate this consistency because of one of these reasons:
+  //     1. The hardware processors have a non-invariant TSC and the BIOS
+  //     doesn't indicate this condition correctly.
+  //     2. The TSC synchronization algorithm that was used wasn't suitable for
+  //     systems with large numbers of processors."
+  return IsHighResolution();
+}
+
+// static
+TimeTicks::Clock TimeTicks::GetClock() {
+  return IsHighResolution() ?
+      Clock::WIN_QPC : Clock::WIN_ROLLOVER_PROTECTED_TIME_GET_TIME;
+}
+
+// ThreadTicks ----------------------------------------------------------------
+
+namespace subtle {
+ThreadTicks ThreadTicksNowIgnoringOverride() {
+  return ThreadTicks::GetForThread(PlatformThread::CurrentHandle());
+}
+}  // namespace subtle
+
+// static
+ThreadTicks ThreadTicks::GetForThread(
+    const PlatformThreadHandle& thread_handle) {
+  DCHECK(IsSupported());
+
+  // Get the number of TSC ticks used by the current thread.
+  ULONG64 thread_cycle_time = 0;
+  ::QueryThreadCycleTime(thread_handle.platform_handle(), &thread_cycle_time);
+
+  // Get the frequency of the TSC.
+  double tsc_ticks_per_second = TSCTicksPerSecond();
+  if (tsc_ticks_per_second == 0)
+    return ThreadTicks();
+
+  // Return the CPU time of the current thread.
+  double thread_time_seconds = thread_cycle_time / tsc_ticks_per_second;
+  return ThreadTicks(
+      static_cast<int64_t>(thread_time_seconds * Time::kMicrosecondsPerSecond));
+}
+
+// static
+bool ThreadTicks::IsSupportedWin() {
+  static bool is_supported =
+      CPU().has_non_stop_time_stamp_counter() && !IsBuggyAthlon(CPU());
+  return is_supported;
+}
+
+// static
+void ThreadTicks::WaitUntilInitializedWin() {
+  while (TSCTicksPerSecond() == 0)
+    ::Sleep(10);
+}
+
+double ThreadTicks::TSCTicksPerSecond() {
+  DCHECK(IsSupported());
+
+  // The value returned by QueryPerformanceFrequency() cannot be used as the TSC
+  // frequency, because there is no guarantee that the TSC frequency is equal to
+  // the performance counter frequency.
+
+  // The TSC frequency is cached in a static variable because it takes some time
+  // to compute it.
+  static double tsc_ticks_per_second = 0;
+  if (tsc_ticks_per_second != 0)
+    return tsc_ticks_per_second;
+
+  // Increase the thread priority to reduce the chances of having a context
+  // switch during a reading of the TSC and the performance counter.
+  int previous_priority = ::GetThreadPriority(::GetCurrentThread());
+  ::SetThreadPriority(::GetCurrentThread(), THREAD_PRIORITY_HIGHEST);
+
+  // The first time that this function is called, make an initial reading of the
+  // TSC and the performance counter.
+  static const uint64_t tsc_initial = __rdtsc();
+  static const uint64_t perf_counter_initial = QPCNowRaw();
+
+  // Make another reading of the TSC and the performance counter every time
+  // that this function is called.
+  uint64_t tsc_now = __rdtsc();
+  uint64_t perf_counter_now = QPCNowRaw();
+
+  // Reset the thread priority.
+  ::SetThreadPriority(::GetCurrentThread(), previous_priority);
+
+  // Make sure that at least 50 ms elapsed between the 2 readings. The first
+  // time that this function is called, we don't expect this to be the case.
+  // Note: The longer the elapsed time between the 2 readings is, the more
+  //   accurate the computed TSC frequency will be. The 50 ms value was
+  //   chosen because local benchmarks show that it allows us to get a
+  //   stddev of less than 1 tick/us between multiple runs.
+  // Note: According to the MSDN documentation for QueryPerformanceFrequency(),
+  //   this will never fail on systems that run XP or later.
+  //   https://msdn.microsoft.com/library/windows/desktop/ms644905.aspx
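+  // Worked example with illustrative numbers: for a 2.5 GHz TSC and 0.05 s
+  // between the two readings, tsc_ticks is about 125,000,000, giving
+  // 125000000 / 0.05 = 2.5e9 ticks/second.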
+  LARGE_INTEGER perf_counter_frequency = {};
+  ::QueryPerformanceFrequency(&perf_counter_frequency);
+  DCHECK_GE(perf_counter_now, perf_counter_initial);
+  uint64_t perf_counter_ticks = perf_counter_now - perf_counter_initial;
+  double elapsed_time_seconds =
+      perf_counter_ticks / static_cast<double>(perf_counter_frequency.QuadPart);
+
+  static constexpr double kMinimumEvaluationPeriodSeconds = 0.05;
+  if (elapsed_time_seconds < kMinimumEvaluationPeriodSeconds)
+    return 0;
+
+  // Compute the frequency of the TSC.
+  DCHECK_GE(tsc_now, tsc_initial);
+  uint64_t tsc_ticks = tsc_now - tsc_initial;
+  tsc_ticks_per_second = tsc_ticks / elapsed_time_seconds;
+
+  return tsc_ticks_per_second;
+}
+
+// static
+TimeTicks TimeTicks::FromQPCValue(LONGLONG qpc_value) {
+  return TimeTicks() + QPCValueToTimeDelta(qpc_value);
+}
+
+// TimeDelta ------------------------------------------------------------------
+
+// static
+TimeDelta TimeDelta::FromQPCValue(LONGLONG qpc_value) {
+  return QPCValueToTimeDelta(qpc_value);
+}
+
+// static
+TimeDelta TimeDelta::FromFileTime(FILETIME ft) {
+  return TimeDelta::FromMicroseconds(FileTimeToMicroseconds(ft));
+}
+
+}  // namespace base
diff --git a/base/time/time_win_unittest.cc b/base/time/time_win_unittest.cc
new file mode 100644
index 0000000..24cd731
--- /dev/null
+++ b/base/time/time_win_unittest.cc
@@ -0,0 +1,365 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <windows.h>
+#include <mmsystem.h>
+#include <process.h>
+#include <stdint.h>
+
+#include <cmath>
+#include <limits>
+#include <vector>
+
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
+#include "base/win/registry.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+// For TimeDelta::ConstexprInitialization
+constexpr int kExpectedDeltaInMilliseconds = 10;
+constexpr TimeDelta kConstexprTimeDelta =
+    TimeDelta::FromMilliseconds(kExpectedDeltaInMilliseconds);
+
+class MockTimeTicks : public TimeTicks {
+ public:
+  static DWORD Ticker() {
+    return static_cast<int>(InterlockedIncrement(&ticker_));
+  }
+
+  static void InstallTicker() {
+    old_tick_function_ = SetMockTickFunction(&Ticker);
+    ticker_ = -5;
+  }
+
+  static void UninstallTicker() {
+    SetMockTickFunction(old_tick_function_);
+  }
+
+ private:
+  static volatile LONG ticker_;
+  static TickFunctionType old_tick_function_;
+};
+
+volatile LONG MockTimeTicks::ticker_;
+MockTimeTicks::TickFunctionType MockTimeTicks::old_tick_function_;
+
+HANDLE g_rollover_test_start;
+
+unsigned __stdcall RolloverTestThreadMain(void* param) {
+  int64_t counter = reinterpret_cast<int64_t>(param);
+  DWORD rv = WaitForSingleObject(g_rollover_test_start, INFINITE);
+  EXPECT_EQ(rv, WAIT_OBJECT_0);
+
+  TimeTicks last = TimeTicks::Now();
+  for (int index = 0; index < counter; index++) {
+    TimeTicks now = TimeTicks::Now();
+    int64_t milliseconds = (now - last).InMilliseconds();
+    // This is a tight loop; we could have looped faster than our
+    // measurements, so the time might be 0 millis.
+    EXPECT_GE(milliseconds, 0);
+    EXPECT_LT(milliseconds, 250);
+    last = now;
+  }
+  return 0;
+}
+
+}  // namespace
+
+// This test spawns many threads, and can occasionally fail due to resource
+// exhaustion in the presence of ASan.
+#if defined(ADDRESS_SANITIZER)
+#define MAYBE_WinRollover DISABLED_WinRollover
+#else
+#define MAYBE_WinRollover WinRollover
+#endif
+TEST(TimeTicks, MAYBE_WinRollover) {
+  // The internal counter rolls over at ~49 days.  We'll use a mock
+  // timer to test this case.
+  // Basic test algorithm:
+  //   1) Set clock to rollover - N
+  //   2) Create N threads
+  //   3) Start the threads
+  //   4) Each thread loops through TimeTicks() N times
+  //   5) Each thread verifies integrity of result.
+
+  const int kThreads = 8;
+  // Use int64_t so we can cast into a void* without a compiler warning.
+  const int64_t kChecks = 10;
+
+  // It takes a lot of iterations to reproduce the bug!
+  // (See bug 1081395)
+  for (int loop = 0; loop < 4096; loop++) {
+    // Setup
+    MockTimeTicks::InstallTicker();
+    g_rollover_test_start = CreateEvent(0, TRUE, FALSE, 0);
+    HANDLE threads[kThreads];
+
+    for (int index = 0; index < kThreads; index++) {
+      void* argument = reinterpret_cast<void*>(kChecks);
+      unsigned thread_id;
+      threads[index] = reinterpret_cast<HANDLE>(
+        _beginthreadex(NULL, 0, RolloverTestThreadMain, argument, 0,
+          &thread_id));
+      EXPECT_NE((HANDLE)NULL, threads[index]);
+    }
+
+    // Start!
+    SetEvent(g_rollover_test_start);
+
+    // Wait for threads to finish
+    for (int index = 0; index < kThreads; index++) {
+      DWORD rv = WaitForSingleObject(threads[index], INFINITE);
+      EXPECT_EQ(rv, WAIT_OBJECT_0);
+      // Since using _beginthreadex() (as opposed to _beginthread),
+      // an explicit CloseHandle() is supposed to be called.
+      CloseHandle(threads[index]);
+    }
+
+    CloseHandle(g_rollover_test_start);
+
+    // Teardown
+    MockTimeTicks::UninstallTicker();
+  }
+}
+
+TEST(TimeTicks, SubMillisecondTimers) {
+  // IsHighResolution() is false on some systems.  Since the product still works
+  // even if it's false, it makes this entire test questionable.
+  if (!TimeTicks::IsHighResolution())
+    return;
+
+  const int kRetries = 1000;
+  bool saw_submillisecond_timer = false;
+
+  // Run kRetries attempts to see a sub-millisecond timer.
+  for (int index = 0; index < kRetries; index++) {
+    TimeTicks last_time = TimeTicks::Now();
+    TimeDelta delta;
+    // Spin until the clock has detected a change.
+    do {
+      delta = TimeTicks::Now() - last_time;
+    } while (delta.InMicroseconds() == 0);
+    if (delta.InMicroseconds() < 1000) {
+      saw_submillisecond_timer = true;
+      break;
+    }
+  }
+  EXPECT_TRUE(saw_submillisecond_timer);
+}
+
+TEST(TimeTicks, TimeGetTimeCaps) {
+  // Test some basic assumptions that we expect about how timeGetDevCaps works.
+
+  TIMECAPS caps;
+  MMRESULT status = timeGetDevCaps(&caps, sizeof(caps));
+  ASSERT_EQ(static_cast<MMRESULT>(MMSYSERR_NOERROR), status);
+
+  EXPECT_GE(static_cast<int>(caps.wPeriodMin), 1);
+  EXPECT_GT(static_cast<int>(caps.wPeriodMax), 1);
+  printf("timeGetTime range is %d to %dms\n", caps.wPeriodMin,
+    caps.wPeriodMax);
+}
+
+TEST(TimeTicks, QueryPerformanceFrequency) {
+  // Test some basic assumptions that we expect about QPC.
+
+  LARGE_INTEGER frequency;
+  BOOL rv = QueryPerformanceFrequency(&frequency);
+  EXPECT_EQ(TRUE, rv);
+  EXPECT_GT(frequency.QuadPart, 1000000);  // Expect more than 1 MHz.
+  printf("QueryPerformanceFrequency is %5.2fMHz\n",
+    frequency.QuadPart / 1000000.0);
+}
+
+TEST(TimeTicks, TimerPerformance) {
+  // Verify that various timer mechanisms can always complete quickly.
+  // Note:  This is a somewhat arbitrary test.
+  const int kLoops = 10000;
+
+  typedef TimeTicks (*TestFunc)();
+  struct TestCase {
+    TestFunc func;
+    const char *description;
+  };
+  // Cheating a bit here:  assumes sizeof(TimeTicks) == sizeof(Time)
+  // in order to create a single test case list.
+  static_assert(sizeof(TimeTicks) == sizeof(Time),
+                "TimeTicks and Time must be the same size");
+  std::vector<TestCase> cases;
+  cases.push_back({reinterpret_cast<TestFunc>(&Time::Now), "Time::Now"});
+  cases.push_back({&TimeTicks::Now, "TimeTicks::Now"});
+
+  if (ThreadTicks::IsSupported()) {
+    ThreadTicks::WaitUntilInitialized();
+    cases.push_back(
+        {reinterpret_cast<TestFunc>(&ThreadTicks::Now), "ThreadTicks::Now"});
+  }
+
+  for (const auto& test_case : cases) {
+    TimeTicks start = TimeTicks::Now();
+    for (int index = 0; index < kLoops; index++)
+      test_case.func();
+    TimeTicks stop = TimeTicks::Now();
+    // Turning off the check for acceptable delays.  Without this check,
+    // the test really doesn't do much other than measure.  But the
+    // measurements are still useful for testing timers on various platforms.
+    // The reason to remove the check is because the tests run on many
+    // buildbots, some of which are VMs.  These machines can run horribly
+    // slowly, and there is really no value in checking against a max timer.
+    //const int kMaxTime = 35;  // Maximum acceptable milliseconds for test.
+    //EXPECT_LT((stop - start).InMilliseconds(), kMaxTime);
+    printf("%s: %1.2fus per call\n", test_case.description,
+           (stop - start).InMillisecondsF() * 1000 / kLoops);
+  }
+}
+
+TEST(TimeTicks, TSCTicksPerSecond) {
+  if (ThreadTicks::IsSupported()) {
+    ThreadTicks::WaitUntilInitialized();
+
+    // Read the CPU frequency from the registry.
+    base::win::RegKey processor_key(
+        HKEY_LOCAL_MACHINE,
+        L"Hardware\\Description\\System\\CentralProcessor\\0", KEY_QUERY_VALUE);
+    ASSERT_TRUE(processor_key.Valid());
+    DWORD processor_mhz_from_registry;
+    ASSERT_EQ(ERROR_SUCCESS,
+              processor_key.ReadValueDW(L"~MHz", &processor_mhz_from_registry));
+
+    // Expect the measured TSC frequency to be similar to the processor
+    // frequency from the registry (0.5% error).
+    double tsc_mhz_measured = ThreadTicks::TSCTicksPerSecond() / 1e6;
+    EXPECT_NEAR(tsc_mhz_measured, processor_mhz_from_registry,
+                0.005 * processor_mhz_from_registry);
+  }
+}
+
+TEST(TimeTicks, FromQPCValue) {
+  if (!TimeTicks::IsHighResolution())
+    return;
+
+  LARGE_INTEGER frequency;
+  ASSERT_TRUE(QueryPerformanceFrequency(&frequency));
+  const int64_t ticks_per_second = frequency.QuadPart;
+  ASSERT_GT(ticks_per_second, 0);
+
+  // Generate the tick values to convert, advancing the tick count by varying
+  // amounts.  These values will ensure that both the fast and overflow-safe
+  // conversion logic in FromQPCValue() is tested, and across the entire range
+  // of possible QPC tick values.
+  std::vector<int64_t> test_cases;
+  test_cases.push_back(0);
+  const int kNumAdvancements = 100;
+  int64_t ticks = 0;
+  int64_t ticks_increment = 10;
+  for (int i = 0; i < kNumAdvancements; ++i) {
+    test_cases.push_back(ticks);
+    ticks += ticks_increment;
+    ticks_increment = ticks_increment * 6 / 5;
+  }
+  test_cases.push_back(Time::kQPCOverflowThreshold - 1);
+  test_cases.push_back(Time::kQPCOverflowThreshold);
+  test_cases.push_back(Time::kQPCOverflowThreshold + 1);
+  ticks = Time::kQPCOverflowThreshold + 10;
+  ticks_increment = 10;
+  for (int i = 0; i < kNumAdvancements; ++i) {
+    test_cases.push_back(ticks);
+    ticks += ticks_increment;
+    ticks_increment = ticks_increment * 6 / 5;
+  }
+  test_cases.push_back(std::numeric_limits<int64_t>::max());
+
+  // Test that the conversions using FromQPCValue() match those computed here
+  // using simple floating-point arithmetic.  The floating-point math provides
+  // enough precision for all reasonable values to confirm that the
+  // implementation is correct to the microsecond, and for "very large" values
+  // it confirms that the answer is very close to correct.
+  for (int64_t ticks : test_cases) {
+    const double expected_microseconds_since_origin =
+        (static_cast<double>(ticks) * Time::kMicrosecondsPerSecond) /
+            ticks_per_second;
+    const TimeTicks converted_value = TimeTicks::FromQPCValue(ticks);
+    const double converted_microseconds_since_origin =
+        static_cast<double>((converted_value - TimeTicks()).InMicroseconds());
+    // When we test with very large numbers we end up in a range where adjacent
+    // double values are far apart - 512.0 apart in one test failure. In that
+    // situation it makes no sense for our epsilon to be 1.0 - it should be
+    // the difference between adjacent doubles.
+    double epsilon = nextafter(expected_microseconds_since_origin, INFINITY) -
+                     expected_microseconds_since_origin;
+    // Epsilon must be at least 1.0 because converted_microseconds_since_origin
+    // comes from an integral value and the rounding is not perfect.
+    if (epsilon < 1.0)
+      epsilon = 1.0;
+    EXPECT_NEAR(expected_microseconds_since_origin,
+                converted_microseconds_since_origin, epsilon)
+        << "ticks=" << ticks << ", to be converted via logic path: "
+        << (ticks < Time::kQPCOverflowThreshold ? "FAST" : "SAFE");
+  }
+}
+
+TEST(TimeDelta, ConstexprInitialization) {
+  // Make sure that TimeDelta works around crbug.com/635974
+  EXPECT_EQ(kExpectedDeltaInMilliseconds, kConstexprTimeDelta.InMilliseconds());
+}
+
+TEST(TimeDelta, FromFileTime) {
+  FILETIME ft;
+  ft.dwLowDateTime = 1001;
+  ft.dwHighDateTime = 0;
+
+  // 100100 ns ~= 100 us.
+  EXPECT_EQ(TimeDelta::FromMicroseconds(100), TimeDelta::FromFileTime(ft));
+
+  ft.dwLowDateTime = 0;
+  ft.dwHighDateTime = 1;
+
+  // 2^32 * 100 ns = (2^32 / 10) us.
+  EXPECT_EQ(TimeDelta::FromMicroseconds((1ull << 32) / 10),
+            TimeDelta::FromFileTime(ft));
+}
+
+TEST(HighResolutionTimer, GetUsage) {
+  EXPECT_EQ(0.0, Time::GetHighResolutionTimerUsage());
+
+  Time::ResetHighResolutionTimerUsage();
+
+  // 0% usage since the timer isn't activated regardless of how much time has
+  // elapsed.
+  EXPECT_EQ(0.0, Time::GetHighResolutionTimerUsage());
+  Sleep(10);
+  EXPECT_EQ(0.0, Time::GetHighResolutionTimerUsage());
+
+  Time::ActivateHighResolutionTimer(true);
+  Time::ResetHighResolutionTimerUsage();
+
+  Sleep(20);
+  // 100% usage since the timer has been activated the entire time.
+  EXPECT_EQ(100.0, Time::GetHighResolutionTimerUsage());
+
+  Time::ActivateHighResolutionTimer(false);
+  Sleep(20);
+  double usage1 = Time::GetHighResolutionTimerUsage();
+  // usage1 should be about 50%.
+  EXPECT_LT(usage1, 100.0);
+  EXPECT_GT(usage1, 0.0);
+
+  Time::ActivateHighResolutionTimer(true);
+  Sleep(10);
+  Time::ActivateHighResolutionTimer(false);
+  double usage2 = Time::GetHighResolutionTimerUsage();
+  // usage2 should be about 60%.
+  EXPECT_LT(usage2, 100.0);
+  EXPECT_GT(usage2, usage1);
+
+  Time::ResetHighResolutionTimerUsage();
+  EXPECT_EQ(0.0, Time::GetHighResolutionTimerUsage());
+}
+
+}  // namespace base
diff --git a/base/timer/elapsed_timer.cc b/base/timer/elapsed_timer.cc
new file mode 100644
index 0000000..ca86ccd
--- /dev/null
+++ b/base/timer/elapsed_timer.cc
@@ -0,0 +1,25 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/timer/elapsed_timer.h"
+
+namespace base {
+
+ElapsedTimer::ElapsedTimer() {
+  begin_ = TimeTicks::Now();
+}
+
+ElapsedTimer::ElapsedTimer(ElapsedTimer&& other) {
+  begin_ = other.begin_;
+}
+
+void ElapsedTimer::operator=(ElapsedTimer&& other) {
+  begin_ = other.begin_;
+}
+
+TimeDelta ElapsedTimer::Elapsed() const {
+  return TimeTicks::Now() - begin_;
+}
+
+}  // namespace base
diff --git a/base/timer/elapsed_timer.h b/base/timer/elapsed_timer.h
new file mode 100644
index 0000000..9dfa12c
--- /dev/null
+++ b/base/timer/elapsed_timer.h
@@ -0,0 +1,33 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TIMER_ELAPSED_TIMER_H_
+#define BASE_TIMER_ELAPSED_TIMER_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/time/time.h"
+
+namespace base {
+
+// A simple wrapper around TimeTicks::Now().
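+//
+// A minimal usage sketch (DoWork() is an illustrative placeholder):
+//
+//   base::ElapsedTimer timer;
+//   DoWork();
+//   TimeDelta elapsed = timer.Elapsed();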
+class BASE_EXPORT ElapsedTimer {
+ public:
+  ElapsedTimer();
+  ElapsedTimer(ElapsedTimer&& other);
+
+  void operator=(ElapsedTimer&& other);
+
+  // Returns the time elapsed since object construction.
+  TimeDelta Elapsed() const;
+
+ private:
+  TimeTicks begin_;
+
+  DISALLOW_COPY_AND_ASSIGN(ElapsedTimer);
+};
+
+}  // namespace base
+
+#endif  // BASE_TIMER_ELAPSED_TIMER_H_
diff --git a/base/timer/hi_res_timer_manager.h b/base/timer/hi_res_timer_manager.h
new file mode 100644
index 0000000..bfa316d
--- /dev/null
+++ b/base/timer/hi_res_timer_manager.h
@@ -0,0 +1,48 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TIMER_HI_RES_TIMER_MANAGER_H_
+#define BASE_TIMER_HI_RES_TIMER_MANAGER_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/power_monitor/power_observer.h"
+#include "base/timer/timer.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// Ensures that the Windows high resolution timer is only used
+// when not running on battery power.
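+//
+// Intended use, sketched (on Windows the constructor DCHECKs that a
+// PowerMonitor already exists):
+//
+//   base::HighResolutionTimerManager manager;
+//   if (manager.hi_res_clock_available()) {
+//     // Timers may fire at a finer granularity than the default clock tick.
+//   }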
+class BASE_EXPORT HighResolutionTimerManager : public base::PowerObserver {
+ public:
+  HighResolutionTimerManager();
+  ~HighResolutionTimerManager() override;
+
+  // base::PowerObserver methods.
+  void OnPowerStateChange(bool on_battery_power) override;
+  void OnSuspend() override;
+  void OnResume() override;
+
+  // Returns true if the hi resolution clock could be used right now.
+  bool hi_res_clock_available() const { return hi_res_clock_available_; }
+
+ private:
+  // Enable or disable the faster multimedia timer.
+  void UseHiResClock(bool use);
+
+  bool hi_res_clock_available_;
+
+#if defined(OS_WIN)
+  // Timer for polling the high resolution timer usage.
+  base::RepeatingTimer timer_;
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(HighResolutionTimerManager);
+};
+
+}  // namespace base
+
+#endif  // BASE_TIMER_HI_RES_TIMER_MANAGER_H_
diff --git a/base/timer/hi_res_timer_manager_posix.cc b/base/timer/hi_res_timer_manager_posix.cc
new file mode 100644
index 0000000..d2a3aa5
--- /dev/null
+++ b/base/timer/hi_res_timer_manager_posix.cc
@@ -0,0 +1,27 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/timer/hi_res_timer_manager.h"
+
+// On POSIX we don't need to do anything special with the system timer.
+
+namespace base {
+
+HighResolutionTimerManager::HighResolutionTimerManager()
+    : hi_res_clock_available_(false) {
+}
+
+HighResolutionTimerManager::~HighResolutionTimerManager() = default;
+
+void HighResolutionTimerManager::OnPowerStateChange(bool on_battery_power) {
+}
+
+void HighResolutionTimerManager::OnSuspend() {}
+
+void HighResolutionTimerManager::OnResume() {}
+
+void HighResolutionTimerManager::UseHiResClock(bool use) {
+}
+
+}  // namespace base
diff --git a/base/timer/hi_res_timer_manager_unittest.cc b/base/timer/hi_res_timer_manager_unittest.cc
new file mode 100644
index 0000000..43f607a
--- /dev/null
+++ b/base/timer/hi_res_timer_manager_unittest.cc
@@ -0,0 +1,62 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/timer/hi_res_timer_manager.h"
+
+#include <memory>
+#include <utility>
+
+#include "base/power_monitor/power_monitor.h"
+#include "base/power_monitor/power_monitor_device_source.h"
+#include "base/test/scoped_task_environment.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+#if defined(OS_WIN)
+TEST(HiResTimerManagerTest, ToggleOnOff) {
+  // The power monitor creates a window to receive power notifications from
+  // Windows, which makes this test flaky if you run it while the machine
+  // goes on or off AC power.
+  test::ScopedTaskEnvironment scoped_task_environment(
+      test::ScopedTaskEnvironment::MainThreadType::UI);
+  std::unique_ptr<base::PowerMonitorSource> power_monitor_source(
+      new base::PowerMonitorDeviceSource());
+  std::unique_ptr<base::PowerMonitor> power_monitor(
+      new base::PowerMonitor(std::move(power_monitor_source)));
+
+  HighResolutionTimerManager manager;
+  // Simulate an on-AC power event to get to a known initial state.
+  manager.OnPowerStateChange(false);
+
+  // Loop a few times to test power toggling.
+  for (int times = 0; times != 3; ++times) {
+    // The manager has the high resolution clock enabled now.
+    EXPECT_TRUE(manager.hi_res_clock_available());
+    // But the Time class has it off, because it hasn't been activated.
+    EXPECT_FALSE(base::Time::IsHighResolutionTimerInUse());
+
+    // Activate the high resolution timer.
+    base::Time::ActivateHighResolutionTimer(true);
+    EXPECT_TRUE(base::Time::IsHighResolutionTimerInUse());
+
+    // Simulate an on-battery power event.
+    manager.OnPowerStateChange(true);
+    EXPECT_FALSE(manager.hi_res_clock_available());
+    EXPECT_FALSE(base::Time::IsHighResolutionTimerInUse());
+
+    // Back to on-AC power.
+    manager.OnPowerStateChange(false);
+    EXPECT_TRUE(manager.hi_res_clock_available());
+    EXPECT_TRUE(base::Time::IsHighResolutionTimerInUse());
+
+    // De-activate the high resolution timer.
+    base::Time::ActivateHighResolutionTimer(false);
+  }
+}
+#endif  // defined(OS_WIN)
+
+}  // namespace base
diff --git a/base/timer/hi_res_timer_manager_win.cc b/base/timer/hi_res_timer_manager_win.cc
new file mode 100644
index 0000000..5474373
--- /dev/null
+++ b/base/timer/hi_res_timer_manager_win.cc
@@ -0,0 +1,70 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/timer/hi_res_timer_manager.h"
+
+#include <algorithm>
+
+#include "base/atomicops.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/power_monitor/power_monitor.h"
+#include "base/task_scheduler/post_task.h"
+#include "base/time/time.h"
+
+namespace base {
+
+namespace {
+
+constexpr TimeDelta kUsageSampleInterval = TimeDelta::FromMinutes(10);
+
+void ReportHighResolutionTimerUsage() {
+  UMA_HISTOGRAM_PERCENTAGE("Windows.HighResolutionTimerUsage",
+                           Time::GetHighResolutionTimerUsage());
+  // Reset usage for the next interval.
+  Time::ResetHighResolutionTimerUsage();
+}
+
+}  // namespace
+
+HighResolutionTimerManager::HighResolutionTimerManager()
+    : hi_res_clock_available_(false) {
+  PowerMonitor* power_monitor = PowerMonitor::Get();
+  DCHECK(power_monitor != NULL);
+  power_monitor->AddObserver(this);
+  UseHiResClock(!power_monitor->IsOnBatteryPower());
+
+  // Start polling the high resolution timer usage.
+  Time::ResetHighResolutionTimerUsage();
+  timer_.Start(FROM_HERE, kUsageSampleInterval,
+               Bind(&ReportHighResolutionTimerUsage));
+}
+
+HighResolutionTimerManager::~HighResolutionTimerManager() {
+  PowerMonitor::Get()->RemoveObserver(this);
+  UseHiResClock(false);
+}
+
+void HighResolutionTimerManager::OnPowerStateChange(bool on_battery_power) {
+  UseHiResClock(!on_battery_power);
+}
+
+void HighResolutionTimerManager::OnSuspend() {
+  // Stop polling the usage to avoid including the standby time.
+  timer_.Stop();
+}
+
+void HighResolutionTimerManager::OnResume() {
+  // Resume polling the usage.
+  Time::ResetHighResolutionTimerUsage();
+  timer_.Reset();
+}
+
+void HighResolutionTimerManager::UseHiResClock(bool use) {
+  if (use == hi_res_clock_available_)
+    return;
+  hi_res_clock_available_ = use;
+  Time::EnableHighResolutionTimer(use);
+}
+
+}  // namespace base
diff --git a/base/timer/mock_timer.cc b/base/timer/mock_timer.cc
new file mode 100644
index 0000000..ca0893b
--- /dev/null
+++ b/base/timer/mock_timer.cc
@@ -0,0 +1,59 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/timer/mock_timer.h"
+
+namespace base {
+
+MockTimer::MockTimer(bool retain_user_task, bool is_repeating)
+    : Timer(retain_user_task, is_repeating),
+      is_running_(false) {
+}
+
+MockTimer::MockTimer(const Location& posted_from,
+                     TimeDelta delay,
+                     const base::Closure& user_task,
+                     bool is_repeating)
+    : Timer(true, is_repeating), delay_(delay), is_running_(false) {}
+
+MockTimer::~MockTimer() = default;
+
+bool MockTimer::IsRunning() const {
+  return is_running_;
+}
+
+base::TimeDelta MockTimer::GetCurrentDelay() const {
+  return delay_;
+}
+
+void MockTimer::Start(const Location& posted_from,
+                      TimeDelta delay,
+                      const base::Closure& user_task) {
+  delay_ = delay;
+  user_task_ = user_task;
+  Reset();
+}
+
+void MockTimer::Stop() {
+  is_running_ = false;
+  if (!retain_user_task())
+    user_task_.Reset();
+}
+
+void MockTimer::Reset() {
+  DCHECK(!user_task_.is_null());
+  is_running_ = true;
+}
+
+void MockTimer::Fire() {
+  DCHECK(is_running_);
+  base::Closure old_task = user_task_;
+  if (is_repeating())
+    Reset();
+  else
+    Stop();
+  old_task.Run();
+}
+
+}  // namespace base
diff --git a/base/timer/mock_timer.h b/base/timer/mock_timer.h
new file mode 100644
index 0000000..49394b2
--- /dev/null
+++ b/base/timer/mock_timer.h
@@ -0,0 +1,41 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TIMER_MOCK_TIMER_H_
+#define BASE_TIMER_MOCK_TIMER_H_
+
+#include "base/timer/timer.h"
+
+namespace base {
+
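+// MockTimer is a Timer for tests: it never posts real tasks, and the test
+// drives it by calling Fire() explicitly. A minimal sketch (|task| is any
+// base::Closure supplied by the test):
+//
+//   base::MockTimer timer(false /* retain_user_task */, false /* repeating */);
+//   timer.Start(FROM_HERE, base::TimeDelta::FromSeconds(1), task);
+//   EXPECT_TRUE(timer.IsRunning());
+//   timer.Fire();  // Runs |task| synchronously; no clock or task runner used.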
+class BASE_EXPORT MockTimer : public Timer {
+ public:
+  MockTimer(bool retain_user_task, bool is_repeating);
+  MockTimer(const Location& posted_from,
+            TimeDelta delay,
+            const base::Closure& user_task,
+            bool is_repeating);
+  ~MockTimer() override;
+
+  // base::Timer implementation.
+  bool IsRunning() const override;
+  base::TimeDelta GetCurrentDelay() const override;
+  void Start(const Location& posted_from,
+             base::TimeDelta delay,
+             const base::Closure& user_task) override;
+  void Stop() override;
+  void Reset() override;
+
+  // Testing methods.
+  void Fire();
+
+ private:
+  base::Closure user_task_;
+  TimeDelta delay_;
+  bool is_running_;
+};
+
+}  // namespace base
+
+#endif  // BASE_TIMER_MOCK_TIMER_H_
diff --git a/base/timer/mock_timer_unittest.cc b/base/timer/mock_timer_unittest.cc
new file mode 100644
index 0000000..61716a4
--- /dev/null
+++ b/base/timer/mock_timer_unittest.cc
@@ -0,0 +1,80 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/timer/mock_timer.h"
+
+#include "base/macros.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+void CallMeMaybe(int *number) {
+  (*number)++;
+}
+
+TEST(MockTimerTest, FiresOnce) {
+  int calls = 0;
+  base::MockTimer timer(false, false);
+  base::TimeDelta delay = base::TimeDelta::FromSeconds(2);
+  timer.Start(FROM_HERE, delay,
+              base::Bind(&CallMeMaybe,
+                         base::Unretained(&calls)));
+  EXPECT_EQ(delay, timer.GetCurrentDelay());
+  EXPECT_TRUE(timer.IsRunning());
+  timer.Fire();
+  EXPECT_FALSE(timer.IsRunning());
+  EXPECT_EQ(1, calls);
+}
+
+TEST(MockTimerTest, FiresRepeatedly) {
+  int calls = 0;
+  base::MockTimer timer(true, true);
+  base::TimeDelta delay = base::TimeDelta::FromSeconds(2);
+  timer.Start(FROM_HERE, delay,
+              base::Bind(&CallMeMaybe,
+                         base::Unretained(&calls)));
+  timer.Fire();
+  EXPECT_TRUE(timer.IsRunning());
+  timer.Fire();
+  timer.Fire();
+  EXPECT_TRUE(timer.IsRunning());
+  EXPECT_EQ(3, calls);
+}
+
+TEST(MockTimerTest, Stops) {
+  int calls = 0;
+  base::MockTimer timer(true, true);
+  base::TimeDelta delay = base::TimeDelta::FromSeconds(2);
+  timer.Start(FROM_HERE, delay,
+              base::Bind(&CallMeMaybe,
+                         base::Unretained(&calls)));
+  EXPECT_TRUE(timer.IsRunning());
+  timer.Stop();
+  EXPECT_FALSE(timer.IsRunning());
+}
+
+class HasWeakPtr : public base::SupportsWeakPtr<HasWeakPtr> {
+ public:
+  HasWeakPtr() = default;
+  virtual ~HasWeakPtr() = default;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(HasWeakPtr);
+};
+
+TEST(MockTimerTest, DoesNotRetainClosure) {
+  HasWeakPtr *has_weak_ptr = new HasWeakPtr();
+  base::WeakPtr<HasWeakPtr> weak_ptr(has_weak_ptr->AsWeakPtr());
+  base::MockTimer timer(false, false);
+  base::TimeDelta delay = base::TimeDelta::FromSeconds(2);
+  ASSERT_TRUE(weak_ptr.get());
+  timer.Start(FROM_HERE, delay,
+              base::Bind(base::DoNothing::Repeatedly<HasWeakPtr*>(),
+                         base::Owned(has_weak_ptr)));
+  ASSERT_TRUE(weak_ptr.get());
+  timer.Fire();
+  ASSERT_FALSE(weak_ptr.get());
+}
+
+}  // namespace
diff --git a/base/timer/timer.cc b/base/timer/timer.cc
new file mode 100644
index 0000000..99cd839
--- /dev/null
+++ b/base/timer/timer.cc
@@ -0,0 +1,268 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/timer/timer.h"
+
+#include <stddef.h>
+
+#include <utility>
+
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/memory/ref_counted.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/sequenced_task_runner_handle.h"
+#include "base/time/tick_clock.h"
+
+namespace base {
+
+// BaseTimerTaskInternal is a simple delegate for scheduling a callback to Timer
+// on the current sequence. It also handles the following edge cases:
+// - deleted by the task runner.
+// - abandoned (orphaned) by Timer.
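+//
+// Ownership sketch (as implemented below): Timer keeps a raw
+// |scheduled_task_| pointer, while the posted task owns the object via
+// base::Owned(). Whichever side goes away first severs the link: deletion by
+// the task runner triggers AbandonAndStop() on the timer, and Abandon() by
+// the timer turns a later Run() into a no-op.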
+class BaseTimerTaskInternal {
+ public:
+  explicit BaseTimerTaskInternal(Timer* timer)
+      : timer_(timer) {
+  }
+
+  ~BaseTimerTaskInternal() {
+    // This task may be getting cleared because the task runner has been
+    // destructed.  If so, don't leave Timer with a dangling pointer
+    // to this.
+    if (timer_)
+      timer_->AbandonAndStop();
+  }
+
+  void Run() {
+    // |timer_| is nullptr if we were abandoned.
+    if (!timer_)
+      return;
+
+    // |this| will be deleted by the task runner, so Timer needs to forget us:
+    timer_->scheduled_task_ = nullptr;
+
+    // Although Timer should not call back into |this|, let's clear |timer_|
+    // first to be pedantic.
+    Timer* timer = timer_;
+    timer_ = nullptr;
+    timer->RunScheduledTask();
+  }
+
+  // The task remains in the queue, but nothing will happen when it runs.
+  void Abandon() { timer_ = nullptr; }
+
+ private:
+  Timer* timer_;
+
+  DISALLOW_COPY_AND_ASSIGN(BaseTimerTaskInternal);
+};
+
+Timer::Timer(bool retain_user_task, bool is_repeating)
+    : Timer(retain_user_task, is_repeating, nullptr) {}
+
+Timer::Timer(bool retain_user_task,
+             bool is_repeating,
+             const TickClock* tick_clock)
+    : scheduled_task_(nullptr),
+      is_repeating_(is_repeating),
+      retain_user_task_(retain_user_task),
+      tick_clock_(tick_clock),
+      is_running_(false) {
+  // It is safe for the timer to be created on a different thread/sequence than
+  // the one from which the timer APIs are called. The first call to the
+  // checker's CalledOnValidSequence() method will re-bind the checker, and
+  // later calls will verify that the same task runner is used.
+  origin_sequence_checker_.DetachFromSequence();
+}
+
+Timer::Timer(const Location& posted_from,
+             TimeDelta delay,
+             const base::Closure& user_task,
+             bool is_repeating)
+    : Timer(posted_from, delay, user_task, is_repeating, nullptr) {}
+
+Timer::Timer(const Location& posted_from,
+             TimeDelta delay,
+             const base::Closure& user_task,
+             bool is_repeating,
+             const TickClock* tick_clock)
+    : scheduled_task_(nullptr),
+      posted_from_(posted_from),
+      delay_(delay),
+      user_task_(user_task),
+      is_repeating_(is_repeating),
+      retain_user_task_(true),
+      tick_clock_(tick_clock),
+      is_running_(false) {
+  // See comment in other constructor.
+  origin_sequence_checker_.DetachFromSequence();
+}
+
+Timer::~Timer() {
+  DCHECK(origin_sequence_checker_.CalledOnValidSequence());
+  AbandonAndStop();
+}
+
+bool Timer::IsRunning() const {
+  DCHECK(origin_sequence_checker_.CalledOnValidSequence());
+  return is_running_;
+}
+
+TimeDelta Timer::GetCurrentDelay() const {
+  DCHECK(origin_sequence_checker_.CalledOnValidSequence());
+  return delay_;
+}
+
+void Timer::SetTaskRunner(scoped_refptr<SequencedTaskRunner> task_runner) {
+  // Do not allow changing the task runner when the Timer is running.
+  // Don't check for |origin_sequence_checker_.CalledOnValidSequence()| here to
+  // allow the use case of constructing the Timer and immediately invoking
+  // SetTaskRunner() before starting it (CalledOnValidSequence() would undo the
+  // DetachFromSequence() from the constructor). The |!is_running_| check kind of
+  // verifies the same thing (and TSAN should catch callers that do it wrong but
+  // somehow evade all debug checks).
+  DCHECK(!is_running_);
+  task_runner_.swap(task_runner);
+}
+
+void Timer::Start(const Location& posted_from,
+                  TimeDelta delay,
+                  const base::Closure& user_task) {
+  DCHECK(origin_sequence_checker_.CalledOnValidSequence());
+
+  posted_from_ = posted_from;
+  delay_ = delay;
+  user_task_ = user_task;
+
+  Reset();
+}
+
+void Timer::Stop() {
+  // TODO(gab): Enable this when it's no longer called racily from
+  // RunScheduledTask(): https://crbug.com/587199.
+  // DCHECK(origin_sequence_checker_.CalledOnValidSequence());
+
+  is_running_ = false;
+
+  // It's safe to destroy or restart Timer on another sequence after Stop().
+  origin_sequence_checker_.DetachFromSequence();
+
+  if (!retain_user_task_)
+    user_task_.Reset();
+  // No more member accesses here: |this| could be deleted after freeing
+  // |user_task_|.
+}
+
+void Timer::Reset() {
+  DCHECK(origin_sequence_checker_.CalledOnValidSequence());
+  DCHECK(!user_task_.is_null());
+
+  // If there's no pending task, start one up and return.
+  if (!scheduled_task_) {
+    PostNewScheduledTask(delay_);
+    return;
+  }
+
+  // Set the new |desired_run_time_|.
+  if (delay_ > TimeDelta::FromMicroseconds(0))
+    desired_run_time_ = Now() + delay_;
+  else
+    desired_run_time_ = TimeTicks();
+
+  // We can use the existing scheduled task if it arrives before the new
+  // |desired_run_time_|.
+  if (desired_run_time_ >= scheduled_run_time_) {
+    is_running_ = true;
+    return;
+  }
+
+  // We can't reuse the |scheduled_task_|, so abandon it and post a new one.
+  AbandonScheduledTask();
+  PostNewScheduledTask(delay_);
+}
+
+TimeTicks Timer::Now() const {
+  // TODO(gab): Enable this when it's no longer called racily from
+  // RunScheduledTask(): https://crbug.com/587199.
+  // DCHECK(origin_sequence_checker_.CalledOnValidSequence());
+  return tick_clock_ ? tick_clock_->NowTicks() : TimeTicks::Now();
+}
+
+void Timer::PostNewScheduledTask(TimeDelta delay) {
+  // TODO(gab): Enable this when it's no longer called racily from
+  // RunScheduledTask(): https://crbug.com/587199.
+  // DCHECK(origin_sequence_checker_.CalledOnValidSequence());
+  DCHECK(!scheduled_task_);
+  is_running_ = true;
+  scheduled_task_ = new BaseTimerTaskInternal(this);
+  if (delay > TimeDelta::FromMicroseconds(0)) {
+    // TODO(gab): Posting BaseTimerTaskInternal::Run to another sequence makes
+    // this code racy. https://crbug.com/587199
+    GetTaskRunner()->PostDelayedTask(
+        posted_from_,
+        base::BindOnce(&BaseTimerTaskInternal::Run,
+                       base::Owned(scheduled_task_)),
+        delay);
+    scheduled_run_time_ = desired_run_time_ = Now() + delay;
+  } else {
+    GetTaskRunner()->PostTask(posted_from_,
+                              base::BindOnce(&BaseTimerTaskInternal::Run,
+                                             base::Owned(scheduled_task_)));
+    scheduled_run_time_ = desired_run_time_ = TimeTicks();
+  }
+}
+
+scoped_refptr<SequencedTaskRunner> Timer::GetTaskRunner() {
+  return task_runner_.get() ? task_runner_ : SequencedTaskRunnerHandle::Get();
+}
+
+void Timer::AbandonScheduledTask() {
+  // TODO(gab): Enable this when it's no longer called racily from
+  // RunScheduledTask() -> Stop(): https://crbug.com/587199.
+  // DCHECK(origin_sequence_checker_.CalledOnValidSequence());
+  if (scheduled_task_) {
+    scheduled_task_->Abandon();
+    scheduled_task_ = nullptr;
+  }
+}
+
+void Timer::RunScheduledTask() {
+  // TODO(gab): Enable this when it's no longer called racily:
+  // https://crbug.com/587199.
+  // DCHECK(origin_sequence_checker_.CalledOnValidSequence());
+
+  // Task may have been disabled.
+  if (!is_running_)
+    return;
+
+  // First check if we need to delay the task because of a new target time.
+  if (desired_run_time_ > scheduled_run_time_) {
+    // Now() can be expensive, so only call it if we know the user has changed
+    // the |desired_run_time_|.
+    TimeTicks now = Now();
+    // Task runner may have called us late anyway, so only post a continuation
+    // task if the |desired_run_time_| is in the future.
+    if (desired_run_time_ > now) {
+      // Post a new task to span the remaining time.
+      PostNewScheduledTask(desired_run_time_ - now);
+      return;
+    }
+  }
+
+  // Make a local copy of the task to run. The Stop method will reset the
+  // |user_task_| member if |retain_user_task_| is false.
+  base::Closure task = user_task_;
+
+  if (is_repeating_)
+    PostNewScheduledTask(delay_);
+  else
+    Stop();
+
+  task.Run();
+
+  // No more member accesses here: |this| could be deleted at this point.
+}
+
+}  // namespace base
diff --git a/base/timer/timer.h b/base/timer/timer.h
new file mode 100644
index 0000000..2777632
--- /dev/null
+++ b/base/timer/timer.h
@@ -0,0 +1,295 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// OneShotTimer and RepeatingTimer provide a simple timer API.  As the names
+// suggest, OneShotTimer calls you back once after a time delay expires.
+// RepeatingTimer on the other hand calls you back periodically with the
+// prescribed time interval.
+//
+// OneShotTimer and RepeatingTimer both cancel the timer when they go out of
+// scope, which makes it easy to ensure that you do not get called when your
+// object has gone out of scope.  Just instantiate a OneShotTimer or
+// RepeatingTimer as a member variable of the class for which you wish to
+// receive timer events.
+//
+// Sample RepeatingTimer usage:
+//
+//   class MyClass {
+//    public:
+//     void StartDoingStuff() {
+//       timer_.Start(FROM_HERE, TimeDelta::FromSeconds(1),
+//                    this, &MyClass::DoStuff);
+//     }
+//     void StopDoingStuff() {
+//       timer_.Stop();
+//     }
+//    private:
+//     void DoStuff() {
+//       // This method is called every second to do stuff.
+//       ...
+//     }
+//     base::RepeatingTimer timer_;
+//   };
+//
+// Both OneShotTimer and RepeatingTimer also support a Reset method, which
+// allows you to easily defer the timer event until the timer delay passes once
+// again.  So, in the above example, if 0.5 seconds have already passed,
+// calling Reset on |timer_| would postpone DoStuff by another 1 second.  In
+// other words, Reset is shorthand for calling Stop and then Start again with
+// the same arguments.
+//
+// These APIs are not thread safe. All methods must be called from the same
+// sequence (not necessarily the construction sequence), except for the
+// destructor and SetTaskRunner().
+// - The destructor may be called from any sequence when the timer is not
+// running and there is no scheduled task active, i.e. when Start() has never
+// been called or after AbandonAndStop() has been called.
+// - SetTaskRunner() may be called from any sequence when the timer is not
+// running, i.e. when Start() has never been called or Stop() has been called
+// since the last Start().
+//
+// By default, the scheduled tasks will be run on the same sequence that the
+// Timer was *started on*, but this can be changed *prior* to Start() via
+// SetTaskRunner().
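+//
+// Sample OneShotTimer usage, mirroring the RepeatingTimer sample above
+// (MyClass and DoStuff are illustrative names):
+//
+//   class MyClass {
+//    public:
+//     void StartDoingStuffOnce() {
+//       timer_.Start(FROM_HERE, TimeDelta::FromSeconds(1),
+//                    this, &MyClass::DoStuff);
+//     }
+//    private:
+//     void DoStuff() {
+//       // Called once, roughly 1 second after StartDoingStuffOnce().
+//     }
+//     base::OneShotTimer timer_;
+//   };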
+
+#ifndef BASE_TIMER_TIMER_H_
+#define BASE_TIMER_TIMER_H_
+
+// IMPORTANT: If you change timer code, make sure that all tests (including
+// disabled ones) from timer_unittests.cc pass locally. Some are disabled
+// because they're flaky on the buildbot, but when you run them locally you
+// should be able to tell the difference.
+
+#include <memory>
+
+#include "base/base_export.h"
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/callback.h"
+#include "base/location.h"
+#include "base/macros.h"
+#include "base/sequence_checker_impl.h"
+#include "base/sequenced_task_runner.h"
+#include "base/time/time.h"
+
+namespace base {
+
+class BaseTimerTaskInternal;
+class TickClock;
+
+//-----------------------------------------------------------------------------
+// This class wraps TaskRunner::PostDelayedTask to manage delayed and repeating
+// tasks. See meta comment above for thread-safety requirements.
+//
+class BASE_EXPORT Timer {
+ public:
+  // Construct a timer in repeating or one-shot mode. Start must be called later
+  // to set task info. |retain_user_task| determines whether the user_task is
+  // retained or reset when it runs or stops. If |tick_clock| is provided, it is
+  // used instead of TimeTicks::Now() to get TimeTicks when scheduling tasks.
+  Timer(bool retain_user_task, bool is_repeating);
+  Timer(bool retain_user_task, bool is_repeating, const TickClock* tick_clock);
+
+  // Construct a timer with retained task info. If |tick_clock| is provided, it
+  // is used instead of TimeTicks::Now() to get TimeTicks when scheduling tasks.
+  Timer(const Location& posted_from,
+        TimeDelta delay,
+        const base::Closure& user_task,
+        bool is_repeating);
+  Timer(const Location& posted_from,
+        TimeDelta delay,
+        const base::Closure& user_task,
+        bool is_repeating,
+        const TickClock* tick_clock);
+
+  virtual ~Timer();
+
+  // Returns true if the timer is running (i.e., not stopped).
+  virtual bool IsRunning() const;
+
+  // Returns the current delay for this timer.
+  virtual TimeDelta GetCurrentDelay() const;
+
+  // Set the task runner on which the task should be scheduled. This method can
+  // only be called before any tasks have been scheduled. If |task_runner| runs
+  // tasks on a different sequence than the sequence owning this Timer,
+  // |user_task_| will be posted to it when the Timer fires (note that this
+  // means |user_task_| can run after ~Timer() and should support that).
+  void SetTaskRunner(scoped_refptr<SequencedTaskRunner> task_runner);
+
+  // Start the timer to run at the given |delay| from now. If the timer is
+  // already running, it will be replaced to call the given |user_task|.
+  virtual void Start(const Location& posted_from,
+                     TimeDelta delay,
+                     const base::Closure& user_task);
+
+  // Start the timer to run at the given |delay| from now. If the timer is
+  // already running, it will be replaced to call a task formed from
+  // |receiver->*method|.
+  template <class Receiver>
+  void Start(const Location& posted_from,
+             TimeDelta delay,
+             Receiver* receiver,
+             void (Receiver::*method)()) {
+    Start(posted_from, delay,
+          base::BindRepeating(method, base::Unretained(receiver)));
+  }
+
+  // Call this method to stop and cancel the timer.  It is a no-op if the timer
+  // is not running.
+  virtual void Stop();
+
+  // Stop running task (if any) and abandon scheduled task (if any).
+  void AbandonAndStop() {
+    AbandonScheduledTask();
+
+    Stop();
+    // No more member accesses here: |this| could be deleted at this point.
+  }
+
+  // Call this method to reset the timer delay. The |user_task_| must be set. If
+  // the timer is not running, this will start it by posting a task.
+  virtual void Reset();
+
+  const base::Closure& user_task() const { return user_task_; }
+  const TimeTicks& desired_run_time() const { return desired_run_time_; }
+
+ protected:
+  // Returns the current tick count.
+  TimeTicks Now() const;
+
+  void set_user_task(const Closure& task) { user_task_ = task; }
+  void set_desired_run_time(TimeTicks desired) { desired_run_time_ = desired; }
+  void set_is_running(bool running) { is_running_ = running; }
+
+  const Location& posted_from() const { return posted_from_; }
+  bool retain_user_task() const { return retain_user_task_; }
+  bool is_repeating() const { return is_repeating_; }
+  bool is_running() const { return is_running_; }
+
+ private:
+  friend class BaseTimerTaskInternal;
+
+  // Allocates a new |scheduled_task_| and posts it on the current sequence with
+  // the given |delay|. |scheduled_task_| must be null. |scheduled_run_time_|
+  // and |desired_run_time_| are reset to Now() + delay.
+  void PostNewScheduledTask(TimeDelta delay);
+
+  // Returns the task runner on which the task should be scheduled. If the
+  // corresponding |task_runner_| field is null, the task runner for the current
+  // sequence is returned.
+  scoped_refptr<SequencedTaskRunner> GetTaskRunner();
+
+  // Disable |scheduled_task_| and abandon it so that it no longer refers back
+  // to this object.
+  void AbandonScheduledTask();
+
+  // Called by BaseTimerTaskInternal when the delayed task fires.
+  void RunScheduledTask();
+
+  // When non-null, the |scheduled_task_| was posted to call RunScheduledTask()
+  // at |scheduled_run_time_|.
+  BaseTimerTaskInternal* scheduled_task_;
+
+  // The task runner on which the task should be scheduled. If it is null, the
+  // task runner for the current sequence will be used.
+  scoped_refptr<SequencedTaskRunner> task_runner_;
+
+  // Location in user code.
+  Location posted_from_;
+  // Delay requested by user.
+  TimeDelta delay_;
+  // |user_task_| is what the user wants to be run at |desired_run_time_|.
+  base::Closure user_task_;
+
+  // The time at which |scheduled_task_| is expected to fire. This time can be a
+  // "zero" TimeTicks if the task must be run immediately.
+  TimeTicks scheduled_run_time_;
+
+  // The desired run time of |user_task_|. The user may update this at any time,
+  // even if their previous request has not run yet. If |desired_run_time_| is
+  // greater than |scheduled_run_time_|, a continuation task will be posted to
+  // wait for the remaining time. This allows us to reuse the pending task so as
+  // not to flood the delayed queues with orphaned tasks when the user code
+  // excessively Stops and Starts the timer. This time can be a "zero" TimeTicks
+  // if the task must be run immediately.
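+  //
+  // Worked example (times illustrative): a 10 s timer is Start()ed at t=0 and
+  // Reset() at t=2. |desired_run_time_| becomes t=12 while the already-posted
+  // task still fires at t=10; RunScheduledTask() then sees the gap and posts a
+  // 2 s continuation instead of a brand-new task.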
+  TimeTicks desired_run_time_;
+
+  // Timer isn't thread-safe and must only be used on its origin sequence
+  // (sequence on which it was started). Once fully Stop()'ed it may be
+  // destroyed or restarted on another sequence.
+  SequenceChecker origin_sequence_checker_;
+
+  // Repeating timers automatically post the task again before calling the task
+  // callback.
+  const bool is_repeating_;
+
+  // If true, hold on to the |user_task_| closure object for reuse.
+  const bool retain_user_task_;
+
+  // The tick clock used to calculate the run time for scheduled tasks.
+  const TickClock* const tick_clock_;
+
+  // If true, |user_task_| is scheduled to run sometime in the future.
+  bool is_running_;
+
+  DISALLOW_COPY_AND_ASSIGN(Timer);
+};
+
+//-----------------------------------------------------------------------------
+// A simple, one-shot timer.  See usage notes at the top of the file.
+class OneShotTimer : public Timer {
+ public:
+  OneShotTimer() : OneShotTimer(nullptr) {}
+  explicit OneShotTimer(const TickClock* tick_clock)
+      : Timer(false, false, tick_clock) {}
+};
+
+//-----------------------------------------------------------------------------
+// A simple, repeating timer.  See usage notes at the top of the file.
+class RepeatingTimer : public Timer {
+ public:
+  RepeatingTimer() : RepeatingTimer(nullptr) {}
+  explicit RepeatingTimer(const TickClock* tick_clock)
+      : Timer(true, true, tick_clock) {}
+};
+
+//-----------------------------------------------------------------------------
+// A Delay timer is like The Button from Lost. Once started, you have to keep
+// calling Reset, otherwise it will call the given method on the sequence it was
+// initially Reset() from.
+//
+// Once created, it is inactive until Reset is called. Once |delay| seconds have
+// passed since the last call to Reset, the callback is made. Once the callback
+// has been made, it's inactive until Reset is called again.
+//
+// If destroyed, the timeout is canceled and will not occur even if already
+// in flight.
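+//
+// A minimal sketch (Watchdog and OnTimeout are illustrative names):
+//
+//   class Watchdog {
+//    public:
+//     Watchdog()
+//         : timer_(FROM_HERE, TimeDelta::FromSeconds(5),
+//                  this, &Watchdog::OnTimeout) {}
+//     void Poke() { timer_.Reset(); }  // Postpones OnTimeout() by 5 seconds.
+//    private:
+//     void OnTimeout() { /* No Poke() arrived for 5 seconds. */ }
+//     base::DelayTimer timer_;
+//   };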
+class DelayTimer : protected Timer {
+ public:
+  template <class Receiver>
+  DelayTimer(const Location& posted_from,
+             TimeDelta delay,
+             Receiver* receiver,
+             void (Receiver::*method)())
+      : DelayTimer(posted_from, delay, receiver, method, nullptr) {}
+
+  template <class Receiver>
+  DelayTimer(const Location& posted_from,
+             TimeDelta delay,
+             Receiver* receiver,
+             void (Receiver::*method)(),
+             const TickClock* tick_clock)
+      : Timer(posted_from,
+              delay,
+              base::Bind(method, base::Unretained(receiver)),
+              false,
+              tick_clock) {}
+
+  using Timer::Reset;
+};
+
+}  // namespace base
+
+#endif  // BASE_TIMER_TIMER_H_
diff --git a/base/timer/timer_unittest.cc b/base/timer/timer_unittest.cc
new file mode 100644
index 0000000..aaab237
--- /dev/null
+++ b/base/timer/timer_unittest.cc
@@ -0,0 +1,904 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/timer/timer.h"
+
+#include <stddef.h>
+
+#include <memory>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/memory/ref_counted.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/sequenced_task_runner.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/task_scheduler/post_task.h"
+#include "base/test/scoped_task_environment.h"
+#include "base/test/test_mock_time_task_runner.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/sequenced_task_runner_handle.h"
+#include "base/threading/thread.h"
+#include "base/time/tick_clock.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+// The message loops on which each timer should be tested.
+const MessageLoop::Type testing_message_loops[] = {
+    MessageLoop::TYPE_DEFAULT, MessageLoop::TYPE_IO,
+#if !defined(OS_IOS)  // iOS does not allow direct running of the UI loop.
+    MessageLoop::TYPE_UI,
+#endif
+};
+
+const int kNumTestingMessageLoops = arraysize(testing_message_loops);
+
+class Receiver {
+ public:
+  Receiver() : count_(0) {}
+  void OnCalled() { count_++; }
+  bool WasCalled() { return count_ > 0; }
+  int TimesCalled() { return count_; }
+
+ private:
+  int count_;
+};
+
+// A basic helper class that can start a one-shot timer and signal a
+// WaitableEvent when this timer fires.
+class OneShotTimerTesterBase {
+ public:
+  // |did_run|, if provided, will be signaled when Run() fires.
+  explicit OneShotTimerTesterBase(
+      WaitableEvent* did_run = nullptr,
+      const TimeDelta& delay = TimeDelta::FromMilliseconds(10))
+      : did_run_(did_run), delay_(delay) {}
+
+  virtual ~OneShotTimerTesterBase() = default;
+
+  void Start() {
+    started_time_ = TimeTicks::Now();
+    timer_->Start(FROM_HERE, delay_, this, &OneShotTimerTesterBase::Run);
+  }
+
+  bool IsRunning() { return timer_->IsRunning(); }
+
+  TimeTicks started_time() const { return started_time_; }
+  TimeDelta delay() const { return delay_; }
+
+ protected:
+  virtual void Run() {
+    if (did_run_) {
+      EXPECT_FALSE(did_run_->IsSignaled());
+      did_run_->Signal();
+    }
+  }
+
+  std::unique_ptr<OneShotTimer> timer_ = std::make_unique<OneShotTimer>();
+
+ private:
+  WaitableEvent* const did_run_;
+  const TimeDelta delay_;
+  TimeTicks started_time_;
+
+  DISALLOW_COPY_AND_ASSIGN(OneShotTimerTesterBase);
+};
+
+// Extends functionality of OneShotTimerTesterBase with the abilities to wait
+// until the timer fires and to change task runner for the timer.
+class OneShotTimerTester : public OneShotTimerTesterBase {
+ public:
+  // |did_run|, if provided, will be signaled when Run() fires.
+  explicit OneShotTimerTester(
+      WaitableEvent* did_run = nullptr,
+      const TimeDelta& delay = TimeDelta::FromMilliseconds(10))
+      : OneShotTimerTesterBase(did_run, delay),
+        quit_closure_(run_loop_.QuitClosure()) {}
+
+  ~OneShotTimerTester() override = default;
+
+  void SetTaskRunner(scoped_refptr<SequencedTaskRunner> task_runner) {
+    timer_->SetTaskRunner(std::move(task_runner));
+
+    // Run() will be invoked on |task_runner| but |run_loop_|'s QuitClosure
+    // needs to run on this thread (where the MessageLoop lives).
+    quit_closure_ = Bind(IgnoreResult(&SequencedTaskRunner::PostTask),
+                         SequencedTaskRunnerHandle::Get(), FROM_HERE,
+                         run_loop_.QuitClosure());
+  }
+
+  // Blocks until Run() executes and confirms that Run() didn't fire before
+  // |delay_| expired.
+  void WaitAndConfirmTimerFiredAfterDelay() {
+    run_loop_.Run();
+
+    EXPECT_NE(TimeTicks(), started_time());
+    EXPECT_GE(TimeTicks::Now() - started_time(), delay());
+  }
+
+ protected:
+  // Overridable method to do things on Run() before signaling events/closures
+  // managed by this helper.
+  virtual void OnRun() {}
+
+ private:
+  void Run() override {
+    OnRun();
+    OneShotTimerTesterBase::Run();
+    quit_closure_.Run();
+  }
+
+  RunLoop run_loop_;
+  Closure quit_closure_;
+
+  DISALLOW_COPY_AND_ASSIGN(OneShotTimerTester);
+};
+
+class OneShotSelfDeletingTimerTester : public OneShotTimerTester {
+ protected:
+  void OnRun() override { timer_.reset(); }
+};
+
+constexpr int kNumRepeats = 10;
+
+class RepeatingTimerTester {
+ public:
+  explicit RepeatingTimerTester(WaitableEvent* did_run, const TimeDelta& delay)
+      : counter_(kNumRepeats),
+        quit_closure_(run_loop_.QuitClosure()),
+        did_run_(did_run),
+        delay_(delay) {}
+
+  void Start() {
+    started_time_ = TimeTicks::Now();
+    timer_.Start(FROM_HERE, delay_, this, &RepeatingTimerTester::Run);
+  }
+
+  void WaitAndConfirmTimerFiredRepeatedlyAfterDelay() {
+    run_loop_.Run();
+
+    EXPECT_NE(TimeTicks(), started_time_);
+    EXPECT_GE(TimeTicks::Now() - started_time_, kNumRepeats * delay_);
+  }
+
+ private:
+  void Run() {
+    if (--counter_ == 0) {
+      if (did_run_) {
+        EXPECT_FALSE(did_run_->IsSignaled());
+        did_run_->Signal();
+      }
+      timer_.Stop();
+      quit_closure_.Run();
+    }
+  }
+
+  RepeatingTimer timer_;
+  int counter_;
+
+  RunLoop run_loop_;
+  Closure quit_closure_;
+  WaitableEvent* const did_run_;
+
+  const TimeDelta delay_;
+  TimeTicks started_time_;
+
+  DISALLOW_COPY_AND_ASSIGN(RepeatingTimerTester);
+};
+
+// Basic test with same setup as RunTest_OneShotTimers_Cancel below to confirm
+// that |did_run_a| would be signaled in that test if it wasn't for the
+// deletion.
+void RunTest_OneShotTimers(MessageLoop::Type message_loop_type) {
+  MessageLoop loop(message_loop_type);
+
+  WaitableEvent did_run_a(WaitableEvent::ResetPolicy::MANUAL,
+                          WaitableEvent::InitialState::NOT_SIGNALED);
+  OneShotTimerTester a(&did_run_a);
+  a.Start();
+
+  OneShotTimerTester b;
+  b.Start();
+
+  b.WaitAndConfirmTimerFiredAfterDelay();
+
+  EXPECT_TRUE(did_run_a.IsSignaled());
+}
+
+void RunTest_OneShotTimers_Cancel(MessageLoop::Type message_loop_type) {
+  MessageLoop loop(message_loop_type);
+
+  WaitableEvent did_run_a(WaitableEvent::ResetPolicy::MANUAL,
+                          WaitableEvent::InitialState::NOT_SIGNALED);
+  OneShotTimerTester* a = new OneShotTimerTester(&did_run_a);
+
+  // This should run before the timer expires.
+  SequencedTaskRunnerHandle::Get()->DeleteSoon(FROM_HERE, a);
+
+  // Now start the timer.
+  a->Start();
+
+  OneShotTimerTester b;
+  b.Start();
+
+  b.WaitAndConfirmTimerFiredAfterDelay();
+
+  EXPECT_FALSE(did_run_a.IsSignaled());
+}
+
+void RunTest_OneShotSelfDeletingTimer(MessageLoop::Type message_loop_type) {
+  MessageLoop loop(message_loop_type);
+
+  OneShotSelfDeletingTimerTester f;
+  f.Start();
+  f.WaitAndConfirmTimerFiredAfterDelay();
+}
+
+void RunTest_RepeatingTimer(MessageLoop::Type message_loop_type,
+                            const TimeDelta& delay) {
+  MessageLoop loop(message_loop_type);
+
+  RepeatingTimerTester f(nullptr, delay);
+  f.Start();
+  f.WaitAndConfirmTimerFiredRepeatedlyAfterDelay();
+}
+
+void RunTest_RepeatingTimer_Cancel(MessageLoop::Type message_loop_type,
+                                   const TimeDelta& delay) {
+  MessageLoop loop(message_loop_type);
+
+  WaitableEvent did_run_a(WaitableEvent::ResetPolicy::MANUAL,
+                          WaitableEvent::InitialState::NOT_SIGNALED);
+  RepeatingTimerTester* a = new RepeatingTimerTester(&did_run_a, delay);
+
+  // This should run before the timer expires.
+  SequencedTaskRunnerHandle::Get()->DeleteSoon(FROM_HERE, a);
+
+  // Now start the timer.
+  a->Start();
+
+  RepeatingTimerTester b(nullptr, delay);
+  b.Start();
+
+  b.WaitAndConfirmTimerFiredRepeatedlyAfterDelay();
+
+  // |a| should not have fired despite |b| starting after it on the same
+  // sequence and being complete by now.
+  EXPECT_FALSE(did_run_a.IsSignaled());
+}
+
+class DelayTimerTarget {
+ public:
+  bool signaled() const { return signaled_; }
+
+  void Signal() {
+    ASSERT_FALSE(signaled_);
+    signaled_ = true;
+  }
+
+ private:
+  bool signaled_ = false;
+};
+
+void RunTest_DelayTimer_NoCall(MessageLoop::Type message_loop_type) {
+  MessageLoop loop(message_loop_type);
+
+  // If Reset() is never called, the timer shouldn't go off.
+  DelayTimerTarget target;
+  DelayTimer timer(FROM_HERE, TimeDelta::FromMilliseconds(1), &target,
+                   &DelayTimerTarget::Signal);
+
+  OneShotTimerTester tester;
+  tester.Start();
+  tester.WaitAndConfirmTimerFiredAfterDelay();
+
+  ASSERT_FALSE(target.signaled());
+}
+
+void RunTest_DelayTimer_OneCall(MessageLoop::Type message_loop_type) {
+  MessageLoop loop(message_loop_type);
+
+  DelayTimerTarget target;
+  DelayTimer timer(FROM_HERE, TimeDelta::FromMilliseconds(1), &target,
+                   &DelayTimerTarget::Signal);
+  timer.Reset();
+
+  OneShotTimerTester tester(nullptr, TimeDelta::FromMilliseconds(100));
+  tester.Start();
+  tester.WaitAndConfirmTimerFiredAfterDelay();
+
+  ASSERT_TRUE(target.signaled());
+}
+
+struct ResetHelper {
+  ResetHelper(DelayTimer* timer, DelayTimerTarget* target)
+      : timer_(timer), target_(target) {}
+
+  void Reset() {
+    ASSERT_FALSE(target_->signaled());
+    timer_->Reset();
+  }
+
+ private:
+  DelayTimer* const timer_;
+  DelayTimerTarget* const target_;
+};
+
+void RunTest_DelayTimer_Reset(MessageLoop::Type message_loop_type) {
+  MessageLoop loop(message_loop_type);
+
+  // If Reset() keeps being called before the delay expires, the timer should
+  // only fire once the full delay elapses after the final Reset().
+  DelayTimerTarget target;
+  DelayTimer timer(FROM_HERE, TimeDelta::FromMilliseconds(50), &target,
+                   &DelayTimerTarget::Signal);
+  timer.Reset();
+
+  ResetHelper reset_helper(&timer, &target);
+
+  OneShotTimer timers[20];
+  for (size_t i = 0; i < arraysize(timers); ++i) {
+    timers[i].Start(FROM_HERE, TimeDelta::FromMilliseconds(i * 10),
+                    &reset_helper, &ResetHelper::Reset);
+  }
+
+  OneShotTimerTester tester(nullptr, TimeDelta::FromMilliseconds(300));
+  tester.Start();
+  tester.WaitAndConfirmTimerFiredAfterDelay();
+
+  ASSERT_TRUE(target.signaled());
+}
+
+class DelayTimerFatalTarget {
+ public:
+  void Signal() {
+    ASSERT_TRUE(false);
+  }
+};
+
+void RunTest_DelayTimer_Deleted(MessageLoop::Type message_loop_type) {
+  MessageLoop loop(message_loop_type);
+
+  DelayTimerFatalTarget target;
+
+  {
+    DelayTimer timer(FROM_HERE, TimeDelta::FromMilliseconds(50), &target,
+                     &DelayTimerFatalTarget::Signal);
+    timer.Reset();
+  }
+
+  // When the timer is deleted, the DelayTimerFatalTarget should never be
+  // called.
+  PlatformThread::Sleep(TimeDelta::FromMilliseconds(100));
+}
+
+}  // namespace
+
+//-----------------------------------------------------------------------------
+// Each test is run against each type of MessageLoop.  That way we are sure
+// that timers work properly in all configurations.
+
+TEST(TimerTest, OneShotTimers) {
+  for (int i = 0; i < kNumTestingMessageLoops; i++) {
+    RunTest_OneShotTimers(testing_message_loops[i]);
+  }
+}
+
+TEST(TimerTest, OneShotTimers_Cancel) {
+  for (int i = 0; i < kNumTestingMessageLoops; i++) {
+    RunTest_OneShotTimers_Cancel(testing_message_loops[i]);
+  }
+}
+
+// If the underlying timer does not handle this properly, we will crash or
+// fail in a full page heap environment.
+TEST(TimerTest, OneShotSelfDeletingTimer) {
+  for (int i = 0; i < kNumTestingMessageLoops; i++) {
+    RunTest_OneShotSelfDeletingTimer(testing_message_loops[i]);
+  }
+}
+
+TEST(TimerTest, OneShotTimer_CustomTaskRunner) {
+  // A MessageLoop is required for the timer events on the other thread to
+  // communicate back to the Timer under test.
+  MessageLoop loop;
+
+  Thread other_thread("OneShotTimer_CustomTaskRunner");
+  other_thread.Start();
+
+  WaitableEvent did_run(WaitableEvent::ResetPolicy::MANUAL,
+                        WaitableEvent::InitialState::NOT_SIGNALED);
+  OneShotTimerTester f(&did_run);
+  f.SetTaskRunner(other_thread.task_runner());
+  f.Start();
+  EXPECT_TRUE(f.IsRunning() || did_run.IsSignaled());
+
+  f.WaitAndConfirmTimerFiredAfterDelay();
+  EXPECT_TRUE(did_run.IsSignaled());
+
+  // |f| should already have communicated back to this |loop| before invoking
+  // Run() and as such this thread should already be aware that |f| is no longer
+  // running.
+  EXPECT_TRUE(loop.IsIdleForTesting());
+  EXPECT_FALSE(f.IsRunning());
+}
+
+TEST(TimerTest, OneShotTimerWithTickClock) {
+  scoped_refptr<TestMockTimeTaskRunner> task_runner(
+      new TestMockTimeTaskRunner(Time::Now(), TimeTicks::Now()));
+  MessageLoop message_loop;
+  message_loop.SetTaskRunner(task_runner);
+  Receiver receiver;
+  OneShotTimer timer(task_runner->GetMockTickClock());
+  timer.Start(FROM_HERE, TimeDelta::FromSeconds(1),
+              Bind(&Receiver::OnCalled, Unretained(&receiver)));
+  task_runner->FastForwardBy(TimeDelta::FromSeconds(1));
+  EXPECT_TRUE(receiver.WasCalled());
+}
+
+TEST(TimerTest, RepeatingTimer) {
+  for (int i = 0; i < kNumTestingMessageLoops; i++) {
+    RunTest_RepeatingTimer(testing_message_loops[i],
+                           TimeDelta::FromMilliseconds(10));
+  }
+}
+
+TEST(TimerTest, RepeatingTimer_Cancel) {
+  for (int i = 0; i < kNumTestingMessageLoops; i++) {
+    RunTest_RepeatingTimer_Cancel(testing_message_loops[i],
+                                  TimeDelta::FromMilliseconds(10));
+  }
+}
+
+TEST(TimerTest, RepeatingTimerZeroDelay) {
+  for (int i = 0; i < kNumTestingMessageLoops; i++) {
+    RunTest_RepeatingTimer(testing_message_loops[i],
+                           TimeDelta::FromMilliseconds(0));
+  }
+}
+
+TEST(TimerTest, RepeatingTimerZeroDelay_Cancel) {
+  for (int i = 0; i < kNumTestingMessageLoops; i++) {
+    RunTest_RepeatingTimer_Cancel(testing_message_loops[i],
+                                  TimeDelta::FromMilliseconds(0));
+  }
+}
+
+TEST(TimerTest, RepeatingTimerWithTickClock) {
+  scoped_refptr<TestMockTimeTaskRunner> task_runner(
+      new TestMockTimeTaskRunner(Time::Now(), TimeTicks::Now()));
+  MessageLoop message_loop;
+  message_loop.SetTaskRunner(task_runner);
+  Receiver receiver;
+  const int expected_times_called = 10;
+  RepeatingTimer timer(task_runner->GetMockTickClock());
+  timer.Start(FROM_HERE, TimeDelta::FromSeconds(1),
+              Bind(&Receiver::OnCalled, Unretained(&receiver)));
+  task_runner->FastForwardBy(TimeDelta::FromSeconds(expected_times_called));
+  timer.Stop();
+  EXPECT_EQ(expected_times_called, receiver.TimesCalled());
+}
+
+TEST(TimerTest, DelayTimer_NoCall) {
+  for (int i = 0; i < kNumTestingMessageLoops; i++) {
+    RunTest_DelayTimer_NoCall(testing_message_loops[i]);
+  }
+}
+
+TEST(TimerTest, DelayTimer_OneCall) {
+  for (int i = 0; i < kNumTestingMessageLoops; i++) {
+    RunTest_DelayTimer_OneCall(testing_message_loops[i]);
+  }
+}
+
+// It's flaky on the buildbot, http://crbug.com/25038.
+TEST(TimerTest, DISABLED_DelayTimer_Reset) {
+  for (int i = 0; i < kNumTestingMessageLoops; i++) {
+    RunTest_DelayTimer_Reset(testing_message_loops[i]);
+  }
+}
+
+TEST(TimerTest, DelayTimer_Deleted) {
+  for (int i = 0; i < kNumTestingMessageLoops; i++) {
+    RunTest_DelayTimer_Deleted(testing_message_loops[i]);
+  }
+}
+
+TEST(TimerTest, DelayTimerWithTickClock) {
+  scoped_refptr<TestMockTimeTaskRunner> task_runner(
+      new TestMockTimeTaskRunner(Time::Now(), TimeTicks::Now()));
+  MessageLoop message_loop;
+  message_loop.SetTaskRunner(task_runner);
+  Receiver receiver;
+  DelayTimer timer(FROM_HERE, TimeDelta::FromSeconds(1), &receiver,
+                   &Receiver::OnCalled, task_runner->GetMockTickClock());
+  task_runner->FastForwardBy(TimeDelta::FromMilliseconds(999));
+  EXPECT_FALSE(receiver.WasCalled());
+  timer.Reset();
+  task_runner->FastForwardBy(TimeDelta::FromMilliseconds(999));
+  EXPECT_FALSE(receiver.WasCalled());
+  timer.Reset();
+  task_runner->FastForwardBy(TimeDelta::FromSeconds(1));
+  EXPECT_TRUE(receiver.WasCalled());
+}
+
+TEST(TimerTest, MessageLoopShutdown) {
+  // This test is designed to verify that shutdown of the
+  // message loop does not cause crashes if there are pending
+  // timers not yet fired.  It may only trigger exceptions
+  // if debug heap checking is enabled.
+  WaitableEvent did_run(WaitableEvent::ResetPolicy::MANUAL,
+                        WaitableEvent::InitialState::NOT_SIGNALED);
+  {
+    OneShotTimerTesterBase a(&did_run);
+    OneShotTimerTesterBase b(&did_run);
+    OneShotTimerTesterBase c(&did_run);
+    OneShotTimerTesterBase d(&did_run);
+    {
+      MessageLoop loop;
+      a.Start();
+      b.Start();
+    }  // MessageLoop destructs by falling out of scope.
+  }  // OneShotTimers destruct.  SHOULD NOT CRASH, of course.
+
+  EXPECT_FALSE(did_run.IsSignaled());
+}
+
+// Ref counted class which owns a Timer. The class passes a reference to itself
+// via the |user_task| parameter in Timer::Start(). |Timer::user_task_| might
+// end up holding the last reference to the class.
+class OneShotSelfOwningTimerTester
+    : public RefCounted<OneShotSelfOwningTimerTester> {
+ public:
+  OneShotSelfOwningTimerTester() = default;
+
+  void StartTimer() {
+    // Start timer with long delay in order to test the timer getting destroyed
+    // while a timer task is still pending.
+    timer_.Start(FROM_HERE, TimeDelta::FromDays(1),
+                 base::Bind(&OneShotSelfOwningTimerTester::Run, this));
+  }
+
+ private:
+  friend class RefCounted<OneShotSelfOwningTimerTester>;
+  ~OneShotSelfOwningTimerTester() = default;
+
+  void Run() {
+    ADD_FAILURE() << "Timer unexpectedly fired.";
+  }
+
+  OneShotTimer timer_;
+
+  DISALLOW_COPY_AND_ASSIGN(OneShotSelfOwningTimerTester);
+};
+
+TEST(TimerTest, MessageLoopShutdownSelfOwningTimer) {
+  // This test verifies that shutdown of the message loop does not cause crashes
+  // if there is a pending timer not yet fired and |Timer::user_task_| owns the
+  // timer. The test may only trigger exceptions if debug heap checking is
+  // enabled.
+
+  MessageLoop loop;
+  scoped_refptr<OneShotSelfOwningTimerTester> tester =
+      new OneShotSelfOwningTimerTester();
+
+  std::move(tester)->StartTimer();
+  // |Timer::user_task_| owns sole reference to |tester|.
+
+  // MessageLoop destructs by falling out of scope. SHOULD NOT CRASH.
+}
+
+void TimerTestCallback() {
+}
+
+TEST(TimerTest, NonRepeatIsRunning) {
+  {
+    MessageLoop loop;
+    Timer timer(false, false);
+    EXPECT_FALSE(timer.IsRunning());
+    timer.Start(FROM_HERE, TimeDelta::FromDays(1), Bind(&TimerTestCallback));
+    EXPECT_TRUE(timer.IsRunning());
+    timer.Stop();
+    EXPECT_FALSE(timer.IsRunning());
+    EXPECT_TRUE(timer.user_task().is_null());
+  }
+
+  {
+    Timer timer(true, false);
+    MessageLoop loop;
+    EXPECT_FALSE(timer.IsRunning());
+    timer.Start(FROM_HERE, TimeDelta::FromDays(1), Bind(&TimerTestCallback));
+    EXPECT_TRUE(timer.IsRunning());
+    timer.Stop();
+    EXPECT_FALSE(timer.IsRunning());
+    ASSERT_FALSE(timer.user_task().is_null());
+    timer.Reset();
+    EXPECT_TRUE(timer.IsRunning());
+  }
+}
+
+TEST(TimerTest, NonRepeatMessageLoopDeath) {
+  Timer timer(false, false);
+  {
+    MessageLoop loop;
+    EXPECT_FALSE(timer.IsRunning());
+    timer.Start(FROM_HERE, TimeDelta::FromDays(1), Bind(&TimerTestCallback));
+    EXPECT_TRUE(timer.IsRunning());
+  }
+  EXPECT_FALSE(timer.IsRunning());
+  EXPECT_TRUE(timer.user_task().is_null());
+}
+
+TEST(TimerTest, RetainRepeatIsRunning) {
+  MessageLoop loop;
+  Timer timer(FROM_HERE, TimeDelta::FromDays(1), Bind(&TimerTestCallback),
+              true);
+  EXPECT_FALSE(timer.IsRunning());
+  timer.Reset();
+  EXPECT_TRUE(timer.IsRunning());
+  timer.Stop();
+  EXPECT_FALSE(timer.IsRunning());
+  timer.Reset();
+  EXPECT_TRUE(timer.IsRunning());
+}
+
+TEST(TimerTest, RetainNonRepeatIsRunning) {
+  MessageLoop loop;
+  Timer timer(FROM_HERE, TimeDelta::FromDays(1), Bind(&TimerTestCallback),
+              false);
+  EXPECT_FALSE(timer.IsRunning());
+  timer.Reset();
+  EXPECT_TRUE(timer.IsRunning());
+  timer.Stop();
+  EXPECT_FALSE(timer.IsRunning());
+  timer.Reset();
+  EXPECT_TRUE(timer.IsRunning());
+}
+
+//-----------------------------------------------------------------------------
+
+namespace {
+
+bool g_callback_happened1 = false;
+bool g_callback_happened2 = false;
+
+void ClearAllCallbackHappened() {
+  g_callback_happened1 = false;
+  g_callback_happened2 = false;
+}
+
+void SetCallbackHappened1() {
+  g_callback_happened1 = true;
+  RunLoop::QuitCurrentWhenIdleDeprecated();
+}
+
+void SetCallbackHappened2() {
+  g_callback_happened2 = true;
+  RunLoop::QuitCurrentWhenIdleDeprecated();
+}
+
+}  // namespace
+
+TEST(TimerTest, ContinuationStopStart) {
+  {
+    ClearAllCallbackHappened();
+    MessageLoop loop;
+    Timer timer(false, false);
+    timer.Start(FROM_HERE, TimeDelta::FromMilliseconds(10),
+                Bind(&SetCallbackHappened1));
+    timer.Stop();
+    timer.Start(FROM_HERE, TimeDelta::FromMilliseconds(40),
+                Bind(&SetCallbackHappened2));
+    RunLoop().Run();
+    EXPECT_FALSE(g_callback_happened1);
+    EXPECT_TRUE(g_callback_happened2);
+  }
+}
+
+TEST(TimerTest, ContinuationReset) {
+  {
+    ClearAllCallbackHappened();
+    MessageLoop loop;
+    Timer timer(false, false);
+    timer.Start(FROM_HERE, TimeDelta::FromMilliseconds(10),
+                Bind(&SetCallbackHappened1));
+    timer.Reset();
+    // Since Reset happened before task ran, the user_task must not be cleared:
+    ASSERT_FALSE(timer.user_task().is_null());
+    RunLoop().Run();
+    EXPECT_TRUE(g_callback_happened1);
+  }
+}
+
+namespace {
+
+// Fixture for tests requiring ScopedTaskEnvironment. Includes a WaitableEvent
+// so that cases may Wait() on one thread and Signal() (explicitly, or
+// implicitly via helper methods) on another.
+class TimerSequenceTest : public testing::Test {
+ public:
+  TimerSequenceTest()
+      : event_(WaitableEvent::ResetPolicy::AUTOMATIC,
+               WaitableEvent::InitialState::NOT_SIGNALED) {}
+
+  // Block until Signal() is called on another thread.
+  void Wait() { event_.Wait(); }
+
+  void Signal() { event_.Signal(); }
+
+  // Helper to augment a task with a subsequent call to Signal().
+  Closure TaskWithSignal(const Closure& task) {
+    return Bind(&TimerSequenceTest::RunTaskAndSignal, Unretained(this), task);
+  }
+
+  // Create the timer.
+  void CreateTimer() { timer_.reset(new OneShotTimer); }
+
+  // Schedule an event on the timer.
+  void StartTimer(TimeDelta delay, const Closure& task) {
+    timer_->Start(FROM_HERE, delay, task);
+  }
+
+  void SetTaskRunnerForTimer(scoped_refptr<SequencedTaskRunner> task_runner) {
+    timer_->SetTaskRunner(std::move(task_runner));
+  }
+
+  // Tell the timer to abandon the task.
+  void AbandonTask() {
+    EXPECT_TRUE(timer_->IsRunning());
+    // Reset() to call Timer::AbandonScheduledTask()
+    timer_->Reset();
+    EXPECT_TRUE(timer_->IsRunning());
+    timer_->Stop();
+    EXPECT_FALSE(timer_->IsRunning());
+  }
+
+  static void VerifyAffinity(const SequencedTaskRunner* task_runner) {
+    EXPECT_TRUE(task_runner->RunsTasksInCurrentSequence());
+  }
+
+  // Delete the timer.
+  void DeleteTimer() { timer_.reset(); }
+
+ private:
+  void RunTaskAndSignal(const Closure& task) {
+    task.Run();
+    Signal();
+  }
+
+  base::test::ScopedTaskEnvironment scoped_task_environment_;
+  WaitableEvent event_;
+  std::unique_ptr<OneShotTimer> timer_;
+
+  DISALLOW_COPY_AND_ASSIGN(TimerSequenceTest);
+};
+
+}  // namespace
+
+TEST_F(TimerSequenceTest, OneShotTimerTaskOnPoolSequence) {
+  scoped_refptr<SequencedTaskRunner> task_runner =
+      base::CreateSequencedTaskRunnerWithTraits({});
+
+  base::RunLoop run_loop_;
+
+  // Timer is created on this thread.
+  CreateTimer();
+
+  // Task will execute on a pool thread.
+  SetTaskRunnerForTimer(task_runner);
+  StartTimer(TimeDelta::FromMilliseconds(1),
+             Bind(IgnoreResult(&SequencedTaskRunner::PostTask),
+                  SequencedTaskRunnerHandle::Get(), FROM_HERE,
+                  run_loop_.QuitClosure()));
+
+  // Spin the loop so that the delayed task fires on it, which will forward it
+  // to |task_runner|. Since the Timer's task is one that posts back to this
+  // MessageLoop to quit, we finally unblock.
+  run_loop_.Run();
+
+  // Timer will be destroyed on this thread.
+  DeleteTimer();
+}
+
+TEST_F(TimerSequenceTest, OneShotTimerUsedOnPoolSequence) {
+  scoped_refptr<SequencedTaskRunner> task_runner =
+      base::CreateSequencedTaskRunnerWithTraits({});
+
+  // Timer is created on this thread.
+  CreateTimer();
+
+  // Task will be scheduled from a pool thread.
+  task_runner->PostTask(
+      FROM_HERE, BindOnce(&TimerSequenceTest::StartTimer, Unretained(this),
+                          TimeDelta::FromMilliseconds(1),
+                          Bind(&TimerSequenceTest::Signal, Unretained(this))));
+  Wait();
+
+  // Timer must be destroyed on pool thread, too.
+  task_runner->PostTask(
+      FROM_HERE,
+      TaskWithSignal(Bind(&TimerSequenceTest::DeleteTimer, Unretained(this))));
+  Wait();
+}
+
+TEST_F(TimerSequenceTest, OneShotTimerTwoSequencesAbandonTask) {
+  scoped_refptr<SequencedTaskRunner> task_runner1 =
+      base::CreateSequencedTaskRunnerWithTraits({});
+  scoped_refptr<SequencedTaskRunner> task_runner2 =
+      base::CreateSequencedTaskRunnerWithTraits({});
+
+  // Create timer on sequence #1.
+  task_runner1->PostTask(
+      FROM_HERE,
+      TaskWithSignal(Bind(&TimerSequenceTest::CreateTimer, Unretained(this))));
+  Wait();
+
+  // And tell it to execute on a different sequence (#2).
+  task_runner1->PostTask(
+      FROM_HERE, TaskWithSignal(Bind(&TimerSequenceTest::SetTaskRunnerForTimer,
+                                     Unretained(this), task_runner2)));
+  Wait();
+
+  // Task will be scheduled from sequence #1.
+  task_runner1->PostTask(
+      FROM_HERE, BindOnce(&TimerSequenceTest::StartTimer, Unretained(this),
+                          TimeDelta::FromHours(1), DoNothing()));
+
+  // Abandon task - must be called from scheduling sequence (#1).
+  task_runner1->PostTask(
+      FROM_HERE,
+      TaskWithSignal(Bind(&TimerSequenceTest::AbandonTask, Unretained(this))));
+  Wait();
+
+  // Timer must be destroyed on the sequence it was scheduled from (#1).
+  task_runner1->PostTask(
+      FROM_HERE,
+      TaskWithSignal(Bind(&TimerSequenceTest::DeleteTimer, Unretained(this))));
+  Wait();
+}
+
+TEST_F(TimerSequenceTest, OneShotTimerUsedAndTaskedOnDifferentSequences) {
+  scoped_refptr<SequencedTaskRunner> task_runner1 =
+      base::CreateSequencedTaskRunnerWithTraits({});
+  scoped_refptr<SequencedTaskRunner> task_runner2 =
+      base::CreateSequencedTaskRunnerWithTraits({});
+
+  // Create timer on sequence #1.
+  task_runner1->PostTask(
+      FROM_HERE,
+      TaskWithSignal(Bind(&TimerSequenceTest::CreateTimer, Unretained(this))));
+  Wait();
+
+  // And tell it to execute on a different sequence (#2).
+  task_runner1->PostTask(
+      FROM_HERE, TaskWithSignal(Bind(&TimerSequenceTest::SetTaskRunnerForTimer,
+                                     Unretained(this), task_runner2)));
+  Wait();
+
+  // Task will be scheduled from sequence #1.
+  task_runner1->PostTask(
+      FROM_HERE,
+      BindOnce(&TimerSequenceTest::StartTimer, Unretained(this),
+               TimeDelta::FromMilliseconds(1),
+               TaskWithSignal(Bind(&TimerSequenceTest::VerifyAffinity,
+                                   Unretained(task_runner2.get())))));
+
+  Wait();
+
+  // Timer must be destroyed on the sequence it was scheduled from (#1).
+  task_runner1->PostTask(
+      FROM_HERE,
+      TaskWithSignal(Bind(&TimerSequenceTest::DeleteTimer, Unretained(this))));
+  Wait();
+}
+
+}  // namespace base
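
The TimerSequenceTest cases above pin down Timer's threading contract: SetTaskRunner() redirects where the user task runs, while Start()/Stop()/Reset() and destruction stay on the scheduling sequence. A condensed sketch of that contract; DoWorkOnPool is an illustrative free function:

    scoped_refptr<base::SequencedTaskRunner> pool_runner =
        base::CreateSequencedTaskRunnerWithTraits({});

    base::OneShotTimer timer;
    timer.SetTaskRunner(pool_runner);  // Must precede Start().
    timer.Start(FROM_HERE, base::TimeDelta::FromMilliseconds(1),
                base::Bind(&DoWorkOnPool));  // Runs on |pool_runner|.
    // Stop(), Reset() and ~OneShotTimer() must happen on this sequence, as
    // exercised by OneShotTimerTwoSequencesAbandonTask above.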
diff --git a/base/tools_sanity_unittest.cc b/base/tools_sanity_unittest.cc
new file mode 100644
index 0000000..98c30df
--- /dev/null
+++ b/base/tools_sanity_unittest.cc
@@ -0,0 +1,423 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// This file contains intentional memory errors, some of which may lead to
+// crashes if the test is run without special memory testing tools. We use these
+// errors to verify the sanity of the tools.
+
+#include <stddef.h>
+
+#include "base/atomicops.h"
+#include "base/cfi_buildflags.h"
+#include "base/debug/asan_invalid_access.h"
+#include "base/debug/profiler.h"
+#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
+#include "base/threading/thread.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+const base::subtle::Atomic32 kMagicValue = 42;
+
+// Helper for memory accesses that can potentially corrupt memory or cause a
+// crash during a native run.
+#if defined(ADDRESS_SANITIZER)
+#if defined(OS_IOS)
+// EXPECT_DEATH is not supported on iOS.
+#define HARMFUL_ACCESS(action, error_regexp) do { action; } while (0)
+#else
+#define HARMFUL_ACCESS(action, error_regexp) EXPECT_DEATH(action, error_regexp)
+#endif  // defined(OS_IOS)
+#else
+#define HARMFUL_ACCESS(action, error_regexp)
+#define HARMFUL_ACCESS_IS_NOOP
+#endif
+
+void DoReadUninitializedValue(char *ptr) {
+  // Comparison with 64 is to prevent clang from optimizing away the
+  // jump -- valgrind only catches jumps and conditional moves, but clang uses
+  // the borrow flag if the condition is just `*ptr == '\0'`.  We no longer
+  // support valgrind, but this constant should be fine to keep as-is.
+  if (*ptr == 64) {
+    VLOG(1) << "Uninit condition is true";
+  } else {
+    VLOG(1) << "Uninit condition is false";
+  }
+}
+
+void ReadUninitializedValue(char *ptr) {
+#if defined(MEMORY_SANITIZER)
+  EXPECT_DEATH(DoReadUninitializedValue(ptr),
+               "use-of-uninitialized-value");
+#else
+  DoReadUninitializedValue(ptr);
+#endif
+}
+
+#ifndef HARMFUL_ACCESS_IS_NOOP
+void ReadValueOutOfArrayBoundsLeft(char *ptr) {
+  char c = ptr[-2];
+  VLOG(1) << "Reading a byte out of bounds: " << c;
+}
+
+void ReadValueOutOfArrayBoundsRight(char *ptr, size_t size) {
+  char c = ptr[size + 1];
+  VLOG(1) << "Reading a byte out of bounds: " << c;
+}
+
+void WriteValueOutOfArrayBoundsLeft(char *ptr) {
+  ptr[-1] = kMagicValue;
+}
+
+void WriteValueOutOfArrayBoundsRight(char *ptr, size_t size) {
+  ptr[size] = kMagicValue;
+}
+#endif  // HARMFUL_ACCESS_IS_NOOP
+
+void MakeSomeErrors(char *ptr, size_t size) {
+  ReadUninitializedValue(ptr);
+
+  HARMFUL_ACCESS(ReadValueOutOfArrayBoundsLeft(ptr),
+                 "2 bytes to the left");
+  HARMFUL_ACCESS(ReadValueOutOfArrayBoundsRight(ptr, size),
+                 "1 bytes to the right");
+  HARMFUL_ACCESS(WriteValueOutOfArrayBoundsLeft(ptr),
+                 "1 bytes to the left");
+  HARMFUL_ACCESS(WriteValueOutOfArrayBoundsRight(ptr, size),
+                 "0 bytes to the right");
+}
+
+}  // namespace
+
+// A memory leak detector should report an error in this test.
+TEST(ToolsSanityTest, MemoryLeak) {
+  // Without the |volatile|, clang optimizes away the next two lines.
+  int* volatile leak = new int[256];  // Leak some memory intentionally.
+  leak[4] = 1;  // Make sure the allocated memory is used.
+}
+
+#if (defined(ADDRESS_SANITIZER) && defined(OS_IOS))
+// Because iOS doesn't support death tests, each of the following tests will
+// crash the whole program under Asan.
+#define MAYBE_AccessesToNewMemory DISABLED_AccessesToNewMemory
+#define MAYBE_AccessesToMallocMemory DISABLED_AccessesToMallocMemory
+#else
+#define MAYBE_AccessesToNewMemory AccessesToNewMemory
+#define MAYBE_AccessesToMallocMemory AccessesToMallocMemory
+#endif  // (defined(ADDRESS_SANITIZER) && defined(OS_IOS))
+
+// The following tests pass with Clang r170392, but not r172454, which
+// makes AddressSanitizer detect errors in them. We disable these tests under
+// AddressSanitizer until we fully switch to Clang r172454. After that the
+// tests should be put back under the (defined(OS_IOS) || defined(OS_WIN))
+// clause above.
+// See also http://crbug.com/172614.
+#if defined(ADDRESS_SANITIZER)
+#define MAYBE_SingleElementDeletedWithBraces \
+    DISABLED_SingleElementDeletedWithBraces
+#define MAYBE_ArrayDeletedWithoutBraces DISABLED_ArrayDeletedWithoutBraces
+#else
+#define MAYBE_ArrayDeletedWithoutBraces ArrayDeletedWithoutBraces
+#define MAYBE_SingleElementDeletedWithBraces SingleElementDeletedWithBraces
+#endif  // defined(ADDRESS_SANITIZER)
+
+TEST(ToolsSanityTest, MAYBE_AccessesToNewMemory) {
+  char *foo = new char[10];
+  MakeSomeErrors(foo, 10);
+  delete [] foo;
+  // Use after delete.
+  HARMFUL_ACCESS(foo[5] = 0, "heap-use-after-free");
+}
+
+TEST(ToolsSanityTest, MAYBE_AccessesToMallocMemory) {
+  char *foo = reinterpret_cast<char*>(malloc(10));
+  MakeSomeErrors(foo, 10);
+  free(foo);
+  // Use after free.
+  HARMFUL_ACCESS(foo[5] = 0, "heap-use-after-free");
+}
+
+#if defined(ADDRESS_SANITIZER)
+
+static int* allocateArray() {
+  // Clang warns about the mismatched new[]/delete if they occur in the same
+  // function.
+  return new int[10];
+}
+
+// This test may corrupt memory if not compiled with AddressSanitizer.
+TEST(ToolsSanityTest, MAYBE_ArrayDeletedWithoutBraces) {
+  // Without the |volatile|, clang optimizes away the next two lines.
+  int* volatile foo = allocateArray();
+  delete foo;
+}
+#endif
+
+#if defined(ADDRESS_SANITIZER)
+static int* allocateScalar() {
+  // Clang warns about the mismatched new/delete[] if they occur in the same
+  // function.
+  return new int;
+}
+
+// This test may corrupt memory if not compiled with AddressSanitizer.
+TEST(ToolsSanityTest, MAYBE_SingleElementDeletedWithBraces) {
+  // Without the |volatile|, clang optimizes away the next two lines.
+  int* volatile foo = allocateScalar();
+  (void) foo;
+  delete [] foo;
+}
+#endif
+
+#if defined(ADDRESS_SANITIZER)
+
+TEST(ToolsSanityTest, DISABLED_AddressSanitizerNullDerefCrashTest) {
+  // Intentionally crash to make sure AddressSanitizer is running.
+  // This test should not be run on bots.
+  int* volatile zero = NULL;
+  *zero = 0;
+}
+
+TEST(ToolsSanityTest, DISABLED_AddressSanitizerLocalOOBCrashTest) {
+  // Intentionally crash to make sure AddressSanitizer is instrumenting
+  // the local variables.
+  // This test should not be run on bots.
+  int array[5];
+  // Work around the OOB warning reported by Clang.
+  int* volatile access = &array[5];
+  *access = 43;
+}
+
+namespace {
+int g_asan_test_global_array[10];
+}  // namespace
+
+TEST(ToolsSanityTest, DISABLED_AddressSanitizerGlobalOOBCrashTest) {
+  // Intentionally crash to make sure AddressSanitizer is instrumenting
+  // the global variables.
+  // This test should not be run on bots.
+
+  // Work around the OOB warning reported by Clang.
+  int* volatile access = g_asan_test_global_array - 1;
+  *access = 43;
+}
+
+#ifndef HARMFUL_ACCESS_IS_NOOP
+TEST(ToolsSanityTest, AsanHeapOverflow) {
+  HARMFUL_ACCESS(debug::AsanHeapOverflow(), "to the right");
+}
+
+TEST(ToolsSanityTest, AsanHeapUnderflow) {
+  HARMFUL_ACCESS(debug::AsanHeapUnderflow(), "to the left");
+}
+
+TEST(ToolsSanityTest, AsanHeapUseAfterFree) {
+  HARMFUL_ACCESS(debug::AsanHeapUseAfterFree(), "heap-use-after-free");
+}
+
+#if defined(OS_WIN)
+// The ASAN runtime doesn't detect heap corruption, this needs fixing before
+// ASAN builds can ship to the wild. See https://crbug.com/818747.
+TEST(ToolsSanityTest, DISABLED_AsanCorruptHeapBlock) {
+  HARMFUL_ACCESS(debug::AsanCorruptHeapBlock(), "");
+}
+
+TEST(ToolsSanityTest, DISABLED_AsanCorruptHeap) {
+  // This test will kill the process by raising an exception, there's no
+  // particular string to look for in the stack trace.
+  EXPECT_DEATH(debug::AsanCorruptHeap(), "");
+}
+#endif  // OS_WIN
+#endif  // !HARMFUL_ACCESS_IS_NOOP
+
+#endif  // ADDRESS_SANITIZER
+
+namespace {
+
+// We use caps here just to ensure that the method name doesn't interfere with
+// the wildcarded suppressions.
+class TOOLS_SANITY_TEST_CONCURRENT_THREAD : public PlatformThread::Delegate {
+ public:
+  explicit TOOLS_SANITY_TEST_CONCURRENT_THREAD(bool *value) : value_(value) {}
+  ~TOOLS_SANITY_TEST_CONCURRENT_THREAD() override = default;
+  void ThreadMain() override {
+    *value_ = true;
+
+    // Sleep for a few milliseconds so the two threads are more likely to live
+    // simultaneously. Otherwise we may miss the report due to mutex
+    // lock/unlock's inside thread creation code in pure-happens-before mode...
+    PlatformThread::Sleep(TimeDelta::FromMilliseconds(100));
+  }
+ private:
+  bool *value_;
+};
+
+class ReleaseStoreThread : public PlatformThread::Delegate {
+ public:
+  explicit ReleaseStoreThread(base::subtle::Atomic32 *value) : value_(value) {}
+  ~ReleaseStoreThread() override = default;
+  void ThreadMain() override {
+    base::subtle::Release_Store(value_, kMagicValue);
+
+    // Sleep for a few milliseconds so the two threads are more likely to live
+    // simultaneously. Otherwise we may miss the report due to mutex
+    // lock/unlock's inside thread creation code in pure-happens-before mode...
+    PlatformThread::Sleep(TimeDelta::FromMilliseconds(100));
+  }
+ private:
+  base::subtle::Atomic32 *value_;
+};
+
+class AcquireLoadThread : public PlatformThread::Delegate {
+ public:
+  explicit AcquireLoadThread(base::subtle::Atomic32 *value) : value_(value) {}
+  ~AcquireLoadThread() override = default;
+  void ThreadMain() override {
+    // Wait for the other thread to make Release_Store
+    PlatformThread::Sleep(TimeDelta::FromMilliseconds(100));
+    base::subtle::Acquire_Load(value_);
+  }
+ private:
+  base::subtle::Atomic32 *value_;
+};
+
+void RunInParallel(PlatformThread::Delegate *d1, PlatformThread::Delegate *d2) {
+  PlatformThreadHandle a;
+  PlatformThreadHandle b;
+  PlatformThread::Create(0, d1, &a);
+  PlatformThread::Create(0, d2, &b);
+  PlatformThread::Join(a);
+  PlatformThread::Join(b);
+}
+
+#if defined(THREAD_SANITIZER)
+void DataRace() {
+  bool *shared = new bool(false);
+  TOOLS_SANITY_TEST_CONCURRENT_THREAD thread1(shared), thread2(shared);
+  RunInParallel(&thread1, &thread2);
+  EXPECT_TRUE(*shared);
+  delete shared;
+  // We're in a death test - crash.
+  CHECK(0);
+}
+#endif
+
+}  // namespace
+
+#if defined(THREAD_SANITIZER)
+// A data race detector should report an error in this test.
+TEST(ToolsSanityTest, DataRace) {
+  // The suppression regexp must match that in base/debug/tsan_suppressions.cc.
+  EXPECT_DEATH(DataRace(), "1 race:base/tools_sanity_unittest.cc");
+}
+#endif
+
+TEST(ToolsSanityTest, AnnotateBenignRace) {
+  bool shared = false;
+  ANNOTATE_BENIGN_RACE(&shared, "Intentional race - make sure doesn't show up");
+  TOOLS_SANITY_TEST_CONCURRENT_THREAD thread1(&shared), thread2(&shared);
+  RunInParallel(&thread1, &thread2);
+  EXPECT_TRUE(shared);
+}
+
+TEST(ToolsSanityTest, AtomicsAreIgnored) {
+  base::subtle::Atomic32 shared = 0;
+  ReleaseStoreThread thread1(&shared);
+  AcquireLoadThread thread2(&shared);
+  RunInParallel(&thread1, &thread2);
+  EXPECT_EQ(kMagicValue, shared);
+}
+
+#if BUILDFLAG(CFI_ENFORCEMENT_TRAP)
+#if defined(OS_WIN)
+#define CFI_ERROR_MSG "EXCEPTION_ILLEGAL_INSTRUCTION"
+#elif defined(OS_ANDROID)
+// TODO(pcc): Produce proper stack dumps on Android and test for the correct
+// si_code here.
+#define CFI_ERROR_MSG "^$"
+#else
+#define CFI_ERROR_MSG "ILL_ILLOPN"
+#endif
+#elif BUILDFLAG(CFI_ENFORCEMENT_DIAGNOSTIC)
+#define CFI_ERROR_MSG "runtime error: control flow integrity check"
+#endif  // BUILDFLAG(CFI_ENFORCEMENT_TRAP || CFI_ENFORCEMENT_DIAGNOSTIC)
+
+#if defined(CFI_ERROR_MSG)
+class A {
+ public:
+  A(): n_(0) {}
+  virtual void f() { n_++; }
+ protected:
+  int n_;
+};
+
+class B: public A {
+ public:
+  void f() override { n_--; }
+};
+
+class C: public B {
+ public:
+  void f() override { n_ += 2; }
+};
+
+NOINLINE void KillVptrAndCall(A *obj) {
+  *reinterpret_cast<void **>(obj) = 0;
+  obj->f();
+}
+
+TEST(ToolsSanityTest, BadVirtualCallNull) {
+  A a;
+  B b;
+  EXPECT_DEATH({ KillVptrAndCall(&a); KillVptrAndCall(&b); }, CFI_ERROR_MSG);
+}
+
+NOINLINE void OverwriteVptrAndCall(B *obj, A *vptr) {
+  *reinterpret_cast<void **>(obj) = *reinterpret_cast<void **>(vptr);
+  obj->f();
+}
+
+TEST(ToolsSanityTest, BadVirtualCallWrongType) {
+  A a;
+  B b;
+  C c;
+  EXPECT_DEATH({ OverwriteVptrAndCall(&b, &a); OverwriteVptrAndCall(&b, &c); },
+               CFI_ERROR_MSG);
+}
+
+// TODO(pcc): remove CFI_CAST_CHECK, see https://crbug.com/626794.
+#if BUILDFLAG(CFI_CAST_CHECK)
+TEST(ToolsSanityTest, BadDerivedCast) {
+  A a;
+  EXPECT_DEATH((void)(B*)&a, CFI_ERROR_MSG);
+}
+
+TEST(ToolsSanityTest, BadUnrelatedCast) {
+  class A {
+    virtual void f() {}
+  };
+
+  class B {
+    virtual void f() {}
+  };
+
+  A a;
+  EXPECT_DEATH((void)(B*)&a, CFI_ERROR_MSG);
+}
+#endif  // BUILDFLAG(CFI_CAST_CHECK)
+
+#endif  // CFI_ERROR_MSG
+
+#undef CFI_ERROR_MSG
+#undef MAYBE_AccessesToNewMemory
+#undef MAYBE_AccessesToMallocMemory
+#undef MAYBE_ArrayDeletedWithoutBraces
+#undef MAYBE_SingleElementDeletedWithBraces
+#undef HARMFUL_ACCESS
+#undef HARMFUL_ACCESS_IS_NOOP
+
+}  // namespace base
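
The file above leans on the HARMFUL_ACCESS macro: under ASan (outside iOS) it wraps the faulting statement in a gtest death test whose regexp must match the sanitizer report, and it compiles to nothing otherwise. Hand-expanding one of the checks makes the mechanism concrete; this is just the macro's own expansion, not new test logic:

    // HARMFUL_ACCESS(foo[5] = 0, "heap-use-after-free") with
    // ADDRESS_SANITIZER defined and death tests available becomes:
    char* foo = new char[10];
    delete[] foo;
    EXPECT_DEATH(foo[5] = 0, "heap-use-after-free");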
diff --git a/base/trace_event/OWNERS b/base/trace_event/OWNERS
new file mode 100644
index 0000000..24a0bd2
--- /dev/null
+++ b/base/trace_event/OWNERS
@@ -0,0 +1,15 @@
+chiniforooshan@chromium.org
+oysteine@chromium.org
+primiano@chromium.org
+per-file trace_event_android.cc=wangxianzhu@chromium.org
+
+# For memory-infra related changes
+ssid@chromium.org
+
+# Emeritus:
+dsinclair@chromium.org
+nduca@chromium.org
+simonhatch@chromium.org
+
+# TEAM: tracing@chromium.org
+# COMPONENT: Speed>Tracing
diff --git a/base/trace_event/auto_open_close_event.cc b/base/trace_event/auto_open_close_event.cc
new file mode 100644
index 0000000..1879700
--- /dev/null
+++ b/base/trace_event/auto_open_close_event.cc
@@ -0,0 +1,52 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/auto_open_close_event.h"
+
+#include "base/macros.h"
+#include "base/time/time.h"
+#include "base/trace_event/trace_event.h"
+
+namespace base {
+namespace trace_event {
+
+AutoOpenCloseEvent::AutoOpenCloseEvent(AutoOpenCloseEvent::Type type,
+                                       const char* category,
+                                       const char* event_name)
+    : category_(category), event_name_(event_name), weak_factory_(this) {
+  base::trace_event::TraceLog::GetInstance()->AddAsyncEnabledStateObserver(
+      weak_factory_.GetWeakPtr());
+}
+
+AutoOpenCloseEvent::~AutoOpenCloseEvent() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  base::trace_event::TraceLog::GetInstance()->RemoveAsyncEnabledStateObserver(
+      this);
+}
+
+void AutoOpenCloseEvent::Begin() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  start_time_ = TRACE_TIME_TICKS_NOW();
+  TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP0(
+      category_, event_name_, static_cast<void*>(this), start_time_);
+}
+
+void AutoOpenCloseEvent::End() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  TRACE_EVENT_ASYNC_END0(category_, event_name_, static_cast<void*>(this));
+  start_time_ = base::TimeTicks();
+}
+
+void AutoOpenCloseEvent::OnTraceLogEnabled() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  if (start_time_.ToInternalValue() != 0)
+    TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP0(
+        category_, event_name_, static_cast<void*>(this), start_time_);
+}
+
+void AutoOpenCloseEvent::OnTraceLogDisabled() {}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/auto_open_close_event.h b/base/trace_event/auto_open_close_event.h
new file mode 100644
index 0000000..795a494
--- /dev/null
+++ b/base/trace_event/auto_open_close_event.h
@@ -0,0 +1,57 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_AUTO_OPEN_CLOSE_EVENT_H_
+#define BASE_AUTO_OPEN_CLOSE_EVENT_H_
+
+#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
+#include "base/threading/thread_checker.h"
+#include "base/time/time.h"
+#include "base/trace_event/trace_event.h"
+
+namespace base {
+namespace trace_event {
+
+// Class for tracing events that support "auto-opening" and "auto-closing".
+// "auto-opening" = if the trace event is started (call Begin() before
+// tracing is started,the trace event will be opened, with the start time
+// being the time that the trace event was actually started.
+// "auto-closing" = if the trace event is started but not ended by the time
+// tracing ends, then the trace event will be automatically closed at the
+// end of tracing.
+class BASE_EXPORT AutoOpenCloseEvent
+    : public TraceLog::AsyncEnabledStateObserver {
+ public:
+  enum Type {
+    ASYNC
+  };
+
+  // As in the rest of the tracing macros, the const char* arguments here
+  // must be pointers to indefinitely lived strings (e.g. hard-coded string
+  // literals are okay, but not strings created by c_str())
+  AutoOpenCloseEvent(Type type, const char* category, const char* event_name);
+  ~AutoOpenCloseEvent() override;
+
+  void Begin();
+  void End();
+
+  // AsyncEnabledStateObserver implementation
+  void OnTraceLogEnabled() override;
+  void OnTraceLogDisabled() override;
+
+ private:
+  const char* const category_;
+  const char* const event_name_;
+  base::TimeTicks start_time_;
+  base::ThreadChecker thread_checker_;
+  WeakPtrFactory<AutoOpenCloseEvent> weak_factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(AutoOpenCloseEvent);
+};
+
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_AUTO_OPEN_CLOSE_EVENT_H_
\ No newline at end of file
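
A sketch of the intended AutoOpenCloseEvent usage on a single thread; the "startup"/"LongPhase" literals are illustrative and, per the header comment, must outlive the event:

    base::trace_event::AutoOpenCloseEvent phase(
        base::trace_event::AutoOpenCloseEvent::ASYNC, "startup", "LongPhase");
    phase.Begin();  // Replayed with the original start time if tracing
                    // begins later ("auto-opening").
    // ... long-running work ...
    phase.End();    // If never called, the event is closed automatically
                    // when tracing ends ("auto-closing").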
diff --git a/base/trace_event/blame_context.cc b/base/trace_event/blame_context.cc
new file mode 100644
index 0000000..ae0b718
--- /dev/null
+++ b/base/trace_event/blame_context.cc
@@ -0,0 +1,111 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/blame_context.h"
+
+#include "base/strings/stringprintf.h"
+#include "base/trace_event/trace_event.h"
+#include "base/trace_event/trace_event_argument.h"
+
+namespace base {
+namespace trace_event {
+
+BlameContext::BlameContext(const char* category,
+                           const char* name,
+                           const char* type,
+                           const char* scope,
+                           int64_t id,
+                           const BlameContext* parent_context)
+    : category_(category),
+      name_(name),
+      type_(type),
+      scope_(scope),
+      id_(id),
+      parent_scope_(parent_context ? parent_context->scope() : nullptr),
+      parent_id_(parent_context ? parent_context->id() : 0),
+      category_group_enabled_(nullptr),
+      weak_factory_(this) {
+  DCHECK(!parent_context || !std::strcmp(name_, parent_context->name()))
+      << "Parent blame context must have the same name";
+}
+
+BlameContext::~BlameContext() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  DCHECK(WasInitialized());
+  TRACE_EVENT_API_ADD_TRACE_EVENT(
+      TRACE_EVENT_PHASE_DELETE_OBJECT, category_group_enabled_, type_, scope_,
+      id_, 0, nullptr, nullptr, nullptr, nullptr, TRACE_EVENT_FLAG_HAS_ID);
+  trace_event::TraceLog::GetInstance()->RemoveAsyncEnabledStateObserver(this);
+}
+
+void BlameContext::Enter() {
+  DCHECK(WasInitialized());
+  TRACE_EVENT_API_ADD_TRACE_EVENT(TRACE_EVENT_PHASE_ENTER_CONTEXT,
+                                  category_group_enabled_, name_, scope_, id_,
+                                  0 /* num_args */, nullptr, nullptr, nullptr,
+                                  nullptr, TRACE_EVENT_FLAG_HAS_ID);
+}
+
+void BlameContext::Leave() {
+  DCHECK(WasInitialized());
+  TRACE_EVENT_API_ADD_TRACE_EVENT(TRACE_EVENT_PHASE_LEAVE_CONTEXT,
+                                  category_group_enabled_, name_, scope_, id_,
+                                  0 /* num_args */, nullptr, nullptr, nullptr,
+                                  nullptr, TRACE_EVENT_FLAG_HAS_ID);
+}
+
+void BlameContext::TakeSnapshot() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  DCHECK(WasInitialized());
+  if (!*category_group_enabled_)
+    return;
+  std::unique_ptr<trace_event::TracedValue> snapshot(
+      new trace_event::TracedValue);
+  AsValueInto(snapshot.get());
+  static const char* const kArgName = "snapshot";
+  const int kNumArgs = 1;
+  unsigned char arg_types[1] = {TRACE_VALUE_TYPE_CONVERTABLE};
+  std::unique_ptr<trace_event::ConvertableToTraceFormat> arg_values[1] = {
+      std::move(snapshot)};
+  TRACE_EVENT_API_ADD_TRACE_EVENT(TRACE_EVENT_PHASE_SNAPSHOT_OBJECT,
+                                  category_group_enabled_, type_, scope_, id_,
+                                  kNumArgs, &kArgName, arg_types, nullptr,
+                                  arg_values, TRACE_EVENT_FLAG_HAS_ID);
+}
+
+void BlameContext::OnTraceLogEnabled() {
+  DCHECK(WasInitialized());
+  TakeSnapshot();
+}
+
+void BlameContext::OnTraceLogDisabled() {}
+
+void BlameContext::AsValueInto(trace_event::TracedValue* state) {
+  DCHECK(WasInitialized());
+  if (!parent_id_)
+    return;
+  state->BeginDictionary("parent");
+  state->SetString("id_ref", StringPrintf("0x%" PRIx64, parent_id_));
+  state->SetString("scope", parent_scope_);
+  state->EndDictionary();
+}
+
+void BlameContext::Initialize() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  category_group_enabled_ =
+      TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(category_);
+  TRACE_EVENT_API_ADD_TRACE_EVENT(
+      TRACE_EVENT_PHASE_CREATE_OBJECT, category_group_enabled_, type_, scope_,
+      id_, 0, nullptr, nullptr, nullptr, nullptr, TRACE_EVENT_FLAG_HAS_ID);
+  trace_event::TraceLog::GetInstance()->AddAsyncEnabledStateObserver(
+      weak_factory_.GetWeakPtr());
+  TakeSnapshot();
+}
+
+bool BlameContext::WasInitialized() const {
+  return category_group_enabled_ != nullptr;
+}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/blame_context.h b/base/trace_event/blame_context.h
new file mode 100644
index 0000000..a973a28
--- /dev/null
+++ b/base/trace_event/blame_context.h
@@ -0,0 +1,138 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_BLAME_CONTEXT_H_
+#define BASE_TRACE_EVENT_BLAME_CONTEXT_H_
+
+#include <inttypes.h>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/threading/thread_checker.h"
+#include "base/trace_event/trace_log.h"
+
+namespace base {
+namespace trace_event {
+class TracedValue;
+}
+
+namespace trace_event {
+
+// A blame context represents a logical unit to which we want to attribute
+// different costs (e.g., CPU, network, or memory usage). An example of a blame
+// context is an <iframe> element on a web page. Different subsystems can
+// "enter" and "leave" blame contexts to indicate that they are doing work which
+// should be accounted against this blame context.
+//
+// A blame context can optionally have a parent context, forming a blame context
+// tree. When work is attributed to a particular blame context, it is considered
+// to count against all of that context's children too. This is useful when work
+// cannot be exactly attributed into a more specific context. For example,
+// JavaScript garbage collection generally needs to inspect all objects on a
+// page instead of looking at each <iframe> individually. In this case the work
+// should be attributed to a blame context which is the parent of all <iframe>
+// blame contexts.
+class BASE_EXPORT BlameContext
+    : public trace_event::TraceLog::AsyncEnabledStateObserver {
+ public:
+  // Construct a blame context belonging to the blame context tree |name|, using
+  // the tracing category |category|, identified by |id| from the |scope|
+  // namespace. |type| identifies the type of this object snapshot in the blame
+  // context tree. |parent_context| is the parent of this blame context or
+  // null. Note that all strings must have application lifetime.
+  //
+  // For example, a blame context which represents a specific <iframe> in a
+  // browser frame tree could be specified with:
+  //
+  //   category="blink",
+  //   name="FrameTree",
+  //   type="IFrame",
+  //   scope="IFrameIdentifier",
+  //   id=1234.
+  //
+  // Each <iframe> blame context could have another <iframe> context as a
+  // parent, or a top-level context which represents the entire browser:
+  //
+  //   category="blink",
+  //   name="FrameTree",
+  //   type="Browser",
+  //   scope="BrowserIdentifier",
+  //   id=1.
+  //
+  // Note that the |name| property is identical, signifying that both context
+  // types are part of the same tree.
+  //
+  BlameContext(const char* category,
+               const char* name,
+               const char* type,
+               const char* scope,
+               int64_t id,
+               const BlameContext* parent_context);
+  ~BlameContext() override;
+
+  // Initialize the blame context, automatically taking a snapshot if tracing is
+  // enabled. Must be called before any other methods on this class.
+  void Initialize();
+
+  // Indicate that the current thread is now doing work which should count
+  // against this blame context.  This function is allowed to be called in a
+  // thread different from where the blame context was created; however, any
+  // client doing that must be fully responsible for ensuring thread safety.
+  void Enter();
+
+  // Leave and stop doing work for a previously entered blame context. If
+  // another blame context belonging to the same tree was entered prior to this
+  // one, it becomes the active blame context for this thread again.  Similar
+  // to Enter(), this function can be called in a thread different from where
+  // the blame context was created, and the same requirement on thread safety
+  // must be satisfied.
+  void Leave();
+
+  // Record a snapshot of the blame context. This is normally only needed if a
+  // blame context subclass defines custom properties (see AsValueInto) and one
+  // or more of those properties have changed.
+  void TakeSnapshot();
+
+  const char* category() const { return category_; }
+  const char* name() const { return name_; }
+  const char* type() const { return type_; }
+  const char* scope() const { return scope_; }
+  int64_t id() const { return id_; }
+
+  // trace_event::TraceLog::AsyncEnabledStateObserver implementation:
+  void OnTraceLogEnabled() override;
+  void OnTraceLogDisabled() override;
+
+ protected:
+  // Serialize the properties of this blame context into |state|. Subclasses can
+  // override this method to record additional properties (e.g, the URL for an
+  // <iframe> blame context). Note that an overridden implementation must still
+  // call this base method.
+  virtual void AsValueInto(trace_event::TracedValue* state);
+
+ private:
+  bool WasInitialized() const;
+
+  // The following string pointers have application lifetime.
+  const char* category_;
+  const char* name_;
+  const char* type_;
+  const char* scope_;
+  const int64_t id_;
+
+  const char* parent_scope_;
+  const int64_t parent_id_;
+
+  const unsigned char* category_group_enabled_;
+
+  ThreadChecker thread_checker_;
+  WeakPtrFactory<BlameContext> weak_factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(BlameContext);
+};
+
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_BLAME_CONTEXT_H_
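
Following the <iframe> example in the header comment, a concrete subclass might look like the sketch below. The class name and the extra "url" property are illustrative; note that an overriding AsValueInto() must still call the base method:

    class FrameBlameContext : public base::trace_event::BlameContext {
     public:
      FrameBlameContext(int64_t frame_id, const FrameBlameContext* parent)
          : BlameContext("blink", "FrameTree", "IFrame", "IFrameIdentifier",
                         frame_id, parent) {}

     protected:
      void AsValueInto(base::trace_event::TracedValue* state) override {
        BlameContext::AsValueInto(state);  // Required by the base class.
        state->SetString("url", "https://example.test/");
      }
    };

    // FrameBlameContext frame(1234, nullptr);
    // frame.Initialize();  // Must precede Enter()/Leave()/TakeSnapshot().
    // frame.Enter(); /* attributed work */ frame.Leave();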
diff --git a/base/trace_event/blame_context_unittest.cc b/base/trace_event/blame_context_unittest.cc
new file mode 100644
index 0000000..12e7857
--- /dev/null
+++ b/base/trace_event/blame_context_unittest.cc
@@ -0,0 +1,175 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/blame_context.h"
+
+#include "base/json/json_writer.h"
+#include "base/message_loop/message_loop.h"
+#include "base/test/trace_event_analyzer.h"
+#include "base/trace_event/trace_event_argument.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace trace_event {
+namespace {
+
+const char kTestBlameContextCategory[] = "test";
+const char kDisabledTestBlameContextCategory[] = "disabled-by-default-test";
+const char kTestBlameContextName[] = "TestBlameContext";
+const char kTestBlameContextType[] = "TestBlameContextType";
+const char kTestBlameContextScope[] = "TestBlameContextScope";
+
+class TestBlameContext : public BlameContext {
+ public:
+  explicit TestBlameContext(int id)
+      : BlameContext(kTestBlameContextCategory,
+                     kTestBlameContextName,
+                     kTestBlameContextType,
+                     kTestBlameContextScope,
+                     id,
+                     nullptr) {}
+
+  TestBlameContext(int id, const TestBlameContext& parent)
+      : BlameContext(kTestBlameContextCategory,
+                     kTestBlameContextName,
+                     kTestBlameContextType,
+                     kTestBlameContextScope,
+                     id,
+                     &parent) {}
+
+ protected:
+  void AsValueInto(trace_event::TracedValue* state) override {
+    BlameContext::AsValueInto(state);
+    state->SetBoolean("crossStreams", false);
+  }
+};
+
+class DisabledTestBlameContext : public BlameContext {
+ public:
+  explicit DisabledTestBlameContext(int id)
+      : BlameContext(kDisabledTestBlameContextCategory,
+                     kTestBlameContextName,
+                     kTestBlameContextType,
+                     kTestBlameContextScope,
+                     id,
+                     nullptr) {}
+};
+
+class BlameContextTest : public testing::Test {
+ protected:
+  MessageLoop loop_;
+};
+
+TEST_F(BlameContextTest, EnterAndLeave) {
+  using trace_analyzer::Query;
+  trace_analyzer::Start("*");
+  {
+    TestBlameContext blame_context(0x1234);
+    blame_context.Initialize();
+    blame_context.Enter();
+    blame_context.Leave();
+  }
+  auto analyzer = trace_analyzer::Stop();
+
+  trace_analyzer::TraceEventVector events;
+  Query q = Query::EventPhaseIs(TRACE_EVENT_PHASE_ENTER_CONTEXT) ||
+            Query::EventPhaseIs(TRACE_EVENT_PHASE_LEAVE_CONTEXT);
+  analyzer->FindEvents(q, &events);
+
+  EXPECT_EQ(2u, events.size());
+  EXPECT_EQ(TRACE_EVENT_PHASE_ENTER_CONTEXT, events[0]->phase);
+  EXPECT_EQ(kTestBlameContextCategory, events[0]->category);
+  EXPECT_EQ(kTestBlameContextName, events[0]->name);
+  EXPECT_EQ("0x1234", events[0]->id);
+  EXPECT_EQ(TRACE_EVENT_PHASE_LEAVE_CONTEXT, events[1]->phase);
+  EXPECT_EQ(kTestBlameContextCategory, events[1]->category);
+  EXPECT_EQ(kTestBlameContextName, events[1]->name);
+  EXPECT_EQ("0x1234", events[1]->id);
+}
+
+TEST_F(BlameContextTest, DifferentCategories) {
+  // Ensure there is no cross talk between blame contexts from different
+  // categories.
+  using trace_analyzer::Query;
+  trace_analyzer::Start("*");
+  {
+    TestBlameContext blame_context(0x1234);
+    DisabledTestBlameContext disabled_blame_context(0x5678);
+    blame_context.Initialize();
+    blame_context.Enter();
+    blame_context.Leave();
+    disabled_blame_context.Initialize();
+    disabled_blame_context.Enter();
+    disabled_blame_context.Leave();
+  }
+  auto analyzer = trace_analyzer::Stop();
+
+  trace_analyzer::TraceEventVector events;
+  Query q = Query::EventPhaseIs(TRACE_EVENT_PHASE_ENTER_CONTEXT) ||
+            Query::EventPhaseIs(TRACE_EVENT_PHASE_LEAVE_CONTEXT);
+  analyzer->FindEvents(q, &events);
+
+  // None of the events from the disabled-by-default category should show up.
+  EXPECT_EQ(2u, events.size());
+  EXPECT_EQ(TRACE_EVENT_PHASE_ENTER_CONTEXT, events[0]->phase);
+  EXPECT_EQ(kTestBlameContextCategory, events[0]->category);
+  EXPECT_EQ(kTestBlameContextName, events[0]->name);
+  EXPECT_EQ("0x1234", events[0]->id);
+  EXPECT_EQ(TRACE_EVENT_PHASE_LEAVE_CONTEXT, events[1]->phase);
+  EXPECT_EQ(kTestBlameContextCategory, events[1]->category);
+  EXPECT_EQ(kTestBlameContextName, events[1]->name);
+  EXPECT_EQ("0x1234", events[1]->id);
+}
+
+TEST_F(BlameContextTest, TakeSnapshot) {
+  using trace_analyzer::Query;
+  trace_analyzer::Start("*");
+  {
+    TestBlameContext parent_blame_context(0x5678);
+    TestBlameContext blame_context(0x1234, parent_blame_context);
+    parent_blame_context.Initialize();
+    blame_context.Initialize();
+    blame_context.TakeSnapshot();
+  }
+  auto analyzer = trace_analyzer::Stop();
+
+  trace_analyzer::TraceEventVector events;
+  Query q = Query::EventPhaseIs(TRACE_EVENT_PHASE_SNAPSHOT_OBJECT);
+  analyzer->FindEvents(q, &events);
+
+  // We should have 3 snapshots: one for each of the two calls to Initialize()
+  // and one from the explicit call to TakeSnapshot().
+  EXPECT_EQ(3u, events.size());
+  EXPECT_EQ(kTestBlameContextCategory, events[0]->category);
+  EXPECT_EQ(kTestBlameContextType, events[0]->name);
+  EXPECT_EQ("0x5678", events[0]->id);
+  EXPECT_TRUE(events[0]->HasArg("snapshot"));
+
+  EXPECT_EQ(kTestBlameContextCategory, events[1]->category);
+  EXPECT_EQ(kTestBlameContextType, events[1]->name);
+  EXPECT_EQ("0x1234", events[1]->id);
+  EXPECT_TRUE(events[1]->HasArg("snapshot"));
+
+  EXPECT_EQ(kTestBlameContextCategory, events[2]->category);
+  EXPECT_EQ(kTestBlameContextType, events[2]->name);
+  EXPECT_EQ("0x1234", events[2]->id);
+  EXPECT_TRUE(events[2]->HasArg("snapshot"));
+
+  const char kExpectedSnapshotJson[] =
+      "{"
+          "\"crossStreams\":false,"
+          "\"parent\":{"
+              "\"id_ref\":\"0x5678\","
+              "\"scope\":\"TestBlameContextScope\""
+          "}"
+      "}";
+
+  std::string snapshot_json;
+  JSONWriter::Write(*events[2]->GetKnownArgAsValue("snapshot"), &snapshot_json);
+  EXPECT_EQ(kExpectedSnapshotJson, snapshot_json);
+}
+
+}  // namespace
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/category_registry.cc b/base/trace_event/category_registry.cc
new file mode 100644
index 0000000..e7c1460
--- /dev/null
+++ b/base/trace_event/category_registry.cc
@@ -0,0 +1,156 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/category_registry.h"
+
+#include <string.h>
+
+#include <type_traits>
+
+#include "base/atomicops.h"
+#include "base/debug/leak_annotations.h"
+#include "base/logging.h"
+#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
+#include "base/trace_event/trace_category.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+constexpr size_t kMaxCategories = 200;
+const int kNumBuiltinCategories = 4;
+
+// |g_categories| might end up creating dynamic initializers if it is not POD.
+static_assert(std::is_pod<TraceCategory>::value, "TraceCategory must be POD");
+
+// These entries must be kept consistent with the kCategory* consts below.
+TraceCategory g_categories[kMaxCategories] = {
+    {0, 0, "tracing categories exhausted; must increase kMaxCategories"},
+    {0, 0, "tracing already shutdown"},  // See kCategoryAlreadyShutdown below.
+    {0, 0, "__metadata"},                // See kCategoryMetadata below.
+    {0, 0, "toplevel"},                  // Warmup the toplevel category.
+};
+
+base::subtle::AtomicWord g_category_index = kNumBuiltinCategories;
+
+bool IsValidCategoryPtr(const TraceCategory* category) {
+  // If any of these are hit, something has cached a corrupt category pointer.
+  uintptr_t ptr = reinterpret_cast<uintptr_t>(category);
+  return ptr % sizeof(void*) == 0 &&
+         ptr >= reinterpret_cast<uintptr_t>(&g_categories[0]) &&
+         ptr <= reinterpret_cast<uintptr_t>(&g_categories[kMaxCategories - 1]);
+}
+
+}  // namespace
+
+// static
+TraceCategory* const CategoryRegistry::kCategoryExhausted = &g_categories[0];
+TraceCategory* const CategoryRegistry::kCategoryAlreadyShutdown =
+    &g_categories[1];
+TraceCategory* const CategoryRegistry::kCategoryMetadata = &g_categories[2];
+
+// static
+void CategoryRegistry::Initialize() {
+  // Trace is enabled or disabled on one thread while other threads are
+  // accessing the enabled flag. We don't care whether edge-case events are
+  // traced or not, so we allow races on the enabled flag to keep the trace
+  // macros fast.
+  for (size_t i = 0; i < kMaxCategories; ++i) {
+    ANNOTATE_BENIGN_RACE(g_categories[i].state_ptr(),
+                         "trace_event category enabled");
+    // If this DCHECK is hit in a test it means that ResetForTesting() is not
+    // called and the categories state leaks between test fixtures.
+    DCHECK(!g_categories[i].is_enabled());
+  }
+}
+
+// static
+void CategoryRegistry::ResetForTesting() {
+  // reset_for_testing clears up only the enabled state and filters. The
+  // categories themselves cannot be cleared up because the static pointers
+  // injected by the macros still point to them and cannot be reset.
+  for (size_t i = 0; i < kMaxCategories; ++i)
+    g_categories[i].reset_for_testing();
+}
+
+// static
+TraceCategory* CategoryRegistry::GetCategoryByName(const char* category_name) {
+  DCHECK(!strchr(category_name, '"'))
+      << "Category names may not contain double quote";
+
+  // The |g_categories| array is append-only, so we avoid using a lock for the
+  // fast path.
+  size_t category_index = base::subtle::Acquire_Load(&g_category_index);
+
+  // Search for pre-existing category group.
+  for (size_t i = 0; i < category_index; ++i) {
+    if (strcmp(g_categories[i].name(), category_name) == 0) {
+      return &g_categories[i];
+    }
+  }
+  return nullptr;
+}
+
+bool CategoryRegistry::GetOrCreateCategoryLocked(
+    const char* category_name,
+    CategoryInitializerFn category_initializer_fn,
+    TraceCategory** category) {
+  // This is the slow path: the lock is not held in the fast path
+  // (GetCategoryByName), so more than one thread could have reached here
+  // trying to add the same category.
+  *category = GetCategoryByName(category_name);
+  if (*category)
+    return false;
+
+  // Create a new category.
+  size_t category_index = base::subtle::Acquire_Load(&g_category_index);
+  if (category_index >= kMaxCategories) {
+    NOTREACHED() << "must increase kMaxCategories";
+    *category = kCategoryExhausted;
+    return false;
+  }
+
+  // TODO(primiano): this strdup should be removed. The only documented reason
+  // for it was TraceWatchEvent, which is gone. However, something might have
+  // ended up relying on this. Needs some auditing before removal.
+  const char* category_name_copy = strdup(category_name);
+  ANNOTATE_LEAKING_OBJECT_PTR(category_name_copy);
+
+  *category = &g_categories[category_index];
+  DCHECK(!(*category)->is_valid());
+  DCHECK(!(*category)->is_enabled());
+  (*category)->set_name(category_name_copy);
+  category_initializer_fn(*category);
+
+  // Update the max index now.
+  base::subtle::Release_Store(&g_category_index, category_index + 1);
+  return true;
+}
+
+// static
+const TraceCategory* CategoryRegistry::GetCategoryByStatePtr(
+    const uint8_t* category_state) {
+  const TraceCategory* category = TraceCategory::FromStatePtr(category_state);
+  DCHECK(IsValidCategoryPtr(category));
+  return category;
+}
+
+// static
+bool CategoryRegistry::IsBuiltinCategory(const TraceCategory* category) {
+  DCHECK(IsValidCategoryPtr(category));
+  return category < &g_categories[kNumBuiltinCategories];
+}
+
+// static
+CategoryRegistry::Range CategoryRegistry::GetAllCategories() {
+  // The |g_categories| array is append-only. We only have to guarantee that
+  // we never return an index to a category which is still being initialized
+  // by GetOrCreateCategoryLocked().
+  size_t category_index = base::subtle::Acquire_Load(&g_category_index);
+  return CategoryRegistry::Range(&g_categories[0],
+                                 &g_categories[category_index]);
+}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/category_registry.h b/base/trace_event/category_registry.h
new file mode 100644
index 0000000..9c08efa
--- /dev/null
+++ b/base/trace_event/category_registry.h
@@ -0,0 +1,93 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_CATEGORY_REGISTRY_H_
+#define BASE_TRACE_EVENT_CATEGORY_REGISTRY_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/base_export.h"
+#include "base/logging.h"
+
+namespace base {
+namespace trace_event {
+
+struct TraceCategory;
+class TraceCategoryTest;
+class TraceLog;
+
+// Allows fast and thread-safe access to the state of all tracing categories.
+// All the methods in this class can be concurrently called on multiple threads,
+// unless otherwise noted (e.g., GetOrCreateCategoryLocked).
+// This is a fully static class with global state so that known categories can
+// be statically defined as global linker-initialized structs, without
+// requiring static initializers.
+class BASE_EXPORT CategoryRegistry {
+ public:
+  // Allows for-each iterations over a slice of the categories array.
+  class Range {
+   public:
+    Range(TraceCategory* begin, TraceCategory* end) : begin_(begin), end_(end) {
+      DCHECK_LE(begin, end);
+    }
+    TraceCategory* begin() const { return begin_; }
+    TraceCategory* end() const { return end_; }
+
+   private:
+    TraceCategory* const begin_;
+    TraceCategory* const end_;
+  };
+
+  // Known categories.
+  static TraceCategory* const kCategoryExhausted;
+  static TraceCategory* const kCategoryMetadata;
+  static TraceCategory* const kCategoryAlreadyShutdown;
+
+  // Returns a category entry from the Category.state_ptr() pointer.
+  // TODO(primiano): trace macros should just keep a pointer to the entire
+  // TraceCategory, not just the enabled state pointer. That would remove the
+  // need for this function and make everything cleaner at no extra cost (as
+  // long as the |state_| is the first field of the struct, which can be
+  // guaranteed via static_assert, see TraceCategory ctor).
+  static const TraceCategory* GetCategoryByStatePtr(
+      const uint8_t* category_state);
+
+  // Returns a category from its name, or nullptr if not found.
+  // The returned pointer is an indefinitely lived pointer to the TraceCategory
+  // owned by the registry. TRACE_EVENTx macros will cache this pointer and use
+  // it for checks in their fast paths.
+  static TraceCategory* GetCategoryByName(const char* category_name);
+
+  static bool IsBuiltinCategory(const TraceCategory*);
+
+ private:
+  friend class TraceCategoryTest;
+  friend class TraceLog;
+  using CategoryInitializerFn = void (*)(TraceCategory*);
+
+  // Only for debugging/testing purposes; this is a no-op in release builds.
+  static void Initialize();
+
+  // Resets the state of all categories, to clear up the state between tests.
+  static void ResetForTesting();
+
+  // Used to get/create a category in the slow path. If the category exists
+  // already, this has the same effect as GetCategoryByName and returns false.
+  // If not, a new category is created and the CategoryInitializerFn is invoked
+  // before returning true. The caller must guarantee serialization: either
+  // call this method from a single thread or hold a lock when calling this.
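+  //
+  // A minimal calling sketch (the category name and initializer here are
+  // illustrative; a capture-less lambda converts to CategoryInitializerFn):
+  //   TraceCategory* category = nullptr;
+  //   bool created = GetOrCreateCategoryLocked(
+  //       "my_category", [](TraceCategory*) {}, &category);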
+  static bool GetOrCreateCategoryLocked(const char* category_name,
+                                        CategoryInitializerFn,
+                                        TraceCategory**);
+
+  // Allows iterating over the valid categories in a for-each loop.
+  // This includes builtin categories such as __metadata.
+  static Range GetAllCategories();
+};
+
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_CATEGORY_REGISTRY_H_
diff --git a/base/trace_event/cfi_backtrace_android.cc b/base/trace_event/cfi_backtrace_android.cc
new file mode 100644
index 0000000..8fd8b95
--- /dev/null
+++ b/base/trace_event/cfi_backtrace_android.cc
@@ -0,0 +1,314 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/cfi_backtrace_android.h"
+
+#include <sys/mman.h>
+#include <sys/types.h>
+
+#include "base/android/apk_assets.h"
+
+#if !defined(ARCH_CPU_ARMEL)
+#error This file should not be built for this architecture.
+#endif
+
+/*
+Basics of unwinding:
+For each instruction in a function we need to know the offset of SP (Stack
+Pointer) required to reach the previous function's stack frame. To know which
+function is being invoked, we need the return address of the next function. The
+CFI information for an instruction is made up of 2 offsets, the CFA (Call Frame
+Address) offset and the RA (Return Address) offset. The CFA offset is the
+change in SP made by the function up to the current instruction. It depends on
+the amount of memory the function has allocated on the stack plus any registers
+the function has saved that need to be restored at the end of the function. So,
+at each instruction the CFA offset gives the offset from the original SP before
+the function call. The RA offset tells us the offset from the previous SP into
+the current function where the return address is stored.
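+
+For example, the breakpad rule "STACK CFI 1002 .cfa: sp 272 + .ra: .cfa -4 +"
+(taken from the unit test input for this file) says that at pc 0x1002 the
+previous frame's SP is the current SP plus 272, and the return address is
+stored 4 bytes below that recovered SP.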
+
+The unwind table file has 2 tables UNW_INDEX and UNW_DATA, inspired from ARM
+EHABI format. The first table contains function addresses and an index into the
+UNW_DATA table. The second table contains one or more rows for the function
+unwind information.
+
+UNW_INDEX contains two columns of N rows each, where N is the number of
+functions.
+  1. The first column contains 4 byte rows holding each function's start
+     address as an offset from the start of the binary, in sorted order.
+  2. For each function address, the second column contains 2 byte indices in
+     the same order. The indices are offsets (in counts of 2 bytes) of the CFI
+     data from the start of UNW_DATA.
+The last entry in the table always contains CANT_UNWIND index to specify the
+end address of the last function.
+
+UNW_DATA contains the data for all the functions. Each function's data contains
+N rows. The data found at the address pointed to from UNW_INDEX is:
+  2 bytes: N - the number of rows that belong to the current function.
+  N * 4 bytes: N rows of data. 16 bits : Address offset from function start.
+                               14 bits : CFA offset / 4.
+                                2 bits : RA offset / 4.
+If the RA offset of a row is 0, then use the offset of the previous row in the
+same function.
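+For example (an illustrative value), a row whose 16 CFI data bits are 0x0112
+decodes to a CFA offset of (0x0112 & 0xfffc) = 0x110 (272 bytes) and an RA
+offset of (0x0112 & 0x3) * 4 = 8 bytes.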
+TODO(ssid): Make sure RA offset is always present.
+
+See extract_unwind_tables.py for details about how this data is extracted from
+breakpad symbol files.
+*/
+
+extern "C" {
+extern char __executable_start;
+extern char _etext;
+}
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+// The value of the index when the function does not have unwind information.
+constexpr uint32_t kCantUnwind = 0xFFFF;
+
+// The mask on the CFI row data that is used to get the high 14 bits and
+// multiply it by 4 to get CFA offset. Since the last 2 bits are masked out, a
+// shift is not necessary.
+constexpr uint16_t kCFAMask = 0xfffc;
+
+// The mask on the CFI row data that is used to get the low 2 bits and multiply
+// it by 4 to get the RA offset.
+constexpr uint16_t kRAMask = 0x3;
+constexpr uint16_t kRAShift = 2;
+
+// The code in this file assumes we are running in 32-bit builds since all the
+// addresses in the unwind table are specified in 32 bits.
+static_assert(sizeof(uintptr_t) == 4,
+              "The unwind table format is only valid for 32 bit builds.");
+
+// The CFI data in the UNW_DATA table starts with the number of rows (N),
+// followed by N rows of 4 bytes each. CFIUnwindDataRow represents a single
+// row of CFI data of a function in the table. Since we cast the memory just
+// past the row count into an array of CFIUnwindDataRow, the size of the struct
+// must be 4 bytes and the order of the members is fixed according to the given
+// format. The first 2 bytes give the address offset within the function and
+// the last 2 bytes give the CFI data for that offset.
+struct CFIUnwindDataRow {
+  // The address of the instruction in terms of offset from the start of the
+  // function.
+  uint16_t addr_offset;
+  // Represents the CFA and RA offsets to get information about next stack
+  // frame. This is the CFI data at the point before executing the instruction
+  // at |addr_offset| from the start of the function.
+  uint16_t cfi_data;
+
+  // Return the RA offset for the current unwind row.
+  size_t ra_offset() const { return (cfi_data & kRAMask) << kRAShift; }
+
+  // Returns the CFA offset for the current unwind row.
+  size_t cfa_offset() const { return cfi_data & kCFAMask; }
+};
+
+static_assert(
+    sizeof(CFIUnwindDataRow) == 4,
+    "The CFIUnwindDataRow struct must be exactly 4 bytes for searching.");
+
+}  // namespace
+
+// static
+CFIBacktraceAndroid* CFIBacktraceAndroid::GetInitializedInstance() {
+  static CFIBacktraceAndroid* instance = new CFIBacktraceAndroid();
+  return instance;
+}
+
+CFIBacktraceAndroid::CFIBacktraceAndroid()
+    : thread_local_cfi_cache_(
+          [](void* ptr) { delete static_cast<CFICache*>(ptr); }) {
+  Initialize();
+}
+
+CFIBacktraceAndroid::~CFIBacktraceAndroid() {}
+
+void CFIBacktraceAndroid::Initialize() {
+  // The address of |_etext| gives the end of the .text section in the binary.
+  // This value is more accurate than parsing the memory map since the mapped
+  // regions are usually larger than the .text section.
+  executable_end_addr_ = reinterpret_cast<uintptr_t>(&_etext);
+  // The address of |__executable_start| gives the start address of the
+  // executable. This value is used to find the offset address of the
+  // instruction in binary from PC.
+  executable_start_addr_ = reinterpret_cast<uintptr_t>(&__executable_start);
+
+  // This file name is defined by extract_unwind_tables.gni.
+  static constexpr char kCfiFileName[] = "assets/unwind_cfi_32";
+  MemoryMappedFile::Region cfi_region;
+  int fd = base::android::OpenApkAsset(kCfiFileName, &cfi_region);
+  if (fd < 0)
+    return;
+  cfi_mmap_ = std::make_unique<MemoryMappedFile>();
+  // The CFI region starts at |cfi_region.offset|.
+  if (!cfi_mmap_->Initialize(base::File(fd), cfi_region))
+    return;
+
+  ParseCFITables();
+  can_unwind_stack_frames_ = true;
+}
+
+void CFIBacktraceAndroid::ParseCFITables() {
+  // The first 4 bytes in the file is the size of UNW_INDEX table.
+  static constexpr size_t kUnwIndexRowSize =
+      sizeof(*unw_index_function_col_) + sizeof(*unw_index_indices_col_);
+  size_t unw_index_size = 0;
+  memcpy(&unw_index_size, cfi_mmap_->data(), sizeof(unw_index_size));
+  DCHECK_EQ(0u, unw_index_size % kUnwIndexRowSize);
+  // UNW_INDEX table starts after 4 bytes.
+  unw_index_function_col_ =
+      reinterpret_cast<const uintptr_t*>(cfi_mmap_->data()) + 1;
+  unw_index_row_count_ = unw_index_size / kUnwIndexRowSize;
+  unw_index_indices_col_ = reinterpret_cast<const uint16_t*>(
+      unw_index_function_col_ + unw_index_row_count_);
+
+  // The UNW_DATA table data is right after the end of UNW_INDEX table.
+  // Interpret the UNW_DATA table as an array of 2 byte numbers since the
+  // indexes we have from the UNW_INDEX table are in terms of 2 bytes.
+  unw_data_start_addr_ = unw_index_indices_col_ + unw_index_row_count_;
+}
+
+size_t CFIBacktraceAndroid::Unwind(const void** out_trace, size_t max_depth) {
+  // This function walks the stack using the call frame information to find the
+  // return addresses of all the functions in the call stack that belong to the
+  // current binary. For each function the CFI table defines the offset of the
+  // previous call frame and the offset where the return address is stored.
+  if (!can_unwind_stack_frames())
+    return 0;
+
+  // Get the current register state. This register state can be taken at any
+  // point in the function and the unwind information would be for this point.
+  // Define local variables before trying to get the current PC and SP to make
+  // sure the register values obtained are consistent with each other.
+  uintptr_t pc = 0, sp = 0;
+  asm volatile("mov %0, pc" : "=r"(pc));
+  asm volatile("mov %0, sp" : "=r"(sp));
+
+  // We can only unwind as long as the pc is within the chrome.so.
+  size_t depth = 0;
+  while (pc > executable_start_addr_ && pc <= executable_end_addr_ &&
+         depth < max_depth) {
+    out_trace[depth++] = reinterpret_cast<void*>(pc);
+    // The offset of function from the start of the chrome.so binary:
+    uintptr_t func_addr = pc - executable_start_addr_;
+    CFIRow cfi{};
+    if (!FindCFIRowForPC(func_addr, &cfi))
+      break;
+
+    // The rules for unwinding using the CFI information are:
+    // SP_prev = SP_cur + cfa_offset and
+    // PC_prev = * (SP_prev - ra_offset).
+    sp = sp + cfi.cfa_offset;
+    memcpy(&pc, reinterpret_cast<uintptr_t*>(sp - cfi.ra_offset),
+           sizeof(uintptr_t));
+  }
+  return depth;
+}
+
+bool CFIBacktraceAndroid::FindCFIRowForPC(uintptr_t func_addr,
+                                          CFIBacktraceAndroid::CFIRow* cfi) {
+  auto* cache = GetThreadLocalCFICache();
+  *cfi = {0};
+  if (cache->Find(func_addr, cfi))
+    return true;
+
+  // Consider each column of UNW_INDEX table as arrays of uintptr_t (function
+  // addresses) and uint16_t (indices). Define start and end iterator on the
+  // first column array (addresses) and use std::lower_bound() to binary search
+  // on this array to find the required function address.
+  static const uintptr_t* const unw_index_fn_end =
+      unw_index_function_col_ + unw_index_row_count_;
+  const uintptr_t* found =
+      std::lower_bound(unw_index_function_col_, unw_index_fn_end, func_addr);
+
+  // If |found| is the first entry, then the given function is not in the
+  // table. If the given pc is exactly the start address of a function, then we
+  // also cannot unwind.
+    return false;
+
+  // std::lower_bound() returns the first entry that is not less than the given
+  // address; since an exact match was rejected above, |found| points to the
+  // first address greater than the given address. So, the required entry is
+  // always the one before the iterator returned by std::lower_bound().
+  --found;
+  uintptr_t func_start_addr = *found;
+  size_t row_num = found - unw_index_function_col_;
+  uint16_t index = unw_index_indices_col_[row_num];
+  DCHECK_LE(func_start_addr, func_addr);
+  // If the index is CANT_UNWIND then we do not have unwind information for the
+  // function.
+  if (index == kCantUnwind)
+    return false;
+
+  // The unwind data for the current function is at the offset given by the
+  // index found in the UNW_INDEX table.
+  const uint16_t* unwind_data = unw_data_start_addr_ + index;
+  // The value of the first 2 bytes is the CFI data row count for the function.
+  uint16_t row_count = 0;
+  memcpy(&row_count, unwind_data, sizeof(row_count));
+  // The actual CFI rows start 2 bytes after |unwind_data|. Cast the data into
+  // an array of CFIUnwindDataRow since the struct is designed to represent
+  // each row. We must be careful to read only |row_count| elements of the
+  // array.
+  const CFIUnwindDataRow* function_data =
+      reinterpret_cast<const CFIUnwindDataRow*>(unwind_data + 1);
+
+  // Iterate through the CFI rows of the function to find the row that gives
+  // offset for the given instruction address.
+  CFIUnwindDataRow cfi_row = {0, 0};
+  uint16_t ra_offset = 0;
+  for (uint16_t i = 0; i < row_count; ++i) {
+    CFIUnwindDataRow row;
+    memcpy(&row, function_data + i, sizeof(CFIUnwindDataRow));
+    // The return address of the function is the instruction that has not yet
+    // been executed. The CFI row specifies the unwind info before executing
+    // the given instruction. If the given address is equal to the instruction
+    // offset, then use the current row. Otherwise use the row with the highest
+    // address less than the given address.
+    if (row.addr_offset + func_start_addr > func_addr)
+      break;
+
+    cfi_row = row;
+    // The ra offset of the last specified row should be used, if unspecified.
+    // So, keep updating the RA offset till we reach the correct CFI row.
+    // TODO(ssid): This should be fixed in the format and we should always
+    // output ra offset.
+    if (cfi_row.ra_offset())
+      ra_offset = cfi_row.ra_offset();
+  }
+  DCHECK_NE(0u, cfi_row.addr_offset);
+  *cfi = {cfi_row.cfa_offset(), ra_offset};
+  DCHECK(cfi->cfa_offset);
+  DCHECK(cfi->ra_offset);
+
+  // Safe to update since the cache is thread local.
+  cache->Add(func_addr, *cfi);
+  return true;
+}
+
+CFIBacktraceAndroid::CFICache* CFIBacktraceAndroid::GetThreadLocalCFICache() {
+  auto* cache = static_cast<CFICache*>(thread_local_cfi_cache_.Get());
+  if (!cache) {
+    cache = new CFICache();
+    thread_local_cfi_cache_.Set(cache);
+  }
+  return cache;
+}
+
+void CFIBacktraceAndroid::CFICache::Add(uintptr_t address, CFIRow cfi) {
+  cache_[address % kLimit] = {address, cfi};
+}
+
+bool CFIBacktraceAndroid::CFICache::Find(uintptr_t address, CFIRow* cfi) {
+  if (cache_[address % kLimit].address == address) {
+    *cfi = cache_[address % kLimit].cfi;
+    return true;
+  }
+  return false;
+}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/cfi_backtrace_android.h b/base/trace_event/cfi_backtrace_android.h
new file mode 100644
index 0000000..0c51332
--- /dev/null
+++ b/base/trace_event/cfi_backtrace_android.h
@@ -0,0 +1,157 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_CFI_BACKTRACE_ANDROID_H_
+#define BASE_TRACE_EVENT_CFI_BACKTRACE_ANDROID_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+
+#include "base/base_export.h"
+#include "base/debug/debugging_buildflags.h"
+#include "base/files/memory_mapped_file.h"
+#include "base/gtest_prod_util.h"
+#include "base/threading/thread_local_storage.h"
+
+namespace base {
+namespace trace_event {
+
+// This class is used to unwind stack frames in the current thread. The unwind
+// information (DWARF debug info) is stripped from the chrome binary and we do
+// not build with exception tables (ARM EHABI) in release builds. So, we use a
+// custom unwind table which is generated and added to specific Android builds
+// when the add_unwind_tables_in_apk build option is specified. This unwind
+// table contains information for unwinding stack frames when the function
+// calls are within lib[mono]chrome.so. The file is added as an asset to the
+// apk and the table is used to unwind stack frames for profiling. This class
+// implements methods to read and parse the unwind table and to unwind stack
+// frames using this data.
+class BASE_EXPORT CFIBacktraceAndroid {
+ public:
+  // Creates and initializes by memory mapping the unwind tables from apk assets
+  // on first call.
+  static CFIBacktraceAndroid* GetInitializedInstance();
+
+  // Returns true if stack unwinding is possible using the CFI unwind tables in
+  // the apk. There is no need to check this before each unwind call; it always
+  // returns the same value, based on whether the CFI tables are present in the
+  // binary.
+  bool can_unwind_stack_frames() const { return can_unwind_stack_frames_; }
+
+  // Returns the program counters by unwinding the stack of the current thread,
+  // with the latest call frame first. Unwinding works only if
+  // can_unwind_stack_frames() returns true. This function allocates memory
+  // from the heap for its caches. For each stack frame, this method searches
+  // through the memory-mapped unwind table to find the unwind information for
+  // the function and walks the stack to find all the return addresses. This
+  // only works up to the last function call from chrome.so; we do not have
+  // unwind information to unwind beyond any frame outside of chrome.so. Calls
+  // to Unwind() are thread safe and lock free once Initialize() has succeeded.
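+  //
+  // A minimal usage sketch (the depth of 64 is arbitrary):
+  //   const void* frames[64];
+  //   size_t depth =
+  //       CFIBacktraceAndroid::GetInitializedInstance()->Unwind(frames, 64);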
+  size_t Unwind(const void** out_trace, size_t max_depth);
+
+ private:
+  FRIEND_TEST_ALL_PREFIXES(CFIBacktraceAndroidTest, TestCFICache);
+  FRIEND_TEST_ALL_PREFIXES(CFIBacktraceAndroidTest, TestFindCFIRow);
+  FRIEND_TEST_ALL_PREFIXES(CFIBacktraceAndroidTest, TestUnwinding);
+
+  // The CFI information that correspond to an instruction.
+  struct CFIRow {
+    bool operator==(const CFIBacktraceAndroid::CFIRow& o) const {
+      return cfa_offset == o.cfa_offset && ra_offset == o.ra_offset;
+    }
+
+    // The offset of the call frame address of previous function from the
+    // current stack pointer. Rule for unwinding SP: SP_prev = SP_cur +
+    // cfa_offset.
+    uint16_t cfa_offset = 0;
+    // The offset of location of return address from the previous call frame
+    // address. Rule for unwinding PC: PC_prev = * (SP_prev - ra_offset).
+    uint16_t ra_offset = 0;
+  };
+
+  // A simple cache that stores entries in a table using prime modulo hashing.
+  // This cache with ~500 entries already gives us a 95% hit rate and fits in a
+  // single system page (usually 4KiB). Using a thread local cache for each
+  // thread gives us a 30% improvement in the performance of heap profiling.
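+  // For example, with kLimit = 509, Add(0x1000, cfi) stores into slot
+  // 0x1000 % 509 == 24; a later Add(0x1000 + 509, other_cfi) maps to the same
+  // slot and silently evicts the first entry.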
+  class CFICache {
+   public:
+    // Adds a new item to the cache. It replaces any existing item with the
+    // same hash.
+    // Constant time operation.
+    void Add(uintptr_t address, CFIRow cfi);
+
+    // Finds the given address and fills |cfi| with the info for that address.
+    // Returns true if found, otherwise false. Assumes |address| is never 0.
+    bool Find(uintptr_t address, CFIRow* cfi);
+
+   private:
+    FRIEND_TEST_ALL_PREFIXES(CFIBacktraceAndroidTest, TestCFICache);
+
+    // Size is the highest prime which fits the cache in a single system page,
+    // usually 4KiB. A prime is chosen to make sure addresses are hashed evenly.
+    static const int kLimit = 509;
+
+    struct AddrAndCFI {
+      uintptr_t address;
+      CFIRow cfi;
+    };
+    AddrAndCFI cache_[kLimit] = {};
+  };
+
+  static_assert(sizeof(CFIBacktraceAndroid::CFICache) < 4096,
+                "The cache does not fit in a single page.");
+
+  CFIBacktraceAndroid();
+  ~CFIBacktraceAndroid();
+
+  // Initializes unwind tables using the CFI asset file in the apk, if present.
+  // Also stores the limits of the mapped region of the lib[mono]chrome.so
+  // binary, since unwinding is only feasible for addresses within the .so
+  // file. Once initialized, the memory map of the unwind table is never
+  // cleared since we cannot guarantee that all the threads are done using the
+  // memory map when heap profiling is turned off. But since the memory map is
+  // kept clean, the system can choose to evict the unused pages when needed.
+  // Keeping the mapping does, however, still reduce the total amount of
+  // address space available to the process.
+  void Initialize();
+
+  // Finds the UNW_INDEX and UNW_DATA tables in the CFI file memory map.
+  void ParseCFITables();
+
+  // Finds the CFI row for the given |func_addr| in terms of offset from
+  // the start of the current binary.
+  bool FindCFIRowForPC(uintptr_t func_addr, CFIRow* out);
+
+  CFICache* GetThreadLocalCFICache();
+
+  // Details about the memory mapped region which contains the libchrome.so
+  // library file.
+  uintptr_t executable_start_addr_ = 0;
+  uintptr_t executable_end_addr_ = 0;
+
+  // The start address of the memory mapped unwind table asset file. Unique ptr
+  // because it is replaced in tests.
+  std::unique_ptr<MemoryMappedFile> cfi_mmap_;
+
+  // The UNW_INDEX table: Start address of the function address column. The
+  // memory segment corresponding to this column is treated as an array of
+  // uintptr_t.
+  const uintptr_t* unw_index_function_col_ = nullptr;
+  // The UNW_INDEX table: Start address of the index column. The memory segment
+  // corresponding to this column is treated as an array of uint16_t.
+  const uint16_t* unw_index_indices_col_ = nullptr;
+  // The number of rows in UNW_INDEX table.
+  size_t unw_index_row_count_ = 0;
+
+  // The start address of UNW_DATA table.
+  const uint16_t* unw_data_start_addr_ = nullptr;
+
+  bool can_unwind_stack_frames_ = false;
+
+  ThreadLocalStorage::Slot thread_local_cfi_cache_;
+};
+
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_CFI_BACKTRACE_ANDROID_H_
diff --git a/base/trace_event/cfi_backtrace_android_unittest.cc b/base/trace_event/cfi_backtrace_android_unittest.cc
new file mode 100644
index 0000000..3ad3d33
--- /dev/null
+++ b/base/trace_event/cfi_backtrace_android_unittest.cc
@@ -0,0 +1,197 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/cfi_backtrace_android.h"
+
+#include "base/files/file_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+void* GetPC() {
+  return __builtin_return_address(0);
+}
+
+}  // namespace
+
+TEST(CFIBacktraceAndroidTest, TestUnwinding) {
+  auto* unwinder = CFIBacktraceAndroid::GetInitializedInstance();
+  EXPECT_TRUE(unwinder->can_unwind_stack_frames());
+  EXPECT_GT(unwinder->executable_start_addr_, 0u);
+  EXPECT_GT(unwinder->executable_end_addr_, unwinder->executable_start_addr_);
+  EXPECT_GT(unwinder->cfi_mmap_->length(), 0u);
+
+  const size_t kMaxFrames = 100;
+  const void* frames[kMaxFrames];
+  size_t unwind_count = unwinder->Unwind(frames, kMaxFrames);
+  // Expect more than 2 frames in the result.
+  ASSERT_GT(unwind_count, 2u);
+  EXPECT_LE(unwind_count, kMaxFrames);
+
+  const size_t kMaxCurrentFuncCodeSize = 50;
+  const uintptr_t current_pc = reinterpret_cast<uintptr_t>(GetPC());
+  const uintptr_t actual_frame = reinterpret_cast<uintptr_t>(frames[2]);
+  EXPECT_NEAR(current_pc, actual_frame, kMaxCurrentFuncCodeSize);
+
+  for (size_t i = 0; i < unwind_count; ++i) {
+    EXPECT_GT(reinterpret_cast<uintptr_t>(frames[i]),
+              unwinder->executable_start_addr_);
+    EXPECT_LT(reinterpret_cast<uintptr_t>(frames[i]),
+              unwinder->executable_end_addr_);
+  }
+}
+
+// Flaky: https://bugs.chromium.org/p/chromium/issues/detail?id=829555
+TEST(CFIBacktraceAndroidTest, DISABLED_TestFindCFIRow) {
+  auto* unwinder = CFIBacktraceAndroid::GetInitializedInstance();
+  /* Input is generated from the CFI file:
+  STACK CFI INIT 1000 500
+  STACK CFI 1002 .cfa: sp 272 + .ra: .cfa -4 + ^ r4: .cfa -16 +
+  STACK CFI 1008 .cfa: sp 544 + .r1: .cfa -0 + ^ r4: .cfa -16 + ^
+  STACK CFI 1040 .cfa: sp 816 + .r1: .cfa -0 + ^ r4: .cfa -16 + ^
+  STACK CFI 1050 .cfa: sp 816 + .ra: .cfa -8 + ^ r4: .cfa -16 + ^
+  STACK CFI 1080 .cfa: sp 544 + .r1: .cfa -0 + ^ r4: .cfa -16 + ^
+
+  STACK CFI INIT 2000 22
+  STACK CFI 2004 .cfa: sp 16 + .ra: .cfa -12 + ^ r4: .cfa -16 + ^
+  STACK CFI 2008 .cfa: sp 16 + .ra: .cfa -12 + ^ r4: .cfa -16 + ^
+
+  STACK CFI INIT 2024 100
+  STACK CFI 2030 .cfa: sp 48 + .ra: .cfa -12 + ^ r4: .cfa -16 + ^
+  STACK CFI 2100 .cfa: sp 64 + .r1: .cfa -0 + ^ r4: .cfa -16 + ^
+
+  STACK CFI INIT 2200 10
+  STACK CFI 2204 .cfa: sp 44 + .ra: .cfa -8 + ^ r4: .cfa -16 + ^
+  */
+  uint16_t input[] = {// UNW_INDEX size
+                      0x2A,
+
+                      // UNW_INDEX address column (4 byte rows).
+                      0x0, 0x1000, 0x0, 0x1502, 0x0, 0x2000, 0x0, 0x2024, 0x0,
+                      0x2126, 0x0, 0x2200, 0x0, 0x2212, 0x0,
+
+                      // UNW_INDEX index column (2 byte rows).
+                      0x0, 0xffff, 0xb, 0x10, 0xffff, 0x15, 0xffff,
+
+                      // UNW_DATA table.
+                      0x5, 0x2, 0x111, 0x8, 0x220, 0x40, 0x330, 0x50, 0x332,
+                      0x80, 0x220, 0x2, 0x4, 0x13, 0x8, 0x13, 0x2, 0xc, 0x33,
+                      0xdc, 0x40, 0x1, 0x4, 0x2e};
+  FilePath temp_path;
+  CreateTemporaryFile(&temp_path);
+  EXPECT_EQ(
+      static_cast<int>(sizeof(input)),
+      WriteFile(temp_path, reinterpret_cast<char*>(input), sizeof(input)));
+
+  unwinder->cfi_mmap_.reset(new MemoryMappedFile());
+  unwinder->cfi_mmap_->Initialize(temp_path);
+  unwinder->ParseCFITables();
+
+  CFIBacktraceAndroid::CFIRow cfi_row = {0};
+  EXPECT_FALSE(unwinder->FindCFIRowForPC(0x01, &cfi_row));
+  EXPECT_FALSE(unwinder->FindCFIRowForPC(0x100, &cfi_row));
+  EXPECT_FALSE(unwinder->FindCFIRowForPC(0x1502, &cfi_row));
+  EXPECT_FALSE(unwinder->FindCFIRowForPC(0x3000, &cfi_row));
+  EXPECT_FALSE(unwinder->FindCFIRowForPC(0x2024, &cfi_row));
+  EXPECT_FALSE(unwinder->FindCFIRowForPC(0x2212, &cfi_row));
+
+  const CFIBacktraceAndroid::CFIRow kRow1 = {0x110, 0x4};
+  const CFIBacktraceAndroid::CFIRow kRow2 = {0x220, 0x4};
+  const CFIBacktraceAndroid::CFIRow kRow3 = {0x220, 0x8};
+  const CFIBacktraceAndroid::CFIRow kRow4 = {0x30, 0xc};
+  const CFIBacktraceAndroid::CFIRow kRow5 = {0x2c, 0x8};
+  EXPECT_TRUE(unwinder->FindCFIRowForPC(0x1002, &cfi_row));
+  EXPECT_EQ(kRow1, cfi_row);
+  EXPECT_TRUE(unwinder->FindCFIRowForPC(0x1003, &cfi_row));
+  EXPECT_EQ(kRow1, cfi_row);
+  EXPECT_TRUE(unwinder->FindCFIRowForPC(0x1008, &cfi_row));
+  EXPECT_EQ(kRow2, cfi_row);
+  EXPECT_TRUE(unwinder->FindCFIRowForPC(0x1009, &cfi_row));
+  EXPECT_EQ(kRow2, cfi_row);
+  EXPECT_TRUE(unwinder->FindCFIRowForPC(0x1039, &cfi_row));
+  EXPECT_EQ(kRow2, cfi_row);
+  EXPECT_TRUE(unwinder->FindCFIRowForPC(0x1080, &cfi_row));
+  EXPECT_EQ(kRow3, cfi_row);
+  EXPECT_TRUE(unwinder->FindCFIRowForPC(0x1100, &cfi_row));
+  EXPECT_EQ(kRow3, cfi_row);
+  EXPECT_TRUE(unwinder->FindCFIRowForPC(0x2050, &cfi_row));
+  EXPECT_EQ(kRow4, cfi_row);
+  EXPECT_TRUE(unwinder->FindCFIRowForPC(0x2208, &cfi_row));
+  EXPECT_EQ(kRow5, cfi_row);
+  EXPECT_TRUE(unwinder->FindCFIRowForPC(0x2210, &cfi_row));
+  EXPECT_EQ(kRow5, cfi_row);
+
+  // Test that the cache is used on future calls to Find(); all addresses
+  // should have different hashes. Reset the memory map to make sure it is
+  // never accessed in Find().
+  unwinder->cfi_mmap_.reset(new MemoryMappedFile());
+  EXPECT_TRUE(unwinder->FindCFIRowForPC(0x1002, &cfi_row));
+  EXPECT_EQ(kRow1, cfi_row);
+  EXPECT_TRUE(unwinder->FindCFIRowForPC(0x1003, &cfi_row));
+  EXPECT_EQ(kRow1, cfi_row);
+  EXPECT_TRUE(unwinder->FindCFIRowForPC(0x1008, &cfi_row));
+  EXPECT_EQ(kRow2, cfi_row);
+  EXPECT_TRUE(unwinder->FindCFIRowForPC(0x1009, &cfi_row));
+  EXPECT_EQ(kRow2, cfi_row);
+  EXPECT_TRUE(unwinder->FindCFIRowForPC(0x1039, &cfi_row));
+  EXPECT_EQ(kRow2, cfi_row);
+  EXPECT_TRUE(unwinder->FindCFIRowForPC(0x1080, &cfi_row));
+  EXPECT_EQ(kRow3, cfi_row);
+  EXPECT_TRUE(unwinder->FindCFIRowForPC(0x1100, &cfi_row));
+  EXPECT_EQ(kRow3, cfi_row);
+  EXPECT_TRUE(unwinder->FindCFIRowForPC(0x2050, &cfi_row));
+  EXPECT_EQ(kRow4, cfi_row);
+  EXPECT_TRUE(unwinder->FindCFIRowForPC(0x2208, &cfi_row));
+  EXPECT_EQ(kRow5, cfi_row);
+  EXPECT_TRUE(unwinder->FindCFIRowForPC(0x2210, &cfi_row));
+  EXPECT_EQ(kRow5, cfi_row);
+}
+
+TEST(CFIBacktraceAndroidTest, TestCFICache) {
+  // Use ASSERT macros in this function since they are in loops and using
+  // EXPECT would print too many failures.
+  CFIBacktraceAndroid::CFICache cache;
+  CFIBacktraceAndroid::CFIRow cfi;
+
+  // Empty cache should not find anything.
+  EXPECT_FALSE(cache.Find(1, &cfi));
+
+  // Insert 1 - 2*kLimit
+  for (size_t i = 1; i <= 2 * cache.kLimit; ++i) {
+    CFIBacktraceAndroid::CFIRow val = {4 * i, 2 * i};
+    cache.Add(i, val);
+    ASSERT_TRUE(cache.Find(i, &cfi));
+    ASSERT_EQ(cfi, val);
+
+    // Inserting more than kLimit items evicts |i - cache.kLimit| from cache.
+    if (i >= cache.kLimit)
+      ASSERT_FALSE(cache.Find(i - cache.kLimit, &cfi));
+  }
+  // Cache contains kLimit+1 - 2*kLimit.
+
+  // Check that 1 - kLimit cannot be found.
+  for (size_t i = 1; i <= cache.kLimit; ++i) {
+    ASSERT_FALSE(cache.Find(i, &cfi));
+  }
+
+  // Check if kLimit+1 - 2*kLimit still exists in cache.
+  for (size_t i = cache.kLimit + 1; i <= 2 * cache.kLimit; ++i) {
+    CFIBacktraceAndroid::CFIRow val = {4 * i, 2 * i};
+    ASSERT_TRUE(cache.Find(i, &cfi));
+    ASSERT_EQ(cfi, val);
+  }
+
+  // Insert 2*kLimit+1, which will evict kLimit+1.
+  cfi = {1, 1};
+  cache.Add(2 * cache.kLimit + 1, cfi);
+  EXPECT_TRUE(cache.Find(2 * cache.kLimit + 1, &cfi));
+  EXPECT_FALSE(cache.Find(cache.kLimit + 1, &cfi));
+  // Cache now contains kLimit+2 - 2*kLimit and 2*kLimit+1.
+}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/common/trace_event_common.h b/base/trace_event/common/trace_event_common.h
new file mode 100644
index 0000000..e2a5ca0
--- /dev/null
+++ b/base/trace_event/common/trace_event_common.h
@@ -0,0 +1,1114 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_COMMON_TRACE_EVENT_COMMON_H_
+#define BASE_TRACE_EVENT_COMMON_TRACE_EVENT_COMMON_H_
+
+// This header file defines the set of trace_event macros without specifying
+// how the events actually get collected and stored. If you need to expose trace
+// events to some other universe, you can copy-and-paste this file as well as
+// trace_event.h, modifying the macros contained there as necessary for the
+// target platform. The end result is that multiple libraries can funnel events
+// through to a shared trace event collector.
+
+// IMPORTANT: To avoid conflicts, if you need to modify this file for a library,
+// land your change in base/ first, and then copy-and-paste it.
+
+// Trace events are for tracking application performance and resource usage.
+// Macros are provided to track:
+//    Begin and end of function calls
+//    Counters
+//
+// Events are issued against categories. Whereas LOG's
+// categories are statically defined, TRACE categories are created
+// implicitly with a string. For example:
+//   TRACE_EVENT_INSTANT0("MY_SUBSYSTEM", "SomeImportantEvent",
+//                        TRACE_EVENT_SCOPE_THREAD)
+//
+// It is often the case that one trace may belong in multiple categories at the
+// same time. The first argument to the trace can be a comma-separated list of
+// categories, forming a category group, like:
+//
+// TRACE_EVENT_INSTANT0("input,views", "OnMouseOver", TRACE_EVENT_SCOPE_THREAD)
+//
+// We can enable/disable tracing of OnMouseOver by enabling/disabling either
+// category.
+//
+// Events can be INSTANT, or can be pairs of BEGIN and END in the same scope:
+//   TRACE_EVENT_BEGIN0("MY_SUBSYSTEM", "SomethingCostly")
+//   doSomethingCostly()
+//   TRACE_EVENT_END0("MY_SUBSYSTEM", "SomethingCostly")
+// Note: our tools can't always determine the correct BEGIN/END pairs unless
+// these are used in the same scope. Use ASYNC_BEGIN/ASYNC_END macros if you
+// need them to be in separate scopes.
+//
+// A common use case is to trace entire function scopes. This
+// issues a trace BEGIN and END automatically:
+//   void doSomethingCostly() {
+//     TRACE_EVENT0("MY_SUBSYSTEM", "doSomethingCostly");
+//     ...
+//   }
+//
+// Additional parameters can be associated with an event:
+//   void doSomethingCostly2(int howMuch) {
+//     TRACE_EVENT1("MY_SUBSYSTEM", "doSomethingCostly",
+//         "howMuch", howMuch);
+//     ...
+//   }
+//
+// The trace system will automatically add to this information the
+// current process id, thread id, and a timestamp in microseconds.
+//
+// To trace an asynchronous procedure such as an IPC send/receive, use
+// ASYNC_BEGIN and ASYNC_END:
+//   [single threaded sender code]
+//     static int send_count = 0;
+//     ++send_count;
+//     TRACE_EVENT_ASYNC_BEGIN0("ipc", "message", send_count);
+//     Send(new MyMessage(send_count));
+//   [receive code]
+//     void OnMyMessage(send_count) {
+//       TRACE_EVENT_ASYNC_END0("ipc", "message", send_count);
+//     }
+// The third parameter is a unique ID to match ASYNC_BEGIN/ASYNC_END pairs.
+// ASYNC_BEGIN and ASYNC_END can occur on any thread of any traced process.
+// Pointers can be used for the ID parameter, and they will be mangled
+// internally so that the same pointer on two different processes will not
+// match. For example:
+//   class MyTracedClass {
+//    public:
+//     MyTracedClass() {
+//       TRACE_EVENT_ASYNC_BEGIN0("category", "MyTracedClass", this);
+//     }
+//     ~MyTracedClass() {
+//       TRACE_EVENT_ASYNC_END0("category", "MyTracedClass", this);
+//     }
+//   }
+//
+// Trace event also supports counters, which are a way to track a quantity
+// as it varies over time. Counters are created with the following macro:
+//   TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter", g_myCounterValue);
+//
+// Counters are process-specific. The macro itself can be issued from any
+// thread, however.
+//
+// Sometimes, you want to track two counters at once. You can do this with two
+// counter macros:
+//   TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter0", g_myCounterValue[0]);
+//   TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter1", g_myCounterValue[1]);
+// Or you can do it with a combined macro:
+//   TRACE_COUNTER2("MY_SUBSYSTEM", "myCounter",
+//       "bytesPinned", g_myCounterValue[0],
+//       "bytesAllocated", g_myCounterValue[1]);
+// This indicates to the tracing UI that these counters should be displayed
+// in a single graph, as a summed area chart.
+//
+// Since counters are in a global namespace, you may want to disambiguate with a
+// unique ID, by using the TRACE_COUNTER_ID* variations.
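+// For example (the ID value here is illustrative):
+//   TRACE_COUNTER_ID1("MY_SUBSYSTEM", "myCounter", 0x1234, g_myCounterValue);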
+//
+// By default, trace collection is compiled in, but turned off at runtime.
+// Collecting trace data is the responsibility of the embedding
+// application. In Chrome's case, navigating to about:tracing will turn on
+// tracing and display data collected across all active processes.
+//
+//
+// Memory scoping note:
+// Tracing copies the pointers, not the string content, of the strings passed
+// in for category_group, name, and arg_names.  Thus, the following code will
+// cause problems:
+//     char* str = strdup("importantName");
+//     TRACE_EVENT_INSTANT0("SUBSYSTEM", str);  // BAD!
+//     free(str);                   // Trace system now has dangling pointer
+//
+// To avoid this issue with the |name| and |arg_name| parameters, use the
+// TRACE_EVENT_COPY_XXX overloads of the macros at additional runtime overhead.
+// Notes: The category must always be in a long-lived char* (i.e. static const).
+//        The |arg_values|, when used, are always deep copied with the _COPY
+//        macros.
+//
+// When are string argument values copied:
+// const char* arg_values are only referenced by default:
+//     TRACE_EVENT1("category", "name",
+//                  "arg1", "literal string is only referenced");
+// Use TRACE_STR_COPY to force copying of a const char*:
+//     TRACE_EVENT1("category", "name",
+//                  "arg1", TRACE_STR_COPY("string will be copied"));
+// std::string arg_values are always copied:
+//     TRACE_EVENT1("category", "name",
+//                  "arg1", std::string("string will be copied"));
+//
+//
+// Convertable notes:
+// Converting a large data type to a string can be costly. To help with this,
+// the trace framework provides an interface ConvertableToTraceFormat. If you
+// inherit from it and implement the AppendAsTraceFormat method, the trace
+// framework will call back to your object to do the conversion at trace output
+// time. This means that if the category for the event is disabled, the
+// conversion will not happen.
+//
+//   class MyData : public base::trace_event::ConvertableToTraceFormat {
+//    public:
+//     MyData() {}
+//     void AppendAsTraceFormat(std::string* out) const override {
+//       out->append("{\"foo\":1}");
+//     }
+//    private:
+//     ~MyData() override {}
+//     DISALLOW_COPY_AND_ASSIGN(MyData);
+//   };
+//
+//   TRACE_EVENT1("foo", "bar", "data",
+//                std::unique_ptr<ConvertableToTraceFormat>(new MyData()));
+//
+// The trace framework will take ownership of the passed pointer, and it will
+// be freed when the trace buffer is flushed.
+//
+// Note, we only do the conversion when the buffer is flushed, so the provided
+// data object should not be modified after it's passed to the trace framework.
+//
+//
+// Thread Safety:
+// A thread safe singleton and mutex are used for thread safety. Category
+// enabled flags are used to limit the performance impact when the system
+// is not enabled.
+//
+// TRACE_EVENT macros first cache a pointer to a category. The categories are
+// statically allocated and safe at all times, even after exit. Fetching a
+// category is protected by the TraceLog::lock_. Multiple threads initializing
+// the static variable is safe, as they will be serialized by the lock and
+// multiple calls will return the same pointer to the category.
+//
+// Then the category_group_enabled flag is checked. This is an unsigned char,
+// and
+// not intended to be multithread safe. It optimizes access to AddTraceEvent
+// which is threadsafe internally via TraceLog::lock_. The enabled flag may
+// cause some threads to incorrectly call or skip calling AddTraceEvent near
+// the time of the system being enabled or disabled. This is acceptable as
+// we tolerate some data loss while the system is being enabled/disabled and
+// because AddTraceEvent is threadsafe internally and checks the enabled state
+// again under lock.
+//
+// Without the use of these static category pointers and enabled flags all
+// trace points would carry a significant performance cost of acquiring a lock
+// and resolving the category.
+
+// Check that nobody includes this file directly.  Clients are supposed to
+// include the surrounding "trace_event.h" of their project instead.
+#if defined(TRACE_EVENT0)
+#error "Another copy of this file has already been included."
+#endif
+
+// This will mark the trace category as disabled by default. The user will need
+// to explicitly enable tracing of the category.
+#define TRACE_DISABLED_BY_DEFAULT(name) "disabled-by-default-" name
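+// For example, TRACE_DISABLED_BY_DEFAULT("test") expands to the category
+// string "disabled-by-default-test".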
+
+// Records a pair of begin and end events called "name" for the current
+// scope, with 0, 1 or 2 associated arguments. If the category is not
+// enabled, then this does nothing.
+// - category and name strings must have application lifetime (statics or
+//   literals). They may not include " chars.
+#define TRACE_EVENT0(category_group, name)    \
+  INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name)
+#define TRACE_EVENT_WITH_FLOW0(category_group, name, bind_id, flow_flags)  \
+  INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name, bind_id, \
+                                            flow_flags)
+#define TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
+  INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, arg1_name, arg1_val)
+#define TRACE_EVENT_WITH_FLOW1(category_group, name, bind_id, flow_flags,  \
+                               arg1_name, arg1_val)                        \
+  INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name, bind_id, \
+                                            flow_flags, arg1_name, arg1_val)
+#define TRACE_EVENT2(category_group, name, arg1_name, arg1_val, arg2_name,   \
+                     arg2_val)                                               \
+  INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, arg1_name, arg1_val, \
+                                  arg2_name, arg2_val)
+#define TRACE_EVENT_WITH_FLOW2(category_group, name, bind_id, flow_flags,    \
+                               arg1_name, arg1_val, arg2_name, arg2_val)     \
+  INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name, bind_id,   \
+                                            flow_flags, arg1_name, arg1_val, \
+                                            arg2_name, arg2_val)
+
+// Records a single event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+//   literals). They may not include " chars.
+#define TRACE_EVENT_INSTANT0(category_group, name, scope)                   \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+                           TRACE_EVENT_FLAG_NONE | scope)
+#define TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, arg1_val) \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name,    \
+                           TRACE_EVENT_FLAG_NONE | scope, arg1_name, arg1_val)
+#define TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, arg1_val, \
+                             arg2_name, arg2_val)                              \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name,    \
+                           TRACE_EVENT_FLAG_NONE | scope, arg1_name, arg1_val, \
+                           arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_INSTANT0(category_group, name, scope)              \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+                           TRACE_EVENT_FLAG_COPY | scope)
+#define TRACE_EVENT_COPY_INSTANT1(category_group, name, scope, arg1_name,   \
+                                  arg1_val)                                 \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+                           TRACE_EVENT_FLAG_COPY | scope, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_INSTANT2(category_group, name, scope, arg1_name,      \
+                                  arg1_val, arg2_name, arg2_val)               \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name,    \
+                           TRACE_EVENT_FLAG_COPY | scope, arg1_name, arg1_val, \
+                           arg2_name, arg2_val)
+
+#define TRACE_EVENT_INSTANT_WITH_TIMESTAMP0(category_group, name, scope, \
+                                            timestamp)                   \
+  INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(                               \
+      TRACE_EVENT_PHASE_INSTANT, category_group, name, timestamp,        \
+      TRACE_EVENT_FLAG_NONE | scope)
+
+#define TRACE_EVENT_INSTANT_WITH_TIMESTAMP1(category_group, name, scope,  \
+                                            timestamp, arg_name, arg_val) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(                                \
+      TRACE_EVENT_PHASE_INSTANT, category_group, name, timestamp,         \
+      TRACE_EVENT_FLAG_NONE | scope, arg_name, arg_val)
+
+// Records a single BEGIN event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+//   literals). They may not include " chars.
+#define TRACE_EVENT_BEGIN0(category_group, name)                          \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
+                           TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_BEGIN1(category_group, name, arg1_name, arg1_val)     \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
+                           TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_BEGIN2(category_group, name, arg1_name, arg1_val,     \
+                           arg2_name, arg2_val)                           \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
+                           TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val,    \
+                           arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_BEGIN0(category_group, name)                     \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
+                           TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_BEGIN1(category_group, name, arg1_name, arg1_val) \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name,  \
+                           TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_BEGIN2(category_group, name, arg1_name, arg1_val, \
+                                arg2_name, arg2_val)                       \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name,  \
+                           TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val,     \
+                           arg2_name, arg2_val)
+
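+// Example usage (an illustrative sketch; the function and names are
+// hypothetical):
+//   void DoWork() {
+//     TRACE_EVENT_BEGIN0("cc", "DoWork");
+//     // ... the work being measured ...
+//     TRACE_EVENT_END0("cc", "DoWork");
+//   }
+// Where possible, prefer the scoped TRACE_EVENT0 macro defined earlier in
+// this file, which emits the matching END automatically.
+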
+// Similar to TRACE_EVENT_BEGINx but with a custom |timestamp| provided.
+// - |id| is used to match the _BEGIN event with the _END event.
+//   Events are considered to match if their category_group, name and id values
+//   all match. |id| must either be a pointer or an integer value up to 64 bits.
+//   If it's a pointer, the bits will be xored with a hash of the process ID so
+//   that the same pointer on two different processes will not collide.
+#define TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0(category_group, name, id, \
+                                                     thread_id, timestamp)     \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                          \
+      TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id,      \
+      timestamp, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP0(                \
+    category_group, name, id, thread_id, timestamp)                       \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                     \
+      TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \
+      timestamp, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP1(                \
+    category_group, name, id, thread_id, timestamp, arg1_name, arg1_val)  \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                     \
+      TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \
+      timestamp, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP2(                \
+    category_group, name, id, thread_id, timestamp, arg1_name, arg1_val,  \
+    arg2_name, arg2_val)                                                  \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                     \
+      TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \
+      timestamp, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name,   \
+      arg2_val)
+
+// Records a single END event for "name" immediately. If the category
+// is not enabled, then this does nothing.
+// - category and name strings must have application lifetime (statics or
+//   literals). They may not include " chars.
+#define TRACE_EVENT_END0(category_group, name)                          \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \
+                           TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_END1(category_group, name, arg1_name, arg1_val)     \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \
+                           TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_END2(category_group, name, arg1_name, arg1_val, arg2_name, \
+                         arg2_val)                                             \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name,        \
+                           TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val,         \
+                           arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_END0(category_group, name)                     \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \
+                           TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_END1(category_group, name, arg1_name, arg1_val) \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name,  \
+                           TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_END2(category_group, name, arg1_name, arg1_val, \
+                              arg2_name, arg2_val)                       \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name,  \
+                           TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val,   \
+                           arg2_name, arg2_val)
+
+#define TRACE_EVENT_MARK_WITH_TIMESTAMP0(category_group, name, timestamp) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(                                \
+      TRACE_EVENT_PHASE_MARK, category_group, name, timestamp,            \
+      TRACE_EVENT_FLAG_NONE)
+
+#define TRACE_EVENT_MARK_WITH_TIMESTAMP1(category_group, name, timestamp, \
+                                         arg1_name, arg1_val)             \
+  INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(                                \
+      TRACE_EVENT_PHASE_MARK, category_group, name, timestamp,            \
+      TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+
+#define TRACE_EVENT_MARK_WITH_TIMESTAMP2(                                      \
+    category_group, name, timestamp, arg1_name, arg1_val, arg2_name, arg2_val) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(                                     \
+      TRACE_EVENT_PHASE_MARK, category_group, name, timestamp,                 \
+      TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+
+#define TRACE_EVENT_COPY_MARK(category_group, name)                      \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_MARK, category_group, name, \
+                           TRACE_EVENT_FLAG_COPY)
+
+#define TRACE_EVENT_COPY_MARK_WITH_TIMESTAMP(category_group, name, timestamp) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(                                    \
+      TRACE_EVENT_PHASE_MARK, category_group, name, timestamp,                \
+      TRACE_EVENT_FLAG_COPY)
+
+// Similar to TRACE_EVENT_ENDx but with a custom |timestamp| provided.
+// - |id| is used to match the _BEGIN event with the _END event.
+//   Events are considered to match if their category_group, name and id values
+//   all match. |id| must either be a pointer or an integer value up to 64 bits.
+//   If it's a pointer, the bits will be xored with a hash of the process ID so
+//   that the same pointer on two different processes will not collide.
+#define TRACE_EVENT_END_WITH_ID_TID_AND_TIMESTAMP0(category_group, name, id, \
+                                                   thread_id, timestamp)     \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                        \
+      TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id,      \
+      timestamp, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP0(                \
+    category_group, name, id, thread_id, timestamp)                     \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                   \
+      TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id, \
+      timestamp, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP1(                 \
+    category_group, name, id, thread_id, timestamp, arg1_name, arg1_val) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                    \
+      TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id,  \
+      timestamp, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP2(                 \
+    category_group, name, id, thread_id, timestamp, arg1_name, arg1_val, \
+    arg2_name, arg2_val)                                                 \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                    \
+      TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id,  \
+      timestamp, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name,  \
+      arg2_val)
+
+// Records the value of a counter called "name" immediately. The value
+// must be representable as a 32-bit integer.
+// - category and name strings must have application lifetime (statics or
+//   literals). They may not include " chars.
+#define TRACE_COUNTER1(category_group, name, value)                         \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \
+                           TRACE_EVENT_FLAG_NONE, "value",                  \
+                           static_cast<int>(value))
+#define TRACE_COPY_COUNTER1(category_group, name, value)                    \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \
+                           TRACE_EVENT_FLAG_COPY, "value",                  \
+                           static_cast<int>(value))
+
+// Records the values of a multi-part counter called "name" immediately.
+// The UI will treat value1 and value2 as parts of a whole, displaying their
+// values as a stacked-bar chart.
+// - category and name strings must have application lifetime (statics or
+//   literals). They may not include " chars.
+#define TRACE_COUNTER2(category_group, name, value1_name, value1_val,       \
+                       value2_name, value2_val)                             \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \
+                           TRACE_EVENT_FLAG_NONE, value1_name,              \
+                           static_cast<int>(value1_val), value2_name,       \
+                           static_cast<int>(value2_val))
+#define TRACE_COPY_COUNTER2(category_group, name, value1_name, value1_val,  \
+                            value2_name, value2_val)                        \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \
+                           TRACE_EVENT_FLAG_COPY, value1_name,              \
+                           static_cast<int>(value1_val), value2_name,       \
+                           static_cast<int>(value2_val))
+
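+// Example usage (an illustrative sketch; the names and variables are
+// hypothetical):
+//   TRACE_COUNTER1("renderer", "FreeBufferCount", free_buffer_count);
+//   TRACE_COUNTER2("renderer", "HeapBytes", "used", used_bytes,
+//                  "free", free_bytes);
+// In the second call the UI would draw "used" and "free" as parts of one
+// stacked bar.
+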
+// Similar to TRACE_COUNTERx, but with a custom |timestamp| provided.
+#define TRACE_COUNTER_WITH_TIMESTAMP1(category_group, name, timestamp, value) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(                                    \
+      TRACE_EVENT_PHASE_COUNTER, category_group, name, timestamp,             \
+      TRACE_EVENT_FLAG_NONE, "value", static_cast<int>(value))
+
+#define TRACE_COUNTER_WITH_TIMESTAMP2(category_group, name, timestamp,      \
+                                      value1_name, value1_val, value2_name, \
+                                      value2_val)                           \
+  INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(                                  \
+      TRACE_EVENT_PHASE_COUNTER, category_group, name, timestamp,           \
+      TRACE_EVENT_FLAG_NONE, value1_name, static_cast<int>(value1_val),     \
+      value2_name, static_cast<int>(value2_val))
+
+// Records the value of a counter called "name" immediately. The value
+// must be representable as a 32-bit integer.
+// - category and name strings must have application lifetime (statics or
+//   literals). They may not include " chars.
+// - |id| is used to disambiguate counters with the same name. It must either
+//   be a pointer or an integer value up to 64 bits. If it's a pointer, the bits
+//   will be xored with a hash of the process ID so that the same pointer on
+//   two different processes will not collide.
+#define TRACE_COUNTER_ID1(category_group, name, id, value)                    \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, category_group, \
+                                   name, id, TRACE_EVENT_FLAG_NONE, "value",  \
+                                   static_cast<int>(value))
+#define TRACE_COPY_COUNTER_ID1(category_group, name, id, value)               \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, category_group, \
+                                   name, id, TRACE_EVENT_FLAG_COPY, "value",  \
+                                   static_cast<int>(value))
+
+// Records the values of a multi-part counter called "name" immediately.
+// The UI will treat value1 and value2 as parts of a whole, displaying their
+// values as a stacked-bar chart.
+// - category and name strings must have application lifetime (statics or
+//   literals). They may not include " chars.
+// - |id| is used to disambiguate counters with the same name. It must either
+//   be a pointer or an integer value up to 64 bits. If it's a pointer, the bits
+//   will be xored with a hash of the process ID so that the same pointer on
+//   two different processes will not collide.
+#define TRACE_COUNTER_ID2(category_group, name, id, value1_name, value1_val,  \
+                          value2_name, value2_val)                            \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, category_group, \
+                                   name, id, TRACE_EVENT_FLAG_NONE,           \
+                                   value1_name, static_cast<int>(value1_val), \
+                                   value2_name, static_cast<int>(value2_val))
+#define TRACE_COPY_COUNTER_ID2(category_group, name, id, value1_name,         \
+                               value1_val, value2_name, value2_val)           \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, category_group, \
+                                   name, id, TRACE_EVENT_FLAG_COPY,           \
+                                   value1_name, static_cast<int>(value1_val), \
+                                   value2_name, static_cast<int>(value2_val))
+
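+// Example usage (an illustrative sketch): passing |this| as the id lets each
+// instance of a class report its own counter under a shared name:
+//   TRACE_COUNTER_ID1("renderer", "PendingTasks", this, pending_tasks_.size());
+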
+// TRACE_EVENT_SAMPLE_* events are injected by the sampling profiler.
+#define TRACE_EVENT_SAMPLE_WITH_TID_AND_TIMESTAMP0(category_group, name,       \
+                                                   thread_id, timestamp)       \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                          \
+      TRACE_EVENT_PHASE_SAMPLE, category_group, name, 0, thread_id, timestamp, \
+      TRACE_EVENT_FLAG_NONE)
+
+#define TRACE_EVENT_SAMPLE_WITH_TID_AND_TIMESTAMP1(                            \
+    category_group, name, thread_id, timestamp, arg1_name, arg1_val)           \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                          \
+      TRACE_EVENT_PHASE_SAMPLE, category_group, name, 0, thread_id, timestamp, \
+      TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+
+#define TRACE_EVENT_SAMPLE_WITH_TID_AND_TIMESTAMP2(category_group, name,       \
+                                                   thread_id, timestamp,       \
+                                                   arg1_name, arg1_val,        \
+                                                   arg2_name, arg2_val)        \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                          \
+      TRACE_EVENT_PHASE_SAMPLE, category_group, name, 0, thread_id, timestamp, \
+      TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+
+#define TRACE_EVENT_SAMPLE_WITH_ID1(category_group, name, id, arg1_name,       \
+                                    arg1_val)                                  \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_SAMPLE, category_group,   \
+                                   name, id, TRACE_EVENT_FLAG_NONE, arg1_name, \
+                                   arg1_val)
+
+// ASYNC_STEP_* APIs should be only used by legacy code. New code should
+// consider using NESTABLE_ASYNC_* APIs to describe substeps within an async
+// event.
+// Records a single ASYNC_BEGIN event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+//   literals). They may not include " chars.
+// - |id| is used to match the ASYNC_BEGIN event with the ASYNC_END event. ASYNC
+//   events are considered to match if their category_group, name and id values
+//   all match. |id| must either be a pointer or an integer value up to 64 bits.
+//   If it's a pointer, the bits will be xored with a hash of the process ID so
+//   that the same pointer on two different processes will not collide.
+//
+// An asynchronous operation can consist of multiple phases. The first phase is
+// defined by the ASYNC_BEGIN calls. Additional phases can be defined using the
+// ASYNC_STEP_INTO or ASYNC_STEP_PAST macros. The ASYNC_STEP_INTO macro will
+// annotate the block following the call. The ASYNC_STEP_PAST macro will
+// annotate the block prior to the call. Note that any particular event must
+// use only STEP_INTO or STEP_PAST macros; the two cannot be mixed within one
+// event. When the operation completes, call ASYNC_END.
+//
+// An ASYNC trace typically occurs on a single thread (if not, it will only be
+// drawn on the thread defined in the ASYNC_BEGIN event), but all events in that
+// operation must use the same |name| and |id|. Each step can have its own
+// args.
+#define TRACE_EVENT_ASYNC_BEGIN0(category_group, name, id)        \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+                                   category_group, name, id,      \
+                                   TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_ASYNC_BEGIN1(category_group, name, id, arg1_name, \
+                                 arg1_val)                            \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN,     \
+                                   category_group, name, id,          \
+                                   TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_BEGIN2(category_group, name, id, arg1_name, \
+                                 arg1_val, arg2_name, arg2_val)       \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                   \
+      TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id,        \
+      TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN0(category_group, name, id)   \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+                                   category_group, name, id,      \
+                                   TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN1(category_group, name, id, arg1_name, \
+                                      arg1_val)                            \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN,          \
+                                   category_group, name, id,               \
+                                   TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN2(category_group, name, id, arg1_name, \
+                                      arg1_val, arg2_name, arg2_val)       \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                        \
+      TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id,             \
+      TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Similar to TRACE_EVENT_ASYNC_BEGINx but with a custom |timestamp|
+// provided.
+#define TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP0(category_group, name, id, \
+                                                timestamp)                \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                     \
+      TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id,            \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP1(                           \
+    category_group, name, id, timestamp, arg1_name, arg1_val)              \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                      \
+      TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id,             \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
+      arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP2(category_group, name, id,      \
+                                                timestamp, arg1_name,          \
+                                                arg1_val, arg2_name, arg2_val) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                          \
+      TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id,                 \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE,     \
+      arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN_WITH_TIMESTAMP0(category_group, name, id, \
+                                                     timestamp)                \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                          \
+      TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id,                 \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY)
+
+// Records a single ASYNC_STEP_INTO event for |step| immediately. If the
+// category is not enabled, then this does nothing. The |name| and |id| must
+// match the ASYNC_BEGIN event above. The |step| param identifies this step
+// within the async event. This should be called at the beginning of the next
+// phase of an asynchronous operation. The ASYNC_BEGIN event must not have any
+// ASYNC_STEP_PAST events.
+#define TRACE_EVENT_ASYNC_STEP_INTO0(category_group, name, id, step)  \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP_INTO, \
+                                   category_group, name, id,          \
+                                   TRACE_EVENT_FLAG_NONE, "step", step)
+#define TRACE_EVENT_ASYNC_STEP_INTO1(category_group, name, id, step, \
+                                     arg1_name, arg1_val)            \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                  \
+      TRACE_EVENT_PHASE_ASYNC_STEP_INTO, category_group, name, id,   \
+      TRACE_EVENT_FLAG_NONE, "step", step, arg1_name, arg1_val)
+
+// Similar to TRACE_EVENT_ASYNC_STEP_INTOx but with a custom |timestamp|
+// provided.
+#define TRACE_EVENT_ASYNC_STEP_INTO_WITH_TIMESTAMP0(category_group, name, id, \
+                                                    step, timestamp)          \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                         \
+      TRACE_EVENT_PHASE_ASYNC_STEP_INTO, category_group, name, id,            \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE,    \
+      "step", step)
+
+// Records a single ASYNC_STEP_PAST event for |step| immediately. If the
+// category is not enabled, then this does nothing. The |name| and |id| must
+// match the ASYNC_BEGIN event above. The |step| param identifies this step
+// within the async event. This should be called at the beginning of the next
+// phase of an asynchronous operation. The ASYNC_BEGIN event must not have any
+// ASYNC_STEP_INTO events.
+#define TRACE_EVENT_ASYNC_STEP_PAST0(category_group, name, id, step)  \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP_PAST, \
+                                   category_group, name, id,          \
+                                   TRACE_EVENT_FLAG_NONE, "step", step)
+#define TRACE_EVENT_ASYNC_STEP_PAST1(category_group, name, id, step, \
+                                     arg1_name, arg1_val)            \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                  \
+      TRACE_EVENT_PHASE_ASYNC_STEP_PAST, category_group, name, id,   \
+      TRACE_EVENT_FLAG_NONE, "step", step, arg1_name, arg1_val)
+
+// Records a single ASYNC_END event for "name" immediately. If the category
+// is not enabled, then this does nothing.
+#define TRACE_EVENT_ASYNC_END0(category_group, name, id)        \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+                                   category_group, name, id,    \
+                                   TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_ASYNC_END1(category_group, name, id, arg1_name, arg1_val) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END,               \
+                                   category_group, name, id,                  \
+                                   TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_END2(category_group, name, id, arg1_name, arg1_val, \
+                               arg2_name, arg2_val)                           \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                           \
+      TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id,                  \
+      TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_ASYNC_END0(category_group, name, id)   \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+                                   category_group, name, id,    \
+                                   TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_ASYNC_END1(category_group, name, id, arg1_name, \
+                                    arg1_val)                            \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END,          \
+                                   category_group, name, id,             \
+                                   TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_ASYNC_END2(category_group, name, id, arg1_name, \
+                                    arg1_val, arg2_name, arg2_val)       \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                      \
+      TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id,             \
+      TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, arg2_val)
+
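+// Example usage (an illustrative sketch; |request| is a hypothetical pointer
+// used as the id so that concurrent operations do not collide):
+//   TRACE_EVENT_ASYNC_BEGIN0("net", "URLRequest", request);
+//   ...
+//   TRACE_EVENT_ASYNC_STEP_INTO0("net", "URLRequest", request, "Connecting");
+//   ...
+//   TRACE_EVENT_ASYNC_END0("net", "URLRequest", request);
+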
+// Similar to TRACE_EVENT_ASYNC_ENDx but with a custom |timestamp| provided.
+#define TRACE_EVENT_ASYNC_END_WITH_TIMESTAMP0(category_group, name, id, \
+                                              timestamp)                \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                   \
+      TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id,            \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_ASYNC_END_WITH_TIMESTAMP1(category_group, name, id,       \
+                                              timestamp, arg1_name, arg1_val) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                         \
+      TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id,                  \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE,    \
+      arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_END_WITH_TIMESTAMP2(category_group, name, id,       \
+                                              timestamp, arg1_name, arg1_val, \
+                                              arg2_name, arg2_val)            \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                         \
+      TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id,                  \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE,    \
+      arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_ASYNC_END_WITH_TIMESTAMP0(category_group, name, id, \
+                                                   timestamp)                \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                        \
+      TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id,                 \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY)
+
+// NESTABLE_ASYNC_* APIs are used to describe an async operation, which can
+// be nested within a NESTABLE_ASYNC event and/or have inner NESTABLE_ASYNC
+// events.
+// - category and name strings must have application lifetime (statics or
+//   literals). They may not include " chars.
+// - A pair of NESTABLE_ASYNC_BEGIN and NESTABLE_ASYNC_END events is
+//   considered a match if their category_group, name and id all match.
+// - |id| must either be a pointer or an integer value up to 64 bits.
+//   If it's a pointer, the bits will be xored with a hash of the process ID so
+//   that the same pointer on two different processes will not collide.
+// - |id| is used to match a child NESTABLE_ASYNC event with its parent
+//   NESTABLE_ASYNC event. Therefore, events in the same nested event tree must
+//   be logged using the same id and category_group.
+//
+// An unmatched NESTABLE_ASYNC_END event will be parsed as an event that
+// starts at the first NESTABLE_ASYNC event of that id, and an unmatched
+// NESTABLE_ASYNC_BEGIN event will be parsed as an event that ends at the last
+// NESTABLE_ASYNC event of that id. Corresponding warning messages for
+// unmatched events will be shown in the analysis view.
+
+// Records a single NESTABLE_ASYNC_BEGIN event called "name" immediately, with
+// 0, 1 or 2 associated arguments. If the category is not enabled, then this
+// does nothing.
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(category_group, name, id)        \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, \
+                                   category_group, name, id,               \
+                                   TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN1(category_group, name, id, arg1_name, \
+                                          arg1_val)                            \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN,     \
+                                   category_group, name, id,                   \
+                                   TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN2(category_group, name, id, arg1_name, \
+                                          arg1_val, arg2_name, arg2_val)       \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                            \
+      TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id,        \
+      TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+// Records a single NESTABLE_ASYNC_END event called "name" immediately, with 0
+// or 2 associated arguments. If the category is not enabled, then this does
+// nothing.
+#define TRACE_EVENT_NESTABLE_ASYNC_END0(category_group, name, id)        \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, \
+                                   category_group, name, id,             \
+                                   TRACE_EVENT_FLAG_NONE)
+// Records a single NESTABLE_ASYNC_END event called "name" immediately, with 1
+// associated argument. If the category is not enabled, then this does nothing.
+#define TRACE_EVENT_NESTABLE_ASYNC_END1(category_group, name, id, arg1_name, \
+                                        arg1_val)                            \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_END,     \
+                                   category_group, name, id,                 \
+                                   TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_NESTABLE_ASYNC_END2(category_group, name, id, arg1_name, \
+                                        arg1_val, arg2_name, arg2_val)       \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                          \
+      TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id,        \
+      TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+
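+// Example usage (an illustrative sketch): nesting is expressed by emitting an
+// inner BEGIN/END pair, with the same |id| and category_group, while an outer
+// pair is still open:
+//   TRACE_EVENT_NESTABLE_ASYNC_BEGIN0("net", "Request", this);
+//   TRACE_EVENT_NESTABLE_ASYNC_BEGIN0("net", "Handshake", this);
+//   TRACE_EVENT_NESTABLE_ASYNC_END0("net", "Handshake", this);
+//   TRACE_EVENT_NESTABLE_ASYNC_END0("net", "Request", this);
+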
+// Records a single NESTABLE_ASYNC_INSTANT event called "name" immediately,
+// with none, one or two associated arguments. If the category is not enabled,
+// then this does nothing.
+#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT0(category_group, name, id)        \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, \
+                                   category_group, name, id,                 \
+                                   TRACE_EVENT_FLAG_NONE)
+
+#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT1(category_group, name, id,        \
+                                            arg1_name, arg1_val)             \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, \
+                                   category_group, name, id,                 \
+                                   TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+
+#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT2(                              \
+    category_group, name, id, arg1_name, arg1_val, arg2_name, arg2_val)   \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                       \
+      TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, category_group, name, id, \
+      TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_BEGIN_WITH_TTS2(                       \
+    category_group, name, id, arg1_name, arg1_val, arg2_name, arg2_val)        \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                            \
+      TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id,        \
+      TRACE_EVENT_FLAG_ASYNC_TTS | TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
+      arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_END_WITH_TTS2(                         \
+    category_group, name, id, arg1_name, arg1_val, arg2_name, arg2_val)        \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                            \
+      TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id,          \
+      TRACE_EVENT_FLAG_ASYNC_TTS | TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
+      arg2_name, arg2_val)
+
+// Similar to TRACE_EVENT_NESTABLE_ASYNC_{BEGIN,END}x but with a custom
+// |timestamp| provided.
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN_WITH_TIMESTAMP0(category_group, name, \
+                                                         id, timestamp)        \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                          \
+      TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id,        \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_NESTABLE_ASYNC_END_WITH_TIMESTAMP0(category_group, name, \
+                                                       id, timestamp)        \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                        \
+      TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id,        \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_NESTABLE_ASYNC_END_WITH_TIMESTAMP1(                    \
+    category_group, name, id, timestamp, arg1_name, arg1_val)              \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                      \
+      TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id,      \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
+      arg1_name, arg1_val)
+#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT_WITH_TIMESTAMP0(               \
+    category_group, name, id, timestamp)                                  \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                     \
+      TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, category_group, name, id, \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_BEGIN_WITH_TIMESTAMP0(          \
+    category_group, name, id, timestamp)                                \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                   \
+      TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id, \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_END_WITH_TIMESTAMP0(          \
+    category_group, name, id, timestamp)                              \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                 \
+      TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id, \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY)
+
+// Records a single FLOW_BEGIN event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+//   literals). They may not include " chars.
+// - |id| is used to match the FLOW_BEGIN event with the FLOW_END event. FLOW
+//   events are considered to match if their category_group, name and id values
+//   all match. |id| must either be a pointer or an integer value up to 64 bits.
+//   If it's a pointer, the bits will be xored with a hash of the process ID so
+//   that the same pointer on two different processes will not collide.
+// FLOW events are different from ASYNC events in how they are drawn by the
+// tracing UI. A FLOW defines asynchronous data flow, such as posting a task
+// (FLOW_BEGIN) and later executing that task (FLOW_END). Expect FLOWs to be
+// drawn as lines or arrows from FLOW_BEGIN scopes to FLOW_END scopes. Similar
+// to ASYNC, a FLOW can consist of multiple phases. The first phase is defined
+// by the FLOW_BEGIN calls. Additional phases can be defined using the FLOW_STEP
+// macros. When the operation completes, call FLOW_END. A FLOW can span
+// threads and processes, but all events in that flow must use the same |name|
+// and |id|. Each event can have its own args.
+#define TRACE_EVENT_FLOW_BEGIN0(category_group, name, id)        \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
+                                   category_group, name, id,     \
+                                   TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_FLOW_BEGIN1(category_group, name, id, arg1_name, arg1_val) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN,               \
+                                   category_group, name, id,                   \
+                                   TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_FLOW_BEGIN2(category_group, name, id, arg1_name, arg1_val, \
+                                arg2_name, arg2_val)                           \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                            \
+      TRACE_EVENT_PHASE_FLOW_BEGIN, category_group, name, id,                  \
+      TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_FLOW_BEGIN0(category_group, name, id)   \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
+                                   category_group, name, id,     \
+                                   TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_FLOW_BEGIN1(category_group, name, id, arg1_name, \
+                                     arg1_val)                            \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN,          \
+                                   category_group, name, id,              \
+                                   TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_FLOW_BEGIN2(category_group, name, id, arg1_name, \
+                                     arg1_val, arg2_name, arg2_val)       \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                       \
+      TRACE_EVENT_PHASE_FLOW_BEGIN, category_group, name, id,             \
+      TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Records a single FLOW_STEP event for |step| immediately. If the category
+// is not enabled, then this does nothing. The |name| and |id| must match the
+// FLOW_BEGIN event above. The |step| param identifies this step within the
+// flow event. This should be called at the beginning of the next phase of an
+// asynchronous operation.
+#define TRACE_EVENT_FLOW_STEP0(category_group, name, id, step)  \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP, \
+                                   category_group, name, id,    \
+                                   TRACE_EVENT_FLAG_NONE, "step", step)
+#define TRACE_EVENT_FLOW_STEP1(category_group, name, id, step, arg1_name, \
+                               arg1_val)                                  \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                       \
+      TRACE_EVENT_PHASE_FLOW_STEP, category_group, name, id,              \
+      TRACE_EVENT_FLAG_NONE, "step", step, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_FLOW_STEP0(category_group, name, id, step) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP,     \
+                                   category_group, name, id,        \
+                                   TRACE_EVENT_FLAG_COPY, "step", step)
+#define TRACE_EVENT_COPY_FLOW_STEP1(category_group, name, id, step, arg1_name, \
+                                    arg1_val)                                  \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                            \
+      TRACE_EVENT_PHASE_FLOW_STEP, category_group, name, id,                   \
+      TRACE_EVENT_FLAG_COPY, "step", step, arg1_name, arg1_val)
+
+// Records a single FLOW_END event for "name" immediately. If the category
+// is not enabled, then this does nothing.
+#define TRACE_EVENT_FLOW_END0(category_group, name, id)                        \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+                                   name, id, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_FLOW_END_BIND_TO_ENCLOSING0(category_group, name, id)      \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+                                   name, id,                                   \
+                                   TRACE_EVENT_FLAG_BIND_TO_ENCLOSING)
+#define TRACE_EVENT_FLOW_END1(category_group, name, id, arg1_name, arg1_val)   \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+                                   name, id, TRACE_EVENT_FLAG_NONE, arg1_name, \
+                                   arg1_val)
+#define TRACE_EVENT_FLOW_END2(category_group, name, id, arg1_name, arg1_val,   \
+                              arg2_name, arg2_val)                             \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+                                   name, id, TRACE_EVENT_FLAG_NONE, arg1_name, \
+                                   arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_FLOW_END0(category_group, name, id)                   \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+                                   name, id, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_FLOW_END1(category_group, name, id, arg1_name,        \
+                                   arg1_val)                                   \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+                                   name, id, TRACE_EVENT_FLAG_COPY, arg1_name, \
+                                   arg1_val)
+#define TRACE_EVENT_COPY_FLOW_END2(category_group, name, id, arg1_name,        \
+                                   arg1_val, arg2_name, arg2_val)              \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+                                   name, id, TRACE_EVENT_FLAG_COPY, arg1_name, \
+                                   arg1_val, arg2_name, arg2_val)
+
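+// Example usage (an illustrative sketch): connecting the post and run sides
+// of a task, using the task pointer as the id:
+//   // On the posting thread:
+//   TRACE_EVENT_FLOW_BEGIN0("toplevel", "PostTask", task);
+//   // On the thread that later runs the task:
+//   TRACE_EVENT_FLOW_END0("toplevel", "PostTask", task);
+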
+// Special trace event macro to trace task execution, annotated with the
+// location from which the task was posted.
+#define TRACE_TASK_EXECUTION(run_function, task) \
+  INTERNAL_TRACE_TASK_EXECUTION(run_function, task)
+
+// TRACE_EVENT_METADATA* events are information related to other
+// injected events, not events in their own right.
+#define TRACE_EVENT_METADATA1(category_group, name, arg1_name, arg1_val) \
+  INTERNAL_TRACE_EVENT_METADATA_ADD(category_group, name, arg1_name, arg1_val)
+
+// Records a clock sync event.
+#define TRACE_EVENT_CLOCK_SYNC_RECEIVER(sync_id)                               \
+  INTERNAL_TRACE_EVENT_ADD(                                                    \
+      TRACE_EVENT_PHASE_CLOCK_SYNC, "__metadata", "clock_sync",                \
+      TRACE_EVENT_FLAG_NONE, "sync_id", sync_id)
+#define TRACE_EVENT_CLOCK_SYNC_ISSUER(sync_id, issue_ts, issue_end_ts)         \
+  INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(                                     \
+      TRACE_EVENT_PHASE_CLOCK_SYNC, "__metadata", "clock_sync",                \
+      issue_end_ts, TRACE_EVENT_FLAG_NONE,                                     \
+      "sync_id", sync_id, "issue_ts", issue_ts)
+
+// Macros to track the life time and value of arbitrary client objects.
+// See also TraceTrackableObject.
+#define TRACE_EVENT_OBJECT_CREATED_WITH_ID(category_group, name, id) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                  \
+      TRACE_EVENT_PHASE_CREATE_OBJECT, category_group, name, id,     \
+      TRACE_EVENT_FLAG_NONE)
+
+#define TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(category_group, name, id, \
+                                            snapshot)                 \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                   \
+      TRACE_EVENT_PHASE_SNAPSHOT_OBJECT, category_group, name,        \
+      id, TRACE_EVENT_FLAG_NONE, "snapshot", snapshot)
+
+#define TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID_AND_TIMESTAMP(                     \
+    category_group, name, id, timestamp, snapshot)                             \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                          \
+      TRACE_EVENT_PHASE_SNAPSHOT_OBJECT, category_group, name,                 \
+      id, TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
+      "snapshot", snapshot)
+
+#define TRACE_EVENT_OBJECT_DELETED_WITH_ID(category_group, name, id) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                  \
+      TRACE_EVENT_PHASE_DELETE_OBJECT, category_group, name, id,     \
+      TRACE_EVENT_FLAG_NONE)
+
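+// Example usage (an illustrative sketch): an object traces its own lifetime,
+// using |this| as the id, and periodically snapshots its state:
+//   TRACE_EVENT_OBJECT_CREATED_WITH_ID("cc", "LayerTreeHost", this);
+//   TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID("cc", "LayerTreeHost", this, snapshot);
+//   TRACE_EVENT_OBJECT_DELETED_WITH_ID("cc", "LayerTreeHost", this);
+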
+// Records entering and leaving trace event contexts. |category_group| and
+// |name| specify the context category and type. |context| is a
+// snapshotted context object id.
+#define TRACE_EVENT_ENTER_CONTEXT(category_group, name, context)      \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                   \
+      TRACE_EVENT_PHASE_ENTER_CONTEXT, category_group, name, context, \
+      TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_LEAVE_CONTEXT(category_group, name, context)      \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                   \
+      TRACE_EVENT_PHASE_LEAVE_CONTEXT, category_group, name, context, \
+      TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_SCOPED_CONTEXT(category_group, name, context) \
+  INTERNAL_TRACE_EVENT_SCOPED_CONTEXT(category_group, name, context)
+
+// Macro to specify that two trace IDs are identical. For example,
+// TRACE_LINK_IDS(
+//     "category", "name",
+//     TRACE_ID_WITH_SCOPE("net::URLRequest", 0x1000),
+//     TRACE_ID_WITH_SCOPE("blink::ResourceFetcher::FetchRequest", 0x2000))
+// tells the trace consumer that events with ID ("net::URLRequest", 0x1000) from
+// the current process have the same ID as events with ID
+// ("blink::ResourceFetcher::FetchRequest", 0x2000).
+#define TRACE_LINK_IDS(category_group, name, id, linked_id) \
+  INTERNAL_TRACE_EVENT_ADD_LINK_IDS(category_group, name, id, linked_id);
+
+// Macro to efficiently determine if a given category group is enabled.
+#define TRACE_EVENT_CATEGORY_GROUP_ENABLED(category_group, ret)             \
+  do {                                                                      \
+    INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group);                 \
+    if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+      *ret = true;                                                          \
+    } else {                                                                \
+      *ret = false;                                                         \
+    }                                                                       \
+  } while (0)
+
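+// Example usage (an illustrative sketch): skip computing an expensive
+// argument unless the category is actually being recorded
+// (ComputeExpensiveStateString() is a hypothetical helper):
+//   bool enabled;
+//   TRACE_EVENT_CATEGORY_GROUP_ENABLED("cc.debug", &enabled);
+//   if (enabled) {
+//     TRACE_EVENT_INSTANT1("cc.debug", "State", TRACE_EVENT_SCOPE_THREAD,
+//                          "state", ComputeExpensiveStateString());
+//   }
+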
+// Macro to explicitly warm up a given category group. This is useful when a
+// category group should be initialized before any trace events for it are
+// reported, for example so that it always shows up in the "record categories"
+// list when manually selecting settings in about://tracing.
+#define TRACE_EVENT_WARMUP_CATEGORY(category_group) \
+  INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group)
+
+// Macro to efficiently determine, through polling, if a new trace has begun.
+#define TRACE_EVENT_IS_NEW_TRACE(ret)                                      \
+  do {                                                                     \
+    static int INTERNAL_TRACE_EVENT_UID(lastRecordingNumber) = 0;          \
+    int num_traces_recorded = TRACE_EVENT_API_GET_NUM_TRACES_RECORDED();   \
+    if (num_traces_recorded != -1 &&                                       \
+        num_traces_recorded !=                                             \
+            INTERNAL_TRACE_EVENT_UID(lastRecordingNumber)) {               \
+      INTERNAL_TRACE_EVENT_UID(lastRecordingNumber) = num_traces_recorded; \
+      *ret = true;                                                         \
+    } else {                                                               \
+      *ret = false;                                                        \
+    }                                                                      \
+  } while (0)
+
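+// Example usage (an illustrative sketch): re-emit per-process metadata once
+// at the start of each new trace session (EmitProcessMetadata() is a
+// hypothetical helper):
+//   bool is_new_trace;
+//   TRACE_EVENT_IS_NEW_TRACE(&is_new_trace);
+//   if (is_new_trace)
+//     EmitProcessMetadata();
+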
+// Macro for getting the real base::TimeTicks::Now() which can be overridden in
+// headless when VirtualTime is enabled.
+#define TRACE_TIME_TICKS_NOW() INTERNAL_TRACE_TIME_TICKS_NOW()
+
+// Macro for getting the real base::Time::Now() which can be overridden in
+// headless when VirtualTime is enabled.
+#define TRACE_TIME_NOW() INTERNAL_TRACE_TIME_NOW()
+
+// Notes regarding the following definitions:
+// New values can be added and propagated to third-party libraries, but
+// existing definitions must never be changed, because third-party libraries
+// may use old definitions.
+
+// Phase indicates the nature of an event entry. E.g. part of a begin/end pair.
+#define TRACE_EVENT_PHASE_BEGIN ('B')
+#define TRACE_EVENT_PHASE_END ('E')
+#define TRACE_EVENT_PHASE_COMPLETE ('X')
+#define TRACE_EVENT_PHASE_INSTANT ('I')
+#define TRACE_EVENT_PHASE_ASYNC_BEGIN ('S')
+#define TRACE_EVENT_PHASE_ASYNC_STEP_INTO ('T')
+#define TRACE_EVENT_PHASE_ASYNC_STEP_PAST ('p')
+#define TRACE_EVENT_PHASE_ASYNC_END ('F')
+#define TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN ('b')
+#define TRACE_EVENT_PHASE_NESTABLE_ASYNC_END ('e')
+#define TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT ('n')
+#define TRACE_EVENT_PHASE_FLOW_BEGIN ('s')
+#define TRACE_EVENT_PHASE_FLOW_STEP ('t')
+#define TRACE_EVENT_PHASE_FLOW_END ('f')
+#define TRACE_EVENT_PHASE_METADATA ('M')
+#define TRACE_EVENT_PHASE_COUNTER ('C')
+#define TRACE_EVENT_PHASE_SAMPLE ('P')
+#define TRACE_EVENT_PHASE_CREATE_OBJECT ('N')
+#define TRACE_EVENT_PHASE_SNAPSHOT_OBJECT ('O')
+#define TRACE_EVENT_PHASE_DELETE_OBJECT ('D')
+#define TRACE_EVENT_PHASE_MEMORY_DUMP ('v')
+#define TRACE_EVENT_PHASE_MARK ('R')
+#define TRACE_EVENT_PHASE_CLOCK_SYNC ('c')
+#define TRACE_EVENT_PHASE_ENTER_CONTEXT ('(')
+#define TRACE_EVENT_PHASE_LEAVE_CONTEXT (')')
+#define TRACE_EVENT_PHASE_LINK_IDS ('=')
+
+// Flags for changing the behavior of TRACE_EVENT_API_ADD_TRACE_EVENT.
+#define TRACE_EVENT_FLAG_NONE (static_cast<unsigned int>(0))
+#define TRACE_EVENT_FLAG_COPY (static_cast<unsigned int>(1 << 0))
+#define TRACE_EVENT_FLAG_HAS_ID (static_cast<unsigned int>(1 << 1))
+// TODO(crbug.com/639003): Free this bit after ID mangling is deprecated.
+#define TRACE_EVENT_FLAG_MANGLE_ID (static_cast<unsigned int>(1 << 2))
+#define TRACE_EVENT_FLAG_SCOPE_OFFSET (static_cast<unsigned int>(1 << 3))
+#define TRACE_EVENT_FLAG_SCOPE_EXTRA (static_cast<unsigned int>(1 << 4))
+#define TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP (static_cast<unsigned int>(1 << 5))
+#define TRACE_EVENT_FLAG_ASYNC_TTS (static_cast<unsigned int>(1 << 6))
+#define TRACE_EVENT_FLAG_BIND_TO_ENCLOSING (static_cast<unsigned int>(1 << 7))
+#define TRACE_EVENT_FLAG_FLOW_IN (static_cast<unsigned int>(1 << 8))
+#define TRACE_EVENT_FLAG_FLOW_OUT (static_cast<unsigned int>(1 << 9))
+#define TRACE_EVENT_FLAG_HAS_CONTEXT_ID (static_cast<unsigned int>(1 << 10))
+#define TRACE_EVENT_FLAG_HAS_PROCESS_ID (static_cast<unsigned int>(1 << 11))
+#define TRACE_EVENT_FLAG_HAS_LOCAL_ID (static_cast<unsigned int>(1 << 12))
+#define TRACE_EVENT_FLAG_HAS_GLOBAL_ID (static_cast<unsigned int>(1 << 13))
+
+#define TRACE_EVENT_FLAG_SCOPE_MASK                          \
+  (static_cast<unsigned int>(TRACE_EVENT_FLAG_SCOPE_OFFSET | \
+                             TRACE_EVENT_FLAG_SCOPE_EXTRA))
+
+// Type values for identifying types in the TraceValue union.
+#define TRACE_VALUE_TYPE_BOOL (static_cast<unsigned char>(1))
+#define TRACE_VALUE_TYPE_UINT (static_cast<unsigned char>(2))
+#define TRACE_VALUE_TYPE_INT (static_cast<unsigned char>(3))
+#define TRACE_VALUE_TYPE_DOUBLE (static_cast<unsigned char>(4))
+#define TRACE_VALUE_TYPE_POINTER (static_cast<unsigned char>(5))
+#define TRACE_VALUE_TYPE_STRING (static_cast<unsigned char>(6))
+#define TRACE_VALUE_TYPE_COPY_STRING (static_cast<unsigned char>(7))
+#define TRACE_VALUE_TYPE_CONVERTABLE (static_cast<unsigned char>(8))
+
+// Enum reflecting the scope of an INSTANT event. Must fit within
+// TRACE_EVENT_FLAG_SCOPE_MASK.
+#define TRACE_EVENT_SCOPE_GLOBAL (static_cast<unsigned char>(0 << 3))
+#define TRACE_EVENT_SCOPE_PROCESS (static_cast<unsigned char>(1 << 3))
+#define TRACE_EVENT_SCOPE_THREAD (static_cast<unsigned char>(2 << 3))
+
+#define TRACE_EVENT_SCOPE_NAME_GLOBAL ('g')
+#define TRACE_EVENT_SCOPE_NAME_PROCESS ('p')
+#define TRACE_EVENT_SCOPE_NAME_THREAD ('t')
+
+#endif  // BASE_TRACE_EVENT_COMMON_TRACE_EVENT_COMMON_H_
diff --git a/base/trace_event/etw_manifest/BUILD.gn b/base/trace_event/etw_manifest/BUILD.gn
new file mode 100644
index 0000000..19c4ecf
--- /dev/null
+++ b/base/trace_event/etw_manifest/BUILD.gn
@@ -0,0 +1,29 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/win/message_compiler.gni")
+
+assert(is_win, "This only runs on Windows.")
+
+message_compiler("chrome_events_win") {
+  visibility = [
+    "//base/*",
+    "//chrome:main_dll",
+  ]
+
+  sources = [
+    "chrome_events_win.man",
+  ]
+
+  user_mode_logging = true
+
+  # The only code generated from chrome_events_win.man is a header file that
+  # is included by trace_event_etw_export_win.cc, so there is no need to
+  # compile any generated code. The other thing compile_generated_code
+  # controls in this context is linking in the .res file generated from the
+  # manifest. However, this is only needed for ETW provider registration,
+  # which is done by UIforETW (https://github.com/google/UIforETW), so the
+  # manifest resource can be skipped in Chrome.
+  compile_generated_code = false
+}
diff --git a/base/trace_event/etw_manifest/chrome_events_win.man b/base/trace_event/etw_manifest/chrome_events_win.man
new file mode 100644
index 0000000..10a8ddf
--- /dev/null
+++ b/base/trace_event/etw_manifest/chrome_events_win.man
@@ -0,0 +1,84 @@
+<?xml version='1.0' encoding='utf-8' standalone='yes'?>
+<instrumentationManifest
+    xmlns="http://schemas.microsoft.com/win/2004/08/events"
+    xmlns:win="http://manifests.microsoft.com/win/2004/08/windows/events"
+    xmlns:xs="http://www.w3.org/2001/XMLSchema"
+    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+    xsi:schemaLocation="http://schemas.microsoft.com/win/2004/08/events eventman.xsd"
+    >
+  <instrumentation>
+    <events>
+      <provider
+          guid="{D2D578D9-2936-45B6-A09f-30E32715F42D}"
+          messageFileName="chrome.dll"
+          name="Chrome"
+          resourceFileName="chrome.dll"
+          symbol="CHROME"
+          >
+        <channels>
+          <importChannel
+              chid="SYSTEM"
+              name="System"
+              />
+        </channels>
+        <templates>
+          <template tid="tid_chrome_event">
+            <data
+                inType="win:AnsiString"
+                name="Name"
+                />
+            <data
+                inType="win:AnsiString"
+                name="Phase"
+                />
+            <data
+                inType="win:AnsiString"
+                name="Arg Name 1"
+                />
+            <data
+                inType="win:AnsiString"
+                name="Arg Value 1"
+                />
+            <data
+                inType="win:AnsiString"
+                name="Arg Name 2"
+                />
+            <data
+                inType="win:AnsiString"
+                name="Arg Value 2"
+                />
+            <data
+                inType="win:AnsiString"
+                name="Arg Name 3"
+                />
+            <data
+                inType="win:AnsiString"
+                name="Arg Value 3"
+                />
+          </template>
+        </templates>
+        <events>
+          <event
+              channel="SYSTEM"
+              level="win:Informational"
+              message="$(string.ChromeEvent.EventMessage)"
+              opcode="win:Info"
+              symbol="ChromeEvent"
+              template="tid_chrome_event"
+              value="1"
+              />
+        </events>
+      </provider>
+    </events>
+  </instrumentation>
+  <localization xmlns="http://schemas.microsoft.com/win/2004/08/events">
+    <resources culture="en-US">
+      <stringTable>
+        <string
+            id="ChromeEvent.EventMessage"
+            value="Chrome Event: %1 (%2)"
+            />
+      </stringTable>
+    </resources>
+  </localization>
+</instrumentationManifest>
diff --git a/base/trace_event/event_name_filter.cc b/base/trace_event/event_name_filter.cc
new file mode 100644
index 0000000..7bf932e
--- /dev/null
+++ b/base/trace_event/event_name_filter.cc
@@ -0,0 +1,26 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/event_name_filter.h"
+
+#include "base/trace_event/trace_event_impl.h"
+
+namespace base {
+namespace trace_event {
+
+// static
+const char EventNameFilter::kName[] = "event_whitelist_predicate";
+
+EventNameFilter::EventNameFilter(
+    std::unique_ptr<EventNamesWhitelist> event_names_whitelist)
+    : event_names_whitelist_(std::move(event_names_whitelist)) {}
+
+EventNameFilter::~EventNameFilter() = default;
+
+bool EventNameFilter::FilterTraceEvent(const TraceEvent& trace_event) const {
+  return event_names_whitelist_->count(trace_event.name()) != 0;
+}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/event_name_filter.h b/base/trace_event/event_name_filter.h
new file mode 100644
index 0000000..19333b3
--- /dev/null
+++ b/base/trace_event/event_name_filter.h
@@ -0,0 +1,46 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_EVENT_NAME_FILTER_H_
+#define BASE_TRACE_EVENT_EVENT_NAME_FILTER_H_
+
+#include <memory>
+#include <string>
+#include <unordered_set>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/trace_event/trace_event_filter.h"
+
+namespace base {
+namespace trace_event {
+
+class TraceEvent;
+
+// Filters trace events by checking the full name against a whitelist.
+// The current implementation is quite simple and dumb and just uses a
+// hash table, which requires a char* to std::string conversion. It could be
+// smarter and use a bloom filter trie. However, today this is used too rarely
+// to justify that cost.
+class BASE_EXPORT EventNameFilter : public TraceEventFilter {
+ public:
+  using EventNamesWhitelist = std::unordered_set<std::string>;
+  static const char kName[];
+
+  explicit EventNameFilter(
+      std::unique_ptr<EventNamesWhitelist> event_names_whitelist);
+  ~EventNameFilter() override;
+
+  // TraceEventFilter implementation.
+  bool FilterTraceEvent(const TraceEvent&) const override;
+
+ private:
+  std::unique_ptr<const EventNamesWhitelist> event_names_whitelist_;
+
+  DISALLOW_COPY_AND_ASSIGN(EventNameFilter);
+};
+
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_EVENT_NAME_FILTER_H_
diff --git a/base/trace_event/event_name_filter_unittest.cc b/base/trace_event/event_name_filter_unittest.cc
new file mode 100644
index 0000000..134be0d
--- /dev/null
+++ b/base/trace_event/event_name_filter_unittest.cc
@@ -0,0 +1,42 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/event_name_filter.h"
+
+#include "base/memory/ptr_util.h"
+#include "base/trace_event/trace_event_impl.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace trace_event {
+
+const TraceEvent& MakeTraceEvent(const char* name) {
+  static TraceEvent event;
+  event.Reset();
+  event.Initialize(0, TimeTicks(), ThreadTicks(), 'b', nullptr, name, "", 0, 0,
+                   0, nullptr, nullptr, nullptr, nullptr, 0);
+  return event;
+}
+
+TEST(TraceEventNameFilterTest, Whitelist) {
+  auto empty_whitelist =
+      std::make_unique<EventNameFilter::EventNamesWhitelist>();
+  auto filter = std::make_unique<EventNameFilter>(std::move(empty_whitelist));
+
+  // No events should be filtered if the whitelist is empty.
+  EXPECT_FALSE(filter->FilterTraceEvent(MakeTraceEvent("foo")));
+
+  auto whitelist = std::make_unique<EventNameFilter::EventNamesWhitelist>();
+  whitelist->insert("foo");
+  whitelist->insert("bar");
+  filter = std::make_unique<EventNameFilter>(std::move(whitelist));
+  EXPECT_TRUE(filter->FilterTraceEvent(MakeTraceEvent("foo")));
+  EXPECT_FALSE(filter->FilterTraceEvent(MakeTraceEvent("fooz")));
+  EXPECT_FALSE(filter->FilterTraceEvent(MakeTraceEvent("afoo")));
+  EXPECT_TRUE(filter->FilterTraceEvent(MakeTraceEvent("bar")));
+  EXPECT_FALSE(filter->FilterTraceEvent(MakeTraceEvent("foobar")));
+}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/heap_profiler.h b/base/trace_event/heap_profiler.h
new file mode 100644
index 0000000..c8deaf6
--- /dev/null
+++ b/base/trace_event/heap_profiler.h
@@ -0,0 +1,119 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_HEAP_PROFILER_H_
+#define BASE_TRACE_EVENT_HEAP_PROFILER_H_
+
+#include "base/compiler_specific.h"
+#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
+
+// This header file defines the set of macros that are used to track memory
+// usage in the heap profiler. These are in addition to the macros defined in
+// trace_event.h and are specific to the heap profiler. This file also defines
+// implementation details of these macros.
+
+// Implementation detail: heap profiler macros create temporary variables to
+// keep instrumentation overhead low. These macros give each temporary variable
+// a unique name based on the line number to prevent name collisions.
+#define INTERNAL_HEAP_PROFILER_UID3(a, b) heap_profiler_unique_##a##b
+#define INTERNAL_HEAP_PROFILER_UID2(a, b) INTERNAL_HEAP_PROFILER_UID3(a, b)
+#define INTERNAL_HEAP_PROFILER_UID(name_prefix) \
+  INTERNAL_HEAP_PROFILER_UID2(name_prefix, __LINE__)
+
+// Scoped tracker for task execution context in the heap profiler.
+#define TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION \
+  trace_event_internal::HeapProfilerScopedTaskExecutionTracker
+
+// Scoped tracker that tracks the given program counter as a native stack frame
+// in the heap profiler.
+#define TRACE_HEAP_PROFILER_API_SCOPED_WITH_PROGRAM_COUNTER \
+  trace_event_internal::HeapProfilerScopedStackFrame
+
+// A scoped ignore event used to tell the heap profiler to ignore all the
+// allocations in the scope. It is useful for excluding allocations made for
+// tracing from the heap profiler dumps.
+#define HEAP_PROFILER_SCOPED_IGNORE                                          \
+  trace_event_internal::HeapProfilerScopedIgnore INTERNAL_HEAP_PROFILER_UID( \
+      scoped_ignore)
+
+namespace trace_event_internal {
+
+// HeapProfilerScopedTaskExecutionTracker records the current task's context in
+// the heap profiler.
+class HeapProfilerScopedTaskExecutionTracker {
+ public:
+  inline explicit HeapProfilerScopedTaskExecutionTracker(
+      const char* task_context)
+      : context_(task_context) {
+    using base::trace_event::AllocationContextTracker;
+    if (UNLIKELY(AllocationContextTracker::capture_mode() !=
+                 AllocationContextTracker::CaptureMode::DISABLED)) {
+      AllocationContextTracker::GetInstanceForCurrentThread()
+          ->PushCurrentTaskContext(context_);
+    }
+  }
+
+  inline ~HeapProfilerScopedTaskExecutionTracker() {
+    using base::trace_event::AllocationContextTracker;
+    if (UNLIKELY(AllocationContextTracker::capture_mode() !=
+                 AllocationContextTracker::CaptureMode::DISABLED)) {
+      AllocationContextTracker::GetInstanceForCurrentThread()
+          ->PopCurrentTaskContext(context_);
+    }
+  }
+
+ private:
+  const char* context_;
+};
+
+class HeapProfilerScopedStackFrame {
+ public:
+  inline explicit HeapProfilerScopedStackFrame(const void* program_counter)
+      : program_counter_(program_counter) {
+    using base::trace_event::AllocationContextTracker;
+    if (UNLIKELY(AllocationContextTracker::capture_mode() ==
+                 AllocationContextTracker::CaptureMode::MIXED_STACK)) {
+      AllocationContextTracker::GetInstanceForCurrentThread()
+          ->PushNativeStackFrame(program_counter_);
+    }
+  }
+
+  inline ~HeapProfilerScopedStackFrame() {
+    using base::trace_event::AllocationContextTracker;
+    if (UNLIKELY(AllocationContextTracker::capture_mode() ==
+                 AllocationContextTracker::CaptureMode::MIXED_STACK)) {
+      AllocationContextTracker::GetInstanceForCurrentThread()
+          ->PopNativeStackFrame(program_counter_);
+    }
+  }
+
+ private:
+  const void* const program_counter_;
+};
+
+class BASE_EXPORT HeapProfilerScopedIgnore {
+ public:
+  inline HeapProfilerScopedIgnore() {
+    using base::trace_event::AllocationContextTracker;
+    if (UNLIKELY(
+            AllocationContextTracker::capture_mode() !=
+            AllocationContextTracker::CaptureMode::DISABLED)) {
+      AllocationContextTracker::GetInstanceForCurrentThread()
+          ->begin_ignore_scope();
+    }
+  }
+  inline ~HeapProfilerScopedIgnore() {
+    using base::trace_event::AllocationContextTracker;
+    if (UNLIKELY(
+            AllocationContextTracker::capture_mode() !=
+            AllocationContextTracker::CaptureMode::DISABLED)) {
+      AllocationContextTracker::GetInstanceForCurrentThread()
+          ->end_ignore_scope();
+    }
+  }
+};
+
+}  // namespace trace_event_internal
+
+#endif  // BASE_TRACE_EVENT_HEAP_PROFILER_H_
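As a usage illustration (not part of this diff; RunTask and kMyTaskContext are hypothetical names), the macros above are instantiated as scoped locals, so pushing and popping happens automatically at scope boundaries:

#include "base/trace_event/heap_profiler.h"

// Hypothetical task-queue hook; the context string must outlive the scope.
static const char kMyTaskContext[] = "MyTaskQueue";

void RunTask(void (*task)()) {
  // Allocations made while |task| runs are tagged with kMyTaskContext.
  TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION tracker(kMyTaskContext);
  task();
  {
    // Bookkeeping allocations in this block are excluded from heap dumps.
    HEAP_PROFILER_SCOPED_IGNORE;
    // ... tracing-internal work ...
  }
}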
diff --git a/base/trace_event/heap_profiler_allocation_context.cc b/base/trace_event/heap_profiler_allocation_context.cc
new file mode 100644
index 0000000..bdc3c58
--- /dev/null
+++ b/base/trace_event/heap_profiler_allocation_context.cc
@@ -0,0 +1,88 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/heap_profiler_allocation_context.h"
+
+#include <algorithm>
+#include <cstring>
+
+#include "base/hash.h"
+#include "base/macros.h"
+
+namespace base {
+namespace trace_event {
+
+bool operator<(const StackFrame& lhs, const StackFrame& rhs) {
+  return lhs.value < rhs.value;
+}
+
+bool operator==(const StackFrame& lhs, const StackFrame& rhs) {
+  return lhs.value == rhs.value;
+}
+
+bool operator!=(const StackFrame& lhs, const StackFrame& rhs) {
+  return !(lhs == rhs);
+}
+
+Backtrace::Backtrace() = default;
+
+bool operator==(const Backtrace& lhs, const Backtrace& rhs) {
+  if (lhs.frame_count != rhs.frame_count) return false;
+  return std::equal(lhs.frames, lhs.frames + lhs.frame_count, rhs.frames);
+}
+
+bool operator!=(const Backtrace& lhs, const Backtrace& rhs) {
+  return !(lhs == rhs);
+}
+
+AllocationContext::AllocationContext() : type_name(nullptr) {}
+
+AllocationContext::AllocationContext(const Backtrace& backtrace,
+                                     const char* type_name)
+    : backtrace(backtrace), type_name(type_name) {}
+
+bool operator==(const AllocationContext& lhs, const AllocationContext& rhs) {
+  return (lhs.backtrace == rhs.backtrace) && (lhs.type_name == rhs.type_name);
+}
+
+bool operator!=(const AllocationContext& lhs, const AllocationContext& rhs) {
+  return !(lhs == rhs);
+}
+
+}  // namespace trace_event
+}  // namespace base
+
+namespace std {
+
+using base::trace_event::AllocationContext;
+using base::trace_event::Backtrace;
+using base::trace_event::StackFrame;
+
+size_t hash<StackFrame>::operator()(const StackFrame& frame) const {
+  return hash<const void*>()(frame.value);
+}
+
+size_t hash<Backtrace>::operator()(const Backtrace& backtrace) const {
+  const void* values[Backtrace::kMaxFrameCount];
+  for (size_t i = 0; i != backtrace.frame_count; ++i) {
+    values[i] = backtrace.frames[i].value;
+  }
+  return base::PersistentHash(values, backtrace.frame_count * sizeof(*values));
+}
+
+size_t hash<AllocationContext>::operator()(const AllocationContext& ctx) const {
+  size_t backtrace_hash = hash<Backtrace>()(ctx.backtrace);
+
+  // Multiplicative hash from [Knuth 1998]. Works best if |size_t| is 32 bits,
+  // because the magic number is a prime very close to 2^32 / golden ratio, but
+  // will still redistribute keys bijectively on 64-bit architectures because
+  // the magic number is coprime to 2^64.
+  size_t type_hash = reinterpret_cast<size_t>(ctx.type_name) * 2654435761;
+
+  // Multiply one side to break the commutativity of +. Multiplication with a
+  // number coprime to |numeric_limits<size_t>::max() + 1| is bijective so
+  // randomness is preserved.
+  return (backtrace_hash * 3) + type_hash;
+}
+
+}  // namespace std
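The combining step above can be exercised in isolation. A standalone sketch with made-up inputs (none of these values come from the diff), showing that multiplying one side by 3 makes the combination order-sensitive, unlike a plain sum:

#include <cassert>
#include <cstddef>

int main() {
  size_t backtrace_hash = 0x1234u;  // Pretend output of hash<Backtrace>.
  // Pretend type_name pointer, scaled by the Knuth multiplier as above.
  size_t type_hash = static_cast<size_t>(0x5678u) * 2654435761u;
  size_t combined = (backtrace_hash * 3) + type_hash;
  // Swapping the operands changes the result, so the two fields do not
  // collide the way they would under a commutative a + b.
  assert(combined != (type_hash * 3) + backtrace_hash);
  return 0;
}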
diff --git a/base/trace_event/heap_profiler_allocation_context.h b/base/trace_event/heap_profiler_allocation_context.h
new file mode 100644
index 0000000..c35663f
--- /dev/null
+++ b/base/trace_event/heap_profiler_allocation_context.h
@@ -0,0 +1,132 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_CONTEXT_H_
+#define BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_CONTEXT_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <functional>
+
+#include "base/base_export.h"
+
+namespace base {
+namespace trace_event {
+
+// When heap profiling is enabled, tracing keeps track of the allocation
+// context for each allocation intercepted. It is generated by the
+// |AllocationContextTracker| which keeps stacks of context in TLS.
+// The tracker is initialized lazily.
+
+// The backtrace in the allocation context is a snapshot of the stack. For now,
+// this is the pseudo stack where frames are created by trace event macros. In
+// the future, we might add the option to use the native call stack. In that
+// case, |Backtrace| and |AllocationContextTracker::GetContextSnapshot| might
+// have different implementations that can be selected by a compile time flag.
+
+// The number of stack frames stored in the backtrace is a trade off between
+// memory used for tracing and accuracy. Measurements done on a prototype
+// revealed that:
+//
+// - In 60 percent of the cases, pseudo stack depth <= 7.
+// - In 87 percent of the cases, pseudo stack depth <= 9.
+// - In 95 percent of the cases, pseudo stack depth <= 11.
+//
+// See the design doc (https://goo.gl/4s7v7b) for more details.
+
+// Represents a (pseudo) stack frame. Used in the Backtrace class below.
+//
+// Conceptually, a stack frame is identified by its value, and the type is
+// used mostly to format the value properly. The value is expected to be a
+// valid pointer from the process' address space.
+struct BASE_EXPORT StackFrame {
+  enum class Type {
+    TRACE_EVENT_NAME,   // const char* string
+    THREAD_NAME,        // const char* thread name
+    PROGRAM_COUNTER,    // as returned by stack tracing (e.g. by StackTrace)
+  };
+
+  static StackFrame FromTraceEventName(const char* name) {
+    return {Type::TRACE_EVENT_NAME, name};
+  }
+  static StackFrame FromThreadName(const char* name) {
+    return {Type::THREAD_NAME, name};
+  }
+  static StackFrame FromProgramCounter(const void* pc) {
+    return {Type::PROGRAM_COUNTER, pc};
+  }
+
+  Type type;
+  const void* value;
+};
+
+bool BASE_EXPORT operator<(const StackFrame& lhs, const StackFrame& rhs);
+bool BASE_EXPORT operator==(const StackFrame& lhs, const StackFrame& rhs);
+bool BASE_EXPORT operator!=(const StackFrame& lhs, const StackFrame& rhs);
+
+struct BASE_EXPORT Backtrace {
+  Backtrace();
+
+  // If the stack is higher than what can be stored here, the top frames
+  // (the ones further from main()) are stored. Depth of 12 is enough for most
+  // pseudo traces (see above), but not for native traces, where we need more.
+  enum { kMaxFrameCount = 48 };
+  StackFrame frames[kMaxFrameCount];
+  size_t frame_count = 0;
+};
+
+bool BASE_EXPORT operator==(const Backtrace& lhs, const Backtrace& rhs);
+bool BASE_EXPORT operator!=(const Backtrace& lhs, const Backtrace& rhs);
+
+// The |AllocationContext| is context metadata that is kept for every allocation
+// when heap profiling is enabled. To simplify memory management for book-
+// keeping, this struct has a fixed size.
+struct BASE_EXPORT AllocationContext {
+  AllocationContext();
+  AllocationContext(const Backtrace& backtrace, const char* type_name);
+
+  Backtrace backtrace;
+
+  // Type name of the type stored in the allocated memory. A null pointer
+  // indicates "unknown type". Grouping is done by comparing pointers, not by
+  // deep string comparison. In a component build, where a type name can have a
+  // string literal in several dynamic libraries, this may distort grouping.
+  const char* type_name;
+};
+
+bool BASE_EXPORT operator==(const AllocationContext& lhs,
+                            const AllocationContext& rhs);
+bool BASE_EXPORT operator!=(const AllocationContext& lhs,
+                            const AllocationContext& rhs);
+
+// Struct to store the size and count of the allocations.
+struct AllocationMetrics {
+  size_t size;
+  size_t count;
+};
+
+}  // namespace trace_event
+}  // namespace base
+
+namespace std {
+
+template <>
+struct BASE_EXPORT hash<base::trace_event::StackFrame> {
+  size_t operator()(const base::trace_event::StackFrame& frame) const;
+};
+
+template <>
+struct BASE_EXPORT hash<base::trace_event::Backtrace> {
+  size_t operator()(const base::trace_event::Backtrace& backtrace) const;
+};
+
+template <>
+struct BASE_EXPORT hash<base::trace_event::AllocationContext> {
+  size_t operator()(const base::trace_event::AllocationContext& context) const;
+};
+
+}  // namespace std
+
+#endif  // BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_CONTEXT_H_
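Because operator== and the std::hash specializations above exist, AllocationContext can directly key standard unordered containers. A small sketch assuming only the header above; RecordAlloc is an illustrative helper, not Chrome code:

#include <cstddef>
#include <unordered_map>

using base::trace_event::AllocationContext;
using base::trace_event::AllocationMetrics;

// Aggregates per-context totals, e.g. while assembling a heap dump.
void RecordAlloc(
    std::unordered_map<AllocationContext, AllocationMetrics>* totals,
    const AllocationContext& ctx,
    size_t bytes) {
  AllocationMetrics& m = (*totals)[ctx];  // Value-initialized to {0, 0}.
  m.size += bytes;
  m.count += 1;
}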
diff --git a/base/trace_event/heap_profiler_allocation_context_tracker.cc b/base/trace_event/heap_profiler_allocation_context_tracker.cc
new file mode 100644
index 0000000..556719e
--- /dev/null
+++ b/base/trace_event/heap_profiler_allocation_context_tracker.cc
@@ -0,0 +1,274 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
+
+#include <algorithm>
+#include <iterator>
+
+#include "base/atomicops.h"
+#include "base/debug/debugging_buildflags.h"
+#include "base/debug/leak_annotations.h"
+#include "base/debug/stack_trace.h"
+#include "base/no_destructor.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread_local_storage.h"
+#include "base/trace_event/heap_profiler_allocation_context.h"
+#include "build/build_config.h"
+
+#if defined(OS_ANDROID) && BUILDFLAG(CAN_UNWIND_WITH_CFI_TABLE)
+#include "base/trace_event/cfi_backtrace_android.h"
+#endif
+
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+#include <sys/prctl.h>
+#endif
+
+namespace base {
+namespace trace_event {
+
+subtle::Atomic32 AllocationContextTracker::capture_mode_ =
+    static_cast<int32_t>(AllocationContextTracker::CaptureMode::DISABLED);
+
+namespace {
+
+const size_t kMaxStackDepth = 128u;
+const size_t kMaxTaskDepth = 16u;
+AllocationContextTracker* const kInitializingSentinel =
+    reinterpret_cast<AllocationContextTracker*>(-1);
+
+// This function is added to the TLS slot to clean up the instance when the
+// thread exits.
+void DestructAllocationContextTracker(void* alloc_ctx_tracker) {
+  delete static_cast<AllocationContextTracker*>(alloc_ctx_tracker);
+}
+
+ThreadLocalStorage::Slot& AllocationContextTrackerTLS() {
+  static NoDestructor<ThreadLocalStorage::Slot> tls_alloc_ctx_tracker(
+      &DestructAllocationContextTracker);
+  return *tls_alloc_ctx_tracker;
+}
+
+// Cannot call ThreadIdNameManager::GetName because it takes a lock, which
+// deadlocks if ThreadIdNameManager already held the lock when the current
+// allocation happened. Instead, gets the thread name from the kernel if
+// available, or returns a string with the thread id. This function
+// intentionally leaks the allocated strings since they are used to tag
+// allocations even after the thread dies.
+const char* GetAndLeakThreadName() {
+  char name[16];
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+  // The thread name might not have been registered (e.g. if the thread
+  // started before heap profiling was enabled), so try to read it from the
+  // kernel via prctl.
+  int err = prctl(PR_GET_NAME, name);
+  if (!err) {
+    return strdup(name);
+  }
+#endif  // defined(OS_LINUX) || defined(OS_ANDROID)
+
+  // Use tid if we don't have a thread name.
+  snprintf(name, sizeof(name), "%lu",
+           static_cast<unsigned long>(PlatformThread::CurrentId()));
+  return strdup(name);
+}
+
+}  // namespace
+
+// static
+AllocationContextTracker*
+AllocationContextTracker::GetInstanceForCurrentThread() {
+  AllocationContextTracker* tracker = static_cast<AllocationContextTracker*>(
+      AllocationContextTrackerTLS().Get());
+  if (tracker == kInitializingSentinel)
+    return nullptr;  // Re-entrancy case.
+
+  if (!tracker) {
+    AllocationContextTrackerTLS().Set(kInitializingSentinel);
+    tracker = new AllocationContextTracker();
+    AllocationContextTrackerTLS().Set(tracker);
+  }
+
+  return tracker;
+}
+
+AllocationContextTracker::AllocationContextTracker()
+    : thread_name_(nullptr), ignore_scope_depth_(0) {
+  tracked_stack_.reserve(kMaxStackDepth);
+  task_contexts_.reserve(kMaxTaskDepth);
+}
+AllocationContextTracker::~AllocationContextTracker() = default;
+
+// static
+void AllocationContextTracker::SetCurrentThreadName(const char* name) {
+  if (name && capture_mode() != CaptureMode::DISABLED) {
+    GetInstanceForCurrentThread()->thread_name_ = name;
+  }
+}
+
+// static
+void AllocationContextTracker::SetCaptureMode(CaptureMode mode) {
+  // Release ordering ensures that when a thread observes a non-DISABLED
+  // |capture_mode_| through an acquire load, the TLS slot has been
+  // initialized.
+  subtle::Release_Store(&capture_mode_, static_cast<int32_t>(mode));
+}
+
+void AllocationContextTracker::PushPseudoStackFrame(
+    AllocationContextTracker::PseudoStackFrame stack_frame) {
+  // Impose a limit on the height to verify that every push is popped, because
+  // in practice the pseudo stack never grows higher than ~20 frames.
+  if (tracked_stack_.size() < kMaxStackDepth) {
+    tracked_stack_.push_back(
+        StackFrame::FromTraceEventName(stack_frame.trace_event_name));
+  } else {
+    NOTREACHED();
+  }
+}
+
+void AllocationContextTracker::PopPseudoStackFrame(
+    AllocationContextTracker::PseudoStackFrame stack_frame) {
+  // Guard for stack underflow. If tracing was started with a TRACE_EVENT in
+  // scope, the frame was never pushed, so it is possible that pop is called
+  // on an empty stack.
+  if (tracked_stack_.empty())
+    return;
+
+  tracked_stack_.pop_back();
+}
+
+void AllocationContextTracker::PushNativeStackFrame(const void* pc) {
+  if (tracked_stack_.size() < kMaxStackDepth)
+    tracked_stack_.push_back(StackFrame::FromProgramCounter(pc));
+  else
+    NOTREACHED();
+}
+
+void AllocationContextTracker::PopNativeStackFrame(const void* pc) {
+  if (tracked_stack_.empty())
+    return;
+
+  DCHECK_EQ(pc, tracked_stack_.back().value);
+  tracked_stack_.pop_back();
+}
+
+void AllocationContextTracker::PushCurrentTaskContext(const char* context) {
+  DCHECK(context);
+  if (task_contexts_.size() < kMaxTaskDepth)
+    task_contexts_.push_back(context);
+  else
+    NOTREACHED();
+}
+
+void AllocationContextTracker::PopCurrentTaskContext(const char* context) {
+  // Guard for stack underflow. If tracing was started with a TRACE_EVENT in
+  // scope, the context was never pushed, so it is possible that pop is called
+  // on an empty stack.
+  if (task_contexts_.empty())
+    return;
+
+  DCHECK_EQ(context, task_contexts_.back())
+      << "Encountered an unmatched context end";
+  task_contexts_.pop_back();
+}
+
+bool AllocationContextTracker::GetContextSnapshot(AllocationContext* ctx) {
+  if (ignore_scope_depth_)
+    return false;
+
+  CaptureMode mode = static_cast<CaptureMode>(
+      subtle::NoBarrier_Load(&capture_mode_));
+
+  auto* backtrace = std::begin(ctx->backtrace.frames);
+  auto* backtrace_end = std::end(ctx->backtrace.frames);
+
+  if (!thread_name_) {
+    // Ignore the string allocation made by GetAndLeakThreadName to avoid
+    // reentrancy.
+    ignore_scope_depth_++;
+    thread_name_ = GetAndLeakThreadName();
+    ANNOTATE_LEAKING_OBJECT_PTR(thread_name_);
+    DCHECK(thread_name_);
+    ignore_scope_depth_--;
+  }
+
+  // Add the thread name as the first entry in pseudo stack.
+  if (thread_name_) {
+    *backtrace++ = StackFrame::FromThreadName(thread_name_);
+  }
+
+  switch (mode) {
+    case CaptureMode::DISABLED:
+      {
+        break;
+      }
+    case CaptureMode::PSEUDO_STACK:
+    case CaptureMode::MIXED_STACK:
+      {
+        for (const StackFrame& stack_frame : tracked_stack_) {
+          if (backtrace == backtrace_end)
+            break;
+          *backtrace++ = stack_frame;
+        }
+        break;
+      }
+    case CaptureMode::NATIVE_STACK:
+      {
+// Backtrace contract requires us to return bottom frames, i.e.
+// from main() and up. Stack unwinding produces top frames, i.e.
+// from this point and up until main(). We intentionally request
+// kMaxFrameCount + 1 frames, so that we know if there are more frames
+// than our backtrace capacity.
+#if !defined(OS_NACL)  // We don't build base/debug/stack_trace.cc for NaCl.
+#if defined(OS_ANDROID) && BUILDFLAG(CAN_UNWIND_WITH_CFI_TABLE)
+        const void* frames[Backtrace::kMaxFrameCount + 1];
+        static_assert(arraysize(frames) >= Backtrace::kMaxFrameCount,
+                      "not requesting enough frames to fill Backtrace");
+        size_t frame_count =
+            CFIBacktraceAndroid::GetInitializedInstance()->Unwind(
+                frames, arraysize(frames));
+#elif BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
+        const void* frames[Backtrace::kMaxFrameCount + 1];
+        static_assert(arraysize(frames) >= Backtrace::kMaxFrameCount,
+                      "not requesting enough frames to fill Backtrace");
+        size_t frame_count = debug::TraceStackFramePointers(
+            frames, arraysize(frames),
+            1 /* exclude this function from the trace */);
+#else
+        // Fall-back to capturing the stack with base::debug::StackTrace,
+        // which is likely slower, but more reliable.
+        base::debug::StackTrace stack_trace(Backtrace::kMaxFrameCount + 1);
+        size_t frame_count = 0u;
+        const void* const* frames = stack_trace.Addresses(&frame_count);
+#endif
+
+        // If there are too many frames, keep the ones furthest from main().
+        size_t backtrace_capacity = backtrace_end - backtrace;
+        int32_t starting_frame_index = frame_count;
+        if (frame_count > backtrace_capacity) {
+          starting_frame_index = backtrace_capacity - 1;
+          *backtrace++ = StackFrame::FromTraceEventName("<truncated>");
+        }
+        for (int32_t i = starting_frame_index - 1; i >= 0; --i) {
+          const void* frame = frames[i];
+          *backtrace++ = StackFrame::FromProgramCounter(frame);
+        }
+#endif  // !defined(OS_NACL)
+        break;
+      }
+  }
+
+  ctx->backtrace.frame_count = backtrace - std::begin(ctx->backtrace.frames);
+
+  // TODO(ssid): Fix crbug.com/594803 to add file name as 3rd dimension
+  // (component name) in the heap profiler and not piggy back on the type name.
+  if (!task_contexts_.empty()) {
+    ctx->type_name = task_contexts_.back();
+  } else {
+    ctx->type_name = nullptr;
+  }
+
+  return true;
+}
+
+}  // namespace trace_event
+}  // namespace base
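The NATIVE_STACK truncation above is easy to get backwards, so here is a standalone sketch of the same policy (toy types, not Chrome code), assuming the unwinder returns frames innermost-first: with capacity 4 and 6 unwound frames, the result is a "<truncated>" marker followed by the 3 innermost frames written bottom-up:

#include <cassert>
#include <string>
#include <vector>

std::vector<std::string> BuildBacktrace(const std::vector<std::string>& frames,
                                        size_t capacity) {
  std::vector<std::string> out;
  size_t start = frames.size();
  if (frames.size() > capacity) {
    start = capacity - 1;            // Reserve one slot for the marker.
    out.push_back("<truncated>");
  }
  for (size_t i = start; i-- > 0;)   // Reverse so the innermost frame is last.
    out.push_back(frames[i]);
  return out;
}

int main() {
  // frames[0] is the innermost frame, i.e. the one furthest from main().
  std::vector<std::string> unwound = {"f5", "f4", "f3", "f2", "f1", "main"};
  std::vector<std::string> result = BuildBacktrace(unwound, 4);
  assert((result ==
          std::vector<std::string>{"<truncated>", "f3", "f4", "f5"}));
  return 0;
}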
diff --git a/base/trace_event/heap_profiler_allocation_context_tracker.h b/base/trace_event/heap_profiler_allocation_context_tracker.h
new file mode 100644
index 0000000..da03b7f
--- /dev/null
+++ b/base/trace_event/heap_profiler_allocation_context_tracker.h
@@ -0,0 +1,140 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_CONTEXT_TRACKER_H_
+#define BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_CONTEXT_TRACKER_H_
+
+#include <vector>
+
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/trace_event/heap_profiler_allocation_context.h"
+
+namespace base {
+namespace trace_event {
+
+// AllocationContextTracker is a thread-local object. Its main purpose is to
+// keep track of a pseudo stack of trace events. Chrome has been instrumented
+// with lots of `TRACE_EVENT` macros. These trace events push their name to a
+// thread-local stack when they go into scope, and pop when they go out of
+// scope, if all of the following conditions have been met:
+//
+//  * A trace is being recorded.
+//  * The category of the event is enabled in the trace config.
+//  * Heap profiling is enabled (with the `--enable-heap-profiling` flag).
+//
+// This means that allocations that occur before tracing is started will not
+// have backtrace information in their context.
+//
+// AllocationContextTracker also keeps track of some thread state not related to
+// trace events. See |AllocationContext|.
+//
+// A thread-local instance of the context tracker is initialized lazily when it
+// is first accessed. This might be because a trace event was pushed or popped,
+// or because `GetContextSnapshot()` was called when an allocation occurred.
+class BASE_EXPORT AllocationContextTracker {
+ public:
+  enum class CaptureMode : int32_t {
+    DISABLED,      // Don't capture anything
+    PSEUDO_STACK,  // Backtrace has trace events
+    MIXED_STACK,   // Backtrace has trace events + from
+                   // HeapProfilerScopedStackFrame
+    NATIVE_STACK,  // Backtrace has full native backtraces from stack unwinding
+  };
+
+  // Stack frame constructed from trace events in codebase.
+  struct BASE_EXPORT PseudoStackFrame {
+    const char* trace_event_category;
+    const char* trace_event_name;
+
+    bool operator==(const PseudoStackFrame& other) const {
+      return trace_event_category == other.trace_event_category &&
+             trace_event_name == other.trace_event_name;
+    }
+  };
+
+  // Globally sets capturing mode.
+  // TODO(primiano): How to guard against *_STACK -> DISABLED -> *_STACK?
+  static void SetCaptureMode(CaptureMode mode);
+
+  // Returns global capturing mode.
+  inline static CaptureMode capture_mode() {
+    // A little lag after heap profiling is enabled or disabled is fine; it is
+    // more important that the check is as cheap as possible when capturing is
+    // not enabled, so do not issue a memory barrier in the fast path.
+    if (subtle::NoBarrier_Load(&capture_mode_) ==
+            static_cast<int32_t>(CaptureMode::DISABLED))
+      return CaptureMode::DISABLED;
+
+    // In the slow path, an acquire load is required to pair with the release
+    // store in |SetCaptureMode|. This is to ensure that the TLS slot for
+    // the thread-local allocation context tracker has been initialized if
+    // |capture_mode| returns something other than DISABLED.
+    return static_cast<CaptureMode>(subtle::Acquire_Load(&capture_mode_));
+  }
+
+  // Returns the thread-local instance, creating one if necessary. Always
+  // returns a valid instance, unless it is called re-entrantly, in which case
+  // it returns nullptr in the nested calls.
+  static AllocationContextTracker* GetInstanceForCurrentThread();
+
+  // Set the thread name in the AllocationContextTracker of the current thread
+  // if capture is enabled.
+  static void SetCurrentThreadName(const char* name);
+
+  // Starts and ends an ignore scope; allocations made within it are ignored
+  // by the heap profiler. GetContextSnapshot() returns false while
+  // allocations are being ignored.
+  void begin_ignore_scope() { ignore_scope_depth_++; }
+  void end_ignore_scope() {
+    if (ignore_scope_depth_)
+      ignore_scope_depth_--;
+  }
+
+  // Pushes and pops a frame onto the thread-local pseudo stack.
+  // TODO(ssid): Change PseudoStackFrame to const char*. Only event name is
+  // used.
+  void PushPseudoStackFrame(PseudoStackFrame stack_frame);
+  void PopPseudoStackFrame(PseudoStackFrame stack_frame);
+
+  // Pushes and pops a native stack frame onto thread local tracked stack.
+  void PushNativeStackFrame(const void* pc);
+  void PopNativeStackFrame(const void* pc);
+
+  // Push and pop current task's context. A stack is used to support nested
+  // tasks and the top of the stack will be used in allocation context.
+  void PushCurrentTaskContext(const char* context);
+  void PopCurrentTaskContext(const char* context);
+
+  // Fills a snapshot of the current thread-local context. Returns false
+  // without filling the snapshot if allocations are being ignored.
+  bool GetContextSnapshot(AllocationContext* snapshot);
+
+  ~AllocationContextTracker();
+
+ private:
+  AllocationContextTracker();
+
+  static subtle::Atomic32 capture_mode_;
+
+  // The pseudo stack where frames are |TRACE_EVENT| names or inserted PCs.
+  std::vector<StackFrame> tracked_stack_;
+
+  // The thread name is used as the first entry in the pseudo stack.
+  const char* thread_name_;
+
+  // Stack of tasks' contexts. Context serves as a different dimension than
+  // pseudo stack to cluster allocations.
+  std::vector<const char*> task_contexts_;
+
+  uint32_t ignore_scope_depth_;
+
+  DISALLOW_COPY_AND_ASSIGN(AllocationContextTracker);
+};
+
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_CONTEXT_TRACKER_H_
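The two-step load in capture_mode() is a common fast-path pattern. An illustrative restatement with std::atomic (the real code uses base::subtle; this sketch is not a drop-in replacement):

#include <atomic>
#include <cstdint>

std::atomic<int32_t> g_mode{0};  // 0 plays the role of DISABLED here.

void SetMode(int32_t mode) {
  // Release: publishes all prior initialization (e.g. a TLS slot).
  g_mode.store(mode, std::memory_order_release);
}

int32_t GetMode() {
  // Relaxed on the hot path: no barrier while profiling is off.
  if (g_mode.load(std::memory_order_relaxed) == 0)
    return 0;
  // Acquire pairs with the release store above, so anything initialized
  // before SetMode() is visible once an enabled mode is observed.
  return g_mode.load(std::memory_order_acquire);
}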
diff --git a/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc b/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
new file mode 100644
index 0000000..c26149e
--- /dev/null
+++ b/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
@@ -0,0 +1,350 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+
+#include <iterator>
+
+#include "base/memory/ref_counted.h"
+#include "base/pending_task.h"
+#include "base/trace_event/heap_profiler.h"
+#include "base/trace_event/heap_profiler_allocation_context.h"
+#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
+#include "base/trace_event/memory_dump_manager.h"
+#include "base/trace_event/trace_event.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace trace_event {
+
+// Define all strings once, because the pseudo stack requires pointer equality,
+// and string interning is unreliable.
+const char kThreadName[] = "TestThread";
+const char kCupcake[] = "Cupcake";
+const char kDonut[] = "Donut";
+const char kEclair[] = "Eclair";
+const char kFroyo[] = "Froyo";
+const char kGingerbread[] = "Gingerbread";
+
+const char kFilteringTraceConfig[] =
+    "{"
+    "  \"event_filters\": ["
+    "    {"
+    "      \"excluded_categories\": [],"
+    "      \"filter_args\": {},"
+    "      \"filter_predicate\": \"heap_profiler_predicate\","
+    "      \"included_categories\": ["
+    "        \"*\","
+    "        \"" TRACE_DISABLED_BY_DEFAULT("Testing") "\"]"
+    "    }"
+    "  ]"
+    "}";
+
+// Asserts that the fixed-size array |expected_backtrace| matches the backtrace
+// in |AllocationContextTracker::GetContextSnapshot|.
+template <size_t N>
+void AssertBacktraceEquals(const StackFrame (&expected_backtrace)[N]) {
+  AllocationContext ctx;
+  ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
+                  ->GetContextSnapshot(&ctx));
+
+  auto* actual = std::begin(ctx.backtrace.frames);
+  auto* actual_bottom = actual + ctx.backtrace.frame_count;
+  auto expected = std::begin(expected_backtrace);
+  auto expected_bottom = std::end(expected_backtrace);
+
+  // Note that this requires the pointers to be equal; this is not doing a deep
+  // string comparison.
+  for (; actual != actual_bottom && expected != expected_bottom;
+       actual++, expected++)
+    ASSERT_EQ(*expected, *actual);
+
+  // Ensure that the height of the stacks is the same.
+  ASSERT_EQ(actual, actual_bottom);
+  ASSERT_EQ(expected, expected_bottom);
+}
+
+void AssertBacktraceContainsOnlyThreadName() {
+  StackFrame t = StackFrame::FromThreadName(kThreadName);
+  AllocationContext ctx;
+  ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
+                  ->GetContextSnapshot(&ctx));
+
+  ASSERT_EQ(1u, ctx.backtrace.frame_count);
+  ASSERT_EQ(t, ctx.backtrace.frames[0]);
+}
+
+class AllocationContextTrackerTest : public testing::Test {
+ public:
+  void SetUp() override {
+    AllocationContextTracker::SetCaptureMode(
+        AllocationContextTracker::CaptureMode::PSEUDO_STACK);
+    // Enable the filtering trace config, which installs the heap profiler
+    // event filter that captures the pseudo stack.
+    TraceConfig config(kFilteringTraceConfig);
+    TraceLog::GetInstance()->SetEnabled(config, TraceLog::FILTERING_MODE);
+    AllocationContextTracker::SetCurrentThreadName(kThreadName);
+  }
+
+  void TearDown() override {
+    AllocationContextTracker::SetCaptureMode(
+        AllocationContextTracker::CaptureMode::DISABLED);
+    TraceLog::GetInstance()->SetDisabled(TraceLog::FILTERING_MODE);
+  }
+};
+
+// Check that |TRACE_EVENT| macros push and pop to the pseudo stack correctly.
+TEST_F(AllocationContextTrackerTest, PseudoStackScopedTrace) {
+  StackFrame t = StackFrame::FromThreadName(kThreadName);
+  StackFrame c = StackFrame::FromTraceEventName(kCupcake);
+  StackFrame d = StackFrame::FromTraceEventName(kDonut);
+  StackFrame e = StackFrame::FromTraceEventName(kEclair);
+  StackFrame f = StackFrame::FromTraceEventName(kFroyo);
+
+  AssertBacktraceContainsOnlyThreadName();
+
+  {
+    TRACE_EVENT0("Testing", kCupcake);
+    StackFrame frame_c[] = {t, c};
+    AssertBacktraceEquals(frame_c);
+
+    {
+      TRACE_EVENT0("Testing", kDonut);
+      StackFrame frame_cd[] = {t, c, d};
+      AssertBacktraceEquals(frame_cd);
+    }
+
+    AssertBacktraceEquals(frame_c);
+
+    {
+      TRACE_EVENT0("Testing", kEclair);
+      StackFrame frame_ce[] = {t, c, e};
+      AssertBacktraceEquals(frame_ce);
+    }
+
+    {
+      TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("NotTesting"), kDonut);
+      TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("Testing"), kCupcake);
+      StackFrame frame_cc[] = {t, c, c};
+      AssertBacktraceEquals(frame_cc);
+    }
+
+    AssertBacktraceEquals(frame_c);
+  }
+
+  AssertBacktraceContainsOnlyThreadName();
+
+  {
+    TRACE_EVENT0("Testing", kFroyo);
+    StackFrame frame_f[] = {t, f};
+    AssertBacktraceEquals(frame_f);
+  }
+
+  AssertBacktraceContainsOnlyThreadName();
+}
+
+// Same as |PseudoStackScopedTrace|, but now test the |TRACE_EVENT_BEGIN| and
+// |TRACE_EVENT_END| macros.
+TEST_F(AllocationContextTrackerTest, PseudoStackBeginEndTrace) {
+  StackFrame t = StackFrame::FromThreadName(kThreadName);
+  StackFrame c = StackFrame::FromTraceEventName(kCupcake);
+  StackFrame d = StackFrame::FromTraceEventName(kDonut);
+  StackFrame e = StackFrame::FromTraceEventName(kEclair);
+  StackFrame f = StackFrame::FromTraceEventName(kFroyo);
+
+  StackFrame frame_c[] = {t, c};
+  StackFrame frame_cd[] = {t, c, d};
+  StackFrame frame_ce[] = {t, c, e};
+  StackFrame frame_f[] = {t, f};
+
+  AssertBacktraceContainsOnlyThreadName();
+
+  TRACE_EVENT_BEGIN0("Testing", kCupcake);
+  AssertBacktraceEquals(frame_c);
+
+  TRACE_EVENT_BEGIN0("Testing", kDonut);
+  AssertBacktraceEquals(frame_cd);
+  TRACE_EVENT_END0("Testing", kDonut);
+
+  AssertBacktraceEquals(frame_c);
+
+  TRACE_EVENT_BEGIN0("Testing", kEclair);
+  AssertBacktraceEquals(frame_ce);
+  TRACE_EVENT_END0("Testing", kEclair);
+
+  AssertBacktraceEquals(frame_c);
+  TRACE_EVENT_END0("Testing", kCupcake);
+
+  AssertBacktraceContainsOnlyThreadName();
+
+  TRACE_EVENT_BEGIN0("Testing", kFroyo);
+  AssertBacktraceEquals(frame_f);
+  TRACE_EVENT_END0("Testing", kFroyo);
+
+  AssertBacktraceContainsOnlyThreadName();
+}
+
+TEST_F(AllocationContextTrackerTest, PseudoStackMixedTrace) {
+  StackFrame t = StackFrame::FromThreadName(kThreadName);
+  StackFrame c = StackFrame::FromTraceEventName(kCupcake);
+  StackFrame d = StackFrame::FromTraceEventName(kDonut);
+  StackFrame e = StackFrame::FromTraceEventName(kEclair);
+  StackFrame f = StackFrame::FromTraceEventName(kFroyo);
+
+  StackFrame frame_c[] = {t, c};
+  StackFrame frame_cd[] = {t, c, d};
+  StackFrame frame_e[] = {t, e};
+  StackFrame frame_ef[] = {t, e, f};
+
+  AssertBacktraceContainsOnlyThreadName();
+
+  TRACE_EVENT_BEGIN0("Testing", kCupcake);
+  AssertBacktraceEquals(frame_c);
+
+  {
+    TRACE_EVENT0("Testing", kDonut);
+    AssertBacktraceEquals(frame_cd);
+  }
+
+  AssertBacktraceEquals(frame_c);
+  TRACE_EVENT_END0("Testing", kCupcake);
+  AssertBacktraceContainsOnlyThreadName();
+
+  {
+    TRACE_EVENT0("Testing", kEclair);
+    AssertBacktraceEquals(frame_e);
+
+    TRACE_EVENT_BEGIN0("Testing", kFroyo);
+    AssertBacktraceEquals(frame_ef);
+    TRACE_EVENT_END0("Testing", kFroyo);
+    AssertBacktraceEquals(frame_e);
+  }
+
+  AssertBacktraceContainsOnlyThreadName();
+}
+
+TEST_F(AllocationContextTrackerTest, MixedStackWithProgramCounter) {
+  StackFrame t = StackFrame::FromThreadName(kThreadName);
+  StackFrame c = StackFrame::FromTraceEventName(kCupcake);
+  StackFrame f = StackFrame::FromTraceEventName(kFroyo);
+  const void* pc1 = reinterpret_cast<void*>(0x1000);
+  const void* pc2 = reinterpret_cast<void*>(0x2000);
+  StackFrame n1 = StackFrame::FromProgramCounter(pc1);
+  StackFrame n2 = StackFrame::FromProgramCounter(pc2);
+
+  StackFrame frame_c[] = {t, c};
+  StackFrame frame_cd[] = {t, c, n1};
+  StackFrame frame_e[] = {t, n2, n1};
+  StackFrame frame_ef[] = {t, n2, n1, f};
+
+  AssertBacktraceContainsOnlyThreadName();
+
+  AllocationContextTracker::SetCaptureMode(
+      AllocationContextTracker::CaptureMode::MIXED_STACK);
+
+  TRACE_EVENT_BEGIN0("Testing", kCupcake);
+  AssertBacktraceEquals(frame_c);
+
+  {
+    TRACE_HEAP_PROFILER_API_SCOPED_WITH_PROGRAM_COUNTER e1(pc1);
+    AssertBacktraceEquals(frame_cd);
+  }
+
+  AssertBacktraceEquals(frame_c);
+  TRACE_EVENT_END0("Testing", kCupcake);
+  AssertBacktraceContainsOnlyThreadName();
+
+  {
+    TRACE_HEAP_PROFILER_API_SCOPED_WITH_PROGRAM_COUNTER e1(pc2);
+    TRACE_HEAP_PROFILER_API_SCOPED_WITH_PROGRAM_COUNTER e2(pc1);
+    AssertBacktraceEquals(frame_e);
+
+    TRACE_EVENT0("Testing", kFroyo);
+    AssertBacktraceEquals(frame_ef);
+  }
+
+  AssertBacktraceContainsOnlyThreadName();
+  AllocationContextTracker::SetCaptureMode(
+      AllocationContextTracker::CaptureMode::DISABLED);
+}
+
+TEST_F(AllocationContextTrackerTest, BacktraceTakesTop) {
+  StackFrame t = StackFrame::FromThreadName(kThreadName);
+  StackFrame c = StackFrame::FromTraceEventName(kCupcake);
+  StackFrame f = StackFrame::FromTraceEventName(kFroyo);
+
+  // Push 11 events onto the pseudo stack.
+  TRACE_EVENT0("Testing", kCupcake);
+  TRACE_EVENT0("Testing", kCupcake);
+  TRACE_EVENT0("Testing", kCupcake);
+
+  TRACE_EVENT0("Testing", kCupcake);
+  TRACE_EVENT0("Testing", kCupcake);
+  TRACE_EVENT0("Testing", kCupcake);
+  TRACE_EVENT0("Testing", kCupcake);
+
+  TRACE_EVENT0("Testing", kCupcake);
+  TRACE_EVENT0("Testing", kDonut);
+  TRACE_EVENT0("Testing", kEclair);
+  TRACE_EVENT0("Testing", kFroyo);
+
+  {
+    TRACE_EVENT0("Testing", kGingerbread);
+    AllocationContext ctx;
+    ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
+                    ->GetContextSnapshot(&ctx));
+
+    // The pseudo stack relies on pointer equality, not deep string comparisons.
+    ASSERT_EQ(t, ctx.backtrace.frames[0]);
+    ASSERT_EQ(c, ctx.backtrace.frames[1]);
+    ASSERT_EQ(f, ctx.backtrace.frames[11]);
+  }
+
+  {
+    AllocationContext ctx;
+    ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
+                    ->GetContextSnapshot(&ctx));
+    ASSERT_EQ(t, ctx.backtrace.frames[0]);
+    ASSERT_EQ(c, ctx.backtrace.frames[1]);
+    ASSERT_EQ(f, ctx.backtrace.frames[11]);
+  }
+}
+
+TEST_F(AllocationContextTrackerTest, TrackCategoryName) {
+  const char kContext1[] = "context1";
+  const char kContext2[] = "context2";
+  {
+    // The context from the scoped task event should be used as type name.
+    TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION event1(kContext1);
+    AllocationContext ctx1;
+    ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
+                    ->GetContextSnapshot(&ctx1));
+    ASSERT_EQ(kContext1, ctx1.type_name);
+
+    // In case of nested events, the last event's context should be used.
+    TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION event2(kContext2);
+    AllocationContext ctx2;
+    ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
+                    ->GetContextSnapshot(&ctx2));
+    ASSERT_EQ(kContext2, ctx2.type_name);
+  }
+
+  // Type should be nullptr without task event.
+  AllocationContext ctx;
+  ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
+                  ->GetContextSnapshot(&ctx));
+  ASSERT_FALSE(ctx.type_name);
+}
+
+TEST_F(AllocationContextTrackerTest, IgnoreAllocationTest) {
+  TRACE_EVENT0("Testing", kCupcake);
+  TRACE_EVENT0("Testing", kDonut);
+  HEAP_PROFILER_SCOPED_IGNORE;
+  AllocationContext ctx;
+  ASSERT_FALSE(AllocationContextTracker::GetInstanceForCurrentThread()
+                   ->GetContextSnapshot(&ctx));
+}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/heap_profiler_event_filter.cc b/base/trace_event/heap_profiler_event_filter.cc
new file mode 100644
index 0000000..937072c
--- /dev/null
+++ b/base/trace_event/heap_profiler_event_filter.cc
@@ -0,0 +1,70 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/heap_profiler_event_filter.h"
+
+#include "base/trace_event/category_registry.h"
+#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
+#include "base/trace_event/trace_category.h"
+#include "base/trace_event/trace_event.h"
+#include "base/trace_event/trace_event_impl.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+inline bool IsPseudoStackEnabled() {
+  // Only PSEUDO_STACK and MIXED_STACK modes require trace events.
+  return AllocationContextTracker::capture_mode() ==
+             AllocationContextTracker::CaptureMode::PSEUDO_STACK ||
+         AllocationContextTracker::capture_mode() ==
+             AllocationContextTracker::CaptureMode::MIXED_STACK;
+}
+
+inline AllocationContextTracker* GetThreadLocalTracker() {
+  return AllocationContextTracker::GetInstanceForCurrentThread();
+}
+
+}  // namespace
+
+// static
+const char HeapProfilerEventFilter::kName[] = "heap_profiler_predicate";
+
+HeapProfilerEventFilter::HeapProfilerEventFilter() = default;
+HeapProfilerEventFilter::~HeapProfilerEventFilter() = default;
+
+bool HeapProfilerEventFilter::FilterTraceEvent(
+    const TraceEvent& trace_event) const {
+  if (!IsPseudoStackEnabled())
+    return true;
+
+  // TODO(primiano): Add support for events with copied name crbug.com/581079.
+  if (trace_event.flags() & TRACE_EVENT_FLAG_COPY)
+    return true;
+
+  const auto* category = CategoryRegistry::GetCategoryByStatePtr(
+      trace_event.category_group_enabled());
+  AllocationContextTracker::PseudoStackFrame frame = {category->name(),
+                                                      trace_event.name()};
+  if (trace_event.phase() == TRACE_EVENT_PHASE_BEGIN ||
+      trace_event.phase() == TRACE_EVENT_PHASE_COMPLETE) {
+    GetThreadLocalTracker()->PushPseudoStackFrame(frame);
+  } else if (trace_event.phase() == TRACE_EVENT_PHASE_END) {
+    // The pop for |TRACE_EVENT_PHASE_COMPLETE| events is in |EndEvent|.
+    GetThreadLocalTracker()->PopPseudoStackFrame(frame);
+  }
+  // Do not filter out any events and always return true. TraceLog adds the
+  // event only if it is enabled for recording.
+  return true;
+}
+
+void HeapProfilerEventFilter::EndEvent(const char* category_name,
+                                       const char* event_name) const {
+  if (IsPseudoStackEnabled())
+    GetThreadLocalTracker()->PopPseudoStackFrame({category_name, event_name});
+}
+
+}  // namespace trace_event
+}  // namespace base
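The phase handling above has one subtlety: COMPLETE events push in FilterTraceEvent but pop later via EndEvent. A toy model of that lifecycle (plain strings instead of TraceEvent; not Chrome code):

#include <cassert>
#include <string>
#include <vector>

enum class Phase { kBegin, kEnd, kComplete };

std::vector<std::string> g_pseudo_stack;

void OnFilterTraceEvent(Phase phase, const std::string& name) {
  if (phase == Phase::kBegin || phase == Phase::kComplete)
    g_pseudo_stack.push_back(name);         // BEGIN and COMPLETE both push.
  else if (phase == Phase::kEnd && !g_pseudo_stack.empty())
    g_pseudo_stack.pop_back();              // END pops immediately.
}

void OnEndEvent(const std::string& name) {  // COMPLETE pops here instead.
  if (!g_pseudo_stack.empty() && g_pseudo_stack.back() == name)
    g_pseudo_stack.pop_back();
}

int main() {
  OnFilterTraceEvent(Phase::kComplete, "Task");  // TRACE_EVENT0 enters scope.
  OnFilterTraceEvent(Phase::kBegin, "Step");     // TRACE_EVENT_BEGIN0.
  assert(g_pseudo_stack.size() == 2);
  OnFilterTraceEvent(Phase::kEnd, "Step");       // TRACE_EVENT_END0.
  OnEndEvent("Task");                            // COMPLETE scope closes.
  assert(g_pseudo_stack.empty());
  return 0;
}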
diff --git a/base/trace_event/heap_profiler_event_filter.h b/base/trace_event/heap_profiler_event_filter.h
new file mode 100644
index 0000000..47368a1
--- /dev/null
+++ b/base/trace_event/heap_profiler_event_filter.h
@@ -0,0 +1,40 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_HEAP_PROFILER_EVENT_FILTER_H_
+#define BASE_TRACE_EVENT_HEAP_PROFILER_EVENT_FILTER_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/trace_event/trace_event_filter.h"
+
+namespace base {
+namespace trace_event {
+
+class TraceEvent;
+
+// This filter unconditionally accepts all events and pushes them onto / pops
+// them from the thread-local AllocationContextTracker instance as they are
+// seen. This is used to cheaply construct the heap profiler pseudo stack
+// without having to actually record all events.
+class BASE_EXPORT HeapProfilerEventFilter : public TraceEventFilter {
+ public:
+  static const char kName[];
+
+  HeapProfilerEventFilter();
+  ~HeapProfilerEventFilter() override;
+
+  // TraceEventFilter implementation.
+  bool FilterTraceEvent(const TraceEvent& trace_event) const override;
+  void EndEvent(const char* category_name,
+                const char* event_name) const override;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(HeapProfilerEventFilter);
+};
+
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_HEAP_PROFILER_EVENT_FILTER_H_
diff --git a/base/trace_event/heap_profiler_heap_dump_writer.cc b/base/trace_event/heap_profiler_heap_dump_writer.cc
new file mode 100644
index 0000000..71c3d97
--- /dev/null
+++ b/base/trace_event/heap_profiler_heap_dump_writer.cc
@@ -0,0 +1,323 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/heap_profiler_heap_dump_writer.h"
+
+#include <stdint.h>
+
+#include <algorithm>
+#include <iterator>
+#include <tuple>
+#include <unordered_map>
+#include <utility>
+#include <vector>
+
+#include "base/format_macros.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/strings/stringprintf.h"
+#include "base/trace_event/heap_profiler_serialization_state.h"
+#include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
+#include "base/trace_event/heap_profiler_type_name_deduplicator.h"
+#include "base/trace_event/trace_config.h"
+#include "base/trace_event/trace_event.h"
+#include "base/trace_event/trace_event_argument.h"
+#include "base/trace_event/trace_log.h"
+
+// Most of what the |HeapDumpWriter| does is aggregating detailed information
+// about the heap and deciding what to dump. The input to this process is a
+// list of (|AllocationContext|, size) pairs.
+//
+// The pairs are grouped into |Bucket|s. A bucket is a group of (context, size)
+// pairs where the properties of the contexts share a prefix. (Type name is
+// considered a list of length one here.) First all pairs are put into one
+// bucket that represents the entire heap. Then this bucket is recursively
+// broken down into smaller buckets. Each bucket keeps track of whether further
+// breakdown is possible.
+
+namespace base {
+namespace trace_event {
+namespace internal {
+namespace {
+
+// Denotes a property of |AllocationContext| to break down by.
+enum class BreakDownMode { kByBacktrace, kByTypeName };
+
+// A group of bytes for which the context shares a prefix.
+struct Bucket {
+  Bucket()
+      : size(0),
+        count(0),
+        backtrace_cursor(0),
+        is_broken_down_by_type_name(false) {}
+
+  std::vector<std::pair<const AllocationContext*, AllocationMetrics>>
+      metrics_by_context;
+
+  // The sum of the sizes of |metrics_by_context|.
+  size_t size;
+
+  // The sum of number of allocations of |metrics_by_context|.
+  size_t count;
+
+  // The index of the first stack frame that has not yet been broken down by.
+  // For all elements in this bucket, the stack frames 0 up to (but not
+  // including) the cursor must be equal.
+  size_t backtrace_cursor;
+
+  // When true, the type name for all elements in this bucket must be equal.
+  bool is_broken_down_by_type_name;
+};
+
+// Comparison operator to order buckets by their size.
+bool operator<(const Bucket& lhs, const Bucket& rhs) {
+  return lhs.size < rhs.size;
+}
+
+// Groups the allocations in the bucket by |break_by|. The buckets in the
+// returned list will have |backtrace_cursor| advanced or
+// |is_broken_down_by_type_name| set depending on the property to group by.
+std::vector<Bucket> GetSubbuckets(const Bucket& bucket,
+                                  BreakDownMode break_by) {
+  std::unordered_map<const void*, Bucket> breakdown;
+
+  if (break_by == BreakDownMode::kByBacktrace) {
+    for (const auto& context_and_metrics : bucket.metrics_by_context) {
+      const Backtrace& backtrace = context_and_metrics.first->backtrace;
+      const StackFrame* begin = std::begin(backtrace.frames);
+      const StackFrame* end = begin + backtrace.frame_count;
+      const StackFrame* cursor = begin + bucket.backtrace_cursor;
+
+      DCHECK_LE(cursor, end);
+
+      if (cursor != end) {
+        Bucket& subbucket = breakdown[cursor->value];
+        subbucket.size += context_and_metrics.second.size;
+        subbucket.count += context_and_metrics.second.count;
+        subbucket.metrics_by_context.push_back(context_and_metrics);
+        subbucket.backtrace_cursor = bucket.backtrace_cursor + 1;
+        subbucket.is_broken_down_by_type_name =
+            bucket.is_broken_down_by_type_name;
+        DCHECK_GT(subbucket.size, 0u);
+        DCHECK_GT(subbucket.count, 0u);
+      }
+    }
+  } else if (break_by == BreakDownMode::kByTypeName) {
+    if (!bucket.is_broken_down_by_type_name) {
+      for (const auto& context_and_metrics : bucket.metrics_by_context) {
+        const AllocationContext* context = context_and_metrics.first;
+        Bucket& subbucket = breakdown[context->type_name];
+        subbucket.size += context_and_metrics.second.size;
+        subbucket.count += context_and_metrics.second.count;
+        subbucket.metrics_by_context.push_back(context_and_metrics);
+        subbucket.backtrace_cursor = bucket.backtrace_cursor;
+        subbucket.is_broken_down_by_type_name = true;
+        DCHECK_GT(subbucket.size, 0u);
+        DCHECK_GT(subbucket.count, 0u);
+      }
+    }
+  }
+
+  std::vector<Bucket> buckets;
+  buckets.reserve(breakdown.size());
+  for (const auto& key_bucket : breakdown)
+    buckets.push_back(key_bucket.second);
+
+  return buckets;
+}
+
+// Breaks down the bucket by |break_by|. Returns only buckets that contribute
+// more than |min_size_bytes| to the total size. The long tail is omitted.
+std::vector<Bucket> BreakDownBy(const Bucket& bucket,
+                                BreakDownMode break_by,
+                                size_t min_size_bytes) {
+  std::vector<Bucket> buckets = GetSubbuckets(bucket, break_by);
+
+  // Ensure that |buckets| is a max-heap (the data structure, not memory heap),
+  // so its front contains the largest bucket. Buckets should be iterated
+  // ordered by size, but sorting the vector is overkill because the long tail
+  // of small buckets will be discarded. By using a max-heap, the optimal case
+  // where all but the first bucket are discarded is O(n). The worst case where
+  // no bucket is discarded is doing a heap sort, which is O(n log n).
+  std::make_heap(buckets.begin(), buckets.end());
+
+  // Keep including buckets until adding one would increase the number of
+  // bytes accounted for by less than |min_size_bytes|. The large buckets end
+  // up in [it, end()); [begin(), it) is the part that still contains the
+  // max-heap of small buckets.
+  std::vector<Bucket>::iterator it;
+  for (it = buckets.end(); it != buckets.begin(); --it) {
+    if (buckets.front().size < min_size_bytes)
+      break;
+
+    // Put the largest bucket in [begin, it) at |it - 1| and max-heapify
+    // [begin, it - 1). This puts the next largest bucket at |buckets.front()|.
+    std::pop_heap(buckets.begin(), it);
+  }
+
+  // At this point, |buckets| looks like this (numbers are bucket sizes):
+  //
+  // <-- max-heap of small buckets --->
+  //                                  <-- large buckets by ascending size -->
+  // [ 19 | 11 | 13 | 7 | 2 | 5 | ... | 83 | 89 | 97 ]
+  //   ^                                ^              ^
+  //   |                                |              |
+  //   begin()                          it             end()
+
+  // Discard the long tail of buckets that each contribute less than
+  // |min_size_bytes|.
+  buckets.erase(buckets.begin(), it);
+
+  return buckets;
+}
+
+}  // namespace
+
+bool operator<(Entry lhs, Entry rhs) {
+  // There is no need to compare |size|. If the backtrace and type name are
+  // equal then the sizes must be equal as well.
+  return std::tie(lhs.stack_frame_id, lhs.type_id) <
+         std::tie(rhs.stack_frame_id, rhs.type_id);
+}
+
+HeapDumpWriter::HeapDumpWriter(StackFrameDeduplicator* stack_frame_deduplicator,
+                               TypeNameDeduplicator* type_name_deduplicator,
+                               uint32_t breakdown_threshold_bytes)
+    : stack_frame_deduplicator_(stack_frame_deduplicator),
+      type_name_deduplicator_(type_name_deduplicator),
+      breakdown_threshold_bytes_(breakdown_threshold_bytes) {}
+
+HeapDumpWriter::~HeapDumpWriter() = default;
+
+bool HeapDumpWriter::AddEntryForBucket(const Bucket& bucket) {
+  // The contexts in the bucket are all different, but the [begin, cursor) range
+  // is equal for all contexts in the bucket, and the type names are the same if
+  // |is_broken_down_by_type_name| is set.
+  DCHECK(!bucket.metrics_by_context.empty());
+
+  const AllocationContext* context = bucket.metrics_by_context.front().first;
+
+  const StackFrame* backtrace_begin = std::begin(context->backtrace.frames);
+  const StackFrame* backtrace_end = backtrace_begin + bucket.backtrace_cursor;
+  DCHECK_LE(bucket.backtrace_cursor, arraysize(context->backtrace.frames));
+
+  Entry entry;
+  entry.stack_frame_id =
+      stack_frame_deduplicator_->Insert(backtrace_begin, backtrace_end);
+
+  // Deduplicate the type name, or use ID -1 if this bucket is not broken
+  // down by type name.
+  entry.type_id = bucket.is_broken_down_by_type_name
+                      ? type_name_deduplicator_->Insert(context->type_name)
+                      : -1;
+
+  entry.size = bucket.size;
+  entry.count = bucket.count;
+
+  auto position_and_inserted = entries_.insert(entry);
+  return position_and_inserted.second;
+}
+
+void HeapDumpWriter::BreakDown(const Bucket& bucket) {
+  auto by_backtrace = BreakDownBy(bucket, BreakDownMode::kByBacktrace,
+                                  breakdown_threshold_bytes_);
+  auto by_type_name = BreakDownBy(bucket, BreakDownMode::kByTypeName,
+                                  breakdown_threshold_bytes_);
+
+  // Insert entries for the buckets. If a bucket was not present before, it has
+  // not been broken down before, so recursively continue breaking down in that
+  // case. There might be multiple routes to the same entry (first break down
+  // by type name, then by backtrace, or first by backtrace and then by type),
+  // so a set is used to avoid dumping and breaking down entries more than once.
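+  // For example (illustrative): the entry for {bt: [BrowserMain], type: int}
+  // can be reached by breaking down by backtrace and then by type name, or in
+  // the reverse order; |AddEntryForBucket| returns false on the second route,
+  // which stops the duplicate recursion.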
+
+  for (const Bucket& subbucket : by_backtrace)
+    if (AddEntryForBucket(subbucket))
+      BreakDown(subbucket);
+
+  for (const Bucket& subbucket : by_type_name)
+    if (AddEntryForBucket(subbucket))
+      BreakDown(subbucket);
+}
+
+const std::set<Entry>& HeapDumpWriter::Summarize(
+    const std::unordered_map<AllocationContext, AllocationMetrics>&
+        metrics_by_context) {
+  // Start with one bucket that represents the entire heap. Iterate by
+  // reference, because the allocation contexts are going to point to allocation
+  // contexts stored in |metrics_by_context|.
+  Bucket root_bucket;
+  for (const auto& context_and_metrics : metrics_by_context) {
+    DCHECK_GT(context_and_metrics.second.size, 0u);
+    DCHECK_GT(context_and_metrics.second.count, 0u);
+    const AllocationContext* context = &context_and_metrics.first;
+    root_bucket.metrics_by_context.push_back(
+        std::make_pair(context, context_and_metrics.second));
+    root_bucket.size += context_and_metrics.second.size;
+    root_bucket.count += context_and_metrics.second.count;
+  }
+
+  AddEntryForBucket(root_bucket);
+
+  // Recursively break down the heap and fill |entries_| with entries to dump.
+  BreakDown(root_bucket);
+
+  return entries_;
+}
+
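+// Example of the serialized output (an illustrative sketch): one entry with
+// size 0x20, count 5, stack frame ID 1 and type ID 2 serializes to
+//
+//   {"entries":[{"size":"20","count":"5","bt":"1","type":"2"}]}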
+std::unique_ptr<TracedValue> Serialize(const std::set<Entry>& entries) {
+  std::string buffer;
+  std::unique_ptr<TracedValue> traced_value(new TracedValue);
+
+  traced_value->BeginArray("entries");
+
+  for (const Entry& entry : entries) {
+    traced_value->BeginDictionary();
+
+    // Format size as hexadecimal string into |buffer|.
+    SStringPrintf(&buffer, "%" PRIx64, static_cast<uint64_t>(entry.size));
+    traced_value->SetString("size", buffer);
+
+    SStringPrintf(&buffer, "%" PRIx64, static_cast<uint64_t>(entry.count));
+    traced_value->SetString("count", buffer);
+
+    if (entry.stack_frame_id == -1) {
+      // An empty backtrace (which will have ID -1) is represented by the empty
+      // string, because there is no leaf frame to reference in |stackFrames|.
+      traced_value->SetString("bt", "");
+    } else {
+      // Format index of the leaf frame as a string, because |stackFrames| is a
+      // dictionary, not an array.
+      SStringPrintf(&buffer, "%i", entry.stack_frame_id);
+      traced_value->SetString("bt", buffer);
+    }
+
+    // Type ID -1 (cumulative size for all types) is represented by the absence
+    // of the "type" key in the dictionary.
+    if (entry.type_id != -1) {
+      // Format the type ID as a string.
+      SStringPrintf(&buffer, "%i", entry.type_id);
+      traced_value->SetString("type", buffer);
+    }
+
+    traced_value->EndDictionary();
+  }
+
+  traced_value->EndArray();  // "entries"
+  return traced_value;
+}
+
+}  // namespace internal
+
+std::unique_ptr<TracedValue> ExportHeapDump(
+    const std::unordered_map<AllocationContext, AllocationMetrics>&
+        metrics_by_context,
+    const HeapProfilerSerializationState& heap_profiler_serialization_state) {
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("memory-infra"), "ExportHeapDump");
+  internal::HeapDumpWriter writer(
+      heap_profiler_serialization_state.stack_frame_deduplicator(),
+      heap_profiler_serialization_state.type_name_deduplicator(),
+      heap_profiler_serialization_state
+          .heap_profiler_breakdown_threshold_bytes());
+  return Serialize(writer.Summarize(metrics_by_context));
+}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/heap_profiler_heap_dump_writer.h b/base/trace_event/heap_profiler_heap_dump_writer.h
new file mode 100644
index 0000000..3366c28
--- /dev/null
+++ b/base/trace_event/heap_profiler_heap_dump_writer.h
@@ -0,0 +1,115 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_HEAP_PROFILER_HEAP_DUMP_WRITER_H_
+#define BASE_TRACE_EVENT_HEAP_PROFILER_HEAP_DUMP_WRITER_H_
+
+#include <stddef.h>
+
+#include <memory>
+#include <set>
+#include <unordered_map>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/trace_event/heap_profiler_allocation_context.h"
+
+namespace base {
+namespace trace_event {
+
+class HeapProfilerSerializationState;
+class StackFrameDeduplicator;
+class TracedValue;
+class TypeNameDeduplicator;
+
+// Aggregates |metrics_by_context|, recursively breaks down the heap, and
+// returns a traced value with an "entries" array that can be dumped in the
+// trace log, following the format described in https://goo.gl/KY7zVE. The
+// number of entries is kept reasonable because long tails are not included.
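+//
+// A minimal usage sketch (illustrative; |metrics| is assumed to be filled
+// from a heap bookkeeping structure and |state| to be an initialized
+// HeapProfilerSerializationState):
+//
+//   std::unordered_map<AllocationContext, AllocationMetrics> metrics = ...;
+//   std::unique_ptr<TracedValue> dump = ExportHeapDump(metrics, state);
+//   std::string json;
+//   dump->AppendAsTraceFormat(&json);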
+BASE_EXPORT std::unique_ptr<TracedValue> ExportHeapDump(
+    const std::unordered_map<AllocationContext, AllocationMetrics>&
+        metrics_by_context,
+    const HeapProfilerSerializationState& heap_profiler_serialization_state);
+
+namespace internal {
+
+namespace {
+struct Bucket;
+}
+
+// An entry in the "entries" array as described in https://goo.gl/KY7zVE.
+struct BASE_EXPORT Entry {
+  size_t size;
+  size_t count;
+
+  // References a backtrace in the stack frame deduplicator. -1 means empty
+  // backtrace (the root of the tree).
+  int stack_frame_id;
+
+  // References a type name in the type name deduplicator. -1 indicates that
+  // the size is the cumulative size for all types (the root of the tree).
+  int type_id;
+};
+
+// Comparison operator to enable putting |Entry| in a |std::set|.
+BASE_EXPORT bool operator<(Entry lhs, Entry rhs);
+
+// Serializes entries to an "entries" array in a traced value.
+BASE_EXPORT std::unique_ptr<TracedValue> Serialize(const std::set<Entry>& dump);
+
+// Helper class to dump a snapshot of an |AllocationRegister| or other heap
+// bookkeeping structure into a |TracedValue|. This class is intended to be
+// used as a one-shot local instance on the stack.
+class BASE_EXPORT HeapDumpWriter {
+ public:
+  // The |stack_frame_deduplicator| and |type_name_deduplicator| are not owned.
+  // The heap dump writer assumes exclusive access to them during the lifetime
+  // of the dump writer. The heap dumps are broken down for allocations bigger
+  // than |breakdown_threshold_bytes|.
+  HeapDumpWriter(StackFrameDeduplicator* stack_frame_deduplicator,
+                 TypeNameDeduplicator* type_name_deduplicator,
+                 uint32_t breakdown_threshold_bytes);
+
+  ~HeapDumpWriter();
+
+  // Aggregates allocations to compute the total size of the heap, then breaks
+  // down the heap recursively. This produces the values that should be dumped
+  // in the "entries" array. The number of entries is kept reasonable because
+  // long tails are not included. Use |Serialize| to convert to a traced value.
+  const std::set<Entry>& Summarize(
+      const std::unordered_map<AllocationContext, AllocationMetrics>&
+          metrics_by_context);
+
+ private:
+  // Inserts an |Entry| for |Bucket| into |entries_|. Returns false if the
+  // entry was present before, true if it was not.
+  bool AddEntryForBucket(const Bucket& bucket);
+
+  // Recursively breaks down a bucket into smaller buckets and adds entries for
+  // the buckets worth dumping to |entries_|.
+  void BreakDown(const Bucket& bucket);
+
+  // The collection of entries that is filled by |Summarize|.
+  std::set<Entry> entries_;
+
+  // Helper for generating the |stackFrames| dictionary. Not owned, must outlive
+  // this heap dump writer instance.
+  StackFrameDeduplicator* const stack_frame_deduplicator_;
+
+  // Helper for converting type names to IDs. Not owned, must outlive this heap
+  // dump writer instance.
+  TypeNameDeduplicator* const type_name_deduplicator_;
+
+  // Minimum size of an allocation for which an allocation bucket will be
+  // broken down with children.
+  uint32_t breakdown_threshold_bytes_;
+
+  DISALLOW_COPY_AND_ASSIGN(HeapDumpWriter);
+};
+
+}  // namespace internal
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_HEAP_PROFILER_HEAP_DUMP_WRITER_H_
diff --git a/base/trace_event/heap_profiler_heap_dump_writer_unittest.cc b/base/trace_event/heap_profiler_heap_dump_writer_unittest.cc
new file mode 100644
index 0000000..93e8fee
--- /dev/null
+++ b/base/trace_event/heap_profiler_heap_dump_writer_unittest.cc
@@ -0,0 +1,330 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/heap_profiler_heap_dump_writer.h"
+
+#include <stddef.h>
+
+#include <memory>
+#include <set>
+#include <string>
+
+#include "base/json/json_reader.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/trace_event/heap_profiler_allocation_context.h"
+#include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
+#include "base/trace_event/heap_profiler_type_name_deduplicator.h"
+#include "base/trace_event/trace_event_argument.h"
+#include "base/values.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+using base::trace_event::StackFrame;
+
+// Define all strings once, because the deduplicator requires pointer equality,
+// and string interning is unreliable.
+StackFrame kBrowserMain = StackFrame::FromTraceEventName("BrowserMain");
+StackFrame kRendererMain = StackFrame::FromTraceEventName("RendererMain");
+StackFrame kCreateWidget = StackFrame::FromTraceEventName("CreateWidget");
+StackFrame kInitialize = StackFrame::FromTraceEventName("Initialize");
+StackFrame kGetBitmap = StackFrame::FromTraceEventName("GetBitmap");
+
+const char kInt[] = "int";
+const char kBool[] = "bool";
+const char kString[] = "string";
+
+}  // namespace
+
+namespace base {
+namespace trace_event {
+namespace internal {
+
+std::unique_ptr<const Value> WriteAndReadBack(const std::set<Entry>& entries) {
+  std::unique_ptr<TracedValue> traced_value = Serialize(entries);
+  std::string json;
+  traced_value->AppendAsTraceFormat(&json);
+  return JSONReader::Read(json);
+}
+
+std::unique_ptr<const DictionaryValue> WriteAndReadBackEntry(Entry entry) {
+  std::set<Entry> input_entries;
+  input_entries.insert(entry);
+
+  std::unique_ptr<const Value> json_dict = WriteAndReadBack(input_entries);
+
+  // Note: Ideally these should use |ASSERT_TRUE| instead of |EXPECT_TRUE|, but
+  // |ASSERT_TRUE| can only be used in void functions.
+  const DictionaryValue* dictionary;
+  EXPECT_TRUE(json_dict->GetAsDictionary(&dictionary));
+
+  const ListValue* json_entries;
+  EXPECT_TRUE(dictionary->GetList("entries", &json_entries));
+
+  const DictionaryValue* json_entry;
+  EXPECT_TRUE(json_entries->GetDictionary(0, &json_entry));
+
+  return json_entry->CreateDeepCopy();
+}
+
+// Given a desired stack frame ID and type ID, looks up the entry in the set and
+// asserts that it is present and has the expected size and count.
+void AssertSizeAndCountEq(const std::set<Entry>& entries,
+                          int stack_frame_id,
+                          int type_id,
+                          const AllocationMetrics& expected) {
+  // The comparison operator for |Entry| does not take size into account, so by
+  // setting only stack frame ID and type ID, the real entry can be found.
+  Entry entry;
+  entry.stack_frame_id = stack_frame_id;
+  entry.type_id = type_id;
+  auto it = entries.find(entry);
+
+  ASSERT_NE(entries.end(), it) << "No entry found for sf = " << stack_frame_id
+                               << ", type = " << type_id << ".";
+  ASSERT_EQ(expected.size, it->size) << "Wrong size for sf = " << stack_frame_id
+                                     << ", type = " << type_id << ".";
+  ASSERT_EQ(expected.count, it->count)
+      << "Wrong count for sf = " << stack_frame_id << ", type = " << type_id
+      << ".";
+}
+
+// Given a desired stack frame ID and type ID, asserts that no entry was dumped
+// for that particular combination of stack frame and type.
+void AssertNotDumped(const std::set<Entry>& entries,
+                     int stack_frame_id,
+                     int type_id) {
+  // The comparison operator for |Entry| does not take size into account, so by
+  // setting only stack frame ID and type ID, the real entry can be found.
+  Entry entry;
+  entry.stack_frame_id = stack_frame_id;
+  entry.type_id = type_id;
+  auto it = entries.find(entry);
+  ASSERT_EQ(entries.end(), it)
+      << "Entry should not be present for sf = " << stack_frame_id
+      << ", type = " << type_id << ".";
+}
+
+TEST(HeapDumpWriterTest, BacktraceIndex) {
+  Entry entry;
+  entry.stack_frame_id = -1;  // -1 means empty backtrace.
+  entry.type_id = 0;
+  entry.size = 1;
+  entry.count = 1;
+
+  std::unique_ptr<const DictionaryValue> json_entry =
+      WriteAndReadBackEntry(entry);
+
+  // For an empty backtrace, the "bt" key cannot reference a stack frame.
+  // Instead it should be set to the empty string.
+  std::string backtrace_index;
+  ASSERT_TRUE(json_entry->GetString("bt", &backtrace_index));
+  ASSERT_EQ("", backtrace_index);
+
+  // Also verify that a non-negative backtrace index is dumped properly.
+  entry.stack_frame_id = 2;
+  json_entry = WriteAndReadBackEntry(entry);
+  ASSERT_TRUE(json_entry->GetString("bt", &backtrace_index));
+  ASSERT_EQ("2", backtrace_index);
+}
+
+TEST(HeapDumpWriterTest, TypeId) {
+  Entry entry;
+  entry.type_id = -1;  // -1 means sum over all types.
+  entry.stack_frame_id = 0;
+  entry.size = 1;
+  entry.count = 1;
+
+  std::unique_ptr<const DictionaryValue> json_entry =
+      WriteAndReadBackEntry(entry);
+
+  // Entries for the cumulative size of all types should not have the "type"
+  // key set.
+  ASSERT_FALSE(json_entry->HasKey("type"));
+
+  // Also verify that a non-negative type ID is dumped properly.
+  entry.type_id = 2;
+  json_entry = WriteAndReadBackEntry(entry);
+  std::string type_id;
+  ASSERT_TRUE(json_entry->GetString("type", &type_id));
+  ASSERT_EQ("2", type_id);
+}
+
+TEST(HeapDumpWriterTest, SizeAndCountAreHexadecimal) {
+  // Take a number between 2^63 and 2^64 (or between 2^31 and 2^32 if |size_t|
+  // is not 64 bits).
+  const size_t large_value =
+      sizeof(size_t) == 8 ? 0xffffffffffffffc5 : 0xffffff9d;
+  const char* large_value_str =
+      sizeof(size_t) == 8 ? "ffffffffffffffc5" : "ffffff9d";
+  Entry entry;
+  entry.type_id = 0;
+  entry.stack_frame_id = 0;
+  entry.size = large_value;
+  entry.count = large_value;
+
+  std::unique_ptr<const DictionaryValue> json_entry =
+      WriteAndReadBackEntry(entry);
+
+  std::string size;
+  ASSERT_TRUE(json_entry->GetString("size", &size));
+  ASSERT_EQ(large_value_str, size);
+
+  std::string count;
+  ASSERT_TRUE(json_entry->GetString("count", &count));
+  ASSERT_EQ(large_value_str, count);
+}
+
+TEST(HeapDumpWriterTest, BacktraceTypeNameTable) {
+  std::unordered_map<AllocationContext, AllocationMetrics> metrics_by_context;
+
+  AllocationContext ctx;
+  ctx.backtrace.frames[0] = kBrowserMain;
+  ctx.backtrace.frames[1] = kCreateWidget;
+  ctx.backtrace.frame_count = 2;
+  ctx.type_name = kInt;
+
+  // 10 bytes with context { type: int, bt: [BrowserMain, CreateWidget] }.
+  metrics_by_context[ctx] = {10, 5};
+
+  ctx.type_name = kBool;
+
+  // 18 bytes with context { type: bool, bt: [BrowserMain, CreateWidget] }.
+  metrics_by_context[ctx] = {18, 18};
+
+  ctx.backtrace.frames[0] = kRendererMain;
+  ctx.backtrace.frames[1] = kInitialize;
+  ctx.backtrace.frame_count = 2;
+
+  // 30 bytes with context { type: bool, bt: [RendererMain, Initialize] }.
+  metrics_by_context[ctx] = {30, 30};
+
+  ctx.type_name = kString;
+
+  // 19 bytes with context { type: string, bt: [RendererMain, Initialize] }.
+  metrics_by_context[ctx] = {19, 4};
+
+  // At this point the heap looks like this:
+  //
+  // |        | CrWidget <- BrMain | Init <- RenMain |     Sum     |
+  // +--------+--------------------+-----------------+-------------+
+  // |        |       size   count |   size    count | size  count |
+  // | int    |         10       5 |      0        0 |   10      5 |
+  // | bool   |         18      18 |     30       30 |   48     48 |
+  // | string |          0       0 |     19        4 |   19      4 |
+  // +--------+--------------------+-----------------+-------------+
+  // | Sum    |         28      23 |     49       34 |   77     57 |
+
+  auto stack_frame_deduplicator = WrapUnique(new StackFrameDeduplicator);
+  auto type_name_deduplicator = WrapUnique(new TypeNameDeduplicator);
+  HeapDumpWriter writer(stack_frame_deduplicator.get(),
+                        type_name_deduplicator.get(), 10u);
+  const std::set<Entry>& dump = writer.Summarize(metrics_by_context);
+
+  // Get the indices of the backtraces and types by adding them again to the
+  // deduplicator. Because they were added before, the same number is returned.
+  StackFrame bt0[] = {kRendererMain, kInitialize};
+  StackFrame bt1[] = {kBrowserMain, kCreateWidget};
+  int bt_renderer_main = stack_frame_deduplicator->Insert(bt0, bt0 + 1);
+  int bt_browser_main = stack_frame_deduplicator->Insert(bt1, bt1 + 1);
+  int bt_renderer_main_initialize =
+      stack_frame_deduplicator->Insert(bt0, bt0 + 2);
+  int bt_browser_main_create_widget =
+      stack_frame_deduplicator->Insert(bt1, bt1 + 2);
+  int type_id_int = type_name_deduplicator->Insert(kInt);
+  int type_id_bool = type_name_deduplicator->Insert(kBool);
+  int type_id_string = type_name_deduplicator->Insert(kString);
+
+  // Full heap should have size 77.
+  AssertSizeAndCountEq(dump, -1, -1, {77, 57});
+
+  // 49 bytes in 34 chunks were allocated in RendererMain and children. Also
+  // check the type breakdown.
+  AssertSizeAndCountEq(dump, bt_renderer_main, -1, {49, 34});
+  AssertSizeAndCountEq(dump, bt_renderer_main, type_id_bool, {30, 30});
+  AssertSizeAndCountEq(dump, bt_renderer_main, type_id_string, {19, 4});
+
+  // 28 bytes in 23 chunks were allocated in BrowserMain and children. Also
+  // check the type breakdown.
+  AssertSizeAndCountEq(dump, bt_browser_main, -1, {28, 23});
+  AssertSizeAndCountEq(dump, bt_browser_main, type_id_int, {10, 5});
+  AssertSizeAndCountEq(dump, bt_browser_main, type_id_bool, {18, 18});
+
+  // In this test all bytes are allocated in leaf nodes, so check again one
+  // level deeper.
+  AssertSizeAndCountEq(dump, bt_renderer_main_initialize, -1, {49, 34});
+  AssertSizeAndCountEq(dump, bt_renderer_main_initialize, type_id_bool,
+                       {30, 30});
+  AssertSizeAndCountEq(dump, bt_renderer_main_initialize, type_id_string,
+                       {19, 4});
+  AssertSizeAndCountEq(dump, bt_browser_main_create_widget, -1, {28, 23});
+  AssertSizeAndCountEq(dump, bt_browser_main_create_widget, type_id_int,
+                       {10, 5});
+  AssertSizeAndCountEq(dump, bt_browser_main_create_widget, type_id_bool,
+                       {18, 18});
+
+  // The type breakdown of the entire heap should have been dumped as well.
+  AssertSizeAndCountEq(dump, -1, type_id_int, {10, 5});
+  AssertSizeAndCountEq(dump, -1, type_id_bool, {48, 48});
+  AssertSizeAndCountEq(dump, -1, type_id_string, {19, 4});
+}
+
+TEST(HeapDumpWriterTest, InsignificantValuesNotDumped) {
+  std::unordered_map<AllocationContext, AllocationMetrics> metrics_by_context;
+
+  AllocationContext ctx;
+  ctx.backtrace.frames[0] = kBrowserMain;
+  ctx.backtrace.frames[1] = kCreateWidget;
+  ctx.backtrace.frame_count = 2;
+
+  // 0.5 KiB and 1 chunk in BrowserMain -> CreateWidget itself.
+  metrics_by_context[ctx] = {512, 1};
+
+  // 1 MiB and 1 chunk in BrowserMain -> CreateWidget -> GetBitmap.
+  ctx.backtrace.frames[2] = kGetBitmap;
+  ctx.backtrace.frame_count = 3;
+  metrics_by_context[ctx] = {1024 * 1024, 1};
+
+  // 400B and 1 chunk in BrowserMain -> CreateWidget -> Initialize.
+  ctx.backtrace.frames[2] = kInitialize;
+  ctx.backtrace.frame_count = 3;
+  metrics_by_context[ctx] = {400, 1};
+
+  auto stack_frame_deduplicator = WrapUnique(new StackFrameDeduplicator);
+  auto type_name_deduplicator = WrapUnique(new TypeNameDeduplicator);
+  HeapDumpWriter writer(stack_frame_deduplicator.get(),
+                        type_name_deduplicator.get(), 512u);
+  const std::set<Entry>& dump = writer.Summarize(metrics_by_context);
+
+  // Get the indices of the backtraces and types by adding them again to the
+  // deduplicator. Because they were added before, the same number is returned.
+  StackFrame bt0[] = {kBrowserMain, kCreateWidget, kGetBitmap};
+  StackFrame bt1[] = {kBrowserMain, kCreateWidget, kInitialize};
+  int bt_browser_main = stack_frame_deduplicator->Insert(bt0, bt0 + 1);
+  int bt_create_widget = stack_frame_deduplicator->Insert(bt0, bt0 + 2);
+  int bt_get_bitmap = stack_frame_deduplicator->Insert(bt0, bt0 + 3);
+  int bt_initialize = stack_frame_deduplicator->Insert(bt1, bt1 + 3);
+
+  // The full heap should have a size of 1 MiB + 912 bytes and 3 chunks.
+  AssertSizeAndCountEq(dump, -1, -1 /* No type specified */,
+                       {1024 * 1024 + 512 + 400, 3});
+
+  // |GetBitmap| allocated 1 MiB and 1 chunk.
+  AssertSizeAndCountEq(dump, bt_get_bitmap, -1, {1024 * 1024, 1});
+
+  // Because |GetBitmap| was dumped, all of its parent nodes should have been
+  // dumped too. |CreateWidget| has 1 MiB in |GetBitmap|, 400 bytes in
+  // |Initialize|, and 512 bytes of its own, each in 1 chunk.
+  AssertSizeAndCountEq(dump, bt_create_widget, -1,
+                       {1024 * 1024 + 400 + 512, 3});
+  AssertSizeAndCountEq(dump, bt_browser_main, -1, {1024 * 1024 + 400 + 512, 3});
+
+  // |Initialize| was not significant, so it should not have been dumped.
+  AssertNotDumped(dump, bt_initialize, -1);
+}
+
+}  // namespace internal
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/heap_profiler_serialization_state.cc b/base/trace_event/heap_profiler_serialization_state.cc
new file mode 100644
index 0000000..b1866e7
--- /dev/null
+++ b/base/trace_event/heap_profiler_serialization_state.cc
@@ -0,0 +1,27 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/heap_profiler_serialization_state.h"
+
+namespace base {
+namespace trace_event {
+
+HeapProfilerSerializationState::HeapProfilerSerializationState()
+    : heap_profiler_breakdown_threshold_bytes_(0) {}
+HeapProfilerSerializationState::~HeapProfilerSerializationState() = default;
+
+void HeapProfilerSerializationState::SetStackFrameDeduplicator(
+    std::unique_ptr<StackFrameDeduplicator> stack_frame_deduplicator) {
+  DCHECK(!stack_frame_deduplicator_);
+  stack_frame_deduplicator_ = std::move(stack_frame_deduplicator);
+}
+
+void HeapProfilerSerializationState::SetTypeNameDeduplicator(
+    std::unique_ptr<TypeNameDeduplicator> type_name_deduplicator) {
+  DCHECK(!type_name_deduplicator_);
+  type_name_deduplicator_ = std::move(type_name_deduplicator);
+}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/heap_profiler_serialization_state.h b/base/trace_event/heap_profiler_serialization_state.h
new file mode 100644
index 0000000..53c5687
--- /dev/null
+++ b/base/trace_event/heap_profiler_serialization_state.h
@@ -0,0 +1,80 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_HEAP_PROFILER_SERIALIZATION_STATE_H_
+#define BASE_TRACE_EVENT_HEAP_PROFILER_SERIALIZATION_STATE_H_
+
+#include <memory>
+#include <set>
+
+#include "base/base_export.h"
+#include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
+#include "base/trace_event/heap_profiler_type_name_deduplicator.h"
+#include "base/trace_event/memory_dump_request_args.h"
+
+namespace base {
+namespace trace_event {
+
+// Container for state variables that should be shared across all the memory
+// dumps in a tracing session.
+class BASE_EXPORT HeapProfilerSerializationState
+    : public RefCountedThreadSafe<HeapProfilerSerializationState> {
+ public:
+  HeapProfilerSerializationState();
+
+  // Returns the stack frame deduplicator that should be used by memory dump
+  // providers when doing a heap dump.
+  StackFrameDeduplicator* stack_frame_deduplicator() const {
+    return stack_frame_deduplicator_.get();
+  }
+
+  void SetStackFrameDeduplicator(
+      std::unique_ptr<StackFrameDeduplicator> stack_frame_deduplicator);
+
+  // Returns the type name deduplicator that should be used by memory dump
+  // providers when doing a heap dump.
+  TypeNameDeduplicator* type_name_deduplicator() const {
+    return type_name_deduplicator_.get();
+  }
+
+  void SetTypeNameDeduplicator(
+      std::unique_ptr<TypeNameDeduplicator> type_name_deduplicator);
+
+  void SetAllowedDumpModes(
+      std::set<MemoryDumpLevelOfDetail> allowed_dump_modes);
+
+  bool IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) const;
+
+  void set_heap_profiler_breakdown_threshold_bytes(uint32_t value) {
+    heap_profiler_breakdown_threshold_bytes_ = value;
+  }
+
+  uint32_t heap_profiler_breakdown_threshold_bytes() const {
+    return heap_profiler_breakdown_threshold_bytes_;
+  }
+
+  bool is_initialized() const {
+    return stack_frame_deduplicator_ && type_name_deduplicator_ &&
+           heap_profiler_breakdown_threshold_bytes_;
+  }
+
+ private:
+  friend class RefCountedThreadSafe<HeapProfilerSerializationState>;
+  ~HeapProfilerSerializationState();
+
+  // Deduplicates backtraces in heap dumps so they can be written once when the
+  // trace is finalized.
+  std::unique_ptr<StackFrameDeduplicator> stack_frame_deduplicator_;
+
+  // Deduplicates type names in heap dumps so they can be written once when the
+  // trace is finalized.
+  std::unique_ptr<TypeNameDeduplicator> type_name_deduplicator_;
+
+  uint32_t heap_profiler_breakdown_threshold_bytes_;
+};
+
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_HEAP_PROFILER_SERIALIZATION_STATE_H_
diff --git a/base/trace_event/heap_profiler_stack_frame_deduplicator.cc b/base/trace_event/heap_profiler_stack_frame_deduplicator.cc
new file mode 100644
index 0000000..c05cd0a
--- /dev/null
+++ b/base/trace_event/heap_profiler_stack_frame_deduplicator.cc
@@ -0,0 +1,195 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
+
+#include <inttypes.h>
+#include <stddef.h>
+
+#include <algorithm>
+#include <string>
+#include <utility>
+
+#include "base/hash.h"
+#include "base/strings/stringprintf.h"
+#include "base/trace_event/memory_usage_estimator.h"
+#include "base/trace_event/trace_event.h"
+#include "base/trace_event/trace_event_argument.h"
+#include "base/trace_event/trace_event_memory_overhead.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+// Dumb hash function that nevertheless works surprisingly well and
+// produces ~0 collisions on real backtraces.
+size_t HashBacktrace(const StackFrame* begin, const StackFrame* end) {
+  size_t hash = 0;
+  for (; begin != end; begin++) {
+    hash += reinterpret_cast<uintptr_t>(begin->value);
+  }
+  return hash;
+}
+
+}  // namespace
+
+StackFrameDeduplicator::FrameNode::FrameNode(StackFrame frame,
+                                             int parent_frame_index)
+    : frame(frame), parent_frame_index(parent_frame_index) {}
+StackFrameDeduplicator::FrameNode::FrameNode(const FrameNode& other) = default;
+StackFrameDeduplicator::FrameNode::~FrameNode() = default;
+
+size_t StackFrameDeduplicator::FrameNode::EstimateMemoryUsage() const {
+  return base::trace_event::EstimateMemoryUsage(children);
+}
+
+StackFrameDeduplicator::StackFrameDeduplicator() = default;
+StackFrameDeduplicator::~StackFrameDeduplicator() = default;
+
+bool StackFrameDeduplicator::Match(int frame_index,
+                                   const StackFrame* begin_frame,
+                                   const StackFrame* end_frame) const {
+  // |frame_index| identifies the bottom frame, i.e. we need to walk
+  // backtrace backwards.
+  const StackFrame* current_frame = end_frame - 1;
+  for (; current_frame >= begin_frame; --current_frame) {
+    const FrameNode& node = frames_[frame_index];
+    if (node.frame != *current_frame) {
+      break;
+    }
+
+    frame_index = node.parent_frame_index;
+    if (frame_index == FrameNode::kInvalidFrameIndex) {
+      if (current_frame == begin_frame) {
+        // We're at the top node and we matched all backtrace frames,
+        // i.e. we successfully matched the backtrace.
+        return true;
+      }
+      break;
+    }
+  }
+
+  return false;
+}
+
+int StackFrameDeduplicator::Insert(const StackFrame* begin_frame,
+                                   const StackFrame* end_frame) {
+  if (begin_frame == end_frame) {
+    return FrameNode::kInvalidFrameIndex;
+  }
+
+  size_t backtrace_hash = HashBacktrace(begin_frame, end_frame);
+
+  // Check if we know about this backtrace.
+  auto backtrace_it = backtrace_lookup_table_.find(backtrace_hash);
+  if (backtrace_it != backtrace_lookup_table_.end()) {
+    int backtrace_index = backtrace_it->second;
+    if (Match(backtrace_index, begin_frame, end_frame)) {
+      return backtrace_index;
+    }
+  }
+
+  int frame_index = FrameNode::kInvalidFrameIndex;
+  base::flat_map<StackFrame, int>* nodes = &roots_;
+
+  // Walk the frames from the bottom of the stack, adding trie nodes as needed.
+  for (const StackFrame* it = begin_frame; it != end_frame; it++) {
+    StackFrame frame = *it;
+
+    auto node = nodes->find(frame);
+    if (node == nodes->end()) {
+      // There is no tree node for this frame yet, create it. The parent node
+      // is the node associated with the previous frame.
+      FrameNode frame_node(frame, frame_index);
+
+      // The new frame node will be appended, so its index is the current size
+      // of the vector.
+      frame_index = static_cast<int>(frames_.size());
+
+      // Add the node to the trie so it will be found next time.
+      nodes->insert(std::make_pair(frame, frame_index));
+
+      // Append the node after modifying |nodes|, because the |frames_| vector
+      // might need to resize, and this invalidates the |nodes| pointer.
+      frames_.push_back(frame_node);
+    } else {
+      // A tree node for this frame exists. Look for the next one.
+      frame_index = node->second;
+    }
+
+    nodes = &frames_[frame_index].children;
+  }
+
+  // Remember the backtrace.
+  backtrace_lookup_table_[backtrace_hash] = frame_index;
+
+  return frame_index;
+}
+
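+// Example of the emitted dictionary (illustrative), for a two-node tree where
+// BrowserMain [0] calls CreateWidget [1]:
+//
+//   {"0":{"name":"BrowserMain"},"1":{"name":"CreateWidget","parent":"0"}}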
+void StackFrameDeduplicator::AppendAsTraceFormat(std::string* out) const {
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("memory-infra"),
+               "StackFrameDeduplicator::AppendAsTraceFormat");
+  out->append("{");  // Begin the |stackFrames| dictionary.
+
+  int i = 0;
+  auto frame_node = begin();
+  auto it_end = end();
+  std::string stringify_buffer;
+
+  while (frame_node != it_end) {
+    // The |stackFrames| format is a dictionary, not an array, so the
+    // keys are stringified indices. Write the index manually, then use
+    // |TracedValue| to format the object. This is to avoid building the
+    // entire dictionary as a |TracedValue| in memory.
+    SStringPrintf(&stringify_buffer, "\"%d\":", i);
+    out->append(stringify_buffer);
+
+    std::unique_ptr<TracedValue> frame_node_value(new TracedValue);
+    const StackFrame& frame = frame_node->frame;
+    switch (frame.type) {
+      case StackFrame::Type::TRACE_EVENT_NAME:
+        frame_node_value->SetString("name",
+                                    static_cast<const char*>(frame.value));
+        break;
+      case StackFrame::Type::THREAD_NAME:
+        SStringPrintf(&stringify_buffer,
+                      "[Thread: %s]",
+                      static_cast<const char*>(frame.value));
+        frame_node_value->SetString("name", stringify_buffer);
+        break;
+      case StackFrame::Type::PROGRAM_COUNTER:
+        SStringPrintf(&stringify_buffer,
+                      "pc:%" PRIxPTR,
+                      reinterpret_cast<uintptr_t>(frame.value));
+        frame_node_value->SetString("name", stringify_buffer);
+        break;
+    }
+    if (frame_node->parent_frame_index != FrameNode::kInvalidFrameIndex) {
+      SStringPrintf(&stringify_buffer, "%d", frame_node->parent_frame_index);
+      frame_node_value->SetString("parent", stringify_buffer);
+    }
+    frame_node_value->AppendAsTraceFormat(out);
+
+    i++;
+    frame_node++;
+
+    if (frame_node != it_end)
+      out->append(",");
+  }
+
+  out->append("}");  // End the |stackFrames| dictionary.
+}
+
+void StackFrameDeduplicator::EstimateTraceMemoryOverhead(
+    TraceEventMemoryOverhead* overhead) {
+  size_t memory_usage = EstimateMemoryUsage(frames_) +
+                        EstimateMemoryUsage(roots_) +
+                        EstimateMemoryUsage(backtrace_lookup_table_);
+  overhead->Add(TraceEventMemoryOverhead::kHeapProfilerStackFrameDeduplicator,
+                sizeof(StackFrameDeduplicator) + memory_usage);
+}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/heap_profiler_stack_frame_deduplicator.h b/base/trace_event/heap_profiler_stack_frame_deduplicator.h
new file mode 100644
index 0000000..ac8d895
--- /dev/null
+++ b/base/trace_event/heap_profiler_stack_frame_deduplicator.h
@@ -0,0 +1,94 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_HEAP_PROFILER_STACK_FRAME_DEDUPLICATOR_H_
+#define BASE_TRACE_EVENT_HEAP_PROFILER_STACK_FRAME_DEDUPLICATOR_H_
+
+#include <string>
+#include <unordered_map>
+
+#include "base/base_export.h"
+#include "base/containers/circular_deque.h"
+#include "base/containers/flat_map.h"
+#include "base/macros.h"
+#include "base/trace_event/heap_profiler_allocation_context.h"
+#include "base/trace_event/trace_event_impl.h"
+
+namespace base {
+namespace trace_event {
+
+class TraceEventMemoryOverhead;
+
+// A data structure that allows grouping a set of backtraces in a space-
+// efficient manner by creating a call tree and writing it as a set of (node,
+// parent) pairs. The tree nodes reference both parent and children. The parent
+// is referenced by index into |frames_|. The children are referenced via a map
+// of |StackFrame|s to indices into |frames_|. So there is a trie for bottom-up
+// lookup of a backtrace for deduplication, and a tree for compact storage in
+// the trace log.
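+//
+// For example (illustrative): inserting the backtraces
+// [BrowserMain, CreateWidget] and [BrowserMain, Initialize] produces three
+// nodes: BrowserMain [0] with parent index -1, plus CreateWidget [1] and
+// Initialize [2], both with parent index 0. Insert returns the leaf indices
+// 1 and 2, respectively.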
+class BASE_EXPORT StackFrameDeduplicator : public ConvertableToTraceFormat {
+ public:
+  // A node in the call tree.
+  struct FrameNode {
+    FrameNode(StackFrame frame, int parent_frame_index);
+    FrameNode(const FrameNode& other);
+    ~FrameNode();
+
+    size_t EstimateMemoryUsage() const;
+
+    StackFrame frame;
+
+    // The index of the parent stack frame in |frames_|, or kInvalidFrameIndex
+    // if there is no parent frame (when it is at the bottom of the call stack).
+    int parent_frame_index;
+    constexpr static int kInvalidFrameIndex = -1;
+
+    // Indices into |frames_| of frames called from the current frame.
+    base::flat_map<StackFrame, int> children;
+  };
+
+  using ConstIterator = base::circular_deque<FrameNode>::const_iterator;
+
+  StackFrameDeduplicator();
+  ~StackFrameDeduplicator() override;
+
+  // Inserts a backtrace where |begin_frame| points to the bottom frame
+  // (e.g. main) and |end_frame| points past the top frame (the most recently
+  // called function), and returns the index of its leaf node in |frames_|.
+  // Returns -1 if the backtrace is empty.
+  int Insert(const StackFrame* begin_frame, const StackFrame* end_frame);
+
+  // Iterators over the frame nodes in the call tree.
+  ConstIterator begin() const { return frames_.begin(); }
+  ConstIterator end() const { return frames_.end(); }
+
+  // Writes the |stackFrames| dictionary as defined in https://goo.gl/GerkV8 to
+  // the trace log.
+  void AppendAsTraceFormat(std::string* out) const override;
+
+  // Estimates memory overhead including |sizeof(StackFrameDeduplicator)|.
+  void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead) override;
+
+ private:
+  // Checks that the existing backtrace identified by |frame_index| equals
+  // the one identified by [begin_frame, end_frame).
+  bool Match(int frame_index,
+             const StackFrame* begin_frame,
+             const StackFrame* end_frame) const;
+
+  base::flat_map<StackFrame, int> roots_;
+  base::circular_deque<FrameNode> frames_;
+
+  // {backtrace_hash -> frame_index} map for finding backtraces that are
+  // already added. Backtraces themselves are not stored in the map, instead
+  // Match() is used on the found frame_index to detect collisions.
+  std::unordered_map<size_t, int> backtrace_lookup_table_;
+
+  DISALLOW_COPY_AND_ASSIGN(StackFrameDeduplicator);
+};
+
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_HEAP_PROFILER_STACK_FRAME_DEDUPLICATOR_H_
diff --git a/base/trace_event/heap_profiler_stack_frame_deduplicator_unittest.cc b/base/trace_event/heap_profiler_stack_frame_deduplicator_unittest.cc
new file mode 100644
index 0000000..194c7aa
--- /dev/null
+++ b/base/trace_event/heap_profiler_stack_frame_deduplicator_unittest.cc
@@ -0,0 +1,152 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
+
+#include <iterator>
+#include <memory>
+
+#include "base/macros.h"
+#include "base/trace_event/heap_profiler_allocation_context.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace trace_event {
+
+// Define all strings once, because the deduplicator requires pointer equality,
+// and string interning is unreliable.
+StackFrame kBrowserMain = StackFrame::FromTraceEventName("BrowserMain");
+StackFrame kRendererMain = StackFrame::FromTraceEventName("RendererMain");
+StackFrame kCreateWidget = StackFrame::FromTraceEventName("CreateWidget");
+StackFrame kInitialize = StackFrame::FromTraceEventName("Initialize");
+StackFrame kMalloc = StackFrame::FromTraceEventName("malloc");
+
+TEST(StackFrameDeduplicatorTest, SingleBacktrace) {
+  StackFrame bt[] = {kBrowserMain, kCreateWidget, kMalloc};
+
+  // The call tree should look like this (index in brackets).
+  //
+  // BrowserMain [0]
+  //   CreateWidget [1]
+  //     malloc [2]
+
+  std::unique_ptr<StackFrameDeduplicator> dedup(new StackFrameDeduplicator);
+  ASSERT_EQ(2, dedup->Insert(std::begin(bt), std::end(bt)));
+
+  auto iter = dedup->begin();
+  ASSERT_EQ(kBrowserMain, (iter + 0)->frame);
+  ASSERT_EQ(-1, (iter + 0)->parent_frame_index);
+
+  ASSERT_EQ(kCreateWidget, (iter + 1)->frame);
+  ASSERT_EQ(0, (iter + 1)->parent_frame_index);
+
+  ASSERT_EQ(kMalloc, (iter + 2)->frame);
+  ASSERT_EQ(1, (iter + 2)->parent_frame_index);
+
+  ASSERT_TRUE(iter + 3 == dedup->end());
+}
+
+TEST(StackFrameDeduplicatorTest, SingleBacktraceWithNull) {
+  StackFrame null_frame = StackFrame::FromTraceEventName(nullptr);
+  StackFrame bt[] = {kBrowserMain, null_frame, kMalloc};
+
+  // Deduplicator doesn't care about what's inside StackFrames,
+  // and handles nullptr StackFrame values as any other.
+  //
+  // So the call tree should look like this (index in brackets).
+  //
+  // BrowserMain [0]
+  //   (null) [1]
+  //     malloc [2]
+
+  std::unique_ptr<StackFrameDeduplicator> dedup(new StackFrameDeduplicator);
+  ASSERT_EQ(2, dedup->Insert(std::begin(bt), std::end(bt)));
+
+  auto iter = dedup->begin();
+  ASSERT_EQ(kBrowserMain, (iter + 0)->frame);
+  ASSERT_EQ(-1, (iter + 0)->parent_frame_index);
+
+  ASSERT_EQ(null_frame, (iter + 1)->frame);
+  ASSERT_EQ(0, (iter + 1)->parent_frame_index);
+
+  ASSERT_EQ(kMalloc, (iter + 2)->frame);
+  ASSERT_EQ(1, (iter + 2)->parent_frame_index);
+
+  ASSERT_TRUE(iter + 3 == dedup->end());
+}
+
+// Test that there can be different call trees (there can be multiple bottom
+// frames). Also verify that frames with the same name but a different caller
+// are represented as distinct nodes.
+TEST(StackFrameDeduplicatorTest, MultipleRoots) {
+  StackFrame bt0[] = {kBrowserMain, kCreateWidget};
+  StackFrame bt1[] = {kRendererMain, kCreateWidget};
+
+  // The call tree should look like this (index in brackets).
+  //
+  // BrowserMain [0]
+  //   CreateWidget [1]
+  // RendererMain [2]
+  //   CreateWidget [3]
+  //
+  // Note that there will be two instances of CreateWidget,
+  // with different parents.
+
+  std::unique_ptr<StackFrameDeduplicator> dedup(new StackFrameDeduplicator);
+  ASSERT_EQ(1, dedup->Insert(std::begin(bt0), std::end(bt0)));
+  ASSERT_EQ(3, dedup->Insert(std::begin(bt1), std::end(bt1)));
+
+  auto iter = dedup->begin();
+  ASSERT_EQ(kBrowserMain, (iter + 0)->frame);
+  ASSERT_EQ(-1, (iter + 0)->parent_frame_index);
+
+  ASSERT_EQ(kCreateWidget, (iter + 1)->frame);
+  ASSERT_EQ(0, (iter + 1)->parent_frame_index);
+
+  ASSERT_EQ(kRendererMain, (iter + 2)->frame);
+  ASSERT_EQ(-1, (iter + 2)->parent_frame_index);
+
+  ASSERT_EQ(kCreateWidget, (iter + 3)->frame);
+  ASSERT_EQ(2, (iter + 3)->parent_frame_index);
+
+  ASSERT_TRUE(iter + 4 == dedup->end());
+}
+
+TEST(StackFrameDeduplicatorTest, Deduplication) {
+  StackFrame bt0[] = {kBrowserMain, kCreateWidget};
+  StackFrame bt1[] = {kBrowserMain, kInitialize};
+
+  // The call tree should look like this (index in brackets).
+  //
+  // BrowserMain [0]
+  //   CreateWidget [1]
+  //   Initialize [2]
+  //
+  // Note that BrowserMain will be re-used.
+
+  std::unique_ptr<StackFrameDeduplicator> dedup(new StackFrameDeduplicator);
+  ASSERT_EQ(1, dedup->Insert(std::begin(bt0), std::end(bt0)));
+  ASSERT_EQ(2, dedup->Insert(std::begin(bt1), std::end(bt1)));
+
+  auto iter = dedup->begin();
+  ASSERT_EQ(kBrowserMain, (iter + 0)->frame);
+  ASSERT_EQ(-1, (iter + 0)->parent_frame_index);
+
+  ASSERT_EQ(kCreateWidget, (iter + 1)->frame);
+  ASSERT_EQ(0, (iter + 1)->parent_frame_index);
+
+  ASSERT_EQ(kInitialize, (iter + 2)->frame);
+  ASSERT_EQ(0, (iter + 2)->parent_frame_index);
+
+  ASSERT_TRUE(iter + 3 == dedup->end());
+
+  // Inserting the same backtrace again should return the index of the existing
+  // node.
+  ASSERT_EQ(1, dedup->Insert(std::begin(bt0), std::end(bt0)));
+  ASSERT_EQ(2, dedup->Insert(std::begin(bt1), std::end(bt1)));
+  ASSERT_TRUE(dedup->begin() + 3 == dedup->end());
+}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/heap_profiler_type_name_deduplicator.cc b/base/trace_event/heap_profiler_type_name_deduplicator.cc
new file mode 100644
index 0000000..360f239
--- /dev/null
+++ b/base/trace_event/heap_profiler_type_name_deduplicator.cc
@@ -0,0 +1,82 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/heap_profiler_type_name_deduplicator.h"
+
+#include <stddef.h>
+#include <stdlib.h>
+#include <string>
+#include <utility>
+
+#include "base/json/string_escape.h"
+#include "base/strings/string_split.h"
+#include "base/strings/stringprintf.h"
+#include "base/trace_event/memory_usage_estimator.h"
+#include "base/trace_event/trace_event.h"
+#include "base/trace_event/trace_event_memory_overhead.h"
+
+namespace base {
+namespace trace_event {
+
+TypeNameDeduplicator::TypeNameDeduplicator() {
+  // A null pointer has type ID 0 ("unknown type").
+  type_ids_.insert(std::make_pair(nullptr, 0));
+}
+
+TypeNameDeduplicator::~TypeNameDeduplicator() = default;
+
+int TypeNameDeduplicator::Insert(const char* type_name) {
+  auto result = type_ids_.insert(std::make_pair(type_name, 0));
+  auto& elem = result.first;
+  bool did_not_exist_before = result.second;
+
+  if (did_not_exist_before) {
+    // The type IDs are assigned sequentially and they are zero-based, so
+    // |size() - 1| is the ID of the new element.
+    elem->second = static_cast<int>(type_ids_.size() - 1);
+  }
+
+  return elem->second;
+}
+
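+// Example of the emitted dictionary (illustrative) after Insert("int") and
+// Insert("bool"). Non-null entries are iterated in pointer order, so the IDs
+// may appear out of numeric order:
+//
+//   {"0":"[unknown]","1":"int","2":"bool"}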
+void TypeNameDeduplicator::AppendAsTraceFormat(std::string* out) const {
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("memory-infra"),
+               "TypeNameDeduplicator::AppendAsTraceFormat");
+  out->append("{");  // Begin the type names dictionary.
+
+  auto it = type_ids_.begin();
+  std::string buffer;
+
+  // Write the first entry manually; the null pointer must not be dereferenced.
+  // (The first entry is the null pointer because a |std::map| is ordered.)
+  it++;
+  out->append("\"0\":\"[unknown]\"");
+
+  for (; it != type_ids_.end(); it++) {
+    // Type IDs in the trace are strings, write them as stringified keys of
+    // a dictionary.
+    SStringPrintf(&buffer, ",\"%d\":", it->second);
+
+    // TODO(ssid): crbug.com/594803 the type name is misused for file name in
+    // some cases.
+    StringPiece type_info = it->first;
+
+    // |EscapeJSONString| appends, it does not overwrite |buffer|.
+    bool put_in_quotes = true;
+    EscapeJSONString(type_info, put_in_quotes, &buffer);
+    out->append(buffer);
+  }
+
+  out->append("}");  // End the type names dictionary.
+}
+
+void TypeNameDeduplicator::EstimateTraceMemoryOverhead(
+    TraceEventMemoryOverhead* overhead) {
+  size_t memory_usage = EstimateMemoryUsage(type_ids_);
+  overhead->Add(TraceEventMemoryOverhead::kHeapProfilerTypeNameDeduplicator,
+                sizeof(TypeNameDeduplicator) + memory_usage);
+}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/heap_profiler_type_name_deduplicator.h b/base/trace_event/heap_profiler_type_name_deduplicator.h
new file mode 100644
index 0000000..2d26c73
--- /dev/null
+++ b/base/trace_event/heap_profiler_type_name_deduplicator.h
@@ -0,0 +1,45 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_HEAP_PROFILER_TYPE_NAME_DEDUPLICATOR_H_
+#define BASE_TRACE_EVENT_HEAP_PROFILER_TYPE_NAME_DEDUPLICATOR_H_
+
+#include <map>
+#include <string>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/trace_event/trace_event_impl.h"
+
+namespace base {
+namespace trace_event {
+
+class TraceEventMemoryOverhead;
+
+// Data structure that assigns a unique numeric ID to |const char*|s.
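+// IDs are assigned sequentially starting at 1; ID 0 is reserved for the null
+// pointer ("unknown type"). For example (illustrative), Insert("int") on a
+// fresh deduplicator returns 1, and inserting the same pointer again returns
+// 1 as well.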
+class BASE_EXPORT TypeNameDeduplicator : public ConvertableToTraceFormat {
+ public:
+  TypeNameDeduplicator();
+  ~TypeNameDeduplicator() override;
+
+  // Inserts a type name and returns its ID.
+  int Insert(const char* type_name);
+
+  // Writes the type ID -> type name mapping to the trace log.
+  void AppendAsTraceFormat(std::string* out) const override;
+
+  // Estimates memory overhead including |sizeof(TypeNameDeduplicator)|.
+  void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead) override;
+
+ private:
+  // Map from type name to type ID.
+  std::map<const char*, int> type_ids_;
+
+  DISALLOW_COPY_AND_ASSIGN(TypeNameDeduplicator);
+};
+
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_HEAP_PROFILER_TYPE_NAME_DEDUPLICATOR_H_
diff --git a/base/trace_event/heap_profiler_type_name_deduplicator_unittest.cc b/base/trace_event/heap_profiler_type_name_deduplicator_unittest.cc
new file mode 100644
index 0000000..f97808b
--- /dev/null
+++ b/base/trace_event/heap_profiler_type_name_deduplicator_unittest.cc
@@ -0,0 +1,83 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <memory>
+#include <string>
+
+#include "base/json/json_reader.h"
+#include "base/trace_event/heap_profiler_type_name_deduplicator.h"
+#include "base/values.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+// Define all strings once, because the deduplicator requires pointer equality,
+// and string interning is unreliable.
+const char kInt[] = "int";
+const char kBool[] = "bool";
+const char kString[] = "string";
+const char kNeedsEscape[] = "\"quotes\"";
+
+std::unique_ptr<Value> DumpAndReadBack(
+    const TypeNameDeduplicator& deduplicator) {
+  std::string json;
+  deduplicator.AppendAsTraceFormat(&json);
+  return JSONReader::Read(json);
+}
+
+// Inserts a single type name into a new TypeNameDeduplicator instance and
+// checks if the value gets inserted and the exported value for |type_name| is
+// the same as |expected_value|.
+void TestInsertTypeAndReadback(const char* type_name,
+                               const char* expected_value) {
+  std::unique_ptr<TypeNameDeduplicator> dedup(new TypeNameDeduplicator);
+  ASSERT_EQ(1, dedup->Insert(type_name));
+
+  std::unique_ptr<Value> type_names = DumpAndReadBack(*dedup);
+  ASSERT_NE(nullptr, type_names);
+
+  const DictionaryValue* dictionary;
+  ASSERT_TRUE(type_names->GetAsDictionary(&dictionary));
+
+  // When the type name was inserted, it got ID 1. The exported key "1"
+  // should be equal to |expected_value|.
+  std::string value;
+  ASSERT_TRUE(dictionary->GetString("1", &value));
+  ASSERT_EQ(expected_value, value);
+}
+
+}  // namespace
+
+TEST(TypeNameDeduplicatorTest, Deduplication) {
+  // The type IDs should be like this:
+  // 0: [unknown]
+  // 1: int
+  // 2: bool
+  // 3: string
+
+  std::unique_ptr<TypeNameDeduplicator> dedup(new TypeNameDeduplicator);
+  ASSERT_EQ(1, dedup->Insert(kInt));
+  ASSERT_EQ(2, dedup->Insert(kBool));
+  ASSERT_EQ(3, dedup->Insert(kString));
+
+  // Inserting again should return the same IDs.
+  ASSERT_EQ(2, dedup->Insert(kBool));
+  ASSERT_EQ(1, dedup->Insert(kInt));
+  ASSERT_EQ(3, dedup->Insert(kString));
+
+  // A null pointer should yield type ID 0.
+  ASSERT_EQ(0, dedup->Insert(nullptr));
+}
+
+TEST(TypeNameDeduplicatorTest, EscapeTypeName) {
+  // Reading json should not fail, because the type name should have been
+  // escaped properly and exported value should contain quotes.
+  TestInsertTypeAndReadback(kNeedsEscape, kNeedsEscape);
+}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/java_heap_dump_provider_android.cc b/base/trace_event/java_heap_dump_provider_android.cc
new file mode 100644
index 0000000..684f730
--- /dev/null
+++ b/base/trace_event/java_heap_dump_provider_android.cc
@@ -0,0 +1,47 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/java_heap_dump_provider_android.h"
+
+#include "base/android/java_runtime.h"
+#include "base/trace_event/process_memory_dump.h"
+
+namespace base {
+namespace trace_event {
+
+// static
+JavaHeapDumpProvider* JavaHeapDumpProvider::GetInstance() {
+  return Singleton<JavaHeapDumpProvider,
+                   LeakySingletonTraits<JavaHeapDumpProvider>>::get();
+}
+
+JavaHeapDumpProvider::JavaHeapDumpProvider() {
+}
+
+JavaHeapDumpProvider::~JavaHeapDumpProvider() {
+}
+
+// Called at trace dump time. Creates a snapshot of the memory counters for
+// the current process.
+bool JavaHeapDumpProvider::OnMemoryDump(const MemoryDumpArgs& args,
+                                        ProcessMemoryDump* pmd) {
+  // These numbers come from java.lang.Runtime stats.
+  long total_heap_size = 0;
+  long free_heap_size = 0;
+  android::JavaRuntime::GetMemoryUsage(&total_heap_size, &free_heap_size);
+
+  MemoryAllocatorDump* outer_dump = pmd->CreateAllocatorDump("java_heap");
+  outer_dump->AddScalar(MemoryAllocatorDump::kNameSize,
+                        MemoryAllocatorDump::kUnitsBytes, total_heap_size);
+
+  MemoryAllocatorDump* inner_dump =
+      pmd->CreateAllocatorDump("java_heap/allocated_objects");
+  inner_dump->AddScalar(MemoryAllocatorDump::kNameSize,
+                        MemoryAllocatorDump::kUnitsBytes,
+                        total_heap_size - free_heap_size);
+  return true;
+}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/java_heap_dump_provider_android.h b/base/trace_event/java_heap_dump_provider_android.h
new file mode 100644
index 0000000..b9f2333
--- /dev/null
+++ b/base/trace_event/java_heap_dump_provider_android.h
@@ -0,0 +1,36 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_JAVA_HEAP_DUMP_PROVIDER_ANDROID_H_
+#define BASE_TRACE_EVENT_JAVA_HEAP_DUMP_PROVIDER_ANDROID_H_
+
+#include "base/macros.h"
+#include "base/memory/singleton.h"
+#include "base/trace_event/memory_dump_provider.h"
+
+namespace base {
+namespace trace_event {
+
+// Dump provider which collects process-wide memory stats.
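+//
+// Registration sketch (hedged; it mirrors the OS_ANDROID branch of
+// MemoryDumpManager::Initialize() in this same patch):
+//   MemoryDumpManager::GetInstance()->RegisterDumpProvider(
+//       JavaHeapDumpProvider::GetInstance(), "JavaHeap", nullptr);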
+class BASE_EXPORT JavaHeapDumpProvider : public MemoryDumpProvider {
+ public:
+  static JavaHeapDumpProvider* GetInstance();
+
+  // MemoryDumpProvider implementation.
+  bool OnMemoryDump(const MemoryDumpArgs& args,
+                    ProcessMemoryDump* pmd) override;
+
+ private:
+  friend struct DefaultSingletonTraits<JavaHeapDumpProvider>;
+
+  JavaHeapDumpProvider();
+  ~JavaHeapDumpProvider() override;
+
+  DISALLOW_COPY_AND_ASSIGN(JavaHeapDumpProvider);
+};
+
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_JAVA_HEAP_DUMP_PROVIDER_ANDROID_H_
diff --git a/base/trace_event/java_heap_dump_provider_android_unittest.cc b/base/trace_event/java_heap_dump_provider_android_unittest.cc
new file mode 100644
index 0000000..9b9eb17
--- /dev/null
+++ b/base/trace_event/java_heap_dump_provider_android_unittest.cc
@@ -0,0 +1,23 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/java_heap_dump_provider_android.h"
+
+#include "base/trace_event/process_memory_dump.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace trace_event {
+
+TEST(JavaHeapDumpProviderTest, JavaHeapDump) {
+  auto* jhdp = JavaHeapDumpProvider::GetInstance();
+  MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
+  std::unique_ptr<ProcessMemoryDump> pmd(
+      new ProcessMemoryDump(nullptr, dump_args));
+
+  jhdp->OnMemoryDump(dump_args, pmd.get());
+}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/malloc_dump_provider.cc b/base/trace_event/malloc_dump_provider.cc
new file mode 100644
index 0000000..46fdb3e
--- /dev/null
+++ b/base/trace_event/malloc_dump_provider.cc
@@ -0,0 +1,189 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/malloc_dump_provider.h"
+
+#include <stddef.h>
+
+#include <unordered_map>
+
+#include "base/allocator/allocator_extension.h"
+#include "base/allocator/buildflags.h"
+#include "base/debug/profiler.h"
+#include "base/trace_event/process_memory_dump.h"
+#include "base/trace_event/trace_event_argument.h"
+#include "build/build_config.h"
+
+#if defined(OS_MACOSX)
+#include <malloc/malloc.h>
+#else
+#include <malloc.h>
+#endif
+#if defined(OS_WIN)
+#include <windows.h>
+#endif
+
+namespace base {
+namespace trace_event {
+
+namespace {
+#if defined(OS_WIN)
+// A structure containing some information about a given heap.
+struct WinHeapInfo {
+  size_t committed_size;
+  size_t uncommitted_size;
+  size_t allocated_size;
+  size_t block_count;
+};
+
+// NOTE: crbug.com/665516
+// Unfortunately, there is no safe way to collect information from secondary
+// heaps due to the limitations and racy nature of this part of the WinAPI.
+void WinHeapMemoryDumpImpl(WinHeapInfo* crt_heap_info) {
+  // Iterate through whichever heap our CRT is using.
+  HANDLE crt_heap = reinterpret_cast<HANDLE>(_get_heap_handle());
+  ::HeapLock(crt_heap);
+  PROCESS_HEAP_ENTRY heap_entry;
+  heap_entry.lpData = nullptr;
+  // Walk over all the entries in the main heap.
+  while (::HeapWalk(crt_heap, &heap_entry) != FALSE) {
+    if ((heap_entry.wFlags & PROCESS_HEAP_ENTRY_BUSY) != 0) {
+      crt_heap_info->allocated_size += heap_entry.cbData;
+      crt_heap_info->block_count++;
+    } else if ((heap_entry.wFlags & PROCESS_HEAP_REGION) != 0) {
+      crt_heap_info->committed_size += heap_entry.Region.dwCommittedSize;
+      crt_heap_info->uncommitted_size += heap_entry.Region.dwUnCommittedSize;
+    }
+  }
+  CHECK(::HeapUnlock(crt_heap) == TRUE);
+}
+#endif  // defined(OS_WIN)
+}  // namespace
+
+// static
+const char MallocDumpProvider::kAllocatedObjects[] = "malloc/allocated_objects";
+
+// static
+MallocDumpProvider* MallocDumpProvider::GetInstance() {
+  return Singleton<MallocDumpProvider,
+                   LeakySingletonTraits<MallocDumpProvider>>::get();
+}
+
+MallocDumpProvider::MallocDumpProvider() = default;
+MallocDumpProvider::~MallocDumpProvider() = default;
+
+// Called at trace dump point time. Creates a snapshot of the memory counters
+// for the current process.
+bool MallocDumpProvider::OnMemoryDump(const MemoryDumpArgs& args,
+                                      ProcessMemoryDump* pmd) {
+  {
+    base::AutoLock auto_lock(emit_metrics_on_memory_dump_lock_);
+    if (!emit_metrics_on_memory_dump_)
+      return true;
+  }
+
+  size_t total_virtual_size = 0;
+  size_t resident_size = 0;
+  size_t allocated_objects_size = 0;
+  size_t allocated_objects_count = 0;
+#if defined(USE_TCMALLOC)
+  bool res =
+      allocator::GetNumericProperty("generic.heap_size", &total_virtual_size);
+  DCHECK(res);
+  res = allocator::GetNumericProperty("generic.total_physical_bytes",
+                                      &resident_size);
+  DCHECK(res);
+  res = allocator::GetNumericProperty("generic.current_allocated_bytes",
+                                      &allocated_objects_size);
+  DCHECK(res);
+#elif defined(OS_MACOSX) || defined(OS_IOS)
+  malloc_statistics_t stats = {0};
+  malloc_zone_statistics(nullptr, &stats);
+  total_virtual_size = stats.size_allocated;
+  allocated_objects_size = stats.size_in_use;
+
+  // Resident size is approximated pretty well by stats.max_size_in_use.
+  // However, on macOS, freed blocks are both resident and reusable, which is
+  // semantically equivalent to deallocated. The implementation of libmalloc
+  // will also only hold a fixed number of freed regions before actually
+  // starting to deallocate them, so stats.max_size_in_use is also not
+  // representative of the peak size. As a result, stats.max_size_in_use is
+  // typically somewhere between actually resident [non-reusable] pages, and
+  // peak size. This is not very useful, so we just use stats.size_in_use for
+  // resident_size, even though it's an underestimate and fails to account for
+  // fragmentation. See
+  // https://bugs.chromium.org/p/chromium/issues/detail?id=695263#c1.
+  resident_size = stats.size_in_use;
+#elif defined(OS_WIN)
+  // This is too expensive on Windows, crbug.com/780735.
+  if (args.level_of_detail == MemoryDumpLevelOfDetail::DETAILED) {
+    WinHeapInfo main_heap_info = {};
+    WinHeapMemoryDumpImpl(&main_heap_info);
+    total_virtual_size =
+        main_heap_info.committed_size + main_heap_info.uncommitted_size;
+    // Resident size is approximated with committed heap size. Note that it is
+    // possible to do this with better accuracy on Windows by intersecting the
+    // working set with the virtual memory ranges occupied by the heap. It's
+    // not clear that this is worth it, as it's fairly expensive to do.
+    resident_size = main_heap_info.committed_size;
+    allocated_objects_size = main_heap_info.allocated_size;
+    allocated_objects_count = main_heap_info.block_count;
+  }
+#elif defined(OS_FUCHSIA)
+// TODO(fuchsia): Port, see https://crbug.com/706592.
+#else
+  struct mallinfo info = mallinfo();
+  DCHECK_GE(info.arena + info.hblkhd, info.uordblks);
+
+  // In the case of Android's jemalloc, |arena| is 0 and the outer pages size
+  // is reported by |hblkhd|. In the case of dlmalloc, the total is given by
+  // |arena| + |hblkhd|. For more details see http://goo.gl/fMR8lF.
+  total_virtual_size = info.arena + info.hblkhd;
+  resident_size = info.uordblks;
+
+  // Total allocated space is given by |uordblks|.
+  allocated_objects_size = info.uordblks;
+#endif
+
+  MemoryAllocatorDump* outer_dump = pmd->CreateAllocatorDump("malloc");
+  outer_dump->AddScalar("virtual_size", MemoryAllocatorDump::kUnitsBytes,
+                        total_virtual_size);
+  outer_dump->AddScalar(MemoryAllocatorDump::kNameSize,
+                        MemoryAllocatorDump::kUnitsBytes, resident_size);
+
+  MemoryAllocatorDump* inner_dump = pmd->CreateAllocatorDump(kAllocatedObjects);
+  inner_dump->AddScalar(MemoryAllocatorDump::kNameSize,
+                        MemoryAllocatorDump::kUnitsBytes,
+                        allocated_objects_size);
+  if (allocated_objects_count != 0) {
+    inner_dump->AddScalar(MemoryAllocatorDump::kNameObjectCount,
+                          MemoryAllocatorDump::kUnitsObjects,
+                          allocated_objects_count);
+  }
+
+  if (resident_size > allocated_objects_size) {
+    // Explicitly specify why the extra memory is resident. In tcmalloc it
+    // accounts for free lists and caches. On Mac and iOS it accounts for
+    // fragmentation and metadata.
+    MemoryAllocatorDump* other_dump =
+        pmd->CreateAllocatorDump("malloc/metadata_fragmentation_caches");
+    other_dump->AddScalar(MemoryAllocatorDump::kNameSize,
+                          MemoryAllocatorDump::kUnitsBytes,
+                          resident_size - allocated_objects_size);
+  }
+  return true;
+}
+
+void MallocDumpProvider::EnableMetrics() {
+  base::AutoLock auto_lock(emit_metrics_on_memory_dump_lock_);
+  emit_metrics_on_memory_dump_ = true;
+}
+
+void MallocDumpProvider::DisableMetrics() {
+  base::AutoLock auto_lock(emit_metrics_on_memory_dump_lock_);
+  emit_metrics_on_memory_dump_ = false;
+}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/malloc_dump_provider.h b/base/trace_event/malloc_dump_provider.h
new file mode 100644
index 0000000..e02eb9d
--- /dev/null
+++ b/base/trace_event/malloc_dump_provider.h
@@ -0,0 +1,56 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_MALLOC_DUMP_PROVIDER_H_
+#define BASE_TRACE_EVENT_MALLOC_DUMP_PROVIDER_H_
+
+#include "base/macros.h"
+#include "base/memory/singleton.h"
+#include "base/synchronization/lock.h"
+#include "base/trace_event/memory_dump_provider.h"
+#include "build/build_config.h"
+
+#if defined(OS_LINUX) || defined(OS_ANDROID) || defined(OS_WIN) || \
+    (defined(OS_MACOSX) && !defined(OS_IOS))
+#define MALLOC_MEMORY_TRACING_SUPPORTED
+#endif
+
+namespace base {
+namespace trace_event {
+
+// Dump provider which collects process-wide memory stats.
+class BASE_EXPORT MallocDumpProvider : public MemoryDumpProvider {
+ public:
+  // Name of the allocated_objects dump. Use this to declare suballocator dumps
+  // from other dump providers.
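+  //
+  // A hedged sketch of such a declaration (AddSuballocation() is assumed from
+  // ProcessMemoryDump; the dump name is illustrative):
+  //   MemoryAllocatorDump* dump = pmd->CreateAllocatorDump("my_component");
+  //   pmd->AddSuballocation(dump->guid(),
+  //                         MallocDumpProvider::kAllocatedObjects);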
+  static const char kAllocatedObjects[];
+
+  static MallocDumpProvider* GetInstance();
+
+  // MemoryDumpProvider implementation.
+  bool OnMemoryDump(const MemoryDumpArgs& args,
+                    ProcessMemoryDump* pmd) override;
+
+  // Used by out-of-process heap profiling. When malloc is profiled by an
+  // external process, that process will be responsible for emitting metrics on
+  // behalf of this one. Thus, MallocDumpProvider should not do anything.
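+  // For example (a hedged sketch), an out-of-process profiler would call
+  //   MallocDumpProvider::GetInstance()->DisableMetrics();
+  // after which OnMemoryDump() returns early without emitting "malloc/..."
+  // dumps; EnableMetrics() restores the default behavior.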
+  void EnableMetrics();
+  void DisableMetrics();
+
+ private:
+  friend struct DefaultSingletonTraits<MallocDumpProvider>;
+
+  MallocDumpProvider();
+  ~MallocDumpProvider() override;
+
+  bool emit_metrics_on_memory_dump_ = true;
+  base::Lock emit_metrics_on_memory_dump_lock_;
+
+  DISALLOW_COPY_AND_ASSIGN(MallocDumpProvider);
+};
+
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_MALLOC_DUMP_PROVIDER_H_
diff --git a/base/trace_event/memory_allocator_dump.cc b/base/trace_event/memory_allocator_dump.cc
new file mode 100644
index 0000000..5260a73
--- /dev/null
+++ b/base/trace_event/memory_allocator_dump.cc
@@ -0,0 +1,148 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_allocator_dump.h"
+
+#include <string.h>
+
+#include "base/format_macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/trace_event/memory_dump_manager.h"
+#include "base/trace_event/memory_dump_provider.h"
+#include "base/trace_event/process_memory_dump.h"
+#include "base/trace_event/trace_event_argument.h"
+#include "base/values.h"
+
+namespace base {
+namespace trace_event {
+
+const char MemoryAllocatorDump::kNameSize[] = "size";
+const char MemoryAllocatorDump::kNameObjectCount[] = "object_count";
+const char MemoryAllocatorDump::kTypeScalar[] = "scalar";
+const char MemoryAllocatorDump::kTypeString[] = "string";
+const char MemoryAllocatorDump::kUnitsBytes[] = "bytes";
+const char MemoryAllocatorDump::kUnitsObjects[] = "objects";
+
+MemoryAllocatorDump::MemoryAllocatorDump(
+    const std::string& absolute_name,
+    MemoryDumpLevelOfDetail level_of_detail,
+    const MemoryAllocatorDumpGuid& guid)
+    : absolute_name_(absolute_name),
+      guid_(guid),
+      level_of_detail_(level_of_detail),
+      flags_(Flags::DEFAULT) {
+  // The |absolute_name| cannot be empty.
+  DCHECK(!absolute_name.empty());
+
+  // The |absolute_name| can contain slash separators, but not leading or
+  // trailing ones.
+  DCHECK(absolute_name[0] != '/' && *absolute_name.rbegin() != '/');
+}
+
+MemoryAllocatorDump::~MemoryAllocatorDump() = default;
+
+void MemoryAllocatorDump::AddScalar(const char* name,
+                                    const char* units,
+                                    uint64_t value) {
+  entries_.emplace_back(name, units, value);
+}
+
+void MemoryAllocatorDump::AddString(const char* name,
+                                    const char* units,
+                                    const std::string& value) {
+  // String attributes are disabled in background mode.
+  if (level_of_detail_ == MemoryDumpLevelOfDetail::BACKGROUND) {
+    NOTREACHED();
+    return;
+  }
+  entries_.emplace_back(name, units, value);
+}
+
+void MemoryAllocatorDump::AsValueInto(TracedValue* value) const {
+  std::string string_conversion_buffer;
+  value->BeginDictionaryWithCopiedName(absolute_name_);
+  value->SetString("guid", guid_.ToString());
+  value->BeginDictionary("attrs");
+
+  for (const Entry& entry : entries_) {
+    value->BeginDictionaryWithCopiedName(entry.name);
+    switch (entry.entry_type) {
+      case Entry::kUint64:
+        SStringPrintf(&string_conversion_buffer, "%" PRIx64,
+                      entry.value_uint64);
+        value->SetString("type", kTypeScalar);
+        value->SetString("units", entry.units);
+        value->SetString("value", string_conversion_buffer);
+        break;
+      case Entry::kString:
+        value->SetString("type", kTypeString);
+        value->SetString("units", entry.units);
+        value->SetString("value", entry.value_string);
+        break;
+    }
+    value->EndDictionary();
+  }
+  value->EndDictionary();  // "attrs": { ... }
+  if (flags_)
+    value->SetInteger("flags", flags_);
+  value->EndDictionary();  // "allocator_name/heap_subheap": { ... }
+}
+
+uint64_t MemoryAllocatorDump::GetSizeInternal() const {
+  if (cached_size_.has_value())
+    return *cached_size_;
+  for (const auto& entry : entries_) {
+    if (entry.entry_type == Entry::kUint64 && entry.units == kUnitsBytes &&
+        strcmp(entry.name.c_str(), kNameSize) == 0) {
+      cached_size_ = entry.value_uint64;
+      return entry.value_uint64;
+    }
+  }
+  return 0;
+}
+
+MemoryAllocatorDump::Entry::Entry() : entry_type(kString), value_uint64() {}
+MemoryAllocatorDump::Entry::Entry(MemoryAllocatorDump::Entry&&) noexcept =
+    default;
+MemoryAllocatorDump::Entry& MemoryAllocatorDump::Entry::operator=(
+    MemoryAllocatorDump::Entry&&) = default;
+MemoryAllocatorDump::Entry::Entry(std::string name,
+                                  std::string units,
+                                  uint64_t value)
+    : name(name), units(units), entry_type(kUint64), value_uint64(value) {}
+MemoryAllocatorDump::Entry::Entry(std::string name,
+                                  std::string units,
+                                  std::string value)
+    : name(name), units(units), entry_type(kString), value_string(value) {}
+
+bool MemoryAllocatorDump::Entry::operator==(const Entry& rhs) const {
+  if (!(name == rhs.name && units == rhs.units && entry_type == rhs.entry_type))
+    return false;
+  switch (entry_type) {
+    case EntryType::kUint64:
+      return value_uint64 == rhs.value_uint64;
+    case EntryType::kString:
+      return value_string == rhs.value_string;
+  }
+  NOTREACHED();
+  return false;
+}
+
+void PrintTo(const MemoryAllocatorDump::Entry& entry, std::ostream* out) {
+  switch (entry.entry_type) {
+    case MemoryAllocatorDump::Entry::EntryType::kUint64:
+      *out << "<Entry(\"" << entry.name << "\", \"" << entry.units << "\", "
+           << entry.value_uint64 << ")>";
+      return;
+    case MemoryAllocatorDump::Entry::EntryType::kString:
+      *out << "<Entry(\"" << entry.name << "\", \"" << entry.units << "\", \""
+           << entry.value_string << "\")>";
+      return;
+  }
+  NOTREACHED();
+}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/memory_allocator_dump.h b/base/trace_event/memory_allocator_dump.h
new file mode 100644
index 0000000..de38afd
--- /dev/null
+++ b/base/trace_event/memory_allocator_dump.h
@@ -0,0 +1,153 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_MEMORY_ALLOCATOR_DUMP_H_
+#define BASE_TRACE_EVENT_MEMORY_ALLOCATOR_DUMP_H_
+
+#include <stdint.h>
+
+#include <memory>
+#include <ostream>
+#include <string>
+
+#include "base/base_export.h"
+#include "base/gtest_prod_util.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/optional.h"
+#include "base/trace_event/memory_allocator_dump_guid.h"
+#include "base/trace_event/memory_dump_request_args.h"
+#include "base/trace_event/trace_event_argument.h"
+#include "base/unguessable_token.h"
+#include "base/values.h"
+
+namespace base {
+namespace trace_event {
+
+class ProcessMemoryDump;
+class TracedValue;
+
+// Data model for user-land memory allocator dumps.
+class BASE_EXPORT MemoryAllocatorDump {
+ public:
+  enum Flags {
+    DEFAULT = 0,
+
+    // A dump marked weak will be discarded by TraceViewer.
+    WEAK = 1 << 0,
+  };
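+
+  // Hedged example: a provider emitting a dump that may duplicate one from
+  // another process can mark it weak via set_flags() below:
+  //   dump->set_flags(MemoryAllocatorDump::WEAK);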
+
+  // In the TraceViewer UI table each MemoryAllocatorDump becomes
+  // a row and each Entry generates a column (if it doesn't already
+  // exist).
+  struct BASE_EXPORT Entry {
+    enum EntryType {
+      kUint64,
+      kString,
+    };
+
+    // By design, name, units and value_string always come from
+    // indefinitely lived const char* strings; the only reason we copy
+    // them into a std::string is to handle Mojo (de)serialization.
+    // TODO(hjd): Investigate optimization (e.g. using StringPiece).
+    Entry();  // Only for deserialization.
+    Entry(std::string name, std::string units, uint64_t value);
+    Entry(std::string name, std::string units, std::string value);
+    Entry(Entry&& other) noexcept;
+    Entry& operator=(Entry&& other);
+    bool operator==(const Entry& rhs) const;
+
+    std::string name;
+    std::string units;
+
+    EntryType entry_type;
+
+    uint64_t value_uint64;
+    std::string value_string;
+
+    DISALLOW_COPY_AND_ASSIGN(Entry);
+  };
+
+  MemoryAllocatorDump(const std::string& absolute_name,
+                      MemoryDumpLevelOfDetail,
+                      const MemoryAllocatorDumpGuid&);
+  ~MemoryAllocatorDump();
+
+  // Standard attribute |name|s for the AddScalar and AddString() methods.
+  static const char kNameSize[];          // To represent allocated space.
+  static const char kNameObjectCount[];   // To represent number of objects.
+
+  // Standard attribute |unit|s for the AddScalar and AddString() methods.
+  static const char kUnitsBytes[];    // Unit name to represent bytes.
+  static const char kUnitsObjects[];  // Unit name to represent #objects.
+
+  // Constants used only internally and by tests.
+  static const char kTypeScalar[];  // Type name for scalar attributes.
+  static const char kTypeString[];  // Type name for string attributes.
+
+  // Setters for scalar attributes. Some examples:
+  // - "size" column (all dumps are expected to have at least this one):
+  //     AddScalar(kNameSize, kUnitsBytes, 1234);
+  // - Some extra-column reporting internal details of the subsystem:
+  //    AddScalar("number_of_freelist_entries", kUnitsObjects, 42)
+  // - Other informational column:
+  //    AddString("kitten", "name", "shadow");
+  void AddScalar(const char* name, const char* units, uint64_t value);
+  void AddString(const char* name, const char* units, const std::string& value);
+
+  // Absolute name, unique within the scope of an entire ProcessMemoryDump.
+  const std::string& absolute_name() const { return absolute_name_; }
+
+  // Called at trace generation time to populate the TracedValue.
+  void AsValueInto(TracedValue* value) const;
+
+  // Get the size for this dump.
+  // The size is the value set with AddScalar(kNameSize, kUnitsBytes, size);
+  // TODO(hjd): this should return an Optional<uint64_t>.
+  uint64_t GetSizeInternal() const;
+
+  MemoryDumpLevelOfDetail level_of_detail() const { return level_of_detail_; }
+
+  // Use enum Flags to set values.
+  void set_flags(int flags) { flags_ |= flags; }
+  void clear_flags(int flags) { flags_ &= ~flags; }
+  int flags() const { return flags_; }
+
+  // |guid| is an optional global dump identifier, unique across all processes
+  // within the scope of a global dump. It is only required when using the
+  // graph APIs (see TODO_method_name) to express retention / suballocation or
+  // cross process sharing. See crbug.com/492102 for design docs.
+  // Subsequent MemoryAllocatorDump(s) with the same |absolute_name| are
+  // expected to have the same guid.
+  const MemoryAllocatorDumpGuid& guid() const { return guid_; }
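+
+  // A hedged sketch of the cross-process sharing pattern mentioned above (the
+  // CreateSharedGlobalAllocatorDump() and AddOwnershipEdge() calls are assumed
+  // from ProcessMemoryDump; the GUID string is illustrative):
+  //   MemoryAllocatorDumpGuid shared_guid("shared_buffer/42");
+  //   pmd->CreateSharedGlobalAllocatorDump(shared_guid);
+  //   pmd->AddOwnershipEdge(local_dump->guid(), shared_guid);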
+
+  const std::vector<Entry>& entries() const { return entries_; }
+
+  // Only for mojo serialization, which can mutate the collection.
+  std::vector<Entry>* mutable_entries_for_serialization() const {
+    cached_size_.reset();  // The caller can mutate the collection.
+
+    // Mojo takes a const input argument even for move-only types that can be
+    // mutated while serializing (like this one). Hence the const_cast.
+    return const_cast<std::vector<Entry>*>(&entries_);
+  }
+
+ private:
+  const std::string absolute_name_;
+  MemoryAllocatorDumpGuid guid_;
+  MemoryDumpLevelOfDetail level_of_detail_;
+  int flags_;  // See enum Flags.
+  mutable Optional<uint64_t> cached_size_;  // Lazy, for GetSizeInternal().
+  std::vector<Entry> entries_;
+
+  DISALLOW_COPY_AND_ASSIGN(MemoryAllocatorDump);
+};
+
+// This is required by gtest to print a readable output on test failures.
+void BASE_EXPORT PrintTo(const MemoryAllocatorDump::Entry&, std::ostream*);
+
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_MEMORY_ALLOCATOR_DUMP_H_
diff --git a/base/trace_event/memory_allocator_dump_guid.cc b/base/trace_event/memory_allocator_dump_guid.cc
new file mode 100644
index 0000000..08ac677
--- /dev/null
+++ b/base/trace_event/memory_allocator_dump_guid.cc
@@ -0,0 +1,40 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_allocator_dump_guid.h"
+
+#include "base/format_macros.h"
+#include "base/sha1.h"
+#include "base/strings/stringprintf.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+uint64_t HashString(const std::string& str) {
+  uint64_t hash[(kSHA1Length + sizeof(uint64_t) - 1) / sizeof(uint64_t)] = {0};
+  SHA1HashBytes(reinterpret_cast<const unsigned char*>(str.data()), str.size(),
+                reinterpret_cast<unsigned char*>(hash));
+  return hash[0];
+}
+
+}  // namespace
+
+MemoryAllocatorDumpGuid::MemoryAllocatorDumpGuid(uint64_t guid) : guid_(guid) {}
+
+MemoryAllocatorDumpGuid::MemoryAllocatorDumpGuid()
+    : MemoryAllocatorDumpGuid(0u) {
+}
+
+MemoryAllocatorDumpGuid::MemoryAllocatorDumpGuid(const std::string& guid_str)
+    : MemoryAllocatorDumpGuid(HashString(guid_str)) {
+}
+
+std::string MemoryAllocatorDumpGuid::ToString() const {
+  return StringPrintf("%" PRIx64, guid_);
+}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/memory_allocator_dump_guid.h b/base/trace_event/memory_allocator_dump_guid.h
new file mode 100644
index 0000000..2a420a2
--- /dev/null
+++ b/base/trace_event/memory_allocator_dump_guid.h
@@ -0,0 +1,55 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_MEMORY_ALLOCATOR_DUMP_GUID_H_
+#define BASE_TRACE_EVENT_MEMORY_ALLOCATOR_DUMP_GUID_H_
+
+#include <stdint.h>
+
+#include <string>
+
+#include "base/base_export.h"
+
+namespace base {
+namespace trace_event {
+
+class BASE_EXPORT MemoryAllocatorDumpGuid {
+ public:
+  MemoryAllocatorDumpGuid();
+  explicit MemoryAllocatorDumpGuid(uint64_t guid);
+
+  // Utility ctor to hash a GUID if the caller prefers a string. The caller
+  // still has to ensure that |guid_str| is unique, per snapshot, within the
+  // global scope of all the traced processes.
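+  // Illustrative only (the string layout and |segment_id| are made up):
+  //   MemoryAllocatorDumpGuid guid(
+  //       base::StringPrintf("discardable/segment_%d", segment_id));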
+  explicit MemoryAllocatorDumpGuid(const std::string& guid_str);
+
+  uint64_t ToUint64() const { return guid_; }
+
+  // Returns a (hex-encoded) string representation of the guid.
+  std::string ToString() const;
+
+  bool empty() const { return guid_ == 0u; }
+
+  bool operator==(const MemoryAllocatorDumpGuid& other) const {
+    return guid_ == other.guid_;
+  }
+
+  bool operator!=(const MemoryAllocatorDumpGuid& other) const {
+    return !(*this == other);
+  }
+
+  bool operator<(const MemoryAllocatorDumpGuid& other) const {
+    return guid_ < other.guid_;
+  }
+
+ private:
+  uint64_t guid_;
+
+  // Deliberately copy-able.
+};
+
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_MEMORY_ALLOCATOR_DUMP_GUID_H_
diff --git a/base/trace_event/memory_allocator_dump_unittest.cc b/base/trace_event/memory_allocator_dump_unittest.cc
new file mode 100644
index 0000000..b0b6e74
--- /dev/null
+++ b/base/trace_event/memory_allocator_dump_unittest.cc
@@ -0,0 +1,179 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_allocator_dump.h"
+
+#include <stdint.h>
+
+#include "base/format_macros.h"
+#include "base/strings/stringprintf.h"
+#include "base/trace_event/heap_profiler_serialization_state.h"
+#include "base/trace_event/memory_allocator_dump_guid.h"
+#include "base/trace_event/memory_dump_provider.h"
+#include "base/trace_event/process_memory_dump.h"
+#include "base/trace_event/trace_event_argument.h"
+#include "base/values.h"
+#include "build/build_config.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using testing::ElementsAre;
+using testing::Eq;
+using testing::ByRef;
+using testing::IsEmpty;
+using testing::Contains;
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+class FakeMemoryAllocatorDumpProvider : public MemoryDumpProvider {
+ public:
+  bool OnMemoryDump(const MemoryDumpArgs& args,
+                    ProcessMemoryDump* pmd) override {
+    MemoryAllocatorDump* root_heap =
+        pmd->CreateAllocatorDump("foobar_allocator");
+
+    root_heap->AddScalar(MemoryAllocatorDump::kNameSize,
+                         MemoryAllocatorDump::kUnitsBytes, 4096);
+    root_heap->AddScalar(MemoryAllocatorDump::kNameObjectCount,
+                         MemoryAllocatorDump::kUnitsObjects, 42);
+    root_heap->AddScalar("attr1", "units1", 1234);
+    root_heap->AddString("attr2", "units2", "string_value");
+
+    MemoryAllocatorDump* sub_heap =
+        pmd->CreateAllocatorDump("foobar_allocator/sub_heap");
+    sub_heap->AddScalar(MemoryAllocatorDump::kNameSize,
+                        MemoryAllocatorDump::kUnitsBytes, 1);
+    sub_heap->AddScalar(MemoryAllocatorDump::kNameObjectCount,
+                        MemoryAllocatorDump::kUnitsObjects, 3);
+
+    pmd->CreateAllocatorDump("foobar_allocator/sub_heap/empty");
+    // Leave the rest of the sub-heap deliberately uninitialized, to check that
+    // CreateAllocatorDump returns a properly zero-initialized object.
+
+    return true;
+  }
+};
+
+void CheckString(const MemoryAllocatorDump* dump,
+                 const std::string& name,
+                 const char* expected_units,
+                 const std::string& expected_value) {
+  MemoryAllocatorDump::Entry expected(name, expected_units, expected_value);
+  EXPECT_THAT(dump->entries(), Contains(Eq(ByRef(expected))));
+}
+
+void CheckScalar(const MemoryAllocatorDump* dump,
+                 const std::string& name,
+                 const char* expected_units,
+                 uint64_t expected_value) {
+  MemoryAllocatorDump::Entry expected(name, expected_units, expected_value);
+  EXPECT_THAT(dump->entries(), Contains(Eq(ByRef(expected))));
+}
+
+}  // namespace
+
+TEST(MemoryAllocatorDumpTest, GuidGeneration) {
+  std::unique_ptr<MemoryAllocatorDump> mad(new MemoryAllocatorDump(
+      "foo", MemoryDumpLevelOfDetail::FIRST, MemoryAllocatorDumpGuid(0x42u)));
+  ASSERT_EQ("42", mad->guid().ToString());
+}
+
+TEST(MemoryAllocatorDumpTest, DumpIntoProcessMemoryDump) {
+  FakeMemoryAllocatorDumpProvider fmadp;
+  MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
+  ProcessMemoryDump pmd(new HeapProfilerSerializationState, dump_args);
+
+  fmadp.OnMemoryDump(dump_args, &pmd);
+
+  ASSERT_EQ(3u, pmd.allocator_dumps().size());
+
+  const MemoryAllocatorDump* root_heap =
+      pmd.GetAllocatorDump("foobar_allocator");
+  ASSERT_NE(nullptr, root_heap);
+  EXPECT_EQ("foobar_allocator", root_heap->absolute_name());
+  CheckScalar(root_heap, MemoryAllocatorDump::kNameSize,
+              MemoryAllocatorDump::kUnitsBytes, 4096);
+  CheckScalar(root_heap, MemoryAllocatorDump::kNameObjectCount,
+              MemoryAllocatorDump::kUnitsObjects, 42);
+  CheckScalar(root_heap, "attr1", "units1", 1234);
+  CheckString(root_heap, "attr2", "units2", "string_value");
+
+  const MemoryAllocatorDump* sub_heap =
+      pmd.GetAllocatorDump("foobar_allocator/sub_heap");
+  ASSERT_NE(nullptr, sub_heap);
+  EXPECT_EQ("foobar_allocator/sub_heap", sub_heap->absolute_name());
+  CheckScalar(sub_heap, MemoryAllocatorDump::kNameSize,
+              MemoryAllocatorDump::kUnitsBytes, 1);
+  CheckScalar(sub_heap, MemoryAllocatorDump::kNameObjectCount,
+              MemoryAllocatorDump::kUnitsObjects, 3);
+  const MemoryAllocatorDump* empty_sub_heap =
+      pmd.GetAllocatorDump("foobar_allocator/sub_heap/empty");
+  ASSERT_NE(nullptr, empty_sub_heap);
+  EXPECT_EQ("foobar_allocator/sub_heap/empty", empty_sub_heap->absolute_name());
+
+  EXPECT_THAT(empty_sub_heap->entries(), IsEmpty());
+
+  // Check that calling serialization routines doesn't cause a crash.
+  std::unique_ptr<TracedValue> traced_value(new TracedValue);
+  pmd.SerializeAllocatorDumpsInto(traced_value.get());
+  pmd.SerializeHeapProfilerDumpsInto(traced_value.get());
+}
+
+TEST(MemoryAllocatorDumpTest, GetSize) {
+  MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
+  ProcessMemoryDump pmd(new HeapProfilerSerializationState, dump_args);
+  MemoryAllocatorDump* dump = pmd.CreateAllocatorDump("allocator_for_size");
+  dump->AddScalar(MemoryAllocatorDump::kNameSize,
+                  MemoryAllocatorDump::kUnitsBytes, 1);
+  dump->AddScalar("foo", MemoryAllocatorDump::kUnitsBytes, 2);
+  EXPECT_EQ(1u, dump->GetSizeInternal());
+}
+
+TEST(MemoryAllocatorDumpTest, ReadValues) {
+  MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
+  ProcessMemoryDump pmd(new HeapProfilerSerializationState, dump_args);
+  MemoryAllocatorDump* dump = pmd.CreateAllocatorDump("allocator_for_size");
+  dump->AddScalar("one", "byte", 1);
+  dump->AddString("one", "object", "one");
+
+  MemoryAllocatorDump::Entry expected_scalar("one", "byte", 1);
+  MemoryAllocatorDump::Entry expected_string("one", "object", "one");
+  EXPECT_THAT(dump->entries(), ElementsAre(Eq(ByRef(expected_scalar)),
+                                           Eq(ByRef(expected_string))));
+}
+
+TEST(MemoryAllocatorDumpTest, MovingAnEntry) {
+  MemoryAllocatorDump::Entry expected_entry("one", "byte", 1);
+  MemoryAllocatorDump::Entry from_entry("one", "byte", 1);
+  MemoryAllocatorDump::Entry to_entry = std::move(from_entry);
+  EXPECT_EQ(expected_entry, to_entry);
+}
+
+// DEATH tests are not supported on Android/iOS/Fuchsia.
+#if !defined(NDEBUG) && !defined(OS_ANDROID) && !defined(OS_IOS) && \
+    !defined(OS_FUCHSIA)
+TEST(MemoryAllocatorDumpTest, ForbidDuplicatesDeathTest) {
+  FakeMemoryAllocatorDumpProvider fmadp;
+  MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
+  ProcessMemoryDump pmd(new HeapProfilerSerializationState, dump_args);
+  pmd.CreateAllocatorDump("foo_allocator");
+  pmd.CreateAllocatorDump("bar_allocator/heap");
+  ASSERT_DEATH(pmd.CreateAllocatorDump("foo_allocator"), "");
+  ASSERT_DEATH(pmd.CreateAllocatorDump("bar_allocator/heap"), "");
+  ASSERT_DEATH(pmd.CreateAllocatorDump(""), "");
+}
+
+TEST(MemoryAllocatorDumpTest, ForbidStringsInBackgroundModeDeathTest) {
+  MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::BACKGROUND};
+  ProcessMemoryDump pmd(new HeapProfilerSerializationState, dump_args);
+  MemoryAllocatorDump* dump = pmd.CreateAllocatorDump("malloc");
+  ASSERT_DEATH(dump->AddString("foo", "bar", "baz"), "");
+}
+#endif
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/memory_dump_manager.cc b/base/trace_event/memory_dump_manager.cc
new file mode 100644
index 0000000..f6cc832
--- /dev/null
+++ b/base/trace_event/memory_dump_manager.cc
@@ -0,0 +1,877 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_dump_manager.h"
+
+#include <inttypes.h>
+#include <stdio.h>
+
+#include <algorithm>
+#include <memory>
+#include <utility>
+
+#include "base/allocator/buildflags.h"
+#include "base/base_switches.h"
+#include "base/command_line.h"
+#include "base/debug/alias.h"
+#include "base/debug/stack_trace.h"
+#include "base/debug/thread_heap_usage_tracker.h"
+#include "base/memory/ptr_util.h"
+#include "base/sequenced_task_runner.h"
+#include "base/strings/string_util.h"
+#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/trace_event/heap_profiler.h"
+#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
+#include "base/trace_event/heap_profiler_event_filter.h"
+#include "base/trace_event/heap_profiler_serialization_state.h"
+#include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
+#include "base/trace_event/heap_profiler_type_name_deduplicator.h"
+#include "base/trace_event/malloc_dump_provider.h"
+#include "base/trace_event/memory_dump_provider.h"
+#include "base/trace_event/memory_dump_scheduler.h"
+#include "base/trace_event/memory_infra_background_whitelist.h"
+#include "base/trace_event/memory_peak_detector.h"
+#include "base/trace_event/process_memory_dump.h"
+#include "base/trace_event/trace_event.h"
+#include "base/trace_event/trace_event_argument.h"
+#include "build/build_config.h"
+
+#if defined(OS_ANDROID)
+#include "base/trace_event/java_heap_dump_provider_android.h"
+
+#if BUILDFLAG(CAN_UNWIND_WITH_CFI_TABLE)
+#include "base/trace_event/cfi_backtrace_android.h"
+#endif
+
+#endif  // defined(OS_ANDROID)
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+const char* const kTraceEventArgNames[] = {"dumps"};
+const unsigned char kTraceEventArgTypes[] = {TRACE_VALUE_TYPE_CONVERTABLE};
+
+MemoryDumpManager* g_memory_dump_manager_for_testing = nullptr;
+
+// Temporary (until the peak detector and scheduler are moved out of here)
+// trampoline function to match the |request_dump_function| passed to Initialize
+// to the callback expected by MemoryPeakDetector and MemoryDumpScheduler.
+// TODO(primiano): remove this.
+void DoGlobalDumpWithoutCallback(
+    MemoryDumpManager::RequestGlobalDumpFunction global_dump_fn,
+    MemoryDumpType dump_type,
+    MemoryDumpLevelOfDetail level_of_detail) {
+  global_dump_fn.Run(dump_type, level_of_detail);
+}
+
+// Proxy class which wraps a ConvertableToTraceFormat owned by the
+// |heap_profiler_serialization_state| in an object that can be added to
+// the trace event log. This solves the problem that
+// HeapProfilerSerializationState is refcounted but the tracing subsystem wants
+// a std::unique_ptr<ConvertableToTraceFormat>.
+template <typename T>
+struct SessionStateConvertableProxy : public ConvertableToTraceFormat {
+  using GetterFunctPtr = T* (HeapProfilerSerializationState::*)() const;
+
+  SessionStateConvertableProxy(scoped_refptr<HeapProfilerSerializationState>
+                                   heap_profiler_serialization_state,
+                               GetterFunctPtr getter_function)
+      : heap_profiler_serialization_state(heap_profiler_serialization_state),
+        getter_function(getter_function) {}
+
+  void AppendAsTraceFormat(std::string* out) const override {
+    return (heap_profiler_serialization_state.get()->*getter_function)()
+        ->AppendAsTraceFormat(out);
+  }
+
+  void EstimateTraceMemoryOverhead(
+      TraceEventMemoryOverhead* overhead) override {
+    return (heap_profiler_serialization_state.get()->*getter_function)()
+        ->EstimateTraceMemoryOverhead(overhead);
+  }
+
+  scoped_refptr<HeapProfilerSerializationState>
+      heap_profiler_serialization_state;
+  GetterFunctPtr const getter_function;
+};
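+
+// A hedged usage sketch (the stack_frame_deduplicator() getter is assumed to
+// exist on HeapProfilerSerializationState):
+//   auto proxy = std::make_unique<
+//       SessionStateConvertableProxy<StackFrameDeduplicator>>(
+//       serialization_state,
+//       &HeapProfilerSerializationState::stack_frame_deduplicator);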
+
+void NotifyHeapProfilingEnabledOnMDPThread(
+    scoped_refptr<MemoryDumpProviderInfo> mdpinfo,
+    bool profiling_enabled) {
+  mdpinfo->dump_provider->OnHeapProfilingEnabled(profiling_enabled);
+}
+
+inline bool ShouldEnableMDPAllocatorHooks(HeapProfilingMode mode) {
+  return (mode == kHeapProfilingModePseudo) ||
+         (mode == kHeapProfilingModeNative) ||
+         (mode == kHeapProfilingModeBackground);
+}
+
+#if BUILDFLAG(USE_ALLOCATOR_SHIM) && !defined(OS_NACL)
+inline bool IsHeapProfilingModeEnabled(HeapProfilingMode mode) {
+  return mode != kHeapProfilingModeDisabled &&
+         mode != kHeapProfilingModeInvalid;
+}
+
+void EnableFilteringForPseudoStackProfiling() {
+  if (AllocationContextTracker::capture_mode() !=
+          AllocationContextTracker::CaptureMode::PSEUDO_STACK ||
+      (TraceLog::GetInstance()->enabled_modes() & TraceLog::FILTERING_MODE)) {
+    return;
+  }
+  // Create trace config with heap profiling filter.
+  std::string filter_string = JoinString(
+      {"*", TRACE_DISABLED_BY_DEFAULT("net"), TRACE_DISABLED_BY_DEFAULT("cc"),
+       MemoryDumpManager::kTraceCategory},
+      ",");
+  TraceConfigCategoryFilter category_filter;
+  category_filter.InitializeFromString(filter_string);
+
+  TraceConfig::EventFilterConfig heap_profiler_filter_config(
+      HeapProfilerEventFilter::kName);
+  heap_profiler_filter_config.SetCategoryFilter(category_filter);
+
+  TraceConfig::EventFilters filters;
+  filters.push_back(heap_profiler_filter_config);
+  TraceConfig filtering_trace_config;
+  filtering_trace_config.SetEventFilters(filters);
+
+  TraceLog::GetInstance()->SetEnabled(filtering_trace_config,
+                                      TraceLog::FILTERING_MODE);
+}
+#endif  // BUILDFLAG(USE_ALLOCATOR_SHIM) && !defined(OS_NACL)
+
+}  // namespace
+
+// static
+const char* const MemoryDumpManager::kTraceCategory =
+    TRACE_DISABLED_BY_DEFAULT("memory-infra");
+
+// static
+const int MemoryDumpManager::kMaxConsecutiveFailuresCount = 3;
+
+// static
+const uint64_t MemoryDumpManager::kInvalidTracingProcessId = 0;
+
+// static
+const char* const MemoryDumpManager::kSystemAllocatorPoolName =
+#if defined(MALLOC_MEMORY_TRACING_SUPPORTED)
+    MallocDumpProvider::kAllocatedObjects;
+#else
+    nullptr;
+#endif
+
+// static
+MemoryDumpManager* MemoryDumpManager::GetInstance() {
+  if (g_memory_dump_manager_for_testing)
+    return g_memory_dump_manager_for_testing;
+
+  return Singleton<MemoryDumpManager,
+                   LeakySingletonTraits<MemoryDumpManager>>::get();
+}
+
+// static
+std::unique_ptr<MemoryDumpManager>
+MemoryDumpManager::CreateInstanceForTesting() {
+  DCHECK(!g_memory_dump_manager_for_testing);
+  std::unique_ptr<MemoryDumpManager> instance(new MemoryDumpManager());
+  g_memory_dump_manager_for_testing = instance.get();
+  return instance;
+}
+
+MemoryDumpManager::MemoryDumpManager()
+    : is_coordinator_(false),
+      tracing_process_id_(kInvalidTracingProcessId),
+      dumper_registrations_ignored_for_testing_(false),
+      heap_profiling_mode_(kHeapProfilingModeDisabled) {}
+
+MemoryDumpManager::~MemoryDumpManager() {
+  Thread* dump_thread = nullptr;
+  {
+    AutoLock lock(lock_);
+    if (dump_thread_) {
+      dump_thread = dump_thread_.get();
+    }
+  }
+  if (dump_thread) {
+    dump_thread->Stop();
+  }
+  AutoLock lock(lock_);
+  dump_thread_.reset();
+  g_memory_dump_manager_for_testing = nullptr;
+}
+
+bool MemoryDumpManager::EnableHeapProfiling(HeapProfilingMode profiling_mode) {
+  AutoLock lock(lock_);
+#if BUILDFLAG(USE_ALLOCATOR_SHIM) && !defined(OS_NACL)
+  bool notify_mdps = true;
+
+  if (heap_profiling_mode_ == kHeapProfilingModeInvalid)
+    return false;  // Disabled permanently.
+
+  if (IsHeapProfilingModeEnabled(heap_profiling_mode_) ==
+      IsHeapProfilingModeEnabled(profiling_mode)) {
+    if (profiling_mode == kHeapProfilingModeDisabled)
+      heap_profiling_mode_ = kHeapProfilingModeInvalid;  // Disable permanently.
+    return false;
+  }
+
+  switch (profiling_mode) {
+    case kHeapProfilingModeTaskProfiler:
+      if (!base::debug::ThreadHeapUsageTracker::IsHeapTrackingEnabled())
+        base::debug::ThreadHeapUsageTracker::EnableHeapTracking();
+      notify_mdps = false;
+      break;
+
+    case kHeapProfilingModeBackground:
+      AllocationContextTracker::SetCaptureMode(
+          AllocationContextTracker::CaptureMode::MIXED_STACK);
+      break;
+
+    case kHeapProfilingModePseudo:
+      AllocationContextTracker::SetCaptureMode(
+          AllocationContextTracker::CaptureMode::PSEUDO_STACK);
+      EnableFilteringForPseudoStackProfiling();
+      break;
+
+    case kHeapProfilingModeNative:
+#if defined(OS_ANDROID) && BUILDFLAG(CAN_UNWIND_WITH_CFI_TABLE)
+    {
+      bool can_unwind = CFIBacktraceAndroid::GetInitializedInstance()
+                            ->can_unwind_stack_frames();
+      DCHECK(can_unwind);
+    }
+#endif
+      // If we don't have frame pointers and unwind tables, then native tracing
+      // falls back to using base::debug::StackTrace, which may be slow.
+      AllocationContextTracker::SetCaptureMode(
+          AllocationContextTracker::CaptureMode::NATIVE_STACK);
+      break;
+
+    case kHeapProfilingModeDisabled:
+      if (heap_profiling_mode_ == kHeapProfilingModeTaskProfiler) {
+        LOG(ERROR) << "ThreadHeapUsageTracker cannot be disabled.";
+        return false;
+      }
+      if (heap_profiling_mode_ == kHeapProfilingModePseudo)
+        TraceLog::GetInstance()->SetDisabled(TraceLog::FILTERING_MODE);
+      AllocationContextTracker::SetCaptureMode(
+          AllocationContextTracker::CaptureMode::DISABLED);
+      heap_profiling_mode_ = kHeapProfilingModeInvalid;  // Disable permanently.
+      break;
+
+    default:
+      NOTREACHED() << "Incorrect heap profiling mode " << profiling_mode;
+      return false;
+  }
+
+  if (heap_profiling_mode_ != kHeapProfilingModeInvalid)
+    heap_profiling_mode_ = profiling_mode;
+
+  // If tracing was already enabled, set up the serialization state before
+  // notifying MDPs.
+  InitializeHeapProfilerStateIfNeededLocked();
+  if (notify_mdps) {
+    bool enabled = IsHeapProfilingModeEnabled(heap_profiling_mode_);
+    for (const auto& mdpinfo : dump_providers_)
+      NotifyHeapProfilingEnabledLocked(mdpinfo, enabled);
+  }
+  return true;
+#else
+  heap_profiling_mode_ = kHeapProfilingModeInvalid;
+  return false;
+#endif  // BUILDFLAG(USE_ALLOCATOR_SHIM) && !defined(OS_NACL)
+}
+
+HeapProfilingMode MemoryDumpManager::GetHeapProfilingMode() {
+  AutoLock lock(lock_);
+  return heap_profiling_mode_;
+}
+
+void MemoryDumpManager::Initialize(
+    RequestGlobalDumpFunction request_dump_function,
+    bool is_coordinator) {
+  {
+    AutoLock lock(lock_);
+    DCHECK(!request_dump_function.is_null());
+    DCHECK(!can_request_global_dumps());
+    request_dump_function_ = request_dump_function;
+    is_coordinator_ = is_coordinator;
+  }
+
+// Enable the core dump providers.
+#if defined(MALLOC_MEMORY_TRACING_SUPPORTED)
+  base::trace_event::MemoryDumpProvider::Options options;
+  options.supports_heap_profiling = true;
+  RegisterDumpProvider(MallocDumpProvider::GetInstance(), "Malloc", nullptr,
+                       options);
+#endif
+
+#if defined(OS_ANDROID)
+  RegisterDumpProvider(JavaHeapDumpProvider::GetInstance(), "JavaHeap",
+                       nullptr);
+#endif
+
+  TRACE_EVENT_WARMUP_CATEGORY(kTraceCategory);
+}
+
+void MemoryDumpManager::RegisterDumpProvider(
+    MemoryDumpProvider* mdp,
+    const char* name,
+    scoped_refptr<SingleThreadTaskRunner> task_runner,
+    MemoryDumpProvider::Options options) {
+  options.dumps_on_single_thread_task_runner = true;
+  RegisterDumpProviderInternal(mdp, name, std::move(task_runner), options);
+}
+
+void MemoryDumpManager::RegisterDumpProvider(
+    MemoryDumpProvider* mdp,
+    const char* name,
+    scoped_refptr<SingleThreadTaskRunner> task_runner) {
+  // Set |dumps_on_single_thread_task_runner| to true because all providers
+  // without a task runner are run on the dump thread.
+  MemoryDumpProvider::Options options;
+  options.dumps_on_single_thread_task_runner = true;
+  RegisterDumpProviderInternal(mdp, name, std::move(task_runner), options);
+}
+
+void MemoryDumpManager::RegisterDumpProviderWithSequencedTaskRunner(
+    MemoryDumpProvider* mdp,
+    const char* name,
+    scoped_refptr<SequencedTaskRunner> task_runner,
+    MemoryDumpProvider::Options options) {
+  DCHECK(task_runner);
+  options.dumps_on_single_thread_task_runner = false;
+  RegisterDumpProviderInternal(mdp, name, std::move(task_runner), options);
+}
+
+void MemoryDumpManager::RegisterDumpProviderInternal(
+    MemoryDumpProvider* mdp,
+    const char* name,
+    scoped_refptr<SequencedTaskRunner> task_runner,
+    const MemoryDumpProvider::Options& options) {
+  if (dumper_registrations_ignored_for_testing_)
+    return;
+
+  // Only a handful of MDPs are required to compute the memory metrics. These
+  // have small enough performance overhead that it is reasonable to run them
+  // in the background while the user is doing other things. Those MDPs are
+  // 'whitelisted for background mode'.
+  bool whitelisted_for_background_mode = IsMemoryDumpProviderWhitelisted(name);
+
+  scoped_refptr<MemoryDumpProviderInfo> mdpinfo =
+      new MemoryDumpProviderInfo(mdp, name, std::move(task_runner), options,
+                                 whitelisted_for_background_mode);
+
+  if (options.is_fast_polling_supported) {
+    DCHECK(!mdpinfo->task_runner) << "MemoryDumpProviders capable of fast "
+                                     "polling must NOT be thread bound.";
+  }
+
+  {
+    AutoLock lock(lock_);
+    bool already_registered = !dump_providers_.insert(mdpinfo).second;
+    // This actually happens in some tests which don't have a clean tear-down
+    // path for RenderThreadImpl::Init().
+    if (already_registered)
+      return;
+
+    if (options.is_fast_polling_supported)
+      MemoryPeakDetector::GetInstance()->NotifyMemoryDumpProvidersChanged();
+
+    if (ShouldEnableMDPAllocatorHooks(heap_profiling_mode_))
+      NotifyHeapProfilingEnabledLocked(mdpinfo, true);
+  }
+}
+
+void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) {
+  UnregisterDumpProviderInternal(mdp, false /* delete_async */);
+}
+
+void MemoryDumpManager::UnregisterAndDeleteDumpProviderSoon(
+    std::unique_ptr<MemoryDumpProvider> mdp) {
+  UnregisterDumpProviderInternal(mdp.release(), true /* delete_async */);
+}
+
+void MemoryDumpManager::UnregisterDumpProviderInternal(
+    MemoryDumpProvider* mdp,
+    bool take_mdp_ownership_and_delete_async) {
+  std::unique_ptr<MemoryDumpProvider> owned_mdp;
+  if (take_mdp_ownership_and_delete_async)
+    owned_mdp.reset(mdp);
+
+  AutoLock lock(lock_);
+
+  auto mdp_iter = dump_providers_.begin();
+  for (; mdp_iter != dump_providers_.end(); ++mdp_iter) {
+    if ((*mdp_iter)->dump_provider == mdp)
+      break;
+  }
+
+  if (mdp_iter == dump_providers_.end())
+    return;  // Not registered / already unregistered.
+
+  if (take_mdp_ownership_and_delete_async) {
+    // The MDP will be deleted when the MDPInfo struct is, that is either:
+    // - At the end of this function, if no dump is in progress.
+    // - In ContinueAsyncProcessDump() when MDPInfo is removed from
+    //   |pending_dump_providers|.
+    // - When the provider is removed from other clients (MemoryPeakDetector).
+    DCHECK(!(*mdp_iter)->owned_dump_provider);
+    (*mdp_iter)->owned_dump_provider = std::move(owned_mdp);
+  } else {
+    // If you hit this DCHECK, your dump provider has a bug.
+    // Unregistration of a MemoryDumpProvider is safe only if:
+    // - The MDP has specified a sequenced task runner affinity AND the
+    //   unregistration happens on the same task runner. So that the MDP cannot
+    //   unregister and be in the middle of a OnMemoryDump() at the same time.
+    // - The MDP has NOT specified a task runner affinity and its ownership is
+    //   transferred via UnregisterAndDeleteDumpProviderSoon().
+    // In all the other cases, it is not possible to guarantee that the
+    // unregistration will not race with OnMemoryDump() calls.
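+    //
+    // A minimal sketch of the safe ownership-transfer path (|owned_mdp| here
+    // is illustrative):
+    //   mdm->UnregisterAndDeleteDumpProviderSoon(std::move(owned_mdp));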
+    DCHECK((*mdp_iter)->task_runner &&
+           (*mdp_iter)->task_runner->RunsTasksInCurrentSequence())
+        << "MemoryDumpProvider \"" << (*mdp_iter)->name << "\" attempted to "
+        << "unregister itself in a racy way. Please file a crbug.";
+  }
+
+  if ((*mdp_iter)->options.is_fast_polling_supported) {
+    DCHECK(take_mdp_ownership_and_delete_async);
+    MemoryPeakDetector::GetInstance()->NotifyMemoryDumpProvidersChanged();
+  }
+
+  // The MDPInfo instance can still be referenced by the
+  // |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason
+  // the MDPInfo is flagged as disabled. It will cause InvokeOnMemoryDump()
+  // to just skip it, without actually invoking the |mdp|, which might be
+  // destroyed by the caller soon after this method returns.
+  (*mdp_iter)->disabled = true;
+  dump_providers_.erase(mdp_iter);
+}
+
+void MemoryDumpManager::GetDumpProvidersForPolling(
+    std::vector<scoped_refptr<MemoryDumpProviderInfo>>* providers) {
+  DCHECK(providers->empty());
+  AutoLock lock(lock_);
+  for (const scoped_refptr<MemoryDumpProviderInfo>& mdp : dump_providers_) {
+    if (mdp->options.is_fast_polling_supported)
+      providers->push_back(mdp);
+  }
+}
+
+bool MemoryDumpManager::IsDumpProviderRegisteredForTesting(
+    MemoryDumpProvider* provider) {
+  AutoLock lock(lock_);
+
+  for (const auto& info : dump_providers_) {
+    if (info->dump_provider == provider)
+      return true;
+  }
+  return false;
+}
+
+scoped_refptr<base::SequencedTaskRunner>
+MemoryDumpManager::GetOrCreateBgTaskRunnerLocked() {
+  lock_.AssertAcquired();
+
+  if (dump_thread_)
+    return dump_thread_->task_runner();
+
+  dump_thread_ = std::make_unique<Thread>("MemoryInfra");
+  bool started = dump_thread_->Start();
+  CHECK(started);
+
+  return dump_thread_->task_runner();
+}
+
+void MemoryDumpManager::CreateProcessDump(
+    const MemoryDumpRequestArgs& args,
+    const ProcessMemoryDumpCallback& callback) {
+  char guid_str[20];
+  sprintf(guid_str, "0x%" PRIx64, args.dump_guid);
+  TRACE_EVENT_NESTABLE_ASYNC_BEGIN1(kTraceCategory, "ProcessMemoryDump",
+                                    TRACE_ID_LOCAL(args.dump_guid), "dump_guid",
+                                    TRACE_STR_COPY(guid_str));
+
+  // If the argument filter is enabled, then only background-mode dumps should
+  // be allowed. If the trace config passed for a background tracing session is
+  // missing the allowed-modes argument, we crash here instead of creating
+  // unexpected dumps.
+  if (TraceLog::GetInstance()
+          ->GetCurrentTraceConfig()
+          .IsArgumentFilterEnabled()) {
+    CHECK_EQ(MemoryDumpLevelOfDetail::BACKGROUND, args.level_of_detail);
+  }
+
+  std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state;
+  {
+    AutoLock lock(lock_);
+
+    // MDM could have been disabled by this point, destroying
+    // |heap_profiler_serialization_state|. Heap profiling requires session
+    // state, so if heap profiling is on and the session state is absent, we
+    // fail the dump immediately. If the heap profiler is enabled during the
+    // dump, the dump still succeeds (it was requested beforehand), and future
+    // process dumps will contain heap dumps.
+    if (args.dump_type != MemoryDumpType::SUMMARY_ONLY &&
+        ShouldEnableMDPAllocatorHooks(heap_profiling_mode_) &&
+        !heap_profiler_serialization_state_) {
+      callback.Run(false /* success */, args.dump_guid, nullptr);
+      return;
+    }
+
+    pmd_async_state.reset(new ProcessMemoryDumpAsyncState(
+        args, dump_providers_, heap_profiler_serialization_state_, callback,
+        GetOrCreateBgTaskRunnerLocked()));
+
+    // If enabled, holds back the peak detector, resetting its estimation window.
+    MemoryPeakDetector::GetInstance()->Throttle();
+  }
+
+  // Start the process dump. This involves task runner hops as specified by the
+  // MemoryDumpProvider(s) in RegisterDumpProvider().
+  ContinueAsyncProcessDump(pmd_async_state.release());
+}
+
+// Invokes OnMemoryDump() on all MDPs that are next in the pending list and run
+// on the current sequenced task runner. If the next MDP does not run on the
+// current sequenced task runner, switches to that task runner and continues. All
+// OnMemoryDump() invocations are linearized. |lock_| is used in these functions
+// purely to ensure consistency w.r.t. (un)registrations of |dump_providers_|.
+void MemoryDumpManager::ContinueAsyncProcessDump(
+    ProcessMemoryDumpAsyncState* owned_pmd_async_state) {
+  HEAP_PROFILER_SCOPED_IGNORE;
+  // Initializes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs
+  // in the PostTask below don't end up registering their own dump providers
+  // (for discounting trace memory overhead) while holding the |lock_|.
+  TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported();
+
+  // In theory |owned_pmd_async_state| should be a unique_ptr. The only reason
+  // it isn't is the corner-case logic of |did_post_task| below, which needs to
+  // take back ownership of |pmd_async_state| when the PostTask() fails.
+  // Unfortunately, PostTask() destroys its unique_ptr arguments upon failure
+  // to prevent accidental leaks. Using a unique_ptr would prevent us from
+  // skipping the hop and moving on. Hence the manual naked -> unique ptr
+  // juggling.
+  auto pmd_async_state = WrapUnique(owned_pmd_async_state);
+  owned_pmd_async_state = nullptr;
+
+  while (!pmd_async_state->pending_dump_providers.empty()) {
+    // Read MemoryDumpProviderInfo thread safety considerations in
+    // memory_dump_manager.h when accessing |mdpinfo| fields.
+    MemoryDumpProviderInfo* mdpinfo =
+        pmd_async_state->pending_dump_providers.back().get();
+
+    // If we are in background mode, we should invoke only the whitelisted
+    // providers. Ignore other providers and continue.
+    if (pmd_async_state->req_args.level_of_detail ==
+            MemoryDumpLevelOfDetail::BACKGROUND &&
+        !mdpinfo->whitelisted_for_background_mode) {
+      pmd_async_state->pending_dump_providers.pop_back();
+      continue;
+    }
+
+    // If the dump provider did not specify a task runner affinity, dump on
+    // |dump_thread_|.
+    scoped_refptr<SequencedTaskRunner> task_runner = mdpinfo->task_runner;
+    if (!task_runner) {
+      DCHECK(mdpinfo->options.dumps_on_single_thread_task_runner);
+      task_runner = pmd_async_state->dump_thread_task_runner;
+      DCHECK(task_runner);
+    }
+
+    // If |RunsTasksInCurrentSequence()| is true then no PostTask is
+    // required since we are on the right SequencedTaskRunner.
+    if (task_runner->RunsTasksInCurrentSequence()) {
+      InvokeOnMemoryDump(mdpinfo, pmd_async_state->process_memory_dump.get());
+      pmd_async_state->pending_dump_providers.pop_back();
+      continue;
+    }
+
+    bool did_post_task = task_runner->PostTask(
+        FROM_HERE,
+        BindOnce(&MemoryDumpManager::ContinueAsyncProcessDump, Unretained(this),
+                 Unretained(pmd_async_state.get())));
+
+    if (did_post_task) {
+      // Ownership is transferred to the posted task.
+      ignore_result(pmd_async_state.release());
+      return;
+    }
+
+    // PostTask usually fails only if the process or thread is shutting down,
+    // so the dump provider is disabled here. Unbound dump providers are not
+    // disabled, though, since |dump_thread_| is controlled by the MDM.
+    if (mdpinfo->task_runner) {
+      // A locked access is required to R/W |disabled| (for the
+      // UnregisterAndDeleteDumpProviderSoon() case).
+      AutoLock lock(lock_);
+      mdpinfo->disabled = true;
+    }
+
+    // PostTask failed. Ignore the dump provider and continue.
+    pmd_async_state->pending_dump_providers.pop_back();
+  }
+
+  FinishAsyncProcessDump(std::move(pmd_async_state));
+}
+
+// This function is called on the right task runner for the current MDP: either
+// the task runner specified by the MDP, or |dump_thread_task_runner| if the
+// MDP did not specify one. Invokes the dump provider's OnMemoryDump() (unless
+// disabled).
+void MemoryDumpManager::InvokeOnMemoryDump(MemoryDumpProviderInfo* mdpinfo,
+                                           ProcessMemoryDump* pmd) {
+  HEAP_PROFILER_SCOPED_IGNORE;
+  DCHECK(!mdpinfo->task_runner ||
+         mdpinfo->task_runner->RunsTasksInCurrentSequence());
+
+  TRACE_EVENT1(kTraceCategory, "MemoryDumpManager::InvokeOnMemoryDump",
+               "dump_provider.name", mdpinfo->name);
+
+  // Do not add any other TRACE_EVENT macro (or function that might have them)
+  // below this point. Under some rare circumstances, they can re-initialize
+  // and invalidate the current ThreadLocalEventBuffer MDP, making the
+  // |disabled| check below susceptible to TOCTTOU bugs
+  // (https://crbug.com/763365).
+
+  bool is_thread_bound;
+  {
+    // A locked access is required to R/W |disabled| (for the
+    // UnregisterAndDeleteDumpProviderSoon() case).
+    AutoLock lock(lock_);
+
+    // Unregister the dump provider if it failed too many times consecutively.
+    if (!mdpinfo->disabled &&
+        mdpinfo->consecutive_failures >= kMaxConsecutiveFailuresCount) {
+      mdpinfo->disabled = true;
+      DLOG(ERROR) << "Disabling MemoryDumpProvider \"" << mdpinfo->name
+                  << "\". Dump failed multiple times consecutively.";
+    }
+    if (mdpinfo->disabled)
+      return;
+
+    is_thread_bound = mdpinfo->task_runner != nullptr;
+  }  // AutoLock lock(lock_);
+
+  // Invoke the dump provider.
+
+  // A stack-allocated string with the dump provider name is useful for
+  // debugging crashes that happen when a dump is invoked after a
+  // |dump_provider| was unregistered in an unsafe way.
+  char provider_name_for_debugging[16];
+  strncpy(provider_name_for_debugging, mdpinfo->name,
+          sizeof(provider_name_for_debugging) - 1);
+  provider_name_for_debugging[sizeof(provider_name_for_debugging) - 1] = '\0';
+  base::debug::Alias(provider_name_for_debugging);
+
+  ANNOTATE_BENIGN_RACE(&mdpinfo->disabled, "best-effort race detection");
+  CHECK(!is_thread_bound ||
+        !*(static_cast<volatile bool*>(&mdpinfo->disabled)));
+  bool dump_successful =
+      mdpinfo->dump_provider->OnMemoryDump(pmd->dump_args(), pmd);
+  mdpinfo->consecutive_failures =
+      dump_successful ? 0 : mdpinfo->consecutive_failures + 1;
+}
+
+void MemoryDumpManager::FinishAsyncProcessDump(
+    std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
+  HEAP_PROFILER_SCOPED_IGNORE;
+  DCHECK(pmd_async_state->pending_dump_providers.empty());
+  const uint64_t dump_guid = pmd_async_state->req_args.dump_guid;
+  if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) {
+    scoped_refptr<SingleThreadTaskRunner> callback_task_runner =
+        pmd_async_state->callback_task_runner;
+    callback_task_runner->PostTask(
+        FROM_HERE, BindOnce(&MemoryDumpManager::FinishAsyncProcessDump,
+                            Unretained(this), std::move(pmd_async_state)));
+    return;
+  }
+
+  TRACE_EVENT0(kTraceCategory, "MemoryDumpManager::FinishAsyncProcessDump");
+
+  // In the general case (allocators and edges) the serialization into the trace
+  // buffer is handled by the memory-infra service (see tracing_observer.cc).
+  // This special case below deals only with serialization of the heap profiler
+  // and is temporary given the upcoming work on the out-of-process heap
+  // profiler.
+  const auto& args = pmd_async_state->req_args;
+  if (!pmd_async_state->process_memory_dump->heap_dumps().empty()) {
+    std::unique_ptr<TracedValue> traced_value = std::make_unique<TracedValue>();
+    pmd_async_state->process_memory_dump->SerializeHeapProfilerDumpsInto(
+        traced_value.get());
+
+    traced_value->SetString("level_of_detail",
+                            base::trace_event::MemoryDumpLevelOfDetailToString(
+                                args.level_of_detail));
+    std::unique_ptr<base::trace_event::ConvertableToTraceFormat> event_value(
+        std::move(traced_value));
+    TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_PROCESS_ID(
+        TRACE_EVENT_PHASE_MEMORY_DUMP,
+        base::trace_event::TraceLog::GetCategoryGroupEnabled(
+            base::trace_event::MemoryDumpManager::kTraceCategory),
+        base::trace_event::MemoryDumpTypeToString(args.dump_type),
+        trace_event_internal::kGlobalScope, args.dump_guid,
+        base::kNullProcessId, 1 /* num_args */, kTraceEventArgNames,
+        kTraceEventArgTypes, nullptr /* arg_values */, &event_value,
+        TRACE_EVENT_FLAG_HAS_ID);
+  }
+
+  if (!pmd_async_state->callback.is_null()) {
+    pmd_async_state->callback.Run(
+        true /* success */, dump_guid,
+        std::move(pmd_async_state->process_memory_dump));
+    pmd_async_state->callback.Reset();
+  }
+
+  TRACE_EVENT_NESTABLE_ASYNC_END0(kTraceCategory, "ProcessMemoryDump",
+                                  TRACE_ID_LOCAL(dump_guid));
+}
+
+void MemoryDumpManager::SetupForTracing(
+    const TraceConfig::MemoryDumpConfig& memory_dump_config) {
+  AutoLock lock(lock_);
+  heap_profiler_serialization_state_ = new HeapProfilerSerializationState();
+  heap_profiler_serialization_state_
+      ->set_heap_profiler_breakdown_threshold_bytes(
+          memory_dump_config.heap_profiler_options.breakdown_threshold_bytes);
+  InitializeHeapProfilerStateIfNeededLocked();
+
+  // At this point we must have the ability to request global dumps.
+  DCHECK(can_request_global_dumps());
+
+  MemoryDumpScheduler::Config periodic_config;
+  bool peak_detector_configured = false;
+  for (const auto& trigger : memory_dump_config.triggers) {
+    if (trigger.trigger_type == MemoryDumpType::PERIODIC_INTERVAL) {
+      if (periodic_config.triggers.empty()) {
+        periodic_config.callback =
+            BindRepeating(&DoGlobalDumpWithoutCallback, request_dump_function_,
+                          MemoryDumpType::PERIODIC_INTERVAL);
+      }
+      periodic_config.triggers.push_back(
+          {trigger.level_of_detail, trigger.min_time_between_dumps_ms});
+    } else if (trigger.trigger_type == MemoryDumpType::PEAK_MEMORY_USAGE) {
+      // At most one peak trigger is allowed.
+      CHECK(!peak_detector_configured);
+      peak_detector_configured = true;
+      MemoryPeakDetector::GetInstance()->Setup(
+          BindRepeating(&MemoryDumpManager::GetDumpProvidersForPolling,
+                        Unretained(this)),
+          GetOrCreateBgTaskRunnerLocked(),
+          BindRepeating(&DoGlobalDumpWithoutCallback, request_dump_function_,
+                        MemoryDumpType::PEAK_MEMORY_USAGE,
+                        trigger.level_of_detail));
+
+      MemoryPeakDetector::Config peak_config;
+      peak_config.polling_interval_ms = 10;
+      peak_config.min_time_between_peaks_ms = trigger.min_time_between_dumps_ms;
+      peak_config.enable_verbose_poll_tracing =
+          trigger.level_of_detail == MemoryDumpLevelOfDetail::DETAILED;
+      MemoryPeakDetector::GetInstance()->Start(peak_config);
+
+      // When peak detection is enabled, trigger a dump straight away as it
+      // gives a good reference point for analyzing the trace.
+      if (is_coordinator_) {
+        GetOrCreateBgTaskRunnerLocked()->PostTask(
+            FROM_HERE,
+            BindOnce(&DoGlobalDumpWithoutCallback, request_dump_function_,
+                     MemoryDumpType::PEAK_MEMORY_USAGE,
+                     trigger.level_of_detail));
+      }
+    }
+  }
+
+  // Only the coordinator process triggers periodic memory dumps.
+  if (is_coordinator_ && !periodic_config.triggers.empty()) {
+    MemoryDumpScheduler::GetInstance()->Start(periodic_config,
+                                              GetOrCreateBgTaskRunnerLocked());
+  }
+}
+
+void MemoryDumpManager::TeardownForTracing() {
+  // There might be a memory dump in progress while this happens. Therefore,
+  // ensure that any MDM state which depends on the tracing enabled / disabled
+  // state is only accessed by the dumping methods while holding |lock_|.
+  AutoLock lock(lock_);
+
+  MemoryDumpScheduler::GetInstance()->Stop();
+  MemoryPeakDetector::GetInstance()->TearDown();
+  heap_profiler_serialization_state_ = nullptr;
+}
+
+void MemoryDumpManager::InitializeHeapProfilerStateIfNeededLocked() {
+  lock_.AssertAcquired();
+  if (!ShouldEnableMDPAllocatorHooks(heap_profiling_mode_) ||
+      !heap_profiler_serialization_state_ ||
+      heap_profiler_serialization_state_->is_initialized()) {
+    return;
+  }
+  // If heap profiling is enabled, the stack frame deduplicator and type name
+  // deduplicator will be in use. Add metadata events to write the frames
+  // and type IDs.
+  heap_profiler_serialization_state_->SetStackFrameDeduplicator(
+      WrapUnique(new StackFrameDeduplicator));
+  heap_profiler_serialization_state_->SetTypeNameDeduplicator(
+      WrapUnique(new TypeNameDeduplicator));
+
+  TRACE_EVENT_API_ADD_METADATA_EVENT(
+      TraceLog::GetCategoryGroupEnabled("__metadata"), "stackFrames",
+      "stackFrames",
+      std::make_unique<SessionStateConvertableProxy<StackFrameDeduplicator>>(
+          heap_profiler_serialization_state_,
+          &HeapProfilerSerializationState::stack_frame_deduplicator));
+
+  TRACE_EVENT_API_ADD_METADATA_EVENT(
+      TraceLog::GetCategoryGroupEnabled("__metadata"), "typeNames", "typeNames",
+      std::make_unique<SessionStateConvertableProxy<TypeNameDeduplicator>>(
+          heap_profiler_serialization_state_,
+          &HeapProfilerSerializationState::type_name_deduplicator));
+}
+
+void MemoryDumpManager::NotifyHeapProfilingEnabledLocked(
+    scoped_refptr<MemoryDumpProviderInfo> mdpinfo,
+    bool enabled) {
+  lock_.AssertAcquired();
+  if (!mdpinfo->options.supports_heap_profiling)
+    return;
+
+  const auto& task_runner = mdpinfo->task_runner
+                                ? mdpinfo->task_runner
+                                : GetOrCreateBgTaskRunnerLocked();
+  // TODO(ssid): Post tasks only for MDPs that support heap profiling.
+  task_runner->PostTask(
+      FROM_HERE,
+      BindOnce(&NotifyHeapProfilingEnabledOnMDPThread, mdpinfo, enabled));
+}
+
+MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState(
+    MemoryDumpRequestArgs req_args,
+    const MemoryDumpProviderInfo::OrderedSet& dump_providers,
+    scoped_refptr<HeapProfilerSerializationState>
+        heap_profiler_serialization_state_in,
+    ProcessMemoryDumpCallback callback,
+    scoped_refptr<SequencedTaskRunner> dump_thread_task_runner)
+    : req_args(req_args),
+      heap_profiler_serialization_state(
+          std::move(heap_profiler_serialization_state_in)),
+      callback(callback),
+      callback_task_runner(ThreadTaskRunnerHandle::Get()),
+      dump_thread_task_runner(std::move(dump_thread_task_runner)) {
+  pending_dump_providers.reserve(dump_providers.size());
+  pending_dump_providers.assign(dump_providers.rbegin(), dump_providers.rend());
+  MemoryDumpArgs args = {req_args.level_of_detail, req_args.dump_guid};
+  process_memory_dump = std::make_unique<ProcessMemoryDump>(
+      heap_profiler_serialization_state, args);
+}
+
+MemoryDumpManager::ProcessMemoryDumpAsyncState::~ProcessMemoryDumpAsyncState() =
+    default;
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/memory_dump_manager.h b/base/trace_event/memory_dump_manager.h
new file mode 100644
index 0000000..072a7d6
--- /dev/null
+++ b/base/trace_event/memory_dump_manager.h
@@ -0,0 +1,320 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_MEMORY_DUMP_MANAGER_H_
+#define BASE_TRACE_EVENT_MEMORY_DUMP_MANAGER_H_
+
+#include <stdint.h>
+
+#include <map>
+#include <memory>
+#include <unordered_set>
+#include <vector>
+
+#include "base/atomicops.h"
+#include "base/containers/hash_tables.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/singleton.h"
+#include "base/synchronization/lock.h"
+#include "base/trace_event/memory_allocator_dump.h"
+#include "base/trace_event/memory_dump_provider_info.h"
+#include "base/trace_event/memory_dump_request_args.h"
+#include "base/trace_event/process_memory_dump.h"
+#include "base/trace_event/trace_event.h"
+
+namespace base {
+
+class SequencedTaskRunner;
+class SingleThreadTaskRunner;
+class Thread;
+
+namespace trace_event {
+
+class MemoryDumpProvider;
+class HeapProfilerSerializationState;
+
+enum HeapProfilingMode {
+  kHeapProfilingModeDisabled,
+  kHeapProfilingModeTaskProfiler,  // Per task counters for allocs and frees.
+  kHeapProfilingModeBackground,    // Pseudo stacks without default filtering.
+  kHeapProfilingModePseudo,  // Pseudo stacks with default filtering categories.
+  kHeapProfilingModeNative,  // Native stacks
+  kHeapProfilingModeInvalid  // Disabled permanently or unsupported.
+};
+
+// This is the interface exposed to the rest of the codebase to deal with
+// memory tracing. The main entry point for clients is CreateProcessDump();
+// the extension point is (Un)RegisterDumpProvider().
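+//
+// Example usage (an illustrative sketch; FooDumpProvider, GetFooUsageBytes()
+// and the "foo/usage" dump name are hypothetical, not part of this file):
+//
+//   class FooDumpProvider : public MemoryDumpProvider {
+//    public:
+//     bool OnMemoryDump(const MemoryDumpArgs& args,
+//                       ProcessMemoryDump* pmd) override {
+//       MemoryAllocatorDump* dump = pmd->CreateAllocatorDump("foo/usage");
+//       dump->AddScalar(MemoryAllocatorDump::kNameSize,
+//                       MemoryAllocatorDump::kUnitsBytes,
+//                       GetFooUsageBytes());
+//       return true;  // Returning false counts as a failed dump.
+//     }
+//   };
+//
+//   // Registration, e.g. from the thread the provider will dump on:
+//   MemoryDumpManager::GetInstance()->RegisterDumpProvider(
+//       &foo_provider, "FooDumpProvider", ThreadTaskRunnerHandle::Get());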
+class BASE_EXPORT MemoryDumpManager {
+ public:
+  using RequestGlobalDumpFunction =
+      RepeatingCallback<void(MemoryDumpType, MemoryDumpLevelOfDetail)>;
+
+  static const char* const kTraceCategory;
+
+  // This value is returned as the tracing id of the child processes by
+  // GetTracingProcessId() when tracing is not enabled.
+  static const uint64_t kInvalidTracingProcessId;
+
+  static MemoryDumpManager* GetInstance();
+  static std::unique_ptr<MemoryDumpManager> CreateInstanceForTesting();
+
+  // Invoked once per process to listen to trace begin / end events.
+  // Initialization can happen after (Un)RegisterDumpProvider() calls, and the
+  // MemoryDumpManager guarantees to support this.
+  // On the other hand, the MemoryDumpManager will not be fully operational
+  // (any CreateProcessDump() will return a failure) until initialized.
+  // Arguments:
+  //  is_coordinator: True when the current process coordinates the periodic
+  //      dump triggering.
+  //  request_dump_function: Function to invoke a global dump. Global dump
+  //      involves embedder-specific behaviors like multiprocess handshaking.
+  //      TODO(primiano): this is only required to trigger global dumps from
+  //      the scheduler and the peak detector. Should be removed once they are
+  //      both moved out of base.
+  void Initialize(RequestGlobalDumpFunction request_dump_function,
+                  bool is_coordinator);
+
+  // (Un)Registers a MemoryDumpProvider instance.
+  // Args:
+  //  - mdp: the MemoryDumpProvider instance to be registered. MemoryDumpManager
+  //      does NOT take memory ownership of |mdp|, which is expected to either
+  //      be a singleton or unregister itself.
+  //  - name: a friendly name (duplicates allowed). Used for debugging and
+  //      run-time profiling of memory-infra internals. Must be a long-lived
+  //      C string.
+  //  - task_runner: either a SingleThreadTaskRunner or SequencedTaskRunner. All
+  //      the calls to |mdp| will be run on the given |task_runner|. If null is
+  //      passed, |mdp| must be able to handle calls on arbitrary threads.
+  //  - options: extra optional arguments. See memory_dump_provider.h.
+  void RegisterDumpProvider(MemoryDumpProvider* mdp,
+                            const char* name,
+                            scoped_refptr<SingleThreadTaskRunner> task_runner);
+  void RegisterDumpProvider(MemoryDumpProvider* mdp,
+                            const char* name,
+                            scoped_refptr<SingleThreadTaskRunner> task_runner,
+                            MemoryDumpProvider::Options options);
+  void RegisterDumpProviderWithSequencedTaskRunner(
+      MemoryDumpProvider* mdp,
+      const char* name,
+      scoped_refptr<SequencedTaskRunner> task_runner,
+      MemoryDumpProvider::Options options);
+  void UnregisterDumpProvider(MemoryDumpProvider* mdp);
+
+  // Unregisters an unbound dump provider and takes care of deleting it
+  // asynchronously. Can be used only for dump providers with no task-runner
+  // affinity.
+  // This method takes ownership of the dump provider and guarantees that:
+  //  - The |mdp| will be deleted at some point in the near future.
+  //  - Its deletion will not happen concurrently with the OnMemoryDump() call.
+  // Note that OnMemoryDump() and PollFastMemoryTotal() calls can still happen
+  // after this method returns.
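+  // For example (illustrative; FooDumpProvider is a hypothetical provider):
+  //   std::unique_ptr<MemoryDumpProvider> mdp =
+  //       std::make_unique<FooDumpProvider>();
+  //   ...
+  //   MemoryDumpManager::GetInstance()->UnregisterAndDeleteDumpProviderSoon(
+  //       std::move(mdp));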
+  void UnregisterAndDeleteDumpProviderSoon(
+      std::unique_ptr<MemoryDumpProvider> mdp);
+
+  // Prepares MemoryDumpManager for CreateProcessDump() calls in
+  // tracing-related modes (i.e. |dump_type| != SUMMARY_ONLY).
+  // Also initializes the peak detector, scheduler and heap profiler with the
+  // given config.
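+  // For reference, a periodic trigger typically originates from a trace
+  // config along these lines (a sketch of the JSON format, shown for
+  // illustration only):
+  //   {"included_categories": ["disabled-by-default-memory-infra"],
+  //    "memory_dump_config": {"triggers":
+  //        [{"mode": "detailed", "periodic_interval_ms": 2000}]}}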
+  void SetupForTracing(const TraceConfig::MemoryDumpConfig&);
+
+  // Tears down tracing-related state.
+  // Non-tracing modes (e.g. SUMMARY_ONLY) will continue to work.
+  void TeardownForTracing();
+
+  // Creates a memory dump for the current process and appends it to the trace.
+  // |callback| will be invoked asynchronously upon completion on the same
+  // thread on which CreateProcessDump() was called. This method should only be
+  // used by the memory-infra service while creating a global memory dump.
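+  // An illustrative call (this mirrors the in-process test helper in
+  // memory_dump_manager_test_utils.h):
+  //   MemoryDumpRequestArgs args = {1 /* dump_guid */,
+  //                                 MemoryDumpType::EXPLICITLY_TRIGGERED,
+  //                                 MemoryDumpLevelOfDetail::DETAILED};
+  //   MemoryDumpManager::GetInstance()->CreateProcessDump(
+  //       args, ProcessMemoryDumpCallback());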
+  void CreateProcessDump(const MemoryDumpRequestArgs& args,
+                         const ProcessMemoryDumpCallback& callback);
+
+  // Enables heap profiling with the specified |profiling_mode|.
+  // Use kHeapProfilingModeDisabled to disable; profiling cannot be re-enabled
+  // afterwards. Returns true if the mode was *changed* to |profiling_mode|.
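+  // For example (illustrative):
+  //   bool changed = MemoryDumpManager::GetInstance()->EnableHeapProfiling(
+  //       kHeapProfilingModePseudo);  // Pseudo stacks, default filtering.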
+  bool EnableHeapProfiling(HeapProfilingMode profiling_mode);
+  HeapProfilingMode GetHeapProfilingMode();
+
+  // Lets tests see if a dump provider is registered.
+  bool IsDumpProviderRegisteredForTesting(MemoryDumpProvider*);
+
+  const scoped_refptr<HeapProfilerSerializationState>&
+  heap_profiler_serialization_state_for_testing() const {
+    return heap_profiler_serialization_state_;
+  }
+
+  // Returns a unique id identifying the process. The id can be retrieved by
+  // child processes only when tracing is enabled. This is intended to express
+  // cross-process sharing of memory dumps on the child-process side, without
+  // having to know the child process id.
+  uint64_t GetTracingProcessId() const { return tracing_process_id_; }
+  void set_tracing_process_id(uint64_t tracing_process_id) {
+    tracing_process_id_ = tracing_process_id;
+  }
+
+  // Returns the name for the allocated_objects dump. Use this to declare
+  // suballocator dumps from other dump providers.
+  // It will return nullptr if there is no dump provider for the system
+  // allocator registered (which is currently the case for Mac OS).
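+  // For example, a provider describing memory it obtained from the system
+  // allocator might declare a suballocation (an illustrative sketch; |dump|
+  // is a hypothetical MemoryAllocatorDump created by the provider):
+  //   pmd->AddSuballocation(
+  //       dump->guid(),
+  //       MemoryDumpManager::GetInstance()->system_allocator_pool_name());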
+  const char* system_allocator_pool_name() const {
+    return kSystemAllocatorPoolName;
+  }
+
+  // When set to true, calling |RegisterDumpProvider| is a no-op.
+  void set_dumper_registrations_ignored_for_testing(bool ignored) {
+    dumper_registrations_ignored_for_testing_ = ignored;
+  }
+
+ private:
+  friend std::default_delete<MemoryDumpManager>;  // For the testing instance.
+  friend struct DefaultSingletonTraits<MemoryDumpManager>;
+  friend class MemoryDumpManagerTest;
+  FRIEND_TEST_ALL_PREFIXES(MemoryDumpManagerTest,
+                           NoStackOverflowWithTooManyMDPs);
+
+  // Holds the state of a process memory dump that needs to be carried over
+  // across task runners in order to fulfill an asynchronous CreateProcessDump()
+  // request. At any time exactly one task runner owns a
+  // ProcessMemoryDumpAsyncState.
+  struct ProcessMemoryDumpAsyncState {
+    ProcessMemoryDumpAsyncState(
+        MemoryDumpRequestArgs req_args,
+        const MemoryDumpProviderInfo::OrderedSet& dump_providers,
+        scoped_refptr<HeapProfilerSerializationState>
+            heap_profiler_serialization_state,
+        ProcessMemoryDumpCallback callback,
+        scoped_refptr<SequencedTaskRunner> dump_thread_task_runner);
+    ~ProcessMemoryDumpAsyncState();
+
+    // A ProcessMemoryDump to collect data from MemoryDumpProviders.
+    std::unique_ptr<ProcessMemoryDump> process_memory_dump;
+
+    // The arguments passed to the initial CreateProcessDump() request.
+    const MemoryDumpRequestArgs req_args;
+
+    // An ordered sequence of dump providers that have to be invoked to complete
+    // the dump. This is a copy of |dump_providers_| at the beginning of a dump
+    // and becomes empty at the end, when all dump providers have been invoked.
+    std::vector<scoped_refptr<MemoryDumpProviderInfo>> pending_dump_providers;
+
+    // The HeapProfilerSerializationState object, which is shared by all
+    // the ProcessMemoryDump and MemoryAllocatorDump instances throughout the
+    // tracing session lifetime.
+    scoped_refptr<HeapProfilerSerializationState>
+        heap_profiler_serialization_state;
+
+    // Callback passed to the initial call to CreateProcessDump().
+    ProcessMemoryDumpCallback callback;
+
+    // The thread on which FinishAsyncProcessDump() (and hence |callback|)
+    // should be invoked. This is the thread on which the initial
+    // CreateProcessDump() request was called.
+    const scoped_refptr<SingleThreadTaskRunner> callback_task_runner;
+
+    // The thread on which unbound dump providers should be invoked.
+    // This is essentially |dump_thread_|.task_runner() but needs to be kept
+    // as a separate variable as it needs to be accessed by arbitrary dumpers'
+    // threads outside of |lock_| to avoid races when disabling tracing.
+    // It is immutable for the entire duration of a tracing session.
+    const scoped_refptr<SequencedTaskRunner> dump_thread_task_runner;
+
+   private:
+    DISALLOW_COPY_AND_ASSIGN(ProcessMemoryDumpAsyncState);
+  };
+
+  static const int kMaxConsecutiveFailuresCount;
+  static const char* const kSystemAllocatorPoolName;
+
+  MemoryDumpManager();
+  virtual ~MemoryDumpManager();
+
+  static void SetInstanceForTesting(MemoryDumpManager* instance);
+
+  // Lazily initializes dump_thread_ and returns its TaskRunner.
+  scoped_refptr<base::SequencedTaskRunner> GetOrCreateBgTaskRunnerLocked();
+
+  // Calls InvokeOnMemoryDump() for each MDP that belongs to the current
+  // task runner and switches to the task runner of the next MDP. Handles
+  // failures in MDP and thread hops, and always calls FinishAsyncProcessDump()
+  // at the end.
+  void ContinueAsyncProcessDump(
+      ProcessMemoryDumpAsyncState* owned_pmd_async_state);
+
+  // Invokes OnMemoryDump() of the given MDP. Should be called on the MDP task
+  // runner.
+  void InvokeOnMemoryDump(MemoryDumpProviderInfo* mdpinfo,
+                          ProcessMemoryDump* pmd);
+
+  void FinishAsyncProcessDump(
+      std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state);
+
+  // Helper for the public RegisterDumpProvider* functions.
+  void RegisterDumpProviderInternal(
+      MemoryDumpProvider* mdp,
+      const char* name,
+      scoped_refptr<SequencedTaskRunner> task_runner,
+      const MemoryDumpProvider::Options& options);
+
+  // Helper for the public UnregisterDumpProvider* functions.
+  void UnregisterDumpProviderInternal(MemoryDumpProvider* mdp,
+                                      bool take_mdp_ownership_and_delete_async);
+
+  // Fills the passed vector with the subset of dump providers which were
+  // registered with is_fast_polling_supported == true.
+  void GetDumpProvidersForPolling(
+      std::vector<scoped_refptr<MemoryDumpProviderInfo>>*);
+
+  // Initialize |heap_profiler_serialization_state_| when tracing and heap
+  // profiler are enabled.
+  void InitializeHeapProfilerStateIfNeededLocked();
+
+  // Sends an OnHeapProfilingEnabled() notification to the MDP, ensuring that
+  // OnMemoryDump() is not called at the same time.
+  void NotifyHeapProfilingEnabledLocked(
+      scoped_refptr<MemoryDumpProviderInfo> mdpinfo,
+      bool enabled);
+
+  bool can_request_global_dumps() const {
+    return !request_dump_function_.is_null();
+  }
+
+  // An ordered set of registered MemoryDumpProviderInfo(s), sorted by task
+  // runner affinity (MDPs belonging to the same task runners are adjacent).
+  MemoryDumpProviderInfo::OrderedSet dump_providers_;
+
+  // Shared among all the PMDs to keep state scoped to the tracing session.
+  scoped_refptr<HeapProfilerSerializationState>
+      heap_profiler_serialization_state_;
+
+  // Function provided by the embedder to handle global dump requests.
+  RequestGlobalDumpFunction request_dump_function_;
+
+  // True when the current process coordinates the periodic dump triggering.
+  bool is_coordinator_;
+
+  // Protects from concurrent accesses to the local state, e.g. to guard
+  // against disabling logging while dumping on another thread.
+  Lock lock_;
+
+  // Thread used for MemoryDumpProviders which don't specify a task runner
+  // affinity.
+  std::unique_ptr<Thread> dump_thread_;
+
+  // The unique id of the child process. This is created only for tracing and is
+  // expected to be valid only when tracing is enabled.
+  uint64_t tracing_process_id_;
+
+  // When true, calling |RegisterDumpProvider| is a no-op.
+  bool dumper_registrations_ignored_for_testing_;
+
+  HeapProfilingMode heap_profiling_mode_;
+
+  DISALLOW_COPY_AND_ASSIGN(MemoryDumpManager);
+};
+
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_MEMORY_DUMP_MANAGER_H_
diff --git a/base/trace_event/memory_dump_manager_test_utils.h b/base/trace_event/memory_dump_manager_test_utils.h
new file mode 100644
index 0000000..413017f
--- /dev/null
+++ b/base/trace_event/memory_dump_manager_test_utils.h
@@ -0,0 +1,38 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_MEMORY_DUMP_MANAGER_TEST_UTILS_H_
+#define BASE_TRACE_EVENT_MEMORY_DUMP_MANAGER_TEST_UTILS_H_
+
+#include "base/bind.h"
+#include "base/trace_event/memory_dump_manager.h"
+#include "base/trace_event/memory_dump_request_args.h"
+
+namespace base {
+namespace trace_event {
+
+void RequestGlobalDumpForInProcessTesting(
+    MemoryDumpType dump_type,
+    MemoryDumpLevelOfDetail level_of_detail) {
+  MemoryDumpRequestArgs local_args = {0 /* dump_guid */, dump_type,
+                                      level_of_detail};
+  MemoryDumpManager::GetInstance()->CreateProcessDump(
+      local_args, ProcessMemoryDumpCallback());
+}
+
+// Short circuits the RequestGlobalDumpFunction() to CreateProcessDump(),
+// effectively allowing both to be used in unittests with the same behavior.
+// Unittests are in-process only and don't require all the multi-process
+// dump handshaking (which would require bits outside of base).
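+//
+// In a test fixture this is typically called while still single-threaded,
+// e.g. (an illustrative sketch mirroring MemoryDumpManagerTest::SetUp()):
+//
+//   void SetUp() override {
+//     mdm_ = MemoryDumpManager::CreateInstanceForTesting();
+//     InitializeMemoryDumpManagerForInProcessTesting(
+//         false /* is_coordinator */);
+//   }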
+void InitializeMemoryDumpManagerForInProcessTesting(bool is_coordinator) {
+  MemoryDumpManager* instance = MemoryDumpManager::GetInstance();
+  instance->set_dumper_registrations_ignored_for_testing(true);
+  instance->Initialize(BindRepeating(&RequestGlobalDumpForInProcessTesting),
+                       is_coordinator);
+}
+
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_MEMORY_DUMP_MANAGER_TEST_UTILS_H_
diff --git a/base/trace_event/memory_dump_manager_unittest.cc b/base/trace_event/memory_dump_manager_unittest.cc
new file mode 100644
index 0000000..e92045e
--- /dev/null
+++ b/base/trace_event/memory_dump_manager_unittest.cc
@@ -0,0 +1,1020 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_dump_manager.h"
+
+#include <stdint.h>
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "base/allocator/buildflags.h"
+#include "base/base_switches.h"
+#include "base/callback.h"
+#include "base/command_line.h"
+#include "base/debug/thread_heap_usage_tracker.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/task_scheduler/post_task.h"
+#include "base/test/scoped_task_environment.h"
+#include "base/test/test_io_thread.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/sequenced_task_runner_handle.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/trace_event/memory_dump_manager_test_utils.h"
+#include "base/trace_event/memory_dump_provider.h"
+#include "base/trace_event/memory_dump_request_args.h"
+#include "base/trace_event/memory_dump_scheduler.h"
+#include "base/trace_event/memory_infra_background_whitelist.h"
+#include "base/trace_event/process_memory_dump.h"
+#include "build/build_config.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using testing::_;
+using testing::AtMost;
+using testing::Between;
+using testing::Invoke;
+using testing::Return;
+
+namespace base {
+namespace trace_event {
+
+// GTest matchers for the MemoryDumpArgs passed to OnMemoryDump().
+MATCHER(IsDetailedDump, "") {
+  return arg.level_of_detail == MemoryDumpLevelOfDetail::DETAILED;
+}
+
+MATCHER(IsLightDump, "") {
+  return arg.level_of_detail == MemoryDumpLevelOfDetail::LIGHT;
+}
+
+namespace {
+
+const char* kMDPName = "TestDumpProvider";
+const char* kWhitelistedMDPName = "WhitelistedTestDumpProvider";
+const char* const kTestMDPWhitelist[] = {kWhitelistedMDPName, nullptr};
+
+void RegisterDumpProvider(
+    MemoryDumpProvider* mdp,
+    scoped_refptr<base::SingleThreadTaskRunner> task_runner,
+    const MemoryDumpProvider::Options& options,
+    const char* name = kMDPName) {
+  MemoryDumpManager* mdm = MemoryDumpManager::GetInstance();
+  mdm->set_dumper_registrations_ignored_for_testing(false);
+  mdm->RegisterDumpProvider(mdp, name, std::move(task_runner), options);
+  mdm->set_dumper_registrations_ignored_for_testing(true);
+}
+
+void RegisterDumpProvider(
+    MemoryDumpProvider* mdp,
+    scoped_refptr<base::SingleThreadTaskRunner> task_runner) {
+  RegisterDumpProvider(mdp, task_runner, MemoryDumpProvider::Options());
+}
+
+void RegisterDumpProviderWithSequencedTaskRunner(
+    MemoryDumpProvider* mdp,
+    scoped_refptr<base::SequencedTaskRunner> task_runner,
+    const MemoryDumpProvider::Options& options) {
+  MemoryDumpManager* mdm = MemoryDumpManager::GetInstance();
+  mdm->set_dumper_registrations_ignored_for_testing(false);
+  mdm->RegisterDumpProviderWithSequencedTaskRunner(mdp, kMDPName, task_runner,
+                                                   options);
+  mdm->set_dumper_registrations_ignored_for_testing(true);
+}
+
+// Posts |task| to |task_runner| and blocks until it is executed.
+void PostTaskAndWait(const Location& from_here,
+                     SequencedTaskRunner* task_runner,
+                     base::OnceClosure task) {
+  base::WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+                            WaitableEvent::InitialState::NOT_SIGNALED);
+  task_runner->PostTask(from_here, std::move(task));
+  task_runner->PostTask(FROM_HERE, base::BindOnce(&WaitableEvent::Signal,
+                                                  base::Unretained(&event)));
+  // The SequencedTaskRunner guarantees that |event| will only be signaled after
+  // |task| is executed.
+  event.Wait();
+}
+
+class MockMemoryDumpProvider : public MemoryDumpProvider {
+ public:
+  MOCK_METHOD0(Destructor, void());
+  MOCK_METHOD2(OnMemoryDump,
+               bool(const MemoryDumpArgs& args, ProcessMemoryDump* pmd));
+  MOCK_METHOD1(OnHeapProfilingEnabled, void(bool enabled));
+  MOCK_METHOD1(PollFastMemoryTotal, void(uint64_t* memory_total));
+  MOCK_METHOD0(SuspendFastMemoryPolling, void());
+
+  MockMemoryDumpProvider() : enable_mock_destructor(false) {
+    ON_CALL(*this, OnMemoryDump(_, _))
+        .WillByDefault(
+            Invoke([](const MemoryDumpArgs&, ProcessMemoryDump* pmd) -> bool {
+              return true;
+            }));
+
+    ON_CALL(*this, PollFastMemoryTotal(_))
+        .WillByDefault(
+            Invoke([](uint64_t* memory_total) -> void { NOTREACHED(); }));
+  }
+  ~MockMemoryDumpProvider() override {
+    if (enable_mock_destructor)
+      Destructor();
+  }
+
+  bool enable_mock_destructor;
+};
+
+class TestSequencedTaskRunner : public SequencedTaskRunner {
+ public:
+  TestSequencedTaskRunner() = default;
+
+  void set_enabled(bool value) { enabled_ = value; }
+  unsigned no_of_post_tasks() const { return num_of_post_tasks_; }
+
+  bool PostNonNestableDelayedTask(const Location& from_here,
+                                  OnceClosure task,
+                                  TimeDelta delay) override {
+    NOTREACHED();
+    return false;
+  }
+
+  bool PostDelayedTask(const Location& from_here,
+                       OnceClosure task,
+                       TimeDelta delay) override {
+    num_of_post_tasks_++;
+    if (enabled_) {
+      return task_runner_->PostDelayedTask(from_here, std::move(task), delay);
+    }
+    return false;
+  }
+
+  bool RunsTasksInCurrentSequence() const override {
+    return task_runner_->RunsTasksInCurrentSequence();
+  }
+
+ private:
+  ~TestSequencedTaskRunner() override = default;
+
+  const scoped_refptr<SequencedTaskRunner> task_runner_ =
+      CreateSequencedTaskRunnerWithTraits({});
+  bool enabled_ = true;
+  unsigned num_of_post_tasks_ = 0;
+};
+
+class TestingThreadHeapUsageTracker : public debug::ThreadHeapUsageTracker {
+ public:
+  using ThreadHeapUsageTracker::DisableHeapTrackingForTesting;
+};
+
+}  // namespace
+
+class MemoryDumpManagerTest : public testing::Test {
+ public:
+  MemoryDumpManagerTest(bool is_coordinator = false)
+      : is_coordinator_(is_coordinator) {}
+
+  void SetUp() override {
+    // Bring up and initialize MemoryDumpManager while single-threaded (before
+    // instantiating ScopedTaskEnvironment) to avoid data races if worker
+    // threads use tracing globals early.
+    mdm_ = MemoryDumpManager::CreateInstanceForTesting();
+    ASSERT_EQ(mdm_.get(), MemoryDumpManager::GetInstance());
+
+    InitializeMemoryDumpManagerForInProcessTesting(is_coordinator_);
+
+    scoped_task_environment_ = std::make_unique<test::ScopedTaskEnvironment>();
+  }
+
+  void TearDown() override {
+    scoped_task_environment_.reset();
+
+    // Tear down the MemoryDumpManager while single-threaded to mirror logic in
+    // SetUp().
+    mdm_.reset();
+    TraceLog::ResetForTesting();
+  }
+
+ protected:
+  // Blocks the current thread (spinning a nested message loop) until the
+  // memory dump is complete. Returns the |success| value passed to the
+  // CreateProcessDump() callback.
+  bool RequestProcessDumpAndWait(MemoryDumpType dump_type,
+                                 MemoryDumpLevelOfDetail level_of_detail) {
+    RunLoop run_loop;
+    bool success = false;
+    static uint64_t test_guid = 1;
+    test_guid++;
+    MemoryDumpRequestArgs request_args{test_guid, dump_type, level_of_detail};
+
+    // The signature of the callback delivered by MemoryDumpManager is:
+    // void ProcessMemoryDumpCallback(
+    //     bool success,
+    //     uint64_t dump_guid,
+    //     std::unique_ptr<ProcessMemoryDump> pmd)
+    // The extra arguments prepended to the |callback| below (the ones with the
+    // "curried_" prefix) are just passed from the Bind(). This is just to get
+    // around the limitation of Bind() in supporting only capture-less lambdas.
+    ProcessMemoryDumpCallback callback = Bind(
+        [](bool* curried_success, Closure curried_quit_closure,
+           uint64_t curried_expected_guid, bool success, uint64_t dump_guid,
+           std::unique_ptr<ProcessMemoryDump> pmd) {
+          *curried_success = success;
+          EXPECT_EQ(curried_expected_guid, dump_guid);
+          ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+                                                  curried_quit_closure);
+        },
+        Unretained(&success), run_loop.QuitClosure(), test_guid);
+
+    mdm_->CreateProcessDump(request_args, callback);
+    run_loop.Run();
+    return success;
+  }
+
+  void EnableForTracing() {
+    mdm_->SetupForTracing(TraceConfig::MemoryDumpConfig());
+  }
+
+  void EnableForTracingWithTraceConfig(const std::string& trace_config_string) {
+    TraceConfig trace_config(trace_config_string);
+    mdm_->SetupForTracing(trace_config.memory_dump_config());
+  }
+
+  void DisableTracing() { mdm_->TeardownForTracing(); }
+
+  int GetMaxConsecutiveFailuresCount() const {
+    return MemoryDumpManager::kMaxConsecutiveFailuresCount;
+  }
+
+  const MemoryDumpProvider::Options kDefaultOptions;
+  std::unique_ptr<MemoryDumpManager> mdm_;
+
+ private:
+  // To tear down the singleton instance after each test.
+  ShadowingAtExitManager at_exit_manager_;
+
+  std::unique_ptr<test::ScopedTaskEnvironment> scoped_task_environment_;
+
+  // Whether the test MemoryDumpManager should be initialized as the
+  // coordinator.
+  const bool is_coordinator_;
+
+  DISALLOW_COPY_AND_ASSIGN(MemoryDumpManagerTest);
+};
+
+class MemoryDumpManagerTestAsCoordinator : public MemoryDumpManagerTest {
+ public:
+  MemoryDumpManagerTestAsCoordinator() : MemoryDumpManagerTest(true) {}
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MemoryDumpManagerTestAsCoordinator);
+};
+
+// Basic sanity checks. Registers a memory dump provider and checks that it is
+// called.
+TEST_F(MemoryDumpManagerTest, SingleDumper) {
+  MockMemoryDumpProvider mdp;
+  RegisterDumpProvider(&mdp, ThreadTaskRunnerHandle::Get());
+
+  // Enable tracing and check that the dump provider is invoked.
+  EnableForTracing();
+  EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(3);
+  for (int i = 0; i < 3; ++i) {
+    EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+                                          MemoryDumpLevelOfDetail::DETAILED));
+  }
+  DisableTracing();
+
+  mdm_->UnregisterDumpProvider(&mdp);
+
+  // Finally check the unregister logic: the global dump handler will be invoked
+  // but not the dump provider, as it has been unregistered.
+  EnableForTracing();
+  EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(0);
+  for (int i = 0; i < 3; ++i) {
+    EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+                                          MemoryDumpLevelOfDetail::DETAILED));
+  }
+  DisableTracing();
+}
+
+// Checks that requesting dumps with a high level of detail actually
+// propagates the level of detail properly to the OnMemoryDump() call on dump
+// providers.
+TEST_F(MemoryDumpManagerTest, CheckMemoryDumpArgs) {
+  MockMemoryDumpProvider mdp;
+
+  RegisterDumpProvider(&mdp, ThreadTaskRunnerHandle::Get());
+  EnableForTracing();
+  EXPECT_CALL(mdp, OnMemoryDump(IsDetailedDump(), _));
+  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+                                        MemoryDumpLevelOfDetail::DETAILED));
+  DisableTracing();
+  mdm_->UnregisterDumpProvider(&mdp);
+
+  // Check that requesting dumps with a low level of detail actually
+  // propagates to the OnMemoryDump() call on dump providers.
+  RegisterDumpProvider(&mdp, ThreadTaskRunnerHandle::Get());
+  EnableForTracing();
+  EXPECT_CALL(mdp, OnMemoryDump(IsLightDump(), _));
+  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+                                        MemoryDumpLevelOfDetail::LIGHT));
+  DisableTracing();
+  mdm_->UnregisterDumpProvider(&mdp);
+}
+
+// Checks that the HeapProfilerSerializationState object is actually
+// shared over time.
+TEST_F(MemoryDumpManagerTest, HeapProfilerSerializationState) {
+  MockMemoryDumpProvider mdp1;
+  MockMemoryDumpProvider mdp2;
+  RegisterDumpProvider(&mdp1, nullptr);
+  RegisterDumpProvider(&mdp2, nullptr);
+
+  EnableForTracing();
+  const HeapProfilerSerializationState* heap_profiler_serialization_state =
+      mdm_->heap_profiler_serialization_state_for_testing().get();
+  EXPECT_CALL(mdp1, OnMemoryDump(_, _))
+      .Times(2)
+      .WillRepeatedly(
+          Invoke([heap_profiler_serialization_state](
+                     const MemoryDumpArgs&, ProcessMemoryDump* pmd) -> bool {
+            EXPECT_EQ(heap_profiler_serialization_state,
+                      pmd->heap_profiler_serialization_state().get());
+            return true;
+          }));
+  EXPECT_CALL(mdp2, OnMemoryDump(_, _))
+      .Times(2)
+      .WillRepeatedly(
+          Invoke([heap_profiler_serialization_state](
+                     const MemoryDumpArgs&, ProcessMemoryDump* pmd) -> bool {
+            EXPECT_EQ(heap_profiler_serialization_state,
+                      pmd->heap_profiler_serialization_state().get());
+            return true;
+          }));
+
+  for (int i = 0; i < 2; ++i) {
+    EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+                                          MemoryDumpLevelOfDetail::DETAILED));
+  }
+
+  DisableTracing();
+}
+
+// Checks that the (Un)RegisterDumpProvider logic behaves sanely.
+TEST_F(MemoryDumpManagerTest, MultipleDumpers) {
+  MockMemoryDumpProvider mdp1;
+  MockMemoryDumpProvider mdp2;
+
+  // Enable only mdp1.
+  RegisterDumpProvider(&mdp1, ThreadTaskRunnerHandle::Get());
+  EnableForTracing();
+  EXPECT_CALL(mdp1, OnMemoryDump(_, _));
+  EXPECT_CALL(mdp2, OnMemoryDump(_, _)).Times(0);
+  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+                                        MemoryDumpLevelOfDetail::DETAILED));
+  DisableTracing();
+
+  // Invert: enable mdp2 and disable mdp1.
+  mdm_->UnregisterDumpProvider(&mdp1);
+  RegisterDumpProvider(&mdp2, nullptr);
+  EnableForTracing();
+  EXPECT_CALL(mdp1, OnMemoryDump(_, _)).Times(0);
+  EXPECT_CALL(mdp2, OnMemoryDump(_, _));
+  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+                                        MemoryDumpLevelOfDetail::DETAILED));
+  DisableTracing();
+
+  // Enable both mdp1 and mdp2.
+  RegisterDumpProvider(&mdp1, nullptr);
+  EnableForTracing();
+  EXPECT_CALL(mdp1, OnMemoryDump(_, _));
+  EXPECT_CALL(mdp2, OnMemoryDump(_, _));
+  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+                                        MemoryDumpLevelOfDetail::DETAILED));
+  DisableTracing();
+}
+
+// Checks that the dump provider invocations depend only on the current
+// registration state and not on previous registrations and dumps.
+// Flaky on iOS, see crbug.com/706874
+#if defined(OS_IOS)
+#define MAYBE_RegistrationConsistency DISABLED_RegistrationConsistency
+#else
+#define MAYBE_RegistrationConsistency RegistrationConsistency
+#endif
+TEST_F(MemoryDumpManagerTest, MAYBE_RegistrationConsistency) {
+  MockMemoryDumpProvider mdp;
+
+  RegisterDumpProvider(&mdp, ThreadTaskRunnerHandle::Get());
+
+  {
+    EXPECT_CALL(mdp, OnMemoryDump(_, _));
+    EnableForTracing();
+    EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+                                          MemoryDumpLevelOfDetail::DETAILED));
+    DisableTracing();
+  }
+
+  mdm_->UnregisterDumpProvider(&mdp);
+
+  {
+    EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(0);
+    EnableForTracing();
+    EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+                                          MemoryDumpLevelOfDetail::DETAILED));
+    DisableTracing();
+  }
+
+  RegisterDumpProvider(&mdp, ThreadTaskRunnerHandle::Get());
+  mdm_->UnregisterDumpProvider(&mdp);
+
+  {
+    EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(0);
+    EnableForTracing();
+    EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+                                          MemoryDumpLevelOfDetail::DETAILED));
+    DisableTracing();
+  }
+
+  RegisterDumpProvider(&mdp, ThreadTaskRunnerHandle::Get());
+  mdm_->UnregisterDumpProvider(&mdp);
+  RegisterDumpProvider(&mdp, ThreadTaskRunnerHandle::Get());
+
+  {
+    EXPECT_CALL(mdp, OnMemoryDump(_, _));
+    EnableForTracing();
+    EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+                                          MemoryDumpLevelOfDetail::DETAILED));
+    DisableTracing();
+  }
+}
+
+// Checks that the MemoryDumpManager respects the thread affinity when a
+// MemoryDumpProvider specifies a task_runner(). The test starts by creating 8
+// threads and registering a MemoryDumpProvider on each of them. At each
+// iteration, one thread is removed, to check the live unregistration logic.
+TEST_F(MemoryDumpManagerTest, RespectTaskRunnerAffinity) {
+  const uint32_t kNumInitialThreads = 8;
+
+  std::vector<std::unique_ptr<Thread>> threads;
+  std::vector<std::unique_ptr<MockMemoryDumpProvider>> mdps;
+
+  // Create the threads and set up the expectations. Given that at each
+  // iteration
+  // we will pop out one thread/MemoryDumpProvider, each MDP is supposed to be
+  // invoked a number of times equal to its index.
+  for (uint32_t i = kNumInitialThreads; i > 0; --i) {
+    threads.push_back(WrapUnique(new Thread("test thread")));
+    auto* thread = threads.back().get();
+    thread->Start();
+    scoped_refptr<SingleThreadTaskRunner> task_runner = thread->task_runner();
+    mdps.push_back(WrapUnique(new MockMemoryDumpProvider()));
+    auto* mdp = mdps.back().get();
+    RegisterDumpProvider(mdp, task_runner, kDefaultOptions);
+    EXPECT_CALL(*mdp, OnMemoryDump(_, _))
+        .Times(i)
+        .WillRepeatedly(Invoke(
+            [task_runner](const MemoryDumpArgs&, ProcessMemoryDump*) -> bool {
+              EXPECT_TRUE(task_runner->RunsTasksInCurrentSequence());
+              return true;
+            }));
+  }
+  EnableForTracing();
+
+  while (!threads.empty()) {
+    EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+                                          MemoryDumpLevelOfDetail::DETAILED));
+
+    // Unregister a MDP and destroy one thread at each iteration to check the
+    // live unregistration logic. The unregistration needs to happen on the same
+    // thread the MDP belongs to.
+    {
+      RunLoop run_loop;
+      Closure unregistration =
+          Bind(&MemoryDumpManager::UnregisterDumpProvider,
+               Unretained(mdm_.get()), Unretained(mdps.back().get()));
+      threads.back()->task_runner()->PostTaskAndReply(FROM_HERE, unregistration,
+                                                      run_loop.QuitClosure());
+      run_loop.Run();
+    }
+    mdps.pop_back();
+    threads.back()->Stop();
+    threads.pop_back();
+  }
+
+  DisableTracing();
+}
+
+// Checks that memory dump calls are always posted on the task runner in the
+// SequencedTaskRunner case, and that the dump provider gets disabled when
+// PostTask fails, but the dump still succeeds.
+TEST_F(MemoryDumpManagerTest, PostTaskForSequencedTaskRunner) {
+  std::vector<MockMemoryDumpProvider> mdps(3);
+  scoped_refptr<TestSequencedTaskRunner> task_runner1(
+      MakeRefCounted<TestSequencedTaskRunner>());
+  scoped_refptr<TestSequencedTaskRunner> task_runner2(
+      MakeRefCounted<TestSequencedTaskRunner>());
+  RegisterDumpProviderWithSequencedTaskRunner(&mdps[0], task_runner1,
+                                              kDefaultOptions);
+  RegisterDumpProviderWithSequencedTaskRunner(&mdps[1], task_runner2,
+                                              kDefaultOptions);
+  RegisterDumpProviderWithSequencedTaskRunner(&mdps[2], task_runner2,
+                                              kDefaultOptions);
+  // |mdps[0]| should be disabled permanently after first dump.
+  EXPECT_CALL(mdps[0], OnMemoryDump(_, _)).Times(0);
+  EXPECT_CALL(mdps[1], OnMemoryDump(_, _)).Times(2);
+  EXPECT_CALL(mdps[2], OnMemoryDump(_, _)).Times(2);
+
+  EnableForTracing();
+
+  task_runner1->set_enabled(false);
+  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+                                        MemoryDumpLevelOfDetail::DETAILED));
+  EXPECT_EQ(1u, task_runner1->no_of_post_tasks());
+  EXPECT_EQ(1u, task_runner2->no_of_post_tasks());
+
+  task_runner1->set_enabled(true);
+  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+                                        MemoryDumpLevelOfDetail::DETAILED));
+  EXPECT_EQ(2u, task_runner1->no_of_post_tasks());
+  EXPECT_EQ(2u, task_runner2->no_of_post_tasks());
+  DisableTracing();
+}
+
+// Checks that providers get disabled after 3 consecutive failures, but not
+// otherwise (e.g., if interleaved).
+TEST_F(MemoryDumpManagerTest, DisableFailingDumpers) {
+  MockMemoryDumpProvider mdp1;
+  MockMemoryDumpProvider mdp2;
+
+  RegisterDumpProvider(&mdp1, nullptr);
+  RegisterDumpProvider(&mdp2, nullptr);
+  EnableForTracing();
+
+  EXPECT_CALL(mdp1, OnMemoryDump(_, _))
+      .Times(GetMaxConsecutiveFailuresCount())
+      .WillRepeatedly(Return(false));
+
+  EXPECT_CALL(mdp2, OnMemoryDump(_, _))
+      .WillOnce(Return(false))
+      .WillOnce(Return(true))
+      .WillOnce(Return(false))
+      .WillOnce(Return(false))
+      .WillOnce(Return(true))
+      .WillOnce(Return(false));
+
+  const int kNumDumps = 2 * GetMaxConsecutiveFailuresCount();
+  for (int i = 0; i < kNumDumps; i++) {
+    EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+                                          MemoryDumpLevelOfDetail::DETAILED));
+  }
+
+  DisableTracing();
+}
+
+// Sneakily registers an extra memory dump provider while an existing one is
+// dumping and expects it to take part in the already active tracing session.
+TEST_F(MemoryDumpManagerTest, RegisterDumperWhileDumping) {
+  MockMemoryDumpProvider mdp1;
+  MockMemoryDumpProvider mdp2;
+
+  RegisterDumpProvider(&mdp1, nullptr);
+  EnableForTracing();
+
+  EXPECT_CALL(mdp1, OnMemoryDump(_, _))
+      .Times(4)
+      .WillOnce(Return(true))
+      .WillOnce(
+          Invoke([&mdp2](const MemoryDumpArgs&, ProcessMemoryDump*) -> bool {
+            RegisterDumpProvider(&mdp2, nullptr);
+            return true;
+          }))
+      .WillRepeatedly(Return(true));
+
+  // Depending on the insertion order (before or after mdp1), mdp2 might also
+  // be called immediately after it gets registered.
+  EXPECT_CALL(mdp2, OnMemoryDump(_, _)).Times(Between(2, 3));
+
+  for (int i = 0; i < 4; i++) {
+    EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+                                          MemoryDumpLevelOfDetail::DETAILED));
+  }
+
+  DisableTracing();
+}
+
+// Like RegisterDumperWhileDumping, but unregister the dump provider instead.
+TEST_F(MemoryDumpManagerTest, UnregisterDumperWhileDumping) {
+  MockMemoryDumpProvider mdp1;
+  MockMemoryDumpProvider mdp2;
+
+  RegisterDumpProvider(&mdp1, ThreadTaskRunnerHandle::Get(), kDefaultOptions);
+  RegisterDumpProvider(&mdp2, ThreadTaskRunnerHandle::Get(), kDefaultOptions);
+  EnableForTracing();
+
+  EXPECT_CALL(mdp1, OnMemoryDump(_, _))
+      .Times(4)
+      .WillOnce(Return(true))
+      .WillOnce(
+          Invoke([&mdp2](const MemoryDumpArgs&, ProcessMemoryDump*) -> bool {
+            MemoryDumpManager::GetInstance()->UnregisterDumpProvider(&mdp2);
+            return true;
+          }))
+      .WillRepeatedly(Return(true));
+
+  // Depending on the insertion order (before or after mdp1), mdp2 might
+  // already have been called when UnregisterDumpProvider happens.
+  EXPECT_CALL(mdp2, OnMemoryDump(_, _)).Times(Between(1, 2));
+
+  for (int i = 0; i < 4; i++) {
+    EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+                                          MemoryDumpLevelOfDetail::DETAILED));
+  }
+
+  DisableTracing();
+}
+
+// Checks that the dump does not abort when unregistering a provider while
+// dumping from a different thread than the dumping thread.
+TEST_F(MemoryDumpManagerTest, UnregisterDumperFromThreadWhileDumping) {
+  std::vector<std::unique_ptr<TestIOThread>> threads;
+  std::vector<std::unique_ptr<MockMemoryDumpProvider>> mdps;
+
+  for (int i = 0; i < 2; i++) {
+    threads.push_back(
+        WrapUnique(new TestIOThread(TestIOThread::kAutoStart)));
+    mdps.push_back(WrapUnique(new MockMemoryDumpProvider()));
+    RegisterDumpProvider(mdps.back().get(), threads.back()->task_runner(),
+                         kDefaultOptions);
+  }
+
+  int on_memory_dump_call_count = 0;
+
+  // When OnMemoryDump is called on either of the dump providers, it will
+  // unregister the other one.
+  for (const std::unique_ptr<MockMemoryDumpProvider>& mdp : mdps) {
+    int other_idx = (mdps.front() == mdp);
+    // TestIOThread's task runner must be obtained from the main thread but can
+    // then be used from other threads.
+    scoped_refptr<SingleThreadTaskRunner> other_runner =
+        threads[other_idx]->task_runner();
+    MockMemoryDumpProvider* other_mdp = mdps[other_idx].get();
+    auto on_dump = [this, other_runner, other_mdp, &on_memory_dump_call_count](
+                       const MemoryDumpArgs& args, ProcessMemoryDump* pmd) {
+      PostTaskAndWait(FROM_HERE, other_runner.get(),
+                      base::BindOnce(&MemoryDumpManager::UnregisterDumpProvider,
+                                     base::Unretained(&*mdm_), other_mdp));
+      on_memory_dump_call_count++;
+      return true;
+    };
+
+    // OnMemoryDump is called once for the provider that dumps first, and zero
+    // times for the other provider.
+    EXPECT_CALL(*mdp, OnMemoryDump(_, _))
+        .Times(AtMost(1))
+        .WillOnce(Invoke(on_dump));
+  }
+
+  EnableForTracing();
+  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+                                        MemoryDumpLevelOfDetail::DETAILED));
+  ASSERT_EQ(1, on_memory_dump_call_count);
+
+  DisableTracing();
+}
+
+// If a thread (with a dump provider living on it) is torn down during a dump,
+// its dump provider should be skipped, but the dump itself should succeed.
+TEST_F(MemoryDumpManagerTest, TearDownThreadWhileDumping) {
+  std::vector<std::unique_ptr<TestIOThread>> threads;
+  std::vector<std::unique_ptr<MockMemoryDumpProvider>> mdps;
+
+  for (int i = 0; i < 2; i++) {
+    threads.push_back(
+        WrapUnique(new TestIOThread(TestIOThread::kAutoStart)));
+    mdps.push_back(WrapUnique(new MockMemoryDumpProvider()));
+    RegisterDumpProvider(mdps.back().get(), threads.back()->task_runner(),
+                         kDefaultOptions);
+  }
+
+  int on_memory_dump_call_count = 0;
+
+  // When OnMemoryDump is called on either of the dump providers, it will
+  // tear down the thread of the other one.
+  for (const std::unique_ptr<MockMemoryDumpProvider>& mdp : mdps) {
+    int other_idx = (mdps.front() == mdp);
+    TestIOThread* other_thread = threads[other_idx].get();
+    // TestIOThread isn't thread-safe and must be stopped on the |main_runner|.
+    scoped_refptr<SequencedTaskRunner> main_runner =
+        SequencedTaskRunnerHandle::Get();
+    auto on_dump = [other_thread, main_runner, &on_memory_dump_call_count](
+                       const MemoryDumpArgs& args, ProcessMemoryDump* pmd) {
+      PostTaskAndWait(
+          FROM_HERE, main_runner.get(),
+          base::BindOnce(&TestIOThread::Stop, base::Unretained(other_thread)));
+      on_memory_dump_call_count++;
+      return true;
+    };
+
+    // OnMemoryDump is called once for the provider that dumps first, and zero
+    // times for the other provider.
+    EXPECT_CALL(*mdp, OnMemoryDump(_, _))
+        .Times(AtMost(1))
+        .WillOnce(Invoke(on_dump));
+  }
+
+  EnableForTracing();
+  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+                                        MemoryDumpLevelOfDetail::DETAILED));
+  ASSERT_EQ(1, on_memory_dump_call_count);
+
+  DisableTracing();
+}
+
+// Checks that the callback is invoked if CreateProcessDump() is called when
+// tracing is not enabled.
+TEST_F(MemoryDumpManagerTest, TriggerDumpWithoutTracing) {
+  MockMemoryDumpProvider mdp;
+  RegisterDumpProvider(&mdp, nullptr);
+  EXPECT_CALL(mdp, OnMemoryDump(_, _));
+  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+                                        MemoryDumpLevelOfDetail::DETAILED));
+}
+
+TEST_F(MemoryDumpManagerTest, BackgroundWhitelisting) {
+  SetDumpProviderWhitelistForTesting(kTestMDPWhitelist);
+
+  // Standard provider with default options (create dump for current process).
+  MockMemoryDumpProvider background_mdp;
+  RegisterDumpProvider(&background_mdp, nullptr, kDefaultOptions,
+                       kWhitelistedMDPName);
+
+  EnableForTracing();
+
+  EXPECT_CALL(background_mdp, OnMemoryDump(_, _)).Times(1);
+  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::SUMMARY_ONLY,
+                                        MemoryDumpLevelOfDetail::BACKGROUND));
+  DisableTracing();
+}
+
+// Tests the basics of UnregisterAndDeleteDumpProviderSoon(): the
+// unregistration should actually delete the providers and not leak them.
+TEST_F(MemoryDumpManagerTest, UnregisterAndDeleteDumpProviderSoon) {
+  static const int kNumProviders = 3;
+  int dtor_count = 0;
+  std::vector<std::unique_ptr<MemoryDumpProvider>> mdps;
+  for (int i = 0; i < kNumProviders; ++i) {
+    auto mdp = std::make_unique<MockMemoryDumpProvider>();
+    mdp->enable_mock_destructor = true;
+    EXPECT_CALL(*mdp, Destructor())
+        .WillOnce(Invoke([&dtor_count]() { dtor_count++; }));
+    RegisterDumpProvider(mdp.get(), nullptr, kDefaultOptions);
+    mdps.push_back(std::move(mdp));
+  }
+
+  while (!mdps.empty()) {
+    mdm_->UnregisterAndDeleteDumpProviderSoon(std::move(mdps.back()));
+    mdps.pop_back();
+  }
+
+  ASSERT_EQ(kNumProviders, dtor_count);
+}
+
+// This test checks against races when unregistering an unbound dump provider
+// from another thread while dumping. It registers one MDP and, when
+// OnMemoryDump() is called, it invokes UnregisterAndDeleteDumpProviderSoon()
+// from another thread. The OnMemoryDump() and the dtor call are expected to
+// happen on the same thread (the MemoryDumpManager utility thread).
+TEST_F(MemoryDumpManagerTest, UnregisterAndDeleteDumpProviderSoonDuringDump) {
+  auto mdp = std::make_unique<MockMemoryDumpProvider>();
+  mdp->enable_mock_destructor = true;
+  RegisterDumpProvider(mdp.get(), nullptr, kDefaultOptions);
+
+  base::PlatformThreadRef thread_ref;
+  auto self_unregister_from_another_thread = [&mdp, &thread_ref](
+      const MemoryDumpArgs&, ProcessMemoryDump*) -> bool {
+    thread_ref = PlatformThread::CurrentRef();
+    TestIOThread thread_for_unregistration(TestIOThread::kAutoStart);
+    PostTaskAndWait(
+        FROM_HERE, thread_for_unregistration.task_runner().get(),
+        base::BindOnce(&MemoryDumpManager::UnregisterAndDeleteDumpProviderSoon,
+                       base::Unretained(MemoryDumpManager::GetInstance()),
+                       std::move(mdp)));
+    thread_for_unregistration.Stop();
+    return true;
+  };
+  EXPECT_CALL(*mdp, OnMemoryDump(_, _))
+      .Times(1)
+      .WillOnce(Invoke(self_unregister_from_another_thread));
+  EXPECT_CALL(*mdp, Destructor())
+      .Times(1)
+      .WillOnce(Invoke([&thread_ref]() {
+        EXPECT_EQ(thread_ref, PlatformThread::CurrentRef());
+      }));
+
+  EnableForTracing();
+  for (int i = 0; i < 2; ++i) {
+    EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+                                          MemoryDumpLevelOfDetail::DETAILED));
+  }
+  DisableTracing();
+}
+
+#if BUILDFLAG(USE_ALLOCATOR_SHIM) && !defined(OS_NACL)
+TEST_F(MemoryDumpManagerTest, EnableHeapProfilingPseudoStack) {
+  MockMemoryDumpProvider mdp1;
+  MockMemoryDumpProvider mdp2;
+  MockMemoryDumpProvider mdp3;
+  MemoryDumpProvider::Options supported_options;
+  supported_options.supports_heap_profiling = true;
+  RegisterDumpProvider(&mdp1, ThreadTaskRunnerHandle::Get(), supported_options);
+  {
+    testing::InSequence sequence;
+    EXPECT_CALL(mdp1, OnHeapProfilingEnabled(true)).Times(1);
+    EXPECT_CALL(mdp1, OnHeapProfilingEnabled(false)).Times(1);
+  }
+  {
+    testing::InSequence sequence;
+    EXPECT_CALL(mdp2, OnHeapProfilingEnabled(true)).Times(1);
+    EXPECT_CALL(mdp2, OnHeapProfilingEnabled(false)).Times(1);
+  }
+  RegisterDumpProvider(&mdp3, ThreadTaskRunnerHandle::Get());
+  EXPECT_CALL(mdp3, OnHeapProfilingEnabled(_)).Times(0);
+
+  EXPECT_TRUE(mdm_->EnableHeapProfiling(kHeapProfilingModePseudo));
+  RunLoop().RunUntilIdle();
+  ASSERT_EQ(AllocationContextTracker::CaptureMode::PSEUDO_STACK,
+            AllocationContextTracker::capture_mode());
+  EXPECT_EQ(mdm_->GetHeapProfilingMode(), kHeapProfilingModePseudo);
+  EXPECT_EQ(TraceLog::FILTERING_MODE, TraceLog::GetInstance()->enabled_modes());
+  RegisterDumpProvider(&mdp2, ThreadTaskRunnerHandle::Get(), supported_options);
+
+  TraceConfig::MemoryDumpConfig config;
+  config.heap_profiler_options.breakdown_threshold_bytes = 100;
+  mdm_->SetupForTracing(config);
+  EXPECT_EQ(config.heap_profiler_options.breakdown_threshold_bytes,
+            mdm_->heap_profiler_serialization_state_for_testing()
+                ->heap_profiler_breakdown_threshold_bytes());
+  EXPECT_TRUE(
+      mdm_->heap_profiler_serialization_state_for_testing()->is_initialized());
+  EXPECT_EQ(mdm_->GetHeapProfilingMode(), kHeapProfilingModePseudo);
+  mdm_->TeardownForTracing();
+  EXPECT_FALSE(mdm_->heap_profiler_serialization_state_for_testing());
+
+  // Disable will permanently disable heap profiling.
+  EXPECT_TRUE(mdm_->EnableHeapProfiling(kHeapProfilingModeDisabled));
+  RunLoop().RunUntilIdle();
+  EXPECT_EQ(mdm_->GetHeapProfilingMode(), kHeapProfilingModeInvalid);
+  EXPECT_EQ(0, TraceLog::GetInstance()->enabled_modes());
+  EXPECT_FALSE(mdm_->heap_profiler_serialization_state_for_testing());
+  ASSERT_EQ(AllocationContextTracker::CaptureMode::DISABLED,
+            AllocationContextTracker::capture_mode());
+  EXPECT_FALSE(mdm_->EnableHeapProfiling(kHeapProfilingModePseudo));
+  ASSERT_EQ(AllocationContextTracker::CaptureMode::DISABLED,
+            AllocationContextTracker::capture_mode());
+  EXPECT_EQ(mdm_->GetHeapProfilingMode(), kHeapProfilingModeInvalid);
+  EXPECT_FALSE(mdm_->EnableHeapProfiling(kHeapProfilingModeDisabled));
+  EXPECT_EQ(mdm_->GetHeapProfilingMode(), kHeapProfilingModeInvalid);
+}
+
+TEST_F(MemoryDumpManagerTestAsCoordinator, EnableHeapProfilingBackground) {
+  MockMemoryDumpProvider mdp1;
+  MemoryDumpProvider::Options supported_options;
+  supported_options.supports_heap_profiling = true;
+  RegisterDumpProvider(&mdp1, ThreadTaskRunnerHandle::Get(), supported_options);
+  testing::InSequence sequence;
+  EXPECT_CALL(mdp1, OnHeapProfilingEnabled(true)).Times(1);
+  EXPECT_CALL(mdp1, OnHeapProfilingEnabled(false)).Times(1);
+
+  // Enable tracing before heap profiling.
+  TraceConfig::MemoryDumpConfig config;
+  config.heap_profiler_options.breakdown_threshold_bytes = 100;
+  mdm_->SetupForTracing(config);
+  EXPECT_EQ(config.heap_profiler_options.breakdown_threshold_bytes,
+            mdm_->heap_profiler_serialization_state_for_testing()
+                ->heap_profiler_breakdown_threshold_bytes());
+  EXPECT_FALSE(
+      mdm_->heap_profiler_serialization_state_for_testing()->is_initialized());
+
+  EXPECT_TRUE(mdm_->EnableHeapProfiling(kHeapProfilingModeBackground));
+  RunLoop().RunUntilIdle();
+  ASSERT_EQ(AllocationContextTracker::CaptureMode::MIXED_STACK,
+            AllocationContextTracker::capture_mode());
+  EXPECT_EQ(mdm_->GetHeapProfilingMode(), kHeapProfilingModeBackground);
+  EXPECT_EQ(0u, TraceLog::GetInstance()->enabled_modes());
+  EXPECT_TRUE(
+      mdm_->heap_profiler_serialization_state_for_testing()->is_initialized());
+  // Do nothing when already enabled.
+  EXPECT_FALSE(mdm_->EnableHeapProfiling(kHeapProfilingModeBackground));
+  EXPECT_FALSE(mdm_->EnableHeapProfiling(kHeapProfilingModePseudo));
+  ASSERT_EQ(AllocationContextTracker::CaptureMode::MIXED_STACK,
+            AllocationContextTracker::capture_mode());
+  EXPECT_EQ(0u, TraceLog::GetInstance()->enabled_modes());
+  EXPECT_EQ(mdm_->GetHeapProfilingMode(), kHeapProfilingModeBackground);
+  // Disable will permanently disable heap profiling.
+  EXPECT_TRUE(mdm_->EnableHeapProfiling(kHeapProfilingModeDisabled));
+  RunLoop().RunUntilIdle();
+  ASSERT_EQ(AllocationContextTracker::CaptureMode::DISABLED,
+            AllocationContextTracker::capture_mode());
+  EXPECT_FALSE(mdm_->EnableHeapProfiling(kHeapProfilingModePseudo));
+  ASSERT_EQ(AllocationContextTracker::CaptureMode::DISABLED,
+            AllocationContextTracker::capture_mode());
+  EXPECT_EQ(mdm_->GetHeapProfilingMode(), kHeapProfilingModeInvalid);
+  EXPECT_FALSE(mdm_->EnableHeapProfiling(kHeapProfilingModeDisabled));
+  RunLoop().RunUntilIdle();
+  EXPECT_EQ(mdm_->GetHeapProfilingMode(), kHeapProfilingModeInvalid);
+  mdm_->TeardownForTracing();
+  EXPECT_FALSE(mdm_->heap_profiler_serialization_state_for_testing());
+}
+
+TEST_F(MemoryDumpManagerTestAsCoordinator, EnableHeapProfilingTask) {
+  MockMemoryDumpProvider mdp1;
+  MockMemoryDumpProvider mdp2;
+  MemoryDumpProvider::Options supported_options;
+  supported_options.supports_heap_profiling = true;
+  RegisterDumpProvider(&mdp1, ThreadTaskRunnerHandle::Get(), supported_options);
+  EXPECT_CALL(mdp1, OnHeapProfilingEnabled(_)).Times(0);
+  EXPECT_CALL(mdp2, OnHeapProfilingEnabled(_)).Times(0);
+
+  ASSERT_FALSE(base::debug::ThreadHeapUsageTracker::IsHeapTrackingEnabled());
+  EXPECT_TRUE(mdm_->EnableHeapProfiling(kHeapProfilingModeTaskProfiler));
+  RunLoop().RunUntilIdle();
+  ASSERT_EQ(AllocationContextTracker::CaptureMode::DISABLED,
+            AllocationContextTracker::capture_mode());
+  RegisterDumpProvider(&mdp2, ThreadTaskRunnerHandle::Get(), supported_options);
+  EXPECT_EQ(mdm_->GetHeapProfilingMode(), kHeapProfilingModeTaskProfiler);
+  ASSERT_TRUE(debug::ThreadHeapUsageTracker::IsHeapTrackingEnabled());
+  TestingThreadHeapUsageTracker::DisableHeapTrackingForTesting();
+  ASSERT_FALSE(base::debug::ThreadHeapUsageTracker::IsHeapTrackingEnabled());
+}
+
+TEST_F(MemoryDumpManagerTestAsCoordinator, EnableHeapProfilingDisableDisabled) {
+  ASSERT_EQ(mdm_->GetHeapProfilingMode(), kHeapProfilingModeDisabled);
+  EXPECT_FALSE(mdm_->EnableHeapProfiling(kHeapProfilingModeDisabled));
+  EXPECT_EQ(mdm_->GetHeapProfilingMode(), kHeapProfilingModeInvalid);
+}
+#endif  //  BUILDFLAG(USE_ALLOCATOR_SHIM) && !defined(OS_NACL)
+
+// Mock MDP class that checks that the number of OnMemoryDump() calls matches
+// the expected count. It is implemented without gmock since the EXPECT_CALL
+// implementation is slow when there are thousands of instances, as required
+// by the NoStackOverflowWithTooManyMDPs test.
+class SimpleMockMemoryDumpProvider : public MemoryDumpProvider {
+ public:
+  explicit SimpleMockMemoryDumpProvider(int expected_num_dump_calls)
+      : expected_num_dump_calls_(expected_num_dump_calls), num_dump_calls_(0) {}
+
+  ~SimpleMockMemoryDumpProvider() override {
+    EXPECT_EQ(expected_num_dump_calls_, num_dump_calls_);
+  }
+
+  bool OnMemoryDump(const MemoryDumpArgs& args,
+                    ProcessMemoryDump* pmd) override {
+    ++num_dump_calls_;
+    return true;
+  }
+
+ private:
+  int expected_num_dump_calls_;
+  int num_dump_calls_;
+};
+
+TEST_F(MemoryDumpManagerTest, NoStackOverflowWithTooManyMDPs) {
+  SetDumpProviderWhitelistForTesting(kTestMDPWhitelist);
+
+  const int kMDPCount = 1000;
+  std::vector<std::unique_ptr<SimpleMockMemoryDumpProvider>> mdps;
+  for (int i = 0; i < kMDPCount; ++i) {
+    mdps.push_back(std::make_unique<SimpleMockMemoryDumpProvider>(1));
+    RegisterDumpProvider(mdps.back().get(), nullptr);
+  }
+  for (int i = 0; i < kMDPCount; ++i) {
+    mdps.push_back(std::make_unique<SimpleMockMemoryDumpProvider>(3));
+    RegisterDumpProvider(mdps.back().get(), nullptr, kDefaultOptions,
+                         kWhitelistedMDPName);
+  }
+  auto stopped_thread = std::make_unique<Thread>("test thread");
+  stopped_thread->Start();
+  for (int i = 0; i < kMDPCount; ++i) {
+    mdps.push_back(std::make_unique<SimpleMockMemoryDumpProvider>(0));
+    RegisterDumpProvider(mdps.back().get(), stopped_thread->task_runner(),
+                         kDefaultOptions, kWhitelistedMDPName);
+  }
+  stopped_thread->Stop();
+
+  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+                                        MemoryDumpLevelOfDetail::DETAILED));
+  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+                                        MemoryDumpLevelOfDetail::BACKGROUND));
+  EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::SUMMARY_ONLY,
+                                        MemoryDumpLevelOfDetail::BACKGROUND));
+}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/memory_dump_provider.h b/base/trace_event/memory_dump_provider.h
new file mode 100644
index 0000000..b458bfb
--- /dev/null
+++ b/base/trace_event/memory_dump_provider.h
@@ -0,0 +1,81 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_MEMORY_DUMP_PROVIDER_H_
+#define BASE_TRACE_EVENT_MEMORY_DUMP_PROVIDER_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/process/process_handle.h"
+#include "base/trace_event/memory_dump_request_args.h"
+
+namespace base {
+namespace trace_event {
+
+class ProcessMemoryDump;
+
+// The contract interface that memory dump providers must implement.
+class BASE_EXPORT MemoryDumpProvider {
+ public:
+  // Optional arguments for MemoryDumpManager::RegisterDumpProvider().
+  struct Options {
+    Options()
+        : dumps_on_single_thread_task_runner(false),
+          is_fast_polling_supported(false),
+          supports_heap_profiling(false) {}
+
+    // |dumps_on_single_thread_task_runner| is true if the dump provider runs on
+    // a SingleThreadTaskRunner, which is usually the case. It is faster to run
+    // all providers that run on the same thread together without thread hops.
+    bool dumps_on_single_thread_task_runner;
+
+    // Set to true if the dump provider implementation supports high frequency
+    // polling. Only providers running without task runner affinity are
+    // supported.
+    bool is_fast_polling_supported;
+
+    // Set to true when the dump provider supports heap profiling. MDM sends
+    // OnHeapProfilingEnabled() notifications only if this is set to true.
+    bool supports_heap_profiling;
+  };
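+
+  // Illustrative registration sketch (not part of this header): the exact
+  // RegisterDumpProvider() signature lives in MemoryDumpManager; |provider|
+  // and |task_runner| are assumed to be supplied by the caller.
+  //
+  //   MemoryDumpProvider::Options options;
+  //   options.supports_heap_profiling = true;
+  //   MemoryDumpManager::GetInstance()->RegisterDumpProvider(
+  //       provider, "FooProvider", task_runner, options);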
+
+  virtual ~MemoryDumpProvider() = default;
+
+  // Called by the MemoryDumpManager when generating memory dumps.
+  // The |args| specify if the embedder should generate light/heavy dumps on
+  // dump requests. The embedder should return true if the |pmd| was
+  // successfully populated, false if something went wrong and the dump should
+  // be considered invalid.
+  // (Note, the MemoryDumpManager has a fail-safe logic which will disable the
+  // MemoryDumpProvider for the entire trace session if it fails consistently).
+  virtual bool OnMemoryDump(const MemoryDumpArgs& args,
+                            ProcessMemoryDump* pmd) = 0;
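+  //
+  // Illustrative sketch of an implementation (names such as "foo/main" and
+  // GetFooSizeInBytes() are hypothetical; CreateAllocatorDump() and the
+  // kNameSize/kUnitsBytes constants come from ProcessMemoryDump and
+  // MemoryAllocatorDump):
+  //
+  //   bool FooDumpProvider::OnMemoryDump(const MemoryDumpArgs& args,
+  //                                      ProcessMemoryDump* pmd) {
+  //     MemoryAllocatorDump* dump = pmd->CreateAllocatorDump("foo/main");
+  //     dump->AddScalar(MemoryAllocatorDump::kNameSize,
+  //                     MemoryAllocatorDump::kUnitsBytes,
+  //                     GetFooSizeInBytes());
+  //     return true;
+  //   }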
+
+  // Called by the MemoryDumpManager when an allocator should start or stop
+  // collecting extensive allocation data, if supported. Called only when
+  // |supports_heap_profiling| is set to true.
+  virtual void OnHeapProfilingEnabled(bool enabled) {}
+
+  // Quickly record the total memory usage in |memory_total|. This method will
+  // be called only when the dump provider registration has
+  // |is_fast_polling_supported| set to true. This method is used for polling at
+  // high frequency for detecting peaks. See comment on
+  // |is_fast_polling_supported| option if you need to override this method.
+  virtual void PollFastMemoryTotal(uint64_t* memory_total) {}
+
+  // Indicates that fast memory polling is not going to be used in the near
+  // future and the MDP can tear down any resource kept around for fast memory
+  // polling.
+  virtual void SuspendFastMemoryPolling() {}
+
+ protected:
+  MemoryDumpProvider() = default;
+
+  DISALLOW_COPY_AND_ASSIGN(MemoryDumpProvider);
+};
+
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_MEMORY_DUMP_PROVIDER_H_
diff --git a/base/trace_event/memory_dump_provider_info.cc b/base/trace_event/memory_dump_provider_info.cc
new file mode 100644
index 0000000..3220476
--- /dev/null
+++ b/base/trace_event/memory_dump_provider_info.cc
@@ -0,0 +1,43 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_dump_provider_info.h"
+
+#include <tuple>
+
+#include "base/sequenced_task_runner.h"
+
+namespace base {
+namespace trace_event {
+
+MemoryDumpProviderInfo::MemoryDumpProviderInfo(
+    MemoryDumpProvider* dump_provider,
+    const char* name,
+    scoped_refptr<SequencedTaskRunner> task_runner,
+    const MemoryDumpProvider::Options& options,
+    bool whitelisted_for_background_mode)
+    : dump_provider(dump_provider),
+      options(options),
+      name(name),
+      task_runner(std::move(task_runner)),
+      whitelisted_for_background_mode(whitelisted_for_background_mode),
+      consecutive_failures(0),
+      disabled(false) {}
+
+MemoryDumpProviderInfo::~MemoryDumpProviderInfo() = default;
+
+bool MemoryDumpProviderInfo::Comparator::operator()(
+    const scoped_refptr<MemoryDumpProviderInfo>& a,
+    const scoped_refptr<MemoryDumpProviderInfo>& b) const {
+  if (!a || !b)
+    return a.get() < b.get();
+  // Ensure that unbound providers (task_runner == nullptr) always run last.
+  // Rationale: some unbound dump providers are known to be slow; keeping them
+  // last avoids skewing the timings of the other dump providers.
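+  // With operator> the set is ordered by descending |task_runner| pointer
+  // value, so a null task runner (the smallest value) sorts to the end.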
+  return std::tie(a->task_runner, a->dump_provider) >
+         std::tie(b->task_runner, b->dump_provider);
+}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/memory_dump_provider_info.h b/base/trace_event/memory_dump_provider_info.h
new file mode 100644
index 0000000..f0ea1e6
--- /dev/null
+++ b/base/trace_event/memory_dump_provider_info.h
@@ -0,0 +1,108 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_MEMORY_DUMP_PROVIDER_INFO_H_
+#define BASE_TRACE_EVENT_MEMORY_DUMP_PROVIDER_INFO_H_
+
+#include <memory>
+#include <set>
+
+#include "base/base_export.h"
+#include "base/memory/ref_counted.h"
+#include "base/trace_event/memory_dump_provider.h"
+
+namespace base {
+
+class SequencedTaskRunner;
+
+namespace trace_event {
+
+// Wraps a MemoryDumpProvider (MDP), which is registered via
+// MemoryDumpManager(MDM)::RegisterDumpProvider(), holding the extra information
+// required to deal with it (which task runner it should be invoked onto,
+// whether it has been disabled, etc.)
+// More importantly, having a refptr to this object guarantees that a MDP that
+// is not thread-bound (hence which can only be unregistered via
+// MDM::UnregisterAndDeleteDumpProviderSoon()) will stay alive as long as the
+// refptr is held.
+//
+// Lifetime:
+// At any time, there is at most one instance of this class for each instance
+// of a given MemoryDumpProvider, but there might be several scoped_refptrs
+// holding onto each of these. Specifically:
+// - In nominal conditions, there is a refptr for each registered MDP in the
+//   MDM's |dump_providers_| list.
+// - In most cases, the only refptr (in the |dump_providers_| list) is destroyed
+//   by MDM::UnregisterDumpProvider().
+// - However, when MDM starts a dump, the list of refptrs is copied into the
+//   ProcessMemoryDumpAsyncState. That list is pruned as MDP(s) are invoked.
+// - If UnregisterDumpProvider() is called on a non-thread-bound MDP while a
+//   dump is in progress, the extar extra of the handle is destroyed in
+//   MDM::SetupNextMemoryDump() or MDM::InvokeOnMemoryDump(), when the copy
+//   inside ProcessMemoryDumpAsyncState is erase()-d.
+// - The PeakDetector can keep extra refptrs when enabled.
+struct BASE_EXPORT MemoryDumpProviderInfo
+    : public RefCountedThreadSafe<MemoryDumpProviderInfo> {
+ public:
+  // Define a total order based on the |task_runner| affinity, so that MDPs
+  // belonging to the same SequencedTaskRunner are adjacent in the set.
+  struct Comparator {
+    bool operator()(const scoped_refptr<MemoryDumpProviderInfo>& a,
+                    const scoped_refptr<MemoryDumpProviderInfo>& b) const;
+  };
+  using OrderedSet =
+      std::set<scoped_refptr<MemoryDumpProviderInfo>, Comparator>;
+
+  MemoryDumpProviderInfo(MemoryDumpProvider* dump_provider,
+                         const char* name,
+                         scoped_refptr<SequencedTaskRunner> task_runner,
+                         const MemoryDumpProvider::Options& options,
+                         bool whitelisted_for_background_mode);
+
+  // It is safe to access the const fields below from any thread as they are
+  // never mutated.
+
+  MemoryDumpProvider* const dump_provider;
+
+  // The |options| arg passed to MDM::RegisterDumpProvider().
+  const MemoryDumpProvider::Options options;
+
+  // Human-readable name, not unique (distinct MDP instances might have the
+  // same name). Used for debugging, testing, and whitelisting for BACKGROUND
+  // mode.
+  const char* const name;
+
+  // The task runner on which the MDP::OnMemoryDump call should be posted.
+  // Can be nullptr, in which case the MDP will be invoked on a background
+  // thread handled by MDM.
+  const scoped_refptr<SequencedTaskRunner> task_runner;
+
+  // True if the dump provider is whitelisted for background mode.
+  const bool whitelisted_for_background_mode;
+
+  // The fields below, by contrast, are not thread-safe and can be mutated
+  // only:
+  // - On the |task_runner|, when not null (i.e. for thread-bound MDPs).
+  // - By the MDM's background thread (or in any other way that guarantees
+  //   sequencing) for non-thread-bound MDPs.
+
+  // Used to transfer ownership for UnregisterAndDeleteDumpProviderSoon().
+  // nullptr in all other cases.
+  std::unique_ptr<MemoryDumpProvider> owned_dump_provider;
+
+  // For fail-safe logic (auto-disable failing MDPs).
+  int consecutive_failures;
+
+  // Flagged either by the auto-disable logic or during unregistration.
+  bool disabled;
+
+ private:
+  friend class base::RefCountedThreadSafe<MemoryDumpProviderInfo>;
+  ~MemoryDumpProviderInfo();
+
+  DISALLOW_COPY_AND_ASSIGN(MemoryDumpProviderInfo);
+};
+
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_MEMORY_DUMP_PROVIDER_INFO_H_
diff --git a/base/trace_event/memory_dump_request_args.cc b/base/trace_event/memory_dump_request_args.cc
new file mode 100644
index 0000000..3cb9cab
--- /dev/null
+++ b/base/trace_event/memory_dump_request_args.cc
@@ -0,0 +1,68 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_dump_request_args.h"
+
+#include "base/logging.h"
+
+namespace base {
+namespace trace_event {
+
+const char* MemoryDumpTypeToString(const MemoryDumpType& dump_type) {
+  switch (dump_type) {
+    case MemoryDumpType::PERIODIC_INTERVAL:
+      return "periodic_interval";
+    case MemoryDumpType::EXPLICITLY_TRIGGERED:
+      return "explicitly_triggered";
+    case MemoryDumpType::PEAK_MEMORY_USAGE:
+      return "peak_memory_usage";
+    case MemoryDumpType::SUMMARY_ONLY:
+      return "summary_only";
+  }
+  NOTREACHED();
+  return "unknown";
+}
+
+MemoryDumpType StringToMemoryDumpType(const std::string& str) {
+  if (str == "periodic_interval")
+    return MemoryDumpType::PERIODIC_INTERVAL;
+  if (str == "explicitly_triggered")
+    return MemoryDumpType::EXPLICITLY_TRIGGERED;
+  if (str == "peak_memory_usage")
+    return MemoryDumpType::PEAK_MEMORY_USAGE;
+  if (str == "summary_only")
+    return MemoryDumpType::SUMMARY_ONLY;
+  NOTREACHED();
+  return MemoryDumpType::LAST;
+}
+
+const char* MemoryDumpLevelOfDetailToString(
+    const MemoryDumpLevelOfDetail& level_of_detail) {
+  switch (level_of_detail) {
+    case MemoryDumpLevelOfDetail::BACKGROUND:
+      return "background";
+    case MemoryDumpLevelOfDetail::LIGHT:
+      return "light";
+    case MemoryDumpLevelOfDetail::DETAILED:
+      return "detailed";
+  }
+  NOTREACHED();
+  return "unknown";
+}
+
+MemoryDumpLevelOfDetail StringToMemoryDumpLevelOfDetail(
+    const std::string& str) {
+  if (str == "background")
+    return MemoryDumpLevelOfDetail::BACKGROUND;
+  if (str == "light")
+    return MemoryDumpLevelOfDetail::LIGHT;
+  if (str == "detailed")
+    return MemoryDumpLevelOfDetail::DETAILED;
+  NOTREACHED();
+  return MemoryDumpLevelOfDetail::LAST;
+}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/memory_dump_request_args.h b/base/trace_event/memory_dump_request_args.h
new file mode 100644
index 0000000..41bc99b
--- /dev/null
+++ b/base/trace_event/memory_dump_request_args.h
@@ -0,0 +1,102 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_MEMORY_DUMP_REQUEST_ARGS_H_
+#define BASE_TRACE_EVENT_MEMORY_DUMP_REQUEST_ARGS_H_
+
+// This file defines the types and structs used to issue memory dump requests.
+// These are also used in the IPCs for coordinating inter-process memory dumps.
+
+#include <stdint.h>
+#include <map>
+#include <memory>
+#include <string>
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/optional.h"
+#include "base/process/process_handle.h"
+
+namespace base {
+namespace trace_event {
+
+class ProcessMemoryDump;
+
+// Captures the reason why a memory dump is being requested. This is to allow
+// selective enabling of dumps, filtering and post-processing. Keep this
+// consistent with memory_instrumentation.mojo and
+// memory_instrumentation_struct_traits.{h,cc}
+enum class MemoryDumpType {
+  PERIODIC_INTERVAL,     // Dumping memory at periodic intervals.
+  EXPLICITLY_TRIGGERED,  // Non maskable dump request.
+  PEAK_MEMORY_USAGE,     // Dumping memory at detected peak total memory usage.
+  SUMMARY_ONLY,          // Calculate just the summary & don't add to the trace.
+  LAST = SUMMARY_ONLY
+};
+
+// Tells the MemoryDumpProvider(s) how detailed their dumps should be.
+// Keep this consistent with memory_instrumentation.mojo and
+// memory_instrumentation_struct_traits.{h,cc}
+enum class MemoryDumpLevelOfDetail : uint32_t {
+  FIRST,
+
+  // For background tracing mode. The dump time is quick, and typically just the
+  // totals are expected. Suballocations need not be specified. Dump name must
+  // contain only pre-defined strings and string arguments cannot be added.
+  BACKGROUND = FIRST,
+
+  // For the levels below, MemoryDumpProvider instances must guarantee that the
+  // total size reported in the root node is consistent. Only the granularity of
+  // the child MemoryAllocatorDump(s) differs with the levels.
+
+  // Few entries, typically a fixed number, per dump.
+  LIGHT,
+
+  // Unrestricted amount of entries per dump.
+  DETAILED,
+
+  LAST = DETAILED
+};
+
+// Keep this consistent with memory_instrumentation.mojo and
+// memory_instrumentation_struct_traits.{h,cc}
+struct BASE_EXPORT MemoryDumpRequestArgs {
+  // Globally unique identifier. In multi-process dumps, all processes issue a
+  // local dump with the same guid. This allows the trace importers to
+  // reconstruct the global dump.
+  uint64_t dump_guid;
+
+  MemoryDumpType dump_type;
+  MemoryDumpLevelOfDetail level_of_detail;
+};
+
+// Args for ProcessMemoryDump and passed to OnMemoryDump calls for memory dump
+// providers. Dump providers are expected to read the args for creating dumps.
+struct MemoryDumpArgs {
+  // Specifies how detailed the dumps should be.
+  MemoryDumpLevelOfDetail level_of_detail;
+
+  // Globally unique identifier. In multi-process dumps, all processes issue a
+  // local dump with the same guid. This allows the trace importers to
+  // reconstruct the global dump.
+  uint64_t dump_guid;
+};
+
+using ProcessMemoryDumpCallback = Callback<
+    void(bool success, uint64_t dump_guid, std::unique_ptr<ProcessMemoryDump>)>;
+
+BASE_EXPORT const char* MemoryDumpTypeToString(const MemoryDumpType& dump_type);
+
+BASE_EXPORT MemoryDumpType StringToMemoryDumpType(const std::string& str);
+
+BASE_EXPORT const char* MemoryDumpLevelOfDetailToString(
+    const MemoryDumpLevelOfDetail& level_of_detail);
+
+BASE_EXPORT MemoryDumpLevelOfDetail
+StringToMemoryDumpLevelOfDetail(const std::string& str);
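+
+// The string converters above round-trip with one another, e.g.
+// StringToMemoryDumpType("summary_only") == MemoryDumpType::SUMMARY_ONLY and
+// MemoryDumpTypeToString(MemoryDumpType::SUMMARY_ONLY) == "summary_only";
+// unrecognized inputs hit a NOTREACHED().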
+
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_MEMORY_DUMP_REQUEST_ARGS_H_
diff --git a/base/trace_event/memory_dump_scheduler.cc b/base/trace_event/memory_dump_scheduler.cc
new file mode 100644
index 0000000..8b03f5c
--- /dev/null
+++ b/base/trace_event/memory_dump_scheduler.cc
@@ -0,0 +1,118 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_dump_scheduler.h"
+
+#include <algorithm>
+#include <limits>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/threading/sequenced_task_runner_handle.h"
+
+namespace base {
+namespace trace_event {
+
+// static
+MemoryDumpScheduler* MemoryDumpScheduler::GetInstance() {
+  static MemoryDumpScheduler* instance = new MemoryDumpScheduler();
+  return instance;
+}
+
+MemoryDumpScheduler::MemoryDumpScheduler() : period_ms_(0), generation_(0) {}
+MemoryDumpScheduler::~MemoryDumpScheduler() {
+  // Hit only in tests. Checks that tests don't exit without calling Stop().
+  DCHECK(!is_enabled_for_testing());
+}
+
+void MemoryDumpScheduler::Start(
+    MemoryDumpScheduler::Config config,
+    scoped_refptr<SequencedTaskRunner> task_runner) {
+  DCHECK(!task_runner_);
+  task_runner_ = task_runner;
+  task_runner->PostTask(FROM_HERE, BindOnce(&MemoryDumpScheduler::StartInternal,
+                                            Unretained(this), config));
+}
+
+void MemoryDumpScheduler::Stop() {
+  if (!task_runner_)
+    return;
+  task_runner_->PostTask(FROM_HERE, BindOnce(&MemoryDumpScheduler::StopInternal,
+                                             Unretained(this)));
+  task_runner_ = nullptr;
+}
+
+void MemoryDumpScheduler::StartInternal(MemoryDumpScheduler::Config config) {
+  uint32_t light_dump_period_ms = 0;
+  uint32_t heavy_dump_period_ms = 0;
+  uint32_t min_period_ms = std::numeric_limits<uint32_t>::max();
+  for (const Config::Trigger& trigger : config.triggers) {
+    DCHECK_GT(trigger.period_ms, 0u);
+    switch (trigger.level_of_detail) {
+      case MemoryDumpLevelOfDetail::BACKGROUND:
+        break;
+      case MemoryDumpLevelOfDetail::LIGHT:
+        DCHECK_EQ(0u, light_dump_period_ms);
+        light_dump_period_ms = trigger.period_ms;
+        break;
+      case MemoryDumpLevelOfDetail::DETAILED:
+        DCHECK_EQ(0u, heavy_dump_period_ms);
+        heavy_dump_period_ms = trigger.period_ms;
+        break;
+    }
+    min_period_ms = std::min(min_period_ms, trigger.period_ms);
+  }
+
+  DCHECK_EQ(0u, light_dump_period_ms % min_period_ms);
+  DCHECK_EQ(0u, heavy_dump_period_ms % min_period_ms);
+  DCHECK(!config.callback.is_null());
+  callback_ = config.callback;
+  period_ms_ = min_period_ms;
+  tick_count_ = 0;
+  light_dump_rate_ = light_dump_period_ms / min_period_ms;
+  heavy_dump_rate_ = heavy_dump_period_ms / min_period_ms;
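+
+  // For example, triggers {LIGHT, 250 ms} and {DETAILED, 1000 ms} yield
+  // period_ms_ = 250, light_dump_rate_ = 1 and heavy_dump_rate_ = 4: every
+  // tick is at least LIGHT, and every fourth tick is upgraded to DETAILED
+  // (see Tick() below).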
+
+  // Trigger the first dump after 200ms.
+  // TODO(lalitm): this is a temporary hack to delay the first scheduled dump
+  // so that the child processes get tracing enabled notification via IPC.
+  // See crbug.com/770151.
+  SequencedTaskRunnerHandle::Get()->PostDelayedTask(
+      FROM_HERE,
+      BindOnce(&MemoryDumpScheduler::Tick, Unretained(this), ++generation_),
+      TimeDelta::FromMilliseconds(200));
+}
+
+void MemoryDumpScheduler::StopInternal() {
+  period_ms_ = 0;
+  generation_++;
+  callback_.Reset();
+}
+
+void MemoryDumpScheduler::Tick(uint32_t expected_generation) {
+  if (period_ms_ == 0 || generation_ != expected_generation)
+    return;
+
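+  // Pick the most detailed level due at this tick: DETAILED overrides LIGHT
+  // when both rates divide |tick_count_|; BACKGROUND is the fallback.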
+  MemoryDumpLevelOfDetail level_of_detail = MemoryDumpLevelOfDetail::BACKGROUND;
+  if (light_dump_rate_ > 0 && tick_count_ % light_dump_rate_ == 0)
+    level_of_detail = MemoryDumpLevelOfDetail::LIGHT;
+  if (heavy_dump_rate_ > 0 && tick_count_ % heavy_dump_rate_ == 0)
+    level_of_detail = MemoryDumpLevelOfDetail::DETAILED;
+  tick_count_++;
+
+  callback_.Run(level_of_detail);
+
+  SequencedTaskRunnerHandle::Get()->PostDelayedTask(
+      FROM_HERE,
+      BindOnce(&MemoryDumpScheduler::Tick, Unretained(this),
+               expected_generation),
+      TimeDelta::FromMilliseconds(period_ms_));
+}
+
+MemoryDumpScheduler::Config::Config() = default;
+MemoryDumpScheduler::Config::~Config() = default;
+MemoryDumpScheduler::Config::Config(const MemoryDumpScheduler::Config&) =
+    default;
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/memory_dump_scheduler.h b/base/trace_event/memory_dump_scheduler.h
new file mode 100644
index 0000000..21334f0
--- /dev/null
+++ b/base/trace_event/memory_dump_scheduler.h
@@ -0,0 +1,76 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_MEMORY_DUMP_SCHEDULER_H_
+#define BASE_TRACE_EVENT_MEMORY_DUMP_SCHEDULER_H_
+
+#include <stdint.h>
+
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "base/trace_event/memory_dump_request_args.h"
+
+namespace base {
+class SequencedTaskRunner;
+
+namespace trace_event {
+
+// Schedules global dump requests based on the triggers added. The methods of
+// this class are NOT thread-safe; the client must take care to invoke them
+// safely.
+class BASE_EXPORT MemoryDumpScheduler {
+ public:
+  using PeriodicCallback = RepeatingCallback<void(MemoryDumpLevelOfDetail)>;
+
+  // Passed to Start().
+  struct BASE_EXPORT Config {
+    struct Trigger {
+      MemoryDumpLevelOfDetail level_of_detail;
+      uint32_t period_ms;
+    };
+
+    Config();
+    Config(const Config&);
+    ~Config();
+
+    std::vector<Trigger> triggers;
+    PeriodicCallback callback;
+  };
+
+  static MemoryDumpScheduler* GetInstance();
+
+  void Start(Config, scoped_refptr<SequencedTaskRunner> task_runner);
+  void Stop();
+  bool is_enabled_for_testing() const { return bool(task_runner_); }
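+
+  // Illustrative usage sketch (|task_runner| and OnDumpTick() are assumed to
+  // be provided by the embedder):
+  //
+  //   MemoryDumpScheduler::Config config;
+  //   config.triggers.push_back({MemoryDumpLevelOfDetail::LIGHT, 250});
+  //   config.triggers.push_back({MemoryDumpLevelOfDetail::DETAILED, 1000});
+  //   config.callback = Bind(&OnDumpTick);
+  //   MemoryDumpScheduler::GetInstance()->Start(config, task_runner);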
+
+ private:
+  friend class MemoryDumpSchedulerTest;
+  MemoryDumpScheduler();
+  ~MemoryDumpScheduler();
+
+  void StartInternal(Config);
+  void StopInternal();
+  void Tick(uint32_t expected_generation);
+
+  // Accessed only by the public methods (never from the task runner itself).
+  scoped_refptr<SequencedTaskRunner> task_runner_;
+
+  // These fields instead are only accessed from within the task runner.
+  uint32_t period_ms_;   // 0 == disabled.
+  uint32_t generation_;  // Used to invalidate outstanding tasks after Stop().
+  uint32_t tick_count_;
+  uint32_t light_dump_rate_;
+  uint32_t heavy_dump_rate_;
+  PeriodicCallback callback_;
+
+  DISALLOW_COPY_AND_ASSIGN(MemoryDumpScheduler);
+};
+
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_MEMORY_DUMP_SCHEDULER_H_
diff --git a/base/trace_event/memory_dump_scheduler_unittest.cc b/base/trace_event/memory_dump_scheduler_unittest.cc
new file mode 100644
index 0000000..d5993b6
--- /dev/null
+++ b/base/trace_event/memory_dump_scheduler_unittest.cc
@@ -0,0 +1,200 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_dump_scheduler.h"
+
+#include <memory>
+
+#include "base/bind.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/thread.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::AtMost;
+using ::testing::Invoke;
+using ::testing::_;
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+// Wrapper to use gmock on a callback.
+struct CallbackWrapper {
+  MOCK_METHOD1(OnTick, void(MemoryDumpLevelOfDetail));
+};
+
+}  // namespace
+
+class MemoryDumpSchedulerTest : public testing::Test {
+ public:
+  MemoryDumpSchedulerTest()
+      : testing::Test(),
+        evt_(WaitableEvent::ResetPolicy::MANUAL,
+             WaitableEvent::InitialState::NOT_SIGNALED),
+        bg_thread_("MemoryDumpSchedulerTest Thread") {
+    bg_thread_.Start();
+  }
+
+ protected:
+  MemoryDumpScheduler scheduler_;
+  WaitableEvent evt_;
+  CallbackWrapper on_tick_;
+  Thread bg_thread_;
+};
+
+TEST_F(MemoryDumpSchedulerTest, SingleTrigger) {
+  const uint32_t kPeriodMs = 1;
+  const auto kLevelOfDetail = MemoryDumpLevelOfDetail::DETAILED;
+  const uint32_t kTicks = 5;
+  MemoryDumpScheduler::Config config;
+  config.triggers.push_back({kLevelOfDetail, kPeriodMs});
+  config.callback = Bind(&CallbackWrapper::OnTick, Unretained(&on_tick_));
+
+  testing::InSequence sequence;
+  EXPECT_CALL(on_tick_, OnTick(_)).Times(kTicks - 1);
+  EXPECT_CALL(on_tick_, OnTick(_))
+      .WillRepeatedly(Invoke(
+          [this, kLevelOfDetail](MemoryDumpLevelOfDetail level_of_detail) {
+            EXPECT_EQ(kLevelOfDetail, level_of_detail);
+            this->evt_.Signal();
+          }));
+
+  // Check that Stop() before Start() doesn't cause any error.
+  scheduler_.Stop();
+
+  const TimeTicks tstart = TimeTicks::Now();
+  scheduler_.Start(config, bg_thread_.task_runner());
+  evt_.Wait();
+  const double time_ms = (TimeTicks::Now() - tstart).InMillisecondsF();
+
+  // It takes N-1 ms to perform N ticks of 1ms each.
+  EXPECT_GE(time_ms, kPeriodMs * (kTicks - 1));
+
+  // Check that stopping twice doesn't cause any problems.
+  scheduler_.Stop();
+  scheduler_.Stop();
+}
+
+TEST_F(MemoryDumpSchedulerTest, MultipleTriggers) {
+  const uint32_t kPeriodLightMs = 3;
+  const uint32_t kPeriodDetailedMs = 9;
+  MemoryDumpScheduler::Config config;
+  const MemoryDumpLevelOfDetail kLight = MemoryDumpLevelOfDetail::LIGHT;
+  const MemoryDumpLevelOfDetail kDetailed = MemoryDumpLevelOfDetail::DETAILED;
+  config.triggers.push_back({kLight, kPeriodLightMs});
+  config.triggers.push_back({kDetailed, kPeriodDetailedMs});
+  config.callback = Bind(&CallbackWrapper::OnTick, Unretained(&on_tick_));
+
+  TimeTicks t1, t2, t3;
+
+  testing::InSequence sequence;
+  EXPECT_CALL(on_tick_, OnTick(kDetailed))
+      .WillOnce(
+          Invoke([&t1](MemoryDumpLevelOfDetail) { t1 = TimeTicks::Now(); }));
+  EXPECT_CALL(on_tick_, OnTick(kLight)).Times(1);
+  EXPECT_CALL(on_tick_, OnTick(kLight)).Times(1);
+  EXPECT_CALL(on_tick_, OnTick(kDetailed))
+      .WillOnce(
+          Invoke([&t2](MemoryDumpLevelOfDetail) { t2 = TimeTicks::Now(); }));
+  EXPECT_CALL(on_tick_, OnTick(kLight))
+      .WillOnce(
+          Invoke([&t3](MemoryDumpLevelOfDetail) { t3 = TimeTicks::Now(); }));
+
+  // Rationale for WillRepeatedly rather than just WillOnce: extra ticks might
+  // happen if Stop() takes time. That case is not interesting, but gmock must
+  // not complain when it occurs.
+  EXPECT_CALL(on_tick_, OnTick(_))
+      .WillRepeatedly(
+          Invoke([this](MemoryDumpLevelOfDetail) { this->evt_.Signal(); }));
+
+  scheduler_.Start(config, bg_thread_.task_runner());
+  evt_.Wait();
+  scheduler_.Stop();
+  EXPECT_GE((t2 - t1).InMillisecondsF(), kPeriodDetailedMs);
+  EXPECT_GE((t3 - t2).InMillisecondsF(), kPeriodLightMs);
+}
+
+TEST_F(MemoryDumpSchedulerTest, StartStopQuickly) {
+  const uint32_t kPeriodMs = 3;
+  const uint32_t kQuickIterations = 5;
+  const uint32_t kDetailedTicks = 10;
+
+  MemoryDumpScheduler::Config light_config;
+  light_config.triggers.push_back({MemoryDumpLevelOfDetail::LIGHT, kPeriodMs});
+  light_config.callback = Bind(&CallbackWrapper::OnTick, Unretained(&on_tick_));
+
+  MemoryDumpScheduler::Config detailed_config;
+  detailed_config.triggers.push_back(
+      {MemoryDumpLevelOfDetail::DETAILED, kPeriodMs});
+  detailed_config.callback =
+      Bind(&CallbackWrapper::OnTick, Unretained(&on_tick_));
+
+  testing::InSequence sequence;
+  EXPECT_CALL(on_tick_, OnTick(MemoryDumpLevelOfDetail::LIGHT))
+      .Times(AtMost(kQuickIterations));
+  EXPECT_CALL(on_tick_, OnTick(MemoryDumpLevelOfDetail::DETAILED))
+      .Times(kDetailedTicks - 1);
+  EXPECT_CALL(on_tick_, OnTick(MemoryDumpLevelOfDetail::DETAILED))
+      .WillRepeatedly(
+          Invoke([this](MemoryDumpLevelOfDetail) { this->evt_.Signal(); }));
+
+  const TimeTicks tstart = TimeTicks::Now();
+  for (unsigned int i = 0; i < kQuickIterations; i++) {
+    scheduler_.Start(light_config, bg_thread_.task_runner());
+    scheduler_.Stop();
+  }
+
+  scheduler_.Start(detailed_config, bg_thread_.task_runner());
+
+  evt_.Wait();
+  const double time_ms = (TimeTicks::Now() - tstart).InMillisecondsF();
+  scheduler_.Stop();
+
+  // It takes (N - 1) * kPeriodMs ms to perform N ticks of kPeriodMs each.
+  EXPECT_GE(time_ms, kPeriodMs * (kDetailedTicks - 1));
+}
+
+TEST_F(MemoryDumpSchedulerTest, StopAndStartOnAnotherThread) {
+  const uint32_t kPeriodMs = 1;
+  const uint32_t kTicks = 3;
+  MemoryDumpScheduler::Config config;
+  config.triggers.push_back({MemoryDumpLevelOfDetail::DETAILED, kPeriodMs});
+  config.callback = Bind(&CallbackWrapper::OnTick, Unretained(&on_tick_));
+
+  scoped_refptr<TaskRunner> expected_task_runner = bg_thread_.task_runner();
+  testing::InSequence sequence;
+  EXPECT_CALL(on_tick_, OnTick(_)).Times(kTicks - 1);
+  EXPECT_CALL(on_tick_, OnTick(_))
+      .WillRepeatedly(
+          Invoke([this, expected_task_runner](MemoryDumpLevelOfDetail) {
+            EXPECT_TRUE(expected_task_runner->RunsTasksInCurrentSequence());
+            this->evt_.Signal();
+          }));
+
+  scheduler_.Start(config, bg_thread_.task_runner());
+  evt_.Wait();
+  scheduler_.Stop();
+  bg_thread_.Stop();
+
+  Thread bg_thread_2("MemoryDumpSchedulerTest Thread 2");
+  bg_thread_2.Start();
+  evt_.Reset();
+  expected_task_runner = bg_thread_2.task_runner();
+  EXPECT_CALL(on_tick_, OnTick(_)).Times(kTicks - 1);
+  EXPECT_CALL(on_tick_, OnTick(_))
+      .WillRepeatedly(
+          Invoke([this, expected_task_runner](MemoryDumpLevelOfDetail) {
+            EXPECT_TRUE(expected_task_runner->RunsTasksInCurrentSequence());
+            this->evt_.Signal();
+          }));
+  scheduler_.Start(config, bg_thread_2.task_runner());
+  evt_.Wait();
+  scheduler_.Stop();
+}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/memory_infra_background_whitelist.cc b/base/trace_event/memory_infra_background_whitelist.cc
new file mode 100644
index 0000000..0e69c7c
--- /dev/null
+++ b/base/trace_event/memory_infra_background_whitelist.cc
@@ -0,0 +1,386 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_infra_background_whitelist.h"
+
+#include <ctype.h>
+#include <string.h>
+
+#include <string>
+
+#include "base/strings/string_util.h"
+
+namespace base {
+namespace trace_event {
+namespace {
+
+// The names of dump providers whitelisted for background tracing. Dump
+// providers can be added here only if the background mode dump has very
+// little processor and memory overhead.
+// TODO(ssid): Some dump providers do not create ownership edges on background
+// dump. So, the effective size will not be correct.
+const char* const kDumpProviderWhitelist[] = {
+    "android::ResourceManagerImpl",
+    "AutocompleteController",
+    "BlinkGC",
+    "BlinkObjectCounters",
+    "BlobStorageContext",
+    "ClientDiscardableSharedMemoryManager",
+    "DOMStorage",
+    "DownloadService",
+    "DiscardableSharedMemoryManager",
+    "gpu::BufferManager",
+    "gpu::RenderbufferManager",
+    "gpu::TextureManager",
+    "FontCaches",
+    "HistoryReport",
+    "IPCChannel",
+    "IndexedDBBackingStore",
+    "InMemoryURLIndex",
+    "JavaHeap",
+    "LevelDB",
+    "LeveldbValueStore",
+    "LocalStorage",
+    "Malloc",
+    "MemoryCache",
+    "MojoHandleTable",
+    "MojoLevelDB",
+    "MojoMessages",
+    "PartitionAlloc",
+    "ProcessMemoryMetrics",
+    "RenderProcessHost",
+    "SharedMemoryTracker",
+    "Skia",
+    "Sql",
+    "URLRequestContext",
+    "V8Isolate",
+    "SyncDirectory",
+    "TabRestoreServiceHelper",
+    nullptr  // End of list marker.
+};
+
+// A list of string names that are allowed for the memory allocator dumps in
+// background mode.
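+// Entries may use "0x?" as a wildcard for hexadecimal addresses, which
+// IsMemoryAllocatorDumpNameWhitelisted() strips from the queried name before
+// the lookup.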
+const char* const kAllocatorDumpNameWhitelist[] = {
+    "blink_gc",
+    "blink_gc/allocated_objects",
+    "blink_objects/AudioHandler",
+    "blink_objects/Document",
+    "blink_objects/Frame",
+    "blink_objects/JSEventListener",
+    "blink_objects/LayoutObject",
+    "blink_objects/MediaKeySession",
+    "blink_objects/MediaKeys",
+    "blink_objects/Node",
+    "blink_objects/Resource",
+    "blink_objects/RTCPeerConnection",
+    "blink_objects/ScriptPromise",
+    "blink_objects/PausableObject",
+    "blink_objects/V8PerContextData",
+    "blink_objects/WorkerGlobalScope",
+    "blink_objects/UACSSResource",
+    "blink_objects/ResourceFetcher",
+    "components/download/controller_0x?",
+    "discardable",
+    "discardable/child_0x?",
+    "extensions/value_store/Extensions.Database.Open.Settings/0x?",
+    "extensions/value_store/Extensions.Database.Open.Rules/0x?",
+    "extensions/value_store/Extensions.Database.Open.State/0x?",
+    "extensions/value_store/Extensions.Database.Open/0x?",
+    "extensions/value_store/Extensions.Database.Restore/0x?",
+    "extensions/value_store/Extensions.Database.Value.Restore/0x?",
+    "font_caches/font_platform_data_cache",
+    "font_caches/shape_caches",
+    "gpu/gl/buffers/share_group_0x?",
+    "gpu/gl/renderbuffers/share_group_0x?",
+    "gpu/gl/textures/share_group_0x?",
+    "history/delta_file_service/leveldb_0x?",
+    "history/usage_reports_buffer/leveldb_0x?",
+    "java_heap",
+    "java_heap/allocated_objects",
+    "leveldatabase",
+    "leveldatabase/block_cache/browser",
+    "leveldatabase/block_cache/in_memory",
+    "leveldatabase/block_cache/unified",
+    "leveldatabase/block_cache/web",
+    "leveldatabase/db_0x?",
+    "leveldatabase/db_0x?/block_cache",
+    "leveldatabase/memenv_0x?",
+    "malloc",
+    "malloc/allocated_objects",
+    "malloc/metadata_fragmentation_caches",
+    "mojo",
+    "mojo/data_pipe_consumer",
+    "mojo/data_pipe_producer",
+    "mojo/messages",
+    "mojo/message_pipe",
+    "mojo/platform_handle",
+    "mojo/queued_ipc_channel_message/0x?",
+    "mojo/render_process_host/0x?",
+    "mojo/shared_buffer",
+    "mojo/unknown",
+    "mojo/watcher",
+    "net/http_network_session_0x?",
+    "net/http_network_session_0x?/quic_stream_factory",
+    "net/http_network_session_0x?/socket_pool",
+    "net/http_network_session_0x?/spdy_session_pool",
+    "net/http_network_session_0x?/stream_factory",
+    "net/ssl_session_cache",
+    "net/url_request_context",
+    "net/url_request_context/app_request",
+    "net/url_request_context/app_request/0x?",
+    "net/url_request_context/app_request/0x?/cookie_monster",
+    "net/url_request_context/app_request/0x?/cookie_monster/cookies",
+    "net/url_request_context/app_request/0x?/cookie_monster/"
+    "tasks_pending_global",
+    "net/url_request_context/app_request/0x?/cookie_monster/"
+    "tasks_pending_for_key",
+    "net/url_request_context/app_request/0x?/http_cache",
+    "net/url_request_context/app_request/0x?/http_cache/memory_backend",
+    "net/url_request_context/app_request/0x?/http_cache/simple_backend",
+    "net/url_request_context/app_request/0x?/http_network_session",
+    "net/url_request_context/extensions",
+    "net/url_request_context/extensions/0x?",
+    "net/url_request_context/extensions/0x?/cookie_monster",
+    "net/url_request_context/extensions/0x?/cookie_monster/cookies",
+    "net/url_request_context/extensions/0x?/cookie_monster/"
+    "tasks_pending_global",
+    "net/url_request_context/extensions/0x?/cookie_monster/"
+    "tasks_pending_for_key",
+    "net/url_request_context/extensions/0x?/http_cache",
+    "net/url_request_context/extensions/0x?/http_cache/memory_backend",
+    "net/url_request_context/extensions/0x?/http_cache/simple_backend",
+    "net/url_request_context/extensions/0x?/http_network_session",
+    "net/url_request_context/isolated_media",
+    "net/url_request_context/isolated_media/0x?",
+    "net/url_request_context/isolated_media/0x?/cookie_monster",
+    "net/url_request_context/isolated_media/0x?/cookie_monster/cookies",
+    "net/url_request_context/isolated_media/0x?/cookie_monster/"
+    "tasks_pending_global",
+    "net/url_request_context/isolated_media/0x?/cookie_monster/"
+    "tasks_pending_for_key",
+    "net/url_request_context/isolated_media/0x?/http_cache",
+    "net/url_request_context/isolated_media/0x?/http_cache/memory_backend",
+    "net/url_request_context/isolated_media/0x?/http_cache/simple_backend",
+    "net/url_request_context/isolated_media/0x?/http_network_session",
+    "net/url_request_context/main",
+    "net/url_request_context/main/0x?",
+    "net/url_request_context/main/0x?/cookie_monster",
+    "net/url_request_context/main/0x?/cookie_monster/cookies",
+    "net/url_request_context/main/0x?/cookie_monster/tasks_pending_global",
+    "net/url_request_context/main/0x?/cookie_monster/tasks_pending_for_key",
+    "net/url_request_context/main/0x?/http_cache",
+    "net/url_request_context/main/0x?/http_cache/memory_backend",
+    "net/url_request_context/main/0x?/http_cache/simple_backend",
+    "net/url_request_context/main/0x?/http_network_session",
+    "net/url_request_context/main_media",
+    "net/url_request_context/main_media/0x?",
+    "net/url_request_context/main_media/0x?/cookie_monster",
+    "net/url_request_context/main_media/0x?/cookie_monster/cookies",
+    "net/url_request_context/main_media/0x?/cookie_monster/"
+    "tasks_pending_global",
+    "net/url_request_context/main_media/0x?/cookie_monster/"
+    "tasks_pending_for_key",
+    "net/url_request_context/main_media/0x?/http_cache",
+    "net/url_request_context/main_media/0x?/http_cache/memory_backend",
+    "net/url_request_context/main_media/0x?/http_cache/simple_backend",
+    "net/url_request_context/main_media/0x?/http_network_session",
+    "net/url_request_context/proxy",
+    "net/url_request_context/proxy/0x?",
+    "net/url_request_context/proxy/0x?/cookie_monster",
+    "net/url_request_context/proxy/0x?/cookie_monster/cookies",
+    "net/url_request_context/proxy/0x?/cookie_monster/tasks_pending_global",
+    "net/url_request_context/proxy/0x?/cookie_monster/tasks_pending_for_key",
+    "net/url_request_context/proxy/0x?/http_cache",
+    "net/url_request_context/proxy/0x?/http_cache/memory_backend",
+    "net/url_request_context/proxy/0x?/http_cache/simple_backend",
+    "net/url_request_context/proxy/0x?/http_network_session",
+    "net/url_request_context/safe_browsing",
+    "net/url_request_context/safe_browsing/0x?",
+    "net/url_request_context/safe_browsing/0x?/cookie_monster",
+    "net/url_request_context/safe_browsing/0x?/cookie_monster/cookies",
+    "net/url_request_context/safe_browsing/0x?/cookie_monster/"
+    "tasks_pending_global",
+    "net/url_request_context/safe_browsing/0x?/cookie_monster/"
+    "tasks_pending_for_key",
+    "net/url_request_context/safe_browsing/0x?/http_cache",
+    "net/url_request_context/safe_browsing/0x?/http_cache/memory_backend",
+    "net/url_request_context/safe_browsing/0x?/http_cache/simple_backend",
+    "net/url_request_context/safe_browsing/0x?/http_network_session",
+    "net/url_request_context/system",
+    "net/url_request_context/system/0x?",
+    "net/url_request_context/system/0x?/cookie_monster",
+    "net/url_request_context/system/0x?/cookie_monster/cookies",
+    "net/url_request_context/system/0x?/cookie_monster/tasks_pending_global",
+    "net/url_request_context/system/0x?/cookie_monster/tasks_pending_for_key",
+    "net/url_request_context/system/0x?/http_cache",
+    "net/url_request_context/system/0x?/http_cache/memory_backend",
+    "net/url_request_context/system/0x?/http_cache/simple_backend",
+    "net/url_request_context/system/0x?/http_network_session",
+    "net/url_request_context/unknown",
+    "net/url_request_context/unknown/0x?",
+    "net/url_request_context/unknown/0x?/cookie_monster",
+    "net/url_request_context/unknown/0x?/cookie_monster/cookies",
+    "net/url_request_context/unknown/0x?/cookie_monster/tasks_pending_global",
+    "net/url_request_context/unknown/0x?/cookie_monster/tasks_pending_for_key",
+    "net/url_request_context/unknown/0x?/http_cache",
+    "net/url_request_context/unknown/0x?/http_cache/memory_backend",
+    "net/url_request_context/unknown/0x?/http_cache/simple_backend",
+    "net/url_request_context/unknown/0x?/http_network_session",
+    "omnibox/autocomplete_controller/0x?",
+    "omnibox/in_memory_url_index/0x?",
+    "web_cache/Image_resources",
+    "web_cache/CSS stylesheet_resources",
+    "web_cache/Script_resources",
+    "web_cache/XSL stylesheet_resources",
+    "web_cache/Font_resources",
+    "web_cache/Other_resources",
+    "partition_alloc/allocated_objects",
+    "partition_alloc/partitions",
+    "partition_alloc/partitions/array_buffer",
+    "partition_alloc/partitions/buffer",
+    "partition_alloc/partitions/fast_malloc",
+    "partition_alloc/partitions/layout",
+    "skia/sk_glyph_cache",
+    "skia/sk_resource_cache",
+    "sqlite",
+    "ui/resource_manager_0x?/default_resource/0x?",
+    "ui/resource_manager_0x?/tinted_resource",
+    "v8/isolate_0x?/contexts/detached_context",
+    "v8/isolate_0x?/contexts/native_context",
+    "v8/isolate_0x?/heap_spaces",
+    "v8/isolate_0x?/heap_spaces/code_space",
+    "v8/isolate_0x?/heap_spaces/large_object_space",
+    "v8/isolate_0x?/heap_spaces/map_space",
+    "v8/isolate_0x?/heap_spaces/new_space",
+    "v8/isolate_0x?/heap_spaces/old_space",
+    "v8/isolate_0x?/heap_spaces/read_only_space",
+    "v8/isolate_0x?/malloc",
+    "v8/isolate_0x?/zapped_for_debug",
+    "site_storage/blob_storage/0x?",
+    "site_storage/index_db/db_0x?",
+    "site_storage/index_db/memenv_0x?",
+    "site_storage/localstorage/0x?/cache_size",
+    "site_storage/localstorage/0x?/leveldb",
+    "site_storage/session_storage/0x?",
+    "site_storage/session_storage/0x?/cache_size",
+    "sync/0x?/kernel",
+    "sync/0x?/store",
+    "sync/0x?/model_type/APP",
+    "sync/0x?/model_type/APP_LIST",
+    "sync/0x?/model_type/APP_NOTIFICATION",
+    "sync/0x?/model_type/APP_SETTING",
+    "sync/0x?/model_type/ARC_PACKAGE",
+    "sync/0x?/model_type/ARTICLE",
+    "sync/0x?/model_type/AUTOFILL",
+    "sync/0x?/model_type/AUTOFILL_PROFILE",
+    "sync/0x?/model_type/AUTOFILL_WALLET",
+    "sync/0x?/model_type/BOOKMARK",
+    "sync/0x?/model_type/DEVICE_INFO",
+    "sync/0x?/model_type/DICTIONARY",
+    "sync/0x?/model_type/EXPERIMENTS",
+    "sync/0x?/model_type/EXTENSION",
+    "sync/0x?/model_type/EXTENSION_SETTING",
+    "sync/0x?/model_type/FAVICON_IMAGE",
+    "sync/0x?/model_type/FAVICON_TRACKING",
+    "sync/0x?/model_type/HISTORY_DELETE_DIRECTIVE",
+    "sync/0x?/model_type/MANAGED_USER",
+    "sync/0x?/model_type/MANAGED_USER_SETTING",
+    "sync/0x?/model_type/MANAGED_USER_SHARED_SETTING",
+    "sync/0x?/model_type/MANAGED_USER_WHITELIST",
+    "sync/0x?/model_type/NIGORI",
+    "sync/0x?/model_type/PASSWORD",
+    "sync/0x?/model_type/PREFERENCE",
+    "sync/0x?/model_type/PRINTER",
+    "sync/0x?/model_type/PRIORITY_PREFERENCE",
+    "sync/0x?/model_type/READING_LIST",
+    "sync/0x?/model_type/SEARCH_ENGINE",
+    "sync/0x?/model_type/SESSION",
+    "sync/0x?/model_type/SYNCED_NOTIFICATION",
+    "sync/0x?/model_type/SYNCED_NOTIFICATION_APP_INFO",
+    "sync/0x?/model_type/THEME",
+    "sync/0x?/model_type/TYPED_URL",
+    "sync/0x?/model_type/USER_EVENT",
+    "sync/0x?/model_type/WALLET_METADATA",
+    "sync/0x?/model_type/WIFI_CREDENTIAL",
+    "tab_restore/service_helper_0x?/entries",
+    "tab_restore/service_helper_0x?/entries/tab_0x?",
+    "tab_restore/service_helper_0x?/entries/window_0x?",
+    "tracing/heap_profiler_blink_gc/AllocationRegister",
+    "tracing/heap_profiler_malloc/AllocationRegister",
+    "tracing/heap_profiler_partition_alloc/AllocationRegister",
+    nullptr  // End of list marker.
+};
+
+const char* const* g_dump_provider_whitelist = kDumpProviderWhitelist;
+const char* const* g_allocator_dump_name_whitelist =
+    kAllocatorDumpNameWhitelist;
+
+bool IsMemoryDumpProviderInList(const char* mdp_name, const char* const* list) {
+  for (size_t i = 0; list[i] != nullptr; ++i) {
+    if (strcmp(mdp_name, list[i]) == 0)
+      return true;
+  }
+  return false;
+}
+
+}  // namespace
+
+bool IsMemoryDumpProviderWhitelisted(const char* mdp_name) {
+  return IsMemoryDumpProviderInList(mdp_name, g_dump_provider_whitelist);
+}
+
+bool IsMemoryAllocatorDumpNameWhitelisted(const std::string& name) {
+  // Global dumps are explicitly whitelisted for background use.
+  if (base::StartsWith(name, "global/", CompareCase::SENSITIVE)) {
+    for (size_t i = strlen("global/"); i < name.size(); i++)
+      if (!base::IsHexDigit(name[i]))
+        return false;
+    return true;
+  }
+
+  if (base::StartsWith(name, "shared_memory/", CompareCase::SENSITIVE)) {
+    for (size_t i = strlen("shared_memory/"); i < name.size(); i++)
+      if (!base::IsHexDigit(name[i]))
+        return false;
+    return true;
+  }
+
+  // Replace hexadecimal numbers (marked by '0x') in the given string with the
+  // generic "0x?" placeholder, e.g. "sync/0x1a2b/kernel" -> "sync/0x?/kernel",
+  // so that it can be matched against the whitelist entries above.
+  const size_t length = name.size();
+  std::string stripped_str;
+  stripped_str.reserve(length);
+  bool parsing_hex = false;
+  for (size_t i = 0; i < length; ++i) {
+    if (parsing_hex && isxdigit(name[i]))
+      continue;
+    parsing_hex = false;
+    if (i + 1 < length && name[i] == '0' && name[i + 1] == 'x') {
+      parsing_hex = true;
+      stripped_str.append("0x?");
+      ++i;
+    } else {
+      stripped_str.push_back(name[i]);
+    }
+  }
+
+  for (size_t i = 0; g_allocator_dump_name_whitelist[i] != nullptr; ++i) {
+    if (stripped_str == g_allocator_dump_name_whitelist[i]) {
+      return true;
+    }
+  }
+  return false;
+}
+
+void SetDumpProviderWhitelistForTesting(const char* const* list) {
+  g_dump_provider_whitelist = list;
+}
+
+void SetAllocatorDumpNameWhitelistForTesting(const char* const* list) {
+  g_allocator_dump_name_whitelist = list;
+}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/memory_infra_background_whitelist.h b/base/trace_event/memory_infra_background_whitelist.h
new file mode 100644
index 0000000..b8d704a
--- /dev/null
+++ b/base/trace_event/memory_infra_background_whitelist.h
@@ -0,0 +1,33 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_MEMORY_INFRA_BACKGROUND_WHITELIST_H_
+#define BASE_TRACE_EVENT_MEMORY_INFRA_BACKGROUND_WHITELIST_H_
+
+// This file contains the whitelists for background mode to limit the tracing
+// overhead and remove sensitive information from traces.
+
+#include <string>
+
+#include "base/base_export.h"
+
+namespace base {
+namespace trace_event {
+
+// Checks if the given |mdp_name| is in the whitelist.
+bool BASE_EXPORT IsMemoryDumpProviderWhitelisted(const char* mdp_name);
+
+// Checks if the given |name| matches any of the whitelisted patterns.
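+// For illustration (the hex address here is a made-up example): a name such
+// as "net/url_request_context/unknown/0x1a2b/http_cache" is normalized to
+// "net/url_request_context/unknown/0x?/http_cache" before the lookup, so it
+// is considered whitelisted.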
+bool BASE_EXPORT IsMemoryAllocatorDumpNameWhitelisted(const std::string& name);
+
+// The whitelist is replaced with the given list for tests. The last element of
+// the list must be nullptr.
+void BASE_EXPORT SetDumpProviderWhitelistForTesting(const char* const* list);
+void BASE_EXPORT
+SetAllocatorDumpNameWhitelistForTesting(const char* const* list);
+
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_MEMORY_INFRA_BACKGROUND_WHITELIST_H_
diff --git a/base/trace_event/memory_peak_detector.cc b/base/trace_event/memory_peak_detector.cc
new file mode 100644
index 0000000..5419594
--- /dev/null
+++ b/base/trace_event/memory_peak_detector.cc
@@ -0,0 +1,288 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_peak_detector.h"
+
+#include <algorithm>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/sys_info.h"
+#include "base/threading/sequenced_task_runner_handle.h"
+#include "base/time/time.h"
+#include "base/trace_event/memory_dump_manager.h"
+#include "base/trace_event/memory_dump_provider_info.h"
+#include "base/trace_event/trace_event.h"
+#include "build/build_config.h"
+
+namespace base {
+namespace trace_event {
+
+// static
+MemoryPeakDetector* MemoryPeakDetector::GetInstance() {
+  static MemoryPeakDetector* instance = new MemoryPeakDetector();
+  return instance;
+}
+
+MemoryPeakDetector::MemoryPeakDetector()
+    : generation_(0),
+      state_(NOT_INITIALIZED),
+      poll_tasks_count_for_testing_(0) {}
+
+MemoryPeakDetector::~MemoryPeakDetector() {
+  // This is hit only in tests, in which case the test is expected to TearDown()
+  // cleanly and not leave the peak detector running.
+  DCHECK_EQ(NOT_INITIALIZED, state_);
+}
+
+void MemoryPeakDetector::Setup(
+    const GetDumpProvidersFunction& get_dump_providers_function,
+    const scoped_refptr<SequencedTaskRunner>& task_runner,
+    const OnPeakDetectedCallback& on_peak_detected_callback) {
+  DCHECK(!get_dump_providers_function.is_null());
+  DCHECK(task_runner);
+  DCHECK(!on_peak_detected_callback.is_null());
+  DCHECK(state_ == NOT_INITIALIZED || state_ == DISABLED);
+  DCHECK(dump_providers_.empty());
+  get_dump_providers_function_ = get_dump_providers_function;
+  task_runner_ = task_runner;
+  on_peak_detected_callback_ = on_peak_detected_callback;
+  state_ = DISABLED;
+  config_ = {};
+  ResetPollHistory();
+
+  static_threshold_bytes_ = 0;
+#if !defined(OS_NACL)
+  // Set threshold to 1% of total system memory.
+  static_threshold_bytes_ =
+      static_cast<uint64_t>(SysInfo::AmountOfPhysicalMemory()) / 100;
+#endif
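+  // E.g. on a device with 4 GB of physical memory this picks a threshold of
+  // roughly 40 MB.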
+  // Fallback, mostly for test environments where AmountOfPhysicalMemory() is
+  // broken.
+  static_threshold_bytes_ =
+      std::max(static_threshold_bytes_, static_cast<uint64_t>(5 * 1024 * 1024));
+}
+
+void MemoryPeakDetector::TearDown() {
+  if (task_runner_) {
+    task_runner_->PostTask(
+        FROM_HERE,
+        BindOnce(&MemoryPeakDetector::TearDownInternal, Unretained(this)));
+  }
+  task_runner_ = nullptr;
+}
+
+void MemoryPeakDetector::Start(MemoryPeakDetector::Config config) {
+  if (!config.polling_interval_ms) {
+    NOTREACHED();
+    return;
+  }
+  task_runner_->PostTask(FROM_HERE, BindOnce(&MemoryPeakDetector::StartInternal,
+                                             Unretained(this), config));
+}
+
+void MemoryPeakDetector::Stop() {
+  task_runner_->PostTask(
+      FROM_HERE, BindOnce(&MemoryPeakDetector::StopInternal, Unretained(this)));
+}
+
+void MemoryPeakDetector::Throttle() {
+  if (!task_runner_)
+    return;  // Can be called before Setup().
+  task_runner_->PostTask(
+      FROM_HERE, BindOnce(&MemoryPeakDetector::ResetPollHistory,
+                          Unretained(this), true /* keep_last_sample */));
+}
+
+void MemoryPeakDetector::NotifyMemoryDumpProvidersChanged() {
+  if (!task_runner_)
+    return;  // Can be called before Setup().
+  task_runner_->PostTask(
+      FROM_HERE,
+      BindOnce(&MemoryPeakDetector::ReloadDumpProvidersAndStartPollingIfNeeded,
+               Unretained(this)));
+}
+
+void MemoryPeakDetector::StartInternal(MemoryPeakDetector::Config config) {
+  DCHECK_EQ(DISABLED, state_);
+  state_ = ENABLED;
+  config_ = config;
+  ResetPollHistory();
+
+  // If there are any dump providers available,
+  // NotifyMemoryDumpProvidersChanged will fetch them and start the polling.
+  // Otherwise this will remain in the ENABLED state and the actual polling
+  // will start on the next call to
+  // ReloadDumpProvidersAndStartPollingIfNeeded().
+  // Depending on the sandbox model, it is possible that no polling-capable
+  // dump providers will ever be available.
+  ReloadDumpProvidersAndStartPollingIfNeeded();
+}
+
+void MemoryPeakDetector::StopInternal() {
+  DCHECK_NE(NOT_INITIALIZED, state_);
+  state_ = DISABLED;
+  ++generation_;
+  for (const scoped_refptr<MemoryDumpProviderInfo>& mdp_info : dump_providers_)
+    mdp_info->dump_provider->SuspendFastMemoryPolling();
+  dump_providers_.clear();
+}
+
+void MemoryPeakDetector::TearDownInternal() {
+  StopInternal();
+  get_dump_providers_function_.Reset();
+  on_peak_detected_callback_.Reset();
+  state_ = NOT_INITIALIZED;
+}
+
+void MemoryPeakDetector::ReloadDumpProvidersAndStartPollingIfNeeded() {
+  if (state_ == DISABLED || state_ == NOT_INITIALIZED)
+    return;  // Start() will re-fetch the MDP list later.
+
+  DCHECK((state_ == RUNNING && !dump_providers_.empty()) ||
+         (state_ == ENABLED && dump_providers_.empty()));
+
+  dump_providers_.clear();
+
+  // This is really MemoryDumpManager::GetDumpProvidersForPolling, modulo
+  // testing, which can inject a different function via Setup().
+  get_dump_providers_function_.Run(&dump_providers_);
+
+  if (state_ == ENABLED && !dump_providers_.empty()) {
+    // It's now time to start polling for realz.
+    state_ = RUNNING;
+    task_runner_->PostTask(
+        FROM_HERE, BindOnce(&MemoryPeakDetector::PollMemoryAndDetectPeak,
+                            Unretained(this), ++generation_));
+  } else if (state_ == RUNNING && dump_providers_.empty()) {
+    // Will cause the next PollMemoryAndDetectPeak() task to early return.
+    state_ = ENABLED;
+    ++generation_;
+  }
+}
+
+void MemoryPeakDetector::PollMemoryAndDetectPeak(uint32_t expected_generation) {
+  if (state_ != RUNNING || generation_ != expected_generation)
+    return;
+
+  // We should never end up in a situation where state_ == RUNNING but all dump
+  // providers are gone.
+  DCHECK(!dump_providers_.empty());
+
+  poll_tasks_count_for_testing_++;
+  uint64_t polled_mem_bytes = 0;
+  for (const scoped_refptr<MemoryDumpProviderInfo>& mdp_info :
+       dump_providers_) {
+    DCHECK(mdp_info->options.is_fast_polling_supported);
+    uint64_t value = 0;
+    mdp_info->dump_provider->PollFastMemoryTotal(&value);
+    polled_mem_bytes += value;
+  }
+  if (config_.enable_verbose_poll_tracing) {
+    TRACE_COUNTER1(MemoryDumpManager::kTraceCategory, "PolledMemoryMB",
+                   polled_mem_bytes / 1024 / 1024);
+  }
+
+  // Peak detection logic. Design doc: https://goo.gl/0kOU4A .
+  bool is_peak = false;
+  if (skip_polls_ > 0) {
+    skip_polls_--;
+  } else if (last_dump_memory_total_ == 0) {
+    last_dump_memory_total_ = polled_mem_bytes;
+  } else if (polled_mem_bytes > 0) {
+    int64_t diff_from_last_dump = polled_mem_bytes - last_dump_memory_total_;
+
+    DCHECK_GT(static_threshold_bytes_, 0u);
+    is_peak =
+        diff_from_last_dump > static_cast<int64_t>(static_threshold_bytes_);
+
+    if (!is_peak)
+      is_peak = DetectPeakUsingSlidingWindowStddev(polled_mem_bytes);
+  }
+
+  DCHECK_GT(config_.polling_interval_ms, 0u);
+  SequencedTaskRunnerHandle::Get()->PostDelayedTask(
+      FROM_HERE,
+      BindOnce(&MemoryPeakDetector::PollMemoryAndDetectPeak, Unretained(this),
+               expected_generation),
+      TimeDelta::FromMilliseconds(config_.polling_interval_ms));
+
+  if (!is_peak)
+    return;
+  TRACE_EVENT_INSTANT1(MemoryDumpManager::kTraceCategory,
+                       "Peak memory detected", TRACE_EVENT_SCOPE_PROCESS,
+                       "PolledMemoryMB", polled_mem_bytes / 1024 / 1024);
+  ResetPollHistory(true /* keep_last_sample */);
+  last_dump_memory_total_ = polled_mem_bytes;
+  on_peak_detected_callback_.Run();
+}
+
+bool MemoryPeakDetector::DetectPeakUsingSlidingWindowStddev(
+    uint64_t polled_mem_bytes) {
+  DCHECK(polled_mem_bytes);
+  samples_bytes_[samples_index_] = polled_mem_bytes;
+  samples_index_ = (samples_index_ + 1) % kSlidingWindowNumSamples;
+  float mean = 0;
+  for (uint32_t i = 0; i < kSlidingWindowNumSamples; ++i) {
+    if (samples_bytes_[i] == 0)
+      return false;  // Not enough samples to detect peaks.
+    mean += samples_bytes_[i];
+  }
+  mean /= kSlidingWindowNumSamples;
+  float variance = 0;
+  for (uint32_t i = 0; i < kSlidingWindowNumSamples; ++i) {
+    const float deviation = samples_bytes_[i] - mean;
+    variance += deviation * deviation;
+  }
+  variance /= kSlidingWindowNumSamples;
+
+  // If the stddev is less than 0.2% of the mean, we consider the process
+  // inactive.
+  if (variance < (mean / 500) * (mean / 500))
+    return false;
+
+  // (mean + 3.69 * stddev) corresponds to a value that is higher than the
+  // current sample with 99.99% probability (assuming a normal distribution).
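+  // Worked example (made-up numbers): with a window mean of 100 MB and a
+  // stddev of 2 MB, a sample deviating from the mean by more than ~7.4 MB
+  // (3.69 * 2 MB) is flagged.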
+  const float cur_sample_deviation = polled_mem_bytes - mean;
+  return cur_sample_deviation * cur_sample_deviation > (3.69 * 3.69 * variance);
+}
+
+void MemoryPeakDetector::ResetPollHistory(bool keep_last_sample) {
+  // TODO(primiano,ssid): this logic should probably be revisited. In the case
+  // of Android, the browser process sees the total of all processes' memory in
+  // the same peak detector instance. Perhaps the best thing to do here is to
+  // keep the window of samples around and just bump the skip_polls_.
+  last_dump_memory_total_ = 0;
+  if (keep_last_sample) {
+    const uint32_t prev_index =
+        samples_index_ > 0 ? samples_index_ - 1 : kSlidingWindowNumSamples - 1;
+    last_dump_memory_total_ = samples_bytes_[prev_index];
+  }
+  memset(samples_bytes_, 0, sizeof(samples_bytes_));
+  samples_index_ = 0;
+  skip_polls_ = 0;
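+  // Ceil-divide |min_time_between_peaks_ms| by |polling_interval_ms| to get
+  // the number of polls to skip, e.g. 4 ms between peaks at a 1 ms polling
+  // interval means skipping the next 4 polls.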
+  if (config_.polling_interval_ms > 0) {
+    skip_polls_ =
+        (config_.min_time_between_peaks_ms + config_.polling_interval_ms - 1) /
+        config_.polling_interval_ms;
+  }
+}
+
+void MemoryPeakDetector::SetStaticThresholdForTesting(
+    uint64_t static_threshold_bytes) {
+  DCHECK_EQ(DISABLED, state_);
+  static_threshold_bytes_ = static_threshold_bytes;
+}
+
+MemoryPeakDetector::MemoryPeakDetector::Config::Config()
+    : Config(0, 0, false) {}
+
+MemoryPeakDetector::MemoryPeakDetector::Config::Config(
+    uint32_t polling_interval_ms,
+    uint32_t min_time_between_peaks_ms,
+    bool enable_verbose_poll_tracing)
+    : polling_interval_ms(polling_interval_ms),
+      min_time_between_peaks_ms(min_time_between_peaks_ms),
+      enable_verbose_poll_tracing(enable_verbose_poll_tracing) {}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/memory_peak_detector.h b/base/trace_event/memory_peak_detector.h
new file mode 100644
index 0000000..bbe205b
--- /dev/null
+++ b/base/trace_event/memory_peak_detector.h
@@ -0,0 +1,184 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_MEMORY_PEAK_DETECTOR_H_
+#define BASE_TRACE_EVENT_MEMORY_PEAK_DETECTOR_H_
+
+#include <stdint.h>
+
+#include <memory>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+
+namespace base {
+
+class SequencedTaskRunner;
+
+namespace trace_event {
+
+struct MemoryDumpProviderInfo;
+
+// Detects temporally local memory peaks. Peak detection is based on
+// continuously querying memory usage using MemoryDumpProvider(s) that support
+// fast polling (e.g., ProcessMetricsDumpProvider, which under the hood reads
+// /proc/PID/statm on Linux) and using a combination of:
+// - A static threshold (currently 1% of total system memory).
+// - Sliding window stddev analysis.
+// Design doc: https://goo.gl/0kOU4A .
+// This class is NOT thread-safe, the caller has to ensure linearization of
+// the calls to the public methods. In any case, the public methods do NOT have
+// to be called from the |task_runner| on which the polling tasks run.
+class BASE_EXPORT MemoryPeakDetector {
+ public:
+  using OnPeakDetectedCallback = RepeatingClosure;
+  using DumpProvidersList = std::vector<scoped_refptr<MemoryDumpProviderInfo>>;
+  using GetDumpProvidersFunction = RepeatingCallback<void(DumpProvidersList*)>;
+
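+  // Expected transitions (sketch): NOT_INITIALIZED -> DISABLED via Setup(),
+  // DISABLED -> ENABLED via Start(), ENABLED <-> RUNNING as polling-capable
+  // dump providers (dis)appear, back to DISABLED via Stop() and to
+  // NOT_INITIALIZED via TearDown().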
+  enum State {
+    NOT_INITIALIZED = 0,  // Before Setup()
+    DISABLED,             // Before Start() or after Stop().
+    ENABLED,              // After Start() but no dump_providers_ are available.
+    RUNNING  // After Start(). The PollMemoryAndDetectPeak() task is scheduled.
+  };
+
+  // Peak detector configuration, passed to Start().
+  struct BASE_EXPORT Config {
+    Config();
+    Config(uint32_t polling_interval_ms,
+           uint32_t min_time_between_peaks_ms,
+           bool enable_verbose_poll_tracing);
+
+    // The rate at which memory will be polled. Polls will happen on the task
+    // runner passed to Setup().
+    uint32_t polling_interval_ms;
+
+    // Two consecutive peak detection callbacks will happen at least
+    // |min_time_between_peaks_ms| apart from each other.
+    uint32_t min_time_between_peaks_ms;
+
+    // When enabled causes a TRACE_COUNTER event to be injected in the trace
+    // for each poll (if tracing is enabled).
+    bool enable_verbose_poll_tracing;
+  };
+
+  static MemoryPeakDetector* GetInstance();
+
+  // Configures the peak detector, binding the polling tasks on the given
+  // thread. Setup() can be called several times, provided that: (1) Stop()
+  // is called; (2a) the previous task_runner is flushed or (2b) the task_runner
+  // remains the same.
+  // GetDumpProvidersFunction: is the function that will be invoked to get
+  //   an updated list of polling-capable dump providers. This is really just
+  //   MemoryDumpManager::GetDumpProvidersForPolling, but this extra level of
+  //   indirection allows easier testing.
+  // SequencedTaskRunner: the task runner where PollMemoryAndDetectPeak() will
+  //   be periodically called.
+  // OnPeakDetectedCallback: a callback that will be invoked on the
+  //   given task runner when a memory peak is detected.
+  void Setup(const GetDumpProvidersFunction&,
+             const scoped_refptr<SequencedTaskRunner>&,
+             const OnPeakDetectedCallback&);
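+  //
+  // Minimal usage sketch (the function, task runner and callback names here
+  // are hypothetical placeholders):
+  //
+  //   MemoryPeakDetector* detector = MemoryPeakDetector::GetInstance();
+  //   detector->Setup(my_get_dump_providers_fn, my_task_runner, my_on_peak);
+  //   detector->Start(MemoryPeakDetector::Config(
+  //       10 /* polling_interval_ms */, 60000 /* min_time_between_peaks_ms */,
+  //       false /* enable_verbose_poll_tracing */));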
+
+  // Releases the |task_runner_| and the bound callbacks.
+  void TearDown();
+
+  // This posts a task onto the passed task runner which refreshes the list of
+  // dump providers via the GetDumpProvidersFunction. If at least one dump
+  // provider is available, this starts immediately polling on the task runner.
+  // If not, the detector remains in the ENABLED state and will start polling
+  // automatically (i.e. without requiring another call to Start()) on the
+  // next call to NotifyMemoryDumpProvidersChanged().
+  void Start(Config);
+
+  // Stops the polling on the task runner (if it was active at all). This
+  // doesn't wait for the task runner to drain pending tasks, so a poll may
+  // still happen concurrently with (or immediately after) the Stop() call.
+  // It is the responsibility of the caller to drain or synchronize with the
+  // task runner.
+  void Stop();
+
+  // If Start()-ed, prevents a peak callback from being triggered for the next
+  // |min_time_between_peaks_ms|. No-op if the peak detector is not enabled.
+  void Throttle();
+
+  // Used by MemoryDumpManager to notify that the list of polling-capable dump
+  // providers has changed. The peak detector will reload the list on the next
+  // polling task. This function can be called before Setup(), in which case
+  // it is just a no-op.
+  void NotifyMemoryDumpProvidersChanged();
+
+  void SetStaticThresholdForTesting(uint64_t static_threshold_bytes);
+
+ private:
+  friend class MemoryPeakDetectorTest;
+
+  static constexpr uint32_t kSlidingWindowNumSamples = 50;
+
+  MemoryPeakDetector();
+  ~MemoryPeakDetector();
+
+  // All these methods are always called on the |task_runner_|.
+  void StartInternal(Config);
+  void StopInternal();
+  void TearDownInternal();
+  void ReloadDumpProvidersAndStartPollingIfNeeded();
+  void PollMemoryAndDetectPeak(uint32_t expected_generation);
+  bool DetectPeakUsingSlidingWindowStddev(uint64_t last_sample_bytes);
+  void ResetPollHistory(bool keep_last_sample = false);
+
+  // It is safe to call these testing methods only on the |task_runner_|.
+  State state_for_testing() const { return state_; }
+  uint32_t poll_tasks_count_for_testing() const {
+    return poll_tasks_count_for_testing_;
+  }
+
+  // The task runner that all the internal calls are posted onto. This field
+  // must NOT be accessed by tasks posted on the |task_runner_| itself, because
+  // there might still be outstanding tasks on the |task_runner_| while this
+  // refptr is reset. This can only be safely accessed by the public methods
+  // above, which the client of this class is supposed to call sequentially.
+  scoped_refptr<SequencedTaskRunner> task_runner_;
+
+  // After the Setup() call, the fields below must be accessed only from
+  // the |task_runner_|.
+
+  // Bound function to get an updated list of polling-capable dump providers.
+  GetDumpProvidersFunction get_dump_providers_function_;
+
+  // The callback to invoke when peaks are detected.
+  OnPeakDetectedCallback on_peak_detected_callback_;
+
+  // List of polling-aware dump providers to invoke upon each poll.
+  DumpProvidersList dump_providers_;
+
+  // The generation is incremented every time |state_| changes, and causes
+  // PollMemoryAndDetectPeak() to early out if the posted task doesn't match the
+  // most recent |generation_|. This allows dropping on the floor outstanding
+  // PostDelayedTask()s that refer to an old sequence that was later Stop()-ed
+  // or disabled because of NotifyMemoryDumpProvidersChanged().
+  uint32_t generation_;
+
+  State state_;
+
+  // Config passed to Start(), only valid when |state_| = {ENABLED, RUNNING}.
+  Config config_;
+
+  uint64_t static_threshold_bytes_;
+  uint32_t skip_polls_;
+  uint64_t last_dump_memory_total_;
+  uint64_t samples_bytes_[kSlidingWindowNumSamples];
+  uint32_t samples_index_;
+  uint32_t poll_tasks_count_for_testing_;
+
+  DISALLOW_COPY_AND_ASSIGN(MemoryPeakDetector);
+};
+
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_MEMORY_PEAK_DETECTOR_H_
diff --git a/base/trace_event/memory_peak_detector_unittest.cc b/base/trace_event/memory_peak_detector_unittest.cc
new file mode 100644
index 0000000..bc10c80
--- /dev/null
+++ b/base/trace_event/memory_peak_detector_unittest.cc
@@ -0,0 +1,564 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_peak_detector.h"
+
+#include <memory>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/run_loop.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/trace_event/memory_dump_provider.h"
+#include "base/trace_event/memory_dump_provider_info.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::_;
+using ::testing::Invoke;
+using ::testing::Return;
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+const TimeDelta kMs = TimeDelta::FromMilliseconds(1);
+const MemoryPeakDetector::Config kConfigNoCallbacks(
+    1 /* polling_interval_ms */,
+    60000 /* min_time_between_peaks_ms */,
+    false /* enable_verbose_poll_tracing */
+    );
+
+class MockMemoryDumpProvider : public MemoryDumpProvider {
+ public:
+  bool OnMemoryDump(const MemoryDumpArgs&, ProcessMemoryDump*) override {
+    NOTREACHED();
+    return true;
+  }
+
+  MOCK_METHOD1(PollFastMemoryTotal, void(uint64_t*));
+};
+
+// Wrapper to use gmock on a callback.
+struct OnPeakDetectedWrapper {
+  MOCK_METHOD0(OnPeak, void());
+};
+
+}  // namespace
+
+class MemoryPeakDetectorTest : public testing::Test {
+ public:
+  struct FriendDeleter {
+    void operator()(MemoryPeakDetector* inst) { delete inst; }
+  };
+
+  MemoryPeakDetectorTest() : testing::Test() {}
+  static const uint64_t kSlidingWindowNumSamples =
+      MemoryPeakDetector::kSlidingWindowNumSamples;
+
+  std::unique_ptr<MemoryPeakDetector, FriendDeleter> NewInstance() {
+    return std::unique_ptr<MemoryPeakDetector, FriendDeleter>(
+        new MemoryPeakDetector());
+  }
+
+  void RestartThreadAndReinitializePeakDetector() {
+    bg_thread_.reset(new Thread("Peak Detector Test Thread"));
+    bg_thread_->Start();
+    peak_detector_ = NewInstance();
+    peak_detector_->Setup(
+        Bind(&MemoryPeakDetectorTest::MockGetDumpProviders, Unretained(this)),
+        bg_thread_->task_runner(),
+        Bind(&OnPeakDetectedWrapper::OnPeak, Unretained(&on_peak_callback_)));
+  }
+
+  void SetUp() override {
+    get_mdp_call_count_ = 0;
+    RestartThreadAndReinitializePeakDetector();
+  }
+
+  void TearDown() override {
+    peak_detector_->TearDown();
+    bg_thread_->FlushForTesting();
+    EXPECT_EQ(MemoryPeakDetector::NOT_INITIALIZED, GetPeakDetectorState());
+    bg_thread_.reset();
+    dump_providers_.clear();
+  }
+
+  // Calls MemoryPeakDetector::state_for_testing() on the bg thread and returns
+  // the result on the current thread.
+  MemoryPeakDetector::State GetPeakDetectorState() {
+    WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
+                      WaitableEvent::InitialState::NOT_SIGNALED);
+    MemoryPeakDetector::State res = MemoryPeakDetector::NOT_INITIALIZED;
+    auto get_fn = [](MemoryPeakDetector* peak_detector, WaitableEvent* evt,
+                     MemoryPeakDetector::State* res) {
+      *res = peak_detector->state_for_testing();
+      evt->Signal();
+    };
+    bg_thread_->task_runner()->PostTask(
+        FROM_HERE, BindOnce(get_fn, Unretained(&*peak_detector_),
+                            Unretained(&evt), Unretained(&res)));
+    evt.Wait();
+    return res;
+  }
+
+  // Calls MemoryPeakDetector::poll_tasks_count_for_testing() on the bg thread
+  // and returns the result on the current thread.
+  uint32_t GetNumPollingTasksRan() {
+    uint32_t res = 0;
+    auto get_fn = [](MemoryPeakDetector* peak_detector, WaitableEvent* evt,
+                     uint32_t* res) {
+      *res = peak_detector->poll_tasks_count_for_testing();
+      evt->Signal();
+    };
+
+    WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
+                      WaitableEvent::InitialState::NOT_SIGNALED);
+    bg_thread_->task_runner()->PostTask(
+        FROM_HERE, BindOnce(get_fn, Unretained(&*peak_detector_),
+                            Unretained(&evt), Unretained(&res)));
+    evt.Wait();
+    return res;
+  }
+
+  // Runs the peak detector with a single mock MDP and the given |config|.
+  // The mock MDP will invoke the |poll_function| on any call to
+  // PollFastMemoryTotal(), until |num_samples| have been polled.
+  // It returns the number of peaks detected.
+  uint32_t RunWithCustomPollFunction(
+      MemoryPeakDetector::Config config,
+      uint32_t num_samples,
+      RepeatingCallback<uint64_t(uint32_t)> poll_function) {
+    WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
+                      WaitableEvent::InitialState::NOT_SIGNALED);
+    scoped_refptr<MemoryDumpProviderInfo> mdp = CreateMockDumpProvider();
+    dump_providers_.push_back(mdp);
+    uint32_t cur_sample_idx = 0;
+    EXPECT_CALL(GetMockMDP(mdp), PollFastMemoryTotal(_))
+        .WillRepeatedly(Invoke(
+            [&cur_sample_idx, &evt, poll_function, num_samples](uint64_t* mem) {
+              if (cur_sample_idx >= num_samples) {
+                *mem = 1;
+                evt.Signal();
+              } else {
+                *mem = poll_function.Run(cur_sample_idx++);
+              }
+            }));
+
+    uint32_t num_peaks = 0;
+    EXPECT_CALL(on_peak_callback_, OnPeak())
+        .WillRepeatedly(Invoke([&num_peaks] { num_peaks++; }));
+    peak_detector_->Start(config);
+    evt.Wait();  // Wait for |num_samples| invocations of PollFastMemoryTotal().
+    peak_detector_->Stop();
+    EXPECT_EQ(num_samples, cur_sample_idx);
+    EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
+    return num_peaks;
+  }
+
+  // Called on the |bg_thread_|.
+  void MockGetDumpProviders(MemoryPeakDetector::DumpProvidersList* mdps) {
+    get_mdp_call_count_++;
+    *mdps = dump_providers_;
+  }
+
+  uint32_t GetNumGetDumpProvidersCalls() {
+    bg_thread_->FlushForTesting();
+    return get_mdp_call_count_;
+  }
+
+  scoped_refptr<MemoryDumpProviderInfo> CreateMockDumpProvider() {
+    std::unique_ptr<MockMemoryDumpProvider> mdp(new MockMemoryDumpProvider());
+    MemoryDumpProvider::Options opt;
+    opt.is_fast_polling_supported = true;
+    scoped_refptr<MemoryDumpProviderInfo> mdp_info(new MemoryDumpProviderInfo(
+        mdp.get(), "Mock MDP", nullptr, opt,
+        false /* whitelisted_for_background_mode */));
+
+    // The |mdp| instance will be destroyed together with the |mdp_info|.
+    mdp_info->owned_dump_provider = std::move(mdp);
+    return mdp_info;
+  }
+
+  static MockMemoryDumpProvider& GetMockMDP(
+      const scoped_refptr<MemoryDumpProviderInfo>& mdp_info) {
+    return *static_cast<MockMemoryDumpProvider*>(mdp_info->dump_provider);
+  }
+
+  static uint64_t PollFunctionThatCausesPeakViaStdDev(uint32_t sample_idx) {
+    // Start with a baseline of 50 MB.
+    if (sample_idx < kSlidingWindowNumSamples)
+      return 50000 + (sample_idx % 3) * 100;
+
+    // Then 10 samples around 80 MB.
+    if (sample_idx < 10 + kSlidingWindowNumSamples)
+      return 80000 + (sample_idx % 3) * 200;
+
+    // Then back to 60 MB.
+    if (sample_idx < 2 * kSlidingWindowNumSamples)
+      return 60000 + (sample_idx % 3) * 100;
+
+    // Then 20 samples around 120 MB.
+    if (sample_idx < 20 + 2 * kSlidingWindowNumSamples)
+      return 120000 + (sample_idx % 3) * 200;
+
+    // Then back down to around 50 MB (idle) until the end.
+    return 50000 + (sample_idx % 3) * 100;
+  }
+
+ protected:
+  MemoryPeakDetector::DumpProvidersList dump_providers_;
+  uint32_t get_mdp_call_count_;
+  std::unique_ptr<MemoryPeakDetector, FriendDeleter> peak_detector_;
+  std::unique_ptr<Thread> bg_thread_;
+  OnPeakDetectedWrapper on_peak_callback_;
+};
+
+const uint64_t MemoryPeakDetectorTest::kSlidingWindowNumSamples;
+
+TEST_F(MemoryPeakDetectorTest, GetDumpProvidersFunctionCalled) {
+  EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
+  peak_detector_->Start(kConfigNoCallbacks);
+  EXPECT_EQ(1u, GetNumGetDumpProvidersCalls());
+  EXPECT_EQ(MemoryPeakDetector::ENABLED, GetPeakDetectorState());
+
+  peak_detector_->Stop();
+  EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
+  EXPECT_EQ(0u, GetNumPollingTasksRan());
+}
+
+TEST_F(MemoryPeakDetectorTest, ThrottleAndNotifyBeforeInitialize) {
+  peak_detector_->TearDown();
+
+  WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
+                    WaitableEvent::InitialState::NOT_SIGNALED);
+  scoped_refptr<MemoryDumpProviderInfo> mdp = CreateMockDumpProvider();
+  EXPECT_CALL(GetMockMDP(mdp), PollFastMemoryTotal(_))
+      .WillRepeatedly(Invoke([&evt](uint64_t*) { evt.Signal(); }));
+  dump_providers_.push_back(mdp);
+  peak_detector_->Throttle();
+  peak_detector_->NotifyMemoryDumpProvidersChanged();
+  EXPECT_EQ(MemoryPeakDetector::NOT_INITIALIZED, GetPeakDetectorState());
+  RestartThreadAndReinitializePeakDetector();
+
+  peak_detector_->Start(kConfigNoCallbacks);
+  EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
+  evt.Wait();  // Wait for a PollFastMemoryTotal() call.
+
+  peak_detector_->Stop();
+  EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
+  EXPECT_EQ(1u, GetNumGetDumpProvidersCalls());
+  EXPECT_GE(GetNumPollingTasksRan(), 1u);
+}
+
+TEST_F(MemoryPeakDetectorTest, DoubleStop) {
+  peak_detector_->Start(kConfigNoCallbacks);
+  EXPECT_EQ(MemoryPeakDetector::ENABLED, GetPeakDetectorState());
+
+  peak_detector_->Stop();
+  EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
+
+  peak_detector_->Stop();
+  EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
+
+  EXPECT_EQ(1u, GetNumGetDumpProvidersCalls());
+  EXPECT_EQ(0u, GetNumPollingTasksRan());
+}
+
+TEST_F(MemoryPeakDetectorTest, OneDumpProviderRegisteredBeforeStart) {
+  WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
+                    WaitableEvent::InitialState::NOT_SIGNALED);
+  scoped_refptr<MemoryDumpProviderInfo> mdp = CreateMockDumpProvider();
+  EXPECT_CALL(GetMockMDP(mdp), PollFastMemoryTotal(_))
+      .WillRepeatedly(Invoke([&evt](uint64_t*) { evt.Signal(); }));
+  dump_providers_.push_back(mdp);
+
+  peak_detector_->Start(kConfigNoCallbacks);
+  evt.Wait();  // Signaled when PollFastMemoryTotal() is called on the MockMDP.
+  EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
+
+  peak_detector_->Stop();
+  EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
+  EXPECT_EQ(1u, GetNumGetDumpProvidersCalls());
+  EXPECT_GT(GetNumPollingTasksRan(), 0u);
+}
+
+TEST_F(MemoryPeakDetectorTest, ReInitializeAndRebindToNewThread) {
+  WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
+                    WaitableEvent::InitialState::NOT_SIGNALED);
+  scoped_refptr<MemoryDumpProviderInfo> mdp = CreateMockDumpProvider();
+  EXPECT_CALL(GetMockMDP(mdp), PollFastMemoryTotal(_))
+      .WillRepeatedly(Invoke([&evt](uint64_t*) { evt.Signal(); }));
+  dump_providers_.push_back(mdp);
+
+  for (int i = 0; i < 5; ++i) {
+    evt.Reset();
+    peak_detector_->Start(kConfigNoCallbacks);
+    evt.Wait();  // Wait for a PollFastMemoryTotal() call.
+    // Check that calling TearDown implicitly does a Stop().
+    peak_detector_->TearDown();
+
+    // Reinitialize and re-bind to a new task runner.
+    RestartThreadAndReinitializePeakDetector();
+  }
+}
+
+TEST_F(MemoryPeakDetectorTest, OneDumpProviderRegisteredOutOfBand) {
+  peak_detector_->Start(kConfigNoCallbacks);
+  EXPECT_EQ(MemoryPeakDetector::ENABLED, GetPeakDetectorState());
+  EXPECT_EQ(1u, GetNumGetDumpProvidersCalls());
+
+  // Check that no poll tasks are posted before any dump provider is registered.
+  PlatformThread::Sleep(5 * kConfigNoCallbacks.polling_interval_ms * kMs);
+  EXPECT_EQ(0u, GetNumPollingTasksRan());
+
+  // Register the MDP after Start() has been issued and expect that the
+  // PeakDetector transitions ENABLED -> RUNNING on the next
+  // NotifyMemoryDumpProvidersChanged() call.
+  WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
+                    WaitableEvent::InitialState::NOT_SIGNALED);
+  scoped_refptr<MemoryDumpProviderInfo> mdp = CreateMockDumpProvider();
+  EXPECT_CALL(GetMockMDP(mdp), PollFastMemoryTotal(_))
+      .WillRepeatedly(Invoke([&evt](uint64_t*) { evt.Signal(); }));
+  dump_providers_.push_back(mdp);
+  peak_detector_->NotifyMemoryDumpProvidersChanged();
+
+  evt.Wait();  // Signaled when PollFastMemoryTotal() is called on the MockMDP.
+  EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
+  EXPECT_EQ(2u, GetNumGetDumpProvidersCalls());
+
+  // Now simulate the unregistration and expect that the PeakDetector
+  // transitions back to ENABLED.
+  dump_providers_.clear();
+  peak_detector_->NotifyMemoryDumpProvidersChanged();
+  EXPECT_EQ(MemoryPeakDetector::ENABLED, GetPeakDetectorState());
+  EXPECT_EQ(3u, GetNumGetDumpProvidersCalls());
+  uint32_t num_poll_tasks = GetNumPollingTasksRan();
+  EXPECT_GT(num_poll_tasks, 0u);
+
+  // At this point, no more polling tasks should be posted.
+  PlatformThread::Sleep(5 * kConfigNoCallbacks.polling_interval_ms * kMs);
+  peak_detector_->Stop();
+  EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
+  EXPECT_EQ(num_poll_tasks, GetNumPollingTasksRan());
+}
+
+// Test that a sequence of Start()/Stop() back-to-back doesn't end up creating
+// several outstanding timer tasks and instead respects the polling_interval_ms.
+TEST_F(MemoryPeakDetectorTest, StartStopQuickly) {
+  WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
+                    WaitableEvent::InitialState::NOT_SIGNALED);
+  scoped_refptr<MemoryDumpProviderInfo> mdp = CreateMockDumpProvider();
+  dump_providers_.push_back(mdp);
+  const uint32_t kNumPolls = 20;
+  uint32_t polls_done = 0;
+  EXPECT_CALL(GetMockMDP(mdp), PollFastMemoryTotal(_))
+      .WillRepeatedly(Invoke([&polls_done, &evt, kNumPolls](uint64_t*) {
+        if (++polls_done == kNumPolls)
+          evt.Signal();
+      }));
+
+  const TimeTicks tstart = TimeTicks::Now();
+  for (int i = 0; i < 5; i++) {
+    peak_detector_->Start(kConfigNoCallbacks);
+    peak_detector_->Stop();
+  }
+
+  bg_thread_->task_runner()->PostTask(
+      FROM_HERE, base::BindOnce([](uint32_t* polls_done) { *polls_done = 0; },
+                                &polls_done));
+
+  peak_detector_->Start(kConfigNoCallbacks);
+  EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
+  evt.Wait();  // Wait for kNumPolls.
+  const double time_ms = (TimeTicks::Now() - tstart).InMillisecondsF();
+
+  EXPECT_GE(time_ms, (kNumPolls - 1) * kConfigNoCallbacks.polling_interval_ms);
+  peak_detector_->Stop();
+}
+
+TEST_F(MemoryPeakDetectorTest, RegisterAndUnregisterTwoDumpProviders) {
+  WaitableEvent evt1(WaitableEvent::ResetPolicy::MANUAL,
+                     WaitableEvent::InitialState::NOT_SIGNALED);
+  WaitableEvent evt2(WaitableEvent::ResetPolicy::MANUAL,
+                     WaitableEvent::InitialState::NOT_SIGNALED);
+  scoped_refptr<MemoryDumpProviderInfo> mdp1 = CreateMockDumpProvider();
+  scoped_refptr<MemoryDumpProviderInfo> mdp2 = CreateMockDumpProvider();
+  EXPECT_CALL(GetMockMDP(mdp1), PollFastMemoryTotal(_))
+      .WillRepeatedly(Invoke([&evt1](uint64_t*) { evt1.Signal(); }));
+  EXPECT_CALL(GetMockMDP(mdp2), PollFastMemoryTotal(_))
+      .WillRepeatedly(Invoke([&evt2](uint64_t*) { evt2.Signal(); }));
+
+  // Register only one MDP and start the detector.
+  dump_providers_.push_back(mdp1);
+  peak_detector_->Start(kConfigNoCallbacks);
+  EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
+
+  // Wait for one poll task and then register also the other one.
+  evt1.Wait();
+  dump_providers_.push_back(mdp2);
+  peak_detector_->NotifyMemoryDumpProvidersChanged();
+  evt2.Wait();
+  EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
+
+  // Now unregister the first MDP and check that everything is still running.
+  dump_providers_.erase(dump_providers_.begin());
+  peak_detector_->NotifyMemoryDumpProvidersChanged();
+  EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
+
+  // Now unregister both and check that the detector goes to idle.
+  dump_providers_.clear();
+  peak_detector_->NotifyMemoryDumpProvidersChanged();
+  EXPECT_EQ(MemoryPeakDetector::ENABLED, GetPeakDetectorState());
+
+  // Now re-register both and check that the detector re-activates posting
+  // new polling tasks.
+  uint32_t num_poll_tasks = GetNumPollingTasksRan();
+  evt1.Reset();
+  evt2.Reset();
+  dump_providers_.push_back(mdp1);
+  dump_providers_.push_back(mdp2);
+  peak_detector_->NotifyMemoryDumpProvidersChanged();
+  evt1.Wait();
+  evt2.Wait();
+  EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
+  EXPECT_GT(GetNumPollingTasksRan(), num_poll_tasks);
+
+  // Stop everything, tear down the MDPs, restart the detector and check that
+  // the detector doesn't accidentally try to re-access them.
+  peak_detector_->Stop();
+  EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
+  dump_providers_.clear();
+  mdp1 = nullptr;
+  mdp2 = nullptr;
+
+  num_poll_tasks = GetNumPollingTasksRan();
+  peak_detector_->Start(kConfigNoCallbacks);
+  EXPECT_EQ(MemoryPeakDetector::ENABLED, GetPeakDetectorState());
+  PlatformThread::Sleep(5 * kConfigNoCallbacks.polling_interval_ms * kMs);
+
+  peak_detector_->Stop();
+  EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
+  EXPECT_EQ(num_poll_tasks, GetNumPollingTasksRan());
+
+  EXPECT_EQ(6u, GetNumGetDumpProvidersCalls());
+}
+
+// Tests the behavior of the static threshold detector, which is supposed to
+// detect a peak whenever an increase >= threshold is observed.
+TEST_F(MemoryPeakDetectorTest, StaticThreshold) {
+  const uint32_t kNumSamples = 2 * kSlidingWindowNumSamples;
+  constexpr uint32_t kNumSamplesPerStep = 10;
+  constexpr uint64_t kThreshold = 1000000;
+  peak_detector_->SetStaticThresholdForTesting(kThreshold);
+  const MemoryPeakDetector::Config kConfig(
+      1 /* polling_interval_ms */, 0 /* min_time_between_peaks_ms */,
+      false /* enable_verbose_poll_tracing */
+      );
+
+  // The mocked PollFastMemoryTotal() will return a step function,
+  // e.g. (1, 1, 1, 5, 5, 5, ...) where the steps are 2x threshold, in order to
+  // trigger only the static threshold logic.
+  auto poll_fn = Bind(
+      [](const uint32_t kNumSamplesPerStep, const uint64_t kThreshold,
+         uint32_t sample_idx) -> uint64_t {
+        return (1 + sample_idx / kNumSamplesPerStep) * 2 * kThreshold;
+      },
+      kNumSamplesPerStep, kThreshold);
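+  // With 100 samples and a step every 10 samples there are 10 plateaus, i.e.
+  // 9 upward transitions, each of which should be reported as one peak.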
+  uint32_t num_peaks = RunWithCustomPollFunction(kConfig, kNumSamples, poll_fn);
+  EXPECT_EQ(kNumSamples / kNumSamplesPerStep - 1, num_peaks);
+}
+
+// Checks the throttling logic of Config's |min_time_between_peaks_ms|.
+TEST_F(MemoryPeakDetectorTest, PeakCallbackThrottling) {
+  const size_t kNumSamples = 2 * kSlidingWindowNumSamples;
+  constexpr uint64_t kThreshold = 1000000;
+  peak_detector_->SetStaticThresholdForTesting(kThreshold);
+  const MemoryPeakDetector::Config kConfig(
+      1 /* polling_interval_ms */, 4 /* min_time_between_peaks_ms */,
+      false /* enable_verbose_poll_tracing */
+      );
+
+  // Each mock value returned is N * 2 * threshold, so all of them would be
+  // eligible to be a peak if throttling wasn't enabled.
+  auto poll_fn = Bind(
+      [](uint64_t kThreshold, uint32_t sample_idx) -> uint64_t {
+        return (sample_idx + 1) * 2 * kThreshold;
+      },
+      kThreshold);
+  uint32_t num_peaks = RunWithCustomPollFunction(kConfig, kNumSamples, poll_fn);
+  const uint32_t kExpectedThrottlingRate =
+      kConfig.min_time_between_peaks_ms / kConfig.polling_interval_ms;
+  EXPECT_LT(num_peaks, kNumSamples / kExpectedThrottlingRate);
+}
+
+TEST_F(MemoryPeakDetectorTest, StdDev) {
+  // Set the threshold to some arbitrarily high value, so that the static
+  // threshold logic is not hit in this test.
+  constexpr uint64_t kThreshold = 1024 * 1024 * 1024;
+  peak_detector_->SetStaticThresholdForTesting(kThreshold);
+  const size_t kNumSamples = 3 * kSlidingWindowNumSamples;
+  const MemoryPeakDetector::Config kConfig(
+      1 /* polling_interval_ms */, 0 /* min_time_between_peaks_ms */,
+      false /* enable_verbose_poll_tracing */
+      );
+
+  auto poll_fn = Bind(&PollFunctionThatCausesPeakViaStdDev);
+  uint32_t num_peaks = RunWithCustomPollFunction(kConfig, kNumSamples, poll_fn);
+  EXPECT_EQ(2u, num_peaks);  // 80 MB, 120 MB.
+}
+
+// Tests that Throttle() actually holds back peak notifications.
+TEST_F(MemoryPeakDetectorTest, Throttle) {
+  constexpr uint64_t kThreshold = 1024 * 1024 * 1024;
+  const uint32_t kNumSamples = 3 * kSlidingWindowNumSamples;
+  peak_detector_->SetStaticThresholdForTesting(kThreshold);
+  const MemoryPeakDetector::Config kConfig(
+      1 /* polling_interval_ms */, 0 /* min_time_between_peaks_ms */,
+      false /* enable_verbose_poll_tracing */
+      );
+
+  auto poll_fn = Bind(
+      [](MemoryPeakDetector* peak_detector, uint32_t sample_idx) -> uint64_t {
+        if (sample_idx % 20 == 20 - 1)
+          peak_detector->Throttle();
+        return PollFunctionThatCausesPeakViaStdDev(sample_idx);
+      },
+      Unretained(&*peak_detector_));
+  uint32_t num_peaks = RunWithCustomPollFunction(kConfig, kNumSamples, poll_fn);
+  EXPECT_EQ(0u, num_peaks);
+}
+
+// Tests that the window's stddev state is not carried over through
+// Stop() -> Start() sequences.
+TEST_F(MemoryPeakDetectorTest, RestartClearsState) {
+  constexpr uint64_t kThreshold = 1024 * 1024 * 1024;
+  peak_detector_->SetStaticThresholdForTesting(kThreshold);
+  const size_t kNumSamples = 3 * kSlidingWindowNumSamples;
+  const MemoryPeakDetector::Config kConfig(
+      1 /* polling_interval_ms */, 0 /* min_time_between_peaks_ms */,
+      false /* enable_verbose_poll_tracing */
+      );
+  auto poll_fn = Bind(
+      [](MemoryPeakDetector* peak_detector,
+         const uint32_t kSlidingWindowNumSamples,
+         MemoryPeakDetector::Config kConfig, uint32_t sample_idx) -> uint64_t {
+        if (sample_idx % kSlidingWindowNumSamples ==
+            kSlidingWindowNumSamples - 1) {
+          peak_detector->Stop();
+          peak_detector->Start(kConfig);
+        }
+        return PollFunctionThatCausesPeakViaStdDev(sample_idx);
+      },
+      Unretained(&*peak_detector_), kSlidingWindowNumSamples, kConfig);
+  uint32_t num_peaks = RunWithCustomPollFunction(kConfig, kNumSamples, poll_fn);
+  EXPECT_EQ(0u, num_peaks);
+}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/memory_usage_estimator.cc b/base/trace_event/memory_usage_estimator.cc
new file mode 100644
index 0000000..c769d5b
--- /dev/null
+++ b/base/trace_event/memory_usage_estimator.cc
@@ -0,0 +1,14 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_usage_estimator.h"
+
+namespace base {
+namespace trace_event {
+
+template size_t EstimateMemoryUsage(const std::string&);
+template size_t EstimateMemoryUsage(const string16&);
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/memory_usage_estimator.h b/base/trace_event/memory_usage_estimator.h
new file mode 100644
index 0000000..214c64a
--- /dev/null
+++ b/base/trace_event/memory_usage_estimator.h
@@ -0,0 +1,654 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_MEMORY_USAGE_ESTIMATOR_H_
+#define BASE_TRACE_EVENT_MEMORY_USAGE_ESTIMATOR_H_
+
+#include <stdint.h>
+
+#include <array>
+#include <deque>
+#include <list>
+#include <map>
+#include <memory>
+#include <queue>
+#include <set>
+#include <stack>
+#include <string>
+#include <type_traits>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/containers/circular_deque.h"
+#include "base/containers/flat_map.h"
+#include "base/containers/flat_set.h"
+#include "base/containers/linked_list.h"
+#include "base/containers/mru_cache.h"
+#include "base/containers/queue.h"
+#include "base/stl_util.h"
+#include "base/strings/string16.h"
+#include "base/template_util.h"
+
+// Composable memory usage estimators.
+//
+// This file defines a set of EstimateMemoryUsage(object) functions that return
+// approximate memory usage of their argument.
+//
+// The ultimate goal is to make memory usage estimation for a class simply a
+// matter of aggregating EstimateMemoryUsage() results over all fields.
+//
+// That is achieved via composability: if EstimateMemoryUsage() is defined
+// for T then EstimateMemoryUsage() is also defined for any combination of
+// containers holding T (e.g. std::map<int, std::vector<T>>).
+//
+// There are two ways of defining EstimateMemoryUsage() for a type:
+//
+// 1. As a global function 'size_t EstimateMemoryUsage(T)' in the
+//    base::trace_event namespace.
+//
+// 2. As a 'size_t T::EstimateMemoryUsage() const' method. In this case the
+//    EstimateMemoryUsage(T) function in the base::trace_event namespace is
+//    provided automatically.
+//
+// Here is an example implementation:
+//
+// size_t foo::bar::MyClass::EstimateMemoryUsage() const {
+//   return base::trace_event::EstimateMemoryUsage(name_) +
+//          base::trace_event::EstimateMemoryUsage(id_) +
+//          base::trace_event::EstimateMemoryUsage(items_);
+// }
+//
+// The approach is simple: first call EstimateMemoryUsage() on all members,
+// then recursively fix compilation errors that are caused by types not
+// implementing EstimateMemoryUsage().
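+//
+// Thanks to composability, a call site can then be as simple as (sketch):
+//
+//   std::map<std::string, std::vector<int>> index;
+//   size_t usage = base::trace_event::EstimateMemoryUsage(index);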
+
+namespace base {
+namespace trace_event {
+
+// Declarations
+
+// If T declares an 'EstimateMemoryUsage() const' member function, then the
+// global function EstimateMemoryUsage(T) is available, and just calls
+// the member function.
+template <class T>
+auto EstimateMemoryUsage(const T& object)
+    -> decltype(object.EstimateMemoryUsage());
+
+// String
+
+template <class C, class T, class A>
+size_t EstimateMemoryUsage(const std::basic_string<C, T, A>& string);
+
+// Arrays
+
+template <class T, size_t N>
+size_t EstimateMemoryUsage(const std::array<T, N>& array);
+
+template <class T, size_t N>
+size_t EstimateMemoryUsage(T (&array)[N]);
+
+template <class T>
+size_t EstimateMemoryUsage(const T* array, size_t array_length);
+
+// std::unique_ptr
+
+template <class T, class D>
+size_t EstimateMemoryUsage(const std::unique_ptr<T, D>& ptr);
+
+template <class T, class D>
+size_t EstimateMemoryUsage(const std::unique_ptr<T[], D>& array,
+                           size_t array_length);
+
+// std::shared_ptr
+
+template <class T>
+size_t EstimateMemoryUsage(const std::shared_ptr<T>& ptr);
+
+// Containers
+
+template <class F, class S>
+size_t EstimateMemoryUsage(const std::pair<F, S>& pair);
+
+template <class T, class A>
+size_t EstimateMemoryUsage(const std::vector<T, A>& vector);
+
+template <class T, class A>
+size_t EstimateMemoryUsage(const std::list<T, A>& list);
+
+template <class T>
+size_t EstimateMemoryUsage(const base::LinkedList<T>& list);
+
+template <class T, class C, class A>
+size_t EstimateMemoryUsage(const std::set<T, C, A>& set);
+
+template <class T, class C, class A>
+size_t EstimateMemoryUsage(const std::multiset<T, C, A>& set);
+
+template <class K, class V, class C, class A>
+size_t EstimateMemoryUsage(const std::map<K, V, C, A>& map);
+
+template <class K, class V, class C, class A>
+size_t EstimateMemoryUsage(const std::multimap<K, V, C, A>& map);
+
+template <class T, class H, class KE, class A>
+size_t EstimateMemoryUsage(const std::unordered_set<T, H, KE, A>& set);
+
+template <class T, class H, class KE, class A>
+size_t EstimateMemoryUsage(const std::unordered_multiset<T, H, KE, A>& set);
+
+template <class K, class V, class H, class KE, class A>
+size_t EstimateMemoryUsage(const std::unordered_map<K, V, H, KE, A>& map);
+
+template <class K, class V, class H, class KE, class A>
+size_t EstimateMemoryUsage(const std::unordered_multimap<K, V, H, KE, A>& map);
+
+template <class T, class A>
+size_t EstimateMemoryUsage(const std::deque<T, A>& deque);
+
+template <class T, class C>
+size_t EstimateMemoryUsage(const std::queue<T, C>& queue);
+
+template <class T, class C>
+size_t EstimateMemoryUsage(const std::priority_queue<T, C>& queue);
+
+template <class T, class C>
+size_t EstimateMemoryUsage(const std::stack<T, C>& stack);
+
+template <class T>
+size_t EstimateMemoryUsage(const base::circular_deque<T>& deque);
+
+template <class T, class C>
+size_t EstimateMemoryUsage(const base::flat_set<T, C>& set);
+
+template <class K, class V, class C>
+size_t EstimateMemoryUsage(const base::flat_map<K, V, C>& map);
+
+template <class Key,
+          class Payload,
+          class HashOrComp,
+          template <typename, typename, typename> class Map>
+size_t EstimateMemoryUsage(const MRUCacheBase<Key, Payload, HashOrComp, Map>&);
+
+// TODO(dskiba):
+//   std::forward_list
+
+// Definitions
+
+namespace internal {
+
+// HasEMU<T>::value is true iff EstimateMemoryUsage(T) is available.
+// (This is the default version, which is false.)
+template <class T, class X = void>
+struct HasEMU : std::false_type {};
+
+// This HasEMU specialization is only picked up if there exists a function
+// EstimateMemoryUsage(const T&) that returns size_t. Simpler ways to
+// achieve this don't work on MSVC.
+template <class T>
+struct HasEMU<
+    T,
+    typename std::enable_if<std::is_same<
+        size_t,
+        decltype(EstimateMemoryUsage(std::declval<const T&>()))>::value>::type>
+    : std::true_type {};
+
+// EMUCaller<T> does three things:
+// 1. Defines Call() method that calls EstimateMemoryUsage(T) if it's
+//    available.
+// 2. If EstimateMemoryUsage(T) is not available, but T has trivial dtor
+//    (i.e. it's POD, integer, pointer, enum, etc.) then it defines Call()
+//    method that returns 0. This is useful for containers, which allocate
+//    memory regardless of T (also for cases like std::map<int, MyClass>).
+// 3. Finally, if EstimateMemoryUsage(T) is not available, then it triggers
+//    a static_assert with a helpful message. That cuts the number of errors
+//    considerably - if you just call EstimateMemoryUsage(T) but it's not
+//    available for T, then the compiler will helpfully list *all* possible
+//    variants of it, with an explanation for each.
+template <class T, class X = void>
+struct EMUCaller {
+  // std::is_same<> below makes the static_assert depend on T, in order to
+  // prevent it from firing regardless of instantiation.
+  static_assert(std::is_same<T, std::false_type>::value,
+                "Neither global function 'size_t EstimateMemoryUsage(T)' "
+                "nor member function 'size_t T::EstimateMemoryUsage() const' "
+                "is defined for the type.");
+
+  static size_t Call(const T&) { return 0; }
+};
+
+template <class T>
+struct EMUCaller<T, typename std::enable_if<HasEMU<T>::value>::type> {
+  static size_t Call(const T& value) { return EstimateMemoryUsage(value); }
+};
+
+template <template <class...> class Container, class I, class = void>
+struct IsComplexIteratorForContainer : std::false_type {};
+
+template <template <class...> class Container, class I>
+struct IsComplexIteratorForContainer<
+    Container,
+    I,
+    std::enable_if_t<!std::is_pointer<I>::value &&
+                     base::internal::is_iterator<I>::value>> {
+  using value_type = typename std::iterator_traits<I>::value_type;
+  using container_type = Container<value_type>;
+
+  // We use an enum instead of a static constexpr bool, because we don't have
+  // inline variables until C++17.
+  //
+  // The downside is that |value| is not of type bool.
+  enum : bool {
+    value =
+        std::is_same<typename container_type::iterator, I>::value ||
+        std::is_same<typename container_type::const_iterator, I>::value ||
+        std::is_same<typename container_type::reverse_iterator, I>::value ||
+        std::is_same<typename container_type::const_reverse_iterator, I>::value,
+  };
+};
+
+template <class I, template <class...> class... Containers>
+constexpr bool OneOfContainersComplexIterators() {
+  // We are forced to create a temporary variable to work around a compilation
+  // error in MSVS.
+  const bool all_tests[] = {
+      IsComplexIteratorForContainer<Containers, I>::value...};
+  for (bool test : all_tests)
+    if (test)
+      return true;
+  return false;
+}
+
+// std::array has an extra required template argument. We curry it.
+template <class T>
+using array_test_helper = std::array<T, 1>;
+
+template <class I>
+constexpr bool IsStandardContainerComplexIterator() {
+  // TODO(dyaroshev): deal with map iterators if there is a need.
+  // It requires parsing pairs into keys and values.
+  // TODO(dyaroshev): deal with unordered containers: they do not have reverse
+  // iterators.
+  return OneOfContainersComplexIterators<
+      I, array_test_helper, std::vector, std::deque,
+      /*std::forward_list,*/ std::list, std::set, std::multiset>();
+}
+
+// Work around an MSVC bug: for some reason a constexpr function doesn't work
+// here, but a variable template does.
+template <typename T>
+constexpr bool IsKnownNonAllocatingType_v =
+    std::is_trivially_destructible<T>::value ||
+    IsStandardContainerComplexIterator<T>();
+
+template <class T>
+struct EMUCaller<
+    T,
+    std::enable_if_t<!HasEMU<T>::value && IsKnownNonAllocatingType_v<T>>> {
+  static size_t Call(const T&) { return 0; }
+};
+
+}  // namespace internal
+
+// Proxy that deduces T and calls EMUCaller<T>.
+// To be used by EstimateMemoryUsage() implementations for containers.
+template <class T>
+size_t EstimateItemMemoryUsage(const T& value) {
+  return internal::EMUCaller<T>::Call(value);
+}
+
+template <class I>
+size_t EstimateIterableMemoryUsage(const I& iterable) {
+  size_t memory_usage = 0;
+  for (const auto& item : iterable) {
+    memory_usage += EstimateItemMemoryUsage(item);
+  }
+  return memory_usage;
+}
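+
+// For example, for a std::vector<std::string> this adds up the heap usage of
+// every contained string; the vector's own buffer is accounted for separately
+// by EstimateMemoryUsage(const std::vector<T, A>&) below.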
+
+// Global EstimateMemoryUsage(T) that just calls T::EstimateMemoryUsage().
+template <class T>
+auto EstimateMemoryUsage(const T& object)
+    -> decltype(object.EstimateMemoryUsage()) {
+  static_assert(
+      std::is_same<decltype(object.EstimateMemoryUsage()), size_t>::value,
+      "'T::EstimateMemoryUsage() const' must return size_t.");
+  return object.EstimateMemoryUsage();
+}
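+
+// For example, a class can opt in by defining a member function (an
+// illustrative sketch; |MyIndex| and |entries_| are hypothetical):
+//
+//   class MyIndex {
+//    public:
+//     size_t EstimateMemoryUsage() const {
+//       return base::trace_event::EstimateMemoryUsage(entries_);
+//     }
+//
+//    private:
+//     std::vector<std::string> entries_;
+//   };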
+
+// String
+
+template <class C, class T, class A>
+size_t EstimateMemoryUsage(const std::basic_string<C, T, A>& string) {
+  using string_type = std::basic_string<C, T, A>;
+  using value_type = typename string_type::value_type;
+  // C++11 doesn't leave much room for implementors - std::string can
+  // use short string optimization, but that's about it. We detect SSO
+  // by checking that c_str() points inside |string|.
+  const uint8_t* cstr = reinterpret_cast<const uint8_t*>(string.c_str());
+  const uint8_t* inline_cstr = reinterpret_cast<const uint8_t*>(&string);
+  if (cstr >= inline_cstr && cstr < inline_cstr + sizeof(string)) {
+    // SSO string
+    return 0;
+  }
+  return (string.capacity() + 1) * sizeof(value_type);
+}
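+
+// E.g. (assuming a libc++-style SSO buffer of roughly 22 characters on
+// 64-bit) a 100-character std::string is estimated at capacity() + 1 bytes of
+// heap usage, while a 10-character string stored inline is estimated at 0.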
+
+// Use explicit instantiations from the .cc file (reduces bloat).
+extern template BASE_EXPORT size_t EstimateMemoryUsage(const std::string&);
+extern template BASE_EXPORT size_t EstimateMemoryUsage(const string16&);
+
+// Arrays
+
+template <class T, size_t N>
+size_t EstimateMemoryUsage(const std::array<T, N>& array) {
+  return EstimateIterableMemoryUsage(array);
+}
+
+template <class T, size_t N>
+size_t EstimateMemoryUsage(T (&array)[N]) {
+  return EstimateIterableMemoryUsage(array);
+}
+
+template <class T>
+size_t EstimateMemoryUsage(const T* array, size_t array_length) {
+  size_t memory_usage = sizeof(T) * array_length;
+  for (size_t i = 0; i != array_length; ++i) {
+    memory_usage += EstimateItemMemoryUsage(array[i]);
+  }
+  return memory_usage;
+}
+
+// std::unique_ptr
+
+template <class T, class D>
+size_t EstimateMemoryUsage(const std::unique_ptr<T, D>& ptr) {
+  return ptr ? (sizeof(T) + EstimateItemMemoryUsage(*ptr)) : 0;
+}
+
+template <class T, class D>
+size_t EstimateMemoryUsage(const std::unique_ptr<T[], D>& array,
+                           size_t array_length) {
+  return EstimateMemoryUsage(array.get(), array_length);
+}
+
+// std::shared_ptr
+
+template <class T>
+size_t EstimateMemoryUsage(const std::shared_ptr<T>& ptr) {
+  auto use_count = ptr.use_count();
+  if (use_count == 0) {
+    return 0;
+  }
+  // Model shared_ptr after libc++,
+  // see __shared_ptr_pointer from include/memory
+  struct SharedPointer {
+    void* vtbl;
+    long shared_owners;
+    long shared_weak_owners;
+    T* value;
+  };
+  // If an object of size S is shared N > S times, we prefer to (potentially)
+  // overestimate rather than return 0.
+  return sizeof(SharedPointer) +
+         (EstimateItemMemoryUsage(*ptr) + (use_count - 1)) / use_count;
+}
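+
+// E.g. an object estimated at 10 bytes and shared by 3 shared_ptr instances
+// is billed ceil(10 / 3) = 4 bytes (plus the modeled control block) per call,
+// so the total over all owners slightly overestimates rather than undercounts.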
+
+// std::pair
+
+template <class F, class S>
+size_t EstimateMemoryUsage(const std::pair<F, S>& pair) {
+  return EstimateItemMemoryUsage(pair.first) +
+         EstimateItemMemoryUsage(pair.second);
+}
+
+// std::vector
+
+template <class T, class A>
+size_t EstimateMemoryUsage(const std::vector<T, A>& vector) {
+  return sizeof(T) * vector.capacity() + EstimateIterableMemoryUsage(vector);
+}
+
+// std::list
+
+template <class T, class A>
+size_t EstimateMemoryUsage(const std::list<T, A>& list) {
+  using value_type = typename std::list<T, A>::value_type;
+  struct Node {
+    Node* prev;
+    Node* next;
+    value_type value;
+  };
+  return sizeof(Node) * list.size() +
+         EstimateIterableMemoryUsage(list);
+}
+
+template <class T>
+size_t EstimateMemoryUsage(const base::LinkedList<T>& list) {
+  size_t memory_usage = 0u;
+  for (base::LinkNode<T>* node = list.head(); node != list.end();
+       node = node->next()) {
+    // Since we increment by calling node = node->next() we know that node
+    // isn't nullptr.
+    memory_usage += EstimateMemoryUsage(*node->value()) + sizeof(T);
+  }
+  return memory_usage;
+}
+
+// Tree containers
+
+template <class V>
+size_t EstimateTreeMemoryUsage(size_t size) {
+  // Tree containers are modeled after libc++
+  // (__tree_node from include/__tree)
+  struct Node {
+    Node* left;
+    Node* right;
+    Node* parent;
+    bool is_black;
+    V value;
+  };
+  return sizeof(Node) * size;
+}
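+
+// E.g. for V = bool on a typical 64-bit build, sizeof(Node) is 32 bytes
+// (24 bytes of pointers, 1 byte of color, and padding), so a 1000-element
+// std::multiset<bool> is estimated at 32000 bytes.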
+
+template <class T, class C, class A>
+size_t EstimateMemoryUsage(const std::set<T, C, A>& set) {
+  using value_type = typename std::set<T, C, A>::value_type;
+  return EstimateTreeMemoryUsage<value_type>(set.size()) +
+         EstimateIterableMemoryUsage(set);
+}
+
+template <class T, class C, class A>
+size_t EstimateMemoryUsage(const std::multiset<T, C, A>& set) {
+  using value_type = typename std::multiset<T, C, A>::value_type;
+  return EstimateTreeMemoryUsage<value_type>(set.size()) +
+         EstimateIterableMemoryUsage(set);
+}
+
+template <class K, class V, class C, class A>
+size_t EstimateMemoryUsage(const std::map<K, V, C, A>& map) {
+  using value_type = typename std::map<K, V, C, A>::value_type;
+  return EstimateTreeMemoryUsage<value_type>(map.size()) +
+         EstimateIterableMemoryUsage(map);
+}
+
+template <class K, class V, class C, class A>
+size_t EstimateMemoryUsage(const std::multimap<K, V, C, A>& map) {
+  using value_type = typename std::multimap<K, V, C, A>::value_type;
+  return EstimateTreeMemoryUsage<value_type>(map.size()) +
+         EstimateIterableMemoryUsage(map);
+}
+
+// HashMap containers
+
+namespace internal {
+
+// While the hashtable containers model doesn't depend on the STL
+// implementation, one detail still crept in: bucket_count. It's used in the
+// size estimation, but its value after inserting N items is not predictable.
+// This function is specialized by unittests to return a constant value, thus
+// excluding bucket_count from testing.
+template <class V>
+size_t HashMapBucketCountForTesting(size_t bucket_count) {
+  return bucket_count;
+}
+
+template <class MruCacheType>
+size_t DoEstimateMemoryUsageForMruCache(const MruCacheType& mru_cache) {
+  return EstimateMemoryUsage(mru_cache.ordering_) +
+         EstimateMemoryUsage(mru_cache.index_);
+}
+
+}  // namespace internal
+
+template <class V>
+size_t EstimateHashMapMemoryUsage(size_t bucket_count, size_t size) {
+  // Hashtable containers are modeled after libc++
+  // (__hash_node from include/__hash_table)
+  struct Node {
+    void* next;
+    size_t hash;
+    V value;
+  };
+  using Bucket = void*;
+  bucket_count = internal::HashMapBucketCountForTesting<V>(bucket_count);
+  return sizeof(Bucket) * bucket_count + sizeof(Node) * size;
+}
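+
+// E.g. on a typical 64-bit build, each node costs sizeof(void*) +
+// sizeof(size_t) + sizeof(V) (plus padding) and each bucket costs one
+// pointer, so an unordered container holding 1000 size_t-sized values in
+// 1024 buckets (a hypothetical bucket count) is estimated at roughly
+// 24000 + 8192 bytes.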
+
+template <class K, class H, class KE, class A>
+size_t EstimateMemoryUsage(const std::unordered_set<K, H, KE, A>& set) {
+  using value_type = typename std::unordered_set<K, H, KE, A>::value_type;
+  return EstimateHashMapMemoryUsage<value_type>(set.bucket_count(),
+                                                set.size()) +
+         EstimateIterableMemoryUsage(set);
+}
+
+template <class K, class H, class KE, class A>
+size_t EstimateMemoryUsage(const std::unordered_multiset<K, H, KE, A>& set) {
+  using value_type = typename std::unordered_multiset<K, H, KE, A>::value_type;
+  return EstimateHashMapMemoryUsage<value_type>(set.bucket_count(),
+                                                set.size()) +
+         EstimateIterableMemoryUsage(set);
+}
+
+template <class K, class V, class H, class KE, class A>
+size_t EstimateMemoryUsage(const std::unordered_map<K, V, H, KE, A>& map) {
+  using value_type = typename std::unordered_map<K, V, H, KE, A>::value_type;
+  return EstimateHashMapMemoryUsage<value_type>(map.bucket_count(),
+                                                map.size()) +
+         EstimateIterableMemoryUsage(map);
+}
+
+template <class K, class V, class H, class KE, class A>
+size_t EstimateMemoryUsage(const std::unordered_multimap<K, V, H, KE, A>& map) {
+  using value_type =
+      typename std::unordered_multimap<K, V, H, KE, A>::value_type;
+  return EstimateHashMapMemoryUsage<value_type>(map.bucket_count(),
+                                                map.size()) +
+         EstimateIterableMemoryUsage(map);
+}
+
+// std::deque
+
+template <class T, class A>
+size_t EstimateMemoryUsage(const std::deque<T, A>& deque) {
+// Since std::deque implementations are wildly different
+// (see crbug.com/674287), we can't have one "good enough"
+// way to estimate.
+
+// kBlockSize      - minimum size of a block, in bytes
+// kMinBlockLength - number of elements in a block
+//                   if sizeof(T) > kBlockSize
+#if defined(_LIBCPP_VERSION)
+  size_t kBlockSize = 4096;
+  size_t kMinBlockLength = 16;
+#elif defined(__GLIBCXX__)
+  size_t kBlockSize = 512;
+  size_t kMinBlockLength = 1;
+#elif defined(_MSC_VER)
+  size_t kBlockSize = 16;
+  size_t kMinBlockLength = 1;
+#else
+  size_t kBlockSize = 0;
+  size_t kMinBlockLength = 1;
+#endif
+
+  size_t block_length =
+      (sizeof(T) > kBlockSize) ? kMinBlockLength : kBlockSize / sizeof(T);
+
+  size_t blocks = (deque.size() + block_length - 1) / block_length;
+
+#if defined(__GLIBCXX__)
+  // libstdc++: deque always has at least one block
+  if (!blocks)
+    blocks = 1;
+#endif
+
+#if defined(_LIBCPP_VERSION)
+  // libc++: deque keeps at most two blocks when it shrinks,
+  // so even if the size is zero, deque might be holding up
+  // to 4096 * 2 bytes. One way to know whether deque has
+  // ever allocated (and hence has 1 or 2 blocks) is to check
+  // iterator's pointer. Non-zero value means that deque has
+  // at least one block.
+  if (!blocks && deque.begin().operator->())
+    blocks = 1;
+#endif
+
+  return (blocks * block_length * sizeof(T)) +
+         EstimateIterableMemoryUsage(deque);
+}
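+
+// E.g. with libc++ (kBlockSize = 4096) a deque of 100 eight-byte elements
+// has block_length = 512, so it is estimated at one block of
+// 512 * 8 = 4096 bytes, plus the per-item usage of its elements.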
+
+// Container adapters
+
+template <class T, class C>
+size_t EstimateMemoryUsage(const std::queue<T, C>& queue) {
+  return EstimateMemoryUsage(GetUnderlyingContainer(queue));
+}
+
+template <class T, class C>
+size_t EstimateMemoryUsage(const std::priority_queue<T, C>& queue) {
+  return EstimateMemoryUsage(GetUnderlyingContainer(queue));
+}
+
+template <class T, class C>
+size_t EstimateMemoryUsage(const std::stack<T, C>& stack) {
+  return EstimateMemoryUsage(GetUnderlyingContainer(stack));
+}
+
+// base::circular_deque
+
+template <class T>
+size_t EstimateMemoryUsage(const base::circular_deque<T>& deque) {
+  return sizeof(T) * deque.capacity() + EstimateIterableMemoryUsage(deque);
+}
+
+// Flat containers
+
+template <class T, class C>
+size_t EstimateMemoryUsage(const base::flat_set<T, C>& set) {
+  using value_type = typename base::flat_set<T, C>::value_type;
+  return sizeof(value_type) * set.capacity() + EstimateIterableMemoryUsage(set);
+}
+
+template <class K, class V, class C>
+size_t EstimateMemoryUsage(const base::flat_map<K, V, C>& map) {
+  using value_type = typename base::flat_map<K, V, C>::value_type;
+  return sizeof(value_type) * map.capacity() + EstimateIterableMemoryUsage(map);
+}
+
+template <class Key,
+          class Payload,
+          class HashOrComp,
+          template <typename, typename, typename> class Map>
+size_t EstimateMemoryUsage(
+    const MRUCacheBase<Key, Payload, HashOrComp, Map>& mru_cache) {
+  return internal::DoEstimateMemoryUsageForMruCache(mru_cache);
+}
+
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_MEMORY_USAGE_ESTIMATOR_H_
diff --git a/base/trace_event/memory_usage_estimator_unittest.cc b/base/trace_event/memory_usage_estimator_unittest.cc
new file mode 100644
index 0000000..b525990
--- /dev/null
+++ b/base/trace_event/memory_usage_estimator_unittest.cc
@@ -0,0 +1,265 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_usage_estimator.h"
+
+#include <stdlib.h>
+
+#include "base/memory/ptr_util.h"
+#include "base/strings/string16.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(ARCH_CPU_64_BITS)
+#define EXPECT_EQ_32_64(_, e, a) EXPECT_EQ(e, a)
+#else
+#define EXPECT_EQ_32_64(e, _, a) EXPECT_EQ(e, a)
+#endif
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+// Test class with predictable memory usage.
+class Data {
+ public:
+  explicit Data(size_t size = 17) : size_(size) {}
+
+  size_t size() const { return size_; }
+
+  size_t EstimateMemoryUsage() const { return size_; }
+
+  bool operator<(const Data& other) const { return size_ < other.size_; }
+  bool operator==(const Data& other) const { return size_ == other.size_; }
+
+  struct Hasher {
+    size_t operator()(const Data& data) const { return data.size(); }
+  };
+
+ private:
+  size_t size_;
+};
+
+}  // namespace
+
+namespace internal {
+
+// This kills variance of bucket_count across STL implementations.
+template <>
+size_t HashMapBucketCountForTesting<Data>(size_t) {
+  return 10;
+}
+template <>
+size_t HashMapBucketCountForTesting<std::pair<const Data, short>>(size_t) {
+  return 10;
+}
+
+}  // namespace internal
+
+TEST(EstimateMemoryUsageTest, String) {
+  std::string string(777, 'a');
+  EXPECT_EQ(string.capacity() + 1, EstimateMemoryUsage(string));
+}
+
+TEST(EstimateMemoryUsageTest, String16) {
+  string16 string(777, 'a');
+  EXPECT_EQ(sizeof(char16) * (string.capacity() + 1),
+            EstimateMemoryUsage(string));
+}
+
+TEST(EstimateMemoryUsageTest, Arrays) {
+  // std::array
+  {
+    std::array<Data, 10> array;
+    EXPECT_EQ(170u, EstimateMemoryUsage(array));
+  }
+
+  // T[N]
+  {
+    Data array[10];
+    EXPECT_EQ(170u, EstimateMemoryUsage(array));
+  }
+
+  // C array
+  {
+    struct Item {
+      char payload[10];
+    };
+    Item* array = new Item[7];
+    EXPECT_EQ(70u, EstimateMemoryUsage(array, 7));
+    delete[] array;
+  }
+}
+
+TEST(EstimateMemoryUsageTest, UniquePtr) {
+  // Empty
+  {
+    std::unique_ptr<Data> ptr;
+    EXPECT_EQ(0u, EstimateMemoryUsage(ptr));
+  }
+
+  // Not empty
+  {
+    std::unique_ptr<Data> ptr(new Data());
+    EXPECT_EQ_32_64(21u, 25u, EstimateMemoryUsage(ptr));
+  }
+
+  // With a pointer
+  {
+    std::unique_ptr<Data*> ptr(new Data*());
+    EXPECT_EQ(sizeof(void*), EstimateMemoryUsage(ptr));
+  }
+
+  // With an array
+  {
+    struct Item {
+      uint32_t payload[10];
+    };
+    std::unique_ptr<Item[]> ptr(new Item[7]);
+    EXPECT_EQ(280u, EstimateMemoryUsage(ptr, 7));
+  }
+}
+
+TEST(EstimateMemoryUsageTest, Vector) {
+  std::vector<Data> vector;
+  vector.reserve(1000);
+
+  // For an empty vector, we should return the memory usage of its buffer.
+  size_t capacity = vector.capacity();
+  size_t expected_size = capacity * sizeof(Data);
+  EXPECT_EQ(expected_size, EstimateMemoryUsage(vector));
+
+  // If the vector is not empty, its size should also include the memory
+  // usage of all elements.
+  for (size_t i = 0; i != capacity / 2; ++i) {
+    vector.push_back(Data(i));
+    expected_size += EstimateMemoryUsage(vector.back());
+  }
+  EXPECT_EQ(expected_size, EstimateMemoryUsage(vector));
+}
+
+TEST(EstimateMemoryUsageTest, List) {
+  struct POD {
+    short data;
+  };
+  std::list<POD> list;
+  for (int i = 0; i != 1000; ++i) {
+    list.push_back(POD());
+  }
+  EXPECT_EQ_32_64(12000u, 24000u, EstimateMemoryUsage(list));
+}
+
+TEST(EstimateMemoryUsageTest, Set) {
+  std::set<std::pair<int, Data>> set;
+  for (int i = 0; i != 1000; ++i) {
+    set.insert({i, Data(i)});
+  }
+  EXPECT_EQ_32_64(523500u, 547500u, EstimateMemoryUsage(set));
+}
+
+TEST(EstimateMemoryUsageTest, MultiSet) {
+  std::multiset<bool> set;
+  for (int i = 0; i != 1000; ++i) {
+    set.insert((i & 1) != 0);
+  }
+  EXPECT_EQ_32_64(16000u, 32000u, EstimateMemoryUsage(set));
+}
+
+TEST(EstimateMemoryUsageTest, Map) {
+  std::map<Data, int> map;
+  for (int i = 0; i != 1000; ++i) {
+    map.insert({Data(i), i});
+  }
+  EXPECT_EQ_32_64(523500u, 547500u, EstimateMemoryUsage(map));
+}
+
+TEST(EstimateMemoryUsageTest, MultiMap) {
+  std::multimap<char, Data> map;
+  for (int i = 0; i != 1000; ++i) {
+    map.insert({static_cast<char>(i), Data(i)});
+  }
+  EXPECT_EQ_32_64(523500u, 547500u, EstimateMemoryUsage(map));
+}
+
+TEST(EstimateMemoryUsageTest, UnorderedSet) {
+  std::unordered_set<Data, Data::Hasher> set;
+  for (int i = 0; i != 1000; ++i) {
+    set.insert(Data(i));
+  }
+  EXPECT_EQ_32_64(511540u, 523580u, EstimateMemoryUsage(set));
+}
+
+TEST(EstimateMemoryUsageTest, UnorderedMultiSet) {
+  std::unordered_multiset<Data, Data::Hasher> set;
+  for (int i = 0; i != 500; ++i) {
+    set.insert(Data(i));
+    set.insert(Data(i));
+  }
+  EXPECT_EQ_32_64(261540u, 273580u, EstimateMemoryUsage(set));
+}
+
+TEST(EstimateMemoryUsageTest, UnorderedMap) {
+  std::unordered_map<Data, short, Data::Hasher> map;
+  for (int i = 0; i != 1000; ++i) {
+    map.insert({Data(i), static_cast<short>(i)});
+  }
+  EXPECT_EQ_32_64(515540u, 531580u, EstimateMemoryUsage(map));
+}
+
+TEST(EstimateMemoryUsageTest, UnorderedMultiMap) {
+  std::unordered_multimap<Data, short, Data::Hasher> map;
+  for (int i = 0; i != 1000; ++i) {
+    map.insert({Data(i), static_cast<short>(i)});
+  }
+  EXPECT_EQ_32_64(515540u, 531580u, EstimateMemoryUsage(map));
+}
+
+TEST(EstimateMemoryUsageTest, Deque) {
+  std::deque<Data> deque;
+
+  // Pick a large value so that platform-specific accounting
+  // for deque's blocks is small compared to usage of all items.
+  constexpr size_t kDataSize = 100000;
+  for (int i = 0; i != 1500; ++i) {
+    deque.push_back(Data(kDataSize));
+  }
+
+  // Compare against a reasonable minimum (i.e. no overhead).
+  size_t min_expected_usage = deque.size() * (sizeof(Data) + kDataSize);
+  EXPECT_LE(min_expected_usage, EstimateMemoryUsage(deque));
+}
+
+TEST(EstimateMemoryUsageTest, IsStandardContainerComplexIteratorTest) {
+  struct abstract {
+    virtual void method() = 0;
+  };
+
+  static_assert(
+      internal::IsStandardContainerComplexIterator<std::list<int>::iterator>(),
+      "");
+  static_assert(internal::IsStandardContainerComplexIterator<
+                    std::list<int>::const_iterator>(),
+                "");
+  static_assert(internal::IsStandardContainerComplexIterator<
+                    std::list<int>::reverse_iterator>(),
+                "");
+  static_assert(internal::IsStandardContainerComplexIterator<
+                    std::list<int>::const_reverse_iterator>(),
+                "");
+  static_assert(!internal::IsStandardContainerComplexIterator<int>(), "");
+  static_assert(!internal::IsStandardContainerComplexIterator<abstract*>(), "");
+}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/process_memory_dump.cc b/base/trace_event/process_memory_dump.cc
new file mode 100644
index 0000000..7442578
--- /dev/null
+++ b/base/trace_event/process_memory_dump.cc
@@ -0,0 +1,545 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/process_memory_dump.h"
+
+#include <errno.h>
+
+#include <vector>
+
+#include "base/memory/ptr_util.h"
+#include "base/memory/shared_memory_tracker.h"
+#include "base/process/process_metrics.h"
+#include "base/strings/stringprintf.h"
+#include "base/trace_event/heap_profiler_heap_dump_writer.h"
+#include "base/trace_event/heap_profiler_serialization_state.h"
+#include "base/trace_event/memory_infra_background_whitelist.h"
+#include "base/trace_event/trace_event_argument.h"
+#include "base/unguessable_token.h"
+#include "build/build_config.h"
+
+#if defined(OS_IOS)
+#include <mach/vm_page_size.h>
+#endif
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include <sys/mman.h>
+#endif
+
+#if defined(OS_WIN)
+#include <windows.h>  // Must be in front of other Windows header files
+
+#include <Psapi.h>
+#endif
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+const char kEdgeTypeOwnership[] = "ownership";
+
+std::string GetSharedGlobalAllocatorDumpName(
+    const MemoryAllocatorDumpGuid& guid) {
+  return "global/" + guid.ToString();
+}
+
+#if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
+size_t GetSystemPageCount(size_t mapped_size, size_t page_size) {
+  return (mapped_size + page_size - 1) / page_size;
+}
+#endif
+
+UnguessableToken GetTokenForCurrentProcess() {
+  static UnguessableToken instance = UnguessableToken::Create();
+  return instance;
+}
+
+}  // namespace
+
+// static
+bool ProcessMemoryDump::is_black_hole_non_fatal_for_testing_ = false;
+
+#if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
+// static
+size_t ProcessMemoryDump::GetSystemPageSize() {
+#if defined(OS_IOS)
+  // On iOS, getpagesize() returns the user page size, but the kernel page
+  // size is needed for allocating arrays for mincore(). Use
+  // vm_kernel_page_size as recommended by Apple,
+  // https://forums.developer.apple.com/thread/47532/.
+  // Refer to http://crbug.com/542671 and Apple rdar://23651782.
+  return vm_kernel_page_size;
+#else
+  return base::GetPageSize();
+#endif  // defined(OS_IOS)
+}
+
+// static
+size_t ProcessMemoryDump::CountResidentBytes(void* start_address,
+                                             size_t mapped_size) {
+  const size_t page_size = GetSystemPageSize();
+  const uintptr_t start_pointer = reinterpret_cast<uintptr_t>(start_address);
+  DCHECK_EQ(0u, start_pointer % page_size);
+
+  size_t offset = 0;
+  size_t total_resident_bytes = 0;
+  bool failure = false;
+
+  // An array as large as the number of pages in the memory segment needs to
+  // be passed to the query function. To avoid allocating a large array, the
+  // given block of memory is split into chunks of size |kMaxChunkSize|.
+  const size_t kMaxChunkSize = 8 * 1024 * 1024;
+  size_t max_vec_size =
+      GetSystemPageCount(std::min(mapped_size, kMaxChunkSize), page_size);
+#if defined(OS_WIN)
+  std::unique_ptr<PSAPI_WORKING_SET_EX_INFORMATION[]> vec(
+      new PSAPI_WORKING_SET_EX_INFORMATION[max_vec_size]);
+#elif defined(OS_MACOSX)
+  std::unique_ptr<char[]> vec(new char[max_vec_size]);
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  std::unique_ptr<unsigned char[]> vec(new unsigned char[max_vec_size]);
+#endif
+
+  while (offset < mapped_size) {
+    uintptr_t chunk_start = (start_pointer + offset);
+    const size_t chunk_size = std::min(mapped_size - offset, kMaxChunkSize);
+    const size_t page_count = GetSystemPageCount(chunk_size, page_size);
+    size_t resident_page_count = 0;
+#if defined(OS_WIN)
+    for (size_t i = 0; i < page_count; i++) {
+      vec[i].VirtualAddress =
+          reinterpret_cast<void*>(chunk_start + i * page_size);
+    }
+    DWORD vec_size = static_cast<DWORD>(
+        page_count * sizeof(PSAPI_WORKING_SET_EX_INFORMATION));
+    failure = !QueryWorkingSetEx(GetCurrentProcess(), vec.get(), vec_size);
+
+    for (size_t i = 0; i < page_count; i++)
+      resident_page_count += vec[i].VirtualAttributes.Valid;
+#elif defined(OS_FUCHSIA)
+    // TODO(fuchsia): Port, see https://crbug.com/706592.
+    ALLOW_UNUSED_LOCAL(chunk_start);
+    ALLOW_UNUSED_LOCAL(page_count);
+#elif defined(OS_MACOSX)
+    // mincore() on macOS does not fail with EAGAIN.
+    failure =
+        !!mincore(reinterpret_cast<void*>(chunk_start), chunk_size, vec.get());
+    for (size_t i = 0; i < page_count; i++)
+      resident_page_count += vec[i] & MINCORE_INCORE ? 1 : 0;
+#elif defined(OS_POSIX)
+    int error_counter = 0;
+    int result = 0;
+    // HANDLE_EINTR retries up to 100 times, so we follow the same pattern.
+    do {
+      result =
+#if defined(OS_AIX)
+          mincore(reinterpret_cast<char*>(chunk_start), chunk_size,
+                  reinterpret_cast<char*>(vec.get()));
+#else
+          mincore(reinterpret_cast<void*>(chunk_start), chunk_size, vec.get());
+#endif
+    } while (result == -1 && errno == EAGAIN && error_counter++ < 100);
+    failure = !!result;
+
+    for (size_t i = 0; i < page_count; i++)
+      resident_page_count += vec[i] & 1;
+#endif
+
+    if (failure)
+      break;
+
+    total_resident_bytes += resident_page_count * page_size;
+    offset += kMaxChunkSize;
+  }
+
+  DCHECK(!failure);
+  if (failure) {
+    total_resident_bytes = 0;
+    LOG(ERROR) << "CountResidentBytes failed. The resident size is invalid";
+  }
+  return total_resident_bytes;
+}
+
+// static
+base::Optional<size_t> ProcessMemoryDump::CountResidentBytesInSharedMemory(
+    void* start_address,
+    size_t mapped_size) {
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+  // On macOS, use mach_vm_region instead of mincore for performance
+  // (crbug.com/742042).
+  mach_vm_size_t dummy_size = 0;
+  mach_vm_address_t address =
+      reinterpret_cast<mach_vm_address_t>(start_address);
+  vm_region_top_info_data_t info;
+  MachVMRegionResult result =
+      GetTopInfo(mach_task_self(), &dummy_size, &address, &info);
+  if (result == MachVMRegionResult::Error) {
+    LOG(ERROR) << "CountResidentBytesInSharedMemory failed. The resident size "
+                  "is invalid";
+    return base::Optional<size_t>();
+  }
+
+  size_t resident_pages =
+      info.private_pages_resident + info.shared_pages_resident;
+
+  // On macOS, measurements for private memory footprint overcount by
+  // faulted pages in anonymous shared memory. To correct for this, we touch
+  // all the resident pages in anonymous shared memory here, thus making them
+  // faulted as well. This relies on two assumptions:
+  //
+  // 1) Consumers use shared memory from front to back. Thus, if there are
+  // (N) resident pages, those pages represent the first N * PAGE_SIZE bytes in
+  // the shared memory region.
+  //
+  // 2) This logic is run shortly before the logic that calculates
+  // phys_footprint, thus ensuring that the discrepancy between faulted and
+  // resident pages is minimal.
+  //
+  // The performance penalty is expected to be small.
+  //
+  // * Most of the time, we expect the pages to already be resident and
+  // faulted, thus incurring only the cost of a cached read (since we read
+  // from each resident page).
+  //
+  // * Rarely, we expect the pages to be resident but not faulted, resulting in
+  // soft faults + cache penalty.
+  //
+  // * If assumption (1) is invalid, this will potentially fault some
+  // previously non-resident pages, thus increasing memory usage, without fixing
+  // the accounting.
+  //
+  // Sanity check in case the mapped size is less than the total size of the
+  // region.
+  size_t pages_to_fault =
+      std::min(resident_pages, (mapped_size + PAGE_SIZE - 1) / PAGE_SIZE);
+
+  volatile char* base_address = static_cast<char*>(start_address);
+  for (size_t i = 0; i < pages_to_fault; ++i) {
+    // Reading from a volatile is a visible side-effect for the purposes of
+    // optimization. This guarantees that the optimizer will not kill this line.
+    base_address[i * PAGE_SIZE];
+  }
+
+  return resident_pages * PAGE_SIZE;
+#else
+  return CountResidentBytes(start_address, mapped_size);
+#endif  // defined(OS_MACOSX) && !defined(OS_IOS)
+}
+
+#endif  // defined(COUNT_RESIDENT_BYTES_SUPPORTED)
+
+ProcessMemoryDump::ProcessMemoryDump(
+    scoped_refptr<HeapProfilerSerializationState>
+        heap_profiler_serialization_state,
+    const MemoryDumpArgs& dump_args)
+    : process_token_(GetTokenForCurrentProcess()),
+      heap_profiler_serialization_state_(
+          std::move(heap_profiler_serialization_state)),
+      dump_args_(dump_args) {}
+
+ProcessMemoryDump::~ProcessMemoryDump() = default;
+ProcessMemoryDump::ProcessMemoryDump(ProcessMemoryDump&& other) = default;
+ProcessMemoryDump& ProcessMemoryDump::operator=(ProcessMemoryDump&& other) =
+    default;
+
+MemoryAllocatorDump* ProcessMemoryDump::CreateAllocatorDump(
+    const std::string& absolute_name) {
+  return AddAllocatorDumpInternal(std::make_unique<MemoryAllocatorDump>(
+      absolute_name, dump_args_.level_of_detail, GetDumpId(absolute_name)));
+}
+
+MemoryAllocatorDump* ProcessMemoryDump::CreateAllocatorDump(
+    const std::string& absolute_name,
+    const MemoryAllocatorDumpGuid& guid) {
+  return AddAllocatorDumpInternal(std::make_unique<MemoryAllocatorDump>(
+      absolute_name, dump_args_.level_of_detail, guid));
+}
+
+MemoryAllocatorDump* ProcessMemoryDump::AddAllocatorDumpInternal(
+    std::unique_ptr<MemoryAllocatorDump> mad) {
+  // In background mode, return the black hole dump if an invalid dump name
+  // is given.
+  if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND &&
+      !IsMemoryAllocatorDumpNameWhitelisted(mad->absolute_name())) {
+    return GetBlackHoleMad();
+  }
+
+  auto insertion_result = allocator_dumps_.insert(
+      std::make_pair(mad->absolute_name(), std::move(mad)));
+  MemoryAllocatorDump* inserted_mad = insertion_result.first->second.get();
+  DCHECK(insertion_result.second) << "Duplicate name: "
+                                  << inserted_mad->absolute_name();
+  return inserted_mad;
+}
+
+MemoryAllocatorDump* ProcessMemoryDump::GetAllocatorDump(
+    const std::string& absolute_name) const {
+  auto it = allocator_dumps_.find(absolute_name);
+  if (it != allocator_dumps_.end())
+    return it->second.get();
+  if (black_hole_mad_)
+    return black_hole_mad_.get();
+  return nullptr;
+}
+
+MemoryAllocatorDump* ProcessMemoryDump::GetOrCreateAllocatorDump(
+    const std::string& absolute_name) {
+  MemoryAllocatorDump* mad = GetAllocatorDump(absolute_name);
+  return mad ? mad : CreateAllocatorDump(absolute_name);
+}
+
+MemoryAllocatorDump* ProcessMemoryDump::CreateSharedGlobalAllocatorDump(
+    const MemoryAllocatorDumpGuid& guid) {
+  // A shared allocator dump can be shared within a process and the guid could
+  // have been created already.
+  MemoryAllocatorDump* mad = GetSharedGlobalAllocatorDump(guid);
+  if (mad && mad != black_hole_mad_.get()) {
+    // The weak flag is cleared because this method should create a non-weak
+    // dump.
+    mad->clear_flags(MemoryAllocatorDump::Flags::WEAK);
+    return mad;
+  }
+  return CreateAllocatorDump(GetSharedGlobalAllocatorDumpName(guid), guid);
+}
+
+MemoryAllocatorDump* ProcessMemoryDump::CreateWeakSharedGlobalAllocatorDump(
+    const MemoryAllocatorDumpGuid& guid) {
+  MemoryAllocatorDump* mad = GetSharedGlobalAllocatorDump(guid);
+  if (mad && mad != black_hole_mad_.get())
+    return mad;
+  mad = CreateAllocatorDump(GetSharedGlobalAllocatorDumpName(guid), guid);
+  mad->set_flags(MemoryAllocatorDump::Flags::WEAK);
+  return mad;
+}
+
+MemoryAllocatorDump* ProcessMemoryDump::GetSharedGlobalAllocatorDump(
+    const MemoryAllocatorDumpGuid& guid) const {
+  return GetAllocatorDump(GetSharedGlobalAllocatorDumpName(guid));
+}
+
+void ProcessMemoryDump::DumpHeapUsage(
+    const std::unordered_map<base::trace_event::AllocationContext,
+                             base::trace_event::AllocationMetrics>&
+        metrics_by_context,
+    base::trace_event::TraceEventMemoryOverhead& overhead,
+    const char* allocator_name) {
+  // The heap profiler serialization state can be null here if the heap
+  // profiler was enabled while a process dump was in progress.
+  if (heap_profiler_serialization_state() && !metrics_by_context.empty()) {
+    DCHECK_EQ(0ul, heap_dumps_.count(allocator_name));
+    std::unique_ptr<TracedValue> heap_dump = ExportHeapDump(
+        metrics_by_context, *heap_profiler_serialization_state());
+    heap_dumps_[allocator_name] = std::move(heap_dump);
+  }
+
+  std::string base_name = base::StringPrintf("tracing/heap_profiler_%s",
+                                             allocator_name);
+  overhead.DumpInto(base_name.c_str(), this);
+}
+
+void ProcessMemoryDump::SetAllocatorDumpsForSerialization(
+    std::vector<std::unique_ptr<MemoryAllocatorDump>> dumps) {
+  DCHECK(allocator_dumps_.empty());
+  for (std::unique_ptr<MemoryAllocatorDump>& dump : dumps)
+    AddAllocatorDumpInternal(std::move(dump));
+}
+
+std::vector<ProcessMemoryDump::MemoryAllocatorDumpEdge>
+ProcessMemoryDump::GetAllEdgesForSerialization() const {
+  std::vector<MemoryAllocatorDumpEdge> edges;
+  edges.reserve(allocator_dumps_edges_.size());
+  for (const auto& it : allocator_dumps_edges_)
+    edges.push_back(it.second);
+  return edges;
+}
+
+void ProcessMemoryDump::SetAllEdgesForSerialization(
+    const std::vector<ProcessMemoryDump::MemoryAllocatorDumpEdge>& edges) {
+  DCHECK(allocator_dumps_edges_.empty());
+  for (const MemoryAllocatorDumpEdge& edge : edges) {
+    auto it_and_inserted = allocator_dumps_edges_.emplace(edge.source, edge);
+    DCHECK(it_and_inserted.second);
+  }
+}
+
+void ProcessMemoryDump::Clear() {
+  allocator_dumps_.clear();
+  allocator_dumps_edges_.clear();
+  heap_dumps_.clear();
+}
+
+void ProcessMemoryDump::TakeAllDumpsFrom(ProcessMemoryDump* other) {
+  // Moves the ownership of all MemoryAllocatorDump(s) contained in |other|
+  // into this ProcessMemoryDump, checking for duplicates.
+  for (auto& it : other->allocator_dumps_)
+    AddAllocatorDumpInternal(std::move(it.second));
+  other->allocator_dumps_.clear();
+
+  // Move all the edges.
+  allocator_dumps_edges_.insert(other->allocator_dumps_edges_.begin(),
+                                other->allocator_dumps_edges_.end());
+  other->allocator_dumps_edges_.clear();
+
+  for (auto& it : other->heap_dumps_) {
+    DCHECK_EQ(0ul, heap_dumps_.count(it.first));
+    heap_dumps_.insert(std::make_pair(it.first, std::move(it.second)));
+  }
+  other->heap_dumps_.clear();
+}
+
+void ProcessMemoryDump::SerializeAllocatorDumpsInto(TracedValue* value) const {
+  if (!allocator_dumps_.empty()) {
+    value->BeginDictionary("allocators");
+    for (const auto& allocator_dump_it : allocator_dumps_)
+      allocator_dump_it.second->AsValueInto(value);
+    value->EndDictionary();
+  }
+
+  value->BeginArray("allocators_graph");
+  for (const auto& it : allocator_dumps_edges_) {
+    const MemoryAllocatorDumpEdge& edge = it.second;
+    value->BeginDictionary();
+    value->SetString("source", edge.source.ToString());
+    value->SetString("target", edge.target.ToString());
+    value->SetInteger("importance", edge.importance);
+    value->SetString("type", kEdgeTypeOwnership);
+    value->EndDictionary();
+  }
+  value->EndArray();
+}
+
+void ProcessMemoryDump::SerializeHeapProfilerDumpsInto(
+    TracedValue* value) const {
+  if (heap_dumps_.empty())
+    return;
+  value->BeginDictionary("heaps");
+  for (const auto& name_and_dump : heap_dumps_)
+    value->SetValueWithCopiedName(name_and_dump.first, *name_and_dump.second);
+  value->EndDictionary();  // "heaps"
+}
+
+void ProcessMemoryDump::AddOwnershipEdge(const MemoryAllocatorDumpGuid& source,
+                                         const MemoryAllocatorDumpGuid& target,
+                                         int importance) {
+  // This will either override an existing edge or create a new one.
+  auto it = allocator_dumps_edges_.find(source);
+  int max_importance = importance;
+  if (it != allocator_dumps_edges_.end()) {
+    DCHECK_EQ(target.ToUint64(), it->second.target.ToUint64());
+    max_importance = std::max(importance, it->second.importance);
+  }
+  allocator_dumps_edges_[source] = {source, target, max_importance,
+                                    false /* overridable */};
+}
+
+void ProcessMemoryDump::AddOwnershipEdge(
+    const MemoryAllocatorDumpGuid& source,
+    const MemoryAllocatorDumpGuid& target) {
+  AddOwnershipEdge(source, target, 0 /* importance */);
+}
+
+void ProcessMemoryDump::AddOverridableOwnershipEdge(
+    const MemoryAllocatorDumpGuid& source,
+    const MemoryAllocatorDumpGuid& target,
+    int importance) {
+  if (allocator_dumps_edges_.count(source) == 0) {
+    allocator_dumps_edges_[source] = {source, target, importance,
+                                      true /* overridable */};
+  } else {
+    // An edge between the source and target already exists. So, do nothing
+    // here since the new overridable edge is implicitly overridden by a
+    // strong edge which was created earlier.
+    DCHECK(!allocator_dumps_edges_[source].overridable);
+  }
+}
+
+void ProcessMemoryDump::CreateSharedMemoryOwnershipEdge(
+    const MemoryAllocatorDumpGuid& client_local_dump_guid,
+    const UnguessableToken& shared_memory_guid,
+    int importance) {
+  CreateSharedMemoryOwnershipEdgeInternal(client_local_dump_guid,
+                                          shared_memory_guid, importance,
+                                          false /*is_weak*/);
+}
+
+void ProcessMemoryDump::CreateWeakSharedMemoryOwnershipEdge(
+    const MemoryAllocatorDumpGuid& client_local_dump_guid,
+    const UnguessableToken& shared_memory_guid,
+    int importance) {
+  CreateSharedMemoryOwnershipEdgeInternal(
+      client_local_dump_guid, shared_memory_guid, importance, true /*is_weak*/);
+}
+
+void ProcessMemoryDump::CreateSharedMemoryOwnershipEdgeInternal(
+    const MemoryAllocatorDumpGuid& client_local_dump_guid,
+    const UnguessableToken& shared_memory_guid,
+    int importance,
+    bool is_weak) {
+  DCHECK(!shared_memory_guid.is_empty());
+  // New model where the global dumps created by SharedMemoryTracker are used
+  // for the clients.
+
+  // The guid of the local dump created by SharedMemoryTracker for the memory
+  // segment.
+  auto local_shm_guid =
+      GetDumpId(SharedMemoryTracker::GetDumpNameForTracing(shared_memory_guid));
+
+  // The dump guid of the global dump created by the tracker for the memory
+  // segment.
+  auto global_shm_guid =
+      SharedMemoryTracker::GetGlobalDumpIdForTracing(shared_memory_guid);
+
+  // Create an edge between the local dump of the client and the local dump
+  // of the SharedMemoryTracker. There is no need to create the dumps here
+  // since the tracker will create them. The importance is also required here
+  // for the single-process-mode case.
+  AddOwnershipEdge(client_local_dump_guid, local_shm_guid, importance);
+
+  // TODO(ssid): Handle the case of weak dumps here. This needs a new function
+  // GetOrCreateGlobalDump() in PMD since we need to change the behavior of the
+  // created global dump.
+  // Create an edge that overrides the edge created by SharedMemoryTracker.
+  AddOwnershipEdge(local_shm_guid, global_shm_guid, importance);
+}
+
+void ProcessMemoryDump::AddSuballocation(const MemoryAllocatorDumpGuid& source,
+                                         const std::string& target_node_name) {
+  // Do not create new dumps for suballocations in background mode.
+  if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND)
+    return;
+
+  std::string child_mad_name = target_node_name + "/__" + source.ToString();
+  MemoryAllocatorDump* target_child_mad = CreateAllocatorDump(child_mad_name);
+  AddOwnershipEdge(source, target_child_mad->guid());
+}
+
+MemoryAllocatorDump* ProcessMemoryDump::GetBlackHoleMad() {
+  DCHECK(is_black_hole_non_fatal_for_testing_);
+  if (!black_hole_mad_) {
+    std::string name = "discarded";
+    black_hole_mad_.reset(new MemoryAllocatorDump(
+        name, dump_args_.level_of_detail, GetDumpId(name)));
+  }
+  return black_hole_mad_.get();
+}
+
+MemoryAllocatorDumpGuid ProcessMemoryDump::GetDumpId(
+    const std::string& absolute_name) {
+  return MemoryAllocatorDumpGuid(StringPrintf(
+      "%s:%s", process_token().ToString().c_str(), absolute_name.c_str()));
+}
+
+bool ProcessMemoryDump::MemoryAllocatorDumpEdge::operator==(
+    const MemoryAllocatorDumpEdge& other) const {
+  return source == other.source && target == other.target &&
+         importance == other.importance && overridable == other.overridable;
+}
+
+bool ProcessMemoryDump::MemoryAllocatorDumpEdge::operator!=(
+    const MemoryAllocatorDumpEdge& other) const {
+  return !(*this == other);
+}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/process_memory_dump.h b/base/trace_event/process_memory_dump.h
new file mode 100644
index 0000000..a732a26
--- /dev/null
+++ b/base/trace_event/process_memory_dump.h
@@ -0,0 +1,304 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_PROCESS_MEMORY_DUMP_H_
+#define BASE_TRACE_EVENT_PROCESS_MEMORY_DUMP_H_
+
+#include <stddef.h>
+
+#include <map>
+#include <unordered_map>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/trace_event/heap_profiler_serialization_state.h"
+#include "base/trace_event/memory_allocator_dump.h"
+#include "base/trace_event/memory_allocator_dump_guid.h"
+#include "base/trace_event/memory_dump_request_args.h"
+#include "build/build_config.h"
+
+// Define COUNT_RESIDENT_BYTES_SUPPORTED if platform supports counting of the
+// resident memory.
+#if !defined(OS_NACL)
+#define COUNT_RESIDENT_BYTES_SUPPORTED
+#endif
+
+namespace base {
+
+class SharedMemory;
+class UnguessableToken;
+
+namespace trace_event {
+
+class HeapProfilerSerializationState;
+class TracedValue;
+
+// ProcessMemoryDump is a strongly typed container which holds the dumps
+// produced by the MemoryDumpProvider(s) for a specific process.
+class BASE_EXPORT ProcessMemoryDump {
+ public:
+  struct BASE_EXPORT MemoryAllocatorDumpEdge {
+    bool operator==(const MemoryAllocatorDumpEdge&) const;
+    bool operator!=(const MemoryAllocatorDumpEdge&) const;
+
+    MemoryAllocatorDumpGuid source;
+    MemoryAllocatorDumpGuid target;
+    int importance = 0;
+    bool overridable = false;
+  };
+
+  // Maps allocator dumps' absolute names (allocator_name/heap/subheap) to
+  // MemoryAllocatorDump instances.
+  using AllocatorDumpsMap =
+      std::map<std::string, std::unique_ptr<MemoryAllocatorDump>>;
+
+  using HeapDumpsMap = std::map<std::string, std::unique_ptr<TracedValue>>;
+
+  // Stores allocator dump edges indexed by source allocator dump GUID.
+  using AllocatorDumpEdgesMap =
+      std::map<MemoryAllocatorDumpGuid, MemoryAllocatorDumpEdge>;
+
+#if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
+  // Returns the number of bytes in a kernel memory page. Some platforms may
+  // have kernel page sizes that differ from user page sizes. It is important
+  // to use kernel memory page sizes for the resident bytes calculation. In
+  // most cases, the two are the same.
+  static size_t GetSystemPageSize();
+
+  // Returns the total bytes resident for a virtual address range, with given
+  // |start_address| and |mapped_size|. |mapped_size| is specified in bytes. The
+  // value returned is valid only if the given range is currently mmapped by the
+  // process. The |start_address| must be page-aligned.
+  static size_t CountResidentBytes(void* start_address, size_t mapped_size);
+
+  // The same as above, but the given mapped range should belong to the
+  // shared_memory's mapped region.
+  static base::Optional<size_t> CountResidentBytesInSharedMemory(
+      void* start_address,
+      size_t mapped_size);
+#endif
+
+  ProcessMemoryDump(scoped_refptr<HeapProfilerSerializationState>
+                        heap_profiler_serialization_state,
+                    const MemoryDumpArgs& dump_args);
+  ProcessMemoryDump(ProcessMemoryDump&&);
+  ~ProcessMemoryDump();
+
+  ProcessMemoryDump& operator=(ProcessMemoryDump&&);
+
+  // Creates a new MemoryAllocatorDump with the given name and returns the
+  // empty object back to the caller.
+  // Arguments:
+  //   absolute_name: a name that uniquely identifies allocator dumps produced
+  //       by this provider. It is possible to specify nesting by using a
+  //       path-like string (e.g., v8/isolate1/heap1, v8/isolate1/heap2).
+  //       Leading or trailing slashes are not allowed.
+  //   guid: an optional identifier, unique among all processes within the
+  //       scope of a global dump. This is only relevant when using
+  //       AddOwnershipEdge() to express memory sharing. If omitted,
+  //       it will be automatically generated.
+  // ProcessMemoryDump handles the memory ownership of its MemoryAllocatorDumps.
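+  //
+  // Example (an illustrative sketch; |pmd|, |heap_size|, and the dump name
+  // "myallocator/heap" are hypothetical):
+  //
+  //   MemoryAllocatorDump* dump =
+  //       pmd->CreateAllocatorDump("myallocator/heap");
+  //   dump->AddScalar(MemoryAllocatorDump::kNameSize,
+  //                   MemoryAllocatorDump::kUnitsBytes, heap_size);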
+  MemoryAllocatorDump* CreateAllocatorDump(const std::string& absolute_name);
+  MemoryAllocatorDump* CreateAllocatorDump(const std::string& absolute_name,
+                                           const MemoryAllocatorDumpGuid& guid);
+
+  // Looks up a MemoryAllocatorDump given its allocator and heap names, or
+  // nullptr if not found.
+  MemoryAllocatorDump* GetAllocatorDump(const std::string& absolute_name) const;
+
+  // Do NOT use this method. All dump providers should use
+  // CreateAllocatorDump(). Tries to create a new MemoryAllocatorDump only if
+  // it doesn't already exist. Creating multiple dumps with the same name
+  // using GetOrCreateAllocatorDump() would override the existing scalars in
+  // the MAD and cause misreporting. This method is used only in rare cases,
+  // where multiple components create allocator dumps with the same name and
+  // only one of them adds size.
+  MemoryAllocatorDump* GetOrCreateAllocatorDump(
+      const std::string& absolute_name);
+
+  // Creates a shared MemoryAllocatorDump, to express cross-process sharing.
+  // Shared allocator dumps are allowed to have duplicate guids within the
+  // global scope, in order to reference the same dump from multiple processes.
+  // See the design doc goo.gl/keU6Bf for reference usage patterns.
+  MemoryAllocatorDump* CreateSharedGlobalAllocatorDump(
+      const MemoryAllocatorDumpGuid& guid);
+
+  // Creates a shared MemoryAllocatorDump as CreateSharedGlobalAllocatorDump,
+  // but with a WEAK flag. A weak dump will be discarded unless a non-weak dump
+  // is created using CreateSharedGlobalAllocatorDump by at least one process.
+  // The WEAK flag does not apply if a non-weak dump with the same GUID already
+  // exists or is created later. All owners and children of the discarded dump
+  // will also be discarded transitively.
+  MemoryAllocatorDump* CreateWeakSharedGlobalAllocatorDump(
+      const MemoryAllocatorDumpGuid& guid);
+
+  // Looks up a shared MemoryAllocatorDump given its guid.
+  MemoryAllocatorDump* GetSharedGlobalAllocatorDump(
+      const MemoryAllocatorDumpGuid& guid) const;
+
+  // Returns the map of the MemoryAllocatorDumps added to this dump.
+  const AllocatorDumpsMap& allocator_dumps() const { return allocator_dumps_; }
+
+  AllocatorDumpsMap* mutable_allocator_dumps_for_serialization() const {
+    // Mojo takes a const input argument even for move-only types that can be
+    // mutated while serializing (like this one). Hence the const_cast.
+    return const_cast<AllocatorDumpsMap*>(&allocator_dumps_);
+  }
+  void SetAllocatorDumpsForSerialization(
+      std::vector<std::unique_ptr<MemoryAllocatorDump>>);
+
+  // Only for mojo serialization.
+  std::vector<MemoryAllocatorDumpEdge> GetAllEdgesForSerialization() const;
+  void SetAllEdgesForSerialization(const std::vector<MemoryAllocatorDumpEdge>&);
+
+  // Dumps heap usage with |allocator_name|.
+  void DumpHeapUsage(
+      const std::unordered_map<base::trace_event::AllocationContext,
+                               base::trace_event::AllocationMetrics>&
+          metrics_by_context,
+      base::trace_event::TraceEventMemoryOverhead& overhead,
+      const char* allocator_name);
+
+  // Adds an ownership relationship between two MemoryAllocatorDump(s) with the
+  // semantics: |source| owns |target|, and has the effect of attributing
+  // the memory usage of |target| to |source|. |importance| is optional and
+  // relevant only for the cases of co-ownership, where it acts as a z-index:
+  // the owner with the highest importance will be attributed |target|'s memory.
+  // If an edge is present, its importance will not be updated unless
+  // |importance| is larger.
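+  //
+  // For example, if dumps A and B both add ownership edges to a shared
+  // buffer dump S, with importance 2 and 1 respectively, then S's memory is
+  // attributed to A, the owner with the higher importance.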
+  void AddOwnershipEdge(const MemoryAllocatorDumpGuid& source,
+                        const MemoryAllocatorDumpGuid& target,
+                        int importance);
+  void AddOwnershipEdge(const MemoryAllocatorDumpGuid& source,
+                        const MemoryAllocatorDumpGuid& target);
+
+  // Adds edges that can be overridden by a later or earlier call to
+  // AddOwnershipEdge() with the same source and target but a different
+  // |importance| value.
+  void AddOverridableOwnershipEdge(const MemoryAllocatorDumpGuid& source,
+                                   const MemoryAllocatorDumpGuid& target,
+                                   int importance);
+
+  // Creates ownership edges for memory backed by base::SharedMemory. Handles
+  // the case of cross-process sharing and the importance of ownership for the
+  // cases with and without the base::SharedMemory dump provider. The new
+  // version should just use global dumps created by SharedMemoryTracker, and
+  // this function handles the transition until we get SharedMemory IDs
+  // through the mojo channel, crbug.com/713763. The weak version creates a
+  // weak global dump.
+  // |client_local_dump_guid| The guid of the local dump created by the client
+  // of base::SharedMemory.
+  // |shared_memory_guid| The ID of the base::SharedMemory that is assigned
+  // globally, used to create global dump edges in the new model.
+  // |importance| Importance of the global dump edges, used to indicate
+  // whether the current process owns the memory segment.
+  void CreateSharedMemoryOwnershipEdge(
+      const MemoryAllocatorDumpGuid& client_local_dump_guid,
+      const UnguessableToken& shared_memory_guid,
+      int importance);
+  void CreateWeakSharedMemoryOwnershipEdge(
+      const MemoryAllocatorDumpGuid& client_local_dump_guid,
+      const UnguessableToken& shared_memory_guid,
+      int importance);
+
+  const AllocatorDumpEdgesMap& allocator_dumps_edges() const {
+    return allocator_dumps_edges_;
+  }
+
+  // Utility method to add a suballocation relationship with the following
+  // semantics: |source| is suballocated from |target_node_name|.
+  // This creates a child node of |target_node_name| and adds an ownership edge
+  // between |source| and the new child node. As a result, the UI will not
+  // account the memory of |source| in the target node.
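+  //
+  // E.g. AddSuballocation(source, "malloc") creates a child dump named
+  // "malloc/__<source-guid>" and adds an ownership edge from |source| to it.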
+  void AddSuballocation(const MemoryAllocatorDumpGuid& source,
+                        const std::string& target_node_name);
+
+  const scoped_refptr<HeapProfilerSerializationState>&
+  heap_profiler_serialization_state() const {
+    return heap_profiler_serialization_state_;
+  }
+
+  // Removes all the MemoryAllocatorDump(s) contained in this instance. This
+  // ProcessMemoryDump can be safely reused as if it was new once this returns.
+  void Clear();
+
+  // Merges all MemoryAllocatorDump(s) contained in |other| inside this
+  // ProcessMemoryDump, transferring their ownership to this instance.
+  // |other| will be an empty ProcessMemoryDump after this method returns.
+  // This is to allow dump providers to pre-populate ProcessMemoryDump instances
+  // and later move their contents into the ProcessMemoryDump passed as argument
+  // of the MemoryDumpProvider::OnMemoryDump(ProcessMemoryDump*) callback.
+  void TakeAllDumpsFrom(ProcessMemoryDump* other);
+
+  // Populate the traced value with information about the memory allocator
+  // dumps.
+  void SerializeAllocatorDumpsInto(TracedValue* value) const;
+
+  // Populate the traced value with information about the heap profiler.
+  void SerializeHeapProfilerDumpsInto(TracedValue* value) const;
+
+  const HeapDumpsMap& heap_dumps() const { return heap_dumps_; }
+
+  const MemoryDumpArgs& dump_args() const { return dump_args_; }
+
+ private:
+  FRIEND_TEST_ALL_PREFIXES(ProcessMemoryDumpTest, BackgroundModeTest);
+  FRIEND_TEST_ALL_PREFIXES(ProcessMemoryDumpTest, SharedMemoryOwnershipTest);
+  FRIEND_TEST_ALL_PREFIXES(ProcessMemoryDumpTest, GuidsTest);
+
+  MemoryAllocatorDump* AddAllocatorDumpInternal(
+      std::unique_ptr<MemoryAllocatorDump> mad);
+
+  // A per-process token, valid throughout the lifetime of the current
+  // process, used to disambiguate dumps with the same name generated in
+  // different processes.
+  const UnguessableToken& process_token() const { return process_token_; }
+  void set_process_token_for_testing(UnguessableToken token) {
+    process_token_ = token;
+  }
+
+  // Returns the GUID of the dump for the given |absolute_name|, for the
+  // given process's token. The process token is used to disambiguate GUIDs
+  // derived from the same name under different processes.
+  MemoryAllocatorDumpGuid GetDumpId(const std::string& absolute_name);
+
+  void CreateSharedMemoryOwnershipEdgeInternal(
+      const MemoryAllocatorDumpGuid& client_local_dump_guid,
+      const UnguessableToken& shared_memory_guid,
+      int importance,
+      bool is_weak);
+
+  MemoryAllocatorDump* GetBlackHoleMad();
+
+  UnguessableToken process_token_;
+  AllocatorDumpsMap allocator_dumps_;
+  HeapDumpsMap heap_dumps_;
+
+  // State shared among all PMD instances created in a given trace session.
+  scoped_refptr<HeapProfilerSerializationState>
+      heap_profiler_serialization_state_;
+
+  // Keeps track of relationships between MemoryAllocatorDump(s).
+  AllocatorDumpEdgesMap allocator_dumps_edges_;
+
+  // Level of detail of the current dump.
+  MemoryDumpArgs dump_args_;
+
+  // This allocator dump is returned when an invalid dump is created in
+  // background mode. The attributes of the dump are ignored and not added to
+  // the trace.
+  std::unique_ptr<MemoryAllocatorDump> black_hole_mad_;
+
+  // When set to true, the DCHECK(s) for invalid dump creations on the
+  // background mode are disabled for testing.
+  static bool is_black_hole_non_fatal_for_testing_;
+
+  DISALLOW_COPY_AND_ASSIGN(ProcessMemoryDump);
+};
+
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_PROCESS_MEMORY_DUMP_H_
diff --git a/base/trace_event/process_memory_dump_unittest.cc b/base/trace_event/process_memory_dump_unittest.cc
new file mode 100644
index 0000000..f1209ca
--- /dev/null
+++ b/base/trace_event/process_memory_dump_unittest.cc
@@ -0,0 +1,584 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/process_memory_dump.h"
+
+#include <stddef.h>
+
+#include "base/memory/aligned_memory.h"
+#include "base/memory/ptr_util.h"
+#include "base/memory/shared_memory_tracker.h"
+#include "base/process/process_metrics.h"
+#include "base/trace_event/memory_allocator_dump_guid.h"
+#include "base/trace_event/memory_infra_background_whitelist.h"
+#include "base/trace_event/trace_event_argument.h"
+#include "base/trace_event/trace_log.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#include "winbase.h"
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include <sys/mman.h>
+#endif
+
+#if defined(OS_IOS)
+#include "base/ios/ios_util.h"
+#endif
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+const MemoryDumpArgs kDetailedDumpArgs = {MemoryDumpLevelOfDetail::DETAILED};
+const char* const kTestDumpNameWhitelist[] = {
+    "Whitelisted/TestName", "Whitelisted/TestName_0x?",
+    "Whitelisted/0x?/TestName", "Whitelisted/0x?", nullptr};
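+
+// In the whitelist entries above, a "0x?" component acts as a wildcard for a
+// hex value; e.g. "Whitelisted/0x?/TestName" is expected to match
+// "Whitelisted/0xaB/TestName" (see BackgroundModeTest below).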
+
+TracedValue* GetHeapDump(const ProcessMemoryDump& pmd, const char* name) {
+  auto it = pmd.heap_dumps().find(name);
+  return it == pmd.heap_dumps().end() ? nullptr : it->second.get();
+}
+
+void* Map(size_t size) {
+#if defined(OS_WIN)
+  return ::VirtualAlloc(nullptr, size, MEM_RESERVE | MEM_COMMIT,
+                        PAGE_READWRITE);
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  return ::mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
+                0, 0);
+#else
+#error This architecture is not (yet) supported.
+#endif
+}
+
+void Unmap(void* addr, size_t size) {
+#if defined(OS_WIN)
+  ::VirtualFree(addr, 0, MEM_DECOMMIT);
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+  ::munmap(addr, size);
+#else
+#error This architecture is not (yet) supported.
+#endif
+}
+
+}  // namespace
+
+TEST(ProcessMemoryDumpTest, MoveConstructor) {
+  auto heap_state = MakeRefCounted<HeapProfilerSerializationState>();
+  heap_state->SetStackFrameDeduplicator(
+      std::make_unique<StackFrameDeduplicator>());
+  heap_state->SetTypeNameDeduplicator(std::make_unique<TypeNameDeduplicator>());
+
+  ProcessMemoryDump pmd1 = ProcessMemoryDump(heap_state, kDetailedDumpArgs);
+  pmd1.CreateAllocatorDump("mad1");
+  pmd1.CreateAllocatorDump("mad2");
+  pmd1.AddOwnershipEdge(MemoryAllocatorDumpGuid(42),
+                        MemoryAllocatorDumpGuid(4242));
+
+  ProcessMemoryDump pmd2(std::move(pmd1));
+
+  EXPECT_EQ(1u, pmd2.allocator_dumps().count("mad1"));
+  EXPECT_EQ(1u, pmd2.allocator_dumps().count("mad2"));
+  EXPECT_EQ(MemoryDumpLevelOfDetail::DETAILED,
+            pmd2.dump_args().level_of_detail);
+  EXPECT_EQ(1u, pmd2.allocator_dumps_edges().size());
+  EXPECT_EQ(heap_state.get(), pmd2.heap_profiler_serialization_state().get());
+
+  // Check that calling serialization routines doesn't cause a crash.
+  auto traced_value = std::make_unique<TracedValue>();
+  pmd2.SerializeAllocatorDumpsInto(traced_value.get());
+  pmd2.SerializeHeapProfilerDumpsInto(traced_value.get());
+}
+
+TEST(ProcessMemoryDumpTest, MoveAssignment) {
+  auto heap_state = MakeRefCounted<HeapProfilerSerializationState>();
+  heap_state->SetStackFrameDeduplicator(
+      std::make_unique<StackFrameDeduplicator>());
+  heap_state->SetTypeNameDeduplicator(std::make_unique<TypeNameDeduplicator>());
+
+  ProcessMemoryDump pmd1 = ProcessMemoryDump(heap_state, kDetailedDumpArgs);
+  pmd1.CreateAllocatorDump("mad1");
+  pmd1.CreateAllocatorDump("mad2");
+  pmd1.AddOwnershipEdge(MemoryAllocatorDumpGuid(42),
+                        MemoryAllocatorDumpGuid(4242));
+
+  ProcessMemoryDump pmd2(nullptr, {MemoryDumpLevelOfDetail::BACKGROUND});
+  pmd2.CreateAllocatorDump("malloc");
+
+  pmd2 = std::move(pmd1);
+  EXPECT_EQ(1u, pmd2.allocator_dumps().count("mad1"));
+  EXPECT_EQ(1u, pmd2.allocator_dumps().count("mad2"));
+  EXPECT_EQ(0u, pmd2.allocator_dumps().count("malloc"));
+  EXPECT_EQ(MemoryDumpLevelOfDetail::DETAILED,
+            pmd2.dump_args().level_of_detail);
+  EXPECT_EQ(1u, pmd2.allocator_dumps_edges().size());
+  EXPECT_EQ(heap_state.get(), pmd2.heap_profiler_serialization_state().get());
+
+  // Check that calling serialization routines doesn't cause a crash.
+  auto traced_value = std::make_unique<TracedValue>();
+  pmd2.SerializeAllocatorDumpsInto(traced_value.get());
+  pmd2.SerializeHeapProfilerDumpsInto(traced_value.get());
+}
+
+TEST(ProcessMemoryDumpTest, Clear) {
+  std::unique_ptr<ProcessMemoryDump> pmd1(
+      new ProcessMemoryDump(nullptr, kDetailedDumpArgs));
+  pmd1->CreateAllocatorDump("mad1");
+  pmd1->CreateAllocatorDump("mad2");
+  ASSERT_FALSE(pmd1->allocator_dumps().empty());
+
+  pmd1->AddOwnershipEdge(MemoryAllocatorDumpGuid(42),
+                         MemoryAllocatorDumpGuid(4242));
+
+  MemoryAllocatorDumpGuid shared_mad_guid1(1);
+  MemoryAllocatorDumpGuid shared_mad_guid2(2);
+  pmd1->CreateSharedGlobalAllocatorDump(shared_mad_guid1);
+  pmd1->CreateSharedGlobalAllocatorDump(shared_mad_guid2);
+
+  pmd1->Clear();
+  ASSERT_TRUE(pmd1->allocator_dumps().empty());
+  ASSERT_TRUE(pmd1->allocator_dumps_edges().empty());
+  ASSERT_EQ(nullptr, pmd1->GetAllocatorDump("mad1"));
+  ASSERT_EQ(nullptr, pmd1->GetAllocatorDump("mad2"));
+  ASSERT_EQ(nullptr, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid1));
+  ASSERT_EQ(nullptr, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid2));
+
+  // Check that calling serialization routines doesn't cause a crash.
+  auto traced_value = std::make_unique<TracedValue>();
+  pmd1->SerializeAllocatorDumpsInto(traced_value.get());
+  pmd1->SerializeHeapProfilerDumpsInto(traced_value.get());
+
+  // Check that the pmd can be reused and behaves as expected.
+  auto* mad1 = pmd1->CreateAllocatorDump("mad1");
+  auto* mad3 = pmd1->CreateAllocatorDump("mad3");
+  auto* shared_mad1 = pmd1->CreateSharedGlobalAllocatorDump(shared_mad_guid1);
+  auto* shared_mad2 =
+      pmd1->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid2);
+  ASSERT_EQ(4u, pmd1->allocator_dumps().size());
+  ASSERT_EQ(mad1, pmd1->GetAllocatorDump("mad1"));
+  ASSERT_EQ(nullptr, pmd1->GetAllocatorDump("mad2"));
+  ASSERT_EQ(mad3, pmd1->GetAllocatorDump("mad3"));
+  ASSERT_EQ(shared_mad1, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid1));
+  ASSERT_EQ(MemoryAllocatorDump::Flags::DEFAULT, shared_mad1->flags());
+  ASSERT_EQ(shared_mad2, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid2));
+  ASSERT_EQ(MemoryAllocatorDump::Flags::WEAK, shared_mad2->flags());
+
+  traced_value.reset(new TracedValue);
+  pmd1->SerializeAllocatorDumpsInto(traced_value.get());
+  pmd1->SerializeHeapProfilerDumpsInto(traced_value.get());
+
+  pmd1.reset();
+}
+
+TEST(ProcessMemoryDumpTest, TakeAllDumpsFrom) {
+  std::unique_ptr<TracedValue> traced_value(new TracedValue);
+  std::unordered_map<AllocationContext, AllocationMetrics> metrics_by_context;
+  metrics_by_context[AllocationContext()] = {1, 1};
+  TraceEventMemoryOverhead overhead;
+
+  scoped_refptr<HeapProfilerSerializationState>
+      heap_profiler_serialization_state = new HeapProfilerSerializationState;
+  heap_profiler_serialization_state->SetStackFrameDeduplicator(
+      WrapUnique(new StackFrameDeduplicator));
+  heap_profiler_serialization_state->SetTypeNameDeduplicator(
+      WrapUnique(new TypeNameDeduplicator));
+  std::unique_ptr<ProcessMemoryDump> pmd1(new ProcessMemoryDump(
+      heap_profiler_serialization_state.get(), kDetailedDumpArgs));
+  auto* mad1_1 = pmd1->CreateAllocatorDump("pmd1/mad1");
+  auto* mad1_2 = pmd1->CreateAllocatorDump("pmd1/mad2");
+  pmd1->AddOwnershipEdge(mad1_1->guid(), mad1_2->guid());
+  pmd1->DumpHeapUsage(metrics_by_context, overhead, "pmd1/heap_dump1");
+  pmd1->DumpHeapUsage(metrics_by_context, overhead, "pmd1/heap_dump2");
+
+  std::unique_ptr<ProcessMemoryDump> pmd2(new ProcessMemoryDump(
+      heap_profiler_serialization_state.get(), kDetailedDumpArgs));
+  auto* mad2_1 = pmd2->CreateAllocatorDump("pmd2/mad1");
+  auto* mad2_2 = pmd2->CreateAllocatorDump("pmd2/mad2");
+  pmd2->AddOwnershipEdge(mad2_1->guid(), mad2_2->guid());
+  pmd2->DumpHeapUsage(metrics_by_context, overhead, "pmd2/heap_dump1");
+  pmd2->DumpHeapUsage(metrics_by_context, overhead, "pmd2/heap_dump2");
+
+  MemoryAllocatorDumpGuid shared_mad_guid1(1);
+  MemoryAllocatorDumpGuid shared_mad_guid2(2);
+  auto* shared_mad1 = pmd2->CreateSharedGlobalAllocatorDump(shared_mad_guid1);
+  auto* shared_mad2 =
+      pmd2->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid2);
+
+  pmd1->TakeAllDumpsFrom(pmd2.get());
+
+  // Make sure that pmd2 is empty but still usable after it has been emptied.
+  ASSERT_TRUE(pmd2->allocator_dumps().empty());
+  ASSERT_TRUE(pmd2->allocator_dumps_edges().empty());
+  ASSERT_TRUE(pmd2->heap_dumps().empty());
+  pmd2->CreateAllocatorDump("pmd2/this_mad_stays_with_pmd2");
+  ASSERT_EQ(1u, pmd2->allocator_dumps().size());
+  ASSERT_EQ(1u, pmd2->allocator_dumps().count("pmd2/this_mad_stays_with_pmd2"));
+  pmd2->AddOwnershipEdge(MemoryAllocatorDumpGuid(42),
+                         MemoryAllocatorDumpGuid(4242));
+
+  // Check that calling serialization routines doesn't cause a crash.
+  pmd2->SerializeAllocatorDumpsInto(traced_value.get());
+  pmd2->SerializeHeapProfilerDumpsInto(traced_value.get());
+
+  // Free |pmd2| to check that the ownership of the two MADs has been
+  // transferred to |pmd1|.
+  pmd2.reset();
+
+  // Now check that |pmd1| has been effectively merged.
+  ASSERT_EQ(6u, pmd1->allocator_dumps().size());
+  ASSERT_EQ(1u, pmd1->allocator_dumps().count("pmd1/mad1"));
+  ASSERT_EQ(1u, pmd1->allocator_dumps().count("pmd1/mad2"));
+  ASSERT_EQ(1u, pmd1->allocator_dumps().count("pmd2/mad1"));
+  ASSERT_EQ(1u, pmd1->allocator_dumps().count("pmd2/mad2"));
+  ASSERT_EQ(2u, pmd1->allocator_dumps_edges().size());
+  ASSERT_EQ(shared_mad1, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid1));
+  ASSERT_EQ(shared_mad2, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid2));
+  ASSERT_TRUE(MemoryAllocatorDump::Flags::WEAK & shared_mad2->flags());
+  ASSERT_EQ(4u, pmd1->heap_dumps().size());
+  ASSERT_TRUE(GetHeapDump(*pmd1, "pmd1/heap_dump1") != nullptr);
+  ASSERT_TRUE(GetHeapDump(*pmd1, "pmd1/heap_dump2") != nullptr);
+  ASSERT_TRUE(GetHeapDump(*pmd1, "pmd2/heap_dump1") != nullptr);
+  ASSERT_TRUE(GetHeapDump(*pmd1, "pmd2/heap_dump2") != nullptr);
+
+  // Check that calling serialization routines doesn't cause a crash.
+  traced_value.reset(new TracedValue);
+  pmd1->SerializeAllocatorDumpsInto(traced_value.get());
+  pmd1->SerializeHeapProfilerDumpsInto(traced_value.get());
+
+  pmd1.reset();
+}
+
+TEST(ProcessMemoryDumpTest, OverrideOwnershipEdge) {
+  std::unique_ptr<ProcessMemoryDump> pmd(
+      new ProcessMemoryDump(nullptr, kDetailedDumpArgs));
+
+  auto* shm_dump1 = pmd->CreateAllocatorDump("shared_mem/seg1");
+  auto* shm_dump2 = pmd->CreateAllocatorDump("shared_mem/seg2");
+  auto* shm_dump3 = pmd->CreateAllocatorDump("shared_mem/seg3");
+  auto* shm_dump4 = pmd->CreateAllocatorDump("shared_mem/seg4");
+
+  // Create a few child dumps and link them to the shared memory segments
+  // above with a mix of overridable and non-overridable ownership edges.
+  auto* child1_dump = pmd->CreateAllocatorDump("shared_mem/child/seg1");
+  pmd->AddOverridableOwnershipEdge(child1_dump->guid(), shm_dump1->guid(),
+                                   0 /* importance */);
+  auto* child2_dump = pmd->CreateAllocatorDump("shared_mem/child/seg2");
+  pmd->AddOwnershipEdge(child2_dump->guid(), shm_dump2->guid(),
+                        3 /* importance */);
+  MemoryAllocatorDumpGuid shared_mad_guid(1);
+  pmd->CreateSharedGlobalAllocatorDump(shared_mad_guid);
+  pmd->AddOverridableOwnershipEdge(shm_dump3->guid(), shared_mad_guid,
+                                   0 /* importance */);
+  auto* child4_dump = pmd->CreateAllocatorDump("shared_mem/child/seg4");
+  pmd->AddOverridableOwnershipEdge(child4_dump->guid(), shm_dump4->guid(),
+                                   4 /* importance */);
+
+  const ProcessMemoryDump::AllocatorDumpEdgesMap& edges =
+      pmd->allocator_dumps_edges();
+  EXPECT_EQ(4u, edges.size());
+  EXPECT_EQ(shm_dump1->guid(), edges.find(child1_dump->guid())->second.target);
+  EXPECT_EQ(0, edges.find(child1_dump->guid())->second.importance);
+  EXPECT_TRUE(edges.find(child1_dump->guid())->second.overridable);
+  EXPECT_EQ(shm_dump2->guid(), edges.find(child2_dump->guid())->second.target);
+  EXPECT_EQ(3, edges.find(child2_dump->guid())->second.importance);
+  EXPECT_FALSE(edges.find(child2_dump->guid())->second.overridable);
+  EXPECT_EQ(shared_mad_guid, edges.find(shm_dump3->guid())->second.target);
+  EXPECT_EQ(0, edges.find(shm_dump3->guid())->second.importance);
+  EXPECT_TRUE(edges.find(shm_dump3->guid())->second.overridable);
+  EXPECT_EQ(shm_dump4->guid(), edges.find(child4_dump->guid())->second.target);
+  EXPECT_EQ(4, edges.find(child4_dump->guid())->second.importance);
+  EXPECT_TRUE(edges.find(child4_dump->guid())->second.overridable);
+
+  // These should override old edges:
+  pmd->AddOwnershipEdge(child1_dump->guid(), shm_dump1->guid(),
+                        1 /* importance */);
+  pmd->AddOwnershipEdge(shm_dump3->guid(), shared_mad_guid, 2 /* importance */);
+  // These should not change the old edges.
+  pmd->AddOverridableOwnershipEdge(child2_dump->guid(), shm_dump2->guid(),
+                                   0 /* importance */);
+  pmd->AddOwnershipEdge(child4_dump->guid(), shm_dump4->guid(),
+                        0 /* importance */);
+
+  EXPECT_EQ(4u, edges.size());
+  EXPECT_EQ(shm_dump1->guid(), edges.find(child1_dump->guid())->second.target);
+  EXPECT_EQ(1, edges.find(child1_dump->guid())->second.importance);
+  EXPECT_FALSE(edges.find(child1_dump->guid())->second.overridable);
+  EXPECT_EQ(shm_dump2->guid(), edges.find(child2_dump->guid())->second.target);
+  EXPECT_EQ(3, edges.find(child2_dump->guid())->second.importance);
+  EXPECT_FALSE(edges.find(child2_dump->guid())->second.overridable);
+  EXPECT_EQ(shared_mad_guid, edges.find(shm_dump3->guid())->second.target);
+  EXPECT_EQ(2, edges.find(shm_dump3->guid())->second.importance);
+  EXPECT_FALSE(edges.find(shm_dump3->guid())->second.overridable);
+  EXPECT_EQ(shm_dump4->guid(), edges.find(child4_dump->guid())->second.target);
+  EXPECT_EQ(4, edges.find(child4_dump->guid())->second.importance);
+  EXPECT_FALSE(edges.find(child4_dump->guid())->second.overridable);
+}
+
+TEST(ProcessMemoryDumpTest, Suballocations) {
+  std::unique_ptr<ProcessMemoryDump> pmd(
+      new ProcessMemoryDump(nullptr, kDetailedDumpArgs));
+  const std::string allocator_dump_name = "fakealloc/allocated_objects";
+  pmd->CreateAllocatorDump(allocator_dump_name);
+
+  // Create one allocation with an auto-assigned guid and mark it as a
+  // suballocation of "fakealloc/allocated_objects".
+  auto* pic1_dump = pmd->CreateAllocatorDump("picturemanager/picture1");
+  pmd->AddSuballocation(pic1_dump->guid(), allocator_dump_name);
+
+  // Same here, but this time create an allocation with an explicit guid.
+  auto* pic2_dump = pmd->CreateAllocatorDump("picturemanager/picture2",
+                                             MemoryAllocatorDumpGuid(0x42));
+  pmd->AddSuballocation(pic2_dump->guid(), allocator_dump_name);
+
+  // Now check that AddSuballocation() has created anonymous child dumps under
+  // "fakealloc/allocated_objects".
+  auto anon_node_1_it = pmd->allocator_dumps().find(
+      allocator_dump_name + "/__" + pic1_dump->guid().ToString());
+  ASSERT_NE(pmd->allocator_dumps().end(), anon_node_1_it);
+
+  auto anon_node_2_it =
+      pmd->allocator_dumps().find(allocator_dump_name + "/__42");
+  ASSERT_NE(pmd->allocator_dumps().end(), anon_node_2_it);
+
+  // Finally, check that AddSuballocation() has also created the edges
+  // between the pictures and the anonymous allocator child dumps.
+  bool found_edge[2]{false, false};
+  for (const auto& e : pmd->allocator_dumps_edges()) {
+    found_edge[0] |= (e.first == pic1_dump->guid() &&
+                      e.second.target == anon_node_1_it->second->guid());
+    found_edge[1] |= (e.first == pic2_dump->guid() &&
+                      e.second.target == anon_node_2_it->second->guid());
+  }
+  ASSERT_TRUE(found_edge[0]);
+  ASSERT_TRUE(found_edge[1]);
+
+  // Check that calling serialization routines doesn't cause a crash.
+  std::unique_ptr<TracedValue> traced_value(new TracedValue);
+  pmd->SerializeAllocatorDumpsInto(traced_value.get());
+  pmd->SerializeHeapProfilerDumpsInto(traced_value.get());
+
+  pmd.reset();
+}
+
+TEST(ProcessMemoryDumpTest, GlobalAllocatorDumpTest) {
+  std::unique_ptr<ProcessMemoryDump> pmd(
+      new ProcessMemoryDump(nullptr, kDetailedDumpArgs));
+  MemoryAllocatorDumpGuid shared_mad_guid(1);
+  auto* shared_mad1 = pmd->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid);
+  ASSERT_EQ(shared_mad_guid, shared_mad1->guid());
+  ASSERT_EQ(MemoryAllocatorDump::Flags::WEAK, shared_mad1->flags());
+
+  auto* shared_mad2 = pmd->GetSharedGlobalAllocatorDump(shared_mad_guid);
+  ASSERT_EQ(shared_mad1, shared_mad2);
+  ASSERT_EQ(MemoryAllocatorDump::Flags::WEAK, shared_mad1->flags());
+
+  auto* shared_mad3 = pmd->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid);
+  ASSERT_EQ(shared_mad1, shared_mad3);
+  ASSERT_EQ(MemoryAllocatorDump::Flags::WEAK, shared_mad1->flags());
+
+  auto* shared_mad4 = pmd->CreateSharedGlobalAllocatorDump(shared_mad_guid);
+  ASSERT_EQ(shared_mad1, shared_mad4);
+  ASSERT_EQ(MemoryAllocatorDump::Flags::DEFAULT, shared_mad1->flags());
+
+  auto* shared_mad5 = pmd->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid);
+  ASSERT_EQ(shared_mad1, shared_mad5);
+  ASSERT_EQ(MemoryAllocatorDump::Flags::DEFAULT, shared_mad1->flags());
+}
+
+TEST(ProcessMemoryDumpTest, SharedMemoryOwnershipTest) {
+  std::unique_ptr<ProcessMemoryDump> pmd(
+      new ProcessMemoryDump(nullptr, kDetailedDumpArgs));
+  const ProcessMemoryDump::AllocatorDumpEdgesMap& edges =
+      pmd->allocator_dumps_edges();
+
+  auto* client_dump2 = pmd->CreateAllocatorDump("discardable/segment2");
+  auto shm_token2 = UnguessableToken::Create();
+  MemoryAllocatorDumpGuid shm_local_guid2 =
+      pmd->GetDumpId(SharedMemoryTracker::GetDumpNameForTracing(shm_token2));
+  MemoryAllocatorDumpGuid shm_global_guid2 =
+      SharedMemoryTracker::GetGlobalDumpIdForTracing(shm_token2);
+  pmd->AddOverridableOwnershipEdge(shm_local_guid2, shm_global_guid2,
+                                   0 /* importance */);
+
+  pmd->CreateSharedMemoryOwnershipEdge(client_dump2->guid(), shm_token2,
+                                       1 /* importance */);
+  EXPECT_EQ(2u, edges.size());
+
+  EXPECT_EQ(shm_global_guid2, edges.find(shm_local_guid2)->second.target);
+  EXPECT_EQ(1, edges.find(shm_local_guid2)->second.importance);
+  EXPECT_FALSE(edges.find(shm_local_guid2)->second.overridable);
+  EXPECT_EQ(shm_local_guid2, edges.find(client_dump2->guid())->second.target);
+  EXPECT_EQ(1, edges.find(client_dump2->guid())->second.importance);
+  EXPECT_FALSE(edges.find(client_dump2->guid())->second.overridable);
+}
+
+TEST(ProcessMemoryDumpTest, BackgroundModeTest) {
+  MemoryDumpArgs background_args = {MemoryDumpLevelOfDetail::BACKGROUND};
+  std::unique_ptr<ProcessMemoryDump> pmd(
+      new ProcessMemoryDump(nullptr, background_args));
+  ProcessMemoryDump::is_black_hole_non_fatal_for_testing_ = true;
+  SetAllocatorDumpNameWhitelistForTesting(kTestDumpNameWhitelist);
+  MemoryAllocatorDump* black_hole_mad = pmd->GetBlackHoleMad();
+
+  // Invalid dump names.
+  EXPECT_EQ(black_hole_mad,
+            pmd->CreateAllocatorDump("NotWhitelisted/TestName"));
+  EXPECT_EQ(black_hole_mad, pmd->CreateAllocatorDump("TestName"));
+  EXPECT_EQ(black_hole_mad, pmd->CreateAllocatorDump("Whitelisted/Test"));
+  EXPECT_EQ(black_hole_mad,
+            pmd->CreateAllocatorDump("Not/Whitelisted/TestName"));
+  EXPECT_EQ(black_hole_mad,
+            pmd->CreateAllocatorDump("Whitelisted/TestName/Google"));
+  EXPECT_EQ(black_hole_mad,
+            pmd->CreateAllocatorDump("Whitelisted/TestName/0x1a2Google"));
+  EXPECT_EQ(black_hole_mad,
+            pmd->CreateAllocatorDump("Whitelisted/TestName/__12/Google"));
+
+  // Suballocations.
+  MemoryAllocatorDumpGuid guid(1);
+  pmd->AddSuballocation(guid, "malloc/allocated_objects");
+  EXPECT_EQ(0u, pmd->allocator_dumps_edges_.size());
+  EXPECT_EQ(0u, pmd->allocator_dumps_.size());
+
+  // Global dumps.
+  EXPECT_NE(black_hole_mad, pmd->CreateSharedGlobalAllocatorDump(guid));
+  EXPECT_NE(black_hole_mad, pmd->CreateWeakSharedGlobalAllocatorDump(guid));
+  EXPECT_NE(black_hole_mad, pmd->GetSharedGlobalAllocatorDump(guid));
+
+  // Valid dump names.
+  EXPECT_NE(black_hole_mad, pmd->CreateAllocatorDump("Whitelisted/TestName"));
+  EXPECT_NE(black_hole_mad,
+            pmd->CreateAllocatorDump("Whitelisted/TestName_0xA1b2"));
+  EXPECT_NE(black_hole_mad,
+            pmd->CreateAllocatorDump("Whitelisted/0xaB/TestName"));
+
+  // GetAllocatorDump is consistent.
+  EXPECT_EQ(black_hole_mad, pmd->GetAllocatorDump("NotWhitelisted/TestName"));
+  EXPECT_NE(black_hole_mad, pmd->GetAllocatorDump("Whitelisted/TestName"));
+
+  // Test whitelisted entries.
+  ASSERT_TRUE(IsMemoryAllocatorDumpNameWhitelisted("Whitelisted/TestName"));
+
+  // Global dumps should be whitelisted.
+  ASSERT_TRUE(IsMemoryAllocatorDumpNameWhitelisted("global/13456"));
+
+  // Global dumps with non-GUID names should not be whitelisted.
+  ASSERT_FALSE(IsMemoryAllocatorDumpNameWhitelisted("global/random"));
+
+  // Random names should not be whitelisted either.
+  ASSERT_FALSE(IsMemoryAllocatorDumpNameWhitelisted("NotWhitelisted/TestName"));
+
+  // Check hex processing.
+  ASSERT_TRUE(IsMemoryAllocatorDumpNameWhitelisted("Whitelisted/0xA1b2"));
+}
+
+TEST(ProcessMemoryDumpTest, GuidsTest) {
+  MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
+
+  const auto process_token_one = UnguessableToken::Create();
+  const auto process_token_two = UnguessableToken::Create();
+
+  ProcessMemoryDump pmd1(nullptr, dump_args);
+  pmd1.set_process_token_for_testing(process_token_one);
+  MemoryAllocatorDump* mad1 = pmd1.CreateAllocatorDump("foo");
+
+  ProcessMemoryDump pmd2(nullptr, dump_args);
+  pmd2.set_process_token_for_testing(process_token_one);
+  MemoryAllocatorDump* mad2 = pmd2.CreateAllocatorDump("foo");
+
+  // Without an explicit token, the PMD gets a random process token:
+  ProcessMemoryDump pmd3(nullptr, dump_args);
+  MemoryAllocatorDump* mad3 = pmd3.CreateAllocatorDump("foo");
+
+  // PMDs for different processes produce different GUIDs even for the same
+  // names:
+  ProcessMemoryDump pmd4(nullptr, dump_args);
+  pmd4.set_process_token_for_testing(process_token_two);
+  MemoryAllocatorDump* mad4 = pmd4.CreateAllocatorDump("foo");
+
+  ASSERT_EQ(mad1->guid(), mad2->guid());
+
+  ASSERT_NE(mad2->guid(), mad3->guid());
+  ASSERT_NE(mad3->guid(), mad4->guid());
+  ASSERT_NE(mad4->guid(), mad2->guid());
+
+  ASSERT_EQ(mad1->guid(), pmd1.GetDumpId("foo"));
+}
+
+#if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
+TEST(ProcessMemoryDumpTest, CountResidentBytes) {
+  const size_t page_size = ProcessMemoryDump::GetSystemPageSize();
+
+  // Allocate a few pages of dirty memory and check that they are resident.
+  const size_t size1 = 5 * page_size;
+  void* memory1 = Map(size1);
+  memset(memory1, 0, size1);
+  size_t res1 = ProcessMemoryDump::CountResidentBytes(memory1, size1);
+  ASSERT_EQ(res1, size1);
+  Unmap(memory1, size1);
+
+  // Allocate a large memory segment (> 8 MiB).
+  const size_t kVeryLargeMemorySize = 15 * 1024 * 1024;
+  void* memory2 = Map(kVeryLargeMemorySize);
+  memset(memory2, 0, kVeryLargeMemorySize);
+  size_t res2 =
+      ProcessMemoryDump::CountResidentBytes(memory2, kVeryLargeMemorySize);
+  ASSERT_EQ(res2, kVeryLargeMemorySize);
+  Unmap(memory2, kVeryLargeMemorySize);
+}
+
+TEST(ProcessMemoryDumpTest, CountResidentBytesInSharedMemory) {
+#if defined(OS_IOS)
+  // TODO(crbug.com/748410): Reenable this test.
+  if (!base::ios::IsRunningOnIOS10OrLater()) {
+    return;
+  }
+#endif
+
+  const size_t page_size = ProcessMemoryDump::GetSystemPageSize();
+
+  // Allocate a few pages of dirty memory and check that they are resident.
+  const size_t size1 = 5 * page_size;
+  SharedMemory shared_memory1;
+  shared_memory1.CreateAndMapAnonymous(size1);
+  memset(shared_memory1.memory(), 0, size1);
+  base::Optional<size_t> res1 =
+      ProcessMemoryDump::CountResidentBytesInSharedMemory(
+          shared_memory1.memory(), shared_memory1.mapped_size());
+  ASSERT_TRUE(res1.has_value());
+  ASSERT_EQ(res1.value(), size1);
+  shared_memory1.Unmap();
+  shared_memory1.Close();
+
+  // Allocate a large memory segment (> 8 MiB).
+  const size_t kVeryLargeMemorySize = 15 * 1024 * 1024;
+  SharedMemory shared_memory2;
+  shared_memory2.CreateAndMapAnonymous(kVeryLargeMemorySize);
+  memset(shared_memory2.memory(), 0, kVeryLargeMemorySize);
+  base::Optional<size_t> res2 =
+      ProcessMemoryDump::CountResidentBytesInSharedMemory(
+          shared_memory2.memory(), shared_memory2.mapped_size());
+  ASSERT_TRUE(res2.has_value());
+  ASSERT_EQ(res2.value(), kVeryLargeMemorySize);
+  shared_memory2.Unmap();
+  shared_memory2.Close();
+
+  // Allocate a large memory segment, but touch about half of all pages.
+  const size_t kTouchedMemorySize = 7 * 1024 * 1024;
+  SharedMemory shared_memory3;
+  shared_memory3.CreateAndMapAnonymous(kVeryLargeMemorySize);
+  memset(shared_memory3.memory(), 0, kTouchedMemorySize);
+  base::Optional<size_t> res3 =
+      ProcessMemoryDump::CountResidentBytesInSharedMemory(
+          shared_memory3.memory(), shared_memory3.mapped_size());
+  ASSERT_TRUE(res3.has_value());
+  ASSERT_EQ(res3.value(), kTouchedMemorySize);
+  shared_memory3.Unmap();
+  shared_memory3.Close();
+}
+#endif  // defined(COUNT_RESIDENT_BYTES_SUPPORTED)
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/trace_buffer.cc b/base/trace_event/trace_buffer.cc
new file mode 100644
index 0000000..8de470f
--- /dev/null
+++ b/base/trace_event/trace_buffer.cc
@@ -0,0 +1,347 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_buffer.h"
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "base/macros.h"
+#include "base/trace_event/heap_profiler.h"
+#include "base/trace_event/trace_event_impl.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+class TraceBufferRingBuffer : public TraceBuffer {
+ public:
+  TraceBufferRingBuffer(size_t max_chunks)
+      : max_chunks_(max_chunks),
+        recyclable_chunks_queue_(new size_t[queue_capacity()]),
+        queue_head_(0),
+        queue_tail_(max_chunks),
+        current_iteration_index_(0),
+        current_chunk_seq_(1) {
+    chunks_.reserve(max_chunks);
+    for (size_t i = 0; i < max_chunks; ++i)
+      recyclable_chunks_queue_[i] = i;
+  }
+
+  std::unique_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
+    HEAP_PROFILER_SCOPED_IGNORE;
+
+    // Because the number of threads is much less than the number of chunks,
+    // the queue should never be empty.
+    DCHECK(!QueueIsEmpty());
+
+    *index = recyclable_chunks_queue_[queue_head_];
+    queue_head_ = NextQueueIndex(queue_head_);
+    current_iteration_index_ = queue_head_;
+
+    if (*index >= chunks_.size())
+      chunks_.resize(*index + 1);
+
+    TraceBufferChunk* chunk = chunks_[*index].release();
+    chunks_[*index] = nullptr;  // Put nullptr in the slot of an in-flight chunk.
+    if (chunk)
+      chunk->Reset(current_chunk_seq_++);
+    else
+      chunk = new TraceBufferChunk(current_chunk_seq_++);
+
+    return std::unique_ptr<TraceBufferChunk>(chunk);
+  }
+
+  void ReturnChunk(size_t index,
+                   std::unique_ptr<TraceBufferChunk> chunk) override {
+    // When this method is called, the queue should not be full because it
+    // has enough capacity to hold all chunks, including the one being
+    // returned.
+    DCHECK(!QueueIsFull());
+    DCHECK(chunk);
+    DCHECK_LT(index, chunks_.size());
+    DCHECK(!chunks_[index]);
+    chunks_[index] = std::move(chunk);
+    recyclable_chunks_queue_[queue_tail_] = index;
+    queue_tail_ = NextQueueIndex(queue_tail_);
+  }
+
+  bool IsFull() const override { return false; }
+
+  size_t Size() const override {
+    // This is approximate because not all of the chunks are full.
+    return chunks_.size() * TraceBufferChunk::kTraceBufferChunkSize;
+  }
+
+  size_t Capacity() const override {
+    return max_chunks_ * TraceBufferChunk::kTraceBufferChunkSize;
+  }
+
+  TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
+    if (handle.chunk_index >= chunks_.size())
+      return nullptr;
+    TraceBufferChunk* chunk = chunks_[handle.chunk_index].get();
+    if (!chunk || chunk->seq() != handle.chunk_seq)
+      return nullptr;
+    return chunk->GetEventAt(handle.event_index);
+  }
+
+  const TraceBufferChunk* NextChunk() override {
+    if (chunks_.empty())
+      return nullptr;
+
+    while (current_iteration_index_ != queue_tail_) {
+      size_t chunk_index = recyclable_chunks_queue_[current_iteration_index_];
+      current_iteration_index_ = NextQueueIndex(current_iteration_index_);
+      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
+        continue;
+      DCHECK(chunks_[chunk_index]);
+      return chunks_[chunk_index].get();
+    }
+    return nullptr;
+  }
+
+  void EstimateTraceMemoryOverhead(
+      TraceEventMemoryOverhead* overhead) override {
+    overhead->Add(TraceEventMemoryOverhead::kTraceBuffer, sizeof(*this));
+    for (size_t queue_index = queue_head_; queue_index != queue_tail_;
+         queue_index = NextQueueIndex(queue_index)) {
+      size_t chunk_index = recyclable_chunks_queue_[queue_index];
+      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
+        continue;
+      chunks_[chunk_index]->EstimateTraceMemoryOverhead(overhead);
+    }
+  }
+
+ private:
+  bool QueueIsEmpty() const { return queue_head_ == queue_tail_; }
+
+  size_t QueueSize() const {
+    return queue_tail_ > queue_head_
+               ? queue_tail_ - queue_head_
+               : queue_tail_ + queue_capacity() - queue_head_;
+  }
+
+  bool QueueIsFull() const { return QueueSize() == queue_capacity() - 1; }
+
+  size_t queue_capacity() const {
+    // One extra slot to distinguish the full state from the empty state.
+    return max_chunks_ + 1;
+  }
+
+  size_t NextQueueIndex(size_t index) const {
+    index++;
+    if (index >= queue_capacity())
+      index = 0;
+    return index;
+  }
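+
+  // Illustrative walk-through (assuming max_chunks_ == 3, hence
+  // queue_capacity() == 4): the constructor fills slots {0, 1, 2} and sets
+  // queue_head_ == 0, queue_tail_ == 3. GetChunk() reads at the head and
+  // advances it; ReturnChunk() writes at the tail and advances it, both
+  // wrapping via NextQueueIndex(). The spare fourth slot guarantees that
+  // head == tail only when the queue is empty, never when it is full.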
+
+  size_t max_chunks_;
+  std::vector<std::unique_ptr<TraceBufferChunk>> chunks_;
+
+  std::unique_ptr<size_t[]> recyclable_chunks_queue_;
+  size_t queue_head_;
+  size_t queue_tail_;
+
+  size_t current_iteration_index_;
+  uint32_t current_chunk_seq_;
+
+  DISALLOW_COPY_AND_ASSIGN(TraceBufferRingBuffer);
+};
+
+class TraceBufferVector : public TraceBuffer {
+ public:
+  TraceBufferVector(size_t max_chunks)
+      : in_flight_chunk_count_(0),
+        current_iteration_index_(0),
+        max_chunks_(max_chunks) {
+    chunks_.reserve(max_chunks_);
+  }
+
+  std::unique_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
+    HEAP_PROFILER_SCOPED_IGNORE;
+
+    // This function may be called when adding normal events or indirectly from
+    // AddMetadataEventsWhileLocked(). We cannot DCHECK(!IsFull()) because we
+    // have to add the metadata events and flush thread-local buffers even if
+    // the buffer is full.
+    *index = chunks_.size();
+    // Put nullptr in the slot of an in-flight chunk.
+    chunks_.push_back(nullptr);
+    ++in_flight_chunk_count_;
+    // + 1 because zero chunk_seq is not allowed.
+    return std::unique_ptr<TraceBufferChunk>(
+        new TraceBufferChunk(static_cast<uint32_t>(*index) + 1));
+  }
+
+  void ReturnChunk(size_t index,
+                   std::unique_ptr<TraceBufferChunk> chunk) override {
+    DCHECK_GT(in_flight_chunk_count_, 0u);
+    DCHECK_LT(index, chunks_.size());
+    DCHECK(!chunks_[index]);
+    --in_flight_chunk_count_;
+    chunks_[index] = std::move(chunk);
+  }
+
+  bool IsFull() const override { return chunks_.size() >= max_chunks_; }
+
+  size_t Size() const override {
+    // This is approximate because not all of the chunks are full.
+    return chunks_.size() * TraceBufferChunk::kTraceBufferChunkSize;
+  }
+
+  size_t Capacity() const override {
+    return max_chunks_ * TraceBufferChunk::kTraceBufferChunkSize;
+  }
+
+  TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
+    if (handle.chunk_index >= chunks_.size())
+      return nullptr;
+    TraceBufferChunk* chunk = chunks_[handle.chunk_index].get();
+    if (!chunk || chunk->seq() != handle.chunk_seq)
+      return nullptr;
+    return chunk->GetEventAt(handle.event_index);
+  }
+
+  const TraceBufferChunk* NextChunk() override {
+    while (current_iteration_index_ < chunks_.size()) {
+      // Skip in-flight chunks.
+      const TraceBufferChunk* chunk = chunks_[current_iteration_index_++].get();
+      if (chunk)
+        return chunk;
+    }
+    return nullptr;
+  }
+
+  void EstimateTraceMemoryOverhead(
+      TraceEventMemoryOverhead* overhead) override {
+    const size_t chunks_ptr_vector_allocated_size =
+        sizeof(*this) + max_chunks_ * sizeof(decltype(chunks_)::value_type);
+    const size_t chunks_ptr_vector_resident_size =
+        sizeof(*this) + chunks_.size() * sizeof(decltype(chunks_)::value_type);
+    overhead->Add(TraceEventMemoryOverhead::kTraceBuffer,
+                  chunks_ptr_vector_allocated_size,
+                  chunks_ptr_vector_resident_size);
+    for (size_t i = 0; i < chunks_.size(); ++i) {
+      TraceBufferChunk* chunk = chunks_[i].get();
+      // Skip the in-flight (nullptr) chunks. They will be accounted for by
+      // the per-thread-local dumpers, see ThreadLocalEventBuffer::OnMemoryDump.
+      if (chunk)
+        chunk->EstimateTraceMemoryOverhead(overhead);
+    }
+  }
+
+ private:
+  size_t in_flight_chunk_count_;
+  size_t current_iteration_index_;
+  size_t max_chunks_;
+  std::vector<std::unique_ptr<TraceBufferChunk>> chunks_;
+
+  DISALLOW_COPY_AND_ASSIGN(TraceBufferVector);
+};
+
+}  // namespace
+
+TraceBufferChunk::TraceBufferChunk(uint32_t seq) : next_free_(0), seq_(seq) {}
+
+TraceBufferChunk::~TraceBufferChunk() = default;
+
+void TraceBufferChunk::Reset(uint32_t new_seq) {
+  for (size_t i = 0; i < next_free_; ++i)
+    chunk_[i].Reset();
+  next_free_ = 0;
+  seq_ = new_seq;
+  cached_overhead_estimate_.reset();
+}
+
+TraceEvent* TraceBufferChunk::AddTraceEvent(size_t* event_index) {
+  DCHECK(!IsFull());
+  *event_index = next_free_++;
+  return &chunk_[*event_index];
+}
+
+void TraceBufferChunk::EstimateTraceMemoryOverhead(
+    TraceEventMemoryOverhead* overhead) {
+  if (!cached_overhead_estimate_) {
+    cached_overhead_estimate_.reset(new TraceEventMemoryOverhead);
+
+    // When estimating the size of TraceBufferChunk, exclude the array of trace
+    // events, as they are computed individually below.
+    cached_overhead_estimate_->Add(TraceEventMemoryOverhead::kTraceBufferChunk,
+                                   sizeof(*this) - sizeof(chunk_));
+  }
+
+  const size_t num_cached_estimated_events =
+      cached_overhead_estimate_->GetCount(
+          TraceEventMemoryOverhead::kTraceEvent);
+  DCHECK_LE(num_cached_estimated_events, size());
+
+  if (IsFull() && num_cached_estimated_events == size()) {
+    overhead->Update(*cached_overhead_estimate_);
+    return;
+  }
+
+  for (size_t i = num_cached_estimated_events; i < size(); ++i)
+    chunk_[i].EstimateTraceMemoryOverhead(cached_overhead_estimate_.get());
+
+  if (IsFull()) {
+    cached_overhead_estimate_->AddSelf();
+  } else {
+    // The unused TraceEvents in |chunk_| are not cached. They will keep
+    // changing as new TraceEvents are added to this chunk, so they are
+    // computed on the fly.
+    const size_t num_unused_trace_events = capacity() - size();
+    overhead->Add(TraceEventMemoryOverhead::kUnusedTraceEvent,
+                  num_unused_trace_events * sizeof(TraceEvent));
+  }
+
+  overhead->Update(*cached_overhead_estimate_);
+}
+
+TraceResultBuffer::OutputCallback
+TraceResultBuffer::SimpleOutput::GetCallback() {
+  return Bind(&SimpleOutput::Append, Unretained(this));
+}
+
+void TraceResultBuffer::SimpleOutput::Append(
+    const std::string& json_trace_output) {
+  json_output += json_trace_output;
+}
+
+TraceResultBuffer::TraceResultBuffer() : append_comma_(false) {}
+
+TraceResultBuffer::~TraceResultBuffer() = default;
+
+void TraceResultBuffer::SetOutputCallback(
+    const OutputCallback& json_chunk_callback) {
+  output_callback_ = json_chunk_callback;
+}
+
+void TraceResultBuffer::Start() {
+  append_comma_ = false;
+  output_callback_.Run("[");
+}
+
+void TraceResultBuffer::AddFragment(const std::string& trace_fragment) {
+  if (append_comma_)
+    output_callback_.Run(",");
+  append_comma_ = true;
+  output_callback_.Run(trace_fragment);
+}
+
+void TraceResultBuffer::Finish() {
+  output_callback_.Run("]");
+}
+
+TraceBuffer* TraceBuffer::CreateTraceBufferRingBuffer(size_t max_chunks) {
+  return new TraceBufferRingBuffer(max_chunks);
+}
+
+TraceBuffer* TraceBuffer::CreateTraceBufferVectorOfSize(size_t max_chunks) {
+  return new TraceBufferVector(max_chunks);
+}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/trace_buffer.h b/base/trace_event/trace_buffer.h
new file mode 100644
index 0000000..3d6465f
--- /dev/null
+++ b/base/trace_event/trace_buffer.h
@@ -0,0 +1,130 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_BUFFER_H_
+#define BASE_TRACE_EVENT_TRACE_BUFFER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/base_export.h"
+#include "base/trace_event/trace_event.h"
+#include "base/trace_event/trace_event_impl.h"
+
+namespace base {
+
+namespace trace_event {
+
+// TraceBufferChunk is the basic unit of TraceBuffer.
+class BASE_EXPORT TraceBufferChunk {
+ public:
+  explicit TraceBufferChunk(uint32_t seq);
+  ~TraceBufferChunk();
+
+  void Reset(uint32_t new_seq);
+  TraceEvent* AddTraceEvent(size_t* event_index);
+  bool IsFull() const { return next_free_ == kTraceBufferChunkSize; }
+
+  uint32_t seq() const { return seq_; }
+  size_t capacity() const { return kTraceBufferChunkSize; }
+  size_t size() const { return next_free_; }
+
+  TraceEvent* GetEventAt(size_t index) {
+    DCHECK(index < size());
+    return &chunk_[index];
+  }
+  const TraceEvent* GetEventAt(size_t index) const {
+    DCHECK(index < size());
+    return &chunk_[index];
+  }
+
+  void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead);
+
+  // These values must be kept consistent with the number of bits in the
+  // chunk_index and event_index fields of TraceEventHandle
+  // (in trace_event_impl.h).
+  static const size_t kMaxChunkIndex = (1u << 26) - 1;
+  static const size_t kTraceBufferChunkSize = 64;
+
+ private:
+  size_t next_free_;
+  std::unique_ptr<TraceEventMemoryOverhead> cached_overhead_estimate_;
+  TraceEvent chunk_[kTraceBufferChunkSize];
+  uint32_t seq_;
+};
+
+// TraceBuffer holds the events as they are collected.
+class BASE_EXPORT TraceBuffer {
+ public:
+  virtual ~TraceBuffer() = default;
+
+  virtual std::unique_ptr<TraceBufferChunk> GetChunk(size_t* index) = 0;
+  virtual void ReturnChunk(size_t index,
+                           std::unique_ptr<TraceBufferChunk> chunk) = 0;
+
+  virtual bool IsFull() const = 0;
+  virtual size_t Size() const = 0;
+  virtual size_t Capacity() const = 0;
+  virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) = 0;
+
+  // For iteration. Each TraceBuffer can only be iterated once.
+  virtual const TraceBufferChunk* NextChunk() = 0;
+
+  // Computes an estimate of the size of the buffer, including all the retained
+  // objects.
+  virtual void EstimateTraceMemoryOverhead(
+      TraceEventMemoryOverhead* overhead) = 0;
+
+  static TraceBuffer* CreateTraceBufferRingBuffer(size_t max_chunks);
+  static TraceBuffer* CreateTraceBufferVectorOfSize(size_t max_chunks);
+};
+
+// TraceResultBuffer collects and converts trace fragments returned by TraceLog
+// to JSON output.
+class BASE_EXPORT TraceResultBuffer {
+ public:
+  typedef base::Callback<void(const std::string&)> OutputCallback;
+
+  // If you don't need to stream JSON chunks out efficiently, and just want to
+  // get a complete JSON string after calling Finish, use this struct to collect
+  // JSON trace output.
+  struct BASE_EXPORT SimpleOutput {
+    OutputCallback GetCallback();
+    void Append(const std::string& json_string);
+
+    // Do what you want with the |json_output| string after calling
+    // TraceResultBuffer::Finish.
+    std::string json_output;
+  };
+
+  TraceResultBuffer();
+  ~TraceResultBuffer();
+
+  // Sets the callback. The callback will be called during Start with the
+  // initial JSON output, and during AddFragment and Finish with subsequent
+  // JSON output chunks. The callback target must outlive the last call to
+  // TraceResultBuffer::Start/AddFragment/Finish.
+  void SetOutputCallback(const OutputCallback& json_chunk_callback);
+
+  // Start JSON output. This resets all internal state, so you can reuse
+  // the TraceResultBuffer by calling Start.
+  void Start();
+
+  // Call AddFragment 0 or more times to add trace fragments from TraceLog.
+  void AddFragment(const std::string& trace_fragment);
+
+  // When all fragments have been added, call Finish to complete the JSON
+  // formatted output.
+  void Finish();
+
+ private:
+  OutputCallback output_callback_;
+  bool append_comma_;
+};
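+
+// A minimal usage sketch (illustrative only; it assumes single-threaded use
+// and uses SimpleOutput to collect the chunks):
+//
+//   TraceResultBuffer buffer;
+//   TraceResultBuffer::SimpleOutput output;
+//   buffer.SetOutputCallback(output.GetCallback());
+//   buffer.Start();                    // output.json_output == "["
+//   buffer.AddFragment("{\"x\":1}");   // ... == "[{\"x\":1}"
+//   buffer.AddFragment("{\"y\":2}");   // ... == "[{\"x\":1},{\"y\":2}"
+//   buffer.Finish();                   // ... == "[{\"x\":1},{\"y\":2}]"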
+
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_TRACE_BUFFER_H_
diff --git a/base/trace_event/trace_category.h b/base/trace_event/trace_category.h
new file mode 100644
index 0000000..792bc5e
--- /dev/null
+++ b/base/trace_event/trace_category.h
@@ -0,0 +1,109 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_CATEGORY_H_
+#define BASE_TRACE_EVENT_TRACE_CATEGORY_H_
+
+#include <stdint.h>
+
+namespace base {
+namespace trace_event {
+
+// Captures the state of an individual trace category. Nothing except tracing
+// internals (e.g., TraceLog) is supposed to have non-const Category pointers.
+struct TraceCategory {
+  // The TRACE_EVENT macros should only use this value as a bool.
+  // These enum values are effectively a public API and third_party projects
+  // depend on their value. Hence, never remove or recycle existing bits, unless
+  // you are sure that all the third-party projects that depend on this have
+  // been updated.
+  enum StateFlags : uint8_t {
+    ENABLED_FOR_RECORDING = 1 << 0,
+
+    // Not used anymore.
+    DEPRECATED_ENABLED_FOR_MONITORING = 1 << 1,
+    DEPRECATED_ENABLED_FOR_EVENT_CALLBACK = 1 << 2,
+
+    ENABLED_FOR_ETW_EXPORT = 1 << 3,
+    ENABLED_FOR_FILTERING = 1 << 4
+  };
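+
+  // For example, a category enabled both for recording and for filtering
+  // carries state() == (ENABLED_FOR_RECORDING | ENABLED_FOR_FILTERING),
+  // i.e. 0x11 (this combination is exercised in trace_category_unittest.cc).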
+
+  static const TraceCategory* FromStatePtr(const uint8_t* state_ptr) {
+    static_assert(
+        offsetof(TraceCategory, state_) == 0,
+        "|state_| must be the first field of the TraceCategory class.");
+    return reinterpret_cast<const TraceCategory*>(state_ptr);
+  }
+
+  bool is_valid() const { return name_ != nullptr; }
+  void set_name(const char* name) { name_ = name; }
+  const char* name() const {
+    DCHECK(is_valid());
+    return name_;
+  }
+
+  // TODO(primiano): This is an intermediate solution to deal with the fact
+  // that today the TRACE_EVENT* macros cache the state ptr. They should just
+  // cache the full TraceCategory ptr, which is immutable, and use these
+  // helper functions here. That will get rid of the need for this awkward
+  // ptr getter completely.
+  const uint8_t* state_ptr() const {
+    return const_cast<const uint8_t*>(&state_);
+  }
+
+  uint8_t state() const {
+    return *const_cast<volatile const uint8_t*>(&state_);
+  }
+
+  bool is_enabled() const { return state() != 0; }
+
+  void set_state(uint8_t state) {
+    *const_cast<volatile uint8_t*>(&state_) = state;
+  }
+
+  void clear_state_flag(StateFlags flag) { set_state(state() & (~flag)); }
+  void set_state_flag(StateFlags flag) { set_state(state() | flag); }
+
+  uint32_t enabled_filters() const {
+    return *const_cast<volatile const uint32_t*>(&enabled_filters_);
+  }
+
+  bool is_filter_enabled(size_t index) const {
+    DCHECK(index < sizeof(enabled_filters_) * 8);
+    return (enabled_filters() & (1 << index)) != 0;
+  }
+
+  void set_enabled_filters(uint32_t enabled_filters) {
+    *const_cast<volatile uint32_t*>(&enabled_filters_) = enabled_filters;
+  }
+
+  void reset_for_testing() {
+    set_state(0);
+    set_enabled_filters(0);
+  }
+
+  // These fields should not be accessed directly, not even by tracing code.
+  // The only reason they are not private is that making them private would
+  // make it impossible to have a global array of TraceCategory in
+  // category_registry.cc without creating static initializers. See discussion
+  // on goo.gl/qhZN94 and crbug.com/{660967,660828}.
+
+  // The enabled state. TRACE_EVENT* macros will capture events if any of the
+  // flags here are set. Since TRACE_EVENTx macros are used in a lot of
+  // fast-paths, accesses to this field are non-barriered and racy by design.
+  // This field is mutated when starting/stopping tracing and we don't care
+  // about missing some events.
+  uint8_t state_;
+
+  // When ENABLED_FOR_FILTERING is set, this contains a bitmap of the
+  // corresponding filters (see event_filters.h).
+  uint32_t enabled_filters_;
+
+  // TraceCategory group names are long-lived static strings.
+  const char* name_;
+};
+
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_TRACE_CATEGORY_H_
diff --git a/base/trace_event/trace_category_unittest.cc b/base/trace_event/trace_category_unittest.cc
new file mode 100644
index 0000000..964064e
--- /dev/null
+++ b/base/trace_event/trace_category_unittest.cc
@@ -0,0 +1,148 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string.h>
+
+#include <memory>
+
+#include "base/bind.h"
+#include "base/lazy_instance.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/thread.h"
+#include "base/trace_event/category_registry.h"
+#include "base/trace_event/trace_category.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace trace_event {
+
+// Static initializers are generally forbidden. However, in the past we ran
+// into cases of tests using tracing from a static initializer. This test
+// checks that the category registry doesn't rely on static initializers
+// itself and is functional even if called from another static initializer.
+bool Initializer() {
+  return CategoryRegistry::kCategoryMetadata &&
+         CategoryRegistry::kCategoryMetadata->is_valid();
+}
+bool g_initializer_check = Initializer();
+
+class TraceCategoryTest : public testing::Test {
+ public:
+  void SetUp() override { CategoryRegistry::Initialize(); }
+
+  void TearDown() override { CategoryRegistry::ResetForTesting(); }
+
+  static bool GetOrCreateCategoryByName(const char* name, TraceCategory** cat) {
+    static LazyInstance<Lock>::Leaky g_lock = LAZY_INSTANCE_INITIALIZER;
+    bool is_new_cat = false;
+    *cat = CategoryRegistry::GetCategoryByName(name);
+    if (!*cat) {
+      AutoLock lock(g_lock.Get());
+      is_new_cat = CategoryRegistry::GetOrCreateCategoryLocked(
+          name, [](TraceCategory*) {}, cat);
+    }
+    return is_new_cat;
+  }
+
+  static CategoryRegistry::Range GetAllCategories() {
+    return CategoryRegistry::GetAllCategories();
+  }
+
+  static void TestRaceThreadMain(WaitableEvent* event) {
+    TraceCategory* cat = nullptr;
+    event->Wait();
+    GetOrCreateCategoryByName("__test_race", &cat);
+    EXPECT_NE(nullptr, cat);
+  }
+};
+
+TEST_F(TraceCategoryTest, Basic) {
+  ASSERT_NE(nullptr, CategoryRegistry::kCategoryMetadata);
+  ASSERT_TRUE(CategoryRegistry::kCategoryMetadata->is_valid());
+  ASSERT_FALSE(CategoryRegistry::kCategoryMetadata->is_enabled());
+
+  // The metadata category is built-in and should not create a new category.
+  TraceCategory* cat_meta = nullptr;
+  const char* kMetadataName = CategoryRegistry::kCategoryMetadata->name();
+  ASSERT_FALSE(GetOrCreateCategoryByName(kMetadataName, &cat_meta));
+  ASSERT_EQ(CategoryRegistry::kCategoryMetadata, cat_meta);
+
+  TraceCategory* cat_1 = nullptr;
+  ASSERT_TRUE(GetOrCreateCategoryByName("__test_basic_ab", &cat_1));
+  ASSERT_FALSE(cat_1->is_enabled());
+  ASSERT_EQ(0u, cat_1->enabled_filters());
+  cat_1->set_state_flag(TraceCategory::ENABLED_FOR_RECORDING);
+  cat_1->set_state_flag(TraceCategory::ENABLED_FOR_FILTERING);
+  ASSERT_EQ(TraceCategory::ENABLED_FOR_RECORDING |
+                TraceCategory::ENABLED_FOR_FILTERING,
+            cat_1->state());
+
+  cat_1->set_enabled_filters(129);
+  ASSERT_EQ(129u, cat_1->enabled_filters());
+  ASSERT_EQ(cat_1, CategoryRegistry::GetCategoryByStatePtr(cat_1->state_ptr()));
+
+  cat_1->clear_state_flag(TraceCategory::ENABLED_FOR_FILTERING);
+  ASSERT_EQ(TraceCategory::ENABLED_FOR_RECORDING, cat_1->state());
+  ASSERT_EQ(TraceCategory::ENABLED_FOR_RECORDING, *cat_1->state_ptr());
+  ASSERT_TRUE(cat_1->is_enabled());
+
+  TraceCategory* cat_2 = nullptr;
+  ASSERT_TRUE(GetOrCreateCategoryByName("__test_basic_a", &cat_2));
+  ASSERT_FALSE(cat_2->is_enabled());
+  cat_2->set_state_flag(TraceCategory::ENABLED_FOR_RECORDING);
+
+  TraceCategory* cat_2_copy = nullptr;
+  ASSERT_FALSE(GetOrCreateCategoryByName("__test_basic_a", &cat_2_copy));
+  ASSERT_EQ(cat_2, cat_2_copy);
+
+  TraceCategory* cat_3 = nullptr;
+  ASSERT_TRUE(
+      GetOrCreateCategoryByName("__test_basic_ab,__test_basic_a", &cat_3));
+  ASSERT_FALSE(cat_3->is_enabled());
+  ASSERT_EQ(0u, cat_3->enabled_filters());
+
+  int num_test_categories_seen = 0;
+  for (const TraceCategory& cat : GetAllCategories()) {
+    if (strcmp(cat.name(), kMetadataName) == 0)
+      ASSERT_TRUE(CategoryRegistry::IsBuiltinCategory(&cat));
+
+    if (strncmp(cat.name(), "__test_basic_", 13) == 0) {
+      ASSERT_FALSE(CategoryRegistry::IsBuiltinCategory(&cat));
+      num_test_categories_seen++;
+    }
+  }
+  ASSERT_EQ(3, num_test_categories_seen);
+  ASSERT_TRUE(g_initializer_check);
+}
+
+// Tries to cover the case of multiple threads creating the same category
+// simultaneously. Should never end up with distinct entries with the same name.
+TEST_F(TraceCategoryTest, ThreadRaces) {
+  const int kNumThreads = 32;
+  std::unique_ptr<Thread> threads[kNumThreads];
+  for (int i = 0; i < kNumThreads; i++) {
+    threads[i].reset(new Thread("test thread"));
+    threads[i]->Start();
+  }
+  WaitableEvent sync_event(WaitableEvent::ResetPolicy::MANUAL,
+                           WaitableEvent::InitialState::NOT_SIGNALED);
+  for (int i = 0; i < kNumThreads; i++) {
+    threads[i]->task_runner()->PostTask(
+        FROM_HERE, BindOnce(&TestRaceThreadMain, Unretained(&sync_event)));
+  }
+  sync_event.Signal();
+  for (int i = 0; i < kNumThreads; i++)
+    threads[i]->Stop();
+
+  int num_times_seen = 0;
+  for (const TraceCategory& cat : GetAllCategories()) {
+    if (strcmp(cat.name(), "__test_race") == 0)
+      num_times_seen++;
+  }
+  ASSERT_EQ(1, num_times_seen);
+}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/trace_config.cc b/base/trace_event/trace_config.cc
new file mode 100644
index 0000000..624a29c
--- /dev/null
+++ b/base/trace_event/trace_config.cc
@@ -0,0 +1,557 @@
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_config.h"
+
+#include <stddef.h>
+
+#include <utility>
+
+#include "base/json/json_reader.h"
+#include "base/json/json_writer.h"
+#include "base/memory/ptr_util.h"
+#include "base/strings/string_split.h"
+#include "base/trace_event/memory_dump_manager.h"
+#include "base/trace_event/memory_dump_request_args.h"
+#include "base/trace_event/trace_event.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+// String options that can be used to initialize TraceOptions.
+const char kRecordUntilFull[] = "record-until-full";
+const char kRecordContinuously[] = "record-continuously";
+const char kRecordAsMuchAsPossible[] = "record-as-much-as-possible";
+const char kTraceToConsole[] = "trace-to-console";
+const char kEnableSystrace[] = "enable-systrace";
+const char kEnableArgumentFilter[] = "enable-argument-filter";
+
+// String parameters that can be used to parse the trace config string.
+const char kRecordModeParam[] = "record_mode";
+const char kEnableSystraceParam[] = "enable_systrace";
+const char kEnableArgumentFilterParam[] = "enable_argument_filter";
+
+// String parameters that are used to parse the memory dump config in a trace
+// config string.
+const char kMemoryDumpConfigParam[] = "memory_dump_config";
+const char kAllowedDumpModesParam[] = "allowed_dump_modes";
+const char kTriggersParam[] = "triggers";
+const char kTriggerModeParam[] = "mode";
+const char kMinTimeBetweenDumps[] = "min_time_between_dumps_ms";
+const char kTriggerTypeParam[] = "type";
+const char kPeriodicIntervalLegacyParam[] = "periodic_interval_ms";
+const char kHeapProfilerOptions[] = "heap_profiler_options";
+const char kBreakdownThresholdBytes[] = "breakdown_threshold_bytes";
+
+// String parameters used to parse category event filters.
+const char kEventFiltersParam[] = "event_filters";
+const char kFilterPredicateParam[] = "filter_predicate";
+const char kFilterArgsParam[] = "filter_args";
+
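+// An illustrative config string assembled from the parameters above (a
+// sketch with hypothetical values, not a normative example):
+//
+//   {
+//     "record_mode": "record-continuously",
+//     "enable_systrace": false,
+//     "enable_argument_filter": false,
+//     "memory_dump_config": {
+//       "allowed_dump_modes": ["background", "detailed"],
+//       "triggers": [
+//         { "mode": "detailed", "min_time_between_dumps_ms": 1000 }
+//       ]
+//     }
+//   }
+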
+class ConvertableTraceConfigToTraceFormat
+    : public base::trace_event::ConvertableToTraceFormat {
+ public:
+  explicit ConvertableTraceConfigToTraceFormat(const TraceConfig& trace_config)
+      : trace_config_(trace_config) {}
+
+  ~ConvertableTraceConfigToTraceFormat() override = default;
+
+  void AppendAsTraceFormat(std::string* out) const override {
+    out->append(trace_config_.ToString());
+  }
+
+ private:
+  const TraceConfig trace_config_;
+};
+
+std::set<MemoryDumpLevelOfDetail> GetDefaultAllowedMemoryDumpModes() {
+  std::set<MemoryDumpLevelOfDetail> all_modes;
+  for (uint32_t mode = static_cast<uint32_t>(MemoryDumpLevelOfDetail::FIRST);
+       mode <= static_cast<uint32_t>(MemoryDumpLevelOfDetail::LAST); mode++) {
+    all_modes.insert(static_cast<MemoryDumpLevelOfDetail>(mode));
+  }
+  return all_modes;
+}
+
+}  // namespace
+
+TraceConfig::MemoryDumpConfig::HeapProfiler::HeapProfiler()
+    : breakdown_threshold_bytes(kDefaultBreakdownThresholdBytes) {}
+
+void TraceConfig::MemoryDumpConfig::HeapProfiler::Clear() {
+  breakdown_threshold_bytes = kDefaultBreakdownThresholdBytes;
+}
+
+void TraceConfig::ResetMemoryDumpConfig(
+    const TraceConfig::MemoryDumpConfig& memory_dump_config) {
+  memory_dump_config_.Clear();
+  memory_dump_config_ = memory_dump_config;
+}
+
+TraceConfig::MemoryDumpConfig::MemoryDumpConfig() = default;
+
+TraceConfig::MemoryDumpConfig::MemoryDumpConfig(
+    const MemoryDumpConfig& other) = default;
+
+TraceConfig::MemoryDumpConfig::~MemoryDumpConfig() = default;
+
+void TraceConfig::MemoryDumpConfig::Clear() {
+  allowed_dump_modes.clear();
+  triggers.clear();
+  heap_profiler_options.Clear();
+}
+
+void TraceConfig::MemoryDumpConfig::Merge(
+    const TraceConfig::MemoryDumpConfig& config) {
+  triggers.insert(triggers.end(), config.triggers.begin(),
+                  config.triggers.end());
+  allowed_dump_modes.insert(config.allowed_dump_modes.begin(),
+                            config.allowed_dump_modes.end());
+  heap_profiler_options.breakdown_threshold_bytes =
+      std::min(heap_profiler_options.breakdown_threshold_bytes,
+               config.heap_profiler_options.breakdown_threshold_bytes);
+}
+
+TraceConfig::EventFilterConfig::EventFilterConfig(
+    const std::string& predicate_name)
+    : predicate_name_(predicate_name) {}
+
+TraceConfig::EventFilterConfig::~EventFilterConfig() = default;
+
+TraceConfig::EventFilterConfig::EventFilterConfig(const EventFilterConfig& tc) {
+  *this = tc;
+}
+
+TraceConfig::EventFilterConfig& TraceConfig::EventFilterConfig::operator=(
+    const TraceConfig::EventFilterConfig& rhs) {
+  if (this == &rhs)
+    return *this;
+
+  predicate_name_ = rhs.predicate_name_;
+  category_filter_ = rhs.category_filter_;
+
+  if (rhs.args_)
+    args_ = rhs.args_->CreateDeepCopy();
+
+  return *this;
+}
+
+void TraceConfig::EventFilterConfig::InitializeFromConfigDict(
+    const base::DictionaryValue* event_filter) {
+  category_filter_.InitializeFromConfigDict(*event_filter);
+
+  const base::DictionaryValue* args_dict = nullptr;
+  if (event_filter->GetDictionary(kFilterArgsParam, &args_dict))
+    args_ = args_dict->CreateDeepCopy();
+}
+
+void TraceConfig::EventFilterConfig::SetCategoryFilter(
+    const TraceConfigCategoryFilter& category_filter) {
+  category_filter_ = category_filter;
+}
+
+void TraceConfig::EventFilterConfig::ToDict(
+    DictionaryValue* filter_dict) const {
+  filter_dict->SetString(kFilterPredicateParam, predicate_name());
+
+  category_filter_.ToDict(filter_dict);
+
+  if (args_)
+    filter_dict->Set(kFilterArgsParam, args_->CreateDeepCopy());
+}
+
+bool TraceConfig::EventFilterConfig::GetArgAsSet(
+    const char* key,
+    std::unordered_set<std::string>* out_set) const {
+  const ListValue* list = nullptr;
+  // |args_| is null when the filter config had no "filter_args" dictionary.
+  if (!args_ || !args_->GetList(key, &list))
+    return false;
+  for (size_t i = 0; i < list->GetSize(); ++i) {
+    std::string value;
+    if (list->GetString(i, &value))
+      out_set->insert(value);
+  }
+  return true;
+}
+
+bool TraceConfig::EventFilterConfig::IsCategoryGroupEnabled(
+    const StringPiece& category_group_name) const {
+  return category_filter_.IsCategoryGroupEnabled(category_group_name);
+}
+
+// static
+std::string TraceConfig::TraceRecordModeToStr(TraceRecordMode record_mode) {
+  switch (record_mode) {
+    case RECORD_UNTIL_FULL:
+      return kRecordUntilFull;
+    case RECORD_CONTINUOUSLY:
+      return kRecordContinuously;
+    case RECORD_AS_MUCH_AS_POSSIBLE:
+      return kRecordAsMuchAsPossible;
+    case ECHO_TO_CONSOLE:
+      return kTraceToConsole;
+    default:
+      NOTREACHED();
+  }
+  return kRecordUntilFull;
+}
+
+TraceConfig::TraceConfig() {
+  InitializeDefault();
+}
+
+TraceConfig::TraceConfig(StringPiece category_filter_string,
+                         StringPiece trace_options_string) {
+  InitializeFromStrings(category_filter_string, trace_options_string);
+}
+
+TraceConfig::TraceConfig(StringPiece category_filter_string,
+                         TraceRecordMode record_mode) {
+  InitializeFromStrings(category_filter_string,
+                        TraceConfig::TraceRecordModeToStr(record_mode));
+}
+
+TraceConfig::TraceConfig(const DictionaryValue& config) {
+  InitializeFromConfigDict(config);
+}
+
+TraceConfig::TraceConfig(StringPiece config_string) {
+  if (!config_string.empty())
+    InitializeFromConfigString(config_string);
+  else
+    InitializeDefault();
+}
+
+TraceConfig::TraceConfig(const TraceConfig& tc) = default;
+
+TraceConfig::~TraceConfig() = default;
+
+TraceConfig& TraceConfig::operator=(const TraceConfig& rhs) {
+  if (this == &rhs)
+    return *this;
+
+  record_mode_ = rhs.record_mode_;
+  enable_systrace_ = rhs.enable_systrace_;
+  enable_argument_filter_ = rhs.enable_argument_filter_;
+  category_filter_ = rhs.category_filter_;
+  memory_dump_config_ = rhs.memory_dump_config_;
+  event_filters_ = rhs.event_filters_;
+  return *this;
+}
+
+std::string TraceConfig::ToString() const {
+  std::unique_ptr<DictionaryValue> dict = ToDict();
+  std::string json;
+  JSONWriter::Write(*dict, &json);
+  return json;
+}
+
+std::unique_ptr<ConvertableToTraceFormat>
+TraceConfig::AsConvertableToTraceFormat() const {
+  return std::make_unique<ConvertableTraceConfigToTraceFormat>(*this);
+}
+
+std::string TraceConfig::ToCategoryFilterString() const {
+  return category_filter_.ToFilterString();
+}
+
+bool TraceConfig::IsCategoryGroupEnabled(
+    const StringPiece& category_group_name) const {
+  // TraceLog should call this method only as part of enabling/disabling
+  // categories.
+  return category_filter_.IsCategoryGroupEnabled(category_group_name);
+}
+
+void TraceConfig::Merge(const TraceConfig& config) {
+  if (record_mode_ != config.record_mode_ ||
+      enable_systrace_ != config.enable_systrace_ ||
+      enable_argument_filter_ != config.enable_argument_filter_) {
+    DLOG(ERROR) << "Attempting to merge trace config with a different "
+                << "set of options.";
+  }
+
+  category_filter_.Merge(config.category_filter_);
+
+  memory_dump_config_.Merge(config.memory_dump_config_);
+
+  event_filters_.insert(event_filters_.end(), config.event_filters().begin(),
+                        config.event_filters().end());
+}
+
+void TraceConfig::Clear() {
+  record_mode_ = RECORD_UNTIL_FULL;
+  enable_systrace_ = false;
+  enable_argument_filter_ = false;
+  category_filter_.Clear();
+  memory_dump_config_.Clear();
+  event_filters_.clear();
+}
+
+void TraceConfig::InitializeDefault() {
+  record_mode_ = RECORD_UNTIL_FULL;
+  enable_systrace_ = false;
+  enable_argument_filter_ = false;
+}
+
+void TraceConfig::InitializeFromConfigDict(const DictionaryValue& dict) {
+  record_mode_ = RECORD_UNTIL_FULL;
+  std::string record_mode;
+  if (dict.GetString(kRecordModeParam, &record_mode)) {
+    if (record_mode == kRecordUntilFull) {
+      record_mode_ = RECORD_UNTIL_FULL;
+    } else if (record_mode == kRecordContinuously) {
+      record_mode_ = RECORD_CONTINUOUSLY;
+    } else if (record_mode == kTraceToConsole) {
+      record_mode_ = ECHO_TO_CONSOLE;
+    } else if (record_mode == kRecordAsMuchAsPossible) {
+      record_mode_ = RECORD_AS_MUCH_AS_POSSIBLE;
+    }
+  }
+
+  bool val;
+  enable_systrace_ = dict.GetBoolean(kEnableSystraceParam, &val) ? val : false;
+  enable_argument_filter_ =
+      dict.GetBoolean(kEnableArgumentFilterParam, &val) ? val : false;
+
+  category_filter_.InitializeFromConfigDict(dict);
+
+  const base::ListValue* category_event_filters = nullptr;
+  if (dict.GetList(kEventFiltersParam, &category_event_filters))
+    SetEventFiltersFromConfigList(*category_event_filters);
+
+  if (category_filter_.IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
+    // If dump triggers are not set, the client is using the legacy format
+    // with just the category enabled, so use the default periodic dump
+    // config.
+    const DictionaryValue* memory_dump_config = nullptr;
+    if (dict.GetDictionary(kMemoryDumpConfigParam, &memory_dump_config))
+      SetMemoryDumpConfigFromConfigDict(*memory_dump_config);
+    else
+      SetDefaultMemoryDumpConfig();
+  }
+}
+
+void TraceConfig::InitializeFromConfigString(StringPiece config_string) {
+  auto dict = DictionaryValue::From(JSONReader::Read(config_string));
+  if (dict)
+    InitializeFromConfigDict(*dict);
+  else
+    InitializeDefault();
+}
+
+void TraceConfig::InitializeFromStrings(StringPiece category_filter_string,
+                                        StringPiece trace_options_string) {
+  if (!category_filter_string.empty())
+    category_filter_.InitializeFromString(category_filter_string);
+
+  record_mode_ = RECORD_UNTIL_FULL;
+  enable_systrace_ = false;
+  enable_argument_filter_ = false;
+  if (!trace_options_string.empty()) {
+    std::vector<std::string> split =
+        SplitString(trace_options_string, ",", TRIM_WHITESPACE, SPLIT_WANT_ALL);
+    for (const std::string& token : split) {
+      if (token == kRecordUntilFull) {
+        record_mode_ = RECORD_UNTIL_FULL;
+      } else if (token == kRecordContinuously) {
+        record_mode_ = RECORD_CONTINUOUSLY;
+      } else if (token == kTraceToConsole) {
+        record_mode_ = ECHO_TO_CONSOLE;
+      } else if (token == kRecordAsMuchAsPossible) {
+        record_mode_ = RECORD_AS_MUCH_AS_POSSIBLE;
+      } else if (token == kEnableSystrace) {
+        enable_systrace_ = true;
+      } else if (token == kEnableArgumentFilter) {
+        enable_argument_filter_ = true;
+      }
+    }
+  }
+
+  if (category_filter_.IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
+    SetDefaultMemoryDumpConfig();
+  }
+}
+
+void TraceConfig::SetMemoryDumpConfigFromConfigDict(
+    const DictionaryValue& memory_dump_config) {
+  // Set allowed dump modes.
+  memory_dump_config_.allowed_dump_modes.clear();
+  const ListValue* allowed_modes_list = nullptr;
+  if (memory_dump_config.GetList(kAllowedDumpModesParam, &allowed_modes_list)) {
+    for (size_t i = 0; i < allowed_modes_list->GetSize(); ++i) {
+      std::string level_of_detail_str;
+      allowed_modes_list->GetString(i, &level_of_detail_str);
+      memory_dump_config_.allowed_dump_modes.insert(
+          StringToMemoryDumpLevelOfDetail(level_of_detail_str));
+    }
+  } else {
+    // If the allowed modes param is not given, allow all modes by default.
+    memory_dump_config_.allowed_dump_modes = GetDefaultAllowedMemoryDumpModes();
+  }
+
+  // Set triggers.
+  memory_dump_config_.triggers.clear();
+  const ListValue* trigger_list = nullptr;
+  if (memory_dump_config.GetList(kTriggersParam, &trigger_list) &&
+      trigger_list->GetSize() > 0) {
+    for (size_t i = 0; i < trigger_list->GetSize(); ++i) {
+      const DictionaryValue* trigger = nullptr;
+      if (!trigger_list->GetDictionary(i, &trigger))
+        continue;
+
+      MemoryDumpConfig::Trigger dump_config;
+      int interval = 0;
+      if (!trigger->GetInteger(kMinTimeBetweenDumps, &interval)) {
+        // If "min_time_between_dumps_ms" param was not given, then the trace
+        // config uses old format where only periodic dumps are supported.
+        trigger->GetInteger(kPeriodicIntervalLegacyParam, &interval);
+        dump_config.trigger_type = MemoryDumpType::PERIODIC_INTERVAL;
+      } else {
+        std::string trigger_type_str;
+        trigger->GetString(kTriggerTypeParam, &trigger_type_str);
+        dump_config.trigger_type = StringToMemoryDumpType(trigger_type_str);
+      }
+      DCHECK_GT(interval, 0);
+      dump_config.min_time_between_dumps_ms = static_cast<uint32_t>(interval);
+
+      std::string level_of_detail_str;
+      trigger->GetString(kTriggerModeParam, &level_of_detail_str);
+      dump_config.level_of_detail =
+          StringToMemoryDumpLevelOfDetail(level_of_detail_str);
+
+      memory_dump_config_.triggers.push_back(dump_config);
+    }
+  }
+
+  // Set heap profiler options.
+  const DictionaryValue* heap_profiler_options = nullptr;
+  if (memory_dump_config.GetDictionary(kHeapProfilerOptions,
+                                       &heap_profiler_options)) {
+    int min_size_bytes = 0;
+    if (heap_profiler_options->GetInteger(kBreakdownThresholdBytes,
+                                          &min_size_bytes) &&
+        min_size_bytes >= 0) {
+      memory_dump_config_.heap_profiler_options.breakdown_threshold_bytes =
+          static_cast<size_t>(min_size_bytes);
+    } else {
+      memory_dump_config_.heap_profiler_options.breakdown_threshold_bytes =
+          MemoryDumpConfig::HeapProfiler::kDefaultBreakdownThresholdBytes;
+    }
+  }
+}
+
+void TraceConfig::SetDefaultMemoryDumpConfig() {
+  memory_dump_config_.Clear();
+  memory_dump_config_.allowed_dump_modes = GetDefaultAllowedMemoryDumpModes();
+}
+
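+// Rebuilds |event_filters_| from the "event_filters" config list. Entries
+// that are not dictionaries are skipped; a missing "filter_predicate" string
+// is fatal (CHECK).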
+void TraceConfig::SetEventFiltersFromConfigList(
+    const base::ListValue& category_event_filters) {
+  event_filters_.clear();
+
+  for (size_t event_filter_index = 0;
+       event_filter_index < category_event_filters.GetSize();
+       ++event_filter_index) {
+    const base::DictionaryValue* event_filter = nullptr;
+    if (!category_event_filters.GetDictionary(event_filter_index,
+                                              &event_filter))
+      continue;
+
+    std::string predicate_name;
+    CHECK(event_filter->GetString(kFilterPredicateParam, &predicate_name))
+        << "Invalid predicate name in category event filter.";
+
+    EventFilterConfig new_config(predicate_name);
+    new_config.InitializeFromConfigDict(event_filter);
+    event_filters_.push_back(new_config);
+  }
+}
+
+std::unique_ptr<DictionaryValue> TraceConfig::ToDict() const {
+  auto dict = std::make_unique<DictionaryValue>();
+  dict->SetString(kRecordModeParam,
+                  TraceConfig::TraceRecordModeToStr(record_mode_));
+  dict->SetBoolean(kEnableSystraceParam, enable_systrace_);
+  dict->SetBoolean(kEnableArgumentFilterParam, enable_argument_filter_);
+
+  category_filter_.ToDict(dict.get());
+
+  if (!event_filters_.empty()) {
+    auto filter_list = std::make_unique<base::ListValue>();
+    for (const EventFilterConfig& filter : event_filters_) {
+      auto filter_dict = std::make_unique<base::DictionaryValue>();
+      filter.ToDict(filter_dict.get());
+      filter_list->Append(std::move(filter_dict));
+    }
+    dict->Set(kEventFiltersParam, std::move(filter_list));
+  }
+
+  if (category_filter_.IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
+    auto allowed_modes = std::make_unique<ListValue>();
+    for (auto dump_mode : memory_dump_config_.allowed_dump_modes)
+      allowed_modes->AppendString(MemoryDumpLevelOfDetailToString(dump_mode));
+
+    auto memory_dump_config = std::make_unique<DictionaryValue>();
+    memory_dump_config->Set(kAllowedDumpModesParam, std::move(allowed_modes));
+
+    auto triggers_list = std::make_unique<ListValue>();
+    for (const auto& config : memory_dump_config_.triggers) {
+      auto trigger_dict = std::make_unique<DictionaryValue>();
+      trigger_dict->SetString(kTriggerTypeParam,
+                              MemoryDumpTypeToString(config.trigger_type));
+      trigger_dict->SetInteger(
+          kMinTimeBetweenDumps,
+          static_cast<int>(config.min_time_between_dumps_ms));
+      trigger_dict->SetString(
+          kTriggerModeParam,
+          MemoryDumpLevelOfDetailToString(config.level_of_detail));
+      triggers_list->Append(std::move(trigger_dict));
+    }
+
+    // An empty trigger list is still written out explicitly, since it means
+    // that periodic dumps are not enabled.
+    memory_dump_config->Set(kTriggersParam, std::move(triggers_list));
+
+    if (memory_dump_config_.heap_profiler_options.breakdown_threshold_bytes !=
+        MemoryDumpConfig::HeapProfiler::kDefaultBreakdownThresholdBytes) {
+      auto options = std::make_unique<DictionaryValue>();
+      options->SetInteger(
+          kBreakdownThresholdBytes,
+          memory_dump_config_.heap_profiler_options.breakdown_threshold_bytes);
+      memory_dump_config->Set(kHeapProfilerOptions, std::move(options));
+    }
+    dict->Set(kMemoryDumpConfigParam, std::move(memory_dump_config));
+  }
+  return dict;
+}
+
+std::string TraceConfig::ToTraceOptionsString() const {
+  std::string ret;
+  switch (record_mode_) {
+    case RECORD_UNTIL_FULL:
+      ret = kRecordUntilFull;
+      break;
+    case RECORD_CONTINUOUSLY:
+      ret = kRecordContinuously;
+      break;
+    case RECORD_AS_MUCH_AS_POSSIBLE:
+      ret = kRecordAsMuchAsPossible;
+      break;
+    case ECHO_TO_CONSOLE:
+      ret = kTraceToConsole;
+      break;
+    default:
+      NOTREACHED();
+  }
+  if (enable_systrace_)
+    ret = ret + "," + kEnableSystrace;
+  if (enable_argument_filter_)
+    ret = ret + "," + kEnableArgumentFilter;
+  return ret;
+}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/trace_config.h b/base/trace_event/trace_config.h
new file mode 100644
index 0000000..decd54d
--- /dev/null
+++ b/base/trace_event/trace_config.h
@@ -0,0 +1,289 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_CONFIG_H_
+#define BASE_TRACE_EVENT_TRACE_CONFIG_H_
+
+#include <stdint.h>
+
+#include <memory>
+#include <set>
+#include <string>
+#include <unordered_set>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/gtest_prod_util.h"
+#include "base/strings/string_piece.h"
+#include "base/trace_event/memory_dump_request_args.h"
+#include "base/trace_event/trace_config_category_filter.h"
+#include "base/values.h"
+
+namespace base {
+namespace trace_event {
+
+class ConvertableToTraceFormat;
+
+// These options determine how the trace buffer stores data.
+// A Java counterpart will be generated for this enum.
+// GENERATED_JAVA_ENUM_PACKAGE: org.chromium.base
+enum TraceRecordMode {
+  // Record until the trace buffer is full.
+  RECORD_UNTIL_FULL,
+
+  // Record until the user ends the trace. The trace buffer is a fixed size
+  // and we use it as a ring buffer during recording.
+  RECORD_CONTINUOUSLY,
+
+  // Record until the trace buffer is full, but with a huge buffer size.
+  RECORD_AS_MUCH_AS_POSSIBLE,
+
+  // Echo to console. Events are discarded.
+  ECHO_TO_CONSOLE,
+};
+
+class BASE_EXPORT TraceConfig {
+ public:
+  using StringList = std::vector<std::string>;
+
+  // Specifies the memory dump config for tracing.
+  // Used only when "memory-infra" category is enabled.
+  struct BASE_EXPORT MemoryDumpConfig {
+    MemoryDumpConfig();
+    MemoryDumpConfig(const MemoryDumpConfig& other);
+    ~MemoryDumpConfig();
+
+    // Specifies the triggers in the memory dump config.
+    struct Trigger {
+      uint32_t min_time_between_dumps_ms;
+      MemoryDumpLevelOfDetail level_of_detail;
+      MemoryDumpType trigger_type;
+    };
+
+    // Specifies the configuration options for the heap profiler.
+    struct HeapProfiler {
+      // Default value for |breakdown_threshold_bytes|.
+      enum { kDefaultBreakdownThresholdBytes = 1024 };
+
+      HeapProfiler();
+
+      // Reset the options to default.
+      void Clear();
+
+      uint32_t breakdown_threshold_bytes;
+    };
+
+    // Reset the values in the config.
+    void Clear();
+
+    void Merge(const MemoryDumpConfig& config);
+
+    // Set of memory dump modes allowed for the tracing session. Explicitly
+    // triggered dumps succeed only if their dump mode is allowed in the
+    // config.
+    std::set<MemoryDumpLevelOfDetail> allowed_dump_modes;
+
+    std::vector<Trigger> triggers;
+    HeapProfiler heap_profiler_options;
+  };
+
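+  // Configuration of a single event filter: the name of the filtering
+  // predicate, the categories it applies to, and optional predicate-specific
+  // arguments.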
+  class BASE_EXPORT EventFilterConfig {
+   public:
+    EventFilterConfig(const std::string& predicate_name);
+    EventFilterConfig(const EventFilterConfig& tc);
+
+    ~EventFilterConfig();
+
+    EventFilterConfig& operator=(const EventFilterConfig& rhs);
+
+    void InitializeFromConfigDict(const base::DictionaryValue* event_filter);
+
+    void SetCategoryFilter(const TraceConfigCategoryFilter& category_filter);
+
+    void ToDict(DictionaryValue* filter_dict) const;
+
+    bool GetArgAsSet(const char* key, std::unordered_set<std::string>*) const;
+
+    bool IsCategoryGroupEnabled(const StringPiece& category_group_name) const;
+
+    const std::string& predicate_name() const { return predicate_name_; }
+    base::DictionaryValue* filter_args() const { return args_.get(); }
+    const TraceConfigCategoryFilter& category_filter() const {
+      return category_filter_;
+    }
+
+   private:
+    std::string predicate_name_;
+    TraceConfigCategoryFilter category_filter_;
+    std::unique_ptr<base::DictionaryValue> args_;
+  };
+  using EventFilters = std::vector<EventFilterConfig>;
+
+  static std::string TraceRecordModeToStr(TraceRecordMode record_mode);
+
+  TraceConfig();
+
+  // Create TraceConfig object from category filter and trace options strings.
+  //
+  // |category_filter_string| is a comma-delimited list of category wildcards.
+  // A category can have an optional '-' prefix to make it an excluded
+  // category. Listing the same category as both included and excluded is not
+  // supported.
+  //
+  // |trace_options_string| is a comma-delimited list of trace options.
+  // Possible options are: "record-until-full", "record-continuously",
+  // "record-as-much-as-possible", "trace-to-console", "enable-systrace" and
+  // "enable-argument-filter".
+  // The first 4 options are trace recording modes and hence
+  // mutually exclusive. If more than one trace recording mode appears in the
+  // options string, the last one takes precedence. If no trace recording
+  // mode is specified, the recording mode defaults to RECORD_UNTIL_FULL.
+  //
+  // The trace options are first reset to their defaults (record_mode set to
+  // RECORD_UNTIL_FULL, enable_systrace and enable_argument_filter set to
+  // false) before the options parsed from |trace_options_string| are
+  // applied. If |trace_options_string| is invalid, the final state of the
+  // trace options is undefined.
+  //
+  // Example: TraceConfig("test_MyTest*", "record-until-full");
+  // Example: TraceConfig("test_MyTest*,test_OtherStuff",
+  //                      "record-continuously");
+  // Example: TraceConfig("-excluded_category1,-excluded_category2",
+  //                      "record-until-full, trace-to-console");
+  //          would set ECHO_TO_CONSOLE as the recording mode.
+  // Example: TraceConfig("-*,webkit", "");
+  //          would disable everything but webkit; and use default options.
+  // Example: TraceConfig("-webkit", "");
+  //          would enable everything but webkit; and use default options.
+  TraceConfig(StringPiece category_filter_string,
+              StringPiece trace_options_string);
+
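+  // As above, but takes the recording mode directly; it is converted to its
+  // legacy option string via TraceRecordModeToStr().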
+  TraceConfig(StringPiece category_filter_string, TraceRecordMode record_mode);
+
+  // Create TraceConfig object from the trace config string.
+  //
+  // |config_string| is a dictionary formatted as a JSON string, containing both
+  // category filters and trace options.
+  //
+  // Example:
+  //   {
+  //     "record_mode": "record-continuously",
+  //     "enable_systrace": true,
+  //     "enable_argument_filter": true,
+  //     "included_categories": ["included",
+  //                             "inc_pattern*",
+  //                             "disabled-by-default-memory-infra"],
+  //     "excluded_categories": ["excluded", "exc_pattern*"],
+  //     "memory_dump_config": {
+  //       "triggers": [
+  //         {
+  //           "mode": "detailed",
+  //           "periodic_interval_ms": 2000
+  //         }
+  //       ]
+  //     }
+  //   }
+  //
+  // Note: memory_dump_config can be specified only if
+  // disabled-by-default-memory-infra category is enabled.
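+  //
+  // Example (hypothetical config string, for illustration only):
+  //   TraceConfig config("{\"record_mode\":\"record-continuously\"}");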
+  explicit TraceConfig(StringPiece config_string);
+
+  // Functionally identical to the above, but takes a parsed dictionary as input
+  // instead of its JSON serialization.
+  explicit TraceConfig(const DictionaryValue& config);
+
+  TraceConfig(const TraceConfig& tc);
+
+  ~TraceConfig();
+
+  TraceConfig& operator=(const TraceConfig& rhs);
+
+  TraceRecordMode GetTraceRecordMode() const { return record_mode_; }
+  bool IsSystraceEnabled() const { return enable_systrace_; }
+  bool IsArgumentFilterEnabled() const { return enable_argument_filter_; }
+
+  void SetTraceRecordMode(TraceRecordMode mode) { record_mode_ = mode; }
+  void EnableSystrace() { enable_systrace_ = true; }
+  void EnableArgumentFilter() { enable_argument_filter_ = true; }
+
+  // Writes the string representation of the TraceConfig. The string is JSON
+  // formatted.
+  std::string ToString() const;
+
+  // Returns a copy of the TraceConfig wrapped in a ConvertableToTraceFormat.
+  std::unique_ptr<ConvertableToTraceFormat> AsConvertableToTraceFormat() const;
+
+  // Writes the string representation of the CategoryFilter part.
+  std::string ToCategoryFilterString() const;
+
+  // Returns true if at least one category in the list is enabled by this
+  // trace config. This is used to determine if the category filters are
+  // enabled in the TRACE_* macros.
+  bool IsCategoryGroupEnabled(const StringPiece& category_group_name) const;
+
+  // Merges |config| into the current TraceConfig.
+  void Merge(const TraceConfig& config);
+
+  void Clear();
+
+  // Clears and resets the memory dump config.
+  void ResetMemoryDumpConfig(const MemoryDumpConfig& memory_dump_config);
+
+  const TraceConfigCategoryFilter& category_filter() const {
+    return category_filter_;
+  }
+
+  const MemoryDumpConfig& memory_dump_config() const {
+    return memory_dump_config_;
+  }
+
+  const EventFilters& event_filters() const { return event_filters_; }
+  void SetEventFilters(const EventFilters& filter_configs) {
+    event_filters_ = filter_configs;
+  }
+
+ private:
+  FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromValidLegacyFormat);
+  FRIEND_TEST_ALL_PREFIXES(TraceConfigTest,
+                           TraceConfigFromInvalidLegacyStrings);
+
+  // The default trace config, used when none is provided.
+  // Allows all non-disabled-by-default categories through, except if they end
+  // in the suffix 'Debug' or 'Test'.
+  void InitializeDefault();
+
+  // Initialize from a config dictionary.
+  void InitializeFromConfigDict(const DictionaryValue& dict);
+
+  // Initialize from a config string.
+  void InitializeFromConfigString(StringPiece config_string);
+
+  // Initialize from category filter and trace options strings
+  void InitializeFromStrings(StringPiece category_filter_string,
+                             StringPiece trace_options_string);
+
+  void SetMemoryDumpConfigFromConfigDict(
+      const DictionaryValue& memory_dump_config);
+  void SetDefaultMemoryDumpConfig();
+
+  void SetEventFiltersFromConfigList(const base::ListValue& event_filters);
+  std::unique_ptr<DictionaryValue> ToDict() const;
+
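+  // Writes the legacy trace options ("record-until-full", "enable-systrace",
+  // etc.) as a comma-separated string.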
+  std::string ToTraceOptionsString() const;
+
+  TraceRecordMode record_mode_;
+  bool enable_systrace_ : 1;
+  bool enable_argument_filter_ : 1;
+
+  TraceConfigCategoryFilter category_filter_;
+
+  MemoryDumpConfig memory_dump_config_;
+
+  EventFilters event_filters_;
+};
+
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_TRACE_CONFIG_H_
diff --git a/base/trace_event/trace_config_category_filter.cc b/base/trace_event/trace_config_category_filter.cc
new file mode 100644
index 0000000..d188430
--- /dev/null
+++ b/base/trace_event/trace_config_category_filter.cc
@@ -0,0 +1,235 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_config_category_filter.h"
+
+#include "base/memory/ptr_util.h"
+#include "base/strings/pattern.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_tokenizer.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/trace_event/trace_event.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+const char kIncludedCategoriesParam[] = "included_categories";
+const char kExcludedCategoriesParam[] = "excluded_categories";
+}  // namespace
+
+TraceConfigCategoryFilter::TraceConfigCategoryFilter() = default;
+
+TraceConfigCategoryFilter::TraceConfigCategoryFilter(
+    const TraceConfigCategoryFilter& other) = default;
+
+TraceConfigCategoryFilter::~TraceConfigCategoryFilter() = default;
+
+TraceConfigCategoryFilter& TraceConfigCategoryFilter::operator=(
+    const TraceConfigCategoryFilter& rhs) = default;
+
+void TraceConfigCategoryFilter::InitializeFromString(
+    const StringPiece& category_filter_string) {
+  std::vector<StringPiece> split = SplitStringPiece(
+      category_filter_string, ",", TRIM_WHITESPACE, SPLIT_WANT_ALL);
+  for (const StringPiece& category : split) {
+    // Ignore empty categories.
+    if (category.empty())
+      continue;
+    if (category.front() == '-') {
+      // Excluded categories start with '-'.
+      // Remove '-' from category string.
+      excluded_categories_.push_back(category.substr(1).as_string());
+    } else if (category.starts_with(TRACE_DISABLED_BY_DEFAULT(""))) {
+      disabled_categories_.push_back(category.as_string());
+    } else {
+      included_categories_.push_back(category.as_string());
+    }
+  }
+}
+
+void TraceConfigCategoryFilter::InitializeFromConfigDict(
+    const DictionaryValue& dict) {
+  const ListValue* category_list = nullptr;
+  if (dict.GetList(kIncludedCategoriesParam, &category_list))
+    SetCategoriesFromIncludedList(*category_list);
+  if (dict.GetList(kExcludedCategoriesParam, &category_list))
+    SetCategoriesFromExcludedList(*category_list);
+}
+
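+// Two passes over the comma-separated tokens of |category_group_name|: the
+// first pass returns true as soon as any token is explicitly enabled; the
+// second pass decides whether every token is explicitly excluded, so that a
+// group with no included patterns can still be enabled by default.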
+bool TraceConfigCategoryFilter::IsCategoryGroupEnabled(
+    const StringPiece& category_group_name) const {
+  bool had_enabled_by_default = false;
+  DCHECK(!category_group_name.empty());
+  CStringTokenizer category_group_tokens(category_group_name.begin(),
+                                         category_group_name.end(), ",");
+  while (category_group_tokens.GetNext()) {
+    StringPiece category_group_token = category_group_tokens.token_piece();
+    // Don't allow empty tokens, nor tokens with leading or trailing space.
+    DCHECK(IsCategoryNameAllowed(category_group_token))
+        << "Disallowed category string";
+    if (IsCategoryEnabled(category_group_token))
+      return true;
+
+    if (!MatchPattern(category_group_token, TRACE_DISABLED_BY_DEFAULT("*")))
+      had_enabled_by_default = true;
+  }
+  // Do a second pass to check for explicitly disabled categories
+  // (those explicitly enabled take priority due to the first pass).
+  category_group_tokens.Reset();
+  bool category_group_disabled = false;
+  while (category_group_tokens.GetNext()) {
+    StringPiece category_group_token = category_group_tokens.token_piece();
+    for (const std::string& category : excluded_categories_) {
+      if (MatchPattern(category_group_token, category)) {
+        // The current token of category_group_name is present in the
+        // excluded_ list. Flag the exclusion and proceed further, to check
+        // whether any of the remaining categories of category_group_name is
+        // absent from the excluded_ list.
+        category_group_disabled = true;
+        break;
+      }
+      // This category of category_group_name is not present in the
+      // excluded_ list. So, if it is not a disabled-by-default category,
+      // it has to be in the included_ list. Enable category_group_name
+      // for recording.
+      if (!MatchPattern(category_group_token, TRACE_DISABLED_BY_DEFAULT("*")))
+        category_group_disabled = false;
+    }
+    // One of the categories present in category_group_name is not in the
+    // excluded_ list, so the whole category_group_name can be enabled for
+    // recording, since one of its categories is enabled.
+    if (!category_group_disabled)
+      break;
+  }
+  // If the category group is not excluded and there are no included patterns,
+  // we consider this category group enabled, as long as it had categories
+  // other than disabled-by-default ones.
+  return !category_group_disabled && had_enabled_by_default &&
+         included_categories_.empty();
+}
+
+bool TraceConfigCategoryFilter::IsCategoryEnabled(
+    const StringPiece& category_name) const {
+  // Check the disabled- filters and the disabled-* wildcard first so that a
+  // "*" filter does not include the disabled-by-default categories.
+  for (const std::string& category : disabled_categories_) {
+    if (MatchPattern(category_name, category))
+      return true;
+  }
+
+  if (MatchPattern(category_name, TRACE_DISABLED_BY_DEFAULT("*")))
+    return false;
+
+  for (const std::string& category : included_categories_) {
+    if (MatchPattern(category_name, category))
+      return true;
+  }
+
+  return false;
+}
+
+void TraceConfigCategoryFilter::Merge(const TraceConfigCategoryFilter& config) {
+  // Keep included patterns only if both filters have an included entry.
+  // Otherwise, one of the filters was specifying "*" and we want to honor
+  // the broadest filter.
+  if (!included_categories_.empty() && !config.included_categories_.empty()) {
+    included_categories_.insert(included_categories_.end(),
+                                config.included_categories_.begin(),
+                                config.included_categories_.end());
+  } else {
+    included_categories_.clear();
+  }
+
+  disabled_categories_.insert(disabled_categories_.end(),
+                              config.disabled_categories_.begin(),
+                              config.disabled_categories_.end());
+  excluded_categories_.insert(excluded_categories_.end(),
+                              config.excluded_categories_.begin(),
+                              config.excluded_categories_.end());
+}
+
+void TraceConfigCategoryFilter::Clear() {
+  included_categories_.clear();
+  disabled_categories_.clear();
+  excluded_categories_.clear();
+}
+
+void TraceConfigCategoryFilter::ToDict(DictionaryValue* dict) const {
+  StringList categories(included_categories_);
+  categories.insert(categories.end(), disabled_categories_.begin(),
+                    disabled_categories_.end());
+  AddCategoriesToDict(categories, kIncludedCategoriesParam, dict);
+  AddCategoriesToDict(excluded_categories_, kExcludedCategoriesParam, dict);
+}
+
+std::string TraceConfigCategoryFilter::ToFilterString() const {
+  std::string filter_string;
+  WriteCategoryFilterString(included_categories_, &filter_string, true);
+  WriteCategoryFilterString(disabled_categories_, &filter_string, true);
+  WriteCategoryFilterString(excluded_categories_, &filter_string, false);
+  return filter_string;
+}
+
+void TraceConfigCategoryFilter::SetCategoriesFromIncludedList(
+    const ListValue& included_list) {
+  included_categories_.clear();
+  for (size_t i = 0; i < included_list.GetSize(); ++i) {
+    std::string category;
+    if (!included_list.GetString(i, &category))
+      continue;
+    if (category.compare(0, strlen(TRACE_DISABLED_BY_DEFAULT("")),
+                         TRACE_DISABLED_BY_DEFAULT("")) == 0) {
+      disabled_categories_.push_back(category);
+    } else {
+      included_categories_.push_back(category);
+    }
+  }
+}
+
+void TraceConfigCategoryFilter::SetCategoriesFromExcludedList(
+    const ListValue& excluded_list) {
+  excluded_categories_.clear();
+  for (size_t i = 0; i < excluded_list.GetSize(); ++i) {
+    std::string category;
+    if (excluded_list.GetString(i, &category))
+      excluded_categories_.push_back(category);
+  }
+}
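+// Writes |categories| into |dict| under |param| as a ListValue, omitting the
+// key entirely when the list is empty.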
+
+void TraceConfigCategoryFilter::AddCategoriesToDict(
+    const StringList& categories,
+    const char* param,
+    DictionaryValue* dict) const {
+  if (categories.empty())
+    return;
+
+  auto list = std::make_unique<ListValue>();
+  for (const std::string& category : categories)
+    list->AppendString(category);
+  dict->Set(param, std::move(list));
+}
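+// Appends |values| to |out| as a comma-separated list, prefixing each entry
+// with '-' when writing the excluded-category list.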
+
+void TraceConfigCategoryFilter::WriteCategoryFilterString(
+    const StringList& values,
+    std::string* out,
+    bool included) const {
+  bool prepend_comma = !out->empty();
+  int token_cnt = 0;
+  for (const std::string& category : values) {
+    if (token_cnt > 0 || prepend_comma)
+      StringAppendF(out, ",");
+    StringAppendF(out, "%s%s", (included ? "" : "-"), category.c_str());
+    ++token_cnt;
+  }
+}
+
+// static
+bool TraceConfigCategoryFilter::IsCategoryNameAllowed(StringPiece str) {
+  return !str.empty() && str.front() != ' ' && str.back() != ' ';
+}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/trace_config_category_filter.h b/base/trace_event/trace_config_category_filter.h
new file mode 100644
index 0000000..0140c1d
--- /dev/null
+++ b/base/trace_event/trace_config_category_filter.h
@@ -0,0 +1,81 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_CONFIG_CATEGORY_FILTER_H_
+#define BASE_TRACE_EVENT_TRACE_CONFIG_CATEGORY_FILTER_H_
+
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/strings/string_piece.h"
+#include "base/values.h"
+
+namespace base {
+namespace trace_event {
+
+// Configuration of categories enabled and disabled in TraceConfig.
+class BASE_EXPORT TraceConfigCategoryFilter {
+ public:
+  using StringList = std::vector<std::string>;
+
+  TraceConfigCategoryFilter();
+  TraceConfigCategoryFilter(const TraceConfigCategoryFilter& other);
+  ~TraceConfigCategoryFilter();
+
+  TraceConfigCategoryFilter& operator=(const TraceConfigCategoryFilter& rhs);
+
+  // Initializes from a category filter string. See the TraceConfig
+  // constructor for a description of how to write a category filter string.
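+  // Example: "included,inc_pattern*,-excluded,disabled-by-default-foo".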
+  void InitializeFromString(const StringPiece& category_filter_string);
+
+  // Initializes TraceConfigCategoryFilter object from the config dictionary.
+  void InitializeFromConfigDict(const DictionaryValue& dict);
+
+  // Merges |config| into this category filter.
+  void Merge(const TraceConfigCategoryFilter& config);
+  void Clear();
+
+  // Returns true if at least one category in the list is enabled by this
+  // trace config. This is used to determine if the category filters are
+  // enabled in the TRACE_* macros.
+  bool IsCategoryGroupEnabled(const StringPiece& category_group_name) const;
+
+  // Returns true if the category is enabled according to this trace config.
+  // This tells whether a category is enabled from the TraceConfig's
+  // perspective. Please refer to IsCategoryGroupEnabled() to determine if a
+  // category is enabled from the tracing runtime's perspective.
+  bool IsCategoryEnabled(const StringPiece& category_name) const;
+
+  void ToDict(DictionaryValue* dict) const;
+
+  std::string ToFilterString() const;
+
+  // Returns true if category name is a valid string.
+  static bool IsCategoryNameAllowed(StringPiece str);
+
+  const StringList& included_categories() const { return included_categories_; }
+  const StringList& excluded_categories() const { return excluded_categories_; }
+
+ private:
+  void SetCategoriesFromIncludedList(const ListValue& included_list);
+  void SetCategoriesFromExcludedList(const ListValue& excluded_list);
+
+  void AddCategoriesToDict(const StringList& categories,
+                           const char* param,
+                           DictionaryValue* dict) const;
+
+  void WriteCategoryFilterString(const StringList& values,
+                                 std::string* out,
+                                 bool included) const;
+
+  StringList included_categories_;
+  StringList disabled_categories_;
+  StringList excluded_categories_;
+};
+
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_TRACE_CONFIG_CATEGORY_FILTER_H_
diff --git a/base/trace_event/trace_config_memory_test_util.h b/base/trace_event/trace_config_memory_test_util.h
new file mode 100644
index 0000000..57608fd
--- /dev/null
+++ b/base/trace_event/trace_config_memory_test_util.h
@@ -0,0 +1,178 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_CONFIG_MEMORY_TEST_UTIL_H_
+#define BASE_TRACE_EVENT_TRACE_CONFIG_MEMORY_TEST_UTIL_H_
+
+#include "base/strings/stringprintf.h"
+#include "base/trace_event/memory_dump_manager.h"
+
+namespace base {
+namespace trace_event {
+
+class TraceConfigMemoryTestUtil {
+ public:
+  static std::string GetTraceConfig_LegacyPeriodicTriggers(int light_period,
+                                                           int heavy_period) {
+    return StringPrintf(
+        "{"
+        "\"enable_argument_filter\":false,"
+        "\"enable_systrace\":false,"
+        "\"excluded_categories\":["
+        "\"*\""
+        "],"
+        "\"included_categories\":["
+        "\"%s\""
+        "],"
+        "\"memory_dump_config\":{"
+        "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
+        "\"heap_profiler_options\":{"
+        "\"breakdown_threshold_bytes\":2048"
+        "},"
+        "\"triggers\":["
+        "{"
+        "\"mode\":\"light\","
+        "\"periodic_interval_ms\":%d"
+        "},"
+        "{"
+        "\"mode\":\"detailed\","
+        "\"periodic_interval_ms\":%d"
+        "}"
+        "]"
+        "},"
+        "\"record_mode\":\"record-until-full\""
+        "}",
+        MemoryDumpManager::kTraceCategory, light_period, heavy_period);
+  }
+
+  static std::string GetTraceConfig_PeriodicTriggers(int light_period,
+                                                     int heavy_period) {
+    return StringPrintf(
+        "{"
+        "\"enable_argument_filter\":false,"
+        "\"enable_systrace\":false,"
+        "\"excluded_categories\":["
+        "\"*\""
+        "],"
+        "\"included_categories\":["
+        "\"%s\""
+        "],"
+        "\"memory_dump_config\":{"
+        "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
+        "\"heap_profiler_options\":{"
+        "\"breakdown_threshold_bytes\":2048"
+        "},"
+        "\"triggers\":["
+        "{"
+        "\"min_time_between_dumps_ms\":%d,"
+        "\"mode\":\"light\","
+        "\"type\":\"periodic_interval\""
+        "},"
+        "{"
+        "\"min_time_between_dumps_ms\":%d,"
+        "\"mode\":\"detailed\","
+        "\"type\":\"periodic_interval\""
+        "}"
+        "]"
+        "},"
+        "\"record_mode\":\"record-until-full\""
+        "}",
+        MemoryDumpManager::kTraceCategory, light_period, heavy_period);
+  }
+
+  static std::string GetTraceConfig_EmptyTriggers() {
+    return StringPrintf(
+        "{"
+        "\"enable_argument_filter\":false,"
+        "\"enable_systrace\":false,"
+        "\"excluded_categories\":["
+        "\"*\""
+        "],"
+        "\"included_categories\":["
+        "\"%s\""
+        "],"
+        "\"memory_dump_config\":{"
+        "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
+        "\"triggers\":["
+        "]"
+        "},"
+        "\"record_mode\":\"record-until-full\""
+        "}",
+        MemoryDumpManager::kTraceCategory);
+  }
+
+  static std::string GetTraceConfig_NoTriggers() {
+    return StringPrintf(
+        "{"
+        "\"enable_argument_filter\":false,"
+        "\"enable_systrace\":false,"
+        "\"excluded_categories\":["
+        "\"*\""
+        "],"
+        "\"included_categories\":["
+        "\"%s\""
+        "],"
+        "\"record_mode\":\"record-until-full\""
+        "}",
+        MemoryDumpManager::kTraceCategory);
+  }
+
+  static std::string GetTraceConfig_BackgroundTrigger(int period_ms) {
+    return StringPrintf(
+        "{"
+        "\"enable_argument_filter\":false,"
+        "\"enable_systrace\":false,"
+        "\"excluded_categories\":["
+        "\"*\""
+        "],"
+        "\"included_categories\":["
+        "\"%s\""
+        "],"
+        "\"memory_dump_config\":{"
+        "\"allowed_dump_modes\":[\"background\"],"
+        "\"triggers\":["
+        "{"
+        "\"min_time_between_dumps_ms\":%d,"
+        "\"mode\":\"background\","
+        "\"type\":\"periodic_interval\""
+        "}"
+        "]"
+        "},"
+        "\"record_mode\":\"record-until-full\""
+        "}",
+        MemoryDumpManager::kTraceCategory, period_ms);
+  }
+
+  static std::string GetTraceConfig_PeakDetectionTrigger(int heavy_period) {
+    return StringPrintf(
+        "{"
+        "\"enable_argument_filter\":false,"
+        "\"enable_systrace\":false,"
+        "\"excluded_categories\":["
+        "\"*\""
+        "],"
+        "\"included_categories\":["
+        "\"%s\""
+        "],"
+        "\"memory_dump_config\":{"
+        "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
+        "\"triggers\":["
+        "{"
+        "\"min_time_between_dumps_ms\":%d,"
+        "\"mode\":\"detailed\","
+        "\"type\":\"peak_memory_usage\""
+        "}"
+        "]"
+        "},"
+        "\"record_mode\":\"record-until-full\""
+        "}",
+        MemoryDumpManager::kTraceCategory, heavy_period);
+  }
+};
+
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_TRACE_CONFIG_MEMORY_TEST_UTIL_H_
diff --git a/base/trace_event/trace_config_unittest.cc b/base/trace_event/trace_config_unittest.cc
new file mode 100644
index 0000000..3cb6d61
--- /dev/null
+++ b/base/trace_event/trace_config_unittest.cc
@@ -0,0 +1,673 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+
+#include "base/json/json_reader.h"
+#include "base/json/json_writer.h"
+#include "base/macros.h"
+#include "base/trace_event/memory_dump_manager.h"
+#include "base/trace_event/trace_config.h"
+#include "base/trace_event/trace_config_memory_test_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+const char kDefaultTraceConfigString[] =
+  "{"
+    "\"enable_argument_filter\":false,"
+    "\"enable_systrace\":false,"
+    "\"record_mode\":\"record-until-full\""
+  "}";
+
+const char kCustomTraceConfigString[] =
+    "{"
+    "\"enable_argument_filter\":true,"
+    "\"enable_systrace\":true,"
+    "\"event_filters\":["
+    "{"
+    "\"excluded_categories\":[\"unfiltered_cat\"],"
+    "\"filter_args\":{\"event_name_whitelist\":[\"a snake\",\"a dog\"]},"
+    "\"filter_predicate\":\"event_whitelist_predicate\","
+    "\"included_categories\":[\"*\"]"
+    "}"
+    "],"
+    "\"excluded_categories\":[\"excluded\",\"exc_pattern*\"],"
+    "\"included_categories\":["
+    "\"included\","
+    "\"inc_pattern*\","
+    "\"disabled-by-default-cc\","
+    "\"disabled-by-default-memory-infra\"],"
+    "\"memory_dump_config\":{"
+    "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
+    "\"heap_profiler_options\":{"
+    "\"breakdown_threshold_bytes\":10240"
+    "},"
+    "\"triggers\":["
+    "{"
+    "\"min_time_between_dumps_ms\":50,"
+    "\"mode\":\"light\","
+    "\"type\":\"periodic_interval\""
+    "},"
+    "{"
+    "\"min_time_between_dumps_ms\":1000,"
+    "\"mode\":\"detailed\","
+    "\"type\":\"peak_memory_usage\""
+    "}"
+    "]"
+    "},"
+    "\"record_mode\":\"record-continuously\""
+    "}";
+
+void CheckDefaultTraceConfigBehavior(const TraceConfig& tc) {
+  EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
+  EXPECT_FALSE(tc.IsSystraceEnabled());
+  EXPECT_FALSE(tc.IsArgumentFilterEnabled());
+
+  // Default trace config enables every category filter except the
+  // disabled-by-default-* ones.
+  EXPECT_TRUE(tc.IsCategoryGroupEnabled("Category1"));
+  EXPECT_TRUE(tc.IsCategoryGroupEnabled("not-excluded-category"));
+  EXPECT_FALSE(tc.IsCategoryGroupEnabled("disabled-by-default-cc"));
+
+  EXPECT_TRUE(tc.IsCategoryGroupEnabled("Category1,not-excluded-category"));
+  EXPECT_TRUE(tc.IsCategoryGroupEnabled("Category1,disabled-by-default-cc"));
+  EXPECT_FALSE(tc.IsCategoryGroupEnabled(
+      "disabled-by-default-cc,disabled-by-default-cc2"));
+}
+
+}  // namespace
+
+TEST(TraceConfigTest, TraceConfigFromValidLegacyFormat) {
+  // From trace options strings
+  TraceConfig config("", "record-until-full");
+  EXPECT_EQ(RECORD_UNTIL_FULL, config.GetTraceRecordMode());
+  EXPECT_FALSE(config.IsSystraceEnabled());
+  EXPECT_FALSE(config.IsArgumentFilterEnabled());
+  EXPECT_STREQ("record-until-full", config.ToTraceOptionsString().c_str());
+
+  config = TraceConfig("", "record-continuously");
+  EXPECT_EQ(RECORD_CONTINUOUSLY, config.GetTraceRecordMode());
+  EXPECT_FALSE(config.IsSystraceEnabled());
+  EXPECT_FALSE(config.IsArgumentFilterEnabled());
+  EXPECT_STREQ("record-continuously", config.ToTraceOptionsString().c_str());
+
+  config = TraceConfig("", "trace-to-console");
+  EXPECT_EQ(ECHO_TO_CONSOLE, config.GetTraceRecordMode());
+  EXPECT_FALSE(config.IsSystraceEnabled());
+  EXPECT_FALSE(config.IsArgumentFilterEnabled());
+  EXPECT_STREQ("trace-to-console", config.ToTraceOptionsString().c_str());
+
+  config = TraceConfig("", "record-as-much-as-possible");
+  EXPECT_EQ(RECORD_AS_MUCH_AS_POSSIBLE, config.GetTraceRecordMode());
+  EXPECT_FALSE(config.IsSystraceEnabled());
+  EXPECT_FALSE(config.IsArgumentFilterEnabled());
+  EXPECT_STREQ("record-as-much-as-possible",
+               config.ToTraceOptionsString().c_str());
+
+  config = TraceConfig("", "enable-systrace, record-continuously");
+  EXPECT_EQ(RECORD_CONTINUOUSLY, config.GetTraceRecordMode());
+  EXPECT_TRUE(config.IsSystraceEnabled());
+  EXPECT_FALSE(config.IsArgumentFilterEnabled());
+  EXPECT_STREQ("record-continuously,enable-systrace",
+               config.ToTraceOptionsString().c_str());
+
+  config = TraceConfig("", "enable-argument-filter,record-as-much-as-possible");
+  EXPECT_EQ(RECORD_AS_MUCH_AS_POSSIBLE, config.GetTraceRecordMode());
+  EXPECT_FALSE(config.IsSystraceEnabled());
+  EXPECT_TRUE(config.IsArgumentFilterEnabled());
+  EXPECT_STREQ("record-as-much-as-possible,enable-argument-filter",
+               config.ToTraceOptionsString().c_str());
+
+  config = TraceConfig(
+    "",
+    "enable-systrace,trace-to-console,enable-argument-filter");
+  EXPECT_EQ(ECHO_TO_CONSOLE, config.GetTraceRecordMode());
+  EXPECT_TRUE(config.IsSystraceEnabled());
+  EXPECT_TRUE(config.IsArgumentFilterEnabled());
+  EXPECT_STREQ(
+    "trace-to-console,enable-systrace,enable-argument-filter",
+    config.ToTraceOptionsString().c_str());
+
+  config = TraceConfig(
+    "", "record-continuously, record-until-full, trace-to-console");
+  EXPECT_EQ(ECHO_TO_CONSOLE, config.GetTraceRecordMode());
+  EXPECT_FALSE(config.IsSystraceEnabled());
+  EXPECT_FALSE(config.IsArgumentFilterEnabled());
+  EXPECT_STREQ("trace-to-console", config.ToTraceOptionsString().c_str());
+
+  // From TraceRecordMode
+  config = TraceConfig("", RECORD_UNTIL_FULL);
+  EXPECT_EQ(RECORD_UNTIL_FULL, config.GetTraceRecordMode());
+  EXPECT_FALSE(config.IsSystraceEnabled());
+  EXPECT_FALSE(config.IsArgumentFilterEnabled());
+  EXPECT_STREQ("record-until-full", config.ToTraceOptionsString().c_str());
+
+  config = TraceConfig("", RECORD_CONTINUOUSLY);
+  EXPECT_EQ(RECORD_CONTINUOUSLY, config.GetTraceRecordMode());
+  EXPECT_FALSE(config.IsSystraceEnabled());
+  EXPECT_FALSE(config.IsArgumentFilterEnabled());
+  EXPECT_STREQ("record-continuously", config.ToTraceOptionsString().c_str());
+
+  config = TraceConfig("", ECHO_TO_CONSOLE);
+  EXPECT_EQ(ECHO_TO_CONSOLE, config.GetTraceRecordMode());
+  EXPECT_FALSE(config.IsSystraceEnabled());
+  EXPECT_FALSE(config.IsArgumentFilterEnabled());
+  EXPECT_STREQ("trace-to-console", config.ToTraceOptionsString().c_str());
+
+  config = TraceConfig("", RECORD_AS_MUCH_AS_POSSIBLE);
+  EXPECT_EQ(RECORD_AS_MUCH_AS_POSSIBLE, config.GetTraceRecordMode());
+  EXPECT_FALSE(config.IsSystraceEnabled());
+  EXPECT_FALSE(config.IsArgumentFilterEnabled());
+  EXPECT_STREQ("record-as-much-as-possible",
+               config.ToTraceOptionsString().c_str());
+
+  // From category filter strings
+  config = TraceConfig("included,-excluded,inc_pattern*,-exc_pattern*", "");
+  EXPECT_STREQ("included,inc_pattern*,-excluded,-exc_pattern*",
+               config.ToCategoryFilterString().c_str());
+
+  config = TraceConfig("only_inc_cat", "");
+  EXPECT_STREQ("only_inc_cat", config.ToCategoryFilterString().c_str());
+
+  config = TraceConfig("-only_exc_cat", "");
+  EXPECT_STREQ("-only_exc_cat", config.ToCategoryFilterString().c_str());
+
+  config = TraceConfig("disabled-by-default-cc,-excluded", "");
+  EXPECT_STREQ("disabled-by-default-cc,-excluded",
+               config.ToCategoryFilterString().c_str());
+
+  config = TraceConfig("disabled-by-default-cc,included", "");
+  EXPECT_STREQ("included,disabled-by-default-cc",
+               config.ToCategoryFilterString().c_str());
+
+  // From both trace options and category filter strings
+  config = TraceConfig("", "");
+  EXPECT_EQ(RECORD_UNTIL_FULL, config.GetTraceRecordMode());
+  EXPECT_FALSE(config.IsSystraceEnabled());
+  EXPECT_FALSE(config.IsArgumentFilterEnabled());
+  EXPECT_STREQ("", config.ToCategoryFilterString().c_str());
+  EXPECT_STREQ("record-until-full", config.ToTraceOptionsString().c_str());
+
+  config = TraceConfig("included,-excluded,inc_pattern*,-exc_pattern*",
+                       "enable-systrace, trace-to-console");
+  EXPECT_EQ(ECHO_TO_CONSOLE, config.GetTraceRecordMode());
+  EXPECT_TRUE(config.IsSystraceEnabled());
+  EXPECT_FALSE(config.IsArgumentFilterEnabled());
+  EXPECT_STREQ("included,inc_pattern*,-excluded,-exc_pattern*",
+               config.ToCategoryFilterString().c_str());
+  EXPECT_STREQ("trace-to-console,enable-systrace",
+               config.ToTraceOptionsString().c_str());
+
+  // From both trace options and category filter strings with spaces.
+  config = TraceConfig(" included , -excluded, inc_pattern*, ,-exc_pattern*   ",
+                       "enable-systrace, ,trace-to-console  ");
+  EXPECT_EQ(ECHO_TO_CONSOLE, config.GetTraceRecordMode());
+  EXPECT_TRUE(config.IsSystraceEnabled());
+  EXPECT_FALSE(config.IsArgumentFilterEnabled());
+  EXPECT_STREQ("included,inc_pattern*,-excluded,-exc_pattern*",
+               config.ToCategoryFilterString().c_str());
+  EXPECT_STREQ("trace-to-console,enable-systrace",
+               config.ToTraceOptionsString().c_str());
+
+  // From category filter string and TraceRecordMode
+  config = TraceConfig("included,-excluded,inc_pattern*,-exc_pattern*",
+                       RECORD_CONTINUOUSLY);
+  EXPECT_EQ(RECORD_CONTINUOUSLY, config.GetTraceRecordMode());
+  EXPECT_FALSE(config.IsSystraceEnabled());
+  EXPECT_FALSE(config.IsArgumentFilterEnabled());
+  EXPECT_STREQ("included,inc_pattern*,-excluded,-exc_pattern*",
+               config.ToCategoryFilterString().c_str());
+  EXPECT_STREQ("record-continuously", config.ToTraceOptionsString().c_str());
+}
+
+TEST(TraceConfigTest, TraceConfigFromInvalidLegacyStrings) {
+  TraceConfig config("", "foo-bar-baz");
+  EXPECT_EQ(RECORD_UNTIL_FULL, config.GetTraceRecordMode());
+  EXPECT_FALSE(config.IsSystraceEnabled());
+  EXPECT_FALSE(config.IsArgumentFilterEnabled());
+  EXPECT_STREQ("", config.ToCategoryFilterString().c_str());
+  EXPECT_STREQ("record-until-full", config.ToTraceOptionsString().c_str());
+
+  config = TraceConfig("arbitrary-category", "foo-bar-baz, enable-systrace");
+  EXPECT_EQ(RECORD_UNTIL_FULL, config.GetTraceRecordMode());
+  EXPECT_TRUE(config.IsSystraceEnabled());
+  EXPECT_FALSE(config.IsArgumentFilterEnabled());
+  EXPECT_STREQ("arbitrary-category", config.ToCategoryFilterString().c_str());
+  EXPECT_STREQ("record-until-full,enable-systrace",
+               config.ToTraceOptionsString().c_str());
+}
+
+TEST(TraceConfigTest, ConstructDefaultTraceConfig) {
+  TraceConfig tc;
+  EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
+  EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
+  CheckDefaultTraceConfigBehavior(tc);
+
+  // Constructors from category filter and trace options strings.
+  TraceConfig tc_asterisk("*", "");
+  EXPECT_STREQ("*", tc_asterisk.ToCategoryFilterString().c_str());
+  CheckDefaultTraceConfigBehavior(tc_asterisk);
+
+  TraceConfig tc_empty_category_filter("", "");
+  EXPECT_STREQ("", tc_empty_category_filter.ToCategoryFilterString().c_str());
+  EXPECT_STREQ(kDefaultTraceConfigString,
+               tc_empty_category_filter.ToString().c_str());
+  CheckDefaultTraceConfigBehavior(tc_empty_category_filter);
+
+  // Constructor from a JSON-formatted config string.
+  TraceConfig tc_empty_json_string("");
+  EXPECT_STREQ("", tc_empty_json_string.ToCategoryFilterString().c_str());
+  EXPECT_STREQ(kDefaultTraceConfigString,
+               tc_empty_json_string.ToString().c_str());
+  CheckDefaultTraceConfigBehavior(tc_empty_json_string);
+
+  // Constructor from dictionary value.
+  DictionaryValue dict;
+  TraceConfig tc_dict(dict);
+  EXPECT_STREQ("", tc_dict.ToCategoryFilterString().c_str());
+  EXPECT_STREQ(kDefaultTraceConfigString, tc_dict.ToString().c_str());
+  CheckDefaultTraceConfigBehavior(tc_dict);
+}
+
+TEST(TraceConfigTest, EmptyAndAsteriskCategoryFilterString) {
+  TraceConfig tc_empty("", "");
+  TraceConfig tc_asterisk("*", "");
+
+  EXPECT_STREQ("", tc_empty.ToCategoryFilterString().c_str());
+  EXPECT_STREQ("*", tc_asterisk.ToCategoryFilterString().c_str());
+
+  // Both fall back to default config.
+  CheckDefaultTraceConfigBehavior(tc_empty);
+  CheckDefaultTraceConfigBehavior(tc_asterisk);
+
+  // They differ only for internal checking.
+  EXPECT_FALSE(tc_empty.category_filter().IsCategoryEnabled("Category1"));
+  EXPECT_FALSE(
+      tc_empty.category_filter().IsCategoryEnabled("not-excluded-category"));
+  EXPECT_TRUE(tc_asterisk.category_filter().IsCategoryEnabled("Category1"));
+  EXPECT_TRUE(
+      tc_asterisk.category_filter().IsCategoryEnabled("not-excluded-category"));
+}
+
+TEST(TraceConfigTest, DisabledByDefaultCategoryFilterString) {
+  TraceConfig tc("foo,disabled-by-default-foo", "");
+  EXPECT_STREQ("foo,disabled-by-default-foo",
+               tc.ToCategoryFilterString().c_str());
+  EXPECT_TRUE(tc.IsCategoryGroupEnabled("foo"));
+  EXPECT_TRUE(tc.IsCategoryGroupEnabled("disabled-by-default-foo"));
+  EXPECT_FALSE(tc.IsCategoryGroupEnabled("bar"));
+  EXPECT_FALSE(tc.IsCategoryGroupEnabled("disabled-by-default-bar"));
+
+  EXPECT_TRUE(tc.event_filters().empty());
+  // Enabling only a disabled-by-default-* category means the default ones
+  // are also enabled.
+  tc = TraceConfig("disabled-by-default-foo", "");
+  EXPECT_STREQ("disabled-by-default-foo", tc.ToCategoryFilterString().c_str());
+  EXPECT_TRUE(tc.IsCategoryGroupEnabled("disabled-by-default-foo"));
+  EXPECT_TRUE(tc.IsCategoryGroupEnabled("foo"));
+  EXPECT_TRUE(tc.IsCategoryGroupEnabled("bar"));
+  EXPECT_FALSE(tc.IsCategoryGroupEnabled("disabled-by-default-bar"));
+}
+
+TEST(TraceConfigTest, TraceConfigFromDict) {
+  // Passing in an empty dictionary results in the default trace config.
+  DictionaryValue dict;
+  TraceConfig tc(dict);
+  EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
+  EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
+  EXPECT_FALSE(tc.IsSystraceEnabled());
+  EXPECT_FALSE(tc.IsArgumentFilterEnabled());
+  EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
+
+  std::unique_ptr<Value> default_value(
+      JSONReader::Read(kDefaultTraceConfigString));
+  DCHECK(default_value);
+  const DictionaryValue* default_dict = nullptr;
+  bool is_dict = default_value->GetAsDictionary(&default_dict);
+  DCHECK(is_dict);
+  TraceConfig default_tc(*default_dict);
+  EXPECT_STREQ(kDefaultTraceConfigString, default_tc.ToString().c_str());
+  EXPECT_EQ(RECORD_UNTIL_FULL, default_tc.GetTraceRecordMode());
+  EXPECT_FALSE(default_tc.IsSystraceEnabled());
+  EXPECT_FALSE(default_tc.IsArgumentFilterEnabled());
+  EXPECT_STREQ("", default_tc.ToCategoryFilterString().c_str());
+
+  std::unique_ptr<Value> custom_value(
+      JSONReader::Read(kCustomTraceConfigString));
+  DCHECK(custom_value);
+  const DictionaryValue* custom_dict = nullptr;
+  is_dict = custom_value->GetAsDictionary(&custom_dict);
+  DCHECK(is_dict);
+  TraceConfig custom_tc(*custom_dict);
+  EXPECT_STREQ(kCustomTraceConfigString, custom_tc.ToString().c_str());
+  EXPECT_EQ(RECORD_CONTINUOUSLY, custom_tc.GetTraceRecordMode());
+  EXPECT_TRUE(custom_tc.IsSystraceEnabled());
+  EXPECT_TRUE(custom_tc.IsArgumentFilterEnabled());
+  EXPECT_STREQ(
+      "included,inc_pattern*,"
+      "disabled-by-default-cc,disabled-by-default-memory-infra,"
+      "-excluded,-exc_pattern*",
+      custom_tc.ToCategoryFilterString().c_str());
+}
+
+TEST(TraceConfigTest, TraceConfigFromValidString) {
+  // Using some non-empty config string.
+  const char config_string[] =
+      "{"
+      "\"enable_argument_filter\":true,"
+      "\"enable_systrace\":true,"
+      "\"event_filters\":["
+      "{"
+      "\"excluded_categories\":[\"unfiltered_cat\"],"
+      "\"filter_args\":{\"event_name_whitelist\":[\"a snake\",\"a dog\"]},"
+      "\"filter_predicate\":\"event_whitelist_predicate\","
+      "\"included_categories\":[\"*\"]"
+      "}"
+      "],"
+      "\"excluded_categories\":[\"excluded\",\"exc_pattern*\"],"
+      "\"included_categories\":[\"included\","
+      "\"inc_pattern*\","
+      "\"disabled-by-default-cc\"],"
+      "\"record_mode\":\"record-continuously\""
+      "}";
+  TraceConfig tc(config_string);
+
+  EXPECT_STREQ(config_string, tc.ToString().c_str());
+  EXPECT_EQ(RECORD_CONTINUOUSLY, tc.GetTraceRecordMode());
+  EXPECT_TRUE(tc.IsSystraceEnabled());
+  EXPECT_TRUE(tc.IsArgumentFilterEnabled());
+  EXPECT_STREQ(
+      "included,inc_pattern*,disabled-by-default-cc,-excluded,"
+      "-exc_pattern*",
+      tc.ToCategoryFilterString().c_str());
+
+  EXPECT_TRUE(tc.category_filter().IsCategoryEnabled("included"));
+  EXPECT_TRUE(tc.category_filter().IsCategoryEnabled("inc_pattern_category"));
+  EXPECT_TRUE(tc.category_filter().IsCategoryEnabled("disabled-by-default-cc"));
+  EXPECT_FALSE(tc.category_filter().IsCategoryEnabled("excluded"));
+  EXPECT_FALSE(tc.category_filter().IsCategoryEnabled("exc_pattern_category"));
+  EXPECT_FALSE(
+      tc.category_filter().IsCategoryEnabled("disabled-by-default-others"));
+  EXPECT_FALSE(
+      tc.category_filter().IsCategoryEnabled("not-excluded-nor-included"));
+
+  EXPECT_TRUE(tc.IsCategoryGroupEnabled("included"));
+  EXPECT_TRUE(tc.IsCategoryGroupEnabled("inc_pattern_category"));
+  EXPECT_TRUE(tc.IsCategoryGroupEnabled("disabled-by-default-cc"));
+  EXPECT_FALSE(tc.IsCategoryGroupEnabled("excluded"));
+  EXPECT_FALSE(tc.IsCategoryGroupEnabled("exc_pattern_category"));
+  EXPECT_FALSE(tc.IsCategoryGroupEnabled("disabled-by-default-others"));
+  EXPECT_FALSE(tc.IsCategoryGroupEnabled("not-excluded-nor-included"));
+
+  EXPECT_TRUE(tc.IsCategoryGroupEnabled("included,excluded"));
+  EXPECT_FALSE(tc.IsCategoryGroupEnabled("excluded,exc_pattern_category"));
+  EXPECT_TRUE(tc.IsCategoryGroupEnabled("included"));
+
+  EXPECT_EQ(tc.event_filters().size(), 1u);
+  const TraceConfig::EventFilterConfig& event_filter = tc.event_filters()[0];
+  EXPECT_STREQ("event_whitelist_predicate",
+               event_filter.predicate_name().c_str());
+  EXPECT_EQ(1u, event_filter.category_filter().included_categories().size());
+  EXPECT_STREQ("*",
+               event_filter.category_filter().included_categories()[0].c_str());
+  EXPECT_EQ(1u, event_filter.category_filter().excluded_categories().size());
+  EXPECT_STREQ("unfiltered_cat",
+               event_filter.category_filter().excluded_categories()[0].c_str());
+  EXPECT_TRUE(event_filter.filter_args());
+
+  std::string json_out;
+  base::JSONWriter::Write(*event_filter.filter_args(), &json_out);
+  EXPECT_STREQ(json_out.c_str(),
+               "{\"event_name_whitelist\":[\"a snake\",\"a dog\"]}");
+  std::unordered_set<std::string> filter_values;
+  EXPECT_TRUE(event_filter.GetArgAsSet("event_name_whitelist", &filter_values));
+  EXPECT_EQ(2u, filter_values.size());
+  EXPECT_EQ(1u, filter_values.count("a snake"));
+  EXPECT_EQ(1u, filter_values.count("a dog"));
+
+  const char config_string_2[] = "{\"included_categories\":[\"*\"]}";
+  TraceConfig tc2(config_string_2);
+  EXPECT_TRUE(tc2.category_filter().IsCategoryEnabled(
+      "non-disabled-by-default-pattern"));
+  EXPECT_FALSE(
+      tc2.category_filter().IsCategoryEnabled("disabled-by-default-pattern"));
+  EXPECT_TRUE(tc2.IsCategoryGroupEnabled("non-disabled-by-default-pattern"));
+  EXPECT_FALSE(tc2.IsCategoryGroupEnabled("disabled-by-default-pattern"));
+
+  // Clear
+  tc.Clear();
+  EXPECT_STREQ(tc.ToString().c_str(),
+               "{"
+                 "\"enable_argument_filter\":false,"
+                 "\"enable_systrace\":false,"
+                 "\"record_mode\":\"record-until-full\""
+               "}");
+}
+
+TEST(TraceConfigTest, TraceConfigFromInvalidString) {
+  // The config string needs to be a dictionary correctly formatted as a JSON
+  // string. Otherwise, it will fall back to the default initialization.
+  TraceConfig tc("");
+  EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
+  EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
+  EXPECT_FALSE(tc.IsSystraceEnabled());
+  EXPECT_FALSE(tc.IsArgumentFilterEnabled());
+  EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
+  CheckDefaultTraceConfigBehavior(tc);
+
+  tc = TraceConfig("This is an invalid config string.");
+  EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
+  EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
+  EXPECT_FALSE(tc.IsSystraceEnabled());
+  EXPECT_FALSE(tc.IsArgumentFilterEnabled());
+  EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
+  CheckDefaultTraceConfigBehavior(tc);
+
+  tc = TraceConfig("[\"This\", \"is\", \"not\", \"a\", \"dictionary\"]");
+  EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
+  EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
+  EXPECT_FALSE(tc.IsSystraceEnabled());
+  EXPECT_FALSE(tc.IsArgumentFilterEnabled());
+  EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
+  CheckDefaultTraceConfigBehavior(tc);
+
+  tc = TraceConfig("{\"record_mode\": invalid-value-needs-double-quote}");
+  EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
+  EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
+  EXPECT_FALSE(tc.IsSystraceEnabled());
+  EXPECT_FALSE(tc.IsArgumentFilterEnabled());
+  EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
+  CheckDefaultTraceConfigBehavior(tc);
+
+  // If the config string is a dictionary formatted as a JSON string, it will
+  // initialize TraceConfig on a best-effort basis.
+  tc = TraceConfig("{}");
+  EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
+  EXPECT_FALSE(tc.IsSystraceEnabled());
+  EXPECT_FALSE(tc.IsArgumentFilterEnabled());
+  EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
+  CheckDefaultTraceConfigBehavior(tc);
+
+  tc = TraceConfig("{\"arbitrary-key\":\"arbitrary-value\"}");
+  EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
+  EXPECT_FALSE(tc.IsSystraceEnabled());
+  EXPECT_FALSE(tc.IsArgumentFilterEnabled());
+  EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
+  CheckDefaultTraceConfigBehavior(tc);
+
+  const char invalid_config_string[] =
+      "{"
+      "\"enable_systrace\":1,"
+      "\"excluded_categories\":[\"excluded\"],"
+      "\"included_categories\":\"not a list\","
+      "\"record_mode\":\"arbitrary-mode\""
+      "}";
+  tc = TraceConfig(invalid_config_string);
+  EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
+  EXPECT_FALSE(tc.IsSystraceEnabled());
+  EXPECT_FALSE(tc.IsArgumentFilterEnabled());
+
+  const char invalid_config_string_2[] =
+    "{"
+      "\"included_categories\":[\"category\",\"disabled-by-default-pattern\"],"
+      "\"excluded_categories\":[\"category\",\"disabled-by-default-pattern\"]"
+    "}";
+  tc = TraceConfig(invalid_config_string_2);
+  EXPECT_TRUE(tc.category_filter().IsCategoryEnabled("category"));
+  EXPECT_TRUE(
+      tc.category_filter().IsCategoryEnabled("disabled-by-default-pattern"));
+  EXPECT_TRUE(tc.IsCategoryGroupEnabled("category"));
+  EXPECT_TRUE(tc.IsCategoryGroupEnabled("disabled-by-default-pattern"));
+}
+
+TEST(TraceConfigTest, MergingTraceConfigs) {
+  // Merge
+  TraceConfig tc;
+  TraceConfig tc2("included,-excluded,inc_pattern*,-exc_pattern*", "");
+  tc.Merge(tc2);
+  EXPECT_STREQ("{"
+                 "\"enable_argument_filter\":false,"
+                 "\"enable_systrace\":false,"
+                 "\"excluded_categories\":[\"excluded\",\"exc_pattern*\"],"
+                 "\"record_mode\":\"record-until-full\""
+               "}",
+               tc.ToString().c_str());
+}
+
+TEST(TraceConfigTest, IsCategoryGroupEnabled) {
+  // Enabling only a disabled-by-default category does not require other
+  // categories to be explicitly included in order to be traced.
+  TraceConfig tc("disabled-by-default-cc,-excluded", "");
+  EXPECT_STREQ("disabled-by-default-cc,-excluded",
+               tc.ToCategoryFilterString().c_str());
+  EXPECT_TRUE(tc.IsCategoryGroupEnabled("disabled-by-default-cc"));
+  EXPECT_TRUE(tc.IsCategoryGroupEnabled("some_other_group"));
+  EXPECT_FALSE(tc.IsCategoryGroupEnabled("excluded"));
+
+  // Enabling a disabled-by-default category together with an included category
+  // means all other categories must be explicitly included to be traced.
+  tc = TraceConfig("disabled-by-default-cc,included", "");
+  EXPECT_STREQ("included,disabled-by-default-cc",
+               tc.ToCategoryFilterString().c_str());
+  EXPECT_TRUE(tc.IsCategoryGroupEnabled("disabled-by-default-cc"));
+  EXPECT_TRUE(tc.IsCategoryGroupEnabled("included"));
+  EXPECT_FALSE(tc.IsCategoryGroupEnabled("other_included"));
+
+  // Excluding categories won't enable disabled-by-default ones when the
+  // excluded category is also present in the group.
+  tc = TraceConfig("-excluded", "");
+  EXPECT_STREQ("-excluded", tc.ToCategoryFilterString().c_str());
+  EXPECT_FALSE(tc.IsCategoryGroupEnabled("excluded,disabled-by-default-cc"));
+}
+
+TEST(TraceConfigTest, IsCategoryNameAllowed) {
+  // Test that IsCategoryNameAllowed actually catches categories that are
+  // explicitly forbidden. This method is called in a DCHECK to assert that we
+  // don't have these types of strings as categories.
+  EXPECT_FALSE(
+      TraceConfigCategoryFilter::IsCategoryNameAllowed(" bad_category "));
+  EXPECT_FALSE(
+      TraceConfigCategoryFilter::IsCategoryNameAllowed(" bad_category"));
+  EXPECT_FALSE(
+      TraceConfigCategoryFilter::IsCategoryNameAllowed("bad_category "));
+  EXPECT_FALSE(
+      TraceConfigCategoryFilter::IsCategoryNameAllowed("   bad_category"));
+  EXPECT_FALSE(
+      TraceConfigCategoryFilter::IsCategoryNameAllowed("bad_category   "));
+  EXPECT_FALSE(
+      TraceConfigCategoryFilter::IsCategoryNameAllowed("   bad_category   "));
+  EXPECT_FALSE(TraceConfigCategoryFilter::IsCategoryNameAllowed(""));
+  EXPECT_TRUE(
+      TraceConfigCategoryFilter::IsCategoryNameAllowed("good_category"));
+}
+
+TEST(TraceConfigTest, SetTraceOptionValues) {
+  TraceConfig tc;
+  EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
+  EXPECT_FALSE(tc.IsSystraceEnabled());
+
+  tc.SetTraceRecordMode(RECORD_AS_MUCH_AS_POSSIBLE);
+  EXPECT_EQ(RECORD_AS_MUCH_AS_POSSIBLE, tc.GetTraceRecordMode());
+
+  tc.EnableSystrace();
+  EXPECT_TRUE(tc.IsSystraceEnabled());
+}
+
+TEST(TraceConfigTest, TraceConfigFromMemoryConfigString) {
+  std::string tc_str1 =
+      TraceConfigMemoryTestUtil::GetTraceConfig_PeriodicTriggers(200, 2000);
+  TraceConfig tc1(tc_str1);
+  EXPECT_EQ(tc_str1, tc1.ToString());
+  TraceConfig tc2(
+      TraceConfigMemoryTestUtil::GetTraceConfig_LegacyPeriodicTriggers(200,
+                                                                       2000));
+  EXPECT_EQ(tc_str1, tc2.ToString());
+
+  EXPECT_TRUE(tc1.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
+  ASSERT_EQ(2u, tc1.memory_dump_config().triggers.size());
+
+  EXPECT_EQ(200u,
+            tc1.memory_dump_config().triggers[0].min_time_between_dumps_ms);
+  EXPECT_EQ(MemoryDumpLevelOfDetail::LIGHT,
+            tc1.memory_dump_config().triggers[0].level_of_detail);
+
+  EXPECT_EQ(2000u,
+            tc1.memory_dump_config().triggers[1].min_time_between_dumps_ms);
+  EXPECT_EQ(MemoryDumpLevelOfDetail::DETAILED,
+            tc1.memory_dump_config().triggers[1].level_of_detail);
+  EXPECT_EQ(
+      2048u,
+      tc1.memory_dump_config().heap_profiler_options.breakdown_threshold_bytes);
+
+  std::string tc_str3 =
+      TraceConfigMemoryTestUtil::GetTraceConfig_BackgroundTrigger(
+          1 /* period_ms */);
+  TraceConfig tc3(tc_str3);
+  EXPECT_EQ(tc_str3, tc3.ToString());
+  EXPECT_TRUE(tc3.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
+  ASSERT_EQ(1u, tc3.memory_dump_config().triggers.size());
+  EXPECT_EQ(1u, tc3.memory_dump_config().triggers[0].min_time_between_dumps_ms);
+  EXPECT_EQ(MemoryDumpLevelOfDetail::BACKGROUND,
+            tc3.memory_dump_config().triggers[0].level_of_detail);
+
+  std::string tc_str4 =
+      TraceConfigMemoryTestUtil::GetTraceConfig_PeakDetectionTrigger(
+          1 /*heavy_period */);
+  TraceConfig tc4(tc_str4);
+  EXPECT_EQ(tc_str4, tc4.ToString());
+  ASSERT_EQ(1u, tc4.memory_dump_config().triggers.size());
+  EXPECT_EQ(1u, tc4.memory_dump_config().triggers[0].min_time_between_dumps_ms);
+  EXPECT_EQ(MemoryDumpLevelOfDetail::DETAILED,
+            tc4.memory_dump_config().triggers[0].level_of_detail);
+}
+
+TEST(TraceConfigTest, EmptyMemoryDumpConfigTest) {
+  // An empty trigger list should still be emitted when converting back to a
+  // string.
+  TraceConfig tc(TraceConfigMemoryTestUtil::GetTraceConfig_EmptyTriggers());
+  EXPECT_EQ(TraceConfigMemoryTestUtil::GetTraceConfig_EmptyTriggers(),
+            tc.ToString());
+  EXPECT_EQ(0u, tc.memory_dump_config().triggers.size());
+  EXPECT_EQ(
+      static_cast<uint32_t>(TraceConfig::MemoryDumpConfig::HeapProfiler::
+                                kDefaultBreakdownThresholdBytes),
+      tc.memory_dump_config().heap_profiler_options.breakdown_threshold_bytes);
+}
+
+TEST(TraceConfigTest, LegacyStringToMemoryDumpConfig) {
+  TraceConfig tc(MemoryDumpManager::kTraceCategory, "");
+  EXPECT_TRUE(tc.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
+  EXPECT_NE(std::string::npos, tc.ToString().find("memory_dump_config"));
+  EXPECT_EQ(0u, tc.memory_dump_config().triggers.size());
+  EXPECT_EQ(
+      static_cast<uint32_t>(TraceConfig::MemoryDumpConfig::HeapProfiler::
+                                kDefaultBreakdownThresholdBytes),
+      tc.memory_dump_config().heap_profiler_options.breakdown_threshold_bytes);
+}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/trace_event.h b/base/trace_event/trace_event.h
new file mode 100644
index 0000000..38528aa
--- /dev/null
+++ b/base/trace_event/trace_event.h
@@ -0,0 +1,1191 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_EVENT_H_
+#define BASE_TRACE_EVENT_TRACE_EVENT_H_
+
+// This header file defines implementation details of how the trace macros in
+// trace_event_common.h collect and store trace events. Anything not
+// implementation-specific should go in trace_event_common.h instead of here.
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <string>
+
+#include "base/atomicops.h"
+#include "base/debug/debugging_buildflags.h"
+#include "base/macros.h"
+#include "base/time/time.h"
+#include "base/time/time_override.h"
+#include "base/trace_event/common/trace_event_common.h"
+#include "base/trace_event/heap_profiler.h"
+#include "base/trace_event/trace_category.h"
+#include "base/trace_event/trace_event_system_stats_monitor.h"
+#include "base/trace_event/trace_log.h"
+#include "build/build_config.h"
+
+// By default, const char* argument values are assumed to have long-lived scope
+// and will not be copied. Use this macro to force a const char* to be copied.
+#define TRACE_STR_COPY(str) \
+    trace_event_internal::TraceStringWithCopy(str)
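+//
+// For example (an illustrative sketch; |label| and |frame| are hypothetical):
+//
+//   char label[32];
+//   snprintf(label, sizeof(label), "frame-%d", frame);
+//   TRACE_EVENT1("ui", "Draw", "label", TRACE_STR_COPY(label));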
+
+// DEPRECATED: do not use. Consider using the TRACE_ID_{GLOBAL, LOCAL} macros
+// instead. By default, uint64_t ID argument values are not mangled with the
+// Process ID in TRACE_EVENT_ASYNC macros. Use this macro to force Process ID
+// mangling.
+#define TRACE_ID_MANGLE(id) \
+    trace_event_internal::TraceID::ForceMangle(id)
+
+// DEPRECATED: do not use. Consider using the TRACE_ID_{GLOBAL, LOCAL} macros
+// instead. By default, pointers are mangled with the Process ID in
+// TRACE_EVENT_ASYNC macros. Use this macro to prevent Process ID mangling.
+#define TRACE_ID_DONT_MANGLE(id) \
+    trace_event_internal::TraceID::DontMangle(id)
+
+// By default, trace IDs are eventually converted to a single 64-bit number. Use
+// this macro to add a scope string. For example,
+//
+// TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(
+//     "network", "ResourceLoad",
+//     TRACE_ID_WITH_SCOPE("BlinkResourceID", resourceID));
+//
+// Also, it is possible to prepend the ID with another number, like the process
+// ID. This is useful in creating IDs that are unique among all processes. To do
+// that, pass two numbers after the scope string instead of one. For example,
+//
+// TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(
+//     "network", "ResourceLoad",
+//     TRACE_ID_WITH_SCOPE("BlinkResourceID", pid, resourceID));
+#define TRACE_ID_WITH_SCOPE(scope, ...) \
+  trace_event_internal::TraceID::WithScope(scope, ##__VA_ARGS__)
+
+#define TRACE_ID_GLOBAL(id) trace_event_internal::TraceID::GlobalId(id)
+#define TRACE_ID_LOCAL(id) trace_event_internal::TraceID::LocalId(id)
+
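+// For example (illustrative; |request| is a hypothetical pointer), a local ID
+// correlates an async BEGIN/END pair within the current process:
+//
+//   TRACE_EVENT_NESTABLE_ASYNC_BEGIN0("net", "Read", TRACE_ID_LOCAL(request));
+//   ...
+//   TRACE_EVENT_NESTABLE_ASYNC_END0("net", "Read", TRACE_ID_LOCAL(request));
+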
+#define TRACE_EVENT_API_CURRENT_THREAD_ID \
+  static_cast<int>(base::PlatformThread::CurrentId())
+
+#define INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE() \
+  UNLIKELY(*INTERNAL_TRACE_EVENT_UID(category_group_enabled) &           \
+           (base::trace_event::TraceCategory::ENABLED_FOR_RECORDING |    \
+            base::trace_event::TraceCategory::ENABLED_FOR_ETW_EXPORT))
+
+#define INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()                  \
+  UNLIKELY(*INTERNAL_TRACE_EVENT_UID(category_group_enabled) &         \
+           (base::trace_event::TraceCategory::ENABLED_FOR_RECORDING |  \
+            base::trace_event::TraceCategory::ENABLED_FOR_ETW_EXPORT | \
+            base::trace_event::TraceCategory::ENABLED_FOR_FILTERING))
+
+////////////////////////////////////////////////////////////////////////////////
+// Implementation specific tracing API definitions.
+
+// Get a pointer to the enabled state of the given trace category. Only
+// long-lived literal strings should be given as the category group. The
+// returned pointer can be held permanently in a local static for example. If
+// the unsigned char is non-zero, tracing is enabled. If tracing is enabled,
+// TRACE_EVENT_API_ADD_TRACE_EVENT can be called. It's OK if tracing is disabled
+// between the load of the tracing state and the call to
+// TRACE_EVENT_API_ADD_TRACE_EVENT, because this flag only provides an early out
+// for best performance when tracing is disabled.
+// const unsigned char*
+//     TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(const char* category_group)
+#define TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED \
+    base::trace_event::TraceLog::GetCategoryGroupEnabled
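+//
+// A minimal caching sketch (illustrative; "my_category" is a placeholder):
+//
+//   static const unsigned char* enabled =
+//       TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED("my_category");
+//   if (*enabled) {
+//     // Tracing may be on; TRACE_EVENT_API_ADD_TRACE_EVENT can be called.
+//   }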
+
+// Get the number of times traces have been recorded. This is used to implement
+// the TRACE_EVENT_IS_NEW_TRACE facility.
+// unsigned int TRACE_EVENT_API_GET_NUM_TRACES_RECORDED()
+#define TRACE_EVENT_API_GET_NUM_TRACES_RECORDED \
+    base::trace_event::TraceLog::GetInstance()->GetNumTracesRecorded
+
+// Add a trace event to the platform tracing system.
+// base::trace_event::TraceEventHandle TRACE_EVENT_API_ADD_TRACE_EVENT(
+//                    char phase,
+//                    const unsigned char* category_group_enabled,
+//                    const char* name,
+//                    const char* scope,
+//                    unsigned long long id,
+//                    int num_args,
+//                    const char** arg_names,
+//                    const unsigned char* arg_types,
+//                    const unsigned long long* arg_values,
+//                    std::unique_ptr<ConvertableToTraceFormat>*
+//                    convertable_values,
+//                    unsigned int flags)
+#define TRACE_EVENT_API_ADD_TRACE_EVENT \
+    base::trace_event::TraceLog::GetInstance()->AddTraceEvent
+
+// Add a trace event to the platform tracing system.
+// base::trace_event::TraceEventHandle
+// TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_BIND_ID(
+//                    char phase,
+//                    const unsigned char* category_group_enabled,
+//                    const char* name,
+//                    const char* scope,
+//                    unsigned long long id,
+//                    unsigned long long bind_id,
+//                    int num_args,
+//                    const char** arg_names,
+//                    const unsigned char* arg_types,
+//                    const unsigned long long* arg_values,
+//                    std::unique_ptr<ConvertableToTraceFormat>*
+//                    convertable_values,
+//                    unsigned int flags)
+#define TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_BIND_ID \
+  base::trace_event::TraceLog::GetInstance()->AddTraceEventWithBindId
+
+// Add a trace event to the platform tracing system overriding the pid.
+// The resulting event will have tid == pid == (the process_id passed here).
+// base::trace_event::TraceEventHandle
+// TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_PROCESS_ID(
+//                    char phase,
+//                    const unsigned char* category_group_enabled,
+//                    const char* name,
+//                    const char* scope,
+//                    unsigned long long id,
+//                    int process_id,
+//                    int num_args,
+//                    const char** arg_names,
+//                    const unsigned char* arg_types,
+//                    const unsigned long long* arg_values,
+//                    std::unique_ptr<ConvertableToTraceFormat>*
+//                    convertable_values,
+//                    unsigned int flags)
+#define TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_PROCESS_ID \
+  base::trace_event::TraceLog::GetInstance()->AddTraceEventWithProcessId
+
+// Add a trace event to the platform tracing system.
+// base::trace_event::TraceEventHandle
+// TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_TIMESTAMP(
+//                    char phase,
+//                    const unsigned char* category_group_enabled,
+//                    const char* name,
+//                    const char* scope,
+//                    unsigned long long id,
+//                    int thread_id,
+//                    const TimeTicks& timestamp,
+//                    int num_args,
+//                    const char** arg_names,
+//                    const unsigned char* arg_types,
+//                    const unsigned long long* arg_values,
+//                    std::unique_ptr<ConvertableToTraceFormat>*
+//                    convertable_values,
+//                    unsigned int flags)
+#define TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP \
+    base::trace_event::TraceLog::GetInstance() \
+      ->AddTraceEventWithThreadIdAndTimestamp
+
+// Set the duration field of a COMPLETE trace event.
+// void TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(
+//     const unsigned char* category_group_enabled,
+//     const char* name,
+//     base::trace_event::TraceEventHandle id)
+#define TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION \
+    base::trace_event::TraceLog::GetInstance()->UpdateTraceEventDuration
+
+// Set the duration field of a COMPLETE trace event.
+// void TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION_EXPLICIT(
+//     const unsigned char* category_group_enabled,
+//     const char* name,
+//     base::trace_event::TraceEventHandle id,
+//     const TimeTicks& now,
+//     const ThreadTicks* thread_now)
+#define TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION_EXPLICIT \
+  base::trace_event::TraceLog::GetInstance()->UpdateTraceEventDurationExplicit
+
+// Adds a metadata event to the trace log. The |AppendValueAsTraceFormat| method
+// on the convertable value will be called at flush time.
+// TRACE_EVENT_API_ADD_METADATA_EVENT(
+//     const unsigned char* category_group_enabled,
+//     const char* event_name,
+//     const char* arg_name,
+//     std::unique_ptr<ConvertableToTraceFormat> arg_value)
+#define TRACE_EVENT_API_ADD_METADATA_EVENT \
+    trace_event_internal::AddMetadataEvent
+
+// Defines atomic operations used internally by the tracing system.
+#define TRACE_EVENT_API_ATOMIC_WORD base::subtle::AtomicWord
+#define TRACE_EVENT_API_ATOMIC_LOAD(var) base::subtle::NoBarrier_Load(&(var))
+#define TRACE_EVENT_API_ATOMIC_STORE(var, value) \
+    base::subtle::NoBarrier_Store(&(var), (value))
+
+// Defines visibility for classes in trace_event.h
+#define TRACE_EVENT_API_CLASS_EXPORT BASE_EXPORT
+
+////////////////////////////////////////////////////////////////////////////////
+
+// Implementation detail: trace event macros create temporary variables
+// to keep instrumentation overhead low. These macros give each temporary
+// variable a unique name based on the line number to prevent name collisions.
+#define INTERNAL_TRACE_EVENT_UID3(a,b) \
+    trace_event_unique_##a##b
+#define INTERNAL_TRACE_EVENT_UID2(a,b) \
+    INTERNAL_TRACE_EVENT_UID3(a,b)
+#define INTERNAL_TRACE_EVENT_UID(name_prefix) \
+    INTERNAL_TRACE_EVENT_UID2(name_prefix, __LINE__)
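+//
+// Illustrative expansion: on line 42, INTERNAL_TRACE_EVENT_UID(atomic)
+// expands to trace_event_unique_atomic42.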
+
+// Implementation detail: internal macro to create static category.
+// No barriers are needed, because this code is designed to operate safely
+// even when the unsigned char* points to garbage data (which may be the case
+// on processors without cache coherency).
+#define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES( \
+    category_group, atomic, category_group_enabled) \
+    category_group_enabled = \
+        reinterpret_cast<const unsigned char*>(TRACE_EVENT_API_ATOMIC_LOAD( \
+            atomic)); \
+    if (UNLIKELY(!category_group_enabled)) { \
+      category_group_enabled = \
+          TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(category_group); \
+      TRACE_EVENT_API_ATOMIC_STORE(atomic, \
+          reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>( \
+              category_group_enabled)); \
+    }
+
+#define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group) \
+    static TRACE_EVENT_API_ATOMIC_WORD INTERNAL_TRACE_EVENT_UID(atomic) = 0; \
+    const unsigned char* INTERNAL_TRACE_EVENT_UID(category_group_enabled); \
+    INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES(category_group, \
+        INTERNAL_TRACE_EVENT_UID(atomic), \
+        INTERNAL_TRACE_EVENT_UID(category_group_enabled));
+
+// Implementation detail: internal macro to return unoverridden
+// base::TimeTicks::Now(). This is important because in headless, VirtualTime
+// can override base::TimeTicks::Now().
+#define INTERNAL_TRACE_TIME_TICKS_NOW() \
+  base::subtle::TimeTicksNowIgnoringOverride()
+
+// Implementation detail: internal macro to return unoverridden
+// base::Time::Now(). This is important because in headless, VirtualTime can
+// override base::Time::Now().
+#define INTERNAL_TRACE_TIME_NOW() base::subtle::TimeNowIgnoringOverride()
+
+// Implementation detail: internal macro to create static category and add
+// event if the category is enabled.
+#define INTERNAL_TRACE_EVENT_ADD(phase, category_group, name, flags, ...)  \
+  do {                                                                     \
+    INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group);                \
+    if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) {                   \
+      trace_event_internal::AddTraceEvent(                                 \
+          phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name,   \
+          trace_event_internal::kGlobalScope, trace_event_internal::kNoId, \
+          flags, trace_event_internal::kNoId, ##__VA_ARGS__);              \
+    }                                                                      \
+  } while (0)
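+// (The do/while (0) wrapper makes the expansion a single statement, so the
+// macro composes safely with unbraced if/else.)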
+
+// Implementation detail: internal macro to create static category and add begin
+// event if the category is enabled. Also adds the end event when the scope
+// ends.
+#define INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, ...)           \
+  INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group);                    \
+  trace_event_internal::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer);       \
+  if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) {                       \
+    base::trace_event::TraceEventHandle h =                                  \
+        trace_event_internal::AddTraceEvent(                                 \
+            TRACE_EVENT_PHASE_COMPLETE,                                      \
+            INTERNAL_TRACE_EVENT_UID(category_group_enabled), name,          \
+            trace_event_internal::kGlobalScope, trace_event_internal::kNoId, \
+            TRACE_EVENT_FLAG_NONE, trace_event_internal::kNoId,              \
+            ##__VA_ARGS__);                                                  \
+    INTERNAL_TRACE_EVENT_UID(tracer).Initialize(                             \
+        INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, h);          \
+  }
+
+#define INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name,      \
+                                                  bind_id, flow_flags, ...)  \
+  INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group);                    \
+  trace_event_internal::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer);       \
+  if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) {                       \
+    trace_event_internal::TraceID trace_event_bind_id((bind_id));            \
+    unsigned int trace_event_flags =                                         \
+        flow_flags | trace_event_bind_id.id_flags();                         \
+    base::trace_event::TraceEventHandle h =                                  \
+        trace_event_internal::AddTraceEvent(                                 \
+            TRACE_EVENT_PHASE_COMPLETE,                                      \
+            INTERNAL_TRACE_EVENT_UID(category_group_enabled), name,          \
+            trace_event_internal::kGlobalScope, trace_event_internal::kNoId, \
+            trace_event_flags, trace_event_bind_id.raw_id(), ##__VA_ARGS__); \
+    INTERNAL_TRACE_EVENT_UID(tracer).Initialize(                             \
+        INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, h);          \
+  }
+
+// Implementation detail: internal macro to create static category and add
+// event if the category is enabled.
+#define INTERNAL_TRACE_EVENT_ADD_WITH_ID(phase, category_group, name, id, \
+                                         flags, ...)                      \
+  do {                                                                    \
+    INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group);               \
+    if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) {                  \
+      trace_event_internal::TraceID trace_event_trace_id((id));           \
+      unsigned int trace_event_flags =                                    \
+          flags | trace_event_trace_id.id_flags();                        \
+      trace_event_internal::AddTraceEvent(                                \
+          phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name,  \
+          trace_event_trace_id.scope(), trace_event_trace_id.raw_id(),    \
+          trace_event_flags, trace_event_internal::kNoId, ##__VA_ARGS__); \
+    }                                                                     \
+  } while (0)
+
+// Implementation detail: internal macro to create static category and add
+// event if the category is enabled.
+#define INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(phase, category_group, name, \
+                                                timestamp, flags, ...)       \
+  do {                                                                       \
+    INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group);                  \
+    if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) {                     \
+      trace_event_internal::AddTraceEventWithThreadIdAndTimestamp(           \
+          phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name,     \
+          trace_event_internal::kGlobalScope, trace_event_internal::kNoId,   \
+          TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp,                      \
+          flags | TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP,                       \
+          trace_event_internal::kNoId, ##__VA_ARGS__);                       \
+    }                                                                        \
+  } while (0)
+
+// Implementation detail: internal macro to create static category and add
+// event if the category is enabled.
+#define INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(              \
+    phase, category_group, name, id, thread_id, timestamp, flags, ...)   \
+  do {                                                                   \
+    INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group);              \
+    if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) {                 \
+      trace_event_internal::TraceID trace_event_trace_id((id));          \
+      unsigned int trace_event_flags =                                   \
+          flags | trace_event_trace_id.id_flags();                       \
+      trace_event_internal::AddTraceEventWithThreadIdAndTimestamp(       \
+          phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+          trace_event_trace_id.scope(), trace_event_trace_id.raw_id(),   \
+          thread_id, timestamp,                                          \
+          trace_event_flags | TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP,       \
+          trace_event_internal::kNoId, ##__VA_ARGS__);                   \
+    }                                                                    \
+  } while (0)
+
+// Implementation detail: internal macro to create static category and add
+// event if the category is enabled.
+#define INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMPS(                \
+    category_group, name, id, thread_id, begin_timestamp, end_timestamp,    \
+    thread_end_timestamp, flags, ...)                                       \
+  do {                                                                      \
+    INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group);                 \
+    if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) {                    \
+      trace_event_internal::TraceID trace_event_trace_id((id));             \
+      unsigned int trace_event_flags =                                      \
+          flags | trace_event_trace_id.id_flags();                          \
+      const unsigned char* uid_category_group_enabled =                     \
+          INTERNAL_TRACE_EVENT_UID(category_group_enabled);                 \
+      auto handle =                                                         \
+          trace_event_internal::AddTraceEventWithThreadIdAndTimestamp(      \
+              TRACE_EVENT_PHASE_COMPLETE, uid_category_group_enabled, name, \
+              trace_event_trace_id.scope(), trace_event_trace_id.raw_id(),  \
+              thread_id, begin_timestamp,                                   \
+              trace_event_flags | TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP,      \
+              trace_event_internal::kNoId, ##__VA_ARGS__);                  \
+      TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION_EXPLICIT(                 \
+          uid_category_group_enabled, name, handle, end_timestamp,          \
+          thread_end_timestamp);                                            \
+    }                                                                       \
+  } while (0)
+
+// The linked ID will not be mangled.
+#define INTERNAL_TRACE_EVENT_ADD_LINK_IDS(category_group, name, id1, id2) \
+  do {                                                                    \
+    INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group);               \
+    if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) {                  \
+      trace_event_internal::TraceID source_id((id1));                     \
+      unsigned int source_flags = source_id.id_flags();                   \
+      trace_event_internal::TraceID target_id((id2));                     \
+      trace_event_internal::AddTraceEvent(                                \
+          TRACE_EVENT_PHASE_LINK_IDS,                                     \
+          INTERNAL_TRACE_EVENT_UID(category_group_enabled), name,         \
+          source_id.scope(), source_id.raw_id(), source_flags,            \
+          trace_event_internal::kNoId, "linked_id",                       \
+          target_id.AsConvertableToTraceFormat());                        \
+    }                                                                     \
+  } while (0)
+
+// Implementation detail: internal macro to create static category and add
+// metadata event if the category is enabled.
+#define INTERNAL_TRACE_EVENT_METADATA_ADD(category_group, name, ...) \
+  do {                                                               \
+    INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group);          \
+    if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) {             \
+      TRACE_EVENT_API_ADD_METADATA_EVENT(                            \
+          INTERNAL_TRACE_EVENT_UID(category_group_enabled), name,    \
+          ##__VA_ARGS__);                                            \
+    }                                                                \
+  } while (0)
+
+// Implementation detail: internal macro to enter and leave a
+// context based on the current scope.
+#define INTERNAL_TRACE_EVENT_SCOPED_CONTEXT(category_group, name, context) \
+  struct INTERNAL_TRACE_EVENT_UID(ScopedContext) {                         \
+   public:                                                                 \
+    INTERNAL_TRACE_EVENT_UID(ScopedContext)(uint64_t cid) : cid_(cid) {    \
+      TRACE_EVENT_ENTER_CONTEXT(category_group, name, cid_);               \
+    }                                                                      \
+    ~INTERNAL_TRACE_EVENT_UID(ScopedContext)() {                           \
+      TRACE_EVENT_LEAVE_CONTEXT(category_group, name, cid_);               \
+    }                                                                      \
+                                                                           \
+   private:                                                                \
+    uint64_t cid_;                                                         \
+    /* Local class friendly DISALLOW_COPY_AND_ASSIGN */                    \
+    INTERNAL_TRACE_EVENT_UID(ScopedContext)                                \
+    (const INTERNAL_TRACE_EVENT_UID(ScopedContext)&) {};                   \
+    void operator=(const INTERNAL_TRACE_EVENT_UID(ScopedContext)&) {};     \
+  };                                                                       \
+  INTERNAL_TRACE_EVENT_UID(ScopedContext)                                  \
+  INTERNAL_TRACE_EVENT_UID(scoped_context)(context);
+
+#if BUILDFLAG(ENABLE_LOCATION_SOURCE)
+
+// Implementation detail: internal macro to trace a task execution with the
+// location where it was posted from.
+//
+// This implementation is for when location sources are available.
+// TODO(ssid): The program counter of the current task should be added here.
+#define INTERNAL_TRACE_TASK_EXECUTION(run_function, task)                 \
+  TRACE_EVENT2("toplevel", run_function, "src_file",                      \
+               (task).posted_from.file_name(), "src_func",                \
+               (task).posted_from.function_name());                       \
+  TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION INTERNAL_TRACE_EVENT_UID( \
+      task_event)((task).posted_from.file_name());                        \
+  TRACE_HEAP_PROFILER_API_SCOPED_WITH_PROGRAM_COUNTER                     \
+  INTERNAL_TRACE_EVENT_UID(task_pc_event)((task).posted_from.program_counter());
+
+#else
+
+// TODO(http://crbug.com/760702): remove the file name and just pass the
+// program counter to the heap profiler macro.
+// TODO(ssid): The program counter of the current task should be added here.
+#define INTERNAL_TRACE_TASK_EXECUTION(run_function, task)                      \
+  TRACE_EVENT1("toplevel", run_function, "src", (task).posted_from.ToString()) \
+  TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION INTERNAL_TRACE_EVENT_UID(      \
+      task_event)((task).posted_from.file_name());                             \
+  TRACE_HEAP_PROFILER_API_SCOPED_WITH_PROGRAM_COUNTER                          \
+  INTERNAL_TRACE_EVENT_UID(task_pc_event)((task).posted_from.program_counter());
+
+#endif
+
+namespace trace_event_internal {
+
+// Specify these values when the corresponding argument of AddTraceEvent is not
+// used.
+const int kZeroNumArgs = 0;
+const std::nullptr_t kGlobalScope = nullptr;
+const unsigned long long kNoId = 0;
+
+// TraceID encapsulates an ID that can be either an integer or a pointer.
+// Pointers are mangled with the Process ID by default so that they are
+// unlikely to collide when the same pointer is used in different processes.
+class BASE_EXPORT TraceID {
+ public:
+  // Can be combined with WithScope.
+  class LocalId {
+   public:
+    explicit LocalId(const void* raw_id)
+        : raw_id_(static_cast<unsigned long long>(
+              reinterpret_cast<uintptr_t>(raw_id))) {}
+    explicit LocalId(unsigned long long raw_id) : raw_id_(raw_id) {}
+    unsigned long long raw_id() const { return raw_id_; }
+   private:
+    unsigned long long raw_id_;
+  };
+
+  // Can be combined with WithScope.
+  class GlobalId {
+   public:
+    explicit GlobalId(unsigned long long raw_id) : raw_id_(raw_id) {}
+    unsigned long long raw_id() const { return raw_id_; }
+   private:
+    unsigned long long raw_id_;
+  };
+
+  class WithScope {
+   public:
+    WithScope(const char* scope, unsigned long long raw_id)
+        : scope_(scope), raw_id_(raw_id) {}
+    WithScope(const char* scope, LocalId local_id)
+        : scope_(scope), raw_id_(local_id.raw_id()) {
+      id_flags_ = TRACE_EVENT_FLAG_HAS_LOCAL_ID;
+    }
+    WithScope(const char* scope, GlobalId global_id)
+        : scope_(scope), raw_id_(global_id.raw_id()) {
+      id_flags_ = TRACE_EVENT_FLAG_HAS_GLOBAL_ID;
+    }
+    WithScope(const char* scope,
+              unsigned long long prefix,
+              unsigned long long raw_id)
+        : scope_(scope), has_prefix_(true), prefix_(prefix), raw_id_(raw_id) {}
+    WithScope(const char* scope, unsigned long long prefix, GlobalId global_id)
+        : scope_(scope),
+          has_prefix_(true),
+          prefix_(prefix),
+          raw_id_(global_id.raw_id()) {
+      id_flags_ = TRACE_EVENT_FLAG_HAS_GLOBAL_ID;
+    }
+    unsigned long long raw_id() const { return raw_id_; }
+    const char* scope() const { return scope_; }
+    bool has_prefix() const { return has_prefix_; }
+    unsigned long long prefix() const { return prefix_; }
+    unsigned int id_flags() const { return id_flags_; }
+
+   private:
+    const char* scope_ = nullptr;
+    bool has_prefix_ = false;
+    unsigned long long prefix_;
+    unsigned long long raw_id_;
+    unsigned int id_flags_ = TRACE_EVENT_FLAG_HAS_ID;
+  };
+
+  // DEPRECATED: consider using LocalId or GlobalId, instead.
+  class DontMangle {
+   public:
+    explicit DontMangle(const void* raw_id)
+        : raw_id_(static_cast<unsigned long long>(
+              reinterpret_cast<uintptr_t>(raw_id))) {}
+    explicit DontMangle(unsigned long long raw_id) : raw_id_(raw_id) {}
+    explicit DontMangle(unsigned long raw_id) : raw_id_(raw_id) {}
+    explicit DontMangle(unsigned int raw_id) : raw_id_(raw_id) {}
+    explicit DontMangle(unsigned short raw_id) : raw_id_(raw_id) {}
+    explicit DontMangle(unsigned char raw_id) : raw_id_(raw_id) {}
+    explicit DontMangle(long long raw_id)
+        : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+    explicit DontMangle(long raw_id)
+        : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+    explicit DontMangle(int raw_id)
+        : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+    explicit DontMangle(short raw_id)
+        : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+    explicit DontMangle(signed char raw_id)
+        : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+    unsigned long long raw_id() const { return raw_id_; }
+   private:
+    unsigned long long raw_id_;
+  };
+
+  // DEPRECATED: consider using LocalId or GlobalId, instead.
+  class ForceMangle {
+   public:
+    explicit ForceMangle(unsigned long long raw_id) : raw_id_(raw_id) {}
+    explicit ForceMangle(unsigned long raw_id) : raw_id_(raw_id) {}
+    explicit ForceMangle(unsigned int raw_id) : raw_id_(raw_id) {}
+    explicit ForceMangle(unsigned short raw_id) : raw_id_(raw_id) {}
+    explicit ForceMangle(unsigned char raw_id) : raw_id_(raw_id) {}
+    explicit ForceMangle(long long raw_id)
+        : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+    explicit ForceMangle(long raw_id)
+        : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+    explicit ForceMangle(int raw_id)
+        : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+    explicit ForceMangle(short raw_id)
+        : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+    explicit ForceMangle(signed char raw_id)
+        : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+    unsigned long long raw_id() const { return raw_id_; }
+   private:
+    unsigned long long raw_id_;
+  };
+
+  TraceID(const void* raw_id) : raw_id_(static_cast<unsigned long long>(
+                                        reinterpret_cast<uintptr_t>(raw_id))) {
+    id_flags_ = TRACE_EVENT_FLAG_HAS_ID | TRACE_EVENT_FLAG_MANGLE_ID;
+  }
+  TraceID(ForceMangle raw_id) : raw_id_(raw_id.raw_id()) {
+    id_flags_ = TRACE_EVENT_FLAG_HAS_ID | TRACE_EVENT_FLAG_MANGLE_ID;
+  }
+  TraceID(DontMangle raw_id) : raw_id_(raw_id.raw_id()) {}
+  TraceID(unsigned long long raw_id) : raw_id_(raw_id) {}
+  TraceID(unsigned long raw_id) : raw_id_(raw_id) {}
+  TraceID(unsigned int raw_id) : raw_id_(raw_id) {}
+  TraceID(unsigned short raw_id) : raw_id_(raw_id) {}
+  TraceID(unsigned char raw_id) : raw_id_(raw_id) {}
+  TraceID(long long raw_id)
+      : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+  TraceID(long raw_id)
+      : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+  TraceID(int raw_id)
+      : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+  TraceID(short raw_id)
+      : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+  TraceID(signed char raw_id)
+      : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+  TraceID(LocalId raw_id) : raw_id_(raw_id.raw_id()) {
+    id_flags_ = TRACE_EVENT_FLAG_HAS_LOCAL_ID;
+  }
+  TraceID(GlobalId raw_id) : raw_id_(raw_id.raw_id()) {
+    id_flags_ = TRACE_EVENT_FLAG_HAS_GLOBAL_ID;
+  }
+  TraceID(WithScope scoped_id)
+      : scope_(scoped_id.scope()),
+        has_prefix_(scoped_id.has_prefix()),
+        prefix_(scoped_id.prefix()),
+        raw_id_(scoped_id.raw_id()),
+        id_flags_(scoped_id.id_flags()) {}
+
+  unsigned long long raw_id() const { return raw_id_; }
+  const char* scope() const { return scope_; }
+  bool has_prefix() const { return has_prefix_; }
+  unsigned long long prefix() const { return prefix_; }
+  unsigned int id_flags() const { return id_flags_; }
+
+  std::unique_ptr<base::trace_event::ConvertableToTraceFormat>
+  AsConvertableToTraceFormat() const;
+
+ private:
+  const char* scope_ = nullptr;
+  bool has_prefix_ = false;
+  unsigned long long prefix_;
+  unsigned long long raw_id_;
+  unsigned int id_flags_ = TRACE_EVENT_FLAG_HAS_ID;
+};
+
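+// For example (illustrative; |some_ptr| is a hypothetical pointer), the
+// constructor chosen determines the id flags passed to AddTraceEvent:
+//
+//   trace_event_internal::TraceID a(some_ptr);            // HAS_ID|MANGLE_ID
+//   trace_event_internal::TraceID b(42ull);               // HAS_ID
+//   trace_event_internal::TraceID c(TRACE_ID_LOCAL(42));  // HAS_LOCAL_ID
+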
+// Simple union to store various types as unsigned long long.
+union TraceValueUnion {
+  bool as_bool;
+  unsigned long long as_uint;
+  long long as_int;
+  double as_double;
+  const void* as_pointer;
+  const char* as_string;
+};
+
+// Simple container for const char* that should be copied instead of retained.
+class TraceStringWithCopy {
+ public:
+  explicit TraceStringWithCopy(const char* str) : str_(str) {}
+  const char* str() const { return str_; }
+ private:
+  const char* str_;
+};
+
+// Define SetTraceValue for each allowed type. It stores the type and
+// value in the return arguments. This allows this API to avoid declaring any
+// structures so that it is portable to third_party libraries.
+#define INTERNAL_DECLARE_SET_TRACE_VALUE(actual_type, \
+                                         arg_expression, \
+                                         union_member, \
+                                         value_type_id) \
+    static inline void SetTraceValue( \
+        actual_type arg, \
+        unsigned char* type, \
+        unsigned long long* value) { \
+      TraceValueUnion type_value; \
+      type_value.union_member = arg_expression; \
+      *type = value_type_id; \
+      *value = type_value.as_uint; \
+    }
+// Simpler form for integer types that can be safely cast.
+#define INTERNAL_DECLARE_SET_TRACE_VALUE_INT(actual_type, \
+                                             value_type_id) \
+    static inline void SetTraceValue( \
+        actual_type arg, \
+        unsigned char* type, \
+        unsigned long long* value) { \
+      *type = value_type_id; \
+      *value = static_cast<unsigned long long>(arg); \
+    }
+
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned long long, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned long, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned int, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned short, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned char, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(long long, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(long, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(short, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(signed char, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE(bool, arg, as_bool, TRACE_VALUE_TYPE_BOOL)
+INTERNAL_DECLARE_SET_TRACE_VALUE(double, arg, as_double,
+                                 TRACE_VALUE_TYPE_DOUBLE)
+INTERNAL_DECLARE_SET_TRACE_VALUE(const void*, arg, as_pointer,
+                                 TRACE_VALUE_TYPE_POINTER)
+INTERNAL_DECLARE_SET_TRACE_VALUE(const char*, arg, as_string,
+                                 TRACE_VALUE_TYPE_STRING)
+INTERNAL_DECLARE_SET_TRACE_VALUE(const TraceStringWithCopy&, arg.str(),
+                                 as_string, TRACE_VALUE_TYPE_COPY_STRING)
+
+#undef INTERNAL_DECLARE_SET_TRACE_VALUE
+#undef INTERNAL_DECLARE_SET_TRACE_VALUE_INT
+
+// std::string version of SetTraceValue so that trace arguments can be strings.
+static inline void SetTraceValue(const std::string& arg,
+                                 unsigned char* type,
+                                 unsigned long long* value) {
+  TraceValueUnion type_value;
+  type_value.as_string = arg.c_str();
+  *type = TRACE_VALUE_TYPE_COPY_STRING;
+  *value = type_value.as_uint;
+}
+
+// base::Time, base::TimeTicks, etc. versions of SetTraceValue to make it easier
+// to trace these types.
+static inline void SetTraceValue(const base::Time arg,
+                                 unsigned char* type,
+                                 unsigned long long* value) {
+  *type = TRACE_VALUE_TYPE_INT;
+  *value = arg.ToInternalValue();
+}
+
+static inline void SetTraceValue(const base::TimeTicks arg,
+                                 unsigned char* type,
+                                 unsigned long long* value) {
+  *type = TRACE_VALUE_TYPE_INT;
+  *value = arg.ToInternalValue();
+}
+
+static inline void SetTraceValue(const base::ThreadTicks arg,
+                                 unsigned char* type,
+                                 unsigned long long* value) {
+  *type = TRACE_VALUE_TYPE_INT;
+  *value = arg.ToInternalValue();
+}
+
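+// A minimal usage sketch (illustrative only):
+//
+//   unsigned char type;
+//   unsigned long long value;
+//   trace_event_internal::SetTraceValue(1.5, &type, &value);
+//   // type == TRACE_VALUE_TYPE_DOUBLE; value holds the bit pattern of 1.5.
+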
+// These AddTraceEvent and AddTraceEventWithThreadIdAndTimestamp template
+// functions are defined here instead of in the macro, because the arg_values
+// could be temporary objects, such as std::string. In order to store
+// pointers to the internal c_str and pass through to the tracing API,
+// the arg_values must live throughout these procedures.
+
+template <class ARG1_CONVERTABLE_TYPE>
+static inline base::trace_event::TraceEventHandle
+AddTraceEventWithThreadIdAndTimestamp(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    int thread_id,
+    const base::TimeTicks& timestamp,
+    unsigned int flags,
+    unsigned long long bind_id,
+    const char* arg1_name,
+    std::unique_ptr<ARG1_CONVERTABLE_TYPE> arg1_val) {
+  const int num_args = 1;
+  unsigned char arg_types[1] = { TRACE_VALUE_TYPE_CONVERTABLE };
+  std::unique_ptr<base::trace_event::ConvertableToTraceFormat>
+      convertable_values[1] = {std::move(arg1_val)};
+  return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
+      phase, category_group_enabled, name, scope, id, bind_id, thread_id,
+      timestamp, num_args, &arg1_name, arg_types, NULL, convertable_values,
+      flags);
+}
+
+template <class ARG1_TYPE, class ARG2_CONVERTABLE_TYPE>
+static inline base::trace_event::TraceEventHandle
+AddTraceEventWithThreadIdAndTimestamp(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    int thread_id,
+    const base::TimeTicks& timestamp,
+    unsigned int flags,
+    unsigned long long bind_id,
+    const char* arg1_name,
+    const ARG1_TYPE& arg1_val,
+    const char* arg2_name,
+    std::unique_ptr<ARG2_CONVERTABLE_TYPE> arg2_val) {
+  const int num_args = 2;
+  const char* arg_names[2] = { arg1_name, arg2_name };
+
+  unsigned char arg_types[2];
+  unsigned long long arg_values[2];
+  SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
+  arg_types[1] = TRACE_VALUE_TYPE_CONVERTABLE;
+  std::unique_ptr<base::trace_event::ConvertableToTraceFormat>
+      convertable_values[2] = {nullptr, std::move(arg2_val)};
+  return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
+      phase, category_group_enabled, name, scope, id, bind_id, thread_id,
+      timestamp, num_args, arg_names, arg_types, arg_values, convertable_values,
+      flags);
+}
+
+template <class ARG1_CONVERTABLE_TYPE, class ARG2_TYPE>
+static inline base::trace_event::TraceEventHandle
+AddTraceEventWithThreadIdAndTimestamp(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    int thread_id,
+    const base::TimeTicks& timestamp,
+    unsigned int flags,
+    unsigned long long bind_id,
+    const char* arg1_name,
+    std::unique_ptr<ARG1_CONVERTABLE_TYPE> arg1_val,
+    const char* arg2_name,
+    const ARG2_TYPE& arg2_val) {
+  const int num_args = 2;
+  const char* arg_names[2] = { arg1_name, arg2_name };
+
+  unsigned char arg_types[2];
+  unsigned long long arg_values[2];
+  arg_types[0] = TRACE_VALUE_TYPE_CONVERTABLE;
+  arg_values[0] = 0;
+  SetTraceValue(arg2_val, &arg_types[1], &arg_values[1]);
+  std::unique_ptr<base::trace_event::ConvertableToTraceFormat>
+      convertable_values[2] = {std::move(arg1_val), nullptr};
+  return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
+      phase, category_group_enabled, name, scope, id, bind_id, thread_id,
+      timestamp, num_args, arg_names, arg_types, arg_values, convertable_values,
+      flags);
+}
+
+template <class ARG1_CONVERTABLE_TYPE, class ARG2_CONVERTABLE_TYPE>
+static inline base::trace_event::TraceEventHandle
+AddTraceEventWithThreadIdAndTimestamp(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    int thread_id,
+    const base::TimeTicks& timestamp,
+    unsigned int flags,
+    unsigned long long bind_id,
+    const char* arg1_name,
+    std::unique_ptr<ARG1_CONVERTABLE_TYPE> arg1_val,
+    const char* arg2_name,
+    std::unique_ptr<ARG2_CONVERTABLE_TYPE> arg2_val) {
+  const int num_args = 2;
+  const char* arg_names[2] = { arg1_name, arg2_name };
+  unsigned char arg_types[2] =
+      { TRACE_VALUE_TYPE_CONVERTABLE, TRACE_VALUE_TYPE_CONVERTABLE };
+  std::unique_ptr<base::trace_event::ConvertableToTraceFormat>
+      convertable_values[2] = {std::move(arg1_val), std::move(arg2_val)};
+  return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
+      phase, category_group_enabled, name, scope, id, bind_id, thread_id,
+      timestamp, num_args, arg_names, arg_types, NULL, convertable_values,
+      flags);
+}
+
+static inline base::trace_event::TraceEventHandle
+AddTraceEventWithThreadIdAndTimestamp(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    int thread_id,
+    const base::TimeTicks& timestamp,
+    unsigned int flags,
+    unsigned long long bind_id) {
+  return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
+      phase, category_group_enabled, name, scope, id, bind_id, thread_id,
+      timestamp, kZeroNumArgs, NULL, NULL, NULL, NULL, flags);
+}
+
+static inline base::trace_event::TraceEventHandle AddTraceEvent(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    unsigned int flags,
+    unsigned long long bind_id) {
+  const int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
+  const base::TimeTicks now = TRACE_TIME_TICKS_NOW();
+  return AddTraceEventWithThreadIdAndTimestamp(
+      phase, category_group_enabled, name, scope, id, thread_id, now, flags,
+      bind_id);
+}
+
+template<class ARG1_TYPE>
+static inline base::trace_event::TraceEventHandle
+AddTraceEventWithThreadIdAndTimestamp(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    int thread_id,
+    const base::TimeTicks& timestamp,
+    unsigned int flags,
+    unsigned long long bind_id,
+    const char* arg1_name,
+    const ARG1_TYPE& arg1_val) {
+  const int num_args = 1;
+  unsigned char arg_types[1];
+  unsigned long long arg_values[1];
+  SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
+  return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
+      phase, category_group_enabled, name, scope, id, bind_id, thread_id,
+      timestamp, num_args, &arg1_name, arg_types, arg_values, NULL, flags);
+}
+
+template<class ARG1_TYPE>
+static inline base::trace_event::TraceEventHandle AddTraceEvent(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    unsigned int flags,
+    unsigned long long bind_id,
+    const char* arg1_name,
+    const ARG1_TYPE& arg1_val) {
+  int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
+  base::TimeTicks now = TRACE_TIME_TICKS_NOW();
+  return AddTraceEventWithThreadIdAndTimestamp(
+      phase, category_group_enabled, name, scope, id, thread_id, now, flags,
+      bind_id, arg1_name, arg1_val);
+}
+
+template <class ARG1_CONVERTABLE_TYPE>
+static inline base::trace_event::TraceEventHandle AddTraceEvent(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    unsigned int flags,
+    unsigned long long bind_id,
+    const char* arg1_name,
+    std::unique_ptr<ARG1_CONVERTABLE_TYPE> arg1_val) {
+  int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
+  base::TimeTicks now = TRACE_TIME_TICKS_NOW();
+  return AddTraceEventWithThreadIdAndTimestamp(
+      phase, category_group_enabled, name, scope, id, thread_id, now, flags,
+      bind_id, arg1_name, std::move(arg1_val));
+}
+
+template<class ARG1_TYPE, class ARG2_TYPE>
+static inline base::trace_event::TraceEventHandle
+AddTraceEventWithThreadIdAndTimestamp(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    int thread_id,
+    const base::TimeTicks& timestamp,
+    unsigned int flags,
+    unsigned long long bind_id,
+    const char* arg1_name,
+    const ARG1_TYPE& arg1_val,
+    const char* arg2_name,
+    const ARG2_TYPE& arg2_val) {
+  const int num_args = 2;
+  const char* arg_names[2] = { arg1_name, arg2_name };
+  unsigned char arg_types[2];
+  unsigned long long arg_values[2];
+  SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
+  SetTraceValue(arg2_val, &arg_types[1], &arg_values[1]);
+  return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
+      phase, category_group_enabled, name, scope, id, bind_id, thread_id,
+      timestamp, num_args, arg_names, arg_types, arg_values, NULL, flags);
+}
+
+template <class ARG1_CONVERTABLE_TYPE, class ARG2_TYPE>
+static inline base::trace_event::TraceEventHandle AddTraceEvent(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    unsigned int flags,
+    unsigned long long bind_id,
+    const char* arg1_name,
+    std::unique_ptr<ARG1_CONVERTABLE_TYPE> arg1_val,
+    const char* arg2_name,
+    const ARG2_TYPE& arg2_val) {
+  int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
+  base::TimeTicks now = TRACE_TIME_TICKS_NOW();
+  return AddTraceEventWithThreadIdAndTimestamp(
+      phase, category_group_enabled, name, scope, id, thread_id, now, flags,
+      bind_id, arg1_name, std::move(arg1_val), arg2_name, arg2_val);
+}
+
+template <class ARG1_TYPE, class ARG2_CONVERTABLE_TYPE>
+static inline base::trace_event::TraceEventHandle AddTraceEvent(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    unsigned int flags,
+    unsigned long long bind_id,
+    const char* arg1_name,
+    const ARG1_TYPE& arg1_val,
+    const char* arg2_name,
+    std::unique_ptr<ARG2_CONVERTABLE_TYPE> arg2_val) {
+  int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
+  base::TimeTicks now = TRACE_TIME_TICKS_NOW();
+  return AddTraceEventWithThreadIdAndTimestamp(
+      phase, category_group_enabled, name, scope, id, thread_id, now, flags,
+      bind_id, arg1_name, arg1_val, arg2_name, std::move(arg2_val));
+}
+
+template <class ARG1_CONVERTABLE_TYPE, class ARG2_CONVERTABLE_TYPE>
+static inline base::trace_event::TraceEventHandle AddTraceEvent(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    unsigned int flags,
+    unsigned long long bind_id,
+    const char* arg1_name,
+    std::unique_ptr<ARG1_CONVERTABLE_TYPE> arg1_val,
+    const char* arg2_name,
+    std::unique_ptr<ARG2_CONVERTABLE_TYPE> arg2_val) {
+  int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
+  base::TimeTicks now = TRACE_TIME_TICKS_NOW();
+  return AddTraceEventWithThreadIdAndTimestamp(
+      phase, category_group_enabled, name, scope, id, thread_id, now, flags,
+      bind_id, arg1_name, std::move(arg1_val), arg2_name, std::move(arg2_val));
+}
+
+template<class ARG1_TYPE, class ARG2_TYPE>
+static inline base::trace_event::TraceEventHandle AddTraceEvent(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    unsigned int flags,
+    unsigned long long bind_id,
+    const char* arg1_name,
+    const ARG1_TYPE& arg1_val,
+    const char* arg2_name,
+    const ARG2_TYPE& arg2_val) {
+  int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
+  base::TimeTicks now = TRACE_TIME_TICKS_NOW();
+  return AddTraceEventWithThreadIdAndTimestamp(
+      phase, category_group_enabled, name, scope, id, thread_id, now, flags,
+      bind_id, arg1_name, arg1_val, arg2_name, arg2_val);
+}
+
+template <class ARG1_CONVERTABLE_TYPE>
+static inline void AddMetadataEvent(
+    const unsigned char* category_group_enabled,
+    const char* event_name,
+    const char* arg_name,
+    std::unique_ptr<ARG1_CONVERTABLE_TYPE> arg_value) {
+  const char* arg_names[1] = {arg_name};
+  unsigned char arg_types[1] = {TRACE_VALUE_TYPE_CONVERTABLE};
+  std::unique_ptr<base::trace_event::ConvertableToTraceFormat>
+      convertable_values[1] = {std::move(arg_value)};
+  base::trace_event::TraceLog::GetInstance()->AddMetadataEvent(
+      category_group_enabled, event_name,
+      1,  // num_args
+      arg_names, arg_types,
+      nullptr,  // arg_values
+      convertable_values, TRACE_EVENT_FLAG_NONE);
+}
+
+template <class ARG1_TYPE>
+static void AddMetadataEvent(const unsigned char* category_group_enabled,
+                             const char* event_name,
+                             const char* arg_name,
+                             const ARG1_TYPE& arg_val) {
+  const int num_args = 1;
+  const char* arg_names[1] = {arg_name};
+  unsigned char arg_types[1];
+  unsigned long long arg_values[1];
+  SetTraceValue(arg_val, &arg_types[0], &arg_values[0]);
+
+  base::trace_event::TraceLog::GetInstance()->AddMetadataEvent(
+      category_group_enabled, event_name, num_args, arg_names, arg_types,
+      arg_values, nullptr, TRACE_EVENT_FLAG_NONE);
+}
+
+// Used by TRACE_EVENTx macros. Do not use directly.
+class TRACE_EVENT_API_CLASS_EXPORT ScopedTracer {
+ public:
+  // Note: members of data_ intentionally left uninitialized. See Initialize.
+  ScopedTracer() : p_data_(NULL) {}
+
+  ~ScopedTracer() {
+    if (p_data_ && *data_.category_group_enabled) {
+      TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(
+          data_.category_group_enabled, data_.name, data_.event_handle);
+    }
+  }
+
+  void Initialize(const unsigned char* category_group_enabled,
+                  const char* name,
+                  base::trace_event::TraceEventHandle event_handle) {
+    data_.category_group_enabled = category_group_enabled;
+    data_.name = name;
+    data_.event_handle = event_handle;
+    p_data_ = &data_;
+  }
+
+ private:
+  // This Data struct workaround is to avoid initializing all the members
+  // in Data during construction of this object, since this object is always
+  // constructed, even when tracing is disabled. If the members of Data were
+  // members of this class instead, compiler warnings occur about potential
+  // uninitialized accesses.
+  struct Data {
+    const unsigned char* category_group_enabled;
+    const char* name;
+    base::trace_event::TraceEventHandle event_handle;
+  };
+  Data* p_data_;
+  Data data_;
+};
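+
+// Illustrative note (simplified): TRACE_EVENT0("cat", "name") declares a
+// ScopedTracer and, only when the category group is enabled, adds a
+// TRACE_EVENT_PHASE_COMPLETE event and calls Initialize() so that the
+// destructor above can update the event's duration when the scope exits.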
+
+// Used by TRACE_EVENT_BINARY_EFFICIENTx macro. Do not use directly.
+class TRACE_EVENT_API_CLASS_EXPORT ScopedTraceBinaryEfficient {
+ public:
+  ScopedTraceBinaryEfficient(const char* category_group, const char* name);
+  ~ScopedTraceBinaryEfficient();
+
+ private:
+  const unsigned char* category_group_enabled_;
+  const char* name_;
+  base::trace_event::TraceEventHandle event_handle_;
+};
+
+// This macro generates less code than TRACE_EVENT0 but is also
+// slower to execute when tracing is off. It should generally only be
+// used with code that is seldom executed or conditionally executed
+// when debugging.
+// For now the category_group must be "gpu".
+#define TRACE_EVENT_BINARY_EFFICIENT0(category_group, name) \
+    trace_event_internal::ScopedTraceBinaryEfficient \
+        INTERNAL_TRACE_EVENT_UID(scoped_trace)(category_group, name);
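+
+// Usage sketch (illustrative; "SwapBuffers" is a hypothetical event name):
+//   TRACE_EVENT_BINARY_EFFICIENT0("gpu", "SwapBuffers");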
+
+}  // namespace trace_event_internal
+
+namespace base {
+namespace trace_event {
+
+template<typename IDType> class TraceScopedTrackableObject {
+ public:
+  TraceScopedTrackableObject(const char* category_group, const char* name,
+      IDType id)
+    : category_group_(category_group),
+      name_(name),
+      id_(id) {
+    TRACE_EVENT_OBJECT_CREATED_WITH_ID(category_group_, name_, id_);
+  }
+
+  template <typename ArgType> void snapshot(ArgType snapshot) {
+    TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(category_group_, name_, id_, snapshot);
+  }
+
+  ~TraceScopedTrackableObject() {
+    TRACE_EVENT_OBJECT_DELETED_WITH_ID(category_group_, name_, id_);
+  }
+
+ private:
+  const char* category_group_;
+  const char* name_;
+  IDType id_;
+
+  DISALLOW_COPY_AND_ASSIGN(TraceScopedTrackableObject);
+};
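+
+// Usage sketch (illustrative; the names and the snapshot argument are
+// hypothetical):
+//   TraceScopedTrackableObject<int> object("cc", "Layer", layer_id);
+//   object.snapshot(layer_state);  // Emits an OBJECT_SNAPSHOT event.
+// OBJECT_CREATED and OBJECT_DELETED events are emitted by the constructor
+// and destructor, respectively.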
+
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_TRACE_EVENT_H_
diff --git a/base/trace_event/trace_event_android.cc b/base/trace_event/trace_event_android.cc
new file mode 100644
index 0000000..30d9c74
--- /dev/null
+++ b/base/trace_event/trace_event_android.cc
@@ -0,0 +1,216 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_event_impl.h"
+
+#include <fcntl.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/format_macros.h"
+#include "base/logging.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/strings/stringprintf.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/thread.h"
+#include "base/trace_event/trace_event.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+int g_atrace_fd = -1;
+const char kATraceMarkerFile[] = "/sys/kernel/debug/tracing/trace_marker";
+
+void WriteToATrace(int fd, const char* buffer, size_t size) {
+  size_t total_written = 0;
+  while (total_written < size) {
+    ssize_t written = HANDLE_EINTR(write(
+        fd, buffer + total_written, size - total_written));
+    if (written <= 0)
+      break;
+    total_written += written;
+  }
+  if (total_written < size) {
+    PLOG(WARNING) << "Failed to write buffer '" << std::string(buffer, size)
+                  << "' to " << kATraceMarkerFile;
+  }
+}
+
+void WriteEvent(
+    char phase,
+    const char* category_group,
+    const char* name,
+    unsigned long long id,
+    const char** arg_names,
+    const unsigned char* arg_types,
+    const TraceEvent::TraceValue* arg_values,
+    const std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
+    unsigned int flags) {
+  std::string out = StringPrintf("%c|%d|%s", phase, getpid(), name);
+  if (flags & TRACE_EVENT_FLAG_HAS_ID)
+    StringAppendF(&out, "-%" PRIx64, static_cast<uint64_t>(id));
+  out += '|';
+
+  for (int i = 0; i < kTraceMaxNumArgs && arg_names[i]; ++i) {
+    if (i)
+      out += ';';
+    out += arg_names[i];
+    out += '=';
+    std::string::size_type value_start = out.length();
+    if (arg_types[i] == TRACE_VALUE_TYPE_CONVERTABLE)
+      convertable_values[i]->AppendAsTraceFormat(&out);
+    else
+      TraceEvent::AppendValueAsJSON(arg_types[i], arg_values[i], &out);
+
+    // Remove the quotes which may confuse the atrace script.
+    ReplaceSubstringsAfterOffset(&out, value_start, "\\\"", "'");
+    ReplaceSubstringsAfterOffset(&out, value_start, "\"", "");
+    // Replace chars used for separators with similar chars in the value.
+    std::replace(out.begin() + value_start, out.end(), ';', ',');
+    std::replace(out.begin() + value_start, out.end(), '|', '!');
+  }
+
+  out += '|';
+  out += category_group;
+  WriteToATrace(g_atrace_fd, out.c_str(), out.size());
+}
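+
+// For reference (derived from the format strings above), a begin event with
+// two arguments is written to the trace_marker file roughly as:
+//   B|<pid>|<name>[-<id>]|arg1=value1;arg2=value2|<category_group>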
+
+void NoOpOutputCallback(WaitableEvent* complete_event,
+                        const scoped_refptr<RefCountedString>&,
+                        bool has_more_events) {
+  if (!has_more_events)
+    complete_event->Signal();
+}
+
+void EndChromeTracing(TraceLog* trace_log,
+                      WaitableEvent* complete_event) {
+  trace_log->SetDisabled();
+  // Delete the buffered trace events as they have been sent to atrace.
+  trace_log->Flush(Bind(&NoOpOutputCallback, complete_event));
+}
+
+}  // namespace
+
+// These functions support Android systrace.py when 'webview' category is
+// traced. With the new adb_profile_chrome, we may have two phases:
+// - before WebView is ready for combined tracing, we can use adb_profile_chrome
+//   to trace android categories other than 'webview' and chromium categories.
+//   In this way we can avoid the conflict between StartATrace/StopATrace and
+//   the intents.
+// - TODO(wangxianzhu): after WebView is ready for combined tracing, remove
+//   StartATrace, StopATrace and SendToATrace, and perhaps send Java traces
+//   directly to atrace in trace_event_binding.cc.
+
+void TraceLog::StartATrace() {
+  if (g_atrace_fd != -1)
+    return;
+
+  g_atrace_fd = HANDLE_EINTR(open(kATraceMarkerFile, O_WRONLY));
+  if (g_atrace_fd == -1) {
+    PLOG(WARNING) << "Couldn't open " << kATraceMarkerFile;
+    return;
+  }
+  TraceConfig trace_config;
+  trace_config.SetTraceRecordMode(RECORD_CONTINUOUSLY);
+  SetEnabled(trace_config, TraceLog::RECORDING_MODE);
+}
+
+void TraceLog::StopATrace() {
+  if (g_atrace_fd == -1)
+    return;
+
+  close(g_atrace_fd);
+  g_atrace_fd = -1;
+
+  // TraceLog::Flush() requires the current thread to have a message loop, but
+  // this thread called from Java may not have one, so flush in another thread.
+  Thread end_chrome_tracing_thread("end_chrome_tracing");
+  WaitableEvent complete_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                               WaitableEvent::InitialState::NOT_SIGNALED);
+  end_chrome_tracing_thread.Start();
+  end_chrome_tracing_thread.task_runner()->PostTask(
+      FROM_HERE, base::Bind(&EndChromeTracing, Unretained(this),
+                            Unretained(&complete_event)));
+  complete_event.Wait();
+}
+
+void TraceEvent::SendToATrace() {
+  if (g_atrace_fd == -1)
+    return;
+
+  const char* category_group =
+      TraceLog::GetCategoryGroupName(category_group_enabled_);
+
+  switch (phase_) {
+    case TRACE_EVENT_PHASE_BEGIN:
+      WriteEvent('B', category_group, name_, id_,
+                 arg_names_, arg_types_, arg_values_, convertable_values_,
+                 flags_);
+      break;
+
+    case TRACE_EVENT_PHASE_COMPLETE:
+      WriteEvent(duration_.ToInternalValue() == -1 ? 'B' : 'E',
+                 category_group, name_, id_,
+                 arg_names_, arg_types_, arg_values_, convertable_values_,
+                 flags_);
+      break;
+
+    case TRACE_EVENT_PHASE_END:
+      // Though a single 'E' is enough, also append the pid, name,
+      // category_group, etc., so that unpaired events can be found easily.
+      WriteEvent('E', category_group, name_, id_,
+                 arg_names_, arg_types_, arg_values_, convertable_values_,
+                 flags_);
+      break;
+
+    case TRACE_EVENT_PHASE_INSTANT:
+      // Simulate an instant event with a pair of begin/end events.
+      WriteEvent('B', category_group, name_, id_,
+                 arg_names_, arg_types_, arg_values_, convertable_values_,
+                 flags_);
+      WriteToATrace(g_atrace_fd, "E", 1);
+      break;
+
+    case TRACE_EVENT_PHASE_COUNTER:
+      for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) {
+        DCHECK(arg_types_[i] == TRACE_VALUE_TYPE_INT);
+        std::string out = base::StringPrintf(
+            "C|%d|%s-%s", getpid(), name_, arg_names_[i]);
+        if (flags_ & TRACE_EVENT_FLAG_HAS_ID)
+          StringAppendF(&out, "-%" PRIx64, static_cast<uint64_t>(id_));
+        StringAppendF(&out, "|%d|%s",
+                      static_cast<int>(arg_values_[i].as_int), category_group);
+        WriteToATrace(g_atrace_fd, out.c_str(), out.size());
+      }
+      break;
+
+    default:
+      // Do nothing.
+      break;
+  }
+}
+
+void TraceLog::AddClockSyncMetadataEvent() {
+  int atrace_fd = HANDLE_EINTR(open(kATraceMarkerFile, O_WRONLY | O_APPEND));
+  if (atrace_fd == -1) {
+    PLOG(WARNING) << "Couldn't open " << kATraceMarkerFile;
+    return;
+  }
+
+  // Android's kernel trace system has a trace_marker feature: this is a file on
+  // debugfs that takes the written data and pushes it onto the trace
+  // buffer. So, to establish clock sync, we write our monotonic clock into that
+  // trace buffer.
+  double now_in_seconds = (TRACE_TIME_TICKS_NOW() - TimeTicks()).InSecondsF();
+  std::string marker = StringPrintf(
+      "trace_event_clock_sync: parent_ts=%f\n", now_in_seconds);
+  WriteToATrace(atrace_fd, marker.c_str(), marker.size());
+  close(atrace_fd);
+}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/trace_event_android_unittest.cc b/base/trace_event/trace_event_android_unittest.cc
new file mode 100644
index 0000000..58bd77e
--- /dev/null
+++ b/base/trace_event/trace_event_android_unittest.cc
@@ -0,0 +1,22 @@
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_event.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace trace_event {
+
+TEST(TraceEventAndroidTest, WriteToATrace) {
+  // Just a smoke test to ensure no crash.
+  TraceLog* trace_log = TraceLog::GetInstance();
+  trace_log->StartATrace();
+  TRACE_EVENT0("test", "test-event");
+  trace_log->StopATrace();
+  trace_log->AddClockSyncMetadataEvent();
+}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/trace_event_argument.cc b/base/trace_event/trace_event_argument.cc
new file mode 100644
index 0000000..e614b27
--- /dev/null
+++ b/base/trace_event/trace_event_argument.cc
@@ -0,0 +1,576 @@
+// Copyright (c) 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_event_argument.h"
+
+#include <stdint.h>
+
+#include <utility>
+
+#include "base/bits.h"
+#include "base/containers/circular_deque.h"
+#include "base/json/string_escape.h"
+#include "base/memory/ptr_util.h"
+#include "base/trace_event/trace_event.h"
+#include "base/trace_event/trace_event_impl.h"
+#include "base/trace_event/trace_event_memory_overhead.h"
+#include "base/values.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+const char kTypeStartDict = '{';
+const char kTypeEndDict = '}';
+const char kTypeStartArray = '[';
+const char kTypeEndArray = ']';
+const char kTypeBool = 'b';
+const char kTypeInt = 'i';
+const char kTypeDouble = 'd';
+const char kTypeString = 's';
+const char kTypeCStr = '*';  // only used for key names
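+
+// Wire-format sketch (derived from the writers below, for orientation only):
+// each value is a one-byte type tag followed by its payload; dictionary
+// entries are additionally followed by their key, written either as a raw
+// pointer ('*', for long-lived literal names) or as a copied string ('s').
+// For example, SetInteger("n", 1) pickles roughly as:
+//   'i' <int:1> '*' <uint64: pointer to "n">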
+
+#ifndef NDEBUG
+const bool kStackTypeDict = false;
+const bool kStackTypeArray = true;
+#define DCHECK_CURRENT_CONTAINER_IS(x) DCHECK_EQ(x, nesting_stack_.back())
+#define DCHECK_CONTAINER_STACK_DEPTH_EQ(x) DCHECK_EQ(x, nesting_stack_.size())
+#define DEBUG_PUSH_CONTAINER(x) nesting_stack_.push_back(x)
+#define DEBUG_POP_CONTAINER() nesting_stack_.pop_back()
+#else
+#define DCHECK_CURRENT_CONTAINER_IS(x) do {} while (0)
+#define DCHECK_CONTAINER_STACK_DEPTH_EQ(x) do {} while (0)
+#define DEBUG_PUSH_CONTAINER(x) do {} while (0)
+#define DEBUG_POP_CONTAINER() do {} while (0)
+#endif
+
+inline void WriteKeyNameAsRawPtr(Pickle& pickle, const char* ptr) {
+  pickle.WriteBytes(&kTypeCStr, 1);
+  pickle.WriteUInt64(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(ptr)));
+}
+
+inline void WriteKeyNameWithCopy(Pickle& pickle, base::StringPiece str) {
+  pickle.WriteBytes(&kTypeString, 1);
+  pickle.WriteString(str);
+}
+
+std::string ReadKeyName(PickleIterator& pickle_iterator) {
+  const char* type = nullptr;
+  bool res = pickle_iterator.ReadBytes(&type, 1);
+  std::string key_name;
+  if (res && *type == kTypeCStr) {
+    uint64_t ptr_value = 0;
+    res = pickle_iterator.ReadUInt64(&ptr_value);
+    key_name = reinterpret_cast<const char*>(static_cast<uintptr_t>(ptr_value));
+  } else if (res && *type == kTypeString) {
+    res = pickle_iterator.ReadString(&key_name);
+  }
+  DCHECK(res);
+  return key_name;
+}
+}  // namespace
+
+TracedValue::TracedValue() : TracedValue(0) {
+}
+
+TracedValue::TracedValue(size_t capacity) {
+  DEBUG_PUSH_CONTAINER(kStackTypeDict);
+  if (capacity)
+    pickle_.Reserve(capacity);
+}
+
+TracedValue::~TracedValue() {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+  DEBUG_POP_CONTAINER();
+  DCHECK_CONTAINER_STACK_DEPTH_EQ(0u);
+}
+
+void TracedValue::SetInteger(const char* name, int value) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+  pickle_.WriteBytes(&kTypeInt, 1);
+  pickle_.WriteInt(value);
+  WriteKeyNameAsRawPtr(pickle_, name);
+}
+
+void TracedValue::SetIntegerWithCopiedName(base::StringPiece name, int value) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+  pickle_.WriteBytes(&kTypeInt, 1);
+  pickle_.WriteInt(value);
+  WriteKeyNameWithCopy(pickle_, name);
+}
+
+void TracedValue::SetDouble(const char* name, double value) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+  pickle_.WriteBytes(&kTypeDouble, 1);
+  pickle_.WriteDouble(value);
+  WriteKeyNameAsRawPtr(pickle_, name);
+}
+
+void TracedValue::SetDoubleWithCopiedName(base::StringPiece name,
+                                          double value) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+  pickle_.WriteBytes(&kTypeDouble, 1);
+  pickle_.WriteDouble(value);
+  WriteKeyNameWithCopy(pickle_, name);
+}
+
+void TracedValue::SetBoolean(const char* name, bool value) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+  pickle_.WriteBytes(&kTypeBool, 1);
+  pickle_.WriteBool(value);
+  WriteKeyNameAsRawPtr(pickle_, name);
+}
+
+void TracedValue::SetBooleanWithCopiedName(base::StringPiece name,
+                                           bool value) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+  pickle_.WriteBytes(&kTypeBool, 1);
+  pickle_.WriteBool(value);
+  WriteKeyNameWithCopy(pickle_, name);
+}
+
+void TracedValue::SetString(const char* name, base::StringPiece value) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+  pickle_.WriteBytes(&kTypeString, 1);
+  pickle_.WriteString(value);
+  WriteKeyNameAsRawPtr(pickle_, name);
+}
+
+void TracedValue::SetStringWithCopiedName(base::StringPiece name,
+                                          base::StringPiece value) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+  pickle_.WriteBytes(&kTypeString, 1);
+  pickle_.WriteString(value);
+  WriteKeyNameWithCopy(pickle_, name);
+}
+
+void TracedValue::SetValue(const char* name, const TracedValue& value) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+  BeginDictionary(name);
+  pickle_.WriteBytes(value.pickle_.payload(),
+                     static_cast<int>(value.pickle_.payload_size()));
+  EndDictionary();
+}
+
+void TracedValue::SetValueWithCopiedName(base::StringPiece name,
+                                         const TracedValue& value) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+  BeginDictionaryWithCopiedName(name);
+  pickle_.WriteBytes(value.pickle_.payload(),
+                     static_cast<int>(value.pickle_.payload_size()));
+  EndDictionary();
+}
+
+void TracedValue::BeginDictionary(const char* name) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+  DEBUG_PUSH_CONTAINER(kStackTypeDict);
+  pickle_.WriteBytes(&kTypeStartDict, 1);
+  WriteKeyNameAsRawPtr(pickle_, name);
+}
+
+void TracedValue::BeginDictionaryWithCopiedName(base::StringPiece name) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+  DEBUG_PUSH_CONTAINER(kStackTypeDict);
+  pickle_.WriteBytes(&kTypeStartDict, 1);
+  WriteKeyNameWithCopy(pickle_, name);
+}
+
+void TracedValue::BeginArray(const char* name) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+  DEBUG_PUSH_CONTAINER(kStackTypeArray);
+  pickle_.WriteBytes(&kTypeStartArray, 1);
+  WriteKeyNameAsRawPtr(pickle_, name);
+}
+
+void TracedValue::BeginArrayWithCopiedName(base::StringPiece name) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+  DEBUG_PUSH_CONTAINER(kStackTypeArray);
+  pickle_.WriteBytes(&kTypeStartArray, 1);
+  WriteKeyNameWithCopy(pickle_, name);
+}
+
+void TracedValue::EndDictionary() {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+  DEBUG_POP_CONTAINER();
+  pickle_.WriteBytes(&kTypeEndDict, 1);
+}
+
+void TracedValue::AppendInteger(int value) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
+  pickle_.WriteBytes(&kTypeInt, 1);
+  pickle_.WriteInt(value);
+}
+
+void TracedValue::AppendDouble(double value) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
+  pickle_.WriteBytes(&kTypeDouble, 1);
+  pickle_.WriteDouble(value);
+}
+
+void TracedValue::AppendBoolean(bool value) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
+  pickle_.WriteBytes(&kTypeBool, 1);
+  pickle_.WriteBool(value);
+}
+
+void TracedValue::AppendString(base::StringPiece value) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
+  pickle_.WriteBytes(&kTypeString, 1);
+  pickle_.WriteString(value);
+}
+
+void TracedValue::BeginArray() {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
+  DEBUG_PUSH_CONTAINER(kStackTypeArray);
+  pickle_.WriteBytes(&kTypeStartArray, 1);
+}
+
+void TracedValue::BeginDictionary() {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
+  DEBUG_PUSH_CONTAINER(kStackTypeDict);
+  pickle_.WriteBytes(&kTypeStartDict, 1);
+}
+
+void TracedValue::EndArray() {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
+  DEBUG_POP_CONTAINER();
+  pickle_.WriteBytes(&kTypeEndArray, 1);
+}
+
+void TracedValue::SetValue(const char* name,
+                           std::unique_ptr<base::Value> value) {
+  SetBaseValueWithCopiedName(name, *value);
+}
+
+void TracedValue::SetBaseValueWithCopiedName(base::StringPiece name,
+                                             const base::Value& value) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+  switch (value.type()) {
+    case base::Value::Type::NONE:
+    case base::Value::Type::BINARY:
+      NOTREACHED();
+      break;
+
+    case base::Value::Type::BOOLEAN: {
+      bool bool_value;
+      value.GetAsBoolean(&bool_value);
+      SetBooleanWithCopiedName(name, bool_value);
+    } break;
+
+    case base::Value::Type::INTEGER: {
+      int int_value;
+      value.GetAsInteger(&int_value);
+      SetIntegerWithCopiedName(name, int_value);
+    } break;
+
+    case base::Value::Type::DOUBLE: {
+      double double_value;
+      value.GetAsDouble(&double_value);
+      SetDoubleWithCopiedName(name, double_value);
+    } break;
+
+    case base::Value::Type::STRING: {
+      const Value* string_value;
+      value.GetAsString(&string_value);
+      SetStringWithCopiedName(name, string_value->GetString());
+    } break;
+
+    case base::Value::Type::DICTIONARY: {
+      const DictionaryValue* dict_value;
+      value.GetAsDictionary(&dict_value);
+      BeginDictionaryWithCopiedName(name);
+      for (DictionaryValue::Iterator it(*dict_value); !it.IsAtEnd();
+           it.Advance()) {
+        SetBaseValueWithCopiedName(it.key(), it.value());
+      }
+      EndDictionary();
+    } break;
+
+    case base::Value::Type::LIST: {
+      const ListValue* list_value;
+      value.GetAsList(&list_value);
+      BeginArrayWithCopiedName(name);
+      for (const auto& base_value : *list_value)
+        AppendBaseValue(base_value);
+      EndArray();
+    } break;
+  }
+}
+
+void TracedValue::AppendBaseValue(const base::Value& value) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
+  switch (value.type()) {
+    case base::Value::Type::NONE:
+    case base::Value::Type::BINARY:
+      NOTREACHED();
+      break;
+
+    case base::Value::Type::BOOLEAN: {
+      bool bool_value;
+      value.GetAsBoolean(&bool_value);
+      AppendBoolean(bool_value);
+    } break;
+
+    case base::Value::Type::INTEGER: {
+      int int_value;
+      value.GetAsInteger(&int_value);
+      AppendInteger(int_value);
+    } break;
+
+    case base::Value::Type::DOUBLE: {
+      double double_value;
+      value.GetAsDouble(&double_value);
+      AppendDouble(double_value);
+    } break;
+
+    case base::Value::Type::STRING: {
+      const Value* string_value;
+      value.GetAsString(&string_value);
+      AppendString(string_value->GetString());
+    } break;
+
+    case base::Value::Type::DICTIONARY: {
+      const DictionaryValue* dict_value;
+      value.GetAsDictionary(&dict_value);
+      BeginDictionary();
+      for (DictionaryValue::Iterator it(*dict_value); !it.IsAtEnd();
+           it.Advance()) {
+        SetBaseValueWithCopiedName(it.key(), it.value());
+      }
+      EndDictionary();
+    } break;
+
+    case base::Value::Type::LIST: {
+      const ListValue* list_value;
+      value.GetAsList(&list_value);
+      BeginArray();
+      for (const auto& base_value : *list_value)
+        AppendBaseValue(base_value);
+      EndArray();
+    } break;
+  }
+}
+
+std::unique_ptr<base::Value> TracedValue::ToBaseValue() const {
+  base::Value root(base::Value::Type::DICTIONARY);
+  Value* cur_dict = &root;
+  Value* cur_list = nullptr;
+  std::vector<Value*> stack;
+  PickleIterator it(pickle_);
+  const char* type;
+
+  while (it.ReadBytes(&type, 1)) {
+    DCHECK((cur_dict && !cur_list) || (cur_list && !cur_dict));
+    switch (*type) {
+      case kTypeStartDict: {
+        base::Value new_dict(base::Value::Type::DICTIONARY);
+        if (cur_dict) {
+          stack.push_back(cur_dict);
+          cur_dict = cur_dict->SetKey(ReadKeyName(it), std::move(new_dict));
+        } else {
+          cur_list->GetList().push_back(std::move(new_dict));
+          // |new_dict| is invalidated at this point, so |cur_dict| needs to be
+          // reset.
+          cur_dict = &cur_list->GetList().back();
+          stack.push_back(cur_list);
+          cur_list = nullptr;
+        }
+      } break;
+
+      case kTypeEndArray:
+      case kTypeEndDict: {
+        if (stack.back()->is_dict()) {
+          cur_dict = stack.back();
+          cur_list = nullptr;
+        } else if (stack.back()->is_list()) {
+          cur_list = stack.back();
+          cur_dict = nullptr;
+        }
+        stack.pop_back();
+      } break;
+
+      case kTypeStartArray: {
+        base::Value new_list(base::Value::Type::LIST);
+        if (cur_dict) {
+          stack.push_back(cur_dict);
+          cur_list = cur_dict->SetKey(ReadKeyName(it), std::move(new_list));
+          cur_dict = nullptr;
+        } else {
+          cur_list->GetList().push_back(std::move(new_list));
+          stack.push_back(cur_list);
+          // |cur_list| is invalidated at this point by the Append, so it needs
+          // to be reset.
+          cur_list = &cur_list->GetList().back();
+        }
+      } break;
+
+      case kTypeBool: {
+        bool value;
+        CHECK(it.ReadBool(&value));
+        base::Value new_bool(value);
+        if (cur_dict) {
+          cur_dict->SetKey(ReadKeyName(it), std::move(new_bool));
+        } else {
+          cur_list->GetList().push_back(std::move(new_bool));
+        }
+      } break;
+
+      case kTypeInt: {
+        int value;
+        CHECK(it.ReadInt(&value));
+        base::Value new_int(value);
+        if (cur_dict) {
+          cur_dict->SetKey(ReadKeyName(it), std::move(new_int));
+        } else {
+          cur_list->GetList().push_back(std::move(new_int));
+        }
+      } break;
+
+      case kTypeDouble: {
+        double value;
+        CHECK(it.ReadDouble(&value));
+        base::Value new_double(value);
+        if (cur_dict) {
+          cur_dict->SetKey(ReadKeyName(it), std::move(new_double));
+        } else {
+          cur_list->GetList().push_back(std::move(new_double));
+        }
+      } break;
+
+      case kTypeString: {
+        std::string value;
+        CHECK(it.ReadString(&value));
+        base::Value new_str(std::move(value));
+        if (cur_dict) {
+          cur_dict->SetKey(ReadKeyName(it), std::move(new_str));
+        } else {
+          cur_list->GetList().push_back(std::move(new_str));
+        }
+      } break;
+
+      default:
+        NOTREACHED();
+    }
+  }
+  DCHECK(stack.empty());
+  return base::Value::ToUniquePtrValue(std::move(root));
+}
+
+void TracedValue::AppendAsTraceFormat(std::string* out) const {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+  DCHECK_CONTAINER_STACK_DEPTH_EQ(1u);
+
+  struct State {
+    enum Type { kTypeDict, kTypeArray };
+    Type type;
+    bool needs_comma;
+  };
+
+  auto maybe_append_key_name = [](State current_state, PickleIterator* it,
+                                  std::string* out) {
+    if (current_state.type == State::kTypeDict) {
+      EscapeJSONString(ReadKeyName(*it), true, out);
+      out->append(":");
+    }
+  };
+
+  base::circular_deque<State> state_stack;
+
+  out->append("{");
+  state_stack.push_back({State::kTypeDict});
+
+  PickleIterator it(pickle_);
+  for (const char* type; it.ReadBytes(&type, 1);) {
+    switch (*type) {
+      case kTypeEndDict:
+        out->append("}");
+        state_stack.pop_back();
+        continue;
+
+      case kTypeEndArray:
+        out->append("]");
+        state_stack.pop_back();
+        continue;
+    }
+
+    // Use an index so it will stay valid across resizes.
+    size_t current_state_index = state_stack.size() - 1;
+    if (state_stack[current_state_index].needs_comma)
+      out->append(",");
+
+    switch (*type) {
+      case kTypeStartDict: {
+        maybe_append_key_name(state_stack[current_state_index], &it, out);
+        out->append("{");
+        state_stack.push_back({State::kTypeDict});
+        break;
+      }
+
+      case kTypeStartArray: {
+        maybe_append_key_name(state_stack[current_state_index], &it, out);
+        out->append("[");
+        state_stack.push_back({State::kTypeArray});
+        break;
+      }
+
+      case kTypeBool: {
+        TraceEvent::TraceValue json_value;
+        CHECK(it.ReadBool(&json_value.as_bool));
+        maybe_append_key_name(state_stack[current_state_index], &it, out);
+        TraceEvent::AppendValueAsJSON(TRACE_VALUE_TYPE_BOOL, json_value, out);
+        break;
+      }
+
+      case kTypeInt: {
+        int value;
+        CHECK(it.ReadInt(&value));
+        maybe_append_key_name(state_stack[current_state_index], &it, out);
+        TraceEvent::TraceValue json_value;
+        json_value.as_int = value;
+        TraceEvent::AppendValueAsJSON(TRACE_VALUE_TYPE_INT, json_value, out);
+        break;
+      }
+
+      case kTypeDouble: {
+        TraceEvent::TraceValue json_value;
+        CHECK(it.ReadDouble(&json_value.as_double));
+        maybe_append_key_name(state_stack[current_state_index], &it, out);
+        TraceEvent::AppendValueAsJSON(TRACE_VALUE_TYPE_DOUBLE, json_value, out);
+        break;
+      }
+
+      case kTypeString: {
+        std::string value;
+        CHECK(it.ReadString(&value));
+        maybe_append_key_name(state_stack[current_state_index], &it, out);
+        TraceEvent::TraceValue json_value;
+        json_value.as_string = value.c_str();
+        TraceEvent::AppendValueAsJSON(TRACE_VALUE_TYPE_STRING, json_value, out);
+        break;
+      }
+
+      default:
+        NOTREACHED();
+    }
+
+    state_stack[current_state_index].needs_comma = true;
+  }
+
+  out->append("}");
+  state_stack.pop_back();
+
+  DCHECK(state_stack.empty());
+}
+
+void TracedValue::EstimateTraceMemoryOverhead(
+    TraceEventMemoryOverhead* overhead) {
+  overhead->Add(TraceEventMemoryOverhead::kTracedValue,
+                /* allocated size */
+                pickle_.GetTotalAllocatedSize(),
+                /* resident size */
+                pickle_.size());
+}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/trace_event_argument.h b/base/trace_event/trace_event_argument.h
new file mode 100644
index 0000000..81d8c01
--- /dev/null
+++ b/base/trace_event/trace_event_argument.h
@@ -0,0 +1,92 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_EVENT_ARGUMENT_H_
+#define BASE_TRACE_EVENT_TRACE_EVENT_ARGUMENT_H_
+
+#include <stddef.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/macros.h"
+#include "base/pickle.h"
+#include "base/strings/string_piece.h"
+#include "base/trace_event/trace_event_impl.h"
+
+namespace base {
+
+class Value;
+
+namespace trace_event {
+
+class BASE_EXPORT TracedValue : public ConvertableToTraceFormat {
+ public:
+  TracedValue();
+  explicit TracedValue(size_t capacity);
+  ~TracedValue() override;
+
+  void EndDictionary();
+  void EndArray();
+
+  // These methods assume that |name| is a long lived "quoted" string.
+  void SetInteger(const char* name, int value);
+  void SetDouble(const char* name, double value);
+  void SetBoolean(const char* name, bool value);
+  void SetString(const char* name, base::StringPiece value);
+  void SetValue(const char* name, const TracedValue& value);
+  void BeginDictionary(const char* name);
+  void BeginArray(const char* name);
+
+  // These, instead, can be safely passed a temporary string.
+  void SetIntegerWithCopiedName(base::StringPiece name, int value);
+  void SetDoubleWithCopiedName(base::StringPiece name, double value);
+  void SetBooleanWithCopiedName(base::StringPiece name, bool value);
+  void SetStringWithCopiedName(base::StringPiece name,
+                               base::StringPiece value);
+  void SetValueWithCopiedName(base::StringPiece name,
+                              const TracedValue& value);
+  void BeginDictionaryWithCopiedName(base::StringPiece name);
+  void BeginArrayWithCopiedName(base::StringPiece name);
+
+  void AppendInteger(int);
+  void AppendDouble(double);
+  void AppendBoolean(bool);
+  void AppendString(base::StringPiece);
+  void BeginArray();
+  void BeginDictionary();
+
+  // ConvertableToTraceFormat implementation.
+  void AppendAsTraceFormat(std::string* out) const override;
+
+  void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead) override;
+
+  // DEPRECATED: do not use, here only for legacy reasons. These methods cause
+  // a copy-and-translation of the base::Value into the equivalent TracedValue.
+  // TODO(primiano): migrate the (three) existing clients to the cheaper
+  // SetValue(TracedValue) API. crbug.com/495628.
+  void SetValue(const char* name, std::unique_ptr<base::Value> value);
+  void SetBaseValueWithCopiedName(base::StringPiece name,
+                                  const base::Value& value);
+  void AppendBaseValue(const base::Value& value);
+
+  // Public for tests only.
+  std::unique_ptr<base::Value> ToBaseValue() const;
+
+ private:
+  Pickle pickle_;
+
+#ifndef NDEBUG
+  // In debug builds, checks the pairing of {Begin,End}{Dictionary,Array} calls.
+  std::vector<bool> nesting_stack_;
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(TracedValue);
+};
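+
+// Usage sketch (illustrative):
+//   auto value = std::make_unique<TracedValue>();
+//   value->SetInteger("count", 42);
+//   value->BeginArray("items");
+//   value->AppendString("foo");
+//   value->EndArray();
+// AppendAsTraceFormat() then emits {"count":42,"items":["foo"]}.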
+
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_TRACE_EVENT_ARGUMENT_H_
diff --git a/base/trace_event/trace_event_argument_unittest.cc b/base/trace_event/trace_event_argument_unittest.cc
new file mode 100644
index 0000000..448b2d5
--- /dev/null
+++ b/base/trace_event/trace_event_argument_unittest.cc
@@ -0,0 +1,165 @@
+// Copyright (c) 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_event_argument.h"
+
+#include <stddef.h>
+
+#include <utility>
+
+#include "base/memory/ptr_util.h"
+#include "base/values.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace trace_event {
+
+TEST(TraceEventArgumentTest, FlatDictionary) {
+  std::unique_ptr<TracedValue> value(new TracedValue());
+  value->SetBoolean("bool", true);
+  value->SetDouble("double", 0.0);
+  value->SetInteger("int", 2014);
+  value->SetString("string", "string");
+  std::string json = "PREFIX";
+  value->AppendAsTraceFormat(&json);
+  EXPECT_EQ(
+      "PREFIX{\"bool\":true,\"double\":0.0,\"int\":2014,\"string\":\"string\"}",
+      json);
+}
+
+TEST(TraceEventArgumentTest, NoDotPathExpansion) {
+  std::unique_ptr<TracedValue> value(new TracedValue());
+  value->SetBoolean("bo.ol", true);
+  value->SetDouble("doub.le", 0.0);
+  value->SetInteger("in.t", 2014);
+  value->SetString("str.ing", "str.ing");
+  std::string json;
+  value->AppendAsTraceFormat(&json);
+  EXPECT_EQ(
+      "{\"bo.ol\":true,\"doub.le\":0.0,\"in.t\":2014,\"str.ing\":\"str.ing\"}",
+      json);
+}
+
+TEST(TraceEventArgumentTest, Hierarchy) {
+  std::unique_ptr<TracedValue> value(new TracedValue());
+  value->BeginArray("a1");
+  value->AppendInteger(1);
+  value->AppendBoolean(true);
+  value->BeginDictionary();
+  value->SetInteger("i2", 3);
+  value->EndDictionary();
+  value->EndArray();
+  value->SetBoolean("b0", true);
+  value->SetDouble("d0", 0.0);
+  value->BeginDictionary("dict1");
+  value->BeginDictionary("dict2");
+  value->SetBoolean("b2", false);
+  value->EndDictionary();
+  value->SetInteger("i1", 2014);
+  value->SetString("s1", "foo");
+  value->EndDictionary();
+  value->SetInteger("i0", 2014);
+  value->SetString("s0", "foo");
+  std::string json;
+  value->AppendAsTraceFormat(&json);
+  EXPECT_EQ(
+      "{\"a1\":[1,true,{\"i2\":3}],\"b0\":true,\"d0\":0.0,\"dict1\":{\"dict2\":"
+      "{\"b2\":false},\"i1\":2014,\"s1\":\"foo\"},\"i0\":2014,\"s0\":"
+      "\"foo\"}",
+      json);
+}
+
+TEST(TraceEventArgumentTest, LongStrings) {
+  std::string kLongString = "supercalifragilisticexpialidocious";
+  std::string kLongString2 = "0123456789012345678901234567890123456789";
+  char kLongString3[4096];
+  for (size_t i = 0; i < sizeof(kLongString3); ++i)
+    kLongString3[i] = 'a' + (i % 25);
+  kLongString3[sizeof(kLongString3) - 1] = '\0';
+
+  std::unique_ptr<TracedValue> value(new TracedValue());
+  value->SetString("a", "short");
+  value->SetString("b", kLongString);
+  value->BeginArray("c");
+  value->AppendString(kLongString2);
+  value->AppendString("");
+  value->BeginDictionary();
+  value->SetString("a", kLongString3);
+  value->EndDictionary();
+  value->EndArray();
+
+  std::string json;
+  value->AppendAsTraceFormat(&json);
+  EXPECT_EQ("{\"a\":\"short\",\"b\":\"" + kLongString + "\",\"c\":[\"" +
+                kLongString2 + "\",\"\",{\"a\":\"" + kLongString3 + "\"}]}",
+            json);
+}
+
+TEST(TraceEventArgumentTest, PassBaseValue) {
+  Value int_value(42);
+  Value bool_value(true);
+  Value double_value(42.0f);
+
+  auto dict_value = WrapUnique(new DictionaryValue);
+  dict_value->SetBoolean("bool", true);
+  dict_value->SetInteger("int", 42);
+  dict_value->SetDouble("double", 42.0f);
+  dict_value->SetString("string", std::string("a") + "b");
+  dict_value->SetString("string", std::string("a") + "b");
+
+  auto list_value = WrapUnique(new ListValue);
+  list_value->AppendBoolean(false);
+  list_value->AppendInteger(1);
+  list_value->AppendString("in_list");
+  list_value->Append(std::move(dict_value));
+
+  std::unique_ptr<TracedValue> value(new TracedValue());
+  value->BeginDictionary("outer_dict");
+  value->SetValue("inner_list", std::move(list_value));
+  value->EndDictionary();
+
+  dict_value.reset();
+  list_value.reset();
+
+  std::string json;
+  value->AppendAsTraceFormat(&json);
+  EXPECT_EQ(
+      "{\"outer_dict\":{\"inner_list\":[false,1,\"in_list\",{\"bool\":true,"
+      "\"double\":42.0,\"int\":42,\"string\":\"ab\"}]}}",
+      json);
+}
+
+TEST(TraceEventArgumentTest, PassTracedValue) {
+  auto dict_value = std::make_unique<TracedValue>();
+  dict_value->SetInteger("a", 1);
+
+  auto nested_dict_value = std::make_unique<TracedValue>();
+  nested_dict_value->SetInteger("b", 2);
+  nested_dict_value->BeginArray("c");
+  nested_dict_value->AppendString("foo");
+  nested_dict_value->EndArray();
+
+  dict_value->SetValue("e", *nested_dict_value);
+
+  // Check the merged result.
+  std::string json;
+  dict_value->AppendAsTraceFormat(&json);
+  EXPECT_EQ("{\"a\":1,\"e\":{\"b\":2,\"c\":[\"foo\"]}}", json);
+
+  // Check that the passed nested dict was left untouched.
+  json = "";
+  nested_dict_value->AppendAsTraceFormat(&json);
+  EXPECT_EQ("{\"b\":2,\"c\":[\"foo\"]}", json);
+
+  // And that it is still usable.
+  nested_dict_value->SetInteger("f", 3);
+  nested_dict_value->BeginDictionary("g");
+  nested_dict_value->EndDictionary();
+  json = "";
+  nested_dict_value->AppendAsTraceFormat(&json);
+  EXPECT_EQ("{\"b\":2,\"c\":[\"foo\"],\"f\":3,\"g\":{}}", json);
+}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/trace_event_etw_export_win.cc b/base/trace_event/trace_event_etw_export_win.cc
new file mode 100644
index 0000000..993a222
--- /dev/null
+++ b/base/trace_event/trace_event_etw_export_win.cc
@@ -0,0 +1,380 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_event_etw_export_win.h"
+
+#include <stddef.h>
+
+#include "base/command_line.h"
+#include "base/logging.h"
+#include "base/memory/singleton.h"
+#include "base/strings/string_tokenizer.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/threading/platform_thread.h"
+#include "base/trace_event/trace_event.h"
+#include "base/trace_event/trace_event_impl.h"
+
+#include <windows.h>
+
+// The GetProcAddress technique is borrowed from
+// https://github.com/google/UIforETW/tree/master/ETWProviders
+//
+// EVNTAPI is used in evntprov.h which is included by chrome_events_win.h.
+// We define EVNTAPI without the DECLSPEC_IMPORT specifier so that we can
+// implement these functions locally instead of using the import library, and
+// can therefore still run on Windows XP.
+#define EVNTAPI __stdcall
+// Include the event register/write/unregister macros compiled from the manifest
+// file. Note that this includes evntprov.h which requires a Vista+ Windows SDK.
+//
+// In SHARED_INTERMEDIATE_DIR.
+#include "base/trace_event/etw_manifest/chrome_events_win.h"  // NOLINT
+
+namespace {
+// |kFilteredEventGroupNames| contains the event categories that can be
+// exported individually. These categories can be enabled by passing the correct
+// keyword when starting the trace. A keyword is a 64-bit flag and we attribute
+// one bit per category. We can therefore enable a particular category by
+// setting its corresponding bit in the keyword. For events that are not present
+// in |kFilteredEventGroupNames|, we have two bits that control their
+// behavior. When bit 61 is enabled, any event that is not disabled by default
+// (i.e. doesn't start with disabled-by-default-) will be exported. Likewise,
+// when bit 62 is enabled, any event that is disabled by default will be
+// exported.
+//
+// Note that bit 63 (MSB) must always be set, otherwise tracing will be disabled
+// by ETW. Therefore, the keyword will always be greater than
+// 0x8000000000000000.
+//
+// Examples of passing keywords to the provider using xperf:
+// # This exports "benchmark" and "cc" events
+// xperf -start chrome -on Chrome:0x8000000000000009
+//
+// # This exports "gpu", "netlog" and all other events that are not disabled by
+// # default
+// xperf -start chrome -on Chrome:0xA0000000000000A0
+//
+// More info about starting a trace and keyword can be obtained by using the
+// help section of xperf (xperf -help start). Note that xperf documentation
+// refers to keywords as flags and there are two ways to enable them, using
+// group names or the hex representation. We only support the latter. Also, we
+// ignore the level.
+const char* const kFilteredEventGroupNames[] = {
+    "benchmark",                                       // 0x1
+    "blink",                                           // 0x2
+    "browser",                                         // 0x4
+    "cc",                                              // 0x8
+    "evdev",                                           // 0x10
+    "gpu",                                             // 0x20
+    "input",                                           // 0x40
+    "netlog",                                          // 0x80
+    "sequence_manager",                                // 0x100
+    "toplevel",                                        // 0x200
+    "v8",                                              // 0x400
+    "disabled-by-default-cc.debug",                    // 0x800
+    "disabled-by-default-cc.debug.picture",            // 0x1000
+    "disabled-by-default-toplevel.flow",               // 0x2000
+    "startup"};                                        // 0x4000
+const char kOtherEventsGroupName[] = "__OTHER_EVENTS";  // 0x2000000000000000
+const char kDisabledOtherEventsGroupName[] =
+    "__DISABLED_OTHER_EVENTS";  // 0x4000000000000000
+const uint64_t kOtherEventsKeywordBit = 1ULL << 61;
+const uint64_t kDisabledOtherEventsKeywordBit = 1ULL << 62;
+const size_t kNumberOfCategories = ARRAYSIZE(kFilteredEventGroupNames) + 2U;
+
+}  // namespace
+
+namespace base {
+namespace trace_event {
+
+// This object will be created by each process. It's a background (low-priority)
+// thread that will monitor the ETW keyword for any changes.
+class TraceEventETWExport::ETWKeywordUpdateThread
+    : public PlatformThread::Delegate {
+ public:
+  ETWKeywordUpdateThread() {}
+  ~ETWKeywordUpdateThread() override {}
+
+  // Implementation of PlatformThread::Delegate:
+  void ThreadMain() override {
+    PlatformThread::SetName("ETW Keyword Update Thread");
+    TimeDelta sleep_time = TimeDelta::FromMilliseconds(kUpdateTimerDelayMs);
+    while (1) {
+      PlatformThread::Sleep(sleep_time);
+      trace_event::TraceEventETWExport::UpdateETWKeyword();
+    }
+  }
+
+ private:
+  // Time between checks for ETW keyword changes (in milliseconds).
+  const unsigned int kUpdateTimerDelayMs = 1000;
+};
+
+
+TraceEventETWExport::TraceEventETWExport()
+    : etw_export_enabled_(false), etw_match_any_keyword_(0ULL) {
+  // Register the ETW provider. If registration fails then the event logging
+  // calls will fail.
+  EventRegisterChrome();
+
+  // Make sure to initialize the map with all the group names. Subsequent
+  // modifications will be made by the background thread and only affect the
+  // values of the keys (no key addition/deletion). Therefore, the map does not
+  // require a lock for access.
+  for (size_t i = 0; i < ARRAYSIZE(kFilteredEventGroupNames); i++)
+    categories_status_[kFilteredEventGroupNames[i]] = false;
+  categories_status_[kOtherEventsGroupName] = false;
+  categories_status_[kDisabledOtherEventsGroupName] = false;
+  DCHECK_EQ(kNumberOfCategories, categories_status_.size());
+}
+
+TraceEventETWExport::~TraceEventETWExport() {
+  EventUnregisterChrome();
+}
+
+// static
+TraceEventETWExport* TraceEventETWExport::GetInstance() {
+  return Singleton<TraceEventETWExport,
+                   StaticMemorySingletonTraits<TraceEventETWExport>>::get();
+}
+
+// static
+void TraceEventETWExport::EnableETWExport() {
+  auto* instance = GetInstance();
+  if (instance && !instance->etw_export_enabled_) {
+    instance->etw_export_enabled_ = true;
+    // Sync the enabled categories with ETW by calling UpdateEnabledCategories()
+    // that checks the keyword. Then create a thread that will call that same
+    // function periodically, to make sure we stay in sync.
+    instance->UpdateEnabledCategories();
+    if (instance->keyword_update_thread_handle_.is_null()) {
+      instance->keyword_update_thread_.reset(new ETWKeywordUpdateThread);
+      PlatformThread::CreateWithPriority(
+          0, instance->keyword_update_thread_.get(),
+          &instance->keyword_update_thread_handle_, ThreadPriority::BACKGROUND);
+    }
+  }
+}
+
+// static
+void TraceEventETWExport::DisableETWExport() {
+  auto* instance = GetInstance();
+  if (instance && instance->etw_export_enabled_)
+    instance->etw_export_enabled_ = false;
+}
+
+// static
+bool TraceEventETWExport::IsETWExportEnabled() {
+  auto* instance = GetInstance();
+  return (instance && instance->etw_export_enabled_);
+}
+
+// static
+void TraceEventETWExport::AddEvent(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    unsigned long long id,
+    int num_args,
+    const char* const* arg_names,
+    const unsigned char* arg_types,
+    const unsigned long long* arg_values,
+    const std::unique_ptr<ConvertableToTraceFormat>* convertable_values) {
+  // We bail early in case exporting is disabled or no consumer is listening.
+  auto* instance = GetInstance();
+  if (!instance || !instance->etw_export_enabled_ || !EventEnabledChromeEvent())
+    return;
+
+  const char* phase_string = nullptr;
+  // Space to store the phase identifier and null-terminator, when needed.
+  char phase_buffer[2];
+  switch (phase) {
+    case TRACE_EVENT_PHASE_BEGIN:
+      phase_string = "Begin";
+      break;
+    case TRACE_EVENT_PHASE_END:
+      phase_string = "End";
+      break;
+    case TRACE_EVENT_PHASE_COMPLETE:
+      phase_string = "Complete";
+      break;
+    case TRACE_EVENT_PHASE_INSTANT:
+      phase_string = "Instant";
+      break;
+    case TRACE_EVENT_PHASE_ASYNC_BEGIN:
+      phase_string = "Async Begin";
+      break;
+    case TRACE_EVENT_PHASE_ASYNC_STEP_INTO:
+      phase_string = "Async Step Into";
+      break;
+    case TRACE_EVENT_PHASE_ASYNC_STEP_PAST:
+      phase_string = "Async Step Past";
+      break;
+    case TRACE_EVENT_PHASE_ASYNC_END:
+      phase_string = "Async End";
+      break;
+    case TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN:
+      phase_string = "Nestable Async Begin";
+      break;
+    case TRACE_EVENT_PHASE_NESTABLE_ASYNC_END:
+      phase_string = "Nestable Async End";
+      break;
+    case TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT:
+      phase_string = "Nestable Async Instant";
+      break;
+    case TRACE_EVENT_PHASE_FLOW_BEGIN:
+      phase_string = "Phase Flow Begin";
+      break;
+    case TRACE_EVENT_PHASE_FLOW_STEP:
+      phase_string = "Phase Flow Step";
+      break;
+    case TRACE_EVENT_PHASE_FLOW_END:
+      phase_string = "Phase Flow End";
+      break;
+    case TRACE_EVENT_PHASE_METADATA:
+      phase_string = "Phase Metadata";
+      break;
+    case TRACE_EVENT_PHASE_COUNTER:
+      phase_string = "Phase Counter";
+      break;
+    case TRACE_EVENT_PHASE_SAMPLE:
+      phase_string = "Phase Sample";
+      break;
+    case TRACE_EVENT_PHASE_CREATE_OBJECT:
+      phase_string = "Phase Create Object";
+      break;
+    case TRACE_EVENT_PHASE_SNAPSHOT_OBJECT:
+      phase_string = "Phase Snapshot Object";
+      break;
+    case TRACE_EVENT_PHASE_DELETE_OBJECT:
+      phase_string = "Phase Delete Object";
+      break;
+    default:
+      phase_buffer[0] = phase;
+      phase_buffer[1] = 0;
+      phase_string = phase_buffer;
+      break;
+  }
+
+  std::string arg_values_string[3];
+  for (int i = 0; i < num_args; i++) {
+    if (arg_types[i] == TRACE_VALUE_TYPE_CONVERTABLE) {
+      // Temporarily do nothing here. This function consumes 1/3 to 1/2 of
+      // *total* process CPU time when ETW tracing, and many of the strings
+      // created exceed WPA's 4094 byte limit and are shown as:
+      // "Unable to parse data". See crbug.com/488257
+      // convertable_values[i]->AppendAsTraceFormat(arg_values_string + i);
+    } else {
+      TraceEvent::TraceValue trace_event;
+      trace_event.as_uint = arg_values[i];
+      TraceEvent::AppendValueAsJSON(arg_types[i], trace_event,
+                                    arg_values_string + i);
+    }
+  }
+
+  EventWriteChromeEvent(
+      name, phase_string, num_args > 0 ? arg_names[0] : "",
+      arg_values_string[0].c_str(), num_args > 1 ? arg_names[1] : "",
+      arg_values_string[1].c_str(), num_args > 2 ? arg_names[2] : "",
+      arg_values_string[2].c_str());
+}
+
+// static
+void TraceEventETWExport::AddCompleteEndEvent(const char* name) {
+  auto* instance = GetInstance();
+  if (!instance || !instance->etw_export_enabled_ || !EventEnabledChromeEvent())
+    return;
+
+  EventWriteChromeEvent(name, "Complete End", "", "", "", "", "", "");
+}
+
+// static
+bool TraceEventETWExport::IsCategoryGroupEnabled(
+    StringPiece category_group_name) {
+  DCHECK(!category_group_name.empty());
+  auto* instance = GetInstance();
+  if (instance == nullptr)
+    return false;
+
+  if (!instance->IsETWExportEnabled())
+    return false;
+
+  CStringTokenizer category_group_tokens(category_group_name.begin(),
+                                         category_group_name.end(), ",");
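+  // Illustration (hypothetical group name): for "gpu,toplevel" the tokenizer
+  // yields "gpu" and then "toplevel"; the group counts as enabled if either
+  // category is enabled on its own.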
+  while (category_group_tokens.GetNext()) {
+    StringPiece category_group_token = category_group_tokens.token_piece();
+    if (instance->IsCategoryEnabled(category_group_token)) {
+      return true;
+    }
+  }
+  return false;
+}
+
+bool TraceEventETWExport::UpdateEnabledCategories() {
+  if (etw_match_any_keyword_ == CHROME_Context.MatchAnyKeyword)
+    return false;
+
+  // If the keyword has changed, update each category.
+  // CHROME_Context.MatchAnyKeyword is set by UIforETW (or other ETW trace
+  // recording tools) using the ETW infrastructure. This value will be set in
+  // all Chrome processes that have registered their ETW provider.
+  etw_match_any_keyword_ = CHROME_Context.MatchAnyKeyword;
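+  // Worked example (illustrative): a MatchAnyKeyword of 0x2000000000000001
+  // has bits 0 and 61 set, which enables kFilteredEventGroupNames[0] in the
+  // loop below and the __OTHER_EVENTS catch-all group after it.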
+  for (size_t i = 0; i < ARRAYSIZE(kFilteredEventGroupNames); i++) {
+    if (etw_match_any_keyword_ & (1ULL << i)) {
+      categories_status_[kFilteredEventGroupNames[i]] = true;
+    } else {
+      categories_status_[kFilteredEventGroupNames[i]] = false;
+    }
+  }
+
+  // Also update the two default categories.
+  if (etw_match_any_keyword_ & kOtherEventsKeywordBit) {
+    categories_status_[kOtherEventsGroupName] = true;
+  } else {
+    categories_status_[kOtherEventsGroupName] = false;
+  }
+  if (etw_match_any_keyword_ & kDisabledOtherEventsKeywordBit) {
+    categories_status_[kDisabledOtherEventsGroupName] = true;
+  } else {
+    categories_status_[kDisabledOtherEventsGroupName] = false;
+  }
+
+  DCHECK_EQ(kNumberOfCategories, categories_status_.size());
+
+  // Update the categories in TraceLog.
+  TraceLog::GetInstance()->UpdateETWCategoryGroupEnabledFlags();
+
+  return true;
+}
+
+bool TraceEventETWExport::IsCategoryEnabled(StringPiece category_name) const {
+  DCHECK_EQ(kNumberOfCategories, categories_status_.size());
+  // Try to find the category and return its status if found.
+  auto it = categories_status_.find(category_name);
+  if (it != categories_status_.end())
+    return it->second;
+
+  // Otherwise return the corresponding default status by first checking if the
+  // category is disabled by default.
+  if (category_name.starts_with("disabled-by-default")) {
+    DCHECK(categories_status_.find(kDisabledOtherEventsGroupName) !=
+           categories_status_.end());
+    return categories_status_.find(kDisabledOtherEventsGroupName)->second;
+  } else {
+    DCHECK(categories_status_.find(kOtherEventsGroupName) !=
+           categories_status_.end());
+    return categories_status_.find(kOtherEventsGroupName)->second;
+  }
+}
+
+// static
+void TraceEventETWExport::UpdateETWKeyword() {
+  if (!IsETWExportEnabled())
+    return;
+  auto* instance = GetInstance();
+  DCHECK(instance);
+  instance->UpdateEnabledCategories();
+}
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/trace_event_etw_export_win.h b/base/trace_event/trace_event_etw_export_win.h
new file mode 100644
index 0000000..8a85b22
--- /dev/null
+++ b/base/trace_event/trace_event_etw_export_win.h
@@ -0,0 +1,99 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the Windows-specific exporting to ETW.
+#ifndef BASE_TRACE_EVENT_TRACE_EVENT_ETW_EXPORT_WIN_H_
+#define BASE_TRACE_EVENT_TRACE_EVENT_ETW_EXPORT_WIN_H_
+
+#include <stdint.h>
+
+#include <map>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/strings/string_piece.h"
+#include "base/trace_event/trace_event_impl.h"
+
+namespace base {
+
+template <typename Type>
+struct StaticMemorySingletonTraits;
+
+namespace trace_event {
+
+class BASE_EXPORT TraceEventETWExport {
+ public:
+  ~TraceEventETWExport();
+
+  // Retrieves the singleton.
+  // Note that this may return NULL post-AtExit processing.
+  static TraceEventETWExport* GetInstance();
+
+  // Enables/disables exporting of events to ETW. If disabled,
+  // AddEvent and AddCompleteEndEvent will simply return when called.
+  static void EnableETWExport();
+  static void DisableETWExport();
+
+  // Returns true if ETW is enabled. For now, this is true if the command line
+  // flag is specified.
+  static bool IsETWExportEnabled();
+
+  // Exports an event to ETW. This is mainly used in
+  // TraceLog::AddTraceEventWithThreadIdAndTimestamp to export internal events.
+  static void AddEvent(
+      char phase,
+      const unsigned char* category_group_enabled,
+      const char* name,
+      unsigned long long id,
+      int num_args,
+      const char* const* arg_names,
+      const unsigned char* arg_types,
+      const unsigned long long* arg_values,
+      const std::unique_ptr<ConvertableToTraceFormat>* convertable_values);
+
+  // Exports an ETW event that marks the end of a complete event.
+  static void AddCompleteEndEvent(const char* name);
+
+  // Returns true if any category in the group is enabled.
+  static bool IsCategoryGroupEnabled(StringPiece category_group_name);
+
+ private:
+  // Ensure only the provider can construct us.
+  friend struct StaticMemorySingletonTraits<TraceEventETWExport>;
+  // To have access to UpdateETWKeyword().
+  class ETWKeywordUpdateThread;
+  TraceEventETWExport();
+
+  // Updates the list of enabled categories by consulting the ETW keyword.
+  // Returns true if there was a change, false otherwise.
+  bool UpdateEnabledCategories();
+
+  // Returns true if the category is enabled.
+  bool IsCategoryEnabled(StringPiece category_name) const;
+
+  // Called back by the update thread to check for potential changes to the
+  // keyword.
+  static void UpdateETWKeyword();
+
+  // True if ETW is enabled. Allows hiding the exporting behind a flag.
+  bool etw_export_enabled_;
+
+  // Maps category names to their status (enabled/disabled).
+  std::map<StringPiece, bool> categories_status_;
+
+  // Local copy of the ETW keyword.
+  uint64_t etw_match_any_keyword_;
+
+  // Background thread that monitors changes to the ETW keyword and updates
+  // the enabled categories when a change occurs.
+  std::unique_ptr<ETWKeywordUpdateThread> keyword_update_thread_;
+  PlatformThreadHandle keyword_update_thread_handle_;
+
+  DISALLOW_COPY_AND_ASSIGN(TraceEventETWExport);
+};
+
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_TRACE_EVENT_ETW_EXPORT_WIN_H_
diff --git a/base/trace_event/trace_event_filter.cc b/base/trace_event/trace_event_filter.cc
new file mode 100644
index 0000000..d0b116e
--- /dev/null
+++ b/base/trace_event/trace_event_filter.cc
@@ -0,0 +1,17 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_event_filter.h"
+
+namespace base {
+namespace trace_event {
+
+TraceEventFilter::TraceEventFilter() = default;
+TraceEventFilter::~TraceEventFilter() = default;
+
+void TraceEventFilter::EndEvent(const char* category_name,
+                                const char* event_name) const {}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/trace_event_filter.h b/base/trace_event/trace_event_filter.h
new file mode 100644
index 0000000..48c6711
--- /dev/null
+++ b/base/trace_event/trace_event_filter.h
@@ -0,0 +1,51 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_EVENT_FILTER_H_
+#define BASE_TRACE_EVENT_TRACE_EVENT_FILTER_H_
+
+#include <memory>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+
+namespace base {
+namespace trace_event {
+
+class TraceEvent;
+
+// TraceEventFilter is like iptables for TRACE_EVENT macros. Filters can be
+// enabled on a per-category basis, hence a single filter instance can serve
+// more than one TraceCategory. There are two use cases for filters:
+// 1. Snooping TRACE_EVENT macros without adding them to the TraceLog. This is
+//    possible by setting the ENABLED_FOR_FILTERING flag on a category w/o
+//    ENABLED_FOR_RECORDING (see TraceConfig for user-facing configuration).
+// 2. Filtering TRACE_EVENT macros before they are added to the TraceLog. This
+//    requires both the ENABLED_FOR_FILTERING and ENABLED_FOR_RECORDING flags
+//    on the category.
+// More importantly, filters must be thread-safe. The FilterTraceEvent and
+// EndEvent methods can be called concurrently as trace macros are hit on
+// different threads.
+class BASE_EXPORT TraceEventFilter {
+ public:
+  TraceEventFilter();
+  virtual ~TraceEventFilter();
+
+  // If the category is ENABLED_FOR_RECORDING, the event is added iff all the
+  // filters enabled for the category return true. false causes the event to be
+  // discarded.
+  virtual bool FilterTraceEvent(const TraceEvent& trace_event) const = 0;
+
+  // Notifies the end of a duration event when the RAII macro goes out of scope.
+  virtual void EndEvent(const char* category_name,
+                        const char* event_name) const;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(TraceEventFilter);
+};
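+
+// A purely illustrative sketch (hypothetical, not part of this header): a
+// filter that drops every event named "Idle" could be written as
+//
+//   class DropIdleFilter : public TraceEventFilter {
+//    public:
+//     bool FilterTraceEvent(const TraceEvent& event) const override {
+//       return strcmp(event.name(), "Idle") != 0;
+//     }
+//   };
+//
+// TestEventFilter in trace_event_filter_test_utils.h shows how a filter is
+// hooked up through TraceLog::SetFilterFactoryForTesting().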
+
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_TRACE_EVENT_FILTER_H_
diff --git a/base/trace_event/trace_event_filter_test_utils.cc b/base/trace_event/trace_event_filter_test_utils.cc
new file mode 100644
index 0000000..85b4cfa
--- /dev/null
+++ b/base/trace_event/trace_event_filter_test_utils.cc
@@ -0,0 +1,61 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_event_filter_test_utils.h"
+
+#include "base/logging.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+TestEventFilter::HitsCounter* g_hits_counter;
+}  // namespace
+
+// static
+const char TestEventFilter::kName[] = "testing_predicate";
+bool TestEventFilter::filter_return_value_;
+
+// static
+std::unique_ptr<TraceEventFilter> TestEventFilter::Factory(
+    const std::string& predicate_name) {
+  std::unique_ptr<TraceEventFilter> res;
+  if (predicate_name == kName)
+    res.reset(new TestEventFilter());
+  return res;
+}
+
+TestEventFilter::TestEventFilter() = default;
+TestEventFilter::~TestEventFilter() = default;
+
+bool TestEventFilter::FilterTraceEvent(const TraceEvent& trace_event) const {
+  if (g_hits_counter)
+    g_hits_counter->filter_trace_event_hit_count++;
+  return filter_return_value_;
+}
+
+void TestEventFilter::EndEvent(const char* category_name,
+                               const char* name) const {
+  if (g_hits_counter)
+    g_hits_counter->end_event_hit_count++;
+}
+
+TestEventFilter::HitsCounter::HitsCounter() {
+  Reset();
+  DCHECK(!g_hits_counter);
+  g_hits_counter = this;
+}
+
+TestEventFilter::HitsCounter::~HitsCounter() {
+  DCHECK(g_hits_counter);
+  g_hits_counter = nullptr;
+}
+
+void TestEventFilter::HitsCounter::Reset() {
+  filter_trace_event_hit_count = 0;
+  end_event_hit_count = 0;
+}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/trace_event_filter_test_utils.h b/base/trace_event/trace_event_filter_test_utils.h
new file mode 100644
index 0000000..419068b
--- /dev/null
+++ b/base/trace_event/trace_event_filter_test_utils.h
@@ -0,0 +1,53 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_EVENT_FILTER_TEST_UTILS_H_
+#define BASE_TRACE_EVENT_TRACE_EVENT_FILTER_TEST_UTILS_H_
+
+#include <memory>
+#include <string>
+
+#include "base/macros.h"
+#include "base/trace_event/trace_event_filter.h"
+
+namespace base {
+namespace trace_event {
+
+class TestEventFilter : public TraceEventFilter {
+ public:
+  struct HitsCounter {
+    HitsCounter();
+    ~HitsCounter();
+    void Reset();
+    size_t filter_trace_event_hit_count;
+    size_t end_event_hit_count;
+  };
+
+  static const char kName[];
+
+  // Factory method for TraceLog::SetFilterFactoryForTesting().
+  static std::unique_ptr<TraceEventFilter> Factory(
+      const std::string& predicate_name);
+
+  TestEventFilter();
+  ~TestEventFilter() override;
+
+  // TraceEventFilter implementation.
+  bool FilterTraceEvent(const TraceEvent& trace_event) const override;
+  void EndEvent(const char* category_name, const char* name) const override;
+
+  static void set_filter_return_value(bool value) {
+    filter_return_value_ = value;
+  }
+
+ private:
+  static bool filter_return_value_;
+
+  DISALLOW_COPY_AND_ASSIGN(TestEventFilter);
+};
+
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_TRACE_EVENT_FILTER_TEST_UTILS_H_
diff --git a/base/trace_event/trace_event_impl.cc b/base/trace_event/trace_event_impl.cc
new file mode 100644
index 0000000..c72e1fc
--- /dev/null
+++ b/base/trace_event/trace_event_impl.cc
@@ -0,0 +1,489 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_event_impl.h"
+
+#include <stddef.h>
+
+#include "base/format_macros.h"
+#include "base/json/string_escape.h"
+#include "base/memory/ptr_util.h"
+#include "base/process/process_handle.h"
+#include "base/stl_util.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/trace_event/trace_event.h"
+#include "base/trace_event/trace_event_argument.h"
+#include "base/trace_event/trace_log.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+size_t GetAllocLength(const char* str) { return str ? strlen(str) + 1 : 0; }
+
+// Copies |*member| into |*buffer|, sets |*member| to point to this new
+// location, and then advances |*buffer| by the amount written.
+void CopyTraceEventParameter(char** buffer,
+                             const char** member,
+                             const char* end) {
+  if (*member) {
+    size_t written = strlcpy(*buffer, *member, end - *buffer) + 1;
+    DCHECK_LE(static_cast<int>(written), end - *buffer);
+    *member = *buffer;
+    *buffer += written;
+  }
+}
+
+}  // namespace
+
+TraceEvent::TraceEvent()
+    : duration_(TimeDelta::FromInternalValue(-1)),
+      scope_(trace_event_internal::kGlobalScope),
+      id_(0u),
+      category_group_enabled_(nullptr),
+      name_(nullptr),
+      thread_id_(0),
+      flags_(0),
+      phase_(TRACE_EVENT_PHASE_BEGIN) {
+  for (int i = 0; i < kTraceMaxNumArgs; ++i)
+    arg_names_[i] = nullptr;
+  memset(arg_values_, 0, sizeof(arg_values_));
+}
+
+TraceEvent::~TraceEvent() = default;
+
+void TraceEvent::MoveFrom(std::unique_ptr<TraceEvent> other) {
+  timestamp_ = other->timestamp_;
+  thread_timestamp_ = other->thread_timestamp_;
+  duration_ = other->duration_;
+  scope_ = other->scope_;
+  id_ = other->id_;
+  category_group_enabled_ = other->category_group_enabled_;
+  name_ = other->name_;
+  if (other->flags_ & TRACE_EVENT_FLAG_HAS_PROCESS_ID)
+    process_id_ = other->process_id_;
+  else
+    thread_id_ = other->thread_id_;
+  phase_ = other->phase_;
+  flags_ = other->flags_;
+  parameter_copy_storage_ = std::move(other->parameter_copy_storage_);
+
+  for (int i = 0; i < kTraceMaxNumArgs; ++i) {
+    arg_names_[i] = other->arg_names_[i];
+    arg_types_[i] = other->arg_types_[i];
+    arg_values_[i] = other->arg_values_[i];
+    convertable_values_[i] = std::move(other->convertable_values_[i]);
+  }
+}
+
+void TraceEvent::Initialize(
+    int thread_id,
+    TimeTicks timestamp,
+    ThreadTicks thread_timestamp,
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    unsigned long long bind_id,
+    int num_args,
+    const char* const* arg_names,
+    const unsigned char* arg_types,
+    const unsigned long long* arg_values,
+    std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
+    unsigned int flags) {
+  timestamp_ = timestamp;
+  thread_timestamp_ = thread_timestamp;
+  duration_ = TimeDelta::FromInternalValue(-1);
+  scope_ = scope;
+  id_ = id;
+  category_group_enabled_ = category_group_enabled;
+  name_ = name;
+  thread_id_ = thread_id;
+  phase_ = phase;
+  flags_ = flags;
+  bind_id_ = bind_id;
+
+  // Clamp num_args since it may have been set by a third_party library.
+  num_args = (num_args > kTraceMaxNumArgs) ? kTraceMaxNumArgs : num_args;
+  int i = 0;
+  for (; i < num_args; ++i) {
+    arg_names_[i] = arg_names[i];
+    arg_types_[i] = arg_types[i];
+
+    if (arg_types[i] == TRACE_VALUE_TYPE_CONVERTABLE) {
+      convertable_values_[i] = std::move(convertable_values[i]);
+    } else {
+      arg_values_[i].as_uint = arg_values[i];
+      convertable_values_[i].reset();
+    }
+  }
+  for (; i < kTraceMaxNumArgs; ++i) {
+    arg_names_[i] = nullptr;
+    arg_values_[i].as_uint = 0u;
+    convertable_values_[i].reset();
+    arg_types_[i] = TRACE_VALUE_TYPE_UINT;
+  }
+
+  bool copy = !!(flags & TRACE_EVENT_FLAG_COPY);
+  size_t alloc_size = 0;
+  if (copy) {
+    alloc_size += GetAllocLength(name) + GetAllocLength(scope);
+    for (i = 0; i < num_args; ++i) {
+      alloc_size += GetAllocLength(arg_names_[i]);
+      if (arg_types_[i] == TRACE_VALUE_TYPE_STRING)
+        arg_types_[i] = TRACE_VALUE_TYPE_COPY_STRING;
+    }
+  }
+
+  bool arg_is_copy[kTraceMaxNumArgs];
+  for (i = 0; i < num_args; ++i) {
+    // No copying of convertable types; we retain ownership.
+    if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
+      continue;
+
+    // We only take a copy of arg values if they are of type COPY_STRING.
+    arg_is_copy[i] = (arg_types_[i] == TRACE_VALUE_TYPE_COPY_STRING);
+    if (arg_is_copy[i])
+      alloc_size += GetAllocLength(arg_values_[i].as_string);
+  }
+
+  if (alloc_size) {
+    parameter_copy_storage_.reset(new std::string);
+    parameter_copy_storage_->resize(alloc_size);
+    char* ptr = base::data(*parameter_copy_storage_);
+    const char* end = ptr + alloc_size;
+    if (copy) {
+      CopyTraceEventParameter(&ptr, &name_, end);
+      CopyTraceEventParameter(&ptr, &scope_, end);
+      for (i = 0; i < num_args; ++i) {
+        CopyTraceEventParameter(&ptr, &arg_names_[i], end);
+      }
+    }
+    for (i = 0; i < num_args; ++i) {
+      if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
+        continue;
+      if (arg_is_copy[i])
+        CopyTraceEventParameter(&ptr, &arg_values_[i].as_string, end);
+    }
+    DCHECK_EQ(end, ptr) << "Overrun by " << ptr - end;
+  }
+}
+
+void TraceEvent::Reset() {
+  // Only reset fields that won't be initialized in Initialize(), or that may
+  // hold references to other objects.
+  duration_ = TimeDelta::FromInternalValue(-1);
+  parameter_copy_storage_.reset();
+  for (int i = 0; i < kTraceMaxNumArgs; ++i)
+    convertable_values_[i].reset();
+}
+
+void TraceEvent::UpdateDuration(const TimeTicks& now,
+                                const ThreadTicks& thread_now) {
+  DCHECK_EQ(duration_.ToInternalValue(), -1);
+  duration_ = now - timestamp_;
+
+  // |thread_timestamp_| can be empty if the thread ticks clock wasn't
+  // initialized when it was recorded.
+  if (thread_timestamp_ != ThreadTicks())
+    thread_duration_ = thread_now - thread_timestamp_;
+}
+
+void TraceEvent::EstimateTraceMemoryOverhead(
+    TraceEventMemoryOverhead* overhead) {
+  overhead->Add(TraceEventMemoryOverhead::kTraceEvent, sizeof(*this));
+
+  if (parameter_copy_storage_)
+    overhead->AddString(*parameter_copy_storage_);
+
+  for (size_t i = 0; i < kTraceMaxNumArgs; ++i) {
+    if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
+      convertable_values_[i]->EstimateTraceMemoryOverhead(overhead);
+  }
+}
+
+// static
+void TraceEvent::AppendValueAsJSON(unsigned char type,
+                                   TraceEvent::TraceValue value,
+                                   std::string* out) {
+  switch (type) {
+    case TRACE_VALUE_TYPE_BOOL:
+      *out += value.as_bool ? "true" : "false";
+      break;
+    case TRACE_VALUE_TYPE_UINT:
+      StringAppendF(out, "%" PRIu64, static_cast<uint64_t>(value.as_uint));
+      break;
+    case TRACE_VALUE_TYPE_INT:
+      StringAppendF(out, "%" PRId64, static_cast<int64_t>(value.as_int));
+      break;
+    case TRACE_VALUE_TYPE_DOUBLE: {
+      // FIXME: base/json/json_writer.cc is using the same code,
+      //        should be made into a common method.
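+      // Examples of the rewriting below (illustrative): a value of 3.0 is
+      // emitted as 3.0 (the ".0" is appended), NaN as the quoted string
+      // "NaN", and negative infinity as "-Infinity".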
+      std::string real;
+      double val = value.as_double;
+      if (std::isfinite(val)) {
+        real = NumberToString(val);
+        // Ensure that the number has a .0 if there's no decimal or 'e'.  This
+        // makes sure that when we read the JSON back, it's interpreted as a
+        // real rather than an int.
+        if (real.find('.') == std::string::npos &&
+            real.find('e') == std::string::npos &&
+            real.find('E') == std::string::npos) {
+          real.append(".0");
+        }
+        // The JSON spec requires that non-integer values in the range (-1,1)
+        // have a zero before the decimal point - ".52" is not valid, "0.52" is.
+        if (real[0] == '.') {
+          real.insert(0, "0");
+        } else if (real.length() > 1 && real[0] == '-' && real[1] == '.') {
+          // "-.1" bad "-0.1" good
+          real.insert(1, "0");
+        }
+      } else if (std::isnan(val)) {
+        // The JSON spec doesn't allow NaN and Infinity (since these are
+        // objects in EcmaScript).  Use strings instead.
+        real = "\"NaN\"";
+      } else if (val < 0) {
+        real = "\"-Infinity\"";
+      } else {
+        real = "\"Infinity\"";
+      }
+      StringAppendF(out, "%s", real.c_str());
+      break;
+    }
+    case TRACE_VALUE_TYPE_POINTER:
+      // JSON only supports double and int numbers.
+      // So as not to lose bits from a 64-bit pointer, output as a hex string.
+      StringAppendF(
+          out, "\"0x%" PRIx64 "\"",
+          static_cast<uint64_t>(reinterpret_cast<uintptr_t>(value.as_pointer)));
+      break;
+    case TRACE_VALUE_TYPE_STRING:
+    case TRACE_VALUE_TYPE_COPY_STRING:
+      EscapeJSONString(value.as_string ? value.as_string : "NULL", true, out);
+      break;
+    default:
+      NOTREACHED() << "Don't know how to print this value";
+      break;
+  }
+}
+
+void TraceEvent::AppendAsJSON(
+    std::string* out,
+    const ArgumentFilterPredicate& argument_filter_predicate) const {
+  int64_t time_int64 = timestamp_.ToInternalValue();
+  int process_id;
+  int thread_id;
+  if ((flags_ & TRACE_EVENT_FLAG_HAS_PROCESS_ID) &&
+      process_id_ != kNullProcessId) {
+    process_id = process_id_;
+    thread_id = -1;
+  } else {
+    process_id = TraceLog::GetInstance()->process_id();
+    thread_id = thread_id_;
+  }
+  const char* category_group_name =
+      TraceLog::GetCategoryGroupName(category_group_enabled_);
+
+  // Category group checked at category creation time.
+  DCHECK(!strchr(name_, '"'));
+  StringAppendF(out, "{\"pid\":%i,\"tid\":%i,\"ts\":%" PRId64
+                     ",\"ph\":\"%c\",\"cat\":\"%s\",\"name\":",
+                process_id, thread_id, time_int64, phase_, category_group_name);
+  EscapeJSONString(name_, true, out);
+  *out += ",\"args\":";
+
+  // Output argument names and values, stop at first NULL argument name.
+  // TODO(oysteine): The dual predicates here are a bit ugly; if the filtering
+  // capabilities need to grow even more precise we should rethink this
+  // approach.
+  ArgumentNameFilterPredicate argument_name_filter_predicate;
+  bool strip_args =
+      arg_names_[0] && !argument_filter_predicate.is_null() &&
+      !argument_filter_predicate.Run(category_group_name, name_,
+                                     &argument_name_filter_predicate);
+
+  if (strip_args) {
+    *out += "\"__stripped__\"";
+  } else {
+    *out += "{";
+
+    for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) {
+      if (i > 0)
+        *out += ",";
+      *out += "\"";
+      *out += arg_names_[i];
+      *out += "\":";
+
+      if (argument_name_filter_predicate.is_null() ||
+          argument_name_filter_predicate.Run(arg_names_[i])) {
+        if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
+          convertable_values_[i]->AppendAsTraceFormat(out);
+        else
+          AppendValueAsJSON(arg_types_[i], arg_values_[i], out);
+      } else {
+        *out += "\"__stripped__\"";
+      }
+    }
+
+    *out += "}";
+  }
+
+  if (phase_ == TRACE_EVENT_PHASE_COMPLETE) {
+    int64_t duration = duration_.ToInternalValue();
+    if (duration != -1)
+      StringAppendF(out, ",\"dur\":%" PRId64, duration);
+    if (!thread_timestamp_.is_null()) {
+      int64_t thread_duration = thread_duration_.ToInternalValue();
+      if (thread_duration != -1)
+        StringAppendF(out, ",\"tdur\":%" PRId64, thread_duration);
+    }
+  }
+
+  // Output tts if thread_timestamp is valid.
+  if (!thread_timestamp_.is_null()) {
+    int64_t thread_time_int64 = thread_timestamp_.ToInternalValue();
+    StringAppendF(out, ",\"tts\":%" PRId64, thread_time_int64);
+  }
+
+  // Output async tts marker field if flag is set.
+  if (flags_ & TRACE_EVENT_FLAG_ASYNC_TTS) {
+    StringAppendF(out, ", \"use_async_tts\":1");
+  }
+
+  // If id_ is set, print it out as a hex string so we don't lose any
+  // bits (it might be a 64-bit pointer).
+  unsigned int id_flags_ = flags_ & (TRACE_EVENT_FLAG_HAS_ID |
+                                     TRACE_EVENT_FLAG_HAS_LOCAL_ID |
+                                     TRACE_EVENT_FLAG_HAS_GLOBAL_ID);
+  if (id_flags_) {
+    if (scope_ != trace_event_internal::kGlobalScope)
+      StringAppendF(out, ",\"scope\":\"%s\"", scope_);
+
+    switch (id_flags_) {
+      case TRACE_EVENT_FLAG_HAS_ID:
+        StringAppendF(out, ",\"id\":\"0x%" PRIx64 "\"",
+                      static_cast<uint64_t>(id_));
+        break;
+
+      case TRACE_EVENT_FLAG_HAS_LOCAL_ID:
+        StringAppendF(out, ",\"id2\":{\"local\":\"0x%" PRIx64 "\"}",
+                      static_cast<uint64_t>(id_));
+        break;
+
+      case TRACE_EVENT_FLAG_HAS_GLOBAL_ID:
+        StringAppendF(out, ",\"id2\":{\"global\":\"0x%" PRIx64 "\"}",
+                      static_cast<uint64_t>(id_));
+        break;
+
+      default:
+        NOTREACHED() << "More than one of the ID flags are set";
+        break;
+    }
+  }
+
+  if (flags_ & TRACE_EVENT_FLAG_BIND_TO_ENCLOSING)
+    StringAppendF(out, ",\"bp\":\"e\"");
+
+  if ((flags_ & TRACE_EVENT_FLAG_FLOW_OUT) ||
+      (flags_ & TRACE_EVENT_FLAG_FLOW_IN)) {
+    StringAppendF(out, ",\"bind_id\":\"0x%" PRIx64 "\"",
+                  static_cast<uint64_t>(bind_id_));
+  }
+  if (flags_ & TRACE_EVENT_FLAG_FLOW_IN)
+    StringAppendF(out, ",\"flow_in\":true");
+  if (flags_ & TRACE_EVENT_FLAG_FLOW_OUT)
+    StringAppendF(out, ",\"flow_out\":true");
+
+  // Instant events also output their scope.
+  if (phase_ == TRACE_EVENT_PHASE_INSTANT) {
+    char scope = '?';
+    switch (flags_ & TRACE_EVENT_FLAG_SCOPE_MASK) {
+      case TRACE_EVENT_SCOPE_GLOBAL:
+        scope = TRACE_EVENT_SCOPE_NAME_GLOBAL;
+        break;
+
+      case TRACE_EVENT_SCOPE_PROCESS:
+        scope = TRACE_EVENT_SCOPE_NAME_PROCESS;
+        break;
+
+      case TRACE_EVENT_SCOPE_THREAD:
+        scope = TRACE_EVENT_SCOPE_NAME_THREAD;
+        break;
+    }
+    StringAppendF(out, ",\"s\":\"%c\"", scope);
+  }
+
+  *out += "}";
+}
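+
+// For reference, an illustrative (not exhaustive) serialization of a complete
+// event produced by AppendAsJSON() looks like:
+//   {"pid":42,"tid":7,"ts":1000,"ph":"X","cat":"toplevel","name":"Run",
+//    "args":{},"dur":50}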
+
+void TraceEvent::AppendPrettyPrinted(std::ostringstream* out) const {
+  *out << name_ << "[";
+  *out << TraceLog::GetCategoryGroupName(category_group_enabled_);
+  *out << "]";
+  if (arg_names_[0]) {
+    *out << ", {";
+    for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) {
+      if (i > 0)
+        *out << ", ";
+      *out << arg_names_[i] << ":";
+      std::string value_as_text;
+
+      if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
+        convertable_values_[i]->AppendAsTraceFormat(&value_as_text);
+      else
+        AppendValueAsJSON(arg_types_[i], arg_values_[i], &value_as_text);
+
+      *out << value_as_text;
+    }
+    *out << "}";
+  }
+}
+
+}  // namespace trace_event
+}  // namespace base
+
+namespace trace_event_internal {
+
+std::unique_ptr<base::trace_event::ConvertableToTraceFormat>
+TraceID::AsConvertableToTraceFormat() const {
+  auto value = std::make_unique<base::trace_event::TracedValue>();
+
+  if (scope_ != kGlobalScope)
+    value->SetString("scope", scope_);
+
+  const char* id_field_name = "id";
+  if (id_flags_ == TRACE_EVENT_FLAG_HAS_GLOBAL_ID) {
+    id_field_name = "global";
+    value->BeginDictionary("id2");
+  } else if (id_flags_ == TRACE_EVENT_FLAG_HAS_LOCAL_ID) {
+    id_field_name = "local";
+    value->BeginDictionary("id2");
+  } else if (id_flags_ != TRACE_EVENT_FLAG_HAS_ID) {
+    NOTREACHED() << "Unrecognized ID flag";
+  }
+
+  if (has_prefix_) {
+    value->SetString(id_field_name,
+                     base::StringPrintf("0x%" PRIx64 "/0x%" PRIx64,
+                                        static_cast<uint64_t>(prefix_),
+                                        static_cast<uint64_t>(raw_id_)));
+  } else {
+    value->SetString(
+        id_field_name,
+        base::StringPrintf("0x%" PRIx64, static_cast<uint64_t>(raw_id_)));
+  }
+
+  if (id_flags_ != TRACE_EVENT_FLAG_HAS_ID)
+    value->EndDictionary();
+
+  return std::move(value);
+}
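+
+// Illustrative output shapes: a plain ID 0x1234 serializes as
+// {"id":"0x1234"}; a local ID 0x1234 with prefix 0x1 serializes as
+// {"id2":{"local":"0x1/0x1234"}}; a global ID serializes as
+// {"id2":{"global":"0x1234"}}.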
+
+}  // namespace trace_event_internal
diff --git a/base/trace_event/trace_event_impl.h b/base/trace_event/trace_event_impl.h
new file mode 100644
index 0000000..b1c67b1
--- /dev/null
+++ b/base/trace_event/trace_event_impl.h
@@ -0,0 +1,187 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_EVENT_IMPL_H_
+#define BASE_TRACE_EVENT_TRACE_EVENT_IMPL_H_
+
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/containers/hash_tables.h"
+#include "base/macros.h"
+#include "base/observer_list.h"
+#include "base/single_thread_task_runner.h"
+#include "base/strings/string_util.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/thread_local.h"
+#include "base/trace_event/trace_event_memory_overhead.h"
+#include "build/build_config.h"
+
+namespace base {
+namespace trace_event {
+
+typedef base::Callback<bool(const char* arg_name)> ArgumentNameFilterPredicate;
+
+typedef base::Callback<bool(const char* category_group_name,
+                            const char* event_name,
+                            ArgumentNameFilterPredicate*)>
+    ArgumentFilterPredicate;
+
+// For any argument of type TRACE_VALUE_TYPE_CONVERTABLE the provided
+// class must implement this interface.
+class BASE_EXPORT ConvertableToTraceFormat {
+ public:
+  ConvertableToTraceFormat() = default;
+  virtual ~ConvertableToTraceFormat() = default;
+
+  // Append the class info to the provided |out| string. The appended
+  // data must be a valid JSON object. Strings must be properly quoted, and
+  // escaped. There is no processing applied to the content after it is
+  // appended.
+  virtual void AppendAsTraceFormat(std::string* out) const = 0;
+
+  virtual void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead);
+
+  std::string ToString() const {
+    std::string result;
+    AppendAsTraceFormat(&result);
+    return result;
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ConvertableToTraceFormat);
+};
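+
+// A minimal illustrative implementation (hypothetical, for exposition only):
+//
+//   class CountConvertable : public ConvertableToTraceFormat {
+//    public:
+//     explicit CountConvertable(int count) : count_(count) {}
+//     void AppendAsTraceFormat(std::string* out) const override {
+//       out->append("{\"count\":" + std::to_string(count_) + "}");
+//     }
+//
+//    private:
+//     int count_;
+//   };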
+
+const int kTraceMaxNumArgs = 2;
+
+struct TraceEventHandle {
+  uint32_t chunk_seq;
+  // The widths of these bit fields must be kept consistent with
+  // TraceBufferChunk::kMaxChunkIndex and
+  // TraceBufferChunk::kTraceBufferChunkSize (in trace_buffer.h).
+  unsigned chunk_index : 26;
+  unsigned event_index : 6;
+};
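+
+// Arithmetic check (illustrative): the 6 bits of event_index address the 64
+// events of one chunk (2^6 == 64, matching kTraceBufferChunkSize), and the
+// 26 bits of chunk_index address up to 2^26 chunks.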
+
+class BASE_EXPORT TraceEvent {
+ public:
+  union TraceValue {
+    bool as_bool;
+    unsigned long long as_uint;
+    long long as_int;
+    double as_double;
+    const void* as_pointer;
+    const char* as_string;
+  };
+
+  TraceEvent();
+  ~TraceEvent();
+
+  void MoveFrom(std::unique_ptr<TraceEvent> other);
+
+  void Initialize(int thread_id,
+                  TimeTicks timestamp,
+                  ThreadTicks thread_timestamp,
+                  char phase,
+                  const unsigned char* category_group_enabled,
+                  const char* name,
+                  const char* scope,
+                  unsigned long long id,
+                  unsigned long long bind_id,
+                  int num_args,
+                  const char* const* arg_names,
+                  const unsigned char* arg_types,
+                  const unsigned long long* arg_values,
+                  std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
+                  unsigned int flags);
+
+  void Reset();
+
+  void UpdateDuration(const TimeTicks& now, const ThreadTicks& thread_now);
+
+  void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead);
+
+  // Serialize event data to JSON
+  void AppendAsJSON(
+      std::string* out,
+      const ArgumentFilterPredicate& argument_filter_predicate) const;
+  void AppendPrettyPrinted(std::ostringstream* out) const;
+
+  static void AppendValueAsJSON(unsigned char type,
+                                TraceValue value,
+                                std::string* out);
+
+  TimeTicks timestamp() const { return timestamp_; }
+  ThreadTicks thread_timestamp() const { return thread_timestamp_; }
+  char phase() const { return phase_; }
+  int thread_id() const { return thread_id_; }
+  TimeDelta duration() const { return duration_; }
+  TimeDelta thread_duration() const { return thread_duration_; }
+  const char* scope() const { return scope_; }
+  unsigned long long id() const { return id_; }
+  unsigned int flags() const { return flags_; }
+  unsigned long long bind_id() const { return bind_id_; }
+
+  // Exposed for unittesting:
+  const std::string* parameter_copy_storage() const {
+    return parameter_copy_storage_.get();
+  }
+
+  const unsigned char* category_group_enabled() const {
+    return category_group_enabled_;
+  }
+
+  const char* name() const { return name_; }
+
+  unsigned char arg_type(size_t index) const { return arg_types_[index]; }
+  const char* arg_name(size_t index) const { return arg_names_[index]; }
+  const TraceValue& arg_value(size_t index) const { return arg_values_[index]; }
+
+#if defined(OS_ANDROID)
+  void SendToATrace();
+#endif
+
+ private:
+  // Note: these are ordered by size (largest first) for optimal packing.
+  TimeTicks timestamp_;
+  ThreadTicks thread_timestamp_;
+  TimeDelta duration_;
+  TimeDelta thread_duration_;
+  // scope_ and id_ can be used to store phase-specific data.
+  const char* scope_;
+  unsigned long long id_;
+  TraceValue arg_values_[kTraceMaxNumArgs];
+  const char* arg_names_[kTraceMaxNumArgs];
+  std::unique_ptr<ConvertableToTraceFormat>
+      convertable_values_[kTraceMaxNumArgs];
+  const unsigned char* category_group_enabled_;
+  const char* name_;
+  std::unique_ptr<std::string> parameter_copy_storage_;
+  // Depending on TRACE_EVENT_FLAG_HAS_PROCESS_ID the event will have either:
+  //  tid: thread_id_, pid: current_process_id (default case).
+  //  tid: -1, pid: process_id_ (when flags_ & TRACE_EVENT_FLAG_HAS_PROCESS_ID).
+  union {
+    int thread_id_;
+    int process_id_;
+  };
+  unsigned int flags_;
+  unsigned long long bind_id_;
+  unsigned char arg_types_[kTraceMaxNumArgs];
+  char phase_;
+
+  DISALLOW_COPY_AND_ASSIGN(TraceEvent);
+};
+
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_TRACE_EVENT_IMPL_H_
diff --git a/base/trace_event/trace_event_memory_overhead.cc b/base/trace_event/trace_event_memory_overhead.cc
new file mode 100644
index 0000000..d5875f8
--- /dev/null
+++ b/base/trace_event/trace_event_memory_overhead.cc
@@ -0,0 +1,177 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_event_memory_overhead.h"
+
+#include <algorithm>
+
+#include "base/bits.h"
+#include "base/memory/ref_counted_memory.h"
+#include "base/strings/stringprintf.h"
+#include "base/trace_event/memory_allocator_dump.h"
+#include "base/trace_event/memory_usage_estimator.h"
+#include "base/trace_event/process_memory_dump.h"
+#include "base/values.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+const char* ObjectTypeToString(TraceEventMemoryOverhead::ObjectType type) {
+  switch (type) {
+    case TraceEventMemoryOverhead::kOther:
+      return "(Other)";
+    case TraceEventMemoryOverhead::kTraceBuffer:
+      return "TraceBuffer";
+    case TraceEventMemoryOverhead::kTraceBufferChunk:
+      return "TraceBufferChunk";
+    case TraceEventMemoryOverhead::kTraceEvent:
+      return "TraceEvent";
+    case TraceEventMemoryOverhead::kUnusedTraceEvent:
+      return "TraceEvent(Unused)";
+    case TraceEventMemoryOverhead::kTracedValue:
+      return "TracedValue";
+    case TraceEventMemoryOverhead::kConvertableToTraceFormat:
+      return "ConvertableToTraceFormat";
+    case TraceEventMemoryOverhead::kHeapProfilerAllocationRegister:
+      return "AllocationRegister";
+    case TraceEventMemoryOverhead::kHeapProfilerTypeNameDeduplicator:
+      return "TypeNameDeduplicator";
+    case TraceEventMemoryOverhead::kHeapProfilerStackFrameDeduplicator:
+      return "StackFrameDeduplicator";
+    case TraceEventMemoryOverhead::kStdString:
+      return "std::string";
+    case TraceEventMemoryOverhead::kBaseValue:
+      return "base::Value";
+    case TraceEventMemoryOverhead::kTraceEventMemoryOverhead:
+      return "TraceEventMemoryOverhead";
+    case TraceEventMemoryOverhead::kLast:
+      NOTREACHED();
+  }
+  NOTREACHED();
+  return "BUG";
+}
+
+}  // namespace
+
+TraceEventMemoryOverhead::TraceEventMemoryOverhead() : allocated_objects_() {}
+
+TraceEventMemoryOverhead::~TraceEventMemoryOverhead() = default;
+
+void TraceEventMemoryOverhead::AddInternal(ObjectType object_type,
+                                           size_t count,
+                                           size_t allocated_size_in_bytes,
+                                           size_t resident_size_in_bytes) {
+  ObjectCountAndSize& count_and_size =
+      allocated_objects_[static_cast<uint32_t>(object_type)];
+  count_and_size.count += count;
+  count_and_size.allocated_size_in_bytes += allocated_size_in_bytes;
+  count_and_size.resident_size_in_bytes += resident_size_in_bytes;
+}
+
+void TraceEventMemoryOverhead::Add(ObjectType object_type,
+                                   size_t allocated_size_in_bytes) {
+  Add(object_type, allocated_size_in_bytes, allocated_size_in_bytes);
+}
+
+void TraceEventMemoryOverhead::Add(ObjectType object_type,
+                                   size_t allocated_size_in_bytes,
+                                   size_t resident_size_in_bytes) {
+  AddInternal(object_type, 1, allocated_size_in_bytes, resident_size_in_bytes);
+}
+
+void TraceEventMemoryOverhead::AddString(const std::string& str) {
+  Add(kStdString, EstimateMemoryUsage(str));
+}
+
+void TraceEventMemoryOverhead::AddRefCountedString(
+    const RefCountedString& str) {
+  Add(kOther, sizeof(RefCountedString));
+  AddString(str.data());
+}
+
+void TraceEventMemoryOverhead::AddValue(const Value& value) {
+  switch (value.type()) {
+    case Value::Type::NONE:
+    case Value::Type::BOOLEAN:
+    case Value::Type::INTEGER:
+    case Value::Type::DOUBLE:
+      Add(kBaseValue, sizeof(Value));
+      break;
+
+    case Value::Type::STRING: {
+      const Value* string_value = nullptr;
+      value.GetAsString(&string_value);
+      Add(kBaseValue, sizeof(Value));
+      AddString(string_value->GetString());
+    } break;
+
+    case Value::Type::BINARY: {
+      Add(kBaseValue, sizeof(Value) + value.GetBlob().size());
+    } break;
+
+    case Value::Type::DICTIONARY: {
+      const DictionaryValue* dictionary_value = nullptr;
+      value.GetAsDictionary(&dictionary_value);
+      Add(kBaseValue, sizeof(DictionaryValue));
+      for (DictionaryValue::Iterator it(*dictionary_value); !it.IsAtEnd();
+           it.Advance()) {
+        AddString(it.key());
+        AddValue(it.value());
+      }
+    } break;
+
+    case Value::Type::LIST: {
+      const ListValue* list_value = nullptr;
+      value.GetAsList(&list_value);
+      Add(kBaseValue, sizeof(ListValue));
+      for (const auto& v : *list_value)
+        AddValue(v);
+    } break;
+
+    default:
+      NOTREACHED();
+  }
+}
+
+void TraceEventMemoryOverhead::AddSelf() {
+  Add(kTraceEventMemoryOverhead, sizeof(*this));
+}
+
+size_t TraceEventMemoryOverhead::GetCount(ObjectType object_type) const {
+  CHECK(object_type < kLast);
+  return allocated_objects_[static_cast<uint32_t>(object_type)].count;
+}
+
+void TraceEventMemoryOverhead::Update(const TraceEventMemoryOverhead& other) {
+  for (uint32_t i = 0; i < kLast; i++) {
+    const ObjectCountAndSize& other_entry = other.allocated_objects_[i];
+    AddInternal(static_cast<ObjectType>(i), other_entry.count,
+                other_entry.allocated_size_in_bytes,
+                other_entry.resident_size_in_bytes);
+  }
+}
+
+void TraceEventMemoryOverhead::DumpInto(const char* base_name,
+                                        ProcessMemoryDump* pmd) const {
+  for (uint32_t i = 0; i < kLast; i++) {
+    const ObjectCountAndSize& count_and_size = allocated_objects_[i];
+    if (count_and_size.allocated_size_in_bytes == 0)
+      continue;
+    std::string dump_name = StringPrintf(
+        "%s/%s", base_name, ObjectTypeToString(static_cast<ObjectType>(i)));
+    MemoryAllocatorDump* mad = pmd->CreateAllocatorDump(dump_name);
+    mad->AddScalar(MemoryAllocatorDump::kNameSize,
+                   MemoryAllocatorDump::kUnitsBytes,
+                   count_and_size.allocated_size_in_bytes);
+    mad->AddScalar("resident_size", MemoryAllocatorDump::kUnitsBytes,
+                   count_and_size.resident_size_in_bytes);
+    mad->AddScalar(MemoryAllocatorDump::kNameObjectCount,
+                   MemoryAllocatorDump::kUnitsObjects, count_and_size.count);
+  }
+}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/trace_event_memory_overhead.h b/base/trace_event/trace_event_memory_overhead.h
new file mode 100644
index 0000000..1587a30
--- /dev/null
+++ b/base/trace_event/trace_event_memory_overhead.h
@@ -0,0 +1,94 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_EVENT_MEMORY_OVERHEAD_H_
+#define BASE_TRACE_EVENT_TRACE_EVENT_MEMORY_OVERHEAD_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <unordered_map>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+
+namespace base {
+
+class RefCountedString;
+class Value;
+
+namespace trace_event {
+
+class ProcessMemoryDump;
+
+// Used to estimate the memory overhead of the tracing infrastructure.
+class BASE_EXPORT TraceEventMemoryOverhead {
+ public:
+  enum ObjectType : uint32_t {
+    kOther = 0,
+    kTraceBuffer,
+    kTraceBufferChunk,
+    kTraceEvent,
+    kUnusedTraceEvent,
+    kTracedValue,
+    kConvertableToTraceFormat,
+    kHeapProfilerAllocationRegister,
+    kHeapProfilerTypeNameDeduplicator,
+    kHeapProfilerStackFrameDeduplicator,
+    kStdString,
+    kBaseValue,
+    kTraceEventMemoryOverhead,
+    kLast
+  };
+
+  TraceEventMemoryOverhead();
+  ~TraceEventMemoryOverhead();
+
+  // Use this method to account for the overhead of an object for which an
+  // estimate is known for both the allocated and resident memory.
+  void Add(ObjectType object_type,
+           size_t allocated_size_in_bytes,
+           size_t resident_size_in_bytes);
+
+  // Similar to Add() above, but assumes that
+  // |resident_size_in_bytes| == |allocated_size_in_bytes|.
+  void Add(ObjectType object_type, size_t allocated_size_in_bytes);
+
+  // Specialized profiling functions for commonly used object types.
+  void AddString(const std::string& str);
+  void AddValue(const Value& value);
+  void AddRefCountedString(const RefCountedString& str);
+
+  // Call this after all the Add* methods above to account for the memory used
+  // by this TraceEventMemoryOverhead instance itself.
+  void AddSelf();
+
+  // Retrieves the count, that is, the count of Add*(|object_type|, ...) calls.
+  size_t GetCount(ObjectType object_type) const;
+
+  // Adds up and merges all the values from |other| to this instance.
+  void Update(const TraceEventMemoryOverhead& other);
+
+  void DumpInto(const char* base_name, ProcessMemoryDump* pmd) const;
+
+ private:
+  struct ObjectCountAndSize {
+    size_t count;
+    size_t allocated_size_in_bytes;
+    size_t resident_size_in_bytes;
+  };
+  ObjectCountAndSize allocated_objects_[ObjectType::kLast];
+
+  void AddInternal(ObjectType object_type,
+                   size_t count,
+                   size_t allocated_size_in_bytes,
+                   size_t resident_size_in_bytes);
+
+  DISALLOW_COPY_AND_ASSIGN(TraceEventMemoryOverhead);
+};
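+
+// Typical usage (an illustrative sketch; |pmd| stands for a caller-provided
+// ProcessMemoryDump*):
+//
+//   TraceEventMemoryOverhead overhead;
+//   overhead.Add(TraceEventMemoryOverhead::kTraceEvent, sizeof(TraceEvent));
+//   overhead.AddString(some_string);  // some_string: any std::string.
+//   overhead.AddSelf();
+//   overhead.DumpInto("tracing/overhead", pmd);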
+
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_TRACE_EVENT_MEMORY_OVERHEAD_H_
diff --git a/base/trace_event/trace_event_system_stats_monitor.cc b/base/trace_event/trace_event_system_stats_monitor.cc
new file mode 100644
index 0000000..7e082f3
--- /dev/null
+++ b/base/trace_event/trace_event_system_stats_monitor.cc
@@ -0,0 +1,132 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_event_system_stats_monitor.h"
+
+#include <memory>
+
+#include "base/debug/leak_annotations.h"
+#include "base/json/json_writer.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/process/process_metrics.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_util.h"
+#include "base/threading/thread_local_storage.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/trace_event/trace_event.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+/////////////////////////////////////////////////////////////////////////////
+// Holds profiled system stats until the tracing system needs to serialize it.
+class SystemStatsHolder : public base::trace_event::ConvertableToTraceFormat {
+ public:
+  SystemStatsHolder() = default;
+  ~SystemStatsHolder() override = default;
+
+  // Fills |system_stats_| with profiled system memory and disk stats.
+  // Uses the previous stats to compute rates if this is not the first profile.
+  void GetSystemProfilingStats();
+
+  // base::trace_event::ConvertableToTraceFormat overrides:
+  void AppendAsTraceFormat(std::string* out) const override {
+    AppendSystemProfileAsTraceFormat(system_stats_, out);
+  }
+
+ private:
+  SystemMetrics system_stats_;
+
+  DISALLOW_COPY_AND_ASSIGN(SystemStatsHolder);
+};
+
+void SystemStatsHolder::GetSystemProfilingStats() {
+  system_stats_ = SystemMetrics::Sample();
+}
+
+}  // namespace
+
+//////////////////////////////////////////////////////////////////////////////
+
+TraceEventSystemStatsMonitor::TraceEventSystemStatsMonitor(
+    scoped_refptr<SingleThreadTaskRunner> task_runner)
+    : task_runner_(task_runner),
+      weak_factory_(this) {
+  // Force the "system_stats" category to show up in the trace viewer.
+  TraceLog::GetCategoryGroupEnabled(TRACE_DISABLED_BY_DEFAULT("system_stats"));
+
+  // Allow this to be instantiated on unsupported platforms, but don't run.
+  TraceLog::GetInstance()->AddEnabledStateObserver(this);
+}
+
+TraceEventSystemStatsMonitor::~TraceEventSystemStatsMonitor() {
+  if (dump_timer_.IsRunning())
+    StopProfiling();
+  TraceLog::GetInstance()->RemoveEnabledStateObserver(this);
+}
+
+void TraceEventSystemStatsMonitor::OnTraceLogEnabled() {
+  // Check to see if system tracing is enabled.
+  bool enabled;
+  TRACE_EVENT_CATEGORY_GROUP_ENABLED(
+      TRACE_DISABLED_BY_DEFAULT("system_stats"), &enabled);
+  if (!enabled)
+    return;
+  task_runner_->PostTask(
+      FROM_HERE, base::BindOnce(&TraceEventSystemStatsMonitor::StartProfiling,
+                                weak_factory_.GetWeakPtr()));
+}
+
+void TraceEventSystemStatsMonitor::OnTraceLogDisabled() {
+  task_runner_->PostTask(
+      FROM_HERE, base::BindOnce(&TraceEventSystemStatsMonitor::StopProfiling,
+                                weak_factory_.GetWeakPtr()));
+}
+
+void TraceEventSystemStatsMonitor::StartProfiling() {
+  // Guard against the tracing framework sending the enabled notification more
+  // than once.
+  if (dump_timer_.IsRunning())
+    return;
+
+  dump_timer_.Start(
+      FROM_HERE, TimeDelta::FromMilliseconds(kSamplingIntervalMilliseconds),
+      base::Bind(&TraceEventSystemStatsMonitor::DumpSystemStats,
+                 weak_factory_.GetWeakPtr()));
+}
+
+// If system tracing is enabled, dumps a profile to the tracing system.
+void TraceEventSystemStatsMonitor::DumpSystemStats() {
+  std::unique_ptr<SystemStatsHolder> dump_holder(new SystemStatsHolder());
+  dump_holder->GetSystemProfilingStats();
+
+  TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
+      TRACE_DISABLED_BY_DEFAULT("system_stats"),
+      "base::TraceEventSystemStatsMonitor::SystemStats", this,
+      std::move(dump_holder));
+}
+
+void TraceEventSystemStatsMonitor::StopProfiling() {
+  dump_timer_.Stop();
+}
+
+bool TraceEventSystemStatsMonitor::IsTimerRunningForTest() const {
+  return dump_timer_.IsRunning();
+}
+
+void AppendSystemProfileAsTraceFormat(const SystemMetrics& system_metrics,
+                                      std::string* output) {
+  std::string tmp;
+  base::JSONWriter::Write(*system_metrics.ToValue(), &tmp);
+  *output += tmp;
+}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/trace_event_system_stats_monitor.h b/base/trace_event/trace_event_system_stats_monitor.h
new file mode 100644
index 0000000..14aa568
--- /dev/null
+++ b/base/trace_event/trace_event_system_stats_monitor.h
@@ -0,0 +1,76 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_EVENT_SYSTEM_STATS_MONITOR_H_
+#define BASE_TRACE_EVENT_TRACE_EVENT_SYSTEM_STATS_MONITOR_H_
+
+#include "base/base_export.h"
+#include "base/gtest_prod_util.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/weak_ptr.h"
+#include "base/process/process_metrics.h"
+#include "base/timer/timer.h"
+#include "base/trace_event/trace_log.h"
+
+namespace base {
+
+class SingleThreadTaskRunner;
+
+namespace trace_event {
+
+// Watches for chrome://tracing to be enabled or disabled. When tracing is
+// enabled, also enables system events profiling. This class is the preferred
+// way to turn system tracing on and off.
+class BASE_EXPORT TraceEventSystemStatsMonitor
+    : public TraceLog::EnabledStateObserver {
+ public:
+  // Length of time interval between stat profiles.
+  static const int kSamplingIntervalMilliseconds = 2000;
+
+  // |task_runner| must run tasks on the primary thread of the client
+  // process, e.g. the UI thread in a browser.
+  explicit TraceEventSystemStatsMonitor(
+      scoped_refptr<SingleThreadTaskRunner> task_runner);
+
+  ~TraceEventSystemStatsMonitor() override;
+
+  // base::trace_event::TraceLog::EnabledStateChangedObserver overrides:
+  void OnTraceLogEnabled() override;
+  void OnTraceLogDisabled() override;
+
+  // Retrieves system profiling at the current time.
+  void DumpSystemStats();
+
+ private:
+  FRIEND_TEST_ALL_PREFIXES(TraceSystemStatsMonitorTest,
+                           TraceEventSystemStatsMonitor);
+
+  bool IsTimerRunningForTest() const;
+
+  void StartProfiling();
+
+  void StopProfiling();
+
+  // Ensures the observer starts and stops tracing on the primary thread.
+  scoped_refptr<SingleThreadTaskRunner> task_runner_;
+
+  // Timer to schedule system profile dumps.
+  RepeatingTimer dump_timer_;
+
+  WeakPtrFactory<TraceEventSystemStatsMonitor> weak_factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(TraceEventSystemStatsMonitor);
+};
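+
+// A minimal usage sketch (illustrative, not part of this API; it assumes the
+// embedder can reach the main thread's runner, e.g. via
+// base::ThreadTaskRunnerHandle::Get()):
+//
+//   auto monitor = std::make_unique<TraceEventSystemStatsMonitor>(
+//       base::ThreadTaskRunnerHandle::Get());
+//   // From here on, enabling tracing through TraceLog starts a stats dump
+//   // every kSamplingIntervalMilliseconds; disabling tracing stops it.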
+
+// Converts the system memory profiling stats in |system_metrics| to
+// trace-event-compatible JSON and appends it to |output|. Visible for
+// testing.
+BASE_EXPORT void AppendSystemProfileAsTraceFormat(
+    const SystemMetrics& system_metrics,
+    std::string* output);
+
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_TRACE_EVENT_SYSTEM_STATS_MONITOR_H_
diff --git a/base/trace_event/trace_event_system_stats_monitor_unittest.cc b/base/trace_event/trace_event_system_stats_monitor_unittest.cc
new file mode 100644
index 0000000..52a05ba
--- /dev/null
+++ b/base/trace_event/trace_event_system_stats_monitor_unittest.cc
@@ -0,0 +1,68 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_event_system_stats_monitor.h"
+
+#include <sstream>
+#include <string>
+
+#include "base/macros.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/trace_event/trace_event_impl.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace trace_event {
+
+#if !defined(OS_IOS)
+// Tests for the system stats monitor.
+// Exists as a class so it can be a friend of TraceEventSystemStatsMonitor.
+class TraceSystemStatsMonitorTest : public testing::Test {
+ public:
+  TraceSystemStatsMonitorTest() = default;
+  ~TraceSystemStatsMonitorTest() override = default;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(TraceSystemStatsMonitorTest);
+};
+
+//////////////////////////////////////////////////////////////////////////////
+
+TEST_F(TraceSystemStatsMonitorTest, TraceEventSystemStatsMonitor) {
+  MessageLoop message_loop;
+
+  // Start with no observers of the TraceLog.
+  EXPECT_EQ(0u, TraceLog::GetInstance()->GetObserverCountForTest());
+
+  // Creating a system stats monitor adds it to the TraceLog observer list.
+  std::unique_ptr<TraceEventSystemStatsMonitor> system_stats_monitor(
+      new TraceEventSystemStatsMonitor(message_loop.task_runner()));
+  EXPECT_EQ(1u, TraceLog::GetInstance()->GetObserverCountForTest());
+  EXPECT_TRUE(
+      TraceLog::GetInstance()->HasEnabledStateObserver(
+          system_stats_monitor.get()));
+
+  // By default the observer isn't dumping memory profiles.
+  EXPECT_FALSE(system_stats_monitor->IsTimerRunningForTest());
+
+  // Simulate enabling tracing.
+  system_stats_monitor->StartProfiling();
+  RunLoop().RunUntilIdle();
+  EXPECT_TRUE(system_stats_monitor->IsTimerRunningForTest());
+
+  // Simulate disabling tracing.
+  system_stats_monitor->StopProfiling();
+  RunLoop().RunUntilIdle();
+  EXPECT_FALSE(system_stats_monitor->IsTimerRunningForTest());
+
+  // Deleting the observer removes it from the TraceLog observer list.
+  system_stats_monitor.reset();
+  EXPECT_EQ(0u, TraceLog::GetInstance()->GetObserverCountForTest());
+}
+#endif  // !defined(OS_IOS)
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/trace_event_unittest.cc b/base/trace_event/trace_event_unittest.cc
new file mode 100644
index 0000000..a413ee5
--- /dev/null
+++ b/base/trace_event/trace_event_unittest.cc
@@ -0,0 +1,3169 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_event.h"
+
+#include <math.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <cstdlib>
+#include <memory>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/command_line.h"
+#include "base/json/json_reader.h"
+#include "base/json/json_writer.h"
+#include "base/location.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/memory/ref_counted_memory.h"
+#include "base/memory/singleton.h"
+#include "base/process/process_handle.h"
+#include "base/single_thread_task_runner.h"
+#include "base/stl_util.h"
+#include "base/strings/pattern.h"
+#include "base/strings/stringprintf.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread.h"
+#include "base/time/time.h"
+#include "base/trace_event/event_name_filter.h"
+#include "base/trace_event/heap_profiler_event_filter.h"
+#include "base/trace_event/trace_buffer.h"
+#include "base/trace_event/trace_event.h"
+#include "base/trace_event/trace_event_filter.h"
+#include "base/trace_event/trace_event_filter_test_utils.h"
+#include "base/values.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+enum CompareOp {
+  IS_EQUAL,
+  IS_NOT_EQUAL,
+};
+
+struct JsonKeyValue {
+  const char* key;
+  const char* value;
+  CompareOp op;
+};
+
+const int kThreadId = 42;
+const int kAsyncId = 5;
+const char kAsyncIdStr[] = "0x5";
+const int kAsyncId2 = 6;
+const char kAsyncId2Str[] = "0x6";
+const int kFlowId = 7;
+const char kFlowIdStr[] = "0x7";
+
+const char kRecordAllCategoryFilter[] = "*";
+
+class TraceEventTestFixture : public testing::Test {
+ public:
+  void OnTraceDataCollected(
+      WaitableEvent* flush_complete_event,
+      const scoped_refptr<base::RefCountedString>& events_str,
+      bool has_more_events);
+  DictionaryValue* FindMatchingTraceEntry(const JsonKeyValue* key_values);
+  DictionaryValue* FindNamePhase(const char* name, const char* phase);
+  DictionaryValue* FindNamePhaseKeyValue(const char* name,
+                                         const char* phase,
+                                         const char* key,
+                                         const char* value);
+  void DropTracedMetadataRecords();
+  bool FindMatchingValue(const char* key,
+                         const char* value);
+  bool FindNonMatchingValue(const char* key,
+                            const char* value);
+  void Clear() {
+    trace_parsed_.Clear();
+    json_output_.json_output.clear();
+  }
+
+  void BeginTrace() {
+    BeginSpecificTrace("*");
+  }
+
+  void BeginSpecificTrace(const std::string& filter) {
+    TraceLog::GetInstance()->SetEnabled(TraceConfig(filter, ""),
+                                        TraceLog::RECORDING_MODE);
+  }
+
+  void CancelTrace() {
+    WaitableEvent flush_complete_event(
+        WaitableEvent::ResetPolicy::AUTOMATIC,
+        WaitableEvent::InitialState::NOT_SIGNALED);
+    CancelTraceAsync(&flush_complete_event);
+    flush_complete_event.Wait();
+  }
+
+  void EndTraceAndFlush() {
+    num_flush_callbacks_ = 0;
+    WaitableEvent flush_complete_event(
+        WaitableEvent::ResetPolicy::AUTOMATIC,
+        WaitableEvent::InitialState::NOT_SIGNALED);
+    EndTraceAndFlushAsync(&flush_complete_event);
+    flush_complete_event.Wait();
+  }
+
+  // Used when testing thread-local buffers, which requires the thread that
+  // initiates the flush to have a message loop.
+  void EndTraceAndFlushInThreadWithMessageLoop() {
+    WaitableEvent flush_complete_event(
+        WaitableEvent::ResetPolicy::AUTOMATIC,
+        WaitableEvent::InitialState::NOT_SIGNALED);
+    Thread flush_thread("flush");
+    flush_thread.Start();
+    flush_thread.task_runner()->PostTask(
+        FROM_HERE,
+        base::BindOnce(&TraceEventTestFixture::EndTraceAndFlushAsync,
+                       base::Unretained(this), &flush_complete_event));
+    flush_complete_event.Wait();
+  }
+
+  void CancelTraceAsync(WaitableEvent* flush_complete_event) {
+    TraceLog::GetInstance()->CancelTracing(
+        base::Bind(&TraceEventTestFixture::OnTraceDataCollected,
+                   base::Unretained(static_cast<TraceEventTestFixture*>(this)),
+                   base::Unretained(flush_complete_event)));
+  }
+
+  void EndTraceAndFlushAsync(WaitableEvent* flush_complete_event) {
+    TraceLog::GetInstance()->SetDisabled(TraceLog::RECORDING_MODE |
+                                         TraceLog::FILTERING_MODE);
+    TraceLog::GetInstance()->Flush(
+        base::Bind(&TraceEventTestFixture::OnTraceDataCollected,
+                   base::Unretained(static_cast<TraceEventTestFixture*>(this)),
+                   base::Unretained(flush_complete_event)));
+  }
+
+  void SetUp() override {
+    const char* name = PlatformThread::GetName();
+    old_thread_name_ = name ? strdup(name) : nullptr;
+
+    TraceLog::ResetForTesting();
+    TraceLog* tracelog = TraceLog::GetInstance();
+    ASSERT_TRUE(tracelog);
+    ASSERT_FALSE(tracelog->IsEnabled());
+    trace_buffer_.SetOutputCallback(json_output_.GetCallback());
+    num_flush_callbacks_ = 0;
+  }
+  void TearDown() override {
+    if (TraceLog::GetInstance())
+      EXPECT_FALSE(TraceLog::GetInstance()->IsEnabled());
+    PlatformThread::SetName(old_thread_name_ ? old_thread_name_ : "");
+    free(old_thread_name_);
+    old_thread_name_ = nullptr;
+    // We want our singleton torn down after each test.
+    TraceLog::ResetForTesting();
+  }
+
+  char* old_thread_name_;
+  ListValue trace_parsed_;
+  TraceResultBuffer trace_buffer_;
+  TraceResultBuffer::SimpleOutput json_output_;
+  size_t num_flush_callbacks_;
+
+ private:
+  // We want our singleton torn down after each test.
+  ShadowingAtExitManager at_exit_manager_;
+  Lock lock_;
+};
+
+void TraceEventTestFixture::OnTraceDataCollected(
+    WaitableEvent* flush_complete_event,
+    const scoped_refptr<base::RefCountedString>& events_str,
+    bool has_more_events) {
+  num_flush_callbacks_++;
+  if (num_flush_callbacks_ > 1) {
+    EXPECT_FALSE(events_str->data().empty());
+  }
+  AutoLock lock(lock_);
+  json_output_.json_output.clear();
+  trace_buffer_.Start();
+  trace_buffer_.AddFragment(events_str->data());
+  trace_buffer_.Finish();
+
+  std::unique_ptr<Value> root =
+      base::JSONReader::Read(json_output_.json_output, JSON_PARSE_RFC);
+
+  if (!root.get()) {
+    LOG(ERROR) << json_output_.json_output;
+  }
+
+  ListValue* root_list = nullptr;
+  ASSERT_TRUE(root.get());
+  ASSERT_TRUE(root->GetAsList(&root_list));
+
+  // Move items into our aggregate collection
+  while (root_list->GetSize()) {
+    std::unique_ptr<Value> item;
+    root_list->Remove(0, &item);
+    trace_parsed_.Append(std::move(item));
+  }
+
+  if (!has_more_events)
+    flush_complete_event->Signal();
+}
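+
+// For reference: each flush callback above delivers a JSON fragment, and the
+// joined fragments parse as a JSON array of event dictionaries. A sketch of
+// one entry (field values illustrative):
+//
+//   {"cat":"all", "name":"TRACE_EVENT0 call", "ph":"X",
+//    "ts":12345, "pid":1, "tid":1, "args":{}}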
+
+static bool CompareJsonValues(const std::string& lhs,
+                              const std::string& rhs,
+                              CompareOp op) {
+  switch (op) {
+    case IS_EQUAL:
+      return lhs == rhs;
+    case IS_NOT_EQUAL:
+      return lhs != rhs;
+    default:
+      CHECK(0);
+  }
+  return false;
+}
+
+static bool IsKeyValueInDict(const JsonKeyValue* key_value,
+                             DictionaryValue* dict) {
+  Value* value = nullptr;
+  std::string value_str;
+  if (dict->Get(key_value->key, &value) &&
+      value->GetAsString(&value_str) &&
+      CompareJsonValues(value_str, key_value->value, key_value->op))
+    return true;
+
+  // Recurse to test arguments
+  DictionaryValue* args_dict = nullptr;
+  dict->GetDictionary("args", &args_dict);
+  if (args_dict)
+    return IsKeyValueInDict(key_value, args_dict);
+
+  return false;
+}
+
+static bool IsAllKeyValueInDict(const JsonKeyValue* key_values,
+                                DictionaryValue* dict) {
+  // Scan all key_values, they must all be present and equal.
+  while (key_values && key_values->key) {
+    if (!IsKeyValueInDict(key_values, dict))
+      return false;
+    ++key_values;
+  }
+  return true;
+}
+
+DictionaryValue* TraceEventTestFixture::FindMatchingTraceEntry(
+    const JsonKeyValue* key_values) {
+  // Scan all items
+  size_t trace_parsed_count = trace_parsed_.GetSize();
+  for (size_t i = 0; i < trace_parsed_count; i++) {
+    Value* value = nullptr;
+    trace_parsed_.Get(i, &value);
+    if (!value || value->type() != Value::Type::DICTIONARY)
+      continue;
+    DictionaryValue* dict = static_cast<DictionaryValue*>(value);
+
+    if (IsAllKeyValueInDict(key_values, dict))
+      return dict;
+  }
+  return nullptr;
+}
+
+void TraceEventTestFixture::DropTracedMetadataRecords() {
+  std::unique_ptr<ListValue> old_trace_parsed(trace_parsed_.CreateDeepCopy());
+  size_t old_trace_parsed_size = old_trace_parsed->GetSize();
+  trace_parsed_.Clear();
+
+  for (size_t i = 0; i < old_trace_parsed_size; i++) {
+    Value* value = nullptr;
+    old_trace_parsed->Get(i, &value);
+    if (!value)
+      continue;
+    if (value->type() != Value::Type::DICTIONARY) {
+      trace_parsed_.Append(value->CreateDeepCopy());
+      continue;
+    }
+    DictionaryValue* dict = static_cast<DictionaryValue*>(value);
+    std::string tmp;
+    if (dict->GetString("ph", &tmp) && tmp == "M")
+      continue;
+
+    trace_parsed_.Append(value->CreateDeepCopy());
+  }
+}
+
+DictionaryValue* TraceEventTestFixture::FindNamePhase(const char* name,
+                                                      const char* phase) {
+  JsonKeyValue key_values[] = {{"name", name, IS_EQUAL},
+                               {"ph", phase, IS_EQUAL},
+                               {nullptr, nullptr, IS_EQUAL}};
+  return FindMatchingTraceEntry(key_values);
+}
+
+DictionaryValue* TraceEventTestFixture::FindNamePhaseKeyValue(
+    const char* name,
+    const char* phase,
+    const char* key,
+    const char* value) {
+  JsonKeyValue key_values[] = {{"name", name, IS_EQUAL},
+                               {"ph", phase, IS_EQUAL},
+                               {key, value, IS_EQUAL},
+                               {nullptr, nullptr, IS_EQUAL}};
+  return FindMatchingTraceEntry(key_values);
+}
+
+bool TraceEventTestFixture::FindMatchingValue(const char* key,
+                                              const char* value) {
+  JsonKeyValue key_values[] = {{key, value, IS_EQUAL},
+                               {nullptr, nullptr, IS_EQUAL}};
+  return FindMatchingTraceEntry(key_values);
+}
+
+bool TraceEventTestFixture::FindNonMatchingValue(const char* key,
+                                                 const char* value) {
+  JsonKeyValue key_values[] = {{key, value, IS_NOT_EQUAL},
+                               {nullptr, nullptr, IS_EQUAL}};
+  return FindMatchingTraceEntry(key_values);
+}
+
+bool IsStringInDict(const char* string_to_match, const DictionaryValue* dict) {
+  for (DictionaryValue::Iterator it(*dict); !it.IsAtEnd(); it.Advance()) {
+    if (it.key().find(string_to_match) != std::string::npos)
+      return true;
+
+    std::string value_str;
+    it.value().GetAsString(&value_str);
+    if (value_str.find(string_to_match) != std::string::npos)
+      return true;
+  }
+
+  // Recurse to test arguments
+  const DictionaryValue* args_dict = nullptr;
+  dict->GetDictionary("args", &args_dict);
+  if (args_dict)
+    return IsStringInDict(string_to_match, args_dict);
+
+  return false;
+}
+
+const DictionaryValue* FindTraceEntry(
+    const ListValue& trace_parsed,
+    const char* string_to_match,
+    const DictionaryValue* match_after_this_item = nullptr) {
+  // Scan all items
+  size_t trace_parsed_count = trace_parsed.GetSize();
+  for (size_t i = 0; i < trace_parsed_count; i++) {
+    const Value* value = nullptr;
+    trace_parsed.Get(i, &value);
+    if (match_after_this_item) {
+      if (value == match_after_this_item)
+        match_after_this_item = nullptr;
+      continue;
+    }
+    if (!value || value->type() != Value::Type::DICTIONARY)
+      continue;
+    const DictionaryValue* dict = static_cast<const DictionaryValue*>(value);
+
+    if (IsStringInDict(string_to_match, dict))
+      return dict;
+  }
+  return nullptr;
+}
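+
+// Usage note: passing a previously returned entry as |match_after_this_item|
+// lets callers walk successive matches for the same string, e.g.
+//
+//   item = FindTraceEntry(trace_parsed, "tracked object 1");        // "N"
+//   item = FindTraceEntry(trace_parsed, "tracked object 1", item);  // "O"
+//
+// as the object-lifetime checks in ValidateAllTraceMacrosCreatedData() do.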
+
+std::vector<const DictionaryValue*> FindTraceEntries(
+    const ListValue& trace_parsed,
+    const char* string_to_match) {
+  std::vector<const DictionaryValue*> hits;
+  size_t trace_parsed_count = trace_parsed.GetSize();
+  for (size_t i = 0; i < trace_parsed_count; i++) {
+    const Value* value = nullptr;
+    trace_parsed.Get(i, &value);
+    if (!value || value->type() != Value::Type::DICTIONARY)
+      continue;
+    const DictionaryValue* dict = static_cast<const DictionaryValue*>(value);
+
+    if (IsStringInDict(string_to_match, dict))
+      hits.push_back(dict);
+  }
+  return hits;
+}
+
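+// Used as category, name, and argument below to check that control
+// characters survive the round trip through the JSON writer (escaped rather
+// than dropped).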
+const char kControlCharacters[] = "\001\002\003\n\r";
+
+void TraceWithAllMacroVariants(WaitableEvent* task_complete_event) {
+  {
+    TRACE_EVENT0("all", "TRACE_EVENT0 call");
+    TRACE_EVENT1("all", "TRACE_EVENT1 call", "name1", "value1");
+    TRACE_EVENT2("all", "TRACE_EVENT2 call",
+                 "name1", "\"value1\"",
+                 "name2", "value\\2");
+
+    TRACE_EVENT_INSTANT0("all", "TRACE_EVENT_INSTANT0 call",
+                         TRACE_EVENT_SCOPE_GLOBAL);
+    TRACE_EVENT_INSTANT1("all", "TRACE_EVENT_INSTANT1 call",
+                         TRACE_EVENT_SCOPE_PROCESS, "name1", "value1");
+    TRACE_EVENT_INSTANT2("all", "TRACE_EVENT_INSTANT2 call",
+                         TRACE_EVENT_SCOPE_THREAD,
+                         "name1", "value1",
+                         "name2", "value2");
+
+    TRACE_EVENT_BEGIN0("all", "TRACE_EVENT_BEGIN0 call");
+    TRACE_EVENT_BEGIN1("all", "TRACE_EVENT_BEGIN1 call", "name1", "value1");
+    TRACE_EVENT_BEGIN2("all", "TRACE_EVENT_BEGIN2 call",
+                       "name1", "value1",
+                       "name2", "value2");
+
+    TRACE_EVENT_END0("all", "TRACE_EVENT_END0 call");
+    TRACE_EVENT_END1("all", "TRACE_EVENT_END1 call", "name1", "value1");
+    TRACE_EVENT_END2("all", "TRACE_EVENT_END2 call",
+                     "name1", "value1",
+                     "name2", "value2");
+
+    TRACE_EVENT_ASYNC_BEGIN0("all", "TRACE_EVENT_ASYNC_BEGIN0 call", kAsyncId);
+    TRACE_EVENT_ASYNC_BEGIN1("all", "TRACE_EVENT_ASYNC_BEGIN1 call", kAsyncId,
+                             "name1", "value1");
+    TRACE_EVENT_ASYNC_BEGIN2("all", "TRACE_EVENT_ASYNC_BEGIN2 call", kAsyncId,
+                             "name1", "value1",
+                             "name2", "value2");
+
+    TRACE_EVENT_ASYNC_STEP_INTO0("all", "TRACE_EVENT_ASYNC_STEP_INTO0 call",
+                                 kAsyncId, "step_begin1");
+    TRACE_EVENT_ASYNC_STEP_INTO1("all", "TRACE_EVENT_ASYNC_STEP_INTO1 call",
+                                 kAsyncId, "step_begin2", "name1", "value1");
+
+    TRACE_EVENT_ASYNC_END0("all", "TRACE_EVENT_ASYNC_END0 call", kAsyncId);
+    TRACE_EVENT_ASYNC_END1("all", "TRACE_EVENT_ASYNC_END1 call", kAsyncId,
+                           "name1", "value1");
+    TRACE_EVENT_ASYNC_END2("all", "TRACE_EVENT_ASYNC_END2 call", kAsyncId,
+                           "name1", "value1",
+                           "name2", "value2");
+
+    TRACE_EVENT_FLOW_BEGIN0("all", "TRACE_EVENT_FLOW_BEGIN0 call", kFlowId);
+    TRACE_EVENT_FLOW_STEP0("all", "TRACE_EVENT_FLOW_STEP0 call",
+                           kFlowId, "step1");
+    TRACE_EVENT_FLOW_END_BIND_TO_ENCLOSING0("all",
+        "TRACE_EVENT_FLOW_END_BIND_TO_ENCLOSING0 call", kFlowId);
+
+    TRACE_COUNTER1("all", "TRACE_COUNTER1 call", 31415);
+    TRACE_COUNTER2("all", "TRACE_COUNTER2 call",
+                   "a", 30000,
+                   "b", 1415);
+
+    TRACE_COUNTER_WITH_TIMESTAMP1("all", "TRACE_COUNTER_WITH_TIMESTAMP1 call",
+                                  TimeTicks::FromInternalValue(42), 31415);
+    TRACE_COUNTER_WITH_TIMESTAMP2("all", "TRACE_COUNTER_WITH_TIMESTAMP2 call",
+                                  TimeTicks::FromInternalValue(42),
+                                  "a", 30000, "b", 1415);
+
+    TRACE_COUNTER_ID1("all", "TRACE_COUNTER_ID1 call", 0x319009, 31415);
+    TRACE_COUNTER_ID2("all", "TRACE_COUNTER_ID2 call", 0x319009,
+                      "a", 30000, "b", 1415);
+
+    TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP0("all",
+        "TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP0 call",
+        kAsyncId, kThreadId, TimeTicks::FromInternalValue(12345));
+    TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP0("all",
+        "TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP0 call",
+        kAsyncId, kThreadId, TimeTicks::FromInternalValue(23456));
+
+    TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0("all",
+        "TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0 call",
+        kAsyncId2, kThreadId, TimeTicks::FromInternalValue(34567));
+    TRACE_EVENT_ASYNC_STEP_PAST0("all", "TRACE_EVENT_ASYNC_STEP_PAST0 call",
+                                 kAsyncId2, "step_end1");
+    TRACE_EVENT_ASYNC_STEP_PAST1("all", "TRACE_EVENT_ASYNC_STEP_PAST1 call",
+                                 kAsyncId2, "step_end2", "name1", "value1");
+
+    TRACE_EVENT_END_WITH_ID_TID_AND_TIMESTAMP0("all",
+        "TRACE_EVENT_END_WITH_ID_TID_AND_TIMESTAMP0 call",
+        kAsyncId2, kThreadId, TimeTicks::FromInternalValue(45678));
+
+    TRACE_EVENT_OBJECT_CREATED_WITH_ID("all", "tracked object 1", 0x42);
+    TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
+        "all", "tracked object 1", 0x42, "hello");
+    TRACE_EVENT_OBJECT_DELETED_WITH_ID("all", "tracked object 1", 0x42);
+
+    TraceScopedTrackableObject<int> trackable("all", "tracked object 2",
+                                              0x2128506);
+    trackable.snapshot("world");
+
+    TRACE_EVENT_OBJECT_CREATED_WITH_ID(
+        "all", "tracked object 3", TRACE_ID_WITH_SCOPE("scope", 0x42));
+    TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
+        "all", "tracked object 3", TRACE_ID_WITH_SCOPE("scope", 0x42), "hello");
+    TRACE_EVENT_OBJECT_DELETED_WITH_ID(
+        "all", "tracked object 3", TRACE_ID_WITH_SCOPE("scope", 0x42));
+
+    TRACE_EVENT1(kControlCharacters, kControlCharacters,
+                 kControlCharacters, kControlCharacters);
+
+    uint64_t context_id = 0x20151021;
+
+    TRACE_EVENT_ENTER_CONTEXT("all", "TRACE_EVENT_ENTER_CONTEXT call",
+                              TRACE_ID_WITH_SCOPE("scope", context_id));
+    TRACE_EVENT_LEAVE_CONTEXT("all", "TRACE_EVENT_LEAVE_CONTEXT call",
+                              TRACE_ID_WITH_SCOPE("scope", context_id));
+    TRACE_EVENT_SCOPED_CONTEXT("disabled-by-default-cat",
+                               "TRACE_EVENT_SCOPED_CONTEXT disabled call",
+                               context_id);
+    TRACE_EVENT_SCOPED_CONTEXT("all", "TRACE_EVENT_SCOPED_CONTEXT call",
+                               context_id);
+
+    TRACE_LINK_IDS("all", "TRACE_LINK_IDS simple call", 0x1000, 0x2000);
+    TRACE_LINK_IDS("all", "TRACE_LINK_IDS scoped call",
+                   TRACE_ID_WITH_SCOPE("scope 1", 0x1000),
+                   TRACE_ID_WITH_SCOPE("scope 2", 0x2000));
+    TRACE_LINK_IDS("all", "TRACE_LINK_IDS to a local ID", 0x1000,
+                   TRACE_ID_LOCAL(0x2000));
+    TRACE_LINK_IDS("all", "TRACE_LINK_IDS to a global ID", 0x1000,
+                   TRACE_ID_GLOBAL(0x2000));
+    TRACE_LINK_IDS("all", "TRACE_LINK_IDS to a composite ID", 0x1000,
+                   TRACE_ID_WITH_SCOPE("scope 1", 0x2000, 0x3000));
+
+    TRACE_EVENT_ASYNC_BEGIN0("all", "async default process scope", 0x1000);
+    TRACE_EVENT_ASYNC_BEGIN0("all", "async local id", TRACE_ID_LOCAL(0x2000));
+    TRACE_EVENT_ASYNC_BEGIN0("all", "async global id", TRACE_ID_GLOBAL(0x3000));
+    TRACE_EVENT_ASYNC_BEGIN0("all", "async global id with scope string",
+                             TRACE_ID_WITH_SCOPE("scope string",
+                                                 TRACE_ID_GLOBAL(0x4000)));
+  }  // Scope close causes TRACE_EVENT0 etc. to send their END events.
+
+  if (task_complete_event)
+    task_complete_event->Signal();
+}
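+
+// Every macro exercised above is validated field-by-field in
+// ValidateAllTraceMacrosCreatedData() below; the two functions are meant to
+// be kept in sync when macro coverage changes.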
+
+void ValidateAllTraceMacrosCreatedData(const ListValue& trace_parsed) {
+  const DictionaryValue* item = nullptr;
+
+#define EXPECT_FIND_(string) \
+    item = FindTraceEntry(trace_parsed, string); \
+    EXPECT_TRUE(item);
+#define EXPECT_NOT_FIND_(string) \
+    item = FindTraceEntry(trace_parsed, string); \
+    EXPECT_FALSE(item);
+#define EXPECT_SUB_FIND_(string) \
+    if (item) \
+      EXPECT_TRUE(IsStringInDict(string, item));
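+
+// Reading the checks below: EXPECT_FIND_ locates an event whose JSON
+// contains |string| and binds it to |item|; EXPECT_SUB_FIND_ then checks
+// that the same |item| also contains |string| among its keys, values, or
+// args.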
+
+  EXPECT_FIND_("TRACE_EVENT0 call");
+  {
+    std::string ph;
+    std::string ph_end;
+    EXPECT_TRUE((item = FindTraceEntry(trace_parsed, "TRACE_EVENT0 call")));
+    EXPECT_TRUE((item && item->GetString("ph", &ph)));
+    EXPECT_EQ("X", ph);
+    item = FindTraceEntry(trace_parsed, "TRACE_EVENT0 call", item);
+    EXPECT_FALSE(item);
+  }
+  EXPECT_FIND_("TRACE_EVENT1 call");
+  EXPECT_SUB_FIND_("name1");
+  EXPECT_SUB_FIND_("value1");
+  EXPECT_FIND_("TRACE_EVENT2 call");
+  EXPECT_SUB_FIND_("name1");
+  EXPECT_SUB_FIND_("\"value1\"");
+  EXPECT_SUB_FIND_("name2");
+  EXPECT_SUB_FIND_("value\\2");
+
+  EXPECT_FIND_("TRACE_EVENT_INSTANT0 call");
+  {
+    std::string scope;
+    EXPECT_TRUE((item && item->GetString("s", &scope)));
+    EXPECT_EQ("g", scope);
+  }
+  EXPECT_FIND_("TRACE_EVENT_INSTANT1 call");
+  {
+    std::string scope;
+    EXPECT_TRUE((item && item->GetString("s", &scope)));
+    EXPECT_EQ("p", scope);
+  }
+  EXPECT_SUB_FIND_("name1");
+  EXPECT_SUB_FIND_("value1");
+  EXPECT_FIND_("TRACE_EVENT_INSTANT2 call");
+  {
+    std::string scope;
+    EXPECT_TRUE((item && item->GetString("s", &scope)));
+    EXPECT_EQ("t", scope);
+  }
+  EXPECT_SUB_FIND_("name1");
+  EXPECT_SUB_FIND_("value1");
+  EXPECT_SUB_FIND_("name2");
+  EXPECT_SUB_FIND_("value2");
+
+  EXPECT_FIND_("TRACE_EVENT_BEGIN0 call");
+  EXPECT_FIND_("TRACE_EVENT_BEGIN1 call");
+  EXPECT_SUB_FIND_("name1");
+  EXPECT_SUB_FIND_("value1");
+  EXPECT_FIND_("TRACE_EVENT_BEGIN2 call");
+  EXPECT_SUB_FIND_("name1");
+  EXPECT_SUB_FIND_("value1");
+  EXPECT_SUB_FIND_("name2");
+  EXPECT_SUB_FIND_("value2");
+
+  EXPECT_FIND_("TRACE_EVENT_END0 call");
+  EXPECT_FIND_("TRACE_EVENT_END1 call");
+  EXPECT_SUB_FIND_("name1");
+  EXPECT_SUB_FIND_("value1");
+  EXPECT_FIND_("TRACE_EVENT_END2 call");
+  EXPECT_SUB_FIND_("name1");
+  EXPECT_SUB_FIND_("value1");
+  EXPECT_SUB_FIND_("name2");
+  EXPECT_SUB_FIND_("value2");
+
+  EXPECT_FIND_("TRACE_EVENT_ASYNC_BEGIN0 call");
+  EXPECT_SUB_FIND_("id");
+  EXPECT_SUB_FIND_(kAsyncIdStr);
+  EXPECT_FIND_("TRACE_EVENT_ASYNC_BEGIN1 call");
+  EXPECT_SUB_FIND_("id");
+  EXPECT_SUB_FIND_(kAsyncIdStr);
+  EXPECT_SUB_FIND_("name1");
+  EXPECT_SUB_FIND_("value1");
+  EXPECT_FIND_("TRACE_EVENT_ASYNC_BEGIN2 call");
+  EXPECT_SUB_FIND_("id");
+  EXPECT_SUB_FIND_(kAsyncIdStr);
+  EXPECT_SUB_FIND_("name1");
+  EXPECT_SUB_FIND_("value1");
+  EXPECT_SUB_FIND_("name2");
+  EXPECT_SUB_FIND_("value2");
+
+  EXPECT_FIND_("TRACE_EVENT_ASYNC_STEP_INTO0 call");
+  EXPECT_SUB_FIND_("id");
+  EXPECT_SUB_FIND_(kAsyncIdStr);
+  EXPECT_SUB_FIND_("step_begin1");
+  EXPECT_FIND_("TRACE_EVENT_ASYNC_STEP_INTO1 call");
+  EXPECT_SUB_FIND_("id");
+  EXPECT_SUB_FIND_(kAsyncIdStr);
+  EXPECT_SUB_FIND_("step_begin2");
+  EXPECT_SUB_FIND_("name1");
+  EXPECT_SUB_FIND_("value1");
+
+  EXPECT_FIND_("TRACE_EVENT_ASYNC_END0 call");
+  EXPECT_SUB_FIND_("id");
+  EXPECT_SUB_FIND_(kAsyncIdStr);
+  EXPECT_FIND_("TRACE_EVENT_ASYNC_END1 call");
+  EXPECT_SUB_FIND_("id");
+  EXPECT_SUB_FIND_(kAsyncIdStr);
+  EXPECT_SUB_FIND_("name1");
+  EXPECT_SUB_FIND_("value1");
+  EXPECT_FIND_("TRACE_EVENT_ASYNC_END2 call");
+  EXPECT_SUB_FIND_("id");
+  EXPECT_SUB_FIND_(kAsyncIdStr);
+  EXPECT_SUB_FIND_("name1");
+  EXPECT_SUB_FIND_("value1");
+  EXPECT_SUB_FIND_("name2");
+  EXPECT_SUB_FIND_("value2");
+
+  EXPECT_FIND_("TRACE_EVENT_FLOW_BEGIN0 call");
+  EXPECT_SUB_FIND_("id");
+  EXPECT_SUB_FIND_(kFlowIdStr);
+  EXPECT_FIND_("TRACE_EVENT_FLOW_STEP0 call");
+  EXPECT_SUB_FIND_("id");
+  EXPECT_SUB_FIND_(kFlowIdStr);
+  EXPECT_SUB_FIND_("step1");
+  EXPECT_FIND_("TRACE_EVENT_FLOW_END_BIND_TO_ENCLOSING0 call");
+  EXPECT_SUB_FIND_("id");
+  EXPECT_SUB_FIND_(kFlowIdStr);
+
+  EXPECT_FIND_("TRACE_COUNTER1 call");
+  {
+    std::string ph;
+    EXPECT_TRUE((item && item->GetString("ph", &ph)));
+    EXPECT_EQ("C", ph);
+
+    int value;
+    EXPECT_TRUE((item && item->GetInteger("args.value", &value)));
+    EXPECT_EQ(31415, value);
+  }
+
+  EXPECT_FIND_("TRACE_COUNTER2 call");
+  {
+    std::string ph;
+    EXPECT_TRUE((item && item->GetString("ph", &ph)));
+    EXPECT_EQ("C", ph);
+
+    int value;
+    EXPECT_TRUE((item && item->GetInteger("args.a", &value)));
+    EXPECT_EQ(30000, value);
+
+    EXPECT_TRUE((item && item->GetInteger("args.b", &value)));
+    EXPECT_EQ(1415, value);
+  }
+
+  EXPECT_FIND_("TRACE_COUNTER_WITH_TIMESTAMP1 call");
+  {
+    std::string ph;
+    EXPECT_TRUE((item && item->GetString("ph", &ph)));
+    EXPECT_EQ("C", ph);
+
+    int value;
+    EXPECT_TRUE((item && item->GetInteger("args.value", &value)));
+    EXPECT_EQ(31415, value);
+
+    int ts;
+    EXPECT_TRUE((item && item->GetInteger("ts", &ts)));
+    EXPECT_EQ(42, ts);
+  }
+
+  EXPECT_FIND_("TRACE_COUNTER_WITH_TIMESTAMP2 call");
+  {
+    std::string ph;
+    EXPECT_TRUE((item && item->GetString("ph", &ph)));
+    EXPECT_EQ("C", ph);
+
+    int value;
+    EXPECT_TRUE((item && item->GetInteger("args.a", &value)));
+    EXPECT_EQ(30000, value);
+
+    EXPECT_TRUE((item && item->GetInteger("args.b", &value)));
+    EXPECT_EQ(1415, value);
+
+    int ts;
+    EXPECT_TRUE((item && item->GetInteger("ts", &ts)));
+    EXPECT_EQ(42, ts);
+  }
+
+  EXPECT_FIND_("TRACE_COUNTER_ID1 call");
+  {
+    std::string id;
+    EXPECT_TRUE((item && item->GetString("id", &id)));
+    EXPECT_EQ("0x319009", id);
+
+    std::string ph;
+    EXPECT_TRUE((item && item->GetString("ph", &ph)));
+    EXPECT_EQ("C", ph);
+
+    int value;
+    EXPECT_TRUE((item && item->GetInteger("args.value", &value)));
+    EXPECT_EQ(31415, value);
+  }
+
+  EXPECT_FIND_("TRACE_COUNTER_ID2 call");
+  {
+    std::string id;
+    EXPECT_TRUE((item && item->GetString("id", &id)));
+    EXPECT_EQ("0x319009", id);
+
+    std::string ph;
+    EXPECT_TRUE((item && item->GetString("ph", &ph)));
+    EXPECT_EQ("C", ph);
+
+    int value;
+    EXPECT_TRUE((item && item->GetInteger("args.a", &value)));
+    EXPECT_EQ(30000, value);
+
+    EXPECT_TRUE((item && item->GetInteger("args.b", &value)));
+    EXPECT_EQ(1415, value);
+  }
+
+  EXPECT_FIND_("TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP0 call");
+  {
+    int val;
+    EXPECT_TRUE((item && item->GetInteger("ts", &val)));
+    EXPECT_EQ(12345, val);
+    EXPECT_TRUE((item && item->GetInteger("tid", &val)));
+    EXPECT_EQ(kThreadId, val);
+    std::string id;
+    EXPECT_TRUE((item && item->GetString("id", &id)));
+    EXPECT_EQ(kAsyncIdStr, id);
+  }
+
+  EXPECT_FIND_("TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP0 call");
+  {
+    int val;
+    EXPECT_TRUE((item && item->GetInteger("ts", &val)));
+    EXPECT_EQ(23456, val);
+    EXPECT_TRUE((item && item->GetInteger("tid", &val)));
+    EXPECT_EQ(kThreadId, val);
+    std::string id;
+    EXPECT_TRUE((item && item->GetString("id", &id)));
+    EXPECT_EQ(kAsyncIdStr, id);
+  }
+
+  EXPECT_FIND_("TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0 call");
+  {
+    int val;
+    EXPECT_TRUE((item && item->GetInteger("ts", &val)));
+    EXPECT_EQ(34567, val);
+    EXPECT_TRUE((item && item->GetInteger("tid", &val)));
+    EXPECT_EQ(kThreadId, val);
+    std::string id;
+    EXPECT_TRUE((item && item->GetString("id", &id)));
+    EXPECT_EQ(kAsyncId2Str, id);
+  }
+
+  EXPECT_FIND_("TRACE_EVENT_ASYNC_STEP_PAST0 call");
+  {
+    EXPECT_SUB_FIND_("id");
+    EXPECT_SUB_FIND_(kAsyncId2Str);
+    EXPECT_SUB_FIND_("step_end1");
+    EXPECT_FIND_("TRACE_EVENT_ASYNC_STEP_PAST1 call");
+    EXPECT_SUB_FIND_("id");
+    EXPECT_SUB_FIND_(kAsyncId2Str);
+    EXPECT_SUB_FIND_("step_end2");
+    EXPECT_SUB_FIND_("name1");
+    EXPECT_SUB_FIND_("value1");
+  }
+
+  EXPECT_FIND_("TRACE_EVENT_END_WITH_ID_TID_AND_TIMESTAMP0 call");
+  {
+    int val;
+    EXPECT_TRUE((item && item->GetInteger("ts", &val)));
+    EXPECT_EQ(45678, val);
+    EXPECT_TRUE((item && item->GetInteger("tid", &val)));
+    EXPECT_EQ(kThreadId, val);
+    std::string id;
+    EXPECT_TRUE((item && item->GetString("id", &id)));
+    EXPECT_EQ(kAsyncId2Str, id);
+  }
+
+  EXPECT_FIND_("tracked object 1");
+  {
+    std::string phase;
+    std::string id;
+    std::string snapshot;
+
+    EXPECT_TRUE((item && item->GetString("ph", &phase)));
+    EXPECT_EQ("N", phase);
+    EXPECT_FALSE((item && item->HasKey("scope")));
+    EXPECT_TRUE((item && item->GetString("id", &id)));
+    EXPECT_EQ("0x42", id);
+
+    item = FindTraceEntry(trace_parsed, "tracked object 1", item);
+    EXPECT_TRUE(item);
+    EXPECT_TRUE(item && item->GetString("ph", &phase));
+    EXPECT_EQ("O", phase);
+    EXPECT_FALSE((item && item->HasKey("scope")));
+    EXPECT_TRUE(item && item->GetString("id", &id));
+    EXPECT_EQ("0x42", id);
+    EXPECT_TRUE(item && item->GetString("args.snapshot", &snapshot));
+    EXPECT_EQ("hello", snapshot);
+
+    item = FindTraceEntry(trace_parsed, "tracked object 1", item);
+    EXPECT_TRUE(item);
+    EXPECT_TRUE(item && item->GetString("ph", &phase));
+    EXPECT_EQ("D", phase);
+    EXPECT_FALSE((item && item->HasKey("scope")));
+    EXPECT_TRUE(item && item->GetString("id", &id));
+    EXPECT_EQ("0x42", id);
+  }
+
+  EXPECT_FIND_("tracked object 2");
+  {
+    std::string phase;
+    std::string id;
+    std::string snapshot;
+
+    EXPECT_TRUE(item && item->GetString("ph", &phase));
+    EXPECT_EQ("N", phase);
+    EXPECT_TRUE(item && item->GetString("id", &id));
+    EXPECT_EQ("0x2128506", id);
+
+    item = FindTraceEntry(trace_parsed, "tracked object 2", item);
+    EXPECT_TRUE(item);
+    EXPECT_TRUE(item && item->GetString("ph", &phase));
+    EXPECT_EQ("O", phase);
+    EXPECT_TRUE(item && item->GetString("id", &id));
+    EXPECT_EQ("0x2128506", id);
+    EXPECT_TRUE(item && item->GetString("args.snapshot", &snapshot));
+    EXPECT_EQ("world", snapshot);
+
+    item = FindTraceEntry(trace_parsed, "tracked object 2", item);
+    EXPECT_TRUE(item);
+    EXPECT_TRUE(item && item->GetString("ph", &phase));
+    EXPECT_EQ("D", phase);
+    EXPECT_TRUE(item && item->GetString("id", &id));
+    EXPECT_EQ("0x2128506", id);
+  }
+
+  EXPECT_FIND_("tracked object 3");
+  {
+    std::string phase;
+    std::string scope;
+    std::string id;
+    std::string snapshot;
+
+    EXPECT_TRUE((item && item->GetString("ph", &phase)));
+    EXPECT_EQ("N", phase);
+    EXPECT_TRUE((item && item->GetString("scope", &scope)));
+    EXPECT_EQ("scope", scope);
+    EXPECT_TRUE((item && item->GetString("id", &id)));
+    EXPECT_EQ("0x42", id);
+
+    item = FindTraceEntry(trace_parsed, "tracked object 3", item);
+    EXPECT_TRUE(item);
+    EXPECT_TRUE(item && item->GetString("ph", &phase));
+    EXPECT_EQ("O", phase);
+    EXPECT_TRUE((item && item->GetString("scope", &scope)));
+    EXPECT_EQ("scope", scope);
+    EXPECT_TRUE(item && item->GetString("id", &id));
+    EXPECT_EQ("0x42", id);
+    EXPECT_TRUE(item && item->GetString("args.snapshot", &snapshot));
+    EXPECT_EQ("hello", snapshot);
+
+    item = FindTraceEntry(trace_parsed, "tracked object 3", item);
+    EXPECT_TRUE(item);
+    EXPECT_TRUE(item && item->GetString("ph", &phase));
+    EXPECT_EQ("D", phase);
+    EXPECT_TRUE((item && item->GetString("scope", &scope)));
+    EXPECT_EQ("scope", scope);
+    EXPECT_TRUE(item && item->GetString("id", &id));
+    EXPECT_EQ("0x42", id);
+  }
+
+  EXPECT_FIND_(kControlCharacters);
+  EXPECT_SUB_FIND_(kControlCharacters);
+
+  EXPECT_FIND_("TRACE_EVENT_ENTER_CONTEXT call");
+  {
+    std::string ph;
+    EXPECT_TRUE((item && item->GetString("ph", &ph)));
+    EXPECT_EQ("(", ph);
+
+    std::string scope;
+    std::string id;
+    EXPECT_TRUE((item && item->GetString("scope", &scope)));
+    EXPECT_EQ("scope", scope);
+    EXPECT_TRUE((item && item->GetString("id", &id)));
+    EXPECT_EQ("0x20151021", id);
+  }
+
+  EXPECT_FIND_("TRACE_EVENT_LEAVE_CONTEXT call");
+  {
+    std::string ph;
+    EXPECT_TRUE((item && item->GetString("ph", &ph)));
+    EXPECT_EQ(")", ph);
+
+    std::string scope;
+    std::string id;
+    EXPECT_TRUE((item && item->GetString("scope", &scope)));
+    EXPECT_EQ("scope", scope);
+    EXPECT_TRUE((item && item->GetString("id", &id)));
+    EXPECT_EQ("0x20151021", id);
+  }
+
+  std::vector<const DictionaryValue*> scoped_context_calls =
+      FindTraceEntries(trace_parsed, "TRACE_EVENT_SCOPED_CONTEXT call");
+  EXPECT_EQ(2u, scoped_context_calls.size());
+  {
+    item = scoped_context_calls[0];
+    std::string ph;
+    EXPECT_TRUE((item && item->GetString("ph", &ph)));
+    EXPECT_EQ("(", ph);
+
+    std::string id;
+    EXPECT_FALSE((item && item->HasKey("scope")));
+    EXPECT_TRUE((item && item->GetString("id", &id)));
+    EXPECT_EQ("0x20151021", id);
+  }
+
+  {
+    item = scoped_context_calls[1];
+    std::string ph;
+    EXPECT_TRUE((item && item->GetString("ph", &ph)));
+    EXPECT_EQ(")", ph);
+
+    std::string id;
+    EXPECT_FALSE((item && item->HasKey("scope")));
+    EXPECT_TRUE((item && item->GetString("id", &id)));
+    EXPECT_EQ("0x20151021", id);
+  }
+
+  EXPECT_FIND_("TRACE_LINK_IDS simple call");
+  {
+    std::string ph;
+    EXPECT_TRUE((item && item->GetString("ph", &ph)));
+    EXPECT_EQ("=", ph);
+
+    EXPECT_FALSE((item && item->HasKey("scope")));
+    std::string id1;
+    EXPECT_TRUE((item && item->GetString("id", &id1)));
+    EXPECT_EQ("0x1000", id1);
+
+    EXPECT_FALSE((item && item->HasKey("args.linked_id.scope")));
+    std::string id2;
+    EXPECT_TRUE((item && item->GetString("args.linked_id.id", &id2)));
+    EXPECT_EQ("0x2000", id2);
+  }
+
+  EXPECT_FIND_("TRACE_LINK_IDS scoped call");
+  {
+    std::string ph;
+    EXPECT_TRUE((item && item->GetString("ph", &ph)));
+    EXPECT_EQ("=", ph);
+
+    std::string scope1;
+    EXPECT_TRUE((item && item->GetString("scope", &scope1)));
+    EXPECT_EQ("scope 1", scope1);
+    std::string id1;
+    EXPECT_TRUE((item && item->GetString("id", &id1)));
+    EXPECT_EQ("0x1000", id1);
+
+    std::string scope2;
+    EXPECT_TRUE((item && item->GetString("args.linked_id.scope", &scope2)));
+    EXPECT_EQ("scope 2", scope2);
+    std::string id2;
+    EXPECT_TRUE((item && item->GetString("args.linked_id.id", &id2)));
+    EXPECT_EQ("0x2000", id2);
+  }
+
+  EXPECT_FIND_("TRACE_LINK_IDS to a local ID");
+  {
+    std::string ph;
+    EXPECT_TRUE((item && item->GetString("ph", &ph)));
+    EXPECT_EQ("=", ph);
+
+    EXPECT_FALSE((item && item->HasKey("scope")));
+    std::string id1;
+    EXPECT_TRUE((item && item->GetString("id", &id1)));
+    EXPECT_EQ("0x1000", id1);
+
+    EXPECT_FALSE((item && item->HasKey("args.linked_id.scope")));
+    std::string id2;
+    EXPECT_TRUE((item && item->GetString("args.linked_id.id2.local", &id2)));
+    EXPECT_EQ("0x2000", id2);
+  }
+
+  EXPECT_FIND_("TRACE_LINK_IDS to a global ID");
+  {
+    std::string ph;
+    EXPECT_TRUE((item && item->GetString("ph", &ph)));
+    EXPECT_EQ("=", ph);
+
+    EXPECT_FALSE((item && item->HasKey("scope")));
+    std::string id1;
+    EXPECT_TRUE((item && item->GetString("id", &id1)));
+    EXPECT_EQ("0x1000", id1);
+
+    EXPECT_FALSE((item && item->HasKey("args.linked_id.scope")));
+    std::string id2;
+    EXPECT_TRUE((item && item->GetString("args.linked_id.id2.global", &id2)));
+    EXPECT_EQ("0x2000", id2);
+  }
+
+  EXPECT_FIND_("TRACE_LINK_IDS to a composite ID");
+  {
+    std::string ph;
+    EXPECT_TRUE((item && item->GetString("ph", &ph)));
+    EXPECT_EQ("=", ph);
+
+    EXPECT_FALSE(item->HasKey("scope"));
+    std::string id1;
+    EXPECT_TRUE(item->GetString("id", &id1));
+    EXPECT_EQ("0x1000", id1);
+
+    std::string scope;
+    EXPECT_TRUE(item->GetString("args.linked_id.scope", &scope));
+    EXPECT_EQ("scope 1", scope);
+    std::string id2;
+    EXPECT_TRUE(item->GetString("args.linked_id.id", &id2));
+    EXPECT_EQ(id2, "0x2000/0x3000");
+  }
+
+  EXPECT_FIND_("async default process scope");
+  {
+    std::string ph;
+    EXPECT_TRUE((item && item->GetString("ph", &ph)));
+    EXPECT_EQ("S", ph);
+
+    std::string id;
+    EXPECT_TRUE((item && item->GetString("id", &id)));
+    EXPECT_EQ("0x1000", id);
+  }
+
+  EXPECT_FIND_("async local id");
+  {
+    std::string ph;
+    EXPECT_TRUE((item && item->GetString("ph", &ph)));
+    EXPECT_EQ("S", ph);
+
+    std::string id;
+    EXPECT_TRUE((item && item->GetString("id2.local", &id)));
+    EXPECT_EQ("0x2000", id);
+  }
+
+  EXPECT_FIND_("async global id");
+  {
+    std::string ph;
+    EXPECT_TRUE((item && item->GetString("ph", &ph)));
+    EXPECT_EQ("S", ph);
+
+    std::string id;
+    EXPECT_TRUE((item && item->GetString("id2.global", &id)));
+    EXPECT_EQ("0x3000", id);
+  }
+
+  EXPECT_FIND_("async global id with scope string");
+  {
+    std::string ph;
+    EXPECT_TRUE((item && item->GetString("ph", &ph)));
+    EXPECT_EQ("S", ph);
+
+    std::string id;
+    EXPECT_TRUE((item && item->GetString("id2.global", &id)));
+    EXPECT_EQ("0x4000", id);
+    std::string scope;
+    EXPECT_TRUE((item && item->GetString("scope", &scope)));
+    EXPECT_EQ("scope string", scope);
+  }
+}
+
+void TraceManyInstantEvents(int thread_id, int num_events,
+                            WaitableEvent* task_complete_event) {
+  for (int i = 0; i < num_events; i++) {
+    TRACE_EVENT_INSTANT2("all", "multi thread event",
+                         TRACE_EVENT_SCOPE_THREAD,
+                         "thread", thread_id,
+                         "event", i);
+  }
+
+  if (task_complete_event)
+    task_complete_event->Signal();
+}
+
+void ValidateInstantEventPresentOnEveryThread(const ListValue& trace_parsed,
+                                              int num_threads,
+                                              int num_events) {
+  std::map<int, std::map<int, bool>> results;
+
+  size_t trace_parsed_count = trace_parsed.GetSize();
+  for (size_t i = 0; i < trace_parsed_count; i++) {
+    const Value* value = nullptr;
+    trace_parsed.Get(i, &value);
+    if (!value || value->type() != Value::Type::DICTIONARY)
+      continue;
+    const DictionaryValue* dict = static_cast<const DictionaryValue*>(value);
+    std::string name;
+    dict->GetString("name", &name);
+    if (name != "multi thread event")
+      continue;
+
+    int thread = 0;
+    int event = 0;
+    EXPECT_TRUE(dict->GetInteger("args.thread", &thread));
+    EXPECT_TRUE(dict->GetInteger("args.event", &event));
+    results[thread][event] = true;
+  }
+
+  EXPECT_FALSE(results[-1][-1]);
+  for (int thread = 0; thread < num_threads; thread++) {
+    for (int event = 0; event < num_events; event++) {
+      EXPECT_TRUE(results[thread][event]);
+    }
+  }
+}
+
+void CheckTraceDefaultCategoryFilters(const TraceLog& trace_log) {
+  // By default, all category filters are enabled except the
+  // disabled-by-default-* ones.
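+  // Note: a category group is enabled if any category in the group is
+  // enabled, which is why "foo,disabled-by-default-foo" below counts as
+  // enabled while a group of only disabled-by-default categories does not.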
+  EXPECT_TRUE(*trace_log.GetCategoryGroupEnabled("foo"));
+  EXPECT_TRUE(*trace_log.GetCategoryGroupEnabled("bar"));
+  EXPECT_TRUE(*trace_log.GetCategoryGroupEnabled("foo,bar"));
+  EXPECT_TRUE(*trace_log.GetCategoryGroupEnabled(
+        "foo,disabled-by-default-foo"));
+  EXPECT_FALSE(*trace_log.GetCategoryGroupEnabled(
+        "disabled-by-default-foo,disabled-by-default-bar"));
+}
+
+}  // namespace
+
+// Simple Test for emitting data and validating it was received.
+TEST_F(TraceEventTestFixture, DataCaptured) {
+  TraceLog::GetInstance()->SetEnabled(TraceConfig(kRecordAllCategoryFilter, ""),
+                                      TraceLog::RECORDING_MODE);
+
+  TraceWithAllMacroVariants(nullptr);
+
+  EndTraceAndFlush();
+
+  ValidateAllTraceMacrosCreatedData(trace_parsed_);
+}
+
+// Emit some events and validate that only empty strings are received
+// if we tell Flush() to discard events.
+TEST_F(TraceEventTestFixture, DataDiscarded) {
+  TraceLog::GetInstance()->SetEnabled(TraceConfig(kRecordAllCategoryFilter, ""),
+                                      TraceLog::RECORDING_MODE);
+
+  TraceWithAllMacroVariants(nullptr);
+
+  CancelTrace();
+
+  EXPECT_TRUE(trace_parsed_.empty());
+}
+
+class MockEnabledStateChangedObserver
+    : public TraceLog::EnabledStateObserver {
+ public:
+  MOCK_METHOD0(OnTraceLogEnabled, void());
+  MOCK_METHOD0(OnTraceLogDisabled, void());
+};
+
+TEST_F(TraceEventTestFixture, EnabledObserverFiresOnEnable) {
+  MockEnabledStateChangedObserver observer;
+  TraceLog::GetInstance()->AddEnabledStateObserver(&observer);
+
+  EXPECT_CALL(observer, OnTraceLogEnabled())
+      .Times(1);
+  TraceLog::GetInstance()->SetEnabled(TraceConfig(kRecordAllCategoryFilter, ""),
+                                      TraceLog::RECORDING_MODE);
+  testing::Mock::VerifyAndClear(&observer);
+  EXPECT_TRUE(TraceLog::GetInstance()->IsEnabled());
+
+  // Cleanup.
+  TraceLog::GetInstance()->RemoveEnabledStateObserver(&observer);
+  TraceLog::GetInstance()->SetDisabled();
+}
+
+TEST_F(TraceEventTestFixture, EnabledObserverDoesntFireOnSecondEnable) {
+  TraceLog::GetInstance()->SetEnabled(TraceConfig(kRecordAllCategoryFilter, ""),
+                                      TraceLog::RECORDING_MODE);
+
+  testing::StrictMock<MockEnabledStateChangedObserver> observer;
+  TraceLog::GetInstance()->AddEnabledStateObserver(&observer);
+
+  EXPECT_CALL(observer, OnTraceLogEnabled())
+      .Times(0);
+  EXPECT_CALL(observer, OnTraceLogDisabled())
+      .Times(0);
+  TraceLog::GetInstance()->SetEnabled(TraceConfig(kRecordAllCategoryFilter, ""),
+                                      TraceLog::RECORDING_MODE);
+  testing::Mock::VerifyAndClear(&observer);
+  EXPECT_TRUE(TraceLog::GetInstance()->IsEnabled());
+
+  // Cleanup.
+  TraceLog::GetInstance()->RemoveEnabledStateObserver(&observer);
+  TraceLog::GetInstance()->SetDisabled();
+  TraceLog::GetInstance()->SetDisabled();
+}
+
+TEST_F(TraceEventTestFixture, EnabledObserverFiresOnFirstDisable) {
+  TraceConfig tc_inc_all("*", "");
+  TraceLog::GetInstance()->SetEnabled(tc_inc_all, TraceLog::RECORDING_MODE);
+  TraceLog::GetInstance()->SetEnabled(tc_inc_all, TraceLog::RECORDING_MODE);
+
+  testing::StrictMock<MockEnabledStateChangedObserver> observer;
+  TraceLog::GetInstance()->AddEnabledStateObserver(&observer);
+
+  EXPECT_CALL(observer, OnTraceLogEnabled())
+      .Times(0);
+  EXPECT_CALL(observer, OnTraceLogDisabled())
+      .Times(1);
+  TraceLog::GetInstance()->SetDisabled();
+  testing::Mock::VerifyAndClear(&observer);
+
+  // Cleanup.
+  TraceLog::GetInstance()->RemoveEnabledStateObserver(&observer);
+  TraceLog::GetInstance()->SetDisabled();
+}
+
+TEST_F(TraceEventTestFixture, EnabledObserverFiresOnDisable) {
+  TraceLog::GetInstance()->SetEnabled(TraceConfig(kRecordAllCategoryFilter, ""),
+                                      TraceLog::RECORDING_MODE);
+
+  MockEnabledStateChangedObserver observer;
+  TraceLog::GetInstance()->AddEnabledStateObserver(&observer);
+
+  EXPECT_CALL(observer, OnTraceLogDisabled())
+      .Times(1);
+  TraceLog::GetInstance()->SetDisabled();
+  testing::Mock::VerifyAndClear(&observer);
+
+  // Cleanup.
+  TraceLog::GetInstance()->RemoveEnabledStateObserver(&observer);
+}
+
+// Tests that the IsEnabled() state of TraceLog changes before the observer
+// callbacks fire.
+class AfterStateChangeEnabledStateObserver
+    : public TraceLog::EnabledStateObserver {
+ public:
+  AfterStateChangeEnabledStateObserver() = default;
+  ~AfterStateChangeEnabledStateObserver() override = default;
+
+  // TraceLog::EnabledStateObserver overrides:
+  void OnTraceLogEnabled() override {
+    EXPECT_TRUE(TraceLog::GetInstance()->IsEnabled());
+  }
+
+  void OnTraceLogDisabled() override {
+    EXPECT_FALSE(TraceLog::GetInstance()->IsEnabled());
+  }
+};
+
+TEST_F(TraceEventTestFixture, ObserversFireAfterStateChange) {
+  AfterStateChangeEnabledStateObserver observer;
+  TraceLog::GetInstance()->AddEnabledStateObserver(&observer);
+
+  TraceLog::GetInstance()->SetEnabled(TraceConfig(kRecordAllCategoryFilter, ""),
+                                      TraceLog::RECORDING_MODE);
+  EXPECT_TRUE(TraceLog::GetInstance()->IsEnabled());
+
+  TraceLog::GetInstance()->SetDisabled();
+  EXPECT_FALSE(TraceLog::GetInstance()->IsEnabled());
+
+  TraceLog::GetInstance()->RemoveEnabledStateObserver(&observer);
+}
+
+// Tests that a state observer can remove itself during a callback.
+class SelfRemovingEnabledStateObserver
+    : public TraceLog::EnabledStateObserver {
+ public:
+  SelfRemovingEnabledStateObserver() = default;
+  ~SelfRemovingEnabledStateObserver() override = default;
+
+  // TraceLog::EnabledStateObserver overrides:
+  void OnTraceLogEnabled() override {}
+
+  void OnTraceLogDisabled() override {
+    TraceLog::GetInstance()->RemoveEnabledStateObserver(this);
+  }
+};
+
+TEST_F(TraceEventTestFixture, SelfRemovingObserver) {
+  ASSERT_EQ(0u, TraceLog::GetInstance()->GetObserverCountForTest());
+
+  SelfRemovingEnabledStateObserver observer;
+  TraceLog::GetInstance()->AddEnabledStateObserver(&observer);
+  EXPECT_EQ(1u, TraceLog::GetInstance()->GetObserverCountForTest());
+
+  TraceLog::GetInstance()->SetEnabled(TraceConfig(kRecordAllCategoryFilter, ""),
+                                      TraceLog::RECORDING_MODE);
+  TraceLog::GetInstance()->SetDisabled();
+  // The observer removed itself on disable.
+  EXPECT_EQ(0u, TraceLog::GetInstance()->GetObserverCountForTest());
+}
+
+bool IsNewTrace() {
+  bool is_new_trace;
+  TRACE_EVENT_IS_NEW_TRACE(&is_new_trace);
+  return is_new_trace;
+}
+
+TEST_F(TraceEventTestFixture, NewTraceRecording) {
+  ASSERT_FALSE(IsNewTrace());
+  TraceLog::GetInstance()->SetEnabled(TraceConfig(kRecordAllCategoryFilter, ""),
+                                      TraceLog::RECORDING_MODE);
+  // The first call to IsNewTrace() should return true, but the second
+  // shouldn't.
+  ASSERT_TRUE(IsNewTrace());
+  ASSERT_FALSE(IsNewTrace());
+  EndTraceAndFlush();
+
+  // IsNewTrace() should definitely be false now.
+  ASSERT_FALSE(IsNewTrace());
+
+  // Start another trace. IsNewTrace() should become true again, briefly, as
+  // before.
+  TraceLog::GetInstance()->SetEnabled(TraceConfig(kRecordAllCategoryFilter, ""),
+                                      TraceLog::RECORDING_MODE);
+  ASSERT_TRUE(IsNewTrace());
+  ASSERT_FALSE(IsNewTrace());
+
+  // Cleanup.
+  EndTraceAndFlush();
+}
+
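+// Finds the number of events at which a flush starts being delivered in more
+// than one callback chunk: grow max_traces exponentially until two callbacks
+// are observed, binary-search down to a small window around the threshold,
+// then sweep that window end to end. (Descriptive note; the 50 and +/-10 are
+// slack around the threshold.)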
+TEST_F(TraceEventTestFixture, TestTraceFlush) {
+  size_t min_traces = 1;
+  size_t max_traces = 1;
+  do {
+    max_traces *= 2;
+    TraceLog::GetInstance()->SetEnabled(TraceConfig(),
+                                        TraceLog::RECORDING_MODE);
+    for (size_t i = 0; i < max_traces; i++) {
+      TRACE_EVENT_INSTANT0("x", "y", TRACE_EVENT_SCOPE_THREAD);
+    }
+    EndTraceAndFlush();
+  } while (num_flush_callbacks_ < 2);
+
+  while (min_traces + 50 < max_traces) {
+    size_t traces = (min_traces + max_traces) / 2;
+    TraceLog::GetInstance()->SetEnabled(TraceConfig(),
+                                        TraceLog::RECORDING_MODE);
+    for (size_t i = 0; i < traces; i++) {
+      TRACE_EVENT_INSTANT0("x", "y", TRACE_EVENT_SCOPE_THREAD);
+    }
+    EndTraceAndFlush();
+    if (num_flush_callbacks_ < 2) {
+      min_traces = traces - 10;
+    } else {
+      max_traces = traces + 10;
+    }
+  }
+
+  for (size_t traces = min_traces; traces < max_traces; traces++) {
+    TraceLog::GetInstance()->SetEnabled(TraceConfig(),
+                                        TraceLog::RECORDING_MODE);
+    for (size_t i = 0; i < traces; i++) {
+      TRACE_EVENT_INSTANT0("x", "y", TRACE_EVENT_SCOPE_THREAD);
+    }
+    EndTraceAndFlush();
+  }
+}
+
+TEST_F(TraceEventTestFixture, AddMetadataEvent) {
+  int num_calls = 0;
+
+  class Convertable : public ConvertableToTraceFormat {
+   public:
+    explicit Convertable(int* num_calls) : num_calls_(num_calls) {}
+    ~Convertable() override = default;
+    void AppendAsTraceFormat(std::string* out) const override {
+      (*num_calls_)++;
+      out->append("\"metadata_value\"");
+    }
+
+   private:
+    int* num_calls_;
+  };
+
+  std::unique_ptr<ConvertableToTraceFormat> conv1(new Convertable(&num_calls));
+  std::unique_ptr<Convertable> conv2(new Convertable(&num_calls));
+
+  BeginTrace();
+  TRACE_EVENT_API_ADD_METADATA_EVENT(
+      TraceLog::GetCategoryGroupEnabled("__metadata"), "metadata_event_1",
+      "metadata_arg_name", std::move(conv1));
+  TRACE_EVENT_API_ADD_METADATA_EVENT(
+      TraceLog::GetCategoryGroupEnabled("__metadata"), "metadata_event_2",
+      "metadata_arg_name", std::move(conv2));
+  // |AppendAsTraceFormat| should only be called on flush, not when the event
+  // is added.
+  ASSERT_EQ(0, num_calls);
+  EndTraceAndFlush();
+  ASSERT_EQ(2, num_calls);
+  EXPECT_TRUE(FindNamePhaseKeyValue("metadata_event_1", "M",
+                                    "metadata_arg_name", "metadata_value"));
+  EXPECT_TRUE(FindNamePhaseKeyValue("metadata_event_2", "M",
+                                    "metadata_arg_name", "metadata_value"));
+
+  // The metadata event should only be added to the current trace. In this
+  // new trace, the event should not appear.
+  BeginTrace();
+  EndTraceAndFlush();
+  ASSERT_EQ(2, num_calls);
+}
+
+// Test that categories work.
+TEST_F(TraceEventTestFixture, Categories) {
+  // Test that categories that are used can be retrieved whether tracing was
+  // enabled or disabled when the trace event was encountered.
+  TRACE_EVENT_INSTANT0("c1", "name", TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("c2", "name", TRACE_EVENT_SCOPE_THREAD);
+  BeginTrace();
+  TRACE_EVENT_INSTANT0("c3", "name", TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("c4", "name", TRACE_EVENT_SCOPE_THREAD);
+  // Category groups containing more than one category.
+  TRACE_EVENT_INSTANT0("c5,c6", "name", TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("c7,c8", "name", TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0(TRACE_DISABLED_BY_DEFAULT("c9"), "name",
+                       TRACE_EVENT_SCOPE_THREAD);
+
+  EndTraceAndFlush();
+  std::vector<std::string> cat_groups;
+  TraceLog::GetInstance()->GetKnownCategoryGroups(&cat_groups);
+  EXPECT_TRUE(ContainsValue(cat_groups, "c1"));
+  EXPECT_TRUE(ContainsValue(cat_groups, "c2"));
+  EXPECT_TRUE(ContainsValue(cat_groups, "c3"));
+  EXPECT_TRUE(ContainsValue(cat_groups, "c4"));
+  EXPECT_TRUE(ContainsValue(cat_groups, "c5,c6"));
+  EXPECT_TRUE(ContainsValue(cat_groups, "c7,c8"));
+  EXPECT_TRUE(ContainsValue(cat_groups, "disabled-by-default-c9"));
+  // Make sure metadata isn't returned.
+  EXPECT_FALSE(ContainsValue(cat_groups, "__metadata"));
+
+  const std::vector<std::string> empty_categories;
+  std::vector<std::string> included_categories;
+  std::vector<std::string> excluded_categories;
+
+  // Test that category filtering works.
+
+  // Include nonexistent category -> no events
+  Clear();
+  included_categories.clear();
+  TraceLog::GetInstance()->SetEnabled(TraceConfig("not_found823564786", ""),
+                                      TraceLog::RECORDING_MODE);
+  TRACE_EVENT_INSTANT0("cat1", "name", TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("cat2", "name", TRACE_EVENT_SCOPE_THREAD);
+  EndTraceAndFlush();
+  DropTracedMetadataRecords();
+  EXPECT_TRUE(trace_parsed_.empty());
+
+  // Include existent category -> only events of that category
+  Clear();
+  included_categories.clear();
+  TraceLog::GetInstance()->SetEnabled(TraceConfig("inc", ""),
+                                      TraceLog::RECORDING_MODE);
+  TRACE_EVENT_INSTANT0("inc", "name", TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("inc2", "name", TRACE_EVENT_SCOPE_THREAD);
+  EndTraceAndFlush();
+  DropTracedMetadataRecords();
+  EXPECT_TRUE(FindMatchingValue("cat", "inc"));
+  EXPECT_FALSE(FindNonMatchingValue("cat", "inc"));
+
+  // Include existent wildcard -> all categories matching wildcard
+  Clear();
+  included_categories.clear();
+  TraceLog::GetInstance()->SetEnabled(
+      TraceConfig("inc_wildcard_*,inc_wildchar_?_end", ""),
+      TraceLog::RECORDING_MODE);
+  TRACE_EVENT_INSTANT0("inc_wildcard_abc", "included",
+      TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("inc_wildcard_", "included", TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("inc_wildchar_x_end", "included",
+      TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("inc_wildchar_bla_end", "not_inc",
+      TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("cat1", "not_inc", TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("cat2", "not_inc", TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("inc_wildcard_category,other_category", "included",
+      TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0(
+      "non_included_category,inc_wildcard_category", "included",
+      TRACE_EVENT_SCOPE_THREAD);
+  EndTraceAndFlush();
+  EXPECT_TRUE(FindMatchingValue("cat", "inc_wildcard_abc"));
+  EXPECT_TRUE(FindMatchingValue("cat", "inc_wildcard_"));
+  EXPECT_TRUE(FindMatchingValue("cat", "inc_wildchar_x_end"));
+  EXPECT_FALSE(FindMatchingValue("name", "not_inc"));
+  EXPECT_TRUE(FindMatchingValue("cat", "inc_wildcard_category,other_category"));
+  EXPECT_TRUE(FindMatchingValue("cat",
+                                "non_included_category,inc_wildcard_category"));
+
+  included_categories.clear();
+
+  // Exclude nonexistent category -> all events
+  Clear();
+  TraceLog::GetInstance()->SetEnabled(TraceConfig("-not_found823564786", ""),
+                                      TraceLog::RECORDING_MODE);
+  TRACE_EVENT_INSTANT0("cat1", "name", TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("cat2", "name", TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("category1,category2", "name", TRACE_EVENT_SCOPE_THREAD);
+  EndTraceAndFlush();
+  EXPECT_TRUE(FindMatchingValue("cat", "cat1"));
+  EXPECT_TRUE(FindMatchingValue("cat", "cat2"));
+  EXPECT_TRUE(FindMatchingValue("cat", "category1,category2"));
+
+  // Exclude existent category -> only events of other categories
+  Clear();
+  TraceLog::GetInstance()->SetEnabled(TraceConfig("-inc", ""),
+                                      TraceLog::RECORDING_MODE);
+  TRACE_EVENT_INSTANT0("inc", "name", TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("inc2", "name", TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("inc2,inc", "name", TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("inc,inc2", "name", TRACE_EVENT_SCOPE_THREAD);
+  EndTraceAndFlush();
+  EXPECT_TRUE(FindMatchingValue("cat", "inc2"));
+  EXPECT_FALSE(FindMatchingValue("cat", "inc"));
+  EXPECT_TRUE(FindMatchingValue("cat", "inc2,inc"));
+  EXPECT_TRUE(FindMatchingValue("cat", "inc,inc2"));
+
+  // Exclude existent wildcard -> all categories not matching wildcard
+  Clear();
+  TraceLog::GetInstance()->SetEnabled(
+      TraceConfig("-inc_wildcard_*,-inc_wildchar_?_end", ""),
+      TraceLog::RECORDING_MODE);
+  TRACE_EVENT_INSTANT0("inc_wildcard_abc", "not_inc",
+      TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("inc_wildcard_", "not_inc",
+      TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("inc_wildchar_x_end", "not_inc",
+      TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("inc_wildchar_bla_end", "included",
+      TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("cat1", "included", TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("cat2", "included", TRACE_EVENT_SCOPE_THREAD);
+  EndTraceAndFlush();
+  EXPECT_TRUE(FindMatchingValue("cat", "inc_wildchar_bla_end"));
+  EXPECT_TRUE(FindMatchingValue("cat", "cat1"));
+  EXPECT_TRUE(FindMatchingValue("cat", "cat2"));
+  EXPECT_FALSE(FindMatchingValue("name", "not_inc"));
+}
+
+// Test ASYNC_BEGIN/END events
+TEST_F(TraceEventTestFixture, AsyncBeginEndEvents) {
+  BeginTrace();
+
+  unsigned long long id = 0xfeedbeeffeedbeefull;
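+  // Async begin/step/end events that share an id are linked into a single
+  // asynchronous operation in the trace.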
+  TRACE_EVENT_ASYNC_BEGIN0("cat", "name1", id);
+  TRACE_EVENT_ASYNC_STEP_INTO0("cat", "name1", id, "step1");
+  TRACE_EVENT_ASYNC_END0("cat", "name1", id);
+  TRACE_EVENT_BEGIN0("cat", "name2");
+  TRACE_EVENT_ASYNC_BEGIN0("cat", "name3", 0);
+  TRACE_EVENT_ASYNC_STEP_PAST0("cat", "name3", 0, "step2");
+
+  EndTraceAndFlush();
+
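+  // Phase codes: "S" = async begin, "T" = async step into, "F" = async end,
+  // "p" = async step past.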
+  EXPECT_TRUE(FindNamePhase("name1", "S"));
+  EXPECT_TRUE(FindNamePhase("name1", "T"));
+  EXPECT_TRUE(FindNamePhase("name1", "F"));
+
+  std::string id_str;
+  StringAppendF(&id_str, "0x%llx", id);
+
+  EXPECT_TRUE(FindNamePhaseKeyValue("name1", "S", "id", id_str.c_str()));
+  EXPECT_TRUE(FindNamePhaseKeyValue("name1", "T", "id", id_str.c_str()));
+  EXPECT_TRUE(FindNamePhaseKeyValue("name1", "F", "id", id_str.c_str()));
+  EXPECT_TRUE(FindNamePhaseKeyValue("name3", "S", "id", "0x0"));
+  EXPECT_TRUE(FindNamePhaseKeyValue("name3", "p", "id", "0x0"));
+
+  // BEGIN events should not have an id.
+  EXPECT_FALSE(FindNamePhaseKeyValue("name2", "B", "id", "0"));
+}
+
+// Test that async event ids based on pointers are mangled with the process ID.
+TEST_F(TraceEventTestFixture, AsyncBeginEndPointerMangling) {
+  void* ptr = this;
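+  // Ids created from pointers are mangled with the process ID, so the same
+  // pointer should yield equal ids within one process but a different id
+  // once the process ID changes.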
+
+  TraceLog::GetInstance()->SetProcessID(100);
+  BeginTrace();
+  TRACE_EVENT_ASYNC_BEGIN0("cat", "name1", ptr);
+  TRACE_EVENT_ASYNC_BEGIN0("cat", "name2", ptr);
+  EndTraceAndFlush();
+
+  TraceLog::GetInstance()->SetProcessID(200);
+  BeginTrace();
+  TRACE_EVENT_ASYNC_END0("cat", "name1", ptr);
+  EndTraceAndFlush();
+
+  DictionaryValue* async_begin = FindNamePhase("name1", "S");
+  DictionaryValue* async_begin2 = FindNamePhase("name2", "S");
+  DictionaryValue* async_end = FindNamePhase("name1", "F");
+  EXPECT_TRUE(async_begin);
+  EXPECT_TRUE(async_begin2);
+  EXPECT_TRUE(async_end);
+
+  Value* value = nullptr;
+  std::string async_begin_id_str;
+  std::string async_begin2_id_str;
+  std::string async_end_id_str;
+  ASSERT_TRUE(async_begin->Get("id", &value));
+  ASSERT_TRUE(value->GetAsString(&async_begin_id_str));
+  ASSERT_TRUE(async_begin2->Get("id", &value));
+  ASSERT_TRUE(value->GetAsString(&async_begin2_id_str));
+  ASSERT_TRUE(async_end->Get("id", &value));
+  ASSERT_TRUE(value->GetAsString(&async_end_id_str));
+
+  EXPECT_STREQ(async_begin_id_str.c_str(), async_begin2_id_str.c_str());
+  EXPECT_STRNE(async_begin_id_str.c_str(), async_end_id_str.c_str());
+}
+
+// Test that static strings are not copied.
+TEST_F(TraceEventTestFixture, StaticStringVsString) {
+  TraceLog* tracer = TraceLog::GetInstance();
+  // Make sure old events are flushed:
+  EXPECT_EQ(0u, tracer->GetStatus().event_count);
+  const unsigned char* category_group_enabled =
+      TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED("cat");
+
+  {
+    BeginTrace();
+    // Test that string arguments are copied.
+    TraceEventHandle handle1 =
+        trace_event_internal::AddTraceEvent(
+            TRACE_EVENT_PHASE_INSTANT, category_group_enabled, "name1",
+            trace_event_internal::kGlobalScope, trace_event_internal::kNoId,
+            0, trace_event_internal::kNoId,
+            "arg1", std::string("argval"), "arg2", std::string("argval"));
+    // Test that static TRACE_STR_COPY string arguments are copied.
+    TraceEventHandle handle2 =
+        trace_event_internal::AddTraceEvent(
+            TRACE_EVENT_PHASE_INSTANT, category_group_enabled, "name2",
+            trace_event_internal::kGlobalScope, trace_event_internal::kNoId,
+            0, trace_event_internal::kNoId,
+            "arg1", TRACE_STR_COPY("argval"),
+            "arg2", TRACE_STR_COPY("argval"));
+    EXPECT_GT(tracer->GetStatus().event_count, 1u);
+    const TraceEvent* event1 = tracer->GetEventByHandle(handle1);
+    const TraceEvent* event2 = tracer->GetEventByHandle(handle2);
+    ASSERT_TRUE(event1);
+    ASSERT_TRUE(event2);
+    EXPECT_STREQ("name1", event1->name());
+    EXPECT_STREQ("name2", event2->name());
+    EXPECT_TRUE(event1->parameter_copy_storage() != nullptr);
+    EXPECT_TRUE(event2->parameter_copy_storage() != nullptr);
+    EXPECT_GT(event1->parameter_copy_storage()->size(), 0u);
+    EXPECT_GT(event2->parameter_copy_storage()->size(), 0u);
+    EndTraceAndFlush();
+  }
+
+  {
+    BeginTrace();
+    // Test that static literal string arguments are not copied.
+    TraceEventHandle handle1 =
+        trace_event_internal::AddTraceEvent(
+            TRACE_EVENT_PHASE_INSTANT, category_group_enabled, "name1",
+            trace_event_internal::kGlobalScope, trace_event_internal::kNoId,
+            0, trace_event_internal::kNoId,
+            "arg1", "argval", "arg2", "argval");
+    // Test that static TRACE_STR_COPY NULL string arguments are not copied.
+    const char* str1 = nullptr;
+    const char* str2 = nullptr;
+    TraceEventHandle handle2 =
+        trace_event_internal::AddTraceEvent(
+            TRACE_EVENT_PHASE_INSTANT, category_group_enabled, "name2",
+            trace_event_internal::kGlobalScope, trace_event_internal::kNoId,
+            0, trace_event_internal::kNoId,
+            "arg1", TRACE_STR_COPY(str1),
+            "arg2", TRACE_STR_COPY(str2));
+    EXPECT_GT(tracer->GetStatus().event_count, 1u);
+    const TraceEvent* event1 = tracer->GetEventByHandle(handle1);
+    const TraceEvent* event2 = tracer->GetEventByHandle(handle2);
+    ASSERT_TRUE(event1);
+    ASSERT_TRUE(event2);
+    EXPECT_STREQ("name1", event1->name());
+    EXPECT_STREQ("name2", event2->name());
+    EXPECT_TRUE(event1->parameter_copy_storage() == nullptr);
+    EXPECT_TRUE(event2->parameter_copy_storage() == nullptr);
+    EndTraceAndFlush();
+  }
+}
+
+// Test that data sent from other threads is gathered
+TEST_F(TraceEventTestFixture, DataCapturedOnThread) {
+  BeginTrace();
+
+  Thread thread("1");
+  WaitableEvent task_complete_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                                    WaitableEvent::InitialState::NOT_SIGNALED);
+  thread.Start();
+
+  thread.task_runner()->PostTask(
+      FROM_HERE,
+      base::BindOnce(&TraceWithAllMacroVariants, &task_complete_event));
+  task_complete_event.Wait();
+  thread.Stop();
+
+  EndTraceAndFlush();
+  ValidateAllTraceMacrosCreatedData(trace_parsed_);
+}
+
+// Test that data sent from multiple threads is gathered
+TEST_F(TraceEventTestFixture, DataCapturedManyThreads) {
+  BeginTrace();
+
+  const int num_threads = 4;
+  const int num_events = 4000;
+  Thread* threads[num_threads];
+  WaitableEvent* task_complete_events[num_threads];
+  for (int i = 0; i < num_threads; i++) {
+    threads[i] = new Thread(StringPrintf("Thread %d", i));
+    task_complete_events[i] =
+        new WaitableEvent(WaitableEvent::ResetPolicy::AUTOMATIC,
+                          WaitableEvent::InitialState::NOT_SIGNALED);
+    threads[i]->Start();
+    threads[i]->task_runner()->PostTask(
+        FROM_HERE, base::BindOnce(&TraceManyInstantEvents, i, num_events,
+                                  task_complete_events[i]));
+  }
+
+  for (int i = 0; i < num_threads; i++) {
+    task_complete_events[i]->Wait();
+  }
+
+  // Let half of the threads end before flush.
+  for (int i = 0; i < num_threads / 2; i++) {
+    threads[i]->Stop();
+    delete threads[i];
+    delete task_complete_events[i];
+  }
+
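+  // The flush must gather events both from the threads that already stopped
+  // and from the threads that are still running.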
+  EndTraceAndFlushInThreadWithMessageLoop();
+  ValidateInstantEventPresentOnEveryThread(trace_parsed_,
+                                           num_threads, num_events);
+
+  // Let the other half of the threads end after flush.
+  for (int i = num_threads / 2; i < num_threads; i++) {
+    threads[i]->Stop();
+    delete threads[i];
+    delete task_complete_events[i];
+  }
+}
+
+// Test that thread and process names show up in the trace
+TEST_F(TraceEventTestFixture, ThreadNames) {
+  // Create threads before we enable tracing to make sure
+  // that TraceLog still captures them.
+  const int kNumThreads = 4;
+  const int kNumEvents = 10;
+  Thread* threads[kNumThreads];
+  PlatformThreadId thread_ids[kNumThreads];
+  for (int i = 0; i < kNumThreads; i++)
+    threads[i] = new Thread(StringPrintf("Thread %d", i));
+
+  // Enable tracing.
+  BeginTrace();
+
+  // Now run some trace code on these threads.
+  WaitableEvent* task_complete_events[kNumThreads];
+  for (int i = 0; i < kNumThreads; i++) {
+    task_complete_events[i] =
+        new WaitableEvent(WaitableEvent::ResetPolicy::AUTOMATIC,
+                          WaitableEvent::InitialState::NOT_SIGNALED);
+    threads[i]->Start();
+    thread_ids[i] = threads[i]->GetThreadId();
+    threads[i]->task_runner()->PostTask(
+        FROM_HERE, base::BindOnce(&TraceManyInstantEvents, i, kNumEvents,
+                                  task_complete_events[i]));
+  }
+  for (int i = 0; i < kNumThreads; i++) {
+    task_complete_events[i]->Wait();
+  }
+
+  // Shut things down.
+  for (int i = 0; i < kNumThreads; i++) {
+    threads[i]->Stop();
+    delete threads[i];
+    delete task_complete_events[i];
+  }
+
+  EndTraceAndFlush();
+
+  std::string tmp;
+  int tmp_int;
+  const DictionaryValue* item;
+
+  // Make sure we get thread name metadata.
+  // Note: the test suite may have created many other threads, so we'll also
+  // see thread names for threads we didn't create.
+  std::vector<const DictionaryValue*> items =
+      FindTraceEntries(trace_parsed_, "thread_name");
+  for (int i = 0; i < static_cast<int>(items.size()); i++) {
+    item = items[i];
+    ASSERT_TRUE(item);
+    EXPECT_TRUE(item->GetInteger("tid", &tmp_int));
+
+    // See if this thread name is one of the threads we just created
+    for (int j = 0; j < kNumThreads; j++) {
+      if (static_cast<int>(thread_ids[j]) != tmp_int)
+        continue;
+
+      std::string expected_name = StringPrintf("Thread %d", j);
+      EXPECT_TRUE(item->GetString("ph", &tmp) && tmp == "M");
+      EXPECT_TRUE(item->GetInteger("pid", &tmp_int) &&
+                  tmp_int == static_cast<int>(base::GetCurrentProcId()));
+      // If the thread name changes or the tid gets reused, the name will be
+      // a comma-separated list of thread names, so look for a substring.
+      EXPECT_TRUE(item->GetString("args.name", &tmp) &&
+                  tmp.find(expected_name) != std::string::npos);
+    }
+  }
+}
+
+TEST_F(TraceEventTestFixture, ThreadNameChanges) {
+  BeginTrace();
+
+  PlatformThread::SetName("");
+  TRACE_EVENT_INSTANT0("drink", "water", TRACE_EVENT_SCOPE_THREAD);
+
+  PlatformThread::SetName("cafe");
+  TRACE_EVENT_INSTANT0("drink", "coffee", TRACE_EVENT_SCOPE_THREAD);
+
+  PlatformThread::SetName("shop");
+  // No event here, so it won't appear in the combined name.
+
+  PlatformThread::SetName("pub");
+  TRACE_EVENT_INSTANT0("drink", "beer", TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("drink", "wine", TRACE_EVENT_SCOPE_THREAD);
+
+  PlatformThread::SetName(" bar");
+  TRACE_EVENT_INSTANT0("drink", "whisky", TRACE_EVENT_SCOPE_THREAD);
+
+  EndTraceAndFlush();
+
+  std::vector<const DictionaryValue*> items =
+      FindTraceEntries(trace_parsed_, "thread_name");
+  EXPECT_EQ(1u, items.size());
+  ASSERT_GT(items.size(), 0u);
+  const DictionaryValue* item = items[0];
+  ASSERT_TRUE(item);
+  int tid;
+  EXPECT_TRUE(item->GetInteger("tid", &tid));
+  EXPECT_EQ(PlatformThread::CurrentId(), static_cast<PlatformThreadId>(tid));
+
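+  // The empty name is ignored, "shop" is omitted because no event was emitted
+  // while it was set, and the remaining names are joined with commas.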
+  std::string expected_name = "cafe,pub, bar";
+  std::string tmp;
+  EXPECT_TRUE(item->GetString("args.name", &tmp));
+  EXPECT_EQ(expected_name, tmp);
+}
+
+// Test that the disabled trace categories are included/excluded from the
+// trace output correctly.
+TEST_F(TraceEventTestFixture, DisabledCategories) {
+  BeginTrace();
+  TRACE_EVENT_INSTANT0(TRACE_DISABLED_BY_DEFAULT("cc"), "first",
+                       TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("included", "first", TRACE_EVENT_SCOPE_THREAD);
+  EndTraceAndFlush();
+  {
+    const DictionaryValue* item = nullptr;
+    ListValue& trace_parsed = trace_parsed_;
+    EXPECT_NOT_FIND_("disabled-by-default-cc");
+    EXPECT_FIND_("included");
+  }
+  Clear();
+
+  BeginSpecificTrace("disabled-by-default-cc");
+  TRACE_EVENT_INSTANT0(TRACE_DISABLED_BY_DEFAULT("cc"), "second",
+                       TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("other_included", "second", TRACE_EVENT_SCOPE_THREAD);
+  EndTraceAndFlush();
+
+  {
+    const DictionaryValue* item = nullptr;
+    ListValue& trace_parsed = trace_parsed_;
+    EXPECT_FIND_("disabled-by-default-cc");
+    EXPECT_FIND_("other_included");
+  }
+
+  Clear();
+
+  BeginSpecificTrace("other_included");
+  TRACE_EVENT_INSTANT0(TRACE_DISABLED_BY_DEFAULT("cc") ",other_included",
+                       "first", TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("other_included," TRACE_DISABLED_BY_DEFAULT("cc"),
+                       "second", TRACE_EVENT_SCOPE_THREAD);
+  EndTraceAndFlush();
+
+  {
+    const DictionaryValue* item = nullptr;
+    ListValue& trace_parsed = trace_parsed_;
+    EXPECT_FIND_("disabled-by-default-cc,other_included");
+    EXPECT_FIND_("other_included,disabled-by-default-cc");
+  }
+}
+
+TEST_F(TraceEventTestFixture, NormallyNoDeepCopy) {
+  // Test that the TRACE_EVENT macros do not deep-copy their string. If they
+  // did so it might indicate a performance regression, and moreover it would
+  // make the DEEP_COPY overloads redundant.
+  std::string name_string("event name");
+
+  BeginTrace();
+  TRACE_EVENT_INSTANT0("category", name_string.c_str(),
+                       TRACE_EVENT_SCOPE_THREAD);
+
+  // Modify the string in place (a wholesale reassignment may leave the old
+  // string intact on the heap).
+  name_string[0] = '@';
+
+  EndTraceAndFlush();
+
+  EXPECT_FALSE(FindTraceEntry(trace_parsed_, "event name"));
+  EXPECT_TRUE(FindTraceEntry(trace_parsed_, name_string.c_str()));
+}
+
+TEST_F(TraceEventTestFixture, DeepCopy) {
+  static const char kOriginalName1[] = "name1";
+  static const char kOriginalName2[] = "name2";
+  static const char kOriginalName3[] = "name3";
+  std::string name1(kOriginalName1);
+  std::string name2(kOriginalName2);
+  std::string name3(kOriginalName3);
+  std::string arg1("arg1");
+  std::string arg2("arg2");
+  std::string val1("val1");
+  std::string val2("val2");
+
+  BeginTrace();
+  TRACE_EVENT_COPY_INSTANT0("category", name1.c_str(),
+                            TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_COPY_BEGIN1("category", name2.c_str(),
+                          arg1.c_str(), 5);
+  TRACE_EVENT_COPY_END2("category", name3.c_str(),
+                        arg1.c_str(), val1,
+                        arg2.c_str(), val2);
+
+  // As per NormallyNoDeepCopy, modify the strings in place.
+  name1[0] = name2[0] = name3[0] = arg1[0] = arg2[0] = val1[0] = val2[0] = '@';
+
+  EndTraceAndFlush();
+
+  EXPECT_FALSE(FindTraceEntry(trace_parsed_, name1.c_str()));
+  EXPECT_FALSE(FindTraceEntry(trace_parsed_, name2.c_str()));
+  EXPECT_FALSE(FindTraceEntry(trace_parsed_, name3.c_str()));
+
+  const DictionaryValue* entry1 = FindTraceEntry(trace_parsed_, kOriginalName1);
+  const DictionaryValue* entry2 = FindTraceEntry(trace_parsed_, kOriginalName2);
+  const DictionaryValue* entry3 = FindTraceEntry(trace_parsed_, kOriginalName3);
+  ASSERT_TRUE(entry1);
+  ASSERT_TRUE(entry2);
+  ASSERT_TRUE(entry3);
+
+  int i;
+  EXPECT_FALSE(entry2->GetInteger("args.@rg1", &i));
+  EXPECT_TRUE(entry2->GetInteger("args.arg1", &i));
+  EXPECT_EQ(5, i);
+
+  std::string s;
+  EXPECT_TRUE(entry3->GetString("args.arg1", &s));
+  EXPECT_EQ("val1", s);
+  EXPECT_TRUE(entry3->GetString("args.arg2", &s));
+  EXPECT_EQ("val2", s);
+}
+
+// Test that TraceResultBuffer outputs the correct result whether it is added
+// in chunks or added all at once.
+TEST_F(TraceEventTestFixture, TraceResultBuffer) {
+  Clear();
+
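+  // TraceResultBuffer brackets the output with "[" and "]" and joins the
+  // fragments with commas.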
+  trace_buffer_.Start();
+  trace_buffer_.AddFragment("bla1");
+  trace_buffer_.AddFragment("bla2");
+  trace_buffer_.AddFragment("bla3,bla4");
+  trace_buffer_.Finish();
+  EXPECT_STREQ(json_output_.json_output.c_str(), "[bla1,bla2,bla3,bla4]");
+
+  Clear();
+
+  trace_buffer_.Start();
+  trace_buffer_.AddFragment("bla1,bla2,bla3,bla4");
+  trace_buffer_.Finish();
+  EXPECT_STREQ(json_output_.json_output.c_str(), "[bla1,bla2,bla3,bla4]");
+}
+
+// Test that trace_event parameters are not evaluated if the tracing
+// system is disabled.
+TEST_F(TraceEventTestFixture, TracingIsLazy) {
+  BeginTrace();
+
+  int a = 0;
+  TRACE_EVENT_INSTANT1("category", "test", TRACE_EVENT_SCOPE_THREAD, "a", a++);
+  EXPECT_EQ(1, a);
+
+  TraceLog::GetInstance()->SetDisabled();
+
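+  // Now that tracing is disabled, the "a++" argument must not be evaluated.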
+  TRACE_EVENT_INSTANT1("category", "test", TRACE_EVENT_SCOPE_THREAD, "a", a++);
+  EXPECT_EQ(1, a);
+
+  EndTraceAndFlush();
+}
+
+TEST_F(TraceEventTestFixture, TraceEnableDisable) {
+  TraceLog* trace_log = TraceLog::GetInstance();
+  TraceConfig tc_inc_all("*", "");
+  trace_log->SetEnabled(tc_inc_all, TraceLog::RECORDING_MODE);
+  EXPECT_TRUE(trace_log->IsEnabled());
+  trace_log->SetDisabled();
+  EXPECT_FALSE(trace_log->IsEnabled());
+
+  trace_log->SetEnabled(tc_inc_all, TraceLog::RECORDING_MODE);
+  EXPECT_TRUE(trace_log->IsEnabled());
+  const std::vector<std::string> empty;
+  trace_log->SetEnabled(TraceConfig(), TraceLog::RECORDING_MODE);
+  EXPECT_TRUE(trace_log->IsEnabled());
+  trace_log->SetDisabled();
+  EXPECT_FALSE(trace_log->IsEnabled());
+  trace_log->SetDisabled();
+  EXPECT_FALSE(trace_log->IsEnabled());
+}
+
+TEST_F(TraceEventTestFixture, TraceCategoriesAfterNestedEnable) {
+  TraceLog* trace_log = TraceLog::GetInstance();
+  trace_log->SetEnabled(TraceConfig("foo,bar", ""), TraceLog::RECORDING_MODE);
+  EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("foo"));
+  EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("bar"));
+  EXPECT_FALSE(*trace_log->GetCategoryGroupEnabled("baz"));
+  trace_log->SetEnabled(TraceConfig("foo2", ""), TraceLog::RECORDING_MODE);
+  EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("foo2"));
+  EXPECT_FALSE(*trace_log->GetCategoryGroupEnabled("baz"));
+  // The "" becomes the default catergory set when applied.
+  trace_log->SetEnabled(TraceConfig(), TraceLog::RECORDING_MODE);
+  EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("foo"));
+  EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("baz"));
+  EXPECT_STREQ(
+    "",
+    trace_log->GetCurrentTraceConfig().ToCategoryFilterString().c_str());
+  trace_log->SetDisabled();
+  trace_log->SetDisabled();
+  trace_log->SetDisabled();
+  EXPECT_FALSE(*trace_log->GetCategoryGroupEnabled("foo"));
+  EXPECT_FALSE(*trace_log->GetCategoryGroupEnabled("baz"));
+
+  trace_log->SetEnabled(TraceConfig("-foo,-bar", ""), TraceLog::RECORDING_MODE);
+  EXPECT_FALSE(*trace_log->GetCategoryGroupEnabled("foo"));
+  EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("baz"));
+  trace_log->SetEnabled(TraceConfig("moo", ""), TraceLog::RECORDING_MODE);
+  EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("baz"));
+  EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("moo"));
+  EXPECT_FALSE(*trace_log->GetCategoryGroupEnabled("foo"));
+  EXPECT_STREQ(
+    "-foo,-bar",
+    trace_log->GetCurrentTraceConfig().ToCategoryFilterString().c_str());
+  trace_log->SetDisabled();
+  trace_log->SetDisabled();
+
+  // Make sure disabled categories aren't cleared when the second config is set.
+  trace_log->SetEnabled(TraceConfig("disabled-by-default-cc,foo", ""),
+                        TraceLog::RECORDING_MODE);
+  EXPECT_FALSE(*trace_log->GetCategoryGroupEnabled("bar"));
+  trace_log->SetEnabled(TraceConfig("disabled-by-default-gpu", ""),
+                        TraceLog::RECORDING_MODE);
+  EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("disabled-by-default-cc"));
+  EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("disabled-by-default-gpu"));
+  EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("bar"));
+  EXPECT_STREQ(
+    "disabled-by-default-cc,disabled-by-default-gpu",
+    trace_log->GetCurrentTraceConfig().ToCategoryFilterString().c_str());
+  trace_log->SetDisabled();
+  trace_log->SetDisabled();
+}
+
+TEST_F(TraceEventTestFixture, TraceWithDefaultCategoryFilters) {
+  TraceLog* trace_log = TraceLog::GetInstance();
+
+  trace_log->SetEnabled(TraceConfig(), TraceLog::RECORDING_MODE);
+  CheckTraceDefaultCategoryFilters(*trace_log);
+  trace_log->SetDisabled();
+
+  trace_log->SetEnabled(TraceConfig("", ""), TraceLog::RECORDING_MODE);
+  CheckTraceDefaultCategoryFilters(*trace_log);
+  trace_log->SetDisabled();
+
+  trace_log->SetEnabled(TraceConfig("*", ""), TraceLog::RECORDING_MODE);
+  CheckTraceDefaultCategoryFilters(*trace_log);
+  trace_log->SetDisabled();
+
+  trace_log->SetEnabled(TraceConfig(""), TraceLog::RECORDING_MODE);
+  CheckTraceDefaultCategoryFilters(*trace_log);
+  trace_log->SetDisabled();
+}
+
+TEST_F(TraceEventTestFixture, TraceWithDisabledByDefaultCategoryFilters) {
+  TraceLog* trace_log = TraceLog::GetInstance();
+
+  trace_log->SetEnabled(TraceConfig("foo,disabled-by-default-foo", ""),
+                        TraceLog::RECORDING_MODE);
+  EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("foo"));
+  EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("disabled-by-default-foo"));
+  EXPECT_FALSE(*trace_log->GetCategoryGroupEnabled("bar"));
+  EXPECT_FALSE(*trace_log->GetCategoryGroupEnabled("disabled-by-default-bar"));
+  trace_log->SetDisabled();
+
+  // Enabling only the disabled-by-default-* category means the default ones
+  // are also enabled.
+  trace_log->SetEnabled(TraceConfig("disabled-by-default-foo", ""),
+                        TraceLog::RECORDING_MODE);
+  EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("disabled-by-default-foo"));
+  EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("foo"));
+  EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("bar"));
+  EXPECT_FALSE(*trace_log->GetCategoryGroupEnabled("disabled-by-default-bar"));
+  trace_log->SetDisabled();
+}
+
+class MyData : public ConvertableToTraceFormat {
+ public:
+  MyData() = default;
+  ~MyData() override = default;
+
+  void AppendAsTraceFormat(std::string* out) const override {
+    out->append("{\"foo\":1}");
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MyData);
+};
+
+TEST_F(TraceEventTestFixture, ConvertableTypes) {
+  TraceLog::GetInstance()->SetEnabled(TraceConfig(kRecordAllCategoryFilter, ""),
+                                      TraceLog::RECORDING_MODE);
+
+  std::unique_ptr<ConvertableToTraceFormat> data(new MyData());
+  std::unique_ptr<ConvertableToTraceFormat> data1(new MyData());
+  std::unique_ptr<ConvertableToTraceFormat> data2(new MyData());
+  TRACE_EVENT1("foo", "bar", "data", std::move(data));
+  TRACE_EVENT2("foo", "baz", "data1", std::move(data1), "data2",
+               std::move(data2));
+
+  // Check that std::unique_ptr<DerivedClassOfConvertable> is properly treated
+  // as convertable and not accidentally cast to bool.
+  std::unique_ptr<MyData> convertData1(new MyData());
+  std::unique_ptr<MyData> convertData2(new MyData());
+  std::unique_ptr<MyData> convertData3(new MyData());
+  std::unique_ptr<MyData> convertData4(new MyData());
+  TRACE_EVENT2("foo", "string_first", "str", "string value 1", "convert",
+               std::move(convertData1));
+  TRACE_EVENT2("foo", "string_second", "convert", std::move(convertData2),
+               "str", "string value 2");
+  TRACE_EVENT2("foo", "both_conv", "convert1", std::move(convertData3),
+               "convert2", std::move(convertData4));
+  EndTraceAndFlush();
+
+  // One arg version.
+  DictionaryValue* dict = FindNamePhase("bar", "X");
+  ASSERT_TRUE(dict);
+
+  const DictionaryValue* args_dict = nullptr;
+  dict->GetDictionary("args", &args_dict);
+  ASSERT_TRUE(args_dict);
+
+  const Value* value = nullptr;
+  const DictionaryValue* convertable_dict = nullptr;
+  EXPECT_TRUE(args_dict->Get("data", &value));
+  ASSERT_TRUE(value->GetAsDictionary(&convertable_dict));
+
+  int foo_val;
+  EXPECT_TRUE(convertable_dict->GetInteger("foo", &foo_val));
+  EXPECT_EQ(1, foo_val);
+
+  // Two arg version.
+  dict = FindNamePhase("baz", "X");
+  ASSERT_TRUE(dict);
+
+  args_dict = nullptr;
+  dict->GetDictionary("args", &args_dict);
+  ASSERT_TRUE(args_dict);
+
+  value = nullptr;
+  convertable_dict = nullptr;
+  EXPECT_TRUE(args_dict->Get("data1", &value));
+  ASSERT_TRUE(value->GetAsDictionary(&convertable_dict));
+
+  value = nullptr;
+  convertable_dict = nullptr;
+  EXPECT_TRUE(args_dict->Get("data2", &value));
+  ASSERT_TRUE(value->GetAsDictionary(&convertable_dict));
+
+  // Convertable with other types.
+  dict = FindNamePhase("string_first", "X");
+  ASSERT_TRUE(dict);
+
+  args_dict = nullptr;
+  dict->GetDictionary("args", &args_dict);
+  ASSERT_TRUE(args_dict);
+
+  std::string str_value;
+  EXPECT_TRUE(args_dict->GetString("str", &str_value));
+  EXPECT_STREQ("string value 1", str_value.c_str());
+
+  value = nullptr;
+  convertable_dict = nullptr;
+  foo_val = 0;
+  EXPECT_TRUE(args_dict->Get("convert", &value));
+  ASSERT_TRUE(value->GetAsDictionary(&convertable_dict));
+  EXPECT_TRUE(convertable_dict->GetInteger("foo", &foo_val));
+  EXPECT_EQ(1, foo_val);
+
+  dict = FindNamePhase("string_second", "X");
+  ASSERT_TRUE(dict);
+
+  args_dict = nullptr;
+  dict->GetDictionary("args", &args_dict);
+  ASSERT_TRUE(args_dict);
+
+  EXPECT_TRUE(args_dict->GetString("str", &str_value));
+  EXPECT_STREQ("string value 2", str_value.c_str());
+
+  value = nullptr;
+  convertable_dict = nullptr;
+  foo_val = 0;
+  EXPECT_TRUE(args_dict->Get("convert", &value));
+  ASSERT_TRUE(value->GetAsDictionary(&convertable_dict));
+  EXPECT_TRUE(convertable_dict->GetInteger("foo", &foo_val));
+  EXPECT_EQ(1, foo_val);
+
+  dict = FindNamePhase("both_conv", "X");
+  ASSERT_TRUE(dict);
+
+  args_dict = nullptr;
+  dict->GetDictionary("args", &args_dict);
+  ASSERT_TRUE(args_dict);
+
+  value = nullptr;
+  convertable_dict = nullptr;
+  foo_val = 0;
+  EXPECT_TRUE(args_dict->Get("convert1", &value));
+  ASSERT_TRUE(value->GetAsDictionary(&convertable_dict));
+  EXPECT_TRUE(args_dict->Get("convert2", &value));
+  ASSERT_TRUE(value->GetAsDictionary(&convertable_dict));
+}
+
+TEST_F(TraceEventTestFixture, PrimitiveArgs) {
+  TraceLog::GetInstance()->SetEnabled(TraceConfig(kRecordAllCategoryFilter, ""),
+                                      TraceLog::RECORDING_MODE);
+
+  TRACE_EVENT1("foo", "event1", "int_one", 1);
+  TRACE_EVENT1("foo", "event2", "int_neg_ten", -10);
+  TRACE_EVENT1("foo", "event3", "float_one", 1.0f);
+  TRACE_EVENT1("foo", "event4", "float_half", .5f);
+  TRACE_EVENT1("foo", "event5", "float_neghalf", -.5f);
+  TRACE_EVENT1("foo", "event6", "float_infinity",
+      std::numeric_limits<float>::infinity());
+  TRACE_EVENT1("foo", "event6b", "float_neg_infinity",
+      -std::numeric_limits<float>::infinity());
+  TRACE_EVENT1("foo", "event7", "double_nan",
+      std::numeric_limits<double>::quiet_NaN());
+  void* p = nullptr;
+  TRACE_EVENT1("foo", "event8", "pointer_null", p);
+  p = reinterpret_cast<void*>(0xbadf00d);
+  TRACE_EVENT1("foo", "event9", "pointer_badf00d", p);
+  TRACE_EVENT1("foo", "event10", "bool_true", true);
+  TRACE_EVENT1("foo", "event11", "bool_false", false);
+  TRACE_EVENT1("foo", "event12", "time_null",
+      base::Time());
+  TRACE_EVENT1("foo", "event13", "time_one",
+      base::Time::FromInternalValue(1));
+  TRACE_EVENT1("foo", "event14", "timeticks_null",
+      base::TimeTicks());
+  TRACE_EVENT1("foo", "event15", "timeticks_one",
+      base::TimeTicks::FromInternalValue(1));
+  EndTraceAndFlush();
+
+  const DictionaryValue* args_dict = nullptr;
+  DictionaryValue* dict = nullptr;
+  const Value* value = nullptr;
+  std::string str_value;
+  int int_value;
+  double double_value;
+  bool bool_value;
+
+  dict = FindNamePhase("event1", "X");
+  ASSERT_TRUE(dict);
+  dict->GetDictionary("args", &args_dict);
+  ASSERT_TRUE(args_dict);
+  EXPECT_TRUE(args_dict->GetInteger("int_one", &int_value));
+  EXPECT_EQ(1, int_value);
+
+  dict = FindNamePhase("event2", "X");
+  ASSERT_TRUE(dict);
+  dict->GetDictionary("args", &args_dict);
+  ASSERT_TRUE(args_dict);
+  EXPECT_TRUE(args_dict->GetInteger("int_neg_ten", &int_value));
+  EXPECT_EQ(-10, int_value);
+
+  // 1f must be serialized to JSON as "1.0" to be a double, not an int.
+  dict = FindNamePhase("event3", "X");
+  ASSERT_TRUE(dict);
+  dict->GetDictionary("args", &args_dict);
+  ASSERT_TRUE(args_dict);
+  EXPECT_TRUE(args_dict->Get("float_one", &value));
+  EXPECT_TRUE(value->is_double());
+  EXPECT_TRUE(value->GetAsDouble(&double_value));
+  EXPECT_EQ(1, double_value);
+
+  // .5f must be serialized to JSON as "0.5".
+  dict = FindNamePhase("event4", "X");
+  ASSERT_TRUE(dict);
+  dict->GetDictionary("args", &args_dict);
+  ASSERT_TRUE(args_dict);
+  EXPECT_TRUE(args_dict->Get("float_half", &value));
+  EXPECT_TRUE(value->is_double());
+  EXPECT_TRUE(value->GetAsDouble(&double_value));
+  EXPECT_EQ(0.5, double_value);
+
+  // -.5f must be serialized to JSON as "-0.5".
+  dict = FindNamePhase("event5", "X");
+  ASSERT_TRUE(dict);
+  dict->GetDictionary("args", &args_dict);
+  ASSERT_TRUE(args_dict);
+  EXPECT_TRUE(args_dict->Get("float_neghalf", &value));
+  EXPECT_TRUE(value->is_double());
+  EXPECT_TRUE(value->GetAsDouble(&double_value));
+  EXPECT_EQ(-0.5, double_value);
+
+  // Infinity is serialized to JSON as a string.
+  dict = FindNamePhase("event6", "X");
+  ASSERT_TRUE(dict);
+  dict->GetDictionary("args", &args_dict);
+  ASSERT_TRUE(args_dict);
+  EXPECT_TRUE(args_dict->GetString("float_infinity", &str_value));
+  EXPECT_STREQ("Infinity", str_value.c_str());
+  dict = FindNamePhase("event6b", "X");
+  ASSERT_TRUE(dict);
+  dict->GetDictionary("args", &args_dict);
+  ASSERT_TRUE(args_dict);
+  EXPECT_TRUE(args_dict->GetString("float_neg_infinity", &str_value));
+  EXPECT_STREQ("-Infinity", str_value.c_str());
+
+  // NaN is serialized to JSON as a string.
+  dict = FindNamePhase("event7", "X");
+  ASSERT_TRUE(dict);
+  dict->GetDictionary("args", &args_dict);
+  ASSERT_TRUE(args_dict);
+  EXPECT_TRUE(args_dict->GetString("double_nan", &str_value));
+  EXPECT_STREQ("NaN", str_value.c_str());
+
+  // NULL pointers should be serialized as "0x0".
+  dict = FindNamePhase("event8", "X");
+  ASSERT_TRUE(dict);
+  dict->GetDictionary("args", &args_dict);
+  ASSERT_TRUE(args_dict);
+  EXPECT_TRUE(args_dict->GetString("pointer_null", &str_value));
+  EXPECT_STREQ("0x0", str_value.c_str());
+
+  // Other pointers should be serialized as a hex string.
+  dict = FindNamePhase("event9", "X");
+  ASSERT_TRUE(dict);
+  dict->GetDictionary("args", &args_dict);
+  ASSERT_TRUE(args_dict);
+  EXPECT_TRUE(args_dict->GetString("pointer_badf00d", &str_value));
+  EXPECT_STREQ("0xbadf00d", str_value.c_str());
+
+  dict = FindNamePhase("event10", "X");
+  ASSERT_TRUE(dict);
+  dict->GetDictionary("args", &args_dict);
+  ASSERT_TRUE(args_dict);
+  EXPECT_TRUE(args_dict->GetBoolean("bool_true", &bool_value));
+  EXPECT_TRUE(bool_value);
+
+  dict = FindNamePhase("event11", "X");
+  ASSERT_TRUE(dict);
+  dict->GetDictionary("args", &args_dict);
+  ASSERT_TRUE(args_dict);
+  EXPECT_TRUE(args_dict->GetBoolean("bool_false", &bool_value));
+  EXPECT_FALSE(bool_value);
+
+  dict = FindNamePhase("event12", "X");
+  ASSERT_TRUE(dict);
+  dict->GetDictionary("args", &args_dict);
+  ASSERT_TRUE(args_dict);
+  EXPECT_TRUE(args_dict->GetInteger("time_null", &int_value));
+  EXPECT_EQ(0, int_value);
+
+  dict = FindNamePhase("event13", "X");
+  ASSERT_TRUE(dict);
+  dict->GetDictionary("args", &args_dict);
+  ASSERT_TRUE(args_dict);
+  EXPECT_TRUE(args_dict->GetInteger("time_one", &int_value));
+  EXPECT_EQ(1, int_value);
+
+  dict = FindNamePhase("event14", "X");
+  ASSERT_TRUE(dict);
+  dict->GetDictionary("args", &args_dict);
+  ASSERT_TRUE(args_dict);
+  EXPECT_TRUE(args_dict->GetInteger("timeticks_null", &int_value));
+  EXPECT_EQ(0, int_value);
+
+  dict = FindNamePhase("event15", "X");
+  ASSERT_TRUE(dict);
+  dict->GetDictionary("args", &args_dict);
+  ASSERT_TRUE(args_dict);
+  EXPECT_TRUE(args_dict->GetInteger("timeticks_one", &int_value));
+  EXPECT_EQ(1, int_value);
+}
+
+TEST_F(TraceEventTestFixture, NameIsEscaped) {
+  TraceLog::GetInstance()->SetEnabled(TraceConfig(kRecordAllCategoryFilter, ""),
+                                      TraceLog::RECORDING_MODE);
+  TRACE_EVENT0("category", "name\\with\\backspaces");
+  EndTraceAndFlush();
+
+  EXPECT_TRUE(FindMatchingValue("cat", "category"));
+  EXPECT_TRUE(FindMatchingValue("name", "name\\with\\backspaces"));
+}
+
+namespace {
+
+bool IsArgNameWhitelisted(const char* arg_name) {
+  return base::MatchPattern(arg_name, "granular_arg_whitelisted");
+}
+
+bool IsTraceEventArgsWhitelisted(const char* category_group_name,
+                                 const char* event_name,
+                                 ArgumentNameFilterPredicate* arg_filter) {
+  if (base::MatchPattern(category_group_name, "toplevel") &&
+      base::MatchPattern(event_name, "*")) {
+    return true;
+  }
+
+  if (base::MatchPattern(category_group_name, "benchmark") &&
+      base::MatchPattern(event_name, "granularly_whitelisted")) {
+    *arg_filter = base::Bind(&IsArgNameWhitelisted);
+    return true;
+  }
+
+  return false;
+}
+
+}  // namespace
+
+TEST_F(TraceEventTestFixture, ArgsWhitelisting) {
+  TraceLog::GetInstance()->SetArgumentFilterPredicate(
+      base::Bind(&IsTraceEventArgsWhitelisted));
+
+  TraceLog::GetInstance()->SetEnabled(
+    TraceConfig(kRecordAllCategoryFilter, "enable-argument-filter"),
+    TraceLog::RECORDING_MODE);
+
+  TRACE_EVENT1("toplevel", "event1", "int_one", 1);
+  TRACE_EVENT1("whitewashed", "event2", "int_two", 1);
+
+  TRACE_EVENT2("benchmark", "granularly_whitelisted",
+               "granular_arg_whitelisted", "whitelisted_value",
+               "granular_arg_blacklisted", "blacklisted_value");
+
+  EndTraceAndFlush();
+
+  const DictionaryValue* args_dict = nullptr;
+  DictionaryValue* dict = nullptr;
+  int int_value;
+
+  dict = FindNamePhase("event1", "X");
+  ASSERT_TRUE(dict);
+  dict->GetDictionary("args", &args_dict);
+  ASSERT_TRUE(args_dict);
+  EXPECT_TRUE(args_dict->GetInteger("int_one", &int_value));
+  EXPECT_EQ(1, int_value);
+
+  dict = FindNamePhase("event2", "X");
+  ASSERT_TRUE(dict);
+  dict->GetDictionary("args", &args_dict);
+  ASSERT_TRUE(args_dict);
+  EXPECT_FALSE(args_dict->GetInteger("int_two", &int_value));
+
+  std::string args_string;
+  EXPECT_TRUE(dict->GetString("args", &args_string));
+  EXPECT_EQ(args_string, "__stripped__");
+
+  dict = FindNamePhase("granularly_whitelisted", "X");
+  ASSERT_TRUE(dict);
+  dict->GetDictionary("args", &args_dict);
+  ASSERT_TRUE(args_dict);
+
+  EXPECT_TRUE(args_dict->GetString("granular_arg_whitelisted", &args_string));
+  EXPECT_EQ(args_string, "whitelisted_value");
+
+  EXPECT_TRUE(args_dict->GetString("granular_arg_blacklisted", &args_string));
+  EXPECT_EQ(args_string, "__stripped__");
+}
+
+TEST_F(TraceEventTestFixture, TraceBufferVectorReportFull) {
+  TraceLog* trace_log = TraceLog::GetInstance();
+  trace_log->SetEnabled(
+      TraceConfig(kRecordAllCategoryFilter, ""), TraceLog::RECORDING_MODE);
+  trace_log->logged_events_.reset(
+      TraceBuffer::CreateTraceBufferVectorOfSize(100));
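+  // Swap in a small vector-backed buffer (100 chunks, per the argument) so
+  // the loop below fills it quickly.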
+  do {
+    TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0(
+        "all", "with_timestamp", 0, 0, TimeTicks::Now());
+    TRACE_EVENT_END_WITH_ID_TID_AND_TIMESTAMP0(
+        "all", "with_timestamp", 0, 0, TimeTicks::Now());
+  } while (!trace_log->BufferIsFull());
+
+  EndTraceAndFlush();
+
+  const DictionaryValue* trace_full_metadata = nullptr;
+
+  trace_full_metadata = FindTraceEntry(trace_parsed_,
+                                       "overflowed_at_ts");
+  std::string phase;
+  double buffer_limit_reached_timestamp = 0;
+
+  EXPECT_TRUE(trace_full_metadata);
+  EXPECT_TRUE(trace_full_metadata->GetString("ph", &phase));
+  EXPECT_EQ("M", phase);
+  EXPECT_TRUE(trace_full_metadata->GetDouble(
+      "args.overflowed_at_ts", &buffer_limit_reached_timestamp));
+  EXPECT_DOUBLE_EQ(
+      static_cast<double>(
+          trace_log->buffer_limit_reached_timestamp_.ToInternalValue()),
+      buffer_limit_reached_timestamp);
+
+  // Test that buffer_limit_reached_timestamp's value is between the timestamp
+  // of the last trace event and current time.
+  DropTracedMetadataRecords();
+  const DictionaryValue* last_trace_event = nullptr;
+  double last_trace_event_timestamp = 0;
+  EXPECT_TRUE(trace_parsed_.GetDictionary(trace_parsed_.GetSize() - 1,
+                                          &last_trace_event));
+  EXPECT_TRUE(last_trace_event->GetDouble("ts", &last_trace_event_timestamp));
+  EXPECT_LE(last_trace_event_timestamp, buffer_limit_reached_timestamp);
+  EXPECT_LE(buffer_limit_reached_timestamp,
+            trace_log->OffsetNow().ToInternalValue());
+}
+
+TEST_F(TraceEventTestFixture, TraceBufferRingBufferGetReturnChunk) {
+  TraceLog::GetInstance()->SetEnabled(
+      TraceConfig(kRecordAllCategoryFilter, RECORD_CONTINUOUSLY),
+      TraceLog::RECORDING_MODE);
+  TraceBuffer* buffer = TraceLog::GetInstance()->trace_buffer();
+  size_t capacity = buffer->Capacity();
+  size_t num_chunks = capacity / TraceBufferChunk::kTraceBufferChunkSize;
+  uint32_t last_seq = 0;
+  size_t chunk_index;
+  EXPECT_EQ(0u, buffer->Size());
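+  // Take every chunk out of the ring buffer, checking that indices are
+  // sequential and sequence numbers increase monotonically.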
+
+  std::unique_ptr<TraceBufferChunk* []> chunks(
+      new TraceBufferChunk*[num_chunks]);
+  for (size_t i = 0; i < num_chunks; ++i) {
+    chunks[i] = buffer->GetChunk(&chunk_index).release();
+    EXPECT_TRUE(chunks[i]);
+    EXPECT_EQ(i, chunk_index);
+    EXPECT_GT(chunks[i]->seq(), last_seq);
+    EXPECT_EQ((i + 1) * TraceBufferChunk::kTraceBufferChunkSize,
+              buffer->Size());
+    last_seq = chunks[i]->seq();
+  }
+
+  // Ring buffer is never full.
+  EXPECT_FALSE(buffer->IsFull());
+
+  // Return all chunks in original order.
+  for (size_t i = 0; i < num_chunks; ++i)
+    buffer->ReturnChunk(i, std::unique_ptr<TraceBufferChunk>(chunks[i]));
+
+  // Should recycle the chunks in the returned order.
+  for (size_t i = 0; i < num_chunks; ++i) {
+    chunks[i] = buffer->GetChunk(&chunk_index).release();
+    EXPECT_TRUE(chunks[i]);
+    EXPECT_EQ(i, chunk_index);
+    EXPECT_GT(chunks[i]->seq(), last_seq);
+    last_seq = chunks[i]->seq();
+  }
+
+  // Return all chunks in reverse order.
+  for (size_t i = 0; i < num_chunks; ++i) {
+    buffer->ReturnChunk(num_chunks - i - 1, std::unique_ptr<TraceBufferChunk>(
+                                                chunks[num_chunks - i - 1]));
+  }
+
+  // Should recycle the chunks in the returned order.
+  for (size_t i = 0; i < num_chunks; ++i) {
+    chunks[i] = buffer->GetChunk(&chunk_index).release();
+    EXPECT_TRUE(chunks[i]);
+    EXPECT_EQ(num_chunks - i - 1, chunk_index);
+    EXPECT_GT(chunks[i]->seq(), last_seq);
+    last_seq = chunks[i]->seq();
+  }
+
+  for (size_t i = 0; i < num_chunks; ++i)
+    buffer->ReturnChunk(i, std::unique_ptr<TraceBufferChunk>(chunks[i]));
+
+  TraceLog::GetInstance()->SetDisabled();
+}
+
+TEST_F(TraceEventTestFixture, TraceBufferRingBufferHalfIteration) {
+  TraceLog::GetInstance()->SetEnabled(
+      TraceConfig(kRecordAllCategoryFilter, RECORD_CONTINUOUSLY),
+      TraceLog::RECORDING_MODE);
+  TraceBuffer* buffer = TraceLog::GetInstance()->trace_buffer();
+  size_t capacity = buffer->Capacity();
+  size_t num_chunks = capacity / TraceBufferChunk::kTraceBufferChunkSize;
+  size_t chunk_index;
+  EXPECT_EQ(0u, buffer->Size());
+  EXPECT_FALSE(buffer->NextChunk());
+
+  size_t half_chunks = num_chunks / 2;
+  std::unique_ptr<TraceBufferChunk* []> chunks(
+      new TraceBufferChunk*[half_chunks]);
+
+  for (size_t i = 0; i < half_chunks; ++i) {
+    chunks[i] = buffer->GetChunk(&chunk_index).release();
+    EXPECT_TRUE(chunks[i]);
+    EXPECT_EQ(i, chunk_index);
+  }
+  for (size_t i = 0; i < half_chunks; ++i)
+    buffer->ReturnChunk(i, std::unique_ptr<TraceBufferChunk>(chunks[i]));
+
+  for (size_t i = 0; i < half_chunks; ++i)
+    EXPECT_EQ(chunks[i], buffer->NextChunk());
+  EXPECT_FALSE(buffer->NextChunk());
+  TraceLog::GetInstance()->SetDisabled();
+}
+
+TEST_F(TraceEventTestFixture, TraceBufferRingBufferFullIteration) {
+  TraceLog::GetInstance()->SetEnabled(
+      TraceConfig(kRecordAllCategoryFilter, RECORD_CONTINUOUSLY),
+      TraceLog::RECORDING_MODE);
+  TraceBuffer* buffer = TraceLog::GetInstance()->trace_buffer();
+  size_t capacity = buffer->Capacity();
+  size_t num_chunks = capacity / TraceBufferChunk::kTraceBufferChunkSize;
+  size_t chunk_index;
+  EXPECT_EQ(0u, buffer->Size());
+  EXPECT_FALSE(buffer->NextChunk());
+
+  std::unique_ptr<TraceBufferChunk* []> chunks(
+      new TraceBufferChunk*[num_chunks]);
+
+  for (size_t i = 0; i < num_chunks; ++i) {
+    chunks[i] = buffer->GetChunk(&chunk_index).release();
+    EXPECT_TRUE(chunks[i]);
+    EXPECT_EQ(i, chunk_index);
+  }
+  for (size_t i = 0; i < num_chunks; ++i)
+    buffer->ReturnChunk(i, std::unique_ptr<TraceBufferChunk>(chunks[i]));
+
+  for (size_t i = 0; i < num_chunks; ++i)
+    EXPECT_TRUE(chunks[i] == buffer->NextChunk());
+  EXPECT_FALSE(buffer->NextChunk());
+  TraceLog::GetInstance()->SetDisabled();
+}
+
+TEST_F(TraceEventTestFixture, TraceRecordAsMuchAsPossibleMode) {
+  TraceLog::GetInstance()->SetEnabled(
+    TraceConfig(kRecordAllCategoryFilter, RECORD_AS_MUCH_AS_POSSIBLE),
+    TraceLog::RECORDING_MODE);
+  TraceBuffer* buffer = TraceLog::GetInstance()->trace_buffer();
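+  // RECORD_AS_MUCH_AS_POSSIBLE should allocate a much larger buffer than the
+  // default; the expected capacity below matches that big-buffer size.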
+  EXPECT_EQ(512000000UL, buffer->Capacity());
+  TraceLog::GetInstance()->SetDisabled();
+}
+
+void BlockUntilStopped(WaitableEvent* task_start_event,
+                       WaitableEvent* task_stop_event) {
+  task_start_event->Signal();
+  task_stop_event->Wait();
+}
+
+TEST_F(TraceEventTestFixture, SetCurrentThreadBlocksMessageLoopBeforeTracing) {
+  BeginTrace();
+
+  Thread thread("1");
+  WaitableEvent task_complete_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                                    WaitableEvent::InitialState::NOT_SIGNALED);
+  thread.Start();
+  thread.task_runner()->PostTask(
+      FROM_HERE, BindOnce(&TraceLog::SetCurrentThreadBlocksMessageLoop,
+                          Unretained(TraceLog::GetInstance())));
+
+  thread.task_runner()->PostTask(
+      FROM_HERE, BindOnce(&TraceWithAllMacroVariants, &task_complete_event));
+  task_complete_event.Wait();
+
+  WaitableEvent task_start_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                                 WaitableEvent::InitialState::NOT_SIGNALED);
+  WaitableEvent task_stop_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                                WaitableEvent::InitialState::NOT_SIGNALED);
+  thread.task_runner()->PostTask(
+      FROM_HERE,
+      BindOnce(&BlockUntilStopped, &task_start_event, &task_stop_event));
+  task_start_event.Wait();
+
+  EndTraceAndFlush();
+  ValidateAllTraceMacrosCreatedData(trace_parsed_);
+
+  task_stop_event.Signal();
+  thread.Stop();
+}
+
+TEST_F(TraceEventTestFixture, ConvertTraceConfigToInternalOptions) {
+  TraceLog* trace_log = TraceLog::GetInstance();
+  EXPECT_EQ(TraceLog::kInternalRecordUntilFull,
+            trace_log->GetInternalOptionsFromTraceConfig(
+                TraceConfig(kRecordAllCategoryFilter, RECORD_UNTIL_FULL)));
+
+  EXPECT_EQ(TraceLog::kInternalRecordContinuously,
+            trace_log->GetInternalOptionsFromTraceConfig(
+                TraceConfig(kRecordAllCategoryFilter, RECORD_CONTINUOUSLY)));
+
+  EXPECT_EQ(TraceLog::kInternalEchoToConsole,
+            trace_log->GetInternalOptionsFromTraceConfig(
+                TraceConfig(kRecordAllCategoryFilter, ECHO_TO_CONSOLE)));
+
+  EXPECT_EQ(TraceLog::kInternalEchoToConsole,
+            trace_log->GetInternalOptionsFromTraceConfig(
+                TraceConfig("*", "trace-to-console,enable-systrace")));
+}
+
+void SetBlockingFlagAndBlockUntilStopped(WaitableEvent* task_start_event,
+                                         WaitableEvent* task_stop_event) {
+  TraceLog::GetInstance()->SetCurrentThreadBlocksMessageLoop();
+  BlockUntilStopped(task_start_event, task_stop_event);
+}
+
+TEST_F(TraceEventTestFixture, SetCurrentThreadBlocksMessageLoopAfterTracing) {
+  BeginTrace();
+
+  Thread thread("1");
+  WaitableEvent task_complete_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                                    WaitableEvent::InitialState::NOT_SIGNALED);
+  thread.Start();
+
+  thread.task_runner()->PostTask(
+      FROM_HERE, BindOnce(&TraceWithAllMacroVariants, &task_complete_event));
+  task_complete_event.Wait();
+
+  WaitableEvent task_start_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                                 WaitableEvent::InitialState::NOT_SIGNALED);
+  WaitableEvent task_stop_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                                WaitableEvent::InitialState::NOT_SIGNALED);
+  thread.task_runner()->PostTask(FROM_HERE,
+                                 BindOnce(&SetBlockingFlagAndBlockUntilStopped,
+                                          &task_start_event, &task_stop_event));
+  task_start_event.Wait();
+
+  EndTraceAndFlush();
+  ValidateAllTraceMacrosCreatedData(trace_parsed_);
+
+  task_stop_event.Signal();
+  thread.Stop();
+}
+
+TEST_F(TraceEventTestFixture, ThreadOnceBlocking) {
+  BeginTrace();
+
+  Thread thread("1");
+  WaitableEvent task_complete_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                                    WaitableEvent::InitialState::NOT_SIGNALED);
+  thread.Start();
+
+  thread.task_runner()->PostTask(
+      FROM_HERE, BindOnce(&TraceWithAllMacroVariants, &task_complete_event));
+  task_complete_event.Wait();
+
+  WaitableEvent task_start_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                                 WaitableEvent::InitialState::NOT_SIGNALED);
+  WaitableEvent task_stop_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                                WaitableEvent::InitialState::NOT_SIGNALED);
+  thread.task_runner()->PostTask(
+      FROM_HERE,
+      BindOnce(&BlockUntilStopped, &task_start_event, &task_stop_event));
+  task_start_event.Wait();
+
+  // The thread will time out in this flush.
+  EndTraceAndFlushInThreadWithMessageLoop();
+  Clear();
+
+  // Let the thread's message loop continue to spin.
+  task_stop_event.Signal();
+
+  // The following sequence ensures that the FlushCurrentThread task has been
+  // executed in the thread before continuing.
+  thread.task_runner()->PostTask(
+      FROM_HERE,
+      BindOnce(&BlockUntilStopped, &task_start_event, &task_stop_event));
+  task_start_event.Wait();
+  task_stop_event.Signal();
+  Clear();
+
+  // TraceLog should discover the generation mismatch and recover the
+  // thread-local buffer for the thread without any error.
+  BeginTrace();
+  thread.task_runner()->PostTask(
+      FROM_HERE, BindOnce(&TraceWithAllMacroVariants, &task_complete_event));
+  task_complete_event.Wait();
+  EndTraceAndFlushInThreadWithMessageLoop();
+  ValidateAllTraceMacrosCreatedData(trace_parsed_);
+}
+
+std::string* g_log_buffer = nullptr;
+bool MockLogMessageHandler(int, const char*, int, size_t,
+                           const std::string& str) {
+  if (!g_log_buffer)
+    g_log_buffer = new std::string();
+  g_log_buffer->append(str);
+  return false;
+}
+
+TEST_F(TraceEventTestFixture, EchoToConsole) {
+  logging::LogMessageHandlerFunction old_log_message_handler =
+      logging::GetLogMessageHandler();
+  logging::SetLogMessageHandler(MockLogMessageHandler);
+
+  TraceLog::GetInstance()->SetEnabled(
+      TraceConfig(kRecordAllCategoryFilter, ECHO_TO_CONSOLE),
+      TraceLog::RECORDING_MODE);
+  TRACE_EVENT_BEGIN0("a", "begin_end");
+  {
+    TRACE_EVENT0("b", "duration");
+    TRACE_EVENT0("b1", "duration1");
+  }
+  TRACE_EVENT_INSTANT0("c", "instant", TRACE_EVENT_SCOPE_GLOBAL);
+  TRACE_EVENT_END0("a", "begin_end");
+
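+  // "\x1b" below is the ESC byte that begins the terminal color sequence
+  // emitted after each event name.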
+  EXPECT_NE(std::string::npos, g_log_buffer->find("begin_end[a]\x1b"));
+  EXPECT_NE(std::string::npos, g_log_buffer->find("| duration[b]\x1b"));
+  EXPECT_NE(std::string::npos, g_log_buffer->find("| | duration1[b1]\x1b"));
+  EXPECT_NE(std::string::npos, g_log_buffer->find("| | duration1[b1] ("));
+  EXPECT_NE(std::string::npos, g_log_buffer->find("| duration[b] ("));
+  EXPECT_NE(std::string::npos, g_log_buffer->find("| instant[c]\x1b"));
+  EXPECT_NE(std::string::npos, g_log_buffer->find("begin_end[a] ("));
+
+  EndTraceAndFlush();
+  delete g_log_buffer;
+  logging::SetLogMessageHandler(old_log_message_handler);
+  g_log_buffer = nullptr;
+}
+
+bool LogMessageHandlerWithTraceEvent(int, const char*, int, size_t,
+                                     const std::string&) {
+  TRACE_EVENT0("log", "trace_event");
+  return false;
+}
+
+TEST_F(TraceEventTestFixture, EchoToConsoleTraceEventRecursion) {
+  logging::LogMessageHandlerFunction old_log_message_handler =
+      logging::GetLogMessageHandler();
+  logging::SetLogMessageHandler(LogMessageHandlerWithTraceEvent);
+
+  TraceLog::GetInstance()->SetEnabled(
+      TraceConfig(kRecordAllCategoryFilter, ECHO_TO_CONSOLE),
+      TraceLog::RECORDING_MODE);
+  {
+    // This should not cause deadlock or infinite recursion.
+    TRACE_EVENT0("b", "duration");
+  }
+
+  EndTraceAndFlush();
+  logging::SetLogMessageHandler(old_log_message_handler);
+}
+
+TEST_F(TraceEventTestFixture, TimeOffset) {
+  BeginTrace();
+  // Let TraceLog timer start from 0.
+  TimeDelta time_offset = TimeTicks::Now() - TimeTicks();
+  TraceLog::GetInstance()->SetTimeOffset(time_offset);
+
+  {
+    TRACE_EVENT0("all", "duration1");
+    TRACE_EVENT0("all", "duration2");
+  }
+  TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0(
+      "all", "with_timestamp", 0, 0, TimeTicks::Now());
+  TRACE_EVENT_END_WITH_ID_TID_AND_TIMESTAMP0(
+      "all", "with_timestamp", 0, 0, TimeTicks::Now());
+
+  EndTraceAndFlush();
+  DropTracedMetadataRecords();
+
+  double end_time = static_cast<double>(
+      (TimeTicks::Now() - time_offset).ToInternalValue());
+  double last_timestamp = 0;
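+  // After shifting by the offset, timestamps must be non-decreasing and no
+  // later than the offset-adjusted current time.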
+  for (size_t i = 0; i < trace_parsed_.GetSize(); ++i) {
+    const DictionaryValue* item;
+    EXPECT_TRUE(trace_parsed_.GetDictionary(i, &item));
+    double timestamp;
+    EXPECT_TRUE(item->GetDouble("ts", &timestamp));
+    EXPECT_GE(timestamp, last_timestamp);
+    EXPECT_LE(timestamp, end_time);
+    last_timestamp = timestamp;
+  }
+}
+
+TEST_F(TraceEventTestFixture, TraceFilteringMode) {
+  const char config_json[] =
+      "{"
+      "  \"event_filters\": ["
+      "     {"
+      "       \"filter_predicate\": \"testing_predicate\", "
+      "       \"included_categories\": [\"*\"]"
+      "     }"
+      "  ]"
+      "}";
+
+  // Run RECORDING_MODE within FILTERING_MODE:
+  TestEventFilter::HitsCounter filter_hits_counter;
+  TestEventFilter::set_filter_return_value(true);
+  TraceLog::GetInstance()->SetFilterFactoryForTesting(TestEventFilter::Factory);
+
+  // Only filtering mode is enabled with test filters.
+  TraceLog::GetInstance()->SetEnabled(TraceConfig(config_json),
+                                      TraceLog::FILTERING_MODE);
+  EXPECT_EQ(TraceLog::FILTERING_MODE, TraceLog::GetInstance()->enabled_modes());
+  {
+    void* ptr = this;
+    TRACE_EVENT0("c0", "name0");
+    TRACE_EVENT_ASYNC_BEGIN0("c1", "name1", ptr);
+    TRACE_EVENT_INSTANT0("c0", "name0", TRACE_EVENT_SCOPE_THREAD);
+    TRACE_EVENT_ASYNC_END0("c1", "name1", ptr);
+  }
+
+  // Now enable recording mode while filtering mode is still on.
+  TraceLog::GetInstance()->SetEnabled(TraceConfig("", ""),
+                                      TraceLog::RECORDING_MODE);
+  EXPECT_EQ(TraceLog::RECORDING_MODE | TraceLog::FILTERING_MODE,
+            TraceLog::GetInstance()->enabled_modes());
+  {
+    TRACE_EVENT0("c2", "name2");
+  }
+  // Only recording mode is disabled and filtering mode will continue to run.
+  TraceLog::GetInstance()->SetDisabled(TraceLog::RECORDING_MODE);
+  EXPECT_EQ(TraceLog::FILTERING_MODE, TraceLog::GetInstance()->enabled_modes());
+
+  {
+    TRACE_EVENT0("c0", "name0");
+  }
+  // Filtering mode is disabled and no tracing mode should be enabled.
+  TraceLog::GetInstance()->SetDisabled(TraceLog::FILTERING_MODE);
+  EXPECT_EQ(0, TraceLog::GetInstance()->enabled_modes());
+
+  EndTraceAndFlush();
+  EXPECT_FALSE(FindMatchingValue("cat", "c0"));
+  EXPECT_FALSE(FindMatchingValue("cat", "c1"));
+  EXPECT_FALSE(FindMatchingValue("name", "name0"));
+  EXPECT_FALSE(FindMatchingValue("name", "name1"));
+  EXPECT_TRUE(FindMatchingValue("cat", "c2"));
+  EXPECT_TRUE(FindMatchingValue("name", "name2"));
+  EXPECT_EQ(6u, filter_hits_counter.filter_trace_event_hit_count);
+  EXPECT_EQ(3u, filter_hits_counter.end_event_hit_count);
+  Clear();
+  filter_hits_counter.Reset();
+
+  // Run FILTERING_MODE within RECORDING_MODE:
+  // Only recording mode is enabled and all events must be recorded.
+  TraceLog::GetInstance()->SetEnabled(TraceConfig("", ""),
+                                      TraceLog::RECORDING_MODE);
+  EXPECT_EQ(TraceLog::RECORDING_MODE, TraceLog::GetInstance()->enabled_modes());
+  {
+    TRACE_EVENT0("c0", "name0");
+  }
+
+  // Filtering mode is also enabled and all events must be filtered out.
+  TestEventFilter::set_filter_return_value(false);
+  TraceLog::GetInstance()->SetEnabled(TraceConfig(config_json),
+                                      TraceLog::FILTERING_MODE);
+  EXPECT_EQ(TraceLog::RECORDING_MODE | TraceLog::FILTERING_MODE,
+            TraceLog::GetInstance()->enabled_modes());
+  {
+    TRACE_EVENT0("c1", "name1");
+  }
+  // Only filtering mode is disabled and recording mode should continue to run
+  // with all events being recorded.
+  TraceLog::GetInstance()->SetDisabled(TraceLog::FILTERING_MODE);
+  EXPECT_EQ(TraceLog::RECORDING_MODE, TraceLog::GetInstance()->enabled_modes());
+
+  {
+    TRACE_EVENT0("c2", "name2");
+  }
+  // Recording mode is disabled and no tracing mode should be enabled.
+  TraceLog::GetInstance()->SetDisabled(TraceLog::RECORDING_MODE);
+  EXPECT_EQ(0, TraceLog::GetInstance()->enabled_modes());
+
+  EndTraceAndFlush();
+  EXPECT_TRUE(FindMatchingValue("cat", "c0"));
+  EXPECT_TRUE(FindMatchingValue("cat", "c2"));
+  EXPECT_TRUE(FindMatchingValue("name", "name0"));
+  EXPECT_TRUE(FindMatchingValue("name", "name2"));
+  EXPECT_FALSE(FindMatchingValue("cat", "c1"));
+  EXPECT_FALSE(FindMatchingValue("name", "name1"));
+  EXPECT_EQ(1u, filter_hits_counter.filter_trace_event_hit_count);
+  EXPECT_EQ(1u, filter_hits_counter.end_event_hit_count);
+  Clear();
+}
+
+TEST_F(TraceEventTestFixture, EventFiltering) {
+  const char config_json[] =
+      "{"
+      "  \"included_categories\": ["
+      "    \"filtered_cat\","
+      "    \"unfiltered_cat\","
+      "    \"" TRACE_DISABLED_BY_DEFAULT("filtered_cat") "\","
+      "    \"" TRACE_DISABLED_BY_DEFAULT("unfiltered_cat") "\"],"
+      "  \"event_filters\": ["
+      "     {"
+      "       \"filter_predicate\": \"testing_predicate\", "
+      "       \"included_categories\": ["
+      "         \"filtered_cat\","
+      "         \"" TRACE_DISABLED_BY_DEFAULT("filtered_cat") "\"]"
+      "     }"
+      "    "
+      "  ]"
+      "}";
+
+  TestEventFilter::HitsCounter filter_hits_counter;
+  TestEventFilter::set_filter_return_value(true);
+  TraceLog::GetInstance()->SetFilterFactoryForTesting(TestEventFilter::Factory);
+
+  TraceConfig trace_config(config_json);
+  TraceLog::GetInstance()->SetEnabled(
+      trace_config, TraceLog::RECORDING_MODE | TraceLog::FILTERING_MODE);
+  ASSERT_TRUE(TraceLog::GetInstance()->IsEnabled());
+
+  TRACE_EVENT0("filtered_cat", "a snake");
+  TRACE_EVENT0("filtered_cat", "a mushroom");
+  TRACE_EVENT0("unfiltered_cat", "a horse");
+
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("filtered_cat"), "a dog");
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("unfiltered_cat"), "a pony");
+
+  // This is scoped so we can test the end event being filtered.
+  { TRACE_EVENT0("filtered_cat", "another cat whoa"); }
+
+  EndTraceAndFlush();
+
+  EXPECT_EQ(4u, filter_hits_counter.filter_trace_event_hit_count);
+  EXPECT_EQ(1u, filter_hits_counter.end_event_hit_count);
+}
+
+TEST_F(TraceEventTestFixture, EventWhitelistFiltering) {
+  std::string config_json = StringPrintf(
+      "{"
+      "  \"included_categories\": ["
+      "    \"filtered_cat\","
+      "    \"unfiltered_cat\","
+      "    \"" TRACE_DISABLED_BY_DEFAULT("filtered_cat") "\"],"
+      "  \"event_filters\": ["
+      "     {"
+      "       \"filter_predicate\": \"%s\", "
+      "       \"included_categories\": ["
+      "         \"filtered_cat\","
+      "         \"" TRACE_DISABLED_BY_DEFAULT("*") "\"], "
+      "       \"filter_args\": {"
+      "           \"event_name_whitelist\": [\"a snake\", \"a dog\"]"
+      "         }"
+      "     }"
+      "    "
+      "  ]"
+      "}",
+      EventNameFilter::kName);
+
+  TraceConfig trace_config(config_json);
+  TraceLog::GetInstance()->SetEnabled(
+      trace_config, TraceLog::RECORDING_MODE | TraceLog::FILTERING_MODE);
+  EXPECT_TRUE(TraceLog::GetInstance()->IsEnabled());
+
+  TRACE_EVENT0("filtered_cat", "a snake");
+  TRACE_EVENT0("filtered_cat", "a mushroom");
+  TRACE_EVENT0("unfiltered_cat", "a cat");
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("filtered_cat"), "a dog");
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("filtered_cat"), "a pony");
+
+  EndTraceAndFlush();
+
+  EXPECT_TRUE(FindMatchingValue("name", "a snake"));
+  EXPECT_FALSE(FindMatchingValue("name", "a mushroom"));
+  EXPECT_TRUE(FindMatchingValue("name", "a cat"));
+  EXPECT_TRUE(FindMatchingValue("name", "a dog"));
+  EXPECT_FALSE(FindMatchingValue("name", "a pony"));
+}
+
+TEST_F(TraceEventTestFixture, HeapProfilerFiltering) {
+  std::string config_json = StringPrintf(
+      "{"
+      "  \"included_categories\": ["
+      "    \"filtered_cat\","
+      "    \"unfiltered_cat\","
+      "    \"" TRACE_DISABLED_BY_DEFAULT("filtered_cat") "\","
+      "    \"" TRACE_DISABLED_BY_DEFAULT("unfiltered_cat") "\"],"
+      "  \"excluded_categories\": [\"excluded_cat\"],"
+      "  \"event_filters\": ["
+      "     {"
+      "       \"filter_predicate\": \"%s\", "
+      "       \"included_categories\": ["
+      "         \"*\","
+      "         \"" TRACE_DISABLED_BY_DEFAULT("filtered_cat") "\"]"
+      "     }"
+      "  ]"
+      "}",
+      HeapProfilerEventFilter::kName);
+
+  TraceConfig trace_config(config_json);
+  TraceLog::GetInstance()->SetEnabled(
+      trace_config, TraceLog::RECORDING_MODE | TraceLog::FILTERING_MODE);
+  EXPECT_TRUE(TraceLog::GetInstance()->IsEnabled());
+
+  TRACE_EVENT0("filtered_cat", "a snake");
+  TRACE_EVENT0("excluded_cat", "a mushroom");
+  TRACE_EVENT0("unfiltered_cat", "a cat");
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("filtered_cat"), "a dog");
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("unfiltered_cat"), "a pony");
+
+  EndTraceAndFlush();
+
+  // The predicate should not change the behavior of the trace events.
+  EXPECT_TRUE(FindMatchingValue("name", "a snake"));
+  EXPECT_FALSE(FindMatchingValue("name", "a mushroom"));
+  EXPECT_TRUE(FindMatchingValue("name", "a cat"));
+  EXPECT_TRUE(FindMatchingValue("name", "a dog"));
+  EXPECT_TRUE(FindMatchingValue("name", "a pony"));
+}
+
+TEST_F(TraceEventTestFixture, ClockSyncEventsAreAlwaysAddedToTrace) {
+  BeginSpecificTrace("-*");
+  TRACE_EVENT_CLOCK_SYNC_RECEIVER(1);
+  EndTraceAndFlush();
+  EXPECT_TRUE(FindNamePhase("clock_sync", "c"));
+}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/trace_log.cc b/base/trace_event/trace_log.cc
new file mode 100644
index 0000000..338e0f6
--- /dev/null
+++ b/base/trace_event/trace_log.cc
@@ -0,0 +1,1784 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_log.h"
+
+#include <algorithm>
+#include <cmath>
+#include <memory>
+#include <utility>
+
+#include "base/base_switches.h"
+#include "base/bind.h"
+#include "base/command_line.h"
+#include "base/debug/leak_annotations.h"
+#include "base/location.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/memory/ref_counted_memory.h"
+#include "base/message_loop/message_loop.h"
+#include "base/message_loop/message_loop_current.h"
+#include "base/no_destructor.h"
+#include "base/process/process_info.h"
+#include "base/process/process_metrics.h"
+#include "base/stl_util.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_tokenizer.h"
+#include "base/strings/stringprintf.h"
+#include "base/sys_info.h"
+#include "base/task_scheduler/post_task.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread_id_name_manager.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/time/time.h"
+#include "base/trace_event/category_registry.h"
+#include "base/trace_event/event_name_filter.h"
+#include "base/trace_event/heap_profiler.h"
+#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
+#include "base/trace_event/heap_profiler_event_filter.h"
+#include "base/trace_event/memory_dump_manager.h"
+#include "base/trace_event/memory_dump_provider.h"
+#include "base/trace_event/process_memory_dump.h"
+#include "base/trace_event/trace_buffer.h"
+#include "base/trace_event/trace_event.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include "base/trace_event/trace_event_etw_export_win.h"
+#endif
+
+#if defined(OS_ANDROID)
+// The linker assigns the virtual address of the start of the current library
+// to this symbol.
+extern char __executable_start;
+#endif
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+// Controls the number of trace events we will buffer in-memory
+// before throwing them away.
+const size_t kTraceBufferChunkSize = TraceBufferChunk::kTraceBufferChunkSize;
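+// The buffer capacities below are expressed as total event counts divided by
+// the per-chunk event count, yielding a number of chunks.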
+
+const size_t kTraceEventVectorBigBufferChunks =
+    512000000 / kTraceBufferChunkSize;
+static_assert(
+    kTraceEventVectorBigBufferChunks <= TraceBufferChunk::kMaxChunkIndex,
+    "Too many big buffer chunks");
+const size_t kTraceEventVectorBufferChunks = 256000 / kTraceBufferChunkSize;
+static_assert(
+    kTraceEventVectorBufferChunks <= TraceBufferChunk::kMaxChunkIndex,
+    "Too many vector buffer chunks");
+const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4;
+
+// ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events.
+const size_t kEchoToConsoleTraceEventBufferChunks = 256;
+
+const size_t kTraceEventBufferSizeInBytes = 100 * 1024;
+const int kThreadFlushTimeoutMs = 3000;
+
+TraceLog* g_trace_log_for_testing = nullptr;
+
+#define MAX_TRACE_EVENT_FILTERS 32
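+// This matches the width of the per-category filter bitmap (a uint32_t; see
+// ForEachCategoryFilter below), with one bit per installed filter.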
+
+// List of TraceEventFilter objects from the most recent tracing session.
+std::vector<std::unique_ptr<TraceEventFilter>>& GetCategoryGroupFilters() {
+  static auto* filters = new std::vector<std::unique_ptr<TraceEventFilter>>();
+  return *filters;
+}
+
+ThreadTicks ThreadNow() {
+  return ThreadTicks::IsSupported()
+             ? base::subtle::ThreadTicksNowIgnoringOverride()
+             : ThreadTicks();
+}
+
+template <typename T>
+void InitializeMetadataEvent(TraceEvent* trace_event,
+                             int thread_id,
+                             const char* metadata_name,
+                             const char* arg_name,
+                             const T& value) {
+  if (!trace_event)
+    return;
+
+  int num_args = 1;
+  unsigned char arg_type;
+  unsigned long long arg_value;
+  ::trace_event_internal::SetTraceValue(value, &arg_type, &arg_value);
+  trace_event->Initialize(
+      thread_id,
+      TimeTicks(),
+      ThreadTicks(),
+      TRACE_EVENT_PHASE_METADATA,
+      CategoryRegistry::kCategoryMetadata->state_ptr(),
+      metadata_name,
+      trace_event_internal::kGlobalScope,  // scope
+      trace_event_internal::kNoId,  // id
+      trace_event_internal::kNoId,  // bind_id
+      num_args,
+      &arg_name,
+      &arg_type,
+      &arg_value,
+      nullptr,
+      TRACE_EVENT_FLAG_NONE);
+}
+
+class AutoThreadLocalBoolean {
+ public:
+  explicit AutoThreadLocalBoolean(ThreadLocalBoolean* thread_local_boolean)
+      : thread_local_boolean_(thread_local_boolean) {
+    DCHECK(!thread_local_boolean_->Get());
+    thread_local_boolean_->Set(true);
+  }
+  ~AutoThreadLocalBoolean() { thread_local_boolean_->Set(false); }
+
+ private:
+  ThreadLocalBoolean* thread_local_boolean_;
+  DISALLOW_COPY_AND_ASSIGN(AutoThreadLocalBoolean);
+};
+
+// Use this function instead of a TraceEventHandle constructor to keep the
+// overhead of the ScopedTracer (trace_event.h) constructor to a minimum.
+void MakeHandle(uint32_t chunk_seq,
+                size_t chunk_index,
+                size_t event_index,
+                TraceEventHandle* handle) {
+  DCHECK(chunk_seq);
+  DCHECK(chunk_index <= TraceBufferChunk::kMaxChunkIndex);
+  DCHECK(event_index < TraceBufferChunk::kTraceBufferChunkSize);
+  DCHECK(chunk_index <= std::numeric_limits<uint16_t>::max());
+  handle->chunk_seq = chunk_seq;
+  handle->chunk_index = static_cast<uint16_t>(chunk_index);
+  handle->event_index = static_cast<uint16_t>(event_index);
+}
+
+template <typename Function>
+void ForEachCategoryFilter(const unsigned char* category_group_enabled,
+                           Function filter_fn) {
+  const TraceCategory* category =
+      CategoryRegistry::GetCategoryByStatePtr(category_group_enabled);
+  uint32_t filter_bitmap = category->enabled_filters();
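+  // Bit |index| of the bitmap being set means the filter at
+  // GetCategoryGroupFilters()[index] applies to this category; shift the
+  // bitmap right as we walk the list.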
+  for (int index = 0; filter_bitmap != 0; filter_bitmap >>= 1, index++) {
+    if (filter_bitmap & 1 && GetCategoryGroupFilters()[index])
+      filter_fn(GetCategoryGroupFilters()[index].get());
+  }
+}
+
+}  // namespace
+
+// A helper class that allows the lock to be acquired in the middle of the
+// scope and unlocks at the end of the scope if it was locked.
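+//
+// Typical usage (see AddTraceEventWithThreadIdAndTimestamp below):
+//   OptionalAutoLock lock(&lock_);
+//   ...
+//   lock.EnsureAcquired();  // Only on the code path that needs the lock.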
+class TraceLog::OptionalAutoLock {
+ public:
+  explicit OptionalAutoLock(Lock* lock) : lock_(lock), locked_(false) {}
+
+  ~OptionalAutoLock() {
+    if (locked_)
+      lock_->Release();
+  }
+
+  void EnsureAcquired() {
+    if (!locked_) {
+      lock_->Acquire();
+      locked_ = true;
+    }
+  }
+
+ private:
+  Lock* lock_;
+  bool locked_;
+  DISALLOW_COPY_AND_ASSIGN(OptionalAutoLock);
+};
+
+class TraceLog::ThreadLocalEventBuffer
+    : public MessageLoopCurrent::DestructionObserver,
+      public MemoryDumpProvider {
+ public:
+  explicit ThreadLocalEventBuffer(TraceLog* trace_log);
+  ~ThreadLocalEventBuffer() override;
+
+  TraceEvent* AddTraceEvent(TraceEventHandle* handle);
+
+  TraceEvent* GetEventByHandle(TraceEventHandle handle) {
+    if (!chunk_ || handle.chunk_seq != chunk_->seq() ||
+        handle.chunk_index != chunk_index_) {
+      return nullptr;
+    }
+
+    return chunk_->GetEventAt(handle.event_index);
+  }
+
+  int generation() const { return generation_; }
+
+ private:
+  // MessageLoopCurrent::DestructionObserver
+  void WillDestroyCurrentMessageLoop() override;
+
+  // MemoryDumpProvider implementation.
+  bool OnMemoryDump(const MemoryDumpArgs& args,
+                    ProcessMemoryDump* pmd) override;
+
+  void FlushWhileLocked();
+
+  void CheckThisIsCurrentBuffer() const {
+    DCHECK(trace_log_->thread_local_event_buffer_.Get() == this);
+  }
+
+  // Since TraceLog is a leaky singleton, trace_log_ will always be valid
+  // as long as the thread exists.
+  TraceLog* trace_log_;
+  std::unique_ptr<TraceBufferChunk> chunk_;
+  size_t chunk_index_;
+  int generation_;
+
+  DISALLOW_COPY_AND_ASSIGN(ThreadLocalEventBuffer);
+};
+
+TraceLog::ThreadLocalEventBuffer::ThreadLocalEventBuffer(TraceLog* trace_log)
+    : trace_log_(trace_log),
+      chunk_index_(0),
+      generation_(trace_log->generation()) {
+  // ThreadLocalEventBuffer is created only if the thread has a message loop, so
+  // the following message_loop won't be NULL.
+  MessageLoop* message_loop = MessageLoop::current();
+  message_loop->AddDestructionObserver(this);
+
+  // This is to report the local memory usage when memory-infra is enabled.
+  MemoryDumpManager::GetInstance()->RegisterDumpProvider(
+      this, "ThreadLocalEventBuffer", ThreadTaskRunnerHandle::Get());
+
+  AutoLock lock(trace_log->lock_);
+  trace_log->thread_message_loops_.insert(message_loop);
+}
+
+TraceLog::ThreadLocalEventBuffer::~ThreadLocalEventBuffer() {
+  CheckThisIsCurrentBuffer();
+  MessageLoop::current()->RemoveDestructionObserver(this);
+  MemoryDumpManager::GetInstance()->UnregisterDumpProvider(this);
+
+  {
+    AutoLock lock(trace_log_->lock_);
+    FlushWhileLocked();
+    trace_log_->thread_message_loops_.erase(MessageLoop::current());
+  }
+  trace_log_->thread_local_event_buffer_.Set(nullptr);
+}
+
+TraceEvent* TraceLog::ThreadLocalEventBuffer::AddTraceEvent(
+    TraceEventHandle* handle) {
+  CheckThisIsCurrentBuffer();
+
+  if (chunk_ && chunk_->IsFull()) {
+    AutoLock lock(trace_log_->lock_);
+    FlushWhileLocked();
+    chunk_.reset();
+  }
+  if (!chunk_) {
+    AutoLock lock(trace_log_->lock_);
+    chunk_ = trace_log_->logged_events_->GetChunk(&chunk_index_);
+    trace_log_->CheckIfBufferIsFullWhileLocked();
+  }
+  if (!chunk_)
+    return nullptr;
+
+  size_t event_index;
+  TraceEvent* trace_event = chunk_->AddTraceEvent(&event_index);
+  if (trace_event && handle)
+    MakeHandle(chunk_->seq(), chunk_index_, event_index, handle);
+
+  return trace_event;
+}
+
+void TraceLog::ThreadLocalEventBuffer::WillDestroyCurrentMessageLoop() {
+  delete this;
+}
+
+bool TraceLog::ThreadLocalEventBuffer::OnMemoryDump(const MemoryDumpArgs& args,
+                                                    ProcessMemoryDump* pmd) {
+  if (!chunk_)
+    return true;
+  std::string dump_base_name = StringPrintf(
+      "tracing/thread_%d", static_cast<int>(PlatformThread::CurrentId()));
+  TraceEventMemoryOverhead overhead;
+  chunk_->EstimateTraceMemoryOverhead(&overhead);
+  overhead.DumpInto(dump_base_name.c_str(), pmd);
+  return true;
+}
+
+void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked() {
+  if (!chunk_)
+    return;
+
+  trace_log_->lock_.AssertAcquired();
+  if (trace_log_->CheckGeneration(generation_)) {
+    // Return the chunk to the buffer only if the generation matches.
+    trace_log_->logged_events_->ReturnChunk(chunk_index_, std::move(chunk_));
+  }
+  // Otherwise this method may be called from the destructor, or TraceLog will
+  // find the generation mismatch and delete this buffer soon.
+}
+
+void TraceLog::SetAddTraceEventOverride(
+    const AddTraceEventOverrideCallback& override) {
+  subtle::NoBarrier_Store(&trace_event_override_,
+                          reinterpret_cast<subtle::AtomicWord>(override));
+}
+
+struct TraceLog::RegisteredAsyncObserver {
+  explicit RegisteredAsyncObserver(WeakPtr<AsyncEnabledStateObserver> observer)
+      : observer(observer), task_runner(ThreadTaskRunnerHandle::Get()) {}
+  ~RegisteredAsyncObserver() = default;
+
+  WeakPtr<AsyncEnabledStateObserver> observer;
+  scoped_refptr<SequencedTaskRunner> task_runner;
+};
+
+TraceLogStatus::TraceLogStatus() : event_capacity(0), event_count(0) {}
+
+TraceLogStatus::~TraceLogStatus() = default;
+
+// static
+TraceLog* TraceLog::GetInstance() {
+  static base::NoDestructor<TraceLog> instance;
+  return instance.get();
+}
+
+// static
+void TraceLog::ResetForTesting() {
+  if (!g_trace_log_for_testing)
+    return;
+  CategoryRegistry::ResetForTesting();
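+  // Destroy and re-create the singleton in place so that pointers previously
+  // returned by GetInstance() remain valid.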
+  g_trace_log_for_testing->~TraceLog();
+  new (g_trace_log_for_testing) TraceLog;
+}
+
+TraceLog::TraceLog()
+    : enabled_modes_(0),
+      num_traces_recorded_(0),
+      dispatching_to_observer_list_(false),
+      process_sort_index_(0),
+      process_id_hash_(0),
+      process_id_(0),
+      trace_options_(kInternalRecordUntilFull),
+      trace_config_(TraceConfig()),
+      thread_shared_chunk_index_(0),
+      generation_(0),
+      use_worker_thread_(false),
+      trace_event_override_(0),
+      filter_factory_for_testing_(nullptr) {
+  CategoryRegistry::Initialize();
+
+#if defined(OS_NACL)  // NaCl shouldn't expose the process id.
+  SetProcessID(0);
+#else
+  SetProcessID(static_cast<int>(GetCurrentProcId()));
+#endif
+
+// Linux renderer processes and Android O processes are not allowed to read
+// the "/proc/stat" file; see crbug.com/788870.
+#if defined(OS_WIN) || (defined(OS_MACOSX) && !defined(OS_IOS))
+  process_creation_time_ = CurrentProcessInfo::CreationTime();
+#else
+  // Use approximate time when creation time is not available.
+  process_creation_time_ = TRACE_TIME_NOW();
+#endif
+
+  logged_events_.reset(CreateTraceBuffer());
+
+  MemoryDumpManager::GetInstance()->RegisterDumpProvider(this, "TraceLog",
+                                                         nullptr);
+  g_trace_log_for_testing = this;
+}
+
+TraceLog::~TraceLog() = default;
+
+void TraceLog::InitializeThreadLocalEventBufferIfSupported() {
+  // A ThreadLocalEventBuffer needs the message loop
+  // - to know when the thread exits;
+  // - to handle the final flush.
+  // For a thread without a message loop, or when the message loop may be
+  // blocked, trace events are added to the main buffer directly.
+  if (thread_blocks_message_loop_.Get() || !MessageLoopCurrent::IsSet())
+    return;
+  HEAP_PROFILER_SCOPED_IGNORE;
+  auto* thread_local_event_buffer = thread_local_event_buffer_.Get();
+  if (thread_local_event_buffer &&
+      !CheckGeneration(thread_local_event_buffer->generation())) {
+    delete thread_local_event_buffer;
+    thread_local_event_buffer = nullptr;
+  }
+  if (!thread_local_event_buffer) {
+    thread_local_event_buffer = new ThreadLocalEventBuffer(this);
+    thread_local_event_buffer_.Set(thread_local_event_buffer);
+  }
+}
+
+bool TraceLog::OnMemoryDump(const MemoryDumpArgs& args,
+                            ProcessMemoryDump* pmd) {
+  // TODO(ssid): Use MemoryDumpArgs to create light dumps when requested
+  // (crbug.com/499731).
+  TraceEventMemoryOverhead overhead;
+  overhead.Add(TraceEventMemoryOverhead::kOther, sizeof(*this));
+  {
+    AutoLock lock(lock_);
+    if (logged_events_)
+      logged_events_->EstimateTraceMemoryOverhead(&overhead);
+
+    for (auto& metadata_event : metadata_events_)
+      metadata_event->EstimateTraceMemoryOverhead(&overhead);
+  }
+  overhead.AddSelf();
+  overhead.DumpInto("tracing/main_trace_log", pmd);
+  return true;
+}
+
+const unsigned char* TraceLog::GetCategoryGroupEnabled(
+    const char* category_group) {
+  TraceLog* tracelog = GetInstance();
+  if (!tracelog) {
+    DCHECK(!CategoryRegistry::kCategoryAlreadyShutdown->is_enabled());
+    return CategoryRegistry::kCategoryAlreadyShutdown->state_ptr();
+  }
+  TraceCategory* category = CategoryRegistry::GetCategoryByName(category_group);
+  if (!category) {
+    // Slow path: in the case of a new category we have to repeat the check
+    // holding the lock, as multiple threads might have reached this point
+    // at the same time.
+    auto category_initializer = [](TraceCategory* category) {
+      TraceLog::GetInstance()->UpdateCategoryState(category);
+    };
+    AutoLock lock(tracelog->lock_);
+    CategoryRegistry::GetOrCreateCategoryLocked(
+        category_group, category_initializer, &category);
+  }
+  DCHECK(category->state_ptr());
+  return category->state_ptr();
+}
+
+const char* TraceLog::GetCategoryGroupName(
+    const unsigned char* category_group_enabled) {
+  return CategoryRegistry::GetCategoryByStatePtr(category_group_enabled)
+      ->name();
+}
+
+void TraceLog::UpdateCategoryState(TraceCategory* category) {
+  lock_.AssertAcquired();
+  DCHECK(category->is_valid());
+  unsigned char state_flags = 0;
+  if (enabled_modes_ & RECORDING_MODE &&
+      trace_config_.IsCategoryGroupEnabled(category->name())) {
+    state_flags |= TraceCategory::ENABLED_FOR_RECORDING;
+  }
+
+  // TODO(primiano): this is a temporary workaround for catapult:#2341,
+  // to guarantee that metadata events are always added even if the category
+  // filter is "-*". See crbug.com/618054 for more details and long-term fix.
+  if (enabled_modes_ & RECORDING_MODE &&
+      category == CategoryRegistry::kCategoryMetadata) {
+    state_flags |= TraceCategory::ENABLED_FOR_RECORDING;
+  }
+
+#if defined(OS_WIN)
+  if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled(
+          category->name())) {
+    state_flags |= TraceCategory::ENABLED_FOR_ETW_EXPORT;
+  }
+#endif
+
+  uint32_t enabled_filters_bitmap = 0;
+  int index = 0;
+  for (const auto& event_filter : enabled_event_filters_) {
+    if (event_filter.IsCategoryGroupEnabled(category->name())) {
+      state_flags |= TraceCategory::ENABLED_FOR_FILTERING;
+      DCHECK(GetCategoryGroupFilters()[index]);
+      enabled_filters_bitmap |= 1 << index;
+    }
+    if (index++ >= MAX_TRACE_EVENT_FILTERS) {
+      NOTREACHED();
+      break;
+    }
+  }
+  category->set_enabled_filters(enabled_filters_bitmap);
+  category->set_state(state_flags);
+}
+
+void TraceLog::UpdateCategoryRegistry() {
+  lock_.AssertAcquired();
+  CreateFiltersForTraceConfig();
+  for (TraceCategory& category : CategoryRegistry::GetAllCategories()) {
+    UpdateCategoryState(&category);
+  }
+}
+
+void TraceLog::CreateFiltersForTraceConfig() {
+  if (!(enabled_modes_ & FILTERING_MODE))
+    return;
+
+  // Filters have already been added, and tracing may be enabled. The filter
+  // list cannot be changed while trace events are using it.
+  if (GetCategoryGroupFilters().size())
+    return;
+
+  for (auto& filter_config : enabled_event_filters_) {
+    if (GetCategoryGroupFilters().size() >= MAX_TRACE_EVENT_FILTERS) {
+      NOTREACHED()
+          << "Too many trace event filters installed in the current session";
+      break;
+    }
+
+    std::unique_ptr<TraceEventFilter> new_filter;
+    const std::string& predicate_name = filter_config.predicate_name();
+    if (predicate_name == EventNameFilter::kName) {
+      auto whitelist = std::make_unique<std::unordered_set<std::string>>();
+      CHECK(filter_config.GetArgAsSet("event_name_whitelist", &*whitelist));
+      new_filter = std::make_unique<EventNameFilter>(std::move(whitelist));
+    } else if (predicate_name == HeapProfilerEventFilter::kName) {
+      new_filter = std::make_unique<HeapProfilerEventFilter>();
+    } else {
+      if (filter_factory_for_testing_)
+        new_filter = filter_factory_for_testing_(predicate_name);
+      CHECK(new_filter) << "Unknown trace filter " << predicate_name;
+    }
+    GetCategoryGroupFilters().push_back(std::move(new_filter));
+  }
+}
+
+void TraceLog::GetKnownCategoryGroups(
+    std::vector<std::string>* category_groups) {
+  for (const auto& category : CategoryRegistry::GetAllCategories()) {
+    if (!CategoryRegistry::IsBuiltinCategory(&category))
+      category_groups->push_back(category.name());
+  }
+}
+
+void TraceLog::SetEnabled(const TraceConfig& trace_config,
+                          uint8_t modes_to_enable) {
+  std::vector<EnabledStateObserver*> observer_list;
+  std::map<AsyncEnabledStateObserver*, RegisteredAsyncObserver> observer_map;
+  {
+    AutoLock lock(lock_);
+
+    // Can't enable tracing when Flush() is in progress.
+    DCHECK(!flush_task_runner_);
+
+    InternalTraceOptions new_options =
+        GetInternalOptionsFromTraceConfig(trace_config);
+
+    InternalTraceOptions old_options = trace_options();
+
+    if (dispatching_to_observer_list_) {
+      // TODO(ssid): Change to NOTREACHED after fixing crbug.com/625170.
+      DLOG(ERROR)
+          << "Cannot manipulate TraceLog::Enabled state from an observer.";
+      return;
+    }
+
+    // Clear all filters from the previous tracing session. These filters are
+    // not cleared at the end of tracing because threads that hit a trace event
+    // while tracing is being disabled could still be using them.
+    if (!enabled_modes_)
+      GetCategoryGroupFilters().clear();
+
+    // Update trace config for recording.
+    const bool already_recording = enabled_modes_ & RECORDING_MODE;
+    if (modes_to_enable & RECORDING_MODE) {
+      if (already_recording) {
+        // TODO(ssid): Stop supporting enabling of RECORDING_MODE when already
+        // enabled; crbug.com/625170.
+        DCHECK_EQ(new_options, old_options) << "Attempting to re-enable "
+                                               "tracing with a different set "
+                                               "of options.";
+        trace_config_.Merge(trace_config);
+      } else {
+        trace_config_ = trace_config;
+      }
+    }
+
+    // Update event filters only if filtering was not enabled.
+    if (modes_to_enable & FILTERING_MODE && enabled_event_filters_.empty()) {
+      DCHECK(!trace_config.event_filters().empty());
+      enabled_event_filters_ = trace_config.event_filters();
+    }
+    // Keep the |trace_config_| updated with only enabled filters in case anyone
+    // tries to read it using |GetCurrentTraceConfig| (even if filters are
+    // empty).
+    trace_config_.SetEventFilters(enabled_event_filters_);
+
+    enabled_modes_ |= modes_to_enable;
+    UpdateCategoryRegistry();
+
+    // Do not notify observers or create a trace buffer if only enabled for
+    // filtering, or if recording was already enabled.
+    if (!(modes_to_enable & RECORDING_MODE) || already_recording)
+      return;
+
+    if (new_options != old_options) {
+      subtle::NoBarrier_Store(&trace_options_, new_options);
+      UseNextTraceBuffer();
+    }
+
+    num_traces_recorded_++;
+
+    UpdateCategoryRegistry();
+
+    dispatching_to_observer_list_ = true;
+    observer_list = enabled_state_observer_list_;
+    observer_map = async_observers_;
+  }
+  // Notify observers outside the lock in case they trigger trace events.
+  for (EnabledStateObserver* observer : observer_list)
+    observer->OnTraceLogEnabled();
+  for (const auto& it : observer_map) {
+    it.second.task_runner->PostTask(
+        FROM_HERE, BindOnce(&AsyncEnabledStateObserver::OnTraceLogEnabled,
+                            it.second.observer));
+  }
+
+  {
+    AutoLock lock(lock_);
+    dispatching_to_observer_list_ = false;
+  }
+}
+
+void TraceLog::SetArgumentFilterPredicate(
+    const ArgumentFilterPredicate& argument_filter_predicate) {
+  AutoLock lock(lock_);
+  DCHECK(!argument_filter_predicate.is_null());
+  DCHECK(argument_filter_predicate_.is_null());
+  argument_filter_predicate_ = argument_filter_predicate;
+}
+
+TraceLog::InternalTraceOptions TraceLog::GetInternalOptionsFromTraceConfig(
+    const TraceConfig& config) {
+  InternalTraceOptions ret = config.IsArgumentFilterEnabled()
+                                 ? kInternalEnableArgumentFilter
+                                 : kInternalNone;
+  switch (config.GetTraceRecordMode()) {
+    case RECORD_UNTIL_FULL:
+      return ret | kInternalRecordUntilFull;
+    case RECORD_CONTINUOUSLY:
+      return ret | kInternalRecordContinuously;
+    case ECHO_TO_CONSOLE:
+      return ret | kInternalEchoToConsole;
+    case RECORD_AS_MUCH_AS_POSSIBLE:
+      return ret | kInternalRecordAsMuchAsPossible;
+  }
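+  // All TraceRecordMode values are handled in the switch above, so this
+  // should be unreachable.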
+  NOTREACHED();
+  return kInternalNone;
+}
+
+TraceConfig TraceLog::GetCurrentTraceConfig() const {
+  AutoLock lock(lock_);
+  return trace_config_;
+}
+
+void TraceLog::SetDisabled() {
+  AutoLock lock(lock_);
+  SetDisabledWhileLocked(RECORDING_MODE);
+}
+
+void TraceLog::SetDisabled(uint8_t modes_to_disable) {
+  AutoLock lock(lock_);
+  SetDisabledWhileLocked(modes_to_disable);
+}
+
+void TraceLog::SetDisabledWhileLocked(uint8_t modes_to_disable) {
+  lock_.AssertAcquired();
+
+  if (!(enabled_modes_ & modes_to_disable))
+    return;
+
+  if (dispatching_to_observer_list_) {
+    // TODO(ssid): Change to NOTREACHED after fixing crbug.com/625170.
+    DLOG(ERROR)
+        << "Cannot manipulate TraceLog::Enabled state from an observer.";
+    return;
+  }
+
+  bool is_recording_mode_disabled =
+      (enabled_modes_ & RECORDING_MODE) && (modes_to_disable & RECORDING_MODE);
+  enabled_modes_ &= ~modes_to_disable;
+
+  if (modes_to_disable & FILTERING_MODE)
+    enabled_event_filters_.clear();
+
+  if (modes_to_disable & RECORDING_MODE)
+    trace_config_.Clear();
+
+  UpdateCategoryRegistry();
+
+  // Add metadata events and notify observers only if recording mode was
+  // disabled now.
+  if (!is_recording_mode_disabled)
+    return;
+
+  AddMetadataEventsWhileLocked();
+
+  // Remove metadata events so they will not get added to a subsequent trace.
+  metadata_events_.clear();
+
+  dispatching_to_observer_list_ = true;
+  std::vector<EnabledStateObserver*> observer_list =
+      enabled_state_observer_list_;
+  std::map<AsyncEnabledStateObserver*, RegisteredAsyncObserver> observer_map =
+      async_observers_;
+
+  {
+    // Dispatch to observers outside the lock in case the observer triggers a
+    // trace event.
+    AutoUnlock unlock(lock_);
+    for (EnabledStateObserver* observer : observer_list)
+      observer->OnTraceLogDisabled();
+    for (const auto& it : observer_map) {
+      it.second.task_runner->PostTask(
+          FROM_HERE, BindOnce(&AsyncEnabledStateObserver::OnTraceLogDisabled,
+                              it.second.observer));
+    }
+  }
+  dispatching_to_observer_list_ = false;
+}
+
+int TraceLog::GetNumTracesRecorded() {
+  AutoLock lock(lock_);
+  if (!IsEnabled())
+    return -1;
+  return num_traces_recorded_;
+}
+
+void TraceLog::AddEnabledStateObserver(EnabledStateObserver* listener) {
+  AutoLock lock(lock_);
+  enabled_state_observer_list_.push_back(listener);
+}
+
+void TraceLog::RemoveEnabledStateObserver(EnabledStateObserver* listener) {
+  AutoLock lock(lock_);
+  std::vector<EnabledStateObserver*>::iterator it =
+      std::find(enabled_state_observer_list_.begin(),
+                enabled_state_observer_list_.end(), listener);
+  if (it != enabled_state_observer_list_.end())
+    enabled_state_observer_list_.erase(it);
+}
+
+bool TraceLog::HasEnabledStateObserver(EnabledStateObserver* listener) const {
+  AutoLock lock(lock_);
+  return ContainsValue(enabled_state_observer_list_, listener);
+}
+
+TraceLogStatus TraceLog::GetStatus() const {
+  AutoLock lock(lock_);
+  TraceLogStatus result;
+  result.event_capacity = static_cast<uint32_t>(logged_events_->Capacity());
+  result.event_count = static_cast<uint32_t>(logged_events_->Size());
+  return result;
+}
+
+bool TraceLog::BufferIsFull() const {
+  AutoLock lock(lock_);
+  return logged_events_->IsFull();
+}
+
+TraceEvent* TraceLog::AddEventToThreadSharedChunkWhileLocked(
+    TraceEventHandle* handle,
+    bool check_buffer_is_full) {
+  lock_.AssertAcquired();
+
+  if (thread_shared_chunk_ && thread_shared_chunk_->IsFull()) {
+    logged_events_->ReturnChunk(thread_shared_chunk_index_,
+                                std::move(thread_shared_chunk_));
+  }
+
+  if (!thread_shared_chunk_) {
+    thread_shared_chunk_ =
+        logged_events_->GetChunk(&thread_shared_chunk_index_);
+    if (check_buffer_is_full)
+      CheckIfBufferIsFullWhileLocked();
+  }
+  if (!thread_shared_chunk_)
+    return nullptr;
+
+  size_t event_index;
+  TraceEvent* trace_event = thread_shared_chunk_->AddTraceEvent(&event_index);
+  if (trace_event && handle) {
+    MakeHandle(thread_shared_chunk_->seq(), thread_shared_chunk_index_,
+               event_index, handle);
+  }
+  return trace_event;
+}
+
+void TraceLog::CheckIfBufferIsFullWhileLocked() {
+  lock_.AssertAcquired();
+  if (logged_events_->IsFull()) {
+    if (buffer_limit_reached_timestamp_.is_null()) {
+      buffer_limit_reached_timestamp_ = OffsetNow();
+    }
+    SetDisabledWhileLocked(RECORDING_MODE);
+  }
+}
+
+// Flush() works as follows:
+// 1. Flush() is called on thread A, whose task runner is saved in
+//    flush_task_runner_;
+// 2. If thread_message_loops_ is not empty, thread A posts a task to each
+//    message loop to flush the thread-local buffers; otherwise it finishes
+//    the flush;
+// 3. FlushCurrentThread() deletes the thread-local event buffer:
+//    - The thread's last batch of events is flushed into the main buffer;
+//    - The message loop is removed from thread_message_loops_;
+//    If this is the last message loop, finish the flush;
+// 4. If any thread hasn't finished its flush in time, finish the flush.
+void TraceLog::Flush(const TraceLog::OutputCallback& cb,
+                     bool use_worker_thread) {
+  FlushInternal(cb, use_worker_thread, false);
+}
+
+void TraceLog::CancelTracing(const OutputCallback& cb) {
+  SetDisabled();
+  FlushInternal(cb, false, true);
+}
+
+void TraceLog::FlushInternal(const TraceLog::OutputCallback& cb,
+                             bool use_worker_thread,
+                             bool discard_events) {
+  use_worker_thread_ = use_worker_thread;
+  if (IsEnabled()) {
+    // Can't flush when tracing is enabled because otherwise PostTask would
+    // - generate more trace events;
+    // - deschedule the calling thread on some platforms causing inaccurate
+    //   timing of the trace events.
+    scoped_refptr<RefCountedString> empty_result = new RefCountedString;
+    if (!cb.is_null())
+      cb.Run(empty_result, false);
+    LOG(WARNING) << "Ignored TraceLog::Flush called when tracing is enabled";
+    return;
+  }
+
+  int gen = generation();
+  // Copy of thread_message_loops_ to be used without locking.
+  std::vector<scoped_refptr<SingleThreadTaskRunner>>
+      thread_message_loop_task_runners;
+  {
+    AutoLock lock(lock_);
+    DCHECK(!flush_task_runner_);
+    flush_task_runner_ = ThreadTaskRunnerHandle::IsSet()
+                             ? ThreadTaskRunnerHandle::Get()
+                             : nullptr;
+    DCHECK(thread_message_loops_.empty() || flush_task_runner_);
+    flush_output_callback_ = cb;
+
+    if (thread_shared_chunk_) {
+      logged_events_->ReturnChunk(thread_shared_chunk_index_,
+                                  std::move(thread_shared_chunk_));
+    }
+
+    for (MessageLoop* loop : thread_message_loops_)
+      thread_message_loop_task_runners.push_back(loop->task_runner());
+  }
+
+  if (!thread_message_loop_task_runners.empty()) {
+    for (auto& task_runner : thread_message_loop_task_runners) {
+      task_runner->PostTask(
+          FROM_HERE, BindOnce(&TraceLog::FlushCurrentThread, Unretained(this),
+                              gen, discard_events));
+    }
+    flush_task_runner_->PostDelayedTask(
+        FROM_HERE,
+        BindOnce(&TraceLog::OnFlushTimeout, Unretained(this), gen,
+                 discard_events),
+        TimeDelta::FromMilliseconds(kThreadFlushTimeoutMs));
+    return;
+  }
+
+  FinishFlush(gen, discard_events);
+}
+
+// Usually it runs on a different thread.
+void TraceLog::ConvertTraceEventsToTraceFormat(
+    std::unique_ptr<TraceBuffer> logged_events,
+    const OutputCallback& flush_output_callback,
+    const ArgumentFilterPredicate& argument_filter_predicate) {
+  if (flush_output_callback.is_null())
+    return;
+
+  HEAP_PROFILER_SCOPED_IGNORE;
+  // The callback needs to be called at least once, even if there are no
+  // events, to let the caller know that the flush has completed.
+  scoped_refptr<RefCountedString> json_events_str_ptr = new RefCountedString();
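+  // Reserving slightly more than the flush threshold likely avoids a
+  // reallocation when the string grows just past kTraceEventBufferSizeInBytes
+  // before being handed to the callback.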
+  const size_t kReserveCapacity = kTraceEventBufferSizeInBytes * 5 / 4;
+  json_events_str_ptr->data().reserve(kReserveCapacity);
+  while (const TraceBufferChunk* chunk = logged_events->NextChunk()) {
+    for (size_t j = 0; j < chunk->size(); ++j) {
+      size_t size = json_events_str_ptr->size();
+      if (size > kTraceEventBufferSizeInBytes) {
+        flush_output_callback.Run(json_events_str_ptr, true);
+        json_events_str_ptr = new RefCountedString();
+        json_events_str_ptr->data().reserve(kReserveCapacity);
+      } else if (size) {
+        json_events_str_ptr->data().append(",\n");
+      }
+      chunk->GetEventAt(j)->AppendAsJSON(&(json_events_str_ptr->data()),
+                                         argument_filter_predicate);
+    }
+  }
+  flush_output_callback.Run(json_events_str_ptr, false);
+}
+
+void TraceLog::FinishFlush(int generation, bool discard_events) {
+  std::unique_ptr<TraceBuffer> previous_logged_events;
+  OutputCallback flush_output_callback;
+  ArgumentFilterPredicate argument_filter_predicate;
+
+  if (!CheckGeneration(generation))
+    return;
+
+  {
+    AutoLock lock(lock_);
+
+    previous_logged_events.swap(logged_events_);
+    UseNextTraceBuffer();
+    thread_message_loops_.clear();
+
+    flush_task_runner_ = nullptr;
+    flush_output_callback = flush_output_callback_;
+    flush_output_callback_.Reset();
+
+    if (trace_options() & kInternalEnableArgumentFilter) {
+      CHECK(!argument_filter_predicate_.is_null());
+      argument_filter_predicate = argument_filter_predicate_;
+    }
+  }
+
+  if (discard_events) {
+    if (!flush_output_callback.is_null()) {
+      scoped_refptr<RefCountedString> empty_result = new RefCountedString;
+      flush_output_callback.Run(empty_result, false);
+    }
+    return;
+  }
+
+  if (use_worker_thread_) {
+    base::PostTaskWithTraits(
+        FROM_HERE,
+        {MayBlock(), TaskPriority::BACKGROUND,
+         TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN},
+        BindOnce(&TraceLog::ConvertTraceEventsToTraceFormat,
+                 std::move(previous_logged_events), flush_output_callback,
+                 argument_filter_predicate));
+    return;
+  }
+
+  ConvertTraceEventsToTraceFormat(std::move(previous_logged_events),
+                                  flush_output_callback,
+                                  argument_filter_predicate);
+}
+
+// Run in each thread holding a local event buffer.
+void TraceLog::FlushCurrentThread(int generation, bool discard_events) {
+  {
+    AutoLock lock(lock_);
+    if (!CheckGeneration(generation) || !flush_task_runner_) {
+      // This is late. The corresponding flush has finished.
+      return;
+    }
+  }
+
+  // This will flush the thread local buffer.
+  delete thread_local_event_buffer_.Get();
+
+  // The scheduler uses TRACE_EVENT macros when posting a task, which can lead
+  // to acquiring the tracing lock. Given that posting a task also requires
+  // grabbing a scheduler lock, we need to post this task outside the tracing
+  // lock to avoid deadlocks.
+  scoped_refptr<SingleThreadTaskRunner> cached_flush_task_runner;
+  {
+    AutoLock lock(lock_);
+    cached_flush_task_runner = flush_task_runner_;
+    if (!CheckGeneration(generation) || !flush_task_runner_ ||
+        !thread_message_loops_.empty())
+      return;
+  }
+  cached_flush_task_runner->PostTask(
+      FROM_HERE, BindOnce(&TraceLog::FinishFlush, Unretained(this), generation,
+                          discard_events));
+}
+
+void TraceLog::OnFlushTimeout(int generation, bool discard_events) {
+  {
+    AutoLock lock(lock_);
+    if (!CheckGeneration(generation) || !flush_task_runner_) {
+      // Flush has finished before timeout.
+      return;
+    }
+
+    LOG(WARNING)
+        << "The following threads haven't finished flushing in time. "
+           "If this happens consistently for some thread, please call "
+           "TraceLog::GetInstance()->SetCurrentThreadBlocksMessageLoop() from "
+           "that thread to prevent its trace events from being lost.";
+    for (hash_set<MessageLoop*>::const_iterator it =
+             thread_message_loops_.begin();
+         it != thread_message_loops_.end(); ++it) {
+      LOG(WARNING) << "Thread: " << (*it)->GetThreadName();
+    }
+  }
+  FinishFlush(generation, discard_events);
+}
+
+void TraceLog::UseNextTraceBuffer() {
+  logged_events_.reset(CreateTraceBuffer());
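+  // Incrementing the generation invalidates thread-local buffers created for
+  // the previous session (see CheckGeneration()).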
+  subtle::NoBarrier_AtomicIncrement(&generation_, 1);
+  thread_shared_chunk_.reset();
+  thread_shared_chunk_index_ = 0;
+}
+
+TraceEventHandle TraceLog::AddTraceEvent(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    int num_args,
+    const char* const* arg_names,
+    const unsigned char* arg_types,
+    const unsigned long long* arg_values,
+    std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
+    unsigned int flags) {
+  int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
+  base::TimeTicks now = TRACE_TIME_TICKS_NOW();
+  return AddTraceEventWithThreadIdAndTimestamp(
+      phase,
+      category_group_enabled,
+      name,
+      scope,
+      id,
+      trace_event_internal::kNoId,  // bind_id
+      thread_id,
+      now,
+      num_args,
+      arg_names,
+      arg_types,
+      arg_values,
+      convertable_values,
+      flags);
+}
+
+TraceEventHandle TraceLog::AddTraceEventWithBindId(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    unsigned long long bind_id,
+    int num_args,
+    const char* const* arg_names,
+    const unsigned char* arg_types,
+    const unsigned long long* arg_values,
+    std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
+    unsigned int flags) {
+  int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
+  base::TimeTicks now = TRACE_TIME_TICKS_NOW();
+  return AddTraceEventWithThreadIdAndTimestamp(
+      phase,
+      category_group_enabled,
+      name,
+      scope,
+      id,
+      bind_id,
+      thread_id,
+      now,
+      num_args,
+      arg_names,
+      arg_types,
+      arg_values,
+      convertable_values,
+      flags | TRACE_EVENT_FLAG_HAS_CONTEXT_ID);
+}
+
+TraceEventHandle TraceLog::AddTraceEventWithProcessId(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    int process_id,
+    int num_args,
+    const char* const* arg_names,
+    const unsigned char* arg_types,
+    const unsigned long long* arg_values,
+    std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
+    unsigned int flags) {
+  base::TimeTicks now = TRACE_TIME_TICKS_NOW();
+  return AddTraceEventWithThreadIdAndTimestamp(
+      phase,
+      category_group_enabled,
+      name,
+      scope,
+      id,
+      trace_event_internal::kNoId,  // bind_id
+      process_id,
+      now,
+      num_args,
+      arg_names,
+      arg_types,
+      arg_values,
+      convertable_values,
+      flags | TRACE_EVENT_FLAG_HAS_PROCESS_ID);
+}
+
+// Handle legacy calls to AddTraceEventWithThreadIdAndTimestamp
+// with kNoId as bind_id
+TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    int thread_id,
+    const TimeTicks& timestamp,
+    int num_args,
+    const char* const* arg_names,
+    const unsigned char* arg_types,
+    const unsigned long long* arg_values,
+    std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
+    unsigned int flags) {
+  return AddTraceEventWithThreadIdAndTimestamp(
+      phase,
+      category_group_enabled,
+      name,
+      scope,
+      id,
+      trace_event_internal::kNoId,  // bind_id
+      thread_id,
+      timestamp,
+      num_args,
+      arg_names,
+      arg_types,
+      arg_values,
+      convertable_values,
+      flags);
+}
+
+TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    unsigned long long bind_id,
+    int thread_id,
+    const TimeTicks& timestamp,
+    int num_args,
+    const char* const* arg_names,
+    const unsigned char* arg_types,
+    const unsigned long long* arg_values,
+    std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
+    unsigned int flags) {
+  TraceEventHandle handle = {0, 0, 0};
+  if (!*category_group_enabled)
+    return handle;
+
+  // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
+  // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
+  // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
+  if (thread_is_in_trace_event_.Get())
+    return handle;
+
+  AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);
+
+  DCHECK(name);
+  DCHECK(!timestamp.is_null());
+
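+  // Mangling XORs the id with a process-specific hash (see MangleEventId())
+  // so that ids from different processes are unlikely to collide.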
+  if (flags & TRACE_EVENT_FLAG_MANGLE_ID) {
+    if ((flags & TRACE_EVENT_FLAG_FLOW_IN) ||
+        (flags & TRACE_EVENT_FLAG_FLOW_OUT))
+      bind_id = MangleEventId(bind_id);
+    id = MangleEventId(id);
+  }
+
+  TimeTicks offset_event_timestamp = OffsetTimestamp(timestamp);
+  ThreadTicks thread_now = ThreadNow();
+
+  ThreadLocalEventBuffer* thread_local_event_buffer = nullptr;
+  if (*category_group_enabled & RECORDING_MODE) {
+    // |thread_local_event_buffer_| can be null if the current thread doesn't
+    // have a message loop or the message loop is blocked.
+    InitializeThreadLocalEventBufferIfSupported();
+    thread_local_event_buffer = thread_local_event_buffer_.Get();
+  }
+
+  // Check and update the current thread name only if the event is for the
+  // current thread to avoid locks in most cases.
+  if (thread_id == static_cast<int>(PlatformThread::CurrentId())) {
+    const char* new_name =
+        ThreadIdNameManager::GetInstance()->GetName(thread_id);
+    // Check if the thread name has been set or changed since the previous
+    // call (if any), but don't bother if the new name is empty. Note this will
+    // not detect a thread name change within the same char* buffer address: we
+    // favor common case performance over corner case correctness.
+    static auto* current_thread_name = new ThreadLocalPointer<const char>();
+    if (new_name != current_thread_name->Get() && new_name && *new_name) {
+      current_thread_name->Set(new_name);
+
+      AutoLock thread_info_lock(thread_info_lock_);
+
+      auto existing_name = thread_names_.find(thread_id);
+      if (existing_name == thread_names_.end()) {
+        // This is a new thread id, and a new name.
+        thread_names_[thread_id] = new_name;
+      } else {
+        // This is a thread id that we've seen before, but potentially with a
+        // new name.
+        std::vector<StringPiece> existing_names = base::SplitStringPiece(
+            existing_name->second, ",", base::KEEP_WHITESPACE,
+            base::SPLIT_WANT_NONEMPTY);
+        if (!ContainsValue(existing_names, new_name)) {
+          if (!existing_names.empty())
+            existing_name->second.push_back(',');
+          existing_name->second.append(new_name);
+        }
+      }
+    }
+  }
+
+#if defined(OS_WIN)
+  // This is done sooner rather than later, to avoid creating the event and
+  // acquiring the lock, which is not needed for ETW as it's already threadsafe.
+  if (*category_group_enabled & TraceCategory::ENABLED_FOR_ETW_EXPORT)
+    TraceEventETWExport::AddEvent(phase, category_group_enabled, name, id,
+                                  num_args, arg_names, arg_types, arg_values,
+                                  convertable_values);
+#endif  // OS_WIN
+
+  AddTraceEventOverrideCallback trace_event_override =
+      reinterpret_cast<AddTraceEventOverrideCallback>(
+          subtle::NoBarrier_Load(&trace_event_override_));
+  if (trace_event_override) {
+    TraceEvent new_trace_event;
+    // If an override is in place, events are sent to it rather than to the
+    // trace log, so we have no way of going back and updating the duration of
+    // _COMPLETE events. Instead, we emit separate _BEGIN and _END events.
+    if (phase == TRACE_EVENT_PHASE_COMPLETE)
+      phase = TRACE_EVENT_PHASE_BEGIN;
+
+    new_trace_event.Initialize(thread_id, offset_event_timestamp, thread_now,
+                               phase, category_group_enabled, name, scope, id,
+                               bind_id, num_args, arg_names, arg_types,
+                               arg_values, convertable_values, flags);
+
+    trace_event_override(new_trace_event);
+    return handle;
+  }
+
+  std::string console_message;
+  std::unique_ptr<TraceEvent> filtered_trace_event;
+  bool disabled_by_filters = false;
+  if (*category_group_enabled & TraceCategory::ENABLED_FOR_FILTERING) {
+    std::unique_ptr<TraceEvent> new_trace_event(new TraceEvent);
+    new_trace_event->Initialize(thread_id, offset_event_timestamp, thread_now,
+                                phase, category_group_enabled, name, scope, id,
+                                bind_id, num_args, arg_names, arg_types,
+                                arg_values, convertable_values, flags);
+
+    disabled_by_filters = true;
+    ForEachCategoryFilter(
+        category_group_enabled, [&new_trace_event, &disabled_by_filters](
+                                    TraceEventFilter* trace_event_filter) {
+          if (trace_event_filter->FilterTraceEvent(*new_trace_event))
+            disabled_by_filters = false;
+        });
+    if (!disabled_by_filters)
+      filtered_trace_event = std::move(new_trace_event);
+  }
+
+  // If enabled for recording, the event should be added only if one of the
+  // filters indicates it should be kept, or the category is not enabled for
+  // filtering.
+  if ((*category_group_enabled & TraceCategory::ENABLED_FOR_RECORDING) &&
+      !disabled_by_filters) {
+    OptionalAutoLock lock(&lock_);
+
+    TraceEvent* trace_event = nullptr;
+    if (thread_local_event_buffer) {
+      trace_event = thread_local_event_buffer->AddTraceEvent(&handle);
+    } else {
+      lock.EnsureAcquired();
+      trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true);
+    }
+
+    if (trace_event) {
+      if (filtered_trace_event) {
+        trace_event->MoveFrom(std::move(filtered_trace_event));
+      } else {
+        trace_event->Initialize(thread_id, offset_event_timestamp, thread_now,
+                                phase, category_group_enabled, name, scope, id,
+                                bind_id, num_args, arg_names, arg_types,
+                                arg_values, convertable_values, flags);
+      }
+
+#if defined(OS_ANDROID)
+      trace_event->SendToATrace();
+#endif
+    }
+
+    if (trace_options() & kInternalEchoToConsole) {
+      console_message = EventToConsoleMessage(
+          phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase,
+          timestamp, trace_event);
+    }
+  }
+
+  if (!console_message.empty())
+    LOG(ERROR) << console_message;
+
+  return handle;
+}
+
+void TraceLog::AddMetadataEvent(
+    const unsigned char* category_group_enabled,
+    const char* name,
+    int num_args,
+    const char* const* arg_names,
+    const unsigned char* arg_types,
+    const unsigned long long* arg_values,
+    std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
+    unsigned int flags) {
+  HEAP_PROFILER_SCOPED_IGNORE;
+  std::unique_ptr<TraceEvent> trace_event(new TraceEvent);
+  int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
+  ThreadTicks thread_now = ThreadNow();
+  TimeTicks now = OffsetNow();
+  AutoLock lock(lock_);
+  trace_event->Initialize(
+      thread_id, now, thread_now, TRACE_EVENT_PHASE_METADATA,
+      category_group_enabled, name,
+      trace_event_internal::kGlobalScope,  // scope
+      trace_event_internal::kNoId,         // id
+      trace_event_internal::kNoId,         // bind_id
+      num_args, arg_names, arg_types, arg_values, convertable_values, flags);
+  metadata_events_.push_back(std::move(trace_event));
+}
+
+// May be called when a COMPLETE event ends and the unfinished event has been
+// recycled (phase == TRACE_EVENT_PHASE_END and trace_event == NULL).
+std::string TraceLog::EventToConsoleMessage(unsigned char phase,
+                                            const TimeTicks& timestamp,
+                                            TraceEvent* trace_event) {
+  HEAP_PROFILER_SCOPED_IGNORE;
+  AutoLock thread_info_lock(thread_info_lock_);
+
+  // The caller should translate TRACE_EVENT_PHASE_COMPLETE to
+  // TRACE_EVENT_PHASE_BEGIN or TRACE_EVENT_PHASE_END.
+  DCHECK(phase != TRACE_EVENT_PHASE_COMPLETE);
+
+  TimeDelta duration;
+  int thread_id =
+      trace_event ? trace_event->thread_id() : PlatformThread::CurrentId();
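+  // A BEGIN phase pushes its timestamp onto the per-thread stack at the end
+  // of this function; the matching END pops it to compute the nested duration.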
+  if (phase == TRACE_EVENT_PHASE_END) {
+    duration = timestamp - thread_event_start_times_[thread_id].top();
+    thread_event_start_times_[thread_id].pop();
+  }
+
+  std::string thread_name = thread_names_[thread_id];
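+  // Assign each new thread name one of six ANSI colors (escape codes 31-36),
+  // cycling as names appear.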
+  if (thread_colors_.find(thread_name) == thread_colors_.end()) {
+    size_t next_color = (thread_colors_.size() % 6) + 1;
+    thread_colors_[thread_name] = next_color;
+  }
+
+  std::ostringstream log;
+  log << base::StringPrintf("%s: \x1b[0;3%dm", thread_name.c_str(),
+                            thread_colors_[thread_name]);
+
+  size_t depth = 0;
+  auto it = thread_event_start_times_.find(thread_id);
+  if (it != thread_event_start_times_.end())
+    depth = it->second.size();
+
+  for (size_t i = 0; i < depth; ++i)
+    log << "| ";
+
+  if (trace_event)
+    trace_event->AppendPrettyPrinted(&log);
+  if (phase == TRACE_EVENT_PHASE_END)
+    log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF());
+
+  log << "\x1b[0;m";
+
+  if (phase == TRACE_EVENT_PHASE_BEGIN)
+    thread_event_start_times_[thread_id].push(timestamp);
+
+  return log.str();
+}
+
+void TraceLog::EndFilteredEvent(const unsigned char* category_group_enabled,
+                                const char* name,
+                                TraceEventHandle handle) {
+  const char* category_name = GetCategoryGroupName(category_group_enabled);
+  ForEachCategoryFilter(
+      category_group_enabled,
+      [name, category_name](TraceEventFilter* trace_event_filter) {
+        trace_event_filter->EndEvent(category_name, name);
+      });
+}
+
+void TraceLog::UpdateTraceEventDuration(
+    const unsigned char* category_group_enabled,
+    const char* name,
+    TraceEventHandle handle) {
+  char category_group_enabled_local = *category_group_enabled;
+  if (!category_group_enabled_local)
+    return;
+
+  UpdateTraceEventDurationExplicit(category_group_enabled, name, handle,
+                                   OffsetNow(), ThreadNow());
+}
+
+void TraceLog::UpdateTraceEventDurationExplicit(
+    const unsigned char* category_group_enabled,
+    const char* name,
+    TraceEventHandle handle,
+    const TimeTicks& now,
+    const ThreadTicks& thread_now) {
+  char category_group_enabled_local = *category_group_enabled;
+  if (!category_group_enabled_local)
+    return;
+
+  // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
+  // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
+  // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
+  if (thread_is_in_trace_event_.Get())
+    return;
+  AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);
+
+#if defined(OS_WIN)
+  // Generate an ETW event that marks the end of a complete event.
+  if (category_group_enabled_local & TraceCategory::ENABLED_FOR_ETW_EXPORT)
+    TraceEventETWExport::AddCompleteEndEvent(name);
+#endif  // OS_WIN
+
+  std::string console_message;
+  if (category_group_enabled_local & TraceCategory::ENABLED_FOR_RECORDING) {
+    AddTraceEventOverrideCallback trace_event_override =
+        reinterpret_cast<AddTraceEventOverrideCallback>(
+            subtle::NoBarrier_Load(&trace_event_override_));
+
+    // If we send events off to an override instead of the TraceBuffer,
+    // we don't have a way of updating the prior event, so we emit a
+    // separate _END event instead.
+    if (trace_event_override) {
+      TraceEvent new_trace_event;
+      new_trace_event.Initialize(
+          static_cast<int>(base::PlatformThread::CurrentId()), now, thread_now,
+          TRACE_EVENT_PHASE_END, category_group_enabled, name,
+          trace_event_internal::kGlobalScope,
+          trace_event_internal::kNoId /* id */,
+          trace_event_internal::kNoId /* bind_id */, 0, nullptr, nullptr,
+          nullptr, nullptr, TRACE_EVENT_FLAG_NONE);
+      trace_event_override(new_trace_event);
+      return;
+    }
+
+    OptionalAutoLock lock(&lock_);
+
+    TraceEvent* trace_event = GetEventByHandleInternal(handle, &lock);
+    if (trace_event) {
+      DCHECK(trace_event->phase() == TRACE_EVENT_PHASE_COMPLETE);
+      // TEMP(oysteine) to debug crbug.com/638744
+      if (trace_event->duration().ToInternalValue() != -1) {
+        DVLOG(1) << "TraceHandle: chunk_seq " << handle.chunk_seq
+                 << ", chunk_index " << handle.chunk_index << ", event_index "
+                 << handle.event_index;
+
+        std::string serialized_event;
+        trace_event->AppendAsJSON(&serialized_event, ArgumentFilterPredicate());
+        DVLOG(1) << "TraceEvent: " << serialized_event;
+        lock_.AssertAcquired();
+      }
+
+      trace_event->UpdateDuration(now, thread_now);
+#if defined(OS_ANDROID)
+      trace_event->SendToATrace();
+#endif
+    }
+
+    if (trace_options() & kInternalEchoToConsole) {
+      console_message =
+          EventToConsoleMessage(TRACE_EVENT_PHASE_END, now, trace_event);
+    }
+  }
+
+  if (!console_message.empty())
+    LOG(ERROR) << console_message;
+
+  if (category_group_enabled_local & TraceCategory::ENABLED_FOR_FILTERING)
+    EndFilteredEvent(category_group_enabled, name, handle);
+}
+
+uint64_t TraceLog::MangleEventId(uint64_t id) {
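+  // XOR with a per-process hash makes ids unlikely to collide across
+  // processes; since XOR is self-inverse, mangling the same id twice
+  // restores the original value.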
+  return id ^ process_id_hash_;
+}
+
+void TraceLog::AddMetadataEventsWhileLocked() {
+  lock_.AssertAcquired();
+
+  // Move metadata added by |AddMetadataEvent| into the trace log.
+  while (!metadata_events_.empty()) {
+    TraceEvent* event = AddEventToThreadSharedChunkWhileLocked(nullptr, false);
+    event->MoveFrom(std::move(metadata_events_.back()));
+    metadata_events_.pop_back();
+  }
+
+#if !defined(OS_NACL)  // NaCl shouldn't expose the process id.
+  InitializeMetadataEvent(
+      AddEventToThreadSharedChunkWhileLocked(nullptr, false), 0, "num_cpus",
+      "number", base::SysInfo::NumberOfProcessors());
+#endif
+
+  int current_thread_id = static_cast<int>(base::PlatformThread::CurrentId());
+  if (process_sort_index_ != 0) {
+    InitializeMetadataEvent(
+        AddEventToThreadSharedChunkWhileLocked(nullptr, false),
+        current_thread_id, "process_sort_index", "sort_index",
+        process_sort_index_);
+  }
+
+  if (!process_name_.empty()) {
+    InitializeMetadataEvent(
+        AddEventToThreadSharedChunkWhileLocked(nullptr, false),
+        current_thread_id, "process_name", "name", process_name_);
+  }
+
+  TimeDelta process_uptime = TRACE_TIME_NOW() - process_creation_time_;
+  InitializeMetadataEvent(
+      AddEventToThreadSharedChunkWhileLocked(nullptr, false), current_thread_id,
+      "process_uptime_seconds", "uptime", process_uptime.InSeconds());
+
+#if defined(OS_ANDROID)
+  InitializeMetadataEvent(
+      AddEventToThreadSharedChunkWhileLocked(nullptr, false), current_thread_id,
+      "chrome_library_address", "start_address",
+      base::StringPrintf("%p", &__executable_start));
+#endif
+
+  if (!process_labels_.empty()) {
+    std::vector<base::StringPiece> labels;
+    for (const auto& it : process_labels_)
+      labels.push_back(it.second);
+    InitializeMetadataEvent(
+        AddEventToThreadSharedChunkWhileLocked(nullptr, false),
+        current_thread_id, "process_labels", "labels",
+        base::JoinString(labels, ","));
+  }
+
+  // Thread sort indices.
+  for (const auto& it : thread_sort_indices_) {
+    if (it.second == 0)
+      continue;
+    InitializeMetadataEvent(
+        AddEventToThreadSharedChunkWhileLocked(nullptr, false), it.first,
+        "thread_sort_index", "sort_index", it.second);
+  }
+
+  // Thread names.
+  AutoLock thread_info_lock(thread_info_lock_);
+  for (const auto& it : thread_names_) {
+    if (it.second.empty())
+      continue;
+    InitializeMetadataEvent(
+        AddEventToThreadSharedChunkWhileLocked(nullptr, false), it.first,
+        "thread_name", "name", it.second);
+  }
+
+  // If buffer is full, add a metadata record to report this.
+  if (!buffer_limit_reached_timestamp_.is_null()) {
+    InitializeMetadataEvent(
+        AddEventToThreadSharedChunkWhileLocked(nullptr, false),
+        current_thread_id, "trace_buffer_overflowed", "overflowed_at_ts",
+        buffer_limit_reached_timestamp_);
+  }
+}
+
+TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) {
+  return GetEventByHandleInternal(handle, nullptr);
+}
+
+TraceEvent* TraceLog::GetEventByHandleInternal(TraceEventHandle handle,
+                                               OptionalAutoLock* lock) {
+  if (!handle.chunk_seq)
+    return nullptr;
+
+  DCHECK(handle.chunk_seq);
+  DCHECK(handle.chunk_index <= TraceBufferChunk::kMaxChunkIndex);
+  DCHECK(handle.event_index <= TraceBufferChunk::kTraceBufferChunkSize - 1);
+
+  if (thread_local_event_buffer_.Get()) {
+    TraceEvent* trace_event =
+        thread_local_event_buffer_.Get()->GetEventByHandle(handle);
+    if (trace_event)
+      return trace_event;
+  }
+
+  // The event is no longer under the control of the thread-local buffer.
+  // Try to get the event from the main buffer with a lock.
+  if (lock)
+    lock->EnsureAcquired();
+
+  if (thread_shared_chunk_ &&
+      handle.chunk_index == thread_shared_chunk_index_) {
+    return handle.chunk_seq == thread_shared_chunk_->seq()
+               ? thread_shared_chunk_->GetEventAt(handle.event_index)
+               : nullptr;
+  }
+
+  return logged_events_->GetEventByHandle(handle);
+}
+
+void TraceLog::SetProcessID(int process_id) {
+  process_id_ = process_id;
+  // Create an FNV hash from the process ID for XORing.
+  // See http://isthe.com/chongo/tech/comp/fnv/ for algorithm details.
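+  // Note: this applies a single FNV-1a-style round, (basis ^ pid) * prime,
+  // to the whole 64-bit pid at once rather than the byte-wise algorithm;
+  // cheap mixing, not collision resistance, is the goal here.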
+  const unsigned long long kOffsetBasis = 14695981039346656037ull;
+  const unsigned long long kFnvPrime = 1099511628211ull;
+  const unsigned long long pid = static_cast<unsigned long long>(process_id_);
+  process_id_hash_ = (kOffsetBasis ^ pid) * kFnvPrime;
+}
+
+void TraceLog::SetProcessSortIndex(int sort_index) {
+  AutoLock lock(lock_);
+  process_sort_index_ = sort_index;
+}
+
+void TraceLog::UpdateProcessLabel(int label_id,
+                                  const std::string& current_label) {
+  if (!current_label.length())
+    return RemoveProcessLabel(label_id);
+
+  AutoLock lock(lock_);
+  process_labels_[label_id] = current_label;
+}
+
+void TraceLog::RemoveProcessLabel(int label_id) {
+  AutoLock lock(lock_);
+  process_labels_.erase(label_id);
+}
+
+void TraceLog::SetThreadSortIndex(PlatformThreadId thread_id, int sort_index) {
+  AutoLock lock(lock_);
+  thread_sort_indices_[static_cast<int>(thread_id)] = sort_index;
+}
+
+void TraceLog::SetTimeOffset(TimeDelta offset) {
+  time_offset_ = offset;
+}
+
+size_t TraceLog::GetObserverCountForTest() const {
+  return enabled_state_observer_list_.size();
+}
+
+void TraceLog::SetCurrentThreadBlocksMessageLoop() {
+  thread_blocks_message_loop_.Set(true);
+  // This will flush the thread local buffer.
+  delete thread_local_event_buffer_.Get();
+}
+
+TraceBuffer* TraceLog::CreateTraceBuffer() {
+  HEAP_PROFILER_SCOPED_IGNORE;
+  InternalTraceOptions options = trace_options();
+  if (options & kInternalRecordContinuously) {
+    return TraceBuffer::CreateTraceBufferRingBuffer(
+        kTraceEventRingBufferChunks);
+  }
+  if (options & kInternalEchoToConsole) {
+    return TraceBuffer::CreateTraceBufferRingBuffer(
+        kEchoToConsoleTraceEventBufferChunks);
+  }
+  if (options & kInternalRecordAsMuchAsPossible) {
+    return TraceBuffer::CreateTraceBufferVectorOfSize(
+        kTraceEventVectorBigBufferChunks);
+  }
+  return TraceBuffer::CreateTraceBufferVectorOfSize(
+      kTraceEventVectorBufferChunks);
+}
+
+#if defined(OS_WIN)
+void TraceLog::UpdateETWCategoryGroupEnabledFlags() {
+  // Go through each category and set/clear the ETW bit depending on whether the
+  // category is enabled.
+  for (TraceCategory& category : CategoryRegistry::GetAllCategories()) {
+    if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled(
+            category.name())) {
+      category.set_state_flag(TraceCategory::ENABLED_FOR_ETW_EXPORT);
+    } else {
+      category.clear_state_flag(TraceCategory::ENABLED_FOR_ETW_EXPORT);
+    }
+  }
+}
+#endif  // defined(OS_WIN)
+
+void TraceLog::SetTraceBufferForTesting(
+    std::unique_ptr<TraceBuffer> trace_buffer) {
+  AutoLock lock(lock_);
+  logged_events_ = std::move(trace_buffer);
+}
+
+void ConvertableToTraceFormat::EstimateTraceMemoryOverhead(
+    TraceEventMemoryOverhead* overhead) {
+  overhead->Add(TraceEventMemoryOverhead::kConvertableToTraceFormat,
+                sizeof(*this));
+}
+
+void TraceLog::AddAsyncEnabledStateObserver(
+    WeakPtr<AsyncEnabledStateObserver> listener) {
+  AutoLock lock(lock_);
+  async_observers_.insert(
+      std::make_pair(listener.get(), RegisteredAsyncObserver(listener)));
+}
+
+void TraceLog::RemoveAsyncEnabledStateObserver(
+    AsyncEnabledStateObserver* listener) {
+  AutoLock lock(lock_);
+  async_observers_.erase(listener);
+}
+
+bool TraceLog::HasAsyncEnabledStateObserver(
+    AsyncEnabledStateObserver* listener) const {
+  AutoLock lock(lock_);
+  return ContainsKey(async_observers_, listener);
+}
+
+}  // namespace trace_event
+}  // namespace base
+
+namespace trace_event_internal {
+
+ScopedTraceBinaryEfficient::ScopedTraceBinaryEfficient(
+    const char* category_group,
+    const char* name) {
+  // The single atom works because for now the category_group can only be "gpu".
+  DCHECK_EQ(strcmp(category_group, "gpu"), 0);
+  static TRACE_EVENT_API_ATOMIC_WORD atomic = 0;
+  INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES(
+      category_group, atomic, category_group_enabled_);
+  name_ = name;
+  if (*category_group_enabled_) {
+    event_handle_ =
+        TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
+            TRACE_EVENT_PHASE_COMPLETE, category_group_enabled_, name,
+            trace_event_internal::kGlobalScope,                   // scope
+            trace_event_internal::kNoId,                          // id
+            static_cast<int>(base::PlatformThread::CurrentId()),  // thread_id
+            TRACE_TIME_TICKS_NOW(), trace_event_internal::kZeroNumArgs, nullptr,
+            nullptr, nullptr, nullptr, TRACE_EVENT_FLAG_NONE);
+  }
+}
+
+ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() {
+  if (*category_group_enabled_) {
+    TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_, name_,
+                                                event_handle_);
+  }
+}
+
+}  // namespace trace_event_internal
diff --git a/base/trace_event/trace_log.h b/base/trace_event/trace_log.h
new file mode 100644
index 0000000..2c23189
--- /dev/null
+++ b/base/trace_event/trace_log.h
@@ -0,0 +1,528 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_LOG_H_
+#define BASE_TRACE_EVENT_TRACE_LOG_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include "base/atomicops.h"
+#include "base/containers/stack.h"
+#include "base/gtest_prod_util.h"
+#include "base/macros.h"
+#include "base/time/time_override.h"
+#include "base/trace_event/memory_dump_provider.h"
+#include "base/trace_event/trace_config.h"
+#include "base/trace_event/trace_event_impl.h"
+#include "build/build_config.h"
+
+namespace base {
+
+class MessageLoop;
+class RefCountedString;
+
+template <typename T>
+class NoDestructor;
+
+namespace trace_event {
+
+struct TraceCategory;
+class TraceBuffer;
+class TraceBufferChunk;
+class TraceEvent;
+class TraceEventFilter;
+class TraceEventMemoryOverhead;
+
+struct BASE_EXPORT TraceLogStatus {
+  TraceLogStatus();
+  ~TraceLogStatus();
+  uint32_t event_capacity;
+  uint32_t event_count;
+};
+
+class BASE_EXPORT TraceLog : public MemoryDumpProvider {
+ public:
+  // Argument passed to TraceLog::SetEnabled.
+  enum Mode : uint8_t {
+    // Enables normal tracing (recording trace events in the trace buffer).
+    RECORDING_MODE = 1 << 0,
+
+    // Trace events are enabled just for filtering but not for recording. Only
+    // event filters config of |trace_config| argument is used.
+    FILTERING_MODE = 1 << 1
+  };
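+  // The modes form a bitmap, so they can be combined; e.g. (illustrative):
+  //   TraceLog::GetInstance()->SetEnabled(
+  //       config, TraceLog::RECORDING_MODE | TraceLog::FILTERING_MODE);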
+
+  static TraceLog* GetInstance();
+
+  // Get set of known category groups. This can change as new code paths are
+  // reached. The known category groups are inserted into |category_groups|.
+  void GetKnownCategoryGroups(std::vector<std::string>* category_groups);
+
+  // Retrieves a copy (for thread-safety) of the current TraceConfig.
+  TraceConfig GetCurrentTraceConfig() const;
+
+  // Initializes the thread-local event buffer, if not already initialized and
+  // if the current thread supports that (has a message loop).
+  void InitializeThreadLocalEventBufferIfSupported();
+
+  // See TraceConfig comments for details on how to control which categories
+  // will be traced. SetDisabled must be called distinctly for each mode that is
+  // enabled. If tracing has already been enabled for recording, category filter
+  // (enabled and disabled categories) will be merged into the current category
+  // filter. Enabling RECORDING_MODE does not enable filters. Trace event
+  // filters will be used only if FILTERING_MODE is set on |modes_to_enable|.
+  // Unlike RECORDING_MODE, FILTERING_MODE doesn't support upgrading,
+  // i.e. filters can only be enabled if not previously enabled.
+  void SetEnabled(const TraceConfig& trace_config, uint8_t modes_to_enable);
+
+  // TODO(ssid): Remove the default SetEnabled and IsEnabled. They should take
+  // Mode as argument.
+
+  // Disables tracing for all categories, but only for the modes specified in
+  // |modes_to_disable|. The no-argument overload defaults to RECORDING_MODE.
+  void SetDisabled();
+  void SetDisabled(uint8_t modes_to_disable);
+
+  // Returns true if TraceLog is enabled on recording mode.
+  // Note: Returns false even if FILTERING_MODE is enabled.
+  bool IsEnabled() { return enabled_modes_ & RECORDING_MODE; }
+
+  // Returns a bitmap of enabled modes from TraceLog::Mode.
+  uint8_t enabled_modes() { return enabled_modes_; }
+
+  // The number of times we have begun recording traces. If tracing is off,
+  // returns -1. If tracing is on, then it returns the number of times we have
+  // recorded a trace. By watching for this number to increment, you can
+  // passively discover when a new trace has begun. This is then used to
+  // implement the TRACE_EVENT_IS_NEW_TRACE() primitive.
+  int GetNumTracesRecorded();
+
+#if defined(OS_ANDROID)
+  void StartATrace();
+  void StopATrace();
+  void AddClockSyncMetadataEvent();
+#endif
+
+  // Enabled state listeners give a callback when tracing is enabled or
+  // disabled. This can be used to tie into other library's tracing systems
+  // on-demand.
+  class BASE_EXPORT EnabledStateObserver {
+   public:
+    virtual ~EnabledStateObserver() = default;
+
+    // Called just after the tracing system becomes enabled, outside of the
+    // |lock_|. TraceLog::IsEnabled() is true at this point.
+    virtual void OnTraceLogEnabled() = 0;
+
+    // Called just after the tracing system disables, outside of the |lock_|.
+    // TraceLog::IsEnabled() is false at this point.
+    virtual void OnTraceLogDisabled() = 0;
+  };
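+  // A minimal observer sketch (class and log messages are hypothetical):
+  //   class LoggingObserver : public TraceLog::EnabledStateObserver {
+  //    public:
+  //     void OnTraceLogEnabled() override { VLOG(1) << "tracing enabled"; }
+  //     void OnTraceLogDisabled() override { VLOG(1) << "tracing disabled"; }
+  //   };
+  //   TraceLog::GetInstance()->AddEnabledStateObserver(&my_observer);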
+  void AddEnabledStateObserver(EnabledStateObserver* listener);
+  void RemoveEnabledStateObserver(EnabledStateObserver* listener);
+  bool HasEnabledStateObserver(EnabledStateObserver* listener) const;
+
+  // Asynchronous enabled state listeners. When tracing is enabled or disabled,
+  // for each observer, a task for invoking its appropriate callback is posted
+  // to the thread from which AddAsyncEnabledStateObserver() was called. This
+  // allows the observer to be safely destroyed, provided that it happens on the
+  // same thread that invoked AddAsyncEnabledStateObserver().
+  class BASE_EXPORT AsyncEnabledStateObserver {
+   public:
+    virtual ~AsyncEnabledStateObserver() = default;
+
+    // Posted just after the tracing system becomes enabled, outside |lock_|.
+    // TraceLog::IsEnabled() is true at this point.
+    virtual void OnTraceLogEnabled() = 0;
+
+    // Posted just after the tracing system becomes disabled, outside |lock_|.
+    // TraceLog::IsEnabled() is false at this point.
+    virtual void OnTraceLogDisabled() = 0;
+  };
+  void AddAsyncEnabledStateObserver(
+      WeakPtr<AsyncEnabledStateObserver> listener);
+  void RemoveAsyncEnabledStateObserver(AsyncEnabledStateObserver* listener);
+  bool HasAsyncEnabledStateObserver(AsyncEnabledStateObserver* listener) const;
+
+  TraceLogStatus GetStatus() const;
+  bool BufferIsFull() const;
+
+  // Computes an estimate of the size of the TraceLog including all the retained
+  // objects.
+  void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead);
+
+  void SetArgumentFilterPredicate(
+      const ArgumentFilterPredicate& argument_filter_predicate);
+
+  // Flush all collected events to the given output callback. The callback will
+  // be called one or more times either synchronously or asynchronously from
+  // the current thread with IPC-bite-size chunks. The string format is
+  // undefined. Use TraceResultBuffer to convert one or more trace strings to
+  // JSON. The callback can be null if the caller doesn't want any data.
+  // Due to the implementation of thread-local buffers, flush can't be
+  // done when tracing is enabled. If called when tracing is enabled, the
+  // callback will be called directly with (empty_string, false) to indicate
+  // the end of this unsuccessful flush. Flush does the serialization
+  // on the same thread if the caller doesn't set use_worker_thread explicitly.
+  typedef base::Callback<void(const scoped_refptr<base::RefCountedString>&,
+                              bool has_more_events)> OutputCallback;
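+  // Illustrative flush usage (a sketch; the accumulator and helper names are
+  // hypothetical):
+  //   void OnTraceData(const scoped_refptr<base::RefCountedString>& chunk,
+  //                    bool has_more_events) {
+  //     g_json_chunks.push_back(chunk->data());
+  //     if (!has_more_events)
+  //       StitchAndSave(g_json_chunks);  // hypothetical
+  //   }
+  //   TraceLog::GetInstance()->Flush(base::Bind(&OnTraceData));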
+  void Flush(const OutputCallback& cb, bool use_worker_thread = false);
+
+  // Cancels tracing and discards collected data.
+  void CancelTracing(const OutputCallback& cb);
+
+  typedef void (*AddTraceEventOverrideCallback)(const TraceEvent&);
+  // The override callback may be invoked until the flush is finished, i.e. it
+  // must remain callable until OutputCallback is called with
+  // has_more_events==false.
+  void SetAddTraceEventOverride(const AddTraceEventOverrideCallback& override);
+
+  // Called by TRACE_EVENT* macros, don't call this directly.
+  // The name parameter is a category group for example:
+  // TRACE_EVENT0("renderer,webkit", "WebViewImpl::HandleInputEvent")
+  static const unsigned char* GetCategoryGroupEnabled(const char* name);
+  static const char* GetCategoryGroupName(
+      const unsigned char* category_group_enabled);
+
+  // Called by TRACE_EVENT* macros, don't call this directly.
+  // If |copy| is set, |name|, |arg_name1| and |arg_name2| will be deep copied
+  // into the event; see "Memory scoping note" and TRACE_EVENT_COPY_XXX above.
+  TraceEventHandle AddTraceEvent(
+      char phase,
+      const unsigned char* category_group_enabled,
+      const char* name,
+      const char* scope,
+      unsigned long long id,
+      int num_args,
+      const char* const* arg_names,
+      const unsigned char* arg_types,
+      const unsigned long long* arg_values,
+      std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
+      unsigned int flags);
+  TraceEventHandle AddTraceEventWithBindId(
+      char phase,
+      const unsigned char* category_group_enabled,
+      const char* name,
+      const char* scope,
+      unsigned long long id,
+      unsigned long long bind_id,
+      int num_args,
+      const char* const* arg_names,
+      const unsigned char* arg_types,
+      const unsigned long long* arg_values,
+      std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
+      unsigned int flags);
+  TraceEventHandle AddTraceEventWithProcessId(
+      char phase,
+      const unsigned char* category_group_enabled,
+      const char* name,
+      const char* scope,
+      unsigned long long id,
+      int process_id,
+      int num_args,
+      const char* const* arg_names,
+      const unsigned char* arg_types,
+      const unsigned long long* arg_values,
+      std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
+      unsigned int flags);
+  TraceEventHandle AddTraceEventWithThreadIdAndTimestamp(
+      char phase,
+      const unsigned char* category_group_enabled,
+      const char* name,
+      const char* scope,
+      unsigned long long id,
+      int thread_id,
+      const TimeTicks& timestamp,
+      int num_args,
+      const char* const* arg_names,
+      const unsigned char* arg_types,
+      const unsigned long long* arg_values,
+      std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
+      unsigned int flags);
+  TraceEventHandle AddTraceEventWithThreadIdAndTimestamp(
+      char phase,
+      const unsigned char* category_group_enabled,
+      const char* name,
+      const char* scope,
+      unsigned long long id,
+      unsigned long long bind_id,
+      int thread_id,
+      const TimeTicks& timestamp,
+      int num_args,
+      const char* const* arg_names,
+      const unsigned char* arg_types,
+      const unsigned long long* arg_values,
+      std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
+      unsigned int flags);
+
+  // Adds a metadata event that will be written when the trace log is flushed.
+  void AddMetadataEvent(
+      const unsigned char* category_group_enabled,
+      const char* name,
+      int num_args,
+      const char* const* arg_names,
+      const unsigned char* arg_types,
+      const unsigned long long* arg_values,
+      std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
+      unsigned int flags);
+
+  void UpdateTraceEventDuration(const unsigned char* category_group_enabled,
+                                const char* name,
+                                TraceEventHandle handle);
+
+  void UpdateTraceEventDurationExplicit(
+      const unsigned char* category_group_enabled,
+      const char* name,
+      TraceEventHandle handle,
+      const TimeTicks& now,
+      const ThreadTicks& thread_now);
+
+  void EndFilteredEvent(const unsigned char* category_group_enabled,
+                        const char* name,
+                        TraceEventHandle handle);
+
+  int process_id() const { return process_id_; }
+
+  uint64_t MangleEventId(uint64_t id);
+
+  // Exposed for unittesting:
+
+  // Testing factory for TraceEventFilter.
+  typedef std::unique_ptr<TraceEventFilter> (*FilterFactoryForTesting)(
+      const std::string& /* predicate_name */);
+  void SetFilterFactoryForTesting(FilterFactoryForTesting factory) {
+    filter_factory_for_testing_ = factory;
+  }
+
+  // Allows clearing up our singleton instance.
+  static void ResetForTesting();
+
+  // Allow tests to inspect TraceEvents.
+  TraceEvent* GetEventByHandle(TraceEventHandle handle);
+
+  void SetProcessID(int process_id);
+
+  // Process sort indices, if set, override the order in which a process
+  // appears relative to other processes in the trace viewer. Processes are
+  // sorted first by sort index, ascending, then by name, and then by tid.
+  void SetProcessSortIndex(int sort_index);
+
+  // Sets the name of the process.
+  void set_process_name(const std::string& process_name) {
+    AutoLock lock(lock_);
+    process_name_ = process_name;
+  }
+
+  bool IsProcessNameEmpty() const { return process_name_.empty(); }
+
+  // Processes can have labels in addition to their names. Use labels, for
+  // instance, to list out the web page titles that a process is handling.
+  void UpdateProcessLabel(int label_id, const std::string& current_label);
+  void RemoveProcessLabel(int label_id);
+
+  // Thread sort indices, if set, override the order in which a thread appears
+  // within its process in the trace viewer. Threads are sorted first by sort
+  // index, ascending, then by name, and then by tid.
+  void SetThreadSortIndex(PlatformThreadId thread_id, int sort_index);
+
+  // Allow setting an offset between the current TimeTicks time and the time
+  // that should be reported.
+  void SetTimeOffset(TimeDelta offset);
+
+  size_t GetObserverCountForTest() const;
+
+  // Call this method if the current thread may block the message loop, to
+  // prevent the thread from using the thread-local buffer: the thread might
+  // not handle the flush request in time, causing loss of unflushed events.
+  void SetCurrentThreadBlocksMessageLoop();
+
+#if defined(OS_WIN)
+  // This function is called by the ETW exporting module whenever the ETW
+  // keyword (flags) changes. This keyword indicates which categories should be
+  // exported, so whenever it changes, we adjust accordingly.
+  void UpdateETWCategoryGroupEnabledFlags();
+#endif
+
+  // Replaces |logged_events_| with a new TraceBuffer for testing.
+  void SetTraceBufferForTesting(std::unique_ptr<TraceBuffer> trace_buffer);
+
+ private:
+  typedef unsigned int InternalTraceOptions;
+
+  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
+                           TraceBufferRingBufferGetReturnChunk);
+  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
+                           TraceBufferRingBufferHalfIteration);
+  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
+                           TraceBufferRingBufferFullIteration);
+  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture, TraceBufferVectorReportFull);
+  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
+                           ConvertTraceConfigToInternalOptions);
+  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
+                           TraceRecordAsMuchAsPossibleMode);
+
+  friend class base::NoDestructor<TraceLog>;
+
+  // MemoryDumpProvider implementation.
+  bool OnMemoryDump(const MemoryDumpArgs& args,
+                    ProcessMemoryDump* pmd) override;
+
+  // Enable/disable each category group based on the current mode_,
+  // category_filter_ and event_filters_enabled_. A category group is enabled
+  // for recording if category_filter_ is not null and matches it; a category
+  // is enabled for filtering if any filter in event_filters_enabled_ enables
+  // it.
+  void UpdateCategoryRegistry();
+  void UpdateCategoryState(TraceCategory* category);
+
+  void CreateFiltersForTraceConfig();
+
+  InternalTraceOptions GetInternalOptionsFromTraceConfig(
+      const TraceConfig& config);
+
+  class ThreadLocalEventBuffer;
+  class OptionalAutoLock;
+  struct RegisteredAsyncObserver;
+
+  TraceLog();
+  ~TraceLog() override;
+  void AddMetadataEventsWhileLocked();
+
+  InternalTraceOptions trace_options() const {
+    return static_cast<InternalTraceOptions>(
+        subtle::NoBarrier_Load(&trace_options_));
+  }
+
+  TraceBuffer* trace_buffer() const { return logged_events_.get(); }
+  TraceBuffer* CreateTraceBuffer();
+
+  std::string EventToConsoleMessage(unsigned char phase,
+                                    const TimeTicks& timestamp,
+                                    TraceEvent* trace_event);
+
+  TraceEvent* AddEventToThreadSharedChunkWhileLocked(TraceEventHandle* handle,
+                                                     bool check_buffer_is_full);
+  void CheckIfBufferIsFullWhileLocked();
+  void SetDisabledWhileLocked(uint8_t modes);
+
+  TraceEvent* GetEventByHandleInternal(TraceEventHandle handle,
+                                       OptionalAutoLock* lock);
+
+  void FlushInternal(const OutputCallback& cb,
+                     bool use_worker_thread,
+                     bool discard_events);
+
+  // |generation| is used in the following callbacks to check if the callback
+  // is called for the flush of the current |logged_events_|.
+  void FlushCurrentThread(int generation, bool discard_events);
+  // Usually it runs on a different thread.
+  static void ConvertTraceEventsToTraceFormat(
+      std::unique_ptr<TraceBuffer> logged_events,
+      const TraceLog::OutputCallback& flush_output_callback,
+      const ArgumentFilterPredicate& argument_filter_predicate);
+  void FinishFlush(int generation, bool discard_events);
+  void OnFlushTimeout(int generation, bool discard_events);
+
+  int generation() const {
+    return static_cast<int>(subtle::NoBarrier_Load(&generation_));
+  }
+  bool CheckGeneration(int generation) const {
+    return generation == this->generation();
+  }
+  void UseNextTraceBuffer();
+
+  TimeTicks OffsetNow() const {
+    // This should be TRACE_TIME_TICKS_NOW but include order makes that hard.
+    return OffsetTimestamp(base::subtle::TimeTicksNowIgnoringOverride());
+  }
+  TimeTicks OffsetTimestamp(const TimeTicks& timestamp) const {
+    return timestamp - time_offset_;
+  }
+
+  // Internal representation of trace options since we store the currently used
+  // trace option as an AtomicWord.
+  static const InternalTraceOptions kInternalNone;
+  static const InternalTraceOptions kInternalRecordUntilFull;
+  static const InternalTraceOptions kInternalRecordContinuously;
+  static const InternalTraceOptions kInternalEchoToConsole;
+  static const InternalTraceOptions kInternalRecordAsMuchAsPossible;
+  static const InternalTraceOptions kInternalEnableArgumentFilter;
+
+  // This lock protects TraceLog member accesses (except for members protected
+  // by thread_info_lock_) from arbitrary threads.
+  mutable Lock lock_;
+  // This lock protects accesses to thread_names_, thread_event_start_times_
+  // and thread_colors_.
+  Lock thread_info_lock_;
+  uint8_t enabled_modes_;  // See TraceLog::Mode.
+  int num_traces_recorded_;
+  std::unique_ptr<TraceBuffer> logged_events_;
+  std::vector<std::unique_ptr<TraceEvent>> metadata_events_;
+  bool dispatching_to_observer_list_;
+  std::vector<EnabledStateObserver*> enabled_state_observer_list_;
+  std::map<AsyncEnabledStateObserver*, RegisteredAsyncObserver>
+      async_observers_;
+
+  std::string process_name_;
+  std::unordered_map<int, std::string> process_labels_;
+  int process_sort_index_;
+  std::unordered_map<int, int> thread_sort_indices_;
+  std::unordered_map<int, std::string> thread_names_;
+  base::Time process_creation_time_;
+
+  // The following two maps are used only when ECHO_TO_CONSOLE.
+  std::unordered_map<int, base::stack<TimeTicks>> thread_event_start_times_;
+  std::unordered_map<std::string, int> thread_colors_;
+
+  TimeTicks buffer_limit_reached_timestamp_;
+
+  // XORed with TraceID to make it unlikely to collide with other processes.
+  unsigned long long process_id_hash_;
+
+  int process_id_;
+
+  TimeDelta time_offset_;
+
+  subtle::AtomicWord /* Options */ trace_options_;
+
+  TraceConfig trace_config_;
+  TraceConfig::EventFilters enabled_event_filters_;
+
+  ThreadLocalPointer<ThreadLocalEventBuffer> thread_local_event_buffer_;
+  ThreadLocalBoolean thread_blocks_message_loop_;
+  ThreadLocalBoolean thread_is_in_trace_event_;
+
+  // Contains the message loops of threads that have had at least one event
+  // added into the local event buffer. Not using SingleThreadTaskRunner
+  // because we need to know the lifetime of the message loops.
+  hash_set<MessageLoop*> thread_message_loops_;
+
+  // For events which can't be added into the thread local buffer, e.g. events
+  // from threads without a message loop.
+  std::unique_ptr<TraceBufferChunk> thread_shared_chunk_;
+  size_t thread_shared_chunk_index_;
+
+  // Set when asynchronous Flush is in progress.
+  OutputCallback flush_output_callback_;
+  scoped_refptr<SingleThreadTaskRunner> flush_task_runner_;
+  ArgumentFilterPredicate argument_filter_predicate_;
+  subtle::AtomicWord generation_;
+  bool use_worker_thread_;
+  subtle::AtomicWord trace_event_override_;
+
+  FilterFactoryForTesting filter_factory_for_testing_;
+
+  DISALLOW_COPY_AND_ASSIGN(TraceLog);
+};
+
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_TRACE_LOG_H_
diff --git a/base/trace_event/trace_log_constants.cc b/base/trace_event/trace_log_constants.cc
new file mode 100644
index 0000000..65dca2e
--- /dev/null
+++ b/base/trace_event/trace_log_constants.cc
@@ -0,0 +1,26 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_log.h"
+
+namespace base {
+namespace trace_event {
+
+// Constants used by TraceLog's internal implementation of trace_options.
+const TraceLog::InternalTraceOptions
+    TraceLog::kInternalNone = 0;
+const TraceLog::InternalTraceOptions
+    TraceLog::kInternalRecordUntilFull = 1 << 0;
+const TraceLog::InternalTraceOptions
+    TraceLog::kInternalRecordContinuously = 1 << 1;
+// 1 << 2 is reserved for the DEPRECATED kInternalEnableSampling. DO NOT USE.
+const TraceLog::InternalTraceOptions
+    TraceLog::kInternalEchoToConsole = 1 << 3;
+const TraceLog::InternalTraceOptions
+    TraceLog::kInternalRecordAsMuchAsPossible = 1 << 4;
+const TraceLog::InternalTraceOptions
+    TraceLog::kInternalEnableArgumentFilter = 1 << 5;
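+
+// These flags roughly correspond to TraceConfig's record modes (e.g.
+// "record-until-full", "record-continuously", "trace-to-console",
+// "record-as-much-as-possible"); TraceLog::GetInternalOptionsFromTraceConfig()
+// performs the mapping.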
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/tracing_agent.cc b/base/trace_event/tracing_agent.cc
new file mode 100644
index 0000000..e48feff
--- /dev/null
+++ b/base/trace_event/tracing_agent.cc
@@ -0,0 +1,24 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/tracing_agent.h"
+
+namespace base {
+namespace trace_event {
+
+TracingAgent::~TracingAgent() = default;
+
+bool TracingAgent::SupportsExplicitClockSync() {
+  return false;
+}
+
+void TracingAgent::RecordClockSyncMarker(
+    const std::string& sync_id,
+    RecordClockSyncMarkerCallback callback) {
+  DCHECK(SupportsExplicitClockSync());
+}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/tracing_agent.h b/base/trace_event/tracing_agent.h
new file mode 100644
index 0000000..f818457
--- /dev/null
+++ b/base/trace_event/tracing_agent.h
@@ -0,0 +1,96 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACING_AGENT_H_
+#define BASE_TRACE_EVENT_TRACING_AGENT_H_
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/memory/ref_counted_memory.h"
+#include "base/values.h"
+
+namespace base {
+
+class TimeTicks;
+
+namespace trace_event {
+
+class TraceConfig;
+
+// A tracing agent is an entity that records its own sort of trace. Each
+// tracing method that produces its own trace log should implement this
+// interface. All tracing agents must only be controlled by TracingController.
+// Some existing examples include TracingControllerImpl for Chrome trace events,
+// DebugDaemonClient for CrOS system trace, EtwTracingAgent for Windows system
+// trace and PowerTracingAgent for BattOr power trace.
+class BASE_EXPORT TracingAgent {
+ public:
+  using StartAgentTracingCallback =
+      base::OnceCallback<void(const std::string& agent_name, bool success)>;
+  // Passing a null or empty events_str_ptr indicates that no trace data is
+  // available for the specified agent.
+  using StopAgentTracingCallback = base::OnceCallback<void(
+      const std::string& agent_name,
+      const std::string& events_label,
+      const scoped_refptr<base::RefCountedString>& events_str_ptr)>;
+  using RecordClockSyncMarkerCallback =
+      base::OnceCallback<void(const std::string& sync_id,
+                              const TimeTicks& issue_ts,
+                              const TimeTicks& issue_end_ts)>;
+
+  virtual ~TracingAgent();
+
+  // Gets the name of the tracing agent. Each tracing agent's name should be
+  // unique.
+  virtual std::string GetTracingAgentName() = 0;
+
+  // Gets the trace event label of this tracing agent. The label will be used to
+  // label this agent's trace when all traces from different tracing agents are
+  // combined. Multiple tracing agents could have the same label. The tracing
+  // agents using the same label should not be able to run at the same time. For
+  // example, ETW on Windows and CrOS system tracing both use
+  // "systemTraceEvents" as the label. Those two agents never run at the same
+  // time because they are for different platforms.
+  virtual std::string GetTraceEventLabel() = 0;
+
+  // Starts tracing on the tracing agent with the trace configuration.
+  virtual void StartAgentTracing(const TraceConfig& trace_config,
+                                 StartAgentTracingCallback callback) = 0;
+
+  // Stops tracing on the tracing agent. The trace data will be passed back to
+  // the TracingController via the callback.
+  virtual void StopAgentTracing(StopAgentTracingCallback callback) = 0;
+
+  // Checks if the tracing agent supports explicit clock synchronization.
+  virtual bool SupportsExplicitClockSync();
+
+  // Records a clock sync marker issued by another tracing agent. This is only
+  // used if the tracing agent supports explicit clock synchronization.
+  //
+  // Two things need to be done:
+  // 1. The issuer asks the receiver to record the clock sync marker.
+  // 2. The issuer records how long the receiver takes to do the recording.
+  //
+  // In Chrome, the receiver thread also runs in Chrome and it will talk to the
+  // real receiver entity, e.g., power monitor or Android device system, via
+  // different communication methods, e.g., through USB or file reading/writing.
+  // The 2nd task measures that communication latency.
+  //
+  // Having a reliable timing measurement for the 2nd task requires a
+  // synchronous function call without any cross-thread or cross-process
+  // activity. However, tracing agents in Chrome run in their own threads.
+  // Therefore, the issuer needs to delegate the 2nd task to the receiver so
+  // that the time measurements are taken in the receiver thread, and the
+  // receiver thread passes them back to the issuer in the callback.
+  //
+  // The assumption is that the receiver thread knows the issuer's clock, which
+  // is true in Chrome because all agent threads' clocks are the Chrome clock.
+  virtual void RecordClockSyncMarker(const std::string& sync_id,
+                                     RecordClockSyncMarkerCallback callback);
+};
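+
+// A minimal agent sketch (illustrative; class name and string values are
+// hypothetical):
+//   class NullTracingAgent : public base::trace_event::TracingAgent {
+//    public:
+//     std::string GetTracingAgentName() override { return "null"; }
+//     std::string GetTraceEventLabel() override { return "nullTraceEvents"; }
+//     void StartAgentTracing(const TraceConfig& trace_config,
+//                            StartAgentTracingCallback callback) override {
+//       std::move(callback).Run(GetTracingAgentName(), true /* success */);
+//     }
+//     void StopAgentTracing(StopAgentTracingCallback callback) override {
+//       std::move(callback).Run(GetTracingAgentName(), GetTraceEventLabel(),
+//                               nullptr /* no trace data */);
+//     }
+//   };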
+
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_TRACING_AGENT_H_
diff --git a/base/tuple.h b/base/tuple.h
new file mode 100644
index 0000000..58681d5
--- /dev/null
+++ b/base/tuple.h
@@ -0,0 +1,112 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Use std::tuple as tuple type. This file contains helper functions for
+// working with std::tuples.
+// The functions DispatchToMethod and DispatchToFunction take a function pointer
+// or instance and method pointer, and unpack a tuple into arguments to the
+// call.
+//
+// Example usage:
+//   // These two ways of creating a tuple are identical.
+//   std::tuple<int, const char*> tuple_a(1, "wee");
+//   std::tuple<int, const char*> tuple_b = std::make_tuple(1, "wee");
+//
+//   void SomeFunc(int a, const char* b) { }
+//   DispatchToFunction(&SomeFunc, tuple_a);  // SomeFunc(1, "wee")
+//   DispatchToFunction(
+//       &SomeFunc, std::make_tuple(10, "foo"));    // SomeFunc(10, "foo")
+//
+//   struct Foo { void SomeMeth(int a, int b, int c) { } } foo;
+//   DispatchToMethod(&foo, &Foo::SomeMeth, std::make_tuple(1, 2, 3));
+//   // foo.SomeMeth(1, 2, 3);
+
+#ifndef BASE_TUPLE_H_
+#define BASE_TUPLE_H_
+
+#include <stddef.h>
+#include <tuple>
+#include <utility>
+
+#include "build/build_config.h"
+
+namespace base {
+
+// Dispatchers ----------------------------------------------------------------
+//
+// Helper functions that call the given method on an object, with the unpacked
+// tuple arguments.  Notice that they all have the same number of arguments,
+// so you need only write:
+//   DispatchToMethod(object, &Object::method, args);
+// This is very useful for templated dispatchers, since they don't need to know
+// what type |args| is.
+
+// Non-Static Dispatchers with no out params.
+
+template <typename ObjT, typename Method, typename Tuple, size_t... Ns>
+inline void DispatchToMethodImpl(const ObjT& obj,
+                                 Method method,
+                                 Tuple&& args,
+                                 std::index_sequence<Ns...>) {
+  (obj->*method)(std::get<Ns>(std::forward<Tuple>(args))...);
+}
+
+template <typename ObjT, typename Method, typename Tuple>
+inline void DispatchToMethod(const ObjT& obj,
+                             Method method,
+                             Tuple&& args) {
+  constexpr size_t size = std::tuple_size<std::decay_t<Tuple>>::value;
+  DispatchToMethodImpl(obj, method, std::forward<Tuple>(args),
+                       std::make_index_sequence<size>());
+}
+
+// Static Dispatchers with no out params.
+
+template <typename Function, typename Tuple, size_t... Ns>
+inline void DispatchToFunctionImpl(Function function,
+                                   Tuple&& args,
+                                   std::index_sequence<Ns...>) {
+  (*function)(std::get<Ns>(std::forward<Tuple>(args))...);
+}
+
+template <typename Function, typename Tuple>
+inline void DispatchToFunction(Function function, Tuple&& args) {
+  constexpr size_t size = std::tuple_size<std::decay_t<Tuple>>::value;
+  DispatchToFunctionImpl(function, std::forward<Tuple>(args),
+                         std::make_index_sequence<size>());
+}
+
+// Dispatchers with out parameters.
+
+template <typename ObjT,
+          typename Method,
+          typename InTuple,
+          typename OutTuple,
+          size_t... InNs,
+          size_t... OutNs>
+inline void DispatchToMethodImpl(const ObjT& obj,
+                                 Method method,
+                                 InTuple&& in,
+                                 OutTuple* out,
+                                 std::index_sequence<InNs...>,
+                                 std::index_sequence<OutNs...>) {
+  (obj->*method)(std::get<InNs>(std::forward<InTuple>(in))...,
+                 &std::get<OutNs>(*out)...);
+}
+
+template <typename ObjT, typename Method, typename InTuple, typename OutTuple>
+inline void DispatchToMethod(const ObjT& obj,
+                             Method method,
+                             InTuple&& in,
+                             OutTuple* out) {
+  constexpr size_t in_size = std::tuple_size<std::decay_t<InTuple>>::value;
+  constexpr size_t out_size = std::tuple_size<OutTuple>::value;
+  DispatchToMethodImpl(obj, method, std::forward<InTuple>(in), out,
+                       std::make_index_sequence<in_size>(),
+                       std::make_index_sequence<out_size>());
+}
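+
+// Example with out params (illustrative):
+//   struct Math { void Add(int a, int b, int* sum) { *sum = a + b; } } math;
+//   std::tuple<int> out;
+//   DispatchToMethod(&math, &Math::Add, std::make_tuple(1, 2), &out);
+//   // std::get<0>(out) is now 3.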
+
+}  // namespace base
+
+#endif  // BASE_TUPLE_H_
diff --git a/base/tuple_unittest.cc b/base/tuple_unittest.cc
new file mode 100644
index 0000000..4b38797
--- /dev/null
+++ b/base/tuple_unittest.cc
@@ -0,0 +1,118 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/tuple.h"
+
+#include "base/compiler_specific.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+void DoAdd(int a, int b, int c, int* res) {
+  *res = a + b + c;
+}
+
+struct Addy {
+  Addy() = default;
+  void DoAdd(int a, int b, int c, int d, int* res) {
+    *res = a + b + c + d;
+  }
+};
+
+struct Addz {
+  Addz() = default;
+  void DoAdd(int a, int b, int c, int d, int e, int* res) {
+    *res = a + b + c + d + e;
+  }
+};
+
+}  // namespace
+
+TEST(TupleTest, Basic) {
+  std::tuple<> t0 = std::make_tuple();
+  ALLOW_UNUSED_LOCAL(t0);
+  std::tuple<int> t1(1);
+  std::tuple<int, const char*> t2 =
+      std::make_tuple(1, static_cast<const char*>("wee"));
+  ALLOW_UNUSED_LOCAL(t2);
+  std::tuple<int, int, int> t3(1, 2, 3);
+  ALLOW_UNUSED_LOCAL(t3);
+  std::tuple<int, int, int, int*> t4(1, 2, 3, &std::get<0>(t1));
+  std::tuple<int, int, int, int, int*> t5(1, 2, 3, 4, &std::get<0>(t4));
+  std::tuple<int, int, int, int, int, int*> t6(1, 2, 3, 4, 5, &std::get<0>(t4));
+
+  EXPECT_EQ(1, std::get<0>(t1));
+  DispatchToFunction(&DoAdd, t4);
+  EXPECT_EQ(6, std::get<0>(t1));
+
+  int res = 0;
+  DispatchToFunction(&DoAdd, std::make_tuple(9, 8, 7, &res));
+  EXPECT_EQ(24, res);
+
+  Addy addy;
+  EXPECT_EQ(1, std::get<0>(t4));
+  DispatchToMethod(&addy, &Addy::DoAdd, t5);
+  EXPECT_EQ(10, std::get<0>(t4));
+
+  Addz addz;
+  EXPECT_EQ(10, std::get<0>(t4));
+  DispatchToMethod(&addz, &Addz::DoAdd, t6);
+  EXPECT_EQ(15, std::get<0>(t4));
+}
+
+namespace {
+
+struct CopyLogger {
+  CopyLogger() { ++TimesConstructed; }
+  CopyLogger(const CopyLogger& tocopy) { ++TimesConstructed; ++TimesCopied; }
+  ~CopyLogger() = default;
+
+  static int TimesCopied;
+  static int TimesConstructed;
+};
+
+void SomeLoggerMethRef(const CopyLogger& logy, const CopyLogger* ptr, bool* b) {
+  *b = &logy == ptr;
+}
+
+void SomeLoggerMethCopy(CopyLogger logy, const CopyLogger* ptr, bool* b) {
+  *b = &logy == ptr;
+}
+
+int CopyLogger::TimesCopied = 0;
+int CopyLogger::TimesConstructed = 0;
+
+}  // namespace
+
+TEST(TupleTest, Copying) {
+  CopyLogger logger;
+  EXPECT_EQ(0, CopyLogger::TimesCopied);
+  EXPECT_EQ(1, CopyLogger::TimesConstructed);
+
+  bool res = false;
+
+  // Creating the tuple should copy the class to store internally in the tuple.
+  std::tuple<CopyLogger, CopyLogger*, bool*> tuple(logger, &logger, &res);
+  std::get<CopyLogger*>(tuple) = &std::get<CopyLogger>(tuple);
+  EXPECT_EQ(2, CopyLogger::TimesConstructed);
+  EXPECT_EQ(1, CopyLogger::TimesCopied);
+
+  // Our internal Logger and the one passed to the function should be the same.
+  res = false;
+  DispatchToFunction(&SomeLoggerMethRef, tuple);
+  EXPECT_TRUE(res);
+  EXPECT_EQ(2, CopyLogger::TimesConstructed);
+  EXPECT_EQ(1, CopyLogger::TimesCopied);
+
+  // Now they should be different, since the function call will make a copy.
+  res = false;
+  DispatchToFunction(&SomeLoggerMethCopy, tuple);
+  EXPECT_FALSE(res);
+  EXPECT_EQ(3, CopyLogger::TimesConstructed);
+  EXPECT_EQ(2, CopyLogger::TimesCopied);
+}
+
+}  // namespace base
diff --git a/base/unguessable_token.cc b/base/unguessable_token.cc
new file mode 100644
index 0000000..0d8aad3
--- /dev/null
+++ b/base/unguessable_token.cc
@@ -0,0 +1,41 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/unguessable_token.h"
+
+#include "base/format_macros.h"
+#include "base/rand_util.h"
+#include "base/strings/stringprintf.h"
+
+namespace base {
+
+UnguessableToken::UnguessableToken(uint64_t high, uint64_t low)
+    : high_(high), low_(low) {}
+
+std::string UnguessableToken::ToString() const {
+  return base::StringPrintf("%016" PRIX64 "%016" PRIX64, high_, low_);
+}
+
+// static
+UnguessableToken UnguessableToken::Create() {
+  UnguessableToken token;
+  // Use base::RandBytes instead of crypto::RandBytes, because crypto calls the
+  // base version directly, and to prevent the dependency from base/ to crypto/.
+  base::RandBytes(&token, sizeof(token));
+  return token;
+}
+
+// static
+UnguessableToken UnguessableToken::Deserialize(uint64_t high, uint64_t low) {
+  // Receiving a zeroed out UnguessableToken from another process means that it
+  // was never initialized via Create(). Treat this case as a security issue.
+  DCHECK(!(high == 0 && low == 0));
+  return UnguessableToken(high, low);
+}
+
+std::ostream& operator<<(std::ostream& out, const UnguessableToken& token) {
+  return out << "(" << token.ToString() << ")";
+}
+
+}  // namespace base
diff --git a/base/unguessable_token.h b/base/unguessable_token.h
new file mode 100644
index 0000000..6858e22
--- /dev/null
+++ b/base/unguessable_token.h
@@ -0,0 +1,110 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_UNGUESSABLE_TOKEN_H_
+#define BASE_UNGUESSABLE_TOKEN_H_
+
+#include <stdint.h>
+#include <string.h>
+#include <iosfwd>
+#include <tuple>
+
+#include "base/base_export.h"
+#include "base/hash.h"
+#include "base/logging.h"
+
+namespace base {
+
+struct UnguessableTokenHash;
+
+// An UnguessableToken is a 128-bit token generated from a cryptographically
+// strong random source. It can be used as part of a larger aggregate type,
+// or as an ID in and of itself.
+//
+// UnguessableToken can be used to implement "Capability-Based Security".
+// In other words, UnguessableToken can be used when the resource associated
+// with the ID needs to be protected against manipulation by other untrusted
+// agents in the system, and there is no other convenient way to verify the
+// authority of the agent to do so (because the resource is part of a table
+// shared across processes, for instance). In such a scheme, knowledge of the
+// token value in and of itself is sufficient proof of authority to carry out
+// an operation against the associated resource.
+//
+// Use Create() for creating new UnguessableTokens.
+//
+// NOTE: It is illegal to send empty UnguessableTokens across processes, and
+// sending/receiving empty tokens should be treated as a security issue.
+// If there is a valid scenario for sending "no token" across processes,
+// base::Optional should be used instead of an empty token.
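+//
+// Illustrative usage (a sketch):
+//   base::UnguessableToken token = base::UnguessableToken::Create();
+//   uint64_t high = token.GetHighForSerialization();
+//   uint64_t low = token.GetLowForSerialization();
+//   // ...send |high| and |low| across a process boundary...
+//   base::UnguessableToken copy =
+//       base::UnguessableToken::Deserialize(high, low);
+//   DCHECK_EQ(token, copy);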
+class BASE_EXPORT UnguessableToken {
+ public:
+  // Create a unique UnguessableToken.
+  static UnguessableToken Create();
+
+// Returns an UnguessableToken built from the two 64-bit halves provided.
+  // It should only be used in deserialization scenarios.
+  //
+  // NOTE: If the deserialized token is empty, it means that it was never
+  // initialized via Create(). This is a security issue, and should be handled.
+  static UnguessableToken Deserialize(uint64_t high, uint64_t low);
+
+  // Creates an empty UnguessableToken.
+  // Assign to it with Create() before using it.
+  constexpr UnguessableToken() = default;
+
+  // NOTE: Serializing an empty UnguessableToken is an illegal operation.
+  uint64_t GetHighForSerialization() const {
+    DCHECK(!is_empty());
+    return high_;
+  }
+
+  // NOTE: Serializing an empty UnguessableToken is an illegal operation.
+  uint64_t GetLowForSerialization() const {
+    DCHECK(!is_empty());
+    return low_;
+  }
+
+  bool is_empty() const { return high_ == 0 && low_ == 0; }
+
+  // Hex representation of the unguessable token.
+  std::string ToString() const;
+
+  explicit operator bool() const { return !is_empty(); }
+
+  bool operator<(const UnguessableToken& other) const {
+    return std::tie(high_, low_) < std::tie(other.high_, other.low_);
+  }
+
+  bool operator==(const UnguessableToken& other) const {
+    return high_ == other.high_ && low_ == other.low_;
+  }
+
+  bool operator!=(const UnguessableToken& other) const {
+    return !(*this == other);
+  }
+
+ private:
+  friend struct UnguessableTokenHash;
+  UnguessableToken(uint64_t high, uint64_t low);
+
+  // Note: Two uint64_t are used instead of uint8_t[16], in order to have a
+  // simpler ToString() and is_empty().
+  uint64_t high_ = 0;
+  uint64_t low_ = 0;
+};
+
+BASE_EXPORT std::ostream& operator<<(std::ostream& out,
+                                     const UnguessableToken& token);
+
+// For use in std::unordered_map.
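+// (e.g. std::unordered_map<UnguessableToken, V, UnguessableTokenHash>).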
+struct UnguessableTokenHash {
+  size_t operator()(const base::UnguessableToken& token) const {
+    DCHECK(token);
+    return base::HashInts64(token.high_, token.low_);
+  }
+};
+
+}  // namespace base
+
+#endif  // BASE_UNGUESSABLE_TOKEN_H_
diff --git a/base/unguessable_token_unittest.cc b/base/unguessable_token_unittest.cc
new file mode 100644
index 0000000..b70cc72
--- /dev/null
+++ b/base/unguessable_token_unittest.cc
@@ -0,0 +1,155 @@
+// Copyright (c) 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/unguessable_token.h"
+
+#include <memory>
+#include <sstream>
+#include <type_traits>
+
+#include "base/value_conversions.h"
+#include "base/values.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+void TestSmallerThanOperator(const UnguessableToken& a,
+                             const UnguessableToken& b) {
+  EXPECT_TRUE(a < b);
+  EXPECT_FALSE(b < a);
+}
+
+TEST(UnguessableTokenTest, VerifyEqualityOperators) {
+  // Deserialize is used for testing purposes.
+  // Use UnguessableToken::Create() in production code instead.
+  UnguessableToken token = UnguessableToken::Deserialize(1, 2);
+  UnguessableToken same_token = UnguessableToken::Deserialize(1, 2);
+  UnguessableToken diff_token = UnguessableToken::Deserialize(1, 3);
+
+  EXPECT_TRUE(token == token);
+  EXPECT_FALSE(token != token);
+
+  EXPECT_TRUE(token == same_token);
+  EXPECT_FALSE(token != same_token);
+
+  EXPECT_FALSE(token == diff_token);
+  EXPECT_FALSE(diff_token == token);
+  EXPECT_TRUE(token != diff_token);
+  EXPECT_TRUE(diff_token != token);
+}
+
+TEST(UnguessableTokenTest, VerifyConstructors) {
+  UnguessableToken token = UnguessableToken::Create();
+  EXPECT_FALSE(token.is_empty());
+  EXPECT_TRUE(token);
+
+  UnguessableToken copied_token(token);
+  EXPECT_TRUE(copied_token);
+  EXPECT_EQ(token, copied_token);
+
+  UnguessableToken uninitialized;
+  EXPECT_TRUE(uninitialized.is_empty());
+  EXPECT_FALSE(uninitialized);
+
+  EXPECT_TRUE(UnguessableToken().is_empty());
+  EXPECT_FALSE(UnguessableToken());
+}
+
+TEST(UnguessableTokenTest, VerifySerialization) {
+  UnguessableToken token = UnguessableToken::Create();
+
+  uint64_t high = token.GetHighForSerialization();
+  uint64_t low = token.GetLowForSerialization();
+
+  EXPECT_TRUE(high);
+  EXPECT_TRUE(low);
+
+  UnguessableToken deserialized = UnguessableToken::Deserialize(high, low);
+  EXPECT_EQ(token, deserialized);
+}
+
+TEST(UnguessableTokenTest, VerifyValueSerialization) {
+  UnguessableToken token = UnguessableToken::Create();
+  std::unique_ptr<Value> value = CreateUnguessableTokenValue(token);
+
+  UnguessableToken deserialized;
+  EXPECT_TRUE(GetValueAsUnguessableToken(*value, &deserialized));
+  EXPECT_EQ(token, deserialized);
+}
+
+// Common case (~88% of the time) - no leading zeroes in high_ nor low_.
+TEST(UnguessableTokenTest, VerifyToString1) {
+  UnguessableToken token =
+      UnguessableToken::Deserialize(0x1234567890ABCDEF, 0xFEDCBA0987654321);
+  std::string expected = "1234567890ABCDEFFEDCBA0987654321";
+
+  EXPECT_EQ(expected, token.ToString());
+
+  std::string expected_stream = "(1234567890ABCDEFFEDCBA0987654321)";
+  std::stringstream stream;
+  stream << token;
+  EXPECT_EQ(expected_stream, stream.str());
+}
+
+// Less common case - leading zeroes in high_ or low_ (testing with both).
+TEST(UnguessableTokenTest, VerifyToString2) {
+  UnguessableToken token = UnguessableToken::Deserialize(0x123, 0xABC);
+  std::string expected = "00000000000001230000000000000ABC";
+
+  EXPECT_EQ(expected, token.ToString());
+
+  std::string expected_stream = "(00000000000001230000000000000ABC)";
+  std::stringstream stream;
+  stream << token;
+  EXPECT_EQ(expected_stream, stream.str());
+}
+
+TEST(UnguessableTokenTest, VerifyToStringUniqueness) {
+  const UnguessableToken token1 =
+      UnguessableToken::Deserialize(0x0000000012345678, 0x0000000123456789);
+  const UnguessableToken token2 =
+      UnguessableToken::Deserialize(0x0000000123456781, 0x0000000023456789);
+  EXPECT_NE(token1.ToString(), token2.ToString());
+}
+
+TEST(UnguessableTokenTest, VerifySmallerThanOperator) {
+  // Deserialize is used for testing purposes.
+  // Use UnguessableToken::Create() in production code instead.
+  {
+    SCOPED_TRACE("a.low < b.low and a.high == b.high.");
+    TestSmallerThanOperator(UnguessableToken::Deserialize(0, 1),
+                            UnguessableToken::Deserialize(0, 5));
+  }
+  {
+    SCOPED_TRACE("a.low == b.low and a.high < b.high.");
+    TestSmallerThanOperator(UnguessableToken::Deserialize(1, 0),
+                            UnguessableToken::Deserialize(5, 0));
+  }
+  {
+    SCOPED_TRACE("a.low < b.low and a.high < b.high.");
+    TestSmallerThanOperator(UnguessableToken::Deserialize(1, 1),
+                            UnguessableToken::Deserialize(5, 5));
+  }
+  {
+    SCOPED_TRACE("a.low > b.low and a.high < b.high.");
+    TestSmallerThanOperator(UnguessableToken::Deserialize(1, 10),
+                            UnguessableToken::Deserialize(10, 1));
+  }
+}
+
+TEST(UnguessableTokenTest, VerifyHash) {
+  UnguessableToken token = UnguessableToken::Create();
+
+  EXPECT_EQ(base::HashInts64(token.GetHighForSerialization(),
+                             token.GetLowForSerialization()),
+            UnguessableTokenHash()(token));
+}
+
+TEST(UnguessableTokenTest, VerifyBasicUniqueness) {
+  EXPECT_NE(UnguessableToken::Create(), UnguessableToken::Create());
+
+  UnguessableToken token = UnguessableToken::Create();
+  EXPECT_NE(token.GetHighForSerialization(), token.GetLowForSerialization());
+}
+}  // namespace base
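
For a sense of how these pieces fit together, here is a minimal sketch of a consumer keying a map by token, relying on the UnguessableTokenHash functor exercised in VerifyHash above (the registry itself is hypothetical):

#include <unordered_map>

#include "base/unguessable_token.h"

// Hypothetical token-keyed registry; UnguessableTokenHash supplies the hash.
void RegisterExample() {
  std::unordered_map<base::UnguessableToken, int, base::UnguessableTokenHash>
      registry;
  base::UnguessableToken token = base::UnguessableToken::Create();
  registry[token] = 42;
  // Tokens round-trip through their two 64-bit halves, e.g. across IPC:
  base::UnguessableToken copy = base::UnguessableToken::Deserialize(
      token.GetHighForSerialization(), token.GetLowForSerialization());
  CHECK(registry.count(copy));
}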
diff --git a/base/value_conversions.cc b/base/value_conversions.cc
new file mode 100644
index 0000000..7e3fd94
--- /dev/null
+++ b/base/value_conversions.cc
@@ -0,0 +1,99 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/value_conversions.h"
+
+#include <stdint.h>
+
+#include <algorithm>
+#include <string>
+#include <vector>
+
+#include "base/files/file_path.h"
+#include "base/memory/ptr_util.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/time/time.h"
+#include "base/unguessable_token.h"
+#include "base/values.h"
+
+namespace base {
+namespace {
+// Helper for serializing and deserializing UnguessableToken.
+union UnguessableTokenRepresentation {
+  struct Field {
+    uint64_t high;
+    uint64_t low;
+  } field;
+
+  uint8_t buffer[sizeof(Field)];
+};
+}  // namespace
+
+// |Value| internally stores strings in UTF-8, so we have to convert from the
+// system native code to UTF-8 and back.
+std::unique_ptr<Value> CreateFilePathValue(const FilePath& in_value) {
+  return std::make_unique<Value>(in_value.AsUTF8Unsafe());
+}
+
+bool GetValueAsFilePath(const Value& value, FilePath* file_path) {
+  std::string str;
+  if (!value.GetAsString(&str))
+    return false;
+  if (file_path)
+    *file_path = FilePath::FromUTF8Unsafe(str);
+  return true;
+}
+
+// |Value| does not support 64-bit integers, and doubles do not have enough
+// precision, so we store the 64-bit time value as a string instead.
+std::unique_ptr<Value> CreateTimeDeltaValue(const TimeDelta& time) {
+  std::string string_value = base::Int64ToString(time.ToInternalValue());
+  return std::make_unique<Value>(string_value);
+}
+
+bool GetValueAsTimeDelta(const Value& value, TimeDelta* time) {
+  std::string str;
+  int64_t int_value;
+  if (!value.GetAsString(&str) || !base::StringToInt64(str, &int_value))
+    return false;
+  if (time)
+    *time = TimeDelta::FromInternalValue(int_value);
+  return true;
+}
+
+std::unique_ptr<Value> CreateUnguessableTokenValue(
+    const UnguessableToken& token) {
+  UnguessableTokenRepresentation representation;
+  representation.field.high = token.GetHighForSerialization();
+  representation.field.low = token.GetLowForSerialization();
+
+  return std::make_unique<Value>(
+      HexEncode(representation.buffer, sizeof(representation.buffer)));
+}
+
+bool GetValueAsUnguessableToken(const Value& value, UnguessableToken* token) {
+  if (!value.is_string()) {
+    return false;
+  }
+
+  // TODO(dcheng|yucliu): Make a function that accepts non vector variant and
+  // reads a fixed number of bytes.
+  std::vector<uint8_t> high_low_bytes;
+  if (!HexStringToBytes(value.GetString(), &high_low_bytes)) {
+    return false;
+  }
+
+  UnguessableTokenRepresentation representation;
+  if (high_low_bytes.size() != sizeof(representation.buffer)) {
+    return false;
+  }
+
+  std::copy(high_low_bytes.begin(), high_low_bytes.end(),
+            std::begin(representation.buffer));
+  *token = UnguessableToken::Deserialize(representation.field.high,
+                                         representation.field.low);
+  return true;
+}
+
+}  // namespace base
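
A minimal sketch of the round trip these helpers provide, assuming a caller that wants to stash a TimeDelta in a Value tree (the variable names are illustrative):

base::TimeDelta timeout = base::TimeDelta::FromSeconds(30);
std::unique_ptr<base::Value> stored = base::CreateTimeDeltaValue(timeout);
// |stored| holds the string "30000000" (the internal microsecond count),
// sidestepping the 53-bit precision limit of a double-backed Value.
base::TimeDelta restored;
if (base::GetValueAsTimeDelta(*stored, &restored))
  DCHECK_EQ(timeout, restored);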
diff --git a/base/value_conversions.h b/base/value_conversions.h
new file mode 100644
index 0000000..bd095cd
--- /dev/null
+++ b/base/value_conversions.h
@@ -0,0 +1,35 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_VALUE_CONVERSIONS_H_
+#define BASE_VALUE_CONVERSIONS_H_
+
+// This file contains functions to convert common types to a |Value| and back.
+
+#include <memory>
+
+#include "base/base_export.h"
+
+namespace base {
+
+class FilePath;
+class TimeDelta;
+class UnguessableToken;
+class Value;
+
+// The caller takes ownership of the returned value.
+BASE_EXPORT std::unique_ptr<Value> CreateFilePathValue(
+    const FilePath& in_value);
+BASE_EXPORT bool GetValueAsFilePath(const Value& value, FilePath* file_path);
+
+BASE_EXPORT std::unique_ptr<Value> CreateTimeDeltaValue(const TimeDelta& time);
+BASE_EXPORT bool GetValueAsTimeDelta(const Value& value, TimeDelta* time);
+
+BASE_EXPORT std::unique_ptr<Value> CreateUnguessableTokenValue(
+    const UnguessableToken& token);
+BASE_EXPORT bool GetValueAsUnguessableToken(const Value& value,
+                                            UnguessableToken* token);
+
+}  // namespace base
+
+#endif  // BASE_VALUE_CONVERSIONS_H_
diff --git a/base/value_iterators.cc b/base/value_iterators.cc
new file mode 100644
index 0000000..ba9c730
--- /dev/null
+++ b/base/value_iterators.cc
@@ -0,0 +1,228 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/value_iterators.h"
+
+namespace base {
+
+namespace detail {
+
+// ----------------------------------------------------------------------------
+// dict_iterator.
+
+dict_iterator::pointer::pointer(const reference& ref) : ref_(ref) {}
+
+dict_iterator::pointer::pointer(const pointer& ptr) = default;
+
+dict_iterator::dict_iterator(DictStorage::iterator dict_iter)
+    : dict_iter_(dict_iter) {}
+
+dict_iterator::dict_iterator(const dict_iterator& dict_iter) = default;
+
+dict_iterator& dict_iterator::operator=(const dict_iterator& dict_iter) =
+    default;
+
+dict_iterator::~dict_iterator() = default;
+
+dict_iterator::reference dict_iterator::operator*() {
+  return {dict_iter_->first, *dict_iter_->second};
+}
+
+dict_iterator::pointer dict_iterator::operator->() {
+  return pointer(operator*());
+}
+
+dict_iterator& dict_iterator::operator++() {
+  ++dict_iter_;
+  return *this;
+}
+
+dict_iterator dict_iterator::operator++(int) {
+  dict_iterator tmp(*this);
+  ++dict_iter_;
+  return tmp;
+}
+
+dict_iterator& dict_iterator::operator--() {
+  --dict_iter_;
+  return *this;
+}
+
+dict_iterator dict_iterator::operator--(int) {
+  dict_iterator tmp(*this);
+  --dict_iter_;
+  return tmp;
+}
+
+bool operator==(const dict_iterator& lhs, const dict_iterator& rhs) {
+  return lhs.dict_iter_ == rhs.dict_iter_;
+}
+
+bool operator!=(const dict_iterator& lhs, const dict_iterator& rhs) {
+  return !(lhs == rhs);
+}
+
+// ----------------------------------------------------------------------------
+// const_dict_iterator.
+
+const_dict_iterator::pointer::pointer(const reference& ref) : ref_(ref) {}
+
+const_dict_iterator::pointer::pointer(const pointer& ptr) = default;
+
+const_dict_iterator::const_dict_iterator(DictStorage::const_iterator dict_iter)
+    : dict_iter_(dict_iter) {}
+
+const_dict_iterator::const_dict_iterator(const const_dict_iterator& dict_iter) =
+    default;
+
+const_dict_iterator& const_dict_iterator::operator=(
+    const const_dict_iterator& dict_iter) = default;
+
+const_dict_iterator::~const_dict_iterator() = default;
+
+const_dict_iterator::reference const_dict_iterator::operator*() const {
+  return {dict_iter_->first, *dict_iter_->second};
+}
+
+const_dict_iterator::pointer const_dict_iterator::operator->() const {
+  return pointer(operator*());
+}
+
+const_dict_iterator& const_dict_iterator::operator++() {
+  ++dict_iter_;
+  return *this;
+}
+
+const_dict_iterator const_dict_iterator::operator++(int) {
+  const_dict_iterator tmp(*this);
+  ++dict_iter_;
+  return tmp;
+}
+
+const_dict_iterator& const_dict_iterator::operator--() {
+  --dict_iter_;
+  return *this;
+}
+
+const_dict_iterator const_dict_iterator::operator--(int) {
+  const_dict_iterator tmp(*this);
+  --dict_iter_;
+  return tmp;
+}
+
+bool operator==(const const_dict_iterator& lhs,
+                const const_dict_iterator& rhs) {
+  return lhs.dict_iter_ == rhs.dict_iter_;
+}
+
+bool operator!=(const const_dict_iterator& lhs,
+                const const_dict_iterator& rhs) {
+  return !(lhs == rhs);
+}
+
+// ----------------------------------------------------------------------------
+// dict_iterator_proxy.
+
+dict_iterator_proxy::dict_iterator_proxy(DictStorage* storage)
+    : storage_(storage) {}
+
+dict_iterator_proxy::iterator dict_iterator_proxy::begin() {
+  return iterator(storage_->begin());
+}
+
+dict_iterator_proxy::const_iterator dict_iterator_proxy::begin() const {
+  return const_iterator(storage_->begin());
+}
+
+dict_iterator_proxy::iterator dict_iterator_proxy::end() {
+  return iterator(storage_->end());
+}
+
+dict_iterator_proxy::const_iterator dict_iterator_proxy::end() const {
+  return const_iterator(storage_->end());
+}
+
+dict_iterator_proxy::reverse_iterator dict_iterator_proxy::rbegin() {
+  return reverse_iterator(end());
+}
+
+dict_iterator_proxy::const_reverse_iterator dict_iterator_proxy::rbegin()
+    const {
+  return const_reverse_iterator(end());
+}
+
+dict_iterator_proxy::reverse_iterator dict_iterator_proxy::rend() {
+  return reverse_iterator(begin());
+}
+
+dict_iterator_proxy::const_reverse_iterator dict_iterator_proxy::rend() const {
+  return const_reverse_iterator(begin());
+}
+
+dict_iterator_proxy::const_iterator dict_iterator_proxy::cbegin() const {
+  return const_iterator(begin());
+}
+
+dict_iterator_proxy::const_iterator dict_iterator_proxy::cend() const {
+  return const_iterator(end());
+}
+
+dict_iterator_proxy::const_reverse_iterator dict_iterator_proxy::crbegin()
+    const {
+  return const_reverse_iterator(rbegin());
+}
+
+dict_iterator_proxy::const_reverse_iterator dict_iterator_proxy::crend() const {
+  return const_reverse_iterator(rend());
+}
+
+// ----------------------------------------------------------------------------
+// const_dict_iterator_proxy.
+
+const_dict_iterator_proxy::const_dict_iterator_proxy(const DictStorage* storage)
+    : storage_(storage) {}
+
+const_dict_iterator_proxy::const_iterator const_dict_iterator_proxy::begin()
+    const {
+  return const_iterator(storage_->begin());
+}
+
+const_dict_iterator_proxy::const_iterator const_dict_iterator_proxy::end()
+    const {
+  return const_iterator(storage_->end());
+}
+
+const_dict_iterator_proxy::const_reverse_iterator
+const_dict_iterator_proxy::rbegin() const {
+  return const_reverse_iterator(end());
+}
+
+const_dict_iterator_proxy::const_reverse_iterator
+const_dict_iterator_proxy::rend() const {
+  return const_reverse_iterator(begin());
+}
+
+const_dict_iterator_proxy::const_iterator const_dict_iterator_proxy::cbegin()
+    const {
+  return const_iterator(begin());
+}
+
+const_dict_iterator_proxy::const_iterator const_dict_iterator_proxy::cend()
+    const {
+  return const_iterator(end());
+}
+
+const_dict_iterator_proxy::const_reverse_iterator
+const_dict_iterator_proxy::crbegin() const {
+  return const_reverse_iterator(rbegin());
+}
+
+const_dict_iterator_proxy::const_reverse_iterator
+const_dict_iterator_proxy::crend() const {
+  return const_reverse_iterator(rend());
+}
+
+}  // namespace detail
+
+}  // namespace base
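
One detail worth calling out: because operator*() manufactures a temporary pair of references, operator->() cannot return a raw pointer into the storage; the nested pointer classes above are "arrow proxies" that keep that temporary alive for the duration of the member access. The same pattern in isolation (generic and purely illustrative, not code from this change):

#include <utility>

// Arrow proxy: stores the temporary reference-pair returned by operator*()
// so that expressions like iter->first have a real object to point into.
template <typename Reference>
class ArrowProxy {
 public:
  explicit ArrowProxy(Reference ref) : ref_(std::move(ref)) {}
  Reference* operator->() { return &ref_; }

 private:
  Reference ref_;
};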
diff --git a/base/value_iterators.h b/base/value_iterators.h
new file mode 100644
index 0000000..2e05127
--- /dev/null
+++ b/base/value_iterators.h
@@ -0,0 +1,194 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_VALUE_ITERATORS_H_
+#define BASE_VALUE_ITERATORS_H_
+
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "base/base_export.h"
+#include "base/containers/flat_map.h"
+#include "base/macros.h"
+
+namespace base {
+
+class Value;
+
+namespace detail {
+
+using DictStorage = base::flat_map<std::string, std::unique_ptr<Value>>;
+
+// This iterator closely resembles DictStorage::iterator, with one
+// important exception. It abstracts the underlying unique_ptr away, meaning its
+// value_type is std::pair<const std::string, Value>. Its reference type is a
+// std::pair<const std::string&, Value&>, so that callers have read-write
+// access without incurring a copy.
+class BASE_EXPORT dict_iterator {
+ public:
+  using difference_type = DictStorage::iterator::difference_type;
+  using value_type = std::pair<const std::string, Value>;
+  using reference = std::pair<const std::string&, Value&>;
+  using iterator_category = std::bidirectional_iterator_tag;
+
+  class pointer {
+   public:
+    explicit pointer(const reference& ref);
+    pointer(const pointer& ptr);
+    pointer& operator=(const pointer& ptr) = delete;
+
+    reference* operator->() { return &ref_; }
+
+   private:
+    reference ref_;
+  };
+
+  explicit dict_iterator(DictStorage::iterator dict_iter);
+  dict_iterator(const dict_iterator& dict_iter);
+  dict_iterator& operator=(const dict_iterator& dict_iter);
+  ~dict_iterator();
+
+  reference operator*();
+  pointer operator->();
+
+  dict_iterator& operator++();
+  dict_iterator operator++(int);
+  dict_iterator& operator--();
+  dict_iterator operator--(int);
+
+  BASE_EXPORT friend bool operator==(const dict_iterator& lhs,
+                                     const dict_iterator& rhs);
+  BASE_EXPORT friend bool operator!=(const dict_iterator& lhs,
+                                     const dict_iterator& rhs);
+
+ private:
+  DictStorage::iterator dict_iter_;
+};
+
+// This iterator closely resembles DictStorage::const_iterator, with one
+// important exception. It abstracts the underlying unique_ptr away, meaning its
+// value_type is std::pair<const std::string, Value>. Its reference type is a
+// std::pair<const std::string&, const Value&>, so that callers have read-only
+// access without incurring a copy.
+class BASE_EXPORT const_dict_iterator {
+ public:
+  using difference_type = DictStorage::const_iterator::difference_type;
+  using value_type = std::pair<const std::string, Value>;
+  using reference = std::pair<const std::string&, const Value&>;
+  using iterator_category = std::bidirectional_iterator_tag;
+
+  class pointer {
+   public:
+    explicit pointer(const reference& ref);
+    pointer(const pointer& ptr);
+    pointer& operator=(const pointer& ptr) = delete;
+
+    const reference* operator->() const { return &ref_; }
+
+   private:
+    const reference ref_;
+  };
+
+  explicit const_dict_iterator(DictStorage::const_iterator dict_iter);
+  const_dict_iterator(const const_dict_iterator& dict_iter);
+  const_dict_iterator& operator=(const const_dict_iterator& dict_iter);
+  ~const_dict_iterator();
+
+  reference operator*() const;
+  pointer operator->() const;
+
+  const_dict_iterator& operator++();
+  const_dict_iterator operator++(int);
+  const_dict_iterator& operator--();
+  const_dict_iterator operator--(int);
+
+  BASE_EXPORT friend bool operator==(const const_dict_iterator& lhs,
+                                     const const_dict_iterator& rhs);
+  BASE_EXPORT friend bool operator!=(const const_dict_iterator& lhs,
+                                     const const_dict_iterator& rhs);
+
+ private:
+  DictStorage::const_iterator dict_iter_;
+};
+
+// This class wraps the various |begin| and |end| methods of the underlying
+// DictStorage in dict_iterators and const_dict_iterators. This allows callers
+// to use this class for easy iteration over the underlying values, granting
+// them either read-only or read-write access, depending on the
+// const-qualification.
+class BASE_EXPORT dict_iterator_proxy {
+ public:
+  using key_type = DictStorage::key_type;
+  using mapped_type = DictStorage::mapped_type::element_type;
+  using value_type = std::pair<key_type, mapped_type>;
+  using key_compare = DictStorage::key_compare;
+  using size_type = DictStorage::size_type;
+  using difference_type = DictStorage::difference_type;
+
+  using iterator = dict_iterator;
+  using const_iterator = const_dict_iterator;
+  using reverse_iterator = std::reverse_iterator<iterator>;
+  using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+
+  explicit dict_iterator_proxy(DictStorage* storage);
+
+  iterator begin();
+  const_iterator begin() const;
+  iterator end();
+  const_iterator end() const;
+
+  reverse_iterator rbegin();
+  const_reverse_iterator rbegin() const;
+  reverse_iterator rend();
+  const_reverse_iterator rend() const;
+
+  const_dict_iterator cbegin() const;
+  const_dict_iterator cend() const;
+  const_reverse_iterator crbegin() const;
+  const_reverse_iterator crend() const;
+
+ private:
+  DictStorage* storage_;
+};
+
+// This class wraps the various const |begin| and |end| methods of the
+// underlying DictStorage in const_dict_iterators. This allows callers to use
+// this class for easy iteration over the underlying values, granting them
+// read-only access.
+class BASE_EXPORT const_dict_iterator_proxy {
+ public:
+  using key_type = const DictStorage::key_type;
+  using mapped_type = const DictStorage::mapped_type::element_type;
+  using value_type = std::pair<key_type, mapped_type>;
+  using key_compare = DictStorage::key_compare;
+  using size_type = DictStorage::size_type;
+  using difference_type = DictStorage::difference_type;
+
+  using iterator = const_dict_iterator;
+  using const_iterator = const_dict_iterator;
+  using reverse_iterator = std::reverse_iterator<iterator>;
+  using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+
+  explicit const_dict_iterator_proxy(const DictStorage* storage);
+
+  const_iterator begin() const;
+  const_iterator end() const;
+
+  const_reverse_iterator rbegin() const;
+  const_reverse_iterator rend() const;
+
+  const_iterator cbegin() const;
+  const_iterator cend() const;
+  const_reverse_iterator crbegin() const;
+  const_reverse_iterator crend() const;
+
+ private:
+  const DictStorage* storage_;
+};
+}  // namespace detail
+
+}  // namespace base
+
+#endif  // BASE_VALUE_ITERATORS_H_
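
In practice these proxies are obtained from Value::DictItems() (defined in values.cc below), which makes dictionary traversal a plain range-for over key/value pairs. A minimal sketch, assuming a dictionary-typed Value:

base::Value dict(base::Value::Type::DICTIONARY);
dict.SetKey("volume", base::Value(11));
dict.SetKey("muted", base::Value(false));

for (const auto& item : dict.DictItems()) {
  // item.first is a const std::string&; item.second is a Value&.
  LOG(INFO) << item.first << ": "
            << base::Value::GetTypeName(item.second.type());
}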
diff --git a/base/value_iterators_unittest.cc b/base/value_iterators_unittest.cc
new file mode 100644
index 0000000..ed86182
--- /dev/null
+++ b/base/value_iterators_unittest.cc
@@ -0,0 +1,335 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/value_iterators.h"
+
+#include <type_traits>
+
+#include "base/memory/ptr_util.h"
+#include "base/values.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace detail {
+
+namespace {
+
+// Implementation of the two-range std::equal variant that is missing in
+// C++11 (it was added in C++14).
+template <class BinaryPredicate, class InputIterator1, class InputIterator2>
+bool are_equal(InputIterator1 first1,
+               InputIterator1 last1,
+               InputIterator2 first2,
+               InputIterator2 last2,
+               BinaryPredicate pred) {
+  for (; first1 != last1 && first2 != last2; ++first1, ++first2) {
+    if (!pred(*first1, *first2))
+      return false;
+  }
+  return first1 == last1 && first2 == last2;
+}
+
+}  // namespace
+
+TEST(ValueIteratorsTest, SameDictStorage) {
+  static_assert(std::is_same<Value::DictStorage, DictStorage>::value,
+                "DictStorage differs between Value and Value Iterators.");
+}
+
+TEST(ValueIteratorsTest, IsAssignable) {
+  static_assert(
+      !std::is_assignable<dict_iterator::reference::first_type, std::string>(),
+      "Can assign strings to dict_iterator");
+
+  static_assert(
+      std::is_assignable<dict_iterator::reference::second_type, Value>(),
+      "Can't assign Values to dict_iterator");
+
+  static_assert(!std::is_assignable<const_dict_iterator::reference::first_type,
+                                    std::string>(),
+                "Can assign strings to const_dict_iterator");
+
+  static_assert(
+      !std::is_assignable<const_dict_iterator::reference::second_type, Value>(),
+      "Can assign Values to const_dict_iterator");
+}
+
+TEST(ValueIteratorsTest, DictIteratorOperatorStar) {
+  DictStorage storage;
+  storage.emplace("0", std::make_unique<Value>(0));
+
+  using iterator = dict_iterator;
+  iterator iter(storage.begin());
+  EXPECT_EQ("0", (*iter).first);
+  EXPECT_EQ(Value(0), (*iter).second);
+
+  (*iter).second = Value(1);
+  EXPECT_EQ(Value(1), *storage["0"]);
+}
+
+TEST(ValueIteratorsTest, DictIteratorOperatorArrow) {
+  DictStorage storage;
+  storage.emplace("0", std::make_unique<Value>(0));
+
+  using iterator = dict_iterator;
+  iterator iter(storage.begin());
+  EXPECT_EQ("0", iter->first);
+  EXPECT_EQ(Value(0), iter->second);
+
+  iter->second = Value(1);
+  EXPECT_EQ(Value(1), *storage["0"]);
+}
+
+TEST(ValueIteratorsTest, DictIteratorPreIncrement) {
+  DictStorage storage;
+  storage.emplace("0", std::make_unique<Value>(0));
+  storage.emplace("1", std::make_unique<Value>(1));
+
+  using iterator = dict_iterator;
+  iterator iter(storage.begin());
+  EXPECT_EQ("0", iter->first);
+  EXPECT_EQ(Value(0), iter->second);
+
+  iterator& iter_ref = ++iter;
+  EXPECT_EQ(&iter, &iter_ref);
+
+  EXPECT_EQ("1", iter_ref->first);
+  EXPECT_EQ(Value(1), iter_ref->second);
+}
+
+TEST(ValueIteratorsTest, DictIteratorPostIncrement) {
+  DictStorage storage;
+  storage.emplace("0", std::make_unique<Value>(0));
+  storage.emplace("1", std::make_unique<Value>(1));
+
+  using iterator = dict_iterator;
+  iterator iter(storage.begin());
+  iterator iter_old = iter++;
+
+  EXPECT_EQ("0", iter_old->first);
+  EXPECT_EQ(Value(0), iter_old->second);
+
+  EXPECT_EQ("1", iter->first);
+  EXPECT_EQ(Value(1), iter->second);
+}
+
+TEST(ValueIteratorsTest, DictIteratorPreDecrement) {
+  DictStorage storage;
+  storage.emplace("0", std::make_unique<Value>(0));
+  storage.emplace("1", std::make_unique<Value>(1));
+
+  using iterator = dict_iterator;
+  iterator iter(++storage.begin());
+  EXPECT_EQ("1", iter->first);
+  EXPECT_EQ(Value(1), iter->second);
+
+  iterator& iter_ref = --iter;
+  EXPECT_EQ(&iter, &iter_ref);
+
+  EXPECT_EQ("0", iter_ref->first);
+  EXPECT_EQ(Value(0), iter_ref->second);
+}
+
+TEST(ValueIteratorsTest, DictIteratorPostDecrement) {
+  DictStorage storage;
+  storage.emplace("0", std::make_unique<Value>(0));
+  storage.emplace("1", std::make_unique<Value>(1));
+
+  using iterator = dict_iterator;
+  iterator iter(++storage.begin());
+  iterator iter_old = iter--;
+
+  EXPECT_EQ("1", iter_old->first);
+  EXPECT_EQ(Value(1), iter_old->second);
+
+  EXPECT_EQ("0", iter->first);
+  EXPECT_EQ(Value(0), iter->second);
+}
+
+TEST(ValueIteratorsTest, DictIteratorOperatorEQ) {
+  DictStorage storage;
+  using iterator = dict_iterator;
+  EXPECT_EQ(iterator(storage.begin()), iterator(storage.begin()));
+  EXPECT_EQ(iterator(storage.end()), iterator(storage.end()));
+}
+
+TEST(ValueIteratorsTest, DictIteratorOperatorNE) {
+  DictStorage storage;
+  storage.emplace("0", std::make_unique<Value>(0));
+
+  using iterator = dict_iterator;
+  EXPECT_NE(iterator(storage.begin()), iterator(storage.end()));
+}
+
+TEST(ValueIteratorsTest, ConstDictIteratorOperatorStar) {
+  DictStorage storage;
+  storage.emplace("0", std::make_unique<Value>(0));
+
+  using iterator = const_dict_iterator;
+  iterator iter(storage.begin());
+  EXPECT_EQ("0", (*iter).first);
+  EXPECT_EQ(Value(0), (*iter).second);
+}
+
+TEST(ValueIteratorsTest, ConstDictIteratorOperatorArrow) {
+  DictStorage storage;
+  storage.emplace("0", std::make_unique<Value>(0));
+
+  using iterator = const_dict_iterator;
+  iterator iter(storage.begin());
+  EXPECT_EQ("0", iter->first);
+  EXPECT_EQ(Value(0), iter->second);
+}
+
+TEST(ValueIteratorsTest, ConstDictIteratorPreIncrement) {
+  DictStorage storage;
+  storage.emplace("0", std::make_unique<Value>(0));
+  storage.emplace("1", std::make_unique<Value>(1));
+
+  using iterator = const_dict_iterator;
+  iterator iter(storage.begin());
+  EXPECT_EQ("0", iter->first);
+  EXPECT_EQ(Value(0), iter->second);
+
+  iterator& iter_ref = ++iter;
+  EXPECT_EQ(&iter, &iter_ref);
+
+  EXPECT_EQ("1", iter_ref->first);
+  EXPECT_EQ(Value(1), iter_ref->second);
+}
+
+TEST(ValueIteratorsTest, ConstDictIteratorPostIncrement) {
+  DictStorage storage;
+  storage.emplace("0", std::make_unique<Value>(0));
+  storage.emplace("1", std::make_unique<Value>(1));
+
+  using iterator = const_dict_iterator;
+  iterator iter(storage.begin());
+  iterator iter_old = iter++;
+
+  EXPECT_EQ("0", iter_old->first);
+  EXPECT_EQ(Value(0), iter_old->second);
+
+  EXPECT_EQ("1", iter->first);
+  EXPECT_EQ(Value(1), iter->second);
+}
+
+TEST(ValueIteratorsTest, ConstDictIteratorPreDecrement) {
+  DictStorage storage;
+  storage.emplace("0", std::make_unique<Value>(0));
+  storage.emplace("1", std::make_unique<Value>(1));
+
+  using iterator = const_dict_iterator;
+  iterator iter(++storage.begin());
+  EXPECT_EQ("1", iter->first);
+  EXPECT_EQ(Value(1), iter->second);
+
+  iterator& iter_ref = --iter;
+  EXPECT_EQ(&iter, &iter_ref);
+
+  EXPECT_EQ("0", iter_ref->first);
+  EXPECT_EQ(Value(0), iter_ref->second);
+}
+
+TEST(ValueIteratorsTest, ConstDictIteratorPostDecrement) {
+  DictStorage storage;
+  storage.emplace("0", std::make_unique<Value>(0));
+  storage.emplace("1", std::make_unique<Value>(1));
+
+  using iterator = const_dict_iterator;
+  iterator iter(++storage.begin());
+  iterator iter_old = iter--;
+
+  EXPECT_EQ("1", iter_old->first);
+  EXPECT_EQ(Value(1), iter_old->second);
+
+  EXPECT_EQ("0", iter->first);
+  EXPECT_EQ(Value(0), iter->second);
+}
+
+TEST(ValueIteratorsTest, ConstDictIteratorOperatorEQ) {
+  DictStorage storage;
+  using iterator = const_dict_iterator;
+  EXPECT_EQ(iterator(storage.begin()), iterator(storage.begin()));
+  EXPECT_EQ(iterator(storage.end()), iterator(storage.end()));
+}
+
+TEST(ValueIteratorsTest, ConstDictIteratorOperatorNE) {
+  DictStorage storage;
+  storage.emplace("0", std::make_unique<Value>(0));
+
+  using iterator = const_dict_iterator;
+  EXPECT_NE(iterator(storage.begin()), iterator(storage.end()));
+}
+
+TEST(ValueIteratorsTest, DictIteratorProxy) {
+  DictStorage storage;
+  storage.emplace("null", std::make_unique<Value>(Value::Type::NONE));
+  storage.emplace("bool", std::make_unique<Value>(Value::Type::BOOLEAN));
+  storage.emplace("int", std::make_unique<Value>(Value::Type::INTEGER));
+  storage.emplace("double", std::make_unique<Value>(Value::Type::DOUBLE));
+  storage.emplace("string", std::make_unique<Value>(Value::Type::STRING));
+  storage.emplace("blob", std::make_unique<Value>(Value::Type::BINARY));
+  storage.emplace("dict", std::make_unique<Value>(Value::Type::DICTIONARY));
+  storage.emplace("list", std::make_unique<Value>(Value::Type::LIST));
+
+  using iterator = const_dict_iterator;
+  using iterator_proxy = dict_iterator_proxy;
+  iterator_proxy proxy(&storage);
+
+  auto equal_to = [](const DictStorage::value_type& lhs,
+                     const iterator::reference& rhs) {
+    return std::tie(lhs.first, *lhs.second) == std::tie(rhs.first, rhs.second);
+  };
+
+  EXPECT_TRUE(are_equal(storage.begin(), storage.end(), proxy.begin(),
+                        proxy.end(), equal_to));
+
+  EXPECT_TRUE(are_equal(storage.rbegin(), storage.rend(), proxy.rbegin(),
+                        proxy.rend(), equal_to));
+
+  EXPECT_TRUE(are_equal(storage.cbegin(), storage.cend(), proxy.cbegin(),
+                        proxy.cend(), equal_to));
+
+  EXPECT_TRUE(are_equal(storage.crbegin(), storage.crend(), proxy.crbegin(),
+                        proxy.crend(), equal_to));
+}
+
+TEST(ValueIteratorsTest, ConstDictIteratorProxy) {
+  DictStorage storage;
+  storage.emplace("null", std::make_unique<Value>(Value::Type::NONE));
+  storage.emplace("bool", std::make_unique<Value>(Value::Type::BOOLEAN));
+  storage.emplace("int", std::make_unique<Value>(Value::Type::INTEGER));
+  storage.emplace("double", std::make_unique<Value>(Value::Type::DOUBLE));
+  storage.emplace("string", std::make_unique<Value>(Value::Type::STRING));
+  storage.emplace("blob", std::make_unique<Value>(Value::Type::BINARY));
+  storage.emplace("dict", std::make_unique<Value>(Value::Type::DICTIONARY));
+  storage.emplace("list", std::make_unique<Value>(Value::Type::LIST));
+
+  using iterator = const_dict_iterator;
+  using iterator_proxy = const_dict_iterator_proxy;
+  iterator_proxy proxy(&storage);
+
+  auto equal_to = [](const DictStorage::value_type& lhs,
+                     const iterator::reference& rhs) {
+    return std::tie(lhs.first, *lhs.second) == std::tie(rhs.first, rhs.second);
+  };
+
+  EXPECT_TRUE(are_equal(storage.begin(), storage.end(), proxy.begin(),
+                        proxy.end(), equal_to));
+
+  EXPECT_TRUE(are_equal(storage.rbegin(), storage.rend(), proxy.rbegin(),
+                        proxy.rend(), equal_to));
+
+  EXPECT_TRUE(are_equal(storage.cbegin(), storage.cend(), proxy.cbegin(),
+                        proxy.cend(), equal_to));
+
+  EXPECT_TRUE(are_equal(storage.crbegin(), storage.crend(), proxy.crbegin(),
+                        proxy.crend(), equal_to));
+}
+
+}  // namespace detail
+
+}  // namespace base
diff --git a/base/values.cc b/base/values.cc
new file mode 100644
index 0000000..085f0f0
--- /dev/null
+++ b/base/values.cc
@@ -0,0 +1,1401 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/values.h"
+
+#include <string.h>
+
+#include <algorithm>
+#include <cmath>
+#include <new>
+#include <ostream>
+#include <utility>
+
+#include "base/json/json_writer.h"
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/stl_util.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/trace_event/memory_usage_estimator.h"
+
+namespace base {
+
+namespace {
+
+const char* const kTypeNames[] = {"null",   "boolean", "integer",    "double",
+                                  "string", "binary",  "dictionary", "list"};
+static_assert(arraysize(kTypeNames) ==
+                  static_cast<size_t>(Value::Type::LIST) + 1,
+              "kTypeNames Has Wrong Size");
+
+std::unique_ptr<Value> CopyWithoutEmptyChildren(const Value& node);
+
+// Make a deep copy of |list|, but don't include empty lists or dictionaries
+// in the copy. It's possible for this function to return nullptr, and it
+// expects |list| to always be non-null.
+std::unique_ptr<Value> CopyListWithoutEmptyChildren(const Value& list) {
+  Value copy(Value::Type::LIST);
+  for (const auto& entry : list.GetList()) {
+    std::unique_ptr<Value> child_copy = CopyWithoutEmptyChildren(entry);
+    if (child_copy)
+      copy.GetList().push_back(std::move(*child_copy));
+  }
+  return copy.GetList().empty() ? nullptr
+                                : std::make_unique<Value>(std::move(copy));
+}
+
+std::unique_ptr<DictionaryValue> CopyDictionaryWithoutEmptyChildren(
+    const DictionaryValue& dict) {
+  std::unique_ptr<DictionaryValue> copy;
+  for (DictionaryValue::Iterator it(dict); !it.IsAtEnd(); it.Advance()) {
+    std::unique_ptr<Value> child_copy = CopyWithoutEmptyChildren(it.value());
+    if (child_copy) {
+      if (!copy)
+        copy = std::make_unique<DictionaryValue>();
+      copy->SetWithoutPathExpansion(it.key(), std::move(child_copy));
+    }
+  }
+  return copy;
+}
+
+std::unique_ptr<Value> CopyWithoutEmptyChildren(const Value& node) {
+  switch (node.type()) {
+    case Value::Type::LIST:
+      return CopyListWithoutEmptyChildren(static_cast<const ListValue&>(node));
+
+    case Value::Type::DICTIONARY:
+      return CopyDictionaryWithoutEmptyChildren(
+          static_cast<const DictionaryValue&>(node));
+
+    default:
+      return std::make_unique<Value>(node.Clone());
+  }
+}
+
+}  // namespace
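
To make the pruning concrete, a hypothetical before/after (comments only, not code from this change):

// Input:   {"a": {}, "b": [[], {}], "c": 1}
// Output:  {"c": 1}
// "a" is dropped as an empty dictionary; "b" is dropped because both of its
// children prune to nothing, leaving it empty as well; scalars always survive.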
+
+// static
+std::unique_ptr<Value> Value::CreateWithCopiedBuffer(const char* buffer,
+                                                     size_t size) {
+  return std::make_unique<Value>(BlobStorage(buffer, buffer + size));
+}
+
+// static
+Value Value::FromUniquePtrValue(std::unique_ptr<Value> val) {
+  return std::move(*val);
+}
+
+// static
+std::unique_ptr<Value> Value::ToUniquePtrValue(Value val) {
+  return std::make_unique<Value>(std::move(val));
+}
+
+Value::Value(Value&& that) noexcept {
+  InternalMoveConstructFrom(std::move(that));
+}
+
+Value::Value() noexcept : type_(Type::NONE) {}
+
+Value::Value(Type type) : type_(type) {
+  // Initialize with the default value.
+  switch (type_) {
+    case Type::NONE:
+      return;
+
+    case Type::BOOLEAN:
+      bool_value_ = false;
+      return;
+    case Type::INTEGER:
+      int_value_ = 0;
+      return;
+    case Type::DOUBLE:
+      double_value_ = 0.0;
+      return;
+    case Type::STRING:
+      new (&string_value_) std::string();
+      return;
+    case Type::BINARY:
+      new (&binary_value_) BlobStorage();
+      return;
+    case Type::DICTIONARY:
+      new (&dict_) DictStorage();
+      return;
+    case Type::LIST:
+      new (&list_) ListStorage();
+      return;
+  }
+}
+
+Value::Value(bool in_bool) : type_(Type::BOOLEAN), bool_value_(in_bool) {}
+
+Value::Value(int in_int) : type_(Type::INTEGER), int_value_(in_int) {}
+
+Value::Value(double in_double) : type_(Type::DOUBLE), double_value_(in_double) {
+  if (!std::isfinite(double_value_)) {
+    NOTREACHED() << "Non-finite (i.e. NaN or positive/negative infinity) "
+                 << "values cannot be represented in JSON";
+    double_value_ = 0.0;
+  }
+}
+
+Value::Value(const char* in_string) : Value(std::string(in_string)) {}
+
+Value::Value(StringPiece in_string) : Value(std::string(in_string)) {}
+
+Value::Value(std::string&& in_string) noexcept
+    : type_(Type::STRING), string_value_(std::move(in_string)) {
+  DCHECK(IsStringUTF8(string_value_));
+}
+
+Value::Value(const char16* in_string16) : Value(StringPiece16(in_string16)) {}
+
+Value::Value(StringPiece16 in_string16) : Value(UTF16ToUTF8(in_string16)) {}
+
+Value::Value(const BlobStorage& in_blob)
+    : type_(Type::BINARY), binary_value_(in_blob) {}
+
+Value::Value(BlobStorage&& in_blob) noexcept
+    : type_(Type::BINARY), binary_value_(std::move(in_blob)) {}
+
+Value::Value(const DictStorage& in_dict) : type_(Type::DICTIONARY), dict_() {
+  dict_.reserve(in_dict.size());
+  for (const auto& it : in_dict) {
+    dict_.try_emplace(dict_.end(), it.first,
+                      std::make_unique<Value>(it.second->Clone()));
+  }
+}
+
+Value::Value(DictStorage&& in_dict) noexcept
+    : type_(Type::DICTIONARY), dict_(std::move(in_dict)) {}
+
+Value::Value(const ListStorage& in_list) : type_(Type::LIST), list_() {
+  list_.reserve(in_list.size());
+  for (const auto& val : in_list)
+    list_.emplace_back(val.Clone());
+}
+
+Value::Value(ListStorage&& in_list) noexcept
+    : type_(Type::LIST), list_(std::move(in_list)) {}
+
+Value& Value::operator=(Value&& that) noexcept {
+  InternalCleanup();
+  InternalMoveConstructFrom(std::move(that));
+
+  return *this;
+}
+
+Value Value::Clone() const {
+  switch (type_) {
+    case Type::NONE:
+      return Value();
+    case Type::BOOLEAN:
+      return Value(bool_value_);
+    case Type::INTEGER:
+      return Value(int_value_);
+    case Type::DOUBLE:
+      return Value(double_value_);
+    case Type::STRING:
+      return Value(string_value_);
+    case Type::BINARY:
+      return Value(binary_value_);
+    case Type::DICTIONARY:
+      return Value(dict_);
+    case Type::LIST:
+      return Value(list_);
+  }
+
+  NOTREACHED();
+  return Value();
+}
+
+Value::~Value() {
+  InternalCleanup();
+}
+
+// static
+const char* Value::GetTypeName(Value::Type type) {
+  DCHECK_GE(static_cast<int>(type), 0);
+  DCHECK_LT(static_cast<size_t>(type), arraysize(kTypeNames));
+  return kTypeNames[static_cast<size_t>(type)];
+}
+
+bool Value::GetBool() const {
+  CHECK(is_bool());
+  return bool_value_;
+}
+
+int Value::GetInt() const {
+  CHECK(is_int());
+  return int_value_;
+}
+
+double Value::GetDouble() const {
+  if (is_double())
+    return double_value_;
+  if (is_int())
+    return int_value_;
+  CHECK(false);
+  return 0.0;
+}
+
+const std::string& Value::GetString() const {
+  CHECK(is_string());
+  return string_value_;
+}
+
+const Value::BlobStorage& Value::GetBlob() const {
+  CHECK(is_blob());
+  return binary_value_;
+}
+
+Value::ListStorage& Value::GetList() {
+  CHECK(is_list());
+  return list_;
+}
+
+const Value::ListStorage& Value::GetList() const {
+  CHECK(is_list());
+  return list_;
+}
+
+Value* Value::FindKey(StringPiece key) {
+  return const_cast<Value*>(static_cast<const Value*>(this)->FindKey(key));
+}
+
+const Value* Value::FindKey(StringPiece key) const {
+  CHECK(is_dict());
+  auto found = dict_.find(key);
+  if (found == dict_.end())
+    return nullptr;
+  return found->second.get();
+}
+
+Value* Value::FindKeyOfType(StringPiece key, Type type) {
+  return const_cast<Value*>(
+      static_cast<const Value*>(this)->FindKeyOfType(key, type));
+}
+
+const Value* Value::FindKeyOfType(StringPiece key, Type type) const {
+  const Value* result = FindKey(key);
+  if (!result || result->type() != type)
+    return nullptr;
+  return result;
+}
+
+bool Value::RemoveKey(StringPiece key) {
+  CHECK(is_dict());
+  // NOTE: Can't directly return dict_.erase(key) due to MSVC warning C4800.
+  return dict_.erase(key) != 0;
+}
+
+Value* Value::SetKey(StringPiece key, Value value) {
+  CHECK(is_dict());
+  // NOTE: We can't use |insert_or_assign| here, as only |try_emplace| does
+  // an explicit conversion from StringPiece to std::string if necessary.
+  auto val_ptr = std::make_unique<Value>(std::move(value));
+  auto result = dict_.try_emplace(key, std::move(val_ptr));
+  if (!result.second) {
+    // val_ptr is guaranteed to be still intact at this point.
+    result.first->second = std::move(val_ptr);
+  }
+  return result.first->second.get();
+}
+
+Value* Value::SetKey(std::string&& key, Value value) {
+  CHECK(is_dict());
+  return dict_
+      .insert_or_assign(std::move(key),
+                        std::make_unique<Value>(std::move(value)))
+      .first->second.get();
+}
+
+Value* Value::SetKey(const char* key, Value value) {
+  return SetKey(StringPiece(key), std::move(value));
+}
+
+Value* Value::FindPath(std::initializer_list<StringPiece> path) {
+  return const_cast<Value*>(const_cast<const Value*>(this)->FindPath(path));
+}
+
+Value* Value::FindPath(span<const StringPiece> path) {
+  return const_cast<Value*>(const_cast<const Value*>(this)->FindPath(path));
+}
+
+const Value* Value::FindPath(std::initializer_list<StringPiece> path) const {
+  DCHECK_GE(path.size(), 2u) << "Use FindKey() for a path of length 1.";
+  return FindPath(make_span(path.begin(), path.size()));
+}
+
+const Value* Value::FindPath(span<const StringPiece> path) const {
+  const Value* cur = this;
+  for (const StringPiece component : path) {
+    if (!cur->is_dict() || (cur = cur->FindKey(component)) == nullptr)
+      return nullptr;
+  }
+  return cur;
+}
+
+Value* Value::FindPathOfType(std::initializer_list<StringPiece> path,
+                             Type type) {
+  return const_cast<Value*>(
+      const_cast<const Value*>(this)->FindPathOfType(path, type));
+}
+
+Value* Value::FindPathOfType(span<const StringPiece> path, Type type) {
+  return const_cast<Value*>(
+      const_cast<const Value*>(this)->FindPathOfType(path, type));
+}
+
+const Value* Value::FindPathOfType(std::initializer_list<StringPiece> path,
+                                   Type type) const {
+  DCHECK_GE(path.size(), 2u) << "Use FindKeyOfType() for a path of length 1.";
+  return FindPathOfType(make_span(path.begin(), path.size()), type);
+}
+
+const Value* Value::FindPathOfType(span<const StringPiece> path,
+                                   Type type) const {
+  const Value* result = FindPath(path);
+  if (!result || result->type() != type)
+    return nullptr;
+  return result;
+}
+
+Value* Value::SetPath(std::initializer_list<StringPiece> path, Value value) {
+  DCHECK_GE(path.size(), 2u) << "Use SetKey() for a path of length 1.";
+  return SetPath(make_span(path.begin(), path.size()), std::move(value));
+}
+
+Value* Value::SetPath(span<const StringPiece> path, Value value) {
+  DCHECK_NE(path.begin(), path.end());  // Can't be empty path.
+
+  // Walk/construct intermediate dictionaries. The last element requires
+  // special handling so skip it in this loop.
+  Value* cur = this;
+  const StringPiece* cur_path = path.begin();
+  for (; (cur_path + 1) < path.end(); ++cur_path) {
+    if (!cur->is_dict())
+      return nullptr;
+
+    // Use lower_bound to avoid doing the search twice for missing keys.
+    const StringPiece path_component = *cur_path;
+    auto found = cur->dict_.lower_bound(path_component);
+    if (found == cur->dict_.end() || found->first != path_component) {
+      // No key found, insert one.
+      auto inserted = cur->dict_.try_emplace(
+          found, path_component, std::make_unique<Value>(Type::DICTIONARY));
+      cur = inserted->second.get();
+    } else {
+      cur = found->second.get();
+    }
+  }
+
+  // "cur" will now contain the last dictionary to insert or replace into.
+  if (!cur->is_dict())
+    return nullptr;
+  return cur->SetKey(*cur_path, std::move(value));
+}
+
+bool Value::RemovePath(std::initializer_list<StringPiece> path) {
+  DCHECK_GE(path.size(), 2u) << "Use RemoveKey() for a path of length 1.";
+  return RemovePath(make_span(path.begin(), path.size()));
+}
+
+bool Value::RemovePath(span<const StringPiece> path) {
+  if (!is_dict() || path.empty())
+    return false;
+
+  if (path.size() == 1)
+    return RemoveKey(path[0]);
+
+  auto found = dict_.find(path[0]);
+  if (found == dict_.end() || !found->second->is_dict())
+    return false;
+
+  bool removed = found->second->RemovePath(path.subspan(1));
+  if (removed && found->second->dict_.empty())
+    dict_.erase(found);
+
+  return removed;
+}
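
A minimal sketch of how SetPath and RemovePath cooperate, assuming a dictionary-typed Value (values are illustrative):

base::Value root(base::Value::Type::DICTIONARY);
// Intermediate dictionaries "a" and "b" are created on demand.
root.SetPath({"a", "b", "c"}, base::Value(1));
DCHECK(root.FindPath({"a", "b", "c"}));

// Removing "c" empties "b", which empties "a", so both are pruned as well.
bool removed = root.RemovePath({"a", "b", "c"});
DCHECK(removed);
DCHECK(root.DictEmpty());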
+
+Value::dict_iterator_proxy Value::DictItems() {
+  CHECK(is_dict());
+  return dict_iterator_proxy(&dict_);
+}
+
+Value::const_dict_iterator_proxy Value::DictItems() const {
+  CHECK(is_dict());
+  return const_dict_iterator_proxy(&dict_);
+}
+
+size_t Value::DictSize() const {
+  CHECK(is_dict());
+  return dict_.size();
+}
+
+bool Value::DictEmpty() const {
+  CHECK(is_dict());
+  return dict_.empty();
+}
+
+bool Value::GetAsBoolean(bool* out_value) const {
+  if (out_value && is_bool()) {
+    *out_value = bool_value_;
+    return true;
+  }
+  return is_bool();
+}
+
+bool Value::GetAsInteger(int* out_value) const {
+  if (out_value && is_int()) {
+    *out_value = int_value_;
+    return true;
+  }
+  return is_int();
+}
+
+bool Value::GetAsDouble(double* out_value) const {
+  if (out_value && is_double()) {
+    *out_value = double_value_;
+    return true;
+  } else if (out_value && is_int()) {
+    // Allow promotion from int to double.
+    *out_value = int_value_;
+    return true;
+  }
+  return is_double() || is_int();
+}
+
+bool Value::GetAsString(std::string* out_value) const {
+  if (out_value && is_string()) {
+    *out_value = string_value_;
+    return true;
+  }
+  return is_string();
+}
+
+bool Value::GetAsString(string16* out_value) const {
+  if (out_value && is_string()) {
+    *out_value = UTF8ToUTF16(string_value_);
+    return true;
+  }
+  return is_string();
+}
+
+bool Value::GetAsString(const Value** out_value) const {
+  if (out_value && is_string()) {
+    *out_value = static_cast<const Value*>(this);
+    return true;
+  }
+  return is_string();
+}
+
+bool Value::GetAsString(StringPiece* out_value) const {
+  if (out_value && is_string()) {
+    *out_value = string_value_;
+    return true;
+  }
+  return is_string();
+}
+
+bool Value::GetAsList(ListValue** out_value) {
+  if (out_value && is_list()) {
+    *out_value = static_cast<ListValue*>(this);
+    return true;
+  }
+  return is_list();
+}
+
+bool Value::GetAsList(const ListValue** out_value) const {
+  if (out_value && is_list()) {
+    *out_value = static_cast<const ListValue*>(this);
+    return true;
+  }
+  return is_list();
+}
+
+bool Value::GetAsDictionary(DictionaryValue** out_value) {
+  if (out_value && is_dict()) {
+    *out_value = static_cast<DictionaryValue*>(this);
+    return true;
+  }
+  return is_dict();
+}
+
+bool Value::GetAsDictionary(const DictionaryValue** out_value) const {
+  if (out_value && is_dict()) {
+    *out_value = static_cast<const DictionaryValue*>(this);
+    return true;
+  }
+  return is_dict();
+}
+
+Value* Value::DeepCopy() const {
+  return new Value(Clone());
+}
+
+std::unique_ptr<Value> Value::CreateDeepCopy() const {
+  return std::make_unique<Value>(Clone());
+}
+
+bool operator==(const Value& lhs, const Value& rhs) {
+  if (lhs.type_ != rhs.type_)
+    return false;
+
+  switch (lhs.type_) {
+    case Value::Type::NONE:
+      return true;
+    case Value::Type::BOOLEAN:
+      return lhs.bool_value_ == rhs.bool_value_;
+    case Value::Type::INTEGER:
+      return lhs.int_value_ == rhs.int_value_;
+    case Value::Type::DOUBLE:
+      return lhs.double_value_ == rhs.double_value_;
+    case Value::Type::STRING:
+      return lhs.string_value_ == rhs.string_value_;
+    case Value::Type::BINARY:
+      return lhs.binary_value_ == rhs.binary_value_;
+    // TODO(crbug.com/646113): Clean this up when DictionaryValue and ListValue
+    // are completely inlined.
+    case Value::Type::DICTIONARY:
+      if (lhs.dict_.size() != rhs.dict_.size())
+        return false;
+      return std::equal(std::begin(lhs.dict_), std::end(lhs.dict_),
+                        std::begin(rhs.dict_),
+                        [](const auto& u, const auto& v) {
+                          return std::tie(u.first, *u.second) ==
+                                 std::tie(v.first, *v.second);
+                        });
+    case Value::Type::LIST:
+      return lhs.list_ == rhs.list_;
+  }
+
+  NOTREACHED();
+  return false;
+}
+
+bool operator!=(const Value& lhs, const Value& rhs) {
+  return !(lhs == rhs);
+}
+
+bool operator<(const Value& lhs, const Value& rhs) {
+  if (lhs.type_ != rhs.type_)
+    return lhs.type_ < rhs.type_;
+
+  switch (lhs.type_) {
+    case Value::Type::NONE:
+      return false;
+    case Value::Type::BOOLEAN:
+      return lhs.bool_value_ < rhs.bool_value_;
+    case Value::Type::INTEGER:
+      return lhs.int_value_ < rhs.int_value_;
+    case Value::Type::DOUBLE:
+      return lhs.double_value_ < rhs.double_value_;
+    case Value::Type::STRING:
+      return lhs.string_value_ < rhs.string_value_;
+    case Value::Type::BINARY:
+      return lhs.binary_value_ < rhs.binary_value_;
+    // TODO(crbug.com/646113): Clean this up when DictionaryValue and ListValue
+    // are completely inlined.
+    case Value::Type::DICTIONARY:
+      return std::lexicographical_compare(
+          std::begin(lhs.dict_), std::end(lhs.dict_), std::begin(rhs.dict_),
+          std::end(rhs.dict_),
+          [](const Value::DictStorage::value_type& u,
+             const Value::DictStorage::value_type& v) {
+            return std::tie(u.first, *u.second) < std::tie(v.first, *v.second);
+          });
+    case Value::Type::LIST:
+      return lhs.list_ < rhs.list_;
+  }
+
+  NOTREACHED();
+  return false;
+}
+
+bool operator>(const Value& lhs, const Value& rhs) {
+  return rhs < lhs;
+}
+
+bool operator<=(const Value& lhs, const Value& rhs) {
+  return !(rhs < lhs);
+}
+
+bool operator>=(const Value& lhs, const Value& rhs) {
+  return !(lhs < rhs);
+}
+
+bool Value::Equals(const Value* other) const {
+  DCHECK(other);
+  return *this == *other;
+}
+
+size_t Value::EstimateMemoryUsage() const {
+  switch (type_) {
+    case Type::STRING:
+      return base::trace_event::EstimateMemoryUsage(string_value_);
+    case Type::BINARY:
+      return base::trace_event::EstimateMemoryUsage(binary_value_);
+    case Type::DICTIONARY:
+      return base::trace_event::EstimateMemoryUsage(dict_);
+    case Type::LIST:
+      return base::trace_event::EstimateMemoryUsage(list_);
+    default:
+      return 0;
+  }
+}
+
+void Value::InternalMoveConstructFrom(Value&& that) {
+  type_ = that.type_;
+
+  switch (type_) {
+    case Type::NONE:
+      return;
+    case Type::BOOLEAN:
+      bool_value_ = that.bool_value_;
+      return;
+    case Type::INTEGER:
+      int_value_ = that.int_value_;
+      return;
+    case Type::DOUBLE:
+      double_value_ = that.double_value_;
+      return;
+    case Type::STRING:
+      new (&string_value_) std::string(std::move(that.string_value_));
+      return;
+    case Type::BINARY:
+      new (&binary_value_) BlobStorage(std::move(that.binary_value_));
+      return;
+    case Type::DICTIONARY:
+      new (&dict_) DictStorage(std::move(that.dict_));
+      return;
+    case Type::LIST:
+      new (&list_) ListStorage(std::move(that.list_));
+      return;
+  }
+}
+
+void Value::InternalCleanup() {
+  switch (type_) {
+    case Type::NONE:
+    case Type::BOOLEAN:
+    case Type::INTEGER:
+    case Type::DOUBLE:
+      // Nothing to do
+      return;
+
+    case Type::STRING:
+      string_value_.~basic_string();
+      return;
+    case Type::BINARY:
+      binary_value_.~BlobStorage();
+      return;
+    case Type::DICTIONARY:
+      dict_.~DictStorage();
+      return;
+    case Type::LIST:
+      list_.~ListStorage();
+      return;
+  }
+}
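
InternalMoveConstructFrom and InternalCleanup are the two halves of a hand-rolled tagged union: the non-trivial members live in a union, so they must be created with placement new and torn down with explicit destructor calls, keyed off type_. The same discipline in a standalone sketch (illustrative, not the actual Value layout):

#include <new>
#include <string>
#include <utility>

// Minimal tagged union holding either an int or a std::string.
class IntOrString {
 public:
  IntOrString() : is_string_(false), int_value_(0) {}
  explicit IntOrString(std::string s) : is_string_(true) {
    new (&string_value_) std::string(std::move(s));  // Placement new.
  }
  ~IntOrString() {
    if (is_string_)
      string_value_.~basic_string();  // Explicit destructor call.
  }

 private:
  bool is_string_;
  union {
    int int_value_;
    std::string string_value_;  // Non-trivial: needs manual lifetime.
  };
};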
+
+///////////////////// DictionaryValue ////////////////////
+
+// static
+std::unique_ptr<DictionaryValue> DictionaryValue::From(
+    std::unique_ptr<Value> value) {
+  DictionaryValue* out;
+  if (value && value->GetAsDictionary(&out)) {
+    ignore_result(value.release());
+    return WrapUnique(out);
+  }
+  return nullptr;
+}
+
+DictionaryValue::DictionaryValue() : Value(Type::DICTIONARY) {}
+DictionaryValue::DictionaryValue(const DictStorage& in_dict) : Value(in_dict) {}
+DictionaryValue::DictionaryValue(DictStorage&& in_dict) noexcept
+    : Value(std::move(in_dict)) {}
+
+bool DictionaryValue::HasKey(StringPiece key) const {
+  DCHECK(IsStringUTF8(key));
+  auto current_entry = dict_.find(key);
+  DCHECK((current_entry == dict_.end()) || current_entry->second);
+  return current_entry != dict_.end();
+}
+
+void DictionaryValue::Clear() {
+  dict_.clear();
+}
+
+Value* DictionaryValue::Set(StringPiece path, std::unique_ptr<Value> in_value) {
+  DCHECK(IsStringUTF8(path));
+  DCHECK(in_value);
+
+  StringPiece current_path(path);
+  Value* current_dictionary = this;
+  for (size_t delimiter_position = current_path.find('.');
+       delimiter_position != StringPiece::npos;
+       delimiter_position = current_path.find('.')) {
+    // Assume that we're indexing into a dictionary.
+    StringPiece key = current_path.substr(0, delimiter_position);
+    Value* child_dictionary =
+        current_dictionary->FindKeyOfType(key, Type::DICTIONARY);
+    if (!child_dictionary) {
+      child_dictionary =
+          current_dictionary->SetKey(key, Value(Type::DICTIONARY));
+    }
+
+    current_dictionary = child_dictionary;
+    current_path = current_path.substr(delimiter_position + 1);
+  }
+
+  return static_cast<DictionaryValue*>(current_dictionary)
+      ->SetWithoutPathExpansion(current_path, std::move(in_value));
+}
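
This loop is what implements DictionaryValue's dotted-path convention: every '.' in the key descends into (creating, if needed) a nested dictionary. A minimal usage sketch:

auto dict = std::make_unique<base::DictionaryValue>();
dict->SetInteger("global.audio.volume", 50);
// Equivalent nesting: {"global": {"audio": {"volume": 50}}}

int volume = 0;
bool ok = dict->GetInteger("global.audio.volume", &volume);
DCHECK(ok);
DCHECK_EQ(50, volume);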
+
+Value* DictionaryValue::SetBoolean(StringPiece path, bool in_value) {
+  return Set(path, std::make_unique<Value>(in_value));
+}
+
+Value* DictionaryValue::SetInteger(StringPiece path, int in_value) {
+  return Set(path, std::make_unique<Value>(in_value));
+}
+
+Value* DictionaryValue::SetDouble(StringPiece path, double in_value) {
+  return Set(path, std::make_unique<Value>(in_value));
+}
+
+Value* DictionaryValue::SetString(StringPiece path, StringPiece in_value) {
+  return Set(path, std::make_unique<Value>(in_value));
+}
+
+Value* DictionaryValue::SetString(StringPiece path, const string16& in_value) {
+  return Set(path, std::make_unique<Value>(in_value));
+}
+
+DictionaryValue* DictionaryValue::SetDictionary(
+    StringPiece path,
+    std::unique_ptr<DictionaryValue> in_value) {
+  return static_cast<DictionaryValue*>(Set(path, std::move(in_value)));
+}
+
+ListValue* DictionaryValue::SetList(StringPiece path,
+                                    std::unique_ptr<ListValue> in_value) {
+  return static_cast<ListValue*>(Set(path, std::move(in_value)));
+}
+
+Value* DictionaryValue::SetWithoutPathExpansion(
+    StringPiece key,
+    std::unique_ptr<Value> in_value) {
+  // NOTE: We can't use |insert_or_assign| here, as only |try_emplace| does
+  // an explicit conversion from StringPiece to std::string if necessary.
+  auto result = dict_.try_emplace(key, std::move(in_value));
+  if (!result.second) {
+    // in_value is guaranteed to be still intact at this point.
+    result.first->second = std::move(in_value);
+  }
+  return result.first->second.get();
+}
+
+bool DictionaryValue::Get(StringPiece path,
+                          const Value** out_value) const {
+  DCHECK(IsStringUTF8(path));
+  StringPiece current_path(path);
+  const DictionaryValue* current_dictionary = this;
+  for (size_t delimiter_position = current_path.find('.');
+       delimiter_position != std::string::npos;
+       delimiter_position = current_path.find('.')) {
+    const DictionaryValue* child_dictionary = nullptr;
+    if (!current_dictionary->GetDictionaryWithoutPathExpansion(
+            current_path.substr(0, delimiter_position), &child_dictionary)) {
+      return false;
+    }
+
+    current_dictionary = child_dictionary;
+    current_path = current_path.substr(delimiter_position + 1);
+  }
+
+  return current_dictionary->GetWithoutPathExpansion(current_path, out_value);
+}
+
+bool DictionaryValue::Get(StringPiece path, Value** out_value) {
+  return static_cast<const DictionaryValue&>(*this).Get(
+      path,
+      const_cast<const Value**>(out_value));
+}
+
+bool DictionaryValue::GetBoolean(StringPiece path, bool* bool_value) const {
+  const Value* value;
+  if (!Get(path, &value))
+    return false;
+
+  return value->GetAsBoolean(bool_value);
+}
+
+bool DictionaryValue::GetInteger(StringPiece path, int* out_value) const {
+  const Value* value;
+  if (!Get(path, &value))
+    return false;
+
+  return value->GetAsInteger(out_value);
+}
+
+bool DictionaryValue::GetDouble(StringPiece path, double* out_value) const {
+  const Value* value;
+  if (!Get(path, &value))
+    return false;
+
+  return value->GetAsDouble(out_value);
+}
+
+bool DictionaryValue::GetString(StringPiece path,
+                                std::string* out_value) const {
+  const Value* value;
+  if (!Get(path, &value))
+    return false;
+
+  return value->GetAsString(out_value);
+}
+
+bool DictionaryValue::GetString(StringPiece path, string16* out_value) const {
+  const Value* value;
+  if (!Get(path, &value))
+    return false;
+
+  return value->GetAsString(out_value);
+}
+
+bool DictionaryValue::GetStringASCII(StringPiece path,
+                                     std::string* out_value) const {
+  std::string out;
+  if (!GetString(path, &out))
+    return false;
+
+  if (!IsStringASCII(out)) {
+    NOTREACHED();
+    return false;
+  }
+
+  out_value->assign(out);
+  return true;
+}
+
+bool DictionaryValue::GetBinary(StringPiece path,
+                                const Value** out_value) const {
+  const Value* value;
+  bool result = Get(path, &value);
+  if (!result || !value->is_blob())
+    return false;
+
+  if (out_value)
+    *out_value = value;
+
+  return true;
+}
+
+bool DictionaryValue::GetBinary(StringPiece path, Value** out_value) {
+  return static_cast<const DictionaryValue&>(*this).GetBinary(
+      path, const_cast<const Value**>(out_value));
+}
+
+bool DictionaryValue::GetDictionary(StringPiece path,
+                                    const DictionaryValue** out_value) const {
+  const Value* value;
+  bool result = Get(path, &value);
+  if (!result || !value->is_dict())
+    return false;
+
+  if (out_value)
+    *out_value = static_cast<const DictionaryValue*>(value);
+
+  return true;
+}
+
+bool DictionaryValue::GetDictionary(StringPiece path,
+                                    DictionaryValue** out_value) {
+  return static_cast<const DictionaryValue&>(*this).GetDictionary(
+      path,
+      const_cast<const DictionaryValue**>(out_value));
+}
+
+bool DictionaryValue::GetList(StringPiece path,
+                              const ListValue** out_value) const {
+  const Value* value;
+  bool result = Get(path, &value);
+  if (!result || !value->is_list())
+    return false;
+
+  if (out_value)
+    *out_value = static_cast<const ListValue*>(value);
+
+  return true;
+}
+
+bool DictionaryValue::GetList(StringPiece path, ListValue** out_value) {
+  return static_cast<const DictionaryValue&>(*this).GetList(
+      path,
+      const_cast<const ListValue**>(out_value));
+}
+
+bool DictionaryValue::GetWithoutPathExpansion(StringPiece key,
+                                              const Value** out_value) const {
+  DCHECK(IsStringUTF8(key));
+  auto entry_iterator = dict_.find(key);
+  if (entry_iterator == dict_.end())
+    return false;
+
+  if (out_value)
+    *out_value = entry_iterator->second.get();
+  return true;
+}
+
+bool DictionaryValue::GetWithoutPathExpansion(StringPiece key,
+                                              Value** out_value) {
+  return static_cast<const DictionaryValue&>(*this).GetWithoutPathExpansion(
+      key,
+      const_cast<const Value**>(out_value));
+}
+
+bool DictionaryValue::GetBooleanWithoutPathExpansion(StringPiece key,
+                                                     bool* out_value) const {
+  const Value* value;
+  if (!GetWithoutPathExpansion(key, &value))
+    return false;
+
+  return value->GetAsBoolean(out_value);
+}
+
+bool DictionaryValue::GetIntegerWithoutPathExpansion(StringPiece key,
+                                                     int* out_value) const {
+  const Value* value;
+  if (!GetWithoutPathExpansion(key, &value))
+    return false;
+
+  return value->GetAsInteger(out_value);
+}
+
+bool DictionaryValue::GetDoubleWithoutPathExpansion(StringPiece key,
+                                                    double* out_value) const {
+  const Value* value;
+  if (!GetWithoutPathExpansion(key, &value))
+    return false;
+
+  return value->GetAsDouble(out_value);
+}
+
+bool DictionaryValue::GetStringWithoutPathExpansion(
+    StringPiece key,
+    std::string* out_value) const {
+  const Value* value;
+  if (!GetWithoutPathExpansion(key, &value))
+    return false;
+
+  return value->GetAsString(out_value);
+}
+
+bool DictionaryValue::GetStringWithoutPathExpansion(StringPiece key,
+                                                    string16* out_value) const {
+  const Value* value;
+  if (!GetWithoutPathExpansion(key, &value))
+    return false;
+
+  return value->GetAsString(out_value);
+}
+
+bool DictionaryValue::GetDictionaryWithoutPathExpansion(
+    StringPiece key,
+    const DictionaryValue** out_value) const {
+  const Value* value;
+  bool result = GetWithoutPathExpansion(key, &value);
+  if (!result || !value->is_dict())
+    return false;
+
+  if (out_value)
+    *out_value = static_cast<const DictionaryValue*>(value);
+
+  return true;
+}
+
+bool DictionaryValue::GetDictionaryWithoutPathExpansion(
+    StringPiece key,
+    DictionaryValue** out_value) {
+  const DictionaryValue& const_this =
+      static_cast<const DictionaryValue&>(*this);
+  return const_this.GetDictionaryWithoutPathExpansion(
+          key,
+          const_cast<const DictionaryValue**>(out_value));
+}
+
+bool DictionaryValue::GetListWithoutPathExpansion(
+    StringPiece key,
+    const ListValue** out_value) const {
+  const Value* value;
+  bool result = GetWithoutPathExpansion(key, &value);
+  if (!result || !value->is_list())
+    return false;
+
+  if (out_value)
+    *out_value = static_cast<const ListValue*>(value);
+
+  return true;
+}
+
+bool DictionaryValue::GetListWithoutPathExpansion(StringPiece key,
+                                                  ListValue** out_value) {
+  return
+      static_cast<const DictionaryValue&>(*this).GetListWithoutPathExpansion(
+          key,
+          const_cast<const ListValue**>(out_value));
+}
+
+bool DictionaryValue::Remove(StringPiece path,
+                             std::unique_ptr<Value>* out_value) {
+  DCHECK(IsStringUTF8(path));
+  StringPiece current_path(path);
+  DictionaryValue* current_dictionary = this;
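+  // Split at the last '.': everything before it addresses the parent
+  // dictionary; everything after it is the key to remove from that parent.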
+  size_t delimiter_position = current_path.rfind('.');
+  if (delimiter_position != StringPiece::npos) {
+    if (!GetDictionary(current_path.substr(0, delimiter_position),
+                       &current_dictionary))
+      return false;
+    current_path = current_path.substr(delimiter_position + 1);
+  }
+
+  return current_dictionary->RemoveWithoutPathExpansion(current_path,
+                                                        out_value);
+}
+
+bool DictionaryValue::RemoveWithoutPathExpansion(
+    StringPiece key,
+    std::unique_ptr<Value>* out_value) {
+  DCHECK(IsStringUTF8(key));
+  auto entry_iterator = dict_.find(key);
+  if (entry_iterator == dict_.end())
+    return false;
+
+  if (out_value)
+    *out_value = std::move(entry_iterator->second);
+  dict_.erase(entry_iterator);
+  return true;
+}
+
+bool DictionaryValue::RemovePath(StringPiece path,
+                                 std::unique_ptr<Value>* out_value) {
+  bool result = false;
+  size_t delimiter_position = path.find('.');
+
+  if (delimiter_position == std::string::npos)
+    return RemoveWithoutPathExpansion(path, out_value);
+
+  StringPiece subdict_path = path.substr(0, delimiter_position);
+  DictionaryValue* subdict = nullptr;
+  if (!GetDictionary(subdict_path, &subdict))
+    return false;
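+  // Recurse into the subdictionary with the remainder of the path; if the
+  // removal leaves the subdictionary empty, prune it from this dictionary too.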
+  result = subdict->RemovePath(path.substr(delimiter_position + 1),
+                               out_value);
+  if (result && subdict->empty())
+    RemoveWithoutPathExpansion(subdict_path, nullptr);
+
+  return result;
+}
+
+std::unique_ptr<DictionaryValue> DictionaryValue::DeepCopyWithoutEmptyChildren()
+    const {
+  std::unique_ptr<DictionaryValue> copy =
+      CopyDictionaryWithoutEmptyChildren(*this);
+  if (!copy)
+    copy = std::make_unique<DictionaryValue>();
+  return copy;
+}
+
+void DictionaryValue::MergeDictionary(const DictionaryValue* dictionary) {
+  CHECK(dictionary->is_dict());
+  for (DictionaryValue::Iterator it(*dictionary); !it.IsAtEnd(); it.Advance()) {
+    const Value* merge_value = &it.value();
+    // Check whether we have to merge dictionaries.
+    if (merge_value->is_dict()) {
+      DictionaryValue* sub_dict;
+      if (GetDictionaryWithoutPathExpansion(it.key(), &sub_dict)) {
+        sub_dict->MergeDictionary(
+            static_cast<const DictionaryValue*>(merge_value));
+        continue;
+      }
+    }
+    // All other cases: Make a copy and hook it up.
+    SetKey(it.key(), merge_value->Clone());
+  }
+}
+
+void DictionaryValue::Swap(DictionaryValue* other) {
+  CHECK(other->is_dict());
+  dict_.swap(other->dict_);
+}
+
+DictionaryValue::Iterator::Iterator(const DictionaryValue& target)
+    : target_(target), it_(target.dict_.begin()) {}
+
+DictionaryValue::Iterator::Iterator(const Iterator& other) = default;
+
+DictionaryValue::Iterator::~Iterator() = default;
+
+DictionaryValue* DictionaryValue::DeepCopy() const {
+  return new DictionaryValue(dict_);
+}
+
+std::unique_ptr<DictionaryValue> DictionaryValue::CreateDeepCopy() const {
+  return std::make_unique<DictionaryValue>(dict_);
+}
+
+///////////////////// ListValue ////////////////////
+
+// static
+std::unique_ptr<ListValue> ListValue::From(std::unique_ptr<Value> value) {
+  ListValue* out;
+  if (value && value->GetAsList(&out)) {
+    ignore_result(value.release());
+    return WrapUnique(out);
+  }
+  return nullptr;
+}
+
+ListValue::ListValue() : Value(Type::LIST) {}
+ListValue::ListValue(const ListStorage& in_list) : Value(in_list) {}
+ListValue::ListValue(ListStorage&& in_list) noexcept
+    : Value(std::move(in_list)) {}
+
+void ListValue::Clear() {
+  list_.clear();
+}
+
+void ListValue::Reserve(size_t n) {
+  list_.reserve(n);
+}
+
+bool ListValue::Set(size_t index, std::unique_ptr<Value> in_value) {
+  if (!in_value)
+    return false;
+
+  if (index >= list_.size())
+    list_.resize(index + 1);
+
+  list_[index] = std::move(*in_value);
+  return true;
+}
+
+bool ListValue::Get(size_t index, const Value** out_value) const {
+  if (index >= list_.size())
+    return false;
+
+  if (out_value)
+    *out_value = &list_[index];
+
+  return true;
+}
+
+bool ListValue::Get(size_t index, Value** out_value) {
+  return static_cast<const ListValue&>(*this).Get(
+      index,
+      const_cast<const Value**>(out_value));
+}
+
+bool ListValue::GetBoolean(size_t index, bool* bool_value) const {
+  const Value* value;
+  if (!Get(index, &value))
+    return false;
+
+  return value->GetAsBoolean(bool_value);
+}
+
+bool ListValue::GetInteger(size_t index, int* out_value) const {
+  const Value* value;
+  if (!Get(index, &value))
+    return false;
+
+  return value->GetAsInteger(out_value);
+}
+
+bool ListValue::GetDouble(size_t index, double* out_value) const {
+  const Value* value;
+  if (!Get(index, &value))
+    return false;
+
+  return value->GetAsDouble(out_value);
+}
+
+bool ListValue::GetString(size_t index, std::string* out_value) const {
+  const Value* value;
+  if (!Get(index, &value))
+    return false;
+
+  return value->GetAsString(out_value);
+}
+
+bool ListValue::GetString(size_t index, string16* out_value) const {
+  const Value* value;
+  if (!Get(index, &value))
+    return false;
+
+  return value->GetAsString(out_value);
+}
+
+bool ListValue::GetDictionary(size_t index,
+                              const DictionaryValue** out_value) const {
+  const Value* value;
+  bool result = Get(index, &value);
+  if (!result || !value->is_dict())
+    return false;
+
+  if (out_value)
+    *out_value = static_cast<const DictionaryValue*>(value);
+
+  return true;
+}
+
+bool ListValue::GetDictionary(size_t index, DictionaryValue** out_value) {
+  return static_cast<const ListValue&>(*this).GetDictionary(
+      index,
+      const_cast<const DictionaryValue**>(out_value));
+}
+
+bool ListValue::GetList(size_t index, const ListValue** out_value) const {
+  const Value* value;
+  bool result = Get(index, &value);
+  if (!result || !value->is_list())
+    return false;
+
+  if (out_value)
+    *out_value = static_cast<const ListValue*>(value);
+
+  return true;
+}
+
+bool ListValue::GetList(size_t index, ListValue** out_value) {
+  return static_cast<const ListValue&>(*this).GetList(
+      index,
+      const_cast<const ListValue**>(out_value));
+}
+
+bool ListValue::Remove(size_t index, std::unique_ptr<Value>* out_value) {
+  if (index >= list_.size())
+    return false;
+
+  if (out_value)
+    *out_value = std::make_unique<Value>(std::move(list_[index]));
+
+  list_.erase(list_.begin() + index);
+  return true;
+}
+
+bool ListValue::Remove(const Value& value, size_t* index) {
+  auto it = std::find(list_.begin(), list_.end(), value);
+
+  if (it == list_.end())
+    return false;
+
+  if (index)
+    *index = std::distance(list_.begin(), it);
+
+  list_.erase(it);
+  return true;
+}
+
+ListValue::iterator ListValue::Erase(iterator iter,
+                                     std::unique_ptr<Value>* out_value) {
+  if (out_value)
+    *out_value = std::make_unique<Value>(std::move(*iter));
+
+  return list_.erase(iter);
+}
+
+void ListValue::Append(std::unique_ptr<Value> in_value) {
+  list_.push_back(std::move(*in_value));
+}
+
+void ListValue::AppendBoolean(bool in_value) {
+  list_.emplace_back(in_value);
+}
+
+void ListValue::AppendInteger(int in_value) {
+  list_.emplace_back(in_value);
+}
+
+void ListValue::AppendDouble(double in_value) {
+  list_.emplace_back(in_value);
+}
+
+void ListValue::AppendString(StringPiece in_value) {
+  list_.emplace_back(in_value);
+}
+
+void ListValue::AppendString(const string16& in_value) {
+  list_.emplace_back(in_value);
+}
+
+void ListValue::AppendStrings(const std::vector<std::string>& in_values) {
+  list_.reserve(list_.size() + in_values.size());
+  for (const auto& in_value : in_values)
+    list_.emplace_back(in_value);
+}
+
+void ListValue::AppendStrings(const std::vector<string16>& in_values) {
+  list_.reserve(list_.size() + in_values.size());
+  for (const auto& in_value : in_values)
+    list_.emplace_back(in_value);
+}
+
+bool ListValue::AppendIfNotPresent(std::unique_ptr<Value> in_value) {
+  DCHECK(in_value);
+  if (ContainsValue(list_, *in_value))
+    return false;
+
+  list_.push_back(std::move(*in_value));
+  return true;
+}
+
+bool ListValue::Insert(size_t index, std::unique_ptr<Value> in_value) {
+  DCHECK(in_value);
+  if (index > list_.size())
+    return false;
+
+  list_.insert(list_.begin() + index, std::move(*in_value));
+  return true;
+}
+
+ListValue::const_iterator ListValue::Find(const Value& value) const {
+  return std::find(list_.begin(), list_.end(), value);
+}
+
+void ListValue::Swap(ListValue* other) {
+  CHECK(other->is_list());
+  list_.swap(other->list_);
+}
+
+ListValue* ListValue::DeepCopy() const {
+  return new ListValue(list_);
+}
+
+std::unique_ptr<ListValue> ListValue::CreateDeepCopy() const {
+  return std::make_unique<ListValue>(list_);
+}
+
+ValueSerializer::~ValueSerializer() = default;
+
+ValueDeserializer::~ValueDeserializer() = default;
+
+std::ostream& operator<<(std::ostream& out, const Value& value) {
+  std::string json;
+  JSONWriter::WriteWithOptions(value, JSONWriter::OPTIONS_PRETTY_PRINT, &json);
+  return out << json;
+}
+
+std::ostream& operator<<(std::ostream& out, const Value::Type& type) {
+  if (static_cast<int>(type) < 0 ||
+      static_cast<size_t>(type) >= arraysize(kTypeNames))
+    return out << "Invalid Type (index = " << static_cast<int>(type) << ")";
+  return out << Value::GetTypeName(type);
+}
+
+}  // namespace base
diff --git a/base/values.h b/base/values.h
new file mode 100644
index 0000000..e9253db
--- /dev/null
+++ b/base/values.h
@@ -0,0 +1,782 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file specifies a recursive data storage class called Value intended for
+// storing settings and other persistable data.
+//
+// A Value represents something that can be stored in JSON or passed to/from
+// JavaScript. As such, it is NOT a generalized variant type, since only the
+// types supported by JavaScript/JSON are supported.
+//
+// IN PARTICULAR this means that there is no support for int64_t or unsigned
+// numbers. Writing JSON with such types would violate the spec. If you need
+// something like this, either use a double or make a string value containing
+// the number you want.
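+//
+// For example, a 64-bit ID would typically be carried as a decimal string
+// (a sketch; |id64| is an illustrative int64_t):
+//   base::Value id(std::to_string(id64));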
+//
+// NOTE: A Value parameter that is always a Value::STRING should just be passed
+// as a std::string. Similarly for Values that are always Value::DICTIONARY
+// (should be flat_map), Value::LIST (should be std::vector), et cetera.
+
+#ifndef BASE_VALUES_H_
+#define BASE_VALUES_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <iosfwd>
+#include <map>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/containers/flat_map.h"
+#include "base/containers/span.h"
+#include "base/macros.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
+#include "base/value_iterators.h"
+
+namespace base {
+
+class DictionaryValue;
+class ListValue;
+class Value;
+
+// The Value class is the base class for Values. A Value can be instantiated
+// by passing the appropriate type or backing storage to the constructor.
+//
+// See the file-level comment above for more information.
+//
+// base::Value is currently in the process of being refactored. Design doc:
+// https://docs.google.com/document/d/1uDLu5uTRlCWePxQUEHc8yNQdEoE1BDISYdpggWEABnw
+//
+// Previously (which is how most code that currently exists is written), Value
+// used derived types to implement the individual data types, and base::Value
+// was just a base class to refer to them. This required everything be heap
+// allocated.
+//
+// OLD WAY:
+//
+//   std::unique_ptr<base::Value> GetFoo() {
+//     auto dict = std::make_unique<DictionaryValue>();
+//     dict->SetString("mykey", foo);
+//     return std::move(dict);
+//   }
+//
+// The new design makes base::Value a variant type that holds everything in
+// a union. It is now recommended to pass by value with std::move rather than
+// use heap allocated values. The DictionaryValue and ListValue subclasses
+// exist only as a compatibility shim that we're in the process of removing.
+//
+// NEW WAY:
+//
+//   base::Value GetFoo() {
+//     base::Value dict(base::Value::Type::DICTIONARY);
+//     dict.SetKey("mykey", base::Value(foo));
+//     return dict;
+//   }
+class BASE_EXPORT Value {
+ public:
+  using BlobStorage = std::vector<char>;
+  using DictStorage = flat_map<std::string, std::unique_ptr<Value>>;
+  using ListStorage = std::vector<Value>;
+
+  enum class Type {
+    NONE = 0,
+    BOOLEAN,
+    INTEGER,
+    DOUBLE,
+    STRING,
+    BINARY,
+    DICTIONARY,
+    LIST
+    // Note: Do not add more types. See the file-level comment above for why.
+  };
+
+  // For situations where you want to keep ownership of your buffer, this
+  // factory method creates a new BinaryValue by copying the contents of the
+  // buffer that's passed in.
+  // DEPRECATED, use std::make_unique<Value>(const BlobStorage&) instead.
+  // TODO(crbug.com/646113): Delete this and migrate callsites.
+  static std::unique_ptr<Value> CreateWithCopiedBuffer(const char* buffer,
+                                                       size_t size);
+
+  // Adaptors for converting from the old way to the new way and vice versa.
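+  // Example (sketch; |legacy| is an illustrative std::unique_ptr<Value>):
+  //   Value v = Value::FromUniquePtrValue(std::move(legacy));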
+  static Value FromUniquePtrValue(std::unique_ptr<Value> val);
+  static std::unique_ptr<Value> ToUniquePtrValue(Value val);
+
+  Value(Value&& that) noexcept;
+  Value() noexcept;  // A null value.
+
+  // Value's copy constructor and copy assignment operator are deleted. Use this
+  // to obtain a deep copy explicitly.
+  Value Clone() const;
+
+  explicit Value(Type type);
+  explicit Value(bool in_bool);
+  explicit Value(int in_int);
+  explicit Value(double in_double);
+
+  // Value(const char*) and Value(const char16*) are required despite
+  // Value(StringPiece) and Value(StringPiece16) because otherwise the
+  // compiler will choose the Value(bool) constructor for these arguments.
+  // Value(std::string&&) allows for efficient move construction.
+  explicit Value(const char* in_string);
+  explicit Value(StringPiece in_string);
+  explicit Value(std::string&& in_string) noexcept;
+  explicit Value(const char16* in_string16);
+  explicit Value(StringPiece16 in_string16);
+
+  explicit Value(const BlobStorage& in_blob);
+  explicit Value(BlobStorage&& in_blob) noexcept;
+
+  explicit Value(const DictStorage& in_dict);
+  explicit Value(DictStorage&& in_dict) noexcept;
+
+  explicit Value(const ListStorage& in_list);
+  explicit Value(ListStorage&& in_list) noexcept;
+
+  Value& operator=(Value&& that) noexcept;
+
+  ~Value();
+
+  // Returns the name for a given |type|.
+  static const char* GetTypeName(Type type);
+
+  // Returns the type of the value stored by the current Value object.
+  Type type() const { return type_; }
+
+  // Returns true if the current object represents a given type.
+  bool is_none() const { return type() == Type::NONE; }
+  bool is_bool() const { return type() == Type::BOOLEAN; }
+  bool is_int() const { return type() == Type::INTEGER; }
+  bool is_double() const { return type() == Type::DOUBLE; }
+  bool is_string() const { return type() == Type::STRING; }
+  bool is_blob() const { return type() == Type::BINARY; }
+  bool is_dict() const { return type() == Type::DICTIONARY; }
+  bool is_list() const { return type() == Type::LIST; }
+
+  // These will all fatally assert if the type doesn't match.
+  bool GetBool() const;
+  int GetInt() const;
+  double GetDouble() const;  // Implicitly converts from int if necessary.
+  const std::string& GetString() const;
+  const BlobStorage& GetBlob() const;
+
+  ListStorage& GetList();
+  const ListStorage& GetList() const;
+
+  // |FindKey| looks up |key| in the underlying dictionary. If found, it
+  // returns a pointer to the element. Otherwise it returns nullptr.
+  // Callers are expected to perform a check against null before using the
+  // pointer.
+  // Note: This fatally asserts if type() is not Type::DICTIONARY.
+  //
+  // Example:
+  //   auto* found = FindKey("foo");
+  Value* FindKey(StringPiece key);
+  const Value* FindKey(StringPiece key) const;
+
+  // |FindKeyOfType| is similar to |FindKey|, but it also requires the found
+  // value to have type |type|. If no value is found, or the found value is
+  // of a different type, nullptr is returned.
+  // Callers are expected to perform a check against null before using the
+  // pointer.
+  // Note: This fatally asserts if type() is not Type::DICTIONARY.
+  //
+  // Example:
+  //   auto* found = FindKeyOfType("foo", Type::DOUBLE);
+  Value* FindKeyOfType(StringPiece key, Type type);
+  const Value* FindKeyOfType(StringPiece key, Type type) const;
+
+  // |SetKey| looks up |key| in the underlying dictionary and sets the mapped
+  // value to |value|. If |key| could not be found, a new element is inserted.
+  // A pointer to the modified item is returned.
+  // Note: This fatally asserts if type() is not Type::DICTIONARY.
+  //
+  // Example:
+  //   SetKey("foo", std::move(myvalue));
+  Value* SetKey(StringPiece key, Value value);
+  // This overload results in a performance improvement for std::string&&.
+  Value* SetKey(std::string&& key, Value value);
+  // This overload is necessary to avoid ambiguity for const char* arguments.
+  Value* SetKey(const char* key, Value value);
+
+  // This attempts to remove the value associated with |key|. In case of failure,
+  // e.g. the key does not exist, |false| is returned and the underlying
+  // dictionary is not changed. In case of success, |key| is deleted from the
+  // dictionary and the method returns |true|.
+  // Note: This fatally asserts if type() is not Type::DICTIONARY.
+  //
+  // Example:
+  //   bool success = RemoveKey("foo");
+  bool RemoveKey(StringPiece key);
+
+  // Searches a hierarchy of dictionary values for a given value. If a path
+  // of dictionaries exists, returns the item at that path. If any of the path
+  // components do not exist or if any but the last path components are not
+  // dictionaries, returns nullptr.
+  //
+  // The type of the leaf Value is not checked.
+  //
+  // Implementation note: This can't return an iterator because the iterator
+  // will actually be into another Value, so it can't be compared to iterators
+  // from this one (in particular, the DictItems().end() iterator).
+  //
+  // Example:
+  //   auto* found = FindPath({"foo", "bar"});
+  //
+  //   std::vector<StringPiece> components = ...
+  //   auto* found = FindPath(components);
+  //
+  // Note: If there is only one component in the path, use FindKey() instead.
+  Value* FindPath(std::initializer_list<StringPiece> path);
+  Value* FindPath(span<const StringPiece> path);
+  const Value* FindPath(std::initializer_list<StringPiece> path) const;
+  const Value* FindPath(span<const StringPiece> path) const;
+
+  // Like FindPath() but will only return the value if the leaf Value type
+  // matches the given type. Will return nullptr otherwise.
+  //
+  // Note: If there is only one component in the path, use FindKeyOfType()
+  // instead.
+  Value* FindPathOfType(std::initializer_list<StringPiece> path, Type type);
+  Value* FindPathOfType(span<const StringPiece> path, Type type);
+  const Value* FindPathOfType(std::initializer_list<StringPiece> path,
+                              Type type) const;
+  const Value* FindPathOfType(span<const StringPiece> path, Type type) const;
+
+  // Sets the given path, expanding and creating dictionary keys as necessary.
+  //
+  // If the current value is not a dictionary, the function returns nullptr. If
+  // path components do not exist, they will be created. If any component but
+  // the last matches a value that is not a dictionary, the function will fail
+  // (it will not overwrite the value) and return nullptr. The last path
+  // component will be unconditionally overwritten if it exists, and created if
+  // it doesn't.
+  //
+  // Example:
+  //   value.SetPath({"foo", "bar"}, std::move(myvalue));
+  //
+  //   std::vector<StringPiece> components = ...
+  //   value.SetPath(components, std::move(myvalue));
+  //
+  // Note: If there is only one component in the path, use SetKey() instead.
+  Value* SetPath(std::initializer_list<StringPiece> path, Value value);
+  Value* SetPath(span<const StringPiece> path, Value value);
+
+  // Tries to remove a Value at the given path.
+  //
+  // If the current value is not a dictionary or any path component does not
+  // exist, this operation fails, leaves underlying Values untouched and returns
+  // |false|. In case intermediate dictionaries become empty as a result of this
+  // path removal, they will be removed as well.
+  //
+  // Example:
+  //   bool success = value.RemovePath({"foo", "bar"});
+  //
+  //   std::vector<StringPiece> components = ...
+  //   bool success = value.RemovePath(components);
+  //
+  // Note: If there is only one component in the path, use RemoveKey() instead.
+  bool RemovePath(std::initializer_list<StringPiece> path);
+  bool RemovePath(span<const StringPiece> path);
+
+  using dict_iterator_proxy = detail::dict_iterator_proxy;
+  using const_dict_iterator_proxy = detail::const_dict_iterator_proxy;
+
+  // |DictItems| returns a proxy object that exposes iterators to the underlying
+  // dictionary. These are intended for iteration over all items in the
+  // dictionary and are compatible with for-each loops and standard library
+  // algorithms.
+  // Note: This fatally asserts if type() is not Type::DICTIONARY.
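+  //
+  // Example (sketch; item.first is the key, item.second the Value):
+  //   for (const auto& item : value.DictItems())
+  //     Process(item.first, item.second);  // |Process| is hypothetical.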
+  dict_iterator_proxy DictItems();
+  const_dict_iterator_proxy DictItems() const;
+
+  // Returns the size of the dictionary, and whether the dictionary is empty.
+  // Note: This fatally asserts if type() is not Type::DICTIONARY.
+  size_t DictSize() const;
+  bool DictEmpty() const;
+
+  // These methods allow the convenient retrieval of the contents of the Value.
+  // If the current object can be converted into the given type, the value is
+  // returned through the |out_value| parameter and true is returned;
+  // otherwise, false is returned and |out_value| is unchanged.
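+  //
+  // Migration sketch (|value| is an illustrative base::Value):
+  //   bool b;
+  //   if (value.GetAsBoolean(&b)) { ... }        // deprecated form
+  //   if (value.is_bool()) b = value.GetBool();  // preferred form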
+  // DEPRECATED, use GetBool() instead.
+  bool GetAsBoolean(bool* out_value) const;
+  // DEPRECATED, use GetInt() instead.
+  bool GetAsInteger(int* out_value) const;
+  // DEPRECATED, use GetDouble() instead.
+  bool GetAsDouble(double* out_value) const;
+  // DEPRECATED, use GetString() instead.
+  bool GetAsString(std::string* out_value) const;
+  bool GetAsString(string16* out_value) const;
+  bool GetAsString(const Value** out_value) const;
+  bool GetAsString(StringPiece* out_value) const;
+  // ListValue::From is the equivalent for std::unique_ptr conversions.
+  // DEPRECATED, use GetList() instead.
+  bool GetAsList(ListValue** out_value);
+  bool GetAsList(const ListValue** out_value) const;
+  // DictionaryValue::From is the equivalent for std::unique_ptr conversions.
+  bool GetAsDictionary(DictionaryValue** out_value);
+  bool GetAsDictionary(const DictionaryValue** out_value) const;
+  // Note: Do not add more types. See the file-level comment above for why.
+
+  // This creates a deep copy of the entire Value tree, and returns a pointer
+  // to the copy. The caller gets ownership of the copy, of course.
+  // Subclasses return their own type directly in their overrides;
+  // this works because C++ supports covariant return types.
+  // DEPRECATED, use Value::Clone() instead.
+  // TODO(crbug.com/646113): Delete this and migrate callsites.
+  Value* DeepCopy() const;
+  // DEPRECATED, use Value::Clone() instead.
+  // TODO(crbug.com/646113): Delete this and migrate callsites.
+  std::unique_ptr<Value> CreateDeepCopy() const;
+
+  // Comparison operators so that Values can easily be used with standard
+  // library algorithms and associative containers.
+  BASE_EXPORT friend bool operator==(const Value& lhs, const Value& rhs);
+  BASE_EXPORT friend bool operator!=(const Value& lhs, const Value& rhs);
+  BASE_EXPORT friend bool operator<(const Value& lhs, const Value& rhs);
+  BASE_EXPORT friend bool operator>(const Value& lhs, const Value& rhs);
+  BASE_EXPORT friend bool operator<=(const Value& lhs, const Value& rhs);
+  BASE_EXPORT friend bool operator>=(const Value& lhs, const Value& rhs);
+
+  // Compares if two Value objects have equal contents.
+  // DEPRECATED, use operator==(const Value& lhs, const Value& rhs) instead.
+  // TODO(crbug.com/646113): Delete this and migrate callsites.
+  bool Equals(const Value* other) const;
+
+  // Estimates dynamic memory usage.
+  // See base/trace_event/memory_usage_estimator.h for more info.
+  size_t EstimateMemoryUsage() const;
+
+ protected:
+  // TODO(crbug.com/646113): Make these private once DictionaryValue and
+  // ListValue are properly inlined.
+  Type type_;
+
+  union {
+    bool bool_value_;
+    int int_value_;
+    double double_value_;
+    std::string string_value_;
+    BlobStorage binary_value_;
+    DictStorage dict_;
+    ListStorage list_;
+  };
+
+ private:
+  void InternalMoveConstructFrom(Value&& that);
+  void InternalCleanup();
+
+  DISALLOW_COPY_AND_ASSIGN(Value);
+};
+
+// DictionaryValue provides a key-value dictionary with (optional) "path"
+// parsing for recursive access; see the comment at the top of the file. Keys
+// are |std::string|s and should be UTF-8 encoded.
+class BASE_EXPORT DictionaryValue : public Value {
+ public:
+  using const_iterator = DictStorage::const_iterator;
+  using iterator = DictStorage::iterator;
+
+  // Returns |value| if it is a dictionary, nullptr otherwise.
+  static std::unique_ptr<DictionaryValue> From(std::unique_ptr<Value> value);
+
+  DictionaryValue();
+  explicit DictionaryValue(const DictStorage& in_dict);
+  explicit DictionaryValue(DictStorage&& in_dict) noexcept;
+
+  // Returns true if the current dictionary has a value for the given key.
+  // DEPRECATED, use Value::FindKey(key) instead.
+  bool HasKey(StringPiece key) const;
+
+  // Returns the number of Values in this dictionary.
+  size_t size() const { return dict_.size(); }
+
+  // Returns whether the dictionary is empty.
+  bool empty() const { return dict_.empty(); }
+
+  // Clears any current contents of this dictionary.
+  void Clear();
+
+  // Sets the Value associated with the given path starting from this object.
+  // A path has the form "<key>" or "<key>.<key>.[...]", where "." indexes
+  // into the next DictionaryValue down.  Obviously, "." can't be used
+  // within a key, but there are no other restrictions on keys.
+  // If the key at any step of the way doesn't exist, or exists but isn't
+  // a DictionaryValue, a new DictionaryValue will be created and attached
+  // to the path in that location. |in_value| must be non-null.
+  // Returns a pointer to the inserted value.
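+  //
+  // Example (sketch; creates the intermediate dictionary "a" if needed):
+  //   dict.Set("a.b", std::make_unique<Value>(1));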
+  // DEPRECATED, use Value::SetPath(path, value) instead.
+  Value* Set(StringPiece path, std::unique_ptr<Value> in_value);
+
+  // Convenience forms of Set().  These methods will replace any existing
+  // value at that path, even if it has a different type.
+  // DEPRECATED, use Value::SetPath(path, Value(bool)) instead.
+  Value* SetBoolean(StringPiece path, bool in_value);
+  // DEPRECATED, use Value::SetPath(path, Value(int)) instead.
+  Value* SetInteger(StringPiece path, int in_value);
+  // DEPRECATED, use Value::SetPath(path, Value(double)) instead.
+  Value* SetDouble(StringPiece path, double in_value);
+  // DEPRECATED, use Value::SetPath(path, Value(StringPiece)) instead.
+  Value* SetString(StringPiece path, StringPiece in_value);
+  // DEPRECATED, use Value::SetPath(path, Value(const string16&)) instead.
+  Value* SetString(StringPiece path, const string16& in_value);
+  // DEPRECATED, use Value::SetPath(path, Value(Type::DICTIONARY)) instead.
+  DictionaryValue* SetDictionary(StringPiece path,
+                                 std::unique_ptr<DictionaryValue> in_value);
+  // DEPRECATED, use Value::SetPath(path, Value(Type::LIST)) instead.
+  ListValue* SetList(StringPiece path, std::unique_ptr<ListValue> in_value);
+
+  // Like Set(), but without special treatment of '.'.  This allows e.g. URLs to
+  // be used as paths.
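+  //
+  // Example (sketch; the '.' in the key is treated verbatim, not as a path):
+  //   dict.SetWithoutPathExpansion("example.com", std::make_unique<Value>(1));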
+  // DEPRECATED, use Value::SetKey(key, value) instead.
+  Value* SetWithoutPathExpansion(StringPiece key,
+                                 std::unique_ptr<Value> in_value);
+
+  // Gets the Value associated with the given path starting from this object.
+  // A path has the form "<key>" or "<key>.<key>.[...]", where "." indexes
+  // into the next DictionaryValue down.  If the path can be resolved
+  // successfully, the value for the last key in the path will be returned
+  // through the |out_value| parameter, and the function will return true.
+  // Otherwise, it will return false and |out_value| will be untouched.
+  // Note that the dictionary always owns the value that's returned.
+  // |out_value| is optional and will only be set if non-NULL.
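+  //
+  // Example (sketch):
+  //   const Value* out = nullptr;
+  //   if (dict.Get("a.b", &out)) { /* |out| is owned by |dict| */ }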
+  // DEPRECATED, use Value::FindPath(path) instead.
+  bool Get(StringPiece path, const Value** out_value) const;
+  // DEPRECATED, use Value::FindPath(path) instead.
+  bool Get(StringPiece path, Value** out_value);
+
+  // These are convenience forms of Get().  The value will be retrieved
+  // and the return value will be true if the path is valid and the value at
+  // the end of the path can be returned in the form specified.
+  // |out_value| is optional and will only be set if non-NULL.
+  // DEPRECATED, use Value::FindPath(path) and Value::GetBool() instead.
+  bool GetBoolean(StringPiece path, bool* out_value) const;
+  // DEPRECATED, use Value::FindPath(path) and Value::GetInt() instead.
+  bool GetInteger(StringPiece path, int* out_value) const;
+  // Values of both type Type::INTEGER and Type::DOUBLE can be obtained as
+  // doubles.
+  // DEPRECATED, use Value::FindPath(path) and Value::GetDouble() instead.
+  bool GetDouble(StringPiece path, double* out_value) const;
+  // DEPRECATED, use Value::FindPath(path) and Value::GetString() instead.
+  bool GetString(StringPiece path, std::string* out_value) const;
+  // DEPRECATED, use Value::FindPath(path) and Value::GetString() instead.
+  bool GetString(StringPiece path, string16* out_value) const;
+  // DEPRECATED, use Value::FindPath(path) and Value::GetString() instead.
+  bool GetStringASCII(StringPiece path, std::string* out_value) const;
+  // DEPRECATED, use Value::FindPath(path) and Value::GetBlob() instead.
+  bool GetBinary(StringPiece path, const Value** out_value) const;
+  // DEPRECATED, use Value::FindPath(path) and Value::GetBlob() instead.
+  bool GetBinary(StringPiece path, Value** out_value);
+  // DEPRECATED, use Value::FindPath(path) and Value's Dictionary API instead.
+  bool GetDictionary(StringPiece path,
+                     const DictionaryValue** out_value) const;
+  // DEPRECATED, use Value::FindPath(path) and Value's Dictionary API instead.
+  bool GetDictionary(StringPiece path, DictionaryValue** out_value);
+  // DEPRECATED, use Value::FindPath(path) and Value::GetList() instead.
+  bool GetList(StringPiece path, const ListValue** out_value) const;
+  // DEPRECATED, use Value::FindPath(path) and Value::GetList() instead.
+  bool GetList(StringPiece path, ListValue** out_value);
+
+  // Like Get(), but without special treatment of '.'.  This allows e.g. URLs to
+  // be used as paths.
+  // DEPRECATED, use Value::FindKey(key) instead.
+  bool GetWithoutPathExpansion(StringPiece key, const Value** out_value) const;
+  // DEPRECATED, use Value::FindKey(key) instead.
+  bool GetWithoutPathExpansion(StringPiece key, Value** out_value);
+  // DEPRECATED, use Value::FindKey(key) and Value::GetBool() instead.
+  bool GetBooleanWithoutPathExpansion(StringPiece key, bool* out_value) const;
+  // DEPRECATED, use Value::FindKey(key) and Value::GetInt() instead.
+  bool GetIntegerWithoutPathExpansion(StringPiece key, int* out_value) const;
+  // DEPRECATED, use Value::FindKey(key) and Value::GetDouble() instead.
+  bool GetDoubleWithoutPathExpansion(StringPiece key, double* out_value) const;
+  // DEPRECATED, use Value::FindKey(key) and Value::GetString() instead.
+  bool GetStringWithoutPathExpansion(StringPiece key,
+                                     std::string* out_value) const;
+  // DEPRECATED, use Value::FindKey(key) and Value::GetString() instead.
+  bool GetStringWithoutPathExpansion(StringPiece key,
+                                     string16* out_value) const;
+  // DEPRECATED, use Value::FindKey(key) and Value's Dictionary API instead.
+  bool GetDictionaryWithoutPathExpansion(
+      StringPiece key,
+      const DictionaryValue** out_value) const;
+  // DEPRECATED, use Value::FindKey(key) and Value's Dictionary API instead.
+  bool GetDictionaryWithoutPathExpansion(StringPiece key,
+                                         DictionaryValue** out_value);
+  // DEPRECATED, use Value::FindKey(key) and Value::GetList() instead.
+  bool GetListWithoutPathExpansion(StringPiece key,
+                                   const ListValue** out_value) const;
+  // DEPRECATED, use Value::FindKey(key) and Value::GetList() instead.
+  bool GetListWithoutPathExpansion(StringPiece key, ListValue** out_value);
+
+  // Removes the Value with the specified path from this dictionary (or one
+  // of its child dictionaries, if the path is more than just a local key).
+  // If |out_value| is non-NULL, the removed Value will be passed out via
+  // |out_value|.  If |out_value| is NULL, the removed value will be deleted.
+  // This method returns true if |path| is a valid path; otherwise it will
+  // return false and the DictionaryValue object will be unchanged.
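+  //
+  // Example (sketch):
+  //   std::unique_ptr<Value> removed;
+  //   bool ok = dict.Remove("a.b", &removed);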
+  // DEPRECATED, use Value::RemovePath(path) instead.
+  bool Remove(StringPiece path, std::unique_ptr<Value>* out_value);
+
+  // Like Remove(), but without special treatment of '.'.  This allows e.g. URLs
+  // to be used as paths.
+  // DEPRECATED, use Value::RemoveKey(key) instead.
+  bool RemoveWithoutPathExpansion(StringPiece key,
+                                  std::unique_ptr<Value>* out_value);
+
+  // Removes a path, clearing out all dictionaries on |path| that remain empty
+  // after removing the value at |path|.
+  // DEPRECATED, use Value::RemovePath(path) instead.
+  bool RemovePath(StringPiece path, std::unique_ptr<Value>* out_value);
+
+  using Value::RemovePath;  // DictionaryValue::RemovePath shadows otherwise.
+
+  // Makes a copy of |this| but doesn't include empty dictionaries and lists in
+  // the copy.  This never returns NULL, even if |this| itself is empty.
+  std::unique_ptr<DictionaryValue> DeepCopyWithoutEmptyChildren() const;
+
+  // Merge |dictionary| into this dictionary. This is done recursively, i.e. any
+  // sub-dictionaries will be merged as well. In case of key collisions, the
+  // passed in dictionary takes precedence and data already present will be
+  // replaced. Values within |dictionary| are deep-copied, so |dictionary| may
+  // be freed any time after this call.
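+  //
+  // Example (sketch; |overrides| wins on key collisions):
+  //   defaults.MergeDictionary(&overrides);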
+  void MergeDictionary(const DictionaryValue* dictionary);
+
+  // Swaps contents with the |other| dictionary.
+  void Swap(DictionaryValue* other);
+
+  // This class provides an iterator over both keys and values in the
+  // dictionary.  It can't be used to modify the dictionary.
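+  //
+  // Example (sketch; this is the pattern MergeDictionary() uses internally):
+  //   for (DictionaryValue::Iterator it(dict); !it.IsAtEnd(); it.Advance())
+  //     Process(it.key(), it.value());  // |Process| is hypothetical.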
+  // DEPRECATED, use Value::DictItems() instead.
+  class BASE_EXPORT Iterator {
+   public:
+    explicit Iterator(const DictionaryValue& target);
+    Iterator(const Iterator& other);
+    ~Iterator();
+
+    bool IsAtEnd() const { return it_ == target_.dict_.end(); }
+    void Advance() { ++it_; }
+
+    const std::string& key() const { return it_->first; }
+    const Value& value() const { return *it_->second; }
+
+   private:
+    const DictionaryValue& target_;
+    DictStorage::const_iterator it_;
+  };
+
+  // Iteration.
+  // DEPRECATED, use Value::DictItems() instead.
+  iterator begin() { return dict_.begin(); }
+  iterator end() { return dict_.end(); }
+
+  // DEPRECATED, use Value::DictItems() instead.
+  const_iterator begin() const { return dict_.begin(); }
+  const_iterator end() const { return dict_.end(); }
+
+  // DEPRECATED, use Value::Clone() instead.
+  // TODO(crbug.com/646113): Delete this and migrate callsites.
+  DictionaryValue* DeepCopy() const;
+  // DEPRECATED, use Value::Clone() instead.
+  // TODO(crbug.com/646113): Delete this and migrate callsites.
+  std::unique_ptr<DictionaryValue> CreateDeepCopy() const;
+};
+
+// This type of Value represents a list of other Value values.
+class BASE_EXPORT ListValue : public Value {
+ public:
+  using const_iterator = ListStorage::const_iterator;
+  using iterator = ListStorage::iterator;
+
+  // Returns |value| if it is a list, nullptr otherwise.
+  static std::unique_ptr<ListValue> From(std::unique_ptr<Value> value);
+
+  ListValue();
+  explicit ListValue(const ListStorage& in_list);
+  explicit ListValue(ListStorage&& in_list) noexcept;
+
+  // Clears the contents of this ListValue.
+  // DEPRECATED, use GetList()::clear() instead.
+  void Clear();
+
+  // Returns the number of Values in this list.
+  // DEPRECATED, use GetList()::size() instead.
+  size_t GetSize() const { return list_.size(); }
+
+  // Returns whether the list is empty.
+  // DEPRECATED, use GetList()::empty() instead.
+  bool empty() const { return list_.empty(); }
+
+  // Reserves storage for at least |n| values.
+  // DEPRECATED, use GetList()::reserve() instead.
+  void Reserve(size_t n);
+
+  // Sets the list item at the given index to be the Value specified by
+  // the value given.  If the index is beyond the current end of the list,
+  // null Values will be used to pad out the list.
+  // Returns true if successful, or false if the value is a null pointer.
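+  //
+  // Example (sketch; indices 0 through 2 become null padding values):
+  //   list.Set(3, std::make_unique<Value>("x"));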
+  // DEPRECATED, use GetList()::operator[] instead.
+  bool Set(size_t index, std::unique_ptr<Value> in_value);
+
+  // Gets the Value at the given index.  Modifies |out_value| (and returns true)
+  // only if the index falls within the current list range.
+  // Note that the list always owns the Value passed out via |out_value|.
+  // |out_value| is optional and will only be set if non-NULL.
+  // DEPRECATED, use GetList()::operator[] instead.
+  bool Get(size_t index, const Value** out_value) const;
+  bool Get(size_t index, Value** out_value);
+
+  // Convenience forms of Get().  Modifies |out_value| (and returns true)
+  // only if the index is valid and the Value at that index can be returned
+  // in the specified form.
+  // |out_value| is optional and will only be set if non-NULL.
+  // DEPRECATED, use GetList()::operator[]::GetBool() instead.
+  bool GetBoolean(size_t index, bool* out_value) const;
+  // DEPRECATED, use GetList()::operator[]::GetInt() instead.
+  bool GetInteger(size_t index, int* out_value) const;
+  // Values of both type Type::INTEGER and Type::DOUBLE can be obtained as
+  // doubles.
+  // DEPRECATED, use GetList()::operator[]::GetDouble() instead.
+  bool GetDouble(size_t index, double* out_value) const;
+  // DEPRECATED, use GetList()::operator[]::GetString() instead.
+  bool GetString(size_t index, std::string* out_value) const;
+  bool GetString(size_t index, string16* out_value) const;
+
+  bool GetDictionary(size_t index, const DictionaryValue** out_value) const;
+  bool GetDictionary(size_t index, DictionaryValue** out_value);
+
+  using Value::GetList;
+  // DEPRECATED, use GetList()::operator[]::GetList() instead.
+  bool GetList(size_t index, const ListValue** out_value) const;
+  bool GetList(size_t index, ListValue** out_value);
+
+  // Removes the Value with the specified index from this list.
+  // If |out_value| is non-NULL, the removed Value AND ITS OWNERSHIP will be
+  // passed out via |out_value|.  If |out_value| is NULL, the removed value will
+  // be deleted.  This method returns true if |index| is valid; otherwise
+  // it will return false and the ListValue object will be unchanged.
+  // DEPRECATED, use GetList()::erase() instead.
+  bool Remove(size_t index, std::unique_ptr<Value>* out_value);
+
+  // Removes the first instance of |value| found in the list, if any, and
+  // deletes it. |index| is the location where |value| was found. Returns false
+  // if not found.
+  // DEPRECATED, use GetList()::erase() instead.
+  bool Remove(const Value& value, size_t* index);
+
+  // Removes the element at |iter|. If |out_value| is NULL, the value will be
+  // deleted, otherwise ownership of the value is passed back to the caller.
+  // Returns an iterator pointing to the location of the element that
+  // followed the erased element.
+  // DEPRECATED, use GetList()::erase() instead.
+  iterator Erase(iterator iter, std::unique_ptr<Value>* out_value);
+
+  // Appends a Value to the end of the list.
+  // DEPRECATED, use GetList()::push_back() instead.
+  void Append(std::unique_ptr<Value> in_value);
+
+  // Convenience forms of Append.
+  // DEPRECATED, use GetList()::emplace_back() instead.
+  void AppendBoolean(bool in_value);
+  void AppendInteger(int in_value);
+  void AppendDouble(double in_value);
+  void AppendString(StringPiece in_value);
+  void AppendString(const string16& in_value);
+  // DEPRECATED, use GetList()::emplace_back() in a loop instead.
+  void AppendStrings(const std::vector<std::string>& in_values);
+  void AppendStrings(const std::vector<string16>& in_values);
+
+  // Appends a Value if it's not already present. Returns true if successful,
+  // or false if the value was already present.
+  // DEPRECATED, use std::find() with GetList()::push_back() instead.
+  bool AppendIfNotPresent(std::unique_ptr<Value> in_value);
+
+  // Insert a Value at index.
+  // Returns true if successful, or false if the index was out of range.
+  // DEPRECATED, use GetList()::insert() instead.
+  bool Insert(size_t index, std::unique_ptr<Value> in_value);
+
+  // Searches for the first instance of |value| in the list using the Equals
+  // method of the Value type.
+  // Returns a const_iterator to the found item or to end() if none exists.
+  // DEPRECATED, use std::find() instead.
+  const_iterator Find(const Value& value) const;
+
+  // Swaps contents with the |other| list.
+  // DEPRECATED, use GetList()::swap() instead.
+  void Swap(ListValue* other);
+
+  // Iteration.
+  // DEPRECATED, use GetList()::begin() instead.
+  iterator begin() { return list_.begin(); }
+  // DEPRECATED, use GetList()::end() instead.
+  iterator end() { return list_.end(); }
+
+  // DEPRECATED, use GetList()::begin() instead.
+  const_iterator begin() const { return list_.begin(); }
+  // DEPRECATED, use GetList()::end() instead.
+  const_iterator end() const { return list_.end(); }
+
+  // DEPRECATED, use Value::Clone() instead.
+  // TODO(crbug.com/646113): Delete this and migrate callsites.
+  ListValue* DeepCopy() const;
+  // DEPRECATED, use Value::Clone() instead.
+  // TODO(crbug.com/646113): Delete this and migrate callsites.
+  std::unique_ptr<ListValue> CreateDeepCopy() const;
+};
+
+// This interface is implemented by classes that know how to serialize
+// Value objects.
+class BASE_EXPORT ValueSerializer {
+ public:
+  virtual ~ValueSerializer();
+
+  virtual bool Serialize(const Value& root) = 0;
+};
+
+// This interface is implemented by classes that know how to deserialize Value
+// objects.
+class BASE_EXPORT ValueDeserializer {
+ public:
+  virtual ~ValueDeserializer();
+
+  // This method deserializes the subclass-specific format into a Value object.
+  // If the return value is non-NULL, the caller takes ownership of returned
+  // Value. If the return value is NULL, and if error_code is non-NULL,
+  // error_code will be set with the underlying error.
+  // If |error_message| is non-null, it will be filled in with a formatted
+  // error message including the location of the error if appropriate.
+  virtual std::unique_ptr<Value> Deserialize(int* error_code,
+                                             std::string* error_str) = 0;
+};
+
+// Stream operator so Values can be used in assertion statements.  In order that
+// gtest uses this operator to print readable output on test failures, we must
+// override each specific type. Otherwise, the default template implementation
+// is preferred over an upcast.
+BASE_EXPORT std::ostream& operator<<(std::ostream& out, const Value& value);
+
+BASE_EXPORT inline std::ostream& operator<<(std::ostream& out,
+                                            const DictionaryValue& value) {
+  return out << static_cast<const Value&>(value);
+}
+
+BASE_EXPORT inline std::ostream& operator<<(std::ostream& out,
+                                            const ListValue& value) {
+  return out << static_cast<const Value&>(value);
+}
+
+// Stream operator so that enum class Types can be used in log statements.
+BASE_EXPORT std::ostream& operator<<(std::ostream& out,
+                                     const Value::Type& type);
+
+}  // namespace base
+
+#endif  // BASE_VALUES_H_
diff --git a/base/values_unittest.cc b/base/values_unittest.cc
new file mode 100644
index 0000000..b8efac7
--- /dev/null
+++ b/base/values_unittest.cc
@@ -0,0 +1,1922 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/values.h"
+
+#include <stddef.h>
+
+#include <functional>
+#include <limits>
+#include <memory>
+#include <string>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include "base/containers/adapters.h"
+#include "base/memory/ptr_util.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/utf_string_conversions.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(ValuesTest, TestNothrow) {
+  static_assert(std::is_nothrow_move_constructible<Value>::value,
+                "IsNothrowMoveConstructible");
+  static_assert(std::is_nothrow_default_constructible<Value>::value,
+                "IsNothrowDefaultConstructible");
+  static_assert(std::is_nothrow_constructible<Value, std::string&&>::value,
+                "IsNothrowMoveConstructibleFromString");
+  static_assert(
+      std::is_nothrow_constructible<Value, Value::BlobStorage&&>::value,
+      "IsNothrowMoveConstructibleFromBlob");
+  static_assert(
+      std::is_nothrow_constructible<Value, Value::ListStorage&&>::value,
+      "IsNothrowMoveConstructibleFromList");
+  static_assert(std::is_nothrow_move_assignable<Value>::value,
+                "IsNothrowMoveAssignable");
+  static_assert(
+      std::is_nothrow_constructible<ListValue, Value::ListStorage&&>::value,
+      "ListIsNothrowMoveConstructibleFromList");
+}
+
+// Group of tests for the value constructors.
+TEST(ValuesTest, ConstructBool) {
+  Value true_value(true);
+  EXPECT_EQ(Value::Type::BOOLEAN, true_value.type());
+  EXPECT_TRUE(true_value.GetBool());
+
+  Value false_value(false);
+  EXPECT_EQ(Value::Type::BOOLEAN, false_value.type());
+  EXPECT_FALSE(false_value.GetBool());
+}
+
+TEST(ValuesTest, ConstructInt) {
+  Value value(-37);
+  EXPECT_EQ(Value::Type::INTEGER, value.type());
+  EXPECT_EQ(-37, value.GetInt());
+}
+
+TEST(ValuesTest, ConstructDouble) {
+  Value value(-4.655);
+  EXPECT_EQ(Value::Type::DOUBLE, value.type());
+  EXPECT_EQ(-4.655, value.GetDouble());
+}
+
+TEST(ValuesTest, ConstructStringFromConstCharPtr) {
+  const char* str = "foobar";
+  Value value(str);
+  EXPECT_EQ(Value::Type::STRING, value.type());
+  EXPECT_EQ("foobar", value.GetString());
+}
+
+TEST(ValuesTest, ConstructStringFromStringPiece) {
+  std::string str = "foobar";
+  Value value{StringPiece(str)};
+  EXPECT_EQ(Value::Type::STRING, value.type());
+  EXPECT_EQ("foobar", value.GetString());
+}
+
+TEST(ValuesTest, ConstructStringFromStdStringRRef) {
+  std::string str = "foobar";
+  Value value(std::move(str));
+  EXPECT_EQ(Value::Type::STRING, value.type());
+  EXPECT_EQ("foobar", value.GetString());
+}
+
+TEST(ValuesTest, ConstructStringFromConstChar16Ptr) {
+  string16 str = ASCIIToUTF16("foobar");
+  Value value(str.c_str());
+  EXPECT_EQ(Value::Type::STRING, value.type());
+  EXPECT_EQ("foobar", value.GetString());
+}
+
+TEST(ValuesTest, ConstructStringFromStringPiece16) {
+  string16 str = ASCIIToUTF16("foobar");
+  Value value{StringPiece16(str)};
+  EXPECT_EQ(Value::Type::STRING, value.type());
+  EXPECT_EQ("foobar", value.GetString());
+}
+
+TEST(ValuesTest, ConstructBinary) {
+  Value value(Value::BlobStorage({0xF, 0x0, 0x0, 0xB, 0xA, 0x2}));
+  EXPECT_EQ(Value::Type::BINARY, value.type());
+  EXPECT_EQ(Value::BlobStorage({0xF, 0x0, 0x0, 0xB, 0xA, 0x2}),
+            value.GetBlob());
+}
+
+TEST(ValuesTest, ConstructDict) {
+  DictionaryValue value;
+  EXPECT_EQ(Value::Type::DICTIONARY, value.type());
+}
+
+TEST(ValuesTest, ConstructDictFromStorage) {
+  Value::DictStorage storage;
+  storage.emplace("foo", std::make_unique<Value>("bar"));
+  {
+    DictionaryValue value(storage);
+    EXPECT_EQ(Value::Type::DICTIONARY, value.type());
+    EXPECT_EQ(Value::Type::STRING, value.FindKey("foo")->type());
+    EXPECT_EQ("bar", value.FindKey("foo")->GetString());
+  }
+
+  *storage["foo"] = base::Value("baz");
+  {
+    DictionaryValue value(std::move(storage));
+    EXPECT_EQ(Value::Type::DICTIONARY, value.type());
+    EXPECT_EQ(Value::Type::STRING, value.FindKey("foo")->type());
+    EXPECT_EQ("baz", value.FindKey("foo")->GetString());
+  }
+}
+
+TEST(ValuesTest, ConstructList) {
+  ListValue value;
+  EXPECT_EQ(Value::Type::LIST, value.type());
+}
+
+TEST(ValuesTest, ConstructListFromStorage) {
+  Value::ListStorage storage;
+  storage.emplace_back("foo");
+  {
+    ListValue value(storage);
+    EXPECT_EQ(Value::Type::LIST, value.type());
+    EXPECT_EQ(1u, value.GetList().size());
+    EXPECT_EQ(Value::Type::STRING, value.GetList()[0].type());
+    EXPECT_EQ("foo", value.GetList()[0].GetString());
+  }
+
+  storage.back() = base::Value("bar");
+  {
+    ListValue value(std::move(storage));
+    EXPECT_EQ(Value::Type::LIST, value.type());
+    EXPECT_EQ(1u, value.GetList().size());
+    EXPECT_EQ(Value::Type::STRING, value.GetList()[0].type());
+    EXPECT_EQ("bar", value.GetList()[0].GetString());
+  }
+}
+
+// Group of tests for the copy constructors and copy-assignment. For equality
+// checks, comparisons of the interesting fields are done instead of relying on
+// Equals being correct.
+TEST(ValuesTest, CopyBool) {
+  Value true_value(true);
+  Value copied_true_value(true_value.Clone());
+  EXPECT_EQ(true_value.type(), copied_true_value.type());
+  EXPECT_EQ(true_value.GetBool(), copied_true_value.GetBool());
+
+  Value false_value(false);
+  Value copied_false_value(false_value.Clone());
+  EXPECT_EQ(false_value.type(), copied_false_value.type());
+  EXPECT_EQ(false_value.GetBool(), copied_false_value.GetBool());
+
+  Value blank;
+
+  blank = true_value.Clone();
+  EXPECT_EQ(true_value.type(), blank.type());
+  EXPECT_EQ(true_value.GetBool(), blank.GetBool());
+
+  blank = false_value.Clone();
+  EXPECT_EQ(false_value.type(), blank.type());
+  EXPECT_EQ(false_value.GetBool(), blank.GetBool());
+}
+
+TEST(ValuesTest, CopyInt) {
+  Value value(74);
+  Value copied_value(value.Clone());
+  EXPECT_EQ(value.type(), copied_value.type());
+  EXPECT_EQ(value.GetInt(), copied_value.GetInt());
+
+  Value blank;
+
+  blank = value.Clone();
+  EXPECT_EQ(value.type(), blank.type());
+  EXPECT_EQ(value.GetInt(), blank.GetInt());
+}
+
+TEST(ValuesTest, CopyDouble) {
+  Value value(74.896);
+  Value copied_value(value.Clone());
+  EXPECT_EQ(value.type(), copied_value.type());
+  EXPECT_EQ(value.GetDouble(), copied_value.GetDouble());
+
+  Value blank;
+
+  blank = value.Clone();
+  EXPECT_EQ(value.type(), blank.type());
+  EXPECT_EQ(value.GetDouble(), blank.GetDouble());
+}
+
+TEST(ValuesTest, CopyString) {
+  Value value("foobar");
+  Value copied_value(value.Clone());
+  EXPECT_EQ(value.type(), copied_value.type());
+  EXPECT_EQ(value.GetString(), copied_value.GetString());
+
+  Value blank;
+
+  blank = value.Clone();
+  EXPECT_EQ(value.type(), blank.type());
+  EXPECT_EQ(value.GetString(), blank.GetString());
+}
+
+TEST(ValuesTest, CopyBinary) {
+  Value value(Value::BlobStorage({0xF, 0x0, 0x0, 0xB, 0xA, 0x2}));
+  Value copied_value(value.Clone());
+  EXPECT_EQ(value.type(), copied_value.type());
+  EXPECT_EQ(value.GetBlob(), copied_value.GetBlob());
+
+  Value blank;
+
+  blank = value.Clone();
+  EXPECT_EQ(value.type(), blank.type());
+  EXPECT_EQ(value.GetBlob(), blank.GetBlob());
+}
+
+TEST(ValuesTest, CopyDictionary) {
+  Value::DictStorage storage;
+  storage.emplace("Int", std::make_unique<Value>(123));
+  Value value(std::move(storage));
+
+  Value copied_value(value.Clone());
+  EXPECT_EQ(value, copied_value);
+
+  Value blank;
+  blank = value.Clone();
+  EXPECT_EQ(value, blank);
+}
+
+TEST(ValuesTest, CopyList) {
+  Value::ListStorage storage;
+  storage.emplace_back(123);
+  Value value(std::move(storage));
+
+  Value copied_value(value.Clone());
+  EXPECT_EQ(value, copied_value);
+
+  Value blank;
+  blank = value.Clone();
+  EXPECT_EQ(value, blank);
+}
+
+// Group of tests for the move constructors and move-assignment.
+TEST(ValuesTest, MoveBool) {
+  Value true_value(true);
+  Value moved_true_value(std::move(true_value));
+  EXPECT_EQ(Value::Type::BOOLEAN, moved_true_value.type());
+  EXPECT_TRUE(moved_true_value.GetBool());
+
+  Value false_value(false);
+  Value moved_false_value(std::move(false_value));
+  EXPECT_EQ(Value::Type::BOOLEAN, moved_false_value.type());
+  EXPECT_FALSE(moved_false_value.GetBool());
+
+  Value blank;
+
+  blank = Value(true);
+  EXPECT_EQ(Value::Type::BOOLEAN, blank.type());
+  EXPECT_TRUE(blank.GetBool());
+
+  blank = Value(false);
+  EXPECT_EQ(Value::Type::BOOLEAN, blank.type());
+  EXPECT_FALSE(blank.GetBool());
+}
+
+TEST(ValuesTest, MoveInt) {
+  Value value(74);
+  Value moved_value(std::move(value));
+  EXPECT_EQ(Value::Type::INTEGER, moved_value.type());
+  EXPECT_EQ(74, moved_value.GetInt());
+
+  Value blank;
+
+  blank = Value(47);
+  EXPECT_EQ(Value::Type::INTEGER, blank.type());
+  EXPECT_EQ(47, blank.GetInt());
+}
+
+TEST(ValuesTest, MoveDouble) {
+  Value value(74.896);
+  Value moved_value(std::move(value));
+  EXPECT_EQ(Value::Type::DOUBLE, moved_value.type());
+  EXPECT_EQ(74.896, moved_value.GetDouble());
+
+  Value blank;
+
+  blank = Value(654.38);
+  EXPECT_EQ(Value::Type::DOUBLE, blank.type());
+  EXPECT_EQ(654.38, blank.GetDouble());
+}
+
+TEST(ValuesTest, MoveString) {
+  Value value("foobar");
+  Value moved_value(std::move(value));
+  EXPECT_EQ(Value::Type::STRING, moved_value.type());
+  EXPECT_EQ("foobar", moved_value.GetString());
+
+  Value blank;
+
+  blank = Value("foobar");
+  EXPECT_EQ(Value::Type::STRING, blank.type());
+  EXPECT_EQ("foobar", blank.GetString());
+}
+
+TEST(ValuesTest, MoveBinary) {
+  const Value::BlobStorage buffer = {0xF, 0x0, 0x0, 0xB, 0xA, 0x2};
+  Value value(buffer);
+  Value moved_value(std::move(value));
+  EXPECT_EQ(Value::Type::BINARY, moved_value.type());
+  EXPECT_EQ(buffer, moved_value.GetBlob());
+
+  Value blank;
+
+  blank = Value(buffer);
+  EXPECT_EQ(Value::Type::BINARY, blank.type());
+  EXPECT_EQ(buffer, blank.GetBlob());
+}
+
+TEST(ValuesTest, MoveConstructDictionary) {
+  Value::DictStorage storage;
+  storage.emplace("Int", std::make_unique<Value>(123));
+
+  Value value(std::move(storage));
+  Value moved_value(std::move(value));
+  EXPECT_EQ(Value::Type::DICTIONARY, moved_value.type());
+  EXPECT_EQ(123, moved_value.FindKey("Int")->GetInt());
+}
+
+TEST(ValuesTest, MoveAssignDictionary) {
+  Value::DictStorage storage;
+  storage.emplace("Int", std::make_unique<Value>(123));
+
+  Value blank;
+  blank = Value(std::move(storage));
+  EXPECT_EQ(Value::Type::DICTIONARY, blank.type());
+  EXPECT_EQ(123, blank.FindKey("Int")->GetInt());
+}
+
+TEST(ValuesTest, MoveList) {
+  Value::ListStorage storage;
+  storage.emplace_back(123);
+  Value value(storage);
+  Value moved_value(std::move(value));
+  EXPECT_EQ(Value::Type::LIST, moved_value.type());
+  EXPECT_EQ(123, moved_value.GetList().back().GetInt());
+
+  Value blank;
+  blank = Value(std::move(storage));
+  EXPECT_EQ(Value::Type::LIST, blank.type());
+  EXPECT_EQ(123, blank.GetList().back().GetInt());
+}
+
+TEST(ValuesTest, FindKey) {
+  Value::DictStorage storage;
+  storage.emplace("foo", std::make_unique<Value>("bar"));
+  Value dict(std::move(storage));
+  EXPECT_NE(nullptr, dict.FindKey("foo"));
+  EXPECT_EQ(nullptr, dict.FindKey("baz"));
+
+  // A missing key yields a null pointer, which tests as false.
+  bool found = dict.FindKey("notfound");
+  EXPECT_FALSE(found);
+}
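+
+// Illustrative sketch (not part of the original suite): the idiomatic way to
+// consume FindKey() is to null-check the returned pointer, since a missing
+// key yields nullptr rather than a default value.
+TEST(ValuesTest, FindKeyNullCheckSketch) {
+  Value dict(Value::Type::DICTIONARY);
+  dict.SetKey("present", Value(1));
+  const Value* found = dict.FindKey("present");
+  ASSERT_NE(nullptr, found);
+  EXPECT_EQ(1, found->GetInt());
+  EXPECT_EQ(nullptr, dict.FindKey("absent"));
+}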
+
+TEST(ValuesTest, FindKeyChangeValue) {
+  Value::DictStorage storage;
+  storage.emplace("foo", std::make_unique<Value>("bar"));
+  Value dict(std::move(storage));
+  Value* found = dict.FindKey("foo");
+  EXPECT_NE(nullptr, found);
+  EXPECT_EQ("bar", found->GetString());
+
+  *found = Value(123);
+  EXPECT_EQ(123, dict.FindKey("foo")->GetInt());
+}
+
+TEST(ValuesTest, FindKeyConst) {
+  Value::DictStorage storage;
+  storage.emplace("foo", std::make_unique<Value>("bar"));
+  const Value dict(std::move(storage));
+  EXPECT_NE(nullptr, dict.FindKey("foo"));
+  EXPECT_EQ(nullptr, dict.FindKey("baz"));
+}
+
+TEST(ValuesTest, FindKeyOfType) {
+  Value::DictStorage storage;
+  storage.emplace("null", std::make_unique<Value>(Value::Type::NONE));
+  storage.emplace("bool", std::make_unique<Value>(Value::Type::BOOLEAN));
+  storage.emplace("int", std::make_unique<Value>(Value::Type::INTEGER));
+  storage.emplace("double", std::make_unique<Value>(Value::Type::DOUBLE));
+  storage.emplace("string", std::make_unique<Value>(Value::Type::STRING));
+  storage.emplace("blob", std::make_unique<Value>(Value::Type::BINARY));
+  storage.emplace("list", std::make_unique<Value>(Value::Type::LIST));
+  storage.emplace("dict", std::make_unique<Value>(Value::Type::DICTIONARY));
+
+  Value dict(std::move(storage));
+  EXPECT_NE(nullptr, dict.FindKeyOfType("null", Value::Type::NONE));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("null", Value::Type::BOOLEAN));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("null", Value::Type::INTEGER));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("null", Value::Type::DOUBLE));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("null", Value::Type::STRING));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("null", Value::Type::BINARY));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("null", Value::Type::LIST));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("null", Value::Type::DICTIONARY));
+
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("bool", Value::Type::NONE));
+  EXPECT_NE(nullptr, dict.FindKeyOfType("bool", Value::Type::BOOLEAN));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("bool", Value::Type::INTEGER));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("bool", Value::Type::DOUBLE));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("bool", Value::Type::STRING));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("bool", Value::Type::BINARY));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("bool", Value::Type::LIST));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("bool", Value::Type::DICTIONARY));
+
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("int", Value::Type::NONE));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("int", Value::Type::BOOLEAN));
+  EXPECT_NE(nullptr, dict.FindKeyOfType("int", Value::Type::INTEGER));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("int", Value::Type::DOUBLE));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("int", Value::Type::STRING));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("int", Value::Type::BINARY));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("int", Value::Type::LIST));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("int", Value::Type::DICTIONARY));
+
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("double", Value::Type::NONE));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("double", Value::Type::BOOLEAN));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("double", Value::Type::INTEGER));
+  EXPECT_NE(nullptr, dict.FindKeyOfType("double", Value::Type::DOUBLE));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("double", Value::Type::STRING));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("double", Value::Type::BINARY));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("double", Value::Type::LIST));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("double", Value::Type::DICTIONARY));
+
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("string", Value::Type::NONE));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("string", Value::Type::BOOLEAN));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("string", Value::Type::INTEGER));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("string", Value::Type::DOUBLE));
+  EXPECT_NE(nullptr, dict.FindKeyOfType("string", Value::Type::STRING));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("string", Value::Type::BINARY));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("string", Value::Type::LIST));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("string", Value::Type::DICTIONARY));
+
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("blob", Value::Type::NONE));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("blob", Value::Type::BOOLEAN));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("blob", Value::Type::INTEGER));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("blob", Value::Type::DOUBLE));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("blob", Value::Type::STRING));
+  EXPECT_NE(nullptr, dict.FindKeyOfType("blob", Value::Type::BINARY));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("blob", Value::Type::LIST));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("blob", Value::Type::DICTIONARY));
+
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("list", Value::Type::NONE));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("list", Value::Type::BOOLEAN));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("list", Value::Type::INTEGER));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("list", Value::Type::DOUBLE));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("list", Value::Type::STRING));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("list", Value::Type::BINARY));
+  EXPECT_NE(nullptr, dict.FindKeyOfType("list", Value::Type::LIST));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("list", Value::Type::DICTIONARY));
+
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("dict", Value::Type::NONE));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("dict", Value::Type::BOOLEAN));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("dict", Value::Type::INTEGER));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("dict", Value::Type::DOUBLE));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("dict", Value::Type::STRING));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("dict", Value::Type::BINARY));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("dict", Value::Type::LIST));
+  EXPECT_NE(nullptr, dict.FindKeyOfType("dict", Value::Type::DICTIONARY));
+}
+
+TEST(ValuesTest, FindKeyOfTypeConst) {
+  Value::DictStorage storage;
+  storage.emplace("null", std::make_unique<Value>(Value::Type::NONE));
+  storage.emplace("bool", std::make_unique<Value>(Value::Type::BOOLEAN));
+  storage.emplace("int", std::make_unique<Value>(Value::Type::INTEGER));
+  storage.emplace("double", std::make_unique<Value>(Value::Type::DOUBLE));
+  storage.emplace("string", std::make_unique<Value>(Value::Type::STRING));
+  storage.emplace("blob", std::make_unique<Value>(Value::Type::BINARY));
+  storage.emplace("list", std::make_unique<Value>(Value::Type::LIST));
+  storage.emplace("dict", std::make_unique<Value>(Value::Type::DICTIONARY));
+
+  const Value dict(std::move(storage));
+  EXPECT_NE(nullptr, dict.FindKeyOfType("null", Value::Type::NONE));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("null", Value::Type::BOOLEAN));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("null", Value::Type::INTEGER));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("null", Value::Type::DOUBLE));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("null", Value::Type::STRING));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("null", Value::Type::BINARY));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("null", Value::Type::LIST));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("null", Value::Type::DICTIONARY));
+
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("bool", Value::Type::NONE));
+  EXPECT_NE(nullptr, dict.FindKeyOfType("bool", Value::Type::BOOLEAN));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("bool", Value::Type::INTEGER));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("bool", Value::Type::DOUBLE));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("bool", Value::Type::STRING));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("bool", Value::Type::BINARY));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("bool", Value::Type::LIST));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("bool", Value::Type::DICTIONARY));
+
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("int", Value::Type::NONE));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("int", Value::Type::BOOLEAN));
+  EXPECT_NE(nullptr, dict.FindKeyOfType("int", Value::Type::INTEGER));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("int", Value::Type::DOUBLE));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("int", Value::Type::STRING));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("int", Value::Type::BINARY));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("int", Value::Type::LIST));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("int", Value::Type::DICTIONARY));
+
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("double", Value::Type::NONE));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("double", Value::Type::BOOLEAN));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("double", Value::Type::INTEGER));
+  EXPECT_NE(nullptr, dict.FindKeyOfType("double", Value::Type::DOUBLE));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("double", Value::Type::STRING));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("double", Value::Type::BINARY));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("double", Value::Type::LIST));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("double", Value::Type::DICTIONARY));
+
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("string", Value::Type::NONE));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("string", Value::Type::BOOLEAN));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("string", Value::Type::INTEGER));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("string", Value::Type::DOUBLE));
+  EXPECT_NE(nullptr, dict.FindKeyOfType("string", Value::Type::STRING));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("string", Value::Type::BINARY));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("string", Value::Type::LIST));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("string", Value::Type::DICTIONARY));
+
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("blob", Value::Type::NONE));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("blob", Value::Type::BOOLEAN));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("blob", Value::Type::INTEGER));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("blob", Value::Type::DOUBLE));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("blob", Value::Type::STRING));
+  EXPECT_NE(nullptr, dict.FindKeyOfType("blob", Value::Type::BINARY));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("blob", Value::Type::LIST));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("blob", Value::Type::DICTIONARY));
+
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("list", Value::Type::NONE));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("list", Value::Type::BOOLEAN));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("list", Value::Type::INTEGER));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("list", Value::Type::DOUBLE));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("list", Value::Type::STRING));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("list", Value::Type::BINARY));
+  EXPECT_NE(nullptr, dict.FindKeyOfType("list", Value::Type::LIST));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("list", Value::Type::DICTIONARY));
+
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("dict", Value::Type::NONE));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("dict", Value::Type::BOOLEAN));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("dict", Value::Type::INTEGER));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("dict", Value::Type::DOUBLE));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("dict", Value::Type::STRING));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("dict", Value::Type::BINARY));
+  EXPECT_EQ(nullptr, dict.FindKeyOfType("dict", Value::Type::LIST));
+  EXPECT_NE(nullptr, dict.FindKeyOfType("dict", Value::Type::DICTIONARY));
+}
+
+TEST(ValuesTest, SetKey) {
+  Value::DictStorage storage;
+  storage.emplace("null", std::make_unique<Value>(Value::Type::NONE));
+  storage.emplace("bool", std::make_unique<Value>(Value::Type::BOOLEAN));
+  storage.emplace("int", std::make_unique<Value>(Value::Type::INTEGER));
+  storage.emplace("double", std::make_unique<Value>(Value::Type::DOUBLE));
+  storage.emplace("string", std::make_unique<Value>(Value::Type::STRING));
+  storage.emplace("blob", std::make_unique<Value>(Value::Type::BINARY));
+  storage.emplace("list", std::make_unique<Value>(Value::Type::LIST));
+  storage.emplace("dict", std::make_unique<Value>(Value::Type::DICTIONARY));
+
+  Value dict(Value::Type::DICTIONARY);
+  dict.SetKey(StringPiece("null"), Value(Value::Type::NONE));
+  dict.SetKey(StringPiece("bool"), Value(Value::Type::BOOLEAN));
+  dict.SetKey(std::string("int"), Value(Value::Type::INTEGER));
+  dict.SetKey(std::string("double"), Value(Value::Type::DOUBLE));
+  dict.SetKey(std::string("string"), Value(Value::Type::STRING));
+  dict.SetKey("blob", Value(Value::Type::BINARY));
+  dict.SetKey("list", Value(Value::Type::LIST));
+  dict.SetKey("dict", Value(Value::Type::DICTIONARY));
+
+  EXPECT_EQ(Value(std::move(storage)), dict);
+}
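+
+// Illustrative sketch, assuming SetKey() replaces an existing entry (the same
+// overwrite behavior SetPath() demonstrates below) rather than inserting a
+// duplicate key.
+TEST(ValuesTest, SetKeyOverwritesSketch) {
+  Value dict(Value::Type::DICTIONARY);
+  dict.SetKey("key", Value(1));
+  dict.SetKey("key", Value("two"));
+  EXPECT_EQ(Value::Type::STRING, dict.FindKey("key")->type());
+  EXPECT_EQ("two", dict.FindKey("key")->GetString());
+}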
+
+TEST(ValuesTest, FindPath) {
+  // Construct a dictionary path {root}.foo.bar = 123
+  Value foo(Value::Type::DICTIONARY);
+  foo.SetKey("bar", Value(123));
+
+  Value root(Value::Type::DICTIONARY);
+  root.SetKey("foo", std::move(foo));
+
+  // An empty path is degenerate but well-defined (and takes work to
+  // prevent): it yields the root itself.
+  Value* found = root.FindPath(std::vector<StringPiece>{});
+  EXPECT_EQ(&root, found);
+
+  // Double key, second not found.
+  found = root.FindPath(std::vector<StringPiece>{"foo", "notfound"});
+  EXPECT_FALSE(found);
+
+  // Double key, found.
+  found = root.FindPath(std::vector<StringPiece>{"foo", "bar"});
+  EXPECT_TRUE(found);
+  EXPECT_TRUE(found->is_int());
+  EXPECT_EQ(123, found->GetInt());
+}
+
+TEST(ValuesTest, SetPath) {
+  Value root(Value::Type::DICTIONARY);
+
+  Value* inserted = root.SetPath({"one", "two"}, Value(123));
+  Value* found = root.FindPathOfType({"one", "two"}, Value::Type::INTEGER);
+  ASSERT_TRUE(found);
+  EXPECT_EQ(inserted, found);
+  EXPECT_EQ(123, found->GetInt());
+
+  inserted = root.SetPath(std::vector<StringPiece>{"foo", "bar"}, Value(123));
+  found = root.FindPathOfType({"foo", "bar"}, Value::Type::INTEGER);
+  ASSERT_TRUE(found);
+  EXPECT_EQ(inserted, found);
+  EXPECT_EQ(123, found->GetInt());
+
+  // Overwrite with a different value.
+  root.SetPath({"foo", "bar"}, Value("hello"));
+  found = root.FindPathOfType(std::vector<StringPiece>{"foo", "bar"},
+                              Value::Type::STRING);
+  ASSERT_TRUE(found);
+  EXPECT_EQ("hello", found->GetString());
+
+  // Can't change existing non-dictionary keys to dictionaries.
+  found =
+      root.SetPath(std::vector<StringPiece>{"foo", "bar", "baz"}, Value(123));
+  EXPECT_FALSE(found);
+}
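+
+// Illustrative sketch of the flip side of the case above: when no conflicting
+// non-dictionary entry exists, SetPath() creates the missing intermediate
+// dictionaries itself.
+TEST(ValuesTest, SetPathCreatesIntermediariesSketch) {
+  Value root(Value::Type::DICTIONARY);
+  EXPECT_FALSE(root.FindKey("a"));
+  root.SetPath({"a", "b", "c"}, Value(1));
+  EXPECT_TRUE(root.FindPath({"a", "b"}));
+  EXPECT_EQ(1, root.FindPath({"a", "b", "c"})->GetInt());
+}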
+
+TEST(ValuesTest, RemoveKey) {
+  Value root(Value::Type::DICTIONARY);
+  root.SetKey("one", Value(123));
+
+  // Removal of missing key should fail.
+  EXPECT_FALSE(root.RemoveKey("two"));
+
+  // Removal of existing key should succeed.
+  EXPECT_TRUE(root.RemoveKey("one"));
+
+  // Second removal of previously existing key should fail.
+  EXPECT_FALSE(root.RemoveKey("one"));
+}
+
+TEST(ValuesTest, RemovePath) {
+  Value root(Value::Type::DICTIONARY);
+  root.SetPath({"one", "two", "three"}, Value(123));
+
+  // Removal of missing key should fail.
+  EXPECT_FALSE(root.RemovePath({"one", "two", "four"}));
+
+  // Removal of existing key should succeed.
+  EXPECT_TRUE(root.RemovePath({"one", "two", "three"}));
+
+  // Second removal of previously existing key should fail.
+  EXPECT_FALSE(root.RemovePath({"one", "two", "three"}));
+
+  // Intermediate empty dictionaries should be cleared.
+  EXPECT_FALSE(root.FindKey("one"));
+
+  root.SetPath({"one", "two", "three"}, Value(123));
+  root.SetPath({"one", "two", "four"}, Value(124));
+
+  EXPECT_TRUE(root.RemovePath(std::vector<StringPiece>{"one", "two", "three"}));
+  // Intermediate non-empty dictionaries should be kept.
+  EXPECT_TRUE(root.FindKey("one"));
+  EXPECT_TRUE(root.FindPath({"one", "two"}));
+  EXPECT_TRUE(root.FindPath({"one", "two", "four"}));
+}
+
+TEST(ValuesTest, Basic) {
+  // Test basic dictionary getting/setting
+  DictionaryValue settings;
+  std::string homepage = "http://google.com";
+  ASSERT_FALSE(settings.GetString("global.homepage", &homepage));
+  ASSERT_EQ(std::string("http://google.com"), homepage);
+
+  ASSERT_FALSE(settings.Get("global", nullptr));
+  settings.SetBoolean("global", true);
+  ASSERT_TRUE(settings.Get("global", nullptr));
+  settings.SetString("global.homepage", "http://scurvy.com");
+  ASSERT_TRUE(settings.Get("global", nullptr));
+  homepage = "http://google.com";
+  ASSERT_TRUE(settings.GetString("global.homepage", &homepage));
+  ASSERT_EQ(std::string("http://scurvy.com"), homepage);
+
+  // Test storing a dictionary in a list.
+  ListValue* toolbar_bookmarks;
+  ASSERT_FALSE(
+    settings.GetList("global.toolbar.bookmarks", &toolbar_bookmarks));
+
+  std::unique_ptr<ListValue> new_toolbar_bookmarks(new ListValue);
+  settings.Set("global.toolbar.bookmarks", std::move(new_toolbar_bookmarks));
+  ASSERT_TRUE(settings.GetList("global.toolbar.bookmarks", &toolbar_bookmarks));
+
+  std::unique_ptr<DictionaryValue> new_bookmark(new DictionaryValue);
+  new_bookmark->SetString("name", "Froogle");
+  new_bookmark->SetString("url", "http://froogle.com");
+  toolbar_bookmarks->Append(std::move(new_bookmark));
+
+  ListValue* bookmark_list;
+  ASSERT_TRUE(settings.GetList("global.toolbar.bookmarks", &bookmark_list));
+  DictionaryValue* bookmark;
+  ASSERT_EQ(1U, bookmark_list->GetSize());
+  ASSERT_TRUE(bookmark_list->GetDictionary(0, &bookmark));
+  std::string bookmark_name = "Unnamed";
+  ASSERT_TRUE(bookmark->GetString("name", &bookmark_name));
+  ASSERT_EQ(std::string("Froogle"), bookmark_name);
+  std::string bookmark_url;
+  ASSERT_TRUE(bookmark->GetString("url", &bookmark_url));
+  ASSERT_EQ(std::string("http://froogle.com"), bookmark_url);
+}
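+
+// Illustrative sketch: the dotted-path setters above expand "a.b" into nested
+// dictionaries, so "global.homepage" addresses the "homepage" key of the
+// inner "global" dictionary (contrast the *WithoutPathExpansion variants
+// exercised later in this file).
+TEST(ValuesTest, PathExpansionSketch) {
+  DictionaryValue settings;
+  settings.SetString("global.homepage", "http://example.com");
+  DictionaryValue* global = nullptr;
+  ASSERT_TRUE(settings.GetDictionary("global", &global));
+  std::string homepage;
+  EXPECT_TRUE(global->GetString("homepage", &homepage));
+  EXPECT_EQ("http://example.com", homepage);
+}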
+
+TEST(ValuesTest, List) {
+  std::unique_ptr<ListValue> mixed_list(new ListValue());
+  mixed_list->Set(0, std::make_unique<Value>(true));
+  mixed_list->Set(1, std::make_unique<Value>(42));
+  mixed_list->Set(2, std::make_unique<Value>(88.8));
+  mixed_list->Set(3, std::make_unique<Value>("foo"));
+  ASSERT_EQ(4u, mixed_list->GetSize());
+
+  Value* value = nullptr;
+  bool bool_value = false;
+  int int_value = 0;
+  double double_value = 0.0;
+  std::string string_value;
+
+  ASSERT_FALSE(mixed_list->Get(4, &value));
+
+  ASSERT_FALSE(mixed_list->GetInteger(0, &int_value));
+  ASSERT_EQ(0, int_value);
+  ASSERT_FALSE(mixed_list->GetBoolean(1, &bool_value));
+  ASSERT_FALSE(bool_value);
+  ASSERT_FALSE(mixed_list->GetString(2, &string_value));
+  ASSERT_EQ("", string_value);
+  ASSERT_FALSE(mixed_list->GetInteger(2, &int_value));
+  ASSERT_EQ(0, int_value);
+  ASSERT_FALSE(mixed_list->GetBoolean(3, &bool_value));
+  ASSERT_FALSE(bool_value);
+
+  ASSERT_TRUE(mixed_list->GetBoolean(0, &bool_value));
+  ASSERT_TRUE(bool_value);
+  ASSERT_TRUE(mixed_list->GetInteger(1, &int_value));
+  ASSERT_EQ(42, int_value);
+  // Implicit conversion from INTEGER to DOUBLE should be possible.
+  ASSERT_TRUE(mixed_list->GetDouble(1, &double_value));
+  ASSERT_EQ(42, double_value);
+  ASSERT_TRUE(mixed_list->GetDouble(2, &double_value));
+  ASSERT_EQ(88.8, double_value);
+  ASSERT_TRUE(mixed_list->GetString(3, &string_value));
+  ASSERT_EQ("foo", string_value);
+
+  // Try searching in the mixed list.
+  base::Value sought_value(42);
+  base::Value not_found_value(false);
+
+  ASSERT_NE(mixed_list->end(), mixed_list->Find(sought_value));
+  ASSERT_TRUE((*mixed_list->Find(sought_value)).GetAsInteger(&int_value));
+  ASSERT_EQ(42, int_value);
+  ASSERT_EQ(mixed_list->end(), mixed_list->Find(not_found_value));
+}
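+
+// Illustrative sketch isolating the coercion noted above: GetDouble() widens
+// an INTEGER element, while GetInteger() rejects a DOUBLE element, so the
+// conversion is one-directional.
+TEST(ValuesTest, ListDoubleCoercionSketch) {
+  ListValue list;
+  list.AppendInteger(7);
+  list.Set(1, std::make_unique<Value>(7.5));
+  double as_double = 0.0;
+  EXPECT_TRUE(list.GetDouble(0, &as_double));
+  EXPECT_EQ(7.0, as_double);
+  int as_int = 0;
+  EXPECT_FALSE(list.GetInteger(1, &as_int));
+}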
+
+TEST(ValuesTest, BinaryValue) {
+  // Constructing with Type::BINARY creates a BinaryValue with an empty
+  // buffer.
+  auto binary = std::make_unique<Value>(Value::Type::BINARY);
+  ASSERT_TRUE(binary.get());
+  ASSERT_TRUE(binary->GetBlob().empty());
+
+  // Test the common case of a non-empty buffer.
+  Value::BlobStorage buffer(15);
+  char* original_buffer = buffer.data();
+  binary.reset(new Value(std::move(buffer)));
+  ASSERT_TRUE(binary.get());
+  ASSERT_TRUE(binary->GetBlob().data());
+  ASSERT_EQ(original_buffer, binary->GetBlob().data());
+  ASSERT_EQ(15U, binary->GetBlob().size());
+
+  char stack_buffer[42];
+  memset(stack_buffer, '!', 42);
+  binary = Value::CreateWithCopiedBuffer(stack_buffer, 42);
+  ASSERT_TRUE(binary.get());
+  ASSERT_TRUE(binary->GetBlob().data());
+  ASSERT_NE(stack_buffer, binary->GetBlob().data());
+  ASSERT_EQ(42U, binary->GetBlob().size());
+  ASSERT_EQ(0, memcmp(stack_buffer, binary->GetBlob().data(),
+                      binary->GetBlob().size()));
+}
+
+TEST(ValuesTest, StringValue) {
+  // Test overloaded StringValue constructor.
+  std::unique_ptr<Value> narrow_value(new Value("narrow"));
+  ASSERT_TRUE(narrow_value.get());
+  ASSERT_TRUE(narrow_value->is_string());
+  std::unique_ptr<Value> utf16_value(new Value(ASCIIToUTF16("utf16")));
+  ASSERT_TRUE(utf16_value.get());
+  ASSERT_TRUE(utf16_value->is_string());
+
+  // Test overloaded GetAsString.
+  std::string narrow = "http://google.com";
+  string16 utf16 = ASCIIToUTF16("http://google.com");
+  const Value* string_value = nullptr;
+  ASSERT_TRUE(narrow_value->GetAsString(&narrow));
+  ASSERT_TRUE(narrow_value->GetAsString(&utf16));
+  ASSERT_TRUE(narrow_value->GetAsString(&string_value));
+  ASSERT_EQ(std::string("narrow"), narrow);
+  ASSERT_EQ(ASCIIToUTF16("narrow"), utf16);
+  ASSERT_EQ(string_value->GetString(), narrow);
+
+  ASSERT_TRUE(utf16_value->GetAsString(&narrow));
+  ASSERT_TRUE(utf16_value->GetAsString(&utf16));
+  ASSERT_TRUE(utf16_value->GetAsString(&string_value));
+  ASSERT_EQ(std::string("utf16"), narrow);
+  ASSERT_EQ(ASCIIToUTF16("utf16"), utf16);
+  ASSERT_EQ(string_value->GetString(), narrow);
+
+  // Don't choke on NULL values.
+  ASSERT_TRUE(narrow_value->GetAsString(static_cast<string16*>(nullptr)));
+  ASSERT_TRUE(narrow_value->GetAsString(static_cast<std::string*>(nullptr)));
+  ASSERT_TRUE(narrow_value->GetAsString(static_cast<const Value**>(nullptr)));
+}
+
+TEST(ValuesTest, ListDeletion) {
+  ListValue list;
+  list.Append(std::make_unique<Value>());
+  EXPECT_FALSE(list.empty());
+  list.Clear();
+  EXPECT_TRUE(list.empty());
+}
+
+TEST(ValuesTest, ListRemoval) {
+  std::unique_ptr<Value> removed_item;
+
+  {
+    ListValue list;
+    list.Append(std::make_unique<Value>());
+    EXPECT_EQ(1U, list.GetSize());
+    EXPECT_FALSE(list.Remove(std::numeric_limits<size_t>::max(),
+                             &removed_item));
+    EXPECT_FALSE(list.Remove(1, &removed_item));
+    EXPECT_TRUE(list.Remove(0, &removed_item));
+    ASSERT_TRUE(removed_item);
+    EXPECT_EQ(0U, list.GetSize());
+  }
+  removed_item.reset();
+
+  {
+    ListValue list;
+    list.Append(std::make_unique<Value>());
+    EXPECT_TRUE(list.Remove(0, nullptr));
+    EXPECT_EQ(0U, list.GetSize());
+  }
+
+  {
+    ListValue list;
+    auto value = std::make_unique<Value>();
+    Value original_value = value->Clone();
+    list.Append(std::move(value));
+    size_t index = 0;
+    list.Remove(original_value, &index);
+    EXPECT_EQ(0U, index);
+    EXPECT_EQ(0U, list.GetSize());
+  }
+}
+
+TEST(ValuesTest, DictionaryDeletion) {
+  std::string key = "test";
+  DictionaryValue dict;
+  dict.Set(key, std::make_unique<Value>());
+  EXPECT_FALSE(dict.empty());
+  EXPECT_FALSE(dict.DictEmpty());
+  EXPECT_EQ(1U, dict.DictSize());
+  dict.Clear();
+  EXPECT_TRUE(dict.empty());
+  EXPECT_TRUE(dict.DictEmpty());
+  EXPECT_EQ(0U, dict.DictSize());
+}
+
+TEST(ValuesTest, DictionarySetReturnsPointer) {
+  {
+    DictionaryValue dict;
+    Value* blank_ptr = dict.Set("foo.bar", std::make_unique<base::Value>());
+    EXPECT_EQ(Value::Type::NONE, blank_ptr->type());
+  }
+
+  {
+    DictionaryValue dict;
+    Value* blank_ptr = dict.SetWithoutPathExpansion(
+        "foo.bar", std::make_unique<base::Value>());
+    EXPECT_EQ(Value::Type::NONE, blank_ptr->type());
+  }
+
+  {
+    DictionaryValue dict;
+    Value* int_ptr = dict.SetInteger("foo.bar", 42);
+    EXPECT_EQ(Value::Type::INTEGER, int_ptr->type());
+    EXPECT_EQ(42, int_ptr->GetInt());
+  }
+
+  {
+    DictionaryValue dict;
+    Value* double_ptr = dict.SetDouble("foo.bar", 3.142);
+    EXPECT_EQ(Value::Type::DOUBLE, double_ptr->type());
+    EXPECT_EQ(3.142, double_ptr->GetDouble());
+  }
+
+  {
+    DictionaryValue dict;
+    Value* string_ptr = dict.SetString("foo.bar", "foo");
+    EXPECT_EQ(Value::Type::STRING, string_ptr->type());
+    EXPECT_EQ("foo", string_ptr->GetString());
+  }
+
+  {
+    DictionaryValue dict;
+    Value* string16_ptr = dict.SetString("foo.bar", ASCIIToUTF16("baz"));
+    EXPECT_EQ(Value::Type::STRING, string16_ptr->type());
+    EXPECT_EQ("baz", string16_ptr->GetString());
+  }
+
+  {
+    DictionaryValue dict;
+    DictionaryValue* dict_ptr = dict.SetDictionary(
+        "foo.bar", std::make_unique<base::DictionaryValue>());
+    EXPECT_EQ(Value::Type::DICTIONARY, dict_ptr->type());
+  }
+
+  {
+    DictionaryValue dict;
+    ListValue* list_ptr =
+        dict.SetList("foo.bar", std::make_unique<base::ListValue>());
+    EXPECT_EQ(Value::Type::LIST, list_ptr->type());
+  }
+}
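+
+// Illustrative sketch of why the returned pointer is useful: the stored
+// value can be mutated in place without a second lookup.
+TEST(ValuesTest, SetterReturnPointerUseSketch) {
+  DictionaryValue dict;
+  Value* stored = dict.SetInteger("counter", 1);
+  *stored = Value(2);  // Mutate through the returned pointer.
+  int out = 0;
+  EXPECT_TRUE(dict.GetInteger("counter", &out));
+  EXPECT_EQ(2, out);
+}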
+
+TEST(ValuesTest, DictionaryRemoval) {
+  std::string key = "test";
+  std::unique_ptr<Value> removed_item;
+
+  {
+    DictionaryValue dict;
+    EXPECT_EQ(0U, dict.DictSize());
+    EXPECT_TRUE(dict.DictEmpty());
+    dict.Set(key, std::make_unique<Value>());
+    EXPECT_TRUE(dict.HasKey(key));
+    EXPECT_FALSE(dict.Remove("absent key", &removed_item));
+    EXPECT_EQ(1U, dict.DictSize());
+    EXPECT_FALSE(dict.DictEmpty());
+
+    EXPECT_TRUE(dict.Remove(key, &removed_item));
+    EXPECT_FALSE(dict.HasKey(key));
+    ASSERT_TRUE(removed_item);
+    EXPECT_EQ(0U, dict.DictSize());
+    EXPECT_TRUE(dict.DictEmpty());
+  }
+
+  {
+    DictionaryValue dict;
+    dict.Set(key, std::make_unique<Value>());
+    EXPECT_TRUE(dict.HasKey(key));
+    EXPECT_TRUE(dict.Remove(key, nullptr));
+    EXPECT_FALSE(dict.HasKey(key));
+  }
+}
+
+TEST(ValuesTest, DictionaryWithoutPathExpansion) {
+  DictionaryValue dict;
+  dict.Set("this.is.expanded", std::make_unique<Value>());
+  dict.SetWithoutPathExpansion("this.isnt.expanded", std::make_unique<Value>());
+
+  EXPECT_FALSE(dict.HasKey("this.is.expanded"));
+  EXPECT_TRUE(dict.HasKey("this"));
+  Value* value1;
+  EXPECT_TRUE(dict.Get("this", &value1));
+  DictionaryValue* value2;
+  ASSERT_TRUE(dict.GetDictionaryWithoutPathExpansion("this", &value2));
+  EXPECT_EQ(value1, value2);
+  EXPECT_EQ(1U, value2->size());
+
+  EXPECT_TRUE(dict.HasKey("this.isnt.expanded"));
+  Value* value3;
+  EXPECT_FALSE(dict.Get("this.isnt.expanded", &value3));
+  Value* value4;
+  ASSERT_TRUE(dict.GetWithoutPathExpansion("this.isnt.expanded", &value4));
+  EXPECT_EQ(Value::Type::NONE, value4->type());
+}
+
+// Tests the deprecated version of SetWithoutPathExpansion.
+// TODO(estade): remove.
+TEST(ValuesTest, DictionaryWithoutPathExpansionDeprecated) {
+  DictionaryValue dict;
+  dict.Set("this.is.expanded", std::make_unique<Value>());
+  dict.SetWithoutPathExpansion("this.isnt.expanded", std::make_unique<Value>());
+
+  EXPECT_FALSE(dict.HasKey("this.is.expanded"));
+  EXPECT_TRUE(dict.HasKey("this"));
+  Value* value1;
+  EXPECT_TRUE(dict.Get("this", &value1));
+  DictionaryValue* value2;
+  ASSERT_TRUE(dict.GetDictionaryWithoutPathExpansion("this", &value2));
+  EXPECT_EQ(value1, value2);
+  EXPECT_EQ(1U, value2->size());
+
+  EXPECT_TRUE(dict.HasKey("this.isnt.expanded"));
+  Value* value3;
+  EXPECT_FALSE(dict.Get("this.isnt.expanded", &value3));
+  Value* value4;
+  ASSERT_TRUE(dict.GetWithoutPathExpansion("this.isnt.expanded", &value4));
+  EXPECT_EQ(Value::Type::NONE, value4->type());
+}
+
+TEST(ValuesTest, DictionaryRemovePath) {
+  DictionaryValue dict;
+  dict.SetInteger("a.long.way.down", 1);
+  dict.SetBoolean("a.long.key.path", true);
+
+  std::unique_ptr<Value> removed_item;
+  EXPECT_TRUE(dict.RemovePath("a.long.way.down", &removed_item));
+  ASSERT_TRUE(removed_item);
+  EXPECT_TRUE(removed_item->is_int());
+  EXPECT_FALSE(dict.HasKey("a.long.way.down"));
+  EXPECT_FALSE(dict.HasKey("a.long.way"));
+  EXPECT_TRUE(dict.Get("a.long.key.path", nullptr));
+
+  removed_item.reset();
+  EXPECT_FALSE(dict.RemovePath("a.long.way.down", &removed_item));
+  EXPECT_FALSE(removed_item);
+  EXPECT_TRUE(dict.Get("a.long.key.path", nullptr));
+
+  removed_item.reset();
+  EXPECT_TRUE(dict.RemovePath("a.long.key.path", &removed_item));
+  ASSERT_TRUE(removed_item);
+  EXPECT_TRUE(removed_item->is_bool());
+  EXPECT_TRUE(dict.empty());
+}
+
+TEST(ValuesTest, DeepCopy) {
+  DictionaryValue original_dict;
+  Value* null_weak = original_dict.Set("null", std::make_unique<Value>());
+  Value* bool_weak = original_dict.Set("bool", std::make_unique<Value>(true));
+  Value* int_weak = original_dict.Set("int", std::make_unique<Value>(42));
+  Value* double_weak =
+      original_dict.Set("double", std::make_unique<Value>(3.14));
+  Value* string_weak =
+      original_dict.Set("string", std::make_unique<Value>("hello"));
+  Value* string16_weak = original_dict.Set(
+      "string16", std::make_unique<Value>(ASCIIToUTF16("hello16")));
+
+  Value* binary_weak = original_dict.Set(
+      "binary", std::make_unique<Value>(Value::BlobStorage(42, '!')));
+
+  Value::ListStorage storage;
+  storage.emplace_back(0);
+  storage.emplace_back(1);
+  Value* list_weak =
+      original_dict.Set("list", std::make_unique<Value>(std::move(storage)));
+  Value* list_element_0_weak = &list_weak->GetList()[0];
+  Value* list_element_1_weak = &list_weak->GetList()[1];
+
+  DictionaryValue* dict_weak = original_dict.SetDictionary(
+      "dictionary", std::make_unique<DictionaryValue>());
+  dict_weak->SetString("key", "value");
+
+  auto copy_dict = original_dict.CreateDeepCopy();
+  ASSERT_TRUE(copy_dict.get());
+  ASSERT_NE(copy_dict.get(), &original_dict);
+
+  Value* copy_null = nullptr;
+  ASSERT_TRUE(copy_dict->Get("null", &copy_null));
+  ASSERT_TRUE(copy_null);
+  ASSERT_NE(copy_null, null_weak);
+  ASSERT_TRUE(copy_null->is_none());
+
+  Value* copy_bool = nullptr;
+  ASSERT_TRUE(copy_dict->Get("bool", &copy_bool));
+  ASSERT_TRUE(copy_bool);
+  ASSERT_NE(copy_bool, bool_weak);
+  ASSERT_TRUE(copy_bool->is_bool());
+  bool copy_bool_value = false;
+  ASSERT_TRUE(copy_bool->GetAsBoolean(&copy_bool_value));
+  ASSERT_TRUE(copy_bool_value);
+
+  Value* copy_int = nullptr;
+  ASSERT_TRUE(copy_dict->Get("int", &copy_int));
+  ASSERT_TRUE(copy_int);
+  ASSERT_NE(copy_int, int_weak);
+  ASSERT_TRUE(copy_int->is_int());
+  int copy_int_value = 0;
+  ASSERT_TRUE(copy_int->GetAsInteger(&copy_int_value));
+  ASSERT_EQ(42, copy_int_value);
+
+  Value* copy_double = nullptr;
+  ASSERT_TRUE(copy_dict->Get("double", &copy_double));
+  ASSERT_TRUE(copy_double);
+  ASSERT_NE(copy_double, double_weak);
+  ASSERT_TRUE(copy_double->is_double());
+  double copy_double_value = 0;
+  ASSERT_TRUE(copy_double->GetAsDouble(&copy_double_value));
+  ASSERT_EQ(3.14, copy_double_value);
+
+  Value* copy_string = nullptr;
+  ASSERT_TRUE(copy_dict->Get("string", &copy_string));
+  ASSERT_TRUE(copy_string);
+  ASSERT_NE(copy_string, string_weak);
+  ASSERT_TRUE(copy_string->is_string());
+  std::string copy_string_value;
+  string16 copy_string16_value;
+  ASSERT_TRUE(copy_string->GetAsString(&copy_string_value));
+  ASSERT_TRUE(copy_string->GetAsString(&copy_string16_value));
+  ASSERT_EQ(std::string("hello"), copy_string_value);
+  ASSERT_EQ(ASCIIToUTF16("hello"), copy_string16_value);
+
+  Value* copy_string16 = nullptr;
+  ASSERT_TRUE(copy_dict->Get("string16", &copy_string16));
+  ASSERT_TRUE(copy_string16);
+  ASSERT_NE(copy_string16, string16_weak);
+  ASSERT_TRUE(copy_string16->is_string());
+  ASSERT_TRUE(copy_string16->GetAsString(&copy_string_value));
+  ASSERT_TRUE(copy_string16->GetAsString(&copy_string16_value));
+  ASSERT_EQ(std::string("hello16"), copy_string_value);
+  ASSERT_EQ(ASCIIToUTF16("hello16"), copy_string16_value);
+
+  Value* copy_binary = nullptr;
+  ASSERT_TRUE(copy_dict->Get("binary", &copy_binary));
+  ASSERT_TRUE(copy_binary);
+  ASSERT_NE(copy_binary, binary_weak);
+  ASSERT_TRUE(copy_binary->is_blob());
+  ASSERT_NE(binary_weak->GetBlob().data(), copy_binary->GetBlob().data());
+  ASSERT_EQ(binary_weak->GetBlob(), copy_binary->GetBlob());
+
+  Value* copy_value = nullptr;
+  ASSERT_TRUE(copy_dict->Get("list", &copy_value));
+  ASSERT_TRUE(copy_value);
+  ASSERT_NE(copy_value, list_weak);
+  ASSERT_TRUE(copy_value->is_list());
+  ListValue* copy_list = nullptr;
+  ASSERT_TRUE(copy_value->GetAsList(&copy_list));
+  ASSERT_TRUE(copy_list);
+  ASSERT_EQ(2U, copy_list->GetSize());
+
+  Value* copy_list_element_0;
+  ASSERT_TRUE(copy_list->Get(0, &copy_list_element_0));
+  ASSERT_TRUE(copy_list_element_0);
+  ASSERT_NE(copy_list_element_0, list_element_0_weak);
+  int copy_list_element_0_value;
+  ASSERT_TRUE(copy_list_element_0->GetAsInteger(&copy_list_element_0_value));
+  ASSERT_EQ(0, copy_list_element_0_value);
+
+  Value* copy_list_element_1;
+  ASSERT_TRUE(copy_list->Get(1, &copy_list_element_1));
+  ASSERT_TRUE(copy_list_element_1);
+  ASSERT_NE(copy_list_element_1, list_element_1_weak);
+  int copy_list_element_1_value;
+  ASSERT_TRUE(copy_list_element_1->GetAsInteger(&copy_list_element_1_value));
+  ASSERT_EQ(1, copy_list_element_1_value);
+
+  copy_value = nullptr;
+  ASSERT_TRUE(copy_dict->Get("dictionary", &copy_value));
+  ASSERT_TRUE(copy_value);
+  ASSERT_NE(copy_value, dict_weak);
+  ASSERT_TRUE(copy_value->is_dict());
+  DictionaryValue* copy_nested_dictionary = nullptr;
+  ASSERT_TRUE(copy_value->GetAsDictionary(&copy_nested_dictionary));
+  ASSERT_TRUE(copy_nested_dictionary);
+  EXPECT_TRUE(copy_nested_dictionary->HasKey("key"));
+}
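+
+// Illustrative sketch, assuming Clone() is deep in the same way as
+// CreateDeepCopy(): the copy compares equal but shares no storage, so
+// mutating it leaves the original untouched.
+TEST(ValuesTest, CloneIsDeepSketch) {
+  Value root(Value::Type::DICTIONARY);
+  root.SetPath({"nested", "int"}, Value(5));
+  Value copy = root.Clone();
+  EXPECT_EQ(root, copy);
+  copy.SetPath({"nested", "int"}, Value(6));
+  EXPECT_NE(root, copy);
+  EXPECT_EQ(5, root.FindPath({"nested", "int"})->GetInt());
+}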
+
+TEST(ValuesTest, Equals) {
+  auto null1 = std::make_unique<Value>();
+  auto null2 = std::make_unique<Value>();
+  EXPECT_NE(null1.get(), null2.get());
+  EXPECT_EQ(*null1, *null2);
+
+  Value boolean(false);
+  EXPECT_NE(*null1, boolean);
+
+  DictionaryValue dv;
+  dv.SetBoolean("a", false);
+  dv.SetInteger("b", 2);
+  dv.SetDouble("c", 2.5);
+  dv.SetString("d1", "string");
+  dv.SetString("d2", ASCIIToUTF16("http://google.com"));
+  dv.Set("e", std::make_unique<Value>());
+
+  auto copy = dv.CreateDeepCopy();
+  EXPECT_EQ(dv, *copy);
+
+  std::unique_ptr<ListValue> list(new ListValue);
+  list->Append(std::make_unique<Value>());
+  list->Append(WrapUnique(new DictionaryValue));
+  auto list_copy = std::make_unique<Value>(list->Clone());
+
+  ListValue* list_weak = dv.SetList("f", std::move(list));
+  EXPECT_NE(dv, *copy);
+  copy->Set("f", std::move(list_copy));
+  EXPECT_EQ(dv, *copy);
+
+  list_weak->Append(std::make_unique<Value>(true));
+  EXPECT_NE(dv, *copy);
+
+  // Check that equality comparison detects differences in the keys alone.
+  copy = dv.CreateDeepCopy();
+  EXPECT_EQ(dv, *copy);
+  copy->Remove("a", nullptr);
+  copy->SetBoolean("aa", false);
+  EXPECT_NE(dv, *copy);
+}
+
+TEST(ValuesTest, Comparisons) {
+  // Test None Values.
+  Value null1;
+  Value null2;
+  EXPECT_EQ(null1, null2);
+  EXPECT_FALSE(null1 != null2);
+  EXPECT_FALSE(null1 < null2);
+  EXPECT_FALSE(null1 > null2);
+  EXPECT_LE(null1, null2);
+  EXPECT_GE(null1, null2);
+
+  // Test Bool Values.
+  Value bool1(false);
+  Value bool2(true);
+  EXPECT_FALSE(bool1 == bool2);
+  EXPECT_NE(bool1, bool2);
+  EXPECT_LT(bool1, bool2);
+  EXPECT_FALSE(bool1 > bool2);
+  EXPECT_LE(bool1, bool2);
+  EXPECT_FALSE(bool1 >= bool2);
+
+  // Test Int Values.
+  Value int1(1);
+  Value int2(2);
+  EXPECT_FALSE(int1 == int2);
+  EXPECT_NE(int1, int2);
+  EXPECT_LT(int1, int2);
+  EXPECT_FALSE(int1 > int2);
+  EXPECT_LE(int1, int2);
+  EXPECT_FALSE(int1 >= int2);
+
+  // Test Double Values.
+  Value double1(1.0);
+  Value double2(2.0);
+  EXPECT_FALSE(double1 == double2);
+  EXPECT_NE(double1, double2);
+  EXPECT_LT(double1, double2);
+  EXPECT_FALSE(double1 > double2);
+  EXPECT_LE(double1, double2);
+  EXPECT_FALSE(double1 >= double2);
+
+  // Test String Values.
+  Value string1("1");
+  Value string2("2");
+  EXPECT_FALSE(string1 == string2);
+  EXPECT_NE(string1, string2);
+  EXPECT_LT(string1, string2);
+  EXPECT_FALSE(string1 > string2);
+  EXPECT_LE(string1, string2);
+  EXPECT_FALSE(string1 >= string2);
+
+  // Test Binary Values.
+  Value binary1(Value::BlobStorage{0x01});
+  Value binary2(Value::BlobStorage{0x02});
+  EXPECT_FALSE(binary1 == binary2);
+  EXPECT_NE(binary1, binary2);
+  EXPECT_LT(binary1, binary2);
+  EXPECT_FALSE(binary1 > binary2);
+  EXPECT_LE(binary1, binary2);
+  EXPECT_FALSE(binary1 >= binary2);
+
+  // Test Empty List Values.
+  ListValue null_list1;
+  ListValue null_list2;
+  EXPECT_EQ(null_list1, null_list2);
+  EXPECT_FALSE(null_list1 != null_list2);
+  EXPECT_FALSE(null_list1 < null_list2);
+  EXPECT_FALSE(null_list1 > null_list2);
+  EXPECT_LE(null_list1, null_list2);
+  EXPECT_GE(null_list1, null_list2);
+
+  // Test Non Empty List Values.
+  ListValue int_list1;
+  ListValue int_list2;
+  int_list1.AppendInteger(1);
+  int_list2.AppendInteger(2);
+  EXPECT_FALSE(int_list1 == int_list2);
+  EXPECT_NE(int_list1, int_list2);
+  EXPECT_LT(int_list1, int_list2);
+  EXPECT_FALSE(int_list1 > int_list2);
+  EXPECT_LE(int_list1, int_list2);
+  EXPECT_FALSE(int_list1 >= int_list2);
+
+  // Test Empty Dict Values.
+  DictionaryValue null_dict1;
+  DictionaryValue null_dict2;
+  EXPECT_EQ(null_dict1, null_dict2);
+  EXPECT_FALSE(null_dict1 != null_dict2);
+  EXPECT_FALSE(null_dict1 < null_dict2);
+  EXPECT_FALSE(null_dict1 > null_dict2);
+  EXPECT_LE(null_dict1, null_dict2);
+  EXPECT_GE(null_dict1, null_dict2);
+
+  // Test Non Empty Dict Values.
+  DictionaryValue int_dict1;
+  DictionaryValue int_dict2;
+  int_dict1.SetInteger("key", 1);
+  int_dict2.SetInteger("key", 2);
+  EXPECT_FALSE(int_dict1 == int_dict2);
+  EXPECT_NE(int_dict1, int_dict2);
+  EXPECT_LT(int_dict1, int_dict2);
+  EXPECT_FALSE(int_dict1 > int_dict2);
+  EXPECT_LE(int_dict1, int_dict2);
+  EXPECT_FALSE(int_dict1 >= int_dict2);
+
+  // Test Values of different types.
+  std::vector<Value> values;
+  values.emplace_back(std::move(null1));
+  values.emplace_back(std::move(bool1));
+  values.emplace_back(std::move(int1));
+  values.emplace_back(std::move(double1));
+  values.emplace_back(std::move(string1));
+  values.emplace_back(std::move(binary1));
+  values.emplace_back(std::move(int_dict1));
+  values.emplace_back(std::move(int_list1));
+  for (size_t i = 0; i < values.size(); ++i) {
+    for (size_t j = i + 1; j < values.size(); ++j) {
+      EXPECT_FALSE(values[i] == values[j]);
+      EXPECT_NE(values[i], values[j]);
+      EXPECT_LT(values[i], values[j]);
+      EXPECT_FALSE(values[i] > values[j]);
+      EXPECT_LE(values[i], values[j]);
+      EXPECT_FALSE(values[i] >= values[j]);
+    }
+  }
+}
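+
+// Illustrative sketch of what the cross-type loop above implies: ordering
+// between different types is decided by the type alone, so even a large
+// INTEGER sorts before a small DOUBLE.
+TEST(ValuesTest, CrossTypeOrderingSketch) {
+  Value big_int(1000);
+  Value small_double(0.5);
+  EXPECT_LT(big_int, small_double);
+  EXPECT_FALSE(big_int > small_double);
+}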
+
+TEST(ValuesTest, DeepCopyCovariantReturnTypes) {
+  DictionaryValue original_dict;
+  Value* null_weak = original_dict.SetKey("null", Value());
+  Value* bool_weak = original_dict.SetKey("bool", Value(true));
+  Value* int_weak = original_dict.SetKey("int", Value(42));
+  Value* double_weak = original_dict.SetKey("double", Value(3.14));
+  Value* string_weak = original_dict.SetKey("string", Value("hello"));
+  Value* string16_weak =
+      original_dict.SetKey("string16", Value(ASCIIToUTF16("hello16")));
+  Value* binary_weak =
+      original_dict.SetKey("binary", Value(Value::BlobStorage(42, '!')));
+
+  Value::ListStorage storage;
+  storage.emplace_back(0);
+  storage.emplace_back(1);
+  Value* list_weak = original_dict.SetKey("list", Value(std::move(storage)));
+
+  auto copy_dict = std::make_unique<Value>(original_dict.Clone());
+  auto copy_null = std::make_unique<Value>(null_weak->Clone());
+  auto copy_bool = std::make_unique<Value>(bool_weak->Clone());
+  auto copy_int = std::make_unique<Value>(int_weak->Clone());
+  auto copy_double = std::make_unique<Value>(double_weak->Clone());
+  auto copy_string = std::make_unique<Value>(string_weak->Clone());
+  auto copy_string16 = std::make_unique<Value>(string16_weak->Clone());
+  auto copy_binary = std::make_unique<Value>(binary_weak->Clone());
+  auto copy_list = std::make_unique<Value>(list_weak->Clone());
+
+  EXPECT_EQ(original_dict, *copy_dict);
+  EXPECT_EQ(*null_weak, *copy_null);
+  EXPECT_EQ(*bool_weak, *copy_bool);
+  EXPECT_EQ(*int_weak, *copy_int);
+  EXPECT_EQ(*double_weak, *copy_double);
+  EXPECT_EQ(*string_weak, *copy_string);
+  EXPECT_EQ(*string16_weak, *copy_string16);
+  EXPECT_EQ(*binary_weak, *copy_binary);
+  EXPECT_EQ(*list_weak, *copy_list);
+}
+
+TEST(ValuesTest, RemoveEmptyChildren) {
+  auto root = std::make_unique<DictionaryValue>();
+  // Remove empty lists and dictionaries.
+  root->Set("empty_dict", std::make_unique<DictionaryValue>());
+  root->Set("empty_list", std::make_unique<ListValue>());
+  root->SetWithoutPathExpansion("a.b.c.d.e",
+                                std::make_unique<DictionaryValue>());
+  root = root->DeepCopyWithoutEmptyChildren();
+  EXPECT_TRUE(root->empty());
+
+  // Make sure we don't prune too much.
+  root->SetBoolean("bool", true);
+  root->Set("empty_dict", std::make_unique<DictionaryValue>());
+  root->SetString("empty_string", std::string());
+  root = root->DeepCopyWithoutEmptyChildren();
+  EXPECT_EQ(2U, root->size());
+
+  // Should do nothing.
+  root = root->DeepCopyWithoutEmptyChildren();
+  EXPECT_EQ(2U, root->size());
+
+  // Nested test cases.  These should all reduce back to the bool and string
+  // set above.
+  {
+    root->Set("a.b.c.d.e", std::make_unique<DictionaryValue>());
+    root = root->DeepCopyWithoutEmptyChildren();
+    EXPECT_EQ(2U, root->size());
+  }
+  {
+    auto inner = std::make_unique<DictionaryValue>();
+    inner->Set("empty_dict", std::make_unique<DictionaryValue>());
+    inner->Set("empty_list", std::make_unique<ListValue>());
+    root->Set("dict_with_empty_children", std::move(inner));
+    root = root->DeepCopyWithoutEmptyChildren();
+    EXPECT_EQ(2U, root->size());
+  }
+  {
+    auto inner = std::make_unique<ListValue>();
+    inner->Append(std::make_unique<DictionaryValue>());
+    inner->Append(std::make_unique<ListValue>());
+    root->Set("list_with_empty_children", std::move(inner));
+    root = root->DeepCopyWithoutEmptyChildren();
+    EXPECT_EQ(2U, root->size());
+  }
+
+  // Nested with siblings.
+  {
+    auto inner = std::make_unique<ListValue>();
+    inner->Append(std::make_unique<DictionaryValue>());
+    inner->Append(std::make_unique<ListValue>());
+    root->Set("list_with_empty_children", std::move(inner));
+    auto inner2 = std::make_unique<DictionaryValue>();
+    inner2->Set("empty_dict", std::make_unique<DictionaryValue>());
+    inner2->Set("empty_list", std::make_unique<ListValue>());
+    root->Set("dict_with_empty_children", std::move(inner2));
+    root = root->DeepCopyWithoutEmptyChildren();
+    EXPECT_EQ(2U, root->size());
+  }
+
+  // Make sure nested values don't get pruned.
+  {
+    auto inner = std::make_unique<ListValue>();
+    auto inner2 = std::make_unique<ListValue>();
+    inner2->Append(std::make_unique<Value>("hello"));
+    inner->Append(std::make_unique<DictionaryValue>());
+    inner->Append(std::move(inner2));
+    root->Set("list_with_empty_children", std::move(inner));
+    root = root->DeepCopyWithoutEmptyChildren();
+    EXPECT_EQ(3U, root->size());
+
+    ListValue* inner_value, *inner_value2;
+    EXPECT_TRUE(root->GetList("list_with_empty_children", &inner_value));
+    EXPECT_EQ(1U, inner_value->GetSize());  // Dictionary was pruned.
+    EXPECT_TRUE(inner_value->GetList(0, &inner_value2));
+    EXPECT_EQ(1U, inner_value2->GetSize());
+  }
+}
+
+TEST(ValuesTest, MergeDictionary) {
+  std::unique_ptr<DictionaryValue> base(new DictionaryValue);
+  base->SetString("base_key", "base_key_value_base");
+  base->SetString("collide_key", "collide_key_value_base");
+  std::unique_ptr<DictionaryValue> base_sub_dict(new DictionaryValue);
+  base_sub_dict->SetString("sub_base_key", "sub_base_key_value_base");
+  base_sub_dict->SetString("sub_collide_key", "sub_collide_key_value_base");
+  base->Set("sub_dict_key", std::move(base_sub_dict));
+
+  std::unique_ptr<DictionaryValue> merge(new DictionaryValue);
+  merge->SetString("merge_key", "merge_key_value_merge");
+  merge->SetString("collide_key", "collide_key_value_merge");
+  std::unique_ptr<DictionaryValue> merge_sub_dict(new DictionaryValue);
+  merge_sub_dict->SetString("sub_merge_key", "sub_merge_key_value_merge");
+  merge_sub_dict->SetString("sub_collide_key", "sub_collide_key_value_merge");
+  merge->Set("sub_dict_key", std::move(merge_sub_dict));
+
+  base->MergeDictionary(merge.get());
+
+  EXPECT_EQ(4U, base->size());
+  std::string base_key_value;
+  EXPECT_TRUE(base->GetString("base_key", &base_key_value));
+  EXPECT_EQ("base_key_value_base", base_key_value); // Base value preserved.
+  std::string collide_key_value;
+  EXPECT_TRUE(base->GetString("collide_key", &collide_key_value));
+  EXPECT_EQ("collide_key_value_merge", collide_key_value); // Replaced.
+  std::string merge_key_value;
+  EXPECT_TRUE(base->GetString("merge_key", &merge_key_value));
+  EXPECT_EQ("merge_key_value_merge", merge_key_value); // Merged in.
+
+  DictionaryValue* res_sub_dict;
+  EXPECT_TRUE(base->GetDictionary("sub_dict_key", &res_sub_dict));
+  EXPECT_EQ(3U, res_sub_dict->size());
+  std::string sub_base_key_value;
+  EXPECT_TRUE(res_sub_dict->GetString("sub_base_key", &sub_base_key_value));
+  EXPECT_EQ("sub_base_key_value_base", sub_base_key_value); // Preserved.
+  std::string sub_collide_key_value;
+  EXPECT_TRUE(res_sub_dict->GetString("sub_collide_key",
+                                      &sub_collide_key_value));
+  EXPECT_EQ("sub_collide_key_value_merge", sub_collide_key_value); // Replaced.
+  std::string sub_merge_key_value;
+  EXPECT_TRUE(res_sub_dict->GetString("sub_merge_key", &sub_merge_key_value));
+  EXPECT_EQ("sub_merge_key_value_merge", sub_merge_key_value); // Merged in.
+}
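+
+// Illustrative sketch condensing the merge rules exercised above: scalar
+// collisions are replaced by the merged-in value, while colliding
+// sub-dictionaries are merged recursively instead of being swapped wholesale.
+TEST(ValuesTest, MergeRulesSketch) {
+  DictionaryValue target;
+  target.SetString("scalar", "old");
+  target.SetString("sub.keep", "kept");
+
+  DictionaryValue source;
+  source.SetString("scalar", "new");
+  source.SetString("sub.add", "added");
+
+  target.MergeDictionary(&source);
+
+  std::string out;
+  EXPECT_TRUE(target.GetString("scalar", &out));
+  EXPECT_EQ("new", out);  // Scalar collision: replaced.
+  EXPECT_TRUE(target.GetString("sub.keep", &out));
+  EXPECT_EQ("kept", out);  // Recursive merge keeps existing sub-keys.
+  EXPECT_TRUE(target.GetString("sub.add", &out));
+  EXPECT_EQ("added", out);  // ...and adds the merged-in ones.
+}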
+
+TEST(ValuesTest, MergeDictionaryDeepCopy) {
+  std::unique_ptr<DictionaryValue> child(new DictionaryValue);
+  DictionaryValue* original_child = child.get();
+  child->SetString("test", "value");
+  EXPECT_EQ(1U, child->size());
+
+  std::string value;
+  EXPECT_TRUE(child->GetString("test", &value));
+  EXPECT_EQ("value", value);
+
+  std::unique_ptr<DictionaryValue> base(new DictionaryValue);
+  base->Set("dict", std::move(child));
+  EXPECT_EQ(1U, base->size());
+
+  DictionaryValue* ptr;
+  EXPECT_TRUE(base->GetDictionary("dict", &ptr));
+  EXPECT_EQ(original_child, ptr);
+
+  std::unique_ptr<DictionaryValue> merged(new DictionaryValue);
+  merged->MergeDictionary(base.get());
+  EXPECT_EQ(1U, merged->size());
+  EXPECT_TRUE(merged->GetDictionary("dict", &ptr));
+  EXPECT_NE(original_child, ptr);
+  EXPECT_TRUE(ptr->GetString("test", &value));
+  EXPECT_EQ("value", value);
+
+  original_child->SetString("test", "overwrite");
+  base.reset();
+  EXPECT_TRUE(ptr->GetString("test", &value));
+  EXPECT_EQ("value", value);
+}
+
+TEST(ValuesTest, DictionaryIterator) {
+  DictionaryValue dict;
+  for (DictionaryValue::Iterator it(dict); !it.IsAtEnd(); it.Advance()) {
+    ADD_FAILURE();
+  }
+
+  Value value1("value1");
+  dict.SetKey("key1", value1.Clone());
+  bool seen1 = false;
+  for (DictionaryValue::Iterator it(dict); !it.IsAtEnd(); it.Advance()) {
+    EXPECT_FALSE(seen1);
+    EXPECT_EQ("key1", it.key());
+    EXPECT_EQ(value1, it.value());
+    seen1 = true;
+  }
+  EXPECT_TRUE(seen1);
+
+  Value value2("value2");
+  dict.SetKey("key2", value2.Clone());
+  bool seen2 = seen1 = false;
+  for (DictionaryValue::Iterator it(dict); !it.IsAtEnd(); it.Advance()) {
+    if (it.key() == "key1") {
+      EXPECT_FALSE(seen1);
+      EXPECT_EQ(value1, it.value());
+      seen1 = true;
+    } else if (it.key() == "key2") {
+      EXPECT_FALSE(seen2);
+      EXPECT_EQ(value2, it.value());
+      seen2 = true;
+    } else {
+      ADD_FAILURE();
+    }
+  }
+  EXPECT_TRUE(seen1);
+  EXPECT_TRUE(seen2);
+}
+
+TEST(ValuesTest, StdDictionaryIterator) {
+  DictionaryValue dict;
+  for (auto it = dict.begin(); it != dict.end(); ++it) {
+    ADD_FAILURE();
+  }
+
+  Value value1("value1");
+  dict.SetKey("key1", value1.Clone());
+  bool seen1 = false;
+  for (const auto& it : dict) {
+    EXPECT_FALSE(seen1);
+    EXPECT_EQ("key1", it.first);
+    EXPECT_EQ(value1, *it.second);
+    seen1 = true;
+  }
+  EXPECT_TRUE(seen1);
+
+  Value value2("value2");
+  dict.SetKey("key2", value2.Clone());
+  bool seen2 = seen1 = false;
+  for (const auto& it : dict) {
+    if (it.first == "key1") {
+      EXPECT_FALSE(seen1);
+      EXPECT_EQ(value1, *it.second);
+      seen1 = true;
+    } else if (it.first == "key2") {
+      EXPECT_FALSE(seen2);
+      EXPECT_EQ(value2, *it.second);
+      seen2 = true;
+    } else {
+      ADD_FAILURE();
+    }
+  }
+  EXPECT_TRUE(seen1);
+  EXPECT_TRUE(seen2);
+}
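+
+// Illustrative sketch: the std-style begin()/end() pair makes DictionaryValue
+// directly usable with STL-style code; no claim is made here about iteration
+// order.
+TEST(ValuesTest, RangeForSketch) {
+  DictionaryValue dict;
+  dict.SetKey("a", Value(1));
+  dict.SetKey("b", Value(2));
+  size_t count = 0;
+  for (const auto& entry : dict) {
+    EXPECT_FALSE(entry.first.empty());
+    ++count;
+  }
+  EXPECT_EQ(2u, count);
+}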
+
+// DictionaryValue/ListValue's Get*() methods should accept NULL as an out-value
+// and still return true/false based on success.
+TEST(ValuesTest, GetWithNullOutValue) {
+  DictionaryValue main_dict;
+  ListValue main_list;
+
+  Value bool_value(false);
+  Value int_value(1234);
+  Value double_value(12.34567);
+  Value string_value("foo");
+  Value binary_value(Value::Type::BINARY);
+  DictionaryValue dict_value;
+  ListValue list_value;
+
+  main_dict.SetKey("bool", bool_value.Clone());
+  main_dict.SetKey("int", int_value.Clone());
+  main_dict.SetKey("double", double_value.Clone());
+  main_dict.SetKey("string", string_value.Clone());
+  main_dict.SetKey("binary", binary_value.Clone());
+  main_dict.SetKey("dict", dict_value.Clone());
+  main_dict.SetKey("list", list_value.Clone());
+
+  main_list.Append(std::make_unique<Value>(bool_value.Clone()));
+  main_list.Append(std::make_unique<Value>(int_value.Clone()));
+  main_list.Append(std::make_unique<Value>(double_value.Clone()));
+  main_list.Append(std::make_unique<Value>(string_value.Clone()));
+  main_list.Append(std::make_unique<Value>(binary_value.Clone()));
+  main_list.Append(std::make_unique<Value>(dict_value.Clone()));
+  main_list.Append(std::make_unique<Value>(list_value.Clone()));
+
+  EXPECT_TRUE(main_dict.Get("bool", nullptr));
+  EXPECT_TRUE(main_dict.Get("int", nullptr));
+  EXPECT_TRUE(main_dict.Get("double", nullptr));
+  EXPECT_TRUE(main_dict.Get("string", nullptr));
+  EXPECT_TRUE(main_dict.Get("binary", nullptr));
+  EXPECT_TRUE(main_dict.Get("dict", nullptr));
+  EXPECT_TRUE(main_dict.Get("list", nullptr));
+  EXPECT_FALSE(main_dict.Get("DNE", nullptr));
+
+  EXPECT_TRUE(main_dict.GetBoolean("bool", nullptr));
+  EXPECT_FALSE(main_dict.GetBoolean("int", nullptr));
+  EXPECT_FALSE(main_dict.GetBoolean("double", nullptr));
+  EXPECT_FALSE(main_dict.GetBoolean("string", nullptr));
+  EXPECT_FALSE(main_dict.GetBoolean("binary", nullptr));
+  EXPECT_FALSE(main_dict.GetBoolean("dict", nullptr));
+  EXPECT_FALSE(main_dict.GetBoolean("list", nullptr));
+  EXPECT_FALSE(main_dict.GetBoolean("DNE", nullptr));
+
+  EXPECT_FALSE(main_dict.GetInteger("bool", nullptr));
+  EXPECT_TRUE(main_dict.GetInteger("int", nullptr));
+  EXPECT_FALSE(main_dict.GetInteger("double", nullptr));
+  EXPECT_FALSE(main_dict.GetInteger("string", nullptr));
+  EXPECT_FALSE(main_dict.GetInteger("binary", nullptr));
+  EXPECT_FALSE(main_dict.GetInteger("dict", nullptr));
+  EXPECT_FALSE(main_dict.GetInteger("list", nullptr));
+  EXPECT_FALSE(main_dict.GetInteger("DNE", nullptr));
+
+  // Both int and double values can be obtained from GetDouble.
+  EXPECT_FALSE(main_dict.GetDouble("bool", nullptr));
+  EXPECT_TRUE(main_dict.GetDouble("int", nullptr));
+  EXPECT_TRUE(main_dict.GetDouble("double", nullptr));
+  EXPECT_FALSE(main_dict.GetDouble("string", nullptr));
+  EXPECT_FALSE(main_dict.GetDouble("binary", nullptr));
+  EXPECT_FALSE(main_dict.GetDouble("dict", nullptr));
+  EXPECT_FALSE(main_dict.GetDouble("list", nullptr));
+  EXPECT_FALSE(main_dict.GetDouble("DNE", nullptr));
+
+  EXPECT_FALSE(main_dict.GetString("bool", static_cast<std::string*>(nullptr)));
+  EXPECT_FALSE(main_dict.GetString("int", static_cast<std::string*>(nullptr)));
+  EXPECT_FALSE(
+      main_dict.GetString("double", static_cast<std::string*>(nullptr)));
+  EXPECT_TRUE(
+      main_dict.GetString("string", static_cast<std::string*>(nullptr)));
+  EXPECT_FALSE(
+      main_dict.GetString("binary", static_cast<std::string*>(nullptr)));
+  EXPECT_FALSE(main_dict.GetString("dict", static_cast<std::string*>(nullptr)));
+  EXPECT_FALSE(main_dict.GetString("list", static_cast<std::string*>(nullptr)));
+  EXPECT_FALSE(main_dict.GetString("DNE", static_cast<std::string*>(nullptr)));
+
+  EXPECT_FALSE(main_dict.GetString("bool", static_cast<string16*>(nullptr)));
+  EXPECT_FALSE(main_dict.GetString("int", static_cast<string16*>(nullptr)));
+  EXPECT_FALSE(main_dict.GetString("double", static_cast<string16*>(nullptr)));
+  EXPECT_TRUE(main_dict.GetString("string", static_cast<string16*>(nullptr)));
+  EXPECT_FALSE(main_dict.GetString("binary", static_cast<string16*>(nullptr)));
+  EXPECT_FALSE(main_dict.GetString("dict", static_cast<string16*>(nullptr)));
+  EXPECT_FALSE(main_dict.GetString("list", static_cast<string16*>(nullptr)));
+  EXPECT_FALSE(main_dict.GetString("DNE", static_cast<string16*>(nullptr)));
+
+  EXPECT_FALSE(main_dict.GetBinary("bool", nullptr));
+  EXPECT_FALSE(main_dict.GetBinary("int", nullptr));
+  EXPECT_FALSE(main_dict.GetBinary("double", nullptr));
+  EXPECT_FALSE(main_dict.GetBinary("string", nullptr));
+  EXPECT_TRUE(main_dict.GetBinary("binary", nullptr));
+  EXPECT_FALSE(main_dict.GetBinary("dict", nullptr));
+  EXPECT_FALSE(main_dict.GetBinary("list", nullptr));
+  EXPECT_FALSE(main_dict.GetBinary("DNE", nullptr));
+
+  EXPECT_FALSE(main_dict.GetDictionary("bool", nullptr));
+  EXPECT_FALSE(main_dict.GetDictionary("int", nullptr));
+  EXPECT_FALSE(main_dict.GetDictionary("double", nullptr));
+  EXPECT_FALSE(main_dict.GetDictionary("string", nullptr));
+  EXPECT_FALSE(main_dict.GetDictionary("binary", nullptr));
+  EXPECT_TRUE(main_dict.GetDictionary("dict", nullptr));
+  EXPECT_FALSE(main_dict.GetDictionary("list", nullptr));
+  EXPECT_FALSE(main_dict.GetDictionary("DNE", nullptr));
+
+  EXPECT_FALSE(main_dict.GetList("bool", nullptr));
+  EXPECT_FALSE(main_dict.GetList("int", nullptr));
+  EXPECT_FALSE(main_dict.GetList("double", nullptr));
+  EXPECT_FALSE(main_dict.GetList("string", nullptr));
+  EXPECT_FALSE(main_dict.GetList("binary", nullptr));
+  EXPECT_FALSE(main_dict.GetList("dict", nullptr));
+  EXPECT_TRUE(main_dict.GetList("list", nullptr));
+  EXPECT_FALSE(main_dict.GetList("DNE", nullptr));
+
+  EXPECT_TRUE(main_dict.GetWithoutPathExpansion("bool", nullptr));
+  EXPECT_TRUE(main_dict.GetWithoutPathExpansion("int", nullptr));
+  EXPECT_TRUE(main_dict.GetWithoutPathExpansion("double", nullptr));
+  EXPECT_TRUE(main_dict.GetWithoutPathExpansion("string", nullptr));
+  EXPECT_TRUE(main_dict.GetWithoutPathExpansion("binary", nullptr));
+  EXPECT_TRUE(main_dict.GetWithoutPathExpansion("dict", nullptr));
+  EXPECT_TRUE(main_dict.GetWithoutPathExpansion("list", nullptr));
+  EXPECT_FALSE(main_dict.GetWithoutPathExpansion("DNE", nullptr));
+
+  EXPECT_TRUE(main_dict.GetBooleanWithoutPathExpansion("bool", nullptr));
+  EXPECT_FALSE(main_dict.GetBooleanWithoutPathExpansion("int", nullptr));
+  EXPECT_FALSE(main_dict.GetBooleanWithoutPathExpansion("double", nullptr));
+  EXPECT_FALSE(main_dict.GetBooleanWithoutPathExpansion("string", nullptr));
+  EXPECT_FALSE(main_dict.GetBooleanWithoutPathExpansion("binary", nullptr));
+  EXPECT_FALSE(main_dict.GetBooleanWithoutPathExpansion("dict", nullptr));
+  EXPECT_FALSE(main_dict.GetBooleanWithoutPathExpansion("list", nullptr));
+  EXPECT_FALSE(main_dict.GetBooleanWithoutPathExpansion("DNE", nullptr));
+
+  EXPECT_FALSE(main_dict.GetIntegerWithoutPathExpansion("bool", nullptr));
+  EXPECT_TRUE(main_dict.GetIntegerWithoutPathExpansion("int", nullptr));
+  EXPECT_FALSE(main_dict.GetIntegerWithoutPathExpansion("double", nullptr));
+  EXPECT_FALSE(main_dict.GetIntegerWithoutPathExpansion("string", nullptr));
+  EXPECT_FALSE(main_dict.GetIntegerWithoutPathExpansion("binary", nullptr));
+  EXPECT_FALSE(main_dict.GetIntegerWithoutPathExpansion("dict", nullptr));
+  EXPECT_FALSE(main_dict.GetIntegerWithoutPathExpansion("list", nullptr));
+  EXPECT_FALSE(main_dict.GetIntegerWithoutPathExpansion("DNE", nullptr));
+
+  EXPECT_FALSE(main_dict.GetDoubleWithoutPathExpansion("bool", nullptr));
+  EXPECT_TRUE(main_dict.GetDoubleWithoutPathExpansion("int", nullptr));
+  EXPECT_TRUE(main_dict.GetDoubleWithoutPathExpansion("double", nullptr));
+  EXPECT_FALSE(main_dict.GetDoubleWithoutPathExpansion("string", nullptr));
+  EXPECT_FALSE(main_dict.GetDoubleWithoutPathExpansion("binary", nullptr));
+  EXPECT_FALSE(main_dict.GetDoubleWithoutPathExpansion("dict", nullptr));
+  EXPECT_FALSE(main_dict.GetDoubleWithoutPathExpansion("list", nullptr));
+  EXPECT_FALSE(main_dict.GetDoubleWithoutPathExpansion("DNE", nullptr));
+
+  EXPECT_FALSE(main_dict.GetStringWithoutPathExpansion(
+      "bool", static_cast<std::string*>(nullptr)));
+  EXPECT_FALSE(main_dict.GetStringWithoutPathExpansion(
+      "int", static_cast<std::string*>(nullptr)));
+  EXPECT_FALSE(main_dict.GetStringWithoutPathExpansion(
+      "double", static_cast<std::string*>(nullptr)));
+  EXPECT_TRUE(main_dict.GetStringWithoutPathExpansion(
+      "string", static_cast<std::string*>(nullptr)));
+  EXPECT_FALSE(main_dict.GetStringWithoutPathExpansion(
+      "binary", static_cast<std::string*>(nullptr)));
+  EXPECT_FALSE(main_dict.GetStringWithoutPathExpansion(
+      "dict", static_cast<std::string*>(nullptr)));
+  EXPECT_FALSE(main_dict.GetStringWithoutPathExpansion(
+      "list", static_cast<std::string*>(nullptr)));
+  EXPECT_FALSE(main_dict.GetStringWithoutPathExpansion(
+      "DNE", static_cast<std::string*>(nullptr)));
+
+  EXPECT_FALSE(main_dict.GetStringWithoutPathExpansion(
+      "bool", static_cast<string16*>(nullptr)));
+  EXPECT_FALSE(main_dict.GetStringWithoutPathExpansion(
+      "int", static_cast<string16*>(nullptr)));
+  EXPECT_FALSE(main_dict.GetStringWithoutPathExpansion(
+      "double", static_cast<string16*>(nullptr)));
+  EXPECT_TRUE(main_dict.GetStringWithoutPathExpansion(
+      "string", static_cast<string16*>(nullptr)));
+  EXPECT_FALSE(main_dict.GetStringWithoutPathExpansion(
+      "binary", static_cast<string16*>(nullptr)));
+  EXPECT_FALSE(main_dict.GetStringWithoutPathExpansion(
+      "dict", static_cast<string16*>(nullptr)));
+  EXPECT_FALSE(main_dict.GetStringWithoutPathExpansion(
+      "list", static_cast<string16*>(nullptr)));
+  EXPECT_FALSE(main_dict.GetStringWithoutPathExpansion(
+      "DNE", static_cast<string16*>(nullptr)));
+
+  // There is no GetBinaryWithoutPathExpansion for some reason, but if there
+  // were it should be tested here...
+
+  EXPECT_FALSE(main_dict.GetDictionaryWithoutPathExpansion("bool", nullptr));
+  EXPECT_FALSE(main_dict.GetDictionaryWithoutPathExpansion("int", nullptr));
+  EXPECT_FALSE(main_dict.GetDictionaryWithoutPathExpansion("double", nullptr));
+  EXPECT_FALSE(main_dict.GetDictionaryWithoutPathExpansion("string", nullptr));
+  EXPECT_FALSE(main_dict.GetDictionaryWithoutPathExpansion("binary", nullptr));
+  EXPECT_TRUE(main_dict.GetDictionaryWithoutPathExpansion("dict", nullptr));
+  EXPECT_FALSE(main_dict.GetDictionaryWithoutPathExpansion("list", nullptr));
+  EXPECT_FALSE(main_dict.GetDictionaryWithoutPathExpansion("DNE", nullptr));
+
+  EXPECT_FALSE(main_dict.GetListWithoutPathExpansion("bool", nullptr));
+  EXPECT_FALSE(main_dict.GetListWithoutPathExpansion("int", nullptr));
+  EXPECT_FALSE(main_dict.GetListWithoutPathExpansion("double", nullptr));
+  EXPECT_FALSE(main_dict.GetListWithoutPathExpansion("string", nullptr));
+  EXPECT_FALSE(main_dict.GetListWithoutPathExpansion("binary", nullptr));
+  EXPECT_FALSE(main_dict.GetListWithoutPathExpansion("dict", nullptr));
+  EXPECT_TRUE(main_dict.GetListWithoutPathExpansion("list", nullptr));
+  EXPECT_FALSE(main_dict.GetListWithoutPathExpansion("DNE", nullptr));
+
+  EXPECT_TRUE(main_list.Get(0, nullptr));
+  EXPECT_TRUE(main_list.Get(1, nullptr));
+  EXPECT_TRUE(main_list.Get(2, nullptr));
+  EXPECT_TRUE(main_list.Get(3, nullptr));
+  EXPECT_TRUE(main_list.Get(4, nullptr));
+  EXPECT_TRUE(main_list.Get(5, nullptr));
+  EXPECT_TRUE(main_list.Get(6, nullptr));
+  EXPECT_FALSE(main_list.Get(7, nullptr));
+
+  EXPECT_TRUE(main_list.GetBoolean(0, nullptr));
+  EXPECT_FALSE(main_list.GetBoolean(1, nullptr));
+  EXPECT_FALSE(main_list.GetBoolean(2, nullptr));
+  EXPECT_FALSE(main_list.GetBoolean(3, nullptr));
+  EXPECT_FALSE(main_list.GetBoolean(4, nullptr));
+  EXPECT_FALSE(main_list.GetBoolean(5, nullptr));
+  EXPECT_FALSE(main_list.GetBoolean(6, nullptr));
+  EXPECT_FALSE(main_list.GetBoolean(7, nullptr));
+
+  EXPECT_FALSE(main_list.GetInteger(0, nullptr));
+  EXPECT_TRUE(main_list.GetInteger(1, nullptr));
+  EXPECT_FALSE(main_list.GetInteger(2, nullptr));
+  EXPECT_FALSE(main_list.GetInteger(3, nullptr));
+  EXPECT_FALSE(main_list.GetInteger(4, nullptr));
+  EXPECT_FALSE(main_list.GetInteger(5, nullptr));
+  EXPECT_FALSE(main_list.GetInteger(6, nullptr));
+  EXPECT_FALSE(main_list.GetInteger(7, nullptr));
+
+  EXPECT_FALSE(main_list.GetDouble(0, nullptr));
+  EXPECT_TRUE(main_list.GetDouble(1, nullptr));
+  EXPECT_TRUE(main_list.GetDouble(2, nullptr));
+  EXPECT_FALSE(main_list.GetDouble(3, nullptr));
+  EXPECT_FALSE(main_list.GetDouble(4, nullptr));
+  EXPECT_FALSE(main_list.GetDouble(5, nullptr));
+  EXPECT_FALSE(main_list.GetDouble(6, nullptr));
+  EXPECT_FALSE(main_list.GetDouble(7, nullptr));
+
+  EXPECT_FALSE(main_list.GetString(0, static_cast<std::string*>(nullptr)));
+  EXPECT_FALSE(main_list.GetString(1, static_cast<std::string*>(nullptr)));
+  EXPECT_FALSE(main_list.GetString(2, static_cast<std::string*>(nullptr)));
+  EXPECT_TRUE(main_list.GetString(3, static_cast<std::string*>(nullptr)));
+  EXPECT_FALSE(main_list.GetString(4, static_cast<std::string*>(nullptr)));
+  EXPECT_FALSE(main_list.GetString(5, static_cast<std::string*>(nullptr)));
+  EXPECT_FALSE(main_list.GetString(6, static_cast<std::string*>(nullptr)));
+  EXPECT_FALSE(main_list.GetString(7, static_cast<std::string*>(nullptr)));
+
+  EXPECT_FALSE(main_list.GetString(0, static_cast<string16*>(nullptr)));
+  EXPECT_FALSE(main_list.GetString(1, static_cast<string16*>(nullptr)));
+  EXPECT_FALSE(main_list.GetString(2, static_cast<string16*>(nullptr)));
+  EXPECT_TRUE(main_list.GetString(3, static_cast<string16*>(nullptr)));
+  EXPECT_FALSE(main_list.GetString(4, static_cast<string16*>(nullptr)));
+  EXPECT_FALSE(main_list.GetString(5, static_cast<string16*>(nullptr)));
+  EXPECT_FALSE(main_list.GetString(6, static_cast<string16*>(nullptr)));
+  EXPECT_FALSE(main_list.GetString(7, static_cast<string16*>(nullptr)));
+
+  EXPECT_FALSE(main_list.GetDictionary(0, nullptr));
+  EXPECT_FALSE(main_list.GetDictionary(1, nullptr));
+  EXPECT_FALSE(main_list.GetDictionary(2, nullptr));
+  EXPECT_FALSE(main_list.GetDictionary(3, nullptr));
+  EXPECT_FALSE(main_list.GetDictionary(4, nullptr));
+  EXPECT_TRUE(main_list.GetDictionary(5, nullptr));
+  EXPECT_FALSE(main_list.GetDictionary(6, nullptr));
+  EXPECT_FALSE(main_list.GetDictionary(7, nullptr));
+
+  EXPECT_FALSE(main_list.GetList(0, nullptr));
+  EXPECT_FALSE(main_list.GetList(1, nullptr));
+  EXPECT_FALSE(main_list.GetList(2, nullptr));
+  EXPECT_FALSE(main_list.GetList(3, nullptr));
+  EXPECT_FALSE(main_list.GetList(4, nullptr));
+  EXPECT_FALSE(main_list.GetList(5, nullptr));
+  EXPECT_TRUE(main_list.GetList(6, nullptr));
+  EXPECT_FALSE(main_list.GetList(7, nullptr));
+}
+
+TEST(ValuesTest, SelfSwap) {
+  base::Value test(1);
+  std::swap(test, test);
+  EXPECT_EQ(1, test.GetInt());
+}
+
+TEST(ValuesTest, FromToUniquePtrValue) {
+  std::unique_ptr<DictionaryValue> dict = std::make_unique<DictionaryValue>();
+  dict->SetString("name", "Froogle");
+  dict->SetString("url", "http://froogle.com");
+  Value dict_copy = dict->Clone();
+
+  Value dict_converted = Value::FromUniquePtrValue(std::move(dict));
+  EXPECT_EQ(dict_copy, dict_converted);
+
+  std::unique_ptr<Value> val =
+      Value::ToUniquePtrValue(std::move(dict_converted));
+  EXPECT_EQ(dict_copy, *val);
+}
+
+}  // namespace base
diff --git a/base/version.cc b/base/version.cc
new file mode 100644
index 0000000..3a54607
--- /dev/null
+++ b/base/version.cc
@@ -0,0 +1,194 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/version.h"
+
+#include <stddef.h>
+
+#include <algorithm>
+
+#include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_util.h"
+
+namespace base {
+
+namespace {
+
+// Splits |version_str| on "." and converts each piece to an unsigned integer,
+// appending the results to |parsed|. Returns true only if every piece parses
+// as a valid number; an invalid item (including the wildcard character) makes
+// the whole parse fail.
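+// For example: "1.2.3" parses to {1, 2, 3}, while "1.2.*", "1.-1", and
+// "02.1" (leading zero in the first component) all fail.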
+bool ParseVersionNumbers(const std::string& version_str,
+                         std::vector<uint32_t>* parsed) {
+  std::vector<StringPiece> numbers =
+      SplitStringPiece(version_str, ".", KEEP_WHITESPACE, SPLIT_WANT_ALL);
+  if (numbers.empty())
+    return false;
+
+  for (auto it = numbers.begin(); it != numbers.end(); ++it) {
+    if (StartsWith(*it, "+", CompareCase::SENSITIVE))
+      return false;
+
+    unsigned int num;
+    if (!StringToUint(*it, &num))
+      return false;
+
+    // Disallow leading zeros, but only in the first item (e.g. "02.1" fails).
+    if (it == numbers.begin() && UintToString(num) != *it)
+      return false;
+
+    // StringToUint returns unsigned int but Version fields are uint32_t.
+    static_assert(sizeof(uint32_t) == sizeof(unsigned int),
+                  "uint32_t must be same as unsigned int");
+    parsed->push_back(num);
+  }
+  return true;
+}
+
+// Compares version components in |components1| with components in
+// |components2|. Returns -1, 0 or 1 if |components1| is less than, equal to,
+// or greater than |components2|, respectively.
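+// For instance: {1, 2} and {1, 2, 0} compare equal (trailing zero components
+// are ignored), while {1, 2} vs. {1, 2, 1} yields -1.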
+int CompareVersionComponents(const std::vector<uint32_t>& components1,
+                             const std::vector<uint32_t>& components2) {
+  const size_t count = std::min(components1.size(), components2.size());
+  for (size_t i = 0; i < count; ++i) {
+    if (components1[i] > components2[i])
+      return 1;
+    if (components1[i] < components2[i])
+      return -1;
+  }
+  if (components1.size() > components2.size()) {
+    for (size_t i = count; i < components1.size(); ++i) {
+      if (components1[i] > 0)
+        return 1;
+    }
+  } else if (components1.size() < components2.size()) {
+    for (size_t i = count; i < components2.size(); ++i) {
+      if (components2[i] > 0)
+        return -1;
+    }
+  }
+  return 0;
+}
+
+}  // namespace
+
+Version::Version() = default;
+
+Version::Version(const Version& other) = default;
+
+Version::~Version() = default;
+
+Version::Version(const std::string& version_str) {
+  std::vector<uint32_t> parsed;
+  if (!ParseVersionNumbers(version_str, &parsed))
+    return;
+
+  components_.swap(parsed);
+}
+
+Version::Version(std::vector<uint32_t> components)
+    : components_(std::move(components)) {}
+
+bool Version::IsValid() const {
+  return (!components_.empty());
+}
+
+// static
+bool Version::IsValidWildcardString(const std::string& wildcard_string) {
+  std::string version_string = wildcard_string;
+  if (EndsWith(version_string, ".*", CompareCase::SENSITIVE))
+    version_string.resize(version_string.size() - 2);
+
+  Version version(version_string);
+  return version.IsValid();
+}
+
+int Version::CompareToWildcardString(const std::string& wildcard_string) const {
+  DCHECK(IsValid());
+  DCHECK(Version::IsValidWildcardString(wildcard_string));
+
+  // Default behavior if the string doesn't end with a wildcard.
+  if (!EndsWith(wildcard_string, ".*", CompareCase::SENSITIVE)) {
+    Version version(wildcard_string);
+    DCHECK(version.IsValid());
+    return CompareTo(version);
+  }
+
+  std::vector<uint32_t> parsed;
+  const bool success = ParseVersionNumbers(
+      wildcard_string.substr(0, wildcard_string.length() - 2), &parsed);
+  DCHECK(success);
+  const int comparison = CompareVersionComponents(components_, parsed);
+  // If the version is smaller than the wildcard version's |parsed| vector,
+  // then the wildcard has no effect (e.g. comparing 1.2.3 and 1.3.*) and the
+  // version is still smaller. Same logic for equality (e.g. comparing 1.2.2 to
+  // 1.2.2.* is 0 regardless of the wildcard). Under this logic,
+  // 1.2.0.0.0.0 compared to 1.2.* is 0.
+  if (comparison == -1 || comparison == 0)
+    return comparison;
+
+  // Catch the case where the digits of |parsed| are found in |components_|,
+  // which means that the two are equal since |parsed| has a trailing "*".
+  // (e.g. 1.2.3 vs. 1.2.* will return 0). All other cases return 1 since
+  // components is greater (e.g. 3.2.3 vs 1.*).
+  DCHECK_GT(parsed.size(), 0UL);
+  const size_t min_num_comp = std::min(components_.size(), parsed.size());
+  for (size_t i = 0; i < min_num_comp; ++i) {
+    if (components_[i] != parsed[i])
+      return 1;
+  }
+  return 0;
+}
+
+int Version::CompareTo(const Version& other) const {
+  DCHECK(IsValid());
+  DCHECK(other.IsValid());
+  return CompareVersionComponents(components_, other.components_);
+}
+
+const std::string Version::GetString() const {
+  DCHECK(IsValid());
+  std::string version_str;
+  size_t count = components_.size();
+  for (size_t i = 0; i < count - 1; ++i) {
+    version_str.append(UintToString(components_[i]));
+    version_str.append(".");
+  }
+  version_str.append(UintToString(components_[count - 1]));
+  return version_str;
+}
+
+bool operator==(const Version& v1, const Version& v2) {
+  return v1.CompareTo(v2) == 0;
+}
+
+bool operator!=(const Version& v1, const Version& v2) {
+  return !(v1 == v2);
+}
+
+bool operator<(const Version& v1, const Version& v2) {
+  return v1.CompareTo(v2) < 0;
+}
+
+bool operator<=(const Version& v1, const Version& v2) {
+  return v1.CompareTo(v2) <= 0;
+}
+
+bool operator>(const Version& v1, const Version& v2) {
+  return v1.CompareTo(v2) > 0;
+}
+
+bool operator>=(const Version& v1, const Version& v2) {
+  return v1.CompareTo(v2) >= 0;
+}
+
+std::ostream& operator<<(std::ostream& stream, const Version& v) {
+  return stream << v.GetString();
+}
+
+}  // namespace base
diff --git a/base/version.h b/base/version.h
new file mode 100644
index 0000000..b3a0956
--- /dev/null
+++ b/base/version.h
@@ -0,0 +1,76 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_VERSION_H_
+#define BASE_VERSION_H_
+
+#include <stdint.h>
+
+#include <iosfwd>
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+
+namespace base {
+
+// Version represents a dotted version number, like "1.2.3.4", supporting
+// parsing and comparison.
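+//
+// A minimal usage sketch:
+//
+//   base::Version v("1.2.3");
+//   DCHECK(v.IsValid());
+//   if (v < base::Version("1.3"))  // "1.2.3" sorts before "1.3".
+//     ...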
+class BASE_EXPORT Version {
+ public:
+  // The only thing you can legally do to a default constructed
+  // Version object is assign to it.
+  Version();
+
+  Version(const Version& other);
+
+  // Initializes from a decimal dotted version number, like "0.1.1".
+  // Each component is limited to a uint32_t. Call IsValid() to learn
+  // the outcome.
+  explicit Version(const std::string& version_str);
+
+  // Initializes from a vector of components, like {1, 2, 3, 4}. Call IsValid()
+  // to learn the outcome.
+  explicit Version(std::vector<uint32_t> components);
+
+  ~Version();
+
+  // Returns true if the object contains a valid version number.
+  bool IsValid() const;
+
+  // Returns true if the version wildcard string is valid. The version wildcard
+  // string may end with ".*" (e.g. 1.2.*, 1.*). Any other arrangement with "*"
+  // is invalid (e.g. 1.*.3 or 1.2.3*). This function defaults to standard
+  // Version behavior (IsValid) if no wildcard is present.
+  static bool IsValidWildcardString(const std::string& wildcard_string);
+
+  // Returns -1, 0, 1 for <, ==, >.
+  int CompareTo(const Version& other) const;
+
+  // Given a valid version object, determines how this version compares to
+  // |wildcard_string|, returning -1, 0, or 1 as with CompareTo(). Falls back
+  // to plain CompareTo() if the string does not end in the wildcard sequence
+  // ".*". IsValidWildcardString(wildcard_string) must be true before using
+  // this function.
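+  // For example, Version("1.2.3").CompareToWildcardString("1.2.*") returns 0,
+  // while comparing against "1.3.*" returns -1.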
+  int CompareToWildcardString(const std::string& wildcard_string) const;
+
+  // Returns the string representation of this version.
+  const std::string GetString() const;
+
+  const std::vector<uint32_t>& components() const { return components_; }
+
+ private:
+  std::vector<uint32_t> components_;
+};
+
+BASE_EXPORT bool operator==(const Version& v1, const Version& v2);
+BASE_EXPORT bool operator!=(const Version& v1, const Version& v2);
+BASE_EXPORT bool operator<(const Version& v1, const Version& v2);
+BASE_EXPORT bool operator<=(const Version& v1, const Version& v2);
+BASE_EXPORT bool operator>(const Version& v1, const Version& v2);
+BASE_EXPORT bool operator>=(const Version& v1, const Version& v2);
+BASE_EXPORT std::ostream& operator<<(std::ostream& stream, const Version& v);
+
+}  // namespace base
+
+#endif  // BASE_VERSION_H_
diff --git a/base/version_unittest.cc b/base/version_unittest.cc
new file mode 100644
index 0000000..285ca9c
--- /dev/null
+++ b/base/version_unittest.cc
@@ -0,0 +1,200 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/version.h"
+
+#include <stddef.h>
+#include <stdint.h>
+#include <utility>
+#include <vector>
+
+#include "base/macros.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+TEST(VersionTest, DefaultConstructor) {
+  base::Version v;
+  EXPECT_FALSE(v.IsValid());
+}
+
+TEST(VersionTest, ValueSemantics) {
+  base::Version v1("1.2.3.4");
+  EXPECT_TRUE(v1.IsValid());
+  base::Version v3;
+  EXPECT_FALSE(v3.IsValid());
+  {
+    base::Version v2(v1);
+    v3 = v2;
+    EXPECT_TRUE(v2.IsValid());
+    EXPECT_EQ(v1, v2);
+  }
+  EXPECT_EQ(v3, v1);
+}
+
+TEST(VersionTest, MoveSemantics) {
+  // Note: |components| is deliberately non-const so that std::move() below
+  // actually moves rather than silently copying.
+  std::vector<uint32_t> components = {1, 2, 3, 4};
+  base::Version v1(std::move(components));
+  EXPECT_TRUE(v1.IsValid());
+  base::Version v2("1.2.3.4");
+  EXPECT_EQ(v1, v2);
+}
+
+TEST(VersionTest, GetVersionFromString) {
+  static const struct version_string {
+    const char* input;
+    size_t parts;
+    uint32_t firstpart;
+    bool success;
+  } cases[] = {
+    {"", 0, 0, false},
+    {" ", 0, 0, false},
+    {"\t", 0, 0, false},
+    {"\n", 0, 0, false},
+    {"  ", 0, 0, false},
+    {".", 0, 0, false},
+    {" . ", 0, 0, false},
+    {"0", 1, 0, true},
+    {"0.", 0, 0, false},
+    {"0.0", 2, 0, true},
+    {"4294967295.0", 2, 4294967295, true},
+    {"4294967296.0", 0, 0, false},
+    {"-1.0", 0, 0, false},
+    {"1.-1.0", 0, 0, false},
+    {"1,--1.0", 0, 0, false},
+    {"+1.0", 0, 0, false},
+    {"1.+1.0", 0, 0, false},
+    {"1+1.0", 0, 0, false},
+    {"++1.0", 0, 0, false},
+    {"1.0a", 0, 0, false},
+    {"1.2.3.4.5.6.7.8.9.0", 10, 1, true},
+    {"02.1", 0, 0, false},
+    {"0.01", 2, 0, true},
+    {"f.1", 0, 0, false},
+    {"15.007.20011", 3, 15, true},
+    {"15.5.28.130162", 4, 15, true},
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    base::Version version(cases[i].input);
+    EXPECT_EQ(cases[i].success, version.IsValid());
+    if (cases[i].success) {
+      EXPECT_EQ(cases[i].parts, version.components().size());
+      EXPECT_EQ(cases[i].firstpart, version.components()[0]);
+    }
+  }
+}
+
+TEST(VersionTest, Compare) {
+  static const struct version_compare {
+    const char* lhs;
+    const char* rhs;
+    int expected;
+  } cases[] = {
+      {"1.0", "1.0", 0},
+      {"1.0", "0.0", 1},
+      {"1.0", "2.0", -1},
+      {"1.0", "1.1", -1},
+      {"1.1", "1.0", 1},
+      {"1.0", "1.0.1", -1},
+      {"1.1", "1.0.1", 1},
+      {"1.1", "1.0.1", 1},
+      {"1.0.0", "1.0", 0},
+      {"1.0.3", "1.0.20", -1},
+      {"11.0.10", "15.007.20011", -1},
+      {"11.0.10", "15.5.28.130162", -1},
+      {"15.5.28.130162", "15.5.28.130162", 0},
+  };
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    base::Version lhs(cases[i].lhs);
+    base::Version rhs(cases[i].rhs);
+    EXPECT_EQ(lhs.CompareTo(rhs), cases[i].expected) <<
+        cases[i].lhs << " ? " << cases[i].rhs;
+    // CompareToWildcardString() should have same behavior as CompareTo() when
+    // no wildcards are present.
+    EXPECT_EQ(lhs.CompareToWildcardString(cases[i].rhs), cases[i].expected)
+        << cases[i].lhs << " ? " << cases[i].rhs;
+    EXPECT_EQ(rhs.CompareToWildcardString(cases[i].lhs), -cases[i].expected)
+        << cases[i].lhs << " ? " << cases[i].rhs;
+
+    // Test comparison operators
+    switch (cases[i].expected) {
+    case -1:
+      EXPECT_LT(lhs, rhs);
+      EXPECT_LE(lhs, rhs);
+      EXPECT_NE(lhs, rhs);
+      EXPECT_FALSE(lhs == rhs);
+      EXPECT_FALSE(lhs >= rhs);
+      EXPECT_FALSE(lhs > rhs);
+      break;
+    case 0:
+      EXPECT_FALSE(lhs < rhs);
+      EXPECT_LE(lhs, rhs);
+      EXPECT_FALSE(lhs != rhs);
+      EXPECT_EQ(lhs, rhs);
+      EXPECT_GE(lhs, rhs);
+      EXPECT_FALSE(lhs > rhs);
+      break;
+    case 1:
+      EXPECT_FALSE(lhs < rhs);
+      EXPECT_FALSE(lhs <= rhs);
+      EXPECT_NE(lhs, rhs);
+      EXPECT_FALSE(lhs == rhs);
+      EXPECT_GE(lhs, rhs);
+      EXPECT_GT(lhs, rhs);
+      break;
+    }
+  }
+}
+
+TEST(VersionTest, CompareToWildcardString) {
+  static const struct version_compare {
+    const char* lhs;
+    const char* rhs;
+    int expected;
+  } cases[] = {
+    {"1.0", "1.*", 0},
+    {"1.0", "0.*", 1},
+    {"1.0", "2.*", -1},
+    {"1.2.3", "1.2.3.*", 0},
+    {"10.0", "1.0.*", 1},
+    {"1.0", "3.0.*", -1},
+    {"1.4", "1.3.0.*", 1},
+    {"1.3.9", "1.3.*", 0},
+    {"1.4.1", "1.3.*", 1},
+    {"1.3", "1.4.5.*", -1},
+    {"1.5", "1.4.5.*", 1},
+    {"1.3.9", "1.3.*", 0},
+    {"1.2.0.0.0.0", "1.2.*", 0},
+  };
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    const base::Version version(cases[i].lhs);
+    const int result = version.CompareToWildcardString(cases[i].rhs);
+    EXPECT_EQ(result, cases[i].expected) << cases[i].lhs << "?" << cases[i].rhs;
+  }
+}
+
+TEST(VersionTest, IsValidWildcardString) {
+  static const struct version_compare {
+    const char* version;
+    bool expected;
+  } cases[] = {
+    {"1.0", true},
+    {"", false},
+    {"1.2.3.4.5.6", true},
+    {"1.2.3.*", true},
+    {"1.2.3.5*", false},
+    {"1.2.3.56*", false},
+    {"1.*.3", false},
+    {"20.*", true},
+    {"+2.*", false},
+    {"*", false},
+    {"*.2", false},
+  };
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    EXPECT_EQ(base::Version::IsValidWildcardString(cases[i].version),
+        cases[i].expected) << cases[i].version << "?" << cases[i].expected;
+  }
+}
+
+}  // namespace
diff --git a/base/vlog.cc b/base/vlog.cc
new file mode 100644
index 0000000..fbe1897
--- /dev/null
+++ b/base/vlog.cc
@@ -0,0 +1,181 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/vlog.h"
+
+#include <stddef.h>
+
+#include <ostream>
+#include <utility>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_split.h"
+
+namespace logging {
+
+const int VlogInfo::kDefaultVlogLevel = 0;
+
+struct VlogInfo::VmodulePattern {
+  enum MatchTarget { MATCH_MODULE, MATCH_FILE };
+
+  explicit VmodulePattern(const std::string& pattern);
+
+  VmodulePattern();
+
+  std::string pattern;
+  int vlog_level;
+  MatchTarget match_target;
+};
+
+VlogInfo::VmodulePattern::VmodulePattern(const std::string& pattern)
+    : pattern(pattern),
+      vlog_level(VlogInfo::kDefaultVlogLevel),
+      match_target(MATCH_MODULE) {
+  // If the pattern contains a {forward,back} slash, we assume that
+  // it's meant to be tested against the entire __FILE__ string.
+  std::string::size_type first_slash = pattern.find_first_of("\\/");
+  if (first_slash != std::string::npos)
+    match_target = MATCH_FILE;
+}
+
+VlogInfo::VmodulePattern::VmodulePattern()
+    : vlog_level(VlogInfo::kDefaultVlogLevel),
+      match_target(MATCH_MODULE) {}
+
+VlogInfo::VlogInfo(const std::string& v_switch,
+                   const std::string& vmodule_switch,
+                   int* min_log_level)
+    : min_log_level_(min_log_level) {
+  DCHECK_NE(min_log_level, nullptr);
+
+  int vlog_level = 0;
+  if (!v_switch.empty()) {
+    if (base::StringToInt(v_switch, &vlog_level)) {
+      SetMaxVlogLevel(vlog_level);
+    } else {
+      DLOG(WARNING) << "Could not parse v switch \"" << v_switch << "\"";
+    }
+  }
+
+  base::StringPairs kv_pairs;
+  if (!base::SplitStringIntoKeyValuePairs(
+          vmodule_switch, '=', ',', &kv_pairs)) {
+    DLOG(WARNING) << "Could not fully parse vmodule switch \""
+                  << vmodule_switch << "\"";
+  }
+  for (base::StringPairs::const_iterator it = kv_pairs.begin();
+       it != kv_pairs.end(); ++it) {
+    VmodulePattern pattern(it->first);
+    if (!base::StringToInt(it->second, &pattern.vlog_level)) {
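+      // Note: StringToInt() writes a best-effort value even on failure; that
+      // partial value is what gets logged and used for the pattern.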
+      DLOG(WARNING) << "Parsed vlog level for \""
+                    << it->first << "=" << it->second
+                    << "\" as " << pattern.vlog_level;
+    }
+    vmodule_levels_.push_back(pattern);
+  }
+}
+
+VlogInfo::~VlogInfo() = default;
+
+namespace {
+
+// Given a path, returns the basename with the extension chopped off
+// (and any -inl suffix).  We avoid using FilePath to minimize the
+// number of dependencies the logging system has.
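+// For example, GetModule("foo/bar/baz-inl.cc") returns "baz".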
+base::StringPiece GetModule(const base::StringPiece& file) {
+  base::StringPiece module(file);
+  base::StringPiece::size_type last_slash_pos =
+      module.find_last_of("\\/");
+  if (last_slash_pos != base::StringPiece::npos)
+    module.remove_prefix(last_slash_pos + 1);
+  base::StringPiece::size_type extension_start = module.rfind('.');
+  module = module.substr(0, extension_start);
+  static const char kInlSuffix[] = "-inl";
+  static const int kInlSuffixLen = arraysize(kInlSuffix) - 1;
+  if (module.ends_with(kInlSuffix))
+    module.remove_suffix(kInlSuffixLen);
+  return module;
+}
+
+}  // namespace
+
+int VlogInfo::GetVlogLevel(const base::StringPiece& file) const {
+  if (!vmodule_levels_.empty()) {
+    base::StringPiece module(GetModule(file));
+    for (std::vector<VmodulePattern>::const_iterator it =
+             vmodule_levels_.begin(); it != vmodule_levels_.end(); ++it) {
+      base::StringPiece target(
+          (it->match_target == VmodulePattern::MATCH_FILE) ? file : module);
+      if (MatchVlogPattern(target, it->pattern))
+        return it->vlog_level;
+    }
+  }
+  return GetMaxVlogLevel();
+}
+
+void VlogInfo::SetMaxVlogLevel(int level) {
+  // Log severity is the negative verbosity.
+  *min_log_level_ = -level;
+}
+
+int VlogInfo::GetMaxVlogLevel() const {
+  return -*min_log_level_;
+}
+
+bool MatchVlogPattern(const base::StringPiece& string,
+                      const base::StringPiece& vlog_pattern) {
+  base::StringPiece p(vlog_pattern);
+  base::StringPiece s(string);
+  // Consume characters until the next star.
+  while (!p.empty() && !s.empty() && (p[0] != '*')) {
+    switch (p[0]) {
+      // A slash (forward or back) must match a slash (forward or back).
+      case '/':
+      case '\\':
+        if ((s[0] != '/') && (s[0] != '\\'))
+          return false;
+        break;
+
+      // A '?' matches anything.
+      case '?':
+        break;
+
+      // Anything else must match literally.
+      default:
+        if (p[0] != s[0])
+          return false;
+        break;
+    }
+    p.remove_prefix(1), s.remove_prefix(1);
+  }
+
+  // An empty pattern here matches only an empty string.
+  if (p.empty())
+    return s.empty();
+
+  // Coalesce runs of consecutive stars.  There should be at least
+  // one.
+  while (!p.empty() && (p[0] == '*'))
+    p.remove_prefix(1);
+
+  // Since we moved past the stars, an empty pattern here matches
+  // anything.
+  if (p.empty())
+    return true;
+
+  // Since we moved past the stars and p is non-empty, if some
+  // non-empty substring of s matches p, then we ourselves match.
+  while (!s.empty()) {
+    if (MatchVlogPattern(s, p))
+      return true;
+    s.remove_prefix(1);
+  }
+
+  // Otherwise, we couldn't find a match.
+  return false;
+}
+
+}  // namespace logging
diff --git a/base/vlog.h b/base/vlog.h
new file mode 100644
index 0000000..2950904
--- /dev/null
+++ b/base/vlog.h
@@ -0,0 +1,77 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_VLOG_H_
+#define BASE_VLOG_H_
+
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/strings/string_piece.h"
+
+namespace logging {
+
+// A helper class containing all the settings for vlogging.
+class BASE_EXPORT VlogInfo {
+ public:
+  static const int kDefaultVlogLevel;
+
+  // |v_switch| gives the default maximal active V-logging level; 0 is
+  // the default.  Normally positive values are used for V-logging
+  // levels.
+  //
+  // |vmodule_switch| gives the per-module maximal V-logging levels to
+  // override the value given by |v_switch|.
+  // E.g. "my_module=2,foo*=3" would change the logging level for all
+  // code in source files "my_module.*" and "foo*.*" ("-inl" suffixes
+  // are also disregarded for this matching).
+  //
+  // |min_log_level| points to an int that stores the log level. If a valid
+  // |v_switch| is provided, it will set the log level, and the default
+  // vlog severity will be read from there.
+  //
+  // Any pattern containing a forward or backward slash will be tested
+  // against the whole pathname and not just the module.  E.g.,
+  // "*/foo/bar/*=2" would change the logging level for all code in
+  // source files under a "foo/bar" directory.
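+  //
+  // For example (assuming an int named |level|), VlogInfo("1", "foo=2",
+  // &level) gives GetVlogLevel("foo.cc") == 2 and GetVlogLevel("bar.cc") == 1.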
+  VlogInfo(const std::string& v_switch,
+           const std::string& vmodule_switch,
+           int* min_log_level);
+  ~VlogInfo();
+
+  // Returns the vlog level for a given file (usually taken from
+  // __FILE__).
+  int GetVlogLevel(const base::StringPiece& file) const;
+
+ private:
+  void SetMaxVlogLevel(int level);
+  int GetMaxVlogLevel() const;
+
+  // VmodulePattern holds all the information for each pattern parsed
+  // from |vmodule_switch|.
+  struct VmodulePattern;
+  std::vector<VmodulePattern> vmodule_levels_;
+  int* min_log_level_;
+
+  DISALLOW_COPY_AND_ASSIGN(VlogInfo);
+};
+
+// Returns true if the string passed in matches the vlog pattern.  The
+// vlog pattern string can contain wildcards like * and ?.  ? matches
+// exactly one character while * matches 0 or more characters.  Also,
+// as a special case, a / or \ character matches either / or \.
+//
+// Examples:
+//   "kh?n" matches "khan" but not "khn" or "khaan"
+//   "kh*n" matches "khn", "khan", or even "khaaaaan"
+//   "/foo\bar" matches "/foo/bar", "\foo\bar", or "/foo\bar"
+//     (disregarding C escaping rules)
+BASE_EXPORT bool MatchVlogPattern(const base::StringPiece& string,
+                                  const base::StringPiece& vlog_pattern);
+
+}  // namespace logging
+
+#endif  // BASE_VLOG_H_
diff --git a/base/vlog_unittest.cc b/base/vlog_unittest.cc
new file mode 100644
index 0000000..3c3f49c
--- /dev/null
+++ b/base/vlog_unittest.cc
@@ -0,0 +1,124 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/vlog.h"
+
+#include "base/logging.h"
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace logging {
+
+namespace {
+
+TEST(VlogTest, NoVmodule) {
+  int min_log_level = 0;
+  EXPECT_EQ(0,
+            VlogInfo(std::string(), std::string(), &min_log_level)
+                .GetVlogLevel("test1"));
+  EXPECT_EQ(0,
+            VlogInfo("0", std::string(), &min_log_level).GetVlogLevel("test2"));
+  EXPECT_EQ(
+      0, VlogInfo("blah", std::string(), &min_log_level).GetVlogLevel("test3"));
+  EXPECT_EQ(
+      0,
+      VlogInfo("0blah1", std::string(), &min_log_level).GetVlogLevel("test4"));
+  EXPECT_EQ(1,
+            VlogInfo("1", std::string(), &min_log_level).GetVlogLevel("test5"));
+  EXPECT_EQ(5,
+            VlogInfo("5", std::string(), &min_log_level).GetVlogLevel("test6"));
+}
+
+TEST(VlogTest, MatchVlogPattern) {
+  // Degenerate cases.
+  EXPECT_TRUE(MatchVlogPattern("", ""));
+  EXPECT_TRUE(MatchVlogPattern("", "****"));
+  EXPECT_FALSE(MatchVlogPattern("", "x"));
+  EXPECT_FALSE(MatchVlogPattern("x", ""));
+
+  // Basic.
+  EXPECT_TRUE(MatchVlogPattern("blah", "blah"));
+
+  // ? should match exactly one character.
+  EXPECT_TRUE(MatchVlogPattern("blah", "bl?h"));
+  EXPECT_FALSE(MatchVlogPattern("blh", "bl?h"));
+  EXPECT_FALSE(MatchVlogPattern("blaah", "bl?h"));
+  EXPECT_TRUE(MatchVlogPattern("blah", "?lah"));
+  EXPECT_FALSE(MatchVlogPattern("lah", "?lah"));
+  EXPECT_FALSE(MatchVlogPattern("bblah", "?lah"));
+
+  // * can match any number (even 0) of characters.
+  EXPECT_TRUE(MatchVlogPattern("blah", "bl*h"));
+  EXPECT_TRUE(MatchVlogPattern("blabcdefh", "bl*h"));
+  EXPECT_TRUE(MatchVlogPattern("blh", "bl*h"));
+  EXPECT_TRUE(MatchVlogPattern("blah", "*blah"));
+  EXPECT_TRUE(MatchVlogPattern("ohblah", "*blah"));
+  EXPECT_TRUE(MatchVlogPattern("blah", "blah*"));
+  EXPECT_TRUE(MatchVlogPattern("blahhhh", "blah*"));
+  EXPECT_TRUE(MatchVlogPattern("blahhhh", "blah*"));
+  EXPECT_TRUE(MatchVlogPattern("blah", "*blah*"));
+  EXPECT_TRUE(MatchVlogPattern("blahhhh", "*blah*"));
+  EXPECT_TRUE(MatchVlogPattern("bbbblahhhh", "*blah*"));
+
+  // Multiple *s should work fine.
+  EXPECT_TRUE(MatchVlogPattern("ballaah", "b*la*h"));
+  EXPECT_TRUE(MatchVlogPattern("blah", "b*la*h"));
+  EXPECT_TRUE(MatchVlogPattern("bbbblah", "b*la*h"));
+  EXPECT_TRUE(MatchVlogPattern("blaaah", "b*la*h"));
+
+  // There should be no escaping going on.
+  EXPECT_TRUE(MatchVlogPattern("bl\\ah", "bl\\?h"));
+  EXPECT_FALSE(MatchVlogPattern("bl?h", "bl\\?h"));
+  EXPECT_TRUE(MatchVlogPattern("bl\\aaaah", "bl\\*h"));
+  EXPECT_FALSE(MatchVlogPattern("bl*h", "bl\\*h"));
+
+  // Any slash matches any slash.
+  EXPECT_TRUE(MatchVlogPattern("/b\\lah", "/b\\lah"));
+  EXPECT_TRUE(MatchVlogPattern("\\b/lah", "/b\\lah"));
+}
+
+TEST(VlogTest, VmoduleBasic) {
+  const char kVSwitch[] = "-1";
+  const char kVModuleSwitch[] =
+      "foo=,bar=0,baz=blah,,qux=0blah1,quux=1,corge.ext=5";
+  int min_log_level = 0;
+  VlogInfo vlog_info(kVSwitch, kVModuleSwitch, &min_log_level);
+  EXPECT_EQ(-1, vlog_info.GetVlogLevel("/path/to/grault.cc"));
+  EXPECT_EQ(0, vlog_info.GetVlogLevel("/path/to/foo.cc"));
+  EXPECT_EQ(0, vlog_info.GetVlogLevel("D:\\Path\\To\\bar-inl.mm"));
+  EXPECT_EQ(-1, vlog_info.GetVlogLevel("D:\\path\\to what/bar_unittest.m"));
+  EXPECT_EQ(0, vlog_info.GetVlogLevel("baz.h"));
+  EXPECT_EQ(0, vlog_info.GetVlogLevel("/another/path/to/qux.h"));
+  EXPECT_EQ(1, vlog_info.GetVlogLevel("/path/to/quux"));
+  EXPECT_EQ(5, vlog_info.GetVlogLevel("c:\\path/to/corge.ext.h"));
+}
+
+TEST(VlogTest, VmoduleDirs) {
+  const char kVModuleSwitch[] =
+      "foo/bar.cc=1,baz\\*\\qux.cc=2,*quux/*=3,*/*-inl.h=4";
+  int min_log_level = 0;
+  VlogInfo vlog_info(std::string(), kVModuleSwitch, &min_log_level);
+  EXPECT_EQ(0, vlog_info.GetVlogLevel("/foo/bar.cc"));
+  EXPECT_EQ(0, vlog_info.GetVlogLevel("bar.cc"));
+  EXPECT_EQ(1, vlog_info.GetVlogLevel("foo/bar.cc"));
+
+  EXPECT_EQ(0, vlog_info.GetVlogLevel("baz/grault/qux.h"));
+  EXPECT_EQ(0, vlog_info.GetVlogLevel("/baz/grault/qux.cc"));
+  EXPECT_EQ(2, vlog_info.GetVlogLevel("baz/grault/qux.cc"));
+  EXPECT_EQ(2, vlog_info.GetVlogLevel("baz/grault/blah/qux.cc"));
+  EXPECT_EQ(2, vlog_info.GetVlogLevel("baz\\grault\\qux.cc"));
+  EXPECT_EQ(2, vlog_info.GetVlogLevel("baz\\grault//blah\\qux.cc"));
+
+  EXPECT_EQ(0, vlog_info.GetVlogLevel("/foo/bar/baz/quux.cc"));
+  EXPECT_EQ(3, vlog_info.GetVlogLevel("/foo/bar/baz/quux/grault.cc"));
+  EXPECT_EQ(3, vlog_info.GetVlogLevel("/foo\\bar/baz\\quux/grault.cc"));
+
+  EXPECT_EQ(0, vlog_info.GetVlogLevel("foo/bar/test-inl.cc"));
+  EXPECT_EQ(4, vlog_info.GetVlogLevel("foo/bar/test-inl.h"));
+  EXPECT_EQ(4, vlog_info.GetVlogLevel("foo/bar/baz/blah-inl.h"));
+}
+
+}  // namespace
+
+}  // namespace logging
diff --git a/base/win/BUILD.gn b/base/win/BUILD.gn
new file mode 100644
index 0000000..19c2982
--- /dev/null
+++ b/base/win/BUILD.gn
@@ -0,0 +1,32 @@
+# Copyright (c) 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/buildflag_header.gni")
+
+declare_args() {
+  # Indicates whether the handle verifier should operate in single-module
+  # mode. By default a single instance is shared by all modules.
+  single_module_mode_handle_verifier = false
+}
+
+# Ensure that the handle verifier always operates in single-module mode in
+# component builds.
+if (is_component_build) {
+  single_module_mode_handle_verifier = true
+}
+
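+# C++ code consumes the generated header roughly as follows (illustrative):
+#   #include "base/win/base_win_buildflags.h"
+#   #if BUILDFLAG(SINGLE_MODULE_MODE_HANDLE_VERIFIER)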
+buildflag_header("base_win_buildflags") {
+  header = "base_win_buildflags.h"
+  header_dir = "base/win"
+  flags = [
+    "SINGLE_MODULE_MODE_HANDLE_VERIFIER=$single_module_mode_handle_verifier",
+  ]
+}
+
+static_library("pe_image") {
+  sources = [
+    "pe_image.cc",
+    "pe_image.h",
+  ]
+}
diff --git a/base/win/OWNERS b/base/win/OWNERS
new file mode 100644
index 0000000..4593b2c
--- /dev/null
+++ b/base/win/OWNERS
@@ -0,0 +1,7 @@
+brucedawson@chromium.org
+grt@chromium.org
+jschuh@chromium.org
+robliao@chromium.org
+scottmg@chromium.org
+
+# COMPONENT: Internals>PlatformIntegration
diff --git a/base/win/async_operation.h b/base/win/async_operation.h
new file mode 100644
index 0000000..2c41ddf
--- /dev/null
+++ b/base/win/async_operation.h
@@ -0,0 +1,244 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_ASYNC_OPERATION_H_
+#define BASE_WIN_ASYNC_OPERATION_H_
+
+#include <unknwn.h>
+#include <windows.foundation.h>
+#include <wrl/async.h>
+#include <wrl/client.h>
+
+#include <type_traits>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
+#include "base/optional.h"
+#include "base/threading/thread_checker.h"
+
+namespace base {
+namespace win {
+
+// This file provides an implementation of Windows::Foundation::IAsyncOperation.
+// Specializations exist for "regular" types and interface types that inherit
+// from IUnknown. Both specializations expose a callback() method, which can be
+// used to provide the result that will be forwarded to the registered
+// completion handler. For regular types it expects an instance of that type,
+// and for interface types it expects a corresponding ComPtr. This class is
+// thread-affine and all member methods should be called on the same thread that
+// constructed the object. In order to offload heavy result computation,
+// base's PostTaskAndReplyWithResult() should be used with the ResultCallback
+// passed as a reply.
+//
+// Example usages:
+//
+// // Regular types
+// auto regular_op = WRL::Make<base::win::AsyncOperation<int>>();
+// auto cb = regular_op->callback();
+// regular_op->put_Completed(...event handler...);
+// ...
+// // This will invoke the event handler.
+// std::move(cb).Run(123);
+// ...
+// // Results can be queried:
+// int results = 0;
+// regular_op->GetResults(&results);
+// EXPECT_EQ(123, results);
+//
+// // Interface types
+// auto interface_op = WRL::Make<base::win::AsyncOperation<FooBar*>>();
+// auto cb = interface_op->callback();
+// interface_op->put_Completed(...event handler...);
+// ...
+// // This will invoke the event handler.
+// std::move(cb).Run(WRL::Make<IFooBarImpl>());
+// ...
+// // Results can be queried:
+// WRL::ComPtr<IFooBar> results;
+// interface_op->GetResults(&results);
+// // |results| points to the provided IFooBarImpl instance.
+//
+// // Offloading a heavy computation:
+// auto my_op = WRL::Make<base::win::AsyncOperation<FooBar*>>();
+// base::PostTaskAndReplyWithResult(
+//     base::BindOnce(MakeFooBar), my_op->callback());
+
+namespace internal {
+
+// Template tricks needed to dispatch to the correct implementation below.
+//
+// For all types which are neither InterfaceGroups nor RuntimeClasses, the
+// following three typedefs are synonyms for a single C++ type.  But for
+// InterfaceGroups and RuntimeClasses, they are different types:
+//   LogicalT: The C++ Type for the InterfaceGroup or RuntimeClass, when
+//             used as a template parameter.  Eg "RCFoo*"
+//   AbiT:     The C++ type for the default interface used to represent the
+//             InterfaceGroup or RuntimeClass when passed as a method parameter.
+//             Eg "IFoo*"
+//   ComplexT: An instantiation of the Internal "AggregateType" template that
+//             combines LogicalT with AbiT. Eg "AggregateType<RCFoo*,IFoo*>"
+//
+// windows.foundation.collections.h defines the following template and
+// semantics in Windows::Foundation::Internal:
+//
+// template <class LogicalType, class AbiType>
+// struct AggregateType;
+//
+//   LogicalType - the Windows Runtime type (eg, runtime class, interface group,
+//                 etc) being provided as an argument to an _impl template, when
+//                 that type cannot be represented at the ABI.
+//   AbiType     - the type used for marshalling, ie "at the ABI", for the
+//                 logical type.
+template <typename T>
+using ComplexT =
+    typename ABI::Windows::Foundation::IAsyncOperation<T>::TResult_complex;
+
+template <typename T>
+using AbiT =
+    typename ABI::Windows::Foundation::Internal::GetAbiType<ComplexT<T>>::type;
+
+template <typename T>
+using LogicalT = typename ABI::Windows::Foundation::Internal::GetLogicalType<
+    ComplexT<T>>::type;
+
+template <typename T>
+using InterfaceT = std::remove_pointer_t<AbiT<T>>;
+
+// Implementation of shared functionality.
+template <class T>
+class AsyncOperationBase
+    : public Microsoft::WRL::RuntimeClass<
+          Microsoft::WRL::RuntimeClassFlags<
+              Microsoft::WRL::WinRt | Microsoft::WRL::InhibitRoOriginateError>,
+          ABI::Windows::Foundation::IAsyncOperation<T>> {
+ public:
+  using Handler = ABI::Windows::Foundation::IAsyncOperationCompletedHandler<T>;
+
+  AsyncOperationBase() = default;
+  ~AsyncOperationBase() { DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); }
+
+  // ABI::Windows::Foundation::IAsyncOperation:
+  IFACEMETHODIMP put_Completed(Handler* handler) override {
+    DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+    handler_ = handler;
+    return S_OK;
+  }
+
+  IFACEMETHODIMP get_Completed(Handler** handler) override {
+    DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+    return handler_.CopyTo(handler);
+  }
+
+ protected:
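+  // Invokes the handler registered via put_Completed(). Note that this
+  // assumes a handler has been registered before the result callback runs.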
+  void InvokeCompletedHandler() {
+    handler_->Invoke(this, ABI::Windows::Foundation::AsyncStatus::Completed);
+  }
+
+  THREAD_CHECKER(thread_checker_);
+
+ private:
+  Microsoft::WRL::ComPtr<Handler> handler_;
+
+  DISALLOW_COPY_AND_ASSIGN(AsyncOperationBase);
+};
+
+}  // namespace internal
+
+template <typename T, typename Enable = void>
+class AsyncOperation;
+
+template <typename T>
+class AsyncOperation<
+    T,
+    std::enable_if_t<std::is_base_of<IUnknown, internal::InterfaceT<T>>::value>>
+    : public internal::AsyncOperationBase<T> {
+ public:
+  using InterfacePointer = Microsoft::WRL::ComPtr<internal::InterfaceT<T>>;
+  using ResultCallback = base::OnceCallback<void(InterfacePointer)>;
+
+  AsyncOperation() : weak_factory_(this) {
+    // Note: This can't be done in the constructor initializer list. This is
+    // because it relies on weak_factory_ to be initialized, which needs to be
+    // the last class member. Also applies below.
+    callback_ =
+        base::BindOnce(&AsyncOperation::OnResult, weak_factory_.GetWeakPtr());
+  }
+
+  ResultCallback callback() {
+    // Note: `this->` here and below is necessary due to the
+    // -Wmicrosoft-template compiler warning.
+    DCHECK_CALLED_ON_VALID_THREAD(this->thread_checker_);
+    DCHECK(!callback_.is_null());
+    return std::move(callback_);
+  }
+
+  // ABI::Windows::Foundation::IAsyncOperation:
+  IFACEMETHODIMP GetResults(internal::AbiT<T>* results) override {
+    DCHECK_CALLED_ON_VALID_THREAD(this->thread_checker_);
+    return ptr_ ? ptr_.CopyTo(results) : E_PENDING;
+  }
+
+ private:
+  void OnResult(InterfacePointer ptr) {
+    DCHECK_CALLED_ON_VALID_THREAD(this->thread_checker_);
+    DCHECK(!ptr_);
+    ptr_ = std::move(ptr);
+    this->InvokeCompletedHandler();
+  }
+
+  ResultCallback callback_;
+  InterfacePointer ptr_;
+  base::WeakPtrFactory<AsyncOperation> weak_factory_;
+};
+
+template <typename T>
+class AsyncOperation<
+    T,
+    std::enable_if_t<
+        !std::is_base_of<IUnknown, internal::InterfaceT<T>>::value>>
+    : public internal::AsyncOperationBase<T> {
+ public:
+  using ResultCallback = base::OnceCallback<void(T)>;
+
+  AsyncOperation() : weak_factory_(this) {
+    callback_ =
+        base::BindOnce(&AsyncOperation::OnResult, weak_factory_.GetWeakPtr());
+  }
+
+  ResultCallback callback() {
+    DCHECK_CALLED_ON_VALID_THREAD(this->thread_checker_);
+    DCHECK(!callback_.is_null());
+    return std::move(callback_);
+  }
+
+  // ABI::Windows::Foundation::IAsyncOperation:
+  IFACEMETHODIMP GetResults(internal::AbiT<T>* results) override {
+    DCHECK_CALLED_ON_VALID_THREAD(this->thread_checker_);
+    if (!value_)
+      return E_PENDING;
+
+    *results = *value_;
+    return S_OK;
+  }
+
+ private:
+  void OnResult(T result) {
+    DCHECK_CALLED_ON_VALID_THREAD(this->thread_checker_);
+    DCHECK(!value_);
+    value_.emplace(std::move(result));
+    this->InvokeCompletedHandler();
+  }
+
+  ResultCallback callback_;
+  base::Optional<T> value_;
+  base::WeakPtrFactory<AsyncOperation> weak_factory_;
+};
+
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_WIN_ASYNC_OPERATION_H_
diff --git a/base/win/async_operation_unittest.cc b/base/win/async_operation_unittest.cc
new file mode 100644
index 0000000..b29e181
--- /dev/null
+++ b/base/win/async_operation_unittest.cc
@@ -0,0 +1,174 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/async_operation.h"
+
+#include <utility>
+
+#include "base/test/gtest_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace WRL = Microsoft::WRL;
+
+using ABI::Windows::Foundation::IAsyncOperation;
+using ABI::Windows::Foundation::IAsyncOperationCompletedHandler;
+
+// In order to exercise the interface logic of AsyncOperation we define an empty
+// dummy interface, its implementation, and the necessary boilerplate to hook it
+// up with IAsyncOperation and IAsyncOperationCompletedHandler.
+namespace {
+
+// Chosen by fair `uuidgen` invocation. Also applies to the UUIDs below.
+MIDL_INTERFACE("756358C7-8083-4D78-9D27-9278B76096d4")
+IFooBar : public IInspectable {};
+
+class FooBar
+    : public WRL::RuntimeClass<
+          WRL::RuntimeClassFlags<WRL::WinRt | WRL::InhibitRoOriginateError>,
+          IFooBar> {};
+
+}  // namespace
+
+namespace ABI {
+namespace Windows {
+namespace Foundation {
+
+// Provide the required template specializations to register
+// IAsyncOperation<Foobar*> as an AggregateType. This is similar to how it is
+// done for UWP classes.
+template <>
+struct DECLSPEC_UUID("124858e4-f97e-409c-86ae-418c4781144c")
+    IAsyncOperation<FooBar*>
+    : IAsyncOperation_impl<Internal::AggregateType<FooBar*, IFooBar*>> {
+  static const wchar_t* z_get_rc_name_impl() {
+    return L"Windows.Foundation.IAsyncOperation<FooBar>";
+  }
+};
+
+template <>
+struct DECLSPEC_UUID("9e49373c-200c-4715-abd7-4214ba669c81")
+    IAsyncOperationCompletedHandler<FooBar*>
+    : IAsyncOperationCompletedHandler_impl<
+          Internal::AggregateType<FooBar*, IFooBar*>> {
+  static const wchar_t* z_get_rc_name_impl() {
+    return L"Windows.Foundation.AsyncOperationCompletedHandler<FooBar>";
+  }
+};
+
+}  // namespace Foundation
+}  // namespace Windows
+}  // namespace ABI
+
+namespace base {
+namespace win {
+
+namespace {
+
+// Utility method to add a completion callback to |async_op|. |*called_cb| will
+// be set to true once the callback is invoked.
+template <typename T>
+void PutCallback(AsyncOperation<T>* async_op, bool* called_cb) {
+  async_op->put_Completed(
+      WRL::Callback<IAsyncOperationCompletedHandler<T>>(
+          [=](IAsyncOperation<T>* iasync_op, AsyncStatus status) {
+            EXPECT_EQ(async_op, iasync_op);
+            *called_cb = true;
+            return S_OK;
+          })
+          .Get());
+}
+
+}  // namespace
+
+TEST(AsyncOperationTest, TestInt) {
+  bool called_cb = false;
+
+  auto int_op = WRL::Make<AsyncOperation<int>>();
+  PutCallback(int_op.Get(), &called_cb);
+
+  int results;
+  EXPECT_TRUE(FAILED(int_op->GetResults(&results)));
+  EXPECT_FALSE(called_cb);
+  int_op->callback().Run(123);
+
+  EXPECT_TRUE(called_cb);
+  EXPECT_TRUE(SUCCEEDED(int_op->GetResults(&results)));
+  EXPECT_EQ(123, results);
+
+  // GetResults should be idempotent.
+  EXPECT_TRUE(SUCCEEDED(int_op->GetResults(&results)));
+  EXPECT_EQ(123, results);
+}
+
+TEST(AsyncOperationTest, TestBool) {
+  bool called_cb = false;
+
+  auto bool_op = WRL::Make<AsyncOperation<bool>>();
+  PutCallback(bool_op.Get(), &called_cb);
+
+  // AsyncOperation<bool> is an aggregate of bool and boolean, and requires a
+  // pointer to the latter to get the results.
+  boolean results;
+  EXPECT_TRUE(FAILED(bool_op->GetResults(&results)));
+  EXPECT_FALSE(called_cb);
+  bool_op->callback().Run(true);
+
+  EXPECT_TRUE(called_cb);
+  EXPECT_TRUE(SUCCEEDED(bool_op->GetResults(&results)));
+  EXPECT_TRUE(results);
+}
+
+TEST(AsyncOperationTest, TestInterface) {
+  bool called_cb = false;
+
+  auto foobar_op = WRL::Make<AsyncOperation<FooBar*>>();
+  PutCallback(foobar_op.Get(), &called_cb);
+
+  // AsyncOperation<FooBar*> is an aggregate of FooBar* and IFooBar*.
+  WRL::ComPtr<IFooBar> results;
+  EXPECT_TRUE(FAILED(foobar_op->GetResults(&results)));
+  EXPECT_FALSE(called_cb);
+
+  auto foobar = WRL::Make<FooBar>();
+  IFooBar* foobar_ptr = foobar.Get();
+  foobar_op->callback().Run(std::move(foobar));
+
+  EXPECT_TRUE(called_cb);
+  EXPECT_TRUE(SUCCEEDED(foobar_op->GetResults(&results)));
+  EXPECT_EQ(foobar_ptr, results.Get());
+}
+
+TEST(AsyncOperationTest, TestIdempotence) {
+  bool called_cb = false;
+
+  auto int_op = WRL::Make<AsyncOperation<int>>();
+  PutCallback(int_op.Get(), &called_cb);
+
+  int results;
+  EXPECT_TRUE(FAILED(int_op->GetResults(&results)));
+  EXPECT_FALSE(called_cb);
+  // Calling GetResults twice shouldn't change the result.
+  EXPECT_TRUE(FAILED(int_op->GetResults(&results)));
+  EXPECT_FALSE(called_cb);
+
+  int_op->callback().Run(42);
+
+  EXPECT_TRUE(called_cb);
+  EXPECT_TRUE(SUCCEEDED(int_op->GetResults(&results)));
+  EXPECT_EQ(42, results);
+  // Calling GetResults twice shouldn't change the result.
+  EXPECT_TRUE(SUCCEEDED(int_op->GetResults(&results)));
+  EXPECT_EQ(42, results);
+}
+
+TEST(AsyncOperationTest, DoubleCallbackFails) {
+  auto int_op = WRL::Make<AsyncOperation<int>>();
+  auto cb = int_op->callback();
+
+  // Obtaining another callback should result in a DCHECK failure.
+  EXPECT_DCHECK_DEATH(int_op->callback());
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/com_init_check_hook.cc b/base/win/com_init_check_hook.cc
new file mode 100644
index 0000000..3da7622
--- /dev/null
+++ b/base/win/com_init_check_hook.cc
@@ -0,0 +1,298 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/com_init_check_hook.h"
+
+#include <windows.h>
+#include <objbase.h>
+#include <stdint.h>
+#include <string.h>
+
+#include "base/strings/stringprintf.h"
+#include "base/synchronization/lock.h"
+#include "base/win/com_init_util.h"
+#include "base/win/patch_util.h"
+
+namespace base {
+namespace win {
+
+#if defined(COM_INIT_CHECK_HOOK_ENABLED)
+
+namespace {
+
+// Hotpatchable Microsoft x86 32-bit functions take one of two forms:
+// Newer format:
+// RelAddr  Binary     Instruction                 Remarks
+//      -5  cc         int 3
+//      -4  cc         int 3
+//      -3  cc         int 3
+//      -2  cc         int 3
+//      -1  cc         int 3
+//       0  8bff       mov edi,edi                 Actual entry point and no-op.
+//       2  ...                                    Actual body.
+//
+// Older format:
+// RelAddr  Binary     Instruction                 Remarks
+//      -5  90         nop
+//      -4  90         nop
+//      -3  90         nop
+//      -2  90         nop
+//      -1  90         nop
+//       0  8bff       mov edi,edi                 Actual entry point and no-op.
+//       2  ...                                    Actual body.
+//
+// The "int 3" or nop sled as well as entry point no-op are critical, as they
+// are just enough to patch in a short backwards jump to -5 (2 bytes) then that
+// can do a relative 32-bit jump about 2GB before or after the current address.
+//
+// To perform a hotpatch, we need to figure out where we want to go and where
+// we are now as the final jump is relative. Let's say we want to jump to
+// 0x12345678. Relative jumps are calculated from eip, which for our jump is the
+// next instruction address. For the example above, that means we start at a 0
+// base address.
+//
+// Our patch will then look as follows:
+// RelAddr  Binary     Instruction                 Remarks
+//      -5  e978563412 jmp 0x12345678-(-0x5+0x5)   Note little-endian format.
+//       0  ebf9       jmp -0x5-(0x0+0x2)          Goes to RelAddr -0x5.
+//       2  ...                                    Actual body.
+// Note: The jmp instructions above are structured as
+//       Address(Destination)-(Address(jmp Instruction)+sizeof(jmp Instruction))
+
+// The struct below is provided for convenience and must be packed together
+// byte by byte with no word alignment padding. This comes at a very small
+// performance cost because the fields are now accessed via shifts, but it
+// improves readability.
+#pragma pack(push, 1)
+struct StructuredHotpatch {
+  unsigned char jmp_32_relative = 0xe9;  // jmp relative 32-bit.
+  int32_t relative_address = 0;          // 32-bit signed operand.
+  unsigned char jmp_8_relative = 0xeb;   // jmp relative 8-bit.
+  unsigned char back_address = 0xf9;     // Operand of -7.
+};
+#pragma pack(pop)
+
+static_assert(sizeof(StructuredHotpatch) == 7,
+              "Needs to be exactly 7 bytes for the hotpatch to work.");
+
+// nop Function Padding with "mov edi,edi"
+const unsigned char g_hotpatch_placeholder_nop[] = {0x90, 0x90, 0x90, 0x90,
+                                                    0x90, 0x8b, 0xff};
+
+// int 3 Function Padding with "mov edi,edi"
+const unsigned char g_hotpatch_placeholder_int3[] = {0xcc, 0xcc, 0xcc, 0xcc,
+                                                     0xcc, 0x8b, 0xff};
+
+class HookManager {
+ public:
+  static HookManager* GetInstance() {
+    static auto* hook_manager = new HookManager();
+    return hook_manager;
+  }
+
+  void RegisterHook() {
+    AutoLock auto_lock(lock_);
+    if (init_count_ == 0)
+      WriteHook();
+
+    ++init_count_;
+  }
+
+  void UnregisterHook() {
+    AutoLock auto_lock(lock_);
+    DCHECK_NE(0U, init_count_);
+    if (init_count_ == 1)
+      RevertHook();
+
+    --init_count_;
+  }
+
+ private:
+  enum class HotpatchPlaceholderFormat {
+    // The hotpatch placeholder format is currently unknown.
+    UNKNOWN,
+    // The hotpatch placeholder used int 3's in the sled.
+    INT3,
+    // The hotpatch placeholder used nop's in the sled.
+    NOP,
+    // This function has already been patched by a different component.
+    EXTERNALLY_PATCHED,
+  };
+
+  HookManager() = default;
+  ~HookManager() = default;
+
+  void WriteHook() {
+    lock_.AssertAcquired();
+    DCHECK(!ole32_library_);
+    ole32_library_ = ::LoadLibrary(L"ole32.dll");
+
+    if (!ole32_library_)
+      return;
+
+    // See the banner comment above for why this subtracts 5 bytes.
+    co_create_instance_padded_address_ =
+        reinterpret_cast<uint32_t>(
+            GetProcAddress(ole32_library_, "CoCreateInstance")) - 5;
+
+    // See the banner comment above for why this adds 7 bytes.
+    original_co_create_instance_body_function_ =
+        reinterpret_cast<decltype(original_co_create_instance_body_function_)>(
+            co_create_instance_padded_address_ + 7);
+
+    HotpatchPlaceholderFormat format = GetHotpatchPlaceholderFormat(
+        reinterpret_cast<const void*>(co_create_instance_padded_address_));
+    if (format == HotpatchPlaceholderFormat::UNKNOWN) {
+      NOTREACHED() << "Unrecognized hotpatch function format: "
+                   << FirstSevenBytesToString(
+                          co_create_instance_padded_address_);
+      return;
+    } else if (format == HotpatchPlaceholderFormat::EXTERNALLY_PATCHED) {
+      hotpatch_placeholder_format_ = format;
+      NOTREACHED() << "CoCreateInstance appears to be previously patched. ("
+                   << FirstSevenBytesToString(
+                          co_create_instance_padded_address_)
+                   << ")";
+      return;
+    }
+
+    uint32_t dchecked_co_create_instance_address =
+        reinterpret_cast<uint32_t>(&HookManager::DCheckedCoCreateInstance);
+    uint32_t jmp_offset_base_address = co_create_instance_padded_address_ + 5;
+    StructuredHotpatch structured_hotpatch;
+    structured_hotpatch.relative_address =
+        dchecked_co_create_instance_address - jmp_offset_base_address;
+
+    DCHECK_EQ(hotpatch_placeholder_format_, HotpatchPlaceholderFormat::UNKNOWN);
+    DWORD patch_result = internal::ModifyCode(
+        reinterpret_cast<void*>(co_create_instance_padded_address_),
+        reinterpret_cast<void*>(&structured_hotpatch),
+        sizeof(structured_hotpatch));
+    if (patch_result == NO_ERROR)
+      hotpatch_placeholder_format_ = format;
+  }
+
+  void RevertHook() {
+    lock_.AssertAcquired();
+    switch (hotpatch_placeholder_format_) {
+      case HotpatchPlaceholderFormat::INT3:
+        internal::ModifyCode(
+            reinterpret_cast<void*>(co_create_instance_padded_address_),
+            reinterpret_cast<const void*>(&g_hotpatch_placeholder_int3),
+            sizeof(g_hotpatch_placeholder_int3));
+        break;
+      case HotpatchPlaceholderFormat::NOP:
+        internal::ModifyCode(
+            reinterpret_cast<void*>(co_create_instance_padded_address_),
+            reinterpret_cast<const void*>(&g_hotpatch_placeholder_nop),
+            sizeof(g_hotpatch_placeholder_nop));
+        break;
+      case HotpatchPlaceholderFormat::EXTERNALLY_PATCHED:
+      case HotpatchPlaceholderFormat::UNKNOWN:
+        break;
+    }
+
+    hotpatch_placeholder_format_ = HotpatchPlaceholderFormat::UNKNOWN;
+
+    if (ole32_library_) {
+      ::FreeLibrary(ole32_library_);
+      ole32_library_ = nullptr;
+    }
+
+    co_create_instance_padded_address_ = 0;
+    original_co_create_instance_body_function_ = nullptr;
+  }
+
+  HotpatchPlaceholderFormat GetHotpatchPlaceholderFormat(const void* address) {
+    if (::memcmp(address,
+                 reinterpret_cast<const void*>(&g_hotpatch_placeholder_int3),
+                 sizeof(g_hotpatch_placeholder_int3)) == 0) {
+      return HotpatchPlaceholderFormat::INT3;
+    }
+
+    if (::memcmp(address,
+                 reinterpret_cast<const void*>(&g_hotpatch_placeholder_nop),
+                 sizeof(g_hotpatch_placeholder_nop)) == 0) {
+      return HotpatchPlaceholderFormat::NOP;
+    }
+
+    const unsigned char* instruction_bytes =
+        reinterpret_cast<const unsigned char*>(address);
+    const unsigned char entry_point_byte = instruction_bytes[5];
+    // Check for all of the common jmp opcodes.
+    if (entry_point_byte == 0xeb || entry_point_byte == 0xe9 ||
+        entry_point_byte == 0xff || entry_point_byte == 0xea) {
+      return HotpatchPlaceholderFormat::EXTERNALLY_PATCHED;
+    }
+
+    return HotpatchPlaceholderFormat::UNKNOWN;
+  }
+
+  static HRESULT __stdcall DCheckedCoCreateInstance(const CLSID& rclsid,
+                                                    IUnknown* pUnkOuter,
+                                                    DWORD dwClsContext,
+                                                    REFIID riid,
+                                                    void** ppv) {
+    // Chromium COM callers need to make sure that their thread is configured
+    // to process COM objects to avoid creating an implicit MTA or silently
+    // failing an STA object creation call due to the SUCCEEDED() pattern for
+    // COM calls.
+    //
+    // If you hit this assert as part of migrating to the Task Scheduler,
+    // evaluate your threading guarantees and dispatch your work with
+    // base::CreateCOMSTATaskRunnerWithTraits().
+    //
+    // If you need MTA support, ping //base/task_scheduler/OWNERS.
+    AssertComInitialized(
+        "CoCreateInstance calls in Chromium require explicit COM "
+        "initialization via base::CreateCOMSTATaskRunnerWithTraits() or "
+        "ScopedCOMInitializer. See the comment in DCheckedCoCreateInstance for "
+        "more details.");
+    return original_co_create_instance_body_function_(rclsid, pUnkOuter,
+                                                      dwClsContext, riid, ppv);
+  }
+
+  // Returns the first 7 bytes in hex as a string at |address|.
+  static std::string FirstSevenBytesToString(uint32_t address) {
+    const unsigned char* bytes =
+        reinterpret_cast<const unsigned char*>(address);
+    return base::StringPrintf("%02x %02x %02x %02x %02x %02x %02x", bytes[0],
+                              bytes[1], bytes[2], bytes[3], bytes[4], bytes[5],
+                              bytes[6]);
+  }
+
+  // Synchronizes everything in this class.
+  base::Lock lock_;
+  size_t init_count_ = 0;
+  HMODULE ole32_library_ = nullptr;
+  uint32_t co_create_instance_padded_address_ = 0;
+  HotpatchPlaceholderFormat hotpatch_placeholder_format_ =
+      HotpatchPlaceholderFormat::UNKNOWN;
+  static decltype(
+      ::CoCreateInstance)* original_co_create_instance_body_function_;
+
+  DISALLOW_COPY_AND_ASSIGN(HookManager);
+};
+
+decltype(::CoCreateInstance)*
+    HookManager::original_co_create_instance_body_function_ = nullptr;
+
+}  // namespace
+
+#endif  // defined(COM_INIT_CHECK_HOOK_ENABLED)
+
+ComInitCheckHook::ComInitCheckHook() {
+#if defined(COM_INIT_CHECK_HOOK_ENABLED)
+  HookManager::GetInstance()->RegisterHook();
+#endif  // defined(COM_INIT_CHECK_HOOK_ENABLED)
+}
+
+ComInitCheckHook::~ComInitCheckHook() {
+#if defined(COM_INIT_CHECK_HOOK_ENABLED)
+  HookManager::GetInstance()->UnregisterHook();
+#endif  // defined(COM_INIT_CHECK_HOOK_ENABLED)
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/com_init_check_hook.h b/base/win/com_init_check_hook.h
new file mode 100644
index 0000000..c998233
--- /dev/null
+++ b/base/win/com_init_check_hook.h
@@ -0,0 +1,43 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_COM_INIT_CHECK_HOOK_H_
+#define BASE_WIN_COM_INIT_CHECK_HOOK_H_
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "build/build_config.h"
+
+namespace base {
+namespace win {
+
+// Hotpatching is only supported on Intel 32-bit x86 processors because
+// Windows binaries contain a convenient 2-byte hotpatch no-op. This doesn't
+// exist in 64-bit binaries.
+
+#if DCHECK_IS_ON() && defined(ARCH_CPU_X86_FAMILY) &&             \
+    defined(ARCH_CPU_32_BITS) && !defined(GOOGLE_CHROME_BUILD) && \
+    !defined(OFFICIAL_BUILD) &&                                   \
+    !defined(COM_INIT_CHECK_HOOK_DISABLED)  // See crbug/737090 for details.
+#define COM_INIT_CHECK_HOOK_ENABLED
+#endif
+
+// Manages the installation of consistency DCHECK hooks for COM APIs that
+// require COM to be initialized, and only works if COM_INIT_CHECK_HOOK_ENABLED
+// is defined. Care should be taken if this is instantiated with multiple
+// threads running, as the hotpatch does not apply atomically.
+class BASE_EXPORT ComInitCheckHook {
+ public:
+  ComInitCheckHook();
+  ~ComInitCheckHook();
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ComInitCheckHook);
+};
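+
+// Usage sketch (illustrative): instantiate the hook for a scope in which
+// un-initialized CoCreateInstance calls should DCHECK, e.g. in a test:
+//
+//   {
+//     base::win::ComInitCheckHook hook;  // Patches CoCreateInstance.
+//     RunCodeUnderTest();                // Hypothetical function.
+//   }                                    // Hook reverts on destruction.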
+
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_WIN_COM_INIT_CHECK_HOOK_H_
diff --git a/base/win/com_init_check_hook_unittest.cc b/base/win/com_init_check_hook_unittest.cc
new file mode 100644
index 0000000..32aede4
--- /dev/null
+++ b/base/win/com_init_check_hook_unittest.cc
@@ -0,0 +1,134 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/com_init_check_hook.h"
+
+#include <objbase.h>
+#include <shlobj.h>
+#include <wrl/client.h>
+
+#include "base/test/gtest_util.h"
+#include "base/win/com_init_util.h"
+#include "base/win/patch_util.h"
+#include "base/win/scoped_com_initializer.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace win {
+
+using Microsoft::WRL::ComPtr;
+
+TEST(ComInitCheckHook, AssertNotInitialized) {
+  ComInitCheckHook com_check_hook;
+  AssertComApartmentType(ComApartmentType::NONE);
+  ComPtr<IUnknown> shell_link;
+#if defined(COM_INIT_CHECK_HOOK_ENABLED)
+  EXPECT_DCHECK_DEATH(::CoCreateInstance(CLSID_ShellLink, nullptr, CLSCTX_ALL,
+                                         IID_PPV_ARGS(&shell_link)));
+#else
+  EXPECT_EQ(CO_E_NOTINITIALIZED,
+            ::CoCreateInstance(CLSID_ShellLink, nullptr, CLSCTX_ALL,
+                               IID_PPV_ARGS(&shell_link)));
+#endif
+}
+
+TEST(ComInitCheckHook, HookRemoval) {
+  AssertComApartmentType(ComApartmentType::NONE);
+  { ComInitCheckHook com_check_hook; }
+  ComPtr<IUnknown> shell_link;
+  EXPECT_EQ(CO_E_NOTINITIALIZED,
+            ::CoCreateInstance(CLSID_ShellLink, nullptr, CLSCTX_ALL,
+                               IID_PPV_ARGS(&shell_link)));
+}
+
+TEST(ComInitCheckHook, NoAssertComInitialized) {
+  ComInitCheckHook com_check_hook;
+  ScopedCOMInitializer com_initializer;
+  ComPtr<IUnknown> shell_link;
+  EXPECT_TRUE(SUCCEEDED(::CoCreateInstance(CLSID_ShellLink, nullptr, CLSCTX_ALL,
+                                           IID_PPV_ARGS(&shell_link))));
+}
+
+TEST(ComInitCheckHook, MultipleHooks) {
+  ComInitCheckHook com_check_hook_1;
+  ComInitCheckHook com_check_hook_2;
+  AssertComApartmentType(ComApartmentType::NONE);
+  ComPtr<IUnknown> shell_link;
+#if defined(COM_INIT_CHECK_HOOK_ENABLED)
+  EXPECT_DCHECK_DEATH(::CoCreateInstance(CLSID_ShellLink, nullptr, CLSCTX_ALL,
+                                         IID_PPV_ARGS(&shell_link)));
+#else
+  EXPECT_EQ(CO_E_NOTINITIALIZED,
+            ::CoCreateInstance(CLSID_ShellLink, nullptr, CLSCTX_ALL,
+                               IID_PPV_ARGS(&shell_link)));
+#endif
+}
+
+TEST(ComInitCheckHook, UnexpectedHook) {
+#if defined(COM_INIT_CHECK_HOOK_ENABLED)
+  HMODULE ole32_library = ::LoadLibrary(L"ole32.dll");
+  ASSERT_TRUE(ole32_library);
+
+  uint32_t co_create_instance_padded_address =
+      reinterpret_cast<uint32_t>(
+          GetProcAddress(ole32_library, "CoCreateInstance")) - 5;
+  const unsigned char* co_create_instance_bytes =
+      reinterpret_cast<const unsigned char*>(co_create_instance_padded_address);
+  const unsigned char original_byte = co_create_instance_bytes[0];
+  const unsigned char unexpected_byte = 0xdb;
+  ASSERT_EQ(static_cast<DWORD>(NO_ERROR),
+            internal::ModifyCode(
+                reinterpret_cast<void*>(co_create_instance_padded_address),
+                reinterpret_cast<const void*>(&unexpected_byte),
+                sizeof(unexpected_byte)));
+
+  EXPECT_DCHECK_DEATH({ ComInitCheckHook com_check_hook; });
+
+  // If this call fails, really bad things are going to happen to other tests
+  // so CHECK here.
+  CHECK_EQ(static_cast<DWORD>(NO_ERROR),
+           internal::ModifyCode(
+               reinterpret_cast<void*>(co_create_instance_padded_address),
+               reinterpret_cast<const void*>(&original_byte),
+               sizeof(original_byte)));
+
+  ::FreeLibrary(ole32_library);
+  ole32_library = nullptr;
+#endif
+}
+
+TEST(ComInitCheckHook, ExternallyHooked) {
+#if defined(COM_INIT_CHECK_HOOK_ENABLED)
+  HMODULE ole32_library = ::LoadLibrary(L"ole32.dll");
+  ASSERT_TRUE(ole32_library);
+
+  uint32_t co_create_instance_address = reinterpret_cast<uint32_t>(
+      GetProcAddress(ole32_library, "CoCreateInstance"));
+  const unsigned char* co_create_instance_bytes =
+      reinterpret_cast<const unsigned char*>(co_create_instance_address);
+  const unsigned char original_byte = co_create_instance_bytes[0];
+  const unsigned char jmp_byte = 0xe9;
+  ASSERT_EQ(static_cast<DWORD>(NO_ERROR),
+            internal::ModifyCode(
+                reinterpret_cast<void*>(co_create_instance_address),
+                reinterpret_cast<const void*>(&jmp_byte), sizeof(jmp_byte)));
+
+  // Externally patched instances should crash so we catch these cases on bots.
+  EXPECT_DCHECK_DEATH({ ComInitCheckHook com_check_hook; });
+
+  // If this call fails, really bad things are going to happen to other tests
+  // so CHECK here.
+  CHECK_EQ(
+      static_cast<DWORD>(NO_ERROR),
+      internal::ModifyCode(reinterpret_cast<void*>(co_create_instance_address),
+                           reinterpret_cast<const void*>(&original_byte),
+                           sizeof(original_byte)));
+
+  ::FreeLibrary(ole32_library);
+  ole32_library = nullptr;
+#endif
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/com_init_util.cc b/base/win/com_init_util.cc
new file mode 100644
index 0000000..d81f420
--- /dev/null
+++ b/base/win/com_init_util.cc
@@ -0,0 +1,81 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/com_init_util.h"
+
+#include <windows.h>
+#include <winternl.h>
+
+namespace base {
+namespace win {
+
+#if DCHECK_IS_ON()
+
+namespace {
+
+const char kComNotInitialized[] = "COM is not initialized on this thread.";
+
+// Derived from combase.dll.
+struct OleTlsData {
+  enum ApartmentFlags {
+    LOGICAL_THREAD_REGISTERED = 0x2,
+    STA = 0x80,
+    MTA = 0x140,
+  };
+
+  void* thread_base;
+  void* sm_allocator;
+  DWORD apartment_id;
+  DWORD apartment_flags;
+  // There are many more fields than this, but for our purposes, we only care
+  // about |apartment_flags|. Correctly declaring the preceding fields keeps
+  // |apartment_flags| at the right offset in both x86 and x64 builds.
+};
+
+OleTlsData* GetOleTlsData() {
+  TEB* teb = NtCurrentTeb();
+  return reinterpret_cast<OleTlsData*>(teb->ReservedForOle);
+}
+
+ComApartmentType GetComApartmentTypeForThread() {
+  OleTlsData* ole_tls_data = GetOleTlsData();
+  if (!ole_tls_data)
+    return ComApartmentType::NONE;
+
+  if (ole_tls_data->apartment_flags & OleTlsData::ApartmentFlags::STA)
+    return ComApartmentType::STA;
+
+  if ((ole_tls_data->apartment_flags & OleTlsData::ApartmentFlags::MTA) ==
+      OleTlsData::ApartmentFlags::MTA) {
+    return ComApartmentType::MTA;
+  }
+
+  return ComApartmentType::NONE;
+}
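+
+// Note on the checks above: with the combase.dll-derived values, STA (0x80)
+// is a single bit and is tested as any-bit overlap, while MTA (0x140) spans
+// two bits and is therefore compared for full equality of the mask.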
+
+}  // namespace
+
+void AssertComInitialized(const char* message) {
+  if (GetComApartmentTypeForThread() != ComApartmentType::NONE)
+    return;
+
+  // COM worker threads don't always set up the apartment, but they do perform
+  // some thread registration, so we allow those.
+  OleTlsData* ole_tls_data = GetOleTlsData();
+  if (ole_tls_data && (ole_tls_data->apartment_flags &
+                       OleTlsData::ApartmentFlags::LOGICAL_THREAD_REGISTERED)) {
+    return;
+  }
+
+  NOTREACHED() << (message ? message : kComNotInitialized);
+}
+
+void AssertComApartmentType(ComApartmentType apartment_type) {
+  DCHECK_EQ(apartment_type, GetComApartmentTypeForThread());
+}
+
+#endif  // DCHECK_IS_ON()
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/com_init_util.h b/base/win/com_init_util.h
new file mode 100644
index 0000000..be5a1b4
--- /dev/null
+++ b/base/win/com_init_util.h
@@ -0,0 +1,41 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_COM_INIT_UTIL_H_
+#define BASE_WIN_COM_INIT_UTIL_H_
+
+#include "base/base_export.h"
+#include "base/logging.h"
+
+namespace base {
+namespace win {
+
+enum class ComApartmentType {
+  // Uninitialized or has an unrecognized apartment type.
+  NONE,
+  // Single-threaded Apartment.
+  STA,
+  // Multi-threaded Apartment.
+  MTA,
+};
+
+#if DCHECK_IS_ON()
+
+// DCHECKs if COM is not initialized on this thread as an STA or MTA.
+// |message| is optional and is used for the DCHECK if specified.
+BASE_EXPORT void AssertComInitialized(const char* message = nullptr);
+
+// DCHECKs if |apartment_type| is not the same as the current thread's apartment
+// type.
+BASE_EXPORT void AssertComApartmentType(ComApartmentType apartment_type);
+
+#else   // DCHECK_IS_ON()
+inline void AssertComInitialized(const char* message = nullptr) {}
+inline void AssertComApartmentType(ComApartmentType apartment_type) {}
+#endif  // DCHECK_IS_ON()
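+
+// Usage sketch (illustrative): guard a COM call site with the asserts above.
+//
+//   base::win::AssertComInitialized();
+//   Microsoft::WRL::ComPtr<IUnknown> unknown;
+//   HRESULT hr = ::CoCreateInstance(CLSID_ShellLink, nullptr, CLSCTX_ALL,
+//                                   IID_PPV_ARGS(&unknown));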
+
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_WIN_COM_INIT_UTIL_H_
diff --git a/base/win/com_init_util_unittest.cc b/base/win/com_init_util_unittest.cc
new file mode 100644
index 0000000..fb897dd
--- /dev/null
+++ b/base/win/com_init_util_unittest.cc
@@ -0,0 +1,75 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/com_init_util.h"
+
+#include "base/test/gtest_util.h"
+#include "base/win/scoped_com_initializer.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace win {
+
+TEST(ComInitUtil, AssertNotInitialized) {
+  EXPECT_DCHECK_DEATH(AssertComInitialized());
+}
+
+TEST(ComInitUtil, AssertUninitialized) {
+  // When COM is uninitialized, the TLS data will remain, but the apartment
+  // status will be updated. This covers that case.
+  {
+    ScopedCOMInitializer com_initializer;
+    ASSERT_TRUE(com_initializer.Succeeded());
+  }
+  EXPECT_DCHECK_DEATH(AssertComInitialized());
+}
+
+TEST(ComInitUtil, AssertSTAInitialized) {
+  ScopedCOMInitializer com_initializer;
+  ASSERT_TRUE(com_initializer.Succeeded());
+
+  AssertComInitialized();
+}
+
+TEST(ComInitUtil, AssertMTAInitialized) {
+  ScopedCOMInitializer com_initializer(ScopedCOMInitializer::kMTA);
+  ASSERT_TRUE(com_initializer.Succeeded());
+
+  AssertComInitialized();
+}
+
+TEST(ComInitUtil, AssertNoneApartmentType) {
+  AssertComApartmentType(ComApartmentType::NONE);
+  EXPECT_DCHECK_DEATH(AssertComApartmentType(ComApartmentType::STA));
+  EXPECT_DCHECK_DEATH(AssertComApartmentType(ComApartmentType::MTA));
+}
+
+TEST(ComInitUtil, AssertNoneApartmentTypeUninitialized) {
+  // When COM is uninitialized, the TLS data will remain, but the apartment
+  // status will be updated. This covers that case.
+  {
+    ScopedCOMInitializer com_initializer;
+    ASSERT_TRUE(com_initializer.Succeeded());
+  }
+  AssertComApartmentType(ComApartmentType::NONE);
+  EXPECT_DCHECK_DEATH(AssertComApartmentType(ComApartmentType::STA));
+  EXPECT_DCHECK_DEATH(AssertComApartmentType(ComApartmentType::MTA));
+}
+
+TEST(ComInitUtil, AssertSTAApartmentType) {
+  ScopedCOMInitializer com_initializer;
+  EXPECT_DCHECK_DEATH(AssertComApartmentType(ComApartmentType::NONE));
+  AssertComApartmentType(ComApartmentType::STA);
+  EXPECT_DCHECK_DEATH(AssertComApartmentType(ComApartmentType::MTA));
+}
+
+TEST(ComInitUtil, AssertMTAApartmentType) {
+  ScopedCOMInitializer com_initializer(ScopedCOMInitializer::kMTA);
+  EXPECT_DCHECK_DEATH(AssertComApartmentType(ComApartmentType::NONE));
+  EXPECT_DCHECK_DEATH(AssertComApartmentType(ComApartmentType::STA));
+  AssertComApartmentType(ComApartmentType::MTA);
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/core_winrt_util.cc b/base/win/core_winrt_util.cc
new file mode 100644
index 0000000..7a30490
--- /dev/null
+++ b/base/win/core_winrt_util.cc
@@ -0,0 +1,83 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/core_winrt_util.h"
+
+namespace {
+
+FARPROC LoadComBaseFunction(const char* function_name) {
+  static HMODULE const handle = ::LoadLibrary(L"combase.dll");
+  return handle ? ::GetProcAddress(handle, function_name) : nullptr;
+}
+
+decltype(&::RoInitialize) GetRoInitializeFunction() {
+  static decltype(&::RoInitialize) const function =
+      reinterpret_cast<decltype(&::RoInitialize)>(
+          LoadComBaseFunction("RoInitialize"));
+  return function;
+}
+
+decltype(&::RoUninitialize) GetRoUninitializeFunction() {
+  static decltype(&::RoUninitialize) const function =
+      reinterpret_cast<decltype(&::RoUninitialize)>(
+          LoadComBaseFunction("RoUninitialize"));
+  return function;
+}
+
+decltype(&::RoActivateInstance) GetRoActivateInstanceFunction() {
+  static decltype(&::RoActivateInstance) const function =
+      reinterpret_cast<decltype(&::RoActivateInstance)>(
+          LoadComBaseFunction("RoActivateInstance"));
+  return function;
+}
+
+decltype(&::RoGetActivationFactory) GetRoGetActivationFactoryFunction() {
+  static decltype(&::RoGetActivationFactory) const function =
+      reinterpret_cast<decltype(&::RoGetActivationFactory)>(
+          LoadComBaseFunction("RoGetActivationFactory"));
+  return function;
+}
+
+}  // namespace
+
+namespace base {
+namespace win {
+
+bool ResolveCoreWinRTDelayload() {
+  // TODO(finnur): Add AssertIOAllowed once crbug.com/770193 is fixed.
+  return GetRoInitializeFunction() && GetRoUninitializeFunction() &&
+         GetRoActivateInstanceFunction() && GetRoGetActivationFactoryFunction();
+}
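+
+// Callers are expected to run ResolveCoreWinRTDelayload() once before using
+// the Ro* wrappers below; a sketch (illustrative):
+//
+//   if (base::win::ResolveCoreWinRTDelayload())
+//     base::win::RoInitialize(RO_INIT_MULTITHREADED);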
+
+HRESULT RoInitialize(RO_INIT_TYPE init_type) {
+  auto ro_initialize_func = GetRoInitializeFunction();
+  if (!ro_initialize_func)
+    return E_FAIL;
+  return ro_initialize_func(init_type);
+}
+
+void RoUninitialize() {
+  auto ro_uninitialize_func = GetRoUninitializeFunction();
+  if (ro_uninitialize_func)
+    ro_uninitialize_func();
+}
+
+HRESULT RoGetActivationFactory(HSTRING class_id,
+                               const IID& iid,
+                               void** out_factory) {
+  auto get_factory_func = GetRoGetActivationFactoryFunction();
+  if (!get_factory_func)
+    return E_FAIL;
+  return get_factory_func(class_id, iid, out_factory);
+}
+
+HRESULT RoActivateInstance(HSTRING class_id, IInspectable** instance) {
+  auto activate_instance_func = GetRoActivateInstanceFunction();
+  if (!activate_instance_func)
+    return E_FAIL;
+  return activate_instance_func(class_id, instance);
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/core_winrt_util.h b/base/win/core_winrt_util.h
new file mode 100644
index 0000000..c86aed6
--- /dev/null
+++ b/base/win/core_winrt_util.h
@@ -0,0 +1,54 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_CORE_WINRT_UTIL_H_
+#define BASE_WIN_CORE_WINRT_UTIL_H_
+
+#include <hstring.h>
+#include <inspectable.h>
+#include <roapi.h>
+#include <windef.h>
+
+#include "base/base_export.h"
+#include "base/strings/string16.h"
+#include "base/win/scoped_hstring.h"
+
+namespace base {
+namespace win {
+
+// Provides access to Core WinRT functions which may not be available on
+// Windows 7. Loads functions dynamically at runtime to avoid a hard link-time
+// dependency on the library.
+
+BASE_EXPORT bool ResolveCoreWinRTDelayload();
+
+// The following stubs are provided for when the component build is enabled,
+// in order to avoid propagating the delay-loading of CoreWinRT to other
+// modules.
+
+BASE_EXPORT HRESULT RoInitialize(RO_INIT_TYPE init_type);
+
+BASE_EXPORT void RoUninitialize();
+
+BASE_EXPORT HRESULT RoGetActivationFactory(HSTRING class_id,
+                                           const IID& iid,
+                                           void** out_factory);
+
+BASE_EXPORT HRESULT RoActivateInstance(HSTRING class_id,
+                                       IInspectable** instance);
+
+// Retrieves an activation factory for the type specified.
+template <typename InterfaceType, char16 const* runtime_class_id>
+HRESULT GetActivationFactory(InterfaceType** factory) {
+  ScopedHString class_id_hstring = ScopedHString::Create(runtime_class_id);
+  if (!class_id_hstring.is_valid())
+    return E_FAIL;
+
+  return base::win::RoGetActivationFactory(class_id_hstring.get(),
+                                           IID_PPV_ARGS(factory));
+}
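+
+// Usage sketch (names are illustrative; the RuntimeClass_* constant and the
+// interface come from the corresponding WinRT header in real callers):
+//
+//   Microsoft::WRL::ComPtr<IToastNotificationManagerStatics> manager;
+//   HRESULT hr = base::win::GetActivationFactory<
+//       IToastNotificationManagerStatics,
+//       RuntimeClass_Windows_UI_Notifications_ToastNotificationManager>(
+//       &manager);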
+
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_WIN_CORE_WINRT_UTIL_H_
diff --git a/base/win/core_winrt_util_unittest.cc b/base/win/core_winrt_util_unittest.cc
new file mode 100644
index 0000000..11d08b8
--- /dev/null
+++ b/base/win/core_winrt_util_unittest.cc
@@ -0,0 +1,34 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/core_winrt_util.h"
+
+#include "base/win/com_init_util.h"
+#include "base/win/scoped_com_initializer.h"
+#include "base/win/windows_version.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace win {
+
+TEST(CoreWinrtUtilTest, PreloadFunctions) {
+  if (GetVersion() < VERSION_WIN8)
+    EXPECT_FALSE(ResolveCoreWinRTDelayload());
+  else
+    EXPECT_TRUE(ResolveCoreWinRTDelayload());
+}
+
+TEST(CoreWinrtUtilTest, RoInitializeAndUninitialize) {
+  if (GetVersion() < VERSION_WIN8)
+    return;
+
+  ASSERT_TRUE(ResolveCoreWinRTDelayload());
+  ASSERT_HRESULT_SUCCEEDED(base::win::RoInitialize(RO_INIT_MULTITHREADED));
+  AssertComApartmentType(ComApartmentType::MTA);
+  base::win::RoUninitialize();
+  AssertComApartmentType(ComApartmentType::NONE);
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/current_module.h b/base/win/current_module.h
new file mode 100644
index 0000000..ee141db
--- /dev/null
+++ b/base/win/current_module.h
@@ -0,0 +1,17 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_CURRENT_MODULE_H_
+#define BASE_WIN_CURRENT_MODULE_H_
+
+#include <windows.h>
+
+// http://blogs.msdn.com/oldnewthing/archive/2004/10/25/247180.aspx
+extern "C" IMAGE_DOS_HEADER __ImageBase;
+
+// Returns the HMODULE of the dll the macro was expanded in.
+// Only use in cc files, not in h files.
+#define CURRENT_MODULE() reinterpret_cast<HMODULE>(&__ImageBase)
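+
+// Example (illustrative): retrieve the path of the module this code was
+// linked into, which may differ from the main executable in a DLL build.
+//
+//   wchar_t path[MAX_PATH];
+//   ::GetModuleFileName(CURRENT_MODULE(), path, MAX_PATH);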
+
+#endif  // BASE_WIN_CURRENT_MODULE_H_
diff --git a/base/win/dllmain.cc b/base/win/dllmain.cc
new file mode 100644
index 0000000..907c7f4
--- /dev/null
+++ b/base/win/dllmain.cc
@@ -0,0 +1,123 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Windows doesn't support pthread_key_create's destr_function, and in fact
+// it's a bit tricky to get code to run when a thread exits.  This is
+// cargo-cult magic from http://www.codeproject.com/threads/tls.asp.
+// We are trying to be compatible with both a LoadLibrary style invocation, as
+// well as static linking. This code only needs to be included if we use
+// LoadLibrary, but it hooks into the "standard" set of TLS callbacks that are
+// provided for static linking.
+
+// This code is deliberately written to match the style of calls seen in
+// base/threading/thread_local_storage_win.cc.  Please keep the two in sync if
+// coding conventions are changed.
+
+// WARNING: Do *NOT* try to include this in the construction of the base
+// library, even though it potentially drives code in
+// base/threading/thread_local_storage_win.cc.  If you do, some users will end
+// up getting duplicate definition of DllMain() in some of their later links.
+
+// Force a reference to _tls_used to make the linker create the TLS directory
+// if it's not already there (that is, even if __declspec(thread) is not used).
+// Force a reference to p_thread_callback_dllmain_typical_entry to prevent whole
+// program optimization from discarding the variables.
+
+#include <windows.h>
+
+#include "base/compiler_specific.h"
+#include "base/win/win_util.h"
+
+// Indicate if another service is scanning the callbacks.  When this becomes
+// set to true, then DllMain() will stop supporting the callback service. This
+// value is set to true the first time any of our callbacks are called, as that
+// shows that some other service is handling callbacks.
+static bool linker_notifications_are_active = false;
+
+// This will be our mostly no-op callback that we'll list.  We won't
+// deliberately call it, and if it is called, that means we don't need to do any
+// of the callbacks anymore.  We expect such a call to arrive via a
+// THREAD_ATTACH message, long before we'd have to perform our THREAD_DETACH
+// callbacks.
+static void NTAPI on_callback(PVOID h, DWORD reason, PVOID reserved);
+
+#ifdef _WIN64
+
+#pragma comment(linker, "/INCLUDE:_tls_used")
+#pragma comment(linker, "/INCLUDE:p_thread_callback_dllmain_typical_entry")
+
+#else  // _WIN64
+
+#pragma comment(linker, "/INCLUDE:__tls_used")
+#pragma comment(linker, "/INCLUDE:_p_thread_callback_dllmain_typical_entry")
+
+#endif  // _WIN64
+
+// Explicitly depend on VC\crt\src\tlssup.c variables
+// to bracket the list of TLS callbacks.
+extern "C" PIMAGE_TLS_CALLBACK __xl_a, __xl_z;
+
+// extern "C" suppresses C++ name mangling so we know the symbol names for the
+// linker /INCLUDE:symbol pragmas above.
+extern "C" {
+#ifdef _WIN64
+
+// .CRT section is merged with .rdata on x64 so it must be constant data.
+#pragma const_seg(push, old_seg)
+// Use a typical possible name in the .CRT$XL? list of segments.
+#pragma const_seg(".CRT$XLB")
+// When defining a const variable, it must have external linkage to be sure the
+// linker doesn't discard it.
+extern const PIMAGE_TLS_CALLBACK p_thread_callback_dllmain_typical_entry;
+const PIMAGE_TLS_CALLBACK p_thread_callback_dllmain_typical_entry = on_callback;
+#pragma const_seg(pop, old_seg)
+
+#else  // _WIN64
+
+#pragma data_seg(push, old_seg)
+// Use a typical possible name in the .CRT$XL? list of segments.
+#pragma data_seg(".CRT$XLB")
+PIMAGE_TLS_CALLBACK p_thread_callback_dllmain_typical_entry = on_callback;
+#pragma data_seg(pop, old_seg)
+
+#endif  // _WIN64
+}  // extern "C"
+
+// Custom crash code to get a unique entry in crash reports.
+NOINLINE static void CrashOnProcessDetach() {
+  *static_cast<volatile int*>(0) = 0x356;
+}
+
+// Make DllMain call the listed callbacks.  This way any third parties that are
+// linked in will also be called.
+BOOL WINAPI DllMain(PVOID h, DWORD reason, PVOID reserved) {
+  if (DLL_PROCESS_DETACH == reason && base::win::ShouldCrashOnProcessDetach())
+    CrashOnProcessDetach();
+
+  if (DLL_THREAD_DETACH != reason && DLL_PROCESS_DETACH != reason)
+    return true;  // We won't service THREAD_ATTACH calls.
+
+  if (linker_notifications_are_active)
+    return true;  // Some other service is doing this work.
+
+  for (PIMAGE_TLS_CALLBACK* it = &__xl_a; it < &__xl_z; ++it) {
+    if (*it == NULL || *it == on_callback)
+      continue;  // Don't bother to call our own callback.
+    (*it)(h, reason, reserved);
+  }
+  return true;
+}
+
+static void NTAPI on_callback(PVOID h, DWORD reason, PVOID reserved) {
+  // Do nothing.  We were just a placeholder in the list used to test that we
+  // call all items.
+  // If we are called, it means that some other system is scanning the callbacks
+  // and we don't need to do so in DllMain().
+  linker_notifications_are_active = true;
+  // Note: If some other routine somehow plays this same game... we could both
+  // decide not to do the scanning <sigh>, but this trick should suppress
+  // duplicate calls on Vista, where the runtime takes care of the callbacks,
+  // and allow us to do the callbacks on XP, where we are currently devoid of
+  // callbacks (due to an explicit LoadLibrary call).
+}
diff --git a/base/win/enum_variant.cc b/base/win/enum_variant.cc
new file mode 100644
index 0000000..2975560
--- /dev/null
+++ b/base/win/enum_variant.cc
@@ -0,0 +1,83 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/enum_variant.h"
+
+#include <algorithm>
+
+#include "base/logging.h"
+
+namespace base {
+namespace win {
+
+EnumVariant::EnumVariant(unsigned long count)
+    : items_(new VARIANT[count]),
+      count_(count),
+      current_index_(0) {
+}
+
+EnumVariant::~EnumVariant() {
+}
+
+VARIANT* EnumVariant::ItemAt(unsigned long index) {
+  DCHECK(index < count_);
+  return &items_[index];
+}
+
+ULONG STDMETHODCALLTYPE EnumVariant::AddRef() {
+  return IUnknownImpl::AddRef();
+}
+
+ULONG STDMETHODCALLTYPE EnumVariant::Release() {
+  return IUnknownImpl::Release();
+}
+
+STDMETHODIMP EnumVariant::QueryInterface(REFIID riid, void** ppv) {
+  if (riid == IID_IEnumVARIANT) {
+    *ppv = static_cast<IEnumVARIANT*>(this);
+    AddRef();
+    return S_OK;
+  }
+
+  return IUnknownImpl::QueryInterface(riid, ppv);
+}
+
+STDMETHODIMP EnumVariant::Next(ULONG requested_count,
+                               VARIANT* out_elements,
+                               ULONG* out_elements_received) {
+  unsigned long count = std::min(requested_count, count_ - current_index_);
+  for (unsigned long i = 0; i < count; ++i)
+    out_elements[i] = items_[current_index_ + i];
+  current_index_ += count;
+  *out_elements_received = count;
+
+  return (count == requested_count ? S_OK : S_FALSE);
+}
+
+STDMETHODIMP EnumVariant::Skip(ULONG skip_count) {
+  unsigned long count = skip_count;
+  if (current_index_ + count > count_)
+    count = count_ - current_index_;
+
+  current_index_ += count;
+  return (count == skip_count ? S_OK : S_FALSE);
+}
+
+STDMETHODIMP EnumVariant::Reset() {
+  current_index_ = 0;
+  return S_OK;
+}
+
+STDMETHODIMP EnumVariant::Clone(IEnumVARIANT** out_cloned_object) {
+  EnumVariant* other = new EnumVariant(count_);
+  if (count_ > 0)
+    memcpy(other->ItemAt(0), &items_[0], count_ * sizeof(VARIANT));
+  other->Skip(current_index_);
+  other->AddRef();
+  *out_cloned_object = other;
+  return S_OK;
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/enum_variant.h b/base/win/enum_variant.h
new file mode 100644
index 0000000..e27afcd
--- /dev/null
+++ b/base/win/enum_variant.h
@@ -0,0 +1,53 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_ENUM_VARIANT_H_
+#define BASE_WIN_ENUM_VARIANT_H_
+
+#include <unknwn.h>
+
+#include <memory>
+
+#include "base/win/iunknown_impl.h"
+
+namespace base {
+namespace win {
+
+// A simple implementation of IEnumVARIANT.
+class BASE_EXPORT EnumVariant
+  : public IEnumVARIANT,
+    public IUnknownImpl {
+ public:
+  // The constructor allocates an array of size |count|. Use ItemAt to
+  // initialize each item in the array.
+  explicit EnumVariant(unsigned long count);
+
+  // Returns a mutable pointer to the item at position |index|.
+  VARIANT* ItemAt(unsigned long index);
+
+  // IUnknown.
+  ULONG STDMETHODCALLTYPE AddRef() override;
+  ULONG STDMETHODCALLTYPE Release() override;
+  STDMETHODIMP QueryInterface(REFIID riid, void** ppv) override;
+
+  // IEnumVARIANT.
+  STDMETHODIMP Next(ULONG requested_count,
+                    VARIANT* out_elements,
+                    ULONG* out_elements_received) override;
+  STDMETHODIMP Skip(ULONG skip_count) override;
+  STDMETHODIMP Reset() override;
+  STDMETHODIMP Clone(IEnumVARIANT** out_cloned_object) override;
+
+ private:
+  ~EnumVariant() override;
+
+  std::unique_ptr<VARIANT[]> items_;
+  unsigned long count_;
+  unsigned long current_index_;
+};
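+
+// Usage sketch (illustrative): build and fill a two-element enumerator.
+//
+//   base::win::EnumVariant* ev = new base::win::EnumVariant(2);
+//   ev->AddRef();
+//   ev->ItemAt(0)->vt = VT_I4;
+//   ev->ItemAt(0)->lVal = 10;
+//   ev->ItemAt(1)->vt = VT_I4;
+//   ev->ItemAt(1)->lVal = 20;
+//   ...
+//   ev->Release();  // Releases the enumerator when done.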
+
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_WIN_ENUM_VARIANT_H_
diff --git a/base/win/enum_variant_unittest.cc b/base/win/enum_variant_unittest.cc
new file mode 100644
index 0000000..288c97e
--- /dev/null
+++ b/base/win/enum_variant_unittest.cc
@@ -0,0 +1,118 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/enum_variant.h"
+
+#include "base/win/scoped_com_initializer.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace win {
+
+TEST(EnumVariantTest, EmptyEnumVariant) {
+  ScopedCOMInitializer com_initializer;
+
+  EnumVariant* ev = new EnumVariant(0);
+  ev->AddRef();
+
+  IUnknown* iunknown;
+  EXPECT_TRUE(SUCCEEDED(
+      ev->QueryInterface(IID_IUnknown, reinterpret_cast<void**>(&iunknown))));
+  iunknown->Release();
+
+  IEnumVARIANT* ienumvariant;
+  EXPECT_TRUE(SUCCEEDED(
+      ev->QueryInterface(IID_IEnumVARIANT,
+                         reinterpret_cast<void**>(&ienumvariant))));
+  EXPECT_EQ(ev, ienumvariant);
+  ienumvariant->Release();
+
+  VARIANT out_element;
+  ULONG out_received = 0;
+  EXPECT_EQ(S_FALSE, ev->Next(1, &out_element, &out_received));
+  EXPECT_EQ(0u, out_received);
+
+  EXPECT_EQ(S_FALSE, ev->Skip(1));
+
+  EXPECT_EQ(S_OK, ev->Reset());
+
+  IEnumVARIANT* ev2 = NULL;
+  EXPECT_EQ(S_OK, ev->Clone(&ev2));
+
+  EXPECT_NE(static_cast<IEnumVARIANT*>(NULL), ev2);
+  EXPECT_NE(ev, ev2);
+  EXPECT_EQ(S_FALSE, ev2->Skip(1));
+  EXPECT_EQ(S_OK, ev2->Reset());
+
+  ULONG ev2_finalrefcount = ev2->Release();
+  EXPECT_EQ(0u, ev2_finalrefcount);
+
+  ULONG ev_finalrefcount = ev->Release();
+  EXPECT_EQ(0u, ev_finalrefcount);
+}
+
+TEST(EnumVariantTest, SimpleEnumVariant) {
+  ScopedCOMInitializer com_initializer;
+
+  EnumVariant* ev = new EnumVariant(3);
+  ev->AddRef();
+  ev->ItemAt(0)->vt = VT_I4;
+  ev->ItemAt(0)->lVal = 10;
+  ev->ItemAt(1)->vt = VT_I4;
+  ev->ItemAt(1)->lVal = 20;
+  ev->ItemAt(2)->vt = VT_I4;
+  ev->ItemAt(2)->lVal = 30;
+
+  // Get elements one at a time.
+  VARIANT out_element;
+  ULONG out_received = 0;
+  EXPECT_EQ(S_OK, ev->Next(1, &out_element, &out_received));
+  EXPECT_EQ(1u, out_received);
+  EXPECT_EQ(VT_I4, out_element.vt);
+  EXPECT_EQ(10, out_element.lVal);
+  EXPECT_EQ(S_OK, ev->Skip(1));
+  EXPECT_EQ(S_OK, ev->Next(1, &out_element, &out_received));
+  EXPECT_EQ(1u, out_received);
+  EXPECT_EQ(VT_I4, out_element.vt);
+  EXPECT_EQ(30, out_element.lVal);
+  EXPECT_EQ(S_FALSE, ev->Next(1, &out_element, &out_received));
+
+  // Reset and get all elements at once.
+  VARIANT out_elements[3];
+  EXPECT_EQ(S_OK, ev->Reset());
+  EXPECT_EQ(S_OK, ev->Next(3, out_elements, &out_received));
+  EXPECT_EQ(3u, out_received);
+  EXPECT_EQ(VT_I4, out_elements[0].vt);
+  EXPECT_EQ(10, out_elements[0].lVal);
+  EXPECT_EQ(VT_I4, out_elements[1].vt);
+  EXPECT_EQ(20, out_elements[1].lVal);
+  EXPECT_EQ(VT_I4, out_elements[2].vt);
+  EXPECT_EQ(30, out_elements[2].lVal);
+  EXPECT_EQ(S_FALSE, ev->Next(1, &out_element, &out_received));
+
+  // Clone it.
+  IEnumVARIANT* ev2 = NULL;
+  EXPECT_EQ(S_OK, ev->Clone(&ev2));
+  EXPECT_TRUE(ev2 != NULL);
+  EXPECT_EQ(S_FALSE, ev->Next(1, &out_element, &out_received));
+  EXPECT_EQ(S_OK, ev2->Reset());
+  EXPECT_EQ(S_OK, ev2->Next(3, out_elements, &out_received));
+  EXPECT_EQ(3u, out_received);
+  EXPECT_EQ(VT_I4, out_elements[0].vt);
+  EXPECT_EQ(10, out_elements[0].lVal);
+  EXPECT_EQ(VT_I4, out_elements[1].vt);
+  EXPECT_EQ(20, out_elements[1].lVal);
+  EXPECT_EQ(VT_I4, out_elements[2].vt);
+  EXPECT_EQ(30, out_elements[2].lVal);
+  EXPECT_EQ(S_FALSE, ev2->Next(1, &out_element, &out_received));
+
+  ULONG ev2_finalrefcount = ev2->Release();
+  EXPECT_EQ(0u, ev2_finalrefcount);
+
+  ULONG ev_finalrefcount = ev->Release();
+  EXPECT_EQ(0u, ev_finalrefcount);
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/event_trace_consumer.h b/base/win/event_trace_consumer.h
new file mode 100644
index 0000000..9f97e0d
--- /dev/null
+++ b/base/win/event_trace_consumer.h
@@ -0,0 +1,150 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Declaration of a Windows event trace consumer base class.
+#ifndef BASE_WIN_EVENT_TRACE_CONSUMER_H_
+#define BASE_WIN_EVENT_TRACE_CONSUMER_H_
+
+#include <windows.h>
+#include <wmistr.h>
+#include <evntrace.h>
+#include <stddef.h>
+#include <vector>
+
+#include "base/macros.h"
+
+namespace base {
+namespace win {
+
+// This class is a base class that makes it easier to consume events
+// from realtime or file sessions. Concrete consumers need to subclass
+// a specialization of this class and override the ProcessEvent and/or
+// the ProcessBuffer methods to implement the event consumption logic.
+// Usage might look like:
+// class MyConsumer: public EtwTraceConsumerBase<MyConsumer> {
+//  protected:
+//    static VOID WINAPI ProcessEvent(PEVENT_TRACE event);
+// };
+//
+// MyConsumer consumer;
+// consumer.OpenFileSession(file_path);
+// consumer.Consume();
+template <class ImplClass>
+class EtwTraceConsumerBase {
+ public:
+  // Constructs a closed consumer.
+  EtwTraceConsumerBase() {
+  }
+
+  ~EtwTraceConsumerBase() {
+    Close();
+  }
+
+  // Opens the named realtime session, which must already exist.
+  // Note: You can use OpenRealtimeSession or OpenFileSession
+  //    to open as many as MAXIMUM_WAIT_OBJECTS (63) sessions at
+  //    any one time, though only one of them may be a realtime
+  //    session.
+  HRESULT OpenRealtimeSession(const wchar_t* session_name);
+
+  // Opens the event trace log in "file_name", which must be a full or
+  // relative path to an existing event trace log file.
+  // Note: You can use OpenRealtimeSession or OpenFileSession
+  //    to open as many as MAXIMUM_WAIT_OBJECTS (63) sessions at
+  //    any one time.
+  HRESULT OpenFileSession(const wchar_t* file_name);
+
+  // Consume all open sessions from beginning to end.
+  HRESULT Consume();
+
+  // Close all open sessions.
+  HRESULT Close();
+
+ protected:
+  // Override in subclasses to handle events.
+  static void ProcessEvent(EVENT_TRACE* event) {
+  }
+  // Override in subclasses to handle buffers.
+  static bool ProcessBuffer(EVENT_TRACE_LOGFILE* buffer) {
+    return true;  // keep going
+  }
+
+ protected:
+  // Currently open sessions.
+  std::vector<TRACEHANDLE> trace_handles_;
+
+ private:
+  // These delegate to ImplClass callbacks with saner signatures.
+  static void WINAPI ProcessEventCallback(EVENT_TRACE* event) {
+    ImplClass::ProcessEvent(event);
+  }
+  static ULONG WINAPI ProcessBufferCallback(PEVENT_TRACE_LOGFILE buffer) {
+    return ImplClass::ProcessBuffer(buffer);
+  }
+
+  DISALLOW_COPY_AND_ASSIGN(EtwTraceConsumerBase);
+};
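+
+// Sketch of a consumer that also inspects buffers (illustrative; the counter
+// is hypothetical):
+//
+//   class BufferCountingConsumer
+//       : public EtwTraceConsumerBase<BufferCountingConsumer> {
+//    public:
+//     static bool ProcessBuffer(EVENT_TRACE_LOGFILE* buffer) {
+//       ++buffer_count_;  // Tally each delivered buffer.
+//       return true;      // Keep consuming.
+//     }
+//     static int buffer_count_;
+//   };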
+
+template <class ImplClass> inline
+HRESULT EtwTraceConsumerBase<ImplClass>::OpenRealtimeSession(
+    const wchar_t* session_name) {
+  EVENT_TRACE_LOGFILE logfile = {};
+  logfile.LoggerName = const_cast<wchar_t*>(session_name);
+  logfile.LogFileMode = EVENT_TRACE_REAL_TIME_MODE;
+  logfile.BufferCallback = &ProcessBufferCallback;
+  logfile.EventCallback = &ProcessEventCallback;
+  logfile.Context = this;
+  TRACEHANDLE trace_handle = ::OpenTrace(&logfile);
+  if (reinterpret_cast<TRACEHANDLE>(INVALID_HANDLE_VALUE) == trace_handle)
+    return HRESULT_FROM_WIN32(::GetLastError());
+
+  trace_handles_.push_back(trace_handle);
+  return S_OK;
+}
+
+template <class ImplClass> inline
+HRESULT EtwTraceConsumerBase<ImplClass>::OpenFileSession(
+    const wchar_t* file_name) {
+  EVENT_TRACE_LOGFILE logfile = {};
+  logfile.LogFileName = const_cast<wchar_t*>(file_name);
+  logfile.BufferCallback = &ProcessBufferCallback;
+  logfile.EventCallback = &ProcessEventCallback;
+  logfile.Context = this;
+  TRACEHANDLE trace_handle = ::OpenTrace(&logfile);
+  if (reinterpret_cast<TRACEHANDLE>(INVALID_HANDLE_VALUE) == trace_handle)
+    return HRESULT_FROM_WIN32(::GetLastError());
+
+  trace_handles_.push_back(trace_handle);
+  return S_OK;
+}
+
+template <class ImplClass> inline
+HRESULT EtwTraceConsumerBase<ImplClass>::Consume() {
+  ULONG err = ::ProcessTrace(&trace_handles_[0],
+                             static_cast<ULONG>(trace_handles_.size()),
+                             NULL,
+                             NULL);
+  return HRESULT_FROM_WIN32(err);
+}
+
+template <class ImplClass> inline
+HRESULT EtwTraceConsumerBase<ImplClass>::Close() {
+  HRESULT hr = S_OK;
+  for (size_t i = 0; i < trace_handles_.size(); ++i) {
+    if (NULL != trace_handles_[i]) {
+      ULONG ret = ::CloseTrace(trace_handles_[i]);
+      trace_handles_[i] = NULL;
+
+      if (FAILED(HRESULT_FROM_WIN32(ret)))
+        hr = HRESULT_FROM_WIN32(ret);
+    }
+  }
+  trace_handles_.clear();
+
+  return hr;
+}
+
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_WIN_EVENT_TRACE_CONSUMER_H_
diff --git a/base/win/event_trace_consumer_unittest.cc b/base/win/event_trace_consumer_unittest.cc
new file mode 100644
index 0000000..9c4c242
--- /dev/null
+++ b/base/win/event_trace_consumer_unittest.cc
@@ -0,0 +1,367 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Unit tests for event trace consumer base class.
+#include "base/win/event_trace_consumer.h"
+
+#include <list>
+
+#include <objbase.h>
+
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/files/scoped_temp_dir.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/process/process_handle.h"
+#include "base/strings/stringprintf.h"
+#include "base/win/event_trace_controller.h"
+#include "base/win/event_trace_provider.h"
+#include "base/win/scoped_handle.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#include <initguid.h>  // NOLINT - has to be last
+
+namespace base {
+namespace win {
+
+namespace {
+
+typedef std::list<EVENT_TRACE> EventQueue;
+
+class TestConsumer: public EtwTraceConsumerBase<TestConsumer> {
+ public:
+  TestConsumer() {
+    sank_event_.Set(::CreateEvent(NULL, TRUE, FALSE, NULL));
+    ClearQueue();
+  }
+
+  ~TestConsumer() {
+    ClearQueue();
+    sank_event_.Close();
+  }
+
+  void ClearQueue() {
+    for (EventQueue::const_iterator it(events_.begin()), end(events_.end());
+         it != end; ++it) {
+      delete[] reinterpret_cast<char*>(it->MofData);
+    }
+
+    events_.clear();
+  }
+
+  static void EnqueueEvent(EVENT_TRACE* event) {
+    events_.push_back(*event);
+    EVENT_TRACE& back = events_.back();
+
+    if (event->MofData != NULL && event->MofLength != 0) {
+      back.MofData = new char[event->MofLength];
+      memcpy(back.MofData, event->MofData, event->MofLength);
+    }
+  }
+
+  static void ProcessEvent(EVENT_TRACE* event) {
+    EnqueueEvent(event);
+    ::SetEvent(sank_event_.Get());
+  }
+
+  static ScopedHandle sank_event_;
+  static EventQueue events_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(TestConsumer);
+};
+
+ScopedHandle TestConsumer::sank_event_;
+EventQueue TestConsumer::events_;
+
+class EtwTraceConsumerBaseTest: public testing::Test {
+ public:
+  EtwTraceConsumerBaseTest()
+      : session_name_(StringPrintf(L"TestSession-%d", GetCurrentProcId())) {
+  }
+
+  void SetUp() override {
+    // Cleanup any potentially dangling sessions.
+    EtwTraceProperties ignore;
+    EtwTraceController::Stop(session_name_.c_str(), &ignore);
+
+    // Allocate a new GUID for each provider test.
+    ASSERT_HRESULT_SUCCEEDED(::CoCreateGuid(&test_provider_));
+  }
+
+  void TearDown() override {
+    // Cleanup any potentially dangling sessions.
+    EtwTraceProperties ignore;
+    EtwTraceController::Stop(session_name_.c_str(), &ignore);
+  }
+
+ protected:
+  GUID test_provider_;
+  std::wstring session_name_;
+};
+
+}  // namespace
+
+TEST_F(EtwTraceConsumerBaseTest, Initialize) {
+  TestConsumer consumer_;
+}
+
+TEST_F(EtwTraceConsumerBaseTest, OpenRealtimeSucceedsWhenNoSession) {
+  TestConsumer consumer_;
+  ASSERT_HRESULT_SUCCEEDED(
+      consumer_.OpenRealtimeSession(session_name_.c_str()));
+}
+
+TEST_F(EtwTraceConsumerBaseTest, ConsumerImmediateFailureWhenNoSession) {
+  TestConsumer consumer_;
+  ASSERT_HRESULT_SUCCEEDED(
+      consumer_.OpenRealtimeSession(session_name_.c_str()));
+  ASSERT_HRESULT_FAILED(consumer_.Consume());
+}
+
+namespace {
+
+class EtwTraceConsumerRealtimeTest: public EtwTraceConsumerBaseTest {
+ public:
+  void SetUp() override {
+    EtwTraceConsumerBaseTest::SetUp();
+    ASSERT_HRESULT_SUCCEEDED(
+        consumer_.OpenRealtimeSession(session_name_.c_str()));
+  }
+
+  void TearDown() override {
+    consumer_.Close();
+    EtwTraceConsumerBaseTest::TearDown();
+  }
+
+  DWORD ConsumerThread() {
+    ::SetEvent(consumer_ready_.Get());
+    return consumer_.Consume();
+  }
+
+  static DWORD WINAPI ConsumerThreadMainProc(void* arg) {
+    return reinterpret_cast<EtwTraceConsumerRealtimeTest*>(arg)->
+        ConsumerThread();
+  }
+
+  HRESULT StartConsumerThread() {
+    consumer_ready_.Set(::CreateEvent(NULL, TRUE, FALSE, NULL));
+    EXPECT_TRUE(consumer_ready_.IsValid());
+    consumer_thread_.Set(::CreateThread(NULL, 0, ConsumerThreadMainProc, this,
+                                        0, NULL));
+    if (consumer_thread_.Get() == NULL)
+      return HRESULT_FROM_WIN32(::GetLastError());
+
+    HANDLE events[] = { consumer_ready_.Get(), consumer_thread_.Get() };
+    DWORD result = ::WaitForMultipleObjects(arraysize(events), events,
+                                            FALSE, INFINITE);
+    switch (result) {
+      case WAIT_OBJECT_0:
+        // The event was set, the consumer_ is ready.
+        return S_OK;
+      case WAIT_OBJECT_0 + 1: {
+          // The thread finished. This may race with the event, so check
+          // explicitly for the event here, before concluding there's trouble.
+          if (::WaitForSingleObject(consumer_ready_.Get(), 0) == WAIT_OBJECT_0)
+            return S_OK;
+          DWORD exit_code = 0;
+          if (::GetExitCodeThread(consumer_thread_.Get(), &exit_code))
+            return exit_code;
+          return HRESULT_FROM_WIN32(::GetLastError());
+        }
+      default:
+        return E_UNEXPECTED;
+    }
+  }
+
+  // Waits for consumer_ thread to exit, and returns its exit code.
+  HRESULT JoinConsumerThread() {
+    if (::WaitForSingleObject(consumer_thread_.Get(), INFINITE) !=
+        WAIT_OBJECT_0) {
+      return HRESULT_FROM_WIN32(::GetLastError());
+    }
+
+    DWORD exit_code = 0;
+    if (::GetExitCodeThread(consumer_thread_.Get(), &exit_code))
+      return exit_code;
+
+    return HRESULT_FROM_WIN32(::GetLastError());
+  }
+
+  TestConsumer consumer_;
+  ScopedHandle consumer_ready_;
+  ScopedHandle consumer_thread_;
+};
+
+}  // namespace
+
+TEST_F(EtwTraceConsumerRealtimeTest, ConsumerReturnsWhenSessionClosed) {
+  EtwTraceController controller;
+  if (controller.StartRealtimeSession(session_name_.c_str(), 100 * 1024) ==
+      E_ACCESSDENIED) {
+    VLOG(1) << "You must be an administrator to run this test on Vista";
+    return;
+  }
+
+  // Start the consumer_.
+  ASSERT_HRESULT_SUCCEEDED(StartConsumerThread());
+
+  // Wait around for the consumer_ thread a bit.
+  ASSERT_EQ(static_cast<DWORD>(WAIT_TIMEOUT),
+            ::WaitForSingleObject(consumer_thread_.Get(), 50));
+  ASSERT_HRESULT_SUCCEEDED(controller.Stop(NULL));
+
+  // The consumer_ returns success on session stop.
+  ASSERT_HRESULT_SUCCEEDED(JoinConsumerThread());
+}
+
+namespace {
+
+// {57E47923-A549-476f-86CA-503D57F59E62}
+DEFINE_GUID(
+    kTestEventType,
+    0x57e47923, 0xa549, 0x476f, 0x86, 0xca, 0x50, 0x3d, 0x57, 0xf5, 0x9e, 0x62);
+
+}  // namespace
+
+TEST_F(EtwTraceConsumerRealtimeTest, ConsumeEvent) {
+  EtwTraceController controller;
+  if (controller.StartRealtimeSession(session_name_.c_str(), 100 * 1024) ==
+      E_ACCESSDENIED) {
+    VLOG(1) << "You must be an administrator to run this test on Vista";
+    return;
+  }
+
+  ASSERT_HRESULT_SUCCEEDED(controller.EnableProvider(
+      test_provider_, TRACE_LEVEL_VERBOSE, 0xFFFFFFFF));
+
+  EtwTraceProvider provider(test_provider_);
+  ASSERT_EQ(static_cast<DWORD>(ERROR_SUCCESS), provider.Register());
+
+  // Start the consumer_.
+  ASSERT_HRESULT_SUCCEEDED(StartConsumerThread());
+  ASSERT_EQ(0u, TestConsumer::events_.size());
+
+  EtwMofEvent<1> event(kTestEventType, 1, TRACE_LEVEL_ERROR);
+  EXPECT_EQ(static_cast<DWORD>(ERROR_SUCCESS), provider.Log(&event.header));
+  EXPECT_EQ(WAIT_OBJECT_0,
+            ::WaitForSingleObject(TestConsumer::sank_event_.Get(), INFINITE));
+  ASSERT_HRESULT_SUCCEEDED(controller.Stop(NULL));
+  ASSERT_HRESULT_SUCCEEDED(JoinConsumerThread());
+  ASSERT_NE(0u, TestConsumer::events_.size());
+}
+
+namespace {
+
+// We run events through a file session to assert that
+// the content comes through.
+class EtwTraceConsumerDataTest : public EtwTraceConsumerBaseTest {
+ public:
+  EtwTraceConsumerDataTest() {}
+
+  void SetUp() override {
+    EtwTraceConsumerBaseTest::SetUp();
+
+    EtwTraceProperties prop;
+    EtwTraceController::Stop(session_name_.c_str(), &prop);
+
+    // Create a temp dir for this test.
+    ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
+    // Construct a temp file name in our dir.
+    temp_file_ = temp_dir_.GetPath().Append(L"test.etl");
+  }
+
+  void TearDown() override {
+    EXPECT_TRUE(base::DeleteFile(temp_file_, false));
+
+    EtwTraceConsumerBaseTest::TearDown();
+  }
+
+  HRESULT LogEventToTempSession(PEVENT_TRACE_HEADER header) {
+    EtwTraceController controller;
+
+    // Set up a file session.
+    HRESULT hr = controller.StartFileSession(session_name_.c_str(),
+                                             temp_file_.value().c_str());
+    if (FAILED(hr))
+      return hr;
+
+    // Enable our provider.
+    EXPECT_HRESULT_SUCCEEDED(controller.EnableProvider(
+        test_provider_, TRACE_LEVEL_VERBOSE, 0xFFFFFFFF));
+
+    EtwTraceProvider provider(test_provider_);
+    // Then register our provider, which means we get a session handle
+    // immediately.
+    EXPECT_EQ(static_cast<DWORD>(ERROR_SUCCESS), provider.Register());
+    // Trace the event; it goes to the temp file.
+    EXPECT_EQ(static_cast<DWORD>(ERROR_SUCCESS), provider.Log(header));
+    EXPECT_HRESULT_SUCCEEDED(controller.DisableProvider(test_provider_));
+    EXPECT_HRESULT_SUCCEEDED(provider.Unregister());
+    EXPECT_HRESULT_SUCCEEDED(controller.Flush(NULL));
+    EXPECT_HRESULT_SUCCEEDED(controller.Stop(NULL));
+
+    return S_OK;
+  }
+
+  HRESULT ConsumeEventFromTempSession() {
+    // Now consume the event(s).
+    TestConsumer consumer_;
+    HRESULT hr = consumer_.OpenFileSession(temp_file_.value().c_str());
+    if (SUCCEEDED(hr))
+      hr = consumer_.Consume();
+    consumer_.Close();
+    // And nab the result.
+    events_.swap(TestConsumer::events_);
+    return hr;
+  }
+
+  HRESULT RoundTripEvent(PEVENT_TRACE_HEADER header, PEVENT_TRACE* trace) {
+    base::DeleteFile(temp_file_, false);
+
+    HRESULT hr = LogEventToTempSession(header);
+    if (SUCCEEDED(hr))
+      hr = ConsumeEventFromTempSession();
+
+    if (FAILED(hr))
+      return hr;
+
+    // We should now have the event in the queue.
+    if (events_.empty())
+      return E_FAIL;
+
+    *trace = &events_.back();
+    return S_OK;
+  }
+
+  EventQueue events_;
+  ScopedTempDir temp_dir_;
+  FilePath temp_file_;
+};
+
+}  // namespace
+
+TEST_F(EtwTraceConsumerDataTest, RoundTrip) {
+  EtwMofEvent<1> event(kTestEventType, 1, TRACE_LEVEL_ERROR);
+
+  static const char kData[] = "This is but test data";
+  event.fields[0].DataPtr = reinterpret_cast<ULONG64>(kData);
+  event.fields[0].Length = sizeof(kData);
+
+  PEVENT_TRACE trace = NULL;
+  HRESULT hr = RoundTripEvent(&event.header, &trace);
+  if (hr == E_ACCESSDENIED) {
+    VLOG(1) << "You must be an administrator to run this test on Vista";
+    return;
+  }
+  ASSERT_HRESULT_SUCCEEDED(hr) << "RoundTripEvent failed";
+  ASSERT_TRUE(trace != NULL);
+  ASSERT_EQ(sizeof(kData), trace->MofLength);
+  ASSERT_STREQ(kData, reinterpret_cast<const char*>(trace->MofData));
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/event_trace_controller.cc b/base/win/event_trace_controller.cc
new file mode 100644
index 0000000..ff392a3
--- /dev/null
+++ b/base/win/event_trace_controller.cc
@@ -0,0 +1,174 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Implementation of a Windows event trace controller class.
+#include "base/win/event_trace_controller.h"
+#include "base/logging.h"
+
+namespace base {
+namespace win {
+
+EtwTraceProperties::EtwTraceProperties() {
+  memset(buffer_, 0, sizeof(buffer_));
+  EVENT_TRACE_PROPERTIES* prop = get();
+
+  prop->Wnode.BufferSize = sizeof(buffer_);
+  prop->Wnode.Flags = WNODE_FLAG_TRACED_GUID;
+  prop->LoggerNameOffset = sizeof(EVENT_TRACE_PROPERTIES);
+  prop->LogFileNameOffset = sizeof(EVENT_TRACE_PROPERTIES) +
+                            sizeof(wchar_t) * kMaxStringLen;
+}
+
+HRESULT EtwTraceProperties::SetLoggerName(const wchar_t* logger_name) {
+  size_t len = wcslen(logger_name) + 1;
+  if (kMaxStringLen < len)
+    return E_INVALIDARG;
+
+  memcpy(buffer_ + get()->LoggerNameOffset,
+         logger_name,
+         sizeof(wchar_t) * len);
+  return S_OK;
+}
+
+HRESULT EtwTraceProperties::SetLoggerFileName(const wchar_t* logger_file_name) {
+  size_t len = wcslen(logger_file_name) + 1;
+  if (kMaxStringLen < len)
+    return E_INVALIDARG;
+
+  memcpy(buffer_ + get()->LogFileNameOffset,
+         logger_file_name,
+         sizeof(wchar_t) * len);
+  return S_OK;
+}
+
+EtwTraceController::EtwTraceController() : session_(NULL) {
+}
+
+EtwTraceController::~EtwTraceController() {
+  if (session_)
+    Stop(NULL);
+}
+
+HRESULT EtwTraceController::Start(const wchar_t* session_name,
+    EtwTraceProperties* prop) {
+  DCHECK(NULL == session_ && session_name_.empty());
+  EtwTraceProperties ignore;
+  if (prop == NULL)
+    prop = &ignore;
+
+  HRESULT hr = Start(session_name, prop, &session_);
+  if (SUCCEEDED(hr))
+    session_name_ = session_name;
+
+  return hr;
+}
+
+HRESULT EtwTraceController::StartFileSession(const wchar_t* session_name,
+    const wchar_t* logfile_path, bool realtime) {
+  DCHECK(NULL == session_ && session_name_.empty());
+
+  EtwTraceProperties prop;
+  prop.SetLoggerFileName(logfile_path);
+  EVENT_TRACE_PROPERTIES& p = *prop.get();
+  p.Wnode.ClientContext = 1;  // QPC timer accuracy.
+  p.LogFileMode = EVENT_TRACE_FILE_MODE_SEQUENTIAL;  // Sequential log.
+  if (realtime)
+    p.LogFileMode |= EVENT_TRACE_REAL_TIME_MODE;
+
+  p.MaximumFileSize = 100;  // 100 MB maximum file size.
+  p.FlushTimer = 30;  // 30 seconds flush lag.
+  return Start(session_name, &prop);
+}
+
+HRESULT EtwTraceController::StartRealtimeSession(const wchar_t* session_name,
+    size_t buffer_size) {
+  DCHECK(NULL == session_ && session_name_.empty());
+  EtwTraceProperties prop;
+  EVENT_TRACE_PROPERTIES& p = *prop.get();
+  p.LogFileMode = EVENT_TRACE_REAL_TIME_MODE | EVENT_TRACE_USE_PAGED_MEMORY;
+  p.FlushTimer = 1;  // Flush every second.
+  p.BufferSize = 16;  // 16 KB buffers.
+  p.LogFileNameOffset = 0;
+  return Start(session_name, &prop);
+}
+
+HRESULT EtwTraceController::EnableProvider(REFGUID provider, UCHAR level,
+    ULONG flags) {
+  ULONG error = ::EnableTrace(TRUE, flags, level, &provider, session_);
+  return HRESULT_FROM_WIN32(error);
+}
+
+HRESULT EtwTraceController::DisableProvider(REFGUID provider) {
+  ULONG error = ::EnableTrace(FALSE, 0, 0, &provider, session_);
+  return HRESULT_FROM_WIN32(error);
+}
+
+HRESULT EtwTraceController::Stop(EtwTraceProperties* properties) {
+  EtwTraceProperties ignore;
+  if (properties == NULL)
+    properties = &ignore;
+
+  ULONG error = ::ControlTrace(session_, NULL, properties->get(),
+    EVENT_TRACE_CONTROL_STOP);
+  if (ERROR_SUCCESS != error)
+    return HRESULT_FROM_WIN32(error);
+
+  session_ = NULL;
+  session_name_.clear();
+  return S_OK;
+}
+
+HRESULT EtwTraceController::Flush(EtwTraceProperties* properties) {
+  EtwTraceProperties ignore;
+  if (properties == NULL)
+    properties = &ignore;
+
+  ULONG error = ::ControlTrace(session_, NULL, properties->get(),
+                               EVENT_TRACE_CONTROL_FLUSH);
+  if (ERROR_SUCCESS != error)
+    return HRESULT_FROM_WIN32(error);
+
+  return S_OK;
+}
+
+HRESULT EtwTraceController::Start(const wchar_t* session_name,
+    EtwTraceProperties* properties, TRACEHANDLE* session_handle) {
+  DCHECK(properties != NULL);
+  ULONG err = ::StartTrace(session_handle, session_name, properties->get());
+  return HRESULT_FROM_WIN32(err);
+}
+
+HRESULT EtwTraceController::Query(const wchar_t* session_name,
+    EtwTraceProperties* properties) {
+  ULONG err = ::ControlTrace(NULL, session_name, properties->get(),
+                             EVENT_TRACE_CONTROL_QUERY);
+  return HRESULT_FROM_WIN32(err);
+}
+
+HRESULT EtwTraceController::Update(const wchar_t* session_name,
+    EtwTraceProperties* properties) {
+  DCHECK(properties != NULL);
+  ULONG err = ::ControlTrace(NULL, session_name, properties->get(),
+                             EVENT_TRACE_CONTROL_UPDATE);
+  return HRESULT_FROM_WIN32(err);
+}
+
+HRESULT EtwTraceController::Stop(const wchar_t* session_name,
+    EtwTraceProperties* properties) {
+  DCHECK(properties != NULL);
+  ULONG err = ::ControlTrace(NULL, session_name, properties->get(),
+                             EVENT_TRACE_CONTROL_STOP);
+  return HRESULT_FROM_WIN32(err);
+}
+
+HRESULT EtwTraceController::Flush(const wchar_t* session_name,
+    EtwTraceProperties* properties) {
+  DCHECK(properties != NULL);
+  ULONG err = ::ControlTrace(NULL, session_name, properties->get(),
+                             EVENT_TRACE_CONTROL_FLUSH);
+  return HRESULT_FROM_WIN32(err);
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/event_trace_controller.h b/base/win/event_trace_controller.h
new file mode 100644
index 0000000..2e32b4c
--- /dev/null
+++ b/base/win/event_trace_controller.h
@@ -0,0 +1,152 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Declaration of a Windows event trace controller class.
+// The controller takes care of creating and manipulating event trace
+// sessions.
+//
+// Event tracing for Windows is a system-provided service that provides
+// logging control and high-performance transport for generic, binary trace
+// events. Event trace providers register with the system by their name,
+// which is a GUID, and can from that point forward receive callbacks that
+// start or end tracing and that change their trace level and enable mask.
+//
+// A trace controller can create an event tracing session, which either
+// sends events to a binary file, or to a realtime consumer, or both.
+//
+// A trace consumer consumes events from zero or one realtime session,
+// as well as potentially from multiple binary trace files.
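+//
+// A minimal usage sketch of the controller (illustrative only; kMyProvider is
+// a hypothetical provider GUID and error handling is elided):
+//
+//   base::win::EtwTraceController controller;
+//   HRESULT hr = controller.StartRealtimeSession(L"MySession", 16 * 1024);
+//   if (SUCCEEDED(hr)) {
+//     controller.EnableProvider(kMyProvider, TRACE_LEVEL_INFORMATION);
+//     // ... traced work happens here ...
+//     controller.Stop(NULL);
+//   }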
+#ifndef BASE_WIN_EVENT_TRACE_CONTROLLER_H_
+#define BASE_WIN_EVENT_TRACE_CONTROLLER_H_
+
+#include <windows.h>
+#include <wmistr.h>
+#include <evntrace.h>
+#include <stddef.h>
+#include <string>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+
+namespace base {
+namespace win {
+
+// Utility class to make it easier to work with EVENT_TRACE_PROPERTIES.
+// The EVENT_TRACE_PROPERTIES structure contains information about an
+// event tracing session.
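+//
+// For example, a file session could be configured by hand roughly as follows
+// (an illustrative sketch; the file name is arbitrary):
+//
+//   EtwTraceProperties prop;
+//   prop.SetLoggerFileName(L"trace.etl");
+//   prop.get()->LogFileMode = EVENT_TRACE_FILE_MODE_SEQUENTIAL;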
+class BASE_EXPORT EtwTraceProperties {
+ public:
+  EtwTraceProperties();
+
+  EVENT_TRACE_PROPERTIES* get() {
+    return &properties_;
+  }
+
+  const EVENT_TRACE_PROPERTIES* get() const {
+    return reinterpret_cast<const EVENT_TRACE_PROPERTIES*>(&properties_);
+  }
+
+  const wchar_t* GetLoggerName() const {
+    return reinterpret_cast<const wchar_t *>(buffer_ + get()->LoggerNameOffset);
+  }
+
+  // Copies logger_name to the properties structure.
+  HRESULT SetLoggerName(const wchar_t* logger_name);
+  const wchar_t* GetLoggerFileName() const {
+    return reinterpret_cast<const wchar_t*>(buffer_ + get()->LogFileNameOffset);
+  }
+
+  // Copies logger_file_name to the properties structure.
+  HRESULT SetLoggerFileName(const wchar_t* logger_file_name);
+
+  // Max string len for name and session name is 1024 per documentation.
+  static const size_t kMaxStringLen = 1024;
+  // Properties buffer allocates space for header and for
+  // max length for name and session name.
+  static const size_t kBufSize = sizeof(EVENT_TRACE_PROPERTIES)
+      + 2 * sizeof(wchar_t) * (kMaxStringLen);
+
+ private:
+  // The EVENT_TRACE_PROPERTIES structure needs to be overlaid on a
+  // larger buffer to allow storing the logger name and logger file
+  // name contiguously with the structure.
+  union {
+   public:
+    // Our properties header.
+    EVENT_TRACE_PROPERTIES properties_;
+    // The actual size of the buffer is forced by this member.
+    char buffer_[kBufSize];
+  };
+
+  DISALLOW_COPY_AND_ASSIGN(EtwTraceProperties);
+};
+
+// This class implements an ETW controller, which knows how to start and
+// stop event tracing sessions, as well as controlling ETW provider
+// log levels and enable bit masks under the session.
+class BASE_EXPORT EtwTraceController {
+ public:
+  EtwTraceController();
+  ~EtwTraceController();
+
+  // Start a session with given name and properties.
+  HRESULT Start(const wchar_t* session_name, EtwTraceProperties* prop);
+
+  // Starts a session tracing to a file with some default properties.
+  HRESULT StartFileSession(const wchar_t* session_name,
+                           const wchar_t* logfile_path,
+                           bool realtime = false);
+
+  // Starts a realtime session with some default properties.
+  HRESULT StartRealtimeSession(const wchar_t* session_name,
+                               size_t buffer_size);
+
+  // Enables "provider" at "level" for this session.
+  // This will cause all providers registered with the GUID
+  // "provider" to start tracing at the new level, systemwide.
+  HRESULT EnableProvider(const GUID& provider, UCHAR level,
+                         ULONG flags = 0xFFFFFFFF);
+  // Disables "provider".
+  HRESULT DisableProvider(const GUID& provider);
+
+  // Stops our session and retrieves the new properties of the session;
+  // properties may be NULL.
+  HRESULT Stop(EtwTraceProperties* properties);
+
+  // Flushes our session and retrieves the current properties;
+  // properties may be NULL.
+  HRESULT Flush(EtwTraceProperties* properties);
+
+  // Static utility functions for controlling
+  // sessions we don't necessarily own.
+  static HRESULT Start(const wchar_t* session_name,
+                       EtwTraceProperties* properties,
+                       TRACEHANDLE* session_handle);
+
+  static HRESULT Query(const wchar_t* session_name,
+                       EtwTraceProperties* properties);
+
+  static HRESULT Update(const wchar_t* session_name,
+                        EtwTraceProperties* properties);
+
+  static HRESULT Stop(const wchar_t* session_name,
+                      EtwTraceProperties* properties);
+  static HRESULT Flush(const wchar_t* session_name,
+                       EtwTraceProperties* properties);
+
+  // Accessors.
+  TRACEHANDLE session() const { return session_; }
+  const wchar_t* session_name() const { return session_name_.c_str(); }
+
+ private:
+  std::wstring session_name_;
+  TRACEHANDLE session_;
+
+  DISALLOW_COPY_AND_ASSIGN(EtwTraceController);
+};
+
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_WIN_EVENT_TRACE_CONTROLLER_H_
diff --git a/base/win/event_trace_controller_unittest.cc b/base/win/event_trace_controller_unittest.cc
new file mode 100644
index 0000000..f19ee31
--- /dev/null
+++ b/base/win/event_trace_controller_unittest.cc
@@ -0,0 +1,244 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Unit tests for event trace controller.
+
+#include <objbase.h>
+#include <initguid.h>
+
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/files/scoped_temp_dir.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/process/process_handle.h"
+#include "base/strings/stringprintf.h"
+#include "base/sys_info.h"
+#include "base/win/event_trace_controller.h"
+#include "base/win/event_trace_provider.h"
+#include "base/win/scoped_handle.h"
+#include "base/win/windows_version.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace win {
+
+namespace {
+
+DEFINE_GUID(kGuidNull,
+    0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+
+const ULONG kTestProviderFlags = 0xCAFEBABE;
+
+class TestingProvider : public EtwTraceProvider {
+ public:
+  explicit TestingProvider(const GUID& provider_name)
+      : EtwTraceProvider(provider_name) {
+    callback_event_.Set(::CreateEvent(NULL, TRUE, FALSE, NULL));
+  }
+
+  void WaitForCallback() {
+    ::WaitForSingleObject(callback_event_.Get(), INFINITE);
+    ::ResetEvent(callback_event_.Get());
+  }
+
+ private:
+  void OnEventsEnabled() override { ::SetEvent(callback_event_.Get()); }
+  void PostEventsDisabled() override { ::SetEvent(callback_event_.Get()); }
+
+  ScopedHandle callback_event_;
+
+  DISALLOW_COPY_AND_ASSIGN(TestingProvider);
+};
+
+}  // namespace
+
+TEST(EtwTracePropertiesTest, Initialization) {
+  EtwTraceProperties prop;
+
+  EVENT_TRACE_PROPERTIES* p = prop.get();
+  EXPECT_NE(0u, p->Wnode.BufferSize);
+  EXPECT_EQ(0u, p->Wnode.ProviderId);
+  EXPECT_EQ(0u, p->Wnode.HistoricalContext);
+
+  EXPECT_TRUE(kGuidNull == p->Wnode.Guid);
+  EXPECT_EQ(0u, p->Wnode.ClientContext);
+  EXPECT_EQ(static_cast<ULONG>(WNODE_FLAG_TRACED_GUID), p->Wnode.Flags);
+
+  EXPECT_EQ(0u, p->BufferSize);
+  EXPECT_EQ(0u, p->MinimumBuffers);
+  EXPECT_EQ(0u, p->MaximumBuffers);
+  EXPECT_EQ(0u, p->MaximumFileSize);
+  EXPECT_EQ(0u, p->LogFileMode);
+  EXPECT_EQ(0u, p->FlushTimer);
+  EXPECT_EQ(0u, p->EnableFlags);
+  EXPECT_EQ(0, p->AgeLimit);
+
+  EXPECT_EQ(0u, p->NumberOfBuffers);
+  EXPECT_EQ(0u, p->FreeBuffers);
+  EXPECT_EQ(0u, p->EventsLost);
+  EXPECT_EQ(0u, p->BuffersWritten);
+  EXPECT_EQ(0u, p->LogBuffersLost);
+  EXPECT_EQ(0u, p->RealTimeBuffersLost);
+  EXPECT_EQ(0u, p->LoggerThreadId);
+  EXPECT_NE(0u, p->LogFileNameOffset);
+  EXPECT_NE(0u, p->LoggerNameOffset);
+}
+
+TEST(EtwTracePropertiesTest, Strings) {
+  EtwTraceProperties prop;
+
+  ASSERT_STREQ(L"", prop.GetLoggerFileName());
+  ASSERT_STREQ(L"", prop.GetLoggerName());
+
+  std::wstring name(1023, L'A');
+  ASSERT_HRESULT_SUCCEEDED(prop.SetLoggerFileName(name.c_str()));
+  ASSERT_HRESULT_SUCCEEDED(prop.SetLoggerName(name.c_str()));
+  ASSERT_STREQ(name.c_str(), prop.GetLoggerFileName());
+  ASSERT_STREQ(name.c_str(), prop.GetLoggerName());
+
+  std::wstring name2(1024, L'A');
+  ASSERT_HRESULT_FAILED(prop.SetLoggerFileName(name2.c_str()));
+  ASSERT_HRESULT_FAILED(prop.SetLoggerName(name2.c_str()));
+}
+
+namespace {
+
+class EtwTraceControllerTest : public testing::Test {
+ public:
+  EtwTraceControllerTest()
+      : session_name_(StringPrintf(L"TestSession-%d", GetCurrentProcId())) {
+  }
+
+  void SetUp() override {
+    EtwTraceProperties ignore;
+    EtwTraceController::Stop(session_name_.c_str(), &ignore);
+
+    // Allocate a new provider name GUID for each test.
+    ASSERT_HRESULT_SUCCEEDED(::CoCreateGuid(&test_provider_));
+  }
+
+  void TearDown() override {
+    EtwTraceProperties prop;
+    EtwTraceController::Stop(session_name_.c_str(), &prop);
+  }
+
+ protected:
+  GUID test_provider_;
+  std::wstring session_name_;
+};
+
+}  // namespace
+
+TEST_F(EtwTraceControllerTest, Initialize) {
+  EtwTraceController controller;
+
+  EXPECT_EQ(0u, controller.session());
+  EXPECT_STREQ(L"", controller.session_name());
+}
+
+TEST_F(EtwTraceControllerTest, StartRealTimeSession) {
+  EtwTraceController controller;
+
+  HRESULT hr = controller.StartRealtimeSession(session_name_.c_str(),
+                                               100 * 1024);
+  if (hr == E_ACCESSDENIED) {
+    VLOG(1) << "You must be an administrator to run this test on Vista";
+    return;
+  }
+
+  EXPECT_NE(0u, controller.session());
+  EXPECT_STREQ(session_name_.c_str(), controller.session_name());
+
+  EXPECT_HRESULT_SUCCEEDED(controller.Stop(NULL));
+  EXPECT_EQ(0u, controller.session());
+  EXPECT_STREQ(L"", controller.session_name());
+}
+
+TEST_F(EtwTraceControllerTest, StartFileSession) {
+  ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath temp;
+  ASSERT_TRUE(base::CreateTemporaryFileInDir(temp_dir.GetPath(), &temp));
+
+  EtwTraceController controller;
+  HRESULT hr = controller.StartFileSession(session_name_.c_str(),
+                                           temp.value().c_str());
+  if (hr == E_ACCESSDENIED) {
+    VLOG(1) << "You must be an administrator to run this test on Vista";
+    base::DeleteFile(temp, false);
+    return;
+  }
+
+  EXPECT_NE(0u, controller.session());
+  EXPECT_STREQ(session_name_.c_str(), controller.session_name());
+
+  EXPECT_HRESULT_SUCCEEDED(controller.Stop(NULL));
+  EXPECT_EQ(0u, controller.session());
+  EXPECT_STREQ(L"", controller.session_name());
+  base::DeleteFile(temp, false);
+}
+
+// This test is flaky for unclear reasons. See bugs 525297 and 534184.
+TEST_F(EtwTraceControllerTest, DISABLED_EnableDisable) {
+  TestingProvider provider(test_provider_);
+
+  EXPECT_EQ(static_cast<DWORD>(ERROR_SUCCESS), provider.Register());
+  EXPECT_EQ(0u, provider.session_handle());
+
+  EtwTraceController controller;
+  HRESULT hr = controller.StartRealtimeSession(session_name_.c_str(),
+                                               100 * 1024);
+  if (hr == E_ACCESSDENIED) {
+    VLOG(1) << "You must be an administrator to run this test on Vista";
+    return;
+  }
+
+  EXPECT_HRESULT_SUCCEEDED(controller.EnableProvider(test_provider_,
+                           TRACE_LEVEL_VERBOSE, kTestProviderFlags));
+
+  provider.WaitForCallback();
+
+  EXPECT_EQ(TRACE_LEVEL_VERBOSE, provider.enable_level());
+  EXPECT_EQ(kTestProviderFlags, provider.enable_flags());
+
+  EXPECT_HRESULT_SUCCEEDED(controller.DisableProvider(test_provider_));
+
+  provider.WaitForCallback();
+
+  EXPECT_EQ(0, provider.enable_level());
+  EXPECT_EQ(0u, provider.enable_flags());
+
+  EXPECT_EQ(static_cast<DWORD>(ERROR_SUCCESS), provider.Unregister());
+
+  // Enable the provider again, before registering.
+  EXPECT_HRESULT_SUCCEEDED(controller.EnableProvider(test_provider_,
+                           TRACE_LEVEL_VERBOSE, kTestProviderFlags));
+
+  // Register the provider again, the settings above
+  // should take immediate effect.
+  EXPECT_EQ(static_cast<DWORD>(ERROR_SUCCESS), provider.Register());
+
+  EXPECT_EQ(TRACE_LEVEL_VERBOSE, provider.enable_level());
+  EXPECT_EQ(kTestProviderFlags, provider.enable_flags());
+
+  // Consume the callback event of the previous controller.EnableProvider().
+  provider.WaitForCallback();
+
+  EXPECT_HRESULT_SUCCEEDED(controller.Stop(NULL));
+
+  // Windows 7 does not call the callback when Stop() is called, so we can't
+  // wait, and enable_level and enable_flags are not zeroed.
+  if (base::win::GetVersion() >= VERSION_WIN8) {
+    provider.WaitForCallback();
+
+    // Session should have wound down.
+    EXPECT_EQ(0, provider.enable_level());
+    EXPECT_EQ(0u, provider.enable_flags());
+  }
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/event_trace_provider.cc b/base/win/event_trace_provider.cc
new file mode 100644
index 0000000..8fcf67d
--- /dev/null
+++ b/base/win/event_trace_provider.cc
@@ -0,0 +1,134 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+#include "base/win/event_trace_provider.h"
+#include <windows.h>
+#include <cguid.h>
+
+namespace base {
+namespace win {
+
+TRACE_GUID_REGISTRATION EtwTraceProvider::obligatory_guid_registration_ = {
+  &GUID_NULL,
+  NULL
+};
+
+EtwTraceProvider::EtwTraceProvider(const GUID& provider_name)
+    : provider_name_(provider_name), registration_handle_(NULL),
+      session_handle_(NULL), enable_flags_(0), enable_level_(0) {
+}
+
+EtwTraceProvider::EtwTraceProvider()
+    : provider_name_(GUID_NULL), registration_handle_(NULL),
+      session_handle_(NULL), enable_flags_(0), enable_level_(0) {
+}
+
+EtwTraceProvider::~EtwTraceProvider() {
+  Unregister();
+}
+
+ULONG EtwTraceProvider::EnableEvents(void* buffer) {
+  session_handle_ = ::GetTraceLoggerHandle(buffer);
+  if (NULL == session_handle_) {
+    return ::GetLastError();
+  }
+
+  enable_flags_ = ::GetTraceEnableFlags(session_handle_);
+  enable_level_ = ::GetTraceEnableLevel(session_handle_);
+
+  // Give subclasses a chance to digest the state change.
+  OnEventsEnabled();
+
+  return ERROR_SUCCESS;
+}
+
+ULONG EtwTraceProvider::DisableEvents() {
+  // Give subclasses a chance to digest the state change.
+  OnEventsDisabled();
+
+  enable_level_ = 0;
+  enable_flags_ = 0;
+  session_handle_ = NULL;
+
+  PostEventsDisabled();
+
+  return ERROR_SUCCESS;
+}
+
+ULONG EtwTraceProvider::Callback(WMIDPREQUESTCODE request, void* buffer) {
+  switch (request) {
+    case WMI_ENABLE_EVENTS:
+      return EnableEvents(buffer);
+    case WMI_DISABLE_EVENTS:
+      return DisableEvents();
+    default:
+      return ERROR_INVALID_PARAMETER;
+  }
+  // Not reached.
+}
+
+ULONG WINAPI EtwTraceProvider::ControlCallback(WMIDPREQUESTCODE request,
+    void* context, ULONG *reserved, void* buffer) {
+  EtwTraceProvider *provider = reinterpret_cast<EtwTraceProvider*>(context);
+
+  return provider->Callback(request, buffer);
+}
+
+ULONG EtwTraceProvider::Register() {
+  if (provider_name_ == GUID_NULL)
+    return ERROR_INVALID_NAME;
+
+  return ::RegisterTraceGuids(ControlCallback, this, &provider_name_,
+      1, &obligatory_guid_registration_, NULL, NULL, &registration_handle_);
+}
+
+ULONG EtwTraceProvider::Unregister() {
+  // If a session is active, notify subclasses that it's going away.
+  if (session_handle_ != NULL)
+    DisableEvents();
+
+  ULONG ret = ::UnregisterTraceGuids(registration_handle_);
+
+  registration_handle_ = NULL;
+
+  return ret;
+}
+
+ULONG EtwTraceProvider::Log(const EtwEventClass& event_class,
+    EtwEventType type, EtwEventLevel level, const char *message) {
+  if (NULL == session_handle_ || enable_level_ < level)
+    return ERROR_SUCCESS;  // No one listening.
+
+  EtwMofEvent<1> event(event_class, type, level);
+
+  event.fields[0].DataPtr = reinterpret_cast<ULONG64>(message);
+  event.fields[0].Length = message ?
+      static_cast<ULONG>(sizeof(message[0]) * (1 + strlen(message))) : 0;
+
+  return ::TraceEvent(session_handle_, &event.header);
+}
+
+ULONG EtwTraceProvider::Log(const EtwEventClass& event_class,
+    EtwEventType type, EtwEventLevel level, const wchar_t *message) {
+  if (NULL == session_handle_ || enable_level_ < level)
+    return ERROR_SUCCESS;  // No one listening.
+
+  EtwMofEvent<1> event(event_class, type, level);
+
+  event.fields[0].DataPtr = reinterpret_cast<ULONG64>(message);
+  event.fields[0].Length = message ?
+      static_cast<ULONG>(sizeof(message[0]) * (1 + wcslen(message))) : 0;
+
+  return ::TraceEvent(session_handle_, &event.header);
+}
+
+ULONG EtwTraceProvider::Log(EVENT_TRACE_HEADER* event) {
+  if (enable_level_ < event->Class.Level)
+    return ERROR_SUCCESS;
+
+  return ::TraceEvent(session_handle_, event);
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/event_trace_provider.h b/base/win/event_trace_provider.h
new file mode 100644
index 0000000..d550dd6
--- /dev/null
+++ b/base/win/event_trace_provider.h
@@ -0,0 +1,184 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Declaration of a Windows event trace provider class, to allow using
+// Windows Event Tracing for logging transport and control.
+#ifndef BASE_WIN_EVENT_TRACE_PROVIDER_H_
+#define BASE_WIN_EVENT_TRACE_PROVIDER_H_
+
+#include <windows.h>
+#include <wmistr.h>
+#include <evntrace.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <limits>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+
+namespace base {
+namespace win {
+
+typedef GUID EtwEventClass;
+typedef UCHAR EtwEventType;
+typedef UCHAR EtwEventLevel;
+typedef USHORT EtwEventVersion;
+typedef ULONG EtwEventFlags;
+
+// The base class is a POD so that it can be safely zero-initialized with
+// memset().
+template <size_t N> struct EtwMofEventBase {
+  EVENT_TRACE_HEADER header;
+  MOF_FIELD fields[N];
+};
+
+// Utility class to auto-initialize event trace header structures.
+template <size_t N> class EtwMofEvent : public EtwMofEventBase<N> {
+ public:
+  typedef EtwMofEventBase<N> Super;
+
+  // Clang and the C++ standard don't allow unqualified lookup into dependent
+  // bases, hence these using decls to explicitly pull the names out.
+  using EtwMofEventBase<N>::header;
+  using EtwMofEventBase<N>::fields;
+
+  EtwMofEvent() {
+    memset(static_cast<Super*>(this), 0, sizeof(Super));
+  }
+
+  EtwMofEvent(const EtwEventClass& event_class, EtwEventType type,
+              EtwEventLevel level) {
+    memset(static_cast<Super*>(this), 0, sizeof(Super));
+    header.Size = sizeof(Super);
+    header.Guid = event_class;
+    header.Class.Type = type;
+    header.Class.Level = level;
+    header.Flags = WNODE_FLAG_TRACED_GUID | WNODE_FLAG_USE_MOF_PTR;
+  }
+
+  EtwMofEvent(const EtwEventClass& event_class, EtwEventType type,
+              EtwEventVersion version, EtwEventLevel level) {
+    memset(static_cast<Super*>(this), 0, sizeof(Super));
+    header.Size = sizeof(Super);
+    header.Guid = event_class;
+    header.Class.Type = type;
+    header.Class.Version = version;
+    header.Class.Level = level;
+    header.Flags = WNODE_FLAG_TRACED_GUID | WNODE_FLAG_USE_MOF_PTR;
+  }
+
+  void SetField(size_t field, size_t size, const void* data) {
+    // DCHECK(field < N);
+    if ((field < N) && (size <= std::numeric_limits<uint32_t>::max())) {
+      fields[field].DataPtr = reinterpret_cast<ULONG64>(data);
+      fields[field].Length = static_cast<ULONG>(size);
+    }
+  }
+
+  EVENT_TRACE_HEADER* get() { return &header; }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(EtwMofEvent);
+};
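+
+// A short example of filling in an EtwMofEvent before logging it
+// (illustrative; kMyEventClass is a hypothetical event class GUID):
+//
+//   EtwMofEvent<1> event(kMyEventClass, 1, TRACE_LEVEL_ERROR);
+//   DWORD data = 42;
+//   event.SetField(0, sizeof(data), &data);
+//   // event.get() can then be handed to EtwTraceProvider::Log().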
+
+// Trace provider with Event Tracing for Windows. The trace provider
+// registers with ETW by its name which is a GUID. ETW calls back to
+// the object whenever the trace level or enable flags for this provider
+// name changes.
+// Users of this class can test whether logging is currently enabled at
+// a particular trace level, and whether particular enable flags are set,
+// before other resources are consumed to generate and issue the log
+// messages themselves.
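+//
+// A minimal usage sketch (illustrative; kMyProvider and kMyEventClass are
+// hypothetical GUIDs, and return values are ignored for brevity):
+//
+//   base::win::EtwTraceProvider provider(kMyProvider);
+//   provider.Register();
+//   if (provider.ShouldLog(TRACE_LEVEL_INFORMATION, 0x1))
+//     provider.Log(kMyEventClass, 0, TRACE_LEVEL_INFORMATION, "hello");
+//   provider.Unregister();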
+class BASE_EXPORT EtwTraceProvider {
+ public:
+  // Creates an event trace provider identified by provider_name, which
+  // will be the name registered with Event Tracing for Windows (ETW).
+  explicit EtwTraceProvider(const GUID& provider_name);
+
+  // Creates an unnamed event trace provider; the provider must be given
+  // a name before registration.
+  EtwTraceProvider();
+  virtual ~EtwTraceProvider();
+
+  // Registers the trace provider with Event Tracing for Windows.
+  // Note: from this point forward ETW may call the provider's control
+  //    callback. If the provider's name is enabled in some trace session
+  //    already, the callback may occur recursively from this call, so
+  //    call this only when you're ready to handle callbacks.
+  ULONG Register();
+  // Unregisters the trace provider with ETW.
+  ULONG Unregister();
+
+  // Accessors.
+  void set_provider_name(const GUID& provider_name) {
+    provider_name_ = provider_name;
+  }
+  const GUID& provider_name() const { return provider_name_; }
+  TRACEHANDLE registration_handle() const { return registration_handle_; }
+  TRACEHANDLE session_handle() const { return session_handle_; }
+  EtwEventFlags enable_flags() const { return enable_flags_; }
+  EtwEventLevel enable_level() const { return enable_level_; }
+
+  // Returns true iff logging should be performed for "level" and "flags".
+  // Note: flags is treated as a bitmask, and should normally have a single
+  //      bit set, to test whether to log for a particular sub "facility".
+  bool ShouldLog(EtwEventLevel level, EtwEventFlags flags) {
+    return NULL != session_handle_ && level >= enable_level_ &&
+        (0 != (flags & enable_flags_));
+  }
+
+  // Simple wrappers to log Unicode and ANSI strings.
+  // Do nothing if !ShouldLog(level, 0xFFFFFFFF).
+  ULONG Log(const EtwEventClass& event_class, EtwEventType type,
+            EtwEventLevel level, const char *message);
+  ULONG Log(const EtwEventClass& event_class, EtwEventType type,
+            EtwEventLevel level, const wchar_t *message);
+
+  // Log the provided event.
+  ULONG Log(EVENT_TRACE_HEADER* event);
+
+ protected:
+  // Called after events have been enabled, override in subclasses
+  // to set up state or log at the start of a session.
+  // Note: This function may be called on ETW's thread and may be racy;
+  //    bring your own locking if needed.
+  virtual void OnEventsEnabled() {}
+
+  // Called just before events are disabled, override in subclasses
+  // to tear down state or log at the end of a session.
+  // Note: This function may be called on ETW's thread and may be racy;
+  //    bring your own locking if needed.
+  virtual void OnEventsDisabled() {}
+
+  // Called just after events have been disabled, override in subclasses
+  // to tear down state at the end of a session. At this point it's
+  // too late to log anything to the session.
+  // Note: This function may be called on ETW's thread and may be racy;
+  //    bring your own locking if needed.
+  virtual void PostEventsDisabled() {}
+
+ private:
+  ULONG EnableEvents(PVOID buffer);
+  ULONG DisableEvents();
+  ULONG Callback(WMIDPREQUESTCODE request, PVOID buffer);
+  static ULONG WINAPI ControlCallback(WMIDPREQUESTCODE request, PVOID context,
+                                      ULONG *reserved, PVOID buffer);
+
+  GUID provider_name_;
+  TRACEHANDLE registration_handle_;
+  TRACEHANDLE session_handle_;
+  EtwEventFlags enable_flags_;
+  EtwEventLevel enable_level_;
+
+  // We don't use this, but on XP we're obliged to pass one in to
+  // RegisterTraceGuids. Non-const, because that's how the API needs it.
+  static TRACE_GUID_REGISTRATION obligatory_guid_registration_;
+
+  DISALLOW_COPY_AND_ASSIGN(EtwTraceProvider);
+};
+
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_WIN_EVENT_TRACE_PROVIDER_H_
diff --git a/base/win/event_trace_provider_unittest.cc b/base/win/event_trace_provider_unittest.cc
new file mode 100644
index 0000000..7d57773
--- /dev/null
+++ b/base/win/event_trace_provider_unittest.cc
@@ -0,0 +1,110 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Unit tests for event trace provider.
+#include "base/win/event_trace_provider.h"
+#include <new>
+#include "testing/gtest/include/gtest/gtest.h"
+#include <initguid.h>  // NOLINT - has to be last
+
+namespace {
+
+using base::win::EtwTraceProvider;
+using base::win::EtwMofEvent;
+
+// {7F0FD37F-FA3C-4cd6-9242-DF60967A2CB2}
+DEFINE_GUID(kTestProvider,
+  0x7f0fd37f, 0xfa3c, 0x4cd6, 0x92, 0x42, 0xdf, 0x60, 0x96, 0x7a, 0x2c, 0xb2);
+
+// {7F0FD37F-FA3C-4cd6-9242-DF60967A2CB2}
+DEFINE_GUID(kTestEventClass,
+  0x7f0fd37f, 0xfa3c, 0x4cd6, 0x92, 0x42, 0xdf, 0x60, 0x96, 0x7a, 0x2c, 0xb2);
+
+}  // namespace
+
+TEST(EtwTraceProviderTest, ToleratesPreCreateInvocations) {
+  // Because the trace provider is used in logging, it's important that
+  // it be possible to use static provider instances without regard to
+  // whether they've been constructed or destructed.
+  // The interface of the class is designed to tolerate this usage.
+  char buf[sizeof(EtwTraceProvider)] = {0};
+  EtwTraceProvider& provider = reinterpret_cast<EtwTraceProvider&>(buf);
+
+  EXPECT_EQ(0u, provider.registration_handle());
+  EXPECT_EQ(0u, provider.session_handle());
+  EXPECT_EQ(0u, provider.enable_flags());
+  EXPECT_EQ(0u, provider.enable_level());
+
+  EXPECT_FALSE(provider.ShouldLog(TRACE_LEVEL_FATAL, 0xffffffff));
+
+  // We expect these not to crash.
+  provider.Log(kTestEventClass, 0, TRACE_LEVEL_FATAL, "foo");
+  provider.Log(kTestEventClass, 0, TRACE_LEVEL_FATAL, L"foo");
+
+  EtwMofEvent<1> dummy(kTestEventClass, 0, TRACE_LEVEL_FATAL);
+  DWORD data = 0;
+  dummy.SetField(0, sizeof(data), &data);
+  provider.Log(dummy.get());
+
+  // Placement-new the provider into our buffer.
+  new (buf) EtwTraceProvider(kTestProvider);
+
+  // Registration is now safe.
+  EXPECT_EQ(static_cast<ULONG>(ERROR_SUCCESS), provider.Register());
+
+  // Destruct the instance, this should unregister it.
+  provider.EtwTraceProvider::~EtwTraceProvider();
+
+  // And post-destruction, all of the above should still be safe.
+  EXPECT_EQ(0u, provider.registration_handle());
+  EXPECT_EQ(0u, provider.session_handle());
+  EXPECT_EQ(0u, provider.enable_flags());
+  EXPECT_EQ(0u, provider.enable_level());
+
+  EXPECT_FALSE(provider.ShouldLog(TRACE_LEVEL_FATAL, 0xffffffff));
+
+  // We expect these not to crash.
+  provider.Log(kTestEventClass, 0, TRACE_LEVEL_FATAL, "foo");
+  provider.Log(kTestEventClass, 0, TRACE_LEVEL_FATAL, L"foo");
+  provider.Log(dummy.get());
+}
+
+TEST(EtwTraceProviderTest, Initialize) {
+  EtwTraceProvider provider(kTestProvider);
+
+  EXPECT_EQ(0u, provider.registration_handle());
+  EXPECT_EQ(0u, provider.session_handle());
+  EXPECT_EQ(0u, provider.enable_flags());
+  EXPECT_EQ(0u, provider.enable_level());
+}
+
+TEST(EtwTraceProviderTest, Register) {
+  EtwTraceProvider provider(kTestProvider);
+
+  ASSERT_EQ(static_cast<ULONG>(ERROR_SUCCESS), provider.Register());
+  EXPECT_NE(0u, provider.registration_handle());
+  ASSERT_EQ(static_cast<ULONG>(ERROR_SUCCESS), provider.Unregister());
+  EXPECT_EQ(0u, provider.registration_handle());
+}
+
+TEST(EtwTraceProviderTest, RegisterWithNoNameFails) {
+  EtwTraceProvider provider;
+
+  EXPECT_TRUE(provider.Register() != ERROR_SUCCESS);
+}
+
+TEST(EtwTraceProviderTest, Enable) {
+  EtwTraceProvider provider(kTestProvider);
+
+  ASSERT_EQ(static_cast<ULONG>(ERROR_SUCCESS), provider.Register());
+  EXPECT_NE(0u, provider.registration_handle());
+
+  // No session so far.
+  EXPECT_EQ(0u, provider.session_handle());
+  EXPECT_EQ(0u, provider.enable_flags());
+  EXPECT_EQ(0u, provider.enable_level());
+
+  ASSERT_EQ(static_cast<ULONG>(ERROR_SUCCESS), provider.Unregister());
+  EXPECT_EQ(0u, provider.registration_handle());
+}
diff --git a/base/win/i18n.cc b/base/win/i18n.cc
new file mode 100644
index 0000000..d7017e3
--- /dev/null
+++ b/base/win/i18n.cc
@@ -0,0 +1,171 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/i18n.h"
+
+#include <windows.h>
+
+#include "base/logging.h"
+#include "base/macros.h"
+
+namespace {
+
+// Keep this enum in sync with kLanguageFunctionNames.
+enum LanguageFunction {
+  SYSTEM_LANGUAGES,
+  USER_LANGUAGES,
+  PROCESS_LANGUAGES,
+  THREAD_LANGUAGES,
+  NUM_FUNCTIONS
+};
+
+const char kSystemLanguagesFunctionName[] = "GetSystemPreferredUILanguages";
+const char kUserLanguagesFunctionName[] = "GetUserPreferredUILanguages";
+const char kProcessLanguagesFunctionName[] = "GetProcessPreferredUILanguages";
+const char kThreadLanguagesFunctionName[] = "GetThreadPreferredUILanguages";
+
+// Keep this array in sync with enum LanguageFunction.
+const char *const kLanguageFunctionNames[] = {
+  &kSystemLanguagesFunctionName[0],
+  &kUserLanguagesFunctionName[0],
+  &kProcessLanguagesFunctionName[0],
+  &kThreadLanguagesFunctionName[0]
+};
+
+static_assert(NUM_FUNCTIONS == arraysize(kLanguageFunctionNames),
+              "LanguageFunction enum and kLanguageFunctionNames array must be "
+              "kept in sync");
+
+// Calls one of the MUI Get*PreferredUILanguages functions, placing the result
+// in |languages|.  |function| identifies the function to call and |flags| is
+// the function-specific flags (callers must not specify MUI_LANGUAGE_ID or
+// MUI_LANGUAGE_NAME).  Returns true if at least one language is placed in
+// |languages|.
+bool GetMUIPreferredUILanguageList(LanguageFunction function, ULONG flags,
+                                   std::vector<wchar_t>* languages) {
+  DCHECK(0 <= function && NUM_FUNCTIONS > function);
+  DCHECK_EQ(0U, (flags & (MUI_LANGUAGE_ID | MUI_LANGUAGE_NAME)));
+  DCHECK(languages);
+
+  HMODULE kernel32 = GetModuleHandle(L"kernel32.dll");
+  if (NULL != kernel32) {
+    typedef BOOL (WINAPI* GetPreferredUILanguages_Fn)(
+        DWORD, PULONG, PZZWSTR, PULONG);
+    GetPreferredUILanguages_Fn get_preferred_ui_languages =
+        reinterpret_cast<GetPreferredUILanguages_Fn>(
+            GetProcAddress(kernel32, kLanguageFunctionNames[function]));
+    if (NULL != get_preferred_ui_languages) {
+      const ULONG call_flags = flags | MUI_LANGUAGE_NAME;
+      ULONG language_count = 0;
+      ULONG buffer_length = 0;
+      if (get_preferred_ui_languages(call_flags, &language_count, NULL,
+                                     &buffer_length) &&
+          0 != buffer_length) {
+        languages->resize(buffer_length);
+        if (get_preferred_ui_languages(call_flags, &language_count,
+                                       &(*languages)[0], &buffer_length) &&
+            0 != language_count) {
+          DCHECK(languages->size() == buffer_length);
+          return true;
+        } else {
+          DPCHECK(0 == language_count)
+              << "Failed getting preferred UI languages.";
+        }
+      } else {
+        DPCHECK(0 == buffer_length)
+            << "Failed getting size of preferred UI languages.";
+      }
+    } else {
+      DVLOG(2) << "MUI not available.";
+    }
+  } else {
+    NOTREACHED() << "kernel32.dll not found.";
+  }
+
+  return false;
+}
+
+bool GetUserDefaultUILanguage(std::wstring* language, std::wstring* region) {
+  DCHECK(language);
+
+  LANGID lang_id = ::GetUserDefaultUILanguage();
+  if (LOCALE_CUSTOM_UI_DEFAULT != lang_id) {
+    const LCID locale_id = MAKELCID(lang_id, SORT_DEFAULT);
+    // The max size for LOCALE_SISO639LANGNAME and LOCALE_SISO3166CTRYNAME is
+    // 9 characters, including the terminating null.
+    wchar_t result_buffer[9];
+    int result_length =
+        GetLocaleInfo(locale_id, LOCALE_SISO639LANGNAME, &result_buffer[0],
+                      arraysize(result_buffer));
+    DPCHECK(0 != result_length) << "Failed getting language id";
+    if (1 < result_length) {
+      language->assign(&result_buffer[0], result_length - 1);
+      region->clear();
+      if (SUBLANG_NEUTRAL != SUBLANGID(lang_id)) {
+        result_length =
+            GetLocaleInfo(locale_id, LOCALE_SISO3166CTRYNAME, &result_buffer[0],
+                          arraysize(result_buffer));
+        DPCHECK(0 != result_length) << "Failed getting region id";
+        if (1 < result_length)
+          region->assign(&result_buffer[0], result_length - 1);
+      }
+      return true;
+    }
+  } else {
+    // This is entirely unexpected on pre-Vista, which is the only time we
+    // should try GetUserDefaultUILanguage anyway.
+    NOTREACHED() << "Cannot determine language for a supplemental locale.";
+  }
+  return false;
+}
+
+bool GetPreferredUILanguageList(LanguageFunction function, ULONG flags,
+                                std::vector<std::wstring>* languages) {
+  std::vector<wchar_t> buffer;
+  std::wstring language;
+  std::wstring region;
+
+  if (GetMUIPreferredUILanguageList(function, flags, &buffer)) {
+    std::vector<wchar_t>::const_iterator scan = buffer.begin();
+    language.assign(&*scan);
+    while (!language.empty()) {
+      languages->push_back(language);
+      scan += language.size() + 1;
+      language.assign(&*scan);
+    }
+  } else if (GetUserDefaultUILanguage(&language, &region)) {
+    // Mimic the MUI behavior of putting the neutral version of the lang after
+    // the regional one (e.g., "fr-CA, fr").
+    if (!region.empty())
+      languages->push_back(std::wstring(language)
+                               .append(1, L'-')
+                               .append(region));
+    languages->push_back(language);
+  } else {
+    return false;
+  }
+
+  return true;
+}
+
+}  // namespace
+
+namespace base {
+namespace win {
+namespace i18n {
+
+bool GetUserPreferredUILanguageList(std::vector<std::wstring>* languages) {
+  DCHECK(languages);
+  return GetPreferredUILanguageList(USER_LANGUAGES, 0, languages);
+}
+
+bool GetThreadPreferredUILanguageList(std::vector<std::wstring>* languages) {
+  DCHECK(languages);
+  return GetPreferredUILanguageList(
+      THREAD_LANGUAGES, MUI_MERGE_SYSTEM_FALLBACK | MUI_MERGE_USER_FALLBACK,
+      languages);
+}
+
+}  // namespace i18n
+}  // namespace win
+}  // namespace base
diff --git a/base/win/i18n.h b/base/win/i18n.h
new file mode 100644
index 0000000..9e74d3f
--- /dev/null
+++ b/base/win/i18n.h
@@ -0,0 +1,33 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_I18N_H_
+#define BASE_WIN_I18N_H_
+
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+
+namespace base {
+namespace win {
+namespace i18n {
+
+// Adds to |languages| the list of user preferred UI languages from MUI, if
+// available, falling back on the user default UI language otherwise.  Returns
+// true if at least one language is added.
+BASE_EXPORT bool GetUserPreferredUILanguageList(
+    std::vector<std::wstring>* languages);
+
+// Adds to |languages| the list of thread, process, user, and system preferred
+// UI languages from MUI, if available, falling back on the user default UI
+// language otherwise.  Returns true if at least one language is added.
+BASE_EXPORT bool GetThreadPreferredUILanguageList(
+    std::vector<std::wstring>* languages);
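+
+// A minimal usage sketch (illustrative; what is done with the result is up to
+// the caller):
+//
+//   std::vector<std::wstring> languages;
+//   if (base::win::i18n::GetUserPreferredUILanguageList(&languages)) {
+//     // languages[0] is the most preferred language, e.g. L"en-US".
+//   }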
+
+}  // namespace i18n
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_WIN_I18N_H_
diff --git a/base/win/i18n_unittest.cc b/base/win/i18n_unittest.cc
new file mode 100644
index 0000000..9af6dbf
--- /dev/null
+++ b/base/win/i18n_unittest.cc
@@ -0,0 +1,44 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains unit tests for Windows internationalization functions.
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+#include <stddef.h>
+
+#include "base/win/i18n.h"
+#include "base/win/windows_version.h"
+
+namespace base {
+namespace win {
+namespace i18n {
+
+// Tests that at least one user preferred UI language can be obtained.
+TEST(I18NTest, GetUserPreferredUILanguageList) {
+  std::vector<std::wstring> languages;
+  EXPECT_TRUE(GetUserPreferredUILanguageList(&languages));
+  EXPECT_NE(static_cast<std::vector<std::wstring>::size_type>(0),
+            languages.size());
+  for (std::vector<std::wstring>::const_iterator scan = languages.begin(),
+          end = languages.end(); scan != end; ++scan) {
+    EXPECT_FALSE((*scan).empty());
+  }
+}
+
+// Tests that at least one thread preferred UI language can be obtained.
+TEST(I18NTest, GetThreadPreferredUILanguageList) {
+  std::vector<std::wstring> languages;
+  EXPECT_TRUE(GetThreadPreferredUILanguageList(&languages));
+  EXPECT_NE(static_cast<std::vector<std::wstring>::size_type>(0),
+            languages.size());
+  for (std::vector<std::wstring>::const_iterator scan = languages.begin(),
+          end = languages.end(); scan != end; ++scan) {
+    EXPECT_FALSE((*scan).empty());
+  }
+}
+
+}  // namespace i18n
+}  // namespace win
+}  // namespace base
diff --git a/base/win/iat_patch_function.cc b/base/win/iat_patch_function.cc
new file mode 100644
index 0000000..3cc747b
--- /dev/null
+++ b/base/win/iat_patch_function.cc
@@ -0,0 +1,256 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/iat_patch_function.h"
+
+#include "base/logging.h"
+#include "base/win/patch_util.h"
+#include "base/win/pe_image.h"
+
+namespace base {
+namespace win {
+
+namespace {
+
+struct InterceptFunctionInformation {
+  bool finished_operation;
+  const char* imported_from_module;
+  const char* function_name;
+  void* new_function;
+  void** old_function;
+  IMAGE_THUNK_DATA** iat_thunk;
+  DWORD return_code;
+};
+
+void* GetIATFunction(IMAGE_THUNK_DATA* iat_thunk) {
+  if (NULL == iat_thunk) {
+    NOTREACHED();
+    return NULL;
+  }
+
+  // Works around the 64-bit portability warning:
+  // The Function member inside IMAGE_THUNK_DATA is really a pointer
+  // to the IAT function. IMAGE_THUNK_DATA correctly maps to IMAGE_THUNK_DATA32
+  // or IMAGE_THUNK_DATA64 for correct pointer size.
+  union FunctionThunk {
+    IMAGE_THUNK_DATA thunk;
+    void* pointer;
+  } iat_function;
+
+  iat_function.thunk = *iat_thunk;
+  return iat_function.pointer;
+}
+
+bool InterceptEnumCallback(const base::win::PEImage& image, const char* module,
+                           DWORD ordinal, const char* name, DWORD hint,
+                           IMAGE_THUNK_DATA* iat, void* cookie) {
+  InterceptFunctionInformation* intercept_information =
+    reinterpret_cast<InterceptFunctionInformation*>(cookie);
+
+  if (NULL == intercept_information) {
+    NOTREACHED();
+    return false;
+  }
+
+  DCHECK(module);
+
+  if ((0 == lstrcmpiA(module, intercept_information->imported_from_module)) &&
+     (NULL != name) &&
+     (0 == lstrcmpiA(name, intercept_information->function_name))) {
+    // Save the old pointer.
+    if (NULL != intercept_information->old_function) {
+      *(intercept_information->old_function) = GetIATFunction(iat);
+    }
+
+    if (NULL != intercept_information->iat_thunk) {
+      *(intercept_information->iat_thunk) = iat;
+    }
+
+    // Portability check.
+    static_assert(
+        sizeof(iat->u1.Function) == sizeof(intercept_information->new_function),
+        "unknown IAT thunk format");
+
+    // Patch the function.
+    intercept_information->return_code = internal::ModifyCode(
+        &(iat->u1.Function), &(intercept_information->new_function),
+        sizeof(intercept_information->new_function));
+
+    // Terminate further enumeration.
+    intercept_information->finished_operation = true;
+    return false;
+  }
+
+  return true;
+}
+
+// Helper to intercept a function in an import table of a specific
+// module.
+//
+// Arguments:
+// module_handle          Module to be intercepted
+// imported_from_module   Module that exports the symbol
+// function_name          Name of the API to be intercepted
+// new_function           Interceptor function
+// old_function           Receives the original function pointer
+// iat_thunk              Receives a pointer to the IMAGE_THUNK_DATA entry
+//                        for the API from the import table.
+//
+// Returns: NO_ERROR on success, or a Windows error code
+//          as defined in winerror.h
+DWORD InterceptImportedFunction(HMODULE module_handle,
+                                const char* imported_from_module,
+                                const char* function_name, void* new_function,
+                                void** old_function,
+                                IMAGE_THUNK_DATA** iat_thunk) {
+  if ((NULL == module_handle) || (NULL == imported_from_module) ||
+     (NULL == function_name) || (NULL == new_function)) {
+    NOTREACHED();
+    return ERROR_INVALID_PARAMETER;
+  }
+
+  base::win::PEImage target_image(module_handle);
+  if (!target_image.VerifyMagic()) {
+    NOTREACHED();
+    return ERROR_INVALID_PARAMETER;
+  }
+
+  InterceptFunctionInformation intercept_information = {
+    false,
+    imported_from_module,
+    function_name,
+    new_function,
+    old_function,
+    iat_thunk,
+    ERROR_GEN_FAILURE};
+
+  // First go through the IAT. If we don't find the import we are looking
+  // for in the IAT, search the delay import table.
+  target_image.EnumAllImports(InterceptEnumCallback, &intercept_information);
+  if (!intercept_information.finished_operation) {
+    target_image.EnumAllDelayImports(InterceptEnumCallback,
+                                     &intercept_information);
+  }
+
+  return intercept_information.return_code;
+}
+
+// Restore intercepted IAT entry with the original function.
+//
+// Arguments:
+// intercept_function     Interceptor function
+// original_function      Receives the original function pointer
+//
+// Returns: NO_ERROR on success, or a Windows error code
+//          as defined in winerror.h
+DWORD RestoreImportedFunction(void* intercept_function,
+                              void* original_function,
+                              IMAGE_THUNK_DATA* iat_thunk) {
+  if ((NULL == intercept_function) || (NULL == original_function) ||
+      (NULL == iat_thunk)) {
+    NOTREACHED();
+    return ERROR_INVALID_PARAMETER;
+  }
+
+  if (GetIATFunction(iat_thunk) != intercept_function) {
+    // Check if someone else has intercepted on top of us.
+    // We cannot unpatch in this case; just raise a red flag.
+    NOTREACHED();
+    return ERROR_INVALID_FUNCTION;
+  }
+
+  return internal::ModifyCode(&(iat_thunk->u1.Function), &original_function,
+                              sizeof(original_function));
+}
+
+}  // namespace
+
+IATPatchFunction::IATPatchFunction()
+    : module_handle_(NULL),
+      intercept_function_(NULL),
+      original_function_(NULL),
+      iat_thunk_(NULL) {
+}
+
+IATPatchFunction::~IATPatchFunction() {
+  if (NULL != intercept_function_) {
+    DWORD error = Unpatch();
+    DCHECK_EQ(static_cast<DWORD>(NO_ERROR), error);
+  }
+}
+
+DWORD IATPatchFunction::Patch(const wchar_t* module,
+                              const char* imported_from_module,
+                              const char* function_name,
+                              void* new_function) {
+  HMODULE module_handle = LoadLibraryW(module);
+  if (module_handle == NULL) {
+    NOTREACHED();
+    return GetLastError();
+  }
+
+  DWORD error = PatchFromModule(module_handle, imported_from_module,
+                                function_name, new_function);
+  if (NO_ERROR == error) {
+    module_handle_ = module_handle;
+  } else {
+    FreeLibrary(module_handle);
+  }
+
+  return error;
+}
+
+DWORD IATPatchFunction::PatchFromModule(HMODULE module,
+                                        const char* imported_from_module,
+                                        const char* function_name,
+                                        void* new_function) {
+  DCHECK_EQ(static_cast<void*>(NULL), original_function_);
+  DCHECK_EQ(static_cast<IMAGE_THUNK_DATA*>(NULL), iat_thunk_);
+  DCHECK_EQ(static_cast<void*>(NULL), intercept_function_);
+  DCHECK(module);
+
+  DWORD error = InterceptImportedFunction(module,
+                                          imported_from_module,
+                                          function_name,
+                                          new_function,
+                                          &original_function_,
+                                          &iat_thunk_);
+
+  if (NO_ERROR == error) {
+    DCHECK_NE(original_function_, intercept_function_);
+    intercept_function_ = new_function;
+  }
+
+  return error;
+}
+
+DWORD IATPatchFunction::Unpatch() {
+  DWORD error = RestoreImportedFunction(intercept_function_,
+                                        original_function_,
+                                        iat_thunk_);
+  DCHECK_EQ(static_cast<DWORD>(NO_ERROR), error);
+
+  // Hands off the intercept if we fail to unpatch.
+  // If IATPatchFunction::Unpatch fails during RestoreImportedFunction
+  // it means that we cannot safely unpatch the import address table
+  // patch. In this case it's better to be hands off the intercept, as
+  // trying to unpatch again in the destructor of IATPatchFunction is
+  // not going to be any safer.
+  if (module_handle_)
+    FreeLibrary(module_handle_);
+  module_handle_ = NULL;
+  intercept_function_ = NULL;
+  original_function_ = NULL;
+  iat_thunk_ = NULL;
+
+  return error;
+}
+
+void* IATPatchFunction::original_function() const {
+  DCHECK(is_patched());
+  return original_function_;
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/iat_patch_function.h b/base/win/iat_patch_function.h
new file mode 100644
index 0000000..86ad295
--- /dev/null
+++ b/base/win/iat_patch_function.h
@@ -0,0 +1,81 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_IAT_PATCH_FUNCTION_H_
+#define BASE_WIN_IAT_PATCH_FUNCTION_H_
+
+#include <windows.h>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+
+namespace base {
+namespace win {
+
+// A class that encapsulates Import Address Table patching helpers and restores
+// the original function in the destructor.
+//
+// It intercepts functions that a specific DLL imports from another DLL. This
+// is the case when, for example, we want to intercept the
+// CertDuplicateCertificateContext function (exported from crypt32.dll) as
+// called by wininet.dll.
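+//
+// A minimal usage sketch (the patched module, the intercepted API and the
+// interceptor below are illustrative, not part of this class):
+//
+//   BOOL WINAPI MySetEvent(HANDLE handle) {
+//     // ... custom logic, possibly calling the saved original ...
+//     return TRUE;
+//   }
+//
+//   base::win::IATPatchFunction patch;
+//   DWORD error = patch.Patch(L"some_module.dll", "kernel32.dll", "SetEvent",
+//                             reinterpret_cast<void*>(&MySetEvent));
+//   if (error == NO_ERROR) {
+//     // The original entry is restored by Unpatch() or by the destructor.
+//   }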
+class BASE_EXPORT IATPatchFunction {
+ public:
+  IATPatchFunction();
+  ~IATPatchFunction();
+
+  // Intercepts a function in the import table of a specific module. Saves
+  // the original function and the import table address; these values are
+  // used later during Unpatch().
+  //
+  // Arguments:
+  // module                 Module whose import table is to be patched
+  // imported_from_module   Module that exports 'function_name'
+  // function_name          Name of the API to be intercepted
+  // new_function           Interceptor that replaces the original function
+  //
+  // Returns: Windows error code (winerror.h). NO_ERROR if successful
+  //
+  // Note: Patching a function will make the IAT patch take some "ownership" on
+  // |module|.  It will LoadLibrary(module) to keep the DLL alive until a call
+  // to Unpatch(), which will call FreeLibrary() and allow the module to be
+  // unloaded.  The idea is to help prevent the DLL from going away while a
+  // patch is still active.
+  DWORD Patch(const wchar_t* module,
+              const char* imported_from_module,
+              const char* function_name,
+              void* new_function);
+
+  // Same as Patch(), but uses a handle to a |module| instead of the DLL name.
+  DWORD PatchFromModule(HMODULE module,
+                        const char* imported_from_module,
+                        const char* function_name,
+                        void* new_function);
+
+  // Unpatch the IAT entry using internally saved original
+  // function.
+  //
+  // Returns: Windows error code (winerror.h). NO_ERROR if successful
+  DWORD Unpatch();
+
+  bool is_patched() const {
+    return (NULL != intercept_function_);
+  }
+
+  void* original_function() const;
+
+ private:
+  HMODULE module_handle_;
+  void* intercept_function_;
+  void* original_function_;
+  IMAGE_THUNK_DATA* iat_thunk_;
+
+  DISALLOW_COPY_AND_ASSIGN(IATPatchFunction);
+};
+
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_WIN_IAT_PATCH_FUNCTION_H_
diff --git a/base/win/iunknown_impl.cc b/base/win/iunknown_impl.cc
new file mode 100644
index 0000000..2a88439
--- /dev/null
+++ b/base/win/iunknown_impl.cc
@@ -0,0 +1,42 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/iunknown_impl.h"
+
+namespace base {
+namespace win {
+
+IUnknownImpl::IUnknownImpl()
+    : ref_count_(0) {
+}
+
+IUnknownImpl::~IUnknownImpl() {
+}
+
+ULONG STDMETHODCALLTYPE IUnknownImpl::AddRef() {
+  ref_count_.Increment();
+  return 1;
+}
+
+ULONG STDMETHODCALLTYPE IUnknownImpl::Release() {
+  if (!ref_count_.Decrement()) {
+    delete this;
+    return 0;
+  }
+  return 1;
+}
+
+STDMETHODIMP IUnknownImpl::QueryInterface(REFIID riid, void** ppv) {
+  if (riid == IID_IUnknown) {
+    *ppv = static_cast<IUnknown*>(this);
+    AddRef();
+    return S_OK;
+  }
+
+  *ppv = NULL;
+  return E_NOINTERFACE;
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/iunknown_impl.h b/base/win/iunknown_impl.h
new file mode 100644
index 0000000..b7de205
--- /dev/null
+++ b/base/win/iunknown_impl.h
@@ -0,0 +1,38 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_IUNKNOWN_IMPL_H_
+#define BASE_WIN_IUNKNOWN_IMPL_H_
+
+#include <unknwn.h>
+
+#include "base/atomic_ref_count.h"
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+
+namespace base {
+namespace win {
+
+// IUnknown implementation for other classes to derive from.
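+//
+// A minimal subclass sketch (IMyInterface and IID_IMyInterface are
+// illustrative, not part of base):
+//
+//   class MyObject : public base::win::IUnknownImpl, public IMyInterface {
+//    public:
+//     STDMETHODIMP QueryInterface(REFIID riid, void** ppv) override {
+//       if (riid == IID_IMyInterface) {
+//         *ppv = static_cast<IMyInterface*>(this);
+//         AddRef();
+//         return S_OK;
+//       }
+//       return IUnknownImpl::QueryInterface(riid, ppv);
+//     }
+//     // Release() deletes |this| when the count drops to zero, so instances
+//     // must be heap-allocated.
+//   };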
+class BASE_EXPORT IUnknownImpl : public IUnknown {
+ public:
+  IUnknownImpl();
+
+  ULONG STDMETHODCALLTYPE AddRef() override;
+  ULONG STDMETHODCALLTYPE Release() override;
+
+  // Subclasses should extend this to return any interfaces they provide.
+  STDMETHODIMP QueryInterface(REFIID riid, void** ppv) override;
+
+ protected:
+  virtual ~IUnknownImpl();
+
+ private:
+  AtomicRefCount ref_count_;
+};
+
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_WIN_IUNKNOWN_IMPL_H_
diff --git a/base/win/iunknown_impl_unittest.cc b/base/win/iunknown_impl_unittest.cc
new file mode 100644
index 0000000..c6c3539
--- /dev/null
+++ b/base/win/iunknown_impl_unittest.cc
@@ -0,0 +1,49 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/iunknown_impl.h"
+
+#include "base/win/scoped_com_initializer.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace win {
+
+class TestIUnknownImplSubclass : public IUnknownImpl {
+ public:
+  TestIUnknownImplSubclass() {
+    ++instance_count;
+  }
+  ~TestIUnknownImplSubclass() override { --instance_count; }
+  static int instance_count;
+};
+
+// static
+int TestIUnknownImplSubclass::instance_count = 0;
+
+TEST(IUnknownImplTest, IUnknownImpl) {
+  ScopedCOMInitializer com_initializer;
+
+  EXPECT_EQ(0, TestIUnknownImplSubclass::instance_count);
+  IUnknown* u = new TestIUnknownImplSubclass();
+
+  EXPECT_EQ(1, TestIUnknownImplSubclass::instance_count);
+
+  EXPECT_EQ(1u, u->AddRef());
+  EXPECT_EQ(1u, u->AddRef());
+
+  IUnknown* other = NULL;
+  EXPECT_EQ(E_NOINTERFACE, u->QueryInterface(
+      IID_IDispatch, reinterpret_cast<void**>(&other)));
+  EXPECT_EQ(S_OK, u->QueryInterface(
+      IID_IUnknown, reinterpret_cast<void**>(&other)));
+  other->Release();
+
+  EXPECT_EQ(1u, u->Release());
+  EXPECT_EQ(0u, u->Release());
+  EXPECT_EQ(0, TestIUnknownImplSubclass::instance_count);
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/message_window.cc b/base/win/message_window.cc
new file mode 100644
index 0000000..8858b41
--- /dev/null
+++ b/base/win/message_window.cc
@@ -0,0 +1,165 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/message_window.h"
+
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/win/current_module.h"
+#include "base/win/wrapped_window_proc.h"
+
+const wchar_t kMessageWindowClassName[] = L"Chrome_MessageWindow";
+
+namespace base {
+namespace win {
+
+// Used along with LazyInstance to register a window class for message-only
+// windows created by MessageWindow.
+class MessageWindow::WindowClass {
+ public:
+  WindowClass();
+  ~WindowClass();
+
+  ATOM atom() { return atom_; }
+  HINSTANCE instance() { return instance_; }
+
+ private:
+  ATOM atom_;
+  HINSTANCE instance_;
+
+  DISALLOW_COPY_AND_ASSIGN(WindowClass);
+};
+
+static LazyInstance<MessageWindow::WindowClass>::DestructorAtExit
+    g_window_class = LAZY_INSTANCE_INITIALIZER;
+
+MessageWindow::WindowClass::WindowClass()
+    : atom_(0), instance_(CURRENT_MODULE()) {
+  WNDCLASSEX window_class;
+  window_class.cbSize = sizeof(window_class);
+  window_class.style = 0;
+  window_class.lpfnWndProc = &base::win::WrappedWindowProc<WindowProc>;
+  window_class.cbClsExtra = 0;
+  window_class.cbWndExtra = 0;
+  window_class.hInstance = instance_;
+  window_class.hIcon = NULL;
+  window_class.hCursor = NULL;
+  window_class.hbrBackground = NULL;
+  window_class.lpszMenuName = NULL;
+  window_class.lpszClassName = kMessageWindowClassName;
+  window_class.hIconSm = NULL;
+  atom_ = RegisterClassEx(&window_class);
+  if (atom_ == 0) {
+    PLOG(ERROR)
+        << "Failed to register the window class for a message-only window";
+  }
+}
+
+MessageWindow::WindowClass::~WindowClass() {
+  if (atom_ != 0) {
+    BOOL result = UnregisterClass(MAKEINTATOM(atom_), instance_);
+    // Hitting this DCHECK usually means that some MessageWindow objects were
+    // leaked. For example, not calling
+    // ui::Clipboard::DestroyClipboardForCurrentThread() results in a leaked
+    // MessageWindow.
+    DCHECK(result);
+  }
+}
+
+MessageWindow::MessageWindow()
+    : window_(NULL) {
+}
+
+MessageWindow::~MessageWindow() {
+  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
+  if (window_ != NULL) {
+    BOOL result = DestroyWindow(window_);
+    DCHECK(result);
+  }
+}
+
+bool MessageWindow::Create(MessageCallback message_callback) {
+  return DoCreate(std::move(message_callback), NULL);
+}
+
+bool MessageWindow::CreateNamed(MessageCallback message_callback,
+                                const string16& window_name) {
+  return DoCreate(std::move(message_callback), window_name.c_str());
+}
+
+// static
+HWND MessageWindow::FindWindow(const string16& window_name) {
+  return FindWindowEx(HWND_MESSAGE, NULL, kMessageWindowClassName,
+                      window_name.c_str());
+}
+
+bool MessageWindow::DoCreate(MessageCallback message_callback,
+                             const wchar_t* window_name) {
+  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+  DCHECK(message_callback_.is_null());
+  DCHECK(!window_);
+
+  message_callback_ = std::move(message_callback);
+
+  WindowClass& window_class = g_window_class.Get();
+  window_ = CreateWindow(MAKEINTATOM(window_class.atom()), window_name, 0, 0, 0,
+                         0, 0, HWND_MESSAGE, 0, window_class.instance(), this);
+  if (!window_) {
+    PLOG(ERROR) << "Failed to create a message-only window";
+    return false;
+  }
+
+  return true;
+}
+
+// static
+LRESULT CALLBACK MessageWindow::WindowProc(HWND hwnd,
+                                           UINT message,
+                                           WPARAM wparam,
+                                           LPARAM lparam) {
+  MessageWindow* self = reinterpret_cast<MessageWindow*>(
+      GetWindowLongPtr(hwnd, GWLP_USERDATA));
+
+  switch (message) {
+    // Set up |self| before handling WM_CREATE.
+    case WM_CREATE: {
+      CREATESTRUCT* cs = reinterpret_cast<CREATESTRUCT*>(lparam);
+      self = reinterpret_cast<MessageWindow*>(cs->lpCreateParams);
+
+      // Make |hwnd| available to the message handler. At this point control
+      // hasn't returned from CreateWindow() yet.
+      self->window_ = hwnd;
+
+      // Store a pointer to |self| in the window's user data.
+      SetLastError(ERROR_SUCCESS);
+      LONG_PTR result = SetWindowLongPtr(
+          hwnd, GWLP_USERDATA, reinterpret_cast<LONG_PTR>(self));
+      CHECK(result != 0 || GetLastError() == ERROR_SUCCESS);
+      break;
+    }
+
+    // Clear the pointer to stop routing messages to |self| once WM_DESTROY
+    // is received.
+    case WM_DESTROY: {
+      SetLastError(ERROR_SUCCESS);
+      LONG_PTR result = SetWindowLongPtr(hwnd, GWLP_USERDATA, NULL);
+      CHECK(result != 0 || GetLastError() == ERROR_SUCCESS);
+      break;
+    }
+  }
+
+  // Handle the message.
+  if (self) {
+    LRESULT message_result;
+    if (self->message_callback_.Run(message, wparam, lparam, &message_result))
+      return message_result;
+  }
+
+  return DefWindowProc(hwnd, message, wparam, lparam);
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/message_window.h b/base/win/message_window.h
new file mode 100644
index 0000000..2fef480
--- /dev/null
+++ b/base/win/message_window.h
@@ -0,0 +1,74 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_MESSAGE_WINDOW_H_
+#define BASE_WIN_MESSAGE_WINDOW_H_
+
+#include <windows.h>
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/strings/string16.h"
+#include "base/threading/thread_checker.h"
+
+namespace base {
+namespace win {
+
+// Implements a message-only window.
+class BASE_EXPORT MessageWindow {
+ public:
+  // Used to register a process-wide message window class.
+  class WindowClass;
+
+  // Implement this callback to handle messages received by the message window.
+  // If the callback returns |false|, the message is passed on to
+  // DefWindowProc(). Otherwise, |*result| is returned by the window procedure.
+  using MessageCallback = base::RepeatingCallback<
+      bool(UINT message, WPARAM wparam, LPARAM lparam, LRESULT* result)>;
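+  //
+  // A minimal usage sketch (HandleMessage is an illustrative callback, not
+  // part of this class):
+  //
+  //   bool HandleMessage(UINT message, WPARAM wparam, LPARAM lparam,
+  //                      LRESULT* result) {
+  //     if (message != WM_USER)
+  //       return false;  // Fall through to DefWindowProc().
+  //     *result = 1;
+  //     return true;
+  //   }
+  //
+  //   base::win::MessageWindow window;
+  //   if (window.Create(base::Bind(&HandleMessage)))
+  //     SendMessage(window.hwnd(), WM_USER, 0, 0);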
+
+  MessageWindow();
+  ~MessageWindow();
+
+  // Creates a message-only window. Incoming messages are passed to
+  // |message_callback|. |message_callback| must outlive |this|.
+  bool Create(MessageCallback message_callback);
+
+  // Same as Create() but assigns the name to the created window.
+  bool CreateNamed(MessageCallback message_callback,
+                   const string16& window_name);
+
+  HWND hwnd() const { return window_; }
+
+  // Retrieves a handle of the first message-only window with matching
+  // |window_name|.
+  static HWND FindWindow(const string16& window_name);
+
+ private:
+  // Give |WindowClass| access to WindowProc().
+  friend class WindowClass;
+
+  // Contains the actual window creation code.
+  bool DoCreate(MessageCallback message_callback, const wchar_t* window_name);
+
+  // Invoked by the OS to process incoming window messages.
+  static LRESULT CALLBACK WindowProc(HWND hwnd, UINT message, WPARAM wparam,
+                                     LPARAM lparam);
+
+  // Invoked to handle messages received by the window.
+  MessageCallback message_callback_;
+
+  // Handle of the message-only window.
+  HWND window_;
+
+  THREAD_CHECKER(thread_checker_);
+
+  DISALLOW_COPY_AND_ASSIGN(MessageWindow);
+};
+
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_WIN_MESSAGE_WINDOW_H_
diff --git a/base/win/message_window_unittest.cc b/base/win/message_window_unittest.cc
new file mode 100644
index 0000000..00248bf
--- /dev/null
+++ b/base/win/message_window_unittest.cc
@@ -0,0 +1,61 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/bind.h"
+#include "base/guid.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/win/message_window.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+bool HandleMessage(
+    UINT message, WPARAM wparam, LPARAM lparam, LRESULT* result) {
+  // Return |wparam| as the result of the WM_USER message.
+  if (message == WM_USER) {
+    *result = wparam;
+    return true;
+  }
+
+  return false;
+}
+
+}  // namespace
+
+// Checks that a window can be created.
+TEST(MessageWindowTest, Create) {
+  win::MessageWindow window;
+  EXPECT_TRUE(window.Create(base::Bind(&HandleMessage)));
+}
+
+// Checks that a named window can be created.
+TEST(MessageWindowTest, CreateNamed) {
+  win::MessageWindow window;
+  EXPECT_TRUE(window.CreateNamed(base::Bind(&HandleMessage),
+              UTF8ToUTF16("test_message_window")));
+}
+
+// Verifies that the created window can receive messages.
+TEST(MessageWindowTest, SendMessage) {
+  win::MessageWindow window;
+  EXPECT_TRUE(window.Create(base::Bind(&HandleMessage)));
+
+  EXPECT_EQ(SendMessage(window.hwnd(), WM_USER, 100, 0), 100);
+}
+
+// Verifies that a named window can be found by name.
+TEST(MessageWindowTest, FindWindow) {
+  string16 name = UTF8ToUTF16(base::GenerateGUID());
+  win::MessageWindow window;
+  EXPECT_TRUE(window.CreateNamed(base::Bind(&HandleMessage), name));
+
+  HWND hwnd = win::MessageWindow::FindWindow(name);
+  EXPECT_TRUE(hwnd != NULL);
+  EXPECT_EQ(SendMessage(hwnd, WM_USER, 200, 0), 200);
+}
+
+}  // namespace base
diff --git a/base/win/object_watcher.cc b/base/win/object_watcher.cc
new file mode 100644
index 0000000..4c1c235
--- /dev/null
+++ b/base/win/object_watcher.cc
@@ -0,0 +1,123 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/object_watcher.h"
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/threading/sequenced_task_runner_handle.h"
+
+#include <windows.h>
+
+namespace base {
+namespace win {
+
+//-----------------------------------------------------------------------------
+
+ObjectWatcher::ObjectWatcher() : weak_factory_(this) {}
+
+ObjectWatcher::~ObjectWatcher() {
+  StopWatching();
+}
+
+bool ObjectWatcher::StartWatchingOnce(HANDLE object, Delegate* delegate) {
+  return StartWatchingInternal(object, delegate, true);
+}
+
+bool ObjectWatcher::StartWatchingMultipleTimes(HANDLE object,
+                                               Delegate* delegate) {
+  return StartWatchingInternal(object, delegate, false);
+}
+
+bool ObjectWatcher::StopWatching() {
+  if (!wait_object_)
+    return false;
+
+  // Make sure ObjectWatcher is used in a sequenced fashion.
+  DCHECK(task_runner_->RunsTasksInCurrentSequence());
+
+  // Blocking call to cancel the wait. Any callbacks already in progress will
+  // finish before we return from this call.
+  if (!UnregisterWaitEx(wait_object_, INVALID_HANDLE_VALUE)) {
+    DPLOG(FATAL) << "UnregisterWaitEx failed";
+    return false;
+  }
+
+  Reset();
+  return true;
+}
+
+bool ObjectWatcher::IsWatching() const {
+  return object_ != nullptr;
+}
+
+HANDLE ObjectWatcher::GetWatchedObject() const {
+  return object_;
+}
+
+// static
+void CALLBACK ObjectWatcher::DoneWaiting(void* param, BOOLEAN timed_out) {
+  DCHECK(!timed_out);
+
+  // The destructor blocks on any callbacks that are in flight, so |param| is
+  // always a pointer to a valid ObjectWatcher.
+  ObjectWatcher* that = static_cast<ObjectWatcher*>(param);
+  that->task_runner_->PostTask(FROM_HERE, that->callback_);
+  if (that->run_once_)
+    that->callback_.Reset();
+}
+
+bool ObjectWatcher::StartWatchingInternal(HANDLE object, Delegate* delegate,
+                                          bool execute_only_once) {
+  DCHECK(delegate);
+  DCHECK(!wait_object_) << "Already watching an object";
+  DCHECK(SequencedTaskRunnerHandle::IsSet());
+
+  task_runner_ = SequencedTaskRunnerHandle::Get();
+
+  run_once_ = execute_only_once;
+
+  // Since our job is simply to notice when an object is signaled and report
+  // the result back to this sequence, we can run on a Windows wait thread.
+  DWORD wait_flags = WT_EXECUTEINWAITTHREAD;
+  if (run_once_)
+    wait_flags |= WT_EXECUTEONLYONCE;
+
+  // DoneWaiting can be synchronously called from RegisterWaitForSingleObject,
+  // so set up all state now.
+  callback_ =
+      Bind(&ObjectWatcher::Signal, weak_factory_.GetWeakPtr(), delegate);
+  object_ = object;
+
+  if (!RegisterWaitForSingleObject(&wait_object_, object, DoneWaiting,
+                                   this, INFINITE, wait_flags)) {
+    DPLOG(FATAL) << "RegisterWaitForSingleObject failed";
+    Reset();
+    return false;
+  }
+
+  return true;
+}
+
+void ObjectWatcher::Signal(Delegate* delegate) {
+  // Signaling the delegate may result in our destruction or a nested call to
+  // StartWatching(). As a result, we save any state we need and clear previous
+  // watcher state before signaling the delegate.
+  HANDLE object = object_;
+  if (run_once_)
+    StopWatching();
+  delegate->OnObjectSignaled(object);
+}
+
+void ObjectWatcher::Reset() {
+  callback_.Reset();
+  object_ = nullptr;
+  wait_object_ = nullptr;
+  task_runner_ = nullptr;
+  run_once_ = true;
+  weak_factory_.InvalidateWeakPtrs();
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/object_watcher.h b/base/win/object_watcher.h
new file mode 100644
index 0000000..b7ed76d
--- /dev/null
+++ b/base/win/object_watcher.h
@@ -0,0 +1,131 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_OBJECT_WATCHER_H_
+#define BASE_WIN_OBJECT_WATCHER_H_
+
+#include "base/win/windows_types.h"
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/weak_ptr.h"
+#include "base/sequenced_task_runner.h"
+
+namespace base {
+namespace win {
+
+// A class that provides a means to asynchronously wait for a Windows object to
+// become signaled.  It is an abstraction around RegisterWaitForSingleObject
+// that provides a notification callback, OnObjectSignaled, that runs back on
+// the origin sequence (i.e., the sequence that called StartWatching).
+//
+// This class acts like a smart pointer such that when it goes out-of-scope,
+// UnregisterWaitEx is automatically called, and any in-flight notification is
+// suppressed.
+//
+// The waiting handle MUST NOT be closed while watching is in progress. If this
+// handle is closed while the wait is still pending, the behavior is undefined
+// (see MSDN:RegisterWaitForSingleObject).
+//
+// Typical usage:
+//
+//   class MyClass : public base::win::ObjectWatcher::Delegate {
+//    public:
+//     void DoStuffWhenSignaled(HANDLE object) {
+//       watcher_.StartWatchingOnce(object, this);
+//     }
+//     void OnObjectSignaled(HANDLE object) override {
+//       // OK, time to do stuff!
+//     }
+//    private:
+//     base::win::ObjectWatcher watcher_;
+//   };
+//
+// In the above example, MyClass wants to "do stuff" when object becomes
+// signaled.  ObjectWatcher makes this task easy.  When MyClass goes out of
+// scope, the watcher_ will be destroyed, and there is no need to worry about
+// OnObjectSignaled being called on a deleted MyClass pointer.  Easy!
+// If the object is already signaled before being watched, OnObjectSignaled is
+// still called after (but not necessarily immediately after) the watch is
+// started.
+//
+// NOTE: Except for the constructor, all public methods of this class must be
+// called in sequence, in a scope where SequencedTaskRunnerHandle::IsSet().
+class BASE_EXPORT ObjectWatcher {
+ public:
+  class BASE_EXPORT Delegate {
+   public:
+    virtual ~Delegate() {}
+    // Called from the sequence that started the watch when a signaled object is
+    // detected. To continue watching the object, StartWatching must be called
+    // again.
+    virtual void OnObjectSignaled(HANDLE object) = 0;
+  };
+
+  ObjectWatcher();
+  ~ObjectWatcher();
+
+  // When the object is signaled, the given delegate is notified on the sequence
+  // where StartWatchingOnce is called. The ObjectWatcher is not responsible for
+  // deleting the delegate.
+  // Returns whether watching was successfully initiated.
+  bool StartWatchingOnce(HANDLE object, Delegate* delegate);
+
+  // Notifies the delegate, on the sequence where this method is called, each
+  // time the object is signaled. By definition, the handle must be an
+  // auto-reset object. The caller must ensure that it (or any Windows system
+  // code) doesn't reset the event, or else the delegate won't be called.
+  // Returns whether watching was successfully initiated.
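+  //
+  // A minimal sketch (the event and delegate are illustrative and owned by
+  // the caller):
+  //
+  //   // An auto-reset event, initially unsignaled.
+  //   HANDLE event = ::CreateEvent(nullptr, FALSE, FALSE, nullptr);
+  //   watcher.StartWatchingMultipleTimes(event, &delegate);
+  //   ::SetEvent(event);  // The delegate is notified; the event auto-resets.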
+  bool StartWatchingMultipleTimes(HANDLE object, Delegate* delegate);
+
+  // Stops watching.  Does nothing if the watch has already completed.  If the
+  // watch is still active, then it is canceled, and the associated delegate is
+  // not notified.
+  //
+  // Returns true if the watch was canceled.  Otherwise, false is returned.
+  bool StopWatching();
+
+  // Returns true if currently watching an object.
+  bool IsWatching() const;
+
+  // Returns the handle of the object being watched.
+  HANDLE GetWatchedObject() const;
+
+ private:
+  // Called on a background thread when done waiting.
+  static void CALLBACK DoneWaiting(void* param, BOOLEAN timed_out);
+
+  // Helper used by StartWatchingOnce and StartWatchingMultipleTimes.
+  bool StartWatchingInternal(HANDLE object, Delegate* delegate,
+                             bool execute_only_once);
+
+  void Signal(Delegate* delegate);
+
+  void Reset();
+
+  // A callback pre-bound to Signal() that is posted to the caller's task runner
+  // when the wait completes.
+  Closure callback_;
+
+  // The object being watched.
+  HANDLE object_ = nullptr;
+
+  // The wait handle returned by RegisterWaitForSingleObject.
+  HANDLE wait_object_ = nullptr;
+
+  // The task runner of the sequence on which the watch was started.
+  scoped_refptr<SequencedTaskRunner> task_runner_;
+
+  bool run_once_ = true;
+
+  WeakPtrFactory<ObjectWatcher> weak_factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(ObjectWatcher);
+};
+
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_WIN_OBJECT_WATCHER_H_
diff --git a/base/win/object_watcher_unittest.cc b/base/win/object_watcher_unittest.cc
new file mode 100644
index 0000000..5aa3891
--- /dev/null
+++ b/base/win/object_watcher_unittest.cc
@@ -0,0 +1,221 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/object_watcher.h"
+
+#include <process.h>
+
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace win {
+
+namespace {
+
+class QuitDelegate : public ObjectWatcher::Delegate {
+ public:
+  void OnObjectSignaled(HANDLE object) override {
+    RunLoop::QuitCurrentWhenIdleDeprecated();
+  }
+};
+
+class DecrementCountDelegate : public ObjectWatcher::Delegate {
+ public:
+  explicit DecrementCountDelegate(int* counter) : counter_(counter) {
+  }
+  void OnObjectSignaled(HANDLE object) override { --(*counter_); }
+
+ private:
+  int* counter_;
+};
+
+void RunTest_BasicSignal(MessageLoop::Type message_loop_type) {
+  MessageLoop message_loop(message_loop_type);
+
+  ObjectWatcher watcher;
+  EXPECT_FALSE(watcher.IsWatching());
+
+  // A manual-reset event that is not yet signaled.
+  HANDLE event = CreateEvent(NULL, TRUE, FALSE, NULL);
+
+  QuitDelegate delegate;
+  bool ok = watcher.StartWatchingOnce(event, &delegate);
+  EXPECT_TRUE(ok);
+  EXPECT_TRUE(watcher.IsWatching());
+  EXPECT_EQ(event, watcher.GetWatchedObject());
+
+  SetEvent(event);
+
+  RunLoop().Run();
+
+  EXPECT_FALSE(watcher.IsWatching());
+  CloseHandle(event);
+}
+
+void RunTest_BasicCancel(MessageLoop::Type message_loop_type) {
+  MessageLoop message_loop(message_loop_type);
+
+  ObjectWatcher watcher;
+
+  // A manual-reset event that is not yet signaled.
+  HANDLE event = CreateEvent(NULL, TRUE, FALSE, NULL);
+
+  QuitDelegate delegate;
+  bool ok = watcher.StartWatchingOnce(event, &delegate);
+  EXPECT_TRUE(ok);
+
+  watcher.StopWatching();
+
+  CloseHandle(event);
+}
+
+void RunTest_CancelAfterSet(MessageLoop::Type message_loop_type) {
+  MessageLoop message_loop(message_loop_type);
+
+  ObjectWatcher watcher;
+
+  int counter = 1;
+  DecrementCountDelegate delegate(&counter);
+
+  // A manual-reset event that is not yet signaled.
+  HANDLE event = CreateEvent(NULL, TRUE, FALSE, NULL);
+
+  bool ok = watcher.StartWatchingOnce(event, &delegate);
+  EXPECT_TRUE(ok);
+
+  SetEvent(event);
+
+  // Let the background thread do its business.
+  Sleep(30);
+
+  watcher.StopWatching();
+
+  RunLoop().RunUntilIdle();
+
+  // Our delegate should not have fired.
+  EXPECT_EQ(1, counter);
+
+  CloseHandle(event);
+}
+
+void RunTest_SignalBeforeWatch(MessageLoop::Type message_loop_type) {
+  MessageLoop message_loop(message_loop_type);
+
+  ObjectWatcher watcher;
+
+  // A manual-reset event that is signaled before we begin watching.
+  HANDLE event = CreateEvent(NULL, TRUE, TRUE, NULL);
+
+  QuitDelegate delegate;
+  bool ok = watcher.StartWatchingOnce(event, &delegate);
+  EXPECT_TRUE(ok);
+
+  RunLoop().Run();
+
+  EXPECT_FALSE(watcher.IsWatching());
+  CloseHandle(event);
+}
+
+void RunTest_OutlivesMessageLoop(MessageLoop::Type message_loop_type) {
+  // Simulate a MessageLoop that dies before an ObjectWatcher.  This ordinarily
+  // doesn't happen when people use the Thread class, but it can happen when
+  // people use the Singleton pattern or atexit.
+  HANDLE event = CreateEvent(NULL, TRUE, FALSE, NULL);  // not signaled
+  {
+    ObjectWatcher watcher;
+    {
+      MessageLoop message_loop(message_loop_type);
+
+      QuitDelegate delegate;
+      watcher.StartWatchingOnce(event, &delegate);
+    }
+  }
+  CloseHandle(event);
+}
+
+class QuitAfterMultipleDelegate : public ObjectWatcher::Delegate {
+ public:
+  QuitAfterMultipleDelegate(HANDLE event, int iterations)
+      : event_(event), iterations_(iterations) {}
+  void OnObjectSignaled(HANDLE object) override {
+    if (--iterations_) {
+      SetEvent(event_);
+    } else {
+      RunLoop::QuitCurrentWhenIdleDeprecated();
+    }
+  }
+
+ private:
+  HANDLE event_;
+  int iterations_;
+};
+
+void RunTest_ExecuteMultipleTimes(MessageLoop::Type message_loop_type) {
+  MessageLoop message_loop(message_loop_type);
+
+  ObjectWatcher watcher;
+  EXPECT_FALSE(watcher.IsWatching());
+
+  // An auto-reset event that is not yet signaled.
+  HANDLE event = CreateEvent(NULL, FALSE, FALSE, NULL);
+
+  QuitAfterMultipleDelegate delegate(event, 2);
+  bool ok = watcher.StartWatchingMultipleTimes(event, &delegate);
+  EXPECT_TRUE(ok);
+  EXPECT_TRUE(watcher.IsWatching());
+  EXPECT_EQ(event, watcher.GetWatchedObject());
+
+  SetEvent(event);
+
+  RunLoop().Run();
+
+  EXPECT_TRUE(watcher.IsWatching());
+  EXPECT_TRUE(watcher.StopWatching());
+  CloseHandle(event);
+}
+
+}  // namespace
+
+//-----------------------------------------------------------------------------
+
+TEST(ObjectWatcherTest, BasicSignal) {
+  RunTest_BasicSignal(MessageLoop::TYPE_DEFAULT);
+  RunTest_BasicSignal(MessageLoop::TYPE_IO);
+  RunTest_BasicSignal(MessageLoop::TYPE_UI);
+}
+
+TEST(ObjectWatcherTest, BasicCancel) {
+  RunTest_BasicCancel(MessageLoop::TYPE_DEFAULT);
+  RunTest_BasicCancel(MessageLoop::TYPE_IO);
+  RunTest_BasicCancel(MessageLoop::TYPE_UI);
+}
+
+TEST(ObjectWatcherTest, CancelAfterSet) {
+  RunTest_CancelAfterSet(MessageLoop::TYPE_DEFAULT);
+  RunTest_CancelAfterSet(MessageLoop::TYPE_IO);
+  RunTest_CancelAfterSet(MessageLoop::TYPE_UI);
+}
+
+TEST(ObjectWatcherTest, SignalBeforeWatch) {
+  RunTest_SignalBeforeWatch(MessageLoop::TYPE_DEFAULT);
+  RunTest_SignalBeforeWatch(MessageLoop::TYPE_IO);
+  RunTest_SignalBeforeWatch(MessageLoop::TYPE_UI);
+}
+
+TEST(ObjectWatcherTest, OutlivesMessageLoop) {
+  RunTest_OutlivesMessageLoop(MessageLoop::TYPE_DEFAULT);
+  RunTest_OutlivesMessageLoop(MessageLoop::TYPE_IO);
+  RunTest_OutlivesMessageLoop(MessageLoop::TYPE_UI);
+}
+
+TEST(ObjectWatcherTest, ExecuteMultipleTimes) {
+  RunTest_ExecuteMultipleTimes(MessageLoop::TYPE_DEFAULT);
+  RunTest_ExecuteMultipleTimes(MessageLoop::TYPE_IO);
+  RunTest_ExecuteMultipleTimes(MessageLoop::TYPE_UI);
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/patch_util.cc b/base/win/patch_util.cc
new file mode 100644
index 0000000..eb3bd65
--- /dev/null
+++ b/base/win/patch_util.cc
@@ -0,0 +1,52 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/patch_util.h"
+
+#include "base/logging.h"
+
+namespace base {
+namespace win {
+namespace internal {
+
+DWORD ModifyCode(void* destination, const void* source, int length) {
+  if ((NULL == destination) || (NULL == source) || (0 == length)) {
+    NOTREACHED();
+    return ERROR_INVALID_PARAMETER;
+  }
+
+  // Change the page protection so that we can write.
+  MEMORY_BASIC_INFORMATION memory_info;
+  DWORD error = NO_ERROR;
+  DWORD old_page_protection = 0;
+
+  if (!VirtualQuery(destination, &memory_info, sizeof(memory_info))) {
+    error = GetLastError();
+    return error;
+  }
+
+  DWORD is_executable = (PAGE_EXECUTE | PAGE_EXECUTE_READ |
+                         PAGE_EXECUTE_READWRITE | PAGE_EXECUTE_WRITECOPY) &
+                        memory_info.Protect;
+
+  if (VirtualProtect(destination, length,
+                     is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE,
+                     &old_page_protection)) {
+    // Write the data.
+    CopyMemory(destination, source, length);
+
+    // Restore the old page protection.
+    error = ERROR_SUCCESS;
+    VirtualProtect(destination, length, old_page_protection,
+                   &old_page_protection);
+  } else {
+    error = GetLastError();
+  }
+
+  return error;
+}
+
+}  // namespace internal
+}  // namespace win
+}  // namespace base
diff --git a/base/win/patch_util.h b/base/win/patch_util.h
new file mode 100644
index 0000000..035fb83
--- /dev/null
+++ b/base/win/patch_util.h
@@ -0,0 +1,25 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_PATCH_UTIL_H_
+#define BASE_WIN_PATCH_UTIL_H_
+
+#include <windows.h>
+
+#include "base/base_export.h"
+
+namespace base {
+namespace win {
+namespace internal {
+
+// Copies |length| bytes from |source| to |destination|, temporarily setting
+// |destination| to writable. Returns a Windows error code or NO_ERROR if
+// successful.
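+//
+// A minimal sketch (|target| is an illustrative address inside the current
+// process; patching code this way is inherently unsafe):
+//
+//   unsigned char int3 = 0xCC;  // x86 breakpoint instruction.
+//   DWORD error =
+//       base::win::internal::ModifyCode(target, &int3, sizeof(int3));
+//   if (error != NO_ERROR) {
+//     // The page protection could not be queried or changed.
+//   }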
+BASE_EXPORT DWORD ModifyCode(void* destination, const void* source, int length);
+
+}  // namespace internal
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_WIN_PATCH_UTIL_H_
diff --git a/base/win/pe_image.cc b/base/win/pe_image.cc
new file mode 100644
index 0000000..4705348
--- /dev/null
+++ b/base/win/pe_image.cc
@@ -0,0 +1,581 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file implements PEImage, a generic class to manipulate PE files.
+// This file was adapted from GreenBorder's Code.
+
+#include <stddef.h>
+
+#include "base/win/pe_image.h"
+
+namespace base {
+namespace win {
+
+// Structure to perform imports enumerations.
+struct EnumAllImportsStorage {
+  PEImage::EnumImportsFunction callback;
+  PVOID cookie;
+};
+
+namespace {
+
+// PdbInfo signature: the bytes "RSDS" read as a little-endian DWORD.
+const DWORD kPdbInfoSignature = 'SDSR';
+
+// Compare two strings byte by byte on an unsigned basis.
+//   if s1 == s2, return 0
+//   if s1 < s2, return negative
+//   if s1 > s2, return positive
+// May raise an access violation if either input is invalid (e.g. null).
+int StrCmpByByte(LPCSTR s1, LPCSTR s2) {
+  while (*s1 != '\0' && *s1 == *s2) {
+    ++s1;
+    ++s2;
+  }
+
+  return (*reinterpret_cast<const unsigned char*>(s1) -
+          *reinterpret_cast<const unsigned char*>(s2));
+}
+
+struct PdbInfo {
+  DWORD Signature;
+  GUID Guid;
+  DWORD Age;
+  char PdbFileName[1];
+};
+
+}  // namespace
+
+// Callback used to enumerate imports. See EnumImportChunksFunction.
+bool ProcessImportChunk(const PEImage &image, LPCSTR module,
+                        PIMAGE_THUNK_DATA name_table,
+                        PIMAGE_THUNK_DATA iat, PVOID cookie) {
+  EnumAllImportsStorage& storage =
+      *reinterpret_cast<EnumAllImportsStorage*>(cookie);
+
+  return image.EnumOneImportChunk(storage.callback, module, name_table, iat,
+                                  storage.cookie);
+}
+
+// Callback used to enumerate delay imports. See EnumDelayImportChunksFunction.
+bool ProcessDelayImportChunk(const PEImage& image,
+                             PImgDelayDescr delay_descriptor,
+                             LPCSTR module,
+                             PIMAGE_THUNK_DATA name_table,
+                             PIMAGE_THUNK_DATA iat,
+                             PVOID cookie) {
+  EnumAllImportsStorage& storage =
+      *reinterpret_cast<EnumAllImportsStorage*>(cookie);
+
+  return image.EnumOneDelayImportChunk(storage.callback, delay_descriptor,
+                                       module, name_table, iat, storage.cookie);
+}
+
+void PEImage::set_module(HMODULE module) {
+  module_ = module;
+}
+
+PIMAGE_DOS_HEADER PEImage::GetDosHeader() const {
+  return reinterpret_cast<PIMAGE_DOS_HEADER>(module_);
+}
+
+PIMAGE_NT_HEADERS PEImage::GetNTHeaders() const {
+  PIMAGE_DOS_HEADER dos_header = GetDosHeader();
+
+  return reinterpret_cast<PIMAGE_NT_HEADERS>(
+      reinterpret_cast<char*>(dos_header) + dos_header->e_lfanew);
+}
+
+PIMAGE_SECTION_HEADER PEImage::GetSectionHeader(UINT section) const {
+  PIMAGE_NT_HEADERS nt_headers = GetNTHeaders();
+  PIMAGE_SECTION_HEADER first_section = IMAGE_FIRST_SECTION(nt_headers);
+
+  if (section < nt_headers->FileHeader.NumberOfSections)
+    return first_section + section;
+  else
+    return NULL;
+}
+
+WORD PEImage::GetNumSections() const {
+  return GetNTHeaders()->FileHeader.NumberOfSections;
+}
+
+DWORD PEImage::GetImageDirectoryEntrySize(UINT directory) const {
+  PIMAGE_NT_HEADERS nt_headers = GetNTHeaders();
+
+  return nt_headers->OptionalHeader.DataDirectory[directory].Size;
+}
+
+PVOID PEImage::GetImageDirectoryEntryAddr(UINT directory) const {
+  PIMAGE_NT_HEADERS nt_headers = GetNTHeaders();
+
+  return RVAToAddr(
+      nt_headers->OptionalHeader.DataDirectory[directory].VirtualAddress);
+}
+
+PIMAGE_SECTION_HEADER PEImage::GetImageSectionFromAddr(PVOID address) const {
+  PBYTE target = reinterpret_cast<PBYTE>(address);
+  PIMAGE_SECTION_HEADER section;
+
+  for (UINT i = 0; NULL != (section = GetSectionHeader(i)); i++) {
+    // Don't use the virtual RVAToAddr.
+    PBYTE start = reinterpret_cast<PBYTE>(
+                      PEImage::RVAToAddr(section->VirtualAddress));
+
+    DWORD size = section->Misc.VirtualSize;
+
+    if ((start <= target) && (start + size > target))
+      return section;
+  }
+
+  return NULL;
+}
+
+PIMAGE_SECTION_HEADER PEImage::GetImageSectionHeaderByName(
+    LPCSTR section_name) const {
+  if (NULL == section_name)
+    return NULL;
+
+  PIMAGE_SECTION_HEADER ret = NULL;
+  int num_sections = GetNumSections();
+
+  for (int i = 0; i < num_sections; i++) {
+    PIMAGE_SECTION_HEADER section = GetSectionHeader(i);
+    if (0 == _strnicmp(reinterpret_cast<LPCSTR>(section->Name), section_name,
+                       sizeof(section->Name))) {
+      ret = section;
+      break;
+    }
+  }
+
+  return ret;
+}
+
+bool PEImage::GetDebugId(LPGUID guid, LPDWORD age, LPCSTR* pdb_filename) const {
+  DWORD debug_directory_size =
+      GetImageDirectoryEntrySize(IMAGE_DIRECTORY_ENTRY_DEBUG);
+  PIMAGE_DEBUG_DIRECTORY debug_directory =
+      reinterpret_cast<PIMAGE_DEBUG_DIRECTORY>(
+      GetImageDirectoryEntryAddr(IMAGE_DIRECTORY_ENTRY_DEBUG));
+
+  size_t directory_count =
+      debug_directory_size / sizeof(IMAGE_DEBUG_DIRECTORY);
+
+  for (size_t index = 0; index < directory_count; ++index) {
+    if (debug_directory[index].Type == IMAGE_DEBUG_TYPE_CODEVIEW) {
+      PdbInfo* pdb_info = reinterpret_cast<PdbInfo*>(
+          RVAToAddr(debug_directory[index].AddressOfRawData));
+      if (pdb_info->Signature != kPdbInfoSignature) {
+        // Unsupported PdbInfo signature
+        return false;
+      }
+
+      if (guid)
+        *guid = pdb_info->Guid;
+      if (age)
+        *age = pdb_info->Age;
+      if (pdb_filename)
+        *pdb_filename = pdb_info->PdbFileName;
+      return true;
+    }
+  }
+  return false;
+}
+
+PDWORD PEImage::GetExportEntry(LPCSTR name) const {
+  PIMAGE_EXPORT_DIRECTORY exports = GetExportDirectory();
+
+  if (NULL == exports)
+    return NULL;
+
+  WORD ordinal = 0;
+  if (!GetProcOrdinal(name, &ordinal))
+    return NULL;
+
+  PDWORD functions = reinterpret_cast<PDWORD>(
+                         RVAToAddr(exports->AddressOfFunctions));
+
+  return functions + ordinal - exports->Base;
+}
+
+FARPROC PEImage::GetProcAddress(LPCSTR function_name) const {
+  PDWORD export_entry = GetExportEntry(function_name);
+  if (NULL == export_entry)
+    return NULL;
+
+  PBYTE function = reinterpret_cast<PBYTE>(RVAToAddr(*export_entry));
+
+  PBYTE exports = reinterpret_cast<PBYTE>(
+      GetImageDirectoryEntryAddr(IMAGE_DIRECTORY_ENTRY_EXPORT));
+  DWORD size = GetImageDirectoryEntrySize(IMAGE_DIRECTORY_ENTRY_EXPORT);
+
+  // Check for forwarded exports as a special case.
+  if (exports <= function && exports + size > function)
+    return reinterpret_cast<FARPROC>(-1);
+
+  return reinterpret_cast<FARPROC>(function);
+}
+
+bool PEImage::GetProcOrdinal(LPCSTR function_name, WORD *ordinal) const {
+  if (NULL == ordinal)
+    return false;
+
+  PIMAGE_EXPORT_DIRECTORY exports = GetExportDirectory();
+
+  if (NULL == exports)
+    return false;
+
+  if (IsOrdinal(function_name)) {
+    *ordinal = ToOrdinal(function_name);
+  } else {
+    PDWORD names = reinterpret_cast<PDWORD>(RVAToAddr(exports->AddressOfNames));
+    PDWORD lower = names;
+    PDWORD upper = names + exports->NumberOfNames;
+    int cmp = -1;
+
+    // Binary search for the name.
+    while (lower != upper) {
+      PDWORD middle = lower + (upper - lower) / 2;
+      LPCSTR name = reinterpret_cast<LPCSTR>(RVAToAddr(*middle));
+
+      // This may be called by the sandbox before the MSVCRT dll loads, so we
+      // can't use CRT functions here.
+      cmp = StrCmpByByte(function_name, name);
+
+      if (cmp == 0) {
+        lower = middle;
+        break;
+      }
+
+      if (cmp > 0)
+        lower = middle + 1;
+      else
+        upper = middle;
+    }
+
+    if (cmp != 0)
+      return false;
+
+    PWORD ordinals = reinterpret_cast<PWORD>(
+                         RVAToAddr(exports->AddressOfNameOrdinals));
+
+    *ordinal = ordinals[lower - names] + static_cast<WORD>(exports->Base);
+  }
+
+  return true;
+}
+
+bool PEImage::EnumSections(EnumSectionsFunction callback, PVOID cookie) const {
+  PIMAGE_NT_HEADERS nt_headers = GetNTHeaders();
+  UINT num_sections = nt_headers->FileHeader.NumberOfSections;
+  PIMAGE_SECTION_HEADER section = GetSectionHeader(0);
+
+  for (UINT i = 0; i < num_sections; i++, section++) {
+    PVOID section_start = RVAToAddr(section->VirtualAddress);
+    DWORD size = section->Misc.VirtualSize;
+
+    if (!callback(*this, section, section_start, size, cookie))
+      return false;
+  }
+
+  return true;
+}
+
+bool PEImage::EnumExports(EnumExportsFunction callback, PVOID cookie) const {
+  PVOID directory = GetImageDirectoryEntryAddr(IMAGE_DIRECTORY_ENTRY_EXPORT);
+  DWORD size = GetImageDirectoryEntrySize(IMAGE_DIRECTORY_ENTRY_EXPORT);
+
+  // Check if there are any exports at all.
+  if (NULL == directory || 0 == size)
+    return true;
+
+  PIMAGE_EXPORT_DIRECTORY exports = reinterpret_cast<PIMAGE_EXPORT_DIRECTORY>(
+                                        directory);
+  UINT ordinal_base = exports->Base;
+  UINT num_funcs = exports->NumberOfFunctions;
+  UINT num_names = exports->NumberOfNames;
+  PDWORD functions  = reinterpret_cast<PDWORD>(RVAToAddr(
+                          exports->AddressOfFunctions));
+  PDWORD names = reinterpret_cast<PDWORD>(RVAToAddr(exports->AddressOfNames));
+  PWORD ordinals = reinterpret_cast<PWORD>(RVAToAddr(
+                       exports->AddressOfNameOrdinals));
+
+  for (UINT count = 0; count < num_funcs; count++) {
+    PVOID func = RVAToAddr(functions[count]);
+    if (NULL == func)
+      continue;
+
+    // Check for a name.
+    LPCSTR name = NULL;
+    UINT hint;
+    for (hint = 0; hint < num_names; hint++) {
+      if (ordinals[hint] == count) {
+        name = reinterpret_cast<LPCSTR>(RVAToAddr(names[hint]));
+        break;
+      }
+    }
+
+    if (name == NULL)
+      hint = 0;
+
+    // Check for forwarded exports.
+    LPCSTR forward = NULL;
+    if (reinterpret_cast<char*>(func) >= reinterpret_cast<char*>(directory) &&
+        reinterpret_cast<char*>(func) <= reinterpret_cast<char*>(directory) +
+            size) {
+      forward = reinterpret_cast<LPCSTR>(func);
+      func = 0;
+    }
+
+    if (!callback(*this, ordinal_base + count, hint, name, func, forward,
+                  cookie))
+      return false;
+  }
+
+  return true;
+}
+
+bool PEImage::EnumRelocs(EnumRelocsFunction callback, PVOID cookie) const {
+  PVOID directory = GetImageDirectoryEntryAddr(IMAGE_DIRECTORY_ENTRY_BASERELOC);
+  DWORD size = GetImageDirectoryEntrySize(IMAGE_DIRECTORY_ENTRY_BASERELOC);
+  PIMAGE_BASE_RELOCATION base = reinterpret_cast<PIMAGE_BASE_RELOCATION>(
+      directory);
+
+  if (!directory)
+    return true;
+
+  while (size >= sizeof(IMAGE_BASE_RELOCATION) && base->SizeOfBlock &&
+         size >= base->SizeOfBlock) {
+    PWORD reloc = reinterpret_cast<PWORD>(base + 1);
+    UINT num_relocs = (base->SizeOfBlock - sizeof(IMAGE_BASE_RELOCATION)) /
+        sizeof(WORD);
+
+    for (UINT i = 0; i < num_relocs; i++, reloc++) {
+      WORD type = *reloc >> 12;
+      PVOID address = RVAToAddr(base->VirtualAddress + (*reloc & 0x0FFF));
+
+      if (!callback(*this, type, address, cookie))
+        return false;
+    }
+
+    size -= base->SizeOfBlock;
+    base = reinterpret_cast<PIMAGE_BASE_RELOCATION>(
+               reinterpret_cast<char*>(base) + base->SizeOfBlock);
+  }
+
+  return true;
+}
+
+bool PEImage::EnumImportChunks(EnumImportChunksFunction callback,
+                               PVOID cookie) const {
+  DWORD size = GetImageDirectoryEntrySize(IMAGE_DIRECTORY_ENTRY_IMPORT);
+  PIMAGE_IMPORT_DESCRIPTOR import = GetFirstImportChunk();
+
+  if (import == NULL || size < sizeof(IMAGE_IMPORT_DESCRIPTOR))
+    return true;
+
+  for (; import->FirstThunk; import++) {
+    LPCSTR module_name = reinterpret_cast<LPCSTR>(RVAToAddr(import->Name));
+    PIMAGE_THUNK_DATA name_table = reinterpret_cast<PIMAGE_THUNK_DATA>(
+                                       RVAToAddr(import->OriginalFirstThunk));
+    PIMAGE_THUNK_DATA iat = reinterpret_cast<PIMAGE_THUNK_DATA>(
+                                RVAToAddr(import->FirstThunk));
+
+    if (!callback(*this, module_name, name_table, iat, cookie))
+      return false;
+  }
+
+  return true;
+}
+
+bool PEImage::EnumOneImportChunk(EnumImportsFunction callback,
+                                 LPCSTR module_name,
+                                 PIMAGE_THUNK_DATA name_table,
+                                 PIMAGE_THUNK_DATA iat, PVOID cookie) const {
+  if (NULL == name_table)
+    return false;
+
+  for (; name_table && name_table->u1.Ordinal; name_table++, iat++) {
+    LPCSTR name = NULL;
+    WORD ordinal = 0;
+    WORD hint = 0;
+
+    if (IMAGE_SNAP_BY_ORDINAL(name_table->u1.Ordinal)) {
+      ordinal = static_cast<WORD>(IMAGE_ORDINAL32(name_table->u1.Ordinal));
+    } else {
+      PIMAGE_IMPORT_BY_NAME import = reinterpret_cast<PIMAGE_IMPORT_BY_NAME>(
+          RVAToAddr(name_table->u1.ForwarderString));
+
+      hint = import->Hint;
+      name = reinterpret_cast<LPCSTR>(&import->Name);
+    }
+
+    if (!callback(*this, module_name, ordinal, name, hint, iat, cookie))
+      return false;
+  }
+
+  return true;
+}
+
+bool PEImage::EnumAllImports(EnumImportsFunction callback, PVOID cookie) const {
+  EnumAllImportsStorage temp = { callback, cookie };
+  return EnumImportChunks(ProcessImportChunk, &temp);
+}
+
+bool PEImage::EnumDelayImportChunks(EnumDelayImportChunksFunction callback,
+                                    PVOID cookie) const {
+  PVOID directory = GetImageDirectoryEntryAddr(
+                        IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT);
+  DWORD size = GetImageDirectoryEntrySize(IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT);
+  PImgDelayDescr delay_descriptor = reinterpret_cast<PImgDelayDescr>(directory);
+
+  if (directory == NULL || size == 0)
+    return true;
+
+  for (; delay_descriptor->rvaHmod; delay_descriptor++) {
+    PIMAGE_THUNK_DATA name_table;
+    PIMAGE_THUNK_DATA iat;
+    LPCSTR module_name;
+
+    // Check for VC7-style imports, which use RVAs instead of VC6-style
+    // absolute addresses.
+    bool rvas = (delay_descriptor->grAttrs & dlattrRva) != 0;
+
+    if (rvas) {
+      module_name =
+          reinterpret_cast<LPCSTR>(RVAToAddr(delay_descriptor->rvaDLLName));
+      name_table = reinterpret_cast<PIMAGE_THUNK_DATA>(
+          RVAToAddr(delay_descriptor->rvaINT));
+      iat = reinterpret_cast<PIMAGE_THUNK_DATA>(
+          RVAToAddr(delay_descriptor->rvaIAT));
+    } else {
+      // Values in IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT are 32-bit, even on 64-bit
+      // platforms. See section 4.8 of PECOFF image spec rev 8.3.
+      module_name = reinterpret_cast<LPCSTR>(
+          static_cast<uintptr_t>(delay_descriptor->rvaDLLName));
+      name_table = reinterpret_cast<PIMAGE_THUNK_DATA>(
+          static_cast<uintptr_t>(delay_descriptor->rvaINT));
+      iat = reinterpret_cast<PIMAGE_THUNK_DATA>(
+          static_cast<uintptr_t>(delay_descriptor->rvaIAT));
+    }
+
+    if (!callback(*this, delay_descriptor, module_name, name_table, iat,
+                  cookie))
+      return false;
+  }
+
+  return true;
+}
+
+bool PEImage::EnumOneDelayImportChunk(EnumImportsFunction callback,
+                                      PImgDelayDescr delay_descriptor,
+                                      LPCSTR module_name,
+                                      PIMAGE_THUNK_DATA name_table,
+                                      PIMAGE_THUNK_DATA iat,
+                                      PVOID cookie) const {
+  for (; name_table->u1.Ordinal; name_table++, iat++) {
+    LPCSTR name = NULL;
+    WORD ordinal = 0;
+    WORD hint = 0;
+
+    if (IMAGE_SNAP_BY_ORDINAL(name_table->u1.Ordinal)) {
+      ordinal = static_cast<WORD>(IMAGE_ORDINAL32(name_table->u1.Ordinal));
+    } else {
+      PIMAGE_IMPORT_BY_NAME import;
+      bool rvas = (delay_descriptor->grAttrs & dlattrRva) != 0;
+
+      if (rvas) {
+        import = reinterpret_cast<PIMAGE_IMPORT_BY_NAME>(
+                     RVAToAddr(name_table->u1.ForwarderString));
+      } else {
+        import = reinterpret_cast<PIMAGE_IMPORT_BY_NAME>(
+                     name_table->u1.ForwarderString);
+      }
+
+      hint = import->Hint;
+      name = reinterpret_cast<LPCSTR>(&import->Name);
+    }
+
+    if (!callback(*this, module_name, ordinal, name, hint, iat, cookie))
+      return false;
+  }
+
+  return true;
+}
+
+bool PEImage::EnumAllDelayImports(EnumImportsFunction callback,
+                                  PVOID cookie) const {
+  EnumAllImportsStorage temp = { callback, cookie };
+  return EnumDelayImportChunks(ProcessDelayImportChunk, &temp);
+}
+
+bool PEImage::VerifyMagic() const {
+  PIMAGE_DOS_HEADER dos_header = GetDosHeader();
+
+  if (dos_header->e_magic != IMAGE_DOS_SIGNATURE)
+    return false;
+
+  PIMAGE_NT_HEADERS nt_headers = GetNTHeaders();
+
+  if (nt_headers->Signature != IMAGE_NT_SIGNATURE)
+    return false;
+
+  if (nt_headers->FileHeader.SizeOfOptionalHeader !=
+      sizeof(IMAGE_OPTIONAL_HEADER))
+    return false;
+
+  if (nt_headers->OptionalHeader.Magic != IMAGE_NT_OPTIONAL_HDR_MAGIC)
+    return false;
+
+  return true;
+}
+
+bool PEImage::ImageRVAToOnDiskOffset(DWORD rva, DWORD* on_disk_offset) const {
+  LPVOID address = RVAToAddr(rva);
+  return ImageAddrToOnDiskOffset(address, on_disk_offset);
+}
+
+bool PEImage::ImageAddrToOnDiskOffset(LPVOID address,
+                                      DWORD* on_disk_offset) const {
+  if (NULL == address)
+    return false;
+
+  // Get the section that this address belongs to.
+  PIMAGE_SECTION_HEADER section_header = GetImageSectionFromAddr(address);
+  if (NULL == section_header)
+    return false;
+
+  // Don't follow the virtual RVAToAddr(); use the base class implementation.
+  DWORD offset_within_section =
+      static_cast<DWORD>(reinterpret_cast<uintptr_t>(address)) -
+      static_cast<DWORD>(reinterpret_cast<uintptr_t>(
+          PEImage::RVAToAddr(section_header->VirtualAddress)));
+
+  *on_disk_offset = section_header->PointerToRawData + offset_within_section;
+  return true;
+}
+
+PVOID PEImage::RVAToAddr(DWORD rva) const {
+  if (rva == 0)
+    return NULL;
+
+  return reinterpret_cast<char*>(module_) + rva;
+}
+
+PVOID PEImageAsData::RVAToAddr(DWORD rva) const {
+  if (rva == 0)
+    return NULL;
+
+  PVOID in_memory = PEImage::RVAToAddr(rva);
+  DWORD disk_offset;
+
+  if (!ImageAddrToOnDiskOffset(in_memory, &disk_offset))
+    return NULL;
+
+  return PEImage::RVAToAddr(disk_offset);
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/pe_image.h b/base/win/pe_image.h
new file mode 100644
index 0000000..3f8f868
--- /dev/null
+++ b/base/win/pe_image.h
@@ -0,0 +1,267 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file was adapted from GreenBorder's Code.
+// To understand what this class is about (for other than well known functions
+// as GetProcAddress), a good starting point is "An In-Depth Look into the
+// Win32 Portable Executable File Format" by Matt Pietrek:
+// http://msdn.microsoft.com/msdnmag/issues/02/02/PE/default.aspx
+
+#ifndef BASE_WIN_PE_IMAGE_H_
+#define BASE_WIN_PE_IMAGE_H_
+
+#include <windows.h>
+
+#if defined(_WIN32_WINNT_WIN8)
+// The Windows 8 SDK defines FACILITY_VISUALCPP in winerror.h.
+#undef FACILITY_VISUALCPP
+#endif
+#include <DelayIMP.h>
+
+namespace base {
+namespace win {
+
+// This class is a wrapper for the Portable Executable File Format (PE).
+// Its main purpose is to provide an easy way to work with imports and exports
+// of a file mapped in memory as an image.
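+//
+// A minimal usage sketch (LogImport is an illustrative callback, not part of
+// this class):
+//
+//   bool LogImport(const base::win::PEImage& image, LPCSTR module,
+//                  DWORD ordinal, LPCSTR name, DWORD hint,
+//                  PIMAGE_THUNK_DATA iat, PVOID cookie) {
+//     // |name| is null when the symbol is imported by ordinal only.
+//     return true;  // Returning false stops the enumeration.
+//   }
+//
+//   base::win::PEImage image(::GetModuleHandle(nullptr));
+//   if (image.VerifyMagic())
+//     image.EnumAllImports(&LogImport, nullptr);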
+class PEImage {
+ public:
+  // Callback to enumerate sections.
+  // cookie is the value passed to the enumerate method.
+  // Returns true to continue the enumeration.
+  typedef bool (*EnumSectionsFunction)(const PEImage &image,
+                                       PIMAGE_SECTION_HEADER header,
+                                       PVOID section_start, DWORD section_size,
+                                       PVOID cookie);
+
+  // Callback to enumerate exports.
+  // function is the actual address of the symbol. If forward is not null, it
+  // contains the dll and symbol to forward this export to. cookie is the value
+  // passed to the enumerate method.
+  // Returns true to continue the enumeration.
+  typedef bool (*EnumExportsFunction)(const PEImage &image, DWORD ordinal,
+                                      DWORD hint, LPCSTR name, PVOID function,
+                                      LPCSTR forward, PVOID cookie);
+
+  // Callback to enumerate import blocks.
+  // name_table and iat point to the imports name table and address table for
+  // this block. cookie is the value passed to the enumerate method.
+  // Returns true to continue the enumeration.
+  typedef bool (*EnumImportChunksFunction)(const PEImage &image, LPCSTR module,
+                                           PIMAGE_THUNK_DATA name_table,
+                                           PIMAGE_THUNK_DATA iat, PVOID cookie);
+
+  // Callback to enumerate imports.
+  // module is the dll that exports this symbol. cookie is the value passed to
+  // the enumerate method.
+  // Returns true to continue the enumeration.
+  typedef bool (*EnumImportsFunction)(const PEImage &image, LPCSTR module,
+                                      DWORD ordinal, LPCSTR name, DWORD hint,
+                                      PIMAGE_THUNK_DATA iat, PVOID cookie);
+
+  // Callback to enumerate delayed import blocks.
+  // module is the dll that exports this block of symbols. cookie is the value
+  // passed to the enumerate method.
+  // Returns true to continue the enumeration.
+  typedef bool (*EnumDelayImportChunksFunction)(const PEImage &image,
+                                                PImgDelayDescr delay_descriptor,
+                                                LPCSTR module,
+                                                PIMAGE_THUNK_DATA name_table,
+                                                PIMAGE_THUNK_DATA iat,
+                                                PVOID cookie);
+
+  // Callback to enumerate relocations.
+  // cookie is the value passed to the enumerate method.
+  // Returns true to continue the enumeration.
+  typedef bool (*EnumRelocsFunction)(const PEImage &image, WORD type,
+                                     PVOID address, PVOID cookie);
+
+  explicit PEImage(HMODULE module) : module_(module) {}
+  explicit PEImage(const void* module) {
+    module_ = reinterpret_cast<HMODULE>(const_cast<void*>(module));
+  }
+
+  virtual ~PEImage() {}
+
+  // Gets the HMODULE for this object.
+  HMODULE module() const;
+
+  // Sets this object's HMODULE.
+  void set_module(HMODULE module);
+
+  // Checks if this symbol is actually an ordinal.
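+  // (Per the Win32 convention, a pointer value <= 0xFFFF encodes an ordinal
+  // in its low word rather than pointing at a string, as with
+  // MAKEINTRESOURCE.)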
+  static bool IsOrdinal(LPCSTR name);
+
+  // Converts a named symbol to the corresponding ordinal.
+  static WORD ToOrdinal(LPCSTR name);
+
+  // Returns the DOS_HEADER for this PE.
+  PIMAGE_DOS_HEADER GetDosHeader() const;
+
+  // Returns the NT_HEADER for this PE.
+  PIMAGE_NT_HEADERS GetNTHeaders() const;
+
+  // Returns number of sections of this PE.
+  WORD GetNumSections() const;
+
+  // Returns the header for a given section.
+  // Returns NULL if there is no such section.
+  PIMAGE_SECTION_HEADER GetSectionHeader(UINT section) const;
+
+  // Returns the size of a given directory entry.
+  DWORD GetImageDirectoryEntrySize(UINT directory) const;
+
+  // Returns the address of a given directory entry.
+  PVOID GetImageDirectoryEntryAddr(UINT directory) const;
+
+  // Returns the section header for a given address.
+  // Use: s = image.GetImageSectionFromAddr(a);
+  // Post: 's' is the section header of the section that contains 'a'
+  //       or NULL if there is no such section.
+  PIMAGE_SECTION_HEADER GetImageSectionFromAddr(PVOID address) const;
+
+  // Returns the section header for a given section.
+  PIMAGE_SECTION_HEADER GetImageSectionHeaderByName(LPCSTR section_name) const;
+
+  // Returns the first block of imports.
+  PIMAGE_IMPORT_DESCRIPTOR GetFirstImportChunk() const;
+
+  // Returns the exports directory.
+  PIMAGE_EXPORT_DIRECTORY GetExportDirectory() const;
+
+  // Returns the debug id (guid+age) and |pdb_filename|. Parameters are
+  // optional and can be null. |pdb_filename| points directly into the PEImage
+  // and does not need to be freed.
+  bool GetDebugId(LPGUID guid, LPDWORD age, LPCSTR* pdb_filename) const;
+
+  // Returns a given export entry.
+  // Use: e = image.GetExportEntry(f);
+  // Pre: 'f' is either a zero-terminated string or an ordinal.
+  // Post: 'e' is a pointer to the export directory entry
+  //       that contains 'f's export RVA, or NULL if 'f'
+  //       is not exported from this image
+  PDWORD GetExportEntry(LPCSTR name) const;
+
+  // Returns the address for a given exported symbol.
+  // Use: p = image.GetProcAddress(f);
+  // Pre: 'f' is either a zero-terminated string or an ordinal.
+  // Post: if 'f' is a non-forwarded export from image, 'p' is
+  //       the exported function. If 'f' is a forwarded export
+  //       then p is the special value -1. In this case
+  //       RVAToAddr(*GetExportEntry) can be used to resolve
+  //       the string that describes the forward.
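+  // Illustrative resolution of a forward (sketch; "Foo" is hypothetical):
+  //   FARPROC p = image.GetProcAddress("Foo");
+  //   if (p == reinterpret_cast<FARPROC>(-1)) {
+  //     const char* fwd = reinterpret_cast<const char*>(
+  //         image.RVAToAddr(*image.GetExportEntry("Foo")));
+  //   }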
+  FARPROC GetProcAddress(LPCSTR function_name) const;
+
+  // Retrieves the ordinal for a given exported symbol.
+  // Returns true if the symbol was found.
+  bool GetProcOrdinal(LPCSTR function_name, WORD *ordinal) const;
+
+  // Enumerates PE sections.
+  // cookie is a generic cookie to pass to the callback.
+  // Returns true on success.
+  bool EnumSections(EnumSectionsFunction callback, PVOID cookie) const;
+
+  // Enumerates PE exports.
+  // cookie is a generic cookie to pass to the callback.
+  // Returns true on success.
+  bool EnumExports(EnumExportsFunction callback, PVOID cookie) const;
+
+  // Enumerates PE imports.
+  // cookie is a generic cookie to pass to the callback.
+  // Returns true on success.
+  bool EnumAllImports(EnumImportsFunction callback, PVOID cookie) const;
+
+  // Enumerates PE import blocks.
+  // cookie is a generic cookie to pass to the callback.
+  // Returns true on success.
+  bool EnumImportChunks(EnumImportChunksFunction callback, PVOID cookie) const;
+
+  // Enumerates the imports from a single PE import block.
+  // cookie is a generic cookie to pass to the callback.
+  // Returns true on success.
+  bool EnumOneImportChunk(EnumImportsFunction callback, LPCSTR module_name,
+                          PIMAGE_THUNK_DATA name_table, PIMAGE_THUNK_DATA iat,
+                          PVOID cookie) const;
+
+  // Enumerates PE delay imports.
+  // cookie is a generic cookie to pass to the callback.
+  // Returns true on success.
+  bool EnumAllDelayImports(EnumImportsFunction callback, PVOID cookie) const;
+
+  // Enumerates PE delay import blocks.
+  // cookie is a generic cookie to pass to the callback.
+  // Returns true on success.
+  bool EnumDelayImportChunks(EnumDelayImportChunksFunction callback,
+                             PVOID cookie) const;
+
+  // Enumerates imports from a single PE delay import block.
+  // cookie is a generic cookie to pass to the callback.
+  // Returns true on success.
+  bool EnumOneDelayImportChunk(EnumImportsFunction callback,
+                               PImgDelayDescr delay_descriptor,
+                               LPCSTR module_name,
+                               PIMAGE_THUNK_DATA name_table,
+                               PIMAGE_THUNK_DATA iat,
+                               PVOID cookie) const;
+
+  // Enumerates PE relocation entries.
+  // cookie is a generic cookie to pass to the callback.
+  // Returns true on success.
+  bool EnumRelocs(EnumRelocsFunction callback, PVOID cookie) const;
+
+  // Verifies the magic values on the PE file.
+  // Returns true if all values are correct.
+  bool VerifyMagic() const;
+
+  // Converts an rva value to the appropriate address.
+  virtual PVOID RVAToAddr(DWORD rva) const;
+
+  // Converts an rva value to an offset on disk.
+  // Returns true on success.
+  bool ImageRVAToOnDiskOffset(DWORD rva, DWORD *on_disk_offset) const;
+
+  // Converts an address to an offset on disk.
+  // Returns true on success.
+  bool ImageAddrToOnDiskOffset(LPVOID address, DWORD *on_disk_offset) const;
+
+ private:
+  HMODULE module_;
+};
+
+// This class is an extension to the PEImage class that allows working with PE
+// files mapped as data instead of as an image.
+class PEImageAsData : public PEImage {
+ public:
+  explicit PEImageAsData(HMODULE hModule) : PEImage(hModule) {}
+
+  PVOID RVAToAddr(DWORD rva) const override;
+};
+
+inline bool PEImage::IsOrdinal(LPCSTR name) {
+  return reinterpret_cast<uintptr_t>(name) <= 0xFFFF;
+}
+
+inline WORD PEImage::ToOrdinal(LPCSTR name) {
+  return static_cast<WORD>(reinterpret_cast<intptr_t>(name));
+}
+
+inline HMODULE PEImage::module() const {
+  return module_;
+}
+
+inline PIMAGE_IMPORT_DESCRIPTOR PEImage::GetFirstImportChunk() const {
+  return reinterpret_cast<PIMAGE_IMPORT_DESCRIPTOR>(
+             GetImageDirectoryEntryAddr(IMAGE_DIRECTORY_ENTRY_IMPORT));
+}
+
+inline PIMAGE_EXPORT_DIRECTORY PEImage::GetExportDirectory() const {
+  return reinterpret_cast<PIMAGE_EXPORT_DIRECTORY>(
+             GetImageDirectoryEntryAddr(IMAGE_DIRECTORY_ENTRY_EXPORT));
+}
+
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_WIN_PE_IMAGE_H_
diff --git a/base/win/pe_image_test.cc b/base/win/pe_image_test.cc
new file mode 100644
index 0000000..8591495
--- /dev/null
+++ b/base/win/pe_image_test.cc
@@ -0,0 +1,33 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
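+//
+// Builds a small test DLL whose sections, imports, delay imports, exports and
+// forwarded export are enumerated by pe_image_unittest.cc.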
+
+#include <windows.h>
+
+#include <cfgmgr32.h>
+#include <shellapi.h>
+
+#pragma comment(linker, "/export:FwdExport=KERNEL32.CreateFileA")
+
+extern "C" {
+
+__declspec(dllexport) void ExportFunc1() {
+  // Call into user32.dll.
+  HWND dummy = GetDesktopWindow();
+  SetWindowTextA(dummy, "dummy");
+}
+
+__declspec(dllexport) void ExportFunc2() {
+  // Call into cfgmgr32.dll.
+  CM_MapCrToWin32Err(CR_SUCCESS, ERROR_SUCCESS);
+
+  // Call into shell32.dll.
+  SHFILEOPSTRUCT file_operation = {0};
+  SHFileOperation(&file_operation);
+
+  // Call into kernel32.dll.
+  HANDLE h = CreateEvent(NULL, FALSE, FALSE, NULL);
+  CloseHandle(h);
+}
+
+}  // extern "C"
diff --git a/base/win/pe_image_unittest.cc b/base/win/pe_image_unittest.cc
new file mode 100644
index 0000000..7890ce6
--- /dev/null
+++ b/base/win/pe_image_unittest.cc
@@ -0,0 +1,225 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains unit tests for PEImage.
+#include <algorithm>
+#include <vector>
+
+#include "base/files/file_path.h"
+#include "base/path_service.h"
+#include "base/scoped_native_library.h"
+#include "base/win/pe_image.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace win {
+
+namespace {
+
+// Just counts the number of invocations.
+bool ImportsCallback(const PEImage& image,
+                     LPCSTR module,
+                     DWORD ordinal,
+                     LPCSTR name,
+                     DWORD hint,
+                     PIMAGE_THUNK_DATA iat,
+                     PVOID cookie) {
+  int* count = reinterpret_cast<int*>(cookie);
+  (*count)++;
+  return true;
+}
+
+// Just counts the number of invocations.
+bool SectionsCallback(const PEImage& image,
+                      PIMAGE_SECTION_HEADER header,
+                      PVOID section_start,
+                      DWORD section_size,
+                      PVOID cookie) {
+  int* count = reinterpret_cast<int*>(cookie);
+  (*count)++;
+  return true;
+}
+
+// Just counts the number of invocations.
+bool RelocsCallback(const PEImage& image,
+                    WORD type,
+                    PVOID address,
+                    PVOID cookie) {
+  int* count = reinterpret_cast<int*>(cookie);
+  (*count)++;
+  return true;
+}
+
+// Just counts the number of invocations.
+bool ImportChunksCallback(const PEImage& image,
+                          LPCSTR module,
+                          PIMAGE_THUNK_DATA name_table,
+                          PIMAGE_THUNK_DATA iat,
+                          PVOID cookie) {
+  int* count = reinterpret_cast<int*>(cookie);
+  (*count)++;
+  return true;
+}
+
+// Just counts the number of invocations.
+bool DelayImportChunksCallback(const PEImage& image,
+                               PImgDelayDescr delay_descriptor,
+                               LPCSTR module,
+                               PIMAGE_THUNK_DATA name_table,
+                               PIMAGE_THUNK_DATA iat,
+                               PVOID cookie) {
+  int* count = reinterpret_cast<int*>(cookie);
+  (*count)++;
+  return true;
+}
+
+// Just counts the number of invocations.
+bool ExportsCallback(const PEImage& image,
+                     DWORD ordinal,
+                     DWORD hint,
+                     LPCSTR name,
+                     PVOID function,
+                     LPCSTR forward,
+                     PVOID cookie) {
+  int* count = reinterpret_cast<int*>(cookie);
+  (*count)++;
+  return true;
+}
+
+}  // namespace
+
+// Tests that we are able to enumerate the various tables of a PE file, and
+// that the actual number of items found matches an expected value.
+TEST(PEImageTest, EnumeratesPE) {
+  base::FilePath pe_image_test_path;
+  ASSERT_TRUE(PathService::Get(DIR_TEST_DATA, &pe_image_test_path));
+  pe_image_test_path = pe_image_test_path.Append(FILE_PATH_LITERAL("pe_image"));
+
+#if defined(ARCH_CPU_64_BITS)
+  pe_image_test_path =
+      pe_image_test_path.Append(FILE_PATH_LITERAL("pe_image_test_64.dll"));
+  const int sections = 6;
+  const int imports_dlls = 2;
+  const int delay_dlls = 2;
+  const int exports = 3;
+  const int imports = 70;
+  const int delay_imports = 2;
+  const int relocs = 976;
+#else
+  pe_image_test_path =
+      pe_image_test_path.Append(FILE_PATH_LITERAL("pe_image_test_32.dll"));
+  const int sections = 5;
+  const int imports_dlls = 2;
+  const int delay_dlls = 2;
+  const int exports = 3;
+  const int imports = 66;
+  const int delay_imports = 2;
+  const int relocs = 2114;
+#endif
+
+  ScopedNativeLibrary module(pe_image_test_path);
+  ASSERT_TRUE(module.is_valid());
+
+  PEImage pe(module.get());
+  int count = 0;
+  EXPECT_TRUE(pe.VerifyMagic());
+
+  pe.EnumSections(SectionsCallback, &count);
+  EXPECT_EQ(sections, count);
+
+  count = 0;
+  pe.EnumImportChunks(ImportChunksCallback, &count);
+  EXPECT_EQ(imports_dlls, count);
+
+  count = 0;
+  pe.EnumDelayImportChunks(DelayImportChunksCallback, &count);
+  EXPECT_EQ(delay_dlls, count);
+
+  count = 0;
+  pe.EnumExports(ExportsCallback, &count);
+  EXPECT_EQ(exports, count);
+
+  count = 0;
+  pe.EnumAllImports(ImportsCallback, &count);
+  EXPECT_EQ(imports, count);
+
+  count = 0;
+  pe.EnumAllDelayImports(ImportsCallback, &count);
+  EXPECT_EQ(delay_imports, count);
+
+  count = 0;
+  pe.EnumRelocs(RelocsCallback, &count);
+  EXPECT_EQ(relocs, count);
+}
+
+// Tests that we can locate a specific exported symbol, by name and by ordinal.
+TEST(PEImageTest, RetrievesExports) {
+  ScopedNativeLibrary module(FilePath(L"advapi32.dll"));
+  ASSERT_TRUE(module.is_valid());
+
+  PEImage pe(module.get());
+  WORD ordinal;
+
+  EXPECT_TRUE(pe.GetProcOrdinal("RegEnumKeyExW", &ordinal));
+
+  FARPROC address1 = pe.GetProcAddress("RegEnumKeyExW");
+  FARPROC address2 = pe.GetProcAddress(reinterpret_cast<char*>(ordinal));
+  EXPECT_TRUE(address1 != NULL);
+  EXPECT_TRUE(address2 != NULL);
+  EXPECT_TRUE(address1 == address2);
+}
+
+// Tests that we can locate a forwarded export.
+TEST(PEImageTest, ForwardedExport) {
+  base::FilePath pe_image_test_path;
+  ASSERT_TRUE(PathService::Get(DIR_TEST_DATA, &pe_image_test_path));
+  pe_image_test_path = pe_image_test_path.Append(FILE_PATH_LITERAL("pe_image"));
+
+#if defined(ARCH_CPU_64_BITS)
+  pe_image_test_path =
+      pe_image_test_path.Append(FILE_PATH_LITERAL("pe_image_test_64.dll"));
+#else
+  pe_image_test_path =
+      pe_image_test_path.Append(FILE_PATH_LITERAL("pe_image_test_32.dll"));
+#endif
+
+  ScopedNativeLibrary module(pe_image_test_path);
+
+  ASSERT_TRUE(module.is_valid());
+
+  PEImage pe(module.get());
+
+  FARPROC addr = pe.GetProcAddress("FwdExport");
+  EXPECT_EQ(FARPROC(-1), addr);
+
+  PDWORD export_entry = pe.GetExportEntry("FwdExport");
+  EXPECT_NE(nullptr, export_entry);
+  PVOID fwd_addr = pe.RVAToAddr(*export_entry);
+  const char expected_fwd[] = "KERNEL32.CreateFileA";
+  EXPECT_STREQ(expected_fwd, reinterpret_cast<char*>(fwd_addr));
+}
+
+// Tests that we can get the debug id out of a module.
+TEST(PEImageTest, GetDebugId) {
+  ScopedNativeLibrary module(FilePath(L"advapi32.dll"));
+  ASSERT_TRUE(module.is_valid());
+
+  PEImage pe(module.get());
+  GUID guid = {0};
+  DWORD age = 0;
+  LPCSTR pdb_file = nullptr;
+  EXPECT_TRUE(pe.GetDebugId(&guid, &age, &pdb_file));
+  EXPECT_STREQ("advapi32.pdb", pdb_file);
+
+  // Should be valid to call with null out-parameters.
+  EXPECT_TRUE(pe.GetDebugId(nullptr, nullptr, nullptr));
+
+  GUID empty_guid = {0};
+  EXPECT_TRUE(!IsEqualGUID(empty_guid, guid));
+  EXPECT_NE(0U, age);
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/process_startup_helper.cc b/base/win/process_startup_helper.cc
new file mode 100644
index 0000000..7a01211
--- /dev/null
+++ b/base/win/process_startup_helper.cc
@@ -0,0 +1,55 @@
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/process_startup_helper.h"
+
+#include <crtdbg.h>
+#include <new.h>
+
+#include "base/base_switches.h"
+#include "base/command_line.h"
+
+namespace {
+
+#pragma optimize("", off)
+// Handlers for invalid parameter and pure call. They generate a breakpoint to
+// tell breakpad that it needs to dump the process.
+void InvalidParameter(const wchar_t* expression, const wchar_t* function,
+                      const wchar_t* file, unsigned int line,
+                      uintptr_t reserved) {
+  __debugbreak();
+  _exit(1);
+}
+
+void PureCall() {
+  __debugbreak();
+  _exit(1);
+}
+#pragma optimize("", on)
+
+}  // namespace
+
+namespace base {
+namespace win {
+
+// Registers the invalid parameter and pure call handlers so that breakpad is
+// notified when either is triggered.
+void RegisterInvalidParamHandler() {
+  _set_invalid_parameter_handler(InvalidParameter);
+  _set_purecall_handler(PureCall);
+}
+
+void SetupCRT(const CommandLine& command_line) {
+#if defined(_CRTDBG_MAP_ALLOC)
+  _CrtSetReportFile(_CRT_WARN, _CRTDBG_FILE_STDERR);
+  _CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_FILE);
+#else
+  if (!command_line.HasSwitch(switches::kDisableBreakpad)) {
+    _CrtSetReportMode(_CRT_ASSERT, 0);
+  }
+#endif
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/process_startup_helper.h b/base/win/process_startup_helper.h
new file mode 100644
index 0000000..f633dda
--- /dev/null
+++ b/base/win/process_startup_helper.h
@@ -0,0 +1,26 @@
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_PROCESS_STARTUP_HELPER_H_
+#define BASE_WIN_PROCESS_STARTUP_HELPER_H_
+
+#include "base/base_export.h"
+
+namespace base {
+
+class CommandLine;
+
+namespace win {
+
+// Registers the invalid parameter and pure call handlers so that breakpad is
+// notified when either is triggered.
+BASE_EXPORT void RegisterInvalidParamHandler();
+
+// Sets up the CRT's debugging macros to output to stderr.
+BASE_EXPORT void SetupCRT(const CommandLine& command_line);
+
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_WIN_PROCESS_STARTUP_HELPER_H_
diff --git a/base/win/registry.cc b/base/win/registry.cc
new file mode 100644
index 0000000..2fe53cf
--- /dev/null
+++ b/base/win/registry.cc
@@ -0,0 +1,681 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/registry.h"
+
+#include <shlwapi.h>
+#include <stddef.h>
+#include <algorithm>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/strings/string_util.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/win/windows_version.h"
+
+namespace base {
+namespace win {
+
+namespace {
+
+// RegEnumValue() reports the number of characters from the name that were
+// written to the buffer, not how many there are. This constant is the maximum
+// name size (in characters), so a buffer of this size can receive any name.
+const DWORD MAX_REGISTRY_NAME_SIZE = 16384;
+
+// Registry values are read as BYTE* but can have wchar_t* data whose last
+// wchar_t is truncated. This function converts the reported |byte_size| to
+// a size in wchar_t that can store a truncated wchar_t if necessary.
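+// For example, a 5-byte value maps to 3 wchar_t, the last one half-filled.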
+inline DWORD to_wchar_size(DWORD byte_size) {
+  return (byte_size + sizeof(wchar_t) - 1) / sizeof(wchar_t);
+}
+
+// Mask to pull WOW64 access flags out of REGSAM access.
+const REGSAM kWow64AccessMask = KEY_WOW64_32KEY | KEY_WOW64_64KEY;
+
+}  // namespace
+
+// Watches for modifications to a key.
+class RegKey::Watcher : public ObjectWatcher::Delegate {
+ public:
+  Watcher() {}
+  ~Watcher() override {}
+
+  bool StartWatching(HKEY key, const ChangeCallback& callback);
+
+  // Implementation of ObjectWatcher::Delegate.
+  void OnObjectSignaled(HANDLE object) override {
+    DCHECK(watch_event_.IsValid() && watch_event_.Get() == object);
+    ChangeCallback callback = callback_;
+    callback_.Reset();
+    callback.Run();
+  }
+
+ private:
+  ScopedHandle watch_event_;
+  ObjectWatcher object_watcher_;
+  ChangeCallback callback_;
+  DISALLOW_COPY_AND_ASSIGN(Watcher);
+};
+
+bool RegKey::Watcher::StartWatching(HKEY key, const ChangeCallback& callback) {
+  DCHECK(key);
+  DCHECK(callback_.is_null());
+
+  if (!watch_event_.IsValid())
+    watch_event_.Set(CreateEvent(NULL, TRUE, FALSE, NULL));
+
+  if (!watch_event_.IsValid())
+    return false;
+
+  DWORD filter = REG_NOTIFY_CHANGE_NAME |
+                 REG_NOTIFY_CHANGE_ATTRIBUTES |
+                 REG_NOTIFY_CHANGE_LAST_SET |
+                 REG_NOTIFY_CHANGE_SECURITY;
+
+  // Watch the registry key for a change of value.
+  LONG result = RegNotifyChangeKeyValue(key, TRUE, filter, watch_event_.Get(),
+                                        TRUE);
+  if (result != ERROR_SUCCESS) {
+    watch_event_.Close();
+    return false;
+  }
+
+  callback_ = callback;
+  return object_watcher_.StartWatchingOnce(watch_event_.Get(), this);
+}
+
+// RegKey ----------------------------------------------------------------------
+
+RegKey::RegKey() : key_(NULL), wow64access_(0) {
+}
+
+RegKey::RegKey(HKEY key) : key_(key), wow64access_(0) {
+}
+
+RegKey::RegKey(HKEY rootkey, const wchar_t* subkey, REGSAM access)
+    : key_(NULL),
+      wow64access_(0) {
+  if (rootkey) {
+    if (access & (KEY_SET_VALUE | KEY_CREATE_SUB_KEY | KEY_CREATE_LINK))
+      Create(rootkey, subkey, access);
+    else
+      Open(rootkey, subkey, access);
+  } else {
+    DCHECK(!subkey);
+    wow64access_ = access & kWow64AccessMask;
+  }
+}
+
+RegKey::~RegKey() {
+  Close();
+}
+
+LONG RegKey::Create(HKEY rootkey, const wchar_t* subkey, REGSAM access) {
+  DWORD disposition_value;
+  return CreateWithDisposition(rootkey, subkey, &disposition_value, access);
+}
+
+LONG RegKey::CreateWithDisposition(HKEY rootkey, const wchar_t* subkey,
+                                   DWORD* disposition, REGSAM access) {
+  DCHECK(rootkey && subkey && access && disposition);
+  HKEY subhkey = NULL;
+  LONG result = RegCreateKeyEx(rootkey, subkey, 0, NULL,
+                               REG_OPTION_NON_VOLATILE, access, NULL, &subhkey,
+                               disposition);
+  if (result == ERROR_SUCCESS) {
+    Close();
+    key_ = subhkey;
+    wow64access_ = access & kWow64AccessMask;
+  }
+
+  return result;
+}
+
+LONG RegKey::CreateKey(const wchar_t* name, REGSAM access) {
+  DCHECK(name && access);
+  // After the application has accessed an alternate registry view using one of
+  // the [KEY_WOW64_32KEY / KEY_WOW64_64KEY] flags, all subsequent operations
+  // (create, delete, or open) on child registry keys must explicitly use the
+  // same flag. Otherwise, there can be unexpected behavior.
+  // http://msdn.microsoft.com/en-us/library/windows/desktop/aa384129.aspx.
+  if ((access & kWow64AccessMask) != wow64access_) {
+    NOTREACHED();
+    return ERROR_INVALID_PARAMETER;
+  }
+  HKEY subkey = NULL;
+  LONG result = RegCreateKeyEx(key_, name, 0, NULL, REG_OPTION_NON_VOLATILE,
+                               access, NULL, &subkey, NULL);
+  if (result == ERROR_SUCCESS) {
+    Close();
+    key_ = subkey;
+    wow64access_ = access & kWow64AccessMask;
+  }
+
+  return result;
+}
+
+LONG RegKey::Open(HKEY rootkey, const wchar_t* subkey, REGSAM access) {
+  DCHECK(rootkey && subkey && access);
+  HKEY subhkey = NULL;
+
+  LONG result = RegOpenKeyEx(rootkey, subkey, 0, access, &subhkey);
+  if (result == ERROR_SUCCESS) {
+    Close();
+    key_ = subhkey;
+    wow64access_ = access & kWow64AccessMask;
+  }
+
+  return result;
+}
+
+LONG RegKey::OpenKey(const wchar_t* relative_key_name, REGSAM access) {
+  DCHECK(relative_key_name && access);
+  // After the application has accessed an alternate registry view using one of
+  // the [KEY_WOW64_32KEY / KEY_WOW64_64KEY] flags, all subsequent operations
+  // (create, delete, or open) on child registry keys must explicitly use the
+  // same flag. Otherwise, there can be unexpected behavior.
+  // http://msdn.microsoft.com/en-us/library/windows/desktop/aa384129.aspx.
+  if ((access & kWow64AccessMask) != wow64access_) {
+    NOTREACHED();
+    return ERROR_INVALID_PARAMETER;
+  }
+  HKEY subkey = NULL;
+  LONG result = RegOpenKeyEx(key_, relative_key_name, 0, access, &subkey);
+
+  // We have to close the currently open key before replacing it with the new
+  // one.
+  if (result == ERROR_SUCCESS) {
+    Close();
+    key_ = subkey;
+    wow64access_ = access & kWow64AccessMask;
+  }
+  return result;
+}
+
+void RegKey::Close() {
+  if (key_) {
+    ::RegCloseKey(key_);
+    key_ = NULL;
+    wow64access_ = 0;
+  }
+}
+
+// TODO(wfh): Remove this and other unsafe methods. See http://crbug.com/375400
+void RegKey::Set(HKEY key) {
+  if (key_ != key) {
+    Close();
+    key_ = key;
+  }
+}
+
+HKEY RegKey::Take() {
+  DCHECK_EQ(wow64access_, 0u);
+  HKEY key = key_;
+  key_ = NULL;
+  return key;
+}
+
+bool RegKey::HasValue(const wchar_t* name) const {
+  return RegQueryValueEx(key_, name, 0, NULL, NULL, NULL) == ERROR_SUCCESS;
+}
+
+DWORD RegKey::GetValueCount() const {
+  DWORD count = 0;
+  LONG result = RegQueryInfoKey(key_, NULL, 0, NULL, NULL, NULL, NULL, &count,
+                                NULL, NULL, NULL, NULL);
+  return (result == ERROR_SUCCESS) ? count : 0;
+}
+
+LONG RegKey::GetValueNameAt(int index, std::wstring* name) const {
+  wchar_t buf[256];
+  DWORD bufsize = arraysize(buf);
+  LONG r = ::RegEnumValue(key_, index, buf, &bufsize, NULL, NULL, NULL, NULL);
+  if (r == ERROR_SUCCESS)
+    *name = buf;
+
+  return r;
+}
+
+LONG RegKey::DeleteKey(const wchar_t* name) {
+  DCHECK(key_);
+  DCHECK(name);
+  HKEY subkey = NULL;
+
+  // Verify the key exists before attempting delete to replicate previous
+  // behavior.
+  LONG result =
+      RegOpenKeyEx(key_, name, 0, READ_CONTROL | wow64access_, &subkey);
+  if (result != ERROR_SUCCESS)
+    return result;
+  RegCloseKey(subkey);
+
+  return RegDelRecurse(key_, std::wstring(name), wow64access_);
+}
+
+LONG RegKey::DeleteEmptyKey(const wchar_t* name) {
+  DCHECK(key_);
+  DCHECK(name);
+
+  HKEY target_key = NULL;
+  LONG result = RegOpenKeyEx(key_, name, 0, KEY_READ | wow64access_,
+                             &target_key);
+
+  if (result != ERROR_SUCCESS)
+    return result;
+
+  DWORD count = 0;
+  result = RegQueryInfoKey(target_key, NULL, 0, NULL, NULL, NULL, NULL, &count,
+                           NULL, NULL, NULL, NULL);
+
+  RegCloseKey(target_key);
+
+  if (result != ERROR_SUCCESS)
+    return result;
+
+  if (count == 0)
+    return RegDeleteKeyExWrapper(key_, name, wow64access_, 0);
+
+  return ERROR_DIR_NOT_EMPTY;
+}
+
+LONG RegKey::DeleteValue(const wchar_t* value_name) {
+  DCHECK(key_);
+  LONG result = RegDeleteValue(key_, value_name);
+  return result;
+}
+
+LONG RegKey::ReadValueDW(const wchar_t* name, DWORD* out_value) const {
+  DCHECK(out_value);
+  DWORD type = REG_DWORD;
+  DWORD size = sizeof(DWORD);
+  DWORD local_value = 0;
+  LONG result = ReadValue(name, &local_value, &size, &type);
+  if (result == ERROR_SUCCESS) {
+    if ((type == REG_DWORD || type == REG_BINARY) && size == sizeof(DWORD))
+      *out_value = local_value;
+    else
+      result = ERROR_CANTREAD;
+  }
+
+  return result;
+}
+
+LONG RegKey::ReadInt64(const wchar_t* name, int64_t* out_value) const {
+  DCHECK(out_value);
+  DWORD type = REG_QWORD;
+  int64_t local_value = 0;
+  DWORD size = sizeof(local_value);
+  LONG result = ReadValue(name, &local_value, &size, &type);
+  if (result == ERROR_SUCCESS) {
+    if ((type == REG_QWORD || type == REG_BINARY) &&
+        size == sizeof(local_value))
+      *out_value = local_value;
+    else
+      result = ERROR_CANTREAD;
+  }
+
+  return result;
+}
+
+LONG RegKey::ReadValue(const wchar_t* name, std::wstring* out_value) const {
+  DCHECK(out_value);
+  const size_t kMaxStringLength = 1024;  // This is after expansion.
+  // Use one of the other forms of ReadValue if 1024 is too small for you.
+  wchar_t raw_value[kMaxStringLength];
+  DWORD type = REG_SZ, size = sizeof(raw_value);
+  LONG result = ReadValue(name, raw_value, &size, &type);
+  if (result == ERROR_SUCCESS) {
+    if (type == REG_SZ) {
+      *out_value = raw_value;
+    } else if (type == REG_EXPAND_SZ) {
+      wchar_t expanded[kMaxStringLength];
+      size = ExpandEnvironmentStrings(raw_value, expanded, kMaxStringLength);
+      // Success: returns the number of wchar_t's copied
+      // Fail: buffer too small, returns the size required
+      // Fail: other, returns 0
+      if (size == 0 || size > kMaxStringLength) {
+        result = ERROR_MORE_DATA;
+      } else {
+        *out_value = expanded;
+      }
+    } else {
+      // Not a string. Oops.
+      result = ERROR_CANTREAD;
+    }
+  }
+
+  return result;
+}
+
+LONG RegKey::ReadValue(const wchar_t* name,
+                       void* data,
+                       DWORD* dsize,
+                       DWORD* dtype) const {
+  LONG result = RegQueryValueEx(key_, name, 0, dtype,
+                                reinterpret_cast<LPBYTE>(data), dsize);
+  return result;
+}
+
+LONG RegKey::ReadValues(const wchar_t* name,
+                        std::vector<std::wstring>* values) {
+  values->clear();
+
+  DWORD type = REG_MULTI_SZ;
+  DWORD size = 0;
+  LONG result = ReadValue(name, NULL, &size, &type);
+  if (result != ERROR_SUCCESS || size == 0)
+    return result;
+
+  if (type != REG_MULTI_SZ)
+    return ERROR_CANTREAD;
+
+  std::vector<wchar_t> buffer(size / sizeof(wchar_t));
+  result = ReadValue(name, &buffer[0], &size, NULL);
+  if (result != ERROR_SUCCESS || size == 0)
+    return result;
+
+  // Parse the double-null-terminated list of strings.
+  // Note: This code is careful not to read outside of |buffer|, in case the
+  // data is not properly terminated.
+  const wchar_t* entry = &buffer[0];
+  const wchar_t* buffer_end = entry + (size / sizeof(wchar_t));
+  while (entry < buffer_end && entry[0] != '\0') {
+    const wchar_t* entry_end = std::find(entry, buffer_end, L'\0');
+    values->push_back(std::wstring(entry, entry_end));
+    entry = entry_end + 1;
+  }
+  return ERROR_SUCCESS;
+}
+
+LONG RegKey::WriteValue(const wchar_t* name, DWORD in_value) {
+  return WriteValue(
+      name, &in_value, static_cast<DWORD>(sizeof(in_value)), REG_DWORD);
+}
+
+LONG RegKey::WriteValue(const wchar_t * name, const wchar_t* in_value) {
+  return WriteValue(name, in_value,
+      static_cast<DWORD>(sizeof(*in_value) * (wcslen(in_value) + 1)), REG_SZ);
+}
+
+LONG RegKey::WriteValue(const wchar_t* name,
+                        const void* data,
+                        DWORD dsize,
+                        DWORD dtype) {
+  DCHECK(data || !dsize);
+
+  LONG result = RegSetValueEx(key_, name, 0, dtype,
+      reinterpret_cast<LPBYTE>(const_cast<void*>(data)), dsize);
+  return result;
+}
+
+bool RegKey::StartWatching(const ChangeCallback& callback) {
+  if (!key_watcher_)
+    key_watcher_.reset(new Watcher());
+
+  return key_watcher_->StartWatching(key_, callback);
+}
+
+// static
+LONG RegKey::RegDeleteKeyExWrapper(HKEY hKey,
+                                   const wchar_t* lpSubKey,
+                                   REGSAM samDesired,
+                                   DWORD Reserved) {
+  typedef LSTATUS(WINAPI* RegDeleteKeyExPtr)(HKEY, LPCWSTR, REGSAM, DWORD);
+
+  RegDeleteKeyExPtr reg_delete_key_ex_func =
+      reinterpret_cast<RegDeleteKeyExPtr>(
+          GetProcAddress(GetModuleHandleA("advapi32.dll"), "RegDeleteKeyExW"));
+
+  if (reg_delete_key_ex_func)
+    return reg_delete_key_ex_func(hKey, lpSubKey, samDesired, Reserved);
+
+  // Windows XP does not support RegDeleteKeyEx, so fallback to RegDeleteKey.
+  return RegDeleteKey(hKey, lpSubKey);
+}
+
+// static
+LONG RegKey::RegDelRecurse(HKEY root_key,
+                           const std::wstring& name,
+                           REGSAM access) {
+  // First, see if the key can be deleted without having to recurse.
+  LONG result = RegDeleteKeyExWrapper(root_key, name.c_str(), access, 0);
+  if (result == ERROR_SUCCESS)
+    return result;
+
+  HKEY target_key = NULL;
+  result = RegOpenKeyEx(
+      root_key, name.c_str(), 0, KEY_ENUMERATE_SUB_KEYS | access, &target_key);
+
+  if (result == ERROR_FILE_NOT_FOUND)
+    return ERROR_SUCCESS;
+  if (result != ERROR_SUCCESS)
+    return result;
+
+  std::wstring subkey_name(name);
+
+  // Check for an ending slash and add one if it is missing.
+  if (!name.empty() && subkey_name[name.length() - 1] != L'\\')
+    subkey_name += L"\\";
+
+  // Enumerate the subkeys, always at index 0: each successful recursive
+  // delete removes the subkey just read, shifting the remaining ones down.
+  result = ERROR_SUCCESS;
+  const DWORD kMaxKeyNameLength = MAX_PATH;
+  const size_t base_key_length = subkey_name.length();
+  std::wstring key_name;
+  while (result == ERROR_SUCCESS) {
+    DWORD key_size = kMaxKeyNameLength;
+    result = RegEnumKeyEx(target_key,
+                          0,
+                          WriteInto(&key_name, kMaxKeyNameLength),
+                          &key_size,
+                          NULL,
+                          NULL,
+                          NULL,
+                          NULL);
+
+    if (result != ERROR_SUCCESS)
+      break;
+
+    key_name.resize(key_size);
+    subkey_name.resize(base_key_length);
+    subkey_name += key_name;
+
+    if (RegDelRecurse(root_key, subkey_name, access) != ERROR_SUCCESS)
+      break;
+  }
+
+  RegCloseKey(target_key);
+
+  // Try again to delete the key.
+  result = RegDeleteKeyExWrapper(root_key, name.c_str(), access, 0);
+
+  return result;
+}
+
+// RegistryValueIterator ------------------------------------------------------
+
+RegistryValueIterator::RegistryValueIterator(HKEY root_key,
+                                             const wchar_t* folder_key,
+                                             REGSAM wow64access)
+    : name_(MAX_PATH, L'\0'),
+      value_(MAX_PATH, L'\0') {
+  Initialize(root_key, folder_key, wow64access);
+}
+
+RegistryValueIterator::RegistryValueIterator(HKEY root_key,
+                                             const wchar_t* folder_key)
+    : name_(MAX_PATH, L'\0'),
+      value_(MAX_PATH, L'\0') {
+  Initialize(root_key, folder_key, 0);
+}
+
+void RegistryValueIterator::Initialize(HKEY root_key,
+                                       const wchar_t* folder_key,
+                                       REGSAM wow64access) {
+  DCHECK_EQ(wow64access & ~kWow64AccessMask, static_cast<REGSAM>(0));
+  LONG result =
+      RegOpenKeyEx(root_key, folder_key, 0, KEY_READ | wow64access, &key_);
+  if (result != ERROR_SUCCESS) {
+    key_ = NULL;
+  } else {
+    DWORD count = 0;
+    result = ::RegQueryInfoKey(key_, NULL, 0, NULL, NULL, NULL, NULL, &count,
+                               NULL, NULL, NULL, NULL);
+
+    if (result != ERROR_SUCCESS) {
+      ::RegCloseKey(key_);
+      key_ = NULL;
+    } else {
+      index_ = count - 1;
+    }
+  }
+
+  Read();
+}
+
+RegistryValueIterator::~RegistryValueIterator() {
+  if (key_)
+    ::RegCloseKey(key_);
+}
+
+DWORD RegistryValueIterator::ValueCount() const {
+  DWORD count = 0;
+  LONG result = ::RegQueryInfoKey(key_, NULL, 0, NULL, NULL, NULL, NULL,
+                                  &count, NULL, NULL, NULL, NULL);
+  if (result != ERROR_SUCCESS)
+    return 0;
+
+  return count;
+}
+
+bool RegistryValueIterator::Valid() const {
+  return key_ != NULL && index_ >= 0;
+}
+
+void RegistryValueIterator::operator++() {
+  --index_;
+  Read();
+}
+
+bool RegistryValueIterator::Read() {
+  if (Valid()) {
+    DWORD capacity = static_cast<DWORD>(name_.capacity());
+    DWORD name_size = capacity;
+    // |value_size_| is in bytes. Reserve the last character for a NUL.
+    value_size_ = static_cast<DWORD>((value_.size() - 1) * sizeof(wchar_t));
+    LONG result = ::RegEnumValue(
+        key_, index_, WriteInto(&name_, name_size), &name_size, NULL, &type_,
+        reinterpret_cast<BYTE*>(value_.data()), &value_size_);
+
+    if (result == ERROR_MORE_DATA) {
+      // Registry key names are limited to 255 characters and fit within
+      // MAX_PATH (which is 260) but registry value names can use up to 16,383
+      // characters and the value itself is not limited (see
+      // http://msdn.microsoft.com/en-us/library/windows/desktop/ms724872(v=vs.85).aspx).
+      // Resize the buffers and retry if their size caused the failure.
+      DWORD value_size_in_wchars = to_wchar_size(value_size_);
+      if (value_size_in_wchars + 1 > value_.size())
+        value_.resize(value_size_in_wchars + 1, L'\0');
+      value_size_ = static_cast<DWORD>((value_.size() - 1) * sizeof(wchar_t));
+      name_size = name_size == capacity ? MAX_REGISTRY_NAME_SIZE : capacity;
+      result = ::RegEnumValue(
+          key_, index_, WriteInto(&name_, name_size), &name_size, NULL, &type_,
+          reinterpret_cast<BYTE*>(value_.data()), &value_size_);
+    }
+
+    if (result == ERROR_SUCCESS) {
+      DCHECK_LT(to_wchar_size(value_size_), value_.size());
+      value_[to_wchar_size(value_size_)] = L'\0';
+      return true;
+    }
+  }
+
+  name_[0] = L'\0';
+  value_[0] = L'\0';
+  value_size_ = 0;
+  return false;
+}
+
+// RegistryKeyIterator --------------------------------------------------------
+
+RegistryKeyIterator::RegistryKeyIterator(HKEY root_key,
+                                         const wchar_t* folder_key) {
+  Initialize(root_key, folder_key, 0);
+}
+
+RegistryKeyIterator::RegistryKeyIterator(HKEY root_key,
+                                         const wchar_t* folder_key,
+                                         REGSAM wow64access) {
+  Initialize(root_key, folder_key, wow64access);
+}
+
+RegistryKeyIterator::~RegistryKeyIterator() {
+  if (key_)
+    ::RegCloseKey(key_);
+}
+
+DWORD RegistryKeyIterator::SubkeyCount() const {
+  DWORD count = 0;
+  LONG result = ::RegQueryInfoKey(key_, NULL, 0, NULL, &count, NULL, NULL,
+                                  NULL, NULL, NULL, NULL, NULL);
+  if (result != ERROR_SUCCESS)
+    return 0;
+
+  return count;
+}
+
+bool RegistryKeyIterator::Valid() const {
+  return key_ != NULL && index_ >= 0;
+}
+
+void RegistryKeyIterator::operator++() {
+  --index_;
+  Read();
+}
+
+bool RegistryKeyIterator::Read() {
+  if (Valid()) {
+    DWORD ncount = arraysize(name_);
+    FILETIME written;
+    LONG r = ::RegEnumKeyEx(key_, index_, name_, &ncount, NULL, NULL,
+                            NULL, &written);
+    if (ERROR_SUCCESS == r)
+      return true;
+  }
+
+  name_[0] = L'\0';
+  return false;
+}
+
+void RegistryKeyIterator::Initialize(HKEY root_key,
+                                     const wchar_t* folder_key,
+                                     REGSAM wow64access) {
+  DCHECK_EQ(wow64access & ~kWow64AccessMask, static_cast<REGSAM>(0));
+  LONG result =
+      RegOpenKeyEx(root_key, folder_key, 0, KEY_READ | wow64access, &key_);
+  if (result != ERROR_SUCCESS) {
+    key_ = NULL;
+  } else {
+    DWORD count = 0;
+    result = ::RegQueryInfoKey(key_, NULL, 0, NULL, &count, NULL, NULL, NULL,
+                               NULL, NULL, NULL, NULL);
+
+    if (result != ERROR_SUCCESS) {
+      ::RegCloseKey(key_);
+      key_ = NULL;
+    } else {
+      index_ = count - 1;
+    }
+  }
+
+  Read();
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/registry.h b/base/win/registry.h
new file mode 100644
index 0000000..53327ec
--- /dev/null
+++ b/base/win/registry.h
@@ -0,0 +1,261 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_REGISTRY_H_
+#define BASE_WIN_REGISTRY_H_
+
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/win/object_watcher.h"
+#include "base/win/scoped_handle.h"
+#include "base/win/windows_types.h"
+
+namespace base {
+namespace win {
+
+// Utility class to read, write and manipulate the Windows Registry.
+// Registry vocabulary primer: a "key" is like a folder, in which there
+// are "values", which are <name, data> pairs, with an associated data type.
+//
+// Note:
+//  * ReadValue family of functions guarantee that the out-parameter
+//    is not touched in case of failure.
+//  * Functions returning LONG indicate success as ERROR_SUCCESS or an
+//    error as a (non-zero) win32 error code.
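+//
+// Minimal usage sketch (the key path here is hypothetical):
+//   base::win::RegKey key(HKEY_CURRENT_USER, L"Software\\Example", KEY_READ);
+//   std::wstring value;
+//   if (key.Valid() && key.ReadValue(L"Name", &value) == ERROR_SUCCESS) {
+//     // Use |value|.
+//   }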
+class BASE_EXPORT RegKey {
+ public:
+  // Called from the MessageLoop when the key changes.
+  typedef base::Callback<void()> ChangeCallback;
+
+  RegKey();
+  explicit RegKey(HKEY key);
+  RegKey(HKEY rootkey, const wchar_t* subkey, REGSAM access);
+  ~RegKey();
+
+  LONG Create(HKEY rootkey, const wchar_t* subkey, REGSAM access);
+
+  LONG CreateWithDisposition(HKEY rootkey, const wchar_t* subkey,
+                             DWORD* disposition, REGSAM access);
+
+  // Creates a subkey or opens it if it already exists.
+  LONG CreateKey(const wchar_t* name, REGSAM access);
+
+  // Opens an existing reg key.
+  LONG Open(HKEY rootkey, const wchar_t* subkey, REGSAM access);
+
+  // Opens an existing reg key, given the relative key name.
+  LONG OpenKey(const wchar_t* relative_key_name, REGSAM access);
+
+  // Closes this reg key.
+  void Close();
+
+  // Replaces the handle of the registry key and takes ownership of the handle.
+  void Set(HKEY key);
+
+  // Transfers ownership away from this object.
+  HKEY Take();
+
+  // Returns false if this key does not have the specified value, or if an
+  // error occurs while attempting to access it.
+  bool HasValue(const wchar_t* value_name) const;
+
+  // Returns the number of values for this key, or 0 if the number cannot be
+  // determined.
+  DWORD GetValueCount() const;
+
+  // Determines the nth value's name.
+  LONG GetValueNameAt(int index, std::wstring* name) const;
+
+  // True while the key is valid.
+  bool Valid() const { return key_ != NULL; }
+
+  // Kills a key and everything that lives below it; please be careful when
+  // using it.
+  LONG DeleteKey(const wchar_t* name);
+
+  // Deletes an empty subkey.  If the subkey has subkeys or values then this
+  // will fail.
+  LONG DeleteEmptyKey(const wchar_t* name);
+
+  // Deletes a single value within the key.
+  LONG DeleteValue(const wchar_t* name);
+
+  // Getters:
+
+  // Reads a REG_DWORD (uint32_t) into |out_value|. If |name| is null or empty,
+  // reads the key's default value, if any.
+  LONG ReadValueDW(const wchar_t* name, DWORD* out_value) const;
+
+  // Reads a REG_QWORD (int64_t) into |out_value|. If |name| is null or empty,
+  // reads the key's default value, if any.
+  LONG ReadInt64(const wchar_t* name, int64_t* out_value) const;
+
+  // Reads a string into |out_value|. If |name| is null or empty, reads
+  // the key's default value, if any.
+  LONG ReadValue(const wchar_t* name, std::wstring* out_value) const;
+
+  // Reads a REG_MULTI_SZ registry field into a vector of strings. Clears
+  // |values| initially and adds further strings to the list. Returns
+  // ERROR_CANTREAD if type is not REG_MULTI_SZ.
+  LONG ReadValues(const wchar_t* name, std::vector<std::wstring>* values);
+
+  // Reads raw data into |data|. If |name| is null or empty, reads the key's
+  // default value, if any.
+  LONG ReadValue(const wchar_t* name,
+                 void* data,
+                 DWORD* dsize,
+                 DWORD* dtype) const;
+
+  // Setters:
+
+  // Sets a DWORD (uint32_t) value.
+  LONG WriteValue(const wchar_t* name, DWORD in_value);
+
+  // Sets a string value.
+  LONG WriteValue(const wchar_t* name, const wchar_t* in_value);
+
+  // Sets raw data, including type.
+  LONG WriteValue(const wchar_t* name,
+                  const void* data,
+                  DWORD dsize,
+                  DWORD dtype);
+
+  // Starts watching the key to see if any of its values have changed.
+  // The key must have been opened with the KEY_NOTIFY access privilege.
+  // Returns true on success.
+  // To stop watching, delete this RegKey object. To continue watching the
+  // object after the callback is invoked, call StartWatching again.
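+  // Illustrative use (|OnKeyChanged| is a hypothetical void() function):
+  //   key.StartWatching(base::Bind(&OnKeyChanged));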
+  bool StartWatching(const ChangeCallback& callback);
+
+  HKEY Handle() const { return key_; }
+
+ private:
+  class Watcher;
+
+  // Calls RegDeleteKeyEx on supported platforms, alternatively falls back to
+  // RegDeleteKey.
+  static LONG RegDeleteKeyExWrapper(HKEY hKey,
+                                    const wchar_t* lpSubKey,
+                                    REGSAM samDesired,
+                                    DWORD Reserved);
+
+  // Recursively deletes a key and all of its subkeys.
+  static LONG RegDelRecurse(HKEY root_key,
+                            const std::wstring& name,
+                            REGSAM access);
+
+  HKEY key_;  // The registry key this object wraps.
+  REGSAM wow64access_;
+  std::unique_ptr<Watcher> key_watcher_;
+
+  DISALLOW_COPY_AND_ASSIGN(RegKey);
+};
+
+// Iterates over the values of a given registry key.
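+//
+// Illustrative use (sketch):
+//   for (base::win::RegistryValueIterator it(HKEY_CURRENT_USER, L"Software");
+//        it.Valid(); ++it) {
+//     // it.Name(), it.Value(), it.Type() and it.ValueSize() describe the
+//     // current value.
+//   }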
+class BASE_EXPORT RegistryValueIterator {
+ public:
+  // Constructs a Registry Value Iterator with default WOW64 access.
+  RegistryValueIterator(HKEY root_key, const wchar_t* folder_key);
+
+  // Constructs a Registry Value Iterator with specific WOW64 access, one of
+  // KEY_WOW64_32KEY or KEY_WOW64_64KEY, or 0.
+  // Note: |wow64access| should be the same access used to open |root_key|
+  // previously, or a predefined key (e.g. HKEY_LOCAL_MACHINE).
+  // See http://msdn.microsoft.com/en-us/library/windows/desktop/aa384129.aspx.
+  RegistryValueIterator(HKEY root_key,
+                        const wchar_t* folder_key,
+                        REGSAM wow64access);
+
+  ~RegistryValueIterator();
+
+  DWORD ValueCount() const;
+
+  // True while the iterator is valid.
+  bool Valid() const;
+
+  // Advances to the next registry entry.
+  void operator++();
+
+  const wchar_t* Name() const { return name_.c_str(); }
+  const wchar_t* Value() const { return value_.data(); }
+  // ValueSize() is in bytes.
+  DWORD ValueSize() const { return value_size_; }
+  DWORD Type() const { return type_; }
+
+  int Index() const { return index_; }
+
+ private:
+  // Reads in the current values.
+  bool Read();
+
+  void Initialize(HKEY root_key, const wchar_t* folder_key, REGSAM wow64access);
+
+  // The registry key being iterated.
+  HKEY key_;
+
+  // Current index of the iteration.
+  int index_;
+
+  // Current values.
+  std::wstring name_;
+  std::vector<wchar_t> value_;
+  DWORD value_size_;
+  DWORD type_;
+
+  DISALLOW_COPY_AND_ASSIGN(RegistryValueIterator);
+};
+
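+// Iterates over the subkeys of a given registry key.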
+class BASE_EXPORT RegistryKeyIterator {
+ public:
+  // Constructs a Registry Key Iterator with default WOW64 access.
+  RegistryKeyIterator(HKEY root_key, const wchar_t* folder_key);
+
+  // Constructs a Registry Key Iterator with specific WOW64 access, one of
+  // KEY_WOW64_32KEY or KEY_WOW64_64KEY, or 0.
+  // Note: |wow64access| should be the same access used to open |root_key|
+  // previously, or a predefined key (e.g. HKEY_LOCAL_MACHINE).
+  // See http://msdn.microsoft.com/en-us/library/windows/desktop/aa384129.aspx.
+  RegistryKeyIterator(HKEY root_key,
+                      const wchar_t* folder_key,
+                      REGSAM wow64access);
+
+  ~RegistryKeyIterator();
+
+  DWORD SubkeyCount() const;
+
+  // True while the iterator is valid.
+  bool Valid() const;
+
+  // Advances to the next entry in the folder.
+  void operator++();
+
+  const wchar_t* Name() const { return name_; }
+
+  int Index() const { return index_; }
+
+ private:
+  // Reads in the current values.
+  bool Read();
+
+  void Initialize(HKEY root_key, const wchar_t* folder_key, REGSAM wow64access);
+
+  // The registry key being iterated.
+  HKEY key_;
+
+  // Current index of the iteration.
+  int index_;
+
+  wchar_t name_[MAX_PATH];
+
+  DISALLOW_COPY_AND_ASSIGN(RegistryKeyIterator);
+};
+
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_WIN_REGISTRY_H_
diff --git a/base/win/registry_unittest.cc b/base/win/registry_unittest.cc
new file mode 100644
index 0000000..5a18ffa
--- /dev/null
+++ b/base/win/registry_unittest.cc
@@ -0,0 +1,423 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/registry.h"
+
+#include <stdint.h>
+
+#include <cstring>
+#include <vector>
+
+#include "base/bind.h"
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/stl_util.h"
+#include "base/win/windows_version.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace win {
+
+namespace {
+
+class RegistryTest : public testing::Test {
+ protected:
+#if defined(_WIN64)
+  static const REGSAM kNativeViewMask = KEY_WOW64_64KEY;
+  static const REGSAM kRedirectedViewMask = KEY_WOW64_32KEY;
+#else
+  static const REGSAM kNativeViewMask = KEY_WOW64_32KEY;
+  static const REGSAM kRedirectedViewMask = KEY_WOW64_64KEY;
+#endif  // _WIN64
+
+  RegistryTest() {}
+  void SetUp() override {
+    // Create a temporary key.
+    RegKey key(HKEY_CURRENT_USER, L"", KEY_ALL_ACCESS);
+    key.DeleteKey(kRootKey);
+    ASSERT_NE(ERROR_SUCCESS, key.Open(HKEY_CURRENT_USER, kRootKey, KEY_READ));
+    ASSERT_EQ(ERROR_SUCCESS, key.Create(HKEY_CURRENT_USER, kRootKey, KEY_READ));
+    foo_software_key_ = L"Software\\";
+    foo_software_key_ += kRootKey;
+    foo_software_key_ += L"\\Foo";
+  }
+
+  void TearDown() override {
+    // Clean up the temporary key.
+    RegKey key(HKEY_CURRENT_USER, L"", KEY_SET_VALUE);
+    ASSERT_EQ(ERROR_SUCCESS, key.DeleteKey(kRootKey));
+    ASSERT_NE(ERROR_SUCCESS, key.Open(HKEY_CURRENT_USER, kRootKey, KEY_READ));
+  }
+
+  static bool IsRedirectorPresent() {
+#if defined(_WIN64)
+    return true;
+#else
+    return OSInfo::GetInstance()->wow64_status() == OSInfo::WOW64_ENABLED;
+#endif
+  }
+
+  const wchar_t* const kRootKey = L"Base_Registry_Unittest";
+  std::wstring foo_software_key_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(RegistryTest);
+};
+
+// static
+const REGSAM RegistryTest::kNativeViewMask;
+const REGSAM RegistryTest::kRedirectedViewMask;
+
+TEST_F(RegistryTest, ValueTest) {
+  RegKey key;
+
+  std::wstring foo_key(kRootKey);
+  foo_key += L"\\Foo";
+  ASSERT_EQ(ERROR_SUCCESS, key.Create(HKEY_CURRENT_USER, foo_key.c_str(),
+                                      KEY_READ));
+
+  {
+    ASSERT_EQ(ERROR_SUCCESS, key.Open(HKEY_CURRENT_USER, foo_key.c_str(),
+                                      KEY_READ | KEY_SET_VALUE));
+    ASSERT_TRUE(key.Valid());
+
+    const wchar_t kStringValueName[] = L"StringValue";
+    const wchar_t kDWORDValueName[] = L"DWORDValue";
+    const wchar_t kInt64ValueName[] = L"Int64Value";
+    const wchar_t kStringData[] = L"string data";
+    const DWORD kDWORDData = 0xdeadbabe;
+    const int64_t kInt64Data = 0xdeadbabedeadbabeLL;
+
+    // Test value creation
+    ASSERT_EQ(ERROR_SUCCESS, key.WriteValue(kStringValueName, kStringData));
+    ASSERT_EQ(ERROR_SUCCESS, key.WriteValue(kDWORDValueName, kDWORDData));
+    ASSERT_EQ(ERROR_SUCCESS, key.WriteValue(kInt64ValueName, &kInt64Data,
+                                            sizeof(kInt64Data), REG_QWORD));
+    EXPECT_EQ(3U, key.GetValueCount());
+    EXPECT_TRUE(key.HasValue(kStringValueName));
+    EXPECT_TRUE(key.HasValue(kDWORDValueName));
+    EXPECT_TRUE(key.HasValue(kInt64ValueName));
+
+    // Test Read
+    std::wstring string_value;
+    DWORD dword_value = 0;
+    int64_t int64_value = 0;
+    ASSERT_EQ(ERROR_SUCCESS, key.ReadValue(kStringValueName, &string_value));
+    ASSERT_EQ(ERROR_SUCCESS, key.ReadValueDW(kDWORDValueName, &dword_value));
+    ASSERT_EQ(ERROR_SUCCESS, key.ReadInt64(kInt64ValueName, &int64_value));
+    EXPECT_STREQ(kStringData, string_value.c_str());
+    EXPECT_EQ(kDWORDData, dword_value);
+    EXPECT_EQ(kInt64Data, int64_value);
+
+    // Make sure out args are not touched if ReadValue fails
+    const wchar_t* kNonExistent = L"NonExistent";
+    ASSERT_NE(ERROR_SUCCESS, key.ReadValue(kNonExistent, &string_value));
+    ASSERT_NE(ERROR_SUCCESS, key.ReadValueDW(kNonExistent, &dword_value));
+    ASSERT_NE(ERROR_SUCCESS, key.ReadInt64(kNonExistent, &int64_value));
+    EXPECT_STREQ(kStringData, string_value.c_str());
+    EXPECT_EQ(kDWORDData, dword_value);
+    EXPECT_EQ(kInt64Data, int64_value);
+
+    // Test delete
+    ASSERT_EQ(ERROR_SUCCESS, key.DeleteValue(kStringValueName));
+    ASSERT_EQ(ERROR_SUCCESS, key.DeleteValue(kDWORDValueName));
+    ASSERT_EQ(ERROR_SUCCESS, key.DeleteValue(kInt64ValueName));
+    EXPECT_EQ(0U, key.GetValueCount());
+    EXPECT_FALSE(key.HasValue(kStringValueName));
+    EXPECT_FALSE(key.HasValue(kDWORDValueName));
+    EXPECT_FALSE(key.HasValue(kInt64ValueName));
+  }
+}
+
+TEST_F(RegistryTest, BigValueIteratorTest) {
+  RegKey key;
+  std::wstring foo_key(kRootKey);
+  foo_key += L"\\Foo";
+  ASSERT_EQ(ERROR_SUCCESS, key.Create(HKEY_CURRENT_USER, foo_key.c_str(),
+                                      KEY_READ));
+  ASSERT_EQ(ERROR_SUCCESS, key.Open(HKEY_CURRENT_USER, foo_key.c_str(),
+                                    KEY_READ | KEY_SET_VALUE));
+  ASSERT_TRUE(key.Valid());
+
+  // Create a test value that is larger than MAX_PATH.
+  std::wstring data(MAX_PATH * 2, L'a');
+
+  ASSERT_EQ(ERROR_SUCCESS, key.WriteValue(data.c_str(), data.c_str()));
+
+  RegistryValueIterator iterator(HKEY_CURRENT_USER, foo_key.c_str());
+  ASSERT_TRUE(iterator.Valid());
+  EXPECT_STREQ(data.c_str(), iterator.Name());
+  EXPECT_STREQ(data.c_str(), iterator.Value());
+  // ValueSize() is in bytes, including NUL.
+  EXPECT_EQ((MAX_PATH * 2 + 1) * sizeof(wchar_t), iterator.ValueSize());
+  ++iterator;
+  EXPECT_FALSE(iterator.Valid());
+}
+
+TEST_F(RegistryTest, TruncatedCharTest) {
+  RegKey key;
+  std::wstring foo_key(kRootKey);
+  foo_key += L"\\Foo";
+  ASSERT_EQ(ERROR_SUCCESS, key.Create(HKEY_CURRENT_USER, foo_key.c_str(),
+                                      KEY_READ));
+  ASSERT_EQ(ERROR_SUCCESS, key.Open(HKEY_CURRENT_USER, foo_key.c_str(),
+                                    KEY_READ | KEY_SET_VALUE));
+  ASSERT_TRUE(key.Valid());
+
+  const wchar_t kName[] = L"name";
+  // kData size is not a multiple of sizeof(wchar_t).
+  const uint8_t kData[] = {1, 2, 3, 4, 5};
+  EXPECT_EQ(5u, arraysize(kData));
+  ASSERT_EQ(ERROR_SUCCESS, key.WriteValue(kName, kData,
+                                          arraysize(kData), REG_BINARY));
+
+  RegistryValueIterator iterator(HKEY_CURRENT_USER, foo_key.c_str());
+  ASSERT_TRUE(iterator.Valid());
+  EXPECT_STREQ(kName, iterator.Name());
+  // ValueSize() is in bytes.
+  ASSERT_EQ(arraysize(kData), iterator.ValueSize());
+  // Value() is NUL terminated.
+  int end = (iterator.ValueSize() + sizeof(wchar_t) - 1) / sizeof(wchar_t);
+  EXPECT_NE(L'\0', iterator.Value()[end-1]);
+  EXPECT_EQ(L'\0', iterator.Value()[end]);
+  EXPECT_EQ(0, std::memcmp(kData, iterator.Value(), arraysize(kData)));
+  ++iterator;
+  EXPECT_FALSE(iterator.Valid());
+}
+
+TEST_F(RegistryTest, RecursiveDelete) {
+  RegKey key;
+  // Create kRootKey->Foo
+  //                  \->Bar (TestValue)
+  //                     \->Foo (TestValue)
+  //                        \->Bar
+  //                           \->Foo
+  //                  \->Moo
+  //                  \->Foo
+  // and delete kRootKey->Foo
+  std::wstring foo_key(kRootKey);
+  foo_key += L"\\Foo";
+  ASSERT_EQ(ERROR_SUCCESS,
+            key.Create(HKEY_CURRENT_USER, foo_key.c_str(), KEY_WRITE));
+  ASSERT_EQ(ERROR_SUCCESS, key.CreateKey(L"Bar", KEY_WRITE));
+  ASSERT_EQ(ERROR_SUCCESS, key.WriteValue(L"TestValue", L"TestData"));
+  ASSERT_EQ(ERROR_SUCCESS,
+            key.Create(HKEY_CURRENT_USER, foo_key.c_str(), KEY_WRITE));
+  ASSERT_EQ(ERROR_SUCCESS, key.CreateKey(L"Moo", KEY_WRITE));
+  ASSERT_EQ(ERROR_SUCCESS,
+            key.Create(HKEY_CURRENT_USER, foo_key.c_str(), KEY_WRITE));
+  ASSERT_EQ(ERROR_SUCCESS, key.CreateKey(L"Foo", KEY_WRITE));
+  foo_key += L"\\Bar";
+  ASSERT_EQ(ERROR_SUCCESS,
+            key.Open(HKEY_CURRENT_USER, foo_key.c_str(), KEY_WRITE));
+  foo_key += L"\\Foo";
+  ASSERT_EQ(ERROR_SUCCESS, key.CreateKey(L"Foo", KEY_WRITE));
+  ASSERT_EQ(ERROR_SUCCESS, key.WriteValue(L"TestValue", L"TestData"));
+  ASSERT_EQ(ERROR_SUCCESS,
+            key.Open(HKEY_CURRENT_USER, foo_key.c_str(), KEY_READ));
+
+  ASSERT_EQ(ERROR_SUCCESS, key.Open(HKEY_CURRENT_USER, kRootKey, KEY_WRITE));
+  ASSERT_NE(ERROR_SUCCESS, key.DeleteKey(L"Bar"));
+  ASSERT_NE(ERROR_SUCCESS, key.DeleteEmptyKey(L"Foo"));
+  ASSERT_NE(ERROR_SUCCESS, key.DeleteEmptyKey(L"Foo\\Bar\\Foo"));
+  ASSERT_NE(ERROR_SUCCESS, key.DeleteEmptyKey(L"Foo\\Bar"));
+  ASSERT_EQ(ERROR_SUCCESS, key.DeleteEmptyKey(L"Foo\\Foo"));
+
+  ASSERT_EQ(ERROR_SUCCESS,
+            key.Open(HKEY_CURRENT_USER, foo_key.c_str(), KEY_WRITE));
+  ASSERT_EQ(ERROR_SUCCESS, key.CreateKey(L"Bar", KEY_WRITE));
+  ASSERT_EQ(ERROR_SUCCESS, key.CreateKey(L"Foo", KEY_WRITE));
+  ASSERT_EQ(ERROR_SUCCESS,
+            key.Open(HKEY_CURRENT_USER, foo_key.c_str(), KEY_WRITE));
+  ASSERT_EQ(ERROR_SUCCESS, key.DeleteKey(L""));
+  ASSERT_NE(ERROR_SUCCESS,
+            key.Open(HKEY_CURRENT_USER, foo_key.c_str(), KEY_READ));
+
+  ASSERT_EQ(ERROR_SUCCESS, key.Open(HKEY_CURRENT_USER, kRootKey, KEY_WRITE));
+  ASSERT_EQ(ERROR_SUCCESS, key.DeleteKey(L"Foo"));
+  ASSERT_NE(ERROR_SUCCESS, key.DeleteKey(L"Foo"));
+  ASSERT_NE(ERROR_SUCCESS,
+            key.Open(HKEY_CURRENT_USER, foo_key.c_str(), KEY_READ));
+}
+
+// This test requires running as an Administrator as it tests redirected
+// registry writes to HKLM\Software
+// http://msdn.microsoft.com/en-us/library/windows/desktop/aa384253.aspx
+// TODO(wfh): flaky test on Vista.  See http://crbug.com/377917
+TEST_F(RegistryTest, DISABLED_Wow64RedirectedFromNative) {
+  if (!IsRedirectorPresent())
+    return;
+
+  RegKey key;
+
+  // Test redirected key access from non-redirected.
+  ASSERT_EQ(ERROR_SUCCESS,
+            key.Create(HKEY_LOCAL_MACHINE,
+                       foo_software_key_.c_str(),
+                       KEY_WRITE | kRedirectedViewMask));
+  ASSERT_NE(ERROR_SUCCESS,
+            key.Open(HKEY_LOCAL_MACHINE, foo_software_key_.c_str(), KEY_READ));
+  ASSERT_NE(ERROR_SUCCESS,
+            key.Open(HKEY_LOCAL_MACHINE,
+                     foo_software_key_.c_str(),
+                     KEY_READ | kNativeViewMask));
+
+  // Open the non-redirected view of the parent and try to delete the test key.
+  ASSERT_EQ(ERROR_SUCCESS,
+            key.Open(HKEY_LOCAL_MACHINE, L"Software", KEY_SET_VALUE));
+  ASSERT_NE(ERROR_SUCCESS, key.DeleteKey(kRootKey));
+  ASSERT_EQ(ERROR_SUCCESS,
+            key.Open(HKEY_LOCAL_MACHINE,
+                     L"Software",
+                     KEY_SET_VALUE | kNativeViewMask));
+  ASSERT_NE(ERROR_SUCCESS, key.DeleteKey(kRootKey));
+
+  // Open the redirected view and delete the key created above.
+  ASSERT_EQ(ERROR_SUCCESS,
+            key.Open(HKEY_LOCAL_MACHINE,
+                     L"Software",
+                     KEY_SET_VALUE | kRedirectedViewMask));
+  ASSERT_EQ(ERROR_SUCCESS, key.DeleteKey(kRootKey));
+}
+
+// Test for the issue found in http://crbug.com/384587 where OpenKey would call
+// Close() and reset wow64_access_ flag to 0 and cause a NOTREACHED to hit on a
+// subsequent OpenKey call.
+TEST_F(RegistryTest, SameWowFlags) {
+  RegKey key;
+
+  ASSERT_EQ(ERROR_SUCCESS,
+            key.Open(HKEY_LOCAL_MACHINE,
+                     L"Software",
+                     KEY_READ | KEY_WOW64_64KEY));
+  ASSERT_EQ(ERROR_SUCCESS,
+            key.OpenKey(L"Microsoft",
+                        KEY_READ | KEY_WOW64_64KEY));
+  ASSERT_EQ(ERROR_SUCCESS,
+            key.OpenKey(L"Windows",
+                        KEY_READ | KEY_WOW64_64KEY));
+}
+
+// TODO(wfh): flaky test on Vista.  See http://crbug.com/377917
+TEST_F(RegistryTest, DISABLED_Wow64NativeFromRedirected) {
+  if (!IsRedirectorPresent())
+    return;
+  RegKey key;
+
+  // Test non-redirected key access from redirected.
+  ASSERT_EQ(ERROR_SUCCESS,
+            key.Create(HKEY_LOCAL_MACHINE,
+                       foo_software_key_.c_str(),
+                       KEY_WRITE | kNativeViewMask));
+  ASSERT_EQ(ERROR_SUCCESS,
+            key.Open(HKEY_LOCAL_MACHINE, foo_software_key_.c_str(), KEY_READ));
+  ASSERT_NE(ERROR_SUCCESS,
+            key.Open(HKEY_LOCAL_MACHINE,
+                     foo_software_key_.c_str(),
+                     KEY_READ | kRedirectedViewMask));
+
+  // Open the redirected view of the parent and try to delete the test key
+  // from the non-redirected view.
+  ASSERT_EQ(ERROR_SUCCESS,
+            key.Open(HKEY_LOCAL_MACHINE,
+                     L"Software",
+                     KEY_SET_VALUE | kRedirectedViewMask));
+  ASSERT_NE(ERROR_SUCCESS, key.DeleteKey(kRootKey));
+
+  ASSERT_EQ(ERROR_SUCCESS,
+            key.Open(HKEY_LOCAL_MACHINE,
+                     L"Software",
+                     KEY_SET_VALUE | kNativeViewMask));
+  ASSERT_EQ(ERROR_SUCCESS, key.DeleteKey(kRootKey));
+}
+
+TEST_F(RegistryTest, OpenSubKey) {
+  RegKey key;
+  ASSERT_EQ(ERROR_SUCCESS,
+            key.Open(HKEY_CURRENT_USER,
+                     kRootKey,
+                     KEY_READ | KEY_CREATE_SUB_KEY));
+
+  ASSERT_NE(ERROR_SUCCESS, key.OpenKey(L"foo", KEY_READ));
+  ASSERT_EQ(ERROR_SUCCESS, key.CreateKey(L"foo", KEY_READ));
+  ASSERT_EQ(ERROR_SUCCESS, key.Open(HKEY_CURRENT_USER, kRootKey, KEY_READ));
+  ASSERT_EQ(ERROR_SUCCESS, key.OpenKey(L"foo", KEY_READ));
+
+  std::wstring foo_key(kRootKey);
+  foo_key += L"\\Foo";
+  ASSERT_EQ(ERROR_SUCCESS,
+            key.Open(HKEY_CURRENT_USER, foo_key.c_str(), KEY_READ));
+
+  ASSERT_EQ(ERROR_SUCCESS, key.Open(HKEY_CURRENT_USER, kRootKey, KEY_WRITE));
+  ASSERT_EQ(ERROR_SUCCESS, key.DeleteKey(L"foo"));
+}
+
+class TestChangeDelegate {
+ public:
+  TestChangeDelegate() : called_(false) {}
+  ~TestChangeDelegate() {}
+
+  void OnKeyChanged() {
+    RunLoop::QuitCurrentWhenIdleDeprecated();
+    called_ = true;
+  }
+
+  bool WasCalled() {
+    bool was_called = called_;
+    called_ = false;
+    return was_called;
+  }
+
+ private:
+  bool called_;
+};
+
+TEST_F(RegistryTest, ChangeCallback) {
+  RegKey key;
+  TestChangeDelegate delegate;
+  MessageLoop message_loop;
+
+  std::wstring foo_key(kRootKey);
+  foo_key += L"\\Foo";
+  ASSERT_EQ(ERROR_SUCCESS, key.Create(HKEY_CURRENT_USER, foo_key.c_str(),
+                                      KEY_READ));
+
+  ASSERT_TRUE(key.StartWatching(Bind(&TestChangeDelegate::OnKeyChanged,
+                                     Unretained(&delegate))));
+  EXPECT_FALSE(delegate.WasCalled());
+
+  // Make some change.
+  RegKey key2;
+  ASSERT_EQ(ERROR_SUCCESS, key2.Open(HKEY_CURRENT_USER, foo_key.c_str(),
+                                      KEY_READ | KEY_SET_VALUE));
+  ASSERT_TRUE(key2.Valid());
+  EXPECT_EQ(ERROR_SUCCESS, key2.WriteValue(L"name", L"data"));
+
+  // Allow delivery of the notification.
+  EXPECT_FALSE(delegate.WasCalled());
+  base::RunLoop().Run();
+
+  ASSERT_TRUE(delegate.WasCalled());
+  EXPECT_FALSE(delegate.WasCalled());
+
+  ASSERT_TRUE(key.StartWatching(Bind(&TestChangeDelegate::OnKeyChanged,
+                                     Unretained(&delegate))));
+
+  // Change something else.
+  EXPECT_EQ(ERROR_SUCCESS, key2.WriteValue(L"name2", L"data2"));
+  base::RunLoop().Run();
+  ASSERT_TRUE(delegate.WasCalled());
+
+  ASSERT_TRUE(key.StartWatching(Bind(&TestChangeDelegate::OnKeyChanged,
+                                     Unretained(&delegate))));
+  base::RunLoop().RunUntilIdle();
+  EXPECT_FALSE(delegate.WasCalled());
+}
+
+}  // namespace
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/resource_util.cc b/base/win/resource_util.cc
new file mode 100644
index 0000000..0c10078
--- /dev/null
+++ b/base/win/resource_util.cc
@@ -0,0 +1,51 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/logging.h"
+#include "base/win/resource_util.h"
+
+namespace base {
+namespace win {
+
+bool GetResourceFromModule(HMODULE module,
+                           int resource_id,
+                           LPCTSTR resource_type,
+                           void** data,
+                           size_t* length) {
+  if (!module)
+    return false;
+
+  if (!IS_INTRESOURCE(resource_id)) {
+    NOTREACHED();
+    return false;
+  }
+
+  HRSRC hres_info = FindResource(module, MAKEINTRESOURCE(resource_id),
+                                 resource_type);
+  if (NULL == hres_info)
+    return false;
+
+  DWORD data_size = SizeofResource(module, hres_info);
+  HGLOBAL hres = LoadResource(module, hres_info);
+  if (!hres)
+    return false;
+
+  void* resource = LockResource(hres);
+  if (!resource)
+    return false;
+
+  *data = resource;
+  *length = static_cast<size_t>(data_size);
+  return true;
+}
+
+bool GetDataResourceFromModule(HMODULE module,
+                               int resource_id,
+                               void** data,
+                               size_t* length) {
+  return GetResourceFromModule(module, resource_id, L"BINDATA", data, length);
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/resource_util.h b/base/win/resource_util.h
new file mode 100644
index 0000000..00687b9
--- /dev/null
+++ b/base/win/resource_util.h
@@ -0,0 +1,39 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains utility functions for accessing resources in external
+// files (DLLs) or embedded in the executable itself.
+
+#ifndef BASE_WIN_RESOURCE_UTIL_H_
+#define BASE_WIN_RESOURCE_UTIL_H_
+
+#include <windows.h>
+#include <stddef.h>
+
+#include "base/base_export.h"
+
+namespace base {
+namespace win {
+
+// Function for getting a data resource of the specified |resource_type| from
+// a DLL.  Some resources are optional, especially in unit tests, so this
+// returns false but doesn't raise an error if the resource can't be loaded.
+bool BASE_EXPORT GetResourceFromModule(HMODULE module,
+                                       int resource_id,
+                                       LPCTSTR resource_type,
+                                       void** data,
+                                       size_t* length);
+
+// Function for getting a data resource (BINDATA) from a DLL.  Some
+// resources are optional, especially in unit tests, so this returns false
+// but doesn't raise an error if the resource can't be loaded.
+bool BASE_EXPORT GetDataResourceFromModule(HMODULE module,
+                                           int resource_id,
+                                           void** data,
+                                           size_t* length);
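+
+// Illustrative use (IDR_MY_RESOURCE is a hypothetical resource id; |data|
+// points into the module's read-only image and must not be freed):
+//   void* data = nullptr;
+//   size_t length = 0;
+//   if (base::win::GetDataResourceFromModule(module, IDR_MY_RESOURCE,
+//                                            &data, &length)) {
+//     // Use |data| and |length| here.
+//   }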
+
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_WIN_RESOURCE_UTIL_H_
diff --git a/base/win/scoped_bstr.cc b/base/win/scoped_bstr.cc
new file mode 100644
index 0000000..02f3d54
--- /dev/null
+++ b/base/win/scoped_bstr.cc
@@ -0,0 +1,73 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/scoped_bstr.h"
+
+#include <stdint.h>
+
+#include "base/logging.h"
+
+namespace base {
+namespace win {
+
+ScopedBstr::ScopedBstr(const char16* non_bstr)
+    : bstr_(SysAllocString(non_bstr)) {
+}
+
+ScopedBstr::~ScopedBstr() {
+  static_assert(sizeof(ScopedBstr) == sizeof(BSTR), "ScopedBstrSize");
+  SysFreeString(bstr_);
+}
+
+void ScopedBstr::Reset(BSTR bstr) {
+  if (bstr != bstr_) {
+    // If |bstr_| is NULL, SysFreeString does nothing.
+    SysFreeString(bstr_);
+    bstr_ = bstr;
+  }
+}
+
+BSTR ScopedBstr::Release() {
+  BSTR bstr = bstr_;
+  bstr_ = NULL;
+  return bstr;
+}
+
+void ScopedBstr::Swap(ScopedBstr& bstr2) {
+  BSTR tmp = bstr_;
+  bstr_ = bstr2.bstr_;
+  bstr2.bstr_ = tmp;
+}
+
+BSTR* ScopedBstr::Receive() {
+  DCHECK(!bstr_) << "BSTR leak.";
+  return &bstr_;
+}
+
+BSTR ScopedBstr::Allocate(const char16* str) {
+  Reset(SysAllocString(str));
+  return bstr_;
+}
+
+BSTR ScopedBstr::AllocateBytes(size_t bytes) {
+  Reset(SysAllocStringByteLen(NULL, static_cast<UINT>(bytes)));
+  return bstr_;
+}
+
+void ScopedBstr::SetByteLen(size_t bytes) {
+  DCHECK(bstr_ != NULL) << "attempting to modify a NULL bstr";
+  uint32_t* data = reinterpret_cast<uint32_t*>(bstr_);
+  data[-1] = static_cast<uint32_t>(bytes);
+}
+
+size_t ScopedBstr::Length() const {
+  return SysStringLen(bstr_);
+}
+
+size_t ScopedBstr::ByteLength() const {
+  return SysStringByteLen(bstr_);
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/scoped_bstr.h b/base/win/scoped_bstr.h
new file mode 100644
index 0000000..2109c20
--- /dev/null
+++ b/base/win/scoped_bstr.h
@@ -0,0 +1,99 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_SCOPED_BSTR_H_
+#define BASE_WIN_SCOPED_BSTR_H_
+
+#include <windows.h>
+#include <oleauto.h>
+#include <stddef.h>
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/strings/string16.h"
+
+namespace base {
+namespace win {
+
+// Manages a BSTR string pointer.
+// The class interface is based on unique_ptr.
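+//
+// Illustrative use (the SomeComApi* names are hypothetical):
+//   ScopedBstr str(L"hello");
+//   SomeComApiTakingBstr(str);  // Implicit conversion to BSTR.
+//
+//   ScopedBstr out;
+//   SomeComApiReturningBstr(out.Receive());  // Takes ownership of the result.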
+class BASE_EXPORT ScopedBstr {
+ public:
+  ScopedBstr() : bstr_(NULL) {
+  }
+
+  // Constructor to create a new BSTR.
+  //
+  // NOTE: Do not pass a BSTR to this constructor expecting ownership to
+  // be transferred - even though it compiles! ;-)
+  explicit ScopedBstr(const char16* non_bstr);
+  ~ScopedBstr();
+
+  // Give ScopedBstr ownership over an already allocated BSTR or NULL.
+  // If you need to allocate a new BSTR instance, use |Allocate| instead.
+  void Reset(BSTR bstr = NULL);
+
+  // Releases ownership of the BSTR to the caller.
+  BSTR Release();
+
+  // Creates a new BSTR from a 16-bit C-style string.
+  //
+  // If you already have a BSTR and want to transfer ownership to the
+  // ScopedBstr instance, call |Reset| instead.
+  //
+  // Returns a pointer to the new BSTR, or NULL if allocation failed.
+  BSTR Allocate(const char16* str);
+
+  // Allocates a new BSTR with the specified number of bytes.
+  // Returns a pointer to the new BSTR, or NULL if allocation failed.
+  BSTR AllocateBytes(size_t bytes);
+
+  // Sets the allocated length field of the already-allocated BSTR to be
+  // |bytes|.  This is useful when the BSTR was preallocated with e.g.
+  // SysAllocStringLen or SysAllocStringByteLen (call |AllocateBytes|) and then
+  // not all the bytes are being used.
+  //
+  // Note that if you want to set the length to a specific number of
+  // characters, you need to multiply by sizeof(wchar_t).  Oddly, there's no
+  // public API to set the length, so we do this ourselves by hand.
+  //
+  // NOTE: The actual allocated size of the BSTR MUST be >= bytes.  That
+  // responsibility is with the caller.
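+  //
+  // Illustrative use (FillBuffer is a hypothetical API that writes at most
+  // |max_bytes| and returns the number of bytes it wrote):
+  //   ScopedBstr str;
+  //   str.AllocateBytes(max_bytes);
+  //   size_t bytes_written = FillBuffer(str, max_bytes);
+  //   str.SetByteLen(bytes_written);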
+  void SetByteLen(size_t bytes);
+
+  // Swap values of two ScopedBstr's.
+  void Swap(ScopedBstr& bstr2);
+
+  // Retrieves the pointer address.
+  // Used to receive BSTRs as out arguments (and take ownership).
+  // The function DCHECKs on the current value being NULL.
+  // Usage: GetBstr(bstr.Receive());
+  BSTR* Receive();
+
+  // Returns number of chars in the BSTR.
+  size_t Length() const;
+
+  // Returns the number of bytes allocated for the BSTR.
+  size_t ByteLength() const;
+
+  operator BSTR() const {
+    return bstr_;
+  }
+
+ protected:
+  BSTR bstr_;
+
+ private:
+  // Forbid comparison of ScopedBstr types.  You should never have the same
+  // BSTR owned by two different ScopedBstr instances.
+  bool operator==(const ScopedBstr& bstr2) const;
+  bool operator!=(const ScopedBstr& bstr2) const;
+  DISALLOW_COPY_AND_ASSIGN(ScopedBstr);
+};
+
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_WIN_SCOPED_BSTR_H_
diff --git a/base/win/scoped_bstr_unittest.cc b/base/win/scoped_bstr_unittest.cc
new file mode 100644
index 0000000..d305e5a
--- /dev/null
+++ b/base/win/scoped_bstr_unittest.cc
@@ -0,0 +1,80 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+
+#include "base/macros.h"
+#include "base/win/scoped_bstr.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace win {
+
+namespace {
+
+static const wchar_t kTestString1[] = L"123";
+static const wchar_t kTestString2[] = L"456789";
+size_t test1_len = arraysize(kTestString1) - 1;
+size_t test2_len = arraysize(kTestString2) - 1;
+
+void DumbBstrTests() {
+  ScopedBstr b;
+  EXPECT_TRUE(b == NULL);
+  EXPECT_EQ(0u, b.Length());
+  EXPECT_EQ(0u, b.ByteLength());
+  b.Reset(NULL);
+  EXPECT_TRUE(b == NULL);
+  EXPECT_TRUE(b.Release() == NULL);
+  ScopedBstr b2;
+  b.Swap(b2);
+  EXPECT_TRUE(b2 == NULL);
+}
+
+void GiveMeABstr(BSTR* ret) {
+  *ret = SysAllocString(kTestString1);
+}
+
+void BasicBstrTests() {
+  ScopedBstr b1(kTestString1);
+  EXPECT_EQ(test1_len, b1.Length());
+  EXPECT_EQ(test1_len * sizeof(kTestString1[0]), b1.ByteLength());
+
+  ScopedBstr b2;
+  b1.Swap(b2);
+  EXPECT_EQ(test1_len, b2.Length());
+  EXPECT_EQ(0u, b1.Length());
+  EXPECT_EQ(0, lstrcmp(b2, kTestString1));
+  BSTR tmp = b2.Release();
+  EXPECT_TRUE(tmp != NULL);
+  EXPECT_EQ(0, lstrcmp(tmp, kTestString1));
+  EXPECT_TRUE(b2 == NULL);
+  SysFreeString(tmp);
+
+  GiveMeABstr(b2.Receive());
+  EXPECT_TRUE(b2 != NULL);
+  b2.Reset();
+  EXPECT_TRUE(b2.AllocateBytes(100) != NULL);
+  EXPECT_EQ(100u, b2.ByteLength());
+  EXPECT_EQ(100 / sizeof(kTestString1[0]), b2.Length());
+  lstrcpy(static_cast<BSTR>(b2), kTestString1);
+  EXPECT_EQ(test1_len, static_cast<size_t>(lstrlen(b2)));
+  EXPECT_EQ(100 / sizeof(kTestString1[0]), b2.Length());
+  b2.SetByteLen(lstrlen(b2) * sizeof(kTestString2[0]));
+  EXPECT_EQ(b2.Length(), static_cast<size_t>(lstrlen(b2)));
+
+  EXPECT_TRUE(b1.Allocate(kTestString2) != NULL);
+  EXPECT_EQ(test2_len, b1.Length());
+  b1.SetByteLen((test2_len - 1) * sizeof(kTestString2[0]));
+  EXPECT_EQ(test2_len - 1, b1.Length());
+}
+
+}  // namespace
+
+TEST(ScopedBstrTest, ScopedBstr) {
+  DumbBstrTests();
+  BasicBstrTests();
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/scoped_co_mem.h b/base/win/scoped_co_mem.h
new file mode 100644
index 0000000..a3737dd
--- /dev/null
+++ b/base/win/scoped_co_mem.h
@@ -0,0 +1,68 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_SCOPED_CO_MEM_H_
+#define BASE_WIN_SCOPED_CO_MEM_H_
+
+#include <objbase.h>
+
+#include "base/logging.h"
+#include "base/macros.h"
+
+namespace base {
+namespace win {
+
+// Simple scoped memory releaser class for COM allocated memory.
+// Example:
+//   base::win::ScopedCoMem<ITEMIDLIST> file_item;
+//   SHGetSomeInfo(&file_item, ...);
+//   ...
+//   return;  <-- memory released
+template<typename T>
+class ScopedCoMem {
+ public:
+  ScopedCoMem() : mem_ptr_(NULL) {}
+  ~ScopedCoMem() {
+    Reset(NULL);
+  }
+
+  T** operator&() {  // NOLINT
+    DCHECK(mem_ptr_ == NULL);  // To catch memory leaks.
+    return &mem_ptr_;
+  }
+
+  operator T*() {
+    return mem_ptr_;
+  }
+
+  T* operator->() {
+    DCHECK(mem_ptr_ != NULL);
+    return mem_ptr_;
+  }
+
+  const T* operator->() const {
+    DCHECK(mem_ptr_ != NULL);
+    return mem_ptr_;
+  }
+
+  void Reset(T* ptr) {
+    if (mem_ptr_)
+      CoTaskMemFree(mem_ptr_);
+    mem_ptr_ = ptr;
+  }
+
+  T* get() const {
+    return mem_ptr_;
+  }
+
+ private:
+  T* mem_ptr_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedCoMem);
+};
+
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_WIN_SCOPED_CO_MEM_H_
diff --git a/base/win/scoped_com_initializer.cc b/base/win/scoped_com_initializer.cc
new file mode 100644
index 0000000..73e1b5c
--- /dev/null
+++ b/base/win/scoped_com_initializer.cc
@@ -0,0 +1,37 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/scoped_com_initializer.h"
+
+#include "base/logging.h"
+
+namespace base {
+namespace win {
+
+ScopedCOMInitializer::ScopedCOMInitializer() {
+  Initialize(COINIT_APARTMENTTHREADED);
+}
+
+ScopedCOMInitializer::ScopedCOMInitializer(SelectMTA mta) {
+  Initialize(COINIT_MULTITHREADED);
+}
+
+ScopedCOMInitializer::~ScopedCOMInitializer() {
+  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+  if (Succeeded())
+    CoUninitialize();
+}
+
+bool ScopedCOMInitializer::Succeeded() const {
+  return SUCCEEDED(hr_);
+}
+
+void ScopedCOMInitializer::Initialize(COINIT init) {
+  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+  hr_ = CoInitializeEx(NULL, init);
+  DCHECK_NE(RPC_E_CHANGED_MODE, hr_) << "Invalid COM thread model change";
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/scoped_com_initializer.h b/base/win/scoped_com_initializer.h
new file mode 100644
index 0000000..3bb5795
--- /dev/null
+++ b/base/win/scoped_com_initializer.h
@@ -0,0 +1,53 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_SCOPED_COM_INITIALIZER_H_
+#define BASE_WIN_SCOPED_COM_INITIALIZER_H_
+
+#include <objbase.h>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/threading/thread_checker.h"
+#include "base/win/scoped_windows_thread_environment.h"
+
+namespace base {
+namespace win {
+
+// Initializes COM in the constructor (STA or MTA), and uninitializes COM in the
+// destructor.
+//
+// WARNING: This should only be used once per thread, ideally scoped to a
+// similar lifetime as the thread itself.  You should not be using this in
+// random utility functions that make COM calls -- instead ensure these
+// functions are running on a COM-supporting thread!
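+//
+// Illustrative use, based on the interface below:
+//   base::win::ScopedCOMInitializer com_initializer;
+//   if (!com_initializer.Succeeded())
+//     return;  // COM is not available on this thread.
+//   // Make COM calls here; CoUninitialize() runs on scope exit.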
+class BASE_EXPORT ScopedCOMInitializer : public ScopedWindowsThreadEnvironment {
+ public:
+  // Enum value provided to initialize the thread as an MTA instead of STA.
+  enum SelectMTA { kMTA };
+
+  // Constructor for STA initialization.
+  ScopedCOMInitializer();
+
+  // Constructor for MTA initialization.
+  explicit ScopedCOMInitializer(SelectMTA mta);
+
+  ~ScopedCOMInitializer() override;
+
+  // ScopedWindowsThreadEnvironment:
+  bool Succeeded() const override;
+
+ private:
+  void Initialize(COINIT init);
+
+  HRESULT hr_;
+  THREAD_CHECKER(thread_checker_);
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedCOMInitializer);
+};
+
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_WIN_SCOPED_COM_INITIALIZER_H_
diff --git a/base/win/scoped_gdi_object.h b/base/win/scoped_gdi_object.h
new file mode 100644
index 0000000..9d8465b
--- /dev/null
+++ b/base/win/scoped_gdi_object.h
@@ -0,0 +1,45 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_SCOPED_GDI_OBJECT_H_
+#define BASE_WIN_SCOPED_GDI_OBJECT_H_
+
+#include <windows.h>
+
+#include "base/scoped_generic.h"
+
+namespace base {
+namespace win {
+
+namespace internal {
+
+template <class T>
+struct ScopedGDIObjectTraits {
+  static T InvalidValue() { return nullptr; }
+  static void Free(T object) { DeleteObject(object); }
+};
+
+// An explicit specialization for HICON because we have to call DestroyIcon()
+// instead of DeleteObject() for HICON.
+template <>
+inline void ScopedGDIObjectTraits<HICON>::Free(HICON icon) {
+  DestroyIcon(icon);
+}
+
+}  // namespace internal
+
+// Like ScopedHandle but for GDI objects.
+template <class T>
+using ScopedGDIObject = ScopedGeneric<T, internal::ScopedGDIObjectTraits<T>>;
+
+// Typedefs for some common use cases.
+typedef ScopedGDIObject<HBITMAP> ScopedBitmap;
+typedef ScopedGDIObject<HRGN> ScopedRegion;
+typedef ScopedGDIObject<HFONT> ScopedHFONT;
+typedef ScopedGDIObject<HICON> ScopedHICON;
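+
+// Illustrative use (|source_icon| is a hypothetical existing HICON):
+//   base::win::ScopedHICON icon(::CopyIcon(source_icon));
+//   // DestroyIcon() is called automatically when |icon| goes out of scope.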
+
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_WIN_SCOPED_GDI_OBJECT_H_
diff --git a/base/win/scoped_handle.cc b/base/win/scoped_handle.cc
new file mode 100644
index 0000000..4f54df5
--- /dev/null
+++ b/base/win/scoped_handle.cc
@@ -0,0 +1,40 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/scoped_handle.h"
+#include "base/win/scoped_handle_verifier.h"
+#include "base/win/windows_types.h"
+
+namespace base {
+namespace win {
+
+using base::win::internal::ScopedHandleVerifier;
+
+// Static.
+bool HandleTraits::CloseHandle(HANDLE handle) {
+  return ScopedHandleVerifier::Get()->CloseHandle(handle);
+}
+
+// Static.
+void VerifierTraits::StartTracking(HANDLE handle, const void* owner,
+                                   const void* pc1, const void* pc2) {
+  return ScopedHandleVerifier::Get()->StartTracking(handle, owner, pc1, pc2);
+}
+
+// Static.
+void VerifierTraits::StopTracking(HANDLE handle, const void* owner,
+                                  const void* pc1, const void* pc2) {
+  return ScopedHandleVerifier::Get()->StopTracking(handle, owner, pc1, pc2);
+}
+
+void DisableHandleVerifier() {
+  return ScopedHandleVerifier::Get()->Disable();
+}
+
+void OnHandleBeingClosed(HANDLE handle) {
+  return ScopedHandleVerifier::Get()->OnHandleBeingClosed(handle);
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/scoped_handle.h b/base/win/scoped_handle.h
new file mode 100644
index 0000000..1b630bb
--- /dev/null
+++ b/base/win/scoped_handle.h
@@ -0,0 +1,183 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_SCOPED_HANDLE_H_
+#define BASE_WIN_SCOPED_HANDLE_H_
+
+#include "base/win/windows_types.h"
+
+#include "base/base_export.h"
+#include "base/gtest_prod_util.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/macros.h"
+
+// TODO(rvargas): remove this with the rest of the verifier.
+#if defined(COMPILER_MSVC)
+#include <intrin.h>
+#define BASE_WIN_GET_CALLER _ReturnAddress()
+#elif defined(COMPILER_GCC)
+#define BASE_WIN_GET_CALLER __builtin_extract_return_addr(\
+    __builtin_return_address(0))
+#endif
+
+namespace base {
+namespace win {
+
+// Generic wrapper for raw handles that takes care of closing handles
+// automatically. The class interface follows the style of
+// the ScopedFILE class with two additions:
+//   - IsValid() method can tolerate multiple invalid handle values such as NULL
+//     and INVALID_HANDLE_VALUE (-1) for Win32 handles.
+//   - Set() (and the constructors and assignment operators that call it)
+//     preserve the Windows LastError code. This ensures that GetLastError() can
+//     be called after stashing a handle in a GenericScopedHandle object. Doing
+//     this explicitly is necessary because of bug 528394 and VC++ 2015.
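+//
+// Illustrative use with the ScopedHandle typedef defined below:
+//   base::win::ScopedHandle mutex(::CreateMutex(nullptr, FALSE, nullptr));
+//   if (mutex.IsValid())
+//     ::WaitForSingleObject(mutex.Get(), INFINITE);
+//   // The handle is closed when |mutex| goes out of scope.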
+template <class Traits, class Verifier>
+class GenericScopedHandle {
+ public:
+  typedef typename Traits::Handle Handle;
+
+  GenericScopedHandle() : handle_(Traits::NullHandle()) {}
+
+  explicit GenericScopedHandle(Handle handle) : handle_(Traits::NullHandle()) {
+    Set(handle);
+  }
+
+  GenericScopedHandle(GenericScopedHandle&& other)
+      : handle_(Traits::NullHandle()) {
+    Set(other.Take());
+  }
+
+  ~GenericScopedHandle() {
+    Close();
+  }
+
+  bool IsValid() const {
+    return Traits::IsHandleValid(handle_);
+  }
+
+  GenericScopedHandle& operator=(GenericScopedHandle&& other) {
+    DCHECK_NE(this, &other);
+    Set(other.Take());
+    return *this;
+  }
+
+  void Set(Handle handle) {
+    if (handle_ != handle) {
+      // Preserve old LastError to avoid bug 528394.
+      auto last_error = ::GetLastError();
+      Close();
+
+      if (Traits::IsHandleValid(handle)) {
+        handle_ = handle;
+        Verifier::StartTracking(handle, this, BASE_WIN_GET_CALLER,
+                                GetProgramCounter());
+      }
+      ::SetLastError(last_error);
+    }
+  }
+
+  Handle Get() const {
+    return handle_;
+  }
+
+  // Transfers ownership away from this object.
+  Handle Take() {
+    Handle temp = handle_;
+    handle_ = Traits::NullHandle();
+    if (Traits::IsHandleValid(temp)) {
+      Verifier::StopTracking(temp, this, BASE_WIN_GET_CALLER,
+                             GetProgramCounter());
+    }
+    return temp;
+  }
+
+  // Explicitly closes the owned handle.
+  void Close() {
+    if (Traits::IsHandleValid(handle_)) {
+      Verifier::StopTracking(handle_, this, BASE_WIN_GET_CALLER,
+                             GetProgramCounter());
+
+      Traits::CloseHandle(handle_);
+      handle_ = Traits::NullHandle();
+    }
+  }
+
+ private:
+  FRIEND_TEST_ALL_PREFIXES(ScopedHandleTest, ActiveVerifierWrongOwner);
+  FRIEND_TEST_ALL_PREFIXES(ScopedHandleTest, ActiveVerifierUntrackedHandle);
+  Handle handle_;
+
+  DISALLOW_COPY_AND_ASSIGN(GenericScopedHandle);
+};
+
+#undef BASE_WIN_GET_CALLER
+
+// The traits class for Win32 handles that can be closed via CloseHandle() API.
+class HandleTraits {
+ public:
+  typedef HANDLE Handle;
+
+  // Closes the handle.
+  static bool BASE_EXPORT CloseHandle(HANDLE handle);
+
+  // Returns true if the handle value is valid.
+  static bool IsHandleValid(HANDLE handle) {
+    return handle != NULL && handle != INVALID_HANDLE_VALUE;
+  }
+
+  // Returns NULL handle value.
+  static HANDLE NullHandle() {
+    return NULL;
+  }
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(HandleTraits);
+};
+
+// Do-nothing verifier.
+class DummyVerifierTraits {
+ public:
+  typedef HANDLE Handle;
+
+  static void StartTracking(HANDLE handle, const void* owner,
+                            const void* pc1, const void* pc2) {}
+  static void StopTracking(HANDLE handle, const void* owner,
+                           const void* pc1, const void* pc2) {}
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(DummyVerifierTraits);
+};
+
+// Performs actual run-time tracking.
+class BASE_EXPORT VerifierTraits {
+ public:
+  typedef HANDLE Handle;
+
+  static void StartTracking(HANDLE handle, const void* owner,
+                            const void* pc1, const void* pc2);
+  static void StopTracking(HANDLE handle, const void* owner,
+                           const void* pc1, const void* pc2);
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(VerifierTraits);
+};
+
+typedef GenericScopedHandle<HandleTraits, VerifierTraits> ScopedHandle;
+
+// This function may be called by the embedder to disable the use of
+// VerifierTraits at runtime. It has no effect if DummyVerifierTraits is used
+// for ScopedHandle.
+BASE_EXPORT void DisableHandleVerifier();
+
+// This should be called whenever the OS is closing a handle, if extended
+// verification of improper handle closing is desired. If |handle| is being
+// tracked by the handle verifier and ScopedHandle is not the one closing it,
+// a CHECK is generated.
+BASE_EXPORT void OnHandleBeingClosed(HANDLE handle);
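+
+// Illustrative sketch (an assumption about embedder usage, not part of this
+// API): an embedder that interposes handle closing could notify the verifier
+// before the real close:
+//   BOOL MyCloseHandleHook(HANDLE handle) {
+//     base::win::OnHandleBeingClosed(handle);
+//     return ::CloseHandle(handle);
+//   }
+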
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_WIN_SCOPED_HANDLE_H_
diff --git a/base/win/scoped_handle_test_dll.cc b/base/win/scoped_handle_test_dll.cc
new file mode 100644
index 0000000..75484aa
--- /dev/null
+++ b/base/win/scoped_handle_test_dll.cc
@@ -0,0 +1,127 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <windows.h>
+
+#include <vector>
+
+#include "base/win/base_win_buildflags.h"
+#include "base/win/current_module.h"
+#include "base/win/scoped_handle.h"
+#include "base/win/scoped_handle_verifier.h"
+
+namespace base {
+namespace win {
+namespace testing {
+
+extern "C" bool __declspec(dllexport) RunTest();
+
+namespace {
+
+struct ThreadParams {
+  HANDLE ready_event;
+  HANDLE start_event;
+};
+
+// Note: this must use only native functions to avoid instantiating the
+// ActiveVerifier, e.g. it can't use base::Thread or even base::PlatformThread.
+DWORD __stdcall ThreadFunc(void* params) {
+  ThreadParams* thread_params = reinterpret_cast<ThreadParams*>(params);
+  HANDLE handle = ::CreateMutex(nullptr, false, nullptr);
+
+  ::SetEvent(thread_params->ready_event);
+  ::WaitForSingleObject(thread_params->start_event, INFINITE);
+  ScopedHandle handle_holder(handle);
+  return 0;
+}
+
+bool InternalRunThreadTest() {
+  std::vector<HANDLE> threads_;
+  // From manual testing, the bug fixed by crrev.com/678736a starts reliably
+  // causing handle verifier asserts to trigger at around 100 threads, so make
+  // it 200 to be sure to detect any future regressions.
+  const size_t kNumThreads = 200;
+
+  // bManualReset is set to true to allow signalling multiple threads.
+  HANDLE start_event = ::CreateEvent(nullptr, true, false, nullptr);
+  if (!start_event)
+    return false;
+
+  HANDLE ready_event = CreateEvent(nullptr, false, false, nullptr);
+  if (!ready_event)
+    return false;
+
+  ThreadParams thread_params = { ready_event, start_event };
+
+  for (size_t i = 0; i < kNumThreads; i++) {
+    HANDLE thread_handle =
+        ::CreateThread(nullptr, 0, ThreadFunc,
+                       reinterpret_cast<void*>(&thread_params), 0, nullptr);
+    if (!thread_handle)
+      break;
+    ::WaitForSingleObject(ready_event, INFINITE);
+    threads_.push_back(thread_handle);
+  }
+
+  ::CloseHandle(ready_event);
+
+  if (threads_.size() != kNumThreads) {
+    for (auto* thread : threads_)
+      ::CloseHandle(thread);
+    ::CloseHandle(start_event);
+    return false;
+  }
+
+  ::SetEvent(start_event);
+  ::CloseHandle(start_event);
+  for (auto* thread : threads_) {
+    ::WaitForSingleObject(thread, INFINITE);
+    ::CloseHandle(thread);
+  }
+
+  return true;
+}
+
+bool InternalRunLocationTest() {
+  // Create a new handle and then set LastError again.
+  HANDLE handle = ::CreateMutex(nullptr, false, nullptr);
+  if (!handle)
+    return false;
+  ScopedHandle handle_holder(handle);
+
+  HMODULE verifier_module =
+      base::win::internal::GetHandleVerifierModuleForTesting();
+  if (!verifier_module)
+    return false;
+
+  // Get my module
+  HMODULE my_module = CURRENT_MODULE();
+  if (!my_module)
+    return false;
+
+  HMODULE main_module = ::GetModuleHandle(NULL);
+
+#if BUILDFLAG(SINGLE_MODULE_MODE_HANDLE_VERIFIER)
+  // In a component build ActiveVerifier will always be created inside base.dll
+  // as the code always lives there.
+  if (verifier_module == my_module || verifier_module == main_module)
+    return false;
+#else
+  // In a non-component build, ActiveVerifier should always be created in the
+  // version of base linked with the main executable.
+  if (verifier_module == my_module || verifier_module != main_module)
+    return false;
+#endif
+  return true;
+}
+
+}  // namespace
+
+bool RunTest() {
+  return InternalRunThreadTest() && InternalRunLocationTest();
+}
+
+}  // namespace testing
+}  // namespace win
+}  // namespace base
diff --git a/base/win/scoped_handle_unittest.cc b/base/win/scoped_handle_unittest.cc
new file mode 100644
index 0000000..ca6fb45
--- /dev/null
+++ b/base/win/scoped_handle_unittest.cc
@@ -0,0 +1,142 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <windows.h>
+#include <winternl.h>
+
+#include "base/base_switches.h"
+#include "base/command_line.h"
+#include "base/files/file_path.h"
+#include "base/scoped_native_library.h"
+#include "base/test/multiprocess_test.h"
+#include "base/test/test_timeouts.h"
+#include "base/win/scoped_handle.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/multiprocess_func_list.h"
+
+namespace base {
+namespace win {
+
+namespace testing {
+extern "C" bool __declspec(dllexport) RunTest();
+}  // namespace testing
+
+TEST(ScopedHandleTest, ScopedHandle) {
+  // Any illegal error code will do. We just need to test that it is preserved
+  // by ScopedHandle to avoid bug 528394.
+  const DWORD magic_error = 0x12345678;
+
+  HANDLE handle = ::CreateMutex(nullptr, false, nullptr);
+  // Call SetLastError after creating the handle.
+  ::SetLastError(magic_error);
+  base::win::ScopedHandle handle_holder(handle);
+  EXPECT_EQ(magic_error, ::GetLastError());
+
+  // Create a new handle and then set LastError again.
+  handle = ::CreateMutex(nullptr, false, nullptr);
+  ::SetLastError(magic_error);
+  handle_holder.Set(handle);
+  EXPECT_EQ(magic_error, ::GetLastError());
+
+  // Create a new handle and then set LastError again.
+  handle = ::CreateMutex(nullptr, false, nullptr);
+  base::win::ScopedHandle handle_source(handle);
+  ::SetLastError(magic_error);
+  handle_holder = std::move(handle_source);
+  EXPECT_EQ(magic_error, ::GetLastError());
+}
+
+TEST(ScopedHandleTest, ActiveVerifierTrackedHasBeenClosed) {
+  HANDLE handle = ::CreateMutex(nullptr, false, nullptr);
+  ASSERT_NE(HANDLE(nullptr), handle);
+  typedef NTSTATUS(WINAPI * NtCloseFunc)(HANDLE);
+  NtCloseFunc ntclose = reinterpret_cast<NtCloseFunc>(
+      GetProcAddress(GetModuleHandle(L"ntdll.dll"), "NtClose"));
+  ASSERT_NE(nullptr, ntclose);
+
+  ASSERT_DEATH({
+    base::win::ScopedHandle handle_holder(handle);
+    ntclose(handle);
+    // Destructing a ScopedHandle with an illegally closed handle should fail.
+  }, "");
+}
+
+TEST(ScopedHandleTest, ActiveVerifierDoubleTracking) {
+  HANDLE handle = ::CreateMutex(nullptr, false, nullptr);
+  ASSERT_NE(HANDLE(nullptr), handle);
+
+  base::win::ScopedHandle handle_holder(handle);
+
+  ASSERT_DEATH({
+    base::win::ScopedHandle handle_holder2(handle);
+  }, "");
+}
+
+TEST(ScopedHandleTest, ActiveVerifierWrongOwner) {
+  HANDLE handle = ::CreateMutex(nullptr, false, nullptr);
+  ASSERT_NE(HANDLE(nullptr), handle);
+
+  base::win::ScopedHandle handle_holder(handle);
+  ASSERT_DEATH({
+    base::win::ScopedHandle handle_holder2;
+    handle_holder2.handle_ = handle;
+  }, "");
+  ASSERT_TRUE(handle_holder.IsValid());
+  handle_holder.Close();
+}
+
+TEST(ScopedHandleTest, ActiveVerifierUntrackedHandle) {
+  HANDLE handle = ::CreateMutex(nullptr, false, nullptr);
+  ASSERT_NE(HANDLE(nullptr), handle);
+
+  ASSERT_DEATH({
+    base::win::ScopedHandle handle_holder;
+    handle_holder.handle_ = handle;
+  }, "");
+
+  ASSERT_TRUE(::CloseHandle(handle));
+}
+
+// Under ASan, the multi-process test crashes during process shutdown for
+// unknown reasons. Disable it for now. http://crbug.com/685262
+#if defined(ADDRESS_SANITIZER)
+#define MAYBE_MultiProcess DISABLED_MultiProcess
+#else
+#define MAYBE_MultiProcess MultiProcess
+#endif
+
+TEST(ScopedHandleTest, MAYBE_MultiProcess) {
+  // Initializing ICU in the child process causes a scoped handle to be created
+  // before the test gets a chance to test the race condition, so disable ICU
+  // for the child process here.
+  CommandLine command_line(base::GetMultiProcessTestChildBaseCommandLine());
+  command_line.AppendSwitch(switches::kTestDoNotInitializeIcu);
+
+  base::Process test_child_process = base::SpawnMultiProcessTestChild(
+      "ActiveVerifierChildProcess", command_line, LaunchOptions());
+
+  int rv = -1;
+  ASSERT_TRUE(test_child_process.WaitForExitWithTimeout(
+      TestTimeouts::action_timeout(), &rv));
+  EXPECT_EQ(0, rv);
+}
+
+MULTIPROCESS_TEST_MAIN(ActiveVerifierChildProcess) {
+  ScopedNativeLibrary module(FilePath(L"scoped_handle_test_dll.dll"));
+
+  if (!module.is_valid())
+    return 1;
+  auto run_test_function = reinterpret_cast<decltype(&testing::RunTest)>(
+      module.GetFunctionPointer("RunTest"));
+  if (!run_test_function)
+    return 1;
+  if (!run_test_function())
+    return 1;
+
+  return 0;
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/scoped_handle_verifier.cc b/base/win/scoped_handle_verifier.cc
new file mode 100644
index 0000000..191a240
--- /dev/null
+++ b/base/win/scoped_handle_verifier.cc
@@ -0,0 +1,231 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/scoped_handle_verifier.h"
+
+#include <stddef.h>
+#include <windows.h>
+
+#include <unordered_map>
+
+#include "base/debug/alias.h"
+#include "base/debug/stack_trace.h"
+#include "base/synchronization/lock_impl.h"
+#include "base/win/base_win_buildflags.h"
+#include "base/win/current_module.h"
+
+extern "C" {
+__declspec(dllexport) void* GetHandleVerifier();
+
+void* GetHandleVerifier() {
+  return base::win::internal::ScopedHandleVerifier::Get();
+}
+}  // extern "C"
+
+namespace {
+
+base::win::internal::ScopedHandleVerifier* g_active_verifier = NULL;
+typedef void* (*GetHandleVerifierFn)();
+typedef std::unordered_map<HANDLE,
+                           base::win::internal::ScopedHandleVerifierInfo,
+                           base::win::internal::HandleHash>
+    HandleMap;
+typedef base::internal::LockImpl NativeLock;
+
+NativeLock* GetLock() {
+  static auto* native_lock = new NativeLock();
+  return native_lock;
+}
+
+// Simple automatic locking using a native critical section so it supports
+// recursive locking.
+class AutoNativeLock {
+ public:
+  explicit AutoNativeLock(NativeLock& lock) : lock_(lock) { lock_.Lock(); }
+
+  ~AutoNativeLock() { lock_.Unlock(); }
+
+ private:
+  NativeLock& lock_;
+  DISALLOW_COPY_AND_ASSIGN(AutoNativeLock);
+};
+
+}  // namespace
+
+namespace base {
+namespace win {
+namespace internal {
+
+ScopedHandleVerifier::ScopedHandleVerifier(bool enabled)
+    : enabled_(enabled), lock_(GetLock()) {}
+
+// static
+ScopedHandleVerifier* ScopedHandleVerifier::Get() {
+  if (!g_active_verifier)
+    ScopedHandleVerifier::InstallVerifier();
+
+  return g_active_verifier;
+}
+
+bool CloseHandleWrapper(HANDLE handle) {
+  if (!::CloseHandle(handle))
+    CHECK(false);  // CloseHandle failed.
+  return true;
+}
+
+// Assigns the g_active_verifier global within the GetLock() lock.
+// If |existing_verifier| is non-null then |enabled| is ignored.
+void ThreadSafeAssignOrCreateScopedHandleVerifier(
+    ScopedHandleVerifier* existing_verifier,
+    bool enabled) {
+  AutoNativeLock lock(*GetLock());
+  // Another thread in this module might be trying to assign the global
+  // verifier, so check that within the lock here.
+  if (g_active_verifier)
+    return;
+  g_active_verifier =
+      existing_verifier ? existing_verifier : new ScopedHandleVerifier(enabled);
+}
+
+// static
+void ScopedHandleVerifier::InstallVerifier() {
+#if BUILDFLAG(SINGLE_MODULE_MODE_HANDLE_VERIFIER)
+  // Component build has one Active Verifier per module.
+  ThreadSafeAssignOrCreateScopedHandleVerifier(nullptr, true);
+#else
+  // If you are reading this, wondering why your process seems deadlocked, take
+  // a look at your DllMain code and remove things that should not be done
+  // there, like doing whatever gave you that nice windows handle you are trying
+  // to store in a ScopedHandle.
+  HMODULE main_module = ::GetModuleHandle(NULL);
+  GetHandleVerifierFn get_handle_verifier =
+      reinterpret_cast<GetHandleVerifierFn>(
+          ::GetProcAddress(main_module, "GetHandleVerifier"));
+
+  // This should only happen when running in a DLL that is linked with base
+  // while the hosting EXE is not. In this case, create a ScopedHandleVerifier
+  // for the current module but leave it disabled.
+  if (!get_handle_verifier) {
+    ThreadSafeAssignOrCreateScopedHandleVerifier(nullptr, false);
+    return;
+  }
+
+  // Check if in the main module.
+  if (get_handle_verifier == GetHandleVerifier) {
+    ThreadSafeAssignOrCreateScopedHandleVerifier(nullptr, true);
+    return;
+  }
+
+  ScopedHandleVerifier* main_module_verifier =
+      reinterpret_cast<ScopedHandleVerifier*>(get_handle_verifier());
+
+  // The main module should always create a verifier on demand.
+  DCHECK(main_module_verifier);
+
+  ThreadSafeAssignOrCreateScopedHandleVerifier(main_module_verifier, false);
+#endif
+}
+
+bool ScopedHandleVerifier::CloseHandle(HANDLE handle) {
+  if (!enabled_)
+    return CloseHandleWrapper(handle);
+
+  closing_.Set(true);
+  CloseHandleWrapper(handle);
+  closing_.Set(false);
+
+  return true;
+}
+
+// static
+NativeLock* ScopedHandleVerifier::GetLock() {
+  return ::GetLock();
+}
+
+void ScopedHandleVerifier::StartTracking(HANDLE handle,
+                                         const void* owner,
+                                         const void* pc1,
+                                         const void* pc2) {
+  if (!enabled_)
+    return;
+
+  // Grab the thread id before the lock.
+  DWORD thread_id = GetCurrentThreadId();
+
+  AutoNativeLock lock(*lock_);
+
+  ScopedHandleVerifierInfo handle_info = {owner, pc1, pc2,
+                                          base::debug::StackTrace(), thread_id};
+  std::pair<HANDLE, ScopedHandleVerifierInfo> item(handle, handle_info);
+  std::pair<HandleMap::iterator, bool> result = map_.insert(item);
+  if (!result.second) {
+    ScopedHandleVerifierInfo other = result.first->second;
+    base::debug::Alias(&other);
+    auto creation_stack = creation_stack_;
+    base::debug::Alias(&creation_stack);
+    CHECK(false);  // Attempt to start tracking already tracked handle.
+  }
+}
+
+void ScopedHandleVerifier::StopTracking(HANDLE handle,
+                                        const void* owner,
+                                        const void* pc1,
+                                        const void* pc2) {
+  if (!enabled_)
+    return;
+
+  AutoNativeLock lock(*lock_);
+  HandleMap::iterator i = map_.find(handle);
+  if (i == map_.end()) {
+    auto creation_stack = creation_stack_;
+    base::debug::Alias(&creation_stack);
+    CHECK(false);  // Attempting to close an untracked handle.
+  }
+
+  ScopedHandleVerifierInfo other = i->second;
+  if (other.owner != owner) {
+    base::debug::Alias(&other);
+    auto creation_stack = creation_stack_;
+    base::debug::Alias(&creation_stack);
+    CHECK(false);  // Attempting to close a handle not owned by opener.
+  }
+
+  map_.erase(i);
+}
+
+void ScopedHandleVerifier::Disable() {
+  enabled_ = false;
+}
+
+void ScopedHandleVerifier::OnHandleBeingClosed(HANDLE handle) {
+  if (!enabled_)
+    return;
+
+  if (closing_.Get())
+    return;
+
+  AutoNativeLock lock(*lock_);
+  HandleMap::iterator i = map_.find(handle);
+  if (i == map_.end())
+    return;
+
+  ScopedHandleVerifierInfo other = i->second;
+  base::debug::Alias(&other);
+  auto creation_stack = creation_stack_;
+  base::debug::Alias(&creation_stack);
+  CHECK(false);  // CloseHandle called on tracked handle.
+}
+
+HMODULE ScopedHandleVerifier::GetModule() const {
+  return CURRENT_MODULE();
+}
+
+HMODULE GetHandleVerifierModuleForTesting() {
+  return g_active_verifier->GetModule();
+}
+
+}  // namespace internal
+}  // namespace win
+}  // namespace base
diff --git a/base/win/scoped_handle_verifier.h b/base/win/scoped_handle_verifier.h
new file mode 100644
index 0000000..008e790
--- /dev/null
+++ b/base/win/scoped_handle_verifier.h
@@ -0,0 +1,90 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_SCOPED_HANDLE_VERIFIER_H_
+#define BASE_WIN_SCOPED_HANDLE_VERIFIER_H_
+
+#include "base/win/windows_types.h"
+
+#include <unordered_map>
+
+#include "base/base_export.h"
+#include "base/debug/stack_trace.h"
+#include "base/hash.h"
+#include "base/synchronization/lock_impl.h"
+#include "base/threading/thread_local.h"
+
+namespace base {
+namespace win {
+namespace internal {
+
+struct HandleHash {
+  size_t operator()(const HANDLE& handle) const {
+    char buffer[sizeof(handle)];
+    memcpy(buffer, &handle, sizeof(handle));
+    return base::Hash(buffer, sizeof(buffer));
+  }
+};
+
+struct ScopedHandleVerifierInfo {
+  const void* owner;
+  const void* pc1;
+  const void* pc2;
+  base::debug::StackTrace stack;
+  DWORD thread_id;
+};
+
+// Implements the actual object that is verifying handles for this process.
+// The active instance is shared across the module boundary but there is no
+// way to delete this object from the wrong side of it (or any side, actually).
+// We need [[clang::lto_visibility_public]] because instances of this class are
+// passed across module boundaries. This means different modules must have
+// compatible definitions of the class even when whole program optimization is
+// enabled - which is what this attribute accomplishes. The pragma stops MSVC
+// from emitting an unrecognized attribute warning.
+#pragma warning(push)
+#pragma warning(disable : 5030)
+class [[clang::lto_visibility_public]] ScopedHandleVerifier {
+#pragma warning(pop)
+ public:
+  explicit ScopedHandleVerifier(bool enabled);
+
+  // Retrieves the current verifier.
+  static ScopedHandleVerifier* Get();
+
+  // The methods required by HandleTraits. They are virtual because we need to
+  // forward the call execution to another module, instead of letting the
+  // compiler call the version that is linked in the current module.
+  virtual bool CloseHandle(HANDLE handle);
+  virtual void StartTracking(HANDLE handle, const void* owner, const void* pc1,
+                             const void* pc2);
+  virtual void StopTracking(HANDLE handle, const void* owner, const void* pc1,
+                            const void* pc2);
+  virtual void Disable();
+  virtual void OnHandleBeingClosed(HANDLE handle);
+  virtual HMODULE GetModule() const;
+
+ private:
+  ~ScopedHandleVerifier();  // Not implemented.
+
+  static base::internal::LockImpl* GetLock();
+  static void InstallVerifier();
+
+  base::debug::StackTrace creation_stack_;
+  bool enabled_;
+  base::ThreadLocalBoolean closing_;
+  base::internal::LockImpl* lock_;
+  std::unordered_map<HANDLE, ScopedHandleVerifierInfo, HandleHash> map_;
+  DISALLOW_COPY_AND_ASSIGN(ScopedHandleVerifier);
+};
+
+// This testing function returns the module in which the concrete
+// ScopedHandleVerifier implementation was instantiated.
+BASE_EXPORT HMODULE GetHandleVerifierModuleForTesting();
+
+}  // namespace internal
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_WIN_SCOPED_HANDLE_VERIFIER_H_
diff --git a/base/win/scoped_hdc.h b/base/win/scoped_hdc.h
new file mode 100644
index 0000000..890e34a
--- /dev/null
+++ b/base/win/scoped_hdc.h
@@ -0,0 +1,78 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_SCOPED_HDC_H_
+#define BASE_WIN_SCOPED_HDC_H_
+
+#include <windows.h>
+
+#include "base/debug/gdi_debug_util_win.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/win/scoped_handle.h"
+
+namespace base {
+namespace win {
+
+// Like ScopedHandle but for HDC.  Only use this on HDCs returned from
+// GetDC.
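+//
+// Illustrative use (|hwnd| is a hypothetical window handle):
+//   base::win::ScopedGetDC dc(hwnd);
+//   TEXTMETRIC metrics;
+//   ::GetTextMetrics(dc, &metrics);  // Implicit conversion to HDC.
+//   // ReleaseDC() runs when |dc| goes out of scope.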
+class ScopedGetDC {
+ public:
+  explicit ScopedGetDC(HWND hwnd)
+      : hwnd_(hwnd),
+        hdc_(GetDC(hwnd)) {
+    if (hwnd_) {
+      DCHECK(IsWindow(hwnd_));
+      DCHECK(hdc_);
+    } else {
+      // If GetDC(NULL) returns NULL, something really bad has happened, like
+      // GDI handle exhaustion.  In this case Chrome is going to behave badly no
+      // matter what, so we may as well just force a crash now.
+      if (!hdc_)
+        base::debug::CollectGDIUsageAndDie();
+    }
+  }
+
+  ~ScopedGetDC() {
+    if (hdc_)
+      ReleaseDC(hwnd_, hdc_);
+  }
+
+  operator HDC() { return hdc_; }
+
+ private:
+  HWND hwnd_;
+  HDC hdc_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedGetDC);
+};
+
+// Like ScopedHandle but for HDC.  Only use this on HDCs returned from
+// CreateCompatibleDC, CreateDC and CreateIC.
+class CreateDCTraits {
+ public:
+  typedef HDC Handle;
+
+  static bool CloseHandle(HDC handle) {
+    return ::DeleteDC(handle) != FALSE;
+  }
+
+  static bool IsHandleValid(HDC handle) {
+    return handle != NULL;
+  }
+
+  static HDC NullHandle() {
+    return NULL;
+  }
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(CreateDCTraits);
+};
+
+typedef GenericScopedHandle<CreateDCTraits, DummyVerifierTraits> ScopedCreateDC;
+
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_WIN_SCOPED_HDC_H_
diff --git a/base/win/scoped_hglobal.h b/base/win/scoped_hglobal.h
new file mode 100644
index 0000000..abe9a5a
--- /dev/null
+++ b/base/win/scoped_hglobal.h
@@ -0,0 +1,53 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_SCOPED_HGLOBAL_H_
+#define BASE_WIN_SCOPED_HGLOBAL_H_
+
+#include <windows.h>
+#include <stddef.h>
+
+#include "base/macros.h"
+
+namespace base {
+namespace win {
+
+// Like ScopedHandle except for HGLOBAL.
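+//
+// Illustrative use (|hglobal| is a hypothetical HGLOBAL, e.g. clipboard data;
+// T is the locked pointer type):
+//   base::win::ScopedHGlobal<char*> data(hglobal);
+//   if (data.get()) {
+//     // Read up to data.Size() bytes through data.get(); GlobalUnlock()
+//     // runs when |data| goes out of scope.
+//   }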
+template<class T>
+class ScopedHGlobal {
+ public:
+  explicit ScopedHGlobal(HGLOBAL glob) : glob_(glob) {
+    data_ = static_cast<T>(GlobalLock(glob_));
+  }
+  ~ScopedHGlobal() {
+    GlobalUnlock(glob_);
+  }
+
+  T get() { return data_; }
+
+  size_t Size() const { return GlobalSize(glob_); }
+
+  T operator->() const {
+    assert(data_ != 0);
+    return data_;
+  }
+
+  T release() {
+    T data = data_;
+    data_ = NULL;
+    return data;
+  }
+
+ private:
+  HGLOBAL glob_;
+
+  T data_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedHGlobal);
+};
+
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_WIN_SCOPED_HGLOBAL_H_
diff --git a/base/win/scoped_hstring.cc b/base/win/scoped_hstring.cc
new file mode 100644
index 0000000..89d1f49
--- /dev/null
+++ b/base/win/scoped_hstring.cc
@@ -0,0 +1,131 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/scoped_hstring.h"
+
+#include <winstring.h>
+
+#include "base/strings/string_piece.h"
+#include "base/strings/utf_string_conversions.h"
+
+namespace base {
+
+namespace {
+
+static bool g_load_succeeded = false;
+
+FARPROC LoadComBaseFunction(const char* function_name) {
+  static HMODULE const handle = ::LoadLibrary(L"combase.dll");
+  return handle ? ::GetProcAddress(handle, function_name) : nullptr;
+}
+
+decltype(&::WindowsCreateString) GetWindowsCreateString() {
+  static decltype(&::WindowsCreateString) const function =
+      reinterpret_cast<decltype(&::WindowsCreateString)>(
+          LoadComBaseFunction("WindowsCreateString"));
+  return function;
+}
+
+decltype(&::WindowsDeleteString) GetWindowsDeleteString() {
+  static decltype(&::WindowsDeleteString) const function =
+      reinterpret_cast<decltype(&::WindowsDeleteString)>(
+          LoadComBaseFunction("WindowsDeleteString"));
+  return function;
+}
+
+decltype(&::WindowsGetStringRawBuffer) GetWindowsGetStringRawBuffer() {
+  static decltype(&::WindowsGetStringRawBuffer) const function =
+      reinterpret_cast<decltype(&::WindowsGetStringRawBuffer)>(
+          LoadComBaseFunction("WindowsGetStringRawBuffer"));
+  return function;
+}
+
+HRESULT WindowsCreateString(const base::char16* src,
+                            uint32_t len,
+                            HSTRING* out_hstr) {
+  decltype(&::WindowsCreateString) create_string_func =
+      GetWindowsCreateString();
+  if (!create_string_func)
+    return E_FAIL;
+  return create_string_func(src, len, out_hstr);
+}
+
+HRESULT WindowsDeleteString(HSTRING hstr) {
+  decltype(&::WindowsDeleteString) delete_string_func =
+      GetWindowsDeleteString();
+  if (!delete_string_func)
+    return E_FAIL;
+  return delete_string_func(hstr);
+}
+
+const base::char16* WindowsGetStringRawBuffer(HSTRING hstr, uint32_t* out_len) {
+  decltype(&::WindowsGetStringRawBuffer) get_string_raw_buffer_func =
+      GetWindowsGetStringRawBuffer();
+  if (!get_string_raw_buffer_func) {
+    *out_len = 0;
+    return nullptr;
+  }
+  return get_string_raw_buffer_func(hstr, out_len);
+}
+
+}  // namespace
+
+namespace internal {
+
+// static
+void ScopedHStringTraits::Free(HSTRING hstr) {
+  base::WindowsDeleteString(hstr);
+}
+
+}  // namespace internal
+
+namespace win {
+
+// static
+ScopedHString ScopedHString::Create(StringPiece16 str) {
+  DCHECK(g_load_succeeded);
+  HSTRING hstr;
+  HRESULT hr = base::WindowsCreateString(str.data(), str.length(), &hstr);
+  if (SUCCEEDED(hr))
+    return ScopedHString(hstr);
+  DLOG(ERROR) << "Failed to create HSTRING: " << std::hex << hr;
+  return ScopedHString(nullptr);
+}
+
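+// static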
+ScopedHString ScopedHString::Create(StringPiece str) {
+  return Create(UTF8ToWide(str));
+}
+
+ScopedHString::ScopedHString(HSTRING hstr) : ScopedGeneric(hstr) {
+  DCHECK(g_load_succeeded);
+}
+
+// static
+bool ScopedHString::ResolveCoreWinRTStringDelayload() {
+  // TODO(finnur): Add AssertIOAllowed once crbug.com/770193 is fixed.
+
+  static const bool load_succeeded = []() {
+    bool success = GetWindowsCreateString() && GetWindowsDeleteString() &&
+                   GetWindowsGetStringRawBuffer();
+    g_load_succeeded = success;
+    return success;
+  }();
+  return load_succeeded;
+}
+
+StringPiece16 ScopedHString::Get() const {
+  UINT32 length = 0;
+  const wchar_t* buffer = base::WindowsGetStringRawBuffer(get(), &length);
+  return StringPiece16(buffer, length);
+}
+
+std::string ScopedHString::GetAsUTF8() const {
+  std::string result;
+  const StringPiece16 wide_string = Get();
+  WideToUTF8(wide_string.data(), wide_string.length(), &result);
+  return result;
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/scoped_hstring.h b/base/win/scoped_hstring.h
new file mode 100644
index 0000000..6ba1dab
--- /dev/null
+++ b/base/win/scoped_hstring.h
@@ -0,0 +1,73 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_SCOPED_HSTRING_H_
+#define BASE_WIN_SCOPED_HSTRING_H_
+
+#include <hstring.h>
+
+#include "base/scoped_generic.h"
+#include "base/strings/string_piece_forward.h"
+
+namespace base {
+
+namespace internal {
+
+// Traits for the ScopedHString class below; frees HSTRINGs allocated with
+// WindowsCreateString().
+struct BASE_EXPORT ScopedHStringTraits {
+  static HSTRING InvalidValue() { return nullptr; }
+  static void Free(HSTRING hstr);
+};
+
+}  // namespace internal
+
+namespace win {
+
+// ScopedHString is a wrapper around an HSTRING. Note that it requires certain
+// functions that are only available on Windows 8 and later, and that these
+// functions need to be delayloaded to avoid breaking Chrome on Windows 7.
+//
+// Callers MUST check the return value of ResolveCoreWinRTStringDelayload()
+// *before* using ScopedHString.
+//
+// One-time initialization for ScopedHString:
+//
+//   bool success = ScopedHString::ResolveCoreWinRTStringDelayload();
+//   if (success) {
+//     // ScopedHString can be used.
+//   } else {
+//     // Handle error.
+//   }
+//
+// Example use:
+//
+//   ScopedHString string = ScopedHString::Create(L"abc");
+//
+// Also:
+//
+//   HSTRING win_string;
+//   HRESULT hr = WindowsCreateString(..., &win_string);
+//   ScopedHString string(win_string);
+//
+class BASE_EXPORT ScopedHString
+    : public ScopedGeneric<HSTRING, base::internal::ScopedHStringTraits> {
+ public:
+  // Constructs a ScopedHString from an HSTRING, and takes ownership of |hstr|.
+  explicit ScopedHString(HSTRING hstr);
+
+  static ScopedHString Create(StringPiece16 str);
+  static ScopedHString Create(StringPiece str);
+
+  // Loads all required HSTRING functions, available from Win8 and onwards.
+  static bool ResolveCoreWinRTStringDelayload();
+
+  StringPiece16 Get() const;
+  std::string GetAsUTF8() const;
+};
+
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_WIN_SCOPED_HSTRING_H_
diff --git a/base/win/scoped_hstring_unittest.cc b/base/win/scoped_hstring_unittest.cc
new file mode 100644
index 0000000..fdcf8ff
--- /dev/null
+++ b/base/win/scoped_hstring_unittest.cc
@@ -0,0 +1,55 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/scoped_hstring.h"
+
+#include <winstring.h>
+
+#include "base/strings/utf_string_conversions.h"
+#include "base/win/core_winrt_util.h"
+#include "base/win/windows_version.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace win {
+
+namespace {
+
+constexpr wchar_t kTestString1[] = L"123";
+constexpr wchar_t kTestString2[] = L"456789";
+
+}  // namespace
+
+TEST(ScopedHStringTest, Init) {
+  // ScopedHString requires WinRT core functions, which are not available
+  // before Windows 8.
+  if (GetVersion() < VERSION_WIN8) {
+    EXPECT_FALSE(ScopedHString::ResolveCoreWinRTStringDelayload());
+    return;
+  }
+
+  EXPECT_TRUE(ScopedHString::ResolveCoreWinRTStringDelayload());
+
+  ScopedHString hstring = ScopedHString::Create(kTestString1);
+  std::string buffer = hstring.GetAsUTF8();
+  EXPECT_EQ(kTestString1, base::UTF8ToWide(buffer));
+  base::StringPiece16 contents = hstring.Get();
+  EXPECT_EQ(kTestString1, contents);
+
+  hstring.reset();
+  EXPECT_TRUE(hstring == NULL);
+  EXPECT_EQ(NULL, hstring.get());
+
+  ScopedHString hstring2 = ScopedHString::Create(kTestString2);
+  hstring.swap(hstring2);
+  EXPECT_TRUE(hstring2 == NULL);
+
+  buffer = hstring.GetAsUTF8();
+  EXPECT_EQ(kTestString2, base::UTF8ToWide(buffer));
+  contents = hstring.Get();
+  EXPECT_EQ(kTestString2, contents);
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/scoped_process_information.cc b/base/win/scoped_process_information.cc
new file mode 100644
index 0000000..935a4cc
--- /dev/null
+++ b/base/win/scoped_process_information.cc
@@ -0,0 +1,110 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/scoped_process_information.h"
+
+#include "base/logging.h"
+#include "base/win/scoped_handle.h"
+
+namespace base {
+namespace win {
+
+namespace {
+
+// Duplicates source into target, returning true upon success. |target| is
+// guaranteed to be untouched in case of failure. Succeeds with no side-effects
+// if source is NULL.
+bool CheckAndDuplicateHandle(HANDLE source, ScopedHandle* target) {
+  if (!source)
+    return true;
+
+  HANDLE temp = NULL;
+  if (!::DuplicateHandle(::GetCurrentProcess(), source,
+                         ::GetCurrentProcess(), &temp, 0, FALSE,
+                         DUPLICATE_SAME_ACCESS)) {
+    DWORD last_error = ::GetLastError();
+    DPLOG(ERROR) << "Failed to duplicate a handle " << last_error;
+    ::SetLastError(last_error);
+    return false;
+  }
+  target->Set(temp);
+  return true;
+}
+
+}  // namespace
+
+ScopedProcessInformation::ScopedProcessInformation()
+    : process_id_(0), thread_id_(0) {
+}
+
+ScopedProcessInformation::ScopedProcessInformation(
+    const PROCESS_INFORMATION& process_info) : process_id_(0), thread_id_(0) {
+  Set(process_info);
+}
+
+ScopedProcessInformation::~ScopedProcessInformation() {
+  Close();
+}
+
+bool ScopedProcessInformation::IsValid() const {
+  return process_id_ || process_handle_.Get() ||
+         thread_id_ || thread_handle_.Get();
+}
+
+void ScopedProcessInformation::Close() {
+  process_handle_.Close();
+  thread_handle_.Close();
+  process_id_ = 0;
+  thread_id_ = 0;
+}
+
+void ScopedProcessInformation::Set(const PROCESS_INFORMATION& process_info) {
+  if (IsValid())
+    Close();
+
+  process_handle_.Set(process_info.hProcess);
+  thread_handle_.Set(process_info.hThread);
+  process_id_ = process_info.dwProcessId;
+  thread_id_ = process_info.dwThreadId;
+}
+
+bool ScopedProcessInformation::DuplicateFrom(
+    const ScopedProcessInformation& other) {
+  DCHECK(!IsValid()) << "target ScopedProcessInformation must be invalid";
+  DCHECK(other.IsValid()) << "source ScopedProcessInformation must be valid";
+
+  if (CheckAndDuplicateHandle(other.process_handle(), &process_handle_) &&
+      CheckAndDuplicateHandle(other.thread_handle(), &thread_handle_)) {
+    process_id_ = other.process_id();
+    thread_id_ = other.thread_id();
+    return true;
+  }
+
+  return false;
+}
+
+PROCESS_INFORMATION ScopedProcessInformation::Take() {
+  PROCESS_INFORMATION process_information = {};
+  process_information.hProcess = process_handle_.Take();
+  process_information.hThread = thread_handle_.Take();
+  process_information.dwProcessId = process_id();
+  process_information.dwThreadId = thread_id();
+  process_id_ = 0;
+  thread_id_ = 0;
+
+  return process_information;
+}
+
+HANDLE ScopedProcessInformation::TakeProcessHandle() {
+  process_id_ = 0;
+  return process_handle_.Take();
+}
+
+HANDLE ScopedProcessInformation::TakeThreadHandle() {
+  thread_id_ = 0;
+  return thread_handle_.Take();
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/scoped_process_information.h b/base/win/scoped_process_information.h
new file mode 100644
index 0000000..01df861
--- /dev/null
+++ b/base/win/scoped_process_information.h
@@ -0,0 +1,83 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_SCOPED_PROCESS_INFORMATION_H_
+#define BASE_WIN_SCOPED_PROCESS_INFORMATION_H_
+
+#include <windows.h>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/win/scoped_handle.h"
+
+namespace base {
+namespace win {
+
+// Manages the closing of process and thread handles from PROCESS_INFORMATION
+// structures. Allows clients to take ownership of either handle independently.
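+//
+// Example use (a sketch; error handling elided):
+//
+//   PROCESS_INFORMATION temp_process_info = {};
+//   if (::CreateProcess(..., &temp_process_info)) {
+//     base::win::ScopedProcessInformation process_info(temp_process_info);
+//     // Both handles are closed when |process_info| goes out of scope.
+//   }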
+class BASE_EXPORT ScopedProcessInformation {
+ public:
+  ScopedProcessInformation();
+  explicit ScopedProcessInformation(const PROCESS_INFORMATION& process_info);
+  ~ScopedProcessInformation();
+
+  // Returns true iff this instance is holding a thread and/or process handle.
+  bool IsValid() const;
+
+  // Closes the held thread and process handles, if any.
+  void Close();
+
+  // Populates this instance with the provided |process_info|.
+  void Set(const PROCESS_INFORMATION& process_info);
+
+  // Populates this instance with duplicate handles and the thread/process IDs
+  // from |other|. Returns false in case of failure, in which case this instance
+  // will be completely unpopulated.
+  bool DuplicateFrom(const ScopedProcessInformation& other);
+
+  // Transfers ownership of the held PROCESS_INFORMATION, if any, away from this
+  // instance.
+  PROCESS_INFORMATION Take();
+
+  // Transfers ownership of the held process handle, if any, away from this
+  // instance. Note that the related process_id will also be cleared.
+  HANDLE TakeProcessHandle();
+
+  // Transfers ownership of the held thread handle, if any, away from this
+  // instance. Note that the related thread_id will also be cleared.
+  HANDLE TakeThreadHandle();
+
+  // Returns the held process handle, if any, while retaining ownership.
+  HANDLE process_handle() const {
+    return process_handle_.Get();
+  }
+
+  // Returns the held thread handle, if any, while retaining ownership.
+  HANDLE thread_handle() const {
+    return thread_handle_.Get();
+  }
+
+  // Returns the held process id, if any.
+  DWORD process_id() const {
+    return process_id_;
+  }
+
+  // Returns the held thread id, if any.
+  DWORD thread_id() const {
+    return thread_id_;
+  }
+
+ private:
+  ScopedHandle process_handle_;
+  ScopedHandle thread_handle_;
+  DWORD process_id_;
+  DWORD thread_id_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedProcessInformation);
+};
+
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_WIN_SCOPED_PROCESS_INFORMATION_H_
diff --git a/base/win/scoped_process_information_unittest.cc b/base/win/scoped_process_information_unittest.cc
new file mode 100644
index 0000000..799b273
--- /dev/null
+++ b/base/win/scoped_process_information_unittest.cc
@@ -0,0 +1,166 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <windows.h>
+
+#include <string>
+
+#include "base/command_line.h"
+#include "base/process/kill.h"
+#include "base/process/process.h"
+#include "base/test/multiprocess_test.h"
+#include "base/win/scoped_process_information.h"
+#include "testing/multiprocess_func_list.h"
+
+namespace {
+
+const DWORD kProcessId = 4321;
+const DWORD kThreadId = 1234;
+const HANDLE kProcessHandle = reinterpret_cast<HANDLE>(7651);
+const HANDLE kThreadHandle = reinterpret_cast<HANDLE>(1567);
+
+void MockCreateProcess(base::win::ScopedProcessInformation* process_info) {
+  PROCESS_INFORMATION process_information = {};
+  process_information.dwProcessId = kProcessId;
+  process_information.dwThreadId = kThreadId;
+  process_information.hProcess = kProcessHandle;
+  process_information.hThread = kThreadHandle;
+  process_info->Set(process_information);
+}
+
+}  // namespace
+
+class ScopedProcessInformationTest : public base::MultiProcessTest {
+ protected:
+  void DoCreateProcess(const std::string& main_id,
+                       PROCESS_INFORMATION* process_handle);
+};
+
+MULTIPROCESS_TEST_MAIN(ReturnSeven) {
+  return 7;
+}
+
+MULTIPROCESS_TEST_MAIN(ReturnNine) {
+  return 9;
+}
+
+void ScopedProcessInformationTest::DoCreateProcess(
+    const std::string& main_id, PROCESS_INFORMATION* process_handle) {
+  std::wstring cmd_line = MakeCmdLine(main_id).GetCommandLineString();
+  STARTUPINFO startup_info = {};
+  startup_info.cb = sizeof(startup_info);
+
+  EXPECT_TRUE(::CreateProcess(NULL, &cmd_line[0],
+                              NULL, NULL, false, 0, NULL, NULL,
+                              &startup_info, process_handle));
+}
+
+TEST_F(ScopedProcessInformationTest, InitiallyInvalid) {
+  base::win::ScopedProcessInformation process_info;
+  ASSERT_FALSE(process_info.IsValid());
+}
+
+TEST_F(ScopedProcessInformationTest, Receive) {
+  base::win::ScopedProcessInformation process_info;
+  MockCreateProcess(&process_info);
+
+  EXPECT_TRUE(process_info.IsValid());
+  EXPECT_EQ(kProcessId, process_info.process_id());
+  EXPECT_EQ(kThreadId, process_info.thread_id());
+  EXPECT_EQ(kProcessHandle, process_info.process_handle());
+  EXPECT_EQ(kThreadHandle, process_info.thread_handle());
+  process_info.Take();
+}
+
+TEST_F(ScopedProcessInformationTest, TakeProcess) {
+  base::win::ScopedProcessInformation process_info;
+  MockCreateProcess(&process_info);
+
+  HANDLE process = process_info.TakeProcessHandle();
+  EXPECT_EQ(kProcessHandle, process);
+  EXPECT_EQ(NULL, process_info.process_handle());
+  EXPECT_EQ(0u, process_info.process_id());
+  EXPECT_TRUE(process_info.IsValid());
+  process_info.Take();
+}
+
+TEST_F(ScopedProcessInformationTest, TakeThread) {
+  base::win::ScopedProcessInformation process_info;
+  MockCreateProcess(&process_info);
+
+  HANDLE thread = process_info.TakeThreadHandle();
+  EXPECT_EQ(kThreadHandle, thread);
+  EXPECT_EQ(NULL, process_info.thread_handle());
+  EXPECT_EQ(0u, process_info.thread_id());
+  EXPECT_TRUE(process_info.IsValid());
+  process_info.Take();
+}
+
+TEST_F(ScopedProcessInformationTest, TakeBoth) {
+  base::win::ScopedProcessInformation process_info;
+  MockCreateProcess(&process_info);
+
+  process_info.TakeProcessHandle();
+  process_info.TakeThreadHandle();
+  EXPECT_FALSE(process_info.IsValid());
+  process_info.Take();
+}
+
+TEST_F(ScopedProcessInformationTest, TakeWholeStruct) {
+  base::win::ScopedProcessInformation process_info;
+  MockCreateProcess(&process_info);
+
+  PROCESS_INFORMATION to_discard = process_info.Take();
+  EXPECT_EQ(kProcessId, to_discard.dwProcessId);
+  EXPECT_EQ(kThreadId, to_discard.dwThreadId);
+  EXPECT_EQ(kProcessHandle, to_discard.hProcess);
+  EXPECT_EQ(kThreadHandle, to_discard.hThread);
+  EXPECT_FALSE(process_info.IsValid());
+}
+
+TEST_F(ScopedProcessInformationTest, Duplicate) {
+  PROCESS_INFORMATION temp_process_information;
+  DoCreateProcess("ReturnSeven", &temp_process_information);
+  base::win::ScopedProcessInformation process_info;
+  process_info.Set(temp_process_information);
+
+  base::win::ScopedProcessInformation duplicate;
+  duplicate.DuplicateFrom(process_info);
+
+  ASSERT_TRUE(process_info.IsValid());
+  ASSERT_NE(0u, process_info.process_id());
+  ASSERT_EQ(duplicate.process_id(), process_info.process_id());
+  ASSERT_NE(0u, process_info.thread_id());
+  ASSERT_EQ(duplicate.thread_id(), process_info.thread_id());
+
+  // Validate that we have separate handles that are good.
+  int exit_code = 0;
+  base::Process process(process_info.TakeProcessHandle());
+  ASSERT_TRUE(process.WaitForExit(&exit_code));
+  ASSERT_EQ(7, exit_code);
+
+  exit_code = 0;
+  base::Process dup_process(duplicate.TakeProcessHandle());
+  ASSERT_TRUE(dup_process.WaitForExit(&exit_code));
+  ASSERT_EQ(7, exit_code);
+
+  ASSERT_TRUE(::CloseHandle(process_info.TakeThreadHandle()));
+  ASSERT_TRUE(::CloseHandle(duplicate.TakeThreadHandle()));
+}
+
+TEST_F(ScopedProcessInformationTest, Set) {
+  base::win::ScopedProcessInformation base_process_info;
+  MockCreateProcess(&base_process_info);
+
+  PROCESS_INFORMATION base_struct = base_process_info.Take();
+
+  base::win::ScopedProcessInformation process_info;
+  process_info.Set(base_struct);
+
+  EXPECT_EQ(kProcessId, process_info.process_id());
+  EXPECT_EQ(kThreadId, process_info.thread_id());
+  EXPECT_EQ(kProcessHandle, process_info.process_handle());
+  EXPECT_EQ(kThreadHandle, process_info.thread_handle());
+  base_struct = process_info.Take();
+}
diff --git a/base/win/scoped_propvariant.h b/base/win/scoped_propvariant.h
new file mode 100644
index 0000000..aa9afec
--- /dev/null
+++ b/base/win/scoped_propvariant.h
@@ -0,0 +1,58 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_SCOPED_PROPVARIANT_H_
+#define BASE_WIN_SCOPED_PROPVARIANT_H_
+
+#include <propidl.h>
+
+#include "base/logging.h"
+#include "base/macros.h"
+
+namespace base {
+namespace win {
+
+// A PROPVARIANT that is automatically initialized and cleared upon respective
+// construction and destruction of this class.
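+//
+// Example use (a sketch; |property_store| and |key| are assumed to exist):
+//
+//   ScopedPropVariant pv;
+//   if (property_store->GetValue(key, pv.Receive()) == S_OK) {
+//     // Read pv.get() here; the PROPVARIANT is cleared automatically when
+//     // |pv| goes out of scope.
+//   }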
+class ScopedPropVariant {
+ public:
+  ScopedPropVariant() {
+    PropVariantInit(&pv_);
+  }
+
+  ~ScopedPropVariant() {
+    Reset();
+  }
+
+  // Returns a pointer to the underlying PROPVARIANT for use as an out param in
+  // a function call.
+  PROPVARIANT* Receive() {
+    DCHECK_EQ(pv_.vt, VT_EMPTY);
+    return &pv_;
+  }
+
+  // Clears the instance to prepare it for re-use (e.g., via Receive).
+  void Reset() {
+    if (pv_.vt != VT_EMPTY) {
+      HRESULT result = PropVariantClear(&pv_);
+      DCHECK_EQ(result, S_OK);
+    }
+  }
+
+  const PROPVARIANT& get() const { return pv_; }
+  const PROPVARIANT* ptr() const { return &pv_; }
+
+ private:
+  PROPVARIANT pv_;
+
+  // Comparison operators for ScopedPropVariant are not supported at this point.
+  bool operator==(const ScopedPropVariant&) const;
+  bool operator!=(const ScopedPropVariant&) const;
+  DISALLOW_COPY_AND_ASSIGN(ScopedPropVariant);
+};
+
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_WIN_SCOPED_PROPVARIANT_H_
diff --git a/base/win/scoped_select_object.h b/base/win/scoped_select_object.h
new file mode 100644
index 0000000..59b21c1
--- /dev/null
+++ b/base/win/scoped_select_object.h
@@ -0,0 +1,43 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_SCOPED_SELECT_OBJECT_H_
+#define BASE_WIN_SCOPED_SELECT_OBJECT_H_
+
+#include <windows.h>
+
+#include "base/logging.h"
+#include "base/macros.h"
+
+namespace base {
+namespace win {
+
+// Helper class that selects an object into a DC and restores the previously
+// selected object upon destruction.
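+//
+// Example use (a sketch; |hdc| and |font| are assumed to exist):
+//
+//   {
+//     ScopedSelectObject select_font(hdc, font);
+//     // Draw here with |font| selected into |hdc|.
+//   }  // The previously selected object is restored.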
+class ScopedSelectObject {
+ public:
+  ScopedSelectObject(HDC hdc, HGDIOBJ object)
+      : hdc_(hdc),
+        oldobj_(SelectObject(hdc, object)) {
+    DCHECK(hdc_);
+    DCHECK(object);
+    DCHECK(oldobj_ != NULL && oldobj_ != HGDI_ERROR);
+  }
+
+  ~ScopedSelectObject() {
+    HGDIOBJ object = SelectObject(hdc_, oldobj_);
+    DCHECK((GetObjectType(oldobj_) != OBJ_REGION && object != NULL) ||
+           (GetObjectType(oldobj_) == OBJ_REGION && object != HGDI_ERROR));
+  }
+
+ private:
+  HDC hdc_;
+  HGDIOBJ oldobj_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedSelectObject);
+};
+
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_WIN_SCOPED_SELECT_OBJECT_H_
diff --git a/base/win/scoped_variant.cc b/base/win/scoped_variant.cc
new file mode 100644
index 0000000..0c1ee31
--- /dev/null
+++ b/base/win/scoped_variant.cc
@@ -0,0 +1,277 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/scoped_variant.h"
+
+#include "base/logging.h"
+
+namespace base {
+namespace win {
+
+// Global, const instance of an empty variant.
+const VARIANT ScopedVariant::kEmptyVariant = {{{VT_EMPTY}}};
+
+ScopedVariant::~ScopedVariant() {
+  static_assert(sizeof(ScopedVariant) == sizeof(VARIANT), "ScopedVariantSize");
+  ::VariantClear(&var_);
+}
+
+ScopedVariant::ScopedVariant(const wchar_t* str) {
+  var_.vt = VT_EMPTY;
+  Set(str);
+}
+
+ScopedVariant::ScopedVariant(const wchar_t* str, UINT length) {
+  var_.vt = VT_BSTR;
+  var_.bstrVal = ::SysAllocStringLen(str, length);
+}
+
+ScopedVariant::ScopedVariant(int value, VARTYPE vt) {
+  var_.vt = vt;
+  var_.lVal = value;
+}
+
+ScopedVariant::ScopedVariant(double value, VARTYPE vt) {
+  DCHECK(vt == VT_R8 || vt == VT_DATE);
+  var_.vt = vt;
+  var_.dblVal = value;
+}
+
+ScopedVariant::ScopedVariant(IDispatch* dispatch) {
+  var_.vt = VT_EMPTY;
+  Set(dispatch);
+}
+
+ScopedVariant::ScopedVariant(IUnknown* unknown) {
+  var_.vt = VT_EMPTY;
+  Set(unknown);
+}
+
+ScopedVariant::ScopedVariant(SAFEARRAY* safearray) {
+  var_.vt = VT_EMPTY;
+  Set(safearray);
+}
+
+ScopedVariant::ScopedVariant(const VARIANT& var) {
+  var_.vt = VT_EMPTY;
+  Set(var);
+}
+
+void ScopedVariant::Reset(const VARIANT& var) {
+  if (&var != &var_) {
+    ::VariantClear(&var_);
+    var_ = var;
+  }
+}
+
+VARIANT ScopedVariant::Release() {
+  VARIANT var = var_;
+  var_.vt = VT_EMPTY;
+  return var;
+}
+
+void ScopedVariant::Swap(ScopedVariant& var) {
+  VARIANT tmp = var_;
+  var_ = var.var_;
+  var.var_ = tmp;
+}
+
+VARIANT* ScopedVariant::Receive() {
+  DCHECK(!IsLeakableVarType(var_.vt)) << "variant leak. type: " << var_.vt;
+  return &var_;
+}
+
+VARIANT ScopedVariant::Copy() const {
+  VARIANT ret = {{{VT_EMPTY}}};
+  ::VariantCopy(&ret, &var_);
+  return ret;
+}
+
+int ScopedVariant::Compare(const VARIANT& var, bool ignore_case) const {
+  ULONG flags = ignore_case ? NORM_IGNORECASE : 0;
+  HRESULT hr = ::VarCmp(const_cast<VARIANT*>(&var_), const_cast<VARIANT*>(&var),
+                        LOCALE_USER_DEFAULT, flags);
+  int ret = 0;
+
+  switch (hr) {
+    case VARCMP_LT:
+      ret = -1;
+      break;
+
+    case VARCMP_GT:
+    case VARCMP_NULL:
+      ret = 1;
+      break;
+
+    default:
+      // Equal.
+      break;
+  }
+
+  return ret;
+}
+
+void ScopedVariant::Set(const wchar_t* str) {
+  DCHECK(!IsLeakableVarType(var_.vt)) << "leaking variant: " << var_.vt;
+  var_.vt = VT_BSTR;
+  var_.bstrVal = ::SysAllocString(str);
+}
+
+void ScopedVariant::Set(int8_t i8) {
+  DCHECK(!IsLeakableVarType(var_.vt)) << "leaking variant: " << var_.vt;
+  var_.vt = VT_I1;
+  var_.cVal = i8;
+}
+
+void ScopedVariant::Set(uint8_t ui8) {
+  DCHECK(!IsLeakableVarType(var_.vt)) << "leaking variant: " << var_.vt;
+  var_.vt = VT_UI1;
+  var_.bVal = ui8;
+}
+
+void ScopedVariant::Set(int16_t i16) {
+  DCHECK(!IsLeakableVarType(var_.vt)) << "leaking variant: " << var_.vt;
+  var_.vt = VT_I2;
+  var_.iVal = i16;
+}
+
+void ScopedVariant::Set(uint16_t ui16) {
+  DCHECK(!IsLeakableVarType(var_.vt)) << "leaking variant: " << var_.vt;
+  var_.vt = VT_UI2;
+  var_.uiVal = ui16;
+}
+
+void ScopedVariant::Set(int32_t i32) {
+  DCHECK(!IsLeakableVarType(var_.vt)) << "leaking variant: " << var_.vt;
+  var_.vt = VT_I4;
+  var_.lVal = i32;
+}
+
+void ScopedVariant::Set(uint32_t ui32) {
+  DCHECK(!IsLeakableVarType(var_.vt)) << "leaking variant: " << var_.vt;
+  var_.vt = VT_UI4;
+  var_.ulVal = ui32;
+}
+
+void ScopedVariant::Set(int64_t i64) {
+  DCHECK(!IsLeakableVarType(var_.vt)) << "leaking variant: " << var_.vt;
+  var_.vt = VT_I8;
+  var_.llVal = i64;
+}
+
+void ScopedVariant::Set(uint64_t ui64) {
+  DCHECK(!IsLeakableVarType(var_.vt)) << "leaking variant: " << var_.vt;
+  var_.vt = VT_UI8;
+  var_.ullVal = ui64;
+}
+
+void ScopedVariant::Set(float r32) {
+  DCHECK(!IsLeakableVarType(var_.vt)) << "leaking variant: " << var_.vt;
+  var_.vt = VT_R4;
+  var_.fltVal = r32;
+}
+
+void ScopedVariant::Set(double r64) {
+  DCHECK(!IsLeakableVarType(var_.vt)) << "leaking variant: " << var_.vt;
+  var_.vt = VT_R8;
+  var_.dblVal = r64;
+}
+
+void ScopedVariant::SetDate(DATE date) {
+  DCHECK(!IsLeakableVarType(var_.vt)) << "leaking variant: " << var_.vt;
+  var_.vt = VT_DATE;
+  var_.date = date;
+}
+
+void ScopedVariant::Set(IDispatch* disp) {
+  DCHECK(!IsLeakableVarType(var_.vt)) << "leaking variant: " << var_.vt;
+  var_.vt = VT_DISPATCH;
+  var_.pdispVal = disp;
+  if (disp)
+    disp->AddRef();
+}
+
+void ScopedVariant::Set(bool b) {
+  DCHECK(!IsLeakableVarType(var_.vt)) << "leaking variant: " << var_.vt;
+  var_.vt = VT_BOOL;
+  var_.boolVal = b ? VARIANT_TRUE : VARIANT_FALSE;
+}
+
+void ScopedVariant::Set(IUnknown* unk) {
+  DCHECK(!IsLeakableVarType(var_.vt)) << "leaking variant: " << var_.vt;
+  var_.vt = VT_UNKNOWN;
+  var_.punkVal = unk;
+  if (unk)
+    unk->AddRef();
+}
+
+void ScopedVariant::Set(SAFEARRAY* array) {
+  DCHECK(!IsLeakableVarType(var_.vt)) << "leaking variant: " << var_.vt;
+  if (SUCCEEDED(::SafeArrayGetVartype(array, &var_.vt))) {
+    var_.vt |= VT_ARRAY;
+    var_.parray = array;
+  } else {
+    DCHECK(!array) << "Unable to determine safearray vartype";
+    var_.vt = VT_EMPTY;
+  }
+}
+
+void ScopedVariant::Set(const VARIANT& var) {
+  DCHECK(!IsLeakableVarType(var_.vt)) << "leaking variant: " << var_.vt;
+  if (FAILED(::VariantCopy(&var_, &var))) {
+    DLOG(ERROR) << "VariantCopy failed";
+    var_.vt = VT_EMPTY;
+  }
+}
+
+ScopedVariant& ScopedVariant::operator=(const VARIANT& var) {
+  if (&var != &var_) {
+    ::VariantClear(&var_);
+    Set(var);
+  }
+  return *this;
+}
+
+bool ScopedVariant::IsLeakableVarType(VARTYPE vt) {
+  bool leakable = false;
+  switch (vt & VT_TYPEMASK) {
+    case VT_BSTR:
+    case VT_DISPATCH:
+    // We treat VT_VARIANT as leakable to err on the safe side.
+    case VT_VARIANT:
+    case VT_UNKNOWN:
+    case VT_SAFEARRAY:
+
+    // Rarely used types (if ever):
+    case VT_VOID:
+    case VT_PTR:
+    case VT_CARRAY:
+    case VT_USERDEFINED:
+    case VT_LPSTR:
+    case VT_LPWSTR:
+    case VT_RECORD:
+    case VT_INT_PTR:
+    case VT_UINT_PTR:
+    case VT_FILETIME:
+    case VT_BLOB:
+    case VT_STREAM:
+    case VT_STORAGE:
+    case VT_STREAMED_OBJECT:
+    case VT_STORED_OBJECT:
+    case VT_BLOB_OBJECT:
+    case VT_VERSIONED_STREAM:
+    case VT_BSTR_BLOB:
+      leakable = true;
+      break;
+  }
+
+  if (!leakable && (vt & VT_ARRAY) != 0) {
+    leakable = true;
+  }
+
+  return leakable;
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/scoped_variant.h b/base/win/scoped_variant.h
new file mode 100644
index 0000000..81f4b2b
--- /dev/null
+++ b/base/win/scoped_variant.h
@@ -0,0 +1,166 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_SCOPED_VARIANT_H_
+#define BASE_WIN_SCOPED_VARIANT_H_
+
+#include <windows.h>
+#include <oleauto.h>
+#include <stdint.h>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+
+namespace base {
+namespace win {
+
+// Scoped VARIANT class for automatically freeing a COM VARIANT at the
+// end of a scope.  Additionally provides a few functions to make the
+// encapsulated VARIANT easier to use.
+// Instead of inheriting from VARIANT, we take the containment approach
+// in order to have more control over the usage of the variant and guard
+// against memory leaks.
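+//
+// Example use (a sketch; SomeComCall() and GetVariant() are hypothetical):
+//
+//   ScopedVariant var(L"hello");     // Owns a VT_BSTR copy of the string.
+//   SomeComCall(var.AsInput());      // Pass as a read-only in-param.
+//   ScopedVariant result;
+//   GetVariant(result.Receive());    // Receive an out-param; takes ownership.
+//   // Both variants are cleared automatically at end of scope.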
+class BASE_EXPORT ScopedVariant {
+ public:
+  // A global, const VARIANT instance that is always VT_EMPTY.
+  static const VARIANT kEmptyVariant;
+
+  // Default constructor.
+  ScopedVariant() {
+    // This is equivalent to what VariantInit does, but less code.
+    var_.vt = VT_EMPTY;
+  }
+
+  // Constructor to create a new VT_BSTR VARIANT.
+  // NOTE: Do not pass a BSTR to this constructor expecting ownership to
+  // be transferred.
+  explicit ScopedVariant(const wchar_t* str);
+
+  // Creates a new VT_BSTR variant of a specified length.
+  ScopedVariant(const wchar_t* str, UINT length);
+
+  // Creates a new integral type variant and assigns the value to
+  // VARIANT.lVal (32 bit sized field).
+  explicit ScopedVariant(int value, VARTYPE vt = VT_I4);
+
+  // Creates a new double-precision type variant.  |vt| must be either VT_R8
+  // or VT_DATE.
+  explicit ScopedVariant(double value, VARTYPE vt = VT_R8);
+
+  // VT_DISPATCH
+  explicit ScopedVariant(IDispatch* dispatch);
+
+  // VT_UNKNOWN
+  explicit ScopedVariant(IUnknown* unknown);
+
+  // SAFEARRAY
+  explicit ScopedVariant(SAFEARRAY* safearray);
+
+  // Copies the variant.
+  explicit ScopedVariant(const VARIANT& var);
+
+  ~ScopedVariant();
+
+  inline VARTYPE type() const {
+    return var_.vt;
+  }
+
+  // Give ScopedVariant ownership over an already allocated VARIANT.
+  void Reset(const VARIANT& var = kEmptyVariant);
+
+  // Releases ownership of the VARIANT to the caller.
+  VARIANT Release();
+
+  // Swaps two ScopedVariants.
+  void Swap(ScopedVariant& var);
+
+  // Returns a copy of the variant.
+  VARIANT Copy() const;
+
+  // The return value is 0 if the variants are equal, 1 if this object is
+  // greater than |var|, -1 if it is smaller.
+  int Compare(const VARIANT& var, bool ignore_case = false) const;
+
+  // Retrieves the pointer address.
+  // Used to receive a VARIANT as an out argument (and take ownership).
+  // The function DCHECKs on the current value being empty/null.
+  // Usage: GetVariant(var.Receive());
+  VARIANT* Receive();
+
+  void Set(const wchar_t* str);
+
+  // Setters for simple types.
+  void Set(int8_t i8);
+  void Set(uint8_t ui8);
+  void Set(int16_t i16);
+  void Set(uint16_t ui16);
+  void Set(int32_t i32);
+  void Set(uint32_t ui32);
+  void Set(int64_t i64);
+  void Set(uint64_t ui64);
+  void Set(float r32);
+  void Set(double r64);
+  void Set(bool b);
+
+  // Creates a copy of |var| and assigns as this instance's value.
+  // Note that this is different from the Reset() method that's used to
+  // free the current value and assume ownership.
+  void Set(const VARIANT& var);
+
+  // COM object setters
+  void Set(IDispatch* disp);
+  void Set(IUnknown* unk);
+
+  // SAFEARRAY support
+  void Set(SAFEARRAY* array);
+
+  // Special setter for DATE since DATE is a double and we already have
+  // a setter for double.
+  void SetDate(DATE date);
+
+  // Allows const access to the contained variant without DCHECKs etc.
+  // This support is necessary for the V_XYZ (e.g. V_BSTR) set of macros to
+  // work properly but still doesn't allow modifications since we want control
+  // over that.
+  const VARIANT* ptr() const { return &var_; }
+
+  // Like other scoped classes (e.g. scoped_refptr, ScopedBstr,
+  // Microsoft::WRL::ComPtr) we support the assignment operator for the type we
+  // wrap.
+  ScopedVariant& operator=(const VARIANT& var);
+
+  // A hack to pass a pointer to the variant where the accepting
+  // function treats the variant as an input-only, read-only value
+  // but the function prototype requires a non const variant pointer.
+  // There's no DCHECK or anything here.  Callers must know what they're doing.
+  VARIANT* AsInput() const {
+    // The nature of this function is const, so we declare
+    // it as such and cast away the constness here.
+    return const_cast<VARIANT*>(&var_);
+  }
+
+  // Allows the ScopedVariant instance to be passed to functions either by value
+  // or by const reference.
+  operator const VARIANT&() const {
+    return var_;
+  }
+
+  // Used as a debug check to see if we're leaking anything.
+  static bool IsLeakableVarType(VARTYPE vt);
+
+ protected:
+  VARIANT var_;
+
+ private:
+  // Comparison operators for ScopedVariant are not supported at this point.
+  // Use the Compare method instead.
+  bool operator==(const ScopedVariant& var) const;
+  bool operator!=(const ScopedVariant& var) const;
+  DISALLOW_COPY_AND_ASSIGN(ScopedVariant);
+};
+
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_WIN_SCOPED_VARIANT_H_
diff --git a/base/win/scoped_variant_unittest.cc b/base/win/scoped_variant_unittest.cc
new file mode 100644
index 0000000..7d61e28
--- /dev/null
+++ b/base/win/scoped_variant_unittest.cc
@@ -0,0 +1,263 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdint.h>
+
+#include "base/win/scoped_variant.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace win {
+
+namespace {
+
+static const wchar_t kTestString1[] = L"Used to create BSTRs";
+static const wchar_t kTestString2[] = L"Also used to create BSTRs";
+
+void GiveMeAVariant(VARIANT* ret) {
+  EXPECT_TRUE(ret != NULL);
+  ret->vt = VT_BSTR;
+  V_BSTR(ret) = ::SysAllocString(kTestString1);
+}
+
+// A dummy IDispatch implementation that only tracks its reference count:
+// AddRef() increments a counter and Release() decrements it. All other
+// methods return E_NOTIMPL.
+class FakeComObject : public IDispatch {
+ public:
+  FakeComObject() : ref_(0) {
+  }
+
+  STDMETHOD_(DWORD, AddRef)() override {
+    ref_++;
+    return ref_;
+  }
+
+  STDMETHOD_(DWORD, Release)() override {
+    ref_--;
+    return ref_;
+  }
+
+  STDMETHOD(QueryInterface)(REFIID, void**) override { return E_NOTIMPL; }
+
+  STDMETHOD(GetTypeInfoCount)(UINT*) override { return E_NOTIMPL; }
+
+  STDMETHOD(GetTypeInfo)(UINT, LCID, ITypeInfo**) override { return E_NOTIMPL; }
+
+  STDMETHOD(GetIDsOfNames)(REFIID, LPOLESTR*, UINT, LCID, DISPID*) override {
+    return E_NOTIMPL;
+  }
+
+  STDMETHOD(Invoke)(DISPID,
+                    REFIID,
+                    LCID,
+                    WORD,
+                    DISPPARAMS*,
+                    VARIANT*,
+                    EXCEPINFO*,
+                    UINT*) override {
+    return E_NOTIMPL;
+  }
+
+  // A way to check the internal reference count of the class.
+  int ref_count() const {
+    return ref_;
+  }
+
+ protected:
+  int ref_;
+};
+
+}  // namespace
+
+TEST(ScopedVariantTest, ScopedVariant) {
+  ScopedVariant var;
+  EXPECT_TRUE(var.type() == VT_EMPTY);
+  // V_BSTR(var.ptr()) = NULL;  <- NOTE: Assignment like that is not supported.
+
+  ScopedVariant var_bstr(L"VT_BSTR");
+  EXPECT_EQ(VT_BSTR, V_VT(var_bstr.ptr()));
+  EXPECT_TRUE(V_BSTR(var_bstr.ptr()) != NULL);  // can't use EXPECT_NE for BSTR
+  var_bstr.Reset();
+  EXPECT_NE(VT_BSTR, V_VT(var_bstr.ptr()));
+  var_bstr.Set(kTestString2);
+  EXPECT_EQ(VT_BSTR, V_VT(var_bstr.ptr()));
+
+  VARIANT tmp = var_bstr.Release();
+  EXPECT_EQ(VT_EMPTY, V_VT(var_bstr.ptr()));
+  EXPECT_EQ(VT_BSTR, V_VT(&tmp));
+  EXPECT_EQ(0, lstrcmp(V_BSTR(&tmp), kTestString2));
+
+  var.Reset(tmp);
+  EXPECT_EQ(VT_BSTR, V_VT(var.ptr()));
+  EXPECT_EQ(0, lstrcmpW(V_BSTR(var.ptr()), kTestString2));
+
+  var_bstr.Swap(var);
+  EXPECT_EQ(VT_EMPTY, V_VT(var.ptr()));
+  EXPECT_EQ(VT_BSTR, V_VT(var_bstr.ptr()));
+  EXPECT_EQ(0, lstrcmpW(V_BSTR(var_bstr.ptr()), kTestString2));
+  var_bstr.Reset();
+
+  // Test the Compare and Copy routines.
+  GiveMeAVariant(var_bstr.Receive());
+  ScopedVariant var_bstr2(V_BSTR(var_bstr.ptr()));
+  EXPECT_EQ(0, var_bstr.Compare(var_bstr2));
+  var_bstr2.Reset();
+  EXPECT_NE(0, var_bstr.Compare(var_bstr2));
+  var_bstr2.Reset(var_bstr.Copy());
+  EXPECT_EQ(0, var_bstr.Compare(var_bstr2));
+  var_bstr2.Reset();
+  var_bstr2.Set(V_BSTR(var_bstr.ptr()));
+  EXPECT_EQ(0, var_bstr.Compare(var_bstr2));
+  var_bstr2.Reset();
+  var_bstr.Reset();
+
+  // Test for the SetDate setter.
+  SYSTEMTIME sys_time;
+  ::GetSystemTime(&sys_time);
+  DATE date;
+  ::SystemTimeToVariantTime(&sys_time, &date);
+  var.Reset();
+  var.SetDate(date);
+  EXPECT_EQ(VT_DATE, var.type());
+  EXPECT_EQ(date, V_DATE(var.ptr()));
+
+  // Simple setter tests.  These do not require resetting the variant
+  // after each test since the variant type is not "leakable" (i.e. doesn't
+  // need to be freed explicitly).
+
+  // We need a static_cast here since character literals default to int.
+  var.Set(static_cast<int8_t>('v'));
+  EXPECT_EQ(VT_I1, var.type());
+  EXPECT_EQ('v', V_I1(var.ptr()));
+
+  var.Set(static_cast<short>(123));
+  EXPECT_EQ(VT_I2, var.type());
+  EXPECT_EQ(123, V_I2(var.ptr()));
+
+  var.Set(static_cast<int32_t>(123));
+  EXPECT_EQ(VT_I4, var.type());
+  EXPECT_EQ(123, V_I4(var.ptr()));
+
+  var.Set(static_cast<int64_t>(123));
+  EXPECT_EQ(VT_I8, var.type());
+  EXPECT_EQ(123, V_I8(var.ptr()));
+
+  var.Set(static_cast<uint8_t>(123));
+  EXPECT_EQ(VT_UI1, var.type());
+  EXPECT_EQ(123u, V_UI1(var.ptr()));
+
+  var.Set(static_cast<unsigned short>(123));
+  EXPECT_EQ(VT_UI2, var.type());
+  EXPECT_EQ(123u, V_UI2(var.ptr()));
+
+  var.Set(static_cast<uint32_t>(123));
+  EXPECT_EQ(VT_UI4, var.type());
+  EXPECT_EQ(123u, V_UI4(var.ptr()));
+
+  var.Set(static_cast<uint64_t>(123));
+  EXPECT_EQ(VT_UI8, var.type());
+  EXPECT_EQ(123u, V_UI8(var.ptr()));
+
+  var.Set(123.123f);
+  EXPECT_EQ(VT_R4, var.type());
+  EXPECT_EQ(123.123f, V_R4(var.ptr()));
+
+  var.Set(static_cast<double>(123.123));
+  EXPECT_EQ(VT_R8, var.type());
+  EXPECT_EQ(123.123, V_R8(var.ptr()));
+
+  var.Set(true);
+  EXPECT_EQ(VT_BOOL, var.type());
+  EXPECT_EQ(VARIANT_TRUE, V_BOOL(var.ptr()));
+  var.Set(false);
+  EXPECT_EQ(VT_BOOL, var.type());
+  EXPECT_EQ(VARIANT_FALSE, V_BOOL(var.ptr()));
+
+  // Com interface tests
+
+  var.Set(static_cast<IDispatch*>(NULL));
+  EXPECT_EQ(VT_DISPATCH, var.type());
+  EXPECT_EQ(NULL, V_DISPATCH(var.ptr()));
+  var.Reset();
+
+  var.Set(static_cast<IUnknown*>(NULL));
+  EXPECT_EQ(VT_UNKNOWN, var.type());
+  EXPECT_EQ(NULL, V_UNKNOWN(var.ptr()));
+  var.Reset();
+
+  FakeComObject faker;
+  EXPECT_EQ(0, faker.ref_count());
+  var.Set(static_cast<IDispatch*>(&faker));
+  EXPECT_EQ(VT_DISPATCH, var.type());
+  EXPECT_EQ(&faker, V_DISPATCH(var.ptr()));
+  EXPECT_EQ(1, faker.ref_count());
+  var.Reset();
+  EXPECT_EQ(0, faker.ref_count());
+
+  var.Set(static_cast<IUnknown*>(&faker));
+  EXPECT_EQ(VT_UNKNOWN, var.type());
+  EXPECT_EQ(&faker, V_UNKNOWN(var.ptr()));
+  EXPECT_EQ(1, faker.ref_count());
+  var.Reset();
+  EXPECT_EQ(0, faker.ref_count());
+
+  {
+    ScopedVariant disp_var(&faker);
+    EXPECT_EQ(VT_DISPATCH, disp_var.type());
+    EXPECT_EQ(&faker, V_DISPATCH(disp_var.ptr()));
+    EXPECT_EQ(1, faker.ref_count());
+  }
+  EXPECT_EQ(0, faker.ref_count());
+
+  {
+    ScopedVariant ref1(&faker);
+    EXPECT_EQ(1, faker.ref_count());
+    ScopedVariant ref2(static_cast<const VARIANT&>(ref1));
+    EXPECT_EQ(2, faker.ref_count());
+    ScopedVariant ref3;
+    ref3 = static_cast<const VARIANT&>(ref2);
+    EXPECT_EQ(3, faker.ref_count());
+  }
+  EXPECT_EQ(0, faker.ref_count());
+
+  {
+    ScopedVariant unk_var(static_cast<IUnknown*>(&faker));
+    EXPECT_EQ(VT_UNKNOWN, unk_var.type());
+    EXPECT_EQ(&faker, V_UNKNOWN(unk_var.ptr()));
+    EXPECT_EQ(1, faker.ref_count());
+  }
+  EXPECT_EQ(0, faker.ref_count());
+
+  VARIANT raw;
+  raw.vt = VT_UNKNOWN;
+  raw.punkVal = &faker;
+  EXPECT_EQ(0, faker.ref_count());
+  var.Set(raw);
+  EXPECT_EQ(1, faker.ref_count());
+  var.Reset();
+  EXPECT_EQ(0, faker.ref_count());
+
+  {
+    ScopedVariant number(123);
+    EXPECT_EQ(VT_I4, number.type());
+    EXPECT_EQ(123, V_I4(number.ptr()));
+  }
+
+  // SAFEARRAY tests
+  var.Set(static_cast<SAFEARRAY*>(NULL));
+  EXPECT_EQ(VT_EMPTY, var.type());
+
+  SAFEARRAY* sa = ::SafeArrayCreateVector(VT_UI1, 0, 100);
+  ASSERT_TRUE(sa != NULL);
+
+  var.Set(sa);
+  EXPECT_TRUE(ScopedVariant::IsLeakableVarType(var.type()));
+  EXPECT_EQ(VT_ARRAY | VT_UI1, var.type());
+  EXPECT_EQ(sa, V_ARRAY(var.ptr()));
+  // The array is destroyed in the destructor of var.
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/scoped_windows_thread_environment.h b/base/win/scoped_windows_thread_environment.h
new file mode 100644
index 0000000..51f2a0d
--- /dev/null
+++ b/base/win/scoped_windows_thread_environment.h
@@ -0,0 +1,28 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_SCOPED_WINDOWS_THREAD_ENVIRONMENT_H_
+#define BASE_WIN_SCOPED_WINDOWS_THREAD_ENVIRONMENT_H_
+
+#include "base/macros.h"
+
+namespace base {
+namespace win {
+
+// Serves as a root class for ScopedCOMInitializer and ScopedWinrtInitializer.
+class ScopedWindowsThreadEnvironment {
+ public:
+  ScopedWindowsThreadEnvironment() {}
+  virtual ~ScopedWindowsThreadEnvironment() {}
+
+  virtual bool Succeeded() const = 0;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ScopedWindowsThreadEnvironment);
+};
+
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_WIN_SCOPED_WINDOWS_THREAD_ENVIRONMENT_H_
diff --git a/base/win/scoped_winrt_initializer.cc b/base/win/scoped_winrt_initializer.cc
new file mode 100644
index 0000000..e05679a
--- /dev/null
+++ b/base/win/scoped_winrt_initializer.cc
@@ -0,0 +1,38 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/scoped_winrt_initializer.h"
+
+#include "base/logging.h"
+#include "base/win/com_init_util.h"
+#include "base/win/core_winrt_util.h"
+#include "base/win/windows_version.h"
+
+namespace base {
+namespace win {
+
+ScopedWinrtInitializer::ScopedWinrtInitializer()
+    : hr_(base::win::RoInitialize(RO_INIT_MULTITHREADED)) {
+  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+  DCHECK_GE(GetVersion(), VERSION_WIN8);
+#if DCHECK_IS_ON()
+  if (SUCCEEDED(hr_))
+    AssertComApartmentType(ComApartmentType::MTA);
+  else
+    DCHECK_NE(RPC_E_CHANGED_MODE, hr_) << "Invalid COM thread model change";
+#endif
+}
+
+ScopedWinrtInitializer::~ScopedWinrtInitializer() {
+  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+  if (SUCCEEDED(hr_))
+    base::win::RoUninitialize();
+}
+
+bool ScopedWinrtInitializer::Succeeded() const {
+  return SUCCEEDED(hr_);
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/scoped_winrt_initializer.h b/base/win/scoped_winrt_initializer.h
new file mode 100644
index 0000000..7c76515
--- /dev/null
+++ b/base/win/scoped_winrt_initializer.h
@@ -0,0 +1,48 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_SCOPED_WINRT_INITIALIZER_H_
+#define BASE_WIN_SCOPED_WINRT_INITIALIZER_H_
+
+#include <objbase.h>
+
+#include "base/base_export.h"
+#include "base/threading/thread_checker.h"
+#include "base/win/scoped_windows_thread_environment.h"
+
+namespace base {
+namespace win {
+
+// Initializes the Windows Runtime in the constructor and uninitializes the
+// Windows Runtime in the destructor. As a side effect, COM is also initialized
+// as an MTA in the constructor and correspondingly uninitialized in the
+// destructor.
+//
+// Generally, you should only use this on Windows 8 or above. It is redundant
+// to use ScopedCOMInitializer in conjunction with ScopedWinrtInitializer.
+//
+// WARNING: This should only be used once per thread, ideally scoped to a
+// similar lifetime as the thread itself. You should not be using this in random
+// utility functions that make Windows Runtime calls -- instead ensure these
+// functions are running on a Windows Runtime supporting thread!
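+//
+// Example use (a sketch):
+//
+//   base::win::ScopedWinrtInitializer winrt_initializer;
+//   if (!winrt_initializer.Succeeded())
+//     return;  // Handle the failure.
+//   // Windows Runtime calls may be made on this thread from here on.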
+class BASE_EXPORT ScopedWinrtInitializer
+    : public ScopedWindowsThreadEnvironment {
+ public:
+  ScopedWinrtInitializer();
+  ~ScopedWinrtInitializer() override;
+
+  // ScopedWindowsThreadEnvironment:
+  bool Succeeded() const override;
+
+ private:
+  const HRESULT hr_;
+  THREAD_CHECKER(thread_checker_);
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedWinrtInitializer);
+};
+
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_WIN_SCOPED_WINRT_INITIALIZER_H_
diff --git a/base/win/scoped_winrt_initializer_unittest.cc b/base/win/scoped_winrt_initializer_unittest.cc
new file mode 100644
index 0000000..9df1187
--- /dev/null
+++ b/base/win/scoped_winrt_initializer_unittest.cc
@@ -0,0 +1,47 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/scoped_winrt_initializer.h"
+
+#include "base/test/gtest_util.h"
+#include "base/win/com_init_util.h"
+#include "base/win/scoped_com_initializer.h"
+#include "base/win/windows_version.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace win {
+
+TEST(ScopedWinrtInitializer, BasicFunctionality) {
+  if (GetVersion() < VERSION_WIN8)
+    return;
+
+  AssertComApartmentType(ComApartmentType::NONE);
+  {
+    ScopedWinrtInitializer scoped_winrt_initializer;
+    AssertComApartmentType(ComApartmentType::MTA);
+  }
+  AssertComApartmentType(ComApartmentType::NONE);
+}
+
+TEST(ScopedWinrtInitializer, ApartmentChangeCheck) {
+  if (GetVersion() < VERSION_WIN8)
+    return;
+
+  ScopedCOMInitializer com_initializer;
+  // ScopedCOMInitializer initialized an STA, so the following request for an
+  // MTA should fail.
+  EXPECT_DCHECK_DEATH({ ScopedWinrtInitializer scoped_winrt_initializer; });
+}
+
+TEST(ScopedWinrtInitializer, VersionCheck) {
+  if (GetVersion() >= VERSION_WIN8)
+    return;
+
+  // ScopedWinrtInitializer is unsupported on versions prior to Windows 8.
+  EXPECT_DCHECK_DEATH({ ScopedWinrtInitializer scoped_winrt_initializer; });
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/shortcut.cc b/base/win/shortcut.cc
new file mode 100644
index 0000000..5663452
--- /dev/null
+++ b/base/win/shortcut.cc
@@ -0,0 +1,375 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/shortcut.h"
+
+#include <objbase.h>
+#include <shellapi.h>
+#include <shlobj.h>
+#include <propkey.h>
+#include <wrl/client.h>
+
+#include "base/files/file_util.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/win/scoped_propvariant.h"
+#include "base/win/win_util.h"
+#include "base/win/windows_version.h"
+
+namespace base {
+namespace win {
+
+namespace {
+
+using Microsoft::WRL::ComPtr;
+
+// Initializes |i_shell_link| and |i_persist_file| (releasing them first if they
+// are already initialized).
+// If |shortcut| is not NULL, loads |shortcut| into |i_persist_file|.
+// If any of the above steps fail, both |i_shell_link| and |i_persist_file| will
+// be released.
+void InitializeShortcutInterfaces(const wchar_t* shortcut,
+                                  ComPtr<IShellLink>* i_shell_link,
+                                  ComPtr<IPersistFile>* i_persist_file) {
+  i_shell_link->Reset();
+  i_persist_file->Reset();
+  if (FAILED(::CoCreateInstance(CLSID_ShellLink, NULL, CLSCTX_INPROC_SERVER,
+                                IID_PPV_ARGS(i_shell_link->GetAddressOf()))) ||
+      FAILED(i_shell_link->CopyTo(i_persist_file->GetAddressOf())) ||
+      (shortcut && FAILED((*i_persist_file)->Load(shortcut, STGM_READWRITE)))) {
+    i_shell_link->Reset();
+    i_persist_file->Reset();
+  }
+}
+
+}  // namespace
+
+ShortcutProperties::ShortcutProperties()
+    : icon_index(-1), dual_mode(false), options(0U) {
+}
+
+ShortcutProperties::ShortcutProperties(const ShortcutProperties& other) =
+    default;
+
+ShortcutProperties::~ShortcutProperties() {
+}
+
+bool CreateOrUpdateShortcutLink(const FilePath& shortcut_path,
+                                const ShortcutProperties& properties,
+                                ShortcutOperation operation) {
+  AssertBlockingAllowed();
+
+  // A target is required unless |operation| is SHORTCUT_UPDATE_EXISTING.
+  if (operation != SHORTCUT_UPDATE_EXISTING &&
+      !(properties.options & ShortcutProperties::PROPERTIES_TARGET)) {
+    NOTREACHED();
+    return false;
+  }
+
+  bool shortcut_existed = PathExists(shortcut_path);
+
+  // Interfaces to the old shortcut when replacing an existing shortcut.
+  ComPtr<IShellLink> old_i_shell_link;
+  ComPtr<IPersistFile> old_i_persist_file;
+
+  // Interfaces to the shortcut being created/updated.
+  ComPtr<IShellLink> i_shell_link;
+  ComPtr<IPersistFile> i_persist_file;
+  switch (operation) {
+    case SHORTCUT_CREATE_ALWAYS:
+      InitializeShortcutInterfaces(NULL, &i_shell_link, &i_persist_file);
+      break;
+    case SHORTCUT_UPDATE_EXISTING:
+      InitializeShortcutInterfaces(shortcut_path.value().c_str(), &i_shell_link,
+                                   &i_persist_file);
+      break;
+    case SHORTCUT_REPLACE_EXISTING:
+      InitializeShortcutInterfaces(shortcut_path.value().c_str(),
+                                   &old_i_shell_link, &old_i_persist_file);
+      // Confirm |shortcut_path| exists and is a shortcut by verifying
+      // |old_i_persist_file| was successfully initialized in the call above. If
+      // so, initialize the interfaces to begin writing a new shortcut (to
+      // overwrite the current one if successful).
+      if (old_i_persist_file.Get())
+        InitializeShortcutInterfaces(NULL, &i_shell_link, &i_persist_file);
+      break;
+    default:
+      NOTREACHED();
+  }
+
+  // Return false immediately upon failure to initialize shortcut interfaces.
+  if (!i_persist_file.Get())
+    return false;
+
+  if ((properties.options & ShortcutProperties::PROPERTIES_TARGET) &&
+      FAILED(i_shell_link->SetPath(properties.target.value().c_str()))) {
+    return false;
+  }
+
+  if ((properties.options & ShortcutProperties::PROPERTIES_WORKING_DIR) &&
+      FAILED(i_shell_link->SetWorkingDirectory(
+          properties.working_dir.value().c_str()))) {
+    return false;
+  }
+
+  if (properties.options & ShortcutProperties::PROPERTIES_ARGUMENTS) {
+    if (FAILED(i_shell_link->SetArguments(properties.arguments.c_str())))
+      return false;
+  } else if (old_i_persist_file.Get()) {
+    wchar_t current_arguments[MAX_PATH] = {0};
+    if (SUCCEEDED(old_i_shell_link->GetArguments(current_arguments,
+                                                 MAX_PATH))) {
+      i_shell_link->SetArguments(current_arguments);
+    }
+  }
+
+  if ((properties.options & ShortcutProperties::PROPERTIES_DESCRIPTION) &&
+      FAILED(i_shell_link->SetDescription(properties.description.c_str()))) {
+    return false;
+  }
+
+  if ((properties.options & ShortcutProperties::PROPERTIES_ICON) &&
+      FAILED(i_shell_link->SetIconLocation(properties.icon.value().c_str(),
+                                           properties.icon_index))) {
+    return false;
+  }
+
+  bool has_app_id =
+      (properties.options & ShortcutProperties::PROPERTIES_APP_ID) != 0;
+  bool has_dual_mode =
+      (properties.options & ShortcutProperties::PROPERTIES_DUAL_MODE) != 0;
+  bool has_toast_activator_clsid =
+      (properties.options &
+       ShortcutProperties::PROPERTIES_TOAST_ACTIVATOR_CLSID) != 0;
+  if (has_app_id || has_dual_mode || has_toast_activator_clsid) {
+    ComPtr<IPropertyStore> property_store;
+    if (FAILED(i_shell_link.CopyTo(property_store.GetAddressOf())) ||
+        !property_store.Get())
+      return false;
+
+    if (has_app_id &&
+        !SetAppIdForPropertyStore(property_store.Get(),
+                                  properties.app_id.c_str())) {
+      return false;
+    }
+    if (has_dual_mode &&
+        !SetBooleanValueForPropertyStore(property_store.Get(),
+                                         PKEY_AppUserModel_IsDualMode,
+                                         properties.dual_mode)) {
+      return false;
+    }
+    if (has_toast_activator_clsid &&
+        !SetClsidForPropertyStore(property_store.Get(),
+                                  PKEY_AppUserModel_ToastActivatorCLSID,
+                                  properties.toast_activator_clsid)) {
+      return false;
+    }
+  }
+
+  // Release the interfaces to the old shortcut to make sure it doesn't prevent
+  // overwriting it if needed.
+  old_i_persist_file.Reset();
+  old_i_shell_link.Reset();
+
+  HRESULT result = i_persist_file->Save(shortcut_path.value().c_str(), TRUE);
+
+  // Release the interfaces in case the SHChangeNotify call below depends on
+  // the operations above being fully completed.
+  i_persist_file.Reset();
+  i_shell_link.Reset();
+
+  // If we successfully created/updated the icon, notify the shell that we have
+  // done so.
+  const bool succeeded = SUCCEEDED(result);
+  if (succeeded) {
+    if (shortcut_existed) {
+      // TODO(gab): SHCNE_UPDATEITEM might be sufficient here; further testing
+      // required.
+      SHChangeNotify(SHCNE_ASSOCCHANGED, SHCNF_IDLIST, NULL, NULL);
+    } else {
+      SHChangeNotify(SHCNE_CREATE, SHCNF_PATH, shortcut_path.value().c_str(),
+                     NULL);
+    }
+  }
+
+  return succeeded;
+}
+
+bool ResolveShortcutProperties(const FilePath& shortcut_path,
+                               uint32_t options,
+                               ShortcutProperties* properties) {
+  DCHECK(options && properties);
+  AssertBlockingAllowed();
+
+  if (options & ~ShortcutProperties::PROPERTIES_ALL)
+    NOTREACHED() << "Unhandled property is used.";
+
+  ComPtr<IShellLink> i_shell_link;
+
+  // Get pointer to the IShellLink interface.
+  if (FAILED(::CoCreateInstance(CLSID_ShellLink, NULL, CLSCTX_INPROC_SERVER,
+                                IID_PPV_ARGS(&i_shell_link)))) {
+    return false;
+  }
+
+  ComPtr<IPersistFile> persist;
+  // Query IShellLink for the IPersistFile interface.
+  if (FAILED(i_shell_link.CopyTo(persist.GetAddressOf())))
+    return false;
+
+  // Load the shell link.
+  if (FAILED(persist->Load(shortcut_path.value().c_str(), STGM_READ)))
+    return false;
+
+  // Reset |properties|.
+  properties->options = 0;
+
+  wchar_t temp[MAX_PATH];
+  if (options & ShortcutProperties::PROPERTIES_TARGET) {
+    if (FAILED(i_shell_link->GetPath(temp, MAX_PATH, NULL, SLGP_UNCPRIORITY)))
+      return false;
+    properties->set_target(FilePath(temp));
+  }
+
+  if (options & ShortcutProperties::PROPERTIES_WORKING_DIR) {
+    if (FAILED(i_shell_link->GetWorkingDirectory(temp, MAX_PATH)))
+      return false;
+    properties->set_working_dir(FilePath(temp));
+  }
+
+  if (options & ShortcutProperties::PROPERTIES_ARGUMENTS) {
+    if (FAILED(i_shell_link->GetArguments(temp, MAX_PATH)))
+      return false;
+    properties->set_arguments(temp);
+  }
+
+  if (options & ShortcutProperties::PROPERTIES_DESCRIPTION) {
+    // Note: description length constrained by MAX_PATH.
+    if (FAILED(i_shell_link->GetDescription(temp, MAX_PATH)))
+      return false;
+    properties->set_description(temp);
+  }
+
+  if (options & ShortcutProperties::PROPERTIES_ICON) {
+    int temp_index;
+    if (FAILED(i_shell_link->GetIconLocation(temp, MAX_PATH, &temp_index)))
+      return false;
+    properties->set_icon(FilePath(temp), temp_index);
+  }
+
+  if (options & (ShortcutProperties::PROPERTIES_APP_ID |
+                 ShortcutProperties::PROPERTIES_DUAL_MODE |
+                 ShortcutProperties::PROPERTIES_TOAST_ACTIVATOR_CLSID)) {
+    ComPtr<IPropertyStore> property_store;
+    if (FAILED(i_shell_link.CopyTo(property_store.GetAddressOf())))
+      return false;
+
+    if (options & ShortcutProperties::PROPERTIES_APP_ID) {
+      ScopedPropVariant pv_app_id;
+      if (property_store->GetValue(PKEY_AppUserModel_ID,
+                                   pv_app_id.Receive()) != S_OK) {
+        return false;
+      }
+      switch (pv_app_id.get().vt) {
+        case VT_EMPTY:
+          properties->set_app_id(L"");
+          break;
+        case VT_LPWSTR:
+          properties->set_app_id(pv_app_id.get().pwszVal);
+          break;
+        default:
+          NOTREACHED() << "Unexpected variant type: " << pv_app_id.get().vt;
+          return false;
+      }
+    }
+
+    if (options & ShortcutProperties::PROPERTIES_DUAL_MODE) {
+      ScopedPropVariant pv_dual_mode;
+      if (property_store->GetValue(PKEY_AppUserModel_IsDualMode,
+                                   pv_dual_mode.Receive()) != S_OK) {
+        return false;
+      }
+      switch (pv_dual_mode.get().vt) {
+        case VT_EMPTY:
+          properties->set_dual_mode(false);
+          break;
+        case VT_BOOL:
+          properties->set_dual_mode(pv_dual_mode.get().boolVal == VARIANT_TRUE);
+          break;
+        default:
+          NOTREACHED() << "Unexpected variant type: " << pv_dual_mode.get().vt;
+          return false;
+      }
+    }
+
+    if (options & ShortcutProperties::PROPERTIES_TOAST_ACTIVATOR_CLSID) {
+      ScopedPropVariant pv_toast_activator_clsid;
+      if (property_store->GetValue(PKEY_AppUserModel_ToastActivatorCLSID,
+                                   pv_toast_activator_clsid.Receive()) !=
+          S_OK) {
+        return false;
+      }
+      switch (pv_toast_activator_clsid.get().vt) {
+        case VT_EMPTY:
+          properties->set_toast_activator_clsid(CLSID_NULL);
+          break;
+        case VT_CLSID:
+          properties->set_toast_activator_clsid(
+              *(pv_toast_activator_clsid.get().puuid));
+          break;
+        default:
+          NOTREACHED() << "Unexpected variant type: "
+                       << pv_toast_activator_clsid.get().vt;
+          return false;
+      }
+    }
+  }
+
+  return true;
+}
+
+bool ResolveShortcut(const FilePath& shortcut_path,
+                     FilePath* target_path,
+                     string16* args) {
+  uint32_t options = 0;
+  if (target_path)
+    options |= ShortcutProperties::PROPERTIES_TARGET;
+  if (args)
+    options |= ShortcutProperties::PROPERTIES_ARGUMENTS;
+  DCHECK(options);
+
+  ShortcutProperties properties;
+  if (!ResolveShortcutProperties(shortcut_path, options, &properties))
+    return false;
+
+  if (target_path)
+    *target_path = properties.target;
+  if (args)
+    *args = properties.arguments;
+  return true;
+}
+
+bool CanPinShortcutToTaskbar() {
+  // "Pin to taskbar" stopped being supported in Windows 10.
+  return GetVersion() < VERSION_WIN10;
+}
+
+bool PinShortcutToTaskbar(const FilePath& shortcut) {
+  AssertBlockingAllowed();
+  DCHECK(CanPinShortcutToTaskbar());
+
+  intptr_t result = reinterpret_cast<intptr_t>(ShellExecute(
+      NULL, L"taskbarpin", shortcut.value().c_str(), NULL, NULL, 0));
+  return result > 32;
+}
+
+bool UnpinShortcutFromTaskbar(const FilePath& shortcut) {
+  AssertBlockingAllowed();
+
+  intptr_t result = reinterpret_cast<intptr_t>(ShellExecute(
+      NULL, L"taskbarunpin", shortcut.value().c_str(), NULL, NULL, 0));
+  return result > 32;
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/shortcut.h b/base/win/shortcut.h
new file mode 100644
index 0000000..38c12b7
--- /dev/null
+++ b/base/win/shortcut.h
@@ -0,0 +1,181 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_SHORTCUT_H_
+#define BASE_WIN_SHORTCUT_H_
+
+#include <windows.h>
+#include <stdint.h>
+
+#include "base/base_export.h"
+#include "base/files/file_path.h"
+#include "base/logging.h"
+#include "base/strings/string16.h"
+
+namespace base {
+namespace win {
+
+enum ShortcutOperation {
+  // Create a new shortcut (overwriting if necessary).
+  SHORTCUT_CREATE_ALWAYS = 0,
+  // Overwrite an existing shortcut (fails if the shortcut doesn't exist).
+  // If the arguments are not specified on the new shortcut, keep the old
+  // shortcut's arguments.
+  SHORTCUT_REPLACE_EXISTING,
+  // Update specified properties only on an existing shortcut.
+  SHORTCUT_UPDATE_EXISTING,
+};
+
+// Properties for shortcuts. Only the properties that are set will be applied
+// to the shortcut on creation/update; the others will be ignored.
+// Callers are encouraged to use the setters provided, which take care of
+// setting |options| as desired.
+struct BASE_EXPORT ShortcutProperties {
+  enum IndividualProperties {
+    PROPERTIES_TARGET = 1U << 0,
+    PROPERTIES_WORKING_DIR = 1U << 1,
+    PROPERTIES_ARGUMENTS = 1U << 2,
+    PROPERTIES_DESCRIPTION = 1U << 3,
+    PROPERTIES_ICON = 1U << 4,
+    PROPERTIES_APP_ID = 1U << 5,
+    PROPERTIES_DUAL_MODE = 1U << 6,
+    PROPERTIES_TOAST_ACTIVATOR_CLSID = 1U << 7,
+    // Be sure to update the values below when adding a new property.
+    PROPERTIES_ALL = PROPERTIES_TARGET | PROPERTIES_WORKING_DIR |
+                     PROPERTIES_ARGUMENTS | PROPERTIES_DESCRIPTION |
+                     PROPERTIES_ICON | PROPERTIES_APP_ID |
+                     PROPERTIES_DUAL_MODE | PROPERTIES_TOAST_ACTIVATOR_CLSID
+  };
+
+  ShortcutProperties();
+  ShortcutProperties(const ShortcutProperties& other);
+  ~ShortcutProperties();
+
+  void set_target(const FilePath& target_in) {
+    target = target_in;
+    options |= PROPERTIES_TARGET;
+  }
+
+  void set_working_dir(const FilePath& working_dir_in) {
+    working_dir = working_dir_in;
+    options |= PROPERTIES_WORKING_DIR;
+  }
+
+  void set_arguments(const string16& arguments_in) {
+    // Size restriction as per MSDN at http://goo.gl/TJ7q5.
+    DCHECK(arguments_in.size() < MAX_PATH);
+    arguments = arguments_in;
+    options |= PROPERTIES_ARGUMENTS;
+  }
+
+  void set_description(const string16& description_in) {
+    // Size restriction as per MSDN at http://goo.gl/OdNQq.
+    DCHECK(description_in.size() < MAX_PATH);
+    description = description_in;
+    options |= PROPERTIES_DESCRIPTION;
+  }
+
+  void set_icon(const FilePath& icon_in, int icon_index_in) {
+    icon = icon_in;
+    icon_index = icon_index_in;
+    options |= PROPERTIES_ICON;
+  }
+
+  void set_app_id(const string16& app_id_in) {
+    app_id = app_id_in;
+    options |= PROPERTIES_APP_ID;
+  }
+
+  void set_dual_mode(bool dual_mode_in) {
+    dual_mode = dual_mode_in;
+    options |= PROPERTIES_DUAL_MODE;
+  }
+
+  void set_toast_activator_clsid(const CLSID& toast_activator_clsid_in) {
+    toast_activator_clsid = toast_activator_clsid_in;
+    options |= PROPERTIES_TOAST_ACTIVATOR_CLSID;
+  }
+
+  // The target to launch from this shortcut. This is mandatory when creating
+  // a shortcut.
+  FilePath target;
+  // The name of the working directory when launching the shortcut.
+  FilePath working_dir;
+  // The arguments to be applied to |target| when launching from this shortcut.
+  // The length of this string must be less than MAX_PATH.
+  string16 arguments;
+  // The localized description of the shortcut.
+  // The length of this string must be less than MAX_PATH.
+  string16 description;
+  // The path to the icon (can be a dll or exe, in which case |icon_index| is
+  // the resource id).
+  FilePath icon;
+  int icon_index;
+  // The app model id for the shortcut.
+  string16 app_id;
+  // Whether this is a dual mode shortcut (Win8+).
+  bool dual_mode;
+  // The CLSID of the COM object registered with the OS via the shortcut. This
+  // is for app activation via user interaction with a toast notification in the
+  // Action Center. (Win10 version 1607, build 14393, and beyond).
+  CLSID toast_activator_clsid;
+  // Bitfield made of IndividualProperties. Properties set in |options| will be
+  // set on the shortcut, others will be ignored.
+  uint32_t options;
+};
+
+// This method creates (or updates) a shortcut link at |shortcut_path| using the
+// information given through |properties|.
+// Ensure you have initialized COM before calling into this function.
+// |operation|: a choice from the ShortcutOperation enum.
+// If |operation| is SHORTCUT_REPLACE_EXISTING or SHORTCUT_UPDATE_EXISTING and
+// |shortcut_path| does not exist, this method is a no-op and returns false.
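+//
+// Illustrative usage (editor's sketch; the paths and flag below are
+// hypothetical):
+//   ShortcutProperties props;
+//   props.set_target(FilePath(L"C:\\Tools\\app.exe"));
+//   props.set_arguments(L"--flag");
+//   CreateOrUpdateShortcutLink(FilePath(L"Desktop\\App.lnk"), props,
+//                              SHORTCUT_CREATE_ALWAYS);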
+BASE_EXPORT bool CreateOrUpdateShortcutLink(
+    const FilePath& shortcut_path,
+    const ShortcutProperties& properties,
+    ShortcutOperation operation);
+
+// Resolves a Windows shortcut (.LNK file).
+// This method tries to resolve selected properties of a shortcut .LNK file.
+// The path of the shortcut to resolve is in |shortcut_path|. |options| is a bit
+// field composed of ShortcutProperties::IndividualProperties, to specify which
+// properties to read. It should be non-0. The resulting data are read into
+// |properties|, which must not be NULL. Note: PROPERTIES_TARGET will retrieve
+// the target path as stored in the shortcut but won't attempt to resolve that
+// path so it may not be valid. The function returns true if all requested
+// properties are successfully read. Otherwise some reads have failed and
+// intermediate values written to |properties| should be ignored.
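+//
+// Sketch of a typical call (editor's illustration; assumes COM is
+// initialized):
+//   ShortcutProperties props;
+//   if (ResolveShortcutProperties(shortcut_path,
+//                                 ShortcutProperties::PROPERTIES_TARGET |
+//                                     ShortcutProperties::PROPERTIES_ICON,
+//                                 &props)) {
+//     // props.target and props.icon are now populated.
+//   }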
+BASE_EXPORT bool ResolveShortcutProperties(const FilePath& shortcut_path,
+                                           uint32_t options,
+                                           ShortcutProperties* properties);
+
+// Resolves a Windows shortcut (.LNK file).
+// This is a wrapper around ResolveShortcutProperties() that handles the common use
+// case of resolving target and arguments. |target_path| and |args| are
+// optional output variables that are ignored if NULL (but at least one must be
+// non-NULL). The function returns true if all requested fields are found
+// successfully. Callers can safely use the same variable for both
+// |shortcut_path| and |target_path|.
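+//
+// For example (editor's sketch; |lnk| is a hypothetical shortcut path):
+//   FilePath target;
+//   string16 args;
+//   if (ResolveShortcut(lnk, &target, &args)) {
+//     // |target| holds the stored target path and |args| its arguments.
+//   }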
+BASE_EXPORT bool ResolveShortcut(const FilePath& shortcut_path,
+                                 FilePath* target_path,
+                                 string16* args);
+
+// Pin to taskbar is only supported on Windows 7 and Windows 8. Returns true on
+// those platforms.
+BASE_EXPORT bool CanPinShortcutToTaskbar();
+
+// Pins a shortcut to the taskbar on Windows 7 and 8. The |shortcut| file must
+// already exist and be a shortcut that points to an executable. The app id of
+// the shortcut is used to group windows and must be set correctly.
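+// Callers are expected to guard this with CanPinShortcutToTaskbar() (the
+// implementation DCHECKs it), e.g. (editor's sketch):
+//   if (CanPinShortcutToTaskbar())
+//     PinShortcutToTaskbar(shortcut);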
+BASE_EXPORT bool PinShortcutToTaskbar(const FilePath& shortcut);
+
+// Unpins a shortcut from the Windows 7+ taskbar. The |shortcut| must exist and
+// already be pinned to the taskbar. The app id of the shortcut is used as the
+// identifier for the taskbar item to remove and must be set correctly.
+BASE_EXPORT bool UnpinShortcutFromTaskbar(const FilePath& shortcut);
+
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_WIN_SHORTCUT_H_
diff --git a/base/win/shortcut_unittest.cc b/base/win/shortcut_unittest.cc
new file mode 100644
index 0000000..3c1c26f
--- /dev/null
+++ b/base/win/shortcut_unittest.cc
@@ -0,0 +1,330 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/shortcut.h"
+
+#include <stdint.h>
+
+#include <string>
+
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/files/scoped_temp_dir.h"
+#include "base/macros.h"
+#include "base/test/test_file_util.h"
+#include "base/test/test_shortcut_win.h"
+#include "base/win/scoped_com_initializer.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace win {
+
+namespace {
+
+static const char kFileContents[] = "This is a target.";
+static const char kFileContents2[] = "This is another target.";
+
+class ShortcutTest : public testing::Test {
+ protected:
+  void SetUp() override {
+    ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
+    ASSERT_TRUE(temp_dir_2_.CreateUniqueTempDir());
+
+    link_file_ = temp_dir_.GetPath().Append(L"My Link.lnk");
+
+    // Shortcut 1's properties
+    {
+      const FilePath target_file(temp_dir_.GetPath().Append(L"Target 1.txt"));
+      WriteFile(target_file, kFileContents, arraysize(kFileContents));
+
+      link_properties_.set_target(target_file);
+      link_properties_.set_working_dir(temp_dir_.GetPath());
+      link_properties_.set_arguments(L"--magic --awesome");
+      link_properties_.set_description(L"Chrome is awesome.");
+      link_properties_.set_icon(link_properties_.target, 4);
+      link_properties_.set_app_id(L"Chrome");
+      link_properties_.set_dual_mode(false);
+
+      // The CLSID below was randomly selected.
+      static constexpr CLSID toast_activator_clsid = {
+          0x08d401c2,
+          0x3f79,
+          0x41d8,
+          {0x89, 0xd0, 0x99, 0x25, 0xee, 0x16, 0x28, 0x63}};
+      link_properties_.set_toast_activator_clsid(toast_activator_clsid);
+    }
+
+    // Shortcut 2's properties (all different from properties of shortcut 1).
+    {
+      const FilePath target_file_2(temp_dir_.GetPath().Append(L"Target 2.txt"));
+      WriteFile(target_file_2, kFileContents2, arraysize(kFileContents2));
+
+      FilePath icon_path_2;
+      CreateTemporaryFileInDir(temp_dir_.GetPath(), &icon_path_2);
+
+      link_properties_2_.set_target(target_file_2);
+      link_properties_2_.set_working_dir(temp_dir_2_.GetPath());
+      link_properties_2_.set_arguments(L"--super --crazy");
+      link_properties_2_.set_description(L"The best in the west.");
+      link_properties_2_.set_icon(icon_path_2, 0);
+      link_properties_2_.set_app_id(L"Chrome.UserLevelCrazySuffix");
+      link_properties_2_.set_dual_mode(true);
+      link_properties_2_.set_toast_activator_clsid(CLSID_NULL);
+    }
+  }
+
+  ScopedCOMInitializer com_initializer_;
+  ScopedTempDir temp_dir_;
+  ScopedTempDir temp_dir_2_;
+
+  // The link file to be created/updated in the shortcut tests below.
+  FilePath link_file_;
+
+  // Properties for the created shortcut.
+  ShortcutProperties link_properties_;
+
+  // Properties for the updated shortcut.
+  ShortcutProperties link_properties_2_;
+};
+
+}  // namespace
+
+TEST_F(ShortcutTest, CreateAndResolveShortcutProperties) {
+  // Test all properties.
+  FilePath file_1(temp_dir_.GetPath().Append(L"Link1.lnk"));
+  ASSERT_TRUE(CreateOrUpdateShortcutLink(
+      file_1, link_properties_, SHORTCUT_CREATE_ALWAYS));
+
+  ShortcutProperties properties_read_1;
+  ASSERT_TRUE(ResolveShortcutProperties(
+      file_1, ShortcutProperties::PROPERTIES_ALL, &properties_read_1));
+  EXPECT_EQ(static_cast<unsigned>(ShortcutProperties::PROPERTIES_ALL),
+            properties_read_1.options);
+  ValidatePathsAreEqual(link_properties_.target, properties_read_1.target);
+  ValidatePathsAreEqual(link_properties_.working_dir,
+                        properties_read_1.working_dir);
+  EXPECT_EQ(link_properties_.arguments, properties_read_1.arguments);
+  EXPECT_EQ(link_properties_.description, properties_read_1.description);
+  ValidatePathsAreEqual(link_properties_.icon, properties_read_1.icon);
+  EXPECT_EQ(link_properties_.icon_index, properties_read_1.icon_index);
+  EXPECT_EQ(link_properties_.app_id, properties_read_1.app_id);
+  EXPECT_EQ(link_properties_.dual_mode, properties_read_1.dual_mode);
+  EXPECT_EQ(link_properties_.toast_activator_clsid,
+            properties_read_1.toast_activator_clsid);
+
+  // Test simple shortcut with no special properties set.
+  FilePath file_2(temp_dir_.GetPath().Append(L"Link2.lnk"));
+  ShortcutProperties only_target_properties;
+  only_target_properties.set_target(link_properties_.target);
+  ASSERT_TRUE(CreateOrUpdateShortcutLink(
+      file_2, only_target_properties, SHORTCUT_CREATE_ALWAYS));
+
+  ShortcutProperties properties_read_2;
+  ASSERT_TRUE(ResolveShortcutProperties(
+      file_2, ShortcutProperties::PROPERTIES_ALL, &properties_read_2));
+  EXPECT_EQ(static_cast<unsigned>(ShortcutProperties::PROPERTIES_ALL),
+            properties_read_2.options);
+  ValidatePathsAreEqual(only_target_properties.target,
+                        properties_read_2.target);
+  ValidatePathsAreEqual(FilePath(), properties_read_2.working_dir);
+  EXPECT_EQ(L"", properties_read_2.arguments);
+  EXPECT_EQ(L"", properties_read_2.description);
+  ValidatePathsAreEqual(FilePath(), properties_read_2.icon);
+  EXPECT_EQ(0, properties_read_2.icon_index);
+  EXPECT_EQ(L"", properties_read_2.app_id);
+  EXPECT_FALSE(properties_read_2.dual_mode);
+  EXPECT_EQ(CLSID_NULL, properties_read_2.toast_activator_clsid);
+}
+
+TEST_F(ShortcutTest, CreateAndResolveShortcut) {
+  ShortcutProperties only_target_properties;
+  only_target_properties.set_target(link_properties_.target);
+
+  ASSERT_TRUE(CreateOrUpdateShortcutLink(
+      link_file_, only_target_properties, SHORTCUT_CREATE_ALWAYS));
+
+  FilePath resolved_name;
+  EXPECT_TRUE(ResolveShortcut(link_file_, &resolved_name, NULL));
+
+  char read_contents[arraysize(kFileContents)];
+  base::ReadFile(resolved_name, read_contents, arraysize(read_contents));
+  EXPECT_STREQ(kFileContents, read_contents);
+}
+
+TEST_F(ShortcutTest, ResolveShortcutWithArgs) {
+  ASSERT_TRUE(CreateOrUpdateShortcutLink(
+      link_file_, link_properties_, SHORTCUT_CREATE_ALWAYS));
+
+  FilePath resolved_name;
+  string16 args;
+  EXPECT_TRUE(ResolveShortcut(link_file_, &resolved_name, &args));
+
+  char read_contents[arraysize(kFileContents)];
+  base::ReadFile(resolved_name, read_contents, arraysize(read_contents));
+  EXPECT_STREQ(kFileContents, read_contents);
+  EXPECT_EQ(link_properties_.arguments, args);
+}
+
+TEST_F(ShortcutTest, CreateShortcutWithOnlySomeProperties) {
+  ShortcutProperties target_and_args_properties;
+  target_and_args_properties.set_target(link_properties_.target);
+  target_and_args_properties.set_arguments(link_properties_.arguments);
+
+  ASSERT_TRUE(CreateOrUpdateShortcutLink(
+      link_file_, target_and_args_properties,
+      SHORTCUT_CREATE_ALWAYS));
+
+  ValidateShortcut(link_file_, target_and_args_properties);
+}
+
+TEST_F(ShortcutTest, CreateShortcutVerifyProperties) {
+  ASSERT_TRUE(CreateOrUpdateShortcutLink(
+      link_file_, link_properties_, SHORTCUT_CREATE_ALWAYS));
+
+  ValidateShortcut(link_file_, link_properties_);
+}
+
+TEST_F(ShortcutTest, UpdateShortcutVerifyProperties) {
+  ASSERT_TRUE(CreateOrUpdateShortcutLink(
+      link_file_, link_properties_, SHORTCUT_CREATE_ALWAYS));
+
+  ASSERT_TRUE(CreateOrUpdateShortcutLink(
+      link_file_, link_properties_2_, SHORTCUT_UPDATE_EXISTING));
+
+  ValidateShortcut(link_file_, link_properties_2_);
+}
+
+TEST_F(ShortcutTest, UpdateShortcutUpdateOnlyTargetAndResolve) {
+  ASSERT_TRUE(CreateOrUpdateShortcutLink(
+      link_file_, link_properties_, SHORTCUT_CREATE_ALWAYS));
+
+  ShortcutProperties update_only_target_properties;
+  update_only_target_properties.set_target(link_properties_2_.target);
+
+  ASSERT_TRUE(CreateOrUpdateShortcutLink(
+      link_file_, update_only_target_properties,
+      SHORTCUT_UPDATE_EXISTING));
+
+  ShortcutProperties expected_properties = link_properties_;
+  expected_properties.set_target(link_properties_2_.target);
+  ValidateShortcut(link_file_, expected_properties);
+
+  FilePath resolved_name;
+  EXPECT_TRUE(ResolveShortcut(link_file_, &resolved_name, NULL));
+
+  char read_contents[arraysize(kFileContents2)];
+  base::ReadFile(resolved_name, read_contents, arraysize(read_contents));
+  EXPECT_STREQ(kFileContents2, read_contents);
+}
+
+TEST_F(ShortcutTest, UpdateShortcutMakeDualMode) {
+  ASSERT_TRUE(CreateOrUpdateShortcutLink(
+      link_file_, link_properties_, SHORTCUT_CREATE_ALWAYS));
+
+  ShortcutProperties make_dual_mode_properties;
+  make_dual_mode_properties.set_dual_mode(true);
+
+  ASSERT_TRUE(CreateOrUpdateShortcutLink(
+      link_file_, make_dual_mode_properties,
+      SHORTCUT_UPDATE_EXISTING));
+
+  ShortcutProperties expected_properties = link_properties_;
+  expected_properties.set_dual_mode(true);
+  ValidateShortcut(link_file_, expected_properties);
+}
+
+TEST_F(ShortcutTest, UpdateShortcutRemoveDualMode) {
+  ASSERT_TRUE(CreateOrUpdateShortcutLink(
+      link_file_, link_properties_2_, SHORTCUT_CREATE_ALWAYS));
+
+  ShortcutProperties remove_dual_mode_properties;
+  remove_dual_mode_properties.set_dual_mode(false);
+
+  ASSERT_TRUE(CreateOrUpdateShortcutLink(
+      link_file_, remove_dual_mode_properties,
+      SHORTCUT_UPDATE_EXISTING));
+
+  ShortcutProperties expected_properties = link_properties_2_;
+  expected_properties.set_dual_mode(false);
+  ValidateShortcut(link_file_, expected_properties);
+}
+
+TEST_F(ShortcutTest, UpdateShortcutClearArguments) {
+  ASSERT_TRUE(CreateOrUpdateShortcutLink(
+      link_file_, link_properties_, SHORTCUT_CREATE_ALWAYS));
+
+  ShortcutProperties clear_arguments_properties;
+  clear_arguments_properties.set_arguments(string16());
+
+  ASSERT_TRUE(CreateOrUpdateShortcutLink(
+      link_file_, clear_arguments_properties,
+      SHORTCUT_UPDATE_EXISTING));
+
+  ShortcutProperties expected_properties = link_properties_;
+  expected_properties.set_arguments(string16());
+  ValidateShortcut(link_file_, expected_properties);
+}
+
+TEST_F(ShortcutTest, FailUpdateShortcutThatDoesNotExist) {
+  ASSERT_FALSE(CreateOrUpdateShortcutLink(
+      link_file_, link_properties_, SHORTCUT_UPDATE_EXISTING));
+  ASSERT_FALSE(PathExists(link_file_));
+}
+
+TEST_F(ShortcutTest, ReplaceShortcutAllProperties) {
+  ASSERT_TRUE(CreateOrUpdateShortcutLink(
+      link_file_, link_properties_, SHORTCUT_CREATE_ALWAYS));
+
+  ASSERT_TRUE(CreateOrUpdateShortcutLink(
+      link_file_, link_properties_2_, SHORTCUT_REPLACE_EXISTING));
+
+  ValidateShortcut(link_file_, link_properties_2_);
+}
+
+TEST_F(ShortcutTest, ReplaceShortcutSomeProperties) {
+  ASSERT_TRUE(CreateOrUpdateShortcutLink(
+      link_file_, link_properties_, SHORTCUT_CREATE_ALWAYS));
+
+  ShortcutProperties new_properties;
+  new_properties.set_target(link_properties_2_.target);
+  new_properties.set_arguments(link_properties_2_.arguments);
+  new_properties.set_description(link_properties_2_.description);
+  ASSERT_TRUE(CreateOrUpdateShortcutLink(
+      link_file_, new_properties, SHORTCUT_REPLACE_EXISTING));
+
+  // Expect only properties in |new_properties| to be set, all other properties
+  // should have been overwritten.
+  ShortcutProperties expected_properties(new_properties);
+  expected_properties.set_working_dir(FilePath());
+  expected_properties.set_icon(FilePath(), 0);
+  expected_properties.set_app_id(string16());
+  expected_properties.set_dual_mode(false);
+  ValidateShortcut(link_file_, expected_properties);
+}
+
+TEST_F(ShortcutTest, FailReplaceShortcutThatDoesNotExist) {
+  ASSERT_FALSE(CreateOrUpdateShortcutLink(
+      link_file_, link_properties_, SHORTCUT_REPLACE_EXISTING));
+  ASSERT_FALSE(PathExists(link_file_));
+}
+
+// Test that the old arguments remain on the replaced shortcut when not
+// otherwise specified.
+TEST_F(ShortcutTest, ReplaceShortcutKeepOldArguments) {
+  ASSERT_TRUE(CreateOrUpdateShortcutLink(
+      link_file_, link_properties_, SHORTCUT_CREATE_ALWAYS));
+
+  // Do not explicitly set the arguments.
+  link_properties_2_.options &=
+      ~ShortcutProperties::PROPERTIES_ARGUMENTS;
+  ASSERT_TRUE(CreateOrUpdateShortcutLink(
+      link_file_, link_properties_2_, SHORTCUT_REPLACE_EXISTING));
+
+  ShortcutProperties expected_properties(link_properties_2_);
+  expected_properties.set_arguments(link_properties_.arguments);
+  ValidateShortcut(link_file_, expected_properties);
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/startup_information.cc b/base/win/startup_information.cc
new file mode 100644
index 0000000..9986674
--- /dev/null
+++ b/base/win/startup_information.cc
@@ -0,0 +1,101 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/startup_information.h"
+
+#include "base/logging.h"
+
+namespace {
+
+typedef BOOL (WINAPI *InitializeProcThreadAttributeListFunction)(
+    LPPROC_THREAD_ATTRIBUTE_LIST attribute_list,
+    DWORD attribute_count,
+    DWORD flags,
+    PSIZE_T size);
+static InitializeProcThreadAttributeListFunction
+    initialize_proc_thread_attribute_list;
+
+typedef BOOL (WINAPI *UpdateProcThreadAttributeFunction)(
+    LPPROC_THREAD_ATTRIBUTE_LIST attribute_list,
+    DWORD flags,
+    DWORD_PTR attribute,
+    PVOID value,
+    SIZE_T size,
+    PVOID previous_value,
+    PSIZE_T return_size);
+static UpdateProcThreadAttributeFunction update_proc_thread_attribute_list;
+
+typedef VOID (WINAPI *DeleteProcThreadAttributeListFunction)(
+    LPPROC_THREAD_ATTRIBUTE_LIST lpAttributeList);
+static DeleteProcThreadAttributeListFunction delete_proc_thread_attribute_list;
+
+}  // namespace
+
+namespace base {
+namespace win {
+
+StartupInformation::StartupInformation() {
+  memset(&startup_info_, 0, sizeof(startup_info_));
+  startup_info_.StartupInfo.cb = sizeof(startup_info_);
+
+  // Load the attribute API functions.
+  if (!initialize_proc_thread_attribute_list ||
+      !update_proc_thread_attribute_list ||
+      !delete_proc_thread_attribute_list) {
+    HMODULE module = ::GetModuleHandleW(L"kernel32.dll");
+    initialize_proc_thread_attribute_list =
+        reinterpret_cast<InitializeProcThreadAttributeListFunction>(
+            ::GetProcAddress(module, "InitializeProcThreadAttributeList"));
+    update_proc_thread_attribute_list =
+        reinterpret_cast<UpdateProcThreadAttributeFunction>(
+            ::GetProcAddress(module, "UpdateProcThreadAttribute"));
+    delete_proc_thread_attribute_list =
+        reinterpret_cast<DeleteProcThreadAttributeListFunction>(
+            ::GetProcAddress(module, "DeleteProcThreadAttributeList"));
+  }
+}
+
+StartupInformation::~StartupInformation() {
+  if (startup_info_.lpAttributeList) {
+    delete_proc_thread_attribute_list(startup_info_.lpAttributeList);
+    delete[] reinterpret_cast<BYTE*>(startup_info_.lpAttributeList);
+  }
+}
+
+bool StartupInformation::InitializeProcThreadAttributeList(
+    DWORD attribute_count) {
+  if (startup_info_.StartupInfo.cb != sizeof(startup_info_) ||
+      startup_info_.lpAttributeList)
+    return false;
+
+  SIZE_T size = 0;
+  initialize_proc_thread_attribute_list(NULL, attribute_count, 0, &size);
+  if (size == 0)
+    return false;
+
+  startup_info_.lpAttributeList =
+      reinterpret_cast<LPPROC_THREAD_ATTRIBUTE_LIST>(new BYTE[size]);
+  if (!initialize_proc_thread_attribute_list(startup_info_.lpAttributeList,
+                                             attribute_count, 0, &size)) {
+    delete[] reinterpret_cast<BYTE*>(startup_info_.lpAttributeList);
+    startup_info_.lpAttributeList = NULL;
+    return false;
+  }
+
+  return true;
+}
+
+bool StartupInformation::UpdateProcThreadAttribute(
+    DWORD_PTR attribute,
+    void* value,
+    size_t size) {
+  if (!startup_info_.lpAttributeList)
+    return false;
+  return !!update_proc_thread_attribute_list(startup_info_.lpAttributeList, 0,
+                                             attribute, value, size, NULL,
+                                             NULL);
+}
+
+}  // namespace win
+}  // namespace base
+
diff --git a/base/win/startup_information.h b/base/win/startup_information.h
new file mode 100644
index 0000000..5b777ba
--- /dev/null
+++ b/base/win/startup_information.h
@@ -0,0 +1,51 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_STARTUP_INFORMATION_H_
+#define BASE_WIN_STARTUP_INFORMATION_H_
+
+#include <windows.h>
+#include <stddef.h>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+
+namespace base {
+namespace win {
+
+// Manages the lifetime of additional attributes in STARTUPINFOEX.
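+//
+// Rough usage sketch (editor's illustration; |handle| is a hypothetical
+// inheritable handle to be passed to the child process):
+//   StartupInformation startup_info;
+//   startup_info.InitializeProcThreadAttributeList(1);
+//   startup_info.UpdateProcThreadAttribute(
+//       PROC_THREAD_ATTRIBUTE_HANDLE_LIST, &handle, sizeof(handle));
+//   // Pass startup_info.startup_info() to ::CreateProcess() along with the
+//   // EXTENDED_STARTUPINFO_PRESENT flag.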
+class BASE_EXPORT StartupInformation {
+ public:
+  StartupInformation();
+
+  ~StartupInformation();
+
+  // Initialize the attribute list for the specified number of entries.
+  bool InitializeProcThreadAttributeList(DWORD attribute_count);
+
+  // Sets one entry in the initialized attribute list.
+  // |value| needs to live at least as long as the StartupInformation object
+  // this is called on.
+  bool UpdateProcThreadAttribute(DWORD_PTR attribute,
+                                 void* value,
+                                 size_t size);
+
+  LPSTARTUPINFOW startup_info() { return &startup_info_.StartupInfo; }
+  LPSTARTUPINFOW startup_info() const {
+    return const_cast<const LPSTARTUPINFOW>(&startup_info_.StartupInfo);
+  }
+
+  bool has_extended_startup_info() const {
+    return !!startup_info_.lpAttributeList;
+  }
+
+ private:
+  STARTUPINFOEXW startup_info_;
+  DISALLOW_COPY_AND_ASSIGN(StartupInformation);
+};
+
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_WIN_STARTUP_INFORMATION_H_
diff --git a/base/win/startup_information_unittest.cc b/base/win/startup_information_unittest.cc
new file mode 100644
index 0000000..f5d1f37
--- /dev/null
+++ b/base/win/startup_information_unittest.cc
@@ -0,0 +1,75 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <windows.h>
+#include <stddef.h>
+
+#include <string>
+
+#include "base/command_line.h"
+#include "base/test/multiprocess_test.h"
+#include "base/win/scoped_handle.h"
+#include "base/win/scoped_process_information.h"
+#include "base/win/startup_information.h"
+#include "testing/multiprocess_func_list.h"
+
+const wchar_t kSectionName[] = L"EventTestSection";
+const size_t kSectionSize = 4096;
+
+MULTIPROCESS_TEST_MAIN(FireInheritedEvents) {
+  // File-mapping access takes FILE_MAP_* flags (not PAGE_* flags); read
+  // access suffices since this process only reads the handle values.
+  HANDLE section = ::OpenFileMappingW(FILE_MAP_READ, false, kSectionName);
+  HANDLE* events = reinterpret_cast<HANDLE*>(
+      ::MapViewOfFile(section, FILE_MAP_READ, 0, 0, kSectionSize));
+  // This event should not be valid because it wasn't explicitly inherited.
+  if (::SetEvent(events[1]))
+    return -1;
+  // This event should be valid because it was explicitly inherited.
+  if (!::SetEvent(events[0]))
+    return -1;
+
+  return 0;
+}
+
+class StartupInformationTest : public base::MultiProcessTest {};
+
+// Verify that only the explicitly specified event is inherited.
+TEST_F(StartupInformationTest, InheritStdOut) {
+  base::win::StartupInformation startup_info;
+
+  HANDLE section = ::CreateFileMappingW(INVALID_HANDLE_VALUE, NULL,
+                                        PAGE_READWRITE, 0, kSectionSize,
+                                        kSectionName);
+  ASSERT_TRUE(section);
+
+  HANDLE* events = reinterpret_cast<HANDLE*>(::MapViewOfFile(section,
+      FILE_MAP_READ | FILE_MAP_WRITE, 0, 0, kSectionSize));
+
+  // Make two inheritable events.
+  SECURITY_ATTRIBUTES security_attributes = { sizeof(security_attributes),
+                                              NULL, true };
+  events[0] = ::CreateEvent(&security_attributes, false, false, NULL);
+  ASSERT_TRUE(events[0]);
+  events[1] = ::CreateEvent(&security_attributes, false, false, NULL);
+  ASSERT_TRUE(events[1]);
+
+  ASSERT_TRUE(startup_info.InitializeProcThreadAttributeList(1));
+  ASSERT_TRUE(startup_info.UpdateProcThreadAttribute(
+      PROC_THREAD_ATTRIBUTE_HANDLE_LIST, &events[0],
+      sizeof(events[0])));
+
+  std::wstring cmd_line =
+      MakeCmdLine("FireInheritedEvents").GetCommandLineString();
+
+  PROCESS_INFORMATION temp_process_info = {};
+  ASSERT_TRUE(::CreateProcess(NULL, &cmd_line[0],
+                              NULL, NULL, true, EXTENDED_STARTUPINFO_PRESENT,
+                              NULL, NULL, startup_info.startup_info(),
+                              &temp_process_info)) << ::GetLastError();
+  base::win::ScopedProcessInformation process_info(temp_process_info);
+
+  // Only the first event should be signalled.
+  EXPECT_EQ(WAIT_OBJECT_0, ::WaitForMultipleObjects(2, events, false,
+                                                    4000));
+}
+
diff --git a/base/win/typed_event_handler.h b/base/win/typed_event_handler.h
new file mode 100644
index 0000000..fd62782
--- /dev/null
+++ b/base/win/typed_event_handler.h
@@ -0,0 +1,49 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_TYPED_EVENT_HANDLER_H_
+#define BASE_WIN_TYPED_EVENT_HANDLER_H_
+
+#include <windows.foundation.collections.h>
+#include <wrl/implements.h>
+
+#include <utility>
+
+#include "base/callback.h"
+
+namespace base {
+namespace win {
+
+// This file provides an implementation of Windows::Foundation's
+// ITypedEventHandler. It serves as a thin wrapper around a RepeatingCallback
+// that forwards the arguments of its |Invoke| method to the callback's |Run|
+// method.
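+//
+// For instance (editor's sketch; the IInspectable sender/args types were
+// chosen only for illustration):
+//   auto handler = Microsoft::WRL::Make<
+//       TypedEventHandler<IInspectable*, IInspectable*>>(
+//       base::BindRepeating(
+//           [](IInspectable* sender, IInspectable* args) { return S_OK; }));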
+template <typename SenderT, typename ArgsT>
+class TypedEventHandler
+    : public Microsoft::WRL::RuntimeClass<
+          Microsoft::WRL::RuntimeClassFlags<Microsoft::WRL::ClassicCom>,
+          ABI::Windows::Foundation::ITypedEventHandler<SenderT, ArgsT>> {
+ public:
+  using SenderAbiT =
+      typename ABI::Windows::Foundation::Internal::GetAbiType<SenderT>::type;
+  using ArgsAbiT =
+      typename ABI::Windows::Foundation::Internal::GetAbiType<ArgsT>::type;
+
+  using Handler = base::RepeatingCallback<HRESULT(SenderAbiT, ArgsAbiT)>;
+
+  explicit TypedEventHandler(Handler handler) : handler_(std::move(handler)) {}
+
+  // ABI::Windows::Foundation::ITypedEventHandler:
+  IFACEMETHODIMP Invoke(SenderAbiT sender, ArgsAbiT args) override {
+    return handler_.Run(std::move(sender), std::move(args));
+  }
+
+ private:
+  Handler handler_;
+};
+
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_WIN_TYPED_EVENT_HANDLER_H_
diff --git a/base/win/typed_event_handler_unittest.cc b/base/win/typed_event_handler_unittest.cc
new file mode 100644
index 0000000..76dba80
--- /dev/null
+++ b/base/win/typed_event_handler_unittest.cc
@@ -0,0 +1,49 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/typed_event_handler.h"
+
+#include <windows.foundation.h>
+
+#include "base/test/bind_test_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace win {
+
+TEST(TypedEventHandlerTest, InvokeSuccess) {
+  bool called_callback = false;
+  TypedEventHandler<IInspectable*, IInspectable*> handler(
+      base::BindLambdaForTesting([&](IInspectable* sender, IInspectable* args) {
+        EXPECT_EQ(reinterpret_cast<IInspectable*>(0x01), sender);
+        EXPECT_EQ(reinterpret_cast<IInspectable*>(0x02), args);
+        called_callback = true;
+        return S_OK;
+      }));
+
+  EXPECT_FALSE(called_callback);
+  HRESULT hr = handler.Invoke(reinterpret_cast<IInspectable*>(0x01),
+                              reinterpret_cast<IInspectable*>(0x02));
+  EXPECT_TRUE(called_callback);
+  EXPECT_EQ(S_OK, hr);
+}
+
+TEST(TypedEventHandlerTest, InvokeFail) {
+  bool called_callback = false;
+  TypedEventHandler<IInspectable*, IInspectable*> handler(
+      base::BindLambdaForTesting([&](IInspectable* sender, IInspectable* args) {
+        EXPECT_EQ(nullptr, sender);
+        EXPECT_EQ(nullptr, args);
+        called_callback = true;
+        return E_FAIL;
+      }));
+
+  EXPECT_FALSE(called_callback);
+  HRESULT hr = handler.Invoke(nullptr, nullptr);
+  EXPECT_TRUE(called_callback);
+  EXPECT_EQ(E_FAIL, hr);
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/wait_chain.cc b/base/win/wait_chain.cc
new file mode 100644
index 0000000..a18a1ea
--- /dev/null
+++ b/base/win/wait_chain.cc
@@ -0,0 +1,150 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/wait_chain.h"
+
+#include <memory>
+
+#include "base/logging.h"
+#include "base/strings/stringprintf.h"
+
+namespace base {
+namespace win {
+
+namespace {
+
+// Helper deleter to hold an HWCT in a unique_ptr.
+struct WaitChainSessionDeleter {
+  using pointer = HWCT;
+  void operator()(HWCT session_handle) const {
+    ::CloseThreadWaitChainSession(session_handle);
+  }
+};
+
+using ScopedWaitChainSessionHandle =
+    std::unique_ptr<HWCT, WaitChainSessionDeleter>;
+
+const wchar_t* WctObjectTypeToString(WCT_OBJECT_TYPE type) {
+  switch (type) {
+    case WctCriticalSectionType:
+      return L"CriticalSection";
+    case WctSendMessageType:
+      return L"SendMessage";
+    case WctMutexType:
+      return L"Mutex";
+    case WctAlpcType:
+      return L"Alpc";
+    case WctComType:
+      return L"Com";
+    case WctThreadWaitType:
+      return L"ThreadWait";
+    case WctProcessWaitType:
+      return L"ProcessWait";
+    case WctThreadType:
+      return L"Thread";
+    case WctComActivationType:
+      return L"ComActivation";
+    case WctUnknownType:
+      return L"Unknown";
+    case WctSocketIoType:
+      return L"SocketIo";
+    case WctSmbIoType:
+      return L"SmbIo";
+    case WctMaxType:
+      break;
+  }
+  NOTREACHED();
+  return L"";
+}
+
+const wchar_t* WctObjectStatusToString(WCT_OBJECT_STATUS status) {
+  switch (status) {
+    case WctStatusNoAccess:
+      return L"NoAccess";
+    case WctStatusRunning:
+      return L"Running";
+    case WctStatusBlocked:
+      return L"Blocked";
+    case WctStatusPidOnly:
+      return L"PidOnly";
+    case WctStatusPidOnlyRpcss:
+      return L"PidOnlyRpcss";
+    case WctStatusOwned:
+      return L"Owned";
+    case WctStatusNotOwned:
+      return L"NotOwned";
+    case WctStatusAbandoned:
+      return L"Abandoned";
+    case WctStatusUnknown:
+      return L"Unknown";
+    case WctStatusError:
+      return L"Error";
+    case WctStatusMax:
+      break;
+  }
+  NOTREACHED();
+  return L"";
+}
+
+}  // namespace
+
+bool GetThreadWaitChain(DWORD thread_id,
+                        WaitChainNodeVector* wait_chain,
+                        bool* is_deadlock,
+                        base::string16* failure_reason,
+                        DWORD* last_error) {
+  DCHECK(wait_chain);
+  DCHECK(is_deadlock);
+
+  constexpr wchar_t kWaitChainSessionFailureReason[] =
+      L"OpenThreadWaitChainSession() failed.";
+  constexpr wchar_t kGetWaitChainFailureReason[] =
+      L"GetThreadWaitChain() failed.";
+
+  // Open a synchronous session.
+  ScopedWaitChainSessionHandle session_handle(
+      ::OpenThreadWaitChainSession(0, nullptr));
+  if (!session_handle) {
+    if (last_error)
+      *last_error = ::GetLastError();
+    if (failure_reason)
+      *failure_reason = kWaitChainSessionFailureReason;
+    DPLOG(ERROR) << kWaitChainSessionFailureReason;
+    return false;
+  }
+
+  DWORD nb_nodes = WCT_MAX_NODE_COUNT;
+  wait_chain->resize(nb_nodes);
+  BOOL is_cycle;
+  if (!::GetThreadWaitChain(session_handle.get(), NULL, 0, thread_id, &nb_nodes,
+                            wait_chain->data(), &is_cycle)) {
+    if (last_error)
+      *last_error = ::GetLastError();
+    if (failure_reason)
+      *failure_reason = kGetWaitChainFailureReason;
+    DPLOG(ERROR) << kGetWaitChainFailureReason;
+    return false;
+  }
+
+  *is_deadlock = is_cycle ? true : false;
+  wait_chain->resize(nb_nodes);
+
+  return true;
+}
+
+base::string16 WaitChainNodeToString(const WAITCHAIN_NODE_INFO& node) {
+  if (node.ObjectType == WctThreadType) {
+    return base::StringPrintf(L"Thread %d in process %d with status %ls",
+                              node.ThreadObject.ThreadId,
+                              node.ThreadObject.ProcessId,
+                              WctObjectStatusToString(node.ObjectStatus));
+  } else {
+    return base::StringPrintf(L"Lock of type %ls with status %ls",
+                              WctObjectTypeToString(node.ObjectType),
+                              WctObjectStatusToString(node.ObjectStatus));
+  }
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/wait_chain.h b/base/win/wait_chain.h
new file mode 100644
index 0000000..b2e19de
--- /dev/null
+++ b/base/win/wait_chain.h
@@ -0,0 +1,44 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_WAIT_CHAIN_H_
+#define BASE_WIN_WAIT_CHAIN_H_
+
+#include <windows.h>
+#include <wct.h>
+
+#include <vector>
+
+#include "base/strings/string16.h"
+
+namespace base {
+namespace win {
+
+using WaitChainNodeVector = std::vector<WAITCHAIN_NODE_INFO>;
+
+// Gets the wait chain for |thread_id|. Also specifies if the |wait_chain|
+// contains a deadlock. Returns true on success.
+//
+// From MSDN: A wait chain is an alternating sequence of threads and
+// synchronization objects; each thread waits for the object that follows it,
+// which is owned by the subsequent thread in the chain.
+//
+// On error, |failure_reason| and/or |last_error| will contain the details of
+// the failure, provided they are not null.
+// TODO(pmonette): Remove |failure_reason| and |last_error| when UMA is
+// supported in the watcher process and pre-rendez-vous.
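+//
+// Typical call (editor's sketch):
+//   WaitChainNodeVector chain;
+//   bool is_deadlock = false;
+//   if (GetThreadWaitChain(thread_id, &chain, &is_deadlock,
+//                          nullptr, nullptr)) {
+//     for (const auto& node : chain)
+//       DVLOG(1) << WaitChainNodeToString(node);
+//   }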
+BASE_EXPORT bool GetThreadWaitChain(DWORD thread_id,
+                                    WaitChainNodeVector* wait_chain,
+                                    bool* is_deadlock,
+                                    base::string16* failure_reason,
+                                    DWORD* last_error);
+
+// Returns a string that represents the node for a wait chain.
+BASE_EXPORT base::string16 WaitChainNodeToString(
+    const WAITCHAIN_NODE_INFO& node);
+
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_WIN_WAIT_CHAIN_H_
diff --git a/base/win/wait_chain_unittest.cc b/base/win/wait_chain_unittest.cc
new file mode 100644
index 0000000..74a0ce5
--- /dev/null
+++ b/base/win/wait_chain_unittest.cc
@@ -0,0 +1,317 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/wait_chain.h"
+
+#include <memory>
+#include <string>
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/command_line.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_piece.h"
+#include "base/test/multiprocess_test.h"
+#include "base/threading/simple_thread.h"
+#include "base/win/win_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/multiprocess_func_list.h"
+
+namespace base {
+namespace win {
+
+namespace {
+
+// Appends |handle| as a command line switch.
+void AppendSwitchHandle(CommandLine* command_line,
+                        StringPiece switch_name,
+                        HANDLE handle) {
+  command_line->AppendSwitchASCII(switch_name.as_string(),
+                                  UintToString(HandleToUint32(handle)));
+}
+
+// Retrieves the |handle| associated to |switch_name| from the command line.
+ScopedHandle GetSwitchValueHandle(CommandLine* command_line,
+                                  StringPiece switch_name) {
+  std::string switch_string =
+      command_line->GetSwitchValueASCII(switch_name.as_string());
+  unsigned int switch_uint = 0;
+  if (switch_string.empty() || !StringToUint(switch_string, &switch_uint)) {
+    DLOG(ERROR) << "Missing or invalid " << switch_name << " argument.";
+    return ScopedHandle();
+  }
+  return ScopedHandle(reinterpret_cast<HANDLE>(switch_uint));
+}
+
+// Helper function to create a mutex.
+ScopedHandle CreateMutex(bool inheritable) {
+  SECURITY_ATTRIBUTES security_attributes = {sizeof(SECURITY_ATTRIBUTES),
+                                             nullptr, inheritable};
+  return ScopedHandle(::CreateMutex(&security_attributes, FALSE, NULL));
+}
+
+// Helper function to create an event.
+ScopedHandle CreateEvent(bool inheritable) {
+  SECURITY_ATTRIBUTES security_attributes = {sizeof(SECURITY_ATTRIBUTES),
+                                             nullptr, inheritable};
+  return ScopedHandle(
+      ::CreateEvent(&security_attributes, FALSE, FALSE, nullptr));
+}
+
+// Helper thread class that runs the callback then stops.
+class SingleTaskThread : public SimpleThread {
+ public:
+  explicit SingleTaskThread(const Closure& task)
+      : SimpleThread("WaitChainTest SingleTaskThread"), task_(task) {}
+
+  void Run() override { task_.Run(); }
+
+ private:
+  Closure task_;
+
+  DISALLOW_COPY_AND_ASSIGN(SingleTaskThread);
+};
+
+// Helper thread to cause a deadlock by acquiring 2 mutexes in a given order.
+class DeadlockThread : public SimpleThread {
+ public:
+  DeadlockThread(HANDLE mutex_1, HANDLE mutex_2)
+      : SimpleThread("WaitChainTest DeadlockThread"),
+        wait_event_(CreateEvent(false)),
+        mutex_acquired_event_(CreateEvent(false)),
+        mutex_1_(mutex_1),
+        mutex_2_(mutex_2) {}
+
+  void Run() override {
+    // Acquire the mutex then signal the main thread.
+    EXPECT_EQ(WAIT_OBJECT_0, ::WaitForSingleObject(mutex_1_, INFINITE));
+    EXPECT_TRUE(::SetEvent(mutex_acquired_event_.Get()));
+
+    // Wait until both threads are holding their mutex before trying to acquire
+    // the other one.
+    EXPECT_EQ(WAIT_OBJECT_0,
+              ::WaitForSingleObject(wait_event_.Get(), INFINITE));
+
+    // To unblock the deadlock, one of the threads will get terminated (via
+    // TerminateThread()) without releasing the mutex. This causes the other
+    // thread to wake up with WAIT_ABANDONED.
+    EXPECT_EQ(WAIT_ABANDONED, ::WaitForSingleObject(mutex_2_, INFINITE));
+  }
+
+  // Blocks until a mutex is acquired.
+  void WaitForMutexAcquired() {
+    EXPECT_EQ(WAIT_OBJECT_0,
+              ::WaitForSingleObject(mutex_acquired_event_.Get(), INFINITE));
+  }
+
+  // Signal the thread to acquire the second mutex.
+  void SignalToAcquireMutex() { EXPECT_TRUE(::SetEvent(wait_event_.Get())); }
+
+  // Terminates the thread.
+  bool Terminate() {
+    ScopedHandle thread_handle(::OpenThread(THREAD_TERMINATE, FALSE, tid()));
+    return ::TerminateThread(thread_handle.Get(), 0);
+  }
+
+ private:
+  ScopedHandle wait_event_;
+  ScopedHandle mutex_acquired_event_;
+
+  // The 2 mutexes to acquire.
+  HANDLE mutex_1_;
+  HANDLE mutex_2_;
+
+  DISALLOW_COPY_AND_ASSIGN(DeadlockThread);
+};
+
+// Creates a thread that joins |thread_to_join| and then terminates when it
+// finishes execution.
+std::unique_ptr<SingleTaskThread> CreateJoiningThread(
+    SimpleThread* thread_to_join) {
+  std::unique_ptr<SingleTaskThread> thread(new SingleTaskThread(
+      Bind(&SimpleThread::Join, Unretained(thread_to_join))));
+  thread->Start();
+
+  return thread;
+}
+
+// Creates a thread that calls WaitForSingleObject() on the handle and then
+// terminates when it unblocks.
+std::unique_ptr<SingleTaskThread> CreateWaitingThread(HANDLE handle) {
+  std::unique_ptr<SingleTaskThread> thread(new SingleTaskThread(
+      Bind(IgnoreResult(&::WaitForSingleObject), handle, INFINITE)));
+  thread->Start();
+
+  return thread;
+}
+
+// Creates a thread that blocks on |mutex_2| after acquiring |mutex_1|.
+std::unique_ptr<DeadlockThread> CreateDeadlockThread(HANDLE mutex_1,
+                                                     HANDLE mutex_2) {
+  std::unique_ptr<DeadlockThread> thread(new DeadlockThread(mutex_1, mutex_2));
+  thread->Start();
+
+  // Wait until the first mutex is acquired before returning.
+  thread->WaitForMutexAcquired();
+
+  return thread;
+}
+
+// Child process to test the cross-process capability of the WCT API.
+// This process will simulate a hang while holding a mutex that the parent
+// process is waiting on.
+MULTIPROCESS_TEST_MAIN(WaitChainTestProc) {
+  CommandLine* command_line = CommandLine::ForCurrentProcess();
+
+  ScopedHandle mutex = GetSwitchValueHandle(command_line, "mutex");
+  CHECK(mutex.IsValid());
+
+  ScopedHandle sync_event(GetSwitchValueHandle(command_line, "sync_event"));
+  CHECK(sync_event.IsValid());
+
+  // Acquire mutex.
+  CHECK(::WaitForSingleObject(mutex.Get(), INFINITE) == WAIT_OBJECT_0);
+
+  // Signal back to the parent process that the mutex is held.
+  CHECK(::SetEvent(sync_event.Get()));
+
+  // Wait on a signal from the parent process before terminating.
+  CHECK(::WaitForSingleObject(sync_event.Get(), INFINITE) == WAIT_OBJECT_0);
+
+  return 0;
+}
+
+// Starts a child process and passes the |mutex| and |sync_event| handles on
+// the command line.
+Process StartChildProcess(HANDLE mutex, HANDLE sync_event) {
+  CommandLine command_line = GetMultiProcessTestChildBaseCommandLine();
+
+  AppendSwitchHandle(&command_line, "mutex", mutex);
+  AppendSwitchHandle(&command_line, "sync_event", sync_event);
+
+  LaunchOptions options;
+  options.handles_to_inherit.push_back(mutex);
+  options.handles_to_inherit.push_back(sync_event);
+  return SpawnMultiProcessTestChild("WaitChainTestProc", command_line, options);
+}
+
+// Returns true if |wait_chain| is an alternating sequence of thread objects
+// and synchronization objects.
+bool WaitChainStructureIsCorrect(const WaitChainNodeVector& wait_chain) {
+  // Checks thread objects.
+  for (size_t i = 0; i < wait_chain.size(); i += 2) {
+    if (wait_chain[i].ObjectType != WctThreadType)
+      return false;
+  }
+
+  // Check synchronization objects.
+  for (size_t i = 1; i < wait_chain.size(); i += 2) {
+    if (wait_chain[i].ObjectType == WctThreadType)
+      return false;
+  }
+  return true;
+}
+
+// Returns true if the |wait_chain| goes through more than 1 process.
+bool WaitChainIsCrossProcess(const WaitChainNodeVector& wait_chain) {
+  if (wait_chain.empty())
+    return false;
+
+  // Just check that the process id changes somewhere in the chain.
+  // Note: ThreadObjects are every 2 nodes.
+  DWORD first_process = wait_chain[0].ThreadObject.ProcessId;
+  for (size_t i = 2; i < wait_chain.size(); i += 2) {
+    if (first_process != wait_chain[i].ThreadObject.ProcessId)
+      return true;
+  }
+  return false;
+}
+
+}  // namespace
+
+// Creates 2 threads that acquire their designated mutex and then try to
+// acquire each others' mutex to cause a deadlock.
+TEST(WaitChainTest, Deadlock) {
+  // 2 mutexes are needed to get a deadlock.
+  ScopedHandle mutex_1 = CreateMutex(false);
+  ASSERT_TRUE(mutex_1.IsValid());
+  ScopedHandle mutex_2 = CreateMutex(false);
+  ASSERT_TRUE(mutex_2.IsValid());
+
+  std::unique_ptr<DeadlockThread> deadlock_thread_1 =
+      CreateDeadlockThread(mutex_1.Get(), mutex_2.Get());
+  std::unique_ptr<DeadlockThread> deadlock_thread_2 =
+      CreateDeadlockThread(mutex_2.Get(), mutex_1.Get());
+
+  // Signal the threads to try to acquire the other mutex.
+  deadlock_thread_1->SignalToAcquireMutex();
+  deadlock_thread_2->SignalToAcquireMutex();
+  // Sleep to make sure the 2 threads got a chance to execute.
+  Sleep(10);
+
+  // Create a few waiting threads to get a longer wait chain.
+  std::unique_ptr<SingleTaskThread> waiting_thread_1 =
+      CreateJoiningThread(deadlock_thread_1.get());
+  std::unique_ptr<SingleTaskThread> waiting_thread_2 =
+      CreateJoiningThread(waiting_thread_1.get());
+
+  WaitChainNodeVector wait_chain;
+  bool is_deadlock;
+  ASSERT_TRUE(GetThreadWaitChain(waiting_thread_2->tid(), &wait_chain,
+                                 &is_deadlock, nullptr, nullptr));
+
+  EXPECT_EQ(9U, wait_chain.size());
+  EXPECT_TRUE(is_deadlock);
+  EXPECT_TRUE(WaitChainStructureIsCorrect(wait_chain));
+  EXPECT_FALSE(WaitChainIsCrossProcess(wait_chain));
+
+  ASSERT_TRUE(deadlock_thread_1->Terminate());
+
+  // The SimpleThread API expects Join() to be called before destruction.
+  deadlock_thread_2->Join();
+  waiting_thread_2->Join();
+}
+
+// Creates a child process that acquires a mutex and then blocks. A chain of
+// threads then blocks on that mutex.
+TEST(WaitChainTest, CrossProcess) {
+  ScopedHandle mutex = CreateMutex(true);
+  ASSERT_TRUE(mutex.IsValid());
+  ScopedHandle sync_event = CreateEvent(true);
+  ASSERT_TRUE(sync_event.IsValid());
+
+  Process child_process = StartChildProcess(mutex.Get(), sync_event.Get());
+  ASSERT_TRUE(child_process.IsValid());
+
+  // Wait for the child process to signal when it's holding the mutex.
+  EXPECT_EQ(WAIT_OBJECT_0, ::WaitForSingleObject(sync_event.Get(), INFINITE));
+
+  // Create a few waiting threads to get a longer wait chain.
+  std::unique_ptr<SingleTaskThread> waiting_thread_1 =
+      CreateWaitingThread(mutex.Get());
+  std::unique_ptr<SingleTaskThread> waiting_thread_2 =
+      CreateJoiningThread(waiting_thread_1.get());
+  std::unique_ptr<SingleTaskThread> waiting_thread_3 =
+      CreateJoiningThread(waiting_thread_2.get());
+
+  WaitChainNodeVector wait_chain;
+  bool is_deadlock;
+  ASSERT_TRUE(GetThreadWaitChain(waiting_thread_3->tid(), &wait_chain,
+                                 &is_deadlock, nullptr, nullptr));
+
+  EXPECT_EQ(7U, wait_chain.size());
+  EXPECT_FALSE(is_deadlock);
+  EXPECT_TRUE(WaitChainStructureIsCorrect(wait_chain));
+  EXPECT_TRUE(WaitChainIsCrossProcess(wait_chain));
+
+  // Unblock child process and wait for it to terminate.
+  ASSERT_TRUE(::SetEvent(sync_event.Get()));
+  ASSERT_TRUE(child_process.WaitForExit(nullptr));
+
+  // The SimpleThread API expects Join() to be called before destruction.
+  waiting_thread_3->Join();
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/win_client_metrics.h b/base/win/win_client_metrics.h
new file mode 100644
index 0000000..102148f
--- /dev/null
+++ b/base/win/win_client_metrics.h
@@ -0,0 +1,41 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is separate from base/win/win_util.h to avoid pulling windows.h
+// into too many translation units.
+
+#ifndef BASE_WIN_WIN_CLIENT_METRICS_H_
+#define BASE_WIN_WIN_CLIENT_METRICS_H_
+
+#include <windows.h>
+
+// This is the same as NONCLIENTMETRICS except that the
+// unused member |iPaddedBorderWidth| has been removed.
+struct NONCLIENTMETRICS_XP {
+  UINT cbSize;
+  int iBorderWidth;
+  int iScrollWidth;
+  int iScrollHeight;
+  int iCaptionWidth;
+  int iCaptionHeight;
+  LOGFONTW lfCaptionFont;
+  int iSmCaptionWidth;
+  int iSmCaptionHeight;
+  LOGFONTW lfSmCaptionFont;
+  int iMenuWidth;
+  int iMenuHeight;
+  LOGFONTW lfMenuFont;
+  LOGFONTW lfStatusFont;
+  LOGFONTW lfMessageFont;
+};
+
+namespace base {
+namespace win {
+
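+// Fills |metrics| with the system's current non-client metrics. A minimal
+// call looks like this (editor's sketch):
+//   NONCLIENTMETRICS_XP metrics = {};
+//   GetNonClientMetrics(&metrics);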
+BASE_EXPORT void GetNonClientMetrics(NONCLIENTMETRICS_XP* metrics);
+
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_WIN_WIN_CLIENT_METRICS_H_
diff --git a/base/win/win_includes_unittest.cc b/base/win/win_includes_unittest.cc
new file mode 100644
index 0000000..20c6cbc
--- /dev/null
+++ b/base/win/win_includes_unittest.cc
@@ -0,0 +1,33 @@
+// Copyright (c) 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file ensures that these header files don't include Windows.h and can
+// be compiled without it, which helps to improve compile times.
+
+#include "base/atomicops.h"
+#include "base/files/file_util.h"
+#include "base/files/platform_file.h"
+#include "base/process/process_handle.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread_local_storage.h"
+#include "base/win/registry.h"
+#include "base/win/scoped_handle.h"
+#include "base/win/win_util.h"
+
+#ifdef _WINDOWS_
+#error Windows.h was included inappropriately.
+#endif
+
+// Make sure windows.h can be included after windows_types.h.
+#include "base/win/windows_types.h"
+
+#include <windows.h>
+
+// Check that type sizes match.
+static_assert(sizeof(CHROME_CONDITION_VARIABLE) == sizeof(CONDITION_VARIABLE),
+              "Definition mismatch.");
+static_assert(sizeof(CHROME_SRWLOCK) == sizeof(SRWLOCK),
+              "Definition mismatch.");
diff --git a/base/win/win_util.cc b/base/win/win_util.cc
new file mode 100644
index 0000000..01b7544
--- /dev/null
+++ b/base/win/win_util.cc
@@ -0,0 +1,709 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/win_util.h"
+
+#include <aclapi.h>
+#include <cfgmgr32.h>
+#include <initguid.h>
+#include <powrprof.h>
+#include <shobjidl.h>  // Must be before propkey.
+
+#include <inspectable.h>
+#include <mdmregistration.h>
+#include <objbase.h>
+#include <propkey.h>
+#include <propvarutil.h>
+#include <psapi.h>
+#include <roapi.h>
+#include <sddl.h>
+#include <setupapi.h>
+#include <shellscalingapi.h>
+#include <shlwapi.h>
+#include <signal.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <tchar.h>  // Must be before tpcshrd.h or for any use of _T macro
+#include <tpcshrd.h>
+#include <uiviewsettingsinterop.h>
+#include <windows.ui.viewmanagement.h>
+#include <winstring.h>
+#include <wrl/client.h>
+#include <wrl/wrappers/corewrappers.h>
+
+#include <memory>
+
+#include "base/base_switches.h"
+#include "base/command_line.h"
+#include "base/files/file_path.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/scoped_native_library.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/win/core_winrt_util.h"
+#include "base/win/registry.h"
+#include "base/win/scoped_co_mem.h"
+#include "base/win/scoped_handle.h"
+#include "base/win/scoped_hstring.h"
+#include "base/win/scoped_propvariant.h"
+#include "base/win/win_client_metrics.h"
+#include "base/win/windows_version.h"
+
+namespace base {
+namespace win {
+
+namespace {
+
+// Sets the value of |property_key| to |property_value| in |property_store|.
+bool SetPropVariantValueForPropertyStore(
+    IPropertyStore* property_store,
+    const PROPERTYKEY& property_key,
+    const ScopedPropVariant& property_value) {
+  DCHECK(property_store);
+
+  HRESULT result = property_store->SetValue(property_key, property_value.get());
+  if (result == S_OK)
+    result = property_store->Commit();
+  if (SUCCEEDED(result))
+    return true;
+#if DCHECK_IS_ON()
+  ScopedCoMem<OLECHAR> guidString;
+  ::StringFromCLSID(property_key.fmtid, &guidString);
+  if (HRESULT_FACILITY(result) == FACILITY_WIN32)
+    ::SetLastError(HRESULT_CODE(result));
+  // See third_party/perl/c/i686-w64-mingw32/include/propkey.h for GUID and
+  // PID definitions.
+  DPLOG(ERROR) << "Failed to set property with GUID " << guidString << " PID "
+               << property_key.pid;
+#endif
+  return false;
+}
+
+void __cdecl ForceCrashOnSigAbort(int) {
+  *((volatile int*)0) = 0x1337;
+}
+
+// Returns the current platform role. We use the PowerDeterminePlatformRoleEx
+// API for that.
+POWER_PLATFORM_ROLE GetPlatformRole() {
+  return PowerDeterminePlatformRoleEx(POWER_PLATFORM_ROLE_V2);
+}
+
+// Method used for Windows 8.1 and later.
+// Since we support versions earlier than 8.1, we must dynamically load this
+// function from user32.dll, so it won't fail to load at runtime. On earlier
+// Windows versions GetProcAddress will return null and report failure so that
+// callers can fall back on the deprecated SetProcessDPIAware.
+bool SetProcessDpiAwarenessWrapper(PROCESS_DPI_AWARENESS value) {
+  decltype(&::SetProcessDpiAwareness) set_process_dpi_awareness_func =
+      reinterpret_cast<decltype(&::SetProcessDpiAwareness)>(GetProcAddress(
+          GetModuleHandle(L"user32.dll"), "SetProcessDpiAwarenessInternal"));
+  if (set_process_dpi_awareness_func) {
+    HRESULT hr = set_process_dpi_awareness_func(value);
+    if (SUCCEEDED(hr))
+      return true;
+    DLOG_IF(ERROR, hr == E_ACCESSDENIED)
+        << "Access denied error from SetProcessDpiAwarenessInternal. Function "
+           "called twice, or manifest was used.";
+    NOTREACHED()
+        << "SetProcessDpiAwarenessInternal failed with unexpected error: "
+        << hr;
+    return false;
+  }
+
+  DCHECK_LT(GetVersion(), VERSION_WIN8_1) << "SetProcessDpiAwarenessInternal "
+                                             "should be available on all "
+                                             "platforms >= Windows 8.1";
+  return false;
+}
+
+}  // namespace
+
+// Uses the Windows 10 WRL APIs to query the current system state. The APIs
+// used in the function below are supported in Win32 apps per MSDN.
+// The API implementation appears to be buggy, at least on Surface 4, causing
+// it to always return UserInteractionMode_Touch, which per the documentation
+// indicates tablet mode.
+bool IsWindows10TabletMode(HWND hwnd) {
+  if (GetVersion() < VERSION_WIN10)
+    return false;
+
+  if (!ResolveCoreWinRTDelayload() ||
+      !ScopedHString::ResolveCoreWinRTStringDelayload()) {
+    return false;
+  }
+
+  ScopedHString view_settings_guid = ScopedHString::Create(
+      RuntimeClass_Windows_UI_ViewManagement_UIViewSettings);
+  Microsoft::WRL::ComPtr<IUIViewSettingsInterop> view_settings_interop;
+  HRESULT hr = base::win::RoGetActivationFactory(
+      view_settings_guid.get(), IID_PPV_ARGS(&view_settings_interop));
+  if (FAILED(hr))
+    return false;
+
+  Microsoft::WRL::ComPtr<ABI::Windows::UI::ViewManagement::IUIViewSettings>
+      view_settings;
+  hr = view_settings_interop->GetForWindow(hwnd, IID_PPV_ARGS(&view_settings));
+  if (FAILED(hr))
+    return false;
+
+  ABI::Windows::UI::ViewManagement::UserInteractionMode mode =
+      ABI::Windows::UI::ViewManagement::UserInteractionMode_Mouse;
+  view_settings->get_UserInteractionMode(&mode);
+  return mode == ABI::Windows::UI::ViewManagement::UserInteractionMode_Touch;
+}
+
+// Returns true if a physical keyboard is detected on Windows 8 and up.
+// Uses the Setup APIs to enumerate the attached keyboards and returns true
+// if the keyboard count is 1 or more. While this works in most cases, it can
+// be fooled by attached devices that expose keyboard interfaces without being
+// actual keyboards.
+bool IsKeyboardPresentOnSlate(std::string* reason, HWND hwnd) {
+  bool result = false;
+
+  if (GetVersion() < VERSION_WIN8) {
+    if (reason)
+      *reason = "Detection not supported";
+    return false;
+  }
+
+  // Bail out if keyboard detection was disabled on the command line.
+  if (CommandLine::ForCurrentProcess()->HasSwitch(
+          switches::kDisableUsbKeyboardDetect)) {
+    if (reason)
+      *reason = "Detection disabled";
+    return false;
+  }
+
+  // This function should be only invoked for machines with touch screens.
+  if ((GetSystemMetrics(SM_DIGITIZER) & NID_INTEGRATED_TOUCH)
+        != NID_INTEGRATED_TOUCH) {
+    if (reason) {
+      *reason += "NID_INTEGRATED_TOUCH\n";
+      result = true;
+    } else {
+      return true;
+    }
+  }
+
+  // If it is a tablet device we assume that there is no keyboard attached.
+  if (IsTabletDevice(reason, hwnd)) {
+    if (reason)
+      *reason += "Tablet device.\n";
+    return false;
+  } else {
+    if (reason) {
+      *reason += "Not a tablet device";
+      result = true;
+    } else {
+      return true;
+    }
+  }
+
+  // To determine whether a keyboard is present on the device, we do the
+  // following:
+  // 1. Check whether the device supports auto rotation. If it does then
+  //    it possibly supports flipping from laptop to slate mode. If it
+  //    does not support auto rotation, then we assume it is a desktop
+  //    or a normal laptop and assume that there is a keyboard.
+
+  // 2. If the device supports auto rotation, then we get its platform role
+  //    and check the system metric SM_CONVERTIBLESLATEMODE to see if it is
+  //    being used in slate mode. If yes then we return false here to ensure
+  //    that the OSK is displayed.
+
+  // 3. If step 1 and 2 fail then we check attached keyboards and return true
+  //    if we find ACPI\* or HID\VID* keyboards.
+
+  typedef BOOL (WINAPI* GetAutoRotationState)(PAR_STATE state);
+
+  GetAutoRotationState get_rotation_state =
+      reinterpret_cast<GetAutoRotationState>(::GetProcAddress(
+          GetModuleHandle(L"user32.dll"), "GetAutoRotationState"));
+
+  if (get_rotation_state) {
+    AR_STATE auto_rotation_state = AR_ENABLED;
+    get_rotation_state(&auto_rotation_state);
+    if ((auto_rotation_state & AR_NOSENSOR) ||
+        (auto_rotation_state & AR_NOT_SUPPORTED)) {
+      // If there is no auto rotation sensor or rotation is not supported in
+      // the current configuration, then we can assume that this is a desktop
+      // or a traditional laptop.
+      if (reason) {
+        *reason += (auto_rotation_state & AR_NOSENSOR) ? "AR_NOSENSOR\n" :
+                                                         "AR_NOT_SUPPORTED\n";
+        result = true;
+      } else {
+        return true;
+      }
+    }
+  }
+
+  const GUID KEYBOARD_CLASS_GUID =
+      { 0x4D36E96B, 0xE325,  0x11CE,
+          { 0xBF, 0xC1, 0x08, 0x00, 0x2B, 0xE1, 0x03, 0x18 } };
+
+  // Query for all the keyboard devices.
+  HDEVINFO device_info =
+      SetupDiGetClassDevs(&KEYBOARD_CLASS_GUID, NULL, NULL, DIGCF_PRESENT);
+  if (device_info == INVALID_HANDLE_VALUE) {
+    if (reason)
+      *reason += "No keyboard info\n";
+    return result;
+  }
+
+  // Enumerate all keyboards and look for ACPI\* and HID\VID* devices. If at
+  // least one such device is found, we assume that a physical keyboard is
+  // attached.
+  for (DWORD i = 0;; ++i) {
+    SP_DEVINFO_DATA device_info_data = { 0 };
+    device_info_data.cbSize = sizeof(device_info_data);
+    if (!SetupDiEnumDeviceInfo(device_info, i, &device_info_data))
+      break;
+
+    // Get the device ID.
+    wchar_t device_id[MAX_DEVICE_ID_LEN];
+    CONFIGRET status = CM_Get_Device_ID(device_info_data.DevInst,
+                                        device_id,
+                                        MAX_DEVICE_ID_LEN,
+                                        0);
+    if (status == CR_SUCCESS) {
+      // To reduce the scope of the hack we only look for ACPI and HID\\VID
+      // prefixes in the keyboard device ids.
+      if (StartsWith(device_id, L"ACPI", CompareCase::INSENSITIVE_ASCII) ||
+          StartsWith(device_id, L"HID\\VID", CompareCase::INSENSITIVE_ASCII)) {
+        if (reason) {
+          *reason += "device: ";
+          *reason += WideToUTF8(device_id);
+          *reason += '\n';
+        }
+        // The heuristic is to return true if the APIs report one or more
+        // keyboards. Note that this will break for non-keyboard devices that
+        // expose a keyboard PDO.
+        result = true;
+      }
+    }
+  }
+  return result;
+}
+
+static bool g_crash_on_process_detach = false;
+
+void GetNonClientMetrics(NONCLIENTMETRICS_XP* metrics) {
+  DCHECK(metrics);
+  metrics->cbSize = sizeof(*metrics);
+  const bool success = !!SystemParametersInfo(
+      SPI_GETNONCLIENTMETRICS,
+      metrics->cbSize,
+      reinterpret_cast<NONCLIENTMETRICS*>(metrics),
+      0);
+  DCHECK(success);
+}
+
+bool GetUserSidString(std::wstring* user_sid) {
+  // Get the current token.
+  HANDLE token = NULL;
+  if (!::OpenProcessToken(::GetCurrentProcess(), TOKEN_QUERY, &token))
+    return false;
+  ScopedHandle token_scoped(token);
+
+  DWORD size = sizeof(TOKEN_USER) + SECURITY_MAX_SID_SIZE;
+  std::unique_ptr<BYTE[]> user_bytes(new BYTE[size]);
+  TOKEN_USER* user = reinterpret_cast<TOKEN_USER*>(user_bytes.get());
+
+  if (!::GetTokenInformation(token, TokenUser, user, size, &size))
+    return false;
+
+  if (!user->User.Sid)
+    return false;
+
+  // Convert the data to a string.
+  wchar_t* sid_string;
+  if (!::ConvertSidToStringSid(user->User.Sid, &sid_string))
+    return false;
+
+  *user_sid = sid_string;
+
+  ::LocalFree(sid_string);
+
+  return true;
+}
+
+bool UserAccountControlIsEnabled() {
+  // This can be slow if Windows ends up going to disk.  Should watch this key
+  // for changes and only read it once, preferably on the file thread.
+  //   http://code.google.com/p/chromium/issues/detail?id=61644
+  ThreadRestrictions::ScopedAllowIO allow_io;
+
+  RegKey key(HKEY_LOCAL_MACHINE,
+             L"SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Policies\\System",
+             KEY_READ);
+  DWORD uac_enabled;
+  if (key.ReadValueDW(L"EnableLUA", &uac_enabled) != ERROR_SUCCESS)
+    return true;
+  // Users can set the EnableLUA value to something arbitrary, like 2, which
+  // Vista will treat as UAC enabled, so we make sure it is not set to 0.
+  return (uac_enabled != 0);
+}
+
+bool SetBooleanValueForPropertyStore(IPropertyStore* property_store,
+                                     const PROPERTYKEY& property_key,
+                                     bool property_bool_value) {
+  ScopedPropVariant property_value;
+  if (FAILED(InitPropVariantFromBoolean(property_bool_value,
+                                        property_value.Receive()))) {
+    return false;
+  }
+
+  return SetPropVariantValueForPropertyStore(property_store,
+                                             property_key,
+                                             property_value);
+}
+
+bool SetStringValueForPropertyStore(IPropertyStore* property_store,
+                                    const PROPERTYKEY& property_key,
+                                    const wchar_t* property_string_value) {
+  ScopedPropVariant property_value;
+  if (FAILED(InitPropVariantFromString(property_string_value,
+                                       property_value.Receive()))) {
+    return false;
+  }
+
+  return SetPropVariantValueForPropertyStore(property_store,
+                                             property_key,
+                                             property_value);
+}
+
+bool SetClsidForPropertyStore(IPropertyStore* property_store,
+                              const PROPERTYKEY& property_key,
+                              const CLSID& property_clsid_value) {
+  ScopedPropVariant property_value;
+  if (FAILED(InitPropVariantFromCLSID(property_clsid_value,
+                                      property_value.Receive()))) {
+    return false;
+  }
+
+  return SetPropVariantValueForPropertyStore(property_store, property_key,
+                                             property_value);
+}
+
+bool SetAppIdForPropertyStore(IPropertyStore* property_store,
+                              const wchar_t* app_id) {
+  // The app id should be less than 64 chars and contain no spaces. The
+  // recommended format is CompanyName.ProductName[.SubProduct.ProductNumber].
+  // See http://msdn.microsoft.com/en-us/library/dd378459%28VS.85%29.aspx
+  DCHECK(lstrlen(app_id) < 64 && wcschr(app_id, L' ') == NULL);
+
+  return SetStringValueForPropertyStore(property_store,
+                                        PKEY_AppUserModel_ID,
+                                        app_id);
+}
+
+static const char16 kAutoRunKeyPath[] =
+    L"Software\\Microsoft\\Windows\\CurrentVersion\\Run";
+
+bool AddCommandToAutoRun(HKEY root_key, const string16& name,
+                         const string16& command) {
+  RegKey autorun_key(root_key, kAutoRunKeyPath, KEY_SET_VALUE);
+  return (autorun_key.WriteValue(name.c_str(), command.c_str()) ==
+      ERROR_SUCCESS);
+}
+
+bool RemoveCommandFromAutoRun(HKEY root_key, const string16& name) {
+  RegKey autorun_key(root_key, kAutoRunKeyPath, KEY_SET_VALUE);
+  return (autorun_key.DeleteValue(name.c_str()) == ERROR_SUCCESS);
+}
+
+bool ReadCommandFromAutoRun(HKEY root_key,
+                            const string16& name,
+                            string16* command) {
+  RegKey autorun_key(root_key, kAutoRunKeyPath, KEY_QUERY_VALUE);
+  return (autorun_key.ReadValue(name.c_str(), command) == ERROR_SUCCESS);
+}
+
+void SetShouldCrashOnProcessDetach(bool crash) {
+  g_crash_on_process_detach = crash;
+}
+
+bool ShouldCrashOnProcessDetach() {
+  return g_crash_on_process_detach;
+}
+
+void SetAbortBehaviorForCrashReporting() {
+  // Prevent the CRT's abort code from prompting a dialog or trying to
+  // "report" it. Disabling the _CALL_REPORTFAULT behavior is important since
+  // otherwise it has the side effect of clearing our exception filter, which
+  // means we don't get any crash report.
+  _set_abort_behavior(0, _WRITE_ABORT_MSG | _CALL_REPORTFAULT);
+
+  // Set a SIGABRT handler for good measure. We will crash even if the default
+  // is left in place; however, this allows us to crash earlier, and it also
+  // lets us crash in response to code that directly calls raise(SIGABRT).
+}
+
+bool IsTabletDevice(std::string* reason, HWND hwnd) {
+  if (GetVersion() < VERSION_WIN8) {
+    if (reason)
+      *reason = "Tablet device detection not supported below Windows 8\n";
+    return false;
+  }
+
+  if (IsWindows10TabletMode(hwnd))
+    return true;
+
+  return IsDeviceUsedAsATablet(reason);
+}
+
+// This method is used to set the right interactions media queries,
+// see https://drafts.csswg.org/mediaqueries-4/#mf-interaction. It doesn't
+// check the Windows 10 tablet mode because it doesn't reflect the actual
+// input configuration of the device and can be manually triggered by the user
+// independently from the hardware state.
+bool IsDeviceUsedAsATablet(std::string* reason) {
+  if (GetVersion() < VERSION_WIN8) {
+    if (reason)
+      *reason = "Tablet device detection not supported below Windows 8\n";
+    return false;
+  }
+
+  if (GetSystemMetrics(SM_MAXIMUMTOUCHES) == 0) {
+    if (reason) {
+      *reason += "Device does not support touch.\n";
+    } else {
+      return false;
+    }
+  }
+
+  // If the device is docked, the user is treating the device as a PC.
+  if (GetSystemMetrics(SM_SYSTEMDOCKED) != 0) {
+    if (reason) {
+      *reason += "SM_SYSTEMDOCKED\n";
+    } else {
+      return false;
+    }
+  }
+
+  // If the device is not supporting rotation, it's unlikely to be a tablet,
+  // a convertible or a detachable.
+  // See
+  // https://msdn.microsoft.com/en-us/library/windows/desktop/dn629263(v=vs.85).aspx
+  typedef decltype(GetAutoRotationState)* GetAutoRotationStateType;
+  GetAutoRotationStateType get_auto_rotation_state_func =
+      reinterpret_cast<GetAutoRotationStateType>(GetProcAddress(
+          GetModuleHandle(L"user32.dll"), "GetAutoRotationState"));
+
+  if (get_auto_rotation_state_func) {
+    AR_STATE rotation_state = AR_ENABLED;
+    if (get_auto_rotation_state_func(&rotation_state) &&
+        (rotation_state & (AR_NOT_SUPPORTED | AR_LAPTOP | AR_NOSENSOR)) != 0)
+      return false;
+  }
+
+  // PlatformRoleSlate was added in Windows 8.
+  POWER_PLATFORM_ROLE role = GetPlatformRole();
+  bool is_tablet = false;
+  if (role == PlatformRoleMobile || role == PlatformRoleSlate) {
+    is_tablet = !GetSystemMetrics(SM_CONVERTIBLESLATEMODE);
+    if (!is_tablet) {
+      if (reason) {
+        *reason += "Not in slate mode.\n";
+      } else {
+        return false;
+      }
+    } else {
+      if (reason) {
+        *reason += (role == PlatformRoleMobile) ? "PlatformRoleMobile\n"
+                                                : "PlatformRoleSlate\n";
+      }
+    }
+  } else {
+    if (reason)
+      *reason += "Device role is not mobile or slate.\n";
+  }
+  return is_tablet;
+}
+
+enum DomainEnrollmentState {UNKNOWN = -1, NOT_ENROLLED, ENROLLED};
+static volatile long int g_domain_state = UNKNOWN;
+
+bool IsEnrolledToDomain() {
+  // Doesn't make any sense to retry inside a user session because joining a
+  // domain will only kick in on a restart.
+  if (g_domain_state == UNKNOWN) {
+    ::InterlockedCompareExchange(&g_domain_state,
+                                 IsOS(OS_DOMAINMEMBER) ?
+                                     ENROLLED : NOT_ENROLLED,
+                                 UNKNOWN);
+  }
+
+  return g_domain_state == ENROLLED;
+}
+
+bool IsDeviceRegisteredWithManagement() {
+  static bool is_device_registered_with_management = []() {
+    ScopedNativeLibrary library(
+        FilePath(FILE_PATH_LITERAL("MDMRegistration.dll")));
+    if (!library.is_valid())
+      return false;
+
+    using IsDeviceRegisteredWithManagementFunction =
+        decltype(&::IsDeviceRegisteredWithManagement);
+    IsDeviceRegisteredWithManagementFunction
+        is_device_registered_with_management_function =
+            reinterpret_cast<IsDeviceRegisteredWithManagementFunction>(
+                library.GetFunctionPointer("IsDeviceRegisteredWithManagement"));
+    if (!is_device_registered_with_management_function)
+      return false;
+
+    BOOL is_managed = false;
+    HRESULT hr =
+        is_device_registered_with_management_function(&is_managed, 0, nullptr);
+    return SUCCEEDED(hr) && is_managed;
+  }();
+  return is_device_registered_with_management;
+}
+
+bool IsEnterpriseManaged() {
+  // TODO(rogerta): this function should really be:
+  //
+  //    return IsEnrolledToDomain() || IsDeviceRegisteredWithManagement();
+  //
+  // However, for now we are collecting some UMA metrics about
+  // IsDeviceRegisteredWithManagement() before changing Chrome's behavior.
+  return IsEnrolledToDomain();
+}
+
+void SetDomainStateForTesting(bool state) {
+  g_domain_state = state ? ENROLLED : NOT_ENROLLED;
+}
+
+bool IsUser32AndGdi32Available() {
+  static auto is_user32_and_gdi32_available = []() {
+    // If win32k syscalls aren't disabled, then user32 and gdi32 are available.
+
+    // Can't disable win32k prior to Windows 8.
+    if (GetVersion() < VERSION_WIN8)
+      return true;
+
+    typedef decltype(
+        GetProcessMitigationPolicy)* GetProcessMitigationPolicyType;
+    GetProcessMitigationPolicyType get_process_mitigation_policy_func =
+        reinterpret_cast<GetProcessMitigationPolicyType>(GetProcAddress(
+            GetModuleHandle(L"kernel32.dll"), "GetProcessMitigationPolicy"));
+
+    if (!get_process_mitigation_policy_func)
+      return true;
+
+    PROCESS_MITIGATION_SYSTEM_CALL_DISABLE_POLICY policy = {};
+    if (get_process_mitigation_policy_func(GetCurrentProcess(),
+                                           ProcessSystemCallDisablePolicy,
+                                           &policy, sizeof(policy))) {
+      return policy.DisallowWin32kSystemCalls == 0;
+    }
+
+    return true;
+  }();
+  return is_user32_and_gdi32_available;
+}
+
+bool GetLoadedModulesSnapshot(HANDLE process, std::vector<HMODULE>* snapshot) {
+  DCHECK(snapshot);
+  DCHECK_EQ(0u, snapshot->size());
+  snapshot->resize(128);
+
+  // We will retry at least once after first determining |bytes_required|. If
+  // the list of modules changes after we receive |bytes_required| we may retry
+  // more than once.
+  int retries_remaining = 5;
+  do {
+    DWORD bytes_required = 0;
+    // EnumProcessModules returns 'success' even if the buffer size is too
+    // small.
+    DCHECK_GE(std::numeric_limits<DWORD>::max(),
+              snapshot->size() * sizeof(HMODULE));
+    if (!::EnumProcessModules(
+            process, &(*snapshot)[0],
+            static_cast<DWORD>(snapshot->size() * sizeof(HMODULE)),
+            &bytes_required)) {
+      DPLOG(ERROR) << "::EnumProcessModules failed.";
+      return false;
+    }
+    DCHECK_EQ(0u, bytes_required % sizeof(HMODULE));
+    size_t num_modules = bytes_required / sizeof(HMODULE);
+    if (num_modules <= snapshot->size()) {
+      // Buffer size was too big, presumably because a module was unloaded.
+      snapshot->erase(snapshot->begin() + num_modules, snapshot->end());
+      return true;
+    } else if (num_modules == 0) {
+      DLOG(ERROR) << "Can't determine the module list size.";
+      return false;
+    } else {
+      // Buffer size was too small. Try again with a larger buffer. A little
+      // more room is given to avoid multiple expensive calls to
+      // ::EnumProcessModules() just because one module has been added.
+      snapshot->resize(num_modules + 8, NULL);
+    }
+  } while (--retries_remaining);
+
+  DLOG(ERROR) << "Failed to enumerate modules.";
+  return false;
+}
+
+void EnableFlicks(HWND hwnd) {
+  ::RemoveProp(hwnd, MICROSOFT_TABLETPENSERVICE_PROPERTY);
+}
+
+void DisableFlicks(HWND hwnd) {
+  ::SetProp(hwnd, MICROSOFT_TABLETPENSERVICE_PROPERTY,
+      reinterpret_cast<HANDLE>(TABLET_DISABLE_FLICKS |
+          TABLET_DISABLE_FLICKFALLBACKKEYS));
+}
+
+bool IsProcessPerMonitorDpiAware() {
+  enum class PerMonitorDpiAware {
+    UNKNOWN = 0,
+    PER_MONITOR_DPI_UNAWARE,
+    PER_MONITOR_DPI_AWARE,
+  };
+  static PerMonitorDpiAware per_monitor_dpi_aware = PerMonitorDpiAware::UNKNOWN;
+  if (per_monitor_dpi_aware == PerMonitorDpiAware::UNKNOWN) {
+    per_monitor_dpi_aware = PerMonitorDpiAware::PER_MONITOR_DPI_UNAWARE;
+    HMODULE shcore_dll = ::LoadLibrary(L"shcore.dll");
+    if (shcore_dll) {
+      auto get_process_dpi_awareness_func =
+          reinterpret_cast<decltype(::GetProcessDpiAwareness)*>(
+              ::GetProcAddress(shcore_dll, "GetProcessDpiAwareness"));
+      if (get_process_dpi_awareness_func) {
+        PROCESS_DPI_AWARENESS awareness;
+        if (SUCCEEDED(get_process_dpi_awareness_func(nullptr, &awareness)) &&
+            awareness == PROCESS_PER_MONITOR_DPI_AWARE)
+          per_monitor_dpi_aware = PerMonitorDpiAware::PER_MONITOR_DPI_AWARE;
+      }
+    }
+  }
+  return per_monitor_dpi_aware == PerMonitorDpiAware::PER_MONITOR_DPI_AWARE;
+}
+
+void EnableHighDPISupport() {
+  // Enable per-monitor DPI for Win10 or above instead of Win8.1 since Win8.1
+  // does not have EnableChildWindowDpiMessage, necessary for correct non-client
+  // area scaling across monitors.
+  PROCESS_DPI_AWARENESS process_dpi_awareness =
+      GetVersion() >= VERSION_WIN10 ? PROCESS_PER_MONITOR_DPI_AWARE
+                                    : PROCESS_SYSTEM_DPI_AWARE;
+  if (!SetProcessDpiAwarenessWrapper(process_dpi_awareness)) {
+    // For windows versions where SetProcessDpiAwareness is not available or
+    // failed, try its predecessor.
+    BOOL result = ::SetProcessDPIAware();
+    DCHECK(result) << "SetProcessDPIAware failed.";
+  }
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/win_util.h b/base/win/win_util.h
new file mode 100644
index 0000000..9d2f858
--- /dev/null
+++ b/base/win/win_util.h
@@ -0,0 +1,197 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// =============================================================================
+// PLEASE READ
+//
+// In general, you should not be adding stuff to this file.
+//
+// - If your thing is only used in one place, just put it in a reasonable
+//   location in or near that one place. It's nice that you want people to be
+//   able to re-use your function, but realistically, if it hasn't been needed
+//   in all these years of development, it's probably not going to be used
+//   elsewhere in the future unless you already know of such uses now.
+//
+// - If your thing is used by multiple callers and is UI-related, it should
+//   probably be in app/win/ instead. Try to put it in the most specific file
+//   possible (avoiding the *_util files when practical).
+//
+// =============================================================================
+
+#ifndef BASE_WIN_WIN_UTIL_H_
+#define BASE_WIN_WIN_UTIL_H_
+
+#include <stdint.h>
+#include "base/win/windows_types.h"
+
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/strings/string16.h"
+
+struct IPropertyStore;
+struct _tagpropertykey;
+typedef _tagpropertykey PROPERTYKEY;
+
+namespace base {
+namespace win {
+
+inline uint32_t HandleToUint32(HANDLE h) {
+  // Cast through uintptr_t and then to uint32_t to make the truncation to
+  // 32 bits explicit. Handles are pointer-sized but are always 32-bit values.
+  // https://msdn.microsoft.com/en-us/library/aa384203(VS.85).aspx says:
+  // 64-bit versions of Windows use 32-bit handles for interoperability.
+  return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(h));
+}
+
+inline HANDLE Uint32ToHandle(uint32_t h) {
+  return reinterpret_cast<HANDLE>(
+      static_cast<uintptr_t>(static_cast<int32_t>(h)));
+}
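+
+// Illustrative round-trip sketch: the sign extension in Uint32ToHandle
+// preserves special values such as INVALID_HANDLE_VALUE across the
+// truncation (win_util_unittest.cc verifies this):
+//   uint32_t packed = HandleToUint32(INVALID_HANDLE_VALUE);
+//   HANDLE restored = Uint32ToHandle(packed);  // == INVALID_HANDLE_VALUE.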
+
+// Returns the string representing the current user sid.
+BASE_EXPORT bool GetUserSidString(std::wstring* user_sid);
+
+// Returns false if user account control (UAC) has been disabled with the
+// EnableLUA registry flag. Returns true if user account control is enabled.
+// NOTE: The EnableLUA registry flag, which is ignored on Windows XP
+// machines, might still exist and be set to 0 (UAC disabled), in which case
+// this function will return false. You should therefore check this flag only
+// if the OS is Vista or later.
+BASE_EXPORT bool UserAccountControlIsEnabled();
+
+// Sets the boolean value for a given key in given IPropertyStore.
+BASE_EXPORT bool SetBooleanValueForPropertyStore(
+    IPropertyStore* property_store,
+    const PROPERTYKEY& property_key,
+    bool property_bool_value);
+
+// Sets the string value for a given key in given IPropertyStore.
+BASE_EXPORT bool SetStringValueForPropertyStore(
+    IPropertyStore* property_store,
+    const PROPERTYKEY& property_key,
+    const wchar_t* property_string_value);
+
+// Sets the CLSID value for a given key in a given IPropertyStore.
+BASE_EXPORT bool SetClsidForPropertyStore(IPropertyStore* property_store,
+                                          const PROPERTYKEY& property_key,
+                                          const CLSID& property_clsid_value);
+
+// Sets the application id in the given IPropertyStore. The function is
+// intended for tagging the application/Chromium shortcut, browser window and
+// jump list on Windows 7.
+BASE_EXPORT bool SetAppIdForPropertyStore(IPropertyStore* property_store,
+                                          const wchar_t* app_id);
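+
+// A minimal illustrative sketch of tagging a window; the app id is
+// hypothetical, and SHGetPropertyStoreForWindow is one way to obtain the
+// store for an HWND:
+//   IPropertyStore* store = nullptr;
+//   if (SUCCEEDED(SHGetPropertyStoreForWindow(hwnd, IID_PPV_ARGS(&store)))) {
+//     SetAppIdForPropertyStore(store, L"Company.Product");
+//     store->Release();
+//   }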
+
+// Adds the specified |command| using the specified |name| to the AutoRun key.
+// |root_key| could be HKCU or HKLM or the root of any user hive.
+BASE_EXPORT bool AddCommandToAutoRun(HKEY root_key, const string16& name,
+                                     const string16& command);
+// Removes the command specified by |name| from the AutoRun key. |root_key|
+// could be HKCU or HKLM or the root of any user hive.
+BASE_EXPORT bool RemoveCommandFromAutoRun(HKEY root_key, const string16& name);
+
+// Reads the command specified by |name| from the AutoRun key. |root_key|
+// could be HKCU or HKLM or the root of any user hive. Used for unit-tests.
+BASE_EXPORT bool ReadCommandFromAutoRun(HKEY root_key,
+                                        const string16& name,
+                                        string16* command);
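+
+// Illustrative usage sketch for the three AutoRun helpers above (the value
+// name and command are hypothetical):
+//   AddCommandToAutoRun(HKEY_CURRENT_USER, L"MyApp",
+//                       L"\"C:\\Program Files\\MyApp\\app.exe\"");
+//   string16 command;
+//   ReadCommandFromAutoRun(HKEY_CURRENT_USER, L"MyApp", &command);
+//   RemoveCommandFromAutoRun(HKEY_CURRENT_USER, L"MyApp");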
+
+// Sets whether to crash the process during exit. This is inspected by DLLMain
+// and used to intercept unexpected terminations of the process (via calls to
+// exit(), abort(), _exit(), ExitProcess()) and convert them into crashes.
+// Note that not all mechanisms for terminating the process are covered by
+// this. In particular, TerminateProcess() is not caught.
+BASE_EXPORT void SetShouldCrashOnProcessDetach(bool crash);
+BASE_EXPORT bool ShouldCrashOnProcessDetach();
+
+// Adjusts the abort behavior so that crash reports can be generated when the
+// process is aborted.
+BASE_EXPORT void SetAbortBehaviorForCrashReporting();
+
+// Checks whether the supplied |hwnd| is in Windows 10 tablet mode. Will return
+// false on versions below 10.
+BASE_EXPORT bool IsWindows10TabletMode(HWND hwnd);
+
+// A tablet is a device that is touch enabled and also is being used
+// "like a tablet". This is used by the following:
+// 1. Metrics: To gain insight into how users use Chrome.
+// 2. Physical keyboard presence: If a device is in tablet mode, it means
+//    that there is no physical keyboard attached.
+// This function optionally sets the |reason| parameter to explain why, or why
+// not, the device was deemed to be a tablet.
+// Returns true if the user has set Windows 10 in tablet mode.
+BASE_EXPORT bool IsTabletDevice(std::string* reason, HWND hwnd);
+
+// Returns true if the device is physically used as a tablet independently of
+// Windows tablet mode. It checks if the device:
+// - Is running Windows 8 or newer,
+// - Has a touch digitizer,
+// - Is not docked,
+// - Has a supported rotation sensor,
+// - Is not in laptop mode,
+// - Prefers the mobile or slate power management profile (per OEM choice), and
+// - Is in slate mode.
+// This function optionally sets the |reason| parameter to explain why, or why
+// not, the device was deemed to be a tablet.
+BASE_EXPORT bool IsDeviceUsedAsATablet(std::string* reason);
+
+// A slate is a touch device that may have a keyboard attached. This function
+// returns true if a keyboard is attached and optionally will set the |reason|
+// parameter to the detection method that was used to detect the keyboard.
+BASE_EXPORT bool IsKeyboardPresentOnSlate(std::string* reason, HWND hwnd);
+
+// Get the size of a struct up to and including the specified member.
+// This is necessary to set compatible struct sizes for different versions
+// of certain Windows APIs (e.g. SystemParametersInfo).
+#define SIZEOF_STRUCT_WITH_SPECIFIED_LAST_MEMBER(struct_name, member) \
+    offsetof(struct_name, member) + \
+    (sizeof static_cast<struct_name*>(NULL)->member)
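+
+// For example, win_client_metrics.h uses this pattern to compute the
+// pre-Vista size of NONCLIENTMETRICS, whose last member at that time was
+// lfMessageFont:
+//   metrics.cbSize = SIZEOF_STRUCT_WITH_SPECIFIED_LAST_MEMBER(
+//       NONCLIENTMETRICS, lfMessageFont);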
+
+// Returns true if the machine is enrolled to a domain.
+BASE_EXPORT bool IsEnrolledToDomain();
+
+// Returns true if the machine is being managed by an MDM system.
+BASE_EXPORT bool IsDeviceRegisteredWithManagement();
+
+// Returns true if the current machine is considered enterprise managed in some
+// fashion.  A machine is considered managed if it is either domain enrolled
+// or registered with an MDM.
+BASE_EXPORT bool IsEnterpriseManaged();
+
+// Used by tests to mock any wanted state. Call with |state| set to true to
+// simulate being in a domain and false otherwise.
+BASE_EXPORT void SetDomainStateForTesting(bool state);
+
+// Returns true if the current process can make USER32 or GDI32 calls such as
+// CreateWindow and CreateDC. Windows 8 and above allow the kernel component
+// of these calls to be disabled which can cause undefined behaviour such as
+// crashes. This function can be used to guard areas of code using these calls
+// and provide a fallback path if necessary.
+BASE_EXPORT bool IsUser32AndGdi32Available();
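+
+// Illustrative guard sketch (the fallback path is hypothetical):
+//   if (base::win::IsUser32AndGdi32Available()) {
+//     // Safe to call USER32/GDI32, e.g. ::CreateCompatibleDC(nullptr).
+//   } else {
+//     // Take a fallback path that avoids USER32/GDI32 entirely.
+//   }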
+
+// Takes a snapshot of the modules loaded in the |process|. The returned
+// HMODULEs are not add-ref'd, so they should not be closed and may be
+// invalidated at any time (should a module be unloaded). |process| requires
+// the PROCESS_QUERY_INFORMATION and PROCESS_VM_READ permissions.
+BASE_EXPORT bool GetLoadedModulesSnapshot(HANDLE process,
+                                          std::vector<HMODULE>* snapshot);
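+
+// Illustrative usage sketch:
+//   std::vector<HMODULE> modules;
+//   if (base::win::GetLoadedModulesSnapshot(::GetCurrentProcess(), &modules)) {
+//     // Inspect |modules|; the HMODULEs are not add-ref'd, so do not close
+//     // them.
+//   }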
+
+// Adds or removes the MICROSOFT_TABLETPENSERVICE_PROPERTY property with the
+// TABLET_DISABLE_FLICKS & TABLET_DISABLE_FLICKFALLBACKKEYS flags in order to
+// disable pen flick gestures for the given HWND.
+BASE_EXPORT void EnableFlicks(HWND hwnd);
+BASE_EXPORT void DisableFlicks(HWND hwnd);
+
+// Returns true if the process is per monitor DPI aware.
+BASE_EXPORT bool IsProcessPerMonitorDpiAware();
+
+// Enable high-DPI support for the current process.
+BASE_EXPORT void EnableHighDPISupport();
+
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_WIN_WIN_UTIL_H_
diff --git a/base/win/win_util_unittest.cc b/base/win/win_util_unittest.cc
new file mode 100644
index 0000000..6d5cf61
--- /dev/null
+++ b/base/win/win_util_unittest.cc
@@ -0,0 +1,82 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/win_util.h"
+
+#include "base/files/file_path.h"
+#include "base/macros.h"
+#include "base/scoped_native_library.h"
+#include "base/stl_util.h"
+#include "base/win/win_client_metrics.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace win {
+
+namespace {
+
+// Saves the current thread's locale ID when initialized, and restores it when
+// the instance is going out of scope.
+class ThreadLocaleSaver {
+ public:
+  ThreadLocaleSaver() : original_locale_id_(GetThreadLocale()) {}
+  ~ThreadLocaleSaver() { SetThreadLocale(original_locale_id_); }
+
+ private:
+  LCID original_locale_id_;
+
+  DISALLOW_COPY_AND_ASSIGN(ThreadLocaleSaver);
+};
+
+}  // namespace
+
+// The test is somewhat silly, because some bots have UAC enabled and some
+// have it disabled. At least we check that it does not crash.
+TEST(BaseWinUtilTest, TestIsUACEnabled) {
+  UserAccountControlIsEnabled();
+}
+
+TEST(BaseWinUtilTest, TestGetUserSidString) {
+  std::wstring user_sid;
+  EXPECT_TRUE(GetUserSidString(&user_sid));
+  EXPECT_TRUE(!user_sid.empty());
+}
+
+TEST(BaseWinUtilTest, TestGetNonClientMetrics) {
+  NONCLIENTMETRICS_XP metrics = {0};
+  GetNonClientMetrics(&metrics);
+  EXPECT_GT(metrics.cbSize, 0u);
+  EXPECT_GT(metrics.iScrollWidth, 0);
+  EXPECT_GT(metrics.iScrollHeight, 0);
+}
+
+TEST(BaseWinUtilTest, TestGetLoadedModulesSnapshot) {
+  std::vector<HMODULE> snapshot;
+
+  ASSERT_TRUE(GetLoadedModulesSnapshot(::GetCurrentProcess(), &snapshot));
+  size_t original_snapshot_size = snapshot.size();
+  ASSERT_GT(original_snapshot_size, 0u);
+  snapshot.clear();
+
+  // Load in a new module. Pick msvidc32.dll as it is present from WinXP to
+  // Win10 and yet rarely used.
+  const wchar_t dll_name[] = L"msvidc32.dll";
+  ASSERT_EQ(NULL, ::GetModuleHandle(dll_name));
+
+  base::ScopedNativeLibrary new_dll((base::FilePath(dll_name)));
+  ASSERT_NE(static_cast<HMODULE>(NULL), new_dll.get());
+  ASSERT_TRUE(GetLoadedModulesSnapshot(::GetCurrentProcess(), &snapshot));
+  ASSERT_GT(snapshot.size(), original_snapshot_size);
+  ASSERT_TRUE(base::ContainsValue(snapshot, new_dll.get()));
+}
+
+TEST(BaseWinUtilTest, TestUint32ToInvalidHandle) {
+  // Ensure that INVALID_HANDLE_VALUE is preserved when going to a 32-bit value
+  // and back on 64-bit platforms.
+  uint32_t invalid_handle = base::win::HandleToUint32(INVALID_HANDLE_VALUE);
+  EXPECT_EQ(INVALID_HANDLE_VALUE, base::win::Uint32ToHandle(invalid_handle));
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/windows_full.h b/base/win/windows_full.h
new file mode 100644
index 0000000..8b9e43a
--- /dev/null
+++ b/base/win/windows_full.h
@@ -0,0 +1,13 @@
+// Copyright (c) 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This header is needed so that mojo typemap files can specify their dependence
+// on Windows.h. This can be removed once https://crbug.com/798763 is resolved.
+
+#ifndef BASE_WIN_WINDOWS_FULL_H
+#define BASE_WIN_WINDOWS_FULL_H
+
+#include <windows.h>
+
+#endif  // BASE_WIN_WINDOWS_FULL_H
diff --git a/base/win/windows_types.h b/base/win/windows_types.h
new file mode 100644
index 0000000..2a86195
--- /dev/null
+++ b/base/win/windows_types.h
@@ -0,0 +1,253 @@
+// Copyright (c) 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains defines and typedefs that allow popular Windows types to
+// be used without the overhead of including windows.h.
+
+#ifndef BASE_WIN_WINDOWS_TYPES_H
+#define BASE_WIN_WINDOWS_TYPES_H
+
+// Needed for function prototypes.
+#include <concurrencysal.h>
+#include <sal.h>
+#include <specstrings.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// typedef and define the most commonly used Windows integer types.
+
+typedef unsigned long DWORD;
+typedef long LONG;
+typedef __int64 LONGLONG;
+typedef unsigned __int64 ULONGLONG;
+
+#define VOID void
+typedef char CHAR;
+typedef short SHORT;
+typedef long LONG;
+typedef int INT;
+typedef unsigned int UINT;
+typedef unsigned int* PUINT;
+typedef void* LPVOID;
+typedef void* PVOID;
+typedef void* HANDLE;
+typedef int BOOL;
+typedef unsigned char BYTE;
+typedef BYTE BOOLEAN;
+typedef DWORD ULONG;
+typedef unsigned short WORD;
+typedef WORD UWORD;
+typedef WORD ATOM;
+
+#if defined(_WIN64)
+typedef __int64 INT_PTR, *PINT_PTR;
+typedef unsigned __int64 UINT_PTR, *PUINT_PTR;
+
+typedef __int64 LONG_PTR, *PLONG_PTR;
+typedef unsigned __int64 ULONG_PTR, *PULONG_PTR;
+#else
+typedef __w64 int INT_PTR, *PINT_PTR;
+typedef __w64 unsigned int UINT_PTR, *PUINT_PTR;
+
+typedef __w64 long LONG_PTR, *PLONG_PTR;
+typedef __w64 unsigned long ULONG_PTR, *PULONG_PTR;
+#endif
+
+typedef UINT_PTR WPARAM;
+typedef LONG_PTR LPARAM;
+typedef LONG_PTR LRESULT;
+#define LRESULT LONG_PTR
+typedef _Return_type_success_(return >= 0) long HRESULT;
+
+typedef ULONG_PTR SIZE_T, *PSIZE_T;
+typedef LONG_PTR SSIZE_T, *PSSIZE_T;
+
+typedef DWORD ACCESS_MASK;
+typedef ACCESS_MASK REGSAM;
+
+
+// Forward declare Windows compatible handles.
+
+#define CHROME_DECLARE_HANDLE(name) \
+  struct name##__;                  \
+  typedef struct name##__* name
+CHROME_DECLARE_HANDLE(HGLRC);
+CHROME_DECLARE_HANDLE(HICON);
+CHROME_DECLARE_HANDLE(HINSTANCE);
+CHROME_DECLARE_HANDLE(HKEY);
+CHROME_DECLARE_HANDLE(HKL);
+CHROME_DECLARE_HANDLE(HMENU);
+CHROME_DECLARE_HANDLE(HWND);
+typedef HINSTANCE HMODULE;
+#undef CHROME_DECLARE_HANDLE
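+
+// CHROME_DECLARE_HANDLE(HWND) above expands to roughly what windows.h's own
+// DECLARE_HANDLE produces, i.e. (illustrative):
+//   struct HWND__;
+//   typedef struct HWND__* HWND;
+// which keeps these declarations compatible with a later <windows.h> include.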
+
+
+// Forward declare some Windows struct/typedef sets.
+
+typedef struct _OVERLAPPED OVERLAPPED;
+typedef struct tagMSG MSG, *PMSG, *NPMSG, *LPMSG;
+
+typedef struct _RTL_SRWLOCK RTL_SRWLOCK;
+typedef RTL_SRWLOCK SRWLOCK, *PSRWLOCK;
+
+typedef struct _GUID GUID;
+typedef GUID CLSID;
+
+typedef struct tagLOGFONTW LOGFONTW, *PLOGFONTW, *NPLOGFONTW, *LPLOGFONTW;
+typedef LOGFONTW LOGFONT;
+
+typedef struct _FILETIME FILETIME;
+
+typedef struct tagMENUITEMINFOW MENUITEMINFOW, MENUITEMINFO;
+
+typedef struct tagNMHDR NMHDR;
+
+// Declare Chrome versions of some Windows structures. These are needed when
+// we need a concrete type but don't want to pull in Windows.h. We can't
+// redeclare the Windows types without risking a clash with Windows.h, so we
+// declare our own types and cast to the Windows types in a few places.
+
+struct CHROME_SRWLOCK {
+  PVOID Ptr;
+};
+
+struct CHROME_CONDITION_VARIABLE {
+  PVOID Ptr;
+};
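+
+// Illustrative sketch of how the CHROME_* types are consumed: code holds a
+// CHROME_SRWLOCK and casts it to the real Windows type at the API boundary,
+// e.g. when unlocking:
+//   CHROME_SRWLOCK lock = {0};
+//   ::ReleaseSRWLockExclusive(reinterpret_cast<PSRWLOCK>(&lock));
+// The static_asserts in win_includes_unittest.cc verify that the sizes match.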
+
+
+// Define some commonly used Windows constants. Note that the layout of these
+// macros - including internal spacing - must be 100% consistent with windows.h.
+
+#ifndef INVALID_HANDLE_VALUE
+// Work around there being two slightly different definitions in the SDK.
+#define INVALID_HANDLE_VALUE ((HANDLE)(LONG_PTR)-1)
+#endif
+#define TLS_OUT_OF_INDEXES ((DWORD)0xFFFFFFFF)
+#define HTNOWHERE 0
+#define MAX_PATH 260
+#define CS_GLOBALCLASS 0x4000
+
+#define ERROR_SUCCESS 0L
+#define ERROR_FILE_NOT_FOUND 2L
+#define ERROR_ACCESS_DENIED 5L
+#define ERROR_INVALID_HANDLE 6L
+#define ERROR_SHARING_VIOLATION 32L
+#define ERROR_LOCK_VIOLATION 33L
+#define REG_BINARY ( 3ul )
+
+#define STATUS_PENDING ((DWORD   )0x00000103L)
+#define STILL_ACTIVE STATUS_PENDING
+#define SUCCEEDED(hr) (((HRESULT)(hr)) >= 0)
+#define FAILED(hr) (((HRESULT)(hr)) < 0)
+
+#define HKEY_CLASSES_ROOT (( HKEY ) (ULONG_PTR)((LONG)0x80000000) )
+#define HKEY_LOCAL_MACHINE (( HKEY ) (ULONG_PTR)((LONG)0x80000002) )
+#define HKEY_CURRENT_USER (( HKEY ) (ULONG_PTR)((LONG)0x80000001) )
+#define KEY_QUERY_VALUE (0x0001)
+#define KEY_SET_VALUE (0x0002)
+#define KEY_CREATE_SUB_KEY (0x0004)
+#define KEY_ENUMERATE_SUB_KEYS (0x0008)
+#define KEY_NOTIFY (0x0010)
+#define KEY_CREATE_LINK (0x0020)
+#define KEY_WOW64_32KEY (0x0200)
+#define KEY_WOW64_64KEY (0x0100)
+#define KEY_WOW64_RES (0x0300)
+
+#define READ_CONTROL (0x00020000L)
+#define SYNCHRONIZE (0x00100000L)
+
+#define STANDARD_RIGHTS_READ (READ_CONTROL)
+#define STANDARD_RIGHTS_WRITE (READ_CONTROL)
+#define STANDARD_RIGHTS_ALL (0x001F0000L)
+
+#define KEY_READ                ((STANDARD_RIGHTS_READ       |\
+                                  KEY_QUERY_VALUE            |\
+                                  KEY_ENUMERATE_SUB_KEYS     |\
+                                  KEY_NOTIFY)                 \
+                                  &                           \
+                                 (~SYNCHRONIZE))
+
+
+#define KEY_WRITE               ((STANDARD_RIGHTS_WRITE      |\
+                                  KEY_SET_VALUE              |\
+                                  KEY_CREATE_SUB_KEY)         \
+                                  &                           \
+                                 (~SYNCHRONIZE))
+
+#define KEY_ALL_ACCESS          ((STANDARD_RIGHTS_ALL        |\
+                                  KEY_QUERY_VALUE            |\
+                                  KEY_SET_VALUE              |\
+                                  KEY_CREATE_SUB_KEY         |\
+                                  KEY_ENUMERATE_SUB_KEYS     |\
+                                  KEY_NOTIFY                 |\
+                                  KEY_CREATE_LINK)            \
+                                  &                           \
+                                 (~SYNCHRONIZE))
+
+// Define some macros needed when prototyping Windows functions.
+
+#define DECLSPEC_IMPORT __declspec(dllimport)
+#define WINBASEAPI DECLSPEC_IMPORT
+#define WINUSERAPI DECLSPEC_IMPORT
+#define WINAPI __stdcall
+#define CALLBACK __stdcall
+
+// Needed for optimal lock performance.
+WINBASEAPI _Releases_exclusive_lock_(*SRWLock) VOID WINAPI
+    ReleaseSRWLockExclusive(_Inout_ PSRWLOCK SRWLock);
+
+// Needed to support protobuf's GetMessage macro magic.
+WINUSERAPI BOOL WINAPI GetMessageW(_Out_ LPMSG lpMsg,
+                                   _In_opt_ HWND hWnd,
+                                   _In_ UINT wMsgFilterMin,
+                                   _In_ UINT wMsgFilterMax);
+
+// Needed for thread_local_storage.h
+WINBASEAPI LPVOID WINAPI TlsGetValue(_In_ DWORD dwTlsIndex);
+
+// Needed for scoped_handle.h
+WINBASEAPI _Check_return_ _Post_equals_last_error_ DWORD WINAPI
+    GetLastError(VOID);
+
+WINBASEAPI VOID WINAPI SetLastError(_In_ DWORD dwErrCode);
+
+#ifdef __cplusplus
+}
+#endif
+
+// These macros are all defined by windows.h and are also used as the names of
+// functions in the Chromium code base. Add to this list as needed whenever
+// there is a Windows macro which causes a function call to be renamed. This
+// ensures that the same renaming will happen everywhere. Includes of this file
+// can be added wherever needed to ensure this consistent renaming.
+
+#define CopyFile CopyFileW
+#define CreateDirectory CreateDirectoryW
+#define CreateEvent CreateEventW
+#define CreateFile CreateFileW
+#define CreateService CreateServiceW
+#define DeleteFile DeleteFileW
+#define DispatchMessage DispatchMessageW
+#define DrawText DrawTextW
+#define GetComputerName GetComputerNameW
+#define GetCurrentDirectory GetCurrentDirectoryW
+#define GetCurrentTime() GetTickCount()
+#define GetFileAttributes GetFileAttributesW
+#define GetMessage GetMessageW
+#define GetUserName GetUserNameW
+#define LoadIcon LoadIconW
+#define LoadImage LoadImageW
+#define PostMessage PostMessageW
+#define ReplaceFile ReplaceFileW
+#define ReportEvent ReportEventW
+#define SendMessage SendMessageW
+#define SendMessageCallback SendMessageCallbackW
+#define SetCurrentDirectory SetCurrentDirectoryW
+#define StartService StartServiceW
+#define StrCat StrCatW
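+
+// For example, with the renaming above a declaration written as
+//   BOOL GetMessage(LPMSG msg, HWND hwnd, UINT min, UINT max);
+// preprocesses to GetMessageW(...) here, exactly as it would in a translation
+// unit that includes windows.h, so all translation units link consistently.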
+
+#endif  // BASE_WIN_WINDOWS_TYPES_H
diff --git a/base/win/windows_version.cc b/base/win/windows_version.cc
new file mode 100644
index 0000000..a74c80a
--- /dev/null
+++ b/base/win/windows_version.cc
@@ -0,0 +1,281 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/windows_version.h"
+
+#include <windows.h>
+
+#include <memory>
+
+#include "base/file_version_info_win.h"
+#include "base/files/file_path.h"
+#include "base/logging.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/win/registry.h"
+
+#if !defined(__clang__) && _MSC_FULL_VER < 191125507
+#error VS 2017 Update 3.2 or higher is required
+#endif
+
+#if !defined(NTDDI_WIN10_RS2)
+// Windows 10 April 2018 SDK is required to build Chrome.
+#error April 2018 SDK (10.0.17134.0) or higher required.
+#endif
+
+namespace {
+typedef BOOL (WINAPI *GetProductInfoPtr)(DWORD, DWORD, DWORD, DWORD, PDWORD);
+}  // namespace
+
+namespace base {
+namespace win {
+
+namespace {
+
+// Helper to map a major.minor.x.build version (e.g. 6.1) to a Windows release.
+Version MajorMinorBuildToVersion(int major, int minor, int build) {
+  if ((major == 5) && (minor > 0)) {
+    // Treat XP Pro x64, Home Server, and Server 2003 R2 as Server 2003.
+    return (minor == 1) ? VERSION_XP : VERSION_SERVER_2003;
+  } else if (major == 6) {
+    switch (minor) {
+      case 0:
+        // Treat Windows Server 2008 the same as Windows Vista.
+        return VERSION_VISTA;
+      case 1:
+        // Treat Windows Server 2008 R2 the same as Windows 7.
+        return VERSION_WIN7;
+      case 2:
+        // Treat Windows Server 2012 the same as Windows 8.
+        return VERSION_WIN8;
+      default:
+        DCHECK_EQ(minor, 3);
+        return VERSION_WIN8_1;
+    }
+  } else if (major == 10) {
+    if (build < 10586) {
+      return VERSION_WIN10;
+    } else if (build < 14393) {
+      return VERSION_WIN10_TH2;
+    } else if (build < 15063) {
+      return VERSION_WIN10_RS1;
+    } else if (build < 16299) {
+      return VERSION_WIN10_RS2;
+    } else if (build < 17134) {
+      return VERSION_WIN10_RS3;
+    } else {
+      return VERSION_WIN10_RS4;
+    }
+  } else if (major > 6) {
+    NOTREACHED();
+    return VERSION_WIN_LAST;
+  }
+
+  return VERSION_PRE_XP;
+}
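+
+// For example, per the mapping above, MajorMinorBuildToVersion(6, 1, 7601)
+// yields VERSION_WIN7 and MajorMinorBuildToVersion(10, 0, 16299) yields
+// VERSION_WIN10_RS3.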
+
+// Retrieve a version from kernel32. This is useful because when running in
+// compatibility mode for a down-level version of the OS, the file version of
+// kernel32 will still be the "real" version.
+Version GetVersionFromKernel32() {
+  std::unique_ptr<FileVersionInfoWin> file_version_info(
+      static_cast<FileVersionInfoWin*>(
+          FileVersionInfoWin::CreateFileVersionInfo(
+              base::FilePath(FILE_PATH_LITERAL("kernel32.dll")))));
+  if (file_version_info) {
+    const int major =
+        HIWORD(file_version_info->fixed_file_info()->dwFileVersionMS);
+    const int minor =
+        LOWORD(file_version_info->fixed_file_info()->dwFileVersionMS);
+    const int build =
+        HIWORD(file_version_info->fixed_file_info()->dwFileVersionLS);
+    return MajorMinorBuildToVersion(major, minor, build);
+  }
+
+  NOTREACHED();
+  return VERSION_WIN_LAST;
+}
+
+// Returns the "UBR" value from the registry. Introduced in Windows 10,
+// this undocumented value appears to be similar to a patch number.
+// Returns 0 if the value does not exist or it could not be read.
+int GetUBR() {
+  // The values under the CurrentVersion registry hive are mirrored under
+  // the corresponding Wow6432 hive.
+  static constexpr wchar_t kRegKeyWindowsNTCurrentVersion[] =
+      L"SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion";
+
+  base::win::RegKey key;
+  if (key.Open(HKEY_LOCAL_MACHINE, kRegKeyWindowsNTCurrentVersion,
+               KEY_QUERY_VALUE) != ERROR_SUCCESS) {
+    return 0;
+  }
+
+  DWORD ubr = 0;
+  key.ReadValueDW(L"UBR", &ubr);
+
+  return static_cast<int>(ubr);
+}
+
+}  // namespace
+
+// static
+OSInfo* OSInfo::GetInstance() {
+  // Note: we don't use the Singleton class because it depends on AtExitManager,
+  // and it's convenient for other modules to use this class without it. This
+  // pattern is copied from gurl.cc.
+  static OSInfo* info;
+  if (!info) {
+    OSInfo* new_info = new OSInfo();
+    if (InterlockedCompareExchangePointer(
+        reinterpret_cast<PVOID*>(&info), new_info, NULL)) {
+      delete new_info;
+    }
+  }
+  return info;
+}
+
+OSInfo::OSInfo()
+    : version_(VERSION_PRE_XP),
+      kernel32_version_(VERSION_PRE_XP),
+      got_kernel32_version_(false),
+      architecture_(OTHER_ARCHITECTURE),
+      wow64_status_(GetWOW64StatusForProcess(GetCurrentProcess())) {
+  OSVERSIONINFOEX version_info = { sizeof version_info };
+  ::GetVersionEx(reinterpret_cast<OSVERSIONINFO*>(&version_info));
+  version_number_.major = version_info.dwMajorVersion;
+  version_number_.minor = version_info.dwMinorVersion;
+  version_number_.build = version_info.dwBuildNumber;
+  version_number_.patch = GetUBR();
+  version_ = MajorMinorBuildToVersion(
+      version_number_.major, version_number_.minor, version_number_.build);
+  service_pack_.major = version_info.wServicePackMajor;
+  service_pack_.minor = version_info.wServicePackMinor;
+  service_pack_str_ = base::WideToUTF8(version_info.szCSDVersion);
+
+  SYSTEM_INFO system_info = {};
+  ::GetNativeSystemInfo(&system_info);
+  switch (system_info.wProcessorArchitecture) {
+    case PROCESSOR_ARCHITECTURE_INTEL: architecture_ = X86_ARCHITECTURE; break;
+    case PROCESSOR_ARCHITECTURE_AMD64: architecture_ = X64_ARCHITECTURE; break;
+    case PROCESSOR_ARCHITECTURE_IA64:  architecture_ = IA64_ARCHITECTURE; break;
+  }
+  processors_ = system_info.dwNumberOfProcessors;
+  allocation_granularity_ = system_info.dwAllocationGranularity;
+
+  GetProductInfoPtr get_product_info;
+  DWORD os_type;
+
+  if (version_info.dwMajorVersion == 6 || version_info.dwMajorVersion == 10) {
+    // Only present on Vista+.
+    get_product_info = reinterpret_cast<GetProductInfoPtr>(
+        ::GetProcAddress(::GetModuleHandle(L"kernel32.dll"), "GetProductInfo"));
+
+    get_product_info(version_info.dwMajorVersion, version_info.dwMinorVersion,
+                     0, 0, &os_type);
+    switch (os_type) {
+      case PRODUCT_CLUSTER_SERVER:
+      case PRODUCT_DATACENTER_SERVER:
+      case PRODUCT_DATACENTER_SERVER_CORE:
+      case PRODUCT_ENTERPRISE_SERVER:
+      case PRODUCT_ENTERPRISE_SERVER_CORE:
+      case PRODUCT_ENTERPRISE_SERVER_IA64:
+      case PRODUCT_SMALLBUSINESS_SERVER:
+      case PRODUCT_SMALLBUSINESS_SERVER_PREMIUM:
+      case PRODUCT_STANDARD_SERVER:
+      case PRODUCT_STANDARD_SERVER_CORE:
+      case PRODUCT_WEB_SERVER:
+        version_type_ = SUITE_SERVER;
+        break;
+      case PRODUCT_PROFESSIONAL:
+      case PRODUCT_ULTIMATE:
+        version_type_ = SUITE_PROFESSIONAL;
+        break;
+      case PRODUCT_ENTERPRISE:
+      case PRODUCT_ENTERPRISE_E:
+      case PRODUCT_ENTERPRISE_EVALUATION:
+      case PRODUCT_ENTERPRISE_N:
+      case PRODUCT_ENTERPRISE_N_EVALUATION:
+      case PRODUCT_ENTERPRISE_S:
+      case PRODUCT_ENTERPRISE_S_EVALUATION:
+      case PRODUCT_ENTERPRISE_S_N:
+      case PRODUCT_ENTERPRISE_S_N_EVALUATION:
+      case PRODUCT_BUSINESS:
+      case PRODUCT_BUSINESS_N:
+        version_type_ = SUITE_ENTERPRISE;
+        break;
+      case PRODUCT_EDUCATION:
+      case PRODUCT_EDUCATION_N:
+        version_type_ = SUITE_EDUCATION;
+        break;
+      case PRODUCT_HOME_BASIC:
+      case PRODUCT_HOME_PREMIUM:
+      case PRODUCT_STARTER:
+      default:
+        version_type_ = SUITE_HOME;
+        break;
+    }
+  } else if (version_info.dwMajorVersion == 5 &&
+             version_info.dwMinorVersion == 2) {
+    if (version_info.wProductType == VER_NT_WORKSTATION &&
+        system_info.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
+      version_type_ = SUITE_PROFESSIONAL;
+    } else if (version_info.wSuiteMask & VER_SUITE_WH_SERVER) {
+      version_type_ = SUITE_HOME;
+    } else {
+      version_type_ = SUITE_SERVER;
+    }
+  } else if (version_info.dwMajorVersion == 5 &&
+             version_info.dwMinorVersion == 1) {
+    if (version_info.wSuiteMask & VER_SUITE_PERSONAL)
+      version_type_ = SUITE_HOME;
+    else
+      version_type_ = SUITE_PROFESSIONAL;
+  } else {
+    // Windows is pre-XP so we don't care, but pick a safe default.
+    version_type_ = SUITE_HOME;
+  }
+}
+
+OSInfo::~OSInfo() {
+}
+
+Version OSInfo::Kernel32Version() const {
+  if (!got_kernel32_version_) {
+    kernel32_version_ = GetVersionFromKernel32();
+    got_kernel32_version_ = true;
+  }
+  return kernel32_version_;
+}
+
+std::string OSInfo::processor_model_name() {
+  if (processor_model_name_.empty()) {
+    const wchar_t kProcessorNameString[] =
+        L"HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0";
+    base::win::RegKey key(HKEY_LOCAL_MACHINE, kProcessorNameString, KEY_READ);
+    string16 value;
+    key.ReadValue(L"ProcessorNameString", &value);
+    processor_model_name_ = UTF16ToUTF8(value);
+  }
+  return processor_model_name_;
+}
+
+// static
+OSInfo::WOW64Status OSInfo::GetWOW64StatusForProcess(HANDLE process_handle) {
+  typedef BOOL (WINAPI* IsWow64ProcessFunc)(HANDLE, PBOOL);
+  IsWow64ProcessFunc is_wow64_process = reinterpret_cast<IsWow64ProcessFunc>(
+      GetProcAddress(GetModuleHandle(L"kernel32.dll"), "IsWow64Process"));
+  if (!is_wow64_process)
+    return WOW64_DISABLED;
+  BOOL is_wow64 = FALSE;
+  if (!(*is_wow64_process)(process_handle, &is_wow64))
+    return WOW64_UNKNOWN;
+  return is_wow64 ? WOW64_ENABLED : WOW64_DISABLED;
+}
+
+Version GetVersion() {
+  return OSInfo::GetInstance()->version();
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/windows_version.h b/base/win/windows_version.h
new file mode 100644
index 0000000..978df3d
--- /dev/null
+++ b/base/win/windows_version.h
@@ -0,0 +1,149 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_WINDOWS_VERSION_H_
+#define BASE_WIN_WINDOWS_VERSION_H_
+
+#include <stddef.h>
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+
+typedef void* HANDLE;
+
+namespace base {
+namespace win {
+
+// The running version of Windows.  This is declared outside OSInfo for
+// syntactic sugar reasons; see the declaration of GetVersion() below.
+// NOTE: Keep these in order so callers can do things like
+// "if (base::win::GetVersion() >= base::win::VERSION_VISTA) ...".
+//
+// This enum is used in metrics histograms, so its values shouldn't be
+// reordered or removed. New values can be added before VERSION_WIN_LAST.
+enum Version {
+  VERSION_PRE_XP = 0,  // Not supported.
+  VERSION_XP = 1,
+  VERSION_SERVER_2003 = 2,  // Also includes XP Pro x64 and Server 2003 R2.
+  VERSION_VISTA = 3,        // Also includes Windows Server 2008.
+  VERSION_WIN7 = 4,         // Also includes Windows Server 2008 R2.
+  VERSION_WIN8 = 5,         // Also includes Windows Server 2012.
+  VERSION_WIN8_1 = 6,       // Also includes Windows Server 2012 R2.
+  VERSION_WIN10 = 7,        // Threshold 1: Version 1507, Build 10240.
+  VERSION_WIN10_TH2 = 8,    // Threshold 2: Version 1511, Build 10586.
+  VERSION_WIN10_RS1 = 9,    // Redstone 1: Version 1607, Build 14393.
+  VERSION_WIN10_RS2 = 10,   // Redstone 2: Version 1703, Build 15063.
+  VERSION_WIN10_RS3 = 11,   // Redstone 3: Version 1709, Build 16299.
+  VERSION_WIN10_RS4 = 12,   // Redstone 4: Version 1803, Build 17134.
+  // On edit, update tools\metrics\histograms\enums.xml "WindowsVersion" and
+  // "GpuBlacklistFeatureTestResultsWindows2".
+  VERSION_WIN_LAST,  // Indicates error condition.
+};
+
+// A rough bucketing of the available types of versions of Windows. This is
+// used to distinguish enterprise-enabled versions from home versions and
+// potentially server versions. Keep these values in order; they are used
+// as-is for metrics histogram ids.
+enum VersionType {
+  SUITE_HOME = 0,
+  SUITE_PROFESSIONAL,
+  SUITE_SERVER,
+  SUITE_ENTERPRISE,
+  SUITE_EDUCATION,
+  SUITE_LAST,
+};
+
+// A singleton that can be used to query various pieces of information about
+// the OS and process state. Note that this doesn't use the base Singleton
+// class, so it can be used without an AtExitManager.
+class BASE_EXPORT OSInfo {
+ public:
+  struct VersionNumber {
+    int major;
+    int minor;
+    int build;
+    int patch;
+  };
+
+  struct ServicePack {
+    int major;
+    int minor;
+  };
+
+  // The processor architecture this copy of Windows natively uses.  For
+  // example, given an x64-capable processor, we have three possibilities:
+  //   32-bit Chrome running on 32-bit Windows:           X86_ARCHITECTURE
+  //   32-bit Chrome running on 64-bit Windows via WOW64: X64_ARCHITECTURE
+  //   64-bit Chrome running on 64-bit Windows:           X64_ARCHITECTURE
+  enum WindowsArchitecture {
+    X86_ARCHITECTURE,
+    X64_ARCHITECTURE,
+    IA64_ARCHITECTURE,
+    OTHER_ARCHITECTURE,
+  };
+
+  // Whether a process is running under WOW64 (the wrapper that allows 32-bit
+  // processes to run on 64-bit versions of Windows).  This will return
+  // WOW64_DISABLED for both "32-bit Chrome on 32-bit Windows" and "64-bit
+  // Chrome on 64-bit Windows".  WOW64_UNKNOWN means "an error occurred", e.g.
+  // the process does not have sufficient access rights to determine this.
+  enum WOW64Status {
+    WOW64_DISABLED,
+    WOW64_ENABLED,
+    WOW64_UNKNOWN,
+  };
+
+  static OSInfo* GetInstance();
+
+  Version version() const { return version_; }
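+  // The version as deduced from kernel32.dll's file version; unlike
+  // version(), this is not affected by running in compatibility mode (see
+  // windows_version_unittest.cc).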
+  Version Kernel32Version() const;
+  // version_number() returns the full {major, minor, build, patch} tuple;
+  // version_type() returns the rough SKU bucket (see VersionType above).
+  VersionNumber version_number() const { return version_number_; }
+  VersionType version_type() const { return version_type_; }
+  ServicePack service_pack() const { return service_pack_; }
+  std::string service_pack_str() const { return service_pack_str_; }
+  WindowsArchitecture architecture() const { return architecture_; }
+  int processors() const { return processors_; }
+  size_t allocation_granularity() const { return allocation_granularity_; }
+  WOW64Status wow64_status() const { return wow64_status_; }
+  std::string processor_model_name();
+
+  // Like wow64_status(), but for the supplied handle instead of the current
+  // process. This doesn't touch member state, so you can bypass the
+  // singleton.
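+  //
+  // A minimal usage sketch (|process| is a hypothetical handle opened with
+  // sufficient access rights, not part of this API):
+  //
+  //   OSInfo::WOW64Status status =
+  //       OSInfo::GetWOW64StatusForProcess(process);
+  //   if (status == OSInfo::WOW64_ENABLED) {
+  //     // |process| is a 32-bit process running on 64-bit Windows.
+  //   }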
+  static WOW64Status GetWOW64StatusForProcess(HANDLE process_handle);
+
+ private:
+  OSInfo();
+  ~OSInfo();
+
+  Version version_;
+  mutable Version kernel32_version_;
+  mutable bool got_kernel32_version_;
+  VersionNumber version_number_;
+  VersionType version_type_;
+  ServicePack service_pack_;
+
+  // A string, such as "Service Pack 3", that indicates the latest Service Pack
+  // installed on the system. If no Service Pack has been installed, the string
+  // is empty.
+  std::string service_pack_str_;
+  WindowsArchitecture architecture_;
+  int processors_;
+  size_t allocation_granularity_;
+  WOW64Status wow64_status_;
+  std::string processor_model_name_;
+
+  DISALLOW_COPY_AND_ASSIGN(OSInfo);
+};
+
+// Because this is by far the most commonly-requested value from the above
+// singleton, we add a global-scope accessor here as syntactic sugar.
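+//
+// A typical (illustrative) use, relying on the ordering guarantee documented
+// above:
+//
+//   if (base::win::GetVersion() >= base::win::VERSION_WIN8) {
+//     // Take a code path that requires Windows 8 or later.
+//   }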
+BASE_EXPORT Version GetVersion();
+
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_WIN_WINDOWS_VERSION_H_
diff --git a/base/win/windows_version_unittest.cc b/base/win/windows_version_unittest.cc
new file mode 100644
index 0000000..f0d6d96
--- /dev/null
+++ b/base/win/windows_version_unittest.cc
@@ -0,0 +1,22 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/windows_version.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace win {
+namespace {
+
+TEST(WindowsVersion, GetVersionExAndKernelVersionMatch) {
+  // If this fails, we're running in compatibility mode, or need to update the
+  // application manifest.
+  EXPECT_EQ(OSInfo::GetInstance()->version(),
+            OSInfo::GetInstance()->Kernel32Version());
+}
+
+}  // namespace
+}  // namespace win
+}  // namespace base
diff --git a/base/win/winrt_storage_util.cc b/base/win/winrt_storage_util.cc
new file mode 100644
index 0000000..262d817
--- /dev/null
+++ b/base/win/winrt_storage_util.cc
@@ -0,0 +1,72 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/winrt_storage_util.h"
+
+#include <robuffer.h>
+#include <string.h>
+#include <wrl/client.h>
+
+#include "base/strings/string_util.h"
+#include "base/win/core_winrt_util.h"
+#include "base/win/scoped_hstring.h"
+
+namespace base {
+namespace win {
+
+using IBuffer = ABI::Windows::Storage::Streams::IBuffer;
+
+HRESULT GetPointerToBufferData(IBuffer* buffer, uint8_t** out, UINT32* length) {
+  *out = nullptr;
+
+  Microsoft::WRL::ComPtr<Windows::Storage::Streams::IBufferByteAccess>
+      buffer_byte_access;
+  HRESULT hr = buffer->QueryInterface(IID_PPV_ARGS(&buffer_byte_access));
+  if (FAILED(hr))
+    return hr;
+
+  hr = buffer->get_Length(length);
+  if (FAILED(hr))
+    return hr;
+
+  // The lifetime of the returned pointer is controlled by the buffer object.
+  return buffer_byte_access->Buffer(out);
+}
+
+HRESULT CreateIBufferFromData(const uint8_t* data,
+                              UINT32 length,
+                              Microsoft::WRL::ComPtr<IBuffer>* buffer) {
+  *buffer = nullptr;
+
+  Microsoft::WRL::ComPtr<ABI::Windows::Storage::Streams::IBufferFactory>
+      buffer_factory;
+  HRESULT hr = base::win::GetActivationFactory<
+      ABI::Windows::Storage::Streams::IBufferFactory,
+      RuntimeClass_Windows_Storage_Streams_Buffer>(&buffer_factory);
+  if (FAILED(hr))
+    return hr;
+
+  Microsoft::WRL::ComPtr<IBuffer> internal_buffer;
+  hr = buffer_factory->Create(length, internal_buffer.GetAddressOf());
+  if (FAILED(hr))
+    return hr;
+
+  hr = internal_buffer->put_Length(length);
+  if (FAILED(hr))
+    return hr;
+
+  uint8_t* p_buffer_data;
+  hr = GetPointerToBufferData(internal_buffer.Get(), &p_buffer_data, &length);
+  if (FAILED(hr))
+    return hr;
+
+  memcpy(p_buffer_data, data, length);
+
+  *buffer = std::move(internal_buffer);
+
+  return S_OK;
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/winrt_storage_util.h b/base/win/winrt_storage_util.h
new file mode 100644
index 0000000..e24336c
--- /dev/null
+++ b/base/win/winrt_storage_util.h
@@ -0,0 +1,34 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_WINRT_STORAGE_UTIL_H_
+#define BASE_WIN_WINRT_STORAGE_UTIL_H_
+
+#include <stdint.h>
+#include <windows.storage.streams.h>
+#include <wrl/client.h>
+
+#include "base/base_export.h"
+
+namespace base {
+namespace win {
+
+// Gets a pointer to the array of bytes backing |buffer|; |out| receives the
+// pointer and |length| the byte count, for byte-stream reads and writes.
+BASE_EXPORT HRESULT
+GetPointerToBufferData(ABI::Windows::Storage::Streams::IBuffer* buffer,
+                       uint8_t** out,
+                       UINT32* length);
+
+// Creates a stream |buffer| from |data|, an array of bytes of the given
+// |length|.
+BASE_EXPORT HRESULT CreateIBufferFromData(
+    const uint8_t* data,
+    UINT32 length,
+    Microsoft::WRL::ComPtr<ABI::Windows::Storage::Streams::IBuffer>* buffer);
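+
+// A usage sketch for the two helpers above (illustrative only; error handling
+// is elided and |bytes| is a hypothetical std::vector<uint8_t>):
+//
+//   Microsoft::WRL::ComPtr<ABI::Windows::Storage::Streams::IBuffer> buffer;
+//   HRESULT hr = CreateIBufferFromData(bytes.data(), bytes.size(), &buffer);
+//   uint8_t* data = nullptr;
+//   UINT32 length = 0;
+//   if (SUCCEEDED(hr))
+//     hr = GetPointerToBufferData(buffer.Get(), &data, &length);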
+
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_WIN_WINRT_STORAGE_UTIL_H_
diff --git a/base/win/winrt_storage_util_unittest.cc b/base/win/winrt_storage_util_unittest.cc
new file mode 100644
index 0000000..530ab23
--- /dev/null
+++ b/base/win/winrt_storage_util_unittest.cc
@@ -0,0 +1,42 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/winrt_storage_util.h"
+
+#include <string.h>
+#include <wrl/client.h>
+
+#include "base/strings/string_util.h"
+#include "base/win/core_winrt_util.h"
+#include "base/win/scoped_com_initializer.h"
+#include "base/win/scoped_hstring.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace win {
+
+TEST(WinrtStorageUtilTest, CreateBufferFromData) {
+  ScopedCOMInitializer com_initializer(ScopedCOMInitializer::kMTA);
+
+  if (!ResolveCoreWinRTDelayload() ||
+      !ScopedHString::ResolveCoreWinRTStringDelayload()) {
+    return;
+  }
+
+  const std::vector<uint8_t> data = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
+  Microsoft::WRL::ComPtr<ABI::Windows::Storage::Streams::IBuffer> buffer;
+  ASSERT_HRESULT_SUCCEEDED(
+      CreateIBufferFromData(data.data(), data.size(), &buffer));
+
+  uint8_t* p_buffer_data;
+  uint32_t length;
+  ASSERT_HRESULT_SUCCEEDED(
+      GetPointerToBufferData(buffer.Get(), &p_buffer_data, &length));
+
+  ASSERT_EQ(data.size(), length);
+  EXPECT_EQ(0, memcmp(p_buffer_data, data.data(), data.size()));
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/wrapped_window_proc.cc b/base/win/wrapped_window_proc.cc
new file mode 100644
index 0000000..0a00996
--- /dev/null
+++ b/base/win/wrapped_window_proc.cc
@@ -0,0 +1,78 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/wrapped_window_proc.h"
+
+#include "base/atomicops.h"
+#include "base/logging.h"
+
+namespace {
+
+base::win::WinProcExceptionFilter s_exception_filter = NULL;
+
+HMODULE GetModuleFromWndProc(WNDPROC window_proc) {
+  HMODULE instance = NULL;
+  // Converting a pointer-to-function to a void* is undefined behavior, but
+  // Windows (and POSIX) APIs require it to work.
+  void* address = reinterpret_cast<void*>(window_proc);
+  if (!::GetModuleHandleExA(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS |
+                            GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
+                            static_cast<char*>(address),
+                            &instance)) {
+    NOTREACHED();
+  }
+  return instance;
+}
+
+}  // namespace
+
+namespace base {
+namespace win {
+
+WinProcExceptionFilter SetWinProcExceptionFilter(
+    WinProcExceptionFilter filter) {
+  subtle::AtomicWord rv = subtle::NoBarrier_AtomicExchange(
+      reinterpret_cast<subtle::AtomicWord*>(&s_exception_filter),
+      reinterpret_cast<subtle::AtomicWord>(filter));
+  return reinterpret_cast<WinProcExceptionFilter>(rv);
+}
+
+int CallExceptionFilter(EXCEPTION_POINTERS* info) {
+  return s_exception_filter ? s_exception_filter(info) :
+                              EXCEPTION_CONTINUE_SEARCH;
+}
+
+BASE_EXPORT void InitializeWindowClass(
+    const char16* class_name,
+    WNDPROC window_proc,
+    UINT style,
+    int class_extra,
+    int window_extra,
+    HCURSOR cursor,
+    HBRUSH background,
+    const char16* menu_name,
+    HICON large_icon,
+    HICON small_icon,
+    WNDCLASSEX* class_out) {
+  class_out->cbSize = sizeof(WNDCLASSEX);
+  class_out->style = style;
+  class_out->lpfnWndProc = window_proc;
+  class_out->cbClsExtra = class_extra;
+  class_out->cbWndExtra = window_extra;
+  // RegisterClassEx uses a handle of the module containing the window
+  // procedure to distinguish identically named classes registered in
+  // different modules.
+  class_out->hInstance = GetModuleFromWndProc(window_proc);
+  class_out->hIcon = large_icon;
+  class_out->hCursor = cursor;
+  class_out->hbrBackground = background;
+  class_out->lpszMenuName = menu_name;
+  class_out->lpszClassName = class_name;
+  class_out->hIconSm = small_icon;
+
+  // A NULL instance means GetModuleFromWndProc() failed, i.e. |window_proc|
+  // is not valid.
+  DCHECK(class_out->hInstance != NULL);
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/base/win/wrapped_window_proc.h b/base/win/wrapped_window_proc.h
new file mode 100644
index 0000000..c586be0
--- /dev/null
+++ b/base/win/wrapped_window_proc.h
@@ -0,0 +1,85 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Provides a way to handle exceptions that happen while a WindowProc is
+// running. The behavior of exceptions generated inside a WindowProc is OS
+// dependent, but it is possible that the OS just ignores the exception and
+// continues execution, which leads to unpredictable behavior for Chrome.
+
+#ifndef BASE_WIN_WRAPPED_WINDOW_PROC_H_
+#define BASE_WIN_WRAPPED_WINDOW_PROC_H_
+
+#include <windows.h>
+
+#include "base/base_export.h"
+#include "base/strings/string16.h"
+
+namespace base {
+namespace win {
+
+// An exception filter for a WindowProc. The return value determines how the
+// exception should be handled, following standard SEH rules. However, the
+// expected behavior for this function is to not return, instead of returning
+// EXCEPTION_EXECUTE_HANDLER or similar, given that in general we are not
+// prepared to handle exceptions.
+typedef int (__cdecl *WinProcExceptionFilter)(EXCEPTION_POINTERS* info);
+
+// Sets the filter to deal with exceptions inside a WindowProc. Returns the old
+// exception filter, if any.
+// This function should be called before any window is created.
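+//
+// A sketch of installing a filter (illustrative; |DumpAndDie| is a
+// hypothetical handler, not part of base):
+//
+//   int __cdecl DumpAndDie(EXCEPTION_POINTERS* info) {
+//     // Record |info| somewhere, then terminate instead of resuming.
+//     TerminateProcess(GetCurrentProcess(), 1);
+//     return EXCEPTION_EXECUTE_HANDLER;  // Not reached.
+//   }
+//
+//   base::win::SetWinProcExceptionFilter(&DumpAndDie);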
+BASE_EXPORT WinProcExceptionFilter SetWinProcExceptionFilter(
+    WinProcExceptionFilter filter);
+
+// Calls the registered exception filter.
+BASE_EXPORT int CallExceptionFilter(EXCEPTION_POINTERS* info);
+
+// Initializes the WNDCLASSEX structure |*class_out| to be passed to
+// RegisterClassEx() making sure that it is associated with the module
+// containing the window procedure.
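+//
+// A sketch of typical use (illustrative; MyWinProc is the caller's window
+// procedure, as in the WrappedWindowProc example below):
+//
+//   WNDCLASSEX wc;
+//   base::win::InitializeWindowClass(
+//       L"MyClass", base::win::WrappedWindowProc<MyWinProc>, CS_DBLCLKS, 0,
+//       0, ::LoadCursor(NULL, IDC_ARROW), NULL, NULL, NULL, NULL, &wc);
+//   RegisterClassEx(&wc);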
+BASE_EXPORT void InitializeWindowClass(
+    const char16* class_name,
+    WNDPROC window_proc,
+    UINT style,
+    int class_extra,
+    int window_extra,
+    HCURSOR cursor,
+    HBRUSH background,
+    const char16* menu_name,
+    HICON large_icon,
+    HICON small_icon,
+    WNDCLASSEX* class_out);
+
+// Wrapper that supplies a standard exception frame for the provided
+// WindowProc. The normal usage is something like this:
+//
+// LRESULT CALLBACK MyWinProc(HWND hwnd, UINT message,
+//                            WPARAM wparam, LPARAM lparam) {
+//   // Do Something.
+// }
+//
+// ...
+//
+//   WNDCLASSEX wc = {0};
+//   wc.lpfnWndProc = WrappedWindowProc<MyWinProc>;
+//   wc.lpszClassName = class_name;
+//   ...
+//   RegisterClassEx(&wc);
+//
+//   CreateWindowW(class_name, window_name, ...
+//
+template <WNDPROC proc>
+LRESULT CALLBACK
+WrappedWindowProc(HWND hwnd, UINT message, WPARAM wparam, LPARAM lparam) {
+  LRESULT rv = 0;
+  __try {
+    rv = proc(hwnd, message, wparam, lparam);
+  } __except(CallExceptionFilter(GetExceptionInformation())) {
+  }
+  return rv;
+}
+
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_WIN_WRAPPED_WINDOW_PROC_H_
diff --git a/base/win/wrapped_window_proc_unittest.cc b/base/win/wrapped_window_proc_unittest.cc
new file mode 100644
index 0000000..25ba2d4
--- /dev/null
+++ b/base/win/wrapped_window_proc_unittest.cc
@@ -0,0 +1,78 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/wrapped_window_proc.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+const DWORD kExceptionCode = 12345;
+const WPARAM kCrashMsg = 98765;
+
+// A trivial WindowProc that generates an exception.
+LRESULT CALLBACK TestWindowProc(HWND hwnd, UINT message,
+                                WPARAM wparam, LPARAM lparam) {
+  if (message == kCrashMsg)
+    RaiseException(kExceptionCode, 0, 0, NULL);
+  return DefWindowProc(hwnd, message, wparam, lparam);
+}
+
+// This class implements an exception filter that can be queried about a past
+// exception.
+class TestWrappedExceptionFilter {
+ public:
+  TestWrappedExceptionFilter() : called_(false) {
+    EXPECT_FALSE(s_filter_);
+    s_filter_ = this;
+  }
+
+  ~TestWrappedExceptionFilter() {
+    EXPECT_EQ(s_filter_, this);
+    s_filter_ = NULL;
+  }
+
+  bool called() {
+    return called_;
+  }
+
+  // The actual exception filter just records the exception.
+  static int Filter(EXCEPTION_POINTERS* info) {
+    EXPECT_FALSE(s_filter_->called_);
+    if (info->ExceptionRecord->ExceptionCode == kExceptionCode)
+      s_filter_->called_ = true;
+    return EXCEPTION_EXECUTE_HANDLER;
+  }
+
+ private:
+  bool called_;
+  static TestWrappedExceptionFilter* s_filter_;
+};
+TestWrappedExceptionFilter* TestWrappedExceptionFilter::s_filter_ = NULL;
+
+}  // namespace
+
+TEST(WrappedWindowProc, CatchesExceptions) {
+  HINSTANCE hinst = GetModuleHandle(NULL);
+  std::wstring class_name(L"TestClass");
+
+  WNDCLASS wc = {0};
+  wc.lpfnWndProc = base::win::WrappedWindowProc<TestWindowProc>;
+  wc.hInstance = hinst;
+  wc.lpszClassName = class_name.c_str();
+  RegisterClass(&wc);
+
+  HWND window = CreateWindow(class_name.c_str(), 0, 0, 0, 0, 0, 0, HWND_MESSAGE,
+                             0, hinst, 0);
+  ASSERT_TRUE(window);
+
+  // Before generating the exception we make sure that the filter will see it.
+  TestWrappedExceptionFilter wrapper;
+  base::win::WinProcExceptionFilter old_filter =
+      base::win::SetWinProcExceptionFilter(TestWrappedExceptionFilter::Filter);
+
+  SendMessage(window, kCrashMsg, 0, 0);
+  EXPECT_TRUE(wrapper.called());
+
+  base::win::SetWinProcExceptionFilter(old_filter);
+}